summaryrefslogtreecommitdiff
path: root/chromium/gpu
diff options
context:
space:
mode:
authorAllan Sandfeld Jensen <allan.jensen@qt.io>2019-05-16 09:59:13 +0200
committerAllan Sandfeld Jensen <allan.jensen@qt.io>2019-05-20 10:28:53 +0000
commit6c11fb357ec39bf087b8b632e2b1e375aef1b38b (patch)
treec8315530db18a8ee566521c39ab8a6af4f72bc03 /chromium/gpu
parent3ffaed019d0772e59d6cdb2d0d32fe4834c31f72 (diff)
downloadqtwebengine-chromium-6c11fb357ec39bf087b8b632e2b1e375aef1b38b.tar.gz
BASELINE: Update Chromium to 74.0.3729.159
Change-Id: I8d2497da544c275415aedd94dd25328d555de811 Reviewed-by: Michael BrĂ¼ning <michael.bruning@qt.io>
Diffstat (limited to 'chromium/gpu')
-rw-r--r--chromium/gpu/BUILD.gn28
-rw-r--r--chromium/gpu/GLES2/extensions/CHROMIUM/CHROMIUM_shared_image.txt29
-rw-r--r--chromium/gpu/GLES2/gl2chromium_autogen.h9
-rw-r--r--chromium/gpu/command_buffer/build_cmd_buffer_lib.py6
-rwxr-xr-xchromium/gpu/command_buffer/build_gles2_cmd_buffer.py69
-rw-r--r--chromium/gpu/command_buffer/client/client_discardable_manager_unittest.cc2
-rw-r--r--chromium/gpu/command_buffer/client/client_transfer_cache.cc7
-rw-r--r--chromium/gpu/command_buffer/client/cmd_buffer_helper_test.cc6
-rw-r--r--chromium/gpu/command_buffer/client/command_buffer_direct_locked.h4
-rw-r--r--chromium/gpu/command_buffer/client/fenced_allocator_test.cc6
-rw-r--r--chromium/gpu/command_buffer/client/gles2_c_lib_autogen.h88
-rw-r--r--chromium/gpu/command_buffer/client/gles2_cmd_helper_autogen.h85
-rw-r--r--chromium/gpu/command_buffer/client/gles2_implementation.cc283
-rw-r--r--chromium/gpu/command_buffer/client/gles2_implementation.h14
-rw-r--r--chromium/gpu/command_buffer/client/gles2_implementation_autogen.h40
-rw-r--r--chromium/gpu/command_buffer/client/gles2_implementation_impl_autogen.h71
-rw-r--r--chromium/gpu/command_buffer/client/gles2_implementation_unittest.cc20
-rw-r--r--chromium/gpu/command_buffer/client/gles2_implementation_unittest_autogen.h30
-rw-r--r--chromium/gpu/command_buffer/client/gles2_interface.h2
-rw-r--r--chromium/gpu/command_buffer/client/gles2_interface_autogen.h33
-rw-r--r--chromium/gpu/command_buffer/client/gles2_interface_stub_autogen.h33
-rw-r--r--chromium/gpu/command_buffer/client/gles2_interface_stub_impl_autogen.h43
-rw-r--r--chromium/gpu/command_buffer/client/gles2_trace_implementation_autogen.h33
-rw-r--r--chromium/gpu/command_buffer/client/gles2_trace_implementation_impl_autogen.h73
-rw-r--r--chromium/gpu/command_buffer/client/gpu_control_client.h3
-rw-r--r--chromium/gpu/command_buffer/client/image_decode_accelerator_interface.h8
-rw-r--r--chromium/gpu/command_buffer/client/implementation_base.cc1
-rw-r--r--chromium/gpu/command_buffer/client/mapped_memory_unittest.cc6
-rw-r--r--chromium/gpu/command_buffer/client/mock_transfer_buffer.cc6
-rw-r--r--chromium/gpu/command_buffer/client/mock_transfer_buffer.h1
-rw-r--r--chromium/gpu/command_buffer/client/program_info_manager.cc44
-rw-r--r--chromium/gpu/command_buffer/client/program_info_manager.h21
-rw-r--r--chromium/gpu/command_buffer/client/raster_cmd_helper_autogen.h4
-rw-r--r--chromium/gpu/command_buffer/client/raster_implementation.cc112
-rw-r--r--chromium/gpu/command_buffer/client/raster_implementation.h8
-rw-r--r--chromium/gpu/command_buffer/client/raster_implementation_gles.cc12
-rw-r--r--chromium/gpu/command_buffer/client/raster_implementation_gles.h7
-rw-r--r--chromium/gpu/command_buffer/client/raster_implementation_gles_unittest.cc6
-rw-r--r--chromium/gpu/command_buffer/client/raster_implementation_unittest.cc1
-rw-r--r--chromium/gpu/command_buffer/client/raster_interface.h23
-rw-r--r--chromium/gpu/command_buffer/client/ring_buffer_test.cc6
-rw-r--r--chromium/gpu/command_buffer/client/transfer_buffer.cc8
-rw-r--r--chromium/gpu/command_buffer/client/transfer_buffer.h6
-rw-r--r--chromium/gpu/command_buffer/client/webgpu_implementation.cc139
-rw-r--r--chromium/gpu/command_buffer/client/webgpu_implementation.h68
-rw-r--r--chromium/gpu/command_buffer/client/webgpu_implementation_unittest.cc16
-rw-r--r--chromium/gpu/command_buffer/common/constants.h3
-rw-r--r--chromium/gpu/command_buffer/common/context_creation_attribs.h1
-rw-r--r--chromium/gpu/command_buffer/common/gles2_cmd_format_autogen.h422
-rw-r--r--chromium/gpu/command_buffer/common/gles2_cmd_format_test_autogen.h115
-rw-r--r--chromium/gpu/command_buffer/common/gles2_cmd_ids_autogen.h236
-rw-r--r--chromium/gpu/command_buffer/common/gles2_cmd_utils.cc13
-rw-r--r--chromium/gpu/command_buffer/common/gles2_cmd_utils.h1
-rw-r--r--chromium/gpu/command_buffer/common/gpu_memory_buffer_support.cc15
-rw-r--r--chromium/gpu/command_buffer/common/gpu_memory_buffer_support.h5
-rw-r--r--chromium/gpu/command_buffer/common/id_type_unittest.cc16
-rw-r--r--chromium/gpu/command_buffer/common/raster_cmd_format_autogen.h17
-rw-r--r--chromium/gpu/command_buffer/common/raster_cmd_format_test_autogen.h3
-rw-r--r--chromium/gpu/command_buffer/common/shared_image_usage.h2
-rw-r--r--chromium/gpu/command_buffer/gles2_cmd_buffer_functions.txt11
-rw-r--r--chromium/gpu/command_buffer/raster_cmd_buffer_functions.txt2
-rw-r--r--chromium/gpu/command_buffer/service/BUILD.gn34
-rw-r--r--chromium/gpu/command_buffer/service/buffer_manager.h1
-rw-r--r--chromium/gpu/command_buffer/service/command_buffer_direct.cc11
-rw-r--r--chromium/gpu/command_buffer/service/command_buffer_direct.h8
-rw-r--r--chromium/gpu/command_buffer/service/command_buffer_service.cc14
-rw-r--r--chromium/gpu/command_buffer/service/command_buffer_service.h7
-rw-r--r--chromium/gpu/command_buffer/service/command_buffer_service_unittest.cc7
-rw-r--r--chromium/gpu/command_buffer/service/context_group.cc13
-rw-r--r--chromium/gpu/command_buffer/service/context_group.h6
-rw-r--r--chromium/gpu/command_buffer/service/copy_texture_chromium_mock.h6
-rw-r--r--chromium/gpu/command_buffer/service/decoder_client.h4
-rw-r--r--chromium/gpu/command_buffer/service/decoder_context.h8
-rw-r--r--chromium/gpu/command_buffer/service/error_state.cc6
-rw-r--r--chromium/gpu/command_buffer/service/external_vk_image_backing.cc249
-rw-r--r--chromium/gpu/command_buffer/service/external_vk_image_backing.h108
-rw-r--r--chromium/gpu/command_buffer/service/external_vk_image_factory.cc212
-rw-r--r--chromium/gpu/command_buffer/service/external_vk_image_factory.h66
-rw-r--r--chromium/gpu/command_buffer/service/external_vk_image_gl_representation.cc164
-rw-r--r--chromium/gpu/command_buffer/service/external_vk_image_gl_representation.h46
-rw-r--r--chromium/gpu/command_buffer/service/external_vk_image_skia_representation.cc142
-rw-r--r--chromium/gpu/command_buffer/service/external_vk_image_skia_representation.h66
-rw-r--r--chromium/gpu/command_buffer/service/feature_info.cc34
-rw-r--r--chromium/gpu/command_buffer/service/feature_info.h4
-rw-r--r--chromium/gpu/command_buffer/service/feature_info_unittest.cc23
-rw-r--r--chromium/gpu/command_buffer/service/framebuffer_manager.cc13
-rw-r--r--chromium/gpu/command_buffer/service/framebuffer_manager.h4
-rw-r--r--chromium/gpu/command_buffer/service/gl_stream_texture_image_stub.cc5
-rw-r--r--chromium/gpu/command_buffer/service/gl_stream_texture_image_stub.h1
-rw-r--r--chromium/gpu/command_buffer/service/gl_surface_mock.h5
-rw-r--r--chromium/gpu/command_buffer/service/gl_utils.cc16
-rw-r--r--chromium/gpu/command_buffer/service/gles2_cmd_copy_texture_chromium.cc7
-rw-r--r--chromium/gpu/command_buffer/service/gles2_cmd_decoder.cc700
-rw-r--r--chromium/gpu/command_buffer/service/gles2_cmd_decoder.h4
-rw-r--r--chromium/gpu/command_buffer/service/gles2_cmd_decoder_autogen.h65
-rw-r--r--chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough.cc32
-rw-r--r--chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough_doer_prototypes.h38
-rw-r--r--chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough_doers.cc113
-rw-r--r--chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough_handlers.cc156
-rw-r--r--chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough_handlers_autogen.cc64
-rw-r--r--chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough_unittest_commands.cc38
-rw-r--r--chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough_unittest_textures.cc30
-rw-r--r--chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest.cc50
-rw-r--r--chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_0_autogen.h12
-rw-r--r--chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_1.cc28
-rw-r--r--chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_2.cc61
-rw-r--r--chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_3.cc26
-rw-r--r--chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_4.cc4
-rw-r--r--chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_attribs.cc12
-rw-r--r--chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_base.cc1
-rw-r--r--chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_base.h3
-rw-r--r--chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_context_lost.cc8
-rw-r--r--chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_context_state.cc6
-rw-r--r--chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_drawing.cc6
-rw-r--r--chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_extensions.cc48
-rw-r--r--chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_framebuffers.cc14
-rw-r--r--chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_programs.cc6
-rw-r--r--chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_textures.cc54
-rw-r--r--chromium/gpu/command_buffer/service/gpu_command_buffer_memory_tracker.h1
-rw-r--r--chromium/gpu/command_buffer/service/gpu_switches.cc3
-rw-r--r--chromium/gpu/command_buffer/service/gpu_switches.h1
-rw-r--r--chromium/gpu/command_buffer/service/gpu_tracer_unittest.cc4
-rw-r--r--chromium/gpu/command_buffer/service/gr_cache_controller.cc1
-rw-r--r--chromium/gpu/command_buffer/service/indexed_buffer_binding_host.cc47
-rw-r--r--chromium/gpu/command_buffer/service/indexed_buffer_binding_host.h13
-rw-r--r--chromium/gpu/command_buffer/service/indexed_buffer_binding_host_unittest.cc8
-rw-r--r--chromium/gpu/command_buffer/service/memory_program_cache.cc1
-rw-r--r--chromium/gpu/command_buffer/service/memory_program_cache_unittest.cc1
-rw-r--r--chromium/gpu/command_buffer/service/multi_draw_manager.cc95
-rw-r--r--chromium/gpu/command_buffer/service/multi_draw_manager.h26
-rw-r--r--chromium/gpu/command_buffer/service/multi_draw_manager_unittest.cc232
-rw-r--r--chromium/gpu/command_buffer/service/passthrough_program_cache_unittest.cc1
-rw-r--r--chromium/gpu/command_buffer/service/program_manager.cc15
-rw-r--r--chromium/gpu/command_buffer/service/program_manager_unittest.cc5
-rw-r--r--chromium/gpu/command_buffer/service/query_manager_unittest.cc9
-rw-r--r--chromium/gpu/command_buffer/service/raster_decoder.cc590
-rw-r--r--chromium/gpu/command_buffer/service/raster_decoder.h11
-rw-r--r--chromium/gpu/command_buffer/service/raster_decoder_autogen.h5
-rw-r--r--chromium/gpu/command_buffer/service/raster_decoder_mock.cc26
-rw-r--r--chromium/gpu/command_buffer/service/raster_decoder_mock.h173
-rw-r--r--chromium/gpu/command_buffer/service/raster_decoder_unittest.cc114
-rw-r--r--chromium/gpu/command_buffer/service/raster_decoder_unittest_0_autogen.h12
-rw-r--r--chromium/gpu/command_buffer/service/raster_decoder_unittest_1.cc2
-rw-r--r--chromium/gpu/command_buffer/service/raster_decoder_unittest_base.cc120
-rw-r--r--chromium/gpu/command_buffer/service/raster_decoder_unittest_base.h44
-rw-r--r--chromium/gpu/command_buffer/service/raster_decoder_unittest_context_lost.cc65
-rw-r--r--chromium/gpu/command_buffer/service/scheduler.cc1
-rw-r--r--chromium/gpu/command_buffer/service/service_transfer_cache.cc34
-rw-r--r--chromium/gpu/command_buffer/service/service_transfer_cache.h32
-rw-r--r--chromium/gpu/command_buffer/service/service_utils.cc2
-rw-r--r--chromium/gpu/command_buffer/service/shader_translator_unittest.cc26
-rw-r--r--chromium/gpu/command_buffer/service/shared_context_state.cc1
-rw-r--r--chromium/gpu/command_buffer/service/shared_image_backing.cc6
-rw-r--r--chromium/gpu/command_buffer/service/shared_image_backing.h3
-rw-r--r--chromium/gpu/command_buffer/service/shared_image_backing_factory_ahardwarebuffer.cc371
-rw-r--r--chromium/gpu/command_buffer/service/shared_image_backing_factory_gl_texture.cc289
-rw-r--r--chromium/gpu/command_buffer/service/shared_image_backing_factory_gl_texture.h26
-rw-r--r--chromium/gpu/command_buffer/service/shared_image_backing_factory_gl_texture_unittest.cc82
-rw-r--r--chromium/gpu/command_buffer/service/shared_image_backing_factory_iosurface.h68
-rw-r--r--chromium/gpu/command_buffer/service/shared_image_backing_factory_iosurface.mm411
-rw-r--r--chromium/gpu/command_buffer/service/shared_image_backing_factory_iosurface_unittest.cc237
-rw-r--r--chromium/gpu/command_buffer/service/shared_image_factory.cc63
-rw-r--r--chromium/gpu/command_buffer/service/shared_image_factory.h4
-rw-r--r--chromium/gpu/command_buffer/service/shared_image_manager.cc89
-rw-r--r--chromium/gpu/command_buffer/service/shared_image_manager.h15
-rw-r--r--chromium/gpu/command_buffer/service/shared_image_representation.h43
-rw-r--r--chromium/gpu/command_buffer/service/test_helper.cc2
-rw-r--r--chromium/gpu/command_buffer/service/texture_definition.cc5
-rw-r--r--chromium/gpu/command_buffer/service/texture_manager.cc3
-rw-r--r--chromium/gpu/command_buffer/service/texture_manager.h32
-rw-r--r--chromium/gpu/command_buffer/service/texture_manager_unittest.cc6
-rw-r--r--chromium/gpu/command_buffer/service/transform_feedback_manager.cc3
-rw-r--r--chromium/gpu/command_buffer/service/vertex_attrib_manager.cc5
-rw-r--r--chromium/gpu/command_buffer/service/webgpu_decoder.cc325
-rw-r--r--chromium/gpu/command_buffer/service/webgpu_decoder_impl.cc347
-rw-r--r--chromium/gpu/command_buffer/service/webgpu_decoder_impl.h31
-rw-r--r--chromium/gpu/command_buffer/service/wrapped_sk_image.cc6
-rw-r--r--chromium/gpu/config/gpu_driver_bug_list.json168
-rw-r--r--chromium/gpu/config/gpu_driver_bug_list_unittest.cc2
-rw-r--r--chromium/gpu/config/gpu_dx_diagnostics_win.cc4
-rw-r--r--chromium/gpu/config/gpu_feature_info.h1
-rw-r--r--chromium/gpu/config/gpu_finch_features.cc55
-rw-r--r--chromium/gpu/config/gpu_finch_features.h6
-rw-r--r--chromium/gpu/config/gpu_info.cc72
-rw-r--r--chromium/gpu/config/gpu_info.h47
-rw-r--r--chromium/gpu/config/gpu_info_collector.cc2
-rw-r--r--chromium/gpu/config/gpu_info_collector_unittest.cc6
-rw-r--r--chromium/gpu/config/gpu_info_collector_win.cc22
-rw-r--r--chromium/gpu/config/gpu_lists_version.h2
-rw-r--r--chromium/gpu/config/gpu_preferences.h9
-rw-r--r--chromium/gpu/config/gpu_preferences_unittest.cc7
-rw-r--r--chromium/gpu/config/gpu_test_expectations_parser_unittest.cc6
-rw-r--r--chromium/gpu/config/gpu_util.cc8
-rw-r--r--chromium/gpu/config/gpu_workaround_list.txt5
-rw-r--r--chromium/gpu/config/nvml_info.cc6
-rw-r--r--chromium/gpu/config/software_rendering_list.json29
-rw-r--r--chromium/gpu/dawn_end2end_tests_main.cc5
-rw-r--r--chromium/gpu/gles2_conform_support/BUILD.gn4
-rw-r--r--chromium/gpu/gles2_conform_support/egl/context.cc6
-rw-r--r--chromium/gpu/gles2_conform_support/egl/context.h1
-rw-r--r--chromium/gpu/gles2_conform_support/gles2_conform_test.cc1
-rw-r--r--chromium/gpu/ipc/client/BUILD.gn1
-rw-r--r--chromium/gpu/ipc/client/DEPS4
-rw-r--r--chromium/gpu/ipc/client/command_buffer_proxy_impl.cc8
-rw-r--r--chromium/gpu/ipc/client/command_buffer_proxy_impl.h1
-rw-r--r--chromium/gpu/ipc/client/gpu_context_tests.h14
-rw-r--r--chromium/gpu/ipc/client/image_decode_accelerator_proxy.cc146
-rw-r--r--chromium/gpu/ipc/client/image_decode_accelerator_proxy.h35
-rw-r--r--chromium/gpu/ipc/client/raster_in_process_context_tests.cc6
-rw-r--r--chromium/gpu/ipc/common/gpu_command_buffer_traits_multi.h1
-rw-r--r--chromium/gpu/ipc/common/gpu_info.mojom24
-rw-r--r--chromium/gpu/ipc/common/gpu_info.typemap1
-rw-r--r--chromium/gpu/ipc/common/gpu_info_struct_traits.cc76
-rw-r--r--chromium/gpu/ipc/common/gpu_info_struct_traits.h51
-rw-r--r--chromium/gpu/ipc/common/gpu_memory_buffer_impl_android_hardware_buffer.cc6
-rw-r--r--chromium/gpu/ipc/common/gpu_memory_buffer_impl_android_hardware_buffer_unittest.cc12
-rw-r--r--chromium/gpu/ipc/common/gpu_memory_buffer_impl_dxgi_unittest.cc6
-rw-r--r--chromium/gpu/ipc/common/gpu_memory_buffer_impl_io_surface_unittest.cc6
-rw-r--r--chromium/gpu/ipc/common/gpu_memory_buffer_impl_native_pixmap.cc45
-rw-r--r--chromium/gpu/ipc/common/gpu_memory_buffer_impl_native_pixmap.h7
-rw-r--r--chromium/gpu/ipc/common/gpu_memory_buffer_impl_native_pixmap_unittest.cc6
-rw-r--r--chromium/gpu/ipc/common/gpu_memory_buffer_impl_shared_memory_unittest.cc12
-rw-r--r--chromium/gpu/ipc/common/gpu_memory_buffer_impl_test_template.h16
-rw-r--r--chromium/gpu/ipc/common/gpu_messages.h9
-rw-r--r--chromium/gpu/ipc/common/gpu_param_traits_macros.h4
-rw-r--r--chromium/gpu/ipc/common/gpu_preferences.mojom3
-rw-r--r--chromium/gpu/ipc/common/gpu_preferences_struct_traits.h14
-rw-r--r--chromium/gpu/ipc/common/struct_traits_unittest.cc8
-rw-r--r--chromium/gpu/ipc/host/gpu_memory_buffer_support.cc2
-rw-r--r--chromium/gpu/ipc/host/shader_disk_cache.cc93
-rw-r--r--chromium/gpu/ipc/host/shader_disk_cache_unittest.cc6
-rw-r--r--chromium/gpu/ipc/in_process_command_buffer.cc40
-rw-r--r--chromium/gpu/ipc/in_process_command_buffer.h5
-rw-r--r--chromium/gpu/ipc/in_process_gpu_thread_holder.cc2
-rw-r--r--chromium/gpu/ipc/raster_in_process_context.cc1
-rw-r--r--chromium/gpu/ipc/service/DEPS1
-rw-r--r--chromium/gpu/ipc/service/child_window_win.cc1
-rw-r--r--chromium/gpu/ipc/service/command_buffer_stub.cc214
-rw-r--r--chromium/gpu/ipc/service/command_buffer_stub.h54
-rw-r--r--chromium/gpu/ipc/service/direct_composition_child_surface_win.cc24
-rw-r--r--chromium/gpu/ipc/service/direct_composition_child_surface_win.h2
-rw-r--r--chromium/gpu/ipc/service/direct_composition_surface_win.cc105
-rw-r--r--chromium/gpu/ipc/service/direct_composition_surface_win.h4
-rw-r--r--chromium/gpu/ipc/service/gles2_command_buffer_stub.cc152
-rw-r--r--chromium/gpu/ipc/service/gles2_command_buffer_stub.h21
-rw-r--r--chromium/gpu/ipc/service/gpu_channel.cc32
-rw-r--r--chromium/gpu/ipc/service/gpu_channel.h1
-rw-r--r--chromium/gpu/ipc/service/gpu_channel_manager.cc5
-rw-r--r--chromium/gpu/ipc/service/gpu_channel_manager.h8
-rw-r--r--chromium/gpu/ipc/service/gpu_channel_manager_unittest.cc3
-rw-r--r--chromium/gpu/ipc/service/gpu_channel_test_common.cc17
-rw-r--r--chromium/gpu/ipc/service/gpu_channel_test_common.h9
-rw-r--r--chromium/gpu/ipc/service/gpu_channel_unittest.cc9
-rw-r--r--chromium/gpu/ipc/service/gpu_init.cc6
-rw-r--r--chromium/gpu/ipc/service/gpu_memory_buffer_factory_dxgi_unittest.cc6
-rw-r--r--chromium/gpu/ipc/service/gpu_memory_buffer_factory_io_surface_unittest.cc6
-rw-r--r--chromium/gpu/ipc/service/gpu_memory_buffer_factory_native_pixmap.cc4
-rw-r--r--chromium/gpu/ipc/service/gpu_memory_buffer_factory_native_pixmap_unittest.cc6
-rw-r--r--chromium/gpu/ipc/service/gpu_memory_buffer_factory_test_template.h4
-rw-r--r--chromium/gpu/ipc/service/image_decode_accelerator_stub.cc144
-rw-r--r--chromium/gpu/ipc/service/image_decode_accelerator_stub.h34
-rw-r--r--chromium/gpu/ipc/service/image_decode_accelerator_stub_unittest.cc386
-rw-r--r--chromium/gpu/ipc/service/image_decode_accelerator_worker.h33
-rw-r--r--chromium/gpu/ipc/service/image_transport_surface_overlay_mac.h8
-rw-r--r--chromium/gpu/ipc/service/image_transport_surface_overlay_mac.mm20
-rw-r--r--chromium/gpu/ipc/service/pass_through_image_transport_surface.cc80
-rw-r--r--chromium/gpu/ipc/service/pass_through_image_transport_surface.h38
-rw-r--r--chromium/gpu/ipc/service/raster_command_buffer_stub.cc41
-rw-r--r--chromium/gpu/ipc/service/raster_command_buffer_stub.h6
-rw-r--r--chromium/gpu/ipc/service/shared_image_stub.cc4
-rw-r--r--chromium/gpu/ipc/service/stream_texture_android.cc17
-rw-r--r--chromium/gpu/ipc/service/stream_texture_android.h1
-rw-r--r--chromium/gpu/ipc/service/webgpu_command_buffer_stub.cc36
-rw-r--r--chromium/gpu/ipc/service/webgpu_command_buffer_stub.h6
-rw-r--r--chromium/gpu/ipc/webgpu_in_process_context.cc5
-rw-r--r--chromium/gpu/perftests/texture_upload_perftest.cc4
-rw-r--r--chromium/gpu/tools/compositor_model_bench/render_tree.cc7
-rw-r--r--chromium/gpu/vulkan/android/vulkan_android_unittests.cc11
-rw-r--r--chromium/gpu/vulkan/android/vulkan_implementation_android.cc9
-rw-r--r--chromium/gpu/vulkan/android/vulkan_implementation_android.h2
-rwxr-xr-xchromium/gpu/vulkan/generate_bindings.py101
-rw-r--r--chromium/gpu/vulkan/vulkan_command_buffer.cc2
-rw-r--r--chromium/gpu/vulkan/vulkan_function_pointers.cc34
-rw-r--r--chromium/gpu/vulkan/vulkan_function_pointers.h26
-rw-r--r--chromium/gpu/vulkan/vulkan_implementation.cc34
-rw-r--r--chromium/gpu/vulkan/vulkan_implementation.h23
-rw-r--r--chromium/gpu/vulkan/vulkan_instance.cc13
-rw-r--r--chromium/gpu/vulkan/vulkan_instance.h5
-rw-r--r--chromium/gpu/vulkan/vulkan_surface.cc4
-rw-r--r--chromium/gpu/vulkan/win32/vulkan_implementation_win32.cc9
-rw-r--r--chromium/gpu/vulkan/win32/vulkan_implementation_win32.h2
-rw-r--r--chromium/gpu/vulkan/x/vulkan_implementation_x11.cc13
-rw-r--r--chromium/gpu/vulkan/x/vulkan_implementation_x11.h2
293 files changed, 9576 insertions, 2867 deletions
diff --git a/chromium/gpu/BUILD.gn b/chromium/gpu/BUILD.gn
index 9974be5f0cc..9b6ab9f15d9 100644
--- a/chromium/gpu/BUILD.gn
+++ b/chromium/gpu/BUILD.gn
@@ -159,8 +159,6 @@ jumbo_static_library("test_support") {
"command_buffer/service/error_state_mock.h",
"command_buffer/service/gles2_cmd_decoder_mock.cc",
"command_buffer/service/gles2_cmd_decoder_mock.h",
- "command_buffer/service/raster_decoder_mock.cc",
- "command_buffer/service/raster_decoder_mock.h",
"ipc/raster_in_process_context.cc",
"ipc/raster_in_process_context.h",
"ipc/service/gpu_memory_buffer_factory_test_template.h",
@@ -294,6 +292,7 @@ test("gl_tests") {
sources += [ "command_buffer/service/shared_image_backing_factory_ahardwarebuffer_unittest.cc" ]
} else if (is_mac) {
libs += [ "IOSurface.framework" ]
+ sources += [ "command_buffer/service/shared_image_backing_factory_iosurface_unittest.cc" ]
} else if (is_win) {
deps += [
"//ui/platform_window",
@@ -390,6 +389,7 @@ test("gpu_unittests") {
"command_buffer/service/memory_program_cache_unittest.cc",
"command_buffer/service/mocks.cc",
"command_buffer/service/mocks.h",
+ "command_buffer/service/multi_draw_manager_unittest.cc",
"command_buffer/service/passthrough_program_cache_unittest.cc",
"command_buffer/service/path_manager_unittest.cc",
"command_buffer/service/program_cache_unittest.cc",
@@ -707,6 +707,30 @@ if (is_linux) {
libfuzzer_options = [ "max_len=16384" ]
}
+ fuzzer_test("gpu_raster_passthrough_fuzzer") {
+ sources = [
+ "command_buffer/tests/fuzzer_main.cc",
+ ]
+
+ defines = [
+ "GPU_FUZZER_USE_ANGLE",
+ "GPU_FUZZER_USE_RASTER_DECODER",
+ "GPU_FUZZER_USE_PASSTHROUGH_CMD_DECODER",
+ ]
+
+ deps = [
+ ":gles2",
+ ":gpu",
+ "//base",
+ "//base/third_party/dynamic_annotations",
+ "//ui/gfx/geometry",
+ "//ui/gl",
+ "//ui/gl:test_support",
+ ]
+
+ libfuzzer_options = [ "max_len=16384" ]
+ }
+
fuzzer_test("gpu_raster_swiftshader_fuzzer") {
sources = [
"command_buffer/tests/fuzzer_main.cc",
diff --git a/chromium/gpu/GLES2/extensions/CHROMIUM/CHROMIUM_shared_image.txt b/chromium/gpu/GLES2/extensions/CHROMIUM/CHROMIUM_shared_image.txt
index 0d635375cfa..35cfcb93cf1 100644
--- a/chromium/gpu/GLES2/extensions/CHROMIUM/CHROMIUM_shared_image.txt
+++ b/chromium/gpu/GLES2/extensions/CHROMIUM/CHROMIUM_shared_image.txt
@@ -24,14 +24,9 @@ New Procedures and Functions
The command
- GLuint glCreateAndTexStorage2DSharedImageCHROMIUM (GLenum internal_format,
- const GLbyte *mailbox)
+ GLuint glCreateAndTexStorage2DSharedImageCHROMIUM (const GLbyte *mailbox)
- takes two arguments:
-
- internal_format - Must match the internal format of the SharedImage at
- creation time, with the exception that a caller may pass GL_RGB or
- GL_RGBA for a SharedImage created as RGBA_8888.
+ takes one argument:
mailbox - the mailbox referring to the SharedImage to assign to the
first level of the mailbox.
@@ -42,7 +37,7 @@ New Procedures and Functions
3) Assigns the SharedImage represented by mailbox to the first level of
the texture object.
- Note that the texutre created by this function is immutable.
+ Note that the texture created by this function is immutable.
On success, this function extends the lifetime of the SharedImage
referenced by mailbox by taking a ref. The texture object is deleted and
@@ -59,6 +54,24 @@ New Procedures and Functions
The command
+ GLuint glCreateAndTexStorage2DSharedImageWithInternalFormatCHROMIUM (
+ const GLbyte *mailbox,
+ GLenum internal_format)
+
+ takes two arguments:
+
+ internal_format - Must match the internal format of the SharedImage at
+ creation time, with the exception that a caller may pass GL_RGB or
+ GL_RGBA for a SharedImage created as RGBA_8888.
+
+ mailbox - the mailbox referring to the SharedImage to assign to the
+ first level of the mailbox.
+
+ This function works similar to glCreateAndTexStorage2DSharedImageCHROMIUM
+ in other respects.
+
+ The command
+
void glBeginSharedImageAccessDirectCHROMIUM (GLuint texture,
GLenum mode)
diff --git a/chromium/gpu/GLES2/gl2chromium_autogen.h b/chromium/gpu/GLES2/gl2chromium_autogen.h
index e46e882a0f6..404c05cee78 100644
--- a/chromium/gpu/GLES2/gl2chromium_autogen.h
+++ b/chromium/gpu/GLES2/gl2chromium_autogen.h
@@ -245,6 +245,8 @@
#define glBlitFramebufferCHROMIUM GLES2_GET_FUN(BlitFramebufferCHROMIUM)
#define glRenderbufferStorageMultisampleCHROMIUM \
GLES2_GET_FUN(RenderbufferStorageMultisampleCHROMIUM)
+#define glRenderbufferStorageMultisampleAdvancedAMD \
+ GLES2_GET_FUN(RenderbufferStorageMultisampleAdvancedAMD)
#define glRenderbufferStorageMultisampleEXT \
GLES2_GET_FUN(RenderbufferStorageMultisampleEXT)
#define glFramebufferTexture2DMultisampleEXT \
@@ -275,6 +277,11 @@
#define glFramebufferParameteri GLES2_GET_FUN(FramebufferParameteri)
#define glBindImageTexture GLES2_GET_FUN(BindImageTexture)
#define glDispatchCompute GLES2_GET_FUN(DispatchCompute)
+#define glGetProgramInterfaceiv GLES2_GET_FUN(GetProgramInterfaceiv)
+#define glGetProgramResourceIndex GLES2_GET_FUN(GetProgramResourceIndex)
+#define glGetProgramResourceName GLES2_GET_FUN(GetProgramResourceName)
+#define glGetProgramResourceiv GLES2_GET_FUN(GetProgramResourceiv)
+#define glGetProgramResourceLocation GLES2_GET_FUN(GetProgramResourceLocation)
#define glMemoryBarrierEXT GLES2_GET_FUN(MemoryBarrierEXT)
#define glMemoryBarrierByRegion GLES2_GET_FUN(MemoryBarrierByRegion)
#define glSwapBuffers GLES2_GET_FUN(SwapBuffers)
@@ -416,6 +423,8 @@
#define glMaxShaderCompilerThreadsKHR GLES2_GET_FUN(MaxShaderCompilerThreadsKHR)
#define glCreateAndTexStorage2DSharedImageCHROMIUM \
GLES2_GET_FUN(CreateAndTexStorage2DSharedImageCHROMIUM)
+#define glCreateAndTexStorage2DSharedImageWithInternalFormatCHROMIUM \
+ GLES2_GET_FUN(CreateAndTexStorage2DSharedImageWithInternalFormatCHROMIUM)
#define glBeginSharedImageAccessDirectCHROMIUM \
GLES2_GET_FUN(BeginSharedImageAccessDirectCHROMIUM)
#define glEndSharedImageAccessDirectCHROMIUM \
diff --git a/chromium/gpu/command_buffer/build_cmd_buffer_lib.py b/chromium/gpu/command_buffer/build_cmd_buffer_lib.py
index 2e73406867f..a389ceeae73 100644
--- a/chromium/gpu/command_buffer/build_cmd_buffer_lib.py
+++ b/chromium/gpu/command_buffer/build_cmd_buffer_lib.py
@@ -7010,7 +7010,7 @@ namespace gles2 {
if capability_es3:
continue
if 'extension_flag' in capability:
- f.write(" if (group_->feature_info()->feature_flags().%s) {\n" %
+ f.write(" if (feature_info()->feature_flags().%s) {\n" %
capability['extension_flag'])
f.write(" ")
f.write(" ExpectEnableDisable(GL_%s, %s);\n" %
@@ -7030,7 +7030,7 @@ namespace gles2 {
""")
f.write("""
void %sDecoderTestBase::SetupInitStateExpectations(bool es3_capable) {
- auto* feature_info_ = group_->feature_info();
+ auto* feature_info_ = feature_info();
""" % _prefix)
# We need to sort the keys so the expectations match
for state_name in sorted(_STATE_INFO.keys()):
@@ -7072,7 +7072,7 @@ void %sDecoderTestBase::SetupInitStateExpectations(bool es3_capable) {
f.write(guarded_operation)
elif 'no_init' not in state:
if 'extension_flag' in state:
- f.write(" if (group_->feature_info()->feature_flags().%s) {\n" %
+ f.write(" if (feature_info()->feature_flags().%s) {\n" %
state['extension_flag'])
f.write(" ")
args = []
diff --git a/chromium/gpu/command_buffer/build_gles2_cmd_buffer.py b/chromium/gpu/command_buffer/build_gles2_cmd_buffer.py
index 2cc75e3b82b..4032b249110 100755
--- a/chromium/gpu/command_buffer/build_gles2_cmd_buffer.py
+++ b/chromium/gpu/command_buffer/build_gles2_cmd_buffer.py
@@ -2526,6 +2526,60 @@ _FUNCTION_INFO = {
'type': 'STRn',
'expectation': False,
},
+ 'GetProgramInterfaceiv': {
+ 'type': 'GETn',
+ 'decoder_func': 'DoGetProgramInterfaceiv',
+ 'result': ['SizedResult<GLint>'],
+ 'unit_test': False,
+ 'trace_level': 2,
+ 'es31': True,
+ },
+ 'GetProgramResourceiv': {
+ 'type': 'Custom',
+ 'data_transfer_methods': ['shm'],
+ 'cmd_args':
+ 'GLidProgram program, GLenum program_interface, GLuint index, '
+ 'uint32_t props_bucket_id, GLint* params',
+ 'result': ['SizedResult<GLint>'],
+ 'unit_test': False,
+ 'trace_level': 2,
+ 'es31': True,
+ },
+ 'GetProgramResourceIndex': {
+ 'type': 'Custom',
+ 'data_transfer_methods': ['shm'],
+ 'cmd_args':
+ 'GLidProgram program, GLenum program_interface, '
+ 'uint32_t name_bucket_id, GLuint* index',
+ 'result': ['GLuint'],
+ 'error_return': 'GL_INVALID_INDEX',
+ 'unit_test': False,
+ 'trace_level': 2,
+ 'es31': True,
+ },
+ 'GetProgramResourceLocation': {
+ 'type': 'Custom',
+ 'data_transfer_methods': ['shm'],
+ 'cmd_args':
+ 'GLidProgram program, GLenum program_interface, '
+ 'uint32_t name_bucket_id, GLint* location',
+ 'result': ['GLint'],
+ 'error_return': -1,
+ 'unit_test': False,
+ 'trace_level': 2,
+ 'es31': True,
+ },
+ 'GetProgramResourceName': {
+ 'type': 'Custom',
+ 'data_transfer_methods': ['shm'],
+ 'cmd_args':
+ 'GLidProgram program, GLenum program_interface, GLuint index, '
+ 'uint32_t name_bucket_id, void* result',
+ 'result': ['int32_t'],
+ 'unit_test': False,
+ 'trace_level': 2,
+ 'es31': True,
+ },
'GetRenderbufferParameteriv': {
'type': 'GETn',
'decoder_func': 'DoGetRenderbufferParameteriv',
@@ -3019,6 +3073,16 @@ _FUNCTION_INFO = {
'pepper_name': 'RenderbufferStorageMultisampleEXT',
'trace_level': 1,
},
+ 'RenderbufferStorageMultisampleAdvancedAMD': {
+ 'cmd_comment':
+ '// GL_AMD_framebuffer_multisample_advanced\n',
+ 'decoder_func': 'DoRenderbufferStorageMultisampleAdvancedAMD',
+ 'gl_test_func': 'glRenderbufferStorageMultisampleAdvancedAMD',
+ 'unit_test': False,
+ 'extension': 'amd_framebuffer_multisample_advanced',
+ 'extension_flag': 'amd_framebuffer_multisample_advanced',
+ 'trace_level': 1,
+ },
'RenderbufferStorageMultisampleEXT': {
'cmd_comment':
'// GL_EXT_multisampled_render_to_texture\n',
@@ -4240,6 +4304,11 @@ _FUNCTION_INFO = {
'extension': "CHROMIUM_shared_image",
'trace_level': 2,
},
+ 'CreateAndTexStorage2DSharedImageWithInternalFormatCHROMIUM': {
+ 'type': 'NoCommand',
+ 'extension': "CHROMIUM_shared_image",
+ 'trace_level': 2,
+ },
'CreateAndTexStorage2DSharedImageINTERNAL': {
'decoder_func': 'DoCreateAndTexStorage2DSharedImageINTERNAL',
'internal': True,
diff --git a/chromium/gpu/command_buffer/client/client_discardable_manager_unittest.cc b/chromium/gpu/command_buffer/client/client_discardable_manager_unittest.cc
index 2c811c66ef4..1609eca597f 100644
--- a/chromium/gpu/command_buffer/client/client_discardable_manager_unittest.cc
+++ b/chromium/gpu/command_buffer/client/client_discardable_manager_unittest.cc
@@ -16,7 +16,7 @@ class FakeCommandBuffer : public CommandBuffer {
State GetLastState() override {
NOTREACHED();
return State();
- };
+ }
void Flush(int32_t put_offset) override { NOTREACHED(); }
void OrderingBarrier(int32_t put_offset) override { NOTREACHED(); }
State WaitForTokenInRange(int32_t start, int32_t end) override {
diff --git a/chromium/gpu/command_buffer/client/client_transfer_cache.cc b/chromium/gpu/command_buffer/client/client_transfer_cache.cc
index b762dacadae..8eb1367abbd 100644
--- a/chromium/gpu/command_buffer/client/client_transfer_cache.cc
+++ b/chromium/gpu/command_buffer/client/client_transfer_cache.cc
@@ -41,8 +41,13 @@ void ClientTransferCache::UnmapAndCreateEntry(uint32_t type, uint32_t id) {
base::AutoLock hold(lock_);
auto handle = CreateDiscardableHandle(key);
- if (!handle.IsValid())
+ if (!handle.IsValid()) {
+ // Release any data pointers. Keeping these alive longer can lead to issues
+ // with transfer buffer reallocation.
+ mapped_ptr_ = base::nullopt;
+ transfer_buffer_ptr_ = base::nullopt;
return;
+ }
if (mapped_ptr_) {
DCHECK(!transfer_buffer_ptr_);
diff --git a/chromium/gpu/command_buffer/client/cmd_buffer_helper_test.cc b/chromium/gpu/command_buffer/client/cmd_buffer_helper_test.cc
index 5687d2472b3..8e2755df7f4 100644
--- a/chromium/gpu/command_buffer/client/cmd_buffer_helper_test.cc
+++ b/chromium/gpu/command_buffer/client/cmd_buffer_helper_test.cc
@@ -18,7 +18,6 @@
#include "gpu/command_buffer/client/cmd_buffer_helper.h"
#include "gpu/command_buffer/client/command_buffer_direct_locked.h"
#include "gpu/command_buffer/service/mocks.h"
-#include "gpu/command_buffer/service/transfer_buffer_manager.h"
#include "testing/gtest/include/gtest/gtest.h"
namespace gpu {
@@ -42,9 +41,7 @@ const int32_t kUnusedCommandId = 5; // we use 0 and 2 currently.
class CommandBufferHelperTest : public testing::Test {
protected:
void SetUp() override {
- transfer_buffer_manager_ = std::make_unique<TransferBufferManager>(nullptr);
- command_buffer_.reset(
- new CommandBufferDirectLocked(transfer_buffer_manager_.get()));
+ command_buffer_.reset(new CommandBufferDirectLocked());
api_mock_.reset(new AsyncAPIMock(true, command_buffer_->service()));
command_buffer_->set_handler(api_mock_.get());
@@ -199,7 +196,6 @@ class CommandBufferHelperTest : public testing::Test {
helper_->WaitForGetOffsetInRange(start, end);
}
- std::unique_ptr<TransferBufferManager> transfer_buffer_manager_;
std::unique_ptr<CommandBufferDirectLocked> command_buffer_;
std::unique_ptr<AsyncAPIMock> api_mock_;
std::unique_ptr<CommandBufferHelper> helper_;
diff --git a/chromium/gpu/command_buffer/client/command_buffer_direct_locked.h b/chromium/gpu/command_buffer/client/command_buffer_direct_locked.h
index c7d32cdf38e..fc9d8d65445 100644
--- a/chromium/gpu/command_buffer/client/command_buffer_direct_locked.h
+++ b/chromium/gpu/command_buffer/client/command_buffer_direct_locked.h
@@ -13,9 +13,7 @@ namespace gpu {
// until either it gets unlocked or the client waits for progress.
class CommandBufferDirectLocked : public CommandBufferDirect {
public:
- explicit CommandBufferDirectLocked(
- TransferBufferManager* transfer_buffer_manager)
- : CommandBufferDirect(transfer_buffer_manager) {}
+ CommandBufferDirectLocked() = default;
~CommandBufferDirectLocked() override = default;
// Overridden from CommandBufferDirect
diff --git a/chromium/gpu/command_buffer/client/fenced_allocator_test.cc b/chromium/gpu/command_buffer/client/fenced_allocator_test.cc
index 4dc78984bb0..be304f1d755 100644
--- a/chromium/gpu/command_buffer/client/fenced_allocator_test.cc
+++ b/chromium/gpu/command_buffer/client/fenced_allocator_test.cc
@@ -17,7 +17,6 @@
#include "gpu/command_buffer/client/fenced_allocator.h"
#include "gpu/command_buffer/service/command_buffer_direct.h"
#include "gpu/command_buffer/service/mocks.h"
-#include "gpu/command_buffer/service/transfer_buffer_manager.h"
#include "testing/gtest/include/gtest/gtest.h"
namespace gpu {
@@ -37,9 +36,7 @@ class BaseFencedAllocatorTest : public testing::Test {
static const int kAllocAlignment = 16;
void SetUp() override {
- transfer_buffer_manager_ = std::make_unique<TransferBufferManager>(nullptr);
- command_buffer_.reset(
- new CommandBufferDirect(transfer_buffer_manager_.get()));
+ command_buffer_.reset(new CommandBufferDirect());
api_mock_.reset(new AsyncAPIMock(true, command_buffer_->service()));
command_buffer_->set_handler(api_mock_.get());
@@ -58,7 +55,6 @@ class BaseFencedAllocatorTest : public testing::Test {
int32_t GetToken() { return command_buffer_->GetLastState().token; }
- std::unique_ptr<TransferBufferManager> transfer_buffer_manager_;
std::unique_ptr<CommandBufferDirect> command_buffer_;
std::unique_ptr<AsyncAPIMock> api_mock_;
std::unique_ptr<CommandBufferHelper> helper_;
diff --git a/chromium/gpu/command_buffer/client/gles2_c_lib_autogen.h b/chromium/gpu/command_buffer/client/gles2_c_lib_autogen.h
index 0a8dab9b68b..b6894208717 100644
--- a/chromium/gpu/command_buffer/client/gles2_c_lib_autogen.h
+++ b/chromium/gpu/command_buffer/client/gles2_c_lib_autogen.h
@@ -1142,6 +1142,16 @@ GLES2RenderbufferStorageMultisampleCHROMIUM(GLenum target,
gles2::GetGLContext()->RenderbufferStorageMultisampleCHROMIUM(
target, samples, internalformat, width, height);
}
+void GL_APIENTRY
+GLES2RenderbufferStorageMultisampleAdvancedAMD(GLenum target,
+ GLsizei samples,
+ GLsizei storageSamples,
+ GLenum internalformat,
+ GLsizei width,
+ GLsizei height) {
+ gles2::GetGLContext()->RenderbufferStorageMultisampleAdvancedAMD(
+ target, samples, storageSamples, internalformat, width, height);
+}
void GL_APIENTRY GLES2RenderbufferStorageMultisampleEXT(GLenum target,
GLsizei samples,
GLenum internalformat,
@@ -1262,6 +1272,46 @@ void GL_APIENTRY GLES2DispatchCompute(GLuint num_groups_x,
gles2::GetGLContext()->DispatchCompute(num_groups_x, num_groups_y,
num_groups_z);
}
+void GL_APIENTRY GLES2GetProgramInterfaceiv(GLuint program,
+ GLenum program_interface,
+ GLenum pname,
+ GLint* params) {
+ gles2::GetGLContext()->GetProgramInterfaceiv(program, program_interface,
+ pname, params);
+}
+GLuint GL_APIENTRY GLES2GetProgramResourceIndex(GLuint program,
+ GLenum program_interface,
+ const char* name) {
+ return gles2::GetGLContext()->GetProgramResourceIndex(
+ program, program_interface, name);
+}
+void GL_APIENTRY GLES2GetProgramResourceName(GLuint program,
+ GLenum program_interface,
+ GLuint index,
+ GLsizei bufsize,
+ GLsizei* length,
+ char* name) {
+ gles2::GetGLContext()->GetProgramResourceName(program, program_interface,
+ index, bufsize, length, name);
+}
+void GL_APIENTRY GLES2GetProgramResourceiv(GLuint program,
+ GLenum program_interface,
+ GLuint index,
+ GLsizei prop_count,
+ const GLenum* props,
+ GLsizei bufsize,
+ GLsizei* length,
+ GLint* params) {
+ gles2::GetGLContext()->GetProgramResourceiv(program, program_interface, index,
+ prop_count, props, bufsize,
+ length, params);
+}
+GLint GL_APIENTRY GLES2GetProgramResourceLocation(GLuint program,
+ GLenum program_interface,
+ const char* name) {
+ return gles2::GetGLContext()->GetProgramResourceLocation(
+ program, program_interface, name);
+}
void GL_APIENTRY GLES2MemoryBarrierEXT(GLbitfield barriers) {
gles2::GetGLContext()->MemoryBarrierEXT(barriers);
}
@@ -1881,6 +1931,14 @@ GLES2CreateAndTexStorage2DSharedImageCHROMIUM(const GLbyte* mailbox) {
return gles2::GetGLContext()->CreateAndTexStorage2DSharedImageCHROMIUM(
mailbox);
}
+GLuint GL_APIENTRY
+GLES2CreateAndTexStorage2DSharedImageWithInternalFormatCHROMIUM(
+ const GLbyte* mailbox,
+ GLenum internalformat) {
+ return gles2::GetGLContext()
+ ->CreateAndTexStorage2DSharedImageWithInternalFormatCHROMIUM(
+ mailbox, internalformat);
+}
void GL_APIENTRY GLES2BeginSharedImageAccessDirectCHROMIUM(GLuint texture,
GLenum mode) {
gles2::GetGLContext()->BeginSharedImageAccessDirectCHROMIUM(texture, mode);
@@ -2808,6 +2866,11 @@ extern const NameToFunc g_gles2_function_table[] = {
glRenderbufferStorageMultisampleCHROMIUM),
},
{
+ "glRenderbufferStorageMultisampleAdvancedAMD",
+ reinterpret_cast<GLES2FunctionPointer>(
+ glRenderbufferStorageMultisampleAdvancedAMD),
+ },
+ {
"glRenderbufferStorageMultisampleEXT",
reinterpret_cast<GLES2FunctionPointer>(
glRenderbufferStorageMultisampleEXT),
@@ -2918,6 +2981,26 @@ extern const NameToFunc g_gles2_function_table[] = {
reinterpret_cast<GLES2FunctionPointer>(glDispatchCompute),
},
{
+ "glGetProgramInterfaceiv",
+ reinterpret_cast<GLES2FunctionPointer>(glGetProgramInterfaceiv),
+ },
+ {
+ "glGetProgramResourceIndex",
+ reinterpret_cast<GLES2FunctionPointer>(glGetProgramResourceIndex),
+ },
+ {
+ "glGetProgramResourceName",
+ reinterpret_cast<GLES2FunctionPointer>(glGetProgramResourceName),
+ },
+ {
+ "glGetProgramResourceiv",
+ reinterpret_cast<GLES2FunctionPointer>(glGetProgramResourceiv),
+ },
+ {
+ "glGetProgramResourceLocation",
+ reinterpret_cast<GLES2FunctionPointer>(glGetProgramResourceLocation),
+ },
+ {
"glMemoryBarrierEXT",
reinterpret_cast<GLES2FunctionPointer>(glMemoryBarrierEXT),
},
@@ -3371,6 +3454,11 @@ extern const NameToFunc g_gles2_function_table[] = {
glCreateAndTexStorage2DSharedImageCHROMIUM),
},
{
+ "glCreateAndTexStorage2DSharedImageWithInternalFormatCHROMIUM",
+ reinterpret_cast<GLES2FunctionPointer>(
+ glCreateAndTexStorage2DSharedImageWithInternalFormatCHROMIUM),
+ },
+ {
"glBeginSharedImageAccessDirectCHROMIUM",
reinterpret_cast<GLES2FunctionPointer>(
glBeginSharedImageAccessDirectCHROMIUM),
diff --git a/chromium/gpu/command_buffer/client/gles2_cmd_helper_autogen.h b/chromium/gpu/command_buffer/client/gles2_cmd_helper_autogen.h
index 4b936cbc4d2..02f89cdad67 100644
--- a/chromium/gpu/command_buffer/client/gles2_cmd_helper_autogen.h
+++ b/chromium/gpu/command_buffer/client/gles2_cmd_helper_autogen.h
@@ -2270,6 +2270,19 @@ void RenderbufferStorageMultisampleCHROMIUM(GLenum target,
}
}
+void RenderbufferStorageMultisampleAdvancedAMD(GLenum target,
+ GLsizei samples,
+ GLsizei storageSamples,
+ GLenum internalformat,
+ GLsizei width,
+ GLsizei height) {
+ gles2::cmds::RenderbufferStorageMultisampleAdvancedAMD* c =
+ GetCmdSpace<gles2::cmds::RenderbufferStorageMultisampleAdvancedAMD>();
+ if (c) {
+ c->Init(target, samples, storageSamples, internalformat, width, height);
+ }
+}
+
void RenderbufferStorageMultisampleEXT(GLenum target,
GLsizei samples,
GLenum internalformat,
@@ -2473,6 +2486,73 @@ void DispatchCompute(GLuint num_groups_x,
}
}
+void GetProgramInterfaceiv(GLuint program,
+ GLenum program_interface,
+ GLenum pname,
+ uint32_t params_shm_id,
+ uint32_t params_shm_offset) {
+ gles2::cmds::GetProgramInterfaceiv* c =
+ GetCmdSpace<gles2::cmds::GetProgramInterfaceiv>();
+ if (c) {
+ c->Init(program, program_interface, pname, params_shm_id,
+ params_shm_offset);
+ }
+}
+
+void GetProgramResourceIndex(GLuint program,
+ GLenum program_interface,
+ uint32_t name_bucket_id,
+ uint32_t index_shm_id,
+ uint32_t index_shm_offset) {
+ gles2::cmds::GetProgramResourceIndex* c =
+ GetCmdSpace<gles2::cmds::GetProgramResourceIndex>();
+ if (c) {
+ c->Init(program, program_interface, name_bucket_id, index_shm_id,
+ index_shm_offset);
+ }
+}
+
+void GetProgramResourceName(GLuint program,
+ GLenum program_interface,
+ GLuint index,
+ uint32_t name_bucket_id,
+ uint32_t result_shm_id,
+ uint32_t result_shm_offset) {
+ gles2::cmds::GetProgramResourceName* c =
+ GetCmdSpace<gles2::cmds::GetProgramResourceName>();
+ if (c) {
+ c->Init(program, program_interface, index, name_bucket_id, result_shm_id,
+ result_shm_offset);
+ }
+}
+
+void GetProgramResourceiv(GLuint program,
+ GLenum program_interface,
+ GLuint index,
+ uint32_t props_bucket_id,
+ uint32_t params_shm_id,
+ uint32_t params_shm_offset) {
+ gles2::cmds::GetProgramResourceiv* c =
+ GetCmdSpace<gles2::cmds::GetProgramResourceiv>();
+ if (c) {
+ c->Init(program, program_interface, index, props_bucket_id, params_shm_id,
+ params_shm_offset);
+ }
+}
+
+void GetProgramResourceLocation(GLuint program,
+ GLenum program_interface,
+ uint32_t name_bucket_id,
+ uint32_t location_shm_id,
+ uint32_t location_shm_offset) {
+ gles2::cmds::GetProgramResourceLocation* c =
+ GetCmdSpace<gles2::cmds::GetProgramResourceLocation>();
+ if (c) {
+ c->Init(program, program_interface, name_bucket_id, location_shm_id,
+ location_shm_offset);
+ }
+}
+
void MemoryBarrierEXT(GLbitfield barriers) {
gles2::cmds::MemoryBarrierEXT* c =
GetCmdSpace<gles2::cmds::MemoryBarrierEXT>();
@@ -3474,14 +3554,15 @@ void MaxShaderCompilerThreadsKHR(GLuint count) {
}
void CreateAndTexStorage2DSharedImageINTERNALImmediate(GLuint texture,
- const GLbyte* mailbox) {
+ const GLbyte* mailbox,
+ GLenum internalformat) {
const uint32_t size = gles2::cmds::
CreateAndTexStorage2DSharedImageINTERNALImmediate::ComputeSize();
gles2::cmds::CreateAndTexStorage2DSharedImageINTERNALImmediate* c =
GetImmediateCmdSpaceTotalSize<
gles2::cmds::CreateAndTexStorage2DSharedImageINTERNALImmediate>(size);
if (c) {
- c->Init(texture, mailbox);
+ c->Init(texture, mailbox, internalformat);
}
}
diff --git a/chromium/gpu/command_buffer/client/gles2_implementation.cc b/chromium/gpu/command_buffer/client/gles2_implementation.cc
index a5ca58fb6e5..985e6324567 100644
--- a/chromium/gpu/command_buffer/client/gles2_implementation.cc
+++ b/chromium/gpu/command_buffer/client/gles2_implementation.cc
@@ -21,6 +21,7 @@
#include <string>
#include "base/atomic_sequence_num.h"
+#include "base/bind.h"
#include "base/bits.h"
#include "base/compiler_specific.h"
#include "base/containers/span.h"
@@ -419,6 +420,11 @@ void GLES2Implementation::OnSwapBufferPresented(
pending_presentation_callbacks_.erase(found);
}
+void GLES2Implementation::OnGpuControlReturnData(
+ base::span<const uint8_t> data) {
+ NOTIMPLEMENTED();
+}
+
void GLES2Implementation::FreeSharedMemory(void* mem) {
mapped_memory_->FreePendingToken(mem, helper_->InsertToken());
}
@@ -1808,6 +1814,231 @@ GLuint GLES2Implementation::GetUniformBlockIndex(GLuint program,
return index;
}
+bool GLES2Implementation::GetProgramInterfaceivHelper(GLuint program,
+ GLenum program_interface,
+ GLenum pname,
+ GLint* params) {
+ bool success = share_group_->program_info_manager()->GetProgramInterfaceiv(
+ this, program, program_interface, pname, params);
+ GPU_CLIENT_LOG_CODE_BLOCK({
+ if (success) {
+ GPU_CLIENT_LOG(" 0: " << *params);
+ }
+ });
+ return success;
+}
+
+GLuint GLES2Implementation::GetProgramResourceIndexHelper(
+ GLuint program,
+ GLenum program_interface,
+ const char* name) {
+ typedef cmds::GetProgramResourceIndex::Result Result;
+ SetBucketAsCString(kResultBucketId, name);
+ auto result = GetResultAs<Result>();
+ if (!result) {
+ return GL_INVALID_INDEX;
+ }
+ *result = GL_INVALID_INDEX;
+ helper_->GetProgramResourceIndex(program, program_interface, kResultBucketId,
+ GetResultShmId(), result.offset());
+ WaitForCmd();
+ helper_->SetBucketSize(kResultBucketId, 0);
+ return *result;
+}
+
+GLuint GLES2Implementation::GetProgramResourceIndex(
+ GLuint program,
+ GLenum program_interface,
+ const char* name) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glGetProgramResourceIndex("
+ << program << ", " << program_interface << ", " << name
+ << ")");
+ TRACE_EVENT0("gpu", "GLES2::GetProgramResourceIndex");
+ GLuint index = share_group_->program_info_manager()->GetProgramResourceIndex(
+ this, program, program_interface, name);
+ GPU_CLIENT_LOG("returned " << index);
+ CheckGLError();
+ return index;
+}
+
+bool GLES2Implementation::GetProgramResourceNameHelper(GLuint program,
+ GLenum program_interface,
+ GLuint index,
+ GLsizei bufsize,
+ GLsizei* length,
+ char* name) {
+ DCHECK_LE(0, bufsize);
+ // Clear the bucket so if the command fails nothing will be in it.
+ helper_->SetBucketSize(kResultBucketId, 0);
+ bool success = false;
+ {
+ // The Result pointer must be scoped to this block because it can be
+ // invalidated below if getting result name causes the transfer buffer to be
+ // reallocated.
+ typedef cmds::GetProgramResourceName::Result Result;
+ auto result = GetResultAs<Result>();
+ if (!result) {
+ return false;
+ }
+ // Set as failed so if the command fails we'll recover.
+ *result = 0;
+ helper_->GetProgramResourceName(program, program_interface, index,
+ kResultBucketId, GetResultShmId(),
+ result.offset());
+ WaitForCmd();
+ success = !!*result;
+ }
+ if (success) {
+ GetResultNameHelper(bufsize, length, name);
+ }
+ return success;
+}
+
+void GLES2Implementation::GetProgramResourceName(GLuint program,
+ GLenum program_interface,
+ GLuint index,
+ GLsizei bufsize,
+ GLsizei* length,
+ char* name) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glGetProgramResourceName("
+ << program << ", " << program_interface << ", " << index
+ << ", " << bufsize << ", " << static_cast<void*>(length)
+ << ", " << static_cast<void*>(name) << ")");
+ if (bufsize < 0) {
+ SetGLError(GL_INVALID_VALUE, "glGetProgramResourceName", "bufsize < 0");
+ return;
+ }
+ TRACE_EVENT0("gpu", "GLES2::GetProgramResourceName");
+ bool success = share_group_->program_info_manager()->GetProgramResourceName(
+ this, program, program_interface, index, bufsize, length, name);
+ if (success && name) {
+ GPU_CLIENT_LOG(" name: " << name);
+ }
+ CheckGLError();
+}
+
+bool GLES2Implementation::GetProgramResourceivHelper(GLuint program,
+ GLenum program_interface,
+ GLuint index,
+ GLsizei prop_count,
+ const GLenum* props,
+ GLsizei bufsize,
+ GLsizei* length,
+ GLint* params) {
+ DCHECK_LE(0, prop_count);
+ DCHECK_LE(0, bufsize);
+ base::CheckedNumeric<uint32_t> bytes = prop_count;
+ bytes *= sizeof(GLenum);
+ if (!bytes.IsValid()) {
+ SetGLError(GL_INVALID_VALUE, "glGetProgramResourceiv", "count overflow");
+ return false;
+ }
+ SetBucketContents(kResultBucketId, props, bytes.ValueOrDefault(0));
+ typedef cmds::GetProgramResourceiv::Result Result;
+ auto result = GetResultAs<Result>();
+ if (!result) {
+ return false;
+ }
+ result->SetNumResults(0);
+ helper_->GetProgramResourceiv(program, program_interface, index,
+ kResultBucketId, GetResultShmId(),
+ result.offset());
+ WaitForCmd();
+ if (length) {
+ *length = result->GetNumResults();
+ }
+ if (result->GetNumResults() > 0) {
+ if (params) {
+ result->CopyResult(params);
+ }
+ GPU_CLIENT_LOG_CODE_BLOCK({
+ for (int32_t i = 0; i < result->GetNumResults(); ++i) {
+ GPU_CLIENT_LOG(" " << i << ": " << result->GetData()[i]);
+ }
+ });
+ return true;
+ }
+ return false;
+}
+
+void GLES2Implementation::GetProgramResourceiv(GLuint program,
+ GLenum program_interface,
+ GLuint index,
+ GLsizei prop_count,
+ const GLenum* props,
+ GLsizei bufsize,
+ GLsizei* length,
+ GLint* params) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glGetProgramResourceiv(" << program
+ << ", " << program_interface << ", " << index << ", "
+ << prop_count << ", " << static_cast<const void*>(props)
+ << ", " << bufsize << ", " << static_cast<void*>(length)
+ << ", " << static_cast<void*>(params) << ")");
+ if (prop_count < 0) {
+ SetGLError(GL_INVALID_VALUE, "glGetProgramResourceiv", "prop_count < 0");
+ return;
+ }
+ if (bufsize < 0) {
+ SetGLError(GL_INVALID_VALUE, "glGetProgramResourceiv", "bufsize < 0");
+ return;
+ }
+ TRACE_EVENT0("gpu", "GLES2::GetProgramResourceiv");
+ GLsizei param_count = 0;
+ bool success = share_group_->program_info_manager()->GetProgramResourceiv(
+ this, program, program_interface, index, prop_count, props, bufsize,
+ &param_count, params);
+ if (length) {
+ *length = param_count;
+ }
+ if (success && params) {
+ GPU_CLIENT_LOG_CODE_BLOCK({
+ for (GLsizei ii = 0; ii < param_count; ++ii) {
+ GPU_CLIENT_LOG(" " << ii << ": " << params[ii]);
+ }
+ });
+ }
+ CheckGLError();
+}
+
+GLint GLES2Implementation::GetProgramResourceLocationHelper(
+ GLuint program,
+ GLenum program_interface,
+ const char* name) {
+ typedef cmds::GetProgramResourceLocation::Result Result;
+ SetBucketAsCString(kResultBucketId, name);
+ auto result = GetResultAs<Result>();
+ if (!result) {
+ return -1;
+ }
+ *result = -1;
+ helper_->GetProgramResourceLocation(program, program_interface,
+ kResultBucketId, GetResultShmId(),
+ result.offset());
+ WaitForCmd();
+ helper_->SetBucketSize(kResultBucketId, 0);
+ return *result;
+}
+
+GLint GLES2Implementation::GetProgramResourceLocation(
+ GLuint program,
+ GLenum program_interface,
+ const char* name) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glGetProgramResourceLocation("
+ << program << ", " << program_interface << ", " << name
+ << ")");
+ TRACE_EVENT0("gpu", "GLES2::GetProgramResourceLocation");
+ GLint location =
+ share_group_->program_info_manager()->GetProgramResourceLocation(
+ this, program, program_interface, name);
+ GPU_CLIENT_LOG("returned " << location);
+ CheckGLError();
+ return location;
+}
+
void GLES2Implementation::LinkProgram(GLuint program) {
GPU_CLIENT_SINGLE_THREAD_CHECK();
GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glLinkProgram(" << program << ")");
@@ -6135,28 +6366,24 @@ void GLES2Implementation::DrawElementsInstancedANGLE(GLenum mode,
"count less than 0.");
return;
}
- if (count == 0) {
- return;
- }
if (primcount < 0) {
SetGLError(GL_INVALID_VALUE, "glDrawElementsInstancedANGLE",
"primcount < 0");
return;
}
- if (primcount == 0) {
- return;
- }
- if (vertex_array_object_manager_->bound_element_array_buffer() != 0 &&
- !ValidateOffset("glDrawElementsInstancedANGLE",
- reinterpret_cast<GLintptr>(indices))) {
- return;
- }
GLuint offset = 0;
bool simulated = false;
- if (!vertex_array_object_manager_->SetupSimulatedIndexAndClientSideBuffers(
- "glDrawElementsInstancedANGLE", this, helper_, count, type, primcount,
- indices, &offset, &simulated)) {
- return;
+ if (count > 0 && primcount > 0) {
+ if (vertex_array_object_manager_->bound_element_array_buffer() != 0 &&
+ !ValidateOffset("glDrawElementsInstancedANGLE",
+ reinterpret_cast<GLintptr>(indices))) {
+ return;
+ }
+ if (!vertex_array_object_manager_->SetupSimulatedIndexAndClientSideBuffers(
+ "glDrawElementsInstancedANGLE", this, helper_, count, type,
+ primcount, indices, &offset, &simulated)) {
+ return;
+ }
}
helper_->DrawElementsInstancedANGLE(mode, count, type, offset, primcount);
RestoreElementAndArrayBuffers(simulated);
@@ -6205,7 +6432,31 @@ GLuint GLES2Implementation::CreateAndTexStorage2DSharedImageCHROMIUM(
"passed an invalid mailbox.";
GLuint client_id;
GetIdHandler(SharedIdNamespaces::kTextures)->MakeIds(this, 0, 1, &client_id);
- helper_->CreateAndTexStorage2DSharedImageINTERNALImmediate(client_id, data);
+ helper_->CreateAndTexStorage2DSharedImageINTERNALImmediate(client_id, data,
+ GL_NONE);
+ if (share_group_->bind_generates_resource())
+ helper_->CommandBufferHelper::OrderingBarrier();
+ CheckGLError();
+ return client_id;
+}
+
+GLuint
+GLES2Implementation::CreateAndTexStorage2DSharedImageWithInternalFormatCHROMIUM(
+ const GLbyte* data,
+ GLenum internalformat) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG(
+ "[" << GetLogPrefix()
+ << "] CreateAndTexStorage2DSharedImageWithInternalFormatCHROMIUM("
+ << static_cast<const void*>(data) << ", " << internalformat << ")");
+ const Mailbox& mailbox = *reinterpret_cast<const Mailbox*>(data);
+ DCHECK(mailbox.Verify())
+ << "CreateAndTexStorage2DSharedImageWithInternalFormatCHROMIUM was "
+ "passed an invalid mailbox.";
+ GLuint client_id;
+ GetIdHandler(SharedIdNamespaces::kTextures)->MakeIds(this, 0, 1, &client_id);
+ helper_->CreateAndTexStorage2DSharedImageINTERNALImmediate(client_id, data,
+ internalformat);
if (share_group_->bind_generates_resource())
helper_->CommandBufferHelper::OrderingBarrier();
CheckGLError();
diff --git a/chromium/gpu/command_buffer/client/gles2_implementation.h b/chromium/gpu/command_buffer/client/gles2_implementation.h
index 44f7d0bdd56..4a5d198566f 100644
--- a/chromium/gpu/command_buffer/client/gles2_implementation.h
+++ b/chromium/gpu/command_buffer/client/gles2_implementation.h
@@ -186,6 +186,19 @@ class GLES2_IMPL_EXPORT GLES2Implementation : public GLES2Interface,
GLint* values);
bool GetQueryObjectValueHelper(
const char* function_name, GLuint id, GLenum pname, GLuint64* params);
+ bool GetProgramInterfaceivHelper(
+ GLuint program, GLenum program_interface, GLenum pname, GLint* params);
+ GLuint GetProgramResourceIndexHelper(
+ GLuint program, GLenum program_interface, const char* name);
+ bool GetProgramResourceNameHelper(
+ GLuint program, GLenum program_interface, GLuint index, GLsizei bufsize,
+ GLsizei* length, char* name);
+ bool GetProgramResourceivHelper(
+ GLuint program, GLenum program_interface, GLuint index,
+ GLsizei prop_count, const GLenum* props, GLsizei bufsize, GLsizei* length,
+ GLint* params);
+ GLint GetProgramResourceLocationHelper(
+ GLuint program, GLenum program_interface, const char* name);
const scoped_refptr<ShareGroup>& share_group() const { return share_group_; }
@@ -377,6 +390,7 @@ class GLES2_IMPL_EXPORT GLES2Implementation : public GLES2Interface,
const SwapBuffersCompleteParams& params) final;
void OnSwapBufferPresented(uint64_t swap_id,
const gfx::PresentationFeedback& feedback) final;
+ void OnGpuControlReturnData(base::span<const uint8_t> data) final;
void SendErrorMessage(std::string message, int32_t id);
void CallDeferredErrorCallbacks();
diff --git a/chromium/gpu/command_buffer/client/gles2_implementation_autogen.h b/chromium/gpu/command_buffer/client/gles2_implementation_autogen.h
index 624ea51c828..6882d12a79c 100644
--- a/chromium/gpu/command_buffer/client/gles2_implementation_autogen.h
+++ b/chromium/gpu/command_buffer/client/gles2_implementation_autogen.h
@@ -808,6 +808,13 @@ void RenderbufferStorageMultisampleCHROMIUM(GLenum target,
GLsizei width,
GLsizei height) override;
+void RenderbufferStorageMultisampleAdvancedAMD(GLenum target,
+ GLsizei samples,
+ GLsizei storageSamples,
+ GLenum internalformat,
+ GLsizei width,
+ GLsizei height) override;
+
void RenderbufferStorageMultisampleEXT(GLenum target,
GLsizei samples,
GLenum internalformat,
@@ -883,6 +890,35 @@ void DispatchCompute(GLuint num_groups_x,
GLuint num_groups_y,
GLuint num_groups_z) override;
+void GetProgramInterfaceiv(GLuint program,
+ GLenum program_interface,
+ GLenum pname,
+ GLint* params) override;
+
+GLuint GetProgramResourceIndex(GLuint program,
+ GLenum program_interface,
+ const char* name) override;
+
+void GetProgramResourceName(GLuint program,
+ GLenum program_interface,
+ GLuint index,
+ GLsizei bufsize,
+ GLsizei* length,
+ char* name) override;
+
+void GetProgramResourceiv(GLuint program,
+ GLenum program_interface,
+ GLuint index,
+ GLsizei prop_count,
+ const GLenum* props,
+ GLsizei bufsize,
+ GLsizei* length,
+ GLint* params) override;
+
+GLint GetProgramResourceLocation(GLuint program,
+ GLenum program_interface,
+ const char* name) override;
+
void MemoryBarrierEXT(GLbitfield barriers) override;
void MemoryBarrierByRegion(GLbitfield barriers) override;
@@ -1322,6 +1358,10 @@ void MaxShaderCompilerThreadsKHR(GLuint count) override;
GLuint CreateAndTexStorage2DSharedImageCHROMIUM(const GLbyte* mailbox) override;
+GLuint CreateAndTexStorage2DSharedImageWithInternalFormatCHROMIUM(
+ const GLbyte* mailbox,
+ GLenum internalformat) override;
+
void BeginSharedImageAccessDirectCHROMIUM(GLuint texture, GLenum mode) override;
void EndSharedImageAccessDirectCHROMIUM(GLuint texture) override;
diff --git a/chromium/gpu/command_buffer/client/gles2_implementation_impl_autogen.h b/chromium/gpu/command_buffer/client/gles2_implementation_impl_autogen.h
index b196b0a5879..13b36769ae3 100644
--- a/chromium/gpu/command_buffer/client/gles2_implementation_impl_autogen.h
+++ b/chromium/gpu/command_buffer/client/gles2_implementation_impl_autogen.h
@@ -2818,6 +2818,45 @@ void GLES2Implementation::RenderbufferStorageMultisampleCHROMIUM(
CheckGLError();
}
+void GLES2Implementation::RenderbufferStorageMultisampleAdvancedAMD(
+ GLenum target,
+ GLsizei samples,
+ GLsizei storageSamples,
+ GLenum internalformat,
+ GLsizei width,
+ GLsizei height) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix()
+ << "] glRenderbufferStorageMultisampleAdvancedAMD("
+ << GLES2Util::GetStringRenderBufferTarget(target) << ", "
+ << samples << ", " << storageSamples << ", "
+ << GLES2Util::GetStringRenderBufferFormat(internalformat)
+ << ", " << width << ", " << height << ")");
+ if (samples < 0) {
+ SetGLError(GL_INVALID_VALUE, "glRenderbufferStorageMultisampleAdvancedAMD",
+ "samples < 0");
+ return;
+ }
+ if (storageSamples < 0) {
+ SetGLError(GL_INVALID_VALUE, "glRenderbufferStorageMultisampleAdvancedAMD",
+ "storageSamples < 0");
+ return;
+ }
+ if (width < 0) {
+ SetGLError(GL_INVALID_VALUE, "glRenderbufferStorageMultisampleAdvancedAMD",
+ "width < 0");
+ return;
+ }
+ if (height < 0) {
+ SetGLError(GL_INVALID_VALUE, "glRenderbufferStorageMultisampleAdvancedAMD",
+ "height < 0");
+ return;
+ }
+ helper_->RenderbufferStorageMultisampleAdvancedAMD(
+ target, samples, storageSamples, internalformat, width, height);
+ CheckGLError();
+}
+
void GLES2Implementation::RenderbufferStorageMultisampleEXT(
GLenum target,
GLsizei samples,
@@ -3077,6 +3116,38 @@ void GLES2Implementation::DispatchCompute(GLuint num_groups_x,
CheckGLError();
}
+void GLES2Implementation::GetProgramInterfaceiv(GLuint program,
+ GLenum program_interface,
+ GLenum pname,
+ GLint* params) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_VALIDATE_DESTINATION_INITALIZATION(GLint, params);
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glGetProgramInterfaceiv("
+ << program << ", "
+ << GLES2Util::GetStringEnum(program_interface) << ", "
+ << GLES2Util::GetStringEnum(pname) << ", "
+ << static_cast<const void*>(params) << ")");
+ TRACE_EVENT0("gpu", "GLES2Implementation::GetProgramInterfaceiv");
+ if (GetProgramInterfaceivHelper(program, program_interface, pname, params)) {
+ return;
+ }
+ typedef cmds::GetProgramInterfaceiv::Result Result;
+ ScopedResultPtr<Result> result = GetResultAs<Result>();
+ if (!result) {
+ return;
+ }
+ result->SetNumResults(0);
+ helper_->GetProgramInterfaceiv(program, program_interface, pname,
+ GetResultShmId(), result.offset());
+ WaitForCmd();
+ result->CopyResult(params);
+ GPU_CLIENT_LOG_CODE_BLOCK({
+ for (int32_t i = 0; i < result->GetNumResults(); ++i) {
+ GPU_CLIENT_LOG(" " << i << ": " << result->GetData()[i]);
+ }
+ });
+ CheckGLError();
+}
void GLES2Implementation::MemoryBarrierEXT(GLbitfield barriers) {
GPU_CLIENT_SINGLE_THREAD_CHECK();
GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glMemoryBarrierEXT(" << barriers
diff --git a/chromium/gpu/command_buffer/client/gles2_implementation_unittest.cc b/chromium/gpu/command_buffer/client/gles2_implementation_unittest.cc
index 44f1e44435d..a06de62256f 100644
--- a/chromium/gpu/command_buffer/client/gles2_implementation_unittest.cc
+++ b/chromium/gpu/command_buffer/client/gles2_implementation_unittest.cc
@@ -16,6 +16,7 @@
#include <memory>
+#include "base/bind.h"
#include "base/compiler_specific.h"
#include "base/stl_util.h"
#include "gpu/command_buffer/client/client_test_helper.h"
@@ -3587,12 +3588,29 @@ TEST_F(GLES2ImplementationTest, CreateAndTexStorage2DSharedImageCHROMIUM) {
Mailbox mailbox = Mailbox::Generate();
Cmds expected;
- expected.cmd.Init(kTexturesStartId, mailbox.name);
+ expected.cmd.Init(kTexturesStartId, mailbox.name, GL_NONE);
GLuint id = gl_->CreateAndTexStorage2DSharedImageCHROMIUM(mailbox.name);
EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
EXPECT_EQ(kTexturesStartId, id);
}
+TEST_F(GLES2ImplementationTest,
+ CreateAndTexStorage2DSharedImageWithInternalFormatCHROMIUM) {
+ struct Cmds {
+ cmds::CreateAndTexStorage2DSharedImageINTERNALImmediate cmd;
+ GLbyte data[GL_MAILBOX_SIZE_CHROMIUM];
+ };
+
+ Mailbox mailbox = Mailbox::Generate();
+ const GLenum kFormat = GL_RGBA;
+ Cmds expected;
+ expected.cmd.Init(kTexturesStartId, mailbox.name, kFormat);
+ GLuint id = gl_->CreateAndTexStorage2DSharedImageWithInternalFormatCHROMIUM(
+ mailbox.name, kFormat);
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+ EXPECT_EQ(kTexturesStartId, id);
+}
+
TEST_F(GLES2ImplementationTest, ProduceTextureDirectCHROMIUM) {
struct Cmds {
cmds::ProduceTextureDirectCHROMIUMImmediate cmd;
diff --git a/chromium/gpu/command_buffer/client/gles2_implementation_unittest_autogen.h b/chromium/gpu/command_buffer/client/gles2_implementation_unittest_autogen.h
index 722696075b7..b14a3e39a09 100644
--- a/chromium/gpu/command_buffer/client/gles2_implementation_unittest_autogen.h
+++ b/chromium/gpu/command_buffer/client/gles2_implementation_unittest_autogen.h
@@ -2528,6 +2528,18 @@ TEST_F(GLES2ImplementationTest, RenderbufferStorageMultisampleCHROMIUM) {
EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
}
+TEST_F(GLES2ImplementationTest, RenderbufferStorageMultisampleAdvancedAMD) {
+ struct Cmds {
+ cmds::RenderbufferStorageMultisampleAdvancedAMD cmd;
+ };
+ Cmds expected;
+ expected.cmd.Init(GL_RENDERBUFFER, 2, 3, GL_RGBA4, 5, 6);
+
+ gl_->RenderbufferStorageMultisampleAdvancedAMD(GL_RENDERBUFFER, 2, 3,
+ GL_RGBA4, 5, 6);
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+}
+
TEST_F(GLES2ImplementationTest, RenderbufferStorageMultisampleEXT) {
struct Cmds {
cmds::RenderbufferStorageMultisampleEXT cmd;
@@ -2712,6 +2724,24 @@ TEST_F(GLES2ImplementationTest, DispatchCompute) {
EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
}
+TEST_F(GLES2ImplementationTest, GetProgramInterfaceiv) {
+ struct Cmds {
+ cmds::GetProgramInterfaceiv cmd;
+ };
+ typedef cmds::GetProgramInterfaceiv::Result::Type ResultType;
+ ResultType result = 0;
+ Cmds expected;
+ ExpectedMemoryInfo result1 =
+ GetExpectedResultMemory(sizeof(uint32_t) + sizeof(ResultType));
+ expected.cmd.Init(123, 2, 3, result1.id, result1.offset);
+ EXPECT_CALL(*command_buffer(), OnFlush())
+ .WillOnce(SetMemory(result1.ptr, SizedResultHelper<ResultType>(1)))
+ .RetiresOnSaturation();
+ gl_->GetProgramInterfaceiv(123, 2, 3, &result);
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+ EXPECT_EQ(static_cast<ResultType>(1), result);
+}
+
TEST_F(GLES2ImplementationTest, MemoryBarrierEXT) {
struct Cmds {
cmds::MemoryBarrierEXT cmd;
diff --git a/chromium/gpu/command_buffer/client/gles2_interface.h b/chromium/gpu/command_buffer/client/gles2_interface.h
index 85aa9546c2f..e7194af8fef 100644
--- a/chromium/gpu/command_buffer/client/gles2_interface.h
+++ b/chromium/gpu/command_buffer/client/gles2_interface.h
@@ -34,7 +34,7 @@ class GLES2Interface {
GLES2Interface() = default;
virtual ~GLES2Interface() = default;
- virtual void FreeSharedMemory(void*) {};
+ virtual void FreeSharedMemory(void*) {}
// Include the auto-generated part of this class. We split this because
// it means we can easily edit the non-auto generated parts right here in
diff --git a/chromium/gpu/command_buffer/client/gles2_interface_autogen.h b/chromium/gpu/command_buffer/client/gles2_interface_autogen.h
index 8f024988b3a..2071473f0da 100644
--- a/chromium/gpu/command_buffer/client/gles2_interface_autogen.h
+++ b/chromium/gpu/command_buffer/client/gles2_interface_autogen.h
@@ -596,6 +596,12 @@ virtual void RenderbufferStorageMultisampleCHROMIUM(GLenum target,
GLenum internalformat,
GLsizei width,
GLsizei height) = 0;
+virtual void RenderbufferStorageMultisampleAdvancedAMD(GLenum target,
+ GLsizei samples,
+ GLsizei storageSamples,
+ GLenum internalformat,
+ GLsizei width,
+ GLsizei height) = 0;
virtual void RenderbufferStorageMultisampleEXT(GLenum target,
GLsizei samples,
GLenum internalformat,
@@ -650,6 +656,30 @@ virtual void BindImageTexture(GLuint unit,
virtual void DispatchCompute(GLuint num_groups_x,
GLuint num_groups_y,
GLuint num_groups_z) = 0;
+virtual void GetProgramInterfaceiv(GLuint program,
+ GLenum program_interface,
+ GLenum pname,
+ GLint* params) = 0;
+virtual GLuint GetProgramResourceIndex(GLuint program,
+ GLenum program_interface,
+ const char* name) = 0;
+virtual void GetProgramResourceName(GLuint program,
+ GLenum program_interface,
+ GLuint index,
+ GLsizei bufsize,
+ GLsizei* length,
+ char* name) = 0;
+virtual void GetProgramResourceiv(GLuint program,
+ GLenum program_interface,
+ GLuint index,
+ GLsizei prop_count,
+ const GLenum* props,
+ GLsizei bufsize,
+ GLsizei* length,
+ GLint* params) = 0;
+virtual GLint GetProgramResourceLocation(GLuint program,
+ GLenum program_interface,
+ const char* name) = 0;
virtual void MemoryBarrierEXT(GLbitfield barriers) = 0;
virtual void MemoryBarrierByRegion(GLbitfield barriers) = 0;
virtual void SwapBuffers(GLuint64 swap_id, GLbitfield flags = 0) = 0;
@@ -990,6 +1020,9 @@ virtual void FramebufferTextureMultiviewLayeredANGLE(GLenum target,
virtual void MaxShaderCompilerThreadsKHR(GLuint count) = 0;
virtual GLuint CreateAndTexStorage2DSharedImageCHROMIUM(
const GLbyte* mailbox) = 0;
+virtual GLuint CreateAndTexStorage2DSharedImageWithInternalFormatCHROMIUM(
+ const GLbyte* mailbox,
+ GLenum internalformat) = 0;
virtual void BeginSharedImageAccessDirectCHROMIUM(GLuint texture,
GLenum mode) = 0;
virtual void EndSharedImageAccessDirectCHROMIUM(GLuint texture) = 0;
diff --git a/chromium/gpu/command_buffer/client/gles2_interface_stub_autogen.h b/chromium/gpu/command_buffer/client/gles2_interface_stub_autogen.h
index 263313c515b..158134cf608 100644
--- a/chromium/gpu/command_buffer/client/gles2_interface_stub_autogen.h
+++ b/chromium/gpu/command_buffer/client/gles2_interface_stub_autogen.h
@@ -579,6 +579,12 @@ void RenderbufferStorageMultisampleCHROMIUM(GLenum target,
GLenum internalformat,
GLsizei width,
GLsizei height) override;
+void RenderbufferStorageMultisampleAdvancedAMD(GLenum target,
+ GLsizei samples,
+ GLsizei storageSamples,
+ GLenum internalformat,
+ GLsizei width,
+ GLsizei height) override;
void RenderbufferStorageMultisampleEXT(GLenum target,
GLsizei samples,
GLenum internalformat,
@@ -627,6 +633,30 @@ void BindImageTexture(GLuint unit,
void DispatchCompute(GLuint num_groups_x,
GLuint num_groups_y,
GLuint num_groups_z) override;
+void GetProgramInterfaceiv(GLuint program,
+ GLenum program_interface,
+ GLenum pname,
+ GLint* params) override;
+GLuint GetProgramResourceIndex(GLuint program,
+ GLenum program_interface,
+ const char* name) override;
+void GetProgramResourceName(GLuint program,
+ GLenum program_interface,
+ GLuint index,
+ GLsizei bufsize,
+ GLsizei* length,
+ char* name) override;
+void GetProgramResourceiv(GLuint program,
+ GLenum program_interface,
+ GLuint index,
+ GLsizei prop_count,
+ const GLenum* props,
+ GLsizei bufsize,
+ GLsizei* length,
+ GLint* params) override;
+GLint GetProgramResourceLocation(GLuint program,
+ GLenum program_interface,
+ const char* name) override;
void MemoryBarrierEXT(GLbitfield barriers) override;
void MemoryBarrierByRegion(GLbitfield barriers) override;
void SwapBuffers(GLuint64 swap_id, GLbitfield flags) override;
@@ -959,6 +989,9 @@ void FramebufferTextureMultiviewLayeredANGLE(GLenum target,
GLsizei numViews) override;
void MaxShaderCompilerThreadsKHR(GLuint count) override;
GLuint CreateAndTexStorage2DSharedImageCHROMIUM(const GLbyte* mailbox) override;
+GLuint CreateAndTexStorage2DSharedImageWithInternalFormatCHROMIUM(
+ const GLbyte* mailbox,
+ GLenum internalformat) override;
void BeginSharedImageAccessDirectCHROMIUM(GLuint texture, GLenum mode) override;
void EndSharedImageAccessDirectCHROMIUM(GLuint texture) override;
#endif // GPU_COMMAND_BUFFER_CLIENT_GLES2_INTERFACE_STUB_AUTOGEN_H_
diff --git a/chromium/gpu/command_buffer/client/gles2_interface_stub_impl_autogen.h b/chromium/gpu/command_buffer/client/gles2_interface_stub_impl_autogen.h
index 81e592e25f7..29d079725c7 100644
--- a/chromium/gpu/command_buffer/client/gles2_interface_stub_impl_autogen.h
+++ b/chromium/gpu/command_buffer/client/gles2_interface_stub_impl_autogen.h
@@ -779,6 +779,13 @@ void GLES2InterfaceStub::RenderbufferStorageMultisampleCHROMIUM(
GLenum /* internalformat */,
GLsizei /* width */,
GLsizei /* height */) {}
+void GLES2InterfaceStub::RenderbufferStorageMultisampleAdvancedAMD(
+ GLenum /* target */,
+ GLsizei /* samples */,
+ GLsizei /* storageSamples */,
+ GLenum /* internalformat */,
+ GLsizei /* width */,
+ GLsizei /* height */) {}
void GLES2InterfaceStub::RenderbufferStorageMultisampleEXT(
GLenum /* target */,
GLsizei /* samples */,
@@ -852,6 +859,36 @@ void GLES2InterfaceStub::BindImageTexture(GLuint /* unit */,
void GLES2InterfaceStub::DispatchCompute(GLuint /* num_groups_x */,
GLuint /* num_groups_y */,
GLuint /* num_groups_z */) {}
+void GLES2InterfaceStub::GetProgramInterfaceiv(GLuint /* program */,
+ GLenum /* program_interface */,
+ GLenum /* pname */,
+ GLint* /* params */) {}
+GLuint GLES2InterfaceStub::GetProgramResourceIndex(
+ GLuint /* program */,
+ GLenum /* program_interface */,
+ const char* /* name */) {
+ return 0;
+}
+void GLES2InterfaceStub::GetProgramResourceName(GLuint /* program */,
+ GLenum /* program_interface */,
+ GLuint /* index */,
+ GLsizei /* bufsize */,
+ GLsizei* /* length */,
+ char* /* name */) {}
+void GLES2InterfaceStub::GetProgramResourceiv(GLuint /* program */,
+ GLenum /* program_interface */,
+ GLuint /* index */,
+ GLsizei /* prop_count */,
+ const GLenum* /* props */,
+ GLsizei /* bufsize */,
+ GLsizei* /* length */,
+ GLint* /* params */) {}
+GLint GLES2InterfaceStub::GetProgramResourceLocation(
+ GLuint /* program */,
+ GLenum /* program_interface */,
+ const char* /* name */) {
+ return 0;
+}
void GLES2InterfaceStub::MemoryBarrierEXT(GLbitfield /* barriers */) {}
void GLES2InterfaceStub::MemoryBarrierByRegion(GLbitfield /* barriers */) {}
void GLES2InterfaceStub::SwapBuffers(GLuint64 /* swap_id */,
@@ -1280,6 +1317,12 @@ GLuint GLES2InterfaceStub::CreateAndTexStorage2DSharedImageCHROMIUM(
const GLbyte* /* mailbox */) {
return 0;
}
+GLuint
+GLES2InterfaceStub::CreateAndTexStorage2DSharedImageWithInternalFormatCHROMIUM(
+ const GLbyte* /* mailbox */,
+ GLenum /* internalformat */) {
+ return 0;
+}
void GLES2InterfaceStub::BeginSharedImageAccessDirectCHROMIUM(
GLuint /* texture */,
GLenum /* mode */) {}
diff --git a/chromium/gpu/command_buffer/client/gles2_trace_implementation_autogen.h b/chromium/gpu/command_buffer/client/gles2_trace_implementation_autogen.h
index 61c2fa70d65..7e94db26455 100644
--- a/chromium/gpu/command_buffer/client/gles2_trace_implementation_autogen.h
+++ b/chromium/gpu/command_buffer/client/gles2_trace_implementation_autogen.h
@@ -579,6 +579,12 @@ void RenderbufferStorageMultisampleCHROMIUM(GLenum target,
GLenum internalformat,
GLsizei width,
GLsizei height) override;
+void RenderbufferStorageMultisampleAdvancedAMD(GLenum target,
+ GLsizei samples,
+ GLsizei storageSamples,
+ GLenum internalformat,
+ GLsizei width,
+ GLsizei height) override;
void RenderbufferStorageMultisampleEXT(GLenum target,
GLsizei samples,
GLenum internalformat,
@@ -627,6 +633,30 @@ void BindImageTexture(GLuint unit,
void DispatchCompute(GLuint num_groups_x,
GLuint num_groups_y,
GLuint num_groups_z) override;
+void GetProgramInterfaceiv(GLuint program,
+ GLenum program_interface,
+ GLenum pname,
+ GLint* params) override;
+GLuint GetProgramResourceIndex(GLuint program,
+ GLenum program_interface,
+ const char* name) override;
+void GetProgramResourceName(GLuint program,
+ GLenum program_interface,
+ GLuint index,
+ GLsizei bufsize,
+ GLsizei* length,
+ char* name) override;
+void GetProgramResourceiv(GLuint program,
+ GLenum program_interface,
+ GLuint index,
+ GLsizei prop_count,
+ const GLenum* props,
+ GLsizei bufsize,
+ GLsizei* length,
+ GLint* params) override;
+GLint GetProgramResourceLocation(GLuint program,
+ GLenum program_interface,
+ const char* name) override;
void MemoryBarrierEXT(GLbitfield barriers) override;
void MemoryBarrierByRegion(GLbitfield barriers) override;
void SwapBuffers(GLuint64 swap_id, GLbitfield flags) override;
@@ -959,6 +989,9 @@ void FramebufferTextureMultiviewLayeredANGLE(GLenum target,
GLsizei numViews) override;
void MaxShaderCompilerThreadsKHR(GLuint count) override;
GLuint CreateAndTexStorage2DSharedImageCHROMIUM(const GLbyte* mailbox) override;
+GLuint CreateAndTexStorage2DSharedImageWithInternalFormatCHROMIUM(
+ const GLbyte* mailbox,
+ GLenum internalformat) override;
void BeginSharedImageAccessDirectCHROMIUM(GLuint texture, GLenum mode) override;
void EndSharedImageAccessDirectCHROMIUM(GLuint texture) override;
#endif // GPU_COMMAND_BUFFER_CLIENT_GLES2_TRACE_IMPLEMENTATION_AUTOGEN_H_
diff --git a/chromium/gpu/command_buffer/client/gles2_trace_implementation_impl_autogen.h b/chromium/gpu/command_buffer/client/gles2_trace_implementation_impl_autogen.h
index dcde091ee4b..4b7932e147b 100644
--- a/chromium/gpu/command_buffer/client/gles2_trace_implementation_impl_autogen.h
+++ b/chromium/gpu/command_buffer/client/gles2_trace_implementation_impl_autogen.h
@@ -1651,6 +1651,19 @@ void GLES2TraceImplementation::RenderbufferStorageMultisampleCHROMIUM(
width, height);
}
+void GLES2TraceImplementation::RenderbufferStorageMultisampleAdvancedAMD(
+ GLenum target,
+ GLsizei samples,
+ GLsizei storageSamples,
+ GLenum internalformat,
+ GLsizei width,
+ GLsizei height) {
+ TRACE_EVENT_BINARY_EFFICIENT0(
+ "gpu", "GLES2Trace::RenderbufferStorageMultisampleAdvancedAMD");
+ gl_->RenderbufferStorageMultisampleAdvancedAMD(
+ target, samples, storageSamples, internalformat, width, height);
+}
+
void GLES2TraceImplementation::RenderbufferStorageMultisampleEXT(
GLenum target,
GLsizei samples,
@@ -1830,6 +1843,55 @@ void GLES2TraceImplementation::DispatchCompute(GLuint num_groups_x,
gl_->DispatchCompute(num_groups_x, num_groups_y, num_groups_z);
}
+void GLES2TraceImplementation::GetProgramInterfaceiv(GLuint program,
+ GLenum program_interface,
+ GLenum pname,
+ GLint* params) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::GetProgramInterfaceiv");
+ gl_->GetProgramInterfaceiv(program, program_interface, pname, params);
+}
+
+GLuint GLES2TraceImplementation::GetProgramResourceIndex(
+ GLuint program,
+ GLenum program_interface,
+ const char* name) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::GetProgramResourceIndex");
+ return gl_->GetProgramResourceIndex(program, program_interface, name);
+}
+
+void GLES2TraceImplementation::GetProgramResourceName(GLuint program,
+ GLenum program_interface,
+ GLuint index,
+ GLsizei bufsize,
+ GLsizei* length,
+ char* name) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::GetProgramResourceName");
+ gl_->GetProgramResourceName(program, program_interface, index, bufsize,
+ length, name);
+}
+
+void GLES2TraceImplementation::GetProgramResourceiv(GLuint program,
+ GLenum program_interface,
+ GLuint index,
+ GLsizei prop_count,
+ const GLenum* props,
+ GLsizei bufsize,
+ GLsizei* length,
+ GLint* params) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::GetProgramResourceiv");
+ gl_->GetProgramResourceiv(program, program_interface, index, prop_count,
+ props, bufsize, length, params);
+}
+
+GLint GLES2TraceImplementation::GetProgramResourceLocation(
+ GLuint program,
+ GLenum program_interface,
+ const char* name) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu",
+ "GLES2Trace::GetProgramResourceLocation");
+ return gl_->GetProgramResourceLocation(program, program_interface, name);
+}
+
void GLES2TraceImplementation::MemoryBarrierEXT(GLbitfield barriers) {
TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::MemoryBarrierEXT");
gl_->MemoryBarrierEXT(barriers);
@@ -2718,6 +2780,17 @@ GLuint GLES2TraceImplementation::CreateAndTexStorage2DSharedImageCHROMIUM(
return gl_->CreateAndTexStorage2DSharedImageCHROMIUM(mailbox);
}
+GLuint GLES2TraceImplementation::
+ CreateAndTexStorage2DSharedImageWithInternalFormatCHROMIUM(
+ const GLbyte* mailbox,
+ GLenum internalformat) {
+ TRACE_EVENT_BINARY_EFFICIENT0(
+ "gpu",
+ "GLES2Trace::CreateAndTexStorage2DSharedImageWithInternalFormatCHROMIUM");
+ return gl_->CreateAndTexStorage2DSharedImageWithInternalFormatCHROMIUM(
+ mailbox, internalformat);
+}
+
void GLES2TraceImplementation::BeginSharedImageAccessDirectCHROMIUM(
GLuint texture,
GLenum mode) {
diff --git a/chromium/gpu/command_buffer/client/gpu_control_client.h b/chromium/gpu/command_buffer/client/gpu_control_client.h
index e9fd10d3104..27822b7a4cc 100644
--- a/chromium/gpu/command_buffer/client/gpu_control_client.h
+++ b/chromium/gpu/command_buffer/client/gpu_control_client.h
@@ -7,6 +7,7 @@
#include <cstdint>
+#include "base/containers/span.h"
#include "ui/gfx/presentation_feedback.h"
namespace gpu {
@@ -30,6 +31,8 @@ class GpuControlClient {
virtual void OnSwapBufferPresented(
uint64_t swap_id,
const gfx::PresentationFeedback& feedback) = 0;
+ // Sent by the WebGPUDecoder
+ virtual void OnGpuControlReturnData(base::span<const uint8_t> data) = 0;
};
} // namespace gpu
diff --git a/chromium/gpu/command_buffer/client/image_decode_accelerator_interface.h b/chromium/gpu/command_buffer/client/image_decode_accelerator_interface.h
index a2f8b9c343b..d56c6b6ce2f 100644
--- a/chromium/gpu/command_buffer/client/image_decode_accelerator_interface.h
+++ b/chromium/gpu/command_buffer/client/image_decode_accelerator_interface.h
@@ -5,6 +5,8 @@
#ifndef GPU_COMMAND_BUFFER_CLIENT_IMAGE_DECODE_ACCELERATOR_INTERFACE_H_
#define GPU_COMMAND_BUFFER_CLIENT_IMAGE_DECODE_ACCELERATOR_INTERFACE_H_
+#include <stdint.h>
+
#include "base/containers/span.h"
#include "gpu/command_buffer/common/command_buffer_id.h"
#include "gpu/command_buffer/common/sync_token.h"
@@ -22,6 +24,9 @@ class ImageDecodeAcceleratorInterface {
public:
virtual ~ImageDecodeAcceleratorInterface() {}
+ virtual bool IsImageSupported(
+ base::span<const uint8_t> encoded_data) const = 0;
+
virtual SyncToken ScheduleImageDecode(
base::span<const uint8_t> encoded_data,
const gfx::Size& output_size,
@@ -29,10 +34,11 @@ class ImageDecodeAcceleratorInterface {
uint32_t transfer_cache_entry_id,
int32_t discardable_handle_shm_id,
uint32_t discardable_handle_shm_offset,
+ uint64_t discardable_handle_release_count,
const gfx::ColorSpace& target_color_space,
bool needs_mips) = 0;
};
} // namespace gpu
-#endif // GPU_COMMAND_BUFFER_CLIENT_IMAGE_DECODE_ACCELERATOR_INTERFACE_H_ \ No newline at end of file
+#endif // GPU_COMMAND_BUFFER_CLIENT_IMAGE_DECODE_ACCELERATOR_INTERFACE_H_
diff --git a/chromium/gpu/command_buffer/client/implementation_base.cc b/chromium/gpu/command_buffer/client/implementation_base.cc
index 3477663c3b1..d1564241d07 100644
--- a/chromium/gpu/command_buffer/client/implementation_base.cc
+++ b/chromium/gpu/command_buffer/client/implementation_base.cc
@@ -6,6 +6,7 @@
#include <algorithm>
+#include "base/bind.h"
#include "base/strings/stringprintf.h"
#include "base/trace_event/memory_dump_manager.h"
#include "base/trace_event/trace_event.h"
diff --git a/chromium/gpu/command_buffer/client/mapped_memory_unittest.cc b/chromium/gpu/command_buffer/client/mapped_memory_unittest.cc
index affe2335760..d1844a0443c 100644
--- a/chromium/gpu/command_buffer/client/mapped_memory_unittest.cc
+++ b/chromium/gpu/command_buffer/client/mapped_memory_unittest.cc
@@ -16,7 +16,6 @@
#include "gpu/command_buffer/client/cmd_buffer_helper.h"
#include "gpu/command_buffer/client/command_buffer_direct_locked.h"
#include "gpu/command_buffer/service/mocks.h"
-#include "gpu/command_buffer/service/transfer_buffer_manager.h"
#include "testing/gtest/include/gtest/gtest.h"
namespace gpu {
@@ -34,9 +33,7 @@ class MappedMemoryTestBase : public testing::Test {
static const unsigned int kBufferSize = 1024;
void SetUp() override {
- transfer_buffer_manager_ = std::make_unique<TransferBufferManager>(nullptr);
- command_buffer_.reset(
- new CommandBufferDirectLocked(transfer_buffer_manager_.get()));
+ command_buffer_.reset(new CommandBufferDirectLocked());
api_mock_.reset(new AsyncAPIMock(true, command_buffer_->service()));
command_buffer_->set_handler(api_mock_.get());
@@ -55,7 +52,6 @@ class MappedMemoryTestBase : public testing::Test {
int32_t GetToken() { return command_buffer_->GetLastState().token; }
- std::unique_ptr<TransferBufferManager> transfer_buffer_manager_;
std::unique_ptr<CommandBufferDirectLocked> command_buffer_;
std::unique_ptr<AsyncAPIMock> api_mock_;
std::unique_ptr<CommandBufferHelper> helper_;
diff --git a/chromium/gpu/command_buffer/client/mock_transfer_buffer.cc b/chromium/gpu/command_buffer/client/mock_transfer_buffer.cc
index 5b65cfae8fd..c5b2bae99b4 100644
--- a/chromium/gpu/command_buffer/client/mock_transfer_buffer.cc
+++ b/chromium/gpu/command_buffer/client/mock_transfer_buffer.cc
@@ -48,7 +48,7 @@ bool MockTransferBuffer::Initialize(unsigned int starting_buffer_size,
// Just check they match.
return size_ == starting_buffer_size && result_size_ == result_size &&
alignment_ == alignment && !initialize_fail_;
-};
+}
int MockTransferBuffer::GetShmId() {
return buffer_ids_[actual_buffer_index_];
@@ -134,6 +134,10 @@ unsigned int MockTransferBuffer::GetFragmentedFreeSize() const {
return 0;
}
+unsigned int MockTransferBuffer::GetMaxSize() const {
+ return 0;
+}
+
void MockTransferBuffer::ShrinkLastBlock(unsigned int new_size) {}
uint32_t MockTransferBuffer::MaxTransferBufferSize() {
diff --git a/chromium/gpu/command_buffer/client/mock_transfer_buffer.h b/chromium/gpu/command_buffer/client/mock_transfer_buffer.h
index 995bc78dc34..3106c6970eb 100644
--- a/chromium/gpu/command_buffer/client/mock_transfer_buffer.h
+++ b/chromium/gpu/command_buffer/client/mock_transfer_buffer.h
@@ -50,6 +50,7 @@ class MockTransferBuffer : public TransferBufferInterface {
unsigned int GetFreeSize() const override;
unsigned int GetFragmentedFreeSize() const override;
void ShrinkLastBlock(unsigned int new_size) override;
+ unsigned int GetMaxSize() const override;
uint32_t MaxTransferBufferSize();
unsigned int RoundToAlignment(unsigned int size);
diff --git a/chromium/gpu/command_buffer/client/program_info_manager.cc b/chromium/gpu/command_buffer/client/program_info_manager.cc
index 9581a354b2b..c49efa99371 100644
--- a/chromium/gpu/command_buffer/client/program_info_manager.cc
+++ b/chromium/gpu/command_buffer/client/program_info_manager.cc
@@ -1015,6 +1015,50 @@ bool ProgramInfoManager::GetUniformIndices(GLES2Implementation* gl,
return gl->GetUniformIndicesHelper(program, count, names, indices);
}
+bool ProgramInfoManager::GetProgramInterfaceiv(
+ GLES2Implementation* gl, GLuint program, GLenum program_interface,
+ GLenum pname, GLint* params) {
+ // TODO(jiajie.hu@intel.com): The info is not cached for now, so always
+ // fallback to the IPC path.
+ return false;
+}
+
+GLuint ProgramInfoManager::GetProgramResourceIndex(
+ GLES2Implementation* gl, GLuint program, GLenum program_interface,
+ const char* name) {
+ // TODO(jiajie.hu@intel.com): The info is not cached for now, so always
+ // fallback to the IPC path.
+ return gl->GetProgramResourceIndexHelper(program, program_interface, name);
+}
+
+bool ProgramInfoManager::GetProgramResourceName(
+ GLES2Implementation* gl, GLuint program, GLenum program_interface,
+ GLuint index, GLsizei bufsize, GLsizei* length, char* name) {
+ // TODO(jiajie.hu@intel.com): The info is not cached for now, so always
+ // fallback to the IPC path.
+ return gl->GetProgramResourceNameHelper(
+ program, program_interface, index, bufsize, length, name);
+}
+
+bool ProgramInfoManager::GetProgramResourceiv(
+ GLES2Implementation* gl, GLuint program, GLenum program_interface,
+ GLuint index, GLsizei prop_count, const GLenum* props, GLsizei bufsize,
+ GLsizei* length, GLint* params) {
+ // TODO(jiajie.hu@intel.com): The info is not cached for now, so always
+ // fallback to the IPC path.
+ return gl->GetProgramResourceivHelper(
+ program, program_interface, index, prop_count, props, bufsize, length,
+ params);
+}
+
+GLint ProgramInfoManager::GetProgramResourceLocation(
+ GLES2Implementation* gl, GLuint program, GLenum program_interface,
+ const char* name) {
+ // TODO(jiajie.hu@intel.com): The info is not cached for now, so always
+ // fallback to the IPC path.
+ return gl->GetProgramResourceLocationHelper(program, program_interface, name);
+}
+
} // namespace gles2
} // namespace gpu
diff --git a/chromium/gpu/command_buffer/client/program_info_manager.h b/chromium/gpu/command_buffer/client/program_info_manager.h
index 4f1ddcb55a0..ee39eecb8e1 100644
--- a/chromium/gpu/command_buffer/client/program_info_manager.h
+++ b/chromium/gpu/command_buffer/client/program_info_manager.h
@@ -83,6 +83,27 @@ class GLES2_IMPL_EXPORT ProgramInfoManager {
GLES2Implementation* gl, GLuint program, GLsizei count,
const GLuint* indices, GLenum pname, GLint* params);
+ bool GetProgramInterfaceiv(
+ GLES2Implementation* gl, GLuint program, GLenum program_interface,
+ GLenum pname, GLint* params);
+
+ GLuint GetProgramResourceIndex(
+ GLES2Implementation* gl, GLuint program, GLenum program_interface,
+ const char* name);
+
+ bool GetProgramResourceName(
+ GLES2Implementation* gl, GLuint program, GLenum program_interface,
+ GLuint index, GLsizei bufsize, GLsizei* length, char* name);
+
+ bool GetProgramResourceiv(
+ GLES2Implementation* gl, GLuint program, GLenum program_interface,
+ GLuint index, GLsizei prop_count, const GLenum* props, GLsizei bufsize,
+ GLsizei* length, GLint* params);
+
+ GLint GetProgramResourceLocation(
+ GLES2Implementation* gl, GLuint program, GLenum program_interface,
+ const char* name);
+
private:
friend class ProgramInfoManagerTest;
diff --git a/chromium/gpu/command_buffer/client/raster_cmd_helper_autogen.h b/chromium/gpu/command_buffer/client/raster_cmd_helper_autogen.h
index ec362130fcd..1ea6b2d58a2 100644
--- a/chromium/gpu/command_buffer/client/raster_cmd_helper_autogen.h
+++ b/chromium/gpu/command_buffer/client/raster_cmd_helper_autogen.h
@@ -87,7 +87,6 @@ void InsertFenceSyncCHROMIUM(GLuint64 release_count) {
void BeginRasterCHROMIUMImmediate(GLuint sk_color,
GLuint msaa_sample_count,
GLboolean can_use_lcd_text,
- GLuint color_space_transfer_cache_id,
const GLbyte* mailbox) {
const uint32_t size =
raster::cmds::BeginRasterCHROMIUMImmediate::ComputeSize();
@@ -95,8 +94,7 @@ void BeginRasterCHROMIUMImmediate(GLuint sk_color,
GetImmediateCmdSpaceTotalSize<raster::cmds::BeginRasterCHROMIUMImmediate>(
size);
if (c) {
- c->Init(sk_color, msaa_sample_count, can_use_lcd_text,
- color_space_transfer_cache_id, mailbox);
+ c->Init(sk_color, msaa_sample_count, can_use_lcd_text, mailbox);
}
}
diff --git a/chromium/gpu/command_buffer/client/raster_implementation.cc b/chromium/gpu/command_buffer/client/raster_implementation.cc
index 2a8d060343f..fe318936d51 100644
--- a/chromium/gpu/command_buffer/client/raster_implementation.cc
+++ b/chromium/gpu/command_buffer/client/raster_implementation.cc
@@ -21,6 +21,7 @@
#include "base/bind.h"
#include "base/bits.h"
#include "base/compiler_specific.h"
+#include "base/metrics/histogram_macros.h"
#include "base/no_destructor.h"
#include "base/numerics/safe_math.h"
#include "base/strings/stringprintf.h"
@@ -29,7 +30,6 @@
#include "base/trace_event/process_memory_dump.h"
#include "base/trace_event/trace_event.h"
#include "build/build_config.h"
-#include "cc/paint/color_space_transfer_cache_entry.h"
#include "cc/paint/decode_stashing_image_provider.h"
#include "cc/paint/display_item_list.h"
#include "cc/paint/paint_cache.h"
@@ -91,6 +91,21 @@ namespace {
const uint32_t kMaxTransferCacheEntrySizeForTransferBuffer = 1024;
+void RecordPaintOpSize(size_t size) {
+ constexpr size_t kMinPaintOpSize = 512 * 1024;
+ constexpr size_t kMaxPaintOpSize = 16 * 1024 * 1024;
+
+ // Serialization failure, record max size.
+ if (size == 0u)
+ size = kMaxPaintOpSize;
+
+ if (size < kMinPaintOpSize)
+ return;
+
+ UMA_HISTOGRAM_CUSTOM_COUNTS("GPU.OopRaster.PaintOpSerializationSize", size,
+ kMinPaintOpSize, kMaxPaintOpSize, 50);
+}
+
} // namespace
// Helper to copy data to the GPU service over the transfer cache.
@@ -189,11 +204,13 @@ class RasterImplementation::PaintOpSerializer {
RasterImplementation* ri,
cc::DecodeStashingImageProvider* stashing_image_provider,
TransferCacheSerializeHelperImpl* transfer_cache_helper,
- ClientFontManager* font_manager)
+ ClientFontManager* font_manager,
+ size_t* max_op_size_hint)
: ri_(ri),
stashing_image_provider_(stashing_image_provider),
transfer_cache_helper_(transfer_cache_helper),
- font_manager_(font_manager) {
+ font_manager_(font_manager),
+ max_op_size_hint_(max_op_size_hint) {
buffer_ =
static_cast<char*>(ri_->MapRasterCHROMIUM(initial_size, &free_bytes_));
}
@@ -207,20 +224,47 @@ class RasterImplementation::PaintOpSerializer {
const cc::PaintOp::SerializeOptions& options) {
if (!valid())
return 0;
+
size_t size = op->Serialize(buffer_ + written_bytes_, free_bytes_, options);
+ size_t block_size = *max_op_size_hint_;
+
if (!size) {
// The entries serialized for |op| above will not be transferred since the
// op will be re-serialized once the buffer is remapped.
ri_->paint_cache_->AbortPendingEntries();
-
SendSerializedData();
- buffer_ =
- static_cast<char*>(ri_->MapRasterCHROMIUM(kBlockAlloc, &free_bytes_));
- if (!buffer_) {
- return 0;
+
+ const unsigned int max_size = ri_->transfer_buffer_->GetMaxSize();
+ DCHECK_LE(block_size, max_size);
+ while (true) {
+ buffer_ = static_cast<char*>(
+ ri_->MapRasterCHROMIUM(block_size, &free_bytes_));
+ if (!buffer_) {
+ return 0;
+ }
+
+ size = op->Serialize(buffer_ + written_bytes_, free_bytes_, options);
+ if (size) {
+ *max_op_size_hint_ = std::max(size, *max_op_size_hint_);
+ break;
+ }
+
+ ri_->paint_cache_->AbortPendingEntries();
+ ri_->UnmapRasterCHROMIUM(0u, 0u);
+
+ if (block_size == max_size)
+ break;
+ block_size = std::min(block_size * 2, static_cast<size_t>(max_size));
+ }
+
+ if (!size) {
+ RecordPaintOpSize(0u);
+ LOG(ERROR) << "Failed to serialize op in " << block_size << " bytes.";
+ return 0u;
}
- size = op->Serialize(buffer_ + written_bytes_, free_bytes_, options);
}
+
+ RecordPaintOpSize(size);
DCHECK_LE(size, free_bytes_);
DCHECK(base::CheckAdd<uint32_t>(written_bytes_, size).IsValid());
@@ -261,8 +305,6 @@ class RasterImplementation::PaintOpSerializer {
bool valid() const { return !!buffer_; }
private:
- static constexpr GLsizeiptr kBlockAlloc = 512 * 1024;
-
RasterImplementation* const ri_;
char* buffer_;
cc::DecodeStashingImageProvider* const stashing_image_provider_;
@@ -272,6 +314,8 @@ class RasterImplementation::PaintOpSerializer {
uint32_t written_bytes_ = 0;
uint32_t free_bytes_ = 0;
+ size_t* max_op_size_hint_;
+
DISALLOW_COPY_AND_ASSIGN(PaintOpSerializer);
};
@@ -384,6 +428,11 @@ void RasterImplementation::OnSwapBufferPresented(
NOTREACHED();
}
+void RasterImplementation::OnGpuControlReturnData(
+ base::span<const uint8_t> data) {
+ NOTIMPLEMENTED();
+}
+
void RasterImplementation::SetAggressivelyFreeResources(
bool aggressively_free_resources) {
TRACE_EVENT1("gpu", "RasterImplementation::SetAggressivelyFreeResources",
@@ -1045,28 +1094,15 @@ void RasterImplementation::BeginRasterCHROMIUM(
GLuint sk_color,
GLuint msaa_sample_count,
GLboolean can_use_lcd_text,
- const cc::RasterColorSpace& raster_color_space,
+ const gfx::ColorSpace& color_space,
const GLbyte* mailbox) {
DCHECK(!raster_properties_);
- TransferCacheSerializeHelperImpl transfer_cache_serialize_helper(this);
- if (!transfer_cache_serialize_helper.LockEntry(
- cc::TransferCacheEntryType::kColorSpace,
- raster_color_space.color_space_id)) {
- transfer_cache_serialize_helper.CreateEntry(
- cc::ClientColorSpaceTransferCacheEntry(raster_color_space), nullptr);
- }
- transfer_cache_serialize_helper.AssertLocked(
- cc::TransferCacheEntryType::kColorSpace,
- raster_color_space.color_space_id);
-
- helper_->BeginRasterCHROMIUMImmediate(
- sk_color, msaa_sample_count, can_use_lcd_text,
- raster_color_space.color_space_id, mailbox);
- transfer_cache_serialize_helper.FlushEntries();
+ helper_->BeginRasterCHROMIUMImmediate(sk_color, msaa_sample_count,
+ can_use_lcd_text, mailbox);
raster_properties_.emplace(sk_color, can_use_lcd_text,
- raster_color_space.color_space.ToSkColorSpace());
+ color_space.ToSkColorSpace());
}
void RasterImplementation::RasterCHROMIUM(const cc::DisplayItemList* list,
@@ -1076,9 +1112,11 @@ void RasterImplementation::RasterCHROMIUM(const cc::DisplayItemList* list,
const gfx::Rect& playback_rect,
const gfx::Vector2dF& post_translate,
GLfloat post_scale,
- bool requires_clear) {
+ bool requires_clear,
+ size_t* max_op_size_hint) {
TRACE_EVENT1("gpu", "RasterImplementation::RasterCHROMIUM",
"raster_chromium_id", ++raster_chromium_id_);
+ DCHECK(max_op_size_hint);
if (std::abs(post_scale) < std::numeric_limits<float>::epsilon())
return;
@@ -1115,7 +1153,7 @@ void RasterImplementation::RasterCHROMIUM(const cc::DisplayItemList* list,
TransferCacheSerializeHelperImpl transfer_cache_serialize_helper(this);
PaintOpSerializer op_serializer(free_size, this, &stashing_image_provider,
&transfer_cache_serialize_helper,
- &font_manager_);
+ &font_manager_, max_op_size_hint);
cc::PaintOpBufferSerializer::SerializeCallback serialize_cb =
base::BindRepeating(&PaintOpSerializer::Serialize,
base::Unretained(&op_serializer));
@@ -1123,8 +1161,7 @@ void RasterImplementation::RasterCHROMIUM(const cc::DisplayItemList* list,
cc::PaintOpBufferSerializer serializer(
serialize_cb, &stashing_image_provider, &transfer_cache_serialize_helper,
GetOrCreatePaintCache(), font_manager_.strike_server(),
- raster_properties_->color_space.get(),
- raster_properties_->can_use_lcd_text,
+ raster_properties_->color_space, raster_properties_->can_use_lcd_text,
capabilities().context_supports_distance_field_text,
capabilities().max_texture_size,
capabilities().glyph_cache_max_texture_bytes);
@@ -1146,6 +1183,11 @@ void RasterImplementation::EndRasterCHROMIUM() {
FlushPaintCachePurgedEntries();
}
+bool RasterImplementation::CanDecodeWithHardwareAcceleration(
+ base::span<const uint8_t> encoded_data) {
+ return image_decode_accelerator_->IsImageSupported(encoded_data);
+}
+
SyncToken RasterImplementation::ScheduleImageDecode(
base::span<const uint8_t> encoded_data,
const gfx::Size& output_size,
@@ -1177,11 +1219,15 @@ void RasterImplementation::IssueImageDecodeCacheEntryCreation(
DCHECK(image_decode_accelerator_);
DCHECK(handle.IsValid());
+ // Insert a sync token to signal that |handle|'s buffer has been registered.
+ SyncToken sync_token;
+ GenUnverifiedSyncTokenCHROMIUM(sync_token.GetData());
+
// Send the decode request to the service.
*decode_sync_token = image_decode_accelerator_->ScheduleImageDecode(
encoded_data, output_size, gpu_control_->GetCommandBufferID(),
transfer_cache_entry_id, handle.shm_id(), handle.byte_offset(),
- target_color_space, needs_mips);
+ sync_token.release_count(), target_color_space, needs_mips);
}
GLuint RasterImplementation::CreateAndConsumeForGpuRaster(
diff --git a/chromium/gpu/command_buffer/client/raster_implementation.h b/chromium/gpu/command_buffer/client/raster_implementation.h
index c4e01fd8628..3386c834712 100644
--- a/chromium/gpu/command_buffer/client/raster_implementation.h
+++ b/chromium/gpu/command_buffer/client/raster_implementation.h
@@ -124,7 +124,7 @@ class RASTER_EXPORT RasterImplementation : public RasterInterface,
void BeginRasterCHROMIUM(GLuint sk_color,
GLuint msaa_sample_count,
GLboolean can_use_lcd_text,
- const cc::RasterColorSpace& raster_color_space,
+ const gfx::ColorSpace& color_space,
const GLbyte* mailbox) override;
void RasterCHROMIUM(const cc::DisplayItemList* list,
cc::ImageProvider* provider,
@@ -133,7 +133,10 @@ class RASTER_EXPORT RasterImplementation : public RasterInterface,
const gfx::Rect& playback_rect,
const gfx::Vector2dF& post_translate,
GLfloat post_scale,
- bool requires_clear) override;
+ bool requires_clear,
+ size_t* max_op_size_hint) override;
+ bool CanDecodeWithHardwareAcceleration(
+ base::span<const uint8_t> encoded_data) override;
SyncToken ScheduleImageDecode(base::span<const uint8_t> encoded_data,
const gfx::Size& output_size,
uint32_t transfer_cache_entry_id,
@@ -233,6 +236,7 @@ class RASTER_EXPORT RasterImplementation : public RasterInterface,
const SwapBuffersCompleteParams& params) final;
void OnSwapBufferPresented(uint64_t swap_id,
const gfx::PresentationFeedback& feedback) final;
+ void OnGpuControlReturnData(base::span<const uint8_t> data) final;
// Gets the GLError through our wrapper.
GLenum GetGLError();
diff --git a/chromium/gpu/command_buffer/client/raster_implementation_gles.cc b/chromium/gpu/command_buffer/client/raster_implementation_gles.cc
index 4fd888e1edf..ff592ba39ec 100644
--- a/chromium/gpu/command_buffer/client/raster_implementation_gles.cc
+++ b/chromium/gpu/command_buffer/client/raster_implementation_gles.cc
@@ -12,7 +12,6 @@
#include <vector>
#include "base/logging.h"
-#include "cc/paint/color_space_transfer_cache_entry.h"
#include "cc/paint/decode_stashing_image_provider.h"
#include "cc/paint/display_item_list.h" // nogncheck
#include "cc/paint/paint_op_buffer_serializer.h"
@@ -128,7 +127,7 @@ void RasterImplementationGLES::BeginRasterCHROMIUM(
GLuint sk_color,
GLuint msaa_sample_count,
GLboolean can_use_lcd_text,
- const cc::RasterColorSpace& raster_color_space,
+ const gfx::ColorSpace& color_space,
const GLbyte* mailbox) {
NOTREACHED();
}
@@ -141,7 +140,8 @@ void RasterImplementationGLES::RasterCHROMIUM(
const gfx::Rect& playback_rect,
const gfx::Vector2dF& post_translate,
GLfloat post_scale,
- bool requires_clear) {
+ bool requires_clear,
+ size_t* max_op_size_hint) {
NOTREACHED();
}
@@ -153,6 +153,12 @@ void RasterImplementationGLES::EndRasterCHROMIUM() {
NOTREACHED();
}
+bool RasterImplementationGLES::CanDecodeWithHardwareAcceleration(
+ base::span<const uint8_t> encoded_data) {
+ NOTREACHED();
+ return false;
+}
+
SyncToken RasterImplementationGLES::ScheduleImageDecode(
base::span<const uint8_t> encoded_data,
const gfx::Size& output_size,
diff --git a/chromium/gpu/command_buffer/client/raster_implementation_gles.h b/chromium/gpu/command_buffer/client/raster_implementation_gles.h
index 2de8b7f99d2..321433f6cf0 100644
--- a/chromium/gpu/command_buffer/client/raster_implementation_gles.h
+++ b/chromium/gpu/command_buffer/client/raster_implementation_gles.h
@@ -65,7 +65,7 @@ class RASTER_EXPORT RasterImplementationGLES : public RasterInterface {
void BeginRasterCHROMIUM(GLuint sk_color,
GLuint msaa_sample_count,
GLboolean can_use_lcd_text,
- const cc::RasterColorSpace& raster_color_space,
+ const gfx::ColorSpace& color_space,
const GLbyte* mailbox) override;
void RasterCHROMIUM(const cc::DisplayItemList* list,
cc::ImageProvider* provider,
@@ -74,10 +74,13 @@ class RASTER_EXPORT RasterImplementationGLES : public RasterInterface {
const gfx::Rect& playback_rect,
const gfx::Vector2dF& post_translate,
GLfloat post_scale,
- bool requires_clear) override;
+ bool requires_clear,
+ size_t* max_op_size_hint) override;
void EndRasterCHROMIUM() override;
// Image decode acceleration.
+ bool CanDecodeWithHardwareAcceleration(
+ base::span<const uint8_t> encoded_data) override;
SyncToken ScheduleImageDecode(base::span<const uint8_t> encoded_data,
const gfx::Size& output_size,
uint32_t transfer_cache_entry_id,
diff --git a/chromium/gpu/command_buffer/client/raster_implementation_gles_unittest.cc b/chromium/gpu/command_buffer/client/raster_implementation_gles_unittest.cc
index 2a6964a0d5b..6e0ac739fb5 100644
--- a/chromium/gpu/command_buffer/client/raster_implementation_gles_unittest.cc
+++ b/chromium/gpu/command_buffer/client/raster_implementation_gles_unittest.cc
@@ -13,7 +13,6 @@
#include <vector>
#include "base/containers/flat_map.h"
-#include "cc/paint/color_space_transfer_cache_entry.h"
#include "cc/paint/display_item_list.h"
#include "cc/paint/image_provider.h"
#include "components/viz/common/resources/resource_format_utils.h"
@@ -233,9 +232,8 @@ class ContextSupportStub : public ContextSupport {
class ImageProviderStub : public cc::ImageProvider {
public:
~ImageProviderStub() override {}
- ScopedDecodedDrawImage GetDecodedDrawImage(
- const cc::DrawImage& draw_image) override {
- return ScopedDecodedDrawImage();
+ ScopedResult GetRasterContent(const cc::DrawImage& draw_image) override {
+ return ScopedResult();
}
};
diff --git a/chromium/gpu/command_buffer/client/raster_implementation_unittest.cc b/chromium/gpu/command_buffer/client/raster_implementation_unittest.cc
index 99cf3a984ab..3e34ef9ebbc 100644
--- a/chromium/gpu/command_buffer/client/raster_implementation_unittest.cc
+++ b/chromium/gpu/command_buffer/client/raster_implementation_unittest.cc
@@ -14,6 +14,7 @@
#include <memory>
+#include "base/bind.h"
#include "base/compiler_specific.h"
#include "base/stl_util.h"
#include "cc/paint/raw_memory_transfer_cache_entry.h"
diff --git a/chromium/gpu/command_buffer/client/raster_interface.h b/chromium/gpu/command_buffer/client/raster_interface.h
index 7373ad4d91a..54911b0164d 100644
--- a/chromium/gpu/command_buffer/client/raster_interface.h
+++ b/chromium/gpu/command_buffer/client/raster_interface.h
@@ -14,7 +14,6 @@
namespace cc {
class DisplayItemList;
class ImageProvider;
-struct RasterColorSpace;
} // namespace cc
namespace gfx {
@@ -51,12 +50,13 @@ class RasterInterface {
GLsizei width,
GLsizei height) = 0;
// OOP-Raster
- virtual void BeginRasterCHROMIUM(
- GLuint sk_color,
- GLuint msaa_sample_count,
- GLboolean can_use_lcd_text,
- const cc::RasterColorSpace& raster_color_space,
- const GLbyte* mailbox) = 0;
+ virtual void BeginRasterCHROMIUM(GLuint sk_color,
+ GLuint msaa_sample_count,
+ GLboolean can_use_lcd_text,
+ const gfx::ColorSpace& color_space,
+ const GLbyte* mailbox) = 0;
+
+ static constexpr size_t kDefaultMaxOpSizeHint = 512 * 1024;
virtual void RasterCHROMIUM(const cc::DisplayItemList* list,
cc::ImageProvider* provider,
const gfx::Size& content_size,
@@ -64,7 +64,14 @@ class RasterInterface {
const gfx::Rect& playback_rect,
const gfx::Vector2dF& post_translate,
GLfloat post_scale,
- bool requires_clear) = 0;
+ bool requires_clear,
+ size_t* max_op_size_hint) = 0;
+
+ // Determines if an encoded image can be decoded using hardware decode
+ // acceleration. If this method returns true, then the client can be confident
+ // that a call to ScheduleImageDecode() will succeed.
+ virtual bool CanDecodeWithHardwareAcceleration(
+ base::span<const uint8_t> encoded_data) = 0;
// Schedules a hardware-accelerated image decode and a sync token that's
// released when the image decode is complete. If the decode could not be
diff --git a/chromium/gpu/command_buffer/client/ring_buffer_test.cc b/chromium/gpu/command_buffer/client/ring_buffer_test.cc
index 3b334e40124..4dd4f680c9c 100644
--- a/chromium/gpu/command_buffer/client/ring_buffer_test.cc
+++ b/chromium/gpu/command_buffer/client/ring_buffer_test.cc
@@ -17,7 +17,6 @@
#include "gpu/command_buffer/client/cmd_buffer_helper.h"
#include "gpu/command_buffer/service/command_buffer_direct.h"
#include "gpu/command_buffer/service/mocks.h"
-#include "gpu/command_buffer/service/transfer_buffer_manager.h"
#include "testing/gtest/include/gtest/gtest.h"
namespace gpu {
@@ -59,9 +58,7 @@ class BaseRingBufferTest : public testing::Test {
void SetUp() override {
delay_set_token_ = false;
- transfer_buffer_manager_ = std::make_unique<TransferBufferManager>(nullptr);
- command_buffer_.reset(
- new CommandBufferDirect(transfer_buffer_manager_.get()));
+ command_buffer_.reset(new CommandBufferDirect());
api_mock_.reset(new AsyncAPIMock(true, command_buffer_->service()));
command_buffer_->set_handler(api_mock_.get());
@@ -80,7 +77,6 @@ class BaseRingBufferTest : public testing::Test {
int32_t GetToken() { return command_buffer_->GetLastState().token; }
- std::unique_ptr<TransferBufferManager> transfer_buffer_manager_;
std::unique_ptr<CommandBufferDirect> command_buffer_;
std::unique_ptr<AsyncAPIMock> api_mock_;
std::unique_ptr<CommandBufferHelper> helper_;
diff --git a/chromium/gpu/command_buffer/client/transfer_buffer.cc b/chromium/gpu/command_buffer/client/transfer_buffer.cc
index cf346c8b544..2213dc39c6d 100644
--- a/chromium/gpu/command_buffer/client/transfer_buffer.cc
+++ b/chromium/gpu/command_buffer/client/transfer_buffer.cc
@@ -105,6 +105,10 @@ void TransferBuffer::ShrinkLastBlock(unsigned int new_size) {
ring_buffer_->ShrinkLastBlock(new_size);
}
+unsigned int TransferBuffer::GetMaxSize() const {
+ return max_buffer_size_ - result_size_;
+}
+
void TransferBuffer::AllocateRingBuffer(unsigned int size) {
for (;size >= min_buffer_size_; size /= 2) {
int32_t id = -1;
@@ -271,10 +275,6 @@ unsigned int TransferBuffer::GetCurrentMaxAllocationWithoutRealloc() const {
return HaveBuffer() ? ring_buffer_->GetLargestFreeOrPendingSize() : 0;
}
-unsigned int TransferBuffer::GetMaxAllocation() const {
- return HaveBuffer() ? max_buffer_size_ - result_size_ : 0;
-}
-
ScopedTransferBufferPtr::ScopedTransferBufferPtr(
ScopedTransferBufferPtr&& other)
: buffer_(other.buffer_),
diff --git a/chromium/gpu/command_buffer/client/transfer_buffer.h b/chromium/gpu/command_buffer/client/transfer_buffer.h
index 954621d8b62..505772bc0d6 100644
--- a/chromium/gpu/command_buffer/client/transfer_buffer.h
+++ b/chromium/gpu/command_buffer/client/transfer_buffer.h
@@ -67,6 +67,8 @@ class GPU_EXPORT TransferBufferInterface {
virtual void ShrinkLastBlock(unsigned int new_size) = 0;
+ virtual unsigned int GetMaxSize() const = 0;
+
protected:
template <typename>
friend class ScopedResultPtr;
@@ -107,10 +109,10 @@ class GPU_EXPORT TransferBuffer : public TransferBufferInterface {
unsigned int GetFreeSize() const override;
unsigned int GetFragmentedFreeSize() const override;
void ShrinkLastBlock(unsigned int new_size) override;
+ unsigned int GetMaxSize() const override;
// These are for testing.
unsigned int GetCurrentMaxAllocationWithoutRealloc() const;
- unsigned int GetMaxAllocation() const;
// We will attempt to shrink the ring buffer once the number of bytes
// allocated reaches this threshold times the high water mark.
@@ -284,7 +286,7 @@ class ScopedResultPtr {
other.result_ = nullptr;
other.transfer_buffer_ = nullptr;
return *this;
- };
+ }
// Dereferencing behaviors
T& operator*() const { return *result_; }
diff --git a/chromium/gpu/command_buffer/client/webgpu_implementation.cc b/chromium/gpu/command_buffer/client/webgpu_implementation.cc
index a6b06f93af6..f13f32688b2 100644
--- a/chromium/gpu/command_buffer/client/webgpu_implementation.cc
+++ b/chromium/gpu/command_buffer/client/webgpu_implementation.cc
@@ -4,6 +4,11 @@
#include "gpu/command_buffer/client/webgpu_implementation.h"
+#include <vector>
+
+#include "gpu/command_buffer/client/gpu_control.h"
+#include "gpu/command_buffer/client/shared_memory_limits.h"
+
#define GPU_CLIENT_SINGLE_THREAD_CHECK()
namespace gpu {
@@ -14,6 +19,140 @@ namespace webgpu {
// instead of having to edit some template or the code generator.
#include "gpu/command_buffer/client/webgpu_implementation_impl_autogen.h"
+WebGPUImplementation::WebGPUImplementation(
+ WebGPUCmdHelper* helper,
+ TransferBufferInterface* transfer_buffer,
+ GpuControl* gpu_control)
+ : ImplementationBase(helper, transfer_buffer, gpu_control),
+ helper_(helper) {}
+
+WebGPUImplementation::~WebGPUImplementation() {}
+
+gpu::ContextResult WebGPUImplementation::Initialize(
+ const SharedMemoryLimits& limits) {
+ TRACE_EVENT0("gpu", "WebGPUImplementation::Initialize");
+ return ImplementationBase::Initialize(limits);
+}
+
+// ContextSupport implementation.
+void WebGPUImplementation::SetAggressivelyFreeResources(
+ bool aggressively_free_resources) {
+ NOTIMPLEMENTED();
+}
+void WebGPUImplementation::Swap(uint32_t flags,
+ SwapCompletedCallback complete_callback,
+ PresentationCallback presentation_callback) {
+ NOTIMPLEMENTED();
+}
+void WebGPUImplementation::SwapWithBounds(
+ const std::vector<gfx::Rect>& rects,
+ uint32_t flags,
+ SwapCompletedCallback swap_completed,
+ PresentationCallback presentation_callback) {
+ NOTIMPLEMENTED();
+}
+void WebGPUImplementation::PartialSwapBuffers(
+ const gfx::Rect& sub_buffer,
+ uint32_t flags,
+ SwapCompletedCallback swap_completed,
+ PresentationCallback presentation_callback) {
+ NOTIMPLEMENTED();
+}
+void WebGPUImplementation::CommitOverlayPlanes(
+ uint32_t flags,
+ SwapCompletedCallback swap_completed,
+ PresentationCallback presentation_callback) {
+ NOTREACHED();
+}
+void WebGPUImplementation::ScheduleOverlayPlane(
+ int plane_z_order,
+ gfx::OverlayTransform plane_transform,
+ unsigned overlay_texture_id,
+ const gfx::Rect& display_bounds,
+ const gfx::RectF& uv_rect,
+ bool enable_blend,
+ unsigned gpu_fence_id) {
+ NOTREACHED();
+}
+uint64_t WebGPUImplementation::ShareGroupTracingGUID() const {
+ NOTIMPLEMENTED();
+ return 0;
+}
+void WebGPUImplementation::SetErrorMessageCallback(
+ base::RepeatingCallback<void(const char*, int32_t)> callback) {
+ NOTIMPLEMENTED();
+}
+bool WebGPUImplementation::ThreadSafeShallowLockDiscardableTexture(
+ uint32_t texture_id) {
+ NOTREACHED();
+ return false;
+}
+void WebGPUImplementation::CompleteLockDiscardableTexureOnContextThread(
+ uint32_t texture_id) {
+ NOTREACHED();
+}
+bool WebGPUImplementation::ThreadsafeDiscardableTextureIsDeletedForTracing(
+ uint32_t texture_id) {
+ NOTREACHED();
+ return false;
+}
+void* WebGPUImplementation::MapTransferCacheEntry(uint32_t serialized_size) {
+ NOTREACHED();
+ return nullptr;
+}
+void WebGPUImplementation::UnmapAndCreateTransferCacheEntry(uint32_t type,
+ uint32_t id) {
+ NOTREACHED();
+}
+bool WebGPUImplementation::ThreadsafeLockTransferCacheEntry(uint32_t type,
+ uint32_t id) {
+ NOTREACHED();
+ return false;
+}
+void WebGPUImplementation::UnlockTransferCacheEntries(
+ const std::vector<std::pair<uint32_t, uint32_t>>& entries) {
+ NOTREACHED();
+}
+void WebGPUImplementation::DeleteTransferCacheEntry(uint32_t type,
+ uint32_t id) {
+ NOTREACHED();
+}
+unsigned int WebGPUImplementation::GetTransferBufferFreeSize() const {
+ NOTREACHED();
+ return 0;
+}
+
+// ImplementationBase implementation.
+void WebGPUImplementation::IssueShallowFlush() {
+ NOTIMPLEMENTED();
+}
+
+// GpuControlClient implementation.
+void WebGPUImplementation::OnGpuControlLostContext() {
+ NOTIMPLEMENTED();
+}
+void WebGPUImplementation::OnGpuControlLostContextMaybeReentrant() {
+ NOTIMPLEMENTED();
+}
+void WebGPUImplementation::OnGpuControlErrorMessage(const char* message,
+ int32_t id) {
+ NOTIMPLEMENTED();
+}
+void WebGPUImplementation::OnGpuControlSwapBuffersCompleted(
+ const SwapBuffersCompleteParams& params) {
+ NOTIMPLEMENTED();
+}
+void WebGPUImplementation::OnSwapBufferPresented(
+ uint64_t swap_id,
+ const gfx::PresentationFeedback& feedback) {
+ NOTIMPLEMENTED();
+}
+void WebGPUImplementation::OnGpuControlReturnData(
+ base::span<const uint8_t> data) {
+ // TODO: Handle return commands
+ NOTIMPLEMENTED();
+}
+
void WebGPUImplementation::Dummy() {
GPU_CLIENT_SINGLE_THREAD_CHECK();
GPU_CLIENT_LOG("[" << GetLogPrefix() << "] wgDummy()");
diff --git a/chromium/gpu/command_buffer/client/webgpu_implementation.h b/chromium/gpu/command_buffer/client/webgpu_implementation.h
index 87b224033bc..4c3ddaa791d 100644
--- a/chromium/gpu/command_buffer/client/webgpu_implementation.h
+++ b/chromium/gpu/command_buffer/client/webgpu_implementation.h
@@ -5,8 +5,13 @@
#ifndef GPU_COMMAND_BUFFER_CLIENT_WEBGPU_IMPLEMENTATION_H_
#define GPU_COMMAND_BUFFER_CLIENT_WEBGPU_IMPLEMENTATION_H_
+#include <utility>
+#include <vector>
+
+#include "gpu/command_buffer/client/gpu_control_client.h"
#include "gpu/command_buffer/client/implementation_base.h"
#include "gpu/command_buffer/client/logging.h"
+#include "gpu/command_buffer/client/transfer_buffer.h"
#include "gpu/command_buffer/client/webgpu_cmd_helper.h"
#include "gpu/command_buffer/client/webgpu_export.h"
#include "gpu/command_buffer/client/webgpu_interface.h"
@@ -14,16 +19,73 @@
namespace gpu {
namespace webgpu {
-class WEBGPU_EXPORT WebGPUImplementation final : public WebGPUInterface {
+class WEBGPU_EXPORT WebGPUImplementation final : public WebGPUInterface,
+ public ImplementationBase {
public:
- explicit WebGPUImplementation(WebGPUCmdHelper* helper) : helper_(helper) {}
- ~WebGPUImplementation() override {}
+ explicit WebGPUImplementation(WebGPUCmdHelper* helper,
+ TransferBufferInterface* transfer_buffer,
+ GpuControl* gpu_control);
+ ~WebGPUImplementation() override;
+
+ gpu::ContextResult Initialize(const SharedMemoryLimits& limits);
// Include the auto-generated part of this class. We split this because
// it means we can easily edit the non-auto generated parts right here in
// this file instead of having to edit some template or the code generator.
#include "gpu/command_buffer/client/webgpu_implementation_autogen.h"
+ // ContextSupport implementation.
+ void SetAggressivelyFreeResources(bool aggressively_free_resources) override;
+ void Swap(uint32_t flags,
+ SwapCompletedCallback complete_callback,
+ PresentationCallback presentation_callback) override;
+ void SwapWithBounds(const std::vector<gfx::Rect>& rects,
+ uint32_t flags,
+ SwapCompletedCallback swap_completed,
+ PresentationCallback presentation_callback) override;
+ void PartialSwapBuffers(const gfx::Rect& sub_buffer,
+ uint32_t flags,
+ SwapCompletedCallback swap_completed,
+ PresentationCallback presentation_callback) override;
+ void CommitOverlayPlanes(uint32_t flags,
+ SwapCompletedCallback swap_completed,
+ PresentationCallback presentation_callback) override;
+ void ScheduleOverlayPlane(int plane_z_order,
+ gfx::OverlayTransform plane_transform,
+ unsigned overlay_texture_id,
+ const gfx::Rect& display_bounds,
+ const gfx::RectF& uv_rect,
+ bool enable_blend,
+ unsigned gpu_fence_id) override;
+ uint64_t ShareGroupTracingGUID() const override;
+ void SetErrorMessageCallback(
+ base::RepeatingCallback<void(const char*, int32_t)> callback) override;
+ bool ThreadSafeShallowLockDiscardableTexture(uint32_t texture_id) override;
+ void CompleteLockDiscardableTexureOnContextThread(
+ uint32_t texture_id) override;
+ bool ThreadsafeDiscardableTextureIsDeletedForTracing(
+ uint32_t texture_id) override;
+ void* MapTransferCacheEntry(uint32_t serialized_size) override;
+ void UnmapAndCreateTransferCacheEntry(uint32_t type, uint32_t id) override;
+ bool ThreadsafeLockTransferCacheEntry(uint32_t type, uint32_t id) override;
+ void UnlockTransferCacheEntries(
+ const std::vector<std::pair<uint32_t, uint32_t>>& entries) override;
+ void DeleteTransferCacheEntry(uint32_t type, uint32_t id) override;
+ unsigned int GetTransferBufferFreeSize() const override;
+
+ // ImplementationBase implementation.
+ void IssueShallowFlush() override;
+
+ // GpuControlClient implementation.
+ void OnGpuControlLostContext() final;
+ void OnGpuControlLostContextMaybeReentrant() final;
+ void OnGpuControlErrorMessage(const char* message, int32_t id) final;
+ void OnGpuControlSwapBuffersCompleted(
+ const SwapBuffersCompleteParams& params) final;
+ void OnSwapBufferPresented(uint64_t swap_id,
+ const gfx::PresentationFeedback& feedback) final;
+ void OnGpuControlReturnData(base::span<const uint8_t> data) final;
+
private:
const char* GetLogPrefix() const { return "webgpu"; }
diff --git a/chromium/gpu/command_buffer/client/webgpu_implementation_unittest.cc b/chromium/gpu/command_buffer/client/webgpu_implementation_unittest.cc
index 3238cc16348..bf1c529008f 100644
--- a/chromium/gpu/command_buffer/client/webgpu_implementation_unittest.cc
+++ b/chromium/gpu/command_buffer/client/webgpu_implementation_unittest.cc
@@ -54,13 +54,24 @@ class WebGPUImplementationTest : public testing::Test {
helper_.reset(new WebGPUCmdHelper(command_buffer_.get()));
helper_->Initialize(limits.command_buffer_size);
+ gpu_control_.reset(new StrictMock<MockClientGpuControl>());
+
+ EXPECT_CALL(*gpu_control_, GetCapabilities())
+ .WillOnce(ReturnRef(capabilities_));
{
InSequence sequence;
- gl_.reset(new WebGPUImplementation(helper_.get()));
+ gl_.reset(new WebGPUImplementation(helper_.get(), transfer_buffer_.get(),
+ gpu_control_.get()));
}
+ // The client should be set to something non-null.
+ EXPECT_CALL(*gpu_control_, SetGpuControlClient(gl_.get())).Times(1);
+
+ if (gl_->Initialize(limits) != gpu::ContextResult::kSuccess)
+ return false;
+
helper_->CommandBufferHelper::Finish();
Mock::VerifyAndClearExpectations(gl_.get());
@@ -87,6 +98,7 @@ class WebGPUImplementationTest : public testing::Test {
// For command buffer.
EXPECT_CALL(*command_buffer_, DestroyTransferBuffer(_)).Times(AtLeast(1));
// The client should be unset.
+ EXPECT_CALL(*gpu_control_, SetGpuControlClient(nullptr)).Times(1);
gl_.reset();
}
@@ -101,10 +113,12 @@ class WebGPUImplementationTest : public testing::Test {
}
std::unique_ptr<MockClientCommandBuffer> command_buffer_;
+ std::unique_ptr<MockClientGpuControl> gpu_control_;
std::unique_ptr<WebGPUCmdHelper> helper_;
std::unique_ptr<MockTransferBuffer> transfer_buffer_;
std::unique_ptr<WebGPUImplementation> gl_;
CommandBufferEntry* commands_ = nullptr;
+ Capabilities capabilities_;
};
#include "base/macros.h"
diff --git a/chromium/gpu/command_buffer/common/constants.h b/chromium/gpu/command_buffer/common/constants.h
index 5bbdfa71d8f..4e79b09e1a6 100644
--- a/chromium/gpu/command_buffer/common/constants.h
+++ b/chromium/gpu/command_buffer/common/constants.h
@@ -76,7 +76,8 @@ enum CommandBufferNamespace : int8_t {
GPU_IO,
IN_PROCESS,
- VIZ_OUTPUT_SURFACE,
+ VIZ_SKIA_OUTPUT_SURFACE,
+ VIZ_SKIA_OUTPUT_SURFACE_NON_DDL,
NUM_COMMAND_BUFFER_NAMESPACES
};
diff --git a/chromium/gpu/command_buffer/common/context_creation_attribs.h b/chromium/gpu/command_buffer/common/context_creation_attribs.h
index ed03f156e87..7c8ba03cde5 100644
--- a/chromium/gpu/command_buffer/common/context_creation_attribs.h
+++ b/chromium/gpu/command_buffer/common/context_creation_attribs.h
@@ -65,6 +65,7 @@ struct GPU_EXPORT ContextCreationAttribs {
bool enable_raster_interface = false;
bool enable_oop_rasterization = false;
bool enable_swap_timestamps_if_supported = false;
+ bool backed_by_surface_texture = false;
ContextType context_type = CONTEXT_TYPE_OPENGLES2;
ColorSpace color_space = COLOR_SPACE_UNSPECIFIED;
diff --git a/chromium/gpu/command_buffer/common/gles2_cmd_format_autogen.h b/chromium/gpu/command_buffer/common/gles2_cmd_format_autogen.h
index d45e669919b..68d9861198e 100644
--- a/chromium/gpu/command_buffer/common/gles2_cmd_format_autogen.h
+++ b/chromium/gpu/command_buffer/common/gles2_cmd_format_autogen.h
@@ -11349,6 +11349,81 @@ static_assert(
offsetof(RenderbufferStorageMultisampleCHROMIUM, height) == 20,
"offset of RenderbufferStorageMultisampleCHROMIUM height should be 20");
+// GL_AMD_framebuffer_multisample_advanced
+struct RenderbufferStorageMultisampleAdvancedAMD {
+ typedef RenderbufferStorageMultisampleAdvancedAMD ValueType;
+ static const CommandId kCmdId = kRenderbufferStorageMultisampleAdvancedAMD;
+ static const cmd::ArgFlags kArgFlags = cmd::kFixed;
+ static const uint8_t cmd_flags = CMD_FLAG_SET_TRACE_LEVEL(1);
+
+ static uint32_t ComputeSize() {
+ return static_cast<uint32_t>(sizeof(ValueType)); // NOLINT
+ }
+
+ void SetHeader() { header.SetCmd<ValueType>(); }
+
+ void Init(GLenum _target,
+ GLsizei _samples,
+ GLsizei _storageSamples,
+ GLenum _internalformat,
+ GLsizei _width,
+ GLsizei _height) {
+ SetHeader();
+ target = _target;
+ samples = _samples;
+ storageSamples = _storageSamples;
+ internalformat = _internalformat;
+ width = _width;
+ height = _height;
+ }
+
+ void* Set(void* cmd,
+ GLenum _target,
+ GLsizei _samples,
+ GLsizei _storageSamples,
+ GLenum _internalformat,
+ GLsizei _width,
+ GLsizei _height) {
+ static_cast<ValueType*>(cmd)->Init(_target, _samples, _storageSamples,
+ _internalformat, _width, _height);
+ return NextCmdAddress<ValueType>(cmd);
+ }
+
+ gpu::CommandHeader header;
+ uint32_t target;
+ int32_t samples;
+ int32_t storageSamples;
+ uint32_t internalformat;
+ int32_t width;
+ int32_t height;
+};
+
+static_assert(sizeof(RenderbufferStorageMultisampleAdvancedAMD) == 28,
+ "size of RenderbufferStorageMultisampleAdvancedAMD should be 28");
+static_assert(
+ offsetof(RenderbufferStorageMultisampleAdvancedAMD, header) == 0,
+ "offset of RenderbufferStorageMultisampleAdvancedAMD header should be 0");
+static_assert(
+ offsetof(RenderbufferStorageMultisampleAdvancedAMD, target) == 4,
+ "offset of RenderbufferStorageMultisampleAdvancedAMD target should be 4");
+static_assert(
+ offsetof(RenderbufferStorageMultisampleAdvancedAMD, samples) == 8,
+ "offset of RenderbufferStorageMultisampleAdvancedAMD samples should be 8");
+static_assert(offsetof(RenderbufferStorageMultisampleAdvancedAMD,
+ storageSamples) == 12,
+ "offset of RenderbufferStorageMultisampleAdvancedAMD "
+ "storageSamples should be 12");
+static_assert(offsetof(RenderbufferStorageMultisampleAdvancedAMD,
+ internalformat) == 16,
+ "offset of RenderbufferStorageMultisampleAdvancedAMD "
+ "internalformat should be 16");
+static_assert(
+ offsetof(RenderbufferStorageMultisampleAdvancedAMD, width) == 20,
+ "offset of RenderbufferStorageMultisampleAdvancedAMD width should be 20");
+static_assert(
+ offsetof(RenderbufferStorageMultisampleAdvancedAMD, height) == 24,
+ "offset of RenderbufferStorageMultisampleAdvancedAMD height should be 24");
+
// GL_EXT_multisampled_render_to_texture
struct RenderbufferStorageMultisampleEXT {
typedef RenderbufferStorageMultisampleEXT ValueType;
@@ -12293,6 +12368,334 @@ static_assert(offsetof(DispatchCompute, num_groups_y) == 8,
static_assert(offsetof(DispatchCompute, num_groups_z) == 12,
"offset of DispatchCompute num_groups_z should be 12");
+struct GetProgramInterfaceiv {
+ typedef GetProgramInterfaceiv ValueType;
+ static const CommandId kCmdId = kGetProgramInterfaceiv;
+ static const cmd::ArgFlags kArgFlags = cmd::kFixed;
+ static const uint8_t cmd_flags = CMD_FLAG_SET_TRACE_LEVEL(2);
+
+ typedef SizedResult<GLint> Result;
+
+ static uint32_t ComputeSize() {
+ return static_cast<uint32_t>(sizeof(ValueType)); // NOLINT
+ }
+
+ void SetHeader() { header.SetCmd<ValueType>(); }
+
+ void Init(GLuint _program,
+ GLenum _program_interface,
+ GLenum _pname,
+ uint32_t _params_shm_id,
+ uint32_t _params_shm_offset) {
+ SetHeader();
+ program = _program;
+ program_interface = _program_interface;
+ pname = _pname;
+ params_shm_id = _params_shm_id;
+ params_shm_offset = _params_shm_offset;
+ }
+
+ void* Set(void* cmd,
+ GLuint _program,
+ GLenum _program_interface,
+ GLenum _pname,
+ uint32_t _params_shm_id,
+ uint32_t _params_shm_offset) {
+ static_cast<ValueType*>(cmd)->Init(_program, _program_interface, _pname,
+ _params_shm_id, _params_shm_offset);
+ return NextCmdAddress<ValueType>(cmd);
+ }
+
+ gpu::CommandHeader header;
+ uint32_t program;
+ uint32_t program_interface;
+ uint32_t pname;
+ uint32_t params_shm_id;
+ uint32_t params_shm_offset;
+};
+
+static_assert(sizeof(GetProgramInterfaceiv) == 24,
+ "size of GetProgramInterfaceiv should be 24");
+static_assert(offsetof(GetProgramInterfaceiv, header) == 0,
+ "offset of GetProgramInterfaceiv header should be 0");
+static_assert(offsetof(GetProgramInterfaceiv, program) == 4,
+ "offset of GetProgramInterfaceiv program should be 4");
+static_assert(offsetof(GetProgramInterfaceiv, program_interface) == 8,
+ "offset of GetProgramInterfaceiv program_interface should be 8");
+static_assert(offsetof(GetProgramInterfaceiv, pname) == 12,
+ "offset of GetProgramInterfaceiv pname should be 12");
+static_assert(offsetof(GetProgramInterfaceiv, params_shm_id) == 16,
+ "offset of GetProgramInterfaceiv params_shm_id should be 16");
+static_assert(offsetof(GetProgramInterfaceiv, params_shm_offset) == 20,
+ "offset of GetProgramInterfaceiv params_shm_offset should be 20");
+
+struct GetProgramResourceIndex {
+ typedef GetProgramResourceIndex ValueType;
+ static const CommandId kCmdId = kGetProgramResourceIndex;
+ static const cmd::ArgFlags kArgFlags = cmd::kFixed;
+ static const uint8_t cmd_flags = CMD_FLAG_SET_TRACE_LEVEL(2);
+
+ typedef GLuint Result;
+
+ static uint32_t ComputeSize() {
+ return static_cast<uint32_t>(sizeof(ValueType)); // NOLINT
+ }
+
+ void SetHeader() { header.SetCmd<ValueType>(); }
+
+ void Init(GLuint _program,
+ GLenum _program_interface,
+ uint32_t _name_bucket_id,
+ uint32_t _index_shm_id,
+ uint32_t _index_shm_offset) {
+ SetHeader();
+ program = _program;
+ program_interface = _program_interface;
+ name_bucket_id = _name_bucket_id;
+ index_shm_id = _index_shm_id;
+ index_shm_offset = _index_shm_offset;
+ }
+
+ void* Set(void* cmd,
+ GLuint _program,
+ GLenum _program_interface,
+ uint32_t _name_bucket_id,
+ uint32_t _index_shm_id,
+ uint32_t _index_shm_offset) {
+ static_cast<ValueType*>(cmd)->Init(_program, _program_interface,
+ _name_bucket_id, _index_shm_id,
+ _index_shm_offset);
+ return NextCmdAddress<ValueType>(cmd);
+ }
+
+ gpu::CommandHeader header;
+ uint32_t program;
+ uint32_t program_interface;
+ uint32_t name_bucket_id;
+ uint32_t index_shm_id;
+ uint32_t index_shm_offset;
+};
+
+static_assert(sizeof(GetProgramResourceIndex) == 24,
+ "size of GetProgramResourceIndex should be 24");
+static_assert(offsetof(GetProgramResourceIndex, header) == 0,
+ "offset of GetProgramResourceIndex header should be 0");
+static_assert(offsetof(GetProgramResourceIndex, program) == 4,
+ "offset of GetProgramResourceIndex program should be 4");
+static_assert(
+ offsetof(GetProgramResourceIndex, program_interface) == 8,
+ "offset of GetProgramResourceIndex program_interface should be 8");
+static_assert(offsetof(GetProgramResourceIndex, name_bucket_id) == 12,
+ "offset of GetProgramResourceIndex name_bucket_id should be 12");
+static_assert(offsetof(GetProgramResourceIndex, index_shm_id) == 16,
+ "offset of GetProgramResourceIndex index_shm_id should be 16");
+static_assert(
+ offsetof(GetProgramResourceIndex, index_shm_offset) == 20,
+ "offset of GetProgramResourceIndex index_shm_offset should be 20");
+
+struct GetProgramResourceName {
+ typedef GetProgramResourceName ValueType;
+ static const CommandId kCmdId = kGetProgramResourceName;
+ static const cmd::ArgFlags kArgFlags = cmd::kFixed;
+ static const uint8_t cmd_flags = CMD_FLAG_SET_TRACE_LEVEL(2);
+
+ typedef int32_t Result;
+
+ static uint32_t ComputeSize() {
+ return static_cast<uint32_t>(sizeof(ValueType)); // NOLINT
+ }
+
+ void SetHeader() { header.SetCmd<ValueType>(); }
+
+ void Init(GLuint _program,
+ GLenum _program_interface,
+ GLuint _index,
+ uint32_t _name_bucket_id,
+ uint32_t _result_shm_id,
+ uint32_t _result_shm_offset) {
+ SetHeader();
+ program = _program;
+ program_interface = _program_interface;
+ index = _index;
+ name_bucket_id = _name_bucket_id;
+ result_shm_id = _result_shm_id;
+ result_shm_offset = _result_shm_offset;
+ }
+
+ void* Set(void* cmd,
+ GLuint _program,
+ GLenum _program_interface,
+ GLuint _index,
+ uint32_t _name_bucket_id,
+ uint32_t _result_shm_id,
+ uint32_t _result_shm_offset) {
+ static_cast<ValueType*>(cmd)->Init(_program, _program_interface, _index,
+ _name_bucket_id, _result_shm_id,
+ _result_shm_offset);
+ return NextCmdAddress<ValueType>(cmd);
+ }
+
+ gpu::CommandHeader header;
+ uint32_t program;
+ uint32_t program_interface;
+ uint32_t index;
+ uint32_t name_bucket_id;
+ uint32_t result_shm_id;
+ uint32_t result_shm_offset;
+};
+
+static_assert(sizeof(GetProgramResourceName) == 28,
+ "size of GetProgramResourceName should be 28");
+static_assert(offsetof(GetProgramResourceName, header) == 0,
+ "offset of GetProgramResourceName header should be 0");
+static_assert(offsetof(GetProgramResourceName, program) == 4,
+ "offset of GetProgramResourceName program should be 4");
+static_assert(offsetof(GetProgramResourceName, program_interface) == 8,
+ "offset of GetProgramResourceName program_interface should be 8");
+static_assert(offsetof(GetProgramResourceName, index) == 12,
+ "offset of GetProgramResourceName index should be 12");
+static_assert(offsetof(GetProgramResourceName, name_bucket_id) == 16,
+ "offset of GetProgramResourceName name_bucket_id should be 16");
+static_assert(offsetof(GetProgramResourceName, result_shm_id) == 20,
+ "offset of GetProgramResourceName result_shm_id should be 20");
+static_assert(
+ offsetof(GetProgramResourceName, result_shm_offset) == 24,
+ "offset of GetProgramResourceName result_shm_offset should be 24");
+
+struct GetProgramResourceiv {
+ typedef GetProgramResourceiv ValueType;
+ static const CommandId kCmdId = kGetProgramResourceiv;
+ static const cmd::ArgFlags kArgFlags = cmd::kFixed;
+ static const uint8_t cmd_flags = CMD_FLAG_SET_TRACE_LEVEL(2);
+
+ typedef SizedResult<GLint> Result;
+
+ static uint32_t ComputeSize() {
+ return static_cast<uint32_t>(sizeof(ValueType)); // NOLINT
+ }
+
+ void SetHeader() { header.SetCmd<ValueType>(); }
+
+ void Init(GLuint _program,
+ GLenum _program_interface,
+ GLuint _index,
+ uint32_t _props_bucket_id,
+ uint32_t _params_shm_id,
+ uint32_t _params_shm_offset) {
+ SetHeader();
+ program = _program;
+ program_interface = _program_interface;
+ index = _index;
+ props_bucket_id = _props_bucket_id;
+ params_shm_id = _params_shm_id;
+ params_shm_offset = _params_shm_offset;
+ }
+
+ void* Set(void* cmd,
+ GLuint _program,
+ GLenum _program_interface,
+ GLuint _index,
+ uint32_t _props_bucket_id,
+ uint32_t _params_shm_id,
+ uint32_t _params_shm_offset) {
+ static_cast<ValueType*>(cmd)->Init(_program, _program_interface, _index,
+ _props_bucket_id, _params_shm_id,
+ _params_shm_offset);
+ return NextCmdAddress<ValueType>(cmd);
+ }
+
+ gpu::CommandHeader header;
+ uint32_t program;
+ uint32_t program_interface;
+ uint32_t index;
+ uint32_t props_bucket_id;
+ uint32_t params_shm_id;
+ uint32_t params_shm_offset;
+};
+
+static_assert(sizeof(GetProgramResourceiv) == 28,
+ "size of GetProgramResourceiv should be 28");
+static_assert(offsetof(GetProgramResourceiv, header) == 0,
+ "offset of GetProgramResourceiv header should be 0");
+static_assert(offsetof(GetProgramResourceiv, program) == 4,
+ "offset of GetProgramResourceiv program should be 4");
+static_assert(offsetof(GetProgramResourceiv, program_interface) == 8,
+ "offset of GetProgramResourceiv program_interface should be 8");
+static_assert(offsetof(GetProgramResourceiv, index) == 12,
+ "offset of GetProgramResourceiv index should be 12");
+static_assert(offsetof(GetProgramResourceiv, props_bucket_id) == 16,
+ "offset of GetProgramResourceiv props_bucket_id should be 16");
+static_assert(offsetof(GetProgramResourceiv, params_shm_id) == 20,
+ "offset of GetProgramResourceiv params_shm_id should be 20");
+static_assert(offsetof(GetProgramResourceiv, params_shm_offset) == 24,
+ "offset of GetProgramResourceiv params_shm_offset should be 24");
+
+struct GetProgramResourceLocation {
+ typedef GetProgramResourceLocation ValueType;
+ static const CommandId kCmdId = kGetProgramResourceLocation;
+ static const cmd::ArgFlags kArgFlags = cmd::kFixed;
+ static const uint8_t cmd_flags = CMD_FLAG_SET_TRACE_LEVEL(2);
+
+ typedef GLint Result;
+
+ static uint32_t ComputeSize() {
+ return static_cast<uint32_t>(sizeof(ValueType)); // NOLINT
+ }
+
+ void SetHeader() { header.SetCmd<ValueType>(); }
+
+ void Init(GLuint _program,
+ GLenum _program_interface,
+ uint32_t _name_bucket_id,
+ uint32_t _location_shm_id,
+ uint32_t _location_shm_offset) {
+ SetHeader();
+ program = _program;
+ program_interface = _program_interface;
+ name_bucket_id = _name_bucket_id;
+ location_shm_id = _location_shm_id;
+ location_shm_offset = _location_shm_offset;
+ }
+
+ void* Set(void* cmd,
+ GLuint _program,
+ GLenum _program_interface,
+ uint32_t _name_bucket_id,
+ uint32_t _location_shm_id,
+ uint32_t _location_shm_offset) {
+ static_cast<ValueType*>(cmd)->Init(_program, _program_interface,
+ _name_bucket_id, _location_shm_id,
+ _location_shm_offset);
+ return NextCmdAddress<ValueType>(cmd);
+ }
+
+ gpu::CommandHeader header;
+ uint32_t program;
+ uint32_t program_interface;
+ uint32_t name_bucket_id;
+ uint32_t location_shm_id;
+ uint32_t location_shm_offset;
+};
+
+static_assert(sizeof(GetProgramResourceLocation) == 24,
+ "size of GetProgramResourceLocation should be 24");
+static_assert(offsetof(GetProgramResourceLocation, header) == 0,
+ "offset of GetProgramResourceLocation header should be 0");
+static_assert(offsetof(GetProgramResourceLocation, program) == 4,
+ "offset of GetProgramResourceLocation program should be 4");
+static_assert(
+ offsetof(GetProgramResourceLocation, program_interface) == 8,
+ "offset of GetProgramResourceLocation program_interface should be 8");
+static_assert(
+ offsetof(GetProgramResourceLocation, name_bucket_id) == 12,
+ "offset of GetProgramResourceLocation name_bucket_id should be 12");
+static_assert(
+ offsetof(GetProgramResourceLocation, location_shm_id) == 16,
+ "offset of GetProgramResourceLocation location_shm_id should be 16");
+static_assert(
+ offsetof(GetProgramResourceLocation, location_shm_offset) == 20,
+ "offset of GetProgramResourceLocation location_shm_offset should be 20");
+
struct MemoryBarrierEXT {
typedef MemoryBarrierEXT ValueType;
static const CommandId kCmdId = kMemoryBarrierEXT;
@@ -17115,25 +17518,30 @@ struct CreateAndTexStorage2DSharedImageINTERNALImmediate {
void SetHeader() { header.SetCmdByTotalSize<ValueType>(ComputeSize()); }
- void Init(GLuint _texture, const GLbyte* _mailbox) {
+ void Init(GLuint _texture, const GLbyte* _mailbox, GLenum _internalformat) {
SetHeader();
texture = _texture;
+ internalformat = _internalformat;
memcpy(ImmediateDataAddress(this), _mailbox, ComputeDataSize());
}
- void* Set(void* cmd, GLuint _texture, const GLbyte* _mailbox) {
- static_cast<ValueType*>(cmd)->Init(_texture, _mailbox);
+ void* Set(void* cmd,
+ GLuint _texture,
+ const GLbyte* _mailbox,
+ GLenum _internalformat) {
+ static_cast<ValueType*>(cmd)->Init(_texture, _mailbox, _internalformat);
const uint32_t size = ComputeSize();
return NextImmediateCmdAddressTotalSize<ValueType>(cmd, size);
}
gpu::CommandHeader header;
uint32_t texture;
+ uint32_t internalformat;
};
static_assert(
- sizeof(CreateAndTexStorage2DSharedImageINTERNALImmediate) == 8,
- "size of CreateAndTexStorage2DSharedImageINTERNALImmediate should be 8");
+ sizeof(CreateAndTexStorage2DSharedImageINTERNALImmediate) == 12,
+ "size of CreateAndTexStorage2DSharedImageINTERNALImmediate should be 12");
static_assert(offsetof(CreateAndTexStorage2DSharedImageINTERNALImmediate,
header) == 0,
"offset of CreateAndTexStorage2DSharedImageINTERNALImmediate "
@@ -17142,6 +17550,10 @@ static_assert(offsetof(CreateAndTexStorage2DSharedImageINTERNALImmediate,
texture) == 4,
"offset of CreateAndTexStorage2DSharedImageINTERNALImmediate "
"texture should be 4");
+static_assert(offsetof(CreateAndTexStorage2DSharedImageINTERNALImmediate,
+ internalformat) == 8,
+ "offset of CreateAndTexStorage2DSharedImageINTERNALImmediate "
+ "internalformat should be 8");
struct BeginSharedImageAccessDirectCHROMIUM {
typedef BeginSharedImageAccessDirectCHROMIUM ValueType;
diff --git a/chromium/gpu/command_buffer/common/gles2_cmd_format_test_autogen.h b/chromium/gpu/command_buffer/common/gles2_cmd_format_test_autogen.h
index 62958cdf8a0..790e72622c7 100644
--- a/chromium/gpu/command_buffer/common/gles2_cmd_format_test_autogen.h
+++ b/chromium/gpu/command_buffer/common/gles2_cmd_format_test_autogen.h
@@ -3813,6 +3813,26 @@ TEST_F(GLES2FormatTest, RenderbufferStorageMultisampleCHROMIUM) {
CheckBytesWrittenMatchesExpectedSize(next_cmd, sizeof(cmd));
}
+TEST_F(GLES2FormatTest, RenderbufferStorageMultisampleAdvancedAMD) {
+ cmds::RenderbufferStorageMultisampleAdvancedAMD& cmd =
+ *GetBufferAs<cmds::RenderbufferStorageMultisampleAdvancedAMD>();
+ void* next_cmd =
+ cmd.Set(&cmd, static_cast<GLenum>(11), static_cast<GLsizei>(12),
+ static_cast<GLsizei>(13), static_cast<GLenum>(14),
+ static_cast<GLsizei>(15), static_cast<GLsizei>(16));
+ EXPECT_EQ(static_cast<uint32_t>(
+ cmds::RenderbufferStorageMultisampleAdvancedAMD::kCmdId),
+ cmd.header.command);
+ EXPECT_EQ(sizeof(cmd), cmd.header.size * 4u);
+ EXPECT_EQ(static_cast<GLenum>(11), cmd.target);
+ EXPECT_EQ(static_cast<GLsizei>(12), cmd.samples);
+ EXPECT_EQ(static_cast<GLsizei>(13), cmd.storageSamples);
+ EXPECT_EQ(static_cast<GLenum>(14), cmd.internalformat);
+ EXPECT_EQ(static_cast<GLsizei>(15), cmd.width);
+ EXPECT_EQ(static_cast<GLsizei>(16), cmd.height);
+ CheckBytesWrittenMatchesExpectedSize(next_cmd, sizeof(cmd));
+}
+
TEST_F(GLES2FormatTest, RenderbufferStorageMultisampleEXT) {
cmds::RenderbufferStorageMultisampleEXT& cmd =
*GetBufferAs<cmds::RenderbufferStorageMultisampleEXT>();
@@ -4122,6 +4142,97 @@ TEST_F(GLES2FormatTest, DispatchCompute) {
CheckBytesWrittenMatchesExpectedSize(next_cmd, sizeof(cmd));
}
+TEST_F(GLES2FormatTest, GetProgramInterfaceiv) {
+ cmds::GetProgramInterfaceiv& cmd =
+ *GetBufferAs<cmds::GetProgramInterfaceiv>();
+ void* next_cmd =
+ cmd.Set(&cmd, static_cast<GLuint>(11), static_cast<GLenum>(12),
+ static_cast<GLenum>(13), static_cast<uint32_t>(14),
+ static_cast<uint32_t>(15));
+ EXPECT_EQ(static_cast<uint32_t>(cmds::GetProgramInterfaceiv::kCmdId),
+ cmd.header.command);
+ EXPECT_EQ(sizeof(cmd), cmd.header.size * 4u);
+ EXPECT_EQ(static_cast<GLuint>(11), cmd.program);
+ EXPECT_EQ(static_cast<GLenum>(12), cmd.program_interface);
+ EXPECT_EQ(static_cast<GLenum>(13), cmd.pname);
+ EXPECT_EQ(static_cast<uint32_t>(14), cmd.params_shm_id);
+ EXPECT_EQ(static_cast<uint32_t>(15), cmd.params_shm_offset);
+ CheckBytesWrittenMatchesExpectedSize(next_cmd, sizeof(cmd));
+}
+
+TEST_F(GLES2FormatTest, GetProgramResourceIndex) {
+ cmds::GetProgramResourceIndex& cmd =
+ *GetBufferAs<cmds::GetProgramResourceIndex>();
+ void* next_cmd =
+ cmd.Set(&cmd, static_cast<GLuint>(11), static_cast<GLenum>(12),
+ static_cast<uint32_t>(13), static_cast<uint32_t>(14),
+ static_cast<uint32_t>(15));
+ EXPECT_EQ(static_cast<uint32_t>(cmds::GetProgramResourceIndex::kCmdId),
+ cmd.header.command);
+ EXPECT_EQ(sizeof(cmd), cmd.header.size * 4u);
+ EXPECT_EQ(static_cast<GLuint>(11), cmd.program);
+ EXPECT_EQ(static_cast<GLenum>(12), cmd.program_interface);
+ EXPECT_EQ(static_cast<uint32_t>(13), cmd.name_bucket_id);
+ EXPECT_EQ(static_cast<uint32_t>(14), cmd.index_shm_id);
+ EXPECT_EQ(static_cast<uint32_t>(15), cmd.index_shm_offset);
+ CheckBytesWrittenMatchesExpectedSize(next_cmd, sizeof(cmd));
+}
+
+TEST_F(GLES2FormatTest, GetProgramResourceName) {
+ cmds::GetProgramResourceName& cmd =
+ *GetBufferAs<cmds::GetProgramResourceName>();
+ void* next_cmd =
+ cmd.Set(&cmd, static_cast<GLuint>(11), static_cast<GLenum>(12),
+ static_cast<GLuint>(13), static_cast<uint32_t>(14),
+ static_cast<uint32_t>(15), static_cast<uint32_t>(16));
+ EXPECT_EQ(static_cast<uint32_t>(cmds::GetProgramResourceName::kCmdId),
+ cmd.header.command);
+ EXPECT_EQ(sizeof(cmd), cmd.header.size * 4u);
+ EXPECT_EQ(static_cast<GLuint>(11), cmd.program);
+ EXPECT_EQ(static_cast<GLenum>(12), cmd.program_interface);
+ EXPECT_EQ(static_cast<GLuint>(13), cmd.index);
+ EXPECT_EQ(static_cast<uint32_t>(14), cmd.name_bucket_id);
+ EXPECT_EQ(static_cast<uint32_t>(15), cmd.result_shm_id);
+ EXPECT_EQ(static_cast<uint32_t>(16), cmd.result_shm_offset);
+ CheckBytesWrittenMatchesExpectedSize(next_cmd, sizeof(cmd));
+}
+
+TEST_F(GLES2FormatTest, GetProgramResourceiv) {
+ cmds::GetProgramResourceiv& cmd = *GetBufferAs<cmds::GetProgramResourceiv>();
+ void* next_cmd =
+ cmd.Set(&cmd, static_cast<GLuint>(11), static_cast<GLenum>(12),
+ static_cast<GLuint>(13), static_cast<uint32_t>(14),
+ static_cast<uint32_t>(15), static_cast<uint32_t>(16));
+ EXPECT_EQ(static_cast<uint32_t>(cmds::GetProgramResourceiv::kCmdId),
+ cmd.header.command);
+ EXPECT_EQ(sizeof(cmd), cmd.header.size * 4u);
+ EXPECT_EQ(static_cast<GLuint>(11), cmd.program);
+ EXPECT_EQ(static_cast<GLenum>(12), cmd.program_interface);
+ EXPECT_EQ(static_cast<GLuint>(13), cmd.index);
+ EXPECT_EQ(static_cast<uint32_t>(14), cmd.props_bucket_id);
+ EXPECT_EQ(static_cast<uint32_t>(15), cmd.params_shm_id);
+ EXPECT_EQ(static_cast<uint32_t>(16), cmd.params_shm_offset);
+ CheckBytesWrittenMatchesExpectedSize(next_cmd, sizeof(cmd));
+}
+
+TEST_F(GLES2FormatTest, GetProgramResourceLocation) {
+ cmds::GetProgramResourceLocation& cmd =
+ *GetBufferAs<cmds::GetProgramResourceLocation>();
+ void* next_cmd =
+ cmd.Set(&cmd, static_cast<GLuint>(11), static_cast<GLenum>(12),
+ static_cast<uint32_t>(13), static_cast<uint32_t>(14),
+ static_cast<uint32_t>(15));
+ EXPECT_EQ(static_cast<uint32_t>(cmds::GetProgramResourceLocation::kCmdId),
+ cmd.header.command);
+ EXPECT_EQ(sizeof(cmd), cmd.header.size * 4u);
+ EXPECT_EQ(static_cast<GLuint>(11), cmd.program);
+ EXPECT_EQ(static_cast<GLenum>(12), cmd.program_interface);
+ EXPECT_EQ(static_cast<uint32_t>(13), cmd.name_bucket_id);
+ EXPECT_EQ(static_cast<uint32_t>(14), cmd.location_shm_id);
+ EXPECT_EQ(static_cast<uint32_t>(15), cmd.location_shm_offset);
+ CheckBytesWrittenMatchesExpectedSize(next_cmd, sizeof(cmd));
+}
+
TEST_F(GLES2FormatTest, MemoryBarrierEXT) {
cmds::MemoryBarrierEXT& cmd = *GetBufferAs<cmds::MemoryBarrierEXT>();
void* next_cmd = cmd.Set(&cmd, static_cast<GLbitfield>(11));
@@ -5680,7 +5791,8 @@ TEST_F(GLES2FormatTest, CreateAndTexStorage2DSharedImageINTERNALImmediate) {
};
cmds::CreateAndTexStorage2DSharedImageINTERNALImmediate& cmd =
*GetBufferAs<cmds::CreateAndTexStorage2DSharedImageINTERNALImmediate>();
- void* next_cmd = cmd.Set(&cmd, static_cast<GLuint>(11), data);
+ void* next_cmd =
+ cmd.Set(&cmd, static_cast<GLuint>(11), data, static_cast<GLenum>(12));
EXPECT_EQ(
static_cast<uint32_t>(
cmds::CreateAndTexStorage2DSharedImageINTERNALImmediate::kCmdId),
@@ -5688,6 +5800,7 @@ TEST_F(GLES2FormatTest, CreateAndTexStorage2DSharedImageINTERNALImmediate) {
EXPECT_EQ(sizeof(cmd) + RoundSizeToMultipleOfEntries(sizeof(data)),
cmd.header.size * 4u);
EXPECT_EQ(static_cast<GLuint>(11), cmd.texture);
+ EXPECT_EQ(static_cast<GLenum>(12), cmd.internalformat);
CheckBytesWrittenMatchesExpectedSize(
next_cmd, sizeof(cmd) + RoundSizeToMultipleOfEntries(sizeof(data)));
}
diff --git a/chromium/gpu/command_buffer/common/gles2_cmd_ids_autogen.h b/chromium/gpu/command_buffer/common/gles2_cmd_ids_autogen.h
index c3321ba34c6..b3dd9b38feb 100644
--- a/chromium/gpu/command_buffer/common/gles2_cmd_ids_autogen.h
+++ b/chromium/gpu/command_buffer/common/gles2_cmd_ids_autogen.h
@@ -241,121 +241,127 @@
OP(WaitSync) /* 482 */ \
OP(BlitFramebufferCHROMIUM) /* 483 */ \
OP(RenderbufferStorageMultisampleCHROMIUM) /* 484 */ \
- OP(RenderbufferStorageMultisampleEXT) /* 485 */ \
- OP(FramebufferTexture2DMultisampleEXT) /* 486 */ \
- OP(TexStorage2DEXT) /* 487 */ \
- OP(GenQueriesEXTImmediate) /* 488 */ \
- OP(DeleteQueriesEXTImmediate) /* 489 */ \
- OP(QueryCounterEXT) /* 490 */ \
- OP(BeginQueryEXT) /* 491 */ \
- OP(BeginTransformFeedback) /* 492 */ \
- OP(EndQueryEXT) /* 493 */ \
- OP(EndTransformFeedback) /* 494 */ \
- OP(SetDisjointValueSyncCHROMIUM) /* 495 */ \
- OP(InsertEventMarkerEXT) /* 496 */ \
- OP(PushGroupMarkerEXT) /* 497 */ \
- OP(PopGroupMarkerEXT) /* 498 */ \
- OP(GenVertexArraysOESImmediate) /* 499 */ \
- OP(DeleteVertexArraysOESImmediate) /* 500 */ \
- OP(IsVertexArrayOES) /* 501 */ \
- OP(BindVertexArrayOES) /* 502 */ \
- OP(FramebufferParameteri) /* 503 */ \
- OP(BindImageTexture) /* 504 */ \
- OP(DispatchCompute) /* 505 */ \
- OP(MemoryBarrierEXT) /* 506 */ \
- OP(MemoryBarrierByRegion) /* 507 */ \
- OP(SwapBuffers) /* 508 */ \
- OP(GetMaxValueInBufferCHROMIUM) /* 509 */ \
- OP(EnableFeatureCHROMIUM) /* 510 */ \
- OP(MapBufferRange) /* 511 */ \
- OP(UnmapBuffer) /* 512 */ \
- OP(FlushMappedBufferRange) /* 513 */ \
- OP(ResizeCHROMIUM) /* 514 */ \
- OP(GetRequestableExtensionsCHROMIUM) /* 515 */ \
- OP(RequestExtensionCHROMIUM) /* 516 */ \
- OP(GetProgramInfoCHROMIUM) /* 517 */ \
- OP(GetUniformBlocksCHROMIUM) /* 518 */ \
- OP(GetTransformFeedbackVaryingsCHROMIUM) /* 519 */ \
- OP(GetUniformsES3CHROMIUM) /* 520 */ \
- OP(DescheduleUntilFinishedCHROMIUM) /* 521 */ \
- OP(GetTranslatedShaderSourceANGLE) /* 522 */ \
- OP(PostSubBufferCHROMIUM) /* 523 */ \
- OP(CopyTextureCHROMIUM) /* 524 */ \
- OP(CopySubTextureCHROMIUM) /* 525 */ \
- OP(DrawArraysInstancedANGLE) /* 526 */ \
- OP(DrawElementsInstancedANGLE) /* 527 */ \
- OP(VertexAttribDivisorANGLE) /* 528 */ \
- OP(ProduceTextureDirectCHROMIUMImmediate) /* 529 */ \
- OP(CreateAndConsumeTextureINTERNALImmediate) /* 530 */ \
- OP(BindUniformLocationCHROMIUMBucket) /* 531 */ \
- OP(BindTexImage2DCHROMIUM) /* 532 */ \
- OP(BindTexImage2DWithInternalformatCHROMIUM) /* 533 */ \
- OP(ReleaseTexImage2DCHROMIUM) /* 534 */ \
- OP(TraceBeginCHROMIUM) /* 535 */ \
- OP(TraceEndCHROMIUM) /* 536 */ \
- OP(DiscardFramebufferEXTImmediate) /* 537 */ \
- OP(LoseContextCHROMIUM) /* 538 */ \
- OP(InsertFenceSyncCHROMIUM) /* 539 */ \
- OP(UnpremultiplyAndDitherCopyCHROMIUM) /* 540 */ \
- OP(DrawBuffersEXTImmediate) /* 541 */ \
- OP(DiscardBackbufferCHROMIUM) /* 542 */ \
- OP(ScheduleOverlayPlaneCHROMIUM) /* 543 */ \
- OP(ScheduleCALayerSharedStateCHROMIUM) /* 544 */ \
- OP(ScheduleCALayerCHROMIUM) /* 545 */ \
- OP(ScheduleCALayerInUseQueryCHROMIUMImmediate) /* 546 */ \
- OP(CommitOverlayPlanesCHROMIUM) /* 547 */ \
- OP(FlushDriverCachesCHROMIUM) /* 548 */ \
- OP(ScheduleDCLayerCHROMIUM) /* 549 */ \
- OP(SetActiveURLCHROMIUM) /* 550 */ \
- OP(MatrixLoadfCHROMIUMImmediate) /* 551 */ \
- OP(MatrixLoadIdentityCHROMIUM) /* 552 */ \
- OP(GenPathsCHROMIUM) /* 553 */ \
- OP(DeletePathsCHROMIUM) /* 554 */ \
- OP(IsPathCHROMIUM) /* 555 */ \
- OP(PathCommandsCHROMIUM) /* 556 */ \
- OP(PathParameterfCHROMIUM) /* 557 */ \
- OP(PathParameteriCHROMIUM) /* 558 */ \
- OP(PathStencilFuncCHROMIUM) /* 559 */ \
- OP(StencilFillPathCHROMIUM) /* 560 */ \
- OP(StencilStrokePathCHROMIUM) /* 561 */ \
- OP(CoverFillPathCHROMIUM) /* 562 */ \
- OP(CoverStrokePathCHROMIUM) /* 563 */ \
- OP(StencilThenCoverFillPathCHROMIUM) /* 564 */ \
- OP(StencilThenCoverStrokePathCHROMIUM) /* 565 */ \
- OP(StencilFillPathInstancedCHROMIUM) /* 566 */ \
- OP(StencilStrokePathInstancedCHROMIUM) /* 567 */ \
- OP(CoverFillPathInstancedCHROMIUM) /* 568 */ \
- OP(CoverStrokePathInstancedCHROMIUM) /* 569 */ \
- OP(StencilThenCoverFillPathInstancedCHROMIUM) /* 570 */ \
- OP(StencilThenCoverStrokePathInstancedCHROMIUM) /* 571 */ \
- OP(BindFragmentInputLocationCHROMIUMBucket) /* 572 */ \
- OP(ProgramPathFragmentInputGenCHROMIUM) /* 573 */ \
- OP(CoverageModulationCHROMIUM) /* 574 */ \
- OP(BlendBarrierKHR) /* 575 */ \
- OP(ApplyScreenSpaceAntialiasingCHROMIUM) /* 576 */ \
- OP(BindFragDataLocationIndexedEXTBucket) /* 577 */ \
- OP(BindFragDataLocationEXTBucket) /* 578 */ \
- OP(GetFragDataIndexEXT) /* 579 */ \
- OP(UniformMatrix4fvStreamTextureMatrixCHROMIUMImmediate) /* 580 */ \
- OP(OverlayPromotionHintCHROMIUM) /* 581 */ \
- OP(SwapBuffersWithBoundsCHROMIUMImmediate) /* 582 */ \
- OP(SetDrawRectangleCHROMIUM) /* 583 */ \
- OP(SetEnableDCLayersCHROMIUM) /* 584 */ \
- OP(InitializeDiscardableTextureCHROMIUM) /* 585 */ \
- OP(UnlockDiscardableTextureCHROMIUM) /* 586 */ \
- OP(LockDiscardableTextureCHROMIUM) /* 587 */ \
- OP(TexStorage2DImageCHROMIUM) /* 588 */ \
- OP(SetColorSpaceMetadataCHROMIUM) /* 589 */ \
- OP(WindowRectanglesEXTImmediate) /* 590 */ \
- OP(CreateGpuFenceINTERNAL) /* 591 */ \
- OP(WaitGpuFenceCHROMIUM) /* 592 */ \
- OP(DestroyGpuFenceCHROMIUM) /* 593 */ \
- OP(SetReadbackBufferShadowAllocationINTERNAL) /* 594 */ \
- OP(FramebufferTextureMultiviewLayeredANGLE) /* 595 */ \
- OP(MaxShaderCompilerThreadsKHR) /* 596 */ \
- OP(CreateAndTexStorage2DSharedImageINTERNALImmediate) /* 597 */ \
- OP(BeginSharedImageAccessDirectCHROMIUM) /* 598 */ \
- OP(EndSharedImageAccessDirectCHROMIUM) /* 599 */
+ OP(RenderbufferStorageMultisampleAdvancedAMD) /* 485 */ \
+ OP(RenderbufferStorageMultisampleEXT) /* 486 */ \
+ OP(FramebufferTexture2DMultisampleEXT) /* 487 */ \
+ OP(TexStorage2DEXT) /* 488 */ \
+ OP(GenQueriesEXTImmediate) /* 489 */ \
+ OP(DeleteQueriesEXTImmediate) /* 490 */ \
+ OP(QueryCounterEXT) /* 491 */ \
+ OP(BeginQueryEXT) /* 492 */ \
+ OP(BeginTransformFeedback) /* 493 */ \
+ OP(EndQueryEXT) /* 494 */ \
+ OP(EndTransformFeedback) /* 495 */ \
+ OP(SetDisjointValueSyncCHROMIUM) /* 496 */ \
+ OP(InsertEventMarkerEXT) /* 497 */ \
+ OP(PushGroupMarkerEXT) /* 498 */ \
+ OP(PopGroupMarkerEXT) /* 499 */ \
+ OP(GenVertexArraysOESImmediate) /* 500 */ \
+ OP(DeleteVertexArraysOESImmediate) /* 501 */ \
+ OP(IsVertexArrayOES) /* 502 */ \
+ OP(BindVertexArrayOES) /* 503 */ \
+ OP(FramebufferParameteri) /* 504 */ \
+ OP(BindImageTexture) /* 505 */ \
+ OP(DispatchCompute) /* 506 */ \
+ OP(GetProgramInterfaceiv) /* 507 */ \
+ OP(GetProgramResourceIndex) /* 508 */ \
+ OP(GetProgramResourceName) /* 509 */ \
+ OP(GetProgramResourceiv) /* 510 */ \
+ OP(GetProgramResourceLocation) /* 511 */ \
+ OP(MemoryBarrierEXT) /* 512 */ \
+ OP(MemoryBarrierByRegion) /* 513 */ \
+ OP(SwapBuffers) /* 514 */ \
+ OP(GetMaxValueInBufferCHROMIUM) /* 515 */ \
+ OP(EnableFeatureCHROMIUM) /* 516 */ \
+ OP(MapBufferRange) /* 517 */ \
+ OP(UnmapBuffer) /* 518 */ \
+ OP(FlushMappedBufferRange) /* 519 */ \
+ OP(ResizeCHROMIUM) /* 520 */ \
+ OP(GetRequestableExtensionsCHROMIUM) /* 521 */ \
+ OP(RequestExtensionCHROMIUM) /* 522 */ \
+ OP(GetProgramInfoCHROMIUM) /* 523 */ \
+ OP(GetUniformBlocksCHROMIUM) /* 524 */ \
+ OP(GetTransformFeedbackVaryingsCHROMIUM) /* 525 */ \
+ OP(GetUniformsES3CHROMIUM) /* 526 */ \
+ OP(DescheduleUntilFinishedCHROMIUM) /* 527 */ \
+ OP(GetTranslatedShaderSourceANGLE) /* 528 */ \
+ OP(PostSubBufferCHROMIUM) /* 529 */ \
+ OP(CopyTextureCHROMIUM) /* 530 */ \
+ OP(CopySubTextureCHROMIUM) /* 531 */ \
+ OP(DrawArraysInstancedANGLE) /* 532 */ \
+ OP(DrawElementsInstancedANGLE) /* 533 */ \
+ OP(VertexAttribDivisorANGLE) /* 534 */ \
+ OP(ProduceTextureDirectCHROMIUMImmediate) /* 535 */ \
+ OP(CreateAndConsumeTextureINTERNALImmediate) /* 536 */ \
+ OP(BindUniformLocationCHROMIUMBucket) /* 537 */ \
+ OP(BindTexImage2DCHROMIUM) /* 538 */ \
+ OP(BindTexImage2DWithInternalformatCHROMIUM) /* 539 */ \
+ OP(ReleaseTexImage2DCHROMIUM) /* 540 */ \
+ OP(TraceBeginCHROMIUM) /* 541 */ \
+ OP(TraceEndCHROMIUM) /* 542 */ \
+ OP(DiscardFramebufferEXTImmediate) /* 543 */ \
+ OP(LoseContextCHROMIUM) /* 544 */ \
+ OP(InsertFenceSyncCHROMIUM) /* 545 */ \
+ OP(UnpremultiplyAndDitherCopyCHROMIUM) /* 546 */ \
+ OP(DrawBuffersEXTImmediate) /* 547 */ \
+ OP(DiscardBackbufferCHROMIUM) /* 548 */ \
+ OP(ScheduleOverlayPlaneCHROMIUM) /* 549 */ \
+ OP(ScheduleCALayerSharedStateCHROMIUM) /* 550 */ \
+ OP(ScheduleCALayerCHROMIUM) /* 551 */ \
+ OP(ScheduleCALayerInUseQueryCHROMIUMImmediate) /* 552 */ \
+ OP(CommitOverlayPlanesCHROMIUM) /* 553 */ \
+ OP(FlushDriverCachesCHROMIUM) /* 554 */ \
+ OP(ScheduleDCLayerCHROMIUM) /* 555 */ \
+ OP(SetActiveURLCHROMIUM) /* 556 */ \
+ OP(MatrixLoadfCHROMIUMImmediate) /* 557 */ \
+ OP(MatrixLoadIdentityCHROMIUM) /* 558 */ \
+ OP(GenPathsCHROMIUM) /* 559 */ \
+ OP(DeletePathsCHROMIUM) /* 560 */ \
+ OP(IsPathCHROMIUM) /* 561 */ \
+ OP(PathCommandsCHROMIUM) /* 562 */ \
+ OP(PathParameterfCHROMIUM) /* 563 */ \
+ OP(PathParameteriCHROMIUM) /* 564 */ \
+ OP(PathStencilFuncCHROMIUM) /* 565 */ \
+ OP(StencilFillPathCHROMIUM) /* 566 */ \
+ OP(StencilStrokePathCHROMIUM) /* 567 */ \
+ OP(CoverFillPathCHROMIUM) /* 568 */ \
+ OP(CoverStrokePathCHROMIUM) /* 569 */ \
+ OP(StencilThenCoverFillPathCHROMIUM) /* 570 */ \
+ OP(StencilThenCoverStrokePathCHROMIUM) /* 571 */ \
+ OP(StencilFillPathInstancedCHROMIUM) /* 572 */ \
+ OP(StencilStrokePathInstancedCHROMIUM) /* 573 */ \
+ OP(CoverFillPathInstancedCHROMIUM) /* 574 */ \
+ OP(CoverStrokePathInstancedCHROMIUM) /* 575 */ \
+ OP(StencilThenCoverFillPathInstancedCHROMIUM) /* 576 */ \
+ OP(StencilThenCoverStrokePathInstancedCHROMIUM) /* 577 */ \
+ OP(BindFragmentInputLocationCHROMIUMBucket) /* 578 */ \
+ OP(ProgramPathFragmentInputGenCHROMIUM) /* 579 */ \
+ OP(CoverageModulationCHROMIUM) /* 580 */ \
+ OP(BlendBarrierKHR) /* 581 */ \
+ OP(ApplyScreenSpaceAntialiasingCHROMIUM) /* 582 */ \
+ OP(BindFragDataLocationIndexedEXTBucket) /* 583 */ \
+ OP(BindFragDataLocationEXTBucket) /* 584 */ \
+ OP(GetFragDataIndexEXT) /* 585 */ \
+ OP(UniformMatrix4fvStreamTextureMatrixCHROMIUMImmediate) /* 586 */ \
+ OP(OverlayPromotionHintCHROMIUM) /* 587 */ \
+ OP(SwapBuffersWithBoundsCHROMIUMImmediate) /* 588 */ \
+ OP(SetDrawRectangleCHROMIUM) /* 589 */ \
+ OP(SetEnableDCLayersCHROMIUM) /* 590 */ \
+ OP(InitializeDiscardableTextureCHROMIUM) /* 591 */ \
+ OP(UnlockDiscardableTextureCHROMIUM) /* 592 */ \
+ OP(LockDiscardableTextureCHROMIUM) /* 593 */ \
+ OP(TexStorage2DImageCHROMIUM) /* 594 */ \
+ OP(SetColorSpaceMetadataCHROMIUM) /* 595 */ \
+ OP(WindowRectanglesEXTImmediate) /* 596 */ \
+ OP(CreateGpuFenceINTERNAL) /* 597 */ \
+ OP(WaitGpuFenceCHROMIUM) /* 598 */ \
+ OP(DestroyGpuFenceCHROMIUM) /* 599 */ \
+ OP(SetReadbackBufferShadowAllocationINTERNAL) /* 600 */ \
+ OP(FramebufferTextureMultiviewLayeredANGLE) /* 601 */ \
+ OP(MaxShaderCompilerThreadsKHR) /* 602 */ \
+ OP(CreateAndTexStorage2DSharedImageINTERNALImmediate) /* 603 */ \
+ OP(BeginSharedImageAccessDirectCHROMIUM) /* 604 */ \
+ OP(EndSharedImageAccessDirectCHROMIUM) /* 605 */
enum CommandId {
kOneBeforeStartPoint =
diff --git a/chromium/gpu/command_buffer/common/gles2_cmd_utils.cc b/chromium/gpu/command_buffer/common/gles2_cmd_utils.cc
index e3a0a8ec0f5..10cb22921f3 100644
--- a/chromium/gpu/command_buffer/common/gles2_cmd_utils.cc
+++ b/chromium/gpu/command_buffer/common/gles2_cmd_utils.cc
@@ -1759,6 +1759,19 @@ bool GLES2Util::IsFloatFormat(uint32_t internal_format) {
}
// static
+bool GLES2Util::IsFloat32Format(uint32_t internal_format) {
+ switch (internal_format) {
+ case GL_R32F:
+ case GL_RG32F:
+ case GL_RGB32F:
+ case GL_RGBA32F:
+ return true;
+ default:
+ return false;
+ }
+}
+
+// static
uint32_t GLES2Util::ConvertToSizedFormat(uint32_t format, uint32_t type) {
switch (format) {
case GL_RGB:
diff --git a/chromium/gpu/command_buffer/common/gles2_cmd_utils.h b/chromium/gpu/command_buffer/common/gles2_cmd_utils.h
index 25a28052368..7ec260bc20c 100644
--- a/chromium/gpu/command_buffer/common/gles2_cmd_utils.h
+++ b/chromium/gpu/command_buffer/common/gles2_cmd_utils.h
@@ -218,6 +218,7 @@ class GLES2_UTILS_EXPORT GLES2Util {
static bool IsSignedIntegerFormat(uint32_t internal_format);
static bool IsIntegerFormat(uint32_t internal_format);
static bool IsFloatFormat(uint32_t internal_format);
+ static bool IsFloat32Format(uint32_t internal_format);
static uint32_t ConvertToSizedFormat(uint32_t format, uint32_t type);
static bool IsSizedColorFormat(uint32_t internal_format);
diff --git a/chromium/gpu/command_buffer/common/gpu_memory_buffer_support.cc b/chromium/gpu/command_buffer/common/gpu_memory_buffer_support.cc
index e6bb4c71bc5..66254edc7cb 100644
--- a/chromium/gpu/command_buffer/common/gpu_memory_buffer_support.cc
+++ b/chromium/gpu/command_buffer/common/gpu_memory_buffer_support.cc
@@ -106,4 +106,19 @@ GPU_EXPORT uint32_t GetBufferTextureTarget(gfx::BufferUsage usage,
return found ? gpu::GetPlatformSpecificTextureTarget() : GL_TEXTURE_2D;
}
+GPU_EXPORT bool NativeBufferNeedsPlatformSpecificTextureTarget(
+ gfx::BufferFormat format) {
+#if defined(USE_OZONE)
+ // Always use GL_TEXTURE_2D as the target for RGB textures.
+ // https://crbug.com/916728
+ if (format == gfx::BufferFormat::R_8 || format == gfx::BufferFormat::RG_88 ||
+ format == gfx::BufferFormat::RGBA_8888 ||
+ format == gfx::BufferFormat::RGBX_8888 ||
+ format == gfx::BufferFormat::BGRX_8888) {
+ return false;
+ }
+#endif
+ return true;
+}
+
} // namespace gpu
diff --git a/chromium/gpu/command_buffer/common/gpu_memory_buffer_support.h b/chromium/gpu/command_buffer/common/gpu_memory_buffer_support.h
index 3cd069b5f5c..c14b9e077a5 100644
--- a/chromium/gpu/command_buffer/common/gpu_memory_buffer_support.h
+++ b/chromium/gpu/command_buffer/common/gpu_memory_buffer_support.h
@@ -63,6 +63,11 @@ GPU_EXPORT uint32_t GetBufferTextureTarget(gfx::BufferUsage usage,
gfx::BufferFormat format,
const Capabilities& capabilities);
+// Returns whether a native GMB with the given format needs to be bound to the
+// platform-specific texture target or GL_TEXTURE_2D.
+GPU_EXPORT bool NativeBufferNeedsPlatformSpecificTextureTarget(
+ gfx::BufferFormat format);
+
} // namespace gpu
#endif // GPU_COMMAND_BUFFER_COMMON_GPU_MEMORY_BUFFER_SUPPORT_H_
diff --git a/chromium/gpu/command_buffer/common/id_type_unittest.cc b/chromium/gpu/command_buffer/common/id_type_unittest.cc
index 1ba2d018474..a8f0b349e8b 100644
--- a/chromium/gpu/command_buffer/common/id_type_unittest.cc
+++ b/chromium/gpu/command_buffer/common/id_type_unittest.cc
@@ -188,13 +188,13 @@ TEST_P(IdTypeSpecificValueTest, StdMap) {
EXPECT_EQ(map[other_id()], "other_id");
}
-INSTANTIATE_TEST_CASE_P(,
- IdTypeSpecificValueTest,
- ::testing::Values(std::numeric_limits<int>::min(),
- -1,
- 0,
- 1,
- 123,
- std::numeric_limits<int>::max()));
+INSTANTIATE_TEST_SUITE_P(,
+ IdTypeSpecificValueTest,
+ ::testing::Values(std::numeric_limits<int>::min(),
+ -1,
+ 0,
+ 1,
+ 123,
+ std::numeric_limits<int>::max()));
} // namespace gpu
diff --git a/chromium/gpu/command_buffer/common/raster_cmd_format_autogen.h b/chromium/gpu/command_buffer/common/raster_cmd_format_autogen.h
index 56631893db9..9c5c5703464 100644
--- a/chromium/gpu/command_buffer/common/raster_cmd_format_autogen.h
+++ b/chromium/gpu/command_buffer/common/raster_cmd_format_autogen.h
@@ -375,13 +375,11 @@ struct BeginRasterCHROMIUMImmediate {
void Init(GLuint _sk_color,
GLuint _msaa_sample_count,
GLboolean _can_use_lcd_text,
- GLuint _color_space_transfer_cache_id,
const GLbyte* _mailbox) {
SetHeader();
sk_color = _sk_color;
msaa_sample_count = _msaa_sample_count;
can_use_lcd_text = _can_use_lcd_text;
- color_space_transfer_cache_id = _color_space_transfer_cache_id;
memcpy(ImmediateDataAddress(this), _mailbox, ComputeDataSize());
}
@@ -389,11 +387,9 @@ struct BeginRasterCHROMIUMImmediate {
GLuint _sk_color,
GLuint _msaa_sample_count,
GLboolean _can_use_lcd_text,
- GLuint _color_space_transfer_cache_id,
const GLbyte* _mailbox) {
- static_cast<ValueType*>(cmd)->Init(
- _sk_color, _msaa_sample_count, _can_use_lcd_text,
- _color_space_transfer_cache_id, _mailbox);
+ static_cast<ValueType*>(cmd)->Init(_sk_color, _msaa_sample_count,
+ _can_use_lcd_text, _mailbox);
const uint32_t size = ComputeSize();
return NextImmediateCmdAddressTotalSize<ValueType>(cmd, size);
}
@@ -402,11 +398,10 @@ struct BeginRasterCHROMIUMImmediate {
uint32_t sk_color;
uint32_t msaa_sample_count;
uint32_t can_use_lcd_text;
- uint32_t color_space_transfer_cache_id;
};
-static_assert(sizeof(BeginRasterCHROMIUMImmediate) == 20,
- "size of BeginRasterCHROMIUMImmediate should be 20");
+static_assert(sizeof(BeginRasterCHROMIUMImmediate) == 16,
+ "size of BeginRasterCHROMIUMImmediate should be 16");
static_assert(offsetof(BeginRasterCHROMIUMImmediate, header) == 0,
"offset of BeginRasterCHROMIUMImmediate header should be 0");
static_assert(offsetof(BeginRasterCHROMIUMImmediate, sk_color) == 4,
@@ -417,10 +412,6 @@ static_assert(
static_assert(
offsetof(BeginRasterCHROMIUMImmediate, can_use_lcd_text) == 12,
"offset of BeginRasterCHROMIUMImmediate can_use_lcd_text should be 12");
-static_assert(offsetof(BeginRasterCHROMIUMImmediate,
- color_space_transfer_cache_id) == 16,
- "offset of BeginRasterCHROMIUMImmediate "
- "color_space_transfer_cache_id should be 16");
struct RasterCHROMIUM {
typedef RasterCHROMIUM ValueType;
diff --git a/chromium/gpu/command_buffer/common/raster_cmd_format_test_autogen.h b/chromium/gpu/command_buffer/common/raster_cmd_format_test_autogen.h
index 1402583c19d..15145ccf394 100644
--- a/chromium/gpu/command_buffer/common/raster_cmd_format_test_autogen.h
+++ b/chromium/gpu/command_buffer/common/raster_cmd_format_test_autogen.h
@@ -155,7 +155,7 @@ TEST_F(RasterFormatTest, BeginRasterCHROMIUMImmediate) {
*GetBufferAs<cmds::BeginRasterCHROMIUMImmediate>();
void* next_cmd =
cmd.Set(&cmd, static_cast<GLuint>(11), static_cast<GLuint>(12),
- static_cast<GLboolean>(13), static_cast<GLuint>(14), data);
+ static_cast<GLboolean>(13), data);
EXPECT_EQ(static_cast<uint32_t>(cmds::BeginRasterCHROMIUMImmediate::kCmdId),
cmd.header.command);
EXPECT_EQ(sizeof(cmd) + RoundSizeToMultipleOfEntries(sizeof(data)),
@@ -163,7 +163,6 @@ TEST_F(RasterFormatTest, BeginRasterCHROMIUMImmediate) {
EXPECT_EQ(static_cast<GLuint>(11), cmd.sk_color);
EXPECT_EQ(static_cast<GLuint>(12), cmd.msaa_sample_count);
EXPECT_EQ(static_cast<GLboolean>(13), cmd.can_use_lcd_text);
- EXPECT_EQ(static_cast<GLuint>(14), cmd.color_space_transfer_cache_id);
CheckBytesWrittenMatchesExpectedSize(
next_cmd, sizeof(cmd) + RoundSizeToMultipleOfEntries(sizeof(data)));
}
diff --git a/chromium/gpu/command_buffer/common/shared_image_usage.h b/chromium/gpu/command_buffer/common/shared_image_usage.h
index 45c6cbafec5..34311ebaa93 100644
--- a/chromium/gpu/command_buffer/common/shared_image_usage.h
+++ b/chromium/gpu/command_buffer/common/shared_image_usage.h
@@ -24,6 +24,8 @@ enum SharedImageUsage : uint32_t {
// TODO(backer): Fold back into SHARED_IMAGE_USAGE_RASTER once RasterInterface
// can CPU raster (CopySubImage?) to SkImage.
SHARED_IMAGE_USAGE_OOP_RASTERIZATION = 1 << 5,
+ // Image will be used for RGB emulation in WebGL on Mac.
+ SHARED_IMAGE_USAGE_RGB_EMULATION = 1 << 6,
};
} // namespace gpu
diff --git a/chromium/gpu/command_buffer/gles2_cmd_buffer_functions.txt b/chromium/gpu/command_buffer/gles2_cmd_buffer_functions.txt
index 740a8dcd3d8..aa55852d05d 100644
--- a/chromium/gpu/command_buffer/gles2_cmd_buffer_functions.txt
+++ b/chromium/gpu/command_buffer/gles2_cmd_buffer_functions.txt
@@ -242,6 +242,7 @@ GL_APICALL void GL_APIENTRY glViewport (GLint x, GLint y, GLsizei width,
GL_APICALL void GL_APIENTRY glWaitSync (GLsync sync, GLbitfieldSyncFlushFlags flags, GLuint64 timeout);
GL_APICALL void GL_APIENTRY glBlitFramebufferCHROMIUM (GLint srcX0, GLint srcY0, GLint srcX1, GLint srcY1, GLint dstX0, GLint dstY0, GLint dstX1, GLint dstY1, GLbitfield mask, GLenumBlitFilter filter);
GL_APICALL void GL_APIENTRY glRenderbufferStorageMultisampleCHROMIUM (GLenumRenderBufferTarget target, GLsizei samples, GLenumRenderBufferFormat internalformat, GLsizei width, GLsizei height);
+GL_APICALL void GL_APIENTRY glRenderbufferStorageMultisampleAdvancedAMD (GLenumRenderBufferTarget target, GLsizei samples, GLsizei storageSamples, GLenumRenderBufferFormat internalformat, GLsizei width, GLsizei height);
GL_APICALL void GL_APIENTRY glRenderbufferStorageMultisampleEXT (GLenumRenderBufferTarget target, GLsizei samples, GLenumRenderBufferFormat internalformat, GLsizei width, GLsizei height);
GL_APICALL void GL_APIENTRY glFramebufferTexture2DMultisampleEXT (GLenumFramebufferTarget target, GLenumAttachment attachment, GLenumTextureTarget textarget, GLidTexture texture, GLint level, GLsizei samples);
GL_APICALL void GL_APIENTRY glTexStorage2DEXT (GLenumTextureBindTarget target, GLsizei levels, GLenumTextureInternalFormatStorage internalFormat, GLsizei width, GLsizei height);
@@ -271,6 +272,13 @@ GL_APICALL void GL_APIENTRY glFramebufferParameteri (GLenumFramebufferTa
GL_APICALL void GL_APIENTRY glBindImageTexture (GLuint unit, GLuint texture, GLint level, GLboolean layered, GLint layer, GLenum access, GLenum format);
GL_APICALL void GL_APIENTRY glDispatchCompute (GLuint num_groups_x, GLuint num_groups_y, GLuint num_groups_z);
+
+GL_APICALL void GL_APIENTRY glGetProgramInterfaceiv (GLidProgram program, GLenum program_interface, GLenum pname, GLint* params);
+GL_APICALL GLuint GL_APIENTRY glGetProgramResourceIndex (GLidProgram program, GLenum program_interface, const char* name);
+GL_APICALL void GL_APIENTRY glGetProgramResourceName (GLidProgram program, GLenum program_interface, GLuint index, GLsizeiNotNegative bufsize, GLsizeiOptional* length, char* name);
+GL_APICALL void GL_APIENTRY glGetProgramResourceiv (GLidProgram program, GLenum program_interface, GLuint index, GLsizeiNotNegative prop_count, const GLenum* props, GLsizeiNotNegative bufsize, GLsizeiOptional* length, GLint* params);
+GL_APICALL GLint GL_APIENTRY glGetProgramResourceLocation (GLidProgram program, GLenum program_interface, const char* name);
+
GL_APICALL void GL_APIENTRY glMemoryBarrierEXT (GLbitfield barriers);
GL_APICALL void GL_APIENTRY glMemoryBarrierByRegion (GLbitfield barriers);
@@ -422,6 +430,7 @@ GL_APICALL void GL_APIENTRY glMaxShaderCompilerThreadsKHR (GLuint count)
// Extension CHROMIUM_shared_image
GL_APICALL GLuint GL_APIENTRY glCreateAndTexStorage2DSharedImageCHROMIUM (const GLbyte* mailbox);
-GL_APICALL void GL_APIENTRY glCreateAndTexStorage2DSharedImageINTERNAL (GLuint texture, const GLbyte* mailbox);
+GL_APICALL GLuint GL_APIENTRY glCreateAndTexStorage2DSharedImageWithInternalFormatCHROMIUM (const GLbyte* mailbox, GLenum internalformat);
+GL_APICALL void GL_APIENTRY glCreateAndTexStorage2DSharedImageINTERNAL (GLuint texture, const GLbyte* mailbox, GLenum internalformat);
GL_APICALL void GL_APIENTRY glBeginSharedImageAccessDirectCHROMIUM (GLuint texture, GLenumSharedImageAccessMode mode);
GL_APICALL void GL_APIENTRY glEndSharedImageAccessDirectCHROMIUM (GLuint texture);
diff --git a/chromium/gpu/command_buffer/raster_cmd_buffer_functions.txt b/chromium/gpu/command_buffer/raster_cmd_buffer_functions.txt
index 40ee7becae7..bc9938e1090 100644
--- a/chromium/gpu/command_buffer/raster_cmd_buffer_functions.txt
+++ b/chromium/gpu/command_buffer/raster_cmd_buffer_functions.txt
@@ -26,7 +26,7 @@ GL_APICALL void GL_APIENTRY glWaitSyncTokenCHROMIUM (const GLbyte* sync_
GL_APICALL GLenum GL_APIENTRY glGetGraphicsResetStatusKHR (void);
// Extension CHROMIUM_raster_transport
-GL_APICALL void GL_APIENTRY glBeginRasterCHROMIUM (GLuint sk_color, GLuint msaa_sample_count, GLboolean can_use_lcd_text, GLuint color_space_transfer_cache_id, const GLbyte* mailbox);
+GL_APICALL void GL_APIENTRY glBeginRasterCHROMIUM (GLuint sk_color, GLuint msaa_sample_count, GLboolean can_use_lcd_text, const GLbyte* mailbox);
GL_APICALL void GL_APIENTRY glRasterCHROMIUM (GLuint raster_shm_id, GLuint raster_shm_offset, GLuint raster_shm_size, GLuint font_shm_id, GLuint font_shm_offset, GLuint font_shm_size);
GL_APICALL void GL_APIENTRY glEndRasterCHROMIUM (void);
GL_APICALL void GL_APIENTRY glCreateTransferCacheEntryINTERNAL (GLuint entry_type, GLuint entry_id, GLuint handle_shm_id, GLuint handle_shm_offset, GLuint data_shm_id, GLuint data_shm_offset, GLuint data_size);
diff --git a/chromium/gpu/command_buffer/service/BUILD.gn b/chromium/gpu/command_buffer/service/BUILD.gn
index c0fd9edb8ad..6955858281e 100644
--- a/chromium/gpu/command_buffer/service/BUILD.gn
+++ b/chromium/gpu/command_buffer/service/BUILD.gn
@@ -6,6 +6,7 @@ import("//build/config/jumbo.gni")
import("//build/config/ui.gni")
import("//gpu/vulkan/features.gni")
import("//third_party/protobuf/proto_library.gni")
+import("//ui/gl/features.gni")
group("service") {
if (is_component_build) {
@@ -256,6 +257,8 @@ target(link_target_type, "gles2_sources") {
"webgpu_cmd_validation_implementation_autogen.h",
"webgpu_decoder.cc",
"webgpu_decoder.h",
+ "webgpu_decoder_impl.cc",
+ "webgpu_decoder_impl.h",
"wrapped_sk_image.cc",
"wrapped_sk_image.h",
]
@@ -272,17 +275,18 @@ target(link_target_type, "gles2_sources") {
include_dirs = [ "//third_party/mesa_headers" ]
public_deps = [
+ "//cc/paint",
"//gpu/command_buffer/common",
"//gpu/command_buffer/common:gles2_sources",
"//gpu/command_buffer/common:raster_sources",
"//gpu/command_buffer/common:webgpu_sources",
+ "//skia",
]
deps = [
":disk_cache_proto",
":service",
"//base",
"//base/third_party/dynamic_annotations",
- "//cc/paint",
"//components/viz/common:resource_format_utils",
"//gpu/command_buffer/client",
"//gpu/command_buffer/common:gles2_utils",
@@ -304,12 +308,38 @@ target(link_target_type, "gles2_sources") {
]
if (enable_vulkan) {
- deps += [ "//components/viz/common:vulkan_context_provider" ]
+ deps += [
+ "//components/viz/common:vulkan_context_provider",
+ "//gpu/vulkan",
+ ]
+
+ if (is_linux) {
+ sources += [
+ "external_vk_image_backing.cc",
+ "external_vk_image_backing.h",
+ "external_vk_image_factory.cc",
+ "external_vk_image_factory.h",
+ "external_vk_image_gl_representation.cc",
+ "external_vk_image_gl_representation.h",
+ "external_vk_image_skia_representation.cc",
+ "external_vk_image_skia_representation.h",
+ ]
+ }
+ }
+
+ if (use_dawn) {
+ deps += [ "//third_party/dawn:libdawn_native" ]
}
if (is_mac) {
+ sources += [
+ "shared_image_backing_factory_iosurface.h",
+ "shared_image_backing_factory_iosurface.mm",
+ ]
+
# Required by gles2_cmd_decoder.cc on Mac.
libs = [
+ "Cocoa.framework",
"IOSurface.framework",
"OpenGL.framework",
]
diff --git a/chromium/gpu/command_buffer/service/buffer_manager.h b/chromium/gpu/command_buffer/service/buffer_manager.h
index 7f1f3af53ac..86c3561104a 100644
--- a/chromium/gpu/command_buffer/service/buffer_manager.h
+++ b/chromium/gpu/command_buffer/service/buffer_manager.h
@@ -14,7 +14,6 @@
#include <unordered_map>
#include <vector>
-#include "base/debug/stack_trace.h"
#include "base/logging.h"
#include "base/macros.h"
#include "base/memory/ref_counted.h"
diff --git a/chromium/gpu/command_buffer/service/command_buffer_direct.cc b/chromium/gpu/command_buffer/service/command_buffer_direct.cc
index 45439dc4eb2..5b9e2a7c177 100644
--- a/chromium/gpu/command_buffer/service/command_buffer_direct.cc
+++ b/chromium/gpu/command_buffer/service/command_buffer_direct.cc
@@ -16,9 +16,8 @@ uint64_t g_next_command_buffer_id = 1;
} // anonymous namespace
-CommandBufferDirect::CommandBufferDirect(
- TransferBufferManager* transfer_buffer_manager)
- : service_(this, transfer_buffer_manager),
+CommandBufferDirect::CommandBufferDirect()
+ : service_(this, nullptr),
command_buffer_id_(
CommandBufferId::FromUnsafeValue(g_next_command_buffer_id++)) {}
@@ -49,6 +48,8 @@ CommandBuffer::State CommandBufferDirect::WaitForGetOffsetInRange(
void CommandBufferDirect::Flush(int32_t put_offset) {
DCHECK(handler_);
+ if (GetLastState().error != gpu::error::kNoError)
+ return;
service_.Flush(put_offset, handler_);
}
@@ -102,4 +103,8 @@ scoped_refptr<Buffer> CommandBufferDirect::CreateTransferBufferWithId(
return service_.CreateTransferBufferWithId(size, id);
}
+void CommandBufferDirect::HandleReturnData(base::span<const uint8_t> data) {
+ NOTIMPLEMENTED();
+}
+
} // namespace gpu
diff --git a/chromium/gpu/command_buffer/service/command_buffer_direct.h b/chromium/gpu/command_buffer/service/command_buffer_direct.h
index d12b11f8476..523336507eb 100644
--- a/chromium/gpu/command_buffer/service/command_buffer_direct.h
+++ b/chromium/gpu/command_buffer/service/command_buffer_direct.h
@@ -15,17 +15,16 @@
namespace gpu {
class AsyncAPIInterface;
-class TransferBufferManager;
class GPU_EXPORT CommandBufferDirect : public CommandBuffer,
public CommandBufferServiceClient,
public DecoderClient {
public:
- explicit CommandBufferDirect(TransferBufferManager* transfer_buffer_manager);
+ CommandBufferDirect();
~CommandBufferDirect() override;
void set_handler(AsyncAPIInterface* handler) { handler_ = handler; }
- CommandBufferServiceBase* service() { return &service_; }
+ CommandBufferService* service() { return &service_; }
// CommandBuffer implementation:
CommandBuffer::State GetLastState() override;
@@ -40,7 +39,7 @@ class GPU_EXPORT CommandBufferDirect : public CommandBuffer,
int32_t* id) override;
void DestroyTransferBuffer(int32_t id) override;
- // CommandBufferServiceBase implementation:
+ // CommandBufferServiceClient implementation:
CommandBatchProcessedResult OnCommandBatchProcessed() override;
void OnParseError() override;
@@ -52,6 +51,7 @@ class GPU_EXPORT CommandBufferDirect : public CommandBuffer,
void OnRescheduleAfterFinished() override;
void OnSwapBuffers(uint64_t swap_id, uint32_t flags) override;
void ScheduleGrContextCleanup() override {}
+ void HandleReturnData(base::span<const uint8_t> data) override;
scoped_refptr<Buffer> CreateTransferBufferWithId(uint32_t size, int32_t id);
diff --git a/chromium/gpu/command_buffer/service/command_buffer_service.cc b/chromium/gpu/command_buffer/service/command_buffer_service.cc
index 89b0e1b0f8e..04407cd1488 100644
--- a/chromium/gpu/command_buffer/service/command_buffer_service.cc
+++ b/chromium/gpu/command_buffer/service/command_buffer_service.cc
@@ -18,12 +18,12 @@
namespace gpu {
-CommandBufferService::CommandBufferService(
- CommandBufferServiceClient* client,
- TransferBufferManager* transfer_buffer_manager)
- : client_(client), transfer_buffer_manager_(transfer_buffer_manager) {
+CommandBufferService::CommandBufferService(CommandBufferServiceClient* client,
+ MemoryTracker* memory_tracker)
+ : client_(client),
+ transfer_buffer_manager_(
+ std::make_unique<TransferBufferManager>(memory_tracker)) {
DCHECK(client_);
- DCHECK(transfer_buffer_manager_);
state_.token = 0;
}
@@ -204,4 +204,8 @@ void CommandBufferService::SetScheduled(bool scheduled) {
scheduled_ = scheduled;
}
+size_t CommandBufferService::GetSharedMemoryBytesAllocated() const {
+ return transfer_buffer_manager_->shared_memory_bytes_allocated();
+}
+
} // namespace gpu
diff --git a/chromium/gpu/command_buffer/service/command_buffer_service.h b/chromium/gpu/command_buffer/service/command_buffer_service.h
index 42e1716a0b3..3e52c3d0875 100644
--- a/chromium/gpu/command_buffer/service/command_buffer_service.h
+++ b/chromium/gpu/command_buffer/service/command_buffer_service.h
@@ -18,6 +18,7 @@
namespace gpu {
+class MemoryTracker;
class TransferBufferManager;
class GPU_EXPORT CommandBufferServiceBase {
@@ -73,7 +74,7 @@ class GPU_EXPORT CommandBufferService : public CommandBufferServiceBase {
static const int kParseCommandsSlice = 20;
CommandBufferService(CommandBufferServiceClient* client,
- TransferBufferManager* transfer_buffer_manager);
+ MemoryTracker* memory_tracker);
~CommandBufferService() override;
// CommandBufferServiceBase implementation:
@@ -123,9 +124,11 @@ class GPU_EXPORT CommandBufferService : public CommandBufferServiceBase {
state_.get_offset = get_offset;
}
+ size_t GetSharedMemoryBytesAllocated() const;
+
private:
CommandBufferServiceClient* client_;
- TransferBufferManager* transfer_buffer_manager_;
+ std::unique_ptr<TransferBufferManager> transfer_buffer_manager_;
CommandBuffer::State state_;
int32_t put_offset_ = 0;
diff --git a/chromium/gpu/command_buffer/service/command_buffer_service_unittest.cc b/chromium/gpu/command_buffer/service/command_buffer_service_unittest.cc
index 52c9e7de5b4..9237e0b9c1a 100644
--- a/chromium/gpu/command_buffer/service/command_buffer_service_unittest.cc
+++ b/chromium/gpu/command_buffer/service/command_buffer_service_unittest.cc
@@ -12,7 +12,6 @@
#include "gpu/command_buffer/client/client_test_helper.h"
#include "gpu/command_buffer/service/command_buffer_service.h"
#include "gpu/command_buffer/service/mocks.h"
-#include "gpu/command_buffer/service/transfer_buffer_manager.h"
#include "testing/gtest/include/gtest/gtest.h"
namespace gpu {
@@ -43,9 +42,8 @@ class CommandBufferServiceTest : public testing::Test,
// Creates a CommandBufferService, with a buffer of the specified size (in
// entries).
void MakeService(unsigned int entry_count) {
- transfer_buffer_manager_ = std::make_unique<TransferBufferManager>(nullptr);
- command_buffer_service_ = std::make_unique<CommandBufferService>(
- this, transfer_buffer_manager_.get());
+ command_buffer_service_ =
+ std::make_unique<CommandBufferService>(this, nullptr);
api_mock_.reset(new AsyncAPIMock(false, command_buffer_service_.get()));
SetNewGetBuffer(entry_count * sizeof(CommandBufferEntry));
}
@@ -95,7 +93,6 @@ class CommandBufferServiceTest : public testing::Test,
MOCK_METHOD0(OnParseError, void());
private:
- std::unique_ptr<TransferBufferManager> transfer_buffer_manager_;
std::unique_ptr<CommandBufferService> command_buffer_service_;
std::unique_ptr<AsyncAPIMock> api_mock_;
scoped_refptr<Buffer> buffer_;
diff --git a/chromium/gpu/command_buffer/service/context_group.cc b/chromium/gpu/command_buffer/service/context_group.cc
index f7b2eb37696..cfa6423afd8 100644
--- a/chromium/gpu/command_buffer/service/context_group.cc
+++ b/chromium/gpu/command_buffer/service/context_group.cc
@@ -24,7 +24,6 @@
#include "gpu/command_buffer/service/shader_manager.h"
#include "gpu/command_buffer/service/shared_image_factory.h"
#include "gpu/command_buffer/service/texture_manager.h"
-#include "gpu/command_buffer/service/transfer_buffer_manager.h"
#include "gpu/config/gpu_preferences.h"
#include "ui/gl/gl_bindings.h"
#include "ui/gl/gl_version_info.h"
@@ -57,6 +56,7 @@ DisallowedFeatures AdjustDisallowedFeatures(
adjusted_disallowed_features.oes_texture_float_linear = true;
adjusted_disallowed_features.ext_color_buffer_half_float = true;
adjusted_disallowed_features.oes_texture_half_float_linear = true;
+ adjusted_disallowed_features.ext_float_blend = true;
}
return adjusted_disallowed_features;
}
@@ -128,8 +128,6 @@ ContextGroup::ContextGroup(
DCHECK(discardable_manager);
DCHECK(feature_info_);
DCHECK(mailbox_manager_);
- transfer_buffer_manager_ =
- std::make_unique<TransferBufferManager>(memory_tracker_.get());
use_passthrough_cmd_decoder_ = supports_passthrough_command_decoders &&
gpu_preferences_.use_passthrough_cmd_decoder;
}
@@ -174,6 +172,15 @@ gpu::ContextResult ContextGroup::Initialize(
feature_info_->Initialize(context_type, use_passthrough_cmd_decoder_,
adjusted_disallowed_features);
+ // Fail early if ES3 is requested and driver does not support it.
+ if ((context_type == CONTEXT_TYPE_WEBGL2 ||
+ context_type == CONTEXT_TYPE_OPENGLES3) &&
+ !feature_info_->IsES3Capable()) {
+ LOG(ERROR) << "ContextResult::kFatalFailure: "
+ << "ES3 is blacklisted/disabled/unsupported by driver.";
+ return gpu::ContextResult::kFatalFailure;
+ }
+
const GLint kMinRenderbufferSize = 512; // GL says 1 pixel!
GLint max_renderbuffer_size = 0;
if (!QueryGLFeature(
diff --git a/chromium/gpu/command_buffer/service/context_group.h b/chromium/gpu/command_buffer/service/context_group.h
index 3b3cacba99c..7ff6e3d8270 100644
--- a/chromium/gpu/command_buffer/service/context_group.h
+++ b/chromium/gpu/command_buffer/service/context_group.h
@@ -33,7 +33,6 @@ namespace gpu {
class ImageFactory;
struct GpuPreferences;
class MailboxManager;
-class TransferBufferManager;
class SharedImageManager;
class SharedImageRepresentationFactory;
class ServiceDiscardableManager;
@@ -199,10 +198,6 @@ class GPU_GLES2_EXPORT ContextGroup : public base::RefCounted<ContextGroup> {
return shader_manager_.get();
}
- TransferBufferManager* transfer_buffer_manager() const {
- return transfer_buffer_manager_.get();
- }
-
SamplerManager* sampler_manager() const {
return sampler_manager_.get();
}
@@ -274,7 +269,6 @@ class GPU_GLES2_EXPORT ContextGroup : public base::RefCounted<ContextGroup> {
std::unique_ptr<MemoryTracker> memory_tracker_;
ShaderTranslatorCache* shader_translator_cache_;
FramebufferCompletenessCache* framebuffer_completeness_cache_;
- std::unique_ptr<TransferBufferManager> transfer_buffer_manager_;
bool enforce_gl_minimums_;
bool bind_generates_resource_;
diff --git a/chromium/gpu/command_buffer/service/copy_texture_chromium_mock.h b/chromium/gpu/command_buffer/service/copy_texture_chromium_mock.h
index a163f2922ea..ec299201fb8 100644
--- a/chromium/gpu/command_buffer/service/copy_texture_chromium_mock.h
+++ b/chromium/gpu/command_buffer/service/copy_texture_chromium_mock.h
@@ -54,7 +54,7 @@ class MockCopyTexImageResourceManager : public CopyTexImageResourceManager {
GLsizei width,
GLsizei height,
GLuint source_framebuffer,
- GLenum source_framebuffer_internal_format) override{};
+ GLenum source_framebuffer_internal_format) override {}
private:
DISALLOW_COPY_AND_ASSIGN(MockCopyTexImageResourceManager);
@@ -141,7 +141,7 @@ class MockCopyTextureResourceManager
bool unpremultiply_alpha,
bool dither,
const GLfloat transform_matrix[16],
- CopyTexImageResourceManager* luma_emulation_blitter) override{};
+ CopyTexImageResourceManager* luma_emulation_blitter) override {}
void DoCopyTextureWithTransform(
DecoderContext* decoder,
GLenum source_target,
@@ -160,7 +160,7 @@ class MockCopyTextureResourceManager
bool dither,
const GLfloat transform_matrix[16],
CopyTextureMethod method,
- CopyTexImageResourceManager* luma_emulation_blitter) override{};
+ CopyTexImageResourceManager* luma_emulation_blitter) override {}
private:
DISALLOW_COPY_AND_ASSIGN(MockCopyTextureResourceManager);
diff --git a/chromium/gpu/command_buffer/service/decoder_client.h b/chromium/gpu/command_buffer/service/decoder_client.h
index 66da71f6b80..d16d8b4bc70 100644
--- a/chromium/gpu/command_buffer/service/decoder_client.h
+++ b/chromium/gpu/command_buffer/service/decoder_client.h
@@ -9,6 +9,7 @@
#include <string>
+#include "base/containers/span.h"
#include "gpu/gpu_export.h"
#include "url/gurl.h"
@@ -48,6 +49,9 @@ class GPU_EXPORT DecoderClient {
virtual void ScheduleGrContextCleanup() = 0;
virtual void SetActiveURL(GURL url) {}
+
+ // Called by the decoder to pass a variable-size block of data to the client.
+ virtual void HandleReturnData(base::span<const uint8_t> data) = 0;
};
} // namespace gpu
diff --git a/chromium/gpu/command_buffer/service/decoder_context.h b/chromium/gpu/command_buffer/service/decoder_context.h
index 61b0954c9cf..ac8af444a17 100644
--- a/chromium/gpu/command_buffer/service/decoder_context.h
+++ b/chromium/gpu/command_buffer/service/decoder_context.h
@@ -154,6 +154,7 @@ class GPU_GLES2_EXPORT DecoderContext : public AsyncAPIInterface,
//
// Methods required by GLES2DecoderHelper.
+ // Only functional for GLES2 Decoders.
//
virtual gles2::ContextGroup* GetContextGroup() = 0;
virtual gles2::ErrorState* GetErrorState() = 0;
@@ -219,6 +220,13 @@ class GPU_GLES2_EXPORT DecoderContext : public AsyncAPIInterface,
// Restores texture states for a given service id.
virtual void RestoreTextureState(unsigned service_id) = 0;
+
+ //
+ // Methods required by ImageDecodeAcceleratorStub
+ //
+ // Returns the ID of a RasterDecoder. This is not applicable to other
+ // implementations and it returns a negative number in that case.
+ virtual int GetRasterDecoderId() const = 0;
};
} // namespace gpu
diff --git a/chromium/gpu/command_buffer/service/error_state.cc b/chromium/gpu/command_buffer/service/error_state.cc
index 72c18d23b5c..431218500fa 100644
--- a/chromium/gpu/command_buffer/service/error_state.cc
+++ b/chromium/gpu/command_buffer/service/error_state.cc
@@ -172,9 +172,9 @@ void ErrorStateImpl::SetGLErrorInvalidParami(
} else {
SetGLError(
filename, line, error, function_name,
- (std::string("trying to set ") +
- GLES2Util::GetStringEnum(pname) + " to " +
- base::IntToString(param)).c_str());
+ (std::string("trying to set ") + GLES2Util::GetStringEnum(pname) +
+ " to " + base::NumberToString(param))
+ .c_str());
}
}
diff --git a/chromium/gpu/command_buffer/service/external_vk_image_backing.cc b/chromium/gpu/command_buffer/service/external_vk_image_backing.cc
new file mode 100644
index 00000000000..7a80d6fb406
--- /dev/null
+++ b/chromium/gpu/command_buffer/service/external_vk_image_backing.cc
@@ -0,0 +1,249 @@
+// Copyright 2019 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "gpu/command_buffer/service/external_vk_image_backing.h"
+
+#include "gpu/command_buffer/service/external_vk_image_gl_representation.h"
+#include "gpu/command_buffer/service/external_vk_image_skia_representation.h"
+#include "gpu/command_buffer/service/mailbox_manager.h"
+#include "gpu/vulkan/vulkan_function_pointers.h"
+#include "ui/gl/gl_context.h"
+
+#define GL_HANDLE_TYPE_OPAQUE_FD_EXT 0x9586
+
+namespace gpu {
+
+ExternalVkImageBacking::ExternalVkImageBacking(
+ const Mailbox& mailbox,
+ viz::ResourceFormat format,
+ const gfx::Size& size,
+ const gfx::ColorSpace& color_space,
+ uint32_t usage,
+ SharedContextState* context_state,
+ VkImage image,
+ VkDeviceMemory memory,
+ size_t memory_size,
+ VkFormat vk_format)
+ : SharedImageBacking(mailbox,
+ format,
+ size,
+ color_space,
+ usage,
+ memory_size),
+ context_state_(context_state),
+ image_(image),
+ memory_(memory),
+ memory_size_(memory_size),
+ vk_format_(vk_format) {}
+
+ExternalVkImageBacking::~ExternalVkImageBacking() {
+ // Destroy() will do any necessary cleanup.
+}
+
+VkSemaphore ExternalVkImageBacking::CreateExternalVkSemaphore() {
+ VkExportSemaphoreCreateInfo export_info;
+ export_info.sType = VK_STRUCTURE_TYPE_EXPORT_SEMAPHORE_CREATE_INFO;
+ export_info.pNext = nullptr;
+ export_info.handleTypes = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT_KHR;
+
+ VkSemaphoreCreateInfo sem_info;
+ sem_info.sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO;
+ sem_info.pNext = &export_info;
+ sem_info.flags = 0;
+
+ VkSemaphore semaphore;
+ VkResult result = vkCreateSemaphore(device(), &sem_info, nullptr, &semaphore);
+
+ if (result != VK_SUCCESS) {
+ LOG(ERROR) << "Failed to create VkSemaphore: " << result;
+ return VK_NULL_HANDLE;
+ }
+
+ return semaphore;
+}
+
+bool ExternalVkImageBacking::BeginVulkanReadAccess(
+ VkSemaphore* gl_write_finished_semaphore) {
+ if (is_write_in_progress_) {
+ LOG(ERROR) << "Unable to begin read access for ExternalVkImageBacking "
+ << "because a write access is in progress";
+ return false;
+ }
+ ++reads_in_progress_;
+ *gl_write_finished_semaphore = gl_write_finished_semaphore_;
+ gl_write_finished_semaphore_ = VK_NULL_HANDLE;
+ return true;
+}
+
+void ExternalVkImageBacking::EndVulkanReadAccess(
+ VkSemaphore vulkan_read_finished_semaphore) {
+ DCHECK_NE(0u, reads_in_progress_);
+ --reads_in_progress_;
+ // GL only needs to block on the latest semaphore. Destroy any existing
+ // semaphore if it's not used yet.
+ if (vulkan_read_finished_semaphore_ != VK_NULL_HANDLE) {
+ // TODO(crbug.com/932260): This call is safe because we previously called
+ // vkQueueWaitIdle in ExternalVkImageSkiaRepresentation::EndReadAccess.
+ // However, vkQueueWaitIdle is a blocking call and should eventually be
+ // replaced with better alternatives.
+ vkDestroySemaphore(device(), vulkan_read_finished_semaphore_, nullptr);
+ }
+ vulkan_read_finished_semaphore_ = vulkan_read_finished_semaphore;
+}
+
+bool ExternalVkImageBacking::BeginGlWriteAccess(
+ VkSemaphore* vulkan_read_finished_semaphore) {
+ if (is_write_in_progress_ || reads_in_progress_) {
+ LOG(ERROR) << "Unable to begin write access for ExternalVkImageBacking "
+ << "because another read or write access is in progress";
+ return false;
+ }
+ is_write_in_progress_ = true;
+ *vulkan_read_finished_semaphore = vulkan_read_finished_semaphore_;
+ vulkan_read_finished_semaphore_ = VK_NULL_HANDLE;
+ return true;
+}
+
+void ExternalVkImageBacking::EndGlWriteAccess(
+ VkSemaphore gl_write_finished_semaphore) {
+ DCHECK(is_write_in_progress_);
+ is_write_in_progress_ = false;
+ // Vulkan only needs to block on the latest semaphore. Destroy any existing
+ // semaphore if it's not used yet.
+ if (gl_write_finished_semaphore_ != VK_NULL_HANDLE) {
+ // This call is safe because this semaphore has only been used in GL and
+ // therefore it's not associated with any unfinished task in a VkQueue.
+ vkDestroySemaphore(device(), gl_write_finished_semaphore_, nullptr);
+ }
+ gl_write_finished_semaphore_ = gl_write_finished_semaphore;
+}
+
+bool ExternalVkImageBacking::BeginGlReadAccess() {
+ if (is_write_in_progress_)
+ return false;
+ ++reads_in_progress_;
+ return true;
+}
+
+void ExternalVkImageBacking::EndGlReadAccess() {
+ DCHECK_NE(0u, reads_in_progress_);
+ --reads_in_progress_;
+}
+
+bool ExternalVkImageBacking::IsCleared() const {
+ return is_cleared_;
+}
+
+void ExternalVkImageBacking::SetCleared() {
+ is_cleared_ = true;
+}
+
+void ExternalVkImageBacking::Update() {}
+
+void ExternalVkImageBacking::Destroy() {
+ // TODO(crbug.com/932260): We call vkQueueWaitIdle to ensure all these objects
+ // are no longer associated with any queue command that has not completed
+ // execution yet. Remove this call once we have better alternatives.
+ vkQueueWaitIdle(context_state()
+ ->vk_context_provider()
+ ->GetDeviceQueue()
+ ->GetVulkanQueue());
+ vkDestroyImage(device(), image_, nullptr);
+ vkFreeMemory(device(), memory_, nullptr);
+ if (vulkan_read_finished_semaphore_ != VK_NULL_HANDLE)
+ vkDestroySemaphore(device(), vulkan_read_finished_semaphore_, nullptr);
+ if (gl_write_finished_semaphore_ != VK_NULL_HANDLE)
+ vkDestroySemaphore(device(), gl_write_finished_semaphore_, nullptr);
+}
+
+bool ExternalVkImageBacking::ProduceLegacyMailbox(
+ MailboxManager* mailbox_manager) {
+ // It is not safe to produce a legacy mailbox because it would bypass the
+ // synchronization between Vulkan and GL that is implemented in the
+ // representation classes.
+ return false;
+}
+
+std::unique_ptr<SharedImageRepresentationGLTexture>
+ExternalVkImageBacking::ProduceGLTexture(SharedImageManager* manager,
+ MemoryTypeTracker* tracker) {
+ VkMemoryGetFdInfoKHR get_fd_info;
+ get_fd_info.sType = VK_STRUCTURE_TYPE_MEMORY_GET_FD_INFO_KHR;
+ get_fd_info.pNext = nullptr;
+ get_fd_info.memory = memory_;
+ get_fd_info.handleType = VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT_KHR;
+
+ int memory_fd = -1;
+ vkGetMemoryFdKHR(device(), &get_fd_info, &memory_fd);
+ if (memory_fd < 0) {
+ LOG(ERROR) << "Unable to extract file descriptor out of external VkImage";
+ return nullptr;
+ }
+
+ gl::GLApi* api = gl::g_current_gl_context;
+
+ constexpr GLenum target = GL_TEXTURE_2D;
+ constexpr GLenum get_target = GL_TEXTURE_BINDING_2D;
+ GLuint internal_format = viz::TextureStorageFormat(format());
+
+ GLuint memory_object;
+ api->glCreateMemoryObjectsEXTFn(1, &memory_object);
+ api->glImportMemoryFdEXTFn(memory_object, memory_size_,
+ GL_HANDLE_TYPE_OPAQUE_FD_EXT, memory_fd);
+ GLuint texture_service_id;
+ api->glGenTexturesFn(1, &texture_service_id);
+ GLint old_texture_binding = 0;
+ api->glGetIntegervFn(get_target, &old_texture_binding);
+ api->glBindTextureFn(target, texture_service_id);
+ api->glTexParameteriFn(target, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
+ api->glTexParameteriFn(target, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
+ api->glTexParameteriFn(target, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
+ api->glTexParameteriFn(target, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
+ api->glTexStorageMem2DEXTFn(GL_TEXTURE_2D, 1, internal_format, size().width(),
+ size().height(), memory_object, 0);
+
+ gles2::Texture* texture = new gles2::Texture(texture_service_id);
+ texture->SetLightweightRef();
+ texture->SetTarget(target, 1);
+ texture->sampler_state_.min_filter = GL_LINEAR;
+ texture->sampler_state_.mag_filter = GL_LINEAR;
+ texture->sampler_state_.wrap_t = GL_CLAMP_TO_EDGE;
+ texture->sampler_state_.wrap_s = GL_CLAMP_TO_EDGE;
+ // If the backing is already cleared, no need to clear it again.
+ gfx::Rect cleared_rect;
+ if (is_cleared_)
+ cleared_rect = gfx::Rect(size());
+
+ GLenum gl_format = viz::GLDataFormat(format());
+ GLenum gl_type = viz::GLDataType(format());
+ texture->SetLevelInfo(target, 0, internal_format, size().width(),
+ size().height(), 1, 0, gl_format, gl_type,
+ cleared_rect);
+ texture->SetImmutable(true);
+
+ api->glBindTextureFn(target, old_texture_binding);
+
+ return std::make_unique<ExternalVkImageGlRepresentation>(
+ manager, this, tracker, texture, texture_service_id);
+}
+
+std::unique_ptr<SharedImageRepresentationGLTexturePassthrough>
+ExternalVkImageBacking::ProduceGLTexturePassthrough(
+ SharedImageManager* manager,
+ MemoryTypeTracker* tracker) {
+ // Passthrough command decoder is not currently used on Linux.
+ return nullptr;
+}
+
+std::unique_ptr<SharedImageRepresentationSkia>
+ExternalVkImageBacking::ProduceSkia(SharedImageManager* manager,
+ MemoryTypeTracker* tracker) {
+ // This backing type is only used when vulkan is enabled, so SkiaRenderer
+ // should also be using Vulkan.
+ DCHECK(context_state_->use_vulkan_gr_context());
+ return std::make_unique<ExternalVkImageSkiaRepresentation>(manager, this,
+ tracker);
+}
+
+} // namespace gpu
diff --git a/chromium/gpu/command_buffer/service/external_vk_image_backing.h b/chromium/gpu/command_buffer/service/external_vk_image_backing.h
new file mode 100644
index 00000000000..9ac5fc87f56
--- /dev/null
+++ b/chromium/gpu/command_buffer/service/external_vk_image_backing.h
@@ -0,0 +1,108 @@
+// Copyright 2019 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef GPU_COMMAND_BUFFER_SERVICE_EXTERNAL_VK_IMAGE_BACKING_H_
+#define GPU_COMMAND_BUFFER_SERVICE_EXTERNAL_VK_IMAGE_BACKING_H_
+
+#include <memory>
+
+#include "components/viz/common/gpu/vulkan_context_provider.h"
+#include "gpu/command_buffer/service/shared_context_state.h"
+#include "gpu/command_buffer/service/shared_image_backing.h"
+#include "gpu/command_buffer/service/texture_manager.h"
+#include "gpu/vulkan/vulkan_device_queue.h"
+
+namespace gpu {
+
+class ExternalVkImageBacking : public SharedImageBacking {
+ public:
+ ExternalVkImageBacking(const Mailbox& mailbox,
+ viz::ResourceFormat format,
+ const gfx::Size& size,
+ const gfx::ColorSpace& color_space,
+ uint32_t usage,
+ SharedContextState* context_state,
+ VkImage image,
+ VkDeviceMemory memory,
+ size_t memory_size,
+ VkFormat vk_format);
+ ~ExternalVkImageBacking() override;
+
+ VkImage image() { return image_; }
+ VkDeviceMemory memory() { return memory_; }
+ size_t memory_size() { return memory_size_; }
+ VkFormat vk_format() { return vk_format_; }
+ SharedContextState* context_state() { return context_state_; }
+ VkDevice device() {
+ return context_state_->vk_context_provider()
+ ->GetDeviceQueue()
+ ->GetVulkanDevice();
+ }
+ using SharedImageBacking::have_context;
+
+ VkSemaphore CreateExternalVkSemaphore();
+
+ // Notifies the backing that a Vulkan read will start. Return false if there
+ // is currently a write in progress. Otherwise, returns true and provides the
+ // latest semaphore (if any) that GL has signalled after ending its write
+ // access if it has not been waited on yet.
+ bool BeginVulkanReadAccess(VkSemaphore* gl_write_finished_semaphore);
+
+ // Notifies the backing that a Vulkan read has ended. The representation must
+ // provide a semaphore that has been signalled at the end of the read access.
+ void EndVulkanReadAccess(VkSemaphore vulkan_read_finished_semaphore);
+
+ // Notifies the backing that a GL read will start. Return false if there is
+ // currently any other read or write in progress. Otherwise, returns true and
+ // provides the latest semaphore (if any) that Vulkan has signalled after
+ // ending its read access if it has not been waited on yet.
+ bool BeginGlWriteAccess(VkSemaphore* vulkan_read_finished_semaphore);
+
+ // Notifies the backing that a GL write has ended. The representation must
+ // provide a semaphore that has been signalled at the end of the write access.
+ void EndGlWriteAccess(VkSemaphore gl_write_finished_semaphore);
+
+ // TODO(crbug.com/932214): Once Vulkan writes are possible, these methods
+ // should also take/provide semaphores. There should also be a
+ // BeginVulkanWriteAccess and EndVulkanWriteAccess.
+ bool BeginGlReadAccess();
+ void EndGlReadAccess();
+
+ // SharedImageBacking implementation.
+ bool IsCleared() const override;
+ void SetCleared() override;
+ void Update() override;
+ void Destroy() override;
+ bool ProduceLegacyMailbox(MailboxManager* mailbox_manager) override;
+
+ protected:
+ // SharedImageBacking implementation.
+ std::unique_ptr<SharedImageRepresentationGLTexture> ProduceGLTexture(
+ SharedImageManager* manager,
+ MemoryTypeTracker* tracker) override;
+ std::unique_ptr<SharedImageRepresentationGLTexturePassthrough>
+ ProduceGLTexturePassthrough(SharedImageManager* manager,
+ MemoryTypeTracker* tracker) override;
+ std::unique_ptr<SharedImageRepresentationSkia> ProduceSkia(
+ SharedImageManager* manager,
+ MemoryTypeTracker* tracker) override;
+
+ private:
+ SharedContextState* const context_state_;
+ VkImage image_;
+ VkDeviceMemory memory_;
+ VkSemaphore vulkan_read_finished_semaphore_ = VK_NULL_HANDLE;
+ VkSemaphore gl_write_finished_semaphore_ = VK_NULL_HANDLE;
+ size_t memory_size_;
+ bool is_cleared_ = false;
+ VkFormat vk_format_;
+ bool is_write_in_progress_ = false;
+ uint32_t reads_in_progress_ = 0;
+
+ DISALLOW_COPY_AND_ASSIGN(ExternalVkImageBacking);
+};
+
+} // namespace gpu
+
+#endif // GPU_COMMAND_BUFFER_SERVICE_EXTERNAL_VK_IMAGE_BACKING_H_
diff --git a/chromium/gpu/command_buffer/service/external_vk_image_factory.cc b/chromium/gpu/command_buffer/service/external_vk_image_factory.cc
new file mode 100644
index 00000000000..08088e84fee
--- /dev/null
+++ b/chromium/gpu/command_buffer/service/external_vk_image_factory.cc
@@ -0,0 +1,212 @@
+// Copyright 2019 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "gpu/command_buffer/service/external_vk_image_factory.h"
+
+#include <unistd.h>
+
+#include "components/viz/common/gpu/vulkan_context_provider.h"
+#include "components/viz/common/resources/resource_format_utils.h"
+#include "gpu/command_buffer/service/external_vk_image_backing.h"
+#include "gpu/command_buffer/service/shared_image_representation.h"
+#include "gpu/vulkan/vulkan_command_buffer.h"
+#include "gpu/vulkan/vulkan_command_pool.h"
+#include "gpu/vulkan/vulkan_device_queue.h"
+#include "gpu/vulkan/vulkan_function_pointers.h"
+#include "gpu/vulkan/vulkan_implementation.h"
+#include "third_party/skia/include/core/SkPromiseImageTexture.h"
+#include "third_party/skia/include/gpu/GrBackendSemaphore.h"
+
+namespace gpu {
+
+ExternalVkImageFactory::ExternalVkImageFactory(
+ SharedContextState* context_state)
+ : context_state_(context_state) {}
+
+ExternalVkImageFactory::~ExternalVkImageFactory() {
+ if (command_pool_) {
+ command_pool_->Destroy();
+ command_pool_.reset();
+ }
+}
+
+std::unique_ptr<SharedImageBacking> ExternalVkImageFactory::CreateSharedImage(
+ const Mailbox& mailbox,
+ viz::ResourceFormat format,
+ const gfx::Size& size,
+ const gfx::ColorSpace& color_space,
+ uint32_t usage) {
+ VkDevice device = context_state_->vk_context_provider()
+ ->GetDeviceQueue()
+ ->GetVulkanDevice();
+
+ VkFormat vk_format = ToVkFormat(format);
+ VkResult result;
+ VkImage image;
+ result = CreateExternalVkImage(vk_format, size, &image);
+ if (result != VK_SUCCESS) {
+ LOG(ERROR) << "Failed to create external VkImage: " << result;
+ return nullptr;
+ }
+
+ VkMemoryRequirements requirements;
+ vkGetImageMemoryRequirements(device, image, &requirements);
+
+ if (!requirements.memoryTypeBits) {
+ LOG(ERROR) << "Unable to find appropriate memory type for external VkImage";
+ vkDestroyImage(device, image, nullptr);
+ return nullptr;
+ }
+
+ constexpr uint32_t kInvalidTypeIndex = 32;
+ uint32_t type_index = kInvalidTypeIndex;
+ for (int i = 0; i < 32; i++) {
+ if ((1u << i) & requirements.memoryTypeBits) {
+ type_index = i;
+ break;
+ }
+ }
+ DCHECK_NE(kInvalidTypeIndex, type_index);
+
+ VkExportMemoryAllocateInfoKHR external_info;
+ external_info.sType = VK_STRUCTURE_TYPE_EXPORT_MEMORY_ALLOCATE_INFO_KHR;
+ external_info.pNext = nullptr;
+ external_info.handleTypes = VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT_KHR;
+
+ VkMemoryAllocateInfo mem_alloc_info;
+ mem_alloc_info.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO;
+ mem_alloc_info.pNext = &external_info;
+ mem_alloc_info.allocationSize = requirements.size;
+ mem_alloc_info.memoryTypeIndex = type_index;
+
+ VkDeviceMemory memory;
+ // TODO(crbug.com/932286): Allocating a separate piece of memory for every
+ // VkImage might have too much overhead. It is recommended that one large
+ // VkDeviceMemory be sub-allocated to multiple VkImages instead.
+ result = vkAllocateMemory(device, &mem_alloc_info, nullptr, &memory);
+ if (result != VK_SUCCESS) {
+ LOG(ERROR) << "Failed to allocate memory for external VkImage: " << result;
+ vkDestroyImage(device, image, nullptr);
+ return nullptr;
+ }
+
+ result = vkBindImageMemory(device, image, memory, 0);
+ if (result != VK_SUCCESS) {
+ LOG(ERROR) << "Failed to bind memory to external VkImage: " << result;
+ vkFreeMemory(device, memory, nullptr);
+ vkDestroyImage(device, image, nullptr);
+ return nullptr;
+ }
+
+ TransitionToColorAttachment(image);
+
+ return std::make_unique<ExternalVkImageBacking>(
+ mailbox, format, size, color_space, usage, context_state_, image, memory,
+ requirements.size, vk_format);
+}
+
+std::unique_ptr<SharedImageBacking> ExternalVkImageFactory::CreateSharedImage(
+ const Mailbox& mailbox,
+ viz::ResourceFormat format,
+ const gfx::Size& size,
+ const gfx::ColorSpace& color_space,
+ uint32_t usage,
+ base::span<const uint8_t> pixel_data) {
+ NOTIMPLEMENTED();
+ return nullptr;
+}
+
+std::unique_ptr<SharedImageBacking> ExternalVkImageFactory::CreateSharedImage(
+ const Mailbox& mailbox,
+ int client_id,
+ gfx::GpuMemoryBufferHandle handle,
+ gfx::BufferFormat format,
+ SurfaceHandle surface_handle,
+ const gfx::Size& size,
+ const gfx::ColorSpace& color_space,
+ uint32_t usage) {
+ // GpuMemoryBuffers are not supported on Linux.
+ NOTREACHED();
+ return nullptr;
+}
+
+VkResult ExternalVkImageFactory::CreateExternalVkImage(VkFormat format,
+ const gfx::Size& size,
+ VkImage* image) {
+ VkDevice device = context_state_->vk_context_provider()
+ ->GetDeviceQueue()
+ ->GetVulkanDevice();
+
+ VkExternalMemoryImageCreateInfoKHR external_info;
+ external_info.sType = VK_STRUCTURE_TYPE_EXTERNAL_MEMORY_IMAGE_CREATE_INFO_KHR;
+ external_info.pNext = nullptr;
+ external_info.handleTypes = VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT_KHR;
+
+ VkImageCreateInfo create_info;
+ create_info.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO;
+ create_info.pNext = &external_info;
+ create_info.flags = 0;
+ create_info.imageType = VK_IMAGE_TYPE_2D;
+ create_info.format = format;
+ create_info.extent = {size.width(), size.height(), 1};
+ create_info.mipLevels = 1;
+ create_info.arrayLayers = 1;
+ create_info.samples = VK_SAMPLE_COUNT_1_BIT;
+ create_info.tiling = VK_IMAGE_TILING_OPTIMAL;
+ create_info.usage =
+ VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | VK_IMAGE_USAGE_SAMPLED_BIT;
+ create_info.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
+ create_info.queueFamilyIndexCount = context_state_->vk_context_provider()
+ ->GetDeviceQueue()
+ ->GetVulkanQueueIndex();
+ create_info.pQueueFamilyIndices = nullptr;
+ create_info.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
+
+ return vkCreateImage(device, &create_info, nullptr, image);
+}
+
+void ExternalVkImageFactory::TransitionToColorAttachment(VkImage image) {
+ if (!command_pool_) {
+ command_pool_ = context_state_->vk_context_provider()
+ ->GetDeviceQueue()
+ ->CreateCommandPool();
+ }
+ std::unique_ptr<VulkanCommandBuffer> command_buffer =
+ command_pool_->CreatePrimaryCommandBuffer();
+ CHECK(command_buffer->Initialize());
+ {
+ ScopedSingleUseCommandBufferRecorder recorder(*command_buffer);
+ VkImageMemoryBarrier image_memory_barrier;
+ image_memory_barrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER;
+ image_memory_barrier.pNext = nullptr;
+ image_memory_barrier.srcAccessMask = 0;
+ image_memory_barrier.dstAccessMask = VK_ACCESS_COLOR_ATTACHMENT_READ_BIT |
+ VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
+ image_memory_barrier.oldLayout = VK_IMAGE_LAYOUT_UNDEFINED;
+ image_memory_barrier.newLayout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL;
+ image_memory_barrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
+ image_memory_barrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
+ image_memory_barrier.image = image;
+ image_memory_barrier.subresourceRange.aspectMask =
+ VK_IMAGE_ASPECT_COLOR_BIT;
+ image_memory_barrier.subresourceRange.baseMipLevel = 0;
+ image_memory_barrier.subresourceRange.levelCount = 1;
+ image_memory_barrier.subresourceRange.baseArrayLayer = 0;
+ image_memory_barrier.subresourceRange.layerCount = 1;
+ vkCmdPipelineBarrier(recorder.handle(), VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT,
+ VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, 0, 0,
+ nullptr, 0, nullptr, 1, &image_memory_barrier);
+ }
+ command_buffer->Submit(0, nullptr, 0, nullptr);
+ // TODO(crbug.com/932260): Remove blocking call to VkQueueWaitIdle once we
+ // have a better approach for determining when |command_buffer| is safe to
+ // destroy.
+ vkQueueWaitIdle(context_state_->vk_context_provider()
+ ->GetDeviceQueue()
+ ->GetVulkanQueue());
+ command_buffer->Destroy();
+ command_buffer.reset();
+}
+
+} // namespace gpu
diff --git a/chromium/gpu/command_buffer/service/external_vk_image_factory.h b/chromium/gpu/command_buffer/service/external_vk_image_factory.h
new file mode 100644
index 00000000000..accad99c61b
--- /dev/null
+++ b/chromium/gpu/command_buffer/service/external_vk_image_factory.h
@@ -0,0 +1,66 @@
+// Copyright 2019 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef GPU_COMMAND_BUFFER_SERVICE_EXTERNAL_VK_IMAGE_FACTORY_H_
+#define GPU_COMMAND_BUFFER_SERVICE_EXTERNAL_VK_IMAGE_FACTORY_H_
+
+#include <vulkan/vulkan.h>
+#include <memory>
+
+#include "gpu/command_buffer/service/shared_image_backing_factory.h"
+
+namespace gpu {
+class SharedContextState;
+class VulkanCommandPool;
+
+// This class is the SharedImageBackingFactory that is used on Linux when
+// Vulkan/GL interoperability is required. The created backing is a VkImage that
+// can be exported out of Vulkan and be used in GL. Synchronization between
+// Vulkan and GL is done using VkSemaphores that are created with special flags
+// that allow it to be exported out and shared with GL.
+class ExternalVkImageFactory : public SharedImageBackingFactory {
+ public:
+ ExternalVkImageFactory(SharedContextState* context_state);
+ ~ExternalVkImageFactory() override;
+
+ // SharedImageBackingFactory implementation.
+ std::unique_ptr<SharedImageBacking> CreateSharedImage(
+ const Mailbox& mailbox,
+ viz::ResourceFormat format,
+ const gfx::Size& size,
+ const gfx::ColorSpace& color_space,
+ uint32_t usage) override;
+ std::unique_ptr<SharedImageBacking> CreateSharedImage(
+ const Mailbox& mailbox,
+ viz::ResourceFormat format,
+ const gfx::Size& size,
+ const gfx::ColorSpace& color_space,
+ uint32_t usage,
+ base::span<const uint8_t> pixel_data) override;
+ std::unique_ptr<SharedImageBacking> CreateSharedImage(
+ const Mailbox& mailbox,
+ int client_id,
+ gfx::GpuMemoryBufferHandle handle,
+ gfx::BufferFormat format,
+ SurfaceHandle surface_handle,
+ const gfx::Size& size,
+ const gfx::ColorSpace& color_space,
+ uint32_t usage) override;
+
+ private:
+ VkResult CreateExternalVkImage(VkFormat format,
+ const gfx::Size& size,
+ VkImage* image);
+
+ void TransitionToColorAttachment(VkImage image);
+
+ SharedContextState* const context_state_;
+ std::unique_ptr<VulkanCommandPool> command_pool_;
+
+ DISALLOW_COPY_AND_ASSIGN(ExternalVkImageFactory);
+};
+
+} // namespace gpu
+
+#endif // GPU_COMMAND_BUFFER_SERVICE_EXTERNAL_VK_IMAGE_FACTORY_H_
diff --git a/chromium/gpu/command_buffer/service/external_vk_image_gl_representation.cc b/chromium/gpu/command_buffer/service/external_vk_image_gl_representation.cc
new file mode 100644
index 00000000000..f906582c2b3
--- /dev/null
+++ b/chromium/gpu/command_buffer/service/external_vk_image_gl_representation.cc
@@ -0,0 +1,164 @@
+// Copyright 2019 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "gpu/command_buffer/service/external_vk_image_gl_representation.h"
+
+#include "gpu/vulkan/vulkan_function_pointers.h"
+
+#define GL_LAYOUT_COLOR_ATTACHMENT_EXT 0x958E
+#define GL_HANDLE_TYPE_OPAQUE_FD_EXT 0x9586
+
+namespace gpu {
+
+ExternalVkImageGlRepresentation::ExternalVkImageGlRepresentation(
+ SharedImageManager* manager,
+ SharedImageBacking* backing,
+ MemoryTypeTracker* tracker,
+ gles2::Texture* texture,
+ GLuint texture_service_id)
+ : SharedImageRepresentationGLTexture(manager, backing, tracker),
+ texture_(texture),
+ texture_service_id_(texture_service_id) {}
+
+ExternalVkImageGlRepresentation::~ExternalVkImageGlRepresentation() {
+ texture_->RemoveLightweightRef(backing_impl()->have_context());
+}
+
+gles2::Texture* ExternalVkImageGlRepresentation::GetTexture() {
+ return texture_;
+}
+
+bool ExternalVkImageGlRepresentation::BeginAccess(GLenum mode) {
+ // There should not be multiple accesses in progress on the same
+ // representation.
+ if (current_access_mode_) {
+ LOG(ERROR) << "BeginAccess called on ExternalVkImageGlRepresentation before"
+ << " the previous access ended.";
+ return false;
+ }
+
+ if (mode == GL_SHARED_IMAGE_ACCESS_MODE_READ_CHROMIUM) {
+ // If there is a write in progress, we can't start a read.
+ if (!backing_impl()->BeginGlReadAccess())
+ return false;
+ current_access_mode_ = mode;
+ // In reading mode, there is no need to wait on a semaphore because Vulkan
+ // never writes into the backing.
+ // TODO(crbug.com/932214): Implement synchronization when Vulkan can also
+ // write into the backing.
+ return true;
+ }
+
+ DCHECK_EQ(static_cast<GLenum>(GL_SHARED_IMAGE_ACCESS_MODE_READWRITE_CHROMIUM),
+ mode);
+
+ // See if it is possible to begin writing, i.e. there is no other read or
+ // write in progress. If so, take the latest semaphore that Vulkan signalled
+ // after reading.
+ VkSemaphore vulkan_read_finished_semaphore;
+ if (!backing_impl()->BeginGlWriteAccess(&vulkan_read_finished_semaphore))
+ return false;
+
+ if (vulkan_read_finished_semaphore != VK_NULL_HANDLE) {
+ GLuint gl_semaphore =
+ ImportVkSemaphoreIntoGL(vulkan_read_finished_semaphore);
+ if (!gl_semaphore) {
+ // TODO(crbug.com/932260): This call is safe because we previously called
+ // vkQueueWaitIdle in ExternalVkImageSkiaRepresentation::EndReadAccess.
+ // However, vkQueueWaitIdle is a blocking call and should eventually be
+ // replaced with better alternatives.
+ vkDestroySemaphore(backing_impl()->device(),
+ vulkan_read_finished_semaphore, nullptr);
+ return false;
+ }
+ GLenum src_layout = GL_LAYOUT_COLOR_ATTACHMENT_EXT;
+ api()->glWaitSemaphoreEXTFn(gl_semaphore, 0, nullptr, 1,
+ &texture_service_id_, &src_layout);
+ api()->glDeleteSemaphoresEXTFn(1, &gl_semaphore);
+ // TODO(crbug.com/932260): This call is safe because we previously called
+ // vkQueueWaitIdle in ExternalVkImageSkiaRepresentation::EndReadAccess.
+ vkDestroySemaphore(backing_impl()->device(), vulkan_read_finished_semaphore,
+ nullptr);
+ }
+ current_access_mode_ = mode;
+ return true;
+}
+
+void ExternalVkImageGlRepresentation::EndAccess() {
+ if (!current_access_mode_) {
+ // TODO(crbug.com/933452): We should be able to handle this failure more
+ // gracefully rather than shutting down the whole process.
+ LOG(FATAL) << "EndAccess called on ExternalVkImageGlRepresentation before "
+ << "BeginAccess";
+ return;
+ }
+
+ if (current_access_mode_ == GL_SHARED_IMAGE_ACCESS_MODE_READ_CHROMIUM) {
+ // Since Vulkan never writes into the backing, there is no need to signal
+ // that GL is done reading.
+ // TODO(crbug.com/932214): Implement synchronization when Vulkan can also
+ // write into the backing.
+ backing_impl()->EndGlReadAccess();
+ current_access_mode_ = 0;
+ return;
+ }
+
+ DCHECK_EQ(static_cast<GLenum>(GL_SHARED_IMAGE_ACCESS_MODE_READWRITE_CHROMIUM),
+ current_access_mode_);
+ current_access_mode_ = 0;
+
+ VkSemaphore gl_write_finished_semaphore =
+ backing_impl()->CreateExternalVkSemaphore();
+ if (!gl_write_finished_semaphore) {
+ // TODO(crbug.com/933452): We should be able to handle this failure more
+ // gracefully rather than shutting down the whole process.
+ LOG(FATAL) << "Unable to create a VkSemaphore in "
+ << "ExternalVkImageGlRepresentation for synchronization with "
+ << "Vulkan";
+ return;
+ }
+ GLuint gl_semaphore = ImportVkSemaphoreIntoGL(gl_write_finished_semaphore);
+ if (!gl_semaphore) {
+ // It is safe to destroy the VkSemaphore here because it has not been sent
+ // to a VkQueue before.
+ vkDestroySemaphore(backing_impl()->device(), gl_write_finished_semaphore,
+ nullptr);
+ // TODO(crbug.com/933452): We should be able to handle this failure more
+ // gracefully rather than shutting down the whole process.
+ LOG(FATAL) << "Unable to export VkSemaphore into GL in "
+ << "ExternalVkImageGlRepresentation for synchronization with "
+ << "Vulkan";
+ return;
+ }
+ GLenum dst_layout = GL_LAYOUT_COLOR_ATTACHMENT_EXT;
+ api()->glSignalSemaphoreEXTFn(gl_semaphore, 0, nullptr, 1,
+ &texture_service_id_, &dst_layout);
+ api()->glDeleteSemaphoresEXTFn(1, &gl_semaphore);
+ backing_impl()->EndGlWriteAccess(gl_write_finished_semaphore);
+}
+
+GLuint ExternalVkImageGlRepresentation::ImportVkSemaphoreIntoGL(
+ VkSemaphore semaphore) {
+ VkSemaphoreGetFdInfoKHR info;
+ info.sType = VK_STRUCTURE_TYPE_SEMAPHORE_GET_FD_INFO_KHR;
+ info.pNext = nullptr;
+ info.semaphore = semaphore;
+ info.handleType = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT_KHR;
+
+ int fd = -1;
+ bool result = vkGetSemaphoreFdKHR(backing_impl()->device(), &info, &fd);
+ if (result != VK_SUCCESS) {
+ LOG(ERROR) << "vkGetSemaphoreFdKHR failed : " << result;
+ return 0;
+ }
+
+ gl::GLApi* api = gl::g_current_gl_context;
+ GLuint gl_semaphore;
+ api->glGenSemaphoresEXTFn(1, &gl_semaphore);
+ api->glImportSemaphoreFdEXTFn(gl_semaphore, GL_HANDLE_TYPE_OPAQUE_FD_EXT, fd);
+
+ return gl_semaphore;
+}
+
+} // namespace gpu
diff --git a/chromium/gpu/command_buffer/service/external_vk_image_gl_representation.h b/chromium/gpu/command_buffer/service/external_vk_image_gl_representation.h
new file mode 100644
index 00000000000..580cbbe3b05
--- /dev/null
+++ b/chromium/gpu/command_buffer/service/external_vk_image_gl_representation.h
@@ -0,0 +1,46 @@
+// Copyright 2019 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef GPU_COMMAND_BUFFER_SERVICE_EXTERNAL_VK_IMAGE_GL_REPRESENTATION_H_
+#define GPU_COMMAND_BUFFER_SERVICE_EXTERNAL_VK_IMAGE_GL_REPRESENTATION_H_
+
+#include "gpu/command_buffer/service/external_vk_image_backing.h"
+#include "gpu/command_buffer/service/shared_image_representation.h"
+
+namespace gpu {
+
+class ExternalVkImageGlRepresentation
+ : public SharedImageRepresentationGLTexture {
+ public:
+ ExternalVkImageGlRepresentation(SharedImageManager* manager,
+ SharedImageBacking* backing,
+ MemoryTypeTracker* tracker,
+ gles2::Texture* texture,
+ GLuint texture_service_id);
+ ~ExternalVkImageGlRepresentation() override;
+
+ // SharedImageRepresentationGLTexture implementation.
+ gles2::Texture* GetTexture() override;
+ bool BeginAccess(GLenum mode) override;
+ void EndAccess() override;
+
+ private:
+ ExternalVkImageBacking* backing_impl() {
+ return static_cast<ExternalVkImageBacking*>(backing());
+ }
+
+ gl::GLApi* api() { return gl::g_current_gl_context; }
+
+ GLuint ImportVkSemaphoreIntoGL(VkSemaphore semaphore);
+
+ gles2::Texture* texture_;
+ GLuint texture_service_id_;
+ GLenum current_access_mode_ = 0;
+
+ DISALLOW_COPY_AND_ASSIGN(ExternalVkImageGlRepresentation);
+};
+
+} // namespace gpu
+
+#endif // GPU_COMMAND_BUFFER_SERVICE_EXTERNAL_VK_IMAGE_GL_REPRESENTATION_H_
diff --git a/chromium/gpu/command_buffer/service/external_vk_image_skia_representation.cc b/chromium/gpu/command_buffer/service/external_vk_image_skia_representation.cc
new file mode 100644
index 00000000000..0a89358fa53
--- /dev/null
+++ b/chromium/gpu/command_buffer/service/external_vk_image_skia_representation.cc
@@ -0,0 +1,142 @@
+// Copyright 2019 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "gpu/command_buffer/service/external_vk_image_skia_representation.h"
+
+#include "gpu/vulkan/vulkan_function_pointers.h"
+#include "third_party/skia/include/core/SkPromiseImageTexture.h"
+#include "third_party/skia/include/gpu/GrBackendSemaphore.h"
+
+namespace gpu {
+
+ExternalVkImageSkiaRepresentation::ExternalVkImageSkiaRepresentation(
+ SharedImageManager* manager,
+ SharedImageBacking* backing,
+ MemoryTypeTracker* tracker)
+ : SharedImageRepresentationSkia(manager, backing, tracker) {}
+
+ExternalVkImageSkiaRepresentation::~ExternalVkImageSkiaRepresentation() =
+ default;
+
+sk_sp<SkSurface> ExternalVkImageSkiaRepresentation::BeginWriteAccess(
+ GrContext* gr_context,
+ int final_msaa_count,
+ const SkSurfaceProps& surface_props) {
+ // TODO(crbug.com/932214): Implement this method.
+ NOTIMPLEMENTED();
+ return nullptr;
+}
+
+void ExternalVkImageSkiaRepresentation::EndWriteAccess(
+ sk_sp<SkSurface> surface) {
+ // TODO(crbug.com/932214): Implement this method.
+ NOTIMPLEMENTED();
+}
+
+sk_sp<SkPromiseImageTexture> ExternalVkImageSkiaRepresentation::BeginReadAccess(
+ SkSurface* sk_surface) {
+ DCHECK(!read_surface_) << "Previous read hasn't ended yet";
+
+ VkSemaphore gl_write_finished_semaphore;
+ // This can return false if another write access is currently in progress.
+ if (!backing_impl()->BeginVulkanReadAccess(&gl_write_finished_semaphore))
+ return nullptr;
+
+ if (gl_write_finished_semaphore != VK_NULL_HANDLE) {
+ // Submit wait semaphore to the queue. Note that Skia uses the same queue
+ // exposed by vk_queue(), so this will work due to Vulkan queue ordering.
+ if (!vk_implementation()->SubmitWaitSemaphore(
+ vk_queue(), gl_write_finished_semaphore)) {
+ LOG(ERROR) << "Failed to wait on semaphore";
+ // Since the semaphore was not actually sent to the queue, it is safe to
+ // destroy it here.
+ vkDestroySemaphore(vk_device(), gl_write_finished_semaphore, nullptr);
+ return nullptr;
+ }
+ }
+
+ // Create backend texture from the VkImage.
+ GrVkAlloc alloc = {backing_impl()->memory(), 0, backing_impl()->memory_size(),
+ 0};
+ GrVkImageInfo vk_info = {
+ backing_impl()->image(), alloc,
+ VK_IMAGE_TILING_OPTIMAL, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL,
+ backing_impl()->vk_format(), 1 /* levelCount */};
+ // TODO(bsalomon): Determine whether it makes sense to attempt to reuse this
+ // if the vk_info stays the same on subsequent calls.
+ auto promise_texture = SkPromiseImageTexture::Make(
+ GrBackendTexture(size().width(), size().height(), vk_info));
+
+ // Cache the sk surface in the representation so that it can be used in the
+ // EndReadAccess.
+ read_surface_ = sk_sp<SkSurface>(sk_surface);
+
+ // TODO(samans): This function should take a sk_sp<SkSurface> instead of a
+ // SkSurface* so we don't have to manually add a reference here.
+ read_surface_->ref();
+
+ // TODO(crbug.com/932260): Need to do better semaphore cleanup management.
+ // Waiting on device to be idle to delete the semaphore is costly. Instead use
+ // a fence to get signal when semaphore submission is done.
+ if (gl_write_finished_semaphore) {
+ VkResult result = vkQueueWaitIdle(vk_queue());
+ if (result != VK_SUCCESS) {
+ LOG(ERROR) << "vkQueueWaitIdle failed: " << result;
+ return nullptr;
+ }
+ vkDestroySemaphore(vk_device(), gl_write_finished_semaphore, nullptr);
+ }
+
+ return promise_texture;
+}
+
+void ExternalVkImageSkiaRepresentation::EndReadAccess() {
+ DCHECK(read_surface_) << "EndReadAccess is called before BeginReadAccess";
+
+ VkSemaphore vulkan_write_finished_semaphore =
+ backing_impl()->CreateExternalVkSemaphore();
+
+ if (vulkan_write_finished_semaphore == VK_NULL_HANDLE) {
+ // TODO(crbug.com/933452): We should be able to handle this failure more
+ // gracefully rather than shutting down the whole process.
+ LOG(FATAL) << "Unable to create a VkSemaphore in "
+ << "ExternalVkImageSkiaRepresentation";
+ read_surface_ = nullptr;
+ return;
+ }
+
+ GrBackendSemaphore gr_semaphore;
+ gr_semaphore.initVulkan(vulkan_write_finished_semaphore);
+
+ // If GrSemaphoresSubmitted::kNo is returned, the GPU back-end did not
+ // create or add any semaphores to signal on the GPU; the caller should not
+ // instruct the GPU to wait on any of the semaphores.
+ if (read_surface_->flushAndSignalSemaphores(1, &gr_semaphore) ==
+ GrSemaphoresSubmitted::kNo) {
+ // TODO(crbug.com/933452): We should be able to handle this failure more
+ // gracefully rather than shutting down the whole process.
+ LOG(FATAL) << "Unable to signal VkSemaphore in "
+ "ExternalVkImageSkiaRepresentation";
+ vkDestroySemaphore(vk_device(), vulkan_write_finished_semaphore, nullptr);
+ read_surface_ = nullptr;
+ return;
+ }
+
+ read_surface_ = nullptr;
+
+ // Wait for the queue to get idle, so that when
+ // |vulkan_write_finished_semaphore| gets destroyed, we can guarantee it's not
+ // associated with any unexecuted command.
+ VkResult result = vkQueueWaitIdle(vk_queue());
+ if (result != VK_SUCCESS) {
+ // TODO(crbug.com/933452): We should be able to handle this failure more
+ // gracefully rather than shutting down the whole process.
+ LOG(FATAL) << "vkQueueWaitIdle failed: " << result;
+ return;
+ }
+
+ backing_impl()->EndVulkanReadAccess(vulkan_write_finished_semaphore);
+}
+
+} // namespace gpu
diff --git a/chromium/gpu/command_buffer/service/external_vk_image_skia_representation.h b/chromium/gpu/command_buffer/service/external_vk_image_skia_representation.h
new file mode 100644
index 00000000000..6e7de12998a
--- /dev/null
+++ b/chromium/gpu/command_buffer/service/external_vk_image_skia_representation.h
@@ -0,0 +1,66 @@
+// Copyright 2019 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef GPU_COMMAND_BUFFER_SERVICE_EXTERNAL_VK_IMAGE_SKIA_REPRESENTATION_H_
+#define GPU_COMMAND_BUFFER_SERVICE_EXTERNAL_VK_IMAGE_SKIA_REPRESENTATION_H_
+
+#include "components/viz/common/gpu/vulkan_context_provider.h"
+#include "components/viz/common/resources/resource_format_utils.h"
+#include "gpu/command_buffer/service/external_vk_image_backing.h"
+#include "gpu/command_buffer/service/shared_image_representation.h"
+#include "gpu/vulkan/vulkan_device_queue.h"
+#include "gpu/vulkan/vulkan_implementation.h"
+
+namespace gpu {
+
+class ExternalVkImageSkiaRepresentation : public SharedImageRepresentationSkia {
+ public:
+ ExternalVkImageSkiaRepresentation(SharedImageManager* manager,
+ SharedImageBacking* backing,
+ MemoryTypeTracker* tracker);
+ ~ExternalVkImageSkiaRepresentation() override;
+
+ // SharedImageRepresentationSkia implementation.
+ sk_sp<SkSurface> BeginWriteAccess(
+ GrContext* gr_context,
+ int final_msaa_count,
+ const SkSurfaceProps& surface_props) override;
+ void EndWriteAccess(sk_sp<SkSurface> surface) override;
+ sk_sp<SkPromiseImageTexture> BeginReadAccess(SkSurface* sk_surface) override;
+ void EndReadAccess() override;
+
+ private:
+ gpu::VulkanImplementation* vk_implementation() {
+ return backing_impl()
+ ->context_state()
+ ->vk_context_provider()
+ ->GetVulkanImplementation();
+ }
+
+ VkDevice vk_device() {
+ return backing_impl()
+ ->context_state()
+ ->vk_context_provider()
+ ->GetDeviceQueue()
+ ->GetVulkanDevice();
+ }
+
+ VkQueue vk_queue() {
+ return backing_impl()
+ ->context_state()
+ ->vk_context_provider()
+ ->GetDeviceQueue()
+ ->GetVulkanQueue();
+ }
+
+ ExternalVkImageBacking* backing_impl() {
+ return static_cast<ExternalVkImageBacking*>(backing());
+ }
+
+ sk_sp<SkSurface> read_surface_ = nullptr;
+};
+
+} // namespace gpu
+
+#endif // GPU_COMMAND_BUFFER_SERVICE_EXTERNAL_VK_IMAGE_SKIA_REPRESENTATION_H_
diff --git a/chromium/gpu/command_buffer/service/feature_info.cc b/chromium/gpu/command_buffer/service/feature_info.cc
index ae7705b8d6a..0716c4f8f1a 100644
--- a/chromium/gpu/command_buffer/service/feature_info.cc
+++ b/chromium/gpu/command_buffer/service/feature_info.cc
@@ -262,15 +262,6 @@ void FeatureInfo::Initialize(ContextType context_type,
disallowed_features_ = disallowed_features;
context_type_ = context_type;
is_passthrough_cmd_decoder_ = is_passthrough_cmd_decoder;
- switch (context_type) {
- case CONTEXT_TYPE_WEBGL1:
- case CONTEXT_TYPE_OPENGLES2:
- break;
- default:
- // https://crbug.com/826509
- workarounds_.use_client_side_arrays_for_stream_buffers = false;
- break;
- }
InitializeFeatures();
initialized_ = true;
}
@@ -335,6 +326,13 @@ void FeatureInfo::EnableCHROMIUMTextureStorageImage() {
}
}
+void FeatureInfo::EnableEXTFloatBlend() {
+ if (!feature_flags_.ext_float_blend) {
+ AddExtensionString("GL_EXT_float_blend");
+ feature_flags_.ext_float_blend = true;
+ }
+}
+
void FeatureInfo::EnableEXTColorBufferFloat() {
if (!ext_color_buffer_float_available_)
return;
@@ -1562,6 +1560,16 @@ void FeatureInfo::InitializeFeatures() {
AddExtensionString("GL_WEBGL_multi_draw_instanced");
}
}
+
+ if (gfx::HasExtension(extensions, "GL_NV_internalformat_sample_query")) {
+ feature_flags_.nv_internalformat_sample_query = true;
+ }
+
+ if (gfx::HasExtension(extensions,
+ "GL_AMD_framebuffer_multisample_advanced")) {
+ feature_flags_.amd_framebuffer_multisample_advanced = true;
+ AddExtensionString("GL_AMD_framebuffer_multisample_advanced");
+ }
}
void FeatureInfo::InitializeFloatAndHalfFloatFeatures(
@@ -1665,6 +1673,14 @@ void FeatureInfo::InitializeFloatAndHalfFloatFeatures(
}
}
+ // Assume all desktop (!gl_version_info_->is_es) supports float blend
+ if (!gl_version_info_->is_es ||
+ gfx::HasExtension(extensions, "GL_EXT_float_blend")) {
+ if (!disallowed_features_.ext_float_blend) {
+ EnableEXTFloatBlend();
+ }
+ }
+
if (may_enable_chromium_color_buffer_float &&
!had_native_chromium_color_buffer_float_ext) {
static_assert(GL_RGBA32F_ARB == GL_RGBA32F &&
diff --git a/chromium/gpu/command_buffer/service/feature_info.h b/chromium/gpu/command_buffer/service/feature_info.h
index 922eb929adf..c5ceec10fac 100644
--- a/chromium/gpu/command_buffer/service/feature_info.h
+++ b/chromium/gpu/command_buffer/service/feature_info.h
@@ -143,6 +143,9 @@ class GPU_GLES2_EXPORT FeatureInfo : public base::RefCounted<FeatureInfo> {
bool khr_robust_buffer_access_behavior = false;
bool webgl_multi_draw = false;
bool webgl_multi_draw_instanced = false;
+ bool nv_internalformat_sample_query = false;
+ bool amd_framebuffer_multisample_advanced = false;
+ bool ext_float_blend = false;
};
FeatureInfo();
@@ -201,6 +204,7 @@ class GPU_GLES2_EXPORT FeatureInfo : public base::RefCounted<FeatureInfo> {
void EnableCHROMIUMTextureStorageImage();
void EnableCHROMIUMColorBufferFloatRGBA();
void EnableCHROMIUMColorBufferFloatRGB();
+ void EnableEXTFloatBlend();
void EnableEXTColorBufferFloat();
void EnableEXTColorBufferHalfFloat();
void EnableOESTextureFloatLinear();
diff --git a/chromium/gpu/command_buffer/service/feature_info_unittest.cc b/chromium/gpu/command_buffer/service/feature_info_unittest.cc
index af214f27ebc..ca61ad0c7c9 100644
--- a/chromium/gpu/command_buffer/service/feature_info_unittest.cc
+++ b/chromium/gpu/command_buffer/service/feature_info_unittest.cc
@@ -173,9 +173,9 @@ static const MockedGLVersionKind kGLVersionKinds[] = {
ES3_on_Version3_2Compatibility
};
-INSTANTIATE_TEST_CASE_P(Service,
- FeatureInfoTest,
- ::testing::ValuesIn(kGLVersionKinds));
+INSTANTIATE_TEST_SUITE_P(Service,
+ FeatureInfoTest,
+ ::testing::ValuesIn(kGLVersionKinds));
TEST_P(FeatureInfoTest, Basic) {
SetupWithoutInit();
@@ -1440,23 +1440,6 @@ TEST_P(FeatureInfoTest, InitializeOES_element_index_uint) {
EXPECT_TRUE(info_->validators()->index_type.IsValid(GL_UNSIGNED_INT));
}
-TEST_P(FeatureInfoTest, InitializeVAOsWithClientSideArrays) {
- gpu::GpuDriverBugWorkarounds workarounds;
- workarounds.use_client_side_arrays_for_stream_buffers = true;
- SetupInitExpectationsWithWorkarounds("GL_OES_vertex_array_object",
- workarounds);
- if (GetContextType() == CONTEXT_TYPE_OPENGLES2) {
- EXPECT_TRUE(info_->workarounds().use_client_side_arrays_for_stream_buffers);
- EXPECT_FALSE(info_->feature_flags().native_vertex_array_object);
- } else { // CONTEXT_TYPE_OPENGLES3
- // We only turn on use_client_side_arrays_for_stream_buffers on ES2
- // contexts. See https://crbug.com/826509.
- EXPECT_FALSE(
- info_->workarounds().use_client_side_arrays_for_stream_buffers);
- EXPECT_TRUE(info_->feature_flags().native_vertex_array_object);
- }
-}
-
TEST_P(FeatureInfoTest, InitializeEXT_blend_minmax) {
SetupInitExpectations("GL_EXT_blend_minmax");
EXPECT_TRUE(gfx::HasExtension(info_->extensions(), "GL_EXT_blend_minmax"));
diff --git a/chromium/gpu/command_buffer/service/framebuffer_manager.cc b/chromium/gpu/command_buffer/service/framebuffer_manager.cc
index e3f79cf764a..dab94578dd5 100644
--- a/chromium/gpu/command_buffer/service/framebuffer_manager.cc
+++ b/chromium/gpu/command_buffer/service/framebuffer_manager.cc
@@ -375,14 +375,14 @@ void FramebufferManager::CreateFramebuffer(
DCHECK(result.second);
}
-Framebuffer::Framebuffer(
- FramebufferManager* manager, GLuint service_id)
+Framebuffer::Framebuffer(FramebufferManager* manager, GLuint service_id)
: manager_(manager),
deleted_(false),
service_id_(service_id),
has_been_bound_(false),
framebuffer_complete_state_count_id_(0),
draw_buffer_type_mask_(0u),
+ draw_buffer_float32_mask_(0u),
draw_buffer_bound_mask_(0u),
adjusted_draw_buffer_bound_mask_(0u),
read_buffer_(GL_COLOR_ATTACHMENT0) {
@@ -639,6 +639,10 @@ bool Framebuffer::HasStencilAttachment() const {
return attachments_.find(GL_STENCIL_ATTACHMENT) != attachments_.end();
}
+bool Framebuffer::HasActiveFloat32ColorAttachment() const {
+ return draw_buffer_float32_mask_ != 0u;
+}
+
GLenum Framebuffer::GetReadBufferInternalFormat() const {
if (read_buffer_ == GL_NONE)
return 0;
@@ -943,6 +947,7 @@ void Framebuffer::UnbindTexture(
void Framebuffer::UpdateDrawBufferMasks() {
draw_buffer_type_mask_ = 0u;
+ draw_buffer_float32_mask_ = 0u;
draw_buffer_bound_mask_ = 0u;
for (uint32_t index = 0; index < manager_->max_color_attachments_; ++index) {
GLenum draw_buffer = draw_buffers_[index];
@@ -964,6 +969,10 @@ void Framebuffer::UpdateDrawBufferMasks() {
size_t shift_bits = index * 2;
draw_buffer_type_mask_ |= base_type << shift_bits;
draw_buffer_bound_mask_ |= 0x3 << shift_bits;
+
+ if (GLES2Util::IsFloat32Format(internal_format)) {
+ draw_buffer_float32_mask_ |= 0x3 << shift_bits;
+ }
}
}
diff --git a/chromium/gpu/command_buffer/service/framebuffer_manager.h b/chromium/gpu/command_buffer/service/framebuffer_manager.h
index 19303807c2b..f48538ec311 100644
--- a/chromium/gpu/command_buffer/service/framebuffer_manager.h
+++ b/chromium/gpu/command_buffer/service/framebuffer_manager.h
@@ -155,6 +155,7 @@ class GPU_GLES2_EXPORT Framebuffer : public base::RefCounted<Framebuffer> {
bool HasColorAttachment(int index) const;
bool HasDepthAttachment() const;
bool HasStencilAttachment() const;
+ bool HasActiveFloat32ColorAttachment() const;
GLenum GetDepthFormat() const;
GLenum GetStencilFormat() const;
GLenum GetDrawBufferInternalFormat() const;
@@ -295,6 +296,9 @@ class GPU_GLES2_EXPORT Framebuffer : public base::RefCounted<Framebuffer> {
// We have up to 16 draw buffers, each is encoded into 2 bits, total 32 bits:
// the lowest 2 bits for draw buffer 0, the highest 2 bits for draw buffer 15.
uint32_t draw_buffer_type_mask_;
+ // Same layout as above, 0x03 if it's 32bit float color attachment, 0x00 if
+ // not
+ uint32_t draw_buffer_float32_mask_;
// Same layout as above, 2 bits per draw buffer, 0x03 if a draw buffer has a
// bound image, 0x00 if not.
uint32_t draw_buffer_bound_mask_;
diff --git a/chromium/gpu/command_buffer/service/gl_stream_texture_image_stub.cc b/chromium/gpu/command_buffer/service/gl_stream_texture_image_stub.cc
index ad14c9dbeb0..4abec36f840 100644
--- a/chromium/gpu/command_buffer/service/gl_stream_texture_image_stub.cc
+++ b/chromium/gpu/command_buffer/service/gl_stream_texture_image_stub.cc
@@ -16,10 +16,15 @@ gfx::Size GLStreamTextureImageStub::GetSize() {
unsigned GLStreamTextureImageStub::GetInternalFormat() {
return 0;
}
+GLStreamTextureImageStub::BindOrCopy
+GLStreamTextureImageStub::ShouldBindOrCopy() {
+ return BIND;
+}
bool GLStreamTextureImageStub::BindTexImage(unsigned target) {
return false;
}
bool GLStreamTextureImageStub::CopyTexImage(unsigned target) {
+ NOTREACHED();
return false;
}
bool GLStreamTextureImageStub::CopyTexSubImage(unsigned target,
diff --git a/chromium/gpu/command_buffer/service/gl_stream_texture_image_stub.h b/chromium/gpu/command_buffer/service/gl_stream_texture_image_stub.h
index bf55d29fe46..8de116f9a1b 100644
--- a/chromium/gpu/command_buffer/service/gl_stream_texture_image_stub.h
+++ b/chromium/gpu/command_buffer/service/gl_stream_texture_image_stub.h
@@ -17,6 +17,7 @@ class GLStreamTextureImageStub : public GLStreamTextureImage {
// Overridden from GLImage:
gfx::Size GetSize() override;
unsigned GetInternalFormat() override;
+ BindOrCopy ShouldBindOrCopy() override;
bool BindTexImage(unsigned target) override;
void ReleaseTexImage(unsigned target) override {}
bool CopyTexImage(unsigned target) override;
diff --git a/chromium/gpu/command_buffer/service/gl_surface_mock.h b/chromium/gpu/command_buffer/service/gl_surface_mock.h
index 948bc59eb74..7812ab7fe4f 100644
--- a/chromium/gpu/command_buffer/service/gl_surface_mock.h
+++ b/chromium/gpu/command_buffer/service/gl_surface_mock.h
@@ -24,14 +24,13 @@ class GLSurfaceMock : public gl::GLSurface {
ColorSpace color_space,
bool alpha));
MOCK_METHOD0(IsOffscreen, bool());
- MOCK_METHOD1(SwapBuffers,
- gfx::SwapResult(const PresentationCallback& callback));
+ MOCK_METHOD1(SwapBuffers, gfx::SwapResult(PresentationCallback callback));
MOCK_METHOD5(PostSubBuffer,
gfx::SwapResult(int x,
int y,
int width,
int height,
- const PresentationCallback& callback));
+ PresentationCallback callback));
MOCK_METHOD0(SupportsPostSubBuffer, bool());
MOCK_METHOD0(GetSize, gfx::Size());
MOCK_METHOD0(GetHandle, void*());
diff --git a/chromium/gpu/command_buffer/service/gl_utils.cc b/chromium/gpu/command_buffer/service/gl_utils.cc
index 39428c16fb2..25def54d770 100644
--- a/chromium/gpu/command_buffer/service/gl_utils.cc
+++ b/chromium/gpu/command_buffer/service/gl_utils.cc
@@ -903,6 +903,19 @@ CopyTextureMethod GetCopyTextureCHROMIUMMethod(const FeatureInfo* feature_info,
break;
}
+ // CopyTex{Sub}Image2D() from GL_RGB10_A2 has issues on some Android chipsets.
+ if (source_internal_format == GL_RGB10_A2) {
+ if (feature_info->workarounds().disable_copy_tex_image_2d_rgb10_a2_tegra) {
+ if (dest_internal_format == GL_RGBA4)
+ return CopyTextureMethod::DIRECT_DRAW;
+ return CopyTextureMethod::DRAW_AND_COPY;
+ }
+ if (feature_info->workarounds().disable_copy_tex_image_2d_rgb10_a2_adreno &&
+ dest_internal_format != GL_RGB10_A2) {
+ return CopyTextureMethod::DRAW_AND_COPY;
+ }
+ }
+
// CopyTexImage* should not allow internalformat of GL_BGRA_EXT and
// GL_BGRA8_EXT. https://crbug.com/663086.
bool copy_tex_image_format_valid =
@@ -1018,7 +1031,8 @@ bool ValidateCopyTextureCHROMIUMInternalFormats(const FeatureInfo* feature_info,
source_internal_format == GL_BGRA8_EXT ||
source_internal_format == GL_RGB_YCBCR_420V_CHROMIUM ||
source_internal_format == GL_RGB_YCBCR_422_CHROMIUM ||
- source_internal_format == GL_R16_EXT;
+ source_internal_format == GL_R16_EXT ||
+ source_internal_format == GL_RGB10_A2;
if (!valid_source_format) {
*output_error_msg = "invalid source internal format " +
GLES2Util::GetStringEnum(source_internal_format);
diff --git a/chromium/gpu/command_buffer/service/gles2_cmd_copy_texture_chromium.cc b/chromium/gpu/command_buffer/service/gles2_cmd_copy_texture_chromium.cc
index 939bf436f71..a58de9551ac 100644
--- a/chromium/gpu/command_buffer/service/gles2_cmd_copy_texture_chromium.cc
+++ b/chromium/gpu/command_buffer/service/gles2_cmd_copy_texture_chromium.cc
@@ -43,6 +43,7 @@ enum {
S_FORMAT_RGB_YCBCR_420V_CHROMIUM,
S_FORMAT_RGB_YCBCR_422_CHROMIUM,
S_FORMAT_COMPRESSED,
+ S_FORMAT_RGB10_A2,
NUM_S_FORMAT
};
@@ -185,8 +186,12 @@ ShaderId GetFragmentShaderId(bool premultiply_alpha,
case GL_ETC1_RGB8_OES:
sourceFormatIndex = S_FORMAT_COMPRESSED;
break;
+ case GL_RGB10_A2:
+ sourceFormatIndex = S_FORMAT_RGB10_A2;
+ break;
default:
- NOTREACHED();
+ NOTREACHED() << "Invalid source format "
+ << gl::GLEnums::GetStringEnum(source_format);
break;
}
diff --git a/chromium/gpu/command_buffer/service/gles2_cmd_decoder.cc b/chromium/gpu/command_buffer/service/gles2_cmd_decoder.cc
index a3caf17ca6a..87220701d86 100644
--- a/chromium/gpu/command_buffer/service/gles2_cmd_decoder.cc
+++ b/chromium/gpu/command_buffer/service/gles2_cmd_decoder.cc
@@ -19,6 +19,8 @@
#include <unordered_map>
#include <utility>
+#include "base/bind.h"
+#include "base/bind_helpers.h"
#include "base/callback.h"
#include "base/callback_helpers.h"
#include "base/containers/flat_set.h"
@@ -603,6 +605,11 @@ Outputter* GLES2Decoder::outputter() const {
return outputter_;
}
+int GLES2Decoder::GetRasterDecoderId() const {
+ NOTREACHED();
+ return -1;
+}
+
// This class implements GLES2Decoder so we don't have to expose all the GLES2
// cmd stuff to outside this class.
class GLES2DecoderImpl : public GLES2Decoder, public ErrorStateClient {
@@ -785,6 +792,13 @@ class GLES2DecoderImpl : public GLES2Decoder, public ErrorStateClient {
GLsizei width,
GLsizei height,
ForcedMultisampleMode mode);
+ void RenderbufferStorageMultisampleHelperAMD(GLenum target,
+ GLsizei samples,
+ GLsizei storageSamples,
+ GLenum internal_format,
+ GLsizei width,
+ GLsizei height,
+ ForcedMultisampleMode mode);
bool RegenerateRenderbufferIfNeeded(Renderbuffer* renderbuffer);
PathManager* path_manager() { return group_->path_manager(); }
@@ -828,6 +842,7 @@ class GLES2DecoderImpl : public GLES2Decoder, public ErrorStateClient {
bool InitializeShaderTranslator();
void DestroyShaderTranslator();
+ GLint ComputeMaxSamples();
void UpdateCapabilities();
// Helpers for the glGen and glDelete functions.
@@ -974,6 +989,7 @@ class GLES2DecoderImpl : public GLES2Decoder, public ErrorStateClient {
// Get the size (in pixels) of the currently bound frame buffer (either FBO
// or regular back buffer).
gfx::Size GetBoundReadFramebufferSize();
+ gfx::Size GetBoundDrawFramebufferSize();
// Get the service side ID for the bound read framebuffer.
// If it's back buffer, 0 is returned.
@@ -1164,7 +1180,8 @@ class GLES2DecoderImpl : public GLES2Decoder, public ErrorStateClient {
void DoCreateAndConsumeTextureINTERNAL(GLuint client_id,
const volatile GLbyte* key);
void DoCreateAndTexStorage2DSharedImageINTERNAL(GLuint client_id,
- const volatile GLbyte* data);
+ const volatile GLbyte* data,
+ GLenum internal_format);
void DoBeginSharedImageAccessDirectCHROMIUM(GLuint client_id, GLenum mode);
void DoEndSharedImageAccessDirectCHROMIUM(GLuint client_id);
void DoApplyScreenSpaceAntialiasingCHROMIUM();
@@ -1498,7 +1515,8 @@ class GLES2DecoderImpl : public GLES2Decoder, public ErrorStateClient {
GLenum gl_error,
const char* func_name);
- bool CheckBoundDrawFramebufferValid(const char* func_name);
+ bool CheckBoundDrawFramebufferValid(const char* func_name,
+ bool check_float_blending = false);
// Generates |gl_error| if the bound read fbo is incomplete.
bool CheckBoundReadFramebufferValid(const char* func_name, GLenum gl_error);
// This is only used by DoBlitFramebufferCHROMIUM which operates read/draw
@@ -1921,6 +1939,14 @@ class GLES2DecoderImpl : public GLES2Decoder, public ErrorStateClient {
GLenum target, GLsizei samples, GLenum internalformat,
GLsizei width, GLsizei height);
+ // Handler for glRenderbufferStorageMultisampleAdvancedAMD.
+ void DoRenderbufferStorageMultisampleAdvancedAMD(GLenum target,
+ GLsizei samples,
+ GLsizei storageSamples,
+ GLenum internalformat,
+ GLsizei width,
+ GLsizei height);
+
// Handler for glRenderbufferStorageMultisampleEXT
// (multisampled_render_to_texture).
void DoRenderbufferStorageMultisampleEXT(
@@ -1930,12 +1956,23 @@ class GLES2DecoderImpl : public GLES2Decoder, public ErrorStateClient {
// Wrapper for glFenceSync.
GLsync DoFenceSync(GLenum condition, GLbitfield flags);
+ GLsizei InternalFormatSampleCountsHelper(
+ GLenum target,
+ GLenum format,
+ std::vector<GLint>* out_sample_counts);
+
// Common validation for multisample extensions.
bool ValidateRenderbufferStorageMultisample(GLsizei samples,
GLenum internalformat,
GLsizei width,
GLsizei height);
+ // validation for multisample AMD extension.
+ bool ValidateRenderbufferStorageMultisampleAMD(GLsizei samples,
+ GLsizei storageSamples,
+ GLenum internalformat,
+ GLsizei width,
+ GLsizei height);
// Verifies that the currently bound multisample renderbuffer is valid
// Very slow! Only done on platforms with driver bugs that return invalid
// buffers under memory pressure
@@ -3588,12 +3625,8 @@ gpu::ContextResult GLES2DecoderImpl::Initialize(
group_->max_transform_feedback_separate_attribs(), needs_emulation));
if (feature_info_->IsWebGL2OrES3Context()) {
- if (!feature_info_->IsES3Capable()) {
- Destroy(true);
- LOG(ERROR) << "ContextResult::kFatalFailure: "
- "ES3 is blacklisted/disabled/unsupported by driver.";
- return gpu::ContextResult::kFatalFailure;
- }
+ // Verified in ContextGroup.
+ DCHECK(feature_info_->IsES3Capable());
feature_info_->EnableES3Validators();
frag_depth_explicitly_enabled_ = true;
@@ -3614,7 +3647,8 @@ gpu::ContextResult GLES2DecoderImpl::Initialize(
state_.indexed_uniform_buffer_bindings =
base::MakeRefCounted<gles2::IndexedBufferBindingHost>(
group_->max_uniform_buffer_bindings(), GL_UNIFORM_BUFFER,
- needs_emulation);
+ needs_emulation,
+ workarounds().round_down_uniform_bind_buffer_range_size);
state_.indexed_uniform_buffer_bindings->SetIsBound(true);
state_.InitGenericAttribs(group_->max_vertex_attribs());
@@ -4154,7 +4188,7 @@ Capabilities GLES2DecoderImpl::GetCapabilities() {
if (feature_info_->feature_flags().multisampled_render_to_texture ||
feature_info_->feature_flags().chromium_framebuffer_multisample ||
feature_info_->IsWebGL2OrES3Context()) {
- DoGetIntegerv(GL_MAX_SAMPLES, &caps.max_samples, 1);
+ caps.max_samples = ComputeMaxSamples();
}
caps.num_stencil_bits = num_stencil_bits_;
@@ -4271,6 +4305,40 @@ Capabilities GLES2DecoderImpl::GetCapabilities() {
return caps;
}
+GLint GLES2DecoderImpl::ComputeMaxSamples() {
+ GLint max_samples = 0;
+ DoGetIntegerv(GL_MAX_SAMPLES, &max_samples, 1);
+
+ if (feature_info_->IsWebGLContext() &&
+ feature_info_->feature_flags().nv_internalformat_sample_query) {
+ std::vector<GLint> temp;
+
+ auto minWithSamplesForFormat = [&](GLenum internalformat) {
+ temp.clear();
+ InternalFormatSampleCountsHelper(GL_RENDERBUFFER, internalformat, &temp);
+ max_samples = std::min(max_samples, temp[0]);
+ };
+
+ // OpenGL ES 3.0.5, section 4.4.2.2: "Implementations must support creation
+ // of renderbuffers in these required formats with up to the value of
+ // MAX_SAMPLES multisamples, with the exception of signed and unsigned
+ // integer formats."
+
+ // OpenGL ES 3.0.5, section 3.8.3.1
+ minWithSamplesForFormat(GL_RGBA8);
+ minWithSamplesForFormat(GL_SRGB8_ALPHA8);
+ minWithSamplesForFormat(GL_RGB10_A2);
+ minWithSamplesForFormat(GL_RGBA4);
+ minWithSamplesForFormat(GL_RGB5_A1);
+ minWithSamplesForFormat(GL_RGB8);
+ minWithSamplesForFormat(GL_RGB565);
+ minWithSamplesForFormat(GL_RG8);
+ minWithSamplesForFormat(GL_R8);
+ }
+
+ return max_samples;
+}
+
void GLES2DecoderImpl::UpdateCapabilities() {
util_.set_num_compressed_texture_formats(
validators_->compressed_texture_format.GetValues().size());
@@ -4877,7 +4945,9 @@ bool GLES2DecoderImpl::CheckFramebufferValid(
return true;
}
-bool GLES2DecoderImpl::CheckBoundDrawFramebufferValid(const char* func_name) {
+bool GLES2DecoderImpl::CheckBoundDrawFramebufferValid(
+ const char* func_name,
+ bool check_float_blending) {
Framebuffer* framebuffer = GetBoundDrawFramebuffer();
bool valid = CheckFramebufferValid(
framebuffer, GetDrawFramebufferTarget(),
@@ -4885,6 +4955,19 @@ bool GLES2DecoderImpl::CheckBoundDrawFramebufferValid(const char* func_name) {
if (!valid)
return false;
+ if (check_float_blending) {
+ // only is true when called by DoMultiDrawArrays or DoMultiDrawElements
+ if (framebuffer && state_.GetEnabled(GL_BLEND) &&
+ !features().ext_float_blend) {
+ if (framebuffer->HasActiveFloat32ColorAttachment()) {
+ LOCAL_SET_GL_ERROR(GL_INVALID_OPERATION, func_name,
+ "GL_BLEND with floating-point color attachments "
+ "requires the EXT_float_blend extension");
+ return false;
+ }
+ }
+ }
+
if (!SupportsSeparateFramebufferBinds())
OnUseFramebuffer();
@@ -4958,6 +5041,17 @@ gfx::Size GLES2DecoderImpl::GetBoundReadFramebufferSize() {
}
}
+gfx::Size GLES2DecoderImpl::GetBoundDrawFramebufferSize() {
+ Framebuffer* framebuffer = GetBoundDrawFramebuffer();
+ if (framebuffer) {
+ return framebuffer->GetFramebufferValidSize();
+ } else if (offscreen_target_frame_buffer_.get()) {
+ return offscreen_size_;
+ } else {
+ return surface_->GetSize();
+ }
+}
+
GLuint GLES2DecoderImpl::GetBoundReadFramebufferServiceId() {
Framebuffer* framebuffer = GetBoundReadFramebuffer();
if (framebuffer) {
@@ -8658,7 +8752,21 @@ void GLES2DecoderImpl::DoBlitFramebufferCHROMIUM(
const char* func_name = "glBlitFramebufferCHROMIUM";
DCHECK(!ShouldDeferReads() && !ShouldDeferDraws());
- if (!CheckBoundFramebufferValid(func_name)) {
+ if (!CheckFramebufferValid(GetBoundDrawFramebuffer(),
+ GetDrawFramebufferTarget(),
+ GL_INVALID_FRAMEBUFFER_OPERATION, func_name)) {
+ return;
+ }
+
+ // We need to get this before checking if the read framebuffer is valid.
+ // Checking the read framebuffer may clear attachments which would mark the
+ // draw framebuffer as incomplete. Framebuffer::GetFramebufferValidSize()
+ // requires the framebuffer to be complete.
+ gfx::Size draw_size = GetBoundDrawFramebufferSize();
+
+ if (!CheckFramebufferValid(GetBoundReadFramebuffer(),
+ GetReadFramebufferTarget(),
+ GL_INVALID_FRAMEBUFFER_OPERATION, func_name)) {
return;
}
@@ -8875,52 +8983,161 @@ void GLES2DecoderImpl::DoBlitFramebufferCHROMIUM(
if (workarounds().adjust_src_dst_region_for_blitframebuffer) {
gfx::Size read_size = GetBoundReadFramebufferSize();
- gfx::Rect src_bounds(0, 0, read_size.width(), read_size.height());
GLint src_x = srcX1 > srcX0 ? srcX0 : srcX1;
GLint src_y = srcY1 > srcY0 ? srcY0 : srcY1;
- GLuint src_width = 0, src_height = 0;
- if (!src_width_temp.Abs().AssignIfValid(&src_width))
- src_width = 0;
- if (!src_height_temp.Abs().AssignIfValid(&src_height))
- src_height = 0;
+ unsigned int src_width = base::checked_cast<unsigned int>(
+ src_width_temp.Abs().ValueOrDefault(0));
+ unsigned int src_height = base::checked_cast<unsigned int>(
+ src_height_temp.Abs().ValueOrDefault(0));
+
+ GLint dst_x = dstX1 > dstX0 ? dstX0 : dstX1;
+ GLint dst_y = dstY1 > dstY0 ? dstY0 : dstY1;
+ unsigned int dst_width = base::checked_cast<unsigned int>(
+ dst_width_temp.Abs().ValueOrDefault(0));
+ unsigned int dst_height = base::checked_cast<unsigned int>(
+ dst_height_temp.Abs().ValueOrDefault(0));
+
+ if (dst_width == 0 || src_width == 0 || dst_height == 0 ||
+ src_height == 0) {
+ return;
+ }
+ gfx::Rect src_bounds(0, 0, read_size.width(), read_size.height());
gfx::Rect src_region(src_x, src_y, src_width, src_height);
- if (!src_bounds.Contains(src_region) &&
- (src_width != 0) && (src_height != 0)) {
+
+ gfx::Rect dst_bounds(0, 0, draw_size.width(), draw_size.height());
+ gfx::Rect dst_region(dst_x, dst_y, dst_width, dst_height);
+
+ if (gfx::IntersectRects(dst_bounds, dst_region).IsEmpty()) {
+ return;
+ }
+
+ bool x_flipped = ((srcX1 > srcX0) && (dstX1 < dstX0)) ||
+ ((srcX1 < srcX0) && (dstX1 > dstX0));
+ bool y_flipped = ((srcY1 > srcY0) && (dstY1 < dstY0)) ||
+ ((srcY1 < srcY0) && (dstY1 > dstY0));
+
+ if (!dst_bounds.Contains(dst_region)) {
+ // dst_region is not within dst_bounds. We want to adjust it to a
+ // reasonable size. This is done by halving the dst_region until it is at
+ // most twice the size of the framebuffer. We cut it in half instead
+ // of arbitrarily shrinking it to fit so that we don't end up with
+ // non-power-of-two scale factors which could mess up pixel interpolation.
+ // Naively clipping the dst rect and then proportionally sizing the
+ // src rect yields incorrect results.
+
+ unsigned int dst_x_halvings = 0;
+ unsigned int dst_y_halvings = 0;
+ int dst_origin_x = dst_x;
+ int dst_origin_y = dst_y;
+
+ int dst_clipped_width = dst_region.width();
+ while (dst_clipped_width > 2 * dst_bounds.width()) {
+ dst_clipped_width = dst_clipped_width >> 1;
+ dst_x_halvings++;
+ }
+
+ int dst_clipped_height = dst_region.height();
+ while (dst_clipped_height > 2 * dst_bounds.height()) {
+ dst_clipped_height = dst_clipped_height >> 1;
+ dst_y_halvings++;
+ }
+
+ // Before this block, we check that the two rectangles intersect.
+ // Now, compute the location of a new region origin such that we use the
+ // scaled dimensions but the new region has the same intersection as the
+ // original region.
+
+ int left = dst_region.x();
+ int right = dst_region.right();
+ int top = dst_region.y();
+ int bottom = dst_region.bottom();
+
+ if (left >= 0 && left < dst_bounds.width()) {
+ // Left edge is in-bounds
+ dst_origin_x = dst_x;
+ } else if (right > 0 && right <= dst_bounds.width()) {
+ // Right edge is in-bounds
+ dst_origin_x = right - dst_clipped_width;
+ } else {
+ // Region completely spans bounds
+ dst_origin_x = dst_x;
+ }
+
+ if (top >= 0 && top < dst_bounds.height()) {
+ // Top edge is in-bounds
+ dst_origin_y = dst_y;
+ } else if (bottom > 0 && bottom <= dst_bounds.height()) {
+ // Bottom edge is in-bounds
+ dst_origin_y = bottom - dst_clipped_height;
+ } else {
+ // Region completely spans bounds
+ dst_origin_y = dst_y;
+ }
+
+ dst_region.SetRect(dst_origin_x, dst_origin_y, dst_clipped_width,
+ dst_clipped_height);
+
+ // Offsets from the bottom left corner of the original region to
+ // the bottom left corner of the clipped region.
+ // This value (after it is scaled) is the respective offset we will apply
+ // to the src origin.
+ base::CheckedNumeric<unsigned int> checked_xoffset(dst_region.x() -
+ dst_x);
+ base::CheckedNumeric<unsigned int> checked_yoffset(dst_region.y() -
+ dst_y);
+
+ // if X/Y is reversed, use the top/right out-of-bounds region to compute
+ // the origin offset instead of the left/bottom out-of-bounds region
+ if (x_flipped) {
+ checked_xoffset = (dst_x + dst_width - dst_region.right());
+ }
+ if (y_flipped) {
+ checked_yoffset = (dst_y + dst_height - dst_region.bottom());
+ }
+
+ // These offsets should never overflow.
+ unsigned int xoffset, yoffset;
+ if (!checked_xoffset.AssignIfValid(&xoffset) ||
+ !checked_yoffset.AssignIfValid(&yoffset)) {
+ NOTREACHED();
+ LOCAL_SET_GL_ERROR(
+ GL_INVALID_VALUE, func_name,
+ "the width or height of src or dst region overflowed");
+ return;
+ }
+
+ // Adjust the src region by the same factor
+ src_region.SetRect(src_x + (xoffset >> dst_x_halvings),
+ src_y + (yoffset >> dst_y_halvings),
+ src_region.width() >> dst_x_halvings,
+ src_region.height() >> dst_y_halvings);
+
+ // If the src was scaled to 0, set it to 1 so the src is non-empty.
+ if (src_region.width() == 0) {
+ src_region.set_width(1);
+ }
+ if (src_region.height() == 0) {
+ src_region.set_height(1);
+ }
+ }
+
+ if (!src_bounds.Contains(src_region)) {
// If pixels lying outside the read framebuffer, adjust src region
// and dst region to appropriate in-bounds regions respectively.
- src_bounds.Intersect(src_region);
- GLuint src_real_width = src_bounds.width();
- GLuint src_real_height = src_bounds.height();
- GLuint xoffset = src_bounds.x() - src_x;
- GLuint yoffset = src_bounds.y() - src_y;
+ src_region.Intersect(src_bounds);
+ GLuint src_real_width = src_region.width();
+ GLuint src_real_height = src_region.height();
+ GLuint xoffset = src_region.x() - src_x;
+ GLuint yoffset = src_region.y() - src_y;
// if X/Y is reversed, use the top/right out-of-bounds region for mapping
// to dst region, instead of left/bottom out-of-bounds region for mapping.
- if (((srcX1 > srcX0) && (dstX1 < dstX0)) ||
- ((srcX1 < srcX0) && (dstX1 > dstX0))) {
- xoffset = src_x + src_width - src_bounds.x() - src_bounds.width();
- }
- if (((srcY1 > srcY0) && (dstY1 < dstY0)) ||
- ((srcY1 < srcY0) && (dstY1 > dstY0))) {
- yoffset = src_y + src_height - src_bounds.y() - src_bounds.height();
+ if (x_flipped) {
+ xoffset = src_x + src_width - src_region.x() - src_region.width();
}
-
- GLint dst_x = dstX1 > dstX0 ? dstX0 : dstX1;
- GLint dst_y = dstY1 > dstY0 ? dstY0 : dstY1;
- base::CheckedNumeric<GLint> dst_width_temp = dstX1;
- dst_width_temp -= dstX0;
- base::CheckedNumeric<GLint> dst_height_temp = dstY1;
- dst_height_temp -= dstY0;
- GLuint dst_width = 0, dst_height = 0;
- if (!dst_width_temp.IsValid() || !dst_height_temp.IsValid()) {
- LOCAL_SET_GL_ERROR(GL_INVALID_OPERATION, func_name,
- "the width or height of dst region overflow");
- return;
+ if (y_flipped) {
+ yoffset = src_y + src_height - src_region.y() - src_region.height();
}
- if (!dst_width_temp.Abs().AssignIfValid(&dst_width))
- dst_width = 0;
- if (!dst_height_temp.Abs().AssignIfValid(&dst_height))
- dst_height = 0;
GLfloat dst_mapping_width =
static_cast<GLfloat>(src_real_width) * dst_width / src_width;
@@ -8941,21 +9158,22 @@ void GLES2DecoderImpl::DoBlitFramebufferCHROMIUM(
GLuint dst_mapping_y1 =
std::round(dst_y + dst_mapping_yoffset + dst_mapping_height);
- // adjust the src region and dst region to fit the read framebuffer
- srcX0 = srcX0 < srcX1 ?
- src_bounds.x() : src_bounds.x() + src_bounds.width();
- srcY0 = srcY0 < srcY1 ?
- src_bounds.y() : src_bounds.y() + src_bounds.height();
- srcX1 = srcX0 < srcX1 ?
- src_bounds.x() + src_bounds.width() : src_bounds.x();
- srcY1 = srcY0 < srcY1 ?
- src_bounds.y() + src_bounds.height() : src_bounds.y();
-
- dstX0 = dstX0 < dstX1 ? dst_mapping_x0 : dst_mapping_x1;
- dstY0 = dstY0 < dstY1 ? dst_mapping_y0 : dst_mapping_y1;
- dstX1 = dstX0 < dstX1 ? dst_mapping_x1 : dst_mapping_x0;
- dstY1 = dstY0 < dstY1 ? dst_mapping_y1 : dst_mapping_y0;
+ dst_region.SetRect(dst_mapping_x0, dst_mapping_y0,
+ dst_mapping_x1 - dst_mapping_x0,
+ dst_mapping_y1 - dst_mapping_y0);
}
+
+ // Set the src and dst endpoints. If they were previously flipped,
+ // set them as flipped.
+ srcX0 = srcX0 < srcX1 ? src_region.x() : src_region.right();
+ srcY0 = srcY0 < srcY1 ? src_region.y() : src_region.bottom();
+ srcX1 = srcX0 < srcX1 ? src_region.right() : src_region.x();
+ srcY1 = srcY0 < srcY1 ? src_region.bottom() : src_region.y();
+
+ dstX0 = dstX0 < dstX1 ? dst_region.x() : dst_region.right();
+ dstY0 = dstY0 < dstY1 ? dst_region.y() : dst_region.bottom();
+ dstX1 = dstX0 < dstX1 ? dst_region.right() : dst_region.x();
+ dstY1 = dstY0 < dstY1 ? dst_region.bottom() : dst_region.y();
}
bool enable_srgb =
@@ -9091,6 +9309,18 @@ void GLES2DecoderImpl::RenderbufferStorageMultisampleHelper(
}
}
+void GLES2DecoderImpl::RenderbufferStorageMultisampleHelperAMD(
+ GLenum target,
+ GLsizei samples,
+ GLsizei storageSamples,
+ GLenum internal_format,
+ GLsizei width,
+ GLsizei height,
+ ForcedMultisampleMode mode) {
+ api()->glRenderbufferStorageMultisampleAdvancedAMDFn(
+ target, samples, storageSamples, internal_format, width, height);
+}
+
bool GLES2DecoderImpl::RegenerateRenderbufferIfNeeded(
Renderbuffer* renderbuffer) {
if (!renderbuffer->RegenerateAndBindBackingObjectIfNeeded(workarounds())) {
@@ -9139,6 +9369,36 @@ bool GLES2DecoderImpl::ValidateRenderbufferStorageMultisample(
return true;
}
+bool GLES2DecoderImpl::ValidateRenderbufferStorageMultisampleAMD(
+ GLsizei samples,
+ GLsizei storageSamples,
+ GLenum internalformat,
+ GLsizei width,
+ GLsizei height) {
+ if (samples > renderbuffer_manager()->max_samples()) {
+ LOCAL_SET_GL_ERROR(GL_INVALID_VALUE, "glRenderbufferStorageMultisample",
+ "samples too large");
+ return false;
+ }
+
+ if (width > renderbuffer_manager()->max_renderbuffer_size() ||
+ height > renderbuffer_manager()->max_renderbuffer_size()) {
+ LOCAL_SET_GL_ERROR(GL_INVALID_VALUE, "glRenderbufferStorageMultisample",
+ "dimensions too large");
+ return false;
+ }
+
+ uint32_t estimated_size = 0;
+ if (!renderbuffer_manager()->ComputeEstimatedRenderbufferSize(
+ width, height, samples, internalformat, &estimated_size)) {
+ LOCAL_SET_GL_ERROR(GL_OUT_OF_MEMORY, "glRenderbufferStorageMultisample",
+ "dimensions too large");
+ return false;
+ }
+
+ return true;
+}
+
void GLES2DecoderImpl::DoRenderbufferStorageMultisampleCHROMIUM(
GLenum target, GLsizei samples, GLenum internalformat,
GLsizei width, GLsizei height) {
@@ -9180,6 +9440,51 @@ void GLES2DecoderImpl::DoRenderbufferStorageMultisampleCHROMIUM(
}
}
+void GLES2DecoderImpl::DoRenderbufferStorageMultisampleAdvancedAMD(
+ GLenum target,
+ GLsizei samples,
+ GLsizei storageSamples,
+ GLenum internalformat,
+ GLsizei width,
+ GLsizei height) {
+ Renderbuffer* renderbuffer = GetRenderbufferInfoForTarget(GL_RENDERBUFFER);
+ if (!renderbuffer) {
+ LOCAL_SET_GL_ERROR(GL_INVALID_OPERATION,
+ "glRenderbufferStorageMultisampleAdvancedAMD",
+ "no renderbuffer bound");
+ return;
+ }
+
+ if (!ValidateRenderbufferStorageMultisampleAMD(
+ samples, storageSamples, internalformat, width, height)) {
+ return;
+ }
+
+ GLenum impl_format =
+ renderbuffer_manager()->InternalRenderbufferFormatToImplFormat(
+ internalformat);
+ LOCAL_COPY_REAL_GL_ERRORS_TO_WRAPPER(
+ "glRenderbufferStorageMultisampleAdvancedAMD");
+ RenderbufferStorageMultisampleHelperAMD(
+ target, samples, storageSamples, impl_format, width, height, kDoNotForce);
+ GLenum error =
+ LOCAL_PEEK_GL_ERROR("glRenderbufferStorageMultisampleAdvancedAMD");
+ if (error == GL_NO_ERROR) {
+ if (workarounds().validate_multisample_buffer_allocation) {
+ if (!VerifyMultisampleRenderbufferIntegrity(renderbuffer->service_id(),
+ impl_format)) {
+ LOCAL_SET_GL_ERROR(GL_OUT_OF_MEMORY,
+ "glRenderbufferStorageMultisampleAdvancedAMD",
+ "out of memory");
+ return;
+ }
+ }
+
+ renderbuffer_manager()->SetInfoAndInvalidate(renderbuffer, samples,
+ internalformat, width, height);
+ }
+}
+
// This is the handler for multisampled_render_to_texture extensions.
void GLES2DecoderImpl::DoRenderbufferStorageMultisampleEXT(
GLenum target, GLsizei samples, GLenum internalformat,
@@ -10338,7 +10643,9 @@ bool GLES2DecoderImpl::DoBindOrCopyTexImageIfNeeded(Texture* texture,
if (texture_unit)
api()->glActiveTextureFn(texture_unit);
api()->glBindTextureFn(textarget, texture->service_id());
- if (image->BindTexImage(textarget)) {
+ if (image->ShouldBindOrCopy() == gl::GLImage::BIND) {
+ bool rv = image->BindTexImage(textarget);
+ DCHECK(rv) << "BindTexImage() failed";
image_state = Texture::BOUND;
} else {
DoCopyTexImage(texture, textarget, image);
@@ -10394,28 +10701,15 @@ bool GLES2DecoderImpl::PrepareTexturesForRender(bool* textures_set,
if (!texture_ref) {
LOCAL_RENDER_WARNING(
std::string("there is no texture bound to the unit ") +
- base::UintToString(texture_unit_index));
+ base::NumberToString(texture_unit_index));
} else {
LOCAL_RENDER_WARNING(
std::string("texture bound to texture unit ") +
- base::UintToString(texture_unit_index) +
+ base::NumberToString(texture_unit_index) +
" is not renderable. It maybe non-power-of-2 and have"
" incompatible texture filtering.");
}
continue;
- } else if (!texture_ref->texture()->CompatibleWithSamplerUniformType(
- uniform_info->type)) {
- LOCAL_SET_GL_ERROR(
- GL_INVALID_OPERATION, function_name,
- (std::string("Texture bound to texture unit ") +
- base::UintToString(texture_unit_index) +
- " with internal format " +
- GLES2Util::GetStringEnum(
- texture_ref->texture()->GetInternalFormatOfBaseLevel()) +
- " is not compatible with sampler type " +
- GLES2Util::GetStringEnum(uniform_info->type))
- .c_str());
- return false;
}
if (textarget != GL_TEXTURE_CUBE_MAP) {
@@ -10972,7 +11266,7 @@ ALWAYS_INLINE error::Error GLES2DecoderImpl::DoMultiDrawArrays(
return error::kNoError;
}
- if (!CheckBoundDrawFramebufferValid(function_name)) {
+ if (!CheckBoundDrawFramebufferValid(function_name, true)) {
return error::kNoError;
}
@@ -11179,7 +11473,7 @@ ALWAYS_INLINE error::Error GLES2DecoderImpl::DoMultiDrawElements(
return error::kNoError;
}
- if (!CheckBoundDrawFramebufferValid(function_name)) {
+ if (!CheckBoundDrawFramebufferValid(function_name, true)) {
return error::kNoError;
}
@@ -11189,6 +11483,14 @@ ALWAYS_INLINE error::Error GLES2DecoderImpl::DoMultiDrawElements(
return error::kNoError;
}
+ if (state_.bound_transform_feedback.get() &&
+ state_.bound_transform_feedback->active() &&
+ !state_.bound_transform_feedback->paused()) {
+ LOCAL_SET_GL_ERROR(GL_INVALID_OPERATION, function_name,
+ "transformfeedback is active and not paused");
+ return error::kNoError;
+ }
+
GLuint total_max_vertex_accessed = 0;
GLsizei total_max_primcount = 0;
if (!CheckMultiDrawElementsVertices(
@@ -11202,14 +11504,6 @@ ALWAYS_INLINE error::Error GLES2DecoderImpl::DoMultiDrawElements(
return error::kNoError;
}
- if (state_.bound_transform_feedback.get() &&
- state_.bound_transform_feedback->active() &&
- !state_.bound_transform_feedback->paused()) {
- LOCAL_SET_GL_ERROR(GL_INVALID_OPERATION, function_name,
- "transformfeedback is active and not paused");
- return error::kNoError;
- }
-
if (feature_info_->IsWebGL2OrES3Context()) {
if (!AttribsTypeMatch()) {
LOCAL_SET_GL_ERROR(GL_INVALID_OPERATION, function_name,
@@ -11309,9 +11603,9 @@ error::Error GLES2DecoderImpl::HandleDrawElements(
*static_cast<const volatile gles2::cmds::DrawElements*>(cmd_data);
GLsizei count = static_cast<GLsizei>(c.count);
int32_t offset = static_cast<int32_t>(c.index_offset);
- return DoMultiDrawElements("glDrawArrays", false, static_cast<GLenum>(c.mode),
- &count, static_cast<GLenum>(c.type), &offset,
- nullptr, 1);
+ return DoMultiDrawElements("glDrawElements", false,
+ static_cast<GLenum>(c.mode), &count,
+ static_cast<GLenum>(c.type), &offset, nullptr, 1);
}
error::Error GLES2DecoderImpl::HandleDrawElementsInstancedANGLE(
@@ -11740,6 +12034,34 @@ error::Error GLES2DecoderImpl::HandleGetProgramInfoLog(
return error::kNoError;
}
+error::Error GLES2DecoderImpl::HandleGetProgramResourceiv(
+ uint32_t immediate_data_size,
+ const volatile void* cmd_data) {
+ // Unimplemented for WebGL 2.0 Compute context.
+ return error::kUnknownCommand;
+}
+
+error::Error GLES2DecoderImpl::HandleGetProgramResourceIndex(
+ uint32_t immediate_data_size,
+ const volatile void* cmd_data) {
+ // Unimplemented for WebGL 2.0 Compute context.
+ return error::kUnknownCommand;
+}
+
+error::Error GLES2DecoderImpl::HandleGetProgramResourceLocation(
+ uint32_t immediate_data_size,
+ const volatile void* cmd_data) {
+ // Unimplemented for WebGL 2.0 Compute context.
+ return error::kUnknownCommand;
+}
+
+error::Error GLES2DecoderImpl::HandleGetProgramResourceName(
+ uint32_t immediate_data_size,
+ const volatile void* cmd_data) {
+ // Unimplemented for WebGL 2.0 Compute context.
+ return error::kUnknownCommand;
+}
+
error::Error GLES2DecoderImpl::HandleGetShaderInfoLog(
uint32_t immediate_data_size,
const volatile void* cmd_data) {
@@ -13031,8 +13353,8 @@ error::Error GLES2DecoderImpl::HandlePostSubBufferCHROMIUM(
client_->OnSwapBuffers(c.swap_id(), c.flags);
surface_->PostSubBufferAsync(
c.x, c.y, c.width, c.height,
- base::Bind(&GLES2DecoderImpl::FinishAsyncSwapBuffers,
- weak_ptr_factory_.GetWeakPtr(), c.swap_id()),
+ base::BindOnce(&GLES2DecoderImpl::FinishAsyncSwapBuffers,
+ weak_ptr_factory_.GetWeakPtr(), c.swap_id()),
base::DoNothing());
} else {
client_->OnSwapBuffers(c.swap_id(), c.flags);
@@ -16251,8 +16573,8 @@ void GLES2DecoderImpl::DoSwapBuffers(uint64_t swap_id, GLbitfield flags) {
client_->OnSwapBuffers(swap_id, flags);
surface_->SwapBuffersAsync(
- base::Bind(&GLES2DecoderImpl::FinishAsyncSwapBuffers,
- weak_ptr_factory_.GetWeakPtr(), swap_id),
+ base::BindOnce(&GLES2DecoderImpl::FinishAsyncSwapBuffers,
+ weak_ptr_factory_.GetWeakPtr(), swap_id),
base::DoNothing());
} else {
client_->OnSwapBuffers(swap_id, flags);
@@ -16304,8 +16626,8 @@ void GLES2DecoderImpl::DoCommitOverlayPlanes(uint64_t swap_id,
if (supports_async_swap_) {
client_->OnSwapBuffers(swap_id, flags);
surface_->CommitOverlayPlanesAsync(
- base::Bind(&GLES2DecoderImpl::FinishAsyncSwapBuffers,
- weak_ptr_factory_.GetWeakPtr(), swap_id),
+ base::BindOnce(&GLES2DecoderImpl::FinishAsyncSwapBuffers,
+ weak_ptr_factory_.GetWeakPtr(), swap_id),
base::DoNothing());
} else {
client_->OnSwapBuffers(swap_id, flags);
@@ -16454,6 +16776,9 @@ error::Error GLES2DecoderImpl::HandleRequestExtensionCHROMIUM(
std::string::npos) {
feature_info_->EnableOESTextureHalfFloatLinear();
}
+ if (feature_str.find("GL_EXT_float_blend ") != std::string::npos) {
+ feature_info_->EnableEXTFloatBlend();
+ }
UpdateCapabilities();
@@ -17441,8 +17766,10 @@ void GLES2DecoderImpl::DoCopyTextureCHROMIUM(
if (image && internal_format == source_internal_format && dest_level == 0 &&
!unpack_flip_y && !unpack_premultiply_alpha_change) {
api()->glBindTextureFn(dest_binding_target, dest_texture->service_id());
- if (image->CopyTexImage(dest_target))
+ if (image->ShouldBindOrCopy() == gl::GLImage::COPY &&
+ image->CopyTexImage(dest_target)) {
return;
+ }
}
DoBindOrCopyTexImageIfNeeded(source_texture, source_target, 0);
@@ -18110,7 +18437,9 @@ void GLES2DecoderImpl::DoCreateAndConsumeTextureINTERNAL(
void GLES2DecoderImpl::DoCreateAndTexStorage2DSharedImageINTERNAL(
GLuint client_id,
- const volatile GLbyte* data) {
+ const volatile GLbyte* data,
+ GLenum internal_format) {
+ // TODO(https://crbug.com/924198): Implement support for internal format.
TRACE_EVENT2("gpu",
"GLES2DecoderImpl::DoCreateAndTexStorage2DSharedImageCHROMIUM",
"context", logger_.GetLogPrefix(), "mailbox[0]",
@@ -18135,8 +18464,21 @@ void GLES2DecoderImpl::DoCreateAndTexStorage2DSharedImageINTERNAL(
return;
}
- std::unique_ptr<SharedImageRepresentationGLTexture> shared_image =
- group_->shared_image_representation_factory()->ProduceGLTexture(mailbox);
+ std::unique_ptr<SharedImageRepresentationGLTexture> shared_image;
+ if (internal_format == GL_RGB) {
+ shared_image = group_->shared_image_representation_factory()
+ ->ProduceRGBEmulationGLTexture(mailbox);
+ } else if (internal_format == GL_NONE) {
+ shared_image =
+ group_->shared_image_representation_factory()->ProduceGLTexture(
+ mailbox);
+ } else {
+ LOCAL_SET_GL_ERROR(GL_INVALID_ENUM,
+ "DoCreateAndTexStorage2DSharedImageINTERNAL",
+ "invalid internal format");
+ return;
+ }
+
if (!shared_image) {
// Mailbox missing, generate a texture.
bool result = GenTexturesHelper(1, &client_id);
@@ -18332,7 +18674,7 @@ void GLES2DecoderImpl::BindTexImage2DCHROMIUMImpl(const char* function_name,
Texture::ImageState image_state = Texture::UNBOUND;
- {
+ if (image->ShouldBindOrCopy() == gl::GLImage::BIND) {
ScopedGLErrorSuppressor suppressor(
"GLES2DecoderImpl::DoBindTexImage2DCHROMIUM", error_state_.get());
@@ -18652,6 +18994,80 @@ GLsync GLES2DecoderImpl::DoFenceSync(GLenum condition, GLbitfield flags) {
return api()->glFenceSyncFn(condition, flags);
}
+GLsizei GLES2DecoderImpl::InternalFormatSampleCountsHelper(
+ GLenum target,
+ GLenum internalformat,
+ std::vector<GLint>* out_sample_counts) {
+ DCHECK(out_sample_counts == nullptr || out_sample_counts->size() == 0);
+
+ GLint num_sample_counts = 0;
+ if (gl_version_info().IsLowerThanGL(4, 2)) {
+ // No multisampling for integer formats.
+ if (GLES2Util::IsIntegerFormat(internalformat)) {
+ return 0;
+ }
+
+ GLint max_samples = renderbuffer_manager()->max_samples();
+ num_sample_counts = max_samples;
+
+ if (out_sample_counts != nullptr) {
+ out_sample_counts->reserve(num_sample_counts);
+ for (GLint sample_count = max_samples; sample_count > 0; --sample_count) {
+ out_sample_counts->push_back(sample_count);
+ }
+ }
+ } else {
+ api()->glGetInternalformativFn(target, internalformat, GL_NUM_SAMPLE_COUNTS,
+ 1, &num_sample_counts);
+
+ bool remove_nonconformant_sample_counts =
+ feature_info_->IsWebGLContext() &&
+ feature_info_->feature_flags().nv_internalformat_sample_query;
+
+ if (out_sample_counts != nullptr || remove_nonconformant_sample_counts) {
+ std::vector<GLint> sample_counts(num_sample_counts);
+ api()->glGetInternalformativFn(target, internalformat, GL_SAMPLES,
+ num_sample_counts, sample_counts.data());
+
+ if (remove_nonconformant_sample_counts) {
+ ScopedGLErrorSuppressor suppressor(
+ "GLES2DecoderImpl::InternalFormatSampleCountsHelper",
+ error_state_.get());
+
+ auto is_nonconformant = [this, target,
+ internalformat](GLint sample_count) {
+ GLint conformant = GL_FALSE;
+ api()->glGetInternalformatSampleivNVFn(target, internalformat,
+ sample_count, GL_CONFORMANT_NV,
+ 1, &conformant);
+
+ // getInternalformatSampleivNV does not work for all formats on NVIDIA
+ // Shield TV drivers. Assume that formats with large sample counts are
+ // non-conformant in case the query generates an error.
+ if (api()->glGetErrorFn() != GL_NO_ERROR) {
+ return sample_count > 8;
+ }
+ return conformant == GL_FALSE;
+ };
+
+ sample_counts.erase(
+ std::remove_if(sample_counts.begin(), sample_counts.end(),
+ is_nonconformant),
+ sample_counts.end());
+ num_sample_counts = sample_counts.size();
+ }
+
+ if (out_sample_counts != nullptr) {
+ *out_sample_counts = std::move(sample_counts);
+ }
+ }
+ }
+
+ DCHECK(out_sample_counts == nullptr ||
+ out_sample_counts->size() == static_cast<size_t>(num_sample_counts));
+ return num_sample_counts;
+}
+
error::Error GLES2DecoderImpl::HandleGetInternalformativ(
uint32_t immediate_data_size,
const volatile void* cmd_data) {
@@ -18677,52 +19093,37 @@ error::Error GLES2DecoderImpl::HandleGetInternalformativ(
}
typedef cmds::GetInternalformativ::Result Result;
+
+ GLsizei num_sample_counts = 0;
+ std::vector<GLint> sample_counts;
+
GLsizei num_values = 0;
- std::vector<GLint> samples;
- if (gl_version_info().IsLowerThanGL(4, 2)) {
- if (!GLES2Util::IsIntegerFormat(format)) {
- // No multisampling for integer formats.
- GLint max_samples = renderbuffer_manager()->max_samples();
- while (max_samples > 0) {
- samples.push_back(max_samples);
- --max_samples;
- }
- }
- switch (pname) {
- case GL_NUM_SAMPLE_COUNTS:
- num_values = 1;
- break;
- case GL_SAMPLES:
- num_values = static_cast<GLsizei>(samples.size());
- break;
- default:
- NOTREACHED();
- break;
- }
- } else {
- switch (pname) {
- case GL_NUM_SAMPLE_COUNTS:
- num_values = 1;
- break;
- case GL_SAMPLES:
- {
- GLint value = 0;
- api()->glGetInternalformativFn(target, format, GL_NUM_SAMPLE_COUNTS,
- 1, &value);
- num_values = static_cast<GLsizei>(value);
- }
- break;
- default:
- NOTREACHED();
- break;
- }
+ GLint* values = nullptr;
+ switch (pname) {
+ case GL_NUM_SAMPLE_COUNTS:
+ num_sample_counts =
+ InternalFormatSampleCountsHelper(target, format, nullptr);
+ num_values = 1;
+ values = &num_sample_counts;
+ break;
+ case GL_SAMPLES:
+ num_sample_counts =
+ InternalFormatSampleCountsHelper(target, format, &sample_counts);
+ num_values = num_sample_counts;
+ values = sample_counts.data();
+ break;
+ default:
+ NOTREACHED();
+ break;
}
+
uint32_t checked_size = 0;
if (!Result::ComputeSize(num_values).AssignIfValid(&checked_size)) {
return error::kOutOfBounds;
}
Result* result = GetSharedMemoryAs<Result*>(
c.params_shm_id, c.params_shm_offset, checked_size);
+
GLint* params = result ? result->GetData() : nullptr;
if (params == nullptr) {
return error::kOutOfBounds;
@@ -18731,23 +19132,8 @@ error::Error GLES2DecoderImpl::HandleGetInternalformativ(
if (result->size != 0) {
return error::kInvalidArguments;
}
- if (gl_version_info().IsLowerThanGL(4, 2)) {
- switch (pname) {
- case GL_NUM_SAMPLE_COUNTS:
- params[0] = static_cast<GLint>(samples.size());
- break;
- case GL_SAMPLES:
- for (size_t ii = 0; ii < samples.size(); ++ii) {
- params[ii] = samples[ii];
- }
- break;
- default:
- NOTREACHED();
- break;
- }
- } else {
- api()->glGetInternalformativFn(target, format, pname, num_values, params);
- }
+
+ std::copy(values, &values[num_values], params);
result->SetNumResults(num_values);
return error::kNoError;
}
diff --git a/chromium/gpu/command_buffer/service/gles2_cmd_decoder.h b/chromium/gpu/command_buffer/service/gles2_cmd_decoder.h
index 37f590e1cf5..6cdf5a36c0d 100644
--- a/chromium/gpu/command_buffer/service/gles2_cmd_decoder.h
+++ b/chromium/gpu/command_buffer/service/gles2_cmd_decoder.h
@@ -63,6 +63,7 @@ struct DisallowedFeatures {
ext_color_buffer_half_float = false;
oes_texture_float_linear = false;
oes_texture_half_float_linear = false;
+ ext_float_blend = false;
}
bool operator==(const DisallowedFeatures& other) const {
@@ -76,6 +77,7 @@ struct DisallowedFeatures {
bool ext_color_buffer_half_float = false;
bool oes_texture_float_linear = false;
bool oes_texture_half_float_linear = false;
+ bool ext_float_blend = false;
};
// This class implements the DecoderContext interface, decoding GLES2
@@ -136,6 +138,8 @@ class GPU_GLES2_EXPORT GLES2Decoder : public CommonDecoder,
Outputter* outputter() const override;
+ int GetRasterDecoderId() const override;
+
// Set the surface associated with the default FBO.
virtual void SetSurface(const scoped_refptr<gl::GLSurface>& surface) = 0;
// Releases the surface associated with the GL context.
diff --git a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_autogen.h b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_autogen.h
index 912d5b30d38..113ce91d6fb 100644
--- a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_autogen.h
+++ b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_autogen.h
@@ -4313,6 +4313,62 @@ error::Error GLES2DecoderImpl::HandleRenderbufferStorageMultisampleCHROMIUM(
return error::kNoError;
}
+error::Error GLES2DecoderImpl::HandleRenderbufferStorageMultisampleAdvancedAMD(
+ uint32_t immediate_data_size,
+ const volatile void* cmd_data) {
+ const volatile gles2::cmds::RenderbufferStorageMultisampleAdvancedAMD& c =
+ *static_cast<const volatile gles2::cmds::
+ RenderbufferStorageMultisampleAdvancedAMD*>(cmd_data);
+ if (!features().amd_framebuffer_multisample_advanced) {
+ return error::kUnknownCommand;
+ }
+
+ GLenum target = static_cast<GLenum>(c.target);
+ GLsizei samples = static_cast<GLsizei>(c.samples);
+ GLsizei storageSamples = static_cast<GLsizei>(c.storageSamples);
+ GLenum internalformat = static_cast<GLenum>(c.internalformat);
+ GLsizei width = static_cast<GLsizei>(c.width);
+ GLsizei height = static_cast<GLsizei>(c.height);
+ if (!validators_->render_buffer_target.IsValid(target)) {
+ LOCAL_SET_GL_ERROR_INVALID_ENUM(
+ "glRenderbufferStorageMultisampleAdvancedAMD", target, "target");
+ return error::kNoError;
+ }
+ if (samples < 0) {
+ LOCAL_SET_GL_ERROR(GL_INVALID_VALUE,
+ "glRenderbufferStorageMultisampleAdvancedAMD",
+ "samples < 0");
+ return error::kNoError;
+ }
+ if (storageSamples < 0) {
+ LOCAL_SET_GL_ERROR(GL_INVALID_VALUE,
+ "glRenderbufferStorageMultisampleAdvancedAMD",
+ "storageSamples < 0");
+ return error::kNoError;
+ }
+ if (!validators_->render_buffer_format.IsValid(internalformat)) {
+ LOCAL_SET_GL_ERROR_INVALID_ENUM(
+ "glRenderbufferStorageMultisampleAdvancedAMD", internalformat,
+ "internalformat");
+ return error::kNoError;
+ }
+ if (width < 0) {
+ LOCAL_SET_GL_ERROR(GL_INVALID_VALUE,
+ "glRenderbufferStorageMultisampleAdvancedAMD",
+ "width < 0");
+ return error::kNoError;
+ }
+ if (height < 0) {
+ LOCAL_SET_GL_ERROR(GL_INVALID_VALUE,
+ "glRenderbufferStorageMultisampleAdvancedAMD",
+ "height < 0");
+ return error::kNoError;
+ }
+ DoRenderbufferStorageMultisampleAdvancedAMD(target, samples, storageSamples,
+ internalformat, width, height);
+ return error::kNoError;
+}
+
error::Error GLES2DecoderImpl::HandleRenderbufferStorageMultisampleEXT(
uint32_t immediate_data_size,
const volatile void* cmd_data) {
@@ -4668,6 +4724,12 @@ error::Error GLES2DecoderImpl::HandleDispatchCompute(
return error::kUnknownCommand;
}
+error::Error GLES2DecoderImpl::HandleGetProgramInterfaceiv(
+ uint32_t immediate_data_size,
+ const volatile void* cmd_data) {
+ return error::kUnknownCommand;
+}
+
error::Error GLES2DecoderImpl::HandleMemoryBarrierEXT(
uint32_t immediate_data_size,
const volatile void* cmd_data) {
@@ -5562,10 +5624,11 @@ GLES2DecoderImpl::HandleCreateAndTexStorage2DSharedImageINTERNALImmediate(
}
volatile const GLbyte* mailbox = GetImmediateDataAs<volatile const GLbyte*>(
c, mailbox_size, immediate_data_size);
+ GLenum internalformat = static_cast<GLenum>(c.internalformat);
if (mailbox == nullptr) {
return error::kOutOfBounds;
}
- DoCreateAndTexStorage2DSharedImageINTERNAL(texture, mailbox);
+ DoCreateAndTexStorage2DSharedImageINTERNAL(texture, mailbox, internalformat);
return error::kNoError;
}
diff --git a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough.cc b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough.cc
index f2b5153e3a3..2b5b7f41a05 100644
--- a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough.cc
+++ b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough.cc
@@ -7,6 +7,7 @@
#include <string>
#include <utility>
+#include "base/bind.h"
#include "base/callback.h"
#include "base/stl_util.h"
#include "base/strings/string_split.h"
@@ -1685,7 +1686,9 @@ void GLES2DecoderPassthroughImpl::BindOnePendingImage(
return;
// TODO: internalformat?
- if (!image->BindTexImage(target))
+ if (image->ShouldBindOrCopy() == gl::GLImage::BIND)
+ image->BindTexImage(target);
+ else
image->CopyTexImage(target);
// If copy / bind fail, then we could keep the bind state the same.
@@ -2175,10 +2178,22 @@ void GLES2DecoderPassthroughImpl::RemovePendingQuery(GLuint service_id) {
void GLES2DecoderPassthroughImpl::ReadBackBuffersIntoShadowCopies(
const BufferShadowUpdateMap& updates) {
+ if (updates.empty()) {
+ return;
+ }
+
GLint old_binding = 0;
api()->glGetIntegervFn(GL_ARRAY_BUFFER_BINDING, &old_binding);
for (const auto& u : updates) {
- GLuint service_id = u.first;
+ GLuint client_id = u.first;
+ GLuint service_id = 0;
+ if (!resources_->buffer_id_map.GetServiceID(client_id, &service_id)) {
+ // Buffer no longer exists, this shadow update should have been removed by
+ // DoDeleteBuffers
+ DCHECK(false);
+ continue;
+ }
+
const auto& update = u.second;
void* shadow = update.shm->GetDataAddress(update.shm_offset, update.size);
@@ -2436,14 +2451,13 @@ error::Error GLES2DecoderPassthroughImpl::BindTexImage2DCHROMIUMImpl(
return error::kNoError;
}
- if (internalformat) {
- if (!image->BindTexImageWithInternalformat(target, internalformat)) {
- image->CopyTexImage(target);
- }
+ if (image->ShouldBindOrCopy() == gl::GLImage::BIND) {
+ if (internalformat)
+ image->BindTexImageWithInternalformat(target, internalformat);
+ else
+ image->BindTexImage(target);
} else {
- if (!image->BindTexImage(target)) {
- image->CopyTexImage(target);
- }
+ image->CopyTexImage(target);
}
// Target is already validated
diff --git a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough_doer_prototypes.h b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough_doer_prototypes.h
index 046f7918b8a..765ee0cee14 100644
--- a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough_doer_prototypes.h
+++ b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough_doer_prototypes.h
@@ -305,6 +305,32 @@ error::Error DoGetProgramiv(GLuint program,
GLsizei* length,
GLint* params);
error::Error DoGetProgramInfoLog(GLuint program, std::string* infolog);
+error::Error DoGetProgramInterfaceiv(GLuint program,
+ GLenum program_interface,
+ GLenum pname,
+ GLsizei bufsize,
+ GLsizei* length,
+ GLint* params);
+error::Error DoGetProgramResourceiv(GLuint program,
+ GLenum program_interface,
+ GLuint index,
+ GLsizei prop_count,
+ const GLenum* props,
+ GLsizei bufsize,
+ GLsizei* length,
+ GLint* params);
+error::Error DoGetProgramResourceIndex(GLuint program,
+ GLenum program_interface,
+ const char* name,
+ GLuint* index);
+error::Error DoGetProgramResourceLocation(GLuint program,
+ GLenum program_interface,
+ const char* name,
+ GLint* location);
+error::Error DoGetProgramResourceName(GLuint program,
+ GLenum program_interface,
+ GLuint index,
+ std::string* name);
error::Error DoGetRenderbufferParameteriv(GLenum target,
GLenum pname,
GLsizei bufsize,
@@ -710,6 +736,12 @@ error::Error DoRenderbufferStorageMultisampleCHROMIUM(GLenum target,
GLenum internalformat,
GLsizei width,
GLsizei height);
+error::Error DoRenderbufferStorageMultisampleAdvancedAMD(GLenum target,
+ GLsizei samples,
+ GLsizei storageSamples,
+ GLenum internalformat,
+ GLsizei width,
+ GLsizei height);
error::Error DoRenderbufferStorageMultisampleEXT(GLenum target,
GLsizei samples,
GLenum internalformat,
@@ -1040,8 +1072,7 @@ error::Error DoBeginRasterCHROMIUM(GLuint texture_id,
GLuint sk_color,
GLuint msaa_sample_count,
GLboolean can_use_lcd_text,
- GLint color_type,
- GLuint color_space_transfer_cache_id);
+ GLint color_type);
error::Error DoRasterCHROMIUM(GLuint raster_shm_id,
GLuint raster_shm_offset,
GLsizeiptr raster_shm_size,
@@ -1084,7 +1115,8 @@ error::Error DoUnlockDiscardableTextureCHROMIUM(GLuint texture_id);
error::Error DoLockDiscardableTextureCHROMIUM(GLuint texture_id);
error::Error DoCreateAndTexStorage2DSharedImageINTERNAL(
GLuint client_id,
- const volatile GLbyte* mailbox);
+ const volatile GLbyte* mailbox,
+ GLenum internalformat);
error::Error DoBeginSharedImageAccessDirectCHROMIUM(GLuint client_id,
GLenum mode);
error::Error DoEndSharedImageAccessDirectCHROMIUM(GLuint client_id);
diff --git a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough_doers.cc b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough_doers.cc
index fc5aca5b2bb..2ff9b5985c3 100644
--- a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough_doers.cc
+++ b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough_doers.cc
@@ -926,6 +926,9 @@ error::Error GLES2DecoderPassthroughImpl::DoDeleteBuffers(
return update.first == client_id;
};
base::EraseIf(buffer_shadow_updates_, is_the_deleted_buffer);
+ for (PendingQuery& pending_query : pending_queries_) {
+ base::EraseIf(pending_query.buffer_shadow_updates, is_the_deleted_buffer);
+ }
}
api()->glDeleteBuffersARBFn(n, service_ids.data());
@@ -1705,6 +1708,82 @@ error::Error GLES2DecoderPassthroughImpl::DoGetProgramInfoLog(
return error::kNoError;
}
+error::Error GLES2DecoderPassthroughImpl::DoGetProgramInterfaceiv(
+ GLuint program,
+ GLenum program_interface,
+ GLenum pname,
+ GLsizei bufsize,
+ GLsizei* length,
+ GLint* params) {
+ // glGetProgramInterfaceivRobustANGLE remains to be implemented in ANGLE.
+ if (bufsize < 1) {
+ return error::kOutOfBounds;
+ }
+ *length = 1;
+ api()->glGetProgramInterfaceivFn(GetProgramServiceID(program, resources_),
+ program_interface, pname, params);
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderPassthroughImpl::DoGetProgramResourceiv(
+ GLuint program,
+ GLenum program_interface,
+ GLuint index,
+ GLsizei prop_count,
+ const GLenum* props,
+ GLsizei bufsize,
+ GLsizei* length,
+ GLint* params) {
+ api()->glGetProgramResourceivFn(GetProgramServiceID(program, resources_),
+ program_interface, index, prop_count, props,
+ bufsize, length, params);
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderPassthroughImpl::DoGetProgramResourceIndex(
+ GLuint program,
+ GLenum program_interface,
+ const char* name,
+ GLuint* index) {
+ *index = api()->glGetProgramResourceIndexFn(
+ GetProgramServiceID(program, resources_), program_interface, name);
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderPassthroughImpl::DoGetProgramResourceLocation(
+ GLuint program,
+ GLenum program_interface,
+ const char* name,
+ GLint* location) {
+ *location = api()->glGetProgramResourceLocationFn(
+ GetProgramServiceID(program, resources_), program_interface, name);
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderPassthroughImpl::DoGetProgramResourceName(
+ GLuint program,
+ GLenum program_interface,
+ GLuint index,
+ std::string* name) {
+ CheckErrorCallbackState();
+
+ GLuint service_id = GetProgramServiceID(program, resources_);
+ GLint max_name_length = 0;
+ api()->glGetProgramInterfaceivFn(service_id, program_interface,
+ GL_MAX_NAME_LENGTH, &max_name_length);
+ if (CheckErrorCallbackState()) {
+ return error::kNoError;
+ }
+
+ std::vector<GLchar> buffer(max_name_length, 0);
+ GLsizei length = 0;
+ api()->glGetProgramResourceNameFn(service_id, program_interface, index,
+ max_name_length, &length, buffer.data());
+ DCHECK_LE(length, max_name_length);
+ *name = length > 0 ? std::string(buffer.data(), length) : std::string();
+ return error::kNoError;
+}
+
error::Error GLES2DecoderPassthroughImpl::DoGetRenderbufferParameteriv(
GLenum target,
GLenum pname,
@@ -3106,6 +3185,20 @@ GLES2DecoderPassthroughImpl::DoRenderbufferStorageMultisampleCHROMIUM(
return error::kNoError;
}
+error::Error
+GLES2DecoderPassthroughImpl::DoRenderbufferStorageMultisampleAdvancedAMD(
+ GLenum target,
+ GLsizei samples,
+ GLsizei storageSamples,
+ GLenum internalformat,
+ GLsizei width,
+ GLsizei height) {
+ DCHECK(feature_info_->feature_flags().amd_framebuffer_multisample_advanced);
+ api()->glRenderbufferStorageMultisampleAdvancedAMDFn(
+ target, samples, storageSamples, internalformat, width, height);
+ return error::kNoError;
+}
+
error::Error GLES2DecoderPassthroughImpl::DoRenderbufferStorageMultisampleEXT(
GLenum target,
GLsizei samples,
@@ -3774,7 +3867,7 @@ error::Error GLES2DecoderPassthroughImpl::DoGetProgramInfoCHROMIUM(
if (size > 1 || parsed_service_name.IsArrayName()) {
for (GLint location_index = 1; location_index < size; location_index++) {
std::string array_element_name = parsed_service_name.base_name() + "[" +
- base::IntToString(location_index) +
+ base::NumberToString(location_index) +
"]";
int32_t element_location = api()->glGetUniformLocationFn(
service_program, array_element_name.c_str());
@@ -4877,8 +4970,7 @@ error::Error GLES2DecoderPassthroughImpl::DoBeginRasterCHROMIUM(
GLuint sk_color,
GLuint msaa_sample_count,
GLboolean can_use_lcd_text,
- GLint color_type,
- GLuint color_space_transfer_cache_id) {
+ GLint color_type) {
NOTIMPLEMENTED();
return error::kNoError;
}
@@ -4985,6 +5077,12 @@ GLES2DecoderPassthroughImpl::DoSetReadbackBufferShadowAllocationINTERNAL(
update.shm_offset = shm_offset;
update.size = size;
+ GLuint buffer_service_id = 0;
+ if (!resources_->buffer_id_map.GetServiceID(buffer_id, &buffer_service_id)) {
+ InsertError(GL_INVALID_OPERATION, "Invalid buffer ID");
+ return error::kNoError;
+ }
+
if (!update.shm) {
return error::kInvalidArguments;
}
@@ -5051,7 +5149,14 @@ error::Error GLES2DecoderPassthroughImpl::DoUnlockDiscardableTextureCHROMIUM(
error::Error
GLES2DecoderPassthroughImpl::DoCreateAndTexStorage2DSharedImageINTERNAL(
GLuint texture_client_id,
- const volatile GLbyte* mailbox) {
+ const volatile GLbyte* mailbox,
+ GLenum internalformat) {
+ // RGB emulation is not needed here.
+ if (internalformat != GL_NONE) {
+ InsertError(GL_INVALID_ENUM, "internal format not supported.");
+ return error::kNoError;
+ }
+
if (!texture_client_id ||
resources_->texture_id_map.HasClientID(texture_client_id)) {
InsertError(GL_INVALID_OPERATION, "invalid client ID");
diff --git a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough_handlers.cc b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough_handlers.cc
index 7d7e982db81..07aafbdfb11 100644
--- a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough_handlers.cc
+++ b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough_handlers.cc
@@ -483,6 +483,162 @@ error::Error GLES2DecoderPassthroughImpl::HandleGetProgramInfoLog(
return error::kNoError;
}
+error::Error GLES2DecoderPassthroughImpl::HandleGetProgramResourceiv(
+ uint32_t immediate_data_size,
+ const volatile void* cmd_data) {
+ if (!feature_info_->IsWebGL2ComputeContext()) {
+ return error::kUnknownCommand;
+ }
+ const volatile gles2::cmds::GetProgramResourceiv& c =
+ *static_cast<const volatile gles2::cmds::GetProgramResourceiv*>(cmd_data);
+ GLuint program = static_cast<GLuint>(c.program);
+ GLenum program_interface = static_cast<GLenum>(c.program_interface);
+ GLuint index = static_cast<GLuint>(c.index);
+ uint32_t props_bucket_id = c.props_bucket_id;
+ uint32_t params_shm_id = c.params_shm_id;
+ uint32_t params_shm_offset = c.params_shm_offset;
+
+ Bucket* bucket = GetBucket(props_bucket_id);
+ if (!bucket) {
+ return error::kInvalidArguments;
+ }
+ GLsizei prop_count = static_cast<GLsizei>(bucket->size() / sizeof(GLenum));
+ const GLenum* props = bucket->GetDataAs<const GLenum*>(0, bucket->size());
+ unsigned int buffer_size = 0;
+ typedef cmds::GetProgramResourceiv::Result Result;
+ Result* result = GetSharedMemoryAndSizeAs<Result*>(
+ params_shm_id, params_shm_offset, sizeof(Result), &buffer_size);
+ GLint* params = result ? result->GetData() : nullptr;
+ if (params == nullptr) {
+ return error::kOutOfBounds;
+ }
+ // Check that the client initialized the result.
+ if (result->size != 0) {
+ return error::kInvalidArguments;
+ }
+ GLsizei bufsize = Result::ComputeMaxResults(buffer_size);
+ GLsizei length = 0;
+ error::Error error = DoGetProgramResourceiv(
+ program, program_interface, index, prop_count, props, bufsize, &length,
+ params);
+ if (error != error::kNoError) {
+ return error;
+ }
+ if (length > bufsize) {
+ return error::kOutOfBounds;
+ }
+ result->SetNumResults(length);
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderPassthroughImpl::HandleGetProgramResourceIndex(
+ uint32_t immediate_data_size,
+ const volatile void* cmd_data) {
+ if (!feature_info_->IsWebGL2ComputeContext()) {
+ return error::kUnknownCommand;
+ }
+ const volatile gles2::cmds::GetProgramResourceIndex& c =
+ *static_cast<const volatile gles2::cmds::GetProgramResourceIndex*>(
+ cmd_data);
+ GLuint program = static_cast<GLuint>(c.program);
+ GLenum program_interface = static_cast<GLenum>(c.program_interface);
+ uint32_t name_bucket_id = c.name_bucket_id;
+ uint32_t index_shm_id = c.index_shm_id;
+ uint32_t index_shm_offset = c.index_shm_offset;
+
+ Bucket* bucket = GetBucket(name_bucket_id);
+ if (!bucket) {
+ return error::kInvalidArguments;
+ }
+ std::string name_str;
+ if (!bucket->GetAsString(&name_str)) {
+ return error::kInvalidArguments;
+ }
+ GLuint* index = GetSharedMemoryAs<GLuint*>(
+ index_shm_id, index_shm_offset, sizeof(GLuint));
+ if (!index) {
+ return error::kOutOfBounds;
+ }
+ if (*index != GL_INVALID_INDEX) {
+ return error::kInvalidArguments;
+ }
+ return DoGetProgramResourceIndex(
+ program, program_interface, name_str.c_str(), index);
+}
+
+error::Error GLES2DecoderPassthroughImpl::HandleGetProgramResourceLocation(
+ uint32_t immediate_data_size,
+ const volatile void* cmd_data) {
+ if (!feature_info_->IsWebGL2ComputeContext()) {
+ return error::kUnknownCommand;
+ }
+ const volatile gles2::cmds::GetProgramResourceLocation& c =
+ *static_cast<const volatile gles2::cmds::GetProgramResourceLocation*>(
+ cmd_data);
+ GLuint program = static_cast<GLuint>(c.program);
+ GLenum program_interface = static_cast<GLenum>(c.program_interface);
+ uint32_t name_bucket_id = c.name_bucket_id;
+ uint32_t location_shm_id = c.location_shm_id;
+ uint32_t location_shm_offset = c.location_shm_offset;
+
+ Bucket* bucket = GetBucket(name_bucket_id);
+ if (!bucket) {
+ return error::kInvalidArguments;
+ }
+ std::string name_str;
+ if (!bucket->GetAsString(&name_str)) {
+ return error::kInvalidArguments;
+ }
+ GLint* location = GetSharedMemoryAs<GLint*>(
+ location_shm_id, location_shm_offset, sizeof(GLint));
+ if (!location) {
+ return error::kOutOfBounds;
+ }
+ if (*location != -1) {
+ return error::kInvalidArguments;
+ }
+ return DoGetProgramResourceLocation(
+ program, program_interface, name_str.c_str(), location);
+}
+
+error::Error GLES2DecoderPassthroughImpl::HandleGetProgramResourceName(
+ uint32_t immediate_data_size,
+ const volatile void* cmd_data) {
+ if (!feature_info_->IsWebGL2ComputeContext()) {
+ return error::kUnknownCommand;
+ }
+ const volatile gles2::cmds::GetProgramResourceName& c =
+ *static_cast<const volatile gles2::cmds::GetProgramResourceName*>(
+ cmd_data);
+ GLuint program = static_cast<GLuint>(c.program);
+ GLenum program_interface = static_cast<GLenum>(c.program_interface);
+ GLuint index = static_cast<GLuint>(c.index);
+ uint32_t name_bucket_id = c.name_bucket_id;
+ uint32_t result_shm_id = c.result_shm_id;
+ uint32_t result_shm_offset = c.result_shm_offset;
+
+ typedef cmds::GetProgramResourceName::Result Result;
+ Result* result = GetSharedMemoryAs<Result*>(
+ result_shm_id, result_shm_offset, sizeof(*result));
+ if (!result) {
+ return error::kOutOfBounds;
+ }
+ // Check that the client initialized the result.
+ if (*result != 0) {
+ return error::kInvalidArguments;
+ }
+ std::string name;
+ error::Error error =
+ DoGetProgramResourceName(program, program_interface, index, &name);
+ if (error != error::kNoError) {
+ return error;
+ }
+ *result = 1;
+ Bucket* bucket = CreateBucket(name_bucket_id);
+ bucket->SetFromString(name.c_str());
+ return error::kNoError;
+}
+
error::Error GLES2DecoderPassthroughImpl::HandleGetShaderInfoLog(
uint32_t immediate_data_size,
const volatile void* cmd_data) {
diff --git a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough_handlers_autogen.cc b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough_handlers_autogen.cc
index 2339a57ad66..7c21c2986bd 100644
--- a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough_handlers_autogen.cc
+++ b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough_handlers_autogen.cc
@@ -3701,6 +3701,31 @@ GLES2DecoderPassthroughImpl::HandleRenderbufferStorageMultisampleCHROMIUM(
}
error::Error
+GLES2DecoderPassthroughImpl::HandleRenderbufferStorageMultisampleAdvancedAMD(
+ uint32_t immediate_data_size,
+ const volatile void* cmd_data) {
+ const volatile gles2::cmds::RenderbufferStorageMultisampleAdvancedAMD& c =
+ *static_cast<const volatile gles2::cmds::
+ RenderbufferStorageMultisampleAdvancedAMD*>(cmd_data);
+ if (!features().amd_framebuffer_multisample_advanced) {
+ return error::kUnknownCommand;
+ }
+
+ GLenum target = static_cast<GLenum>(c.target);
+ GLsizei samples = static_cast<GLsizei>(c.samples);
+ GLsizei storageSamples = static_cast<GLsizei>(c.storageSamples);
+ GLenum internalformat = static_cast<GLenum>(c.internalformat);
+ GLsizei width = static_cast<GLsizei>(c.width);
+ GLsizei height = static_cast<GLsizei>(c.height);
+ error::Error error = DoRenderbufferStorageMultisampleAdvancedAMD(
+ target, samples, storageSamples, internalformat, width, height);
+ if (error != error::kNoError) {
+ return error;
+ }
+ return error::kNoError;
+}
+
+error::Error
GLES2DecoderPassthroughImpl::HandleRenderbufferStorageMultisampleEXT(
uint32_t immediate_data_size,
const volatile void* cmd_data) {
@@ -3995,6 +4020,40 @@ error::Error GLES2DecoderPassthroughImpl::HandleDispatchCompute(
return error::kNoError;
}
+error::Error GLES2DecoderPassthroughImpl::HandleGetProgramInterfaceiv(
+ uint32_t immediate_data_size,
+ const volatile void* cmd_data) {
+ if (!feature_info_->IsWebGL2ComputeContext())
+ return error::kUnknownCommand;
+ const volatile gles2::cmds::GetProgramInterfaceiv& c =
+ *static_cast<const volatile gles2::cmds::GetProgramInterfaceiv*>(
+ cmd_data);
+ GLuint program = c.program;
+ GLenum program_interface = static_cast<GLenum>(c.program_interface);
+ GLenum pname = static_cast<GLenum>(c.pname);
+ unsigned int buffer_size = 0;
+ typedef cmds::GetProgramInterfaceiv::Result Result;
+ Result* result = GetSharedMemoryAndSizeAs<Result*>(
+ c.params_shm_id, c.params_shm_offset, sizeof(Result), &buffer_size);
+ GLint* params = result ? result->GetData() : nullptr;
+ if (params == nullptr) {
+ return error::kOutOfBounds;
+ }
+ GLsizei bufsize = Result::ComputeMaxResults(buffer_size);
+ GLsizei written_values = 0;
+ GLsizei* length = &written_values;
+ error::Error error = DoGetProgramInterfaceiv(program, program_interface,
+ pname, bufsize, length, params);
+ if (error != error::kNoError) {
+ return error;
+ }
+ if (written_values > bufsize) {
+ return error::kOutOfBounds;
+ }
+ result->SetNumResults(written_values);
+ return error::kNoError;
+}
+
error::Error GLES2DecoderPassthroughImpl::HandleMemoryBarrierEXT(
uint32_t immediate_data_size,
const volatile void* cmd_data) {
@@ -4840,11 +4899,12 @@ error::Error GLES2DecoderPassthroughImpl::
}
volatile const GLbyte* mailbox = GetImmediateDataAs<volatile const GLbyte*>(
c, mailbox_size, immediate_data_size);
+ GLenum internalformat = static_cast<GLenum>(c.internalformat);
if (mailbox == nullptr) {
return error::kOutOfBounds;
}
- error::Error error =
- DoCreateAndTexStorage2DSharedImageINTERNAL(texture, mailbox);
+ error::Error error = DoCreateAndTexStorage2DSharedImageINTERNAL(
+ texture, mailbox, internalformat);
if (error != error::kNoError) {
return error;
}
diff --git a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough_unittest_commands.cc b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough_unittest_commands.cc
index 3fbe718c1f5..3fadfd90c46 100644
--- a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough_unittest_commands.cc
+++ b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough_unittest_commands.cc
@@ -15,33 +15,33 @@ using namespace cmds;
template <typename T>
class GLES2DecoderPassthroughFixedCommandTest
: public GLES2DecoderPassthroughTest {};
-TYPED_TEST_CASE_P(GLES2DecoderPassthroughFixedCommandTest);
+TYPED_TEST_SUITE_P(GLES2DecoderPassthroughFixedCommandTest);
TYPED_TEST_P(GLES2DecoderPassthroughFixedCommandTest, InvalidCommand) {
TypeParam cmd;
cmd.SetHeader();
EXPECT_EQ(error::kUnknownCommand, this->ExecuteCmd(cmd));
}
-REGISTER_TYPED_TEST_CASE_P(GLES2DecoderPassthroughFixedCommandTest,
- InvalidCommand);
+REGISTER_TYPED_TEST_SUITE_P(GLES2DecoderPassthroughFixedCommandTest,
+ InvalidCommand);
template <typename T>
class GLES2DecoderPassthroughImmediateNoArgCommandTest
: public GLES2DecoderPassthroughTest {};
-TYPED_TEST_CASE_P(GLES2DecoderPassthroughImmediateNoArgCommandTest);
+TYPED_TEST_SUITE_P(GLES2DecoderPassthroughImmediateNoArgCommandTest);
TYPED_TEST_P(GLES2DecoderPassthroughImmediateNoArgCommandTest, InvalidCommand) {
TypeParam& cmd = *(this->template GetImmediateAs<TypeParam>());
cmd.SetHeader();
EXPECT_EQ(error::kUnknownCommand, this->ExecuteImmediateCmd(cmd, 64));
}
-REGISTER_TYPED_TEST_CASE_P(GLES2DecoderPassthroughImmediateNoArgCommandTest,
- InvalidCommand);
+REGISTER_TYPED_TEST_SUITE_P(GLES2DecoderPassthroughImmediateNoArgCommandTest,
+ InvalidCommand);
template <typename T>
class GLES2DecoderPassthroughImmediateSizeArgCommandTest
: public GLES2DecoderPassthroughTest {};
-TYPED_TEST_CASE_P(GLES2DecoderPassthroughImmediateSizeArgCommandTest);
+TYPED_TEST_SUITE_P(GLES2DecoderPassthroughImmediateSizeArgCommandTest);
TYPED_TEST_P(GLES2DecoderPassthroughImmediateSizeArgCommandTest,
InvalidCommand) {
@@ -49,8 +49,8 @@ TYPED_TEST_P(GLES2DecoderPassthroughImmediateSizeArgCommandTest,
cmd.SetHeader(0);
EXPECT_EQ(error::kUnknownCommand, this->ExecuteImmediateCmd(cmd, 0));
}
-REGISTER_TYPED_TEST_CASE_P(GLES2DecoderPassthroughImmediateSizeArgCommandTest,
- InvalidCommand);
+REGISTER_TYPED_TEST_SUITE_P(GLES2DecoderPassthroughImmediateSizeArgCommandTest,
+ InvalidCommand);
using ES3FixedCommandTypes0 =
::testing::Types<BindBufferBase,
@@ -143,16 +143,16 @@ using ES3ImmediateSizeArgCommandTypes0 =
UniformMatrix4x2fvImmediate,
UniformMatrix4x3fvImmediate>;
-INSTANTIATE_TYPED_TEST_CASE_P(0,
- GLES2DecoderPassthroughFixedCommandTest,
- ES3FixedCommandTypes0);
-INSTANTIATE_TYPED_TEST_CASE_P(1,
- GLES2DecoderPassthroughFixedCommandTest,
- ES3FixedCommandTypes1);
-INSTANTIATE_TYPED_TEST_CASE_P(0,
- GLES2DecoderPassthroughImmediateNoArgCommandTest,
- ES3ImmediateNoArgCommandTypes0);
-INSTANTIATE_TYPED_TEST_CASE_P(
+INSTANTIATE_TYPED_TEST_SUITE_P(0,
+ GLES2DecoderPassthroughFixedCommandTest,
+ ES3FixedCommandTypes0);
+INSTANTIATE_TYPED_TEST_SUITE_P(1,
+ GLES2DecoderPassthroughFixedCommandTest,
+ ES3FixedCommandTypes1);
+INSTANTIATE_TYPED_TEST_SUITE_P(0,
+ GLES2DecoderPassthroughImmediateNoArgCommandTest,
+ ES3ImmediateNoArgCommandTypes0);
+INSTANTIATE_TYPED_TEST_SUITE_P(
0,
GLES2DecoderPassthroughImmediateSizeArgCommandTest,
ES3ImmediateSizeArgCommandTypes0);
diff --git a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough_unittest_textures.cc b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough_unittest_textures.cc
index eac90b80d02..9a221b58b29 100644
--- a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough_unittest_textures.cc
+++ b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough_unittest_textures.cc
@@ -101,8 +101,9 @@ TEST_F(GLES2DecoderPassthroughTest, CreateAndTexStorage2DSharedImageCHROMIUM) {
CreateAndTexStorage2DSharedImageINTERNALImmediate& cmd =
*GetImmediateAs<CreateAndTexStorage2DSharedImageINTERNALImmediate>();
- cmd.Init(kNewClientId, mailbox.name);
- EXPECT_EQ(error::kNoError, ExecuteImmediateCmd(cmd, sizeof(mailbox.name)));
+ cmd.Init(kNewClientId, mailbox.name, GL_NONE);
+ EXPECT_EQ(error::kNoError,
+ ExecuteImmediateCmd(cmd, sizeof(mailbox.name) + sizeof(GLenum)));
EXPECT_EQ(GL_NO_ERROR, GetGLError());
// Make sure the new client ID is associated with the provided service ID.
@@ -136,8 +137,9 @@ TEST_F(GLES2DecoderPassthroughTest,
Mailbox mailbox;
CreateAndTexStorage2DSharedImageINTERNALImmediate& cmd =
*GetImmediateAs<CreateAndTexStorage2DSharedImageINTERNALImmediate>();
- cmd.Init(kNewClientId, mailbox.name);
- EXPECT_EQ(error::kNoError, ExecuteImmediateCmd(cmd, sizeof(mailbox.name)));
+ cmd.Init(kNewClientId, mailbox.name, GL_NONE);
+ EXPECT_EQ(error::kNoError,
+ ExecuteImmediateCmd(cmd, sizeof(mailbox.name) + sizeof(GLenum)));
// CreateAndTexStorage2DSharedImage should fail if the mailbox is invalid.
EXPECT_EQ(GL_INVALID_OPERATION, GetGLError());
@@ -165,8 +167,9 @@ TEST_F(GLES2DecoderPassthroughTest,
{
CreateAndTexStorage2DSharedImageINTERNALImmediate& cmd =
*GetImmediateAs<CreateAndTexStorage2DSharedImageINTERNALImmediate>();
- cmd.Init(kNewClientId, mailbox.name);
- EXPECT_EQ(error::kNoError, ExecuteImmediateCmd(cmd, sizeof(mailbox.name)));
+ cmd.Init(kNewClientId, mailbox.name, GL_NONE);
+ EXPECT_EQ(error::kNoError,
+ ExecuteImmediateCmd(cmd, sizeof(mailbox.name) + sizeof(GLenum)));
EXPECT_EQ(GL_NO_ERROR, GetGLError());
}
@@ -175,8 +178,9 @@ TEST_F(GLES2DecoderPassthroughTest,
{
CreateAndTexStorage2DSharedImageINTERNALImmediate& cmd =
*GetImmediateAs<CreateAndTexStorage2DSharedImageINTERNALImmediate>();
- cmd.Init(kNewClientId, mailbox.name);
- EXPECT_EQ(error::kNoError, ExecuteImmediateCmd(cmd, sizeof(mailbox.name)));
+ cmd.Init(kNewClientId, mailbox.name, GL_NONE);
+ EXPECT_EQ(error::kNoError,
+ ExecuteImmediateCmd(cmd, sizeof(mailbox.name) + sizeof(GLenum)));
EXPECT_EQ(GL_INVALID_OPERATION, GetGLError());
}
@@ -196,8 +200,9 @@ TEST_F(GLES2DecoderPassthroughTest, BeginEndSharedImageAccessCRHOMIUM) {
CreateAndTexStorage2DSharedImageINTERNALImmediate& cmd =
*GetImmediateAs<CreateAndTexStorage2DSharedImageINTERNALImmediate>();
- cmd.Init(kNewClientId, mailbox.name);
- EXPECT_EQ(error::kNoError, ExecuteImmediateCmd(cmd, sizeof(mailbox.name)));
+ cmd.Init(kNewClientId, mailbox.name, GL_NONE);
+ EXPECT_EQ(error::kNoError,
+ ExecuteImmediateCmd(cmd, sizeof(mailbox.name) + sizeof(GLenum)));
EXPECT_EQ(GL_NO_ERROR, GetGLError());
// Begin/end read access for the created image.
@@ -259,8 +264,9 @@ TEST_F(GLES2DecoderPassthroughTest,
CreateAndTexStorage2DSharedImageINTERNALImmediate& cmd =
*GetImmediateAs<CreateAndTexStorage2DSharedImageINTERNALImmediate>();
- cmd.Init(kNewClientId, mailbox.name);
- EXPECT_EQ(error::kNoError, ExecuteImmediateCmd(cmd, sizeof(mailbox.name)));
+ cmd.Init(kNewClientId, mailbox.name, GL_NONE);
+ EXPECT_EQ(error::kNoError,
+ ExecuteImmediateCmd(cmd, sizeof(mailbox.name) + sizeof(GLenum)));
EXPECT_EQ(GL_NO_ERROR, GetGLError());
// Try to begin access with a shared image representation that fails
diff --git a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest.cc b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest.cc
index a982b5ceb93..af6398cc69c 100644
--- a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest.cc
+++ b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest.cc
@@ -7,6 +7,7 @@
#include <stddef.h>
#include <stdint.h>
+#include "base/bind.h"
#include "base/command_line.h"
#include "base/stl_util.h"
#include "base/strings/string_number_conversions.h"
@@ -465,9 +466,8 @@ TEST_P(GLES3DecoderTest, GetInternalformativValidArgsSamples) {
GL_NUM_SAMPLE_COUNTS, 1, _))
.WillOnce(SetArgPointee<4>(kNumSampleCounts))
.RetiresOnSaturation();
- EXPECT_CALL(*gl_, GetInternalformativ(GL_RENDERBUFFER, GL_RGBA8,
- GL_SAMPLES, kNumSampleCounts,
- result->GetData()))
+ EXPECT_CALL(*gl_, GetInternalformativ(GL_RENDERBUFFER, GL_RGBA8, GL_SAMPLES,
+ kNumSampleCounts, _))
.Times(1)
.RetiresOnSaturation();
result->size = 0;
@@ -1852,31 +1852,43 @@ void GLES3DecoderRGBBackbufferTest::SetUp() {
SetupDefaultProgram();
}
-INSTANTIATE_TEST_CASE_P(Service, GLES2DecoderTest, ::testing::Bool());
+INSTANTIATE_TEST_SUITE_P(Service, GLES2DecoderTest, ::testing::Bool());
+
+INSTANTIATE_TEST_SUITE_P(Service,
+ GLES2DecoderWithShaderTest,
+ ::testing::Bool());
-INSTANTIATE_TEST_CASE_P(Service, GLES2DecoderWithShaderTest, ::testing::Bool());
+INSTANTIATE_TEST_SUITE_P(Service,
+ GLES2DecoderManualInitTest,
+ ::testing::Bool());
-INSTANTIATE_TEST_CASE_P(Service, GLES2DecoderManualInitTest, ::testing::Bool());
+INSTANTIATE_TEST_SUITE_P(Service,
+ GLES2DecoderRGBBackbufferTest,
+ ::testing::Bool());
-INSTANTIATE_TEST_CASE_P(Service,
- GLES2DecoderRGBBackbufferTest,
- ::testing::Bool());
+INSTANTIATE_TEST_SUITE_P(Service,
+ GLES2DecoderDoCommandsTest,
+ ::testing::Bool());
-INSTANTIATE_TEST_CASE_P(Service, GLES2DecoderDoCommandsTest, ::testing::Bool());
+INSTANTIATE_TEST_SUITE_P(Service,
+ GLES2DecoderDescheduleUntilFinishedTest,
+ ::testing::Bool());
-INSTANTIATE_TEST_CASE_P(Service,
- GLES2DecoderDescheduleUntilFinishedTest,
- ::testing::Bool());
+INSTANTIATE_TEST_SUITE_P(Service, GLES3DecoderTest, ::testing::Bool());
-INSTANTIATE_TEST_CASE_P(Service, GLES3DecoderTest, ::testing::Bool());
+INSTANTIATE_TEST_SUITE_P(Service, WebGL2DecoderTest, ::testing::Bool());
-INSTANTIATE_TEST_CASE_P(Service, GLES3DecoderWithShaderTest, ::testing::Bool());
+INSTANTIATE_TEST_SUITE_P(Service,
+ GLES3DecoderWithShaderTest,
+ ::testing::Bool());
-INSTANTIATE_TEST_CASE_P(Service, GLES3DecoderManualInitTest, ::testing::Bool());
+INSTANTIATE_TEST_SUITE_P(Service,
+ GLES3DecoderManualInitTest,
+ ::testing::Bool());
-INSTANTIATE_TEST_CASE_P(Service,
- GLES3DecoderRGBBackbufferTest,
- ::testing::Bool());
+INSTANTIATE_TEST_SUITE_P(Service,
+ GLES3DecoderRGBBackbufferTest,
+ ::testing::Bool());
} // namespace gles2
} // namespace gpu
diff --git a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_0_autogen.h b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_0_autogen.h
index 96292308580..09a217d1fde 100644
--- a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_0_autogen.h
+++ b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_0_autogen.h
@@ -22,10 +22,10 @@ void GLES2DecoderTestBase::SetupInitCapabilitiesExpectations(bool es3_capable) {
ExpectEnableDisable(GL_SAMPLE_COVERAGE, false);
ExpectEnableDisable(GL_SCISSOR_TEST, false);
ExpectEnableDisable(GL_STENCIL_TEST, false);
- if (group_->feature_info()->feature_flags().ext_multisample_compatibility) {
+ if (feature_info()->feature_flags().ext_multisample_compatibility) {
ExpectEnableDisable(GL_MULTISAMPLE_EXT, true);
}
- if (group_->feature_info()->feature_flags().ext_multisample_compatibility) {
+ if (feature_info()->feature_flags().ext_multisample_compatibility) {
ExpectEnableDisable(GL_SAMPLE_ALPHA_TO_ONE_EXT, false);
}
if (es3_capable) {
@@ -35,7 +35,7 @@ void GLES2DecoderTestBase::SetupInitCapabilitiesExpectations(bool es3_capable) {
}
void GLES2DecoderTestBase::SetupInitStateExpectations(bool es3_capable) {
- auto* feature_info_ = group_->feature_info();
+ auto* feature_info_ = feature_info();
EXPECT_CALL(*gl_, BlendColor(0.0f, 0.0f, 0.0f, 0.0f))
.Times(1)
.RetiresOnSaturation();
@@ -53,9 +53,7 @@ void GLES2DecoderTestBase::SetupInitStateExpectations(bool es3_capable) {
EXPECT_CALL(*gl_, ColorMask(true, true, true, true))
.Times(1)
.RetiresOnSaturation();
- if (group_->feature_info()
- ->feature_flags()
- .chromium_framebuffer_mixed_samples) {
+ if (feature_info()->feature_flags().chromium_framebuffer_mixed_samples) {
EXPECT_CALL(*gl_, CoverageModulationNV(GL_NONE))
.Times(1)
.RetiresOnSaturation();
@@ -92,7 +90,7 @@ void GLES2DecoderTestBase::SetupInitStateExpectations(bool es3_capable) {
.Times(1)
.RetiresOnSaturation();
}
- if (group_->feature_info()->feature_flags().chromium_path_rendering) {
+ if (feature_info()->feature_flags().chromium_path_rendering) {
EXPECT_CALL(*gl_, PathStencilFuncNV(GL_ALWAYS, 0, 0xFFFFFFFFU))
.Times(1)
.RetiresOnSaturation();
diff --git a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_1.cc b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_1.cc
index 9ea6081e9bf..bd8874bf3b2 100644
--- a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_1.cc
+++ b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_1.cc
@@ -46,8 +46,8 @@ class GLES3DecoderTest1 : public GLES2DecoderTest1 {
}
};
-INSTANTIATE_TEST_CASE_P(Service, GLES2DecoderTest1, ::testing::Bool());
-INSTANTIATE_TEST_CASE_P(Service, GLES3DecoderTest1, ::testing::Bool());
+INSTANTIATE_TEST_SUITE_P(Service, GLES2DecoderTest1, ::testing::Bool());
+INSTANTIATE_TEST_SUITE_P(Service, GLES3DecoderTest1, ::testing::Bool());
template <>
void GLES2DecoderTestBase::SpecializedSetup<cmds::GenerateMipmap, 0>(
@@ -61,7 +61,7 @@ void GLES2DecoderTestBase::SpecializedSetup<cmds::GenerateMipmap, 0>(
.WillOnce(Return(GL_NO_ERROR))
.RetiresOnSaturation();
}
-};
+}
template <>
void GLES2DecoderTestBase::SpecializedSetup<cmds::CheckFramebufferStatus, 0>(
@@ -76,14 +76,14 @@ void GLES2DecoderTestBase::SpecializedSetup<cmds::CheckFramebufferStatus, 0>(
DoFramebufferRenderbuffer(
GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_RENDERBUFFER,
client_renderbuffer_id_, kServiceRenderbufferId, GL_NO_ERROR);
-};
+}
template <>
void GLES2DecoderTestBase::SpecializedSetup<cmds::Clear, 0>(bool valid) {
if (valid) {
SetupExpectationsForApplyingDefaultDirtyState();
}
-};
+}
template <>
void GLES2DecoderTestBase::SpecializedSetup<cmds::ColorMask, 0>(
@@ -92,7 +92,7 @@ void GLES2DecoderTestBase::SpecializedSetup<cmds::ColorMask, 0>(
// will be considered RGB.
DoBindFramebuffer(GL_FRAMEBUFFER, client_framebuffer_id_,
kServiceFramebufferId);
-};
+}
template <>
void GLES2DecoderTestBase::SpecializedSetup<cmds::CopyTexImage2D, 0>(
@@ -103,7 +103,7 @@ void GLES2DecoderTestBase::SpecializedSetup<cmds::CopyTexImage2D, 0>(
.WillOnce(Return(GL_NO_ERROR))
.RetiresOnSaturation();
}
-};
+}
template <>
void GLES2DecoderTestBase::SpecializedSetup<cmds::CopyTexSubImage2D, 0>(
@@ -113,7 +113,7 @@ void GLES2DecoderTestBase::SpecializedSetup<cmds::CopyTexSubImage2D, 0>(
DoTexImage2D(GL_TEXTURE_2D, 2, GL_RGBA, 16, 16, 0, GL_RGBA,
GL_UNSIGNED_BYTE, shared_memory_id_, kSharedMemoryOffset);
}
-};
+}
template <>
void GLES2DecoderTestBase::SpecializedSetup<cmds::DetachShader, 0>(bool valid) {
@@ -126,7 +126,7 @@ void GLES2DecoderTestBase::SpecializedSetup<cmds::DetachShader, 0>(bool valid) {
attach_cmd.Init(client_program_id_, client_shader_id_);
EXPECT_EQ(error::kNoError, ExecuteCmd(attach_cmd));
}
-};
+}
template <>
void GLES2DecoderTestBase::SpecializedSetup<cmds::FramebufferRenderbuffer, 0>(
@@ -141,7 +141,7 @@ void GLES2DecoderTestBase::SpecializedSetup<cmds::FramebufferRenderbuffer, 0>(
.WillOnce(Return(GL_NO_ERROR))
.RetiresOnSaturation();
}
-};
+}
template <>
void GLES2DecoderTestBase::SpecializedSetup<cmds::FramebufferTextureLayer, 0>(
@@ -154,26 +154,26 @@ void GLES2DecoderTestBase::SpecializedSetup<cmds::FramebufferTextureLayer, 0>(
.WillOnce(Return(GL_NO_ERROR))
.RetiresOnSaturation();
}
-};
+}
template <>
void GLES2DecoderTestBase::SpecializedSetup<
cmds::GetBufferParameteri64v, 0>(bool /* valid */) {
DoBindBuffer(GL_ARRAY_BUFFER, client_buffer_id_, kServiceBufferId);
-};
+}
template <>
void GLES2DecoderTestBase::SpecializedSetup<
cmds::GetBufferParameteriv, 0>(bool /* valid */) {
DoBindBuffer(GL_ARRAY_BUFFER, client_buffer_id_, kServiceBufferId);
-};
+}
template <>
void GLES2DecoderTestBase::SpecializedSetup<
cmds::GetFramebufferAttachmentParameteriv, 0>(bool /* valid */) {
DoBindFramebuffer(GL_FRAMEBUFFER, client_framebuffer_id_,
kServiceFramebufferId);
-};
+}
template <>
void GLES2DecoderTestBase::SpecializedSetup<cmds::GetProgramiv, 0>(
diff --git a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_2.cc b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_2.cc
index e1e262e6090..4c5b2ffd507 100644
--- a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_2.cc
+++ b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_2.cc
@@ -574,8 +574,8 @@ class GLES3DecoderTest2 : public GLES2DecoderTest2 {
}
};
-INSTANTIATE_TEST_CASE_P(Service, GLES2DecoderTest2, ::testing::Bool());
-INSTANTIATE_TEST_CASE_P(Service, GLES3DecoderTest2, ::testing::Bool());
+INSTANTIATE_TEST_SUITE_P(Service, GLES2DecoderTest2, ::testing::Bool());
+INSTANTIATE_TEST_SUITE_P(Service, GLES3DecoderTest2, ::testing::Bool());
template <>
void GLES2DecoderTestBase::SpecializedSetup<cmds::GetProgramInfoLog, 0>(
@@ -635,7 +635,7 @@ void GLES2DecoderTestBase::SpecializedSetup<cmds::GetProgramInfoLog, 0>(
EXPECT_EQ(error::kNoError, ExecuteCmd(attach_cmd));
program->Link(nullptr, Program::kCountOnlyStaticallyUsed, this);
-};
+}
template <>
void GLES2DecoderTestBase::SpecializedSetup<
@@ -643,7 +643,7 @@ void GLES2DecoderTestBase::SpecializedSetup<
bool /* valid */) {
DoBindRenderbuffer(GL_RENDERBUFFER, client_renderbuffer_id_,
kServiceRenderbufferId);
-};
+}
template <>
void GLES2DecoderTestBase::SpecializedSetup<cmds::RenderbufferStorage, 0>(
@@ -677,7 +677,7 @@ void GLES2DecoderTestBase::SpecializedSetup<cmds::GenQueriesEXTImmediate, 0>(
EXPECT_EQ(error::kNoError,
ExecuteImmediateCmd(cmd, sizeof(client_query_id_)));
}
-};
+}
template <>
void GLES2DecoderTestBase::SpecializedSetup<cmds::DeleteQueriesEXTImmediate, 0>(
@@ -690,7 +690,7 @@ void GLES2DecoderTestBase::SpecializedSetup<cmds::DeleteQueriesEXTImmediate, 0>(
EXPECT_EQ(error::kNoError,
ExecuteImmediateCmd(cmd, sizeof(client_query_id_)));
}
-};
+}
template <>
void GLES2DecoderTestBase::SpecializedSetup<cmds::LinkProgram, 0>(
@@ -737,121 +737,121 @@ void GLES2DecoderTestBase::SpecializedSetup<cmds::LinkProgram, 0>(
attach_cmd.Init(client_program_id_, kClientFragmentShaderId);
EXPECT_EQ(error::kNoError, ExecuteCmd(attach_cmd));
-};
+}
template <>
void GLES2DecoderTestBase::SpecializedSetup<cmds::Uniform1f, 0>(
bool /* valid */) {
SetupShaderForUniform(GL_FLOAT);
-};
+}
template <>
void GLES2DecoderTestBase::SpecializedSetup<cmds::Uniform1fvImmediate, 0>(
bool /* valid */) {
SetupShaderForUniform(GL_FLOAT);
-};
+}
template <>
void GLES2DecoderTestBase::SpecializedSetup<cmds::Uniform1ivImmediate, 0>(
bool /* valid */) {
SetupShaderForUniform(GL_INT);
-};
+}
template <>
void GLES2DecoderTestBase::SpecializedSetup<cmds::Uniform2f, 0>(
bool /* valid */) {
SetupShaderForUniform(GL_FLOAT_VEC2);
-};
+}
template <>
void GLES2DecoderTestBase::SpecializedSetup<cmds::Uniform2i, 0>(
bool /* valid */) {
SetupShaderForUniform(GL_INT_VEC2);
-};
+}
template <>
void GLES2DecoderTestBase::SpecializedSetup<cmds::Uniform2fvImmediate, 0>(
bool /* valid */) {
SetupShaderForUniform(GL_FLOAT_VEC2);
-};
+}
template <>
void GLES2DecoderTestBase::SpecializedSetup<cmds::Uniform2ivImmediate, 0>(
bool /* valid */) {
SetupShaderForUniform(GL_INT_VEC2);
-};
+}
template <>
void GLES2DecoderTestBase::SpecializedSetup<cmds::Uniform3f, 0>(
bool /* valid */) {
SetupShaderForUniform(GL_FLOAT_VEC3);
-};
+}
template <>
void GLES2DecoderTestBase::SpecializedSetup<cmds::Uniform3i, 0>(
bool /* valid */) {
SetupShaderForUniform(GL_INT_VEC3);
-};
+}
template <>
void GLES2DecoderTestBase::SpecializedSetup<cmds::Uniform3fvImmediate, 0>(
bool /* valid */) {
SetupShaderForUniform(GL_FLOAT_VEC3);
-};
+}
template <>
void GLES2DecoderTestBase::SpecializedSetup<cmds::Uniform3ivImmediate, 0>(
bool /* valid */) {
SetupShaderForUniform(GL_INT_VEC3);
-};
+}
template <>
void GLES2DecoderTestBase::SpecializedSetup<cmds::Uniform4f, 0>(
bool /* valid */) {
SetupShaderForUniform(GL_FLOAT_VEC4);
-};
+}
template <>
void GLES2DecoderTestBase::SpecializedSetup<cmds::Uniform4i, 0>(
bool /* valid */) {
SetupShaderForUniform(GL_INT_VEC4);
-};
+}
template <>
void GLES2DecoderTestBase::SpecializedSetup<cmds::Uniform4fvImmediate, 0>(
bool /* valid */) {
SetupShaderForUniform(GL_FLOAT_VEC4);
-};
+}
template <>
void GLES2DecoderTestBase::SpecializedSetup<cmds::UniformMatrix2fvImmediate, 0>(
bool /* valid */) {
SetupShaderForUniform(GL_FLOAT_MAT2);
-};
+}
template <>
void GLES2DecoderTestBase::SpecializedSetup<cmds::TexParameterf, 0>(
bool /* valid */) {
DoBindTexture(GL_TEXTURE_2D, client_texture_id_, kServiceTextureId);
-};
+}
template <>
void GLES2DecoderTestBase::SpecializedSetup<cmds::TexParameteri, 0>(
bool /* valid */) {
DoBindTexture(GL_TEXTURE_2D, client_texture_id_, kServiceTextureId);
-};
+}
template <>
void GLES2DecoderTestBase::SpecializedSetup<cmds::TexParameterfvImmediate, 0>(
bool /* valid */) {
DoBindTexture(GL_TEXTURE_2D, client_texture_id_, kServiceTextureId);
-};
+}
template <>
void GLES2DecoderTestBase::SpecializedSetup<cmds::TexParameterivImmediate, 0>(
bool /* valid */) {
DoBindTexture(GL_TEXTURE_2D, client_texture_id_, kServiceTextureId);
-};
+}
template <>
void GLES2DecoderTestBase::SpecializedSetup<cmds::GetVertexAttribiv, 0>(
@@ -864,7 +864,7 @@ void GLES2DecoderTestBase::SpecializedSetup<cmds::GetVertexAttribiv, 0>(
.WillOnce(Return(GL_NO_ERROR))
.RetiresOnSaturation();
}
-};
+}
template <>
void GLES2DecoderTestBase::SpecializedSetup<cmds::GetVertexAttribfv, 0>(
@@ -877,7 +877,7 @@ void GLES2DecoderTestBase::SpecializedSetup<cmds::GetVertexAttribfv, 0>(
.WillOnce(Return(GL_NO_ERROR))
.RetiresOnSaturation();
}
-};
+}
template <>
void GLES2DecoderTestBase::SpecializedSetup<cmds::GetVertexAttribIiv, 0>(
@@ -890,7 +890,7 @@ void GLES2DecoderTestBase::SpecializedSetup<cmds::GetVertexAttribIiv, 0>(
.WillOnce(Return(GL_NO_ERROR))
.RetiresOnSaturation();
}
-};
+}
template <>
void GLES2DecoderTestBase::SpecializedSetup<cmds::GetVertexAttribIuiv, 0>(
@@ -903,8 +903,7 @@ void GLES2DecoderTestBase::SpecializedSetup<cmds::GetVertexAttribIuiv, 0>(
.WillOnce(Return(GL_NO_ERROR))
.RetiresOnSaturation();
}
-};
-
+}
#include "gpu/command_buffer/service/gles2_cmd_decoder_unittest_2_autogen.h"
diff --git a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_3.cc b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_3.cc
index 6bace907132..75bad056c21 100644
--- a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_3.cc
+++ b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_3.cc
@@ -49,62 +49,62 @@ class GLES3DecoderTest3 : public GLES2DecoderTest3 {
}
};
-INSTANTIATE_TEST_CASE_P(Service, GLES2DecoderTest3, ::testing::Bool());
-INSTANTIATE_TEST_CASE_P(Service, GLES3DecoderTest3, ::testing::Bool());
+INSTANTIATE_TEST_SUITE_P(Service, GLES2DecoderTest3, ::testing::Bool());
+INSTANTIATE_TEST_SUITE_P(Service, GLES3DecoderTest3, ::testing::Bool());
template <>
void GLES2DecoderTestBase::SpecializedSetup<cmds::Uniform4ivImmediate, 0>(
bool /* valid */) {
SetupShaderForUniform(GL_INT_VEC4);
-};
+}
template <>
void GLES2DecoderTestBase::SpecializedSetup<UniformMatrix3fvImmediate, 0>(
bool /* valid */) {
SetupShaderForUniform(GL_FLOAT_MAT3);
-};
+}
template <>
void GLES2DecoderTestBase::SpecializedSetup<UniformMatrix4fvImmediate, 0>(
bool /* valid */) {
SetupShaderForUniform(GL_FLOAT_MAT4);
-};
+}
template <>
void GLES2DecoderTestBase::SpecializedSetup<UniformMatrix2x3fvImmediate, 0>(
bool /* valid */) {
SetupShaderForUniform(GL_FLOAT_MAT2x3);
-};
+}
template <>
void GLES2DecoderTestBase::SpecializedSetup<
UniformMatrix2x4fvImmediate, 0>(bool /* valid */) {
SetupShaderForUniform(GL_FLOAT_MAT2x4);
-};
+}
template <>
void GLES2DecoderTestBase::SpecializedSetup<
UniformMatrix3x2fvImmediate, 0>(bool /* valid */) {
SetupShaderForUniform(GL_FLOAT_MAT3x2);
-};
+}
template <>
void GLES2DecoderTestBase::SpecializedSetup<
UniformMatrix3x4fvImmediate, 0>(bool /* valid */) {
SetupShaderForUniform(GL_FLOAT_MAT3x4);
-};
+}
template <>
void GLES2DecoderTestBase::SpecializedSetup<
UniformMatrix4x2fvImmediate, 0>(bool /* valid */) {
SetupShaderForUniform(GL_FLOAT_MAT4x2);
-};
+}
template <>
void GLES2DecoderTestBase::SpecializedSetup<
UniformMatrix4x3fvImmediate, 0>(bool /* valid */) {
SetupShaderForUniform(GL_FLOAT_MAT4x3);
-};
+}
template <>
void GLES2DecoderTestBase::SpecializedSetup<UseProgram, 0>(
@@ -119,7 +119,7 @@ void GLES2DecoderTestBase::SpecializedSetup<UseProgram, 0>(
LinkProgram link_cmd;
link_cmd.Init(client_program_id_);
EXPECT_EQ(error::kNoError, ExecuteCmd(link_cmd));
-};
+}
template <>
void GLES2DecoderTestBase::SpecializedSetup<ValidateProgram, 0>(
@@ -138,7 +138,7 @@ void GLES2DecoderTestBase::SpecializedSetup<ValidateProgram, 0>(
EXPECT_CALL(*gl_, GetProgramiv(kServiceProgramId, GL_INFO_LOG_LENGTH, _))
.WillOnce(SetArgPointee<2>(0))
.RetiresOnSaturation();
-};
+}
TEST_P(GLES2DecoderTest3, TraceBeginCHROMIUM) {
const uint32_t kCategoryBucketId = 123;
diff --git a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_4.cc b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_4.cc
index d5b10acf9d5..31fe2a5a957 100644
--- a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_4.cc
+++ b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_4.cc
@@ -50,8 +50,8 @@ class GLES3DecoderTest4 : public GLES2DecoderTest4 {
}
};
-INSTANTIATE_TEST_CASE_P(Service, GLES2DecoderTest4, ::testing::Bool());
-INSTANTIATE_TEST_CASE_P(Service, GLES3DecoderTest4, ::testing::Bool());
+INSTANTIATE_TEST_SUITE_P(Service, GLES2DecoderTest4, ::testing::Bool());
+INSTANTIATE_TEST_SUITE_P(Service, GLES3DecoderTest4, ::testing::Bool());
#include "gpu/command_buffer/service/gles2_cmd_decoder_unittest_4_autogen.h"
diff --git a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_attribs.cc b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_attribs.cc
index acbfc810252..631e3bf9893 100644
--- a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_attribs.cc
+++ b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_attribs.cc
@@ -407,9 +407,9 @@ class GLES2DecoderVertexArraysOESTest : public GLES2DecoderWithShaderTest {
}
};
-INSTANTIATE_TEST_CASE_P(Service,
- GLES2DecoderVertexArraysOESTest,
- ::testing::Bool());
+INSTANTIATE_TEST_SUITE_P(Service,
+ GLES2DecoderVertexArraysOESTest,
+ ::testing::Bool());
class GLES2DecoderEmulatedVertexArraysOESTest
: public GLES2DecoderVertexArraysOESTest {
@@ -430,9 +430,9 @@ class GLES2DecoderEmulatedVertexArraysOESTest
}
};
-INSTANTIATE_TEST_CASE_P(Service,
- GLES2DecoderEmulatedVertexArraysOESTest,
- ::testing::Bool());
+INSTANTIATE_TEST_SUITE_P(Service,
+ GLES2DecoderEmulatedVertexArraysOESTest,
+ ::testing::Bool());
// Test vertex array objects with native support
TEST_P(GLES2DecoderVertexArraysOESTest, GenVertexArraysOESImmediateValidArgs) {
diff --git a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_base.cc b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_base.cc
index 856224e909e..ad7830a2c39 100644
--- a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_base.cc
+++ b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_base.cc
@@ -1407,6 +1407,7 @@ void GLES2DecoderTestBase::DoBindTexImage2DCHROMIUM(GLenum target,
cmds::BindTexImage2DCHROMIUM bind_tex_image_2d_cmd;
bind_tex_image_2d_cmd.Init(target, image_id);
EXPECT_CALL(*gl_, GetError())
+ .Times(AtMost(2))
.WillOnce(Return(GL_NO_ERROR))
.WillOnce(Return(GL_NO_ERROR))
.RetiresOnSaturation();
diff --git a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_base.h b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_base.h
index 0ba7ee75291..d81387cf5f9 100644
--- a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_base.h
+++ b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_base.h
@@ -65,6 +65,7 @@ class GLES2DecoderTestBase : public ::testing::TestWithParam<bool>,
void OnRescheduleAfterFinished() override;
void OnSwapBuffers(uint64_t swap_id, uint32_t flags) override;
void ScheduleGrContextCleanup() override {}
+ void HandleReturnData(base::span<const uint8_t> data) override {}
// Template to call glGenXXX functions.
template <typename T>
@@ -854,6 +855,7 @@ class GLES2DecoderPassthroughTestBase : public testing::Test,
void OnRescheduleAfterFinished() override;
void OnSwapBuffers(uint64_t swap_id, uint32_t flags) override;
void ScheduleGrContextCleanup() override {}
+ void HandleReturnData(base::span<const uint8_t> data) override {}
void SetUp() override;
void TearDown() override;
@@ -999,6 +1001,7 @@ class GLES2DecoderPassthroughTestBase : public testing::Test,
return &passthrough_discardable_manager_;
}
ContextGroup* group() { return group_.get(); }
+ FeatureInfo* feature_info() { return group_->feature_info(); }
static const size_t kSharedBufferSize = 2048;
static const uint32_t kSharedMemoryOffset = 132;
diff --git a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_context_lost.cc b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_context_lost.cc
index 254648101bf..a2df617cc6a 100644
--- a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_context_lost.cc
+++ b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_context_lost.cc
@@ -96,7 +96,7 @@ TEST_P(GLES2DecoderDrawOOMTest, ContextLostReasonWhenStatusIsUnknown) {
EXPECT_EQ(error::kUnknown, GetContextLostReason());
}
-INSTANTIATE_TEST_CASE_P(Service, GLES2DecoderDrawOOMTest, ::testing::Bool());
+INSTANTIATE_TEST_SUITE_P(Service, GLES2DecoderDrawOOMTest, ::testing::Bool());
class GLES2DecoderLostContextTest : public GLES2DecoderManualInitTest {
protected:
@@ -339,9 +339,9 @@ TEST_P(GLES2DecoderLostContextTest, LoseGroupFromRobustness) {
ClearCurrentDecoderError();
}
-INSTANTIATE_TEST_CASE_P(Service,
- GLES2DecoderLostContextTest,
- ::testing::Bool());
+INSTANTIATE_TEST_SUITE_P(Service,
+ GLES2DecoderLostContextTest,
+ ::testing::Bool());
} // namespace gles2
} // namespace gpu
diff --git a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_context_state.cc b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_context_state.cc
index 145cbcd9200..e9e84e7f32e 100644
--- a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_context_state.cc
+++ b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_context_state.cc
@@ -82,9 +82,9 @@ class GLES2DecoderRestoreStateTest : public GLES2DecoderManualInitTest {
void AddExpectationsForBindSampler(GLuint unit, GLuint id);
};
-INSTANTIATE_TEST_CASE_P(Service,
- GLES2DecoderRestoreStateTest,
- ::testing::Bool());
+INSTANTIATE_TEST_SUITE_P(Service,
+ GLES2DecoderRestoreStateTest,
+ ::testing::Bool());
void GLES2DecoderRestoreStateTest::AddExpectationsForActiveTexture(
GLenum unit) {
diff --git a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_drawing.cc b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_drawing.cc
index 69e8df6eafe..9e18273729f 100644
--- a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_drawing.cc
+++ b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_drawing.cc
@@ -72,9 +72,9 @@ class GLES2DecoderGeometryInstancingTest : public GLES2DecoderWithShaderTest {
}
};
-INSTANTIATE_TEST_CASE_P(Service,
- GLES2DecoderGeometryInstancingTest,
- ::testing::Bool());
+INSTANTIATE_TEST_SUITE_P(Service,
+ GLES2DecoderGeometryInstancingTest,
+ ::testing::Bool());
void GLES2DecoderManualInitTest::DirtyStateMaskTest(GLuint color_bits,
bool depth_mask,
diff --git a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_extensions.cc b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_extensions.cc
index 015fdbbb0f0..6477d349b71 100644
--- a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_extensions.cc
+++ b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_extensions.cc
@@ -30,9 +30,9 @@ class GLES2DecoderTestDisabledExtensions : public GLES2DecoderTest {
public:
GLES2DecoderTestDisabledExtensions() = default;
};
-INSTANTIATE_TEST_CASE_P(Service,
- GLES2DecoderTestDisabledExtensions,
- ::testing::Bool());
+INSTANTIATE_TEST_SUITE_P(Service,
+ GLES2DecoderTestDisabledExtensions,
+ ::testing::Bool());
TEST_P(GLES2DecoderTestDisabledExtensions, CHROMIUMPathRenderingDisabled) {
const GLuint kClientPathId = 0;
@@ -477,9 +477,9 @@ class GLES2DecoderTestWithCHROMIUMPathRendering : public GLES2DecoderTest {
static const GLuint kServicePathId = 311;
};
-INSTANTIATE_TEST_CASE_P(Service,
- GLES2DecoderTestWithCHROMIUMPathRendering,
- ::testing::Bool());
+INSTANTIATE_TEST_SUITE_P(Service,
+ GLES2DecoderTestWithCHROMIUMPathRendering,
+ ::testing::Bool());
class GLES2DecoderTestWithBlendEquationAdvanced : public GLES2DecoderTest {
public:
@@ -497,9 +497,9 @@ class GLES2DecoderTestWithBlendEquationAdvanced : public GLES2DecoderTest {
}
};
-INSTANTIATE_TEST_CASE_P(Service,
- GLES2DecoderTestWithBlendEquationAdvanced,
- ::testing::Bool());
+INSTANTIATE_TEST_SUITE_P(Service,
+ GLES2DecoderTestWithBlendEquationAdvanced,
+ ::testing::Bool());
class GLES2DecoderTestWithEXTMultisampleCompatibility
: public GLES2DecoderTest {
@@ -518,9 +518,9 @@ class GLES2DecoderTestWithEXTMultisampleCompatibility
InitDecoder(init);
}
};
-INSTANTIATE_TEST_CASE_P(Service,
- GLES2DecoderTestWithEXTMultisampleCompatibility,
- ::testing::Bool());
+INSTANTIATE_TEST_SUITE_P(Service,
+ GLES2DecoderTestWithEXTMultisampleCompatibility,
+ ::testing::Bool());
class GLES2DecoderTestWithBlendFuncExtended : public GLES2DecoderTest {
public:
@@ -537,9 +537,9 @@ class GLES2DecoderTestWithBlendFuncExtended : public GLES2DecoderTest {
InitDecoder(init);
}
};
-INSTANTIATE_TEST_CASE_P(Service,
- GLES2DecoderTestWithBlendFuncExtended,
- ::testing::Bool());
+INSTANTIATE_TEST_SUITE_P(Service,
+ GLES2DecoderTestWithBlendFuncExtended,
+ ::testing::Bool());
class GLES2DecoderTestWithCHROMIUMFramebufferMixedSamples
: public GLES2DecoderTest {
@@ -558,9 +558,9 @@ class GLES2DecoderTestWithCHROMIUMFramebufferMixedSamples
}
};
-INSTANTIATE_TEST_CASE_P(Service,
- GLES2DecoderTestWithCHROMIUMFramebufferMixedSamples,
- ::testing::Bool());
+INSTANTIATE_TEST_SUITE_P(Service,
+ GLES2DecoderTestWithCHROMIUMFramebufferMixedSamples,
+ ::testing::Bool());
TEST_P(GLES2DecoderTestWithCHROMIUMPathRendering, GenDeletePaths) {
static GLuint kFirstClientID = client_path_id_ + 88;
@@ -1742,9 +1742,9 @@ class GLES2DecoderTestWithCHROMIUMRasterTransport : public GLES2DecoderTest {
}
};
-INSTANTIATE_TEST_CASE_P(Service,
- GLES2DecoderTestWithCHROMIUMRasterTransport,
- ::testing::Bool());
+INSTANTIATE_TEST_SUITE_P(Service,
+ GLES2DecoderTestWithCHROMIUMRasterTransport,
+ ::testing::Bool());
class GLES3DecoderTestWithEXTWindowRectangles : public GLES3DecoderTest {
public:
@@ -1763,9 +1763,9 @@ class GLES3DecoderTestWithEXTWindowRectangles : public GLES3DecoderTest {
}
};
-INSTANTIATE_TEST_CASE_P(Service,
- GLES3DecoderTestWithEXTWindowRectangles,
- ::testing::Bool());
+INSTANTIATE_TEST_SUITE_P(Service,
+ GLES3DecoderTestWithEXTWindowRectangles,
+ ::testing::Bool());
TEST_P(GLES3DecoderTestWithEXTWindowRectangles,
WindowRectanglesEXTImmediateValidArgs) {
diff --git a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_framebuffers.cc b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_framebuffers.cc
index ce62a0a132b..f9dcad305f9 100644
--- a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_framebuffers.cc
+++ b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_framebuffers.cc
@@ -1468,7 +1468,7 @@ TEST_P(GLES2ReadPixelsAsyncTest, ReadPixelsAsyncChangePackAlignment) {
FinishReadPixelsAndCheckResult(kWidth, kHeight, pixels);
}
-INSTANTIATE_TEST_CASE_P(Service, GLES2ReadPixelsAsyncTest, ::testing::Bool());
+INSTANTIATE_TEST_SUITE_P(Service, GLES2ReadPixelsAsyncTest, ::testing::Bool());
// Check that if a renderbuffer is attached and GL returns
// GL_FRAMEBUFFER_COMPLETE that the buffer is cleared and state is restored.
@@ -2389,9 +2389,9 @@ class GLES2DecoderMultisampledRenderToTextureTest
}
};
-INSTANTIATE_TEST_CASE_P(Service,
- GLES2DecoderMultisampledRenderToTextureTest,
- ::testing::Bool());
+INSTANTIATE_TEST_SUITE_P(Service,
+ GLES2DecoderMultisampledRenderToTextureTest,
+ ::testing::Bool());
TEST_P(GLES2DecoderMultisampledRenderToTextureTest,
NotCompatibleWithRenderbufferStorageMultisampleCHROMIUM_EXT) {
@@ -4034,9 +4034,9 @@ TEST_P(GLES2DecoderTestWithDrawRectangle, FramebufferDrawRectangleClear) {
}
}
-INSTANTIATE_TEST_CASE_P(Service,
- GLES2DecoderTestWithDrawRectangle,
- ::testing::Bool());
+INSTANTIATE_TEST_SUITE_P(Service,
+ GLES2DecoderTestWithDrawRectangle,
+ ::testing::Bool());
TEST_P(GLES2DecoderManualInitTest, MESAFramebufferFlipYExtensionEnabled) {
InitState init;
diff --git a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_programs.cc b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_programs.cc
index 6a14f508f08..31c401512e2 100644
--- a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_programs.cc
+++ b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_programs.cc
@@ -295,7 +295,7 @@ TEST_P(GLES2DecoderWithShaderTest, GetUniformivBadSharedMemoryFails) {
cmd.Init(client_program_id_, kUniform2FakeLocation, shared_memory_id_,
kInvalidSharedMemoryOffset);
EXPECT_NE(error::kNoError, ExecuteCmd(cmd));
-};
+}
TEST_P(GLES3DecoderWithShaderTest, GetUniformuivSucceeds) {
GetUniformuiv::Result* result =
@@ -389,7 +389,7 @@ TEST_P(GLES3DecoderWithShaderTest, GetUniformuivBadSharedMemoryFails) {
cmd.Init(client_program_id_, kUniform2FakeLocation, shared_memory_id_,
kInvalidSharedMemoryOffset);
EXPECT_NE(error::kNoError, ExecuteCmd(cmd));
-};
+}
TEST_P(GLES2DecoderWithShaderTest, GetUniformfvSucceeds) {
GetUniformfv::Result* result =
@@ -483,7 +483,7 @@ TEST_P(GLES2DecoderWithShaderTest, GetUniformfvBadSharedMemoryFails) {
cmd.Init(client_program_id_, kUniform2FakeLocation, shared_memory_id_,
kInvalidSharedMemoryOffset);
EXPECT_NE(error::kNoError, ExecuteCmd(cmd));
-};
+}
TEST_P(GLES2DecoderWithShaderTest, GetAttachedShadersSucceeds) {
GetAttachedShaders cmd;
diff --git a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_textures.cc b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_textures.cc
index b94cd64e5a4..64b7799d879 100644
--- a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_textures.cc
+++ b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_textures.cc
@@ -3171,8 +3171,9 @@ TEST_P(GLES2DecoderTest, CreateAndConsumeTextureCHROMIUMInvalidTexture) {
ProduceTextureDirectCHROMIUMImmediate& produce_cmd =
*GetImmediateAs<ProduceTextureDirectCHROMIUMImmediate>();
produce_cmd.Init(client_texture_id_, mailbox.name);
- EXPECT_EQ(error::kNoError,
- ExecuteImmediateCmd(produce_cmd, sizeof(mailbox.name)));
+ EXPECT_EQ(
+ error::kNoError,
+ ExecuteImmediateCmd(produce_cmd, sizeof(mailbox.name) + sizeof(GLenum)));
EXPECT_EQ(GL_NO_ERROR, GetGLError());
// Attempt to consume the mailbox with an invalid texture id.
@@ -3270,8 +3271,9 @@ TEST_P(GLES2DecoderTest, CreateAndTexStorage2DSharedImageCHROMIUM) {
CreateAndTexStorage2DSharedImageINTERNALImmediate& cmd =
*GetImmediateAs<CreateAndTexStorage2DSharedImageINTERNALImmediate>();
- cmd.Init(kNewClientId, mailbox.name);
- EXPECT_EQ(error::kNoError, ExecuteImmediateCmd(cmd, sizeof(mailbox.name)));
+ cmd.Init(kNewClientId, mailbox.name, GL_NONE);
+ EXPECT_EQ(error::kNoError,
+ ExecuteImmediateCmd(cmd, sizeof(mailbox.name) + sizeof(GLenum)));
EXPECT_EQ(GL_NO_ERROR, GetGLError());
// Make sure the new client ID is associated with the produced service ID.
@@ -3300,8 +3302,9 @@ TEST_P(GLES2DecoderTest,
CreateAndTexStorage2DSharedImageINTERNALImmediate& cmd =
*GetImmediateAs<CreateAndTexStorage2DSharedImageINTERNALImmediate>();
- cmd.Init(kNewClientId, mailbox.name);
- EXPECT_EQ(error::kNoError, ExecuteImmediateCmd(cmd, sizeof(mailbox.name)));
+ cmd.Init(kNewClientId, mailbox.name, GL_NONE);
+ EXPECT_EQ(error::kNoError,
+ ExecuteImmediateCmd(cmd, sizeof(mailbox.name) + sizeof(GLenum)));
// CreateAndTexStorage2DSharedImage should fail if the mailbox is invalid.
EXPECT_EQ(GL_INVALID_OPERATION, GetGLError());
@@ -3331,8 +3334,9 @@ TEST_P(GLES2DecoderTest,
CreateAndTexStorage2DSharedImageINTERNALImmediate& cmd =
*GetImmediateAs<CreateAndTexStorage2DSharedImageINTERNALImmediate>();
- cmd.Init(client_texture_id_, mailbox.name);
- EXPECT_EQ(error::kNoError, ExecuteImmediateCmd(cmd, sizeof(mailbox.name)));
+ cmd.Init(client_texture_id_, mailbox.name, GL_NONE);
+ EXPECT_EQ(error::kNoError,
+ ExecuteImmediateCmd(cmd, sizeof(mailbox.name) + sizeof(GLenum)));
// CreateAndTexStorage2DSharedImage should fail.
EXPECT_EQ(GL_INVALID_OPERATION, GetGLError());
@@ -3354,8 +3358,9 @@ TEST_P(GLES2DecoderTest, BeginEndSharedImageAccessCRHOMIUM) {
CreateAndTexStorage2DSharedImageINTERNALImmediate& cmd =
*GetImmediateAs<CreateAndTexStorage2DSharedImageINTERNALImmediate>();
- cmd.Init(kNewClientId, mailbox.name);
- EXPECT_EQ(error::kNoError, ExecuteImmediateCmd(cmd, sizeof(mailbox.name)));
+ cmd.Init(kNewClientId, mailbox.name, GL_NONE);
+ EXPECT_EQ(error::kNoError,
+ ExecuteImmediateCmd(cmd, sizeof(mailbox.name) + sizeof(GLenum)));
EXPECT_EQ(GL_NO_ERROR, GetGLError());
// Begin/end read access for the created image.
@@ -3414,8 +3419,9 @@ TEST_P(GLES2DecoderTest, BeginSharedImageAccessDirectCHROMIUMCantBeginAccess) {
CreateAndTexStorage2DSharedImageINTERNALImmediate& cmd =
*GetImmediateAs<CreateAndTexStorage2DSharedImageINTERNALImmediate>();
- cmd.Init(kNewClientId, mailbox.name);
- EXPECT_EQ(error::kNoError, ExecuteImmediateCmd(cmd, sizeof(mailbox.name)));
+ cmd.Init(kNewClientId, mailbox.name, GL_NONE);
+ EXPECT_EQ(error::kNoError,
+ ExecuteImmediateCmd(cmd, sizeof(mailbox.name) + sizeof(GLenum)));
EXPECT_EQ(GL_NO_ERROR, GetGLError());
// Try to begin access with a shared image representation that fails
@@ -3766,6 +3772,7 @@ class MockGLImage : public gl::GLImage {
// Overridden from gl::GLImage:
MOCK_METHOD0(GetSize, gfx::Size());
MOCK_METHOD0(GetInternalFormat, unsigned());
+ MOCK_METHOD0(ShouldBindOrCopy, gl::GLImage::BindOrCopy());
MOCK_METHOD1(BindTexImage, bool(unsigned));
MOCK_METHOD1(ReleaseTexImage, void(unsigned));
MOCK_METHOD1(CopyTexImage, bool(unsigned));
@@ -3806,9 +3813,10 @@ TEST_P(GLES2DecoderWithShaderTest, CopyTexImage) {
GetImageManagerForTest()->AddImage(image.get(), kImageId);
// Bind image to texture.
+ EXPECT_CALL(*image.get(), ShouldBindOrCopy())
+ .WillRepeatedly(Return(gl::GLImage::COPY));
EXPECT_CALL(*image.get(), BindTexImage(GL_TEXTURE_2D))
- .Times(1)
- .WillRepeatedly(Return(false))
+ .Times(0)
.RetiresOnSaturation();
EXPECT_CALL(*image.get(), GetSize())
.Times(1)
@@ -3845,8 +3853,7 @@ TEST_P(GLES2DecoderWithShaderTest, CopyTexImage) {
.Times(1)
.RetiresOnSaturation();
EXPECT_CALL(*image.get(), BindTexImage(GL_TEXTURE_2D))
- .Times(1)
- .WillRepeatedly(Return(false))
+ .Times(0)
.RetiresOnSaturation();
EXPECT_CALL(*image.get(), CopyTexImage(GL_TEXTURE_2D))
.Times(1)
@@ -3873,8 +3880,7 @@ TEST_P(GLES2DecoderWithShaderTest, CopyTexImage) {
release_tex_image_2d_cmd.Init(GL_TEXTURE_2D, kImageId);
EXPECT_EQ(error::kNoError, ExecuteCmd(release_tex_image_2d_cmd));
EXPECT_CALL(*image.get(), BindTexImage(GL_TEXTURE_2D))
- .Times(2)
- .WillRepeatedly(Return(false))
+ .Times(0)
.RetiresOnSaturation();
EXPECT_CALL(*image.get(), GetSize())
.Times(1)
@@ -4227,9 +4233,9 @@ class GLES2DecoderCompressedFormatsTest : public GLES2DecoderManualInitTest {
}
};
-INSTANTIATE_TEST_CASE_P(Service,
- GLES2DecoderCompressedFormatsTest,
- ::testing::Bool());
+INSTANTIATE_TEST_SUITE_P(Service,
+ GLES2DecoderCompressedFormatsTest,
+ ::testing::Bool());
TEST_P(GLES2DecoderCompressedFormatsTest, GetCompressedTextureFormatsS3TC) {
const GLenum formats[] = {
@@ -4406,9 +4412,9 @@ class GLES2DecoderTexStorageFormatAndTypeTest
}
};
-INSTANTIATE_TEST_CASE_P(Service,
- GLES2DecoderTexStorageFormatAndTypeTest,
- ::testing::Bool());
+INSTANTIATE_TEST_SUITE_P(Service,
+ GLES2DecoderTexStorageFormatAndTypeTest,
+ ::testing::Bool());
TEST_P(GLES2DecoderTexStorageFormatAndTypeTest, ES2) {
InitState init;
diff --git a/chromium/gpu/command_buffer/service/gpu_command_buffer_memory_tracker.h b/chromium/gpu/command_buffer/service/gpu_command_buffer_memory_tracker.h
index d40df9d8947..aa854125ed4 100644
--- a/chromium/gpu/command_buffer/service/gpu_command_buffer_memory_tracker.h
+++ b/chromium/gpu/command_buffer/service/gpu_command_buffer_memory_tracker.h
@@ -8,6 +8,7 @@
#include "base/macros.h"
#include "base/memory/memory_pressure_listener.h"
#include "base/single_thread_task_runner.h"
+#include "base/timer/timer.h"
#include "gpu/command_buffer/common/command_buffer_id.h"
#include "gpu/command_buffer/common/context_creation_attribs.h"
#include "gpu/command_buffer/service/memory_tracking.h"
diff --git a/chromium/gpu/command_buffer/service/gpu_switches.cc b/chromium/gpu/command_buffer/service/gpu_switches.cc
index 14cf67b6c29..94d418153c3 100644
--- a/chromium/gpu/command_buffer/service/gpu_switches.cc
+++ b/chromium/gpu/command_buffer/service/gpu_switches.cc
@@ -80,4 +80,7 @@ const char kEnableRasterToSkImage[] = "enable-raster-to-sk-image";
const char kEnablePassthroughRasterDecoder[] =
"enable-passthrough-raster-decoder";
+// Enable Vulkan support, must also have ENABLE_VULKAN defined.
+const char kEnableVulkan[] = "enable-vulkan";
+
} // namespace switches
diff --git a/chromium/gpu/command_buffer/service/gpu_switches.h b/chromium/gpu/command_buffer/service/gpu_switches.h
index 7fa91593549..3562b0776f5 100644
--- a/chromium/gpu/command_buffer/service/gpu_switches.h
+++ b/chromium/gpu/command_buffer/service/gpu_switches.h
@@ -38,6 +38,7 @@ GPU_EXPORT extern const char kEmulateShaderPrecision[];
GPU_EXPORT extern const char kUseCmdDecoder[];
GPU_EXPORT extern const char kEnableRasterToSkImage[];
GPU_EXPORT extern const char kEnablePassthroughRasterDecoder[];
+GPU_EXPORT extern const char kEnableVulkan[];
} // namespace switches
diff --git a/chromium/gpu/command_buffer/service/gpu_tracer_unittest.cc b/chromium/gpu/command_buffer/service/gpu_tracer_unittest.cc
index 9be021248f7..0381d176ec1 100644
--- a/chromium/gpu/command_buffer/service/gpu_tracer_unittest.cc
+++ b/chromium/gpu/command_buffer/service/gpu_tracer_unittest.cc
@@ -53,7 +53,7 @@ class GPUTracerTester : public GPUTracer {
public:
explicit GPUTracerTester(GLES2Decoder* decoder)
: GPUTracer(decoder), tracing_enabled_(0) {
- gpu_timing_client_->SetCpuTimeForTesting(base::Bind(&FakeCpuTime));
+ gpu_timing_client_->SetCpuTimeForTesting(base::BindRepeating(&FakeCpuTime));
// Force tracing to be dependent on our mock variable here.
gpu_trace_srv_category = &tracing_enabled_;
@@ -100,7 +100,7 @@ class BaseGpuTest : public GpuServiceTest {
gl_fake_queries_.ExpectNoDisjointCalls(*gl_);
gpu_timing_client_ = GetGLContext()->CreateGPUTimingClient();
- gpu_timing_client_->SetCpuTimeForTesting(base::Bind(&FakeCpuTime));
+ gpu_timing_client_->SetCpuTimeForTesting(base::BindRepeating(&FakeCpuTime));
gl_fake_queries_.Reset();
}
diff --git a/chromium/gpu/command_buffer/service/gr_cache_controller.cc b/chromium/gpu/command_buffer/service/gr_cache_controller.cc
index 2a0b7db0e7b..8e26fe334b1 100644
--- a/chromium/gpu/command_buffer/service/gr_cache_controller.cc
+++ b/chromium/gpu/command_buffer/service/gr_cache_controller.cc
@@ -6,6 +6,7 @@
#include <chrono>
+#include "base/bind.h"
#include "gpu/command_buffer/service/shared_context_state.h"
#include "ui/gl/gl_context.h"
diff --git a/chromium/gpu/command_buffer/service/indexed_buffer_binding_host.cc b/chromium/gpu/command_buffer/service/indexed_buffer_binding_host.cc
index ea5f0327c56..07d0e925cb0 100644
--- a/chromium/gpu/command_buffer/service/indexed_buffer_binding_host.cc
+++ b/chromium/gpu/command_buffer/service/indexed_buffer_binding_host.cc
@@ -75,11 +75,15 @@ void IndexedBufferBindingHost::IndexedBufferBinding::Reset() {
effective_full_buffer_size = 0;
}
-IndexedBufferBindingHost::IndexedBufferBindingHost(uint32_t max_bindings,
- GLenum target,
- bool needs_emulation)
+IndexedBufferBindingHost::IndexedBufferBindingHost(
+ uint32_t max_bindings,
+ GLenum target,
+ bool needs_emulation,
+ bool round_down_uniform_bind_buffer_range_size)
: is_bound_(false),
needs_emulation_(needs_emulation),
+ round_down_uniform_bind_buffer_range_size_(
+ round_down_uniform_bind_buffer_range_size),
max_non_null_binding_index_plus_one_(0u),
target_(target) {
DCHECK(needs_emulation);
@@ -113,7 +117,8 @@ void IndexedBufferBindingHost::DoBindBufferRange(GLuint index,
GLuint service_id = buffer ? buffer->service_id() : 0;
if (buffer && needs_emulation_) {
DoAdjustedBindBufferRange(target_, index, service_id, offset, size,
- buffer->size());
+ buffer->size(),
+ round_down_uniform_bind_buffer_range_size_);
} else {
glBindBufferRange(target_, index, service_id, offset, size);
}
@@ -130,8 +135,13 @@ void IndexedBufferBindingHost::DoBindBufferRange(GLuint index,
// static
void IndexedBufferBindingHost::DoAdjustedBindBufferRange(
- GLenum target, GLuint index, GLuint service_id, GLintptr offset,
- GLsizeiptr size, GLsizeiptr full_buffer_size) {
+ GLenum target,
+ GLuint index,
+ GLuint service_id,
+ GLintptr offset,
+ GLsizeiptr size,
+ GLsizeiptr full_buffer_size,
+ bool round_down_uniform_bind_buffer_range_size) {
GLsizeiptr adjusted_size = size;
if (offset >= full_buffer_size) {
// Situation 1: We can't really call glBindBufferRange with reasonable
@@ -142,7 +152,8 @@ void IndexedBufferBindingHost::DoAdjustedBindBufferRange(
// MacOSX with AMD/4.1.
glBindBufferBase(target, index, service_id);
return;
- } else if (offset + size > full_buffer_size) {
+ }
+ if (offset + size > full_buffer_size) {
adjusted_size = full_buffer_size - offset;
// size needs to be a multiple of 4.
adjusted_size = adjusted_size & ~3;
@@ -153,6 +164,22 @@ void IndexedBufferBindingHost::DoAdjustedBindBufferRange(
return;
}
}
+ if (round_down_uniform_bind_buffer_range_size) {
+ adjusted_size = adjusted_size & ~3;
+ if (adjusted_size == 0) {
+ // This case is invalid and we shouldn't call the driver.
+ // Without rounding, this would generate INVALID_OPERATION
+ // at draw time because the size is not enough to fill the smallest
+ // possible uniform block (4 bytes).
+ // The size of the range is set in DoBindBufferRange and validated in
+ // BufferManager::RequestBuffersAccess. It is fine to not bind the buffer
+ // because any draw call with this buffer range binding will generate
+ // INVALID_OPERATION.
+ // Clear the buffer binding because it will not be used.
+ glBindBufferBase(target, index, 0);
+ return;
+ }
+ }
glBindBufferRange(target, index, service_id, offset, adjusted_size);
}
@@ -168,7 +195,8 @@ void IndexedBufferBindingHost::OnBufferData(Buffer* buffer) {
buffer_bindings_[ii].effective_full_buffer_size != buffer->size()) {
DoAdjustedBindBufferRange(target_, ii, buffer->service_id(),
buffer_bindings_[ii].offset,
- buffer_bindings_[ii].size, buffer->size());
+ buffer_bindings_[ii].size, buffer->size(),
+ round_down_uniform_bind_buffer_range_size_);
buffer_bindings_[ii].effective_full_buffer_size = buffer->size();
}
}
@@ -206,7 +234,8 @@ void IndexedBufferBindingHost::SetIsBound(bool is_bound) {
buffer_bindings_[ii].effective_full_buffer_size != buffer->size()) {
DoAdjustedBindBufferRange(target_, ii, buffer->service_id(),
buffer_bindings_[ii].offset,
- buffer_bindings_[ii].size, buffer->size());
+ buffer_bindings_[ii].size, buffer->size(),
+ round_down_uniform_bind_buffer_range_size_);
buffer_bindings_[ii].effective_full_buffer_size = buffer->size();
}
}
diff --git a/chromium/gpu/command_buffer/service/indexed_buffer_binding_host.h b/chromium/gpu/command_buffer/service/indexed_buffer_binding_host.h
index af2f467c1cd..b28018d8fe4 100644
--- a/chromium/gpu/command_buffer/service/indexed_buffer_binding_host.h
+++ b/chromium/gpu/command_buffer/service/indexed_buffer_binding_host.h
@@ -27,7 +27,8 @@ class GPU_GLES2_EXPORT IndexedBufferBindingHost
// out-of-bounds buffer accesses.
IndexedBufferBindingHost(uint32_t max_bindings,
GLenum target,
- bool needs_emulation);
+ bool needs_emulation,
+ bool round_down_uniform_bind_buffer_range_size);
// The following two functions do state update and call the underlying GL
// function. All validations have been done already and the GL function is
@@ -111,14 +112,20 @@ class GPU_GLES2_EXPORT IndexedBufferBindingHost
// This is called when |needs_emulation_| is true, where the range
// (offset + size) can't go beyond the buffer's size.
static void DoAdjustedBindBufferRange(
- GLenum target, GLuint index, GLuint service_id, GLintptr offset,
- GLsizeiptr size, GLsizeiptr full_buffer_size);
+ GLenum target,
+ GLuint index,
+ GLuint service_id,
+ GLintptr offset,
+ GLsizeiptr size,
+ GLsizeiptr full_buffer_size,
+ bool round_down_uniform_bind_buffer_range_size);
void UpdateMaxNonNullBindingIndex(size_t changed_index);
std::vector<IndexedBufferBinding> buffer_bindings_;
bool needs_emulation_;
+ bool round_down_uniform_bind_buffer_range_size_;
// This is used for optimization purpose in context switching.
size_t max_non_null_binding_index_plus_one_;
diff --git a/chromium/gpu/command_buffer/service/indexed_buffer_binding_host_unittest.cc b/chromium/gpu/command_buffer/service/indexed_buffer_binding_host_unittest.cc
index 3d37f16bac5..8bf24f1cbb5 100644
--- a/chromium/gpu/command_buffer/service/indexed_buffer_binding_host_unittest.cc
+++ b/chromium/gpu/command_buffer/service/indexed_buffer_binding_host_unittest.cc
@@ -21,10 +21,12 @@ class IndexedBufferBindingHostTest : public GpuServiceTest {
IndexedBufferBindingHostTest()
: uniform_host_(new IndexedBufferBindingHost(kMaxBindings,
GL_UNIFORM_BUFFER,
- true)),
+ true,
+ false)),
tf_host_(new IndexedBufferBindingHost(kMaxBindings,
GL_TRANSFORM_FEEDBACK_BUFFER,
- true)),
+ true,
+ false)),
buffer_manager_(new BufferManager(nullptr, nullptr)) {
buffer_manager_->CreateBuffer(kBufferClientId, kBufferServiceId);
buffer_ = buffer_manager_->GetBuffer(kBufferClientId);
@@ -140,7 +142,7 @@ TEST_F(IndexedBufferBindingHostTest, RestoreBindings) {
uniform_host_->DoBindBufferBase(kIndex, buffer_.get());
// Set up the second host
scoped_refptr<IndexedBufferBindingHost> other =
- new IndexedBufferBindingHost(kMaxBindings, kTarget, true);
+ new IndexedBufferBindingHost(kMaxBindings, kTarget, true, false);
EXPECT_CALL(*gl_, BindBufferRange(kTarget, kOtherIndex, kBufferServiceId,
kOffset, clamped_size))
.Times(1)
diff --git a/chromium/gpu/command_buffer/service/memory_program_cache.cc b/chromium/gpu/command_buffer/service/memory_program_cache.cc
index 30c3f816417..13dc774c017 100644
--- a/chromium/gpu/command_buffer/service/memory_program_cache.cc
+++ b/chromium/gpu/command_buffer/service/memory_program_cache.cc
@@ -13,7 +13,6 @@
#include "base/metrics/histogram_functions.h"
#include "base/metrics/histogram_macros.h"
#include "base/numerics/checked_math.h"
-#include "base/sha1.h"
#include "base/strings/string_number_conversions.h"
#include "base/system/sys_info.h"
#include "build/build_config.h"
diff --git a/chromium/gpu/command_buffer/service/memory_program_cache_unittest.cc b/chromium/gpu/command_buffer/service/memory_program_cache_unittest.cc
index 0aa22b27103..8308addbb9c 100644
--- a/chromium/gpu/command_buffer/service/memory_program_cache_unittest.cc
+++ b/chromium/gpu/command_buffer/service/memory_program_cache_unittest.cc
@@ -98,6 +98,7 @@ class MemoryProgramCacheTest : public GpuServiceTest, public DecoderClient {
void OnRescheduleAfterFinished() override {}
void OnSwapBuffers(uint64_t swap_id, uint32_t flags) override {}
void ScheduleGrContextCleanup() override {}
+ void HandleReturnData(base::span<const uint8_t> data) override {}
int32_t shader_cache_count() { return shader_cache_count_; }
const std::string& shader_cache_shader() { return shader_cache_shader_; }
diff --git a/chromium/gpu/command_buffer/service/multi_draw_manager.cc b/chromium/gpu/command_buffer/service/multi_draw_manager.cc
index 498ed6adfed..6000fa98708 100644
--- a/chromium/gpu/command_buffer/service/multi_draw_manager.cc
+++ b/chromium/gpu/command_buffer/service/multi_draw_manager.cc
@@ -12,8 +12,8 @@
namespace gpu {
namespace gles2 {
-MultiDrawManager::ResultData::ResultData()
- : draw_function(DrawFunction::None) {}
+MultiDrawManager::ResultData::ResultData() = default;
+MultiDrawManager::ResultData::~ResultData() = default;
MultiDrawManager::ResultData::ResultData(ResultData&& rhs)
: draw_function(rhs.draw_function),
@@ -25,7 +25,6 @@ MultiDrawManager::ResultData::ResultData(ResultData&& rhs)
offsets(std::move(rhs.offsets)),
indices(std::move(rhs.indices)),
instance_counts(std::move(rhs.instance_counts)) {
- rhs.draw_function = DrawFunction::None;
}
MultiDrawManager::ResultData& MultiDrawManager::ResultData::operator=(
@@ -42,33 +41,32 @@ MultiDrawManager::ResultData& MultiDrawManager::ResultData::operator=(
std::swap(offsets, rhs.offsets);
std::swap(indices, rhs.indices);
std::swap(instance_counts, rhs.instance_counts);
-
- rhs.draw_function = DrawFunction::None;
return *this;
}
-MultiDrawManager::ResultData::~ResultData() {}
-
MultiDrawManager::MultiDrawManager(IndexStorageType index_type)
- : current_draw_offset_(0), index_type_(index_type), result_() {}
+ : draw_state_(DrawState::End),
+ current_draw_offset_(0),
+ index_type_(index_type),
+ result_() {}
bool MultiDrawManager::Begin(GLsizei drawcount) {
- result_.drawcount = drawcount;
- current_draw_offset_ = 0;
- if (result_.draw_function != DrawFunction::None) {
- NOTREACHED();
+ if (draw_state_ != DrawState::End) {
return false;
}
+ result_.drawcount = drawcount;
+ current_draw_offset_ = 0;
+ draw_state_ = DrawState::Begin;
return true;
}
bool MultiDrawManager::End(ResultData* result) {
DCHECK(result);
-
- if (result_.draw_function == DrawFunction::None ||
+ if (draw_state_ != DrawState::Draw ||
current_draw_offset_ != result_.drawcount) {
return false;
}
+ draw_state_ = DrawState::End;
*result = std::move(result_);
return true;
}
@@ -77,10 +75,7 @@ bool MultiDrawManager::MultiDrawArrays(GLenum mode,
const GLint* firsts,
const GLsizei* counts,
GLsizei drawcount) {
- if (!EnsureDrawArraysFunction(DrawFunction::DrawArrays, mode) ||
- base::CheckAdd(current_draw_offset_, drawcount).ValueOrDie() >
- result_.drawcount) {
- NOTREACHED();
+ if (!EnsureDrawArraysFunction(DrawFunction::DrawArrays, mode, drawcount)) {
return false;
}
std::copy(firsts, firsts + drawcount, &result_.firsts[current_draw_offset_]);
@@ -94,10 +89,8 @@ bool MultiDrawManager::MultiDrawArraysInstanced(GLenum mode,
const GLsizei* counts,
const GLsizei* instance_counts,
GLsizei drawcount) {
- if (!EnsureDrawArraysFunction(DrawFunction::DrawArraysInstanced, mode) ||
- base::CheckAdd(current_draw_offset_, drawcount).ValueOrDie() >
- result_.drawcount) {
- NOTREACHED();
+ if (!EnsureDrawArraysFunction(DrawFunction::DrawArraysInstanced, mode,
+ drawcount)) {
return false;
}
std::copy(firsts, firsts + drawcount, &result_.firsts[current_draw_offset_]);
@@ -113,10 +106,8 @@ bool MultiDrawManager::MultiDrawElements(GLenum mode,
GLenum type,
const GLsizei* offsets,
GLsizei drawcount) {
- if (!EnsureDrawElementsFunction(DrawFunction::DrawElements, mode, type) ||
- base::CheckAdd(current_draw_offset_, drawcount).ValueOrDie() >
- result_.drawcount) {
- NOTREACHED();
+ if (!EnsureDrawElementsFunction(DrawFunction::DrawElements, mode, type,
+ drawcount)) {
return false;
}
std::copy(counts, counts + drawcount, &result_.counts[current_draw_offset_]);
@@ -145,10 +136,7 @@ bool MultiDrawManager::MultiDrawElementsInstanced(
const GLsizei* instance_counts,
GLsizei drawcount) {
if (!EnsureDrawElementsFunction(DrawFunction::DrawElementsInstanced, mode,
- type) ||
- base::CheckAdd(current_draw_offset_, drawcount).ValueOrDie() >
- result_.drawcount) {
- NOTREACHED();
+ type, drawcount)) {
return false;
}
std::copy(counts, counts + drawcount, &result_.counts[current_draw_offset_]);
@@ -199,30 +187,65 @@ void MultiDrawManager::ResizeArrays() {
}
}
+bool MultiDrawManager::ValidateDrawcount(GLsizei drawcount) const {
+ if (drawcount < 0) {
+ return false;
+ }
+ GLsizei new_offset;
+ if (!base::CheckAdd(current_draw_offset_, drawcount)
+ .AssignIfValid(&new_offset)) {
+ return false;
+ }
+ if (new_offset > result_.drawcount) {
+ return false;
+ }
+ return true;
+}
+
bool MultiDrawManager::EnsureDrawArraysFunction(DrawFunction draw_function,
- GLenum mode) {
- bool first_call = result_.draw_function == DrawFunction::None;
+ GLenum mode,
+ GLsizei drawcount) {
+ if (!ValidateDrawcount(drawcount)) {
+ return false;
+ }
+ bool invalid_draw_state = draw_state_ == DrawState::End;
+ bool first_call = draw_state_ == DrawState::Begin;
bool enums_match = result_.mode == mode;
+
+ if (invalid_draw_state || (!first_call && !enums_match)) {
+ return false;
+ }
if (first_call) {
+ draw_state_ = DrawState::Draw;
result_.draw_function = draw_function;
result_.mode = mode;
ResizeArrays();
}
- return first_call || enums_match;
+ return true;
}
bool MultiDrawManager::EnsureDrawElementsFunction(DrawFunction draw_function,
GLenum mode,
- GLenum type) {
- bool first_call = result_.draw_function == DrawFunction::None;
+ GLenum type,
+ GLsizei drawcount) {
+ if (!ValidateDrawcount(drawcount)) {
+ return false;
+ }
+ bool invalid_draw_state = draw_state_ == DrawState::End;
+ bool first_call = draw_state_ == DrawState::Begin;
bool enums_match = result_.mode == mode && result_.type == type;
+
+ if (invalid_draw_state || (!first_call && !enums_match)) {
+ return false;
+ }
if (first_call) {
+ draw_state_ = DrawState::Draw;
result_.draw_function = draw_function;
result_.mode = mode;
result_.type = type;
ResizeArrays();
}
- return first_call || enums_match;
+ return true;
}
} // namespace gles2
diff --git a/chromium/gpu/command_buffer/service/multi_draw_manager.h b/chromium/gpu/command_buffer/service/multi_draw_manager.h
index daa42d9e8dc..2baef1653bb 100644
--- a/chromium/gpu/command_buffer/service/multi_draw_manager.h
+++ b/chromium/gpu/command_buffer/service/multi_draw_manager.h
@@ -20,18 +20,17 @@ namespace gles2 {
class GPU_GLES2_EXPORT MultiDrawManager {
public:
enum class DrawFunction {
- None,
DrawArrays,
DrawArraysInstanced,
DrawElements,
DrawElementsInstanced,
};
- struct ResultData {
+ struct GPU_GLES2_EXPORT ResultData {
DrawFunction draw_function;
- GLsizei drawcount;
- GLenum mode;
- GLenum type;
+ GLsizei drawcount = 0;
+ GLenum mode = 0;
+ GLenum type = 0;
std::vector<GLint> firsts;
std::vector<GLsizei> counts;
std::vector<GLsizei> offsets;
@@ -39,9 +38,9 @@ class GPU_GLES2_EXPORT MultiDrawManager {
std::vector<GLsizei> instance_counts;
ResultData();
+ ~ResultData();
ResultData(ResultData&& rhs);
ResultData& operator=(ResultData&& rhs);
- ~ResultData();
};
enum class IndexStorageType {
@@ -76,11 +75,22 @@ class GPU_GLES2_EXPORT MultiDrawManager {
private:
void ResizeArrays();
- bool EnsureDrawArraysFunction(DrawFunction draw_function, GLenum mode);
+ bool ValidateDrawcount(GLsizei drawcount) const;
+ bool EnsureDrawArraysFunction(DrawFunction draw_function,
+ GLenum mode,
+ GLsizei drawcount);
bool EnsureDrawElementsFunction(DrawFunction draw_function,
GLenum mode,
- GLenum type);
+ GLenum type,
+ GLsizei drawcount);
+
+ enum class DrawState {
+ Begin,
+ Draw,
+ End,
+ };
+ DrawState draw_state_;
GLsizei current_draw_offset_;
IndexStorageType index_type_;
ResultData result_;
diff --git a/chromium/gpu/command_buffer/service/multi_draw_manager_unittest.cc b/chromium/gpu/command_buffer/service/multi_draw_manager_unittest.cc
new file mode 100644
index 00000000000..5145588a644
--- /dev/null
+++ b/chromium/gpu/command_buffer/service/multi_draw_manager_unittest.cc
@@ -0,0 +1,232 @@
+// Copyright 2019 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "gpu/command_buffer/service/multi_draw_manager.h"
+
+#include <memory>
+#include <tuple>
+
+#include "testing/gtest/include/gtest/gtest.h"
+#include "ui/gl/gl_bindings.h"
+
+namespace gpu {
+namespace gles2 {
+
+namespace {
+
+using Param = std::tuple<MultiDrawManager::IndexStorageType,
+ MultiDrawManager::DrawFunction>;
+
+} // namespace
+
+class MultiDrawManagerTest : public testing::TestWithParam<Param> {
+ public:
+ MultiDrawManagerTest()
+ : multi_draw_manager_(new MultiDrawManager(std::get<0>(GetParam()))) {}
+
+ protected:
+ bool DoMultiDraw(uint32_t count,
+ GLenum mode = GL_TRIANGLES,
+ GLenum type = GL_UNSIGNED_INT) {
+ std::vector<GLsizei> data(count);
+ switch (std::get<1>(GetParam())) {
+ case MultiDrawManager::DrawFunction::DrawArrays:
+ return multi_draw_manager_->MultiDrawArrays(mode, data.data(),
+ data.data(), count);
+
+ case MultiDrawManager::DrawFunction::DrawArraysInstanced:
+ return multi_draw_manager_->MultiDrawArraysInstanced(
+ mode, data.data(), data.data(), data.data(), count);
+
+ case MultiDrawManager::DrawFunction::DrawElements:
+ return multi_draw_manager_->MultiDrawElements(mode, data.data(), type,
+ data.data(), count);
+
+ case MultiDrawManager::DrawFunction::DrawElementsInstanced:
+ return multi_draw_manager_->MultiDrawElementsInstanced(
+ mode, data.data(), type, data.data(), data.data(), count);
+ }
+ }
+
+ void CheckResultSize(uint32_t count,
+ const MultiDrawManager::ResultData& result) {
+ MultiDrawManager::DrawFunction draw_function = std::get<1>(GetParam());
+ EXPECT_TRUE(draw_function == result.draw_function);
+
+ switch (draw_function) {
+ case MultiDrawManager::DrawFunction::DrawArraysInstanced:
+ EXPECT_TRUE(result.instance_counts.size() == count);
+ FALLTHROUGH;
+ case MultiDrawManager::DrawFunction::DrawArrays:
+ EXPECT_TRUE(result.firsts.size() == count);
+ EXPECT_TRUE(result.counts.size() == count);
+ break;
+ case MultiDrawManager::DrawFunction::DrawElementsInstanced:
+ EXPECT_TRUE(result.instance_counts.size() == count);
+ FALLTHROUGH;
+ case MultiDrawManager::DrawFunction::DrawElements:
+ EXPECT_TRUE(result.counts.size() == count);
+ switch (std::get<0>(GetParam())) {
+ case MultiDrawManager::IndexStorageType::Offset:
+ EXPECT_TRUE(result.offsets.size() == count);
+ break;
+ case MultiDrawManager::IndexStorageType::Pointer:
+ EXPECT_TRUE(result.indices.size() == count);
+ break;
+ }
+ break;
+ }
+ }
+
+ std::unique_ptr<MultiDrawManager> multi_draw_manager_;
+};
+
+// Test that the simple case succeeds
+TEST_P(MultiDrawManagerTest, Success) {
+ MultiDrawManager::ResultData result;
+ EXPECT_TRUE(multi_draw_manager_->Begin(100));
+ EXPECT_TRUE(DoMultiDraw(100));
+ EXPECT_TRUE(multi_draw_manager_->End(&result));
+ CheckResultSize(100, result);
+}
+
+// Test that the internal state of the multi draw manager resets such that
+// successive valid multi draws can be executed
+TEST_P(MultiDrawManagerTest, SuccessAfterSuccess) {
+ MultiDrawManager::ResultData result;
+ EXPECT_TRUE(multi_draw_manager_->Begin(100));
+ EXPECT_TRUE(DoMultiDraw(100));
+ EXPECT_TRUE(multi_draw_manager_->End(&result));
+ CheckResultSize(100, result);
+
+ EXPECT_TRUE(multi_draw_manager_->Begin(1000));
+ EXPECT_TRUE(DoMultiDraw(1000));
+ EXPECT_TRUE(multi_draw_manager_->End(&result));
+ CheckResultSize(1000, result);
+}
+
+// Test that multiple chunked multi draw calls succeed
+TEST_P(MultiDrawManagerTest, SuccessMultiple) {
+ MultiDrawManager::ResultData result;
+ EXPECT_TRUE(multi_draw_manager_->Begin(100));
+ EXPECT_TRUE(DoMultiDraw(83));
+ EXPECT_TRUE(DoMultiDraw(4));
+ EXPECT_TRUE(DoMultiDraw(13));
+ EXPECT_TRUE(multi_draw_manager_->End(&result));
+ CheckResultSize(100, result);
+}
+
+// Test that it is invalid to submit an empty multi draw
+TEST_P(MultiDrawManagerTest, Empty) {
+ MultiDrawManager::ResultData result;
+ EXPECT_TRUE(multi_draw_manager_->Begin(0));
+ EXPECT_FALSE(multi_draw_manager_->End(&result));
+}
+
+// Test that it is invalid to end a multi draw if it has not been started
+TEST_P(MultiDrawManagerTest, EndBeforeBegin) {
+ MultiDrawManager::ResultData result;
+ EXPECT_FALSE(multi_draw_manager_->End(&result));
+}
+
+// Test that it is invalid to begin a multi draw twice
+TEST_P(MultiDrawManagerTest, BeginAfterBegin) {
+ EXPECT_TRUE(multi_draw_manager_->Begin(100));
+ EXPECT_FALSE(multi_draw_manager_->Begin(100));
+}
+
+// Test that it is invalid to begin a multi draw twice, even if
+// the first begin was empty
+TEST_P(MultiDrawManagerTest, BeginAfterEmptyBegin) {
+ EXPECT_TRUE(multi_draw_manager_->Begin(0));
+ EXPECT_FALSE(multi_draw_manager_->Begin(100));
+}
+
+// Test that it is invalid to do a multi draw before begin
+TEST_P(MultiDrawManagerTest, DrawBeforeBegin) {
+ EXPECT_FALSE(DoMultiDraw(1));
+}
+
+// Test that it is invalid to end a multi draw twice
+TEST_P(MultiDrawManagerTest, DoubleEnd) {
+ MultiDrawManager::ResultData result;
+ EXPECT_TRUE(multi_draw_manager_->Begin(1));
+ EXPECT_TRUE(DoMultiDraw(1));
+ EXPECT_TRUE(multi_draw_manager_->End(&result));
+ EXPECT_FALSE(multi_draw_manager_->End(&result));
+}
+
+// Test that it is invalid to end a multi draw before the drawcount
+// is saturated
+TEST_P(MultiDrawManagerTest, Underflow) {
+ MultiDrawManager::ResultData result;
+ EXPECT_TRUE(multi_draw_manager_->Begin(100));
+ EXPECT_TRUE(DoMultiDraw(99));
+ EXPECT_FALSE(multi_draw_manager_->End(&result));
+}
+
+// Test that it is invalid to end a multi draw before the drawcount
+// is saturated, using multiple chunks
+TEST_P(MultiDrawManagerTest, UnderflowMultiple) {
+ MultiDrawManager::ResultData result;
+ EXPECT_TRUE(multi_draw_manager_->Begin(100));
+ EXPECT_TRUE(DoMultiDraw(42));
+ EXPECT_TRUE(DoMultiDraw(31));
+ EXPECT_TRUE(DoMultiDraw(26));
+ EXPECT_FALSE(multi_draw_manager_->End(&result));
+}
+
+// Test that it is invalid to do a multi draw that overflows the drawcount
+TEST_P(MultiDrawManagerTest, Overflow) {
+ EXPECT_TRUE(multi_draw_manager_->Begin(100));
+ EXPECT_FALSE(DoMultiDraw(101));
+}
+
+// Test that it is invalid to do a multi draw that overflows the drawcount,
+// using multiple chunks
+TEST_P(MultiDrawManagerTest, OverflowMultiple) {
+ EXPECT_TRUE(multi_draw_manager_->Begin(100));
+ EXPECT_TRUE(DoMultiDraw(31));
+ EXPECT_TRUE(DoMultiDraw(49));
+ EXPECT_FALSE(DoMultiDraw(21));
+}
+
+// Test that it is invalid to do a multi draw that does not match the first
+// chunk's draw mode
+TEST_P(MultiDrawManagerTest, DrawModeMismatch) {
+ MultiDrawManager::ResultData result;
+ EXPECT_TRUE(multi_draw_manager_->Begin(100));
+ EXPECT_TRUE(DoMultiDraw(50, GL_TRIANGLES));
+ EXPECT_FALSE(DoMultiDraw(50, GL_LINES));
+}
+
+// Test that it is invalid to do a multi draw that does not match the first
+// chunk's element type
+TEST_P(MultiDrawManagerTest, ElementTypeMismatch) {
+ MultiDrawManager::DrawFunction draw_function = std::get<1>(GetParam());
+ if (draw_function != MultiDrawManager::DrawFunction::DrawElements &&
+ draw_function != MultiDrawManager::DrawFunction::DrawElementsInstanced) {
+ return;
+ }
+
+ MultiDrawManager::ResultData result;
+ EXPECT_TRUE(multi_draw_manager_->Begin(100));
+ EXPECT_TRUE(DoMultiDraw(50, GL_TRIANGLES, GL_UNSIGNED_INT));
+ EXPECT_FALSE(DoMultiDraw(50, GL_TRIANGLES, GL_UNSIGNED_SHORT));
+}
+
+INSTANTIATE_TEST_SUITE_P(
+ ,
+ MultiDrawManagerTest,
+ testing::Combine(
+ testing::Values(MultiDrawManager::IndexStorageType::Offset,
+ MultiDrawManager::IndexStorageType::Pointer),
+ testing::Values(
+ MultiDrawManager::DrawFunction::DrawArrays,
+ MultiDrawManager::DrawFunction::DrawArraysInstanced,
+ MultiDrawManager::DrawFunction::DrawElements,
+ MultiDrawManager::DrawFunction::DrawElementsInstanced)));
+
+} // namespace gles2
+} // namespace gpu
diff --git a/chromium/gpu/command_buffer/service/passthrough_program_cache_unittest.cc b/chromium/gpu/command_buffer/service/passthrough_program_cache_unittest.cc
index 394f47efb96..3916153aaf0 100644
--- a/chromium/gpu/command_buffer/service/passthrough_program_cache_unittest.cc
+++ b/chromium/gpu/command_buffer/service/passthrough_program_cache_unittest.cc
@@ -48,6 +48,7 @@ class PassthroughProgramCacheTest : public GpuServiceTest,
void OnRescheduleAfterFinished() override {}
void OnSwapBuffers(uint64_t swap_id, uint32_t flags) override {}
void ScheduleGrContextCleanup() override {}
+ void HandleReturnData(base::span<const uint8_t> data) override {}
int32_t blob_count() { return blob_count_; }
diff --git a/chromium/gpu/command_buffer/service/program_manager.cc b/chromium/gpu/command_buffer/service/program_manager.cc
index dfee7338097..12ba131a213 100644
--- a/chromium/gpu/command_buffer/service/program_manager.cc
+++ b/chromium/gpu/command_buffer/service/program_manager.cc
@@ -987,7 +987,7 @@ bool Program::UpdateUniforms() {
if (size > 1) {
for (GLsizei ii = 1; ii < size; ++ii) {
std::string element_name(service_base_name + "[" +
- base::IntToString(ii) + "]");
+ base::NumberToString(ii) + "]");
service_locations[ii] =
glGetUniformLocation(service_id_, element_name.c_str());
}
@@ -1111,7 +1111,7 @@ void Program::UpdateFragmentInputs() {
}
for (GLsizei jj = 1; jj < size; ++jj) {
- std::string array_spec(std::string("[") + base::IntToString(jj) + "]");
+ std::string array_spec(std::string("[") + base::NumberToString(jj) + "]");
std::string client_element_name =
parsed_client_name.base_name() + array_spec;
@@ -1177,15 +1177,16 @@ void Program::UpdateProgramOutputs() {
if (color_name >= 0) {
GLint index = 0;
for (size_t ii = 0; ii < output_var.getOutermostArraySize(); ++ii) {
- std::string array_spec(
- std::string("[") + base::IntToString(ii) + "]");
+ std::string array_spec(std::string("[") + base::NumberToString(ii) +
+ "]");
program_output_infos_.push_back(ProgramOutputInfo(
color_name + ii, index, client_name + array_spec));
}
}
} else {
for (size_t ii = 0; ii < output_var.getOutermostArraySize(); ++ii) {
- std::string array_spec(std::string("[") + base::IntToString(ii) + "]");
+ std::string array_spec(std::string("[") + base::NumberToString(ii) +
+ "]");
std::string service_element_name(service_name + array_spec);
GLint color_name =
glGetFragDataLocation(service_id_, service_element_name.c_str());
@@ -1266,7 +1267,7 @@ void Program::ExecuteProgramOutputBindCalls() {
std::string name = output_var.name;
std::string array_spec;
if (is_array) {
- array_spec = std::string("[") + base::IntToString(jj) + "]";
+ array_spec = std::string("[") + base::NumberToString(jj) + "]";
name += array_spec;
}
auto it = bind_program_output_location_index_map_.find(name);
@@ -2082,7 +2083,7 @@ bool Program::DetectProgramOutputLocationBindingConflicts() const {
for (size_t jj = 0; jj < count; ++jj) {
std::string name = output_var.name;
if (is_array)
- name += std::string("[") + base::IntToString(jj) + "]";
+ name += std::string("[") + base::NumberToString(jj) + "]";
auto it = bind_program_output_location_index_map_.find(name);
if (it == bind_program_output_location_index_map_.end())
diff --git a/chromium/gpu/command_buffer/service/program_manager_unittest.cc b/chromium/gpu/command_buffer/service/program_manager_unittest.cc
index 095a9a112e6..adc65411643 100644
--- a/chromium/gpu/command_buffer/service/program_manager_unittest.cc
+++ b/chromium/gpu/command_buffer/service/program_manager_unittest.cc
@@ -95,6 +95,7 @@ class ProgramManagerTestBase : public GpuServiceTest, public DecoderClient {
void OnRescheduleAfterFinished() override {}
void OnSwapBuffers(uint64_t swap_id, uint32_t flags) override {}
void ScheduleGrContextCleanup() override {}
+ void HandleReturnData(base::span<const uint8_t> data) override {}
std::unique_ptr<ProgramManager> manager_;
GpuPreferences gpu_preferences_;
@@ -2545,7 +2546,7 @@ testing::tuple<const char*, const char*> make_gl_ext_tuple(
}
}
-INSTANTIATE_TEST_CASE_P(
+INSTANTIATE_TEST_SUITE_P(
SupportedContexts,
ProgramManagerWithPathRenderingTest,
testing::Values(
@@ -2635,7 +2636,7 @@ TEST_P(ProgramManagerDualSourceBlendingES2Test, UseSecondaryFragData) {
EXPECT_TRUE(LinkAsExpected(program, true));
}
-INSTANTIATE_TEST_CASE_P(
+INSTANTIATE_TEST_SUITE_P(
SupportedContexts,
ProgramManagerDualSourceBlendingES2Test,
testing::Values(
diff --git a/chromium/gpu/command_buffer/service/query_manager_unittest.cc b/chromium/gpu/command_buffer/service/query_manager_unittest.cc
index 55508cd7557..e4156821c81 100644
--- a/chromium/gpu/command_buffer/service/query_manager_unittest.cc
+++ b/chromium/gpu/command_buffer/service/query_manager_unittest.cc
@@ -7,6 +7,7 @@
#include <memory>
+#include "base/bind.h"
#include "gpu/command_buffer/client/client_test_helper.h"
#include "gpu/command_buffer/common/gles2_cmd_format.h"
#include "gpu/command_buffer/service/error_state_mock.h"
@@ -561,7 +562,7 @@ TEST_F(QueryManagerTest, TimeElapsedQuery) {
const base::subtle::Atomic32 kSubmitCount = 123;
gl::GPUTimingFake fake_timing_queries;
decoder_->GetGLContext()->CreateGPUTimingClient()->SetCpuTimeForTesting(
- base::Bind(&gl::GPUTimingFake::GetFakeCPUTime));
+ base::BindRepeating(&gl::GPUTimingFake::GetFakeCPUTime));
QueryManager::Query* query = CreateQuery(
kTarget, kClient1Id, shared_memory_id_, kSharedMemoryOffset, 0);
@@ -593,7 +594,7 @@ TEST_F(QueryManagerTest, TimeElapsedPauseResume) {
const base::subtle::Atomic32 kSubmitCount = 123;
gl::GPUTimingFake fake_timing_queries;
decoder_->GetGLContext()->CreateGPUTimingClient()->SetCpuTimeForTesting(
- base::Bind(&gl::GPUTimingFake::GetFakeCPUTime));
+ base::BindRepeating(&gl::GPUTimingFake::GetFakeCPUTime));
QueryManager::Query* query = CreateQuery(
kTarget, kClient1Id, shared_memory_id_, kSharedMemoryOffset, 0);
@@ -698,7 +699,7 @@ TEST_F(QueryManagerTest, TimeStampQuery) {
gl::GPUTimingFake fake_timing_queries;
decoder_->GetGLContext()->CreateGPUTimingClient()->SetCpuTimeForTesting(
- base::Bind(&gl::GPUTimingFake::GetFakeCPUTime));
+ base::BindRepeating(&gl::GPUTimingFake::GetFakeCPUTime));
QueryManager::Query* query = CreateQuery(
kTarget, kClient1Id, shared_memory_id_, kSharedMemoryOffset, 0);
@@ -725,7 +726,7 @@ TEST_F(QueryManagerTest, TimeStampQueryPending) {
gl::GPUTimingFake fake_timing_queries;
decoder_->GetGLContext()->CreateGPUTimingClient()->SetCpuTimeForTesting(
- base::Bind(&gl::GPUTimingFake::GetFakeCPUTime));
+ base::BindRepeating(&gl::GPUTimingFake::GetFakeCPUTime));
QueryManager::Query* query = CreateQuery(
kTarget, kClient1Id, shared_memory_id_, kSharedMemoryOffset, 0);
diff --git a/chromium/gpu/command_buffer/service/raster_decoder.cc b/chromium/gpu/command_buffer/service/raster_decoder.cc
index b306574e4a5..21f408fb758 100644
--- a/chromium/gpu/command_buffer/service/raster_decoder.cc
+++ b/chromium/gpu/command_buffer/service/raster_decoder.cc
@@ -9,9 +9,11 @@
#include <algorithm>
#include <memory>
#include <string>
+#include <utility>
#include <vector>
#include "base/atomic_sequence_num.h"
+#include "base/bind.h"
#include "base/containers/flat_map.h"
#include "base/debug/crash_logging.h"
#include "base/logging.h"
@@ -21,7 +23,6 @@
#include "base/strings/stringprintf.h"
#include "base/trace_event/trace_event.h"
#include "build/build_config.h"
-#include "cc/paint/color_space_transfer_cache_entry.h"
#include "cc/paint/paint_cache.h"
#include "cc/paint/paint_op_buffer.h"
#include "cc/paint/transfer_cache_entry.h"
@@ -36,7 +37,6 @@
#include "gpu/command_buffer/common/raster_cmd_ids.h"
#include "gpu/command_buffer/common/sync_token.h"
#include "gpu/command_buffer/service/command_buffer_service.h"
-#include "gpu/command_buffer/service/context_group.h"
#include "gpu/command_buffer/service/context_state.h"
#include "gpu/command_buffer/service/decoder_client.h"
#include "gpu/command_buffer/service/error_state.h"
@@ -52,13 +52,13 @@
#include "gpu/command_buffer/service/raster_cmd_validation.h"
#include "gpu/command_buffer/service/service_font_manager.h"
#include "gpu/command_buffer/service/service_transfer_cache.h"
+#include "gpu/command_buffer/service/service_utils.h"
#include "gpu/command_buffer/service/shared_context_state.h"
#include "gpu/command_buffer/service/shared_image_factory.h"
#include "gpu/command_buffer/service/shared_image_representation.h"
#include "gpu/command_buffer/service/skia_utils.h"
#include "gpu/command_buffer/service/wrapped_sk_image.h"
#include "third_party/skia/include/core/SkCanvas.h"
-#include "third_party/skia/include/core/SkColorSpaceXformCanvas.h"
#include "third_party/skia/include/core/SkDeferredDisplayListRecorder.h"
#include "third_party/skia/include/core/SkSurface.h"
#include "third_party/skia/include/core/SkSurfaceProps.h"
@@ -218,7 +218,10 @@ class RasterDecoderImpl final : public RasterDecoder,
RasterDecoderImpl(DecoderClient* client,
CommandBufferServiceBase* command_buffer_service,
gles2::Outputter* outputter,
- gles2::ContextGroup* group,
+ const GpuFeatureInfo& gpu_feature_info,
+ const GpuPreferences& gpu_preferences,
+ MemoryTracker* memory_tracker,
+ SharedImageManager* shared_image_manager,
scoped_refptr<SharedContextState> shared_context_state);
~RasterDecoderImpl() override;
@@ -237,7 +240,7 @@ class RasterDecoderImpl final : public RasterDecoder,
gl::GLContext* GetGLContext() override;
gl::GLSurface* GetGLSurface() override;
const gles2::FeatureInfo* GetFeatureInfo() const override {
- return feature_info_.get();
+ return feature_info();
}
Capabilities GetCapabilities() override;
const gles2::ContextState* GetContextState() override;
@@ -335,6 +338,7 @@ class RasterDecoderImpl final : public RasterDecoder,
NOTIMPLEMENTED();
return false;
}
+ int GetRasterDecoderId() const override;
int DecoderIdForTest() override;
ServiceTransferCache* GetTransferCacheForTest() override;
void SetUpForRasterCHROMIUMForTest() override;
@@ -359,7 +363,7 @@ class RasterDecoderImpl final : public RasterDecoder,
private:
gles2::ContextState* state() const {
- if (use_passthrough()) {
+ if (use_passthrough_) {
NOTREACHED();
return nullptr;
}
@@ -371,12 +375,29 @@ class RasterDecoderImpl final : public RasterDecoder,
return shared_context_state_->transfer_cache();
}
+ const gles2::FeatureInfo* feature_info() const {
+ return shared_context_state_->feature_info();
+ }
+
const gles2::FeatureInfo::FeatureFlags& features() const {
- return feature_info_->feature_flags();
+ return feature_info()->feature_flags();
}
const GpuDriverBugWorkarounds& workarounds() const {
- return feature_info_->workarounds();
+ return feature_info()->workarounds();
+ }
+
+ void FlushToWorkAroundMacCrashes() {
+#if defined(OS_MACOSX)
+ // This function does aggressive flushes to work around crashes in the
+ // macOS OpenGL driver.
+ // https://crbug.com/906453
+ if (!flush_workaround_disabled_for_test_) {
+ if (gr_context())
+ gr_context()->flush();
+ api()->glFlushFn();
+ }
+#endif
}
bool IsRobustnessSupported() {
@@ -386,23 +407,9 @@ class RasterDecoderImpl final : public RasterDecoder,
}
const gl::GLVersionInfo& gl_version_info() {
- return feature_info_->gl_version_info();
+ return feature_info()->gl_version_info();
}
- MemoryTracker* memory_tracker() { return group_->memory_tracker(); }
-
- bool use_passthrough() const { return group_->use_passthrough_cmd_decoder(); }
-
- gles2::BufferManager* buffer_manager() { return group_->buffer_manager(); }
-
- const gles2::TextureManager* texture_manager() const {
- return group_->texture_manager();
- }
-
- gles2::TextureManager* texture_manager() { return group_->texture_manager(); }
-
- gles2::ImageManager* image_manager() { return group_->image_manager(); }
-
// Set remaining commands to process to 0 to force DoCommands to return
// and allow context preemption and GPU watchdog checks in
// CommandExecutor().
@@ -429,15 +436,10 @@ class RasterDecoderImpl final : public RasterDecoder,
GLsizei width,
GLsizei height,
const volatile GLbyte* mailboxes);
- // If the texture has an image but that image is not bound or copied to the
- // texture, this will first attempt to bind it, and if that fails
- // CopyTexImage on it.
- void DoBindOrCopyTexImageIfNeeded(gles2::Texture* texture, GLenum textarget);
void DoLoseContextCHROMIUM(GLenum current, GLenum other) { NOTIMPLEMENTED(); }
void DoBeginRasterCHROMIUM(GLuint sk_color,
GLuint msaa_sample_count,
GLboolean can_use_lcd_text,
- GLuint color_space_transfer_cache_id,
const volatile GLbyte* key);
void DoRasterCHROMIUM(GLuint raster_shm_id,
GLuint raster_shm_offset,
@@ -476,7 +478,7 @@ class RasterDecoderImpl final : public RasterDecoder,
void LogClientServiceMapping(const char* function_name,
GLuint client_id,
GLuint service_id) {
- if (service_logging_) {
+ if (gpu_preferences_.enable_gpu_service_logging_gpu) {
VLOG(1) << "[" << logger_.GetLogPrefix() << "] " << function_name
<< ": client_id = " << client_id
<< ", service_id = " << service_id;
@@ -520,7 +522,9 @@ class RasterDecoderImpl final : public RasterDecoder,
// Number of commands remaining to be processed in DoCommands().
int commands_to_process_ = 0;
+ bool supports_gpu_raster_ = false;
bool supports_oop_raster_ = false;
+ bool use_passthrough_ = false;
bool use_ddl_ = false;
bool has_robustness_extension_ = false;
@@ -535,27 +539,24 @@ class RasterDecoderImpl final : public RasterDecoder,
DecoderClient* client_;
+ GpuPreferences gpu_preferences_;
+
gles2::DebugMarkerManager debug_marker_manager_;
gles2::Logger logger_;
std::unique_ptr<gles2::ErrorState> error_state_;
bool context_lost_ = false;
- // The ContextGroup for this decoder uses to track resources.
- scoped_refptr<gles2::ContextGroup> group_;
scoped_refptr<SharedContextState> shared_context_state_;
std::unique_ptr<Validators> validators_;
- scoped_refptr<gles2::FeatureInfo> feature_info_;
+ SharedImageRepresentationFactory shared_image_representation_factory_;
std::unique_ptr<QueryManager> query_manager_;
gles2::GLES2Util util_;
- // An optional behaviour to lose the context and group when OOM.
+ // An optional behaviour to lose the context when OOM.
bool lose_context_when_out_of_memory_ = false;
- // Log extra info.
- bool service_logging_;
-
std::unique_ptr<gles2::CopyTexImageResourceManager> copy_tex_image_blit_;
std::unique_ptr<gles2::CopyTextureCHROMIUMResourceManager>
copy_texture_chromium_;
@@ -573,8 +574,7 @@ class RasterDecoderImpl final : public RasterDecoder,
std::unique_ptr<cc::ServicePaintCache> paint_cache_;
std::unique_ptr<SkDeferredDisplayListRecorder> recorder_;
- std::unique_ptr<SkCanvas> raster_canvas_;
- uint32_t raster_color_space_id_;
+ SkCanvas* raster_canvas_ = nullptr; // ptr into recorder_ or sk_surface_
std::vector<SkDiscardableHandleId> locked_handles_;
// Tracing helpers.
@@ -610,9 +610,14 @@ RasterDecoder* RasterDecoder::Create(
DecoderClient* client,
CommandBufferServiceBase* command_buffer_service,
gles2::Outputter* outputter,
- gles2::ContextGroup* group,
+ const GpuFeatureInfo& gpu_feature_info,
+ const GpuPreferences& gpu_preferences,
+ MemoryTracker* memory_tracker,
+ SharedImageManager* shared_image_manager,
scoped_refptr<SharedContextState> shared_context_state) {
- return new RasterDecoderImpl(client, command_buffer_service, outputter, group,
+ return new RasterDecoderImpl(client, command_buffer_service, outputter,
+ gpu_feature_info, gpu_preferences,
+ memory_tracker, shared_image_manager,
std::move(shared_context_state));
}
@@ -660,23 +665,30 @@ RasterDecoderImpl::RasterDecoderImpl(
DecoderClient* client,
CommandBufferServiceBase* command_buffer_service,
gles2::Outputter* outputter,
- gles2::ContextGroup* group,
+ const GpuFeatureInfo& gpu_feature_info,
+ const GpuPreferences& gpu_preferences,
+ MemoryTracker* memory_tracker,
+ SharedImageManager* shared_image_manager,
scoped_refptr<SharedContextState> shared_context_state)
: RasterDecoder(command_buffer_service, outputter),
raster_decoder_id_(g_raster_decoder_id.GetNext() + 1),
+ supports_gpu_raster_(
+ gpu_feature_info.status_values[GPU_FEATURE_TYPE_GPU_RASTERIZATION] ==
+ kGpuFeatureStatusEnabled),
+ use_passthrough_(gles2::PassthroughCommandDecoderSupported() &&
+ gpu_preferences.use_passthrough_cmd_decoder),
client_(client),
+ gpu_preferences_(gpu_preferences),
logger_(&debug_marker_manager_,
base::BindRepeating(&DecoderClient::OnConsoleMessage,
base::Unretained(client_),
0),
- group->gpu_preferences().disable_gl_error_limit),
+ gpu_preferences_.disable_gl_error_limit),
error_state_(gles2::ErrorState::Create(this, &logger_)),
- group_(group),
shared_context_state_(std::move(shared_context_state)),
validators_(new Validators),
- feature_info_(group_->feature_info()),
- service_logging_(
- group_->gpu_preferences().enable_gpu_service_logging_gpu),
+ shared_image_representation_factory_(shared_image_manager,
+ memory_tracker),
gpu_decoder_category_(TRACE_EVENT_API_GET_CATEGORY_GROUP_ENABLED(
TRACE_DISABLED_BY_DEFAULT("gpu.decoder"))),
font_manager_(base::MakeRefCounted<ServiceFontManager>(this)),
@@ -711,10 +723,10 @@ ContextResult RasterDecoderImpl::Initialize(
return ContextResult::kFatalFailure;
}
- if (group_->gpu_preferences().enable_gpu_debugging)
+ if (gpu_preferences_.enable_gpu_debugging)
set_debug(true);
- if (group_->gpu_preferences().enable_gpu_command_logging)
+ if (gpu_preferences_.enable_gpu_command_logging)
SetLogCommands(true);
DCHECK_EQ(surface.get(), shared_context_state_->surface());
@@ -728,15 +740,6 @@ ContextResult RasterDecoderImpl::Initialize(
lose_context_when_out_of_memory_ =
attrib_helper.lose_context_when_out_of_memory;
- auto result =
- group_->Initialize(this, attrib_helper.context_type, disallowed_features);
- if (result != ContextResult::kSuccess) {
- group_ =
- nullptr; // Must not destroy ContextGroup if it is not initialized.
- Destroy(true);
- return result;
- }
-
CHECK_GL_ERROR();
query_manager_ = std::make_unique<QueryManager>();
@@ -756,7 +759,7 @@ ContextResult RasterDecoderImpl::Initialize(
supports_oop_raster_ = !!shared_context_state_->gr_context();
if (supports_oop_raster_)
paint_cache_ = std::make_unique<cc::ServicePaintCache>();
- use_ddl_ = group_->gpu_preferences().enable_oop_rasterization_ddl;
+ use_ddl_ = gpu_preferences_.enable_oop_rasterization_ddl;
}
return ContextResult::kSuccess;
@@ -787,10 +790,6 @@ void RasterDecoderImpl::Destroy(bool have_context) {
if (gr_context()) {
gr_context()->flush();
}
- } else {
- if (group_ && group_->texture_manager()) {
- group_->texture_manager()->MarkContextLost();
- }
}
copy_tex_image_blit_.reset();
@@ -801,11 +800,6 @@ void RasterDecoderImpl::Destroy(bool have_context) {
query_manager_.reset();
}
- if (group_.get()) {
- group_->Destroy(this, have_context);
- group_ = nullptr;
- }
-
// Destroy the surface before the context, some surface destructors make GL
// calls.
if (context_.get()) {
@@ -834,7 +828,6 @@ bool RasterDecoderImpl::MakeCurrent() {
!shared_context_state_->MakeCurrent(nullptr)) {
LOG(ERROR) << " RasterDecoderImpl: Context lost during MakeCurrent.";
MarkContextLost(error::kMakeCurrentFailed);
- group_->LoseContexts(error::kUnknown);
return false;
}
@@ -843,7 +836,6 @@ bool RasterDecoderImpl::MakeCurrent() {
if (CheckResetStatus()) {
LOG(ERROR)
<< " RasterDecoderImpl: Context reset detected after MakeCurrent.";
- group_->LoseContexts(error::kUnknown);
return false;
}
@@ -863,22 +855,23 @@ gl::GLSurface* RasterDecoderImpl::GetGLSurface() {
Capabilities RasterDecoderImpl::GetCapabilities() {
Capabilities caps;
- caps.gpu_rasterization =
- group_->gpu_feature_info()
- .status_values[GPU_FEATURE_TYPE_GPU_RASTERIZATION] ==
- kGpuFeatureStatusEnabled;
+ caps.gpu_rasterization = supports_gpu_raster_;
caps.supports_oop_raster = supports_oop_raster_;
caps.gpu_memory_buffer_formats =
- feature_info_->feature_flags().gpu_memory_buffer_formats;
+ feature_info()->feature_flags().gpu_memory_buffer_formats;
caps.texture_target_exception_list =
- group_->gpu_preferences().texture_target_exception_list;
+ gpu_preferences_.texture_target_exception_list;
caps.texture_format_bgra8888 =
- feature_info_->feature_flags().ext_texture_format_bgra8888;
+ feature_info()->feature_flags().ext_texture_format_bgra8888;
caps.texture_storage_image =
- feature_info_->feature_flags().chromium_texture_storage_image;
- caps.texture_storage = feature_info_->feature_flags().ext_texture_storage;
- caps.max_texture_size = texture_manager()->MaxSizeForTarget(GL_TEXTURE_2D);
- caps.sync_query = feature_info_->feature_flags().chromium_sync_query;
+ feature_info()->feature_flags().chromium_texture_storage_image;
+ caps.texture_storage = feature_info()->feature_flags().ext_texture_storage;
+ api()->glGetIntegervFn(GL_MAX_TEXTURE_SIZE, &caps.max_texture_size);
+ if (feature_info()->workarounds().max_texture_size) {
+ caps.max_texture_size = std::min(
+ caps.max_texture_size, feature_info()->workarounds().max_texture_size);
+ }
+ caps.sync_query = feature_info()->feature_flags().chromium_sync_query;
if (gr_context()) {
caps.context_supports_distance_field_text =
@@ -1086,13 +1079,14 @@ gles2::Logger* RasterDecoderImpl::GetLogger() {
}
void RasterDecoderImpl::SetIgnoreCachedStateForTest(bool ignore) {
- if (use_passthrough())
+ if (use_passthrough_)
return;
state()->SetIgnoreCachedStateForTest(ignore);
}
gles2::ImageManager* RasterDecoderImpl::GetImageManagerForTest() {
- return group_->image_manager();
+ NOTREACHED();
+ return nullptr;
}
void RasterDecoderImpl::SetCopyTextureResourceManagerForTest(
@@ -1132,18 +1126,6 @@ error::Error RasterDecoderImpl::DoCommandsImpl(unsigned int num_commands,
int process_pos = 0;
CommandId command = static_cast<CommandId>(0);
-#if defined(OS_MACOSX)
- if (!flush_workaround_disabled_for_test_) {
- // Flush before and after decoding commands.
- // TODO(ccameron): This is to determine if this high frequency flushing
- // affects crash rates.
- // https://crbug.com/906453
- if (gr_context())
- gr_context()->flush();
- api()->glFlushFn();
- }
-#endif
-
while (process_pos < num_entries && result == error::kNoError &&
commands_to_process_--) {
const unsigned int size = cmd_data->value_header.size;
@@ -1224,6 +1206,11 @@ error::Error RasterDecoderImpl::DoCommandsImpl(unsigned int num_commands,
process_pos += size;
cmd_data += size;
}
+
+ // Workaround for https://crbug.com/906453: Flush after every command that
+ // is not between a BeginRaster and EndRaster.
+ if (!sk_surface_)
+ FlushToWorkAroundMacCrashes();
}
*entries_processed = process_pos;
@@ -1236,14 +1223,6 @@ error::Error RasterDecoderImpl::DoCommandsImpl(unsigned int num_commands,
if (supports_oop_raster_)
client_->ScheduleGrContextCleanup();
-#if defined(OS_MACOSX)
- if (!flush_workaround_disabled_for_test_) {
- if (gr_context())
- gr_context()->flush();
- api()->glFlushFn();
- }
-#endif
-
return result;
}
@@ -1272,7 +1251,7 @@ void RasterDecoderImpl::BindImage(uint32_t client_texture_id,
}
gles2::ContextGroup* RasterDecoderImpl::GetContextGroup() {
- return group_.get();
+ return nullptr;
}
gles2::ErrorState* RasterDecoderImpl::GetErrorState() {
@@ -1292,7 +1271,8 @@ RasterDecoderImpl::CreateAbstractTexture(GLenum target,
}
bool RasterDecoderImpl::IsCompressedTextureFormat(unsigned format) {
- return feature_info_->validators()->compressed_texture_format.IsValid(format);
+ return feature_info()->validators()->compressed_texture_format.IsValid(
+ format);
}
bool RasterDecoderImpl::ClearLevel(gles2::Texture* texture,
@@ -1349,7 +1329,7 @@ bool RasterDecoderImpl::ClearLevel(gles2::Texture* texture,
texture->service_id(), gr_context());
base::Optional<ScopedPixelUnpackState> pixel_unpack_state;
if (shared_context_state_->need_context_state_reset()) {
- pixel_unpack_state.emplace(state(), gr_context(), group_->feature_info());
+ pixel_unpack_state.emplace(state(), gr_context(), feature_info());
}
// Add extra scope to destroy zero and the object it owns right
// after its usage.
@@ -1361,8 +1341,8 @@ bool RasterDecoderImpl::ClearLevel(gles2::Texture* texture,
GLint h = y + tile_height > height ? height - y : tile_height;
api()->glTexSubImage2DFn(
target, level, xoffset, yoffset + y, width, h,
- gles2::TextureManager::AdjustTexFormat(feature_info_.get(), format),
- type, zero.get());
+ gles2::TextureManager::AdjustTexFormat(feature_info(), format), type,
+ zero.get());
y += tile_height;
}
}
@@ -1380,6 +1360,10 @@ bool RasterDecoderImpl::ClearCompressedTextureLevel(gles2::Texture* texture,
return false;
}
+int RasterDecoderImpl::GetRasterDecoderId() const {
+ return raster_decoder_id_;
+}
+
int RasterDecoderImpl::DecoderIdForTest() {
return raster_decoder_id_;
}
@@ -1391,9 +1375,10 @@ ServiceTransferCache* RasterDecoderImpl::GetTransferCacheForTest() {
void RasterDecoderImpl::SetUpForRasterCHROMIUMForTest() {
// Some tests use mock GL which doesn't work with skia. Just use a bitmap
// backed surface for OOP raster commands.
- sk_surface_ = SkSurface::MakeRaster(SkImageInfo::MakeN32Premul(10, 10));
- raster_canvas_ = SkCreateColorSpaceXformCanvas(sk_surface_->getCanvas(),
- SkColorSpace::MakeSRGB());
+ auto info = SkImageInfo::MakeN32(10, 10, kPremul_SkAlphaType,
+ SkColorSpace::MakeSRGB());
+ sk_surface_ = SkSurface::MakeRaster(info);
+ raster_canvas_ = sk_surface_->getCanvas();
}
void RasterDecoderImpl::SetOOMErrorForTest() {
@@ -1409,21 +1394,15 @@ void RasterDecoderImpl::OnContextLostError() {
if (!WasContextLost()) {
// Need to lose current context before broadcasting!
CheckResetStatus();
- group_->LoseContexts(error::kUnknown);
reset_by_robustness_extension_ = true;
}
}
void RasterDecoderImpl::OnOutOfMemoryError() {
if (lose_context_when_out_of_memory_ && !WasContextLost()) {
- error::ContextLostReason other = error::kOutOfMemory;
- if (CheckResetStatus()) {
- other = error::kUnknown;
- } else {
- // Need to lose current context before broadcasting!
+ if (!CheckResetStatus()) {
MarkContextLost(error::kOutOfMemory);
}
- group_->LoseContexts(other);
}
}
@@ -1629,7 +1608,7 @@ bool RasterDecoderImpl::InitializeCopyTexImageBlitter() {
if (!copy_tex_image_blit_.get()) {
LOCAL_COPY_REAL_GL_ERRORS_TO_WRAPPER("glCopySubTexture");
copy_tex_image_blit_.reset(
- new gles2::CopyTexImageResourceManager(feature_info_.get()));
+ new gles2::CopyTexImageResourceManager(feature_info()));
copy_tex_image_blit_->Initialize(this);
if (LOCAL_PEEK_GL_ERROR("glCopySubTexture") != GL_NO_ERROR)
return false;
@@ -1652,7 +1631,7 @@ bool RasterDecoderImpl::InitializeCopyTextureCHROMIUM() {
// CopyTex{Sub}Image2D for luminance, alpha, and luminance_alpha
// textures.
if (gles2::CopyTexImageResourceManager::CopyTexImageRequiresBlit(
- feature_info_.get(), GL_LUMINANCE)) {
+ feature_info(), GL_LUMINANCE)) {
if (!InitializeCopyTexImageBlitter())
return false;
}
@@ -1677,32 +1656,51 @@ void RasterDecoderImpl::DoCopySubTextureINTERNAL(
DLOG_IF(ERROR, !dest_mailbox.Verify())
<< "CopySubTexture was passed an invalid mailbox";
- if (use_passthrough()) {
- // TODO(piman): use shared image representations instead.
- gles2::TexturePassthrough* source_texture =
- gles2::TexturePassthrough::CheckedCast(
- group_->mailbox_manager()->ConsumeTexture(source_mailbox));
- gles2::TexturePassthrough* dest_texture =
- gles2::TexturePassthrough::CheckedCast(
- group_->mailbox_manager()->ConsumeTexture(dest_mailbox));
- if (!source_texture || !dest_texture) {
+ if (source_mailbox == dest_mailbox) {
+ LOCAL_SET_GL_ERROR(GL_INVALID_OPERATION, "glCopySubTexture",
+ "source and destination mailboxes are the same");
+ return;
+ }
+
+ if (use_passthrough_) {
+ std::unique_ptr<SharedImageRepresentationGLTexturePassthrough>
+ source_shared_image =
+ shared_image_representation_factory_.ProduceGLTexturePassthrough(
+ source_mailbox);
+ std::unique_ptr<SharedImageRepresentationGLTexturePassthrough>
+ dest_shared_image =
+ shared_image_representation_factory_.ProduceGLTexturePassthrough(
+ dest_mailbox);
+ if (!source_shared_image || !dest_shared_image) {
LOCAL_SET_GL_ERROR(GL_INVALID_VALUE, "glCopySubTexture",
"unknown mailbox");
return;
}
- if (source_texture->is_bind_pending()) {
- gl::GLImage* image =
- source_texture->GetLevelImage(source_texture->target(), 0);
- if (image) {
- api()->glBindTextureFn(source_texture->target(),
- source_texture->service_id());
- if (!image->BindTexImage(source_texture->target())) {
- image->CopyTexImage(source_texture->target());
- }
- source_texture->set_is_bind_pending(false);
- }
+
+ SharedImageRepresentationGLTexturePassthrough::ScopedAccess source_access(
+ source_shared_image.get(), GL_SHARED_IMAGE_ACCESS_MODE_READ_CHROMIUM);
+ if (!source_access.success()) {
+ LOCAL_SET_GL_ERROR(GL_INVALID_VALUE, "glCopySubTexture",
+ "unable to access source for read");
+ return;
+ }
+
+ SharedImageRepresentationGLTexturePassthrough::ScopedAccess dest_access(
+ dest_shared_image.get(),
+ GL_SHARED_IMAGE_ACCESS_MODE_READWRITE_CHROMIUM);
+ if (!dest_access.success()) {
+ LOCAL_SET_GL_ERROR(GL_INVALID_VALUE, "glCopySubTexture",
+ "unable to access destination for write");
+ return;
}
+ gles2::TexturePassthrough* source_texture =
+ source_shared_image->GetTexturePassthrough().get();
+ gles2::TexturePassthrough* dest_texture =
+ dest_shared_image->GetTexturePassthrough().get();
+ DCHECK(!source_texture->is_bind_pending());
+ DCHECK_NE(source_texture->service_id(), dest_texture->service_id());
+
api()->glCopySubTextureCHROMIUMFn(
source_texture->service_id(), /*source_level=*/0,
dest_texture->target(), dest_texture->service_id(),
@@ -1712,116 +1710,70 @@ void RasterDecoderImpl::DoCopySubTextureINTERNAL(
return;
}
- // TODO(piman): use shared image representations instead.
- gles2::Texture* source_texture = gles2::Texture::CheckedCast(
- group_->mailbox_manager()->ConsumeTexture(source_mailbox));
- gles2::Texture* dest_texture = gles2::Texture::CheckedCast(
- group_->mailbox_manager()->ConsumeTexture(dest_mailbox));
- if (!source_texture || !dest_texture) {
+ std::unique_ptr<SharedImageRepresentationGLTexture> source_shared_image =
+ shared_image_representation_factory_.ProduceGLTexture(source_mailbox);
+ std::unique_ptr<SharedImageRepresentationGLTexture> dest_shared_image =
+ shared_image_representation_factory_.ProduceGLTexture(dest_mailbox);
+ if (!source_shared_image || !dest_shared_image) {
LOCAL_SET_GL_ERROR(GL_INVALID_VALUE, "glCopySubTexture", "unknown mailbox");
return;
}
- if (source_texture == dest_texture) {
- LOCAL_SET_GL_ERROR(GL_INVALID_OPERATION, "glCopySubTexture",
- "source and destination textures are the same");
+
+ SharedImageRepresentationGLTexture::ScopedAccess source_access(
+ source_shared_image.get(), GL_SHARED_IMAGE_ACCESS_MODE_READ_CHROMIUM);
+ if (!source_access.success()) {
+ LOCAL_SET_GL_ERROR(GL_INVALID_VALUE, "glCopySubTexture",
+ "unable to access source for read");
return;
}
+
+ gles2::Texture* source_texture = source_shared_image->GetTexture();
GLenum source_target = source_texture->target();
- GLenum dest_target = dest_texture->target();
- if (!source_target || !dest_target) {
- LOCAL_SET_GL_ERROR(GL_INVALID_OPERATION, "glCopySubTexture",
- "textures not initialized");
+ DCHECK(source_target);
+ GLint source_level = 0;
+ gfx::Size source_size = source_shared_image->size();
+ gfx::Rect source_rect(x, y, width, height);
+ if (!gfx::Rect(source_size).Contains(source_rect)) {
+ LOCAL_SET_GL_ERROR(GL_INVALID_VALUE, "glCopySubTexture",
+ "source texture bad dimensions.");
return;
}
- GLint source_level = 0;
- GLint dest_level = 0;
-
- ScopedTextureBinder binder(state(), dest_target, dest_texture->service_id(),
- gr_context());
- base::Optional<ScopedPixelUnpackState> pixel_unpack_state;
-
- int source_width = 0;
- int source_height = 0;
- gl::GLImage* image =
- source_texture->GetLevelImage(source_target, 0 /* level */);
- if (image) {
- gfx::Size size = image->GetSize();
- source_width = size.width();
- source_height = size.height();
- if (source_width <= 0 || source_height <= 0) {
- LOCAL_SET_GL_ERROR(GL_INVALID_VALUE, "glCopySubTexture",
- "invalid image size");
- return;
- }
-
- // Ideally we should not need to check that the sub-texture copy rectangle
- // is valid in two different ways, here and below. However currently there
- // is no guarantee that a texture backed by a GLImage will have sensible
- // level info. If this synchronization were to be enforced then this and
- // other functions in this file could be cleaned up.
- // See: https://crbug.com/586476
- int32_t max_x;
- int32_t max_y;
- if (!base::CheckAdd(x, width).AssignIfValid(&max_x) ||
- !base::CheckAdd(y, height).AssignIfValid(&max_y) || x < 0 || y < 0 ||
- max_x > source_width || max_y > source_height) {
- LOCAL_SET_GL_ERROR(GL_INVALID_VALUE, "glCopySubTexture",
- "source texture bad dimensions");
- return;
- }
-
- if (image->GetType() == gl::GLImage::Type::MEMORY &&
- shared_context_state_->need_context_state_reset()) {
- // If the image is in shared memory, we may need upload the pixel data
- // with SubTexImage2D, so we need reset pixel unpack state if gl context
- // state has been touched by skia.
- pixel_unpack_state.emplace(state(), gr_context(), group_->feature_info());
- }
- } else {
- if (!source_texture->GetLevelSize(source_target, 0 /* level */,
- &source_width, &source_height, nullptr)) {
- LOCAL_SET_GL_ERROR(GL_INVALID_VALUE, "glCopySubTexture",
- "source texture has no data for level");
- return;
- }
-
- // Check that this type of texture is allowed.
- if (!texture_manager()->ValidForTarget(source_target, 0 /* level */,
- source_width, source_height, 1)) {
- LOCAL_SET_GL_ERROR(GL_INVALID_VALUE, "glCopySubTexture",
- "source texture bad dimensions");
- return;
- }
+ SharedImageRepresentationGLTexture::ScopedAccess dest_access(
+ dest_shared_image.get(), GL_SHARED_IMAGE_ACCESS_MODE_READWRITE_CHROMIUM);
+ if (!dest_access.success()) {
+ LOCAL_SET_GL_ERROR(GL_INVALID_VALUE, "glCopySubTexture",
+ "unable to access destination for write");
+ return;
+ }
- if (!source_texture->ValidForTexture(source_target, 0 /* level */, x, y, 0,
- width, height, 1)) {
- LOCAL_SET_GL_ERROR(GL_INVALID_VALUE, "glCopySubTexture",
- "source texture bad dimensions.");
- return;
- }
+ gles2::Texture* dest_texture = dest_shared_image->GetTexture();
+ GLenum dest_target = dest_texture->target();
+ DCHECK(dest_target);
+ GLint dest_level = 0;
+ gfx::Size dest_size = dest_shared_image->size();
+ gfx::Rect dest_rect(xoffset, yoffset, width, height);
+ if (!gfx::Rect(dest_size).Contains(dest_rect)) {
+ LOCAL_SET_GL_ERROR(GL_INVALID_VALUE, "glCopySubTexture",
+ "destination texture bad dimensions.");
+ return;
}
+ DCHECK_NE(source_texture->service_id(), dest_texture->service_id());
+
GLenum source_type = 0;
GLenum source_internal_format = 0;
- source_texture->GetLevelType(source_target, 0 /* level */, &source_type,
+ source_texture->GetLevelType(source_target, source_level, &source_type,
&source_internal_format);
GLenum dest_type = 0;
GLenum dest_internal_format = 0;
bool dest_level_defined = dest_texture->GetLevelType(
- dest_target, 0 /* level */, &dest_type, &dest_internal_format);
- if (!dest_level_defined) {
- LOCAL_SET_GL_ERROR(GL_INVALID_OPERATION, "glCopySubTexture",
- "destination texture is not defined");
- return;
- }
- if (!dest_texture->ValidForTexture(dest_target, 0 /* level */, xoffset,
- yoffset, 0, width, height, 1)) {
- LOCAL_SET_GL_ERROR(GL_INVALID_VALUE, "glCopySubTexture",
- "destination texture bad dimensions.");
- return;
- }
+ dest_target, dest_level, &dest_type, &dest_internal_format);
+ DCHECK(dest_level_defined);
+
+ // TODO(piman): Do we need this check? It might always be true by
+ // construction.
std::string output_error_msg;
if (!ValidateCopyTextureCHROMIUMInternalFormats(
GetFeatureInfo(), source_internal_format, dest_internal_format,
@@ -1831,66 +1783,80 @@ void RasterDecoderImpl::DoCopySubTextureINTERNAL(
return;
}
- if (feature_info_->feature_flags().desktop_srgb_support) {
- bool enable_framebuffer_srgb =
- gles2::GLES2Util::GetColorEncodingFromInternalFormat(
- source_internal_format) == GL_SRGB ||
- gles2::GLES2Util::GetColorEncodingFromInternalFormat(
- dest_internal_format) == GL_SRGB;
- state()->EnableDisableFramebufferSRGB(enable_framebuffer_srgb);
- }
-
// Clear the source texture if necessary.
- if (!texture_manager()->ClearTextureLevel(this, source_texture, source_target,
- 0 /* level */)) {
+ if (!gles2::TextureManager::ClearTextureLevel(this, source_texture,
+ source_target, 0 /* level */)) {
LOCAL_SET_GL_ERROR(GL_OUT_OF_MEMORY, "glCopySubTexture",
"source texture dimensions too big");
return;
}
- int dest_width = 0;
- int dest_height = 0;
- bool ok = dest_texture->GetLevelSize(dest_target, dest_level, &dest_width,
- &dest_height, nullptr);
- DCHECK(ok);
- if (xoffset != 0 || yoffset != 0 || width != dest_width ||
- height != dest_height) {
- gfx::Rect cleared_rect;
- if (gles2::TextureManager::CombineAdjacentRects(
- dest_texture->GetLevelClearedRect(dest_target, dest_level),
- gfx::Rect(xoffset, yoffset, width, height), &cleared_rect)) {
- DCHECK_GE(cleared_rect.size().GetArea(),
- dest_texture->GetLevelClearedRect(dest_target, dest_level)
- .size()
- .GetArea());
- dest_texture->SetLevelClearedRect(dest_target, dest_level, cleared_rect);
- } else {
- // Otherwise clear part of texture level that is not already cleared.
- if (!texture_manager()->ClearTextureLevel(this, dest_texture, dest_target,
- dest_level)) {
- LOCAL_SET_GL_ERROR(GL_OUT_OF_MEMORY, "glCopySubTexture",
- "destination texture dimensions too big");
+ gfx::Rect new_cleared_rect;
+ gfx::Rect old_cleared_rect =
+ dest_texture->GetLevelClearedRect(dest_target, dest_level);
+ if (gles2::TextureManager::CombineAdjacentRects(
+ dest_texture->GetLevelClearedRect(dest_target, dest_level), dest_rect,
+ &new_cleared_rect)) {
+ DCHECK(old_cleared_rect.IsEmpty() ||
+ new_cleared_rect.Contains(old_cleared_rect));
+ } else {
+ // Otherwise clear part of texture level that is not already cleared.
+ if (!gles2::TextureManager::ClearTextureLevel(this, dest_texture,
+ dest_target, dest_level)) {
+ LOCAL_SET_GL_ERROR(GL_OUT_OF_MEMORY, "glCopySubTexture",
+ "destination texture dimensions too big");
+ return;
+ }
+ new_cleared_rect = gfx::Rect(dest_size);
+ }
+
+ ScopedTextureBinder binder(state(), dest_target, dest_texture->service_id(),
+ gr_context());
+
+ gles2::Texture::ImageState image_state;
+ gl::GLImage* image =
+ source_texture->GetLevelImage(source_target, 0, &image_state);
+ if (image) {
+ base::Optional<ScopedPixelUnpackState> pixel_unpack_state;
+ if (image->GetType() == gl::GLImage::Type::MEMORY &&
+ shared_context_state_->need_context_state_reset()) {
+ // If the image is in shared memory, we may need upload the pixel data
+ // with SubTexImage2D, so we need reset pixel unpack state if gl context
+ // state has been touched by skia.
+ pixel_unpack_state.emplace(state(), gr_context(), feature_info());
+ }
+
+ // Try to copy by uploading to the destination texture.
+ if (dest_internal_format == source_internal_format) {
+ if (image->CopyTexSubImage(dest_target, gfx::Point(xoffset, yoffset),
+ gfx::Rect(x, y, width, height))) {
+ dest_texture->SetLevelClearedRect(dest_target, dest_level,
+ new_cleared_rect);
return;
}
}
- } else {
- dest_texture->SetLevelCleared(dest_target, dest_level, true);
- }
- // TODO(qiankun.miao@intel.com): Support level > 0 for CopyTexSubImage.
- if (image && dest_internal_format == source_internal_format &&
- dest_level == 0) {
- if (image->CopyTexSubImage(dest_target, gfx::Point(xoffset, yoffset),
- gfx::Rect(x, y, width, height))) {
- return;
+ // Otherwise, update the source if needed.
+ if (image_state == gles2::Texture::UNBOUND) {
+ ScopedGLErrorSuppressor suppressor(
+ "RasterDecoderImpl::DoCopySubTextureINTERNAL", error_state_.get());
+ api()->glBindTextureFn(source_target, source_texture->service_id());
+ if (image->ShouldBindOrCopy() == gl::GLImage::BIND) {
+ bool rv = image->BindTexImage(source_target);
+ DCHECK(rv) << "BindTexImage() failed";
+ image_state = gles2::Texture::BOUND;
+ } else {
+ bool rv = image->CopyTexImage(source_target);
+ DCHECK(rv) << "CopyTexImage() failed";
+ image_state = gles2::Texture::COPIED;
+ }
+ source_texture->SetLevelImageState(source_target, 0, image_state);
}
}
if (!InitializeCopyTextureCHROMIUM())
return;
- DoBindOrCopyTexImageIfNeeded(source_texture, source_target);
-
// GL_TEXTURE_EXTERNAL_OES texture requires apply a transform matrix
// before presenting.
if (source_target == GL_TEXTURE_EXTERNAL_OES) {
@@ -1904,10 +1870,13 @@ void RasterDecoderImpl::DoCopySubTextureINTERNAL(
this, source_target, source_texture->service_id(), source_level,
source_internal_format, dest_target, dest_texture->service_id(),
dest_level, dest_internal_format, xoffset, yoffset, x, y, width,
- height, dest_width, dest_height, source_width, source_height,
- false /* unpack_flip_y */, false /* unpack_premultiply_alpha */,
+ height, dest_size.width(), dest_size.height(), source_size.width(),
+ source_size.height(), false /* unpack_flip_y */,
+ false /* unpack_premultiply_alpha */,
false /* unpack_unmultiply_alpha */, false /* dither */,
transform_matrix, copy_tex_image_blit_.get());
+ dest_texture->SetLevelClearedRect(dest_target, dest_level,
+ new_cleared_rect);
return;
}
}
@@ -1935,10 +1904,11 @@ void RasterDecoderImpl::DoCopySubTextureINTERNAL(
this, source_target, source_texture->service_id(), source_level,
source_internal_format, dest_target, dest_texture->service_id(),
dest_level, dest_internal_format, xoffset, yoffset, x, y, width, height,
- dest_width, dest_height, source_width, source_height,
- false /* unpack_flip_y */, false /* unpack_premultiply_alpha */,
- false /* unpack_unmultiply_alpha */, false /* dither */, method,
- copy_tex_image_blit_.get());
+ dest_size.width(), dest_size.height(), source_size.width(),
+ source_size.height(), false /* unpack_flip_y */,
+ false /* unpack_premultiply_alpha */, false /* unpack_unmultiply_alpha */,
+ false /* dither */, method, copy_tex_image_blit_.get());
+ dest_texture->SetLevelClearedRect(dest_target, dest_level, new_cleared_rect);
in_copy_sub_texture_ = false;
if (reset_texture_state_) {
reset_texture_state_ = false;
@@ -1956,30 +1926,6 @@ void RasterDecoderImpl::DoCopySubTextureINTERNAL(
}
}
-void RasterDecoderImpl::DoBindOrCopyTexImageIfNeeded(gles2::Texture* texture,
- GLenum textarget) {
- // Image is already in use if texture is attached to a framebuffer.
- if (texture && !texture->IsAttachedToFramebuffer()) {
- gles2::Texture::ImageState image_state;
- gl::GLImage* image = texture->GetLevelImage(textarget, 0, &image_state);
- if (image && image_state == gles2::Texture::UNBOUND) {
- ScopedGLErrorSuppressor suppressor(
- "RasterDecoderImpl::DoBindOrCopyTexImageIfNeeded",
- error_state_.get());
- api()->glBindTextureFn(textarget, texture->service_id());
- if (!image->BindTexImage(textarget)) {
- // Note: We update the state to COPIED prior to calling CopyTexImage()
- // as that allows the GLImage implemenatation to set it back to
- // UNBOUND and ensure that CopyTexImage() is called each time the
- // texture is used.
- texture->SetLevelImageState(textarget, 0, gles2::Texture::COPIED);
- bool rv = image->CopyTexImage(textarget);
- DCHECK(rv) << "CopyTexImage() failed";
- }
- }
- }
-}
-
namespace {
// Helper to read client data from transfer cache.
@@ -2059,8 +2005,11 @@ void RasterDecoderImpl::DoBeginRasterCHROMIUM(
GLuint sk_color,
GLuint msaa_sample_count,
GLboolean can_use_lcd_text,
- GLuint color_space_transfer_cache_id,
const volatile GLbyte* key) {
+ // Workaround for https://crbug.com/906453: Flush before BeginRaster (the
+ // commands between BeginRaster and EndRaster will not flush).
+ FlushToWorkAroundMacCrashes();
+
if (!gr_context()) {
LOCAL_SET_GL_ERROR(GL_INVALID_OPERATION, "glBeginRasterCHROMIUM",
"chromium_raster_transport not enabled via attribs");
@@ -2079,8 +2028,7 @@ void RasterDecoderImpl::DoBeginRasterCHROMIUM(
"generated by ProduceTextureCHROMIUM.";
DCHECK(!shared_image_);
- shared_image_ =
- group_->shared_image_representation_factory()->ProduceSkia(mailbox);
+ shared_image_ = shared_image_representation_factory_.ProduceSkia(mailbox);
if (!shared_image_) {
LOCAL_SET_GL_ERROR(GL_INVALID_VALUE, "glBeginRasterCHROMIUM",
"passed invalid mailbox.");
@@ -2117,36 +2065,17 @@ void RasterDecoderImpl::DoBeginRasterCHROMIUM(
return;
}
- TransferCacheDeserializeHelperImpl transfer_cache_deserializer(
- raster_decoder_id_, transfer_cache());
- auto* color_space_entry =
- transfer_cache_deserializer
- .GetEntryAs<cc::ServiceColorSpaceTransferCacheEntry>(
- color_space_transfer_cache_id);
- if (!color_space_entry || !color_space_entry->color_space().IsValid()) {
- LOCAL_SET_GL_ERROR(GL_INVALID_OPERATION, "glBeginRasterCHROMIUM",
- "failed to find valid color space");
- shared_image_->EndWriteAccess(std::move(sk_surface_));
- shared_image_.reset();
- return;
- }
-
- SkCanvas* canvas = nullptr;
if (use_ddl_) {
SkSurfaceCharacterization characterization;
bool result = sk_surface_->characterize(&characterization);
DCHECK(result) << "Failed to characterize raster SkSurface.";
recorder_ =
std::make_unique<SkDeferredDisplayListRecorder>(characterization);
- canvas = recorder_->getCanvas();
+ raster_canvas_ = recorder_->getCanvas();
} else {
- canvas = sk_surface_->getCanvas();
+ raster_canvas_ = sk_surface_->getCanvas();
}
- raster_canvas_ = SkCreateColorSpaceXformCanvas(
- canvas, color_space_entry->color_space().ToSkColorSpace());
- raster_color_space_id_ = color_space_transfer_cache_id;
-
// All or nothing clearing, as no way to validate the client's input on what
// is the "used" part of the texture.
// TODO(enne): This doesn't handle the case where the background color
@@ -2212,7 +2141,6 @@ void RasterDecoderImpl::DoRasterCHROMIUM(GLuint raster_shm_id,
alignas(
cc::PaintOpBuffer::PaintOpAlign) char data[sizeof(cc::LargestPaintOp)];
- SkCanvas* canvas = raster_canvas_.get();
cc::PlaybackParams playback_params(nullptr, SkMatrix::I());
TransferCacheDeserializeHelperImpl impl(raster_decoder_id_, transfer_cache());
cc::PaintOp::DeserializeOptions options(
@@ -2234,7 +2162,7 @@ void RasterDecoderImpl::DoRasterCHROMIUM(GLuint raster_shm_id,
return;
}
- deserialized_op->Raster(canvas, playback_params);
+ deserialized_op->Raster(raster_canvas_, playback_params);
deserialized_op->DestroyThis();
paint_buffer_size -= skip;
@@ -2252,7 +2180,7 @@ void RasterDecoderImpl::DoEndRasterCHROMIUM() {
shared_context_state_->set_need_context_state_reset(true);
- raster_canvas_.reset();
+ raster_canvas_ = nullptr;
if (use_ddl_) {
auto ddl = recorder_->detach();
diff --git a/chromium/gpu/command_buffer/service/raster_decoder.h b/chromium/gpu/command_buffer/service/raster_decoder.h
index 717b79dc551..0c0575f5c7b 100644
--- a/chromium/gpu/command_buffer/service/raster_decoder.h
+++ b/chromium/gpu/command_buffer/service/raster_decoder.h
@@ -13,8 +13,12 @@
namespace gpu {
class DecoderClient;
-class SharedContextState;
+struct GpuFeatureInfo;
+struct GpuPreferences;
+class MemoryTracker;
class ServiceTransferCache;
+class SharedContextState;
+class SharedImageManager;
namespace gles2 {
class CopyTextureCHROMIUMResourceManager;
@@ -35,7 +39,10 @@ class GPU_GLES2_EXPORT RasterDecoder : public DecoderContext,
DecoderClient* client,
CommandBufferServiceBase* command_buffer_service,
gles2::Outputter* outputter,
- gles2::ContextGroup* group,
+ const GpuFeatureInfo& gpu_feature_info,
+ const GpuPreferences& gpu_preferences,
+ MemoryTracker* memory_tracker,
+ SharedImageManager* shared_image_manager,
scoped_refptr<SharedContextState> shared_context_state);
~RasterDecoder() override;
diff --git a/chromium/gpu/command_buffer/service/raster_decoder_autogen.h b/chromium/gpu/command_buffer/service/raster_decoder_autogen.h
index f4b23b0ab3d..1a0c03a200f 100644
--- a/chromium/gpu/command_buffer/service/raster_decoder_autogen.h
+++ b/chromium/gpu/command_buffer/service/raster_decoder_autogen.h
@@ -114,8 +114,6 @@ error::Error RasterDecoderImpl::HandleBeginRasterCHROMIUMImmediate(
GLuint sk_color = static_cast<GLuint>(c.sk_color);
GLuint msaa_sample_count = static_cast<GLuint>(c.msaa_sample_count);
GLboolean can_use_lcd_text = static_cast<GLboolean>(c.can_use_lcd_text);
- GLuint color_space_transfer_cache_id =
- static_cast<GLuint>(c.color_space_transfer_cache_id);
uint32_t mailbox_size;
if (!gles2::GLES2Util::ComputeDataSize<GLbyte, 16>(1, &mailbox_size)) {
return error::kOutOfBounds;
@@ -129,8 +127,7 @@ error::Error RasterDecoderImpl::HandleBeginRasterCHROMIUMImmediate(
if (mailbox == nullptr) {
return error::kOutOfBounds;
}
- DoBeginRasterCHROMIUM(sk_color, msaa_sample_count, can_use_lcd_text,
- color_space_transfer_cache_id, mailbox);
+ DoBeginRasterCHROMIUM(sk_color, msaa_sample_count, can_use_lcd_text, mailbox);
return error::kNoError;
}
diff --git a/chromium/gpu/command_buffer/service/raster_decoder_mock.cc b/chromium/gpu/command_buffer/service/raster_decoder_mock.cc
deleted file mode 100644
index 85a53cbcfb3..00000000000
--- a/chromium/gpu/command_buffer/service/raster_decoder_mock.cc
+++ /dev/null
@@ -1,26 +0,0 @@
-// Copyright 2018 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "gpu/command_buffer/service/raster_decoder_mock.h"
-
-#include "gpu/command_buffer/service/gles2_cmd_decoder.h"
-
-namespace gpu {
-namespace raster {
-
-MockRasterDecoder::MockRasterDecoder(
- CommandBufferServiceBase* command_buffer_service)
- : RasterDecoder(command_buffer_service, /*outputter=*/nullptr),
- weak_ptr_factory_(this) {
- ON_CALL(*this, MakeCurrent()).WillByDefault(testing::Return(true));
-}
-
-MockRasterDecoder::~MockRasterDecoder() = default;
-
-base::WeakPtr<DecoderContext> MockRasterDecoder::AsWeakPtr() {
- return weak_ptr_factory_.GetWeakPtr();
-}
-
-} // namespace raster
-} // namespace gpu
diff --git a/chromium/gpu/command_buffer/service/raster_decoder_mock.h b/chromium/gpu/command_buffer/service/raster_decoder_mock.h
deleted file mode 100644
index 6c5424cccf9..00000000000
--- a/chromium/gpu/command_buffer/service/raster_decoder_mock.h
+++ /dev/null
@@ -1,173 +0,0 @@
-// Copyright 2018 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// This file contains the mock RasterDecoder class.
-
-#ifndef GPU_COMMAND_BUFFER_SERVICE_RASTER_DECODER_MOCK_H_
-#define GPU_COMMAND_BUFFER_SERVICE_RASTER_DECODER_MOCK_H_
-
-#include <stdint.h>
-
-#include <vector>
-
-#include "base/callback.h"
-#include "base/macros.h"
-#include "gpu/command_buffer/common/context_creation_attribs.h"
-#include "gpu/command_buffer/common/mailbox.h"
-#include "gpu/command_buffer/service/raster_decoder.h"
-#include "gpu/command_buffer/service/shader_translator.h"
-#include "testing/gmock/include/gmock/gmock.h"
-#include "ui/gfx/geometry/size.h"
-
-namespace gl {
-class GLContext;
-class GLSurface;
-} // namespace gl
-
-namespace gpu {
-class QueryManager;
-
-namespace gles2 {
-class ContextGroup;
-class ErrorState;
-class FeatureInfo;
-class GpuFenceManager;
-class GLES2Util;
-class ImageManager;
-class Logger;
-class Texture;
-struct ContextState;
-} // namespace gles2
-
-namespace raster {
-
-class MockRasterDecoder : public RasterDecoder {
- public:
- explicit MockRasterDecoder(CommandBufferServiceBase* command_buffer_service);
- ~MockRasterDecoder() override;
-
- base::WeakPtr<DecoderContext> AsWeakPtr() override;
-
- MOCK_METHOD5(
- Initialize,
- gpu::ContextResult(const scoped_refptr<gl::GLSurface>& surface,
- const scoped_refptr<gl::GLContext>& context,
- bool offscreen,
- const gles2::DisallowedFeatures& disallowed_features,
- const ContextCreationAttribs& attrib_helper));
- MOCK_METHOD1(Destroy, void(bool have_context));
- MOCK_METHOD0(MakeCurrent, bool());
- MOCK_METHOD1(GetServiceIdForTesting, uint32_t(uint32_t client_id));
- MOCK_METHOD0(GetGLES2Util, gles2::GLES2Util*());
- MOCK_METHOD0(GetGLSurface, gl::GLSurface*());
- MOCK_METHOD0(GetGLContext, gl::GLContext*());
- MOCK_METHOD0(GetContextGroup, gles2::ContextGroup*());
- MOCK_CONST_METHOD0(GetFeatureInfo, const gles2::FeatureInfo*());
- MOCK_METHOD0(GetContextState, const gles2::ContextState*());
- MOCK_METHOD0(GetCapabilities, Capabilities());
- MOCK_CONST_METHOD0(HasPendingQueries, bool());
- MOCK_METHOD1(ProcessPendingQueries, void(bool));
- MOCK_CONST_METHOD0(HasMoreIdleWork, bool());
- MOCK_METHOD0(PerformIdleWork, void());
- MOCK_CONST_METHOD0(HasPollingWork, bool());
- MOCK_METHOD0(PerformPollingWork, void());
- MOCK_CONST_METHOD0(RestoreGlobalState, void());
- MOCK_CONST_METHOD0(ClearAllAttributes, void());
- MOCK_CONST_METHOD0(RestoreAllAttributes, void());
- MOCK_METHOD1(RestoreState, void(const gles2::ContextState* prev_state));
- MOCK_CONST_METHOD0(RestoreActiveTexture, void());
- MOCK_CONST_METHOD1(RestoreAllTextureUnitAndSamplerBindings,
- void(const gles2::ContextState* state));
- MOCK_CONST_METHOD1(RestoreActiveTextureUnitBinding,
- void(unsigned int target));
- MOCK_METHOD0(RestoreAllExternalTextureBindingsIfNeeded, void());
- MOCK_METHOD1(RestoreBufferBinding, void(unsigned int target));
- MOCK_CONST_METHOD0(RestoreBufferBindings, void());
- MOCK_CONST_METHOD0(RestoreFramebufferBindings, void());
- MOCK_CONST_METHOD0(RestoreProgramBindings, void());
- MOCK_METHOD0(RestoreRenderbufferBindings, void());
- MOCK_METHOD1(RestoreTextureState, void(unsigned service_id));
- MOCK_CONST_METHOD1(RestoreTextureUnitBindings, void(unsigned unit));
- MOCK_METHOD1(RestoreVertexAttribArray, void(unsigned index));
-
- MOCK_METHOD0(GetQueryManager, QueryManager*());
- MOCK_METHOD2(SetQueryCallback, void(unsigned int, base::OnceClosure));
- MOCK_METHOD0(GetGpuFenceManager, gpu::gles2::GpuFenceManager*());
- MOCK_METHOD1(SetIgnoreCachedStateForTest, void(bool ignore));
- MOCK_METHOD0(GetImageManagerForTest, gles2::ImageManager*());
- MOCK_METHOD0(GetTransferCacheForTest, ServiceTransferCache*());
- MOCK_METHOD0(DecoderIdForTest, int());
- MOCK_METHOD0(SetUpForRasterCHROMIUMForTest, void());
- MOCK_METHOD0(SetOOMErrorForTest, void());
- MOCK_METHOD0(DisableFlushWorkaroundForTest, void());
- MOCK_METHOD4(DoCommands,
- error::Error(unsigned int num_commands,
- const volatile void* buffer,
- int num_entries,
- int* entries_processed));
- MOCK_METHOD2(GetServiceTextureId,
- bool(uint32_t client_texture_id, uint32_t* service_texture_id));
- MOCK_METHOD0(GetErrorState, gles2::ErrorState*());
- MOCK_METHOD8(CreateAbstractTexture,
- std::unique_ptr<gpu::gles2::AbstractTexture>(
- unsigned /* GLenum */ target,
- unsigned /* GLenum */ internal_format,
- int /* GLsizei */ width,
- int /* GLsizei */ height,
- int /* GLsizei */ depth,
- int /* GLint */ border,
- unsigned /* GLenum */ format,
- unsigned /* GLenum */ type));
-
- MOCK_METHOD0(GetLogger, gles2::Logger*());
- MOCK_CONST_METHOD0(WasContextLost, bool());
- MOCK_CONST_METHOD0(WasContextLostByRobustnessExtension, bool());
- MOCK_METHOD1(MarkContextLost, void(gpu::error::ContextLostReason reason));
- MOCK_METHOD0(CheckResetStatus, bool());
- MOCK_METHOD4(BindImage,
- void(uint32_t client_texture_id,
- uint32_t texture_target,
- gl::GLImage* image,
- bool can_bind_to_sampler));
- MOCK_METHOD1(IsCompressedTextureFormat, bool(unsigned format));
- MOCK_METHOD9(ClearLevel,
- bool(gles2::Texture* texture,
- unsigned target,
- int level,
- unsigned format,
- unsigned type,
- int xoffset,
- int yoffset,
- int width,
- int height));
- MOCK_METHOD6(ClearCompressedTextureLevel,
- bool(gles2::Texture* texture,
- unsigned target,
- int level,
- unsigned format,
- int width,
- int height));
- MOCK_METHOD8(ClearLevel3D,
- bool(gles2::Texture* texture,
- unsigned target,
- int level,
- unsigned format,
- unsigned type,
- int width,
- int height,
- int depth));
- MOCK_METHOD1(SetCopyTextureResourceManagerForTest,
- void(gles2::CopyTextureCHROMIUMResourceManager*
- copy_texture_resource_manager));
-
- private:
- base::WeakPtrFactory<MockRasterDecoder> weak_ptr_factory_;
-
- DISALLOW_COPY_AND_ASSIGN(MockRasterDecoder);
-};
-
-} // namespace raster
-} // namespace gpu
-
-#endif // GPU_COMMAND_BUFFER_SERVICE_RASTER_DECODER_MOCK_H_
diff --git a/chromium/gpu/command_buffer/service/raster_decoder_unittest.cc b/chromium/gpu/command_buffer/service/raster_decoder_unittest.cc
index f9ec337a663..6837104f946 100644
--- a/chromium/gpu/command_buffer/service/raster_decoder_unittest.cc
+++ b/chromium/gpu/command_buffer/service/raster_decoder_unittest.cc
@@ -5,18 +5,20 @@
#include "gpu/command_buffer/service/raster_decoder.h"
#include <limits>
+#include <memory>
+#include <string>
+#include <utility>
+#include "base/bind_helpers.h"
#include "base/command_line.h"
#include "base/memory/ptr_util.h"
#include "gpu/command_buffer/common/gles2_cmd_utils.h"
#include "gpu/command_buffer/common/mailbox.h"
#include "gpu/command_buffer/common/raster_cmd_format.h"
-#include "gpu/command_buffer/service/context_group.h"
-#include "gpu/command_buffer/service/gles2_cmd_decoder.h"
-#include "gpu/command_buffer/service/program_manager.h"
#include "gpu/command_buffer/service/query_manager.h"
#include "gpu/command_buffer/service/raster_decoder_unittest_base.h"
#include "gpu/command_buffer/service/shared_context_state.h"
+#include "gpu/command_buffer/service/shared_image_factory.h"
#include "gpu/command_buffer/service/shared_image_manager.h"
#include "gpu/command_buffer/service/test_helper.h"
#include "testing/gtest/include/gtest/gtest.h"
@@ -51,10 +53,10 @@ class RasterDecoderTest : public RasterDecoderTestBase {
RasterDecoderTest() = default;
};
-INSTANTIATE_TEST_CASE_P(Service, RasterDecoderTest, ::testing::Bool());
-INSTANTIATE_TEST_CASE_P(Service,
- RasterDecoderManualInitTest,
- ::testing::Bool());
+INSTANTIATE_TEST_SUITE_P(Service, RasterDecoderTest, ::testing::Bool());
+INSTANTIATE_TEST_SUITE_P(Service,
+ RasterDecoderManualInitTest,
+ ::testing::Bool());
const GLsync kGlSync = reinterpret_cast<GLsync>(0xdeadbeef);
@@ -147,6 +149,53 @@ TEST_P(RasterDecoderTest, BeginEndQueryEXTCommandsIssuedCHROMIUM) {
EXPECT_FALSE(query->IsPending());
}
+TEST_P(RasterDecoderTest, CopyTexSubImage2DSizeMismatch) {
+ shared_context_state_->set_need_context_state_reset(true);
+ // Create uninitialized source texture.
+ gpu::Mailbox source_texture_mailbox =
+ CreateFakeTexture(kNewServiceId, viz::ResourceFormat::RGBA_8888,
+ /*width=*/1, /*height=*/1,
+ /*cleared=*/true);
+ GLbyte mailboxes[sizeof(gpu::Mailbox) * 2];
+ CopyMailboxes(mailboxes, source_texture_mailbox, client_texture_mailbox_);
+
+ SharedImageRepresentationFactory repr_factory(shared_image_manager(),
+ nullptr);
+ auto representation = repr_factory.ProduceGLTexture(client_texture_mailbox_);
+ gles2::Texture* dest_texture = representation->GetTexture();
+
+ {
+ // This will initialize the bottom right corner of destination.
+ SetScopedTextureBinderExpectations(GL_TEXTURE_2D);
+ auto& cmd = *GetImmediateAs<CopySubTextureINTERNALImmediate>();
+ cmd.Init(1, 1, 0, 0, 1, 1, mailboxes);
+ EXPECT_EQ(error::kNoError, ExecuteImmediateCmd(cmd, sizeof(mailboxes)));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+ EXPECT_EQ(dest_texture->GetLevelClearedRect(GL_TEXTURE_2D, 0),
+ gfx::Rect(1, 1, 1, 1));
+ }
+
+ {
+ // Dest rect outside of dest bounds
+ auto& cmd = *GetImmediateAs<CopySubTextureINTERNALImmediate>();
+ cmd.Init(2, 2, 0, 0, 1, 1, mailboxes);
+ EXPECT_EQ(error::kNoError, ExecuteImmediateCmd(cmd, sizeof(mailboxes)));
+ EXPECT_EQ(GL_INVALID_VALUE, GetGLError());
+ EXPECT_EQ(dest_texture->GetLevelClearedRect(GL_TEXTURE_2D, 0),
+ gfx::Rect(1, 1, 1, 1));
+ }
+
+ {
+ // Source rect outside of source bounds
+ auto& cmd = *GetImmediateAs<CopySubTextureINTERNALImmediate>();
+ cmd.Init(0, 0, 0, 0, 2, 2, mailboxes);
+ EXPECT_EQ(error::kNoError, ExecuteImmediateCmd(cmd, sizeof(mailboxes)));
+ EXPECT_EQ(GL_INVALID_VALUE, GetGLError());
+ EXPECT_EQ(dest_texture->GetLevelClearedRect(GL_TEXTURE_2D, 0),
+ gfx::Rect(1, 1, 1, 1));
+ }
+}
+
TEST_P(RasterDecoderTest, CopyTexSubImage2DTwiceClearsUnclearedTexture) {
shared_context_state_->set_need_context_state_reset(true);
// Create uninitialized source texture.
@@ -154,6 +203,8 @@ TEST_P(RasterDecoderTest, CopyTexSubImage2DTwiceClearsUnclearedTexture) {
CreateFakeTexture(kNewServiceId, viz::ResourceFormat::RGBA_8888,
/*width=*/2, /*height=*/2,
/*cleared=*/false);
+ GLbyte mailboxes[sizeof(gpu::Mailbox) * 2];
+ CopyMailboxes(mailboxes, source_texture_mailbox, client_texture_mailbox_);
// This will initialize the top half of destination.
{
@@ -164,8 +215,6 @@ TEST_P(RasterDecoderTest, CopyTexSubImage2DTwiceClearsUnclearedTexture) {
GL_UNSIGNED_BYTE, 0, 0, 2, 2, 0);
SetScopedTextureBinderExpectations(GL_TEXTURE_2D);
auto& cmd = *GetImmediateAs<CopySubTextureINTERNALImmediate>();
- GLbyte mailboxes[sizeof(gpu::Mailbox) * 2];
- CopyMailboxes(mailboxes, source_texture_mailbox, client_texture_mailbox_);
cmd.Init(0, 0, 0, 0, 2, 1, mailboxes);
EXPECT_EQ(error::kNoError, ExecuteImmediateCmd(cmd, sizeof(mailboxes)));
}
@@ -179,15 +228,14 @@ TEST_P(RasterDecoderTest, CopyTexSubImage2DTwiceClearsUnclearedTexture) {
GL_UNSIGNED_BYTE, 0, 1, 2, 1, 0);
SetScopedTextureBinderExpectations(GL_TEXTURE_2D);
auto& cmd = *GetImmediateAs<CopySubTextureINTERNALImmediate>();
- GLbyte mailboxes[sizeof(gpu::Mailbox) * 2];
- CopyMailboxes(mailboxes, source_texture_mailbox, client_texture_mailbox_);
cmd.Init(1, 1, 0, 0, 1, 1, mailboxes);
EXPECT_EQ(error::kNoError, ExecuteImmediateCmd(cmd, sizeof(mailboxes)));
}
- auto* texture = gles2::Texture::CheckedCast(
- group().mailbox_manager()->ConsumeTexture(client_texture_mailbox_));
- EXPECT_TRUE(texture->SafeToRenderFrom());
+ SharedImageRepresentationFactory repr_factory(shared_image_manager(),
+ nullptr);
+ auto representation = repr_factory.ProduceGLTexture(client_texture_mailbox_);
+ EXPECT_TRUE(representation->GetTexture()->SafeToRenderFrom());
}
TEST_P(RasterDecoderManualInitTest, CopyTexSubImage2DValidateColorFormat) {
@@ -201,7 +249,6 @@ TEST_P(RasterDecoderManualInitTest, CopyTexSubImage2DValidateColorFormat) {
CreateFakeTexture(kNewServiceId, viz::ResourceFormat::RED_8,
/*width=*/2, /*height=*/2, /*cleared=*/true);
- SetScopedTextureBinderExpectations(GL_TEXTURE_2D);
auto& copy_cmd = *GetImmediateAs<CopySubTextureINTERNALImmediate>();
GLbyte mailboxes[sizeof(gpu::Mailbox) * 2];
CopyMailboxes(mailboxes, client_texture_mailbox_, dest_texture_mailbox);
@@ -219,16 +266,10 @@ TEST_P(RasterDecoderTest, YieldAfterEndRasterCHROMIUM) {
class RasterDecoderOOPTest : public testing::Test, DecoderClient {
public:
- RasterDecoderOOPTest() : shader_translator_cache_(gpu_preferences_) {}
-
void SetUp() override {
gl::GLSurfaceTestSupport::InitializeOneOff();
gpu::GpuDriverBugWorkarounds workarounds;
- GpuFeatureInfo gpu_feature_info;
- gpu_feature_info.status_values[GPU_FEATURE_TYPE_OOP_RASTERIZATION] =
- kGpuFeatureStatusEnabled;
-
scoped_refptr<gl::GLShareGroup> share_group = new gl::GLShareGroup();
scoped_refptr<gl::GLSurface> surface =
gl::init::CreateOffscreenGLSurface(gfx::Size());
@@ -236,23 +277,16 @@ class RasterDecoderOOPTest : public testing::Test, DecoderClient {
share_group.get(), surface.get(), gl::GLContextAttribs());
ASSERT_TRUE(context->MakeCurrent(surface.get()));
- auto feature_info =
- base::MakeRefCounted<gles2::FeatureInfo>(workarounds, gpu_feature_info);
+ gpu_feature_info_.status_values[GPU_FEATURE_TYPE_OOP_RASTERIZATION] =
+ kGpuFeatureStatusEnabled;
+ auto feature_info = base::MakeRefCounted<gles2::FeatureInfo>(
+ workarounds, gpu_feature_info_);
context_state_ = base::MakeRefCounted<SharedContextState>(
std::move(share_group), std::move(surface), std::move(context),
false /* use_virtualized_gl_contexts */, base::DoNothing());
context_state_->InitializeGrContext(workarounds, nullptr);
context_state_->InitializeGL(GpuPreferences(), feature_info);
-
- group_ = new gles2::ContextGroup(
- gpu_preferences_, false, &mailbox_manager_,
- nullptr /* memory_tracker */, &shader_translator_cache_,
- &framebuffer_completeness_cache_, feature_info,
- false /* bind_generates_resource */, &image_manager_,
- nullptr /* image_factory */, nullptr /* progress_reporter */,
- gpu_feature_info, &discardable_manager_,
- nullptr /* passthrough_discardable_manager */, &shared_image_manager_);
}
void TearDown() override {
context_state_ = nullptr;
@@ -268,11 +302,13 @@ class RasterDecoderOOPTest : public testing::Test, DecoderClient {
void OnRescheduleAfterFinished() override {}
void OnSwapBuffers(uint64_t swap_id, uint32_t flags) override {}
void ScheduleGrContextCleanup() override {}
+ void HandleReturnData(base::span<const uint8_t> data) override {}
std::unique_ptr<RasterDecoder> CreateDecoder() {
- auto decoder = base::WrapUnique(
- RasterDecoder::Create(this, &command_buffer_service_, &outputter_,
- group_.get(), context_state_));
+ auto decoder = base::WrapUnique(RasterDecoder::Create(
+ this, &command_buffer_service_, &outputter_, gpu_feature_info_,
+ GpuPreferences(), nullptr /* memory_tracker */, &shared_image_manager_,
+ context_state_));
ContextCreationAttribs attribs;
attribs.enable_oop_rasterization = true;
attribs.enable_raster_interface = true;
@@ -294,18 +330,12 @@ class RasterDecoderOOPTest : public testing::Test, DecoderClient {
}
protected:
+ GpuFeatureInfo gpu_feature_info_;
gles2::TraceOutputter outputter_;
FakeCommandBufferServiceBase command_buffer_service_;
scoped_refptr<SharedContextState> context_state_;
- GpuPreferences gpu_preferences_;
- gles2::MailboxManagerImpl mailbox_manager_;
- gles2::ShaderTranslatorCache shader_translator_cache_;
- gles2::FramebufferCompletenessCache framebuffer_completeness_cache_;
- gles2::ImageManager image_manager_;
- ServiceDiscardableManager discardable_manager_;
SharedImageManager shared_image_manager_;
- scoped_refptr<gles2::ContextGroup> group_;
};
TEST_F(RasterDecoderOOPTest, StateRestoreAcrossDecoders) {
diff --git a/chromium/gpu/command_buffer/service/raster_decoder_unittest_0_autogen.h b/chromium/gpu/command_buffer/service/raster_decoder_unittest_0_autogen.h
index bb80c0486de..200cbb57aba 100644
--- a/chromium/gpu/command_buffer/service/raster_decoder_unittest_0_autogen.h
+++ b/chromium/gpu/command_buffer/service/raster_decoder_unittest_0_autogen.h
@@ -23,10 +23,10 @@ void RasterDecoderTestBase::SetupInitCapabilitiesExpectations(
ExpectEnableDisable(GL_SAMPLE_COVERAGE, false);
ExpectEnableDisable(GL_SCISSOR_TEST, false);
ExpectEnableDisable(GL_STENCIL_TEST, false);
- if (group_->feature_info()->feature_flags().ext_multisample_compatibility) {
+ if (feature_info()->feature_flags().ext_multisample_compatibility) {
ExpectEnableDisable(GL_MULTISAMPLE_EXT, true);
}
- if (group_->feature_info()->feature_flags().ext_multisample_compatibility) {
+ if (feature_info()->feature_flags().ext_multisample_compatibility) {
ExpectEnableDisable(GL_SAMPLE_ALPHA_TO_ONE_EXT, false);
}
if (es3_capable) {
@@ -36,7 +36,7 @@ void RasterDecoderTestBase::SetupInitCapabilitiesExpectations(
}
void RasterDecoderTestBase::SetupInitStateExpectations(bool es3_capable) {
- auto* feature_info_ = group_->feature_info();
+ auto* feature_info_ = feature_info();
EXPECT_CALL(*gl_, BlendColor(0.0f, 0.0f, 0.0f, 0.0f))
.Times(1)
.RetiresOnSaturation();
@@ -54,9 +54,7 @@ void RasterDecoderTestBase::SetupInitStateExpectations(bool es3_capable) {
EXPECT_CALL(*gl_, ColorMask(true, true, true, true))
.Times(1)
.RetiresOnSaturation();
- if (group_->feature_info()
- ->feature_flags()
- .chromium_framebuffer_mixed_samples) {
+ if (feature_info()->feature_flags().chromium_framebuffer_mixed_samples) {
EXPECT_CALL(*gl_, CoverageModulationNV(GL_NONE))
.Times(1)
.RetiresOnSaturation();
@@ -93,7 +91,7 @@ void RasterDecoderTestBase::SetupInitStateExpectations(bool es3_capable) {
.Times(1)
.RetiresOnSaturation();
}
- if (group_->feature_info()->feature_flags().chromium_path_rendering) {
+ if (feature_info()->feature_flags().chromium_path_rendering) {
EXPECT_CALL(*gl_, PathStencilFuncNV(GL_ALWAYS, 0, 0xFFFFFFFFU))
.Times(1)
.RetiresOnSaturation();
diff --git a/chromium/gpu/command_buffer/service/raster_decoder_unittest_1.cc b/chromium/gpu/command_buffer/service/raster_decoder_unittest_1.cc
index cedfca9ee4f..717f5f55629 100644
--- a/chromium/gpu/command_buffer/service/raster_decoder_unittest_1.cc
+++ b/chromium/gpu/command_buffer/service/raster_decoder_unittest_1.cc
@@ -33,7 +33,7 @@ class RasterDecoderTest1 : public RasterDecoderTestBase {
RasterDecoderTest1() = default;
};
-INSTANTIATE_TEST_CASE_P(Service, RasterDecoderTest1, ::testing::Bool());
+INSTANTIATE_TEST_SUITE_P(Service, RasterDecoderTest1, ::testing::Bool());
#include "gpu/command_buffer/service/raster_decoder_unittest_1_autogen.h"
diff --git a/chromium/gpu/command_buffer/service/raster_decoder_unittest_base.cc b/chromium/gpu/command_buffer/service/raster_decoder_unittest_base.cc
index 7c953e1c84c..ef5fcd2a7e2 100644
--- a/chromium/gpu/command_buffer/service/raster_decoder_unittest_base.cc
+++ b/chromium/gpu/command_buffer/service/raster_decoder_unittest_base.cc
@@ -10,14 +10,17 @@
#include <algorithm>
#include <memory>
#include <string>
+#include <utility>
#include <vector>
+#include "base/bind_helpers.h"
#include "base/command_line.h"
#include "base/strings/string_number_conversions.h"
#include "base/strings/string_split.h"
#include "components/viz/common/resources/resource_format_utils.h"
#include "gpu/command_buffer/common/gles2_cmd_utils.h"
#include "gpu/command_buffer/common/raster_cmd_format.h"
+#include "gpu/command_buffer/common/shared_image_usage.h"
#include "gpu/command_buffer/service/context_group.h"
#include "gpu/command_buffer/service/copy_texture_chromium_mock.h"
#include "gpu/command_buffer/service/gpu_switches.h"
@@ -26,6 +29,7 @@
#include "gpu/command_buffer/service/program_manager.h"
#include "gpu/command_buffer/service/service_utils.h"
#include "gpu/command_buffer/service/shared_context_state.h"
+#include "gpu/command_buffer/service/shared_image_backing_factory_gl_texture.h"
#include "gpu/command_buffer/service/test_helper.h"
#include "gpu/command_buffer/service/vertex_attrib_manager.h"
#include "testing/gtest/include/gtest/gtest.h"
@@ -59,7 +63,7 @@ RasterDecoderTestBase::RasterDecoderTestBase()
shared_memory_address_(nullptr),
shared_memory_base_(nullptr),
ignore_cached_state_for_test_(GetParam()),
- shader_translator_cache_(gpu_preferences_),
+ memory_tracker_(nullptr),
copy_texture_manager_(nullptr) {
memset(immediate_buffer_, 0xEE, sizeof(immediate_buffer_));
}
@@ -97,7 +101,7 @@ void RasterDecoderTestBase::AddExpectationsForRestoreAttribState(
.Times(1)
.RetiresOnSaturation();
- if (attrib != 0 || group_->feature_info()->gl_version_info().is_es) {
+ if (attrib != 0 || feature_info()->gl_version_info().is_es) {
// TODO(bajones): Not sure if I can tell which of these will be called
EXPECT_CALL(*gl_, EnableVertexAttribArray(attrib))
.Times(testing::AtMost(1))
@@ -120,7 +124,7 @@ void RasterDecoderTestBase::SetupInitStateManualExpectations(bool es3_capable) {
EXPECT_CALL(*gl_, PixelStorei(GL_UNPACK_IMAGE_HEIGHT, 0))
.Times(1)
.RetiresOnSaturation();
- if (group_->feature_info()->feature_flags().ext_window_rectangles) {
+ if (feature_info()->feature_flags().ext_window_rectangles) {
EXPECT_CALL(*gl_, WindowRectanglesEXT(GL_EXCLUSIVE_EXT, 0, nullptr))
.Times(1)
.RetiresOnSaturation();
@@ -147,19 +151,13 @@ gpu::Mailbox RasterDecoderTestBase::CreateFakeTexture(
GLsizei width,
GLsizei height,
bool cleared) {
- // Create texture and temporary ref.
- const GLuint kTempClientId = next_fake_texture_client_id_++;
- auto* temp_ref =
- group_->texture_manager()->CreateTexture(kTempClientId, service_id);
- group_->texture_manager()->SetTarget(temp_ref, GL_TEXTURE_2D);
- group_->texture_manager()->SetLevelInfo(
- temp_ref, GL_TEXTURE_2D, 0, viz::GLInternalFormat(resource_format),
- /*width=*/width, /*height=*/height, 1, 0,
- viz::GLDataFormat(resource_format), viz::GLDataType(resource_format),
- cleared ? gfx::Rect(width, height) : gfx::Rect());
- gpu::Mailbox mailbox = gpu::Mailbox::Generate();
- group_->mailbox_manager()->ProduceTexture(mailbox, temp_ref->texture());
- EXPECT_EQ(GL_NO_ERROR, GetGLError());
+ gpu::Mailbox mailbox = gpu::Mailbox::GenerateForSharedImage();
+ std::unique_ptr<SharedImageBacking> backing =
+ SharedImageBackingFactoryGLTexture::CreateSharedImageForTest(
+ mailbox, GL_TEXTURE_2D, service_id, cleared, resource_format,
+ gfx::Size(width, height), SHARED_IMAGE_USAGE_RASTER);
+ shared_images_.push_back(
+ shared_image_manager_.Register(std::move(backing), &memory_tracker_));
return mailbox;
}
@@ -168,8 +166,7 @@ void RasterDecoderTestBase::InitDecoder(const InitState& init) {
for (const std::string& extension : init.extensions) {
all_extensions += extension + " ";
}
- const bool bind_generates_resource(false);
- const ContextType context_type(CONTEXT_TYPE_OPENGLES2);
+ const ContextType context_type = CONTEXT_TYPE_OPENGLES2;
// For easier substring/extension matching
gl::SetGLGetProcAddressProc(gl::MockGLInterface::GetGLProcAddress);
@@ -178,17 +175,6 @@ void RasterDecoderTestBase::InitDecoder(const InitState& init) {
gl_.reset(new StrictMock<MockGLInterface>());
::gl::MockGLInterface::SetGLInterface(gl_.get());
- GpuFeatureInfo gpu_feature_info;
- scoped_refptr<gles2::FeatureInfo> feature_info =
- new gles2::FeatureInfo(init.workarounds, gpu_feature_info);
-
- group_ = scoped_refptr<gles2::ContextGroup>(new gles2::ContextGroup(
- gpu_preferences_, false, &mailbox_manager_, nullptr /* memory_tracker */,
- &shader_translator_cache_, &framebuffer_completeness_cache_, feature_info,
- bind_generates_resource, &image_manager_, nullptr /* image_factory */,
- nullptr /* progress_reporter */, gpu_feature_info, &discardable_manager_,
- nullptr /* passthrough_discardable_manager */, &shared_image_manager_));
-
InSequence sequence;
surface_ = new gl::GLSurfaceStub;
@@ -202,54 +188,37 @@ void RasterDecoderTestBase::InitDecoder(const InitState& init) {
context_->GLContextStub::MakeCurrent(surface_.get());
- gles2::TestHelper::SetupContextGroupInitExpectations(
- gl_.get(), gles2::DisallowedFeatures(), all_extensions.c_str(),
- init.gl_version.c_str(), context_type, bind_generates_resource);
-
- // We initialize the ContextGroup with a MockRasterDecoder so that
- // we can use the ContextGroup to figure out how the real RasterDecoder
- // will initialize itself.
- command_buffer_service_.reset(new FakeCommandBufferServiceBase());
- command_buffer_service_for_mock_decoder_.reset(
- new FakeCommandBufferServiceBase());
- mock_decoder_.reset(
- new MockRasterDecoder(command_buffer_service_for_mock_decoder_.get()));
-
- EXPECT_EQ(group_->Initialize(mock_decoder_.get(), context_type,
- gles2::DisallowedFeatures()),
- gpu::ContextResult::kSuccess);
-
- scoped_refptr<gpu::Buffer> buffer =
- command_buffer_service_->CreateTransferBufferHelper(kSharedBufferSize,
- &shared_memory_id_);
- shared_memory_offset_ = kSharedMemoryOffset;
- shared_memory_address_ =
- static_cast<int8_t*>(buffer->memory()) + shared_memory_offset_;
- shared_memory_base_ = buffer->memory();
- ClearSharedMemory();
-
- ContextCreationAttribs attribs;
- attribs.lose_context_when_out_of_memory =
- init.lose_context_when_out_of_memory;
- attribs.context_type = context_type;
+ GpuFeatureInfo gpu_feature_info;
+ feature_info_ = base::MakeRefCounted<gles2::FeatureInfo>(init.workarounds,
+ gpu_feature_info);
+ gles2::TestHelper::SetupFeatureInfoInitExpectationsWithGLVersion(
+ gl_.get(), all_extensions.c_str(), "", init.gl_version.c_str(),
+ context_type);
+ feature_info_->Initialize(gpu::CONTEXT_TYPE_OPENGLES2,
+ gpu_preferences_.use_passthrough_cmd_decoder &&
+ gles2::PassthroughCommandDecoderSupported(),
+ gles2::DisallowedFeatures());
// Setup expectations for SharedContextState::InitializeGL().
EXPECT_CALL(*gl_, GetIntegerv(GL_MAX_VERTEX_ATTRIBS, _))
.WillOnce(SetArgPointee<1>(8u))
.RetiresOnSaturation();
- SetupInitCapabilitiesExpectations(group_->feature_info()->IsES3Capable());
- SetupInitStateExpectations(group_->feature_info()->IsES3Capable());
+ SetupInitCapabilitiesExpectations(feature_info()->IsES3Capable());
+ SetupInitStateExpectations(feature_info()->IsES3Capable());
shared_context_state_ = base::MakeRefCounted<SharedContextState>(
new gl::GLShareGroup(), surface_, context_,
- feature_info->workarounds().use_virtualized_gl_contexts,
+ feature_info()->workarounds().use_virtualized_gl_contexts,
base::DoNothing());
- shared_context_state_->InitializeGL(GpuPreferences(), feature_info);
+ shared_context_state_->InitializeGL(GpuPreferences(), feature_info_);
+
+ command_buffer_service_.reset(new FakeCommandBufferServiceBase());
- decoder_.reset(RasterDecoder::Create(this, command_buffer_service_.get(),
- &outputter_, group_.get(),
- shared_context_state_));
+ decoder_.reset(RasterDecoder::Create(
+ this, command_buffer_service_.get(), &outputter_, gpu_feature_info,
+ gpu_preferences_, nullptr /* memory_tracker */, &shared_image_manager_,
+ shared_context_state_));
decoder_->SetIgnoreCachedStateForTest(ignore_cached_state_for_test_);
decoder_->DisableFlushWorkaroundForTest();
decoder_->GetLogger()->set_log_synthesized_gl_errors(false);
@@ -257,6 +226,11 @@ void RasterDecoderTestBase::InitDecoder(const InitState& init) {
copy_texture_manager_ = new gles2::MockCopyTextureResourceManager();
decoder_->SetCopyTextureResourceManagerForTest(copy_texture_manager_);
+ ContextCreationAttribs attribs;
+ attribs.lose_context_when_out_of_memory =
+ init.lose_context_when_out_of_memory;
+ attribs.context_type = context_type;
+
ASSERT_EQ(decoder_->Initialize(surface_, shared_context_state_->context(),
true, gles2::DisallowedFeatures(), attribs),
gpu::ContextResult::kSuccess);
@@ -269,6 +243,15 @@ void RasterDecoderTestBase::InitDecoder(const InitState& init) {
decoder_->MakeCurrent();
decoder_->BeginDecoding();
+ scoped_refptr<gpu::Buffer> buffer =
+ command_buffer_service_->CreateTransferBufferHelper(kSharedBufferSize,
+ &shared_memory_id_);
+ shared_memory_offset_ = kSharedMemoryOffset;
+ shared_memory_address_ =
+ static_cast<int8_t*>(buffer->memory()) + shared_memory_offset_;
+ shared_memory_base_ = buffer->memory();
+ ClearSharedMemory();
+
client_texture_mailbox_ = CreateFakeTexture(
kServiceTextureId, viz::ResourceFormat::RGBA_8888, /*width=*/2,
/*height=*/2, /*cleared=*/false);
@@ -291,9 +274,10 @@ void RasterDecoderTestBase::ResetDecoder() {
decoder_->Destroy(!decoder_->WasContextLost());
decoder_.reset();
- group_->Destroy(mock_decoder_.get(), false);
command_buffer_service_.reset();
- command_buffer_service_for_mock_decoder_.reset();
+ for (auto& image : shared_images_)
+ image->OnContextLost();
+ shared_images_.clear();
::gl::MockGLInterface::SetGLInterface(nullptr);
gl_.reset();
gl::init::ShutdownGL(false);
diff --git a/chromium/gpu/command_buffer/service/raster_decoder_unittest_base.h b/chromium/gpu/command_buffer/service/raster_decoder_unittest_base.h
index 6ac4cf08acb..9e72426645b 100644
--- a/chromium/gpu/command_buffer/service/raster_decoder_unittest_base.h
+++ b/chromium/gpu/command_buffer/service/raster_decoder_unittest_base.h
@@ -17,23 +17,15 @@
#include "base/message_loop/message_loop.h"
#include "components/viz/common/resources/resource_format.h"
#include "gpu/command_buffer/client/client_test_helper.h"
-#include "gpu/command_buffer/common/gles2_cmd_utils.h"
#include "gpu/command_buffer/common/raster_cmd_format.h"
-#include "gpu/command_buffer/service/buffer_manager.h"
-#include "gpu/command_buffer/service/context_group.h"
#include "gpu/command_buffer/service/decoder_client.h"
-#include "gpu/command_buffer/service/framebuffer_manager.h"
#include "gpu/command_buffer/service/gl_context_mock.h"
#include "gpu/command_buffer/service/gpu_tracer.h"
-#include "gpu/command_buffer/service/image_manager.h"
-#include "gpu/command_buffer/service/mailbox_manager_impl.h"
+#include "gpu/command_buffer/service/memory_tracking.h"
#include "gpu/command_buffer/service/raster_decoder.h"
-#include "gpu/command_buffer/service/raster_decoder_mock.h"
-#include "gpu/command_buffer/service/service_discardable_manager.h"
-#include "gpu/command_buffer/service/shader_manager.h"
#include "gpu/command_buffer/service/shared_image_manager.h"
+#include "gpu/command_buffer/service/shared_image_representation.h"
#include "gpu/command_buffer/service/test_helper.h"
-#include "gpu/command_buffer/service/texture_manager.h"
#include "gpu/config/gpu_driver_bug_workarounds.h"
#include "gpu/config/gpu_preferences.h"
#include "testing/gtest/include/gtest/gtest.h"
@@ -44,7 +36,6 @@
namespace gpu {
namespace gles2 {
-class ImageManager;
class MockCopyTextureResourceManager;
} // namespace gles2
@@ -63,6 +54,7 @@ class RasterDecoderTestBase : public ::testing::TestWithParam<bool>,
void OnRescheduleAfterFinished() override;
void OnSwapBuffers(uint64_t swap_id, uint32_t flags) override;
void ScheduleGrContextCleanup() override {}
+ void HandleReturnData(base::span<const uint8_t> data) override {}
// Template to call glGenXXX functions.
template <typename T>
@@ -121,10 +113,6 @@ class RasterDecoderTestBase : public ::testing::TestWithParam<bool>,
return reinterpret_cast<T>(ptr);
}
- gles2::TextureRef* GetTexture(GLuint client_id) {
- return group_->texture_manager()->GetTexture(client_id);
- }
-
void SetBucketData(uint32_t bucket_id, const void* data, uint32_t data_size);
void SetBucketAsCString(uint32_t bucket_id, const char* str);
// If we want a valid bucket, just set |count_in_header| as |count|,
@@ -152,12 +140,6 @@ class RasterDecoderTestBase : public ::testing::TestWithParam<bool>,
void InitDecoder(const InitState& init);
void ResetDecoder();
- const gles2::ContextGroup& group() const { return *group_.get(); }
-
- void LoseContexts(error::ContextLostReason reason) const {
- group_->LoseContexts(reason);
- }
-
error::ContextLostReason GetContextLostReason() const {
return command_buffer_service_->GetState().context_lost_reason;
}
@@ -167,9 +149,6 @@ class RasterDecoderTestBase : public ::testing::TestWithParam<bool>,
}
RasterDecoder* GetDecoder() const { return decoder_.get(); }
- gles2::ImageManager* GetImageManagerForTest() {
- return decoder_->GetImageManagerForTest();
- }
typedef gles2::TestHelper::AttribInfo AttribInfo;
typedef gles2::TestHelper::UniformInfo UniformInfo;
@@ -194,7 +173,6 @@ class RasterDecoderTestBase : public ::testing::TestWithParam<bool>,
// EXPECT_EQ that expect both types to be the same.
GLint GetGLError();
- void DoBindTexture(GLenum target, GLuint client_id, GLuint service_id);
void SetScopedTextureBinderExpectations(GLenum target);
void SetupClearTextureExpectations(GLuint service_id,
@@ -212,6 +190,9 @@ class RasterDecoderTestBase : public ::testing::TestWithParam<bool>,
GLvoid* BufferOffset(unsigned i) { return reinterpret_cast<GLvoid*>(i); }
+ SharedImageManager* shared_image_manager() { return &shared_image_manager_; }
+ gles2::FeatureInfo* feature_info() { return feature_info_.get(); }
+
protected:
static const GLint kMaxTextureSize = 2048;
static const GLint kNumTextureUnits = 8;
@@ -240,13 +221,11 @@ class RasterDecoderTestBase : public ::testing::TestWithParam<bool>,
// Use StrictMock to make 100% sure we know how GL will be called.
std::unique_ptr<::testing::StrictMock<::gl::MockGLInterface>> gl_;
+ scoped_refptr<gles2::FeatureInfo> feature_info_;
scoped_refptr<gl::GLSurfaceStub> surface_;
scoped_refptr<GLContextMock> context_;
std::unique_ptr<FakeCommandBufferServiceBase> command_buffer_service_;
gles2::TraceOutputter outputter_;
- std::unique_ptr<MockRasterDecoder> mock_decoder_;
- std::unique_ptr<FakeCommandBufferServiceBase>
- command_buffer_service_for_mock_decoder_;
std::unique_ptr<RasterDecoder> decoder_;
gpu::Mailbox client_texture_mailbox_;
@@ -263,13 +242,10 @@ class RasterDecoderTestBase : public ::testing::TestWithParam<bool>,
private:
GpuPreferences gpu_preferences_;
- gles2::MailboxManagerImpl mailbox_manager_;
- gles2::ShaderTranslatorCache shader_translator_cache_;
- gles2::FramebufferCompletenessCache framebuffer_completeness_cache_;
- gles2::ImageManager image_manager_;
- ServiceDiscardableManager discardable_manager_;
SharedImageManager shared_image_manager_;
- scoped_refptr<gles2::ContextGroup> group_;
+ MemoryTypeTracker memory_tracker_;
+ std::vector<std::unique_ptr<SharedImageRepresentationFactoryRef>>
+ shared_images_;
base::MessageLoop message_loop_;
gles2::MockCopyTextureResourceManager* copy_texture_manager_; // not owned
GLuint next_fake_texture_client_id_ = 271828;
diff --git a/chromium/gpu/command_buffer/service/raster_decoder_unittest_context_lost.cc b/chromium/gpu/command_buffer/service/raster_decoder_unittest_context_lost.cc
index 3c47d6845da..2574c192163 100644
--- a/chromium/gpu/command_buffer/service/raster_decoder_unittest_context_lost.cc
+++ b/chromium/gpu/command_buffer/service/raster_decoder_unittest_context_lost.cc
@@ -37,10 +37,6 @@ class RasterDecoderOOMTest : public RasterDecoderManualInitTest {
EXPECT_CALL(*gl_, GetGraphicsResetStatusARB())
.WillOnce(Return(reset_status));
}
- // Other contexts in the group should be lost also.
- EXPECT_CALL(*mock_decoder_, MarkContextLost(expected_other_reason))
- .Times(1)
- .RetiresOnSaturation();
// glGetError merges driver error state with decoder error state. Return
// GL_NO_ERROR from mock driver and GL_OUT_OF_MEMORY from decoder.
@@ -97,7 +93,7 @@ TEST_P(RasterDecoderOOMTest, ContextLostReasonWhenStatusIsUnknown) {
EXPECT_EQ(error::kUnknown, GetContextLostReason());
}
-INSTANTIATE_TEST_CASE_P(Service, RasterDecoderOOMTest, ::testing::Bool());
+INSTANTIATE_TEST_SUITE_P(Service, RasterDecoderOOMTest, ::testing::Bool());
class RasterDecoderLostContextTest : public RasterDecoderManualInitTest {
protected:
@@ -143,8 +139,6 @@ class RasterDecoderLostContextTest : public RasterDecoderManualInitTest {
TEST_P(RasterDecoderLostContextTest, LostFromMakeCurrent) {
Init(/*has_robustness=*/false);
EXPECT_CALL(*context_, MakeCurrent(surface_.get())).WillOnce(Return(false));
- // Expect the group to be lost.
- EXPECT_CALL(*mock_decoder_, MarkContextLost(error::kUnknown)).Times(1);
EXPECT_FALSE(decoder_->WasContextLost());
decoder_->MakeCurrent();
EXPECT_TRUE(decoder_->WasContextLost());
@@ -161,8 +155,6 @@ TEST_P(RasterDecoderLostContextTest, LostFromMakeCurrentWithRobustness) {
// extension.
EXPECT_CALL(*gl_, GetGraphicsResetStatusARB()).Times(0);
EXPECT_CALL(*context_, MakeCurrent(surface_.get())).WillOnce(Return(false));
- // Expect the group to be lost.
- EXPECT_CALL(*mock_decoder_, MarkContextLost(error::kUnknown)).Times(1);
decoder_->MakeCurrent();
EXPECT_TRUE(decoder_->WasContextLost());
EXPECT_FALSE(decoder_->WasContextLostByRobustnessExtension());
@@ -173,29 +165,6 @@ TEST_P(RasterDecoderLostContextTest, LostFromMakeCurrentWithRobustness) {
ClearCurrentDecoderError();
}
-TEST_P(RasterDecoderLostContextTest, TextureDestroyAfterLostFromMakeCurrent) {
- Init(/*has_robustness=*/true);
-
- CreateFakeTexture(kNewServiceId, viz::ResourceFormat::RGBA_8888,
- /*width=*/2, /*height=*/2,
- /*cleared=*/false);
-
- // The texture should never be deleted at the GL level.
- EXPECT_CALL(*gl_, DeleteTextures(1, Pointee(kNewServiceId)))
- .Times(0)
- .RetiresOnSaturation();
-
- // Force context lost for MakeCurrent().
- EXPECT_CALL(*context_, MakeCurrent(surface_.get())).WillOnce(Return(false));
- // Expect the group to be lost.
- EXPECT_CALL(*mock_decoder_, MarkContextLost(error::kUnknown)).Times(1);
-
- decoder_->MakeCurrent();
- EXPECT_TRUE(decoder_->WasContextLost());
- EXPECT_EQ(error::kMakeCurrentFailed, GetContextLostReason());
- ClearCurrentDecoderError();
-}
-
TEST_P(RasterDecoderLostContextTest, QueryDestroyAfterLostFromMakeCurrent) {
Init(/*has_robustness=*/false);
@@ -236,8 +205,6 @@ TEST_P(RasterDecoderLostContextTest, QueryDestroyAfterLostFromMakeCurrent) {
// Force context lost for MakeCurrent().
EXPECT_CALL(*context_, MakeCurrent(surface_.get())).WillOnce(Return(false));
- // Expect the group to be lost.
- EXPECT_CALL(*mock_decoder_, MarkContextLost(error::kUnknown)).Times(1);
decoder_->MakeCurrent();
EXPECT_TRUE(decoder_->WasContextLost());
@@ -252,8 +219,6 @@ TEST_P(RasterDecoderLostContextTest, LostFromResetAfterMakeCurrent) {
EXPECT_CALL(*context_, MakeCurrent(surface_.get())).WillOnce(Return(true));
EXPECT_CALL(*gl_, GetGraphicsResetStatusARB())
.WillOnce(Return(GL_GUILTY_CONTEXT_RESET_KHR));
- // Expect the group to be lost.
- EXPECT_CALL(*mock_decoder_, MarkContextLost(error::kUnknown)).Times(1);
decoder_->MakeCurrent();
EXPECT_TRUE(decoder_->WasContextLost());
EXPECT_TRUE(decoder_->WasContextLostByRobustnessExtension());
@@ -266,9 +231,6 @@ TEST_P(RasterDecoderLostContextTest, LostFromResetAfterMakeCurrent) {
TEST_P(RasterDecoderLostContextTest, LoseGuiltyFromGLError) {
Init(/*has_robustness=*/true);
- // Always expect other contexts to be signaled as 'kUnknown' since we can't
- // query their status without making them current.
- EXPECT_CALL(*mock_decoder_, MarkContextLost(error::kUnknown)).Times(1);
DoGetErrorWithContextLost(GL_GUILTY_CONTEXT_RESET_KHR);
EXPECT_TRUE(decoder_->WasContextLost());
EXPECT_TRUE(decoder_->WasContextLostByRobustnessExtension());
@@ -277,34 +239,15 @@ TEST_P(RasterDecoderLostContextTest, LoseGuiltyFromGLError) {
TEST_P(RasterDecoderLostContextTest, LoseInnocentFromGLError) {
Init(/*has_robustness=*/true);
- // Always expect other contexts to be signaled as 'kUnknown' since we can't
- // query their status without making them current.
- EXPECT_CALL(*mock_decoder_, MarkContextLost(error::kUnknown)).Times(1);
DoGetErrorWithContextLost(GL_INNOCENT_CONTEXT_RESET_KHR);
EXPECT_TRUE(decoder_->WasContextLost());
EXPECT_TRUE(decoder_->WasContextLostByRobustnessExtension());
EXPECT_EQ(error::kInnocent, GetContextLostReason());
}
-TEST_P(RasterDecoderLostContextTest, LoseGroupFromRobustness) {
- // If one context in a group is lost through robustness,
- // the other ones should also get lost and query the reset status.
- Init(true);
- EXPECT_CALL(*mock_decoder_, MarkContextLost(error::kUnknown)).Times(1);
- // There should be no GL calls, since we might not have a current context.
- EXPECT_CALL(*gl_, GetGraphicsResetStatusARB()).Times(0);
- LoseContexts(error::kUnknown);
- EXPECT_TRUE(decoder_->WasContextLost());
- EXPECT_EQ(error::kUnknown, GetContextLostReason());
-
- // We didn't process commands, so we need to clear the decoder error,
- // so that we can shut down cleanly.
- ClearCurrentDecoderError();
-}
-
-INSTANTIATE_TEST_CASE_P(Service,
- RasterDecoderLostContextTest,
- ::testing::Bool());
+INSTANTIATE_TEST_SUITE_P(Service,
+ RasterDecoderLostContextTest,
+ ::testing::Bool());
} // namespace raster
} // namespace gpu
diff --git a/chromium/gpu/command_buffer/service/scheduler.cc b/chromium/gpu/command_buffer/service/scheduler.cc
index 112948d9381..6246f037237 100644
--- a/chromium/gpu/command_buffer/service/scheduler.cc
+++ b/chromium/gpu/command_buffer/service/scheduler.cc
@@ -6,6 +6,7 @@
#include <algorithm>
+#include "base/bind.h"
#include "base/callback.h"
#include "base/single_thread_task_runner.h"
#include "base/stl_util.h"
diff --git a/chromium/gpu/command_buffer/service/service_transfer_cache.cc b/chromium/gpu/command_buffer/service/service_transfer_cache.cc
index 3373500d061..56f7b63cb51 100644
--- a/chromium/gpu/command_buffer/service/service_transfer_cache.cc
+++ b/chromium/gpu/command_buffer/service/service_transfer_cache.cc
@@ -6,6 +6,8 @@
#include <inttypes.h>
+#include <utility>
+
#include "base/bind.h"
#include "base/strings/stringprintf.h"
#include "base/system/sys_info.h"
@@ -13,6 +15,7 @@
#include "base/trace_event/memory_dump_manager.h"
#include "cc/paint/image_transfer_cache_entry.h"
#include "gpu/command_buffer/service/service_discardable_manager.h"
+#include "third_party/skia/include/core/SkImage.h"
#include "third_party/skia/include/gpu/GrBackendSurface.h"
#include "ui/gl/trace_util.h"
@@ -204,6 +207,37 @@ void ServiceTransferCache::DeleteAllEntriesForDecoder(int decoder_id) {
}
}
+bool ServiceTransferCache::CreateLockedImageEntry(
+ int decoder_id,
+ uint32_t entry_id,
+ ServiceDiscardableHandle handle,
+ GrContext* context,
+ base::span<const uint8_t> decoded_image,
+ size_t row_bytes,
+ const SkImageInfo& image_info,
+ bool needs_mips,
+ sk_sp<SkColorSpace> target_color_space) {
+ EntryKey key(decoder_id, cc::TransferCacheEntryType::kImage, entry_id);
+ auto found = entries_.Peek(key);
+ if (found != entries_.end())
+ return false;
+
+ // Create the service-side image transfer cache entry. Note that this involves
+ // uploading the image if it fits in GPU memory.
+ auto entry = std::make_unique<cc::ServiceImageTransferCacheEntry>();
+ if (!entry->BuildFromDecodedData(context, decoded_image, row_bytes,
+ image_info, needs_mips,
+ target_color_space)) {
+ return false;
+ }
+
+ // Insert it in the transfer cache.
+ total_size_ += entry->CachedSize();
+ entries_.Put(key, CacheEntryInternal(handle, std::move(entry)));
+ EnforceLimits();
+ return true;
+}
+
bool ServiceTransferCache::OnMemoryDump(
const base::trace_event::MemoryDumpArgs& args,
base::trace_event::ProcessMemoryDump* pmd) {
diff --git a/chromium/gpu/command_buffer/service/service_transfer_cache.h b/chromium/gpu/command_buffer/service/service_transfer_cache.h
index 39229664258..8b2f16a1ab6 100644
--- a/chromium/gpu/command_buffer/service/service_transfer_cache.h
+++ b/chromium/gpu/command_buffer/service/service_transfer_cache.h
@@ -5,7 +5,10 @@
#ifndef GPU_COMMAND_BUFFER_SERVICE_SERVICE_TRANSFER_CACHE_H_
#define GPU_COMMAND_BUFFER_SERVICE_SERVICE_TRANSFER_CACHE_H_
-#include <vector>
+#include <stddef.h>
+#include <stdint.h>
+
+#include <memory>
#include "base/containers/mru_cache.h"
#include "base/containers/span.h"
@@ -14,12 +17,18 @@
#include "gpu/command_buffer/common/discardable_handle.h"
#include "gpu/command_buffer/service/context_group.h"
#include "gpu/gpu_gles2_export.h"
+#include "third_party/skia/include/core/SkRefCnt.h"
+
+class GrContext;
+class SkColorSpace;
+struct SkImageInfo;
namespace gpu {
-// ServiceTransferCache is a GPU process interface for retreiving cached entries
+// ServiceTransferCache is a GPU process interface for retrieving cached entries
// from the transfer cache. These entries are populated by client calls to the
-// ClientTransferCache.
+// ClientTransferCache or by an image decode accelerator task in the GPU
+// process.
//
// In addition to access, the ServiceTransferCache is also responsible for
// unlocking and deleting entries when no longer needed, as well as enforcing
@@ -51,6 +60,23 @@ class GPU_GLES2_EXPORT ServiceTransferCache
cc::ServiceTransferCacheEntry* GetEntry(const EntryKey& key);
void DeleteAllEntriesForDecoder(int decoder_id);
+ // Creates an image transfer cache entry using the decoded data in
+ // |decoded_image|. The |context| will be used to upload the image (if it's
+ // determined to fit in the GPU). |row_bytes| is the stride, and |image_info|
+ // describes the decoded data. |decoder_id| and |entry_id| are used for
+ // creating the ServiceTransferCache::EntryKey (assuming
+ // cc::TransferCacheEntryType::kImage for the type). Returns true if the entry
+ // could be created and inserted; false otherwise.
+ bool CreateLockedImageEntry(int decoder_id,
+ uint32_t entry_id,
+ ServiceDiscardableHandle handle,
+ GrContext* context,
+ base::span<const uint8_t> decoded_image,
+ size_t row_bytes,
+ const SkImageInfo& image_info,
+ bool needs_mips,
+ sk_sp<SkColorSpace> target_color_space);
+
void PurgeMemory(
base::MemoryPressureListener::MemoryPressureLevel memory_pressure_level);
diff --git a/chromium/gpu/command_buffer/service/service_utils.cc b/chromium/gpu/command_buffer/service/service_utils.cc
index 86b2898c142..cbf3860ae36 100644
--- a/chromium/gpu/command_buffer/service/service_utils.cc
+++ b/chromium/gpu/command_buffer/service/service_utils.cc
@@ -169,6 +169,8 @@ GpuPreferences ParseGpuPreferences(const base::CommandLine* command_line) {
command_line->HasSwitch(switches::kEnableUnsafeWebGPU);
gpu_preferences.enable_raster_to_sk_image =
command_line->HasSwitch(switches::kEnableRasterToSkImage);
+ gpu_preferences.enable_vulkan =
+ command_line->HasSwitch(switches::kEnableVulkan);
return gpu_preferences;
}
diff --git a/chromium/gpu/command_buffer/service/shader_translator_unittest.cc b/chromium/gpu/command_buffer/service/shader_translator_unittest.cc
index 214725c8d68..c97fd5219dc 100644
--- a/chromium/gpu/command_buffer/service/shader_translator_unittest.cc
+++ b/chromium/gpu/command_buffer/service/shader_translator_unittest.cc
@@ -550,7 +550,7 @@ testing::tuple<const char*, const char*> make_gl_glsl_tuple(
// certain version of GLSL to be guaranteed to be supported. Test
// that ShaderTranslator produces a GLSL shader with the exact
// specified GLSL version for each known OpenGL version.
-INSTANTIATE_TEST_CASE_P(
+INSTANTIATE_TEST_SUITE_P(
KnownOpenGLContexts,
ShaderTranslatorOutputVersionTest,
testing::Values(make_gl_glsl_tuple("4.5", "#version 450\n"),
@@ -570,16 +570,16 @@ INSTANTIATE_TEST_CASE_P(
// similar shader. We do not expect that future 3.3+ specs contain
// the "all eariler GLSL versions" clause, since 3.3 did not contain
// it either.
-INSTANTIATE_TEST_CASE_P(OldOrUnknownOpenGLContexts,
- ShaderTranslatorOutputVersionTest,
- testing::Values(make_gl_glsl_tuple("3.4", ""),
- make_gl_glsl_tuple("2.0", "")));
+INSTANTIATE_TEST_SUITE_P(OldOrUnknownOpenGLContexts,
+ ShaderTranslatorOutputVersionTest,
+ testing::Values(make_gl_glsl_tuple("3.4", ""),
+ make_gl_glsl_tuple("2.0", "")));
// Test data for the above test. Cases for the future OpenGL versions. The
// code assumes that the future OpenGL specs specify the clause that all
// earlier GLSL versions are supported. We select the highest GLSL
// version known at the time of writing.
-INSTANTIATE_TEST_CASE_P(
+INSTANTIATE_TEST_SUITE_P(
BackwardsCompatibleFutureOpenGLContexts,
ShaderTranslatorOutputVersionTest,
testing::Values(make_gl_glsl_tuple("5.0", "#version 450\n"),
@@ -589,13 +589,13 @@ INSTANTIATE_TEST_CASE_P(
// contexts, the shader is such that GLSL 1.0 is used. The translator
// selects GLSL 1.0 by not output any version at the moment, though we
// do not know if that would be correct for the future OpenGL ES specs.
-INSTANTIATE_TEST_CASE_P(OpenGLESContexts,
- ShaderTranslatorOutputVersionTest,
- testing::Values(make_gl_glsl_tuple("opengl es 2.0", ""),
- make_gl_glsl_tuple("opengl es 3.0", ""),
- make_gl_glsl_tuple("opengl es 3.1", ""),
- make_gl_glsl_tuple("opengl es 3.2",
- "")));
+INSTANTIATE_TEST_SUITE_P(
+ OpenGLESContexts,
+ ShaderTranslatorOutputVersionTest,
+ testing::Values(make_gl_glsl_tuple("opengl es 2.0", ""),
+ make_gl_glsl_tuple("opengl es 3.0", ""),
+ make_gl_glsl_tuple("opengl es 3.1", ""),
+ make_gl_glsl_tuple("opengl es 3.2", "")));
} // namespace gles2
} // namespace gpu
diff --git a/chromium/gpu/command_buffer/service/shared_context_state.cc b/chromium/gpu/command_buffer/service/shared_context_state.cc
index 02acb107330..6f3c86edccd 100644
--- a/chromium/gpu/command_buffer/service/shared_context_state.cc
+++ b/chromium/gpu/command_buffer/service/shared_context_state.cc
@@ -113,6 +113,7 @@ void SharedContextState::InitializeGrContext(
options.fGlyphCacheTextureMaximumBytes = glyph_cache_max_texture_bytes_;
options.fPersistentCache = cache;
options.fAvoidStencilBuffers = workarounds.avoid_stencil_buffers;
+ options.fDisallowGLSLBinaryCaching = workarounds.disable_program_disk_cache;
owned_gr_context_ = GrContext::MakeGL(std::move(interface), options);
gr_context_ = owned_gr_context_.get();
if (!gr_context_) {
diff --git a/chromium/gpu/command_buffer/service/shared_image_backing.cc b/chromium/gpu/command_buffer/service/shared_image_backing.cc
index 87b08bc390d..79a516fd973 100644
--- a/chromium/gpu/command_buffer/service/shared_image_backing.cc
+++ b/chromium/gpu/command_buffer/service/shared_image_backing.cc
@@ -30,6 +30,12 @@ SharedImageBacking::ProduceGLTexture(SharedImageManager* manager,
return nullptr;
}
+std::unique_ptr<SharedImageRepresentationGLTexture>
+SharedImageBacking::ProduceRGBEmulationGLTexture(SharedImageManager* manager,
+ MemoryTypeTracker* tracker) {
+ return nullptr;
+}
+
std::unique_ptr<SharedImageRepresentationGLTexturePassthrough>
SharedImageBacking::ProduceGLTexturePassthrough(SharedImageManager* manager,
MemoryTypeTracker* tracker) {
diff --git a/chromium/gpu/command_buffer/service/shared_image_backing.h b/chromium/gpu/command_buffer/service/shared_image_backing.h
index 779465fdd63..731c53975ce 100644
--- a/chromium/gpu/command_buffer/service/shared_image_backing.h
+++ b/chromium/gpu/command_buffer/service/shared_image_backing.h
@@ -85,6 +85,9 @@ class GPU_GLES2_EXPORT SharedImageBacking {
virtual std::unique_ptr<SharedImageRepresentationGLTexture> ProduceGLTexture(
SharedImageManager* manager,
MemoryTypeTracker* tracker);
+ virtual std::unique_ptr<SharedImageRepresentationGLTexture>
+ ProduceRGBEmulationGLTexture(SharedImageManager* manager,
+ MemoryTypeTracker* tracker);
virtual std::unique_ptr<SharedImageRepresentationGLTexturePassthrough>
ProduceGLTexturePassthrough(SharedImageManager* manager,
MemoryTypeTracker* tracker);
diff --git a/chromium/gpu/command_buffer/service/shared_image_backing_factory_ahardwarebuffer.cc b/chromium/gpu/command_buffer/service/shared_image_backing_factory_ahardwarebuffer.cc
index 8f3f5057b29..75565a014d2 100644
--- a/chromium/gpu/command_buffer/service/shared_image_backing_factory_ahardwarebuffer.cc
+++ b/chromium/gpu/command_buffer/service/shared_image_backing_factory_ahardwarebuffer.cc
@@ -5,6 +5,12 @@
#include "gpu/command_buffer/service/shared_image_backing_factory_ahardwarebuffer.h"
#include <sync/sync.h>
+#include <unistd.h>
+
+#include <algorithm>
+#include <memory>
+#include <utility>
+#include <vector>
#include "base/android/android_hardware_buffer_compat.h"
#include "base/android/scoped_hardware_buffer_handle.h"
@@ -39,6 +45,38 @@
#include "ui/gl/gl_version_info.h"
namespace gpu {
+namespace {
+
+bool InsertWriteFence(base::ScopedFD write_sync_fd) {
+ if (write_sync_fd.is_valid()) {
+ if (!InsertEglFenceAndWait(std::move(write_sync_fd)))
+ return false;
+ }
+
+ return true;
+}
+
+bool InsertReadFences(std::vector<base::ScopedFD> read_sync_fds) {
+ for (auto& fd : read_sync_fds) {
+ if (!InsertEglFenceAndWait(std::move(fd)))
+ return false;
+ }
+
+ return true;
+}
+
+bool InsertAllFencesForWriting(base::ScopedFD write_sync_fd,
+ std::vector<base::ScopedFD> read_sync_fds) {
+ if (!InsertWriteFence(std::move(write_sync_fd)))
+ return false;
+
+ if (!InsertReadFences(std::move(read_sync_fds)))
+ return false;
+
+ return true;
+}
+
+} // namespace
// Implementation of SharedImageBacking that holds an AHardwareBuffer. This
// can be used to create a GL texture or a VK Image from the AHardwareBuffer
@@ -62,11 +100,12 @@ class SharedImageBackingAHB : public SharedImageBacking {
bool ProduceLegacyMailbox(MailboxManager* mailbox_manager) override;
void Destroy() override;
SharedContextState* GetContextState() const;
- base::ScopedFD TakeGLWriteSyncFd();
- base::ScopedFD TakeVkReadSyncFd();
+ base::ScopedFD GetWriteSyncFd() const;
+ std::vector<base::ScopedFD> GetReadSyncFds() const;
+ void ClearReadSyncFds();
base::android::ScopedHardwareBufferHandle GetAhbHandle();
- void SetGLWriteSyncFd(base::ScopedFD fd);
- void SetVkReadSyncFd(base::ScopedFD fd);
+ void SetWriteSyncFd(base::ScopedFD fd);
+ void AddReadSyncFd(base::ScopedFD fd);
protected:
std::unique_ptr<SharedImageRepresentationGLTexture> ProduceGLTexture(
@@ -78,25 +117,20 @@ class SharedImageBackingAHB : public SharedImageBacking {
MemoryTypeTracker* tracker) override;
private:
- bool GenGLTexture();
+ gles2::Texture* GenGLTexture();
base::android::ScopedHardwareBufferHandle hardware_buffer_handle_;
- // This texture will be lazily initialised/created when ProduceGLTexture is
- // called.
- gles2::Texture* texture_ = nullptr;
+ gles2::Texture* legacy_texture_ = nullptr;
- // TODO(vikassoni): In future when we add begin/end write support, we will
- // need to properly use this flag to pass the is_cleared_ information to
- // the GL texture representation while begin write and back to this class from
- // the GL texture represntation after end write. This is because this class
- // will not know if SetCleared() arrives during begin write happening on GL
- // texture representation.
bool is_cleared_ = false;
SharedContextState* context_state_ = nullptr;
- base::ScopedFD gl_write_sync_fd_;
- base::ScopedFD vk_read_sync_fd_;
- sk_sp<SkPromiseImageTexture> cached_promise_texture_;
+ // All reads and writes must wait for exiting writes to complete.
+ base::ScopedFD write_sync_fd_;
+
+ // All writes must wait for existing reads to complete.
+ std::vector<base::ScopedFD> read_sync_fds_;
+
DISALLOW_COPY_AND_ASSIGN(SharedImageBackingAHB);
};
@@ -111,21 +145,20 @@ class SharedImageRepresentationGLTextureAHB
: SharedImageRepresentationGLTexture(manager, backing, tracker),
texture_(texture) {}
+ ~SharedImageRepresentationGLTextureAHB() override {
+ if (texture_)
+ texture_->RemoveLightweightRef(has_context());
+ }
+
gles2::Texture* GetTexture() override { return texture_; }
bool BeginAccess(GLenum mode) override {
- // TODO(vikassoni): Currently Skia Vk backing never does a write. So GL read
- // do not need to wait for the Vk write to finish. Eventually when Vk starts
- // writing, we will need to TakeVkWriteSyncFd() and wait on it for mode =
- // GL_SHARED_IMAGE_ACCESS_MODE_READ_CHROMIUM.
-
- // Wait on Vk read if GL is going to write.
- // TODO(vikassoni): GL writes should wait on both Vk read and Vk writes.
- if (mode == GL_SHARED_IMAGE_ACCESS_MODE_READWRITE_CHROMIUM) {
- base::ScopedFD sync_fd = ahb_backing()->TakeVkReadSyncFd();
-
- // Create an egl fence sync and do a server side wait.
- if (!InsertEglFenceAndWait(std::move(sync_fd)))
+ if (mode == GL_SHARED_IMAGE_ACCESS_MODE_READ_CHROMIUM) {
+ if (!InsertWriteFence(ahb_backing()->GetWriteSyncFd()))
+ return false;
+ } else if (mode == GL_SHARED_IMAGE_ACCESS_MODE_READWRITE_CHROMIUM) {
+ if (!InsertAllFencesForWriting(ahb_backing()->GetWriteSyncFd(),
+ ahb_backing()->GetReadSyncFds()))
return false;
}
mode_ = mode;
@@ -133,17 +166,22 @@ class SharedImageRepresentationGLTextureAHB
}
void EndAccess() override {
- // TODO(vikassoni): Currently Skia Vk backing never does a write. So Vk
- // writes do not need to wait on GL to finish the read. Eventually when Vk
- // starts writing, we will need to create and set a GLReadSyncFd for mode =
- // GL_SHARED_IMAGE_ACCESS_MODE_READ_CHROMIUM for Vk to wait on it.
- if (mode_ == GL_SHARED_IMAGE_ACCESS_MODE_READWRITE_CHROMIUM) {
+ if (mode_ == GL_SHARED_IMAGE_ACCESS_MODE_READ_CHROMIUM) {
+ InsertReadFences(ahb_backing()->GetReadSyncFds());
+ } else if (mode_ == GL_SHARED_IMAGE_ACCESS_MODE_READWRITE_CHROMIUM) {
base::ScopedFD sync_fd = CreateEglFenceAndExportFd();
if (!sync_fd.is_valid())
return;
// Pass this fd to its backing.
- ahb_backing()->SetGLWriteSyncFd(std::move(sync_fd));
+ ahb_backing()->SetWriteSyncFd(std::move(sync_fd));
+
+ ahb_backing()->ClearReadSyncFds();
+
+ if (texture_) {
+ if (texture_->IsLevelCleared(texture_->target(), 0))
+ backing()->SetCleared();
+ }
}
}
@@ -167,16 +205,20 @@ class SharedImageRepresentationSkiaGLAHB
SharedImageBacking* backing,
sk_sp<SkPromiseImageTexture> cached_promise_image_texture,
MemoryTypeTracker* tracker,
- GLenum target,
- GLuint service_id)
+ gles2::Texture* texture)
: SharedImageRepresentationSkia(manager, backing, tracker),
- promise_texture_(cached_promise_image_texture) {
+ promise_texture_(cached_promise_image_texture),
+ texture_(std::move(texture)) {
#if DCHECK_IS_ON()
context_ = gl::GLContext::GetCurrent();
#endif
}
- ~SharedImageRepresentationSkiaGLAHB() override { DCHECK(!write_surface_); }
+ ~SharedImageRepresentationSkiaGLAHB() override {
+ DCHECK(!write_surface_);
+ if (texture_)
+ texture_->RemoveLightweightRef(has_context());
+ }
sk_sp<SkSurface> BeginWriteAccess(
GrContext* gr_context,
@@ -188,13 +230,8 @@ class SharedImageRepresentationSkiaGLAHB
if (write_surface_)
return nullptr;
- // Synchronise this access with the Vk reads.
- // TODO(vikassoni): SkiaGL writes should wait on both Vk read and Vk writes.
- base::ScopedFD sync_fd = ahb_backing()->TakeVkReadSyncFd();
-
- // Create an egl fence sync and do a server side wait.
- if (!InsertEglFenceAndWait(std::move(sync_fd)))
- return nullptr;
+ InsertAllFencesForWriting(ahb_backing()->GetWriteSyncFd(),
+ ahb_backing()->GetReadSyncFds());
if (!promise_texture_) {
return nullptr;
@@ -204,8 +241,8 @@ class SharedImageRepresentationSkiaGLAHB
/*gpu_compositing=*/true, format());
auto surface = SkSurface::MakeFromBackendTextureAsRenderTarget(
gr_context, promise_texture_->backendTexture(),
- kTopLeft_GrSurfaceOrigin, final_msaa_count, sk_color_type, nullptr,
- &surface_props);
+ kTopLeft_GrSurfaceOrigin, final_msaa_count, sk_color_type,
+ backing()->color_space().ToSkColorSpace(), &surface_props);
write_surface_ = surface.get();
return surface;
}
@@ -224,23 +261,30 @@ class SharedImageRepresentationSkiaGLAHB
return;
// Pass this fd to its backing.
- ahb_backing()->SetGLWriteSyncFd(std::move(sync_fd));
+ ahb_backing()->SetWriteSyncFd(std::move(sync_fd));
+
+ ahb_backing()->ClearReadSyncFds();
+
+ if (texture_) {
+ if (texture_->IsLevelCleared(texture_->target(), 0))
+ backing()->SetCleared();
+ }
}
sk_sp<SkPromiseImageTexture> BeginReadAccess(SkSurface* sk_surface) override {
CheckContext();
- // TODO(vikassoni): Currently Skia Vk backing never does a write. So this
- // read do not need to wait for the Vk write to finish. Eventually when Vk
- // starts writing, we might need to TakeVkWriteSyncFd() and wait on it.
+ if (!InsertWriteFence(ahb_backing()->GetWriteSyncFd()))
+ return nullptr;
return promise_texture_;
}
void EndReadAccess() override {
CheckContext();
- // TODO(vikassoni): Currently Skia Vk backing never does a write. So Vk
- // writes do not need to wait on this read to finish. Eventually when Vk
- // starts writing, we will need to create and set a SkiaGLReadSyncFd.
- // TODO(ericrk): Handle begin/end correctness checks.
+
+ base::ScopedFD sync_fd = CreateEglFenceAndExportFd();
+ if (!sync_fd.is_valid())
+ return;
+ ahb_backing()->AddReadSyncFd(std::move(sync_fd));
}
private:
@@ -255,6 +299,7 @@ class SharedImageRepresentationSkiaGLAHB
}
sk_sp<SkPromiseImageTexture> promise_texture_;
+ gles2::Texture* texture_;
SkSurface* write_surface_ = nullptr;
#if DCHECK_IS_ON()
gl::GLContext* context_;
@@ -271,18 +316,9 @@ class SharedImageRepresentationSkiaVkAHB
SharedImageBackingAHB* ahb_backing =
static_cast<SharedImageBackingAHB*>(backing);
DCHECK(ahb_backing);
- SharedContextState* context_state = ahb_backing->GetContextState();
- DCHECK(context_state);
- DCHECK(context_state->vk_context_provider());
-
- vk_device_ = context_state->vk_context_provider()
- ->GetDeviceQueue()
- ->GetVulkanDevice();
- vk_phy_device_ = context_state->vk_context_provider()
- ->GetDeviceQueue()
- ->GetVulkanPhysicalDevice();
- vk_implementation_ =
- context_state->vk_context_provider()->GetVulkanImplementation();
+ context_state_ = ahb_backing->GetContextState();
+ DCHECK(context_state_);
+ DCHECK(context_state_->vk_context_provider());
}
~SharedImageRepresentationSkiaVkAHB() override { DCHECK(!read_surface_); }
@@ -304,18 +340,21 @@ class SharedImageRepresentationSkiaVkAHB
DCHECK(sk_surface);
// Synchronise the read access with the GL writes.
- base::ScopedFD sync_fd = ahb_backing()->TakeGLWriteSyncFd();
+ base::ScopedFD sync_fd = ahb_backing()->GetWriteSyncFd();
+ VkSemaphore semaphore = VK_NULL_HANDLE;
// We need to wait only if there is a valid fd.
if (sync_fd.is_valid()) {
- // Do a client side wait for now.
- // TODO(vikassoni): There seems to be a skia bug -
- // https://bugs.chromium.org/p/chromium/issues/detail?id=916812 currently
- // where wait() on the sk surface crashes. Remove the sync_wait() and
- // apply CL mentioned in the bug when the issue is fixed.
- static const int InfiniteSyncWaitTimeout = -1;
- if (sync_wait(sync_fd.get(), InfiniteSyncWaitTimeout) < 0) {
- LOG(ERROR) << "Failed while waiting on GL Write sync fd";
+ // Import the above sync fd into a semaphore.
+ if (!vk_implementation()->ImportSemaphoreFdKHR(
+ vk_device(), std::move(sync_fd), &semaphore)) {
+ return nullptr;
+ }
+
+ // Submit wait semaphore to the queue. Note that Skia uses the same queue
+ // exposed by vk_queue(), so this will work due to Vulkan queue ordering.
+ if (!vk_implementation()->SubmitWaitSemaphore(vk_queue(), semaphore)) {
+ vkDestroySemaphore(vk_device(), semaphore, nullptr);
return nullptr;
}
}
@@ -325,8 +364,8 @@ class SharedImageRepresentationSkiaVkAHB
VkImageCreateInfo vk_image_info;
VkDeviceMemory vk_device_memory;
VkDeviceSize mem_allocation_size;
- if (!vk_implementation_->CreateVkImageAndImportAHB(
- vk_device_, vk_phy_device_, size(), ahb_backing()->GetAhbHandle(),
+ if (!vk_implementation()->CreateVkImageAndImportAHB(
+ vk_device(), vk_phy_device(), size(), ahb_backing()->GetAhbHandle(),
&vk_image, &vk_image_info, &vk_device_memory,
&mem_allocation_size)) {
return nullptr;
@@ -345,8 +384,8 @@ class SharedImageRepresentationSkiaVkAHB
auto promise_texture = SkPromiseImageTexture::Make(
GrBackendTexture(size().width(), size().height(), vk_info));
if (!promise_texture) {
- vkDestroyImage(vk_device_, vk_image, nullptr);
- vkFreeMemory(vk_device_, vk_device_memory, nullptr);
+ vkDestroyImage(vk_device(), vk_image, nullptr);
+ vkFreeMemory(vk_device(), vk_device_memory, nullptr);
return nullptr;
}
@@ -354,6 +393,18 @@ class SharedImageRepresentationSkiaVkAHB
// EndReadAccess. Also make sure previous read_surface_ have been consumed
// by EndReadAccess() call.
read_surface_ = sk_surface;
+
+ // TODO(vikassoni): Need to do better semaphore cleanup management. Waiting
+ // on device to be idle to delete the semaphore is costly. Instead use a
+ // fence to get signal when semaphore submission is done.
+ if (semaphore != VK_NULL_HANDLE) {
+ VkResult result = vkQueueWaitIdle(vk_queue());
+ if (result != VK_SUCCESS) {
+ LOG(ERROR) << "vkQueueWaitIdle failed: " << result;
+ return nullptr;
+ }
+ vkDestroySemaphore(vk_device(), semaphore, nullptr);
+ }
return promise_texture;
}
@@ -372,10 +423,9 @@ class SharedImageRepresentationSkiaVkAHB
sem_info.sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO;
sem_info.pNext = &export_info;
sem_info.flags = 0;
- bool result =
- vkCreateSemaphore(vk_device_, &sem_info, nullptr, &vk_semaphore);
+ VkResult result =
+ vkCreateSemaphore(vk_device(), &sem_info, nullptr, &vk_semaphore);
if (result != VK_SUCCESS) {
- // TODO(vikassoni): add more error handling rather than just return ?
LOG(ERROR) << "vkCreateSemaphore failed";
read_surface_ = nullptr;
return;
@@ -388,7 +438,7 @@ class SharedImageRepresentationSkiaVkAHB
// instruct the GPU to wait on any of the semaphores.
if (read_surface_->flushAndSignalSemaphores(1, &gr_semaphore) ==
GrSemaphoresSubmitted::kNo) {
- vkDestroySemaphore(vk_device_, vk_semaphore, nullptr);
+ vkDestroySemaphore(vk_device(), vk_semaphore, nullptr);
read_surface_ = nullptr;
return;
}
@@ -400,10 +450,10 @@ class SharedImageRepresentationSkiaVkAHB
// GPU. The caller must delete the semaphores created.
// Export a sync fd from the semaphore.
base::ScopedFD sync_fd;
- vk_implementation_->GetSemaphoreFdKHR(vk_device_, vk_semaphore, &sync_fd);
+ vk_implementation()->GetSemaphoreFdKHR(vk_device(), vk_semaphore, &sync_fd);
// pass this sync fd to the backing.
- ahb_backing()->SetVkReadSyncFd(std::move(sync_fd));
+ ahb_backing()->AddReadSyncFd(std::move(sync_fd));
// TODO(vikassoni): We need to wait for the queue submission to complete
// before we can destroy the semaphore. This will decrease the performance.
@@ -411,8 +461,12 @@ class SharedImageRepresentationSkiaVkAHB
// in a STL queue instead of destroying it. Later use a fence to check if
// the batch that refers the semaphore has completed execution. Delete the
// semaphore once the fence is signalled.
- vkDeviceWaitIdle(vk_device_);
- vkDestroySemaphore(vk_device_, vk_semaphore, nullptr);
+ result = vkQueueWaitIdle(vk_queue());
+ if (result != VK_SUCCESS) {
+ LOG(ERROR) << "vkQueueWaitIdle failed: " << result;
+ return;
+ }
+ vkDestroySemaphore(vk_device(), vk_semaphore, nullptr);
}
private:
@@ -420,10 +474,30 @@ class SharedImageRepresentationSkiaVkAHB
return static_cast<SharedImageBackingAHB*>(backing());
}
+ gpu::VulkanImplementation* vk_implementation() {
+ return context_state_->vk_context_provider()->GetVulkanImplementation();
+ }
+
+ VkDevice vk_device() {
+ return context_state_->vk_context_provider()
+ ->GetDeviceQueue()
+ ->GetVulkanDevice();
+ }
+
+ VkPhysicalDevice vk_phy_device() {
+ return context_state_->vk_context_provider()
+ ->GetDeviceQueue()
+ ->GetVulkanPhysicalDevice();
+ }
+
+ VkQueue vk_queue() {
+ return context_state_->vk_context_provider()
+ ->GetDeviceQueue()
+ ->GetVulkanQueue();
+ }
+
SkSurface* read_surface_ = nullptr;
- gpu::VulkanImplementation* vk_implementation_ = nullptr;
- VkDevice vk_device_ = VK_NULL_HANDLE;
- VkPhysicalDevice vk_phy_device_ = VK_NULL_HANDLE;
+ SharedContextState* context_state_ = nullptr;
};
SharedImageBackingAHB::SharedImageBackingAHB(
@@ -450,18 +524,15 @@ SharedImageBackingAHB::~SharedImageBackingAHB() {
// Check to make sure buffer is explicitly destroyed using Destroy() api
// before this destructor is called.
DCHECK(!hardware_buffer_handle_.is_valid());
- DCHECK(!texture_);
}
bool SharedImageBackingAHB::IsCleared() const {
- if (texture_)
- return texture_->IsLevelCleared(texture_->target(), 0);
return is_cleared_;
}
void SharedImageBackingAHB::SetCleared() {
- if (texture_)
- texture_->SetLevelCleared(texture_->target(), 0, true);
+ if (legacy_texture_)
+ legacy_texture_->SetLevelCleared(legacy_texture_->target(), 0, true);
is_cleared_ = true;
}
@@ -470,18 +541,18 @@ void SharedImageBackingAHB::Update() {}
bool SharedImageBackingAHB::ProduceLegacyMailbox(
MailboxManager* mailbox_manager) {
DCHECK(hardware_buffer_handle_.is_valid());
- if (!GenGLTexture())
+ legacy_texture_ = GenGLTexture();
+ if (!legacy_texture_)
return false;
- DCHECK(texture_);
- mailbox_manager->ProduceTexture(mailbox(), texture_);
+ mailbox_manager->ProduceTexture(mailbox(), legacy_texture_);
return true;
}
void SharedImageBackingAHB::Destroy() {
DCHECK(hardware_buffer_handle_.is_valid());
- if (texture_) {
- texture_->RemoveLightweightRef(have_context());
- texture_ = nullptr;
+ if (legacy_texture_) {
+ legacy_texture_->RemoveLightweightRef(have_context());
+ legacy_texture_ = nullptr;
}
hardware_buffer_handle_.reset();
}
@@ -490,20 +561,33 @@ SharedContextState* SharedImageBackingAHB::GetContextState() const {
return context_state_;
}
-base::ScopedFD SharedImageBackingAHB::TakeGLWriteSyncFd() {
- return std::move(gl_write_sync_fd_);
+base::ScopedFD SharedImageBackingAHB::GetWriteSyncFd() const {
+ base::ScopedFD dup_fd =
+ base::ScopedFD(HANDLE_EINTR(dup(write_sync_fd_.get())));
+ return dup_fd;
+}
+
+void SharedImageBackingAHB::SetWriteSyncFd(base::ScopedFD fd) {
+ write_sync_fd_ = std::move(fd);
}
-void SharedImageBackingAHB::SetGLWriteSyncFd(base::ScopedFD fd) {
- gl_write_sync_fd_ = std::move(fd);
+std::vector<base::ScopedFD> SharedImageBackingAHB::GetReadSyncFds() const {
+ std::vector<base::ScopedFD> dup_fds{read_sync_fds_.size()};
+ for (size_t i = 0; i < read_sync_fds_.size(); ++i) {
+ base::ScopedFD dup_fd =
+ base::ScopedFD(HANDLE_EINTR(dup(read_sync_fds_[i].get())));
+ dup_fds.emplace_back(std::move(dup_fd));
+ }
+ return dup_fds;
}
-base::ScopedFD SharedImageBackingAHB::TakeVkReadSyncFd() {
- return std::move(vk_read_sync_fd_);
+void SharedImageBackingAHB::AddReadSyncFd(base::ScopedFD fd) {
+ read_sync_fds_.emplace_back(std::move(fd));
}
-void SharedImageBackingAHB::SetVkReadSyncFd(base::ScopedFD fd) {
- vk_read_sync_fd_ = std::move(fd);
+void SharedImageBackingAHB::ClearReadSyncFds() {
+ // Swap with a new vector to release capacity.
+ std::vector<base::ScopedFD>().swap(read_sync_fds_);
}
base::android::ScopedHardwareBufferHandle
@@ -516,12 +600,12 @@ SharedImageBackingAHB::ProduceGLTexture(SharedImageManager* manager,
MemoryTypeTracker* tracker) {
// Use same texture for all the texture representations generated from same
// backing.
- if (!GenGLTexture())
+ auto* texture = GenGLTexture();
+ if (!texture)
return nullptr;
- DCHECK(texture_);
return std::make_unique<SharedImageRepresentationGLTextureAHB>(
- manager, this, tracker, texture_);
+ manager, this, tracker, std::move(texture));
}
std::unique_ptr<SharedImageRepresentationSkia>
@@ -535,26 +619,21 @@ SharedImageBackingAHB::ProduceSkia(SharedImageManager* manager,
return std::make_unique<SharedImageRepresentationSkiaVkAHB>(manager, this);
}
- if (!GenGLTexture())
+ auto* texture = GenGLTexture();
+ if (!texture)
return nullptr;
- if (!cached_promise_texture_) {
- GrBackendTexture backend_texture;
- GetGrBackendTexture(gl::GLContext::GetCurrent()->GetVersionInfo(),
- texture_->target(), size(), texture_->service_id(),
- format(), &backend_texture);
- cached_promise_texture_ = SkPromiseImageTexture::Make(backend_texture);
- }
- DCHECK(texture_);
+ GrBackendTexture backend_texture;
+ GetGrBackendTexture(gl::GLContext::GetCurrent()->GetVersionInfo(),
+ texture->target(), size(), texture->service_id(),
+ format(), &backend_texture);
+ sk_sp<SkPromiseImageTexture> promise_texture =
+ SkPromiseImageTexture::Make(backend_texture);
return std::make_unique<SharedImageRepresentationSkiaGLAHB>(
- manager, this, cached_promise_texture_, tracker, texture_->target(),
- texture_->service_id());
+ manager, this, promise_texture, tracker, std::move(texture));
}
-bool SharedImageBackingAHB::GenGLTexture() {
- if (texture_)
- return true;
-
+gles2::Texture* SharedImageBackingAHB::GenGLTexture() {
DCHECK(hardware_buffer_handle_.is_valid());
// Target for AHB backed egl images.
@@ -583,23 +662,23 @@ bool SharedImageBackingAHB::GenGLTexture() {
LOG(ERROR) << "Failed to create EGL image ";
api->glBindTextureFn(target, old_texture_binding);
api->glDeleteTexturesFn(1, &service_id);
- return false;
+ return nullptr;
}
if (!egl_image->BindTexImage(target)) {
LOG(ERROR) << "Failed to bind egl image";
api->glBindTextureFn(target, old_texture_binding);
api->glDeleteTexturesFn(1, &service_id);
- return false;
+ return nullptr;
}
// Create a gles2 Texture.
- texture_ = new gles2::Texture(service_id);
- texture_->SetLightweightRef();
- texture_->SetTarget(target, 1);
- texture_->sampler_state_.min_filter = GL_LINEAR;
- texture_->sampler_state_.mag_filter = GL_LINEAR;
- texture_->sampler_state_.wrap_t = GL_CLAMP_TO_EDGE;
- texture_->sampler_state_.wrap_s = GL_CLAMP_TO_EDGE;
+ auto* texture = new gles2::Texture(service_id);
+ texture->SetLightweightRef();
+ texture->SetTarget(target, 1);
+ texture->sampler_state_.min_filter = GL_LINEAR;
+ texture->sampler_state_.mag_filter = GL_LINEAR;
+ texture->sampler_state_.wrap_t = GL_CLAMP_TO_EDGE;
+ texture->sampler_state_.wrap_s = GL_CLAMP_TO_EDGE;
// If the backing is already cleared, no need to clear it again.
gfx::Rect cleared_rect;
@@ -608,14 +687,14 @@ bool SharedImageBackingAHB::GenGLTexture() {
GLenum gl_format = viz::GLDataFormat(format());
GLenum gl_type = viz::GLDataType(format());
- texture_->SetLevelInfo(target, 0, egl_image->GetInternalFormat(),
- size().width(), size().height(), 1, 0, gl_format,
- gl_type, cleared_rect);
- texture_->SetLevelImage(target, 0, egl_image.get(), gles2::Texture::BOUND);
- texture_->SetImmutable(true);
+ texture->SetLevelInfo(target, 0, egl_image->GetInternalFormat(),
+ size().width(), size().height(), 1, 0, gl_format,
+ gl_type, cleared_rect);
+ texture->SetLevelImage(target, 0, egl_image.get(), gles2::Texture::BOUND);
+ texture->SetImmutable(true);
api->glBindTextureFn(target, old_texture_binding);
DCHECK_EQ(egl_image->GetInternalFormat(), gl_format);
- return true;
+ return texture;
}
SharedImageBackingFactoryAHB::SharedImageBackingFactoryAHB(
@@ -688,6 +767,10 @@ SharedImageBackingFactoryAHB::SharedImageBackingFactoryAHB(
max_gl_texture_size_ =
std::min(max_gl_texture_size_, workarounds.max_texture_size);
}
+ // Ensure max_texture_size_ is less than INT_MAX so that gfx::Rect and friends
+ // can be used to accurately represent all valid sub-rects, with overflow
+ // cases, clamped to INT_MAX, always invalid.
+ max_gl_texture_size_ = std::min(max_gl_texture_size_, INT_MAX - 1);
}
SharedImageBackingFactoryAHB::~SharedImageBackingFactoryAHB() = default;
diff --git a/chromium/gpu/command_buffer/service/shared_image_backing_factory_gl_texture.cc b/chromium/gpu/command_buffer/service/shared_image_backing_factory_gl_texture.cc
index 71542287e4c..067fbafb62b 100644
--- a/chromium/gpu/command_buffer/service/shared_image_backing_factory_gl_texture.cc
+++ b/chromium/gpu/command_buffer/service/shared_image_backing_factory_gl_texture.cc
@@ -4,6 +4,10 @@
#include "gpu/command_buffer/service/shared_image_backing_factory_gl_texture.h"
+#include <algorithm>
+#include <string>
+#include <utility>
+
#include "base/feature_list.h"
#include "base/trace_event/memory_dump_manager.h"
#include "base/trace_event/trace_event.h"
@@ -34,15 +38,16 @@ namespace gpu {
namespace {
+using UnpackStateAttribs =
+ SharedImageBackingFactoryGLTexture::UnpackStateAttribs;
+
class ScopedResetAndRestoreUnpackState {
public:
ScopedResetAndRestoreUnpackState(gl::GLApi* api,
- bool es3_capable,
- bool desktop_gl,
- bool supports_unpack_subimage,
+ const UnpackStateAttribs& attribs,
bool uploading_data)
: api_(api) {
- if (es3_capable) {
+ if (attribs.es3_capable) {
// Need to unbind any GL_PIXEL_UNPACK_BUFFER for the nullptr in
// glTexImage2D to mean "no pixels" (as opposed to offset 0 in the
// buffer).
@@ -55,7 +60,7 @@ class ScopedResetAndRestoreUnpackState {
if (unpack_alignment_ != 4)
api_->glPixelStoreiFn(GL_UNPACK_ALIGNMENT, 4);
- if (es3_capable || supports_unpack_subimage) {
+ if (attribs.es3_capable || attribs.supports_unpack_subimage) {
api_->glGetIntegervFn(GL_UNPACK_ROW_LENGTH, &unpack_row_length_);
if (unpack_row_length_)
api_->glPixelStoreiFn(GL_UNPACK_ROW_LENGTH, 0);
@@ -67,7 +72,7 @@ class ScopedResetAndRestoreUnpackState {
api_->glPixelStoreiFn(GL_UNPACK_SKIP_PIXELS, 0);
}
- if (es3_capable) {
+ if (attribs.es3_capable) {
api_->glGetIntegervFn(GL_UNPACK_SKIP_IMAGES, &unpack_skip_images_);
if (unpack_skip_images_)
api_->glPixelStoreiFn(GL_UNPACK_SKIP_IMAGES, 0);
@@ -76,7 +81,7 @@ class ScopedResetAndRestoreUnpackState {
api_->glPixelStoreiFn(GL_UNPACK_IMAGE_HEIGHT, 0);
}
- if (desktop_gl) {
+ if (attribs.desktop_gl) {
api->glGetBooleanvFn(GL_UNPACK_SWAP_BYTES, &unpack_swap_bytes_);
if (unpack_swap_bytes_)
api->glPixelStoreiFn(GL_UNPACK_SWAP_BYTES, GL_FALSE);
@@ -227,11 +232,30 @@ class SharedImageRepresentationGLTexturePassthroughImpl
scoped_refptr<gles2::TexturePassthrough> texture_passthrough_;
};
+class SharedImageBackingWithReadAccess : public SharedImageBacking {
+ public:
+ SharedImageBackingWithReadAccess(const Mailbox& mailbox,
+ viz::ResourceFormat format,
+ const gfx::Size& size,
+ const gfx::ColorSpace& color_space,
+ uint32_t usage,
+ size_t estimated_size)
+ : SharedImageBacking(mailbox,
+ format,
+ size,
+ color_space,
+ usage,
+ estimated_size) {}
+ ~SharedImageBackingWithReadAccess() override = default;
+
+ virtual void BeginReadAccess() = 0;
+};
+
class SharedImageRepresentationSkiaImpl : public SharedImageRepresentationSkia {
public:
SharedImageRepresentationSkiaImpl(
SharedImageManager* manager,
- SharedImageBacking* backing,
+ SharedImageBackingWithReadAccess* backing,
sk_sp<SkPromiseImageTexture> cached_promise_texture,
MemoryTypeTracker* tracker,
GLenum target,
@@ -266,8 +290,8 @@ class SharedImageRepresentationSkiaImpl : public SharedImageRepresentationSkia {
/*gpu_compositing=*/true, format());
auto surface = SkSurface::MakeFromBackendTextureAsRenderTarget(
gr_context, promise_texture_->backendTexture(),
- kTopLeft_GrSurfaceOrigin, final_msaa_count, sk_color_type, nullptr,
- &surface_props);
+ kTopLeft_GrSurfaceOrigin, final_msaa_count, sk_color_type,
+ backing()->color_space().ToSkColorSpace(), &surface_props);
write_surface_ = surface.get();
return surface;
}
@@ -282,6 +306,8 @@ class SharedImageRepresentationSkiaImpl : public SharedImageRepresentationSkia {
sk_sp<SkPromiseImageTexture> BeginReadAccess(SkSurface* sk_surface) override {
CheckContext();
+ static_cast<SharedImageBackingWithReadAccess*>(backing())
+ ->BeginReadAccess();
return promise_texture_;
}
@@ -308,25 +334,30 @@ class SharedImageRepresentationSkiaImpl : public SharedImageRepresentationSkia {
// Implementation of SharedImageBacking that creates a GL Texture and stores it
// as a gles2::Texture. Can be used with the legacy mailbox implementation.
-class SharedImageBackingGLTexture : public SharedImageBacking {
+class SharedImageBackingGLTexture : public SharedImageBackingWithReadAccess {
public:
SharedImageBackingGLTexture(const Mailbox& mailbox,
viz::ResourceFormat format,
const gfx::Size& size,
const gfx::ColorSpace& color_space,
uint32_t usage,
- gles2::Texture* texture)
- : SharedImageBacking(mailbox,
- format,
- size,
- color_space,
- usage,
- texture->estimated_size()),
- texture_(texture) {
+ gles2::Texture* texture,
+ const UnpackStateAttribs& attribs)
+ : SharedImageBackingWithReadAccess(mailbox,
+ format,
+ size,
+ color_space,
+ usage,
+ texture->estimated_size()),
+ texture_(texture),
+ attribs_(attribs) {
DCHECK(texture_);
}
- ~SharedImageBackingGLTexture() override { DCHECK(!texture_); }
+ ~SharedImageBackingGLTexture() override {
+ DCHECK(!texture_);
+ DCHECK(!rgb_emulation_texture_);
+ }
bool IsCleared() const override {
return texture_->IsLevelCleared(texture_->target(), 0);
@@ -346,10 +377,13 @@ class SharedImageBackingGLTexture : public SharedImageBacking {
gl::GLImage* image = texture_->GetLevelImage(target, 0, &old_state);
if (!image)
return;
- image->ReleaseTexImage(target);
+ if (old_state == gles2::Texture::BOUND)
+ image->ReleaseTexImage(target);
gles2::Texture::ImageState new_state = gles2::Texture::UNBOUND;
- if (image->BindTexImage(target))
+ if (image->ShouldBindOrCopy() == gl::GLImage::BIND &&
+ image->BindTexImage(target)) {
new_state = gles2::Texture::BOUND;
+ }
if (old_state != new_state)
texture_->SetLevelImage(target, 0, image, new_state);
}
@@ -364,6 +398,11 @@ class SharedImageBackingGLTexture : public SharedImageBacking {
DCHECK(texture_);
texture_->RemoveLightweightRef(have_context());
texture_ = nullptr;
+
+ if (rgb_emulation_texture_) {
+ rgb_emulation_texture_->RemoveLightweightRef(have_context());
+ rgb_emulation_texture_ = nullptr;
+ }
}
void OnMemoryDump(const std::string& dump_name,
@@ -386,6 +425,29 @@ class SharedImageBackingGLTexture : public SharedImageBacking {
texture_->DumpLevelMemory(pmd, client_tracing_id, dump_name);
}
+ void BeginReadAccess() override {
+ GLenum target = texture_->target();
+ gles2::Texture::ImageState old_state = gles2::Texture::UNBOUND;
+ gl::GLImage* image = texture_->GetLevelImage(target, 0, &old_state);
+ if (image && old_state == gpu::gles2::Texture::UNBOUND) {
+ gl::GLApi* api = gl::g_current_gl_context;
+ ScopedRestoreTexture scoped_restore(api, target);
+ api->glBindTextureFn(target, texture_->service_id());
+ gles2::Texture::ImageState new_state = gles2::Texture::UNBOUND;
+ if (image->ShouldBindOrCopy() == gl::GLImage::BIND) {
+ if (image->BindTexImage(target))
+ new_state = gles2::Texture::BOUND;
+ } else {
+ ScopedResetAndRestoreUnpackState scoped_unpack_state(api, attribs_,
+ /*upload=*/true);
+ if (image->CopyTexImage(target))
+ new_state = gles2::Texture::COPIED;
+ }
+ if (old_state != new_state)
+ texture_->SetLevelImage(target, 0, image, new_state);
+ }
+ }
+
protected:
std::unique_ptr<SharedImageRepresentationGLTexture> ProduceGLTexture(
SharedImageManager* manager,
@@ -393,6 +455,57 @@ class SharedImageBackingGLTexture : public SharedImageBacking {
return std::make_unique<SharedImageRepresentationGLTextureImpl>(
manager, this, tracker, texture_);
}
+
+ std::unique_ptr<SharedImageRepresentationGLTexture>
+ ProduceRGBEmulationGLTexture(SharedImageManager* manager,
+ MemoryTypeTracker* tracker) override {
+ if (!rgb_emulation_texture_) {
+ GLenum target = texture_->target();
+ gl::GLApi* api = gl::g_current_gl_context;
+ ScopedRestoreTexture scoped_restore(api, target);
+
+ bool framebuffer_attachment_angle =
+ (usage() & (SHARED_IMAGE_USAGE_RASTER |
+ SHARED_IMAGE_USAGE_GLES2_FRAMEBUFFER_HINT)) != 0;
+ GLuint service_id = MakeTextureAndSetParameters(
+ api, target, framebuffer_attachment_angle);
+
+ gles2::Texture::ImageState image_state = gles2::Texture::BOUND;
+ gl::GLImage* image = texture_->GetLevelImage(target, 0, &image_state);
+ if (!image) {
+ LOG(ERROR) << "Texture is not bound to an image.";
+ return nullptr;
+ }
+
+ DCHECK(image->ShouldBindOrCopy() == gl::GLImage::BIND);
+ const GLenum internal_format = GL_RGB;
+ if (!image->BindTexImageWithInternalformat(target, internal_format)) {
+ LOG(ERROR) << "Failed to bind image to rgb texture.";
+ api->glDeleteTexturesFn(1, &service_id);
+ return nullptr;
+ }
+
+ rgb_emulation_texture_ = new gles2::Texture(service_id);
+ rgb_emulation_texture_->SetLightweightRef();
+ rgb_emulation_texture_->SetTarget(target, 1);
+ rgb_emulation_texture_->sampler_state_.min_filter = GL_LINEAR;
+ rgb_emulation_texture_->sampler_state_.mag_filter = GL_LINEAR;
+ rgb_emulation_texture_->sampler_state_.wrap_s = GL_CLAMP_TO_EDGE;
+ rgb_emulation_texture_->sampler_state_.wrap_t = GL_CLAMP_TO_EDGE;
+
+ const gles2::Texture::LevelInfo* info = texture_->GetLevelInfo(target, 0);
+ rgb_emulation_texture_->SetLevelInfo(
+ target, 0, internal_format, info->width, info->height, 1, 0,
+ info->format, info->type, info->cleared_rect);
+
+ rgb_emulation_texture_->SetLevelImage(target, 0, image, image_state);
+ rgb_emulation_texture_->SetImmutable(true);
+ }
+
+ return std::make_unique<SharedImageRepresentationGLTextureImpl>(
+ manager, this, tracker, rgb_emulation_texture_);
+ }
+
std::unique_ptr<SharedImageRepresentationSkia> ProduceSkia(
SharedImageManager* manager,
MemoryTypeTracker* tracker) override {
@@ -405,13 +518,16 @@ class SharedImageBackingGLTexture : public SharedImageBacking {
private:
gles2::Texture* texture_ = nullptr;
+ gles2::Texture* rgb_emulation_texture_ = nullptr;
sk_sp<SkPromiseImageTexture> cached_promise_texture_;
+ const UnpackStateAttribs attribs_;
};
// Implementation of SharedImageBacking that creates a GL Texture and stores it
// as a gles2::TexturePassthrough. Can be used with the legacy mailbox
// implementation.
-class SharedImageBackingPassthroughGLTexture : public SharedImageBacking {
+class SharedImageBackingPassthroughGLTexture
+ : public SharedImageBackingWithReadAccess {
public:
SharedImageBackingPassthroughGLTexture(
const Mailbox& mailbox,
@@ -421,12 +537,12 @@ class SharedImageBackingPassthroughGLTexture : public SharedImageBacking {
uint32_t usage,
scoped_refptr<gles2::TexturePassthrough> passthrough_texture,
bool is_cleared)
- : SharedImageBacking(mailbox,
- format,
- size,
- color_space,
- usage,
- passthrough_texture->estimated_size()),
+ : SharedImageBackingWithReadAccess(mailbox,
+ format,
+ size,
+ color_space,
+ usage,
+ passthrough_texture->estimated_size()),
texture_passthrough_(std::move(passthrough_texture)),
is_cleared_(is_cleared) {
DCHECK(texture_passthrough_);
@@ -449,7 +565,9 @@ class SharedImageBackingPassthroughGLTexture : public SharedImageBacking {
if (!image)
return;
image->ReleaseTexImage(target);
- if (!image->BindTexImage(target))
+ if (image->ShouldBindOrCopy() == gl::GLImage::BIND)
+ image->BindTexImage(target);
+ else
image->CopyTexImage(target);
}
@@ -486,6 +604,8 @@ class SharedImageBackingPassthroughGLTexture : public SharedImageBacking {
gl_image->OnMemoryDump(pmd, client_tracing_id, dump_name);
}
+ void BeginReadAccess() override {}
+
protected:
std::unique_ptr<SharedImageRepresentationGLTexturePassthrough>
ProduceGLTexturePassthrough(SharedImageManager* manager,
@@ -520,10 +640,18 @@ SharedImageBackingFactoryGLTexture::SharedImageBackingFactoryGLTexture(
image_factory_(image_factory) {
gl::GLApi* api = gl::g_current_gl_context;
api->glGetIntegervFn(GL_MAX_TEXTURE_SIZE, &max_texture_size_);
- if (workarounds.max_texture_size) {
+ // When the passthrough command decoder is used, the max_texture_size
+ // workaround is implemented by ANGLE. Trying to adjust the max size here
+ // would cause discrepency between what we think the max size is and what
+ // ANGLE tells the clients.
+ if (!use_passthrough_ && workarounds.max_texture_size) {
max_texture_size_ =
std::min(max_texture_size_, workarounds.max_texture_size);
}
+ // Ensure max_texture_size_ is less than INT_MAX so that gfx::Rect and friends
+ // can be used to accurately represent all valid sub-rects, with overflow
+ // cases, clamped to INT_MAX, always invalid.
+ max_texture_size_ = std::min(max_texture_size_, INT_MAX - 1);
// TODO(piman): Can we extract the logic out of FeatureInfo?
scoped_refptr<gles2::FeatureInfo> feature_info =
@@ -533,12 +661,12 @@ SharedImageBackingFactoryGLTexture::SharedImageBackingFactoryGLTexture(
gpu_memory_buffer_formats_ =
feature_info->feature_flags().gpu_memory_buffer_formats;
texture_usage_angle_ = feature_info->feature_flags().angle_texture_usage;
- es3_capable_ = feature_info->IsES3Capable();
- desktop_gl_ = !feature_info->gl_version_info().is_es;
+ attribs.es3_capable = feature_info->IsES3Capable();
+ attribs.desktop_gl = !feature_info->gl_version_info().is_es;
// Can't use the value from feature_info, as we unconditionally enable this
// extension, and assume it can't be used if PBOs are not used (which isn't
// true for Skia used direclty against GL).
- supports_unpack_subimage_ =
+ attribs.supports_unpack_subimage =
gl::g_current_gl_driver->ext.b_GL_EXT_unpack_subimage;
bool enable_texture_storage =
feature_info->feature_flags().ext_texture_storage;
@@ -718,6 +846,8 @@ SharedImageBackingFactoryGLTexture::CreateSharedImage(
image = image_factory_->CreateAnonymousImage(
size, format_info.buffer_format, gfx::BufferUsage::SCANOUT,
&is_cleared);
+ // A SCANOUT image should not require copy.
+ DCHECK(!image || image->ShouldBindOrCopy() == gl::GLImage::BIND);
if (!image || !image->BindTexImage(target)) {
LOG(ERROR) << "CreateSharedImage: Failed to create image";
api->glDeleteTexturesFn(1, &service_id);
@@ -732,16 +862,14 @@ SharedImageBackingFactoryGLTexture::CreateSharedImage(
size.width(), size.height());
needs_subimage_upload = !pixel_data.empty();
} else if (format_info.is_compressed) {
- ScopedResetAndRestoreUnpackState scoped_unpack_state(
- api, es3_capable_, desktop_gl_, supports_unpack_subimage_,
- !pixel_data.empty());
+ ScopedResetAndRestoreUnpackState scoped_unpack_state(api, attribs,
+ !pixel_data.empty());
api->glCompressedTexImage2DFn(target, 0, format_info.image_internal_format,
size.width(), size.height(), 0,
pixel_data.size(), pixel_data.data());
} else {
- ScopedResetAndRestoreUnpackState scoped_unpack_state(
- api, es3_capable_, desktop_gl_, supports_unpack_subimage_,
- !pixel_data.empty());
+ ScopedResetAndRestoreUnpackState scoped_unpack_state(api, attribs,
+ !pixel_data.empty());
api->glTexImage2DFn(target, 0, format_info.image_internal_format,
size.width(), size.height(), 0,
format_info.adjusted_format, format_info.gl_type,
@@ -751,19 +879,19 @@ SharedImageBackingFactoryGLTexture::CreateSharedImage(
// If we are using a buffer or TexStorage API but have data to upload, do so
// now via TexSubImage2D.
if (needs_subimage_upload) {
- ScopedResetAndRestoreUnpackState scoped_unpack_state(
- api, es3_capable_, desktop_gl_, supports_unpack_subimage_,
- !pixel_data.empty());
+ ScopedResetAndRestoreUnpackState scoped_unpack_state(api, attribs,
+ !pixel_data.empty());
api->glTexSubImage2DFn(target, 0, 0, 0, size.width(), size.height(),
format_info.adjusted_format, format_info.gl_type,
pixel_data.data());
}
- return MakeBacking(mailbox, target, service_id, image, gles2::Texture::BOUND,
- level_info_internal_format, format_info.gl_format,
- format_info.gl_type, format_info.swizzle,
+ return MakeBacking(use_passthrough_, mailbox, target, service_id, image,
+ gles2::Texture::BOUND, level_info_internal_format,
+ format_info.gl_format, format_info.gl_type,
+ format_info.swizzle,
pixel_data.empty() ? is_cleared : true, format, size,
- color_space, usage);
+ color_space, usage, attribs);
}
std::unique_ptr<SharedImageBacking>
@@ -786,15 +914,26 @@ SharedImageBackingFactoryGLTexture::CreateSharedImage(
return nullptr;
}
- GLenum target = handle.type == gfx::SHARED_MEMORY_BUFFER
- ? GL_TEXTURE_2D
- : gpu::GetPlatformSpecificTextureTarget();
+ GLenum target =
+ (handle.type == gfx::SHARED_MEMORY_BUFFER ||
+ !NativeBufferNeedsPlatformSpecificTextureTarget(buffer_format))
+ ? GL_TEXTURE_2D
+ : gpu::GetPlatformSpecificTextureTarget();
scoped_refptr<gl::GLImage> image = MakeGLImage(
client_id, std::move(handle), buffer_format, surface_handle, size);
if (!image) {
LOG(ERROR) << "Failed to create image.";
return nullptr;
}
+ // If we decide to use GL_TEXTURE_2D at the target for a native buffer, we
+ // would like to verify that it will actually work. If the image expects to be
+ // copied, there is no way to do this verification here, because copying is
+ // done lazily after the SharedImage is created, so require that the image is
+ // bindable. Currently NativeBufferNeedsPlatformSpecificTextureTarget can
+ // only return false on Chrome OS where GLImageNativePixmap is used which is
+ // always bindable.
+ DCHECK(handle.type == gfx::SHARED_MEMORY_BUFFER || target != GL_TEXTURE_2D ||
+ image->ShouldBindOrCopy() == gl::GLImage::BIND);
if (color_space.IsValid())
image->SetColorSpace(color_space);
@@ -808,25 +947,53 @@ SharedImageBackingFactoryGLTexture::CreateSharedImage(
SHARED_IMAGE_USAGE_GLES2_FRAMEBUFFER_HINT)) != 0;
GLuint service_id = MakeTextureAndSetParameters(
api, target, for_framebuffer_attachment && texture_usage_angle_);
+ bool is_rgb_emulation = usage & SHARED_IMAGE_USAGE_RGB_EMULATION;
- // TODO(piman): RGB emulation
gles2::Texture::ImageState image_state = gles2::Texture::UNBOUND;
- if (image->BindTexImage(target)) {
- image_state = gles2::Texture::BOUND;
+ if (image->ShouldBindOrCopy() == gl::GLImage::BIND) {
+ bool is_bound = false;
+ if (is_rgb_emulation)
+ is_bound = image->BindTexImageWithInternalformat(target, GL_RGB);
+ else
+ is_bound = image->BindTexImage(target);
+ if (is_bound) {
+ image_state = gles2::Texture::BOUND;
+ } else {
+ LOG(ERROR) << "Failed to bind image to target.";
+ api->glDeleteTexturesFn(1, &service_id);
+ return nullptr;
+ }
} else if (use_passthrough_) {
image->CopyTexImage(target);
image_state = gles2::Texture::COPIED;
}
- GLuint internal_format = image->GetInternalFormat();
+ GLuint internal_format =
+ is_rgb_emulation ? GL_RGB : image->GetInternalFormat();
GLenum gl_format =
gles2::TextureManager::ExtractFormatFromStorageFormat(internal_format);
GLenum gl_type =
gles2::TextureManager::ExtractTypeFromStorageFormat(internal_format);
- return MakeBacking(mailbox, target, service_id, image, image_state,
- internal_format, gl_format, gl_type, nullptr, true, format,
- size, color_space, usage);
+ return MakeBacking(use_passthrough_, mailbox, target, service_id, image,
+ image_state, internal_format, gl_format, gl_type, nullptr,
+ true, format, size, color_space, usage, attribs);
+}
+
+std::unique_ptr<SharedImageBacking>
+SharedImageBackingFactoryGLTexture::CreateSharedImageForTest(
+ const Mailbox& mailbox,
+ GLenum target,
+ GLuint service_id,
+ bool is_cleared,
+ viz::ResourceFormat format,
+ const gfx::Size& size,
+ uint32_t usage) {
+ return MakeBacking(false, mailbox, target, service_id, nullptr,
+ gles2::Texture::UNBOUND, viz::GLInternalFormat(format),
+ viz::GLDataFormat(format), viz::GLDataType(format),
+ nullptr, is_cleared, format, size, gfx::ColorSpace(),
+ usage, UnpackStateAttribs());
}
scoped_refptr<gl::GLImage> SharedImageBackingFactoryGLTexture::MakeGLImage(
@@ -856,6 +1023,7 @@ scoped_refptr<gl::GLImage> SharedImageBackingFactoryGLTexture::MakeGLImage(
std::unique_ptr<SharedImageBacking>
SharedImageBackingFactoryGLTexture::MakeBacking(
+ bool passthrough,
const Mailbox& mailbox,
GLenum target,
GLuint service_id,
@@ -869,8 +1037,9 @@ SharedImageBackingFactoryGLTexture::MakeBacking(
viz::ResourceFormat format,
const gfx::Size& size,
const gfx::ColorSpace& color_space,
- uint32_t usage) {
- if (use_passthrough_) {
+ uint32_t usage,
+ const UnpackStateAttribs& attribs) {
+ if (passthrough) {
scoped_refptr<gles2::TexturePassthrough> passthrough_texture =
base::MakeRefCounted<gles2::TexturePassthrough>(service_id, target);
if (image)
@@ -904,7 +1073,7 @@ SharedImageBackingFactoryGLTexture::MakeBacking(
texture->SetImmutable(true);
return std::make_unique<SharedImageBackingGLTexture>(
- mailbox, format, size, color_space, usage, texture);
+ mailbox, format, size, color_space, usage, texture, attribs);
}
}
diff --git a/chromium/gpu/command_buffer/service/shared_image_backing_factory_gl_texture.h b/chromium/gpu/command_buffer/service/shared_image_backing_factory_gl_texture.h
index 5705b92bf1b..43c609f2256 100644
--- a/chromium/gpu/command_buffer/service/shared_image_backing_factory_gl_texture.h
+++ b/chromium/gpu/command_buffer/service/shared_image_backing_factory_gl_texture.h
@@ -35,6 +35,12 @@ class ImageFactory;
class GPU_GLES2_EXPORT SharedImageBackingFactoryGLTexture
: public SharedImageBackingFactory {
public:
+ struct UnpackStateAttribs {
+ bool es3_capable = false;
+ bool desktop_gl = false;
+ bool supports_unpack_subimage = false;
+ };
+
SharedImageBackingFactoryGLTexture(const GpuPreferences& gpu_preferences,
const GpuDriverBugWorkarounds& workarounds,
const GpuFeatureInfo& gpu_feature_info,
@@ -65,13 +71,23 @@ class GPU_GLES2_EXPORT SharedImageBackingFactoryGLTexture
const gfx::ColorSpace& color_space,
uint32_t usage) override;
+ static std::unique_ptr<SharedImageBacking> CreateSharedImageForTest(
+ const Mailbox& mailbox,
+ GLenum target,
+ GLuint service_id,
+ bool is_cleared,
+ viz::ResourceFormat format,
+ const gfx::Size& size,
+ uint32_t usage);
+
private:
scoped_refptr<gl::GLImage> MakeGLImage(int client_id,
gfx::GpuMemoryBufferHandle handle,
gfx::BufferFormat format,
SurfaceHandle surface_handle,
const gfx::Size& size);
- std::unique_ptr<SharedImageBacking> MakeBacking(
+ static std::unique_ptr<SharedImageBacking> MakeBacking(
+ bool passthrough,
const Mailbox& mailbox,
GLenum target,
GLuint service_id,
@@ -85,7 +101,9 @@ class GPU_GLES2_EXPORT SharedImageBackingFactoryGLTexture
viz::ResourceFormat format,
const gfx::Size& size,
const gfx::ColorSpace& color_space,
- uint32_t usage);
+ uint32_t usage,
+ const UnpackStateAttribs& attribs);
+
struct FormatInfo {
FormatInfo();
~FormatInfo();
@@ -135,9 +153,7 @@ class GPU_GLES2_EXPORT SharedImageBackingFactoryGLTexture
GpuMemoryBufferFormatSet gpu_memory_buffer_formats_;
int32_t max_texture_size_ = 0;
bool texture_usage_angle_ = false;
- bool es3_capable_ = false;
- bool desktop_gl_ = false;
- bool supports_unpack_subimage_ = false;
+ UnpackStateAttribs attribs;
};
} // namespace gpu
diff --git a/chromium/gpu/command_buffer/service/shared_image_backing_factory_gl_texture_unittest.cc b/chromium/gpu/command_buffer/service/shared_image_backing_factory_gl_texture_unittest.cc
index a1ee348f004..ce49f5d2c03 100644
--- a/chromium/gpu/command_buffer/service/shared_image_backing_factory_gl_texture_unittest.cc
+++ b/chromium/gpu/command_buffer/service/shared_image_backing_factory_gl_texture_unittest.cc
@@ -248,6 +248,17 @@ TEST_P(SharedImageBackingFactoryGLTextureTest, Image) {
EXPECT_EQ(color_space, gl_representation->color_space());
EXPECT_EQ(usage, gl_representation->usage());
gl_representation.reset();
+
+ auto gl_representation_rgb =
+ shared_image_representation_factory_->ProduceRGBEmulationGLTexture(
+ mailbox);
+ EXPECT_TRUE(gl_representation_rgb);
+ EXPECT_TRUE(gl_representation_rgb->GetTexture()->service_id());
+ EXPECT_EQ(size, gl_representation_rgb->size());
+ EXPECT_EQ(format, gl_representation_rgb->format());
+ EXPECT_EQ(color_space, gl_representation_rgb->color_space());
+ EXPECT_EQ(usage, gl_representation_rgb->usage());
+ gl_representation_rgb.reset();
}
// Next, validate a SharedImageRepresentationGLTexturePassthrough.
@@ -288,7 +299,8 @@ TEST_P(SharedImageBackingFactoryGLTextureTest, Image) {
shared_image.reset();
EXPECT_FALSE(mailbox_manager_.ConsumeTexture(mailbox));
- if (!use_passthrough()) {
+ if (!use_passthrough() &&
+ context_state_->feature_info()->feature_flags().ext_texture_rg) {
// Create a R-8 image texture, and check that the internal_format is that of
// the image (GL_RGBA for TextureImageFactory). This only matters for the
// validating decoder.
@@ -526,18 +538,31 @@ class StubImage : public gl::GLImageStub {
return InternalFormatForGpuMemoryBufferFormat(format_);
}
+ BindOrCopy ShouldBindOrCopy() override { return BIND; }
+
bool BindTexImage(unsigned target) override {
if (!bound_) {
bound_ = true;
++update_counter_;
}
return true;
- };
+ }
+
+ bool BindTexImageWithInternalformat(unsigned target,
+ unsigned internal_format) override {
+ internal_format_ = internal_format;
+ if (!bound_) {
+ bound_ = true;
+ ++update_counter_;
+ }
+ return true;
+ }
void ReleaseTexImage(unsigned target) override { bound_ = false; }
bool bound() const { return bound_; }
int update_counter() const { return update_counter_; }
+ unsigned internal_format() const { return internal_format_; }
private:
~StubImage() override = default;
@@ -546,6 +571,7 @@ class StubImage : public gl::GLImageStub {
gfx::BufferFormat format_;
bool bound_ = false;
int update_counter_ = 0;
+ unsigned internal_format_ = GL_RGBA;
};
class SharedImageBackingFactoryGLTextureWithGMBTest
@@ -664,12 +690,52 @@ TEST_P(SharedImageBackingFactoryGLTextureWithGMBTest,
EXPECT_EQ(format, shm_image->format());
}
-INSTANTIATE_TEST_CASE_P(Service,
- SharedImageBackingFactoryGLTextureTest,
- ::testing::Bool());
-INSTANTIATE_TEST_CASE_P(Service,
- SharedImageBackingFactoryGLTextureWithGMBTest,
- ::testing::Bool());
+TEST_P(SharedImageBackingFactoryGLTextureWithGMBTest,
+ GpuMemoryBufferImportNative_WithRGBEmulation) {
+ if (use_passthrough())
+ return;
+ auto mailbox = Mailbox::GenerateForSharedImage();
+ gfx::Size size(256, 256);
+ gfx::BufferFormat format = gfx::BufferFormat::RGBA_8888;
+ auto color_space = gfx::ColorSpace::CreateSRGB();
+ uint32_t usage = SHARED_IMAGE_USAGE_GLES2;
+
+ gfx::GpuMemoryBufferHandle handle;
+ handle.type = gfx::NATIVE_PIXMAP;
+ auto backing = backing_factory_->CreateSharedImage(
+ mailbox, kClientId, std::move(handle), format, kNullSurfaceHandle, size,
+ color_space, usage);
+ ASSERT_TRUE(backing);
+
+ std::unique_ptr<SharedImageRepresentationFactoryRef> ref =
+ shared_image_manager_.Register(std::move(backing),
+ memory_type_tracker_.get());
+
+ auto representation =
+ shared_image_representation_factory_->ProduceRGBEmulationGLTexture(
+ mailbox);
+ EXPECT_TRUE(representation);
+ EXPECT_TRUE(representation->GetTexture()->service_id());
+ EXPECT_EQ(size, representation->size());
+ EXPECT_EQ(viz::ResourceFormat::RGBA_8888, representation->format());
+ EXPECT_EQ(color_space, representation->color_space());
+ EXPECT_EQ(usage, representation->usage());
+
+ scoped_refptr<gl::GLImage> image =
+ representation->GetTexture()->GetLevelImage(GL_TEXTURE_2D, 0);
+ ASSERT_EQ(image->GetType(), gl::GLImage::Type::NONE);
+ auto* stub_image = static_cast<StubImage*>(image.get());
+ EXPECT_EQ(stub_image->internal_format(), (unsigned)GL_RGB);
+ EXPECT_TRUE(stub_image->bound());
+ EXPECT_EQ(stub_image->update_counter(), 1);
+}
+
+INSTANTIATE_TEST_SUITE_P(Service,
+ SharedImageBackingFactoryGLTextureTest,
+ ::testing::Bool());
+INSTANTIATE_TEST_SUITE_P(Service,
+ SharedImageBackingFactoryGLTextureWithGMBTest,
+ ::testing::Bool());
} // anonymous namespace
} // namespace gpu
diff --git a/chromium/gpu/command_buffer/service/shared_image_backing_factory_iosurface.h b/chromium/gpu/command_buffer/service/shared_image_backing_factory_iosurface.h
new file mode 100644
index 00000000000..003f059ad88
--- /dev/null
+++ b/chromium/gpu/command_buffer/service/shared_image_backing_factory_iosurface.h
@@ -0,0 +1,68 @@
+// Copyright 2019 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef GPU_COMMAND_BUFFER_SERVICE_SHARED_IMAGE_BACKING_FACTORY_IOSURFACE_H_
+#define GPU_COMMAND_BUFFER_SERVICE_SHARED_IMAGE_BACKING_FACTORY_IOSURFACE_H_
+
+#include <memory>
+
+#include "base/macros.h"
+#include "components/viz/common/resources/resource_format.h"
+#include "gpu/command_buffer/service/shared_image_backing_factory.h"
+#include "gpu/gpu_gles2_export.h"
+#include "ui/gl/gl_bindings.h"
+
+namespace gfx {
+class Size;
+class ColorSpace;
+} // namespace gfx
+
+namespace gpu {
+class GpuDriverBugWorkarounds;
+struct GpuFeatureInfo;
+struct Mailbox;
+class SharedImageBacking;
+
+// Implementation of SharedImageBackingFactory that produce IOSurface backed
+// SharedImages. This is meant to be used on macOS only.
+class GPU_GLES2_EXPORT SharedImageBackingFactoryIOSurface
+ : public SharedImageBackingFactory {
+ public:
+ SharedImageBackingFactoryIOSurface(const GpuDriverBugWorkarounds& workarounds,
+ const GpuFeatureInfo& gpu_feature_info);
+ ~SharedImageBackingFactoryIOSurface() override;
+
+ // SharedImageBackingFactory implementation.
+ std::unique_ptr<SharedImageBacking> CreateSharedImage(
+ const Mailbox& mailbox,
+ viz::ResourceFormat format,
+ const gfx::Size& size,
+ const gfx::ColorSpace& color_space,
+ uint32_t usage) override;
+ std::unique_ptr<SharedImageBacking> CreateSharedImage(
+ const Mailbox& mailbox,
+ viz::ResourceFormat format,
+ const gfx::Size& size,
+ const gfx::ColorSpace& color_space,
+ uint32_t usage,
+ base::span<const uint8_t> pixel_data) override;
+ std::unique_ptr<SharedImageBacking> CreateSharedImage(
+ const Mailbox& mailbox,
+ int client_id,
+ gfx::GpuMemoryBufferHandle handle,
+ gfx::BufferFormat format,
+ SurfaceHandle surface_handle,
+ const gfx::Size& size,
+ const gfx::ColorSpace& color_space,
+ uint32_t usage) override;
+
+ private:
+ bool format_supported_by_gl_[viz::RESOURCE_FORMAT_MAX + 1];
+
+ DISALLOW_COPY_AND_ASSIGN(SharedImageBackingFactoryIOSurface);
+};
+
+} // namespace gpu
+
+#endif // GPU_COMMAND_BUFFER_SERVICE_SHARED_IMAGE_BACKING_FACTORY_IOSURFACE_H_
diff --git a/chromium/gpu/command_buffer/service/shared_image_backing_factory_iosurface.mm b/chromium/gpu/command_buffer/service/shared_image_backing_factory_iosurface.mm
new file mode 100644
index 00000000000..c02c4abd62e
--- /dev/null
+++ b/chromium/gpu/command_buffer/service/shared_image_backing_factory_iosurface.mm
@@ -0,0 +1,411 @@
+// Copyright 2019 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "gpu/command_buffer/service/shared_image_backing_factory_iosurface.h"
+
+#include "base/mac/scoped_cftyperef.h"
+#include "components/viz/common/resources/resource_format_utils.h"
+#include "components/viz/common/resources/resource_sizes.h"
+#include "gpu/command_buffer/service/mailbox_manager.h"
+#include "gpu/command_buffer/service/shared_image_backing.h"
+#include "gpu/command_buffer/service/shared_image_representation.h"
+#include "gpu/command_buffer/service/skia_utils.h"
+#include "gpu/command_buffer/service/texture_manager.h"
+#include "third_party/skia/include/core/SkPromiseImageTexture.h"
+#include "ui/gfx/mac/io_surface.h"
+#include "ui/gl/gl_context.h"
+#include "ui/gl/gl_image_io_surface.h"
+
+namespace gpu {
+
+namespace {
+
+struct GLFormatInfo {
+ bool supported = false;
+
+ // GL internal_format/format/type triplet.
+ GLuint internal_format = 0;
+ GLenum format = 0;
+ GLenum type = 0;
+};
+
+// Get GL format triplets and modify them to match the logic in
+// gl_image_iosurface.mm
+GLFormatInfo GetGLFormatInfo(viz::ResourceFormat format) {
+ GLFormatInfo info = {
+ true,
+ viz::GLInternalFormat(format),
+ viz::GLDataFormat(format),
+ viz::GLDataType(format),
+ };
+
+ if (info.internal_format == GL_ZERO || info.format == GL_ZERO ||
+ info.type == GL_ZERO) {
+ return {false, GL_ZERO, GL_ZERO, GL_ZERO};
+ }
+
+ switch (format) {
+ case viz::BGRA_8888:
+ info.format = GL_RGBA;
+ info.internal_format = GL_RGBA;
+ break;
+
+ // Technically we should use GL_RGB but CGLTexImageIOSurface2D() (and
+ // OpenGL ES 3.0, for the case) support only GL_RGBA (the hardware ignores
+ // the alpha channel anyway), see https://crbug.com/797347.
+ case viz::BGRX_1010102:
+ info.format = GL_RGBA;
+ info.internal_format = GL_RGBA;
+ break;
+
+ default:
+ break;
+ }
+
+ return info;
+}
+
+void FlushIOSurfaceGLOperations() {
+ // The CGLTexImageIOSurface2D documentation says that we need to call
+ // glFlush, otherwise there is the risk of a race between different
+ // graphics contexts.
+ gl::GLApi* api = gl::g_current_gl_context;
+ api->glFlushFn();
+}
+
+} // anonymous namespace
+
+// Representation of a SharedImageBackingIOSurface as a GL Texture.
+class SharedImageRepresentationGLTextureIOSurface
+ : public SharedImageRepresentationGLTexture {
+ public:
+ SharedImageRepresentationGLTextureIOSurface(SharedImageManager* manager,
+ SharedImageBacking* backing,
+ MemoryTypeTracker* tracker,
+ gles2::Texture* texture)
+ : SharedImageRepresentationGLTexture(manager, backing, tracker),
+ texture_(texture) {
+ DCHECK(texture_);
+ }
+
+ ~SharedImageRepresentationGLTextureIOSurface() override {
+ texture_->RemoveLightweightRef(has_context());
+ }
+
+ gles2::Texture* GetTexture() override { return texture_; }
+
+ bool BeginAccess(GLenum mode) override { return true; }
+
+ void EndAccess() override { FlushIOSurfaceGLOperations(); }
+
+ private:
+ gles2::Texture* texture_;
+
+ DISALLOW_COPY_AND_ASSIGN(SharedImageRepresentationGLTextureIOSurface);
+};
+
+// Representation of a SharedImageBackingIOSurface as a Skia Texture.
+class SharedImageRepresentationSkiaIOSurface
+ : public SharedImageRepresentationSkia {
+ public:
+ SharedImageRepresentationSkiaIOSurface(
+ SharedImageManager* manager,
+ SharedImageBacking* backing,
+ sk_sp<SkPromiseImageTexture> promise_texture,
+ MemoryTypeTracker* tracker,
+ gles2::Texture* texture)
+ : SharedImageRepresentationSkia(manager, backing, tracker),
+ promise_texture_(std::move(promise_texture)),
+ texture_(texture) {
+ DCHECK(texture_);
+ DCHECK(promise_texture_);
+ }
+
+ ~SharedImageRepresentationSkiaIOSurface() override {
+ texture_->RemoveLightweightRef(has_context());
+ }
+
+ sk_sp<SkSurface> BeginWriteAccess(
+ GrContext* gr_context,
+ int final_msaa_count,
+ const SkSurfaceProps& surface_props) override {
+ SkColorType sk_color_type = viz::ResourceFormatToClosestSkColorType(
+ /*gpu_compositing=*/true, format());
+
+ return SkSurface::MakeFromBackendTextureAsRenderTarget(
+ gr_context, promise_texture_->backendTexture(),
+ kTopLeft_GrSurfaceOrigin, final_msaa_count, sk_color_type,
+ backing()->color_space().ToSkColorSpace(), &surface_props);
+ }
+
+ void EndWriteAccess(sk_sp<SkSurface> surface) override {
+ FlushIOSurfaceGLOperations();
+
+ if (texture_->IsLevelCleared(texture_->target(), 0)) {
+ backing()->SetCleared();
+ }
+ }
+
+ sk_sp<SkPromiseImageTexture> BeginReadAccess(SkSurface* sk_surface) override {
+ return promise_texture_;
+ }
+
+ void EndReadAccess() override { FlushIOSurfaceGLOperations(); }
+
+ private:
+ sk_sp<SkPromiseImageTexture> promise_texture_;
+ gles2::Texture* texture_;
+};
+
+// Implementation of SharedImageBacking by wrapping IOSurfaces
+class SharedImageBackingIOSurface : public SharedImageBacking {
+ public:
+ SharedImageBackingIOSurface(const Mailbox& mailbox,
+ viz::ResourceFormat format,
+ const gfx::Size& size,
+ const gfx::ColorSpace& color_space,
+ uint32_t usage,
+ base::ScopedCFTypeRef<IOSurfaceRef> io_surface,
+ size_t estimated_size)
+ : SharedImageBacking(mailbox,
+ format,
+ size,
+ color_space,
+ usage,
+ estimated_size),
+ io_surface_(std::move(io_surface)) {
+ DCHECK(io_surface_);
+ }
+ ~SharedImageBackingIOSurface() final { DCHECK(!io_surface_); }
+
+ bool IsCleared() const final { return is_cleared_; }
+ void SetCleared() final {
+ if (legacy_texture_) {
+ legacy_texture_->SetLevelCleared(legacy_texture_->target(), 0, true);
+ }
+
+ is_cleared_ = true;
+ }
+
+ void Update() final {}
+
+ bool ProduceLegacyMailbox(MailboxManager* mailbox_manager) final {
+ DCHECK(io_surface_);
+
+ legacy_texture_ = GenGLTexture();
+ if (!legacy_texture_) {
+ return false;
+ }
+
+ mailbox_manager->ProduceTexture(mailbox(), legacy_texture_);
+ return true;
+ }
+ void Destroy() final {
+ DCHECK(io_surface_);
+
+ if (legacy_texture_) {
+ legacy_texture_->RemoveLightweightRef(have_context());
+ legacy_texture_ = nullptr;
+ }
+
+ io_surface_.reset();
+ }
+
+ protected:
+ std::unique_ptr<SharedImageRepresentationGLTexture> ProduceGLTexture(
+ SharedImageManager* manager,
+ MemoryTypeTracker* tracker) final {
+ gles2::Texture* texture = GenGLTexture();
+ if (!texture) {
+ return nullptr;
+ }
+
+ return std::make_unique<SharedImageRepresentationGLTextureIOSurface>(
+ manager, this, tracker, texture);
+ }
+
+ std::unique_ptr<SharedImageRepresentationSkia> ProduceSkia(
+ SharedImageManager* manager,
+ MemoryTypeTracker* tracker) override {
+ gles2::Texture* texture = GenGLTexture();
+ if (!texture) {
+ return nullptr;
+ }
+
+ GrBackendTexture backend_texture;
+ GetGrBackendTexture(gl::GLContext::GetCurrent()->GetVersionInfo(),
+ texture->target(), size(), texture->service_id(),
+ format(), &backend_texture);
+ sk_sp<SkPromiseImageTexture> promise_texture =
+ SkPromiseImageTexture::Make(backend_texture);
+ return std::make_unique<SharedImageRepresentationSkiaIOSurface>(
+ manager, this, promise_texture, tracker, texture);
+ }
+
+ private:
+ gles2::Texture* GenGLTexture() {
+ GLFormatInfo gl_info = GetGLFormatInfo(format());
+ DCHECK(gl_info.supported);
+
+ // Wrap the IOSurface in a GLImageIOSurface
+ scoped_refptr<gl::GLImageIOSurface> image(
+ gl::GLImageIOSurface::Create(size(), gl_info.internal_format));
+ if (!image->Initialize(io_surface_, gfx::GenericSharedMemoryId(),
+ viz::BufferFormat(format()))) {
+ LOG(ERROR) << "Failed to create GLImageIOSurface";
+ return nullptr;
+ }
+
+ gl::GLApi* api = gl::g_current_gl_context;
+
+ // Save the currently bound rectangle texture to reset it once we are done.
+ GLint old_texture_binding = 0;
+ api->glGetIntegervFn(GL_TEXTURE_BINDING_RECTANGLE, &old_texture_binding);
+
+ // Create a gles2 rectangle texture to bind to the IOSurface.
+ GLuint service_id = 0;
+ api->glGenTexturesFn(1, &service_id);
+ api->glBindTextureFn(GL_TEXTURE_RECTANGLE, service_id);
+ api->glTexParameteriFn(GL_TEXTURE_RECTANGLE, GL_TEXTURE_MIN_FILTER,
+ GL_LINEAR);
+ api->glTexParameteriFn(GL_TEXTURE_RECTANGLE, GL_TEXTURE_MAG_FILTER,
+ GL_LINEAR);
+ api->glTexParameteriFn(GL_TEXTURE_RECTANGLE, GL_TEXTURE_WRAP_S,
+ GL_CLAMP_TO_EDGE);
+ api->glTexParameteriFn(GL_TEXTURE_RECTANGLE, GL_TEXTURE_WRAP_T,
+ GL_CLAMP_TO_EDGE);
+
+ // Bind the GLImageIOSurface to our texture
+ if (!image->BindTexImage(GL_TEXTURE_RECTANGLE)) {
+ LOG(ERROR) << "Failed to bind GLImageIOSurface";
+ api->glBindTextureFn(GL_TEXTURE_RECTANGLE, old_texture_binding);
+ api->glDeleteTexturesFn(1, &service_id);
+ return nullptr;
+ }
+
+ // If the backing is already cleared, no need to clear it again.
+ gfx::Rect cleared_rect;
+ if (is_cleared_) {
+ cleared_rect = gfx::Rect(size());
+ }
+
+ // Manually create a gles2::Texture wrapping our driver texture.
+ gles2::Texture* texture = new gles2::Texture(service_id);
+ texture->SetLightweightRef();
+ texture->SetTarget(GL_TEXTURE_RECTANGLE, 1);
+ texture->sampler_state_.min_filter = GL_LINEAR;
+ texture->sampler_state_.mag_filter = GL_LINEAR;
+ texture->sampler_state_.wrap_t = GL_CLAMP_TO_EDGE;
+ texture->sampler_state_.wrap_s = GL_CLAMP_TO_EDGE;
+ texture->SetLevelInfo(GL_TEXTURE_RECTANGLE, 0, gl_info.internal_format,
+ size().width(), size().height(), 1, 0, gl_info.format,
+ gl_info.type, cleared_rect);
+ texture->SetLevelImage(GL_TEXTURE_RECTANGLE, 0, image.get(),
+ gles2::Texture::BOUND);
+ texture->SetImmutable(true);
+
+ DCHECK_EQ(image->GetInternalFormat(), gl_info.format);
+
+ api->glBindTextureFn(GL_TEXTURE_RECTANGLE, old_texture_binding);
+ return texture;
+ }
+
+ base::ScopedCFTypeRef<IOSurfaceRef> io_surface_;
+ bool is_cleared_ = false;
+
+ // A texture for the associated legacy mailbox.
+ gles2::Texture* legacy_texture_ = nullptr;
+
+ DISALLOW_COPY_AND_ASSIGN(SharedImageBackingIOSurface);
+};
+
+// Implementation of SharedImageBackingFactoryIOSurface that creates
+// SharedImageBackings wrapping IOSurfaces.
+SharedImageBackingFactoryIOSurface::SharedImageBackingFactoryIOSurface(
+ const GpuDriverBugWorkarounds& workarounds,
+ const GpuFeatureInfo& gpu_feature_info) {
+ scoped_refptr<gles2::FeatureInfo> feature_info =
+ new gles2::FeatureInfo(workarounds, gpu_feature_info);
+ feature_info->Initialize(ContextType::CONTEXT_TYPE_OPENGLES2, false,
+ gles2::DisallowedFeatures());
+ const gles2::Validators* validators = feature_info->validators();
+
+ // Precompute for each format if we can use it with GL.
+ for (int i = 0; i <= viz::RESOURCE_FORMAT_MAX; ++i) {
+ viz::ResourceFormat format = static_cast<viz::ResourceFormat>(i);
+ GLFormatInfo gl_info = GetGLFormatInfo(format);
+
+ format_supported_by_gl_[i] =
+ gl_info.supported &&
+ validators->texture_internal_format.IsValid(gl_info.internal_format) &&
+ validators->texture_format.IsValid(gl_info.format) &&
+ validators->pixel_type.IsValid(gl_info.type);
+ }
+}
+
+SharedImageBackingFactoryIOSurface::~SharedImageBackingFactoryIOSurface() =
+ default;
+
+std::unique_ptr<SharedImageBacking>
+SharedImageBackingFactoryIOSurface::CreateSharedImage(
+ const Mailbox& mailbox,
+ viz::ResourceFormat format,
+ const gfx::Size& size,
+ const gfx::ColorSpace& color_space,
+ uint32_t usage) {
+ // Check the format is supported and for simplicity always require it to be
+ // supported for GL.
+ if (!format_supported_by_gl_[format]) {
+ LOG(ERROR) << "viz::ResourceFormat " << format
+ << " not supported by IOSurfaces";
+ return nullptr;
+ }
+
+ // Calculate SharedImage size in bytes.
+ size_t estimated_size;
+ if (!viz::ResourceSizes::MaybeSizeInBytes(size, format, &estimated_size)) {
+ LOG(ERROR) << "Failed to calculate SharedImage size";
+ return nullptr;
+ }
+
+ base::ScopedCFTypeRef<IOSurfaceRef> io_surface(
+ gfx::CreateIOSurface(size, viz::BufferFormat(format), false));
+ if (!io_surface) {
+ LOG(ERROR) << "Failed to allocate IOSurface.";
+ return nullptr;
+ }
+
+ gfx::IOSurfaceSetColorSpace(io_surface, color_space);
+
+ return std::make_unique<SharedImageBackingIOSurface>(
+ mailbox, format, size, color_space, usage, std::move(io_surface),
+ estimated_size);
+}
+
+std::unique_ptr<SharedImageBacking>
+SharedImageBackingFactoryIOSurface::CreateSharedImage(
+ const Mailbox& mailbox,
+ viz::ResourceFormat format,
+ const gfx::Size& size,
+ const gfx::ColorSpace& color_space,
+ uint32_t usage,
+ base::span<const uint8_t> pixel_data) {
+ NOTIMPLEMENTED();
+ return nullptr;
+}
+std::unique_ptr<SharedImageBacking>
+SharedImageBackingFactoryIOSurface::CreateSharedImage(
+ const Mailbox& mailbox,
+ int client_id,
+ gfx::GpuMemoryBufferHandle handle,
+ gfx::BufferFormat format,
+ SurfaceHandle surface_handle,
+ const gfx::Size& size,
+ const gfx::ColorSpace& color_space,
+ uint32_t usage) {
+ NOTIMPLEMENTED();
+ return nullptr;
+}
+
+} // namespace gpu
diff --git a/chromium/gpu/command_buffer/service/shared_image_backing_factory_iosurface_unittest.cc b/chromium/gpu/command_buffer/service/shared_image_backing_factory_iosurface_unittest.cc
new file mode 100644
index 00000000000..aeb672a6315
--- /dev/null
+++ b/chromium/gpu/command_buffer/service/shared_image_backing_factory_iosurface_unittest.cc
@@ -0,0 +1,237 @@
+// Copyright 2019 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "gpu/command_buffer/service/shared_image_backing_factory_iosurface.h"
+
+#include <memory>
+#include <utility>
+
+#include "base/bind_helpers.h"
+#include "gpu/command_buffer/common/shared_image_usage.h"
+#include "gpu/command_buffer/service/mailbox_manager_impl.h"
+#include "gpu/command_buffer/service/shared_context_state.h"
+#include "gpu/command_buffer/service/shared_image_factory.h"
+#include "gpu/command_buffer/service/shared_image_manager.h"
+#include "gpu/command_buffer/service/shared_image_representation.h"
+#include "gpu/config/gpu_driver_bug_workarounds.h"
+#include "gpu/config/gpu_feature_info.h"
+#include "gpu/config/gpu_preferences.h"
+#include "testing/gtest/include/gtest/gtest.h"
+#include "third_party/skia/include/core/SkImage.h"
+#include "third_party/skia/include/core/SkPromiseImageTexture.h"
+#include "third_party/skia/include/core/SkSurface.h"
+#include "third_party/skia/include/gpu/GrBackendSurface.h"
+#include "ui/gl/gl_context.h"
+#include "ui/gl/gl_surface.h"
+#include "ui/gl/init/gl_factory.h"
+
+namespace gpu {
+namespace {
+
+class SharedImageBackingFactoryIOSurfaceTest : public testing::Test {
+ public:
+ void SetUp() override {
+ surface_ = gl::init::CreateOffscreenGLSurface(gfx::Size());
+ ASSERT_TRUE(surface_);
+ context_ = gl::init::CreateGLContext(nullptr, surface_.get(),
+ gl::GLContextAttribs());
+ ASSERT_TRUE(context_);
+ bool result = context_->MakeCurrent(surface_.get());
+ ASSERT_TRUE(result);
+
+ GpuDriverBugWorkarounds workarounds;
+ scoped_refptr<gl::GLShareGroup> share_group = new gl::GLShareGroup();
+ context_state_ = base::MakeRefCounted<SharedContextState>(
+ std::move(share_group), surface_, context_,
+ false /* use_virtualized_gl_contexts */, base::DoNothing());
+ context_state_->InitializeGrContext(workarounds, nullptr);
+ auto feature_info =
+ base::MakeRefCounted<gles2::FeatureInfo>(workarounds, GpuFeatureInfo());
+ context_state_->InitializeGL(GpuPreferences(), std::move(feature_info));
+
+ backing_factory_ = std::make_unique<SharedImageBackingFactoryIOSurface>(
+ workarounds, GpuFeatureInfo());
+
+ memory_type_tracker_ = std::make_unique<MemoryTypeTracker>(nullptr);
+ shared_image_representation_factory_ =
+ std::make_unique<SharedImageRepresentationFactory>(
+ &shared_image_manager_, nullptr);
+ }
+
+ GrContext* gr_context() { return context_state_->gr_context(); }
+
+ protected:
+ scoped_refptr<gl::GLSurface> surface_;
+ scoped_refptr<gl::GLContext> context_;
+ scoped_refptr<SharedContextState> context_state_;
+ std::unique_ptr<SharedImageBackingFactoryIOSurface> backing_factory_;
+ gles2::MailboxManagerImpl mailbox_manager_;
+ SharedImageManager shared_image_manager_;
+ std::unique_ptr<MemoryTypeTracker> memory_type_tracker_;
+ std::unique_ptr<SharedImageRepresentationFactory>
+ shared_image_representation_factory_;
+};
+
+// Basic test to check creation and deletion of IOSurface backed shared image.
+TEST_F(SharedImageBackingFactoryIOSurfaceTest, Basic) {
+ Mailbox mailbox = Mailbox::GenerateForSharedImage();
+ viz::ResourceFormat format = viz::ResourceFormat::RGBA_8888;
+ gfx::Size size(256, 256);
+ gfx::ColorSpace color_space = gfx::ColorSpace::CreateSRGB();
+ uint32_t usage = SHARED_IMAGE_USAGE_GLES2 | SHARED_IMAGE_USAGE_DISPLAY;
+
+ auto backing = backing_factory_->CreateSharedImage(mailbox, format, size,
+ color_space, usage);
+ EXPECT_TRUE(backing);
+
+ // Check clearing.
+ if (!backing->IsCleared()) {
+ backing->SetCleared();
+ EXPECT_TRUE(backing->IsCleared());
+ }
+
+ // First, validate via a legacy mailbox.
+ GLenum expected_target = GL_TEXTURE_RECTANGLE;
+ EXPECT_TRUE(backing->ProduceLegacyMailbox(&mailbox_manager_));
+ TextureBase* texture_base = mailbox_manager_.ConsumeTexture(mailbox);
+
+ // Currently there is no support for passthrough texture on Mac and hence
+ // in IOSurface backing. So the TextureBase* should be pointing to a Texture
+ // object.
+ auto* texture = gles2::Texture::CheckedCast(texture_base);
+ ASSERT_TRUE(texture);
+ EXPECT_EQ(texture->target(), expected_target);
+ EXPECT_TRUE(texture->IsImmutable());
+ int width, height, depth;
+ bool has_level =
+ texture->GetLevelSize(GL_TEXTURE_2D, 0, &width, &height, &depth);
+ EXPECT_TRUE(has_level);
+ EXPECT_EQ(width, size.width());
+ EXPECT_EQ(height, size.height());
+
+ // Next validate via a SharedImageRepresentationGLTexture.
+ std::unique_ptr<SharedImageRepresentationFactoryRef> factory_ref =
+ shared_image_manager_.Register(std::move(backing),
+ memory_type_tracker_.get());
+ auto gl_representation =
+ shared_image_representation_factory_->ProduceGLTexture(mailbox);
+ EXPECT_TRUE(gl_representation);
+ EXPECT_TRUE(gl_representation->GetTexture()->service_id());
+ EXPECT_EQ(expected_target, gl_representation->GetTexture()->target());
+ EXPECT_EQ(size, gl_representation->size());
+ EXPECT_EQ(format, gl_representation->format());
+ EXPECT_EQ(color_space, gl_representation->color_space());
+ EXPECT_EQ(usage, gl_representation->usage());
+ gl_representation.reset();
+
+ // Finally, validate a SharedImageRepresentationSkia.
+ auto skia_representation =
+ shared_image_representation_factory_->ProduceSkia(mailbox);
+ EXPECT_TRUE(skia_representation);
+ auto surface = skia_representation->BeginWriteAccess(
+ gr_context(), 0, SkSurfaceProps(0, kUnknown_SkPixelGeometry));
+ EXPECT_TRUE(surface);
+ EXPECT_EQ(size.width(), surface->width());
+ EXPECT_EQ(size.height(), surface->height());
+ skia_representation->EndWriteAccess(std::move(surface));
+ auto promise_texture = skia_representation->BeginReadAccess(nullptr);
+ EXPECT_TRUE(promise_texture);
+ if (promise_texture) {
+ GrBackendTexture backend_texture = promise_texture->backendTexture();
+ EXPECT_TRUE(backend_texture.isValid());
+ EXPECT_EQ(size.width(), backend_texture.width());
+ EXPECT_EQ(size.height(), backend_texture.height());
+ }
+ skia_representation->EndReadAccess();
+ skia_representation.reset();
+
+ factory_ref.reset();
+ EXPECT_FALSE(mailbox_manager_.ConsumeTexture(mailbox));
+}
+
+// Test to check interaction between Gl and skia GL representations.
+// We write to a GL texture using gl representation and then read from skia
+// representation.
+TEST_F(SharedImageBackingFactoryIOSurfaceTest, GLSkiaGL) {
+ // Create a backing using mailbox.
+ auto mailbox = Mailbox::GenerateForSharedImage();
+ auto format = viz::ResourceFormat::RGBA_8888;
+ gfx::Size size(1, 1);
+ auto color_space = gfx::ColorSpace::CreateSRGB();
+ uint32_t usage = SHARED_IMAGE_USAGE_GLES2 | SHARED_IMAGE_USAGE_DISPLAY;
+ auto backing = backing_factory_->CreateSharedImage(mailbox, format, size,
+ color_space, usage);
+ EXPECT_TRUE(backing);
+
+ GLenum expected_target = GL_TEXTURE_RECTANGLE;
+ std::unique_ptr<SharedImageRepresentationFactoryRef> factory_ref =
+ shared_image_manager_.Register(std::move(backing),
+ memory_type_tracker_.get());
+
+ // Create a SharedImageRepresentationGLTexture.
+ auto gl_representation =
+ shared_image_representation_factory_->ProduceGLTexture(mailbox);
+ EXPECT_TRUE(gl_representation);
+ EXPECT_EQ(expected_target, gl_representation->GetTexture()->target());
+
+ // Create an FBO.
+ GLuint fbo = 0;
+ gl::GLApi* api = gl::g_current_gl_context;
+ api->glGenFramebuffersEXTFn(1, &fbo);
+ api->glBindFramebufferEXTFn(GL_FRAMEBUFFER, fbo);
+
+ // Attach the texture to FBO.
+ api->glFramebufferTexture2DEXTFn(
+ GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0,
+ gl_representation->GetTexture()->target(),
+ gl_representation->GetTexture()->service_id(), 0);
+
+ // Set the clear color to green.
+ api->glClearColorFn(0.0f, 1.0f, 0.0f, 1.0f);
+ api->glClearFn(GL_COLOR_BUFFER_BIT);
+ gl_representation.reset();
+
+ // Next create a SharedImageRepresentationSkia to read back the texture data.
+ auto skia_representation =
+ shared_image_representation_factory_->ProduceSkia(mailbox);
+ EXPECT_TRUE(skia_representation);
+ auto promise_texture = skia_representation->BeginReadAccess(nullptr);
+ EXPECT_TRUE(promise_texture);
+ if (promise_texture) {
+ GrBackendTexture backend_texture = promise_texture->backendTexture();
+ EXPECT_TRUE(backend_texture.isValid());
+ EXPECT_EQ(size.width(), backend_texture.width());
+ EXPECT_EQ(size.height(), backend_texture.height());
+ }
+
+ // Create an Sk Image from GrBackendTexture.
+ auto sk_image = SkImage::MakeFromTexture(
+ gr_context(), promise_texture->backendTexture(), kTopLeft_GrSurfaceOrigin,
+ kRGBA_8888_SkColorType, kOpaque_SkAlphaType, nullptr);
+
+ SkImageInfo dst_info =
+ SkImageInfo::Make(size.width(), size.height(), kRGBA_8888_SkColorType,
+ kOpaque_SkAlphaType, nullptr);
+
+ const int num_pixels = size.width() * size.height();
+ std::unique_ptr<uint8_t[]> dst_pixels(new uint8_t[num_pixels * 4]());
+
+ // Read back pixels from Sk Image.
+ EXPECT_TRUE(sk_image->readPixels(dst_info, dst_pixels.get(),
+ dst_info.minRowBytes(), 0, 0));
+ skia_representation->EndReadAccess();
+
+ // Compare the pixel values.
+ EXPECT_EQ(dst_pixels[0], 0);
+ EXPECT_EQ(dst_pixels[1], 255);
+ EXPECT_EQ(dst_pixels[2], 0);
+ EXPECT_EQ(dst_pixels[3], 255);
+
+ skia_representation.reset();
+ factory_ref.reset();
+ EXPECT_FALSE(mailbox_manager_.ConsumeTexture(mailbox));
+}
+
+} // anonymous namespace
+} // namespace gpu
diff --git a/chromium/gpu/command_buffer/service/shared_image_factory.cc b/chromium/gpu/command_buffer/service/shared_image_factory.cc
index e848d6c53e1..c53291ad460 100644
--- a/chromium/gpu/command_buffer/service/shared_image_factory.cc
+++ b/chromium/gpu/command_buffer/service/shared_image_factory.cc
@@ -8,6 +8,7 @@
#include "base/strings/stringprintf.h"
#include "base/trace_event/memory_dump_manager.h"
+#include "build/build_config.h"
#include "components/viz/common/resources/resource_format_utils.h"
#include "gpu/command_buffer/common/gpu_memory_buffer_support.h"
#include "gpu/command_buffer/common/shared_image_trace_utils.h"
@@ -25,6 +26,14 @@
#include "gpu/config/gpu_preferences.h"
#include "ui/gl/trace_util.h"
+#if defined(USE_X11) && BUILDFLAG(ENABLE_VULKAN)
+#include "gpu/command_buffer/service/external_vk_image_factory.h"
+#elif defined(OS_ANDROID) && BUILDFLAG(ENABLE_VULKAN)
+#include "gpu/command_buffer/service/shared_image_backing_factory_ahardwarebuffer.h"
+#elif defined(OS_MACOSX)
+#include "gpu/command_buffer/service/shared_image_backing_factory_iosurface.h"
+#endif
+
namespace gpu {
// Overrides for flat_set lookups:
bool operator<(
@@ -62,10 +71,25 @@ SharedImageFactory::SharedImageFactory(
workarounds,
gpu_feature_info,
image_factory)),
+#if defined(USE_X11) && BUILDFLAG(ENABLE_VULKAN)
+ interop_backing_factory_(
+ std::make_unique<ExternalVkImageFactory>(context_state)),
+#elif defined(OS_ANDROID) && BUILDFLAG(ENABLE_VULKAN)
+ interop_backing_factory_(
+ std::make_unique<SharedImageBackingFactoryAHB>(workarounds,
+ gpu_feature_info,
+ context_state)),
+#elif defined(OS_MACOSX)
+ interop_backing_factory_(
+ std::make_unique<SharedImageBackingFactoryIOSurface>(
+ workarounds,
+ gpu_feature_info)),
+#endif
wrapped_sk_image_factory_(
gpu_preferences.enable_raster_to_sk_image
? std::make_unique<raster::WrappedSkImageFactory>(context_state)
- : nullptr) {}
+ : nullptr) {
+}
SharedImageFactory::~SharedImageFactory() {
DCHECK(shared_images_.empty());
@@ -76,18 +100,47 @@ bool SharedImageFactory::CreateSharedImage(const Mailbox& mailbox,
const gfx::Size& size,
const gfx::ColorSpace& color_space,
uint32_t usage) {
+ if (using_vulkan_ && (usage & SHARED_IMAGE_USAGE_GLES2) &&
+ (usage & SHARED_IMAGE_USAGE_OOP_RASTERIZATION)) {
+ // TODO(crbug.com/932214): The interop backings don't currently support
+ // Vulkan writes so they cannot be used for OOP-R.
+ LOG(ERROR) << "Bad SharedImage usage combination: "
+ << "SHARED_IMAGE_USAGE_GLES2 | "
+ << "SHARED_IMAGE_USAGE_OOP_RASTERIZATION";
+ return false;
+ }
std::unique_ptr<SharedImageBacking> backing;
bool using_wrapped_sk_image = wrapped_sk_image_factory_ &&
(usage & SHARED_IMAGE_USAGE_OOP_RASTERIZATION);
+ // If |shared_image_manager_| is thread safe, it means the display is running
+ // on a separate thread (which uses a separate GL context or VkDeviceQueue).
+ bool share_between_threads = shared_image_manager_->is_thread_safe() &&
+ (usage & SHARED_IMAGE_USAGE_DISPLAY);
+ bool share_between_gl_vulkan = using_vulkan_ &&
+ (usage & SHARED_IMAGE_USAGE_GLES2) &&
+ (usage & SHARED_IMAGE_USAGE_DISPLAY);
+ bool using_interop_factory = share_between_threads || share_between_gl_vulkan;
+ // TODO(penghuang): make sure all shared image are created with correct usage.
+ // https://crbug.com/937480
+ // using_interop_factory = shared_image_manager_->is_thread_safe();
if (using_wrapped_sk_image) {
backing = wrapped_sk_image_factory_->CreateSharedImage(
mailbox, format, size, color_space, usage);
+ } else if (using_interop_factory) {
+ if (!interop_backing_factory_) {
+ LOG(ERROR) << "Unable to create SharedImage backing: GL / Vulkan "
+ << "interoperability is not supported on this platform";
+ return false;
+ }
+ backing = interop_backing_factory_->CreateSharedImage(mailbox, format, size,
+ color_space, usage);
} else {
backing = backing_factory_->CreateSharedImage(mailbox, format, size,
color_space, usage);
}
- return RegisterBacking(std::move(backing), !using_wrapped_sk_image);
+ return RegisterBacking(std::move(backing),
+ !using_wrapped_sk_image && !using_interop_factory);
}
bool SharedImageFactory::CreateSharedImage(const Mailbox& mailbox,
@@ -214,6 +267,12 @@ SharedImageRepresentationFactory::ProduceGLTexture(const Mailbox& mailbox) {
return manager_->ProduceGLTexture(mailbox, tracker_.get());
}
+std::unique_ptr<SharedImageRepresentationGLTexture>
+SharedImageRepresentationFactory::ProduceRGBEmulationGLTexture(
+ const Mailbox& mailbox) {
+ return manager_->ProduceRGBEmulationGLTexture(mailbox, tracker_.get());
+}
+
std::unique_ptr<SharedImageRepresentationGLTexturePassthrough>
SharedImageRepresentationFactory::ProduceGLTexturePassthrough(
const Mailbox& mailbox) {
diff --git a/chromium/gpu/command_buffer/service/shared_image_factory.h b/chromium/gpu/command_buffer/service/shared_image_factory.h
index 8968052f892..ed7b17b75b2 100644
--- a/chromium/gpu/command_buffer/service/shared_image_factory.h
+++ b/chromium/gpu/command_buffer/service/shared_image_factory.h
@@ -92,6 +92,8 @@ class GPU_GLES2_EXPORT SharedImageFactory {
// eventually.
std::unique_ptr<SharedImageBackingFactory> backing_factory_;
+ std::unique_ptr<SharedImageBackingFactory> interop_backing_factory_;
+
// Non-null if gpu_preferences.enable_raster_to_sk_image.
std::unique_ptr<raster::WrappedSkImageFactory> wrapped_sk_image_factory_;
};
@@ -106,6 +108,8 @@ class GPU_GLES2_EXPORT SharedImageRepresentationFactory {
// MemoryTypeTracker.
std::unique_ptr<SharedImageRepresentationGLTexture> ProduceGLTexture(
const Mailbox& mailbox);
+ std::unique_ptr<SharedImageRepresentationGLTexture>
+ ProduceRGBEmulationGLTexture(const Mailbox& mailbox);
std::unique_ptr<SharedImageRepresentationGLTexturePassthrough>
ProduceGLTexturePassthrough(const Mailbox& mailbox);
std::unique_ptr<SharedImageRepresentationSkia> ProduceSkia(
diff --git a/chromium/gpu/command_buffer/service/shared_image_manager.cc b/chromium/gpu/command_buffer/service/shared_image_manager.cc
index d9abf9b9d75..5bb02bb3e4b 100644
--- a/chromium/gpu/command_buffer/service/shared_image_manager.cc
+++ b/chromium/gpu/command_buffer/service/shared_image_manager.cc
@@ -3,18 +3,30 @@
// found in the LICENSE file.
#include "gpu/command_buffer/service/shared_image_manager.h"
-#include "gpu/command_buffer/service/shared_image_representation.h"
#include <inttypes.h>
+#include <utility>
+
#include "base/logging.h"
#include "base/strings/stringprintf.h"
#include "base/trace_event/memory_dump_manager.h"
#include "base/trace_event/process_memory_dump.h"
#include "base/trace_event/trace_event.h"
#include "gpu/command_buffer/common/shared_image_trace_utils.h"
+#include "gpu/command_buffer/service/shared_image_representation.h"
#include "ui/gl/trace_util.h"
+#if DCHECK_IS_ON()
+#define CALLED_ON_VALID_THREAD() \
+ do { \
+ if (!this->is_thread_safe()) \
+ DCHECK_CALLED_ON_VALID_THREAD(thread_checker_); \
+ } while (false)
+#else
+#define CALLED_ON_VALID_THREAD()
+#endif
+
namespace gpu {
// Overrides for flat_set lookups:
bool operator<(const std::unique_ptr<SharedImageBacking>& lhs,
@@ -32,42 +44,75 @@ bool operator<(const std::unique_ptr<SharedImageBacking>& lhs,
return lhs->mailbox() < rhs;
}
-SharedImageManager::SharedImageManager() = default;
+class SharedImageManager::AutoLock {
+ public:
+ explicit AutoLock(SharedImageManager* manager) {
+ if (manager->is_thread_safe())
+ auto_lock_.emplace(manager->lock_.value());
+ }
+ ~AutoLock() = default;
+
+ private:
+ base::Optional<base::AutoLock> auto_lock_;
+
+ DISALLOW_COPY_AND_ASSIGN(AutoLock);
+};
+
+SharedImageManager::SharedImageManager(bool thread_safe) {
+ if (thread_safe)
+ lock_.emplace();
+ CALLED_ON_VALID_THREAD();
+}
SharedImageManager::~SharedImageManager() {
+ CALLED_ON_VALID_THREAD();
+#if DCHECK_IS_ON()
+ AutoLock auto_lock(this);
+#endif
DCHECK(images_.empty());
}
std::unique_ptr<SharedImageRepresentationFactoryRef>
SharedImageManager::Register(std::unique_ptr<SharedImageBacking> backing,
MemoryTypeTracker* tracker) {
+ CALLED_ON_VALID_THREAD();
DCHECK(backing->mailbox().IsSharedImage());
- if (images_.find(backing->mailbox()) != images_.end()) {
- LOG(ERROR) << "ShraedImageManager::Register: Trying to register an "
+
+ AutoLock autolock(this);
+ const auto lower_bound = images_.lower_bound(backing->mailbox());
+ if (lower_bound != images_.end() &&
+ (*lower_bound)->mailbox() == backing->mailbox()) {
+ LOG(ERROR) << "SharedImageManager::Register: Trying to register an "
"already registered mailbox.";
backing->Destroy();
return nullptr;
}
+
auto factory_ref = std::make_unique<SharedImageRepresentationFactoryRef>(
this, backing.get(), tracker);
- images_.emplace(std::move(backing));
+ images_.emplace_hint(lower_bound, std::move(backing));
return factory_ref;
}
void SharedImageManager::OnContextLost(const Mailbox& mailbox) {
+ CALLED_ON_VALID_THREAD();
+
+ AutoLock autolock(this);
auto found = images_.find(mailbox);
if (found == images_.end()) {
LOG(ERROR) << "SharedImageManager::OnContextLost: Trying to mark constext "
"lost on a non existent mailbox.";
return;
}
-
(*found)->OnContextLost();
}
std::unique_ptr<SharedImageRepresentationGLTexture>
SharedImageManager::ProduceGLTexture(const Mailbox& mailbox,
MemoryTypeTracker* tracker) {
+ CALLED_ON_VALID_THREAD();
+
+ AutoLock autolock(this);
auto found = images_.find(mailbox);
if (found == images_.end()) {
LOG(ERROR) << "SharedImageManager::ProduceGLTexture: Trying to produce a "
@@ -85,9 +130,32 @@ SharedImageManager::ProduceGLTexture(const Mailbox& mailbox,
return representation;
}
+std::unique_ptr<SharedImageRepresentationGLTexture>
+SharedImageManager::ProduceRGBEmulationGLTexture(const Mailbox& mailbox,
+ MemoryTypeTracker* tracker) {
+ auto found = images_.find(mailbox);
+ if (found == images_.end()) {
+ LOG(ERROR) << "SharedImageManager::ProduceRGBEmulationGLTexture: Trying to "
+ "produce a representation from a non-existent mailbox.";
+ return nullptr;
+ }
+
+ auto representation = (*found)->ProduceRGBEmulationGLTexture(this, tracker);
+ if (!representation) {
+ LOG(ERROR) << "SharedImageManager::ProduceRGBEmulationGLTexture: Trying to "
+ "produce a representation from an incompatible mailbox.";
+ return nullptr;
+ }
+
+ return representation;
+}
+
std::unique_ptr<SharedImageRepresentationGLTexturePassthrough>
SharedImageManager::ProduceGLTexturePassthrough(const Mailbox& mailbox,
MemoryTypeTracker* tracker) {
+ CALLED_ON_VALID_THREAD();
+
+ AutoLock autolock(this);
auto found = images_.find(mailbox);
if (found == images_.end()) {
LOG(ERROR) << "SharedImageManager::ProduceGLTexturePassthrough: Trying to "
@@ -108,6 +176,9 @@ SharedImageManager::ProduceGLTexturePassthrough(const Mailbox& mailbox,
std::unique_ptr<SharedImageRepresentationSkia> SharedImageManager::ProduceSkia(
const Mailbox& mailbox,
MemoryTypeTracker* tracker) {
+ CALLED_ON_VALID_THREAD();
+
+ AutoLock autolock(this);
auto found = images_.find(mailbox);
if (found == images_.end()) {
LOG(ERROR) << "SharedImageManager::ProduceSkia: Trying to Produce a "
@@ -128,6 +199,9 @@ std::unique_ptr<SharedImageRepresentationSkia> SharedImageManager::ProduceSkia(
void SharedImageManager::OnRepresentationDestroyed(
const Mailbox& mailbox,
SharedImageRepresentation* representation) {
+ CALLED_ON_VALID_THREAD();
+
+ AutoLock autolock(this);
auto found = images_.find(mailbox);
if (found == images_.end()) {
LOG(ERROR) << "SharedImageManager::OnRepresentationDestroyed: Trying to "
@@ -149,6 +223,9 @@ void SharedImageManager::OnMemoryDump(const Mailbox& mailbox,
base::trace_event::ProcessMemoryDump* pmd,
int client_id,
uint64_t client_tracing_id) {
+ CALLED_ON_VALID_THREAD();
+
+ AutoLock autolock(this);
auto found = images_.find(mailbox);
if (found == images_.end()) {
LOG(ERROR) << "SharedImageManager::OnMemoryDump: Trying to dump memory for "
diff --git a/chromium/gpu/command_buffer/service/shared_image_manager.h b/chromium/gpu/command_buffer/service/shared_image_manager.h
index 51134762305..043dc6016f8 100644
--- a/chromium/gpu/command_buffer/service/shared_image_manager.h
+++ b/chromium/gpu/command_buffer/service/shared_image_manager.h
@@ -6,6 +6,9 @@
#define GPU_COMMAND_BUFFER_SERVICE_SHARED_IMAGE_MANAGER_H_
#include "base/containers/flat_set.h"
+#include "base/optional.h"
+#include "base/synchronization/lock.h"
+#include "base/threading/thread_checker.h"
#include "gpu/command_buffer/common/mailbox.h"
#include "gpu/command_buffer/service/shared_image_backing.h"
#include "gpu/gpu_gles2_export.h"
@@ -15,7 +18,7 @@ class SharedImageRepresentationFactoryRef;
class GPU_GLES2_EXPORT SharedImageManager {
public:
- SharedImageManager();
+ explicit SharedImageManager(bool thread_safe = false);
~SharedImageManager();
// Registers a SharedImageBacking with the manager and returns a
@@ -34,6 +37,8 @@ class GPU_GLES2_EXPORT SharedImageManager {
std::unique_ptr<SharedImageRepresentationGLTexture> ProduceGLTexture(
const Mailbox& mailbox,
MemoryTypeTracker* ref);
+ std::unique_ptr<SharedImageRepresentationGLTexture>
+ ProduceRGBEmulationGLTexture(const Mailbox& mailbox, MemoryTypeTracker* ref);
std::unique_ptr<SharedImageRepresentationGLTexturePassthrough>
ProduceGLTexturePassthrough(const Mailbox& mailbox, MemoryTypeTracker* ref);
std::unique_ptr<SharedImageRepresentationSkia> ProduceSkia(
@@ -50,9 +55,17 @@ class GPU_GLES2_EXPORT SharedImageManager {
int client_id,
uint64_t client_tracing_id);
+ bool is_thread_safe() const { return !!lock_; }
+
private:
+ class AutoLock;
+ // The lock for protecting |images_|.
+ base::Optional<base::Lock> lock_;
+
base::flat_set<std::unique_ptr<SharedImageBacking>> images_;
+ THREAD_CHECKER(thread_checker_);
+
DISALLOW_COPY_AND_ASSIGN(SharedImageManager);
};
diff --git a/chromium/gpu/command_buffer/service/shared_image_representation.h b/chromium/gpu/command_buffer/service/shared_image_representation.h
index d9a3464e116..a1c5e550a98 100644
--- a/chromium/gpu/command_buffer/service/shared_image_representation.h
+++ b/chromium/gpu/command_buffer/service/shared_image_representation.h
@@ -43,15 +43,20 @@ class GPU_GLES2_EXPORT SharedImageRepresentation {
// Indicates that the underlying graphics context has been lost, and the
// backing should be treated as destroyed.
- void OnContextLost() { backing_->OnContextLost(); }
+ void OnContextLost() {
+ has_context_ = false;
+ backing_->OnContextLost();
+ }
protected:
SharedImageBacking* backing() const { return backing_; }
+ bool has_context() const { return has_context_; }
private:
SharedImageManager* manager_;
SharedImageBacking* backing_;
MemoryTypeTracker* tracker_;
+ bool has_context_ = true;
};
class SharedImageRepresentationFactoryRef : public SharedImageRepresentation {
@@ -71,6 +76,24 @@ class SharedImageRepresentationFactoryRef : public SharedImageRepresentation {
class GPU_GLES2_EXPORT SharedImageRepresentationGLTexture
: public SharedImageRepresentation {
public:
+ class ScopedAccess {
+ public:
+ ScopedAccess(SharedImageRepresentationGLTexture* representation,
+ GLenum mode)
+ : representation_(representation),
+ success_(representation_->BeginAccess(mode)) {}
+ ~ScopedAccess() {
+ if (success_)
+ representation_->EndAccess();
+ }
+
+ bool success() const { return success_; }
+
+ private:
+ SharedImageRepresentationGLTexture* representation_;
+ bool success_;
+ };
+
SharedImageRepresentationGLTexture(SharedImageManager* manager,
SharedImageBacking* backing,
MemoryTypeTracker* tracker)
@@ -87,6 +110,24 @@ class GPU_GLES2_EXPORT SharedImageRepresentationGLTexture
class GPU_GLES2_EXPORT SharedImageRepresentationGLTexturePassthrough
: public SharedImageRepresentation {
public:
+ class ScopedAccess {
+ public:
+ ScopedAccess(SharedImageRepresentationGLTexturePassthrough* representation,
+ GLenum mode)
+ : representation_(representation),
+ success_(representation_->BeginAccess(mode)) {}
+ ~ScopedAccess() {
+ if (success_)
+ representation_->EndAccess();
+ }
+
+ bool success() const { return success_; }
+
+ private:
+ SharedImageRepresentationGLTexturePassthrough* representation_;
+ bool success_;
+ };
+
SharedImageRepresentationGLTexturePassthrough(SharedImageManager* manager,
SharedImageBacking* backing,
MemoryTypeTracker* tracker)
diff --git a/chromium/gpu/command_buffer/service/test_helper.cc b/chromium/gpu/command_buffer/service/test_helper.cc
index 86c8fab2f14..55ad0276ebc 100644
--- a/chromium/gpu/command_buffer/service/test_helper.cc
+++ b/chromium/gpu/command_buffer/service/test_helper.cc
@@ -919,7 +919,7 @@ void TestHelper::SetupProgramSuccessExpectations(
}
for (GLsizei jj = 1; jj < info.size; ++jj) {
std::string element_name(std::string(base_name) + "[" +
- base::IntToString(jj) + "]");
+ base::NumberToString(jj) + "]");
EXPECT_CALL(*gl, GetUniformLocation(service_id, StrEq(element_name)))
.WillOnce(Return(info.real_location + jj * 2))
.RetiresOnSaturation();
diff --git a/chromium/gpu/command_buffer/service/texture_definition.cc b/chromium/gpu/command_buffer/service/texture_definition.cc
index 8d6e07c3514..91288c6fd84 100644
--- a/chromium/gpu/command_buffer/service/texture_definition.cc
+++ b/chromium/gpu/command_buffer/service/texture_definition.cc
@@ -34,6 +34,7 @@ class GLImageSync : public gl::GLImage {
// Implement GLImage.
gfx::Size GetSize() override;
unsigned GetInternalFormat() override;
+ BindOrCopy ShouldBindOrCopy() override;
bool BindTexImage(unsigned target) override;
void ReleaseTexImage(unsigned target) override;
bool CopyTexImage(unsigned target) override;
@@ -83,6 +84,10 @@ unsigned GLImageSync::GetInternalFormat() {
return GL_RGBA;
}
+GLImageSync::BindOrCopy GLImageSync::ShouldBindOrCopy() {
+ return BIND;
+}
+
bool GLImageSync::BindTexImage(unsigned target) {
NOTREACHED();
return false;
diff --git a/chromium/gpu/command_buffer/service/texture_manager.cc b/chromium/gpu/command_buffer/service/texture_manager.cc
index c8632ba30b3..380f521ce06 100644
--- a/chromium/gpu/command_buffer/service/texture_manager.cc
+++ b/chromium/gpu/command_buffer/service/texture_manager.cc
@@ -2068,6 +2068,7 @@ void TextureManager::Initialize() {
GL_TEXTURE_CUBE_MAP, &black_texture_ids_[kCubeMap]);
if (feature_info_->IsWebGL2OrES3Context()) {
+ DCHECK(feature_info_->IsES3Capable());
default_textures_[kTexture3D] = CreateDefaultAndBlackTextures(
GL_TEXTURE_3D, &black_texture_ids_[kTexture3D]);
default_textures_[kTexture2DArray] = CreateDefaultAndBlackTextures(
@@ -2199,6 +2200,7 @@ bool TextureManager::ClearRenderableLevels(DecoderContext* decoder,
return ref->texture()->ClearRenderableLevels(decoder);
}
+// static
bool TextureManager::ClearTextureLevel(DecoderContext* decoder,
TextureRef* ref,
GLenum target,
@@ -2208,6 +2210,7 @@ bool TextureManager::ClearTextureLevel(DecoderContext* decoder,
return ClearTextureLevel(decoder, texture, target, level);
}
+// static
bool TextureManager::ClearTextureLevel(DecoderContext* decoder,
Texture* texture,
GLenum target,
diff --git a/chromium/gpu/command_buffer/service/texture_manager.h b/chromium/gpu/command_buffer/service/texture_manager.h
index fe981ad524b..1f66199e48a 100644
--- a/chromium/gpu/command_buffer/service/texture_manager.h
+++ b/chromium/gpu/command_buffer/service/texture_manager.h
@@ -34,11 +34,18 @@ class ProgressReporter;
namespace gpu {
class DecoderContext;
+class ExternalVkImageBacking;
+class ExternalVkImageGlRepresentation;
class ServiceDiscardableManager;
class SharedImageBackingGLTexture;
class SharedImageBackingFactoryGLTexture;
class SharedImageBackingAHB;
class SharedImageRepresentationGLTexture;
+class SharedImageRepresentationGLTextureAHB;
+class SharedImageRepresentationSkiaGLAHB;
+class SharedImageBackingIOSurface;
+class SharedImageRepresentationGLTextureIOSurface;
+class SharedImageRepresentationSkiaIOSurface;
namespace gles2 {
class GLStreamTextureImage;
@@ -364,9 +371,16 @@ class GPU_GLES2_EXPORT Texture final : public TextureBase {
private:
friend class MailboxManagerSync;
friend class MailboxManagerTest;
+ friend class gpu::ExternalVkImageBacking;
+ friend class gpu::ExternalVkImageGlRepresentation;
friend class gpu::SharedImageBackingGLTexture;
friend class gpu::SharedImageBackingFactoryGLTexture;
friend class gpu::SharedImageBackingAHB;
+ friend class gpu::SharedImageRepresentationGLTextureAHB;
+ friend class gpu::SharedImageRepresentationSkiaGLAHB;
+ friend class gpu::SharedImageBackingIOSurface;
+ friend class gpu::SharedImageRepresentationGLTextureIOSurface;
+ friend class gpu::SharedImageRepresentationSkiaIOSurface;
friend class TextureDefinition;
friend class TextureManager;
friend class TextureRef;
@@ -935,15 +949,15 @@ class GPU_GLES2_EXPORT TextureManager
bool ClearRenderableLevels(DecoderContext* decoder, TextureRef* ref);
// Clear a specific level.
- bool ClearTextureLevel(DecoderContext* decoder,
- TextureRef* ref,
- GLenum target,
- GLint level);
-
- bool ClearTextureLevel(DecoderContext* decoder,
- Texture* texture,
- GLenum target,
- GLint level);
+ static bool ClearTextureLevel(DecoderContext* decoder,
+ TextureRef* ref,
+ GLenum target,
+ GLint level);
+
+ static bool ClearTextureLevel(DecoderContext* decoder,
+ Texture* texture,
+ GLenum target,
+ GLint level);
// Creates a new texture info.
TextureRef* CreateTexture(GLuint client_id, GLuint service_id);
diff --git a/chromium/gpu/command_buffer/service/texture_manager_unittest.cc b/chromium/gpu/command_buffer/service/texture_manager_unittest.cc
index 7a61c0733b5..032b734c481 100644
--- a/chromium/gpu/command_buffer/service/texture_manager_unittest.cc
+++ b/chromium/gpu/command_buffer/service/texture_manager_unittest.cc
@@ -2134,9 +2134,9 @@ TEST_P(ProduceConsumeTextureTest, ProduceConsumeTextureWithImage) {
static const GLenum kTextureTargets[] = {GL_TEXTURE_2D, GL_TEXTURE_EXTERNAL_OES,
GL_TEXTURE_RECTANGLE_ARB, };
-INSTANTIATE_TEST_CASE_P(Target,
- ProduceConsumeTextureTest,
- ::testing::ValuesIn(kTextureTargets));
+INSTANTIATE_TEST_SUITE_P(Target,
+ ProduceConsumeTextureTest,
+ ::testing::ValuesIn(kTextureTargets));
TEST_F(ProduceConsumeTextureTest, ProduceConsumeCube) {
manager_->SetTarget(texture_ref_.get(), GL_TEXTURE_CUBE_MAP);
diff --git a/chromium/gpu/command_buffer/service/transform_feedback_manager.cc b/chromium/gpu/command_buffer/service/transform_feedback_manager.cc
index 95f7a57cae0..90fa073c083 100644
--- a/chromium/gpu/command_buffer/service/transform_feedback_manager.cc
+++ b/chromium/gpu/command_buffer/service/transform_feedback_manager.cc
@@ -17,7 +17,8 @@ TransformFeedback::TransformFeedback(TransformFeedbackManager* manager,
: IndexedBufferBindingHost(
manager->max_transform_feedback_separate_attribs(),
GL_TRANSFORM_FEEDBACK_BUFFER,
- manager->needs_emulation()),
+ manager->needs_emulation(),
+ false),
manager_(manager),
client_id_(client_id),
service_id_(service_id),
diff --git a/chromium/gpu/command_buffer/service/vertex_attrib_manager.cc b/chromium/gpu/command_buffer/service/vertex_attrib_manager.cc
index 4b09b0f5b44..1112c5b3bb1 100644
--- a/chromium/gpu/command_buffer/service/vertex_attrib_manager.cc
+++ b/chromium/gpu/command_buffer/service/vertex_attrib_manager.cc
@@ -286,8 +286,9 @@ bool VertexAttribManager::ValidateBindings(
ERRORSTATE_SET_GL_ERROR(
error_state, GL_INVALID_OPERATION, function_name,
(std::string(
- "attempt to access out of range vertices in attribute ") +
- base::UintToString(attrib->index())).c_str());
+ "attempt to access out of range vertices in attribute ") +
+ base::NumberToString(attrib->index()))
+ .c_str());
return false;
}
if (use_client_side_arrays_for_stream_buffers) {
diff --git a/chromium/gpu/command_buffer/service/webgpu_decoder.cc b/chromium/gpu/command_buffer/service/webgpu_decoder.cc
index 6e1cb4e7d64..8df347ace1f 100644
--- a/chromium/gpu/command_buffer/service/webgpu_decoder.cc
+++ b/chromium/gpu/command_buffer/service/webgpu_decoder.cc
@@ -4,251 +4,17 @@
#include "gpu/command_buffer/service/webgpu_decoder.h"
-#include "base/macros.h"
-#include "gpu/command_buffer/common/webgpu_cmd_format.h"
-#include "gpu/command_buffer/common/webgpu_cmd_ids.h"
-#include "gpu/command_buffer/service/command_buffer_service.h"
+#include "gpu/command_buffer/service/webgpu_decoder_impl.h"
namespace gpu {
namespace webgpu {
-class WebGPUDecoderImpl final : public WebGPUDecoder {
- public:
- WebGPUDecoderImpl(DecoderClient* client,
- CommandBufferServiceBase* command_buffer_service,
- gles2::Outputter* outputter);
- ~WebGPUDecoderImpl() override;
-
- // DecoderContext implementation.
- base::WeakPtr<DecoderContext> AsWeakPtr() override {
- NOTIMPLEMENTED();
- return nullptr;
- }
- ContextResult Initialize(
- const scoped_refptr<gl::GLSurface>& surface,
- const scoped_refptr<gl::GLContext>& context,
- bool offscreen,
- const gles2::DisallowedFeatures& disallowed_features,
- const ContextCreationAttribs& attrib_helper) override {
- return ContextResult::kSuccess;
- }
- const gles2::ContextState* GetContextState() override {
- NOTREACHED();
- return nullptr;
- }
- void Destroy(bool have_context) override {}
- bool MakeCurrent() override { return true; }
- gl::GLContext* GetGLContext() override { return nullptr; }
- gl::GLSurface* GetGLSurface() override {
- NOTREACHED();
- return nullptr;
- }
- const gles2::FeatureInfo* GetFeatureInfo() const override {
- NOTREACHED();
- return nullptr;
- }
- Capabilities GetCapabilities() override { return {}; }
- void RestoreGlobalState() const override { NOTREACHED(); }
- void ClearAllAttributes() const override { NOTREACHED(); }
- void RestoreAllAttributes() const override { NOTREACHED(); }
- void RestoreState(const gles2::ContextState* prev_state) override {
- NOTREACHED();
- }
- void RestoreActiveTexture() const override { NOTREACHED(); }
- void RestoreAllTextureUnitAndSamplerBindings(
- const gles2::ContextState* prev_state) const override {
- NOTREACHED();
- }
- void RestoreActiveTextureUnitBinding(unsigned int target) const override {
- NOTREACHED();
- }
- void RestoreBufferBinding(unsigned int target) override { NOTREACHED(); }
- void RestoreBufferBindings() const override { NOTREACHED(); }
- void RestoreFramebufferBindings() const override { NOTREACHED(); }
- void RestoreRenderbufferBindings() override { NOTREACHED(); }
- void RestoreProgramBindings() const override { NOTREACHED(); }
- void RestoreTextureState(unsigned service_id) override { NOTREACHED(); }
- void RestoreTextureUnitBindings(unsigned unit) const override {
- NOTREACHED();
- }
- void RestoreVertexAttribArray(unsigned index) override { NOTREACHED(); }
- void RestoreAllExternalTextureBindingsIfNeeded() override { NOTREACHED(); }
- QueryManager* GetQueryManager() override {
- NOTREACHED();
- return nullptr;
- }
- void SetQueryCallback(unsigned int query_client_id,
- base::OnceClosure callback) override {
- NOTREACHED();
- }
- gles2::GpuFenceManager* GetGpuFenceManager() override {
- NOTREACHED();
- return nullptr;
- }
- bool HasPendingQueries() const override { return false; }
- void ProcessPendingQueries(bool did_finish) override {}
- bool HasMoreIdleWork() const override { return false; }
- void PerformIdleWork() override { NOTREACHED(); }
- bool HasPollingWork() const override { return false; }
- void PerformPollingWork() override { NOTREACHED(); }
- TextureBase* GetTextureBase(uint32_t client_id) override {
- NOTREACHED();
- return nullptr;
- }
- void SetLevelInfo(uint32_t client_id,
- int level,
- unsigned internal_format,
- unsigned width,
- unsigned height,
- unsigned depth,
- unsigned format,
- unsigned type,
- const gfx::Rect& cleared_rect) override {
- NOTREACHED();
- }
- bool WasContextLost() const override {
- NOTIMPLEMENTED();
- return false;
- }
- bool WasContextLostByRobustnessExtension() const override {
- NOTREACHED();
- return false;
- }
- void MarkContextLost(error::ContextLostReason reason) override {
- NOTIMPLEMENTED();
- }
- bool CheckResetStatus() override {
- NOTREACHED();
- return false;
- }
- void BeginDecoding() override {}
- void EndDecoding() override {}
- const char* GetCommandName(unsigned int command_id) const;
- error::Error DoCommands(unsigned int num_commands,
- const volatile void* buffer,
- int num_entries,
- int* entries_processed) override;
- base::StringPiece GetLogPrefix() override {
- NOTIMPLEMENTED();
- return "";
- }
- void BindImage(uint32_t client_texture_id,
- uint32_t texture_target,
- gl::GLImage* image,
- bool can_bind_to_sampler) override {
- NOTREACHED();
- }
- gles2::ContextGroup* GetContextGroup() override {
- NOTREACHED();
- return nullptr;
- }
- gles2::ErrorState* GetErrorState() override {
- NOTREACHED();
- return nullptr;
- }
- std::unique_ptr<gles2::AbstractTexture> CreateAbstractTexture(
- GLenum target,
- GLenum internal_format,
- GLsizei width,
- GLsizei height,
- GLsizei depth,
- GLint border,
- GLenum format,
- GLenum type) override {
- NOTREACHED();
- return nullptr;
- }
- bool IsCompressedTextureFormat(unsigned format) override {
- NOTREACHED();
- return false;
- }
- bool ClearLevel(gles2::Texture* texture,
- unsigned target,
- int level,
- unsigned format,
- unsigned type,
- int xoffset,
- int yoffset,
- int width,
- int height) override {
- NOTREACHED();
- return false;
- }
- bool ClearCompressedTextureLevel(gles2::Texture* texture,
- unsigned target,
- int level,
- unsigned format,
- int width,
- int height) override {
- NOTREACHED();
- return false;
- }
- bool ClearLevel3D(gles2::Texture* texture,
- unsigned target,
- int level,
- unsigned format,
- unsigned type,
- int width,
- int height,
- int depth) override {
- NOTREACHED();
- return false;
- }
- bool initialized() const override { return true; }
- void SetLogCommands(bool log_commands) override { NOTIMPLEMENTED(); }
- gles2::Outputter* outputter() const override {
- NOTIMPLEMENTED();
- return nullptr;
- }
-
- private:
- typedef error::Error (WebGPUDecoderImpl::*CmdHandler)(
- uint32_t immediate_data_size,
- const volatile void* data);
-
- // A struct to hold info about each command.
- struct CommandInfo {
- CmdHandler cmd_handler;
- uint8_t arg_flags; // How to handle the arguments for this command
- uint8_t cmd_flags; // How to handle this command
- uint16_t arg_count; // How many arguments are expected for this command.
- };
-
- // A table of CommandInfo for all the commands.
- static const CommandInfo command_info[kNumCommands - kFirstWebGPUCommand];
-
-// Generate a member function prototype for each command in an automated and
-// typesafe way.
-#define WEBGPU_CMD_OP(name) \
- Error Handle##name(uint32_t immediate_data_size, const volatile void* data);
- WEBGPU_COMMAND_LIST(WEBGPU_CMD_OP)
-#undef WEBGPU_CMD_OP
-
- // The current decoder error communicates the decoder error through command
- // processing functions that do not return the error value. Should be set
- // only if not returning an error.
- error::Error current_decoder_error_ = error::kNoError;
-
- DISALLOW_COPY_AND_ASSIGN(WebGPUDecoderImpl);
-};
-
-constexpr WebGPUDecoderImpl::CommandInfo WebGPUDecoderImpl::command_info[] = {
-#define WEBGPU_CMD_OP(name) \
- { \
- &WebGPUDecoderImpl::Handle##name, cmds::name::kArgFlags, \
- cmds::name::cmd_flags, \
- sizeof(cmds::name) / sizeof(CommandBufferEntry) - 1, \
- }, /* NOLINT */
- WEBGPU_COMMAND_LIST(WEBGPU_CMD_OP)
-#undef WEBGPU_CMD_OP
-};
-
// static
WebGPUDecoder* WebGPUDecoder::Create(
DecoderClient* client,
CommandBufferServiceBase* command_buffer_service,
gles2::Outputter* outputter) {
- return new WebGPUDecoderImpl(client, command_buffer_service, outputter);
+ return CreateWebGPUDecoderImpl(client, command_buffer_service, outputter);
}
WebGPUDecoder::WebGPUDecoder(DecoderClient* client,
@@ -258,92 +24,5 @@ WebGPUDecoder::WebGPUDecoder(DecoderClient* client,
WebGPUDecoder::~WebGPUDecoder() {}
-WebGPUDecoderImpl::WebGPUDecoderImpl(
- DecoderClient* client,
- CommandBufferServiceBase* command_buffer_service,
- gles2::Outputter* outputter)
- : WebGPUDecoder(client, command_buffer_service, outputter) {}
-
-WebGPUDecoderImpl::~WebGPUDecoderImpl() {}
-
-const char* WebGPUDecoderImpl::GetCommandName(unsigned int command_id) const {
- if (command_id >= kFirstWebGPUCommand && command_id < kNumCommands) {
- return webgpu::GetCommandName(static_cast<CommandId>(command_id));
- }
- return GetCommonCommandName(static_cast<cmd::CommandId>(command_id));
-}
-
-error::Error WebGPUDecoderImpl::DoCommands(unsigned int num_commands,
- const volatile void* buffer,
- int num_entries,
- int* entries_processed) {
- DCHECK(entries_processed);
- int commands_to_process = num_commands;
- error::Error result = error::kNoError;
- const volatile CommandBufferEntry* cmd_data =
- static_cast<const volatile CommandBufferEntry*>(buffer);
- int process_pos = 0;
- CommandId command = static_cast<CommandId>(0);
-
- while (process_pos < num_entries && result == error::kNoError &&
- commands_to_process--) {
- const unsigned int size = cmd_data->value_header.size;
- command = static_cast<CommandId>(cmd_data->value_header.command);
-
- if (size == 0) {
- result = error::kInvalidSize;
- break;
- }
-
- if (static_cast<int>(size) + process_pos > num_entries) {
- result = error::kOutOfBounds;
- break;
- }
-
- const unsigned int arg_count = size - 1;
- unsigned int command_index = command - kFirstWebGPUCommand;
- if (command_index < base::size(command_info)) {
- const CommandInfo& info = command_info[command_index];
- unsigned int info_arg_count = static_cast<unsigned int>(info.arg_count);
- if ((info.arg_flags == cmd::kFixed && arg_count == info_arg_count) ||
- (info.arg_flags == cmd::kAtLeastN && arg_count >= info_arg_count)) {
- uint32_t immediate_data_size = (arg_count - info_arg_count) *
- sizeof(CommandBufferEntry); // NOLINT
- result = (this->*info.cmd_handler)(immediate_data_size, cmd_data);
- } else {
- result = error::kInvalidArguments;
- }
- } else {
- result = DoCommonCommand(command, arg_count, cmd_data);
- }
-
- if (result == error::kNoError &&
- current_decoder_error_ != error::kNoError) {
- result = current_decoder_error_;
- current_decoder_error_ = error::kNoError;
- }
-
- if (result != error::kDeferCommandUntilLater) {
- process_pos += size;
- cmd_data += size;
- }
- }
-
- *entries_processed = process_pos;
-
- if (error::IsError(result)) {
- LOG(ERROR) << "Error: " << result << " for Command "
- << GetCommandName(command);
- }
-
- return result;
-}
-
-error::Error WebGPUDecoderImpl::HandleDummy(uint32_t immediate_data_size,
- const volatile void* cmd_data) {
- DLOG(ERROR) << "WebGPUDecoderImpl::HandleDummy";
- return error::kNoError;
-}
-
} // namespace webgpu
} // namespace gpu
diff --git a/chromium/gpu/command_buffer/service/webgpu_decoder_impl.cc b/chromium/gpu/command_buffer/service/webgpu_decoder_impl.cc
new file mode 100644
index 00000000000..216b702109d
--- /dev/null
+++ b/chromium/gpu/command_buffer/service/webgpu_decoder_impl.cc
@@ -0,0 +1,347 @@
+// Copyright 2019 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "gpu/command_buffer/service/webgpu_decoder_impl.h"
+
+#include <memory>
+
+#include "base/logging.h"
+#include "base/macros.h"
+#include "gpu/command_buffer/common/webgpu_cmd_format.h"
+#include "gpu/command_buffer/common/webgpu_cmd_ids.h"
+#include "gpu/command_buffer/service/command_buffer_service.h"
+#include "gpu/command_buffer/service/webgpu_decoder.h"
+
+namespace gpu {
+namespace webgpu {
+
+class WebGPUDecoderImpl final : public WebGPUDecoder {
+ public:
+ WebGPUDecoderImpl(DecoderClient* client,
+ CommandBufferServiceBase* command_buffer_service,
+ gles2::Outputter* outputter);
+ ~WebGPUDecoderImpl() override;
+
+ // DecoderContext implementation.
+ base::WeakPtr<DecoderContext> AsWeakPtr() override {
+ NOTIMPLEMENTED();
+ return nullptr;
+ }
+ ContextResult Initialize(
+ const scoped_refptr<gl::GLSurface>& surface,
+ const scoped_refptr<gl::GLContext>& context,
+ bool offscreen,
+ const gles2::DisallowedFeatures& disallowed_features,
+ const ContextCreationAttribs& attrib_helper) override {
+ return ContextResult::kSuccess;
+ }
+ const gles2::ContextState* GetContextState() override {
+ NOTREACHED();
+ return nullptr;
+ }
+ void Destroy(bool have_context) override {}
+ bool MakeCurrent() override { return true; }
+ gl::GLContext* GetGLContext() override { return nullptr; }
+ gl::GLSurface* GetGLSurface() override {
+ NOTREACHED();
+ return nullptr;
+ }
+ const gles2::FeatureInfo* GetFeatureInfo() const override {
+ NOTREACHED();
+ return nullptr;
+ }
+ Capabilities GetCapabilities() override { return {}; }
+ void RestoreGlobalState() const override { NOTREACHED(); }
+ void ClearAllAttributes() const override { NOTREACHED(); }
+ void RestoreAllAttributes() const override { NOTREACHED(); }
+ void RestoreState(const gles2::ContextState* prev_state) override {
+ NOTREACHED();
+ }
+ void RestoreActiveTexture() const override { NOTREACHED(); }
+ void RestoreAllTextureUnitAndSamplerBindings(
+ const gles2::ContextState* prev_state) const override {
+ NOTREACHED();
+ }
+ void RestoreActiveTextureUnitBinding(unsigned int target) const override {
+ NOTREACHED();
+ }
+ void RestoreBufferBinding(unsigned int target) override { NOTREACHED(); }
+ void RestoreBufferBindings() const override { NOTREACHED(); }
+ void RestoreFramebufferBindings() const override { NOTREACHED(); }
+ void RestoreRenderbufferBindings() override { NOTREACHED(); }
+ void RestoreProgramBindings() const override { NOTREACHED(); }
+ void RestoreTextureState(unsigned service_id) override { NOTREACHED(); }
+ void RestoreTextureUnitBindings(unsigned unit) const override {
+ NOTREACHED();
+ }
+ void RestoreVertexAttribArray(unsigned index) override { NOTREACHED(); }
+ void RestoreAllExternalTextureBindingsIfNeeded() override { NOTREACHED(); }
+ QueryManager* GetQueryManager() override {
+ NOTREACHED();
+ return nullptr;
+ }
+ void SetQueryCallback(unsigned int query_client_id,
+ base::OnceClosure callback) override {
+ NOTREACHED();
+ }
+ gles2::GpuFenceManager* GetGpuFenceManager() override {
+ NOTREACHED();
+ return nullptr;
+ }
+ bool HasPendingQueries() const override { return false; }
+ void ProcessPendingQueries(bool did_finish) override {}
+ bool HasMoreIdleWork() const override { return false; }
+ void PerformIdleWork() override { NOTREACHED(); }
+ bool HasPollingWork() const override { return false; }
+ void PerformPollingWork() override { NOTREACHED(); }
+ TextureBase* GetTextureBase(uint32_t client_id) override {
+ NOTREACHED();
+ return nullptr;
+ }
+ void SetLevelInfo(uint32_t client_id,
+ int level,
+ unsigned internal_format,
+ unsigned width,
+ unsigned height,
+ unsigned depth,
+ unsigned format,
+ unsigned type,
+ const gfx::Rect& cleared_rect) override {
+ NOTREACHED();
+ }
+ bool WasContextLost() const override {
+ NOTIMPLEMENTED();
+ return false;
+ }
+ bool WasContextLostByRobustnessExtension() const override {
+ NOTREACHED();
+ return false;
+ }
+ void MarkContextLost(error::ContextLostReason reason) override {
+ NOTIMPLEMENTED();
+ }
+ bool CheckResetStatus() override {
+ NOTREACHED();
+ return false;
+ }
+ void BeginDecoding() override {}
+ void EndDecoding() override {}
+ const char* GetCommandName(unsigned int command_id) const;
+ error::Error DoCommands(unsigned int num_commands,
+ const volatile void* buffer,
+ int num_entries,
+ int* entries_processed) override;
+ base::StringPiece GetLogPrefix() override {
+ NOTIMPLEMENTED();
+ return "";
+ }
+ void BindImage(uint32_t client_texture_id,
+ uint32_t texture_target,
+ gl::GLImage* image,
+ bool can_bind_to_sampler) override {
+ NOTREACHED();
+ }
+ gles2::ContextGroup* GetContextGroup() override { return nullptr; }
+ gles2::ErrorState* GetErrorState() override {
+ NOTREACHED();
+ return nullptr;
+ }
+ std::unique_ptr<gles2::AbstractTexture> CreateAbstractTexture(
+ GLenum target,
+ GLenum internal_format,
+ GLsizei width,
+ GLsizei height,
+ GLsizei depth,
+ GLint border,
+ GLenum format,
+ GLenum type) override {
+ NOTREACHED();
+ return nullptr;
+ }
+ bool IsCompressedTextureFormat(unsigned format) override {
+ NOTREACHED();
+ return false;
+ }
+ bool ClearLevel(gles2::Texture* texture,
+ unsigned target,
+ int level,
+ unsigned format,
+ unsigned type,
+ int xoffset,
+ int yoffset,
+ int width,
+ int height) override {
+ NOTREACHED();
+ return false;
+ }
+ bool ClearCompressedTextureLevel(gles2::Texture* texture,
+ unsigned target,
+ int level,
+ unsigned format,
+ int width,
+ int height) override {
+ NOTREACHED();
+ return false;
+ }
+ bool ClearLevel3D(gles2::Texture* texture,
+ unsigned target,
+ int level,
+ unsigned format,
+ unsigned type,
+ int width,
+ int height,
+ int depth) override {
+ NOTREACHED();
+ return false;
+ }
+ bool initialized() const override { return true; }
+ void SetLogCommands(bool log_commands) override { NOTIMPLEMENTED(); }
+ gles2::Outputter* outputter() const override {
+ NOTIMPLEMENTED();
+ return nullptr;
+ }
+ int GetRasterDecoderId() const override {
+ NOTREACHED();
+ return -1;
+ }
+
+ private:
+ typedef error::Error (WebGPUDecoderImpl::*CmdHandler)(
+ uint32_t immediate_data_size,
+ const volatile void* data);
+
+ // A struct to hold info about each command.
+ struct CommandInfo {
+ CmdHandler cmd_handler;
+ uint8_t arg_flags; // How to handle the arguments for this command
+ uint8_t cmd_flags; // How to handle this command
+ uint16_t arg_count; // How many arguments are expected for this command.
+ };
+
+ // A table of CommandInfo for all the commands.
+ static const CommandInfo command_info[kNumCommands - kFirstWebGPUCommand];
+
+// Generate a member function prototype for each command in an automated and
+// typesafe way.
+#define WEBGPU_CMD_OP(name) \
+ Error Handle##name(uint32_t immediate_data_size, const volatile void* data);
+ WEBGPU_COMMAND_LIST(WEBGPU_CMD_OP)
+#undef WEBGPU_CMD_OP
+
+ // The current decoder error communicates the decoder error through command
+ // processing functions that do not return the error value. Should be set
+ // only if not returning an error.
+ error::Error current_decoder_error_ = error::kNoError;
+
+ DISALLOW_COPY_AND_ASSIGN(WebGPUDecoderImpl);
+};
+
+constexpr WebGPUDecoderImpl::CommandInfo WebGPUDecoderImpl::command_info[] = {
+#define WEBGPU_CMD_OP(name) \
+ { \
+ &WebGPUDecoderImpl::Handle##name, \
+ cmds::name::kArgFlags, \
+ cmds::name::cmd_flags, \
+ sizeof(cmds::name) / sizeof(CommandBufferEntry) - 1, \
+ }, /* NOLINT */
+ WEBGPU_COMMAND_LIST(WEBGPU_CMD_OP)
+#undef WEBGPU_CMD_OP
+};
+
+WebGPUDecoder* CreateWebGPUDecoderImpl(
+ DecoderClient* client,
+ CommandBufferServiceBase* command_buffer_service,
+ gles2::Outputter* outputter) {
+ return new WebGPUDecoderImpl(client, command_buffer_service, outputter);
+}
+
+WebGPUDecoderImpl::WebGPUDecoderImpl(
+ DecoderClient* client,
+ CommandBufferServiceBase* command_buffer_service,
+ gles2::Outputter* outputter)
+ : WebGPUDecoder(client, command_buffer_service, outputter) {}
+
+WebGPUDecoderImpl::~WebGPUDecoderImpl() {}
+
+const char* WebGPUDecoderImpl::GetCommandName(unsigned int command_id) const {
+ if (command_id >= kFirstWebGPUCommand && command_id < kNumCommands) {
+ return webgpu::GetCommandName(static_cast<CommandId>(command_id));
+ }
+ return GetCommonCommandName(static_cast<cmd::CommandId>(command_id));
+}
+
+error::Error WebGPUDecoderImpl::DoCommands(unsigned int num_commands,
+ const volatile void* buffer,
+ int num_entries,
+ int* entries_processed) {
+ DCHECK(entries_processed);
+ int commands_to_process = num_commands;
+ error::Error result = error::kNoError;
+ const volatile CommandBufferEntry* cmd_data =
+ static_cast<const volatile CommandBufferEntry*>(buffer);
+ int process_pos = 0;
+ CommandId command = static_cast<CommandId>(0);
+
+ while (process_pos < num_entries && result == error::kNoError &&
+ commands_to_process--) {
+ const unsigned int size = cmd_data->value_header.size;
+ command = static_cast<CommandId>(cmd_data->value_header.command);
+
+ if (size == 0) {
+ result = error::kInvalidSize;
+ break;
+ }
+
+ if (static_cast<int>(size) + process_pos > num_entries) {
+ result = error::kOutOfBounds;
+ break;
+ }
+
+ const unsigned int arg_count = size - 1;
+ unsigned int command_index = command - kFirstWebGPUCommand;
+ if (command_index < base::size(command_info)) {
+ const CommandInfo& info = command_info[command_index];
+ unsigned int info_arg_count = static_cast<unsigned int>(info.arg_count);
+ if ((info.arg_flags == cmd::kFixed && arg_count == info_arg_count) ||
+ (info.arg_flags == cmd::kAtLeastN && arg_count >= info_arg_count)) {
+ uint32_t immediate_data_size = (arg_count - info_arg_count) *
+ sizeof(CommandBufferEntry); // NOLINT
+ result = (this->*info.cmd_handler)(immediate_data_size, cmd_data);
+ } else {
+ result = error::kInvalidArguments;
+ }
+ } else {
+ result = DoCommonCommand(command, arg_count, cmd_data);
+ }
+
+ if (result == error::kNoError &&
+ current_decoder_error_ != error::kNoError) {
+ result = current_decoder_error_;
+ current_decoder_error_ = error::kNoError;
+ }
+
+ if (result != error::kDeferCommandUntilLater) {
+ process_pos += size;
+ cmd_data += size;
+ }
+ }
+
+ *entries_processed = process_pos;
+
+ if (error::IsError(result)) {
+ LOG(ERROR) << "Error: " << result << " for Command "
+ << GetCommandName(command);
+ }
+
+ return result;
+}
+
+error::Error WebGPUDecoderImpl::HandleDummy(uint32_t immediate_data_size,
+ const volatile void* cmd_data) {
+ DLOG(ERROR) << "WebGPUDecoderImpl::HandleDummy";
+ return error::kNoError;
+}
+
+} // namespace webgpu
+} // namespace gpu
diff --git a/chromium/gpu/command_buffer/service/webgpu_decoder_impl.h b/chromium/gpu/command_buffer/service/webgpu_decoder_impl.h
new file mode 100644
index 00000000000..d91f20c4558
--- /dev/null
+++ b/chromium/gpu/command_buffer/service/webgpu_decoder_impl.h
@@ -0,0 +1,31 @@
+// Copyright 2019 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef GPU_COMMAND_BUFFER_SERVICE_WEBGPU_DECODER_IMPL_H_
+#define GPU_COMMAND_BUFFER_SERVICE_WEBGPU_DECODER_IMPL_H_
+
+#include "gpu/gpu_gles2_export.h"
+
+namespace gpu {
+
+class CommandBufferServiceBase;
+class DecoderClient;
+
+namespace gles2 {
+class Outputter;
+} // namespace gles2
+
+namespace webgpu {
+
+class WebGPUDecoder;
+
+GPU_GLES2_EXPORT WebGPUDecoder* CreateWebGPUDecoderImpl(
+ DecoderClient* client,
+ CommandBufferServiceBase* command_buffer_service,
+ gles2::Outputter* outputter);
+
+} // namespace webgpu
+} // namespace gpu
+
+#endif // GPU_COMMAND_BUFFER_SERVICE_WEBGPU_DECODER_IMPL_H_
diff --git a/chromium/gpu/command_buffer/service/wrapped_sk_image.cc b/chromium/gpu/command_buffer/service/wrapped_sk_image.cc
index b551da60f9d..f4b05d415a0 100644
--- a/chromium/gpu/command_buffer/service/wrapped_sk_image.cc
+++ b/chromium/gpu/command_buffer/service/wrapped_sk_image.cc
@@ -70,6 +70,7 @@ class WrappedSkImage : public SharedImageBacking {
sk_sp<SkSurface> GetSkSurface(int final_msaa_count,
SkColorType color_type,
+ sk_sp<SkColorSpace> color_space,
const SkSurfaceProps& surface_props) {
if (context_state_->context_lost())
return nullptr;
@@ -79,7 +80,7 @@ class WrappedSkImage : public SharedImageBacking {
DCHECK(gr_texture.isValid());
return SkSurface::MakeFromBackendTextureAsRenderTarget(
context_state_->gr_context(), gr_texture, kTopLeft_GrSurfaceOrigin,
- final_msaa_count, color_type, /*colorSpace=*/nullptr, &surface_props);
+ final_msaa_count, color_type, color_space, &surface_props);
}
sk_sp<SkPromiseImageTexture> promise_texture() { return promise_texture_; }
@@ -194,7 +195,8 @@ class WrappedSkImageRepresentation : public SharedImageRepresentationSkia {
/*gpu_compositing=*/true, format());
auto surface = wrapped_sk_image()->GetSkSurface(
- final_msaa_count, sk_color_type, surface_props);
+ final_msaa_count, sk_color_type,
+ backing()->color_space().ToSkColorSpace(), surface_props);
write_surface_ = surface.get();
return surface;
}
diff --git a/chromium/gpu/config/gpu_driver_bug_list.json b/chromium/gpu/config/gpu_driver_bug_list.json
index 4996becd8a7..c02208ca061 100644
--- a/chromium/gpu/config/gpu_driver_bug_list.json
+++ b/chromium/gpu/config/gpu_driver_bug_list.json
@@ -9,6 +9,11 @@
"type": "android"
},
"gl_vendor": "Imagination.*",
+ "gl_type": "gles",
+ "gl_version": {
+ "op": "<",
+ "value": "3.0"
+ },
"features": [
"use_client_side_arrays_for_stream_buffers"
]
@@ -21,6 +26,11 @@
"type": "android"
},
"gl_vendor": "ARM.*",
+ "gl_type": "gles",
+ "gl_version": {
+ "op": "<",
+ "value": "3.0"
+ },
"features": [
"use_client_side_arrays_for_stream_buffers"
]
@@ -1953,7 +1963,7 @@
},
{
"id": 197,
- "description": "adjust src/dst region if blitting pixels outside read framebuffer on Mac",
+ "description": "adjust src/dst region if blitting pixels outside framebuffer on Mac",
"cr_bugs": [644740],
"os": {
"type": "macosx"
@@ -1964,7 +1974,7 @@
},
{
"id": 198,
- "description": "adjust src/dst region if blitting pixels outside read framebuffer on Linux Intel",
+ "description": "adjust src/dst region if blitting pixels outside framebuffer on Linux Intel",
"cr_bugs": [664740],
"os": {
"type": "linux"
@@ -1976,7 +1986,7 @@
},
{
"id": 199,
- "description": "adjust src/dst region if blitting pixels outside read framebuffer on Linux AMD",
+ "description": "adjust src/dst region if blitting pixels outside framebuffer on Linux AMD",
"cr_bugs": [664740],
"os": {
"type": "linux"
@@ -2645,15 +2655,16 @@
},
{
"id": 254,
- "description": "Limit MSAA samples to 4x on AMD Stoney",
- "cr_bugs": [798936],
+ "description": "Limit MSAA quality samples to 4 and storage samples 2 on AMD Stoney",
+ "cr_bugs": [875471],
"os": {
"type" : "chromeos"
},
"vendor_id": "0x1002",
"device_id": ["0x98e4"],
"features": [
- "max_msaa_sample_count_4"
+ "max_msaa_sample_count_4",
+ "use_eqaa_storage_samples_2"
]
},
{
@@ -3069,6 +3080,86 @@
]
},
{
+ "id": 287,
+ "description": "glCopyTexImage2D on Adreno fails if source is GL_RGB10_A2 and destination is not.",
+ "cr_bugs": [925986],
+ "os": {
+ "type": "android",
+ "version": {
+ "op": ">=",
+ "value": "5.0.0"
+ }
+ },
+ "gl_vendor": "Qualcomm.*",
+ "gl_renderer": ".*4\\d\\d",
+ "gl_renderer": "Adreno \\(TM\\) [345].*",
+ "features": [
+ "disable_copy_tex_image_2d_rgb10_a2_adreno"
+ ]
+ },
+ {
+ "id": 288,
+ "description": "glCopyTexImage2D on NVIDIA Tegra fails in certain cases if source is GL_RGB10_A2.",
+ "cr_bugs": [925986],
+ "os": {
+ "type": "android"
+ },
+ "gl_vendor": "NVIDIA.*",
+ "gl_type": "gles",
+ "gl_version": {
+ "op": ">=",
+ "value": "3.0"
+ },
+ "features": [
+ "disable_copy_tex_image_2d_rgb10_a2_tegra"
+ ]
+ },
+ {
+ "id": 289,
+ "description": "Fake entry for testing command buffer init failures on ES 2.0",
+ "cr_bugs": [923134],
+ "test_group": 3,
+ "features": [
+ "disable_es3_gl_context_for_testing"
+ ]
+ },
+ {
+ "id": 290,
+ "description": "Round down glBindBufferRange size to a multiple of 4 on Qualcomm Adreno GPUs for uniform buffers",
+ "cr_bugs": [906743],
+ "os": {
+ "type": "android"
+ },
+ "gl_renderer": "Adreno \\(TM\\) .*",
+ "features": [
+ "round_down_uniform_bind_buffer_range_size"
+ ]
+ },
+ {
+ "id": 291,
+ "description": "adjust src/dst region if blitting pixels outside framebuffer on Linux NVIDIA",
+ "cr_bugs": [830046],
+ "os": {
+ "type": "linux"
+ },
+ "vendor_id": "0x10de",
+ "features": [
+ "adjust_src_dst_region_for_blitframebuffer"
+ ]
+ },
+ {
+ "id": 292,
+ "description": "adjust src/dst region if blitting pixels outside framebuffer on Android NVIDIA",
+ "cr_bugs": [830046],
+ "os": {
+ "type": "android"
+ },
+ "gl_vendor": "NVIDIA.*",
+ "features": [
+ "adjust_src_dst_region_for_blitframebuffer"
+ ]
+ },
+ {
"id": 293,
"cr_bugs": [931527],
"description": "Frequent crashes in glClear on Android N with driver 12.0.04rel0",
@@ -3102,6 +3193,71 @@
"features": [
"disable_direct_composition_layers"
]
+ },
+ {
+ "id": 295,
+ "description": "Avoid waiting on a egl fence before swapping buffers and rely on implicit sync on Intel GPUs",
+ "cr_bugs": [938286],
+ "os": {
+ "type": "linux"
+ },
+ "gl_vendor": "Intel.*",
+ "features": [
+ "rely_on_implicit_sync_for_swap_buffers"
+ ]
+ },
+ {
+ "id": 296,
+ "description": "Avoid waiting on a egl fence before swapping buffers and rely on implicit sync on Broadcom GPUs",
+ "cr_bugs": [938286],
+ "os": {
+ "type": "linux"
+ },
+ "gl_vendor": "Broadcom.*",
+ "features": [
+ "rely_on_implicit_sync_for_swap_buffers"
+ ]
+ },
+ {
+ "id": 297,
+ "cr_bugs": [938678],
+ "description": "Needed to pass dEQP-EGL.functional.robustness.reset_context.shaders.infinite_loop.*",
+ "os": {
+ "type" : "chromeos"
+ },
+ "vendor_id": "0x1002",
+ "features": [
+ "exit_on_context_lost"
+ ]
+ },
+ {
+ "id": 298,
+ "cr_bugs": [941716],
+ "description": "AImageReader is very crashy on this driver version",
+ "os": {
+ "type" : "android"
+ },
+ "gl_vendor": "Qualcomm.*",
+ "driver_version": {
+ "op": "=",
+ "value": "269.0"
+ },
+ "features": [
+ "disable_aimagereader"
+ ]
+ },
+ {
+ "id": 299,
+ "description": "Context lost recovery often fails on PowerVR Rogue GE8* GPUs on Android.",
+ "cr_bugs": [942106],
+ "os": {
+ "type": "android"
+ },
+ "gl_vendor": "Imagination.*",
+ "gl_renderer": "PowerVR Rogue GE8.*",
+ "features": [
+ "exit_on_context_lost"
+ ]
}
]
}
diff --git a/chromium/gpu/config/gpu_driver_bug_list_unittest.cc b/chromium/gpu/config/gpu_driver_bug_list_unittest.cc
index 88646b3e1d4..58739a41a17 100644
--- a/chromium/gpu/config/gpu_driver_bug_list_unittest.cc
+++ b/chromium/gpu/config/gpu_driver_bug_list_unittest.cc
@@ -24,6 +24,7 @@ TEST_F(GpuDriverBugListTest, CurrentListForARM) {
GPUInfo gpu_info;
gpu_info.gl_vendor = "ARM";
gpu_info.gl_renderer = "MALi_T604";
+ gpu_info.gl_version = "OpenGL ES 2.0";
std::set<int> bugs = list->MakeDecision(
GpuControlList::kOsAndroid, "4.1", gpu_info);
EXPECT_EQ(1u, bugs.count(USE_CLIENT_SIDE_ARRAYS_FOR_STREAM_BUFFERS));
@@ -34,6 +35,7 @@ TEST_F(GpuDriverBugListTest, CurrentListForImagination) {
GPUInfo gpu_info;
gpu_info.gl_vendor = "Imagination Technologies";
gpu_info.gl_renderer = "PowerVR SGX 540";
+ gpu_info.gl_version = "OpenGL ES 2.0";
std::set<int> bugs = list->MakeDecision(
GpuControlList::kOsAndroid, "4.1", gpu_info);
EXPECT_EQ(1u, bugs.count(USE_CLIENT_SIDE_ARRAYS_FOR_STREAM_BUFFERS));
diff --git a/chromium/gpu/config/gpu_dx_diagnostics_win.cc b/chromium/gpu/config/gpu_dx_diagnostics_win.cc
index 9e85f16c96e..43562b09725 100644
--- a/chromium/gpu/config/gpu_dx_diagnostics_win.cc
+++ b/chromium/gpu/config/gpu_dx_diagnostics_win.cc
@@ -43,10 +43,10 @@ void RecurseDiagnosticTree(DxDiagNode* output,
if (SUCCEEDED(hr)) {
switch (variant.vt) {
case VT_UI4:
- output->values[prop_name8] = base::UintToString(variant.ulVal);
+ output->values[prop_name8] = base::NumberToString(variant.ulVal);
break;
case VT_I4:
- output->values[prop_name8] = base::IntToString(variant.lVal);
+ output->values[prop_name8] = base::NumberToString(variant.lVal);
break;
case VT_BOOL:
output->values[prop_name8] = variant.boolVal ? "true" : "false";
diff --git a/chromium/gpu/config/gpu_feature_info.h b/chromium/gpu/config/gpu_feature_info.h
index 94f5f7b4353..e78b2f8ec6d 100644
--- a/chromium/gpu/config/gpu_feature_info.h
+++ b/chromium/gpu/config/gpu_feature_info.h
@@ -38,6 +38,7 @@ enum AntialiasingMode {
struct GPU_EXPORT WebglPreferences {
AntialiasingMode anti_aliasing_mode = kAntialiasingModeUnspecified;
uint32_t msaa_sample_count = 8;
+ uint32_t eqaa_storage_sample_count = 4;
// WebGL-specific numeric limits.
uint32_t max_active_webgl_contexts = 0;
uint32_t max_active_webgl_contexts_on_worker = 0;
diff --git a/chromium/gpu/config/gpu_finch_features.cc b/chromium/gpu/config/gpu_finch_features.cc
index a8748066ea1..f81d1e743a9 100644
--- a/chromium/gpu/config/gpu_finch_features.cc
+++ b/chromium/gpu/config/gpu_finch_features.cc
@@ -3,7 +3,31 @@
// found in the LICENSE file.
#include "gpu/config/gpu_finch_features.h"
+#if defined(OS_ANDROID)
+#include "base/android/build_info.h"
+#include "base/metrics/field_trial_params.h"
+#include "base/strings/string_split.h"
+#include "ui/gl/android/android_surface_control_compat.h"
+#endif
+
namespace features {
+namespace {
+
+#if defined(OS_ANDROID)
+bool FieldIsInBlacklist(const char* current_value, std::string blacklist_str) {
+ std::vector<std::string> blacklist = base::SplitString(
+ blacklist_str, ",", base::KEEP_WHITESPACE, base::SPLIT_WANT_NONEMPTY);
+ for (const std::string& value : blacklist) {
+ if (value == current_value)
+ return true;
+ }
+
+ return false;
+}
+#endif
+
+} // namespace
+
#if defined(OS_ANDROID)
// Use android AImageReader when playing videos with MediaPlayer.
const base::Feature kAImageReaderMediaPlayer{"AImageReaderMediaPlayer",
@@ -44,6 +68,12 @@ const base::Feature kDefaultPassthroughCommandDecoder{
const base::Feature kDirectCompositionPreferNV12Overlays{
"DirectCompositionPreferNV12Overlays", base::FEATURE_ENABLED_BY_DEFAULT};
+// Allow putting a video swapchain underneath the main swapchain, so overlays
+// can be used even if there are controls on top of the video. It can be
+// enabled only when overlay is supported.
+const base::Feature kDirectCompositionUnderlays{
+ "DirectCompositionUnderlays", base::FEATURE_DISABLED_BY_DEFAULT};
+
// Causes us to use the SharedImageManager, removing support for the old
// mailbox system. Any consumers of the GPU process using the old mailbox
// system will experience undefined results.
@@ -68,4 +98,29 @@ const base::Feature kDirectCompositionUseNV12DecodeSwapChain{
const base::Feature kVaapiJpegImageDecodeAcceleration{
"VaapiJpegImageDecodeAcceleration", base::FEATURE_DISABLED_BY_DEFAULT};
+#if defined(OS_ANDROID)
+bool IsAndroidSurfaceControlEnabled() {
+ if (!gl::SurfaceControl::IsSupported())
+ return false;
+
+ if (!base::FeatureList::IsEnabled(kAndroidSurfaceControl))
+ return false;
+
+ if (FieldIsInBlacklist(base::android::BuildInfo::GetInstance()->model(),
+ base::GetFieldTrialParamValueByFeature(
+ kAndroidSurfaceControl, "blacklisted_models"))) {
+ return false;
+ }
+
+ if (FieldIsInBlacklist(
+ base::android::BuildInfo::GetInstance()->android_build_id(),
+ base::GetFieldTrialParamValueByFeature(kAndroidSurfaceControl,
+ "blacklisted_build_ids"))) {
+ return false;
+ }
+
+ return true;
+}
+#endif
+
} // namespace features
diff --git a/chromium/gpu/config/gpu_finch_features.h b/chromium/gpu/config/gpu_finch_features.h
index 833ec88c081..1e668915577 100644
--- a/chromium/gpu/config/gpu_finch_features.h
+++ b/chromium/gpu/config/gpu_finch_features.h
@@ -29,6 +29,8 @@ GPU_EXPORT extern const base::Feature kDefaultPassthroughCommandDecoder;
GPU_EXPORT extern const base::Feature kDirectCompositionPreferNV12Overlays;
+GPU_EXPORT extern const base::Feature kDirectCompositionUnderlays;
+
GPU_EXPORT extern const base::Feature kSharedImageManager;
GPU_EXPORT extern const base::Feature kUseDCOverlaysForSoftwareProtectedVideo;
@@ -37,6 +39,10 @@ GPU_EXPORT extern const base::Feature kDirectCompositionUseNV12DecodeSwapChain;
GPU_EXPORT extern const base::Feature kVaapiJpegImageDecodeAcceleration;
+#if defined(OS_ANDROID)
+GPU_EXPORT bool IsAndroidSurfaceControlEnabled();
+#endif
+
} // namespace features
#endif // GPU_CONFIG_GPU_FEATURES_H_
diff --git a/chromium/gpu/config/gpu_info.cc b/chromium/gpu/config/gpu_info.cc
index 9525f669df8..38abaf0102c 100644
--- a/chromium/gpu/config/gpu_info.cc
+++ b/chromium/gpu/config/gpu_info.cc
@@ -50,12 +50,55 @@ void EnumerateVideoEncodeAcceleratorSupportedProfile(
enumerator->EndVideoEncodeAcceleratorSupportedProfile();
}
+const char* ImageDecodeAcceleratorTypeToString(
+ gpu::ImageDecodeAcceleratorType type) {
+ switch (type) {
+ case gpu::ImageDecodeAcceleratorType::kJpeg:
+ return "JPEG";
+ case gpu::ImageDecodeAcceleratorType::kUnknown:
+ return "Unknown";
+ }
+}
+
+const char* ImageDecodeAcceleratorSubsamplingToString(
+ gpu::ImageDecodeAcceleratorSubsampling subsampling) {
+ switch (subsampling) {
+ case gpu::ImageDecodeAcceleratorSubsampling::k420:
+ return "4:2:0";
+ case gpu::ImageDecodeAcceleratorSubsampling::k422:
+ return "4:2:2";
+ }
+}
+
+void EnumerateImageDecodeAcceleratorSupportedProfile(
+ const gpu::ImageDecodeAcceleratorSupportedProfile& profile,
+ gpu::GPUInfo::Enumerator* enumerator) {
+ enumerator->BeginImageDecodeAcceleratorSupportedProfile();
+ enumerator->AddString("imageType",
+ ImageDecodeAcceleratorTypeToString(profile.image_type));
+ enumerator->AddString("minEncodedDimensions",
+ profile.min_encoded_dimensions.ToString());
+ enumerator->AddString("maxEncodedDimensions",
+ profile.max_encoded_dimensions.ToString());
+ std::string subsamplings;
+ for (size_t i = 0; i < profile.subsamplings.size(); i++) {
+ if (i > 0)
+ subsamplings += ", ";
+ subsamplings +=
+ ImageDecodeAcceleratorSubsamplingToString(profile.subsamplings[i]);
+ }
+ enumerator->AddString("subsamplings", subsamplings);
+ enumerator->EndImageDecodeAcceleratorSupportedProfile();
+}
+
#if defined(OS_WIN)
void EnumerateOverlayCapability(const gpu::OverlayCapability& cap,
gpu::GPUInfo::Enumerator* enumerator) {
+ std::string key_string = "overlayCap";
+ key_string += OverlayFormatToString(cap.format);
enumerator->BeginOverlayCapability();
- enumerator->AddInt("format", static_cast<int>(cap.format));
- enumerator->AddInt("isScalingSupported", cap.is_scaling_supported);
+ enumerator->AddString(key_string.c_str(),
+ cap.is_scaling_supported ? "SCALING" : "DIRECT");
enumerator->EndOverlayCapability();
}
@@ -102,6 +145,24 @@ VideoDecodeAcceleratorCapabilities::VideoDecodeAcceleratorCapabilities(
VideoDecodeAcceleratorCapabilities::~VideoDecodeAcceleratorCapabilities() =
default;
+ImageDecodeAcceleratorSupportedProfile::ImageDecodeAcceleratorSupportedProfile()
+ : image_type(ImageDecodeAcceleratorType::kUnknown) {}
+
+ImageDecodeAcceleratorSupportedProfile::ImageDecodeAcceleratorSupportedProfile(
+ const ImageDecodeAcceleratorSupportedProfile& other) = default;
+
+ImageDecodeAcceleratorSupportedProfile::ImageDecodeAcceleratorSupportedProfile(
+ ImageDecodeAcceleratorSupportedProfile&& other) = default;
+
+ImageDecodeAcceleratorSupportedProfile::
+ ~ImageDecodeAcceleratorSupportedProfile() = default;
+
+ImageDecodeAcceleratorSupportedProfile& ImageDecodeAcceleratorSupportedProfile::
+operator=(const ImageDecodeAcceleratorSupportedProfile& other) = default;
+
+ImageDecodeAcceleratorSupportedProfile& ImageDecodeAcceleratorSupportedProfile::
+operator=(ImageDecodeAcceleratorSupportedProfile&& other) = default;
+
GPUInfo::GPUDevice::GPUDevice()
: vendor_id(0),
device_id(0),
@@ -199,10 +260,15 @@ void GPUInfo::EnumerateFields(Enumerator* enumerator) const {
VideoEncodeAcceleratorSupportedProfiles
video_encode_accelerator_supported_profiles;
bool jpeg_decode_accelerator_supported;
+
+ ImageDecodeAcceleratorSupportedProfiles
+ image_decode_accelerator_supported_profiles;
+
#if defined(USE_X11)
VisualID system_visual;
VisualID rgba_visual;
#endif
+
bool oop_rasterization_supported;
};
@@ -263,6 +329,8 @@ void GPUInfo::EnumerateFields(Enumerator* enumerator) const {
EnumerateVideoEncodeAcceleratorSupportedProfile(profile, enumerator);
enumerator->AddBool("jpegDecodeAcceleratorSupported",
jpeg_decode_accelerator_supported);
+ for (const auto& profile : image_decode_accelerator_supported_profiles)
+ EnumerateImageDecodeAcceleratorSupportedProfile(profile, enumerator);
#if defined(USE_X11)
enumerator->AddInt64("systemVisual", system_visual);
enumerator->AddInt64("rgbaVisual", rgba_visual);
diff --git a/chromium/gpu/config/gpu_info.h b/chromium/gpu/config/gpu_info.h
index 30c565600bb..88420f366f2 100644
--- a/chromium/gpu/config/gpu_info.h
+++ b/chromium/gpu/config/gpu_info.h
@@ -113,6 +113,45 @@ struct GPU_EXPORT VideoEncodeAcceleratorSupportedProfile {
using VideoEncodeAcceleratorSupportedProfiles =
std::vector<VideoEncodeAcceleratorSupportedProfile>;
+enum class ImageDecodeAcceleratorType {
+ kJpeg = 0,
+ kUnknown = 1,
+ kMaxValue = kUnknown,
+};
+
+enum class ImageDecodeAcceleratorSubsampling {
+ k420 = 0,
+ k422 = 1,
+ kMaxValue = k422,
+};
+
+// Specification of an image decoding profile supported by a hardware decoder.
+struct GPU_EXPORT ImageDecodeAcceleratorSupportedProfile {
+ ImageDecodeAcceleratorSupportedProfile();
+ ImageDecodeAcceleratorSupportedProfile(
+ const ImageDecodeAcceleratorSupportedProfile& other);
+ ImageDecodeAcceleratorSupportedProfile(
+ ImageDecodeAcceleratorSupportedProfile&& other);
+ ~ImageDecodeAcceleratorSupportedProfile();
+ ImageDecodeAcceleratorSupportedProfile& operator=(
+ const ImageDecodeAcceleratorSupportedProfile& other);
+ ImageDecodeAcceleratorSupportedProfile& operator=(
+ ImageDecodeAcceleratorSupportedProfile&& other);
+
+ // Fields common to all image types.
+ // Type of image to which this profile applies, e.g., JPEG.
+ ImageDecodeAcceleratorType image_type;
+ // Minimum and maximum supported pixel dimensions of the encoded image.
+ gfx::Size min_encoded_dimensions;
+ gfx::Size max_encoded_dimensions;
+
+ // Fields specific to |image_type| == kJpeg.
+ // The supported chroma subsampling formats, e.g. 4:2:0.
+ std::vector<ImageDecodeAcceleratorSubsampling> subsamplings;
+};
+using ImageDecodeAcceleratorSupportedProfiles =
+ std::vector<ImageDecodeAcceleratorSupportedProfile>;
+
#if defined(OS_WIN)
// Common overlay formats that we're interested in. Must match the OverlayFormat
// enum in //tools/metrics/histograms/enums.xml. Mapped to corresponding DXGI
@@ -294,6 +333,9 @@ struct GPU_EXPORT GPUInfo {
video_encode_accelerator_supported_profiles;
bool jpeg_decode_accelerator_supported;
+ ImageDecodeAcceleratorSupportedProfiles
+ image_decode_accelerator_supported_profiles;
+
#if defined(USE_X11)
VisualID system_visual;
VisualID rgba_visual;
@@ -335,6 +377,11 @@ struct GPU_EXPORT GPUInfo {
virtual void BeginVideoEncodeAcceleratorSupportedProfile() = 0;
virtual void EndVideoEncodeAcceleratorSupportedProfile() = 0;
+ // Markers indicating that an ImageDecodeAcceleratorSupportedProfile is
+ // being described.
+ virtual void BeginImageDecodeAcceleratorSupportedProfile() = 0;
+ virtual void EndImageDecodeAcceleratorSupportedProfile() = 0;
+
// Markers indicating that "auxiliary" attributes of the GPUInfo
// (according to the DevTools protocol) are being described.
virtual void BeginAuxAttributes() = 0;
diff --git a/chromium/gpu/config/gpu_info_collector.cc b/chromium/gpu/config/gpu_info_collector.cc
index c47e7f39393..d5737b62f17 100644
--- a/chromium/gpu/config/gpu_info_collector.cc
+++ b/chromium/gpu/config/gpu_info_collector.cc
@@ -215,7 +215,7 @@ bool CollectGraphicsInfoGL(GPUInfo* gpu_info,
gfx::HasExtension(extension_set, "GL_NV_framebuffer_multisample")) {
glGetIntegerv(GL_MAX_SAMPLES, &max_samples);
}
- gpu_info->max_msaa_samples = base::IntToString(max_samples);
+ gpu_info->max_msaa_samples = base::NumberToString(max_samples);
base::UmaHistogramSparse("GPU.MaxMSAASampleCount", max_samples);
#if defined(OS_ANDROID)
diff --git a/chromium/gpu/config/gpu_info_collector_unittest.cc b/chromium/gpu/config/gpu_info_collector_unittest.cc
index c73e99e2d33..77aa910c116 100644
--- a/chromium/gpu/config/gpu_info_collector_unittest.cc
+++ b/chromium/gpu/config/gpu_info_collector_unittest.cc
@@ -196,9 +196,9 @@ class GPUInfoCollectorTest
std::vector<std::string> split_extensions_;
};
-INSTANTIATE_TEST_CASE_P(GPUConfig,
- GPUInfoCollectorTest,
- ::testing::ValuesIn(kMockedOperatingSystemKinds));
+INSTANTIATE_TEST_SUITE_P(GPUConfig,
+ GPUInfoCollectorTest,
+ ::testing::ValuesIn(kMockedOperatingSystemKinds));
// TODO(rlp): Test the vendor and device id collection if deemed necessary as
// it involves several complicated mocks for each platform.
diff --git a/chromium/gpu/config/gpu_info_collector_win.cc b/chromium/gpu/config/gpu_info_collector_win.cc
index bde5ff343a1..4b0453b89f0 100644
--- a/chromium/gpu/config/gpu_info_collector_win.cc
+++ b/chromium/gpu/config/gpu_info_collector_win.cc
@@ -320,18 +320,17 @@ bool BadAMDVulkanDriverVersion() {
// 32-bit dll will be used to detect the AMD Vulkan driver.
const base::FilePath kAmdDriver64(FILE_PATH_LITERAL("amdvlk64.dll"));
const base::FilePath kAmdDriver32(FILE_PATH_LITERAL("amdvlk32.dll"));
- auto file_version_info =
- base::WrapUnique(FileVersionInfoWin::CreateFileVersionInfo(kAmdDriver64));
+ std::unique_ptr<FileVersionInfoWin> file_version_info =
+ FileVersionInfoWin::CreateFileVersionInfoWin(kAmdDriver64);
if (!file_version_info) {
- file_version_info.reset(
- FileVersionInfoWin::CreateFileVersionInfo(kAmdDriver32));
+ file_version_info =
+ FileVersionInfoWin::CreateFileVersionInfoWin(kAmdDriver32);
if (!file_version_info)
return false;
}
const VS_FIXEDFILEINFO* fixed_file_info =
- static_cast<FileVersionInfoWin*>(file_version_info.get())
- ->fixed_file_info();
+ file_version_info->fixed_file_info();
const int major = HIWORD(fixed_file_info->dwFileVersionMS);
const int minor = LOWORD(fixed_file_info->dwFileVersionMS);
const int minor_1 = HIWORD(fixed_file_info->dwFileVersionLS);
@@ -348,17 +347,14 @@ bool BadAMDVulkanDriverVersion() {
}
bool BadVulkanDllVersion() {
- std::unique_ptr<FileVersionInfoWin> file_version_info(
- static_cast<FileVersionInfoWin*>(
- FileVersionInfoWin::CreateFileVersionInfo(
- base::FilePath(FILE_PATH_LITERAL("vulkan-1.dll")))));
-
+ std::unique_ptr<FileVersionInfoWin> file_version_info =
+ FileVersionInfoWin::CreateFileVersionInfoWin(
+ base::FilePath(FILE_PATH_LITERAL("vulkan-1.dll")));
if (!file_version_info)
return false;
const VS_FIXEDFILEINFO* fixed_file_info =
- static_cast<FileVersionInfoWin*>(file_version_info.get())
- ->fixed_file_info();
+ file_version_info->fixed_file_info();
const int major = HIWORD(fixed_file_info->dwFileVersionMS);
const int minor = LOWORD(fixed_file_info->dwFileVersionMS);
const int build_1 = HIWORD(fixed_file_info->dwFileVersionLS);
diff --git a/chromium/gpu/config/gpu_lists_version.h b/chromium/gpu/config/gpu_lists_version.h
index c62f3f4ecf1..dea28bec633 100644
--- a/chromium/gpu/config/gpu_lists_version.h
+++ b/chromium/gpu/config/gpu_lists_version.h
@@ -3,6 +3,6 @@
#ifndef GPU_CONFIG_GPU_LISTS_VERSION_H_
#define GPU_CONFIG_GPU_LISTS_VERSION_H_
-#define GPU_LISTS_VERSION "5540b58bbc57c34c02929d9c7997fed8b4737efb"
+#define GPU_LISTS_VERSION "7e9e689503f506e8e943b05c4f50c0c3e6597794"
#endif // GPU_CONFIG_GPU_LISTS_VERSION_H_
diff --git a/chromium/gpu/config/gpu_preferences.h b/chromium/gpu/config/gpu_preferences.h
index 96f525f0624..b5d39972765 100644
--- a/chromium/gpu/config/gpu_preferences.h
+++ b/chromium/gpu/config/gpu_preferences.h
@@ -56,12 +56,6 @@ struct GPU_EXPORT GpuPreferences {
// ===================================
// Settings from //content/public/common/content_switches.h
- // Runs the renderer and plugins in the same process as the browser.
- bool single_process = false;
-
- // Run the GPU process as a thread in the browser process.
- bool in_process_gpu = false;
-
// Disables hardware acceleration of video decode, where available.
bool disable_accelerated_video_decode = false;
@@ -101,9 +95,6 @@ struct GPU_EXPORT GpuPreferences {
bool log_gpu_control_list_decisions = false;
- // Enable exporting of events to ETW (on Windows).
- bool enable_trace_export_events_to_etw = false;
-
// ===================================
// Settings from //gpu/command_buffer/service/gpu_switches.cc
diff --git a/chromium/gpu/config/gpu_preferences_unittest.cc b/chromium/gpu/config/gpu_preferences_unittest.cc
index 260ef9ce36f..14708613b86 100644
--- a/chromium/gpu/config/gpu_preferences_unittest.cc
+++ b/chromium/gpu/config/gpu_preferences_unittest.cc
@@ -14,8 +14,6 @@ namespace gpu {
namespace {
void CheckGpuPreferencesEqual(GpuPreferences left, GpuPreferences right) {
- EXPECT_EQ(left.single_process, right.single_process);
- EXPECT_EQ(left.in_process_gpu, right.in_process_gpu);
EXPECT_EQ(left.disable_accelerated_video_decode,
right.disable_accelerated_video_decode);
EXPECT_EQ(left.disable_accelerated_video_encode,
@@ -35,8 +33,6 @@ void CheckGpuPreferencesEqual(GpuPreferences left, GpuPreferences right) {
right.disable_software_rasterizer);
EXPECT_EQ(left.log_gpu_control_list_decisions,
right.log_gpu_control_list_decisions);
- EXPECT_EQ(left.enable_trace_export_events_to_etw,
- right.enable_trace_export_events_to_etw);
EXPECT_EQ(left.compile_shader_always_succeeds,
right.compile_shader_always_succeeds);
EXPECT_EQ(left.disable_gl_error_limit, right.disable_gl_error_limit);
@@ -112,8 +108,6 @@ TEST(GpuPreferencesTest, EncodeDecode) {
prefs_mojom.name = value; \
EXPECT_EQ(input_prefs.name, prefs_mojom.name);
- GPU_PREFERENCES_FIELD(single_process, true)
- GPU_PREFERENCES_FIELD(in_process_gpu, true)
GPU_PREFERENCES_FIELD(disable_accelerated_video_decode, true)
GPU_PREFERENCES_FIELD(disable_accelerated_video_encode, true)
GPU_PREFERENCES_FIELD(gpu_startup_dialog, true)
@@ -127,7 +121,6 @@ TEST(GpuPreferencesTest, EncodeDecode) {
GPU_PREFERENCES_FIELD(enable_media_foundation_vea_on_windows7, true)
GPU_PREFERENCES_FIELD(disable_software_rasterizer, true)
GPU_PREFERENCES_FIELD(log_gpu_control_list_decisions, true)
- GPU_PREFERENCES_FIELD(enable_trace_export_events_to_etw, true)
GPU_PREFERENCES_FIELD(compile_shader_always_succeeds, true)
GPU_PREFERENCES_FIELD(disable_gl_error_limit, true)
GPU_PREFERENCES_FIELD(disable_glsl_translator, true)
diff --git a/chromium/gpu/config/gpu_test_expectations_parser_unittest.cc b/chromium/gpu/config/gpu_test_expectations_parser_unittest.cc
index 72acc7c9af3..d1000f68bbd 100644
--- a/chromium/gpu/config/gpu_test_expectations_parser_unittest.cc
+++ b/chromium/gpu/config/gpu_test_expectations_parser_unittest.cc
@@ -345,9 +345,9 @@ TEST_F(GPUTestExpectationsParserTest, MultipleAPIsConflict) {
EXPECT_NE(0u, parser.GetErrorMessages().size());
}
-INSTANTIATE_TEST_CASE_P(GPUTestExpectationsParser,
- GPUTestExpectationsParserParamTest,
- ::testing::ValuesIn(kOSVersionsWithFamily));
+INSTANTIATE_TEST_SUITE_P(GPUTestExpectationsParser,
+ GPUTestExpectationsParserParamTest,
+ ::testing::ValuesIn(kOSVersionsWithFamily));
} // namespace gpu
diff --git a/chromium/gpu/config/gpu_util.cc b/chromium/gpu/config/gpu_util.cc
index 40115b2bcb0..7c5e2d9a28b 100644
--- a/chromium/gpu/config/gpu_util.cc
+++ b/chromium/gpu/config/gpu_util.cc
@@ -51,9 +51,7 @@ GpuFeatureStatus GetAndroidSurfaceControlFeatureStatus(
if (!gpu_preferences.enable_android_surface_control)
return kGpuFeatureStatusDisabled;
- if (!gl::SurfaceControl::IsSupported())
- return kGpuFeatureStatusDisabled;
-
+ DCHECK(gl::SurfaceControl::IsSupported());
return kGpuFeatureStatusEnabled;
#endif
}
@@ -232,6 +230,10 @@ void AppendWorkaroundsToCommandLine(const GpuFeatureInfo& gpu_feature_info,
if (gpu_feature_info.IsWorkaroundEnabled(DISABLE_ES3_GL_CONTEXT)) {
command_line->AppendSwitch(switches::kDisableES3GLContext);
}
+ if (gpu_feature_info.IsWorkaroundEnabled(
+ DISABLE_ES3_GL_CONTEXT_FOR_TESTING)) {
+ command_line->AppendSwitch(switches::kDisableES3GLContextForTesting);
+ }
#if defined(OS_WIN)
if (gpu_feature_info.IsWorkaroundEnabled(DISABLE_DIRECT_COMPOSITION)) {
command_line->AppendSwitch(switches::kDisableDirectComposition);
diff --git a/chromium/gpu/config/gpu_workaround_list.txt b/chromium/gpu/config/gpu_workaround_list.txt
index c8e2cbadd83..b8bb9cf53ae 100644
--- a/chromium/gpu/config/gpu_workaround_list.txt
+++ b/chromium/gpu/config/gpu_workaround_list.txt
@@ -26,6 +26,7 @@ disable_direct_composition_layers
disable_discard_framebuffer
disable_dxgi_zero_copy_video
disable_es3_gl_context
+disable_es3_gl_context_for_testing
disable_ext_draw_buffers
disable_framebuffer_cmaa
disable_gl_rgb_format
@@ -87,6 +88,7 @@ restore_scissor_on_fbo_change
rewrite_do_while_loops
rewrite_float_unary_minus_operator
rewrite_texelfetchoffset_to_texelfetch
+round_down_uniform_bind_buffer_range_size
scalarize_vec_and_mat_constructor_args
set_zero_level_before_generating_mipmap
simulate_out_of_memory_on_large_textures
@@ -107,3 +109,6 @@ use_virtualized_gl_contexts
validate_multisample_buffer_allocation
wake_up_gpu_before_drawing
use_copyteximage2d_instead_of_readpixels_on_multisampled_textures
+disable_copy_tex_image_2d_rgb10_a2_adreno
+disable_copy_tex_image_2d_rgb10_a2_tegra
+use_eqaa_storage_samples_2
diff --git a/chromium/gpu/config/nvml_info.cc b/chromium/gpu/config/nvml_info.cc
index 2233380b5d3..066d6c26f93 100644
--- a/chromium/gpu/config/nvml_info.cc
+++ b/chromium/gpu/config/nvml_info.cc
@@ -43,10 +43,8 @@ bool GetNvmlDeviceInfo(uint32_t pci_device_id,
}
dll_path = dll_path.Append(L"NVIDIA Corporation\\NVSMI\\nvml.dll");
- std::unique_ptr<FileVersionInfoWin> file_version_info(
- static_cast<FileVersionInfoWin*>(
- FileVersionInfoWin::CreateFileVersionInfo(dll_path)));
-
+ std::unique_ptr<FileVersionInfoWin> file_version_info =
+ FileVersionInfoWin::CreateFileVersionInfoWin(dll_path);
if (!file_version_info) {
return false;
}
diff --git a/chromium/gpu/config/software_rendering_list.json b/chromium/gpu/config/software_rendering_list.json
index abb9a898f3a..5deceb6c29e 100644
--- a/chromium/gpu/config/software_rendering_list.json
+++ b/chromium/gpu/config/software_rendering_list.json
@@ -1364,7 +1364,7 @@
},
{
"id": 137,
- "description": "GPU rasterization on CrOS is blacklisted on anything but Intel, Mali, Imagination, or AMD GPUs for now.",
+ "description": "GPU rasterization on CrOS is blacklisted on anything but Intel, Imagination, or AMD GPUs for now.",
"cr_bugs": [684094],
"os": {
"type": "chromeos"
@@ -1374,7 +1374,6 @@
],
"exceptions": [
{ "vendor_id": "0x8086" },
- { "gl_renderer": "Mali-T.*" },
{ "gl_renderer": "PowerVR.*" },
{ "vendor_id": "0x1002" }
]
@@ -1690,15 +1689,6 @@
]
},
{
- "id": 158,
- "cr_bugs": [829435],
- "description": "OOP rasterization on top of ANGLE not supported",
- "gl_renderer": "ANGLE.*",
- "features": [
- "oop_rasterization"
- ]
- },
- {
"id": 159,
"cr_bugs": [902247],
"description": "Disallow OpenGL use on Mac with old NVIDIA GPUs",
@@ -1724,6 +1714,23 @@
"features": [
"all"
]
+ },
+ {
+ "id": 161,
+ "cr_bugs": [890688],
+ "description": "Newer Mesa drivers experience visual corruption on very old hardware",
+ "os": {
+ "type": "linux"
+ },
+ "vendor_id": "0x8086",
+ "device_id": ["0x2a42"],
+ "driver_version": {
+ "op": "=",
+ "value": "18.1.7"
+ },
+ "features": [
+ "all"
+ ]
}
]
}
diff --git a/chromium/gpu/dawn_end2end_tests_main.cc b/chromium/gpu/dawn_end2end_tests_main.cc
index 1188ccb5fce..d6a85ab1fa4 100644
--- a/chromium/gpu/dawn_end2end_tests_main.cc
+++ b/chromium/gpu/dawn_end2end_tests_main.cc
@@ -19,10 +19,15 @@ int RunHelper(base::TestSuite* test_suite) {
} // namespace
+// Definition located in third_party/dawn/src/tests/DawnTest.h
+// Forward declared here to avoid pulling in the Dawn headers.
+void InitDawnEnd2EndTestEnvironment(int argc, char** argv);
+
int main(int argc, char** argv) {
base::CommandLine::Init(argc, argv);
testing::InitGoogleMock(&argc, argv);
base::TestSuite test_suite(argc, argv);
+ InitDawnEnd2EndTestEnvironment(argc, argv);
int rt = base::LaunchUnitTestsWithOptions(
argc, argv,
1, // Run tests serially.
diff --git a/chromium/gpu/gles2_conform_support/BUILD.gn b/chromium/gpu/gles2_conform_support/BUILD.gn
index 3326ce747b7..24a0bd3dabd 100644
--- a/chromium/gpu/gles2_conform_support/BUILD.gn
+++ b/chromium/gpu/gles2_conform_support/BUILD.gn
@@ -390,6 +390,10 @@ if (internal_gles2_conform_tests) {
# Also compile the sources generated by this action.
sources += get_target_outputs(":generate_gles2_conform_embedded_data")
+ # Do not apply Chromium code rules to this third-party code.
+ configs -= [ "//build/config/compiler:chromium_code" ]
+ configs += [ "//build/config/compiler:no_chromium_code" ]
+
defines = [
"GTF_API=GTF_GLES20",
"HKEMBEDDEDFILESYSTEM",
diff --git a/chromium/gpu/gles2_conform_support/egl/context.cc b/chromium/gpu/gles2_conform_support/egl/context.cc
index 06f61d4c10c..0101e171ec4 100644
--- a/chromium/gpu/gles2_conform_support/egl/context.cc
+++ b/chromium/gpu/gles2_conform_support/egl/context.cc
@@ -16,7 +16,6 @@
#include "gpu/command_buffer/service/memory_tracking.h"
#include "gpu/command_buffer/service/service_discardable_manager.h"
#include "gpu/command_buffer/service/shared_image_manager.h"
-#include "gpu/command_buffer/service/transfer_buffer_manager.h"
#include "gpu/gles2_conform_support/egl/config.h"
#include "gpu/gles2_conform_support/egl/display.h"
#include "gpu/gles2_conform_support/egl/surface.h"
@@ -267,10 +266,7 @@ bool Context::CreateService(gl::GLSurface* gl_surface) {
nullptr /* progress_reporter */, gpu_feature_info, &discardable_manager_,
&passthrough_discardable_manager_, &shared_image_manager_));
- transfer_buffer_manager_ =
- std::make_unique<gpu::TransferBufferManager>(nullptr);
- std::unique_ptr<gpu::CommandBufferDirect> command_buffer(
- new gpu::CommandBufferDirect(transfer_buffer_manager_.get()));
+ auto command_buffer = std::make_unique<gpu::CommandBufferDirect>();
std::unique_ptr<gpu::gles2::GLES2Decoder> decoder(
gpu::gles2::GLES2Decoder::Create(command_buffer.get(),
diff --git a/chromium/gpu/gles2_conform_support/egl/context.h b/chromium/gpu/gles2_conform_support/egl/context.h
index 914d3bd0918..9a99d9fd413 100644
--- a/chromium/gpu/gles2_conform_support/egl/context.h
+++ b/chromium/gpu/gles2_conform_support/egl/context.h
@@ -111,7 +111,6 @@ class Context : public base::RefCountedThreadSafe<Context>,
bool is_current_in_some_thread_;
bool is_destroyed_;
const gpu::GpuDriverBugWorkarounds gpu_driver_bug_workarounds_;
- std::unique_ptr<gpu::TransferBufferManager> transfer_buffer_manager_;
std::unique_ptr<gpu::CommandBufferDirect> command_buffer_;
std::unique_ptr<gpu::gles2::GLES2CmdHelper> gles2_cmd_helper_;
diff --git a/chromium/gpu/gles2_conform_support/gles2_conform_test.cc b/chromium/gpu/gles2_conform_support/gles2_conform_test.cc
index 40ea39108ea..8245d862b7d 100644
--- a/chromium/gpu/gles2_conform_support/gles2_conform_test.cc
+++ b/chromium/gpu/gles2_conform_support/gles2_conform_test.cc
@@ -10,6 +10,7 @@
#include <string>
#include "base/base_paths.h"
+#include "base/bind.h"
#include "base/command_line.h"
#include "base/files/file_path.h"
#include "base/files/file_util.h"
diff --git a/chromium/gpu/ipc/client/BUILD.gn b/chromium/gpu/ipc/client/BUILD.gn
index acab7a514be..5a9ee4b7902 100644
--- a/chromium/gpu/ipc/client/BUILD.gn
+++ b/chromium/gpu/ipc/client/BUILD.gn
@@ -37,6 +37,7 @@ source_set("ipc_client_sources") {
"//gpu/command_buffer/common:common_sources",
"//gpu/config:config_sources",
"//gpu/ipc/common:ipc_common_sources",
+ "//media/filters:jpeg_parser",
"//mojo/public/cpp/system",
"//ui/gfx:color_space",
"//ui/gfx/geometry",
diff --git a/chromium/gpu/ipc/client/DEPS b/chromium/gpu/ipc/client/DEPS
index 1caa6b11d40..7a79c6ac010 100644
--- a/chromium/gpu/ipc/client/DEPS
+++ b/chromium/gpu/ipc/client/DEPS
@@ -10,8 +10,10 @@ specific_include_rules = {
"gpu_in_process_context_tests.cc": [
"+components/viz/test/test_gpu_memory_buffer_manager.h",
],
+ "image_decode_accelerator_proxy.cc": [
+ "+media/filters/jpeg_parser.h",
+ ],
"raster_in_process_context_tests.cc": [
- "+cc/paint/color_space_transfer_cache_entry.h",
"+components/viz/common/resources/resource_format.h",
"+components/viz/common/resources/resource_format_utils.h",
"+components/viz/test/test_gpu_memory_buffer_manager.h",
diff --git a/chromium/gpu/ipc/client/command_buffer_proxy_impl.cc b/chromium/gpu/ipc/client/command_buffer_proxy_impl.cc
index d1405a8425c..1f72c9aaee9 100644
--- a/chromium/gpu/ipc/client/command_buffer_proxy_impl.cc
+++ b/chromium/gpu/ipc/client/command_buffer_proxy_impl.cc
@@ -8,6 +8,7 @@
#include <utility>
#include <vector>
+#include "base/bind.h"
#include "base/callback.h"
#include "base/command_line.h"
#include "base/location.h"
@@ -149,6 +150,7 @@ bool CommandBufferProxyImpl::OnMessageReceived(const IPC::Message& message) {
IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_BufferPresented, OnBufferPresented);
IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_GetGpuFenceHandleComplete,
OnGetGpuFenceHandleComplete);
+ IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_ReturnData, OnReturnData);
IPC_MESSAGE_UNHANDLED(handled = false)
IPC_END_MESSAGE_MAP()
@@ -631,6 +633,12 @@ void CommandBufferProxyImpl::OnGetGpuFenceHandleComplete(
std::move(callback).Run(std::move(gpu_fence));
}
+void CommandBufferProxyImpl::OnReturnData(const std::vector<uint8_t>& data) {
+ if (gpu_control_client_) {
+ gpu_control_client_->OnGpuControlReturnData(data);
+ }
+}
+
void CommandBufferProxyImpl::TakeFrontBuffer(const gpu::Mailbox& mailbox) {
CheckLock();
base::AutoLock lock(last_state_lock_);
diff --git a/chromium/gpu/ipc/client/command_buffer_proxy_impl.h b/chromium/gpu/ipc/client/command_buffer_proxy_impl.h
index 6b0629bf466..6464b6bd6b7 100644
--- a/chromium/gpu/ipc/client/command_buffer_proxy_impl.h
+++ b/chromium/gpu/ipc/client/command_buffer_proxy_impl.h
@@ -184,6 +184,7 @@ class GPU_EXPORT CommandBufferProxyImpl : public gpu::CommandBuffer,
const gfx::PresentationFeedback& feedback);
void OnGetGpuFenceHandleComplete(uint32_t gpu_fence_id,
const gfx::GpuFenceHandle&);
+ void OnReturnData(const std::vector<uint8_t>& data);
// Try to read an updated copy of the state from shared memory, and calls
// OnGpuStateError() if the new state has an error.
diff --git a/chromium/gpu/ipc/client/gpu_context_tests.h b/chromium/gpu/ipc/client/gpu_context_tests.h
index 9c5d563bab6..a45ae619fc4 100644
--- a/chromium/gpu/ipc/client/gpu_context_tests.h
+++ b/chromium/gpu/ipc/client/gpu_context_tests.h
@@ -50,7 +50,7 @@ CONTEXT_TEST_F(SignalTest, BasicSignalSyncTokenTest) {
gl_->GenSyncTokenCHROMIUM(sync_token.GetData());
TestSignalSyncToken(sync_token);
-};
+}
CONTEXT_TEST_F(SignalTest, EmptySignalSyncTokenTest) {
#if defined(OS_WIN)
@@ -64,7 +64,7 @@ CONTEXT_TEST_F(SignalTest, EmptySignalSyncTokenTest) {
// immediately.
gpu::SyncToken sync_token;
TestSignalSyncToken(sync_token);
-};
+}
CONTEXT_TEST_F(SignalTest, InvalidSignalSyncTokenTest) {
#if defined(OS_WIN)
@@ -80,7 +80,7 @@ CONTEXT_TEST_F(SignalTest, InvalidSignalSyncTokenTest) {
gpu::CommandBufferId::FromUnsafeValue(1297824234),
9123743439);
TestSignalSyncToken(sync_token);
-};
+}
CONTEXT_TEST_F(SignalTest, BasicSignalQueryTest) {
#if defined(OS_WIN)
@@ -97,7 +97,7 @@ CONTEXT_TEST_F(SignalTest, BasicSignalQueryTest) {
gl_->EndQueryEXT(GL_COMMANDS_ISSUED_CHROMIUM);
TestSignalQuery(query);
gl_->DeleteQueriesEXT(1, &query);
-};
+}
CONTEXT_TEST_F(SignalTest, SignalQueryUnboundTest) {
#if defined(OS_WIN)
@@ -111,7 +111,7 @@ CONTEXT_TEST_F(SignalTest, SignalQueryUnboundTest) {
gl_->GenQueriesEXT(1, &query);
TestSignalQuery(query);
gl_->DeleteQueriesEXT(1, &query);
-};
+}
CONTEXT_TEST_F(SignalTest, InvalidSignalQueryUnboundTest) {
#if defined(OS_WIN)
@@ -129,7 +129,7 @@ CONTEXT_TEST_F(SignalTest, InvalidSignalQueryUnboundTest) {
TestSignalQuery(928729083);
TestSignalQuery(928729082);
TestSignalQuery(928729081);
-};
+}
// The GpuFenceTest doesn't currently work on ChromeOS, apparently
// due to inconsistent initialization of InProcessCommandBuffer which
@@ -191,6 +191,6 @@ CONTEXT_TEST_F(GpuFenceTest, BasicGpuFenceTest) {
#endif // defined(OS_ANDROID)
-}; // namespace
+} // namespace
#endif // GPU_IPC_CLIENT_GPU_CONTEXT_TESTS_H_
diff --git a/chromium/gpu/ipc/client/image_decode_accelerator_proxy.cc b/chromium/gpu/ipc/client/image_decode_accelerator_proxy.cc
index f0b167f7c10..d992ee9eb34 100644
--- a/chromium/gpu/ipc/client/image_decode_accelerator_proxy.cc
+++ b/chromium/gpu/ipc/client/image_decode_accelerator_proxy.cc
@@ -4,23 +4,160 @@
#include "gpu/ipc/client/image_decode_accelerator_proxy.h"
+#include <string.h>
+
+#include <algorithm>
+#include <utility>
#include <vector>
+#include "base/logging.h"
+#include "base/numerics/safe_conversions.h"
#include "gpu/command_buffer/common/constants.h"
+#include "gpu/config/gpu_info.h"
#include "gpu/ipc/client/gpu_channel_host.h"
#include "gpu/ipc/common/command_buffer_id.h"
#include "gpu/ipc/common/gpu_messages.h"
+#include "media/filters/jpeg_parser.h"
#include "ui/gfx/color_space.h"
#include "ui/gfx/geometry/size.h"
namespace gpu {
+namespace {
+
+bool IsJpegImage(base::span<const uint8_t> encoded_data) {
+ if (encoded_data.size() < 3u)
+ return false;
+ return memcmp("\xFF\xD8\xFF", encoded_data.data(), 3u) == 0;
+}
+
+ImageDecodeAcceleratorType GetImageType(
+ base::span<const uint8_t> encoded_data) {
+ static_assert(static_cast<int>(ImageDecodeAcceleratorType::kMaxValue) == 1,
+ "GetImageType() must be adapted to support all image types in "
+ "ImageDecodeAcceleratorType");
+
+ // Currently, only JPEG images are supported.
+ if (IsJpegImage(encoded_data))
+ return ImageDecodeAcceleratorType::kJpeg;
+
+ return ImageDecodeAcceleratorType::kUnknown;
+}
+
+bool GetJpegSubsampling(const media::JpegParseResult& parse_result,
+ ImageDecodeAcceleratorSubsampling* subsampling) {
+ static_assert(
+ static_cast<int>(ImageDecodeAcceleratorSubsampling::kMaxValue) == 1,
+ "GetJpegSubsampling() must be adapted to support all "
+ "subsampling factors in ImageDecodeAcceleratorSubsampling");
+
+ // Currently, only 3 components are supported (this excludes, for example,
+ // grayscale and CMYK JPEGs).
+ if (parse_result.frame_header.num_components != 3u)
+ return false;
+
+ const uint8_t comp0_h =
+ parse_result.frame_header.components[0].horizontal_sampling_factor;
+ const uint8_t comp0_v =
+ parse_result.frame_header.components[0].vertical_sampling_factor;
+ const uint8_t comp1_h =
+ parse_result.frame_header.components[1].horizontal_sampling_factor;
+ const uint8_t comp1_v =
+ parse_result.frame_header.components[1].vertical_sampling_factor;
+ const uint8_t comp2_h =
+ parse_result.frame_header.components[2].horizontal_sampling_factor;
+ const uint8_t comp2_v =
+ parse_result.frame_header.components[2].vertical_sampling_factor;
+
+ if (comp0_h == 2u && (comp1_h == 1u && comp1_v == 1u) &&
+ (comp2_h == 1u && comp2_v == 1u)) {
+ if (comp0_v == 2u) {
+ *subsampling = ImageDecodeAcceleratorSubsampling::k420;
+ return true;
+ } else if (comp0_v == 1u) {
+ *subsampling = ImageDecodeAcceleratorSubsampling::k422;
+ return true;
+ }
+ }
+
+ return false;
+}
+
+bool IsSupportedJpegImage(
+ base::span<const uint8_t> encoded_data,
+ const ImageDecodeAcceleratorSupportedProfile& supported_profile) {
+ DCHECK(IsJpegImage(encoded_data));
+ DCHECK_EQ(ImageDecodeAcceleratorType::kJpeg, supported_profile.image_type);
+
+ // First, parse the JPEG file. This fails for progressive JPEGs (which we
+ // don't support anyway).
+ media::JpegParseResult parse_result;
+ if (!media::ParseJpegPicture(encoded_data.data(), encoded_data.size(),
+ &parse_result)) {
+ return false;
+ }
+
+ // Now, check the chroma subsampling format.
+ ImageDecodeAcceleratorSubsampling subsampling;
+ if (!GetJpegSubsampling(parse_result, &subsampling))
+ return false;
+ if (std::find(supported_profile.subsamplings.cbegin(),
+ supported_profile.subsamplings.cend(),
+ subsampling) == supported_profile.subsamplings.cend()) {
+ return false;
+ }
+
+ // Now, check the dimensions.
+ const int encoded_width =
+ base::strict_cast<int>(parse_result.frame_header.coded_width);
+ const int encoded_height =
+ base::strict_cast<int>(parse_result.frame_header.coded_height);
+ if (encoded_width < supported_profile.min_encoded_dimensions.width() ||
+ encoded_height < supported_profile.min_encoded_dimensions.height() ||
+ encoded_width > supported_profile.max_encoded_dimensions.width() ||
+ encoded_height > supported_profile.max_encoded_dimensions.height()) {
+ return false;
+ }
+
+ return true;
+}
+
+} // namespace
+
ImageDecodeAcceleratorProxy::ImageDecodeAcceleratorProxy(GpuChannelHost* host,
int32_t route_id)
: host_(host), route_id_(route_id) {}
ImageDecodeAcceleratorProxy::~ImageDecodeAcceleratorProxy() {}
+bool ImageDecodeAcceleratorProxy::IsImageSupported(
+ base::span<const uint8_t> encoded_data) const {
+ DCHECK(host_);
+
+ const ImageDecodeAcceleratorType image_type = GetImageType(encoded_data);
+ if (image_type == ImageDecodeAcceleratorType::kUnknown)
+ return false;
+
+ // Find the image decode accelerator supported profile according to the type
+ // of the image.
+ const std::vector<ImageDecodeAcceleratorSupportedProfile>& profiles =
+ host_->gpu_info().image_decode_accelerator_supported_profiles;
+ auto profile_it = std::find_if(
+ profiles.cbegin(), profiles.cend(),
+ [image_type](const ImageDecodeAcceleratorSupportedProfile& profile) {
+ return profile.image_type == image_type;
+ });
+ if (profile_it == profiles.cend())
+ return false;
+
+ // Validate the image according to that profile.
+ if (image_type == ImageDecodeAcceleratorType::kJpeg)
+ return IsSupportedJpegImage(encoded_data, *profile_it);
+
+ NOTREACHED();
+ return false;
+}
+
SyncToken ImageDecodeAcceleratorProxy::ScheduleImageDecode(
base::span<const uint8_t> encoded_data,
const gfx::Size& output_size,
@@ -28,11 +165,13 @@ SyncToken ImageDecodeAcceleratorProxy::ScheduleImageDecode(
uint32_t transfer_cache_entry_id,
int32_t discardable_handle_shm_id,
uint32_t discardable_handle_shm_offset,
+ uint64_t discardable_handle_release_count,
const gfx::ColorSpace& target_color_space,
bool needs_mips) {
DCHECK(host_);
DCHECK_EQ(host_->channel_id(),
ChannelIdFromCommandBufferId(raster_decoder_command_buffer_id));
+ DCHECK(IsImageSupported(encoded_data));
GpuChannelMsg_ScheduleImageDecode_Params params;
params.encoded_data =
@@ -43,13 +182,18 @@ SyncToken ImageDecodeAcceleratorProxy::ScheduleImageDecode(
params.transfer_cache_entry_id = transfer_cache_entry_id;
params.discardable_handle_shm_id = discardable_handle_shm_id;
params.discardable_handle_shm_offset = discardable_handle_shm_offset;
+ params.discardable_handle_release_count = discardable_handle_release_count;
params.target_color_space = target_color_space;
params.needs_mips = needs_mips;
base::AutoLock lock(lock_);
- uint64_t release_count = ++next_release_count_;
+ const uint64_t release_count = ++next_release_count_;
// Note: we send the message under the lock to guarantee monotonicity of the
// release counts as seen by the service.
+ // The EnsureFlush() call makes sure that the sync token corresponding to
+ // |discardable_handle_release_count| is visible to the service before
+ // processing the image decode request.
+ host_->EnsureFlush(UINT32_MAX);
host_->Send(new GpuChannelMsg_ScheduleImageDecode(
route_id_, std::move(params), release_count));
return SyncToken(
diff --git a/chromium/gpu/ipc/client/image_decode_accelerator_proxy.h b/chromium/gpu/ipc/client/image_decode_accelerator_proxy.h
index 4b183b9e396..b40a3feda80 100644
--- a/chromium/gpu/ipc/client/image_decode_accelerator_proxy.h
+++ b/chromium/gpu/ipc/client/image_decode_accelerator_proxy.h
@@ -29,13 +29,18 @@ class GpuChannelHost;
// (1) Create a locked ClientImageTransferCacheEntry without a backing
// SkPixmap. This entry should not be serialized over the command buffer.
//
-// (2) Call ScheduleImageDecode().
+// (2) Insert a sync token in the command buffer that is released after the
+// discardable handle's buffer corresponding to the transfer cache entry has
+// been registered.
//
-// (3) Issue a server wait on the sync token returned in step (2).
+// (3) Call ScheduleImageDecode(). The release count of the sync token from the
+// previous step is passed for the |discardable_handle_release_count|
+// parameter.
+//
+// (4) Issue a server wait on the sync token returned in step (3).
//
// When the service is done with the decode, a ServiceImageTransferCacheEntry
-// will be created/locked with the decoded data and the sync token is
-// released.
+// will be created/locked with the decoded data and the sync token is released.
//
// Objects of this class are thread-safe.
//
@@ -45,14 +50,25 @@ class ImageDecodeAcceleratorProxy : public ImageDecodeAcceleratorInterface {
ImageDecodeAcceleratorProxy(GpuChannelHost* host, int32_t route_id);
~ImageDecodeAcceleratorProxy() override;
+ // Determines if an encoded image is supported by the hardware accelerator.
+ // The ScheduleImageDecode() method should only be called for images for which
+ // IsImageSupported() returns true. Otherwise, the client faces a GPU channel
+ // teardown if the decode fails.
+ bool IsImageSupported(base::span<const uint8_t> encoded_data) const override;
+
// Schedules a hardware-accelerated image decode on the GPU process. The image
- // in |encoded_data| is decoded and scaled to |output_size|. Upon completion,
- // a service-side transfer cache entry will be created with the decoded data
- // using |transfer_cache_entry_id|, |discardable_handle_shm_id|, and
+ // in |encoded_data| is decoded and scaled to |output_size|. Upon completion
+ // and after the sync token corresponding to
+ // |discardable_handle_release_count| has been released, a service-side
+ // transfer cache entry will be created with the decoded data using
+ // |transfer_cache_entry_id|, |discardable_handle_shm_id|, and
// |discardable_handle_shm_offset|. The |raster_decoder_command_buffer_id| is
// used to look up the appropriate command buffer and create the transfer
- // cache entry correctly. Returns a sync token that will be released after the
- // decode is done and the service-side transfer cache entry is created.
+ // cache entry correctly. Note that it is assumed that
+ // |discardable_handle_release_count| is associated to
+ // |raster_decoder_command_buffer_id|. Returns a sync token that will be
+ // released after the decode is done and the service-side transfer cache entry
+ // is created.
SyncToken ScheduleImageDecode(
base::span<const uint8_t> encoded_data,
const gfx::Size& output_size,
@@ -60,6 +76,7 @@ class ImageDecodeAcceleratorProxy : public ImageDecodeAcceleratorInterface {
uint32_t transfer_cache_entry_id,
int32_t discardable_handle_shm_id,
uint32_t discardable_handle_shm_offset,
+ uint64_t discardable_handle_release_count,
const gfx::ColorSpace& target_color_space,
bool needs_mips) override;
diff --git a/chromium/gpu/ipc/client/raster_in_process_context_tests.cc b/chromium/gpu/ipc/client/raster_in_process_context_tests.cc
index f6d52f2d719..520066df3f0 100644
--- a/chromium/gpu/ipc/client/raster_in_process_context_tests.cc
+++ b/chromium/gpu/ipc/client/raster_in_process_context_tests.cc
@@ -5,7 +5,6 @@
#include <memory>
#include "build/build_config.h"
-#include "cc/paint/color_space_transfer_cache_entry.h"
#include "components/viz/common/resources/resource_format.h"
#include "components/viz/test/test_gpu_memory_buffer_manager.h"
#include "gpu/command_buffer/client/raster_implementation.h"
@@ -105,10 +104,9 @@ TEST_F(RasterInProcessCommandBufferTest,
ri_->WaitSyncTokenCHROMIUM(sii->GenUnverifiedSyncToken().GetConstData());
// Call BeginRasterCHROMIUM.
- cc::RasterColorSpace raster_color_space(color_space, 0);
ri_->BeginRasterCHROMIUM(/*sk_color=*/0, /*msaa_sample_count=*/0,
- /*can_use_lcd_text=*/false,
- raster_color_space, mailbox.name);
+ /*can_use_lcd_text=*/false, color_space,
+ mailbox.name);
EXPECT_EQ(static_cast<GLenum>(GL_NO_ERROR), ri_->GetError());
// Should flag an error this command is not allowed between a Begin and
diff --git a/chromium/gpu/ipc/common/gpu_command_buffer_traits_multi.h b/chromium/gpu/ipc/common/gpu_command_buffer_traits_multi.h
index b6a8e1a028d..be7eae4fc34 100644
--- a/chromium/gpu/ipc/common/gpu_command_buffer_traits_multi.h
+++ b/chromium/gpu/ipc/common/gpu_command_buffer_traits_multi.h
@@ -195,6 +195,7 @@ IPC_STRUCT_TRAITS_BEGIN(gpu::ContextCreationAttribs)
IPC_STRUCT_TRAITS_MEMBER(enable_raster_interface)
IPC_STRUCT_TRAITS_MEMBER(enable_oop_rasterization)
IPC_STRUCT_TRAITS_MEMBER(enable_swap_timestamps_if_supported)
+ IPC_STRUCT_TRAITS_MEMBER(backed_by_surface_texture)
IPC_STRUCT_TRAITS_END()
IPC_STRUCT_TRAITS_BEGIN(gpu::GpuMemoryBufferFormatSet)
diff --git a/chromium/gpu/ipc/common/gpu_info.mojom b/chromium/gpu/ipc/common/gpu_info.mojom
index 654b3d1b5c6..fb42f0746cb 100644
--- a/chromium/gpu/ipc/common/gpu_info.mojom
+++ b/chromium/gpu/ipc/common/gpu_info.mojom
@@ -76,6 +76,26 @@ struct VideoEncodeAcceleratorSupportedProfile {
uint32 max_framerate_denominator;
};
+// gpu::ImageDecodeAcceleratorType
+enum ImageDecodeAcceleratorType {
+ kJpeg,
+ kUnknown,
+};
+
+// gpu::ImageDecodeAcceleratorSubsampling
+enum ImageDecodeAcceleratorSubsampling {
+ k420,
+ k422,
+};
+
+// gpu::ImageDecodeAcceleratorSupportedProfile
+struct ImageDecodeAcceleratorSupportedProfile {
+ ImageDecodeAcceleratorType image_type;
+ gfx.mojom.Size min_encoded_dimensions;
+ gfx.mojom.Size max_encoded_dimensions;
+ array<ImageDecodeAcceleratorSubsampling> subsamplings;
+};
+
// gpu::OverlayFormat
[EnableIf=is_win]
enum OverlayFormat {
@@ -142,6 +162,10 @@ struct GpuInfo {
array<VideoEncodeAcceleratorSupportedProfile>
video_encode_accelerator_supported_profiles;
bool jpeg_decode_accelerator_supported;
+
+ array<ImageDecodeAcceleratorSupportedProfile>
+ image_decode_accelerator_supported_profiles;
+
uint64 system_visual;
uint64 rgba_visual;
bool oop_rasterization_supported;
diff --git a/chromium/gpu/ipc/common/gpu_info.typemap b/chromium/gpu/ipc/common/gpu_info.typemap
index 946138f0da2..4ff5fe2c19c 100644
--- a/chromium/gpu/ipc/common/gpu_info.typemap
+++ b/chromium/gpu/ipc/common/gpu_info.typemap
@@ -21,4 +21,5 @@ type_mappings = [
"gpu.mojom.VideoDecodeAcceleratorSupportedProfile=gpu::VideoDecodeAcceleratorSupportedProfile",
"gpu.mojom.VideoDecodeAcceleratorCapabilities=gpu::VideoDecodeAcceleratorCapabilities",
"gpu.mojom.VideoEncodeAcceleratorSupportedProfile=gpu::VideoEncodeAcceleratorSupportedProfile",
+ "gpu.mojom.ImageDecodeAcceleratorSupportedProfile=gpu::ImageDecodeAcceleratorSupportedProfile",
]
diff --git a/chromium/gpu/ipc/common/gpu_info_struct_traits.cc b/chromium/gpu/ipc/common/gpu_info_struct_traits.cc
index 117dd3e7b61..26148ba366f 100644
--- a/chromium/gpu/ipc/common/gpu_info_struct_traits.cc
+++ b/chromium/gpu/ipc/common/gpu_info_struct_traits.cc
@@ -5,6 +5,7 @@
#include "gpu/ipc/common/gpu_info_struct_traits.h"
#include "build/build_config.h"
+#include "base/logging.h"
#include "mojo/public/cpp/base/time_mojom_traits.h"
namespace mojo {
@@ -218,6 +219,77 @@ bool StructTraits<gpu::mojom::VideoEncodeAcceleratorSupportedProfileDataView,
data.ReadMaxResolution(&out->max_resolution);
}
+// static
+gpu::mojom::ImageDecodeAcceleratorType EnumTraits<
+ gpu::mojom::ImageDecodeAcceleratorType,
+ gpu::ImageDecodeAcceleratorType>::ToMojom(gpu::ImageDecodeAcceleratorType
+ image_type) {
+ switch (image_type) {
+ case gpu::ImageDecodeAcceleratorType::kJpeg:
+ return gpu::mojom::ImageDecodeAcceleratorType::kJpeg;
+ case gpu::ImageDecodeAcceleratorType::kUnknown:
+ return gpu::mojom::ImageDecodeAcceleratorType::kUnknown;
+ }
+}
+
+// static
+bool EnumTraits<gpu::mojom::ImageDecodeAcceleratorType,
+ gpu::ImageDecodeAcceleratorType>::
+ FromMojom(gpu::mojom::ImageDecodeAcceleratorType input,
+ gpu::ImageDecodeAcceleratorType* out) {
+ switch (input) {
+ case gpu::mojom::ImageDecodeAcceleratorType::kJpeg:
+ *out = gpu::ImageDecodeAcceleratorType::kJpeg;
+ return true;
+ case gpu::mojom::ImageDecodeAcceleratorType::kUnknown:
+ *out = gpu::ImageDecodeAcceleratorType::kUnknown;
+ return true;
+ }
+ NOTREACHED() << "Invalid ImageDecodeAcceleratorType: " << input;
+ return false;
+}
+
+// static
+gpu::mojom::ImageDecodeAcceleratorSubsampling
+EnumTraits<gpu::mojom::ImageDecodeAcceleratorSubsampling,
+ gpu::ImageDecodeAcceleratorSubsampling>::
+ ToMojom(gpu::ImageDecodeAcceleratorSubsampling subsampling) {
+ switch (subsampling) {
+ case gpu::ImageDecodeAcceleratorSubsampling::k420:
+ return gpu::mojom::ImageDecodeAcceleratorSubsampling::k420;
+ case gpu::ImageDecodeAcceleratorSubsampling::k422:
+ return gpu::mojom::ImageDecodeAcceleratorSubsampling::k422;
+ }
+}
+
+// static
+bool EnumTraits<gpu::mojom::ImageDecodeAcceleratorSubsampling,
+ gpu::ImageDecodeAcceleratorSubsampling>::
+ FromMojom(gpu::mojom::ImageDecodeAcceleratorSubsampling input,
+ gpu::ImageDecodeAcceleratorSubsampling* out) {
+ switch (input) {
+ case gpu::mojom::ImageDecodeAcceleratorSubsampling::k420:
+ *out = gpu::ImageDecodeAcceleratorSubsampling::k420;
+ return true;
+ case gpu::mojom::ImageDecodeAcceleratorSubsampling::k422:
+ *out = gpu::ImageDecodeAcceleratorSubsampling::k422;
+ return true;
+ }
+ NOTREACHED() << "Invalid ImageDecodeAcceleratorSubsampling: " << input;
+ return false;
+}
+
+// static
+bool StructTraits<gpu::mojom::ImageDecodeAcceleratorSupportedProfileDataView,
+ gpu::ImageDecodeAcceleratorSupportedProfile>::
+ Read(gpu::mojom::ImageDecodeAcceleratorSupportedProfileDataView data,
+ gpu::ImageDecodeAcceleratorSupportedProfile* out) {
+ return data.ReadImageType(&out->image_type) &&
+ data.ReadMinEncodedDimensions(&out->min_encoded_dimensions) &&
+ data.ReadMaxEncodedDimensions(&out->max_encoded_dimensions) &&
+ data.ReadSubsamplings(&out->subsamplings);
+}
+
#if defined(OS_WIN)
// static
gpu::mojom::OverlayFormat
@@ -322,7 +394,9 @@ bool StructTraits<gpu::mojom::GpuInfoDataView, gpu::GPUInfo>::Read(
data.ReadVideoDecodeAcceleratorCapabilities(
&out->video_decode_accelerator_capabilities) &&
data.ReadVideoEncodeAcceleratorSupportedProfiles(
- &out->video_encode_accelerator_supported_profiles);
+ &out->video_encode_accelerator_supported_profiles) &&
+ data.ReadImageDecodeAcceleratorSupportedProfiles(
+ &out->image_decode_accelerator_supported_profiles);
}
} // namespace mojo
diff --git a/chromium/gpu/ipc/common/gpu_info_struct_traits.h b/chromium/gpu/ipc/common/gpu_info_struct_traits.h
index ab0a82aa1c1..926f8eaf303 100644
--- a/chromium/gpu/ipc/common/gpu_info_struct_traits.h
+++ b/chromium/gpu/ipc/common/gpu_info_struct_traits.h
@@ -141,6 +141,52 @@ struct StructTraits<gpu::mojom::VideoEncodeAcceleratorSupportedProfileDataView,
}
};
+template <>
+struct EnumTraits<gpu::mojom::ImageDecodeAcceleratorType,
+ gpu::ImageDecodeAcceleratorType> {
+ static gpu::mojom::ImageDecodeAcceleratorType ToMojom(
+ gpu::ImageDecodeAcceleratorType image_type);
+ static bool FromMojom(gpu::mojom::ImageDecodeAcceleratorType input,
+ gpu::ImageDecodeAcceleratorType* out);
+};
+
+template <>
+struct EnumTraits<gpu::mojom::ImageDecodeAcceleratorSubsampling,
+ gpu::ImageDecodeAcceleratorSubsampling> {
+ static gpu::mojom::ImageDecodeAcceleratorSubsampling ToMojom(
+ gpu::ImageDecodeAcceleratorSubsampling subsampling);
+ static bool FromMojom(gpu::mojom::ImageDecodeAcceleratorSubsampling input,
+ gpu::ImageDecodeAcceleratorSubsampling* out);
+};
+
+template <>
+struct StructTraits<gpu::mojom::ImageDecodeAcceleratorSupportedProfileDataView,
+ gpu::ImageDecodeAcceleratorSupportedProfile> {
+ static bool Read(
+ gpu::mojom::ImageDecodeAcceleratorSupportedProfileDataView data,
+ gpu::ImageDecodeAcceleratorSupportedProfile* out);
+
+ static gpu::ImageDecodeAcceleratorType image_type(
+ const gpu::ImageDecodeAcceleratorSupportedProfile& input) {
+ return input.image_type;
+ }
+
+ static const gfx::Size& min_encoded_dimensions(
+ const gpu::ImageDecodeAcceleratorSupportedProfile& input) {
+ return input.min_encoded_dimensions;
+ }
+
+ static const gfx::Size& max_encoded_dimensions(
+ const gpu::ImageDecodeAcceleratorSupportedProfile& input) {
+ return input.max_encoded_dimensions;
+ }
+
+ static std::vector<gpu::ImageDecodeAcceleratorSubsampling> subsamplings(
+ const gpu::ImageDecodeAcceleratorSupportedProfile& input) {
+ return input.subsamplings;
+ }
+};
+
#if defined(OS_WIN)
template <>
struct EnumTraits<gpu::mojom::OverlayFormat, gpu::OverlayFormat> {
@@ -323,6 +369,11 @@ struct StructTraits<gpu::mojom::GpuInfoDataView, gpu::GPUInfo> {
return input.jpeg_decode_accelerator_supported;
}
+ static std::vector<gpu::ImageDecodeAcceleratorSupportedProfile>
+ image_decode_accelerator_supported_profiles(const gpu::GPUInfo& input) {
+ return input.image_decode_accelerator_supported_profiles;
+ }
+
static uint64_t system_visual(const gpu::GPUInfo& input) {
#if defined(USE_X11)
return input.system_visual;
diff --git a/chromium/gpu/ipc/common/gpu_memory_buffer_impl_android_hardware_buffer.cc b/chromium/gpu/ipc/common/gpu_memory_buffer_impl_android_hardware_buffer.cc
index 02154f9badf..8c67295f1a3 100644
--- a/chromium/gpu/ipc/common/gpu_memory_buffer_impl_android_hardware_buffer.cc
+++ b/chromium/gpu/ipc/common/gpu_memory_buffer_impl_android_hardware_buffer.cc
@@ -13,6 +13,7 @@
#include "base/memory/ptr_util.h"
#include "gpu/ipc/common/gpu_memory_buffer_support.h"
#include "ui/gfx/geometry/size.h"
+#include "ui/gl/android/android_surface_control_compat.h"
namespace gpu {
@@ -35,6 +36,9 @@ AHardwareBuffer_Desc GetBufferDescription(const gfx::Size& size,
case gfx::BufferFormat::RGBX_8888:
desc.format = AHARDWAREBUFFER_FORMAT_R8G8B8X8_UNORM;
break;
+ case gfx::BufferFormat::BGR_565:
+ desc.format = AHARDWAREBUFFER_FORMAT_R5G6B5_UNORM;
+ break;
default:
NOTREACHED();
}
@@ -44,6 +48,8 @@ AHardwareBuffer_Desc GetBufferDescription(const gfx::Size& size,
case gfx::BufferUsage::SCANOUT:
desc.usage = AHARDWAREBUFFER_USAGE_GPU_SAMPLED_IMAGE |
AHARDWAREBUFFER_USAGE_GPU_COLOR_OUTPUT;
+ if (usage == gfx::BufferUsage::SCANOUT)
+ desc.usage |= gl::SurfaceControl::RequiredUsage();
break;
default:
NOTREACHED();
diff --git a/chromium/gpu/ipc/common/gpu_memory_buffer_impl_android_hardware_buffer_unittest.cc b/chromium/gpu/ipc/common/gpu_memory_buffer_impl_android_hardware_buffer_unittest.cc
index a7f3a626e33..01e419b36c3 100644
--- a/chromium/gpu/ipc/common/gpu_memory_buffer_impl_android_hardware_buffer_unittest.cc
+++ b/chromium/gpu/ipc/common/gpu_memory_buffer_impl_android_hardware_buffer_unittest.cc
@@ -8,13 +8,13 @@
namespace gpu {
namespace {
-INSTANTIATE_TYPED_TEST_CASE_P(GpuMemoryBufferImplAndroidHardwareBuffer,
- GpuMemoryBufferImplTest,
- GpuMemoryBufferImplAndroidHardwareBuffer);
+INSTANTIATE_TYPED_TEST_SUITE_P(GpuMemoryBufferImplAndroidHardwareBuffer,
+ GpuMemoryBufferImplTest,
+ GpuMemoryBufferImplAndroidHardwareBuffer);
-INSTANTIATE_TYPED_TEST_CASE_P(GpuMemoryBufferImplAndroidHardwareBuffer,
- GpuMemoryBufferImplCreateTest,
- GpuMemoryBufferImplAndroidHardwareBuffer);
+INSTANTIATE_TYPED_TEST_SUITE_P(GpuMemoryBufferImplAndroidHardwareBuffer,
+ GpuMemoryBufferImplCreateTest,
+ GpuMemoryBufferImplAndroidHardwareBuffer);
} // namespace
} // namespace gpu
diff --git a/chromium/gpu/ipc/common/gpu_memory_buffer_impl_dxgi_unittest.cc b/chromium/gpu/ipc/common/gpu_memory_buffer_impl_dxgi_unittest.cc
index f1a24dc10f2..586dbdb0030 100644
--- a/chromium/gpu/ipc/common/gpu_memory_buffer_impl_dxgi_unittest.cc
+++ b/chromium/gpu/ipc/common/gpu_memory_buffer_impl_dxgi_unittest.cc
@@ -9,8 +9,8 @@ namespace gpu {
namespace {
// Disabled by default as it requires DX11.
-INSTANTIATE_TYPED_TEST_CASE_P(DISABLED_GpuMemoryBufferImplDXGI,
- GpuMemoryBufferImplTest,
- GpuMemoryBufferImplDXGI);
+INSTANTIATE_TYPED_TEST_SUITE_P(DISABLED_GpuMemoryBufferImplDXGI,
+ GpuMemoryBufferImplTest,
+ GpuMemoryBufferImplDXGI);
} // namespace
} // namespace gpu
diff --git a/chromium/gpu/ipc/common/gpu_memory_buffer_impl_io_surface_unittest.cc b/chromium/gpu/ipc/common/gpu_memory_buffer_impl_io_surface_unittest.cc
index 44a53ae6c8f..2855e9f512e 100644
--- a/chromium/gpu/ipc/common/gpu_memory_buffer_impl_io_surface_unittest.cc
+++ b/chromium/gpu/ipc/common/gpu_memory_buffer_impl_io_surface_unittest.cc
@@ -8,9 +8,9 @@
namespace gpu {
namespace {
-INSTANTIATE_TYPED_TEST_CASE_P(GpuMemoryBufferImplIOSurface,
- GpuMemoryBufferImplTest,
- GpuMemoryBufferImplIOSurface);
+INSTANTIATE_TYPED_TEST_SUITE_P(GpuMemoryBufferImplIOSurface,
+ GpuMemoryBufferImplTest,
+ GpuMemoryBufferImplIOSurface);
} // namespace
} // namespace gpu
diff --git a/chromium/gpu/ipc/common/gpu_memory_buffer_impl_native_pixmap.cc b/chromium/gpu/ipc/common/gpu_memory_buffer_impl_native_pixmap.cc
index b9252d505be..569e0156296 100644
--- a/chromium/gpu/ipc/common/gpu_memory_buffer_impl_native_pixmap.cc
+++ b/chromium/gpu/ipc/common/gpu_memory_buffer_impl_native_pixmap.cc
@@ -6,6 +6,7 @@
#include <utility>
+#include "base/bind.h"
#include "base/memory/ptr_util.h"
#include "gpu/ipc/common/gpu_memory_buffer_support.h"
#include "ui/gfx/buffer_format_util.h"
@@ -35,11 +36,11 @@ GpuMemoryBufferImplNativePixmap::GpuMemoryBufferImplNativePixmap(
DestructionCallback callback,
std::unique_ptr<gfx::ClientNativePixmap> pixmap,
const std::vector<gfx::NativePixmapPlane>& planes,
- base::ScopedFD fd)
+ std::vector<base::ScopedFD> fds)
: GpuMemoryBufferImpl(id, size, format, std::move(callback)),
pixmap_(std::move(pixmap)),
planes_(planes),
- fd_(std::move(fd)) {}
+ fds_(std::move(fds)) {}
GpuMemoryBufferImplNativePixmap::~GpuMemoryBufferImplNativePixmap() = default;
@@ -52,42 +53,33 @@ GpuMemoryBufferImplNativePixmap::CreateFromHandle(
gfx::BufferFormat format,
gfx::BufferUsage usage,
DestructionCallback callback) {
- // GpuMemoryBufferImpl needs the FD to implement GetHandle() but
- // gfx::ClientNativePixmapFactory::ImportFromHandle is expected to take
- // ownership of the FD passed in the handle so we have to dup it here in
- // order to pass a valid FD to the GpuMemoryBufferImpl ctor.
- base::ScopedFD scoped_native_pixmap_handle_fd;
- base::ScopedFD scoped_fd;
- if (!handle.native_pixmap_handle.fds.empty()) {
- // Take ownership of FD at index 0.
- scoped_native_pixmap_handle_fd.reset(handle.native_pixmap_handle.fds[0].fd);
-
- // Close all remaining FDs.
- for (size_t i = 1; i < handle.native_pixmap_handle.fds.size(); ++i)
- base::ScopedFD scoped_fd(handle.native_pixmap_handle.fds[i].fd);
-
+ std::vector<base::ScopedFD> fds;
+ std::vector<base::ScopedFD> dup_fds;
+ for (auto& fd : handle.native_pixmap_handle.fds) {
+ DCHECK(fd.auto_close);
+ // Take ownership of FD
+ fds.emplace_back(fd.fd);
// Duplicate FD for GpuMemoryBufferImplNativePixmap ctor.
- scoped_fd.reset(HANDLE_EINTR(dup(scoped_native_pixmap_handle_fd.get())));
- if (!scoped_fd.is_valid()) {
+ dup_fds.emplace_back(HANDLE_EINTR(dup(fd.fd)));
+ if (!dup_fds.back().is_valid()) {
PLOG(ERROR) << "dup";
return nullptr;
}
}
gfx::NativePixmapHandle native_pixmap_handle;
- if (scoped_native_pixmap_handle_fd.is_valid()) {
- native_pixmap_handle.fds.emplace_back(
- scoped_native_pixmap_handle_fd.release(), true /* auto_close */);
+ for (auto& fd : dup_fds) {
+ native_pixmap_handle.fds.emplace_back(fd.release(), true /* auto_close */);
}
native_pixmap_handle.planes = handle.native_pixmap_handle.planes;
std::unique_ptr<gfx::ClientNativePixmap> native_pixmap =
- client_native_pixmap_factory->ImportFromHandle(native_pixmap_handle, size,
- usage);
+ client_native_pixmap_factory->ImportFromHandle(
+ std::move(native_pixmap_handle), size, usage);
DCHECK(native_pixmap);
return base::WrapUnique(new GpuMemoryBufferImplNativePixmap(
handle.id, size, format, std::move(callback), std::move(native_pixmap),
- handle.native_pixmap_handle.planes, std::move(scoped_fd)));
+ handle.native_pixmap_handle.planes, std::move(fds)));
}
// static
@@ -144,8 +136,9 @@ gfx::GpuMemoryBufferHandle GpuMemoryBufferImplNativePixmap::CloneHandle()
handle.type = gfx::NATIVE_PIXMAP;
handle.id = id_;
gfx::NativePixmapHandle native_pixmap_handle;
- if (fd_.is_valid())
- native_pixmap_handle.fds.emplace_back(fd_.get(), false /* auto_close */);
+ for (const auto& fd : fds_) {
+ native_pixmap_handle.fds.emplace_back(fd.get(), false /* auto_close */);
+ }
native_pixmap_handle.planes = planes_;
handle.native_pixmap_handle = gfx::CloneHandleForIPC(native_pixmap_handle);
return handle;
diff --git a/chromium/gpu/ipc/common/gpu_memory_buffer_impl_native_pixmap.h b/chromium/gpu/ipc/common/gpu_memory_buffer_impl_native_pixmap.h
index ddc63f5e540..44a0559ac59 100644
--- a/chromium/gpu/ipc/common/gpu_memory_buffer_impl_native_pixmap.h
+++ b/chromium/gpu/ipc/common/gpu_memory_buffer_impl_native_pixmap.h
@@ -8,6 +8,7 @@
#include <stddef.h>
#include <memory>
+#include <vector>
#include "base/macros.h"
#include "gpu/gpu_export.h"
@@ -57,11 +58,11 @@ class GPU_EXPORT GpuMemoryBufferImplNativePixmap : public GpuMemoryBufferImpl {
DestructionCallback callback,
std::unique_ptr<gfx::ClientNativePixmap> native_pixmap,
const std::vector<gfx::NativePixmapPlane>& planes,
- base::ScopedFD fd);
+ std::vector<base::ScopedFD> fds);
- std::unique_ptr<gfx::ClientNativePixmap> pixmap_;
+ const std::unique_ptr<gfx::ClientNativePixmap> pixmap_;
std::vector<gfx::NativePixmapPlane> planes_;
- base::ScopedFD fd_;
+ std::vector<base::ScopedFD> fds_;
DISALLOW_COPY_AND_ASSIGN(GpuMemoryBufferImplNativePixmap);
};
diff --git a/chromium/gpu/ipc/common/gpu_memory_buffer_impl_native_pixmap_unittest.cc b/chromium/gpu/ipc/common/gpu_memory_buffer_impl_native_pixmap_unittest.cc
index 2b0dc47046f..bf77a6d5494 100644
--- a/chromium/gpu/ipc/common/gpu_memory_buffer_impl_native_pixmap_unittest.cc
+++ b/chromium/gpu/ipc/common/gpu_memory_buffer_impl_native_pixmap_unittest.cc
@@ -8,9 +8,9 @@
namespace gpu {
namespace {
-INSTANTIATE_TYPED_TEST_CASE_P(GpuMemoryBufferImplNativePixmap,
- GpuMemoryBufferImplTest,
- GpuMemoryBufferImplNativePixmap);
+INSTANTIATE_TYPED_TEST_SUITE_P(GpuMemoryBufferImplNativePixmap,
+ GpuMemoryBufferImplTest,
+ GpuMemoryBufferImplNativePixmap);
} // namespace
} // namespace gpu
diff --git a/chromium/gpu/ipc/common/gpu_memory_buffer_impl_shared_memory_unittest.cc b/chromium/gpu/ipc/common/gpu_memory_buffer_impl_shared_memory_unittest.cc
index af9565148ec..80b32ba543e 100644
--- a/chromium/gpu/ipc/common/gpu_memory_buffer_impl_shared_memory_unittest.cc
+++ b/chromium/gpu/ipc/common/gpu_memory_buffer_impl_shared_memory_unittest.cc
@@ -10,13 +10,13 @@
namespace gpu {
namespace {
-INSTANTIATE_TYPED_TEST_CASE_P(GpuMemoryBufferImplSharedMemory,
- GpuMemoryBufferImplTest,
- GpuMemoryBufferImplSharedMemory);
+INSTANTIATE_TYPED_TEST_SUITE_P(GpuMemoryBufferImplSharedMemory,
+ GpuMemoryBufferImplTest,
+ GpuMemoryBufferImplSharedMemory);
-INSTANTIATE_TYPED_TEST_CASE_P(GpuMemoryBufferImplSharedMemory,
- GpuMemoryBufferImplCreateTest,
- GpuMemoryBufferImplSharedMemory);
+INSTANTIATE_TYPED_TEST_SUITE_P(GpuMemoryBufferImplSharedMemory,
+ GpuMemoryBufferImplCreateTest,
+ GpuMemoryBufferImplSharedMemory);
} // namespace
} // namespace gpu
diff --git a/chromium/gpu/ipc/common/gpu_memory_buffer_impl_test_template.h b/chromium/gpu/ipc/common/gpu_memory_buffer_impl_test_template.h
index 5b3a9a0c34a..1f1658db352 100644
--- a/chromium/gpu/ipc/common/gpu_memory_buffer_impl_test_template.h
+++ b/chromium/gpu/ipc/common/gpu_memory_buffer_impl_test_template.h
@@ -81,7 +81,7 @@ class GpuMemoryBufferImplCreateTest : public testing::Test {
GpuMemoryBufferSupport gpu_memory_buffer_support_;
};
-TYPED_TEST_CASE_P(GpuMemoryBufferImplTest);
+TYPED_TEST_SUITE_P(GpuMemoryBufferImplTest);
TYPED_TEST_P(GpuMemoryBufferImplTest, CreateFromHandle) {
const gfx::Size kBufferSize(8, 8);
@@ -298,13 +298,13 @@ TYPED_TEST_P(GpuMemoryBufferImplTest, SerializeAndDeserialize) {
// The GpuMemoryBufferImplTest test case verifies behavior that is expected
// from a GpuMemoryBuffer implementation in order to be conformant.
-REGISTER_TYPED_TEST_CASE_P(GpuMemoryBufferImplTest,
- CreateFromHandle,
- Map,
- PersistentMap,
- SerializeAndDeserialize);
+REGISTER_TYPED_TEST_SUITE_P(GpuMemoryBufferImplTest,
+ CreateFromHandle,
+ Map,
+ PersistentMap,
+ SerializeAndDeserialize);
-TYPED_TEST_CASE_P(GpuMemoryBufferImplCreateTest);
+TYPED_TEST_SUITE_P(GpuMemoryBufferImplCreateTest);
TYPED_TEST_P(GpuMemoryBufferImplCreateTest, Create) {
const gfx::GpuMemoryBufferId kBufferId(1);
@@ -332,7 +332,7 @@ TYPED_TEST_P(GpuMemoryBufferImplCreateTest, Create) {
// The GpuMemoryBufferImplCreateTest test case verifies behavior that is
// expected from a GpuMemoryBuffer Create() implementation in order to be
// conformant.
-REGISTER_TYPED_TEST_CASE_P(GpuMemoryBufferImplCreateTest, Create);
+REGISTER_TYPED_TEST_SUITE_P(GpuMemoryBufferImplCreateTest, Create);
} // namespace gpu
diff --git a/chromium/gpu/ipc/common/gpu_messages.h b/chromium/gpu/ipc/common/gpu_messages.h
index 4d8771b9ee6..463001b29e4 100644
--- a/chromium/gpu/ipc/common/gpu_messages.h
+++ b/chromium/gpu/ipc/common/gpu_messages.h
@@ -112,6 +112,7 @@ IPC_STRUCT_BEGIN(GpuChannelMsg_ScheduleImageDecode_Params)
IPC_STRUCT_MEMBER(uint32_t, transfer_cache_entry_id)
IPC_STRUCT_MEMBER(int32_t, discardable_handle_shm_id)
IPC_STRUCT_MEMBER(uint32_t, discardable_handle_shm_offset)
+ IPC_STRUCT_MEMBER(uint64_t, discardable_handle_release_count)
IPC_STRUCT_MEMBER(gfx::ColorSpace, target_color_space)
IPC_STRUCT_MEMBER(bool, needs_mips)
IPC_STRUCT_END()
@@ -165,7 +166,7 @@ IPC_MESSAGE_ROUTED1(GpuChannelMsg_RegisterSharedImageUploadBuffer,
IPC_MESSAGE_ROUTED2(
GpuChannelMsg_ScheduleImageDecode,
GpuChannelMsg_ScheduleImageDecode_Params /* decode_params */,
- uint64_t /* release_count */)
+ uint64_t /* decode_release_count */)
// Crash the GPU process in similar way to how chrome://gpucrash does.
// This is only supported in testing environments, and is otherwise ignored.
@@ -311,4 +312,10 @@ IPC_MESSAGE_ROUTED2(GpuCommandBufferMsg_GetGpuFenceHandleComplete,
uint32_t /* gpu_fence_id */,
gfx::GpuFenceHandle)
+// Returns a block of data from the GPU process to the renderer.
+// This contains server->client messages produced by dawn_wire and is used to
+// remote WebGPU.
+IPC_MESSAGE_ROUTED1(GpuCommandBufferMsg_ReturnData,
+ std::vector<uint8_t> /* data */)
+
#endif // GPU_IPC_COMMON_GPU_MESSAGES_H_
diff --git a/chromium/gpu/ipc/common/gpu_param_traits_macros.h b/chromium/gpu/ipc/common/gpu_param_traits_macros.h
index 621890d2d60..9fd93a4b637 100644
--- a/chromium/gpu/ipc/common/gpu_param_traits_macros.h
+++ b/chromium/gpu/ipc/common/gpu_param_traits_macros.h
@@ -22,7 +22,7 @@
IPC_ENUM_TRAITS_MAX_VALUE(gpu::SchedulingPriority,
gpu::SchedulingPriority::kLast)
IPC_ENUM_TRAITS_MAX_VALUE(gpu::ContextResult,
- gpu::ContextResult::kLastContextResult);
+ gpu::ContextResult::kLastContextResult)
IPC_STRUCT_TRAITS_BEGIN(gpu::SwapBuffersCompleteParams)
IPC_STRUCT_TRAITS_MEMBER(ca_layer_params)
@@ -30,6 +30,6 @@ IPC_STRUCT_TRAITS_BEGIN(gpu::SwapBuffersCompleteParams)
IPC_STRUCT_TRAITS_MEMBER(swap_response)
IPC_STRUCT_TRAITS_END()
-IPC_ENUM_TRAITS_MAX_VALUE(viz::ResourceFormat, viz::RESOURCE_FORMAT_MAX);
+IPC_ENUM_TRAITS_MAX_VALUE(viz::ResourceFormat, viz::RESOURCE_FORMAT_MAX)
#endif // GPU_IPC_COMMON_GPU_PARAM_TRAITS_MACROS_H_
diff --git a/chromium/gpu/ipc/common/gpu_preferences.mojom b/chromium/gpu/ipc/common/gpu_preferences.mojom
index c80521f496c..92b32cfb14a 100644
--- a/chromium/gpu/ipc/common/gpu_preferences.mojom
+++ b/chromium/gpu/ipc/common/gpu_preferences.mojom
@@ -17,8 +17,6 @@ enum VpxDecodeVendors {
// gpu::GpuPreferences
struct GpuPreferences {
- bool single_process;
- bool in_process_gpu;
bool disable_accelerated_video_decode;
bool disable_accelerated_video_encode;
bool gpu_startup_dialog;
@@ -34,7 +32,6 @@ struct GpuPreferences {
bool enable_media_foundation_vea_on_windows7;
bool disable_software_rasterizer;
bool log_gpu_control_list_decisions;
- bool enable_trace_export_events_to_etw;
bool compile_shader_always_succeeds;
bool disable_gl_error_limit;
diff --git a/chromium/gpu/ipc/common/gpu_preferences_struct_traits.h b/chromium/gpu/ipc/common/gpu_preferences_struct_traits.h
index 3b3b52d0add..2487731fa62 100644
--- a/chromium/gpu/ipc/common/gpu_preferences_struct_traits.h
+++ b/chromium/gpu/ipc/common/gpu_preferences_struct_traits.h
@@ -56,8 +56,6 @@ template <>
struct StructTraits<gpu::mojom::GpuPreferencesDataView, gpu::GpuPreferences> {
static bool Read(gpu::mojom::GpuPreferencesDataView prefs,
gpu::GpuPreferences* out) {
- out->single_process = prefs.single_process();
- out->in_process_gpu = prefs.in_process_gpu();
out->disable_accelerated_video_decode =
prefs.disable_accelerated_video_decode();
out->disable_accelerated_video_encode =
@@ -76,8 +74,6 @@ struct StructTraits<gpu::mojom::GpuPreferencesDataView, gpu::GpuPreferences> {
out->disable_software_rasterizer = prefs.disable_software_rasterizer();
out->log_gpu_control_list_decisions =
prefs.log_gpu_control_list_decisions();
- out->enable_trace_export_events_to_etw =
- prefs.enable_trace_export_events_to_etw();
out->compile_shader_always_succeeds =
prefs.compile_shader_always_succeeds();
out->disable_gl_error_limit = prefs.disable_gl_error_limit();
@@ -134,12 +130,6 @@ struct StructTraits<gpu::mojom::GpuPreferencesDataView, gpu::GpuPreferences> {
return true;
}
- static bool single_process(const gpu::GpuPreferences& prefs) {
- return prefs.single_process;
- }
- static bool in_process_gpu(const gpu::GpuPreferences& prefs) {
- return prefs.in_process_gpu;
- }
static bool disable_accelerated_video_decode(
const gpu::GpuPreferences& prefs) {
return prefs.disable_accelerated_video_decode;
@@ -181,10 +171,6 @@ struct StructTraits<gpu::mojom::GpuPreferencesDataView, gpu::GpuPreferences> {
static bool log_gpu_control_list_decisions(const gpu::GpuPreferences& prefs) {
return prefs.log_gpu_control_list_decisions;
}
- static bool enable_trace_export_events_to_etw(
- const gpu::GpuPreferences& prefs) {
- return prefs.enable_trace_export_events_to_etw;
- }
static bool compile_shader_always_succeeds(const gpu::GpuPreferences& prefs) {
return prefs.compile_shader_always_succeeds;
}
diff --git a/chromium/gpu/ipc/common/struct_traits_unittest.cc b/chromium/gpu/ipc/common/struct_traits_unittest.cc
index 95e18dc339a..2c51f08dc95 100644
--- a/chromium/gpu/ipc/common/struct_traits_unittest.cc
+++ b/chromium/gpu/ipc/common/struct_traits_unittest.cc
@@ -418,8 +418,8 @@ TEST_F(StructTraitsTest, VideoEncodeAcceleratorSupportedProfile) {
TEST_F(StructTraitsTest, GpuPreferences) {
GpuPreferences prefs;
- prefs.single_process = true;
- prefs.in_process_gpu = true;
+ prefs.gpu_startup_dialog = true;
+ prefs.disable_gpu_watchdog = true;
#if defined(OS_WIN)
const GpuPreferences::VpxDecodeVendors vendor =
GpuPreferences::VPX_VENDOR_AMD;
@@ -430,8 +430,8 @@ TEST_F(StructTraitsTest, GpuPreferences) {
mojom::TraitsTestServicePtr proxy = GetTraitsTestProxy();
GpuPreferences echo;
proxy->EchoGpuPreferences(prefs, &echo);
- EXPECT_TRUE(echo.single_process);
- EXPECT_TRUE(echo.in_process_gpu);
+ EXPECT_TRUE(echo.gpu_startup_dialog);
+ EXPECT_TRUE(echo.disable_gpu_watchdog);
EXPECT_TRUE(echo.enable_gpu_driver_debug_logging);
#if defined(OS_WIN)
EXPECT_EQ(vendor, echo.enable_accelerated_vpx_decode);
diff --git a/chromium/gpu/ipc/host/gpu_memory_buffer_support.cc b/chromium/gpu/ipc/host/gpu_memory_buffer_support.cc
index d35beb1cd54..af8eb2acd8c 100644
--- a/chromium/gpu/ipc/host/gpu_memory_buffer_support.cc
+++ b/chromium/gpu/ipc/host/gpu_memory_buffer_support.cc
@@ -88,6 +88,8 @@ GpuMemoryBufferConfigurationSet GetNativeGpuMemoryBufferConfigurations(
bool GetImageNeedsPlatformSpecificTextureTarget(gfx::BufferFormat format,
gfx::BufferUsage usage) {
+ if (!NativeBufferNeedsPlatformSpecificTextureTarget(format))
+ return false;
#if defined(USE_OZONE) || defined(OS_MACOSX) || defined(OS_WIN) || \
defined(OS_ANDROID)
GpuMemoryBufferSupport support;
diff --git a/chromium/gpu/ipc/host/shader_disk_cache.cc b/chromium/gpu/ipc/host/shader_disk_cache.cc
index fff52b9cccc..21762df6b78 100644
--- a/chromium/gpu/ipc/host/shader_disk_cache.cc
+++ b/chromium/gpu/ipc/host/shader_disk_cache.cc
@@ -4,8 +4,10 @@
#include "gpu/ipc/host/shader_disk_cache.h"
+#include "base/bind.h"
#include "base/command_line.h"
#include "base/macros.h"
+#include "base/memory/ref_counted.h"
#include "base/strings/string_number_conversions.h"
#include "base/system/sys_info.h"
#include "base/threading/thread_checker.h"
@@ -145,19 +147,20 @@ class ShaderClearHelper : public base::ThreadChecker {
};
// When the cache is asked to open an entry an Entry** is passed to it. The
-// underying Entry* must stay alive for the duration of the call, so it is
-// owned by the callback. If the underlying state machine is deleted before
-// the callback runs, close the entry.
+// underlying Entry* must stay alive for the duration of the call, so it is ref
+// counted. If the underlying state machine is deleted before the callback runs,
+// close the entry.
template <typename T>
-void OnEntryOpenComplete(base::WeakPtr<T> state_machine,
- std::unique_ptr<disk_cache::Entry*> entry,
- int rv) {
+void OnEntryOpenComplete(
+ base::WeakPtr<T> state_machine,
+ scoped_refptr<base::RefCountedData<disk_cache::Entry*>> entry_ptr,
+ int rv) {
if (!state_machine) {
if (rv == net::OK)
- (*entry)->Close();
+ entry_ptr->data->Close();
return;
}
- state_machine->set_entry(*entry);
+ state_machine->set_entry(entry_ptr->data);
state_machine->OnOpComplete(rv);
}
@@ -185,19 +188,18 @@ ShaderDiskCacheEntry::~ShaderDiskCacheEntry() {
void ShaderDiskCacheEntry::Cache() {
DCHECK(CalledOnValidThread());
- // The Entry* passed to the cache must stay alive even if this class is
- // deleted, so store it in the callback.
- auto entry = std::make_unique<disk_cache::Entry*>(nullptr);
- disk_cache::Entry** closure_owned_entry_ptr = entry.get();
- auto callback = base::Bind(&OnEntryOpenComplete<ShaderDiskCacheEntry>,
- weak_ptr_factory_.GetWeakPtr(),
- base::Passed(std::move(entry)));
+ // The Entry* passed to the cache may be used after this class is deleted or
+ // after the callback is deleted, so make it ref counted.
+ auto entry_ptr =
+ base::MakeRefCounted<base::RefCountedData<disk_cache::Entry*>>();
+ auto callback = base::BindOnce(&OnEntryOpenComplete<ShaderDiskCacheEntry>,
+ weak_ptr_factory_.GetWeakPtr(), entry_ptr);
- int rv = cache_->backend()->OpenEntry(key_, net::HIGHEST,
- closure_owned_entry_ptr, callback);
+ int rv = cache_->backend()->OpenEntry(key_, net::HIGHEST, &entry_ptr->data,
+ std::move(callback));
if (rv != net::ERR_IO_PENDING) {
- entry_ = *closure_owned_entry_ptr;
+ entry_ = entry_ptr->data;
OnOpComplete(rv);
}
}
@@ -235,19 +237,18 @@ int ShaderDiskCacheEntry::OpenCallback(int rv) {
op_type_ = CREATE_ENTRY;
- // The Entry* passed to the cache must stay alive even if this class is
- // deleted, so store it in the callback.
- auto entry = std::make_unique<disk_cache::Entry*>(nullptr);
- disk_cache::Entry** closure_owned_entry_ptr = entry.get();
- auto callback = base::Bind(&OnEntryOpenComplete<ShaderDiskCacheEntry>,
- weak_ptr_factory_.GetWeakPtr(),
- base::Passed(std::move(entry)));
+ // The Entry* passed to the cache may be used after this class is deleted or
+ // after the callback is deleted, so make it ref counted.
+ auto entry_ptr =
+ base::MakeRefCounted<base::RefCountedData<disk_cache::Entry*>>();
+ auto callback = base::BindOnce(&OnEntryOpenComplete<ShaderDiskCacheEntry>,
+ weak_ptr_factory_.GetWeakPtr(), entry_ptr);
int create_rv = cache_->backend()->CreateEntry(
- key_, net::HIGHEST, closure_owned_entry_ptr, callback);
+ key_, net::HIGHEST, &entry_ptr->data, std::move(callback));
if (create_rv != net::ERR_IO_PENDING)
- entry_ = *closure_owned_entry_ptr;
+ entry_ = entry_ptr->data;
return create_rv;
}
@@ -262,8 +263,8 @@ int ShaderDiskCacheEntry::WriteCallback(int rv) {
op_type_ = WRITE_DATA;
auto io_buf = base::MakeRefCounted<net::StringIOBuffer>(shader_);
return entry_->WriteData(1, 0, io_buf.get(), shader_.length(),
- base::Bind(&ShaderDiskCacheEntry::OnOpComplete,
- weak_ptr_factory_.GetWeakPtr()),
+ base::BindOnce(&ShaderDiskCacheEntry::OnOpComplete,
+ weak_ptr_factory_.GetWeakPtr()),
false);
}
@@ -327,18 +328,17 @@ int ShaderDiskReadHelper::OpenNextEntry() {
if (!iter_)
iter_ = cache_->backend()->CreateIterator();
- // The Entry* passed to the cache must stay alive even if this class is
- // deleted, so store it in the callback.
- auto entry = std::make_unique<disk_cache::Entry*>(nullptr);
- disk_cache::Entry** closure_owned_entry_ptr = entry.get();
- auto callback = base::Bind(&OnEntryOpenComplete<ShaderDiskReadHelper>,
- weak_ptr_factory_.GetWeakPtr(),
- base::Passed(std::move(entry)));
+ // The Entry* passed to the cache may be used after this class is deleted or
+ // after the callback is deleted, so make it ref counted.
+ auto entry_ptr =
+ base::MakeRefCounted<base::RefCountedData<disk_cache::Entry*>>();
+ auto callback = base::BindOnce(&OnEntryOpenComplete<ShaderDiskReadHelper>,
+ weak_ptr_factory_.GetWeakPtr(), entry_ptr);
- int rv = iter_->OpenNextEntry(closure_owned_entry_ptr, callback);
+ int rv = iter_->OpenNextEntry(&entry_ptr->data, std::move(callback));
if (rv != net::ERR_IO_PENDING)
- entry_ = *closure_owned_entry_ptr;
+ entry_ = entry_ptr->data;
return rv;
}
@@ -356,8 +356,8 @@ int ShaderDiskReadHelper::OpenNextEntryComplete(int rv) {
op_type_ = READ_COMPLETE;
buf_ = base::MakeRefCounted<net::IOBufferWithSize>(entry_->GetDataSize(1));
return entry_->ReadData(1, 0, buf_.get(), buf_->size(),
- base::Bind(&ShaderDiskReadHelper::OnOpComplete,
- weak_ptr_factory_.GetWeakPtr()));
+ base::BindOnce(&ShaderDiskReadHelper::OnOpComplete,
+ weak_ptr_factory_.GetWeakPtr()));
}
int ShaderDiskReadHelper::ReadComplete(int rv) {
@@ -415,14 +415,15 @@ void ShaderClearHelper::DoClearShaderCache(int rv) {
switch (op_type_) {
case VERIFY_CACHE_SETUP:
rv = cache_->SetAvailableCallback(
- base::Bind(&ShaderClearHelper::DoClearShaderCache,
- weak_ptr_factory_.GetWeakPtr()));
+ base::BindRepeating(&ShaderClearHelper::DoClearShaderCache,
+ weak_ptr_factory_.GetWeakPtr()));
op_type_ = DELETE_CACHE;
break;
case DELETE_CACHE:
- rv = cache_->Clear(delete_begin_, delete_end_,
- base::Bind(&ShaderClearHelper::DoClearShaderCache,
- weak_ptr_factory_.GetWeakPtr()));
+ rv = cache_->Clear(
+ delete_begin_, delete_end_,
+ base::BindRepeating(&ShaderClearHelper::DoClearShaderCache,
+ weak_ptr_factory_.GetWeakPtr()));
op_type_ = TERMINATE;
break;
case TERMINATE:
@@ -574,7 +575,7 @@ void ShaderDiskCache::Init() {
int rv = disk_cache::CreateCacheBackend(
net::SHADER_CACHE, net::CACHE_BACKEND_DEFAULT,
cache_path_.Append(kGpuCachePath), CacheSizeBytes(), true, nullptr,
- &backend_, base::Bind(&ShaderDiskCache::CacheCreatedCallback, this));
+ &backend_, base::BindOnce(&ShaderDiskCache::CacheCreatedCallback, this));
if (rv == net::OK)
cache_available_ = true;
diff --git a/chromium/gpu/ipc/host/shader_disk_cache_unittest.cc b/chromium/gpu/ipc/host/shader_disk_cache_unittest.cc
index dbb811c7259..ea35670dcd2 100644
--- a/chromium/gpu/ipc/host/shader_disk_cache_unittest.cc
+++ b/chromium/gpu/ipc/host/shader_disk_cache_unittest.cc
@@ -76,7 +76,7 @@ TEST_F(ShaderDiskCacheTest, ClearsCache) {
rv = cache->Clear(time, time, clear_cb.callback());
ASSERT_EQ(net::OK, clear_cb.GetResult(rv));
EXPECT_EQ(0, cache->Size());
-};
+}
// For https://crbug.com/663589.
TEST_F(ShaderDiskCacheTest, SafeToDeleteCacheMidEntryOpen) {
@@ -103,7 +103,7 @@ TEST_F(ShaderDiskCacheTest, SafeToDeleteCacheMidEntryOpen) {
net::TestCompletionCallback available_cb2;
int rv2 = cache->SetAvailableCallback(available_cb2.callback());
ASSERT_EQ(net::OK, available_cb2.GetResult(rv2));
-};
+}
TEST_F(ShaderDiskCacheTest, MultipleLoaderCallbacks) {
InitCache();
@@ -138,6 +138,6 @@ TEST_F(ShaderDiskCacheTest, MultipleLoaderCallbacks) {
int rv2 = cache->SetAvailableCallback(available_cb2.callback());
ASSERT_EQ(net::OK, available_cb2.GetResult(rv2));
EXPECT_EQ(count, loaded_calls);
-};
+}
} // namespace gpu
diff --git a/chromium/gpu/ipc/in_process_command_buffer.cc b/chromium/gpu/ipc/in_process_command_buffer.cc
index 5fc6dec47cb..d8628b91f2c 100644
--- a/chromium/gpu/ipc/in_process_command_buffer.cc
+++ b/chromium/gpu/ipc/in_process_command_buffer.cc
@@ -54,7 +54,6 @@
#include "gpu/command_buffer/service/shared_context_state.h"
#include "gpu/command_buffer/service/shared_image_factory.h"
#include "gpu/command_buffer/service/sync_point_manager.h"
-#include "gpu/command_buffer/service/transfer_buffer_manager.h"
#include "gpu/command_buffer/service/webgpu_decoder.h"
#include "gpu/config/gpu_crash_keys.h"
#include "gpu/config/gpu_feature_info.h"
@@ -389,10 +388,6 @@ gpu::ContextResult InProcessCommandBuffer::InitializeOnGpuThread(
return gpu::ContextResult::kTransientFailure;
}
- // TODO(crbug.com/832243): This could use the TransferBufferManager owned by
- // |context_group_| instead.
- transfer_buffer_manager_ = std::make_unique<TransferBufferManager>(nullptr);
-
GpuDriverBugWorkarounds workarounds(
task_executor_->gpu_feature_info().enabled_gpu_driver_bug_workarounds);
@@ -411,8 +406,13 @@ gpu::ContextResult InProcessCommandBuffer::InitializeOnGpuThread(
base::ThreadTaskRunnerHandle::Get());
}
- auto feature_info = base::MakeRefCounted<gles2::FeatureInfo>(
- workarounds, task_executor_->gpu_feature_info());
+ gpu::GpuFeatureInfo gpu_feature_info = task_executor_->gpu_feature_info();
+ if (params.attribs.backed_by_surface_texture) {
+ gpu_feature_info.status_values[GPU_FEATURE_TYPE_ANDROID_SURFACE_CONTROL] =
+ kGpuFeatureStatusDisabled;
+ }
+ auto feature_info =
+ base::MakeRefCounted<gles2::FeatureInfo>(workarounds, gpu_feature_info);
context_group_ = base::MakeRefCounted<gles2::ContextGroup>(
task_executor_->gpu_preferences(),
gles2::PassthroughCommandDecoderSupported(),
@@ -448,7 +448,7 @@ gpu::ContextResult InProcessCommandBuffer::InitializeOnGpuThread(
: "0");
command_buffer_ = std::make_unique<CommandBufferService>(
- this, transfer_buffer_manager_.get());
+ this, context_group_->memory_tracker());
if (!surface_) {
if (params.is_offscreen) {
@@ -604,7 +604,9 @@ gpu::ContextResult InProcessCommandBuffer::InitializeOnGpuThread(
decoder_.reset(raster::RasterDecoder::Create(
this, command_buffer_.get(), task_executor_->outputter(),
- context_group_.get(), context_state_));
+ task_executor_->gpu_feature_info(), task_executor_->gpu_preferences(),
+ context_group_->memory_tracker(),
+ task_executor_->shared_image_manager(), context_state_));
} else {
decoder_.reset(gles2::GLES2Decoder::Create(this, command_buffer_.get(),
task_executor_->outputter(),
@@ -679,6 +681,9 @@ gpu::ContextResult InProcessCommandBuffer::InitializeOnGpuThread(
image_factory_ = params.image_factory;
+ if (gpu_channel_manager_delegate_)
+ gpu_channel_manager_delegate_->DidCreateContextSuccessfully();
+
return gpu::ContextResult::kSuccess;
}
@@ -726,7 +731,6 @@ bool InProcessCommandBuffer::DestroyOnGpuThread() {
decoder_.reset();
}
command_buffer_.reset();
- transfer_buffer_manager_.reset();
surface_ = nullptr;
context_ = nullptr;
@@ -1196,6 +1200,14 @@ void InProcessCommandBuffer::ScheduleGrContextCleanup() {
gr_cache_controller_->ScheduleGrContextCleanup();
}
+void InProcessCommandBuffer::HandleReturnData(base::span<const uint8_t> data) {
+ DCHECK_CALLED_ON_VALID_SEQUENCE(gpu_sequence_checker_);
+ std::vector<uint8_t> vec(data.data(), data.data() + data.size());
+ PostOrRunClientCallback(base::BindOnce(
+ &InProcessCommandBuffer::HandleReturnDataOnOriginThread,
+ client_thread_weak_ptr_factory_.GetWeakPtr(), std::move(vec)));
+}
+
void InProcessCommandBuffer::PostOrRunClientCallback(
base::OnceClosure callback) {
DCHECK_CALLED_ON_VALID_SEQUENCE(gpu_sequence_checker_);
@@ -1554,6 +1566,14 @@ void InProcessCommandBuffer::BufferPresentedOnOriginThread(
}
}
+void InProcessCommandBuffer::HandleReturnDataOnOriginThread(
+ std::vector<uint8_t> data) {
+ DCHECK_CALLED_ON_VALID_SEQUENCE(client_sequence_checker_);
+ if (gpu_control_client_) {
+ gpu_control_client_->OnGpuControlReturnData(data);
+ }
+}
+
void InProcessCommandBuffer::SetUpdateVSyncParametersCallback(
const UpdateVSyncParametersCallback& callback) {
DCHECK_CALLED_ON_VALID_SEQUENCE(client_sequence_checker_);
diff --git a/chromium/gpu/ipc/in_process_command_buffer.h b/chromium/gpu/ipc/in_process_command_buffer.h
index a2ac2361a34..c5729e71821 100644
--- a/chromium/gpu/ipc/in_process_command_buffer.h
+++ b/chromium/gpu/ipc/in_process_command_buffer.h
@@ -64,7 +64,6 @@ class ImageFactory;
class SharedImageFactory;
class SharedImageInterface;
class SyncPointClientState;
-class TransferBufferManager;
struct ContextCreationAttribs;
struct SwapBuffersCompleteParams;
@@ -157,6 +156,7 @@ class GL_IN_PROCESS_CONTEXT_EXPORT InProcessCommandBuffer
void OnRescheduleAfterFinished() override;
void OnSwapBuffers(uint64_t swap_id, uint32_t flags) override;
void ScheduleGrContextCleanup() override;
+ void HandleReturnData(base::span<const uint8_t> data) override;
// ImageTransportSurfaceDelegate implementation:
#if defined(OS_WIN)
@@ -309,6 +309,8 @@ class GL_IN_PROCESS_CONTEXT_EXPORT InProcessCommandBuffer
uint32_t flags,
const gfx::PresentationFeedback& feedback);
+ void HandleReturnDataOnOriginThread(std::vector<uint8_t> data);
+
const CommandBufferId command_buffer_id_;
// Members accessed on the gpu thread (possibly with the exception of
@@ -316,7 +318,6 @@ class GL_IN_PROCESS_CONTEXT_EXPORT InProcessCommandBuffer
bool use_virtualized_gl_context_ = false;
raster::GrShaderCache* gr_shader_cache_ = nullptr;
scoped_refptr<base::SingleThreadTaskRunner> origin_task_runner_;
- std::unique_ptr<TransferBufferManager> transfer_buffer_manager_;
std::unique_ptr<CommandBufferService> command_buffer_;
std::unique_ptr<DecoderContext> decoder_;
base::Optional<raster::GrCacheController> gr_cache_controller_;
diff --git a/chromium/gpu/ipc/in_process_gpu_thread_holder.cc b/chromium/gpu/ipc/in_process_gpu_thread_holder.cc
index 24545c06412..3d143532b61 100644
--- a/chromium/gpu/ipc/in_process_gpu_thread_holder.cc
+++ b/chromium/gpu/ipc/in_process_gpu_thread_holder.cc
@@ -4,6 +4,7 @@
#include "gpu/ipc/in_process_gpu_thread_holder.h"
+#include "base/bind.h"
#include "base/command_line.h"
#include "base/synchronization/waitable_event.h"
#include "base/threading/thread_task_runner_handle.h"
@@ -82,6 +83,7 @@ void InProcessGpuThreadHolder::DeleteOnGpuThread() {
task_executor_.reset();
scheduler_.reset();
sync_point_manager_.reset();
+ shared_image_manager_.reset();
}
} // namespace gpu
diff --git a/chromium/gpu/ipc/raster_in_process_context.cc b/chromium/gpu/ipc/raster_in_process_context.cc
index 088a47da072..4d66c671055 100644
--- a/chromium/gpu/ipc/raster_in_process_context.cc
+++ b/chromium/gpu/ipc/raster_in_process_context.cc
@@ -6,6 +6,7 @@
#include <utility>
+#include "base/bind.h"
#include "base/command_line.h"
#include "base/logging.h"
#include "base/test/test_simple_task_runner.h"
diff --git a/chromium/gpu/ipc/service/DEPS b/chromium/gpu/ipc/service/DEPS
index 0af5af51ea8..0f69224582f 100644
--- a/chromium/gpu/ipc/service/DEPS
+++ b/chromium/gpu/ipc/service/DEPS
@@ -1,4 +1,5 @@
include_rules = [
+ "+cc/paint",
"+components/viz/common/features.h",
"+components/viz/common/resources/resource_format.h",
"+third_party/skia",
diff --git a/chromium/gpu/ipc/service/child_window_win.cc b/chromium/gpu/ipc/service/child_window_win.cc
index fb6e7dccc7f..289e39ff66f 100644
--- a/chromium/gpu/ipc/service/child_window_win.cc
+++ b/chromium/gpu/ipc/service/child_window_win.cc
@@ -6,6 +6,7 @@
#include <memory>
+#include "base/bind.h"
#include "base/compiler_specific.h"
#include "base/memory/weak_ptr.h"
#include "base/message_loop/message_loop.h"
diff --git a/chromium/gpu/ipc/service/command_buffer_stub.cc b/chromium/gpu/ipc/service/command_buffer_stub.cc
index 2ed549a6bdb..1adfcb0f26b 100644
--- a/chromium/gpu/ipc/service/command_buffer_stub.cc
+++ b/chromium/gpu/ipc/service/command_buffer_stub.cc
@@ -13,20 +13,16 @@
#include "base/macros.h"
#include "base/memory/ptr_util.h"
#include "base/memory/shared_memory.h"
+#include "base/no_destructor.h"
#include "base/single_thread_task_runner.h"
#include "base/time/time.h"
#include "base/trace_event/trace_event.h"
#include "build/build_config.h"
#include "gpu/command_buffer/common/constants.h"
#include "gpu/command_buffer/common/gpu_memory_buffer_support.h"
-#include "gpu/command_buffer/common/mailbox.h"
#include "gpu/command_buffer/common/sync_token.h"
#include "gpu/command_buffer/service/decoder_context.h"
-#include "gpu/command_buffer/service/gl_context_virtual.h"
-#include "gpu/command_buffer/service/gl_state_restorer_impl.h"
#include "gpu/command_buffer/service/gpu_command_buffer_memory_tracker.h"
-#include "gpu/command_buffer/service/gpu_fence_manager.h"
-#include "gpu/command_buffer/service/image_manager.h"
#include "gpu/command_buffer/service/logger.h"
#include "gpu/command_buffer/service/mailbox_manager.h"
#include "gpu/command_buffer/service/memory_tracking.h"
@@ -34,17 +30,13 @@
#include "gpu/command_buffer/service/scheduler.h"
#include "gpu/command_buffer/service/service_utils.h"
#include "gpu/command_buffer/service/sync_point_manager.h"
-#include "gpu/command_buffer/service/transfer_buffer_manager.h"
#include "gpu/config/gpu_crash_keys.h"
#include "gpu/ipc/common/gpu_messages.h"
#include "gpu/ipc/service/gpu_channel.h"
#include "gpu/ipc/service/gpu_channel_manager.h"
#include "gpu/ipc/service/gpu_channel_manager_delegate.h"
-#include "gpu/ipc/service/gpu_memory_buffer_factory.h"
#include "gpu/ipc/service/gpu_watchdog_thread.h"
#include "gpu/ipc/service/image_transport_surface.h"
-#include "ui/gfx/gpu_fence.h"
-#include "ui/gfx/gpu_fence_handle.h"
#include "ui/gl/gl_bindings.h"
#include "ui/gl/gl_context.h"
#include "ui/gl/gl_image.h"
@@ -140,6 +132,7 @@ CommandBufferStub::CommandBufferStub(
int32_t stream_id,
int32_t route_id)
: channel_(channel),
+ context_type_(init_params.attribs.context_type),
active_url_(init_params.active_url),
active_url_hash_(base::Hash(active_url_.possibly_invalid_spec())),
initialized_(false),
@@ -184,35 +177,30 @@ bool CommandBufferStub::OnMessageReceived(const IPC::Message& message) {
have_context = true;
}
- // Always use IPC_MESSAGE_HANDLER_DELAY_REPLY for synchronous message handlers
- // here. This is so the reply can be delayed if the scheduler is unscheduled.
- bool handled = true;
- IPC_BEGIN_MESSAGE_MAP(CommandBufferStub, message)
- IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_SetGetBuffer, OnSetGetBuffer);
- IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_TakeFrontBuffer, OnTakeFrontBuffer);
- IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_ReturnFrontBuffer,
- OnReturnFrontBuffer);
- IPC_MESSAGE_HANDLER_DELAY_REPLY(GpuCommandBufferMsg_WaitForTokenInRange,
- OnWaitForTokenInRange);
- IPC_MESSAGE_HANDLER_DELAY_REPLY(GpuCommandBufferMsg_WaitForGetOffsetInRange,
- OnWaitForGetOffsetInRange);
- IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_AsyncFlush, OnAsyncFlush);
- IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_RegisterTransferBuffer,
- OnRegisterTransferBuffer);
- IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_DestroyTransferBuffer,
- OnDestroyTransferBuffer);
- IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_SignalSyncToken, OnSignalSyncToken)
- IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_SignalQuery, OnSignalQuery)
- IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_CreateImage, OnCreateImage);
- IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_DestroyImage, OnDestroyImage);
- IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_CreateStreamTexture,
- OnCreateStreamTexture)
- IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_CreateGpuFenceFromHandle,
- OnCreateGpuFenceFromHandle)
- IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_GetGpuFenceHandle,
- OnGetGpuFenceHandle)
- IPC_MESSAGE_UNHANDLED(handled = false)
- IPC_END_MESSAGE_MAP()
+ bool handled = HandleMessage(message);
+ if (!handled) {
+ handled = true;
+ // Always use IPC_MESSAGE_HANDLER_DELAY_REPLY for synchronous message
+ // handlers here. This is so the reply can be delayed if the scheduler is
+ // unscheduled.
+ IPC_BEGIN_MESSAGE_MAP(CommandBufferStub, message)
+ IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_SetGetBuffer, OnSetGetBuffer);
+ IPC_MESSAGE_HANDLER_DELAY_REPLY(GpuCommandBufferMsg_WaitForTokenInRange,
+ OnWaitForTokenInRange);
+ IPC_MESSAGE_HANDLER_DELAY_REPLY(
+ GpuCommandBufferMsg_WaitForGetOffsetInRange,
+ OnWaitForGetOffsetInRange);
+ IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_AsyncFlush, OnAsyncFlush);
+ IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_RegisterTransferBuffer,
+ OnRegisterTransferBuffer);
+ IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_DestroyTransferBuffer,
+ OnDestroyTransferBuffer);
+ IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_SignalSyncToken,
+ OnSignalSyncToken)
+ IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_SignalQuery, OnSignalQuery)
+ IPC_MESSAGE_UNHANDLED(handled = false)
+ IPC_END_MESSAGE_MAP()
+ }
CheckCompleteWaits();
@@ -417,16 +405,6 @@ void CommandBufferStub::Destroy() {
command_buffer_.reset();
}
-void CommandBufferStub::OnCreateStreamTexture(uint32_t texture_id,
- int32_t stream_id,
- bool* succeeded) {
-#if defined(OS_ANDROID)
- *succeeded = StreamTexture::Create(this, texture_id, stream_id);
-#else
- *succeeded = false;
-#endif
-}
-
void CommandBufferStub::OnSetGetBuffer(int32_t shm_id) {
TRACE_EVENT0("gpu", "CommandBufferStub::OnSetGetBuffer");
if (command_buffer_)
@@ -550,7 +528,8 @@ void CommandBufferStub::OnAsyncFlush(
CommandBuffer::State pre_state = command_buffer_->GetState();
FastSetActiveURL(active_url_, active_url_hash_, channel_);
- MailboxManager* mailbox_manager = context_group_->mailbox_manager();
+ MailboxManager* mailbox_manager =
+ channel_->gpu_channel_manager()->mailbox_manager();
if (mailbox_manager->UsesSync()) {
for (const auto& sync_token : sync_token_fences)
mailbox_manager->PullTextureUpdates(sync_token);
@@ -633,54 +612,11 @@ void CommandBufferStub::OnSignalQuery(uint32_t query_id, uint32_t id) {
OnSignalAck(id);
}
}
-
-void CommandBufferStub::OnCreateGpuFenceFromHandle(
- uint32_t gpu_fence_id,
- const gfx::GpuFenceHandle& handle) {
- if (!context_group_->feature_info()->feature_flags().chromium_gpu_fence) {
- DLOG(ERROR) << "CHROMIUM_gpu_fence unavailable";
- command_buffer_->SetParseError(error::kLostContext);
- return;
- }
-
- if (decoder_context_->GetGpuFenceManager()->CreateGpuFenceFromHandle(
- gpu_fence_id, handle))
- return;
-
- // The insertion failed. This shouldn't happen, force context loss to avoid
- // inconsistent state.
- command_buffer_->SetParseError(error::kLostContext);
- CheckContextLost();
-}
-
-void CommandBufferStub::OnGetGpuFenceHandle(uint32_t gpu_fence_id) {
- if (!context_group_->feature_info()->feature_flags().chromium_gpu_fence) {
- DLOG(ERROR) << "CHROMIUM_gpu_fence unavailable";
- command_buffer_->SetParseError(error::kLostContext);
- return;
- }
-
- auto* manager = decoder_context_->GetGpuFenceManager();
- gfx::GpuFenceHandle handle;
- if (manager->IsValidGpuFence(gpu_fence_id)) {
- std::unique_ptr<gfx::GpuFence> gpu_fence =
- manager->GetGpuFence(gpu_fence_id);
- handle = gfx::CloneHandleForIPC(gpu_fence->GetGpuFenceHandle());
- } else {
- // Retrieval failed. This shouldn't happen, force context loss to avoid
- // inconsistent state.
- DLOG(ERROR) << "GpuFence not found";
- command_buffer_->SetParseError(error::kLostContext);
- CheckContextLost();
- }
- Send(new GpuCommandBufferMsg_GetGpuFenceHandleComplete(route_id_,
- gpu_fence_id, handle));
-}
-
void CommandBufferStub::OnFenceSyncRelease(uint64_t release) {
SyncToken sync_token(CommandBufferNamespace::GPU_IO, command_buffer_id_,
release);
- MailboxManager* mailbox_manager = context_group_->mailbox_manager();
+ MailboxManager* mailbox_manager =
+ channel_->gpu_channel_manager()->mailbox_manager();
if (mailbox_manager->UsesSync() && MakeCurrent())
mailbox_manager->PushTextureUpdates(sync_token);
@@ -707,53 +643,11 @@ void CommandBufferStub::ScheduleGrContextCleanup() {
channel_->gpu_channel_manager()->ScheduleGrContextCleanup();
}
-void CommandBufferStub::OnCreateImage(
- GpuCommandBufferMsg_CreateImage_Params params) {
- TRACE_EVENT0("gpu", "CommandBufferStub::OnCreateImage");
- const int32_t id = params.id;
- const gfx::Size& size = params.size;
- const gfx::BufferFormat& format = params.format;
- const uint64_t image_release_count = params.image_release_count;
-
- gles2::ImageManager* image_manager = channel_->image_manager();
- DCHECK(image_manager);
- if (image_manager->LookupImage(id)) {
- LOG(ERROR) << "Image already exists with same ID.";
- return;
- }
-
- if (!gpu::IsImageFromGpuMemoryBufferFormatSupported(
- format, decoder_context_->GetCapabilities())) {
- LOG(ERROR) << "Format is not supported.";
- return;
- }
-
- if (!gpu::IsImageSizeValidForGpuMemoryBufferFormat(size, format)) {
- LOG(ERROR) << "Invalid image size for format.";
- return;
- }
-
- scoped_refptr<gl::GLImage> image = channel()->CreateImageForGpuMemoryBuffer(
- std::move(params.gpu_memory_buffer), size, format, surface_handle_);
- if (!image.get())
- return;
-
- image_manager->AddImage(image.get(), id);
- if (image_release_count)
- sync_point_client_state_->ReleaseFenceSync(image_release_count);
-}
-
-void CommandBufferStub::OnDestroyImage(int32_t id) {
- TRACE_EVENT0("gpu", "CommandBufferStub::OnDestroyImage");
-
- gles2::ImageManager* image_manager = channel_->image_manager();
- DCHECK(image_manager);
- if (!image_manager->LookupImage(id)) {
- LOG(ERROR) << "Image with ID doesn't exist.";
- return;
- }
-
- image_manager->RemoveImage(id);
+void CommandBufferStub::HandleReturnData(base::span<const uint8_t> data) {
+ std::vector<uint8_t> vec(data.begin(), data.end());
+ IPC::Message* msg =
+ new GpuCommandBufferMsg_ReturnData(route_id_, std::move(vec));
+ Send(msg);
}
void CommandBufferStub::OnConsoleMessage(int32_t id,
@@ -782,15 +676,31 @@ void CommandBufferStub::RemoveDestructionObserver(
}
std::unique_ptr<MemoryTracker> CommandBufferStub::CreateMemoryTracker(
- const GPUCreateCommandBufferConfig init_params) const {
+ const GPUCreateCommandBufferConfig& init_params) const {
+ MemoryTrackerFactory current_factory = GetMemoryTrackerFactory();
+ if (current_factory)
+ return current_factory.Run(init_params);
+
return std::make_unique<GpuCommandBufferMemoryTracker>(
channel_->client_id(), channel_->client_tracing_id(),
command_buffer_id_.GetUnsafeValue(), init_params.attribs.context_type,
channel_->task_runner());
}
-MemoryTracker* CommandBufferStub::GetMemoryTracker() const {
- return context_group_->memory_tracker();
+// static
+void CommandBufferStub::SetMemoryTrackerFactoryForTesting(
+ MemoryTrackerFactory factory) {
+ SetOrGetMemoryTrackerFactory(factory);
+}
+
+scoped_refptr<Buffer> CommandBufferStub::GetTransferBuffer(int32_t id) {
+ return command_buffer_->GetTransferBuffer(id);
+}
+
+void CommandBufferStub::RegisterTransferBufferForTest(
+ int32_t id,
+ scoped_refptr<Buffer> buffer) {
+ command_buffer_->RegisterTransferBuffer(id, std::move(buffer));
}
bool CommandBufferStub::CheckContextLost() {
@@ -820,4 +730,20 @@ void CommandBufferStub::MarkContextLost() {
command_buffer_->SetParseError(error::kLostContext);
}
+// static
+CommandBufferStub::MemoryTrackerFactory
+CommandBufferStub::GetMemoryTrackerFactory() {
+ return SetOrGetMemoryTrackerFactory(base::NullCallback());
+}
+
+// static
+CommandBufferStub::MemoryTrackerFactory
+CommandBufferStub::SetOrGetMemoryTrackerFactory(MemoryTrackerFactory factory) {
+ static base::NoDestructor<MemoryTrackerFactory> current_factory{
+ base::NullCallback()};
+ if (factory)
+ *current_factory = factory;
+ return *current_factory;
+}
+
} // namespace gpu
diff --git a/chromium/gpu/ipc/service/command_buffer_stub.h b/chromium/gpu/ipc/service/command_buffer_stub.h
index a1fb4bed007..983b1f51841 100644
--- a/chromium/gpu/ipc/service/command_buffer_stub.h
+++ b/chromium/gpu/ipc/service/command_buffer_stub.h
@@ -12,6 +12,7 @@
#include <string>
#include <vector>
+#include "base/callback.h"
#include "base/macros.h"
#include "base/memory/weak_ptr.h"
#include "base/observer_list.h"
@@ -30,7 +31,6 @@
#include "ipc/ipc_listener.h"
#include "ipc/ipc_sender.h"
#include "ui/gfx/geometry/size.h"
-#include "ui/gfx/gpu_fence_handle.h"
#include "ui/gfx/gpu_memory_buffer.h"
#include "ui/gfx/swap_result.h"
#include "ui/gl/gl_share_group.h"
@@ -39,11 +39,10 @@
#include "url/gurl.h"
struct GPUCreateCommandBufferConfig;
-struct GpuCommandBufferMsg_CreateImage_Params;
namespace gpu {
class DecoderContext;
-struct Mailbox;
+class MemoryTracker;
struct SyncToken;
struct WaitForCommandState;
class GpuChannel;
@@ -84,6 +83,8 @@ class GPU_IPC_SERVICE_EXPORT CommandBufferStub
const GPUCreateCommandBufferConfig& init_params,
base::UnsafeSharedMemoryRegion shared_state_shm) = 0;
+ virtual MemoryTracker* GetMemoryTracker() const = 0;
+
// IPC::Listener implementation:
bool OnMessageReceived(const IPC::Message& message) override;
@@ -101,8 +102,18 @@ class GPU_IPC_SERVICE_EXPORT CommandBufferStub
void OnDescheduleUntilFinished() override;
void OnRescheduleAfterFinished() override;
void ScheduleGrContextCleanup() override;
+ void HandleReturnData(base::span<const uint8_t> data) override;
+
+ using MemoryTrackerFactory =
+ base::RepeatingCallback<std::unique_ptr<MemoryTracker>(
+ const GPUCreateCommandBufferConfig&)>;
+
+ // Overrides the way CreateMemoryTracker() uses to create a MemoryTracker.
+ // This is intended for mocking the MemoryTracker in tests.
+ static void SetMemoryTrackerFactoryForTesting(MemoryTrackerFactory factory);
- MemoryTracker* GetMemoryTracker() const;
+ scoped_refptr<Buffer> GetTransferBuffer(int32_t id);
+ void RegisterTransferBufferForTest(int32_t id, scoped_refptr<Buffer> buffer);
// Whether this command buffer can currently handle IPC messages.
bool IsScheduled();
@@ -122,15 +133,17 @@ class GPU_IPC_SERVICE_EXPORT CommandBufferStub
gl::GLSurface* surface() const { return surface_.get(); }
+ ContextType context_type() const { return context_type_; }
+
void AddDestructionObserver(DestructionObserver* observer);
void RemoveDestructionObserver(DestructionObserver* observer);
void MarkContextLost();
- scoped_refptr<gles2::ContextGroup> context_group() { return context_group_; }
scoped_refptr<gl::GLShareGroup> share_group() { return share_group_; }
protected:
+ virtual bool HandleMessage(const IPC::Message& message) = 0;
// FastSetActiveURL will shortcut the expensive call to SetActiveURL when the
// url_hash matches.
static void FastSetActiveURL(const GURL& url,
@@ -138,25 +151,24 @@ class GPU_IPC_SERVICE_EXPORT CommandBufferStub
GpuChannel* channel);
std::unique_ptr<MemoryTracker> CreateMemoryTracker(
- const GPUCreateCommandBufferConfig init_params) const;
+ const GPUCreateCommandBufferConfig& init_params) const;
// Must be called during Initialize(). Takes ownership to co-ordinate
// teardown in Destroy().
void set_decoder_context(std::unique_ptr<DecoderContext> decoder_context) {
decoder_context_ = std::move(decoder_context);
}
+ bool CheckContextLost();
// The lifetime of objects of this class is managed by a GpuChannel. The
// GpuChannels destroy all the CommandBufferStubs that they own when
// they are destroyed. So a raw pointer is safe.
GpuChannel* const channel_;
+ ContextType context_type_;
GURL active_url_;
size_t active_url_hash_;
- // The group of contexts that share namespaces with this context.
- scoped_refptr<gles2::ContextGroup> context_group_;
-
bool initialized_;
const SurfaceHandle surface_handle_;
bool use_virtualized_gl_context_;
@@ -181,8 +193,6 @@ class GPU_IPC_SERVICE_EXPORT CommandBufferStub
// Message handlers:
void OnSetGetBuffer(int32_t shm_id);
- virtual void OnTakeFrontBuffer(const Mailbox& mailbox) = 0;
- virtual void OnReturnFrontBuffer(const Mailbox& mailbox, bool is_lost) = 0;
void OnGetState(IPC::Message* reply_message);
void OnWaitForTokenInRange(int32_t start,
int32_t end,
@@ -199,20 +209,9 @@ class GPU_IPC_SERVICE_EXPORT CommandBufferStub
void OnDestroyTransferBuffer(int32_t id);
void OnGetTransferBuffer(int32_t id, IPC::Message* reply_message);
- void OnEnsureBackbuffer();
-
void OnSignalSyncToken(const SyncToken& sync_token, uint32_t id);
void OnSignalAck(uint32_t id);
void OnSignalQuery(uint32_t query, uint32_t id);
- void OnCreateGpuFenceFromHandle(uint32_t gpu_fence_id,
- const gfx::GpuFenceHandle& handle);
- void OnGetGpuFenceHandle(uint32_t gpu_fence_id);
-
- void OnCreateImage(GpuCommandBufferMsg_CreateImage_Params params);
- void OnDestroyImage(int32_t id);
- void OnCreateStreamTexture(uint32_t texture_id,
- int32_t stream_id,
- bool* succeeded);
void ReportState();
@@ -226,13 +225,22 @@ class GPU_IPC_SERVICE_EXPORT CommandBufferStub
// of delayed work.
void ScheduleDelayedWork(base::TimeDelta delay);
- bool CheckContextLost();
void CheckCompleteWaits();
// Set driver bug workarounds and disabled GL extensions to the context.
static void SetContextGpuFeatureInfo(gl::GLContext* context,
const GpuFeatureInfo& gpu_feature_info);
+ static MemoryTrackerFactory GetMemoryTrackerFactory();
+
+ // Overrides the way CreateMemoryTracker() uses to create a MemoryTracker. If
+ // |factory| is base::NullCallback(), it returns the current
+ // MemoryTrackerFactory (initially base::NullCallback() which
+ // CreateMemoryTracker() should interpret as a signal to use the default).
+ // This is intended for mocking the MemoryTracker in tests.
+ static MemoryTrackerFactory SetOrGetMemoryTrackerFactory(
+ MemoryTrackerFactory factory);
+
std::unique_ptr<DecoderContext> decoder_context_;
uint32_t last_flush_id_;
diff --git a/chromium/gpu/ipc/service/direct_composition_child_surface_win.cc b/chromium/gpu/ipc/service/direct_composition_child_surface_win.cc
index 886dff6d36d..16726c5b514 100644
--- a/chromium/gpu/ipc/service/direct_composition_child_surface_win.cc
+++ b/chromium/gpu/ipc/service/direct_composition_child_surface_win.cc
@@ -154,7 +154,9 @@ bool DirectCompositionChildSurfaceWin::ReleaseDrawTexture(bool will_discard) {
params.DirtyRectsCount = 1;
params.pDirtyRects = &dirty_rect;
HRESULT hr = swap_chain_->Present1(interval, flags, &params);
- if (FAILED(hr)) {
+ // Ignore DXGI_STATUS_OCCLUDED since that's not an error but only
+ // indicates that the window is occluded and we can stop rendering.
+ if (FAILED(hr) && hr != DXGI_STATUS_OCCLUDED) {
DLOG(ERROR) << "Present1 failed with error " << std::hex << hr;
return false;
}
@@ -216,7 +218,7 @@ void* DirectCompositionChildSurfaceWin::GetHandle() {
}
gfx::SwapResult DirectCompositionChildSurfaceWin::SwapBuffers(
- const PresentationCallback& callback) {
+ PresentationCallback callback) {
// PresentationCallback is handled by DirectCompositionSurfaceWin. The child
// surface doesn't need provide presentation feedback.
DCHECK(!callback);
@@ -262,19 +264,19 @@ bool DirectCompositionChildSurfaceWin::SupportsDCLayers() const {
bool DirectCompositionChildSurfaceWin::SetDrawRectangle(
const gfx::Rect& rectangle) {
if (!gfx::Rect(size_).Contains(rectangle)) {
- VLOG(1) << "Draw rectangle must be contained within size of surface";
+ DLOG(ERROR) << "Draw rectangle must be contained within size of surface";
return false;
}
if (draw_texture_) {
- VLOG(1) << "SetDrawRectangle must be called only once per swap buffers";
+ DLOG(ERROR) << "SetDrawRectangle must be called only once per swap buffers";
return false;
}
DCHECK(!real_surface_);
DCHECK(!g_current_surface);
if (gfx::Rect(size_) != rectangle && !swap_chain_ && !dcomp_surface_) {
- VLOG(1) << "First draw to surface must draw to everything";
+ DLOG(ERROR) << "First draw to surface must draw to everything";
return false;
}
@@ -294,7 +296,7 @@ bool DirectCompositionChildSurfaceWin::SetDrawRectangle(
size_.width(), size_.height(), output_format,
DXGI_ALPHA_MODE_PREMULTIPLIED, dcomp_surface_.GetAddressOf());
if (FAILED(hr)) {
- VLOG(1) << "CreateSurface failed with error " << std::hex << hr;
+ DLOG(ERROR) << "CreateSurface failed with error " << std::hex << hr;
return false;
}
} else if (!enable_dc_layers_ && !swap_chain_) {
@@ -331,8 +333,8 @@ bool DirectCompositionChildSurfaceWin::SetDrawRectangle(
d3d11_device_.Get(), &desc, nullptr, swap_chain_.GetAddressOf());
first_swap_ = true;
if (FAILED(hr)) {
- VLOG(1) << "CreateSwapChainForComposition failed with error " << std::hex
- << hr;
+ DLOG(ERROR) << "CreateSwapChainForComposition failed with error "
+ << std::hex << hr;
return false;
}
}
@@ -346,7 +348,7 @@ bool DirectCompositionChildSurfaceWin::SetDrawRectangle(
HRESULT hr = dcomp_surface_->BeginDraw(
&rect, IID_PPV_ARGS(draw_texture_.GetAddressOf()), &update_offset);
if (FAILED(hr)) {
- VLOG(1) << "BeginDraw failed with error " << std::hex << hr;
+ DLOG(ERROR) << "BeginDraw failed with error " << std::hex << hr;
return false;
}
draw_offset_ = gfx::Point(update_offset) - rectangle.origin();
@@ -373,8 +375,8 @@ bool DirectCompositionChildSurfaceWin::SetDrawRectangle(
eglCreatePbufferFromClientBuffer(GetDisplay(), EGL_D3D_TEXTURE_ANGLE,
buffer, GetConfig(), pbuffer_attribs);
if (!real_surface_) {
- VLOG(1) << "eglCreatePbufferFromClientBuffer failed with error "
- << ui::GetLastEGLErrorString();
+ DLOG(ERROR) << "eglCreatePbufferFromClientBuffer failed with error "
+ << ui::GetLastEGLErrorString();
return false;
}
diff --git a/chromium/gpu/ipc/service/direct_composition_child_surface_win.h b/chromium/gpu/ipc/service/direct_composition_child_surface_win.h
index a91071e594b..52cbce9099e 100644
--- a/chromium/gpu/ipc/service/direct_composition_child_surface_win.h
+++ b/chromium/gpu/ipc/service/direct_composition_child_surface_win.h
@@ -26,7 +26,7 @@ class GPU_IPC_SERVICE_EXPORT DirectCompositionChildSurfaceWin
gfx::Size GetSize() override;
bool IsOffscreen() override;
void* GetHandle() override;
- gfx::SwapResult SwapBuffers(const PresentationCallback& callback) override;
+ gfx::SwapResult SwapBuffers(PresentationCallback callback) override;
bool FlipsVertically() const override;
bool SupportsPostSubBuffer() override;
bool OnMakeCurrent(gl::GLContext* context) override;
diff --git a/chromium/gpu/ipc/service/direct_composition_surface_win.cc b/chromium/gpu/ipc/service/direct_composition_surface_win.cc
index 93e0fe4341a..8154366b6e2 100644
--- a/chromium/gpu/ipc/service/direct_composition_surface_win.cc
+++ b/chromium/gpu/ipc/service/direct_composition_surface_win.cc
@@ -8,6 +8,8 @@
#include <dcomptypes.h>
#include <dxgi1_6.h>
+#include <utility>
+
#include "base/containers/circular_deque.h"
#include "base/feature_list.h"
#include "base/metrics/histogram_functions.h"
@@ -516,6 +518,11 @@ class DCLayerTree::SwapChainPresenter {
const gfx::Size& swap_chain_size,
bool* needs_commit);
+ // Records presentation statistics in UMA and traces (for pixel tests) for the
+ // current swap chain which could either be a regular flip swap chain or a
+ // decode swap chain.
+ void RecordPresentationStatistics();
+
// Layer tree instance that owns this swap chain presenter.
DCLayerTree* layer_tree_;
@@ -616,6 +623,12 @@ bool DCLayerTree::Initialize(
dcomp_device_->CreateVisual(root_visual_.GetAddressOf());
DCHECK(root_visual_);
dcomp_target_->SetRoot(root_visual_.Get());
+ // A visual inherits the interpolation mode of the parent visual by default.
+ // If no visuals set the interpolation mode, the default for the entire visual
+ // tree is nearest neighbor interpolation.
+ // Set the interpolation mode to Linear to get a better upscaling quality.
+ root_visual_->SetBitmapInterpolationMode(
+ DCOMPOSITION_BITMAP_INTERPOLATION_MODE_LINEAR);
return true;
}
@@ -1051,7 +1064,9 @@ bool DCLayerTree::SwapChainPresenter::PresentToDecodeSwapChain(
HRESULT hr = decode_swap_chain_->PresentBuffer(image_dxgi->level(), 1, 0);
base::UmaHistogramSparse("GPU.DirectComposition.DecodeSwapChainPresentResult",
hr);
- if (FAILED(hr)) {
+ // Ignore DXGI_STATUS_OCCLUDED since that's not an error but only indicates
+ // that the window is occluded and we can stop rendering.
+ if (FAILED(hr) && hr != DXGI_STATUS_OCCLUDED) {
DLOG(ERROR) << "PresentBuffer failed with error 0x" << std::hex << hr;
return false;
}
@@ -1068,6 +1083,7 @@ bool DCLayerTree::SwapChainPresenter::PresentToDecodeSwapChain(
frames_since_color_space_change_ = 0;
is_yuv_swapchain_ = true;
}
+ RecordPresentationStatistics();
return true;
}
@@ -1113,7 +1129,9 @@ bool DCLayerTree::SwapChainPresenter::PresentToSwapChain(
if (image_dxgi && use_decode_swap_chain) {
D3D11_TEXTURE2D_DESC texture_desc = {};
image_dxgi->texture()->GetDesc(&texture_desc);
+
bool is_decoder_texture = texture_desc.BindFlags & D3D11_BIND_DECODER;
+
// Decode swap chains do not support shared resources.
// TODO(sunnyps): Find a workaround for when the decoder moves to its own
// thread and D3D device. See https://crbug.com/911847
@@ -1121,7 +1139,26 @@ bool DCLayerTree::SwapChainPresenter::PresentToSwapChain(
texture_desc.MiscFlags &
(D3D11_RESOURCE_MISC_SHARED | D3D11_RESOURCE_MISC_SHARED_KEYEDMUTEX |
D3D11_RESOURCE_MISC_SHARED_NTHANDLE);
- if (is_decoder_texture && !is_shared_texture) {
+
+ // Rotated videos are not promoted to overlays. We plan to implement
+ // rotation using video processor instead of via direct composition. Also
+ // check for skew and any downscaling specified to direct composition.
+ bool is_overlay_supported_transform =
+ visual_info_.transform.IsPositiveScaleOrTranslation();
+
+ // Downscaled video isn't promoted to hardware overlays. We prefer to blit
+ // into the smaller size so that it can be promoted to a hardware overlay.
+ float swap_chain_scale_x =
+ swap_chain_size.width() * 1.0f / params.content_rect.width();
+ float swap_chain_scale_y =
+ swap_chain_size.height() * 1.0f / params.content_rect.height();
+
+ is_overlay_supported_transform = is_overlay_supported_transform &&
+ (swap_chain_scale_x >= 1.0f) &&
+ (swap_chain_scale_y >= 1.0f);
+
+ if (is_decoder_texture && !is_shared_texture &&
+ is_overlay_supported_transform) {
if (PresentToDecodeSwapChain(image_dxgi, params.content_rect,
swap_chain_size, needs_commit)) {
return true;
@@ -1195,7 +1232,9 @@ bool DCLayerTree::SwapChainPresenter::PresentToSwapChain(
first_present_ = false;
HRESULT hr = swap_chain_->Present(0, 0);
- if (FAILED(hr)) {
+ // Ignore DXGI_STATUS_OCCLUDED since that's not an error but only indicates
+ // that the window is occluded and we can stop rendering.
+ if (FAILED(hr) && hr != DXGI_STATUS_OCCLUDED) {
DLOG(ERROR) << "Present failed with error 0x" << std::hex << hr;
return false;
}
@@ -1230,29 +1269,57 @@ bool DCLayerTree::SwapChainPresenter::PresentToSwapChain(
event.Wait();
}
+ // Ignore DXGI_STATUS_OCCLUDED since that's not an error but only indicates
+ // that the window is occluded and we can stop rendering.
HRESULT hr = swap_chain_->Present(1, 0);
- if (FAILED(hr)) {
+ if (FAILED(hr) && hr != DXGI_STATUS_OCCLUDED) {
DLOG(ERROR) << "Present failed with error 0x" << std::hex << hr;
return false;
}
-
- UMA_HISTOGRAM_ENUMERATION(
- "GPU.DirectComposition.SwapChainFormat2",
- is_yuv_swapchain_ ? g_overlay_format_used : OverlayFormat::kBGRA);
-
frames_since_color_space_change_++;
+ RecordPresentationStatistics();
+ return true;
+}
+void DCLayerTree::SwapChainPresenter::RecordPresentationStatistics() {
+ OverlayFormat swap_chain_format =
+ is_yuv_swapchain_ ? g_overlay_format_used : OverlayFormat::kBGRA;
+ UMA_HISTOGRAM_ENUMERATION("GPU.DirectComposition.SwapChainFormat2",
+ swap_chain_format);
+ TRACE_EVENT_INSTANT2(TRACE_DISABLED_BY_DEFAULT("gpu.service"),
+ "SwapChain::Present", TRACE_EVENT_SCOPE_THREAD,
+ "PixelFormat", swap_chain_format, "ZeroCopy",
+ !!decode_swap_chain_);
+ HRESULT hr = 0;
Microsoft::WRL::ComPtr<IDXGISwapChainMedia> swap_chain_media;
- if (SUCCEEDED(swap_chain_.CopyTo(swap_chain_media.GetAddressOf()))) {
+ if (decode_swap_chain_) {
+ hr = decode_swap_chain_.As(&swap_chain_media);
+ } else {
+ DCHECK(swap_chain_);
+ hr = swap_chain_.As(&swap_chain_media);
+ }
+ if (SUCCEEDED(hr)) {
DCHECK(swap_chain_media);
DXGI_FRAME_STATISTICS_MEDIA stats = {};
- if (SUCCEEDED(swap_chain_media->GetFrameStatisticsMedia(&stats))) {
+ // GetFrameStatisticsMedia fails with DXGI_ERROR_FRAME_STATISTICS_DISJOINT
+ // sometimes, which means an event (such as power cycle) interrupted the
+ // gathering of presentation statistics. In this situation, calling the
+ // function again succeeds but returns with CompositionMode = NONE.
+ // Waiting for the DXGI adapter to finish presenting before calling the
+ // function doesn't get rid of the failure.
+ HRESULT hr = swap_chain_media->GetFrameStatisticsMedia(&stats);
+ int mode = -1;
+ if (SUCCEEDED(hr)) {
base::UmaHistogramSparse("GPU.DirectComposition.CompositionMode",
stats.CompositionMode);
presentation_history_.AddSample(stats.CompositionMode);
+ mode = stats.CompositionMode;
}
+ // Record CompositionMode as -1 if GetFrameStatisticsMedia() fails.
+ TRACE_EVENT_INSTANT1(TRACE_DISABLED_BY_DEFAULT("gpu.service"),
+ "GetFrameStatisticsMedia", TRACE_EVENT_SCOPE_THREAD,
+ "CompositionMode", mode);
}
- return true;
}
bool DCLayerTree::SwapChainPresenter::VideoProcessorBlt(
@@ -1589,6 +1656,8 @@ bool DCLayerTree::CommitAndClearPendingOverlays(
bool video_needs_commit = false;
if (!video_swap_chain->PresentToSwapChain(*pending_overlays_[i],
&video_needs_commit)) {
+ DLOG(ERROR) << "PresentToSwapChain failed";
+ DCHECK(false);
return false;
}
needs_commit = needs_commit || video_needs_commit;
@@ -1624,6 +1693,7 @@ bool DCLayerTree::CommitAndClearPendingOverlays(
HRESULT hr = dcomp_device_->Commit();
if (FAILED(hr)) {
DLOG(ERROR) << "Commit failed with error 0x" << std::hex << hr;
+ DCHECK(false);
return false;
}
}
@@ -1873,9 +1943,9 @@ bool DirectCompositionSurfaceWin::Resize(const gfx::Size& size,
}
gfx::SwapResult DirectCompositionSurfaceWin::SwapBuffers(
- const PresentationCallback& callback) {
+ PresentationCallback callback) {
gl::GLSurfacePresentationHelper::ScopedSwapBuffers scoped_swap_buffers(
- presentation_helper_.get(), callback);
+ presentation_helper_.get(), std::move(callback));
bool succeeded = true;
if (root_surface_->SwapBuffers(PresentationCallback()) ==
@@ -1883,7 +1953,8 @@ gfx::SwapResult DirectCompositionSurfaceWin::SwapBuffers(
succeeded = false;
DCLayerTree::BackbufferInfo backbuffer_info = {
- root_surface_->swap_chain().Get(), root_surface_->dcomp_surface().Get(),
+ root_surface_->swap_chain().Get(),
+ root_surface_->dcomp_surface().Get(),
root_surface_->dcomp_surface_serial(),
};
if (!layer_tree_->CommitAndClearPendingOverlays(std::move(backbuffer_info)))
@@ -1902,10 +1973,10 @@ gfx::SwapResult DirectCompositionSurfaceWin::PostSubBuffer(
int y,
int width,
int height,
- const PresentationCallback& callback) {
+ PresentationCallback callback) {
// The arguments are ignored because SetDrawRectangle specified the area to
// be swapped.
- return SwapBuffers(callback);
+ return SwapBuffers(std::move(callback));
}
gfx::VSyncProvider* DirectCompositionSurfaceWin::GetVSyncProvider() {
diff --git a/chromium/gpu/ipc/service/direct_composition_surface_win.h b/chromium/gpu/ipc/service/direct_composition_surface_win.h
index 703e4ab5138..b0217bb528f 100644
--- a/chromium/gpu/ipc/service/direct_composition_surface_win.h
+++ b/chromium/gpu/ipc/service/direct_composition_surface_win.h
@@ -70,12 +70,12 @@ class GPU_IPC_SERVICE_EXPORT DirectCompositionSurfaceWin
float scale_factor,
ColorSpace color_space,
bool has_alpha) override;
- gfx::SwapResult SwapBuffers(const PresentationCallback& callback) override;
+ gfx::SwapResult SwapBuffers(PresentationCallback callback) override;
gfx::SwapResult PostSubBuffer(int x,
int y,
int width,
int height,
- const PresentationCallback& callback) override;
+ PresentationCallback callback) override;
gfx::VSyncProvider* GetVSyncProvider() override;
void SetVSyncEnabled(bool enabled) override;
bool SetEnableDCLayers(bool enable) override;
diff --git a/chromium/gpu/ipc/service/gles2_command_buffer_stub.cc b/chromium/gpu/ipc/service/gles2_command_buffer_stub.cc
index d1f37958d4d..14b17b0e7e2 100644
--- a/chromium/gpu/ipc/service/gles2_command_buffer_stub.cc
+++ b/chromium/gpu/ipc/service/gles2_command_buffer_stub.cc
@@ -26,6 +26,7 @@
#include "gpu/command_buffer/common/swap_buffers_flags.h"
#include "gpu/command_buffer/service/gl_context_virtual.h"
#include "gpu/command_buffer/service/gl_state_restorer_impl.h"
+#include "gpu/command_buffer/service/gpu_fence_manager.h"
#include "gpu/command_buffer/service/image_manager.h"
#include "gpu/command_buffer/service/logger.h"
#include "gpu/command_buffer/service/mailbox_manager.h"
@@ -41,6 +42,8 @@
#include "gpu/ipc/service/gpu_memory_buffer_factory.h"
#include "gpu/ipc/service/gpu_watchdog_thread.h"
#include "gpu/ipc/service/image_transport_surface.h"
+#include "ui/gfx/gpu_fence.h"
+#include "ui/gfx/gpu_fence_handle.h"
#include "ui/gl/gl_bindings.h"
#include "ui/gl/gl_context.h"
#include "ui/gl/gl_image.h"
@@ -88,9 +91,19 @@ gpu::ContextResult GLES2CommandBufferStub::Initialize(
DCHECK(manager);
if (share_command_buffer_stub) {
- context_group_ = share_command_buffer_stub->context_group();
- DCHECK(context_group_->bind_generates_resource() ==
- init_params.attribs.bind_generates_resource);
+ context_group_ =
+ share_command_buffer_stub->decoder_context()->GetContextGroup();
+ if (!context_group_) {
+ LOG(ERROR) << "ContextResult::kFatalFailure: attempt to create a GLES2 "
+ "context sharing with a non-GLES2 context";
+ return gpu::ContextResult::kFatalFailure;
+ }
+ if (context_group_->bind_generates_resource() !=
+ init_params.attribs.bind_generates_resource) {
+ LOG(ERROR) << "ContextResult::kFatalFailure: attempt to create a shared "
+ "GLES2 context with inconsistent bind_generates_resource";
+ return gpu::ContextResult::kFatalFailure;
+ }
} else {
scoped_refptr<gles2::FeatureInfo> feature_info = new gles2::FeatureInfo(
manager->gpu_driver_bug_workarounds(), manager->gpu_feature_info());
@@ -157,7 +170,7 @@ gpu::ContextResult GLES2CommandBufferStub::Initialize(
#endif
command_buffer_ = std::make_unique<CommandBufferService>(
- this, context_group_->transfer_buffer_manager());
+ this, context_group_->memory_tracker());
gles2_decoder_ = gles2::GLES2Decoder::Create(
this, command_buffer_.get(), manager->outputter(), context_group_.get());
set_decoder_context(std::unique_ptr<DecoderContext>(gles2_decoder_));
@@ -415,13 +428,32 @@ int32_t GLES2CommandBufferStub::GetRouteID() const {
return route_id_;
}
+MemoryTracker* GLES2CommandBufferStub::GetMemoryTracker() const {
+ return context_group_->memory_tracker();
+}
+
+bool GLES2CommandBufferStub::HandleMessage(const IPC::Message& message) {
+ bool handled = true;
+ IPC_BEGIN_MESSAGE_MAP(GLES2CommandBufferStub, message)
+ IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_TakeFrontBuffer, OnTakeFrontBuffer);
+ IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_ReturnFrontBuffer,
+ OnReturnFrontBuffer);
+ IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_CreateImage, OnCreateImage);
+ IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_DestroyImage, OnDestroyImage);
+ IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_CreateStreamTexture,
+ OnCreateStreamTexture)
+ IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_CreateGpuFenceFromHandle,
+ OnCreateGpuFenceFromHandle)
+ IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_GetGpuFenceHandle,
+ OnGetGpuFenceHandle)
+ IPC_MESSAGE_UNHANDLED(handled = false)
+ IPC_END_MESSAGE_MAP()
+ return handled;
+}
+
void GLES2CommandBufferStub::OnTakeFrontBuffer(const Mailbox& mailbox) {
TRACE_EVENT0("gpu", "CommandBufferStub::OnTakeFrontBuffer");
- if (!gles2_decoder_) {
- LOG(ERROR) << "Can't take front buffer before initialization.";
- return;
- }
-
+ DCHECK(gles2_decoder_);
gles2_decoder_->TakeFrontBuffer(mailbox);
}
@@ -432,6 +464,108 @@ void GLES2CommandBufferStub::OnReturnFrontBuffer(const Mailbox& mailbox,
gles2_decoder_->ReturnFrontBuffer(mailbox, is_lost);
}
+void GLES2CommandBufferStub::OnCreateGpuFenceFromHandle(
+ uint32_t gpu_fence_id,
+ const gfx::GpuFenceHandle& handle) {
+ if (!context_group_->feature_info()->feature_flags().chromium_gpu_fence) {
+ DLOG(ERROR) << "CHROMIUM_gpu_fence unavailable";
+ command_buffer_->SetParseError(error::kLostContext);
+ return;
+ }
+
+ if (gles2_decoder_->GetGpuFenceManager()->CreateGpuFenceFromHandle(
+ gpu_fence_id, handle))
+ return;
+
+ // The insertion failed. This shouldn't happen, force context loss to avoid
+ // inconsistent state.
+ command_buffer_->SetParseError(error::kLostContext);
+ CheckContextLost();
+}
+
+void GLES2CommandBufferStub::OnGetGpuFenceHandle(uint32_t gpu_fence_id) {
+ if (!context_group_->feature_info()->feature_flags().chromium_gpu_fence) {
+ DLOG(ERROR) << "CHROMIUM_gpu_fence unavailable";
+ command_buffer_->SetParseError(error::kLostContext);
+ return;
+ }
+
+ auto* manager = gles2_decoder_->GetGpuFenceManager();
+ gfx::GpuFenceHandle handle;
+ if (manager->IsValidGpuFence(gpu_fence_id)) {
+ std::unique_ptr<gfx::GpuFence> gpu_fence =
+ manager->GetGpuFence(gpu_fence_id);
+ handle = gfx::CloneHandleForIPC(gpu_fence->GetGpuFenceHandle());
+ } else {
+ // Retrieval failed. This shouldn't happen, force context loss to avoid
+ // inconsistent state.
+ DLOG(ERROR) << "GpuFence not found";
+ command_buffer_->SetParseError(error::kLostContext);
+ CheckContextLost();
+ }
+ Send(new GpuCommandBufferMsg_GetGpuFenceHandleComplete(route_id_,
+ gpu_fence_id, handle));
+}
+
+void GLES2CommandBufferStub::OnCreateImage(
+ GpuCommandBufferMsg_CreateImage_Params params) {
+ TRACE_EVENT0("gpu", "GLES2CommandBufferStub::OnCreateImage");
+ const int32_t id = params.id;
+ const gfx::Size& size = params.size;
+ const gfx::BufferFormat& format = params.format;
+ const uint64_t image_release_count = params.image_release_count;
+
+ gles2::ImageManager* image_manager = channel_->image_manager();
+ DCHECK(image_manager);
+ if (image_manager->LookupImage(id)) {
+ LOG(ERROR) << "Image already exists with same ID.";
+ return;
+ }
+
+ if (!gpu::IsImageFromGpuMemoryBufferFormatSupported(
+ format, gles2_decoder_->GetCapabilities())) {
+ LOG(ERROR) << "Format is not supported.";
+ return;
+ }
+
+ if (!gpu::IsImageSizeValidForGpuMemoryBufferFormat(size, format)) {
+ LOG(ERROR) << "Invalid image size for format.";
+ return;
+ }
+
+ scoped_refptr<gl::GLImage> image = channel()->CreateImageForGpuMemoryBuffer(
+ std::move(params.gpu_memory_buffer), size, format, surface_handle_);
+ if (!image.get())
+ return;
+
+ image_manager->AddImage(image.get(), id);
+ if (image_release_count)
+ sync_point_client_state_->ReleaseFenceSync(image_release_count);
+}
+
+void GLES2CommandBufferStub::OnDestroyImage(int32_t id) {
+ TRACE_EVENT0("gpu", "GLES2CommandBufferStub::OnDestroyImage");
+
+ gles2::ImageManager* image_manager = channel_->image_manager();
+ DCHECK(image_manager);
+ if (!image_manager->LookupImage(id)) {
+ LOG(ERROR) << "Image with ID doesn't exist.";
+ return;
+ }
+
+ image_manager->RemoveImage(id);
+}
+
+void GLES2CommandBufferStub::OnCreateStreamTexture(uint32_t texture_id,
+ int32_t stream_id,
+ bool* succeeded) {
+#if defined(OS_ANDROID)
+ *succeeded = StreamTexture::Create(this, texture_id, stream_id);
+#else
+ *succeeded = false;
+#endif
+}
+
void GLES2CommandBufferStub::OnSwapBuffers(uint64_t swap_id, uint32_t flags) {
pending_swap_completed_params_.push_back({swap_id, flags});
pending_presented_params_.push_back({swap_id, flags});
diff --git a/chromium/gpu/ipc/service/gles2_command_buffer_stub.h b/chromium/gpu/ipc/service/gles2_command_buffer_stub.h
index 4d4530ba5a7..6e09643060d 100644
--- a/chromium/gpu/ipc/service/gles2_command_buffer_stub.h
+++ b/chromium/gpu/ipc/service/gles2_command_buffer_stub.h
@@ -10,8 +10,11 @@
#include "build/build_config.h"
#include "gpu/ipc/service/command_buffer_stub.h"
#include "gpu/ipc/service/image_transport_surface_delegate.h"
+#include "ui/gfx/gpu_fence_handle.h"
+struct GpuCommandBufferMsg_CreateImage_Params;
namespace gpu {
+struct Mailbox;
class GPU_IPC_SERVICE_EXPORT GLES2CommandBufferStub
: public CommandBufferStub,
@@ -34,6 +37,7 @@ class GPU_IPC_SERVICE_EXPORT GLES2CommandBufferStub
CommandBufferStub* share_group,
const GPUCreateCommandBufferConfig& init_params,
base::UnsafeSharedMemoryRegion shared_state_shm) override;
+ MemoryTracker* GetMemoryTracker() const override;
// ImageTransportSurfaceDelegate implementation:
#if defined(OS_WIN)
@@ -50,10 +54,23 @@ class GPU_IPC_SERVICE_EXPORT GLES2CommandBufferStub
int32_t GetRouteID() const override;
private:
- void OnTakeFrontBuffer(const Mailbox& mailbox) override;
- void OnReturnFrontBuffer(const Mailbox& mailbox, bool is_lost) override;
+ bool HandleMessage(const IPC::Message& message) override;
+ void OnTakeFrontBuffer(const Mailbox& mailbox);
+ void OnReturnFrontBuffer(const Mailbox& mailbox, bool is_lost);
+ void OnCreateGpuFenceFromHandle(uint32_t gpu_fence_id,
+ const gfx::GpuFenceHandle& handle);
+ void OnGetGpuFenceHandle(uint32_t gpu_fence_id);
+ void OnCreateImage(GpuCommandBufferMsg_CreateImage_Params params);
+ void OnDestroyImage(int32_t id);
+ void OnCreateStreamTexture(uint32_t texture_id,
+ int32_t stream_id,
+ bool* succeeded);
+
void OnSwapBuffers(uint64_t swap_id, uint32_t flags) override;
+ // The group of contexts that share namespaces with this context.
+ scoped_refptr<gles2::ContextGroup> context_group_;
+
// Keep a more specifically typed reference to the decoder to avoid
// unnecessary casts. Owned by parent class.
gles2::GLES2Decoder* gles2_decoder_;
diff --git a/chromium/gpu/ipc/service/gpu_channel.cc b/chromium/gpu/ipc/service/gpu_channel.cc
index f061ec17372..d1f36aaa310 100644
--- a/chromium/gpu/ipc/service/gpu_channel.cc
+++ b/chromium/gpu/ipc/service/gpu_channel.cc
@@ -127,6 +127,8 @@ class GPU_IPC_SERVICE_EXPORT GpuChannelMessageFilter
scoped_refptr<ImageDecodeAcceleratorStub> image_decode_accelerator_stub_;
base::ThreadChecker io_thread_checker_;
+ bool allow_crash_for_testing_ = false;
+
DISALLOW_COPY_AND_ASSIGN(GpuChannelMessageFilter);
};
@@ -145,6 +147,9 @@ GpuChannelMessageFilter::GpuChannelMessageFilter(
static_cast<int32_t>(
GpuChannelReservedRoutes::kImageDecodeAccelerator))) {
io_thread_checker_.DetachFromThread();
+ allow_crash_for_testing_ = gpu_channel->gpu_channel_manager()
+ ->gpu_preferences()
+ .enable_gpu_benchmarking_extension;
}
GpuChannelMessageFilter::~GpuChannelMessageFilter() {
@@ -241,6 +246,18 @@ bool GpuChannelMessageFilter::OnMessageReceived(const IPC::Message& message) {
case GpuChannelMsg_CreateSharedImage::ID:
case GpuChannelMsg_DestroySharedImage::ID:
return MessageErrorHandler(message, "Invalid message");
+ case GpuChannelMsg_CrashForTesting::ID:
+ // Handle this message early, on the IO thread, in case the main
+ // thread is hung. This is the purpose of this message: generating
+ // minidumps on the bots, which are symbolized later by the test
+ // harness. Only pay attention to this message if Telemetry's GPU
+ // benchmarking extension was enabled via the command line, which
+ // exposes privileged APIs to JavaScript.
+ if (allow_crash_for_testing_) {
+ gl::Crash();
+ }
+ // Won't be reached if the extension is enabled.
+ return MessageErrorHandler(message, "Crashes for testing are disabled");
default:
break;
}
@@ -268,6 +285,7 @@ bool GpuChannelMessageFilter::OnMessageReceived(const IPC::Message& message) {
static_cast<int32_t>(GpuChannelReservedRoutes::kImageDecodeAccelerator)) {
if (!image_decode_accelerator_stub_->OnMessageReceived(message))
return MessageErrorHandler(message, "Invalid image decode request");
+ return true;
}
bool handle_out_of_order =
@@ -474,8 +492,7 @@ CommandBufferStub* GpuChannel::LookupCommandBuffer(int32_t route_id) {
bool GpuChannel::HasActiveWebGLContext() const {
for (auto& kv : stubs_) {
- ContextType context_type =
- kv.second->context_group()->feature_info()->context_type();
+ ContextType context_type = kv.second->context_type();
if (context_type == CONTEXT_TYPE_WEBGL1 ||
context_type == CONTEXT_TYPE_WEBGL2) {
return true;
@@ -510,7 +527,6 @@ bool GpuChannel::OnControlMessageReceived(const IPC::Message& msg) {
OnCreateCommandBuffer)
IPC_MESSAGE_HANDLER(GpuChannelMsg_DestroyCommandBuffer,
OnDestroyCommandBuffer)
- IPC_MESSAGE_HANDLER(GpuChannelMsg_CrashForTesting, OnCrashForTesting)
IPC_MESSAGE_UNHANDLED(handled = false)
IPC_END_MESSAGE_MAP()
return handled;
@@ -709,16 +725,6 @@ void GpuChannel::OnDestroyCommandBuffer(int32_t route_id) {
RemoveRoute(route_id);
}
-void GpuChannel::OnCrashForTesting() {
- // Only pay attention to this message if Telemetry's GPU
- // benchmarking extension was enabled via the command line, which
- // exposes privileged APIs to JavaScript.
- if (!gpu_channel_manager_->gpu_preferences()
- .enable_gpu_benchmarking_extension)
- return;
- gl::Crash();
-}
-
void GpuChannel::CacheShader(const std::string& key,
const std::string& shader) {
gpu_channel_manager_->delegate()->StoreShaderToDisk(client_id_, key, shader);
diff --git a/chromium/gpu/ipc/service/gpu_channel.h b/chromium/gpu/ipc/service/gpu_channel.h
index 2937600df96..77d23d0a668 100644
--- a/chromium/gpu/ipc/service/gpu_channel.h
+++ b/chromium/gpu/ipc/service/gpu_channel.h
@@ -169,7 +169,6 @@ class GPU_IPC_SERVICE_EXPORT GpuChannel : public IPC::Listener,
gpu::ContextResult* result,
gpu::Capabilities* capabilities);
void OnDestroyCommandBuffer(int32_t route_id);
- void OnCrashForTesting();
std::unique_ptr<IPC::SyncChannel> sync_channel_; // nullptr in tests.
IPC::Sender* channel_; // Same as sync_channel_.get() except in tests.
diff --git a/chromium/gpu/ipc/service/gpu_channel_manager.cc b/chromium/gpu/ipc/service/gpu_channel_manager.cc
index ebf60ad5aad..c185b5782c6 100644
--- a/chromium/gpu/ipc/service/gpu_channel_manager.cc
+++ b/chromium/gpu/ipc/service/gpu_channel_manager.cc
@@ -60,6 +60,7 @@ GpuChannelManager::GpuChannelManager(
scoped_refptr<base::SingleThreadTaskRunner> io_task_runner,
Scheduler* scheduler,
SyncPointManager* sync_point_manager,
+ SharedImageManager* shared_image_manager,
GpuMemoryBufferFactory* gpu_memory_buffer_factory,
const GpuFeatureInfo& gpu_feature_info,
GpuProcessActivityFlags activity_flags,
@@ -77,6 +78,7 @@ GpuChannelManager::GpuChannelManager(
mailbox_manager_(gles2::CreateMailboxManager(gpu_preferences)),
scheduler_(scheduler),
sync_point_manager_(sync_point_manager),
+ shared_image_manager_(shared_image_manager),
shader_translator_cache_(gpu_preferences_),
default_offscreen_surface_(std::move(default_offscreen_surface)),
gpu_memory_buffer_factory_(gpu_memory_buffer_factory),
@@ -97,8 +99,7 @@ GpuChannelManager::GpuChannelManager(
gpu::kGpuFeatureStatusEnabled) ||
features::IsUsingSkiaRenderer();
const bool disable_disk_cache =
- gpu_preferences_.disable_gpu_shader_disk_cache ||
- gpu_driver_bug_workarounds_.disable_program_disk_cache;
+ gpu_preferences_.disable_gpu_shader_disk_cache;
if (enable_gr_shader_cache && !disable_disk_cache)
gr_shader_cache_.emplace(gpu_preferences.gpu_program_cache_size, this);
}
diff --git a/chromium/gpu/ipc/service/gpu_channel_manager.h b/chromium/gpu/ipc/service/gpu_channel_manager.h
index cae718b113e..5b00e29d4b9 100644
--- a/chromium/gpu/ipc/service/gpu_channel_manager.h
+++ b/chromium/gpu/ipc/service/gpu_channel_manager.h
@@ -27,7 +27,6 @@
#include "gpu/command_buffer/service/service_discardable_manager.h"
#include "gpu/command_buffer/service/shader_translator_cache.h"
#include "gpu/command_buffer/service/shared_context_state.h"
-#include "gpu/command_buffer/service/shared_image_manager.h"
#include "gpu/config/gpu_driver_bug_workarounds.h"
#include "gpu/config/gpu_feature_info.h"
#include "gpu/config/gpu_preferences.h"
@@ -75,6 +74,7 @@ class GPU_IPC_SERVICE_EXPORT GpuChannelManager
scoped_refptr<base::SingleThreadTaskRunner> io_task_runner,
Scheduler* scheduler,
SyncPointManager* sync_point_manager,
+ SharedImageManager* shared_image_manager,
GpuMemoryBufferFactory* gpu_memory_buffer_factory,
const GpuFeatureInfo& gpu_feature_info,
GpuProcessActivityFlags activity_flags,
@@ -150,7 +150,7 @@ class GPU_IPC_SERVICE_EXPORT GpuChannelManager
SyncPointManager* sync_point_manager() const { return sync_point_manager_; }
- SharedImageManager* shared_image_manager() { return &shared_image_manager_; }
+ SharedImageManager* shared_image_manager() { return shared_image_manager_; }
// Retrieve GPU Resource consumption statistics for the task manager
void GetVideoMemoryUsageStats(
@@ -203,7 +203,8 @@ class GPU_IPC_SERVICE_EXPORT GpuChannelManager
std::unique_ptr<gles2::Outputter> outputter_;
Scheduler* scheduler_;
// SyncPointManager guaranteed to outlive running MessageLoop.
- SyncPointManager* sync_point_manager_;
+ SyncPointManager* const sync_point_manager_;
+ SharedImageManager* const shared_image_manager_;
std::unique_ptr<gles2::ProgramCache> program_cache_;
gles2::ShaderTranslatorCache shader_translator_cache_;
gles2::FramebufferCompletenessCache framebuffer_completeness_cache_;
@@ -212,7 +213,6 @@ class GPU_IPC_SERVICE_EXPORT GpuChannelManager
GpuFeatureInfo gpu_feature_info_;
ServiceDiscardableManager discardable_manager_;
PassthroughDiscardableManager passthrough_discardable_manager_;
- SharedImageManager shared_image_manager_;
#if defined(OS_ANDROID)
// Last time we know the GPU was powered on. Global for tracking across all
// transport surfaces.
diff --git a/chromium/gpu/ipc/service/gpu_channel_manager_unittest.cc b/chromium/gpu/ipc/service/gpu_channel_manager_unittest.cc
index 25dbb562c5b..5d170b3ed01 100644
--- a/chromium/gpu/ipc/service/gpu_channel_manager_unittest.cc
+++ b/chromium/gpu/ipc/service/gpu_channel_manager_unittest.cc
@@ -15,7 +15,8 @@ namespace gpu {
class GpuChannelManagerTest : public GpuChannelTestCommon {
public:
- GpuChannelManagerTest() : GpuChannelTestCommon() {}
+ GpuChannelManagerTest()
+ : GpuChannelTestCommon(true /* use_stub_bindings */) {}
~GpuChannelManagerTest() override = default;
#if defined(OS_ANDROID)
diff --git a/chromium/gpu/ipc/service/gpu_channel_test_common.cc b/chromium/gpu/ipc/service/gpu_channel_test_common.cc
index ab91f435031..c9e8273a62d 100644
--- a/chromium/gpu/ipc/service/gpu_channel_test_common.cc
+++ b/chromium/gpu/ipc/service/gpu_channel_test_common.cc
@@ -9,6 +9,7 @@
#include "base/threading/thread_task_runner_handle.h"
#include "gpu/command_buffer/common/activity_flags.h"
#include "gpu/command_buffer/service/scheduler.h"
+#include "gpu/command_buffer/service/shared_image_manager.h"
#include "gpu/command_buffer/service/sync_point_manager.h"
#include "gpu/ipc/service/gpu_channel.h"
#include "gpu/ipc/service/gpu_channel_manager.h"
@@ -49,18 +50,23 @@ class TestGpuChannelManagerDelegate : public GpuChannelManagerDelegate {
DISALLOW_COPY_AND_ASSIGN(TestGpuChannelManagerDelegate);
};
-GpuChannelTestCommon::GpuChannelTestCommon()
- : GpuChannelTestCommon(std::vector<int32_t>()) {}
+GpuChannelTestCommon::GpuChannelTestCommon(bool use_stub_bindings)
+ : GpuChannelTestCommon(std::vector<int32_t>(), use_stub_bindings) {}
GpuChannelTestCommon::GpuChannelTestCommon(
- std::vector<int32_t> enabled_workarounds)
+ std::vector<int32_t> enabled_workarounds,
+ bool use_stub_bindings)
: task_runner_(new base::TestSimpleTaskRunner),
io_task_runner_(new base::TestSimpleTaskRunner),
sync_point_manager_(new SyncPointManager()),
+ shared_image_manager_(new SharedImageManager(false /* thread_safe */)),
scheduler_(new Scheduler(task_runner_, sync_point_manager_.get())),
channel_manager_delegate_(new TestGpuChannelManagerDelegate()) {
// We need GL bindings to actually initialize command buffers.
- gl::GLSurfaceTestSupport::InitializeOneOffWithStubBindings();
+ if (use_stub_bindings)
+ gl::GLSurfaceTestSupport::InitializeOneOffWithStubBindings();
+ else
+ gl::GLSurfaceTestSupport::InitializeOneOff();
GpuFeatureInfo feature_info;
feature_info.enabled_gpu_driver_bug_workarounds =
@@ -69,7 +75,8 @@ GpuChannelTestCommon::GpuChannelTestCommon(
channel_manager_.reset(new GpuChannelManager(
GpuPreferences(), channel_manager_delegate_.get(), nullptr, /* watchdog */
task_runner_.get(), io_task_runner_.get(), scheduler_.get(),
- sync_point_manager_.get(), nullptr, /* gpu_memory_buffer_factory */
+ sync_point_manager_.get(), shared_image_manager_.get(),
+ nullptr, /* gpu_memory_buffer_factory */
std::move(feature_info), GpuProcessActivityFlags(),
gl::init::CreateOffscreenGLSurface(gfx::Size()),
nullptr /* image_decode_accelerator_worker */));
diff --git a/chromium/gpu/ipc/service/gpu_channel_test_common.h b/chromium/gpu/ipc/service/gpu_channel_test_common.h
index cb2ccce443e..9ab01faf7a6 100644
--- a/chromium/gpu/ipc/service/gpu_channel_test_common.h
+++ b/chromium/gpu/ipc/service/gpu_channel_test_common.h
@@ -6,6 +6,7 @@
#define GPU_IPC_SERVICE_GPU_CHANNEL_TEST_COMMON_H_
#include <memory>
+#include <vector>
#include "base/memory/ref_counted.h"
#include "base/memory/unsafe_shared_memory_region.h"
@@ -25,16 +26,19 @@ class GpuChannel;
class GpuChannelManager;
class Scheduler;
class SyncPointManager;
+class SharedImageManager;
class TestGpuChannelManagerDelegate;
class GpuChannelTestCommon : public testing::Test {
public:
- GpuChannelTestCommon();
+ explicit GpuChannelTestCommon(bool use_stub_bindings);
// Constructor which allows a custom set of GPU driver bug workarounds.
- explicit GpuChannelTestCommon(std::vector<int32_t> enabled_workarounds);
+ GpuChannelTestCommon(std::vector<int32_t> enabled_workarounds,
+ bool use_stub_bindings);
~GpuChannelTestCommon() override;
protected:
+ Scheduler* scheduler() const { return scheduler_.get(); }
GpuChannelManager* channel_manager() const { return channel_manager_.get(); }
base::TestSimpleTaskRunner* task_runner() const { return task_runner_.get(); }
base::TestSimpleTaskRunner* io_task_runner() const {
@@ -52,6 +56,7 @@ class GpuChannelTestCommon : public testing::Test {
scoped_refptr<base::TestSimpleTaskRunner> task_runner_;
scoped_refptr<base::TestSimpleTaskRunner> io_task_runner_;
std::unique_ptr<SyncPointManager> sync_point_manager_;
+ std::unique_ptr<SharedImageManager> shared_image_manager_;
std::unique_ptr<Scheduler> scheduler_;
std::unique_ptr<TestGpuChannelManagerDelegate> channel_manager_delegate_;
std::unique_ptr<GpuChannelManager> channel_manager_;
diff --git a/chromium/gpu/ipc/service/gpu_channel_unittest.cc b/chromium/gpu/ipc/service/gpu_channel_unittest.cc
index b290851e535..08d3aa4b27f 100644
--- a/chromium/gpu/ipc/service/gpu_channel_unittest.cc
+++ b/chromium/gpu/ipc/service/gpu_channel_unittest.cc
@@ -12,7 +12,11 @@
namespace gpu {
-class GpuChannelTest : public GpuChannelTestCommon {};
+class GpuChannelTest : public GpuChannelTestCommon {
+ public:
+ GpuChannelTest() : GpuChannelTestCommon(true /* use_stub_bindings */) {}
+ ~GpuChannelTest() override = default;
+};
#if defined(OS_WIN)
const SurfaceHandle kFakeSurfaceHandle = reinterpret_cast<SurfaceHandle>(1);
@@ -234,7 +238,8 @@ TEST_F(GpuChannelTest, CreateFailsIfSharedContextIsLost) {
class GpuChannelExitForContextLostTest : public GpuChannelTestCommon {
public:
GpuChannelExitForContextLostTest()
- : GpuChannelTestCommon({EXIT_ON_CONTEXT_LOST}) {}
+ : GpuChannelTestCommon({EXIT_ON_CONTEXT_LOST} /* enabled_workarounds */,
+ true /* use_stub_bindings */) {}
};
TEST_F(GpuChannelExitForContextLostTest, CreateFailsDuringLostContextShutdown) {
diff --git a/chromium/gpu/ipc/service/gpu_init.cc b/chromium/gpu/ipc/service/gpu_init.cc
index 24e258d2967..3c7f7de718f 100644
--- a/chromium/gpu/ipc/service/gpu_init.cc
+++ b/chromium/gpu/ipc/service/gpu_init.cc
@@ -12,6 +12,7 @@
#include "base/threading/scoped_blocking_call.h"
#include "base/trace_event/trace_event.h"
#include "build/build_config.h"
+#include "components/viz/common/features.h"
#include "gpu/command_buffer/service/gpu_switches.h"
#include "gpu/command_buffer/service/service_utils.h"
#include "gpu/config/gpu_driver_bug_list.h"
@@ -90,7 +91,8 @@ void InitializeDirectCompositionOverlaySupport(GPUInfo* gpu_info) {
#if defined(OS_LINUX) && !defined(OS_CHROMEOS) && !defined(IS_CHROMECAST)
bool CanAccessNvidiaDeviceFile() {
bool res = true;
- base::ScopedBlockingCall scoped_blocking_call(base::BlockingType::WILL_BLOCK);
+ base::ScopedBlockingCall scoped_blocking_call(FROM_HERE,
+ base::BlockingType::WILL_BLOCK);
if (access("/dev/nvidiactl", R_OK) != 0) {
DVLOG(1) << "NVIDIA device file /dev/nvidiactl access denied";
res = false;
@@ -221,6 +223,7 @@ bool GpuInit::InitializeAndStartSandbox(base::CommandLine* command_line,
features::IsOzoneDrmMojo() || ui::OzonePlatform::EnsureInstance()
->GetPlatformProperties()
.requires_mojo;
+ params.viz_display_compositor = features::IsVizDisplayCompositorEnabled();
ui::OzonePlatform::InitializeForGPU(params);
#endif
@@ -418,6 +421,7 @@ void GpuInit::InitializeInProcess(base::CommandLine* command_line,
features::IsOzoneDrmMojo() || ui::OzonePlatform::EnsureInstance()
->GetPlatformProperties()
.requires_mojo;
+ params.viz_display_compositor = features::IsVizDisplayCompositorEnabled();
ui::OzonePlatform::InitializeForGPU(params);
ui::OzonePlatform::GetInstance()->AfterSandboxEntry();
#endif
diff --git a/chromium/gpu/ipc/service/gpu_memory_buffer_factory_dxgi_unittest.cc b/chromium/gpu/ipc/service/gpu_memory_buffer_factory_dxgi_unittest.cc
index c334e8162eb..d4dc4e5e24a 100644
--- a/chromium/gpu/ipc/service/gpu_memory_buffer_factory_dxgi_unittest.cc
+++ b/chromium/gpu/ipc/service/gpu_memory_buffer_factory_dxgi_unittest.cc
@@ -9,9 +9,9 @@ namespace gpu {
namespace {
// Disabled by default as it requires DX11.
-INSTANTIATE_TYPED_TEST_CASE_P(DISABLED_GpuMemoryBufferFactoryDXGI,
- GpuMemoryBufferFactoryTest,
- GpuMemoryBufferFactoryDXGI);
+INSTANTIATE_TYPED_TEST_SUITE_P(DISABLED_GpuMemoryBufferFactoryDXGI,
+ GpuMemoryBufferFactoryTest,
+ GpuMemoryBufferFactoryDXGI);
} // namespace
} // namespace gpu
diff --git a/chromium/gpu/ipc/service/gpu_memory_buffer_factory_io_surface_unittest.cc b/chromium/gpu/ipc/service/gpu_memory_buffer_factory_io_surface_unittest.cc
index 66809f8fd14..2e0b6701f1e 100644
--- a/chromium/gpu/ipc/service/gpu_memory_buffer_factory_io_surface_unittest.cc
+++ b/chromium/gpu/ipc/service/gpu_memory_buffer_factory_io_surface_unittest.cc
@@ -8,9 +8,9 @@
namespace gpu {
namespace {
-INSTANTIATE_TYPED_TEST_CASE_P(GpuMemoryBufferFactoryIOSurface,
- GpuMemoryBufferFactoryTest,
- GpuMemoryBufferFactoryIOSurface);
+INSTANTIATE_TYPED_TEST_SUITE_P(GpuMemoryBufferFactoryIOSurface,
+ GpuMemoryBufferFactoryTest,
+ GpuMemoryBufferFactoryIOSurface);
} // namespace
} // namespace gpu
diff --git a/chromium/gpu/ipc/service/gpu_memory_buffer_factory_native_pixmap.cc b/chromium/gpu/ipc/service/gpu_memory_buffer_factory_native_pixmap.cc
index 658441f11e0..031ac81239a 100644
--- a/chromium/gpu/ipc/service/gpu_memory_buffer_factory_native_pixmap.cc
+++ b/chromium/gpu/ipc/service/gpu_memory_buffer_factory_native_pixmap.cc
@@ -125,7 +125,7 @@ GpuMemoryBufferFactoryNativePixmap::CreateImageForGpuMemoryBuffer(
}
auto image = base::MakeRefCounted<gl::GLImageNativePixmap>(size, format);
- if (!image->Initialize(pixmap.get())) {
+ if (!image->Initialize(std::move(pixmap))) {
LOG(ERROR) << "Failed to create GLImage " << size.ToString() << ", "
<< gfx::BufferFormatToString(format);
return nullptr;
@@ -162,7 +162,7 @@ GpuMemoryBufferFactoryNativePixmap::CreateAnonymousImage(
return nullptr;
}
auto image = base::MakeRefCounted<gl::GLImageNativePixmap>(size, format);
- if (!image->Initialize(pixmap.get())) {
+ if (!image->Initialize(std::move(pixmap))) {
LOG(ERROR) << "Failed to create GLImage " << size.ToString() << ", "
<< gfx::BufferFormatToString(format);
return nullptr;
diff --git a/chromium/gpu/ipc/service/gpu_memory_buffer_factory_native_pixmap_unittest.cc b/chromium/gpu/ipc/service/gpu_memory_buffer_factory_native_pixmap_unittest.cc
index 2c381184dd0..b1d11027a44 100644
--- a/chromium/gpu/ipc/service/gpu_memory_buffer_factory_native_pixmap_unittest.cc
+++ b/chromium/gpu/ipc/service/gpu_memory_buffer_factory_native_pixmap_unittest.cc
@@ -8,9 +8,9 @@
namespace gpu {
namespace {
-INSTANTIATE_TYPED_TEST_CASE_P(GpuMemoryBufferFactoryNativePixmap,
- GpuMemoryBufferFactoryTest,
- GpuMemoryBufferFactoryNativePixmap);
+INSTANTIATE_TYPED_TEST_SUITE_P(GpuMemoryBufferFactoryNativePixmap,
+ GpuMemoryBufferFactoryTest,
+ GpuMemoryBufferFactoryNativePixmap);
} // namespace
} // namespace gpu
diff --git a/chromium/gpu/ipc/service/gpu_memory_buffer_factory_test_template.h b/chromium/gpu/ipc/service/gpu_memory_buffer_factory_test_template.h
index 5678193dcf7..b9e8aa41e17 100644
--- a/chromium/gpu/ipc/service/gpu_memory_buffer_factory_test_template.h
+++ b/chromium/gpu/ipc/service/gpu_memory_buffer_factory_test_template.h
@@ -41,7 +41,7 @@ class GpuMemoryBufferFactoryTest : public testing::Test {
GpuMemoryBufferFactoryType factory_;
};
-TYPED_TEST_CASE_P(GpuMemoryBufferFactoryTest);
+TYPED_TEST_SUITE_P(GpuMemoryBufferFactoryTest);
TYPED_TEST_P(GpuMemoryBufferFactoryTest, CreateGpuMemoryBuffer) {
const gfx::GpuMemoryBufferId kBufferId(1);
@@ -76,7 +76,7 @@ TYPED_TEST_P(GpuMemoryBufferFactoryTest, CreateGpuMemoryBuffer) {
// The GpuMemoryBufferFactoryTest test case verifies behavior that is expected
// from a GpuMemoryBuffer factory in order to be conformant.
-REGISTER_TYPED_TEST_CASE_P(GpuMemoryBufferFactoryTest, CreateGpuMemoryBuffer);
+REGISTER_TYPED_TEST_SUITE_P(GpuMemoryBufferFactoryTest, CreateGpuMemoryBuffer);
} // namespace gpu
diff --git a/chromium/gpu/ipc/service/image_decode_accelerator_stub.cc b/chromium/gpu/ipc/service/image_decode_accelerator_stub.cc
index 917d56edad2..add3911de92 100644
--- a/chromium/gpu/ipc/service/image_decode_accelerator_stub.cc
+++ b/chromium/gpu/ipc/service/image_decode_accelerator_stub.cc
@@ -4,24 +4,37 @@
#include "gpu/ipc/service/image_decode_accelerator_stub.h"
+#include <utility>
+
#include "base/bind.h"
+#include "base/containers/span.h"
#include "base/feature_list.h"
#include "base/location.h"
#include "base/logging.h"
#include "base/single_thread_task_runner.h"
#include "gpu/command_buffer/common/constants.h"
+#include "gpu/command_buffer/common/context_result.h"
+#include "gpu/command_buffer/common/discardable_handle.h"
#include "gpu/command_buffer/common/scheduling_priority.h"
#include "gpu/command_buffer/common/sync_token.h"
+#include "gpu/command_buffer/service/context_group.h"
+#include "gpu/command_buffer/service/decoder_context.h"
#include "gpu/command_buffer/service/scheduler.h"
+#include "gpu/command_buffer/service/service_transfer_cache.h"
+#include "gpu/command_buffer/service/shared_context_state.h"
#include "gpu/command_buffer/service/sync_point_manager.h"
#include "gpu/config/gpu_finch_features.h"
#include "gpu/ipc/common/command_buffer_id.h"
+#include "gpu/ipc/service/command_buffer_stub.h"
#include "gpu/ipc/service/gpu_channel.h"
+#include "gpu/ipc/service/gpu_channel_manager.h"
#include "gpu/ipc/service/image_decode_accelerator_worker.h"
#include "ipc/ipc_message.h"
#include "ipc/ipc_message_macros.h"
+#include "ui/gfx/color_space.h"
namespace gpu {
+class Buffer;
ImageDecodeAcceleratorStub::ImageDecodeAcceleratorStub(
ImageDecodeAcceleratorWorker* worker,
@@ -77,8 +90,8 @@ void ImageDecodeAcceleratorStub::OnScheduleImageDecode(
uint64_t release_count) {
DCHECK(io_task_runner_->BelongsToCurrentThread());
base::AutoLock lock(lock_);
- if (!channel_) {
- // The channel is no longer available, so don't schedule a decode.
+ if (!channel_ || destroying_channel_) {
+ // The channel is no longer available, so don't do anything.
return;
}
@@ -99,19 +112,24 @@ void ImageDecodeAcceleratorStub::OnScheduleImageDecode(
}
// Start the actual decode.
- worker_->Decode(std::move(decode_params.encoded_data),
- decode_params.output_size,
- base::BindOnce(&ImageDecodeAcceleratorStub::OnDecodeCompleted,
- base::WrapRefCounted(this)));
+ worker_->Decode(
+ std::move(decode_params.encoded_data), decode_params.output_size,
+ base::BindOnce(&ImageDecodeAcceleratorStub::OnDecodeCompleted,
+ base::WrapRefCounted(this), decode_params.output_size));
// Schedule a task to eventually release the decode sync token. Note that this
// task won't run until the sequence is re-enabled when a decode completes.
+ const SyncToken discardable_handle_sync_token = SyncToken(
+ CommandBufferNamespace::GPU_IO,
+ CommandBufferIdFromChannelAndRoute(channel_->client_id(),
+ decode_params.raster_decoder_route_id),
+ decode_params.discardable_handle_release_count);
channel_->scheduler()->ScheduleTask(Scheduler::Task(
sequence_,
base::BindOnce(&ImageDecodeAcceleratorStub::ProcessCompletedDecode,
base::WrapRefCounted(this), std::move(decode_params),
release_count),
- std::vector<SyncToken>()));
+ {discardable_handle_sync_token} /* sync_token_fences */));
}
void ImageDecodeAcceleratorStub::ProcessCompletedDecode(
@@ -119,14 +137,82 @@ void ImageDecodeAcceleratorStub::ProcessCompletedDecode(
uint64_t decode_release_count) {
DCHECK(main_task_runner_->BelongsToCurrentThread());
base::AutoLock lock(lock_);
- if (!channel_) {
+ if (!channel_ || destroying_channel_) {
// The channel is no longer available, so don't do anything.
return;
}
- // TODO(andrescj): create the transfer cache entry. Doing so will also upload
- // the decoded image to a GPU texture.
+ DCHECK(!pending_completed_decodes_.empty());
+ std::unique_ptr<CompletedDecode> completed_decode =
+ std::move(pending_completed_decodes_.front());
+
+ // Gain access to the transfer cache through the GpuChannelManager's
+ // SharedContextState. We will also use that to get a GrContext that will be
+ // used for uploading the image.
+ ContextResult context_result;
+ scoped_refptr<SharedContextState> shared_context_state =
+ channel_->gpu_channel_manager()->GetSharedContextState(&context_result);
+ if (context_result != ContextResult::kSuccess) {
+ DLOG(ERROR) << "Unable to obtain the SharedContextState";
+ OnError();
+ return;
+ }
+ DCHECK(shared_context_state);
+ if (!shared_context_state->gr_context()) {
+ DLOG(ERROR) << "Could not get the GrContext";
+ OnError();
+ return;
+ }
+ if (!shared_context_state->MakeCurrent(nullptr /* surface */)) {
+ DLOG(ERROR) << "Could not MakeCurrent the shared context";
+ OnError();
+ return;
+ }
+ // Insert the cache entry in the transfer cache. Note that this section
+ // validates several of the IPC parameters: |params.raster_decoder_route_id|,
+ // |params.transfer_cache_entry_id|, |params.discardable_handle_shm_id|, and
+ // |params.discardable_handle_shm_offset|.
+ CommandBufferStub* command_buffer =
+ channel_->LookupCommandBuffer(params.raster_decoder_route_id);
+ if (!command_buffer) {
+ DLOG(ERROR) << "Could not find the command buffer";
+ OnError();
+ return;
+ }
+ scoped_refptr<Buffer> handle_buffer =
+ command_buffer->GetTransferBuffer(params.discardable_handle_shm_id);
+ if (!DiscardableHandleBase::ValidateParameters(
+ handle_buffer.get(), params.discardable_handle_shm_offset)) {
+ DLOG(ERROR) << "Could not validate the discardable handle parameters";
+ OnError();
+ return;
+ }
+ DCHECK(command_buffer->decoder_context());
+ if (command_buffer->decoder_context()->GetRasterDecoderId() < 0) {
+ DLOG(ERROR) << "Could not get the raster decoder ID";
+ OnError();
+ return;
+ }
+ DCHECK(shared_context_state->transfer_cache());
+ if (!shared_context_state->transfer_cache()->CreateLockedImageEntry(
+ command_buffer->decoder_context()->GetRasterDecoderId(),
+ params.transfer_cache_entry_id,
+ ServiceDiscardableHandle(std::move(handle_buffer),
+ params.discardable_handle_shm_offset,
+ params.discardable_handle_shm_id),
+ shared_context_state->gr_context(),
+ base::make_span(completed_decode->output),
+ completed_decode->row_bytes, completed_decode->image_info,
+ params.needs_mips, params.target_color_space.ToSkColorSpace())) {
+ DLOG(ERROR) << "Could not create and insert the transfer cache entry";
+ OnError();
+ return;
+ }
+ shared_context_state->set_need_context_state_reset(true);
+
+ // All done! The decoded image can now be used for rasterization, so we can
+ // release the decode sync token.
sync_point_client_state_->ReleaseFenceSync(decode_release_count);
// If there are no more completed decodes to be processed, we can disable the
@@ -137,27 +223,42 @@ void ImageDecodeAcceleratorStub::ProcessCompletedDecode(
channel_->scheduler()->DisableSequence(sequence_);
}
+ImageDecodeAcceleratorStub::CompletedDecode::CompletedDecode(
+ std::vector<uint8_t> output,
+ size_t row_bytes,
+ SkImageInfo image_info)
+ : output(std::move(output)), row_bytes(row_bytes), image_info(image_info) {}
+
+ImageDecodeAcceleratorStub::CompletedDecode::~CompletedDecode() = default;
+
void ImageDecodeAcceleratorStub::OnDecodeCompleted(
- std::vector<uint8_t> rgba_output) {
+ gfx::Size expected_output_size,
+ std::vector<uint8_t> output,
+ size_t row_bytes,
+ SkImageInfo image_info) {
base::AutoLock lock(lock_);
- if (!channel_) {
+ if (!channel_ || destroying_channel_) {
// The channel is no longer available, so don't do anything.
return;
}
- if (!accepting_completed_decodes_) {
- // We're still waiting for the channel to be destroyed because of an earlier
- // failure, so don't do anything.
- return;
- }
-
- if (rgba_output.empty()) {
+ if (output.empty()) {
DLOG(ERROR) << "The decode failed";
OnError();
return;
}
- pending_completed_decodes_.push(std::move(rgba_output));
+ // Some sanity checks on the output of the decoder.
+ DCHECK_EQ(expected_output_size.width(), image_info.width());
+ DCHECK_EQ(expected_output_size.height(), image_info.height());
+ DCHECK_NE(0u, image_info.minRowBytes());
+ DCHECK_GE(row_bytes, image_info.minRowBytes());
+ DCHECK_EQ(output.size(), image_info.computeByteSize(row_bytes));
+
+ // The decode is ready to be processed: add it to |pending_completed_decodes_|
+ // so that ProcessCompletedDecode() can pick it up.
+ pending_completed_decodes_.push(std::make_unique<CompletedDecode>(
+ std::move(output), row_bytes, image_info));
// We only need to enable the sequence when the number of pending completed
// decodes is 1. If there are more, the sequence should already be enabled.
@@ -166,6 +267,7 @@ void ImageDecodeAcceleratorStub::OnDecodeCompleted(
}
void ImageDecodeAcceleratorStub::OnError() {
+ lock_.AssertAcquired();
DCHECK(channel_);
// Trigger the destruction of the channel and stop processing further
@@ -173,7 +275,7 @@ void ImageDecodeAcceleratorStub::OnError() {
// GpuChannel::OnChannelError() directly because that will end up calling
// ImageDecodeAcceleratorStub::Shutdown() while |lock_| is still acquired. So,
// we post a task to the main thread instead.
- accepting_completed_decodes_ = false;
+ destroying_channel_ = true;
channel_->task_runner()->PostTask(
FROM_HERE,
base::BindOnce(&GpuChannel::OnChannelError, channel_->AsWeakPtr()));
diff --git a/chromium/gpu/ipc/service/image_decode_accelerator_stub.h b/chromium/gpu/ipc/service/image_decode_accelerator_stub.h
index a8b207ac802..bc2742518f5 100644
--- a/chromium/gpu/ipc/service/image_decode_accelerator_stub.h
+++ b/chromium/gpu/ipc/service/image_decode_accelerator_stub.h
@@ -5,6 +5,10 @@
#ifndef GPU_IPC_SERVICE_IMAGE_DECODE_ACCELERATOR_STUB_H_
#define GPU_IPC_SERVICE_IMAGE_DECODE_ACCELERATOR_STUB_H_
+#include <stddef.h>
+#include <stdint.h>
+
+#include <memory>
#include <vector>
#include "base/containers/queue.h"
@@ -15,6 +19,8 @@
#include "base/thread_annotations.h"
#include "gpu/command_buffer/service/sequence_id.h"
#include "gpu/ipc/common/gpu_messages.h"
+#include "third_party/skia/include/core/SkImageInfo.h"
+#include "ui/gfx/geometry/size.h"
namespace base {
class SingleThreadTaskRunner;
@@ -74,10 +80,13 @@ class ImageDecodeAcceleratorStub
uint64_t decode_release_count);
// The |worker_| calls this when a decode is completed. If the decode is
- // successful (i.e., |rgba_output| is not empty), |sequence_| will be enabled
- // so that ProcessCompletedDecode() is called. If the decode is not
- // successful, we destroy the channel (see OnError()).
- void OnDecodeCompleted(std::vector<uint8_t> rgba_output);
+ // successful (i.e., |output| is not empty), |sequence_| will be enabled so
+ // that ProcessCompletedDecode() is called. If the decode is not successful,
+ // we destroy the channel (see OnError()).
+ void OnDecodeCompleted(gfx::Size expected_output_size,
+ std::vector<uint8_t> output,
+ size_t row_bytes,
+ SkImageInfo image_info);
// Triggers the destruction of the channel asynchronously and makes it so that
// we stop accepting completed decodes. On entry, |channel_| must not be
@@ -87,14 +96,27 @@ class ImageDecodeAcceleratorStub
// The object to which the actual decoding can be delegated.
ImageDecodeAcceleratorWorker* worker_ = nullptr;
+ struct CompletedDecode {
+ CompletedDecode(std::vector<uint8_t> output,
+ size_t row_bytes,
+ SkImageInfo image_info);
+ ~CompletedDecode();
+
+ std::vector<uint8_t> output;
+ size_t row_bytes;
+ SkImageInfo image_info;
+
+ DISALLOW_COPY_AND_ASSIGN(CompletedDecode);
+ };
+
base::Lock lock_;
GpuChannel* channel_ GUARDED_BY(lock_) = nullptr;
SequenceId sequence_ GUARDED_BY(lock_);
scoped_refptr<SyncPointClientState> sync_point_client_state_
GUARDED_BY(lock_);
- base::queue<std::vector<uint8_t>> pending_completed_decodes_
+ base::queue<std::unique_ptr<CompletedDecode>> pending_completed_decodes_
GUARDED_BY(lock_);
- bool accepting_completed_decodes_ GUARDED_BY(lock_) = true;
+ bool destroying_channel_ GUARDED_BY(lock_) = false;
uint64_t last_release_count_ GUARDED_BY(lock_) = 0;
scoped_refptr<base::SingleThreadTaskRunner> main_task_runner_;
diff --git a/chromium/gpu/ipc/service/image_decode_accelerator_stub_unittest.cc b/chromium/gpu/ipc/service/image_decode_accelerator_stub_unittest.cc
index 8b738c6fa80..52a79dafc7c 100644
--- a/chromium/gpu/ipc/service/image_decode_accelerator_stub_unittest.cc
+++ b/chromium/gpu/ipc/service/image_decode_accelerator_stub_unittest.cc
@@ -2,29 +2,78 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#include <stddef.h>
+#include <stdint.h>
+
+#include <utility>
+#include <vector>
+
+#include "base/atomicops.h"
+#include "base/bind.h"
#include "base/containers/queue.h"
+#include "base/logging.h"
#include "base/macros.h"
+#include "base/memory/scoped_refptr.h"
+#include "base/memory/weak_ptr.h"
#include "base/numerics/checked_math.h"
#include "base/test/scoped_feature_list.h"
#include "base/test/test_simple_task_runner.h"
+#include "cc/paint/image_transfer_cache_entry.h"
+#include "cc/paint/transfer_cache_entry.h"
+#include "gpu/command_buffer/common/buffer.h"
+#include "gpu/command_buffer/common/capabilities.h"
#include "gpu/command_buffer/common/constants.h"
+#include "gpu/command_buffer/common/context_creation_attribs.h"
+#include "gpu/command_buffer/common/context_result.h"
+#include "gpu/command_buffer/common/discardable_handle.h"
+#include "gpu/command_buffer/common/scheduling_priority.h"
#include "gpu/command_buffer/common/sync_token.h"
+#include "gpu/command_buffer/service/context_group.h"
+#include "gpu/command_buffer/service/decoder_context.h"
+#include "gpu/command_buffer/service/mocks.h"
+#include "gpu/command_buffer/service/scheduler.h"
+#include "gpu/command_buffer/service/sequence_id.h"
+#include "gpu/command_buffer/service/service_transfer_cache.h"
+#include "gpu/command_buffer/service/shared_context_state.h"
#include "gpu/command_buffer/service/sync_point_manager.h"
+#include "gpu/command_buffer/service/transfer_buffer_manager.h"
+#include "gpu/config/gpu_driver_bug_workarounds.h"
#include "gpu/config/gpu_finch_features.h"
#include "gpu/ipc/common/command_buffer_id.h"
#include "gpu/ipc/common/gpu_messages.h"
+#include "gpu/ipc/common/surface_handle.h"
+#include "gpu/ipc/service/command_buffer_stub.h"
+#include "gpu/ipc/service/gpu_channel.h"
#include "gpu/ipc/service/gpu_channel_manager.h"
#include "gpu/ipc/service/gpu_channel_test_common.h"
#include "gpu/ipc/service/image_decode_accelerator_worker.h"
+#include "ipc/ipc_message.h"
#include "testing/gmock/include/gmock/gmock.h"
+#include "third_party/skia/include/core/SkImage.h"
+#include "third_party/skia/include/core/SkImageInfo.h"
+#include "third_party/skia/include/core/SkSize.h"
#include "ui/gfx/color_space.h"
#include "ui/gfx/geometry/size.h"
+#include "url/gurl.h"
using testing::InSequence;
using testing::StrictMock;
namespace gpu {
-class GpuChannel;
+class MemoryTracker;
+
+namespace {
+
+std::unique_ptr<MemoryTracker> CreateMockMemoryTracker(
+ const GPUCreateCommandBufferConfig& init_params) {
+ return std::make_unique<gles2::MockMemoryTracker>();
+}
+
+scoped_refptr<Buffer> MakeBufferForTesting() {
+ return MakeMemoryBuffer(sizeof(base::subtle::Atomic32));
+}
+
+} // namespace
// This mock allows individual tests to decide asynchronously when to finish a
// decode by using the FinishOneDecode() method.
@@ -34,7 +83,7 @@ class MockImageDecodeAcceleratorWorker : public ImageDecodeAcceleratorWorker {
void Decode(std::vector<uint8_t> encoded_data,
const gfx::Size& output_size,
- base::OnceCallback<void(std::vector<uint8_t>)> decode_cb) {
+ CompletedDecodeCB decode_cb) {
pending_decodes_.push(PendingDecode{output_size, std::move(decode_cb)});
DoDecode(output_size);
}
@@ -45,13 +94,19 @@ class MockImageDecodeAcceleratorWorker : public ImageDecodeAcceleratorWorker {
PendingDecode next_decode = std::move(pending_decodes_.front());
pending_decodes_.pop();
if (success) {
- base::CheckedNumeric<size_t> rgba_bytes = 4u;
- rgba_bytes *= next_decode.output_size.width();
+ base::CheckedNumeric<size_t> row_bytes = 4u;
+ row_bytes *= next_decode.output_size.width();
+ base::CheckedNumeric<size_t> rgba_bytes = row_bytes;
rgba_bytes *= next_decode.output_size.height();
std::vector<uint8_t> rgba_output(rgba_bytes.ValueOrDie(), 0u);
- std::move(next_decode.decode_cb).Run(std::move(rgba_output));
+ std::move(next_decode.decode_cb)
+ .Run(std::move(rgba_output), row_bytes.ValueOrDie(),
+ SkImageInfo::Make(next_decode.output_size.width(),
+ next_decode.output_size.height(),
+ kRGBA_8888_SkColorType, kOpaque_SkAlphaType));
} else {
- std::move(next_decode.decode_cb).Run(std::vector<uint8_t>());
+ std::move(next_decode.decode_cb)
+ .Run(std::vector<uint8_t>(), 0u, SkImageInfo());
}
}
@@ -60,7 +115,7 @@ class MockImageDecodeAcceleratorWorker : public ImageDecodeAcceleratorWorker {
private:
struct PendingDecode {
gfx::Size output_size;
- base::OnceCallback<void(std::vector<uint8_t>)> decode_cb;
+ CompletedDecodeCB decode_cb;
};
base::queue<PendingDecode> pending_decodes_;
@@ -70,19 +125,46 @@ class MockImageDecodeAcceleratorWorker : public ImageDecodeAcceleratorWorker {
const int kChannelId = 1;
+const int32_t kCommandBufferRouteId =
+ static_cast<int32_t>(GpuChannelReservedRoutes::kMaxValue) + 1;
+
// Test fixture: the general strategy for testing is to have a GPU channel test
// infrastructure (provided by GpuChannelTestCommon), ask the channel to handle
-// decode requests, and expect sync token releases and invokations to the
-// ImageDecodeAcceleratorWorker functionality.
+// decode requests, and expect sync token releases, invocations to the
+// ImageDecodeAcceleratorWorker functionality, and transfer cache entry
+// creation.
class ImageDecodeAcceleratorStubTest : public GpuChannelTestCommon {
public:
- ImageDecodeAcceleratorStubTest() : GpuChannelTestCommon() {}
+ ImageDecodeAcceleratorStubTest()
+ : GpuChannelTestCommon(false /* use_stub_bindings */),
+ weak_ptr_factory_(this) {}
~ImageDecodeAcceleratorStubTest() override = default;
SyncPointManager* sync_point_manager() const {
return channel_manager()->sync_point_manager();
}
+ ServiceTransferCache* GetServiceTransferCache() {
+ ContextResult context_result;
+ scoped_refptr<SharedContextState> shared_context_state =
+ channel_manager()->GetSharedContextState(&context_result);
+ if (context_result != ContextResult::kSuccess || !shared_context_state) {
+ return nullptr;
+ }
+ return shared_context_state->transfer_cache();
+ }
+
+ int GetRasterDecoderId() {
+ GpuChannel* channel = channel_manager()->LookupChannel(kChannelId);
+ if (!channel)
+ return -1;
+ CommandBufferStub* command_buffer =
+ channel->LookupCommandBuffer(kCommandBufferRouteId);
+ if (!command_buffer || !command_buffer->decoder_context())
+ return -1;
+ return command_buffer->decoder_context()->GetRasterDecoderId();
+ }
+
void SetUp() override {
GpuChannelTestCommon::SetUp();
// TODO(andrescj): get rid of the |feature_list_| when the feature is
@@ -91,7 +173,49 @@ class ImageDecodeAcceleratorStubTest : public GpuChannelTestCommon {
features::kVaapiJpegImageDecodeAcceleration);
channel_manager()->SetImageDecodeAcceleratorWorkerForTesting(
&image_decode_accelerator_worker_);
- ASSERT_TRUE(CreateChannel(kChannelId, false /* is_gpu_host */));
+
+ // Initialize the GrContext so that texture uploading works.
+ ContextResult context_result;
+ scoped_refptr<SharedContextState> shared_context_state =
+ channel_manager()->GetSharedContextState(&context_result);
+ ASSERT_EQ(ContextResult::kSuccess, context_result);
+ ASSERT_TRUE(shared_context_state);
+ shared_context_state->InitializeGrContext(GpuDriverBugWorkarounds(),
+ nullptr);
+
+ GpuChannel* channel = CreateChannel(kChannelId, false /* is_gpu_host */);
+ ASSERT_TRUE(channel);
+
+ // Create a raster command buffer so that the ImageDecodeAcceleratorStub can
+ // have access to a TransferBufferManager. Note that we mock the
+ // MemoryTracker because GpuCommandBufferMemoryTracker uses a timer that
+ // would make RunTasksUntilIdle() run forever.
+ CommandBufferStub::SetMemoryTrackerFactoryForTesting(
+ base::BindRepeating(&CreateMockMemoryTracker));
+ GPUCreateCommandBufferConfig init_params;
+ init_params.surface_handle = kNullSurfaceHandle;
+ init_params.share_group_id = MSG_ROUTING_NONE;
+ init_params.stream_id = 0;
+ init_params.stream_priority = SchedulingPriority::kNormal;
+ init_params.attribs = ContextCreationAttribs();
+ init_params.attribs.enable_gles2_interface = false;
+ init_params.attribs.enable_raster_interface = true;
+ init_params.attribs.bind_generates_resource = false;
+ init_params.active_url = GURL();
+ ContextResult result = ContextResult::kTransientFailure;
+ Capabilities capabilities;
+ HandleMessage(channel,
+ new GpuChannelMsg_CreateCommandBuffer(
+ init_params, kCommandBufferRouteId,
+ GetSharedMemoryRegion(), &result, &capabilities));
+ ASSERT_EQ(ContextResult::kSuccess, result);
+ CommandBufferStub* command_buffer =
+ channel->LookupCommandBuffer(kCommandBufferRouteId);
+ ASSERT_TRUE(command_buffer);
+
+ // Make sure there are no pending tasks before starting the test.
+ ASSERT_EQ(0u, task_runner()->NumPendingTasks());
+ ASSERT_EQ(0u, io_task_runner()->NumPendingTasks());
}
void TearDown() override {
@@ -100,8 +224,60 @@ class ImageDecodeAcceleratorStubTest : public GpuChannelTestCommon {
channel_manager()->DestroyAllChannels();
}
+ // Intended to run as a task in the GPU scheduler (in the raster sequence):
+ // registers |buffer| in the TransferBufferManager and releases the sync token
+ // corresponding to |handle_release_count|.
+ void RegisterDiscardableHandleBuffer(int32_t shm_id,
+ scoped_refptr<Buffer> buffer,
+ uint64_t handle_release_count) {
+ GpuChannel* channel = channel_manager()->LookupChannel(kChannelId);
+ CHECK(channel);
+ CommandBufferStub* command_buffer =
+ channel->LookupCommandBuffer(kCommandBufferRouteId);
+ CHECK(command_buffer);
+ command_buffer->RegisterTransferBufferForTest(shm_id, std::move(buffer));
+ command_buffer->OnFenceSyncRelease(handle_release_count);
+ }
+
+ // Creates a discardable handle and schedules a task in the GPU scheduler (in
+ // the raster sequence) to register the handle's buffer and release the sync
+ // token corresponding to |handle_release_count| (see the
+ // RegisterDiscardableHandleBuffer() method). Returns an invalid handle if the
+ // GPU channel or the command buffer doesn't exist.
+ ClientDiscardableHandle CreateDiscardableHandle(
+ uint64_t handle_release_count) {
+ GpuChannel* channel = channel_manager()->LookupChannel(kChannelId);
+ if (!channel)
+ return ClientDiscardableHandle();
+ CommandBufferStub* command_buffer =
+ channel->LookupCommandBuffer(kCommandBufferRouteId);
+ if (!command_buffer)
+ return ClientDiscardableHandle();
+ ClientDiscardableHandle handle(MakeBufferForTesting() /* buffer */,
+ 0u /* byte_offset */,
+ GetNextBufferId() /* shm_id */);
+ scheduler()->ScheduleTask(Scheduler::Task(
+ command_buffer->sequence_id(),
+ base::BindOnce(
+ &ImageDecodeAcceleratorStubTest::RegisterDiscardableHandleBuffer,
+ weak_ptr_factory_.GetWeakPtr(), handle.shm_id(),
+ handle.BufferForTesting(), handle_release_count) /* closure */,
+ std::vector<SyncToken>() /* sync_token_fences */));
+ return handle;
+ }
+
+ // Sends a decode request IPC and returns a sync token that is expected to be
+ // released upon the completion of the decode. The caller is responsible for
+ // keeping track of the release count for the decode sync token
+ // (|decode_release_count|), the transfer cache entry ID
+ // (|transfer_cache_entry_id|), and the release count of the sync token that
+ // is signaled after the discardable handle's buffer has been registered in
+ // the TransferBufferManager. If the channel does not exist or the discardable
+ // handle can't be created, this function returns an empty sync token.
SyncToken SendDecodeRequest(const gfx::Size& output_size,
- uint64_t release_count) {
+ uint64_t decode_release_count,
+ uint32_t transfer_cache_entry_id,
+ uint64_t handle_release_count) {
GpuChannel* channel = channel_manager()->LookupChannel(kChannelId);
if (!channel) {
// It's possible that the channel was destroyed as part of an earlier
@@ -111,19 +287,30 @@ class ImageDecodeAcceleratorStubTest : public GpuChannelTestCommon {
return SyncToken();
}
+ // Create the decode sync token for the decode request so that we can test
+ // that it's actually released.
SyncToken decode_sync_token(
CommandBufferNamespace::GPU_IO,
CommandBufferIdFromChannelAndRoute(
kChannelId, static_cast<int32_t>(
GpuChannelReservedRoutes::kImageDecodeAccelerator)),
- release_count);
+ decode_release_count);
+
+ // Create a discardable handle and schedule its buffer's registration.
+ ClientDiscardableHandle handle =
+ CreateDiscardableHandle(handle_release_count);
+ if (!handle.IsValid())
+ return SyncToken();
+
+ // Send the IPC decode request.
GpuChannelMsg_ScheduleImageDecode_Params decode_params;
decode_params.encoded_data = std::vector<uint8_t>();
decode_params.output_size = output_size;
- decode_params.raster_decoder_route_id = 1;
- decode_params.transfer_cache_entry_id = 1u;
- decode_params.discardable_handle_shm_id = 0;
- decode_params.discardable_handle_shm_offset = 0u;
+ decode_params.raster_decoder_route_id = kCommandBufferRouteId;
+ decode_params.transfer_cache_entry_id = transfer_cache_entry_id;
+ decode_params.discardable_handle_shm_id = handle.shm_id();
+ decode_params.discardable_handle_shm_offset = handle.byte_offset();
+ decode_params.discardable_handle_release_count = handle_release_count;
decode_params.target_color_space = gfx::ColorSpace();
decode_params.needs_mips = false;
@@ -144,11 +331,41 @@ class ImageDecodeAcceleratorStubTest : public GpuChannelTestCommon {
}
}
+ void CheckTransferCacheEntries(std::vector<SkISize> expected_sizes) {
+ ServiceTransferCache* transfer_cache = GetServiceTransferCache();
+ ASSERT_TRUE(transfer_cache);
+
+ // First, check the number of entries and early out if 0 entries are
+ // expected.
+ const size_t num_actual_cache_entries =
+ transfer_cache->entries_count_for_testing();
+ ASSERT_EQ(expected_sizes.size(), num_actual_cache_entries);
+ if (expected_sizes.empty())
+ return;
+
+ // Then, check the dimensions of the entries to make sure they are as
+ // expected.
+ int raster_decoder_id = GetRasterDecoderId();
+ ASSERT_GE(raster_decoder_id, 0);
+ for (size_t i = 0; i < num_actual_cache_entries; i++) {
+ auto* decode_entry = static_cast<cc::ServiceImageTransferCacheEntry*>(
+ transfer_cache->GetEntry(ServiceTransferCache::EntryKey(
+ raster_decoder_id, cc::TransferCacheEntryType::kImage, i + 1)));
+ ASSERT_TRUE(decode_entry);
+ ASSERT_TRUE(decode_entry->image());
+ EXPECT_EQ(expected_sizes[i].width(),
+ decode_entry->image()->dimensions().width());
+ EXPECT_EQ(expected_sizes[i].height(),
+ decode_entry->image()->dimensions().height());
+ }
+ }
+
protected:
StrictMock<MockImageDecodeAcceleratorWorker> image_decode_accelerator_worker_;
private:
base::test::ScopedFeatureList feature_list_;
+ base::WeakPtrFactory<ImageDecodeAcceleratorStubTest> weak_ptr_factory_;
DISALLOW_COPY_AND_ASSIGN(ImageDecodeAcceleratorStubTest);
};
@@ -167,9 +384,13 @@ TEST_F(ImageDecodeAcceleratorStubTest,
.Times(1);
}
const SyncToken decode1_sync_token = SendDecodeRequest(
- gfx::Size(100, 100) /* output_size */, 1u /* release_count */);
+ gfx::Size(100, 100) /* output_size */, 1u /* decode_release_count */,
+ 1u /* transfer_cache_entry_id */, 1u /* handle_release_count */);
+ ASSERT_TRUE(decode1_sync_token.HasData());
const SyncToken decode2_sync_token = SendDecodeRequest(
- gfx::Size(200, 200) /* output_size */, 2u /* release_count */);
+ gfx::Size(200, 200) /* output_size */, 2u /* decode_release_count */,
+ 2u /* transfer_cache_entry_id */, 2u /* handle_release_count */);
+ ASSERT_TRUE(decode2_sync_token.HasData());
// A decode sync token should not be released before a decode is finished.
RunTasksUntilIdle();
@@ -192,6 +413,9 @@ TEST_F(ImageDecodeAcceleratorStubTest,
// The channel should still exist at the end.
EXPECT_TRUE(channel_manager()->LookupChannel(kChannelId));
+
+ // Check that the decoded images are in the transfer cache.
+ CheckTransferCacheEntries({SkISize::Make(100, 100), SkISize::Make(200, 200)});
}
// Tests the following flow: three decode requests are sent. The first decode
@@ -210,11 +434,17 @@ TEST_F(ImageDecodeAcceleratorStubTest,
.Times(1);
}
const SyncToken decode1_sync_token = SendDecodeRequest(
- gfx::Size(100, 100) /* output_size */, 1u /* release_count */);
+ gfx::Size(100, 100) /* output_size */, 1u /* decode_release_count */,
+ 1u /* transfer_cache_entry_id */, 1u /* handle_release_count */);
+ ASSERT_TRUE(decode1_sync_token.HasData());
const SyncToken decode2_sync_token = SendDecodeRequest(
- gfx::Size(200, 200) /* output_size */, 2u /* release_count */);
+ gfx::Size(200, 200) /* output_size */, 2u /* decode_release_count */,
+ 2u /* transfer_cache_entry_id */, 2u /* handle_release_count */);
+ ASSERT_TRUE(decode2_sync_token.HasData());
const SyncToken decode3_sync_token = SendDecodeRequest(
- gfx::Size(300, 300) /* output_size */, 3u /* release_count */);
+ gfx::Size(300, 300) /* output_size */, 3u /* decode_release_count */,
+ 3u /* transfer_cache_entry_id */, 3u /* handle_release_count */);
+ ASSERT_TRUE(decode3_sync_token.HasData());
// A decode sync token should not be released before a decode is finished.
RunTasksUntilIdle();
@@ -233,6 +463,10 @@ TEST_F(ImageDecodeAcceleratorStubTest,
// The channel should still exist at the end.
EXPECT_TRUE(channel_manager()->LookupChannel(kChannelId));
+
+ // Check that the decoded images are in the transfer cache.
+ CheckTransferCacheEntries({SkISize::Make(100, 100), SkISize::Make(200, 200),
+ SkISize::Make(300, 300)});
}
// Tests the following flow: three decode requests are sent. The first decode
@@ -250,11 +484,17 @@ TEST_F(ImageDecodeAcceleratorStubTest, FailedDecodes) {
.Times(1);
}
const SyncToken decode1_sync_token = SendDecodeRequest(
- gfx::Size(100, 100) /* output_size */, 1u /* release_count */);
+ gfx::Size(100, 100) /* output_size */, 1u /* decode_release_count */,
+ 1u /* transfer_cache_entry_id */, 1u /* handle_release_count */);
+ ASSERT_TRUE(decode1_sync_token.HasData());
const SyncToken decode2_sync_token = SendDecodeRequest(
- gfx::Size(200, 200) /* output_size */, 2u /* release_count */);
+ gfx::Size(200, 200) /* output_size */, 2u /* decode_release_count */,
+ 2u /* transfer_cache_entry_id */, 2u /* handle_release_count */);
+ ASSERT_TRUE(decode2_sync_token.HasData());
const SyncToken decode3_sync_token = SendDecodeRequest(
- gfx::Size(300, 300) /* output_size */, 3u /* release_count */);
+ gfx::Size(300, 300) /* output_size */, 3u /* decode_release_count */,
+ 3u /* transfer_cache_entry_id */, 3u /* handle_release_count */);
+ ASSERT_TRUE(decode3_sync_token.HasData());
// A decode sync token should not be released before a decode is finished.
RunTasksUntilIdle();
@@ -272,15 +512,23 @@ TEST_F(ImageDecodeAcceleratorStubTest, FailedDecodes) {
EXPECT_TRUE(sync_point_manager()->IsSyncTokenReleased(decode1_sync_token));
EXPECT_TRUE(sync_point_manager()->IsSyncTokenReleased(decode2_sync_token));
EXPECT_TRUE(sync_point_manager()->IsSyncTokenReleased(decode3_sync_token));
+
+ // We expect no entries in the transfer cache.
+ CheckTransferCacheEntries({});
}
-TEST_F(ImageDecodeAcceleratorStubTest, OutOfOrderSyncTokens) {
+TEST_F(ImageDecodeAcceleratorStubTest, OutOfOrderDecodeSyncTokens) {
EXPECT_CALL(image_decode_accelerator_worker_, DoDecode(gfx::Size(100, 100)))
.Times(1);
const SyncToken decode1_sync_token = SendDecodeRequest(
- gfx::Size(100, 100) /* output_size */, 2u /* release_count */);
+ gfx::Size(100, 100) /* output_size */, 2u /* decode_release_count */,
+ 1u /* transfer_cache_entry_id */, 1u /* handle_release_count */);
+ ASSERT_TRUE(decode1_sync_token.HasData());
+
const SyncToken decode2_sync_token = SendDecodeRequest(
- gfx::Size(200, 200) /* output_size */, 1u /* release_count */);
+ gfx::Size(200, 200) /* output_size */, 1u /* decode_release_count */,
+ 2u /* transfer_cache_entry_id */, 2u /* handle_release_count */);
+ ASSERT_TRUE(decode2_sync_token.HasData());
// We expect the destruction of the ImageDecodeAcceleratorStub, which also
// implies that all decode sync tokens should be released.
@@ -288,39 +536,113 @@ TEST_F(ImageDecodeAcceleratorStubTest, OutOfOrderSyncTokens) {
EXPECT_FALSE(channel_manager()->LookupChannel(kChannelId));
EXPECT_TRUE(sync_point_manager()->IsSyncTokenReleased(decode1_sync_token));
EXPECT_TRUE(sync_point_manager()->IsSyncTokenReleased(decode2_sync_token));
+
+ // We expect no entries in the transfer cache.
+ CheckTransferCacheEntries({});
}
-TEST_F(ImageDecodeAcceleratorStubTest, ZeroReleaseCountSyncToken) {
+TEST_F(ImageDecodeAcceleratorStubTest, ZeroReleaseCountDecodeSyncToken) {
const SyncToken decode_sync_token = SendDecodeRequest(
- gfx::Size(100, 100) /* output_size */, 0u /* release_count */);
+ gfx::Size(100, 100) /* output_size */, 0u /* decode_release_count */,
+ 1u /* transfer_cache_entry_id */, 1u /* handle_release_count */);
+ ASSERT_TRUE(decode_sync_token.HasData());
// We expect the destruction of the ImageDecodeAcceleratorStub, which also
// implies that all decode sync tokens should be released.
RunTasksUntilIdle();
EXPECT_FALSE(channel_manager()->LookupChannel(kChannelId));
EXPECT_TRUE(sync_point_manager()->IsSyncTokenReleased(decode_sync_token));
+
+ // We expect no entries in the transfer cache.
+ CheckTransferCacheEntries({});
}
TEST_F(ImageDecodeAcceleratorStubTest, ZeroWidthOutputSize) {
const SyncToken decode_sync_token = SendDecodeRequest(
- gfx::Size(0, 100) /* output_size */, 1u /* release_count */);
+ gfx::Size(0, 100) /* output_size */, 1u /* decode_release_count */,
+ 1u /* transfer_cache_entry_id */, 1u /* handle_release_count */);
+ ASSERT_TRUE(decode_sync_token.HasData());
// We expect the destruction of the ImageDecodeAcceleratorStub, which also
// implies that all decode sync tokens should be released.
RunTasksUntilIdle();
EXPECT_FALSE(channel_manager()->LookupChannel(kChannelId));
EXPECT_TRUE(sync_point_manager()->IsSyncTokenReleased(decode_sync_token));
+
+ // We expect no entries in the transfer cache.
+ CheckTransferCacheEntries({});
}
TEST_F(ImageDecodeAcceleratorStubTest, ZeroHeightOutputSize) {
const SyncToken decode_sync_token = SendDecodeRequest(
- gfx::Size(100, 0) /* output_size */, 1u /* release_count */);
+ gfx::Size(100, 0) /* output_size */, 1u /* decode_release_count */,
+ 1u /* transfer_cache_entry_id */, 1u /* handle_release_count */);
+ ASSERT_TRUE(decode_sync_token.HasData());
// We expect the destruction of the ImageDecodeAcceleratorStub, which also
// implies that all decode sync tokens should be released.
RunTasksUntilIdle();
EXPECT_FALSE(channel_manager()->LookupChannel(kChannelId));
EXPECT_TRUE(sync_point_manager()->IsSyncTokenReleased(decode_sync_token));
+
+ // We expect no entries in the transfer cache.
+ CheckTransferCacheEntries({});
+}
+
+// Tests that we wait for a discardable handle's buffer to be registered before
+// we attempt to process the corresponding completed decode.
+TEST_F(ImageDecodeAcceleratorStubTest, WaitForDiscardableHandleRegistration) {
+ EXPECT_CALL(image_decode_accelerator_worker_, DoDecode(gfx::Size(100, 100)))
+ .Times(1);
+
+ // First, we disable the raster sequence so that we can control when to
+ // register the discardable handle's buffer by re-enabling the sequence.
+ GpuChannel* channel = channel_manager()->LookupChannel(kChannelId);
+ ASSERT_TRUE(channel);
+ const CommandBufferStub* command_buffer =
+ channel->LookupCommandBuffer(kCommandBufferRouteId);
+ ASSERT_TRUE(command_buffer);
+ const SequenceId raster_sequence_id = command_buffer->sequence_id();
+ scheduler()->DisableSequence(raster_sequence_id);
+
+ // Now we can send the decode request. This schedules the registration of the
+ // discardable handle, but it won't actually be registered until we re-enable
+ // the raster sequence later on.
+ const SyncToken decode_sync_token = SendDecodeRequest(
+ gfx::Size(100, 100) /* output_size */, 1u /* decode_release_count */,
+ 1u /* transfer_cache_entry_id */, 1u /* handle_release_count */);
+ ASSERT_TRUE(decode_sync_token.HasData());
+
+ // A decode sync token should not be released before a decode is finished.
+ RunTasksUntilIdle();
+ EXPECT_FALSE(sync_point_manager()->IsSyncTokenReleased(decode_sync_token));
+
+ // Even when a decode is finished, the decode sync token shouldn't be released
+ // before the discardable handle's buffer is registered.
+ image_decode_accelerator_worker_.FinishOneDecode(true);
+ RunTasksUntilIdle();
+ EXPECT_FALSE(sync_point_manager()->IsSyncTokenReleased(decode_sync_token));
+
+ // Let's make sure that the channel and the command buffer are still alive
+ // because if we didn't wait for the discardable handle's buffer to be
+ // registered, we could have caused a channel teardown.
+ ASSERT_TRUE(channel_manager()->LookupChannel(kChannelId));
+ ASSERT_TRUE(channel_manager()
+ ->LookupChannel(kChannelId)
+ ->LookupCommandBuffer(kCommandBufferRouteId));
+
+ // Now let's register the discardable handle's buffer by re-enabling the
+ // raster sequence. This should trigger the processing of the completed decode
+ // and the subsequent release of the decode sync token.
+ scheduler()->EnableSequence(raster_sequence_id);
+ RunTasksUntilIdle();
+ EXPECT_TRUE(sync_point_manager()->IsSyncTokenReleased(decode_sync_token));
+
+ // The channel should still exist at the end.
+ EXPECT_TRUE(channel_manager()->LookupChannel(kChannelId));
+
+ // Check that the decoded images are in the transfer cache.
+ CheckTransferCacheEntries({SkISize::Make(100, 100)});
}
} // namespace gpu
diff --git a/chromium/gpu/ipc/service/image_decode_accelerator_worker.h b/chromium/gpu/ipc/service/image_decode_accelerator_worker.h
index 66efb3090b5..a494783d7f3 100644
--- a/chromium/gpu/ipc/service/image_decode_accelerator_worker.h
+++ b/chromium/gpu/ipc/service/image_decode_accelerator_worker.h
@@ -5,9 +5,13 @@
#ifndef GPU_IPC_SERVICE_IMAGE_DECODE_ACCELERATOR_WORKER_H_
#define GPU_IPC_SERVICE_IMAGE_DECODE_ACCELERATOR_WORKER_H_
+#include <stddef.h>
+#include <stdint.h>
+
#include <vector>
#include "base/callback.h"
+#include "third_party/skia/include/core/SkImageInfo.h"
namespace gfx {
class Size;
@@ -21,16 +25,27 @@ class ImageDecodeAcceleratorWorker {
public:
virtual ~ImageDecodeAcceleratorWorker() {}
+ using CompletedDecodeCB =
+ base::OnceCallback<void(std::vector<uint8_t> /* output */,
+ size_t /* row_bytes */,
+ SkImageInfo /* image_info */)>;
+
// Enqueue a decode of |encoded_data|. The |decode_cb| is called
- // asynchronously when the decode completes passing as a parameter a vector
- // containing the decoded image in RGBA format (the stride of the output is
- // |output_size|.width() * 4). If the decode fails, |decode_cb| is called
- // asynchronously with an empty vector. Callbacks should be called in the
- // order that this method is called.
- virtual void Decode(
- std::vector<uint8_t> encoded_data,
- const gfx::Size& output_size,
- base::OnceCallback<void(std::vector<uint8_t>)> decode_cb) = 0;
+ // asynchronously when the decode completes passing as parameters a vector
+ // containing the decoded image (|output|), the stride (|row_bytes|), and a
+ // SkImageInfo (|image_info|) with information about the decoded output.
+ // For a successful decode, implementations must guarantee that:
+ //
+ // 1) |image_info|.width() == |output_size|.width().
+ // 2) |image_info|.height() == |output_size|.height().
+ // 3) |row_bytes| >= |image_info|.minRowBytes().
+ // 4) |output|.size() == |image_info|.computeByteSize(|row_bytes|).
+ //
+ // If the decode fails, |decode_cb| is called asynchronously with an empty
+ // vector. Callbacks should be called in the order that this method is called.
+ virtual void Decode(std::vector<uint8_t> encoded_data,
+ const gfx::Size& output_size,
+ CompletedDecodeCB decode_cb) = 0;
};
} // namespace gpu
diff --git a/chromium/gpu/ipc/service/image_transport_surface_overlay_mac.h b/chromium/gpu/ipc/service/image_transport_surface_overlay_mac.h
index 5d68c4f9944..8b9e3875e4b 100644
--- a/chromium/gpu/ipc/service/image_transport_surface_overlay_mac.h
+++ b/chromium/gpu/ipc/service/image_transport_surface_overlay_mac.h
@@ -45,12 +45,12 @@ class ImageTransportSurfaceOverlayMac : public gl::GLSurface,
ColorSpace color_space,
bool has_alpha) override;
bool IsOffscreen() override;
- gfx::SwapResult SwapBuffers(const PresentationCallback& callback) override;
+ gfx::SwapResult SwapBuffers(PresentationCallback callback) override;
gfx::SwapResult PostSubBuffer(int x,
int y,
int width,
int height,
- const PresentationCallback& callback) override;
+ PresentationCallback callback) override;
bool SupportsPostSubBuffer() override;
gfx::Size GetSize() override;
void* GetHandle() override;
@@ -76,9 +76,9 @@ class ImageTransportSurfaceOverlayMac : public gl::GLSurface,
~ImageTransportSurfaceOverlayMac() override;
gfx::SwapResult SwapBuffersInternal(const gfx::Rect& pixel_damage_rect,
- const PresentationCallback& callback);
+ PresentationCallback callback);
void ApplyBackpressure();
- void BufferPresented(const PresentationCallback& callback,
+ void BufferPresented(PresentationCallback callback,
const gfx::PresentationFeedback& feedback);
base::WeakPtr<ImageTransportSurfaceDelegate> delegate_;
diff --git a/chromium/gpu/ipc/service/image_transport_surface_overlay_mac.mm b/chromium/gpu/ipc/service/image_transport_surface_overlay_mac.mm
index 3884df87a49..67d26f0c669 100644
--- a/chromium/gpu/ipc/service/image_transport_surface_overlay_mac.mm
+++ b/chromium/gpu/ipc/service/image_transport_surface_overlay_mac.mm
@@ -7,6 +7,7 @@
#include <sstream>
#include "base/bind.h"
+#include "base/bind_helpers.h"
#include "base/command_line.h"
#include "base/metrics/histogram_macros.h"
#include "base/threading/thread_task_runner_handle.h"
@@ -89,17 +90,17 @@ void ImageTransportSurfaceOverlayMac::ApplyBackpressure() {
}
void ImageTransportSurfaceOverlayMac::BufferPresented(
- const PresentationCallback& callback,
+ PresentationCallback callback,
const gfx::PresentationFeedback& feedback) {
DCHECK(!callback.is_null());
- callback.Run(feedback);
+ std::move(callback).Run(feedback);
if (delegate_)
delegate_->BufferPresented(feedback);
}
gfx::SwapResult ImageTransportSurfaceOverlayMac::SwapBuffersInternal(
const gfx::Rect& pixel_damage_rect,
- const PresentationCallback& callback) {
+ PresentationCallback callback) {
TRACE_EVENT0("gpu", "ImageTransportSurfaceOverlayMac::SwapBuffersInternal");
// Do a GL fence for flush to apply back-pressure before drawing.
@@ -166,14 +167,16 @@ gfx::SwapResult ImageTransportSurfaceOverlayMac::SwapBuffersInternal(
base::ThreadTaskRunnerHandle::Get()->PostTask(
FROM_HERE,
base::BindOnce(&ImageTransportSurfaceOverlayMac::BufferPresented,
- weak_ptr_factory_.GetWeakPtr(), callback, feedback));
+ weak_ptr_factory_.GetWeakPtr(), std::move(callback),
+ feedback));
return gfx::SwapResult::SWAP_ACK;
}
gfx::SwapResult ImageTransportSurfaceOverlayMac::SwapBuffers(
- const PresentationCallback& callback) {
+ PresentationCallback callback) {
return SwapBuffersInternal(
- gfx::Rect(0, 0, pixel_size_.width(), pixel_size_.height()), callback);
+ gfx::Rect(0, 0, pixel_size_.width(), pixel_size_.height()),
+ std::move(callback));
}
gfx::SwapResult ImageTransportSurfaceOverlayMac::PostSubBuffer(
@@ -181,8 +184,9 @@ gfx::SwapResult ImageTransportSurfaceOverlayMac::PostSubBuffer(
int y,
int width,
int height,
- const PresentationCallback& callback) {
- return SwapBuffersInternal(gfx::Rect(x, y, width, height), callback);
+ PresentationCallback callback) {
+ return SwapBuffersInternal(gfx::Rect(x, y, width, height),
+ std::move(callback));
}
bool ImageTransportSurfaceOverlayMac::SupportsPostSubBuffer() {
diff --git a/chromium/gpu/ipc/service/pass_through_image_transport_surface.cc b/chromium/gpu/ipc/service/pass_through_image_transport_surface.cc
index 8827552d320..6b1b9fc92f5 100644
--- a/chromium/gpu/ipc/service/pass_through_image_transport_surface.cc
+++ b/chromium/gpu/ipc/service/pass_through_image_transport_surface.cc
@@ -4,6 +4,8 @@
#include "gpu/ipc/service/pass_through_image_transport_surface.h"
+#include <utility>
+
#include "base/bind.h"
#include "base/bind_helpers.h"
#include "base/command_line.h"
@@ -51,20 +53,20 @@ bool PassThroughImageTransportSurface::Initialize(gl::GLSurfaceFormat format) {
}
gfx::SwapResult PassThroughImageTransportSurface::SwapBuffers(
- const PresentationCallback& callback) {
+ PresentationCallback callback) {
gfx::SwapResponse response;
StartSwapBuffers(&response);
gfx::SwapResult result = gl::GLSurfaceAdapter::SwapBuffers(
- base::Bind(&PassThroughImageTransportSurface::BufferPresented,
- weak_ptr_factory_.GetWeakPtr(), callback));
+ base::BindOnce(&PassThroughImageTransportSurface::BufferPresented,
+ weak_ptr_factory_.GetWeakPtr(), std::move(callback)));
response.result = result;
FinishSwapBuffers(std::move(response));
return result;
}
void PassThroughImageTransportSurface::SwapBuffersAsync(
- const SwapCompletionCallback& completion_callback,
- const PresentationCallback& presentation_callback) {
+ SwapCompletionCallback completion_callback,
+ PresentationCallback presentation_callback) {
gfx::SwapResponse response;
StartSwapBuffers(&response);
@@ -73,21 +75,23 @@ void PassThroughImageTransportSurface::SwapBuffersAsync(
// is destroyed. However, this also means that the callback can be run on
// the calling thread only.
gl::GLSurfaceAdapter::SwapBuffersAsync(
- base::Bind(&PassThroughImageTransportSurface::FinishSwapBuffersAsync,
- weak_ptr_factory_.GetWeakPtr(), completion_callback,
- base::Passed(&response)),
- base::Bind(&PassThroughImageTransportSurface::BufferPresented,
- weak_ptr_factory_.GetWeakPtr(), presentation_callback));
+ base::BindOnce(&PassThroughImageTransportSurface::FinishSwapBuffersAsync,
+ weak_ptr_factory_.GetWeakPtr(),
+ std::move(completion_callback), std::move(response)),
+ base::BindOnce(&PassThroughImageTransportSurface::BufferPresented,
+ weak_ptr_factory_.GetWeakPtr(),
+ std::move(presentation_callback)));
}
gfx::SwapResult PassThroughImageTransportSurface::SwapBuffersWithBounds(
const std::vector<gfx::Rect>& rects,
- const PresentationCallback& callback) {
+ PresentationCallback callback) {
gfx::SwapResponse response;
StartSwapBuffers(&response);
gfx::SwapResult result = gl::GLSurfaceAdapter::SwapBuffersWithBounds(
- rects, base::Bind(&PassThroughImageTransportSurface::BufferPresented,
- weak_ptr_factory_.GetWeakPtr(), callback));
+ rects,
+ base::BindOnce(&PassThroughImageTransportSurface::BufferPresented,
+ weak_ptr_factory_.GetWeakPtr(), std::move(callback)));
response.result = result;
FinishSwapBuffers(std::move(response));
return result;
@@ -98,13 +102,13 @@ gfx::SwapResult PassThroughImageTransportSurface::PostSubBuffer(
int y,
int width,
int height,
- const PresentationCallback& callback) {
+ PresentationCallback callback) {
gfx::SwapResponse response;
StartSwapBuffers(&response);
gfx::SwapResult result = gl::GLSurfaceAdapter::PostSubBuffer(
x, y, width, height,
- base::Bind(&PassThroughImageTransportSurface::BufferPresented,
- weak_ptr_factory_.GetWeakPtr(), callback));
+ base::BindOnce(&PassThroughImageTransportSurface::BufferPresented,
+ weak_ptr_factory_.GetWeakPtr(), std::move(callback)));
response.result = result;
FinishSwapBuffers(std::move(response));
@@ -116,42 +120,44 @@ void PassThroughImageTransportSurface::PostSubBufferAsync(
int y,
int width,
int height,
- const GLSurface::SwapCompletionCallback& completion_callback,
- const PresentationCallback& presentation_callback) {
+ SwapCompletionCallback completion_callback,
+ PresentationCallback presentation_callback) {
gfx::SwapResponse response;
StartSwapBuffers(&response);
gl::GLSurfaceAdapter::PostSubBufferAsync(
x, y, width, height,
- base::Bind(&PassThroughImageTransportSurface::FinishSwapBuffersAsync,
- weak_ptr_factory_.GetWeakPtr(), completion_callback,
- base::Passed(&response)),
- base::Bind(&PassThroughImageTransportSurface::BufferPresented,
- weak_ptr_factory_.GetWeakPtr(), presentation_callback));
+ base::BindOnce(&PassThroughImageTransportSurface::FinishSwapBuffersAsync,
+ weak_ptr_factory_.GetWeakPtr(),
+ std::move(completion_callback), std::move(response)),
+ base::BindOnce(&PassThroughImageTransportSurface::BufferPresented,
+ weak_ptr_factory_.GetWeakPtr(),
+ std::move(presentation_callback)));
}
gfx::SwapResult PassThroughImageTransportSurface::CommitOverlayPlanes(
- const PresentationCallback& callback) {
+ PresentationCallback callback) {
gfx::SwapResponse response;
StartSwapBuffers(&response);
gfx::SwapResult result = gl::GLSurfaceAdapter::CommitOverlayPlanes(
- base::Bind(&PassThroughImageTransportSurface::BufferPresented,
- weak_ptr_factory_.GetWeakPtr(), callback));
+ base::BindOnce(&PassThroughImageTransportSurface::BufferPresented,
+ weak_ptr_factory_.GetWeakPtr(), std::move(callback)));
response.result = result;
FinishSwapBuffers(std::move(response));
return result;
}
void PassThroughImageTransportSurface::CommitOverlayPlanesAsync(
- const GLSurface::SwapCompletionCallback& callback,
- const PresentationCallback& presentation_callback) {
+ SwapCompletionCallback callback,
+ PresentationCallback presentation_callback) {
gfx::SwapResponse response;
StartSwapBuffers(&response);
gl::GLSurfaceAdapter::CommitOverlayPlanesAsync(
- base::Bind(&PassThroughImageTransportSurface::FinishSwapBuffersAsync,
- weak_ptr_factory_.GetWeakPtr(), callback,
- base::Passed(&response)),
- base::Bind(&PassThroughImageTransportSurface::BufferPresented,
- weak_ptr_factory_.GetWeakPtr(), presentation_callback));
+ base::BindOnce(&PassThroughImageTransportSurface::FinishSwapBuffersAsync,
+ weak_ptr_factory_.GetWeakPtr(), std::move(callback),
+ std::move(response)),
+ base::BindOnce(&PassThroughImageTransportSurface::BufferPresented,
+ weak_ptr_factory_.GetWeakPtr(),
+ std::move(presentation_callback)));
}
void PassThroughImageTransportSurface::SetVSyncEnabled(bool enabled) {
@@ -218,7 +224,7 @@ void PassThroughImageTransportSurface::FinishSwapBuffers(
}
void PassThroughImageTransportSurface::FinishSwapBuffersAsync(
- GLSurface::SwapCompletionCallback callback,
+ SwapCompletionCallback callback,
gfx::SwapResponse response,
gfx::SwapResult result,
std::unique_ptr<gfx::GpuFence> gpu_fence) {
@@ -230,14 +236,14 @@ void PassThroughImageTransportSurface::FinishSwapBuffersAsync(
gpu_fence->Wait();
response.result = result;
FinishSwapBuffers(std::move(response));
- callback.Run(result, nullptr);
+ std::move(callback).Run(result, nullptr);
}
void PassThroughImageTransportSurface::BufferPresented(
- const GLSurface::PresentationCallback& callback,
+ GLSurface::PresentationCallback callback,
const gfx::PresentationFeedback& feedback) {
DCHECK(allow_running_presentation_callback_);
- callback.Run(feedback);
+ std::move(callback).Run(feedback);
if (delegate_)
delegate_->BufferPresented(feedback);
}
diff --git a/chromium/gpu/ipc/service/pass_through_image_transport_surface.h b/chromium/gpu/ipc/service/pass_through_image_transport_surface.h
index 279bfa39e8d..8defdd047f1 100644
--- a/chromium/gpu/ipc/service/pass_through_image_transport_surface.h
+++ b/chromium/gpu/ipc/service/pass_through_image_transport_surface.h
@@ -29,30 +29,26 @@ class PassThroughImageTransportSurface : public gl::GLSurfaceAdapter {
// GLSurface implementation.
bool Initialize(gl::GLSurfaceFormat format) override;
- gfx::SwapResult SwapBuffers(const PresentationCallback& callback) override;
- void SwapBuffersAsync(
- const SwapCompletionCallback& completion_callback,
- const PresentationCallback& presentation_callback) override;
- gfx::SwapResult SwapBuffersWithBounds(
- const std::vector<gfx::Rect>& rects,
- const PresentationCallback& callback) override;
+ gfx::SwapResult SwapBuffers(PresentationCallback callback) override;
+ void SwapBuffersAsync(SwapCompletionCallback completion_callback,
+ PresentationCallback presentation_callback) override;
+ gfx::SwapResult SwapBuffersWithBounds(const std::vector<gfx::Rect>& rects,
+ PresentationCallback callback) override;
gfx::SwapResult PostSubBuffer(int x,
int y,
int width,
int height,
- const PresentationCallback& callback) override;
- void PostSubBufferAsync(
- int x,
- int y,
- int width,
- int height,
- const SwapCompletionCallback& completion_callback,
- const PresentationCallback& presentation_callback) override;
- gfx::SwapResult CommitOverlayPlanes(
- const PresentationCallback& callback) override;
+ PresentationCallback callback) override;
+ void PostSubBufferAsync(int x,
+ int y,
+ int width,
+ int height,
+ SwapCompletionCallback completion_callback,
+ PresentationCallback presentation_callback) override;
+ gfx::SwapResult CommitOverlayPlanes(PresentationCallback callback) override;
void CommitOverlayPlanesAsync(
- const SwapCompletionCallback& completion_callback,
- const PresentationCallback& presentation_callback) override;
+ SwapCompletionCallback completion_callback,
+ PresentationCallback presentation_callback) override;
void SetVSyncEnabled(bool enabled) override;
private:
@@ -62,12 +58,12 @@ class PassThroughImageTransportSurface : public gl::GLSurfaceAdapter {
void StartSwapBuffers(gfx::SwapResponse* response);
void FinishSwapBuffers(gfx::SwapResponse response);
- void FinishSwapBuffersAsync(GLSurface::SwapCompletionCallback callback,
+ void FinishSwapBuffersAsync(SwapCompletionCallback callback,
gfx::SwapResponse response,
gfx::SwapResult result,
std::unique_ptr<gfx::GpuFence> gpu_fence);
- void BufferPresented(const GLSurface::PresentationCallback& callback,
+ void BufferPresented(PresentationCallback callback,
const gfx::PresentationFeedback& feedback);
const bool is_gpu_vsync_disabled_;
diff --git a/chromium/gpu/ipc/service/raster_command_buffer_stub.cc b/chromium/gpu/ipc/service/raster_command_buffer_stub.cc
index f673526d6ef..7f24495d4b4 100644
--- a/chromium/gpu/ipc/service/raster_command_buffer_stub.cc
+++ b/chromium/gpu/ipc/service/raster_command_buffer_stub.cc
@@ -114,30 +114,19 @@ gpu::ContextResult RasterCommandBufferStub::Initialize(
}
}
- gpu::GpuMemoryBufferFactory* gmb_factory =
- manager->gpu_memory_buffer_factory();
- context_group_ = base::MakeRefCounted<gles2::ContextGroup>(
- manager->gpu_preferences(), gles2::PassthroughCommandDecoderSupported(),
- manager->mailbox_manager(), CreateMemoryTracker(init_params),
- manager->shader_translator_cache(),
- manager->framebuffer_completeness_cache(),
- shared_context_state->feature_info(),
- init_params.attribs.bind_generates_resource, channel_->image_manager(),
- gmb_factory ? gmb_factory->AsImageFactory() : nullptr,
- /*progress_reporter=*/manager->watchdog(), manager->gpu_feature_info(),
- manager->discardable_manager(),
- manager->passthrough_discardable_manager(),
- manager->shared_image_manager());
-
surface_ = shared_context_state->surface();
share_group_ = shared_context_state->share_group();
use_virtualized_gl_context_ =
shared_context_state->use_virtualized_gl_contexts();
- command_buffer_ = std::make_unique<CommandBufferService>(
- this, context_group_->transfer_buffer_manager());
+ memory_tracker_ = CreateMemoryTracker(init_params);
+
+ command_buffer_ =
+ std::make_unique<CommandBufferService>(this, memory_tracker_.get());
std::unique_ptr<raster::RasterDecoder> decoder(raster::RasterDecoder::Create(
- this, command_buffer_.get(), manager->outputter(), context_group_.get(),
+ this, command_buffer_.get(), manager->outputter(),
+ manager->gpu_feature_info(), manager->gpu_preferences(),
+ memory_tracker_.get(), manager->shared_image_manager(),
shared_context_state));
sync_point_client_state_ =
@@ -155,11 +144,6 @@ gpu::ContextResult RasterCommandBufferStub::Initialize(
return gpu::ContextResult::kTransientFailure;
}
- if (!context_group_->has_program_cache() &&
- !context_group_->feature_info()->workarounds().disable_program_cache) {
- context_group_->set_program_cache(manager->program_cache());
- }
-
// Initialize the decoder with either the view or pbuffer GLContext.
result = decoder->Initialize(surface_, context, true /* offscreen */,
gpu::gles2::DisallowedFeatures(),
@@ -193,13 +177,12 @@ gpu::ContextResult RasterCommandBufferStub::Initialize(
return gpu::ContextResult::kSuccess;
}
-// RasterInterface clients should not manipulate the front buffer.
-void RasterCommandBufferStub::OnTakeFrontBuffer(const Mailbox& mailbox) {
- NOTREACHED();
+MemoryTracker* RasterCommandBufferStub::GetMemoryTracker() const {
+ return memory_tracker_.get();
}
-void RasterCommandBufferStub::OnReturnFrontBuffer(const Mailbox& mailbox,
- bool is_lost) {
- NOTREACHED();
+
+bool RasterCommandBufferStub::HandleMessage(const IPC::Message& message) {
+ return false;
}
void RasterCommandBufferStub::OnSwapBuffers(uint64_t swap_id, uint32_t flags) {}
diff --git a/chromium/gpu/ipc/service/raster_command_buffer_stub.h b/chromium/gpu/ipc/service/raster_command_buffer_stub.h
index 54cd8a320c6..920815eacbb 100644
--- a/chromium/gpu/ipc/service/raster_command_buffer_stub.h
+++ b/chromium/gpu/ipc/service/raster_command_buffer_stub.h
@@ -27,13 +27,15 @@ class GPU_IPC_SERVICE_EXPORT RasterCommandBufferStub
CommandBufferStub* share_group,
const GPUCreateCommandBufferConfig& init_params,
base::UnsafeSharedMemoryRegion shared_state_shm) override;
+ MemoryTracker* GetMemoryTracker() const override;
private:
- void OnTakeFrontBuffer(const Mailbox& mailbox) override;
- void OnReturnFrontBuffer(const Mailbox& mailbox, bool is_lost) override;
+ bool HandleMessage(const IPC::Message& message) override;
void OnSwapBuffers(uint64_t swap_id, uint32_t flags) override;
void SetActiveURL(GURL url) override;
+ std::unique_ptr<MemoryTracker> memory_tracker_;
+
DISALLOW_COPY_AND_ASSIGN(RasterCommandBufferStub);
};
diff --git a/chromium/gpu/ipc/service/shared_image_stub.cc b/chromium/gpu/ipc/service/shared_image_stub.cc
index 6ee676a8016..70a83f3c205 100644
--- a/chromium/gpu/ipc/service/shared_image_stub.cc
+++ b/chromium/gpu/ipc/service/shared_image_stub.cc
@@ -151,7 +151,7 @@ void SharedImageStub::OnCreateSharedImageWithData(
void SharedImageStub::OnCreateGMBSharedImage(
GpuChannelMsg_CreateGMBSharedImage_Params params) {
- TRACE_EVENT2("gpu", "SharedImageStub::OnCreateSharedImage", "width",
+ TRACE_EVENT2("gpu", "SharedImageStub::OnCreateGMBSharedImage", "width",
params.size.width(), "height", params.size.height());
if (!params.mailbox.IsSharedImage()) {
LOG(ERROR) << "SharedImageStub: Trying to create a SharedImage with a "
@@ -186,7 +186,7 @@ void SharedImageStub::OnCreateGMBSharedImage(
void SharedImageStub::OnUpdateSharedImage(const Mailbox& mailbox,
uint32_t release_id) {
- TRACE_EVENT0("gpu", "SharedImageStub::OnDestroySharedImage");
+ TRACE_EVENT0("gpu", "SharedImageStub::OnUpdateSharedImage");
if (!mailbox.IsSharedImage()) {
LOG(ERROR) << "SharedImageStub: Trying to access a SharedImage with a "
"non-SharedImage mailbox.";
diff --git a/chromium/gpu/ipc/service/stream_texture_android.cc b/chromium/gpu/ipc/service/stream_texture_android.cc
index a1dc375f366..1b66ba80100 100644
--- a/chromium/gpu/ipc/service/stream_texture_android.cc
+++ b/chromium/gpu/ipc/service/stream_texture_android.cc
@@ -28,8 +28,10 @@ using gles2::TextureRef;
bool StreamTexture::Create(CommandBufferStub* owner_stub,
uint32_t client_texture_id,
int stream_id) {
- TextureManager* texture_manager =
- owner_stub->context_group()->texture_manager();
+ gles2::ContextGroup* context_group =
+ owner_stub->decoder_context()->GetContextGroup();
+ DCHECK(context_group);
+ TextureManager* texture_manager = context_group->texture_manager();
TextureRef* texture = texture_manager->GetTexture(client_texture_id);
if (texture && (!texture->texture()->target() ||
@@ -159,8 +161,10 @@ bool StreamTexture::CopyTexImage(unsigned target) {
UpdateTexImage();
- TextureManager* texture_manager =
- owner_stub_->context_group()->texture_manager();
+ gles2::ContextGroup* context_group =
+ owner_stub_->decoder_context()->GetContextGroup();
+ DCHECK(context_group);
+ TextureManager* texture_manager = context_group->texture_manager();
gles2::Texture* texture =
texture_manager->GetTextureForServiceId(texture_id_);
if (texture) {
@@ -219,7 +223,12 @@ void StreamTexture::OnForwardForSurfaceRequest(
surface_owner_.get());
}
+StreamTexture::BindOrCopy StreamTexture::ShouldBindOrCopy() {
+ return COPY;
+}
+
bool StreamTexture::BindTexImage(unsigned target) {
+ NOTREACHED();
return false;
}
diff --git a/chromium/gpu/ipc/service/stream_texture_android.h b/chromium/gpu/ipc/service/stream_texture_android.h
index a284ae66d15..0fbac37eda4 100644
--- a/chromium/gpu/ipc/service/stream_texture_android.h
+++ b/chromium/gpu/ipc/service/stream_texture_android.h
@@ -46,6 +46,7 @@ class StreamTexture : public gpu::gles2::GLStreamTextureImage,
// gl::GLImage implementation:
gfx::Size GetSize() override;
unsigned GetInternalFormat() override;
+ BindOrCopy ShouldBindOrCopy() override;
bool BindTexImage(unsigned target) override;
void ReleaseTexImage(unsigned target) override;
bool CopyTexImage(unsigned target) override;
diff --git a/chromium/gpu/ipc/service/webgpu_command_buffer_stub.cc b/chromium/gpu/ipc/service/webgpu_command_buffer_stub.cc
index 5d5ad50c9ec..79c1a6604fd 100644
--- a/chromium/gpu/ipc/service/webgpu_command_buffer_stub.cc
+++ b/chromium/gpu/ipc/service/webgpu_command_buffer_stub.cc
@@ -101,31 +101,10 @@ gpu::ContextResult WebGPUCommandBufferStub::Initialize(
share_group_ = manager->share_group();
use_virtualized_gl_context_ = false;
- TransferBufferManager* transfer_buffer_manager;
- // TODO: all of this is necessary to get a transfer buffer manager - we would
- // prefer to create a standalone one instead.
- {
- scoped_refptr<gles2::FeatureInfo> feature_info = new gles2::FeatureInfo(
- manager->gpu_driver_bug_workarounds(), manager->gpu_feature_info());
- gpu::GpuMemoryBufferFactory* gmb_factory =
- manager->gpu_memory_buffer_factory();
- context_group_ = new gles2::ContextGroup(
- manager->gpu_preferences(), gles2::PassthroughCommandDecoderSupported(),
- manager->mailbox_manager(), CreateMemoryTracker(init_params),
- manager->shader_translator_cache(),
- manager->framebuffer_completeness_cache(), feature_info,
- init_params.attribs.bind_generates_resource, channel_->image_manager(),
- gmb_factory ? gmb_factory->AsImageFactory() : nullptr,
- manager->watchdog() /* progress_reporter */,
- manager->gpu_feature_info(), manager->discardable_manager(),
- manager->passthrough_discardable_manager(),
- manager->shared_image_manager());
-
- transfer_buffer_manager = context_group_->transfer_buffer_manager();
- }
+ memory_tracker_ = CreateMemoryTracker(init_params);
command_buffer_ =
- std::make_unique<CommandBufferService>(this, transfer_buffer_manager);
+ std::make_unique<CommandBufferService>(this, memory_tracker_.get());
std::unique_ptr<webgpu::WebGPUDecoder> decoder(webgpu::WebGPUDecoder::Create(
this, command_buffer_.get(), manager->outputter()));
@@ -167,13 +146,12 @@ gpu::ContextResult WebGPUCommandBufferStub::Initialize(
#endif // defined(OS_FUCHSIA)
}
-// WebGPUInterface clients should not manipulate the front buffer.
-void WebGPUCommandBufferStub::OnTakeFrontBuffer(const Mailbox& mailbox) {
- LOG(ERROR) << "Called WebGPUCommandBufferStub::OnTakeFrontBuffer";
+MemoryTracker* WebGPUCommandBufferStub::GetMemoryTracker() const {
+ return memory_tracker_.get();
}
-void WebGPUCommandBufferStub::OnReturnFrontBuffer(const Mailbox& mailbox,
- bool is_lost) {
- LOG(ERROR) << "Called WebGPUCommandBufferStub::OnReturnFrontBuffer";
+
+bool WebGPUCommandBufferStub::HandleMessage(const IPC::Message& message) {
+ return false;
}
void WebGPUCommandBufferStub::OnSwapBuffers(uint64_t swap_id, uint32_t flags) {}
diff --git a/chromium/gpu/ipc/service/webgpu_command_buffer_stub.h b/chromium/gpu/ipc/service/webgpu_command_buffer_stub.h
index cc74b5dfac9..b0f768f2b55 100644
--- a/chromium/gpu/ipc/service/webgpu_command_buffer_stub.h
+++ b/chromium/gpu/ipc/service/webgpu_command_buffer_stub.h
@@ -27,12 +27,14 @@ class GPU_IPC_SERVICE_EXPORT WebGPUCommandBufferStub
CommandBufferStub* share_group,
const GPUCreateCommandBufferConfig& init_params,
base::UnsafeSharedMemoryRegion shared_state_shm) override;
+ MemoryTracker* GetMemoryTracker() const override;
private:
- void OnTakeFrontBuffer(const Mailbox& mailbox) override;
- void OnReturnFrontBuffer(const Mailbox& mailbox, bool is_lost) override;
+ bool HandleMessage(const IPC::Message& message) override;
void OnSwapBuffers(uint64_t swap_id, uint32_t flags) override;
+ std::unique_ptr<MemoryTracker> memory_tracker_;
+
DISALLOW_COPY_AND_ASSIGN(WebGPUCommandBufferStub);
};
diff --git a/chromium/gpu/ipc/webgpu_in_process_context.cc b/chromium/gpu/ipc/webgpu_in_process_context.cc
index f4280f86f79..2fd9c7fe3f5 100644
--- a/chromium/gpu/ipc/webgpu_in_process_context.cc
+++ b/chromium/gpu/ipc/webgpu_in_process_context.cc
@@ -81,9 +81,10 @@ ContextResult WebGPUInProcessContext::Initialize(
}
transfer_buffer_ = std::make_unique<TransferBuffer>(webgpu_helper.get());
- webgpu_implementation_ =
- std::make_unique<webgpu::WebGPUImplementation>(webgpu_helper.get());
+ webgpu_implementation_ = std::make_unique<webgpu::WebGPUImplementation>(
+ webgpu_helper.get(), transfer_buffer_.get(), command_buffer_.get());
helper_ = std::move(webgpu_helper);
+ webgpu_implementation_->Initialize(memory_limits);
return result;
}
diff --git a/chromium/gpu/perftests/texture_upload_perftest.cc b/chromium/gpu/perftests/texture_upload_perftest.cc
index ff91cff173b..e7be34d957f 100644
--- a/chromium/gpu/perftests/texture_upload_perftest.cc
+++ b/chromium/gpu/perftests/texture_upload_perftest.cc
@@ -64,7 +64,9 @@ SHADER(
// clang-format on
void CheckNoGlError(const std::string& msg) {
- CHECK_EQ(static_cast<GLenum>(GL_NO_ERROR), glGetError()) << " " << msg;
+ const GLenum error = glGetError();
+ CHECK_EQ(static_cast<GLenum>(GL_NO_ERROR), error)
+ << msg << " " << gl::GLEnums::GetStringError(error);
}
// Utility function to compile a shader from a string.
diff --git a/chromium/gpu/tools/compositor_model_bench/render_tree.cc b/chromium/gpu/tools/compositor_model_bench/render_tree.cc
index d74b45f96bf..4b805dd4bd4 100644
--- a/chromium/gpu/tools/compositor_model_bench/render_tree.cc
+++ b/chromium/gpu/tools/compositor_model_bench/render_tree.cc
@@ -456,9 +456,10 @@ std::unique_ptr<RenderNode> BuildRenderTreeFromFile(
int error_code = 0;
std::string error_message;
- std::unique_ptr<base::DictionaryValue> root = base::DictionaryValue::From(
- JSONReader::ReadAndReturnError(contents, base::JSON_ALLOW_TRAILING_COMMAS,
- &error_code, &error_message));
+ std::unique_ptr<base::DictionaryValue> root =
+ base::DictionaryValue::From(JSONReader::ReadAndReturnErrorDeprecated(
+ contents, base::JSON_ALLOW_TRAILING_COMMAS, &error_code,
+ &error_message));
if (!root) {
if (error_code) {
LOG(ERROR) << "Failed to parse JSON file " << path.LossyDisplayName()
diff --git a/chromium/gpu/vulkan/android/vulkan_android_unittests.cc b/chromium/gpu/vulkan/android/vulkan_android_unittests.cc
index a27c8e089af..cee9d41e885 100644
--- a/chromium/gpu/vulkan/android/vulkan_android_unittests.cc
+++ b/chromium/gpu/vulkan/android/vulkan_android_unittests.cc
@@ -82,15 +82,8 @@ TEST_F(VulkanImplementationAndroidTest, ExportImportSyncFd) {
// signal operation pending execution before the export.
// Semaphores can be signaled by including them in a batch as part of a queue
// submission command, defining a queue operation to signal that semaphore.
- unsigned int submit_count = 1;
- VkFence fence = VK_NULL_HANDLE;
- VkSubmitInfo submit_info = {VK_STRUCTURE_TYPE_SUBMIT_INFO};
- submit_info.signalSemaphoreCount = 1;
- submit_info.pSignalSemaphores = &semaphore1;
- result =
- vkQueueSubmit(vk_context_provider_->GetDeviceQueue()->GetVulkanQueue(),
- submit_count, &submit_info, fence);
- EXPECT_EQ(result, VK_SUCCESS);
+ EXPECT_TRUE(vk_implementation_->SubmitSignalSemaphore(
+ vk_context_provider_->GetDeviceQueue()->GetVulkanQueue(), semaphore1));
// Export a sync fd from the semaphore.
base::ScopedFD sync_fd;
diff --git a/chromium/gpu/vulkan/android/vulkan_implementation_android.cc b/chromium/gpu/vulkan/android/vulkan_implementation_android.cc
index 275fb419a90..58722673053 100644
--- a/chromium/gpu/vulkan/android/vulkan_implementation_android.cc
+++ b/chromium/gpu/vulkan/android/vulkan_implementation_android.cc
@@ -48,8 +48,8 @@ bool VulkanImplementationAndroid::InitializeVulkanInstance() {
return true;
}
-VkInstance VulkanImplementationAndroid::GetVulkanInstance() {
- return vulkan_instance_.vk_instance();
+VulkanInstance* VulkanImplementationAndroid::GetVulkanInstance() {
+ return &vulkan_instance_;
}
std::unique_ptr<VulkanSurface> VulkanImplementationAndroid::CreateViewSurface(
@@ -59,13 +59,14 @@ std::unique_ptr<VulkanSurface> VulkanImplementationAndroid::CreateViewSurface(
surface_create_info.sType = VK_STRUCTURE_TYPE_ANDROID_SURFACE_CREATE_INFO_KHR;
surface_create_info.window = window;
VkResult result = vkCreateAndroidSurfaceKHR_(
- GetVulkanInstance(), &surface_create_info, nullptr, &surface);
+ vulkan_instance_.vk_instance(), &surface_create_info, nullptr, &surface);
if (VK_SUCCESS != result) {
DLOG(ERROR) << "vkCreateAndroidSurfaceKHR() failed: " << result;
return nullptr;
}
- return std::make_unique<VulkanSurface>(GetVulkanInstance(), surface);
+ return std::make_unique<VulkanSurface>(vulkan_instance_.vk_instance(),
+ surface);
}
bool VulkanImplementationAndroid::GetPhysicalDevicePresentationSupport(
diff --git a/chromium/gpu/vulkan/android/vulkan_implementation_android.h b/chromium/gpu/vulkan/android/vulkan_implementation_android.h
index 145320defe2..ff66a7775a0 100644
--- a/chromium/gpu/vulkan/android/vulkan_implementation_android.h
+++ b/chromium/gpu/vulkan/android/vulkan_implementation_android.h
@@ -23,7 +23,7 @@ class COMPONENT_EXPORT(VULKAN_ANDROID) VulkanImplementationAndroid
// VulkanImplementation:
bool InitializeVulkanInstance() override;
- VkInstance GetVulkanInstance() override;
+ VulkanInstance* GetVulkanInstance() override;
std::unique_ptr<VulkanSurface> CreateViewSurface(
gfx::AcceleratedWidget window) override;
bool GetPhysicalDevicePresentationSupport(
diff --git a/chromium/gpu/vulkan/generate_bindings.py b/chromium/gpu/vulkan/generate_bindings.py
index 66ecfa1698f..887877ad1d1 100755
--- a/chromium/gpu/vulkan/generate_bindings.py
+++ b/chromium/gpu/vulkan/generate_bindings.py
@@ -14,6 +14,7 @@ from subprocess import call
VULKAN_UNASSOCIATED_FUNCTIONS = [
# vkGetInstanceProcAddr belongs here but is handled specially.
+# vkEnumerateInstanceVersion belongs here but is handled specially.
{ 'name': 'vkCreateInstance' },
{ 'name': 'vkEnumerateInstanceExtensionProperties' },
{ 'name': 'vkEnumerateInstanceLayerProperties' },
@@ -70,17 +71,25 @@ VULKAN_DEVICE_FUNCTIONS = [
{ 'name': 'vkFreeMemory' },
{ 'name': 'vkGetDeviceQueue' },
{ 'name': 'vkGetFenceStatus' },
+{ 'name': 'vkGetImageMemoryRequirements' },
{ 'name': 'vkResetFences' },
{ 'name': 'vkUpdateDescriptorSets' },
{ 'name': 'vkWaitForFences' },
]
VULKAN_DEVICE_FUNCTIONS_ANDROID = [
-{ 'name': 'vkImportSemaphoreFdKHR' },
{ 'name': 'vkGetAndroidHardwareBufferPropertiesANDROID' },
+{ 'name': 'vkImportSemaphoreFdKHR' },
+]
+
+VULKAN_DEVICE_FUNCTIONS_LINUX_OR_ANDROID = [
{ 'name': 'vkGetSemaphoreFdKHR' },
]
+VULKAN_DEVICE_FUNCTIONS_LINUX = [
+{ 'name': 'vkGetMemoryFdKHR'},
+]
+
VULKAN_QUEUE_FUNCTIONS = [
{ 'name': 'vkQueueSubmit' },
{ 'name': 'vkQueueWaitIdle' },
@@ -133,7 +142,9 @@ def WriteMacros(file, functions):
def GenerateHeaderFile(file, unassociated_functions, instance_functions,
physical_device_functions, device_functions,
- device_functions_android, queue_functions,
+ device_functions_android,
+ device_functions_linux_or_android,
+ device_functions_linux, queue_functions,
command_buffer_functions, swapchain_functions):
"""Generates gpu/vulkan/vulkan_function_pointers.h"""
@@ -172,6 +183,7 @@ struct VulkanFunctionPointers {
base::NativeLibrary vulkan_loader_library_ = nullptr;
// Unassociated functions
+ PFN_vkEnumerateInstanceVersion vkEnumerateInstanceVersionFn = nullptr;
PFN_vkGetInstanceProcAddr vkGetInstanceProcAddrFn = nullptr;
""")
@@ -219,6 +231,30 @@ struct VulkanFunctionPointers {
file.write("""\
+ // Device functions shared between Linux and Android.
+#if defined(OS_LINUX) || defined(OS_ANDROID)
+""")
+
+ WriteFunctionDeclarations(file, device_functions_linux_or_android)
+
+ file.write("""\
+#endif
+""")
+
+ file.write("""\
+
+ // Linux-only device functions.
+#if defined(OS_LINUX)
+""")
+
+ WriteFunctionDeclarations(file, device_functions_linux)
+
+ file.write("""\
+#endif
+""")
+
+ file.write("""\
+
// Queue functions
""")
@@ -289,6 +325,28 @@ struct VulkanFunctionPointers {
file.write("""\
+#if defined(OS_LINUX) || defined(OS_ANDROID)
+""")
+
+ WriteMacros(file, device_functions_linux_or_android)
+
+ file.write("""\
+#endif
+""")
+
+ file.write("""\
+
+#if defined(OS_LINUX)
+""")
+
+ WriteMacros(file, device_functions_linux)
+
+ file.write("""\
+#endif
+""")
+
+ file.write("""\
+
// Queue functions
""")
@@ -339,7 +397,9 @@ def WriteDeviceFunctionPointerInitialization(file, functions):
def GenerateSourceFile(file, unassociated_functions, instance_functions,
physical_device_functions, device_functions,
- device_functions_android, queue_functions,
+ device_functions_android,
+ device_functions_linux_or_android,
+ device_functions_linux, queue_functions,
command_buffer_functions, swapchain_functions):
"""Generates gpu/vulkan/vulkan_function_pointers.cc"""
@@ -370,6 +430,11 @@ bool VulkanFunctionPointers::BindUnassociatedFunctionPointers() {
if (!vkGetInstanceProcAddrFn)
return false;
+ vkEnumerateInstanceVersionFn =
+ reinterpret_cast<PFN_vkEnumerateInstanceVersion>(
+ vkGetInstanceProcAddrFn(nullptr, "vkEnumerateInstanceVersion"));
+ // vkEnumerateInstanceVersion didn't exist in Vulkan 1.0, so we should
+ // proceed even if we fail to get vkEnumerateInstanceVersion pointer.
""")
WriteUnassociatedFunctionPointerInitialization(file, unassociated_functions)
@@ -420,6 +485,32 @@ bool VulkanFunctionPointers::BindDeviceFunctionPointers(VkDevice vk_device) {
file.write("""\
+#if defined(OS_LINUX) || defined(OS_ANDROID)
+
+""")
+
+ WriteDeviceFunctionPointerInitialization(file,
+ device_functions_linux_or_android)
+
+ file.write("""\
+#endif
+""")
+
+ file.write("""\
+
+#if defined(OS_LINUX)
+
+""")
+
+ WriteDeviceFunctionPointerInitialization(file,
+ device_functions_linux)
+
+ file.write("""\
+#endif
+""")
+
+ file.write("""\
+
// Queue functions
""")
WriteDeviceFunctionPointerInitialization(file, queue_functions)
@@ -472,6 +563,8 @@ def main(argv):
VULKAN_INSTANCE_FUNCTIONS,
VULKAN_PHYSICAL_DEVICE_FUNCTIONS, VULKAN_DEVICE_FUNCTIONS,
VULKAN_DEVICE_FUNCTIONS_ANDROID,
+ VULKAN_DEVICE_FUNCTIONS_LINUX_OR_ANDROID,
+ VULKAN_DEVICE_FUNCTIONS_LINUX,
VULKAN_QUEUE_FUNCTIONS, VULKAN_COMMAND_BUFFER_FUNCTIONS,
VULKAN_SWAPCHAIN_FUNCTIONS)
header_file.close()
@@ -483,6 +576,8 @@ def main(argv):
VULKAN_INSTANCE_FUNCTIONS,
VULKAN_PHYSICAL_DEVICE_FUNCTIONS, VULKAN_DEVICE_FUNCTIONS,
VULKAN_DEVICE_FUNCTIONS_ANDROID,
+ VULKAN_DEVICE_FUNCTIONS_LINUX_OR_ANDROID,
+ VULKAN_DEVICE_FUNCTIONS_LINUX,
VULKAN_QUEUE_FUNCTIONS, VULKAN_COMMAND_BUFFER_FUNCTIONS,
VULKAN_SWAPCHAIN_FUNCTIONS)
source_file.close()
diff --git a/chromium/gpu/vulkan/vulkan_command_buffer.cc b/chromium/gpu/vulkan/vulkan_command_buffer.cc
index e166496a213..d9c5a4ddd20 100644
--- a/chromium/gpu/vulkan/vulkan_command_buffer.cc
+++ b/chromium/gpu/vulkan/vulkan_command_buffer.cc
@@ -167,7 +167,7 @@ CommandBufferRecorderBase::~CommandBufferRecorderBase() {
if (VK_SUCCESS != result) {
DLOG(ERROR) << "vkEndCommandBuffer() failed: " << result;
}
-};
+}
ScopedMultiUseCommandBufferRecorder::ScopedMultiUseCommandBufferRecorder(
VulkanCommandBuffer& command_buffer)
diff --git a/chromium/gpu/vulkan/vulkan_function_pointers.cc b/chromium/gpu/vulkan/vulkan_function_pointers.cc
index ef19bfbb3fb..90fb824f455 100644
--- a/chromium/gpu/vulkan/vulkan_function_pointers.cc
+++ b/chromium/gpu/vulkan/vulkan_function_pointers.cc
@@ -32,6 +32,11 @@ bool VulkanFunctionPointers::BindUnassociatedFunctionPointers() {
if (!vkGetInstanceProcAddrFn)
return false;
+ vkEnumerateInstanceVersionFn =
+ reinterpret_cast<PFN_vkEnumerateInstanceVersion>(
+ vkGetInstanceProcAddrFn(nullptr, "vkEnumerateInstanceVersion"));
+ // vkEnumerateInstanceVersion didn't exist in Vulkan 1.0, so we should
+ // proceed even if we fail to get vkEnumerateInstanceVersion pointer.
vkCreateInstanceFn = reinterpret_cast<PFN_vkCreateInstance>(
vkGetInstanceProcAddrFn(nullptr, "vkCreateInstance"));
if (!vkCreateInstanceFn)
@@ -268,6 +273,12 @@ bool VulkanFunctionPointers::BindDeviceFunctionPointers(VkDevice vk_device) {
if (!vkGetFenceStatusFn)
return false;
+ vkGetImageMemoryRequirementsFn =
+ reinterpret_cast<PFN_vkGetImageMemoryRequirements>(
+ vkGetDeviceProcAddrFn(vk_device, "vkGetImageMemoryRequirements"));
+ if (!vkGetImageMemoryRequirementsFn)
+ return false;
+
vkResetFencesFn = reinterpret_cast<PFN_vkResetFences>(
vkGetDeviceProcAddrFn(vk_device, "vkResetFences"));
if (!vkResetFencesFn)
@@ -285,11 +296,6 @@ bool VulkanFunctionPointers::BindDeviceFunctionPointers(VkDevice vk_device) {
#if defined(OS_ANDROID)
- vkImportSemaphoreFdKHRFn = reinterpret_cast<PFN_vkImportSemaphoreFdKHR>(
- vkGetDeviceProcAddrFn(vk_device, "vkImportSemaphoreFdKHR"));
- if (!vkImportSemaphoreFdKHRFn)
- return false;
-
vkGetAndroidHardwareBufferPropertiesANDROIDFn =
reinterpret_cast<PFN_vkGetAndroidHardwareBufferPropertiesANDROID>(
vkGetDeviceProcAddrFn(vk_device,
@@ -297,6 +303,15 @@ bool VulkanFunctionPointers::BindDeviceFunctionPointers(VkDevice vk_device) {
if (!vkGetAndroidHardwareBufferPropertiesANDROIDFn)
return false;
+ vkImportSemaphoreFdKHRFn = reinterpret_cast<PFN_vkImportSemaphoreFdKHR>(
+ vkGetDeviceProcAddrFn(vk_device, "vkImportSemaphoreFdKHR"));
+ if (!vkImportSemaphoreFdKHRFn)
+ return false;
+
+#endif
+
+#if defined(OS_LINUX) || defined(OS_ANDROID)
+
vkGetSemaphoreFdKHRFn = reinterpret_cast<PFN_vkGetSemaphoreFdKHR>(
vkGetDeviceProcAddrFn(vk_device, "vkGetSemaphoreFdKHR"));
if (!vkGetSemaphoreFdKHRFn)
@@ -304,6 +319,15 @@ bool VulkanFunctionPointers::BindDeviceFunctionPointers(VkDevice vk_device) {
#endif
+#if defined(OS_LINUX)
+
+ vkGetMemoryFdKHRFn = reinterpret_cast<PFN_vkGetMemoryFdKHR>(
+ vkGetDeviceProcAddrFn(vk_device, "vkGetMemoryFdKHR"));
+ if (!vkGetMemoryFdKHRFn)
+ return false;
+
+#endif
+
// Queue functions
vkQueueSubmitFn = reinterpret_cast<PFN_vkQueueSubmit>(
vkGetDeviceProcAddrFn(vk_device, "vkQueueSubmit"));
diff --git a/chromium/gpu/vulkan/vulkan_function_pointers.h b/chromium/gpu/vulkan/vulkan_function_pointers.h
index abc4a37cf88..a06ed0ff4b4 100644
--- a/chromium/gpu/vulkan/vulkan_function_pointers.h
+++ b/chromium/gpu/vulkan/vulkan_function_pointers.h
@@ -40,6 +40,7 @@ struct VulkanFunctionPointers {
base::NativeLibrary vulkan_loader_library_ = nullptr;
// Unassociated functions
+ PFN_vkEnumerateInstanceVersion vkEnumerateInstanceVersionFn = nullptr;
PFN_vkGetInstanceProcAddr vkGetInstanceProcAddrFn = nullptr;
PFN_vkCreateInstance vkCreateInstanceFn = nullptr;
PFN_vkEnumerateInstanceExtensionProperties
@@ -100,18 +101,28 @@ struct VulkanFunctionPointers {
PFN_vkFreeMemory vkFreeMemoryFn = nullptr;
PFN_vkGetDeviceQueue vkGetDeviceQueueFn = nullptr;
PFN_vkGetFenceStatus vkGetFenceStatusFn = nullptr;
+ PFN_vkGetImageMemoryRequirements vkGetImageMemoryRequirementsFn = nullptr;
PFN_vkResetFences vkResetFencesFn = nullptr;
PFN_vkUpdateDescriptorSets vkUpdateDescriptorSetsFn = nullptr;
PFN_vkWaitForFences vkWaitForFencesFn = nullptr;
// Android only device functions.
#if defined(OS_ANDROID)
- PFN_vkImportSemaphoreFdKHR vkImportSemaphoreFdKHRFn = nullptr;
PFN_vkGetAndroidHardwareBufferPropertiesANDROID
vkGetAndroidHardwareBufferPropertiesANDROIDFn = nullptr;
+ PFN_vkImportSemaphoreFdKHR vkImportSemaphoreFdKHRFn = nullptr;
+#endif
+
+ // Device functions shared between Linux and Android.
+#if defined(OS_LINUX) || defined(OS_ANDROID)
PFN_vkGetSemaphoreFdKHR vkGetSemaphoreFdKHRFn = nullptr;
#endif
+ // Linux-only device functions.
+#if defined(OS_LINUX)
+ PFN_vkGetMemoryFdKHR vkGetMemoryFdKHRFn = nullptr;
+#endif
+
// Queue functions
PFN_vkQueueSubmit vkQueueSubmitFn = nullptr;
PFN_vkQueueWaitIdle vkQueueWaitIdleFn = nullptr;
@@ -219,21 +230,30 @@ struct VulkanFunctionPointers {
#define vkFreeMemory gpu::GetVulkanFunctionPointers()->vkFreeMemoryFn
#define vkGetDeviceQueue gpu::GetVulkanFunctionPointers()->vkGetDeviceQueueFn
#define vkGetFenceStatus gpu::GetVulkanFunctionPointers()->vkGetFenceStatusFn
+#define vkGetImageMemoryRequirements \
+ gpu::GetVulkanFunctionPointers()->vkGetImageMemoryRequirementsFn
#define vkResetFences gpu::GetVulkanFunctionPointers()->vkResetFencesFn
#define vkUpdateDescriptorSets \
gpu::GetVulkanFunctionPointers()->vkUpdateDescriptorSetsFn
#define vkWaitForFences gpu::GetVulkanFunctionPointers()->vkWaitForFencesFn
#if defined(OS_ANDROID)
-#define vkImportSemaphoreFdKHR \
- gpu::GetVulkanFunctionPointers()->vkImportSemaphoreFdKHRFn
#define vkGetAndroidHardwareBufferPropertiesANDROID \
gpu::GetVulkanFunctionPointers() \
->vkGetAndroidHardwareBufferPropertiesANDROIDFn
+#define vkImportSemaphoreFdKHR \
+ gpu::GetVulkanFunctionPointers()->vkImportSemaphoreFdKHRFn
+#endif
+
+#if defined(OS_LINUX) || defined(OS_ANDROID)
#define vkGetSemaphoreFdKHR \
gpu::GetVulkanFunctionPointers()->vkGetSemaphoreFdKHRFn
#endif
+#if defined(OS_LINUX)
+#define vkGetMemoryFdKHR gpu::GetVulkanFunctionPointers()->vkGetMemoryFdKHRFn
+#endif
+
// Queue functions
#define vkQueueSubmit gpu::GetVulkanFunctionPointers()->vkQueueSubmitFn
#define vkQueueWaitIdle gpu::GetVulkanFunctionPointers()->vkQueueWaitIdleFn
diff --git a/chromium/gpu/vulkan/vulkan_implementation.cc b/chromium/gpu/vulkan/vulkan_implementation.cc
index c0a54c91db6..b8973ff6b1b 100644
--- a/chromium/gpu/vulkan/vulkan_implementation.cc
+++ b/chromium/gpu/vulkan/vulkan_implementation.cc
@@ -6,6 +6,8 @@
#include "base/bind.h"
#include "gpu/vulkan/vulkan_device_queue.h"
+#include "gpu/vulkan/vulkan_function_pointers.h"
+#include "gpu/vulkan/vulkan_instance.h"
namespace gpu {
@@ -17,7 +19,7 @@ std::unique_ptr<VulkanDeviceQueue> CreateVulkanDeviceQueue(
VulkanImplementation* vulkan_implementation,
uint32_t option) {
auto device_queue = std::make_unique<VulkanDeviceQueue>(
- vulkan_implementation->GetVulkanInstance());
+ vulkan_implementation->GetVulkanInstance()->vk_instance());
auto callback = base::BindRepeating(
&VulkanImplementation::GetPhysicalDevicePresentationSupport,
base::Unretained(vulkan_implementation));
@@ -32,4 +34,34 @@ std::unique_ptr<VulkanDeviceQueue> CreateVulkanDeviceQueue(
return device_queue;
}
+bool VulkanImplementation::SubmitSignalSemaphore(VkQueue vk_queue,
+ VkSemaphore vk_semaphore,
+ VkFence vk_fence) {
+ // Structure specifying a queue submit operation.
+ VkSubmitInfo submit_info = {VK_STRUCTURE_TYPE_SUBMIT_INFO};
+ submit_info.signalSemaphoreCount = 1;
+ submit_info.pSignalSemaphores = &vk_semaphore;
+ const unsigned int submit_count = 1;
+ if (vkQueueSubmit(vk_queue, submit_count, &submit_info, vk_fence) !=
+ VK_SUCCESS) {
+ return false;
+ }
+ return true;
+}
+
+bool VulkanImplementation::SubmitWaitSemaphore(VkQueue vk_queue,
+ VkSemaphore vk_semaphore,
+ VkFence vk_fence) {
+ // Structure specifying a queue submit operation.
+ VkSubmitInfo submit_info = {VK_STRUCTURE_TYPE_SUBMIT_INFO};
+ submit_info.waitSemaphoreCount = 1;
+ submit_info.pWaitSemaphores = &vk_semaphore;
+ const unsigned int submit_count = 1;
+ if (vkQueueSubmit(vk_queue, submit_count, &submit_info, vk_fence) !=
+ VK_SUCCESS) {
+ return false;
+ }
+ return true;
+}
+
} // namespace gpu
diff --git a/chromium/gpu/vulkan/vulkan_implementation.h b/chromium/gpu/vulkan/vulkan_implementation.h
index 78470bc5c86..0b9e7e950a9 100644
--- a/chromium/gpu/vulkan/vulkan_implementation.h
+++ b/chromium/gpu/vulkan/vulkan_implementation.h
@@ -8,7 +8,6 @@
#include <vulkan/vulkan.h>
#include <memory>
-#include "base/files/scoped_file.h"
#include "base/macros.h"
#include "build/build_config.h"
#include "gpu/vulkan/vulkan_export.h"
@@ -28,9 +27,11 @@ namespace gpu {
class VulkanDeviceQueue;
class VulkanSurface;
+class VulkanInstance;
-// This object provides factory functions for creating vulkan objects that use
-// platform-specific extensions (e.g. for creation of VkSurfaceKHR objects).
+// Base class which provides functions for creating vulkan objects for different
+// platforms that use platform-specific extensions (e.g. for creation of
+// VkSurfaceKHR objects). It also provides helper/utility functions.
class VULKAN_EXPORT VulkanImplementation {
public:
VulkanImplementation();
@@ -39,7 +40,7 @@ class VULKAN_EXPORT VulkanImplementation {
virtual bool InitializeVulkanInstance() = 0;
- virtual VkInstance GetVulkanInstance() = 0;
+ virtual VulkanInstance* GetVulkanInstance() = 0;
virtual std::unique_ptr<VulkanSurface> CreateViewSurface(
gfx::AcceleratedWidget window) = 0;
@@ -61,6 +62,20 @@ class VULKAN_EXPORT VulkanImplementation {
VkDevice vk_device,
VkFence vk_fence) = 0;
+ // Submits a semaphore to be signalled to the vulkan queue. Semaphore is
+ // signalled once this submission is executed. vk_fence is an optional handle
+ // to fence to be signaled once this submission completes execution.
+ bool SubmitSignalSemaphore(VkQueue vk_queue,
+ VkSemaphore vk_semaphore,
+ VkFence vk_fence = VK_NULL_HANDLE);
+
+ // Submits a semaphore to be waited upon to the vulkan queue. Semaphore is
+ // waited on before this submission is executed. vk_fence is an optional
+ // handle to fence to be signaled once this submission completes execution.
+ bool SubmitWaitSemaphore(VkQueue vk_queue,
+ VkSemaphore vk_semaphore,
+ VkFence vk_fence = VK_NULL_HANDLE);
+
#if defined(OS_ANDROID)
// Import a VkSemaphore from a POSIX sync file descriptor. Importing a
// semaphore payload from a file descriptor transfers ownership of the file
diff --git a/chromium/gpu/vulkan/vulkan_instance.cc b/chromium/gpu/vulkan/vulkan_instance.cc
index c733e65604e..5020258abda 100644
--- a/chromium/gpu/vulkan/vulkan_instance.cc
+++ b/chromium/gpu/vulkan/vulkan_instance.cc
@@ -56,12 +56,23 @@ bool VulkanInstance::Initialize(
if (!vulkan_function_pointers->BindUnassociatedFunctionPointers())
return false;
+ uint32_t supported_api_version = VK_MAKE_VERSION(1, 0, 0);
+ if (vulkan_function_pointers->vkEnumerateInstanceVersionFn) {
+ vulkan_function_pointers->vkEnumerateInstanceVersionFn(
+ &supported_api_version);
+ }
+
+ // Use Vulkan 1.1 if it's available.
+ api_version_ = (supported_api_version >= VK_MAKE_VERSION(1, 1, 0))
+ ? VK_MAKE_VERSION(1, 1, 0)
+ : VK_MAKE_VERSION(1, 0, 0);
+
VkResult result = VK_SUCCESS;
VkApplicationInfo app_info = {};
app_info.sType = VK_STRUCTURE_TYPE_APPLICATION_INFO;
app_info.pApplicationName = "Chromium";
- app_info.apiVersion = VK_MAKE_VERSION(1, 1, 0);
+ app_info.apiVersion = api_version_;
std::vector<const char*> enabled_extensions;
enabled_extensions.insert(std::end(enabled_extensions),
diff --git a/chromium/gpu/vulkan/vulkan_instance.h b/chromium/gpu/vulkan/vulkan_instance.h
index 19ba467f761..2762d97b59e 100644
--- a/chromium/gpu/vulkan/vulkan_instance.h
+++ b/chromium/gpu/vulkan/vulkan_instance.h
@@ -29,6 +29,9 @@ class VULKAN_EXPORT VulkanInstance {
bool Initialize(const std::vector<const char*>& required_extensions,
const std::vector<const char*>& required_layers);
+ // VkApplicationInfo.apiVersion value used to initialize the instance.
+ uint32_t api_version() const { return api_version_; }
+
const gfx::ExtensionSet& enabled_extensions() const {
return enabled_extensions_;
}
@@ -38,6 +41,8 @@ class VULKAN_EXPORT VulkanInstance {
private:
void Destroy();
+ uint32_t api_version_;
+
VkInstance vk_instance_ = VK_NULL_HANDLE;
gfx::ExtensionSet enabled_extensions_;
bool debug_report_enabled_ = false;
diff --git a/chromium/gpu/vulkan/vulkan_surface.cc b/chromium/gpu/vulkan/vulkan_surface.cc
index d8d42742575..7ae845b417f 100644
--- a/chromium/gpu/vulkan/vulkan_surface.cc
+++ b/chromium/gpu/vulkan/vulkan_surface.cc
@@ -85,7 +85,7 @@ bool VulkanSurface::Initialize(VulkanDeviceQueue* device_queue,
if (formats.size() == 1 && VK_FORMAT_UNDEFINED == formats[0].format) {
surface_format_.format = preferred_formats[0];
- surface_format_.colorSpace = VK_COLORSPACE_SRGB_NONLINEAR_KHR;
+ surface_format_.colorSpace = VK_COLOR_SPACE_SRGB_NONLINEAR_KHR;
} else {
bool format_set = false;
for (VkSurfaceFormatKHR supported_format : formats) {
@@ -93,7 +93,7 @@ bool VulkanSurface::Initialize(VulkanDeviceQueue* device_queue,
while (counter < size && format_set == false) {
if (supported_format.format == preferred_formats[counter]) {
surface_format_ = supported_format;
- surface_format_.colorSpace = VK_COLORSPACE_SRGB_NONLINEAR_KHR;
+ surface_format_.colorSpace = VK_COLOR_SPACE_SRGB_NONLINEAR_KHR;
format_set = true;
}
counter++;
diff --git a/chromium/gpu/vulkan/win32/vulkan_implementation_win32.cc b/chromium/gpu/vulkan/win32/vulkan_implementation_win32.cc
index 2530aafd5d3..f0a6bec43c5 100644
--- a/chromium/gpu/vulkan/win32/vulkan_implementation_win32.cc
+++ b/chromium/gpu/vulkan/win32/vulkan_implementation_win32.cc
@@ -55,8 +55,8 @@ bool VulkanImplementationWin32::InitializeVulkanInstance() {
return true;
}
-VkInstance VulkanImplementationWin32::GetVulkanInstance() {
- return vulkan_instance_.vk_instance();
+VulkanInstance* VulkanImplementationWin32::GetVulkanInstance() {
+ return &vulkan_instance_;
}
std::unique_ptr<VulkanSurface> VulkanImplementationWin32::CreateViewSurface(
@@ -68,13 +68,14 @@ std::unique_ptr<VulkanSurface> VulkanImplementationWin32::CreateViewSurface(
reinterpret_cast<HINSTANCE>(GetWindowLongPtr(window, GWLP_HINSTANCE));
surface_create_info.hwnd = window;
VkResult result = vkCreateWin32SurfaceKHR_(
- GetVulkanInstance(), &surface_create_info, nullptr, &surface);
+ vulkan_instance_.vk_instance(), &surface_create_info, nullptr, &surface);
if (VK_SUCCESS != result) {
DLOG(ERROR) << "vkCreatWin32SurfaceKHR() failed: " << result;
return nullptr;
}
- return std::make_unique<VulkanSurface>(GetVulkanInstance(), surface);
+ return std::make_unique<VulkanSurface>(vulkan_instance_.vk_instance(),
+ surface);
}
bool VulkanImplementationWin32::GetPhysicalDevicePresentationSupport(
diff --git a/chromium/gpu/vulkan/win32/vulkan_implementation_win32.h b/chromium/gpu/vulkan/win32/vulkan_implementation_win32.h
index 43c3352a0e3..dde99603b0a 100644
--- a/chromium/gpu/vulkan/win32/vulkan_implementation_win32.h
+++ b/chromium/gpu/vulkan/win32/vulkan_implementation_win32.h
@@ -21,7 +21,7 @@ class COMPONENT_EXPORT(VULKAN_WIN32) VulkanImplementationWin32
// VulkanImplementation:
bool InitializeVulkanInstance() override;
- VkInstance GetVulkanInstance() override;
+ VulkanInstance* GetVulkanInstance() override;
std::unique_ptr<VulkanSurface> CreateViewSurface(
gfx::AcceleratedWidget window) override;
bool GetPhysicalDevicePresentationSupport(
diff --git a/chromium/gpu/vulkan/x/vulkan_implementation_x11.cc b/chromium/gpu/vulkan/x/vulkan_implementation_x11.cc
index 1f390e7c99e..c9ea14b7360 100644
--- a/chromium/gpu/vulkan/x/vulkan_implementation_x11.cc
+++ b/chromium/gpu/vulkan/x/vulkan_implementation_x11.cc
@@ -61,8 +61,8 @@ bool VulkanImplementationX11::InitializeVulkanInstance() {
return true;
}
-VkInstance VulkanImplementationX11::GetVulkanInstance() {
- return vulkan_instance_.vk_instance();
+VulkanInstance* VulkanImplementationX11::GetVulkanInstance() {
+ return &vulkan_instance_;
}
std::unique_ptr<VulkanSurface> VulkanImplementationX11::CreateViewSurface(
@@ -73,13 +73,14 @@ std::unique_ptr<VulkanSurface> VulkanImplementationX11::CreateViewSurface(
surface_create_info.dpy = x_display_;
surface_create_info.window = window;
VkResult result = vkCreateXlibSurfaceKHR_(
- GetVulkanInstance(), &surface_create_info, nullptr, &surface);
+ vulkan_instance_.vk_instance(), &surface_create_info, nullptr, &surface);
if (VK_SUCCESS != result) {
DLOG(ERROR) << "vkCreateXlibSurfaceKHR() failed: " << result;
return nullptr;
}
- return std::make_unique<VulkanSurface>(GetVulkanInstance(), surface);
+ return std::make_unique<VulkanSurface>(vulkan_instance_.vk_instance(),
+ surface);
}
bool VulkanImplementationX11::GetPhysicalDevicePresentationSupport(
@@ -94,7 +95,9 @@ bool VulkanImplementationX11::GetPhysicalDevicePresentationSupport(
std::vector<const char*>
VulkanImplementationX11::GetRequiredDeviceExtensions() {
- return {VK_KHR_SWAPCHAIN_EXTENSION_NAME};
+ return {VK_KHR_SWAPCHAIN_EXTENSION_NAME,
+ VK_KHR_EXTERNAL_MEMORY_FD_EXTENSION_NAME,
+ VK_KHR_EXTERNAL_SEMAPHORE_FD_EXTENSION_NAME};
}
VkFence VulkanImplementationX11::CreateVkFenceForGpuFence(VkDevice vk_device) {
diff --git a/chromium/gpu/vulkan/x/vulkan_implementation_x11.h b/chromium/gpu/vulkan/x/vulkan_implementation_x11.h
index 49ed24452d3..d6e6ff7ffa0 100644
--- a/chromium/gpu/vulkan/x/vulkan_implementation_x11.h
+++ b/chromium/gpu/vulkan/x/vulkan_implementation_x11.h
@@ -23,7 +23,7 @@ class COMPONENT_EXPORT(VULKAN_X11) VulkanImplementationX11
// VulkanImplementation:
bool InitializeVulkanInstance() override;
- VkInstance GetVulkanInstance() override;
+ VulkanInstance* GetVulkanInstance() override;
std::unique_ptr<VulkanSurface> CreateViewSurface(
gfx::AcceleratedWidget window) override;
bool GetPhysicalDevicePresentationSupport(