author    Allan Sandfeld Jensen <allan.jensen@qt.io>  2019-02-13 16:23:34 +0100
committer Allan Sandfeld Jensen <allan.jensen@qt.io>  2019-02-14 10:37:21 +0000
commit    38a9a29f4f9436cace7f0e7abf9c586057df8a4e (patch)
tree      c4e8c458dc595bc0ddb435708fa2229edfd00bd4 /chromium/gpu
parent    e684a3455bcc29a6e3e66a004e352dea4e1141e7 (diff)
download  qtwebengine-chromium-38a9a29f4f9436cace7f0e7abf9c586057df8a4e.tar.gz
BASELINE: Update Chromium to 73.0.3683.37
Change-Id: I08c9af2948b645f671e5d933aca1f7a90ea372f2
Reviewed-by: Michael Brüning <michael.bruning@qt.io>
Diffstat (limited to 'chromium/gpu')
-rw-r--r-- chromium/gpu/BUILD.gn 8
-rw-r--r-- chromium/gpu/GLES2/gl2chromium_autogen.h 8
-rw-r--r-- chromium/gpu/angle_end2end_tests_main.cc 7
-rw-r--r-- chromium/gpu/command_buffer/build_cmd_buffer_lib.py 16
-rwxr-xr-x chromium/gpu/command_buffer/build_gles2_cmd_buffer.py 155
-rwxr-xr-x chromium/gpu/command_buffer/build_raster_cmd_buffer.py 126
-rw-r--r-- chromium/gpu/command_buffer/client/BUILD.gn 12
-rw-r--r-- chromium/gpu/command_buffer/client/buffer_tracker.h 3
-rw-r--r-- chromium/gpu/command_buffer/client/buffer_tracker_unittest.cc 2
-rw-r--r-- chromium/gpu/command_buffer/client/client_discardable_manager.cc 11
-rw-r--r-- chromium/gpu/command_buffer/client/client_discardable_manager.h 5
-rw-r--r-- chromium/gpu/command_buffer/client/client_discardable_manager_unittest.cc 8
-rw-r--r-- chromium/gpu/command_buffer/client/client_font_manager.cc 60
-rw-r--r-- chromium/gpu/command_buffer/client/client_font_manager.h 2
-rw-r--r-- chromium/gpu/command_buffer/client/client_test_helper.cc 4
-rw-r--r-- chromium/gpu/command_buffer/client/client_test_helper.h 6
-rw-r--r-- chromium/gpu/command_buffer/client/client_transfer_cache.cc 6
-rw-r--r-- chromium/gpu/command_buffer/client/client_transfer_cache.h 6
-rw-r--r-- chromium/gpu/command_buffer/client/cmd_buffer_helper.cc 2
-rw-r--r-- chromium/gpu/command_buffer/client/cmd_buffer_helper.h 4
-rw-r--r-- chromium/gpu/command_buffer/client/command_buffer_direct_locked.cc 2
-rw-r--r-- chromium/gpu/command_buffer/client/command_buffer_direct_locked.h 3
-rw-r--r-- chromium/gpu/command_buffer/client/context_support.h 2
-rw-r--r-- chromium/gpu/command_buffer/client/fenced_allocator.cc 56
-rw-r--r-- chromium/gpu/command_buffer/client/fenced_allocator.h 43
-rw-r--r-- chromium/gpu/command_buffer/client/gles2_c_lib_autogen.h 108
-rw-r--r-- chromium/gpu/command_buffer/client/gles2_cmd_helper_autogen.h 140
-rw-r--r-- chromium/gpu/command_buffer/client/gles2_implementation.cc 521
-rw-r--r-- chromium/gpu/command_buffer/client/gles2_implementation.h 41
-rw-r--r-- chromium/gpu/command_buffer/client/gles2_implementation_autogen.h 62
-rw-r--r-- chromium/gpu/command_buffer/client/gles2_implementation_impl_autogen.h 104
-rw-r--r-- chromium/gpu/command_buffer/client/gles2_implementation_unittest.cc 110
-rw-r--r-- chromium/gpu/command_buffer/client/gles2_implementation_unittest_autogen.h 13
-rw-r--r-- chromium/gpu/command_buffer/client/gles2_interface_autogen.h 55
-rw-r--r-- chromium/gpu/command_buffer/client/gles2_interface_stub_autogen.h 57
-rw-r--r-- chromium/gpu/command_buffer/client/gles2_interface_stub_impl_autogen.h 58
-rw-r--r-- chromium/gpu/command_buffer/client/gles2_trace_implementation_autogen.h 57
-rw-r--r-- chromium/gpu/command_buffer/client/gles2_trace_implementation_impl_autogen.h 95
-rw-r--r-- chromium/gpu/command_buffer/client/gpu_control.h 8
-rw-r--r-- chromium/gpu/command_buffer/client/implementation_base.cc 4
-rw-r--r-- chromium/gpu/command_buffer/client/implementation_base.h 4
-rw-r--r-- chromium/gpu/command_buffer/client/logging.h 19
-rw-r--r-- chromium/gpu/command_buffer/client/mapped_memory.cc 16
-rw-r--r-- chromium/gpu/command_buffer/client/mapped_memory.h 43
-rw-r--r-- chromium/gpu/command_buffer/client/mock_transfer_buffer.cc 12
-rw-r--r-- chromium/gpu/command_buffer/client/mock_transfer_buffer.h 14
-rw-r--r-- chromium/gpu/command_buffer/client/program_info_manager.cc 88
-rw-r--r-- chromium/gpu/command_buffer/client/program_info_manager.h 8
-rw-r--r-- chromium/gpu/command_buffer/client/program_info_manager_unittest.cc 25
-rw-r--r-- chromium/gpu/command_buffer/client/query_tracker.cc 4
-rw-r--r-- chromium/gpu/command_buffer/client/query_tracker.h 6
-rw-r--r-- chromium/gpu/command_buffer/client/query_tracker_unittest.cc 9
-rw-r--r-- chromium/gpu/command_buffer/client/raster_cmd_helper_autogen.h 57
-rw-r--r-- chromium/gpu/command_buffer/client/raster_implementation.cc 189
-rw-r--r-- chromium/gpu/command_buffer/client/raster_implementation.h 32
-rw-r--r-- chromium/gpu/command_buffer/client/raster_implementation_autogen.h 18
-rw-r--r-- chromium/gpu/command_buffer/client/raster_implementation_gles.cc 113
-rw-r--r-- chromium/gpu/command_buffer/client/raster_implementation_gles.h 41
-rw-r--r-- chromium/gpu/command_buffer/client/raster_implementation_gles_unittest.cc 41
-rw-r--r-- chromium/gpu/command_buffer/client/raster_implementation_impl_autogen.h 48
-rw-r--r-- chromium/gpu/command_buffer/client/raster_implementation_unittest.cc 92
-rw-r--r-- chromium/gpu/command_buffer/client/raster_implementation_unittest_autogen.h 25
-rw-r--r-- chromium/gpu/command_buffer/client/raster_interface.h 15
-rw-r--r-- chromium/gpu/command_buffer/client/raster_interface_autogen.h 14
-rw-r--r-- chromium/gpu/command_buffer/client/ring_buffer.cc 23
-rw-r--r-- chromium/gpu/command_buffer/client/ring_buffer.h 44
-rw-r--r-- chromium/gpu/command_buffer/client/shared_image_interface.h 11
-rw-r--r-- chromium/gpu/command_buffer/client/shared_memory_limits.h 2
-rw-r--r-- chromium/gpu/command_buffer/client/transfer_buffer.cc 8
-rw-r--r-- chromium/gpu/command_buffer/client/transfer_buffer_cmd_copy_helpers.h 194
-rw-r--r-- chromium/gpu/command_buffer/client/transfer_buffer_cmd_copy_helpers_unittest.cc 217
-rw-r--r-- chromium/gpu/command_buffer/client/transfer_buffer_unittest.cc 86
-rw-r--r-- chromium/gpu/command_buffer/client/vertex_array_object_manager.cc 30
-rw-r--r-- chromium/gpu/command_buffer/client/vertex_array_object_manager.h 4
-rw-r--r-- chromium/gpu/command_buffer/client/vertex_array_object_manager_unittest.cc 11
-rw-r--r-- chromium/gpu/command_buffer/client/webgpu_implementation_unittest.cc 6
-rw-r--r-- chromium/gpu/command_buffer/common/buffer.cc 21
-rw-r--r-- chromium/gpu/command_buffer/common/buffer.h 20
-rw-r--r-- chromium/gpu/command_buffer/common/command_buffer.h 2
-rw-r--r-- chromium/gpu/command_buffer/common/command_buffer_shared_test.cc 3
-rw-r--r-- chromium/gpu/command_buffer/common/discardable_handle_unittest.cc 10
-rw-r--r-- chromium/gpu/command_buffer/common/gles2_cmd_format.cc 9
-rw-r--r-- chromium/gpu/command_buffer/common/gles2_cmd_format_autogen.h 726
-rw-r--r-- chromium/gpu/command_buffer/common/gles2_cmd_format_test_autogen.h 188
-rw-r--r-- chromium/gpu/command_buffer/common/gles2_cmd_ids_autogen.h 378
-rw-r--r-- chromium/gpu/command_buffer/common/gles2_cmd_utils.cc 93
-rw-r--r-- chromium/gpu/command_buffer/common/gles2_cmd_utils.h 60
-rw-r--r-- chromium/gpu/command_buffer/common/gles2_cmd_utils_unittest.cc 52
-rw-r--r-- chromium/gpu/command_buffer/common/id_type.h 1
-rw-r--r-- chromium/gpu/command_buffer/common/mailbox.cc 53
-rw-r--r-- chromium/gpu/command_buffer/common/mailbox.h 7
-rw-r--r-- chromium/gpu/command_buffer/common/presentation_feedback_utils.cc 7
-rw-r--r-- chromium/gpu/command_buffer/common/presentation_feedback_utils.h 5
-rw-r--r-- chromium/gpu/command_buffer/common/raster_cmd_format.cc 8
-rw-r--r-- chromium/gpu/command_buffer/common/raster_cmd_format_autogen.h 260
-rw-r--r-- chromium/gpu/command_buffer/common/raster_cmd_format_test_autogen.h 123
-rw-r--r-- chromium/gpu/command_buffer/common/raster_cmd_ids_autogen.h 47
-rw-r--r-- chromium/gpu/command_buffer/common/swap_buffers_flags.h 3
-rw-r--r-- chromium/gpu/command_buffer/common/unittest_main.cc 2
-rw-r--r-- chromium/gpu/command_buffer/gles2_cmd_buffer_functions.txt 21
-rw-r--r-- chromium/gpu/command_buffer/raster_cmd_buffer_functions.txt 11
-rw-r--r-- chromium/gpu/command_buffer/service/BUILD.gn 46
-rw-r--r-- chromium/gpu/command_buffer/service/buffer_manager.cc 99
-rw-r--r-- chromium/gpu/command_buffer/service/buffer_manager.h 49
-rw-r--r-- chromium/gpu/command_buffer/service/buffer_manager_unittest.cc 9
-rw-r--r-- chromium/gpu/command_buffer/service/command_buffer_direct.cc 101
-rw-r--r-- chromium/gpu/command_buffer/service/command_buffer_direct.h 28
-rw-r--r-- chromium/gpu/command_buffer/service/command_buffer_service.cc 9
-rw-r--r-- chromium/gpu/command_buffer/service/command_buffer_service.h 4
-rw-r--r-- chromium/gpu/command_buffer/service/common_decoder.cc 3
-rw-r--r-- chromium/gpu/command_buffer/service/common_decoder.h 6
-rw-r--r-- chromium/gpu/command_buffer/service/context_group.h 6
-rw-r--r-- chromium/gpu/command_buffer/service/context_state.cc 10
-rw-r--r-- chromium/gpu/command_buffer/service/context_state.h 8
-rw-r--r-- chromium/gpu/command_buffer/service/copy_texture_chromium_mock.h 8
-rw-r--r-- chromium/gpu/command_buffer/service/decoder_client.h 10
-rw-r--r-- chromium/gpu/command_buffer/service/decoder_context.h 6
-rw-r--r-- chromium/gpu/command_buffer/service/feature_info.cc 46
-rw-r--r-- chromium/gpu/command_buffer/service/feature_info.h 8
-rw-r--r-- chromium/gpu/command_buffer/service/feature_info_unittest.cc 10
-rw-r--r-- chromium/gpu/command_buffer/service/framebuffer_completeness_cache.h 4
-rw-r--r-- chromium/gpu/command_buffer/service/framebuffer_manager.h 7
-rw-r--r-- chromium/gpu/command_buffer/service/framebuffer_manager_unittest.cc 5
-rw-r--r-- chromium/gpu/command_buffer/service/gl_context_virtual_delegate.h 2
-rw-r--r-- chromium/gpu/command_buffer/service/gl_state_restorer_impl.cc 6
-rw-r--r-- chromium/gpu/command_buffer/service/gl_utils.h 8
-rw-r--r-- chromium/gpu/command_buffer/service/gles2_cmd_copy_tex_image.h 1
-rw-r--r-- chromium/gpu/command_buffer/service/gles2_cmd_copy_texture_chromium.cc 29
-rw-r--r-- chromium/gpu/command_buffer/service/gles2_cmd_copy_texture_chromium.h 9
-rw-r--r-- chromium/gpu/command_buffer/service/gles2_cmd_decoder.cc 1455
-rw-r--r-- chromium/gpu/command_buffer/service/gles2_cmd_decoder.h 4
-rw-r--r-- chromium/gpu/command_buffer/service/gles2_cmd_decoder_autogen.h 110
-rw-r--r-- chromium/gpu/command_buffer/service/gles2_cmd_decoder_mock.h 4
-rw-r--r-- chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough.cc 99
-rw-r--r-- chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough.h 18
-rw-r--r-- chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough_doer_prototypes.h 40
-rw-r--r-- chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough_doers.cc 216
-rw-r--r-- chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough_handlers.cc 262
-rw-r--r-- chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough_handlers_autogen.cc 110
-rw-r--r-- chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough_unittest_textures.cc 20
-rw-r--r-- chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest.cc 9
-rw-r--r-- chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_2.cc 12
-rw-r--r-- chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_2_autogen.h 25
-rw-r--r-- chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_3.cc 12
-rw-r--r-- chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_3_autogen.h 25
-rw-r--r-- chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_attribs.cc 10
-rw-r--r-- chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_base.cc 57
-rw-r--r-- chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_base.h 6
-rw-r--r-- chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_context_state.cc 18
-rw-r--r-- chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_extensions.cc 19
-rw-r--r-- chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_framebuffers.cc 8
-rw-r--r-- chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_programs.cc 41
-rw-r--r-- chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_textures.cc 27
-rw-r--r-- chromium/gpu/command_buffer/service/gles2_cmd_srgb_converter.cc 2
-rw-r--r-- chromium/gpu/command_buffer/service/gles2_cmd_srgb_converter.h 3
-rw-r--r-- chromium/gpu/command_buffer/service/gles2_cmd_validation_implementation_autogen.h 3
-rw-r--r-- chromium/gpu/command_buffer/service/gpu_command_buffer_memory_tracker.cc 2
-rw-r--r-- chromium/gpu/command_buffer/service/gpu_switches.cc 4
-rw-r--r-- chromium/gpu/command_buffer/service/gpu_switches.h 1
-rw-r--r-- chromium/gpu/command_buffer/service/gpu_tracer.cc 3
-rw-r--r-- chromium/gpu/command_buffer/service/gr_cache_controller.cc 28
-rw-r--r-- chromium/gpu/command_buffer/service/gr_cache_controller.h 8
-rw-r--r-- chromium/gpu/command_buffer/service/gr_cache_controller_unittest.cc 19
-rw-r--r-- chromium/gpu/command_buffer/service/gr_shader_cache.cc 15
-rw-r--r-- chromium/gpu/command_buffer/service/gr_shader_cache_unittest.cc 5
-rw-r--r-- chromium/gpu/command_buffer/service/id_manager.h 6
-rw-r--r-- chromium/gpu/command_buffer/service/image_manager.h 5
-rw-r--r-- chromium/gpu/command_buffer/service/logger.cc 10
-rw-r--r-- chromium/gpu/command_buffer/service/logger.h 4
-rw-r--r-- chromium/gpu/command_buffer/service/memory_program_cache.h 2
-rw-r--r-- chromium/gpu/command_buffer/service/memory_program_cache_unittest.cc 1
-rw-r--r-- chromium/gpu/command_buffer/service/mocks.h 1
-rw-r--r-- chromium/gpu/command_buffer/service/multi_draw_manager.cc 229
-rw-r--r-- chromium/gpu/command_buffer/service/multi_draw_manager.h 92
-rw-r--r-- chromium/gpu/command_buffer/service/passthrough_program_cache.h 1
-rw-r--r-- chromium/gpu/command_buffer/service/passthrough_program_cache_unittest.cc 1
-rw-r--r-- chromium/gpu/command_buffer/service/program_cache.cc 18
-rw-r--r-- chromium/gpu/command_buffer/service/program_cache.h 28
-rw-r--r-- chromium/gpu/command_buffer/service/program_cache_unittest.cc 1
-rw-r--r-- chromium/gpu/command_buffer/service/program_manager.cc 61
-rw-r--r-- chromium/gpu/command_buffer/service/program_manager.h 11
-rw-r--r-- chromium/gpu/command_buffer/service/program_manager_unittest.cc 68
-rw-r--r-- chromium/gpu/command_buffer/service/query_manager.h 6
-rw-r--r-- chromium/gpu/command_buffer/service/raster_cmd_validation_implementation_autogen.h 7
-rw-r--r-- chromium/gpu/command_buffer/service/raster_decoder.cc 871
-rw-r--r-- chromium/gpu/command_buffer/service/raster_decoder.h 5
-rw-r--r-- chromium/gpu/command_buffer/service/raster_decoder_autogen.h 115
-rw-r--r-- chromium/gpu/command_buffer/service/raster_decoder_context_state.cc 157
-rw-r--r-- chromium/gpu/command_buffer/service/raster_decoder_context_state.h 85
-rw-r--r-- chromium/gpu/command_buffer/service/raster_decoder_mock.h 3
-rw-r--r-- chromium/gpu/command_buffer/service/raster_decoder_unittest.cc 107
-rw-r--r-- chromium/gpu/command_buffer/service/raster_decoder_unittest_1_autogen.h 21
-rw-r--r-- chromium/gpu/command_buffer/service/raster_decoder_unittest_base.cc 89
-rw-r--r-- chromium/gpu/command_buffer/service/raster_decoder_unittest_base.h 23
-rw-r--r-- chromium/gpu/command_buffer/service/raster_decoder_unittest_context_lost.cc 15
-rw-r--r-- chromium/gpu/command_buffer/service/renderbuffer_manager.cc 20
-rw-r--r-- chromium/gpu/command_buffer/service/renderbuffer_manager.h 5
-rw-r--r-- chromium/gpu/command_buffer/service/sampler_manager.h 4
-rw-r--r-- chromium/gpu/command_buffer/service/scheduler.cc 28
-rw-r--r-- chromium/gpu/command_buffer/service/scheduler.h 2
-rw-r--r-- chromium/gpu/command_buffer/service/scheduler_unittest.cc 4
-rw-r--r-- chromium/gpu/command_buffer/service/service_font_manager.cc 36
-rw-r--r-- chromium/gpu/command_buffer/service/service_font_manager.h 2
-rw-r--r-- chromium/gpu/command_buffer/service/shader_manager.h 4
-rw-r--r-- chromium/gpu/command_buffer/service/shader_translator.cc 8
-rw-r--r-- chromium/gpu/command_buffer/service/shader_translator.h 10
-rw-r--r-- chromium/gpu/command_buffer/service/shared_context_state.cc 353
-rw-r--r-- chromium/gpu/command_buffer/service/shared_context_state.h 164
-rw-r--r-- chromium/gpu/command_buffer/service/shared_image_backing_factory.h 7
-rw-r--r-- chromium/gpu/command_buffer/service/shared_image_backing_factory_ahardwarebuffer.cc 743
-rw-r--r-- chromium/gpu/command_buffer/service/shared_image_backing_factory_ahardwarebuffer.h 12
-rw-r--r-- chromium/gpu/command_buffer/service/shared_image_backing_factory_ahardwarebuffer_unittest.cc 54
-rw-r--r-- chromium/gpu/command_buffer/service/shared_image_backing_factory_gl_texture.cc 409
-rw-r--r-- chromium/gpu/command_buffer/service/shared_image_backing_factory_gl_texture.h 12
-rw-r--r-- chromium/gpu/command_buffer/service/shared_image_backing_factory_gl_texture_unittest.cc 182
-rw-r--r-- chromium/gpu/command_buffer/service/shared_image_factory.cc 39
-rw-r--r-- chromium/gpu/command_buffer/service/shared_image_factory.h 15
-rw-r--r-- chromium/gpu/command_buffer/service/shared_image_factory_unittest.cc 6
-rw-r--r-- chromium/gpu/command_buffer/service/shared_image_manager.cc 12
-rw-r--r-- chromium/gpu/command_buffer/service/shared_image_manager.h 3
-rw-r--r-- chromium/gpu/command_buffer/service/shared_image_manager_unittest.cc 6
-rw-r--r-- chromium/gpu/command_buffer/service/shared_image_representation.h 6
-rw-r--r-- chromium/gpu/command_buffer/service/skia_utils.cc 64
-rw-r--r-- chromium/gpu/command_buffer/service/skia_utils.h 12
-rw-r--r-- chromium/gpu/command_buffer/service/sync_point_manager.cc 9
-rw-r--r-- chromium/gpu/command_buffer/service/sync_point_manager_unittest.cc 47
-rw-r--r-- chromium/gpu/command_buffer/service/test_helper.cc 5
-rw-r--r-- chromium/gpu/command_buffer/service/texture_manager.cc 257
-rw-r--r-- chromium/gpu/command_buffer/service/texture_manager.h 119
-rw-r--r-- chromium/gpu/command_buffer/service/transform_feedback_manager.cc 13
-rw-r--r-- chromium/gpu/command_buffer/service/transform_feedback_manager.h 13
-rw-r--r-- chromium/gpu/command_buffer/service/vertex_array_manager.h 5
-rw-r--r-- chromium/gpu/command_buffer/service/webgpu_decoder.cc 2
-rw-r--r-- chromium/gpu/command_buffer/service/wrapped_sk_image.cc 93
-rw-r--r-- chromium/gpu/command_buffer/service/wrapped_sk_image.h 16
-rw-r--r-- chromium/gpu/config/BUILD.gn 2
-rw-r--r-- chromium/gpu/config/gpu_control_list.cc 16
-rw-r--r-- chromium/gpu/config/gpu_control_list.h 11
-rw-r--r-- chromium/gpu/config/gpu_control_list_entry_unittest.cc 29
-rw-r--r-- chromium/gpu/config/gpu_control_list_format.txt 4
-rw-r--r-- chromium/gpu/config/gpu_control_list_testing.json 9
-rw-r--r-- chromium/gpu/config/gpu_control_list_testing_arrays_and_structs_autogen.h 132
-rw-r--r-- chromium/gpu/config/gpu_control_list_testing_autogen.cc 32
-rw-r--r-- chromium/gpu/config/gpu_control_list_testing_entry_enums_autogen.h 1
-rw-r--r-- chromium/gpu/config/gpu_driver_bug_list.cc 3
-rw-r--r-- chromium/gpu/config/gpu_driver_bug_list.json 61
-rw-r--r-- chromium/gpu/config/gpu_dx_diagnostics_win.cc 8
-rw-r--r-- chromium/gpu/config/gpu_finch_features.cc 5
-rw-r--r-- chromium/gpu/config/gpu_finch_features.h 2
-rw-r--r-- chromium/gpu/config/gpu_info_collector.cc 9
-rw-r--r-- chromium/gpu/config/gpu_lists_version.h 2
-rw-r--r-- chromium/gpu/config/gpu_preferences.h 4
-rw-r--r-- chromium/gpu/config/gpu_preferences_unittest.cc 3
-rw-r--r-- chromium/gpu/config/gpu_test_config.cc 6
-rw-r--r-- chromium/gpu/config/gpu_test_config.h 1
-rw-r--r-- chromium/gpu/config/gpu_util.cc 6
-rw-r--r-- chromium/gpu/config/gpu_workaround_list.txt 1
-rwxr-xr-x chromium/gpu/config/process_json.py 25
-rw-r--r-- chromium/gpu/config/software_rendering_list.json 47
-rw-r--r-- chromium/gpu/gles2_conform_support/egl/context.cc 33
-rw-r--r-- chromium/gpu/gles2_conform_support/egl/context.h 2
-rw-r--r-- chromium/gpu/gles2_conform_support/egl/display.cc 7
-rw-r--r-- chromium/gpu/gles2_conform_support/gles2_conform_test.cc 4
-rw-r--r-- chromium/gpu/ipc/client/command_buffer_proxy_impl.cc 32
-rw-r--r-- chromium/gpu/ipc/client/command_buffer_proxy_impl.h 22
-rw-r--r-- chromium/gpu/ipc/client/gpu_channel_host.cc 41
-rw-r--r-- chromium/gpu/ipc/client/gpu_channel_host.h 6
-rw-r--r-- chromium/gpu/ipc/client/gpu_context_tests.h 10
-rw-r--r-- chromium/gpu/ipc/client/raster_in_process_context_tests.cc 5
-rw-r--r-- chromium/gpu/ipc/client/shared_image_interface_proxy.cc 152
-rw-r--r-- chromium/gpu/ipc/client/shared_image_interface_proxy.h 16
-rw-r--r-- chromium/gpu/ipc/command_buffer_task_executor.cc 21
-rw-r--r-- chromium/gpu/ipc/command_buffer_task_executor.h 20
-rw-r--r-- chromium/gpu/ipc/common/BUILD.gn 21
-rw-r--r-- chromium/gpu/ipc/common/android/android_image_reader_utils.cc 27
-rw-r--r-- chromium/gpu/ipc/common/android/android_image_reader_utils.h 8
-rw-r--r-- chromium/gpu/ipc/common/gpu_memory_buffer_impl.cc 6
-rw-r--r-- chromium/gpu/ipc/common/gpu_memory_buffer_impl.h 6
-rw-r--r-- chromium/gpu/ipc/common/gpu_memory_buffer_impl_android_hardware_buffer.cc 24
-rw-r--r-- chromium/gpu/ipc/common/gpu_memory_buffer_impl_android_hardware_buffer.h 15
-rw-r--r-- chromium/gpu/ipc/common/gpu_memory_buffer_impl_dxgi.cc 10
-rw-r--r-- chromium/gpu/ipc/common/gpu_memory_buffer_impl_dxgi.h 13
-rw-r--r-- chromium/gpu/ipc/common/gpu_memory_buffer_impl_io_surface.cc 14
-rw-r--r-- chromium/gpu/ipc/common/gpu_memory_buffer_impl_io_surface.h 13
-rw-r--r-- chromium/gpu/ipc/common/gpu_memory_buffer_impl_native_pixmap.cc 12
-rw-r--r-- chromium/gpu/ipc/common/gpu_memory_buffer_impl_native_pixmap.h 13
-rw-r--r-- chromium/gpu/ipc/common/gpu_memory_buffer_impl_shared_memory.cc 19
-rw-r--r-- chromium/gpu/ipc/common/gpu_memory_buffer_impl_shared_memory.h 15
-rw-r--r-- chromium/gpu/ipc/common/gpu_memory_buffer_impl_test_template.h 33
-rw-r--r-- chromium/gpu/ipc/common/gpu_memory_buffer_support.cc 14
-rw-r--r-- chromium/gpu/ipc/common/gpu_memory_buffer_support.h 2
-rw-r--r-- chromium/gpu/ipc/common/gpu_messages.h 25
-rw-r--r-- chromium/gpu/ipc/common/gpu_preferences.mojom 2
-rw-r--r-- chromium/gpu/ipc/common/gpu_preferences_struct_traits.h 12
-rw-r--r-- chromium/gpu/ipc/common/surface_handle.h 11
-rw-r--r-- chromium/gpu/ipc/gpu_in_process_thread_service.cc 22
-rw-r--r-- chromium/gpu/ipc/gpu_in_process_thread_service.h 12
-rw-r--r-- chromium/gpu/ipc/host/gpu_memory_buffer_support.h 8
-rw-r--r-- chromium/gpu/ipc/in_process_command_buffer.cc 258
-rw-r--r-- chromium/gpu/ipc/in_process_command_buffer.h 30
-rw-r--r-- chromium/gpu/ipc/in_process_gpu_thread_holder.cc 9
-rw-r--r-- chromium/gpu/ipc/in_process_gpu_thread_holder.h 4
-rw-r--r-- chromium/gpu/ipc/service/BUILD.gn 5
-rw-r--r-- chromium/gpu/ipc/service/child_window_win.cc 4
-rw-r--r-- chromium/gpu/ipc/service/command_buffer_stub.cc 99
-rw-r--r-- chromium/gpu/ipc/service/command_buffer_stub.h 12
-rw-r--r-- chromium/gpu/ipc/service/direct_composition_child_surface_win.cc 267
-rw-r--r-- chromium/gpu/ipc/service/direct_composition_child_surface_win.h 31
-rw-r--r-- chromium/gpu/ipc/service/direct_composition_surface_win.cc 663
-rw-r--r-- chromium/gpu/ipc/service/direct_composition_surface_win.h 22
-rw-r--r-- chromium/gpu/ipc/service/direct_composition_surface_win_unittest.cc 503
-rw-r--r-- chromium/gpu/ipc/service/gles2_command_buffer_stub.cc 26
-rw-r--r-- chromium/gpu/ipc/service/gpu_channel.cc 163
-rw-r--r-- chromium/gpu/ipc/service/gpu_channel.h 5
-rw-r--r-- chromium/gpu/ipc/service/gpu_channel_manager.cc 143
-rw-r--r-- chromium/gpu/ipc/service/gpu_channel_manager.h 30
-rw-r--r-- chromium/gpu/ipc/service/gpu_channel_manager_delegate.h 12
-rw-r--r-- chromium/gpu/ipc/service/gpu_channel_manager_unittest.cc 4
-rw-r--r-- chromium/gpu/ipc/service/gpu_channel_test_common.cc 8
-rw-r--r-- chromium/gpu/ipc/service/gpu_channel_test_common.h 7
-rw-r--r-- chromium/gpu/ipc/service/gpu_channel_unittest.cc 2
-rw-r--r-- chromium/gpu/ipc/service/gpu_init.cc 6
-rw-r--r-- chromium/gpu/ipc/service/gpu_init.h 4
-rw-r--r-- chromium/gpu/ipc/service/gpu_memory_buffer_factory_dxgi.cc 6
-rw-r--r-- chromium/gpu/ipc/service/gpu_memory_buffer_factory_dxgi.h 1
-rw-r--r-- chromium/gpu/ipc/service/gpu_memory_buffer_factory_io_surface.h 5
-rw-r--r-- chromium/gpu/ipc/service/gpu_memory_buffer_factory_native_pixmap.cc 18
-rw-r--r-- chromium/gpu/ipc/service/gpu_watchdog_thread.cc 43
-rw-r--r-- chromium/gpu/ipc/service/image_decode_accelerator_stub.cc 119
-rw-r--r-- chromium/gpu/ipc/service/image_decode_accelerator_stub.h 39
-rw-r--r-- chromium/gpu/ipc/service/image_decode_accelerator_stub_unittest.cc 326
-rw-r--r-- chromium/gpu/ipc/service/image_decode_accelerator_worker.h 38
-rw-r--r-- chromium/gpu/ipc/service/image_transport_surface_fuchsia.cc 9
-rw-r--r-- chromium/gpu/ipc/service/image_transport_surface_overlay_mac.mm 4
-rw-r--r-- chromium/gpu/ipc/service/image_transport_surface_win.cc 2
-rw-r--r-- chromium/gpu/ipc/service/raster_command_buffer_stub.cc 82
-rw-r--r-- chromium/gpu/ipc/service/shared_image_stub.cc 122
-rw-r--r-- chromium/gpu/ipc/service/shared_image_stub.h 13
-rw-r--r-- chromium/gpu/perftests/run_all_tests.cc 5
-rw-r--r-- chromium/gpu/skia_bindings/gl_bindings_skia_cmd_buffer.cc 4
-rw-r--r-- chromium/gpu/tools/compositor_model_bench/compositor_model_bench.cc 4
-rw-r--r-- chromium/gpu/vulkan/BUILD.gn 18
-rw-r--r-- chromium/gpu/vulkan/android/vulkan_android_unittests.cc 64
-rw-r--r-- chromium/gpu/vulkan/android/vulkan_implementation_android.cc 213
-rw-r--r-- chromium/gpu/vulkan/android/vulkan_implementation_android.h 10
-rw-r--r-- chromium/gpu/vulkan/demo/vulkan_demo.cc 9
-rw-r--r-- chromium/gpu/vulkan/features.gni 2
-rwxr-xr-x chromium/gpu/vulkan/generate_bindings.py 12
-rw-r--r-- chromium/gpu/vulkan/vulkan_function_pointers.cc 22
-rw-r--r-- chromium/gpu/vulkan/vulkan_function_pointers.h 21
-rw-r--r-- chromium/gpu/vulkan/vulkan_implementation.h 18
-rw-r--r-- chromium/gpu/vulkan/vulkan_instance.cc 12
-rw-r--r-- chromium/gpu/vulkan/vulkan_instance.h 4
-rw-r--r-- chromium/gpu/vulkan/vulkan_surface.cc 9
-rw-r--r-- chromium/gpu/vulkan/vulkan_surface.h 7
-rw-r--r-- chromium/gpu/vulkan/vulkan_swap_chain.cc 2
-rw-r--r-- chromium/gpu/vulkan/win32/vulkan_implementation_win32.cc 9
-rw-r--r-- chromium/gpu/vulkan/x/vulkan_implementation_x11.cc 9
358 files changed, 11395 insertions(+), 7100 deletions(-)
diff --git a/chromium/gpu/BUILD.gn b/chromium/gpu/BUILD.gn
index 278d1f83e00..9974be5f0cc 100644
--- a/chromium/gpu/BUILD.gn
+++ b/chromium/gpu/BUILD.gn
@@ -216,7 +216,6 @@ test("gl_tests") {
"command_buffer/tests/gl_ext_multisample_compatibility_unittest.cc",
"command_buffer/tests/gl_ext_srgb_unittest.cc",
"command_buffer/tests/gl_ext_window_rectangles_unittest.cc",
- "command_buffer/tests/gl_fence_sync_unittest.cc",
"command_buffer/tests/gl_gpu_memory_buffer_unittest.cc",
"command_buffer/tests/gl_iosurface_readback_workaround_unittest.cc",
"command_buffer/tests/gl_lose_context_chromium_unittest.cc",
@@ -245,6 +244,7 @@ test("gl_tests") {
"command_buffer/tests/gl_unittests_android.cc",
"command_buffer/tests/gl_virtual_contexts_ext_window_rectangles_unittest.cc",
"command_buffer/tests/gl_virtual_contexts_unittest.cc",
+ "command_buffer/tests/gl_webgl_multi_draw_test.cc",
"command_buffer/tests/occlusion_query_unittest.cc",
"command_buffer/tests/texture_image_factory.cc",
"command_buffer/tests/texture_image_factory.h",
@@ -321,6 +321,7 @@ test("gpu_unittests") {
"command_buffer/client/raster_implementation_unittest.cc",
"command_buffer/client/raster_implementation_unittest_autogen.h",
"command_buffer/client/ring_buffer_test.cc",
+ "command_buffer/client/transfer_buffer_cmd_copy_helpers_unittest.cc",
"command_buffer/client/transfer_buffer_unittest.cc",
"command_buffer/client/vertex_array_object_manager_unittest.cc",
"command_buffer/client/webgpu_implementation_unittest.cc",
@@ -440,6 +441,7 @@ test("gpu_unittests") {
"ipc/service/gpu_channel_test_common.cc",
"ipc/service/gpu_channel_test_common.h",
"ipc/service/gpu_channel_unittest.cc",
+ "ipc/service/image_decode_accelerator_stub_unittest.cc",
]
if (is_mac) {
@@ -469,7 +471,7 @@ test("gpu_unittests") {
]
}
- if (is_fuchsia || (is_posix && !is_mac)) {
+ if (is_linux || is_android || is_chromeos) {
sources += [ "command_buffer/service/gpu_fence_manager_unittest.cc" ]
}
@@ -504,6 +506,7 @@ test("gpu_unittests") {
"//gpu/command_buffer/common",
"//gpu/command_buffer/common:gles2_utils",
"//gpu/command_buffer/service",
+ "//gpu/config",
"//gpu/ipc:gl_in_process_context",
"//gpu/ipc/client",
"//gpu/ipc/common",
@@ -610,6 +613,7 @@ fuzzer_test("gpu_fuzzer") {
":gles2",
":gpu",
"//base",
+ "//base:i18n",
"//base/third_party/dynamic_annotations",
"//ui/gfx/geometry",
"//ui/gl",
diff --git a/chromium/gpu/GLES2/gl2chromium_autogen.h b/chromium/gpu/GLES2/gl2chromium_autogen.h
index 0fafe61704a..e46e882a0f6 100644
--- a/chromium/gpu/GLES2/gl2chromium_autogen.h
+++ b/chromium/gpu/GLES2/gl2chromium_autogen.h
@@ -168,6 +168,12 @@
#define glShallowFinishCHROMIUM GLES2_GET_FUN(ShallowFinishCHROMIUM)
#define glShallowFlushCHROMIUM GLES2_GET_FUN(ShallowFlushCHROMIUM)
#define glOrderingBarrierCHROMIUM GLES2_GET_FUN(OrderingBarrierCHROMIUM)
+#define glMultiDrawArraysWEBGL GLES2_GET_FUN(MultiDrawArraysWEBGL)
+#define glMultiDrawArraysInstancedWEBGL \
+ GLES2_GET_FUN(MultiDrawArraysInstancedWEBGL)
+#define glMultiDrawElementsWEBGL GLES2_GET_FUN(MultiDrawElementsWEBGL)
+#define glMultiDrawElementsInstancedWEBGL \
+ GLES2_GET_FUN(MultiDrawElementsInstancedWEBGL)
#define glStencilFunc GLES2_GET_FUN(StencilFunc)
#define glStencilFuncSeparate GLES2_GET_FUN(StencilFuncSeparate)
#define glStencilMask GLES2_GET_FUN(StencilMask)
@@ -336,8 +342,6 @@
#define glCommitOverlayPlanesCHROMIUM GLES2_GET_FUN(CommitOverlayPlanesCHROMIUM)
#define glFlushDriverCachesCHROMIUM GLES2_GET_FUN(FlushDriverCachesCHROMIUM)
#define glGetLastFlushIdCHROMIUM GLES2_GET_FUN(GetLastFlushIdCHROMIUM)
-#define glScheduleDCLayerSharedStateCHROMIUM \
- GLES2_GET_FUN(ScheduleDCLayerSharedStateCHROMIUM)
#define glScheduleDCLayerCHROMIUM GLES2_GET_FUN(ScheduleDCLayerCHROMIUM)
#define glSetActiveURLCHROMIUM GLES2_GET_FUN(SetActiveURLCHROMIUM)
#define glMatrixLoadfCHROMIUM GLES2_GET_FUN(MatrixLoadfCHROMIUM)
diff --git a/chromium/gpu/angle_end2end_tests_main.cc b/chromium/gpu/angle_end2end_tests_main.cc
index cb2c65ee897..e6fde851cd4 100644
--- a/chromium/gpu/angle_end2end_tests_main.cc
+++ b/chromium/gpu/angle_end2end_tests_main.cc
@@ -8,7 +8,6 @@
#include "base/test/launcher/unit_test_launcher.h"
#include "base/test/test_suite.h"
#include "testing/gmock/include/gmock/gmock.h"
-#include "third_party/angle/src/tests/test_utils/ANGLETest.h"
namespace {
@@ -19,10 +18,14 @@ int RunHelper(base::TestSuite* test_suite) {
} // namespace
+// Located in third_party/angle/src/tests/test_utils/ANGLETest.cpp.
+// Defined here so we can avoid depending on the ANGLE headers.
+void ANGLEProcessTestArgs(int *argc, char *argv[]);
+
int main(int argc, char** argv) {
base::CommandLine::Init(argc, argv);
testing::InitGoogleMock(&argc, argv);
- testing::AddGlobalTestEnvironment(new ANGLETestEnvironment());
+ ANGLEProcessTestArgs(&argc, argv);
base::TestSuite test_suite(argc, argv);
int rt = base::LaunchUnitTestsWithOptions(
argc, argv,
diff --git a/chromium/gpu/command_buffer/build_cmd_buffer_lib.py b/chromium/gpu/command_buffer/build_cmd_buffer_lib.py
index 43279b5b8ca..2e73406867f 100644
--- a/chromium/gpu/command_buffer/build_cmd_buffer_lib.py
+++ b/chromium/gpu/command_buffer/build_cmd_buffer_lib.py
@@ -2209,11 +2209,10 @@ class GENnHandler(TypeHandler):
def WriteGetDataSizeCode(self, func, arg, f):
"""Overrriden from TypeHandler."""
code = """ uint32_t %(data_size)s;
- if (!%(namespace)sSafeMultiplyUint32(n, sizeof(GLuint), &%(data_size)s)) {
+ if (!base::CheckMul(n, sizeof(GLuint)).AssignIfValid(&%(data_size)s)) {
return error::kOutOfBounds;
}
-""" % {'data_size': arg.GetReservedSizeId(),
- 'namespace': _Namespace()}
+""" % {'data_size': arg.GetReservedSizeId()}
f.write(code)
def WriteHandlerImplementation (self, func, f):
@@ -2662,11 +2661,10 @@ class DELnHandler(TypeHandler):
def WriteGetDataSizeCode(self, func, arg, f):
"""Overrriden from TypeHandler."""
code = """ uint32_t %(data_size)s;
- if (!%(namespace)sSafeMultiplyUint32(n, sizeof(GLuint), &%(data_size)s)) {
+ if (!base::CheckMul(n, sizeof(GLuint)).AssignIfValid(&%(data_size)s)) {
return error::kOutOfBounds;
}
-""" % {'data_size': arg.GetReservedSizeId(),
- 'namespace': _Namespace()}
+""" % {'data_size': arg.GetReservedSizeId()}
f.write(code)
def WriteGLES2ImplementationUnitTest(self, func, f):
@@ -3378,7 +3376,7 @@ TEST_P(%(test_name)s, %(name)sInvalidArgs%(arg_index)d_%(value_index)d) {
self.WriteClientGLCallLog(func, f)
if self.__NeedsToCalcDataCount(func):
- f.write(" size_t count = %sGLES2Util::Calc%sDataCount(%s);\n" %
+ f.write(" uint32_t count = %sGLES2Util::Calc%sDataCount(%s);\n" %
(_Namespace(), func.name, func.GetOriginalArgs()[0].name))
f.write(" DCHECK_LE(count, %du);\n" % self.GetArrayCount(func))
f.write(" if (count == 0) {\n")
@@ -3388,8 +3386,8 @@ TEST_P(%(test_name)s, %(name)sInvalidArgs%(arg_index)d_%(value_index)d) {
f.write(" return;\n")
f.write(" }\n")
else:
- f.write(" size_t count = %d;" % self.GetArrayCount(func))
- f.write(" for (size_t ii = 0; ii < count; ++ii)\n")
+ f.write(" uint32_t count = %d;" % self.GetArrayCount(func))
+ f.write(" for (uint32_t ii = 0; ii < count; ++ii)\n")
f.write(' GPU_CLIENT_LOG("value[" << ii << "]: " << %s[ii]);\n' %
func.GetLastOriginalArg().name)
for arg in func.GetOriginalArgs():
diff --git a/chromium/gpu/command_buffer/build_gles2_cmd_buffer.py b/chromium/gpu/command_buffer/build_gles2_cmd_buffer.py
index 34e0d452918..2cc75e3b82b 100755
--- a/chromium/gpu/command_buffer/build_gles2_cmd_buffer.py
+++ b/chromium/gpu/command_buffer/build_gles2_cmd_buffer.py
@@ -1684,9 +1684,6 @@ _NAMED_TYPE_INFO = {
'is_complete': True,
'valid': [
'0',
- 'gpu::SwapBuffersFlags::kPresentationFeedback',
- 'gpu::SwapBuffersFlags::kVSyncParams',
- 'gpu::SwapBuffersFlags::kPresentationFeedback | '
'gpu::SwapBuffersFlags::kVSyncParams',
],
},
@@ -2857,6 +2854,122 @@ _FUNCTION_INFO = {
'trace_level': 2,
'es31': True
},
+ 'MultiDrawBeginCHROMIUM': {
+ 'decoder_func': 'DoMultiDrawBeginCHROMIUM',
+ 'extension': 'WEBGL_multi_draw',
+ 'extension_flag': 'webgl_multi_draw',
+ 'internal': True,
+ 'trace_level': 1,
+ 'impl_func': False,
+ 'unit_test': False,
+ },
+ 'MultiDrawEndCHROMIUM': {
+ 'decoder_func': 'DoMultiDrawEndCHROMIUM',
+ 'extension': 'WEBGL_multi_draw',
+ 'extension_flag': 'webgl_multi_draw',
+ 'internal': True,
+ 'trace_level': 1,
+ 'impl_func': False,
+ 'unit_test': False,
+ },
+ 'MultiDrawArraysCHROMIUM': {
+ 'type': 'Custom',
+ 'cmd_args': 'GLenumDrawMode mode, '
+ 'uint32_t firsts_shm_id, uint32_t firsts_shm_offset, '
+ 'uint32_t counts_shm_id, uint32_t counts_shm_offset, '
+ 'GLsizei drawcount',
+ 'extension': 'WEBGL_multi_draw',
+ 'extension_flag': 'webgl_multi_draw',
+ 'data_transfer_methods': ['shm'],
+ 'size_args': {
+ 'firsts': 'drawcount * sizeof(GLint)',
+ 'counts': 'drawcount * sizeof(GLsizei)', },
+ 'defer_draws': True,
+ 'impl_func': False,
+ 'client_test': False,
+ 'internal': True,
+ 'trace_level': 2,
+ },
+ 'MultiDrawArraysInstancedCHROMIUM': {
+ 'type': 'Custom',
+ 'cmd_args': 'GLenumDrawMode mode, '
+ 'uint32_t firsts_shm_id, uint32_t firsts_shm_offset, '
+ 'uint32_t counts_shm_id, uint32_t counts_shm_offset, '
+ 'uint32_t instance_counts_shm_id, '
+ 'uint32_t instance_counts_shm_offset, GLsizei drawcount',
+ 'extension': 'WEBGL_multi_draw_instanced',
+ 'extension_flag': 'webgl_multi_draw_instanced',
+ 'data_transfer_methods': ['shm'],
+ 'size_args': {
+ 'firsts': 'drawcount * sizeof(GLint)',
+ 'counts': 'drawcount * sizeof(GLsizei)',
+ 'instance_counts': 'drawcount * sizeof(GLsizei)', },
+ 'defer_draws': True,
+ 'impl_func': False,
+ 'client_test': False,
+ 'internal': True,
+ 'trace_level': 2,
+ },
+ 'MultiDrawElementsCHROMIUM': {
+ 'type': 'Custom',
+ 'cmd_args': 'GLenumDrawMode mode, '
+ 'uint32_t counts_shm_id, uint32_t counts_shm_offset, '
+ 'GLenumIndexType type, '
+ 'uint32_t offsets_shm_id, uint32_t offsets_shm_offset, '
+ 'GLsizei drawcount',
+ 'extension': 'WEBGL_multi_draw',
+ 'extension_flag': 'webgl_multi_draw',
+ 'data_transfer_methods': ['shm'],
+ 'size_args': {
+ 'counts': 'drawcount * sizeof(GLsizei)',
+ 'offsets': 'drawcount * sizeof(GLsizei)', },
+ 'defer_draws': True,
+ 'impl_func': False,
+ 'client_test': False,
+ 'internal': True,
+ 'trace_level': 2,
+ },
+ 'MultiDrawElementsInstancedCHROMIUM': {
+ 'type': 'Custom',
+ 'cmd_args': 'GLenumDrawMode mode, '
+ 'uint32_t counts_shm_id, uint32_t counts_shm_offset, '
+ 'GLenumIndexType type, '
+ 'uint32_t offsets_shm_id, uint32_t offsets_shm_offset, '
+ 'uint32_t instance_counts_shm_id, '
+ 'uint32_t instance_counts_shm_offset, GLsizei drawcount',
+ 'extension': 'WEBGL_multi_draw_instanced',
+ 'extension_flag': 'webgl_multi_draw_instanced',
+ 'data_transfer_methods': ['shm'],
+ 'size_args': {
+ 'counts': 'drawcount * sizeof(GLsizei)',
+ 'offsets': 'drawcount * sizeof(GLsizei)',
+ 'instance_counts': 'drawcount * sizeof(GLsizei)', },
+ 'defer_draws': True,
+ 'impl_func': False,
+ 'client_test': False,
+ 'internal': True,
+ 'trace_level': 2,
+ },
+ 'MultiDrawArraysWEBGL': {
+ 'type': 'NoCommand',
+ 'extension': 'WEBGL_multi_draw',
+ 'extension_flag': 'webgl_multi_draw',
+ },
+ 'MultiDrawArraysInstancedWEBGL': {
+ 'type': 'NoCommand',
+ 'extension': 'WEBGL_multi_draw_instanced',
+ 'extension_flag': 'webgl_multi_draw_instanced',
+ },
+ 'MultiDrawElementsWEBGL': {
+ 'type': 'NoCommand',
+ 'extension': 'WEBGL_multi_draw',
+ 'extension_flag': 'webgl_multi_draw',
+ },
+ 'MultiDrawElementsInstancedWEBGL': {
+ 'type': 'NoCommand',
+ 'extension': 'WEBGL_multi_draw_instanced',
+ 'extension_flag': 'webgl_multi_draw_instanced',
+ },
'OverlayPromotionHintCHROMIUM': {
'decoder_func': 'DoOverlayPromotionHintCHROMIUM',
'extension': "CHROMIUM_uniform_stream_texture_matrix",
@@ -3730,12 +3843,7 @@ _FUNCTION_INFO = {
'extension': "CHROMIUM_sync_point",
},
'WaitSyncTokenCHROMIUM': {
- 'type': 'Custom',
- 'impl_func': False,
- 'cmd_args': 'GLint namespace_id, '
- 'GLuint64 command_buffer_id, '
- 'GLuint64 release_count',
- 'client_test': False,
+ 'type': 'NoCommand',
'extension': "CHROMIUM_sync_point",
},
'DiscardBackbufferCHROMIUM': {
@@ -3773,23 +3881,20 @@ _FUNCTION_INFO = {
'extension': 'CHROMIUM_schedule_ca_layer',
'unit_test': False,
},
- 'ScheduleDCLayerSharedStateCHROMIUM': {
- 'type': 'Custom',
- 'impl_func': False,
- 'client_test': False,
- 'cmd_args': 'GLfloat opacity, GLboolean is_clipped, '
- 'GLint z_order, GLuint shm_id, GLuint shm_offset',
- 'extension': 'CHROMIUM_schedule_ca_layer',
- },
'ScheduleDCLayerCHROMIUM': {
- 'type': 'Custom',
- 'impl_func': False,
- 'client_test': False,
- 'cmd_args': 'GLsizei num_textures, GLuint background_color, '
- 'GLuint edge_aa_mask, GLuint filter, GLuint shm_id, '
- 'GLuint shm_offset, GLuint protected_video_type',
-
- 'extension': 'CHROMIUM_schedule_ca_layer',
+ 'cmd_args': 'GLuint y_texture_id, GLuint uv_texture_id, GLint z_order, '
+ 'GLint content_x, GLint content_y, GLint content_width, '
+ 'GLint content_height, GLint quad_x, GLint quad_y, '
+ 'GLint quad_width, GLint quad_height, '
+ 'GLfloat transform_c1r1, GLfloat transform_c2r1, '
+ 'GLfloat transform_c1r2, GLfloat transform_c2r2, '
+ 'GLfloat transform_tx, GLfloat transform_ty, '
+ 'GLboolean is_clipped, GLint clip_x, GLint clip_y, '
+ 'GLint clip_width, GLint clip_height, '
+ 'GLuint protected_video_type',
+ 'decoder_func': 'DoScheduleDCLayerCHROMIUM',
+ 'extension': 'CHROMIUM_schedule_dc_layer',
+ 'unit_test': False,
},
'CommitOverlayPlanesCHROMIUM': {
'impl_func': False,
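
The four MultiDraw*WEBGL entries above are 'NoCommand': no command-buffer packet is generated for them, and the client-side GLES2 implementation instead batches them through the internal MultiDrawBeginCHROMIUM/MultiDrawEndCHROMIUM and MultiDraw*CHROMIUM commands defined earlier in this file. A usage sketch of one entry point, with the signature taken from the WEBGL_multi_draw extension (the array contents are illustrative):

// Issues three draws in a single call instead of three glDrawArrays calls,
// saving per-call validation and dispatch overhead.
GLint firsts[] = {0, 100, 200};
GLsizei counts[] = {50, 50, 50};
glMultiDrawArraysWEBGL(GL_TRIANGLES, firsts, counts, /*drawcount=*/3);
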
diff --git a/chromium/gpu/command_buffer/build_raster_cmd_buffer.py b/chromium/gpu/command_buffer/build_raster_cmd_buffer.py
index 6411c46a434..662b926b953 100755
--- a/chromium/gpu/command_buffer/build_raster_cmd_buffer.py
+++ b/chromium/gpu/command_buffer/build_raster_cmd_buffer.py
@@ -181,40 +181,14 @@ _NAMED_TYPE_INFO = {
# not_shared: For GENn types, True if objects can't be shared between contexts
_FUNCTION_INFO = {
- 'CreateAndConsumeTexture': {
- 'type': 'NoCommand',
- 'trace_level': 2,
- },
- 'CreateAndConsumeTextureINTERNAL': {
- 'decoder_func': 'DoCreateAndConsumeTextureINTERNAL',
+ 'CopySubTextureINTERNAL': {
+ 'decoder_func': 'DoCopySubTextureINTERNAL',
'internal': True,
'type': 'PUT',
- 'count': 16, # GL_MAILBOX_SIZE_CHROMIUM
+ 'count': 32, # GL_MAILBOX_SIZE_CHROMIUM x2
'unit_test': False,
'trace_level': 2,
},
- 'CreateImageCHROMIUM': {
- 'type': 'NoCommand',
- 'cmd_args':
- 'ClientBuffer buffer, GLsizei width, GLsizei height, '
- 'GLenum internalformat',
- 'result': ['GLuint'],
- 'trace_level': 1,
- },
- 'CopySubTexture': {
- 'decoder_func': 'DoCopySubTexture',
- 'unit_test': False,
- 'trace_level': 2,
- },
- 'DestroyImageCHROMIUM': {
- 'type': 'NoCommand',
- 'trace_level': 1,
- },
- 'DeleteTextures': {
- 'type': 'DELn',
- 'resource_type': 'Texture',
- 'resource_types': 'Textures',
- },
'Finish': {
'impl_func': False,
'client_test': False,
@@ -237,40 +211,6 @@ _FUNCTION_INFO = {
'type': 'NoCommand',
'trace_level': 1,
},
- 'GetIntegerv': {
- 'type': 'GETn',
- 'result': ['SizedResult<GLint>'],
- 'decoder_func': 'DoGetIntegerv',
- 'client_test': False,
- },
- 'ProduceTextureDirect': {
- 'decoder_func': 'DoProduceTextureDirect',
- 'impl_func': False,
- 'type': 'PUT',
- 'count': 16, # GL_MAILBOX_SIZE_CHROMIUM
- 'unit_test': False,
- 'client_test': False,
- 'trace_level': 1,
- },
- 'TexParameteri': {
- 'decoder_func': 'DoTexParameteri',
- 'unit_test' : False,
- 'valid_args': {
- '2': 'GL_NEAREST'
- },
- },
- 'TexStorage2D': {
- 'decoder_func': 'DoTexStorage2D',
- 'unit_test': False,
- },
- 'WaitSync': {
- 'type': 'Custom',
- 'cmd_args': 'GLuint sync, GLbitfieldSyncFlushFlags flags, '
- 'GLuint64 timeout',
- 'impl_func': False,
- 'client_test': False,
- 'trace_level': 1,
- },
'GenQueriesEXT': {
'type': 'GENn',
'gl_test_func': 'glGenQueriesARB',
@@ -304,14 +244,6 @@ _FUNCTION_INFO = {
'type': 'NoCommand',
'gl_test_func': 'glGetQueryObjectuiv',
},
- 'BindTexImage2DCHROMIUM': {
- 'decoder_func': 'DoBindTexImage2DCHROMIUM',
- 'unit_test': False,
- },
- 'ReleaseTexImage2DCHROMIUM': {
- 'decoder_func': 'DoReleaseTexImage2DCHROMIUM',
- 'unit_test': False,
- },
'ShallowFlushCHROMIUM': {
'type': 'NoCommand',
},
@@ -350,9 +282,6 @@ _FUNCTION_INFO = {
'unit_test': False,
'trace_level': 1,
},
- 'GenSyncTokenCHROMIUM': {
- 'type': 'NoCommand',
- },
'GenUnverifiedSyncTokenCHROMIUM': {
'type': 'NoCommand',
},
@@ -360,31 +289,7 @@ _FUNCTION_INFO = {
'type': 'NoCommand',
},
'WaitSyncTokenCHROMIUM': {
- 'type': 'Custom',
- 'impl_func': False,
- 'cmd_args': 'GLint namespace_id, '
- 'GLuint64 command_buffer_id, '
- 'GLuint64 release_count',
- 'client_test': False,
- },
- 'InitializeDiscardableTextureCHROMIUM': {
- 'type': 'Custom',
- 'cmd_args': 'GLuint texture_id, uint32_t shm_id, '
- 'uint32_t shm_offset',
- 'impl_func': False,
- 'client_test': False,
- },
- 'UnlockDiscardableTextureCHROMIUM': {
- 'type': 'Custom',
- 'cmd_args': 'GLuint texture_id',
- 'impl_func': False,
- 'client_test': False,
- },
- 'LockDiscardableTextureCHROMIUM': {
- 'type': 'Custom',
- 'cmd_args': 'GLuint texture_id',
- 'impl_func': False,
- 'client_test': False,
+ 'type': 'NoCommand',
},
'BeginRasterCHROMIUM': {
'decoder_func': 'DoBeginRasterCHROMIUM',
@@ -451,29 +356,6 @@ _FUNCTION_INFO = {
'client_test': False,
'unit_test': False,
},
- 'CreateTexture': {
- 'type': 'Create',
- 'resource_type': 'Texture',
- 'resource_types': 'Textures',
- 'decoder_func': 'DoCreateTexture',
- 'not_shared': 'True',
- 'unit_test': False,
- },
- 'SetColorSpaceMetadata': {
- 'type': 'Custom',
- 'impl_func': False,
- 'client_test': False,
- 'cmd_args': 'GLuint texture_id, GLuint shm_id, GLuint shm_offset, '
- 'GLsizei color_space_size',
- },
- 'UnpremultiplyAndDitherCopyCHROMIUM': {
- 'decoder_func': 'DoUnpremultiplyAndDitherCopyCHROMIUM',
- 'cmd_args': 'GLuint source_id, GLuint dest_id, GLint x, GLint y, '
- 'GLsizei width, GLsizei height',
- 'client_test': False,
- 'unit_test': False,
- 'impl_func': True,
- },
}
diff --git a/chromium/gpu/command_buffer/client/BUILD.gn b/chromium/gpu/command_buffer/client/BUILD.gn
index 15d4780acbd..f542ceeb0ff 100644
--- a/chromium/gpu/command_buffer/client/BUILD.gn
+++ b/chromium/gpu/command_buffer/client/BUILD.gn
@@ -4,6 +4,11 @@
import("//build/config/jumbo.gni")
+declare_args() {
+ # Enable GPU client logging without DCHECK being on.
+ enable_gpu_client_logging = false
+}
+
# The files here go into the "gpu" component in a component build (with
# "command_buffer_client" and "gles2_cmd_helper" just forwarding) and goes into
# separate static libraries in non-component build.
@@ -80,6 +85,7 @@ jumbo_source_set("client_sources") {
"shared_image_interface.h",
"transfer_buffer.cc",
"transfer_buffer.h",
+ "transfer_buffer_cmd_copy_helpers.h",
]
configs += [ "//gpu:gpu_implementation" ]
@@ -205,6 +211,9 @@ jumbo_component("gles2_implementation") {
sources = gles2_implementation_source_files
defines = [ "GLES2_IMPL_IMPLEMENTATION" ]
+ if (enable_gpu_client_logging) {
+ defines += [ "GPU_ENABLE_CLIENT_LOGGING" ]
+ }
all_dependent_configs = [ "//third_party/khronos:khronos_headers" ]
deps = [
@@ -295,6 +304,9 @@ jumbo_component("gles2_implementation_no_check") {
"GLES2_IMPL_IMPLEMENTATION",
"GLES2_CONFORMANCE_TESTS=1",
]
+ if (enable_gpu_client_logging) {
+ defines += [ "GPU_ENABLE_CLIENT_LOGGING" ]
+ }
deps = [
":client",
diff --git a/chromium/gpu/command_buffer/client/buffer_tracker.h b/chromium/gpu/command_buffer/client/buffer_tracker.h
index 62508b3a1b7..db7e96f0e3f 100644
--- a/chromium/gpu/command_buffer/client/buffer_tracker.h
+++ b/chromium/gpu/command_buffer/client/buffer_tracker.h
@@ -7,7 +7,6 @@
#include <stdint.h>
-#include "base/containers/hash_tables.h"
#include "base/macros.h"
#include "gles2_impl_export.h"
#include "gpu/command_buffer/common/gles2_cmd_format.h"
@@ -105,7 +104,7 @@ class GLES2_IMPL_EXPORT BufferTracker {
void Free(Buffer* buffer);
private:
- typedef base::hash_map<GLuint, Buffer*> BufferMap;
+ typedef std::unordered_map<GLuint, Buffer*> BufferMap;
MappedMemoryManager* mapped_memory_;
BufferMap buffers_;
diff --git a/chromium/gpu/command_buffer/client/buffer_tracker_unittest.cc b/chromium/gpu/command_buffer/client/buffer_tracker_unittest.cc
index 2125fc61ef7..1b85662cedc 100644
--- a/chromium/gpu/command_buffer/client/buffer_tracker_unittest.cc
+++ b/chromium/gpu/command_buffer/client/buffer_tracker_unittest.cc
@@ -30,7 +30,7 @@ class MockClientCommandBufferImpl : public MockClientCommandBuffer {
context_lost_(false) {}
~MockClientCommandBufferImpl() override = default;
- scoped_refptr<gpu::Buffer> CreateTransferBuffer(size_t size,
+ scoped_refptr<gpu::Buffer> CreateTransferBuffer(uint32_t size,
int32_t* id) override {
if (context_lost_) {
*id = -1;
diff --git a/chromium/gpu/command_buffer/client/client_discardable_manager.cc b/chromium/gpu/command_buffer/client/client_discardable_manager.cc
index 7cc2230919a..ec946ca3cd1 100644
--- a/chromium/gpu/command_buffer/client/client_discardable_manager.cc
+++ b/chromium/gpu/command_buffer/client/client_discardable_manager.cc
@@ -6,6 +6,7 @@
#include "base/atomic_sequence_num.h"
#include "base/containers/flat_set.h"
+#include "base/numerics/checked_math.h"
#include "base/system/sys_info.h"
namespace gpu {
@@ -108,17 +109,17 @@ void FreeOffsetSet::ReturnFreeOffset(uint32_t offset) {
// Returns the size of the allocation which ClientDiscardableManager will
// sub-allocate from. This should be at least as big as the minimum shared
// memory allocation size.
-size_t AllocationSize() {
+uint32_t AllocationSize() {
#if defined(OS_NACL)
// base::SysInfo isn't available under NaCl.
- size_t allocation_size = getpagesize();
+ size_t system_allocation_size = getpagesize();
#else
- size_t allocation_size = base::SysInfo::VMAllocationGranularity();
+ size_t system_allocation_size = base::SysInfo::VMAllocationGranularity();
#endif
+ DCHECK(base::CheckedNumeric<uint32_t>(system_allocation_size).IsValid());
// If the allocation is small (less than 2K), round it up to at least 2K.
- allocation_size = std::max(static_cast<size_t>(2048), allocation_size);
- return allocation_size;
+ return std::max(2048u, static_cast<uint32_t>(system_allocation_size));
}
ClientDiscardableHandle::Id GetNextHandleId() {
diff --git a/chromium/gpu/command_buffer/client/client_discardable_manager.h b/chromium/gpu/command_buffer/client/client_discardable_manager.h
index a5331085b0d..21888fc06f5 100644
--- a/chromium/gpu/command_buffer/client/client_discardable_manager.h
+++ b/chromium/gpu/command_buffer/client/client_discardable_manager.h
@@ -62,10 +62,9 @@ class GPU_EXPORT ClientDiscardableManager {
bool CreateNewAllocation(CommandBuffer* command_buffer);
private:
- size_t allocation_size_;
+ uint32_t allocation_size_;
size_t element_size_ = sizeof(base::subtle::Atomic32);
- uint32_t elements_per_allocation_ =
- static_cast<uint32_t>(allocation_size_ / element_size_);
+ uint32_t elements_per_allocation_ = allocation_size_ / element_size_;
struct Allocation;
std::vector<std::unique_ptr<Allocation>> allocations_;
diff --git a/chromium/gpu/command_buffer/client/client_discardable_manager_unittest.cc b/chromium/gpu/command_buffer/client/client_discardable_manager_unittest.cc
index 0c967dafbab..2c811c66ef4 100644
--- a/chromium/gpu/command_buffer/client/client_discardable_manager_unittest.cc
+++ b/chromium/gpu/command_buffer/client/client_discardable_manager_unittest.cc
@@ -31,15 +31,11 @@ class FakeCommandBuffer : public CommandBuffer {
return State();
}
void SetGetBuffer(int32_t transfer_buffer_id) override { NOTREACHED(); }
- scoped_refptr<gpu::Buffer> CreateTransferBuffer(size_t size,
+ scoped_refptr<gpu::Buffer> CreateTransferBuffer(uint32_t size,
int32_t* id) override {
*id = next_id_++;
active_ids_.insert(*id);
- base::UnsafeSharedMemoryRegion shmem_region =
- base::UnsafeSharedMemoryRegion::Create(size);
- base::WritableSharedMemoryMapping shmem_mapping = shmem_region.Map();
- return MakeBufferFromSharedMemory(std::move(shmem_region),
- std::move(shmem_mapping));
+ return MakeMemoryBuffer(size);
}
void DestroyTransferBuffer(int32_t id) override {
auto found = active_ids_.find(id);
diff --git a/chromium/gpu/command_buffer/client/client_font_manager.cc b/chromium/gpu/command_buffer/client/client_font_manager.cc
index b0985c55a33..6dc6f269cdb 100644
--- a/chromium/gpu/command_buffer/client/client_font_manager.cc
+++ b/chromium/gpu/command_buffer/client/client_font_manager.cc
@@ -11,7 +11,7 @@ namespace {
class Serializer {
public:
- Serializer(char* memory, size_t memory_size)
+ Serializer(char* memory, uint32_t memory_size)
: memory_(memory), memory_size_(memory_size) {}
~Serializer() = default;
@@ -21,7 +21,7 @@ class Serializer {
WriteData(val, sizeof(T), alignof(T));
}
- void WriteData(const void* input, size_t bytes, size_t alignment) {
+ void WriteData(const void* input, uint32_t bytes, size_t alignment) {
AlignMemory(bytes, alignment);
if (bytes == 0)
return;
@@ -32,7 +32,7 @@ class Serializer {
}
private:
- void AlignMemory(size_t size, size_t alignment) {
+ void AlignMemory(uint32_t size, size_t alignment) {
// Due to the math below, alignment must be a power of two.
DCHECK_GT(alignment, 0u);
DCHECK_EQ(alignment & (alignment - 1), 0u);
@@ -46,8 +46,8 @@ class Serializer {
}
char* memory_ = nullptr;
- size_t memory_size_ = 0u;
- size_t bytes_written_ = 0u;
+ uint32_t memory_size_ = 0u;
+ uint32_t bytes_written_ = 0u;
};
} // namespace
@@ -109,7 +109,7 @@ void ClientFontManager::Serialize() {
std::vector<uint8_t> strike_data;
strike_server_.writeStrikeData(&strike_data);
- const uint64_t num_handles_created =
+ const uint32_t num_handles_created =
last_allocated_handle_id_ - last_serialized_handle_id_;
if (strike_data.size() == 0u && num_handles_created == 0u &&
locked_handles_.size() == 0u) {
@@ -117,19 +117,29 @@ void ClientFontManager::Serialize() {
return;
}
- // Size requires for serialization.
- size_t bytes_required =
- // Skia data size.
- +sizeof(uint64_t) + alignof(uint64_t) + strike_data.size() +
- 16
- // num of handles created + SerializableHandles.
- + sizeof(uint64_t) + alignof(uint64_t) +
- num_handles_created * sizeof(SerializableSkiaHandle) +
- alignof(SerializableSkiaHandle) +
- // num of handles locked + DiscardableHandleIds.
- +sizeof(uint64_t) + alignof(uint64_t) +
- locked_handles_.size() * sizeof(SkDiscardableHandleId) +
- alignof(SkDiscardableHandleId);
+ // Size required for serialization.
+ base::CheckedNumeric<uint32_t> checked_bytes_required = 0;
+ // Skia data size.
+ checked_bytes_required += sizeof(uint32_t) + alignof(uint32_t) + 16;
+ checked_bytes_required += strike_data.size();
+
+ // num of handles created + SerializableHandles.
+ checked_bytes_required +=
+ sizeof(uint32_t) + alignof(uint32_t) + alignof(SerializableSkiaHandle);
+ checked_bytes_required +=
+ base::CheckMul(num_handles_created, sizeof(SerializableSkiaHandle));
+
+ // num of handles locked + DiscardableHandleIds.
+ checked_bytes_required +=
+ sizeof(uint32_t) + alignof(uint32_t) + alignof(SkDiscardableHandleId);
+ checked_bytes_required +=
+ base::CheckMul(locked_handles_.size(), sizeof(SkDiscardableHandleId));
+
+ uint32_t bytes_required = 0;
+ if (!checked_bytes_required.AssignIfValid(&bytes_required)) {
+ DLOG(FATAL) << "ClientFontManager::Serialize: font buffer overflow";
+ return;
+ }
// Allocate memory.
void* memory = client_->MapFontBuffer(bytes_required);
@@ -141,7 +151,7 @@ void ClientFontManager::Serialize() {
Serializer serializer(reinterpret_cast<char*>(memory), bytes_required);
// Serialize all new handles.
- serializer.Write<uint64_t>(&num_handles_created);
+ serializer.Write<uint32_t>(&num_handles_created);
for (SkDiscardableHandleId handle_id = last_serialized_handle_id_ + 1;
handle_id <= last_allocated_handle_id_; handle_id++) {
auto it = discardable_handle_map_.find(handle_id);
@@ -157,14 +167,16 @@ void ClientFontManager::Serialize() {
}
// Serialize all locked handle ids, so the raster unlocks them when done.
- const uint64_t num_locked_handles = locked_handles_.size();
- serializer.Write<uint64_t>(&num_locked_handles);
+ DCHECK(base::IsValueInRangeForNumericType<uint32_t>(locked_handles_.size()));
+ const uint32_t num_locked_handles = locked_handles_.size();
+ serializer.Write<uint32_t>(&num_locked_handles);
for (auto handle_id : locked_handles_)
serializer.Write<SkDiscardableHandleId>(&handle_id);
// Serialize skia data.
- const uint64_t skia_data_size = strike_data.size();
- serializer.Write<uint64_t>(&skia_data_size);
+ DCHECK(base::IsValueInRangeForNumericType<uint32_t>(strike_data.size()));
+ const uint32_t skia_data_size = strike_data.size();
+ serializer.Write<uint32_t>(&skia_data_size);
serializer.WriteData(strike_data.data(), strike_data.size(), 16);
// Reset all state for what has been serialized.
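
Serializer::AlignMemory's DCHECKs above require alignment to be a nonzero power of two because the padding computation it guards relies on the usual mask trick. A self-contained sketch of that computation (the helper name is illustrative):

#include <cstdint>

// Rounds offset up to the next multiple of alignment without a division;
// valid only when alignment is a nonzero power of two, hence the DCHECKs.
uint32_t AlignUp(uint32_t offset, uint32_t alignment) {
  return (offset + alignment - 1) & ~(alignment - 1);
}
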
diff --git a/chromium/gpu/command_buffer/client/client_font_manager.h b/chromium/gpu/command_buffer/client/client_font_manager.h
index 0b1a6974aad..5b9062328f6 100644
--- a/chromium/gpu/command_buffer/client/client_font_manager.h
+++ b/chromium/gpu/command_buffer/client/client_font_manager.h
@@ -22,7 +22,7 @@ class RASTER_EXPORT ClientFontManager
public:
virtual ~Client() {}
- virtual void* MapFontBuffer(size_t size) = 0;
+ virtual void* MapFontBuffer(uint32_t size) = 0;
};
ClientFontManager(Client* client, CommandBuffer* command_buffer);
diff --git a/chromium/gpu/command_buffer/client/client_test_helper.cc b/chromium/gpu/command_buffer/client/client_test_helper.cc
index afa81d1168b..89570ea3536 100644
--- a/chromium/gpu/command_buffer/client/client_test_helper.cc
+++ b/chromium/gpu/command_buffer/client/client_test_helper.cc
@@ -51,7 +51,7 @@ void FakeCommandBufferServiceBase::SetGetBufferHelper(int transfer_buffer_id,
}
scoped_refptr<gpu::Buffer>
-FakeCommandBufferServiceBase::CreateTransferBufferHelper(size_t size,
+FakeCommandBufferServiceBase::CreateTransferBufferHelper(uint32_t size,
int32_t* id) {
*id = GetNextFreeTransferBufferId();
if (*id >= 0) {
@@ -138,7 +138,7 @@ void MockClientCommandBuffer::SetGetBuffer(int transfer_buffer_id) {
}
scoped_refptr<gpu::Buffer> MockClientCommandBuffer::CreateTransferBuffer(
- size_t size,
+ uint32_t size,
int32_t* id) {
return CreateTransferBufferHelper(size, id);
}
diff --git a/chromium/gpu/command_buffer/client/client_test_helper.h b/chromium/gpu/command_buffer/client/client_test_helper.h
index bdf51f95556..a293fe6de92 100644
--- a/chromium/gpu/command_buffer/client/client_test_helper.h
+++ b/chromium/gpu/command_buffer/client/client_test_helper.h
@@ -45,7 +45,7 @@ class FakeCommandBufferServiceBase : public CommandBufferServiceBase {
void FlushHelper(int32_t put_offset);
void SetGetBufferHelper(int transfer_buffer_id, int32_t token);
- scoped_refptr<gpu::Buffer> CreateTransferBufferHelper(size_t size,
+ scoped_refptr<gpu::Buffer> CreateTransferBufferHelper(uint32_t size,
int32_t* id);
void DestroyTransferBufferHelper(int32_t id);
@@ -66,7 +66,7 @@ class MockClientCommandBuffer : public CommandBuffer,
int32_t start,
int32_t end) override;
void SetGetBuffer(int transfer_buffer_id) override;
- scoped_refptr<gpu::Buffer> CreateTransferBuffer(size_t size,
+ scoped_refptr<gpu::Buffer> CreateTransferBuffer(uint32_t size,
int32_t* id) override;
// This is so we can use all the gmock functions when Flush is called.
@@ -134,7 +134,7 @@ class MockClientGpuControl : public GpuControl {
DoSignalSyncToken(sync_token, &callback);
}
- MOCK_METHOD1(WaitSyncTokenHint, void(const SyncToken&));
+ MOCK_METHOD1(WaitSyncToken, void(const SyncToken&));
MOCK_METHOD1(CanWaitUnverifiedSyncToken, bool(const SyncToken&));
MOCK_METHOD2(CreateGpuFence,
void(uint32_t gpu_fence_id, ClientGpuFence source));
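
With the rename from WaitSyncTokenHint to WaitSyncToken, tests set their
expectations against the new name. A hypothetical gmock usage, assuming a
MockClientGpuControl |gpu_control| and a SyncToken |token| built by the test:

    // The implementation under test should forward the verified token once.
    EXPECT_CALL(gpu_control, WaitSyncToken(token)).Times(1);
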
diff --git a/chromium/gpu/command_buffer/client/client_transfer_cache.cc b/chromium/gpu/command_buffer/client/client_transfer_cache.cc
index 9e2e930d5c7..b762dacadae 100644
--- a/chromium/gpu/command_buffer/client/client_transfer_cache.cc
+++ b/chromium/gpu/command_buffer/client/client_transfer_cache.cc
@@ -11,7 +11,7 @@ ClientTransferCache::ClientTransferCache(Client* client) : client_(client) {}
ClientTransferCache::~ClientTransferCache() = default;
void* ClientTransferCache::MapEntry(MappedMemoryManager* mapped_memory,
- size_t size) {
+ uint32_t size) {
DCHECK(!mapped_ptr_);
DCHECK(!transfer_buffer_ptr_);
mapped_ptr_.emplace(size, client_->cmd_buffer_helper(), mapped_memory);
@@ -24,7 +24,7 @@ void* ClientTransferCache::MapEntry(MappedMemoryManager* mapped_memory,
void* ClientTransferCache::MapTransferBufferEntry(
TransferBufferInterface* transfer_buffer,
- size_t size) {
+ uint32_t size) {
DCHECK(!mapped_ptr_);
DCHECK(!transfer_buffer_ptr_);
transfer_buffer_ptr_.emplace(size, client_->cmd_buffer_helper(),
@@ -64,7 +64,7 @@ void ClientTransferCache::AddTransferCacheEntry(uint32_t type,
uint32_t id,
uint32_t shm_id,
uint32_t shm_offset,
- size_t size) {
+ uint32_t size) {
DCHECK(!mapped_ptr_);
EntryKey key(type, id);
diff --git a/chromium/gpu/command_buffer/client/client_transfer_cache.h b/chromium/gpu/command_buffer/client/client_transfer_cache.h
index 45325e1e80b..96f6a919f9c 100644
--- a/chromium/gpu/command_buffer/client/client_transfer_cache.h
+++ b/chromium/gpu/command_buffer/client/client_transfer_cache.h
@@ -79,7 +79,7 @@ class GLES2_IMPL_EXPORT ClientTransferCache {
uint32_t id,
uint32_t shm_id,
uint32_t shm_offset,
- size_t size);
+ uint32_t size);
// Similar to AddTransferCacheEntry() but doesn't use |client_| to trigger the
// creation of the service-side cache entry. Instead, it calls
@@ -97,9 +97,9 @@ class GLES2_IMPL_EXPORT ClientTransferCache {
base::OnceCallback<void(ClientDiscardableHandle)> create_entry_cb);
// Map(of either type) must always be followed by an Unmap.
- void* MapEntry(MappedMemoryManager* mapped_memory, size_t size);
+ void* MapEntry(MappedMemoryManager* mapped_memory, uint32_t size);
void* MapTransferBufferEntry(TransferBufferInterface* transfer_buffer,
- size_t size);
+ uint32_t size);
void UnmapAndCreateEntry(uint32_t type, uint32_t id);
bool LockEntry(uint32_t type, uint32_t id);
void UnlockEntries(const std::vector<std::pair<uint32_t, uint32_t>>& entries);
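
Map (of either type) must be followed by an Unmap, which also creates the
service-side entry. A sketch of the pairing, with |cache|, |mapped_memory|,
|serialized|, |type|, and |id| assumed to be supplied by the caller:

    void* ptr = cache->MapEntry(mapped_memory,
                                static_cast<uint32_t>(serialized.size()));
    if (ptr) {
      memcpy(ptr, serialized.data(), serialized.size());  // entry payload
      cache->UnmapAndCreateEntry(type, id);  // registers service-side entry
    }
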
diff --git a/chromium/gpu/command_buffer/client/cmd_buffer_helper.cc b/chromium/gpu/command_buffer/client/cmd_buffer_helper.cc
index 8d95a8053b2..5a764e48d45 100644
--- a/chromium/gpu/command_buffer/client/cmd_buffer_helper.cc
+++ b/chromium/gpu/command_buffer/client/cmd_buffer_helper.cc
@@ -133,7 +133,7 @@ void CommandBufferHelper::FreeRingBuffer() {
}
}
-gpu::ContextResult CommandBufferHelper::Initialize(int32_t ring_buffer_size) {
+gpu::ContextResult CommandBufferHelper::Initialize(uint32_t ring_buffer_size) {
ring_buffer_size_ = ring_buffer_size;
if (!AllocateRingBuffer()) {
// This would fail if CreateTransferBuffer fails, which will not fail for
diff --git a/chromium/gpu/command_buffer/client/cmd_buffer_helper.h b/chromium/gpu/command_buffer/client/cmd_buffer_helper.h
index e1d9c0e7259..d6ff4d05233 100644
--- a/chromium/gpu/command_buffer/client/cmd_buffer_helper.h
+++ b/chromium/gpu/command_buffer/client/cmd_buffer_helper.h
@@ -60,7 +60,7 @@ class GPU_EXPORT CommandBufferHelper {
// Parameters:
// ring_buffer_size: The size of the ring buffer portion of the command
// buffer.
- gpu::ContextResult Initialize(int32_t ring_buffer_size);
+ gpu::ContextResult Initialize(uint32_t ring_buffer_size);
// Sets whether the command buffer should automatically flush periodically
// to try to increase performance. Defaults to true.
@@ -293,7 +293,7 @@ class GPU_EXPORT CommandBufferHelper {
CommandBuffer* const command_buffer_;
int32_t ring_buffer_id_ = -1;
- int32_t ring_buffer_size_ = 0;
+ uint32_t ring_buffer_size_ = 0;
scoped_refptr<gpu::Buffer> ring_buffer_;
CommandBufferEntry* entries_ = nullptr;
int32_t total_entry_count_ = 0; // the total number of entries
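
Initialize now takes an unsigned ring-buffer size, so a negative value can no
longer slip through as an int32_t. A hypothetical setup with a 1 MiB ring
(constructor shape assumed; only the member |command_buffer_| is shown above):

    CommandBufferHelper helper(command_buffer);
    if (helper.Initialize(1024 * 1024) != gpu::ContextResult::kSuccess) {
      // AllocateRingBuffer failed; the helper cannot issue commands.
    }
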
diff --git a/chromium/gpu/command_buffer/client/command_buffer_direct_locked.cc b/chromium/gpu/command_buffer/client/command_buffer_direct_locked.cc
index f51210e97b0..1d47fd24d10 100644
--- a/chromium/gpu/command_buffer/client/command_buffer_direct_locked.cc
+++ b/chromium/gpu/command_buffer/client/command_buffer_direct_locked.cc
@@ -42,7 +42,7 @@ CommandBuffer::State CommandBufferDirectLocked::WaitForGetOffsetInRange(
}
scoped_refptr<Buffer> CommandBufferDirectLocked::CreateTransferBuffer(
- size_t size,
+ uint32_t size,
int32_t* id) {
if (fail_create_transfer_buffer_) {
*id = -1;
diff --git a/chromium/gpu/command_buffer/client/command_buffer_direct_locked.h b/chromium/gpu/command_buffer/client/command_buffer_direct_locked.h
index 0d55f1186b0..c7d32cdf38e 100644
--- a/chromium/gpu/command_buffer/client/command_buffer_direct_locked.h
+++ b/chromium/gpu/command_buffer/client/command_buffer_direct_locked.h
@@ -24,7 +24,8 @@ class CommandBufferDirectLocked : public CommandBufferDirect {
CommandBuffer::State WaitForGetOffsetInRange(uint32_t set_get_buffer_count,
int32_t start,
int32_t end) override;
- scoped_refptr<Buffer> CreateTransferBuffer(size_t size, int32_t* id) override;
+ scoped_refptr<Buffer> CreateTransferBuffer(uint32_t size,
+ int32_t* id) override;
void LockFlush() { flush_locked_ = true; }
diff --git a/chromium/gpu/command_buffer/client/context_support.h b/chromium/gpu/command_buffer/client/context_support.h
index 3e4fed22080..a3438f75d2a 100644
--- a/chromium/gpu/command_buffer/client/context_support.h
+++ b/chromium/gpu/command_buffer/client/context_support.h
@@ -116,7 +116,7 @@ class ContextSupport {
// Maps a buffer that will receive serialized data for an entry to be created.
// Returns nullptr on failure. If success, must be paired with a call to
// UnmapAndCreateTransferCacheEntry.
- virtual void* MapTransferCacheEntry(size_t serialized_size) = 0;
+ virtual void* MapTransferCacheEntry(uint32_t serialized_size) = 0;
// Unmaps the buffer and creates a transfer cache entry with the serialized
// data.
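
Per the contract above, a non-null mapping must be paired with
UnmapAndCreateTransferCacheEntry. A sketch against the ContextSupport
interface, with |support|, |serialized|, |type|, and |id| assumed:

    void* buf = support->MapTransferCacheEntry(
        static_cast<uint32_t>(serialized.size()));
    if (buf) {
      memcpy(buf, serialized.data(), serialized.size());
      support->UnmapAndCreateTransferCacheEntry(type, id);
    }
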
diff --git a/chromium/gpu/command_buffer/client/fenced_allocator.cc b/chromium/gpu/command_buffer/client/fenced_allocator.cc
index 4ca5ff9fffd..50a96aa4cb0 100644
--- a/chromium/gpu/command_buffer/client/fenced_allocator.cc
+++ b/chromium/gpu/command_buffer/client/fenced_allocator.cc
@@ -17,19 +17,20 @@ namespace gpu {
namespace {
// Round down to the largest multiple of kAllocAlignment no greater than |size|.
-unsigned int RoundDown(unsigned int size) {
+uint32_t RoundDown(uint32_t size) {
return size & ~(FencedAllocator::kAllocAlignment - 1);
}
// Round up to the smallest multiple of kAllocAlignment no smaller than |size|.
-unsigned int RoundUp(unsigned int size) {
- return (size + (FencedAllocator::kAllocAlignment - 1)) &
- ~(FencedAllocator::kAllocAlignment - 1);
+base::CheckedNumeric<uint32_t> RoundUp(uint32_t size) {
+ return (base::CheckedNumeric<uint32_t>(size) +
+ (FencedAllocator::kAllocAlignment - 1)) &
+ ~(FencedAllocator::kAllocAlignment - 1);
}
} // namespace
-FencedAllocator::FencedAllocator(unsigned int size, CommandBufferHelper* helper)
+FencedAllocator::FencedAllocator(uint32_t size, CommandBufferHelper* helper)
: helper_(helper), bytes_in_use_(0) {
Block block = { FREE, 0, RoundDown(size), kUnusedToken };
blocks_.push_back(block);
@@ -47,7 +48,7 @@ FencedAllocator::~FencedAllocator() {
// blocks, waiting for them. The current implementation isn't smart about
// optimizing what to wait for, just looks inside the block in order (first-fit
// as well).
-FencedAllocator::Offset FencedAllocator::Alloc(unsigned int size) {
+FencedAllocator::Offset FencedAllocator::Alloc(uint32_t size) {
// size of 0 is not allowed because it would be inconsistent to only sometimes
// have it succeed. Example: Alloc(SizeOfBuffer), Alloc(0).
if (size == 0) {
@@ -55,24 +56,27 @@ FencedAllocator::Offset FencedAllocator::Alloc(unsigned int size) {
}
// Round up the allocation size to ensure alignment.
- size = RoundUp(size);
+ uint32_t aligned_size = 0;
+ if (!RoundUp(size).AssignIfValid(&aligned_size)) {
+ return kInvalidOffset;
+ }
// Try first to allocate in a free block.
- for (unsigned int i = 0; i < blocks_.size(); ++i) {
+ for (uint32_t i = 0; i < blocks_.size(); ++i) {
Block &block = blocks_[i];
- if (block.state == FREE && block.size >= size) {
- return AllocInBlock(i, size);
+ if (block.state == FREE && block.size >= aligned_size) {
+ return AllocInBlock(i, aligned_size);
}
}
// No free block is available. Look for blocks pending tokens, and wait for
// them to be re-usable.
- for (unsigned int i = 0; i < blocks_.size(); ++i) {
+ for (uint32_t i = 0; i < blocks_.size(); ++i) {
if (blocks_[i].state != FREE_PENDING_TOKEN)
continue;
i = WaitForTokenAndFreeBlock(i);
- if (blocks_[i].size >= size)
- return AllocInBlock(i, size);
+ if (blocks_[i].size >= aligned_size)
+ return AllocInBlock(i, aligned_size);
}
return kInvalidOffset;
}
@@ -105,10 +109,10 @@ void FencedAllocator::FreePendingToken(FencedAllocator::Offset offset,
}
// Gets the max of the size of the blocks marked as free.
-unsigned int FencedAllocator::GetLargestFreeSize() {
+uint32_t FencedAllocator::GetLargestFreeSize() {
FreeUnused();
- unsigned int max_size = 0;
- for (unsigned int i = 0; i < blocks_.size(); ++i) {
+ uint32_t max_size = 0;
+ for (uint32_t i = 0; i < blocks_.size(); ++i) {
Block &block = blocks_[i];
if (block.state == FREE)
max_size = std::max(max_size, block.size);
@@ -118,10 +122,10 @@ unsigned int FencedAllocator::GetLargestFreeSize() {
// Gets the size of the largest segment of blocks that are either FREE or
// FREE_PENDING_TOKEN.
-unsigned int FencedAllocator::GetLargestFreeOrPendingSize() {
- unsigned int max_size = 0;
- unsigned int current_size = 0;
- for (unsigned int i = 0; i < blocks_.size(); ++i) {
+uint32_t FencedAllocator::GetLargestFreeOrPendingSize() {
+ uint32_t max_size = 0;
+ uint32_t current_size = 0;
+ for (uint32_t i = 0; i < blocks_.size(); ++i) {
Block &block = blocks_[i];
if (block.state == IN_USE) {
max_size = std::max(max_size, current_size);
@@ -135,10 +139,10 @@ unsigned int FencedAllocator::GetLargestFreeOrPendingSize() {
}
// Gets the total size of all blocks marked as free.
-unsigned int FencedAllocator::GetFreeSize() {
+uint32_t FencedAllocator::GetFreeSize() {
FreeUnused();
- unsigned int size = 0;
- for (unsigned int i = 0; i < blocks_.size(); ++i) {
+ uint32_t size = 0;
+ for (uint32_t i = 0; i < blocks_.size(); ++i) {
Block& block = blocks_[i];
if (block.state == FREE)
size += block.size;
@@ -152,7 +156,7 @@ unsigned int FencedAllocator::GetFreeSize() {
// - the successive offsets match the block sizes, and they are in order.
bool FencedAllocator::CheckConsistency() {
if (blocks_.size() < 1) return false;
- for (unsigned int i = 0; i < blocks_.size() - 1; ++i) {
+ for (uint32_t i = 0; i < blocks_.size() - 1; ++i) {
Block &current = blocks_[i];
Block &next = blocks_[i + 1];
// This test is NOT included in the next one, because offset is unsigned.
@@ -216,7 +220,7 @@ FencedAllocator::BlockIndex FencedAllocator::WaitForTokenAndFreeBlock(
// Frees any blocks pending a token for which the token has been read.
void FencedAllocator::FreeUnused() {
- for (unsigned int i = 0; i < blocks_.size();) {
+ for (uint32_t i = 0; i < blocks_.size();) {
Block& block = blocks_[i];
if (block.state == FREE_PENDING_TOKEN &&
helper_->HasTokenPassed(block.token)) {
@@ -231,7 +235,7 @@ void FencedAllocator::FreeUnused() {
// If the block is exactly the requested size, simply mark it IN_USE, otherwise
// split it and mark the first one (of the requested size) IN_USE.
FencedAllocator::Offset FencedAllocator::AllocInBlock(BlockIndex index,
- unsigned int size) {
+ uint32_t size) {
Block &block = blocks_[index];
DCHECK_GE(block.size, size);
DCHECK_EQ(block.state, FREE);
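
Because RoundUp now returns a base::CheckedNumeric<uint32_t>, a size within
kAllocAlignment - 1 of UINT32_MAX no longer wraps silently to a small value;
AssignIfValid() fails and Alloc returns kInvalidOffset. An isolated
illustration of the rounding, assuming base/numerics/safe_math.h:

    #include <cstdint>
    #include "base/numerics/safe_math.h"

    // Align |size| up to 16 bytes, propagating overflow instead of wrapping.
    base::CheckedNumeric<uint32_t> RoundUp16(uint32_t size) {
      return (base::CheckedNumeric<uint32_t>(size) + 15u) & ~15u;
    }

    // RoundUp16(0xFFFFFFF8u).IsValid() is false: the +15 overflows uint32_t,
    // so AssignIfValid() reports failure to the caller.
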
diff --git a/chromium/gpu/command_buffer/client/fenced_allocator.h b/chromium/gpu/command_buffer/client/fenced_allocator.h
index f820ca23f91..7b238dd8249 100644
--- a/chromium/gpu/command_buffer/client/fenced_allocator.h
+++ b/chromium/gpu/command_buffer/client/fenced_allocator.h
@@ -32,19 +32,19 @@ class CommandBufferHelper;
// (see http://www.corp.google.com/eng/doc/cpp_primer.html#thread_safety).
class GPU_EXPORT FencedAllocator {
public:
- typedef unsigned int Offset;
+ typedef uint32_t Offset;
// Invalid offset, returned by Alloc in case of failure.
enum : Offset { kInvalidOffset = 0xffffffffU };
// Allocation alignment, must be a power of two.
- enum : unsigned int { kAllocAlignment = 16 };
+ enum : uint32_t { kAllocAlignment = 16 };
// Status of a block of memory, for book-keeping.
enum State { IN_USE, FREE, FREE_PENDING_TOKEN };
// Creates a FencedAllocator. Note that the size of the buffer is passed, but
// not its base address: everything is handled as offsets into the buffer.
- FencedAllocator(unsigned int size, CommandBufferHelper* helper);
+ FencedAllocator(uint32_t size, CommandBufferHelper* helper);
~FencedAllocator();
@@ -58,7 +58,7 @@ class GPU_EXPORT FencedAllocator {
// Returns:
// the offset of the allocated memory block, or kInvalidOffset if out of
// memory.
- Offset Alloc(unsigned int size);
+ Offset Alloc(uint32_t size);
// Frees a block of memory.
//
@@ -78,15 +78,15 @@ class GPU_EXPORT FencedAllocator {
void FreeUnused();
// Gets the size of the largest free block that is available without waiting.
- unsigned int GetLargestFreeSize();
+ uint32_t GetLargestFreeSize();
// Gets the size of the largest free block that can be allocated if the
// caller can wait. Allocating a block of this size will succeed, but may
// block.
- unsigned int GetLargestFreeOrPendingSize();
+ uint32_t GetLargestFreeOrPendingSize();
// Gets the total size of all free blocks that are available without waiting.
- unsigned int GetFreeSize();
+ uint32_t GetFreeSize();
// Checks for consistency inside the book-keeping structures. Used for
// testing.
@@ -96,7 +96,7 @@ class GPU_EXPORT FencedAllocator {
bool InUseOrFreePending();
// Return bytes of memory that is IN_USE
- size_t bytes_in_use() const { return bytes_in_use_; }
+ uint32_t bytes_in_use() const { return bytes_in_use_; }
// Gets the status of a block, as well as the corresponding token if
// FREE_PENDING_TOKEN.
@@ -107,7 +107,7 @@ class GPU_EXPORT FencedAllocator {
struct Block {
State state;
Offset offset;
- unsigned int size;
+ uint32_t size;
int32_t token; // token to wait for in the FREE_PENDING_TOKEN case.
};
@@ -120,7 +120,7 @@ class GPU_EXPORT FencedAllocator {
};
typedef std::vector<Block> Container;
- typedef unsigned int BlockIndex;
+ typedef uint32_t BlockIndex;
static const int32_t kUnusedToken = 0;
@@ -142,11 +142,11 @@ class GPU_EXPORT FencedAllocator {
// NOTE: this will invalidate block indices.
// Returns the offset of the allocated block (NOTE: this is different from
// the other functions that return a block index).
- Offset AllocInBlock(BlockIndex index, unsigned int size);
+ Offset AllocInBlock(BlockIndex index, uint32_t size);
CommandBufferHelper *helper_;
Container blocks_;
- size_t bytes_in_use_;
+ uint32_t bytes_in_use_;
DISALLOW_IMPLICIT_CONSTRUCTORS(FencedAllocator);
};
@@ -155,9 +155,7 @@ class GPU_EXPORT FencedAllocator {
// instead of offsets.
class FencedAllocatorWrapper {
public:
- FencedAllocatorWrapper(unsigned int size,
- CommandBufferHelper* helper,
- void* base)
+ FencedAllocatorWrapper(uint32_t size, CommandBufferHelper* helper, void* base)
: allocator_(size, helper), base_(base) {}
// Allocates a block of memory. If the buffer is out of directly available
@@ -170,7 +168,7 @@ class FencedAllocatorWrapper {
// Returns:
// the pointer to the allocated memory block, or NULL if out of
// memory.
- void *Alloc(unsigned int size) {
+ void* Alloc(uint32_t size) {
FencedAllocator::Offset offset = allocator_.Alloc(size);
return GetPointer(offset);
}
@@ -186,7 +184,8 @@ class FencedAllocatorWrapper {
// Returns:
// the pointer to the allocated memory block, or NULL if out of
// memory.
- template <typename T> T *AllocTyped(unsigned int count) {
+ template <typename T>
+ T* AllocTyped(uint32_t count) {
return static_cast<T *>(Alloc(count * sizeof(T)));
}
@@ -233,18 +232,16 @@ class FencedAllocatorWrapper {
}
// Gets the size of the largest free block that is available without waiting.
- unsigned int GetLargestFreeSize() {
- return allocator_.GetLargestFreeSize();
- }
+ uint32_t GetLargestFreeSize() { return allocator_.GetLargestFreeSize(); }
// Gets the size of the largest free block that can be allocated if the
// caller can wait.
- unsigned int GetLargestFreeOrPendingSize() {
+ uint32_t GetLargestFreeOrPendingSize() {
return allocator_.GetLargestFreeOrPendingSize();
}
// Gets the total size of all free blocks.
- unsigned int GetFreeSize() { return allocator_.GetFreeSize(); }
+ uint32_t GetFreeSize() { return allocator_.GetFreeSize(); }
// Checks for consistency inside the book-keeping structures. Used for
// testing.
@@ -257,7 +254,7 @@ class FencedAllocatorWrapper {
FencedAllocator &allocator() { return allocator_; }
- size_t bytes_in_use() const { return allocator_.bytes_in_use(); }
+ uint32_t bytes_in_use() const { return allocator_.bytes_in_use(); }
FencedAllocator::State GetPointerStatusForTest(void* pointer,
int32_t* token_if_pending) {
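
The wrapper deals in pointers: an Alloc failure surfaces as NULL
(kInvalidOffset translated through GetPointer), so callers only need a null
check. Note that AllocTyped multiplies count * sizeof(T) without an overflow
check, so counts must be trusted. A hypothetical use, with |wrapper| built
over a mapped buffer:

    // Space for 64 uint32_t entries, 16-byte aligned by the allocator.
    uint32_t* entries = wrapper.AllocTyped<uint32_t>(64);
    if (!entries) {
      return;  // out of space: Alloc returned kInvalidOffset
    }
    // ... fill the 64 entries; free them with a token once the command
    // that reads them has been issued (wrapper free calls not shown here).
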
diff --git a/chromium/gpu/command_buffer/client/gles2_c_lib_autogen.h b/chromium/gpu/command_buffer/client/gles2_c_lib_autogen.h
index 74b68fa62a5..0a8dab9b68b 100644
--- a/chromium/gpu/command_buffer/client/gles2_c_lib_autogen.h
+++ b/chromium/gpu/command_buffer/client/gles2_c_lib_autogen.h
@@ -740,6 +740,39 @@ void GL_APIENTRY GLES2ShallowFlushCHROMIUM() {
void GL_APIENTRY GLES2OrderingBarrierCHROMIUM() {
gles2::GetGLContext()->OrderingBarrierCHROMIUM();
}
+void GL_APIENTRY GLES2MultiDrawArraysWEBGL(GLenum mode,
+ const GLint* firsts,
+ const GLsizei* counts,
+ GLsizei drawcount) {
+ gles2::GetGLContext()->MultiDrawArraysWEBGL(mode, firsts, counts, drawcount);
+}
+void GL_APIENTRY
+GLES2MultiDrawArraysInstancedWEBGL(GLenum mode,
+ const GLint* firsts,
+ const GLsizei* counts,
+ const GLsizei* instance_counts,
+ GLsizei drawcount) {
+ gles2::GetGLContext()->MultiDrawArraysInstancedWEBGL(
+ mode, firsts, counts, instance_counts, drawcount);
+}
+void GL_APIENTRY GLES2MultiDrawElementsWEBGL(GLenum mode,
+ const GLsizei* counts,
+ GLenum type,
+ const GLsizei* offsets,
+ GLsizei drawcount) {
+ gles2::GetGLContext()->MultiDrawElementsWEBGL(mode, counts, type, offsets,
+ drawcount);
+}
+void GL_APIENTRY
+GLES2MultiDrawElementsInstancedWEBGL(GLenum mode,
+ const GLsizei* counts,
+ GLenum type,
+ const GLsizei* offsets,
+ const GLsizei* instance_counts,
+ GLsizei drawcount) {
+ gles2::GetGLContext()->MultiDrawElementsInstancedWEBGL(
+ mode, counts, type, offsets, instance_counts, drawcount);
+}
void GL_APIENTRY GLES2StencilFunc(GLenum func, GLint ref, GLuint mask) {
gles2::GetGLContext()->StencilFunc(func, ref, mask);
}
@@ -1533,27 +1566,35 @@ void GL_APIENTRY GLES2FlushDriverCachesCHROMIUM() {
GLuint GL_APIENTRY GLES2GetLastFlushIdCHROMIUM() {
return gles2::GetGLContext()->GetLastFlushIdCHROMIUM();
}
-void GL_APIENTRY
-GLES2ScheduleDCLayerSharedStateCHROMIUM(GLfloat opacity,
- GLboolean is_clipped,
- const GLfloat* clip_rect,
- GLint z_order,
- const GLfloat* transform) {
- gles2::GetGLContext()->ScheduleDCLayerSharedStateCHROMIUM(
- opacity, is_clipped, clip_rect, z_order, transform);
-}
-void GL_APIENTRY
-GLES2ScheduleDCLayerCHROMIUM(GLsizei num_textures,
- const GLuint* contents_texture_ids,
- const GLfloat* contents_rect,
- GLuint background_color,
- GLuint edge_aa_mask,
- const GLfloat* bounds_rect,
- GLuint filter,
- GLuint protected_video_type) {
+void GL_APIENTRY GLES2ScheduleDCLayerCHROMIUM(GLuint y_texture_id,
+ GLuint uv_texture_id,
+ GLint z_order,
+ GLint content_x,
+ GLint content_y,
+ GLint content_width,
+ GLint content_height,
+ GLint quad_x,
+ GLint quad_y,
+ GLint quad_width,
+ GLint quad_height,
+ GLfloat transform_c1r1,
+ GLfloat transform_c2r1,
+ GLfloat transform_c1r2,
+ GLfloat transform_c2r2,
+ GLfloat transform_tx,
+ GLfloat transform_ty,
+ GLboolean is_clipped,
+ GLint clip_x,
+ GLint clip_y,
+ GLint clip_width,
+ GLint clip_height,
+ GLuint protected_video_type) {
gles2::GetGLContext()->ScheduleDCLayerCHROMIUM(
- num_textures, contents_texture_ids, contents_rect, background_color,
- edge_aa_mask, bounds_rect, filter, protected_video_type);
+ y_texture_id, uv_texture_id, z_order, content_x, content_y, content_width,
+ content_height, quad_x, quad_y, quad_width, quad_height, transform_c1r1,
+ transform_c2r1, transform_c1r2, transform_c2r2, transform_tx,
+ transform_ty, is_clipped, clip_x, clip_y, clip_width, clip_height,
+ protected_video_type);
}
void GL_APIENTRY GLES2SetActiveURLCHROMIUM(const char* url) {
gles2::GetGLContext()->SetActiveURLCHROMIUM(url);
@@ -1836,10 +1877,9 @@ void GL_APIENTRY GLES2MaxShaderCompilerThreadsKHR(GLuint count) {
gles2::GetGLContext()->MaxShaderCompilerThreadsKHR(count);
}
GLuint GL_APIENTRY
-GLES2CreateAndTexStorage2DSharedImageCHROMIUM(GLenum internalFormat,
- const GLbyte* mailbox) {
+GLES2CreateAndTexStorage2DSharedImageCHROMIUM(const GLbyte* mailbox) {
return gles2::GetGLContext()->CreateAndTexStorage2DSharedImageCHROMIUM(
- internalFormat, mailbox);
+ mailbox);
}
void GL_APIENTRY GLES2BeginSharedImageAccessDirectCHROMIUM(GLuint texture,
GLenum mode) {
@@ -2470,6 +2510,23 @@ extern const NameToFunc g_gles2_function_table[] = {
reinterpret_cast<GLES2FunctionPointer>(glOrderingBarrierCHROMIUM),
},
{
+ "glMultiDrawArraysWEBGL",
+ reinterpret_cast<GLES2FunctionPointer>(glMultiDrawArraysWEBGL),
+ },
+ {
+ "glMultiDrawArraysInstancedWEBGL",
+ reinterpret_cast<GLES2FunctionPointer>(glMultiDrawArraysInstancedWEBGL),
+ },
+ {
+ "glMultiDrawElementsWEBGL",
+ reinterpret_cast<GLES2FunctionPointer>(glMultiDrawElementsWEBGL),
+ },
+ {
+ "glMultiDrawElementsInstancedWEBGL",
+ reinterpret_cast<GLES2FunctionPointer>(
+ glMultiDrawElementsInstancedWEBGL),
+ },
+ {
"glStencilFunc",
reinterpret_cast<GLES2FunctionPointer>(glStencilFunc),
},
@@ -3091,11 +3148,6 @@ extern const NameToFunc g_gles2_function_table[] = {
reinterpret_cast<GLES2FunctionPointer>(glGetLastFlushIdCHROMIUM),
},
{
- "glScheduleDCLayerSharedStateCHROMIUM",
- reinterpret_cast<GLES2FunctionPointer>(
- glScheduleDCLayerSharedStateCHROMIUM),
- },
- {
"glScheduleDCLayerCHROMIUM",
reinterpret_cast<GLES2FunctionPointer>(glScheduleDCLayerCHROMIUM),
},
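
These bindings batch several draws into one call, mirroring the
WEBGL_multi_draw semantics: each array holds drawcount entries. A
hypothetical call through the C entry point above:

    // Three triangle-strip draws submitted at once; both arrays have
    // drawcount (= 3) elements.
    const GLint firsts[] = {0, 4, 8};
    const GLsizei counts[] = {4, 4, 4};
    glMultiDrawArraysWEBGL(GL_TRIANGLE_STRIP, firsts, counts, 3);
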
diff --git a/chromium/gpu/command_buffer/client/gles2_cmd_helper_autogen.h b/chromium/gpu/command_buffer/client/gles2_cmd_helper_autogen.h
index f2da35b038e..4b936cbc4d2 100644
--- a/chromium/gpu/command_buffer/client/gles2_cmd_helper_autogen.h
+++ b/chromium/gpu/command_buffer/client/gles2_cmd_helper_autogen.h
@@ -1506,6 +1506,86 @@ void ShaderSourceBucket(GLuint shader, uint32_t str_bucket_id) {
}
}
+void MultiDrawBeginCHROMIUM(GLsizei drawcount) {
+ gles2::cmds::MultiDrawBeginCHROMIUM* c =
+ GetCmdSpace<gles2::cmds::MultiDrawBeginCHROMIUM>();
+ if (c) {
+ c->Init(drawcount);
+ }
+}
+
+void MultiDrawEndCHROMIUM() {
+ gles2::cmds::MultiDrawEndCHROMIUM* c =
+ GetCmdSpace<gles2::cmds::MultiDrawEndCHROMIUM>();
+ if (c) {
+ c->Init();
+ }
+}
+
+void MultiDrawArraysCHROMIUM(GLenum mode,
+ uint32_t firsts_shm_id,
+ uint32_t firsts_shm_offset,
+ uint32_t counts_shm_id,
+ uint32_t counts_shm_offset,
+ GLsizei drawcount) {
+ gles2::cmds::MultiDrawArraysCHROMIUM* c =
+ GetCmdSpace<gles2::cmds::MultiDrawArraysCHROMIUM>();
+ if (c) {
+ c->Init(mode, firsts_shm_id, firsts_shm_offset, counts_shm_id,
+ counts_shm_offset, drawcount);
+ }
+}
+
+void MultiDrawArraysInstancedCHROMIUM(GLenum mode,
+ uint32_t firsts_shm_id,
+ uint32_t firsts_shm_offset,
+ uint32_t counts_shm_id,
+ uint32_t counts_shm_offset,
+ uint32_t instance_counts_shm_id,
+ uint32_t instance_counts_shm_offset,
+ GLsizei drawcount) {
+ gles2::cmds::MultiDrawArraysInstancedCHROMIUM* c =
+ GetCmdSpace<gles2::cmds::MultiDrawArraysInstancedCHROMIUM>();
+ if (c) {
+ c->Init(mode, firsts_shm_id, firsts_shm_offset, counts_shm_id,
+ counts_shm_offset, instance_counts_shm_id,
+ instance_counts_shm_offset, drawcount);
+ }
+}
+
+void MultiDrawElementsCHROMIUM(GLenum mode,
+ uint32_t counts_shm_id,
+ uint32_t counts_shm_offset,
+ GLenum type,
+ uint32_t offsets_shm_id,
+ uint32_t offsets_shm_offset,
+ GLsizei drawcount) {
+ gles2::cmds::MultiDrawElementsCHROMIUM* c =
+ GetCmdSpace<gles2::cmds::MultiDrawElementsCHROMIUM>();
+ if (c) {
+ c->Init(mode, counts_shm_id, counts_shm_offset, type, offsets_shm_id,
+ offsets_shm_offset, drawcount);
+ }
+}
+
+void MultiDrawElementsInstancedCHROMIUM(GLenum mode,
+ uint32_t counts_shm_id,
+ uint32_t counts_shm_offset,
+ GLenum type,
+ uint32_t offsets_shm_id,
+ uint32_t offsets_shm_offset,
+ uint32_t instance_counts_shm_id,
+ uint32_t instance_counts_shm_offset,
+ GLsizei drawcount) {
+ gles2::cmds::MultiDrawElementsInstancedCHROMIUM* c =
+ GetCmdSpace<gles2::cmds::MultiDrawElementsInstancedCHROMIUM>();
+ if (c) {
+ c->Init(mode, counts_shm_id, counts_shm_offset, type, offsets_shm_id,
+ offsets_shm_offset, instance_counts_shm_id,
+ instance_counts_shm_offset, drawcount);
+ }
+}
+
void StencilFunc(GLenum func, GLint ref, GLuint mask) {
gles2::cmds::StencilFunc* c = GetCmdSpace<gles2::cmds::StencilFunc>();
if (c) {
@@ -2734,16 +2814,6 @@ void InsertFenceSyncCHROMIUM(GLuint64 release_count) {
}
}
-void WaitSyncTokenCHROMIUM(GLint namespace_id,
- GLuint64 command_buffer_id,
- GLuint64 release_count) {
- gles2::cmds::WaitSyncTokenCHROMIUM* c =
- GetCmdSpace<gles2::cmds::WaitSyncTokenCHROMIUM>();
- if (c) {
- c->Init(namespace_id, command_buffer_id, release_count);
- }
-}
-
void UnpremultiplyAndDitherCopyCHROMIUM(GLuint source_id,
GLuint dest_id,
GLint x,
@@ -2852,30 +2922,37 @@ void FlushDriverCachesCHROMIUM() {
}
}
-void ScheduleDCLayerSharedStateCHROMIUM(GLfloat opacity,
- GLboolean is_clipped,
- GLint z_order,
- GLuint shm_id,
- GLuint shm_offset) {
- gles2::cmds::ScheduleDCLayerSharedStateCHROMIUM* c =
- GetCmdSpace<gles2::cmds::ScheduleDCLayerSharedStateCHROMIUM>();
- if (c) {
- c->Init(opacity, is_clipped, z_order, shm_id, shm_offset);
- }
-}
-
-void ScheduleDCLayerCHROMIUM(GLsizei num_textures,
- GLuint background_color,
- GLuint edge_aa_mask,
- GLuint filter,
- GLuint shm_id,
- GLuint shm_offset,
+void ScheduleDCLayerCHROMIUM(GLuint y_texture_id,
+ GLuint uv_texture_id,
+ GLint z_order,
+ GLint content_x,
+ GLint content_y,
+ GLint content_width,
+ GLint content_height,
+ GLint quad_x,
+ GLint quad_y,
+ GLint quad_width,
+ GLint quad_height,
+ GLfloat transform_c1r1,
+ GLfloat transform_c2r1,
+ GLfloat transform_c1r2,
+ GLfloat transform_c2r2,
+ GLfloat transform_tx,
+ GLfloat transform_ty,
+ GLboolean is_clipped,
+ GLint clip_x,
+ GLint clip_y,
+ GLint clip_width,
+ GLint clip_height,
GLuint protected_video_type) {
gles2::cmds::ScheduleDCLayerCHROMIUM* c =
GetCmdSpace<gles2::cmds::ScheduleDCLayerCHROMIUM>();
if (c) {
- c->Init(num_textures, background_color, edge_aa_mask, filter, shm_id,
- shm_offset, protected_video_type);
+ c->Init(y_texture_id, uv_texture_id, z_order, content_x, content_y,
+ content_width, content_height, quad_x, quad_y, quad_width,
+ quad_height, transform_c1r1, transform_c2r1, transform_c1r2,
+ transform_c2r2, transform_tx, transform_ty, is_clipped, clip_x,
+ clip_y, clip_width, clip_height, protected_video_type);
}
}
@@ -3397,7 +3474,6 @@ void MaxShaderCompilerThreadsKHR(GLuint count) {
}
void CreateAndTexStorage2DSharedImageINTERNALImmediate(GLuint texture,
- GLenum internalFormat,
const GLbyte* mailbox) {
const uint32_t size = gles2::cmds::
CreateAndTexStorage2DSharedImageINTERNALImmediate::ComputeSize();
@@ -3405,7 +3481,7 @@ void CreateAndTexStorage2DSharedImageINTERNALImmediate(GLuint texture,
GetImmediateCmdSpaceTotalSize<
gles2::cmds::CreateAndTexStorage2DSharedImageINTERNALImmediate>(size);
if (c) {
- c->Init(texture, internalFormat, mailbox);
+ c->Init(texture, mailbox);
}
}
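
On the wire a multi-draw is bracketed: MultiDrawBeginCHROMIUM, then one or
more array commands whose firsts/counts live in shared memory, then
MultiDrawEndCHROMIUM. A sketch of the sequence these helpers emit, with
|shm_id| and the offsets assumed to describe already-copied arrays:

    helper->MultiDrawBeginCHROMIUM(drawcount);
    helper->MultiDrawArraysCHROMIUM(GL_TRIANGLES,
                                    shm_id, firsts_offset,  // GLint firsts
                                    shm_id, counts_offset,  // GLsizei counts
                                    drawcount);
    helper->MultiDrawEndCHROMIUM();
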
diff --git a/chromium/gpu/command_buffer/client/gles2_implementation.cc b/chromium/gpu/command_buffer/client/gles2_implementation.cc
index 0e1dd204db7..a5ca58fb6e5 100644
--- a/chromium/gpu/command_buffer/client/gles2_implementation.cc
+++ b/chromium/gpu/command_buffer/client/gles2_implementation.cc
@@ -13,16 +13,19 @@
#include <GLES3/gl31.h>
#include <stddef.h>
#include <stdint.h>
+
#include <algorithm>
#include <map>
#include <set>
#include <sstream>
#include <string>
+
#include "base/atomic_sequence_num.h"
#include "base/bits.h"
#include "base/compiler_specific.h"
#include "base/containers/span.h"
#include "base/numerics/safe_math.h"
+#include "base/stl_util.h"
#include "base/strings/string_split.h"
#include "base/strings/stringprintf.h"
#include "base/system/sys_info.h"
@@ -40,6 +43,7 @@
#include "gpu/command_buffer/client/readback_buffer_shadow_tracker.h"
#include "gpu/command_buffer/client/shared_memory_limits.h"
#include "gpu/command_buffer/client/transfer_buffer.h"
+#include "gpu/command_buffer/client/transfer_buffer_cmd_copy_helpers.h"
#include "gpu/command_buffer/client/vertex_array_object_manager.h"
#include "gpu/command_buffer/common/context_creation_attribs.h"
#include "gpu/command_buffer/common/gles2_cmd_utils.h"
@@ -274,7 +278,7 @@ gpu::ContextResult GLES2Implementation::Initialize(
if (support_client_side_arrays_) {
GetIdHandler(SharedIdNamespaces::kBuffers)
- ->MakeIds(this, kClientSideArrayId, arraysize(reserved_ids_),
+ ->MakeIds(this, kClientSideArrayId, base::size(reserved_ids_),
&reserved_ids_[0]);
}
@@ -308,7 +312,7 @@ GLES2Implementation::~GLES2Implementation() {
// GLES2Implementation::Initialize() could fail before allocating
// reserved_ids_, so we need delete them carefully.
if (support_client_side_arrays_ && reserved_ids_[0]) {
- DeleteBuffers(arraysize(reserved_ids_), &reserved_ids_[0]);
+ DeleteBuffers(base::size(reserved_ids_), &reserved_ids_[0]);
}
// Release remaining BufferRange mem; This is when a MapBufferRange() is
@@ -2113,7 +2117,8 @@ void GLES2Implementation::BufferSubDataHelper(GLenum target,
int32_t end = 0;
int32_t buffer_size = buffer->size();
- if (!SafeAddInt32(offset, size, &end) || end > buffer_size) {
+ if (!base::CheckAdd(offset, size).AssignIfValid(&end) ||
+ end > buffer_size) {
SetGLError(GL_INVALID_VALUE, "glBufferSubData", "out of range");
return;
}
@@ -2136,22 +2141,16 @@ void GLES2Implementation::BufferSubDataHelperImpl(
DCHECK(buffer);
DCHECK_GT(size, 0);
- const int8_t* source = static_cast<const int8_t*>(data);
- while (size) {
- if (!buffer->valid() || buffer->size() == 0) {
- buffer->Reset(size);
- if (!buffer->valid()) {
- return;
- }
- }
- memcpy(buffer->address(), source, buffer->size());
- helper_->BufferSubData(target, offset, buffer->size(), buffer->shm_id(),
- buffer->offset());
+ auto DoBufferSubData = [&](const std::array<uint32_t, 1>&,
+ uint32_t copy_offset, uint32_t) {
+ helper_->BufferSubData(target, offset + copy_offset, buffer->size(),
+ buffer->shm_id(), buffer->offset());
InvalidateReadbackBufferShadowDataCHROMIUM(GetBoundBufferHelper(target));
- offset += buffer->size();
- source += buffer->size();
- size -= buffer->size();
- buffer->Release();
+ };
+
+ if (!TransferArraysAndExecute(size, buffer, DoBufferSubData,
+ static_cast<const int8_t*>(data))) {
+ SetGLError(GL_OUT_OF_MEMORY, "glBufferSubData", "out of memory");
}
}
@@ -2168,6 +2167,234 @@ void GLES2Implementation::BufferSubData(GLenum target,
CheckGLError();
}
+void GLES2Implementation::MultiDrawArraysWEBGLHelper(GLenum mode,
+ const GLint* firsts,
+ const GLsizei* counts,
+ GLsizei drawcount) {
+ DCHECK_GT(drawcount, 0);
+
+ uint32_t buffer_size = ComputeCombinedCopySize(drawcount, firsts, counts);
+ ScopedTransferBufferPtr buffer(buffer_size, helper_, transfer_buffer_);
+
+ helper_->MultiDrawBeginCHROMIUM(drawcount);
+ auto DoMultiDraw = [&](const std::array<uint32_t, 2>& offsets, uint32_t,
+ uint32_t copy_count) {
+ helper_->MultiDrawArraysCHROMIUM(
+ mode, buffer.shm_id(), buffer.offset() + offsets[0], buffer.shm_id(),
+ buffer.offset() + offsets[1], copy_count);
+ };
+ if (!TransferArraysAndExecute(drawcount, &buffer, DoMultiDraw, firsts,
+ counts)) {
+ SetGLError(GL_OUT_OF_MEMORY, "glMultiDrawArraysWEBGL", "out of memory");
+ }
+ helper_->MultiDrawEndCHROMIUM();
+}
+
+void GLES2Implementation::MultiDrawArraysInstancedWEBGLHelper(
+ GLenum mode,
+ const GLint* firsts,
+ const GLsizei* counts,
+ const GLsizei* instance_counts,
+ GLsizei drawcount) {
+ DCHECK_GT(drawcount, 0);
+
+ uint32_t buffer_size =
+ ComputeCombinedCopySize(drawcount, firsts, counts, instance_counts);
+ ScopedTransferBufferPtr buffer(buffer_size, helper_, transfer_buffer_);
+
+ helper_->MultiDrawBeginCHROMIUM(drawcount);
+ auto DoMultiDraw = [&](const std::array<uint32_t, 3>& offsets, uint32_t,
+ uint32_t copy_count) {
+ helper_->MultiDrawArraysInstancedCHROMIUM(
+ mode, buffer.shm_id(), buffer.offset() + offsets[0], buffer.shm_id(),
+ buffer.offset() + offsets[1], buffer.shm_id(),
+ buffer.offset() + offsets[2], copy_count);
+ };
+ if (!TransferArraysAndExecute(drawcount, &buffer, DoMultiDraw, firsts, counts,
+ instance_counts)) {
+ SetGLError(GL_OUT_OF_MEMORY, "glMultiDrawArraysInstancedWEBGL",
+ "out of memory");
+ }
+ helper_->MultiDrawEndCHROMIUM();
+}
+
+void GLES2Implementation::MultiDrawElementsWEBGLHelper(GLenum mode,
+ const GLsizei* counts,
+ GLenum type,
+ const GLsizei* offsets,
+ GLsizei drawcount) {
+ DCHECK_GT(drawcount, 0);
+
+ uint32_t buffer_size = ComputeCombinedCopySize(drawcount, counts, offsets);
+ ScopedTransferBufferPtr buffer(buffer_size, helper_, transfer_buffer_);
+
+ helper_->MultiDrawBeginCHROMIUM(drawcount);
+ auto DoMultiDraw = [&](const std::array<uint32_t, 2>& offsets, uint32_t,
+ uint32_t copy_count) {
+ helper_->MultiDrawElementsCHROMIUM(
+ mode, buffer.shm_id(), buffer.offset() + offsets[0], type,
+ buffer.shm_id(), buffer.offset() + offsets[1], copy_count);
+ };
+ if (!TransferArraysAndExecute(drawcount, &buffer, DoMultiDraw, counts,
+ offsets)) {
+ SetGLError(GL_OUT_OF_MEMORY, "glMultiDrawElementsWEBGL", "out of memory");
+ }
+ helper_->MultiDrawEndCHROMIUM();
+}
+
+void GLES2Implementation::MultiDrawElementsInstancedWEBGLHelper(
+ GLenum mode,
+ const GLsizei* counts,
+ GLenum type,
+ const GLsizei* offsets,
+ const GLsizei* instance_counts,
+ GLsizei drawcount) {
+ DCHECK_GT(drawcount, 0);
+
+ uint32_t buffer_size =
+ ComputeCombinedCopySize(drawcount, counts, offsets, instance_counts);
+ ScopedTransferBufferPtr buffer(buffer_size, helper_, transfer_buffer_);
+
+ helper_->MultiDrawBeginCHROMIUM(drawcount);
+ auto DoMultiDraw = [&](const std::array<uint32_t, 3>& offsets, uint32_t,
+ uint32_t copy_count) {
+ helper_->MultiDrawElementsInstancedCHROMIUM(
+ mode, buffer.shm_id(), buffer.offset() + offsets[0], type,
+ buffer.shm_id(), buffer.offset() + offsets[1], buffer.shm_id(),
+ buffer.offset() + offsets[2], copy_count);
+ };
+ if (!TransferArraysAndExecute(drawcount, &buffer, DoMultiDraw, counts,
+ offsets, instance_counts)) {
+ SetGLError(GL_OUT_OF_MEMORY, "glMultiDrawElementsInstancedWEBGL",
+ "out of memory");
+ }
+ helper_->MultiDrawEndCHROMIUM();
+}
+
+void GLES2Implementation::MultiDrawArraysWEBGL(GLenum mode,
+ const GLint* firsts,
+ const GLsizei* counts,
+ GLsizei drawcount) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glMultiDrawArraysWEBGL("
+ << GLES2Util::GetStringDrawMode(mode) << ", " << firsts
+ << ", " << counts << ", " << drawcount << ")");
+ if (drawcount < 0) {
+ SetGLError(GL_INVALID_VALUE, "glMultiDrawArraysWEBGL", "drawcount < 0");
+ return;
+ }
+ if (drawcount == 0) {
+ return;
+ }
+  // This extension is WebGL-only; WebGL does not support client-side arrays.
+ if (vertex_array_object_manager_->SupportsClientSideBuffers()) {
+ SetGLError(GL_INVALID_OPERATION, "glMultiDrawArraysWEBGL",
+ "Missing array buffer for vertex attribute");
+ return;
+ }
+ MultiDrawArraysWEBGLHelper(mode, firsts, counts, drawcount);
+ CheckGLError();
+}
+
+void GLES2Implementation::MultiDrawArraysInstancedWEBGL(
+ GLenum mode,
+ const GLint* firsts,
+ const GLsizei* counts,
+ const GLsizei* instance_counts,
+ GLsizei drawcount) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glMultiDrawArraysInstancedWEBGL("
+ << GLES2Util::GetStringDrawMode(mode) << ", " << firsts
+ << ", " << counts << ", " << instance_counts << ", "
+ << drawcount << ")");
+ if (drawcount < 0) {
+ SetGLError(GL_INVALID_VALUE, "glMultiDrawArraysWEBGLInstanced",
+ "drawcount < 0");
+ return;
+ }
+ if (drawcount == 0) {
+ return;
+ }
+  // This extension is WebGL-only; WebGL does not support client-side arrays.
+ if (vertex_array_object_manager_->SupportsClientSideBuffers()) {
+ SetGLError(GL_INVALID_OPERATION, "glMultiDrawArraysWEBGLInstanced",
+ "Missing array buffer for vertex attribute");
+ return;
+ }
+ MultiDrawArraysInstancedWEBGLHelper(mode, firsts, counts, instance_counts,
+ drawcount);
+ CheckGLError();
+}
+
+void GLES2Implementation::MultiDrawElementsWEBGL(GLenum mode,
+ const GLsizei* counts,
+ GLenum type,
+ const GLsizei* offsets,
+ GLsizei drawcount) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glMultiDrawElementsWEBGL("
+ << GLES2Util::GetStringDrawMode(mode) << ", " << counts
+ << ", " << GLES2Util::GetStringIndexType(type) << ", "
+ << offsets << ", " << drawcount << ")");
+ if (drawcount < 0) {
+ SetGLError(GL_INVALID_VALUE, "glMultiDrawElementsWEBGL", "drawcount < 0");
+ return;
+ }
+ if (drawcount == 0) {
+ return;
+ }
+  // This extension is WebGL-only; WebGL does not support client-side arrays.
+ if (vertex_array_object_manager_->bound_element_array_buffer() == 0) {
+ SetGLError(GL_INVALID_OPERATION, "glMultiDrawElementsWEBGL",
+ "No element array buffer");
+ return;
+ }
+ if (vertex_array_object_manager_->SupportsClientSideBuffers()) {
+ SetGLError(GL_INVALID_OPERATION, "glMultiDrawElementsWEBGL",
+ "Missing array buffer for vertex attribute");
+ return;
+ }
+ MultiDrawElementsWEBGLHelper(mode, counts, type, offsets, drawcount);
+ CheckGLError();
+}
+
+void GLES2Implementation::MultiDrawElementsInstancedWEBGL(
+ GLenum mode,
+ const GLsizei* counts,
+ GLenum type,
+ const GLsizei* offsets,
+ const GLsizei* instance_counts,
+ GLsizei drawcount) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glMultiDrawElementsInstancedWEBGL("
+ << GLES2Util::GetStringDrawMode(mode) << ", " << counts
+ << ", " << GLES2Util::GetStringIndexType(type) << ", "
+ << offsets << ", " << instance_counts << ", " << drawcount
+ << ")");
+ if (drawcount < 0) {
+ SetGLError(GL_INVALID_VALUE, "glMultiDrawElementsInstancedWEBGL",
+ "drawcount < 0");
+ return;
+ }
+ if (drawcount == 0) {
+ return;
+ }
+  // This extension is WebGL-only; WebGL does not support client-side arrays.
+ if (vertex_array_object_manager_->bound_element_array_buffer() == 0) {
+ SetGLError(GL_INVALID_OPERATION, "glMultiDrawElementsInstancedWEBGL",
+ "No element array buffer");
+ return;
+ }
+ if (vertex_array_object_manager_->SupportsClientSideBuffers()) {
+ SetGLError(GL_INVALID_OPERATION, "glMultiDrawElementsInstancedWEBGL",
+ "Missing array buffer for vertex attribute");
+ return;
+ }
+ MultiDrawElementsInstancedWEBGLHelper(mode, counts, type, offsets,
+ instance_counts, drawcount);
+ CheckGLError();
+}
+
void GLES2Implementation::RemoveTransferBuffer(BufferTracker::Buffer* buffer) {
int32_t token = buffer->last_usage_token();
@@ -3266,6 +3493,27 @@ void GLES2Implementation::TexSubImage3DImpl(GLenum target,
}
}
+void GLES2Implementation::GetResultNameHelper(GLsizei bufsize,
+ GLsizei* length,
+ char* name) {
+ // Length of string (without final \0) that we will write to the buffer.
+ GLsizei max_length = 0;
+ if (name && (bufsize > 0)) {
+ std::vector<int8_t> str;
+ GetBucketContents(kResultBucketId, &str);
+ if (!str.empty()) {
+ DCHECK_LE(str.size(), static_cast<size_t>(INT_MAX));
+ // Note: both bufsize and str.size() count/include the terminating \0.
+ max_length = std::min(bufsize, static_cast<GLsizei>(str.size())) - 1;
+ }
+ memcpy(name, str.data(), max_length);
+ name[max_length] = '\0';
+ }
+ if (length) {
+ *length = max_length;
+ }
+}
+
bool GLES2Implementation::GetActiveAttribHelper(GLuint program,
GLuint index,
GLsizei bufsize,
@@ -3293,21 +3541,8 @@ bool GLES2Implementation::GetActiveAttribHelper(GLuint program,
if (type) {
*type = result->type;
}
- if (length || name) {
- std::vector<int8_t> str;
- // Note: this can invalidate |result|.
- GetBucketContents(kResultBucketId, &str);
- GLsizei max_size =
- std::min(static_cast<size_t>(bufsize) - 1,
- std::max(static_cast<size_t>(0), str.size() - 1));
- if (length) {
- *length = max_size;
- }
- if (name && bufsize > 0) {
- memcpy(name, &str[0], max_size);
- name[max_size] = '\0';
- }
- }
+ // Note: this can invalidate |result|.
+ GetResultNameHelper(bufsize, length, name);
}
return success;
}
@@ -3366,29 +3601,18 @@ bool GLES2Implementation::GetActiveUniformHelper(GLuint program,
helper_->GetActiveUniform(program, index, kResultBucketId, GetResultShmId(),
result.offset());
WaitForCmd();
- if (result->success) {
+ bool success = !!result->success;
+ if (success) {
if (size) {
*size = result->size;
}
if (type) {
*type = result->type;
}
- if (length || name) {
- std::vector<int8_t> str;
- GetBucketContents(kResultBucketId, &str);
- GLsizei max_size =
- std::min(static_cast<size_t>(bufsize) - 1,
- std::max(static_cast<size_t>(0), str.size() - 1));
- if (length) {
- *length = max_size;
- }
- if (name && bufsize > 0) {
- memcpy(name, &str[0], max_size);
- name[max_size] = '\0';
- }
- }
+ // Note: this can invalidate |result|.
+ GetResultNameHelper(bufsize, length, name);
}
- return result->success != 0;
+ return success;
}
void GLES2Implementation::GetActiveUniform(GLuint program,
@@ -3444,27 +3668,12 @@ bool GLES2Implementation::GetActiveUniformBlockNameHelper(GLuint program,
helper_->GetActiveUniformBlockName(program, index, kResultBucketId,
GetResultShmId(), result.offset());
WaitForCmd();
- if (*result) {
- if (bufsize == 0) {
- if (length) {
- *length = 0;
- }
- } else if (length || name) {
- std::vector<int8_t> str;
- GetBucketContents(kResultBucketId, &str);
- DCHECK_GT(str.size(), 0u);
- GLsizei max_size =
- std::min(bufsize, static_cast<GLsizei>(str.size())) - 1;
- if (length) {
- *length = max_size;
- }
- if (name) {
- memcpy(name, &str[0], max_size);
- name[max_size] = '\0';
- }
- }
+  bool success = !!*result;
+ if (success) {
+ // Note: this can invalidate |result|.
+ GetResultNameHelper(bufsize, length, name);
}
- return *result != 0;
+ return success;
}
void GLES2Implementation::GetActiveUniformBlockName(GLuint program,
@@ -3799,25 +4008,8 @@ bool GLES2Implementation::GetTransformFeedbackVaryingHelper(GLuint program,
if (type) {
*type = result->type;
}
- if (length || name) {
- std::vector<int8_t> str;
- GetBucketContents(kResultBucketId, &str);
- GLsizei max_size = std::min(bufsize, static_cast<GLsizei>(str.size()));
- if (max_size > 0) {
- --max_size;
- }
- if (length) {
- *length = max_size;
- }
- if (name) {
- if (max_size > 0) {
- memcpy(name, &str[0], max_size);
- name[max_size] = '\0';
- } else if (bufsize > 0) {
- name[0] = '\0';
- }
- }
- }
+ // Note: this can invalidate |result|.
+ GetResultNameHelper(bufsize, length, name);
}
return result->success != 0;
}
@@ -4706,7 +4898,10 @@ void GLES2Implementation::DrawArrays(GLenum mode, GLint first, GLsizei count) {
bool simulated = false;
if (vertex_array_object_manager_->SupportsClientSideBuffers()) {
GLsizei num_elements;
- SafeAddInt32(first, count, &num_elements);
+ if (!base::CheckAdd(first, count).AssignIfValid(&num_elements)) {
+ SetGLError(GL_INVALID_VALUE, "glDrawArrays", "first+count overflow");
+ return;
+ }
if (!vertex_array_object_manager_->SetupSimulatedClientSideBuffers(
"glDrawArrays", this, helper_, num_elements, 0, &simulated)) {
return;
@@ -4936,7 +5131,7 @@ void GLES2Implementation::ScheduleCALayerSharedStateCHROMIUM(
const GLfloat* clip_rect,
GLint sorting_context_id,
const GLfloat* transform) {
- size_t shm_size = 20 * sizeof(GLfloat);
+ uint32_t shm_size = 20 * sizeof(GLfloat);
ScopedTransferBufferPtr buffer(shm_size, helper_, transfer_buffer_);
if (!buffer.valid() || buffer.size() < shm_size) {
SetGLError(GL_OUT_OF_MEMORY, "GLES2::ScheduleCALayerSharedStateCHROMIUM",
@@ -4957,7 +5152,7 @@ void GLES2Implementation::ScheduleCALayerCHROMIUM(GLuint contents_texture_id,
GLuint edge_aa_mask,
const GLfloat* bounds_rect,
GLuint filter) {
- size_t shm_size = 8 * sizeof(GLfloat);
+ uint32_t shm_size = 8 * sizeof(GLfloat);
ScopedTransferBufferPtr buffer(shm_size, helper_, transfer_buffer_);
if (!buffer.valid() || buffer.size() < shm_size) {
SetGLError(GL_OUT_OF_MEMORY, "GLES2::ScheduleCALayerCHROMIUM",
@@ -4972,26 +5167,6 @@ void GLES2Implementation::ScheduleCALayerCHROMIUM(GLuint contents_texture_id,
buffer.offset());
}
-void GLES2Implementation::ScheduleDCLayerSharedStateCHROMIUM(
- GLfloat opacity,
- GLboolean is_clipped,
- const GLfloat* clip_rect,
- GLint z_order,
- const GLfloat* transform) {
- size_t shm_size = 20 * sizeof(GLfloat);
- ScopedTransferBufferPtr buffer(shm_size, helper_, transfer_buffer_);
- if (!buffer.valid() || buffer.size() < shm_size) {
- SetGLError(GL_OUT_OF_MEMORY, "GLES2::ScheduleDCLayerSharedStateCHROMIUM",
- "out of memory");
- return;
- }
- GLfloat* mem = static_cast<GLfloat*>(buffer.address());
- memcpy(mem + 0, clip_rect, 4 * sizeof(GLfloat));
- memcpy(mem + 4, transform, 16 * sizeof(GLfloat));
- helper_->ScheduleDCLayerSharedStateCHROMIUM(opacity, is_clipped, z_order,
- buffer.shm_id(), buffer.offset());
-}
-
void GLES2Implementation::SetColorSpaceMetadataCHROMIUM(
GLuint texture_id,
GLColorSpace color_space) {
@@ -5019,34 +5194,6 @@ void GLES2Implementation::SetColorSpaceMetadataCHROMIUM(
#endif
}
-void GLES2Implementation::ScheduleDCLayerCHROMIUM(
- GLsizei num_textures,
- const GLuint* contents_texture_ids,
- const GLfloat* contents_rect,
- GLuint background_color,
- GLuint edge_aa_mask,
- const GLfloat* bounds_rect,
- GLuint filter,
- GLuint protected_video_type) {
- const size_t kRectsSize = 8 * sizeof(GLfloat);
- size_t textures_size = num_textures * sizeof(GLuint);
- size_t shm_size = kRectsSize + textures_size;
- ScopedTransferBufferPtr buffer(shm_size, helper_, transfer_buffer_);
- if (!buffer.valid() || buffer.size() < shm_size) {
- SetGLError(GL_OUT_OF_MEMORY, "GLES2::ScheduleDCLayerCHROMIUM",
- "out of memory");
- return;
- }
- GLfloat* mem = static_cast<GLfloat*>(buffer.address());
- memcpy(mem + 0, contents_rect, 4 * sizeof(GLfloat));
- memcpy(mem + 4, bounds_rect, 4 * sizeof(GLfloat));
- memcpy(static_cast<char*>(buffer.address()) + kRectsSize,
- contents_texture_ids, textures_size);
- helper_->ScheduleDCLayerCHROMIUM(num_textures, background_color, edge_aa_mask,
- filter, buffer.shm_id(), buffer.offset(),
- protected_video_type);
-}
-
void GLES2Implementation::CommitOverlayPlanesCHROMIUM(uint64_t swap_id,
uint32_t flags) {
GPU_CLIENT_SINGLE_THREAD_CHECK();
@@ -5956,7 +6103,11 @@ void GLES2Implementation::DrawArraysInstancedANGLE(GLenum mode,
bool simulated = false;
if (vertex_array_object_manager_->SupportsClientSideBuffers()) {
GLsizei num_elements;
- SafeAddInt32(first, count, &num_elements);
+ if (!base::CheckAdd(first, count).AssignIfValid(&num_elements)) {
+ SetGLError(GL_INVALID_VALUE, "glDrawArraysInstancedANGLE",
+ "first+count overflow");
+ return;
+ }
if (!vertex_array_object_manager_->SetupSimulatedClientSideBuffers(
"glDrawArraysInstancedANGLE", this, helper_, num_elements,
primcount, &simulated)) {
@@ -6044,20 +6195,17 @@ GLuint GLES2Implementation::CreateAndConsumeTextureCHROMIUM(
}
GLuint GLES2Implementation::CreateAndTexStorage2DSharedImageCHROMIUM(
- GLenum internal_format,
const GLbyte* data) {
GPU_CLIENT_SINGLE_THREAD_CHECK();
GPU_CLIENT_LOG("[" << GetLogPrefix()
<< "] CreateAndTexStorage2DSharedImageCHROMIUM("
- << GLES2Util::GetStringImageInternalFormat(internal_format)
- << ", " << static_cast<const void*>(data) << ")");
+ << static_cast<const void*>(data) << ")");
const Mailbox& mailbox = *reinterpret_cast<const Mailbox*>(data);
DCHECK(mailbox.Verify()) << "CreateAndTexStorage2DSharedImageCHROMIUM was "
"passed an invalid mailbox.";
GLuint client_id;
GetIdHandler(SharedIdNamespaces::kTextures)->MakeIds(this, 0, 1, &client_id);
- helper_->CreateAndTexStorage2DSharedImageINTERNALImmediate(
- client_id, internal_format, data);
+ helper_->CreateAndTexStorage2DSharedImageINTERNALImmediate(client_id, data);
if (share_group_->bind_generates_resource())
helper_->CommandBufferHelper::OrderingBarrier();
CheckGLError();
@@ -6227,7 +6375,7 @@ bool GLES2Implementation::ThreadsafeDiscardableTextureIsDeletedForTracing(
return manager->TextureIsDeletedForTracing(texture_id);
}
-void* GLES2Implementation::MapTransferCacheEntry(size_t serialized_size) {
+void* GLES2Implementation::MapTransferCacheEntry(uint32_t serialized_size) {
NOTREACHED();
return nullptr;
}
@@ -6339,14 +6487,9 @@ void GLES2Implementation::WaitSyncTokenCHROMIUM(const GLbyte* sync_token_data) {
return;
}
- helper_->WaitSyncTokenCHROMIUM(
- static_cast<GLint>(sync_token.namespace_id()),
- sync_token.command_buffer_id().GetUnsafeValue(),
- sync_token.release_count());
-
// Enqueue sync token in flush after inserting command so that it's not
// included in an automatic flush.
- gpu_control_->WaitSyncTokenHint(verified_sync_token);
+ gpu_control_->WaitSyncToken(verified_sync_token);
}
namespace {
@@ -6482,11 +6625,11 @@ bool GLES2Implementation::PackStringsToBucket(GLsizei count,
base::CheckedNumeric<uint32_t> total_size = count;
total_size += 1;
total_size *= sizeof(GLint);
- if (!total_size.IsValid()) {
+ uint32_t header_size = 0;
+ if (!total_size.AssignIfValid(&header_size)) {
SetGLError(GL_INVALID_VALUE, func_name, "overflow");
return false;
}
- size_t header_size = total_size.ValueOrDefault(0);
std::vector<GLint> header(count + 1);
header[0] = static_cast<GLint>(count);
for (GLsizei ii = 0; ii < count; ++ii) {
@@ -6498,35 +6641,30 @@ bool GLES2Implementation::PackStringsToBucket(GLsizei count,
}
total_size += len;
total_size += 1; // NULL at the end of each char array.
- if (!total_size.IsValid()) {
- SetGLError(GL_INVALID_VALUE, func_name, "overflow");
- return false;
- }
header[ii + 1] = len;
}
// Pack data into a bucket on the service.
- helper_->SetBucketSize(kResultBucketId, total_size.ValueOrDefault(0));
- size_t offset = 0;
+ uint32_t validated_size = 0;
+ if (!total_size.AssignIfValid(&validated_size)) {
+ SetGLError(GL_INVALID_VALUE, func_name, "overflow");
+ return false;
+ }
+ helper_->SetBucketSize(kResultBucketId, validated_size);
+ uint32_t offset = 0;
for (GLsizei ii = 0; ii <= count; ++ii) {
const char* src =
(ii == 0) ? reinterpret_cast<const char*>(&header[0]) : str[ii - 1];
- base::CheckedNumeric<size_t> checked_size =
- (ii == 0) ? header_size : static_cast<size_t>(header[ii]);
+ uint32_t size = (ii == 0) ? header_size : header[ii];
if (ii > 0) {
- checked_size += 1; // NULL in the end.
- }
- if (!checked_size.IsValid()) {
- SetGLError(GL_INVALID_VALUE, func_name, "overflow");
- return false;
+      size += 1;  // One for the trailing NUL.
}
- size_t size = checked_size.ValueOrDefault(0);
while (size) {
ScopedTransferBufferPtr buffer(size, helper_, transfer_buffer_);
if (!buffer.valid() || buffer.size() == 0) {
SetGLError(GL_OUT_OF_MEMORY, func_name, "too large");
return false;
}
- size_t copy_size = buffer.size();
+ uint32_t copy_size = buffer.size();
if (ii > 0 && buffer.size() == size)
--copy_size;
if (copy_size)
@@ -6722,7 +6860,8 @@ void GLES2Implementation::DeletePathsCHROMIUM(GLuint first_client_id,
return;
GLuint last_client_id;
- if (!SafeAddUint32(first_client_id, range - 1, &last_client_id)) {
+ if (!base::CheckAdd(first_client_id, range - 1)
+ .AssignIfValid(&last_client_id)) {
SetGLError(GL_INVALID_OPERATION, kFunctionName, "overflow");
return;
}
@@ -6787,13 +6926,15 @@ void GLES2Implementation::PathCommandsCHROMIUM(GLuint path,
}
uint32_t coords_size;
- if (!SafeMultiplyUint32(num_coords, coord_type_size, &coords_size)) {
+ if (!base::CheckMul(num_coords, coord_type_size)
+ .AssignIfValid(&coords_size)) {
SetGLError(GL_INVALID_OPERATION, kFunctionName, "overflow");
return;
}
uint32_t required_buffer_size;
- if (!SafeAddUint32(coords_size, num_commands, &required_buffer_size)) {
+ if (!base::CheckAdd(coords_size, num_commands)
+ .AssignIfValid(&required_buffer_size)) {
SetGLError(GL_INVALID_OPERATION, kFunctionName, "overflow");
return;
}
@@ -6835,9 +6976,9 @@ bool GLES2Implementation::PrepareInstancedPathCommand(
const GLfloat* transform_values,
ScopedTransferBufferPtr* buffer,
uint32_t* out_paths_shm_id,
- size_t* out_paths_offset,
+ uint32_t* out_paths_offset,
uint32_t* out_transforms_shm_id,
- size_t* out_transforms_offset) {
+ uint32_t* out_transforms_offset) {
if (num_paths < 0) {
SetGLError(GL_INVALID_VALUE, function_name, "numPaths < 0");
return false;
@@ -6883,7 +7024,7 @@ bool GLES2Implementation::PrepareInstancedPathCommand(
}
uint32_t paths_size;
- if (!SafeMultiplyUint32(path_name_size, num_paths, &paths_size)) {
+ if (!base::CheckMul(path_name_size, num_paths).AssignIfValid(&paths_size)) {
SetGLError(GL_INVALID_OPERATION, function_name, "overflow");
return false;
}
@@ -6893,13 +7034,15 @@ bool GLES2Implementation::PrepareInstancedPathCommand(
uint32_t one_transform_size = sizeof(GLfloat) * transforms_component_count;
uint32_t transforms_size;
- if (!SafeMultiplyUint32(one_transform_size, num_paths, &transforms_size)) {
+ if (!base::CheckMul(one_transform_size, num_paths)
+ .AssignIfValid(&transforms_size)) {
SetGLError(GL_INVALID_OPERATION, function_name, "overflow");
return false;
}
uint32_t required_buffer_size;
- if (!SafeAddUint32(transforms_size, paths_size, &required_buffer_size)) {
+ if (!base::CheckAdd(transforms_size, paths_size)
+ .AssignIfValid(&required_buffer_size)) {
SetGLError(GL_INVALID_OPERATION, function_name, "overflow");
return false;
}
@@ -6951,9 +7094,9 @@ void GLES2Implementation::StencilFillPathInstancedCHROMIUM(
ScopedTransferBufferPtr buffer(helper_, transfer_buffer_);
uint32_t paths_shm_id = 0;
- size_t paths_offset = 0;
+ uint32_t paths_offset = 0;
uint32_t transforms_shm_id = 0;
- size_t transforms_offset = 0;
+ uint32_t transforms_offset = 0;
if (!PrepareInstancedPathCommand(
"glStencilFillPathInstancedCHROMIUM", num_paths, path_name_type,
paths, transform_type, transform_values, &buffer, &paths_shm_id,
@@ -6986,9 +7129,9 @@ void GLES2Implementation::StencilStrokePathInstancedCHROMIUM(
ScopedTransferBufferPtr buffer(helper_, transfer_buffer_);
uint32_t paths_shm_id = 0;
- size_t paths_offset = 0;
+ uint32_t paths_offset = 0;
uint32_t transforms_shm_id = 0;
- size_t transforms_offset = 0;
+ uint32_t transforms_offset = 0;
if (!PrepareInstancedPathCommand(
"glStencilStrokePathInstancedCHROMIUM", num_paths, path_name_type,
paths, transform_type, transform_values, &buffer, &paths_shm_id,
@@ -7019,9 +7162,9 @@ void GLES2Implementation::CoverFillPathInstancedCHROMIUM(
ScopedTransferBufferPtr buffer(helper_, transfer_buffer_);
uint32_t paths_shm_id = 0;
- size_t paths_offset = 0;
+ uint32_t paths_offset = 0;
uint32_t transforms_shm_id = 0;
- size_t transforms_offset = 0;
+ uint32_t transforms_offset = 0;
if (!PrepareInstancedPathCommand(
"glCoverFillPathInstancedCHROMIUM", num_paths, path_name_type, paths,
transform_type, transform_values, &buffer, &paths_shm_id,
@@ -7053,9 +7196,9 @@ void GLES2Implementation::CoverStrokePathInstancedCHROMIUM(
ScopedTransferBufferPtr buffer(helper_, transfer_buffer_);
uint32_t paths_shm_id = 0;
- size_t paths_offset = 0;
+ uint32_t paths_offset = 0;
uint32_t transforms_shm_id = 0;
- size_t transforms_offset = 0;
+ uint32_t transforms_offset = 0;
if (!PrepareInstancedPathCommand(
"glCoverStrokePathInstancedCHROMIUM", num_paths, path_name_type,
paths, transform_type, transform_values, &buffer, &paths_shm_id,
@@ -7089,9 +7232,9 @@ void GLES2Implementation::StencilThenCoverFillPathInstancedCHROMIUM(
ScopedTransferBufferPtr buffer(helper_, transfer_buffer_);
uint32_t paths_shm_id = 0;
- size_t paths_offset = 0;
+ uint32_t paths_offset = 0;
uint32_t transforms_shm_id = 0;
- size_t transforms_offset = 0;
+ uint32_t transforms_offset = 0;
if (!PrepareInstancedPathCommand(
"glStencilThenCoverFillPathInstancedCHROMIUM", num_paths,
path_name_type, paths, transform_type, transform_values, &buffer,
@@ -7128,9 +7271,9 @@ void GLES2Implementation::StencilThenCoverStrokePathInstancedCHROMIUM(
ScopedTransferBufferPtr buffer(helper_, transfer_buffer_);
uint32_t paths_shm_id = 0;
- size_t paths_offset = 0;
+ uint32_t paths_offset = 0;
uint32_t transforms_shm_id = 0;
- size_t transforms_offset = 0;
+ uint32_t transforms_offset = 0;
if (!PrepareInstancedPathCommand(
"glStencilThenCoverStrokePathInstancedCHROMIUM", num_paths,
path_name_type, paths, transform_type, transform_values, &buffer,
diff --git a/chromium/gpu/command_buffer/client/gles2_implementation.h b/chromium/gpu/command_buffer/client/gles2_implementation.h
index 615d2cd4753..44f7d0bdd56 100644
--- a/chromium/gpu/command_buffer/client/gles2_implementation.h
+++ b/chromium/gpu/command_buffer/client/gles2_implementation.h
@@ -13,6 +13,7 @@
#include <memory>
#include <set>
#include <string>
+#include <unordered_map>
#include <utility>
#include <vector>
@@ -135,7 +136,7 @@ class GLES2_IMPL_EXPORT GLES2Implementation : public GLES2Interface,
uint32_t texture_id) override;
bool ThreadsafeDiscardableTextureIsDeletedForTracing(
uint32_t texture_id) override;
- void* MapTransferCacheEntry(size_t serialized_size) override;
+ void* MapTransferCacheEntry(uint32_t serialized_size) override;
void UnmapAndCreateTransferCacheEntry(uint32_t type, uint32_t id) override;
bool ThreadsafeLockTransferCacheEntry(uint32_t type, uint32_t id) override;
void UnlockTransferCacheEntries(
@@ -148,6 +149,12 @@ class GLES2_IMPL_EXPORT GLES2Implementation : public GLES2Interface,
GLint GetUniformLocationHelper(GLuint program, const char* name);
GLint GetFragDataIndexEXTHelper(GLuint program, const char* name);
GLint GetFragDataLocationHelper(GLuint program, const char* name);
+
+ // Writes the result bucket into a buffer pointed to by |name| with a maximum
+ // size of |bufsize|. If |length| is non-null, it receives the number of
+ // characters written (excluding the final \0). This is a helper function for
+ // the GetActive*Helper functions that return names.
+ void GetResultNameHelper(GLsizei bufsize, GLsizei* length, char* name);
bool GetActiveAttribHelper(
GLuint program, GLuint index, GLsizei bufsize, GLsizei* length,
GLint* size, GLenum* type, char* name);
@@ -470,6 +477,30 @@ class GLES2_IMPL_EXPORT GLES2Implementation : public GLES2Interface,
GLenum target, GLintptr offset, GLsizeiptr size, const void* data,
ScopedTransferBufferPtr* buffer);
+ void MultiDrawArraysWEBGLHelper(GLenum mode,
+ const GLint* firsts,
+ const GLsizei* counts,
+ GLsizei drawcount);
+
+ void MultiDrawArraysInstancedWEBGLHelper(GLenum mode,
+ const GLint* firsts,
+ const GLsizei* counts,
+ const GLsizei* instanceCounts,
+ GLsizei drawcount);
+
+ void MultiDrawElementsWEBGLHelper(GLenum mode,
+ const GLsizei* counts,
+ GLenum type,
+ const GLsizei* offsets,
+ GLsizei drawcount);
+
+ void MultiDrawElementsInstancedWEBGLHelper(GLenum mode,
+ const GLsizei* counts,
+ GLenum type,
+ const GLsizei* offsets,
+ const GLsizei* instanceCounts,
+ GLsizei drawcount);
+
GLuint CreateImageCHROMIUMHelper(ClientBuffer buffer,
GLsizei width,
GLsizei height,
@@ -603,9 +634,9 @@ class GLES2_IMPL_EXPORT GLES2Implementation : public GLES2Interface,
const GLfloat* transform_values,
ScopedTransferBufferPtr* buffer,
uint32_t* out_paths_shm_id,
- size_t* out_paths_offset,
+ uint32_t* out_paths_offset,
uint32_t* out_transforms_shm_id,
- size_t* out_transforms_offset);
+ uint32_t* out_transforms_offset);
// Set to 1 to have the client fail when a GL error is generated.
// This helps find bugs in the renderer since the debugger stops on the error.
@@ -753,7 +784,7 @@ class GLES2_IMPL_EXPORT GLES2Implementation : public GLES2Interface,
MappedBufferMap mapped_buffers_;
// TODO(zmo): Consolidate |mapped_buffers_| and |mapped_buffer_range_map_|.
- typedef base::hash_map<GLuint, MappedBuffer> MappedBufferRangeMap;
+ typedef std::unordered_map<GLuint, MappedBuffer> MappedBufferRangeMap;
MappedBufferRangeMap mapped_buffer_range_map_;
typedef std::map<const void*, MappedTexture> MappedTextureMap;
@@ -771,7 +802,7 @@ class GLES2_IMPL_EXPORT GLES2Implementation : public GLES2Interface,
base::Optional<ScopedMappedMemoryPtr> font_mapped_buffer_;
base::Optional<ScopedTransferBufferPtr> raster_mapped_buffer_;
- base::Callback<void(const char*, int32_t)> error_message_callback_;
+ base::RepeatingCallback<void(const char*, int32_t)> error_message_callback_;
bool deferring_error_callbacks_ = false;
std::deque<DeferredErrorCallback> deferred_error_callbacks_;
diff --git a/chromium/gpu/command_buffer/client/gles2_implementation_autogen.h b/chromium/gpu/command_buffer/client/gles2_implementation_autogen.h
index b520b91d139..624ea51c828 100644
--- a/chromium/gpu/command_buffer/client/gles2_implementation_autogen.h
+++ b/chromium/gpu/command_buffer/client/gles2_implementation_autogen.h
@@ -528,6 +528,30 @@ void ShallowFlushCHROMIUM() override;
void OrderingBarrierCHROMIUM() override;
+void MultiDrawArraysWEBGL(GLenum mode,
+ const GLint* firsts,
+ const GLsizei* counts,
+ GLsizei drawcount) override;
+
+void MultiDrawArraysInstancedWEBGL(GLenum mode,
+ const GLint* firsts,
+ const GLsizei* counts,
+ const GLsizei* instance_counts,
+ GLsizei drawcount) override;
+
+void MultiDrawElementsWEBGL(GLenum mode,
+ const GLsizei* counts,
+ GLenum type,
+ const GLsizei* offsets,
+ GLsizei drawcount) override;
+
+void MultiDrawElementsInstancedWEBGL(GLenum mode,
+ const GLsizei* counts,
+ GLenum type,
+ const GLsizei* offsets,
+ const GLsizei* instance_counts,
+ GLsizei drawcount) override;
+
void StencilFunc(GLenum func, GLint ref, GLuint mask) override;
void StencilFuncSeparate(GLenum face,
@@ -1079,19 +1103,28 @@ void FlushDriverCachesCHROMIUM() override;
GLuint GetLastFlushIdCHROMIUM() override;
-void ScheduleDCLayerSharedStateCHROMIUM(GLfloat opacity,
- GLboolean is_clipped,
- const GLfloat* clip_rect,
- GLint z_order,
- const GLfloat* transform) override;
-
-void ScheduleDCLayerCHROMIUM(GLsizei num_textures,
- const GLuint* contents_texture_ids,
- const GLfloat* contents_rect,
- GLuint background_color,
- GLuint edge_aa_mask,
- const GLfloat* bounds_rect,
- GLuint filter,
+void ScheduleDCLayerCHROMIUM(GLuint y_texture_id,
+ GLuint uv_texture_id,
+ GLint z_order,
+ GLint content_x,
+ GLint content_y,
+ GLint content_width,
+ GLint content_height,
+ GLint quad_x,
+ GLint quad_y,
+ GLint quad_width,
+ GLint quad_height,
+ GLfloat transform_c1r1,
+ GLfloat transform_c2r1,
+ GLfloat transform_c1r2,
+ GLfloat transform_c2r2,
+ GLfloat transform_tx,
+ GLfloat transform_ty,
+ GLboolean is_clipped,
+ GLint clip_x,
+ GLint clip_y,
+ GLint clip_width,
+ GLint clip_height,
GLuint protected_video_type) override;
void SetActiveURLCHROMIUM(const char* url) override;
@@ -1287,8 +1320,7 @@ void FramebufferTextureMultiviewLayeredANGLE(GLenum target,
void MaxShaderCompilerThreadsKHR(GLuint count) override;
-GLuint CreateAndTexStorage2DSharedImageCHROMIUM(GLenum internalFormat,
- const GLbyte* mailbox) override;
+GLuint CreateAndTexStorage2DSharedImageCHROMIUM(const GLbyte* mailbox) override;
void BeginSharedImageAccessDirectCHROMIUM(GLuint texture, GLenum mode) override;
diff --git a/chromium/gpu/command_buffer/client/gles2_implementation_impl_autogen.h b/chromium/gpu/command_buffer/client/gles2_implementation_impl_autogen.h
index 45060ce79d7..b196b0a5879 100644
--- a/chromium/gpu/command_buffer/client/gles2_implementation_impl_autogen.h
+++ b/chromium/gpu/command_buffer/client/gles2_implementation_impl_autogen.h
@@ -241,13 +241,13 @@ void GLES2Implementation::ClearBufferfv(GLenum buffer,
<< GLES2Util::GetStringBufferfv(buffer) << ", "
<< drawbuffers << ", " << static_cast<const void*>(value)
<< ")");
- size_t count = GLES2Util::CalcClearBufferfvDataCount(buffer);
+ uint32_t count = GLES2Util::CalcClearBufferfvDataCount(buffer);
DCHECK_LE(count, 4u);
if (count == 0) {
SetGLErrorInvalidEnum("glClearBufferfv", buffer, "buffer");
return;
}
- for (size_t ii = 0; ii < count; ++ii)
+ for (uint32_t ii = 0; ii < count; ++ii)
GPU_CLIENT_LOG("value[" << ii << "]: " << value[ii]);
helper_->ClearBufferfvImmediate(buffer, drawbuffers, value);
CheckGLError();
@@ -261,13 +261,13 @@ void GLES2Implementation::ClearBufferiv(GLenum buffer,
<< GLES2Util::GetStringBufferiv(buffer) << ", "
<< drawbuffers << ", " << static_cast<const void*>(value)
<< ")");
- size_t count = GLES2Util::CalcClearBufferivDataCount(buffer);
+ uint32_t count = GLES2Util::CalcClearBufferivDataCount(buffer);
DCHECK_LE(count, 4u);
if (count == 0) {
SetGLErrorInvalidEnum("glClearBufferiv", buffer, "buffer");
return;
}
- for (size_t ii = 0; ii < count; ++ii)
+ for (uint32_t ii = 0; ii < count; ++ii)
GPU_CLIENT_LOG("value[" << ii << "]: " << value[ii]);
helper_->ClearBufferivImmediate(buffer, drawbuffers, value);
CheckGLError();
@@ -281,13 +281,13 @@ void GLES2Implementation::ClearBufferuiv(GLenum buffer,
<< GLES2Util::GetStringBufferuiv(buffer) << ", "
<< drawbuffers << ", " << static_cast<const void*>(value)
<< ")");
- size_t count = GLES2Util::CalcClearBufferuivDataCount(buffer);
+ uint32_t count = GLES2Util::CalcClearBufferuivDataCount(buffer);
DCHECK_LE(count, 4u);
if (count == 0) {
SetGLErrorInvalidEnum("glClearBufferuiv", buffer, "buffer");
return;
}
- for (size_t ii = 0; ii < count; ++ii)
+ for (uint32_t ii = 0; ii < count; ++ii)
GPU_CLIENT_LOG("value[" << ii << "]: " << value[ii]);
helper_->ClearBufferuivImmediate(buffer, drawbuffers, value);
CheckGLError();
@@ -1761,8 +1761,8 @@ void GLES2Implementation::SamplerParameterfv(GLuint sampler,
GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glSamplerParameterfv(" << sampler
<< ", " << GLES2Util::GetStringSamplerParameter(pname)
<< ", " << static_cast<const void*>(params) << ")");
- size_t count = 1;
- for (size_t ii = 0; ii < count; ++ii)
+ uint32_t count = 1;
+ for (uint32_t ii = 0; ii < count; ++ii)
GPU_CLIENT_LOG("value[" << ii << "]: " << params[ii]);
helper_->SamplerParameterfvImmediate(sampler, pname, params);
CheckGLError();
@@ -1786,8 +1786,8 @@ void GLES2Implementation::SamplerParameteriv(GLuint sampler,
GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glSamplerParameteriv(" << sampler
<< ", " << GLES2Util::GetStringSamplerParameter(pname)
<< ", " << static_cast<const void*>(params) << ")");
- size_t count = 1;
- for (size_t ii = 0; ii < count; ++ii)
+ uint32_t count = 1;
+ for (uint32_t ii = 0; ii < count; ++ii)
GPU_CLIENT_LOG("value[" << ii << "]: " << params[ii]);
helper_->SamplerParameterivImmediate(sampler, pname, params);
CheckGLError();
@@ -1929,8 +1929,8 @@ void GLES2Implementation::TexParameterfv(GLenum target,
<< GLES2Util::GetStringTextureBindTarget(target) << ", "
<< GLES2Util::GetStringTextureParameter(pname) << ", "
<< static_cast<const void*>(params) << ")");
- size_t count = 1;
- for (size_t ii = 0; ii < count; ++ii)
+ uint32_t count = 1;
+ for (uint32_t ii = 0; ii < count; ++ii)
GPU_CLIENT_LOG("value[" << ii << "]: " << params[ii]);
helper_->TexParameterfvImmediate(target, pname, params);
CheckGLError();
@@ -1956,8 +1956,8 @@ void GLES2Implementation::TexParameteriv(GLenum target,
<< GLES2Util::GetStringTextureBindTarget(target) << ", "
<< GLES2Util::GetStringTextureParameter(pname) << ", "
<< static_cast<const void*>(params) << ")");
- size_t count = 1;
- for (size_t ii = 0; ii < count; ++ii)
+ uint32_t count = 1;
+ for (uint32_t ii = 0; ii < count; ++ii)
GPU_CLIENT_LOG("value[" << ii << "]: " << params[ii]);
helper_->TexParameterivImmediate(target, pname, params);
CheckGLError();
@@ -2647,8 +2647,8 @@ void GLES2Implementation::VertexAttrib1fv(GLuint indx, const GLfloat* values) {
GPU_CLIENT_SINGLE_THREAD_CHECK();
GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glVertexAttrib1fv(" << indx << ", "
<< static_cast<const void*>(values) << ")");
- size_t count = 1;
- for (size_t ii = 0; ii < count; ++ii)
+ uint32_t count = 1;
+ for (uint32_t ii = 0; ii < count; ++ii)
GPU_CLIENT_LOG("value[" << ii << "]: " << values[ii]);
helper_->VertexAttrib1fvImmediate(indx, values);
CheckGLError();
@@ -2666,8 +2666,8 @@ void GLES2Implementation::VertexAttrib2fv(GLuint indx, const GLfloat* values) {
GPU_CLIENT_SINGLE_THREAD_CHECK();
GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glVertexAttrib2fv(" << indx << ", "
<< static_cast<const void*>(values) << ")");
- size_t count = 2;
- for (size_t ii = 0; ii < count; ++ii)
+ uint32_t count = 2;
+ for (uint32_t ii = 0; ii < count; ++ii)
GPU_CLIENT_LOG("value[" << ii << "]: " << values[ii]);
helper_->VertexAttrib2fvImmediate(indx, values);
CheckGLError();
@@ -2688,8 +2688,8 @@ void GLES2Implementation::VertexAttrib3fv(GLuint indx, const GLfloat* values) {
GPU_CLIENT_SINGLE_THREAD_CHECK();
GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glVertexAttrib3fv(" << indx << ", "
<< static_cast<const void*>(values) << ")");
- size_t count = 3;
- for (size_t ii = 0; ii < count; ++ii)
+ uint32_t count = 3;
+ for (uint32_t ii = 0; ii < count; ++ii)
GPU_CLIENT_LOG("value[" << ii << "]: " << values[ii]);
helper_->VertexAttrib3fvImmediate(indx, values);
CheckGLError();
@@ -2711,8 +2711,8 @@ void GLES2Implementation::VertexAttrib4fv(GLuint indx, const GLfloat* values) {
GPU_CLIENT_SINGLE_THREAD_CHECK();
GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glVertexAttrib4fv(" << indx << ", "
<< static_cast<const void*>(values) << ")");
- size_t count = 4;
- for (size_t ii = 0; ii < count; ++ii)
+ uint32_t count = 4;
+ for (uint32_t ii = 0; ii < count; ++ii)
GPU_CLIENT_LOG("value[" << ii << "]: " << values[ii]);
helper_->VertexAttrib4fvImmediate(indx, values);
CheckGLError();
@@ -2734,8 +2734,8 @@ void GLES2Implementation::VertexAttribI4iv(GLuint indx, const GLint* values) {
GPU_CLIENT_SINGLE_THREAD_CHECK();
GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glVertexAttribI4iv(" << indx
<< ", " << static_cast<const void*>(values) << ")");
- size_t count = 4;
- for (size_t ii = 0; ii < count; ++ii)
+ uint32_t count = 4;
+ for (uint32_t ii = 0; ii < count; ++ii)
GPU_CLIENT_LOG("value[" << ii << "]: " << values[ii]);
helper_->VertexAttribI4ivImmediate(indx, values);
CheckGLError();
@@ -2758,8 +2758,8 @@ void GLES2Implementation::VertexAttribI4uiv(GLuint indx, const GLuint* values) {
GPU_CLIENT_SINGLE_THREAD_CHECK();
GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glVertexAttribI4uiv(" << indx
<< ", " << static_cast<const void*>(values) << ")");
- size_t count = 4;
- for (size_t ii = 0; ii < count; ++ii)
+ uint32_t count = 4;
+ for (uint32_t ii = 0; ii < count; ++ii)
GPU_CLIENT_LOG("value[" << ii << "]: " << values[ii]);
helper_->VertexAttribI4uivImmediate(indx, values);
CheckGLError();
@@ -3389,14 +3389,58 @@ void GLES2Implementation::FlushDriverCachesCHROMIUM() {
CheckGLError();
}
+void GLES2Implementation::ScheduleDCLayerCHROMIUM(GLuint y_texture_id,
+ GLuint uv_texture_id,
+ GLint z_order,
+ GLint content_x,
+ GLint content_y,
+ GLint content_width,
+ GLint content_height,
+ GLint quad_x,
+ GLint quad_y,
+ GLint quad_width,
+ GLint quad_height,
+ GLfloat transform_c1r1,
+ GLfloat transform_c2r1,
+ GLfloat transform_c1r2,
+ GLfloat transform_c2r2,
+ GLfloat transform_tx,
+ GLfloat transform_ty,
+ GLboolean is_clipped,
+ GLint clip_x,
+ GLint clip_y,
+ GLint clip_width,
+ GLint clip_height,
+ GLuint protected_video_type) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG(
+ "[" << GetLogPrefix() << "] glScheduleDCLayerCHROMIUM(" << y_texture_id
+ << ", " << uv_texture_id << ", " << z_order << ", " << content_x
+ << ", " << content_y << ", " << content_width << ", "
+ << content_height << ", " << quad_x << ", " << quad_y << ", "
+ << quad_width << ", " << quad_height << ", " << transform_c1r1 << ", "
+ << transform_c2r1 << ", " << transform_c1r2 << ", " << transform_c2r2
+ << ", " << transform_tx << ", " << transform_ty << ", "
+ << GLES2Util::GetStringBool(is_clipped) << ", " << clip_x << ", "
+ << clip_y << ", " << clip_width << ", " << clip_height << ", "
+ << protected_video_type << ")");
+ helper_->ScheduleDCLayerCHROMIUM(
+ y_texture_id, uv_texture_id, z_order, content_x, content_y, content_width,
+ content_height, quad_x, quad_y, quad_width, quad_height, transform_c1r1,
+ transform_c2r1, transform_c1r2, transform_c2r2, transform_tx,
+ transform_ty, is_clipped, clip_x, clip_y, clip_width, clip_height,
+ protected_video_type);
+ CheckGLError();
+}
+
void GLES2Implementation::MatrixLoadfCHROMIUM(GLenum matrixMode,
const GLfloat* m) {
GPU_CLIENT_SINGLE_THREAD_CHECK();
GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glMatrixLoadfCHROMIUM("
<< GLES2Util::GetStringMatrixMode(matrixMode) << ", "
<< static_cast<const void*>(m) << ")");
- size_t count = 16;
- for (size_t ii = 0; ii < count; ++ii)
+ uint32_t count = 16;
+ for (uint32_t ii = 0; ii < count; ++ii)
GPU_CLIENT_LOG("value[" << ii << "]: " << m[ii]);
helper_->MatrixLoadfCHROMIUMImmediate(matrixMode, m);
CheckGLError();
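The new ScheduleDCLayerCHROMIUM above trades the old pointer-based rect and transform arguments for a flat scalar list; reading the names as column/row indices, the six transform_* floats form a 2x3 affine matrix mapping (x, y) to (c1r1*x + c2r1*y + tx, c1r2*x + c2r2*y + ty). A hedged usage sketch with an identity transform (gl, the texture ids, and all rect values are hypothetical):

// Schedule a 1280x720 overlay (y/uv texture pair) with no clip, no transform.
gl->ScheduleDCLayerCHROMIUM(
    y_texture, uv_texture, /*z_order=*/1,
    /*content_x=*/0, /*content_y=*/0, /*content_width=*/1280,
    /*content_height=*/720,
    /*quad_x=*/0, /*quad_y=*/0, /*quad_width=*/1280, /*quad_height=*/720,
    /*transform_c1r1=*/1.0f, /*transform_c2r1=*/0.0f,
    /*transform_c1r2=*/0.0f, /*transform_c2r2=*/1.0f,
    /*transform_tx=*/0.0f, /*transform_ty=*/0.0f,
    /*is_clipped=*/false, /*clip_x=*/0, /*clip_y=*/0,
    /*clip_width=*/0, /*clip_height=*/0,
    /*protected_video_type=*/0);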
@@ -3563,8 +3607,8 @@ void GLES2Implementation::UniformMatrix4fvStreamTextureMatrixCHROMIUM(
<< "] glUniformMatrix4fvStreamTextureMatrixCHROMIUM("
<< location << ", " << GLES2Util::GetStringBool(transpose)
<< ", " << static_cast<const void*>(transform) << ")");
- size_t count = 16;
- for (size_t ii = 0; ii < count; ++ii)
+ uint32_t count = 16;
+ for (uint32_t ii = 0; ii < count; ++ii)
GPU_CLIENT_LOG("value[" << ii << "]: " << transform[ii]);
helper_->UniformMatrix4fvStreamTextureMatrixCHROMIUMImmediate(
location, transpose, transform);
diff --git a/chromium/gpu/command_buffer/client/gles2_implementation_unittest.cc b/chromium/gpu/command_buffer/client/gles2_implementation_unittest.cc
index 80cae58011e..44f1e44435d 100644
--- a/chromium/gpu/command_buffer/client/gles2_implementation_unittest.cc
+++ b/chromium/gpu/command_buffer/client/gles2_implementation_unittest.cc
@@ -17,6 +17,7 @@
#include <memory>
#include "base/compiler_specific.h"
+#include "base/stl_util.h"
#include "gpu/command_buffer/client/client_test_helper.h"
#include "gpu/command_buffer/client/gles2_cmd_helper.h"
#include "gpu/command_buffer/client/mock_transfer_buffer.h"
@@ -852,9 +853,9 @@ TEST_F(GLES2ImplementationTest, DrawArraysClientSideBuffers) {
const GLint kFirst = 1;
const GLsizei kCount = 2;
const GLsizei kSize1 =
- arraysize(verts) * kNumComponents1 * sizeof(verts[0][0]);
+ base::size(verts) * kNumComponents1 * sizeof(verts[0][0]);
const GLsizei kSize2 =
- arraysize(verts) * kNumComponents2 * sizeof(verts[0][0]);
+ base::size(verts) * kNumComponents2 * sizeof(verts[0][0]);
const GLsizei kEmuOffset1 = 0;
const GLsizei kEmuOffset2 = kSize1;
const GLsizei kTotalSize = kSize1 + kSize2;
@@ -920,7 +921,7 @@ TEST_F(GLES2ImplementationTest, DrawArraysInstancedANGLEClientSideBuffers) {
const GLsizei kCount = 2;
const GLuint kDivisor = 1;
const GLsizei kSize1 =
- arraysize(verts) * kNumComponents1 * sizeof(verts[0][0]);
+ base::size(verts) * kNumComponents1 * sizeof(verts[0][0]);
const GLsizei kSize2 =
1 * kNumComponents2 * sizeof(verts[0][0]);
const GLsizei kEmuOffset1 = 0;
@@ -998,9 +999,9 @@ TEST_F(GLES2ImplementationTest, DrawElementsClientSideBuffers) {
const GLsizei kClientStride = sizeof(verts[0]);
const GLsizei kCount = 2;
const GLsizei kSize1 =
- arraysize(verts) * kNumComponents1 * sizeof(verts[0][0]);
+ base::size(verts) * kNumComponents1 * sizeof(verts[0][0]);
const GLsizei kSize2 =
- arraysize(verts) * kNumComponents2 * sizeof(verts[0][0]);
+ base::size(verts) * kNumComponents2 * sizeof(verts[0][0]);
const GLsizei kEmuOffset1 = 0;
const GLsizei kEmuOffset2 = kSize1;
const GLsizei kTotalSize = kSize1 + kSize2;
@@ -1082,9 +1083,9 @@ TEST_F(GLES2ImplementationTest, DrawElementsClientSideBuffersIndexUint) {
const GLsizei kClientStride = sizeof(verts[0]);
const GLsizei kCount = 2;
const GLsizei kSize1 =
- arraysize(verts) * kNumComponents1 * sizeof(verts[0][0]);
+ base::size(verts) * kNumComponents1 * sizeof(verts[0][0]);
const GLsizei kSize2 =
- arraysize(verts) * kNumComponents2 * sizeof(verts[0][0]);
+ base::size(verts) * kNumComponents2 * sizeof(verts[0][0]);
const GLsizei kEmuOffset1 = 0;
const GLsizei kEmuOffset2 = kSize1;
const GLsizei kTotalSize = kSize1 + kSize2;
@@ -1191,9 +1192,9 @@ TEST_F(GLES2ImplementationTest,
const GLsizei kClientStride = sizeof(verts[0]);
const GLsizei kCount = 2;
const GLsizei kSize1 =
- arraysize(verts) * kNumComponents1 * sizeof(verts[0][0]);
+ base::size(verts) * kNumComponents1 * sizeof(verts[0][0]);
const GLsizei kSize2 =
- arraysize(verts) * kNumComponents2 * sizeof(verts[0][0]);
+ base::size(verts) * kNumComponents2 * sizeof(verts[0][0]);
const GLsizei kEmuOffset1 = 0;
const GLsizei kEmuOffset2 = kSize1;
const GLsizei kTotalSize = kSize1 + kSize2;
@@ -1280,7 +1281,7 @@ TEST_F(GLES2ImplementationTest, DrawElementsInstancedANGLEClientSideBuffers) {
const GLsizei kClientStride = sizeof(verts[0]);
const GLsizei kCount = 2;
const GLsizei kSize1 =
- arraysize(verts) * kNumComponents1 * sizeof(verts[0][0]);
+ base::size(verts) * kNumComponents1 * sizeof(verts[0][0]);
const GLsizei kSize2 =
1 * kNumComponents2 * sizeof(verts[0][0]);
const GLuint kDivisor = 1;
@@ -2372,7 +2373,7 @@ TEST_F(GLES2ImplementationTest, SubImage2DUnpack) {
}
for (int sub = 0; sub < 2; ++sub) {
- for (size_t a = 0; a < arraysize(unpack_alignments); ++a) {
+ for (size_t a = 0; a < base::size(unpack_alignments); ++a) {
const void* commands = GetPut();
GLint alignment = unpack_alignments[a];
@@ -2513,7 +2514,7 @@ TEST_F(GLES3ImplementationTest, SubImage3DUnpack) {
}
for (int sub = 0; sub < 2; ++sub) {
- for (size_t a = 0; a < arraysize(unpack_alignments); ++a) {
+ for (size_t a = 0; a < base::size(unpack_alignments); ++a) {
const void* commands = GetPut();
GLint alignment = unpack_alignments[a];
@@ -2674,11 +2675,10 @@ TEST_F(GLES2ImplementationTest, TextureInvalidArguments) {
// to (runtime-detected) compression formats. Try to infer the error with an
// aux check.
const GLenum kCompressedFormat = GL_ETC1_RGB8_OES;
- gl_->CompressedTexImage2D(
- kTarget, kLevel, kCompressedFormat, kWidth, kHeight, kBorder,
- arraysize(pixels), pixels);
+ gl_->CompressedTexImage2D(kTarget, kLevel, kCompressedFormat, kWidth, kHeight,
+ kBorder, base::size(pixels), pixels);
- // In the above, kCompressedFormat and arraysize(pixels) are possibly wrong
+ // In the above, kCompressedFormat and base::size(pixels) are possibly wrong
// values. First ensure that these do not cause failures at the client. If
// this check ever fails, it probably means that the client checks more than
// it did when this test was written. In this case, more code needs to be
@@ -2689,9 +2689,8 @@ TEST_F(GLES2ImplementationTest, TextureInvalidArguments) {
// Changing border to invalid border should make the call fail at the client
// checks.
- gl_->CompressedTexImage2D(
- kTarget, kLevel, kCompressedFormat, kWidth, kHeight, kInvalidBorder,
- arraysize(pixels), pixels);
+ gl_->CompressedTexImage2D(kTarget, kLevel, kCompressedFormat, kWidth, kHeight,
+ kInvalidBorder, base::size(pixels), pixels);
EXPECT_TRUE(NoCommandsWritten());
EXPECT_EQ(GL_INVALID_VALUE, CheckError());
}
@@ -3073,15 +3072,48 @@ TEST_F(GLES2ImplementationTest, BufferDataLargerThanTransferBuffer) {
ExpectedMemoryInfo mem2 = GetExpectedMemory(kUsableSize);
Cmds expected;
- expected.set_size.Init(
- GL_ARRAY_BUFFER, arraysize(buf), 0, 0, GL_DYNAMIC_DRAW);
+ expected.set_size.Init(GL_ARRAY_BUFFER, base::size(buf), 0, 0,
+ GL_DYNAMIC_DRAW);
expected.copy_data1.Init(
GL_ARRAY_BUFFER, 0, kUsableSize, mem1.id, mem1.offset);
expected.set_token1.Init(GetNextToken());
expected.copy_data2.Init(
GL_ARRAY_BUFFER, kUsableSize, kUsableSize, mem2.id, mem2.offset);
expected.set_token2.Init(GetNextToken());
- gl_->BufferData(GL_ARRAY_BUFFER, arraysize(buf), buf, GL_DYNAMIC_DRAW);
+ gl_->BufferData(GL_ARRAY_BUFFER, base::size(buf), buf, GL_DYNAMIC_DRAW);
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+}
+
+TEST_F(GLES2ImplementationTest, MultiDrawArraysWEBGLLargerThanTransferBuffer) {
+ struct Cmds {
+ cmds::MultiDrawBeginCHROMIUM begin;
+ cmds::MultiDrawArraysCHROMIUM draw1;
+ cmd::SetToken set_token1;
+ cmds::MultiDrawArraysCHROMIUM draw2;
+ cmd::SetToken set_token2;
+ cmds::MultiDrawEndCHROMIUM end;
+ };
+ const unsigned kUsableSize =
+ kTransferBufferSize - GLES2Implementation::kStartingOffset;
+ const unsigned kDrawCount = kUsableSize / sizeof(int);
+ const unsigned kChunkDrawCount = kDrawCount / 2;
+ const unsigned kCountsOffset = kChunkDrawCount * sizeof(int);
+ GLint firsts[kDrawCount] = {0};
+ GLsizei counts[kDrawCount] = {0};
+
+ ExpectedMemoryInfo mem1 = GetExpectedMemory(kUsableSize);
+ ExpectedMemoryInfo mem2 = GetExpectedMemory(kUsableSize);
+
+ Cmds expected;
+ expected.begin.Init(kDrawCount);
+ expected.draw1.Init(GL_TRIANGLES, mem1.id, mem1.offset, mem1.id,
+ mem1.offset + kCountsOffset, kChunkDrawCount);
+ expected.set_token1.Init(GetNextToken());
+ expected.draw2.Init(GL_TRIANGLES, mem2.id, mem2.offset, mem2.id,
+ mem2.offset + kCountsOffset, kChunkDrawCount);
+ expected.set_token2.Init(GetNextToken());
+ expected.end.Init();
+ gl_->MultiDrawArraysWEBGL(GL_TRIANGLES, firsts, counts, kDrawCount);
EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
}
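This new test pins down the chunking behavior: when the firsts/counts arrays exceed the usable transfer buffer, the client splits one MultiDrawArraysWEBGL call into multiple MultiDrawArraysCHROMIUM commands of kChunkDrawCount draws each, bracketed by MultiDrawBegin/EndCHROMIUM. From the caller's side it remains a single call; a minimal sketch (gl and the array values are illustrative):

// Draw three disjoint ranges of the bound vertex buffer in one call; the
// implementation copies firsts/counts into the transfer buffer and only
// splits into chunks when they do not fit.
const GLint firsts[] = {0, 100, 200};
const GLsizei counts[] = {90, 90, 90};
gl->MultiDrawArraysWEBGL(GL_TRIANGLES, firsts, counts, /*drawcount=*/3);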
@@ -3102,7 +3134,7 @@ TEST_F(GLES2ImplementationTest, CapabilitiesAreCached) {
};
Cmds expected;
- for (size_t ii = 0; ii < arraysize(kStates); ++ii) {
+ for (size_t ii = 0; ii < base::size(kStates); ++ii) {
GLenum state = kStates[ii];
expected.enable_cmd.Init(state);
GLboolean result = gl_->IsEnabled(state);
@@ -3151,9 +3183,11 @@ TEST_F(GLES2ImplementationTest, BeginEndQueryEXT) {
GLuint data[2];
};
GenCmds expected_gen_cmds;
- expected_gen_cmds.gen.Init(arraysize(expected_ids), &expected_ids[0]);
- GLuint ids[arraysize(expected_ids)] = { 0, };
- gl_->GenQueriesEXT(arraysize(expected_ids), &ids[0]);
+ expected_gen_cmds.gen.Init(base::size(expected_ids), &expected_ids[0]);
+ GLuint ids[base::size(expected_ids)] = {
+ 0,
+ };
+ gl_->GenQueriesEXT(base::size(expected_ids), &ids[0]);
EXPECT_EQ(0, memcmp(
&expected_gen_cmds, commands_, sizeof(expected_gen_cmds)));
GLuint id1 = ids[0];
@@ -3338,9 +3372,11 @@ TEST_F(GLES2ImplementationTest, QueryCounterEXT) {
GLuint data[2];
};
GenCmds expected_gen_cmds;
- expected_gen_cmds.gen.Init(arraysize(expected_ids), &expected_ids[0]);
- GLuint ids[arraysize(expected_ids)] = { 0, };
- gl_->GenQueriesEXT(arraysize(expected_ids), &ids[0]);
+ expected_gen_cmds.gen.Init(base::size(expected_ids), &expected_ids[0]);
+ GLuint ids[base::size(expected_ids)] = {
+ 0,
+ };
+ gl_->GenQueriesEXT(base::size(expected_ids), &ids[0]);
EXPECT_EQ(0, memcmp(
&expected_gen_cmds, commands_, sizeof(expected_gen_cmds)));
GLuint id1 = ids[0];
@@ -3551,9 +3587,8 @@ TEST_F(GLES2ImplementationTest, CreateAndTexStorage2DSharedImageCHROMIUM) {
Mailbox mailbox = Mailbox::Generate();
Cmds expected;
- expected.cmd.Init(kTexturesStartId, GL_RGBA, mailbox.name);
- GLuint id =
- gl_->CreateAndTexStorage2DSharedImageCHROMIUM(GL_RGBA, mailbox.name);
+ expected.cmd.Init(kTexturesStartId, mailbox.name);
+ GLuint id = gl_->CreateAndTexStorage2DSharedImageCHROMIUM(mailbox.name);
EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
EXPECT_EQ(kTexturesStartId, id);
}
@@ -3883,7 +3918,7 @@ TEST_F(GLES2ImplementationTest, VerifySyncTokensCHROMIUM) {
EXPECT_CALL(*gpu_control_, CanWaitUnverifiedSyncToken(sync_token))
.WillOnce(Return(true));
EXPECT_CALL(*gpu_control_, EnsureWorkVisible());
- gl_->VerifySyncTokensCHROMIUM(sync_token_datas, arraysize(sync_token_datas));
+ gl_->VerifySyncTokensCHROMIUM(sync_token_datas, base::size(sync_token_datas));
EXPECT_TRUE(NoCommandsWritten());
EXPECT_EQ(GL_NO_ERROR, CheckError());
@@ -3940,7 +3975,7 @@ TEST_F(GLES2ImplementationTest, VerifySyncTokensCHROMIUM_Sequence) {
.InSequence(sequence)
.WillOnce(Return(true));
EXPECT_CALL(*gpu_control_, EnsureWorkVisible()).InSequence(sequence);
- gl_->VerifySyncTokensCHROMIUM(sync_token_datas, arraysize(sync_token_datas));
+ gl_->VerifySyncTokensCHROMIUM(sync_token_datas, base::size(sync_token_datas));
EXPECT_EQ(GL_NO_ERROR, CheckError());
EXPECT_TRUE(sync_token1.verified_flush());
@@ -3963,7 +3998,7 @@ TEST_F(GLES2ImplementationTest, VerifySyncTokensCHROMIUM_EmptySyncToken) {
// Ensure proper sequence of checking and validating.
EXPECT_CALL(*gpu_control_, CanWaitUnverifiedSyncToken(_)).Times(0);
EXPECT_CALL(*gpu_control_, EnsureWorkVisible()).Times(0);
- gl_->VerifySyncTokensCHROMIUM(sync_token_datas, arraysize(sync_token_datas));
+ gl_->VerifySyncTokensCHROMIUM(sync_token_datas, base::size(sync_token_datas));
EXPECT_TRUE(NoCommandsWritten());
EXPECT_EQ(GL_NO_ERROR, CheckError());
@@ -3982,12 +4017,9 @@ TEST_F(GLES2ImplementationTest, WaitSyncTokenCHROMIUM) {
struct Cmds {
cmds::InsertFenceSyncCHROMIUM insert_fence_sync;
- cmds::WaitSyncTokenCHROMIUM wait_sync_token;
};
Cmds expected;
expected.insert_fence_sync.Init(kFenceSync);
- expected.wait_sync_token.Init(kNamespaceId, kCommandBufferId.GetUnsafeValue(),
- kFenceSync);
EXPECT_CALL(*gpu_control_, GetNamespaceID()).WillOnce(Return(kNamespaceId));
EXPECT_CALL(*gpu_control_, GetCommandBufferID())
@@ -3997,7 +4029,7 @@ TEST_F(GLES2ImplementationTest, WaitSyncTokenCHROMIUM) {
EXPECT_CALL(*gpu_control_, EnsureWorkVisible());
gl_->GenSyncTokenCHROMIUM(sync_token_data);
- EXPECT_CALL(*gpu_control_, WaitSyncTokenHint(sync_token));
+ EXPECT_CALL(*gpu_control_, WaitSyncToken(sync_token));
gl_->WaitSyncTokenCHROMIUM(sync_token_data);
EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
}
diff --git a/chromium/gpu/command_buffer/client/gles2_implementation_unittest_autogen.h b/chromium/gpu/command_buffer/client/gles2_implementation_unittest_autogen.h
index ffbd0c4a4e3..722696075b7 100644
--- a/chromium/gpu/command_buffer/client/gles2_implementation_unittest_autogen.h
+++ b/chromium/gpu/command_buffer/client/gles2_implementation_unittest_autogen.h
@@ -2936,6 +2936,19 @@ TEST_F(GLES2ImplementationTest, FlushDriverCachesCHROMIUM) {
EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
}
+TEST_F(GLES2ImplementationTest, ScheduleDCLayerCHROMIUM) {
+ struct Cmds {
+ cmds::ScheduleDCLayerCHROMIUM cmd;
+ };
+ Cmds expected;
+ expected.cmd.Init(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17,
+ true, 19, 20, 21, 22, 23);
+
+ gl_->ScheduleDCLayerCHROMIUM(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
+ 15, 16, 17, true, 19, 20, 21, 22, 23);
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+}
+
TEST_F(GLES2ImplementationTest, MatrixLoadfCHROMIUM) {
GLfloat data[16] = {0};
struct Cmds {
diff --git a/chromium/gpu/command_buffer/client/gles2_interface_autogen.h b/chromium/gpu/command_buffer/client/gles2_interface_autogen.h
index 181c4a60d29..8f024988b3a 100644
--- a/chromium/gpu/command_buffer/client/gles2_interface_autogen.h
+++ b/chromium/gpu/command_buffer/client/gles2_interface_autogen.h
@@ -382,6 +382,26 @@ virtual void ShaderSource(GLuint shader,
virtual void ShallowFinishCHROMIUM() = 0;
virtual void ShallowFlushCHROMIUM() = 0;
virtual void OrderingBarrierCHROMIUM() = 0;
+virtual void MultiDrawArraysWEBGL(GLenum mode,
+ const GLint* firsts,
+ const GLsizei* counts,
+ GLsizei drawcount) = 0;
+virtual void MultiDrawArraysInstancedWEBGL(GLenum mode,
+ const GLint* firsts,
+ const GLsizei* counts,
+ const GLsizei* instance_counts,
+ GLsizei drawcount) = 0;
+virtual void MultiDrawElementsWEBGL(GLenum mode,
+ const GLsizei* counts,
+ GLenum type,
+ const GLsizei* offsets,
+ GLsizei drawcount) = 0;
+virtual void MultiDrawElementsInstancedWEBGL(GLenum mode,
+ const GLsizei* counts,
+ GLenum type,
+ const GLsizei* offsets,
+ const GLsizei* instance_counts,
+ GLsizei drawcount) = 0;
virtual void StencilFunc(GLenum func, GLint ref, GLuint mask) = 0;
virtual void StencilFuncSeparate(GLenum face,
GLenum func,
@@ -795,18 +815,28 @@ virtual void CommitOverlayPlanesCHROMIUM(GLuint64 swap_id,
GLbitfield flags = 0) = 0;
virtual void FlushDriverCachesCHROMIUM() = 0;
virtual GLuint GetLastFlushIdCHROMIUM() = 0;
-virtual void ScheduleDCLayerSharedStateCHROMIUM(GLfloat opacity,
- GLboolean is_clipped,
- const GLfloat* clip_rect,
- GLint z_order,
- const GLfloat* transform) = 0;
-virtual void ScheduleDCLayerCHROMIUM(GLsizei num_textures,
- const GLuint* contents_texture_ids,
- const GLfloat* contents_rect,
- GLuint background_color,
- GLuint edge_aa_mask,
- const GLfloat* bounds_rect,
- GLuint filter,
+virtual void ScheduleDCLayerCHROMIUM(GLuint y_texture_id,
+ GLuint uv_texture_id,
+ GLint z_order,
+ GLint content_x,
+ GLint content_y,
+ GLint content_width,
+ GLint content_height,
+ GLint quad_x,
+ GLint quad_y,
+ GLint quad_width,
+ GLint quad_height,
+ GLfloat transform_c1r1,
+ GLfloat transform_c2r1,
+ GLfloat transform_c1r2,
+ GLfloat transform_c2r2,
+ GLfloat transform_tx,
+ GLfloat transform_ty,
+ GLboolean is_clipped,
+ GLint clip_x,
+ GLint clip_y,
+ GLint clip_width,
+ GLint clip_height,
GLuint protected_video_type) = 0;
virtual void SetActiveURLCHROMIUM(const char* url) = 0;
virtual void MatrixLoadfCHROMIUM(GLenum matrixMode, const GLfloat* m) = 0;
@@ -959,7 +989,6 @@ virtual void FramebufferTextureMultiviewLayeredANGLE(GLenum target,
GLsizei numViews) = 0;
virtual void MaxShaderCompilerThreadsKHR(GLuint count) = 0;
virtual GLuint CreateAndTexStorage2DSharedImageCHROMIUM(
- GLenum internalFormat,
const GLbyte* mailbox) = 0;
virtual void BeginSharedImageAccessDirectCHROMIUM(GLuint texture,
GLenum mode) = 0;
diff --git a/chromium/gpu/command_buffer/client/gles2_interface_stub_autogen.h b/chromium/gpu/command_buffer/client/gles2_interface_stub_autogen.h
index 3d4833bb8ff..263313c515b 100644
--- a/chromium/gpu/command_buffer/client/gles2_interface_stub_autogen.h
+++ b/chromium/gpu/command_buffer/client/gles2_interface_stub_autogen.h
@@ -373,6 +373,26 @@ void ShaderSource(GLuint shader,
void ShallowFinishCHROMIUM() override;
void ShallowFlushCHROMIUM() override;
void OrderingBarrierCHROMIUM() override;
+void MultiDrawArraysWEBGL(GLenum mode,
+ const GLint* firsts,
+ const GLsizei* counts,
+ GLsizei drawcount) override;
+void MultiDrawArraysInstancedWEBGL(GLenum mode,
+ const GLint* firsts,
+ const GLsizei* counts,
+ const GLsizei* instance_counts,
+ GLsizei drawcount) override;
+void MultiDrawElementsWEBGL(GLenum mode,
+ const GLsizei* counts,
+ GLenum type,
+ const GLsizei* offsets,
+ GLsizei drawcount) override;
+void MultiDrawElementsInstancedWEBGL(GLenum mode,
+ const GLsizei* counts,
+ GLenum type,
+ const GLsizei* offsets,
+ const GLsizei* instance_counts,
+ GLsizei drawcount) override;
void StencilFunc(GLenum func, GLint ref, GLuint mask) override;
void StencilFuncSeparate(GLenum face,
GLenum func,
@@ -771,18 +791,28 @@ void ScheduleCALayerInUseQueryCHROMIUM(GLsizei count,
void CommitOverlayPlanesCHROMIUM(GLuint64 swap_id, GLbitfield flags) override;
void FlushDriverCachesCHROMIUM() override;
GLuint GetLastFlushIdCHROMIUM() override;
-void ScheduleDCLayerSharedStateCHROMIUM(GLfloat opacity,
- GLboolean is_clipped,
- const GLfloat* clip_rect,
- GLint z_order,
- const GLfloat* transform) override;
-void ScheduleDCLayerCHROMIUM(GLsizei num_textures,
- const GLuint* contents_texture_ids,
- const GLfloat* contents_rect,
- GLuint background_color,
- GLuint edge_aa_mask,
- const GLfloat* bounds_rect,
- GLuint filter,
+void ScheduleDCLayerCHROMIUM(GLuint y_texture_id,
+ GLuint uv_texture_id,
+ GLint z_order,
+ GLint content_x,
+ GLint content_y,
+ GLint content_width,
+ GLint content_height,
+ GLint quad_x,
+ GLint quad_y,
+ GLint quad_width,
+ GLint quad_height,
+ GLfloat transform_c1r1,
+ GLfloat transform_c2r1,
+ GLfloat transform_c1r2,
+ GLfloat transform_c2r2,
+ GLfloat transform_tx,
+ GLfloat transform_ty,
+ GLboolean is_clipped,
+ GLint clip_x,
+ GLint clip_y,
+ GLint clip_width,
+ GLint clip_height,
GLuint protected_video_type) override;
void SetActiveURLCHROMIUM(const char* url) override;
void MatrixLoadfCHROMIUM(GLenum matrixMode, const GLfloat* m) override;
@@ -928,8 +958,7 @@ void FramebufferTextureMultiviewLayeredANGLE(GLenum target,
GLint baseViewIndex,
GLsizei numViews) override;
void MaxShaderCompilerThreadsKHR(GLuint count) override;
-GLuint CreateAndTexStorage2DSharedImageCHROMIUM(GLenum internalFormat,
- const GLbyte* mailbox) override;
+GLuint CreateAndTexStorage2DSharedImageCHROMIUM(const GLbyte* mailbox) override;
void BeginSharedImageAccessDirectCHROMIUM(GLuint texture, GLenum mode) override;
void EndSharedImageAccessDirectCHROMIUM(GLuint texture) override;
#endif // GPU_COMMAND_BUFFER_CLIENT_GLES2_INTERFACE_STUB_AUTOGEN_H_
diff --git a/chromium/gpu/command_buffer/client/gles2_interface_stub_impl_autogen.h b/chromium/gpu/command_buffer/client/gles2_interface_stub_impl_autogen.h
index f5ebca07a38..81e592e25f7 100644
--- a/chromium/gpu/command_buffer/client/gles2_interface_stub_impl_autogen.h
+++ b/chromium/gpu/command_buffer/client/gles2_interface_stub_impl_autogen.h
@@ -493,6 +493,28 @@ void GLES2InterfaceStub::ShaderSource(GLuint /* shader */,
void GLES2InterfaceStub::ShallowFinishCHROMIUM() {}
void GLES2InterfaceStub::ShallowFlushCHROMIUM() {}
void GLES2InterfaceStub::OrderingBarrierCHROMIUM() {}
+void GLES2InterfaceStub::MultiDrawArraysWEBGL(GLenum /* mode */,
+ const GLint* /* firsts */,
+ const GLsizei* /* counts */,
+ GLsizei /* drawcount */) {}
+void GLES2InterfaceStub::MultiDrawArraysInstancedWEBGL(
+ GLenum /* mode */,
+ const GLint* /* firsts */,
+ const GLsizei* /* counts */,
+ const GLsizei* /* instance_counts */,
+ GLsizei /* drawcount */) {}
+void GLES2InterfaceStub::MultiDrawElementsWEBGL(GLenum /* mode */,
+ const GLsizei* /* counts */,
+ GLenum /* type */,
+ const GLsizei* /* offsets */,
+ GLsizei /* drawcount */) {}
+void GLES2InterfaceStub::MultiDrawElementsInstancedWEBGL(
+ GLenum /* mode */,
+ const GLsizei* /* counts */,
+ GLenum /* type */,
+ const GLsizei* /* offsets */,
+ const GLsizei* /* instance_counts */,
+ GLsizei /* drawcount */) {}
void GLES2InterfaceStub::StencilFunc(GLenum /* func */,
GLint /* ref */,
GLuint /* mask */) {}
@@ -1042,20 +1064,29 @@ void GLES2InterfaceStub::FlushDriverCachesCHROMIUM() {}
GLuint GLES2InterfaceStub::GetLastFlushIdCHROMIUM() {
return 0;
}
-void GLES2InterfaceStub::ScheduleDCLayerSharedStateCHROMIUM(
- GLfloat /* opacity */,
- GLboolean /* is_clipped */,
- const GLfloat* /* clip_rect */,
- GLint /* z_order */,
- const GLfloat* /* transform */) {}
void GLES2InterfaceStub::ScheduleDCLayerCHROMIUM(
- GLsizei /* num_textures */,
- const GLuint* /* contents_texture_ids */,
- const GLfloat* /* contents_rect */,
- GLuint /* background_color */,
- GLuint /* edge_aa_mask */,
- const GLfloat* /* bounds_rect */,
- GLuint /* filter */,
+ GLuint /* y_texture_id */,
+ GLuint /* uv_texture_id */,
+ GLint /* z_order */,
+ GLint /* content_x */,
+ GLint /* content_y */,
+ GLint /* content_width */,
+ GLint /* content_height */,
+ GLint /* quad_x */,
+ GLint /* quad_y */,
+ GLint /* quad_width */,
+ GLint /* quad_height */,
+ GLfloat /* transform_c1r1 */,
+ GLfloat /* transform_c2r1 */,
+ GLfloat /* transform_c1r2 */,
+ GLfloat /* transform_c2r2 */,
+ GLfloat /* transform_tx */,
+ GLfloat /* transform_ty */,
+ GLboolean /* is_clipped */,
+ GLint /* clip_x */,
+ GLint /* clip_y */,
+ GLint /* clip_width */,
+ GLint /* clip_height */,
GLuint /* protected_video_type */) {}
void GLES2InterfaceStub::SetActiveURLCHROMIUM(const char* /* url */) {}
void GLES2InterfaceStub::MatrixLoadfCHROMIUM(GLenum /* matrixMode */,
@@ -1246,7 +1277,6 @@ void GLES2InterfaceStub::FramebufferTextureMultiviewLayeredANGLE(
GLsizei /* numViews */) {}
void GLES2InterfaceStub::MaxShaderCompilerThreadsKHR(GLuint /* count */) {}
GLuint GLES2InterfaceStub::CreateAndTexStorage2DSharedImageCHROMIUM(
- GLenum /* internalFormat */,
const GLbyte* /* mailbox */) {
return 0;
}
diff --git a/chromium/gpu/command_buffer/client/gles2_trace_implementation_autogen.h b/chromium/gpu/command_buffer/client/gles2_trace_implementation_autogen.h
index caa12933ef3..61c2fa70d65 100644
--- a/chromium/gpu/command_buffer/client/gles2_trace_implementation_autogen.h
+++ b/chromium/gpu/command_buffer/client/gles2_trace_implementation_autogen.h
@@ -373,6 +373,26 @@ void ShaderSource(GLuint shader,
void ShallowFinishCHROMIUM() override;
void ShallowFlushCHROMIUM() override;
void OrderingBarrierCHROMIUM() override;
+void MultiDrawArraysWEBGL(GLenum mode,
+ const GLint* firsts,
+ const GLsizei* counts,
+ GLsizei drawcount) override;
+void MultiDrawArraysInstancedWEBGL(GLenum mode,
+ const GLint* firsts,
+ const GLsizei* counts,
+ const GLsizei* instance_counts,
+ GLsizei drawcount) override;
+void MultiDrawElementsWEBGL(GLenum mode,
+ const GLsizei* counts,
+ GLenum type,
+ const GLsizei* offsets,
+ GLsizei drawcount) override;
+void MultiDrawElementsInstancedWEBGL(GLenum mode,
+ const GLsizei* counts,
+ GLenum type,
+ const GLsizei* offsets,
+ const GLsizei* instance_counts,
+ GLsizei drawcount) override;
void StencilFunc(GLenum func, GLint ref, GLuint mask) override;
void StencilFuncSeparate(GLenum face,
GLenum func,
@@ -771,18 +791,28 @@ void ScheduleCALayerInUseQueryCHROMIUM(GLsizei count,
void CommitOverlayPlanesCHROMIUM(GLuint64 swap_id, GLbitfield flags) override;
void FlushDriverCachesCHROMIUM() override;
GLuint GetLastFlushIdCHROMIUM() override;
-void ScheduleDCLayerSharedStateCHROMIUM(GLfloat opacity,
- GLboolean is_clipped,
- const GLfloat* clip_rect,
- GLint z_order,
- const GLfloat* transform) override;
-void ScheduleDCLayerCHROMIUM(GLsizei num_textures,
- const GLuint* contents_texture_ids,
- const GLfloat* contents_rect,
- GLuint background_color,
- GLuint edge_aa_mask,
- const GLfloat* bounds_rect,
- GLuint filter,
+void ScheduleDCLayerCHROMIUM(GLuint y_texture_id,
+ GLuint uv_texture_id,
+ GLint z_order,
+ GLint content_x,
+ GLint content_y,
+ GLint content_width,
+ GLint content_height,
+ GLint quad_x,
+ GLint quad_y,
+ GLint quad_width,
+ GLint quad_height,
+ GLfloat transform_c1r1,
+ GLfloat transform_c2r1,
+ GLfloat transform_c1r2,
+ GLfloat transform_c2r2,
+ GLfloat transform_tx,
+ GLfloat transform_ty,
+ GLboolean is_clipped,
+ GLint clip_x,
+ GLint clip_y,
+ GLint clip_width,
+ GLint clip_height,
GLuint protected_video_type) override;
void SetActiveURLCHROMIUM(const char* url) override;
void MatrixLoadfCHROMIUM(GLenum matrixMode, const GLfloat* m) override;
@@ -928,8 +958,7 @@ void FramebufferTextureMultiviewLayeredANGLE(GLenum target,
GLint baseViewIndex,
GLsizei numViews) override;
void MaxShaderCompilerThreadsKHR(GLuint count) override;
-GLuint CreateAndTexStorage2DSharedImageCHROMIUM(GLenum internalFormat,
- const GLbyte* mailbox) override;
+GLuint CreateAndTexStorage2DSharedImageCHROMIUM(const GLbyte* mailbox) override;
void BeginSharedImageAccessDirectCHROMIUM(GLuint texture, GLenum mode) override;
void EndSharedImageAccessDirectCHROMIUM(GLuint texture) override;
#endif // GPU_COMMAND_BUFFER_CLIENT_GLES2_TRACE_IMPLEMENTATION_AUTOGEN_H_
diff --git a/chromium/gpu/command_buffer/client/gles2_trace_implementation_impl_autogen.h b/chromium/gpu/command_buffer/client/gles2_trace_implementation_impl_autogen.h
index 6ddd340314a..dcde091ee4b 100644
--- a/chromium/gpu/command_buffer/client/gles2_trace_implementation_impl_autogen.h
+++ b/chromium/gpu/command_buffer/client/gles2_trace_implementation_impl_autogen.h
@@ -1065,6 +1065,48 @@ void GLES2TraceImplementation::OrderingBarrierCHROMIUM() {
gl_->OrderingBarrierCHROMIUM();
}
+void GLES2TraceImplementation::MultiDrawArraysWEBGL(GLenum mode,
+ const GLint* firsts,
+ const GLsizei* counts,
+ GLsizei drawcount) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::MultiDrawArraysWEBGL");
+ gl_->MultiDrawArraysWEBGL(mode, firsts, counts, drawcount);
+}
+
+void GLES2TraceImplementation::MultiDrawArraysInstancedWEBGL(
+ GLenum mode,
+ const GLint* firsts,
+ const GLsizei* counts,
+ const GLsizei* instance_counts,
+ GLsizei drawcount) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu",
+ "GLES2Trace::MultiDrawArraysInstancedWEBGL");
+ gl_->MultiDrawArraysInstancedWEBGL(mode, firsts, counts, instance_counts,
+ drawcount);
+}
+
+void GLES2TraceImplementation::MultiDrawElementsWEBGL(GLenum mode,
+ const GLsizei* counts,
+ GLenum type,
+ const GLsizei* offsets,
+ GLsizei drawcount) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::MultiDrawElementsWEBGL");
+ gl_->MultiDrawElementsWEBGL(mode, counts, type, offsets, drawcount);
+}
+
+void GLES2TraceImplementation::MultiDrawElementsInstancedWEBGL(
+ GLenum mode,
+ const GLsizei* counts,
+ GLenum type,
+ const GLsizei* offsets,
+ const GLsizei* instance_counts,
+ GLsizei drawcount) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu",
+ "GLES2Trace::MultiDrawElementsInstancedWEBGL");
+ gl_->MultiDrawElementsInstancedWEBGL(mode, counts, type, offsets,
+ instance_counts, drawcount);
+}
+
void GLES2TraceImplementation::StencilFunc(GLenum func,
GLint ref,
GLuint mask) {
@@ -2225,31 +2267,37 @@ GLuint GLES2TraceImplementation::GetLastFlushIdCHROMIUM() {
return gl_->GetLastFlushIdCHROMIUM();
}
-void GLES2TraceImplementation::ScheduleDCLayerSharedStateCHROMIUM(
- GLfloat opacity,
- GLboolean is_clipped,
- const GLfloat* clip_rect,
- GLint z_order,
- const GLfloat* transform) {
- TRACE_EVENT_BINARY_EFFICIENT0(
- "gpu", "GLES2Trace::ScheduleDCLayerSharedStateCHROMIUM");
- gl_->ScheduleDCLayerSharedStateCHROMIUM(opacity, is_clipped, clip_rect,
- z_order, transform);
-}
-
void GLES2TraceImplementation::ScheduleDCLayerCHROMIUM(
- GLsizei num_textures,
- const GLuint* contents_texture_ids,
- const GLfloat* contents_rect,
- GLuint background_color,
- GLuint edge_aa_mask,
- const GLfloat* bounds_rect,
- GLuint filter,
+ GLuint y_texture_id,
+ GLuint uv_texture_id,
+ GLint z_order,
+ GLint content_x,
+ GLint content_y,
+ GLint content_width,
+ GLint content_height,
+ GLint quad_x,
+ GLint quad_y,
+ GLint quad_width,
+ GLint quad_height,
+ GLfloat transform_c1r1,
+ GLfloat transform_c2r1,
+ GLfloat transform_c1r2,
+ GLfloat transform_c2r2,
+ GLfloat transform_tx,
+ GLfloat transform_ty,
+ GLboolean is_clipped,
+ GLint clip_x,
+ GLint clip_y,
+ GLint clip_width,
+ GLint clip_height,
GLuint protected_video_type) {
TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::ScheduleDCLayerCHROMIUM");
- gl_->ScheduleDCLayerCHROMIUM(num_textures, contents_texture_ids,
- contents_rect, background_color, edge_aa_mask,
- bounds_rect, filter, protected_video_type);
+ gl_->ScheduleDCLayerCHROMIUM(
+ y_texture_id, uv_texture_id, z_order, content_x, content_y, content_width,
+ content_height, quad_x, quad_y, quad_width, quad_height, transform_c1r1,
+ transform_c2r1, transform_c1r2, transform_c2r2, transform_tx,
+ transform_ty, is_clipped, clip_x, clip_y, clip_width, clip_height,
+ protected_video_type);
}
void GLES2TraceImplementation::SetActiveURLCHROMIUM(const char* url) {
@@ -2664,11 +2712,10 @@ void GLES2TraceImplementation::MaxShaderCompilerThreadsKHR(GLuint count) {
}
GLuint GLES2TraceImplementation::CreateAndTexStorage2DSharedImageCHROMIUM(
- GLenum internalFormat,
const GLbyte* mailbox) {
TRACE_EVENT_BINARY_EFFICIENT0(
"gpu", "GLES2Trace::CreateAndTexStorage2DSharedImageCHROMIUM");
- return gl_->CreateAndTexStorage2DSharedImageCHROMIUM(internalFormat, mailbox);
+ return gl_->CreateAndTexStorage2DSharedImageCHROMIUM(mailbox);
}
void GLES2TraceImplementation::BeginSharedImageAccessDirectCHROMIUM(
diff --git a/chromium/gpu/command_buffer/client/gpu_control.h b/chromium/gpu/command_buffer/client/gpu_control.h
index dc40d197128..61c8c3471bf 100644
--- a/chromium/gpu/command_buffer/client/gpu_control.h
+++ b/chromium/gpu/command_buffer/client/gpu_control.h
@@ -104,11 +104,9 @@ class GPU_EXPORT GpuControl {
base::OnceClosure callback) = 0;
// This allows the command buffer proxy to mark the next flush with sync token
- // dependencies for the gpu scheduler. This is used in addition to the
- // WaitSyncToken command in the command buffer which is still needed. For
- // example, the WaitSyncToken command is used to pull texture updates when
- // used in conjunction with MailboxManagerSync.
- virtual void WaitSyncTokenHint(const SyncToken& sync_token) = 0;
+ // dependencies for the gpu scheduler, or to block prior to the flush in the
+ // case of Android WebView.
+ virtual void WaitSyncToken(const SyncToken& sync_token) = 0;
// Under some circumstances a sync token may be used which has not been
// verified to have been flushed. For example, fence syncs queued on the same
diff --git a/chromium/gpu/command_buffer/client/implementation_base.cc b/chromium/gpu/command_buffer/client/implementation_base.cc
index e07e10ccf14..3477663c3b1 100644
--- a/chromium/gpu/command_buffer/client/implementation_base.cc
+++ b/chromium/gpu/command_buffer/client/implementation_base.cc
@@ -19,8 +19,8 @@
namespace gpu {
#if !defined(_MSC_VER)
-const size_t ImplementationBase::kMaxSizeOfSimpleResult;
-const unsigned int ImplementationBase::kStartingOffset;
+const uint32_t ImplementationBase::kMaxSizeOfSimpleResult;
+const uint32_t ImplementationBase::kStartingOffset;
#endif
ImplementationBase::ImplementationBase(CommandBufferHelper* helper,
diff --git a/chromium/gpu/command_buffer/client/implementation_base.h b/chromium/gpu/command_buffer/client/implementation_base.h
index b77ca46cb8b..b4ebeaccc39 100644
--- a/chromium/gpu/command_buffer/client/implementation_base.h
+++ b/chromium/gpu/command_buffer/client/implementation_base.h
@@ -45,11 +45,11 @@ class GLES2_IMPL_EXPORT ImplementationBase
public GpuControlClient {
public:
// The maximum result size from simple GL get commands.
- static const size_t kMaxSizeOfSimpleResult =
+ static const uint32_t kMaxSizeOfSimpleResult =
16 * sizeof(uint32_t); // NOLINT.
// Used for testing only. If more things are reserved, add them here.
- static const unsigned int kStartingOffset = kMaxSizeOfSimpleResult;
+ static const uint32_t kStartingOffset = kMaxSizeOfSimpleResult;
// Alignment of allocations.
static const unsigned int kAlignment = 16;
diff --git a/chromium/gpu/command_buffer/client/logging.h b/chromium/gpu/command_buffer/client/logging.h
index a56e75cc849..e206a77bd63 100644
--- a/chromium/gpu/command_buffer/client/logging.h
+++ b/chromium/gpu/command_buffer/client/logging.h
@@ -9,9 +9,15 @@
#include "base/macros.h"
#include "gpu/command_buffer/client/gles2_impl_export.h"
-// Macros to log information if DCHECK_IS_ON() and --enable-gpu-client-logging
-// flag is set. Code is optimized out if DCHECK is disabled. Requires that a
-// LogSettings named log_settings_ is in scope whenever a macro is used.
+// Macros to log information if --enable-gpu-client-logging is set and either:
+// DCHECK_IS_ON(), or
+// enable_gpu_client_logging=true is set in GN args.
+// Code is optimized out if DCHECK is disabled and the GN arg is not set.
+// Requires that a LogSettings named log_settings_ is in scope whenever a macro
+// is used.
+//
+// Note that it's typically necessary to also specify --enable-logging=stderr to
+// see this logging output on Linux or macOS.
//
// Example usage:
//
@@ -32,13 +38,14 @@
// LogSettings log_settings_;
// };
-#if DCHECK_IS_ON() && !defined(__native_client__) && \
- !defined(GLES2_CONFORMANCE_TESTS) && !defined(GLES2_INLINE_OPTIMIZATION)
+#if (DCHECK_IS_ON() || defined(GPU_ENABLE_CLIENT_LOGGING)) && \
+ !defined(__native_client__) && !defined(GLES2_CONFORMANCE_TESTS) && \
+ !defined(GLES2_INLINE_OPTIMIZATION)
#define GPU_CLIENT_DEBUG
#endif
#if defined(GPU_CLIENT_DEBUG)
-#define GPU_CLIENT_LOG(args) DLOG_IF(INFO, log_settings_.enabled()) << args;
+#define GPU_CLIENT_LOG(args) LOG_IF(INFO, log_settings_.enabled()) << args;
#define GPU_CLIENT_LOG_CODE_BLOCK(code) code
#define GPU_CLIENT_DCHECK_CODE_BLOCK(code) code
#else // !defined(GPU_CLIENT_DEBUG)
diff --git a/chromium/gpu/command_buffer/client/mapped_memory.cc b/chromium/gpu/command_buffer/client/mapped_memory.cc
index bbb0f8aa47e..01dd619d0e4 100644
--- a/chromium/gpu/command_buffer/client/mapped_memory.cc
+++ b/chromium/gpu/command_buffer/client/mapped_memory.cc
@@ -13,6 +13,7 @@
#include "base/atomic_sequence_num.h"
#include "base/logging.h"
#include "base/memory/ptr_util.h"
+#include "base/numerics/checked_math.h"
#include "base/numerics/safe_conversions.h"
#include "base/strings/stringprintf.h"
#include "base/threading/thread_task_runner_handle.h"
@@ -36,9 +37,7 @@ MemoryChunk::MemoryChunk(int32_t shm_id,
CommandBufferHelper* helper)
: shm_id_(shm_id),
shm_(shm),
- allocator_(base::checked_cast<unsigned int>(shm->size()),
- helper,
- shm->memory()) {}
+ allocator_(shm->size(), helper, shm->memory()) {}
MemoryChunk::~MemoryChunk() = default;
@@ -105,12 +104,15 @@ void* MappedMemoryManager::Alloc(unsigned int size,
// Make a new chunk to satisfy the request.
CommandBuffer* cmd_buf = helper_->command_buffer();
- unsigned int chunk_size =
- ((size + chunk_size_multiple_ - 1) / chunk_size_multiple_) *
- chunk_size_multiple_;
+ base::CheckedNumeric<uint32_t> chunk_size = size;
+ chunk_size = (chunk_size + chunk_size_multiple_ - 1) & ~(chunk_size_multiple_ - 1);
+ uint32_t safe_chunk_size = 0;
+ if (!chunk_size.AssignIfValid(&safe_chunk_size))
+ return nullptr;
+
int32_t id = -1;
scoped_refptr<gpu::Buffer> shm =
- cmd_buf->CreateTransferBuffer(chunk_size, &id);
+ cmd_buf->CreateTransferBuffer(safe_chunk_size, &id);
if (id < 0)
return nullptr;
DCHECK(shm.get());
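The rewritten rounding works only because chunk_size_multiple_ is now required to be a power of two (see the new DCHECKs in mapped_memory.h below), and the CheckedNumeric lets the add overflowing uint32_t be caught rather than wrapping. The identity as a standalone sketch (the helper name is illustrative):

#include <stdint.h>

#include "base/bits.h"
#include "base/logging.h"
#include "base/numerics/checked_math.h"

// Rounds |size| up to the next multiple of a power-of-two |multiple|,
// returning false if the intermediate add overflows uint32_t.
bool RoundUpToMultiple(uint32_t size, uint32_t multiple, uint32_t* out) {
  DCHECK(base::bits::IsPowerOfTwo(multiple));
  base::CheckedNumeric<uint32_t> rounded = size;
  rounded += multiple - 1;     // May overflow; CheckedNumeric tracks validity.
  rounded &= ~(multiple - 1);  // Clear low bits: round down to the multiple.
  return rounded.AssignIfValid(out);
}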
diff --git a/chromium/gpu/command_buffer/client/mapped_memory.h b/chromium/gpu/command_buffer/client/mapped_memory.h
index 7eeee65d5ed..39fdcd8ea3e 100644
--- a/chromium/gpu/command_buffer/client/mapped_memory.h
+++ b/chromium/gpu/command_buffer/client/mapped_memory.h
@@ -11,6 +11,7 @@
#include <memory>
#include "base/bind.h"
+#include "base/bits.h"
#include "base/macros.h"
#include "base/trace_event/memory_dump_provider.h"
#include "gpu/command_buffer/client/fenced_allocator.h"
@@ -30,25 +31,21 @@ class GPU_EXPORT MemoryChunk {
~MemoryChunk();
// Gets the size of the largest free block that is available without waiting.
- unsigned int GetLargestFreeSizeWithoutWaiting() {
+ uint32_t GetLargestFreeSizeWithoutWaiting() {
return allocator_.GetLargestFreeSize();
}
// Gets the size of the largest free block that can be allocated if the
// caller can wait.
- unsigned int GetLargestFreeSizeWithWaiting() {
+ uint32_t GetLargestFreeSizeWithWaiting() {
return allocator_.GetLargestFreeOrPendingSize();
}
// Gets the size of the chunk.
- unsigned int GetSize() const {
- return static_cast<unsigned int>(shm_->size());
- }
+ uint32_t GetSize() const { return shm_->size(); }
// The shared memory id for this chunk.
- int32_t shm_id() const {
- return shm_id_;
- }
+ int32_t shm_id() const { return shm_id_; }
gpu::Buffer* shared_memory() const { return shm_.get(); }
@@ -62,15 +59,11 @@ class GPU_EXPORT MemoryChunk {
// Returns:
// the pointer to the allocated memory block, or nullptr if out of
// memory.
- void* Alloc(unsigned int size) {
- return allocator_.Alloc(size);
- }
+ void* Alloc(uint32_t size) { return allocator_.Alloc(size); }
// Gets the offset to a memory block given the base memory and the address.
// It translates nullptr to FencedAllocator::kInvalidOffset.
- unsigned int GetOffset(void* pointer) {
- return allocator_.GetOffset(pointer);
- }
+ uint32_t GetOffset(void* pointer) { return allocator_.GetOffset(pointer); }
// Frees a block of memory.
//
@@ -86,7 +79,7 @@ class GPU_EXPORT MemoryChunk {
// Parameters:
// pointer: the pointer to the memory block to free.
// token: the token value to wait for before re-using the memory.
- void FreePendingToken(void* pointer, unsigned int token) {
+ void FreePendingToken(void* pointer, uint32_t token) {
allocator_.FreePendingToken(pointer, token);
}
@@ -96,7 +89,7 @@ class GPU_EXPORT MemoryChunk {
}
// Gets the free size of the chunk.
- unsigned int GetFreeSize() { return allocator_.GetFreeSize(); }
+ uint32_t GetFreeSize() { return allocator_.GetFreeSize(); }
// Returns true if pointer is in the range of this block.
bool IsInChunk(void* pointer) const {
@@ -107,9 +100,7 @@ class GPU_EXPORT MemoryChunk {
// Returns true if any memory in this chunk is in use or free pending token.
bool InUseOrFreePending() { return allocator_.InUseOrFreePending(); }
- size_t bytes_in_use() const {
- return allocator_.bytes_in_use();
- }
+ uint32_t bytes_in_use() const { return allocator_.bytes_in_use(); }
FencedAllocator::State GetPointerStatusForTest(void* pointer,
int32_t* token_if_pending) {
@@ -138,12 +129,11 @@ class GPU_EXPORT MappedMemoryManager {
~MappedMemoryManager();
- unsigned int chunk_size_multiple() const {
- return chunk_size_multiple_;
- }
+ uint32_t chunk_size_multiple() const { return chunk_size_multiple_; }
- void set_chunk_size_multiple(unsigned int multiple) {
- DCHECK(multiple % FencedAllocator::kAllocAlignment == 0);
+ void set_chunk_size_multiple(uint32_t multiple) {
+ DCHECK(base::bits::IsPowerOfTwo(multiple));
+ DCHECK_GE(multiple, FencedAllocator::kAllocAlignment);
chunk_size_multiple_ = multiple;
}
@@ -162,8 +152,7 @@ class GPU_EXPORT MappedMemoryManager {
// shm_offset: pointer to variable to receive the shared memory offset.
// Returns:
// pointer to allocated block of memory. nullptr if failure.
- void* Alloc(
- unsigned int size, int32_t* shm_id, unsigned int* shm_offset);
+ void* Alloc(uint32_t size, int32_t* shm_id, uint32_t* shm_offset);
// Frees a block of memory.
//
@@ -213,7 +202,7 @@ class GPU_EXPORT MappedMemoryManager {
typedef std::vector<std::unique_ptr<MemoryChunk>> MemoryChunkVector;
// size a chunk is rounded up to.
- unsigned int chunk_size_multiple_;
+ uint32_t chunk_size_multiple_;
CommandBufferHelper* helper_;
MemoryChunkVector chunks_;
size_t allocated_memory_;
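MemoryChunk is a thin wrapper over a FencedAllocator on one shared-memory buffer, and the idiom this header documents is FreePendingToken: a block is handed back but not recycled until the service has processed commands up to a token. A sketch of the intended pattern through MappedMemoryManager (the names, and a manager-level FreePendingToken mirroring the chunk-level API shown above, are assumptions):

// Allocate shared memory, reference it from a command, then free it pending
// a token so the block is not reused while the service may still read it.
int32_t shm_id = 0;
uint32_t shm_offset = 0;
void* ptr = mapped_memory->Alloc(1024, &shm_id, &shm_offset);
if (ptr) {
  // ... fill |ptr| and issue a command that reads (shm_id, shm_offset) ...
  helper->Flush();
  mapped_memory->FreePendingToken(ptr, helper->InsertToken());
}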
diff --git a/chromium/gpu/command_buffer/client/mock_transfer_buffer.cc b/chromium/gpu/command_buffer/client/mock_transfer_buffer.cc
index 2621daf43ff..5b65cfae8fd 100644
--- a/chromium/gpu/command_buffer/client/mock_transfer_buffer.cc
+++ b/chromium/gpu/command_buffer/client/mock_transfer_buffer.cc
@@ -86,7 +86,7 @@ void* MockTransferBuffer::AllocUpTo(unsigned int size,
// reallocated.
actual_buffer_index_ = (actual_buffer_index_ + 1) % kNumBuffers;
- size = std::min(static_cast<size_t>(size), MaxTransferBufferSize());
+ size = std::min(size, MaxTransferBufferSize());
if (actual_offset_ + size > size_) {
actual_offset_ = result_size_;
}
@@ -136,7 +136,7 @@ unsigned int MockTransferBuffer::GetFragmentedFreeSize() const {
void MockTransferBuffer::ShrinkLastBlock(unsigned int new_size) {}
-size_t MockTransferBuffer::MaxTransferBufferSize() {
+uint32_t MockTransferBuffer::MaxTransferBufferSize() {
return size_ - result_size_;
}
@@ -150,7 +150,7 @@ bool MockTransferBuffer::InSync() {
}
MockTransferBuffer::ExpectedMemoryInfo MockTransferBuffer::GetExpectedMemory(
- size_t size) {
+ uint32_t size) {
ExpectedMemoryInfo mem;
mem.offset = AllocateExpectedTransferBuffer(size);
mem.id = GetExpectedTransferBufferId();
@@ -160,7 +160,7 @@ MockTransferBuffer::ExpectedMemoryInfo MockTransferBuffer::GetExpectedMemory(
}
MockTransferBuffer::ExpectedMemoryInfo
-MockTransferBuffer::GetExpectedResultMemory(size_t size) {
+MockTransferBuffer::GetExpectedResultMemory(uint32_t size) {
ExpectedMemoryInfo mem;
mem.offset = GetExpectedResultBufferOffset();
mem.id = GetExpectedResultBufferId();
@@ -169,7 +169,7 @@ MockTransferBuffer::GetExpectedResultMemory(size_t size) {
return mem;
}
-uint32_t MockTransferBuffer::AllocateExpectedTransferBuffer(size_t size) {
+uint32_t MockTransferBuffer::AllocateExpectedTransferBuffer(uint32_t size) {
EXPECT_LE(size, MaxTransferBufferSize());
// Toggle which buffer we get each time to simulate the buffer being
@@ -187,7 +187,7 @@ uint32_t MockTransferBuffer::AllocateExpectedTransferBuffer(size_t size) {
}
void* MockTransferBuffer::GetExpectedTransferAddressFromOffset(uint32_t offset,
- size_t size) {
+ uint32_t size) {
EXPECT_GE(offset, expected_buffer_index_ * alignment_);
EXPECT_LE(offset + size, size_ + expected_buffer_index_ * alignment_);
return expected_buffer() + offset;
diff --git a/chromium/gpu/command_buffer/client/mock_transfer_buffer.h b/chromium/gpu/command_buffer/client/mock_transfer_buffer.h
index 7b159b1c4f2..995bc78dc34 100644
--- a/chromium/gpu/command_buffer/client/mock_transfer_buffer.h
+++ b/chromium/gpu/command_buffer/client/mock_transfer_buffer.h
@@ -51,11 +51,11 @@ class MockTransferBuffer : public TransferBufferInterface {
unsigned int GetFragmentedFreeSize() const override;
void ShrinkLastBlock(unsigned int new_size) override;
- size_t MaxTransferBufferSize();
+ uint32_t MaxTransferBufferSize();
unsigned int RoundToAlignment(unsigned int size);
bool InSync();
- ExpectedMemoryInfo GetExpectedMemory(size_t size);
- ExpectedMemoryInfo GetExpectedResultMemory(size_t size);
+ ExpectedMemoryInfo GetExpectedMemory(uint32_t size);
+ ExpectedMemoryInfo GetExpectedResultMemory(uint32_t size);
private:
static const int kNumBuffers = 2;
@@ -68,15 +68,15 @@ class MockTransferBuffer : public TransferBufferInterface {
return static_cast<uint8_t*>(buffers_[expected_buffer_index_]->memory());
}
- uint32_t AllocateExpectedTransferBuffer(size_t size);
- void* GetExpectedTransferAddressFromOffset(uint32_t offset, size_t size);
+ uint32_t AllocateExpectedTransferBuffer(uint32_t size);
+ void* GetExpectedTransferAddressFromOffset(uint32_t offset, uint32_t size);
int GetExpectedResultBufferId();
uint32_t GetExpectedResultBufferOffset();
int GetExpectedTransferBufferId();
CommandBuffer* command_buffer_;
- size_t size_;
- size_t result_size_;
+ uint32_t size_;
+ uint32_t result_size_;
uint32_t alignment_;
int buffer_ids_[kNumBuffers];
scoped_refptr<Buffer> buffers_[kNumBuffers];
diff --git a/chromium/gpu/command_buffer/client/program_info_manager.cc b/chromium/gpu/command_buffer/client/program_info_manager.cc
index 5c79059895d..9581a354b2b 100644
--- a/chromium/gpu/command_buffer/client/program_info_manager.cc
+++ b/chromium/gpu/command_buffer/client/program_info_manager.cc
@@ -13,14 +13,33 @@ template <typename T>
static T LocalGetAs(const std::vector<int8_t>& data,
uint32_t offset,
size_t size) {
- const int8_t* p = &data[0] + offset;
- if (offset + size > data.size()) {
- NOTREACHED();
- return nullptr;
- }
+ const int8_t* p = data.data() + offset;
+ DCHECK_LE(offset + size, data.size());
return static_cast<T>(static_cast<const void*>(p));
}
+// Writes the string pointed to by |name|, filling at most |bufsize| bytes
+// (including the final \0). If |length| is non-null, it receives the number
+// of characters written (excluding the final \0). This is a helper for the
+// GetActive*Helper functions that return names.
+void FillNameAndLength(GLsizei bufsize,
+ GLsizei* length,
+ char* name,
+ const std::string& string) {
+ // Length of string (without final \0) that we will write to the
+ // buffer.
+ GLsizei max_length = 0;
+ if (name && (bufsize > 0)) {
+ DCHECK_LE(string.size(), static_cast<size_t>(INT_MAX));
+ // Note: bufsize includes the terminating \0; string.size() does not.
+ max_length = std::min(bufsize - 1, static_cast<GLsizei>(string.size()));
+ memcpy(name, string.data(), max_length);
+ name[max_length] = '\0';
+ }
+ if (length) {
+ *length = max_length;
+ }
+}
+
} // namespace
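// --- Editor's note (illustration, not part of the patch) ---
// Boundary behavior of FillNameAndLength above, since |bufsize| counts the
// final \0 while string.size() does not:
//
//   char buf[4];
//   GLsizei len = 0;
//   FillNameAndLength(sizeof(buf), &len, buf, std::string("color"));
//   // buf == "col", len == 3: three characters plus the \0 fill the buffer.
//   FillNameAndLength(0, &len, buf, std::string("color"));
//   // buf untouched, len == 0: nothing is written for bufsize == 0.
// --- End editor's note ---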
namespace gpu {
@@ -184,7 +203,7 @@ void ProgramInfoManager::Program::CacheFragDataIndex(const std::string& name,
GLint ProgramInfoManager::Program::GetFragDataLocation(
const std::string& name) const {
- base::hash_map<std::string, GLint>::const_iterator iter =
+ std::unordered_map<std::string, GLint>::const_iterator iter =
frag_data_locations_.find(name);
if (iter == frag_data_locations_.end())
return -1;
@@ -812,18 +831,7 @@ bool ProgramInfoManager::GetActiveAttrib(
if (type) {
*type = attrib_info->type;
}
- if (length || name) {
- GLsizei max_size = std::min(
- static_cast<size_t>(bufsize) - 1,
- std::max(static_cast<size_t>(0), attrib_info->name.size()));
- if (length) {
- *length = max_size;
- }
- if (name && bufsize > 0) {
- memcpy(name, attrib_info->name.c_str(), max_size);
- name[max_size] = '\0';
- }
- }
+ FillNameAndLength(bufsize, length, name, attrib_info->name);
return true;
}
}
@@ -848,18 +856,7 @@ bool ProgramInfoManager::GetActiveUniform(
if (type) {
*type = uniform_info->type;
}
- if (length || name) {
- GLsizei max_size = std::min(
- static_cast<size_t>(bufsize) - 1,
- std::max(static_cast<size_t>(0), uniform_info->name.size()));
- if (length) {
- *length = max_size;
- }
- if (name && bufsize > 0) {
- memcpy(name, uniform_info->name.c_str(), max_size);
- name[max_size] = '\0';
- }
- }
+ FillNameAndLength(bufsize, length, name, uniform_info->name);
return true;
}
}
@@ -884,30 +881,13 @@ bool ProgramInfoManager::GetActiveUniformBlockName(
GLES2Implementation* gl, GLuint program, GLuint index,
GLsizei buf_size, GLsizei* length, char* name) {
DCHECK_LE(0, buf_size);
- if (!name) {
- buf_size = 0;
- }
{
base::AutoLock auto_lock(lock_);
Program* info = GetProgramInfo(gl, program, kES3UniformBlocks);
if (info) {
const Program::UniformBlock* uniform_block = info->GetUniformBlock(index);
if (uniform_block) {
- if (buf_size == 0) {
- if (length) {
- *length = 0;
- }
- } else if (length || name) {
- GLsizei max_size = std::min(
- buf_size - 1, static_cast<GLsizei>(uniform_block->name.size()));
- if (length) {
- *length = max_size;
- }
- if (name) {
- memcpy(name, uniform_block->name.data(), max_size);
- name[max_size] = '\0';
- }
- }
+ FillNameAndLength(buf_size, length, name, uniform_block->name);
return true;
}
}
@@ -1009,17 +989,7 @@ bool ProgramInfoManager::GetTransformFeedbackVarying(
if (type) {
*type = varying->type;
}
- if (length || name) {
- GLsizei max_size = std::min(
- bufsize - 1, static_cast<GLsizei>(varying->name.size()));
- if (length) {
- *length = static_cast<GLsizei>(max_size);
- }
- if (name && bufsize > 0) {
- memcpy(name, varying->name.c_str(), max_size);
- name[max_size] = '\0';
- }
- }
+ FillNameAndLength(bufsize, length, name, varying->name);
return true;
}
}
diff --git a/chromium/gpu/command_buffer/client/program_info_manager.h b/chromium/gpu/command_buffer/client/program_info_manager.h
index fc6c9677dbb..4f1ddcb55a0 100644
--- a/chromium/gpu/command_buffer/client/program_info_manager.h
+++ b/chromium/gpu/command_buffer/client/program_info_manager.h
@@ -9,9 +9,9 @@
#include <stdint.h>
#include <string>
+#include <unordered_map>
#include <vector>
-#include "base/containers/hash_tables.h"
#include "base/gtest_prod_util.h"
#include "base/synchronization/lock.h"
#include "gles2_impl_export.h"
@@ -242,14 +242,14 @@ class GLES2_IMPL_EXPORT ProgramInfoManager {
std::vector<UniformES3> uniforms_es3_;
- base::hash_map<std::string, GLint> frag_data_locations_;
- base::hash_map<std::string, GLint> frag_data_indices_;
+ std::unordered_map<std::string, GLint> frag_data_locations_;
+ std::unordered_map<std::string, GLint> frag_data_indices_;
};
Program* GetProgramInfo(
GLES2Implementation* gl, GLuint program, ProgramInfoType type);
- typedef base::hash_map<GLuint, Program> ProgramInfoMap;
+ typedef std::unordered_map<GLuint, Program> ProgramInfoMap;
ProgramInfoMap program_infos_;
diff --git a/chromium/gpu/command_buffer/client/program_info_manager_unittest.cc b/chromium/gpu/command_buffer/client/program_info_manager_unittest.cc
index 302dc197b71..c533d99dfa3 100644
--- a/chromium/gpu/command_buffer/client/program_info_manager_unittest.cc
+++ b/chromium/gpu/command_buffer/client/program_info_manager_unittest.cc
@@ -7,6 +7,7 @@
#include <memory>
+#include "base/stl_util.h"
#include "gpu/command_buffer/client/program_info_manager.h"
#include "testing/gtest/include/gtest/gtest.h"
@@ -100,8 +101,8 @@ class ProgramInfoManagerTest : public testing::Test {
data->uniform_loc0[0] = 1;
data->uniform_loc1[0] = 2;
data->uniform_loc1[1] = 3;
- memcpy(data->uniform_name0, kName[0], arraysize(data->uniform_name0));
- memcpy(data->uniform_name1, kName[1], arraysize(data->uniform_name1));
+ memcpy(data->uniform_name0, kName[0], base::size(data->uniform_name0));
+ memcpy(data->uniform_name1, kName[1], base::size(data->uniform_name1));
}
void SetupUniformBlocksData(UniformBlocksData* data) {
@@ -115,23 +116,23 @@ class ProgramInfoManagerTest : public testing::Test {
data->entry[0].binding = 0;
data->entry[0].data_size = 8;
data->entry[0].name_offset = ComputeOffset(data, data->name0);
- data->entry[0].name_length = arraysize(data->name0);
- data->entry[0].active_uniforms = arraysize(data->indices0);
+ data->entry[0].name_length = base::size(data->name0);
+ data->entry[0].active_uniforms = base::size(data->indices0);
data->entry[0].active_uniform_offset = ComputeOffset(data, data->indices0);
data->entry[0].referenced_by_vertex_shader = static_cast<uint32_t>(true);
data->entry[0].referenced_by_fragment_shader = static_cast<uint32_t>(false);
data->entry[1].binding = 1;
data->entry[1].data_size = 4;
data->entry[1].name_offset = ComputeOffset(data, data->name1);
- data->entry[1].name_length = arraysize(data->name1);
- data->entry[1].active_uniforms = arraysize(data->indices1);
+ data->entry[1].name_length = base::size(data->name1);
+ data->entry[1].active_uniforms = base::size(data->indices1);
data->entry[1].active_uniform_offset = ComputeOffset(data, data->indices1);
data->entry[1].referenced_by_vertex_shader = static_cast<uint32_t>(false);
data->entry[1].referenced_by_fragment_shader = static_cast<uint32_t>(true);
- memcpy(data->name0, kName[0], arraysize(data->name0));
+ memcpy(data->name0, kName[0], base::size(data->name0));
data->indices0[0] = kIndices[0][0];
data->indices0[1] = kIndices[0][1];
- memcpy(data->name1, kName[1], arraysize(data->name1));
+ memcpy(data->name1, kName[1], base::size(data->name1));
data->indices1[0] = kIndices[1][0];
}
@@ -158,13 +159,13 @@ class ProgramInfoManagerTest : public testing::Test {
data->entry[0].size = 1;
data->entry[0].type = GL_FLOAT_VEC2;
data->entry[0].name_offset = ComputeOffset(data, data->name0);
- data->entry[0].name_length = arraysize(data->name0);
+ data->entry[0].name_length = base::size(data->name0);
data->entry[1].size = 2;
data->entry[1].type = GL_FLOAT;
data->entry[1].name_offset = ComputeOffset(data, data->name1);
- data->entry[1].name_length = arraysize(data->name1);
- memcpy(data->name0, kName[0], arraysize(data->name0));
- memcpy(data->name1, kName[1], arraysize(data->name1));
+ data->entry[1].name_length = base::size(data->name1);
+ memcpy(data->name0, kName[0], base::size(data->name0));
+ memcpy(data->name1, kName[1], base::size(data->name1));
}
std::unique_ptr<ProgramInfoManager> program_info_manager_;
diff --git a/chromium/gpu/command_buffer/client/query_tracker.cc b/chromium/gpu/command_buffer/client/query_tracker.cc
index ef8a972915a..3b08a12aea8 100644
--- a/chromium/gpu/command_buffer/client/query_tracker.cc
+++ b/chromium/gpu/command_buffer/client/query_tracker.cc
@@ -78,8 +78,8 @@ bool QuerySyncManager::Alloc(QuerySyncManager::QueryInfo* info) {
bucket = buckets_.back().get();
}
- size_t index_in_bucket = 0;
- for (size_t i = 0; i < kSyncsPerBucket; i++) {
+ uint32_t index_in_bucket = 0;
+ for (uint32_t i = 0; i < kSyncsPerBucket; i++) {
if (!bucket->in_use_query_syncs[i]) {
index_in_bucket = i;
break;
diff --git a/chromium/gpu/command_buffer/client/query_tracker.h b/chromium/gpu/command_buffer/client/query_tracker.h
index 1596e985bda..d407c50a132 100644
--- a/chromium/gpu/command_buffer/client/query_tracker.h
+++ b/chromium/gpu/command_buffer/client/query_tracker.h
@@ -13,11 +13,11 @@
#include <bitset>
#include <list>
#include <memory>
+#include <unordered_map>
#include "base/atomicops.h"
#include "base/containers/circular_deque.h"
#include "base/containers/flat_map.h"
-#include "base/containers/hash_tables.h"
#include "base/gtest_prod_util.h"
#include "base/macros.h"
#include "gles2_impl_export.h"
@@ -33,7 +33,7 @@ namespace gles2 {
// Manages buckets of QuerySync instances in mapped memory.
class GLES2_IMPL_EXPORT QuerySyncManager {
public:
- static const size_t kSyncsPerBucket = 256;
+ static const uint32_t kSyncsPerBucket = 256;
struct GLES2_IMPL_EXPORT Bucket {
Bucket(QuerySync* sync_mem, int32_t shm_id, uint32_t shm_offset);
@@ -225,7 +225,7 @@ class GLES2_IMPL_EXPORT QueryTracker {
}
private:
- typedef base::hash_map<GLuint, std::unique_ptr<Query>> QueryIdMap;
+ typedef std::unordered_map<GLuint, std::unique_ptr<Query>> QueryIdMap;
typedef base::flat_map<GLenum, Query*> QueryTargetMap;
QueryIdMap queries_;
diff --git a/chromium/gpu/command_buffer/client/query_tracker_unittest.cc b/chromium/gpu/command_buffer/client/query_tracker_unittest.cc
index 01a1460bcba..515e52dbc3c 100644
--- a/chromium/gpu/command_buffer/client/query_tracker_unittest.cc
+++ b/chromium/gpu/command_buffer/client/query_tracker_unittest.cc
@@ -13,6 +13,7 @@
#include <memory>
#include <vector>
+#include "base/stl_util.h"
#include "gpu/command_buffer/client/client_test_helper.h"
#include "gpu/command_buffer/client/gles2_cmd_helper.h"
#include "gpu/command_buffer/client/mapped_memory.h"
@@ -59,7 +60,7 @@ TEST_F(QuerySyncManagerTest, Basic) {
QuerySyncManager::QueryInfo infos[4];
memset(&infos, 0xBD, sizeof(infos));
- for (size_t ii = 0; ii < arraysize(infos); ++ii) {
+ for (size_t ii = 0; ii < base::size(infos); ++ii) {
EXPECT_TRUE(sync_manager_->Alloc(&infos[ii]));
ASSERT_TRUE(infos[ii].sync != nullptr);
EXPECT_EQ(0, infos[ii].sync->process_count);
@@ -67,7 +68,7 @@ TEST_F(QuerySyncManagerTest, Basic) {
EXPECT_EQ(0, infos[ii].submit_count);
}
- for (size_t ii = 0; ii < arraysize(infos); ++ii) {
+ for (size_t ii = 0; ii < base::size(infos); ++ii) {
sync_manager_->Free(infos[ii]);
}
}
@@ -76,7 +77,7 @@ TEST_F(QuerySyncManagerTest, DontFree) {
QuerySyncManager::QueryInfo infos[4];
memset(&infos, 0xBD, sizeof(infos));
- for (size_t ii = 0; ii < arraysize(infos); ++ii) {
+ for (size_t ii = 0; ii < base::size(infos); ++ii) {
EXPECT_TRUE(sync_manager_->Alloc(&infos[ii]));
}
}
@@ -402,7 +403,7 @@ TEST_F(QueryTrackerTest, ManyQueries) {
const int32_t kToken = 46;
const uint32_t kResult = 456;
- const size_t kTestSize = 4000;
+ const uint32_t kTestSize = 4000;
static_assert(kTestSize > QuerySyncManager::kSyncsPerBucket,
"We want to use more than one bucket");
// Create lots of queries.
diff --git a/chromium/gpu/command_buffer/client/raster_cmd_helper_autogen.h b/chromium/gpu/command_buffer/client/raster_cmd_helper_autogen.h
index a95e987f0d6..ec362130fcd 100644
--- a/chromium/gpu/command_buffer/client/raster_cmd_helper_autogen.h
+++ b/chromium/gpu/command_buffer/client/raster_cmd_helper_autogen.h
@@ -11,16 +11,6 @@
#ifndef GPU_COMMAND_BUFFER_CLIENT_RASTER_CMD_HELPER_AUTOGEN_H_
#define GPU_COMMAND_BUFFER_CLIENT_RASTER_CMD_HELPER_AUTOGEN_H_
-void DeleteTexturesImmediate(GLsizei n, const GLuint* textures) {
- const uint32_t size = raster::cmds::DeleteTexturesImmediate::ComputeSize(n);
- raster::cmds::DeleteTexturesImmediate* c =
- GetImmediateCmdSpaceTotalSize<raster::cmds::DeleteTexturesImmediate>(
- size);
- if (c) {
- c->Init(n, textures);
- }
-}
-
void Finish() {
raster::cmds::Finish* c = GetCmdSpace<raster::cmds::Finish>();
if (c) {
@@ -94,20 +84,9 @@ void InsertFenceSyncCHROMIUM(GLuint64 release_count) {
}
}
-void WaitSyncTokenCHROMIUM(GLint namespace_id,
- GLuint64 command_buffer_id,
- GLuint64 release_count) {
- raster::cmds::WaitSyncTokenCHROMIUM* c =
- GetCmdSpace<raster::cmds::WaitSyncTokenCHROMIUM>();
- if (c) {
- c->Init(namespace_id, command_buffer_id, release_count);
- }
-}
-
void BeginRasterCHROMIUMImmediate(GLuint sk_color,
GLuint msaa_sample_count,
GLboolean can_use_lcd_text,
- GLint color_type,
GLuint color_space_transfer_cache_id,
const GLbyte* mailbox) {
const uint32_t size =
@@ -116,7 +95,7 @@ void BeginRasterCHROMIUMImmediate(GLuint sk_color,
GetImmediateCmdSpaceTotalSize<raster::cmds::BeginRasterCHROMIUMImmediate>(
size);
if (c) {
- c->Init(sk_color, msaa_sample_count, can_use_lcd_text, color_type,
+ c->Init(sk_color, msaa_sample_count, can_use_lcd_text,
color_space_transfer_cache_id, mailbox);
}
}
@@ -203,32 +182,20 @@ void ClearPaintCacheINTERNAL() {
}
}
-void CreateAndConsumeTextureINTERNALImmediate(GLuint texture_id,
- bool use_buffer,
- gfx::BufferUsage buffer_usage,
- viz::ResourceFormat format,
- const GLbyte* mailbox) {
+void CopySubTextureINTERNALImmediate(GLint xoffset,
+ GLint yoffset,
+ GLint x,
+ GLint y,
+ GLsizei width,
+ GLsizei height,
+ const GLbyte* mailboxes) {
const uint32_t size =
- raster::cmds::CreateAndConsumeTextureINTERNALImmediate::ComputeSize();
- raster::cmds::CreateAndConsumeTextureINTERNALImmediate* c =
+ raster::cmds::CopySubTextureINTERNALImmediate::ComputeSize();
+ raster::cmds::CopySubTextureINTERNALImmediate* c =
GetImmediateCmdSpaceTotalSize<
- raster::cmds::CreateAndConsumeTextureINTERNALImmediate>(size);
- if (c) {
- c->Init(texture_id, use_buffer, buffer_usage, format, mailbox);
- }
-}
-
-void CopySubTexture(GLuint source_id,
- GLuint dest_id,
- GLint xoffset,
- GLint yoffset,
- GLint x,
- GLint y,
- GLsizei width,
- GLsizei height) {
- raster::cmds::CopySubTexture* c = GetCmdSpace<raster::cmds::CopySubTexture>();
+ raster::cmds::CopySubTextureINTERNALImmediate>(size);
if (c) {
- c->Init(source_id, dest_id, xoffset, yoffset, x, y, width, height);
+ c->Init(xoffset, yoffset, x, y, width, height, mailboxes);
}
}
diff --git a/chromium/gpu/command_buffer/client/raster_implementation.cc b/chromium/gpu/command_buffer/client/raster_implementation.cc
index 83d4b257f77..a4ad95d85a2 100644
--- a/chromium/gpu/command_buffer/client/raster_implementation.cc
+++ b/chromium/gpu/command_buffer/client/raster_implementation.cc
@@ -89,7 +89,7 @@ namespace raster {
namespace {
-const size_t kMaxTransferCacheEntrySizeForTransferBuffer = 1024;
+const uint32_t kMaxTransferCacheEntrySizeForTransferBuffer = 1024;
} // namespace
@@ -101,7 +101,7 @@ class RasterImplementation::TransferCacheSerializeHelperImpl
: ri_(ri) {}
~TransferCacheSerializeHelperImpl() final = default;
- size_t take_end_offset_of_last_inlined_entry() {
+ uint32_t take_end_offset_of_last_inlined_entry() {
auto offset = end_offset_of_last_inlined_entry_;
end_offset_of_last_inlined_entry_ = 0u;
return offset;
@@ -113,12 +113,12 @@ class RasterImplementation::TransferCacheSerializeHelperImpl
static_cast<uint32_t>(key.first), key.second);
}
- size_t CreateEntryInternal(const cc::ClientTransferCacheEntry& entry,
- char* memory) final {
- size_t size = entry.SerializedSize();
+ uint32_t CreateEntryInternal(const cc::ClientTransferCacheEntry& entry,
+ char* memory) final {
+ uint32_t size = entry.SerializedSize();
// Cap the entries inlined to a specific size.
if (size <= ri_->max_inlined_entry_size_ && ri_->raster_mapped_buffer_) {
- size_t written = InlineEntry(entry, memory);
+ uint32_t written = InlineEntry(entry, memory);
if (written > 0u)
return written;
}
@@ -144,7 +144,8 @@ class RasterImplementation::TransferCacheSerializeHelperImpl
// Writes the entry into |memory| if there is enough space. Returns the number
// of bytes written on success or 0u on failure due to insufficient size.
- size_t InlineEntry(const cc::ClientTransferCacheEntry& entry, char* memory) {
+ uint32_t InlineEntry(const cc::ClientTransferCacheEntry& entry,
+ char* memory) {
DCHECK(memory);
DCHECK(SkIsAlign4(reinterpret_cast<uintptr_t>(memory)));
@@ -153,9 +154,12 @@ class RasterImplementation::TransferCacheSerializeHelperImpl
const auto& buffer = ri_->raster_mapped_buffer_;
DCHECK(buffer->BelongsToBuffer(memory));
- size_t memory_offset = memory - static_cast<char*>(buffer->address());
- size_t bytes_to_write = entry.SerializedSize();
- size_t bytes_remaining = buffer->size() - memory_offset;
+ DCHECK(base::CheckedNumeric<uint32_t>(memory -
+ static_cast<char*>(buffer->address()))
+ .IsValid());
+ uint32_t memory_offset = memory - static_cast<char*>(buffer->address());
+ uint32_t bytes_to_write = entry.SerializedSize();
+ uint32_t bytes_remaining = buffer->size() - memory_offset;
DCHECK_GT(bytes_to_write, 0u);
if (bytes_to_write > bytes_remaining)
@@ -173,7 +177,7 @@ class RasterImplementation::TransferCacheSerializeHelperImpl
}
RasterImplementation* const ri_;
- size_t end_offset_of_last_inlined_entry_ = 0u;
+ uint32_t end_offset_of_last_inlined_entry_ = 0u;
DISALLOW_COPY_AND_ASSIGN(TransferCacheSerializeHelperImpl);
};
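// --- Editor's note (illustration, not part of the patch) ---
// Why take_end_offset_of_last_inlined_entry() exists: transfer-cache entries
// may be inlined into the mapped raster buffer *after* the last serialized
// paint op, so the bytes to commit are the max of the two watermarks:
//
//   uint32_t total = std::max(
//       written_bytes,
//       transfer_cache_helper->take_end_offset_of_last_inlined_entry());
//
// (mirrors the computation inside PaintOpSerializer below).
// --- End editor's note ---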
@@ -181,7 +185,7 @@ class RasterImplementation::TransferCacheSerializeHelperImpl
// Helper to copy PaintOps to the GPU service over the transfer buffer.
class RasterImplementation::PaintOpSerializer {
public:
- PaintOpSerializer(size_t initial_size,
+ PaintOpSerializer(uint32_t initial_size,
RasterImplementation* ri,
cc::DecodeStashingImageProvider* stashing_image_provider,
TransferCacheSerializeHelperImpl* transfer_cache_helper,
@@ -218,6 +222,7 @@ class RasterImplementation::PaintOpSerializer {
size = op->Serialize(buffer_ + written_bytes_, free_bytes_, options);
}
DCHECK_LE(size, free_bytes_);
+ DCHECK(base::CheckAdd<uint32_t>(written_bytes_, size).IsValid());
ri_->paint_cache_->FinalizePendingEntries();
written_bytes_ += size;
@@ -235,7 +240,7 @@ class RasterImplementation::PaintOpSerializer {
// Check the address of the last inlined entry to figure out whether
// transfer cache entries were written past the last successfully serialized
// op.
- size_t total_written_size = std::max(
+ uint32_t total_written_size = std::max(
written_bytes_,
transfer_cache_helper_->take_end_offset_of_last_inlined_entry());
@@ -264,8 +269,8 @@ class RasterImplementation::PaintOpSerializer {
TransferCacheSerializeHelperImpl* const transfer_cache_helper_;
ClientFontManager* font_manager_;
- size_t written_bytes_ = 0;
- size_t free_bytes_ = 0;
+ uint32_t written_bytes_ = 0;
+ uint32_t free_bytes_ = 0;
DISALLOW_COPY_AND_ASSIGN(PaintOpSerializer);
};
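// --- Editor's note (illustration, not part of the patch) ---
// The base/numerics pattern behind the DCHECKs added above, in isolation
// (base/numerics/checked_math.h):
//
//   bool Fits(uint32_t a, uint32_t b) {  // hypothetical helper
//     return base::CheckAdd<uint32_t>(a, b).IsValid();
//   }
//
// Fits(0xFFFFFFFFu, 1u) is false; the new DCHECKs assert that byte counts
// cannot overflow now that these sizes are uint32_t rather than size_t.
// --- End editor's note ---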
@@ -291,7 +296,6 @@ RasterImplementation::RasterImplementation(
ImageDecodeAcceleratorInterface* image_decode_accelerator)
: ImplementationBase(helper, transfer_buffer, gpu_control),
helper_(helper),
- active_texture_unit_(0),
error_bits_(0),
lose_context_when_out_of_memory_(lose_context_when_out_of_memory),
use_count_(0),
@@ -320,9 +324,6 @@ gpu::ContextResult RasterImplementation::Initialize(
return result;
}
- texture_units_ = std::make_unique<TextureUnit[]>(
- capabilities_.max_combined_texture_image_units);
-
return gpu::ContextResult::kSuccess;
}
@@ -344,15 +345,8 @@ RasterCmdHelper* RasterImplementation::helper() const {
}
IdAllocator* RasterImplementation::GetIdAllocator(IdNamespaces namespace_id) {
- switch (namespace_id) {
- case IdNamespaces::kQueries:
- return &query_id_allocator_;
- case IdNamespaces::kTextures:
- return &texture_id_allocator_;
- default:
- DCHECK(false);
- return nullptr;
- }
+ DCHECK_EQ(namespace_id, IdNamespaces::kQueries);
+ return &query_id_allocator_;
}
void RasterImplementation::OnGpuControlLostContext() {
@@ -481,7 +475,7 @@ bool RasterImplementation::ThreadsafeDiscardableTextureIsDeletedForTracing(
return false;
}
-void* RasterImplementation::MapTransferCacheEntry(size_t serialized_size) {
+void* RasterImplementation::MapTransferCacheEntry(uint32_t serialized_size) {
// Prefer to use transfer buffer when possible, since transfer buffer
// allocations are much cheaper.
if (raster_mapped_buffer_ ||
@@ -778,28 +772,6 @@ void RasterImplementation::FinishHelper() {
void RasterImplementation::GenQueriesEXTHelper(GLsizei /* n */,
const GLuint* /* queries */) {}
-void RasterImplementation::DeleteTexturesHelper(GLsizei n,
- const GLuint* textures) {
- helper_->DeleteTexturesImmediate(n, textures);
- for (GLsizei ii = 0; ii < n; ++ii) {
- texture_id_allocator_.FreeID(textures[ii]);
- }
- UnbindTexturesHelper(n, textures);
-}
-
-void RasterImplementation::UnbindTexturesHelper(GLsizei n,
- const GLuint* textures) {
- for (GLsizei ii = 0; ii < n; ++ii) {
- for (GLint tt = 0; tt < capabilities_.max_combined_texture_image_units;
- ++tt) {
- TextureUnit& unit = texture_units_[tt];
- if (textures[ii] == unit.bound_texture_2d) {
- unit.bound_texture_2d = 0;
- }
- }
- }
-}
-
GLenum RasterImplementation::GetGraphicsResetStatusKHR() {
GPU_CLIENT_SINGLE_THREAD_CHECK();
GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glGetGraphicsResetStatusKHR()");
@@ -879,24 +851,6 @@ void RasterImplementation::GetQueryObjectuivEXT(GLuint id,
*params = base::saturated_cast<GLuint>(result);
}
-void RasterImplementation::GenSyncTokenCHROMIUM(GLbyte* sync_token) {
- if (!sync_token) {
- SetGLError(GL_INVALID_VALUE, "glGenSyncTokenCHROMIUM", "empty sync_token");
- return;
- }
-
- uint64_t fence_sync = gpu_control_->GenerateFenceSyncRelease();
- helper_->InsertFenceSyncCHROMIUM(fence_sync);
- helper_->CommandBufferHelper::OrderingBarrier();
- gpu_control_->EnsureWorkVisible();
-
- // Copy the data over after setting the data to ensure alignment.
- SyncToken sync_token_data(gpu_control_->GetNamespaceID(),
- gpu_control_->GetCommandBufferID(), fence_sync);
- sync_token_data.SetVerifyFlush();
- memcpy(sync_token, &sync_token_data, sizeof(sync_token_data));
-}
-
void RasterImplementation::GenUnverifiedSyncTokenCHROMIUM(GLbyte* sync_token) {
if (!sync_token) {
SetGLError(GL_INVALID_VALUE, "glGenUnverifiedSyncTokenCHROMIUM",
@@ -962,14 +916,7 @@ void RasterImplementation::WaitSyncTokenCHROMIUM(
return;
}
- helper_->WaitSyncTokenCHROMIUM(
- static_cast<GLint>(sync_token.namespace_id()),
- sync_token.command_buffer_id().GetUnsafeValue(),
- sync_token.release_count());
-
- // Enqueue sync token in flush after inserting command so that it's not
- // included in an automatic flush.
- gpu_control_->WaitSyncTokenHint(verified_sync_token);
+ gpu_control_->WaitSyncToken(verified_sync_token);
}
void* RasterImplementation::MapRasterCHROMIUM(GLsizeiptr size) {
@@ -991,7 +938,7 @@ void* RasterImplementation::MapRasterCHROMIUM(GLsizeiptr size) {
return raster_mapped_buffer_->address();
}
-void* RasterImplementation::MapFontBuffer(size_t size) {
+void* RasterImplementation::MapFontBuffer(uint32_t size) {
if (size < 0) {
SetGLError(GL_INVALID_VALUE, "glMapFontBufferCHROMIUM", "negative size");
return nullptr;
@@ -1006,11 +953,6 @@ void* RasterImplementation::MapFontBuffer(size_t size) {
"mapped font buffer with no raster buffer");
return nullptr;
}
- if (size > std::numeric_limits<uint32_t>::max()) {
- SetGLError(GL_INVALID_OPERATION, "glMapFontBufferCHROMIUM",
- "trying to map too large font buffer");
- return nullptr;
- }
font_mapped_buffer_.emplace(size, helper_, mapped_memory_.get());
if (!font_mapped_buffer_->valid()) {
@@ -1021,8 +963,8 @@ void* RasterImplementation::MapFontBuffer(size_t size) {
return font_mapped_buffer_->address();
}
-void RasterImplementation::UnmapRasterCHROMIUM(GLsizeiptr raster_written_size,
- GLsizeiptr total_written_size) {
+void RasterImplementation::UnmapRasterCHROMIUM(uint32_t raster_written_size,
+ uint32_t total_written_size) {
if (total_written_size < 0) {
SetGLError(GL_INVALID_VALUE, "glUnmapRasterCHROMIUM",
"negative written_size");
@@ -1040,9 +982,9 @@ void RasterImplementation::UnmapRasterCHROMIUM(GLsizeiptr raster_written_size,
}
raster_mapped_buffer_->Shrink(total_written_size);
- GLuint font_shm_id = 0u;
- GLuint font_shm_offset = 0u;
- GLsizeiptr font_shm_size = 0u;
+ uint32_t font_shm_id = 0u;
+ uint32_t font_shm_offset = 0u;
+ uint32_t font_shm_size = 0u;
if (font_mapped_buffer_) {
font_shm_id = font_mapped_buffer_->shm_id();
font_shm_offset = font_mapped_buffer_->offset();
@@ -1065,30 +1007,42 @@ void RasterImplementation::UnmapRasterCHROMIUM(GLsizeiptr raster_written_size,
// instead of having to edit some template or the code generator.
#include "gpu/command_buffer/client/raster_implementation_impl_autogen.h"
-GLuint RasterImplementation::CreateAndConsumeTexture(
- bool use_buffer,
- gfx::BufferUsage buffer_usage,
- viz::ResourceFormat format,
- const GLbyte* mailbox) {
+void RasterImplementation::CopySubTexture(const gpu::Mailbox& source_mailbox,
+ const gpu::Mailbox& dest_mailbox,
+ GLenum dest_target,
+ GLint xoffset,
+ GLint yoffset,
+ GLint x,
+ GLint y,
+ GLsizei width,
+ GLsizei height) {
GPU_CLIENT_SINGLE_THREAD_CHECK();
- GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glCreateAndConsumeTexture("
- << use_buffer << ", "
- << static_cast<uint32_t>(buffer_usage) << ", "
- << static_cast<uint32_t>(format) << ", "
- << static_cast<const void*>(mailbox) << ")");
- GLuint client_id = texture_id_allocator_.AllocateID();
- helper_->CreateAndConsumeTextureINTERNALImmediate(
- client_id, use_buffer, buffer_usage, format, mailbox);
- GPU_CLIENT_LOG("returned " << client_id);
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glCopySubTexture("
+ << source_mailbox.ToDebugString() << ", "
+ << dest_mailbox.ToDebugString() << ", " << xoffset << ", "
+ << yoffset << ", " << x << ", " << y << ", " << width
+ << ", " << height << ")");
+ if (width < 0) {
+ SetGLError(GL_INVALID_VALUE, "glCopySubTexture", "width < 0");
+ return;
+ }
+ if (height < 0) {
+ SetGLError(GL_INVALID_VALUE, "glCopySubTexture", "height < 0");
+ return;
+ }
+ GLbyte mailboxes[sizeof(source_mailbox.name) * 2];
+ memcpy(mailboxes, source_mailbox.name, sizeof(source_mailbox.name));
+ memcpy(mailboxes + sizeof(source_mailbox.name), dest_mailbox.name,
+ sizeof(dest_mailbox.name));
+ helper_->CopySubTextureINTERNALImmediate(xoffset, yoffset, x, y, width,
+ height, mailboxes);
CheckGLError();
- return client_id;
}
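// --- Editor's note (illustration, not part of the patch) ---
// Hypothetical caller of the new mailbox-based entry point; client-side
// texture ids no longer appear in the raster path at all:
//
//   ri->CopySubTexture(source_mailbox, dest_mailbox, GL_TEXTURE_2D,
//                      /*xoffset=*/0, /*yoffset=*/0, /*x=*/0, /*y=*/0,
//                      /*width=*/32, /*height=*/32);
// --- End editor's note ---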
void RasterImplementation::BeginRasterCHROMIUM(
GLuint sk_color,
GLuint msaa_sample_count,
GLboolean can_use_lcd_text,
- GLint color_type,
const cc::RasterColorSpace& raster_color_space,
const GLbyte* mailbox) {
DCHECK(!raster_properties_);
@@ -1105,7 +1059,7 @@ void RasterImplementation::BeginRasterCHROMIUM(
raster_color_space.color_space_id);
helper_->BeginRasterCHROMIUMImmediate(
- sk_color, msaa_sample_count, can_use_lcd_text, color_type,
+ sk_color, msaa_sample_count, can_use_lcd_text,
raster_color_space.color_space_id, mailbox);
transfer_cache_serialize_helper.FlushEntries();
@@ -1138,9 +1092,8 @@ void RasterImplementation::RasterCHROMIUM(const cc::DisplayItemList* list,
return;
// TODO(enne): Tune these numbers
- // TODO(enne): Convert these types here and in transfer buffer to be size_t.
- static constexpr unsigned int kMinAlloc = 16 * 1024;
- unsigned int free_size = std::max(GetTransferBufferFreeSize(), kMinAlloc);
+ static constexpr uint32_t kMinAlloc = 16 * 1024;
+ uint32_t free_size = std::max(GetTransferBufferFreeSize(), kMinAlloc);
// This section duplicates RasterSource::PlaybackToCanvas setup preamble.
cc::PaintOpBufferSerializer::Preamble preamble;
@@ -1229,6 +1182,16 @@ void RasterImplementation::IssueImageDecodeCacheEntryCreation(
target_color_space, needs_mips);
}
+GLuint RasterImplementation::CreateAndConsumeForGpuRaster(
+ const GLbyte* mailbox) {
+ NOTREACHED();
+ return 0;
+}
+
+void RasterImplementation::DeleteGpuRasterTexture(GLuint texture) {
+ NOTREACHED();
+}
+
void RasterImplementation::BeginGpuRaster() {
NOTREACHED();
}
@@ -1287,8 +1250,14 @@ void RasterImplementation::SetActiveURLCHROMIUM(const char* url) {
cc::ClientPaintCache* RasterImplementation::GetOrCreatePaintCache() {
if (!paint_cache_) {
- constexpr size_t kPaintCacheBudget = 4 * 1024 * 1024;
- paint_cache_ = std::make_unique<cc::ClientPaintCache>(kPaintCacheBudget);
+ constexpr size_t kNormalPaintCacheBudget = 4 * 1024 * 1024;
+ constexpr size_t kLowEndPaintCacheBudget = 256 * 1024;
+ size_t paint_cache_budget = 0u;
+ if (base::SysInfo::IsLowEndDevice())
+ paint_cache_budget = kLowEndPaintCacheBudget;
+ else
+ paint_cache_budget = kNormalPaintCacheBudget;
+ paint_cache_ = std::make_unique<cc::ClientPaintCache>(paint_cache_budget);
}
return paint_cache_.get();
}
diff --git a/chromium/gpu/command_buffer/client/raster_implementation.h b/chromium/gpu/command_buffer/client/raster_implementation.h
index bc2d3e35a1f..444379df337 100644
--- a/chromium/gpu/command_buffer/client/raster_implementation.h
+++ b/chromium/gpu/command_buffer/client/raster_implementation.h
@@ -111,10 +111,19 @@ class RASTER_EXPORT RasterImplementation : public RasterInterface,
#include "gpu/command_buffer/client/raster_implementation_autogen.h"
// RasterInterface implementation.
+ void CopySubTexture(const gpu::Mailbox& source_mailbox,
+ const gpu::Mailbox& dest_mailbox,
+ GLenum dest_target,
+ GLint xoffset,
+ GLint yoffset,
+ GLint x,
+ GLint y,
+ GLsizei width,
+ GLsizei height) override;
+
void BeginRasterCHROMIUM(GLuint sk_color,
GLuint msaa_sample_count,
GLboolean can_use_lcd_text,
- GLint color_type,
const cc::RasterColorSpace& raster_color_space,
const GLbyte* mailbox) override;
void RasterCHROMIUM(const cc::DisplayItemList* list,
@@ -130,6 +139,8 @@ class RASTER_EXPORT RasterImplementation : public RasterInterface,
uint32_t transfer_cache_entry_id,
const gfx::ColorSpace& target_color_space,
bool needs_mips) override;
+ GLuint CreateAndConsumeForGpuRaster(const GLbyte* mailbox) override;
+ void DeleteGpuRasterTexture(GLuint texture) override;
void BeginGpuRaster() override;
void EndGpuRaster() override;
@@ -164,7 +175,7 @@ class RASTER_EXPORT RasterImplementation : public RasterInterface,
uint32_t texture_id) override;
bool ThreadsafeDiscardableTextureIsDeletedForTracing(
uint32_t texture_id) override;
- void* MapTransferCacheEntry(size_t serialized_size) override;
+ void* MapTransferCacheEntry(uint32_t serialized_size) override;
void UnmapAndCreateTransferCacheEntry(uint32_t type, uint32_t id) override;
bool ThreadsafeLockTransferCacheEntry(uint32_t type, uint32_t id) override;
void UnlockTransferCacheEntries(
@@ -178,9 +189,9 @@ class RASTER_EXPORT RasterImplementation : public RasterInterface,
GLuint64* params);
// ClientFontManager::Client implementation.
- void* MapFontBuffer(size_t size) override;
+ void* MapFontBuffer(uint32_t size) override;
- void set_max_inlined_entry_size_for_testing(size_t max_size) {
+ void set_max_inlined_entry_size_for_testing(uint32_t max_size) {
max_inlined_entry_size_ = max_size;
}
@@ -236,8 +247,8 @@ class RASTER_EXPORT RasterImplementation : public RasterInterface,
// |raster_written_size| is the size of buffer used by raster commands.
// |total_written_size| is the total size of the buffer written to, including
// any transfer cache entries inlined into the buffer.
- void UnmapRasterCHROMIUM(GLsizeiptr raster_written_size,
- GLsizeiptr total_written_size);
+ void UnmapRasterCHROMIUM(uint32_t raster_written_size,
+ uint32_t total_written_size);
// Returns the last error and clears it. Useful for debugging.
const std::string& GetLastError() { return last_error_; }
@@ -245,7 +256,6 @@ class RASTER_EXPORT RasterImplementation : public RasterInterface,
void GenQueriesEXTHelper(GLsizei n, const GLuint* queries);
void DeleteTexturesHelper(GLsizei n, const GLuint* textures);
- void UnbindTexturesHelper(GLsizei n, const GLuint* textures);
void DeleteQueriesEXTHelper(GLsizei n, const GLuint* queries);
// IdAllocators for objects that can't be shared among contexts.
@@ -292,11 +302,6 @@ class RASTER_EXPORT RasterImplementation : public RasterInterface,
gles2::DebugMarkerManager debug_marker_manager_;
std::string this_in_hex_;
- std::unique_ptr<TextureUnit[]> texture_units_;
-
- // 0 to capabilities_.max_combined_texture_image_units.
- GLuint active_texture_unit_;
-
// Current GL error bits.
uint32_t error_bits_;
@@ -319,7 +324,6 @@ class RASTER_EXPORT RasterImplementation : public RasterInterface,
// whether it should aggressively free them.
bool aggressively_free_resources_;
- IdAllocator texture_id_allocator_;
IdAllocator query_id_allocator_;
ClientFontManager font_manager_;
@@ -343,7 +347,7 @@ class RASTER_EXPORT RasterImplementation : public RasterInterface,
};
base::Optional<RasterProperties> raster_properties_;
- size_t max_inlined_entry_size_;
+ uint32_t max_inlined_entry_size_;
ClientTransferCache transfer_cache_;
std::string last_active_url_;
diff --git a/chromium/gpu/command_buffer/client/raster_implementation_autogen.h b/chromium/gpu/command_buffer/client/raster_implementation_autogen.h
index a7e8767a8a6..d7268773a12 100644
--- a/chromium/gpu/command_buffer/client/raster_implementation_autogen.h
+++ b/chromium/gpu/command_buffer/client/raster_implementation_autogen.h
@@ -13,8 +13,6 @@
#ifndef GPU_COMMAND_BUFFER_CLIENT_RASTER_IMPLEMENTATION_AUTOGEN_H_
#define GPU_COMMAND_BUFFER_CLIENT_RASTER_IMPLEMENTATION_AUTOGEN_H_
-void DeleteTextures(GLsizei n, const GLuint* textures) override;
-
void Finish() override;
void Flush() override;
@@ -37,8 +35,6 @@ void GetQueryObjectuivEXT(GLuint id, GLenum pname, GLuint* params) override;
void LoseContextCHROMIUM(GLenum current, GLenum other) override;
-void GenSyncTokenCHROMIUM(GLbyte* sync_token) override;
-
void GenUnverifiedSyncTokenCHROMIUM(GLbyte* sync_token) override;
void VerifySyncTokensCHROMIUM(GLbyte** sync_tokens, GLsizei count) override;
@@ -49,20 +45,6 @@ GLenum GetGraphicsResetStatusKHR() override;
void EndRasterCHROMIUM() override;
-GLuint CreateAndConsumeTexture(bool use_buffer,
- gfx::BufferUsage buffer_usage,
- viz::ResourceFormat format,
- const GLbyte* mailbox) override;
-
-void CopySubTexture(GLuint source_id,
- GLuint dest_id,
- GLint xoffset,
- GLint yoffset,
- GLint x,
- GLint y,
- GLsizei width,
- GLsizei height) override;
-
void TraceBeginCHROMIUM(const char* category_name,
const char* trace_name) override;
diff --git a/chromium/gpu/command_buffer/client/raster_implementation_gles.cc b/chromium/gpu/command_buffer/client/raster_implementation_gles.cc
index 5be94207915..4fd888e1edf 100644
--- a/chromium/gpu/command_buffer/client/raster_implementation_gles.cc
+++ b/chromium/gpu/command_buffer/client/raster_implementation_gles.cc
@@ -23,41 +23,15 @@
#include "gpu/command_buffer/client/gles2_interface.h"
#include "gpu/command_buffer/common/capabilities.h"
#include "gpu/command_buffer/common/gpu_memory_buffer_support.h"
+#include "gpu/command_buffer/common/mailbox.h"
#include "ui/gfx/geometry/rect_conversions.h"
#include "ui/gfx/skia_util.h"
namespace gpu {
namespace raster {
-static GLenum GetImageTextureTarget(const gpu::Capabilities& caps,
- gfx::BufferUsage usage,
- viz::ResourceFormat format) {
- gfx::BufferFormat buffer_format = viz::BufferFormat(format);
- return GetBufferTextureTarget(usage, buffer_format, caps);
-}
-
-RasterImplementationGLES::Texture::Texture(GLuint id,
- GLenum target,
- bool use_buffer,
- gfx::BufferUsage buffer_usage,
- viz::ResourceFormat format)
- : id(id),
- target(target),
- use_buffer(use_buffer),
- buffer_usage(buffer_usage),
- format(format) {}
-
-RasterImplementationGLES::Texture* RasterImplementationGLES::GetTexture(
- GLuint texture_id) {
- auto it = texture_info_.find(texture_id);
- DCHECK(it != texture_info_.end()) << "Undefined texture id";
- return &it->second;
-}
-
-RasterImplementationGLES::RasterImplementationGLES(
- gles2::GLES2Interface* gl,
- const gpu::Capabilities& caps)
- : gl_(gl), caps_(caps) {}
+RasterImplementationGLES::RasterImplementationGLES(gles2::GLES2Interface* gl)
+ : gl_(gl) {}
RasterImplementationGLES::~RasterImplementationGLES() {}
@@ -77,10 +51,6 @@ void RasterImplementationGLES::OrderingBarrierCHROMIUM() {
gl_->OrderingBarrierCHROMIUM();
}
-void RasterImplementationGLES::GenSyncTokenCHROMIUM(GLbyte* sync_token) {
- gl_->GenSyncTokenCHROMIUM(sync_token);
-}
-
void RasterImplementationGLES::GenUnverifiedSyncTokenCHROMIUM(
GLbyte* sync_token) {
gl_->GenUnverifiedSyncTokenCHROMIUM(sync_token);
@@ -131,59 +101,33 @@ void RasterImplementationGLES::GetQueryObjectuivEXT(GLuint id,
gl_->GetQueryObjectuivEXT(id, pname, params);
}
-void RasterImplementationGLES::DeleteTextures(GLsizei n,
- const GLuint* textures) {
- DCHECK_GT(n, 0);
- for (GLsizei i = 0; i < n; i++) {
- auto texture_iter = texture_info_.find(textures[i]);
- DCHECK(texture_iter != texture_info_.end());
-
- texture_info_.erase(texture_iter);
- }
-
- gl_->DeleteTextures(n, textures);
-}
-
-GLuint RasterImplementationGLES::CreateAndConsumeTexture(
- bool use_buffer,
- gfx::BufferUsage buffer_usage,
- viz::ResourceFormat format,
- const GLbyte* mailbox) {
- GLuint texture_id = gl_->CreateAndConsumeTextureCHROMIUM(mailbox);
- DCHECK(texture_id);
- DCHECK(!viz::IsResourceFormatCompressed(format));
-
- GLenum target = use_buffer
- ? GetImageTextureTarget(caps_, buffer_usage, format)
- : GL_TEXTURE_2D;
- texture_info_.emplace(std::make_pair(
- texture_id,
- Texture(texture_id, target, use_buffer, buffer_usage, format)));
-
- return texture_id;
-}
-
-void RasterImplementationGLES::CopySubTexture(GLuint source_id,
- GLuint dest_id,
- GLint xoffset,
- GLint yoffset,
- GLint x,
- GLint y,
- GLsizei width,
- GLsizei height) {
- Texture* source = GetTexture(source_id);
- Texture* dest = GetTexture(dest_id);
-
- gl_->CopySubTextureCHROMIUM(source->id, 0, dest->target, dest->id, 0, xoffset,
- yoffset, x, y, width, height, false, false,
- false);
+void RasterImplementationGLES::CopySubTexture(
+ const gpu::Mailbox& source_mailbox,
+ const gpu::Mailbox& dest_mailbox,
+ GLenum dest_target,
+ GLint xoffset,
+ GLint yoffset,
+ GLint x,
+ GLint y,
+ GLsizei width,
+ GLsizei height) {
+ GLuint texture_ids[2] = {
+ gl_->CreateAndConsumeTextureCHROMIUM(source_mailbox.name),
+ gl_->CreateAndConsumeTextureCHROMIUM(dest_mailbox.name),
+ };
+ DCHECK(texture_ids[0]);
+ DCHECK(texture_ids[1]);
+
+ gl_->CopySubTextureCHROMIUM(texture_ids[0], 0, dest_target, texture_ids[1], 0,
+ xoffset, yoffset, x, y, width, height, false,
+ false, false);
+ gl_->DeleteTextures(2, texture_ids);
}
void RasterImplementationGLES::BeginRasterCHROMIUM(
GLuint sk_color,
GLuint msaa_sample_count,
GLboolean can_use_lcd_text,
- GLint color_type,
const cc::RasterColorSpace& raster_color_space,
const GLbyte* mailbox) {
NOTREACHED();
@@ -219,6 +163,15 @@ SyncToken RasterImplementationGLES::ScheduleImageDecode(
return SyncToken();
}
+GLuint RasterImplementationGLES::CreateAndConsumeForGpuRaster(
+ const GLbyte* mailbox) {
+ return gl_->CreateAndConsumeTextureCHROMIUM(mailbox);
+}
+
+void RasterImplementationGLES::DeleteGpuRasterTexture(GLuint texture) {
+ gl_->DeleteTextures(1, &texture);
+}
+
void RasterImplementationGLES::BeginGpuRaster() {
// Using push/pop functions directly incurs cost to evaluate function
// arguments even when tracing is disabled.
diff --git a/chromium/gpu/command_buffer/client/raster_implementation_gles.h b/chromium/gpu/command_buffer/client/raster_implementation_gles.h
index e4573cde019..2de8b7f99d2 100644
--- a/chromium/gpu/command_buffer/client/raster_implementation_gles.h
+++ b/chromium/gpu/command_buffer/client/raster_implementation_gles.h
@@ -21,13 +21,10 @@
namespace gpu {
namespace raster {
-struct Capabilities;
-
// An implementation of RasterInterface on top of GLES2Interface.
class RASTER_EXPORT RasterImplementationGLES : public RasterInterface {
public:
- RasterImplementationGLES(gles2::GLES2Interface* gl,
- const gpu::Capabilities& caps);
+ explicit RasterImplementationGLES(gles2::GLES2Interface* gl);
~RasterImplementationGLES() override;
// Command buffer Flush / Finish.
@@ -37,7 +34,6 @@ class RASTER_EXPORT RasterImplementationGLES : public RasterInterface {
void OrderingBarrierCHROMIUM() override;
// SyncTokens.
- void GenSyncTokenCHROMIUM(GLbyte* sync_token) override;
void GenUnverifiedSyncTokenCHROMIUM(GLbyte* sync_token) override;
void VerifySyncTokensCHROMIUM(GLbyte** sync_tokens, GLsizei count) override;
void WaitSyncTokenCHROMIUM(const GLbyte* sync_token) override;
@@ -54,18 +50,10 @@ class RASTER_EXPORT RasterImplementationGLES : public RasterInterface {
void EndQueryEXT(GLenum target) override;
void GetQueryObjectuivEXT(GLuint id, GLenum pname, GLuint* params) override;
- // Texture objects.
- void DeleteTextures(GLsizei n, const GLuint* textures) override;
-
- // Mailboxes.
- GLuint CreateAndConsumeTexture(bool use_buffer,
- gfx::BufferUsage buffer_usage,
- viz::ResourceFormat format,
- const GLbyte* mailbox) override;
-
// Texture copying.
- void CopySubTexture(GLuint source_id,
- GLuint dest_id,
+ void CopySubTexture(const gpu::Mailbox& source_mailbox,
+ const gpu::Mailbox& dest_mailbox,
+ GLenum dest_target,
GLint xoffset,
GLint yoffset,
GLint x,
@@ -77,7 +65,6 @@ class RASTER_EXPORT RasterImplementationGLES : public RasterInterface {
void BeginRasterCHROMIUM(GLuint sk_color,
GLuint msaa_sample_count,
GLboolean can_use_lcd_text,
- GLint color_type,
const cc::RasterColorSpace& raster_color_space,
const GLbyte* mailbox) override;
void RasterCHROMIUM(const cc::DisplayItemList* list,
@@ -98,6 +85,8 @@ class RASTER_EXPORT RasterImplementationGLES : public RasterInterface {
bool needs_mips) override;
// Raster via GrContext.
+ GLuint CreateAndConsumeForGpuRaster(const GLbyte* mailbox) override;
+ void DeleteGpuRasterTexture(GLuint texture) override;
void BeginGpuRaster() override;
void EndGpuRaster() override;
@@ -108,25 +97,7 @@ class RASTER_EXPORT RasterImplementationGLES : public RasterInterface {
void SetActiveURLCHROMIUM(const char* url) override;
private:
- struct Texture {
- Texture(GLuint id,
- GLenum target,
- bool use_buffer,
- gfx::BufferUsage buffer_usage,
- viz::ResourceFormat format);
- GLuint id;
- GLenum target;
- bool use_buffer;
- gfx::BufferUsage buffer_usage;
- viz::ResourceFormat format;
- };
-
- Texture* GetTexture(GLuint texture_id);
-
gles2::GLES2Interface* gl_;
- gpu::Capabilities caps_;
-
- std::unordered_map<GLuint, Texture> texture_info_;
DISALLOW_COPY_AND_ASSIGN(RasterImplementationGLES);
};
diff --git a/chromium/gpu/command_buffer/client/raster_implementation_gles_unittest.cc b/chromium/gpu/command_buffer/client/raster_implementation_gles_unittest.cc
index 4737c2f1757..2a6964a0d5b 100644
--- a/chromium/gpu/command_buffer/client/raster_implementation_gles_unittest.cc
+++ b/chromium/gpu/command_buffer/client/raster_implementation_gles_unittest.cc
@@ -15,6 +15,7 @@
#include "base/containers/flat_map.h"
#include "cc/paint/color_space_transfer_cache_entry.h"
#include "cc/paint/display_item_list.h"
+#include "cc/paint/image_provider.h"
#include "components/viz/common/resources/resource_format_utils.h"
#include "gpu/GLES2/gl2extchromium.h"
#include "gpu/command_buffer/client/client_test_helper.h"
@@ -206,7 +207,7 @@ class ContextSupportStub : public ContextSupport {
uint32_t texture_id) override {
return false;
}
- void* MapTransferCacheEntry(size_t serialized_size) override {
+ void* MapTransferCacheEntry(uint32_t serialized_size) override {
mapped_transfer_cache_entry_.reset(new char[serialized_size]);
return mapped_transfer_cache_entry_.get();
}
@@ -244,16 +245,11 @@ class RasterImplementationGLESTest : public testing::Test {
void SetUp() override {
gl_ = std::make_unique<RasterMockGLES2Interface>();
- ri_ = std::make_unique<RasterImplementationGLES>(gl_.get(),
- gpu::Capabilities());
+ ri_ = std::make_unique<RasterImplementationGLES>(gl_.get());
}
void TearDown() override {}
- void SetUpWithCapabilities(const gpu::Capabilities& capabilities) {
- ri_.reset(new RasterImplementationGLES(gl_.get(), capabilities));
- }
-
void ExpectBindTexture(GLenum target, GLuint texture_id) {
if (bound_texture_ != texture_id) {
bound_texture_ = texture_id;
@@ -288,13 +284,6 @@ TEST_F(RasterImplementationGLESTest, OrderingBarrierCHROMIUM) {
ri_->OrderingBarrierCHROMIUM();
}
-TEST_F(RasterImplementationGLESTest, GenSyncTokenCHROMIUM) {
- GLbyte sync_token_data[GL_SYNC_TOKEN_SIZE_CHROMIUM] = {};
-
- EXPECT_CALL(*gl_, GenSyncTokenCHROMIUM(sync_token_data)).Times(1);
- ri_->GenSyncTokenCHROMIUM(sync_token_data);
-}
-
TEST_F(RasterImplementationGLESTest, GenUnverifiedSyncTokenCHROMIUM) {
GLbyte sync_token_data[GL_SYNC_TOKEN_SIZE_CHROMIUM] = {};
@@ -385,36 +374,28 @@ TEST_F(RasterImplementationGLESTest, GetQueryObjectuivEXT) {
ri_->GetQueryObjectuivEXT(kQueryId, kQueryParam, &result);
}
-TEST_F(RasterImplementationGLESTest, DeleteTextures) {
- const GLsizei kNumTextures = 2;
- GLuint texture_ids[kNumTextures] = {2, 3};
+TEST_F(RasterImplementationGLESTest, DeleteGpuRasterTexture) {
+ GLuint texture_id = 3;
gpu::Mailbox mailbox;
EXPECT_CALL(*gl_, CreateAndConsumeTextureCHROMIUM(mailbox.name))
- .WillOnce(Return(texture_ids[0]))
- .RetiresOnSaturation();
- EXPECT_CALL(*gl_, CreateAndConsumeTextureCHROMIUM(mailbox.name))
- .WillOnce(Return(texture_ids[1]))
+ .WillOnce(Return(texture_id))
.RetiresOnSaturation();
- ri_->CreateAndConsumeTexture(false, gfx::BufferUsage::GPU_READ,
- viz::RGBA_8888, mailbox.name);
- ri_->CreateAndConsumeTexture(false, gfx::BufferUsage::GPU_READ,
- viz::RGBA_8888, mailbox.name);
+ EXPECT_EQ(texture_id, ri_->CreateAndConsumeForGpuRaster(mailbox.name));
- EXPECT_CALL(*gl_, DeleteTextures(kNumTextures, texture_ids)).Times(1);
- ri_->DeleteTextures(kNumTextures, texture_ids);
+ EXPECT_CALL(*gl_, DeleteTextures(1, _)).Times(1);
+ ri_->DeleteGpuRasterTexture(texture_id);
}
-TEST_F(RasterImplementationGLESTest, CreateAndConsumeTexture) {
+TEST_F(RasterImplementationGLESTest, CreateAndConsumeForGpuRaster) {
const GLuint kTextureId = 23;
GLuint texture_id = 0;
gpu::Mailbox mailbox;
EXPECT_CALL(*gl_, CreateAndConsumeTextureCHROMIUM(mailbox.name))
.WillOnce(Return(kTextureId));
- texture_id = ri_->CreateAndConsumeTexture(false, gfx::BufferUsage::GPU_READ,
- viz::RGBA_8888, mailbox.name);
+ texture_id = ri_->CreateAndConsumeForGpuRaster(mailbox.name);
EXPECT_EQ(kTextureId, texture_id);
}
diff --git a/chromium/gpu/command_buffer/client/raster_implementation_impl_autogen.h b/chromium/gpu/command_buffer/client/raster_implementation_impl_autogen.h
index 766205dcd49..e581ccf9af0 100644
--- a/chromium/gpu/command_buffer/client/raster_implementation_impl_autogen.h
+++ b/chromium/gpu/command_buffer/client/raster_implementation_impl_autogen.h
@@ -13,28 +13,6 @@
#ifndef GPU_COMMAND_BUFFER_CLIENT_RASTER_IMPLEMENTATION_IMPL_AUTOGEN_H_
#define GPU_COMMAND_BUFFER_CLIENT_RASTER_IMPLEMENTATION_IMPL_AUTOGEN_H_
-void RasterImplementation::DeleteTextures(GLsizei n, const GLuint* textures) {
- GPU_CLIENT_SINGLE_THREAD_CHECK();
- GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glDeleteTextures(" << n << ", "
- << static_cast<const void*>(textures) << ")");
- GPU_CLIENT_LOG_CODE_BLOCK({
- for (GLsizei i = 0; i < n; ++i) {
- GPU_CLIENT_LOG(" " << i << ": " << textures[i]);
- }
- });
- GPU_CLIENT_DCHECK_CODE_BLOCK({
- for (GLsizei i = 0; i < n; ++i) {
- DCHECK(textures[i] != 0);
- }
- });
- if (n < 0) {
- SetGLError(GL_INVALID_VALUE, "glDeleteTextures", "n < 0");
- return;
- }
- DeleteTexturesHelper(n, textures);
- CheckGLError();
-}
-
void RasterImplementation::GenQueriesEXT(GLsizei n, GLuint* queries) {
GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glGenQueriesEXT(" << n << ", "
<< static_cast<const void*>(queries) << ")");
@@ -87,30 +65,4 @@ void RasterImplementation::LoseContextCHROMIUM(GLenum current, GLenum other) {
CheckGLError();
}
-void RasterImplementation::CopySubTexture(GLuint source_id,
- GLuint dest_id,
- GLint xoffset,
- GLint yoffset,
- GLint x,
- GLint y,
- GLsizei width,
- GLsizei height) {
- GPU_CLIENT_SINGLE_THREAD_CHECK();
- GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glCopySubTexture(" << source_id
- << ", " << dest_id << ", " << xoffset << ", " << yoffset
- << ", " << x << ", " << y << ", " << width << ", "
- << height << ")");
- if (width < 0) {
- SetGLError(GL_INVALID_VALUE, "glCopySubTexture", "width < 0");
- return;
- }
- if (height < 0) {
- SetGLError(GL_INVALID_VALUE, "glCopySubTexture", "height < 0");
- return;
- }
- helper_->CopySubTexture(source_id, dest_id, xoffset, yoffset, x, y, width,
- height);
- CheckGLError();
-}
-
#endif // GPU_COMMAND_BUFFER_CLIENT_RASTER_IMPLEMENTATION_IMPL_AUTOGEN_H_
diff --git a/chromium/gpu/command_buffer/client/raster_implementation_unittest.cc b/chromium/gpu/command_buffer/client/raster_implementation_unittest.cc
index e99f54495a5..d78df6c45af 100644
--- a/chromium/gpu/command_buffer/client/raster_implementation_unittest.cc
+++ b/chromium/gpu/command_buffer/client/raster_implementation_unittest.cc
@@ -15,6 +15,7 @@
#include <memory>
#include "base/compiler_specific.h"
+#include "base/stl_util.h"
#include "cc/paint/raw_memory_transfer_cache_entry.h"
#include "cc/paint/transfer_cache_serialize_helper.h"
#include "gpu/command_buffer/client/client_test_helper.h"
@@ -69,10 +70,10 @@ class SizedResultHelper {
class RasterImplementationTest : public testing::Test {
protected:
static const uint8_t kInitialValue = 0xBD;
- static const int32_t kNumCommandEntries = 500;
- static const int32_t kCommandBufferSizeBytes =
+ static const uint32_t kNumCommandEntries = 500;
+ static const uint32_t kCommandBufferSizeBytes =
kNumCommandEntries * sizeof(CommandBufferEntry);
- static const size_t kTransferBufferSize = 512;
+ static const uint32_t kTransferBufferSize = 512;
static const GLint kMaxCombinedTextureImageUnits = 8;
static const GLint kMaxTextureImageUnits = 8;
@@ -254,7 +255,7 @@ class RasterImplementationTest : public testing::Test {
memset(ring_buffer->memory(), kInitialValue, ring_buffer->size());
}
- size_t MaxTransferBufferSize() {
+ uint32_t MaxTransferBufferSize() {
return transfer_buffer_->MaxTransferBufferSize();
}
@@ -262,15 +263,15 @@ class RasterImplementationTest : public testing::Test {
gl_->mapped_memory_->set_max_allocated_bytes(limit);
}
- ExpectedMemoryInfo GetExpectedMemory(size_t size) {
+ ExpectedMemoryInfo GetExpectedMemory(uint32_t size) {
return transfer_buffer_->GetExpectedMemory(size);
}
- ExpectedMemoryInfo GetExpectedResultMemory(size_t size) {
+ ExpectedMemoryInfo GetExpectedResultMemory(uint32_t size) {
return transfer_buffer_->GetExpectedResultMemory(size);
}
- ExpectedMemoryInfo GetExpectedMappedMemory(size_t size) {
+ ExpectedMemoryInfo GetExpectedMappedMemory(uint32_t size) {
ExpectedMemoryInfo mem;
// Temporarily allocate memory and expect that memory block to be reused.
@@ -332,9 +333,9 @@ class RasterImplementationManualInitTest : public RasterImplementationTest {
// GCC requires these declarations, but MSVC requires they not be present
#ifndef _MSC_VER
const uint8_t RasterImplementationTest::kInitialValue;
-const int32_t RasterImplementationTest::kNumCommandEntries;
-const int32_t RasterImplementationTest::kCommandBufferSizeBytes;
-const size_t RasterImplementationTest::kTransferBufferSize;
+const uint32_t RasterImplementationTest::kNumCommandEntries;
+const uint32_t RasterImplementationTest::kCommandBufferSizeBytes;
+const uint32_t RasterImplementationTest::kTransferBufferSize;
const GLint RasterImplementationTest::kMaxCombinedTextureImageUnits;
const GLint RasterImplementationTest::kMaxTextureImageUnits;
const GLint RasterImplementationTest::kMaxTextureSize;
@@ -404,11 +405,11 @@ TEST_F(RasterImplementationTest, BeginEndQueryEXT) {
GLuint data[2];
};
GenCmds expected_gen_cmds;
- expected_gen_cmds.gen.Init(arraysize(expected_ids), &expected_ids[0]);
- GLuint ids[arraysize(expected_ids)] = {
+ expected_gen_cmds.gen.Init(base::size(expected_ids), &expected_ids[0]);
+ GLuint ids[base::size(expected_ids)] = {
0,
};
- gl_->GenQueriesEXT(arraysize(expected_ids), &ids[0]);
+ gl_->GenQueriesEXT(base::size(expected_ids), &ids[0]);
EXPECT_EQ(0,
memcmp(&expected_gen_cmds, commands_, sizeof(expected_gen_cmds)));
GLuint id1 = ids[0];
@@ -511,39 +512,6 @@ TEST_F(RasterImplementationManualInitTest, BadQueryTargets) {
EXPECT_EQ(nullptr, GetQuery(id));
}
-TEST_F(RasterImplementationTest, GenSyncTokenCHROMIUM) {
- const CommandBufferNamespace kNamespaceId = CommandBufferNamespace::GPU_IO;
- const CommandBufferId kCommandBufferId =
- CommandBufferId::FromUnsafeValue(234u);
- const GLuint64 kFenceSync = 123u;
- SyncToken sync_token;
-
- EXPECT_CALL(*gpu_control_, GetNamespaceID())
- .WillRepeatedly(Return(kNamespaceId));
- EXPECT_CALL(*gpu_control_, GetCommandBufferID())
- .WillRepeatedly(Return(kCommandBufferId));
-
- gl_->GenSyncTokenCHROMIUM(nullptr);
- EXPECT_TRUE(NoCommandsWritten());
- EXPECT_EQ(GL_INVALID_VALUE, CheckError());
-
- const void* commands = GetPut();
- cmds::InsertFenceSyncCHROMIUM insert_fence_sync;
- insert_fence_sync.Init(kFenceSync);
-
- EXPECT_CALL(*gpu_control_, GenerateFenceSyncRelease())
- .WillOnce(Return(kFenceSync));
- EXPECT_CALL(*gpu_control_, EnsureWorkVisible());
- gl_->GenSyncTokenCHROMIUM(sync_token.GetData());
- EXPECT_EQ(0, memcmp(&insert_fence_sync, commands, sizeof(insert_fence_sync)));
- EXPECT_EQ(GL_NO_ERROR, CheckError());
-
- EXPECT_TRUE(sync_token.verified_flush());
- EXPECT_EQ(kNamespaceId, sync_token.namespace_id());
- EXPECT_EQ(kCommandBufferId, sync_token.command_buffer_id());
- EXPECT_EQ(kFenceSync, sync_token.release_count());
-}
-
TEST_F(RasterImplementationTest, GenUnverifiedSyncTokenCHROMIUM) {
const CommandBufferNamespace kNamespaceId = CommandBufferNamespace::GPU_IO;
const CommandBufferId kCommandBufferId =
@@ -613,7 +581,7 @@ TEST_F(RasterImplementationTest, VerifySyncTokensCHROMIUM) {
EXPECT_CALL(*gpu_control_, CanWaitUnverifiedSyncToken(sync_token))
.WillOnce(Return(true));
EXPECT_CALL(*gpu_control_, EnsureWorkVisible());
- gl_->VerifySyncTokensCHROMIUM(sync_token_datas, arraysize(sync_token_datas));
+ gl_->VerifySyncTokensCHROMIUM(sync_token_datas, base::size(sync_token_datas));
EXPECT_TRUE(NoCommandsWritten());
EXPECT_EQ(GL_NO_ERROR, CheckError());
@@ -670,7 +638,7 @@ TEST_F(RasterImplementationTest, VerifySyncTokensCHROMIUM_Sequence) {
.InSequence(sequence)
.WillOnce(Return(true));
EXPECT_CALL(*gpu_control_, EnsureWorkVisible()).InSequence(sequence);
- gl_->VerifySyncTokensCHROMIUM(sync_token_datas, arraysize(sync_token_datas));
+ gl_->VerifySyncTokensCHROMIUM(sync_token_datas, base::size(sync_token_datas));
EXPECT_EQ(GL_NO_ERROR, CheckError());
EXPECT_TRUE(sync_token1.verified_flush());
@@ -693,7 +661,7 @@ TEST_F(RasterImplementationTest, VerifySyncTokensCHROMIUM_EmptySyncToken) {
// Ensure proper sequence of checking and validating.
EXPECT_CALL(*gpu_control_, CanWaitUnverifiedSyncToken(_)).Times(0);
EXPECT_CALL(*gpu_control_, EnsureWorkVisible()).Times(0);
- gl_->VerifySyncTokensCHROMIUM(sync_token_datas, arraysize(sync_token_datas));
+ gl_->VerifySyncTokensCHROMIUM(sync_token_datas, base::size(sync_token_datas));
EXPECT_TRUE(NoCommandsWritten());
EXPECT_EQ(GL_NO_ERROR, CheckError());
@@ -712,22 +680,22 @@ TEST_F(RasterImplementationTest, WaitSyncTokenCHROMIUM) {
struct Cmds {
cmds::InsertFenceSyncCHROMIUM insert_fence_sync;
- cmds::WaitSyncTokenCHROMIUM wait_sync_token;
};
Cmds expected;
expected.insert_fence_sync.Init(kFenceSync);
- expected.wait_sync_token.Init(kNamespaceId, kCommandBufferId.GetUnsafeValue(),
- kFenceSync);
EXPECT_CALL(*gpu_control_, GetNamespaceID()).WillOnce(Return(kNamespaceId));
EXPECT_CALL(*gpu_control_, GetCommandBufferID())
.WillOnce(Return(kCommandBufferId));
EXPECT_CALL(*gpu_control_, GenerateFenceSyncRelease())
.WillOnce(Return(kFenceSync));
- EXPECT_CALL(*gpu_control_, EnsureWorkVisible());
- gl_->GenSyncTokenCHROMIUM(sync_token_data);
+ gl_->GenUnverifiedSyncTokenCHROMIUM(sync_token_data);
- EXPECT_CALL(*gpu_control_, WaitSyncTokenHint(sync_token));
+ EXPECT_CALL(*gpu_control_, CanWaitUnverifiedSyncToken(sync_token))
+ .WillOnce(Return(true));
+ gpu::SyncToken verified_sync_token = sync_token;
+ verified_sync_token.SetVerifyFlush();
+ EXPECT_CALL(*gpu_control_, WaitSyncToken(verified_sync_token));
gl_->WaitSyncTokenCHROMIUM(sync_token_data);
EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
}
@@ -779,9 +747,8 @@ TEST_F(RasterImplementationTest, SignalSyncToken) {
EXPECT_CALL(*gpu_control_, GenerateFenceSyncRelease())
.WillOnce(Return(kFenceSync));
- EXPECT_CALL(*gpu_control_, EnsureWorkVisible());
gpu::SyncToken sync_token;
- gl_->GenSyncTokenCHROMIUM(sync_token.GetData());
+ gl_->GenUnverifiedSyncTokenCHROMIUM(sync_token.GetData());
int signaled_count = 0;
@@ -793,6 +760,8 @@ TEST_F(RasterImplementationTest, SignalSyncToken) {
base::OnceClosure* callback) {
signal_closure = std::move(*callback);
}));
+ EXPECT_CALL(*gpu_control_, CanWaitUnverifiedSyncToken(sync_token))
+ .WillOnce(Return(true));
gl_->SignalSyncToken(sync_token,
base::BindOnce(&CountCallback, &signaled_count));
EXPECT_EQ(0, signaled_count);
@@ -813,9 +782,8 @@ TEST_F(RasterImplementationTest, SignalSyncTokenAfterContextLoss) {
.WillOnce(Return(kCommandBufferId));
EXPECT_CALL(*gpu_control_, GenerateFenceSyncRelease())
.WillOnce(Return(kFenceSync));
- EXPECT_CALL(*gpu_control_, EnsureWorkVisible());
gpu::SyncToken sync_token;
- gl_->GenSyncTokenCHROMIUM(sync_token.GetData());
+ gl_->GenUnverifiedSyncTokenCHROMIUM(sync_token.GetData());
int signaled_count = 0;
@@ -827,6 +795,8 @@ TEST_F(RasterImplementationTest, SignalSyncTokenAfterContextLoss) {
base::OnceClosure* callback) {
signal_closure = std::move(*callback);
}));
+ EXPECT_CALL(*gpu_control_, CanWaitUnverifiedSyncToken(sync_token))
+ .WillOnce(Return(true));
gl_->SignalSyncToken(sync_token,
base::BindOnce(&CountCallback, &signaled_count));
EXPECT_EQ(0, signaled_count);
@@ -878,7 +848,7 @@ TEST_F(RasterImplementationManualInitTest, FailInitOnTransferBufferFail) {
TEST_F(RasterImplementationTest, TransferCacheSerialization) {
gl_->set_max_inlined_entry_size_for_testing(768u);
- size_t buffer_size = transfer_buffer_->MaxTransferBufferSize();
+ uint32_t buffer_size = transfer_buffer_->MaxTransferBufferSize();
ScopedTransferBufferPtr buffer(buffer_size, helper_, transfer_buffer_);
ASSERT_EQ(buffer.size(), buffer_size);
@@ -902,7 +872,7 @@ TEST_F(RasterImplementationTest, TransferCacheSerialization) {
TEST_F(RasterImplementationTest, SetActiveURLCHROMIUM) {
const uint32_t kURLBucketId = RasterImplementation::kResultBucketId;
const std::string url = "chrome://test";
- const size_t kPaddedStringSize =
+ const uint32_t kPaddedStringSize =
transfer_buffer_->RoundToAlignment(url.size());
gl_->SetActiveURLCHROMIUM(url.c_str());
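The reworked WaitSyncTokenCHROMIUM expectations above encode a new client-side rule: tokens from GenUnverifiedSyncTokenCHROMIUM must pass CanWaitUnverifiedSyncToken() and be marked verified before a wait is issued. A minimal standalone model of that ordering; FakeSyncToken and VerifyThenWait are simplified stand-ins, not the real gpu classes:

#include <cassert>
#include <cstdint>

// Simplified stand-ins; the real SyncToken and GpuControl live in
// gpu/command_buffer/common/ and are not reproduced here.
struct FakeSyncToken {
  uint64_t release_count = 0;
  bool verified_flush = false;
  void SetVerifyFlush() { verified_flush = true; }
};

// Models the ordering the updated test expects: an unverified token must
// pass the CanWaitUnverifiedSyncToken() check and be marked verified
// before the wait command is issued to the service.
bool VerifyThenWait(FakeSyncToken* token, bool can_wait_unverified) {
  if (!token->verified_flush) {
    if (!can_wait_unverified)
      return false;  // Untrusted token: the wait is dropped.
    token->SetVerifyFlush();
  }
  // A real client would now issue WaitSyncToken(*token).
  return true;
}

int main() {
  FakeSyncToken token;
  token.release_count = 123u;
  assert(VerifyThenWait(&token, /*can_wait_unverified=*/true));
  assert(token.verified_flush);
  return 0;
}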
diff --git a/chromium/gpu/command_buffer/client/raster_implementation_unittest_autogen.h b/chromium/gpu/command_buffer/client/raster_implementation_unittest_autogen.h
index a9a82a3f010..bd95a044ab3 100644
--- a/chromium/gpu/command_buffer/client/raster_implementation_unittest_autogen.h
+++ b/chromium/gpu/command_buffer/client/raster_implementation_unittest_autogen.h
@@ -13,20 +13,6 @@
#ifndef GPU_COMMAND_BUFFER_CLIENT_RASTER_IMPLEMENTATION_UNITTEST_AUTOGEN_H_
#define GPU_COMMAND_BUFFER_CLIENT_RASTER_IMPLEMENTATION_UNITTEST_AUTOGEN_H_
-TEST_F(RasterImplementationTest, DeleteTextures) {
- GLuint ids[2] = {kTexturesStartId, kTexturesStartId + 1};
- struct Cmds {
- cmds::DeleteTexturesImmediate del;
- GLuint data[2];
- };
- Cmds expected;
- expected.del.Init(base::size(ids), &ids[0]);
- expected.data[0] = kTexturesStartId;
- expected.data[1] = kTexturesStartId + 1;
- gl_->DeleteTextures(base::size(ids), &ids[0]);
- EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
-}
-
TEST_F(RasterImplementationTest, Flush) {
struct Cmds {
cmds::Flush cmd;
@@ -81,15 +67,4 @@ TEST_F(RasterImplementationTest, LoseContextCHROMIUM) {
GL_GUILTY_CONTEXT_RESET_ARB);
EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
}
-
-TEST_F(RasterImplementationTest, CopySubTexture) {
- struct Cmds {
- cmds::CopySubTexture cmd;
- };
- Cmds expected;
- expected.cmd.Init(1, 2, 3, 4, 5, 6, 7, 8);
-
- gl_->CopySubTexture(1, 2, 3, 4, 5, 6, 7, 8);
- EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
-}
#endif // GPU_COMMAND_BUFFER_CLIENT_RASTER_IMPLEMENTATION_UNITTEST_AUTOGEN_H_
diff --git a/chromium/gpu/command_buffer/client/raster_interface.h b/chromium/gpu/command_buffer/client/raster_interface.h
index 8b80e5c7d75..7373ad4d91a 100644
--- a/chromium/gpu/command_buffer/client/raster_interface.h
+++ b/chromium/gpu/command_buffer/client/raster_interface.h
@@ -29,6 +29,9 @@ extern "C" typedef struct _ClientBuffer* ClientBuffer;
extern "C" typedef struct _GLColorSpace* GLColorSpace;
namespace gpu {
+
+struct Mailbox;
+
namespace raster {
enum RasterTexStorageFlags { kNone = 0, kOverlay = (1 << 0) };
@@ -38,12 +41,20 @@ class RasterInterface {
RasterInterface() {}
virtual ~RasterInterface() {}
+ virtual void CopySubTexture(const gpu::Mailbox& source_mailbox,
+ const gpu::Mailbox& dest_mailbox,
+ GLenum dest_target,
+ GLint xoffset,
+ GLint yoffset,
+ GLint x,
+ GLint y,
+ GLsizei width,
+ GLsizei height) = 0;
// OOP-Raster
virtual void BeginRasterCHROMIUM(
GLuint sk_color,
GLuint msaa_sample_count,
GLboolean can_use_lcd_text,
- GLint pixel_config,
const cc::RasterColorSpace& raster_color_space,
const GLbyte* mailbox) = 0;
virtual void RasterCHROMIUM(const cc::DisplayItemList* list,
@@ -66,6 +77,8 @@ class RasterInterface {
bool needs_mips) = 0;
// Raster via GrContext.
+ virtual GLuint CreateAndConsumeForGpuRaster(const GLbyte* mailbox) = 0;
+ virtual void DeleteGpuRasterTexture(GLuint texture) = 0;
virtual void BeginGpuRaster() = 0;
virtual void EndGpuRaster() = 0;
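The mailbox-based CopySubTexture above replaces the old texture-id overload (removed from the autogen interface below). A usage sketch only, assuming the Chromium gpu client headers, an initialized RasterInterface, and two mailboxes produced by a SharedImageInterface; the helper name and the 64x64 region are illustrative:

// Sketch: copies a 64x64 region from |src| into |dst| at (0, 0), assuming
// both mailboxes name GL-backed shared images.
void CopyThumbnail(gpu::raster::RasterInterface* ri,
                   const gpu::Mailbox& src,
                   const gpu::Mailbox& dst) {
  ri->CopySubTexture(src, dst, GL_TEXTURE_2D,
                     /*xoffset=*/0, /*yoffset=*/0,
                     /*x=*/0, /*y=*/0,
                     /*width=*/64, /*height=*/64);
}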
diff --git a/chromium/gpu/command_buffer/client/raster_interface_autogen.h b/chromium/gpu/command_buffer/client/raster_interface_autogen.h
index 3f1b71310f4..59fd8e7c712 100644
--- a/chromium/gpu/command_buffer/client/raster_interface_autogen.h
+++ b/chromium/gpu/command_buffer/client/raster_interface_autogen.h
@@ -13,7 +13,6 @@
#ifndef GPU_COMMAND_BUFFER_CLIENT_RASTER_INTERFACE_AUTOGEN_H_
#define GPU_COMMAND_BUFFER_CLIENT_RASTER_INTERFACE_AUTOGEN_H_
-virtual void DeleteTextures(GLsizei n, const GLuint* textures) = 0;
virtual void Finish() = 0;
virtual void Flush() = 0;
virtual GLenum GetError() = 0;
@@ -25,24 +24,11 @@ virtual void BeginQueryEXT(GLenum target, GLuint id) = 0;
virtual void EndQueryEXT(GLenum target) = 0;
virtual void GetQueryObjectuivEXT(GLuint id, GLenum pname, GLuint* params) = 0;
virtual void LoseContextCHROMIUM(GLenum current, GLenum other) = 0;
-virtual void GenSyncTokenCHROMIUM(GLbyte* sync_token) = 0;
virtual void GenUnverifiedSyncTokenCHROMIUM(GLbyte* sync_token) = 0;
virtual void VerifySyncTokensCHROMIUM(GLbyte** sync_tokens, GLsizei count) = 0;
virtual void WaitSyncTokenCHROMIUM(const GLbyte* sync_token) = 0;
virtual GLenum GetGraphicsResetStatusKHR() = 0;
virtual void EndRasterCHROMIUM() = 0;
-virtual GLuint CreateAndConsumeTexture(bool use_buffer,
- gfx::BufferUsage buffer_usage,
- viz::ResourceFormat format,
- const GLbyte* mailbox) = 0;
-virtual void CopySubTexture(GLuint source_id,
- GLuint dest_id,
- GLint xoffset,
- GLint yoffset,
- GLint x,
- GLint y,
- GLsizei width,
- GLsizei height) = 0;
virtual void TraceBeginCHROMIUM(const char* category_name,
const char* trace_name) = 0;
virtual void TraceEndCHROMIUM() = 0;
diff --git a/chromium/gpu/command_buffer/client/ring_buffer.cc b/chromium/gpu/command_buffer/client/ring_buffer.cc
index 1b080ec5fc1..00670464802 100644
--- a/chromium/gpu/command_buffer/client/ring_buffer.cc
+++ b/chromium/gpu/command_buffer/client/ring_buffer.cc
@@ -16,14 +16,14 @@
namespace gpu {
-RingBuffer::RingBuffer(unsigned int alignment,
+RingBuffer::RingBuffer(uint32_t alignment,
Offset base_offset,
- size_t size,
+ uint32_t size,
CommandBufferHelper* helper,
void* base)
: helper_(helper),
base_offset_(base_offset),
- size_(base::checked_cast<unsigned int>(size)),
+ size_(size),
alignment_(alignment),
base_(static_cast<int8_t*>(base) - base_offset) {}
@@ -53,7 +53,7 @@ void RingBuffer::FreeOldestBlock() {
blocks_.pop_front();
}
-void* RingBuffer::Alloc(unsigned int size) {
+void* RingBuffer::Alloc(uint32_t size) {
DCHECK_LE(size, size_) << "attempt to allocate more than maximum memory";
// Similarly to malloc, an allocation of 0 allocates at least 1 byte, to
// return different pointers every time.
@@ -87,8 +87,7 @@ void* RingBuffer::Alloc(unsigned int size) {
return GetPointer(offset + base_offset_);
}
-void RingBuffer::FreePendingToken(void* pointer,
- unsigned int token) {
+void RingBuffer::FreePendingToken(void* pointer, uint32_t token) {
Offset offset = GetOffset(pointer);
offset -= base_offset_;
DCHECK(!blocks_.empty()) << "no allocations to free";
@@ -150,13 +149,13 @@ void RingBuffer::DiscardBlock(void* pointer) {
NOTREACHED() << "attempt to discard non-existant block";
}
-unsigned int RingBuffer::GetLargestFreeSizeNoWaiting() {
- unsigned int size = GetLargestFreeSizeNoWaitingInternal();
+uint32_t RingBuffer::GetLargestFreeSizeNoWaiting() {
+ uint32_t size = GetLargestFreeSizeNoWaitingInternal();
DCHECK_EQ(size, RoundToAlignment(size));
return size;
}
-unsigned int RingBuffer::GetLargestFreeSizeNoWaitingInternal() {
+uint32_t RingBuffer::GetLargestFreeSizeNoWaitingInternal() {
while (!blocks_.empty()) {
Block& block = blocks_.front();
if (!helper_->HasTokenPassed(block.token) || block.state == IN_USE) break;
@@ -180,8 +179,8 @@ unsigned int RingBuffer::GetLargestFreeSizeNoWaitingInternal() {
}
}
-unsigned int RingBuffer::GetTotalFreeSizeNoWaiting() {
- unsigned int largest_free_size = GetLargestFreeSizeNoWaitingInternal();
+uint32_t RingBuffer::GetTotalFreeSizeNoWaiting() {
+ uint32_t largest_free_size = GetLargestFreeSizeNoWaitingInternal();
if (free_offset_ > in_use_offset_) {
// It's free from free_offset_ to size_ and from 0 to in_use_offset_.
return size_ - free_offset_ + in_use_offset_;
@@ -190,7 +189,7 @@ unsigned int RingBuffer::GetTotalFreeSizeNoWaiting() {
}
}
-void RingBuffer::ShrinkLastBlock(unsigned int new_size) {
+void RingBuffer::ShrinkLastBlock(uint32_t new_size) {
if (blocks_.empty())
return;
auto& block = blocks_.back();
diff --git a/chromium/gpu/command_buffer/client/ring_buffer.h b/chromium/gpu/command_buffer/client/ring_buffer.h
index 3eac7f062c5..f0260979f33 100644
--- a/chromium/gpu/command_buffer/client/ring_buffer.h
+++ b/chromium/gpu/command_buffer/client/ring_buffer.h
@@ -22,7 +22,7 @@ class CommandBufferHelper;
// allocations must not be kept past new allocations.
class GPU_EXPORT RingBuffer {
public:
- typedef unsigned int Offset;
+ typedef uint32_t Offset;
// Creates a RingBuffer.
// Parameters:
@@ -31,9 +31,9 @@ class GPU_EXPORT RingBuffer {
// size: The size of the buffer in bytes.
// helper: A CommandBufferHelper for dealing with tokens.
// base: The physical address that corresponds to base_offset.
- RingBuffer(unsigned int alignment,
+ RingBuffer(uint32_t alignment,
Offset base_offset,
- size_t size,
+ uint32_t size,
CommandBufferHelper* helper,
void* base);
@@ -51,7 +51,7 @@ class GPU_EXPORT RingBuffer {
//
// Returns:
// the pointer to the allocated memory block.
- void* Alloc(unsigned int size);
+ void* Alloc(uint32_t size);
// Frees a block of memory, pending the passage of a token. That memory won't
// be re-allocated until the token has passed through the command stream.
@@ -61,7 +61,7 @@ class GPU_EXPORT RingBuffer {
// Parameters:
// pointer: the pointer to the memory block to free.
// token: the token value to wait for before re-using the memory.
- void FreePendingToken(void* pointer, unsigned int token);
+ void FreePendingToken(void* pointer, uint32_t token);
// Discards a block within the ring buffer.
//
@@ -70,15 +70,15 @@ class GPU_EXPORT RingBuffer {
void DiscardBlock(void* pointer);
// Gets the size of the largest free block that is available without waiting.
- unsigned int GetLargestFreeSizeNoWaiting();
+ uint32_t GetLargestFreeSizeNoWaiting();
// Gets the total size of all free blocks that are available without waiting.
- unsigned int GetTotalFreeSizeNoWaiting();
+ uint32_t GetTotalFreeSizeNoWaiting();
// Gets the size of the largest free block that can be allocated if the
// caller can wait. Allocating a block of this size will succeed, but may
// block.
- unsigned int GetLargestFreeOrPendingSize() {
+ uint32_t GetLargestFreeOrPendingSize() {
// If size_ is not a multiple of alignment_, then trying to allocate it will
// cause us to try to allocate more than we actually can due to rounding up.
// So, round down here.
@@ -86,9 +86,9 @@ class GPU_EXPORT RingBuffer {
}
// Total size minus usable size.
- unsigned int GetUsedSize() { return size_ - GetLargestFreeSizeNoWaiting(); }
+ uint32_t GetUsedSize() { return size_ - GetLargestFreeSizeNoWaiting(); }
- unsigned int NumUsedBlocks() const { return num_used_blocks_; }
+ uint32_t NumUsedBlocks() const { return num_used_blocks_; }
// Gets a pointer to a memory block given the base memory and the offset.
void* GetPointer(RingBuffer::Offset offset) const {
@@ -101,13 +101,13 @@ class GPU_EXPORT RingBuffer {
}
// Rounds the given size to the alignment in use.
- unsigned int RoundToAlignment(unsigned int size) {
+ uint32_t RoundToAlignment(uint32_t size) {
return (size + alignment_ - 1) & ~(alignment_ - 1);
}
// Shrinks the last block. new_size must be smaller than the current size
// and the block must still be in use in order to shrink.
- void ShrinkLastBlock(unsigned int new_size);
+ void ShrinkLastBlock(uint32_t new_size);
private:
enum State {
@@ -117,23 +117,19 @@ class GPU_EXPORT RingBuffer {
};
// Book-keeping structure that describes a block of memory.
struct Block {
- Block(Offset _offset, unsigned int _size, State _state)
- : offset(_offset),
- size(_size),
- token(0),
- state(_state) {
- }
+ Block(Offset _offset, uint32_t _size, State _state)
+ : offset(_offset), size(_size), token(0), state(_state) {}
Offset offset;
- unsigned int size;
- unsigned int token; // token to wait for.
+ uint32_t size;
+ uint32_t token; // token to wait for.
State state;
};
using Container = base::circular_deque<Block>;
- using BlockIndex = unsigned int;
+ using BlockIndex = uint32_t;
void FreeOldestBlock();
- unsigned int GetLargestFreeSizeNoWaitingInternal();
+ uint32_t GetLargestFreeSizeNoWaitingInternal();
CommandBufferHelper* helper_;
@@ -154,10 +150,10 @@ class GPU_EXPORT RingBuffer {
Offset in_use_offset_ = 0;
// Alignment for allocations.
- unsigned int alignment_;
+ uint32_t alignment_;
// Number of blocks in |blocks_| that are in the IN_USE state.
- unsigned int num_used_blocks_ = 0;
+ uint32_t num_used_blocks_ = 0;
// The physical address that corresponds to base_offset.
void* base_;
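RoundToAlignment above uses the usual power-of-two trick: add alignment - 1, then mask off the low bits, rounding up without a division. A standalone sketch of the same arithmetic with a few worked values:

#include <cassert>
#include <cstdint>

// Same arithmetic as RingBuffer::RoundToAlignment: valid only when
// |alignment| is a power of two, which the ring buffer guarantees.
uint32_t RoundToAlignment(uint32_t size, uint32_t alignment) {
  return (size + alignment - 1) & ~(alignment - 1);
}

int main() {
  assert(RoundToAlignment(0, 4) == 0);
  assert(RoundToAlignment(1, 4) == 4);    // 1 + 3 = 4; mask keeps 4.
  assert(RoundToAlignment(13, 8) == 16);  // 13 + 7 = 20; mask drops to 16.
  assert(RoundToAlignment(16, 8) == 16);  // Already aligned; unchanged.
  return 0;
}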
diff --git a/chromium/gpu/command_buffer/client/shared_image_interface.h b/chromium/gpu/command_buffer/client/shared_image_interface.h
index 32d45a9c2f4..fa1f8032df0 100644
--- a/chromium/gpu/command_buffer/client/shared_image_interface.h
+++ b/chromium/gpu/command_buffer/client/shared_image_interface.h
@@ -6,6 +6,7 @@
#define GPU_COMMAND_BUFFER_CLIENT_SHARED_IMAGE_INTERFACE_H_
#include "base/compiler_specific.h"
+#include "base/containers/span.h"
#include "components/viz/common/resources/resource_format.h"
#include "gpu/command_buffer/common/mailbox.h"
#include "gpu/command_buffer/common/sync_token.h"
@@ -44,6 +45,16 @@ class SharedImageInterface {
const gfx::ColorSpace& color_space,
uint32_t usage) = 0;
+ // Same behavior as the above, except that this version takes |pixel_data|
+ // which is used to populate the SharedImage. |pixel_data| should have the
+  // same format that would be passed to glTexImage2D to populate a similarly
+ // specified texture.
+ virtual Mailbox CreateSharedImage(viz::ResourceFormat format,
+ const gfx::Size& size,
+ const gfx::ColorSpace& color_space,
+ uint32_t usage,
+ base::span<const uint8_t> pixel_data) = 0;
+
// Creates a shared image out of a GpuMemoryBuffer, using |color_space|.
// |usage| is a combination of |SharedImageUsage| bits that describes which
// API(s) the image will be used with. Format and size are derived from the
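A hedged sketch of the new pixel-data overload shown above, assuming a live SharedImageInterface pointer; the RGBA byte layout mirrors what glTexImage2D would take for an RGBA_8888 texture, and everything other than the CreateSharedImage signature itself is illustrative:

// Sketch: create a 2x2 RGBA_8888 shared image pre-populated with opaque red.
const uint8_t kPixels[2 * 2 * 4] = {
    0xFF, 0, 0, 0xFF, 0xFF, 0, 0, 0xFF,
    0xFF, 0, 0, 0xFF, 0xFF, 0, 0, 0xFF};
gpu::Mailbox mailbox = sii->CreateSharedImage(
    viz::RGBA_8888, gfx::Size(2, 2), gfx::ColorSpace::CreateSRGB(),
    gpu::SHARED_IMAGE_USAGE_GLES2, base::make_span(kPixels));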
diff --git a/chromium/gpu/command_buffer/client/shared_memory_limits.h b/chromium/gpu/command_buffer/client/shared_memory_limits.h
index 4fc1f318c05..bef14c3b4c7 100644
--- a/chromium/gpu/command_buffer/client/shared_memory_limits.h
+++ b/chromium/gpu/command_buffer/client/shared_memory_limits.h
@@ -36,7 +36,7 @@ struct SharedMemoryLimits {
#endif
}
- int32_t command_buffer_size = 1024 * 1024;
+ uint32_t command_buffer_size = 1024 * 1024;
uint32_t start_transfer_buffer_size = 64 * 1024;
uint32_t min_transfer_buffer_size = 64 * 1024;
uint32_t max_transfer_buffer_size = 16 * 1024 * 1024;
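These limits are plain struct fields, so embedders tune them by assignment; a short sketch assuming the shared_memory_limits.h header, with illustrative values only:

// A sketch, not a recommendation: construct limits and override fields.
gpu::SharedMemoryLimits MakeExampleLimits() {
  gpu::SharedMemoryLimits limits;
  limits.command_buffer_size = 2 * 1024 * 1024;       // Now uint32_t.
  limits.max_transfer_buffer_size = 8 * 1024 * 1024;  // Stays within uint32_t.
  return limits;
}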
diff --git a/chromium/gpu/command_buffer/client/transfer_buffer.cc b/chromium/gpu/command_buffer/client/transfer_buffer.cc
index 44570ad26a7..cf346c8b544 100644
--- a/chromium/gpu/command_buffer/client/transfer_buffer.cc
+++ b/chromium/gpu/command_buffer/client/transfer_buffer.cc
@@ -153,7 +153,7 @@ void TransferBuffer::ReallocateRingBuffer(unsigned int size, bool shrink) {
if (usable_ && (shrink || needed_buffer_size > current_size)) {
// We should never attempt to reallocate the buffer if someone has a result
// pointer that hasn't been released. This would cause a use-after-free.
- CHECK(!outstanding_result_pointer_);
+ DCHECK(!outstanding_result_pointer_);
if (HaveBuffer()) {
Free();
}
@@ -177,7 +177,7 @@ void TransferBuffer::ShrinkOrExpandRingBufferIfNecessary(
unsigned int size_to_allocate) {
// We should never attempt to shrink the buffer if someone has a result
// pointer that hasn't been released.
- CHECK(!outstanding_result_pointer_);
+ DCHECK(!outstanding_result_pointer_);
// Don't resize the buffer while blocks are in use to avoid throwing away
// live allocations.
if (HaveBuffer() && ring_buffer_->NumUsedBlocks() > 0)
@@ -244,13 +244,17 @@ void* TransferBuffer::AcquireResultBuffer() {
// ensure this invariant.
DCHECK(!outstanding_result_pointer_);
ReallocateRingBuffer(result_size_);
+#if DCHECK_IS_ON()
outstanding_result_pointer_ = true;
+#endif
return result_buffer_;
}
void TransferBuffer::ReleaseResultBuffer() {
DCHECK(outstanding_result_pointer_);
+#if DCHECK_IS_ON()
outstanding_result_pointer_ = false;
+#endif
}
int TransferBuffer::GetResultOffset() {
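The CHECKs above become DCHECKs, and the outstanding-pointer flag is now only updated in DCHECK-enabled builds, so release builds pay nothing for the bookkeeping. A standalone sketch of the pattern using the standard library's NDEBUG as the analogue of DCHECK_IS_ON(); the ResultSlot class is illustrative, not the real TransferBuffer:

#include <cassert>

// Standard-library analogue of the DCHECK_IS_ON() guard above: the flag and
// its updates are compiled only into assert-enabled builds.
class ResultSlot {
 public:
  void* Acquire() {
#ifndef NDEBUG
    assert(!outstanding_);
    outstanding_ = true;
#endif
    return buffer_;
  }

  void Release() {
#ifndef NDEBUG
    assert(outstanding_);
    outstanding_ = false;
#endif
  }

 private:
  void* buffer_ = nullptr;
#ifndef NDEBUG
  bool outstanding_ = false;  // Exists only when asserts are enabled.
#endif
};

int main() {
  ResultSlot slot;
  slot.Acquire();
  slot.Release();
  return 0;
}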
diff --git a/chromium/gpu/command_buffer/client/transfer_buffer_cmd_copy_helpers.h b/chromium/gpu/command_buffer/client/transfer_buffer_cmd_copy_helpers.h
new file mode 100644
index 00000000000..161aaba0b25
--- /dev/null
+++ b/chromium/gpu/command_buffer/client/transfer_buffer_cmd_copy_helpers.h
@@ -0,0 +1,194 @@
+// Copyright (c) 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef GPU_COMMAND_BUFFER_CLIENT_TRANSFER_BUFFER_CMD_COPY_HELPERS_H_
+#define GPU_COMMAND_BUFFER_CLIENT_TRANSFER_BUFFER_CMD_COPY_HELPERS_H_
+
+#include "base/bits.h"
+#include "base/numerics/safe_math.h"
+#include "gpu/command_buffer/client/transfer_buffer.h"
+
+namespace gpu {
+
+// Sum the sizes of the types in Ts as CheckedNumeric<T>.
+template <typename T, typename... Ts>
+constexpr base::CheckedNumeric<T> CheckedSizeOfPackedTypes() {
+ static_assert(sizeof...(Ts) > 0, "");
+ base::CheckedNumeric<T> checked_elements_size = 0;
+ for (size_t s : {sizeof(Ts)...}) {
+ checked_elements_size += s;
+ }
+ return checked_elements_size;
+}
+
+// Compute the number of bytes required for a struct-of-arrays where each array
+// of type T has count items. On overflow, the returned CheckedNumeric is
+// invalid.
+template <typename... Ts>
+constexpr base::CheckedNumeric<uint32_t> ComputeCheckedCombinedCopySize(
+ uint32_t count) {
+ static_assert(sizeof...(Ts) > 0, "");
+ base::CheckedNumeric<uint32_t> checked_combined_size = 0;
+ base::CheckedNumeric<uint32_t> checked_count(count);
+ for (auto info : {std::make_pair(sizeof(Ts), alignof(Ts))...}) {
+ size_t alignment = info.second;
+ DCHECK(base::bits::IsPowerOfTwo(alignment));
+
+ checked_combined_size =
+ (checked_combined_size + alignment - 1) & ~(alignment - 1);
+ checked_combined_size += checked_count * info.first;
+ }
+ return checked_combined_size;
+}
+
+// Copy count items from each array in arrays starting at array[offset_count]
+// into the address pointed to by buffer
+template <typename... Ts>
+auto CopyArraysToBuffer(uint32_t count,
+ uint32_t offset_count,
+ void* buffer,
+ Ts*... arrays)
+ -> std::array<uint32_t, sizeof...(arrays)> {
+ constexpr uint32_t arr_count = sizeof...(arrays);
+ static_assert(arr_count > 0, "Requires at least one array");
+ DCHECK_GT(count, 0u);
+ DCHECK(buffer);
+
+ // Length of each copy
+ std::array<size_t, arr_count> copy_lengths{{(count * sizeof(Ts))...}};
+
+ std::array<size_t, arr_count> alignments{{alignof(Ts)...}};
+
+ // Offset to the destination of each copy
+ std::array<uint32_t, arr_count> byte_offsets{};
+ byte_offsets[0] = 0;
+ base::CheckedNumeric<uint32_t> checked_byte_offset = copy_lengths[0];
+ for (uint32_t i = 1; i < arr_count; ++i) {
+ DCHECK(base::bits::IsPowerOfTwo(alignments[i]));
+ checked_byte_offset =
+ (checked_byte_offset + alignments[i] - 1) & ~(alignments[i] - 1);
+ byte_offsets[i] = checked_byte_offset.ValueOrDie();
+ checked_byte_offset += copy_lengths[i];
+ }
+
+ // Pointers to the copy sources
+ std::array<const int8_t*, arr_count> byte_pointers{
+ {(DCHECK(arrays),
+ reinterpret_cast<const int8_t*>(arrays + offset_count))...}};
+
+ for (uint32_t i = 0; i < arr_count; ++i) {
+ memcpy(static_cast<int8_t*>(buffer) + byte_offsets[i], byte_pointers[i],
+ copy_lengths[i]);
+ }
+
+ return byte_offsets;
+}
+
+// Sum the sizes of the types in Ts. This will fail to compile if the result
+// does not fit in T.
+template <typename T, typename... Ts>
+constexpr T SizeOfPackedTypes() {
+ constexpr base::CheckedNumeric<T> checked_elements_size =
+ CheckedSizeOfPackedTypes<T, Ts...>();
+ static_assert(checked_elements_size.IsValid(), "");
+ return checked_elements_size.ValueOrDie();
+}
+
+template <typename... Ts>
+constexpr uint32_t ComputeCombinedCopySize(uint32_t count) {
+ return ComputeCheckedCombinedCopySize<Ts...>(count).ValueOrDefault(
+ UINT32_MAX);
+}
+
+template <typename... Ts>
+constexpr uint32_t ComputeCombinedCopySize(uint32_t count,
+ const Ts*... arrays) {
+ return ComputeCheckedCombinedCopySize<Ts...>(count).ValueOrDefault(
+ UINT32_MAX);
+}
+
+// Compute the largest array size for a struct-of-arrays that can fit inside
+// a buffer
+template <typename... Ts>
+constexpr uint32_t ComputeMaxCopyCount(uint32_t buffer_size) {
+ // Start by tightly packing the elements and decrease copy_count until
+ // the total aligned copy size fits
+ constexpr uint32_t elements_size = SizeOfPackedTypes<uint32_t, Ts...>();
+ uint32_t copy_count = buffer_size / elements_size;
+
+ while (copy_count > 0) {
+ base::CheckedNumeric<uint32_t> checked_combined_size =
+ ComputeCheckedCombinedCopySize<Ts...>(copy_count);
+ uint32_t combined_size = 0;
+ if (checked_combined_size.AssignIfValid(&combined_size) &&
+ combined_size <= buffer_size) {
+ break;
+ }
+ copy_count--;
+ }
+
+ return copy_count;
+}
+
+} // namespace gpu
+
+namespace internal {
+
+// The transfer buffer may not fit all count items from each array in arrays.
+// This function copies an equal number of items from each array into the buffer
+// and calls a callback function f. It releases the buffer and repeats until
+// all items have been consumed.
+template <typename F, typename TransferBuffer, typename... Ts>
+bool TransferArraysAndExecute(uint32_t count,
+ TransferBuffer* buffer,
+ const F& f,
+ Ts*... arrays) {
+ static_assert(sizeof...(arrays) > 0, "Requires at least one array");
+ DCHECK(buffer);
+
+ uint32_t offset_count = 0;
+ while (count) {
+ uint32_t desired_size =
+ gpu::ComputeCheckedCombinedCopySize<Ts...>(count).ValueOrDefault(
+ UINT32_MAX);
+ uint32_t copy_count = gpu::ComputeMaxCopyCount<Ts...>(buffer->size());
+ if (!buffer->valid() || copy_count == 0) {
+ // Reset the buffer to the desired size
+ buffer->Reset(desired_size);
+ if (!buffer->valid()) {
+ return false;
+ }
+ // The buffer size may be less than the desired size. Recompute the number
+ // of elements to copy.
+ copy_count = gpu::ComputeMaxCopyCount<Ts...>(buffer->size());
+ if (copy_count == 0) {
+ return false;
+ }
+ }
+
+ std::array<uint32_t, sizeof...(arrays)> byte_offsets =
+ gpu::CopyArraysToBuffer(copy_count, offset_count, buffer->address(),
+ arrays...);
+ f(byte_offsets, offset_count, copy_count);
+ buffer->Release();
+ offset_count += copy_count;
+ count -= copy_count;
+ }
+ return true;
+}
+
+} // namespace internal
+
+namespace gpu {
+template <typename F, typename... Ts>
+bool TransferArraysAndExecute(uint32_t count,
+ ScopedTransferBufferPtr* buffer,
+ const F& f,
+ Ts*... arrays) {
+ return internal::TransferArraysAndExecute<F, ScopedTransferBufferPtr, Ts...>(
+ count, buffer, f, arrays...);
+}
+
+} // namespace gpu
+
+#endif // GPU_COMMAND_BUFFER_CLIENT_TRANSFER_BUFFER_CMD_COPY_HELPERS_H_
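The per-type alignment step in ComputeCheckedCombinedCopySize is worth tracing by hand. A standalone sketch of the same layout arithmetic for char/int/float arrays of three elements each, using plain size_t math instead of base::CheckedNumeric; the byte totals assume the common ABIs where int and float are 4 bytes with 4-byte alignment:

#include <cassert>
#include <cstddef>

// Mirrors the loop in ComputeCheckedCombinedCopySize for <char, int, float>,
// count = 3, minus the overflow checking.
int main() {
  const size_t count = 3;
  size_t size = 0;
  // char array: no padding needed at offset 0.
  size += count * sizeof(char);                                // size = 3
  // int array: round 3 up to alignof(int) == 4, then add 3 ints.
  size = (size + alignof(int) - 1) & ~(alignof(int) - 1);      // size = 4
  size += count * sizeof(int);                                 // size = 16
  // float array: 16 is already 4-aligned, so no padding is added.
  size = (size + alignof(float) - 1) & ~(alignof(float) - 1);  // still 16
  size += count * sizeof(float);                               // size = 28
  assert(size == 28);  // Matches the unittest's <char, int, float>(3) case.
  return 0;
}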
diff --git a/chromium/gpu/command_buffer/client/transfer_buffer_cmd_copy_helpers_unittest.cc b/chromium/gpu/command_buffer/client/transfer_buffer_cmd_copy_helpers_unittest.cc
new file mode 100644
index 00000000000..a9503190d29
--- /dev/null
+++ b/chromium/gpu/command_buffer/client/transfer_buffer_cmd_copy_helpers_unittest.cc
@@ -0,0 +1,217 @@
+// Copyright (c) 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "gpu/command_buffer/client/transfer_buffer_cmd_copy_helpers.h"
+
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace gpu {
+
+namespace {
+
+// Define a fake scoped transfer buffer to test helpers
+class FakeScopedTransferBufferPtr {
+ public:
+ FakeScopedTransferBufferPtr(uint32_t max_size)
+ : valid_(false), max_size_(max_size), buffer_() {}
+
+ void Reset(uint32_t size) {
+ buffer_.resize(std::min(max_size_, size));
+ std::fill(buffer_.begin(), buffer_.end(), 0);
+ valid_ = true;
+ }
+ void Release() { buffer_.clear(); }
+ uint32_t size() const { return static_cast<uint32_t>(buffer_.size()); }
+ bool valid() const { return valid_; }
+ void* address() { return buffer_.data(); }
+
+ private:
+ bool valid_;
+ uint32_t max_size_;
+ std::vector<uint8_t> buffer_;
+};
+
+constexpr uint32_t MaxCopyCount(uint32_t buffer_size) {
+ return ComputeMaxCopyCount<char, short, float, size_t>(buffer_size);
+}
+
+} // namespace
+
+class TransferBufferCmdCopyHelpersTest : public testing::Test {
+ protected:
+ struct BigStruct {
+ std::array<char, UINT32_MAX> a;
+ };
+ struct ExpectedBuffers {
+ std::vector<char> a;
+ std::vector<short> b;
+ std::vector<float> c;
+ std::vector<size_t> d;
+
+ ExpectedBuffers(uint32_t count) : a(count), b(count), c(count), d(count) {
+ uint32_t j = 0;
+ for (uint32_t i = 0; i < count; ++i) {
+ a[i] = static_cast<char>(j++);
+ }
+ for (uint32_t i = 0; i < count; ++i) {
+ b[i] = static_cast<short>(j++);
+ }
+ for (uint32_t i = 0; i < count; ++i) {
+ c[i] = static_cast<float>(j++);
+ }
+ for (uint32_t i = 0; i < count; ++i) {
+ d[i] = static_cast<size_t>(j++);
+ }
+ }
+ };
+
+ template <uint32_t BufferSize>
+ void CheckTransferArraysAndExecute(uint32_t count) {
+ FakeScopedTransferBufferPtr transfer_buffer(BufferSize);
+ ExpectedBuffers expected(count);
+
+ EXPECT_TRUE(::internal::TransferArraysAndExecute(
+ count, &transfer_buffer,
+ [&](std::array<uint32_t, 4>& byte_offsets, uint32_t copy_offset,
+ uint32_t copy_count) {
+ // Check that each sub-copy is correct
+ const uint8_t* buffer =
+ reinterpret_cast<uint8_t*>(transfer_buffer.address());
+ EXPECT_EQ(memcmp(&buffer[byte_offsets[0]], &expected.a[copy_offset],
+ copy_count * sizeof(char)),
+ 0);
+ EXPECT_EQ(memcmp(&buffer[byte_offsets[1]], &expected.b[copy_offset],
+ copy_count * sizeof(short)),
+ 0);
+ EXPECT_EQ(memcmp(&buffer[byte_offsets[2]], &expected.c[copy_offset],
+ copy_count * sizeof(float)),
+ 0);
+ EXPECT_EQ(memcmp(&buffer[byte_offsets[3]], &expected.d[copy_offset],
+ copy_count * sizeof(size_t)),
+ 0);
+ },
+ expected.a.data(), expected.b.data(), expected.c.data(),
+ expected.d.data()));
+ }
+};
+
+// Check packed size computation
+TEST_F(TransferBufferCmdCopyHelpersTest, CheckedSizeOfTypes) {
+  EXPECT_EQ((SizeOfPackedTypes<uint32_t, char>()), sizeof(char));
+ EXPECT_EQ((SizeOfPackedTypes<uint32_t, int>()), sizeof(int));
+ EXPECT_EQ((SizeOfPackedTypes<uint32_t, float>()), sizeof(float));
+ EXPECT_EQ((SizeOfPackedTypes<uint32_t, float, int>()),
+ sizeof(float) + sizeof(int));
+ EXPECT_EQ((SizeOfPackedTypes<uint32_t, BigStruct>()), sizeof(BigStruct));
+}
+
+// Check copy size computations which do not require padding elements
+TEST_F(TransferBufferCmdCopyHelpersTest, ComputeCombinedCopySizeAligned) {
+ EXPECT_EQ((ComputeCombinedCopySize<char, int, float>(4)),
+ 4 * sizeof(char) + 4 * sizeof(int) + 4 * sizeof(float));
+
+ EXPECT_EQ((ComputeCombinedCopySize<float, int, char>(3)),
+ 3 * sizeof(float) + 3 * sizeof(int) + 3 * sizeof(char));
+
+ EXPECT_EQ((ComputeCombinedCopySize<BigStruct>(1)), sizeof(BigStruct));
+}
+
+// Check copy size computations where elements do require padding
+TEST_F(TransferBufferCmdCopyHelpersTest, ComputeCombinedCopySizeUnaligned) {
+ EXPECT_EQ((ComputeCombinedCopySize<char, int, float>(3)),
+ 4 * sizeof(char) + 3 * sizeof(int) + 3 * sizeof(float));
+
+ EXPECT_EQ((ComputeCombinedCopySize<char, int, float>(5)),
+ 8 * sizeof(char) + 5 * sizeof(int) + 5 * sizeof(float));
+}
+
+// Check that overflow in copy size computation returns UINT32_MAX
+TEST_F(TransferBufferCmdCopyHelpersTest, ComputeCombinedCopySizeOverflow) {
+ EXPECT_EQ((ComputeCombinedCopySize<BigStruct, short>(1)), UINT32_MAX);
+ EXPECT_EQ((ComputeCombinedCopySize<short, BigStruct>(1)), UINT32_MAX);
+ EXPECT_EQ((ComputeCombinedCopySize<float>(UINT32_MAX / sizeof(float) + 1)),
+ UINT32_MAX);
+ EXPECT_EQ((ComputeCombinedCopySize<BigStruct, BigStruct>(2)), UINT32_MAX);
+}
+
+// Check that the computed copy count is correct when padding is not required
+TEST_F(TransferBufferCmdCopyHelpersTest, ComputeMaxCopyCountAligned) {
+ EXPECT_EQ((ComputeMaxCopyCount<BigStruct>(UINT32_MAX)), 1u);
+ EXPECT_EQ((ComputeMaxCopyCount<int, float>(64)), 8u);
+ EXPECT_EQ((ComputeMaxCopyCount<char>(64)), 64u);
+ EXPECT_EQ((ComputeMaxCopyCount<short, char, char>(64)), 16u);
+}
+
+// Check that the computed copy count is correct when padding is required
+TEST_F(TransferBufferCmdCopyHelpersTest, ComputeMaxCopyCountUnaligned) {
+ EXPECT_EQ((ComputeMaxCopyCount<char, int, float>(64)), 7u);
+ EXPECT_EQ((ComputeMaxCopyCount<char, short, int>(64)), 9u);
+}
+
+// Check that the computed copy count for a buffer of size 0 is 0.
+TEST_F(TransferBufferCmdCopyHelpersTest, ComputeMaxCopyCountZero) {
+ uint32_t buffer_size = 0;
+ EXPECT_EQ((ComputeMaxCopyCount<char>(buffer_size)), 0u);
+ EXPECT_EQ((ComputeMaxCopyCount<int, float>(buffer_size)), 0u);
+ EXPECT_EQ((ComputeMaxCopyCount<BigStruct>(buffer_size)), 0u);
+}
+
+// Check that the copy count is 0 for elements whose packed size fits in the
+// buffer but whose aligned size does not
+TEST_F(TransferBufferCmdCopyHelpersTest, ComputeMaxCopyCountOverflow) {
+ EXPECT_EQ((ComputeMaxCopyCount<char, float>(
+ SizeOfPackedTypes<uint32_t, char, float>())),
+ 0u);
+ EXPECT_EQ((ComputeMaxCopyCount<short, float>(
+ SizeOfPackedTypes<uint32_t, short, float>())),
+ 0u);
+ EXPECT_EQ((ComputeMaxCopyCount<char, size_t>(
+ SizeOfPackedTypes<uint32_t, char, size_t>())),
+ 0u);
+ EXPECT_EQ((ComputeMaxCopyCount<short, size_t>(
+ SizeOfPackedTypes<uint32_t, short, size_t>())),
+ 0u);
+}
+
+// Check that copied results are as expected and correctly aligned
+TEST_F(TransferBufferCmdCopyHelpersTest, TransferArraysAndExecute) {
+ // Aligned: Copy 1 element from each buffer into a transfer buffer of 256
+ // bytes
+ CheckTransferArraysAndExecute<256>(1);
+
+ // Aligned: Copy as many elements as possible from each buffer into a transfer
+ // buffer of 256 bytes
+ CheckTransferArraysAndExecute<256>(MaxCopyCount(256));
+
+  // Unaligned: Copy 1 element from each buffer into a transfer buffer of 257
+  // bytes
+ CheckTransferArraysAndExecute<257>(1);
+
+ // Unaligned: Copy as many elements as possible from each buffer into a
+ // transfer buffer of 257 bytes
+ CheckTransferArraysAndExecute<257>(MaxCopyCount(257));
+
+ // Large: Copy 1 element from each buffer into a transfer buffer of UINT32_MAX
+ // bytes
+ CheckTransferArraysAndExecute<UINT32_MAX>(1);
+
+  // Large: Copy MaxCopyCount(256) elements from each buffer into a transfer
+  // buffer of UINT32_MAX bytes
+ CheckTransferArraysAndExecute<UINT32_MAX>(MaxCopyCount(256));
+}
+
+// Check copies that overflow and require multiple transfer buffers
+TEST_F(TransferBufferCmdCopyHelpersTest, TransferArraysAndExecuteOverflow) {
+ // Check aligned transfers
+ CheckTransferArraysAndExecute<256>(256);
+ CheckTransferArraysAndExecute<256>(512);
+ CheckTransferArraysAndExecute<4096>(64 * MaxCopyCount(4096));
+
+ // Check unaligned transfers
+ CheckTransferArraysAndExecute<257>(256);
+ CheckTransferArraysAndExecute<253>(513);
+ CheckTransferArraysAndExecute<4097>(MaxCopyCount(4097));
+}
+
+} // namespace gpu
diff --git a/chromium/gpu/command_buffer/client/transfer_buffer_unittest.cc b/chromium/gpu/command_buffer/client/transfer_buffer_unittest.cc
index 802961baf28..002bda4f0ed 100644
--- a/chromium/gpu/command_buffer/client/transfer_buffer_unittest.cc
+++ b/chromium/gpu/command_buffer/client/transfer_buffer_unittest.cc
@@ -33,9 +33,9 @@ class TransferBufferTest : public testing::Test {
static const int32_t kNumCommandEntries = 400;
static const int32_t kCommandBufferSizeBytes =
kNumCommandEntries * sizeof(CommandBufferEntry);
- static const unsigned int kStartingOffset = 64;
- static const unsigned int kAlignment = 4;
- static const size_t kTransferBufferSize = 256;
+ static const uint32_t kStartingOffset = 64;
+ static const uint32_t kAlignment = 4;
+ static const uint32_t kTransferBufferSize = 256;
TransferBufferTest()
: transfer_buffer_id_(0) {
@@ -91,9 +91,9 @@ void TransferBufferTest::TearDown() {
#ifndef _MSC_VER
const int32_t TransferBufferTest::kNumCommandEntries;
const int32_t TransferBufferTest::kCommandBufferSizeBytes;
-const unsigned int TransferBufferTest::kStartingOffset;
-const unsigned int TransferBufferTest::kAlignment;
-const size_t TransferBufferTest::kTransferBufferSize;
+const uint32_t TransferBufferTest::kStartingOffset;
+const uint32_t TransferBufferTest::kAlignment;
+const uint32_t TransferBufferTest::kTransferBufferSize;
#endif
TEST_F(TransferBufferTest, Basic) {
@@ -159,7 +159,7 @@ TEST_F(TransferBufferTest, Free) {
EXPECT_EQ(base::UnguessableToken(), transfer_buffer_->shared_memory_guid());
// See that it gets reallocated.
- unsigned int size = 0;
+ uint32_t size = 0;
void* data = transfer_buffer_->AllocUpTo(1, &size);
EXPECT_TRUE(data != nullptr);
EXPECT_TRUE(transfer_buffer_->HaveBuffer());
@@ -206,7 +206,7 @@ TEST_F(TransferBufferTest, TooLargeAllocation) {
void* ptr = transfer_buffer_->Alloc(kTransferBufferSize + 1);
EXPECT_TRUE(ptr == nullptr);
// Check that if we try to allocate larger than the max we get the max.
- unsigned int size_allocated = 0;
+ uint32_t size_allocated = 0;
ptr = transfer_buffer_->AllocUpTo(
kTransferBufferSize + 1, &size_allocated);
ASSERT_TRUE(ptr != nullptr);
@@ -231,9 +231,9 @@ class MockClientCommandBufferCanFail : public MockClientCommandBufferMockFlush {
~MockClientCommandBufferCanFail() override = default;
MOCK_METHOD2(CreateTransferBuffer,
- scoped_refptr<Buffer>(size_t size, int32_t* id));
+ scoped_refptr<Buffer>(uint32_t size, int32_t* id));
- scoped_refptr<gpu::Buffer> RealCreateTransferBuffer(size_t size,
+ scoped_refptr<gpu::Buffer> RealCreateTransferBuffer(uint32_t size,
int32_t* id) {
return MockClientCommandBufferMockFlush::CreateTransferBuffer(size, id);
}
@@ -244,11 +244,11 @@ class TransferBufferExpandContractTest : public testing::Test {
static const int32_t kNumCommandEntries = 400;
static const int32_t kCommandBufferSizeBytes =
kNumCommandEntries * sizeof(CommandBufferEntry);
- static const unsigned int kStartingOffset = 64;
- static const unsigned int kAlignment = 4;
- static const size_t kStartTransferBufferSize = 256;
- static const size_t kMaxTransferBufferSize = 1024;
- static const size_t kMinTransferBufferSize = 128;
+ static const uint32_t kStartingOffset = 64;
+ static const uint32_t kAlignment = 4;
+ static const uint32_t kStartTransferBufferSize = 256;
+ static const uint32_t kMaxTransferBufferSize = 1024;
+ static const uint32_t kMinTransferBufferSize = 128;
TransferBufferExpandContractTest()
: transfer_buffer_id_(0) {
@@ -321,11 +321,11 @@ void TransferBufferExpandContractTest::TearDown() {
#ifndef _MSC_VER
const int32_t TransferBufferExpandContractTest::kNumCommandEntries;
const int32_t TransferBufferExpandContractTest::kCommandBufferSizeBytes;
-const unsigned int TransferBufferExpandContractTest::kStartingOffset;
-const unsigned int TransferBufferExpandContractTest::kAlignment;
-const size_t TransferBufferExpandContractTest::kStartTransferBufferSize;
-const size_t TransferBufferExpandContractTest::kMaxTransferBufferSize;
-const size_t TransferBufferExpandContractTest::kMinTransferBufferSize;
+const uint32_t TransferBufferExpandContractTest::kStartingOffset;
+const uint32_t TransferBufferExpandContractTest::kAlignment;
+const uint32_t TransferBufferExpandContractTest::kStartTransferBufferSize;
+const uint32_t TransferBufferExpandContractTest::kMaxTransferBufferSize;
+const uint32_t TransferBufferExpandContractTest::kMinTransferBufferSize;
#endif
TEST_F(TransferBufferExpandContractTest, ExpandWithSmallAllocations) {
@@ -352,7 +352,7 @@ TEST_F(TransferBufferExpandContractTest, ExpandWithSmallAllocations) {
transfer_buffer_->GetCurrentMaxAllocationWithoutRealloc());
// Fill the free space.
- unsigned int size_allocated = 0;
+ uint32_t size_allocated = 0;
void* ptr = transfer_buffer_->AllocUpTo(transfer_buffer_->GetFreeSize(),
&size_allocated);
transfer_buffer_->FreePendingToken(ptr, token);
@@ -399,9 +399,9 @@ TEST_F(TransferBufferExpandContractTest, NoExpandWithInUseAllocation) {
transfer_buffer_->GetCurrentMaxAllocationWithoutRealloc());
// Fill the free space in two blocks.
- unsigned int block_size_1 = transfer_buffer_->GetFreeSize() / 2;
- unsigned int block_size_2 = transfer_buffer_->GetFreeSize() - block_size_1;
- unsigned int size_allocated = 0;
+ uint32_t block_size_1 = transfer_buffer_->GetFreeSize() / 2;
+ uint32_t block_size_2 = transfer_buffer_->GetFreeSize() - block_size_1;
+ uint32_t size_allocated = 0;
void* block1 = transfer_buffer_->AllocUpTo(block_size_1, &size_allocated);
EXPECT_EQ(block_size_1, size_allocated);
void* block2 = transfer_buffer_->AllocUpTo(block_size_2, &size_allocated);
@@ -448,7 +448,7 @@ TEST_F(TransferBufferExpandContractTest, ExpandWithLargeAllocations) {
transfer_buffer_->GetCurrentMaxAllocationWithoutRealloc());
// Allocate one byte more than the free space to force expansion.
- unsigned int size_allocated = 0;
+ uint32_t size_allocated = 0;
ExpectCreateTransferBuffer(kStartTransferBufferSize * 2);
void* ptr = transfer_buffer_->AllocUpTo(transfer_buffer_->GetFreeSize() + 1,
&size_allocated);
@@ -456,7 +456,7 @@ TEST_F(TransferBufferExpandContractTest, ExpandWithLargeAllocations) {
// Expand again.
ExpectCreateTransferBuffer(kStartTransferBufferSize * 4);
- unsigned int size_requested = transfer_buffer_->GetFreeSize() + 1;
+ uint32_t size_requested = transfer_buffer_->GetFreeSize() + 1;
ptr = transfer_buffer_->AllocUpTo(size_requested, &size_allocated);
ASSERT_TRUE(ptr != nullptr);
EXPECT_EQ(size_requested, size_allocated);
@@ -500,7 +500,7 @@ TEST_F(TransferBufferExpandContractTest, ShrinkRingBuffer) {
transfer_buffer_->FreePendingToken(ptr, token);
// We shouldn't shrink before we reach the allocation threshold.
- for (size_t allocated = kMaxTransferBufferSize - kStartingOffset;
+ for (uint32_t allocated = kMaxTransferBufferSize - kStartingOffset;
allocated < (kStartTransferBufferSize + kStartingOffset) *
(TransferBuffer::kShrinkThreshold);) {
ptr = transfer_buffer_->Alloc(kStartTransferBufferSize);
@@ -545,9 +545,9 @@ TEST_F(TransferBufferExpandContractTest, Contract) {
&MockClientCommandBufferCanFail::RealCreateTransferBuffer))
.RetiresOnSaturation();
- const size_t kSize1 = 256 - kStartingOffset;
- const size_t kSize2 = 128 - kStartingOffset;
- unsigned int size_allocated = 0;
+ const uint32_t kSize1 = 256 - kStartingOffset;
+ const uint32_t kSize2 = 128 - kStartingOffset;
+ uint32_t size_allocated = 0;
void* ptr = transfer_buffer_->AllocUpTo(kSize1, &size_allocated);
ASSERT_TRUE(ptr != nullptr);
EXPECT_EQ(kSize2, size_allocated);
@@ -602,8 +602,8 @@ TEST_F(TransferBufferExpandContractTest, OutOfMemory) {
DoAll(SetArgPointee<1>(-1), Return(scoped_refptr<gpu::Buffer>())))
.RetiresOnSaturation();
- const size_t kSize1 = 512 - kStartingOffset;
- unsigned int size_allocated = 0;
+ const uint32_t kSize1 = 512 - kStartingOffset;
+ uint32_t size_allocated = 0;
void* ptr = transfer_buffer_->AllocUpTo(kSize1, &size_allocated);
ASSERT_TRUE(ptr == nullptr);
EXPECT_FALSE(transfer_buffer_->HaveBuffer());
@@ -638,9 +638,9 @@ TEST_F(TransferBufferExpandContractTest, ReallocsToDefault) {
}
TEST_F(TransferBufferExpandContractTest, Shrink) {
- unsigned int alloc_size = transfer_buffer_->GetFreeSize();
+ uint32_t alloc_size = transfer_buffer_->GetFreeSize();
EXPECT_EQ(kStartTransferBufferSize - kStartingOffset, alloc_size);
- unsigned int size_allocated = 0;
+ uint32_t size_allocated = 0;
void* ptr = transfer_buffer_->AllocUpTo(alloc_size, &size_allocated);
ASSERT_NE(ptr, nullptr);
@@ -649,13 +649,13 @@ TEST_F(TransferBufferExpandContractTest, Shrink) {
EXPECT_EQ(0u, transfer_buffer_->GetFreeSize());
// Shrink once.
- const unsigned int shrink_size1 = 64;
+ const uint32_t shrink_size1 = 64;
EXPECT_LT(shrink_size1, alloc_size);
transfer_buffer_->ShrinkLastBlock(shrink_size1 - kAlignment + 1);
EXPECT_EQ(alloc_size - shrink_size1, transfer_buffer_->GetFreeSize());
// Shrink again.
- const unsigned int shrink_size2 = 32;
+ const uint32_t shrink_size2 = 32;
EXPECT_LT(shrink_size2, shrink_size1);
transfer_buffer_->ShrinkLastBlock(shrink_size2);
EXPECT_EQ(alloc_size - shrink_size2, transfer_buffer_->GetFreeSize());
@@ -669,10 +669,10 @@ TEST_F(TransferBufferExpandContractTest, Shrink) {
TEST_F(TransferBufferTest, MultipleAllocsAndFrees) {
// An arbitrary size, aligned so that no padding is needed.
- constexpr size_t kArbitrarySize = 16;
+ constexpr uint32_t kArbitrarySize = 16;
Initialize();
- size_t original_free_size = transfer_buffer_->GetFreeSize();
+ uint32_t original_free_size = transfer_buffer_->GetFreeSize();
EXPECT_EQ(transfer_buffer_->GetSize(), original_free_size);
EXPECT_EQ(transfer_buffer_->GetFragmentedFreeSize(), original_free_size);
@@ -742,7 +742,7 @@ TEST_F(TransferBufferTest, MultipleAllocsAndFrees) {
EXPECT_EQ(transfer_buffer_->GetFragmentedFreeSize(), original_free_size);
}
-#if defined(GTEST_HAS_DEATH_TEST)
+#if defined(GTEST_HAS_DEATH_TEST) && DCHECK_IS_ON()
TEST_F(TransferBufferTest, ResizeDuringScopedResultPtr) {
Initialize();
@@ -750,19 +750,18 @@ TEST_F(TransferBufferTest, ResizeDuringScopedResultPtr) {
// If an attempt is made to resize the transfer buffer while a result
// pointer exists, we should hit a DCHECK. Allocate just enough to force a
// resize.
- unsigned int size_allocated;
+ uint32_t size_allocated;
ASSERT_DEATH(transfer_buffer_->AllocUpTo(transfer_buffer_->GetFreeSize() + 1,
&size_allocated),
"outstanding_result_pointer_");
}
-#if DCHECK_IS_ON()
TEST_F(TransferBufferTest, AllocDuringScopedResultPtr) {
Initialize();
ScopedResultPtr<int> ptr(transfer_buffer_.get());
// If an attempt is made to allocate any amount in the transfer buffer while a
// result pointer exists, we should hit a DCHECK.
- unsigned int size_allocated;
+ uint32_t size_allocated;
ASSERT_DEATH(transfer_buffer_->AllocUpTo(transfer_buffer_->GetFreeSize() + 1,
&size_allocated),
"outstanding_result_pointer_");
@@ -776,7 +775,6 @@ TEST_F(TransferBufferTest, TwoScopedResultPtrs) {
"outstanding_result_pointer_");
}
-#endif // DCHECK_IS_ON()
-#endif // defined(GTEST_HAS_DEATH_TEST)
+#endif // defined(GTEST_HAS_DEATH_TEST) && DCHECK_IS_ON()
} // namespace gpu
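The collapsed guard above combines two independent conditions: gtest must support death tests, and the crashes under test only fire when DCHECKs are compiled in. A small illustration assuming gtest and Chromium's base/logging.h; FunctionThatDchecks() is a hypothetical stand-in, not part of the transfer buffer API:

#include "base/logging.h"
#include "testing/gtest/include/gtest/gtest.h"

// Hypothetical stand-in for any call that fails a DCHECK, such as AllocUpTo()
// while a result pointer is outstanding.
void FunctionThatDchecks() {
  DCHECK(false) << "outstanding_result_pointer_";
}

// Death tests exist only when gtest supports them, and the DCHECK only fires
// when DCHECKs are compiled in, hence the combined guard.
#if defined(GTEST_HAS_DEATH_TEST) && DCHECK_IS_ON()
TEST(GuardExampleTest, DiesOnDcheckFailure) {
  ASSERT_DEATH(FunctionThatDchecks(), "outstanding_result_pointer_");
}
#endif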
diff --git a/chromium/gpu/command_buffer/client/vertex_array_object_manager.cc b/chromium/gpu/command_buffer/client/vertex_array_object_manager.cc
index 4893115e11a..f0d0ab87e8a 100644
--- a/chromium/gpu/command_buffer/client/vertex_array_object_manager.cc
+++ b/chromium/gpu/command_buffer/client/vertex_array_object_manager.cc
@@ -16,7 +16,8 @@
namespace gpu {
namespace gles2 {
-static GLsizei RoundUpToMultipleOf4(GLsizei size) {
+template <typename T>
+static T RoundUpToMultipleOf4(T size) {
return (size + 3) & ~3;
}
@@ -444,7 +445,7 @@ bool VertexArrayObjectManager::SetAttribPointer(
const void* ptr,
GLboolean integer) {
// Client side arrays are not allowed in vaos.
- if (buffer_id == 0 && !IsDefaultVAOBound()) {
+ if (buffer_id == 0 && !IsDefaultVAOBound() && ptr) {
return false;
}
bound_vertex_array_object_->SetAttribPointer(
@@ -505,20 +506,27 @@ bool VertexArrayObjectManager::SetupSimulatedClientSideBuffers(
return false;
}
*simulated = true;
- GLsizei total_size = 0;
+ base::CheckedNumeric<GLsizei> checked_total_size = 0;
// Compute the size of the buffer we need.
const VertexArrayObject::VertexAttribs& vertex_attribs =
bound_vertex_array_object_->vertex_attribs();
for (GLuint ii = 0; ii < vertex_attribs.size(); ++ii) {
const VertexArrayObject::VertexAttrib& attrib = vertex_attribs[ii];
if (attrib.IsClientSide() && attrib.enabled()) {
- size_t bytes_per_element =
+ uint32_t bytes_per_element =
GLES2Util::GetGroupSizeForBufferType(attrib.size(), attrib.type());
GLsizei elements = (primcount && attrib.divisor() > 0) ?
((primcount - 1) / attrib.divisor() + 1) : num_elements;
- total_size += RoundUpToMultipleOf4(bytes_per_element * elements);
+ checked_total_size +=
+ RoundUpToMultipleOf4(base::CheckMul(bytes_per_element, elements));
}
}
+ GLsizei total_size = 0;
+ if (!checked_total_size.AssignIfValid(&total_size)) {
+ gl->SetGLError(GL_INVALID_OPERATION, function_name,
+ "size overflow for client side arrays");
+ return false;
+ }
gl_helper->BindBuffer(GL_ARRAY_BUFFER, array_buffer_id_);
array_buffer_offset_ = 0;
if (total_size > array_buffer_size_) {
@@ -528,7 +536,7 @@ bool VertexArrayObjectManager::SetupSimulatedClientSideBuffers(
for (GLuint ii = 0; ii < vertex_attribs.size(); ++ii) {
const VertexArrayObject::VertexAttrib& attrib = vertex_attribs[ii];
if (attrib.IsClientSide() && attrib.enabled()) {
- size_t bytes_per_element =
+ uint32_t bytes_per_element =
GLES2Util::GetGroupSizeForBufferType(attrib.size(), attrib.type());
GLsizei real_stride = attrib.stride() ?
attrib.stride() : static_cast<GLsizei>(bytes_per_element);
@@ -612,8 +620,14 @@ bool VertexArrayObjectManager::SetupSimulatedIndexAndClientSideBuffers(
break;
}
gl_helper->BindBuffer(GL_ELEMENT_ARRAY_BUFFER, element_array_buffer_id_);
- GLsizei bytes_per_element = GLES2Util::GetGLTypeSizeForBuffers(type);
- GLsizei bytes_needed = bytes_per_element * count;
+ uint32_t bytes_per_element = GLES2Util::GetGLTypeSizeForBuffers(type);
+ GLsizei bytes_needed = 0;
+ if (!base::CheckMul(bytes_per_element, count)
+ .AssignIfValid(&bytes_needed)) {
+ gl->SetGLError(GL_INVALID_OPERATION, function_name,
+ "size overflow for client side index arrays");
+ return false;
+ }
if (bytes_needed > element_array_buffer_size_) {
element_array_buffer_size_ = bytes_needed;
gl->BufferDataHelper(GL_ELEMENT_ARRAY_BUFFER, bytes_needed, nullptr,
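Both overflow fixes above follow the same shape: multiply untrusted sizes through base::CheckedNumeric and only act on the result if it is valid. A sketch of that pattern, assuming Chromium's base/numerics/safe_math.h; GLsizei is spelled int32_t here to keep the sketch header-light:

#include <cstdint>

#include "base/numerics/safe_math.h"

// AssignIfValid() returns false if the product overflows int32_t, in which
// case the caller reports GL_INVALID_OPERATION instead of allocating.
bool ComputeBytesNeeded(uint32_t bytes_per_element,
                        int32_t count,
                        int32_t* bytes_needed) {
  return base::CheckMul(bytes_per_element, count).AssignIfValid(bytes_needed);
}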
diff --git a/chromium/gpu/command_buffer/client/vertex_array_object_manager.h b/chromium/gpu/command_buffer/client/vertex_array_object_manager.h
index 9efac4c3c14..9ddf3e4e326 100644
--- a/chromium/gpu/command_buffer/client/vertex_array_object_manager.h
+++ b/chromium/gpu/command_buffer/client/vertex_array_object_manager.h
@@ -9,8 +9,8 @@
#include <stdint.h>
#include <memory>
+#include <unordered_map>
-#include "base/containers/hash_tables.h"
#include "base/macros.h"
#include "gles2_impl_export.h"
@@ -101,7 +101,7 @@ class GLES2_IMPL_EXPORT VertexArrayObjectManager {
GLuint bound_element_array_buffer() const;
private:
- typedef base::hash_map<GLuint, VertexArrayObject*> VertexArrayObjectMap;
+ typedef std::unordered_map<GLuint, VertexArrayObject*> VertexArrayObjectMap;
bool IsDefaultVAOBound() const;
diff --git a/chromium/gpu/command_buffer/client/vertex_array_object_manager_unittest.cc b/chromium/gpu/command_buffer/client/vertex_array_object_manager_unittest.cc
index 7e1f2ef36cc..b532ef7527f 100644
--- a/chromium/gpu/command_buffer/client/vertex_array_object_manager_unittest.cc
+++ b/chromium/gpu/command_buffer/client/vertex_array_object_manager_unittest.cc
@@ -12,6 +12,7 @@
#include <memory>
+#include "base/stl_util.h"
#include "testing/gtest/include/gtest/gtest.h"
namespace gpu {
@@ -81,9 +82,9 @@ TEST_F(VertexArrayObjectManagerTest, UnbindBuffer) {
const GLuint kElementArray = 789;
bool changed = false;
GLuint ids[2] = { 1, 3, };
- manager_->GenVertexArrays(arraysize(ids), ids);
+ manager_->GenVertexArrays(base::size(ids), ids);
// Bind buffers to attribs on 2 vaos.
- for (size_t ii = 0; ii < arraysize(ids); ++ii) {
+ for (size_t ii = 0; ii < base::size(ids); ++ii) {
EXPECT_TRUE(manager_->BindVertexArray(ids[ii], &changed));
EXPECT_TRUE(manager_->SetAttribPointer(
kBufferToUnbind, 0, 4, GL_FLOAT, false, 0, 0, GL_FALSE));
@@ -117,7 +118,7 @@ TEST_F(VertexArrayObjectManagerTest, UnbindBuffer) {
static const GLuint expected_element_array[] = {
0, kElementArray,
};
- for (size_t ii = 0; ii < arraysize(ids); ++ii) {
+ for (size_t ii = 0; ii < base::size(ids); ++ii) {
EXPECT_TRUE(manager_->BindVertexArray(ids[ii], &changed));
for (size_t jj = 0; jj < 4; ++jj) {
uint32_t param = 1;
@@ -200,7 +201,7 @@ TEST_F(VertexArrayObjectManagerTest, HaveEnabledClientSideArrays) {
TEST_F(VertexArrayObjectManagerTest, BindElementArray) {
bool changed = false;
GLuint ids[2] = { 1, 3, };
- manager_->GenVertexArrays(arraysize(ids), ids);
+ manager_->GenVertexArrays(base::size(ids), ids);
// Check the default element array is 0.
EXPECT_EQ(0u, manager_->bound_element_array_buffer());
@@ -240,7 +241,7 @@ TEST_F(VertexArrayObjectManagerTest, GenBindDelete) {
EXPECT_FALSE(changed);
GLuint ids[2] = { 1, 3, };
- manager_->GenVertexArrays(arraysize(ids), ids);
+ manager_->GenVertexArrays(base::size(ids), ids);
// Check Genned arrays succeed.
EXPECT_TRUE(manager_->BindVertexArray(1, &changed));
EXPECT_TRUE(changed);
diff --git a/chromium/gpu/command_buffer/client/webgpu_implementation_unittest.cc b/chromium/gpu/command_buffer/client/webgpu_implementation_unittest.cc
index 384b0fee17d..3238cc16348 100644
--- a/chromium/gpu/command_buffer/client/webgpu_implementation_unittest.cc
+++ b/chromium/gpu/command_buffer/client/webgpu_implementation_unittest.cc
@@ -31,10 +31,10 @@ namespace webgpu {
class WebGPUImplementationTest : public testing::Test {
protected:
static const uint8_t kInitialValue = 0xBD;
- static const int32_t kNumCommandEntries = 500;
- static const int32_t kCommandBufferSizeBytes =
+ static const uint32_t kNumCommandEntries = 500;
+ static const uint32_t kCommandBufferSizeBytes =
kNumCommandEntries * sizeof(CommandBufferEntry);
- static const size_t kTransferBufferSize = 512;
+ static const uint32_t kTransferBufferSize = 512;
static const GLint kMaxCombinedTextureImageUnits = 8;
static const GLint kMaxTextureImageUnits = 8;
diff --git a/chromium/gpu/command_buffer/common/buffer.cc b/chromium/gpu/command_buffer/common/buffer.cc
index 47092ad3e64..f972822841c 100644
--- a/chromium/gpu/command_buffer/common/buffer.cc
+++ b/chromium/gpu/command_buffer/common/buffer.cc
@@ -7,6 +7,7 @@
#include <stddef.h>
#include <stdint.h>
+#include "base/atomic_sequence_num.h"
#include "base/format_macros.h"
#include "base/logging.h"
#include "base/no_destructor.h"
@@ -14,6 +15,12 @@
#include "base/strings/stringprintf.h"
namespace gpu {
+namespace {
+
+// Global atomic to generate unique buffer IDs.
+base::AtomicSequenceNumber g_next_buffer_id;
+
+} // namespace
const base::UnsafeSharedMemoryRegion& BufferBacking::shared_memory_region()
const {
@@ -26,7 +33,7 @@ base::UnguessableToken BufferBacking::GetGUID() const {
return base::UnguessableToken();
}
-MemoryBufferBacking::MemoryBufferBacking(size_t size)
+MemoryBufferBacking::MemoryBufferBacking(uint32_t size)
: memory_(new char[size]), size_(size) {}
MemoryBufferBacking::~MemoryBufferBacking() = default;
@@ -35,7 +42,7 @@ void* MemoryBufferBacking::GetMemory() const {
return memory_.get();
}
-size_t MemoryBufferBacking::GetSize() const {
+uint32_t MemoryBufferBacking::GetSize() const {
return size_;
}
@@ -45,6 +52,7 @@ SharedMemoryBufferBacking::SharedMemoryBufferBacking(
: shared_memory_region_(std::move(shared_memory_region)),
shared_memory_mapping_(std::move(shared_memory_mapping)) {
DCHECK_EQ(shared_memory_region_.GetGUID(), shared_memory_mapping_.guid());
+ DCHECK_LE(shared_memory_mapping_.size(), static_cast<size_t>(UINT32_MAX));
}
SharedMemoryBufferBacking::~SharedMemoryBufferBacking() = default;
@@ -62,8 +70,8 @@ void* SharedMemoryBufferBacking::GetMemory() const {
return shared_memory_mapping_.memory();
}
-size_t SharedMemoryBufferBacking::GetSize() const {
- return shared_memory_mapping_.size();
+uint32_t SharedMemoryBufferBacking::GetSize() const {
+ return static_cast<uint32_t>(shared_memory_mapping_.size());
}
Buffer::Buffer(std::unique_ptr<BufferBacking> backing)
@@ -97,6 +105,11 @@ uint32_t Buffer::GetRemainingSize(uint32_t data_offset) const {
return static_cast<uint32_t>(size_) - data_offset;
}
+int32_t GetNextBufferId() {
+ // 0 is a reserved value.
+ return g_next_buffer_id.GetNext() + 1;
+}
+
base::trace_event::MemoryAllocatorDumpGuid GetBufferGUIDForTracing(
uint64_t tracing_process_id,
int32_t buffer_id) {
diff --git a/chromium/gpu/command_buffer/common/buffer.h b/chromium/gpu/command_buffer/common/buffer.h
index 538542f3659..21e4055547e 100644
--- a/chromium/gpu/command_buffer/common/buffer.h
+++ b/chromium/gpu/command_buffer/common/buffer.h
@@ -24,19 +24,19 @@ class GPU_EXPORT BufferBacking {
virtual const base::UnsafeSharedMemoryRegion& shared_memory_region() const;
virtual base::UnguessableToken GetGUID() const;
virtual void* GetMemory() const = 0;
- virtual size_t GetSize() const = 0;
+ virtual uint32_t GetSize() const = 0;
};
class GPU_EXPORT MemoryBufferBacking : public BufferBacking {
public:
- explicit MemoryBufferBacking(size_t size);
+ explicit MemoryBufferBacking(uint32_t size);
~MemoryBufferBacking() override;
void* GetMemory() const override;
- size_t GetSize() const override;
+ uint32_t GetSize() const override;
private:
std::unique_ptr<char[]> memory_;
- size_t size_;
+ uint32_t size_;
DISALLOW_COPY_AND_ASSIGN(MemoryBufferBacking);
};
@@ -50,7 +50,7 @@ class GPU_EXPORT SharedMemoryBufferBacking : public BufferBacking {
const base::UnsafeSharedMemoryRegion& shared_memory_region() const override;
base::UnguessableToken GetGUID() const override;
void* GetMemory() const override;
- size_t GetSize() const override;
+ uint32_t GetSize() const override;
private:
base::UnsafeSharedMemoryRegion shared_memory_region_;
@@ -65,7 +65,7 @@ class GPU_EXPORT Buffer : public base::RefCountedThreadSafe<Buffer> {
BufferBacking* backing() const { return backing_.get(); }
void* memory() const { return memory_; }
- size_t size() const { return size_; }
+ uint32_t size() const { return size_; }
// Returns nullptr if the address overflows the memory.
void* GetDataAddress(uint32_t data_offset, uint32_t data_size) const;
@@ -82,7 +82,7 @@ class GPU_EXPORT Buffer : public base::RefCountedThreadSafe<Buffer> {
std::unique_ptr<BufferBacking> backing_;
void* memory_;
- size_t size_;
+ uint32_t size_;
DISALLOW_COPY_AND_ASSIGN(Buffer);
};
@@ -100,11 +100,15 @@ static inline scoped_refptr<Buffer> MakeBufferFromSharedMemory(
std::move(shared_memory_region), std::move(shared_memory_mapping)));
}
-static inline scoped_refptr<Buffer> MakeMemoryBuffer(size_t size) {
+static inline scoped_refptr<Buffer> MakeMemoryBuffer(uint32_t size) {
return base::MakeRefCounted<Buffer>(
std::make_unique<MemoryBufferBacking>(size));
}
+// Generates a process unique buffer ID which can be safely used with
+// GetBufferGUIDForTracing.
+GPU_EXPORT int32_t GetNextBufferId();
+
// Generates GUID which can be used to trace buffer using an Id.
GPU_EXPORT base::trace_event::MemoryAllocatorDumpGuid GetBufferGUIDForTracing(
uint64_t tracing_process_id,
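[annotation] buffer.h narrows every size in the Buffer API from size_t to uint32_t, matching the 32-bit offsets of the command-buffer wire format; the DCHECK added in buffer.cc above guards the one spot where a larger shared-memory mapping could slip in. A usage sketch against this API (values are illustrative):

    scoped_refptr<gpu::Buffer> buffer = gpu::MakeMemoryBuffer(1024u);
    uint32_t remaining = buffer->GetRemainingSize(128u);      // 1024 - 128
    void* address = buffer->GetDataAddress(128u, remaining);  // nullptr on overflow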
diff --git a/chromium/gpu/command_buffer/common/command_buffer.h b/chromium/gpu/command_buffer/common/command_buffer.h
index a91610b488a..f67bbda7210 100644
--- a/chromium/gpu/command_buffer/common/command_buffer.h
+++ b/chromium/gpu/command_buffer/common/command_buffer.h
@@ -110,7 +110,7 @@ class GPU_EXPORT CommandBuffer {
// Create a transfer buffer of the given size. Returns its ID or -1 on
// error.
- virtual scoped_refptr<gpu::Buffer> CreateTransferBuffer(size_t size,
+ virtual scoped_refptr<gpu::Buffer> CreateTransferBuffer(uint32_t size,
int32_t* id) = 0;
// Destroy a transfer buffer. The ID must be positive.
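[annotation] CommandBuffer::CreateTransferBuffer gets the same size_t-to-uint32_t narrowing. Going by the contract in the comment above, a caller sketch (command_buffer stands for any CommandBuffer implementation):

    int32_t id = -1;
    scoped_refptr<gpu::Buffer> transfer =
        command_buffer->CreateTransferBuffer(64 * 1024u, &id);
    if (!transfer || id < 0) {
      // Allocation failed; the ID is -1 on error per the comment above.
    }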
diff --git a/chromium/gpu/command_buffer/common/command_buffer_shared_test.cc b/chromium/gpu/command_buffer/common/command_buffer_shared_test.cc
index 4ae60408373..70bfa0471e6 100644
--- a/chromium/gpu/command_buffer/common/command_buffer_shared_test.cc
+++ b/chromium/gpu/command_buffer/common/command_buffer_shared_test.cc
@@ -66,7 +66,8 @@ TEST_F(CommandBufferSharedTest, TestConsistency) {
consumer.Start();
consumer.task_runner()->PostTask(
- FROM_HERE, base::Bind(&WriteToState, buffer.get(), shared_state_.get()));
+ FROM_HERE,
+ base::BindOnce(&WriteToState, buffer.get(), shared_state_.get()));
CommandBuffer::State last_state;
while (1) {
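[annotation] The test change above is part of the base::Bind-to-base::BindOnce migration: a task posted to a task runner executes exactly once, so a OnceCallback expresses that directly. A minimal sketch, assuming the base/task_runner.h header location of this Chromium revision:

    #include "base/bind.h"
    #include "base/location.h"
    #include "base/task_runner.h"

    void WriteToState(int* out) { *out = 42; }

    void PostWrite(base::TaskRunner* task_runner, int* out) {
      // BindOnce yields a base::OnceClosure, which PostTask takes by move.
      task_runner->PostTask(FROM_HERE, base::BindOnce(&WriteToState, out));
    }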
diff --git a/chromium/gpu/command_buffer/common/discardable_handle_unittest.cc b/chromium/gpu/command_buffer/common/discardable_handle_unittest.cc
index 3b5402b33d7..7738027d72b 100644
--- a/chromium/gpu/command_buffer/common/discardable_handle_unittest.cc
+++ b/chromium/gpu/command_buffer/common/discardable_handle_unittest.cc
@@ -9,13 +9,9 @@
namespace gpu {
namespace {
-scoped_refptr<Buffer> MakeBufferForTesting(size_t num_handles) {
- size_t size = sizeof(base::subtle::Atomic32) * num_handles;
- base::UnsafeSharedMemoryRegion shmem_region =
- base::UnsafeSharedMemoryRegion::Create(size);
- base::WritableSharedMemoryMapping shmem_mapping = shmem_region.Map();
- return MakeBufferFromSharedMemory(std::move(shmem_region),
- std::move(shmem_mapping));
+scoped_refptr<Buffer> MakeBufferForTesting(uint32_t num_handles) {
+ uint32_t size = sizeof(base::subtle::Atomic32) * num_handles;
+ return MakeMemoryBuffer(size);
}
} // namespace
diff --git a/chromium/gpu/command_buffer/common/gles2_cmd_format.cc b/chromium/gpu/command_buffer/common/gles2_cmd_format.cc
index a5ba131c40e..63715537825 100644
--- a/chromium/gpu/command_buffer/common/gles2_cmd_format.cc
+++ b/chromium/gpu/command_buffer/common/gles2_cmd_format.cc
@@ -7,13 +7,16 @@
// We explicitly do NOT include gles2_cmd_format.h here because client side
// and service side have different requirements.
+
#include "gpu/command_buffer/common/cmd_buffer_common.h"
+#include <stddef.h>
+
+#include "base/stl_util.h"
+
namespace gpu {
namespace gles2 {
-#include <stddef.h>
-
#include "gpu/command_buffer/common/gles2_cmd_ids_autogen.h"
const char* GetCommandName(CommandId id) {
@@ -26,7 +29,7 @@ const char* GetCommandName(CommandId id) {
};
size_t index = static_cast<size_t>(id) - kFirstGLES2Command;
- return (index < arraysize(names)) ? names[index] : "*unknown-command*";
+ return (index < base::size(names)) ? names[index] : "*unknown-command*";
}
} // namespace gles2
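[annotation] GetCommandName gets the same base::size() treatment, with the includes hoisted out of the gles2 namespace. The names table it indexes is generated from the same OP() X-macro list as gles2_cmd_ids_autogen.h (diffed below), which is why inserting the six MultiDraw* entries after ShaderSourceBucket bumps every later command ID by six. A simplified sketch of the X-macro pattern, with hypothetical macro names and base value:

    #define EXAMPLE_COMMAND_LIST(OP) \
      OP(ShaderSourceBucket)         \
      OP(MultiDrawBeginCHROMIUM)     \
      OP(StencilFunc)

    #define EXAMPLE_CMD_OP(name) k##name,
    enum ExampleCommandId {
      kExampleFirstCommand = 408,                // illustrative base value
      kExampleStart = kExampleFirstCommand - 1,  // next enumerator is 408
      EXAMPLE_COMMAND_LIST(EXAMPLE_CMD_OP)       // 408, 409, 410
    };
    #undef EXAMPLE_CMD_OP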
diff --git a/chromium/gpu/command_buffer/common/gles2_cmd_format_autogen.h b/chromium/gpu/command_buffer/common/gles2_cmd_format_autogen.h
index 76ee6074b51..d45e669919b 100644
--- a/chromium/gpu/command_buffer/common/gles2_cmd_format_autogen.h
+++ b/chromium/gpu/command_buffer/common/gles2_cmd_format_autogen.h
@@ -7493,6 +7493,396 @@ static_assert(offsetof(ShaderSourceBucket, shader) == 4,
static_assert(offsetof(ShaderSourceBucket, str_bucket_id) == 8,
"offset of ShaderSourceBucket str_bucket_id should be 8");
+struct MultiDrawBeginCHROMIUM {
+ typedef MultiDrawBeginCHROMIUM ValueType;
+ static const CommandId kCmdId = kMultiDrawBeginCHROMIUM;
+ static const cmd::ArgFlags kArgFlags = cmd::kFixed;
+ static const uint8_t cmd_flags = CMD_FLAG_SET_TRACE_LEVEL(1);
+
+ static uint32_t ComputeSize() {
+ return static_cast<uint32_t>(sizeof(ValueType)); // NOLINT
+ }
+
+ void SetHeader() { header.SetCmd<ValueType>(); }
+
+ void Init(GLsizei _drawcount) {
+ SetHeader();
+ drawcount = _drawcount;
+ }
+
+ void* Set(void* cmd, GLsizei _drawcount) {
+ static_cast<ValueType*>(cmd)->Init(_drawcount);
+ return NextCmdAddress<ValueType>(cmd);
+ }
+
+ gpu::CommandHeader header;
+ int32_t drawcount;
+};
+
+static_assert(sizeof(MultiDrawBeginCHROMIUM) == 8,
+ "size of MultiDrawBeginCHROMIUM should be 8");
+static_assert(offsetof(MultiDrawBeginCHROMIUM, header) == 0,
+ "offset of MultiDrawBeginCHROMIUM header should be 0");
+static_assert(offsetof(MultiDrawBeginCHROMIUM, drawcount) == 4,
+ "offset of MultiDrawBeginCHROMIUM drawcount should be 4");
+
+struct MultiDrawEndCHROMIUM {
+ typedef MultiDrawEndCHROMIUM ValueType;
+ static const CommandId kCmdId = kMultiDrawEndCHROMIUM;
+ static const cmd::ArgFlags kArgFlags = cmd::kFixed;
+ static const uint8_t cmd_flags = CMD_FLAG_SET_TRACE_LEVEL(1);
+
+ static uint32_t ComputeSize() {
+ return static_cast<uint32_t>(sizeof(ValueType)); // NOLINT
+ }
+
+ void SetHeader() { header.SetCmd<ValueType>(); }
+
+ void Init() { SetHeader(); }
+
+ void* Set(void* cmd) {
+ static_cast<ValueType*>(cmd)->Init();
+ return NextCmdAddress<ValueType>(cmd);
+ }
+
+ gpu::CommandHeader header;
+};
+
+static_assert(sizeof(MultiDrawEndCHROMIUM) == 4,
+ "size of MultiDrawEndCHROMIUM should be 4");
+static_assert(offsetof(MultiDrawEndCHROMIUM, header) == 0,
+ "offset of MultiDrawEndCHROMIUM header should be 0");
+
+struct MultiDrawArraysCHROMIUM {
+ typedef MultiDrawArraysCHROMIUM ValueType;
+ static const CommandId kCmdId = kMultiDrawArraysCHROMIUM;
+ static const cmd::ArgFlags kArgFlags = cmd::kFixed;
+ static const uint8_t cmd_flags = CMD_FLAG_SET_TRACE_LEVEL(2);
+
+ static uint32_t ComputeSize() {
+ return static_cast<uint32_t>(sizeof(ValueType)); // NOLINT
+ }
+
+ void SetHeader() { header.SetCmd<ValueType>(); }
+
+ void Init(GLenum _mode,
+ uint32_t _firsts_shm_id,
+ uint32_t _firsts_shm_offset,
+ uint32_t _counts_shm_id,
+ uint32_t _counts_shm_offset,
+ GLsizei _drawcount) {
+ SetHeader();
+ mode = _mode;
+ firsts_shm_id = _firsts_shm_id;
+ firsts_shm_offset = _firsts_shm_offset;
+ counts_shm_id = _counts_shm_id;
+ counts_shm_offset = _counts_shm_offset;
+ drawcount = _drawcount;
+ }
+
+ void* Set(void* cmd,
+ GLenum _mode,
+ uint32_t _firsts_shm_id,
+ uint32_t _firsts_shm_offset,
+ uint32_t _counts_shm_id,
+ uint32_t _counts_shm_offset,
+ GLsizei _drawcount) {
+ static_cast<ValueType*>(cmd)->Init(_mode, _firsts_shm_id,
+ _firsts_shm_offset, _counts_shm_id,
+ _counts_shm_offset, _drawcount);
+ return NextCmdAddress<ValueType>(cmd);
+ }
+
+ gpu::CommandHeader header;
+ uint32_t mode;
+ uint32_t firsts_shm_id;
+ uint32_t firsts_shm_offset;
+ uint32_t counts_shm_id;
+ uint32_t counts_shm_offset;
+ int32_t drawcount;
+};
+
+static_assert(sizeof(MultiDrawArraysCHROMIUM) == 28,
+ "size of MultiDrawArraysCHROMIUM should be 28");
+static_assert(offsetof(MultiDrawArraysCHROMIUM, header) == 0,
+ "offset of MultiDrawArraysCHROMIUM header should be 0");
+static_assert(offsetof(MultiDrawArraysCHROMIUM, mode) == 4,
+ "offset of MultiDrawArraysCHROMIUM mode should be 4");
+static_assert(offsetof(MultiDrawArraysCHROMIUM, firsts_shm_id) == 8,
+ "offset of MultiDrawArraysCHROMIUM firsts_shm_id should be 8");
+static_assert(
+ offsetof(MultiDrawArraysCHROMIUM, firsts_shm_offset) == 12,
+ "offset of MultiDrawArraysCHROMIUM firsts_shm_offset should be 12");
+static_assert(offsetof(MultiDrawArraysCHROMIUM, counts_shm_id) == 16,
+ "offset of MultiDrawArraysCHROMIUM counts_shm_id should be 16");
+static_assert(
+ offsetof(MultiDrawArraysCHROMIUM, counts_shm_offset) == 20,
+ "offset of MultiDrawArraysCHROMIUM counts_shm_offset should be 20");
+static_assert(offsetof(MultiDrawArraysCHROMIUM, drawcount) == 24,
+ "offset of MultiDrawArraysCHROMIUM drawcount should be 24");
+
+struct MultiDrawArraysInstancedCHROMIUM {
+ typedef MultiDrawArraysInstancedCHROMIUM ValueType;
+ static const CommandId kCmdId = kMultiDrawArraysInstancedCHROMIUM;
+ static const cmd::ArgFlags kArgFlags = cmd::kFixed;
+ static const uint8_t cmd_flags = CMD_FLAG_SET_TRACE_LEVEL(2);
+
+ static uint32_t ComputeSize() {
+ return static_cast<uint32_t>(sizeof(ValueType)); // NOLINT
+ }
+
+ void SetHeader() { header.SetCmd<ValueType>(); }
+
+ void Init(GLenum _mode,
+ uint32_t _firsts_shm_id,
+ uint32_t _firsts_shm_offset,
+ uint32_t _counts_shm_id,
+ uint32_t _counts_shm_offset,
+ uint32_t _instance_counts_shm_id,
+ uint32_t _instance_counts_shm_offset,
+ GLsizei _drawcount) {
+ SetHeader();
+ mode = _mode;
+ firsts_shm_id = _firsts_shm_id;
+ firsts_shm_offset = _firsts_shm_offset;
+ counts_shm_id = _counts_shm_id;
+ counts_shm_offset = _counts_shm_offset;
+ instance_counts_shm_id = _instance_counts_shm_id;
+ instance_counts_shm_offset = _instance_counts_shm_offset;
+ drawcount = _drawcount;
+ }
+
+ void* Set(void* cmd,
+ GLenum _mode,
+ uint32_t _firsts_shm_id,
+ uint32_t _firsts_shm_offset,
+ uint32_t _counts_shm_id,
+ uint32_t _counts_shm_offset,
+ uint32_t _instance_counts_shm_id,
+ uint32_t _instance_counts_shm_offset,
+ GLsizei _drawcount) {
+ static_cast<ValueType*>(cmd)->Init(
+ _mode, _firsts_shm_id, _firsts_shm_offset, _counts_shm_id,
+ _counts_shm_offset, _instance_counts_shm_id,
+ _instance_counts_shm_offset, _drawcount);
+ return NextCmdAddress<ValueType>(cmd);
+ }
+
+ gpu::CommandHeader header;
+ uint32_t mode;
+ uint32_t firsts_shm_id;
+ uint32_t firsts_shm_offset;
+ uint32_t counts_shm_id;
+ uint32_t counts_shm_offset;
+ uint32_t instance_counts_shm_id;
+ uint32_t instance_counts_shm_offset;
+ int32_t drawcount;
+};
+
+static_assert(sizeof(MultiDrawArraysInstancedCHROMIUM) == 36,
+ "size of MultiDrawArraysInstancedCHROMIUM should be 36");
+static_assert(offsetof(MultiDrawArraysInstancedCHROMIUM, header) == 0,
+ "offset of MultiDrawArraysInstancedCHROMIUM header should be 0");
+static_assert(offsetof(MultiDrawArraysInstancedCHROMIUM, mode) == 4,
+ "offset of MultiDrawArraysInstancedCHROMIUM mode should be 4");
+static_assert(
+ offsetof(MultiDrawArraysInstancedCHROMIUM, firsts_shm_id) == 8,
+ "offset of MultiDrawArraysInstancedCHROMIUM firsts_shm_id should be 8");
+static_assert(offsetof(MultiDrawArraysInstancedCHROMIUM, firsts_shm_offset) ==
+ 12,
+ "offset of MultiDrawArraysInstancedCHROMIUM firsts_shm_offset "
+ "should be 12");
+static_assert(
+ offsetof(MultiDrawArraysInstancedCHROMIUM, counts_shm_id) == 16,
+ "offset of MultiDrawArraysInstancedCHROMIUM counts_shm_id should be 16");
+static_assert(offsetof(MultiDrawArraysInstancedCHROMIUM, counts_shm_offset) ==
+ 20,
+ "offset of MultiDrawArraysInstancedCHROMIUM counts_shm_offset "
+ "should be 20");
+static_assert(offsetof(MultiDrawArraysInstancedCHROMIUM,
+ instance_counts_shm_id) == 24,
+ "offset of MultiDrawArraysInstancedCHROMIUM "
+ "instance_counts_shm_id should be 24");
+static_assert(offsetof(MultiDrawArraysInstancedCHROMIUM,
+ instance_counts_shm_offset) == 28,
+ "offset of MultiDrawArraysInstancedCHROMIUM "
+ "instance_counts_shm_offset should be 28");
+static_assert(
+ offsetof(MultiDrawArraysInstancedCHROMIUM, drawcount) == 32,
+ "offset of MultiDrawArraysInstancedCHROMIUM drawcount should be 32");
+
+struct MultiDrawElementsCHROMIUM {
+ typedef MultiDrawElementsCHROMIUM ValueType;
+ static const CommandId kCmdId = kMultiDrawElementsCHROMIUM;
+ static const cmd::ArgFlags kArgFlags = cmd::kFixed;
+ static const uint8_t cmd_flags = CMD_FLAG_SET_TRACE_LEVEL(2);
+
+ static uint32_t ComputeSize() {
+ return static_cast<uint32_t>(sizeof(ValueType)); // NOLINT
+ }
+
+ void SetHeader() { header.SetCmd<ValueType>(); }
+
+ void Init(GLenum _mode,
+ uint32_t _counts_shm_id,
+ uint32_t _counts_shm_offset,
+ GLenum _type,
+ uint32_t _offsets_shm_id,
+ uint32_t _offsets_shm_offset,
+ GLsizei _drawcount) {
+ SetHeader();
+ mode = _mode;
+ counts_shm_id = _counts_shm_id;
+ counts_shm_offset = _counts_shm_offset;
+ type = _type;
+ offsets_shm_id = _offsets_shm_id;
+ offsets_shm_offset = _offsets_shm_offset;
+ drawcount = _drawcount;
+ }
+
+ void* Set(void* cmd,
+ GLenum _mode,
+ uint32_t _counts_shm_id,
+ uint32_t _counts_shm_offset,
+ GLenum _type,
+ uint32_t _offsets_shm_id,
+ uint32_t _offsets_shm_offset,
+ GLsizei _drawcount) {
+ static_cast<ValueType*>(cmd)->Init(
+ _mode, _counts_shm_id, _counts_shm_offset, _type, _offsets_shm_id,
+ _offsets_shm_offset, _drawcount);
+ return NextCmdAddress<ValueType>(cmd);
+ }
+
+ gpu::CommandHeader header;
+ uint32_t mode;
+ uint32_t counts_shm_id;
+ uint32_t counts_shm_offset;
+ uint32_t type;
+ uint32_t offsets_shm_id;
+ uint32_t offsets_shm_offset;
+ int32_t drawcount;
+};
+
+static_assert(sizeof(MultiDrawElementsCHROMIUM) == 32,
+ "size of MultiDrawElementsCHROMIUM should be 32");
+static_assert(offsetof(MultiDrawElementsCHROMIUM, header) == 0,
+ "offset of MultiDrawElementsCHROMIUM header should be 0");
+static_assert(offsetof(MultiDrawElementsCHROMIUM, mode) == 4,
+ "offset of MultiDrawElementsCHROMIUM mode should be 4");
+static_assert(offsetof(MultiDrawElementsCHROMIUM, counts_shm_id) == 8,
+ "offset of MultiDrawElementsCHROMIUM counts_shm_id should be 8");
+static_assert(
+ offsetof(MultiDrawElementsCHROMIUM, counts_shm_offset) == 12,
+ "offset of MultiDrawElementsCHROMIUM counts_shm_offset should be 12");
+static_assert(offsetof(MultiDrawElementsCHROMIUM, type) == 16,
+ "offset of MultiDrawElementsCHROMIUM type should be 16");
+static_assert(
+ offsetof(MultiDrawElementsCHROMIUM, offsets_shm_id) == 20,
+ "offset of MultiDrawElementsCHROMIUM offsets_shm_id should be 20");
+static_assert(
+ offsetof(MultiDrawElementsCHROMIUM, offsets_shm_offset) == 24,
+ "offset of MultiDrawElementsCHROMIUM offsets_shm_offset should be 24");
+static_assert(offsetof(MultiDrawElementsCHROMIUM, drawcount) == 28,
+ "offset of MultiDrawElementsCHROMIUM drawcount should be 28");
+
+struct MultiDrawElementsInstancedCHROMIUM {
+ typedef MultiDrawElementsInstancedCHROMIUM ValueType;
+ static const CommandId kCmdId = kMultiDrawElementsInstancedCHROMIUM;
+ static const cmd::ArgFlags kArgFlags = cmd::kFixed;
+ static const uint8_t cmd_flags = CMD_FLAG_SET_TRACE_LEVEL(2);
+
+ static uint32_t ComputeSize() {
+ return static_cast<uint32_t>(sizeof(ValueType)); // NOLINT
+ }
+
+ void SetHeader() { header.SetCmd<ValueType>(); }
+
+ void Init(GLenum _mode,
+ uint32_t _counts_shm_id,
+ uint32_t _counts_shm_offset,
+ GLenum _type,
+ uint32_t _offsets_shm_id,
+ uint32_t _offsets_shm_offset,
+ uint32_t _instance_counts_shm_id,
+ uint32_t _instance_counts_shm_offset,
+ GLsizei _drawcount) {
+ SetHeader();
+ mode = _mode;
+ counts_shm_id = _counts_shm_id;
+ counts_shm_offset = _counts_shm_offset;
+ type = _type;
+ offsets_shm_id = _offsets_shm_id;
+ offsets_shm_offset = _offsets_shm_offset;
+ instance_counts_shm_id = _instance_counts_shm_id;
+ instance_counts_shm_offset = _instance_counts_shm_offset;
+ drawcount = _drawcount;
+ }
+
+ void* Set(void* cmd,
+ GLenum _mode,
+ uint32_t _counts_shm_id,
+ uint32_t _counts_shm_offset,
+ GLenum _type,
+ uint32_t _offsets_shm_id,
+ uint32_t _offsets_shm_offset,
+ uint32_t _instance_counts_shm_id,
+ uint32_t _instance_counts_shm_offset,
+ GLsizei _drawcount) {
+ static_cast<ValueType*>(cmd)->Init(
+ _mode, _counts_shm_id, _counts_shm_offset, _type, _offsets_shm_id,
+ _offsets_shm_offset, _instance_counts_shm_id,
+ _instance_counts_shm_offset, _drawcount);
+ return NextCmdAddress<ValueType>(cmd);
+ }
+
+ gpu::CommandHeader header;
+ uint32_t mode;
+ uint32_t counts_shm_id;
+ uint32_t counts_shm_offset;
+ uint32_t type;
+ uint32_t offsets_shm_id;
+ uint32_t offsets_shm_offset;
+ uint32_t instance_counts_shm_id;
+ uint32_t instance_counts_shm_offset;
+ int32_t drawcount;
+};
+
+static_assert(sizeof(MultiDrawElementsInstancedCHROMIUM) == 40,
+ "size of MultiDrawElementsInstancedCHROMIUM should be 40");
+static_assert(
+ offsetof(MultiDrawElementsInstancedCHROMIUM, header) == 0,
+ "offset of MultiDrawElementsInstancedCHROMIUM header should be 0");
+static_assert(offsetof(MultiDrawElementsInstancedCHROMIUM, mode) == 4,
+ "offset of MultiDrawElementsInstancedCHROMIUM mode should be 4");
+static_assert(
+ offsetof(MultiDrawElementsInstancedCHROMIUM, counts_shm_id) == 8,
+ "offset of MultiDrawElementsInstancedCHROMIUM counts_shm_id should be 8");
+static_assert(offsetof(MultiDrawElementsInstancedCHROMIUM, counts_shm_offset) ==
+ 12,
+ "offset of MultiDrawElementsInstancedCHROMIUM counts_shm_offset "
+ "should be 12");
+static_assert(offsetof(MultiDrawElementsInstancedCHROMIUM, type) == 16,
+ "offset of MultiDrawElementsInstancedCHROMIUM type should be 16");
+static_assert(
+ offsetof(MultiDrawElementsInstancedCHROMIUM, offsets_shm_id) == 20,
+ "offset of MultiDrawElementsInstancedCHROMIUM offsets_shm_id should be 20");
+static_assert(offsetof(MultiDrawElementsInstancedCHROMIUM,
+ offsets_shm_offset) == 24,
+ "offset of MultiDrawElementsInstancedCHROMIUM offsets_shm_offset "
+ "should be 24");
+static_assert(offsetof(MultiDrawElementsInstancedCHROMIUM,
+ instance_counts_shm_id) == 28,
+ "offset of MultiDrawElementsInstancedCHROMIUM "
+ "instance_counts_shm_id should be 28");
+static_assert(offsetof(MultiDrawElementsInstancedCHROMIUM,
+ instance_counts_shm_offset) == 32,
+ "offset of MultiDrawElementsInstancedCHROMIUM "
+ "instance_counts_shm_offset should be 32");
+static_assert(
+ offsetof(MultiDrawElementsInstancedCHROMIUM, drawcount) == 36,
+ "offset of MultiDrawElementsInstancedCHROMIUM drawcount should be 36");
+
struct StencilFunc {
typedef StencilFunc ValueType;
static const CommandId kCmdId = kStencilFunc;
@@ -13529,73 +13919,6 @@ static_assert(offsetof(InsertFenceSyncCHROMIUM, release_count_0) == 4,
static_assert(offsetof(InsertFenceSyncCHROMIUM, release_count_1) == 8,
"offset of InsertFenceSyncCHROMIUM release_count_1 should be 8");
-struct WaitSyncTokenCHROMIUM {
- typedef WaitSyncTokenCHROMIUM ValueType;
- static const CommandId kCmdId = kWaitSyncTokenCHROMIUM;
- static const cmd::ArgFlags kArgFlags = cmd::kFixed;
- static const uint8_t cmd_flags = CMD_FLAG_SET_TRACE_LEVEL(3);
-
- static uint32_t ComputeSize() {
- return static_cast<uint32_t>(sizeof(ValueType)); // NOLINT
- }
-
- void SetHeader() { header.SetCmd<ValueType>(); }
-
- void Init(GLint _namespace_id,
- GLuint64 _command_buffer_id,
- GLuint64 _release_count) {
- SetHeader();
- namespace_id = _namespace_id;
- GLES2Util::MapUint64ToTwoUint32(static_cast<uint64_t>(_command_buffer_id),
- &command_buffer_id_0, &command_buffer_id_1);
- GLES2Util::MapUint64ToTwoUint32(static_cast<uint64_t>(_release_count),
- &release_count_0, &release_count_1);
- }
-
- void* Set(void* cmd,
- GLint _namespace_id,
- GLuint64 _command_buffer_id,
- GLuint64 _release_count) {
- static_cast<ValueType*>(cmd)->Init(_namespace_id, _command_buffer_id,
- _release_count);
- return NextCmdAddress<ValueType>(cmd);
- }
-
- GLuint64 command_buffer_id() const volatile {
- return static_cast<GLuint64>(GLES2Util::MapTwoUint32ToUint64(
- command_buffer_id_0, command_buffer_id_1));
- }
-
- GLuint64 release_count() const volatile {
- return static_cast<GLuint64>(
- GLES2Util::MapTwoUint32ToUint64(release_count_0, release_count_1));
- }
-
- gpu::CommandHeader header;
- int32_t namespace_id;
- uint32_t command_buffer_id_0;
- uint32_t command_buffer_id_1;
- uint32_t release_count_0;
- uint32_t release_count_1;
-};
-
-static_assert(sizeof(WaitSyncTokenCHROMIUM) == 24,
- "size of WaitSyncTokenCHROMIUM should be 24");
-static_assert(offsetof(WaitSyncTokenCHROMIUM, header) == 0,
- "offset of WaitSyncTokenCHROMIUM header should be 0");
-static_assert(offsetof(WaitSyncTokenCHROMIUM, namespace_id) == 4,
- "offset of WaitSyncTokenCHROMIUM namespace_id should be 4");
-static_assert(
- offsetof(WaitSyncTokenCHROMIUM, command_buffer_id_0) == 8,
- "offset of WaitSyncTokenCHROMIUM command_buffer_id_0 should be 8");
-static_assert(
- offsetof(WaitSyncTokenCHROMIUM, command_buffer_id_1) == 12,
- "offset of WaitSyncTokenCHROMIUM command_buffer_id_1 should be 12");
-static_assert(offsetof(WaitSyncTokenCHROMIUM, release_count_0) == 16,
- "offset of WaitSyncTokenCHROMIUM release_count_0 should be 16");
-static_assert(offsetof(WaitSyncTokenCHROMIUM, release_count_1) == 20,
- "offset of WaitSyncTokenCHROMIUM release_count_1 should be 20");
-
struct UnpremultiplyAndDitherCopyCHROMIUM {
typedef UnpremultiplyAndDitherCopyCHROMIUM ValueType;
static const CommandId kCmdId = kUnpremultiplyAndDitherCopyCHROMIUM;
@@ -14101,71 +14424,6 @@ static_assert(sizeof(FlushDriverCachesCHROMIUM) == 4,
static_assert(offsetof(FlushDriverCachesCHROMIUM, header) == 0,
"offset of FlushDriverCachesCHROMIUM header should be 0");
-struct ScheduleDCLayerSharedStateCHROMIUM {
- typedef ScheduleDCLayerSharedStateCHROMIUM ValueType;
- static const CommandId kCmdId = kScheduleDCLayerSharedStateCHROMIUM;
- static const cmd::ArgFlags kArgFlags = cmd::kFixed;
- static const uint8_t cmd_flags = CMD_FLAG_SET_TRACE_LEVEL(3);
-
- static uint32_t ComputeSize() {
- return static_cast<uint32_t>(sizeof(ValueType)); // NOLINT
- }
-
- void SetHeader() { header.SetCmd<ValueType>(); }
-
- void Init(GLfloat _opacity,
- GLboolean _is_clipped,
- GLint _z_order,
- GLuint _shm_id,
- GLuint _shm_offset) {
- SetHeader();
- opacity = _opacity;
- is_clipped = _is_clipped;
- z_order = _z_order;
- shm_id = _shm_id;
- shm_offset = _shm_offset;
- }
-
- void* Set(void* cmd,
- GLfloat _opacity,
- GLboolean _is_clipped,
- GLint _z_order,
- GLuint _shm_id,
- GLuint _shm_offset) {
- static_cast<ValueType*>(cmd)->Init(_opacity, _is_clipped, _z_order, _shm_id,
- _shm_offset);
- return NextCmdAddress<ValueType>(cmd);
- }
-
- gpu::CommandHeader header;
- float opacity;
- uint32_t is_clipped;
- int32_t z_order;
- uint32_t shm_id;
- uint32_t shm_offset;
-};
-
-static_assert(sizeof(ScheduleDCLayerSharedStateCHROMIUM) == 24,
- "size of ScheduleDCLayerSharedStateCHROMIUM should be 24");
-static_assert(
- offsetof(ScheduleDCLayerSharedStateCHROMIUM, header) == 0,
- "offset of ScheduleDCLayerSharedStateCHROMIUM header should be 0");
-static_assert(
- offsetof(ScheduleDCLayerSharedStateCHROMIUM, opacity) == 4,
- "offset of ScheduleDCLayerSharedStateCHROMIUM opacity should be 4");
-static_assert(
- offsetof(ScheduleDCLayerSharedStateCHROMIUM, is_clipped) == 8,
- "offset of ScheduleDCLayerSharedStateCHROMIUM is_clipped should be 8");
-static_assert(
- offsetof(ScheduleDCLayerSharedStateCHROMIUM, z_order) == 12,
- "offset of ScheduleDCLayerSharedStateCHROMIUM z_order should be 12");
-static_assert(
- offsetof(ScheduleDCLayerSharedStateCHROMIUM, shm_id) == 16,
- "offset of ScheduleDCLayerSharedStateCHROMIUM shm_id should be 16");
-static_assert(
- offsetof(ScheduleDCLayerSharedStateCHROMIUM, shm_offset) == 20,
- "offset of ScheduleDCLayerSharedStateCHROMIUM shm_offset should be 20");
-
struct ScheduleDCLayerCHROMIUM {
typedef ScheduleDCLayerCHROMIUM ValueType;
static const CommandId kCmdId = kScheduleDCLayerCHROMIUM;
@@ -14178,66 +14436,165 @@ struct ScheduleDCLayerCHROMIUM {
void SetHeader() { header.SetCmd<ValueType>(); }
- void Init(GLsizei _num_textures,
- GLuint _background_color,
- GLuint _edge_aa_mask,
- GLuint _filter,
- GLuint _shm_id,
- GLuint _shm_offset,
+ void Init(GLuint _y_texture_id,
+ GLuint _uv_texture_id,
+ GLint _z_order,
+ GLint _content_x,
+ GLint _content_y,
+ GLint _content_width,
+ GLint _content_height,
+ GLint _quad_x,
+ GLint _quad_y,
+ GLint _quad_width,
+ GLint _quad_height,
+ GLfloat _transform_c1r1,
+ GLfloat _transform_c2r1,
+ GLfloat _transform_c1r2,
+ GLfloat _transform_c2r2,
+ GLfloat _transform_tx,
+ GLfloat _transform_ty,
+ GLboolean _is_clipped,
+ GLint _clip_x,
+ GLint _clip_y,
+ GLint _clip_width,
+ GLint _clip_height,
GLuint _protected_video_type) {
SetHeader();
- num_textures = _num_textures;
- background_color = _background_color;
- edge_aa_mask = _edge_aa_mask;
- filter = _filter;
- shm_id = _shm_id;
- shm_offset = _shm_offset;
+ y_texture_id = _y_texture_id;
+ uv_texture_id = _uv_texture_id;
+ z_order = _z_order;
+ content_x = _content_x;
+ content_y = _content_y;
+ content_width = _content_width;
+ content_height = _content_height;
+ quad_x = _quad_x;
+ quad_y = _quad_y;
+ quad_width = _quad_width;
+ quad_height = _quad_height;
+ transform_c1r1 = _transform_c1r1;
+ transform_c2r1 = _transform_c2r1;
+ transform_c1r2 = _transform_c1r2;
+ transform_c2r2 = _transform_c2r2;
+ transform_tx = _transform_tx;
+ transform_ty = _transform_ty;
+ is_clipped = _is_clipped;
+ clip_x = _clip_x;
+ clip_y = _clip_y;
+ clip_width = _clip_width;
+ clip_height = _clip_height;
protected_video_type = _protected_video_type;
}
void* Set(void* cmd,
- GLsizei _num_textures,
- GLuint _background_color,
- GLuint _edge_aa_mask,
- GLuint _filter,
- GLuint _shm_id,
- GLuint _shm_offset,
+ GLuint _y_texture_id,
+ GLuint _uv_texture_id,
+ GLint _z_order,
+ GLint _content_x,
+ GLint _content_y,
+ GLint _content_width,
+ GLint _content_height,
+ GLint _quad_x,
+ GLint _quad_y,
+ GLint _quad_width,
+ GLint _quad_height,
+ GLfloat _transform_c1r1,
+ GLfloat _transform_c2r1,
+ GLfloat _transform_c1r2,
+ GLfloat _transform_c2r2,
+ GLfloat _transform_tx,
+ GLfloat _transform_ty,
+ GLboolean _is_clipped,
+ GLint _clip_x,
+ GLint _clip_y,
+ GLint _clip_width,
+ GLint _clip_height,
GLuint _protected_video_type) {
- static_cast<ValueType*>(cmd)->Init(_num_textures, _background_color,
- _edge_aa_mask, _filter, _shm_id,
- _shm_offset, _protected_video_type);
+ static_cast<ValueType*>(cmd)->Init(
+ _y_texture_id, _uv_texture_id, _z_order, _content_x, _content_y,
+ _content_width, _content_height, _quad_x, _quad_y, _quad_width,
+ _quad_height, _transform_c1r1, _transform_c2r1, _transform_c1r2,
+ _transform_c2r2, _transform_tx, _transform_ty, _is_clipped, _clip_x,
+ _clip_y, _clip_width, _clip_height, _protected_video_type);
return NextCmdAddress<ValueType>(cmd);
}
gpu::CommandHeader header;
- int32_t num_textures;
- uint32_t background_color;
- uint32_t edge_aa_mask;
- uint32_t filter;
- uint32_t shm_id;
- uint32_t shm_offset;
+ uint32_t y_texture_id;
+ uint32_t uv_texture_id;
+ int32_t z_order;
+ int32_t content_x;
+ int32_t content_y;
+ int32_t content_width;
+ int32_t content_height;
+ int32_t quad_x;
+ int32_t quad_y;
+ int32_t quad_width;
+ int32_t quad_height;
+ float transform_c1r1;
+ float transform_c2r1;
+ float transform_c1r2;
+ float transform_c2r2;
+ float transform_tx;
+ float transform_ty;
+ uint32_t is_clipped;
+ int32_t clip_x;
+ int32_t clip_y;
+ int32_t clip_width;
+ int32_t clip_height;
uint32_t protected_video_type;
};
-static_assert(sizeof(ScheduleDCLayerCHROMIUM) == 32,
- "size of ScheduleDCLayerCHROMIUM should be 32");
+static_assert(sizeof(ScheduleDCLayerCHROMIUM) == 96,
+ "size of ScheduleDCLayerCHROMIUM should be 96");
static_assert(offsetof(ScheduleDCLayerCHROMIUM, header) == 0,
"offset of ScheduleDCLayerCHROMIUM header should be 0");
-static_assert(offsetof(ScheduleDCLayerCHROMIUM, num_textures) == 4,
- "offset of ScheduleDCLayerCHROMIUM num_textures should be 4");
-static_assert(offsetof(ScheduleDCLayerCHROMIUM, background_color) == 8,
- "offset of ScheduleDCLayerCHROMIUM background_color should be 8");
-static_assert(offsetof(ScheduleDCLayerCHROMIUM, edge_aa_mask) == 12,
- "offset of ScheduleDCLayerCHROMIUM edge_aa_mask should be 12");
-static_assert(offsetof(ScheduleDCLayerCHROMIUM, filter) == 16,
- "offset of ScheduleDCLayerCHROMIUM filter should be 16");
-static_assert(offsetof(ScheduleDCLayerCHROMIUM, shm_id) == 20,
- "offset of ScheduleDCLayerCHROMIUM shm_id should be 20");
-static_assert(offsetof(ScheduleDCLayerCHROMIUM, shm_offset) == 24,
- "offset of ScheduleDCLayerCHROMIUM shm_offset should be 24");
+static_assert(offsetof(ScheduleDCLayerCHROMIUM, y_texture_id) == 4,
+ "offset of ScheduleDCLayerCHROMIUM y_texture_id should be 4");
+static_assert(offsetof(ScheduleDCLayerCHROMIUM, uv_texture_id) == 8,
+ "offset of ScheduleDCLayerCHROMIUM uv_texture_id should be 8");
+static_assert(offsetof(ScheduleDCLayerCHROMIUM, z_order) == 12,
+ "offset of ScheduleDCLayerCHROMIUM z_order should be 12");
+static_assert(offsetof(ScheduleDCLayerCHROMIUM, content_x) == 16,
+ "offset of ScheduleDCLayerCHROMIUM content_x should be 16");
+static_assert(offsetof(ScheduleDCLayerCHROMIUM, content_y) == 20,
+ "offset of ScheduleDCLayerCHROMIUM content_y should be 20");
+static_assert(offsetof(ScheduleDCLayerCHROMIUM, content_width) == 24,
+ "offset of ScheduleDCLayerCHROMIUM content_width should be 24");
+static_assert(offsetof(ScheduleDCLayerCHROMIUM, content_height) == 28,
+ "offset of ScheduleDCLayerCHROMIUM content_height should be 28");
+static_assert(offsetof(ScheduleDCLayerCHROMIUM, quad_x) == 32,
+ "offset of ScheduleDCLayerCHROMIUM quad_x should be 32");
+static_assert(offsetof(ScheduleDCLayerCHROMIUM, quad_y) == 36,
+ "offset of ScheduleDCLayerCHROMIUM quad_y should be 36");
+static_assert(offsetof(ScheduleDCLayerCHROMIUM, quad_width) == 40,
+ "offset of ScheduleDCLayerCHROMIUM quad_width should be 40");
+static_assert(offsetof(ScheduleDCLayerCHROMIUM, quad_height) == 44,
+ "offset of ScheduleDCLayerCHROMIUM quad_height should be 44");
+static_assert(offsetof(ScheduleDCLayerCHROMIUM, transform_c1r1) == 48,
+ "offset of ScheduleDCLayerCHROMIUM transform_c1r1 should be 48");
+static_assert(offsetof(ScheduleDCLayerCHROMIUM, transform_c2r1) == 52,
+ "offset of ScheduleDCLayerCHROMIUM transform_c2r1 should be 52");
+static_assert(offsetof(ScheduleDCLayerCHROMIUM, transform_c1r2) == 56,
+ "offset of ScheduleDCLayerCHROMIUM transform_c1r2 should be 56");
+static_assert(offsetof(ScheduleDCLayerCHROMIUM, transform_c2r2) == 60,
+ "offset of ScheduleDCLayerCHROMIUM transform_c2r2 should be 60");
+static_assert(offsetof(ScheduleDCLayerCHROMIUM, transform_tx) == 64,
+ "offset of ScheduleDCLayerCHROMIUM transform_tx should be 64");
+static_assert(offsetof(ScheduleDCLayerCHROMIUM, transform_ty) == 68,
+ "offset of ScheduleDCLayerCHROMIUM transform_ty should be 68");
+static_assert(offsetof(ScheduleDCLayerCHROMIUM, is_clipped) == 72,
+ "offset of ScheduleDCLayerCHROMIUM is_clipped should be 72");
+static_assert(offsetof(ScheduleDCLayerCHROMIUM, clip_x) == 76,
+ "offset of ScheduleDCLayerCHROMIUM clip_x should be 76");
+static_assert(offsetof(ScheduleDCLayerCHROMIUM, clip_y) == 80,
+ "offset of ScheduleDCLayerCHROMIUM clip_y should be 80");
+static_assert(offsetof(ScheduleDCLayerCHROMIUM, clip_width) == 84,
+ "offset of ScheduleDCLayerCHROMIUM clip_width should be 84");
+static_assert(offsetof(ScheduleDCLayerCHROMIUM, clip_height) == 88,
+ "offset of ScheduleDCLayerCHROMIUM clip_height should be 88");
static_assert(
- offsetof(ScheduleDCLayerCHROMIUM, protected_video_type) == 28,
- "offset of ScheduleDCLayerCHROMIUM protected_video_type should be 28");
+ offsetof(ScheduleDCLayerCHROMIUM, protected_video_type) == 92,
+ "offset of ScheduleDCLayerCHROMIUM protected_video_type should be 92");
struct SetActiveURLCHROMIUM {
typedef SetActiveURLCHROMIUM ValueType;
@@ -16758,30 +17115,25 @@ struct CreateAndTexStorage2DSharedImageINTERNALImmediate {
void SetHeader() { header.SetCmdByTotalSize<ValueType>(ComputeSize()); }
- void Init(GLuint _texture, GLenum _internalFormat, const GLbyte* _mailbox) {
+ void Init(GLuint _texture, const GLbyte* _mailbox) {
SetHeader();
texture = _texture;
- internalFormat = _internalFormat;
memcpy(ImmediateDataAddress(this), _mailbox, ComputeDataSize());
}
- void* Set(void* cmd,
- GLuint _texture,
- GLenum _internalFormat,
- const GLbyte* _mailbox) {
- static_cast<ValueType*>(cmd)->Init(_texture, _internalFormat, _mailbox);
+ void* Set(void* cmd, GLuint _texture, const GLbyte* _mailbox) {
+ static_cast<ValueType*>(cmd)->Init(_texture, _mailbox);
const uint32_t size = ComputeSize();
return NextImmediateCmdAddressTotalSize<ValueType>(cmd, size);
}
gpu::CommandHeader header;
uint32_t texture;
- uint32_t internalFormat;
};
static_assert(
- sizeof(CreateAndTexStorage2DSharedImageINTERNALImmediate) == 12,
- "size of CreateAndTexStorage2DSharedImageINTERNALImmediate should be 12");
+ sizeof(CreateAndTexStorage2DSharedImageINTERNALImmediate) == 8,
+ "size of CreateAndTexStorage2DSharedImageINTERNALImmediate should be 8");
static_assert(offsetof(CreateAndTexStorage2DSharedImageINTERNALImmediate,
header) == 0,
"offset of CreateAndTexStorage2DSharedImageINTERNALImmediate "
@@ -16790,10 +17142,6 @@ static_assert(offsetof(CreateAndTexStorage2DSharedImageINTERNALImmediate,
texture) == 4,
"offset of CreateAndTexStorage2DSharedImageINTERNALImmediate "
"texture should be 4");
-static_assert(offsetof(CreateAndTexStorage2DSharedImageINTERNALImmediate,
- internalFormat) == 8,
- "offset of CreateAndTexStorage2DSharedImageINTERNALImmediate "
- "internalFormat should be 8");
struct BeginSharedImageAccessDirectCHROMIUM {
typedef BeginSharedImageAccessDirectCHROMIUM ValueType;
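[annotation] The block above adds fixed-size wire formats for the new MultiDraw*CHROMIUM commands: the per-draw arrays (firsts, counts, offsets, instance counts) travel in shared memory as shm_id/shm_offset pairs, and the draw commands are bracketed by MultiDrawBeginCHROMIUM/MultiDrawEndCHROMIUM. A hedged encoding sketch against those structs (obtaining `space` from the command buffer is elided; the real helpers live in the generated cmd-helper headers):

    // Assume `space` points at sizeof(cmds::MultiDrawArraysCHROMIUM)
    // writable bytes in the command buffer and the shm ids/offsets are valid.
    cmds::MultiDrawArraysCHROMIUM& cmd =
        *static_cast<cmds::MultiDrawArraysCHROMIUM*>(space);
    void* next = cmd.Set(&cmd, GL_TRIANGLES,
                         firsts_shm_id, firsts_shm_offset,
                         counts_shm_id, counts_shm_offset,
                         /*drawcount=*/16);
    // `next` is where the following command starts (NextCmdAddress).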
diff --git a/chromium/gpu/command_buffer/common/gles2_cmd_format_test_autogen.h b/chromium/gpu/command_buffer/common/gles2_cmd_format_test_autogen.h
index 52f445fe307..62958cdf8a0 100644
--- a/chromium/gpu/command_buffer/common/gles2_cmd_format_test_autogen.h
+++ b/chromium/gpu/command_buffer/common/gles2_cmd_format_test_autogen.h
@@ -2298,6 +2298,112 @@ TEST_F(GLES2FormatTest, ShaderSourceBucket) {
CheckBytesWrittenMatchesExpectedSize(next_cmd, sizeof(cmd));
}
+TEST_F(GLES2FormatTest, MultiDrawBeginCHROMIUM) {
+ cmds::MultiDrawBeginCHROMIUM& cmd =
+ *GetBufferAs<cmds::MultiDrawBeginCHROMIUM>();
+ void* next_cmd = cmd.Set(&cmd, static_cast<GLsizei>(11));
+ EXPECT_EQ(static_cast<uint32_t>(cmds::MultiDrawBeginCHROMIUM::kCmdId),
+ cmd.header.command);
+ EXPECT_EQ(sizeof(cmd), cmd.header.size * 4u);
+ EXPECT_EQ(static_cast<GLsizei>(11), cmd.drawcount);
+ CheckBytesWrittenMatchesExpectedSize(next_cmd, sizeof(cmd));
+}
+
+TEST_F(GLES2FormatTest, MultiDrawEndCHROMIUM) {
+ cmds::MultiDrawEndCHROMIUM& cmd = *GetBufferAs<cmds::MultiDrawEndCHROMIUM>();
+ void* next_cmd = cmd.Set(&cmd);
+ EXPECT_EQ(static_cast<uint32_t>(cmds::MultiDrawEndCHROMIUM::kCmdId),
+ cmd.header.command);
+ EXPECT_EQ(sizeof(cmd), cmd.header.size * 4u);
+ CheckBytesWrittenMatchesExpectedSize(next_cmd, sizeof(cmd));
+}
+
+TEST_F(GLES2FormatTest, MultiDrawArraysCHROMIUM) {
+ cmds::MultiDrawArraysCHROMIUM& cmd =
+ *GetBufferAs<cmds::MultiDrawArraysCHROMIUM>();
+ void* next_cmd =
+ cmd.Set(&cmd, static_cast<GLenum>(11), static_cast<uint32_t>(12),
+ static_cast<uint32_t>(13), static_cast<uint32_t>(14),
+ static_cast<uint32_t>(15), static_cast<GLsizei>(16));
+ EXPECT_EQ(static_cast<uint32_t>(cmds::MultiDrawArraysCHROMIUM::kCmdId),
+ cmd.header.command);
+ EXPECT_EQ(sizeof(cmd), cmd.header.size * 4u);
+ EXPECT_EQ(static_cast<GLenum>(11), cmd.mode);
+ EXPECT_EQ(static_cast<uint32_t>(12), cmd.firsts_shm_id);
+ EXPECT_EQ(static_cast<uint32_t>(13), cmd.firsts_shm_offset);
+ EXPECT_EQ(static_cast<uint32_t>(14), cmd.counts_shm_id);
+ EXPECT_EQ(static_cast<uint32_t>(15), cmd.counts_shm_offset);
+ EXPECT_EQ(static_cast<GLsizei>(16), cmd.drawcount);
+ CheckBytesWrittenMatchesExpectedSize(next_cmd, sizeof(cmd));
+}
+
+TEST_F(GLES2FormatTest, MultiDrawArraysInstancedCHROMIUM) {
+ cmds::MultiDrawArraysInstancedCHROMIUM& cmd =
+ *GetBufferAs<cmds::MultiDrawArraysInstancedCHROMIUM>();
+ void* next_cmd =
+ cmd.Set(&cmd, static_cast<GLenum>(11), static_cast<uint32_t>(12),
+ static_cast<uint32_t>(13), static_cast<uint32_t>(14),
+ static_cast<uint32_t>(15), static_cast<uint32_t>(16),
+ static_cast<uint32_t>(17), static_cast<GLsizei>(18));
+ EXPECT_EQ(
+ static_cast<uint32_t>(cmds::MultiDrawArraysInstancedCHROMIUM::kCmdId),
+ cmd.header.command);
+ EXPECT_EQ(sizeof(cmd), cmd.header.size * 4u);
+ EXPECT_EQ(static_cast<GLenum>(11), cmd.mode);
+ EXPECT_EQ(static_cast<uint32_t>(12), cmd.firsts_shm_id);
+ EXPECT_EQ(static_cast<uint32_t>(13), cmd.firsts_shm_offset);
+ EXPECT_EQ(static_cast<uint32_t>(14), cmd.counts_shm_id);
+ EXPECT_EQ(static_cast<uint32_t>(15), cmd.counts_shm_offset);
+ EXPECT_EQ(static_cast<uint32_t>(16), cmd.instance_counts_shm_id);
+ EXPECT_EQ(static_cast<uint32_t>(17), cmd.instance_counts_shm_offset);
+ EXPECT_EQ(static_cast<GLsizei>(18), cmd.drawcount);
+ CheckBytesWrittenMatchesExpectedSize(next_cmd, sizeof(cmd));
+}
+
+TEST_F(GLES2FormatTest, MultiDrawElementsCHROMIUM) {
+ cmds::MultiDrawElementsCHROMIUM& cmd =
+ *GetBufferAs<cmds::MultiDrawElementsCHROMIUM>();
+ void* next_cmd = cmd.Set(&cmd, static_cast<GLenum>(11),
+ static_cast<uint32_t>(12), static_cast<uint32_t>(13),
+ static_cast<GLenum>(14), static_cast<uint32_t>(15),
+ static_cast<uint32_t>(16), static_cast<GLsizei>(17));
+ EXPECT_EQ(static_cast<uint32_t>(cmds::MultiDrawElementsCHROMIUM::kCmdId),
+ cmd.header.command);
+ EXPECT_EQ(sizeof(cmd), cmd.header.size * 4u);
+ EXPECT_EQ(static_cast<GLenum>(11), cmd.mode);
+ EXPECT_EQ(static_cast<uint32_t>(12), cmd.counts_shm_id);
+ EXPECT_EQ(static_cast<uint32_t>(13), cmd.counts_shm_offset);
+ EXPECT_EQ(static_cast<GLenum>(14), cmd.type);
+ EXPECT_EQ(static_cast<uint32_t>(15), cmd.offsets_shm_id);
+ EXPECT_EQ(static_cast<uint32_t>(16), cmd.offsets_shm_offset);
+ EXPECT_EQ(static_cast<GLsizei>(17), cmd.drawcount);
+ CheckBytesWrittenMatchesExpectedSize(next_cmd, sizeof(cmd));
+}
+
+TEST_F(GLES2FormatTest, MultiDrawElementsInstancedCHROMIUM) {
+ cmds::MultiDrawElementsInstancedCHROMIUM& cmd =
+ *GetBufferAs<cmds::MultiDrawElementsInstancedCHROMIUM>();
+ void* next_cmd = cmd.Set(&cmd, static_cast<GLenum>(11),
+ static_cast<uint32_t>(12), static_cast<uint32_t>(13),
+ static_cast<GLenum>(14), static_cast<uint32_t>(15),
+ static_cast<uint32_t>(16), static_cast<uint32_t>(17),
+ static_cast<uint32_t>(18), static_cast<GLsizei>(19));
+ EXPECT_EQ(
+ static_cast<uint32_t>(cmds::MultiDrawElementsInstancedCHROMIUM::kCmdId),
+ cmd.header.command);
+ EXPECT_EQ(sizeof(cmd), cmd.header.size * 4u);
+ EXPECT_EQ(static_cast<GLenum>(11), cmd.mode);
+ EXPECT_EQ(static_cast<uint32_t>(12), cmd.counts_shm_id);
+ EXPECT_EQ(static_cast<uint32_t>(13), cmd.counts_shm_offset);
+ EXPECT_EQ(static_cast<GLenum>(14), cmd.type);
+ EXPECT_EQ(static_cast<uint32_t>(15), cmd.offsets_shm_id);
+ EXPECT_EQ(static_cast<uint32_t>(16), cmd.offsets_shm_offset);
+ EXPECT_EQ(static_cast<uint32_t>(17), cmd.instance_counts_shm_id);
+ EXPECT_EQ(static_cast<uint32_t>(18), cmd.instance_counts_shm_offset);
+ EXPECT_EQ(static_cast<GLsizei>(19), cmd.drawcount);
+ CheckBytesWrittenMatchesExpectedSize(next_cmd, sizeof(cmd));
+}
+
TEST_F(GLES2FormatTest, StencilFunc) {
cmds::StencilFunc& cmd = *GetBufferAs<cmds::StencilFunc>();
void* next_cmd = cmd.Set(&cmd, static_cast<GLenum>(11),
@@ -4552,21 +4658,6 @@ TEST_F(GLES2FormatTest, InsertFenceSyncCHROMIUM) {
CheckBytesWrittenMatchesExpectedSize(next_cmd, sizeof(cmd));
}
-TEST_F(GLES2FormatTest, WaitSyncTokenCHROMIUM) {
- cmds::WaitSyncTokenCHROMIUM& cmd =
- *GetBufferAs<cmds::WaitSyncTokenCHROMIUM>();
- void* next_cmd =
- cmd.Set(&cmd, static_cast<GLint>(11), static_cast<GLuint64>(12),
- static_cast<GLuint64>(13));
- EXPECT_EQ(static_cast<uint32_t>(cmds::WaitSyncTokenCHROMIUM::kCmdId),
- cmd.header.command);
- EXPECT_EQ(sizeof(cmd), cmd.header.size * 4u);
- EXPECT_EQ(static_cast<GLint>(11), cmd.namespace_id);
- EXPECT_EQ(static_cast<GLuint64>(12), cmd.command_buffer_id());
- EXPECT_EQ(static_cast<GLuint64>(13), cmd.release_count());
- CheckBytesWrittenMatchesExpectedSize(next_cmd, sizeof(cmd));
-}
-
TEST_F(GLES2FormatTest, UnpremultiplyAndDitherCopyCHROMIUM) {
cmds::UnpremultiplyAndDitherCopyCHROMIUM& cmd =
*GetBufferAs<cmds::UnpremultiplyAndDitherCopyCHROMIUM>();
@@ -4725,41 +4816,46 @@ TEST_F(GLES2FormatTest, FlushDriverCachesCHROMIUM) {
CheckBytesWrittenMatchesExpectedSize(next_cmd, sizeof(cmd));
}
-TEST_F(GLES2FormatTest, ScheduleDCLayerSharedStateCHROMIUM) {
- cmds::ScheduleDCLayerSharedStateCHROMIUM& cmd =
- *GetBufferAs<cmds::ScheduleDCLayerSharedStateCHROMIUM>();
- void* next_cmd = cmd.Set(&cmd, static_cast<GLfloat>(11),
- static_cast<GLboolean>(12), static_cast<GLint>(13),
- static_cast<GLuint>(14), static_cast<GLuint>(15));
- EXPECT_EQ(
- static_cast<uint32_t>(cmds::ScheduleDCLayerSharedStateCHROMIUM::kCmdId),
- cmd.header.command);
- EXPECT_EQ(sizeof(cmd), cmd.header.size * 4u);
- EXPECT_EQ(static_cast<GLfloat>(11), cmd.opacity);
- EXPECT_EQ(static_cast<GLboolean>(12), cmd.is_clipped);
- EXPECT_EQ(static_cast<GLint>(13), cmd.z_order);
- EXPECT_EQ(static_cast<GLuint>(14), cmd.shm_id);
- EXPECT_EQ(static_cast<GLuint>(15), cmd.shm_offset);
- CheckBytesWrittenMatchesExpectedSize(next_cmd, sizeof(cmd));
-}
-
TEST_F(GLES2FormatTest, ScheduleDCLayerCHROMIUM) {
cmds::ScheduleDCLayerCHROMIUM& cmd =
*GetBufferAs<cmds::ScheduleDCLayerCHROMIUM>();
- void* next_cmd = cmd.Set(&cmd, static_cast<GLsizei>(11),
- static_cast<GLuint>(12), static_cast<GLuint>(13),
- static_cast<GLuint>(14), static_cast<GLuint>(15),
- static_cast<GLuint>(16), static_cast<GLuint>(17));
+ void* next_cmd = cmd.Set(
+ &cmd, static_cast<GLuint>(11), static_cast<GLuint>(12),
+ static_cast<GLint>(13), static_cast<GLint>(14), static_cast<GLint>(15),
+ static_cast<GLint>(16), static_cast<GLint>(17), static_cast<GLint>(18),
+ static_cast<GLint>(19), static_cast<GLint>(20), static_cast<GLint>(21),
+ static_cast<GLfloat>(22), static_cast<GLfloat>(23),
+ static_cast<GLfloat>(24), static_cast<GLfloat>(25),
+ static_cast<GLfloat>(26), static_cast<GLfloat>(27),
+ static_cast<GLboolean>(28), static_cast<GLint>(29),
+ static_cast<GLint>(30), static_cast<GLint>(31), static_cast<GLint>(32),
+ static_cast<GLuint>(33));
EXPECT_EQ(static_cast<uint32_t>(cmds::ScheduleDCLayerCHROMIUM::kCmdId),
cmd.header.command);
EXPECT_EQ(sizeof(cmd), cmd.header.size * 4u);
- EXPECT_EQ(static_cast<GLsizei>(11), cmd.num_textures);
- EXPECT_EQ(static_cast<GLuint>(12), cmd.background_color);
- EXPECT_EQ(static_cast<GLuint>(13), cmd.edge_aa_mask);
- EXPECT_EQ(static_cast<GLuint>(14), cmd.filter);
- EXPECT_EQ(static_cast<GLuint>(15), cmd.shm_id);
- EXPECT_EQ(static_cast<GLuint>(16), cmd.shm_offset);
- EXPECT_EQ(static_cast<GLuint>(17), cmd.protected_video_type);
+ EXPECT_EQ(static_cast<GLuint>(11), cmd.y_texture_id);
+ EXPECT_EQ(static_cast<GLuint>(12), cmd.uv_texture_id);
+ EXPECT_EQ(static_cast<GLint>(13), cmd.z_order);
+ EXPECT_EQ(static_cast<GLint>(14), cmd.content_x);
+ EXPECT_EQ(static_cast<GLint>(15), cmd.content_y);
+ EXPECT_EQ(static_cast<GLint>(16), cmd.content_width);
+ EXPECT_EQ(static_cast<GLint>(17), cmd.content_height);
+ EXPECT_EQ(static_cast<GLint>(18), cmd.quad_x);
+ EXPECT_EQ(static_cast<GLint>(19), cmd.quad_y);
+ EXPECT_EQ(static_cast<GLint>(20), cmd.quad_width);
+ EXPECT_EQ(static_cast<GLint>(21), cmd.quad_height);
+ EXPECT_EQ(static_cast<GLfloat>(22), cmd.transform_c1r1);
+ EXPECT_EQ(static_cast<GLfloat>(23), cmd.transform_c2r1);
+ EXPECT_EQ(static_cast<GLfloat>(24), cmd.transform_c1r2);
+ EXPECT_EQ(static_cast<GLfloat>(25), cmd.transform_c2r2);
+ EXPECT_EQ(static_cast<GLfloat>(26), cmd.transform_tx);
+ EXPECT_EQ(static_cast<GLfloat>(27), cmd.transform_ty);
+ EXPECT_EQ(static_cast<GLboolean>(28), cmd.is_clipped);
+ EXPECT_EQ(static_cast<GLint>(29), cmd.clip_x);
+ EXPECT_EQ(static_cast<GLint>(30), cmd.clip_y);
+ EXPECT_EQ(static_cast<GLint>(31), cmd.clip_width);
+ EXPECT_EQ(static_cast<GLint>(32), cmd.clip_height);
+ EXPECT_EQ(static_cast<GLuint>(33), cmd.protected_video_type);
CheckBytesWrittenMatchesExpectedSize(next_cmd, sizeof(cmd));
}
@@ -5584,8 +5680,7 @@ TEST_F(GLES2FormatTest, CreateAndTexStorage2DSharedImageINTERNALImmediate) {
};
cmds::CreateAndTexStorage2DSharedImageINTERNALImmediate& cmd =
*GetBufferAs<cmds::CreateAndTexStorage2DSharedImageINTERNALImmediate>();
- void* next_cmd =
- cmd.Set(&cmd, static_cast<GLuint>(11), static_cast<GLenum>(12), data);
+ void* next_cmd = cmd.Set(&cmd, static_cast<GLuint>(11), data);
EXPECT_EQ(
static_cast<uint32_t>(
cmds::CreateAndTexStorage2DSharedImageINTERNALImmediate::kCmdId),
@@ -5593,7 +5688,6 @@ TEST_F(GLES2FormatTest, CreateAndTexStorage2DSharedImageINTERNALImmediate) {
EXPECT_EQ(sizeof(cmd) + RoundSizeToMultipleOfEntries(sizeof(data)),
cmd.header.size * 4u);
EXPECT_EQ(static_cast<GLuint>(11), cmd.texture);
- EXPECT_EQ(static_cast<GLenum>(12), cmd.internalFormat);
CheckBytesWrittenMatchesExpectedSize(
next_cmd, sizeof(cmd) + RoundSizeToMultipleOfEntries(sizeof(data)));
}
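[annotation] Each format test above follows the same recipe: Set() fills the struct, header.command must equal kCmdId, and header.size is counted in 4-byte command-buffer entries, hence the sizeof(cmd) == cmd.header.size * 4u check. A worked instance from the static_asserts earlier in the diff (sketch):

    // MultiDrawArraysCHROMIUM is 28 bytes, i.e. 28 / 4 = 7 command entries.
    static_assert(sizeof(cmds::MultiDrawArraysCHROMIUM) % 4 == 0,
                  "commands are multiples of 4 bytes");
    static_assert(sizeof(cmds::MultiDrawArraysCHROMIUM) / 4 == 7,
                  "MultiDrawArraysCHROMIUM occupies 7 entries");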
diff --git a/chromium/gpu/command_buffer/common/gles2_cmd_ids_autogen.h b/chromium/gpu/command_buffer/common/gles2_cmd_ids_autogen.h
index 753b9c30960..c3321ba34c6 100644
--- a/chromium/gpu/command_buffer/common/gles2_cmd_ids_autogen.h
+++ b/chromium/gpu/command_buffer/common/gles2_cmd_ids_autogen.h
@@ -165,193 +165,197 @@
OP(Scissor) /* 406 */ \
OP(ShaderBinary) /* 407 */ \
OP(ShaderSourceBucket) /* 408 */ \
- OP(StencilFunc) /* 409 */ \
- OP(StencilFuncSeparate) /* 410 */ \
- OP(StencilMask) /* 411 */ \
- OP(StencilMaskSeparate) /* 412 */ \
- OP(StencilOp) /* 413 */ \
- OP(StencilOpSeparate) /* 414 */ \
- OP(TexImage2D) /* 415 */ \
- OP(TexImage3D) /* 416 */ \
- OP(TexParameterf) /* 417 */ \
- OP(TexParameterfvImmediate) /* 418 */ \
- OP(TexParameteri) /* 419 */ \
- OP(TexParameterivImmediate) /* 420 */ \
- OP(TexStorage3D) /* 421 */ \
- OP(TexSubImage2D) /* 422 */ \
- OP(TexSubImage3D) /* 423 */ \
- OP(TransformFeedbackVaryingsBucket) /* 424 */ \
- OP(Uniform1f) /* 425 */ \
- OP(Uniform1fvImmediate) /* 426 */ \
- OP(Uniform1i) /* 427 */ \
- OP(Uniform1ivImmediate) /* 428 */ \
- OP(Uniform1ui) /* 429 */ \
- OP(Uniform1uivImmediate) /* 430 */ \
- OP(Uniform2f) /* 431 */ \
- OP(Uniform2fvImmediate) /* 432 */ \
- OP(Uniform2i) /* 433 */ \
- OP(Uniform2ivImmediate) /* 434 */ \
- OP(Uniform2ui) /* 435 */ \
- OP(Uniform2uivImmediate) /* 436 */ \
- OP(Uniform3f) /* 437 */ \
- OP(Uniform3fvImmediate) /* 438 */ \
- OP(Uniform3i) /* 439 */ \
- OP(Uniform3ivImmediate) /* 440 */ \
- OP(Uniform3ui) /* 441 */ \
- OP(Uniform3uivImmediate) /* 442 */ \
- OP(Uniform4f) /* 443 */ \
- OP(Uniform4fvImmediate) /* 444 */ \
- OP(Uniform4i) /* 445 */ \
- OP(Uniform4ivImmediate) /* 446 */ \
- OP(Uniform4ui) /* 447 */ \
- OP(Uniform4uivImmediate) /* 448 */ \
- OP(UniformBlockBinding) /* 449 */ \
- OP(UniformMatrix2fvImmediate) /* 450 */ \
- OP(UniformMatrix2x3fvImmediate) /* 451 */ \
- OP(UniformMatrix2x4fvImmediate) /* 452 */ \
- OP(UniformMatrix3fvImmediate) /* 453 */ \
- OP(UniformMatrix3x2fvImmediate) /* 454 */ \
- OP(UniformMatrix3x4fvImmediate) /* 455 */ \
- OP(UniformMatrix4fvImmediate) /* 456 */ \
- OP(UniformMatrix4x2fvImmediate) /* 457 */ \
- OP(UniformMatrix4x3fvImmediate) /* 458 */ \
- OP(UseProgram) /* 459 */ \
- OP(ValidateProgram) /* 460 */ \
- OP(VertexAttrib1f) /* 461 */ \
- OP(VertexAttrib1fvImmediate) /* 462 */ \
- OP(VertexAttrib2f) /* 463 */ \
- OP(VertexAttrib2fvImmediate) /* 464 */ \
- OP(VertexAttrib3f) /* 465 */ \
- OP(VertexAttrib3fvImmediate) /* 466 */ \
- OP(VertexAttrib4f) /* 467 */ \
- OP(VertexAttrib4fvImmediate) /* 468 */ \
- OP(VertexAttribI4i) /* 469 */ \
- OP(VertexAttribI4ivImmediate) /* 470 */ \
- OP(VertexAttribI4ui) /* 471 */ \
- OP(VertexAttribI4uivImmediate) /* 472 */ \
- OP(VertexAttribIPointer) /* 473 */ \
- OP(VertexAttribPointer) /* 474 */ \
- OP(Viewport) /* 475 */ \
- OP(WaitSync) /* 476 */ \
- OP(BlitFramebufferCHROMIUM) /* 477 */ \
- OP(RenderbufferStorageMultisampleCHROMIUM) /* 478 */ \
- OP(RenderbufferStorageMultisampleEXT) /* 479 */ \
- OP(FramebufferTexture2DMultisampleEXT) /* 480 */ \
- OP(TexStorage2DEXT) /* 481 */ \
- OP(GenQueriesEXTImmediate) /* 482 */ \
- OP(DeleteQueriesEXTImmediate) /* 483 */ \
- OP(QueryCounterEXT) /* 484 */ \
- OP(BeginQueryEXT) /* 485 */ \
- OP(BeginTransformFeedback) /* 486 */ \
- OP(EndQueryEXT) /* 487 */ \
- OP(EndTransformFeedback) /* 488 */ \
- OP(SetDisjointValueSyncCHROMIUM) /* 489 */ \
- OP(InsertEventMarkerEXT) /* 490 */ \
- OP(PushGroupMarkerEXT) /* 491 */ \
- OP(PopGroupMarkerEXT) /* 492 */ \
- OP(GenVertexArraysOESImmediate) /* 493 */ \
- OP(DeleteVertexArraysOESImmediate) /* 494 */ \
- OP(IsVertexArrayOES) /* 495 */ \
- OP(BindVertexArrayOES) /* 496 */ \
- OP(FramebufferParameteri) /* 497 */ \
- OP(BindImageTexture) /* 498 */ \
- OP(DispatchCompute) /* 499 */ \
- OP(MemoryBarrierEXT) /* 500 */ \
- OP(MemoryBarrierByRegion) /* 501 */ \
- OP(SwapBuffers) /* 502 */ \
- OP(GetMaxValueInBufferCHROMIUM) /* 503 */ \
- OP(EnableFeatureCHROMIUM) /* 504 */ \
- OP(MapBufferRange) /* 505 */ \
- OP(UnmapBuffer) /* 506 */ \
- OP(FlushMappedBufferRange) /* 507 */ \
- OP(ResizeCHROMIUM) /* 508 */ \
- OP(GetRequestableExtensionsCHROMIUM) /* 509 */ \
- OP(RequestExtensionCHROMIUM) /* 510 */ \
- OP(GetProgramInfoCHROMIUM) /* 511 */ \
- OP(GetUniformBlocksCHROMIUM) /* 512 */ \
- OP(GetTransformFeedbackVaryingsCHROMIUM) /* 513 */ \
- OP(GetUniformsES3CHROMIUM) /* 514 */ \
- OP(DescheduleUntilFinishedCHROMIUM) /* 515 */ \
- OP(GetTranslatedShaderSourceANGLE) /* 516 */ \
- OP(PostSubBufferCHROMIUM) /* 517 */ \
- OP(CopyTextureCHROMIUM) /* 518 */ \
- OP(CopySubTextureCHROMIUM) /* 519 */ \
- OP(DrawArraysInstancedANGLE) /* 520 */ \
- OP(DrawElementsInstancedANGLE) /* 521 */ \
- OP(VertexAttribDivisorANGLE) /* 522 */ \
- OP(ProduceTextureDirectCHROMIUMImmediate) /* 523 */ \
- OP(CreateAndConsumeTextureINTERNALImmediate) /* 524 */ \
- OP(BindUniformLocationCHROMIUMBucket) /* 525 */ \
- OP(BindTexImage2DCHROMIUM) /* 526 */ \
- OP(BindTexImage2DWithInternalformatCHROMIUM) /* 527 */ \
- OP(ReleaseTexImage2DCHROMIUM) /* 528 */ \
- OP(TraceBeginCHROMIUM) /* 529 */ \
- OP(TraceEndCHROMIUM) /* 530 */ \
- OP(DiscardFramebufferEXTImmediate) /* 531 */ \
- OP(LoseContextCHROMIUM) /* 532 */ \
- OP(InsertFenceSyncCHROMIUM) /* 533 */ \
- OP(WaitSyncTokenCHROMIUM) /* 534 */ \
- OP(UnpremultiplyAndDitherCopyCHROMIUM) /* 535 */ \
- OP(DrawBuffersEXTImmediate) /* 536 */ \
- OP(DiscardBackbufferCHROMIUM) /* 537 */ \
- OP(ScheduleOverlayPlaneCHROMIUM) /* 538 */ \
- OP(ScheduleCALayerSharedStateCHROMIUM) /* 539 */ \
- OP(ScheduleCALayerCHROMIUM) /* 540 */ \
- OP(ScheduleCALayerInUseQueryCHROMIUMImmediate) /* 541 */ \
- OP(CommitOverlayPlanesCHROMIUM) /* 542 */ \
- OP(FlushDriverCachesCHROMIUM) /* 543 */ \
- OP(ScheduleDCLayerSharedStateCHROMIUM) /* 544 */ \
- OP(ScheduleDCLayerCHROMIUM) /* 545 */ \
- OP(SetActiveURLCHROMIUM) /* 546 */ \
- OP(MatrixLoadfCHROMIUMImmediate) /* 547 */ \
- OP(MatrixLoadIdentityCHROMIUM) /* 548 */ \
- OP(GenPathsCHROMIUM) /* 549 */ \
- OP(DeletePathsCHROMIUM) /* 550 */ \
- OP(IsPathCHROMIUM) /* 551 */ \
- OP(PathCommandsCHROMIUM) /* 552 */ \
- OP(PathParameterfCHROMIUM) /* 553 */ \
- OP(PathParameteriCHROMIUM) /* 554 */ \
- OP(PathStencilFuncCHROMIUM) /* 555 */ \
- OP(StencilFillPathCHROMIUM) /* 556 */ \
- OP(StencilStrokePathCHROMIUM) /* 557 */ \
- OP(CoverFillPathCHROMIUM) /* 558 */ \
- OP(CoverStrokePathCHROMIUM) /* 559 */ \
- OP(StencilThenCoverFillPathCHROMIUM) /* 560 */ \
- OP(StencilThenCoverStrokePathCHROMIUM) /* 561 */ \
- OP(StencilFillPathInstancedCHROMIUM) /* 562 */ \
- OP(StencilStrokePathInstancedCHROMIUM) /* 563 */ \
- OP(CoverFillPathInstancedCHROMIUM) /* 564 */ \
- OP(CoverStrokePathInstancedCHROMIUM) /* 565 */ \
- OP(StencilThenCoverFillPathInstancedCHROMIUM) /* 566 */ \
- OP(StencilThenCoverStrokePathInstancedCHROMIUM) /* 567 */ \
- OP(BindFragmentInputLocationCHROMIUMBucket) /* 568 */ \
- OP(ProgramPathFragmentInputGenCHROMIUM) /* 569 */ \
- OP(CoverageModulationCHROMIUM) /* 570 */ \
- OP(BlendBarrierKHR) /* 571 */ \
- OP(ApplyScreenSpaceAntialiasingCHROMIUM) /* 572 */ \
- OP(BindFragDataLocationIndexedEXTBucket) /* 573 */ \
- OP(BindFragDataLocationEXTBucket) /* 574 */ \
- OP(GetFragDataIndexEXT) /* 575 */ \
- OP(UniformMatrix4fvStreamTextureMatrixCHROMIUMImmediate) /* 576 */ \
- OP(OverlayPromotionHintCHROMIUM) /* 577 */ \
- OP(SwapBuffersWithBoundsCHROMIUMImmediate) /* 578 */ \
- OP(SetDrawRectangleCHROMIUM) /* 579 */ \
- OP(SetEnableDCLayersCHROMIUM) /* 580 */ \
- OP(InitializeDiscardableTextureCHROMIUM) /* 581 */ \
- OP(UnlockDiscardableTextureCHROMIUM) /* 582 */ \
- OP(LockDiscardableTextureCHROMIUM) /* 583 */ \
- OP(TexStorage2DImageCHROMIUM) /* 584 */ \
- OP(SetColorSpaceMetadataCHROMIUM) /* 585 */ \
- OP(WindowRectanglesEXTImmediate) /* 586 */ \
- OP(CreateGpuFenceINTERNAL) /* 587 */ \
- OP(WaitGpuFenceCHROMIUM) /* 588 */ \
- OP(DestroyGpuFenceCHROMIUM) /* 589 */ \
- OP(SetReadbackBufferShadowAllocationINTERNAL) /* 590 */ \
- OP(FramebufferTextureMultiviewLayeredANGLE) /* 591 */ \
- OP(MaxShaderCompilerThreadsKHR) /* 592 */ \
- OP(CreateAndTexStorage2DSharedImageINTERNALImmediate) /* 593 */ \
- OP(BeginSharedImageAccessDirectCHROMIUM) /* 594 */ \
- OP(EndSharedImageAccessDirectCHROMIUM) /* 595 */
+ OP(MultiDrawBeginCHROMIUM) /* 409 */ \
+ OP(MultiDrawEndCHROMIUM) /* 410 */ \
+ OP(MultiDrawArraysCHROMIUM) /* 411 */ \
+ OP(MultiDrawArraysInstancedCHROMIUM) /* 412 */ \
+ OP(MultiDrawElementsCHROMIUM) /* 413 */ \
+ OP(MultiDrawElementsInstancedCHROMIUM) /* 414 */ \
+ OP(StencilFunc) /* 415 */ \
+ OP(StencilFuncSeparate) /* 416 */ \
+ OP(StencilMask) /* 417 */ \
+ OP(StencilMaskSeparate) /* 418 */ \
+ OP(StencilOp) /* 419 */ \
+ OP(StencilOpSeparate) /* 420 */ \
+ OP(TexImage2D) /* 421 */ \
+ OP(TexImage3D) /* 422 */ \
+ OP(TexParameterf) /* 423 */ \
+ OP(TexParameterfvImmediate) /* 424 */ \
+ OP(TexParameteri) /* 425 */ \
+ OP(TexParameterivImmediate) /* 426 */ \
+ OP(TexStorage3D) /* 427 */ \
+ OP(TexSubImage2D) /* 428 */ \
+ OP(TexSubImage3D) /* 429 */ \
+ OP(TransformFeedbackVaryingsBucket) /* 430 */ \
+ OP(Uniform1f) /* 431 */ \
+ OP(Uniform1fvImmediate) /* 432 */ \
+ OP(Uniform1i) /* 433 */ \
+ OP(Uniform1ivImmediate) /* 434 */ \
+ OP(Uniform1ui) /* 435 */ \
+ OP(Uniform1uivImmediate) /* 436 */ \
+ OP(Uniform2f) /* 437 */ \
+ OP(Uniform2fvImmediate) /* 438 */ \
+ OP(Uniform2i) /* 439 */ \
+ OP(Uniform2ivImmediate) /* 440 */ \
+ OP(Uniform2ui) /* 441 */ \
+ OP(Uniform2uivImmediate) /* 442 */ \
+ OP(Uniform3f) /* 443 */ \
+ OP(Uniform3fvImmediate) /* 444 */ \
+ OP(Uniform3i) /* 445 */ \
+ OP(Uniform3ivImmediate) /* 446 */ \
+ OP(Uniform3ui) /* 447 */ \
+ OP(Uniform3uivImmediate) /* 448 */ \
+ OP(Uniform4f) /* 449 */ \
+ OP(Uniform4fvImmediate) /* 450 */ \
+ OP(Uniform4i) /* 451 */ \
+ OP(Uniform4ivImmediate) /* 452 */ \
+ OP(Uniform4ui) /* 453 */ \
+ OP(Uniform4uivImmediate) /* 454 */ \
+ OP(UniformBlockBinding) /* 455 */ \
+ OP(UniformMatrix2fvImmediate) /* 456 */ \
+ OP(UniformMatrix2x3fvImmediate) /* 457 */ \
+ OP(UniformMatrix2x4fvImmediate) /* 458 */ \
+ OP(UniformMatrix3fvImmediate) /* 459 */ \
+ OP(UniformMatrix3x2fvImmediate) /* 460 */ \
+ OP(UniformMatrix3x4fvImmediate) /* 461 */ \
+ OP(UniformMatrix4fvImmediate) /* 462 */ \
+ OP(UniformMatrix4x2fvImmediate) /* 463 */ \
+ OP(UniformMatrix4x3fvImmediate) /* 464 */ \
+ OP(UseProgram) /* 465 */ \
+ OP(ValidateProgram) /* 466 */ \
+ OP(VertexAttrib1f) /* 467 */ \
+ OP(VertexAttrib1fvImmediate) /* 468 */ \
+ OP(VertexAttrib2f) /* 469 */ \
+ OP(VertexAttrib2fvImmediate) /* 470 */ \
+ OP(VertexAttrib3f) /* 471 */ \
+ OP(VertexAttrib3fvImmediate) /* 472 */ \
+ OP(VertexAttrib4f) /* 473 */ \
+ OP(VertexAttrib4fvImmediate) /* 474 */ \
+ OP(VertexAttribI4i) /* 475 */ \
+ OP(VertexAttribI4ivImmediate) /* 476 */ \
+ OP(VertexAttribI4ui) /* 477 */ \
+ OP(VertexAttribI4uivImmediate) /* 478 */ \
+ OP(VertexAttribIPointer) /* 479 */ \
+ OP(VertexAttribPointer) /* 480 */ \
+ OP(Viewport) /* 481 */ \
+ OP(WaitSync) /* 482 */ \
+ OP(BlitFramebufferCHROMIUM) /* 483 */ \
+ OP(RenderbufferStorageMultisampleCHROMIUM) /* 484 */ \
+ OP(RenderbufferStorageMultisampleEXT) /* 485 */ \
+ OP(FramebufferTexture2DMultisampleEXT) /* 486 */ \
+ OP(TexStorage2DEXT) /* 487 */ \
+ OP(GenQueriesEXTImmediate) /* 488 */ \
+ OP(DeleteQueriesEXTImmediate) /* 489 */ \
+ OP(QueryCounterEXT) /* 490 */ \
+ OP(BeginQueryEXT) /* 491 */ \
+ OP(BeginTransformFeedback) /* 492 */ \
+ OP(EndQueryEXT) /* 493 */ \
+ OP(EndTransformFeedback) /* 494 */ \
+ OP(SetDisjointValueSyncCHROMIUM) /* 495 */ \
+ OP(InsertEventMarkerEXT) /* 496 */ \
+ OP(PushGroupMarkerEXT) /* 497 */ \
+ OP(PopGroupMarkerEXT) /* 498 */ \
+ OP(GenVertexArraysOESImmediate) /* 499 */ \
+ OP(DeleteVertexArraysOESImmediate) /* 500 */ \
+ OP(IsVertexArrayOES) /* 501 */ \
+ OP(BindVertexArrayOES) /* 502 */ \
+ OP(FramebufferParameteri) /* 503 */ \
+ OP(BindImageTexture) /* 504 */ \
+ OP(DispatchCompute) /* 505 */ \
+ OP(MemoryBarrierEXT) /* 506 */ \
+ OP(MemoryBarrierByRegion) /* 507 */ \
+ OP(SwapBuffers) /* 508 */ \
+ OP(GetMaxValueInBufferCHROMIUM) /* 509 */ \
+ OP(EnableFeatureCHROMIUM) /* 510 */ \
+ OP(MapBufferRange) /* 511 */ \
+ OP(UnmapBuffer) /* 512 */ \
+ OP(FlushMappedBufferRange) /* 513 */ \
+ OP(ResizeCHROMIUM) /* 514 */ \
+ OP(GetRequestableExtensionsCHROMIUM) /* 515 */ \
+ OP(RequestExtensionCHROMIUM) /* 516 */ \
+ OP(GetProgramInfoCHROMIUM) /* 517 */ \
+ OP(GetUniformBlocksCHROMIUM) /* 518 */ \
+ OP(GetTransformFeedbackVaryingsCHROMIUM) /* 519 */ \
+ OP(GetUniformsES3CHROMIUM) /* 520 */ \
+ OP(DescheduleUntilFinishedCHROMIUM) /* 521 */ \
+ OP(GetTranslatedShaderSourceANGLE) /* 522 */ \
+ OP(PostSubBufferCHROMIUM) /* 523 */ \
+ OP(CopyTextureCHROMIUM) /* 524 */ \
+ OP(CopySubTextureCHROMIUM) /* 525 */ \
+ OP(DrawArraysInstancedANGLE) /* 526 */ \
+ OP(DrawElementsInstancedANGLE) /* 527 */ \
+ OP(VertexAttribDivisorANGLE) /* 528 */ \
+ OP(ProduceTextureDirectCHROMIUMImmediate) /* 529 */ \
+ OP(CreateAndConsumeTextureINTERNALImmediate) /* 530 */ \
+ OP(BindUniformLocationCHROMIUMBucket) /* 531 */ \
+ OP(BindTexImage2DCHROMIUM) /* 532 */ \
+ OP(BindTexImage2DWithInternalformatCHROMIUM) /* 533 */ \
+ OP(ReleaseTexImage2DCHROMIUM) /* 534 */ \
+ OP(TraceBeginCHROMIUM) /* 535 */ \
+ OP(TraceEndCHROMIUM) /* 536 */ \
+ OP(DiscardFramebufferEXTImmediate) /* 537 */ \
+ OP(LoseContextCHROMIUM) /* 538 */ \
+ OP(InsertFenceSyncCHROMIUM) /* 539 */ \
+ OP(UnpremultiplyAndDitherCopyCHROMIUM) /* 540 */ \
+ OP(DrawBuffersEXTImmediate) /* 541 */ \
+ OP(DiscardBackbufferCHROMIUM) /* 542 */ \
+ OP(ScheduleOverlayPlaneCHROMIUM) /* 543 */ \
+ OP(ScheduleCALayerSharedStateCHROMIUM) /* 544 */ \
+ OP(ScheduleCALayerCHROMIUM) /* 545 */ \
+ OP(ScheduleCALayerInUseQueryCHROMIUMImmediate) /* 546 */ \
+ OP(CommitOverlayPlanesCHROMIUM) /* 547 */ \
+ OP(FlushDriverCachesCHROMIUM) /* 548 */ \
+ OP(ScheduleDCLayerCHROMIUM) /* 549 */ \
+ OP(SetActiveURLCHROMIUM) /* 550 */ \
+ OP(MatrixLoadfCHROMIUMImmediate) /* 551 */ \
+ OP(MatrixLoadIdentityCHROMIUM) /* 552 */ \
+ OP(GenPathsCHROMIUM) /* 553 */ \
+ OP(DeletePathsCHROMIUM) /* 554 */ \
+ OP(IsPathCHROMIUM) /* 555 */ \
+ OP(PathCommandsCHROMIUM) /* 556 */ \
+ OP(PathParameterfCHROMIUM) /* 557 */ \
+ OP(PathParameteriCHROMIUM) /* 558 */ \
+ OP(PathStencilFuncCHROMIUM) /* 559 */ \
+ OP(StencilFillPathCHROMIUM) /* 560 */ \
+ OP(StencilStrokePathCHROMIUM) /* 561 */ \
+ OP(CoverFillPathCHROMIUM) /* 562 */ \
+ OP(CoverStrokePathCHROMIUM) /* 563 */ \
+ OP(StencilThenCoverFillPathCHROMIUM) /* 564 */ \
+ OP(StencilThenCoverStrokePathCHROMIUM) /* 565 */ \
+ OP(StencilFillPathInstancedCHROMIUM) /* 566 */ \
+ OP(StencilStrokePathInstancedCHROMIUM) /* 567 */ \
+ OP(CoverFillPathInstancedCHROMIUM) /* 568 */ \
+ OP(CoverStrokePathInstancedCHROMIUM) /* 569 */ \
+ OP(StencilThenCoverFillPathInstancedCHROMIUM) /* 570 */ \
+ OP(StencilThenCoverStrokePathInstancedCHROMIUM) /* 571 */ \
+ OP(BindFragmentInputLocationCHROMIUMBucket) /* 572 */ \
+ OP(ProgramPathFragmentInputGenCHROMIUM) /* 573 */ \
+ OP(CoverageModulationCHROMIUM) /* 574 */ \
+ OP(BlendBarrierKHR) /* 575 */ \
+ OP(ApplyScreenSpaceAntialiasingCHROMIUM) /* 576 */ \
+ OP(BindFragDataLocationIndexedEXTBucket) /* 577 */ \
+ OP(BindFragDataLocationEXTBucket) /* 578 */ \
+ OP(GetFragDataIndexEXT) /* 579 */ \
+ OP(UniformMatrix4fvStreamTextureMatrixCHROMIUMImmediate) /* 580 */ \
+ OP(OverlayPromotionHintCHROMIUM) /* 581 */ \
+ OP(SwapBuffersWithBoundsCHROMIUMImmediate) /* 582 */ \
+ OP(SetDrawRectangleCHROMIUM) /* 583 */ \
+ OP(SetEnableDCLayersCHROMIUM) /* 584 */ \
+ OP(InitializeDiscardableTextureCHROMIUM) /* 585 */ \
+ OP(UnlockDiscardableTextureCHROMIUM) /* 586 */ \
+ OP(LockDiscardableTextureCHROMIUM) /* 587 */ \
+ OP(TexStorage2DImageCHROMIUM) /* 588 */ \
+ OP(SetColorSpaceMetadataCHROMIUM) /* 589 */ \
+ OP(WindowRectanglesEXTImmediate) /* 590 */ \
+ OP(CreateGpuFenceINTERNAL) /* 591 */ \
+ OP(WaitGpuFenceCHROMIUM) /* 592 */ \
+ OP(DestroyGpuFenceCHROMIUM) /* 593 */ \
+ OP(SetReadbackBufferShadowAllocationINTERNAL) /* 594 */ \
+ OP(FramebufferTextureMultiviewLayeredANGLE) /* 595 */ \
+ OP(MaxShaderCompilerThreadsKHR) /* 596 */ \
+ OP(CreateAndTexStorage2DSharedImageINTERNALImmediate) /* 597 */ \
+ OP(BeginSharedImageAccessDirectCHROMIUM) /* 598 */ \
+ OP(EndSharedImageAccessDirectCHROMIUM) /* 599 */
enum CommandId {
kOneBeforeStartPoint =
diff --git a/chromium/gpu/command_buffer/common/gles2_cmd_utils.cc b/chromium/gpu/command_buffer/common/gles2_cmd_utils.cc
index 6e2586661e2..e3a0a8ec0f5 100644
--- a/chromium/gpu/command_buffer/common/gles2_cmd_utils.cc
+++ b/chromium/gpu/command_buffer/common/gles2_cmd_utils.cc
@@ -515,7 +515,7 @@ int GLES2Util::GLGetNumValuesReturned(int id) const {
namespace {
// Return the number of bytes per element, based on the element type.
-int BytesPerElement(int type) {
+uint32_t BytesPerElement(int type) {
switch (type) {
case GL_FLOAT_32_UNSIGNED_INT_24_8_REV:
return 8;
@@ -546,7 +546,7 @@ int BytesPerElement(int type) {
} // anonymous namespace
// Return the number of elements per group of a specified format.
-int GLES2Util::ElementsPerGroup(int format, int type) {
+uint32_t GLES2Util::ElementsPerGroup(int format, int type) {
switch (type) {
case GL_UNSIGNED_SHORT_5_6_5:
case GL_UNSIGNED_SHORT_4_4_4_4:
@@ -592,11 +592,11 @@ int GLES2Util::ElementsPerGroup(int format, int type) {
}
uint32_t GLES2Util::ComputeImageGroupSize(int format, int type) {
- int bytes_per_element = BytesPerElement(type);
- DCHECK_GE(8, bytes_per_element);
- int elements_per_group = ElementsPerGroup(format, type);
- DCHECK_GE(4, elements_per_group);
- return bytes_per_element * elements_per_group;
+ uint32_t bytes_per_element = BytesPerElement(type);
+ DCHECK_GE(8u, bytes_per_element);
+ uint32_t elements_per_group = ElementsPerGroup(format, type);
+ DCHECK_GE(4u, elements_per_group);
+ return bytes_per_element * elements_per_group;
}
bool GLES2Util::ComputeImageRowSizeHelper(int width,
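As a worked check of the group-size math above (values read off the switch tables; sketch assumes the GL headers and gles2_cmd_utils.h are included):

  // GL_RGBA has 4 elements per group; GL_UNSIGNED_BYTE is 1 byte per element.
  uint32_t rgba = gpu::gles2::GLES2Util::ComputeImageGroupSize(
      GL_RGBA, GL_UNSIGNED_BYTE);        // 4 * 1 = 4 bytes per group
  // Packed 5_6_5 stores the whole group in a single 2-byte element.
  uint32_t packed = gpu::gles2::GLES2Util::ComputeImageGroupSize(
      GL_RGB, GL_UNSIGNED_SHORT_5_6_5);  // 1 * 2 = 2 bytes per group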
@@ -608,7 +608,8 @@ bool GLES2Util::ComputeImageRowSizeHelper(int width,
DCHECK(alignment == 1 || alignment == 2 ||
alignment == 4 || alignment == 8);
uint32_t unpadded_row_size;
- if (!SafeMultiplyUint32(width, bytes_per_group, &unpadded_row_size)) {
+ if (!base::CheckMul(width, bytes_per_group)
+ .AssignIfValid(&unpadded_row_size)) {
return false;
}
uint32_t residual = unpadded_row_size % alignment;
@@ -616,7 +617,8 @@ bool GLES2Util::ComputeImageRowSizeHelper(int width,
uint32_t padded_row_size = unpadded_row_size;
if (residual > 0) {
padding = alignment - residual;
- if (!SafeAddUint32(unpadded_row_size, padding, &padded_row_size)) {
+ if (!base::CheckAdd(unpadded_row_size, padding)
+ .AssignIfValid(&padded_row_size)) {
return false;
}
}
@@ -686,8 +688,8 @@ bool GLES2Util::ComputeImageDataSizesES3(
int image_height = params.image_height > 0 ? params.image_height : height;
uint32_t num_of_rows;
if (depth > 0) {
- if (!SafeMultiplyUint32(image_height, depth - 1, &num_of_rows) ||
- !SafeAddUint32(num_of_rows, height, &num_of_rows)) {
+ if (!base::CheckAdd(base::CheckMul(image_height, depth - 1), height)
+ .AssignIfValid(&num_of_rows)) {
return false;
}
} else {
@@ -695,42 +697,28 @@ bool GLES2Util::ComputeImageDataSizesES3(
}
if (num_of_rows > 0) {
- uint32_t size_of_all_but_last_row;
- if (!SafeMultiplyUint32((num_of_rows - 1), padded_row_size,
- &size_of_all_but_last_row)) {
- return false;
- }
- if (!SafeAddUint32(size_of_all_but_last_row, unpadded_row_size, size)) {
+ if (!base::CheckAdd(base::CheckMul(num_of_rows - 1, padded_row_size),
+ unpadded_row_size)
+ .AssignIfValid(size)) {
return false;
}
} else {
*size = 0;
}
- uint32_t skip_size = 0;
+ base::CheckedNumeric<uint32_t> skip_size = 0;
if (params.skip_images > 0) {
- uint32_t image_size;
- if (!SafeMultiplyUint32(image_height, padded_row_size, &image_size))
- return false;
- if (!SafeMultiplyUint32(image_size, params.skip_images, &skip_size))
- return false;
+ skip_size = image_height;
+ skip_size *= padded_row_size;
+ skip_size *= params.skip_images;
}
if (params.skip_rows > 0) {
- uint32_t temp;
- if (!SafeMultiplyUint32(padded_row_size, params.skip_rows, &temp))
- return false;
- if (!SafeAddUint32(skip_size, temp, &skip_size))
- return false;
+ skip_size += base::CheckMul(padded_row_size, params.skip_rows);
}
if (params.skip_pixels > 0) {
- uint32_t temp;
- if (!SafeMultiplyUint32(bytes_per_group, params.skip_pixels, &temp))
- return false;
- if (!SafeAddUint32(skip_size, temp, &skip_size))
- return false;
+ skip_size += base::CheckMul(bytes_per_group, params.skip_pixels);
}
- uint32_t total_size;
- if (!SafeAddUint32(*size, skip_size, &total_size))
+ if (!base::CheckAdd(*size, skip_size).IsValid())
return false;
if (opt_padded_row_size) {
@@ -740,11 +728,11 @@ bool GLES2Util::ComputeImageDataSizesES3(
*opt_unpadded_row_size = unpadded_row_size;
}
if (opt_skip_size)
- *opt_skip_size = skip_size;
+ *opt_skip_size = skip_size.ValueOrDefault(0);
return true;
}
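For reference, the overflow-checked pattern that replaces the removed Safe*Uint32 helpers, as a standalone sketch (function name hypothetical; CheckMul/CheckAdd/AssignIfValid are the base/numerics calls used in the hunks above):

  #include <stdint.h>
  #include "base/numerics/checked_math.h"

  // Hypothetical helper mirroring the pattern used above.
  bool ComputePaddedRowSize(uint32_t width, uint32_t bytes_per_group,
                            uint32_t padding, uint32_t* out) {
    uint32_t unpadded = 0;
    // CheckMul yields a CheckedNumeric; AssignIfValid() stores the product
    // only when no overflow occurred and reports validity as its return value.
    if (!base::CheckMul(width, bytes_per_group).AssignIfValid(&unpadded))
      return false;
    // Checked expressions compose, so a chain is validated once at the end.
    return base::CheckAdd(unpadded, padding).AssignIfValid(out);
  }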
-size_t GLES2Util::RenderbufferBytesPerPixel(int format) {
+uint32_t GLES2Util::RenderbufferBytesPerPixel(int format) {
switch (format) {
case GL_STENCIL_INDEX8:
return 1;
@@ -897,11 +885,11 @@ uint32_t GLES2Util::GetElementCountForUniformType(int type) {
}
}
-size_t GLES2Util::GetGLTypeSizeForTextures(uint32_t type) {
- return static_cast<size_t>(BytesPerElement(type));
+uint32_t GLES2Util::GetGLTypeSizeForTextures(uint32_t type) {
+ return BytesPerElement(type);
}
-size_t GLES2Util::GetGLTypeSizeForBuffers(uint32_t type) {
+uint32_t GLES2Util::GetGLTypeSizeForBuffers(uint32_t type) {
switch (type) {
case GL_BYTE:
return sizeof(GLbyte); // NOLINT
@@ -930,8 +918,9 @@ size_t GLES2Util::GetGLTypeSizeForBuffers(uint32_t type) {
}
}
-size_t GLES2Util::GetGroupSizeForBufferType(uint32_t count, uint32_t type) {
- size_t type_size = GetGLTypeSizeForBuffers(type);
+uint32_t GLES2Util::GetGroupSizeForBufferType(uint32_t count, uint32_t type) {
+ DCHECK_LE(count, 4u);
+ uint32_t type_size = GetGLTypeSizeForBuffers(type);
  // For packed types, the group size equals the type size.
if (type == GL_INT_2_10_10_10_REV || type == GL_UNSIGNED_INT_2_10_10_10_REV) {
DCHECK_EQ(4u, count);
@@ -939,7 +928,8 @@ size_t GLES2Util::GetGroupSizeForBufferType(uint32_t count, uint32_t type) {
}
return type_size * count;
}
-size_t GLES2Util::GetComponentCountForGLTransformType(uint32_t type) {
+
+uint32_t GLES2Util::GetComponentCountForGLTransformType(uint32_t type) {
switch (type) {
case GL_TRANSLATE_X_CHROMIUM:
case GL_TRANSLATE_Y_CHROMIUM:
@@ -958,7 +948,8 @@ size_t GLES2Util::GetComponentCountForGLTransformType(uint32_t type) {
return 0;
}
}
-size_t GLES2Util::GetCoefficientCountForGLPathFragmentInputGenMode(
+
+uint32_t GLES2Util::GetCoefficientCountForGLPathFragmentInputGenMode(
uint32_t gen_mode) {
switch (gen_mode) {
case GL_EYE_LINEAR_CHROMIUM:
@@ -973,7 +964,7 @@ size_t GLES2Util::GetCoefficientCountForGLPathFragmentInputGenMode(
}
}
-size_t GLES2Util::GetGLTypeSizeForPathCoordType(uint32_t type) {
+uint32_t GLES2Util::GetGLTypeSizeForPathCoordType(uint32_t type) {
switch (type) {
case GL_BYTE:
return sizeof(GLbyte); // NOLINT
@@ -990,7 +981,7 @@ size_t GLES2Util::GetGLTypeSizeForPathCoordType(uint32_t type) {
}
}
-size_t GLES2Util::GetGLTypeSizeForGLPathNameType(uint32_t type) {
+uint32_t GLES2Util::GetGLTypeSizeForGLPathNameType(uint32_t type) {
switch (type) {
case GL_BYTE:
return sizeof(GLbyte); // NOLINT
@@ -1584,8 +1575,8 @@ std::string GLES2Util::GetStringError(uint32_t value) {
static EnumToString string_table[] = {
{ GL_NONE, "GL_NONE" },
};
- return GLES2Util::GetQualifiedEnumString(
- string_table, arraysize(string_table), value);
+ return GLES2Util::GetQualifiedEnumString(string_table,
+ base::size(string_table), value);
}
std::string GLES2Util::GetStringBool(uint32_t value) {
@@ -1627,7 +1618,7 @@ GLSLArrayName::GLSLArrayName(const std::string& name) : element_index_(-1) {
base_name_ = name.substr(0, open_pos);
}
-size_t GLES2Util::CalcClearBufferivDataCount(int buffer) {
+uint32_t GLES2Util::CalcClearBufferivDataCount(int buffer) {
switch (buffer) {
case GL_COLOR:
return 4;
@@ -1638,7 +1629,7 @@ size_t GLES2Util::CalcClearBufferivDataCount(int buffer) {
}
}
-size_t GLES2Util::CalcClearBufferfvDataCount(int buffer) {
+uint32_t GLES2Util::CalcClearBufferfvDataCount(int buffer) {
switch (buffer) {
case GL_COLOR:
return 4;
@@ -1649,7 +1640,7 @@ size_t GLES2Util::CalcClearBufferfvDataCount(int buffer) {
}
}
-size_t GLES2Util::CalcClearBufferuivDataCount(int buffer) {
+uint32_t GLES2Util::CalcClearBufferuivDataCount(int buffer) {
switch (buffer) {
case GL_COLOR:
return 4;
diff --git a/chromium/gpu/command_buffer/common/gles2_cmd_utils.h b/chromium/gpu/command_buffer/common/gles2_cmd_utils.h
index d2b5c2ce8d9..25a28052368 100644
--- a/chromium/gpu/command_buffer/common/gles2_cmd_utils.h
+++ b/chromium/gpu/command_buffer/common/gles2_cmd_utils.h
@@ -22,37 +22,6 @@
namespace gpu {
namespace gles2 {
-// Does a multiply and checks for overflow. If the multiply did not overflow
-// returns true.
-
-// Multiplies 2 32 bit unsigned numbers checking for overflow.
-// If there was no overflow returns true.
-inline bool SafeMultiplyUint32(uint32_t a, uint32_t b, uint32_t* dst) {
- DCHECK(dst);
- base::CheckedNumeric<uint32_t> checked = a;
- checked *= b;
- *dst = checked.ValueOrDefault(0);
- return checked.IsValid();
-}
-
-// Does an add checking for overflow. If there was no overflow returns true.
-inline bool SafeAddUint32(uint32_t a, uint32_t b, uint32_t* dst) {
- DCHECK(dst);
- base::CheckedNumeric<uint32_t> checked = a;
- checked += b;
- *dst = checked.ValueOrDefault(0);
- return checked.IsValid();
-}
-
-// Does an add checking for overflow. If there was no overflow returns true.
-inline bool SafeAddInt32(int32_t a, int32_t b, int32_t* dst) {
- DCHECK(dst);
- base::CheckedNumeric<int32_t> checked = a;
- checked += b;
- *dst = checked.ValueOrDefault(0);
- return checked.IsValid();
-}
-
// A 32-bit and 64-bit compatible way of converting a pointer to a
// 32-bit unsigned integer, suitable to be stored in a GLuint.
inline uint32_t ToGLuint(const void* ptr) {
@@ -143,7 +112,7 @@ class GLES2_UTILS_EXPORT GLES2Util {
// function is called. If 0 is returned the id is invalid.
int GLGetNumValuesReturned(int id) const;
- static int ElementsPerGroup(int format, int type);
+ static uint32_t ElementsPerGroup(int format, int type);
// Computes the size of a single group of elements from a format and type pair
static uint32_t ComputeImageGroupSize(int format, int type);
@@ -171,7 +140,7 @@ class GLES2_UTILS_EXPORT GLES2Util {
uint32_t* opt_padded_row_size, uint32_t* opt_skip_size,
uint32_t* opt_padding);
- static size_t RenderbufferBytesPerPixel(int format);
+ static uint32_t RenderbufferBytesPerPixel(int format);
static uint8_t StencilBitsPerPixel(int format);
@@ -182,21 +151,22 @@ class GLES2_UTILS_EXPORT GLES2Util {
// For example, GL_FLOAT_MAT3 returns 9.
static uint32_t GetElementCountForUniformType(int type);
- static size_t GetGLTypeSizeForTextures(uint32_t type);
+ static uint32_t GetGLTypeSizeForTextures(uint32_t type);
- static size_t GetGLTypeSizeForBuffers(uint32_t type);
+ static uint32_t GetGLTypeSizeForBuffers(uint32_t type);
- static size_t GetGroupSizeForBufferType(uint32_t count, uint32_t type);
+ static uint32_t GetGroupSizeForBufferType(uint32_t count, uint32_t type);
- static size_t GetGLTypeSizeForPathCoordType(uint32_t type);
+ static uint32_t GetComponentCountForGLTransformType(uint32_t type);
- static uint32_t GLErrorToErrorBit(uint32_t gl_error);
+ static uint32_t GetCoefficientCountForGLPathFragmentInputGenMode(
+ uint32_t gen_mode);
- static size_t GetComponentCountForGLTransformType(uint32_t type);
- static size_t GetGLTypeSizeForGLPathNameType(uint32_t type);
+ static uint32_t GetGLTypeSizeForPathCoordType(uint32_t type);
- static size_t GetCoefficientCountForGLPathFragmentInputGenMode(
- uint32_t gen_mode);
+ static uint32_t GetGLTypeSizeForGLPathNameType(uint32_t type);
+
+ static uint32_t GLErrorToErrorBit(uint32_t gl_error);
static uint32_t GLErrorBitToGLError(uint32_t error_bit);
@@ -234,9 +204,9 @@ class GLES2_UTILS_EXPORT GLES2Util {
static std::string GetStringBool(uint32_t value);
static std::string GetStringError(uint32_t value);
- static size_t CalcClearBufferivDataCount(int buffer);
- static size_t CalcClearBufferfvDataCount(int buffer);
- static size_t CalcClearBufferuivDataCount(int buffer);
+ static uint32_t CalcClearBufferivDataCount(int buffer);
+ static uint32_t CalcClearBufferfvDataCount(int buffer);
+ static uint32_t CalcClearBufferuivDataCount(int buffer);
static void MapUint64ToTwoUint32(
uint64_t v64, uint32_t* v32_0, uint32_t* v32_1);
diff --git a/chromium/gpu/command_buffer/common/gles2_cmd_utils_unittest.cc b/chromium/gpu/command_buffer/common/gles2_cmd_utils_unittest.cc
index 459e39e408f..993d6e6a758 100644
--- a/chromium/gpu/command_buffer/common/gles2_cmd_utils_unittest.cc
+++ b/chromium/gpu/command_buffer/common/gles2_cmd_utils_unittest.cc
@@ -21,58 +21,6 @@ class GLES2UtilTest : public testing:: Test {
GLES2Util util_;
};
-TEST_F(GLES2UtilTest, SafeMultiplyUint32) {
- uint32_t result = 0;
- EXPECT_TRUE(SafeMultiplyUint32(2u, 3u, &result));
- EXPECT_EQ(6u, result);
- EXPECT_FALSE(SafeMultiplyUint32(0x80000000u, 2u, &result));
- EXPECT_EQ(0u, result);
- EXPECT_TRUE(SafeMultiplyUint32(0x2u, 0x7FFFFFFFu, &result));
- EXPECT_EQ(0xFFFFFFFEu, result);
- EXPECT_FALSE(SafeMultiplyUint32(2u, 0x80000000u, &result));
- EXPECT_EQ(0u, result);
-}
-
-TEST_F(GLES2UtilTest, SafeAddUint32) {
- uint32_t result = 0;
- EXPECT_TRUE(SafeAddUint32(2u, 3u, &result));
- EXPECT_EQ(5u, result);
- EXPECT_FALSE(SafeAddUint32(0x80000000u, 0x80000000u, &result));
- EXPECT_EQ(0u, result);
- EXPECT_TRUE(SafeAddUint32(0xFFFFFFFEu, 0x1u, &result));
- EXPECT_EQ(0xFFFFFFFFu, result);
- EXPECT_FALSE(SafeAddUint32(0xFFFFFFFEu, 0x2u, &result));
- EXPECT_EQ(0u, result);
- EXPECT_TRUE(SafeAddUint32(0x1u, 0xFFFFFFFEu, &result));
- EXPECT_EQ(0xFFFFFFFFu, result);
- EXPECT_FALSE(SafeAddUint32(0x2u, 0xFFFFFFFEu, &result));
- EXPECT_EQ(0u, result);
-}
-
-TEST_F(GLES2UtilTest, SafeAddInt32) {
- int32_t result = 0;
- const int32_t kMax = std::numeric_limits<int32_t>::max();
- const int32_t kMin = std::numeric_limits<int32_t>::min();
- EXPECT_TRUE(SafeAddInt32(2, 3, &result));
- EXPECT_EQ(5, result);
- EXPECT_FALSE(SafeAddInt32(kMax, 1, &result));
- EXPECT_EQ(0, result);
- EXPECT_TRUE(SafeAddInt32(kMin + 1, -1, &result));
- EXPECT_EQ(kMin, result);
- EXPECT_FALSE(SafeAddInt32(kMin, -1, &result));
- EXPECT_EQ(0, result);
- EXPECT_TRUE(SafeAddInt32(kMax - 1, 1, &result));
- EXPECT_EQ(kMax, result);
- EXPECT_FALSE(SafeAddInt32(1, kMax, &result));
- EXPECT_EQ(0, result);
- EXPECT_TRUE(SafeAddInt32(-1, kMin + 1, &result));
- EXPECT_EQ(kMin, result);
- EXPECT_FALSE(SafeAddInt32(-1, kMin, &result));
- EXPECT_EQ(0, result);
- EXPECT_TRUE(SafeAddInt32(1, kMax - 1, &result));
- EXPECT_EQ(kMax, result);
-}
-
TEST_F(GLES2UtilTest, GLGetNumValuesReturned) {
EXPECT_EQ(0, util_.GLGetNumValuesReturned(GL_COMPRESSED_TEXTURE_FORMATS));
EXPECT_EQ(0, util_.GLGetNumValuesReturned(GL_SHADER_BINARY_FORMATS));
diff --git a/chromium/gpu/command_buffer/common/id_type.h b/chromium/gpu/command_buffer/common/id_type.h
index 0d009c61867..e3efbfa36b2 100644
--- a/chromium/gpu/command_buffer/common/id_type.h
+++ b/chromium/gpu/command_buffer/common/id_type.h
@@ -60,6 +60,7 @@ class IdType {
bool operator==(const IdType& other) const { return value_ == other.value_; }
bool operator!=(const IdType& other) const { return value_ != other.value_; }
bool operator<(const IdType& other) const { return value_ < other.value_; }
+ bool operator<=(const IdType& other) const { return value_ <= other.value_; }
// Hasher to use in std::unordered_map, std::unordered_set, etc.
struct Hasher {
diff --git a/chromium/gpu/command_buffer/common/mailbox.cc b/chromium/gpu/command_buffer/common/mailbox.cc
index fdec1e6594f..2a7a0383447 100644
--- a/chromium/gpu/command_buffer/common/mailbox.cc
+++ b/chromium/gpu/command_buffer/common/mailbox.cc
@@ -10,16 +10,48 @@
#include "base/logging.h"
#include "base/rand_util.h"
+#include "base/stl_util.h"
#include "base/strings/stringprintf.h"
namespace gpu {
+namespace {
+
+// The last byte of the mailbox's name stores the SharedImage flag. This avoids
+// conflicts with Verify logic, which uses the first byte.
+constexpr size_t kSharedImageFlagIndex = GL_MAILBOX_SIZE_CHROMIUM - 1;
+
+// Use the lowest bit for the SharedImage flag (any bit would work).
+constexpr int8_t kSharedImageFlag = 0x1;
+
+void MarkMailboxAsSharedImage(bool is_shared_image, int8_t* name) {
+ if (is_shared_image)
+ name[kSharedImageFlagIndex] |= kSharedImageFlag;
+ else
+ name[kSharedImageFlagIndex] &= ~kSharedImageFlag;
+}
+
+Mailbox GenerateMailbox(bool is_shared_image) {
+ Mailbox result;
+ // Generates cryptographically-secure bytes.
+ base::RandBytes(result.name, sizeof(result.name));
+ MarkMailboxAsSharedImage(is_shared_image, result.name);
+#if !defined(NDEBUG)
+ int8_t value = 1;
+ for (size_t i = 1; i < sizeof(result.name); ++i)
+ value ^= result.name[i];
+ result.name[0] = value;
+#endif
+ return result;
+}
+
+} // namespace
Mailbox::Mailbox() {
memset(name, 0, sizeof(name));
}
bool Mailbox::IsZero() const {
- for (size_t i = 0; i < arraysize(name); ++i) {
+ for (size_t i = 0; i < base::size(name); ++i) {
if (name[i])
return false;
}
@@ -35,17 +67,16 @@ void Mailbox::SetName(const int8_t* n) {
memcpy(name, n, sizeof(name));
}
+bool Mailbox::IsSharedImage() const {
+ return name[kSharedImageFlagIndex] & kSharedImageFlag;
+}
+
Mailbox Mailbox::Generate() {
- Mailbox result;
- // Generates cryptographically-secure bytes.
- base::RandBytes(result.name, sizeof(result.name));
-#if !defined(NDEBUG)
- int8_t value = 1;
- for (size_t i = 1; i < sizeof(result.name); ++i)
- value ^= result.name[i];
- result.name[0] = value;
-#endif
- return result;
+ return GenerateMailbox(false /* is_shared_image */);
+}
+
+Mailbox Mailbox::GenerateForSharedImage() {
+ return GenerateMailbox(true /* is_shared_image */);
}
bool Mailbox::Verify() const {
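A short usage sketch of the new API pair (illustrative, not part of the patch; DCHECK per base/logging.h):

  gpu::Mailbox regular = gpu::Mailbox::Generate();
  gpu::Mailbox shared = gpu::Mailbox::GenerateForSharedImage();
  DCHECK(!regular.IsSharedImage());  // flag bit clear in the last name byte
  DCHECK(shared.IsSharedImage());    // flag bit set by GenerateMailbox(true)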
diff --git a/chromium/gpu/command_buffer/common/mailbox.h b/chromium/gpu/command_buffer/common/mailbox.h
index 6db8cb2ee94..5cd20835b43 100644
--- a/chromium/gpu/command_buffer/common/mailbox.h
+++ b/chromium/gpu/command_buffer/common/mailbox.h
@@ -40,9 +40,16 @@ struct GPU_EXPORT Mailbox {
void SetZero();
void SetName(const int8_t* name);
+ // Indicates whether this mailbox is used with the SharedImage system.
+ bool IsSharedImage() const;
+
// Generate a unique unguessable mailbox name.
static Mailbox Generate();
+ // Generate a unique unguessable mailbox name for use with the SharedImage
+ // system.
+ static Mailbox GenerateForSharedImage();
+
// Verify that the mailbox was created through Mailbox::Generate. This only
// works in Debug (always returns true in Release). This is not a secure
// check, only to catch bugs where clients forgot to call Mailbox::Generate.
diff --git a/chromium/gpu/command_buffer/common/presentation_feedback_utils.cc b/chromium/gpu/command_buffer/common/presentation_feedback_utils.cc
index 814ada433cd..edfb5dc3075 100644
--- a/chromium/gpu/command_buffer/common/presentation_feedback_utils.cc
+++ b/chromium/gpu/command_buffer/common/presentation_feedback_utils.cc
@@ -9,13 +9,6 @@
namespace gpu {
-bool ShouldSendBufferPresented(uint32_t swap_buffer_flags,
- uint32_t presentation_feedback_flags) {
- return swap_buffer_flags & SwapBuffersFlags::kPresentationFeedback ||
- (swap_buffer_flags & SwapBuffersFlags::kVSyncParams &&
- presentation_feedback_flags & gfx::PresentationFeedback::kVSync);
-}
-
bool ShouldUpdateVsyncParams(const gfx::PresentationFeedback& feedback) {
return feedback.flags & gfx::PresentationFeedback::kVSync &&
feedback.timestamp != base::TimeTicks() &&
diff --git a/chromium/gpu/command_buffer/common/presentation_feedback_utils.h b/chromium/gpu/command_buffer/common/presentation_feedback_utils.h
index 9d89456fbcf..ae0eba39f02 100644
--- a/chromium/gpu/command_buffer/common/presentation_feedback_utils.h
+++ b/chromium/gpu/command_buffer/common/presentation_feedback_utils.h
@@ -15,11 +15,6 @@ struct PresentationFeedback;
namespace gpu {
-// Returns true if command buffer should send buffer presented message to
-// client.
-GPU_EXPORT bool ShouldSendBufferPresented(uint32_t swap_buffer_flags,
- uint32_t presentation_feedback_flags);
-
// Returns true if command buffer should update vsync timing parameters based on
// presentation feedback.
GPU_EXPORT bool ShouldUpdateVsyncParams(
diff --git a/chromium/gpu/command_buffer/common/raster_cmd_format.cc b/chromium/gpu/command_buffer/common/raster_cmd_format.cc
index 1a21bed58e5..6729d13cbe0 100644
--- a/chromium/gpu/command_buffer/common/raster_cmd_format.cc
+++ b/chromium/gpu/command_buffer/common/raster_cmd_format.cc
@@ -9,11 +9,13 @@
// and service side have different requirements.
#include "gpu/command_buffer/common/cmd_buffer_common.h"
+#include <stddef.h>
+
+#include "base/stl_util.h"
+
namespace gpu {
namespace raster {
-#include <stddef.h>
-
#include "gpu/command_buffer/common/raster_cmd_ids_autogen.h"
const char* GetCommandName(CommandId id) {
@@ -26,7 +28,7 @@ const char* GetCommandName(CommandId id) {
};
size_t index = static_cast<size_t>(id) - kFirstRasterCommand;
- return (index < arraysize(names)) ? names[index] : "*unknown-command*";
+ return (index < base::size(names)) ? names[index] : "*unknown-command*";
}
} // namespace raster
diff --git a/chromium/gpu/command_buffer/common/raster_cmd_format_autogen.h b/chromium/gpu/command_buffer/common/raster_cmd_format_autogen.h
index ae0eba0865d..56631893db9 100644
--- a/chromium/gpu/command_buffer/common/raster_cmd_format_autogen.h
+++ b/chromium/gpu/command_buffer/common/raster_cmd_format_autogen.h
@@ -13,48 +13,6 @@
#define GL_SCANOUT_CHROMIUM 0x6000
-struct DeleteTexturesImmediate {
- typedef DeleteTexturesImmediate ValueType;
- static const CommandId kCmdId = kDeleteTexturesImmediate;
- static const cmd::ArgFlags kArgFlags = cmd::kAtLeastN;
- static const uint8_t cmd_flags = CMD_FLAG_SET_TRACE_LEVEL(3);
-
- static uint32_t ComputeDataSize(GLsizei _n) {
- return static_cast<uint32_t>(sizeof(GLuint) * _n); // NOLINT
- }
-
- static uint32_t ComputeSize(GLsizei _n) {
- return static_cast<uint32_t>(sizeof(ValueType) +
- ComputeDataSize(_n)); // NOLINT
- }
-
- void SetHeader(GLsizei _n) {
- header.SetCmdByTotalSize<ValueType>(ComputeSize(_n));
- }
-
- void Init(GLsizei _n, const GLuint* _textures) {
- SetHeader(_n);
- n = _n;
- memcpy(ImmediateDataAddress(this), _textures, ComputeDataSize(_n));
- }
-
- void* Set(void* cmd, GLsizei _n, const GLuint* _textures) {
- static_cast<ValueType*>(cmd)->Init(_n, _textures);
- const uint32_t size = ComputeSize(_n);
- return NextImmediateCmdAddressTotalSize<ValueType>(cmd, size);
- }
-
- gpu::CommandHeader header;
- int32_t n;
-};
-
-static_assert(sizeof(DeleteTexturesImmediate) == 8,
- "size of DeleteTexturesImmediate should be 8");
-static_assert(offsetof(DeleteTexturesImmediate, header) == 0,
- "offset of DeleteTexturesImmediate header should be 0");
-static_assert(offsetof(DeleteTexturesImmediate, n) == 4,
- "offset of DeleteTexturesImmediate n should be 4");
-
struct Finish {
typedef Finish ValueType;
static const CommandId kCmdId = kFinish;
@@ -398,75 +356,6 @@ static_assert(offsetof(InsertFenceSyncCHROMIUM, release_count_0) == 4,
static_assert(offsetof(InsertFenceSyncCHROMIUM, release_count_1) == 8,
"offset of InsertFenceSyncCHROMIUM release_count_1 should be 8");
-struct WaitSyncTokenCHROMIUM {
- typedef WaitSyncTokenCHROMIUM ValueType;
- static const CommandId kCmdId = kWaitSyncTokenCHROMIUM;
- static const cmd::ArgFlags kArgFlags = cmd::kFixed;
- static const uint8_t cmd_flags = CMD_FLAG_SET_TRACE_LEVEL(3);
-
- static uint32_t ComputeSize() {
- return static_cast<uint32_t>(sizeof(ValueType)); // NOLINT
- }
-
- void SetHeader() { header.SetCmd<ValueType>(); }
-
- void Init(GLint _namespace_id,
- GLuint64 _command_buffer_id,
- GLuint64 _release_count) {
- SetHeader();
- namespace_id = _namespace_id;
- gles2::GLES2Util::MapUint64ToTwoUint32(
- static_cast<uint64_t>(_command_buffer_id), &command_buffer_id_0,
- &command_buffer_id_1);
- gles2::GLES2Util::MapUint64ToTwoUint32(
- static_cast<uint64_t>(_release_count), &release_count_0,
- &release_count_1);
- }
-
- void* Set(void* cmd,
- GLint _namespace_id,
- GLuint64 _command_buffer_id,
- GLuint64 _release_count) {
- static_cast<ValueType*>(cmd)->Init(_namespace_id, _command_buffer_id,
- _release_count);
- return NextCmdAddress<ValueType>(cmd);
- }
-
- GLuint64 command_buffer_id() const volatile {
- return static_cast<GLuint64>(gles2::GLES2Util::MapTwoUint32ToUint64(
- command_buffer_id_0, command_buffer_id_1));
- }
-
- GLuint64 release_count() const volatile {
- return static_cast<GLuint64>(gles2::GLES2Util::MapTwoUint32ToUint64(
- release_count_0, release_count_1));
- }
-
- gpu::CommandHeader header;
- int32_t namespace_id;
- uint32_t command_buffer_id_0;
- uint32_t command_buffer_id_1;
- uint32_t release_count_0;
- uint32_t release_count_1;
-};
-
-static_assert(sizeof(WaitSyncTokenCHROMIUM) == 24,
- "size of WaitSyncTokenCHROMIUM should be 24");
-static_assert(offsetof(WaitSyncTokenCHROMIUM, header) == 0,
- "offset of WaitSyncTokenCHROMIUM header should be 0");
-static_assert(offsetof(WaitSyncTokenCHROMIUM, namespace_id) == 4,
- "offset of WaitSyncTokenCHROMIUM namespace_id should be 4");
-static_assert(
- offsetof(WaitSyncTokenCHROMIUM, command_buffer_id_0) == 8,
- "offset of WaitSyncTokenCHROMIUM command_buffer_id_0 should be 8");
-static_assert(
- offsetof(WaitSyncTokenCHROMIUM, command_buffer_id_1) == 12,
- "offset of WaitSyncTokenCHROMIUM command_buffer_id_1 should be 12");
-static_assert(offsetof(WaitSyncTokenCHROMIUM, release_count_0) == 16,
- "offset of WaitSyncTokenCHROMIUM release_count_0 should be 16");
-static_assert(offsetof(WaitSyncTokenCHROMIUM, release_count_1) == 20,
- "offset of WaitSyncTokenCHROMIUM release_count_1 should be 20");
-
struct BeginRasterCHROMIUMImmediate {
typedef BeginRasterCHROMIUMImmediate ValueType;
static const CommandId kCmdId = kBeginRasterCHROMIUMImmediate;
@@ -486,14 +375,12 @@ struct BeginRasterCHROMIUMImmediate {
void Init(GLuint _sk_color,
GLuint _msaa_sample_count,
GLboolean _can_use_lcd_text,
- GLint _color_type,
GLuint _color_space_transfer_cache_id,
const GLbyte* _mailbox) {
SetHeader();
sk_color = _sk_color;
msaa_sample_count = _msaa_sample_count;
can_use_lcd_text = _can_use_lcd_text;
- color_type = _color_type;
color_space_transfer_cache_id = _color_space_transfer_cache_id;
memcpy(ImmediateDataAddress(this), _mailbox, ComputeDataSize());
}
@@ -502,11 +389,10 @@ struct BeginRasterCHROMIUMImmediate {
GLuint _sk_color,
GLuint _msaa_sample_count,
GLboolean _can_use_lcd_text,
- GLint _color_type,
GLuint _color_space_transfer_cache_id,
const GLbyte* _mailbox) {
static_cast<ValueType*>(cmd)->Init(
- _sk_color, _msaa_sample_count, _can_use_lcd_text, _color_type,
+ _sk_color, _msaa_sample_count, _can_use_lcd_text,
_color_space_transfer_cache_id, _mailbox);
const uint32_t size = ComputeSize();
return NextImmediateCmdAddressTotalSize<ValueType>(cmd, size);
@@ -516,12 +402,11 @@ struct BeginRasterCHROMIUMImmediate {
uint32_t sk_color;
uint32_t msaa_sample_count;
uint32_t can_use_lcd_text;
- int32_t color_type;
uint32_t color_space_transfer_cache_id;
};
-static_assert(sizeof(BeginRasterCHROMIUMImmediate) == 24,
- "size of BeginRasterCHROMIUMImmediate should be 24");
+static_assert(sizeof(BeginRasterCHROMIUMImmediate) == 20,
+ "size of BeginRasterCHROMIUMImmediate should be 20");
static_assert(offsetof(BeginRasterCHROMIUMImmediate, header) == 0,
"offset of BeginRasterCHROMIUMImmediate header should be 0");
static_assert(offsetof(BeginRasterCHROMIUMImmediate, sk_color) == 4,
@@ -532,12 +417,10 @@ static_assert(
static_assert(
offsetof(BeginRasterCHROMIUMImmediate, can_use_lcd_text) == 12,
"offset of BeginRasterCHROMIUMImmediate can_use_lcd_text should be 12");
-static_assert(offsetof(BeginRasterCHROMIUMImmediate, color_type) == 16,
- "offset of BeginRasterCHROMIUMImmediate color_type should be 16");
static_assert(offsetof(BeginRasterCHROMIUMImmediate,
- color_space_transfer_cache_id) == 20,
+ color_space_transfer_cache_id) == 16,
"offset of BeginRasterCHROMIUMImmediate "
- "color_space_transfer_cache_id should be 20");
+ "color_space_transfer_cache_id should be 16");
struct RasterCHROMIUM {
typedef RasterCHROMIUM ValueType;
@@ -904,14 +787,14 @@ static_assert(sizeof(ClearPaintCacheINTERNAL) == 4,
static_assert(offsetof(ClearPaintCacheINTERNAL, header) == 0,
"offset of ClearPaintCacheINTERNAL header should be 0");
-struct CreateAndConsumeTextureINTERNALImmediate {
- typedef CreateAndConsumeTextureINTERNALImmediate ValueType;
- static const CommandId kCmdId = kCreateAndConsumeTextureINTERNALImmediate;
+struct CopySubTextureINTERNALImmediate {
+ typedef CopySubTextureINTERNALImmediate ValueType;
+ static const CommandId kCmdId = kCopySubTextureINTERNALImmediate;
static const cmd::ArgFlags kArgFlags = cmd::kAtLeastN;
static const uint8_t cmd_flags = CMD_FLAG_SET_TRACE_LEVEL(2);
static uint32_t ComputeDataSize() {
- return static_cast<uint32_t>(sizeof(GLbyte) * 16);
+ return static_cast<uint32_t>(sizeof(GLbyte) * 32);
}
static uint32_t ComputeSize() {
@@ -920,107 +803,38 @@ struct CreateAndConsumeTextureINTERNALImmediate {
void SetHeader() { header.SetCmdByTotalSize<ValueType>(ComputeSize()); }
- void Init(GLuint _texture_id,
- bool _use_buffer,
- gfx::BufferUsage _buffer_usage,
- viz::ResourceFormat _format,
- const GLbyte* _mailbox) {
- SetHeader();
- texture_id = _texture_id;
- use_buffer = _use_buffer;
- buffer_usage = static_cast<uint32_t>(_buffer_usage);
- format = static_cast<uint32_t>(_format);
- memcpy(ImmediateDataAddress(this), _mailbox, ComputeDataSize());
- }
-
- void* Set(void* cmd,
- GLuint _texture_id,
- bool _use_buffer,
- gfx::BufferUsage _buffer_usage,
- viz::ResourceFormat _format,
- const GLbyte* _mailbox) {
- static_cast<ValueType*>(cmd)->Init(_texture_id, _use_buffer, _buffer_usage,
- _format, _mailbox);
- const uint32_t size = ComputeSize();
- return NextImmediateCmdAddressTotalSize<ValueType>(cmd, size);
- }
-
- gpu::CommandHeader header;
- uint32_t texture_id;
- uint32_t use_buffer;
- uint32_t buffer_usage;
- uint32_t format;
-};
-
-static_assert(sizeof(CreateAndConsumeTextureINTERNALImmediate) == 20,
- "size of CreateAndConsumeTextureINTERNALImmediate should be 20");
-static_assert(
- offsetof(CreateAndConsumeTextureINTERNALImmediate, header) == 0,
- "offset of CreateAndConsumeTextureINTERNALImmediate header should be 0");
-static_assert(offsetof(CreateAndConsumeTextureINTERNALImmediate, texture_id) ==
- 4,
- "offset of CreateAndConsumeTextureINTERNALImmediate texture_id "
- "should be 4");
-static_assert(offsetof(CreateAndConsumeTextureINTERNALImmediate, use_buffer) ==
- 8,
- "offset of CreateAndConsumeTextureINTERNALImmediate use_buffer "
- "should be 8");
-static_assert(offsetof(CreateAndConsumeTextureINTERNALImmediate,
- buffer_usage) == 12,
- "offset of CreateAndConsumeTextureINTERNALImmediate buffer_usage "
- "should be 12");
-static_assert(
- offsetof(CreateAndConsumeTextureINTERNALImmediate, format) == 16,
- "offset of CreateAndConsumeTextureINTERNALImmediate format should be 16");
-
-struct CopySubTexture {
- typedef CopySubTexture ValueType;
- static const CommandId kCmdId = kCopySubTexture;
- static const cmd::ArgFlags kArgFlags = cmd::kFixed;
- static const uint8_t cmd_flags = CMD_FLAG_SET_TRACE_LEVEL(2);
-
- static uint32_t ComputeSize() {
- return static_cast<uint32_t>(sizeof(ValueType)); // NOLINT
- }
-
- void SetHeader() { header.SetCmd<ValueType>(); }
-
- void Init(GLuint _source_id,
- GLuint _dest_id,
- GLint _xoffset,
+ void Init(GLint _xoffset,
GLint _yoffset,
GLint _x,
GLint _y,
GLsizei _width,
- GLsizei _height) {
+ GLsizei _height,
+ const GLbyte* _mailboxes) {
SetHeader();
- source_id = _source_id;
- dest_id = _dest_id;
xoffset = _xoffset;
yoffset = _yoffset;
x = _x;
y = _y;
width = _width;
height = _height;
+ memcpy(ImmediateDataAddress(this), _mailboxes, ComputeDataSize());
}
void* Set(void* cmd,
- GLuint _source_id,
- GLuint _dest_id,
GLint _xoffset,
GLint _yoffset,
GLint _x,
GLint _y,
GLsizei _width,
- GLsizei _height) {
- static_cast<ValueType*>(cmd)->Init(_source_id, _dest_id, _xoffset, _yoffset,
- _x, _y, _width, _height);
- return NextCmdAddress<ValueType>(cmd);
+ GLsizei _height,
+ const GLbyte* _mailboxes) {
+ static_cast<ValueType*>(cmd)->Init(_xoffset, _yoffset, _x, _y, _width,
+ _height, _mailboxes);
+ const uint32_t size = ComputeSize();
+ return NextImmediateCmdAddressTotalSize<ValueType>(cmd, size);
}
gpu::CommandHeader header;
- uint32_t source_id;
- uint32_t dest_id;
int32_t xoffset;
int32_t yoffset;
int32_t x;
@@ -1029,26 +843,22 @@ struct CopySubTexture {
int32_t height;
};
-static_assert(sizeof(CopySubTexture) == 36,
- "size of CopySubTexture should be 36");
-static_assert(offsetof(CopySubTexture, header) == 0,
- "offset of CopySubTexture header should be 0");
-static_assert(offsetof(CopySubTexture, source_id) == 4,
- "offset of CopySubTexture source_id should be 4");
-static_assert(offsetof(CopySubTexture, dest_id) == 8,
- "offset of CopySubTexture dest_id should be 8");
-static_assert(offsetof(CopySubTexture, xoffset) == 12,
- "offset of CopySubTexture xoffset should be 12");
-static_assert(offsetof(CopySubTexture, yoffset) == 16,
- "offset of CopySubTexture yoffset should be 16");
-static_assert(offsetof(CopySubTexture, x) == 20,
- "offset of CopySubTexture x should be 20");
-static_assert(offsetof(CopySubTexture, y) == 24,
- "offset of CopySubTexture y should be 24");
-static_assert(offsetof(CopySubTexture, width) == 28,
- "offset of CopySubTexture width should be 28");
-static_assert(offsetof(CopySubTexture, height) == 32,
- "offset of CopySubTexture height should be 32");
+static_assert(sizeof(CopySubTextureINTERNALImmediate) == 28,
+ "size of CopySubTextureINTERNALImmediate should be 28");
+static_assert(offsetof(CopySubTextureINTERNALImmediate, header) == 0,
+ "offset of CopySubTextureINTERNALImmediate header should be 0");
+static_assert(offsetof(CopySubTextureINTERNALImmediate, xoffset) == 4,
+ "offset of CopySubTextureINTERNALImmediate xoffset should be 4");
+static_assert(offsetof(CopySubTextureINTERNALImmediate, yoffset) == 8,
+ "offset of CopySubTextureINTERNALImmediate yoffset should be 8");
+static_assert(offsetof(CopySubTextureINTERNALImmediate, x) == 12,
+ "offset of CopySubTextureINTERNALImmediate x should be 12");
+static_assert(offsetof(CopySubTextureINTERNALImmediate, y) == 16,
+ "offset of CopySubTextureINTERNALImmediate y should be 16");
+static_assert(offsetof(CopySubTextureINTERNALImmediate, width) == 20,
+ "offset of CopySubTextureINTERNALImmediate width should be 20");
+static_assert(offsetof(CopySubTextureINTERNALImmediate, height) == 24,
+ "offset of CopySubTextureINTERNALImmediate height should be 24");
struct TraceBeginCHROMIUM {
typedef TraceBeginCHROMIUM ValueType;
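An encoding sketch for the new immediate command above. The 32 immediate bytes presumably carry the source and destination mailbox names back-to-back (the ordering is not shown in this patch), and GetSpaceForCommand() stands in for whatever command-buffer allocator the caller uses:

  GLbyte mailboxes[32] = {};  // two 16-byte mailbox names, assumed src then dst
  void* buf = GetSpaceForCommand();  // hypothetical allocator
  cmds::CopySubTextureINTERNALImmediate& cmd =
      *static_cast<cmds::CopySubTextureINTERNALImmediate*>(buf);
  cmd.Set(&cmd, /*xoffset=*/0, /*yoffset=*/0, /*x=*/0, /*y=*/0,
          /*width=*/64, /*height=*/64, mailboxes);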
diff --git a/chromium/gpu/command_buffer/common/raster_cmd_format_test_autogen.h b/chromium/gpu/command_buffer/common/raster_cmd_format_test_autogen.h
index 05a838d368c..1402583c19d 100644
--- a/chromium/gpu/command_buffer/common/raster_cmd_format_test_autogen.h
+++ b/chromium/gpu/command_buffer/common/raster_cmd_format_test_autogen.h
@@ -14,24 +14,6 @@
#ifndef GPU_COMMAND_BUFFER_COMMON_RASTER_CMD_FORMAT_TEST_AUTOGEN_H_
#define GPU_COMMAND_BUFFER_COMMON_RASTER_CMD_FORMAT_TEST_AUTOGEN_H_
-TEST_F(RasterFormatTest, DeleteTexturesImmediate) {
- static GLuint ids[] = {
- 12, 23, 34,
- };
- cmds::DeleteTexturesImmediate& cmd =
- *GetBufferAs<cmds::DeleteTexturesImmediate>();
- void* next_cmd = cmd.Set(&cmd, static_cast<GLsizei>(base::size(ids)), ids);
- EXPECT_EQ(static_cast<uint32_t>(cmds::DeleteTexturesImmediate::kCmdId),
- cmd.header.command);
- EXPECT_EQ(sizeof(cmd) + RoundSizeToMultipleOfEntries(cmd.n * 4u),
- cmd.header.size * 4u);
- EXPECT_EQ(static_cast<GLsizei>(base::size(ids)), cmd.n);
- CheckBytesWrittenMatchesExpectedSize(
- next_cmd,
- sizeof(cmd) + RoundSizeToMultipleOfEntries(base::size(ids) * 4u));
- EXPECT_EQ(0, memcmp(ids, ImmediateDataAddress(&cmd), sizeof(ids)));
-}
-
TEST_F(RasterFormatTest, Finish) {
cmds::Finish& cmd = *GetBufferAs<cmds::Finish>();
void* next_cmd = cmd.Set(&cmd);
@@ -61,7 +43,9 @@ TEST_F(RasterFormatTest, GetError) {
TEST_F(RasterFormatTest, GenQueriesEXTImmediate) {
static GLuint ids[] = {
- 12, 23, 34,
+ 12,
+ 23,
+ 34,
};
cmds::GenQueriesEXTImmediate& cmd =
*GetBufferAs<cmds::GenQueriesEXTImmediate>();
@@ -79,7 +63,9 @@ TEST_F(RasterFormatTest, GenQueriesEXTImmediate) {
TEST_F(RasterFormatTest, DeleteQueriesEXTImmediate) {
static GLuint ids[] = {
- 12, 23, 34,
+ 12,
+ 23,
+ 34,
};
cmds::DeleteQueriesEXTImmediate& cmd =
*GetBufferAs<cmds::DeleteQueriesEXTImmediate>();
@@ -145,21 +131,6 @@ TEST_F(RasterFormatTest, InsertFenceSyncCHROMIUM) {
CheckBytesWrittenMatchesExpectedSize(next_cmd, sizeof(cmd));
}
-TEST_F(RasterFormatTest, WaitSyncTokenCHROMIUM) {
- cmds::WaitSyncTokenCHROMIUM& cmd =
- *GetBufferAs<cmds::WaitSyncTokenCHROMIUM>();
- void* next_cmd =
- cmd.Set(&cmd, static_cast<GLint>(11), static_cast<GLuint64>(12),
- static_cast<GLuint64>(13));
- EXPECT_EQ(static_cast<uint32_t>(cmds::WaitSyncTokenCHROMIUM::kCmdId),
- cmd.header.command);
- EXPECT_EQ(sizeof(cmd), cmd.header.size * 4u);
- EXPECT_EQ(static_cast<GLint>(11), cmd.namespace_id);
- EXPECT_EQ(static_cast<GLuint64>(12), cmd.command_buffer_id());
- EXPECT_EQ(static_cast<GLuint64>(13), cmd.release_count());
- CheckBytesWrittenMatchesExpectedSize(next_cmd, sizeof(cmd));
-}
-
TEST_F(RasterFormatTest, BeginRasterCHROMIUMImmediate) {
const int kSomeBaseValueToTestWith = 51;
static GLbyte data[] = {
@@ -184,8 +155,7 @@ TEST_F(RasterFormatTest, BeginRasterCHROMIUMImmediate) {
*GetBufferAs<cmds::BeginRasterCHROMIUMImmediate>();
void* next_cmd =
cmd.Set(&cmd, static_cast<GLuint>(11), static_cast<GLuint>(12),
- static_cast<GLboolean>(13), static_cast<GLint>(14),
- static_cast<GLuint>(15), data);
+ static_cast<GLboolean>(13), static_cast<GLuint>(14), data);
EXPECT_EQ(static_cast<uint32_t>(cmds::BeginRasterCHROMIUMImmediate::kCmdId),
cmd.header.command);
EXPECT_EQ(sizeof(cmd) + RoundSizeToMultipleOfEntries(sizeof(data)),
@@ -193,8 +163,7 @@ TEST_F(RasterFormatTest, BeginRasterCHROMIUMImmediate) {
EXPECT_EQ(static_cast<GLuint>(11), cmd.sk_color);
EXPECT_EQ(static_cast<GLuint>(12), cmd.msaa_sample_count);
EXPECT_EQ(static_cast<GLboolean>(13), cmd.can_use_lcd_text);
- EXPECT_EQ(static_cast<GLint>(14), cmd.color_type);
- EXPECT_EQ(static_cast<GLuint>(15), cmd.color_space_transfer_cache_id);
+ EXPECT_EQ(static_cast<GLuint>(14), cmd.color_space_transfer_cache_id);
CheckBytesWrittenMatchesExpectedSize(
next_cmd, sizeof(cmd) + RoundSizeToMultipleOfEntries(sizeof(data)));
}
@@ -277,7 +246,9 @@ TEST_F(RasterFormatTest, UnlockTransferCacheEntryINTERNAL) {
TEST_F(RasterFormatTest, DeletePaintCacheTextBlobsINTERNALImmediate) {
static GLuint ids[] = {
- 12, 23, 34,
+ 12,
+ 23,
+ 34,
};
cmds::DeletePaintCacheTextBlobsINTERNALImmediate& cmd =
*GetBufferAs<cmds::DeletePaintCacheTextBlobsINTERNALImmediate>();
@@ -296,7 +267,9 @@ TEST_F(RasterFormatTest, DeletePaintCacheTextBlobsINTERNALImmediate) {
TEST_F(RasterFormatTest, DeletePaintCachePathsINTERNALImmediate) {
static GLuint ids[] = {
- 12, 23, 34,
+ 12,
+ 23,
+ 34,
};
cmds::DeletePaintCachePathsINTERNALImmediate& cmd =
*GetBufferAs<cmds::DeletePaintCachePathsINTERNALImmediate>();
@@ -323,7 +296,7 @@ TEST_F(RasterFormatTest, ClearPaintCacheINTERNAL) {
CheckBytesWrittenMatchesExpectedSize(next_cmd, sizeof(cmd));
}
-TEST_F(RasterFormatTest, CreateAndConsumeTextureINTERNALImmediate) {
+TEST_F(RasterFormatTest, CopySubTextureINTERNALImmediate) {
const int kSomeBaseValueToTestWith = 51;
static GLbyte data[] = {
static_cast<GLbyte>(kSomeBaseValueToTestWith + 0),
@@ -342,48 +315,44 @@ TEST_F(RasterFormatTest, CreateAndConsumeTextureINTERNALImmediate) {
static_cast<GLbyte>(kSomeBaseValueToTestWith + 13),
static_cast<GLbyte>(kSomeBaseValueToTestWith + 14),
static_cast<GLbyte>(kSomeBaseValueToTestWith + 15),
+ static_cast<GLbyte>(kSomeBaseValueToTestWith + 16),
+ static_cast<GLbyte>(kSomeBaseValueToTestWith + 17),
+ static_cast<GLbyte>(kSomeBaseValueToTestWith + 18),
+ static_cast<GLbyte>(kSomeBaseValueToTestWith + 19),
+ static_cast<GLbyte>(kSomeBaseValueToTestWith + 20),
+ static_cast<GLbyte>(kSomeBaseValueToTestWith + 21),
+ static_cast<GLbyte>(kSomeBaseValueToTestWith + 22),
+ static_cast<GLbyte>(kSomeBaseValueToTestWith + 23),
+ static_cast<GLbyte>(kSomeBaseValueToTestWith + 24),
+ static_cast<GLbyte>(kSomeBaseValueToTestWith + 25),
+ static_cast<GLbyte>(kSomeBaseValueToTestWith + 26),
+ static_cast<GLbyte>(kSomeBaseValueToTestWith + 27),
+ static_cast<GLbyte>(kSomeBaseValueToTestWith + 28),
+ static_cast<GLbyte>(kSomeBaseValueToTestWith + 29),
+ static_cast<GLbyte>(kSomeBaseValueToTestWith + 30),
+ static_cast<GLbyte>(kSomeBaseValueToTestWith + 31),
};
- cmds::CreateAndConsumeTextureINTERNALImmediate& cmd =
- *GetBufferAs<cmds::CreateAndConsumeTextureINTERNALImmediate>();
- void* next_cmd = cmd.Set(&cmd, static_cast<GLuint>(11), static_cast<bool>(12),
- static_cast<gfx::BufferUsage>(13),
- static_cast<viz::ResourceFormat>(14), data);
- EXPECT_EQ(static_cast<uint32_t>(
- cmds::CreateAndConsumeTextureINTERNALImmediate::kCmdId),
- cmd.header.command);
+ cmds::CopySubTextureINTERNALImmediate& cmd =
+ *GetBufferAs<cmds::CopySubTextureINTERNALImmediate>();
+ void* next_cmd =
+ cmd.Set(&cmd, static_cast<GLint>(11), static_cast<GLint>(12),
+ static_cast<GLint>(13), static_cast<GLint>(14),
+ static_cast<GLsizei>(15), static_cast<GLsizei>(16), data);
+ EXPECT_EQ(
+ static_cast<uint32_t>(cmds::CopySubTextureINTERNALImmediate::kCmdId),
+ cmd.header.command);
EXPECT_EQ(sizeof(cmd) + RoundSizeToMultipleOfEntries(sizeof(data)),
cmd.header.size * 4u);
- EXPECT_EQ(static_cast<GLuint>(11), cmd.texture_id);
- EXPECT_EQ(static_cast<bool>(12), static_cast<bool>(cmd.use_buffer));
- EXPECT_EQ(static_cast<gfx::BufferUsage>(13),
- static_cast<gfx::BufferUsage>(cmd.buffer_usage));
- EXPECT_EQ(static_cast<viz::ResourceFormat>(14),
- static_cast<viz::ResourceFormat>(cmd.format));
+ EXPECT_EQ(static_cast<GLint>(11), cmd.xoffset);
+ EXPECT_EQ(static_cast<GLint>(12), cmd.yoffset);
+ EXPECT_EQ(static_cast<GLint>(13), cmd.x);
+ EXPECT_EQ(static_cast<GLint>(14), cmd.y);
+ EXPECT_EQ(static_cast<GLsizei>(15), cmd.width);
+ EXPECT_EQ(static_cast<GLsizei>(16), cmd.height);
CheckBytesWrittenMatchesExpectedSize(
next_cmd, sizeof(cmd) + RoundSizeToMultipleOfEntries(sizeof(data)));
}
-TEST_F(RasterFormatTest, CopySubTexture) {
- cmds::CopySubTexture& cmd = *GetBufferAs<cmds::CopySubTexture>();
- void* next_cmd =
- cmd.Set(&cmd, static_cast<GLuint>(11), static_cast<GLuint>(12),
- static_cast<GLint>(13), static_cast<GLint>(14),
- static_cast<GLint>(15), static_cast<GLint>(16),
- static_cast<GLsizei>(17), static_cast<GLsizei>(18));
- EXPECT_EQ(static_cast<uint32_t>(cmds::CopySubTexture::kCmdId),
- cmd.header.command);
- EXPECT_EQ(sizeof(cmd), cmd.header.size * 4u);
- EXPECT_EQ(static_cast<GLuint>(11), cmd.source_id);
- EXPECT_EQ(static_cast<GLuint>(12), cmd.dest_id);
- EXPECT_EQ(static_cast<GLint>(13), cmd.xoffset);
- EXPECT_EQ(static_cast<GLint>(14), cmd.yoffset);
- EXPECT_EQ(static_cast<GLint>(15), cmd.x);
- EXPECT_EQ(static_cast<GLint>(16), cmd.y);
- EXPECT_EQ(static_cast<GLsizei>(17), cmd.width);
- EXPECT_EQ(static_cast<GLsizei>(18), cmd.height);
- CheckBytesWrittenMatchesExpectedSize(next_cmd, sizeof(cmd));
-}
-
TEST_F(RasterFormatTest, TraceBeginCHROMIUM) {
cmds::TraceBeginCHROMIUM& cmd = *GetBufferAs<cmds::TraceBeginCHROMIUM>();
void* next_cmd =
diff --git a/chromium/gpu/command_buffer/common/raster_cmd_ids_autogen.h b/chromium/gpu/command_buffer/common/raster_cmd_ids_autogen.h
index 626e2b04ce6..0c879b913fb 100644
--- a/chromium/gpu/command_buffer/common/raster_cmd_ids_autogen.h
+++ b/chromium/gpu/command_buffer/common/raster_cmd_ids_autogen.h
@@ -12,31 +12,28 @@
#define GPU_COMMAND_BUFFER_COMMON_RASTER_CMD_IDS_AUTOGEN_H_
#define RASTER_COMMAND_LIST(OP) \
- OP(DeleteTexturesImmediate) /* 256 */ \
- OP(Finish) /* 257 */ \
- OP(Flush) /* 258 */ \
- OP(GetError) /* 259 */ \
- OP(GenQueriesEXTImmediate) /* 260 */ \
- OP(DeleteQueriesEXTImmediate) /* 261 */ \
- OP(BeginQueryEXT) /* 262 */ \
- OP(EndQueryEXT) /* 263 */ \
- OP(LoseContextCHROMIUM) /* 264 */ \
- OP(InsertFenceSyncCHROMIUM) /* 265 */ \
- OP(WaitSyncTokenCHROMIUM) /* 266 */ \
- OP(BeginRasterCHROMIUMImmediate) /* 267 */ \
- OP(RasterCHROMIUM) /* 268 */ \
- OP(EndRasterCHROMIUM) /* 269 */ \
- OP(CreateTransferCacheEntryINTERNAL) /* 270 */ \
- OP(DeleteTransferCacheEntryINTERNAL) /* 271 */ \
- OP(UnlockTransferCacheEntryINTERNAL) /* 272 */ \
- OP(DeletePaintCacheTextBlobsINTERNALImmediate) /* 273 */ \
- OP(DeletePaintCachePathsINTERNALImmediate) /* 274 */ \
- OP(ClearPaintCacheINTERNAL) /* 275 */ \
- OP(CreateAndConsumeTextureINTERNALImmediate) /* 276 */ \
- OP(CopySubTexture) /* 277 */ \
- OP(TraceBeginCHROMIUM) /* 278 */ \
- OP(TraceEndCHROMIUM) /* 279 */ \
- OP(SetActiveURLCHROMIUM) /* 280 */
+ OP(Finish) /* 256 */ \
+ OP(Flush) /* 257 */ \
+ OP(GetError) /* 258 */ \
+ OP(GenQueriesEXTImmediate) /* 259 */ \
+ OP(DeleteQueriesEXTImmediate) /* 260 */ \
+ OP(BeginQueryEXT) /* 261 */ \
+ OP(EndQueryEXT) /* 262 */ \
+ OP(LoseContextCHROMIUM) /* 263 */ \
+ OP(InsertFenceSyncCHROMIUM) /* 264 */ \
+ OP(BeginRasterCHROMIUMImmediate) /* 265 */ \
+ OP(RasterCHROMIUM) /* 266 */ \
+ OP(EndRasterCHROMIUM) /* 267 */ \
+ OP(CreateTransferCacheEntryINTERNAL) /* 268 */ \
+ OP(DeleteTransferCacheEntryINTERNAL) /* 269 */ \
+ OP(UnlockTransferCacheEntryINTERNAL) /* 270 */ \
+ OP(DeletePaintCacheTextBlobsINTERNALImmediate) /* 271 */ \
+ OP(DeletePaintCachePathsINTERNALImmediate) /* 272 */ \
+ OP(ClearPaintCacheINTERNAL) /* 273 */ \
+ OP(CopySubTextureINTERNALImmediate) /* 274 */ \
+ OP(TraceBeginCHROMIUM) /* 275 */ \
+ OP(TraceEndCHROMIUM) /* 276 */ \
+ OP(SetActiveURLCHROMIUM) /* 277 */
enum CommandId {
kOneBeforeStartPoint =
diff --git a/chromium/gpu/command_buffer/common/swap_buffers_flags.h b/chromium/gpu/command_buffer/common/swap_buffers_flags.h
index acc2f787b9e..f4bbb48f08b 100644
--- a/chromium/gpu/command_buffer/common/swap_buffers_flags.h
+++ b/chromium/gpu/command_buffer/common/swap_buffers_flags.h
@@ -13,8 +13,7 @@ namespace gpu {
class SwapBuffersFlags {
public:
enum : uint32_t {
- kPresentationFeedback = 1 << 0, // Request presentation.
- kVSyncParams = 1 << 1, // Request VSYNC parameters update.
+ kVSyncParams = 1 << 0, // Request VSYNC parameters update.
};
};
diff --git a/chromium/gpu/command_buffer/common/unittest_main.cc b/chromium/gpu/command_buffer/common/unittest_main.cc
index aa474317246..e479638ddc1 100644
--- a/chromium/gpu/command_buffer/common/unittest_main.cc
+++ b/chromium/gpu/command_buffer/common/unittest_main.cc
@@ -36,5 +36,5 @@ int main(int argc, char** argv) {
return base::LaunchUnitTests(
argc, argv,
- base::Bind(&base::TestSuite::Run, base::Unretained(&test_suite)));
+ base::BindOnce(&base::TestSuite::Run, base::Unretained(&test_suite)));
}
diff --git a/chromium/gpu/command_buffer/gles2_cmd_buffer_functions.txt b/chromium/gpu/command_buffer/gles2_cmd_buffer_functions.txt
index 7a14db0357a..740a8dcd3d8 100644
--- a/chromium/gpu/command_buffer/gles2_cmd_buffer_functions.txt
+++ b/chromium/gpu/command_buffer/gles2_cmd_buffer_functions.txt
@@ -158,6 +158,20 @@ GL_APICALL void GL_APIENTRY glShaderSource (GLidShader shader, GLsizeiNo
GL_APICALL void GL_APIENTRY glShallowFinishCHROMIUM (void);
GL_APICALL void GL_APIENTRY glShallowFlushCHROMIUM (void);
GL_APICALL void GL_APIENTRY glOrderingBarrierCHROMIUM (void);
+
+// Extensions WEBGL_multi_draw and WEBGL_multi_draw_instanced
+// WEBGL entrypoints are public, CHROMIUM entrypoints are internal to the command buffer
+GL_APICALL void GL_APIENTRY glMultiDrawBeginCHROMIUM (GLsizei drawcount);
+GL_APICALL void GL_APIENTRY glMultiDrawEndCHROMIUM (void);
+GL_APICALL void GL_APIENTRY glMultiDrawArraysCHROMIUM (GLenumDrawMode mode, const GLint* firsts, const GLsizei* counts, GLsizei drawcount);
+GL_APICALL void GL_APIENTRY glMultiDrawArraysInstancedCHROMIUM (GLenumDrawMode mode, const GLint* firsts, const GLsizei* counts, const GLsizei* instance_counts, GLsizei drawcount);
+GL_APICALL void GL_APIENTRY glMultiDrawElementsCHROMIUM (GLenumDrawMode mode, const GLsizei* counts, GLenumIndexType type, const GLsizei* offsets, GLsizei drawcount);
+GL_APICALL void GL_APIENTRY glMultiDrawElementsInstancedCHROMIUM (GLenumDrawMode mode, const GLsizei* counts, GLenumIndexType type, const GLsizei* offsets, const GLsizei* instance_counts, GLsizei drawcount);
+GL_APICALL void GL_APIENTRY glMultiDrawArraysWEBGL (GLenumDrawMode mode, const GLint* firsts, const GLsizei* counts, GLsizei drawcount);
+GL_APICALL void GL_APIENTRY glMultiDrawArraysInstancedWEBGL (GLenumDrawMode mode, const GLint* firsts, const GLsizei* counts, const GLsizei* instance_counts, GLsizei drawcount);
+GL_APICALL void GL_APIENTRY glMultiDrawElementsWEBGL (GLenumDrawMode mode, const GLsizei* counts, GLenumIndexType type, const GLsizei* offsets, GLsizei drawcount);
+GL_APICALL void GL_APIENTRY glMultiDrawElementsInstancedWEBGL (GLenumDrawMode mode, const GLsizei* counts, GLenumIndexType type, const GLsizei* offsets, const GLsizei* instance_counts, GLsizei drawcount);
+
GL_APICALL void GL_APIENTRY glStencilFunc (GLenumCmpFunction func, GLint ref, GLuint mask);
GL_APICALL void GL_APIENTRY glStencilFuncSeparate (GLenumFaceType face, GLenumCmpFunction func, GLint ref, GLuint mask);
GL_APICALL void GL_APIENTRY glStencilMask (GLuint mask);
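A minimal sketch of the public WEBGL entry point declared above (the .txt's GLenumDrawMode maps to a plain GLenum at the C API level):

  const GLint firsts[] = {0, 12};
  const GLsizei counts[] = {6, 9};
  // One call issues two glDrawArrays-equivalent draws:
  // (GL_TRIANGLES, 0, 6) and (GL_TRIANGLES, 12, 9).
  glMultiDrawArraysWEBGL(GL_TRIANGLES, firsts, counts, /*drawcount=*/2);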
@@ -316,8 +330,7 @@ GL_APICALL void GL_APIENTRY glScheduleCALayerInUseQueryCHROMIUM (GLsizei
GL_APICALL void GL_APIENTRY glCommitOverlayPlanesCHROMIUM (GLuint64 swap_id, GLbitfieldSwapBuffersFlags flags = 0);
GL_APICALL void GL_APIENTRY glFlushDriverCachesCHROMIUM (void);
GL_APICALL GLuint GL_APIENTRY glGetLastFlushIdCHROMIUM (void);
-GL_APICALL void GL_APIENTRY glScheduleDCLayerSharedStateCHROMIUM (GLfloat opacity, GLboolean is_clipped, const GLfloat* clip_rect, GLint z_order, const GLfloat* transform);
-GL_APICALL void GL_APIENTRY glScheduleDCLayerCHROMIUM (GLsizei num_textures, const GLuint* contents_texture_ids, const GLfloat* contents_rect, GLuint background_color, GLuint edge_aa_mask, const GLfloat* bounds_rect, GLuint filter, GLuint protected_video_type);
+GL_APICALL void GL_APIENTRY glScheduleDCLayerCHROMIUM (GLuint y_texture_id, GLuint uv_texture_id, GLint z_order, GLint content_x, GLint content_y, GLint content_width, GLint content_height, GLint quad_x, GLint quad_y, GLint quad_width, GLint quad_height, GLfloat transform_c1r1, GLfloat transform_c2r1, GLfloat transform_c1r2, GLfloat transform_c2r2, GLfloat transform_tx, GLfloat transform_ty, GLboolean is_clipped, GLint clip_x, GLint clip_y, GLint clip_width, GLint clip_height, GLuint protected_video_type);
GL_APICALL void GL_APIENTRY glSetActiveURLCHROMIUM (const char* url);
// Extension CHROMIUM_path_rendering.
@@ -408,7 +421,7 @@ GL_APICALL void GL_APIENTRY glFramebufferTextureMultiviewLayeredANGLE (G
GL_APICALL void GL_APIENTRY glMaxShaderCompilerThreadsKHR (GLuint count);
// Extension CHROMIUM_shared_image
-GL_APICALL GLuint GL_APIENTRY glCreateAndTexStorage2DSharedImageCHROMIUM (GLenumTextureInternalFormat internalFormat, const GLbyte* mailbox);
-GL_APICALL void GL_APIENTRY glCreateAndTexStorage2DSharedImageINTERNAL (GLuint texture, GLenumTextureInternalFormat internalFormat, const GLbyte* mailbox);
+GL_APICALL GLuint GL_APIENTRY glCreateAndTexStorage2DSharedImageCHROMIUM (const GLbyte* mailbox);
+GL_APICALL void GL_APIENTRY glCreateAndTexStorage2DSharedImageINTERNAL (GLuint texture, const GLbyte* mailbox);
GL_APICALL void GL_APIENTRY glBeginSharedImageAccessDirectCHROMIUM (GLuint texture, GLenumSharedImageAccessMode mode);
GL_APICALL void GL_APIENTRY glEndSharedImageAccessDirectCHROMIUM (GLuint texture);
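The WEBGL_multi_draw entrypoints declared above batch what would otherwise be several separate draw calls. A hedged usage sketch against the signatures as declared here; all buffer and program setup is assumed to have happened already:

    // Issue three ranges of the bound vertex arrays in a single call.
    const GLint firsts[] = {0, 100, 200};
    const GLsizei counts[] = {100, 100, 50};
    glMultiDrawArraysWEBGL(GL_TRIANGLES, firsts, counts, /*drawcount=*/3);

    // The instanced variant adds a per-draw instance count.
    const GLsizei instance_counts[] = {4, 4, 2};
    glMultiDrawArraysInstancedWEBGL(GL_TRIANGLES, firsts, counts,
                                    instance_counts, /*drawcount=*/3);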
diff --git a/chromium/gpu/command_buffer/raster_cmd_buffer_functions.txt b/chromium/gpu/command_buffer/raster_cmd_buffer_functions.txt
index 0cdeca83888..40ee7becae7 100644
--- a/chromium/gpu/command_buffer/raster_cmd_buffer_functions.txt
+++ b/chromium/gpu/command_buffer/raster_cmd_buffer_functions.txt
@@ -4,7 +4,6 @@
// This file is read by build_raster_cmd_buffer.py to generate commands.
-GL_APICALL void GL_APIENTRY glDeleteTextures (GLsizeiNotNegative n, const GLuint* textures);
GL_APICALL void GL_APIENTRY glFinish (void);
GL_APICALL void GL_APIENTRY glFlush (void);
GL_APICALL GLenum GL_APIENTRY glGetError (void);
@@ -19,7 +18,6 @@ GL_APICALL void GL_APIENTRY glGetQueryObjectuivEXT (GLidQuery id, GLenum
// Non-GL commands.
GL_APICALL void GL_APIENTRY glLoseContextCHROMIUM (GLenumResetStatus current, GLenumResetStatus other);
GL_APICALL GLuint64 GL_APIENTRY glInsertFenceSyncCHROMIUM (void);
-GL_APICALL void GL_APIENTRY glGenSyncTokenCHROMIUM (GLbyte* sync_token);
GL_APICALL void GL_APIENTRY glGenUnverifiedSyncTokenCHROMIUM (GLbyte* sync_token);
GL_APICALL void GL_APIENTRY glVerifySyncTokensCHROMIUM (GLbyte** sync_tokens, GLsizei count);
GL_APICALL void GL_APIENTRY glWaitSyncTokenCHROMIUM (const GLbyte* sync_token);
@@ -28,8 +26,8 @@ GL_APICALL void GL_APIENTRY glWaitSyncTokenCHROMIUM (const GLbyte* sync_
GL_APICALL GLenum GL_APIENTRY glGetGraphicsResetStatusKHR (void);
// Extension CHROMIUM_raster_transport
-GL_APICALL void GL_APIENTRY glBeginRasterCHROMIUM (GLuint sk_color, GLuint msaa_sample_count, GLboolean can_use_lcd_text, GLint color_type, GLuint color_space_transfer_cache_id, const GLbyte* mailbox);
-GL_APICALL void GL_APIENTRY glRasterCHROMIUM (GLuint raster_shm_id, GLuint raster_shm_offset, GLsizeiptr raster_shm_size, GLuint font_shm_id, GLuint font_shm_offset, GLsizeiptr font_shm_size);
+GL_APICALL void GL_APIENTRY glBeginRasterCHROMIUM (GLuint sk_color, GLuint msaa_sample_count, GLboolean can_use_lcd_text, GLuint color_space_transfer_cache_id, const GLbyte* mailbox);
+GL_APICALL void GL_APIENTRY glRasterCHROMIUM (GLuint raster_shm_id, GLuint raster_shm_offset, GLuint raster_shm_size, GLuint font_shm_id, GLuint font_shm_offset, GLuint font_shm_size);
GL_APICALL void GL_APIENTRY glEndRasterCHROMIUM (void);
GL_APICALL void GL_APIENTRY glCreateTransferCacheEntryINTERNAL (GLuint entry_type, GLuint entry_id, GLuint handle_shm_id, GLuint handle_shm_offset, GLuint data_shm_id, GLuint data_shm_offset, GLuint data_size);
GL_APICALL void GL_APIENTRY glDeleteTransferCacheEntryINTERNAL (GLuint entry_type, GLuint entry_id);
@@ -39,9 +37,8 @@ GL_APICALL void GL_APIENTRY glDeletePaintCachePathsINTERNAL (GLsizeiNotN
GL_APICALL void GL_APIENTRY glClearPaintCacheINTERNAL (void);
// TODO(backer): Remove GL encoding. These are not GL functions.
-GL_APICALL GLuint GL_APIENTRY glCreateAndConsumeTexture (bool use_buffer, EnumClassgfx::BufferUsage buffer_usage, EnumClassviz::ResourceFormat format, const GLbyte* mailbox);
-GL_APICALL GLuint GL_APIENTRY glCreateAndConsumeTextureINTERNAL (GLuint texture_id, bool use_buffer, EnumClassgfx::BufferUsage buffer_usage, EnumClassviz::ResourceFormat format, const GLbyte* mailbox);
-GL_APICALL void GL_APIENTRY glCopySubTexture (GLuint source_id, GLuint dest_id, GLint xoffset, GLint yoffset, GLint x, GLint y, GLsizei width, GLsizei height);
+// |mailboxes| argument is the concatenation of the source mailbox and the destination mailbox (32 bytes total)
+GL_APICALL void GL_APIENTRY glCopySubTextureINTERNAL (GLint xoffset, GLint yoffset, GLint x, GLint y, GLsizei width, GLsizei height, const GLbyte* mailboxes);
GL_APICALL void GL_APIENTRY glTraceBeginCHROMIUM (const char* category_name, const char* trace_name);
GL_APICALL void GL_APIENTRY glTraceEndCHROMIUM (void);
GL_APICALL void GL_APIENTRY glSetActiveURLCHROMIUM (const char* url);
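Per the new |mailboxes| comment above, glCopySubTextureINTERNAL takes the source and destination mailboxes as one 32-byte blob. A hedged sketch of the packing a caller would do, assuming gpu::Mailbox's 16-byte name field; the mailbox variables are illustrative:

    // Concatenate source then destination mailbox (16 bytes each).
    GLbyte mailboxes[32];
    memcpy(mailboxes, source_mailbox.name, sizeof(source_mailbox.name));
    memcpy(mailboxes + 16, dest_mailbox.name, sizeof(dest_mailbox.name));
    glCopySubTextureINTERNAL(xoffset, yoffset, x, y, width, height,
                             mailboxes);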
diff --git a/chromium/gpu/command_buffer/service/BUILD.gn b/chromium/gpu/command_buffer/service/BUILD.gn
index 73f1cd7a60a..c0fd9edb8ad 100644
--- a/chromium/gpu/command_buffer/service/BUILD.gn
+++ b/chromium/gpu/command_buffer/service/BUILD.gn
@@ -4,8 +4,8 @@
import("//build/config/jumbo.gni")
import("//build/config/ui.gni")
-import("//third_party/protobuf/proto_library.gni")
import("//gpu/vulkan/features.gni")
+import("//third_party/protobuf/proto_library.gni")
group("service") {
if (is_component_build) {
@@ -183,6 +183,8 @@ target(link_target_type, "gles2_sources") {
"mailbox_manager_sync.h",
"memory_program_cache.cc",
"memory_program_cache.h",
+ "multi_draw_manager.cc",
+ "multi_draw_manager.h",
"passthrough_abstract_texture_impl.cc",
"passthrough_abstract_texture_impl.h",
"passthrough_discardable_manager.cc",
@@ -203,8 +205,6 @@ target(link_target_type, "gles2_sources") {
"raster_cmd_validation_implementation_autogen.h",
"raster_decoder.cc",
"raster_decoder.h",
- "raster_decoder_context_state.cc",
- "raster_decoder_context_state.h",
"renderbuffer_manager.cc",
"renderbuffer_manager.h",
"sampler_manager.cc",
@@ -223,6 +223,8 @@ target(link_target_type, "gles2_sources") {
"shader_translator.h",
"shader_translator_cache.cc",
"shader_translator_cache.h",
+ "shared_context_state.cc",
+ "shared_context_state.h",
"shared_image_backing.cc",
"shared_image_backing.h",
"shared_image_backing_factory.h",
@@ -258,15 +260,6 @@ target(link_target_type, "gles2_sources") {
"wrapped_sk_image.h",
]
- if (is_android) {
- sources += [
- "ahardwarebuffer_utils.cc",
- "ahardwarebuffer_utils.h",
- "shared_image_backing_factory_ahardwarebuffer.cc",
- "shared_image_backing_factory_ahardwarebuffer.h",
- ]
- }
-
configs += [
"//build/config:precompiled_headers",
"//gpu:gpu_gles2_implementation",
@@ -322,10 +315,31 @@ target(link_target_type, "gles2_sources") {
]
}
- if (is_android && !is_debug) {
- # On Android optimize more since this component can be a bottleneck.
- configs -= [ "//build/config/compiler:default_optimization" ]
- configs += [ "//build/config/compiler:optimize_max" ]
+ if (is_android) {
+ if (!is_debug) {
+ # On Android optimize more since this component can be a bottleneck.
+ configs -= [ "//build/config/compiler:default_optimization" ]
+ configs += [ "//build/config/compiler:optimize_max" ]
+ }
+ sources += [
+ "ahardwarebuffer_utils.cc",
+ "ahardwarebuffer_utils.h",
+ "shared_image_backing_factory_ahardwarebuffer.cc",
+ "shared_image_backing_factory_ahardwarebuffer.h",
+ ]
+
+ # TODO(cblume): http://crbug.com/911313
+ # Abstract out the platform-specific defines. Right now we need the
+ # Android platform-specific define here to be able to include
+ # Android-specific functions.
+ defines = [ "VK_USE_PLATFORM_ANDROID_KHR" ]
+ deps += [ "//third_party/libsync" ]
+ if (enable_vulkan) {
+ deps += [
+ "//gpu/ipc/common:android_image_reader_utils",
+ "//gpu/vulkan:vulkan",
+ ]
+ }
}
}
diff --git a/chromium/gpu/command_buffer/service/buffer_manager.cc b/chromium/gpu/command_buffer/service/buffer_manager.cc
index 493762d04f8..979ed2dcab4 100644
--- a/chromium/gpu/command_buffer/service/buffer_manager.cc
+++ b/chromium/gpu/command_buffer/service/buffer_manager.cc
@@ -10,12 +10,11 @@
#include "base/format_macros.h"
#include "base/logging.h"
-#include "base/numerics/safe_math.h"
+#include "base/numerics/checked_math.h"
#include "base/strings/stringprintf.h"
#include "base/threading/thread_task_runner_handle.h"
#include "base/trace_event/memory_dump_manager.h"
#include "base/trace_event/trace_event.h"
-#include "gpu/command_buffer/common/gles2_cmd_utils.h"
#include "gpu/command_buffer/service/context_state.h"
#include "gpu/command_buffer/service/error_state.h"
#include "gpu/command_buffer/service/feature_info.h"
@@ -210,9 +209,8 @@ bool Buffer::CheckRange(GLintptr offset, GLsizeiptr size) const {
size < 0 || size > std::numeric_limits<int32_t>::max()) {
return false;
}
- base::CheckedNumeric<int32_t> max = offset;
- max += size;
- return max.IsValid() && max.ValueOrDefault(0) <= size_;
+ int32_t max;
+ return base::CheckAdd(offset, size).AssignIfValid(&max) && max <= size_;
}
void Buffer::SetRange(GLintptr offset, GLsizeiptr size, const GLvoid * data) {
@@ -307,12 +305,10 @@ bool Buffer::GetMaxValueForRange(
}
uint32_t size;
- if (!SafeMultiplyUint32(
- count, GLES2Util::GetGLTypeSizeForBuffers(type), &size)) {
- return false;
- }
-
- if (!SafeAddUint32(offset, size, &size)) {
+ if (!base::CheckAdd(
+ offset,
+ base::CheckMul(count, GLES2Util::GetGLTypeSizeForBuffers(type)))
+ .AssignIfValid(&size)) {
return false;
}
@@ -426,10 +422,12 @@ void BufferManager::SetInfo(Buffer* buffer,
memory_type_tracker_->TrackMemAlloc(buffer->size());
}
-void BufferManager::ValidateAndDoBufferData(
- ContextState* context_state, GLenum target, GLsizeiptr size,
- const GLvoid* data, GLenum usage) {
- ErrorState* error_state = context_state->GetErrorState();
+void BufferManager::ValidateAndDoBufferData(ContextState* context_state,
+ ErrorState* error_state,
+ GLenum target,
+ GLsizeiptr size,
+ const GLvoid* data,
+ GLenum usage) {
if (!feature_info_->validators()->buffer_target.IsValid(target)) {
ERRORSTATE_SET_GL_ERROR_INVALID_ENUM(
error_state, "glBufferData", target, "target");
@@ -475,7 +473,6 @@ void BufferManager::ValidateAndDoBufferData(
}
}
-
void BufferManager::DoBufferData(
ErrorState* error_state,
Buffer* buffer,
@@ -514,11 +511,14 @@ void BufferManager::DoBufferData(
SetInfo(buffer, target, size, usage, use_shadow);
}
-void BufferManager::ValidateAndDoBufferSubData(
- ContextState* context_state, GLenum target, GLintptr offset, GLsizeiptr size,
- const GLvoid * data) {
- Buffer* buffer = RequestBufferAccess(context_state, target, offset, size,
- "glBufferSubData");
+void BufferManager::ValidateAndDoBufferSubData(ContextState* context_state,
+ ErrorState* error_state,
+ GLenum target,
+ GLintptr offset,
+ GLsizeiptr size,
+ const GLvoid* data) {
+ Buffer* buffer = RequestBufferAccess(context_state, error_state, target,
+ offset, size, "glBufferSubData");
if (!buffer) {
return;
}
@@ -535,20 +535,23 @@ void BufferManager::DoBufferSubData(
}
}
-void BufferManager::ValidateAndDoCopyBufferSubData(
- ContextState* context_state, GLenum readtarget, GLenum writetarget,
- GLintptr readoffset, GLintptr writeoffset, GLsizeiptr size) {
+void BufferManager::ValidateAndDoCopyBufferSubData(ContextState* context_state,
+ ErrorState* error_state,
+ GLenum readtarget,
+ GLenum writetarget,
+ GLintptr readoffset,
+ GLintptr writeoffset,
+ GLsizeiptr size) {
const char* func_name = "glCopyBufferSubData";
- Buffer* readbuffer = RequestBufferAccess(context_state, readtarget,
- readoffset, size, func_name);
+ Buffer* readbuffer = RequestBufferAccess(
+ context_state, error_state, readtarget, readoffset, size, func_name);
if (!readbuffer)
return;
- Buffer* writebuffer = RequestBufferAccess(context_state, writetarget,
- writeoffset, size, func_name);
+ Buffer* writebuffer = RequestBufferAccess(
+ context_state, error_state, writetarget, writeoffset, size, func_name);
if (!writebuffer)
return;
- ErrorState* error_state = context_state->GetErrorState();
if (readbuffer == writebuffer &&
((writeoffset >= readoffset && writeoffset < readoffset + size) ||
(readoffset >= writeoffset && readoffset < writeoffset + size))) {
@@ -592,12 +595,16 @@ void BufferManager::DoCopyBufferSubData(
}
void BufferManager::ValidateAndDoGetBufferParameteri64v(
- ContextState* context_state, GLenum target, GLenum pname, GLint64* params) {
+ ContextState* context_state,
+ ErrorState* error_state,
+ GLenum target,
+ GLenum pname,
+ GLint64* params) {
Buffer* buffer = GetBufferInfoForTarget(context_state, target);
if (!buffer) {
- ERRORSTATE_SET_GL_ERROR(
- context_state->GetErrorState(), GL_INVALID_OPERATION,
- "glGetBufferParameteri64v", "no buffer bound for target");
+ ERRORSTATE_SET_GL_ERROR(error_state, GL_INVALID_OPERATION,
+ "glGetBufferParameteri64v",
+ "no buffer bound for target");
return;
}
switch (pname) {
@@ -622,12 +629,16 @@ void BufferManager::ValidateAndDoGetBufferParameteri64v(
}
void BufferManager::ValidateAndDoGetBufferParameteriv(
- ContextState* context_state, GLenum target, GLenum pname, GLint* params) {
+ ContextState* context_state,
+ ErrorState* error_state,
+ GLenum target,
+ GLenum pname,
+ GLint* params) {
Buffer* buffer = GetBufferInfoForTarget(context_state, target);
if (!buffer) {
- ERRORSTATE_SET_GL_ERROR(
- context_state->GetErrorState(), GL_INVALID_OPERATION,
- "glGetBufferParameteriv", "no buffer bound for target");
+ ERRORSTATE_SET_GL_ERROR(error_state, GL_INVALID_OPERATION,
+ "glGetBufferParameteriv",
+ "no buffer bound for target");
return;
}
switch (pname) {
@@ -788,13 +799,12 @@ bool BufferManager::OnMemoryDump(const base::trace_event::MemoryDumpArgs& args,
}
Buffer* BufferManager::RequestBufferAccess(ContextState* context_state,
+ ErrorState* error_state,
GLenum target,
GLintptr offset,
GLsizeiptr size,
const char* func_name) {
DCHECK(context_state);
- ErrorState* error_state = context_state->GetErrorState();
-
Buffer* buffer = GetBufferInfoForTarget(context_state, target);
if (!RequestBufferAccess(error_state, buffer, func_name,
"bound to target 0x%04x", target)) {
@@ -811,11 +821,10 @@ Buffer* BufferManager::RequestBufferAccess(ContextState* context_state,
}
Buffer* BufferManager::RequestBufferAccess(ContextState* context_state,
+ ErrorState* error_state,
GLenum target,
const char* func_name) {
DCHECK(context_state);
- ErrorState* error_state = context_state->GetErrorState();
-
Buffer* buffer = GetBufferInfoForTarget(context_state, target);
return RequestBufferAccess(error_state, buffer, func_name,
"bound to target 0x%04x", target)
@@ -895,10 +904,10 @@ bool BufferManager::RequestBuffersAccess(
return false;
}
GLsizeiptr size = bindings->GetEffectiveBufferSize(ii);
- base::CheckedNumeric<GLsizeiptr> required_size = variable_sizes[ii];
- required_size *= count;
- if (size < required_size.ValueOrDefault(
- std::numeric_limits<GLsizeiptr>::max())) {
+ GLsizeiptr required_size;
+ if (!base::CheckMul(variable_sizes[ii], count)
+ .AssignIfValid(&required_size) ||
+ size < required_size) {
std::string msg = base::StringPrintf(
"%s : buffer or buffer range at index %zu not large enough",
message_tag, ii);
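The safe_math -> checked_math changes in this file all follow one shape: compose a lazily evaluated checked expression, then extract the result with AssignIfValid(). A self-contained sketch of the pattern; the function and names are illustrative, not from this file:

    #include "base/numerics/checked_math.h"

    // Computes offset + count * elem_size, returning false instead of
    // silently wrapping on overflow.
    bool ComputeEnd(uint32_t offset, uint32_t count, uint32_t elem_size,
                    uint32_t* out) {
      return base::CheckAdd(offset, base::CheckMul(count, elem_size))
          .AssignIfValid(out);
    }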
diff --git a/chromium/gpu/command_buffer/service/buffer_manager.h b/chromium/gpu/command_buffer/service/buffer_manager.h
index c21ea41584f..7f1f3af53ac 100644
--- a/chromium/gpu/command_buffer/service/buffer_manager.h
+++ b/chromium/gpu/command_buffer/service/buffer_manager.h
@@ -11,9 +11,9 @@
#include <map>
#include <memory>
+#include <unordered_map>
#include <vector>
-#include "base/containers/hash_tables.h"
#include "base/debug/stack_trace.h"
#include "base/logging.h"
#include "base/macros.h"
@@ -268,32 +268,47 @@ class GPU_GLES2_EXPORT BufferManager
// Validates a glBufferSubData, and then calls DoBufferData if validation was
// successful.
- void ValidateAndDoBufferSubData(
- ContextState* context_state, GLenum target, GLintptr offset,
- GLsizeiptr size, const GLvoid * data);
+ void ValidateAndDoBufferSubData(ContextState* context_state,
+ ErrorState* error_state,
+ GLenum target,
+ GLintptr offset,
+ GLsizeiptr size,
+ const GLvoid* data);
// Validates a glBufferData, and then calls DoBufferData if validation was
// successful.
- void ValidateAndDoBufferData(
- ContextState* context_state, GLenum target, GLsizeiptr size,
- const GLvoid * data, GLenum usage);
+ void ValidateAndDoBufferData(ContextState* context_state,
+ ErrorState* error_state,
+ GLenum target,
+ GLsizeiptr size,
+ const GLvoid* data,
+ GLenum usage);
// Validates a glCopyBufferSubData, and then calls DoCopyBufferSubData if
// validation was successful.
- void ValidateAndDoCopyBufferSubData(
- ContextState* context_state, GLenum readtarget, GLenum writetarget,
- GLintptr readoffset, GLintptr writeoffset, GLsizeiptr size);
+ void ValidateAndDoCopyBufferSubData(ContextState* context_state,
+ ErrorState* error_state,
+ GLenum readtarget,
+ GLenum writetarget,
+ GLintptr readoffset,
+ GLintptr writeoffset,
+ GLsizeiptr size);
// Validates a glGetBufferParameteri64v, and then calls GetBufferParameteri64v
// if validation was successful.
- void ValidateAndDoGetBufferParameteri64v(
- ContextState* context_state, GLenum target, GLenum pname,
- GLint64* params);
+ void ValidateAndDoGetBufferParameteri64v(ContextState* context_state,
+ ErrorState* error_state,
+ GLenum target,
+ GLenum pname,
+ GLint64* params);
// Validates a glGetBufferParameteriv, and then calls GetBufferParameteriv if
// validation was successful.
- void ValidateAndDoGetBufferParameteriv(
- ContextState* context_state, GLenum target, GLenum pname, GLint* params);
+ void ValidateAndDoGetBufferParameteriv(ContextState* context_state,
+ ErrorState* error_state,
+ GLenum target,
+ GLenum pname,
+ GLint* params);
// Sets the target of a buffer. Returns false if the target can not be set.
bool SetTarget(Buffer* buffer, GLenum target);
@@ -334,12 +349,14 @@ class GPU_GLES2_EXPORT BufferManager
// return nullptr if a GL error is generated.
// Generates INVALID_VALUE if offset + size is out of range.
Buffer* RequestBufferAccess(ContextState* context_state,
+ ErrorState* error_state,
GLenum target,
GLintptr offset,
GLsizeiptr size,
const char* func_name);
// Same as above, but assume to access the entire buffer.
Buffer* RequestBufferAccess(ContextState* context_state,
+ ErrorState* error_state,
GLenum target,
const char* func_name);
// Same as above, but it can be any buffer rather than the buffer bound to
@@ -430,7 +447,7 @@ class GPU_GLES2_EXPORT BufferManager
scoped_refptr<FeatureInfo> feature_info_;
// Info for each buffer in the system.
- typedef base::hash_map<GLuint, scoped_refptr<Buffer> > BufferMap;
+ typedef std::unordered_map<GLuint, scoped_refptr<Buffer>> BufferMap;
BufferMap buffers_;
// The maximum size of buffers.
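The hash_tables.h removal above is mechanical: base::hash_map was a thin wrapper, and std::unordered_map is a drop-in replacement for these GLuint-keyed tables. A minimal sketch of the unchanged usage:

    #include <unordered_map>

    typedef std::unordered_map<GLuint, scoped_refptr<Buffer>> BufferMap;
    BufferMap buffers;
    buffers[client_id] = buffer;        // Same insertion semantics.
    auto it = buffers.find(client_id);  // Same lookup API.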
diff --git a/chromium/gpu/command_buffer/service/buffer_manager_unittest.cc b/chromium/gpu/command_buffer/service/buffer_manager_unittest.cc
index 43ad1aaa05c..02a839e392b 100644
--- a/chromium/gpu/command_buffer/service/buffer_manager_unittest.cc
+++ b/chromium/gpu/command_buffer/service/buffer_manager_unittest.cc
@@ -7,6 +7,7 @@
#include <memory>
+#include "base/stl_util.h"
#include "gpu/command_buffer/service/buffer_manager.h"
#include "gpu/command_buffer/service/error_state_mock.h"
#include "gpu/command_buffer/service/feature_info.h"
@@ -526,7 +527,7 @@ TEST_F(BufferManagerTest, BindBufferConflicts) {
GL_TRANSFORM_FEEDBACK_BUFFER,
GL_UNIFORM_BUFFER
};
- for (size_t ii = 0; ii < arraysize(kTargets); ++ii) {
+ for (size_t ii = 0; ii < base::size(kTargets); ++ii) {
client_id++;
service_id++;
manager_->CreateBuffer(client_id, service_id);
@@ -534,7 +535,7 @@ TEST_F(BufferManagerTest, BindBufferConflicts) {
ASSERT_TRUE(buffer != nullptr);
EXPECT_TRUE(manager_->SetTarget(buffer, kTargets[ii]));
- for (size_t jj = 0; jj < arraysize(kTargets); ++jj) {
+ for (size_t jj = 0; jj < base::size(kTargets); ++jj) {
EXPECT_TRUE(manager_->SetTarget(buffer, kTargets[jj]));
}
EXPECT_EQ(kTargets[ii], GetInitialTarget(buffer));
@@ -553,7 +554,7 @@ TEST_F(BufferManagerTest, BindBufferConflicts) {
GL_TRANSFORM_FEEDBACK_BUFFER,
GL_UNIFORM_BUFFER
};
- for (size_t ii = 0; ii < arraysize(kTargets); ++ii) {
+ for (size_t ii = 0; ii < base::size(kTargets); ++ii) {
client_id++;
service_id++;
manager_->CreateBuffer(client_id, service_id);
@@ -561,7 +562,7 @@ TEST_F(BufferManagerTest, BindBufferConflicts) {
ASSERT_TRUE(buffer != nullptr);
EXPECT_TRUE(manager_->SetTarget(buffer, kTargets[ii]));
- for (size_t jj = 0; jj < arraysize(kTargets); ++jj) {
+ for (size_t jj = 0; jj < base::size(kTargets); ++jj) {
EXPECT_TRUE(manager_->SetTarget(buffer, kTargets[jj]));
}
}
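arraysize() was a macro; base::size() from base/stl_util.h is its constexpr replacement and a polyfill for C++17 std::size. A minimal sketch:

    #include "base/stl_util.h"

    const GLenum kTargets[] = {GL_ARRAY_BUFFER, GL_ELEMENT_ARRAY_BUFFER};
    for (size_t ii = 0; ii < base::size(kTargets); ++ii) {
      // base::size() deduces the array extent at compile time.
    }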
diff --git a/chromium/gpu/command_buffer/service/command_buffer_direct.cc b/chromium/gpu/command_buffer/service/command_buffer_direct.cc
index 8c7f8616f63..45439dc4eb2 100644
--- a/chromium/gpu/command_buffer/service/command_buffer_direct.cc
+++ b/chromium/gpu/command_buffer/service/command_buffer_direct.cc
@@ -6,7 +6,6 @@
#include "base/bind.h"
#include "base/callback_helpers.h"
-#include "gpu/command_buffer/service/sync_point_manager.h"
#include "gpu/command_buffer/service/transfer_buffer_manager.h"
namespace gpu {
@@ -19,37 +18,11 @@ uint64_t g_next_command_buffer_id = 1;
CommandBufferDirect::CommandBufferDirect(
TransferBufferManager* transfer_buffer_manager)
- : CommandBufferDirect(transfer_buffer_manager, nullptr) {}
-
-CommandBufferDirect::CommandBufferDirect(
- TransferBufferManager* transfer_buffer_manager,
- SyncPointManager* sync_point_manager)
: service_(this, transfer_buffer_manager),
- sync_point_manager_(sync_point_manager),
command_buffer_id_(
- CommandBufferId::FromUnsafeValue(g_next_command_buffer_id++)) {
- if (sync_point_manager_) {
- sync_point_order_data_ = sync_point_manager_->CreateSyncPointOrderData();
- sync_point_client_state_ = sync_point_manager_->CreateSyncPointClientState(
- GetNamespaceID(), GetCommandBufferID(),
- sync_point_order_data_->sequence_id());
- } else {
- sync_point_order_data_ = nullptr;
- sync_point_client_state_ = nullptr;
- }
-}
+ CommandBufferId::FromUnsafeValue(g_next_command_buffer_id++)) {}
-CommandBufferDirect::~CommandBufferDirect() {
- sync_point_manager_ = nullptr;
- if (sync_point_order_data_) {
- sync_point_order_data_->Destroy();
- sync_point_order_data_ = nullptr;
- }
- if (sync_point_client_state_) {
- sync_point_client_state_->Destroy();
- sync_point_client_state_ = nullptr;
- }
-}
+CommandBufferDirect::~CommandBufferDirect() = default;
CommandBuffer::State CommandBufferDirect::GetLastState() {
service_.UpdateState();
@@ -76,32 +49,7 @@ CommandBuffer::State CommandBufferDirect::WaitForGetOffsetInRange(
void CommandBufferDirect::Flush(int32_t put_offset) {
DCHECK(handler_);
- uint32_t order_num = 0;
- if (sync_point_manager_) {
- // If sync point manager is supported, assign order numbers to commands.
- if (paused_order_num_) {
- // Was previous paused, continue to process the order number.
- order_num = paused_order_num_;
- paused_order_num_ = 0;
- } else {
- order_num = sync_point_order_data_->GenerateUnprocessedOrderNumber();
- }
- sync_point_order_data_->BeginProcessingOrderNumber(order_num);
- }
-
- if (pause_commands_) {
- // Do not process commands, simply store the current order number.
- paused_order_num_ = order_num;
-
- sync_point_order_data_->PauseProcessingOrderNumber(order_num);
- return;
- }
-
service_.Flush(put_offset, handler_);
- if (sync_point_manager_) {
- // Finish processing order number here.
- sync_point_order_data_->FinishProcessingOrderNumber(order_num);
- }
}
void CommandBufferDirect::OrderingBarrier(int32_t put_offset) {
@@ -112,7 +60,7 @@ void CommandBufferDirect::SetGetBuffer(int32_t transfer_buffer_id) {
service_.SetGetBuffer(transfer_buffer_id);
}
-scoped_refptr<Buffer> CommandBufferDirect::CreateTransferBuffer(size_t size,
+scoped_refptr<Buffer> CommandBufferDirect::CreateTransferBuffer(uint32_t size,
int32_t* id) {
return service_.CreateTransferBuffer(size, id);
}
@@ -135,17 +83,7 @@ void CommandBufferDirect::CacheShader(const std::string& key,
const std::string& shader) {}
void CommandBufferDirect::OnFenceSyncRelease(uint64_t release) {
- DCHECK(sync_point_client_state_);
- service_.SetReleaseCount(release);
- sync_point_client_state_->ReleaseFenceSync(release);
-}
-
-bool CommandBufferDirect::OnWaitSyncToken(const gpu::SyncToken& sync_token) {
- DCHECK(sync_point_manager_);
- if (sync_point_manager_->IsSyncTokenReleased(sync_token))
- return false;
- service_.SetScheduled(false);
- return true;
+ NOTIMPLEMENTED();
}
void CommandBufferDirect::OnDescheduleUntilFinished() {
@@ -158,37 +96,8 @@ void CommandBufferDirect::OnRescheduleAfterFinished() {
void CommandBufferDirect::OnSwapBuffers(uint64_t swap_id, uint32_t flags) {}
-gpu::CommandBufferNamespace CommandBufferDirect::GetNamespaceID() const {
- return gpu::CommandBufferNamespace::IN_PROCESS;
-}
-
-CommandBufferId CommandBufferDirect::GetCommandBufferID() const {
- return command_buffer_id_;
-}
-
-void CommandBufferDirect::SetCommandsPaused(bool paused) {
- pause_commands_ = paused;
-}
-
-void CommandBufferDirect::SignalSyncToken(const gpu::SyncToken& sync_token,
- base::OnceClosure callback) {
- if (sync_point_manager_) {
- DCHECK(!paused_order_num_);
- uint32_t order_num =
- sync_point_order_data_->GenerateUnprocessedOrderNumber();
- sync_point_order_data_->BeginProcessingOrderNumber(order_num);
- base::RepeatingClosure maybe_pass_callback =
- base::AdaptCallbackForRepeating(std::move(callback));
- if (!sync_point_client_state_->Wait(sync_token, maybe_pass_callback))
- maybe_pass_callback.Run();
- sync_point_order_data_->FinishProcessingOrderNumber(order_num);
- } else {
- std::move(callback).Run();
- }
-}
-
scoped_refptr<Buffer> CommandBufferDirect::CreateTransferBufferWithId(
- size_t size,
+ uint32_t size,
int32_t id) {
return service_.CreateTransferBufferWithId(size, id);
}
diff --git a/chromium/gpu/command_buffer/service/command_buffer_direct.h b/chromium/gpu/command_buffer/service/command_buffer_direct.h
index 7897b08e1ed..d12b11f8476 100644
--- a/chromium/gpu/command_buffer/service/command_buffer_direct.h
+++ b/chromium/gpu/command_buffer/service/command_buffer_direct.h
@@ -16,21 +16,12 @@ namespace gpu {
class AsyncAPIInterface;
class TransferBufferManager;
-class SyncPointClientState;
-class SyncPointManager;
-class SyncPointOrderData;
-struct SyncToken;
class GPU_EXPORT CommandBufferDirect : public CommandBuffer,
public CommandBufferServiceClient,
public DecoderClient {
public:
- using MakeCurrentCallback = base::Callback<bool()>;
-
- CommandBufferDirect(TransferBufferManager* transfer_buffer_manager,
- SyncPointManager* sync_point_manager);
explicit CommandBufferDirect(TransferBufferManager* transfer_buffer_manager);
-
~CommandBufferDirect() override;
void set_handler(AsyncAPIInterface* handler) { handler_ = handler; }
@@ -45,7 +36,8 @@ class GPU_EXPORT CommandBufferDirect : public CommandBuffer,
int32_t start,
int32_t end) override;
void SetGetBuffer(int32_t transfer_buffer_id) override;
- scoped_refptr<Buffer> CreateTransferBuffer(size_t size, int32_t* id) override;
+ scoped_refptr<Buffer> CreateTransferBuffer(uint32_t size,
+ int32_t* id) override;
void DestroyTransferBuffer(int32_t id) override;
// CommandBufferServiceBase implementation:
@@ -56,20 +48,12 @@ class GPU_EXPORT CommandBufferDirect : public CommandBuffer,
void OnConsoleMessage(int32_t id, const std::string& message) override;
void CacheShader(const std::string& key, const std::string& shader) override;
void OnFenceSyncRelease(uint64_t release) override;
- bool OnWaitSyncToken(const gpu::SyncToken&) override;
void OnDescheduleUntilFinished() override;
void OnRescheduleAfterFinished() override;
void OnSwapBuffers(uint64_t swap_id, uint32_t flags) override;
void ScheduleGrContextCleanup() override {}
- CommandBufferNamespace GetNamespaceID() const;
- CommandBufferId GetCommandBufferID() const;
-
- void SetCommandsPaused(bool paused);
- void SignalSyncToken(const gpu::SyncToken& sync_token,
- base::OnceClosure callback);
-
- scoped_refptr<Buffer> CreateTransferBufferWithId(size_t size, int32_t id);
+ scoped_refptr<Buffer> CreateTransferBufferWithId(uint32_t size, int32_t id);
void SetGetOffsetForTest(int32_t get_offset) {
service_.SetGetOffsetForTest(get_offset);
@@ -77,13 +61,7 @@ class GPU_EXPORT CommandBufferDirect : public CommandBuffer,
private:
CommandBufferService service_;
- SyncPointManager* sync_point_manager_;
-
AsyncAPIInterface* handler_ = nullptr;
- scoped_refptr<SyncPointOrderData> sync_point_order_data_;
- scoped_refptr<SyncPointClientState> sync_point_client_state_;
- bool pause_commands_ = false;
- uint32_t paused_order_num_ = 0;
const CommandBufferId command_buffer_id_;
};
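With the sync point plumbing gone, constructing a CommandBufferDirect needs only a TransferBufferManager and a handler. A hedged sketch of the reduced surface; |tbm|, |decoder|, and |put_offset| are assumed to exist on the caller's side:

    gpu::CommandBufferDirect command_buffer(tbm);  // TransferBufferManager*.
    command_buffer.set_handler(decoder);           // AsyncAPIInterface*.
    // Flush() now always processes commands; the order-number bookkeeping
    // and pause support were removed along with the SyncPointManager.
    command_buffer.Flush(put_offset);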
diff --git a/chromium/gpu/command_buffer/service/command_buffer_service.cc b/chromium/gpu/command_buffer/service/command_buffer_service.cc
index 644b8205739..89b0e1b0f8e 100644
--- a/chromium/gpu/command_buffer/service/command_buffer_service.cc
+++ b/chromium/gpu/command_buffer/service/command_buffer_service.cc
@@ -106,7 +106,7 @@ void CommandBufferService::SetGetBuffer(int32_t transfer_buffer_id) {
// This means ring_buffer_ can be nullptr.
ring_buffer_ = GetTransferBuffer(transfer_buffer_id);
if (ring_buffer_) {
- int32_t size = ring_buffer_->size();
+ uint32_t size = ring_buffer_->size();
volatile void* memory = ring_buffer_->memory();
// check proper alignments.
DCHECK_EQ(
@@ -145,10 +145,9 @@ void CommandBufferService::SetReleaseCount(uint64_t release_count) {
UpdateState();
}
-scoped_refptr<Buffer> CommandBufferService::CreateTransferBuffer(size_t size,
+scoped_refptr<Buffer> CommandBufferService::CreateTransferBuffer(uint32_t size,
int32_t* id) {
- static int32_t next_id = 1;
- *id = next_id++;
+ *id = GetNextBufferId();
auto result = CreateTransferBufferWithId(size, *id);
if (!result)
*id = -1;
@@ -171,7 +170,7 @@ bool CommandBufferService::RegisterTransferBuffer(
}
scoped_refptr<Buffer> CommandBufferService::CreateTransferBufferWithId(
- size_t size,
+ uint32_t size,
int32_t id) {
scoped_refptr<Buffer> buffer = MakeMemoryBuffer(size);
if (!RegisterTransferBuffer(id, buffer)) {
diff --git a/chromium/gpu/command_buffer/service/command_buffer_service.h b/chromium/gpu/command_buffer/service/command_buffer_service.h
index b6bfb5c116a..42e1716a0b3 100644
--- a/chromium/gpu/command_buffer/service/command_buffer_service.h
+++ b/chromium/gpu/command_buffer/service/command_buffer_service.h
@@ -106,10 +106,10 @@ class GPU_EXPORT CommandBufferService : public CommandBufferServiceBase {
// Creates an in-process transfer buffer and registers it with a newly created
// id.
- scoped_refptr<Buffer> CreateTransferBuffer(size_t size, int32_t* id);
+ scoped_refptr<Buffer> CreateTransferBuffer(uint32_t size, int32_t* id);
// Creates an in-process transfer buffer and registers it with a given id.
- scoped_refptr<Buffer> CreateTransferBufferWithId(size_t size, int32_t id);
+ scoped_refptr<Buffer> CreateTransferBufferWithId(uint32_t size, int32_t id);
// Sets whether commands should be processed by this scheduler. Setting to
// false unschedules. Setting to true reschedules.
diff --git a/chromium/gpu/command_buffer/service/common_decoder.cc b/chromium/gpu/command_buffer/service/common_decoder.cc
index 7cafcc451a5..0a02d3f7318 100644
--- a/chromium/gpu/command_buffer/service/common_decoder.cc
+++ b/chromium/gpu/command_buffer/service/common_decoder.cc
@@ -10,6 +10,7 @@
#include <algorithm>
#include "base/numerics/safe_math.h"
+#include "base/stl_util.h"
#include "gpu/command_buffer/service/command_buffer_service.h"
namespace gpu {
@@ -212,7 +213,7 @@ RETURN_TYPE GetImmediateDataAs(const volatile COMMAND_TYPE& pod) {
error::Error CommonDecoder::DoCommonCommand(unsigned int command,
unsigned int arg_count,
const volatile void* cmd_data) {
- if (command < arraysize(command_info)) {
+ if (command < base::size(command_info)) {
const CommandInfo& info = command_info[command];
unsigned int info_arg_count = static_cast<unsigned int>(info.arg_count);
if ((info.arg_flags == cmd::kFixed && arg_count == info_arg_count) ||
diff --git a/chromium/gpu/command_buffer/service/common_decoder.h b/chromium/gpu/command_buffer/service/common_decoder.h
index 0877aabae32..76df1a56fc3 100644
--- a/chromium/gpu/command_buffer/service/common_decoder.h
+++ b/chromium/gpu/command_buffer/service/common_decoder.h
@@ -99,8 +99,10 @@ class GPU_EXPORT CommonDecoder {
private:
bool OffsetSizeValid(size_t offset, size_t size) const {
- size_t temp = offset + size;
- return temp <= size_ && temp >= offset;
+ size_t end = 0;
+ if (!base::CheckAdd<size_t>(offset, size).AssignIfValid(&end))
+ return false;
+ return end <= size_;
}
size_t size_;
diff --git a/chromium/gpu/command_buffer/service/context_group.h b/chromium/gpu/command_buffer/service/context_group.h
index 026bca1488b..3b3cacba99c 100644
--- a/chromium/gpu/command_buffer/service/context_group.h
+++ b/chromium/gpu/command_buffer/service/context_group.h
@@ -8,9 +8,9 @@
#include <stdint.h>
#include <memory>
+#include <unordered_map>
#include <vector>
-#include "base/containers/hash_tables.h"
#include "base/macros.h"
#include "base/memory/ref_counted.h"
#include "base/memory/weak_ptr.h"
@@ -228,7 +228,7 @@ class GPU_GLES2_EXPORT ContextGroup : public base::RefCounted<ContextGroup> {
}
bool GetSyncServiceId(GLuint client_id, GLsync* service_id) const {
- base::hash_map<GLuint, GLsync>::const_iterator iter =
+ std::unordered_map<GLuint, GLsync>::const_iterator iter =
syncs_id_map_.find(client_id);
if (iter == syncs_id_map_.end())
return false;
@@ -324,7 +324,7 @@ class GPU_GLES2_EXPORT ContextGroup : public base::RefCounted<ContextGroup> {
std::vector<base::WeakPtr<DecoderContext>> decoders_;
// Mappings from client side IDs to service side IDs.
- base::hash_map<GLuint, GLsync> syncs_id_map_;
+ std::unordered_map<GLuint, GLsync> syncs_id_map_;
bool use_passthrough_cmd_decoder_;
std::unique_ptr<PassthroughResources> passthrough_resources_;
diff --git a/chromium/gpu/command_buffer/service/context_state.cc b/chromium/gpu/command_buffer/service/context_state.cc
index 7501a1ec0be..9a6661e2185 100644
--- a/chromium/gpu/command_buffer/service/context_state.cc
+++ b/chromium/gpu/command_buffer/service/context_state.cc
@@ -10,7 +10,6 @@
#include "gpu/command_buffer/common/gles2_cmd_utils.h"
#include "gpu/command_buffer/service/buffer_manager.h"
-#include "gpu/command_buffer/service/error_state.h"
#include "gpu/command_buffer/service/framebuffer_manager.h"
#include "gpu/command_buffer/service/program_manager.h"
#include "gpu/command_buffer/service/renderbuffer_manager.h"
@@ -223,12 +222,9 @@ void Vec4::SetValues<GLuint>(const GLuint* values) {
}
ContextState::ContextState(FeatureInfo* feature_info,
- ErrorStateClient* error_state_client,
- Logger* logger,
bool track_texture_and_sampler_units)
: track_texture_and_sampler_units(track_texture_and_sampler_units),
- feature_info_(feature_info),
- error_state_(ErrorState::Create(error_state_client, logger)) {
+ feature_info_(feature_info) {
Initialize();
}
@@ -629,10 +625,6 @@ void ContextState::RestoreState(const ContextState* prev_state) {
framebuffer_srgb_valid_ = false;
}
-ErrorState* ContextState::GetErrorState() {
- return error_state_.get();
-}
-
void ContextState::EnableDisable(GLenum pname, bool enable) const {
if (pname == GL_PRIMITIVE_RESTART_FIXED_INDEX &&
feature_info_->feature_flags().emulate_primitive_restart_fixed_index) {
diff --git a/chromium/gpu/command_buffer/service/context_state.h b/chromium/gpu/command_buffer/service/context_state.h
index 4afd27facf6..4af44a67546 100644
--- a/chromium/gpu/command_buffer/service/context_state.h
+++ b/chromium/gpu/command_buffer/service/context_state.h
@@ -23,11 +23,8 @@ namespace gpu {
namespace gles2 {
class Buffer;
-class ErrorState;
-class ErrorStateClient;
class FeatureInfo;
class IndexedBufferBindingHost;
-class Logger;
class Program;
class Renderbuffer;
class TransformFeedback;
@@ -195,8 +192,6 @@ struct GPU_GLES2_EXPORT ContextState {
enum Dimension { k2D, k3D };
ContextState(FeatureInfo* feature_info,
- ErrorStateClient* error_state_client,
- Logger* logger,
bool track_texture_and_sampler_units = true);
~ContextState();
@@ -284,8 +279,6 @@ struct GPU_GLES2_EXPORT ContextState {
api()->glStencilMaskSeparateFn(op, mask);
}
- ErrorState* GetErrorState();
-
void SetBoundBuffer(GLenum target, Buffer* buffer);
void RemoveBoundBuffer(Buffer* buffer);
@@ -430,7 +423,6 @@ struct GPU_GLES2_EXPORT ContextState {
gl::GLApi* api_ = nullptr;
FeatureInfo* feature_info_;
- std::unique_ptr<ErrorState> error_state_;
bool context_lost_ = false;
};
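Since ContextState no longer owns an ErrorState, the BufferManager validation entry points shown earlier each gain an explicit ErrorState* parameter. A hedged sketch of an updated call site; the variables are illustrative:

    // Callers now thread the decoder's ErrorState through explicitly
    // instead of calling context_state->GetErrorState().
    buffer_manager->ValidateAndDoBufferData(&context_state, error_state,
                                            GL_ARRAY_BUFFER, size, data,
                                            GL_STATIC_DRAW);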
diff --git a/chromium/gpu/command_buffer/service/copy_texture_chromium_mock.h b/chromium/gpu/command_buffer/service/copy_texture_chromium_mock.h
index 622cf5da453..a163f2922ea 100644
--- a/chromium/gpu/command_buffer/service/copy_texture_chromium_mock.h
+++ b/chromium/gpu/command_buffer/service/copy_texture_chromium_mock.h
@@ -73,7 +73,7 @@ class MockCopyTextureResourceManager
// Cannot MOCK_METHOD more than 10 args.
void DoCopyTexture(
- const DecoderContext* decoder,
+ DecoderContext* decoder,
GLenum source_target,
GLuint source_id,
GLint source_level,
@@ -91,7 +91,7 @@ class MockCopyTextureResourceManager
CopyTextureMethod method,
CopyTexImageResourceManager* luma_emulation_blitter) override {}
void DoCopySubTexture(
- const DecoderContext* decoder,
+ DecoderContext* decoder,
GLenum source_target,
GLuint source_id,
GLint source_level,
@@ -117,7 +117,7 @@ class MockCopyTextureResourceManager
CopyTextureMethod method,
CopyTexImageResourceManager* luma_emulation_blitter) override {}
void DoCopySubTextureWithTransform(
- const DecoderContext* decoder,
+ DecoderContext* decoder,
GLenum source_target,
GLuint source_id,
GLint source_level,
@@ -143,7 +143,7 @@ class MockCopyTextureResourceManager
const GLfloat transform_matrix[16],
CopyTexImageResourceManager* luma_emulation_blitter) override{};
void DoCopyTextureWithTransform(
- const DecoderContext* decoder,
+ DecoderContext* decoder,
GLenum source_target,
GLuint source_id,
GLint source_level,
diff --git a/chromium/gpu/command_buffer/service/decoder_client.h b/chromium/gpu/command_buffer/service/decoder_client.h
index 4bdf65376f3..66da71f6b80 100644
--- a/chromium/gpu/command_buffer/service/decoder_client.h
+++ b/chromium/gpu/command_buffer/service/decoder_client.h
@@ -14,8 +14,6 @@
namespace gpu {
-struct SyncToken;
-
class GPU_EXPORT DecoderClient {
public:
virtual ~DecoderClient() = default;
@@ -31,14 +29,6 @@ class GPU_EXPORT DecoderClient {
// reschedule waiting decoders.
virtual void OnFenceSyncRelease(uint64_t release) = 0;
- // Called when the decoder needs to wait on a sync token. If the wait is valid
- // (fence sync is not released yet), the client must unschedule the command
- // buffer and return true. The client is responsible for rescheduling the
- // command buffer when the fence is released. If the wait is a noop (fence is
- // already released) or invalid, the client must leave the command buffer
- // scheduled, and return false.
- virtual bool OnWaitSyncToken(const gpu::SyncToken&) = 0;
-
// Called when the decoder needs to be descheduled while waiting for a fence
// completion. The client is responsible for descheduling the command buffer
// before returning, and then calling PerformPollingWork periodically to test
diff --git a/chromium/gpu/command_buffer/service/decoder_context.h b/chromium/gpu/command_buffer/service/decoder_context.h
index abc53a017ad..61b0954c9cf 100644
--- a/chromium/gpu/command_buffer/service/decoder_context.h
+++ b/chromium/gpu/command_buffer/service/decoder_context.h
@@ -213,6 +213,12 @@ class GPU_GLES2_EXPORT DecoderContext : public AsyncAPIInterface,
// Methods required by GpuTracer
//
virtual gles2::Outputter* outputter() const = 0;
+
+ // Restores all attributes in the gl context state.
+ virtual void RestoreAllAttributes() const = 0;
+
+ // Restores texture states for a given service id.
+ virtual void RestoreTextureState(unsigned service_id) = 0;
};
} // namespace gpu
diff --git a/chromium/gpu/command_buffer/service/feature_info.cc b/chromium/gpu/command_buffer/service/feature_info.cc
index 7932e09929e..ae7705b8d6a 100644
--- a/chromium/gpu/command_buffer/service/feature_info.cc
+++ b/chromium/gpu/command_buffer/service/feature_info.cc
@@ -11,6 +11,7 @@
#include "base/command_line.h"
#include "base/metrics/histogram_macros.h"
+#include "base/stl_util.h"
#include "base/strings/string_number_conversions.h"
#include "base/strings/string_split.h"
#include "build/build_config.h"
@@ -34,16 +35,6 @@ namespace gles2 {
namespace {
-struct FormatInfo {
- GLenum format;
- const GLenum* types;
- size_t count;
-};
-
-} // anonymous namespace.
-
-namespace {
-
class ScopedPixelUnpackBufferOverride {
public:
explicit ScopedPixelUnpackBufferOverride(bool has_pixel_buffers,
@@ -258,7 +249,16 @@ void FeatureInfo::InitializeBasicState(const base::CommandLine* command_line) {
void FeatureInfo::Initialize(ContextType context_type,
bool is_passthrough_cmd_decoder,
- const DisallowedFeatures& disallowed_features) {
+ const DisallowedFeatures& disallowed_features,
+ bool force_reinitialize) {
+ if (initialized_) {
+ DCHECK_EQ(context_type, context_type_);
+ DCHECK_EQ(is_passthrough_cmd_decoder, is_passthrough_cmd_decoder_);
+ DCHECK(disallowed_features == disallowed_features_);
+ if (!force_reinitialize)
+ return;
+ }
+
disallowed_features_ = disallowed_features;
context_type_ = context_type;
is_passthrough_cmd_decoder_ = is_passthrough_cmd_decoder;
@@ -272,20 +272,24 @@ void FeatureInfo::Initialize(ContextType context_type,
break;
}
InitializeFeatures();
+ initialized_ = true;
}
void FeatureInfo::InitializeForTesting(
const DisallowedFeatures& disallowed_features) {
+ initialized_ = false;
Initialize(CONTEXT_TYPE_OPENGLES2, false /* is_passthrough_cmd_decoder */,
disallowed_features);
}
void FeatureInfo::InitializeForTesting() {
+ initialized_ = false;
Initialize(CONTEXT_TYPE_OPENGLES2, false /* is_passthrough_cmd_decoder */,
DisallowedFeatures());
}
void FeatureInfo::InitializeForTesting(ContextType context_type) {
+ initialized_ = false;
Initialize(context_type, false /* is_passthrough_cmd_decoder */,
DisallowedFeatures());
}
@@ -1493,7 +1497,10 @@ void FeatureInfo::InitializeFeatures() {
feature_flags_.ext_robustness =
gfx::HasExtension(extensions, "GL_EXT_robustness");
feature_flags_.ext_pixel_buffer_object =
+ gfx::HasExtension(extensions, "GL_ARB_pixel_buffer_object") ||
gfx::HasExtension(extensions, "GL_NV_pixel_buffer_object");
+ feature_flags_.ext_unpack_subimage =
+ gfx::HasExtension(extensions, "GL_EXT_unpack_subimage");
feature_flags_.oes_rgb8_rgba8 =
gfx::HasExtension(extensions, "GL_OES_rgb8_rgba8");
feature_flags_.angle_robust_resource_initialization =
@@ -1542,6 +1549,19 @@ void FeatureInfo::InitializeFeatures() {
AddExtensionString("GL_KHR_robust_buffer_access_behavior");
feature_flags_.khr_robust_buffer_access_behavior = true;
}
+
+ if (!is_passthrough_cmd_decoder_ ||
+ gfx::HasExtension(extensions, "GL_ANGLE_multi_draw")) {
+ feature_flags_.webgl_multi_draw = true;
+ AddExtensionString("GL_WEBGL_multi_draw");
+
+ if (gfx::HasExtension(extensions, "GL_ANGLE_instanced_arrays") ||
+ feature_flags_.angle_instanced_arrays || gl_version_info_->is_es3 ||
+ gl_version_info_->is_desktop_core_profile) {
+ feature_flags_.webgl_multi_draw_instanced = true;
+ AddExtensionString("GL_WEBGL_multi_draw_instanced");
+ }
+ }
}
void FeatureInfo::InitializeFloatAndHalfFloatFeatures(
@@ -1692,8 +1712,8 @@ void FeatureInfo::InitializeFloatAndHalfFloatFeatures(
GLenum formats[] = {
GL_RED, GL_RG, GL_RGBA, GL_RED, GL_RG, GL_RGB,
};
- DCHECK_EQ(arraysize(internal_formats), arraysize(formats));
- for (size_t i = 0; i < arraysize(formats); ++i) {
+ DCHECK_EQ(base::size(internal_formats), base::size(formats));
+ for (size_t i = 0; i < base::size(formats); ++i) {
glTexImage2D(GL_TEXTURE_2D, 0, internal_formats[i], width, width, 0,
formats[i], GL_FLOAT, nullptr);
full_float_support &= glCheckFramebufferStatusEXT(GL_FRAMEBUFFER) ==
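The new flags are set unconditionally for the validating decoder and behind GL_ANGLE_multi_draw for the passthrough decoder, with the instanced variant further gated on instancing support. A hedged sketch of how a decoder might consult them; the feature_info() accessor is assumed from the surrounding decoder code:

    if (feature_info()->feature_flags().webgl_multi_draw) {
      // The GL_WEBGL_multi_draw entry points may be exposed.
    }
    if (feature_info()->feature_flags().webgl_multi_draw_instanced) {
      // The *InstancedWEBGL variants are additionally available.
    }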
diff --git a/chromium/gpu/command_buffer/service/feature_info.h b/chromium/gpu/command_buffer/service/feature_info.h
index 925fd2e0838..922eb929adf 100644
--- a/chromium/gpu/command_buffer/service/feature_info.h
+++ b/chromium/gpu/command_buffer/service/feature_info.h
@@ -127,6 +127,7 @@ class GPU_GLES2_EXPORT FeatureInfo : public base::RefCounted<FeatureInfo> {
bool khr_robustness = false;
bool ext_robustness = false;
bool ext_pixel_buffer_object = false;
+ bool ext_unpack_subimage = false;
bool oes_rgb8_rgba8 = false;
bool angle_robust_resource_initialization = false;
bool nv_fence = false;
@@ -140,6 +141,8 @@ class GPU_GLES2_EXPORT FeatureInfo : public base::RefCounted<FeatureInfo> {
bool khr_parallel_shader_compile = false;
bool android_surface_control = false;
bool khr_robust_buffer_access_behavior = false;
+ bool webgl_multi_draw = false;
+ bool webgl_multi_draw_instanced = false;
};
FeatureInfo();
@@ -151,7 +154,8 @@ class GPU_GLES2_EXPORT FeatureInfo : public base::RefCounted<FeatureInfo> {
// Initializes the feature information. Needs a current GL context.
void Initialize(ContextType context_type,
bool is_passthrough_cmd_decoder,
- const DisallowedFeatures& disallowed_features);
+ const DisallowedFeatures& disallowed_features,
+ bool force_reinitialize = false);
// Helper that defaults to no disallowed features and a GLES2 context.
void InitializeForTesting();
@@ -229,6 +233,8 @@ class GPU_GLES2_EXPORT FeatureInfo : public base::RefCounted<FeatureInfo> {
void InitializeFeatures();
void InitializeFloatAndHalfFloatFeatures(const gfx::ExtensionSet& extensions);
+ bool initialized_ = false;
+
Validators validators_;
DisallowedFeatures disallowed_features_;
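Initialize() is now idempotent by default, and the new force_reinitialize parameter opts back into a full re-run (the InitializeForTesting() overloads get the same effect by clearing initialized_ first). A hedged usage sketch:

    feature_info->Initialize(context_type, use_passthrough,
                             disallowed_features);
    // A second call with identical arguments is now a no-op...
    feature_info->Initialize(context_type, use_passthrough,
                             disallowed_features);
    // ...unless re-initialization is forced, e.g. after the underlying GL
    // context changed.
    feature_info->Initialize(context_type, use_passthrough,
                             disallowed_features,
                             /*force_reinitialize=*/true);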
diff --git a/chromium/gpu/command_buffer/service/feature_info_unittest.cc b/chromium/gpu/command_buffer/service/feature_info_unittest.cc
index a3d96fb1479..af214f27ebc 100644
--- a/chromium/gpu/command_buffer/service/feature_info_unittest.cc
+++ b/chromium/gpu/command_buffer/service/feature_info_unittest.cc
@@ -166,16 +166,6 @@ class FeatureInfoTest
scoped_refptr<FeatureInfo> info_;
};
-namespace {
-
-struct FormatInfo {
- GLenum format;
- const GLenum* types;
- size_t count;
-};
-
-} // anonymous namespace.
-
static const MockedGLVersionKind kGLVersionKinds[] = {
ES2_on_Version3_0,
ES2_on_Version3_2Compatibility,
diff --git a/chromium/gpu/command_buffer/service/framebuffer_completeness_cache.h b/chromium/gpu/command_buffer/service/framebuffer_completeness_cache.h
index 05be906b56d..53f13f103c7 100644
--- a/chromium/gpu/command_buffer/service/framebuffer_completeness_cache.h
+++ b/chromium/gpu/command_buffer/service/framebuffer_completeness_cache.h
@@ -6,8 +6,8 @@
#define GPU_COMMAND_BUFFER_SERVICE_FRAMEBUFFER_COMPLETENESS_CACHE_H_
#include <string>
+#include <unordered_set>
-#include "base/containers/hash_tables.h"
#include "base/macros.h"
#include "gpu/gpu_gles2_export.h"
@@ -26,7 +26,7 @@ class GPU_GLES2_EXPORT FramebufferCompletenessCache {
void SetComplete(const std::string& signature);
private:
- typedef base::hash_set<std::string> Map;
+ typedef std::unordered_set<std::string> Map;
Map cache_;
diff --git a/chromium/gpu/command_buffer/service/framebuffer_manager.h b/chromium/gpu/command_buffer/service/framebuffer_manager.h
index 44fa609885c..19303807c2b 100644
--- a/chromium/gpu/command_buffer/service/framebuffer_manager.h
+++ b/chromium/gpu/command_buffer/service/framebuffer_manager.h
@@ -9,9 +9,9 @@
#include <stdint.h>
#include <memory>
+#include <unordered_map>
#include <vector>
-#include "base/containers/hash_tables.h"
#include "base/macros.h"
#include "base/memory/ref_counted.h"
#include "gpu/command_buffer/service/context_group.h"
@@ -279,7 +279,7 @@ class GPU_GLES2_EXPORT Framebuffer : public base::RefCounted<Framebuffer> {
unsigned framebuffer_complete_state_count_id_;
// A map of attachments.
- typedef base::hash_map<GLenum, scoped_refptr<Attachment> > AttachmentMap;
+ typedef std::unordered_map<GLenum, scoped_refptr<Attachment>> AttachmentMap;
AttachmentMap attachments_;
// User's draw buffers setting through DrawBuffers() call.
@@ -370,8 +370,7 @@ class GPU_GLES2_EXPORT FramebufferManager {
}
// Info for each framebuffer in the system.
- typedef base::hash_map<GLuint, scoped_refptr<Framebuffer> >
- FramebufferMap;
+ typedef std::unordered_map<GLuint, scoped_refptr<Framebuffer>> FramebufferMap;
FramebufferMap framebuffers_;
// Incremented anytime anything changes that might affect framebuffer
diff --git a/chromium/gpu/command_buffer/service/framebuffer_manager_unittest.cc b/chromium/gpu/command_buffer/service/framebuffer_manager_unittest.cc
index 04b71c58c58..13c7c844602 100644
--- a/chromium/gpu/command_buffer/service/framebuffer_manager_unittest.cc
+++ b/chromium/gpu/command_buffer/service/framebuffer_manager_unittest.cc
@@ -5,6 +5,7 @@
#include <stddef.h>
#include <stdint.h>
+#include "base/stl_util.h"
#include "gpu/command_buffer/client/client_test_helper.h"
#include "gpu/command_buffer/service/error_state_mock.h"
#include "gpu/command_buffer/service/feature_info.h"
@@ -1081,7 +1082,7 @@ TEST_F(FramebufferInfoTest, DrawBuffers) {
framebuffer_->GetDrawBuffer(i));
}
- for (size_t ii = 0; ii < arraysize(kTextureClientId); ++ii) {
+ for (size_t ii = 0; ii < base::size(kTextureClientId); ++ii) {
texture_manager_->CreateTexture(
kTextureClientId[ii], kTextureServiceId[ii]);
scoped_refptr<TextureRef> texture(
@@ -1184,7 +1185,7 @@ TEST_F(FramebufferInfoTest, DrawBufferMasks) {
GL_FLOAT,
GL_UNSIGNED_INT};
- for (size_t ii = 0; ii < arraysize(kTextureClientId); ++ii) {
+ for (size_t ii = 0; ii < base::size(kTextureClientId); ++ii) {
texture_manager_->CreateTexture(
kTextureClientId[ii], kTextureServiceId[ii]);
scoped_refptr<TextureRef> texture(
diff --git a/chromium/gpu/command_buffer/service/gl_context_virtual_delegate.h b/chromium/gpu/command_buffer/service/gl_context_virtual_delegate.h
index 1b2ffc57ae2..0db2f148720 100644
--- a/chromium/gpu/command_buffer/service/gl_context_virtual_delegate.h
+++ b/chromium/gpu/command_buffer/service/gl_context_virtual_delegate.h
@@ -32,7 +32,6 @@ class GPU_GLES2_EXPORT GLContextVirtualDelegate {
// Restore States.
virtual void RestoreGlobalState() const = 0;
virtual void ClearAllAttributes() const = 0;
- virtual void RestoreAllAttributes() const = 0;
virtual void RestoreActiveTexture() const = 0;
virtual void RestoreAllTextureUnitAndSamplerBindings(
const gles2::ContextState* prev_state) const = 0;
@@ -42,7 +41,6 @@ class GPU_GLES2_EXPORT GLContextVirtualDelegate {
virtual void RestoreFramebufferBindings() const = 0;
virtual void RestoreRenderbufferBindings() = 0;
virtual void RestoreProgramBindings() const = 0;
- virtual void RestoreTextureState(unsigned service_id) const = 0;
virtual void RestoreTextureUnitBindings(unsigned unit) const = 0;
virtual void RestoreVertexAttribArray(unsigned index) = 0;
virtual void RestoreAllExternalTextureBindingsIfNeeded() = 0;
diff --git a/chromium/gpu/command_buffer/service/gl_state_restorer_impl.cc b/chromium/gpu/command_buffer/service/gl_state_restorer_impl.cc
index d47a8d4d430..0d207240e85 100644
--- a/chromium/gpu/command_buffer/service/gl_state_restorer_impl.cc
+++ b/chromium/gpu/command_buffer/service/gl_state_restorer_impl.cc
@@ -71,12 +71,14 @@ void GLStateRestorerImpl::RestoreVertexAttribArray(unsigned int index) {
void GLStateRestorerImpl::PauseQueries() {
DCHECK(delegate_.get());
- delegate_->GetQueryManager()->PauseQueries();
+ if (auto* query_manager = delegate_->GetQueryManager())
+ query_manager->PauseQueries();
}
void GLStateRestorerImpl::ResumeQueries() {
DCHECK(delegate_.get());
- delegate_->GetQueryManager()->ResumeQueries();
+ if (auto* query_manager = delegate_->GetQueryManager())
+ query_manager->ResumeQueries();
}
const gles2::ContextState* GLStateRestorerImpl::GetContextState() const {
diff --git a/chromium/gpu/command_buffer/service/gl_utils.h b/chromium/gpu/command_buffer/service/gl_utils.h
index 2b03458aba3..60f0b808aba 100644
--- a/chromium/gpu/command_buffer/service/gl_utils.h
+++ b/chromium/gpu/command_buffer/service/gl_utils.h
@@ -60,14 +60,6 @@ struct CALayerSharedState {
gfx::Transform transform;
};
-struct DCLayerSharedState {
- float opacity;
- bool is_clipped;
- gfx::Rect clip_rect;
- int z_order;
- gfx::Transform transform;
-};
-
std::vector<int> GetAllGLErrors();
bool PrecisionMeetsSpecForHighpFloat(GLint rangeMin,
diff --git a/chromium/gpu/command_buffer/service/gles2_cmd_copy_tex_image.h b/chromium/gpu/command_buffer/service/gles2_cmd_copy_tex_image.h
index 269cad6083f..409da846f6a 100644
--- a/chromium/gpu/command_buffer/service/gles2_cmd_copy_tex_image.h
+++ b/chromium/gpu/command_buffer/service/gles2_cmd_copy_tex_image.h
@@ -7,7 +7,6 @@
#include <array>
-#include "base/containers/hash_tables.h"
#include "base/macros.h"
#include "gpu/command_buffer/service/feature_info.h"
#include "gpu/gpu_gles2_export.h"
diff --git a/chromium/gpu/command_buffer/service/gles2_cmd_copy_texture_chromium.cc b/chromium/gpu/command_buffer/service/gles2_cmd_copy_texture_chromium.cc
index f5fbb9df135..939bf436f71 100644
--- a/chromium/gpu/command_buffer/service/gles2_cmd_copy_texture_chromium.cc
+++ b/chromium/gpu/command_buffer/service/gles2_cmd_copy_texture_chromium.cc
@@ -7,6 +7,7 @@
#include <stddef.h>
#include <algorithm>
+#include <unordered_map>
#include "gpu/command_buffer/service/decoder_context.h"
#include "gpu/command_buffer/service/gl_utils.h"
@@ -545,7 +546,7 @@ bool BindFramebufferTexture2D(GLenum target,
}
void DoCopyTexImage2D(
- const gpu::DecoderContext* decoder,
+ gpu::DecoderContext* decoder,
GLenum source_target,
GLuint source_id,
GLint source_level,
@@ -598,7 +599,7 @@ void DoCopyTexImage2D(
}
void DoCopyTexSubImage2D(
- const gpu::DecoderContext* decoder,
+ gpu::DecoderContext* decoder,
GLenum source_target,
GLuint source_id,
GLint source_level,
@@ -770,7 +771,7 @@ enum TexImageCommandType {
};
void DoReadbackAndTexImage(TexImageCommandType command_type,
- const gpu::DecoderContext* decoder,
+ gpu::DecoderContext* decoder,
GLenum source_target,
GLuint source_id,
GLint source_level,
@@ -857,7 +858,7 @@ class CopyTextureResourceManagerImpl
const gles2::FeatureInfo::FeatureFlags& feature_flags) override;
void Destroy() override;
void DoCopyTexture(
- const DecoderContext* decoder,
+ DecoderContext* decoder,
GLenum source_target,
GLuint source_id,
GLint source_level,
@@ -875,7 +876,7 @@ class CopyTextureResourceManagerImpl
CopyTextureMethod method,
CopyTexImageResourceManager* luma_emulation_blitter) override;
void DoCopySubTexture(
- const DecoderContext* decoder,
+ DecoderContext* decoder,
GLenum source_target,
GLuint source_id,
GLint source_level,
@@ -901,7 +902,7 @@ class CopyTextureResourceManagerImpl
CopyTextureMethod method,
CopyTexImageResourceManager* luma_emulation_blitter) override;
void DoCopySubTextureWithTransform(
- const DecoderContext* decoder,
+ DecoderContext* decoder,
GLenum source_target,
GLuint source_id,
GLint source_level,
@@ -927,7 +928,7 @@ class CopyTextureResourceManagerImpl
const GLfloat transform_matrix[16],
CopyTexImageResourceManager* luma_emulation_blitter) override;
void DoCopyTextureWithTransform(
- const DecoderContext* decoder,
+ DecoderContext* decoder,
GLenum source_target,
GLuint source_id,
GLint source_level,
@@ -974,7 +975,7 @@ class CopyTextureResourceManagerImpl
};
void DoCopyTextureInternal(
- const DecoderContext* decoder,
+ DecoderContext* decoder,
GLenum source_target,
GLuint source_id,
GLint source_level,
@@ -1006,7 +1007,7 @@ class CopyTextureResourceManagerImpl
ShaderVector vertex_shaders_;
ShaderVector fragment_shaders_;
typedef int ProgramMapKey;
- typedef base::hash_map<ProgramMapKey, ProgramInfo> ProgramMap;
+ typedef std::unordered_map<ProgramMapKey, ProgramInfo> ProgramMap;
ProgramMap programs_;
GLuint vertex_array_object_id_;
GLuint buffer_id_;
@@ -1098,7 +1099,7 @@ void CopyTextureResourceManagerImpl::Destroy() {
}
void CopyTextureResourceManagerImpl::DoCopyTexture(
- const DecoderContext* decoder,
+ DecoderContext* decoder,
GLenum source_target,
GLuint source_id,
GLint source_level,
@@ -1124,7 +1125,7 @@ void CopyTextureResourceManagerImpl::DoCopyTexture(
}
void CopyTextureResourceManagerImpl::DoCopySubTexture(
- const DecoderContext* decoder,
+ DecoderContext* decoder,
GLenum source_target,
GLuint source_id,
GLint source_level,
@@ -1218,7 +1219,7 @@ void CopyTextureResourceManagerImpl::DoCopySubTexture(
}
void CopyTextureResourceManagerImpl::DoCopySubTextureWithTransform(
- const DecoderContext* decoder,
+ DecoderContext* decoder,
GLenum source_target,
GLuint source_id,
GLint source_level,
@@ -1252,7 +1253,7 @@ void CopyTextureResourceManagerImpl::DoCopySubTextureWithTransform(
}
void CopyTextureResourceManagerImpl::DoCopyTextureWithTransform(
- const DecoderContext* decoder,
+ DecoderContext* decoder,
GLenum source_target,
GLuint source_id,
GLint source_level,
@@ -1332,7 +1333,7 @@ void CopyTextureResourceManagerImpl::DoCopyTextureWithTransform(
}
void CopyTextureResourceManagerImpl::DoCopyTextureInternal(
- const DecoderContext* decoder,
+ DecoderContext* decoder,
GLenum source_target,
GLuint source_id,
GLint source_level,
diff --git a/chromium/gpu/command_buffer/service/gles2_cmd_copy_texture_chromium.h b/chromium/gpu/command_buffer/service/gles2_cmd_copy_texture_chromium.h
index bc9ec515158..c0952dbc2e4 100644
--- a/chromium/gpu/command_buffer/service/gles2_cmd_copy_texture_chromium.h
+++ b/chromium/gpu/command_buffer/service/gles2_cmd_copy_texture_chromium.h
@@ -7,7 +7,6 @@
#include <vector>
-#include "base/containers/hash_tables.h"
#include "base/macros.h"
#include "gpu/command_buffer/service/feature_info.h"
#include "gpu/gpu_gles2_export.h"
@@ -58,7 +57,7 @@ class GPU_GLES2_EXPORT CopyTextureCHROMIUMResourceManager {
virtual void Destroy() = 0;
virtual void DoCopyTexture(
- const DecoderContext* decoder,
+ DecoderContext* decoder,
GLenum source_target,
GLuint source_id,
GLint source_level,
@@ -77,7 +76,7 @@ class GPU_GLES2_EXPORT CopyTextureCHROMIUMResourceManager {
CopyTexImageResourceManager* luma_emulation_blitter) = 0;
virtual void DoCopySubTexture(
- const DecoderContext* decoder,
+ DecoderContext* decoder,
GLenum source_target,
GLuint source_id,
GLint source_level,
@@ -104,7 +103,7 @@ class GPU_GLES2_EXPORT CopyTextureCHROMIUMResourceManager {
CopyTexImageResourceManager* luma_emulation_blitter) = 0;
virtual void DoCopySubTextureWithTransform(
- const DecoderContext* decoder,
+ DecoderContext* decoder,
GLenum source_target,
GLuint source_id,
GLint source_level,
@@ -135,7 +134,7 @@ class GPU_GLES2_EXPORT CopyTextureCHROMIUMResourceManager {
// matrix should be given in column-major form, so it can be passed
// directly to GL.
virtual void DoCopyTextureWithTransform(
- const DecoderContext* decoder,
+ DecoderContext* decoder,
GLenum source_target,
GLuint source_id,
GLint source_level,
diff --git a/chromium/gpu/command_buffer/service/gles2_cmd_decoder.cc b/chromium/gpu/command_buffer/service/gles2_cmd_decoder.cc
index 4ebebc0ffb7..a3caf17ca6a 100644
--- a/chromium/gpu/command_buffer/service/gles2_cmd_decoder.cc
+++ b/chromium/gpu/command_buffer/service/gles2_cmd_decoder.cc
@@ -16,6 +16,7 @@
#include <map>
#include <memory>
#include <set>
+#include <unordered_map>
#include <utility>
#include "base/callback.h"
@@ -30,6 +31,7 @@
#include "base/numerics/ranges.h"
#include "base/numerics/safe_math.h"
#include "base/optional.h"
+#include "base/stl_util.h"
#include "base/strings/string_number_conversions.h"
#include "base/strings/stringprintf.h"
#include "base/trace_event/trace_event.h"
@@ -66,6 +68,7 @@
#include "gpu/command_buffer/service/logger.h"
#include "gpu/command_buffer/service/mailbox_manager.h"
#include "gpu/command_buffer/service/memory_tracking.h"
+#include "gpu/command_buffer/service/multi_draw_manager.h"
#include "gpu/command_buffer/service/path_manager.h"
#include "gpu/command_buffer/service/program_manager.h"
#include "gpu/command_buffer/service/renderbuffer_manager.h"
@@ -98,6 +101,7 @@
#include "ui/gl/dc_renderer_layer_params.h"
#include "ui/gl/gl_bindings.h"
#include "ui/gl/gl_context.h"
+#include "ui/gl/gl_enums.h"
#include "ui/gl/gl_fence.h"
#include "ui/gl/gl_gl_api_implementation.h"
#include "ui/gl/gl_image.h"
@@ -123,6 +127,9 @@ const char kOESDerivativeExtension[] = "GL_OES_standard_derivatives";
const char kEXTFragDepthExtension[] = "GL_EXT_frag_depth";
const char kEXTDrawBuffersExtension[] = "GL_EXT_draw_buffers";
const char kEXTShaderTextureLodExtension[] = "GL_EXT_shader_texture_lod";
+const char kWEBGLMultiDrawExtension[] = "GL_WEBGL_multi_draw";
+const char kWEBGLMultiDrawInstancedExtension[] =
+ "GL_WEBGL_multi_draw_instanced";
gfx::OverlayTransform GetGFXOverlayTransform(GLenum plane_transform) {
switch (plane_transform) {
@@ -220,17 +227,16 @@ class GLES2DecoderImpl;
// Local versions of the SET_GL_ERROR macros
#define LOCAL_SET_GL_ERROR(error, function_name, msg) \
- ERRORSTATE_SET_GL_ERROR(state_.GetErrorState(), error, function_name, msg)
-#define LOCAL_SET_GL_ERROR_INVALID_ENUM(function_name, value, label) \
- ERRORSTATE_SET_GL_ERROR_INVALID_ENUM(state_.GetErrorState(), function_name, \
+ ERRORSTATE_SET_GL_ERROR(error_state_.get(), error, function_name, msg)
+#define LOCAL_SET_GL_ERROR_INVALID_ENUM(function_name, value, label) \
+ ERRORSTATE_SET_GL_ERROR_INVALID_ENUM(error_state_.get(), function_name, \
static_cast<uint32_t>(value), label)
#define LOCAL_COPY_REAL_GL_ERRORS_TO_WRAPPER(function_name) \
- ERRORSTATE_COPY_REAL_GL_ERRORS_TO_WRAPPER(state_.GetErrorState(), \
- function_name)
+ ERRORSTATE_COPY_REAL_GL_ERRORS_TO_WRAPPER(error_state_.get(), function_name)
#define LOCAL_PEEK_GL_ERROR(function_name) \
- ERRORSTATE_PEEK_GL_ERROR(state_.GetErrorState(), function_name)
+ ERRORSTATE_PEEK_GL_ERROR(error_state_.get(), function_name)
#define LOCAL_CLEAR_REAL_GL_ERRORS(function_name) \
- ERRORSTATE_CLEAR_REAL_GL_ERRORS(state_.GetErrorState(), function_name)
+ ERRORSTATE_CLEAR_REAL_GL_ERRORS(error_state_.get(), function_name)
#define LOCAL_PERFORMANCE_WARNING(msg) \
PerformanceWarning(__FILE__, __LINE__, msg)
#define LOCAL_RENDER_WARNING(msg) \
@@ -299,11 +305,15 @@ class ScopedGLErrorSuppressor {
// unit zero in case the client has changed that to something invalid.
class ScopedTextureBinder {
public:
- explicit ScopedTextureBinder(ContextState* state, GLuint id, GLenum target);
+ explicit ScopedTextureBinder(ContextState* state,
+ ErrorState* error_state,
+ GLuint id,
+ GLenum target);
~ScopedTextureBinder();
private:
ContextState* state_;
+ ErrorState* error_state_;
GLenum target_;
DISALLOW_COPY_AND_ASSIGN(ScopedTextureBinder);
};
@@ -312,11 +322,14 @@ class ScopedTextureBinder {
// object goes out of scope.
class ScopedRenderBufferBinder {
public:
- explicit ScopedRenderBufferBinder(ContextState* state, GLuint id);
+ explicit ScopedRenderBufferBinder(ContextState* state,
+ ErrorState* error_state,
+ GLuint id);
~ScopedRenderBufferBinder();
private:
ContextState* state_;
+ ErrorState* error_state_;
DISALLOW_COPY_AND_ASSIGN(ScopedRenderBufferBinder);
};
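Note: both RAII binders now receive the ErrorState explicitly rather than deriving it from ContextState, matching the decoder-owned error_state_ introduced below. The call-site pattern, as used later in this patch by BackTexture::Create and friends:

    {
      // Binds 'id' to 'target' for this scope; the constructor and destructor
      // suppress any GL errors through the supplied ErrorState, and the
      // destructor restores the previous binding and active texture unit.
      ScopedTextureBinder binder(&decoder_->state_,
                                 decoder_->error_state_.get(), id, target);
      // ... GL calls against the bound texture ...
    }
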
@@ -663,7 +676,7 @@ class GLES2DecoderImpl : public GLES2Decoder, public ErrorStateClient {
void RestoreBufferBinding(unsigned int target) override;
void RestoreFramebufferBindings() const override;
void RestoreRenderbufferBindings() override;
- void RestoreTextureState(unsigned service_id) const override;
+ void RestoreTextureState(unsigned service_id) override;
void ClearDeviceWindowRectangles() const;
void RestoreDeviceWindowRectangles() const override;
@@ -1151,7 +1164,6 @@ class GLES2DecoderImpl : public GLES2Decoder, public ErrorStateClient {
void DoCreateAndConsumeTextureINTERNAL(GLuint client_id,
const volatile GLbyte* key);
void DoCreateAndTexStorage2DSharedImageINTERNAL(GLuint client_id,
- GLenum internal_format,
const volatile GLbyte* data);
void DoBeginSharedImageAccessDirectCHROMIUM(GLuint client_id, GLenum mode);
void DoEndSharedImageAccessDirectCHROMIUM(GLuint client_id);
@@ -1193,6 +1205,30 @@ class GLES2DecoderImpl : public GLES2Decoder, public ErrorStateClient {
void DoFlushMappedBufferRange(
GLenum target, GLintptr offset, GLsizeiptr size);
+ void DoScheduleDCLayerCHROMIUM(GLuint y_texture_id,
+ GLuint uv_texture_id,
+ GLint z_order,
+ GLint content_x,
+ GLint content_y,
+ GLint content_width,
+ GLint content_height,
+ GLint quad_x,
+ GLint quad_y,
+ GLint quad_width,
+ GLint quad_height,
+ GLfloat transform_c1r1,
+ GLfloat transform_c2r1,
+ GLfloat transform_c1r2,
+ GLfloat transform_c2r2,
+ GLfloat transform_tx,
+ GLfloat transform_ty,
+ GLboolean is_clipped,
+ GLint clip_x,
+ GLint clip_y,
+ GLint clip_width,
+ GLint clip_height,
+ GLuint protected_video_type);
+
// Creates a Program for the given program.
Program* CreateProgram(GLuint client_id, GLuint service_id) {
return program_manager()->CreateProgram(client_id, service_id);
@@ -1857,6 +1893,9 @@ class GLES2DecoderImpl : public GLES2Decoder, public ErrorStateClient {
// Wrapper for glLinkProgram
void DoLinkProgram(GLuint program);
+ void DoMultiDrawBeginCHROMIUM(GLsizei drawcount);
+ void DoMultiDrawEndCHROMIUM();
+
// Wrapper for glOverlayPromotionHintCHROMIUIM
void DoOverlayPromotionHintCHROMIUM(GLuint client_id,
GLboolean promotion_hint,
@@ -2100,8 +2139,9 @@ class GLES2DecoderImpl : public GLES2Decoder, public ErrorStateClient {
GLuint shm_offset,
GLuint size);
- // Returns false if textures were replaced.
- bool PrepareTexturesForRender();
+ // Returns false if a GL error occurred. textures_set is always modified
+ // appropriately to indicate whether textures were set, even on failure.
+ bool PrepareTexturesForRender(bool* textures_set, const char* function_name);
void RestoreStateForTextures();
// Returns true if GL_FIXED attribs were simulated.
@@ -2110,19 +2150,49 @@ class GLES2DecoderImpl : public GLES2Decoder, public ErrorStateClient {
GLuint max_vertex_accessed, bool* simulated, GLsizei primcount);
void RestoreStateForSimulatedFixedAttribs();
- // Handle DrawArrays and DrawElements for both instanced and non-instanced
- // cases (primcount is always 1 for non-instanced).
- error::Error DoDrawArrays(
- const char* function_name,
- bool instanced, GLenum mode, GLint first, GLsizei count,
- GLsizei primcount);
- error::Error DoDrawElements(const char* function_name,
+ bool CheckMultiDrawArraysVertices(const char* function_name,
+ bool instanced,
+ const GLint* firsts,
+ const GLsizei* counts,
+ const GLsizei* primcounts,
+ GLsizei drawcount,
+ GLuint* total_max_vertex_accessed,
+ GLsizei* total_max_primcount);
+ bool CheckMultiDrawElementsVertices(const char* function_name,
+ bool instanced,
+ const GLsizei* counts,
+ GLenum type,
+ const int32_t* offsets,
+ const GLsizei* primcounts,
+ GLsizei drawcount,
+ Buffer* element_array_buffer,
+ GLuint* total_max_vertex_accessed,
+ GLsizei* total_max_primcount);
+ bool CheckTransformFeedback(const char* function_name,
bool instanced,
GLenum mode,
- GLsizei count,
- GLenum type,
- int32_t offset,
- GLsizei primcount);
+ const GLsizei* counts,
+ const GLsizei* primcounts,
+ GLsizei drawcount,
+ GLsizei* transform_feedback_vertices);
+
+ // Handle MultiDrawArrays and MultiDrawElements for both instanced and
+ // non-instanced cases (primcount is always 1 for non-instanced).
+ error::Error DoMultiDrawArrays(const char* function_name,
+ bool instanced,
+ GLenum mode,
+ const GLint* firsts,
+ const GLsizei* counts,
+ const GLsizei* primcounts,
+ GLsizei drawcount);
+ error::Error DoMultiDrawElements(const char* function_name,
+ bool instanced,
+ GLenum mode,
+ const GLsizei* counts,
+ GLenum type,
+ const int32_t* offsets,
+ const GLsizei* primcounts,
+ GLsizei drawcount);
GLenum GetBindTargetForSamplerType(GLenum type) {
switch (type) {
@@ -2358,7 +2428,6 @@ class GLES2DecoderImpl : public GLES2Decoder, public ErrorStateClient {
// The GL_CHROMIUM_schedule_ca_layer extension requires that SwapBuffers and
// equivalent functions reset shared state.
void ClearScheduleCALayerState();
- void ClearScheduleDCLayerState();
// Helper method to call glClear workaround.
void ClearFramebufferForWorkaround(GLbitfield mask);
@@ -2429,6 +2498,8 @@ class GLES2DecoderImpl : public GLES2Decoder, public ErrorStateClient {
DebugMarkerManager debug_marker_manager_;
Logger logger_;
+ std::unique_ptr<ErrorState> error_state_;
+
// All the state for this context.
ContextState state_;
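Note: the decoder now owns its GL error bookkeeping directly through this error_state_ member instead of reaching into ContextState, which is why every LOCAL_* error macro above was rewired from state_.GetErrorState() to error_state_.get(). A usage sketch with a hypothetical DoFoo wrapper; the macro and its ERRORSTATE_ expansion are the real ones from this file:

    void GLES2DecoderImpl::DoFoo(GLsizei count) {  // illustrative wrapper only
      if (count < 0) {
        // Expands to ERRORSTATE_SET_GL_ERROR(error_state_.get(),
        //                                    GL_INVALID_VALUE, "glFoo", ...).
        LOCAL_SET_GL_ERROR(GL_INVALID_VALUE, "glFoo", "count < 0");
        return;
      }
    }
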
@@ -2531,6 +2602,8 @@ class GLES2DecoderImpl : public GLES2Decoder, public ErrorStateClient {
std::unique_ptr<GpuFenceManager> gpu_fence_manager_;
+ std::unique_ptr<MultiDrawManager> multi_draw_manager_;
+
std::unique_ptr<VertexArrayManager> vertex_array_manager_;
base::flat_set<scoped_refptr<Buffer>> writes_submitted_but_not_completed_;
@@ -2590,6 +2663,8 @@ class GLES2DecoderImpl : public GLES2Decoder, public ErrorStateClient {
bool frag_depth_explicitly_enabled_;
bool draw_buffers_explicitly_enabled_;
bool shader_texture_lod_explicitly_enabled_;
+ bool multi_draw_explicitly_enabled_;
+ bool multi_draw_instanced_explicitly_enabled_;
bool compile_shader_always_succeeds_;
@@ -2635,7 +2710,7 @@ class GLES2DecoderImpl : public GLES2Decoder, public ErrorStateClient {
std::vector<std::unique_ptr<gl::GLFence>> deschedule_until_finished_fences_;
// Used to validate multisample renderbuffers if needed
- typedef base::hash_map<GLenum, GLuint> TextureMap;
+ typedef std::unordered_map<GLenum, GLuint> TextureMap;
TextureMap validation_textures_;
GLuint validation_fbo_multisample_;
GLuint validation_fbo_;
@@ -2662,12 +2737,11 @@ class GLES2DecoderImpl : public GLES2Decoder, public ErrorStateClient {
bool force_shader_name_hashing_for_test;
- GLfloat line_width_range_[2];
+ GLfloat line_width_range_[2] = {0.0, 1.0};
SamplerState default_sampler_state_;
std::unique_ptr<CALayerSharedState> ca_layer_shared_state_;
- std::unique_ptr<DCLayerSharedState> dc_layer_shared_state_;
// All currently outstanding AbstractTextures that we've created.
std::set<ValidatingAbstractTextureImpl*> abstract_textures_;
@@ -2722,12 +2796,11 @@ static void RestoreCurrentTextureBindings(ContextState* state,
}
ScopedTextureBinder::ScopedTextureBinder(ContextState* state,
+ ErrorState* error_state,
GLuint id,
GLenum target)
- : state_(state),
- target_(target) {
- ScopedGLErrorSuppressor suppressor(
- "ScopedTextureBinder::ctor", state_->GetErrorState());
+ : state_(state), error_state_(error_state), target_(target) {
+ ScopedGLErrorSuppressor suppressor("ScopedTextureBinder::ctor", error_state_);
// TODO(apatrick): Check if there are any other states that need to be reset
// before binding a new texture.
@@ -2737,38 +2810,38 @@ ScopedTextureBinder::ScopedTextureBinder(ContextState* state,
}
ScopedTextureBinder::~ScopedTextureBinder() {
- ScopedGLErrorSuppressor suppressor(
- "ScopedTextureBinder::dtor", state_->GetErrorState());
+ ScopedGLErrorSuppressor suppressor("ScopedTextureBinder::dtor", error_state_);
RestoreCurrentTextureBindings(state_, target_, 0);
state_->RestoreActiveTexture();
}
ScopedRenderBufferBinder::ScopedRenderBufferBinder(ContextState* state,
+ ErrorState* error_state,
GLuint id)
- : state_(state) {
- ScopedGLErrorSuppressor suppressor(
- "ScopedRenderBufferBinder::ctor", state_->GetErrorState());
+ : state_(state), error_state_(error_state) {
+ ScopedGLErrorSuppressor suppressor("ScopedRenderBufferBinder::ctor",
+ error_state_);
state->api()->glBindRenderbufferEXTFn(GL_RENDERBUFFER, id);
}
ScopedRenderBufferBinder::~ScopedRenderBufferBinder() {
- ScopedGLErrorSuppressor suppressor(
- "ScopedRenderBufferBinder::dtor", state_->GetErrorState());
+ ScopedGLErrorSuppressor suppressor("ScopedRenderBufferBinder::dtor",
+ error_state_);
state_->RestoreRenderbufferBindings();
}
ScopedFramebufferBinder::ScopedFramebufferBinder(GLES2DecoderImpl* decoder,
GLuint id)
: decoder_(decoder) {
- ScopedGLErrorSuppressor suppressor(
- "ScopedFramebufferBinder::ctor", decoder_->GetErrorState());
+ ScopedGLErrorSuppressor suppressor("ScopedFramebufferBinder::ctor",
+ decoder_->error_state_.get());
decoder->api()->glBindFramebufferEXTFn(GL_FRAMEBUFFER, id);
decoder->OnFboChanged();
}
ScopedFramebufferBinder::~ScopedFramebufferBinder() {
- ScopedGLErrorSuppressor suppressor(
- "ScopedFramebufferBinder::dtor", decoder_->GetErrorState());
+ ScopedGLErrorSuppressor suppressor("ScopedFramebufferBinder::dtor",
+ decoder_->error_state_.get());
decoder_->RestoreCurrentFramebufferBindings();
}
@@ -2783,8 +2856,8 @@ ScopedResolvedFramebufferBinder::ScopedResolvedFramebufferBinder(
if (!resolve_and_bind_)
return;
auto* api = decoder_->api();
- ScopedGLErrorSuppressor suppressor(
- "ScopedResolvedFramebufferBinder::ctor", decoder_->GetErrorState());
+ ScopedGLErrorSuppressor suppressor("ScopedResolvedFramebufferBinder::ctor",
+ decoder_->error_state_.get());
// On old AMD GPUs on macOS, glColorMask doesn't work correctly for
// multisampled renderbuffers and the alpha channel can be overwritten. This
@@ -2849,8 +2922,8 @@ ScopedResolvedFramebufferBinder::~ScopedResolvedFramebufferBinder() {
if (!resolve_and_bind_)
return;
- ScopedGLErrorSuppressor suppressor(
- "ScopedResolvedFramebufferBinder::dtor", decoder_->GetErrorState());
+ ScopedGLErrorSuppressor suppressor("ScopedResolvedFramebufferBinder::dtor",
+ decoder_->error_state_.get());
decoder_->RestoreCurrentFramebufferBindings();
if (decoder_->state_.enable_flags.scissor_test) {
decoder_->state_.SetDeviceCapabilityState(GL_SCISSOR_TEST, true);
@@ -2872,7 +2945,8 @@ ScopedFramebufferCopyBinder::ScopedFramebufferCopyBinder(
auto* api = decoder_->api();
api->glGenTexturesFn(1, &temp_texture_);
- ScopedTextureBinder texture_binder(&decoder->state_, temp_texture_,
+ ScopedTextureBinder texture_binder(&decoder->state_,
+ decoder->error_state_.get(), temp_texture_,
GL_TEXTURE_2D);
if (width == 0 || height == 0) {
// Copy the whole framebuffer if a rectangle isn't specified.
@@ -2934,49 +3008,39 @@ inline gl::GLApi* BackTexture::api() const {
void BackTexture::Create() {
DCHECK_EQ(id(), 0u);
ScopedGLErrorSuppressor suppressor("BackTexture::Create",
- decoder_->state_.GetErrorState());
+ decoder_->error_state_.get());
GLuint id;
api()->glGenTexturesFn(1, &id);
GLenum target = Target();
- ScopedTextureBinder binder(&decoder_->state_, id, target);
+ ScopedTextureBinder binder(&decoder_->state_, decoder_->error_state_.get(),
+ id, target);
// No client id is necessary because this texture will never be directly
// accessed by a client, only indirectly via a mailbox.
texture_ref_ = TextureRef::Create(decoder_->texture_manager(), 0, id);
decoder_->texture_manager()->SetTarget(texture_ref_.get(), target);
decoder_->texture_manager()->SetParameteri(
- "BackTexture::Create",
- decoder_->GetErrorState(),
- texture_ref_.get(),
- GL_TEXTURE_MAG_FILTER,
- GL_LINEAR);
+ "BackTexture::Create", decoder_->error_state_.get(), texture_ref_.get(),
+ GL_TEXTURE_MAG_FILTER, GL_LINEAR);
decoder_->texture_manager()->SetParameteri(
- "BackTexture::Create",
- decoder_->GetErrorState(),
- texture_ref_.get(),
- GL_TEXTURE_MIN_FILTER,
- GL_LINEAR);
+ "BackTexture::Create", decoder_->error_state_.get(), texture_ref_.get(),
+ GL_TEXTURE_MIN_FILTER, GL_LINEAR);
decoder_->texture_manager()->SetParameteri(
- "BackTexture::Create",
- decoder_->GetErrorState(),
- texture_ref_.get(),
- GL_TEXTURE_WRAP_S,
- GL_CLAMP_TO_EDGE);
+ "BackTexture::Create", decoder_->error_state_.get(), texture_ref_.get(),
+ GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
decoder_->texture_manager()->SetParameteri(
- "BackTexture::Create",
- decoder_->GetErrorState(),
- texture_ref_.get(),
- GL_TEXTURE_WRAP_T,
- GL_CLAMP_TO_EDGE);
+ "BackTexture::Create", decoder_->error_state_.get(), texture_ref_.get(),
+ GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
}
bool BackTexture::AllocateStorage(
const gfx::Size& size, GLenum format, bool zero) {
DCHECK_NE(id(), 0u);
ScopedGLErrorSuppressor suppressor("BackTexture::AllocateStorage",
- decoder_->state_.GetErrorState());
- ScopedTextureBinder binder(&decoder_->state_, id(), Target());
+ decoder_->error_state_.get());
+ ScopedTextureBinder binder(&decoder_->state_, decoder_->error_state_.get(),
+ id(), Target());
uint32_t image_size = 0;
GLES2Util::ComputeImageDataSizes(size.width(), size.height(), 1, format,
GL_UNSIGNED_BYTE, 8, &image_size, nullptr,
@@ -3025,8 +3089,9 @@ bool BackTexture::AllocateStorage(
void BackTexture::Copy() {
DCHECK_NE(id(), 0u);
ScopedGLErrorSuppressor suppressor("BackTexture::Copy",
- decoder_->state_.GetErrorState());
- ScopedTextureBinder binder(&decoder_->state_, id(), Target());
+ decoder_->error_state_.get());
+ ScopedTextureBinder binder(&decoder_->state_, decoder_->error_state_.get(),
+ id(), Target());
api()->glCopyTexSubImage2DFn(Target(),
0, // level
0, 0, 0, 0, size_.width(), size_.height());
@@ -3035,13 +3100,14 @@ void BackTexture::Copy() {
void BackTexture::Destroy() {
if (image_) {
DCHECK(texture_ref_);
- ScopedTextureBinder binder(&decoder_->state_, id(), Target());
+ ScopedTextureBinder binder(&decoder_->state_, decoder_->error_state_.get(),
+ id(), Target());
DestroyNativeGpuMemoryBuffer(true);
}
if (texture_ref_) {
ScopedGLErrorSuppressor suppressor("BackTexture::Destroy",
- decoder_->state_.GetErrorState());
+ decoder_->error_state_.get());
texture_ref_ = nullptr;
}
memory_tracker_.TrackMemFree(bytes_allocated_);
@@ -3099,7 +3165,9 @@ bool BackTexture::AllocateNativeGpuMemoryBuffer(const gfx::Size& size,
image_ = image;
decoder_->texture_manager()->SetLevelInfo(
texture_ref_.get(), Target(), 0, image_->GetInternalFormat(),
- size.width(), size.height(), 1, 0, image_->GetInternalFormat(),
+ size.width(), size.height(), 1, 0,
+ TextureManager::ExtractFormatFromStorageFormat(
+ image_->GetInternalFormat()),
GL_UNSIGNED_BYTE, gfx::Rect(size));
decoder_->texture_manager()->SetLevelImage(texture_ref_.get(), Target(), 0,
image_.get(), Texture::BOUND);
@@ -3132,7 +3200,7 @@ void BackTexture::DestroyNativeGpuMemoryBuffer(bool have_context) {
if (image_) {
ScopedGLErrorSuppressor suppressor(
"BackTexture::DestroyNativeGpuMemoryBuffer",
- decoder_->state_.GetErrorState());
+ decoder_->error_state_.get());
image_->ReleaseTexImage(Target());
@@ -3161,7 +3229,7 @@ inline gl::GLApi* BackRenderbuffer::api() const {
void BackRenderbuffer::Create() {
ScopedGLErrorSuppressor suppressor("BackRenderbuffer::Create",
- decoder_->state_.GetErrorState());
+ decoder_->error_state_.get());
Destroy();
api()->glGenRenderbuffersEXTFn(1, &id_);
}
@@ -3170,8 +3238,9 @@ bool BackRenderbuffer::AllocateStorage(const gfx::Size& size,
GLenum format,
GLsizei samples) {
ScopedGLErrorSuppressor suppressor("BackRenderbuffer::AllocateStorage",
- decoder_->state_.GetErrorState());
- ScopedRenderBufferBinder binder(&decoder_->state_, id_);
+ decoder_->error_state_.get());
+ ScopedRenderBufferBinder binder(&decoder_->state_,
+ decoder_->error_state_.get(), id_);
uint32_t estimated_size = 0;
if (!decoder_->renderbuffer_manager()->ComputeEstimatedRenderbufferSize(
@@ -3224,7 +3293,7 @@ bool BackRenderbuffer::AllocateStorage(const gfx::Size& size,
void BackRenderbuffer::Destroy() {
if (id_ != 0) {
ScopedGLErrorSuppressor suppressor("BackRenderbuffer::Destroy",
- decoder_->state_.GetErrorState());
+ decoder_->error_state_.get());
api()->glDeleteRenderbuffersEXTFn(1, &id_);
id_ = 0;
}
@@ -3254,15 +3323,15 @@ inline gl::GLApi* BackFramebuffer::api() const {
void BackFramebuffer::Create() {
ScopedGLErrorSuppressor suppressor("BackFramebuffer::Create",
- decoder_->GetErrorState());
+ decoder_->error_state_.get());
Destroy();
api()->glGenFramebuffersEXTFn(1, &id_);
}
void BackFramebuffer::AttachRenderTexture(BackTexture* texture) {
DCHECK_NE(id_, 0u);
- ScopedGLErrorSuppressor suppressor(
- "BackFramebuffer::AttachRenderTexture", decoder_->GetErrorState());
+ ScopedGLErrorSuppressor suppressor("BackFramebuffer::AttachRenderTexture",
+ decoder_->error_state_.get());
ScopedFramebufferBinder binder(decoder_, id_);
GLuint attach_id = texture ? texture->id() : 0;
api()->glFramebufferTexture2DEXTFn(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0,
@@ -3272,8 +3341,8 @@ void BackFramebuffer::AttachRenderTexture(BackTexture* texture) {
void BackFramebuffer::AttachRenderBuffer(GLenum target,
BackRenderbuffer* render_buffer) {
DCHECK_NE(id_, 0u);
- ScopedGLErrorSuppressor suppressor(
- "BackFramebuffer::AttachRenderBuffer", decoder_->GetErrorState());
+ ScopedGLErrorSuppressor suppressor("BackFramebuffer::AttachRenderBuffer",
+ decoder_->error_state_.get());
ScopedFramebufferBinder binder(decoder_, id_);
GLuint attach_id = render_buffer ? render_buffer->id() : 0;
api()->glFramebufferRenderbufferEXTFn(GL_FRAMEBUFFER, target, GL_RENDERBUFFER,
@@ -3283,7 +3352,7 @@ void BackFramebuffer::AttachRenderBuffer(GLenum target,
void BackFramebuffer::Destroy() {
if (id_ != 0) {
ScopedGLErrorSuppressor suppressor("BackFramebuffer::Destroy",
- decoder_->GetErrorState());
+ decoder_->error_state_.get());
api()->glDeleteFramebuffersEXTFn(1, &id_);
id_ = 0;
}
@@ -3296,7 +3365,7 @@ void BackFramebuffer::Invalidate() {
GLenum BackFramebuffer::CheckStatus() {
DCHECK_NE(id_, 0u);
ScopedGLErrorSuppressor suppressor("BackFramebuffer::CheckStatus",
- decoder_->GetErrorState());
+ decoder_->error_state_.get());
ScopedFramebufferBinder binder(decoder_, id_);
return api()->glCheckFramebufferStatusEXTFn(GL_FRAMEBUFFER);
}
@@ -3324,8 +3393,10 @@ GLES2DecoderImpl::GLES2DecoderImpl(
logger_(&debug_marker_manager_,
base::BindRepeating(&DecoderClient::OnConsoleMessage,
base::Unretained(client_),
- 0)),
- state_(group_->feature_info(), this, &logger_),
+ 0),
+ group->gpu_preferences().disable_gl_error_limit),
+ error_state_(ErrorState::Create(this, &logger_)),
+ state_(group_->feature_info()),
attrib_0_buffer_id_(0),
attrib_0_buffer_matches_value_(true),
attrib_0_size_(0),
@@ -3363,6 +3434,8 @@ GLES2DecoderImpl::GLES2DecoderImpl(
frag_depth_explicitly_enabled_(false),
draw_buffers_explicitly_enabled_(false),
shader_texture_lod_explicitly_enabled_(false),
+ multi_draw_explicitly_enabled_(false),
+ multi_draw_instanced_explicitly_enabled_(false),
compile_shader_always_succeeds_(false),
lose_context_when_out_of_memory_(false),
should_use_native_gmb_for_backbuffer_(false),
@@ -3572,6 +3645,9 @@ gpu::ContextResult GLES2DecoderImpl::Initialize(
gpu_fence_manager_.reset(new GpuFenceManager());
+ multi_draw_manager_.reset(
+ new MultiDrawManager(MultiDrawManager::IndexStorageType::Offset));
+
util_.set_num_compressed_texture_formats(
validators_->compressed_texture_format.GetValues().size());
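Note: the MultiDrawManager created here collects one batch of sub-draws between DoMultiDrawBeginCHROMIUM and DoMultiDrawEndCHROMIUM (defined near the end of this file); IndexStorageType::Offset records element offsets rather than client pointers, matching how the command buffer encodes indices. A hedged sketch of the service-side sequence for one batch:

    // Begin sizes the manager's per-draw arrays for 'drawcount' entries.
    DoMultiDrawBeginCHROMIUM(drawcount);
    // ... the per-draw multi-draw commands append firsts/counts/offsets ...
    // End validates the batch and dispatches it through the returned
    // ResultData to DoMultiDrawArrays or DoMultiDrawElements.
    DoMultiDrawEndCHROMIUM();
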
@@ -4293,6 +4369,13 @@ bool GLES2DecoderImpl::InitializeShaderTranslator() {
break;
}
+ if (shader_spec == SH_WEBGL_SPEC || shader_spec == SH_WEBGL2_SPEC) {
+ resources.ANGLE_multi_draw =
+ (multi_draw_explicitly_enabled_ && features().webgl_multi_draw) ||
+ (multi_draw_instanced_explicitly_enabled_ &&
+ features().webgl_multi_draw_instanced);
+ }
+
if (((shader_spec == SH_WEBGL_SPEC || shader_spec == SH_WEBGL2_SPEC) &&
features().enable_shader_name_hashing) ||
force_shader_name_hashing_for_test)
@@ -4457,7 +4540,8 @@ bool GLES2DecoderImpl::GenTransformFeedbacksHelper(
bool GLES2DecoderImpl::GenPathsCHROMIUMHelper(GLuint first_client_id,
GLsizei range) {
GLuint last_client_id;
- if (!SafeAddUint32(first_client_id, range - 1, &last_client_id))
+ if (range < 1 || !base::CheckAdd(first_client_id, range - 1)
+ .AssignIfValid(&last_client_id))
return false;
if (path_manager()->HasPathsInRange(first_client_id, last_client_id))
@@ -4483,7 +4567,8 @@ bool GLES2DecoderImpl::GenPathsCHROMIUMHelper(GLuint first_client_id,
bool GLES2DecoderImpl::DeletePathsCHROMIUMHelper(GLuint first_client_id,
GLsizei range) {
GLuint last_client_id;
- if (!SafeAddUint32(first_client_id, range - 1, &last_client_id))
+ if (range < 1 || !base::CheckAdd(first_client_id, range - 1)
+ .AssignIfValid(&last_client_id))
return false;
path_manager()->RemovePaths(first_client_id, last_client_id);
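Note: SafeAddUint32 gives way to base/numerics here: base::CheckAdd yields a CheckedNumeric whose AssignIfValid writes the destination only when no overflow occurred, and the new range < 1 guard also rejects the range - 1 underflow for a zero range. A self-contained sketch of the idiom (LastIdInRange is a made-up name):

    #include <stdint.h>
    #include "base/numerics/safe_math.h"

    bool LastIdInRange(uint32_t first, int32_t range, uint32_t* last) {
      if (range < 1)
        return false;  // otherwise range - 1 underflows below
      // Overflow in the addition makes AssignIfValid return false
      // without touching *last.
      return base::CheckAdd(first, range - 1).AssignIfValid(last);
    }
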
@@ -5063,7 +5148,7 @@ void GLES2DecoderImpl::EndDecoding() {
}
ErrorState* GLES2DecoderImpl::GetErrorState() {
- return state_.GetErrorState();
+ return error_state_.get();
}
bool GLES2DecoderImpl::GetServiceTextureId(uint32_t client_texture_id,
@@ -5258,6 +5343,8 @@ void GLES2DecoderImpl::Destroy(bool have_context) {
framebuffer_manager_.reset();
}
+ multi_draw_manager_.reset();
+
if (query_manager_.get()) {
query_manager_->Destroy(have_context);
query_manager_.reset();
@@ -5727,7 +5814,7 @@ error::Error GLES2DecoderImpl::DoCommandsImpl(unsigned int num_commands,
const unsigned int arg_count = size - 1;
unsigned int command_index = command - kFirstGLES2Command;
- if (command_index < arraysize(command_info)) {
+ if (command_index < base::size(command_info)) {
const CommandInfo& info = command_info[command_index];
unsigned int info_arg_count = static_cast<unsigned int>(info.arg_count);
if ((info.arg_flags == cmd::kFixed && arg_count == info_arg_count) ||
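Note: arraysize() is replaced by base::size() from base/stl_util.h (added to the includes above); it is the std::size equivalent, a constexpr element count that works for C arrays and containers alike. Sketch:

    #include "base/stl_util.h"

    static const int kTable[] = {1, 2, 3};
    static_assert(base::size(kTable) == 3, "compile-time element count");
    bool InRange(unsigned index) { return index < base::size(kTable); }
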
@@ -6107,7 +6194,7 @@ void GLES2DecoderImpl::RestoreRenderbufferBindings() {
state_.RestoreRenderbufferBindings();
}
-void GLES2DecoderImpl::RestoreTextureState(unsigned service_id) const {
+void GLES2DecoderImpl::RestoreTextureState(unsigned service_id) {
Texture* texture = texture_manager()->GetTextureForServiceId(service_id);
if (texture) {
GLenum target = texture->target();
@@ -6788,6 +6875,9 @@ void GLES2DecoderImpl::DoGenerateMipmap(GLenum target) {
if (enable_srgb && feature_info_->feature_flags().desktop_srgb_support) {
state_.EnableDisableFramebufferSRGB(enable_srgb);
}
+ if (workarounds().clamp_texture_base_level_and_max_level) {
+ tex->ApplyClampedBaseLevelAndMaxLevelToDriver();
+ }
if (enable_srgb && workarounds().decode_encode_srgb_for_generatemipmap) {
if (target == GL_TEXTURE_2D) {
if (!InitializeSRGBConverter("generateMipmap")) {
@@ -7637,7 +7727,7 @@ void GLES2DecoderImpl::DoGetBufferParameteri64v(GLenum target,
GLsizei params_size) {
// Just delegate it. Some validation is actually done before this.
buffer_manager()->ValidateAndDoGetBufferParameteri64v(
- &state_, target, pname, params);
+ &state_, error_state_.get(), target, pname, params);
}
void GLES2DecoderImpl::DoGetBufferParameteriv(GLenum target,
@@ -7646,7 +7736,7 @@ void GLES2DecoderImpl::DoGetBufferParameteriv(GLenum target,
GLsizei params_size) {
// Just delegate it. Some validation is actually done before this.
buffer_manager()->ValidateAndDoGetBufferParameteriv(
- &state_, target, pname, params);
+ &state_, error_state_.get(), target, pname, params);
}
void GLES2DecoderImpl::DoBindAttribLocation(GLuint program_id,
@@ -9301,6 +9391,8 @@ void GLES2DecoderImpl::DoLinkProgram(GLuint program_id) {
if (workarounds().clear_uniforms_before_first_program_use)
program_manager()->ClearUniforms(program);
}
+ if (features().webgl_multi_draw || features().webgl_multi_draw_instanced)
+ program_manager()->UpdateDrawIDUniformLocation(program);
}
// LinkProgram can be very slow. Exit command processing to allow for
@@ -9412,8 +9504,8 @@ void GLES2DecoderImpl::DoSamplerParameterf(
GL_INVALID_VALUE, "glSamplerParameterf", "unknown sampler");
return;
}
- sampler_manager()->SetParameterf(
- "glSamplerParameterf", GetErrorState(), sampler, pname, param);
+ sampler_manager()->SetParameterf("glSamplerParameterf", error_state_.get(),
+ sampler, pname, param);
}
void GLES2DecoderImpl::DoSamplerParameteri(
@@ -9424,8 +9516,8 @@ void GLES2DecoderImpl::DoSamplerParameteri(
GL_INVALID_VALUE, "glSamplerParameteri", "unknown sampler");
return;
}
- sampler_manager()->SetParameteri(
- "glSamplerParameteri", GetErrorState(), sampler, pname, param);
+ sampler_manager()->SetParameteri("glSamplerParameteri", error_state_.get(),
+ sampler, pname, param);
}
void GLES2DecoderImpl::DoSamplerParameterfv(GLuint client_id,
@@ -9438,8 +9530,8 @@ void GLES2DecoderImpl::DoSamplerParameterfv(GLuint client_id,
GL_INVALID_VALUE, "glSamplerParameterfv", "unknown sampler");
return;
}
- sampler_manager()->SetParameterf(
- "glSamplerParameterfv", GetErrorState(), sampler, pname, params[0]);
+ sampler_manager()->SetParameterf("glSamplerParameterfv", error_state_.get(),
+ sampler, pname, params[0]);
}
void GLES2DecoderImpl::DoSamplerParameteriv(GLuint client_id,
@@ -9452,8 +9544,8 @@ void GLES2DecoderImpl::DoSamplerParameteriv(GLuint client_id,
GL_INVALID_VALUE, "glSamplerParameteriv", "unknown sampler");
return;
}
- sampler_manager()->SetParameteri(
- "glSamplerParameteriv", GetErrorState(), sampler, pname, params[0]);
+ sampler_manager()->SetParameteri("glSamplerParameteriv", error_state_.get(),
+ sampler, pname, params[0]);
}
void GLES2DecoderImpl::DoTexParameterf(
@@ -9465,8 +9557,8 @@ void GLES2DecoderImpl::DoTexParameterf(
return;
}
- texture_manager()->SetParameterf(
- "glTexParameterf", GetErrorState(), texture, pname, param);
+ texture_manager()->SetParameterf("glTexParameterf", error_state_.get(),
+ texture, pname, param);
}
void GLES2DecoderImpl::DoTexParameteri(
@@ -9478,8 +9570,8 @@ void GLES2DecoderImpl::DoTexParameteri(
return;
}
- texture_manager()->SetParameteri(
- "glTexParameteri", GetErrorState(), texture, pname, param);
+ texture_manager()->SetParameteri("glTexParameteri", error_state_.get(),
+ texture, pname, param);
}
void GLES2DecoderImpl::DoTexParameterfv(GLenum target,
@@ -9492,8 +9584,8 @@ void GLES2DecoderImpl::DoTexParameterfv(GLenum target,
return;
}
- texture_manager()->SetParameterf(
- "glTexParameterfv", GetErrorState(), texture, pname, *params);
+ texture_manager()->SetParameterf("glTexParameterfv", error_state_.get(),
+ texture, pname, *params);
}
void GLES2DecoderImpl::DoTexParameteriv(GLenum target,
@@ -9507,8 +9599,8 @@ void GLES2DecoderImpl::DoTexParameteriv(GLenum target,
return;
}
- texture_manager()->SetParameteri(
- "glTexParameteriv", GetErrorState(), texture, pname, *params);
+ texture_manager()->SetParameteri("glTexParameteriv", error_state_.get(),
+ texture, pname, *params);
}
bool GLES2DecoderImpl::CheckCurrentProgram(const char* function_name) {
@@ -9621,7 +9713,7 @@ bool GLES2DecoderImpl::ValidateUniformBlockBackings(const char* func_name) {
uniform_block_sizes[index] = static_cast<GLsizeiptr>(info.data_size);
}
return buffer_manager()->RequestBuffersAccess(
- state_.GetErrorState(), state_.indexed_uniform_buffer_bindings.get(),
+ error_state_.get(), state_.indexed_uniform_buffer_bindings.get(),
uniform_block_sizes, 1, func_name, "uniform buffers");
}
@@ -10242,7 +10334,7 @@ bool GLES2DecoderImpl::DoBindOrCopyTexImageIfNeeded(Texture* texture,
gl::GLImage* image = texture->GetLevelImage(textarget, 0, &image_state);
if (image && image_state == Texture::UNBOUND) {
ScopedGLErrorSuppressor suppressor(
- "GLES2DecoderImpl::DoBindOrCopyTexImageIfNeeded", GetErrorState());
+ "GLES2DecoderImpl::DoBindOrCopyTexImageIfNeeded", error_state_.get());
if (texture_unit)
api()->glActiveTextureFn(texture_unit);
api()->glBindTextureFn(textarget, texture->service_id());
@@ -10269,12 +10361,14 @@ void GLES2DecoderImpl::DoCopyBufferSubData(GLenum readtarget,
GLsizeiptr size) {
// Just delegate it. Some validation is actually done before this.
buffer_manager()->ValidateAndDoCopyBufferSubData(
- &state_, readtarget, writetarget, readoffset, writeoffset, size);
+ &state_, error_state_.get(), readtarget, writetarget, readoffset,
+ writeoffset, size);
}
-bool GLES2DecoderImpl::PrepareTexturesForRender() {
+bool GLES2DecoderImpl::PrepareTexturesForRender(bool* textures_set,
+ const char* function_name) {
DCHECK(state_.current_program.get());
- bool textures_set = false;
+ *textures_set = false;
const Program::SamplerIndices& sampler_indices =
state_.current_program->sampler_indices();
for (size_t ii = 0; ii < sampler_indices.size(); ++ii) {
@@ -10293,7 +10387,7 @@ bool GLES2DecoderImpl::PrepareTexturesForRender() {
if (!texture_ref ||
!texture_manager()->CanRenderWithSampler(
texture_ref, sampler_state)) {
- textures_set = true;
+ *textures_set = true;
api()->glActiveTextureFn(GL_TEXTURE0 + texture_unit_index);
api()->glBindTextureFn(textarget, texture_manager()->black_texture_id(
uniform_info->type));
@@ -10309,13 +10403,26 @@ bool GLES2DecoderImpl::PrepareTexturesForRender() {
" incompatible texture filtering.");
}
continue;
+ } else if (!texture_ref->texture()->CompatibleWithSamplerUniformType(
+ uniform_info->type)) {
+ LOCAL_SET_GL_ERROR(
+ GL_INVALID_OPERATION, function_name,
+ (std::string("Texture bound to texture unit ") +
+ base::UintToString(texture_unit_index) +
+ " with internal format " +
+ GLES2Util::GetStringEnum(
+ texture_ref->texture()->GetInternalFormatOfBaseLevel()) +
+ " is not compatible with sampler type " +
+ GLES2Util::GetStringEnum(uniform_info->type))
+ .c_str());
+ return false;
}
if (textarget != GL_TEXTURE_CUBE_MAP) {
Texture* texture = texture_ref->texture();
if (DoBindOrCopyTexImageIfNeeded(texture, textarget,
GL_TEXTURE0 + texture_unit_index)) {
- textures_set = true;
+ *textures_set = true;
continue;
}
}
@@ -10323,7 +10430,7 @@ bool GLES2DecoderImpl::PrepareTexturesForRender() {
// else: should this be an error?
}
}
- return !textures_set;
+ return true;
}
void GLES2DecoderImpl::RestoreStateForTextures() {
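Note: PrepareTexturesForRender previously overloaded its return value (false meant black textures were substituted); it now returns false only when it raises a GL error, reporting substitutions through the textures_set out-parameter so callers can tell the two conditions apart. The caller pattern used by the draw paths below:

    bool textures_set;
    if (!PrepareTexturesForRender(&textures_set, function_name))
      return error::kNoError;       // GL error already recorded
    ApplyDirtyState();
    // ... issue the draw calls ...
    if (textures_set)
      RestoreStateForTextures();    // undo the black-texture substitutions
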
@@ -10499,7 +10606,8 @@ bool GLES2DecoderImpl::SimulateAttrib0(
uint32_t size_needed = 0;
if (num_vertices == 0 ||
- !SafeMultiplyUint32(num_vertices, sizeof(Vec4f), &size_needed) ||
+ !base::CheckMul(num_vertices, sizeof(Vec4f))
+ .AssignIfValid(&size_needed) ||
size_needed > 0x7FFFFFFFU) {
LOCAL_SET_GL_ERROR(GL_OUT_OF_MEMORY, function_name, "Simulating attrib 0");
return false;
@@ -10535,7 +10643,7 @@ bool GLES2DecoderImpl::SimulateAttrib0(
std::min(num_vertices, kMaxVerticesPerLoop);
std::vector<Vec4f> temp(vertices_per_loop, fvalue);
for (GLuint offset = 0; offset < num_vertices;) {
- size_t count = std::min(num_vertices - offset, vertices_per_loop);
+ GLuint count = std::min(num_vertices - offset, vertices_per_loop);
api()->glBufferSubDataFn(GL_ARRAY_BUFFER, offset * sizeof(Vec4f),
count * sizeof(Vec4f), temp.data());
offset += count;
@@ -10616,7 +10724,7 @@ bool GLES2DecoderImpl::SimulateFixedAttribs(
// to be used normally. It's just here to pass that OpenGL ES 2.0 conformance
// tests so we just add to the buffer attrib used.
- GLuint elements_needed = 0;
+ base::CheckedNumeric<uint32_t> elements_needed = 0;
const VertexAttribManager::VertexAttribList& enabled_attribs =
state_.vertex_attrib_manager->GetEnabledVertexAttribs();
for (VertexAttribManager::VertexAttribList::const_iterator it =
@@ -10635,19 +10743,14 @@ bool GLES2DecoderImpl::SimulateFixedAttribs(
if (attrib_info &&
attrib->CanAccess(max_accessed) &&
attrib->type() == GL_FIXED) {
- uint32_t elements_used = 0;
- if (!SafeMultiplyUint32(num_vertices, attrib->size(), &elements_used) ||
- !SafeAddUint32(elements_needed, elements_used, &elements_needed)) {
- LOCAL_SET_GL_ERROR(
- GL_OUT_OF_MEMORY, function_name, "simulating GL_FIXED attribs");
- return false;
- }
+ elements_needed += base::CheckMul(num_vertices, attrib->size());
}
}
const uint32_t kSizeOfFloat = sizeof(float); // NOLINT
uint32_t size_needed = 0;
- if (!SafeMultiplyUint32(elements_needed, kSizeOfFloat, &size_needed) ||
+ if (!base::CheckMul(elements_needed, kSizeOfFloat)
+ .AssignIfValid(&size_needed) ||
size_needed > 0x7FFFFFFFU) {
LOCAL_SET_GL_ERROR(
GL_OUT_OF_MEMORY, function_name, "simulating GL_FIXED attribs");
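Note: the GL_FIXED emulation now accumulates its element count in a base::CheckedNumeric<uint32_t>, so the per-attribute overflow checks collapse into the single AssignIfValid test above. The accumulation idiom, sketched with a plain vector of attribute sizes:

    #include <vector>
    #include "base/numerics/safe_math.h"

    uint32_t SizeNeeded(uint32_t num_vertices, const std::vector<int>& sizes) {
      base::CheckedNumeric<uint32_t> elements_needed = 0;
      for (int size : sizes)
        elements_needed += base::CheckMul(num_vertices, size);  // may poison
      uint32_t bytes = 0;
      // One check at the end: any earlier overflow invalidates the chain.
      if (!base::CheckMul(elements_needed, sizeof(float)).AssignIfValid(&bytes))
        return 0;  // caller treats 0 as failure in this sketch
      return bytes;
    }
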
@@ -10752,38 +10855,140 @@ bool GLES2DecoderImpl::AttribsTypeMatch() {
return true;
}
-error::Error GLES2DecoderImpl::DoDrawArrays(
+ALWAYS_INLINE bool GLES2DecoderImpl::CheckMultiDrawArraysVertices(
+ const char* function_name,
+ bool instanced,
+ const GLint* firsts,
+ const GLsizei* counts,
+ const GLsizei* primcounts,
+ GLsizei drawcount,
+ GLuint* total_max_vertex_accessed,
+ GLsizei* total_max_primcount) {
+ DCHECK_GE(drawcount, 0);
+ for (GLsizei draw_id = 0; draw_id < drawcount; ++draw_id) {
+ GLint first = firsts[draw_id];
+ GLsizei count = counts[draw_id];
+ GLsizei primcount = instanced ? primcounts[draw_id] : 1;
+ // We have to check this here because the prototype for glDrawArrays
+ // is GLint not GLsizei.
+ if (first < 0) {
+ LOCAL_SET_GL_ERROR(GL_INVALID_VALUE, function_name, "first < 0");
+ return false;
+ }
+ if (count < 0) {
+ LOCAL_SET_GL_ERROR(GL_INVALID_VALUE, function_name, "count < 0");
+ return false;
+ }
+ if (primcount < 0) {
+ LOCAL_SET_GL_ERROR(GL_INVALID_VALUE, function_name, "primcount < 0");
+ return false;
+ }
+ if (count == 0 || primcount == 0) {
+ LOCAL_RENDER_WARNING("Render count or primcount is 0.");
+ continue;
+ }
+
+ base::CheckedNumeric<GLuint> checked_max_vertex = first;
+ checked_max_vertex += count - 1;
+ // first and count-1 are both a non-negative int, so their sum fits an
+ // unsigned int.
+ GLuint max_vertex_accessed = 0;
+ if (!checked_max_vertex.AssignIfValid(&max_vertex_accessed)) {
+ LOCAL_SET_GL_ERROR(GL_INVALID_VALUE, function_name,
+ "first + count overflow");
+ return false;
+ }
+ if (!IsDrawValid(function_name, max_vertex_accessed, instanced,
+ primcount)) {
+ return false;
+ }
+ *total_max_vertex_accessed =
+ std::max(*total_max_vertex_accessed, max_vertex_accessed);
+ *total_max_primcount = std::max(*total_max_primcount, primcount);
+ }
+ return true;
+}
+
+ALWAYS_INLINE bool GLES2DecoderImpl::CheckTransformFeedback(
const char* function_name,
bool instanced,
GLenum mode,
- GLint first,
- GLsizei count,
- GLsizei primcount) {
+ const GLsizei* counts,
+ const GLsizei* primcounts,
+ GLsizei drawcount,
+ GLsizei* vertices_drawn) {
+ DCHECK(state_.bound_transform_feedback.get());
+ if (state_.bound_transform_feedback->active() &&
+ !state_.bound_transform_feedback->paused()) {
+ if (mode != state_.bound_transform_feedback->primitive_mode()) {
+ LOCAL_SET_GL_ERROR(
+ GL_INVALID_OPERATION, function_name,
+ "mode differs from active transformfeedback's primitiveMode");
+ return false;
+ }
+ for (GLsizei draw_id = 0; draw_id < drawcount; ++draw_id) {
+ GLsizei count = counts[draw_id];
+ GLsizei primcount = instanced ? primcounts[draw_id] : 1;
+
+ bool valid = state_.bound_transform_feedback->GetVerticesNeededForDraw(
+ mode, count, primcount, *vertices_drawn, vertices_drawn);
+ if (!valid) {
+ LOCAL_SET_GL_ERROR(GL_INVALID_OPERATION, function_name,
+ "integer overflow calculating number of vertices "
+ "for transform feedback");
+ return false;
+ }
+ }
+
+ if (!buffer_manager()->RequestBuffersAccess(
+ error_state_.get(), state_.bound_transform_feedback.get(),
+ state_.current_program->GetTransformFeedbackVaryingSizes(),
+ *vertices_drawn, function_name, "transformfeedback buffers")) {
+ return false;
+ }
+ }
+ return true;
+}
+
+ALWAYS_INLINE error::Error GLES2DecoderImpl::DoMultiDrawArrays(
+ const char* function_name,
+ bool instanced,
+ GLenum mode,
+ const GLint* firsts,
+ const GLsizei* counts,
+ const GLsizei* primcounts,
+ GLsizei drawcount) {
error::Error error = WillAccessBoundFramebufferForDraw();
if (error != error::kNoError)
return error;
+
if (!validators_->draw_mode.IsValid(mode)) {
LOCAL_SET_GL_ERROR_INVALID_ENUM(function_name, mode, "mode");
return error::kNoError;
}
- if (count < 0) {
- LOCAL_SET_GL_ERROR(GL_INVALID_VALUE, function_name, "count < 0");
+
+ if (drawcount < 0) {
+ LOCAL_SET_GL_ERROR(GL_INVALID_VALUE, function_name, "drawcount < 0");
return error::kNoError;
}
- if (primcount < 0) {
- LOCAL_SET_GL_ERROR(GL_INVALID_VALUE, function_name, "primcount < 0");
+
+ if (!CheckBoundDrawFramebufferValid(function_name)) {
return error::kNoError;
}
- if (!CheckBoundDrawFramebufferValid(function_name)) {
+
+ GLuint total_max_vertex_accessed = 0;
+ GLsizei total_max_primcount = 0;
+ if (!CheckMultiDrawArraysVertices(
+ function_name, instanced, firsts, counts, primcounts, drawcount,
+ &total_max_vertex_accessed, &total_max_primcount)) {
return error::kNoError;
}
- // We have to check this here because the prototype for glDrawArrays
- // is GLint not GLsizei.
- if (first < 0) {
- LOCAL_SET_GL_ERROR(GL_INVALID_VALUE, function_name, "first < 0");
+
+ if (total_max_primcount == 0) {
return error::kNoError;
}
+ GLsizei transform_feedback_vertices = 0;
if (feature_info_->IsWebGL2OrES3Context()) {
if (!AttribsTypeMatch()) {
LOCAL_SET_GL_ERROR(GL_INVALID_OPERATION, function_name,
@@ -10791,29 +10996,10 @@ error::Error GLES2DecoderImpl::DoDrawArrays(
return error::kNoError;
}
- DCHECK(state_.bound_transform_feedback.get());
- if (state_.bound_transform_feedback->active() &&
- !state_.bound_transform_feedback->paused()) {
- if (mode != state_.bound_transform_feedback->primitive_mode()) {
- LOCAL_SET_GL_ERROR(GL_INVALID_OPERATION, function_name,
- "mode differs from active transformfeedback's primitiveMode");
- return error::kNoError;
- }
- GLsizei vertices = 0;
- bool valid = state_.bound_transform_feedback->GetVerticesNeededForDraw(
- mode, count, primcount, &vertices);
- if (!valid) {
- LOCAL_SET_GL_ERROR(GL_INVALID_OPERATION, function_name,
- "integer overflow calculating number of vertices "
- "for transform feedback");
- return error::kNoError;
- }
- if (!buffer_manager()->RequestBuffersAccess(
- state_.GetErrorState(), state_.bound_transform_feedback.get(),
- state_.current_program->GetTransformFeedbackVaryingSizes(),
- vertices, function_name, "transformfeedback buffers")) {
- return error::kNoError;
- }
+ if (!CheckTransformFeedback(function_name, instanced, mode, counts,
+ primcounts, drawcount,
+ &transform_feedback_vertices)) {
+ return error::kNoError;
}
if (!ValidateUniformBlockBackings(function_name)) {
@@ -10821,63 +11007,64 @@ error::Error GLES2DecoderImpl::DoDrawArrays(
}
}
- if (count == 0 || primcount == 0) {
- LOCAL_RENDER_WARNING("Render count or primcount is 0.");
+ if (!ClearUnclearedTextures()) {
+ // TODO(enga): Can this be GL_OUT_OF_MEMORY?
+ LOCAL_SET_GL_ERROR(GL_INVALID_VALUE, function_name, "out of memory");
return error::kNoError;
}
- base::CheckedNumeric<GLuint> checked_max_vertex = first;
- checked_max_vertex += count - 1;
- // first and count-1 are both a non-negative int, so their sum fits an
- // unsigned int.
- if (!checked_max_vertex.IsValid()) {
- LOCAL_SET_GL_ERROR(GL_INVALID_VALUE, function_name,
- "first + count overflow");
+ bool simulated_attrib_0 = false;
+ if (!SimulateAttrib0(function_name, total_max_vertex_accessed,
+ &simulated_attrib_0)) {
return error::kNoError;
}
- GLuint max_vertex_accessed = checked_max_vertex.ValueOrDefault(0);
- if (IsDrawValid(function_name, max_vertex_accessed, instanced, primcount)) {
- if (!ClearUnclearedTextures()) {
- LOCAL_SET_GL_ERROR(GL_INVALID_VALUE, function_name, "out of memory");
+ bool simulated_fixed_attribs = false;
+ if (SimulateFixedAttribs(function_name, total_max_vertex_accessed,
+ &simulated_fixed_attribs, total_max_primcount)) {
+ bool textures_set;
+ if (!PrepareTexturesForRender(&textures_set, function_name)) {
return error::kNoError;
}
- bool simulated_attrib_0 = false;
- if (!SimulateAttrib0(
- function_name, max_vertex_accessed, &simulated_attrib_0)) {
+ ApplyDirtyState();
+ if (!ValidateAndAdjustDrawBuffers(function_name)) {
return error::kNoError;
}
- bool simulated_fixed_attribs = false;
- if (SimulateFixedAttribs(
- function_name, max_vertex_accessed, &simulated_fixed_attribs,
- primcount)) {
- bool textures_set = !PrepareTexturesForRender();
- ApplyDirtyState();
- if (!ValidateAndAdjustDrawBuffers(function_name)) {
- return error::kNoError;
+
+ GLint draw_id_location = state_.current_program->draw_id_uniform_location();
+ for (GLsizei draw_id = 0; draw_id < drawcount; ++draw_id) {
+ GLint first = firsts[draw_id];
+ GLsizei count = counts[draw_id];
+ GLsizei primcount = instanced ? primcounts[draw_id] : 1;
+ if (count == 0 || primcount == 0) {
+ continue;
+ }
+ if (draw_id_location >= 0) {
+ api()->glUniform1iFn(draw_id_location, draw_id);
}
if (!instanced) {
api()->glDrawArraysFn(mode, first, count);
} else {
api()->glDrawArraysInstancedANGLEFn(mode, first, count, primcount);
}
- if (state_.bound_transform_feedback.get()) {
- state_.bound_transform_feedback->OnVerticesDrawn(mode, count,
- primcount);
- }
- if (textures_set) {
- RestoreStateForTextures();
- }
- if (simulated_fixed_attribs) {
- RestoreStateForSimulatedFixedAttribs();
- }
}
- if (simulated_attrib_0) {
- // We don't have to restore attrib 0 generic data at the end of this
- // function even if it is simulated. This is because we will simulate
- // it in each draw call, and attrib 0 generic data queries use cached
- // values instead of passing down to the underlying driver.
- RestoreStateForAttrib(0, false);
+ if (state_.bound_transform_feedback.get()) {
+ state_.bound_transform_feedback->OnVerticesDrawn(
+ transform_feedback_vertices);
}
+
+ if (textures_set) {
+ RestoreStateForTextures();
+ }
+ if (simulated_fixed_attribs) {
+ RestoreStateForSimulatedFixedAttribs();
+ }
+ }
+ if (simulated_attrib_0) {
+ // We don't have to restore attrib 0 generic data at the end of this
+ // function even if it is simulated. This is because we will simulate
+ // it in each draw call, and attrib 0 generic data queries use cached
+ // values instead of passing down to the underlying driver.
+ RestoreStateForAttrib(0, false);
}
return error::kNoError;
}
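Note: when a program uses the gl_DrawID builtin from WEBGL_multi_draw, ANGLE rewrites it into an internal uniform; DoLinkProgram above caches its location via UpdateDrawIDUniformLocation, and the loop just shown feeds it per sub-draw. Reduced to its core:

    GLint draw_id_location = state_.current_program->draw_id_uniform_location();
    for (GLsizei draw_id = 0; draw_id < drawcount; ++draw_id) {
      if (draw_id_location >= 0)
        api()->glUniform1iFn(draw_id_location, draw_id);  // update gl_DrawID
      api()->glDrawArraysFn(mode, firsts[draw_id], counts[draw_id]);
    }
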
@@ -10886,12 +11073,10 @@ error::Error GLES2DecoderImpl::HandleDrawArrays(uint32_t immediate_data_size,
const volatile void* cmd_data) {
const volatile cmds::DrawArrays& c =
*static_cast<const volatile cmds::DrawArrays*>(cmd_data);
- return DoDrawArrays("glDrawArrays",
- false,
- static_cast<GLenum>(c.mode),
- static_cast<GLint>(c.first),
- static_cast<GLsizei>(c.count),
- 1);
+ GLint first = static_cast<GLint>(c.first);
+ GLsizei count = static_cast<GLsizei>(c.count);
+ return DoMultiDrawArrays("glDrawArrays", false, static_cast<GLenum>(c.mode),
+ &first, &count, nullptr, 1);
}
error::Error GLES2DecoderImpl::HandleDrawArraysInstancedANGLE(
@@ -10903,50 +11088,117 @@ error::Error GLES2DecoderImpl::HandleDrawArraysInstancedANGLE(
if (!features().angle_instanced_arrays)
return error::kUnknownCommand;
- return DoDrawArrays("glDrawArraysInstancedANGLE", true,
- static_cast<GLenum>(c.mode), static_cast<GLint>(c.first),
- static_cast<GLsizei>(c.count),
- static_cast<GLsizei>(c.primcount));
+ GLint first = static_cast<GLint>(c.first);
+ GLsizei count = static_cast<GLsizei>(c.count);
+ GLsizei primcount = static_cast<GLsizei>(c.primcount);
+ return DoMultiDrawArrays("glDrawArraysInstancedANGLE", true,
+ static_cast<GLenum>(c.mode), &first, &count,
+ &primcount, 1);
+}
+
+ALWAYS_INLINE bool GLES2DecoderImpl::CheckMultiDrawElementsVertices(
+ const char* function_name,
+ bool instanced,
+ const GLsizei* counts,
+ GLenum type,
+ const int32_t* offsets,
+ const GLsizei* primcounts,
+ GLsizei drawcount,
+ Buffer* element_array_buffer,
+ GLuint* total_max_vertex_accessed,
+ GLsizei* total_max_primcount) {
+ DCHECK_GE(drawcount, 0);
+ for (GLsizei draw_id = 0; draw_id < drawcount; ++draw_id) {
+ GLsizei count = counts[draw_id];
+ GLsizei offset = offsets[draw_id];
+ GLsizei primcount = instanced ? primcounts[draw_id] : 1;
+
+ if (count < 0) {
+ LOCAL_SET_GL_ERROR(GL_INVALID_VALUE, function_name, "count < 0");
+ return false;
+ }
+ if (offset < 0) {
+ LOCAL_SET_GL_ERROR(GL_INVALID_VALUE, function_name, "offset < 0");
+ return false;
+ }
+ if (primcount < 0) {
+ LOCAL_SET_GL_ERROR(GL_INVALID_VALUE, function_name, "primcount < 0");
+ return false;
+ }
+ if (count == 0 || primcount == 0) {
+ continue;
+ }
+
+ GLuint max_vertex_accessed;
+ if (!element_array_buffer->GetMaxValueForRange(
+ offset, count, type,
+ state_.enable_flags.primitive_restart_fixed_index,
+ &max_vertex_accessed)) {
+ LOCAL_SET_GL_ERROR(GL_INVALID_OPERATION, function_name,
+ "range out of bounds for buffer");
+ return false;
+ }
+
+ if (!IsDrawValid(function_name, max_vertex_accessed, instanced,
+ primcount)) {
+ return false;
+ }
+
+ *total_max_vertex_accessed =
+ std::max(*total_max_vertex_accessed, max_vertex_accessed);
+ *total_max_primcount = std::max(*total_max_primcount, primcount);
+ }
+ return true;
}
-error::Error GLES2DecoderImpl::DoDrawElements(const char* function_name,
- bool instanced,
- GLenum mode,
- GLsizei count,
- GLenum type,
- int32_t offset,
- GLsizei primcount) {
+ALWAYS_INLINE error::Error GLES2DecoderImpl::DoMultiDrawElements(
+ const char* function_name,
+ bool instanced,
+ GLenum mode,
+ const GLsizei* counts,
+ GLenum type,
+ const int32_t* offsets,
+ const GLsizei* primcounts,
+ GLsizei drawcount) {
error::Error error = WillAccessBoundFramebufferForDraw();
if (error != error::kNoError)
return error;
- if (count < 0) {
- LOCAL_SET_GL_ERROR(GL_INVALID_VALUE, function_name, "count < 0");
- return error::kNoError;
- }
- if (offset < 0) {
- LOCAL_SET_GL_ERROR(GL_INVALID_VALUE, function_name, "offset < 0");
- return error::kNoError;
- }
if (!validators_->draw_mode.IsValid(mode)) {
LOCAL_SET_GL_ERROR_INVALID_ENUM(function_name, mode, "mode");
return error::kNoError;
}
+
if (!validators_->index_type.IsValid(type)) {
LOCAL_SET_GL_ERROR_INVALID_ENUM(function_name, type, "type");
return error::kNoError;
}
- if (primcount < 0) {
- LOCAL_SET_GL_ERROR(GL_INVALID_VALUE, function_name, "primcount < 0");
+
+ if (drawcount < 0) {
+ LOCAL_SET_GL_ERROR(GL_INVALID_VALUE, function_name, "drawcount < 0");
+ return error::kNoError;
+ }
+
+ if (!CheckBoundDrawFramebufferValid(function_name)) {
return error::kNoError;
}
+
Buffer* element_array_buffer = buffer_manager()->RequestBufferAccess(
- &state_, GL_ELEMENT_ARRAY_BUFFER, function_name);
+ &state_, error_state_.get(), GL_ELEMENT_ARRAY_BUFFER, function_name);
if (!element_array_buffer) {
return error::kNoError;
}
- if (!CheckBoundDrawFramebufferValid(function_name)) {
+ GLuint total_max_vertex_accessed = 0;
+ GLsizei total_max_primcount = 0;
+ if (!CheckMultiDrawElementsVertices(
+ function_name, instanced, counts, type, offsets, primcounts,
+ drawcount, element_array_buffer, &total_max_vertex_accessed,
+ &total_max_primcount)) {
+ return error::kNoError;
+ }
+
+ if (total_max_primcount == 0) {
return error::kNoError;
}
@@ -10954,7 +11206,7 @@ error::Error GLES2DecoderImpl::DoDrawElements(const char* function_name,
state_.bound_transform_feedback->active() &&
!state_.bound_transform_feedback->paused()) {
LOCAL_SET_GL_ERROR(GL_INVALID_OPERATION, function_name,
- "transformfeedback is active and not paused");
+ "transformfeedback is active and not paused");
return error::kNoError;
}
@@ -10969,53 +11221,54 @@ error::Error GLES2DecoderImpl::DoDrawElements(const char* function_name,
}
}
- if (count == 0 || primcount == 0) {
+ if (!ClearUnclearedTextures()) {
+ LOCAL_SET_GL_ERROR(GL_INVALID_VALUE, function_name, "out of memory");
return error::kNoError;
}
- GLuint max_vertex_accessed;
- if (!element_array_buffer->GetMaxValueForRange(
- offset, count, type,
- state_.enable_flags.primitive_restart_fixed_index,
- &max_vertex_accessed)) {
- LOCAL_SET_GL_ERROR(
- GL_INVALID_OPERATION, function_name, "range out of bounds for buffer");
+ bool simulated_attrib_0 = false;
+ if (!SimulateAttrib0(function_name, total_max_vertex_accessed,
+ &simulated_attrib_0)) {
return error::kNoError;
}
-
- if (IsDrawValid(function_name, max_vertex_accessed, instanced, primcount)) {
- if (!ClearUnclearedTextures()) {
- LOCAL_SET_GL_ERROR(GL_INVALID_VALUE, function_name, "out of memory");
+ bool simulated_fixed_attribs = false;
+ if (SimulateFixedAttribs(function_name, total_max_vertex_accessed,
+ &simulated_fixed_attribs, total_max_primcount)) {
+ bool textures_set;
+ if (!PrepareTexturesForRender(&textures_set, function_name)) {
return error::kNoError;
}
- bool simulated_attrib_0 = false;
- if (!SimulateAttrib0(
- function_name, max_vertex_accessed, &simulated_attrib_0)) {
+ ApplyDirtyState();
+ // TODO(gman): Refactor to hide these details in BufferManager or
+ // VertexAttribManager.
+ bool used_client_side_array = false;
+ if (element_array_buffer->IsClientSideArray()) {
+ used_client_side_array = true;
+ api()->glBindBufferFn(GL_ELEMENT_ARRAY_BUFFER, 0);
+ }
+ if (!ValidateAndAdjustDrawBuffers(function_name)) {
return error::kNoError;
}
- bool simulated_fixed_attribs = false;
- if (SimulateFixedAttribs(
- function_name, max_vertex_accessed, &simulated_fixed_attribs,
- primcount)) {
- bool textures_set = !PrepareTexturesForRender();
- ApplyDirtyState();
- // TODO(gman): Refactor to hide these details in BufferManager or
- // VertexAttribManager.
+ if (state_.enable_flags.primitive_restart_fixed_index &&
+ feature_info_->feature_flags().emulate_primitive_restart_fixed_index) {
+ api()->glEnableFn(GL_PRIMITIVE_RESTART);
+ buffer_manager()->SetPrimitiveRestartFixedIndexIfNecessary(type);
+ }
+
+ GLint draw_id_location = state_.current_program->draw_id_uniform_location();
+ for (GLsizei draw_id = 0; draw_id < drawcount; ++draw_id) {
+ GLsizei count = counts[draw_id];
+ GLsizei offset = offsets[draw_id];
+ GLsizei primcount = instanced ? primcounts[draw_id] : 1;
+ if (count == 0 || primcount == 0) {
+ continue;
+ }
const GLvoid* indices = reinterpret_cast<const GLvoid*>(offset);
- bool used_client_side_array = false;
- if (element_array_buffer->IsClientSideArray()) {
- used_client_side_array = true;
- api()->glBindBufferFn(GL_ELEMENT_ARRAY_BUFFER, 0);
+ if (used_client_side_array) {
indices = element_array_buffer->GetRange(offset, 0);
}
- if (!ValidateAndAdjustDrawBuffers(function_name)) {
- return error::kNoError;
- }
- if (state_.enable_flags.primitive_restart_fixed_index &&
- feature_info_->feature_flags().
- emulate_primitive_restart_fixed_index) {
- api()->glEnableFn(GL_PRIMITIVE_RESTART);
- buffer_manager()->SetPrimitiveRestartFixedIndexIfNecessary(type);
+ if (draw_id_location >= 0) {
+ api()->glUniform1iFn(draw_id_location, draw_id);
}
if (!instanced) {
api()->glDrawElementsFn(mode, count, type, indices);
@@ -11023,30 +11276,29 @@ error::Error GLES2DecoderImpl::DoDrawElements(const char* function_name,
api()->glDrawElementsInstancedANGLEFn(mode, count, type, indices,
primcount);
}
- if (state_.enable_flags.primitive_restart_fixed_index &&
- feature_info_->feature_flags().
- emulate_primitive_restart_fixed_index) {
- api()->glDisableFn(GL_PRIMITIVE_RESTART);
- }
- if (used_client_side_array) {
- api()->glBindBufferFn(GL_ELEMENT_ARRAY_BUFFER,
- element_array_buffer->service_id());
- }
- if (textures_set) {
- RestoreStateForTextures();
- }
- if (simulated_fixed_attribs) {
- RestoreStateForSimulatedFixedAttribs();
- }
}
- if (simulated_attrib_0) {
- // We don't have to restore attrib 0 generic data at the end of this
- // function even if it is simulated. This is because we will simulate
- // it in each draw call, and attrib 0 generic data queries use cached
- // values instead of passing down to the underlying driver.
- RestoreStateForAttrib(0, false);
+ if (state_.enable_flags.primitive_restart_fixed_index &&
+ feature_info_->feature_flags().emulate_primitive_restart_fixed_index) {
+ api()->glDisableFn(GL_PRIMITIVE_RESTART);
+ }
+ if (used_client_side_array) {
+ api()->glBindBufferFn(GL_ELEMENT_ARRAY_BUFFER,
+ element_array_buffer->service_id());
+ }
+ if (textures_set) {
+ RestoreStateForTextures();
+ }
+ if (simulated_fixed_attribs) {
+ RestoreStateForSimulatedFixedAttribs();
}
}
+ if (simulated_attrib_0) {
+ // We don't have to restore attrib 0 generic data at the end of this
+ // function even if it is simulated. This is because we will simulate
+ // it in each draw call, and attrib 0 generic data queries use cached
+ // values instead of passing down to the underlying driver.
+ RestoreStateForAttrib(0, false);
+ }
return error::kNoError;
}
@@ -11055,10 +11307,11 @@ error::Error GLES2DecoderImpl::HandleDrawElements(
const volatile void* cmd_data) {
const volatile gles2::cmds::DrawElements& c =
*static_cast<const volatile gles2::cmds::DrawElements*>(cmd_data);
- return DoDrawElements("glDrawElements", false, static_cast<GLenum>(c.mode),
- static_cast<GLsizei>(c.count),
- static_cast<GLenum>(c.type),
- static_cast<int32_t>(c.index_offset), 1);
+ GLsizei count = static_cast<GLsizei>(c.count);
+ int32_t offset = static_cast<int32_t>(c.index_offset);
+ return DoMultiDrawElements("glDrawArrays", false, static_cast<GLenum>(c.mode),
+ &count, static_cast<GLenum>(c.type), &offset,
+ nullptr, 1);
}
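
The scalar DrawElements entry point above reuses the multi-draw implementation by treating its arguments as one-element arrays: pointers to locals, drawcount 1, and a null primcounts pointer for the non-instanced case. A minimal sketch of the adapter shape (helper name illustrative; DoMultiDrawElements' signature taken from the hunk above):

  error::Error DrawElementsViaMultiDraw(GLenum mode, GLsizei count,
                                        GLenum type, int32_t offset) {
    // Addresses of locals act as one-element arrays; drawcount == 1.
    return DoMultiDrawElements("glDrawElements", /*instanced=*/false, mode,
                               &count, type, &offset,
                               /*primcounts=*/nullptr, /*drawcount=*/1);
  }
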
error::Error GLES2DecoderImpl::HandleDrawElementsInstancedANGLE(
@@ -11070,10 +11323,228 @@ error::Error GLES2DecoderImpl::HandleDrawElementsInstancedANGLE(
if (!features().angle_instanced_arrays)
return error::kUnknownCommand;
- return DoDrawElements(
- "glDrawElementsInstancedANGLE", true, static_cast<GLenum>(c.mode),
- static_cast<GLsizei>(c.count), static_cast<GLenum>(c.type),
- static_cast<int32_t>(c.index_offset), static_cast<GLsizei>(c.primcount));
+ GLsizei count = static_cast<GLsizei>(c.count);
+ int32_t offset = static_cast<int32_t>(c.index_offset);
+ GLsizei primcount = static_cast<GLsizei>(c.primcount);
+ return DoMultiDrawElements(
+ "glDrawElementsInstancedANGLE", true, static_cast<GLenum>(c.mode), &count,
+ static_cast<GLenum>(c.type), &offset, &primcount, 1);
+}
+
+void GLES2DecoderImpl::DoMultiDrawBeginCHROMIUM(GLsizei drawcount) {
+ if (!multi_draw_manager_->Begin(drawcount)) {
+ MarkContextLost(error::kGuilty);
+ group_->LoseContexts(error::kInnocent);
+ }
+}
+
+void GLES2DecoderImpl::DoMultiDrawEndCHROMIUM() {
+ MultiDrawManager::ResultData result;
+ if (!multi_draw_manager_->End(&result)) {
+ MarkContextLost(error::kGuilty);
+ group_->LoseContexts(error::kInnocent);
+ return;
+ }
+ switch (result.draw_function) {
+ case MultiDrawManager::DrawFunction::DrawArrays:
+ DoMultiDrawArrays("glMultiDrawArraysWEBGL", false, result.mode,
+ result.firsts.data(), result.counts.data(), nullptr,
+ result.drawcount);
+ break;
+ case MultiDrawManager::DrawFunction::DrawArraysInstanced:
+ DoMultiDrawArrays("glMultiDrawArraysInstancedWEBGL", true, result.mode,
+ result.firsts.data(), result.counts.data(),
+ result.instance_counts.data(), result.drawcount);
+ break;
+ case MultiDrawManager::DrawFunction::DrawElements:
+ DoMultiDrawElements("glMultiDrawElementsWEBGL", false, result.mode,
+ result.counts.data(), result.type,
+ result.offsets.data(), nullptr, result.drawcount);
+ break;
+ case MultiDrawManager::DrawFunction::DrawElementsInstanced:
+ DoMultiDrawElements("glMultiDrawElementsInstancedWEBGL", true,
+ result.mode, result.counts.data(), result.type,
+ result.offsets.data(), result.instance_counts.data(),
+ result.drawcount);
+ break;
+ default:
+ NOTREACHED();
+ MarkContextLost(error::kGuilty);
+ group_->LoseContexts(error::kInnocent);
+ }
+}
+
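The Begin/End pair above brackets one batch: Begin reserves the draw count, the per-draw commands append into the manager, and End returns a ResultData naming which draw function to replay. Any protocol violation is treated as a hostile client and loses the context. A simplified sketch of the state machine this implies (assumed; the real MultiDrawManager lives in multi_draw_manager.h):

  struct MultiDrawBatchSketch {
    bool in_batch = false;
    GLsizei expected = 0;
    bool Begin(GLsizei drawcount) {
      if (in_batch || drawcount < 0) return false;  // caller loses context
      in_batch = true;
      expected = drawcount;
      return true;
    }
    bool End() {
      if (!in_batch) return false;  // End without a matching Begin
      in_batch = false;
      return true;
    }
  };
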
+error::Error GLES2DecoderImpl::HandleMultiDrawArraysCHROMIUM(
+ uint32_t immediate_data_size,
+ const volatile void* cmd_data) {
+ const volatile gles2::cmds::MultiDrawArraysCHROMIUM& c =
+ *static_cast<const volatile gles2::cmds::MultiDrawArraysCHROMIUM*>(
+ cmd_data);
+ if (!features().webgl_multi_draw) {
+ return error::kUnknownCommand;
+ }
+
+ GLenum mode = static_cast<GLenum>(c.mode);
+ GLsizei drawcount = static_cast<GLsizei>(c.drawcount);
+
+ uint32_t firsts_size, counts_size;
+ base::CheckedNumeric<uint32_t> checked_size(drawcount);
+ if (!(checked_size * sizeof(GLint)).AssignIfValid(&firsts_size)) {
+ return error::kOutOfBounds;
+ }
+ if (!(checked_size * sizeof(GLsizei)).AssignIfValid(&counts_size)) {
+ return error::kOutOfBounds;
+ }
+ const GLint* firsts = GetSharedMemoryAs<const GLint*>(
+ c.firsts_shm_id, c.firsts_shm_offset, firsts_size);
+ const GLsizei* counts = GetSharedMemoryAs<const GLsizei*>(
+ c.counts_shm_id, c.counts_shm_offset, counts_size);
+ if (firsts == nullptr) {
+ return error::kOutOfBounds;
+ }
+ if (counts == nullptr) {
+ return error::kOutOfBounds;
+ }
+ if (!multi_draw_manager_->MultiDrawArrays(mode, firsts, counts, drawcount)) {
+ return error::kInvalidArguments;
+ }
+ return error::kNoError;
+}
+
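Each handler above sizes its shared-memory reads with base::CheckedNumeric so a hostile drawcount cannot overflow the 32-bit byte count before GetSharedMemoryAs validates the range. The pattern, isolated (helper name illustrative):

  #include "base/numerics/checked_math.h"

  // Returns false instead of wrapping when drawcount * sizeof(GLint)
  // exceeds uint32_t range.
  bool ComputeArraySize(GLsizei drawcount, uint32_t* out_bytes) {
    base::CheckedNumeric<uint32_t> checked(drawcount);
    return (checked * sizeof(GLint)).AssignIfValid(out_bytes);
  }
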
+error::Error GLES2DecoderImpl::HandleMultiDrawArraysInstancedCHROMIUM(
+ uint32_t immediate_data_size,
+ const volatile void* cmd_data) {
+ const volatile gles2::cmds::MultiDrawArraysInstancedCHROMIUM& c =
+ *static_cast<
+ const volatile gles2::cmds::MultiDrawArraysInstancedCHROMIUM*>(
+ cmd_data);
+ if (!features().webgl_multi_draw_instanced) {
+ return error::kUnknownCommand;
+ }
+
+ GLenum mode = static_cast<GLenum>(c.mode);
+ GLsizei drawcount = static_cast<GLsizei>(c.drawcount);
+
+ uint32_t firsts_size, counts_size, instance_counts_size;
+ base::CheckedNumeric<uint32_t> checked_size(drawcount);
+ if (!(checked_size * sizeof(GLint)).AssignIfValid(&firsts_size)) {
+ return error::kOutOfBounds;
+ }
+ if (!(checked_size * sizeof(GLsizei)).AssignIfValid(&counts_size)) {
+ return error::kOutOfBounds;
+ }
+ if (!(checked_size * sizeof(GLsizei)).AssignIfValid(&instance_counts_size)) {
+ return error::kOutOfBounds;
+ }
+ const GLint* firsts = GetSharedMemoryAs<const GLint*>(
+ c.firsts_shm_id, c.firsts_shm_offset, firsts_size);
+ const GLsizei* counts = GetSharedMemoryAs<const GLsizei*>(
+ c.counts_shm_id, c.counts_shm_offset, counts_size);
+ const GLsizei* instance_counts = GetSharedMemoryAs<const GLsizei*>(
+ c.instance_counts_shm_id, c.instance_counts_shm_offset,
+ instance_counts_size);
+ if (firsts == nullptr) {
+ return error::kOutOfBounds;
+ }
+ if (counts == nullptr) {
+ return error::kOutOfBounds;
+ }
+ if (instance_counts == nullptr) {
+ return error::kOutOfBounds;
+ }
+ if (!multi_draw_manager_->MultiDrawArraysInstanced(
+ mode, firsts, counts, instance_counts, drawcount)) {
+ return error::kInvalidArguments;
+ }
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderImpl::HandleMultiDrawElementsCHROMIUM(
+ uint32_t immediate_data_size,
+ const volatile void* cmd_data) {
+ const volatile gles2::cmds::MultiDrawElementsCHROMIUM& c =
+ *static_cast<const volatile gles2::cmds::MultiDrawElementsCHROMIUM*>(
+ cmd_data);
+ if (!features().webgl_multi_draw) {
+ return error::kUnknownCommand;
+ }
+
+ GLenum mode = static_cast<GLenum>(c.mode);
+ GLenum type = static_cast<GLenum>(c.type);
+ GLsizei drawcount = static_cast<GLsizei>(c.drawcount);
+
+ uint32_t counts_size, offsets_size;
+ base::CheckedNumeric<uint32_t> checked_size(drawcount);
+ if (!(checked_size * sizeof(GLsizei)).AssignIfValid(&counts_size)) {
+ return error::kOutOfBounds;
+ }
+ if (!(checked_size * sizeof(GLsizei)).AssignIfValid(&offsets_size)) {
+ return error::kOutOfBounds;
+ }
+ const GLsizei* counts = GetSharedMemoryAs<const GLsizei*>(
+ c.counts_shm_id, c.counts_shm_offset, counts_size);
+ const GLsizei* offsets = GetSharedMemoryAs<const GLsizei*>(
+ c.offsets_shm_id, c.offsets_shm_offset, offsets_size);
+ if (counts == nullptr) {
+ return error::kOutOfBounds;
+ }
+ if (offsets == nullptr) {
+ return error::kOutOfBounds;
+ }
+ if (!multi_draw_manager_->MultiDrawElements(mode, counts, type, offsets,
+ drawcount)) {
+ return error::kInvalidArguments;
+ }
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderImpl::HandleMultiDrawElementsInstancedCHROMIUM(
+ uint32_t immediate_data_size,
+ const volatile void* cmd_data) {
+ const volatile gles2::cmds::MultiDrawElementsInstancedCHROMIUM& c =
+ *static_cast<
+ const volatile gles2::cmds::MultiDrawElementsInstancedCHROMIUM*>(
+ cmd_data);
+ if (!features().webgl_multi_draw_instanced) {
+ return error::kUnknownCommand;
+ }
+
+ GLenum mode = static_cast<GLenum>(c.mode);
+ GLenum type = static_cast<GLenum>(c.type);
+ GLsizei drawcount = static_cast<GLsizei>(c.drawcount);
+
+ uint32_t counts_size, offsets_size, instance_counts_size;
+ base::CheckedNumeric<uint32_t> checked_size(drawcount);
+ if (!(checked_size * sizeof(GLsizei)).AssignIfValid(&counts_size)) {
+ return error::kOutOfBounds;
+ }
+ if (!(checked_size * sizeof(GLsizei)).AssignIfValid(&offsets_size)) {
+ return error::kOutOfBounds;
+ }
+ if (!(checked_size * sizeof(GLsizei)).AssignIfValid(&instance_counts_size)) {
+ return error::kOutOfBounds;
+ }
+ const GLsizei* counts = GetSharedMemoryAs<const GLsizei*>(
+ c.counts_shm_id, c.counts_shm_offset, counts_size);
+ const GLsizei* offsets = GetSharedMemoryAs<const GLsizei*>(
+ c.offsets_shm_id, c.offsets_shm_offset, offsets_size);
+ const GLsizei* instance_counts = GetSharedMemoryAs<const GLsizei*>(
+ c.instance_counts_shm_id, c.instance_counts_shm_offset,
+ instance_counts_size);
+ if (counts == nullptr) {
+ return error::kOutOfBounds;
+ }
+ if (offsets == nullptr) {
+ return error::kOutOfBounds;
+ }
+ if (instance_counts == nullptr) {
+ return error::kOutOfBounds;
+ }
+ if (!multi_draw_manager_->MultiDrawElementsInstanced(
+ mode, counts, type, offsets, instance_counts, drawcount)) {
+ return error::kInvalidArguments;
+ }
+ return error::kNoError;
}
GLuint GLES2DecoderImpl::DoGetMaxValueInBufferCHROMIUM(
@@ -11787,7 +12258,7 @@ error::Error GLES2DecoderImpl::HandleVertexAttribIPointer(
GL_INVALID_VALUE, "glVertexAttribIPointer", "offset < 0");
return error::kNoError;
}
- GLsizei type_size = GLES2Util::GetGLTypeSizeForBuffers(type);
+ uint32_t type_size = GLES2Util::GetGLTypeSizeForBuffers(type);
// type_size must be a power of two to use & as optimized modulo.
DCHECK(GLES2Util::IsPOT(type_size));
if (offset & (type_size - 1)) {
@@ -11807,7 +12278,8 @@ error::Error GLES2DecoderImpl::HandleVertexAttribIPointer(
SHADER_VARIABLE_INT : SHADER_VARIABLE_UINT;
state_.vertex_attrib_manager->UpdateAttribBaseTypeAndMask(indx, base_type);
- GLsizei group_size = GLES2Util::GetGroupSizeForBufferType(size, type);
+ uint32_t group_size = GLES2Util::GetGroupSizeForBufferType(size, type);
+ DCHECK_LE(group_size, static_cast<uint32_t>(INT_MAX));
state_.vertex_attrib_manager
->SetAttribInfo(indx,
state_.bound_array_buffer.get(),
@@ -11878,7 +12350,7 @@ error::Error GLES2DecoderImpl::HandleVertexAttribPointer(
GL_INVALID_VALUE, "glVertexAttribPointer", "offset < 0");
return error::kNoError;
}
- GLsizei type_size = GLES2Util::GetGLTypeSizeForBuffers(type);
+ uint32_t type_size = GLES2Util::GetGLTypeSizeForBuffers(type);
// type_size must be a power of two to use & as optimized modulo.
DCHECK(GLES2Util::IsPOT(type_size));
if (offset & (type_size - 1)) {
@@ -11897,7 +12369,8 @@ error::Error GLES2DecoderImpl::HandleVertexAttribPointer(
state_.vertex_attrib_manager->UpdateAttribBaseTypeAndMask(
indx, SHADER_VARIABLE_FLOAT);
- GLsizei group_size = GLES2Util::GetGroupSizeForBufferType(size, type);
+ uint32_t group_size = GLES2Util::GetGroupSizeForBufferType(size, type);
+ DCHECK_LE(group_size, static_cast<uint32_t>(INT_MAX));
state_.vertex_attrib_manager
->SetAttribInfo(indx,
state_.bound_array_buffer.get(),
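
The alignment test above relies on type_size being a power of two, so the bitmask is an exact substitute for the modulo. A self-contained illustration (helper name illustrative):

  #include <cstdint>

  // For power-of-two n, x % n == x & (n - 1); hence the DCHECK(IsPOT(...))
  // guarding the mask above.
  bool OffsetIsAligned(uint32_t offset, uint32_t type_size) {
    return (offset & (type_size - 1)) == 0;  // valid only when IsPOT(type_size)
  }
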
@@ -12107,11 +12580,12 @@ error::Error GLES2DecoderImpl::HandleReadPixels(uint32_t immediate_data_size,
return error::kInvalidArguments;
}
if (!buffer_manager()->RequestBufferAccess(
- state_.GetErrorState(), buffer, func_name, "pixel pack buffer")) {
+ error_state_.get(), buffer, func_name, "pixel pack buffer")) {
return error::kNoError;
}
uint32_t size = 0;
- if (!SafeAddUint32(pixels_size + skip_size, pixels_shm_offset, &size)) {
+ if (!base::CheckAdd(pixels_size + skip_size, pixels_shm_offset)
+ .AssignIfValid(&size)) {
LOCAL_SET_GL_ERROR(GL_INVALID_VALUE, func_name, "size + offset overflow");
return error::kNoError;
}
@@ -12256,7 +12730,8 @@ error::Error GLES2DecoderImpl::HandleReadPixels(uint32_t immediate_data_size,
int32_t max_x;
int32_t max_y;
- if (!SafeAddInt32(x, width, &max_x) || !SafeAddInt32(y, height, &max_y)) {
+ if (!base::CheckAdd(x, width).AssignIfValid(&max_x) ||
+ !base::CheckAdd(y, height).AssignIfValid(&max_y)) {
LOCAL_SET_GL_ERROR(GL_INVALID_VALUE, func_name, "dimensions out of range");
return error::kNoError;
}
@@ -12515,7 +12990,6 @@ void GLES2DecoderImpl::DoSwapBuffersWithBoundsCHROMIUM(
}
ClearScheduleCALayerState();
- ClearScheduleDCLayerState();
std::vector<gfx::Rect> bounds(count);
for (GLsizei i = 0; i < count; ++i) {
@@ -12550,7 +13024,6 @@ error::Error GLES2DecoderImpl::HandlePostSubBufferCHROMIUM(
}
ClearScheduleCALayerState();
- ClearScheduleDCLayerState();
if (supports_async_swap_) {
TRACE_EVENT_ASYNC_BEGIN0("gpu", "AsyncSwapBuffers", c.swap_id());
@@ -12708,125 +13181,6 @@ error::Error GLES2DecoderImpl::HandleScheduleCALayerCHROMIUM(
return error::kNoError;
}
-error::Error GLES2DecoderImpl::HandleScheduleDCLayerSharedStateCHROMIUM(
- uint32_t immediate_data_size,
- const volatile void* cmd_data) {
- const volatile gles2::cmds::ScheduleDCLayerSharedStateCHROMIUM& c =
- *static_cast<
- const volatile gles2::cmds::ScheduleDCLayerSharedStateCHROMIUM*>(
- cmd_data);
-
- const GLfloat* mem = GetSharedMemoryAs<const GLfloat*>(c.shm_id, c.shm_offset,
- 20 * sizeof(GLfloat));
- if (!mem) {
- return error::kOutOfBounds;
- }
- gfx::RectF clip_rect(mem[0], mem[1], mem[2], mem[3]);
- gfx::Transform transform(mem[4], mem[8], mem[12], mem[16], mem[5], mem[9],
- mem[13], mem[17], mem[6], mem[10], mem[14], mem[18],
- mem[7], mem[11], mem[15], mem[19]);
- dc_layer_shared_state_.reset(new DCLayerSharedState);
- dc_layer_shared_state_->opacity = c.opacity;
- dc_layer_shared_state_->is_clipped = c.is_clipped ? true : false;
- dc_layer_shared_state_->clip_rect = gfx::ToEnclosingRect(clip_rect);
- dc_layer_shared_state_->z_order = c.z_order;
- dc_layer_shared_state_->transform = transform;
- return error::kNoError;
-}
-
-error::Error GLES2DecoderImpl::HandleScheduleDCLayerCHROMIUM(
- uint32_t immediate_data_size,
- const volatile void* cmd_data) {
- const volatile gles2::cmds::ScheduleDCLayerCHROMIUM& c =
- *static_cast<const volatile gles2::cmds::ScheduleDCLayerCHROMIUM*>(
- cmd_data);
- GLuint filter = c.filter;
- if (filter != GL_NEAREST && filter != GL_LINEAR) {
- LOCAL_SET_GL_ERROR(GL_INVALID_VALUE, "glScheduleDCLayerCHROMIUM",
- "invalid filter");
- return error::kNoError;
- }
-
- if (!dc_layer_shared_state_) {
- LOCAL_SET_GL_ERROR(
- GL_INVALID_OPERATION, "glScheduleDCLayerCHROMIUM",
- "glScheduleDCLayerSharedStateCHROMIUM has not been called");
- return error::kNoError;
- }
-
- GLsizei num_textures = c.num_textures;
- if (num_textures < 0 || num_textures > 4) {
- LOCAL_SET_GL_ERROR(GL_INVALID_OPERATION, "glScheduleDCLayerCHROMIUM",
- "number of textures greater than maximum of 4");
- return error::kNoError;
- }
-
- size_t textures_size = num_textures * sizeof(GLuint);
-
- base::CheckedNumeric<uint32_t> data_size = textures_size;
- const uint32_t kRectDataSize = 8 * sizeof(GLfloat);
- data_size += kRectDataSize;
- if (!data_size.IsValid())
- return error::kOutOfBounds;
- const void* data =
- GetAddressAndCheckSize(c.shm_id, c.shm_offset, data_size.ValueOrDie());
- if (!data) {
- return error::kOutOfBounds;
- }
- const GLfloat* mem = reinterpret_cast<const GLfloat*>(data);
-
- gfx::RectF contents_rect(mem[0], mem[1], mem[2], mem[3]);
- gfx::RectF bounds_rect(mem[4], mem[5], mem[6], mem[7]);
-
- const volatile GLuint* texture_ids = reinterpret_cast<const volatile GLuint*>(
- static_cast<const volatile char*>(data) + kRectDataSize);
-
- std::vector<scoped_refptr<gl::GLImage>> images;
- for (int i = 0; i < num_textures; ++i) {
- GLuint contents_texture_id = texture_ids[i];
- scoped_refptr<gl::GLImage> image;
- if (contents_texture_id) {
- TextureRef* ref = texture_manager()->GetTexture(contents_texture_id);
- if (!ref) {
- LOCAL_SET_GL_ERROR(GL_INVALID_VALUE, "glScheduleDCLayerCHROMIUM",
- "unknown texture");
- return error::kNoError;
- }
- Texture::ImageState image_state;
- image = ref->texture()->GetLevelImage(ref->texture()->target(), 0,
- &image_state);
- if (!image) {
- LOCAL_SET_GL_ERROR(GL_INVALID_VALUE, "glScheduleDCLayerCHROMIUM",
- "unsupported texture format");
- return error::kNoError;
- }
- }
- images.push_back(image);
- }
-
- GLuint protected_video_type = c.protected_video_type;
- if (protected_video_type >
- static_cast<GLuint>(ui::ProtectedVideoType::kMaxValue)) {
- LOCAL_SET_GL_ERROR(GL_INVALID_VALUE, "glScheduleDCLayerCHROMIUM",
- "unknown protected video type");
- return error::kNoError;
- }
- ui::ProtectedVideoType protected_video_type_param =
- static_cast<ui::ProtectedVideoType>(protected_video_type);
-
- ui::DCRendererLayerParams params = ui::DCRendererLayerParams(
- dc_layer_shared_state_->is_clipped, dc_layer_shared_state_->clip_rect,
- dc_layer_shared_state_->z_order, dc_layer_shared_state_->transform,
- images, contents_rect, gfx::ToEnclosingRect(bounds_rect),
- c.background_color, c.edge_aa_mask, dc_layer_shared_state_->opacity,
- filter, protected_video_type_param);
- if (!surface_->ScheduleDCLayer(params)) {
- LOCAL_SET_GL_ERROR(GL_INVALID_OPERATION, "glScheduleDCLayerCHROMIUM",
- "failed to schedule DCLayer");
- }
- return error::kNoError;
-}
-
error::Error GLES2DecoderImpl::HandleSetColorSpaceMetadataCHROMIUM(
uint32_t immediate_data_size,
const volatile void* cmd_data) {
@@ -13216,6 +13570,10 @@ error::Error GLES2DecoderImpl::HandleGetString(uint32_t immediate_data_size,
extension_set.erase(kEXTDrawBuffersExtension);
if (!shader_texture_lod_explicitly_enabled_)
extension_set.erase(kEXTShaderTextureLodExtension);
+ if (!multi_draw_explicitly_enabled_)
+ extension_set.erase(kWEBGLMultiDrawExtension);
+ if (!multi_draw_instanced_explicitly_enabled_)
+ extension_set.erase(kWEBGLMultiDrawInstancedExtension);
}
if (supports_post_sub_buffer_)
extension_set.insert("GL_CHROMIUM_post_sub_buffer");
@@ -13248,15 +13606,16 @@ error::Error GLES2DecoderImpl::HandleBufferData(uint32_t immediate_data_size,
return error::kOutOfBounds;
}
}
- buffer_manager()->ValidateAndDoBufferData(&state_, target, size, data, usage);
+ buffer_manager()->ValidateAndDoBufferData(&state_, error_state_.get(), target,
+ size, data, usage);
return error::kNoError;
}
void GLES2DecoderImpl::DoBufferSubData(
GLenum target, GLintptr offset, GLsizeiptr size, const GLvoid * data) {
// Just delegate it. Some validation is actually done before this.
- buffer_manager()->ValidateAndDoBufferSubData(
- &state_, target, offset, size, data);
+ buffer_manager()->ValidateAndDoBufferSubData(&state_, error_state_.get(),
+ target, offset, size, data);
}
bool GLES2DecoderImpl::ClearLevel(Texture* texture,
@@ -13437,7 +13796,7 @@ bool GLES2DecoderImpl::ClearCompressedTextureLevel(Texture* texture,
GLsizei bytes_required = 0;
if (!GetCompressedTexSizeInBytes("ClearCompressedTextureLevel", width, height,
1, format, &bytes_required,
- state_.GetErrorState())) {
+ error_state_.get())) {
return false;
}
@@ -13512,7 +13871,8 @@ bool GLES2DecoderImpl::ClearLevel3D(Texture* texture,
subs.push_back(TexSubCoord3D(0, 0, 0, width, height, depth));
} else {
uint32_t size_per_layer;
- if (!SafeMultiplyUint32(padded_row_size, height, &size_per_layer)) {
+ if (!base::CheckMul(padded_row_size, height)
+ .AssignIfValid(&size_per_layer)) {
return false;
}
if (size_per_layer < kMaxZeroSize) {
@@ -13669,7 +14029,7 @@ const CompressedFormatInfo kCompressedFormatInfoArray[] = {
};
const CompressedFormatInfo* GetCompressedFormatInfo(GLenum format) {
- for (size_t i = 0; i < arraysize(kCompressedFormatInfoArray); i++) {
+ for (size_t i = 0; i < base::size(kCompressedFormatInfoArray); i++) {
if (kCompressedFormatInfoArray[i].format == format) {
return &kCompressedFormatInfoArray[i];
}
@@ -13745,7 +14105,7 @@ bool GLES2DecoderImpl::ValidateCompressedTexFuncData(const char* function_name,
const GLvoid* data) {
GLsizei bytes_required = 0;
if (!GetCompressedTexSizeInBytes(function_name, width, height, depth, format,
- &bytes_required, state_.GetErrorState())) {
+ &bytes_required, error_state_.get())) {
return false;
}
@@ -13758,7 +14118,7 @@ bool GLES2DecoderImpl::ValidateCompressedTexFuncData(const char* function_name,
Buffer* buffer = state_.bound_pixel_unpack_buffer.get();
if (buffer &&
!buffer_manager()->RequestBufferAccess(
- state_.GetErrorState(), buffer, reinterpret_cast<GLintptr>(data),
+ error_state_.get(), buffer, reinterpret_cast<GLintptr>(data),
static_cast<GLsizeiptr>(bytes_required), function_name,
"pixel unpack buffer")) {
return false;
@@ -14176,7 +14536,7 @@ error::Error GLES2DecoderImpl::HandleTexImage2D(uint32_t immediate_data_size,
// For testing only. Allows us to stress the ability to respond to OOM errors.
uint32_t num_pixels;
if (workarounds().simulate_out_of_memory_on_large_textures &&
- (!SafeMultiplyUint32(width, height, &num_pixels) ||
+ (!base::CheckMul(width, height).AssignIfValid(&num_pixels) ||
(num_pixels >= 4096 * 4096))) {
LOCAL_SET_GL_ERROR(GL_OUT_OF_MEMORY, func_name, "synthetic out of memory");
return error::kNoError;
@@ -14187,7 +14547,8 @@ error::Error GLES2DecoderImpl::HandleTexImage2D(uint32_t immediate_data_size,
pixels, pixels_size, padding,
TextureManager::DoTexImageArguments::kTexImage2D };
texture_manager()->ValidateAndDoTexImage(
- &texture_state_, &state_, &framebuffer_state_, func_name, args);
+ &texture_state_, &state_, error_state_.get(), &framebuffer_state_,
+ func_name, args);
// This may be a slow command. Exit command processing to allow for
// context preemption and GPU watchdog checks.
@@ -14273,7 +14634,7 @@ error::Error GLES2DecoderImpl::HandleTexImage3D(uint32_t immediate_data_size,
// For testing only. Allows us to stress the ability to respond to OOM errors.
uint32_t num_pixels;
if (workarounds().simulate_out_of_memory_on_large_textures &&
- (!SafeMultiplyUint32(width, height, &num_pixels) ||
+ (!base::CheckMul(width, height).AssignIfValid(&num_pixels) ||
(num_pixels >= 4096 * 4096))) {
LOCAL_SET_GL_ERROR(GL_OUT_OF_MEMORY, func_name, "synthetic out of memory");
return error::kNoError;
@@ -14284,7 +14645,8 @@ error::Error GLES2DecoderImpl::HandleTexImage3D(uint32_t immediate_data_size,
pixels, pixels_size, padding,
TextureManager::DoTexImageArguments::kTexImage3D };
texture_manager()->ValidateAndDoTexImage(
- &texture_state_, &state_, &framebuffer_state_, func_name, args);
+ &texture_state_, &state_, error_state_.get(), &framebuffer_state_,
+ func_name, args);
// This may be a slow command. Exit command processing to allow for
// context preemption and GPU watchdog checks.
@@ -14575,7 +14937,8 @@ void GLES2DecoderImpl::DoCopyTexImage2D(
}
DCHECK(texture_manager()->ValidateTextureParameters(
- GetErrorState(), func_name, true, format, type, internal_format, level));
+ error_state_.get(), func_name, true, format, type, internal_format,
+ level));
// Only target image size is validated here.
if (!GLES2Util::ComputeImageDataSizes(width, height, 1, format, type,
@@ -14663,8 +15026,8 @@ void GLES2DecoderImpl::DoCopyTexImage2D(
0,
TextureManager::DoTexImageArguments::kTexImage2D};
texture_manager()->WorkaroundCopyTexImageCubeMap(
- &texture_state_, &state_, &framebuffer_state_, texture_ref, func_name,
- args);
+ &texture_state_, &state_, error_state_.get(), &framebuffer_state_,
+ texture_ref, func_name, args);
}
if (src.x() != x || src.y() != y ||
@@ -14708,8 +15071,9 @@ void GLES2DecoderImpl::DoCopyTexImage2D(
target, i, final_internal_format, width, height, 1, border,
format, type, nullptr, pixels_size, 0,
TextureManager::DoTexImageArguments::kTexImage2D };
- texture_manager()->WorkaroundCopyTexImageCubeMap(&texture_state_,
- &state_, &framebuffer_state_, texture_ref, func_name, args);
+ texture_manager()->WorkaroundCopyTexImageCubeMap(
+ &texture_state_, &state_, error_state_.get(), &framebuffer_state_,
+ texture_ref, func_name, args);
}
}
@@ -14745,7 +15109,7 @@ void GLES2DecoderImpl::DoCopyTexImage2D(
{
// Copy from the read framebuffer into |temp_texture|.
api()->glGenTexturesFn(1, &temp_texture);
- ScopedTextureBinder binder(&state_, temp_texture,
+ ScopedTextureBinder binder(&state_, error_state_.get(), temp_texture,
source_texture_target);
api()->glCopyTexImage2DFn(source_texture_target, 0,
temp_internal_format, x, y, width, height,
@@ -14778,8 +15142,9 @@ void GLES2DecoderImpl::DoCopyTexImage2D(
target, level, final_internal_format, width, height, 1, border,
format, type, nullptr, pixels_size, 0,
TextureManager::DoTexImageArguments::kTexImage2D };
- texture_manager()->WorkaroundCopyTexImageCubeMap(&texture_state_,
- &state_, &framebuffer_state_, texture_ref, func_name, args);
+ texture_manager()->WorkaroundCopyTexImageCubeMap(
+ &texture_state_, &state_, error_state_.get(), &framebuffer_state_,
+ texture_ref, func_name, args);
}
if (workarounds().clear_pixel_unpack_buffer_before_copyteximage)
state_.PushTextureUnpackState();
@@ -15095,9 +15460,9 @@ error::Error GLES2DecoderImpl::HandleTexSubImage2D(
target, level, xoffset, yoffset, 0, width, height, 1,
format, type, pixels, pixels_size, padding,
TextureManager::DoTexSubImageArguments::kTexSubImage2D};
- texture_manager()->ValidateAndDoTexSubImage(this, &texture_state_, &state_,
- &framebuffer_state_,
- func_name, args);
+ texture_manager()->ValidateAndDoTexSubImage(
+ this, &texture_state_, &state_, error_state_.get(), &framebuffer_state_,
+ func_name, args);
// This may be a slow command. Exit command processing to allow for
// context preemption and GPU watchdog checks.
@@ -15189,9 +15554,9 @@ error::Error GLES2DecoderImpl::HandleTexSubImage3D(
target, level, xoffset, yoffset, zoffset, width, height, depth,
format, type, pixels, pixels_size, padding,
TextureManager::DoTexSubImageArguments::kTexSubImage3D};
- texture_manager()->ValidateAndDoTexSubImage(this, &texture_state_, &state_,
- &framebuffer_state_,
- func_name, args);
+ texture_manager()->ValidateAndDoTexSubImage(
+ this, &texture_state_, &state_, error_state_.get(), &framebuffer_state_,
+ func_name, args);
// This may be a slow command. Exit command processing to allow for
// context preemption and GPU watchdog checks.
@@ -15746,7 +16111,7 @@ error::Error GLES2DecoderImpl::HandleShaderBinary(
return error::kNoError;
}
uint32_t data_size;
- if (!SafeMultiplyUint32(n, sizeof(GLuint), &data_size)) {
+ if (!base::CheckMul(n, sizeof(GLuint)).AssignIfValid(&data_size)) {
return error::kOutOfBounds;
}
const GLuint* shaders = GetSharedMemoryAs<const GLuint*>(
@@ -15797,7 +16162,6 @@ void GLES2DecoderImpl::DoSwapBuffers(uint64_t swap_id, GLbitfield flags) {
}
ClearScheduleCALayerState();
- ClearScheduleDCLayerState();
// If offscreen then don't actually SwapBuffers to the display. Just copy
// the rendered frame to another frame buffer.
@@ -15855,8 +16219,8 @@ void GLES2DecoderImpl::DoSwapBuffers(uint64_t swap_id, GLbitfield flags) {
if (offscreen_size_.width() == 0 || offscreen_size_.height() == 0)
return;
- ScopedGLErrorSuppressor suppressor(
- "GLES2DecoderImpl::DoSwapBuffers", GetErrorState());
+ ScopedGLErrorSuppressor suppressor("GLES2DecoderImpl::DoSwapBuffers",
+ error_state_.get());
if (IsOffscreenBufferMultisampled()) {
// For multisampled buffers, resolve the frame buffer.
@@ -15937,7 +16301,6 @@ void GLES2DecoderImpl::DoCommitOverlayPlanes(uint64_t swap_id,
return;
}
ClearScheduleCALayerState();
- ClearScheduleDCLayerState();
if (supports_async_swap_) {
client_->OnSwapBuffers(swap_id, flags);
surface_->CommitOverlayPlanesAsync(
@@ -16036,6 +16399,8 @@ error::Error GLES2DecoderImpl::HandleRequestExtensionCHROMIUM(
bool desire_frag_depth = false;
bool desire_draw_buffers = false;
bool desire_shader_texture_lod = false;
+ bool desire_multi_draw = false;
+ bool desire_multi_draw_instanced = false;
if (feature_info_->context_type() == CONTEXT_TYPE_WEBGL1) {
desire_standard_derivatives =
feature_str.find("GL_OES_standard_derivatives ") != std::string::npos;
@@ -16046,14 +16411,24 @@ error::Error GLES2DecoderImpl::HandleRequestExtensionCHROMIUM(
desire_shader_texture_lod =
feature_str.find("GL_EXT_shader_texture_lod ") != std::string::npos;
}
+ if (feature_info_->IsWebGLContext()) {
+ desire_multi_draw =
+ feature_str.find("GL_WEBGL_multi_draw ") != std::string::npos;
+ desire_multi_draw_instanced =
+ feature_str.find("GL_WEBGL_multi_draw_instanced ") != std::string::npos;
+ }
if (desire_standard_derivatives != derivatives_explicitly_enabled_ ||
desire_frag_depth != frag_depth_explicitly_enabled_ ||
desire_draw_buffers != draw_buffers_explicitly_enabled_ ||
- desire_shader_texture_lod != shader_texture_lod_explicitly_enabled_) {
+ desire_shader_texture_lod != shader_texture_lod_explicitly_enabled_ ||
+ desire_multi_draw != multi_draw_explicitly_enabled_ ||
+ desire_multi_draw_instanced != multi_draw_instanced_explicitly_enabled_) {
derivatives_explicitly_enabled_ |= desire_standard_derivatives;
frag_depth_explicitly_enabled_ |= desire_frag_depth;
draw_buffers_explicitly_enabled_ |= desire_draw_buffers;
shader_texture_lod_explicitly_enabled_ |= desire_shader_texture_lod;
+ multi_draw_explicitly_enabled_ |= desire_multi_draw;
+ multi_draw_instanced_explicitly_enabled_ |= desire_multi_draw_instanced;
DestroyShaderTranslator();
}
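
Requested WebGL extensions latch one way: once explicitly enabled, the flags are only ever OR-ed in, and any change in the desired set tears down the cached shader translator so its compile options are rebuilt. The latch, isolated (a sketch, not the decoder's exact helper):

  // One-way latch: an extension, once requested, stays enabled for the
  // lifetime of the context; any flag change dirties the translator.
  void LatchExtensionFlag(bool desired, bool* explicitly_enabled,
                          bool* translator_dirty) {
    if (desired != *explicitly_enabled)
      *translator_dirty = true;
    *explicitly_enabled |= desired;
  }
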
@@ -16336,34 +16711,6 @@ error::Error GLES2DecoderImpl::HandleInsertFenceSyncCHROMIUM(
return error::kNoError;
}
-error::Error GLES2DecoderImpl::HandleWaitSyncTokenCHROMIUM(
- uint32_t immediate_data_size,
- const volatile void* cmd_data) {
- const volatile gles2::cmds::WaitSyncTokenCHROMIUM& c =
- *static_cast<const volatile gles2::cmds::WaitSyncTokenCHROMIUM*>(
- cmd_data);
-
- const gpu::CommandBufferNamespace kMinNamespaceId =
- gpu::CommandBufferNamespace::INVALID;
- const gpu::CommandBufferNamespace kMaxNamespaceId =
- gpu::CommandBufferNamespace::NUM_COMMAND_BUFFER_NAMESPACES;
-
- gpu::CommandBufferNamespace namespace_id =
- static_cast<gpu::CommandBufferNamespace>(c.namespace_id);
- if ((namespace_id < static_cast<int32_t>(kMinNamespaceId)) ||
- (namespace_id >= static_cast<int32_t>(kMaxNamespaceId))) {
- namespace_id = gpu::CommandBufferNamespace::INVALID;
- }
- const CommandBufferId command_buffer_id =
- CommandBufferId::FromUnsafeValue(c.command_buffer_id());
- const uint64_t release = c.release_count();
-
- gpu::SyncToken sync_token;
- sync_token.Set(namespace_id, command_buffer_id, release);
- return client_->OnWaitSyncToken(sync_token) ? error::kDeferCommandUntilLater
- : error::kNoError;
-}
-
error::Error GLES2DecoderImpl::HandleDiscardBackbufferCHROMIUM(
uint32_t immediate_data_size,
const volatile void* cmd_data) {
@@ -16975,7 +17322,7 @@ void GLES2DecoderImpl::DoCopyTextureCHROMIUM(
GLenum format =
TextureManager::ExtractFormatFromStorageFormat(internal_format);
if (!texture_manager()->ValidateTextureParameters(
- GetErrorState(), kFunctionName, true, format, dest_type,
+ error_state_.get(), kFunctionName, true, format, dest_type,
internal_format, dest_level)) {
return;
}
@@ -17189,8 +17536,9 @@ void GLES2DecoderImpl::CopySubTextureHelper(const char* function_name,
// See: https://crbug.com/586476
int32_t max_x;
int32_t max_y;
- if (!SafeAddInt32(x, width, &max_x) || !SafeAddInt32(y, height, &max_y) ||
- x < 0 || y < 0 || max_x > source_width || max_y > source_height) {
+ if (!base::CheckAdd(x, width).AssignIfValid(&max_x) ||
+ !base::CheckAdd(y, height).AssignIfValid(&max_y) || x < 0 || y < 0 ||
+ max_x > source_width || max_y > source_height) {
LOCAL_SET_GL_ERROR(GL_INVALID_VALUE, function_name,
"source texture bad dimensions");
return;
@@ -17307,8 +17655,8 @@ void GLES2DecoderImpl::CopySubTextureHelper(const char* function_name,
if (image && dest_internal_format == source_internal_format &&
dest_level == 0 && !unpack_flip_y && !unpack_premultiply_alpha_change &&
!dither) {
- ScopedTextureBinder binder(&state_, dest_texture->service_id(),
- dest_binding_target);
+ ScopedTextureBinder binder(&state_, error_state_.get(),
+ dest_texture->service_id(), dest_binding_target);
if (image->CopyTexSubImage(dest_target, gfx::Point(xoffset, yoffset),
gfx::Rect(x, y, width, height))) {
return;
@@ -17488,7 +17836,7 @@ void GLES2DecoderImpl::TexStorageImpl(GLenum target,
GLsizei level_size;
if (!GetCompressedTexSizeInBytes(
function_name, level_width, level_height, level_depth,
- internal_format, &level_size, state_.GetErrorState())) {
+ internal_format, &level_size, error_state_.get())) {
// GetCompressedTexSizeInBytes() already generates a GL error.
return;
}
@@ -17617,7 +17965,7 @@ void GLES2DecoderImpl::DoTexStorage2DImageCHROMIUM(GLenum target,
width, "height", height);
ScopedGLErrorSuppressor suppressor(
- "GLES2CmdDecoder::DoTexStorage2DImageCHROMIUM", state_.GetErrorState());
+ "GLES2CmdDecoder::DoTexStorage2DImageCHROMIUM", error_state_.get());
if (!texture_manager()->ValidForTarget(target, 0, width, height, 1)) {
LOCAL_SET_GL_ERROR(GL_INVALID_VALUE, "glTexStorage2DImageCHROMIUM",
@@ -17685,7 +18033,9 @@ void GLES2DecoderImpl::DoTexStorage2DImageCHROMIUM(GLenum target,
texture_manager()->SetLevelInfo(
texture_ref, target, 0, image->GetInternalFormat(), width, height, 1, 0,
- image->GetInternalFormat(), GL_UNSIGNED_BYTE, cleared_rect);
+ TextureManager::ExtractFormatFromStorageFormat(
+ image->GetInternalFormat()),
+ GL_UNSIGNED_BYTE, cleared_rect);
texture_manager()->SetLevelImage(texture_ref, target, 0, image.get(),
Texture::BOUND);
@@ -17760,7 +18110,6 @@ void GLES2DecoderImpl::DoCreateAndConsumeTextureINTERNAL(
void GLES2DecoderImpl::DoCreateAndTexStorage2DSharedImageINTERNAL(
GLuint client_id,
- GLenum internal_format,
const volatile GLbyte* data) {
TRACE_EVENT2("gpu",
"GLES2DecoderImpl::DoCreateAndTexStorage2DSharedImageCHROMIUM",
@@ -17985,7 +18334,7 @@ void GLES2DecoderImpl::BindTexImage2DCHROMIUMImpl(const char* function_name,
{
ScopedGLErrorSuppressor suppressor(
- "GLES2DecoderImpl::DoBindTexImage2DCHROMIUM", GetErrorState());
+ "GLES2DecoderImpl::DoBindTexImage2DCHROMIUM", error_state_.get());
// Note: We fallback to using CopyTexImage() before the texture is used
// when BindTexImage() fails.
@@ -18001,10 +18350,11 @@ void GLES2DecoderImpl::BindTexImage2DCHROMIUMImpl(const char* function_name,
gfx::Size size = image->GetSize();
GLenum texture_internalformat =
internalformat ? internalformat : image->GetInternalFormat();
- texture_manager()->SetLevelInfo(texture_ref, target, 0,
- texture_internalformat, size.width(),
- size.height(), 1, 0, texture_internalformat,
- GL_UNSIGNED_BYTE, gfx::Rect(size));
+ texture_manager()->SetLevelInfo(
+ texture_ref, target, 0, texture_internalformat, size.width(),
+ size.height(), 1, 0,
+ TextureManager::ExtractFormatFromStorageFormat(texture_internalformat),
+ GL_UNSIGNED_BYTE, gfx::Rect(size));
texture_manager()->SetLevelImage(texture_ref, target, 0, image, image_state);
}
@@ -18039,7 +18389,7 @@ void GLES2DecoderImpl::DoReleaseTexImage2DCHROMIUM(
if (image_state == Texture::BOUND) {
ScopedGLErrorSuppressor suppressor(
- "GLES2DecoderImpl::DoReleaseTexImage2DCHROMIUM", GetErrorState());
+ "GLES2DecoderImpl::DoReleaseTexImage2DCHROMIUM", error_state_.get());
image->ReleaseTexImage(target);
texture_manager()->SetLevelInfo(texture_ref, target, 0, GL_RGBA, 0, 0, 1, 0,
@@ -18437,7 +18787,7 @@ error::Error GLES2DecoderImpl::HandleMapBufferRange(
return error::kNoError;
}
Buffer* buffer = buffer_manager()->RequestBufferAccess(
- &state_, target, offset, size, func_name);
+ &state_, error_state_.get(), target, offset, size, func_name);
if (!buffer) {
// An error is already set.
return error::kNoError;
@@ -18628,6 +18978,83 @@ void GLES2DecoderImpl::DoFlushMappedBufferRange(
api()->glFlushMappedBufferRangeFn(target, offset, size);
}
+void GLES2DecoderImpl::DoScheduleDCLayerCHROMIUM(GLuint y_texture_id,
+ GLuint uv_texture_id,
+ GLint z_order,
+ GLint content_x,
+ GLint content_y,
+ GLint content_width,
+ GLint content_height,
+ GLint quad_x,
+ GLint quad_y,
+ GLint quad_width,
+ GLint quad_height,
+ GLfloat transform_c1r1,
+ GLfloat transform_c2r1,
+ GLfloat transform_c1r2,
+ GLfloat transform_c2r2,
+ GLfloat transform_tx,
+ GLfloat transform_ty,
+ GLboolean is_clipped,
+ GLint clip_x,
+ GLint clip_y,
+ GLint clip_width,
+ GLint clip_height,
+ GLuint protected_video_type) {
+ if (protected_video_type >
+ static_cast<GLuint>(ui::ProtectedVideoType::kMaxValue)) {
+ LOCAL_SET_GL_ERROR(GL_INVALID_VALUE, "glScheduleDCLayerCHROMIUM",
+ "invalid protected video type");
+ return;
+ }
+
+ GLuint texture_ids[] = {y_texture_id, uv_texture_id};
+ scoped_refptr<gl::GLImage> images[2];
+ size_t i = 0;
+ for (auto& texture_id : texture_ids) {
+ if (!texture_id) {
+ LOCAL_SET_GL_ERROR(GL_INVALID_VALUE, "glScheduleDCLayerCHROMIUM",
+ "invalid texture");
+ return;
+ }
+ TextureRef* ref = texture_manager()->GetTexture(texture_id);
+ if (!ref) {
+ LOCAL_SET_GL_ERROR(GL_INVALID_VALUE, "glScheduleDCLayerCHROMIUM",
+ "unknown texture");
+ return;
+ }
+ Texture::ImageState image_state;
+ gl::GLImage* image = ref->texture()->GetLevelImage(ref->texture()->target(),
+ 0, &image_state);
+ if (!image) {
+ LOCAL_SET_GL_ERROR(GL_INVALID_VALUE, "glScheduleDCLayerCHROMIUM",
+ "unsupported texture format");
+ return;
+ }
+ images[i++] = scoped_refptr<gl::GLImage>(image);
+ }
+
+ ui::DCRendererLayerParams params;
+ params.y_image = std::move(images[0]);
+ params.uv_image = std::move(images[1]);
+ params.z_order = z_order;
+ params.content_rect =
+ gfx::Rect(content_x, content_y, content_width, content_height);
+ params.quad_rect = gfx::Rect(quad_x, quad_y, quad_width, quad_height);
+ params.transform =
+ gfx::Transform(transform_c1r1, transform_c2r1, transform_c1r2,
+ transform_c2r2, transform_tx, transform_ty);
+ params.is_clipped = is_clipped;
+ params.clip_rect = gfx::Rect(clip_x, clip_y, clip_width, clip_height);
+ params.protected_video_type =
+ static_cast<ui::ProtectedVideoType>(protected_video_type);
+
+ if (!surface_->ScheduleDCLayer(params)) {
+ LOCAL_SET_GL_ERROR(GL_INVALID_OPERATION, "glScheduleDCLayerCHROMIUM",
+ "failed to schedule DCLayer");
+ }
+}
+
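The rewritten ScheduleDCLayer path above carries every parameter inline in the command instead of the removed shared-state plus shared-memory packing, and fixes the texture list to a Y/UV pair. The six transform scalars feed gfx::Transform's 2D-affine constructor; the mapping, isolated (helper name illustrative):

  #include "ui/gfx/transform.h"

  // (c1r1 c2r1 tx)   column/row naming as in the command's fields; the
  // (c1r2 c2r2 ty)   6-argument gfx::Transform constructor builds exactly
  //                  this 2D affine matrix.
  gfx::Transform MakeOverlayTransform(float c1r1, float c2r1, float c1r2,
                                      float c2r2, float tx, float ty) {
    return gfx::Transform(c1r1, c2r1, c1r2, c2r2, tx, ty);
  }
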
// Note that GL_LOST_CONTEXT is specific to GLES.
// For desktop GL we have to query the reset status proactively.
void GLES2DecoderImpl::OnContextLostError() {
@@ -18807,7 +19234,8 @@ class PathCommandValidatorContext {
DCHECK_LE(transforms_component_count, 12U);
uint32_t one_transform_size = sizeof(GLfloat) * transforms_component_count;
uint32_t transforms_size = 0;
- if (!SafeMultiplyUint32(one_transform_size, num_paths, &transforms_size)) {
+ if (!base::CheckMul(one_transform_size, num_paths)
+ .AssignIfValid(&transforms_size)) {
error_ = error::kOutOfBounds;
return false;
}
@@ -18842,7 +19270,7 @@ class PathCommandValidatorContext {
uint32_t shm_offset,
std::unique_ptr<GLuint[]>* out_buffer) {
uint32_t paths_size = 0;
- if (!SafeMultiplyUint32(num_paths, sizeof(T), &paths_size)) {
+ if (!base::CheckMul(num_paths, sizeof(T)).AssignIfValid(&paths_size)) {
error_ = error::kOutOfBounds;
return false;
}
@@ -19022,7 +19450,8 @@ error::Error GLES2DecoderImpl::HandlePathCommandsCHROMIUM(
uint32_t coords_size = 0;
uint32_t coord_type_size =
GLES2Util::GetGLTypeSizeForPathCoordType(coord_type);
- if (!SafeMultiplyUint32(num_coords, coord_type_size, &coords_size))
+ if (!base::CheckMul(num_coords, coord_type_size)
+ .AssignIfValid(&coords_size))
return error::kOutOfBounds;
uint32_t coords_shm_id = static_cast<uint32_t>(c.coords_shm_id);
@@ -19656,13 +20085,9 @@ void GLES2DecoderImpl::ClearScheduleCALayerState() {
ca_layer_shared_state_.reset();
}
-void GLES2DecoderImpl::ClearScheduleDCLayerState() {
- dc_layer_shared_state_.reset();
-}
-
void GLES2DecoderImpl::ClearFramebufferForWorkaround(GLbitfield mask) {
ScopedGLErrorSuppressor suppressor("GLES2DecoderImpl::ClearWorkaround",
- GetErrorState());
+ error_state_.get());
clear_framebuffer_blit_->ClearFramebuffer(
this, gfx::Size(viewport_max_width_, viewport_max_height_), mask,
state_.color_clear_red, state_.color_clear_green, state_.color_clear_blue,
diff --git a/chromium/gpu/command_buffer/service/gles2_cmd_decoder.h b/chromium/gpu/command_buffer/service/gles2_cmd_decoder.h
index 01be2ce23d3..37f590e1cf5 100644
--- a/chromium/gpu/command_buffer/service/gles2_cmd_decoder.h
+++ b/chromium/gpu/command_buffer/service/gles2_cmd_decoder.h
@@ -65,6 +65,10 @@ struct DisallowedFeatures {
oes_texture_half_float_linear = false;
}
+ bool operator==(const DisallowedFeatures& other) const {
+ return !std::memcmp(this, &other, sizeof(*this));
+ }
+
bool npot_support = false;
bool chromium_color_buffer_float_rgba = false;
bool chromium_color_buffer_float_rgb = false;
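
The memcmp-based operator== above is byte-wise, so it stays correct only while DisallowedFeatures remains a padding-free aggregate of bools that is always value-initialized. A field-wise comparison is the layout-proof alternative (sketch; only the members visible in this hunk are listed, the rest of the struct is elided upstream):

  bool FieldwiseEquals(const DisallowedFeatures& a,
                       const DisallowedFeatures& b) {
    return a.npot_support == b.npot_support &&
           a.chromium_color_buffer_float_rgba ==
               b.chromium_color_buffer_float_rgba &&
           a.chromium_color_buffer_float_rgb ==
               b.chromium_color_buffer_float_rgb;
    // ...remaining members compared the same way.
  }
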
diff --git a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_autogen.h b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_autogen.h
index 0952302390b..912d5b30d38 100644
--- a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_autogen.h
+++ b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_autogen.h
@@ -739,7 +739,7 @@ error::Error GLES2DecoderImpl::HandleDeleteBuffersImmediate(
cmd_data);
GLsizei n = static_cast<GLsizei>(c.n);
uint32_t buffers_size;
- if (!SafeMultiplyUint32(n, sizeof(GLuint), &buffers_size)) {
+ if (!base::CheckMul(n, sizeof(GLuint)).AssignIfValid(&buffers_size)) {
return error::kOutOfBounds;
}
volatile const GLuint* buffers = GetImmediateDataAs<volatile const GLuint*>(
@@ -759,7 +759,7 @@ error::Error GLES2DecoderImpl::HandleDeleteFramebuffersImmediate(
cmd_data);
GLsizei n = static_cast<GLsizei>(c.n);
uint32_t framebuffers_size;
- if (!SafeMultiplyUint32(n, sizeof(GLuint), &framebuffers_size)) {
+ if (!base::CheckMul(n, sizeof(GLuint)).AssignIfValid(&framebuffers_size)) {
return error::kOutOfBounds;
}
volatile const GLuint* framebuffers =
@@ -780,7 +780,7 @@ error::Error GLES2DecoderImpl::HandleDeleteRenderbuffersImmediate(
cmd_data);
GLsizei n = static_cast<GLsizei>(c.n);
uint32_t renderbuffers_size;
- if (!SafeMultiplyUint32(n, sizeof(GLuint), &renderbuffers_size)) {
+ if (!base::CheckMul(n, sizeof(GLuint)).AssignIfValid(&renderbuffers_size)) {
return error::kOutOfBounds;
}
volatile const GLuint* renderbuffers =
@@ -803,7 +803,7 @@ error::Error GLES2DecoderImpl::HandleDeleteSamplersImmediate(
cmd_data);
GLsizei n = static_cast<GLsizei>(c.n);
uint32_t samplers_size;
- if (!SafeMultiplyUint32(n, sizeof(GLuint), &samplers_size)) {
+ if (!base::CheckMul(n, sizeof(GLuint)).AssignIfValid(&samplers_size)) {
return error::kOutOfBounds;
}
volatile const GLuint* samplers = GetImmediateDataAs<volatile const GLuint*>(
@@ -834,7 +834,7 @@ error::Error GLES2DecoderImpl::HandleDeleteTexturesImmediate(
cmd_data);
GLsizei n = static_cast<GLsizei>(c.n);
uint32_t textures_size;
- if (!SafeMultiplyUint32(n, sizeof(GLuint), &textures_size)) {
+ if (!base::CheckMul(n, sizeof(GLuint)).AssignIfValid(&textures_size)) {
return error::kOutOfBounds;
}
volatile const GLuint* textures = GetImmediateDataAs<volatile const GLuint*>(
@@ -857,7 +857,7 @@ error::Error GLES2DecoderImpl::HandleDeleteTransformFeedbacksImmediate(
cmd_data);
GLsizei n = static_cast<GLsizei>(c.n);
uint32_t ids_size;
- if (!SafeMultiplyUint32(n, sizeof(GLuint), &ids_size)) {
+ if (!base::CheckMul(n, sizeof(GLuint)).AssignIfValid(&ids_size)) {
return error::kOutOfBounds;
}
volatile const GLuint* ids = GetImmediateDataAs<volatile const GLuint*>(
@@ -1111,7 +1111,7 @@ error::Error GLES2DecoderImpl::HandleGenBuffersImmediate(
*static_cast<const volatile gles2::cmds::GenBuffersImmediate*>(cmd_data);
GLsizei n = static_cast<GLsizei>(c.n);
uint32_t buffers_size;
- if (!SafeMultiplyUint32(n, sizeof(GLuint), &buffers_size)) {
+ if (!base::CheckMul(n, sizeof(GLuint)).AssignIfValid(&buffers_size)) {
return error::kOutOfBounds;
}
volatile GLuint* buffers = GetImmediateDataAs<volatile GLuint*>(
@@ -1151,7 +1151,7 @@ error::Error GLES2DecoderImpl::HandleGenFramebuffersImmediate(
cmd_data);
GLsizei n = static_cast<GLsizei>(c.n);
uint32_t framebuffers_size;
- if (!SafeMultiplyUint32(n, sizeof(GLuint), &framebuffers_size)) {
+ if (!base::CheckMul(n, sizeof(GLuint)).AssignIfValid(&framebuffers_size)) {
return error::kOutOfBounds;
}
volatile GLuint* framebuffers = GetImmediateDataAs<volatile GLuint*>(
@@ -1177,7 +1177,7 @@ error::Error GLES2DecoderImpl::HandleGenRenderbuffersImmediate(
cmd_data);
GLsizei n = static_cast<GLsizei>(c.n);
uint32_t renderbuffers_size;
- if (!SafeMultiplyUint32(n, sizeof(GLuint), &renderbuffers_size)) {
+ if (!base::CheckMul(n, sizeof(GLuint)).AssignIfValid(&renderbuffers_size)) {
return error::kOutOfBounds;
}
volatile GLuint* renderbuffers = GetImmediateDataAs<volatile GLuint*>(
@@ -1204,7 +1204,7 @@ error::Error GLES2DecoderImpl::HandleGenSamplersImmediate(
*static_cast<const volatile gles2::cmds::GenSamplersImmediate*>(cmd_data);
GLsizei n = static_cast<GLsizei>(c.n);
uint32_t samplers_size;
- if (!SafeMultiplyUint32(n, sizeof(GLuint), &samplers_size)) {
+ if (!base::CheckMul(n, sizeof(GLuint)).AssignIfValid(&samplers_size)) {
return error::kOutOfBounds;
}
volatile GLuint* samplers = GetImmediateDataAs<volatile GLuint*>(
@@ -1229,7 +1229,7 @@ error::Error GLES2DecoderImpl::HandleGenTexturesImmediate(
*static_cast<const volatile gles2::cmds::GenTexturesImmediate*>(cmd_data);
GLsizei n = static_cast<GLsizei>(c.n);
uint32_t textures_size;
- if (!SafeMultiplyUint32(n, sizeof(GLuint), &textures_size)) {
+ if (!base::CheckMul(n, sizeof(GLuint)).AssignIfValid(&textures_size)) {
return error::kOutOfBounds;
}
volatile GLuint* textures = GetImmediateDataAs<volatile GLuint*>(
@@ -1257,7 +1257,7 @@ error::Error GLES2DecoderImpl::HandleGenTransformFeedbacksImmediate(
cmd_data);
GLsizei n = static_cast<GLsizei>(c.n);
uint32_t ids_size;
- if (!SafeMultiplyUint32(n, sizeof(GLuint), &ids_size)) {
+ if (!base::CheckMul(n, sizeof(GLuint)).AssignIfValid(&ids_size)) {
return error::kOutOfBounds;
}
volatile GLuint* ids =
@@ -2736,6 +2736,37 @@ error::Error GLES2DecoderImpl::HandleShaderSourceBucket(
return error::kNoError;
}
+error::Error GLES2DecoderImpl::HandleMultiDrawBeginCHROMIUM(
+ uint32_t immediate_data_size,
+ const volatile void* cmd_data) {
+ const volatile gles2::cmds::MultiDrawBeginCHROMIUM& c =
+ *static_cast<const volatile gles2::cmds::MultiDrawBeginCHROMIUM*>(
+ cmd_data);
+ if (!features().webgl_multi_draw) {
+ return error::kUnknownCommand;
+ }
+
+ GLsizei drawcount = static_cast<GLsizei>(c.drawcount);
+ if (drawcount < 0) {
+ LOCAL_SET_GL_ERROR(GL_INVALID_VALUE, "glMultiDrawBeginCHROMIUM",
+ "drawcount < 0");
+ return error::kNoError;
+ }
+ DoMultiDrawBeginCHROMIUM(drawcount);
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderImpl::HandleMultiDrawEndCHROMIUM(
+ uint32_t immediate_data_size,
+ const volatile void* cmd_data) {
+ if (!features().webgl_multi_draw) {
+ return error::kUnknownCommand;
+ }
+
+ DoMultiDrawEndCHROMIUM();
+ return error::kNoError;
+}
+
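On the wire, one multi-draw batch is three commands: Begin (the only place a negative drawcount is rejected), one array-carrying command, then End, which replays the batch through the Do* helpers. A sketch of the order a client-side emitter would issue (command names per this file; the helper and its shared-memory variables are hypothetical):

  // Hypothetical emitter illustrating the required command order.
  void EmitMultiDrawArrays(GLenum mode, GLsizei drawcount) {
    helper_->MultiDrawBeginCHROMIUM(drawcount);  // validates drawcount >= 0
    // ...copy firsts/counts into shared memory, then:
    helper_->MultiDrawArraysCHROMIUM(mode, firsts_shm_id, firsts_shm_offset,
                                     counts_shm_id, counts_shm_offset,
                                     drawcount);
    helper_->MultiDrawEndCHROMIUM();             // triggers the actual draws
  }
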
error::Error GLES2DecoderImpl::HandleStencilFunc(
uint32_t immediate_data_size,
const volatile void* cmd_data) {
@@ -4417,7 +4448,7 @@ error::Error GLES2DecoderImpl::HandleGenQueriesEXTImmediate(
cmd_data);
GLsizei n = static_cast<GLsizei>(c.n);
uint32_t queries_size;
- if (!SafeMultiplyUint32(n, sizeof(GLuint), &queries_size)) {
+ if (!base::CheckMul(n, sizeof(GLuint)).AssignIfValid(&queries_size)) {
return error::kOutOfBounds;
}
volatile GLuint* queries = GetImmediateDataAs<volatile GLuint*>(
@@ -4443,7 +4474,7 @@ error::Error GLES2DecoderImpl::HandleDeleteQueriesEXTImmediate(
cmd_data);
GLsizei n = static_cast<GLsizei>(c.n);
uint32_t queries_size;
- if (!SafeMultiplyUint32(n, sizeof(GLuint), &queries_size)) {
+ if (!base::CheckMul(n, sizeof(GLuint)).AssignIfValid(&queries_size)) {
return error::kOutOfBounds;
}
volatile const GLuint* queries = GetImmediateDataAs<volatile const GLuint*>(
@@ -4535,7 +4566,7 @@ error::Error GLES2DecoderImpl::HandleGenVertexArraysOESImmediate(
cmd_data);
GLsizei n = static_cast<GLsizei>(c.n);
uint32_t arrays_size;
- if (!SafeMultiplyUint32(n, sizeof(GLuint), &arrays_size)) {
+ if (!base::CheckMul(n, sizeof(GLuint)).AssignIfValid(&arrays_size)) {
return error::kOutOfBounds;
}
volatile GLuint* arrays =
@@ -4561,7 +4592,7 @@ error::Error GLES2DecoderImpl::HandleDeleteVertexArraysOESImmediate(
cmd_data);
GLsizei n = static_cast<GLsizei>(c.n);
uint32_t arrays_size;
- if (!SafeMultiplyUint32(n, sizeof(GLuint), &arrays_size)) {
+ if (!base::CheckMul(n, sizeof(GLuint)).AssignIfValid(&arrays_size)) {
return error::kOutOfBounds;
}
volatile const GLuint* arrays = GetImmediateDataAs<volatile const GLuint*>(
@@ -5090,6 +5121,44 @@ error::Error GLES2DecoderImpl::HandleFlushDriverCachesCHROMIUM(
return error::kNoError;
}
+error::Error GLES2DecoderImpl::HandleScheduleDCLayerCHROMIUM(
+ uint32_t immediate_data_size,
+ const volatile void* cmd_data) {
+ const volatile gles2::cmds::ScheduleDCLayerCHROMIUM& c =
+ *static_cast<const volatile gles2::cmds::ScheduleDCLayerCHROMIUM*>(
+ cmd_data);
+ GLuint y_texture_id = static_cast<GLuint>(c.y_texture_id);
+ GLuint uv_texture_id = static_cast<GLuint>(c.uv_texture_id);
+ GLint z_order = static_cast<GLint>(c.z_order);
+ GLint content_x = static_cast<GLint>(c.content_x);
+ GLint content_y = static_cast<GLint>(c.content_y);
+ GLint content_width = static_cast<GLint>(c.content_width);
+ GLint content_height = static_cast<GLint>(c.content_height);
+ GLint quad_x = static_cast<GLint>(c.quad_x);
+ GLint quad_y = static_cast<GLint>(c.quad_y);
+ GLint quad_width = static_cast<GLint>(c.quad_width);
+ GLint quad_height = static_cast<GLint>(c.quad_height);
+ GLfloat transform_c1r1 = static_cast<GLfloat>(c.transform_c1r1);
+ GLfloat transform_c2r1 = static_cast<GLfloat>(c.transform_c2r1);
+ GLfloat transform_c1r2 = static_cast<GLfloat>(c.transform_c1r2);
+ GLfloat transform_c2r2 = static_cast<GLfloat>(c.transform_c2r2);
+ GLfloat transform_tx = static_cast<GLfloat>(c.transform_tx);
+ GLfloat transform_ty = static_cast<GLfloat>(c.transform_ty);
+ GLboolean is_clipped = static_cast<GLboolean>(c.is_clipped);
+ GLint clip_x = static_cast<GLint>(c.clip_x);
+ GLint clip_y = static_cast<GLint>(c.clip_y);
+ GLint clip_width = static_cast<GLint>(c.clip_width);
+ GLint clip_height = static_cast<GLint>(c.clip_height);
+ GLuint protected_video_type = static_cast<GLuint>(c.protected_video_type);
+ DoScheduleDCLayerCHROMIUM(
+ y_texture_id, uv_texture_id, z_order, content_x, content_y, content_width,
+ content_height, quad_x, quad_y, quad_width, quad_height, transform_c1r1,
+ transform_c2r1, transform_c1r2, transform_c2r2, transform_tx,
+ transform_ty, is_clipped, clip_x, clip_y, clip_width, clip_height,
+ protected_video_type);
+ return error::kNoError;
+}
+
error::Error GLES2DecoderImpl::HandleMatrixLoadfCHROMIUMImmediate(
uint32_t immediate_data_size,
const volatile void* cmd_data) {
@@ -5484,7 +5553,6 @@ GLES2DecoderImpl::HandleCreateAndTexStorage2DSharedImageINTERNALImmediate(
CreateAndTexStorage2DSharedImageINTERNALImmediate*>(
cmd_data);
GLuint texture = static_cast<GLuint>(c.texture);
- GLenum internalFormat = static_cast<GLenum>(c.internalFormat);
uint32_t mailbox_size;
if (!GLES2Util::ComputeDataSize<GLbyte, 16>(1, &mailbox_size)) {
return error::kOutOfBounds;
@@ -5494,16 +5562,10 @@ GLES2DecoderImpl::HandleCreateAndTexStorage2DSharedImageINTERNALImmediate(
}
volatile const GLbyte* mailbox = GetImmediateDataAs<volatile const GLbyte*>(
c, mailbox_size, immediate_data_size);
- if (!validators_->texture_internal_format.IsValid(internalFormat)) {
- LOCAL_SET_GL_ERROR_INVALID_ENUM(
- "glCreateAndTexStorage2DSharedImageINTERNAL", internalFormat,
- "internalFormat");
- return error::kNoError;
- }
if (mailbox == nullptr) {
return error::kOutOfBounds;
}
- DoCreateAndTexStorage2DSharedImageINTERNAL(texture, internalFormat, mailbox);
+ DoCreateAndTexStorage2DSharedImageINTERNAL(texture, mailbox);
return error::kNoError;
}
diff --git a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_mock.h b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_mock.h
index 563e3ff8aa0..334ee910b73 100644
--- a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_mock.h
+++ b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_mock.h
@@ -87,7 +87,7 @@ class MockGLES2Decoder : public GLES2Decoder {
MOCK_CONST_METHOD0(RestoreGlobalState, void());
MOCK_CONST_METHOD0(RestoreProgramBindings, void());
MOCK_METHOD0(RestoreRenderbufferBindings, void());
- MOCK_CONST_METHOD1(RestoreTextureState, void(unsigned service_id));
+ MOCK_METHOD1(RestoreTextureState, void(unsigned service_id));
MOCK_CONST_METHOD1(RestoreTextureUnitBindings, void(unsigned unit));
MOCK_METHOD1(RestoreVertexAttribArray, void(unsigned index));
MOCK_CONST_METHOD0(RestoreDeviceWindowRectangles, void());
@@ -101,8 +101,6 @@ class MockGLES2Decoder : public GLES2Decoder {
GetTransformFeedbackManager, gpu::gles2::TransformFeedbackManager*());
MOCK_METHOD0(GetVertexArrayManager, gpu::gles2::VertexArrayManager*());
MOCK_METHOD0(GetImageManagerForTest, gpu::gles2::ImageManager*());
- MOCK_METHOD1(
- SetResizeCallback, void(const base::Callback<void(gfx::Size, float)>&));
MOCK_METHOD1(SetIgnoreCachedStateForTest, void(bool ignore));
MOCK_METHOD1(SetForceShaderNameHashingForTest, void(bool force));
MOCK_METHOD1(SetAllowExit, void(bool allow));
diff --git a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough.cc b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough.cc
index b363a2f5849..f2b5153e3a3 100644
--- a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough.cc
+++ b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough.cc
@@ -8,6 +8,7 @@
#include <utility>
#include "base/callback.h"
+#include "base/stl_util.h"
#include "base/strings/string_split.h"
#include "gpu/command_buffer/service/command_buffer_service.h"
#include "gpu/command_buffer/service/decoder_client.h"
@@ -15,6 +16,7 @@
#include "gpu/command_buffer/service/gl_utils.h"
#include "gpu/command_buffer/service/gpu_fence_manager.h"
#include "gpu/command_buffer/service/gpu_tracer.h"
+#include "gpu/command_buffer/service/multi_draw_manager.h"
#include "gpu/command_buffer/service/passthrough_discardable_manager.h"
#include "gpu/command_buffer/service/program_cache.h"
#include "gpu/command_buffer/service/shared_image_representation.h"
@@ -216,15 +218,28 @@ void PassthroughResources::Destroy(gl::GLApi* api) {
DestroyPendingTextures(have_context);
}
-ScopedFramebufferBindingReset::ScopedFramebufferBindingReset(gl::GLApi* api)
- : api_(api), draw_framebuffer_(0), read_framebuffer_(0) {
- api_->glGetIntegervFn(GL_DRAW_FRAMEBUFFER_BINDING, &draw_framebuffer_);
- api_->glGetIntegervFn(GL_READ_FRAMEBUFFER_BINDING, &read_framebuffer_);
+ScopedFramebufferBindingReset::ScopedFramebufferBindingReset(
+ gl::GLApi* api,
+ bool supports_separate_fbo_bindings)
+ : api_(api),
+ supports_separate_fbo_bindings_(supports_separate_fbo_bindings),
+ draw_framebuffer_(0),
+ read_framebuffer_(0) {
+ if (supports_separate_fbo_bindings_) {
+ api_->glGetIntegervFn(GL_DRAW_FRAMEBUFFER_BINDING, &draw_framebuffer_);
+ api_->glGetIntegervFn(GL_READ_FRAMEBUFFER_BINDING, &read_framebuffer_);
+ } else {
+ api_->glGetIntegervFn(GL_FRAMEBUFFER_BINDING, &draw_framebuffer_);
+ }
}
ScopedFramebufferBindingReset::~ScopedFramebufferBindingReset() {
- api_->glBindFramebufferEXTFn(GL_DRAW_FRAMEBUFFER, draw_framebuffer_);
- api_->glBindFramebufferEXTFn(GL_READ_FRAMEBUFFER, read_framebuffer_);
+ if (supports_separate_fbo_bindings_) {
+ api_->glBindFramebufferEXTFn(GL_DRAW_FRAMEBUFFER, draw_framebuffer_);
+ api_->glBindFramebufferEXTFn(GL_READ_FRAMEBUFFER, read_framebuffer_);
+ } else {
+ api_->glBindFramebufferEXTFn(GL_FRAMEBUFFER, draw_framebuffer_);
+ }
}
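
ES2-style contexts expose a single framebuffer target, so the scoped reset above now queries and restores GL_FRAMEBUFFER_BINDING when separate draw/read bindings are unavailable. The query side, isolated (helper name illustrative):

  // Saves the framebuffer binding under either binding model; with a single
  // target, the one saved value later restores GL_FRAMEBUFFER.
  GLint SaveDrawFramebufferBinding(gl::GLApi* api, bool separate_bindings) {
    GLint binding = 0;
    api->glGetIntegervFn(separate_bindings ? GL_DRAW_FRAMEBUFFER_BINDING
                                           : GL_FRAMEBUFFER_BINDING,
                         &binding);
    return binding;
  }
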
ScopedRenderbufferBindingReset::ScopedRenderbufferBindingReset(gl::GLApi* api)
@@ -342,9 +357,13 @@ GLES2DecoderPassthroughImpl::EmulatedDefaultFramebuffer::
EmulatedDefaultFramebuffer(
gl::GLApi* api,
const EmulatedDefaultFramebufferFormat& format_in,
- const FeatureInfo* feature_info)
- : api(api), format(format_in) {
- ScopedFramebufferBindingReset scoped_fbo_reset(api);
+ const FeatureInfo* feature_info,
+ bool supports_separate_fbo_bindings_in)
+ : api(api),
+ supports_separate_fbo_bindings(supports_separate_fbo_bindings_in),
+ format(format_in) {
+ ScopedFramebufferBindingReset scoped_fbo_reset(
+ api, supports_separate_fbo_bindings);
ScopedRenderbufferBindingReset scoped_renderbuffer_reset(api);
api->glGenFramebuffersEXTFn(1, &framebuffer_service_id);
@@ -413,7 +432,8 @@ GLES2DecoderPassthroughImpl::EmulatedDefaultFramebuffer::SetColorBuffer(
color_texture = std::move(new_color_buffer);
// Bind the new texture to this FBO
- ScopedFramebufferBindingReset scoped_fbo_reset(api);
+ ScopedFramebufferBindingReset scoped_fbo_reset(
+ api, supports_separate_fbo_bindings);
api->glBindFramebufferEXTFn(GL_FRAMEBUFFER, framebuffer_service_id);
api->glFramebufferTexture2DEXTFn(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0,
GL_TEXTURE_2D,
@@ -427,7 +447,8 @@ void GLES2DecoderPassthroughImpl::EmulatedDefaultFramebuffer::Blit(
DCHECK(target != nullptr);
DCHECK(target->size == size);
- ScopedFramebufferBindingReset scoped_fbo_reset(api);
+ ScopedFramebufferBindingReset scoped_fbo_reset(
+ api, supports_separate_fbo_bindings);
api->glBindFramebufferEXTFn(GL_READ_FRAMEBUFFER, framebuffer_service_id);
@@ -476,7 +497,8 @@ bool GLES2DecoderPassthroughImpl::EmulatedDefaultFramebuffer::Resize(
// Check that the framebuffer is complete
{
- ScopedFramebufferBindingReset scoped_fbo_reset(api);
+ ScopedFramebufferBindingReset scoped_fbo_reset(
+ api, supports_separate_fbo_bindings);
api->glBindFramebufferEXTFn(GL_FRAMEBUFFER, framebuffer_service_id);
if (api->glCheckFramebufferStatusEXTFn(GL_FRAMEBUFFER) !=
GL_FRAMEBUFFER_COMPLETE) {
@@ -527,7 +549,8 @@ GLES2DecoderPassthroughImpl::GLES2DecoderPassthroughImpl(
logger_(&debug_marker_manager_,
base::BindRepeating(&DecoderClient::OnConsoleMessage,
base::Unretained(client_),
- 0)),
+ 0),
+ group->gpu_preferences().disable_gl_error_limit),
surface_(),
context_(),
offscreen_(false),
@@ -607,7 +630,7 @@ GLES2Decoder::Error GLES2DecoderPassthroughImpl::DoCommandsImpl(
const unsigned int arg_count = size - 1;
unsigned int command_index = command - kFirstGLES2Command;
- if (command_index < arraysize(command_info)) {
+ if (command_index < base::size(command_info)) {
const CommandInfo& info = command_info[command_index];
unsigned int info_arg_count = static_cast<unsigned int>(info.arg_count);
if ((info.arg_flags == cmd::kFixed && arg_count == info_arg_count) ||
@@ -678,11 +701,21 @@ gpu::ContextResult GLES2DecoderPassthroughImpl::Initialize(
surface_ = surface;
offscreen_ = offscreen;
+ // For WebGL contexts, log GL errors so they appear in devtools. Otherwise
+ // only enable debug logging if requested.
+ bool log_non_errors =
+ group_->gpu_preferences().enable_gpu_driver_debug_logging;
+ InitializeGLDebugLogging(log_non_errors, PassthroughGLDebugMessageCallback,
+ this);
+
// Create GPU Tracer for timing values.
gpu_tracer_.reset(new GPUTracer(this));
gpu_fence_manager_.reset(new GpuFenceManager());
+ multi_draw_manager_.reset(
+ new MultiDrawManager(MultiDrawManager::IndexStorageType::Pointer));
+
auto result =
group_->Initialize(this, attrib_helper.context_type, disallowed_features);
if (result != gpu::ContextResult::kSuccess) {
@@ -706,7 +739,7 @@ gpu::ContextResult GLES2DecoderPassthroughImpl::Initialize(
};
RequestExtensions(api(), requestable_extensions,
kRequiredFunctionalityExtensions,
- arraysize(kRequiredFunctionalityExtensions));
+ base::size(kRequiredFunctionalityExtensions));
if (request_optional_extensions_) {
static constexpr const char* kOptionalFunctionalityExtensions[] = {
@@ -753,7 +786,7 @@ gpu::ContextResult GLES2DecoderPassthroughImpl::Initialize(
};
RequestExtensions(api(), requestable_extensions,
kOptionalFunctionalityExtensions,
- arraysize(kOptionalFunctionalityExtensions));
+ base::size(kOptionalFunctionalityExtensions));
}
context->ReinitializeDynamicBindings();
@@ -846,13 +879,6 @@ gpu::ContextResult GLES2DecoderPassthroughImpl::Initialize(
bound_buffers_[GL_DISPATCH_INDIRECT_BUFFER] = 0;
}
- // For WebGL contexts, log GL errors so they appear in devtools. Otherwise
- // only enable debug logging if requested.
- bool log_non_errors =
- group_->gpu_preferences().enable_gpu_driver_debug_logging;
- InitializeGLDebugLogging(log_non_errors, PassthroughGLDebugMessageCallback,
- this);
-
if (feature_info_->feature_flags().chromium_texture_filtering_hint &&
feature_info_->feature_flags().is_swiftshader) {
api()->glHintFn(GL_TEXTURE_FILTERING_HINT_CHROMIUM, GL_NICEST);
@@ -920,7 +946,8 @@ gpu::ContextResult GLES2DecoderPassthroughImpl::Initialize(
CheckErrorCallbackState();
emulated_back_buffer_ = std::make_unique<EmulatedDefaultFramebuffer>(
- api(), emulated_default_framebuffer_format_, feature_info_.get());
+ api(), emulated_default_framebuffer_format_, feature_info_.get(),
+ supports_separate_fbo_bindings_);
// Make sure to use a non-empty offscreen surface so that the framebuffer is
// complete.
gfx::Size initial_size(
@@ -1051,6 +1078,10 @@ void GLES2DecoderPassthroughImpl::Destroy(bool have_context) {
gpu_tracer_.reset();
}
+ if (multi_draw_manager_.get()) {
+ multi_draw_manager_.reset();
+ }
+
if (!have_context) {
for (auto& fence : deschedule_until_finished_fences_) {
fence->Invalidate();
@@ -1063,10 +1094,6 @@ void GLES2DecoderPassthroughImpl::Destroy(bool have_context) {
surface_ = nullptr;
if (group_) {
- if (group_->has_program_cache()) {
- group_->get_program_cache()->ResetCacheProgramCallback();
- }
-
group_->Destroy(this, have_context);
group_ = nullptr;
}
@@ -1231,13 +1258,6 @@ bool GLES2DecoderPassthroughImpl::MakeCurrent() {
return false;
}
- // Establish the program binary caching callback.
- if (group_->has_program_cache()) {
- auto program_callback = base::BindRepeating(&DecoderClient::CacheShader,
- base::Unretained(client_));
- group_->get_program_cache()->SetCacheProgramCallback(program_callback);
- }
-
ProcessReadPixels(false);
ProcessQueries(false);
@@ -1380,8 +1400,7 @@ void GLES2DecoderPassthroughImpl::RestoreGlobalState() const {}
void GLES2DecoderPassthroughImpl::RestoreProgramBindings() const {}
-void GLES2DecoderPassthroughImpl::RestoreTextureState(
- unsigned service_id) const {}
+void GLES2DecoderPassthroughImpl::RestoreTextureState(unsigned service_id) {}
void GLES2DecoderPassthroughImpl::RestoreTextureUnitBindings(
unsigned unit) const {}
@@ -1944,9 +1963,11 @@ GLES2DecoderPassthroughImpl::PatchGetFramebufferAttachmentParameter(
}
void GLES2DecoderPassthroughImpl::InsertError(GLenum error,
- const std::string&) {
- // Message ignored for now
+ const std::string& message) {
errors_.insert(error);
+ LogGLDebugMessage(GL_DEBUG_SOURCE_API, GL_DEBUG_TYPE_ERROR, error,
+ GL_DEBUG_SEVERITY_HIGH, message.length(), message.c_str(),
+ GetLogger());
}
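// Editor's sketch of the new InsertError() behavior (call site borrowed from
// gles2_cmd_decoder_passthrough_doers.cc in this same patch): one call now
// both records the error for PopError() and forwards the message to the GL
// debug log, so callers need no separate logging step.
InsertError(GL_INVALID_OPERATION,
            "Flush range is not within the original mapping size.");
// -> errors_ gains GL_INVALID_OPERATION, and the text is emitted through
//    LogGLDebugMessage() at GL_DEBUG_SEVERITY_HIGH.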
GLenum GLES2DecoderPassthroughImpl::PopError() {
diff --git a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough.h b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough.h
index 0a36589dc65..ed682013976 100644
--- a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough.h
+++ b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough.h
@@ -46,6 +46,7 @@ namespace gles2 {
class ContextGroup;
class GPUTracer;
+class MultiDrawManager;
class PassthroughAbstractTextureImpl;
struct MappedBuffer {
@@ -110,11 +111,13 @@ struct PassthroughResources {
class ScopedFramebufferBindingReset {
public:
- explicit ScopedFramebufferBindingReset(gl::GLApi* api);
+ explicit ScopedFramebufferBindingReset(gl::GLApi* api,
+ bool supports_separate_fbo_bindings);
~ScopedFramebufferBindingReset();
private:
gl::GLApi* api_;
+ bool supports_separate_fbo_bindings_;
GLint draw_framebuffer_;
GLint read_framebuffer_;
};
@@ -217,7 +220,7 @@ class GPU_GLES2_EXPORT GLES2DecoderPassthroughImpl : public GLES2Decoder {
void RestoreRenderbufferBindings() override;
void RestoreGlobalState() const override;
void RestoreProgramBindings() const override;
- void RestoreTextureState(unsigned service_id) const override;
+ void RestoreTextureState(unsigned service_id) override;
void RestoreTextureUnitBindings(unsigned unit) const override;
void RestoreVertexAttribArray(unsigned index) override;
void RestoreAllExternalTextureBindingsIfNeeded() override;
@@ -575,6 +578,8 @@ class GPU_GLES2_EXPORT GLES2DecoderPassthroughImpl : public GLES2Decoder {
std::unique_ptr<GpuFenceManager> gpu_fence_manager_;
+ std::unique_ptr<MultiDrawManager> multi_draw_manager_;
+
// State tracking of currently bound 2D textures (client IDs)
size_t active_texture_unit_;
@@ -764,7 +769,8 @@ class GPU_GLES2_EXPORT GLES2DecoderPassthroughImpl : public GLES2Decoder {
EmulatedDefaultFramebuffer(
gl::GLApi* api,
const EmulatedDefaultFramebufferFormat& format_in,
- const FeatureInfo* feature_info);
+ const FeatureInfo* feature_info,
+ bool supports_separate_fbo_bindings);
~EmulatedDefaultFramebuffer();
// Set a new color buffer and return the old one
@@ -778,6 +784,7 @@ class GPU_GLES2_EXPORT GLES2DecoderPassthroughImpl : public GLES2Decoder {
void Destroy(bool have_context);
gl::GLApi* api;
+ bool supports_separate_fbo_bindings = false;
// Service ID of the framebuffer
GLuint framebuffer_service_id = 0;
@@ -820,6 +827,9 @@ class GPU_GLES2_EXPORT GLES2DecoderPassthroughImpl : public GLES2Decoder {
GLuint bound_draw_framebuffer_;
GLuint bound_read_framebuffer_;
+ // Whether this context supports separate read and draw framebuffer bindings
+ bool supports_separate_fbo_bindings_ = false;
+
// Tracing
std::unique_ptr<GPUTracer> gpu_tracer_;
const unsigned char* gpu_decoder_category_ = nullptr;
@@ -836,8 +846,6 @@ class GPU_GLES2_EXPORT GLES2DecoderPassthroughImpl : public GLES2Decoder {
// Cache of scratch memory
std::vector<uint8_t> scratch_memory_;
- std::unique_ptr<DCLayerSharedState> dc_layer_shared_state_;
-
// After a second fence is inserted, both the GpuChannelMessageQueue and
// CommandExecutor are descheduled. Once the first fence has completed, both
// get rescheduled.
diff --git a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough_doer_prototypes.h b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough_doer_prototypes.h
index 3b8830c9285..046f7918b8a 100644
--- a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough_doer_prototypes.h
+++ b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough_doer_prototypes.h
@@ -430,6 +430,8 @@ error::Error DoLineWidth(GLfloat width);
error::Error DoLinkProgram(GLuint program);
error::Error DoMemoryBarrierEXT(GLbitfield barriers);
error::Error DoMemoryBarrierByRegion(GLbitfield barriers);
+error::Error DoMultiDrawBeginCHROMIUM(GLsizei drawcount);
+error::Error DoMultiDrawEndCHROMIUM();
error::Error DoPauseTransformFeedback();
error::Error DoPixelStorei(GLenum pname, GLint param);
error::Error DoPolygonOffset(GLfloat factor, GLfloat units);
@@ -874,20 +876,29 @@ error::Error DoScheduleCALayerCHROMIUM(GLuint contents_texture_id,
error::Error DoScheduleCALayerInUseQueryCHROMIUM(
GLuint n,
const volatile GLuint* textures);
-error::Error DoScheduleDCLayerSharedStateCHROMIUM(GLfloat opacity,
- GLboolean is_clipped,
- const GLfloat* clip_rect,
- GLint z_order,
- const GLfloat* transform);
-error::Error DoScheduleDCLayerCHROMIUM(
- GLsizei num_textures,
- const volatile GLuint* contents_texture_ids,
- const GLfloat* contents_rect,
- GLuint background_color,
- GLuint edge_aa_mask,
- GLenum filter,
- const GLfloat* bounds_rect,
- GLuint protected_video_type);
+error::Error DoScheduleDCLayerCHROMIUM(GLuint y_texture_id,
+ GLuint uv_texture_id,
+ GLint z_order,
+ GLint content_x,
+ GLint content_y,
+ GLint content_width,
+ GLint content_height,
+ GLint quad_x,
+ GLint quad_y,
+ GLint quad_width,
+ GLint quad_height,
+ GLfloat transform_c1r1,
+ GLfloat transform_c2r1,
+ GLfloat transform_c1r2,
+ GLfloat transform_c2r2,
+ GLfloat transform_tx,
+ GLfloat transform_ty,
+ GLboolean is_clipped,
+ GLint clip_x,
+ GLint clip_y,
+ GLint clip_width,
+ GLint clip_height,
+ GLuint protected_video_type);
error::Error DoCommitOverlayPlanesCHROMIUM(uint64_t swap_id, GLbitfield flags);
error::Error DoSetColorSpaceMetadataCHROMIUM(GLuint texture_id,
gfx::ColorSpace color_space);
@@ -1073,7 +1084,6 @@ error::Error DoUnlockDiscardableTextureCHROMIUM(GLuint texture_id);
error::Error DoLockDiscardableTextureCHROMIUM(GLuint texture_id);
error::Error DoCreateAndTexStorage2DSharedImageINTERNAL(
GLuint client_id,
- GLenum internal_format,
const volatile GLbyte* mailbox);
error::Error DoBeginSharedImageAccessDirectCHROMIUM(GLuint client_id,
GLenum mode);
diff --git a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough_doers.cc b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough_doers.cc
index b4edd5dfab8..fc5aca5b2bb 100644
--- a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough_doers.cc
+++ b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough_doers.cc
@@ -10,6 +10,7 @@
#include "gpu/command_buffer/service/decoder_client.h"
#include "gpu/command_buffer/service/gpu_fence_manager.h"
#include "gpu/command_buffer/service/gpu_tracer.h"
+#include "gpu/command_buffer/service/multi_draw_manager.h"
#include "gpu/command_buffer/service/passthrough_discardable_manager.h"
#include "gpu/command_buffer/service/shared_image_factory.h"
#include "gpu/command_buffer/service/shared_image_representation.h"
@@ -1218,7 +1219,7 @@ error::Error GLES2DecoderPassthroughImpl::DoFlushMappedBufferRange(
base::CheckedNumeric<size_t> range_start(offset);
base::CheckedNumeric<size_t> range_end = offset + size;
- if (!range_end.IsValid() && range_end.ValueOrDefault(0) > map_info.size) {
+ if (!range_end.IsValid() || range_end.ValueOrDefault(0) > map_info.size) {
InsertError(GL_INVALID_OPERATION,
"Flush range is not within the original mapping size.");
return error::kNoError;
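// Editor's note on the one-character fix above: with '&&' the guard was
// unsatisfiable -- an overflowed range_end fails IsValid() but then yields
// ValueOrDefault(0) == 0, which can never exceed map_info.size, while a
// valid range_end short-circuits on the first clause. '||' correctly
// rejects both an overflowed range and one past the original mapping.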
@@ -2165,6 +2166,45 @@ error::Error GLES2DecoderPassthroughImpl::DoMemoryBarrierByRegion(
return error::kNoError;
}
+error::Error GLES2DecoderPassthroughImpl::DoMultiDrawBeginCHROMIUM(
+ GLsizei drawcount) {
+ if (!multi_draw_manager_->Begin(drawcount)) {
+ return error::kInvalidArguments;
+ }
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderPassthroughImpl::DoMultiDrawEndCHROMIUM() {
+ MultiDrawManager::ResultData result;
+ if (!multi_draw_manager_->End(&result)) {
+ return error::kInvalidArguments;
+ }
+ switch (result.draw_function) {
+ case MultiDrawManager::DrawFunction::DrawArrays:
+ api()->glMultiDrawArraysANGLEFn(result.mode, result.firsts.data(),
+ result.counts.data(), result.drawcount);
+ return error::kNoError;
+ case MultiDrawManager::DrawFunction::DrawArraysInstanced:
+ api()->glMultiDrawArraysInstancedANGLEFn(
+ result.mode, result.firsts.data(), result.counts.data(),
+ result.instance_counts.data(), result.drawcount);
+ return error::kNoError;
+ case MultiDrawManager::DrawFunction::DrawElements:
+ api()->glMultiDrawElementsANGLEFn(result.mode, result.counts.data(),
+ result.type, result.indices.data(),
+ result.drawcount);
+ return error::kNoError;
+ case MultiDrawManager::DrawFunction::DrawElementsInstanced:
+ api()->glMultiDrawElementsInstancedANGLEFn(
+ result.mode, result.counts.data(), result.type, result.indices.data(),
+ result.instance_counts.data(), result.drawcount);
+ return error::kNoError;
+ default:
+ NOTREACHED();
+ return error::kLostContext;
+ }
+}
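// Editor's sketch of the Begin/append/End contract implemented above (all
// names from this patch; mode/firsts/counts/drawcount are assumed inputs):
MultiDrawManager mgr(MultiDrawManager::IndexStorageType::Pointer);
mgr.Begin(drawcount);                                  // DoMultiDrawBeginCHROMIUM
mgr.MultiDrawArrays(mode, firsts, counts, drawcount);  // handler appends a draw
MultiDrawManager::ResultData result;
mgr.End(&result);                                      // DoMultiDrawEndCHROMIUM
// result.draw_function then selects which glMultiDraw*ANGLE call to issue.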
+
error::Error GLES2DecoderPassthroughImpl::DoPauseTransformFeedback() {
api()->glPauseTransformFeedbackFn();
return error::kNoError;
@@ -3391,8 +3431,6 @@ error::Error GLES2DecoderPassthroughImpl::DoBindVertexArrayOES(GLuint array) {
error::Error GLES2DecoderPassthroughImpl::DoSwapBuffers(uint64_t swap_id,
GLbitfield flags) {
- dc_layer_shared_state_.reset();
-
if (offscreen_) {
if (offscreen_single_buffer_) {
return error::kNoError;
@@ -3637,9 +3675,9 @@ error::Error GLES2DecoderPassthroughImpl::DoRequestExtensionCHROMIUM(
// Make sure newly enabled extensions are exposed and usable.
context_->ReinitializeDynamicBindings();
- feature_info_->Initialize(feature_info_->context_type(),
- true /* is_passthrough_cmd_decoder */,
- feature_info_->disallowed_features());
+ feature_info_->Initialize(
+ feature_info_->context_type(), true /* is_passthrough_cmd_decoder */,
+ feature_info_->disallowed_features(), true /* force_reinitialize */);
return error::kNoError;
}
@@ -4018,8 +4056,6 @@ error::Error GLES2DecoderPassthroughImpl::DoSwapBuffersWithBoundsCHROMIUM(
return error::kNoError;
}
- dc_layer_shared_state_.reset();
-
std::vector<gfx::Rect> bounds(count);
for (GLsizei i = 0; i < count; ++i) {
bounds[i] = gfx::Rect(rects[i * 4 + 0], rects[i * 4 + 1], rects[i * 4 + 2],
@@ -4045,8 +4081,6 @@ error::Error GLES2DecoderPassthroughImpl::DoPostSubBufferCHROMIUM(
return error::kNoError;
}
- dc_layer_shared_state_.reset();
-
client_->OnSwapBuffers(swap_id, flags);
return CheckSwapBuffersResult(
surface_->PostSubBuffer(x, y, width, height, base::DoNothing()),
@@ -4314,15 +4348,6 @@ error::Error GLES2DecoderPassthroughImpl::DoInsertFenceSyncCHROMIUM(
return error::kNoError;
}
-error::Error GLES2DecoderPassthroughImpl::DoWaitSyncTokenCHROMIUM(
- CommandBufferNamespace namespace_id,
- CommandBufferId command_buffer_id,
- GLuint64 release_count) {
- SyncToken sync_token(namespace_id, command_buffer_id, release_count);
- return client_->OnWaitSyncToken(sync_token) ? error::kDeferCommandUntilLater
- : error::kNoError;
-}
-
error::Error GLES2DecoderPassthroughImpl::DoDrawBuffersEXT(
GLsizei count,
const volatile GLenum* bufs) {
@@ -4385,104 +4410,78 @@ error::Error GLES2DecoderPassthroughImpl::DoScheduleCALayerInUseQueryCHROMIUM(
return error::kNoError;
}
-error::Error GLES2DecoderPassthroughImpl::DoScheduleDCLayerSharedStateCHROMIUM(
- GLfloat opacity,
- GLboolean is_clipped,
- const GLfloat* clip_rect,
- GLint z_order,
- const GLfloat* transform) {
- if (!dc_layer_shared_state_) {
- dc_layer_shared_state_.reset(new DCLayerSharedState);
- }
- dc_layer_shared_state_->opacity = opacity;
- dc_layer_shared_state_->is_clipped = is_clipped ? true : false;
- dc_layer_shared_state_->clip_rect = gfx::ToEnclosingRect(
- gfx::RectF(clip_rect[0], clip_rect[1], clip_rect[2], clip_rect[3]));
- dc_layer_shared_state_->z_order = z_order;
- dc_layer_shared_state_->transform =
- gfx::Transform(transform[0], transform[4], transform[8], transform[12],
- transform[1], transform[5], transform[9], transform[13],
- transform[2], transform[6], transform[10], transform[14],
- transform[3], transform[7], transform[11], transform[15]);
- return error::kNoError;
-}
-
error::Error GLES2DecoderPassthroughImpl::DoScheduleDCLayerCHROMIUM(
- GLsizei num_textures,
- const volatile GLuint* contents_texture_ids,
- const GLfloat* contents_rect,
- GLuint background_color,
- GLuint edge_aa_mask,
- GLenum filter,
- const GLfloat* bounds_rect,
+ GLuint y_texture_id,
+ GLuint uv_texture_id,
+ GLint z_order,
+ GLint content_x,
+ GLint content_y,
+ GLint content_width,
+ GLint content_height,
+ GLint quad_x,
+ GLint quad_y,
+ GLint quad_width,
+ GLint quad_height,
+ GLfloat transform_c1r1,
+ GLfloat transform_c2r1,
+ GLfloat transform_c1r2,
+ GLfloat transform_c2r2,
+ GLfloat transform_tx,
+ GLfloat transform_ty,
+ GLboolean is_clipped,
+ GLint clip_x,
+ GLint clip_y,
+ GLint clip_width,
+ GLint clip_height,
GLuint protected_video_type) {
- switch (filter) {
- case GL_NEAREST:
- case GL_LINEAR:
- break;
- default:
- InsertError(GL_INVALID_OPERATION, "invalid filter.");
- return error::kNoError;
- }
-
- if (!dc_layer_shared_state_) {
- InsertError(GL_INVALID_OPERATION,
- "glScheduleDCLayerSharedStateCHROMIUM has not been called.");
- return error::kNoError;
- }
-
- if (num_textures < 0 || num_textures > 4) {
- InsertError(GL_INVALID_OPERATION,
- "number of textures greater than maximum of 4.");
+ if (protected_video_type >
+ static_cast<GLuint>(ui::ProtectedVideoType::kMaxValue)) {
+ InsertError(GL_INVALID_VALUE, "invalid protected video type");
return error::kNoError;
}
- gfx::RectF contents_rect_object(contents_rect[0], contents_rect[1],
- contents_rect[2], contents_rect[3]);
- gfx::RectF bounds_rect_object(bounds_rect[0], bounds_rect[1], bounds_rect[2],
- bounds_rect[3]);
-
- std::vector<scoped_refptr<gl::GLImage>> images(num_textures);
- for (int i = 0; i < num_textures; ++i) {
- GLuint contents_texture_client_id = contents_texture_ids[i];
- if (contents_texture_client_id != 0) {
- scoped_refptr<TexturePassthrough> passthrough_texture = nullptr;
- if (!resources_->texture_object_map.GetServiceID(
- contents_texture_client_id, &passthrough_texture)) {
- InsertError(GL_INVALID_VALUE, "unknown texture.");
- return error::kNoError;
- }
- DCHECK(passthrough_texture != nullptr);
-
- scoped_refptr<gl::GLImage> image =
- passthrough_texture->GetLevelImage(passthrough_texture->target(), 0);
- if (image == nullptr) {
- InsertError(GL_INVALID_VALUE, "unsupported texture format");
- return error::kNoError;
- }
- images[i] = image;
+ GLuint texture_ids[] = {y_texture_id, uv_texture_id};
+ scoped_refptr<gl::GLImage> images[2];
+ size_t i = 0;
+ for (GLuint texture_id : texture_ids) {
+ if (!texture_id) {
+ InsertError(GL_INVALID_VALUE, "invalid texture");
+ return error::kNoError;
}
- }
- if (protected_video_type >
- static_cast<GLuint>(ui::ProtectedVideoType::kMaxValue)) {
- InsertError(GL_INVALID_VALUE, "unknown protected video type.");
- return error::kNoError;
- }
- ui::ProtectedVideoType protected_video_type_param =
+ scoped_refptr<TexturePassthrough> passthrough_texture;
+ if (!resources_->texture_object_map.GetServiceID(texture_id,
+ &passthrough_texture)) {
+ InsertError(GL_INVALID_VALUE, "unknown texture");
+ return error::kNoError;
+ }
+ DCHECK(passthrough_texture);
+ gl::GLImage* image =
+ passthrough_texture->GetLevelImage(passthrough_texture->target(), 0);
+ if (!image) {
+ InsertError(GL_INVALID_VALUE, "unsupported texture format");
+ return error::kNoError;
+ }
+ images[i++] = scoped_refptr<gl::GLImage>(image);
+ }
+
+ ui::DCRendererLayerParams params;
+ params.y_image = std::move(images[0]);
+ params.uv_image = std::move(images[1]);
+ params.z_order = z_order;
+ params.content_rect =
+ gfx::Rect(content_x, content_y, content_width, content_height);
+ params.quad_rect = gfx::Rect(quad_x, quad_y, quad_width, quad_height);
+ params.transform =
+ gfx::Transform(transform_c1r1, transform_c2r1, transform_c1r2,
+ transform_c2r2, transform_tx, transform_ty);
+ params.is_clipped = is_clipped;
+ params.clip_rect = gfx::Rect(clip_x, clip_y, clip_width, clip_height);
+ params.protected_video_type =
static_cast<ui::ProtectedVideoType>(protected_video_type);
- ui::DCRendererLayerParams params(
- dc_layer_shared_state_->is_clipped, dc_layer_shared_state_->clip_rect,
- dc_layer_shared_state_->z_order, dc_layer_shared_state_->transform,
- images, contents_rect_object, gfx::ToEnclosingRect(bounds_rect_object),
- background_color, edge_aa_mask, dc_layer_shared_state_->opacity, filter,
- protected_video_type_param);
-
- if (!surface_->ScheduleDCLayer(params)) {
+ if (!surface_->ScheduleDCLayer(params))
InsertError(GL_INVALID_OPERATION, "failed to schedule DCLayer");
- return error::kNoError;
- }
return error::kNoError;
}
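// Editor's note: the six transform floats form a 2D affine. A sketch of the
// mapping, assuming the six-argument gfx::Transform constructor's
// column/row naming used above:
//   | c1r1  c2r1  tx |      x' = c1r1 * x + c2r1 * y + tx
//   | c1r2  c2r2  ty |      y' = c1r2 * x + c2r2 * y + ty
gfx::Transform transform(transform_c1r1, transform_c2r1, transform_c1r2,
                         transform_c2r2, transform_tx, transform_ty);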
@@ -4496,8 +4495,6 @@ error::Error GLES2DecoderPassthroughImpl::DoCommitOverlayPlanesCHROMIUM(
return error::kNoError;
}
- dc_layer_shared_state_.reset();
-
client_->OnSwapBuffers(swap_id, flags);
return CheckSwapBuffersResult(
surface_->CommitOverlayPlanes(base::DoNothing()), "CommitOverlayPlanes");
@@ -5054,7 +5051,6 @@ error::Error GLES2DecoderPassthroughImpl::DoUnlockDiscardableTextureCHROMIUM(
error::Error
GLES2DecoderPassthroughImpl::DoCreateAndTexStorage2DSharedImageINTERNAL(
GLuint texture_client_id,
- GLenum internal_format,
const volatile GLbyte* mailbox) {
if (!texture_client_id ||
resources_->texture_id_map.HasClientID(texture_client_id)) {
diff --git a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough_handlers.cc b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough_handlers.cc
index a170b70e905..7d7e982db81 100644
--- a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough_handlers.cc
+++ b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough_handlers.cc
@@ -5,6 +5,7 @@
#include "gpu/command_buffer/service/gles2_cmd_decoder_passthrough.h"
#include "gpu/command_buffer/common/discardable_handle.h"
+#include "gpu/command_buffer/service/multi_draw_manager.h"
#include "ui/gfx/ipc/color/gfx_param_traits.h"
namespace gpu {
@@ -957,7 +958,7 @@ error::Error GLES2DecoderPassthroughImpl::HandleShaderBinary(
uint32_t binary_shm_offset = c.binary_shm_offset;
uint32_t data_size;
- if (!SafeMultiplyUint32(n, sizeof(GLuint), &data_size)) {
+ if (!base::CheckMul(n, sizeof(GLuint)).AssignIfValid(&data_size)) {
return error::kOutOfBounds;
}
const GLuint* shaders = GetSharedMemoryAs<const GLuint*>(
@@ -1615,6 +1616,181 @@ error::Error GLES2DecoderPassthroughImpl::HandleDrawElementsInstancedANGLE(
return DoDrawElementsInstancedANGLE(mode, count, type, indices, primcount);
}
+error::Error GLES2DecoderPassthroughImpl::HandleMultiDrawArraysCHROMIUM(
+ uint32_t immediate_data_size,
+ const volatile void* cmd_data) {
+ const volatile gles2::cmds::MultiDrawArraysCHROMIUM& c =
+ *static_cast<const volatile gles2::cmds::MultiDrawArraysCHROMIUM*>(
+ cmd_data);
+ if (!features().webgl_multi_draw) {
+ return error::kUnknownCommand;
+ }
+
+ GLenum mode = static_cast<GLenum>(c.mode);
+ GLsizei drawcount = static_cast<GLsizei>(c.drawcount);
+
+ uint32_t firsts_size, counts_size;
+ base::CheckedNumeric<uint32_t> checked_size(drawcount);
+ if (!(checked_size * sizeof(GLint)).AssignIfValid(&firsts_size)) {
+ return error::kOutOfBounds;
+ }
+ if (!(checked_size * sizeof(GLsizei)).AssignIfValid(&counts_size)) {
+ return error::kOutOfBounds;
+ }
+ const GLint* firsts = GetSharedMemoryAs<const GLint*>(
+ c.firsts_shm_id, c.firsts_shm_offset, firsts_size);
+ const GLsizei* counts = GetSharedMemoryAs<const GLsizei*>(
+ c.counts_shm_id, c.counts_shm_offset, counts_size);
+ if (firsts == nullptr) {
+ return error::kOutOfBounds;
+ }
+ if (counts == nullptr) {
+ return error::kOutOfBounds;
+ }
+ if (!multi_draw_manager_->MultiDrawArrays(mode, firsts, counts, drawcount)) {
+ return error::kInvalidArguments;
+ }
+ return error::kNoError;
+}
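// Editor's sketch of the size-validation idiom shared by all four multi-draw
// handlers: multiply drawcount by the element size with overflow checking
// before trusting the product as a shared-memory span length.
uint32_t counts_size = 0;
base::CheckedNumeric<uint32_t> checked_size(drawcount);
if (!(checked_size * sizeof(GLsizei)).AssignIfValid(&counts_size)) {
  return error::kOutOfBounds;  // drawcount * sizeof() overflowed uint32_t.
}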
+
+error::Error
+GLES2DecoderPassthroughImpl::HandleMultiDrawArraysInstancedCHROMIUM(
+ uint32_t immediate_data_size,
+ const volatile void* cmd_data) {
+ const volatile gles2::cmds::MultiDrawArraysInstancedCHROMIUM& c =
+ *static_cast<
+ const volatile gles2::cmds::MultiDrawArraysInstancedCHROMIUM*>(
+ cmd_data);
+ if (!features().webgl_multi_draw_instanced) {
+ return error::kUnknownCommand;
+ }
+
+ GLenum mode = static_cast<GLenum>(c.mode);
+ GLsizei drawcount = static_cast<GLsizei>(c.drawcount);
+
+ uint32_t firsts_size, counts_size, instance_counts_size;
+ base::CheckedNumeric<uint32_t> checked_size(drawcount);
+ if (!(checked_size * sizeof(GLint)).AssignIfValid(&firsts_size)) {
+ return error::kOutOfBounds;
+ }
+ if (!(checked_size * sizeof(GLsizei)).AssignIfValid(&counts_size)) {
+ return error::kOutOfBounds;
+ }
+ if (!(checked_size * sizeof(GLsizei)).AssignIfValid(&instance_counts_size)) {
+ return error::kOutOfBounds;
+ }
+ const GLint* firsts = GetSharedMemoryAs<const GLint*>(
+ c.firsts_shm_id, c.firsts_shm_offset, firsts_size);
+ const GLsizei* counts = GetSharedMemoryAs<const GLsizei*>(
+ c.counts_shm_id, c.counts_shm_offset, counts_size);
+ const GLsizei* instance_counts = GetSharedMemoryAs<const GLsizei*>(
+ c.instance_counts_shm_id, c.instance_counts_shm_offset,
+ instance_counts_size);
+ if (firsts == nullptr) {
+ return error::kOutOfBounds;
+ }
+ if (counts == nullptr) {
+ return error::kOutOfBounds;
+ }
+ if (instance_counts == nullptr) {
+ return error::kOutOfBounds;
+ }
+ if (!multi_draw_manager_->MultiDrawArraysInstanced(
+ mode, firsts, counts, instance_counts, drawcount)) {
+ return error::kInvalidArguments;
+ }
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderPassthroughImpl::HandleMultiDrawElementsCHROMIUM(
+ uint32_t immediate_data_size,
+ const volatile void* cmd_data) {
+ const volatile gles2::cmds::MultiDrawElementsCHROMIUM& c =
+ *static_cast<const volatile gles2::cmds::MultiDrawElementsCHROMIUM*>(
+ cmd_data);
+ if (!features().webgl_multi_draw) {
+ return error::kUnknownCommand;
+ }
+
+ GLenum mode = static_cast<GLenum>(c.mode);
+ GLenum type = static_cast<GLenum>(c.type);
+ GLsizei drawcount = static_cast<GLsizei>(c.drawcount);
+
+ uint32_t counts_size, offsets_size;
+ base::CheckedNumeric<uint32_t> checked_size(drawcount);
+ if (!(checked_size * sizeof(GLsizei)).AssignIfValid(&counts_size)) {
+ return error::kOutOfBounds;
+ }
+ if (!(checked_size * sizeof(GLsizei)).AssignIfValid(&offsets_size)) {
+ return error::kOutOfBounds;
+ }
+ const GLsizei* counts = GetSharedMemoryAs<const GLsizei*>(
+ c.counts_shm_id, c.counts_shm_offset, counts_size);
+ const GLsizei* offsets = GetSharedMemoryAs<const GLsizei*>(
+ c.offsets_shm_id, c.offsets_shm_offset, offsets_size);
+ if (counts == nullptr) {
+ return error::kOutOfBounds;
+ }
+ if (offsets == nullptr) {
+ return error::kOutOfBounds;
+ }
+ if (!multi_draw_manager_->MultiDrawElements(mode, counts, type, offsets,
+ drawcount)) {
+ return error::kInvalidArguments;
+ }
+ return error::kNoError;
+}
+
+error::Error
+GLES2DecoderPassthroughImpl::HandleMultiDrawElementsInstancedCHROMIUM(
+ uint32_t immediate_data_size,
+ const volatile void* cmd_data) {
+ const volatile gles2::cmds::MultiDrawElementsInstancedCHROMIUM& c =
+ *static_cast<
+ const volatile gles2::cmds::MultiDrawElementsInstancedCHROMIUM*>(
+ cmd_data);
+ if (!features().webgl_multi_draw_instanced) {
+ return error::kUnknownCommand;
+ }
+
+ GLenum mode = static_cast<GLenum>(c.mode);
+ GLenum type = static_cast<GLenum>(c.type);
+ GLsizei drawcount = static_cast<GLsizei>(c.drawcount);
+
+ uint32_t counts_size, offsets_size, instance_counts_size;
+ base::CheckedNumeric<uint32_t> checked_size(drawcount);
+ if (!(checked_size * sizeof(GLsizei)).AssignIfValid(&counts_size)) {
+ return error::kOutOfBounds;
+ }
+ if (!(checked_size * sizeof(GLsizei)).AssignIfValid(&offsets_size)) {
+ return error::kOutOfBounds;
+ }
+ if (!(checked_size * sizeof(GLsizei)).AssignIfValid(&instance_counts_size)) {
+ return error::kOutOfBounds;
+ }
+ const GLsizei* counts = GetSharedMemoryAs<const GLsizei*>(
+ c.counts_shm_id, c.counts_shm_offset, counts_size);
+ const GLsizei* offsets = GetSharedMemoryAs<const GLsizei*>(
+ c.offsets_shm_id, c.offsets_shm_offset, offsets_size);
+ const GLsizei* instance_counts = GetSharedMemoryAs<const GLsizei*>(
+ c.instance_counts_shm_id, c.instance_counts_shm_offset,
+ instance_counts_size);
+ if (counts == nullptr) {
+ return error::kOutOfBounds;
+ }
+ if (offsets == nullptr) {
+ return error::kOutOfBounds;
+ }
+ if (instance_counts == nullptr) {
+ return error::kOutOfBounds;
+ }
+ if (!multi_draw_manager_->MultiDrawElementsInstanced(
+ mode, counts, type, offsets, instance_counts, drawcount)) {
+ return error::kInvalidArguments;
+ }
+ return error::kNoError;
+}
+
error::Error GLES2DecoderPassthroughImpl::HandleVertexAttribDivisorANGLE(
uint32_t immediate_data_size,
const volatile void* cmd_data) {
@@ -1697,31 +1873,6 @@ error::Error GLES2DecoderPassthroughImpl::HandleInsertFenceSyncCHROMIUM(
return DoInsertFenceSyncCHROMIUM(release_count);
}
-error::Error GLES2DecoderPassthroughImpl::HandleWaitSyncTokenCHROMIUM(
- uint32_t immediate_data_size,
- const volatile void* cmd_data) {
- const volatile gles2::cmds::WaitSyncTokenCHROMIUM& c =
- *static_cast<const volatile gles2::cmds::WaitSyncTokenCHROMIUM*>(
- cmd_data);
- CommandBufferNamespace namespace_id =
- static_cast<gpu::CommandBufferNamespace>(c.namespace_id);
- const uint64_t release_count = c.release_count();
- CommandBufferId command_buffer_id =
- CommandBufferId::FromUnsafeValue(c.command_buffer_id());
-
- const CommandBufferNamespace kMinNamespaceId =
- CommandBufferNamespace::INVALID;
- const CommandBufferNamespace kMaxNamespaceId =
- CommandBufferNamespace::NUM_COMMAND_BUFFER_NAMESPACES;
- if ((namespace_id < static_cast<int32_t>(kMinNamespaceId)) ||
- (namespace_id >= static_cast<int32_t>(kMaxNamespaceId))) {
- namespace_id = gpu::CommandBufferNamespace::INVALID;
- }
-
- return DoWaitSyncTokenCHROMIUM(namespace_id, command_buffer_id,
- release_count);
-}
-
error::Error GLES2DecoderPassthroughImpl::HandleDiscardBackbufferCHROMIUM(
uint32_t immediate_data_size,
const volatile void* cmd_data) {
@@ -1801,65 +1952,6 @@ error::Error GLES2DecoderPassthroughImpl::HandleScheduleCALayerCHROMIUM(
background_color, edge_aa_mask, bounds_rect);
}
-error::Error
-GLES2DecoderPassthroughImpl::HandleScheduleDCLayerSharedStateCHROMIUM(
- uint32_t immediate_data_size,
- const volatile void* cmd_data) {
- const volatile gles2::cmds::ScheduleDCLayerSharedStateCHROMIUM& c =
- *static_cast<
- const volatile gles2::cmds::ScheduleDCLayerSharedStateCHROMIUM*>(
- cmd_data);
- GLfloat opacity = static_cast<GLfloat>(c.opacity);
- GLboolean is_clipped = static_cast<GLboolean>(c.is_clipped);
- GLint z_order = static_cast<GLint>(c.z_order);
- uint32_t shm_id = c.shm_id;
- uint32_t shm_offset = c.shm_offset;
-
- const GLfloat* mem = GetSharedMemoryAs<const GLfloat*>(shm_id, shm_offset,
- 20 * sizeof(GLfloat));
- if (!mem) {
- return error::kOutOfBounds;
- }
- const GLfloat* clip_rect = mem + 0;
- const GLfloat* transform = mem + 4;
- return DoScheduleDCLayerSharedStateCHROMIUM(opacity, is_clipped, clip_rect,
- z_order, transform);
-}
-
-error::Error GLES2DecoderPassthroughImpl::HandleScheduleDCLayerCHROMIUM(
- uint32_t immediate_data_size,
- const volatile void* cmd_data) {
- const volatile gles2::cmds::ScheduleDCLayerCHROMIUM& c =
- *static_cast<const volatile gles2::cmds::ScheduleDCLayerCHROMIUM*>(
- cmd_data);
- GLuint background_color = static_cast<GLuint>(c.background_color);
- GLuint edge_aa_mask = static_cast<GLuint>(c.edge_aa_mask);
- GLenum filter = static_cast<GLenum>(c.filter);
- const GLsizei num_textures = c.num_textures;
- uint32_t shm_id = c.shm_id;
- uint32_t shm_offset = c.shm_offset;
-
- unsigned int size;
- const GLfloat* mem = GetSharedMemoryAndSizeAs<const GLfloat*>(
- shm_id, shm_offset, 8 * sizeof(GLfloat), &size);
- if (!mem) {
- return error::kOutOfBounds;
- }
- if (num_textures < 0 || (size - 8 * sizeof(GLfloat)) / sizeof(GLuint) <
- static_cast<GLuint>(num_textures)) {
- return error::kOutOfBounds;
- }
- const volatile GLuint* contents_texture_ids =
- reinterpret_cast<const volatile GLuint*>(mem + 8);
- const GLfloat* contents_rect = mem;
- const GLfloat* bounds_rect = mem + 4;
- GLuint protected_video_type_param = c.protected_video_type;
-
- return DoScheduleDCLayerCHROMIUM(
- num_textures, contents_texture_ids, contents_rect, background_color,
- edge_aa_mask, filter, bounds_rect, protected_video_type_param);
-}
-
error::Error GLES2DecoderPassthroughImpl::HandleSetColorSpaceMetadataCHROMIUM(
uint32_t immediate_data_size,
const volatile void* cmd_data) {
diff --git a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough_handlers_autogen.cc b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough_handlers_autogen.cc
index 275c8413c99..2339a57ad66 100644
--- a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough_handlers_autogen.cc
+++ b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough_handlers_autogen.cc
@@ -573,7 +573,7 @@ error::Error GLES2DecoderPassthroughImpl::HandleDeleteBuffersImmediate(
cmd_data);
GLsizei n = static_cast<GLsizei>(c.n);
uint32_t buffers_size;
- if (!SafeMultiplyUint32(n, sizeof(GLuint), &buffers_size)) {
+ if (!base::CheckMul(n, sizeof(GLuint)).AssignIfValid(&buffers_size)) {
return error::kOutOfBounds;
}
volatile const GLuint* buffers = GetImmediateDataAs<volatile const GLuint*>(
@@ -596,7 +596,7 @@ error::Error GLES2DecoderPassthroughImpl::HandleDeleteFramebuffersImmediate(
cmd_data);
GLsizei n = static_cast<GLsizei>(c.n);
uint32_t framebuffers_size;
- if (!SafeMultiplyUint32(n, sizeof(GLuint), &framebuffers_size)) {
+ if (!base::CheckMul(n, sizeof(GLuint)).AssignIfValid(&framebuffers_size)) {
return error::kOutOfBounds;
}
volatile const GLuint* framebuffers =
@@ -633,7 +633,7 @@ error::Error GLES2DecoderPassthroughImpl::HandleDeleteRenderbuffersImmediate(
cmd_data);
GLsizei n = static_cast<GLsizei>(c.n);
uint32_t renderbuffers_size;
- if (!SafeMultiplyUint32(n, sizeof(GLuint), &renderbuffers_size)) {
+ if (!base::CheckMul(n, sizeof(GLuint)).AssignIfValid(&renderbuffers_size)) {
return error::kOutOfBounds;
}
volatile const GLuint* renderbuffers =
@@ -659,7 +659,7 @@ error::Error GLES2DecoderPassthroughImpl::HandleDeleteSamplersImmediate(
cmd_data);
GLsizei n = static_cast<GLsizei>(c.n);
uint32_t samplers_size;
- if (!SafeMultiplyUint32(n, sizeof(GLuint), &samplers_size)) {
+ if (!base::CheckMul(n, sizeof(GLuint)).AssignIfValid(&samplers_size)) {
return error::kOutOfBounds;
}
volatile const GLuint* samplers = GetImmediateDataAs<volatile const GLuint*>(
@@ -710,7 +710,7 @@ error::Error GLES2DecoderPassthroughImpl::HandleDeleteTexturesImmediate(
cmd_data);
GLsizei n = static_cast<GLsizei>(c.n);
uint32_t textures_size;
- if (!SafeMultiplyUint32(n, sizeof(GLuint), &textures_size)) {
+ if (!base::CheckMul(n, sizeof(GLuint)).AssignIfValid(&textures_size)) {
return error::kOutOfBounds;
}
volatile const GLuint* textures = GetImmediateDataAs<volatile const GLuint*>(
@@ -737,7 +737,7 @@ GLES2DecoderPassthroughImpl::HandleDeleteTransformFeedbacksImmediate(
cmd_data);
GLsizei n = static_cast<GLsizei>(c.n);
uint32_t ids_size;
- if (!SafeMultiplyUint32(n, sizeof(GLuint), &ids_size)) {
+ if (!base::CheckMul(n, sizeof(GLuint)).AssignIfValid(&ids_size)) {
return error::kOutOfBounds;
}
volatile const GLuint* ids = GetImmediateDataAs<volatile const GLuint*>(
@@ -957,7 +957,7 @@ error::Error GLES2DecoderPassthroughImpl::HandleGenBuffersImmediate(
*static_cast<const volatile gles2::cmds::GenBuffersImmediate*>(cmd_data);
GLsizei n = static_cast<GLsizei>(c.n);
uint32_t buffers_size;
- if (!SafeMultiplyUint32(n, sizeof(GLuint), &buffers_size)) {
+ if (!base::CheckMul(n, sizeof(GLuint)).AssignIfValid(&buffers_size)) {
return error::kOutOfBounds;
}
volatile GLuint* buffers = GetImmediateDataAs<volatile GLuint*>(
@@ -993,7 +993,7 @@ error::Error GLES2DecoderPassthroughImpl::HandleGenFramebuffersImmediate(
cmd_data);
GLsizei n = static_cast<GLsizei>(c.n);
uint32_t framebuffers_size;
- if (!SafeMultiplyUint32(n, sizeof(GLuint), &framebuffers_size)) {
+ if (!base::CheckMul(n, sizeof(GLuint)).AssignIfValid(&framebuffers_size)) {
return error::kOutOfBounds;
}
volatile GLuint* framebuffers = GetImmediateDataAs<volatile GLuint*>(
@@ -1016,7 +1016,7 @@ error::Error GLES2DecoderPassthroughImpl::HandleGenRenderbuffersImmediate(
cmd_data);
GLsizei n = static_cast<GLsizei>(c.n);
uint32_t renderbuffers_size;
- if (!SafeMultiplyUint32(n, sizeof(GLuint), &renderbuffers_size)) {
+ if (!base::CheckMul(n, sizeof(GLuint)).AssignIfValid(&renderbuffers_size)) {
return error::kOutOfBounds;
}
volatile GLuint* renderbuffers = GetImmediateDataAs<volatile GLuint*>(
@@ -1040,7 +1040,7 @@ error::Error GLES2DecoderPassthroughImpl::HandleGenSamplersImmediate(
*static_cast<const volatile gles2::cmds::GenSamplersImmediate*>(cmd_data);
GLsizei n = static_cast<GLsizei>(c.n);
uint32_t samplers_size;
- if (!SafeMultiplyUint32(n, sizeof(GLuint), &samplers_size)) {
+ if (!base::CheckMul(n, sizeof(GLuint)).AssignIfValid(&samplers_size)) {
return error::kOutOfBounds;
}
volatile GLuint* samplers = GetImmediateDataAs<volatile GLuint*>(
@@ -1062,7 +1062,7 @@ error::Error GLES2DecoderPassthroughImpl::HandleGenTexturesImmediate(
*static_cast<const volatile gles2::cmds::GenTexturesImmediate*>(cmd_data);
GLsizei n = static_cast<GLsizei>(c.n);
uint32_t textures_size;
- if (!SafeMultiplyUint32(n, sizeof(GLuint), &textures_size)) {
+ if (!base::CheckMul(n, sizeof(GLuint)).AssignIfValid(&textures_size)) {
return error::kOutOfBounds;
}
volatile GLuint* textures = GetImmediateDataAs<volatile GLuint*>(
@@ -1087,7 +1087,7 @@ error::Error GLES2DecoderPassthroughImpl::HandleGenTransformFeedbacksImmediate(
cmd_data);
GLsizei n = static_cast<GLsizei>(c.n);
uint32_t ids_size;
- if (!SafeMultiplyUint32(n, sizeof(GLuint), &ids_size)) {
+ if (!base::CheckMul(n, sizeof(GLuint)).AssignIfValid(&ids_size)) {
return error::kOutOfBounds;
}
volatile GLuint* ids =
@@ -2299,6 +2299,38 @@ error::Error GLES2DecoderPassthroughImpl::HandleShaderSourceBucket(
return error::kNoError;
}
+error::Error GLES2DecoderPassthroughImpl::HandleMultiDrawBeginCHROMIUM(
+ uint32_t immediate_data_size,
+ const volatile void* cmd_data) {
+ const volatile gles2::cmds::MultiDrawBeginCHROMIUM& c =
+ *static_cast<const volatile gles2::cmds::MultiDrawBeginCHROMIUM*>(
+ cmd_data);
+ if (!features().webgl_multi_draw) {
+ return error::kUnknownCommand;
+ }
+
+ GLsizei drawcount = static_cast<GLsizei>(c.drawcount);
+ error::Error error = DoMultiDrawBeginCHROMIUM(drawcount);
+ if (error != error::kNoError) {
+ return error;
+ }
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderPassthroughImpl::HandleMultiDrawEndCHROMIUM(
+ uint32_t immediate_data_size,
+ const volatile void* cmd_data) {
+ if (!features().webgl_multi_draw) {
+ return error::kUnknownCommand;
+ }
+
+ error::Error error = DoMultiDrawEndCHROMIUM();
+ if (error != error::kNoError) {
+ return error;
+ }
+ return error::kNoError;
+}
+
error::Error GLES2DecoderPassthroughImpl::HandleStencilFunc(
uint32_t immediate_data_size,
const volatile void* cmd_data) {
@@ -3749,7 +3781,7 @@ error::Error GLES2DecoderPassthroughImpl::HandleGenQueriesEXTImmediate(
cmd_data);
GLsizei n = static_cast<GLsizei>(c.n);
uint32_t queries_size;
- if (!SafeMultiplyUint32(n, sizeof(GLuint), &queries_size)) {
+ if (!base::CheckMul(n, sizeof(GLuint)).AssignIfValid(&queries_size)) {
return error::kOutOfBounds;
}
volatile GLuint* queries = GetImmediateDataAs<volatile GLuint*>(
@@ -3772,7 +3804,7 @@ error::Error GLES2DecoderPassthroughImpl::HandleDeleteQueriesEXTImmediate(
cmd_data);
GLsizei n = static_cast<GLsizei>(c.n);
uint32_t queries_size;
- if (!SafeMultiplyUint32(n, sizeof(GLuint), &queries_size)) {
+ if (!base::CheckMul(n, sizeof(GLuint)).AssignIfValid(&queries_size)) {
return error::kOutOfBounds;
}
volatile const GLuint* queries = GetImmediateDataAs<volatile const GLuint*>(
@@ -3833,7 +3865,7 @@ error::Error GLES2DecoderPassthroughImpl::HandleGenVertexArraysOESImmediate(
cmd_data);
GLsizei n = static_cast<GLsizei>(c.n);
uint32_t arrays_size;
- if (!SafeMultiplyUint32(n, sizeof(GLuint), &arrays_size)) {
+ if (!base::CheckMul(n, sizeof(GLuint)).AssignIfValid(&arrays_size)) {
return error::kOutOfBounds;
}
volatile GLuint* arrays =
@@ -3856,7 +3888,7 @@ error::Error GLES2DecoderPassthroughImpl::HandleDeleteVertexArraysOESImmediate(
cmd_data);
GLsizei n = static_cast<GLsizei>(c.n);
uint32_t arrays_size;
- if (!SafeMultiplyUint32(n, sizeof(GLuint), &arrays_size)) {
+ if (!base::CheckMul(n, sizeof(GLuint)).AssignIfValid(&arrays_size)) {
return error::kOutOfBounds;
}
volatile const GLuint* arrays = GetImmediateDataAs<volatile const GLuint*>(
@@ -4374,6 +4406,47 @@ error::Error GLES2DecoderPassthroughImpl::HandleFlushDriverCachesCHROMIUM(
return error::kNoError;
}
+error::Error GLES2DecoderPassthroughImpl::HandleScheduleDCLayerCHROMIUM(
+ uint32_t immediate_data_size,
+ const volatile void* cmd_data) {
+ const volatile gles2::cmds::ScheduleDCLayerCHROMIUM& c =
+ *static_cast<const volatile gles2::cmds::ScheduleDCLayerCHROMIUM*>(
+ cmd_data);
+ GLuint y_texture_id = static_cast<GLuint>(c.y_texture_id);
+ GLuint uv_texture_id = static_cast<GLuint>(c.uv_texture_id);
+ GLint z_order = static_cast<GLint>(c.z_order);
+ GLint content_x = static_cast<GLint>(c.content_x);
+ GLint content_y = static_cast<GLint>(c.content_y);
+ GLint content_width = static_cast<GLint>(c.content_width);
+ GLint content_height = static_cast<GLint>(c.content_height);
+ GLint quad_x = static_cast<GLint>(c.quad_x);
+ GLint quad_y = static_cast<GLint>(c.quad_y);
+ GLint quad_width = static_cast<GLint>(c.quad_width);
+ GLint quad_height = static_cast<GLint>(c.quad_height);
+ GLfloat transform_c1r1 = static_cast<GLfloat>(c.transform_c1r1);
+ GLfloat transform_c2r1 = static_cast<GLfloat>(c.transform_c2r1);
+ GLfloat transform_c1r2 = static_cast<GLfloat>(c.transform_c1r2);
+ GLfloat transform_c2r2 = static_cast<GLfloat>(c.transform_c2r2);
+ GLfloat transform_tx = static_cast<GLfloat>(c.transform_tx);
+ GLfloat transform_ty = static_cast<GLfloat>(c.transform_ty);
+ GLboolean is_clipped = static_cast<GLboolean>(c.is_clipped);
+ GLint clip_x = static_cast<GLint>(c.clip_x);
+ GLint clip_y = static_cast<GLint>(c.clip_y);
+ GLint clip_width = static_cast<GLint>(c.clip_width);
+ GLint clip_height = static_cast<GLint>(c.clip_height);
+ GLuint protected_video_type = static_cast<GLuint>(c.protected_video_type);
+ error::Error error = DoScheduleDCLayerCHROMIUM(
+ y_texture_id, uv_texture_id, z_order, content_x, content_y, content_width,
+ content_height, quad_x, quad_y, quad_width, quad_height, transform_c1r1,
+ transform_c2r1, transform_c1r2, transform_c2r2, transform_tx,
+ transform_ty, is_clipped, clip_x, clip_y, clip_width, clip_height,
+ protected_video_type);
+ if (error != error::kNoError) {
+ return error;
+ }
+ return error::kNoError;
+}
+
error::Error GLES2DecoderPassthroughImpl::HandleMatrixLoadfCHROMIUMImmediate(
uint32_t immediate_data_size,
const volatile void* cmd_data) {
@@ -4758,7 +4831,6 @@ error::Error GLES2DecoderPassthroughImpl::
CreateAndTexStorage2DSharedImageINTERNALImmediate*>(
cmd_data);
GLuint texture = static_cast<GLuint>(c.texture);
- GLenum internalFormat = static_cast<GLenum>(c.internalFormat);
uint32_t mailbox_size;
if (!GLES2Util::ComputeDataSize<GLbyte, 16>(1, &mailbox_size)) {
return error::kOutOfBounds;
@@ -4771,8 +4843,8 @@ error::Error GLES2DecoderPassthroughImpl::
if (mailbox == nullptr) {
return error::kOutOfBounds;
}
- error::Error error = DoCreateAndTexStorage2DSharedImageINTERNAL(
- texture, internalFormat, mailbox);
+ error::Error error =
+ DoCreateAndTexStorage2DSharedImageINTERNAL(texture, mailbox);
if (error != error::kNoError) {
return error;
}
diff --git a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough_unittest_textures.cc b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough_unittest_textures.cc
index 99fef04345d..eac90b80d02 100644
--- a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough_unittest_textures.cc
+++ b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough_unittest_textures.cc
@@ -91,7 +91,7 @@ using namespace cmds;
TEST_F(GLES2DecoderPassthroughTest, CreateAndTexStorage2DSharedImageCHROMIUM) {
MemoryTypeTracker memory_tracker(nullptr);
- Mailbox mailbox = Mailbox::Generate();
+ Mailbox mailbox = Mailbox::GenerateForSharedImage();
std::unique_ptr<SharedImageRepresentationFactoryRef> shared_image =
GetSharedImageManager()->Register(
std::make_unique<TestSharedImageBackingPassthrough>(
@@ -101,7 +101,7 @@ TEST_F(GLES2DecoderPassthroughTest, CreateAndTexStorage2DSharedImageCHROMIUM) {
CreateAndTexStorage2DSharedImageINTERNALImmediate& cmd =
*GetImmediateAs<CreateAndTexStorage2DSharedImageINTERNALImmediate>();
- cmd.Init(kNewClientId, GL_RGBA, mailbox.name);
+ cmd.Init(kNewClientId, mailbox.name);
EXPECT_EQ(error::kNoError, ExecuteImmediateCmd(cmd, sizeof(mailbox.name)));
EXPECT_EQ(GL_NO_ERROR, GetGLError());
@@ -136,7 +136,7 @@ TEST_F(GLES2DecoderPassthroughTest,
Mailbox mailbox;
CreateAndTexStorage2DSharedImageINTERNALImmediate& cmd =
*GetImmediateAs<CreateAndTexStorage2DSharedImageINTERNALImmediate>();
- cmd.Init(kNewClientId, GL_RGBA, mailbox.name);
+ cmd.Init(kNewClientId, mailbox.name);
EXPECT_EQ(error::kNoError, ExecuteImmediateCmd(cmd, sizeof(mailbox.name)));
// CreateAndTexStorage2DSharedImage should fail if the mailbox is invalid.
@@ -154,7 +154,7 @@ TEST_F(GLES2DecoderPassthroughTest,
CreateAndTexStorage2DSharedImageCHROMIUMPreexistingTexture) {
MemoryTypeTracker memory_tracker(nullptr);
// Create a texture with kNewClientId.
- Mailbox mailbox = Mailbox::Generate();
+ Mailbox mailbox = Mailbox::GenerateForSharedImage();
std::unique_ptr<SharedImageRepresentationFactoryRef> shared_image =
GetSharedImageManager()->Register(
std::make_unique<TestSharedImageBackingPassthrough>(
@@ -165,7 +165,7 @@ TEST_F(GLES2DecoderPassthroughTest,
{
CreateAndTexStorage2DSharedImageINTERNALImmediate& cmd =
*GetImmediateAs<CreateAndTexStorage2DSharedImageINTERNALImmediate>();
- cmd.Init(kNewClientId, GL_RGBA, mailbox.name);
+ cmd.Init(kNewClientId, mailbox.name);
EXPECT_EQ(error::kNoError, ExecuteImmediateCmd(cmd, sizeof(mailbox.name)));
EXPECT_EQ(GL_NO_ERROR, GetGLError());
}
@@ -175,7 +175,7 @@ TEST_F(GLES2DecoderPassthroughTest,
{
CreateAndTexStorage2DSharedImageINTERNALImmediate& cmd =
*GetImmediateAs<CreateAndTexStorage2DSharedImageINTERNALImmediate>();
- cmd.Init(kNewClientId, GL_RGBA, mailbox.name);
+ cmd.Init(kNewClientId, mailbox.name);
EXPECT_EQ(error::kNoError, ExecuteImmediateCmd(cmd, sizeof(mailbox.name)));
EXPECT_EQ(GL_INVALID_OPERATION, GetGLError());
}
@@ -186,7 +186,7 @@ TEST_F(GLES2DecoderPassthroughTest,
TEST_F(GLES2DecoderPassthroughTest, BeginEndSharedImageAccessCHROMIUM) {
MemoryTypeTracker memory_tracker(nullptr);
- Mailbox mailbox = Mailbox::Generate();
+ Mailbox mailbox = Mailbox::GenerateForSharedImage();
std::unique_ptr<SharedImageRepresentationFactoryRef> shared_image =
GetSharedImageManager()->Register(
std::make_unique<TestSharedImageBackingPassthrough>(
@@ -196,7 +196,7 @@ TEST_F(GLES2DecoderPassthroughTest, BeginEndSharedImageAccessCHROMIUM) {
CreateAndTexStorage2DSharedImageINTERNALImmediate& cmd =
*GetImmediateAs<CreateAndTexStorage2DSharedImageINTERNALImmediate>();
- cmd.Init(kNewClientId, GL_RGBA, mailbox.name);
+ cmd.Init(kNewClientId, mailbox.name);
EXPECT_EQ(error::kNoError, ExecuteImmediateCmd(cmd, sizeof(mailbox.name)));
EXPECT_EQ(GL_NO_ERROR, GetGLError());
@@ -249,7 +249,7 @@ TEST_F(GLES2DecoderPassthroughTest,
BeginSharedImageAccessDirectCHROMIUMCantBeginAccess) {
// Create a shared image.
MemoryTypeTracker memory_tracker(nullptr);
- Mailbox mailbox = Mailbox::Generate();
+ Mailbox mailbox = Mailbox::GenerateForSharedImage();
std::unique_ptr<SharedImageRepresentationFactoryRef> shared_image =
GetSharedImageManager()->Register(
std::make_unique<TestSharedImageBackingPassthrough>(
@@ -259,7 +259,7 @@ TEST_F(GLES2DecoderPassthroughTest,
CreateAndTexStorage2DSharedImageINTERNALImmediate& cmd =
*GetImmediateAs<CreateAndTexStorage2DSharedImageINTERNALImmediate>();
- cmd.Init(kNewClientId, GL_RGBA, mailbox.name);
+ cmd.Init(kNewClientId, mailbox.name);
EXPECT_EQ(error::kNoError, ExecuteImmediateCmd(cmd, sizeof(mailbox.name)));
EXPECT_EQ(GL_NO_ERROR, GetGLError());
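// Editor's sketch of the updated test pattern used throughout this file
// (fixture plumbing assumed): shared-image mailboxes now come from the
// dedicated generator, and the INTERNAL command no longer takes a format.
Mailbox mailbox = Mailbox::GenerateForSharedImage();
CreateAndTexStorage2DSharedImageINTERNALImmediate& cmd =
    *GetImmediateAs<CreateAndTexStorage2DSharedImageINTERNALImmediate>();
cmd.Init(kNewClientId, mailbox.name);  // internalFormat argument removed.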
diff --git a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest.cc b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest.cc
index 6eda7db38c9..a982b5ceb93 100644
--- a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest.cc
+++ b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest.cc
@@ -8,6 +8,7 @@
#include <stdint.h>
#include "base/command_line.h"
+#include "base/stl_util.h"
#include "base/strings/string_number_conversions.h"
#include "gpu/command_buffer/common/gles2_cmd_format.h"
#include "gpu/command_buffer/common/gles2_cmd_utils.h"
@@ -925,7 +926,7 @@ static void CheckBeginEndQueryBadMemoryFails(GLES2DecoderTestBase* test,
}
TEST_P(GLES2DecoderManualInitTest, BeginEndQueryEXTBadMemoryIdFails) {
- for (size_t i = 0; i < arraysize(kQueryTypes); ++i) {
+ for (size_t i = 0; i < base::size(kQueryTypes); ++i) {
CheckBeginEndQueryBadMemoryFails(this, kNewClientId, kQueryTypes[i],
kInvalidSharedMemoryId,
kSharedMemoryOffset);
@@ -933,7 +934,7 @@ TEST_P(GLES2DecoderManualInitTest, BeginEndQueryEXTBadMemoryIdFails) {
}
TEST_P(GLES2DecoderManualInitTest, BeginEndQueryEXTBadMemoryOffsetFails) {
- for (size_t i = 0; i < arraysize(kQueryTypes); ++i) {
+ for (size_t i = 0; i < base::size(kQueryTypes); ++i) {
// Out-of-bounds.
CheckBeginEndQueryBadMemoryFails(this, kNewClientId, kQueryTypes[i],
shared_memory_id_,
@@ -945,7 +946,7 @@ TEST_P(GLES2DecoderManualInitTest, BeginEndQueryEXTBadMemoryOffsetFails) {
}
TEST_P(GLES2DecoderManualInitTest, QueryReuseTest) {
- for (size_t i = 0; i < arraysize(kQueryTypes); ++i) {
+ for (size_t i = 0; i < base::size(kQueryTypes); ++i) {
const QueryType& query_type = kQueryTypes[i];
GLES2DecoderTestBase::InitState init;
@@ -1256,7 +1257,7 @@ TEST_P(GLES2DecoderTest, IsEnabledReturnsCachedValue) {
static const GLenum kStates[] = {
GL_DEPTH_TEST, GL_STENCIL_TEST,
};
- for (size_t ii = 0; ii < arraysize(kStates); ++ii) {
+ for (size_t ii = 0; ii < base::size(kStates); ++ii) {
Enable enable_cmd;
GLenum state = kStates[ii];
enable_cmd.Init(state);
diff --git a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_2.cc b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_2.cc
index c6f2b75753c..e1e262e6090 100644
--- a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_2.cc
+++ b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_2.cc
@@ -824,24 +824,12 @@ void GLES2DecoderTestBase::SpecializedSetup<cmds::Uniform4fvImmediate, 0>(
};
template <>
-void GLES2DecoderTestBase::SpecializedSetup<cmds::Uniform4ivImmediate, 0>(
- bool /* valid */) {
- SetupShaderForUniform(GL_INT_VEC4);
-};
-
-template <>
void GLES2DecoderTestBase::SpecializedSetup<cmds::UniformMatrix2fvImmediate, 0>(
bool /* valid */) {
SetupShaderForUniform(GL_FLOAT_MAT2);
};
template <>
-void GLES2DecoderTestBase::SpecializedSetup<
- cmds::UniformMatrix2x3fvImmediate, 0>(bool /* valid */) {
- SetupShaderForUniform(GL_FLOAT_MAT2x3);
-};
-
-template <>
void GLES2DecoderTestBase::SpecializedSetup<cmds::TexParameterf, 0>(
bool /* valid */) {
DoBindTexture(GL_TEXTURE_2D, client_texture_id_, kServiceTextureId);
diff --git a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_2_autogen.h b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_2_autogen.h
index ac03a846021..cbc26cce1e3 100644
--- a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_2_autogen.h
+++ b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_2_autogen.h
@@ -1328,29 +1328,4 @@ TEST_P(GLES2DecoderTest2, Uniform4iValidArgs) {
EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
EXPECT_EQ(GL_NO_ERROR, GetGLError());
}
-
-TEST_P(GLES2DecoderTest2, Uniform4ivImmediateValidArgs) {
- cmds::Uniform4ivImmediate& cmd = *GetImmediateAs<cmds::Uniform4ivImmediate>();
- SpecializedSetup<cmds::Uniform4ivImmediate, 0>(true);
- GLint temp[4 * 2] = {
- 0,
- };
- EXPECT_CALL(*gl_, Uniform4iv(1, 2, PointsToArray(temp, 4)));
- cmd.Init(1, 2, &temp[0]);
- EXPECT_EQ(error::kNoError, ExecuteImmediateCmd(cmd, sizeof(temp)));
- EXPECT_EQ(GL_NO_ERROR, GetGLError());
-}
-
-TEST_P(GLES3DecoderTest2, UniformMatrix2x3fvImmediateValidArgs) {
- cmds::UniformMatrix2x3fvImmediate& cmd =
- *GetImmediateAs<cmds::UniformMatrix2x3fvImmediate>();
- SpecializedSetup<cmds::UniformMatrix2x3fvImmediate, 0>(true);
- GLfloat temp[6 * 2] = {
- 0,
- };
- EXPECT_CALL(*gl_, UniformMatrix2x3fv(1, 2, true, PointsToArray(temp, 6)));
- cmd.Init(1, 2, true, &temp[0]);
- EXPECT_EQ(error::kNoError, ExecuteImmediateCmd(cmd, sizeof(temp)));
- EXPECT_EQ(GL_NO_ERROR, GetGLError());
-}
#endif // GPU_COMMAND_BUFFER_SERVICE_GLES2_CMD_DECODER_UNITTEST_2_AUTOGEN_H_
diff --git a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_3.cc b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_3.cc
index 689711f9e3d..6bace907132 100644
--- a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_3.cc
+++ b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_3.cc
@@ -53,6 +53,12 @@ INSTANTIATE_TEST_CASE_P(Service, GLES2DecoderTest3, ::testing::Bool());
INSTANTIATE_TEST_CASE_P(Service, GLES3DecoderTest3, ::testing::Bool());
template <>
+void GLES2DecoderTestBase::SpecializedSetup<cmds::Uniform4ivImmediate, 0>(
+ bool /* valid */) {
+ SetupShaderForUniform(GL_INT_VEC4);
+};
+
+template <>
void GLES2DecoderTestBase::SpecializedSetup<UniformMatrix3fvImmediate, 0>(
bool /* valid */) {
SetupShaderForUniform(GL_FLOAT_MAT3);
@@ -65,6 +71,12 @@ void GLES2DecoderTestBase::SpecializedSetup<UniformMatrix4fvImmediate, 0>(
};
template <>
+void GLES2DecoderTestBase::SpecializedSetup<UniformMatrix2x3fvImmediate, 0>(
+ bool /* valid */) {
+ SetupShaderForUniform(GL_FLOAT_MAT2x3);
+};
+
+template <>
void GLES2DecoderTestBase::SpecializedSetup<
UniformMatrix2x4fvImmediate, 0>(bool /* valid */) {
SetupShaderForUniform(GL_FLOAT_MAT2x4);
diff --git a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_3_autogen.h b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_3_autogen.h
index 2acc54275b2..5f729cd71e9 100644
--- a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_3_autogen.h
+++ b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_3_autogen.h
@@ -12,6 +12,31 @@
#ifndef GPU_COMMAND_BUFFER_SERVICE_GLES2_CMD_DECODER_UNITTEST_3_AUTOGEN_H_
#define GPU_COMMAND_BUFFER_SERVICE_GLES2_CMD_DECODER_UNITTEST_3_AUTOGEN_H_
+TEST_P(GLES2DecoderTest3, Uniform4ivImmediateValidArgs) {
+ cmds::Uniform4ivImmediate& cmd = *GetImmediateAs<cmds::Uniform4ivImmediate>();
+ SpecializedSetup<cmds::Uniform4ivImmediate, 0>(true);
+ GLint temp[4 * 2] = {
+ 0,
+ };
+ EXPECT_CALL(*gl_, Uniform4iv(1, 2, PointsToArray(temp, 4)));
+ cmd.Init(1, 2, &temp[0]);
+ EXPECT_EQ(error::kNoError, ExecuteImmediateCmd(cmd, sizeof(temp)));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+
+TEST_P(GLES3DecoderTest3, UniformMatrix2x3fvImmediateValidArgs) {
+ cmds::UniformMatrix2x3fvImmediate& cmd =
+ *GetImmediateAs<cmds::UniformMatrix2x3fvImmediate>();
+ SpecializedSetup<cmds::UniformMatrix2x3fvImmediate, 0>(true);
+ GLfloat temp[6 * 2] = {
+ 0,
+ };
+ EXPECT_CALL(*gl_, UniformMatrix2x3fv(1, 2, true, PointsToArray(temp, 6)));
+ cmd.Init(1, 2, true, &temp[0]);
+ EXPECT_EQ(error::kNoError, ExecuteImmediateCmd(cmd, sizeof(temp)));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+
TEST_P(GLES3DecoderTest3, UniformMatrix2x4fvImmediateValidArgs) {
cmds::UniformMatrix2x4fvImmediate& cmd =
*GetImmediateAs<cmds::UniformMatrix2x4fvImmediate>();
diff --git a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_attribs.cc b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_attribs.cc
index c3199d3b4d7..acbfc810252 100644
--- a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_attribs.cc
+++ b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_attribs.cc
@@ -7,6 +7,7 @@
#include <stddef.h>
#include "base/command_line.h"
+#include "base/stl_util.h"
#include "base/strings/string_number_conversions.h"
#include "gpu/command_buffer/common/gles2_cmd_format.h"
#include "gpu/command_buffer/common/gles2_cmd_utils.h"
@@ -14,7 +15,6 @@
#include "gpu/command_buffer/service/context_state.h"
#include "gpu/command_buffer/service/gl_surface_mock.h"
#include "gpu/command_buffer/service/gles2_cmd_decoder_unittest.h"
-
#include "gpu/command_buffer/service/gpu_switches.h"
#include "gpu/command_buffer/service/image_manager.h"
#include "gpu/command_buffer/service/mailbox_manager.h"
@@ -207,15 +207,15 @@ TEST_P(GLES2DecoderWithShaderTest, VertexAttribPointer) {
static const GLsizei stride_offset[] = {
0, 0, 1, 0, 1, 0, 0,
};
- for (size_t tt = 0; tt < arraysize(types); ++tt) {
+ for (size_t tt = 0; tt < base::size(types); ++tt) {
GLenum type = types[tt];
GLsizei num_bytes = sizes[tt];
- for (size_t ii = 0; ii < arraysize(indices); ++ii) {
+ for (size_t ii = 0; ii < base::size(indices); ++ii) {
GLuint index = indices[ii];
for (GLint size = 0; size < 5; ++size) {
- for (size_t oo = 0; oo < arraysize(offset_mult); ++oo) {
+ for (size_t oo = 0; oo < base::size(offset_mult); ++oo) {
GLuint offset = num_bytes * offset_mult[oo] + offset_offset[oo];
- for (size_t ss = 0; ss < arraysize(stride_mult); ++ss) {
+ for (size_t ss = 0; ss < base::size(stride_mult); ++ss) {
GLsizei stride = num_bytes * stride_mult[ss] + stride_offset[ss];
for (int normalize = 0; normalize < 2; ++normalize) {
bool index_good = index < static_cast<GLuint>(kNumVertexAttribs);
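
These hunks are part of a repo-wide migration from the old arraysize macro to base::size from "base/stl_util.h". A minimal sketch of what such a constexpr array-extent helper looks like, assuming only standard C++ (the name ArraySize is a stand-in, not the Chromium implementation):

#include <cstddef>

// Constexpr array-extent helper in the spirit of base::size / C++17
// std::size; the real base::size also accepts containers with a size()
// member, which is why it can replace arraysize throughout this patch.
template <typename T, size_t N>
constexpr size_t ArraySize(const T (&)[N]) noexcept {
  return N;
}

// Usage mirrors the rewritten loops:
//   static const GLenum types[] = {GL_BYTE, GL_SHORT, GL_FLOAT};
//   for (size_t tt = 0; tt < ArraySize(types); ++tt) { /* ... */ }
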
diff --git a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_base.cc b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_base.cc
index c3ba97e738c..856224e909e 100644
--- a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_base.cc
+++ b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_base.cc
@@ -13,6 +13,7 @@
#include <vector>
#include "base/command_line.h"
+#include "base/stl_util.h"
#include "base/strings/string_number_conversions.h"
#include "base/strings/string_split.h"
#include "gpu/command_buffer/common/gles2_cmd_format.h"
@@ -58,7 +59,7 @@ void NormalizeInitState(gpu::gles2::GLES2DecoderTestBase::InitState* init) {
"GL_APPLE_vertex_array_object"
};
bool contains_vao_extension = false;
- for (size_t ii = 0; ii < arraysize(kVAOExtensions); ++ii) {
+ for (size_t ii = 0; ii < base::size(kVAOExtensions); ++ii) {
if (init->extensions.find(kVAOExtensions[ii]) != std::string::npos) {
contains_vao_extension = true;
break;
@@ -141,9 +142,6 @@ void GLES2DecoderTestBase::OnConsoleMessage(int32_t id,
void GLES2DecoderTestBase::CacheShader(const std::string& key,
const std::string& shader) {}
void GLES2DecoderTestBase::OnFenceSyncRelease(uint64_t release) {}
-bool GLES2DecoderTestBase::OnWaitSyncToken(const gpu::SyncToken&) {
- return false;
-}
void GLES2DecoderTestBase::OnDescheduleUntilFinished() {}
void GLES2DecoderTestBase::OnRescheduleAfterFinished() {}
void GLES2DecoderTestBase::OnSwapBuffers(uint64_t swap_id, uint32_t flags) {}
@@ -294,9 +292,9 @@ void GLES2DecoderTestBase::InitDecoderWithWorkarounds(
static GLuint fixed_attrib_buffer_id[] = {
kServiceFixedAttribBufferId,
};
- EXPECT_CALL(*gl_, GenBuffersARB(arraysize(attrib_0_id), _))
+ EXPECT_CALL(*gl_, GenBuffersARB(base::size(attrib_0_id), _))
.WillOnce(SetArrayArgument<1>(attrib_0_id,
- attrib_0_id + arraysize(attrib_0_id)))
+ attrib_0_id + base::size(attrib_0_id)))
.RetiresOnSaturation();
EXPECT_CALL(*gl_, BindBuffer(GL_ARRAY_BUFFER, kServiceAttrib0BufferId))
.Times(1)
@@ -307,10 +305,10 @@ void GLES2DecoderTestBase::InitDecoderWithWorkarounds(
EXPECT_CALL(*gl_, BindBuffer(GL_ARRAY_BUFFER, 0))
.Times(1)
.RetiresOnSaturation();
- EXPECT_CALL(*gl_, GenBuffersARB(arraysize(fixed_attrib_buffer_id), _))
+ EXPECT_CALL(*gl_, GenBuffersARB(base::size(fixed_attrib_buffer_id), _))
.WillOnce(SetArrayArgument<1>(
fixed_attrib_buffer_id,
- fixed_attrib_buffer_id + arraysize(fixed_attrib_buffer_id)))
+ fixed_attrib_buffer_id + base::size(fixed_attrib_buffer_id)))
.RetiresOnSaturation();
for (GLint tt = 0; tt < TestHelper::kNumTextureUnits; ++tt) {
@@ -424,13 +422,13 @@ void GLES2DecoderTestBase::InitDecoderWithWorkarounds(
};
EXPECT_CALL(*gl_, GetIntegerv(GL_MAX_VIEWPORT_DIMS, _))
.WillOnce(SetArrayArgument<1>(
- max_viewport_dims, max_viewport_dims + arraysize(max_viewport_dims)))
- .RetiresOnSaturation();
+ max_viewport_dims, max_viewport_dims + base::size(max_viewport_dims)))
+ .RetiresOnSaturation();
static GLfloat line_width_range[] = { 1.0f, 2.0f };
EXPECT_CALL(*gl_, GetFloatv(GL_ALIASED_LINE_WIDTH_RANGE, _))
.WillOnce(SetArrayArgument<1>(
- line_width_range, line_width_range + arraysize(line_width_range)))
+ line_width_range, line_width_range + base::size(line_width_range)))
.RetiresOnSaturation();
if (group_->feature_info()->feature_flags().ext_window_rectangles) {
@@ -1057,10 +1055,10 @@ void GLES2DecoderTestBase::SetupShaderForUniform(GLenum uniform_type) {
const GLuint kServiceVertexShaderId = 6001;
const GLuint kClientFragmentShaderId = 5002;
const GLuint kServiceFragmentShaderId = 6002;
- SetupShader(attribs, arraysize(attribs), uniforms, arraysize(uniforms),
- client_program_id_, kServiceProgramId,
- kClientVertexShaderId, kServiceVertexShaderId,
- kClientFragmentShaderId, kServiceFragmentShaderId);
+ SetupShader(attribs, base::size(attribs), uniforms, base::size(uniforms),
+ client_program_id_, kServiceProgramId, kClientVertexShaderId,
+ kServiceVertexShaderId, kClientFragmentShaderId,
+ kServiceFragmentShaderId);
EXPECT_CALL(*gl_, UseProgram(kServiceProgramId))
.Times(1)
@@ -1968,10 +1966,10 @@ void GLES2DecoderTestBase::SetupDefaultProgram() {
kUniform8FakeLocation, kUniform8RealLocation,
kUniform8DesiredLocation },
};
- SetupShader(attribs, arraysize(attribs), uniforms, arraysize(uniforms),
- client_program_id_, kServiceProgramId,
- client_vertex_shader_id_, kServiceVertexShaderId,
- client_fragment_shader_id_, kServiceFragmentShaderId);
+ SetupShader(attribs, base::size(attribs), uniforms, base::size(uniforms),
+ client_program_id_, kServiceProgramId, client_vertex_shader_id_,
+ kServiceVertexShaderId, client_fragment_shader_id_,
+ kServiceFragmentShaderId);
}
{
@@ -2014,10 +2012,10 @@ void GLES2DecoderTestBase::SetupCubemapProgram() {
kUniform7FakeLocation, kUniform7RealLocation,
kUniform7DesiredLocation },
};
- SetupShader(attribs, arraysize(attribs), uniforms, arraysize(uniforms),
- client_program_id_, kServiceProgramId,
- client_vertex_shader_id_, kServiceVertexShaderId,
- client_fragment_shader_id_, kServiceFragmentShaderId);
+ SetupShader(attribs, base::size(attribs), uniforms, base::size(uniforms),
+ client_program_id_, kServiceProgramId, client_vertex_shader_id_,
+ kServiceVertexShaderId, client_fragment_shader_id_,
+ kServiceFragmentShaderId);
}
{
@@ -2060,10 +2058,10 @@ void GLES2DecoderTestBase::SetupSamplerExternalProgram() {
kUniform7FakeLocation, kUniform7RealLocation,
kUniform7DesiredLocation },
};
- SetupShader(attribs, arraysize(attribs), uniforms, arraysize(uniforms),
- client_program_id_, kServiceProgramId,
- client_vertex_shader_id_, kServiceVertexShaderId,
- client_fragment_shader_id_, kServiceFragmentShaderId);
+ SetupShader(attribs, base::size(attribs), uniforms, base::size(uniforms),
+ client_program_id_, kServiceProgramId, client_vertex_shader_id_,
+ kServiceVertexShaderId, client_fragment_shader_id_,
+ kServiceFragmentShaderId);
}
{
@@ -2281,7 +2279,7 @@ void GLES2DecoderTestBase::SetupIndexBuffer() {
client_element_buffer_id_,
kServiceElementBufferId);
static const GLshort indices[] = {100, 1, 2, 3, 4, 5, 6, 7, 100, 9};
- static_assert(arraysize(indices) == kNumIndices,
+ static_assert(base::size(indices) == kNumIndices,
"indices should have kNumIndices elements");
DoBufferData(GL_ELEMENT_ARRAY_BUFFER, sizeof(indices));
DoBufferSubData(GL_ELEMENT_ARRAY_BUFFER, 0, 2, indices);
@@ -2444,9 +2442,6 @@ void GLES2DecoderPassthroughTestBase::OnConsoleMessage(
void GLES2DecoderPassthroughTestBase::CacheShader(const std::string& key,
const std::string& shader) {}
void GLES2DecoderPassthroughTestBase::OnFenceSyncRelease(uint64_t release) {}
-bool GLES2DecoderPassthroughTestBase::OnWaitSyncToken(const gpu::SyncToken&) {
- return false;
-}
void GLES2DecoderPassthroughTestBase::OnDescheduleUntilFinished() {}
void GLES2DecoderPassthroughTestBase::OnRescheduleAfterFinished() {}
void GLES2DecoderPassthroughTestBase::OnSwapBuffers(uint64_t swap_id,
diff --git a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_base.h b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_base.h
index 9098e64c3f6..0ba7ee75291 100644
--- a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_base.h
+++ b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_base.h
@@ -61,7 +61,6 @@ class GLES2DecoderTestBase : public ::testing::TestWithParam<bool>,
void OnConsoleMessage(int32_t id, const std::string& message) override;
void CacheShader(const std::string& key, const std::string& shader) override;
void OnFenceSyncRelease(uint64_t release) override;
- bool OnWaitSyncToken(const gpu::SyncToken&) override;
void OnDescheduleUntilFinished() override;
void OnRescheduleAfterFinished() override;
void OnSwapBuffers(uint64_t swap_id, uint32_t flags) override;
@@ -532,9 +531,7 @@ class GLES2DecoderTestBase : public ::testing::TestWithParam<bool>,
void DoLockDiscardableTextureCHROMIUM(GLuint texture_id);
bool IsDiscardableTextureUnlocked(GLuint texture_id);
- GLvoid* BufferOffset(unsigned i) {
- return static_cast<int8_t*>(nullptr) + (i);
- }
+ GLvoid* BufferOffset(unsigned i) { return reinterpret_cast<GLvoid*>(i); }
template <typename Command, typename Result>
bool IsObjectHelper(GLuint client_id) {
@@ -853,7 +850,6 @@ class GLES2DecoderPassthroughTestBase : public testing::Test,
void OnConsoleMessage(int32_t id, const std::string& message) override;
void CacheShader(const std::string& key, const std::string& shader) override;
void OnFenceSyncRelease(uint64_t release) override;
- bool OnWaitSyncToken(const gpu::SyncToken&) override;
void OnDescheduleUntilFinished() override;
void OnRescheduleAfterFinished() override;
void OnSwapBuffers(uint64_t swap_id, uint32_t flags) override;
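
The simplified BufferOffset above uses the standard GL idiom of passing a byte offset through a pointer-typed parameter once a buffer object is bound. A self-contained sketch of the idiom with an illustrative name (AsBufferOffset is not part of this patch):

#include <cstdint>

// With a buffer object bound, glVertexAttribPointer and glDrawElements
// interpret their pointer argument as a byte offset into the bound buffer,
// so the integer is laundered through a pointer type.
inline const void* AsBufferOffset(unsigned offset) {
  return reinterpret_cast<const void*>(static_cast<uintptr_t>(offset));
}

// e.g. glVertexAttribPointer(index, 4, GL_FLOAT, GL_FALSE, stride,
//                            AsBufferOffset(16));
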
diff --git a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_context_state.cc b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_context_state.cc
index 6c11caac36f..145cbcd9200 100644
--- a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_context_state.cc
+++ b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_context_state.cc
@@ -198,7 +198,7 @@ TEST_P(GLES2DecoderRestoreStateTest, WithPreviousStateBGR) {
// Construct a previous ContextState with all texture bindings
// set to default textures.
- ContextState prev_state(nullptr, nullptr, nullptr);
+ ContextState prev_state(nullptr);
InitializeContextState(&prev_state, std::numeric_limits<uint32_t>::max(), 0);
InSequence sequence;
@@ -221,7 +221,7 @@ TEST_P(GLES2DecoderRestoreStateTest, WithPreviousState) {
// Construct a previous ContextState with all texture bindings
// set to default textures.
- ContextState prev_state(nullptr, nullptr, nullptr);
+ ContextState prev_state(nullptr);
InitializeContextState(&prev_state, std::numeric_limits<uint32_t>::max(), 0);
InSequence sequence;
@@ -251,7 +251,7 @@ TEST_P(GLES2DecoderRestoreStateTest, ActiveUnit1) {
// Construct a previous ContextState with all texture bindings
// set to default textures.
- ContextState prev_state(nullptr, nullptr, nullptr);
+ ContextState prev_state(nullptr);
InitializeContextState(&prev_state, std::numeric_limits<uint32_t>::max(), 0);
InSequence sequence;
@@ -284,7 +284,7 @@ TEST_P(GLES2DecoderRestoreStateTest, NonDefaultUnit0BGR) {
// Construct a previous ContextState with GL_TEXTURE_2D target in
// GL_TEXTURE0 unit bound to a non-default texture and the rest
// set to default textures.
- ContextState prev_state(nullptr, nullptr, nullptr);
+ ContextState prev_state(nullptr);
InitializeContextState(&prev_state, 0, kServiceTextureId);
InSequence sequence;
@@ -316,7 +316,7 @@ TEST_P(GLES2DecoderRestoreStateTest, NonDefaultUnit1BGR) {
// Construct a previous ContextState with GL_TEXTURE_2D target in
// GL_TEXTURE1 unit bound to a non-default texture and the rest
// set to default textures.
- ContextState prev_state(nullptr, nullptr, nullptr);
+ ContextState prev_state(nullptr);
InitializeContextState(&prev_state, 1, kServiceTextureId);
InSequence sequence;
@@ -353,7 +353,7 @@ TEST_P(GLES2DecoderRestoreStateTest, DefaultUnit0) {
// Construct a previous ContextState with GL_TEXTURE_2D target in
// GL_TEXTURE0 unit bound to a non-default texture and the rest
// set to default textures.
- ContextState prev_state(nullptr, nullptr, nullptr);
+ ContextState prev_state(nullptr);
InitializeContextState(&prev_state, 0, kServiceTextureId);
InSequence sequence;
@@ -383,7 +383,7 @@ TEST_P(GLES2DecoderRestoreStateTest, DefaultUnit1) {
// Construct a previous ContextState with GL_TEXTURE_2D target in
// GL_TEXTURE1 unit bound to a non-default texture and the rest
// set to default textures.
- ContextState prev_state(nullptr, nullptr, nullptr);
+ ContextState prev_state(nullptr);
InitializeContextState(&prev_state, 1, kServiceTextureId);
InSequence sequence;
@@ -447,7 +447,7 @@ TEST_P(GLES2DecoderRestoreStateTest, ES3RestoreExistingSampler) {
// Construct a previous ContextState assuming an ES3 context and with all
// texture bindings set to default textures.
- ContextState prev_state(feature_info.get(), nullptr, nullptr);
+ ContextState prev_state(feature_info.get());
InitializeContextState(&prev_state, std::numeric_limits<uint32_t>::max(), 0);
InSequence sequence;
@@ -468,7 +468,7 @@ TEST_P(GLES2DecoderRestoreStateTest, ES3RestoreZeroSampler) {
// Construct a previous ContextState assuming an ES3 context and with all
// texture bindings set to default textures.
SamplerManager sampler_manager(feature_info.get());
- ContextState prev_state(feature_info.get(), nullptr, nullptr);
+ ContextState prev_state(feature_info.get());
InitializeContextState(&prev_state, std::numeric_limits<uint32_t>::max(), 0);
// Set up a sampler in the previous state. The client_id and service_id
// don't matter except that they're non-zero.
diff --git a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_extensions.cc b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_extensions.cc
index f700b991d6b..015fdbbb0f0 100644
--- a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_extensions.cc
+++ b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_extensions.cc
@@ -8,6 +8,7 @@
#include <stdint.h>
#include "base/command_line.h"
+#include "base/stl_util.h"
#include "gpu/command_buffer/common/gles2_cmd_format.h"
#include "gpu/command_buffer/common/gles2_cmd_utils.h"
#include "gpu/command_buffer/service/gles2_cmd_decoder_unittest.h"
@@ -1377,7 +1378,7 @@ TEST_P(GLES2DecoderTestWithCHROMIUMPathRendering,
const GLuint kPaths[] = {client_path_id_, client_path_id_ + 5,
client_path_id_, client_path_id_ + 18};
- const GLsizei kPathCount = arraysize(kPaths);
+ const GLsizei kPathCount = base::size(kPaths);
struct {
GLenum fill_mode;
@@ -1393,7 +1394,7 @@ TEST_P(GLES2DecoderTestWithCHROMIUMPathRendering,
GLuint* paths = GetSharedMemoryAs<GLuint*>();
- for (size_t i = 0; i < arraysize(testcases); ++i) {
+ for (size_t i = 0; i < base::size(testcases); ++i) {
memcpy(paths, kPaths, sizeof(kPaths));
sfi_cmd.Init(kPathCount, GL_UNSIGNED_INT, shared_memory_id_,
shared_memory_offset_, 0, testcases[i].fill_mode,
@@ -1421,7 +1422,7 @@ TEST_P(GLES2DecoderTestWithCHROMIUMPathRendering,
const GLuint kPaths[] = {client_path_id_, client_path_id_ + 5,
client_path_id_, client_path_id_ + 18};
- const GLsizei kPathCount = arraysize(kPaths);
+ const GLsizei kPathCount = base::size(kPaths);
static const GLenum kFillModes[] = {GL_INVERT, GL_COUNT_UP_CHROMIUM,
GL_COUNT_DOWN_CHROMIUM};
@@ -1429,7 +1430,7 @@ TEST_P(GLES2DecoderTestWithCHROMIUMPathRendering,
GLuint* paths = GetSharedMemoryAs<GLuint*>();
- for (size_t i = 0; i < arraysize(kFillModes); ++i) {
+ for (size_t i = 0; i < base::size(kFillModes); ++i) {
memcpy(paths, kPaths, sizeof(kPaths));
EXPECT_CALL(*gl_, StencilFillPathInstancedNV(kPathCount, GL_UNSIGNED_INT, _,
0, kFillModes[i], kMask,
@@ -1458,7 +1459,7 @@ TEST_P(GLES2DecoderTestWithCHROMIUMPathRendering, InstancedCalls) {
SetupExpectationsForApplyingDefaultDirtyState();
const GLuint kPaths[] = {0, client_path_id_, 15, client_path_id_};
- const GLsizei kPathCount = arraysize(kPaths);
+ const GLsizei kPathCount = base::size(kPaths);
// The path base will be client_path_id_, and so 0 is a
// valid path.
@@ -1561,7 +1562,7 @@ TEST_P(GLES2DecoderTestWithCHROMIUMPathRendering, InstancedCalls) {
};
- for (size_t i = 0; i < arraysize(testcases); ++i) {
+ for (size_t i = 0; i < base::size(testcases); ++i) {
SCOPED_TRACE(testing::Message() << "InstancedCalls testcase " << i);
CallAllInstancedCommands(testcases[i]);
}
@@ -1569,7 +1570,7 @@ TEST_P(GLES2DecoderTestWithCHROMIUMPathRendering, InstancedCalls) {
TEST_P(GLES2DecoderTestWithCHROMIUMPathRendering, InstancedNoCalls) {
const GLuint kPaths[] = {1, client_path_id_, 5, client_path_id_};
- const GLsizei kPathCount = arraysize(kPaths);
+ const GLsizei kPathCount = base::size(kPaths);
const GLenum kFillMode = GL_INVERT;
const GLuint kMask = 0x80;
@@ -1634,7 +1635,7 @@ TEST_P(GLES2DecoderTestWithCHROMIUMPathRendering, InstancedNoCalls) {
GL_NO_ERROR, false},
};
- for (size_t i = 0; i < arraysize(testcases); ++i) {
+ for (size_t i = 0; i < base::size(testcases); ++i) {
SCOPED_TRACE(testing::Message() << "InstancedNoCalls testcase " << i);
CallAllInstancedCommands(testcases[i]);
}
@@ -1642,7 +1643,7 @@ TEST_P(GLES2DecoderTestWithCHROMIUMPathRendering, InstancedNoCalls) {
TEST_P(GLES2DecoderTestWithCHROMIUMPathRendering, InstancedInvalidSHMValues) {
const GLuint kPaths[] = {1, client_path_id_, 5, client_path_id_};
- const GLsizei kPathCount = arraysize(kPaths);
+ const GLsizei kPathCount = base::size(kPaths);
GLfloat transform_values[12 * kPathCount];
for (GLsizei i = 0; i < kPathCount; ++i) {
for (int j = 0; j < 12; ++j) {
diff --git a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_framebuffers.cc b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_framebuffers.cc
index f15105d0bac..ce62a0a132b 100644
--- a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_framebuffers.cc
+++ b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_framebuffers.cc
@@ -11,6 +11,7 @@
#include <memory>
#include "base/command_line.h"
+#include "base/stl_util.h"
#include "base/strings/string_number_conversions.h"
#include "gpu/command_buffer/common/gles2_cmd_format.h"
#include "gpu/command_buffer/common/gles2_cmd_utils.h"
@@ -18,7 +19,6 @@
#include "gpu/command_buffer/service/context_state.h"
#include "gpu/command_buffer/service/gl_surface_mock.h"
#include "gpu/command_buffer/service/gles2_cmd_decoder_unittest.h"
-
#include "gpu/command_buffer/service/gpu_switches.h"
#include "gpu/command_buffer/service/image_manager.h"
#include "gpu/command_buffer/service/mailbox_manager.h"
@@ -1146,7 +1146,7 @@ TEST_P(GLES2DecoderTest, ReadPixelsOutOfRange) {
}, // completely off right
};
- for (size_t tt = 0; tt < arraysize(tests); ++tt) {
+ for (size_t tt = 0; tt < base::size(tests); ++tt) {
CheckReadPixelsOutOfRange(
tests[tt][0], tests[tt][1], tests[tt][2], tests[tt][3], tt == 0);
}
@@ -3263,7 +3263,7 @@ TEST_P(GLES2DecoderTest, DrawBuffersEXTMainFramebuffer) {
DrawBuffersEXTImmediate& cmd = *GetImmediateAs<DrawBuffersEXTImmediate>();
{
const GLenum bufs[] = {GL_BACK};
- const GLsizei count = arraysize(bufs);
+ const GLsizei count = base::size(bufs);
cmd.Init(count, bufs);
EXPECT_CALL(*gl_, DrawBuffersARB(count, Pointee(GL_BACK)))
@@ -3284,7 +3284,7 @@ TEST_P(GLES2DecoderTest, DrawBuffersEXTMainFramebuffer) {
}
{
const GLenum bufs[] = {GL_BACK, GL_NONE};
- const GLsizei count = arraysize(bufs);
+ const GLsizei count = base::size(bufs);
cmd.Init(count, bufs);
EXPECT_CALL(*gl_, DrawBuffersARB(_, _)).Times(0).RetiresOnSaturation();
diff --git a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_programs.cc b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_programs.cc
index 73b997ef6b1..6a14f508f08 100644
--- a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_programs.cc
+++ b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_programs.cc
@@ -8,6 +8,7 @@
#include <stdint.h>
#include "base/command_line.h"
+#include "base/stl_util.h"
#include "base/strings/string_number_conversions.h"
#include "gpu/command_buffer/common/gles2_cmd_format.h"
#include "gpu/command_buffer/common/gles2_cmd_utils.h"
@@ -829,7 +830,7 @@ TEST_P(GLES3DecoderWithShaderTest, GetActiveUniformBlockivSucceeds) {
GL_UNIFORM_BLOCK_REFERENCED_BY_VERTEX_SHADER,
GL_UNIFORM_BLOCK_REFERENCED_BY_FRAGMENT_SHADER,
};
- for (size_t ii = 0; ii < arraysize(kPname); ++ii) {
+ for (size_t ii = 0; ii < base::size(kPname); ++ii) {
result->SetNumResults(0);
cmd.Init(client_program_id_,
0,
@@ -1070,7 +1071,7 @@ TEST_P(GLES3DecoderWithShaderTest, GetUniformIndicesSucceeds) {
const char kName0[] = "Cow";
const char kName1[] = "Chicken";
const char* kNames[] = { kName0, kName1 };
- const size_t kCount = arraysize(kNames);
+ const size_t kCount = base::size(kNames);
const char kValidStrEnd = 0;
const GLuint kIndices[] = { 1, 2 };
SetBucketAsCStrings(kBucketId, kCount, kNames, kCount, kValidStrEnd);
@@ -1103,7 +1104,7 @@ TEST_P(GLES3DecoderWithShaderTest, GetUniformIndicesBadProgramFails) {
const char kName0[] = "Cow";
const char kName1[] = "Chicken";
const char* kNames[] = { kName0, kName1 };
- const size_t kCount = arraysize(kNames);
+ const size_t kCount = base::size(kNames);
const char kValidStrEnd = 0;
SetBucketAsCStrings(kBucketId, kCount, kNames, kCount, kValidStrEnd);
GetUniformIndices::Result* result =
@@ -1132,7 +1133,7 @@ TEST_P(GLES3DecoderWithShaderTest, GetUniformIndicesBadParamsFails) {
const char kName0[] = "Cow";
const char kName1[] = "Chicken";
const char* kNames[] = { kName0, kName1 };
- const size_t kCount = arraysize(kNames);
+ const size_t kCount = base::size(kNames);
const char kValidStrEnd = 0;
const GLuint kIndices[] = { 1, 2 };
SetBucketAsCStrings(kBucketId, kCount, kNames, kCount, kValidStrEnd);
@@ -1162,7 +1163,7 @@ TEST_P(GLES3DecoderWithShaderTest, GetUniformIndicesResultNotInitFails) {
const char kName0[] = "Cow";
const char kName1[] = "Chicken";
const char* kNames[] = { kName0, kName1 };
- const size_t kCount = arraysize(kNames);
+ const size_t kCount = base::size(kNames);
const char kValidStrEnd = 0;
SetBucketAsCStrings(kBucketId, kCount, kNames, kCount, kValidStrEnd);
GetUniformIndices::Result* result =
@@ -1178,7 +1179,7 @@ TEST_P(GLES3DecoderWithShaderTest, GetUniformIndicesBadSharedMemoryFails) {
const char kName0[] = "Cow";
const char kName1[] = "Chicken";
const char* kNames[] = { kName0, kName1 };
- const size_t kCount = arraysize(kNames);
+ const size_t kCount = base::size(kNames);
const char kValidStrEnd = 0;
SetBucketAsCStrings(kBucketId, kCount, kNames, kCount, kValidStrEnd);
GetUniformIndices::Result* result =
@@ -1200,7 +1201,7 @@ TEST_P(GLES3DecoderWithShaderTest, GetActiveUniformsivSucceeds) {
const uint32_t kBucketId = 123;
const GLuint kIndices[] = { 1, 2 };
const GLint kResults[] = { 1976, 321 };
- const size_t kCount = arraysize(kIndices);
+ const size_t kCount = base::size(kIndices);
SetBucketData(kBucketId, kIndices, sizeof(GLuint) * kCount);
GetActiveUniformsiv::Result* result =
static_cast<GetActiveUniformsiv::Result*>(shared_memory_address_);
@@ -1227,7 +1228,7 @@ TEST_P(GLES3DecoderWithShaderTest, GetActiveUniformsivSucceeds) {
TEST_P(GLES3DecoderWithShaderTest, GetActiveUniformsivBadProgramFails) {
const uint32_t kBucketId = 123;
const GLuint kIndices[] = { 1, 2 };
- const size_t kCount = arraysize(kIndices);
+ const size_t kCount = base::size(kIndices);
SetBucketData(kBucketId, kIndices, sizeof(GLuint) * kCount);
GetActiveUniformsiv::Result* result =
static_cast<GetActiveUniformsiv::Result*>(shared_memory_address_);
@@ -1254,7 +1255,7 @@ TEST_P(GLES3DecoderWithShaderTest, GetActiveUniformsivBadProgramFails) {
TEST_P(GLES3DecoderWithShaderTest, GetActiveUniformsivBadParamsFails) {
const uint32_t kBucketId = 123;
const GLuint kIndices[] = { 1, 100 };
- const size_t kCount = arraysize(kIndices);
+ const size_t kCount = base::size(kIndices);
SetBucketData(kBucketId, kIndices, sizeof(GLuint) * kCount);
GetActiveUniformsiv::Result* result =
static_cast<GetActiveUniformsiv::Result*>(shared_memory_address_);
@@ -1270,7 +1271,7 @@ TEST_P(GLES3DecoderWithShaderTest, GetActiveUniformsivBadParamsFails) {
TEST_P(GLES3DecoderWithShaderTest, GetActiveUniformsivBadPnameFails) {
const uint32_t kBucketId = 123;
const GLuint kIndices[] = { 1, 2 };
- const size_t kCount = arraysize(kIndices);
+ const size_t kCount = base::size(kIndices);
SetBucketData(kBucketId, kIndices, sizeof(GLuint) * kCount);
GetActiveUniformsiv::Result* result =
static_cast<GetActiveUniformsiv::Result*>(shared_memory_address_);
@@ -1294,7 +1295,7 @@ TEST_P(GLES3DecoderWithShaderTest, GetActiveUniformsivBadPnameFails) {
TEST_P(GLES3DecoderWithShaderTest, GetActiveUniformsivResultNotInitFails) {
const uint32_t kBucketId = 123;
const GLuint kIndices[] = { 1, 2 };
- const size_t kCount = arraysize(kIndices);
+ const size_t kCount = base::size(kIndices);
SetBucketData(kBucketId, kIndices, sizeof(GLuint) * kCount);
GetActiveUniformsiv::Result* result =
static_cast<GetActiveUniformsiv::Result*>(shared_memory_address_);
@@ -1308,7 +1309,7 @@ TEST_P(GLES3DecoderWithShaderTest, GetActiveUniformsivResultNotInitFails) {
TEST_P(GLES3DecoderWithShaderTest, GetActiveUniformsivBadSharedMemoryFails) {
const uint32_t kBucketId = 123;
const GLuint kIndices[] = { 1, 2 };
- const size_t kCount = arraysize(kIndices);
+ const size_t kCount = base::size(kIndices);
SetBucketData(kBucketId, kIndices, sizeof(GLuint) * kCount);
GetActiveUniformsiv::Result* result =
static_cast<GetActiveUniformsiv::Result*>(shared_memory_address_);
@@ -2087,18 +2088,12 @@ TEST_P(GLES2DecoderManualInitTest, ClearUniformsBeforeFirstProgramUse) {
{kUniform3Name, kUniform3Size, kUniform3Type, kUniform3FakeLocation,
kUniform3RealLocation, kUniform3DesiredLocation},
};
- SetupShader(attribs,
- arraysize(attribs),
- uniforms,
- arraysize(uniforms),
- client_program_id_,
- kServiceProgramId,
- client_vertex_shader_id_,
- kServiceVertexShaderId,
- client_fragment_shader_id_,
+ SetupShader(attribs, base::size(attribs), uniforms, base::size(uniforms),
+ client_program_id_, kServiceProgramId, client_vertex_shader_id_,
+ kServiceVertexShaderId, client_fragment_shader_id_,
kServiceFragmentShaderId);
- TestHelper::SetupExpectationsForClearingUniforms(
- gl_.get(), uniforms, arraysize(uniforms));
+ TestHelper::SetupExpectationsForClearingUniforms(gl_.get(), uniforms,
+ base::size(uniforms));
}
{
diff --git a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_textures.cc b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_textures.cc
index a2ac700ba4a..b94cd64e5a4 100644
--- a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_textures.cc
+++ b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_textures.cc
@@ -8,6 +8,7 @@
#include <stdint.h>
#include "base/command_line.h"
+#include "base/stl_util.h"
#include "base/strings/string_number_conversions.h"
#include "gpu/command_buffer/common/gles2_cmd_format.h"
#include "gpu/command_buffer/common/gles2_cmd_utils.h"
@@ -695,7 +696,7 @@ TEST_P(GLES2DecoderManualInitTest, CopyTexImage2DUnsizedInternalFormat) {
EXPECT_CALL(*gl_, GetError()).WillRepeatedly(Return(GL_NO_ERROR));
EXPECT_CALL(*gl_, CheckFramebufferStatusEXT(_))
.WillRepeatedly(Return(GL_FRAMEBUFFER_COMPLETE));
- for (size_t i = 0; i < arraysize(kUnsizedInternalFormats); ++i) {
+ for (size_t i = 0; i < base::size(kUnsizedInternalFormats); ++i) {
// Copy from main framebuffer to texture, using the unsized internal format.
DoBindFramebuffer(GL_FRAMEBUFFER, 0, 0);
GLenum internal_format = kUnsizedInternalFormats[i];
@@ -776,7 +777,7 @@ TEST_P(GLES2DecoderManualInitTest, CopyTexImage2DUnsizedInternalFormatES3) {
EXPECT_CALL(*gl_, GetError()).WillRepeatedly(Return(GL_NO_ERROR));
EXPECT_CALL(*gl_, CheckFramebufferStatusEXT(_))
.WillRepeatedly(Return(GL_FRAMEBUFFER_COMPLETE));
- for (size_t i = 0; i < arraysize(kUnsizedInternalFormats); ++i) {
+ for (size_t i = 0; i < base::size(kUnsizedInternalFormats); ++i) {
// Copy from main framebuffer to texture, using the unsized internal format.
DoBindFramebuffer(GL_FRAMEBUFFER, 0, 0);
GLenum internal_format = kUnsizedInternalFormats[i].unsized;
@@ -1718,7 +1719,7 @@ TEST_P(GLES2DecoderManualInitTest, CompressedTexImage2DS3TCWebGL) {
},
};
- for (size_t ii = 0; ii < arraysize(test_data); ++ii) {
+ for (size_t ii = 0; ii < base::size(test_data); ++ii) {
const S3TCTestData& test = test_data[ii];
CompressedTexImage2DBucket cmd;
// test small width.
@@ -1875,7 +1876,7 @@ TEST_P(GLES2DecoderManualInitTest, CompressedTexImage2DS3TC) {
},
};
- for (size_t ii = 0; ii < arraysize(test_data); ++ii) {
+ for (size_t ii = 0; ii < base::size(test_data); ++ii) {
const S3TCTestData& test = test_data[ii];
CompressedTexImage2DBucket cmd;
// test small width.
@@ -3259,7 +3260,7 @@ class TestSharedImageBacking : public SharedImageBacking {
TEST_P(GLES2DecoderTest, CreateAndTexStorage2DSharedImageCHROMIUM) {
MemoryTypeTracker memory_tracker(memory_tracker_.get());
- Mailbox mailbox = Mailbox::Generate();
+ Mailbox mailbox = Mailbox::GenerateForSharedImage();
std::unique_ptr<SharedImageRepresentationFactoryRef> shared_image =
GetSharedImageManager()->Register(
std::make_unique<TestSharedImageBacking>(
@@ -3269,7 +3270,7 @@ TEST_P(GLES2DecoderTest, CreateAndTexStorage2DSharedImageCHROMIUM) {
CreateAndTexStorage2DSharedImageINTERNALImmediate& cmd =
*GetImmediateAs<CreateAndTexStorage2DSharedImageINTERNALImmediate>();
- cmd.Init(kNewClientId, GL_RGBA, mailbox.name);
+ cmd.Init(kNewClientId, mailbox.name);
EXPECT_EQ(error::kNoError, ExecuteImmediateCmd(cmd, sizeof(mailbox.name)));
EXPECT_EQ(GL_NO_ERROR, GetGLError());
@@ -3299,7 +3300,7 @@ TEST_P(GLES2DecoderTest,
CreateAndTexStorage2DSharedImageINTERNALImmediate& cmd =
*GetImmediateAs<CreateAndTexStorage2DSharedImageINTERNALImmediate>();
- cmd.Init(kNewClientId, GL_RGBA, mailbox.name);
+ cmd.Init(kNewClientId, mailbox.name);
EXPECT_EQ(error::kNoError, ExecuteImmediateCmd(cmd, sizeof(mailbox.name)));
// CreateAndTexStorage2DSharedImage should fail if the mailbox is invalid.
@@ -3320,7 +3321,7 @@ TEST_P(GLES2DecoderTest,
CreateAndTexStorage2DSharedImageCHROMIUMPreexistingTexture) {
// Try to create a mailbox with kNewClientId.
MemoryTypeTracker memory_tracker(memory_tracker_.get());
- Mailbox mailbox = Mailbox::Generate();
+ Mailbox mailbox = Mailbox::GenerateForSharedImage();
std::unique_ptr<SharedImageRepresentationFactoryRef> shared_image =
GetSharedImageManager()->Register(
std::make_unique<TestSharedImageBacking>(
@@ -3330,7 +3331,7 @@ TEST_P(GLES2DecoderTest,
CreateAndTexStorage2DSharedImageINTERNALImmediate& cmd =
*GetImmediateAs<CreateAndTexStorage2DSharedImageINTERNALImmediate>();
- cmd.Init(client_texture_id_, GL_RGBA, mailbox.name);
+ cmd.Init(client_texture_id_, mailbox.name);
EXPECT_EQ(error::kNoError, ExecuteImmediateCmd(cmd, sizeof(mailbox.name)));
// CreateAndTexStorage2DSharedImage should fail.
@@ -3343,7 +3344,7 @@ TEST_P(GLES2DecoderTest,
TEST_P(GLES2DecoderTest, BeginEndSharedImageAccessCHROMIUM) {
MemoryTypeTracker memory_tracker(memory_tracker_.get());
- Mailbox mailbox = Mailbox::Generate();
+ Mailbox mailbox = Mailbox::GenerateForSharedImage();
std::unique_ptr<SharedImageRepresentationFactoryRef> shared_image =
GetSharedImageManager()->Register(
std::make_unique<TestSharedImageBacking>(
@@ -3353,7 +3354,7 @@ TEST_P(GLES2DecoderTest, BeginEndSharedImageAccessCHROMIUM) {
CreateAndTexStorage2DSharedImageINTERNALImmediate& cmd =
*GetImmediateAs<CreateAndTexStorage2DSharedImageINTERNALImmediate>();
- cmd.Init(kNewClientId, GL_RGBA, mailbox.name);
+ cmd.Init(kNewClientId, mailbox.name);
EXPECT_EQ(error::kNoError, ExecuteImmediateCmd(cmd, sizeof(mailbox.name)));
EXPECT_EQ(GL_NO_ERROR, GetGLError());
@@ -3403,7 +3404,7 @@ TEST_P(GLES2DecoderTest, BeginSharedImageAccessDirectCHROMIUMNotSharedImage) {
TEST_P(GLES2DecoderTest, BeginSharedImageAccessDirectCHROMIUMCantBeginAccess) {
// Create a shared image.
MemoryTypeTracker memory_tracker(memory_tracker_.get());
- Mailbox mailbox = Mailbox::Generate();
+ Mailbox mailbox = Mailbox::GenerateForSharedImage();
std::unique_ptr<SharedImageRepresentationFactoryRef> shared_image =
GetSharedImageManager()->Register(
std::make_unique<TestSharedImageBacking>(
@@ -3413,7 +3414,7 @@ TEST_P(GLES2DecoderTest, BeginSharedImageAccessDirectCHROMIUMCantBeginAccess) {
CreateAndTexStorage2DSharedImageINTERNALImmediate& cmd =
*GetImmediateAs<CreateAndTexStorage2DSharedImageINTERNALImmediate>();
- cmd.Init(kNewClientId, GL_RGBA, mailbox.name);
+ cmd.Init(kNewClientId, mailbox.name);
EXPECT_EQ(error::kNoError, ExecuteImmediateCmd(cmd, sizeof(mailbox.name)));
EXPECT_EQ(GL_NO_ERROR, GetGLError());
diff --git a/chromium/gpu/command_buffer/service/gles2_cmd_srgb_converter.cc b/chromium/gpu/command_buffer/service/gles2_cmd_srgb_converter.cc
index 02875468a1b..78631ec29eb 100644
--- a/chromium/gpu/command_buffer/service/gles2_cmd_srgb_converter.cc
+++ b/chromium/gpu/command_buffer/service/gles2_cmd_srgb_converter.cc
@@ -378,7 +378,7 @@ void SRGBConverter::Blit(
decoder->RestoreGlobalState();
}
-void SRGBConverter::GenerateMipmap(const gles2::GLES2Decoder* decoder,
+void SRGBConverter::GenerateMipmap(gles2::GLES2Decoder* decoder,
Texture* tex,
GLenum target) {
  // This function generates mipmaps for sRGB textures.
diff --git a/chromium/gpu/command_buffer/service/gles2_cmd_srgb_converter.h b/chromium/gpu/command_buffer/service/gles2_cmd_srgb_converter.h
index db39a96481c..8518a148de1 100644
--- a/chromium/gpu/command_buffer/service/gles2_cmd_srgb_converter.h
+++ b/chromium/gpu/command_buffer/service/gles2_cmd_srgb_converter.h
@@ -7,7 +7,6 @@
#include <array>
-#include "base/containers/hash_tables.h"
#include "base/macros.h"
#include "gpu/command_buffer/service/feature_info.h"
#include "gpu/command_buffer/service/gl_utils.h"
@@ -51,7 +50,7 @@ class GPU_GLES2_EXPORT SRGBConverter {
bool encode,
bool enable_scissor_test);
- void GenerateMipmap(const gles2::GLES2Decoder* decoder,
+ void GenerateMipmap(gles2::GLES2Decoder* decoder,
Texture* tex,
GLenum target);
diff --git a/chromium/gpu/command_buffer/service/gles2_cmd_validation_implementation_autogen.h b/chromium/gpu/command_buffer/service/gles2_cmd_validation_implementation_autogen.h
index fb15d14873f..1b0ea9291cb 100644
--- a/chromium/gpu/command_buffer/service/gles2_cmd_validation_implementation_autogen.h
+++ b/chromium/gpu/command_buffer/service/gles2_cmd_validation_implementation_autogen.h
@@ -967,10 +967,7 @@ bool Validators::SwapBuffersFlagsValidator::IsValid(
const GLbitfield value) const {
switch (value) {
case 0:
- case gpu::SwapBuffersFlags::kPresentationFeedback:
case gpu::SwapBuffersFlags::kVSyncParams:
- case gpu::SwapBuffersFlags::kPresentationFeedback |
- gpu::SwapBuffersFlags::kVSyncParams:
return true;
}
return false;
diff --git a/chromium/gpu/command_buffer/service/gpu_command_buffer_memory_tracker.cc b/chromium/gpu/command_buffer/service/gpu_command_buffer_memory_tracker.cc
index 9e583d77ab9..a0c75c43e4f 100644
--- a/chromium/gpu/command_buffer/service/gpu_command_buffer_memory_tracker.cc
+++ b/chromium/gpu/command_buffer/service/gpu_command_buffer_memory_tracker.cc
@@ -15,7 +15,7 @@
// into the same call-site).
#define GPU_COMMAND_BUFFER_MEMORY_BLOCK(category) \
do { \
- size_t mb_used = size_ / (1024 * 1024); \
+ uint64_t mb_used = size_ / (1024 * 1024); \
switch (context_type_) { \
case CONTEXT_TYPE_WEBGL1: \
case CONTEXT_TYPE_WEBGL2: \
diff --git a/chromium/gpu/command_buffer/service/gpu_switches.cc b/chromium/gpu/command_buffer/service/gpu_switches.cc
index 66c3610fa8c..14cf67b6c29 100644
--- a/chromium/gpu/command_buffer/service/gpu_switches.cc
+++ b/chromium/gpu/command_buffer/service/gpu_switches.cc
@@ -76,4 +76,8 @@ const char kUseCmdDecoder[] = "use-cmd-decoder";
// Turns on rastering to SkImage with RasterDecoder.
const char kEnableRasterToSkImage[] = "enable-raster-to-sk-image";
+// Enable RasterDecoder with passthrough GLES2 command decoding.
+const char kEnablePassthroughRasterDecoder[] =
+ "enable-passthrough-raster-decoder";
+
} // namespace switches
diff --git a/chromium/gpu/command_buffer/service/gpu_switches.h b/chromium/gpu/command_buffer/service/gpu_switches.h
index 8d130e8dbd0..7fa91593549 100644
--- a/chromium/gpu/command_buffer/service/gpu_switches.h
+++ b/chromium/gpu/command_buffer/service/gpu_switches.h
@@ -37,6 +37,7 @@ GPU_EXPORT extern const char kGLShaderIntermOutput[];
GPU_EXPORT extern const char kEmulateShaderPrecision[];
GPU_EXPORT extern const char kUseCmdDecoder[];
GPU_EXPORT extern const char kEnableRasterToSkImage[];
+GPU_EXPORT extern const char kEnablePassthroughRasterDecoder[];
} // namespace switches
diff --git a/chromium/gpu/command_buffer/service/gpu_tracer.cc b/chromium/gpu/command_buffer/service/gpu_tracer.cc
index 2376e8dd9fa..667c025a17d 100644
--- a/chromium/gpu/command_buffer/service/gpu_tracer.cc
+++ b/chromium/gpu/command_buffer/service/gpu_tracer.cc
@@ -10,6 +10,7 @@
#include "base/bind.h"
#include "base/location.h"
#include "base/single_thread_task_runner.h"
+#include "base/stl_util.h"
#include "base/strings/string_util.h"
#include "base/strings/stringprintf.h"
#include "base/threading/thread_task_runner_handle.h"
@@ -31,7 +32,7 @@ constexpr const char* kGpuTraceSourceNames[] = {
"TraceCmd", // kTraceDecoder,
"Disjoint", // kTraceDisjoint, // Used internally.
};
-static_assert(NUM_TRACER_SOURCES == arraysize(kGpuTraceSourceNames),
+static_assert(NUM_TRACER_SOURCES == base::size(kGpuTraceSourceNames),
"Trace source names must match enumeration.");
TraceMarker::TraceMarker(const std::string& category, const std::string& name)
diff --git a/chromium/gpu/command_buffer/service/gr_cache_controller.cc b/chromium/gpu/command_buffer/service/gr_cache_controller.cc
index fd322168c1d..2a0b7db0e7b 100644
--- a/chromium/gpu/command_buffer/service/gr_cache_controller.cc
+++ b/chromium/gpu/command_buffer/service/gr_cache_controller.cc
@@ -6,14 +6,14 @@
#include <chrono>
-#include "gpu/command_buffer/service/raster_decoder_context_state.h"
+#include "gpu/command_buffer/service/shared_context_state.h"
#include "ui/gl/gl_context.h"
namespace gpu {
namespace raster {
GrCacheController::GrCacheController(
- RasterDecoderContextState* context_state,
+ SharedContextState* context_state,
scoped_refptr<base::SingleThreadTaskRunner> task_runner)
: context_state_(context_state), task_runner_(std::move(task_runner)) {}
@@ -21,9 +21,9 @@ GrCacheController::~GrCacheController() = default;
void GrCacheController::ScheduleGrContextCleanup() {
DCHECK(task_runner_->BelongsToCurrentThread());
- DCHECK(context_state_->context->IsCurrent(nullptr));
+ DCHECK(context_state_->IsCurrent(nullptr));
- if (!context_state_->gr_context)
+ if (!context_state_->gr_context())
return;
current_idle_id_++;
@@ -35,9 +35,8 @@ void GrCacheController::ScheduleGrContextCleanup() {
  // a long while even if it is under budget. Below we set a callback to
// purge all possible GrContext resources if the context itself is not being
// used.
- context_state_->context->DirtyVirtualContextState();
- context_state_->need_context_state_reset = true;
- context_state_->gr_context->performDeferredCleanup(
+ context_state_->set_need_context_state_reset(true);
+ context_state_->gr_context()->performDeferredCleanup(
std::chrono::seconds(kOldResourceCleanupDelaySeconds));
constexpr int kIdleCleanupDelaySeconds = 1;
@@ -52,16 +51,10 @@ void GrCacheController::ScheduleGrContextCleanup() {
void GrCacheController::PurgeGrCache(uint64_t idle_id) {
purge_gr_cache_cb_.Cancel();
- if (context_state_->context_lost)
- return;
-
- // Skip unnecessary MakeCurrent to improve
+ // We don't care which surface is current. This improves
// performance. https://crbug.com/457431
- if (!context_state_->context->IsCurrent(nullptr) &&
- !context_state_->context->MakeCurrent(context_state_->surface.get())) {
- context_state_->context_lost = true;
+ if (!context_state_->MakeCurrent(nullptr))
return;
- }
// If the idle id changed, the context was used after this callback was
// posted. Schedule another one.
@@ -70,9 +63,8 @@ void GrCacheController::PurgeGrCache(uint64_t idle_id) {
return;
}
- context_state_->context->DirtyVirtualContextState();
- context_state_->need_context_state_reset = true;
- context_state_->gr_context->freeGpuResources();
+ context_state_->set_need_context_state_reset(true);
+ context_state_->gr_context()->freeGpuResources();
}
} // namespace raster
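
The rewritten control flow amounts to an idle-id handshake: ScheduleGrContextCleanup bumps current_idle_id_ and posts a delayed purge carrying the id it saw; when the purge runs, a mismatched id means the context was used in the meantime, so it reschedules instead of freeing. A hedged, self-contained sketch of that pattern (the class name and injected callbacks are illustrative, not the Chromium API):

#include <cstdint>
#include <functional>
#include <utility>

class IdleCachePurger {
 public:
  IdleCachePurger(std::function<void(std::function<void()>)> post_delayed,
                  std::function<void()> free_resources)
      : post_delayed_(std::move(post_delayed)),
        free_resources_(std::move(free_resources)) {}

  // Call on every burst of activity.
  void OnActivity() {
    ++current_idle_id_;
    post_delayed_([this, id = current_idle_id_] { MaybePurge(id); });
  }

 private:
  void MaybePurge(uint64_t posted_id) {
    if (posted_id != current_idle_id_) {
      // Used again after the task was posted; try once more later.
      post_delayed_([this, id = current_idle_id_] { MaybePurge(id); });
      return;
    }
    free_resources_();  // Genuinely idle: purge the cache.
  }

  std::function<void(std::function<void()>)> post_delayed_;  // task runner
  std::function<void()> free_resources_;  // e.g. freeGpuResources()
  uint64_t current_idle_id_ = 0;
};
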
diff --git a/chromium/gpu/command_buffer/service/gr_cache_controller.h b/chromium/gpu/command_buffer/service/gr_cache_controller.h
index 2c0b044acd2..d207a923ec8 100644
--- a/chromium/gpu/command_buffer/service/gr_cache_controller.h
+++ b/chromium/gpu/command_buffer/service/gr_cache_controller.h
@@ -10,8 +10,10 @@
#include "gpu/gpu_gles2_export.h"
namespace gpu {
+
+class SharedContextState;
+
namespace raster {
-struct RasterDecoderContextState;
// Manages clearing the GrContext cache after a period of inactivity.
// TODO(khushalsagar): This class replicates the ContextCacheController used in
@@ -19,7 +21,7 @@ struct RasterDecoderContextState;
// gpu::Scheduler, since it can better identify when we are in an idle state.
class GPU_GLES2_EXPORT GrCacheController {
public:
- GrCacheController(RasterDecoderContextState* context_state,
+ GrCacheController(SharedContextState* context_state,
scoped_refptr<base::SingleThreadTaskRunner> task_runner);
~GrCacheController();
@@ -36,7 +38,7 @@ class GPU_GLES2_EXPORT GrCacheController {
// cache.
uint64_t current_idle_id_ = 0u;
base::CancelableOnceClosure purge_gr_cache_cb_;
- RasterDecoderContextState* context_state_;
+ SharedContextState* context_state_;
scoped_refptr<base::SingleThreadTaskRunner> task_runner_;
DISALLOW_COPY_AND_ASSIGN(GrCacheController);
diff --git a/chromium/gpu/command_buffer/service/gr_cache_controller_unittest.cc b/chromium/gpu/command_buffer/service/gr_cache_controller_unittest.cc
index 04d5c61344c..0deb744a875 100644
--- a/chromium/gpu/command_buffer/service/gr_cache_controller_unittest.cc
+++ b/chromium/gpu/command_buffer/service/gr_cache_controller_unittest.cc
@@ -4,9 +4,13 @@
#include "gpu/command_buffer/service/gr_cache_controller.h"
+#include "base/bind_helpers.h"
#include "base/test/test_mock_time_task_runner.h"
-#include "gpu/command_buffer/service/raster_decoder_context_state.h"
+#include "gpu/command_buffer/service/feature_info.h"
+#include "gpu/command_buffer/service/shared_context_state.h"
#include "gpu/config/gpu_driver_bug_workarounds.h"
+#include "gpu/config/gpu_feature_info.h"
+#include "gpu/config/gpu_preferences.h"
#include "testing/gtest/include/gtest/gtest.h"
#include "third_party/skia/include/core/SkBitmap.h"
#include "third_party/skia/include/core/SkImage.h"
@@ -32,11 +36,14 @@ class GrCacheControllerTest : public testing::Test {
share_group.get(), surface.get(), gl::GLContextAttribs());
ASSERT_TRUE(context->MakeCurrent(surface.get()));
- task_runner_ = new base::TestMockTimeTaskRunner();
- context_state_ = new raster::RasterDecoderContextState(
+ task_runner_ = base::MakeRefCounted<base::TestMockTimeTaskRunner>();
+ context_state_ = base::MakeRefCounted<SharedContextState>(
std::move(share_group), std::move(surface), std::move(context),
- false /* use_virtualized_gl_contexts */);
+ false /* use_virtualized_gl_contexts */, base::DoNothing());
context_state_->InitializeGrContext(workarounds, nullptr);
+ auto feature_info =
+ base::MakeRefCounted<gles2::FeatureInfo>(workarounds, GpuFeatureInfo());
+ context_state_->InitializeGL(GpuPreferences(), std::move(feature_info));
controller_ =
std::make_unique<GrCacheController>(context_state_.get(), task_runner_);
@@ -49,10 +56,10 @@ class GrCacheControllerTest : public testing::Test {
gl::init::ShutdownGL(false);
}
- GrContext* gr_context() { return context_state_->gr_context; }
+ GrContext* gr_context() { return context_state_->gr_context(); }
protected:
- scoped_refptr<RasterDecoderContextState> context_state_;
+ scoped_refptr<SharedContextState> context_state_;
scoped_refptr<base::TestMockTimeTaskRunner> task_runner_;
std::unique_ptr<GrCacheController> controller_;
};
diff --git a/chromium/gpu/command_buffer/service/gr_shader_cache.cc b/chromium/gpu/command_buffer/service/gr_shader_cache.cc
index 1a9bdc5f56f..9cec23a2245 100644
--- a/chromium/gpu/command_buffer/service/gr_shader_cache.cc
+++ b/chromium/gpu/command_buffer/service/gr_shader_cache.cc
@@ -4,6 +4,7 @@
#include "gpu/command_buffer/service/gr_shader_cache.h"
+#include "base/base64.h"
#include "base/trace_event/trace_event.h"
namespace gpu {
@@ -64,6 +65,7 @@ void GrShaderCache::store(const SkData& key, const SkData& data) {
void GrShaderCache::PopulateCache(const std::string& key,
const std::string& data) {
+ TRACE_EVENT0("gpu", "GrShaderCache::PopulateCache");
if (data.length() > cache_size_limit_)
return;
@@ -72,7 +74,9 @@ void GrShaderCache::PopulateCache(const std::string& key,
// If we already have this in the cache, skia may have populated it before it
  // was loaded off the disk cache. It's better to keep the latest generated
  // version than to overwrite it here.
- CacheKey cache_key(MakeData(key));
+ std::string decoded_key;
+ base::Base64Decode(key, &decoded_key);
+ CacheKey cache_key(MakeData(decoded_key));
if (store_.Get(cache_key) != store_.end())
return;
@@ -135,8 +139,10 @@ void GrShaderCache::WriteToDisk(const CacheKey& key, CacheData* data) {
return;
data->pending_disk_write = false;
- client_->StoreShader(MakeString(key.data.get()),
- MakeString(data->data.get()));
+
+ std::string encoded_key;
+ base::Base64Encode(MakeString(key.data.get()), &encoded_key);
+ client_->StoreShader(encoded_key, MakeString(data->data.get()));
}
void GrShaderCache::EnforceLimits(size_t size_needed) {
@@ -149,6 +155,9 @@ void GrShaderCache::EnforceLimits(size_t size_needed) {
GrShaderCache::ScopedCacheUse::ScopedCacheUse(GrShaderCache* cache,
int32_t client_id)
: cache_(cache) {
+ DCHECK_EQ(cache_->current_client_id_, kInvalidClientId);
+ DCHECK_NE(client_id, kInvalidClientId);
+
cache_->current_client_id_ = client_id;
}
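
Net effect of these hunks: the binary SkData key is base64-encoded before being handed to the client for disk storage, and PopulateCache decodes it again before the lookup, so keys round-trip cleanly through string-based caches. Condensed shape of the round-trip (the free functions here are illustrative; the patch inlines these calls):

#include <string>

#include "base/base64.h"

// Writing toward the disk cache: binary key -> base64 text.
std::string EncodeKey(const std::string& raw_key) {
  std::string encoded_key;
  base::Base64Encode(raw_key, &encoded_key);
  return encoded_key;
}

// Loading back in PopulateCache: base64 text -> binary key. Note the hunk
// above ignores Base64Decode's bool result, which leaves decoded_key empty
// on malformed input.
std::string DecodeKey(const std::string& encoded_key) {
  std::string decoded_key;
  base::Base64Decode(encoded_key, &decoded_key);
  return decoded_key;
}
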
diff --git a/chromium/gpu/command_buffer/service/gr_shader_cache_unittest.cc b/chromium/gpu/command_buffer/service/gr_shader_cache_unittest.cc
index 70879af1de8..68b15106256 100644
--- a/chromium/gpu/command_buffer/service/gr_shader_cache_unittest.cc
+++ b/chromium/gpu/command_buffer/service/gr_shader_cache_unittest.cc
@@ -4,6 +4,7 @@
#include "gpu/command_buffer/service/gr_shader_cache.h"
+#include "base/base64.h"
#include "testing/gtest/include/gtest/gtest.h"
namespace gpu {
@@ -66,7 +67,9 @@ TEST_F(GrShaderCacheTest, LoadedFromDisk) {
std::string key_str(static_cast<const char*>(key->data()), key->size());
std::string shader_str(static_cast<const char*>(shader->data()),
shader->size());
- cache_.PopulateCache(key_str, shader_str);
+ std::string encoded_key;
+ base::Base64Encode(key_str, &encoded_key);
+ cache_.PopulateCache(encoded_key, shader_str);
{
GrShaderCache::ScopedCacheUse cache_use(&cache_, regular_client_id);
auto cached_shader = cache_.load(*key);
diff --git a/chromium/gpu/command_buffer/service/id_manager.h b/chromium/gpu/command_buffer/service/id_manager.h
index b855282317f..5f3c2737070 100644
--- a/chromium/gpu/command_buffer/service/id_manager.h
+++ b/chromium/gpu/command_buffer/service/id_manager.h
@@ -5,7 +5,8 @@
#ifndef GPU_COMMAND_BUFFER_SERVICE_ID_MANAGER_H_
#define GPU_COMMAND_BUFFER_SERVICE_ID_MANAGER_H_
-#include "base/containers/hash_tables.h"
+#include <unordered_map>
+
#include "base/macros.h"
#include "gpu/command_buffer/service/gl_utils.h"
#include "gpu/gpu_gles2_export.h"
@@ -38,7 +39,7 @@ class GPU_GLES2_EXPORT IdManager {
bool GetClientId(GLuint service_id, GLuint* client_id);
private:
- typedef base::hash_map<GLuint, GLuint> MapType;
+ typedef std::unordered_map<GLuint, GLuint> MapType;
MapType id_map_;
DISALLOW_COPY_AND_ASSIGN(IdManager);
@@ -48,4 +49,3 @@ class GPU_GLES2_EXPORT IdManager {
} // namespace gpu
#endif // GPU_COMMAND_BUFFER_SERVICE_ID_MANAGER_H_
-
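
As with image_manager.h below, this is a mechanical move off the deprecated base::hash_map wrapper onto the standard container; find/insert/erase usage stays source-compatible:

#include <unordered_map>

// Drop-in replacement for the old base::hash_map typedef.
typedef std::unordered_map<unsigned /* client id */, unsigned /* service id */>
    MapType;
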
diff --git a/chromium/gpu/command_buffer/service/image_manager.h b/chromium/gpu/command_buffer/service/image_manager.h
index 30ac82feae9..29fa832adb7 100644
--- a/chromium/gpu/command_buffer/service/image_manager.h
+++ b/chromium/gpu/command_buffer/service/image_manager.h
@@ -7,7 +7,8 @@
#include <stdint.h>
-#include "base/containers/hash_tables.h"
+#include <unordered_map>
+
#include "base/macros.h"
#include "base/memory/ref_counted.h"
#include "gpu/gpu_export.h"
@@ -30,7 +31,7 @@ class GPU_EXPORT ImageManager {
gl::GLImage* LookupImage(int32_t service_id);
private:
- typedef base::hash_map<int32_t, scoped_refptr<gl::GLImage>> GLImageMap;
+ typedef std::unordered_map<int32_t, scoped_refptr<gl::GLImage>> GLImageMap;
GLImageMap images_;
DISALLOW_COPY_AND_ASSIGN(ImageManager);
diff --git a/chromium/gpu/command_buffer/service/logger.cc b/chromium/gpu/command_buffer/service/logger.cc
index b7c6f4f527e..07fd32feca8 100644
--- a/chromium/gpu/command_buffer/service/logger.cc
+++ b/chromium/gpu/command_buffer/service/logger.cc
@@ -14,11 +14,13 @@ namespace gpu {
namespace gles2 {
Logger::Logger(const DebugMarkerManager* debug_marker_manager,
- const LogMessageCallback& callback)
+ const LogMessageCallback& callback,
+ bool disable_gl_error_limit)
: debug_marker_manager_(debug_marker_manager),
log_message_callback_(callback),
log_message_count_(0),
- log_synthesized_gl_errors_(true) {
+ log_synthesized_gl_errors_(true),
+ disable_gl_error_limit_(disable_gl_error_limit) {
Logger* this_temp = this;
this_in_hex_ = std::string("GroupMarkerNotSet(crbug.com/242999)!:") +
base::HexEncode(&this_temp, sizeof(this_temp));
@@ -28,9 +30,7 @@ Logger::~Logger() = default;
void Logger::LogMessage(
const char* filename, int line, const std::string& msg) {
- if (log_message_count_ < kMaxLogMessages ||
- base::CommandLine::ForCurrentProcess()->HasSwitch(
- switches::kDisableGLErrorLimit)) {
+ if (log_message_count_ < kMaxLogMessages || disable_gl_error_limit_) {
std::string prefixed_msg(std::string("[") + GetLogPrefix() + "]" + msg);
++log_message_count_;
// LOG this unless logging is turned off as any chromium code that
diff --git a/chromium/gpu/command_buffer/service/logger.h b/chromium/gpu/command_buffer/service/logger.h
index 68828107dc5..d5f6a5a5a3a 100644
--- a/chromium/gpu/command_buffer/service/logger.h
+++ b/chromium/gpu/command_buffer/service/logger.h
@@ -28,7 +28,8 @@ class GPU_GLES2_EXPORT Logger {
using LogMessageCallback = base::RepeatingCallback<void(const std::string&)>;
Logger(const DebugMarkerManager* debug_marker_manager,
- const LogMessageCallback& callback);
+ const LogMessageCallback& callback,
+ bool disable_gl_error_limit);
~Logger();
void LogMessage(const char* filename, int line, const std::string& msg);
@@ -50,6 +51,7 @@ class GPU_GLES2_EXPORT Logger {
int log_message_count_;
bool log_synthesized_gl_errors_;
+ bool disable_gl_error_limit_;
DISALLOW_COPY_AND_ASSIGN(Logger);
};
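
This Logger change hoists the --disable-gl-error-limit command-line lookup out of the per-message path: the embedder resolves the switch once and passes a bool to the constructor. The same hoisting pattern as a standalone sketch (class and constant names are stand-ins):

class RateLimitedLog {
 public:
  explicit RateLimitedLog(bool disable_limit)
      : disable_limit_(disable_limit) {}

  bool ShouldLog() {
    // Cheap per-message check; no command-line parsing on the hot path.
    return message_count_++ < kMaxMessages || disable_limit_;
  }

 private:
  static constexpr int kMaxMessages = 256;  // stand-in for kMaxLogMessages
  int message_count_ = 0;
  const bool disable_limit_;
};
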
diff --git a/chromium/gpu/command_buffer/service/memory_program_cache.h b/chromium/gpu/command_buffer/service/memory_program_cache.h
index 04e45845178..06b2e64fa0b 100644
--- a/chromium/gpu/command_buffer/service/memory_program_cache.h
+++ b/chromium/gpu/command_buffer/service/memory_program_cache.h
@@ -11,12 +11,12 @@
#include <memory>
#include <string>
-#include "base/containers/hash_tables.h"
#include "base/containers/mru_cache.h"
#include "base/macros.h"
#include "base/memory/ref_counted.h"
#include "gpu/command_buffer/service/decoder_client.h"
#include "gpu/command_buffer/service/program_cache.h"
+#include "gpu/command_buffer/service/shader_translator.h"
namespace gpu {
diff --git a/chromium/gpu/command_buffer/service/memory_program_cache_unittest.cc b/chromium/gpu/command_buffer/service/memory_program_cache_unittest.cc
index d221cb7fdec..0aa22b27103 100644
--- a/chromium/gpu/command_buffer/service/memory_program_cache_unittest.cc
+++ b/chromium/gpu/command_buffer/service/memory_program_cache_unittest.cc
@@ -94,7 +94,6 @@ class MemoryProgramCacheTest : public GpuServiceTest, public DecoderClient {
shader_cache_shader_ = shader;
}
void OnFenceSyncRelease(uint64_t release) override {}
- bool OnWaitSyncToken(const gpu::SyncToken&) override { return false; }
void OnDescheduleUntilFinished() override {}
void OnRescheduleAfterFinished() override {}
void OnSwapBuffers(uint64_t swap_id, uint32_t flags) override {}
diff --git a/chromium/gpu/command_buffer/service/mocks.h b/chromium/gpu/command_buffer/service/mocks.h
index 1af73c8c16c..b55ae6cda81 100644
--- a/chromium/gpu/command_buffer/service/mocks.h
+++ b/chromium/gpu/command_buffer/service/mocks.h
@@ -17,6 +17,7 @@
#include <vector>
#include "base/logging.h"
+#include "gpu/command_buffer/common/cmd_buffer_common.h"
#include "gpu/command_buffer/service/async_api_interface.h"
#include "gpu/command_buffer/service/memory_tracking.h"
#include "gpu/command_buffer/service/program_cache.h"
diff --git a/chromium/gpu/command_buffer/service/multi_draw_manager.cc b/chromium/gpu/command_buffer/service/multi_draw_manager.cc
new file mode 100644
index 00000000000..498ed6adfed
--- /dev/null
+++ b/chromium/gpu/command_buffer/service/multi_draw_manager.cc
@@ -0,0 +1,229 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "gpu/command_buffer/service/multi_draw_manager.h"
+
+#include <algorithm>
+
+#include "base/logging.h"
+#include "base/numerics/checked_math.h"
+
+namespace gpu {
+namespace gles2 {
+
+MultiDrawManager::ResultData::ResultData()
+ : draw_function(DrawFunction::None) {}
+
+MultiDrawManager::ResultData::ResultData(ResultData&& rhs)
+ : draw_function(rhs.draw_function),
+ drawcount(rhs.drawcount),
+ mode(rhs.mode),
+ type(rhs.type),
+ firsts(std::move(rhs.firsts)),
+ counts(std::move(rhs.counts)),
+ offsets(std::move(rhs.offsets)),
+ indices(std::move(rhs.indices)),
+ instance_counts(std::move(rhs.instance_counts)) {
+ rhs.draw_function = DrawFunction::None;
+}
+
+MultiDrawManager::ResultData& MultiDrawManager::ResultData::operator=(
+ ResultData&& rhs) {
+ if (&rhs == this) {
+ return *this;
+ }
+ draw_function = rhs.draw_function;
+ drawcount = rhs.drawcount;
+ mode = rhs.mode;
+ type = rhs.type;
+ std::swap(firsts, rhs.firsts);
+ std::swap(counts, rhs.counts);
+ std::swap(offsets, rhs.offsets);
+ std::swap(indices, rhs.indices);
+ std::swap(instance_counts, rhs.instance_counts);
+
+ rhs.draw_function = DrawFunction::None;
+ return *this;
+}
+
+MultiDrawManager::ResultData::~ResultData() {}
+
+MultiDrawManager::MultiDrawManager(IndexStorageType index_type)
+ : current_draw_offset_(0), index_type_(index_type), result_() {}
+
+bool MultiDrawManager::Begin(GLsizei drawcount) {
+ result_.drawcount = drawcount;
+ current_draw_offset_ = 0;
+ if (result_.draw_function != DrawFunction::None) {
+ NOTREACHED();
+ return false;
+ }
+ return true;
+}
+
+bool MultiDrawManager::End(ResultData* result) {
+ DCHECK(result);
+
+ if (result_.draw_function == DrawFunction::None ||
+ current_draw_offset_ != result_.drawcount) {
+ return false;
+ }
+ *result = std::move(result_);
+ return true;
+}
+
+bool MultiDrawManager::MultiDrawArrays(GLenum mode,
+ const GLint* firsts,
+ const GLsizei* counts,
+ GLsizei drawcount) {
+ if (!EnsureDrawArraysFunction(DrawFunction::DrawArrays, mode) ||
+ base::CheckAdd(current_draw_offset_, drawcount).ValueOrDie() >
+ result_.drawcount) {
+ NOTREACHED();
+ return false;
+ }
+ std::copy(firsts, firsts + drawcount, &result_.firsts[current_draw_offset_]);
+ std::copy(counts, counts + drawcount, &result_.counts[current_draw_offset_]);
+ current_draw_offset_ += drawcount;
+ return true;
+}
+
+bool MultiDrawManager::MultiDrawArraysInstanced(GLenum mode,
+ const GLint* firsts,
+ const GLsizei* counts,
+ const GLsizei* instance_counts,
+ GLsizei drawcount) {
+ if (!EnsureDrawArraysFunction(DrawFunction::DrawArraysInstanced, mode) ||
+ base::CheckAdd(current_draw_offset_, drawcount).ValueOrDie() >
+ result_.drawcount) {
+ NOTREACHED();
+ return false;
+ }
+ std::copy(firsts, firsts + drawcount, &result_.firsts[current_draw_offset_]);
+ std::copy(counts, counts + drawcount, &result_.counts[current_draw_offset_]);
+ std::copy(instance_counts, instance_counts + drawcount,
+ &result_.instance_counts[current_draw_offset_]);
+ current_draw_offset_ += drawcount;
+ return true;
+}
+
+bool MultiDrawManager::MultiDrawElements(GLenum mode,
+ const GLsizei* counts,
+ GLenum type,
+ const GLsizei* offsets,
+ GLsizei drawcount) {
+ if (!EnsureDrawElementsFunction(DrawFunction::DrawElements, mode, type) ||
+ base::CheckAdd(current_draw_offset_, drawcount).ValueOrDie() >
+ result_.drawcount) {
+ NOTREACHED();
+ return false;
+ }
+ std::copy(counts, counts + drawcount, &result_.counts[current_draw_offset_]);
+ switch (index_type_) {
+ case IndexStorageType::Offset:
+ std::copy(offsets, offsets + drawcount,
+ &result_.offsets[current_draw_offset_]);
+ break;
+ case IndexStorageType::Pointer:
+ std::transform(
+ offsets, offsets + drawcount, &result_.indices[current_draw_offset_],
+ [](uint32_t offset) {
+ return reinterpret_cast<void*>(static_cast<intptr_t>(offset));
+ });
+ break;
+ }
+ current_draw_offset_ += drawcount;
+ return true;
+}
+
+bool MultiDrawManager::MultiDrawElementsInstanced(
+ GLenum mode,
+ const GLsizei* counts,
+ GLenum type,
+ const GLsizei* offsets,
+ const GLsizei* instance_counts,
+ GLsizei drawcount) {
+ if (!EnsureDrawElementsFunction(DrawFunction::DrawElementsInstanced, mode,
+ type) ||
+ base::CheckAdd(current_draw_offset_, drawcount).ValueOrDie() >
+ result_.drawcount) {
+ NOTREACHED();
+ return false;
+ }
+ std::copy(counts, counts + drawcount, &result_.counts[current_draw_offset_]);
+ std::copy(instance_counts, instance_counts + drawcount,
+ &result_.instance_counts[current_draw_offset_]);
+ switch (index_type_) {
+ case IndexStorageType::Offset:
+ std::copy(offsets, offsets + drawcount,
+ &result_.offsets[current_draw_offset_]);
+ break;
+ case IndexStorageType::Pointer:
+ std::transform(
+ offsets, offsets + drawcount, &result_.indices[current_draw_offset_],
+ [](uint32_t offset) {
+ return reinterpret_cast<void*>(static_cast<intptr_t>(offset));
+ });
+ break;
+ }
+ current_draw_offset_ += drawcount;
+ return true;
+}
+
+void MultiDrawManager::ResizeArrays() {
+ switch (result_.draw_function) {
+ case DrawFunction::DrawArraysInstanced:
+ result_.instance_counts.resize(result_.drawcount);
+ FALLTHROUGH;
+ case DrawFunction::DrawArrays:
+ result_.firsts.resize(result_.drawcount);
+ result_.counts.resize(result_.drawcount);
+ break;
+ case DrawFunction::DrawElementsInstanced:
+ result_.instance_counts.resize(result_.drawcount);
+ FALLTHROUGH;
+ case DrawFunction::DrawElements:
+ result_.counts.resize(result_.drawcount);
+ switch (index_type_) {
+ case IndexStorageType::Offset:
+ result_.offsets.resize(result_.drawcount);
+ break;
+ case IndexStorageType::Pointer:
+ result_.indices.resize(result_.drawcount);
+ break;
+ }
+ break;
+ default:
+ NOTREACHED();
+ }
+}
+
+bool MultiDrawManager::EnsureDrawArraysFunction(DrawFunction draw_function,
+ GLenum mode) {
+ bool first_call = result_.draw_function == DrawFunction::None;
+ bool enums_match = result_.mode == mode;
+ if (first_call) {
+ result_.draw_function = draw_function;
+ result_.mode = mode;
+ ResizeArrays();
+ }
+ return first_call || enums_match;
+}
+
+bool MultiDrawManager::EnsureDrawElementsFunction(DrawFunction draw_function,
+ GLenum mode,
+ GLenum type) {
+ bool first_call = result_.draw_function == DrawFunction::None;
+ bool enums_match = result_.mode == mode && result_.type == type;
+ if (first_call) {
+ result_.draw_function = draw_function;
+ result_.mode = mode;
+ result_.type = type;
+ ResizeArrays();
+ }
+ return first_call || enums_match;
+}
+
+} // namespace gles2
+} // namespace gpu
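
The manager above enforces a strict Begin -> MultiDraw* -> End protocol: Begin() records the expected drawcount, each MultiDraw* call appends and validates one chunk, and End() only succeeds once exactly drawcount draws have been accumulated. A minimal caller sketch (hypothetical helper, assuming a GL header for GL_TRIANGLES; the real call sites live in the decoders):

    // Sketch only: drives MultiDrawManager the way a decoder might.
    bool DrainMultiDrawArrays(gpu::gles2::MultiDrawManager* mgr,
                              const GLint* firsts,
                              const GLsizei* counts,
                              GLsizei drawcount) {
      if (!mgr->Begin(drawcount))
        return false;
      // A client-side multi-draw may arrive split into several chunks;
      // here the whole batch arrives at once.
      if (!mgr->MultiDrawArrays(GL_TRIANGLES, firsts, counts, drawcount))
        return false;
      gpu::gles2::MultiDrawManager::ResultData result;
      if (!mgr->End(&result))  // fails unless all drawcount draws landed
        return false;
      // result.firsts / result.counts now hold the validated arrays.
      return true;
    }
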
diff --git a/chromium/gpu/command_buffer/service/multi_draw_manager.h b/chromium/gpu/command_buffer/service/multi_draw_manager.h
new file mode 100644
index 00000000000..daa42d9e8dc
--- /dev/null
+++ b/chromium/gpu/command_buffer/service/multi_draw_manager.h
@@ -0,0 +1,92 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef GPU_COMMAND_BUFFER_SERVICE_MULTI_DRAW_MANAGER_H_
+#define GPU_COMMAND_BUFFER_SERVICE_MULTI_DRAW_MANAGER_H_
+
+#include <vector>
+
+#include "gpu/gpu_gles2_export.h"
+
+// Forward-declare a few GL types to avoid including GL header files.
+typedef unsigned GLenum;
+typedef int GLsizei;
+typedef int GLint;
+
+namespace gpu {
+namespace gles2 {
+
+class GPU_GLES2_EXPORT MultiDrawManager {
+ public:
+ enum class DrawFunction {
+ None,
+ DrawArrays,
+ DrawArraysInstanced,
+ DrawElements,
+ DrawElementsInstanced,
+ };
+
+ struct ResultData {
+ DrawFunction draw_function;
+ GLsizei drawcount;
+ GLenum mode;
+ GLenum type;
+ std::vector<GLint> firsts;
+ std::vector<GLsizei> counts;
+ std::vector<GLsizei> offsets;
+ std::vector<const void*> indices;
+ std::vector<GLsizei> instance_counts;
+
+ ResultData();
+ ResultData(ResultData&& rhs);
+ ResultData& operator=(ResultData&& rhs);
+ ~ResultData();
+ };
+
+ enum class IndexStorageType {
+ Offset,
+ Pointer,
+ };
+
+ MultiDrawManager(IndexStorageType index_type);
+
+ bool Begin(GLsizei drawcount);
+ bool End(ResultData* result);
+ bool MultiDrawArrays(GLenum mode,
+ const GLint* firsts,
+ const GLsizei* counts,
+ GLsizei drawcount);
+ bool MultiDrawArraysInstanced(GLenum mode,
+ const GLint* firsts,
+ const GLsizei* counts,
+ const GLsizei* instance_counts,
+ GLsizei drawcount);
+ bool MultiDrawElements(GLenum mode,
+ const GLsizei* counts,
+ GLenum type,
+ const GLsizei* offsets,
+ GLsizei drawcount);
+ bool MultiDrawElementsInstanced(GLenum mode,
+ const GLsizei* counts,
+ GLenum type,
+ const GLsizei* offsets,
+ const GLsizei* instance_counts,
+ GLsizei drawcount);
+
+ private:
+ void ResizeArrays();
+ bool EnsureDrawArraysFunction(DrawFunction draw_function, GLenum mode);
+ bool EnsureDrawElementsFunction(DrawFunction draw_function,
+ GLenum mode,
+ GLenum type);
+
+ GLsizei current_draw_offset_;
+ IndexStorageType index_type_;
+ ResultData result_;
+};
+
+} // namespace gles2
+} // namespace gpu
+
+#endif // GPU_COMMAND_BUFFER_SERVICE_MULTI_DRAW_MANAGER_H_
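
IndexStorageType decides how element offsets are kept in ResultData: Offset stores the raw byte offsets, while Pointer pre-converts them to void* (see the std::transform in the .cc above). A hedged sketch of consuming each form; the per-draw lowering in the Offset case is illustrative, while glMultiDrawElements is the standard desktop GL entry point:

    using gpu::gles2::MultiDrawManager;

    // Sketch: consuming a DrawElements result under each storage type.
    void DispatchElements(const MultiDrawManager::ResultData& r,
                          MultiDrawManager::IndexStorageType storage) {
      switch (storage) {
        case MultiDrawManager::IndexStorageType::Offset:
          // Byte offsets into the bound element array buffer.
          for (GLsizei i = 0; i < r.drawcount; ++i) {
            glDrawElements(r.mode, r.counts[i], r.type,
                           reinterpret_cast<const void*>(
                               static_cast<intptr_t>(r.offsets[i])));
          }
          break;
        case MultiDrawManager::IndexStorageType::Pointer:
          // Already-converted void* indices, as desktop
          // glMultiDrawElements expects.
          glMultiDrawElements(r.mode, r.counts.data(), r.type,
                              r.indices.data(), r.drawcount);
          break;
      }
    }
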
diff --git a/chromium/gpu/command_buffer/service/passthrough_program_cache.h b/chromium/gpu/command_buffer/service/passthrough_program_cache.h
index 471125b20fe..cb2d05d5e52 100644
--- a/chromium/gpu/command_buffer/service/passthrough_program_cache.h
+++ b/chromium/gpu/command_buffer/service/passthrough_program_cache.h
@@ -10,6 +10,7 @@
#include "base/macros.h"
#include "gpu/command_buffer/service/decoder_context.h"
#include "gpu/command_buffer/service/program_cache.h"
+#include "ui/gl/gl_bindings.h"
namespace gpu {
diff --git a/chromium/gpu/command_buffer/service/passthrough_program_cache_unittest.cc b/chromium/gpu/command_buffer/service/passthrough_program_cache_unittest.cc
index 7ca3d738356..394f47efb96 100644
--- a/chromium/gpu/command_buffer/service/passthrough_program_cache_unittest.cc
+++ b/chromium/gpu/command_buffer/service/passthrough_program_cache_unittest.cc
@@ -44,7 +44,6 @@ class PassthroughProgramCacheTest : public GpuServiceTest,
void CacheShader(const std::string& key, const std::string& shader) override {
}
void OnFenceSyncRelease(uint64_t release) override {}
- bool OnWaitSyncToken(const gpu::SyncToken&) override { return false; }
void OnDescheduleUntilFinished() override {}
void OnRescheduleAfterFinished() override {}
void OnSwapBuffers(uint64_t swap_id, uint32_t flags) override {}
diff --git a/chromium/gpu/command_buffer/service/program_cache.cc b/chromium/gpu/command_buffer/service/program_cache.cc
index b8b4eb307aa..31bcca0ef41 100644
--- a/chromium/gpu/command_buffer/service/program_cache.cc
+++ b/chromium/gpu/command_buffer/service/program_cache.cc
@@ -16,6 +16,16 @@
namespace gpu {
namespace gles2 {
+ProgramCache::ScopedCacheUse::ScopedCacheUse(ProgramCache* cache,
+ CacheProgramCallback callback)
+ : cache_(cache) {
+ cache_->cache_program_callback_ = callback;
+}
+
+ProgramCache::ScopedCacheUse::~ScopedCacheUse() {
+ cache_->cache_program_callback_.Reset();
+}
+
ProgramCache::ProgramCache(size_t max_cache_size_bytes)
: max_size_bytes_(max_cache_size_bytes) {}
ProgramCache::~ProgramCache() = default;
@@ -184,13 +194,5 @@ void ProgramCache::HandleMemoryPressure(
}
}
-void ProgramCache::SetCacheProgramCallback(CacheProgramCallback callback) {
- cache_program_callback_ = callback;
-} // namespace gles2
-
-void ProgramCache::ResetCacheProgramCallback() {
- cache_program_callback_.Reset();
-}
-
} // namespace gles2
} // namespace gpu
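
Replacing the SetCacheProgramCallback()/ResetCacheProgramCallback() pair with an RAII guard guarantees the callback is uninstalled on every path out of the scope, including early returns. A minimal sketch, assuming OnCacheProgram is some function matching CacheProgramCallback's signature (hypothetical name):

    // Sketch: the callback lives exactly as long as the guard.
    void LinkWithCacheCallback(gpu::gles2::ProgramCache* cache) {
      gpu::gles2::ProgramCache::ScopedCacheUse use(
          cache, base::BindRepeating(&OnCacheProgram));
      // ... link programs; the cache may invoke the callback here ...
    }  // ~ScopedCacheUse resets cache_program_callback_
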
diff --git a/chromium/gpu/command_buffer/service/program_cache.h b/chromium/gpu/command_buffer/service/program_cache.h
index 4232ad52bfc..d5b0a7dc8dd 100644
--- a/chromium/gpu/command_buffer/service/program_cache.h
+++ b/chromium/gpu/command_buffer/service/program_cache.h
@@ -9,16 +9,18 @@
#include <map>
#include <string>
+#include <unordered_map>
-#include "base/containers/hash_tables.h"
#include "base/macros.h"
#include "base/memory/memory_pressure_listener.h"
#include "base/sha1.h"
-#include "gpu/command_buffer/common/gles2_cmd_format.h"
-#include "gpu/command_buffer/service/program_manager.h"
-#include "gpu/command_buffer/service/shader_manager.h"
+#include "gpu/command_buffer/common/gl2_types.h"
+#include "gpu/gpu_gles2_export.h"
namespace gpu {
+
+class DecoderClient;
+
namespace gles2 {
class Shader;
@@ -42,6 +44,18 @@ class GPU_GLES2_EXPORT ProgramCache {
PROGRAM_LOAD_SUCCESS
};
+ class GPU_GLES2_EXPORT ScopedCacheUse {
+ public:
+ ScopedCacheUse(ProgramCache* cache, CacheProgramCallback callback);
+ ~ScopedCacheUse();
+
+ ScopedCacheUse(ScopedCacheUse&&) = default;
+ ScopedCacheUse& operator=(ScopedCacheUse&& other) = default;
+
+ private:
+ ProgramCache* cache_;
+ };
+
explicit ProgramCache(size_t max_cache_size_bytes);
virtual ~ProgramCache();
@@ -95,9 +109,6 @@ class GPU_GLES2_EXPORT ProgramCache {
void HandleMemoryPressure(
base::MemoryPressureListener::MemoryPressureLevel memory_pressure_level);
- void SetCacheProgramCallback(CacheProgramCallback callback);
- void ResetCacheProgramCallback();
-
protected:
size_t max_size_bytes() const { return max_size_bytes_; }
@@ -125,8 +136,7 @@ class GPU_GLES2_EXPORT ProgramCache {
CacheProgramCallback cache_program_callback_;
private:
- typedef base::hash_map<std::string,
- LinkedProgramStatus> LinkStatusMap;
+ typedef std::unordered_map<std::string, LinkedProgramStatus> LinkStatusMap;
// called to clear the backend cache
virtual void ClearBackend() = 0;
diff --git a/chromium/gpu/command_buffer/service/program_cache_unittest.cc b/chromium/gpu/command_buffer/service/program_cache_unittest.cc
index 27bee7d5a5b..e97f5ef5450 100644
--- a/chromium/gpu/command_buffer/service/program_cache_unittest.cc
+++ b/chromium/gpu/command_buffer/service/program_cache_unittest.cc
@@ -8,6 +8,7 @@
#include "gpu/command_buffer/service/mocks.h"
#include "testing/gtest/include/gtest/gtest.h"
+#include "ui/gl/gl_bindings.h"
using ::testing::Return;
diff --git a/chromium/gpu/command_buffer/service/program_manager.cc b/chromium/gpu/command_buffer/service/program_manager.cc
index 2947d0c29fa..dfee7338097 100644
--- a/chromium/gpu/command_buffer/service/program_manager.cc
+++ b/chromium/gpu/command_buffer/service/program_manager.cc
@@ -15,10 +15,10 @@
#include <vector>
#include "base/command_line.h"
-#include "base/containers/hash_tables.h"
#include "base/logging.h"
#include "base/metrics/histogram_macros.h"
#include "base/numerics/safe_math.h"
+#include "base/stl_util.h"
#include "base/strings/string_number_conversions.h"
#include "base/strings/string_util.h"
#include "base/strings/stringprintf.h"
@@ -99,7 +99,7 @@ bool IsBuiltInFragmentVarying(const std::string& name) {
"gl_FrontFacing",
"gl_PointCoord"
};
- for (size_t ii = 0; ii < arraysize(kBuiltInVaryings); ++ii) {
+ for (size_t ii = 0; ii < base::size(kBuiltInVaryings); ++ii) {
if (name == kBuiltInVaryings[ii])
return true;
}
@@ -399,6 +399,7 @@ Program::Program(ProgramManager* manager, GLuint service_id)
valid_(false),
link_status_(false),
uniforms_cleared_(false),
+ draw_id_uniform_location_(-1),
transform_feedback_buffer_mode_(GL_NONE),
effective_transform_feedback_buffer_mode_(GL_NONE),
fragment_output_type_mask_(0u),
@@ -426,6 +427,7 @@ void Program::Reset() {
attrib_location_to_index_map_.clear();
fragment_output_type_mask_ = 0u;
fragment_output_written_mask_ = 0u;
+ draw_id_uniform_location_ = -1;
ClearVertexInputMasks();
}
@@ -562,6 +564,15 @@ void Program::UpdateTransformFeedbackInfo() {
}
}
+void Program::UpdateDrawIDUniformLocation() {
+ DCHECK(IsValid());
+ GLint fake_location = GetUniformFakeLocation("gl_DrawID");
+ draw_id_uniform_location_ = -1;
+ GLint array_index;
+ GetUniformInfoByFakeLocation(fake_location, &draw_id_uniform_location_,
+ &array_index);
+}
+
std::string Program::ProcessLogInfo(const std::string& log) {
std::string output;
re2::StringPiece input(log);
@@ -747,12 +758,31 @@ void Program::Update() {
DCHECK(length == 0 || name_buffer[length] == '\0');
std::string original_name;
GetVertexAttribData(name_buffer.get(), &original_name, &type);
- size_t location_count = size * LocationCountForAttribType(type);
- // TODO(gman): Should we check for error?
- GLint location = glGetAttribLocation(service_id_, name_buffer.get());
- num_locations = std::max(num_locations, location + location_count);
+ base::CheckedNumeric<size_t> location_count = size;
+ location_count *= LocationCountForAttribType(type);
+ size_t safe_location_count = 0;
+ if (!location_count.AssignIfValid(&safe_location_count))
+ return;
+ GLint location;
+ if (base::StartsWith(name_buffer.get(), "gl_",
+ base::CompareCase::SENSITIVE)) {
+      // Built-in attributes, for example, gl_VertexID, are still considered
+      // active, but their location is -1.
+      // However, on macOS, drivers return 0 in this case.
+ // Set |location| to -1 directly.
+ location = -1;
+ } else {
+ // TODO(gman): Should we check for error?
+ location = glGetAttribLocation(service_id_, name_buffer.get());
+ base::CheckedNumeric<size_t> max_location = location;
+ max_location += safe_location_count;
+ size_t safe_max_location = 0;
+ if (!max_location.AssignIfValid(&safe_max_location))
+ return;
+ num_locations = std::max(num_locations, safe_max_location);
+ }
attrib_infos_.push_back(
- VertexAttrib(1, type, original_name, location, location_count));
+ VertexAttrib(1, type, original_name, location, safe_location_count));
max_attrib_name_length_ = std::max(
max_attrib_name_length_, static_cast<GLsizei>(original_name.size()));
}
@@ -1029,14 +1059,14 @@ void Program::UpdateFragmentInputs() {
// Unlike when binding uniforms, we expect the driver to give correct
// names: "name" for simple variable, "name[0]" for an array.
GLsizei query_length = 0;
- GLint query_results[arraysize(kQueryProperties)] = {
+ GLint query_results[base::size(kQueryProperties)] = {
0,
};
glGetProgramResourceiv(service_id_, GL_FRAGMENT_INPUT_NV, ii,
- arraysize(kQueryProperties), kQueryProperties,
- arraysize(query_results), &query_length,
+ base::size(kQueryProperties), kQueryProperties,
+ base::size(query_results), &query_length,
query_results);
- DCHECK(query_length == arraysize(kQueryProperties));
+ DCHECK(query_length == base::size(kQueryProperties));
GLenum type = static_cast<GLenum>(query_results[1]);
GLsizei size = static_cast<GLsizei>(query_results[2]);
@@ -1881,7 +1911,7 @@ bool Program::DetectAttribLocationBindingConflicts() const {
continue;
attrib = shader->GetAttribInfo(*mapped_name);
if (attrib) {
- if (attrib->staticUse)
+ if (shader->shader_version() >= 300 || attrib->staticUse)
break;
else
attrib = nullptr;
@@ -2521,7 +2551,7 @@ bool Program::GetUniformsES3(CommonDecoder::Bucket* bucket) const {
GL_UNIFORM_IS_ROW_MAJOR,
};
const GLint kDefaultValue[] = { -1, -1, -1, -1, 0 };
- const size_t kNumPnames = arraysize(kPname);
+ const size_t kNumPnames = base::size(kPname);
std::vector<GLuint> indices(count);
for (GLsizei ii = 0; ii < count; ++ii) {
indices[ii] = ii;
@@ -2720,6 +2750,11 @@ void ProgramManager::ClearUniforms(Program* program) {
program->ClearUniforms(&zero_);
}
+void ProgramManager::UpdateDrawIDUniformLocation(Program* program) {
+ DCHECK(program);
+ program->UpdateDrawIDUniformLocation();
+}
+
int32_t ProgramManager::MakeFakeLocation(int32_t index, int32_t element) {
return index + element * 0x10000;
}
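
The draw_id_uniform_location_ plumbing added here supports ANGLE_multi_draw: after translation, gl_DrawID is exposed as an ordinary uniform (queried by name above), and the decoder refreshes it per sub-draw. A hedged sketch of such an emulation loop (illustrative, not code from this patch):

    // Sketch: |loc| would come from Program::draw_id_uniform_location().
    void EmulateMultiDrawArrays(GLint loc, GLenum mode, const GLint* firsts,
                                const GLsizei* counts, GLsizei drawcount) {
      for (GLsizei i = 0; i < drawcount; ++i) {
        if (loc != -1)
          glUniform1i(loc, i);  // the shader reads this as gl_DrawID
        glDrawArrays(mode, firsts[i], counts[i]);
      }
    }
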
diff --git a/chromium/gpu/command_buffer/service/program_manager.h b/chromium/gpu/command_buffer/service/program_manager.h
index 7bc019fa3b8..f3452ea9f19 100644
--- a/chromium/gpu/command_buffer/service/program_manager.h
+++ b/chromium/gpu/command_buffer/service/program_manager.h
@@ -429,6 +429,8 @@ class GPU_GLES2_EXPORT Program : public base::RefCounted<Program> {
return effective_transform_feedback_buffer_mode_;
}
+ GLint draw_id_uniform_location() const { return draw_id_uniform_location_; }
+
// See member declaration for details.
// The data are only valid after a successful link.
uint32_t fragment_output_type_mask() const {
@@ -510,6 +512,9 @@ class GPU_GLES2_EXPORT Program : public base::RefCounted<Program> {
// Clears all the uniforms.
void ClearUniforms(std::vector<uint8_t>* zero_buffer);
+  // Updates the draw id uniform location used by ANGLE_multi_draw.
+ void UpdateDrawIDUniformLocation();
+
// If long attribute names are mapped during shader translation, call
// glBindAttribLocation() again with the mapped names.
// This is called right before the glLink() call, but after shaders are
@@ -588,6 +593,9 @@ class GPU_GLES2_EXPORT Program : public base::RefCounted<Program> {
// True if the uniforms have been cleared.
bool uniforms_cleared_;
+ // ANGLE_multi_draw
+ GLint draw_id_uniform_location_;
+
// Log info
std::unique_ptr<std::string> log_info_;
@@ -682,6 +690,9 @@ class GPU_GLES2_EXPORT ProgramManager {
// Clears the uniforms for this program.
void ClearUniforms(Program* program);
+  // Updates the draw id location for this program, used by ANGLE_multi_draw.
+ void UpdateDrawIDUniformLocation(Program* program);
+
// Returns true if |name| has a prefix that is intended for GL built-in shader
// variables.
static bool HasBuiltInPrefix(const std::string& name);
diff --git a/chromium/gpu/command_buffer/service/program_manager_unittest.cc b/chromium/gpu/command_buffer/service/program_manager_unittest.cc
index a6a1e4caf97..095a9a112e6 100644
--- a/chromium/gpu/command_buffer/service/program_manager_unittest.cc
+++ b/chromium/gpu/command_buffer/service/program_manager_unittest.cc
@@ -11,6 +11,7 @@
#include <memory>
#include "base/command_line.h"
+#include "base/stl_util.h"
#include "base/strings/string_number_conversions.h"
#include "base/strings/string_util.h"
#include "gpu/command_buffer/common/gles2_cmd_format.h"
@@ -90,7 +91,6 @@ class ProgramManagerTestBase : public GpuServiceTest, public DecoderClient {
void CacheShader(const std::string& key, const std::string& shader) override {
}
void OnFenceSyncRelease(uint64_t release) override {}
- bool OnWaitSyncToken(const gpu::SyncToken&) override { return false; }
void OnDescheduleUntilFinished() override {}
void OnRescheduleAfterFinished() override {}
void OnSwapBuffers(uint64_t swap_id, uint32_t flags) override {}
@@ -524,7 +524,7 @@ const GLint ProgramManagerWithShaderTest::kBadUniformIndex;
#endif
const size_t ProgramManagerWithShaderTest::kNumAttribs =
- arraysize(ProgramManagerWithShaderTest::kAttribs);
+ base::size(ProgramManagerWithShaderTest::kAttribs);
ProgramManagerWithShaderTest::UniformInfo
ProgramManagerWithShaderTest::kUniforms[] = {
@@ -555,7 +555,7 @@ ProgramManagerWithShaderTest::UniformInfo
};
const size_t ProgramManagerWithShaderTest::kNumUniforms =
- arraysize(ProgramManagerWithShaderTest::kUniforms);
+ base::size(ProgramManagerWithShaderTest::kUniforms);
const char* ProgramManagerWithShaderTest::kAttrib1Name = "attrib1";
const char* ProgramManagerWithShaderTest::kAttrib2Name = "attrib2";
@@ -818,7 +818,7 @@ TEST_F(ProgramManagerWithShaderTest, GLDriverReturnsGLUnderscoreUniform) {
kUniform3NameWithArrayIndex,
},
};
- const size_t kNumUniforms = arraysize(kUniforms);
+ const size_t kNumUniforms = base::size(kUniforms);
SetupShaderExpectations(kAttribs, kNumAttribs, kUniforms, kNumUniforms,
kServiceProgramId);
Shader* vshader = shader_manager_.CreateShader(
@@ -882,7 +882,7 @@ TEST_F(ProgramManagerWithShaderTest, SimilarArrayNames) {
kUniform3Name,
},
};
- const size_t kNumUniforms = arraysize(kUniforms);
+ const size_t kNumUniforms = base::size(kUniforms);
SetupShaderExpectations(kAttribs, kNumAttribs, kUniforms, kNumUniforms,
kServiceProgramId);
Shader* vshader = shader_manager_.CreateShader(
@@ -983,8 +983,8 @@ TEST_F(ProgramManagerWithShaderTest, GLDriverReturnsWrongTypeInfo) {
kUniform3NameWithArrayIndex,
},
};
- const size_t kNumAttribs= arraysize(kAttribs);
- const size_t kNumUniforms = arraysize(kUniforms);
+ const size_t kNumAttribs = base::size(kAttribs);
+ const size_t kNumUniforms = base::size(kUniforms);
SetupShaderExpectations(kAttribs, kNumAttribs, kUniforms, kNumUniforms,
kServiceProgramId);
Program* program =
@@ -1134,8 +1134,8 @@ TEST_F(ProgramManagerWithShaderTest, ProgramInfoGetProgramInfo) {
bucket.GetDataAs<ProgramInfoHeader*>(0, sizeof(ProgramInfoHeader));
ASSERT_TRUE(header != nullptr);
EXPECT_EQ(1u, header->link_status);
- EXPECT_EQ(arraysize(kAttribs), header->num_attribs);
- EXPECT_EQ(arraysize(kUniforms), header->num_uniforms);
+ EXPECT_EQ(base::size(kAttribs), header->num_attribs);
+ EXPECT_EQ(base::size(kUniforms), header->num_uniforms);
const ProgramInput* inputs = bucket.GetDataAs<const ProgramInput*>(
sizeof(*header),
sizeof(ProgramInput) * (header->num_attribs + header->num_uniforms));
@@ -1236,23 +1236,23 @@ TEST_F(ProgramManagerWithShaderTest, ProgramInfoGetUniformBlocksValid) {
data.entry[0].binding = 0;
data.entry[0].data_size = 8;
data.entry[0].name_offset = ComputeOffset(&data, data.name0);
- data.entry[0].name_length = arraysize(data.name0);
- data.entry[0].active_uniforms = arraysize(data.indices0);
+ data.entry[0].name_length = base::size(data.name0);
+ data.entry[0].active_uniforms = base::size(data.indices0);
data.entry[0].active_uniform_offset = ComputeOffset(&data, data.indices0);
data.entry[0].referenced_by_vertex_shader = static_cast<uint32_t>(true);
data.entry[0].referenced_by_fragment_shader = static_cast<uint32_t>(false);
data.entry[1].binding = 1;
data.entry[1].data_size = 4;
data.entry[1].name_offset = ComputeOffset(&data, data.name1);
- data.entry[1].name_length = arraysize(data.name1);
- data.entry[1].active_uniforms = arraysize(data.indices1);
+ data.entry[1].name_length = base::size(data.name1);
+ data.entry[1].active_uniforms = base::size(data.indices1);
data.entry[1].active_uniform_offset = ComputeOffset(&data, data.indices1);
data.entry[1].referenced_by_vertex_shader = static_cast<uint32_t>(false);
data.entry[1].referenced_by_fragment_shader = static_cast<uint32_t>(true);
- memcpy(data.name0, kName[0], arraysize(data.name0));
+ memcpy(data.name0, kName[0], base::size(data.name0));
data.indices0[0] = kIndices[0][0];
data.indices0[1] = kIndices[0][1];
- memcpy(data.name1, kName[1], arraysize(data.name1));
+ memcpy(data.name1, kName[1], base::size(data.name1));
data.indices1[0] = kIndices[1][0];
EXPECT_CALL(*(gl_.get()),
@@ -1399,14 +1399,13 @@ TEST_F(ProgramManagerWithShaderTest,
data.entry[0].size = 1;
data.entry[0].type = GL_FLOAT_VEC2;
data.entry[0].name_offset = ComputeOffset(&data, data.name0);
- data.entry[0].name_length = arraysize(data.name0);
+ data.entry[0].name_length = base::size(data.name0);
data.entry[1].size = 2;
data.entry[1].type = GL_FLOAT;
data.entry[1].name_offset = ComputeOffset(&data, data.name1);
- data.entry[1].name_length = arraysize(data.name1);
- memcpy(data.name0, kName[0], arraysize(data.name0));
- memcpy(data.name1, kName[1], arraysize(data.name1));
-
+ data.entry[1].name_length = base::size(data.name1);
+ memcpy(data.name0, kName[0], base::size(data.name0));
+ memcpy(data.name1, kName[1], base::size(data.name1));
EXPECT_CALL(*(gl_.get()),
GetProgramiv(kServiceProgramId,
@@ -1527,7 +1526,7 @@ TEST_F(ProgramManagerWithShaderTest, ProgramInfoGetUniformsES3Valid) {
kMatrixStride,
kIsRowMajor,
};
- const size_t kNumIterations = arraysize(kPname);
+ const size_t kNumIterations = base::size(kPname);
for (size_t ii = 0; ii < kNumIterations; ++ii) {
EXPECT_CALL(*(gl_.get()),
GetActiveUniformsiv(
@@ -1553,7 +1552,7 @@ TEST_F(ProgramManagerWithShaderTest, UnusedUniformArrayElements) {
ASSERT_TRUE(program != nullptr);
// Emulate the situation that only the first element has a valid location.
// TODO(zmo): Don't assume these are in order.
- for (size_t ii = 0; ii < arraysize(kUniforms); ++ii) {
+ for (size_t ii = 0; ii < base::size(kUniforms); ++ii) {
Program::UniformInfo* uniform = const_cast<Program::UniformInfo*>(
program->GetUniformInfo(ii));
ASSERT_TRUE(uniform != nullptr);
@@ -1567,8 +1566,8 @@ TEST_F(ProgramManagerWithShaderTest, UnusedUniformArrayElements) {
bucket.GetDataAs<ProgramInfoHeader*>(0, sizeof(ProgramInfoHeader));
ASSERT_TRUE(header != nullptr);
EXPECT_EQ(1u, header->link_status);
- EXPECT_EQ(arraysize(kAttribs), header->num_attribs);
- EXPECT_EQ(arraysize(kUniforms), header->num_uniforms);
+ EXPECT_EQ(base::size(kAttribs), header->num_attribs);
+ EXPECT_EQ(base::size(kUniforms), header->num_uniforms);
const ProgramInput* inputs = bucket.GetDataAs<const ProgramInput*>(
sizeof(*header),
sizeof(ProgramInput) * (header->num_attribs + header->num_uniforms));
@@ -2010,7 +2009,7 @@ TEST_F(ProgramManagerWithShaderTest, ClearWithSamplerTypes) {
GL_SAMPLER_3D_OES,
GL_SAMPLER_2D_RECT_ARB,
};
- const size_t kNumSamplerTypes = arraysize(kSamplerTypes);
+ const size_t kNumSamplerTypes = base::size(kSamplerTypes);
for (size_t ii = 0; ii < kNumSamplerTypes; ++ii) {
static ProgramManagerWithShaderTest::AttribInfo kAttribs[] = {
{ kAttrib1Name, kAttrib1Size, kAttrib1Type, kAttrib1Location, },
@@ -2043,8 +2042,8 @@ TEST_F(ProgramManagerWithShaderTest, ClearWithSamplerTypes) {
kUniform3NameWithArrayIndex,
},
};
- const size_t kNumAttribs = arraysize(kAttribs);
- const size_t kNumUniforms = arraysize(kUniforms);
+ const size_t kNumAttribs = base::size(kAttribs);
+ const size_t kNumUniforms = base::size(kUniforms);
SetupShaderExpectations(kAttribs, kNumAttribs, kUniforms, kNumUniforms,
kServiceProgramId);
program->Link(nullptr, Program::kCountOnlyStaticallyUsed, this);
@@ -2108,8 +2107,8 @@ TEST_F(ProgramManagerWithShaderTest, BindUniformLocation) {
},
};
- const size_t kNumAttribs = arraysize(kAttribs);
- const size_t kNumUniforms = arraysize(kUniforms);
+ const size_t kNumAttribs = base::size(kAttribs);
+ const size_t kNumUniforms = base::size(kUniforms);
SetupShaderExpectations(kAttribs, kNumAttribs, kUniforms, kNumUniforms,
kServiceProgramId);
program->Link(nullptr, Program::kCountOnlyStaticallyUsed, this);
@@ -2130,7 +2129,7 @@ TEST_F(ProgramManagerWithShaderTest, ZeroSizeUniformMarkedInvalid) {
kUniform1DesiredLocation, kUniform1Name,
},
};
- const size_t kNumInvalidUniforms = arraysize(kInvalidUniforms);
+ const size_t kNumInvalidUniforms = base::size(kInvalidUniforms);
SetupShaderExpectations(kAttribs, kNumAttribs, kInvalidUniforms,
kNumInvalidUniforms, kServiceProgramId);
@@ -2520,8 +2519,9 @@ TEST_P(ProgramManagerWithPathRenderingTest, BindFragmentInputLocation) {
};
TestHelper::SetupShaderExpectationsWithVaryings(
gl_.get(), feature_info_.get(), nullptr, 0, nullptr, 0,
- kFragmentInputExpectationInfos, arraysize(kFragmentInputExpectationInfos),
- nullptr, 0, kServiceProgramId);
+ kFragmentInputExpectationInfos,
+ base::size(kFragmentInputExpectationInfos), nullptr, 0,
+ kServiceProgramId);
program->Link(nullptr, Program::kCountOnlyStaticallyUsed, this);
const Program::FragmentInputInfo* info1 =
program->GetFragmentInputInfoByFakeLocation(
@@ -2592,7 +2592,7 @@ TEST_P(ProgramManagerDualSourceBlendingES2Test, UseSecondaryFragCoord) {
int shader_version = 100;
Program* program =
SetupProgramForVariables(nullptr, 0, kFragmentVaryings,
- arraysize(kFragmentVaryings), &shader_version);
+ base::size(kFragmentVaryings), &shader_version);
const gl::GLVersionInfo& gl_version = feature_info_->gl_version_info();
if (!gl_version.is_es) {
@@ -2619,7 +2619,7 @@ TEST_P(ProgramManagerDualSourceBlendingES2Test, UseSecondaryFragData) {
int shader_version = 100;
Program* program =
SetupProgramForVariables(nullptr, 0, kFragmentVaryings,
- arraysize(kFragmentVaryings), &shader_version);
+ base::size(kFragmentVaryings), &shader_version);
const gl::GLVersionInfo& gl_version = feature_info_->gl_version_info();
if (!gl_version.is_es) {
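
The arraysize() to base::size() rewrites throughout this file are mechanical: base::size (from base/stl_util.h at this revision) is a constexpr function template rather than a macro, and it also accepts standard containers. For example (array contents illustrative):

    #include "base/stl_util.h"

    static const GLenum kTargets[] = {GL_TEXTURE_2D, GL_TEXTURE_CUBE_MAP};
    const size_t kNumTargets = base::size(kTargets);  // == 2
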
diff --git a/chromium/gpu/command_buffer/service/query_manager.h b/chromium/gpu/command_buffer/service/query_manager.h
index 1b6b8c571e2..a383f0c7d45 100644
--- a/chromium/gpu/command_buffer/service/query_manager.h
+++ b/chromium/gpu/command_buffer/service/query_manager.h
@@ -8,6 +8,8 @@
#include <stdint.h>
#include <memory>
+#include <unordered_map>
+#include <unordered_set>
#include <vector>
#include "base/atomicops.h"
@@ -245,10 +247,10 @@ class GPU_GLES2_EXPORT QueryManager {
unsigned query_count_;
// Info for each query in the system.
- using QueryMap = base::hash_map<GLuint, scoped_refptr<Query>>;
+ using QueryMap = std::unordered_map<GLuint, scoped_refptr<Query>>;
QueryMap queries_;
- using GeneratedQueryIds = base::hash_set<GLuint>;
+ using GeneratedQueryIds = std::unordered_set<GLuint>;
GeneratedQueryIds generated_query_ids_;
// A map of targets -> Query for current active queries.
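
Similarly, the deprecated base::hash_map/base::hash_set aliases from base/containers/hash_tables.h give way to the standard unordered containers, which are drop-in replacements for this usage:

    #include <string>
    #include <unordered_map>
    #include <unordered_set>

    void Example() {
      std::unordered_map<unsigned, std::string> queries;  // was base::hash_map
      std::unordered_set<unsigned> generated_ids;         // was base::hash_set
      queries[1u] = "occlusion";
      generated_ids.insert(1u);
    }
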
diff --git a/chromium/gpu/command_buffer/service/raster_cmd_validation_implementation_autogen.h b/chromium/gpu/command_buffer/service/raster_cmd_validation_implementation_autogen.h
index 71a3fa5d0f6..d7e5536439e 100644
--- a/chromium/gpu/command_buffer/service/raster_cmd_validation_implementation_autogen.h
+++ b/chromium/gpu/command_buffer/service/raster_cmd_validation_implementation_autogen.h
@@ -54,7 +54,9 @@ static const GLenum valid_texture_min_filter_mode_table[] = {
};
static const GLenum valid_texture_parameter_table[] = {
- GL_TEXTURE_MAG_FILTER, GL_TEXTURE_MIN_FILTER, GL_TEXTURE_WRAP_S,
+ GL_TEXTURE_MAG_FILTER,
+ GL_TEXTURE_MIN_FILTER,
+ GL_TEXTURE_WRAP_S,
GL_TEXTURE_WRAP_T,
};
@@ -63,7 +65,8 @@ static const GLenum valid_texture_wrap_mode_table[] = {
};
static const gfx::BufferUsage valid_gfx_buffer_usage_table[] = {
- gfx::BufferUsage::GPU_READ, gfx::BufferUsage::SCANOUT,
+ gfx::BufferUsage::GPU_READ,
+ gfx::BufferUsage::SCANOUT,
gfx::BufferUsage::GPU_READ_CPU_READ_WRITE,
gfx::BufferUsage::GPU_READ_CPU_READ_WRITE_PERSISTENT,
};
diff --git a/chromium/gpu/command_buffer/service/raster_decoder.cc b/chromium/gpu/command_buffer/service/raster_decoder.cc
index 6c158c97485..c16915d4e8b 100644
--- a/chromium/gpu/command_buffer/service/raster_decoder.cc
+++ b/chromium/gpu/command_buffer/service/raster_decoder.cc
@@ -9,14 +9,15 @@
#include <algorithm>
#include <memory>
#include <string>
-#include <unordered_map>
-#include <utility>
#include <vector>
#include "base/atomic_sequence_num.h"
+#include "base/containers/flat_map.h"
+#include "base/debug/crash_logging.h"
#include "base/logging.h"
#include "base/memory/ref_counted.h"
#include "base/memory/weak_ptr.h"
+#include "base/stl_util.h"
#include "base/strings/stringprintf.h"
#include "base/trace_event/trace_event.h"
#include "build/build_config.h"
@@ -30,33 +31,28 @@
#include "gpu/command_buffer/common/constants.h"
#include "gpu/command_buffer/common/context_result.h"
#include "gpu/command_buffer/common/debug_marker_manager.h"
-#include "gpu/command_buffer/common/gpu_memory_buffer_support.h"
#include "gpu/command_buffer/common/mailbox.h"
#include "gpu/command_buffer/common/raster_cmd_format.h"
#include "gpu/command_buffer/common/raster_cmd_ids.h"
#include "gpu/command_buffer/common/sync_token.h"
-#include "gpu/command_buffer/service/buffer_manager.h"
#include "gpu/command_buffer/service/command_buffer_service.h"
#include "gpu/command_buffer/service/context_group.h"
#include "gpu/command_buffer/service/context_state.h"
#include "gpu/command_buffer/service/decoder_client.h"
#include "gpu/command_buffer/service/error_state.h"
#include "gpu/command_buffer/service/feature_info.h"
-#include "gpu/command_buffer/service/framebuffer_manager.h"
#include "gpu/command_buffer/service/gl_stream_texture_image.h"
#include "gpu/command_buffer/service/gl_utils.h"
#include "gpu/command_buffer/service/gles2_cmd_copy_tex_image.h"
#include "gpu/command_buffer/service/gles2_cmd_copy_texture_chromium.h"
#include "gpu/command_buffer/service/gpu_tracer.h"
-#include "gpu/command_buffer/service/image_factory.h"
-#include "gpu/command_buffer/service/image_manager.h"
#include "gpu/command_buffer/service/logger.h"
#include "gpu/command_buffer/service/mailbox_manager.h"
#include "gpu/command_buffer/service/query_manager.h"
#include "gpu/command_buffer/service/raster_cmd_validation.h"
-#include "gpu/command_buffer/service/raster_decoder_context_state.h"
#include "gpu/command_buffer/service/service_font_manager.h"
#include "gpu/command_buffer/service/service_transfer_cache.h"
+#include "gpu/command_buffer/service/shared_context_state.h"
#include "gpu/command_buffer/service/shared_image_factory.h"
#include "gpu/command_buffer/service/shared_image_representation.h"
#include "gpu/command_buffer/service/skia_utils.h"
@@ -71,7 +67,6 @@
#include "third_party/skia/include/gpu/GrContext.h"
#include "third_party/skia/include/gpu/GrTypes.h"
#include "ui/gfx/buffer_format_util.h"
-#include "ui/gfx/ipc/color/gfx_param_traits.h"
#include "ui/gl/gl_context.h"
#include "ui/gl/gl_gl_api_implementation.h"
#include "ui/gl/gl_surface.h"
@@ -79,17 +74,16 @@
// Local versions of the SET_GL_ERROR macros
#define LOCAL_SET_GL_ERROR(error, function_name, msg) \
- ERRORSTATE_SET_GL_ERROR(state_.GetErrorState(), error, function_name, msg)
-#define LOCAL_SET_GL_ERROR_INVALID_ENUM(function_name, value, label) \
- ERRORSTATE_SET_GL_ERROR_INVALID_ENUM(state_.GetErrorState(), function_name, \
+ ERRORSTATE_SET_GL_ERROR(error_state_.get(), error, function_name, msg)
+#define LOCAL_SET_GL_ERROR_INVALID_ENUM(function_name, value, label) \
+ ERRORSTATE_SET_GL_ERROR_INVALID_ENUM(error_state_.get(), function_name, \
static_cast<uint32_t>(value), label)
-#define LOCAL_COPY_REAL_GL_ERRORS_TO_WRAPPER(function_name) \
- ERRORSTATE_COPY_REAL_GL_ERRORS_TO_WRAPPER(state_.GetErrorState(), \
- function_name)
+#define LOCAL_COPY_REAL_GL_ERRORS_TO_WRAPPER(function_name) \
+ ERRORSTATE_COPY_REAL_GL_ERRORS_TO_WRAPPER(error_state_.get(), function_name)
#define LOCAL_PEEK_GL_ERROR(function_name) \
- ERRORSTATE_PEEK_GL_ERROR(state_.GetErrorState(), function_name)
+ ERRORSTATE_PEEK_GL_ERROR(error_state_.get(), function_name)
#define LOCAL_CLEAR_REAL_GL_ERRORS(function_name) \
- ERRORSTATE_CLEAR_REAL_GL_ERRORS(state_.GetErrorState(), function_name)
+ ERRORSTATE_CLEAR_REAL_GL_ERRORS(error_state_.get(), function_name)
#define LOCAL_PERFORMANCE_WARNING(msg) \
PerformanceWarning(__FILE__, __LINE__, msg)
#define LOCAL_RENDER_WARNING(msg) RenderWarning(__FILE__, __LINE__, msg)
@@ -101,42 +95,6 @@ namespace {
base::AtomicSequenceNumber g_raster_decoder_id;
-class TextureMetadata {
- public:
- TextureMetadata(bool use_buffer,
- gfx::BufferUsage buffer_usage,
- viz::ResourceFormat format,
- const Capabilities& caps)
- : use_buffer_(use_buffer),
- buffer_usage_(buffer_usage),
- format_(format),
- target_(CalcTarget(use_buffer, buffer_usage, format, caps)) {}
- TextureMetadata(const TextureMetadata& tmd) = default;
-
- bool use_buffer() const { return use_buffer_; }
- gfx::BufferUsage buffer_usage() const { return buffer_usage_; }
- viz::ResourceFormat format() const { return format_; }
- GLenum target() const { return target_; }
-
- private:
- static GLenum CalcTarget(bool use_buffer,
- gfx::BufferUsage buffer_usage,
- viz::ResourceFormat format,
- const Capabilities& caps) {
- if (use_buffer) {
- gfx::BufferFormat buffer_format = viz::BufferFormat(format);
- return GetBufferTextureTarget(buffer_usage, buffer_format, caps);
- } else {
- return GL_TEXTURE_2D;
- }
- }
-
- const bool use_buffer_;
- const gfx::BufferUsage buffer_usage_;
- const viz::ResourceFormat format_;
- const GLenum target_;
-};
-
// This class prevents any GL errors that occur when it is in scope from
// being reported to the client.
class ScopedGLErrorSuppressor {
@@ -162,37 +120,22 @@ class ScopedGLErrorSuppressor {
class ScopedTextureBinder {
public:
ScopedTextureBinder(gles2::ContextState* state,
- gles2::TextureManager* texture_manager,
- gles2::TextureRef* texture_ref,
GLenum target,
- GrContext* gr_context,
- bool state_is_dirty)
- : state_(state),
- target_(target),
- gr_context_(gr_context),
- state_is_dirty_(state_is_dirty) {
+ GLuint texture,
+ GrContext* gr_context)
+ : state_(state), target_(target) {
auto* api = state->api();
api->glActiveTextureFn(GL_TEXTURE0);
- gles2::Texture* texture = texture_ref->texture();
- if (texture->target() == 0)
- texture_manager->SetTarget(texture_ref, target);
- DCHECK_EQ(texture->target(), target_)
- << "Texture bound to more than 1 target.";
- api->glBindTextureFn(target_, texture_ref->service_id());
+ api->glBindTextureFn(target_, texture);
+ if (gr_context)
+ gr_context->resetContext(kTextureBinding_GrGLBackendState);
}
- ~ScopedTextureBinder() {
- if (!state_is_dirty_)
- state_->api()->glBindTextureFn(target_, 0);
- if (gr_context_)
- gr_context_->resetContext(kTextureBinding_GrGLBackendState);
- }
+ ~ScopedTextureBinder() { state_->api()->glBindTextureFn(target_, 0); }
private:
gles2::ContextState* state_;
GLenum target_;
- GrContext* gr_context_;
- const bool state_is_dirty_;
DISALLOW_COPY_AND_ASSIGN(ScopedTextureBinder);
};
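
The reworked binder is deliberately simple: it binds a raw service id on texture unit 0, immediately tells Skia the GL binding changed via resetContext(), and rebinds texture 0 on destruction. Illustrative use inside the decoder (the glTexParameteriFn call is just an example of work done under the binding):

    {
      ScopedTextureBinder binder(state(), GL_TEXTURE_2D, service_id,
                                 gr_context());
      api()->glTexParameteriFn(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER,
                               GL_LINEAR);
    }  // binding restored to 0; GrContext was already reset at construction
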
@@ -202,14 +145,29 @@ class ScopedTextureBinder {
// scope.
class ScopedPixelUnpackState {
public:
- explicit ScopedPixelUnpackState(gles2::ContextState* state) : state_(state) {
- DCHECK(state_);
- state_->PushTextureUnpackState();
+ explicit ScopedPixelUnpackState(gles2::ContextState* state,
+ GrContext* gr_context,
+ const gles2::FeatureInfo* feature_info) {
+ DCHECK(state);
+ auto* api = state->api();
+ api->glPixelStoreiFn(GL_UNPACK_ALIGNMENT, 4);
+ if (feature_info->gl_version_info().is_es3 ||
+ feature_info->gl_version_info().is_desktop_core_profile ||
+ feature_info->feature_flags().ext_pixel_buffer_object)
+ api->glBindBufferFn(GL_PIXEL_UNPACK_BUFFER, 0);
+
+ if (feature_info->gl_version_info().is_es3 ||
+ feature_info->gl_version_info().is_desktop_core_profile ||
+ feature_info->feature_flags().ext_unpack_subimage)
+ api->glPixelStoreiFn(GL_UNPACK_ROW_LENGTH, 0);
+ if (gr_context) {
+ gr_context->resetContext(kMisc_GrGLBackendState |
+ kPixelStore_GrGLBackendState);
+ }
}
- ~ScopedPixelUnpackState() { state_->RestoreUnpackState(); }
+ ~ScopedPixelUnpackState() = default;
private:
- gles2::ContextState* state_;
DISALLOW_COPY_AND_ASSIGN(ScopedPixelUnpackState);
};
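
ScopedPixelUnpackState now forces default unpack state up front (alignment 4, no pixel unpack buffer, zero row length, where supported) and restores nothing on destruction; callers are expected to mark the shared context state dirty instead. A sketch of a typical upload under it (width/height/pixels are placeholders):

    {
      ScopedPixelUnpackState unpack(state(), gr_context(),
                                    feature_info_.get());
      api()->glTexSubImage2DFn(GL_TEXTURE_2D, /*level=*/0, 0, 0, width,
                               height, GL_RGBA, GL_UNSIGNED_BYTE, pixels);
    }
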
@@ -231,94 +189,22 @@ bool AllowedBetweenBeginEndRaster(CommandId command) {
}
}
-// Commands that do not require that GL state matches ContextState. Some
-// are completely indifferent to GL state. Others require that GL state
-// matches GrContext state tracking.
-bool PermitsInconsistentContextState(CommandId command) {
- // Note that state restoration is expensive. If you're adding any new
- // command which is frequently used between multiple RasterCHROMIUMs for
- // tiled rasterization, make sure to add it to the whitelist below for
- // commands which don't need consistent GL state.
- switch (command) {
- case kBeginQueryEXT:
- case kBeginRasterCHROMIUMImmediate:
- case kCreateAndConsumeTextureINTERNALImmediate:
- case kCreateTransferCacheEntryINTERNAL:
- case kDeleteQueriesEXTImmediate:
- case kDeleteTexturesImmediate:
- case kDeleteTransferCacheEntryINTERNAL:
- case kEndQueryEXT:
- case kEndRasterCHROMIUM:
- case kFinish:
- case kFlush:
- case kGenQueriesEXTImmediate:
- case kGetError:
- case kInsertFenceSyncCHROMIUM:
- case kRasterCHROMIUM:
- case kSetActiveURLCHROMIUM:
- case kUnlockTransferCacheEntryINTERNAL:
- case kWaitSyncTokenCHROMIUM:
- case kTraceBeginCHROMIUM:
- case kTraceEndCHROMIUM:
- case kDeletePaintCacheTextBlobsINTERNALImmediate:
- case kDeletePaintCachePathsINTERNALImmediate:
- case kClearPaintCacheINTERNAL:
- return true;
- case kLoseContextCHROMIUM:
- case kCopySubTexture:
- return false;
- case kNumCommands:
- case kOneBeforeStartPoint:
- NOTREACHED();
- return false;
- }
-
- NOTREACHED();
- return false;
-}
-
} // namespace
// RasterDecoderImpl uses two separate state trackers (gpu::gles2::ContextState
// and GrContext) that cache the current GL driver state. Each class sees a
// fraction of the GL calls issued and can easily become inconsistent with GL
// state. We guard against that by resetting. But resetting is expensive, so we
-// avoid it as much as possible. The argument for correctness is as follows:
-//
-// - GLES2Decoder: The GL state matches the ContextState before and after a
-// command is executed here. The interesting case is making a GLES2Decoder
-// current. If using a virtual context, we will restore state appropriately
-// when the GLES2Decoder is made current because of the call to
-// RasterDecoderImpl::GetContextState.
-//
-// - RasterDecoder: There are two cases to consider
-//
-// Case 1: Making a RasterDecoder current. If we are using virtual contexts,
-// we will restore to |state_| and GrContext::resetContext because of
-// RasterDecoderImpl::{GetContextState,RestoreState}. If not, we will
-// restore to the previous GL state (either |state_| or GrContext consistent
-// with previous GL state).
-//
-// Case 2a: Executing a PermitsInconsistentContextState command: Either the
-// command doesn't inspect/modify GL state (InsertSyncPoint,
-// CreateAndConsumeTexture) or it requires and maintains that GrContext
-// state tracking matches GL context state (e.g. *RasterCHROMIUM --- see
-// raster_decoder_context_state_->PessimisticallyResetGrContext).
-//
-// Case 2b: Executing a command that is not whitelisted: We force GL state to
-// match |state_| as necessary (see |need_context_state_reset|) in
-// DoCommandsImpl with RestoreState(nullptr). This will call
-// GrContext::resetContext.
+// avoid it as much as possible.
class RasterDecoderImpl final : public RasterDecoder,
public gles2::ErrorStateClient,
public ServiceFontManager::Client {
public:
- RasterDecoderImpl(
- DecoderClient* client,
- CommandBufferServiceBase* command_buffer_service,
- gles2::Outputter* outputter,
- gles2::ContextGroup* group,
- scoped_refptr<RasterDecoderContextState> raster_decoder_context_state);
+ RasterDecoderImpl(DecoderClient* client,
+ CommandBufferServiceBase* command_buffer_service,
+ gles2::Outputter* outputter,
+ gles2::ContextGroup* group,
+ scoped_refptr<SharedContextState> shared_context_state);
~RasterDecoderImpl() override;
gles2::GLES2Util* GetGLES2Util() override { return &util_; }
@@ -340,6 +226,8 @@ class RasterDecoderImpl final : public RasterDecoder,
}
Capabilities GetCapabilities() override;
const gles2::ContextState* GetContextState() override;
+
+  // TODO(penghuang): Remove unused context-state-related methods.
void RestoreGlobalState() const override;
void ClearAllAttributes() const override;
void RestoreAllAttributes() const override;
@@ -353,11 +241,12 @@ class RasterDecoderImpl final : public RasterDecoder,
void RestoreFramebufferBindings() const override;
void RestoreRenderbufferBindings() override;
void RestoreProgramBindings() const override;
- void RestoreTextureState(unsigned service_id) const override;
+ void RestoreTextureState(unsigned service_id) override;
void RestoreTextureUnitBindings(unsigned unit) const override;
void RestoreVertexAttribArray(unsigned index) override;
void RestoreAllExternalTextureBindingsIfNeeded() override;
QueryManager* GetQueryManager() override;
+
void SetQueryCallback(unsigned int query_client_id,
base::OnceClosure callback) override;
gles2::GpuFenceManager* GetGpuFenceManager() override;
@@ -435,6 +324,7 @@ class RasterDecoderImpl final : public RasterDecoder,
ServiceTransferCache* GetTransferCacheForTest() override;
void SetUpForRasterCHROMIUMForTest() override;
void SetOOMErrorForTest() override;
+ void DisableFlushWorkaroundForTest() override;
// ErrorClientState implementation.
void OnContextLostError() override;
@@ -453,19 +343,17 @@ class RasterDecoderImpl final : public RasterDecoder,
scoped_refptr<Buffer> GetShmBuffer(uint32_t shm_id) override;
private:
- std::unordered_map<GLuint, TextureMetadata> texture_metadata_;
- TextureMetadata* GetTextureMetadata(GLuint client_id) {
- auto it = texture_metadata_.find(client_id);
- DCHECK(it != texture_metadata_.end()) << "Undefined texture id";
- return &it->second;
- }
-
- gl::GLApi* api() const { return state_.api(); }
- GrContext* gr_context() const {
- return raster_decoder_context_state_->gr_context;
+ gles2::ContextState* state() const {
+ if (use_passthrough()) {
+ NOTREACHED();
+ return nullptr;
+ }
+ return shared_context_state_->context_state();
}
+ gl::GLApi* api() const { return api_; }
+ GrContext* gr_context() const { return shared_context_state_->gr_context(); }
ServiceTransferCache* transfer_cache() {
- return raster_decoder_context_state_->transfer_cache.get();
+ return shared_context_state_->transfer_cache();
}
const gles2::FeatureInfo::FeatureFlags& features() const {
@@ -478,7 +366,8 @@ class RasterDecoderImpl final : public RasterDecoder,
bool IsRobustnessSupported() {
return has_robustness_extension_ &&
- context_->WasAllocatedUsingRobustnessExtension();
+ shared_context_state_->context()
+ ->WasAllocatedUsingRobustnessExtension();
}
const gl::GLVersionInfo& gl_version_info() {
@@ -487,6 +376,8 @@ class RasterDecoderImpl final : public RasterDecoder,
MemoryTracker* memory_tracker() { return group_->memory_tracker(); }
+ bool use_passthrough() const { return group_->use_passthrough_cmd_decoder(); }
+
gles2::BufferManager* buffer_manager() { return group_->buffer_manager(); }
const gles2::TextureManager* texture_manager() const {
@@ -497,27 +388,6 @@ class RasterDecoderImpl final : public RasterDecoder,
gles2::ImageManager* image_manager() { return group_->image_manager(); }
- // Creates a Texture for the given texture.
- gles2::TextureRef* CreateTexture(GLuint client_id, GLuint service_id) {
- return texture_manager()->CreateTexture(client_id, service_id);
- }
-
- // Gets the texture info for the given texture. Returns nullptr if none
- // exists.
- gles2::TextureRef* GetTexture(GLuint client_id) const {
- return texture_manager()->GetTexture(client_id);
- }
-
- // Deletes the texture info for the given texture.
- void RemoveTexture(GLuint client_id) {
- texture_manager()->RemoveTexture(client_id);
-
- auto texture_iter = texture_metadata_.find(client_id);
- DCHECK(texture_iter != texture_metadata_.end());
-
- texture_metadata_.erase(texture_iter);
- }
-
// Set remaining commands to process to 0 to force DoCommands to return
// and allow context preemption and GPU watchdog checks in
// CommandExecutor().
@@ -529,12 +399,6 @@ class RasterDecoderImpl final : public RasterDecoder,
int num_entries,
int* entries_processed);
- void DoCreateAndConsumeTextureINTERNAL(GLuint client_id,
- bool use_buffer,
- gfx::BufferUsage buffer_usage,
- viz::ResourceFormat resource_format,
- const volatile GLbyte* key);
- void DeleteTexturesHelper(GLsizei n, const volatile GLuint* client_ids);
bool GenQueriesEXTHelper(GLsizei n, const GLuint* client_ids);
void DeleteQueriesEXTHelper(GLsizei n, const volatile GLuint* client_ids);
void DoFinish();
@@ -543,14 +407,13 @@ class RasterDecoderImpl final : public RasterDecoder,
void DoTraceEndCHROMIUM();
bool InitializeCopyTexImageBlitter();
bool InitializeCopyTextureCHROMIUM();
- void DoCopySubTexture(GLuint source_id,
- GLuint dest_id,
- GLint xoffset,
- GLint yoffset,
- GLint x,
- GLint y,
- GLsizei width,
- GLsizei height);
+ void DoCopySubTextureINTERNAL(GLint xoffset,
+ GLint yoffset,
+ GLint x,
+ GLint y,
+ GLsizei width,
+ GLsizei height,
+ const volatile GLbyte* mailboxes);
// If the texture has an image but that image is not bound or copied to the
// texture, this will first attempt to bind it, and if that fails
// CopyTexImage on it.
@@ -559,15 +422,14 @@ class RasterDecoderImpl final : public RasterDecoder,
void DoBeginRasterCHROMIUM(GLuint sk_color,
GLuint msaa_sample_count,
GLboolean can_use_lcd_text,
- GLint color_type,
GLuint color_space_transfer_cache_id,
const volatile GLbyte* key);
void DoRasterCHROMIUM(GLuint raster_shm_id,
GLuint raster_shm_offset,
- GLsizeiptr raster_shm_size,
+ GLuint raster_shm_size,
GLuint font_shm_id,
GLuint font_shm_offset,
- GLsizeiptr font_shm_size);
+ GLuint font_shm_size);
void DoEndRasterCHROMIUM();
void DoCreateTransferCacheEntryINTERNAL(GLuint entry_type,
GLuint entry_id,
@@ -647,7 +509,6 @@ class RasterDecoderImpl final : public RasterDecoder,
bool use_ddl_ = false;
bool has_robustness_extension_ = false;
- bool context_was_lost_ = false;
bool reset_by_robustness_extension_ = false;
// The current decoder error communicates the decoder error through command
@@ -655,31 +516,25 @@ class RasterDecoderImpl final : public RasterDecoder,
// only if not returning an error.
error::Error current_decoder_error_ = error::kNoError;
- scoped_refptr<gl::GLSurface> surface_;
scoped_refptr<gl::GLContext> context_;
DecoderClient* client_;
gles2::DebugMarkerManager debug_marker_manager_;
gles2::Logger logger_;
+ std::unique_ptr<gles2::ErrorState> error_state_;
+ bool context_lost_ = false;
// The ContextGroup for this decoder uses to track resources.
scoped_refptr<gles2::ContextGroup> group_;
- scoped_refptr<RasterDecoderContextState> raster_decoder_context_state_;
+ scoped_refptr<SharedContextState> shared_context_state_;
std::unique_ptr<Validators> validators_;
scoped_refptr<gles2::FeatureInfo> feature_info_;
std::unique_ptr<QueryManager> query_manager_;
- // All the state for this context.
- gles2::ContextState state_;
-
gles2::GLES2Util util_;
- // States related to each manager.
- gles2::DecoderTextureState texture_state_;
- gles2::DecoderFramebufferState framebuffer_state_;
-
// An optional behaviour to lose the context and group when OOM.
bool lose_context_when_out_of_memory_ = false;
@@ -710,6 +565,14 @@ class RasterDecoderImpl final : public RasterDecoder,
// Tracing helpers.
int raster_chromium_id_ = 0;
+ // Workaround for https://crbug.com/906453
+ bool flush_workaround_disabled_for_test_ = false;
+
+ bool in_copy_sub_texture_ = false;
+ bool reset_texture_state_ = false;
+
+ gl::GLApi* api_ = nullptr;
+
base::WeakPtrFactory<DecoderContext> weak_ptr_factory_;
DISALLOW_COPY_AND_ASSIGN(RasterDecoderImpl);
@@ -733,9 +596,9 @@ RasterDecoder* RasterDecoder::Create(
CommandBufferServiceBase* command_buffer_service,
gles2::Outputter* outputter,
gles2::ContextGroup* group,
- scoped_refptr<RasterDecoderContextState> raster_decoder_context_state) {
+ scoped_refptr<SharedContextState> shared_context_state) {
return new RasterDecoderImpl(client, command_buffer_service, outputter, group,
- std::move(raster_decoder_context_state));
+ std::move(shared_context_state));
}
RasterDecoder::RasterDecoder(CommandBufferServiceBase* command_buffer_service,
@@ -783,30 +646,27 @@ RasterDecoderImpl::RasterDecoderImpl(
CommandBufferServiceBase* command_buffer_service,
gles2::Outputter* outputter,
gles2::ContextGroup* group,
- scoped_refptr<RasterDecoderContextState> raster_decoder_context_state)
+ scoped_refptr<SharedContextState> shared_context_state)
: RasterDecoder(command_buffer_service, outputter),
raster_decoder_id_(g_raster_decoder_id.GetNext() + 1),
client_(client),
logger_(&debug_marker_manager_,
base::BindRepeating(&DecoderClient::OnConsoleMessage,
base::Unretained(client_),
- 0)),
+ 0),
+ group->gpu_preferences().disable_gl_error_limit),
+ error_state_(gles2::ErrorState::Create(this, &logger_)),
group_(group),
- raster_decoder_context_state_(std::move(raster_decoder_context_state)),
+ shared_context_state_(std::move(shared_context_state)),
validators_(new Validators),
feature_info_(group_->feature_info()),
- state_(group_->feature_info(),
- this,
- &logger_,
- false /* track_texture_and_sampler_units */),
- texture_state_(group_->feature_info()->workarounds()),
service_logging_(
group_->gpu_preferences().enable_gpu_service_logging_gpu),
gpu_decoder_category_(TRACE_EVENT_API_GET_CATEGORY_GROUP_ENABLED(
TRACE_DISABLED_BY_DEFAULT("gpu.decoder"))),
font_manager_(base::MakeRefCounted<ServiceFontManager>(this)),
weak_ptr_factory_(this) {
- DCHECK(raster_decoder_context_state_);
+ DCHECK(shared_context_state_);
}
RasterDecoderImpl::~RasterDecoderImpl() {
@@ -825,10 +685,10 @@ ContextResult RasterDecoderImpl::Initialize(
const gles2::DisallowedFeatures& disallowed_features,
const ContextCreationAttribs& attrib_helper) {
TRACE_EVENT0("gpu", "RasterDecoderImpl::Initialize");
- DCHECK(context->IsCurrent(surface.get()));
+ DCHECK(shared_context_state_->IsCurrent(surface.get()));
DCHECK(!context_.get());
- state_.set_api(gl::g_current_gl_context);
+ api_ = gl::g_current_gl_context;
set_initialized();
@@ -842,7 +702,8 @@ ContextResult RasterDecoderImpl::Initialize(
if (group_->gpu_preferences().enable_gpu_command_logging)
SetLogCommands(true);
- surface_ = surface;
+ DCHECK_EQ(surface.get(), shared_context_state_->surface());
+ DCHECK_EQ(context.get(), shared_context_state_->context());
context_ = context;
// Create GPU Tracer for timing values.
@@ -860,28 +721,15 @@ ContextResult RasterDecoderImpl::Initialize(
Destroy(true);
return result;
}
- CHECK_GL_ERROR();
-
- // Support for CHROMIUM_texture_storage_image depends on the underlying
- // ImageFactory's ability to create anonymous images.
- gpu::ImageFactory* image_factory = group_->image_factory();
- if (image_factory && image_factory->SupportsCreateAnonymousImage())
- feature_info_->EnableCHROMIUMTextureStorageImage();
- state_.InitGenericAttribs(group_->max_vertex_attribs());
+ CHECK_GL_ERROR();
- query_manager_.reset(new QueryManager());
+ query_manager_ = std::make_unique<QueryManager>();
has_robustness_extension_ = features().arb_robustness ||
features().khr_robustness ||
features().ext_robustness;
- // Set all the default state because some GL drivers get it wrong.
- // TODO(backer): Not all of this state needs to be initialized. Reduce the set
- // if perf becomes a problem.
- state_.InitCapabilities(nullptr);
- state_.InitState(nullptr);
-
if (attrib_helper.enable_oop_rasterization) {
if (!features().chromium_raster_transport) {
LOG(ERROR) << "ContextResult::kFatalFailure: "
@@ -890,7 +738,7 @@ ContextResult RasterDecoderImpl::Initialize(
return ContextResult::kFatalFailure;
}
- supports_oop_raster_ = !!raster_decoder_context_state_->gr_context;
+ supports_oop_raster_ = !!shared_context_state_->gr_context();
if (supports_oop_raster_)
paint_cache_ = std::make_unique<cc::ServicePaintCache>();
use_ddl_ = group_->gpu_preferences().enable_oop_rasterization_ddl;
@@ -903,7 +751,7 @@ void RasterDecoderImpl::Destroy(bool have_context) {
if (!initialized())
return;
- DCHECK(!have_context || context_->IsCurrent(nullptr));
+ DCHECK(!have_context || shared_context_state_->context()->IsCurrent(nullptr));
if (have_context) {
if (copy_tex_image_blit_.get()) {
@@ -928,14 +776,8 @@ void RasterDecoderImpl::Destroy(bool have_context) {
if (group_ && group_->texture_manager()) {
group_->texture_manager()->MarkContextLost();
}
-
- state_.MarkContextLost();
}
- // Unbind everything.
- state_.bound_pixel_pack_buffer = nullptr;
- state_.bound_pixel_unpack_buffer = nullptr;
-
copy_tex_image_blit_.reset();
copy_texture_chromium_.reset();
@@ -951,8 +793,6 @@ void RasterDecoderImpl::Destroy(bool have_context) {
// Destroy the surface before the context, some surface destructors make GL
// calls.
- surface_ = nullptr;
-
if (context_.get()) {
context_->ReleaseCurrent(nullptr);
context_ = nullptr;
@@ -964,21 +804,25 @@ void RasterDecoderImpl::Destroy(bool have_context) {
// Make this decoder's GL context current.
bool RasterDecoderImpl::MakeCurrent() {
- DCHECK(surface_);
+ if (shared_context_state_->use_vulkan_gr_context())
+ return true;
+
if (!context_.get())
return false;
- if (WasContextLost()) {
+ if (context_lost_) {
LOG(ERROR) << " RasterDecoderImpl: Trying to make lost context current.";
return false;
}
- if (!context_->MakeCurrent(surface_.get())) {
+ if (shared_context_state_->context_lost() ||
+ !shared_context_state_->MakeCurrent(nullptr)) {
LOG(ERROR) << " RasterDecoderImpl: Context lost during MakeCurrent.";
MarkContextLost(error::kMakeCurrentFailed);
group_->LoseContexts(error::kUnknown);
return false;
}
+
DCHECK_EQ(api(), gl::g_current_gl_context);
if (CheckResetStatus()) {
@@ -999,7 +843,7 @@ gl::GLContext* RasterDecoderImpl::GetGLContext() {
}
gl::GLSurface* RasterDecoderImpl::GetGLSurface() {
- return surface_.get();
+ return shared_context_state_->surface();
}
Capabilities RasterDecoderImpl::GetCapabilities() {
@@ -1025,120 +869,82 @@ Capabilities RasterDecoderImpl::GetCapabilities() {
caps.context_supports_distance_field_text =
gr_context()->supportsDistanceFieldText();
caps.glyph_cache_max_texture_bytes =
- raster_decoder_context_state_->glyph_cache_max_texture_bytes;
+ shared_context_state_->glyph_cache_max_texture_bytes();
}
return caps;
}
const gles2::ContextState* RasterDecoderImpl::GetContextState() {
- if (raster_decoder_context_state_->need_context_state_reset) {
- // Returning nullptr to force full state restoration by the caller. We do
- // this because GrContext changes to GL state are untracked in our state_.
- return nullptr;
- }
-
- return &state_;
+ NOTREACHED();
+ return nullptr;
}
void RasterDecoderImpl::RestoreGlobalState() const {
- raster_decoder_context_state_->PessimisticallyResetGrContext();
- state_.RestoreGlobalState(nullptr);
+ // We mark the context state as dirty instead of restoring the global
+ // state here; the next context that needs it will restore it.
+ shared_context_state_->set_need_context_state_reset(true);
+ shared_context_state_->PessimisticallyResetGrContext();
}
void RasterDecoderImpl::ClearAllAttributes() const {}
void RasterDecoderImpl::RestoreAllAttributes() const {
- raster_decoder_context_state_->PessimisticallyResetGrContext();
+ shared_context_state_->PessimisticallyResetGrContext();
}
void RasterDecoderImpl::RestoreState(const gles2::ContextState* prev_state) {
- TRACE_EVENT1("gpu", "RasterDecoderImpl::RestoreState", "context",
- logger_.GetLogPrefix());
- state_.RestoreState(prev_state);
- raster_decoder_context_state_->need_context_state_reset = false;
- raster_decoder_context_state_->PessimisticallyResetGrContext();
+ shared_context_state_->PessimisticallyResetGrContext();
}
void RasterDecoderImpl::RestoreActiveTexture() const {
- raster_decoder_context_state_->PessimisticallyResetGrContext();
+ shared_context_state_->PessimisticallyResetGrContext();
}
void RasterDecoderImpl::RestoreAllTextureUnitAndSamplerBindings(
const gles2::ContextState* prev_state) const {
- raster_decoder_context_state_->PessimisticallyResetGrContext();
+ shared_context_state_->PessimisticallyResetGrContext();
}
void RasterDecoderImpl::RestoreActiveTextureUnitBinding(
unsigned int target) const {
- raster_decoder_context_state_->PessimisticallyResetGrContext();
+ shared_context_state_->PessimisticallyResetGrContext();
}
void RasterDecoderImpl::RestoreBufferBinding(unsigned int target) {
- raster_decoder_context_state_->PessimisticallyResetGrContext();
- if (target == GL_PIXEL_PACK_BUFFER) {
- state_.UpdatePackParameters();
- } else if (target == GL_PIXEL_UNPACK_BUFFER) {
- state_.UpdateUnpackParameters();
- }
- gles2::Buffer* bound_buffer =
- buffer_manager()->GetBufferInfoForTarget(&state_, target);
- api()->glBindBufferFn(target, bound_buffer ? bound_buffer->service_id() : 0);
+ shared_context_state_->PessimisticallyResetGrContext();
}
void RasterDecoderImpl::RestoreBufferBindings() const {
- raster_decoder_context_state_->PessimisticallyResetGrContext();
- state_.RestoreBufferBindings();
+ shared_context_state_->PessimisticallyResetGrContext();
}
void RasterDecoderImpl::RestoreFramebufferBindings() const {
- raster_decoder_context_state_->PessimisticallyResetGrContext();
- state_.fbo_binding_for_scissor_workaround_dirty = true;
- state_.stencil_state_changed_since_validation = true;
-
- if (workarounds().flush_on_framebuffer_change)
- api()->glFlushFn();
+ shared_context_state_->PessimisticallyResetGrContext();
}
void RasterDecoderImpl::RestoreRenderbufferBindings() {
- raster_decoder_context_state_->PessimisticallyResetGrContext();
- state_.RestoreRenderbufferBindings();
+ shared_context_state_->PessimisticallyResetGrContext();
}
void RasterDecoderImpl::RestoreProgramBindings() const {
- raster_decoder_context_state_->PessimisticallyResetGrContext();
- state_.RestoreProgramSettings(nullptr, false);
-}
-
-void RasterDecoderImpl::RestoreTextureState(unsigned service_id) const {
- raster_decoder_context_state_->PessimisticallyResetGrContext();
- gles2::Texture* texture =
- texture_manager()->GetTextureForServiceId(service_id);
- if (texture) {
- GLenum target = texture->target();
- api()->glBindTextureFn(target, service_id);
- api()->glTexParameteriFn(target, GL_TEXTURE_WRAP_S, texture->wrap_s());
- api()->glTexParameteriFn(target, GL_TEXTURE_WRAP_T, texture->wrap_t());
- api()->glTexParameteriFn(target, GL_TEXTURE_MIN_FILTER,
- texture->min_filter());
- api()->glTexParameteriFn(target, GL_TEXTURE_MAG_FILTER,
- texture->mag_filter());
- if (feature_info_->IsWebGL2OrES3Context()) {
- api()->glTexParameteriFn(target, GL_TEXTURE_BASE_LEVEL,
- texture->base_level());
- }
- }
+ shared_context_state_->PessimisticallyResetGrContext();
+}
+
+void RasterDecoderImpl::RestoreTextureState(unsigned service_id) {
+ DCHECK(in_copy_sub_texture_);
+ reset_texture_state_ = true;
}
void RasterDecoderImpl::RestoreTextureUnitBindings(unsigned unit) const {
- raster_decoder_context_state_->PessimisticallyResetGrContext();
+ shared_context_state_->PessimisticallyResetGrContext();
}
void RasterDecoderImpl::RestoreVertexAttribArray(unsigned index) {
- NOTIMPLEMENTED();
+ shared_context_state_->PessimisticallyResetGrContext();
}
void RasterDecoderImpl::RestoreAllExternalTextureBindingsIfNeeded() {
- raster_decoder_context_state_->PessimisticallyResetGrContext();
+ shared_context_state_->PessimisticallyResetGrContext();
}
QueryManager* RasterDecoderImpl::GetQueryManager() {
@@ -1163,13 +969,12 @@ gles2::GpuFenceManager* RasterDecoderImpl::GetGpuFenceManager() {
}
bool RasterDecoderImpl::HasPendingQueries() const {
- return query_manager_.get() && query_manager_->HavePendingQueries();
+ return query_manager_ && query_manager_->HavePendingQueries();
}
void RasterDecoderImpl::ProcessPendingQueries(bool did_finish) {
- if (!query_manager_.get())
- return;
- query_manager_->ProcessPendingQueries(did_finish);
+ if (query_manager_)
+ query_manager_->ProcessPendingQueries(did_finish);
}
bool RasterDecoderImpl::HasMoreIdleWork() const {
@@ -1204,7 +1009,7 @@ void RasterDecoderImpl::SetLevelInfo(uint32_t client_id,
}
bool RasterDecoderImpl::WasContextLost() const {
- return context_was_lost_;
+ return context_lost_;
}
bool RasterDecoderImpl::WasContextLostByRobustnessExtension() const {
@@ -1217,20 +1022,14 @@ void RasterDecoderImpl::MarkContextLost(error::ContextLostReason reason) {
return;
// Don't make GL calls in here, the context might not be current.
+ context_lost_ = true;
command_buffer_service()->SetContextLostReason(reason);
current_decoder_error_ = error::kLostContext;
- context_was_lost_ = true;
-
- state_.MarkContextLost();
- raster_decoder_context_state_->context_lost = true;
-
- if (gr_context())
- gr_context()->abandonContext();
}
bool RasterDecoderImpl::CheckResetStatus() {
DCHECK(!WasContextLost());
- DCHECK(context_->IsCurrent(nullptr));
+ DCHECK(shared_context_state_->context()->IsCurrent(nullptr));
if (IsRobustnessSupported()) {
// If the reason for the call was a GL error, we can try to determine the
@@ -1272,7 +1071,9 @@ gles2::Logger* RasterDecoderImpl::GetLogger() {
}
void RasterDecoderImpl::SetIgnoreCachedStateForTest(bool ignore) {
- state_.SetIgnoreCachedStateForTest(ignore);
+ if (use_passthrough())
+ return;
+ state()->SetIgnoreCachedStateForTest(ignore);
}
gles2::ImageManager* RasterDecoderImpl::GetImageManagerForTest() {
@@ -1316,6 +1117,18 @@ error::Error RasterDecoderImpl::DoCommandsImpl(unsigned int num_commands,
int process_pos = 0;
CommandId command = static_cast<CommandId>(0);
+#if defined(OS_MACOSX)
+ if (!flush_workaround_disabled_for_test_) {
+ // Flush before and after decoding commands.
+ // TODO(ccameron): This is to determine whether this high-frequency
+ // flushing affects crash rates.
+ // https://crbug.com/906453
+ if (gr_context())
+ gr_context()->flush();
+ api()->glFlushFn();
+ }
+#endif
+
while (process_pos < num_entries && result == error::kNoError &&
commands_to_process_--) {
const unsigned int size = cmd_data->value_header.size;
@@ -1338,7 +1151,7 @@ error::Error RasterDecoderImpl::DoCommandsImpl(unsigned int num_commands,
const unsigned int arg_count = size - 1;
unsigned int command_index = command - kFirstRasterCommand;
- if (command_index < arraysize(command_info)) {
+ if (command_index < base::size(command_info)) {
const CommandInfo& info = command_info[command_index];
if (sk_surface_) {
if (!AllowedBetweenBeginEndRaster(command)) {
@@ -1351,11 +1164,6 @@ error::Error RasterDecoderImpl::DoCommandsImpl(unsigned int num_commands,
continue;
}
}
- if (!PermitsInconsistentContextState(command)) {
- if (raster_decoder_context_state_->need_context_state_reset) {
- RestoreState(nullptr);
- }
- }
unsigned int info_arg_count = static_cast<unsigned int>(info.arg_count);
if ((info.arg_flags == cmd::kFixed && arg_count == info_arg_count) ||
(info.arg_flags == cmd::kAtLeastN && arg_count >= info_arg_count)) {
@@ -1413,6 +1221,14 @@ error::Error RasterDecoderImpl::DoCommandsImpl(unsigned int num_commands,
if (supports_oop_raster_)
client_->ScheduleGrContextCleanup();
+#if defined(OS_MACOSX)
+ if (!flush_workaround_disabled_for_test_) {
+ if (gr_context())
+ gr_context()->flush();
+ api()->glFlushFn();
+ }
+#endif
+
return result;
}
@@ -1445,7 +1261,7 @@ gles2::ContextGroup* RasterDecoderImpl::GetContextGroup() {
}
gles2::ErrorState* RasterDecoderImpl::GetErrorState() {
- return state_.GetErrorState();
+ return error_state_.get();
}
std::unique_ptr<gles2::AbstractTexture>
@@ -1485,9 +1301,10 @@ bool RasterDecoderImpl::ClearLevel(gles2::Texture* texture,
uint32_t size;
uint32_t padded_row_size;
+ constexpr GLint unpack_alignment = 4;
if (!gles2::GLES2Util::ComputeImageDataSizes(width, height, 1, format, type,
- state_.unpack_alignment, &size,
- nullptr, &padded_row_size)) {
+ unpack_alignment, &size, nullptr,
+ &padded_row_size)) {
return false;
}
@@ -1504,23 +1321,26 @@ bool RasterDecoderImpl::ClearLevel(gles2::Texture* texture,
DCHECK_GT(padded_row_size, 0U);
tile_height = kMaxZeroSize / padded_row_size;
if (!gles2::GLES2Util::ComputeImageDataSizes(width, tile_height, 1, format,
- type, state_.unpack_alignment,
- &size, nullptr, nullptr)) {
+ type, unpack_alignment, &size,
+ nullptr, nullptr)) {
return false;
}
} else {
tile_height = height;
}
- api()->glBindTextureFn(texture->target(), texture->service_id());
{
+ ScopedTextureBinder binder(state(), texture->target(),
+ texture->service_id(), gr_context());
+ base::Optional<ScopedPixelUnpackState> pixel_unpack_state;
+ if (shared_context_state_->need_context_state_reset()) {
+ pixel_unpack_state.emplace(state(), gr_context(), group_->feature_info());
+ }
// Add extra scope to destroy zero and the object it owns right
// after its usage.
// Assumes the size has already been checked.
std::unique_ptr<char[]> zero(new char[size]);
memset(zero.get(), 0, size);
-
- ScopedPixelUnpackState reset_restore(&state_);
GLint y = 0;
while (y < height) {
GLint h = y + tile_height > height ? height - y : tile_height;
@@ -1532,12 +1352,6 @@ bool RasterDecoderImpl::ClearLevel(gles2::Texture* texture,
}
}
DCHECK(glGetError() == GL_NO_ERROR);
-
- if (gr_context()) {
- gr_context()->resetContext(kPixelStore_GrGLBackendState |
- kTextureBinding_GrGLBackendState);
- }
-
return true;
}
@@ -1547,48 +1361,8 @@ bool RasterDecoderImpl::ClearCompressedTextureLevel(gles2::Texture* texture,
unsigned format,
int width,
int height) {
- DCHECK(target != GL_TEXTURE_3D && target != GL_TEXTURE_2D_ARRAY);
- // This code path can only be called if the texture was originally
- // allocated via TexStorage2D. Note that TexStorage2D is exposed
- // internally for ES 2.0 contexts, but compressed texture support is
- // not part of that exposure.
- DCHECK(feature_info_->IsWebGL2OrES3Context());
-
- GLsizei bytes_required = 0;
- std::string error_str;
- if (!GetCompressedTexSizeInBytes("ClearCompressedTextureLevel", width, height,
- 1, format, &bytes_required,
- state_.GetErrorState())) {
- return false;
- }
-
- TRACE_EVENT1("gpu", "RasterDecoderImpl::ClearCompressedTextureLevel",
- "bytes_required", bytes_required);
-
- api()->glBindBufferFn(GL_PIXEL_UNPACK_BUFFER, 0);
- {
- // Add extra scope to destroy zero and the object it owns right
- // after its usage.
- std::unique_ptr<char[]> zero(new char[bytes_required]);
- memset(zero.get(), 0, bytes_required);
- api()->glBindTextureFn(texture->target(), texture->service_id());
- api()->glCompressedTexSubImage2DFn(target, level, 0, 0, width, height,
- format, bytes_required, zero.get());
- }
- gles2::TextureRef* bound_texture =
- texture_manager()->GetTextureInfoForTarget(&state_, texture->target());
- api()->glBindTextureFn(texture->target(),
- bound_texture ? bound_texture->service_id() : 0);
- gles2::Buffer* bound_buffer =
- buffer_manager()->GetBufferInfoForTarget(&state_, GL_PIXEL_UNPACK_BUFFER);
- if (bound_buffer) {
- api()->glBindBufferFn(GL_PIXEL_UNPACK_BUFFER, bound_buffer->service_id());
- }
-
- if (gr_context()) {
- gr_context()->resetContext(kTextureBinding_GrGLBackendState);
- }
- return true;
+ NOTREACHED();
+ return false;
}
int RasterDecoderImpl::DecoderIdForTest() {
@@ -1596,7 +1370,7 @@ int RasterDecoderImpl::DecoderIdForTest() {
}
ServiceTransferCache* RasterDecoderImpl::GetTransferCacheForTest() {
- return raster_decoder_context_state_->transfer_cache.get();
+ return shared_context_state_->transfer_cache();
}
void RasterDecoderImpl::SetUpForRasterCHROMIUMForTest() {
@@ -1612,6 +1386,10 @@ void RasterDecoderImpl::SetOOMErrorForTest() {
"synthetic out of memory");
}
+void RasterDecoderImpl::DisableFlushWorkaroundForTest() {
+ flush_workaround_disabled_for_test_ = true;
+}
+
void RasterDecoderImpl::OnContextLostError() {
if (!WasContextLost()) {
// Need to lose current context before broadcasting!
@@ -1634,34 +1412,6 @@ void RasterDecoderImpl::OnOutOfMemoryError() {
}
}
-error::Error RasterDecoderImpl::HandleWaitSyncTokenCHROMIUM(
- uint32_t immediate_data_size,
- const volatile void* cmd_data) {
- const volatile gles2::cmds::WaitSyncTokenCHROMIUM& c =
- *static_cast<const volatile gles2::cmds::WaitSyncTokenCHROMIUM*>(
- cmd_data);
-
- static constexpr CommandBufferNamespace kMinNamespaceId =
- CommandBufferNamespace::INVALID;
- static constexpr CommandBufferNamespace kMaxNamespaceId =
- CommandBufferNamespace::NUM_COMMAND_BUFFER_NAMESPACES;
-
- CommandBufferNamespace namespace_id =
- static_cast<CommandBufferNamespace>(c.namespace_id);
- if ((namespace_id < static_cast<int32_t>(kMinNamespaceId)) ||
- (namespace_id >= static_cast<int32_t>(kMaxNamespaceId))) {
- namespace_id = CommandBufferNamespace::INVALID;
- }
- const CommandBufferId command_buffer_id =
- CommandBufferId::FromUnsafeValue(c.command_buffer_id());
- const uint64_t release = c.release_count();
-
- SyncToken sync_token;
- sync_token.Set(namespace_id, command_buffer_id, release);
- return client_->OnWaitSyncToken(sync_token) ? error::kDeferCommandUntilLater
- : error::kNoError;
-}
-
error::Error RasterDecoderImpl::HandleBeginQueryEXT(
uint32_t immediate_data_size,
const volatile void* cmd_data) {
@@ -1674,7 +1424,13 @@ error::Error RasterDecoderImpl::HandleBeginQueryEXT(
switch (target) {
case GL_COMMANDS_ISSUED_CHROMIUM:
+ break;
case GL_COMMANDS_COMPLETED_CHROMIUM:
+ if (!features().chromium_sync_query) {
+ LOCAL_SET_GL_ERROR(GL_INVALID_OPERATION, "glBeginQueryEXT",
+ "not enabled for commands completed queries");
+ return error::kNoError;
+ }
break;
default:
LOCAL_SET_GL_ERROR(GL_INVALID_ENUM, "glBeginQueryEXT",
@@ -1762,83 +1518,17 @@ error::Error RasterDecoderImpl::HandleInsertFenceSyncCHROMIUM(
}
void RasterDecoderImpl::DoFinish() {
- api()->glFinishFn();
+ if (!shared_context_state_->use_vulkan_gr_context())
+ api()->glFinishFn();
ProcessPendingQueries(true);
}
void RasterDecoderImpl::DoFlush() {
- api()->glFlushFn();
+ if (!shared_context_state_->use_vulkan_gr_context())
+ api()->glFlushFn();
ProcessPendingQueries(false);
}
-void RasterDecoderImpl::DoCreateAndConsumeTextureINTERNAL(
- GLuint client_id,
- bool use_buffer,
- gfx::BufferUsage buffer_usage,
- viz::ResourceFormat resource_format,
- const volatile GLbyte* key) {
- TRACE_EVENT2("gpu", "RasterDecoderImpl::DoCreateAndConsumeTextureINTERNAL",
- "context", logger_.GetLogPrefix(), "key[0]",
- static_cast<unsigned char>(key[0]));
- Mailbox mailbox =
- Mailbox::FromVolatile(*reinterpret_cast<const volatile Mailbox*>(key));
- DLOG_IF(ERROR, !mailbox.Verify()) << "CreateAndConsumeTexture was "
- "passed a mailbox that was not "
- "generated by ProduceTextureCHROMIUM.";
- if (!client_id) {
- LOCAL_SET_GL_ERROR(GL_INVALID_OPERATION,
- "glCreateAndConsumeTextureCHROMIUM",
- "invalid client id");
- return;
- }
-
- gles2::TextureRef* texture_ref = GetTexture(client_id);
- if (texture_ref) {
- // No need to create texture here, the client_id already has an associated
- // texture.
- LOCAL_SET_GL_ERROR(GL_INVALID_OPERATION,
- "glCreateAndConsumeTextureCHROMIUM",
- "client id already in use");
- return;
- }
-
- texture_metadata_.emplace(std::make_pair(
- client_id, TextureMetadata(use_buffer, buffer_usage, resource_format,
- GetCapabilities())));
-
- gles2::Texture* texture = gles2::Texture::CheckedCast(
- group_->mailbox_manager()->ConsumeTexture(mailbox));
- if (!texture) {
- // Create texture to handle invalid mailbox (see http://crbug.com/472465).
- GLuint service_id = 0;
- api()->glGenTexturesFn(1, &service_id);
- DCHECK(service_id);
- texture_manager()->CreateTexture(client_id, service_id);
-
- LOCAL_SET_GL_ERROR(GL_INVALID_OPERATION,
- "glCreateAndConsumeTextureCHROMIUM",
- "invalid mailbox name");
- return;
- }
-
- texture_ref = texture_manager()->Consume(client_id, texture);
-
- // TODO(backer): Validate that the consumed texture is consistent with
- // TextureMetadata.
-}
-
-void RasterDecoderImpl::DeleteTexturesHelper(
- GLsizei n,
- const volatile GLuint* client_ids) {
- for (GLsizei ii = 0; ii < n; ++ii) {
- GLuint client_id = client_ids[ii];
- gles2::TextureRef* texture_ref = GetTexture(client_id);
- if (texture_ref) {
- RemoveTexture(client_id);
- }
- }
-}
-
bool RasterDecoderImpl::GenQueriesEXTHelper(GLsizei n,
const GLuint* client_ids) {
for (GLsizei ii = 0; ii < n; ++ii) {
@@ -1955,50 +1645,86 @@ bool RasterDecoderImpl::InitializeCopyTextureCHROMIUM() {
return true;
}
-void RasterDecoderImpl::DoCopySubTexture(GLuint source_id,
- GLuint dest_id,
- GLint xoffset,
- GLint yoffset,
- GLint x,
- GLint y,
- GLsizei width,
- GLsizei height) {
- gles2::TextureRef* source_texture_ref = GetTexture(source_id);
- gles2::TextureRef* dest_texture_ref = GetTexture(dest_id);
- if (!source_texture_ref || !dest_texture_ref) {
- LOCAL_SET_GL_ERROR(GL_INVALID_VALUE, "glCopySubTexture",
- "unknown texture id");
+void RasterDecoderImpl::DoCopySubTextureINTERNAL(
+ GLint xoffset,
+ GLint yoffset,
+ GLint x,
+ GLint y,
+ GLsizei width,
+ GLsizei height,
+ const volatile GLbyte* mailboxes) {
+ Mailbox source_mailbox = Mailbox::FromVolatile(
+ reinterpret_cast<const volatile Mailbox*>(mailboxes)[0]);
+ DLOG_IF(ERROR, !source_mailbox.Verify())
+ << "CopySubTexture was passed an invalid mailbox";
+ Mailbox dest_mailbox = Mailbox::FromVolatile(
+ reinterpret_cast<const volatile Mailbox*>(mailboxes)[1]);
+ DLOG_IF(ERROR, !dest_mailbox.Verify())
+ << "CopySubTexture was passed an invalid mailbox";
+
+ if (use_passthrough()) {
+ // TODO(piman): use shared image representations instead.
+ gles2::TexturePassthrough* source_texture =
+ gles2::TexturePassthrough::CheckedCast(
+ group_->mailbox_manager()->ConsumeTexture(source_mailbox));
+ gles2::TexturePassthrough* dest_texture =
+ gles2::TexturePassthrough::CheckedCast(
+ group_->mailbox_manager()->ConsumeTexture(dest_mailbox));
+ if (!source_texture || !dest_texture) {
+ LOCAL_SET_GL_ERROR(GL_INVALID_VALUE, "glCopySubTexture",
+ "unknown mailbox");
+ return;
+ }
+ if (source_texture->is_bind_pending()) {
+ gl::GLImage* image =
+ source_texture->GetLevelImage(source_texture->target(), 0);
+ if (image) {
+ api()->glBindTextureFn(source_texture->target(),
+ source_texture->service_id());
+ if (!image->BindTexImage(source_texture->target())) {
+ image->CopyTexImage(source_texture->target());
+ }
+ source_texture->set_is_bind_pending(false);
+ }
+ }
+
+ api()->glCopySubTextureCHROMIUMFn(
+ source_texture->service_id(), /*source_level=*/0,
+ dest_texture->target(), dest_texture->service_id(),
+ /*dest_level=*/0, xoffset, yoffset, x, y, width, height,
+ /*unpack_flip_y=*/false, /*unpack_premultiply_alpha=*/false,
+ /*unpack_unmultiply_alpha=*/false);
return;
}
- gles2::Texture* source_texture = source_texture_ref->texture();
- gles2::Texture* dest_texture = dest_texture_ref->texture();
+ // TODO(piman): use shared image representations instead.
+ gles2::Texture* source_texture = gles2::Texture::CheckedCast(
+ group_->mailbox_manager()->ConsumeTexture(source_mailbox));
+ gles2::Texture* dest_texture = gles2::Texture::CheckedCast(
+ group_->mailbox_manager()->ConsumeTexture(dest_mailbox));
+ if (!source_texture || !dest_texture) {
+ LOCAL_SET_GL_ERROR(GL_INVALID_VALUE, "glCopySubTexture", "unknown mailbox");
+ return;
+ }
if (source_texture == dest_texture) {
LOCAL_SET_GL_ERROR(GL_INVALID_OPERATION, "glCopySubTexture",
"source and destination textures are the same");
return;
}
-
- TextureMetadata* source_texture_metadata = GetTextureMetadata(source_id);
- if (!source_texture_metadata) {
- LOCAL_SET_GL_ERROR(GL_INVALID_VALUE, "glCopySubTexture", "unknown texture");
- return;
- }
-
- TextureMetadata* dest_texture_metadata = GetTextureMetadata(dest_id);
- if (!dest_texture_metadata) {
- LOCAL_SET_GL_ERROR(GL_INVALID_VALUE, "glCopySubTexture", "unknown texture");
+ GLenum source_target = source_texture->target();
+ GLenum dest_target = dest_texture->target();
+ if (!source_target || !dest_target) {
+ LOCAL_SET_GL_ERROR(GL_INVALID_OPERATION, "glCopySubTexture",
+ "textures not initialized");
return;
}
- GLenum source_target = source_texture_metadata->target();
GLint source_level = 0;
- GLenum dest_target = dest_texture_metadata->target();
GLint dest_level = 0;
- ScopedTextureBinder binder(
- &state_, texture_manager(), dest_texture_ref, dest_target, gr_context(),
- raster_decoder_context_state_->need_context_state_reset);
+ ScopedTextureBinder binder(state(), dest_target, dest_texture->service_id(),
+ gr_context());
+ base::Optional<ScopedPixelUnpackState> pixel_unpack_state;
int source_width = 0;
int source_height = 0;
@@ -2022,13 +1748,21 @@ void RasterDecoderImpl::DoCopySubTexture(GLuint source_id,
// See: https://crbug.com/586476
int32_t max_x;
int32_t max_y;
- if (!gles2::SafeAddInt32(x, width, &max_x) ||
- !gles2::SafeAddInt32(y, height, &max_y) || x < 0 || y < 0 ||
+ if (!base::CheckAdd(x, width).AssignIfValid(&max_x) ||
+ !base::CheckAdd(y, height).AssignIfValid(&max_y) || x < 0 || y < 0 ||
max_x > source_width || max_y > source_height) {
LOCAL_SET_GL_ERROR(GL_INVALID_VALUE, "glCopySubTexture",
"source texture bad dimensions");
return;
}
+
+ if (image->GetType() == gl::GLImage::Type::MEMORY &&
+ shared_context_state_->need_context_state_reset()) {
+ // If the image is backed by shared memory, we may need to upload the
+ // pixel data with TexSubImage2D, so we need to reset the pixel unpack
+ // state if the GL context state has been touched by Skia.
+ pixel_unpack_state.emplace(state(), gr_context(), group_->feature_info());
+ }
} else {
if (!source_texture->GetLevelSize(source_target, 0 /* level */,
&source_width, &source_height, nullptr)) {
@@ -2088,12 +1822,12 @@ void RasterDecoderImpl::DoCopySubTexture(GLuint source_id,
source_internal_format) == GL_SRGB ||
gles2::GLES2Util::GetColorEncodingFromInternalFormat(
dest_internal_format) == GL_SRGB;
- state_.EnableDisableFramebufferSRGB(enable_framebuffer_srgb);
+ state()->EnableDisableFramebufferSRGB(enable_framebuffer_srgb);
}
// Clear the source texture if necessary.
- if (!texture_manager()->ClearTextureLevel(this, source_texture_ref,
- source_target, 0 /* level */)) {
+ if (!texture_manager()->ClearTextureLevel(this, source_texture, source_target,
+ 0 /* level */)) {
LOCAL_SET_GL_ERROR(GL_OUT_OF_MEMORY, "glCopySubTexture",
"source texture dimensions too big");
return;
@@ -2114,20 +1848,18 @@ void RasterDecoderImpl::DoCopySubTexture(GLuint source_id,
dest_texture->GetLevelClearedRect(dest_target, dest_level)
.size()
.GetArea());
- texture_manager()->SetLevelClearedRect(dest_texture_ref, dest_target,
- dest_level, cleared_rect);
+ dest_texture->SetLevelClearedRect(dest_target, dest_level, cleared_rect);
} else {
// Otherwise clear part of texture level that is not already cleared.
- if (!texture_manager()->ClearTextureLevel(this, dest_texture_ref,
- dest_target, dest_level)) {
+ if (!texture_manager()->ClearTextureLevel(this, dest_texture, dest_target,
+ dest_level)) {
LOCAL_SET_GL_ERROR(GL_OUT_OF_MEMORY, "glCopySubTexture",
"destination texture dimensions too big");
return;
}
}
} else {
- texture_manager()->SetLevelCleared(dest_texture_ref, dest_target,
- dest_level, true);
+ dest_texture->SetLevelCleared(dest_target, dest_level, true);
}
// TODO(qiankun.miao@intel.com): Support level > 0 for CopyTexSubImage.
@@ -2183,6 +1915,7 @@ void RasterDecoderImpl::DoCopySubTexture(GLuint source_id,
}
#endif
+ in_copy_sub_texture_ = true;
copy_texture_chromium_->DoCopySubTexture(
this, source_target, source_texture->service_id(), source_level,
source_internal_format, dest_target, dest_texture->service_id(),
@@ -2191,6 +1924,21 @@ void RasterDecoderImpl::DoCopySubTexture(GLuint source_id,
false /* unpack_flip_y */, false /* unpack_premultiply_alpha */,
false /* unpack_unmultiply_alpha */, false /* dither */, method,
copy_tex_image_blit_.get());
+ in_copy_sub_texture_ = false;
+ if (reset_texture_state_) {
+ reset_texture_state_ = false;
+ for (auto* texture : {source_texture, dest_texture}) {
+ GLenum target = texture->target();
+ api()->glBindTextureFn(target, texture->service_id());
+ api()->glTexParameteriFn(target, GL_TEXTURE_WRAP_S, texture->wrap_s());
+ api()->glTexParameteriFn(target, GL_TEXTURE_WRAP_T, texture->wrap_t());
+ api()->glTexParameteriFn(target, GL_TEXTURE_MIN_FILTER,
+ texture->min_filter());
+ api()->glTexParameteriFn(target, GL_TEXTURE_MAG_FILTER,
+ texture->mag_filter());
+ }
+ shared_context_state_->PessimisticallyResetGrContext();
+ }
}
void RasterDecoderImpl::DoBindOrCopyTexImageIfNeeded(gles2::Texture* texture,
@@ -2201,7 +1949,8 @@ void RasterDecoderImpl::DoBindOrCopyTexImageIfNeeded(gles2::Texture* texture,
gl::GLImage* image = texture->GetLevelImage(textarget, 0, &image_state);
if (image && image_state == gles2::Texture::UNBOUND) {
ScopedGLErrorSuppressor suppressor(
- "RasterDecoderImpl::DoBindOrCopyTexImageIfNeeded", GetErrorState());
+ "RasterDecoderImpl::DoBindOrCopyTexImageIfNeeded",
+ error_state_.get());
api()->glBindTextureFn(textarget, texture->service_id());
if (!image->BindTexImage(textarget)) {
// Note: We update the state to COPIED prior to calling CopyTexImage()
@@ -2295,7 +2044,6 @@ void RasterDecoderImpl::DoBeginRasterCHROMIUM(
GLuint sk_color,
GLuint msaa_sample_count,
GLboolean can_use_lcd_text,
- GLint color_type,
GLuint color_space_transfer_cache_id,
const volatile GLbyte* key) {
if (!gr_context()) {
@@ -2326,7 +2074,7 @@ void RasterDecoderImpl::DoBeginRasterCHROMIUM(
DCHECK(locked_handles_.empty());
DCHECK(!raster_canvas_);
- raster_decoder_context_state_->need_context_state_reset = true;
+ shared_context_state_->set_need_context_state_reset(true);
// Use unknown pixel geometry to disable LCD text.
uint32_t flags = 0;
@@ -2337,7 +2085,8 @@ void RasterDecoderImpl::DoBeginRasterCHROMIUM(
SkSurfaceProps(flags, SkSurfaceProps::kLegacyFontHost_InitType);
}
- SkColorType sk_color_type = static_cast<SkColorType>(color_type);
+ SkColorType sk_color_type = viz::ResourceFormatToClosestSkColorType(
+ /*gpu_compositing=*/true, shared_image_->format());
// If we can't match requested MSAA samples, don't use MSAA.
int final_msaa_count = std::max(static_cast<int>(msaa_sample_count), 0);
if (final_msaa_count >
@@ -2345,7 +2094,7 @@ void RasterDecoderImpl::DoBeginRasterCHROMIUM(
final_msaa_count = 0;
sk_surface_ = shared_image_->BeginWriteAccess(gr_context(), final_msaa_count,
- sk_color_type, surface_props);
+ surface_props);
if (!sk_surface_) {
LOCAL_SET_GL_ERROR(GL_INVALID_OPERATION, "glBeginRasterCHROMIUM",
"failed to create surface");
@@ -2401,10 +2150,10 @@ scoped_refptr<Buffer> RasterDecoderImpl::GetShmBuffer(uint32_t shm_id) {
void RasterDecoderImpl::DoRasterCHROMIUM(GLuint raster_shm_id,
GLuint raster_shm_offset,
- GLsizeiptr raster_shm_size,
+ GLuint raster_shm_size,
GLuint font_shm_id,
GLuint font_shm_offset,
- GLsizeiptr font_shm_size) {
+ GLuint font_shm_size) {
TRACE_EVENT1("gpu", "RasterDecoderImpl::DoRasterCHROMIUM", "raster_id",
++raster_chromium_id_);
@@ -2414,7 +2163,7 @@ void RasterDecoderImpl::DoRasterCHROMIUM(GLuint raster_shm_id,
return;
}
DCHECK(transfer_cache());
- raster_decoder_context_state_->need_context_state_reset = true;
+ shared_context_state_->set_need_context_state_reset(true);
if (font_shm_size > 0) {
// Deserialize fonts before raster.
@@ -2451,8 +2200,9 @@ void RasterDecoderImpl::DoRasterCHROMIUM(GLuint raster_shm_id,
SkCanvas* canvas = raster_canvas_.get();
cc::PlaybackParams playback_params(nullptr, SkMatrix::I());
TransferCacheDeserializeHelperImpl impl(raster_decoder_id_, transfer_cache());
- cc::PaintOp::DeserializeOptions options(&impl, paint_cache_.get(),
- font_manager_->strike_client());
+ cc::PaintOp::DeserializeOptions options(
+ &impl, paint_cache_.get(), font_manager_->strike_client(),
+ shared_context_state_->scratch_deserialization_buffer());
options.crash_dump_on_failure = true;
size_t paint_buffer_size = raster_shm_size;
@@ -2483,7 +2233,7 @@ void RasterDecoderImpl::DoEndRasterCHROMIUM() {
return;
}
- raster_decoder_context_state_->need_context_state_reset = true;
+ shared_context_state_->set_need_context_state_reset(true);
raster_canvas_.reset();
@@ -2516,19 +2266,6 @@ void RasterDecoderImpl::DoEndRasterCHROMIUM() {
// prepareForExternalIO above. Use kDeferLaterCommands to ensure we yield to
// the Scheduler before processing more commands.
current_decoder_error_ = error::kDeferLaterCommands;
-
-#if defined(OS_MACOSX)
- // Aggressively call glFlush on macOS to determine if this is sufficient to
- // avoid GL driver crashes.
- // TODO(ccameron): If this is not sufficient, then add a flush to
- // DoRasterCHROMIUM as well. Also add crash report data to indicate which
- // sequence of commands result in the crash, and formalize this as a GPU
- // bug workaround.
- // https://crbug.com/906453
- if (gr_context())
- gr_context()->flush();
- api()->glFlushFn();
-#endif
}
void RasterDecoderImpl::DoCreateTransferCacheEntryINTERNAL(
@@ -2582,7 +2319,7 @@ void RasterDecoderImpl::DoCreateTransferCacheEntryINTERNAL(
cc::ServiceTransferCacheEntry::UsesGrContext(entry_type) ? gr_context()
: nullptr;
if (context_for_entry)
- raster_decoder_context_state_->need_context_state_reset = true;
+ shared_context_state_->set_need_context_state_reset(true);
if (!transfer_cache()->CreateLockedEntry(
ServiceTransferCache::EntryKey(raster_decoder_id_, entry_type,
@@ -2648,7 +2385,7 @@ void RasterDecoderImpl::DoDeleteTransferCacheEntryINTERNAL(
void RasterDecoderImpl::RestoreStateForAttrib(GLuint attrib_index,
bool restore_array_binding) {
- raster_decoder_context_state_->PessimisticallyResetGrContext();
+ shared_context_state_->PessimisticallyResetGrContext();
}
// Include the auto-generated part of this file. We split this because it means
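
The raster_decoder.cc changes above replace eager GL state restoration with lazy invalidation: every Restore*() override now just dirties Skia's cached view of the driver state, and actual restoration is deferred until a context that tracks full state runs. A minimal sketch of that pattern follows; it assumes only the Skia GrContext header, and the class itself is illustrative — only set_need_context_state_reset() and PessimisticallyResetGrContext() are names taken from the diff.

#include "third_party/skia/include/gpu/GrContext.h"

// Sketch only: lazy-restore bookkeeping as used by the decoder above.
class LazyGrStateTracker {
 public:
  explicit LazyGrStateTracker(GrContext* gr_context)
      : gr_context_(gr_context) {}

  // Set instead of restoring GL state eagerly; the next decoder that needs
  // a consistent context checks this flag and performs a full restore.
  void set_need_context_state_reset(bool reset) { need_reset_ = reset; }
  bool need_context_state_reset() const { return need_reset_; }

  // resetContext() is cheap, so pessimistically dirty all of Skia's cached
  // GL state rather than tracking which bindings actually changed.
  void PessimisticallyResetGrContext() {
    if (gr_context_)
      gr_context_->resetContext();
  }

 private:
  GrContext* gr_context_;  // Not owned.
  bool need_reset_ = false;
};
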
diff --git a/chromium/gpu/command_buffer/service/raster_decoder.h b/chromium/gpu/command_buffer/service/raster_decoder.h
index 1f4530dfd00..717b79dc551 100644
--- a/chromium/gpu/command_buffer/service/raster_decoder.h
+++ b/chromium/gpu/command_buffer/service/raster_decoder.h
@@ -13,6 +13,7 @@
namespace gpu {
class DecoderClient;
+class SharedContextState;
class ServiceTransferCache;
namespace gles2 {
@@ -24,7 +25,6 @@ class Outputter;
} // namespace gles2
namespace raster {
-struct RasterDecoderContextState;
// This class implements the AsyncAPIInterface interface, decoding
// RasterInterface commands and calling GL.
@@ -36,7 +36,7 @@ class GPU_GLES2_EXPORT RasterDecoder : public DecoderContext,
CommandBufferServiceBase* command_buffer_service,
gles2::Outputter* outputter,
gles2::ContextGroup* group,
- scoped_refptr<RasterDecoderContextState> raster_decoder_context_state);
+ scoped_refptr<SharedContextState> shared_context_state);
~RasterDecoder() override;
@@ -83,6 +83,7 @@ class GPU_GLES2_EXPORT RasterDecoder : public DecoderContext,
virtual void SetUpForRasterCHROMIUMForTest() = 0;
virtual void SetOOMErrorForTest() = 0;
+ virtual void DisableFlushWorkaroundForTest() = 0;
protected:
RasterDecoder(CommandBufferServiceBase* command_buffer_service,
diff --git a/chromium/gpu/command_buffer/service/raster_decoder_autogen.h b/chromium/gpu/command_buffer/service/raster_decoder_autogen.h
index 4f097abcb36..f4b23b0ab3d 100644
--- a/chromium/gpu/command_buffer/service/raster_decoder_autogen.h
+++ b/chromium/gpu/command_buffer/service/raster_decoder_autogen.h
@@ -12,27 +12,6 @@
#ifndef GPU_COMMAND_BUFFER_SERVICE_RASTER_DECODER_AUTOGEN_H_
#define GPU_COMMAND_BUFFER_SERVICE_RASTER_DECODER_AUTOGEN_H_
-error::Error RasterDecoderImpl::HandleDeleteTexturesImmediate(
- uint32_t immediate_data_size,
- const volatile void* cmd_data) {
- const volatile raster::cmds::DeleteTexturesImmediate& c =
- *static_cast<const volatile raster::cmds::DeleteTexturesImmediate*>(
- cmd_data);
- GLsizei n = static_cast<GLsizei>(c.n);
- uint32_t textures_size;
- if (!gles2::SafeMultiplyUint32(n, sizeof(GLuint), &textures_size)) {
- return error::kOutOfBounds;
- }
- volatile const GLuint* textures =
- gles2::GetImmediateDataAs<volatile const GLuint*>(c, textures_size,
- immediate_data_size);
- if (textures == nullptr) {
- return error::kOutOfBounds;
- }
- DeleteTexturesHelper(n, textures);
- return error::kNoError;
-}
-
error::Error RasterDecoderImpl::HandleFinish(uint32_t immediate_data_size,
const volatile void* cmd_data) {
DoFinish();
@@ -67,7 +46,7 @@ error::Error RasterDecoderImpl::HandleGenQueriesEXTImmediate(
cmd_data);
GLsizei n = static_cast<GLsizei>(c.n);
uint32_t queries_size;
- if (!gles2::SafeMultiplyUint32(n, sizeof(GLuint), &queries_size)) {
+ if (!base::CheckMul(n, sizeof(GLuint)).AssignIfValid(&queries_size)) {
return error::kOutOfBounds;
}
volatile GLuint* queries = gles2::GetImmediateDataAs<volatile GLuint*>(
@@ -93,7 +72,7 @@ error::Error RasterDecoderImpl::HandleDeleteQueriesEXTImmediate(
cmd_data);
GLsizei n = static_cast<GLsizei>(c.n);
uint32_t queries_size;
- if (!gles2::SafeMultiplyUint32(n, sizeof(GLuint), &queries_size)) {
+ if (!base::CheckMul(n, sizeof(GLuint)).AssignIfValid(&queries_size)) {
return error::kOutOfBounds;
}
volatile const GLuint* queries =
@@ -135,7 +114,6 @@ error::Error RasterDecoderImpl::HandleBeginRasterCHROMIUMImmediate(
GLuint sk_color = static_cast<GLuint>(c.sk_color);
GLuint msaa_sample_count = static_cast<GLuint>(c.msaa_sample_count);
GLboolean can_use_lcd_text = static_cast<GLboolean>(c.can_use_lcd_text);
- GLint color_type = static_cast<GLint>(c.color_type);
GLuint color_space_transfer_cache_id =
static_cast<GLuint>(c.color_space_transfer_cache_id);
uint32_t mailbox_size;
@@ -152,7 +130,7 @@ error::Error RasterDecoderImpl::HandleBeginRasterCHROMIUMImmediate(
return error::kOutOfBounds;
}
DoBeginRasterCHROMIUM(sk_color, msaa_sample_count, can_use_lcd_text,
- color_type, color_space_transfer_cache_id, mailbox);
+ color_space_transfer_cache_id, mailbox);
return error::kNoError;
}
@@ -167,20 +145,10 @@ error::Error RasterDecoderImpl::HandleRasterCHROMIUM(
GLuint raster_shm_id = static_cast<GLuint>(c.raster_shm_id);
GLuint raster_shm_offset = static_cast<GLuint>(c.raster_shm_offset);
- GLsizeiptr raster_shm_size = static_cast<GLsizeiptr>(c.raster_shm_size);
+ GLuint raster_shm_size = static_cast<GLuint>(c.raster_shm_size);
GLuint font_shm_id = static_cast<GLuint>(c.font_shm_id);
GLuint font_shm_offset = static_cast<GLuint>(c.font_shm_offset);
- GLsizeiptr font_shm_size = static_cast<GLsizeiptr>(c.font_shm_size);
- if (raster_shm_size < 0) {
- LOCAL_SET_GL_ERROR(GL_INVALID_VALUE, "glRasterCHROMIUM",
- "raster_shm_size < 0");
- return error::kNoError;
- }
- if (font_shm_size < 0) {
- LOCAL_SET_GL_ERROR(GL_INVALID_VALUE, "glRasterCHROMIUM",
- "font_shm_size < 0");
- return error::kNoError;
- }
+ GLuint font_shm_size = static_cast<GLuint>(c.font_shm_size);
DoRasterCHROMIUM(raster_shm_id, raster_shm_offset, raster_shm_size,
font_shm_id, font_shm_offset, font_shm_size);
return error::kNoError;
@@ -248,7 +216,7 @@ RasterDecoderImpl::HandleDeletePaintCacheTextBlobsINTERNALImmediate(
DeletePaintCacheTextBlobsINTERNALImmediate*>(cmd_data);
GLsizei n = static_cast<GLsizei>(c.n);
uint32_t ids_size;
- if (!gles2::SafeMultiplyUint32(n, sizeof(GLuint), &ids_size)) {
+ if (!base::CheckMul(n, sizeof(GLuint)).AssignIfValid(&ids_size)) {
return error::kOutOfBounds;
}
volatile const GLuint* ids =
@@ -270,7 +238,7 @@ error::Error RasterDecoderImpl::HandleDeletePaintCachePathsINTERNALImmediate(
cmd_data);
GLsizei n = static_cast<GLsizei>(c.n);
uint32_t ids_size;
- if (!gles2::SafeMultiplyUint32(n, sizeof(GLuint), &ids_size)) {
+ if (!base::CheckMul(n, sizeof(GLuint)).AssignIfValid(&ids_size)) {
return error::kOutOfBounds;
}
volatile const GLuint* ids =
@@ -290,66 +258,43 @@ error::Error RasterDecoderImpl::HandleClearPaintCacheINTERNAL(
return error::kNoError;
}
-error::Error RasterDecoderImpl::HandleCreateAndConsumeTextureINTERNALImmediate(
- uint32_t immediate_data_size,
- const volatile void* cmd_data) {
- const volatile raster::cmds::CreateAndConsumeTextureINTERNALImmediate& c =
- *static_cast<const volatile raster::cmds::
- CreateAndConsumeTextureINTERNALImmediate*>(cmd_data);
- GLuint texture_id = static_cast<GLuint>(c.texture_id);
- bool use_buffer = static_cast<bool>(c.use_buffer);
- gfx::BufferUsage buffer_usage = static_cast<gfx::BufferUsage>(c.buffer_usage);
- viz::ResourceFormat format = static_cast<viz::ResourceFormat>(c.format);
- uint32_t mailbox_size;
- if (!gles2::GLES2Util::ComputeDataSize<GLbyte, 16>(1, &mailbox_size)) {
- return error::kOutOfBounds;
- }
- if (mailbox_size > immediate_data_size) {
- return error::kOutOfBounds;
- }
- volatile const GLbyte* mailbox =
- gles2::GetImmediateDataAs<volatile const GLbyte*>(c, mailbox_size,
- immediate_data_size);
- if (!validators_->gfx_buffer_usage.IsValid(buffer_usage)) {
- LOCAL_SET_GL_ERROR_INVALID_ENUM("glCreateAndConsumeTextureINTERNAL",
- buffer_usage, "buffer_usage");
- return error::kNoError;
- }
- if (!validators_->viz_resource_format.IsValid(format)) {
- LOCAL_SET_GL_ERROR_INVALID_ENUM("glCreateAndConsumeTextureINTERNAL", format,
- "format");
- return error::kNoError;
- }
- if (mailbox == nullptr) {
- return error::kOutOfBounds;
- }
- DoCreateAndConsumeTextureINTERNAL(texture_id, use_buffer, buffer_usage,
- format, mailbox);
- return error::kNoError;
-}
-
-error::Error RasterDecoderImpl::HandleCopySubTexture(
+error::Error RasterDecoderImpl::HandleCopySubTextureINTERNALImmediate(
uint32_t immediate_data_size,
const volatile void* cmd_data) {
- const volatile raster::cmds::CopySubTexture& c =
- *static_cast<const volatile raster::cmds::CopySubTexture*>(cmd_data);
- GLuint source_id = static_cast<GLuint>(c.source_id);
- GLuint dest_id = static_cast<GLuint>(c.dest_id);
+ const volatile raster::cmds::CopySubTextureINTERNALImmediate& c =
+ *static_cast<
+ const volatile raster::cmds::CopySubTextureINTERNALImmediate*>(
+ cmd_data);
GLint xoffset = static_cast<GLint>(c.xoffset);
GLint yoffset = static_cast<GLint>(c.yoffset);
GLint x = static_cast<GLint>(c.x);
GLint y = static_cast<GLint>(c.y);
GLsizei width = static_cast<GLsizei>(c.width);
GLsizei height = static_cast<GLsizei>(c.height);
+ uint32_t mailboxes_size;
+ if (!gles2::GLES2Util::ComputeDataSize<GLbyte, 32>(1, &mailboxes_size)) {
+ return error::kOutOfBounds;
+ }
+ if (mailboxes_size > immediate_data_size) {
+ return error::kOutOfBounds;
+ }
+ volatile const GLbyte* mailboxes =
+ gles2::GetImmediateDataAs<volatile const GLbyte*>(c, mailboxes_size,
+ immediate_data_size);
if (width < 0) {
- LOCAL_SET_GL_ERROR(GL_INVALID_VALUE, "glCopySubTexture", "width < 0");
+ LOCAL_SET_GL_ERROR(GL_INVALID_VALUE, "glCopySubTextureINTERNAL",
+ "width < 0");
return error::kNoError;
}
if (height < 0) {
- LOCAL_SET_GL_ERROR(GL_INVALID_VALUE, "glCopySubTexture", "height < 0");
+ LOCAL_SET_GL_ERROR(GL_INVALID_VALUE, "glCopySubTextureINTERNAL",
+ "height < 0");
return error::kNoError;
}
- DoCopySubTexture(source_id, dest_id, xoffset, yoffset, x, y, width, height);
+ if (mailboxes == nullptr) {
+ return error::kOutOfBounds;
+ }
+ DoCopySubTextureINTERNAL(xoffset, yoffset, x, y, width, height, mailboxes);
return error::kNoError;
}
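
The handlers above also swap gles2::SafeMultiplyUint32/SafeAddInt32 for base's checked-math helpers, which fold the overflow test and the assignment into a single expression; together with the GLsizeiptr-to-GLuint change this removes the hand-written negative-size checks. A small stand-alone sketch of the idiom, assuming only base/numerics/checked_math.h; the function and variable names are illustrative, not the generated code.

#include <cstdint>

#include "base/numerics/checked_math.h"

// Returns false on overflow instead of writing a wrapped value, which is
// what lets the generated handlers return error::kOutOfBounds directly.
bool ComputeIdsSize(int n, uint32_t* ids_size) {
  return base::CheckMul(n, sizeof(uint32_t)).AssignIfValid(ids_size);
}

// Same idiom for the x + width / y + height bounds checks in
// DoCopySubTextureINTERNAL above.
bool ComputeMaxExtent(int32_t offset, int32_t extent, int32_t* max_extent) {
  return base::CheckAdd(offset, extent).AssignIfValid(max_extent);
}
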
diff --git a/chromium/gpu/command_buffer/service/raster_decoder_context_state.cc b/chromium/gpu/command_buffer/service/raster_decoder_context_state.cc
deleted file mode 100644
index 93f2bbda198..00000000000
--- a/chromium/gpu/command_buffer/service/raster_decoder_context_state.cc
+++ /dev/null
@@ -1,157 +0,0 @@
-// Copyright 2018 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "gpu/command_buffer/service/raster_decoder_context_state.h"
-
-#include "base/threading/thread_task_runner_handle.h"
-#include "base/trace_event/memory_dump_manager.h"
-#include "gpu/command_buffer/common/activity_flags.h"
-#include "gpu/command_buffer/service/service_transfer_cache.h"
-#include "gpu/config/gpu_driver_bug_workarounds.h"
-#include "gpu/vulkan/buildflags.h"
-#include "ui/gl/gl_bindings.h"
-#include "ui/gl/gl_context.h"
-#include "ui/gl/gl_share_group.h"
-#include "ui/gl/gl_surface.h"
-#include "ui/gl/init/create_gr_gl_interface.h"
-
-#if BUILDFLAG(ENABLE_VULKAN)
-#include "components/viz/common/gpu/vulkan_context_provider.h"
-#endif
-
-namespace gpu {
-namespace raster {
-
-RasterDecoderContextState::RasterDecoderContextState(
- scoped_refptr<gl::GLShareGroup> share_group,
- scoped_refptr<gl::GLSurface> surface,
- scoped_refptr<gl::GLContext> context,
- bool use_virtualized_gl_contexts,
- viz::VulkanContextProvider* vulkan_context_provider)
- : share_group(std::move(share_group)),
- surface(std::move(surface)),
- context(std::move(context)),
- use_virtualized_gl_contexts(use_virtualized_gl_contexts),
- vk_context_provider(vulkan_context_provider),
-#if BUILDFLAG(ENABLE_VULKAN)
- gr_context(vk_context_provider ? vk_context_provider->GetGrContext()
- : nullptr),
-#endif
- use_vulkan_gr_context(!!gr_context) {
- if (base::ThreadTaskRunnerHandle::IsSet()) {
- base::trace_event::MemoryDumpManager::GetInstance()->RegisterDumpProvider(
- this, "RasterDecoderContextState", base::ThreadTaskRunnerHandle::Get());
- }
-}
-
-RasterDecoderContextState::~RasterDecoderContextState() {
- if (gr_context)
- gr_context->abandonContext();
- base::trace_event::MemoryDumpManager::GetInstance()->UnregisterDumpProvider(
- this);
-}
-
-void RasterDecoderContextState::InitializeGrContext(
- const GpuDriverBugWorkarounds& workarounds,
- GrContextOptions::PersistentCache* cache,
- GpuProcessActivityFlags* activity_flags,
- gl::ProgressReporter* progress_reporter) {
- if (!use_vulkan_gr_context) {
- DCHECK(context->IsCurrent(surface.get()));
-
- sk_sp<GrGLInterface> interface(gl::init::CreateGrGLInterface(
- *context->GetVersionInfo(), workarounds.use_es2_for_oopr,
- progress_reporter));
- if (!interface) {
- LOG(ERROR) << "OOP raster support disabled: GrGLInterface creation "
- "failed.";
- return;
- }
-
- if (activity_flags && cache) {
- // |activity_flags| is safe to capture here since it must outlive the
- // this context state.
- interface->fFunctions.fProgramBinary =
- [activity_flags](GrGLuint program, GrGLenum binaryFormat,
- void* binary, GrGLsizei length) {
- GpuProcessActivityFlags::ScopedSetFlag scoped_set_flag(
- activity_flags, ActivityFlagsBase::FLAG_LOADING_PROGRAM_BINARY);
- glProgramBinary(program, binaryFormat, binary, length);
- };
- }
-
- // If you make any changes to the GrContext::Options here that could
- // affect text rendering, make sure to match the capabilities initialized
- // in GetCapabilities and ensuring these are also used by the
- // PaintOpBufferSerializer.
- GrContextOptions options;
- options.fDriverBugWorkarounds =
- GrDriverBugWorkarounds(workarounds.ToIntSet());
- options.fDisableCoverageCountingPaths = true;
- size_t max_resource_cache_bytes = 0u;
- raster::DetermineGrCacheLimitsFromAvailableMemory(
- &max_resource_cache_bytes, &glyph_cache_max_texture_bytes);
- options.fGlyphCacheTextureMaximumBytes = glyph_cache_max_texture_bytes;
- options.fPersistentCache = cache;
- options.fAvoidStencilBuffers = workarounds.avoid_stencil_buffers;
- owned_gr_context = GrContext::MakeGL(std::move(interface), options);
- gr_context = owned_gr_context.get();
- if (!gr_context) {
- LOG(ERROR) << "OOP raster support disabled: GrContext creation "
- "failed.";
- } else {
- constexpr int kMaxGaneshResourceCacheCount = 16384;
- gr_context->setResourceCacheLimits(kMaxGaneshResourceCacheCount,
- max_resource_cache_bytes);
- }
- }
- transfer_cache = std::make_unique<ServiceTransferCache>();
-}
-
-bool RasterDecoderContextState::OnMemoryDump(
- const base::trace_event::MemoryDumpArgs& args,
- base::trace_event::ProcessMemoryDump* pmd) {
- if (gr_context)
- DumpGrMemoryStatistics(gr_context, pmd, base::nullopt);
- return true;
-}
-
-void RasterDecoderContextState::PurgeMemory(
- base::MemoryPressureListener::MemoryPressureLevel memory_pressure_level) {
- if (!gr_context) {
- DCHECK(!transfer_cache);
- return;
- }
-
- // Ensure the context is current before doing any GPU cleanup.
- context->MakeCurrent(surface.get());
-
- switch (memory_pressure_level) {
- case base::MemoryPressureListener::MEMORY_PRESSURE_LEVEL_NONE:
- // This function is only called with moderate or critical pressure.
- NOTREACHED();
- return;
- case base::MemoryPressureListener::MEMORY_PRESSURE_LEVEL_MODERATE:
- // With moderate pressure, clear any unlocked resources.
- gr_context->purgeUnlockedResources(true /* scratchResourcesOnly */);
- break;
- case base::MemoryPressureListener::MEMORY_PRESSURE_LEVEL_CRITICAL:
- // With critical pressure, purge as much as possible.
- gr_context->freeGpuResources();
- break;
- }
-
- transfer_cache->PurgeMemory(memory_pressure_level);
-}
-
-void RasterDecoderContextState::PessimisticallyResetGrContext() const {
- // Calling GrContext::resetContext() is very cheap, so we do it
- // pessimistically. We could dirty less state if skia state setting
- // performance becomes an issue.
- if (gr_context && !use_vulkan_gr_context)
- gr_context->resetContext();
-}
-
-} // namespace raster
-} // namespace gpu
diff --git a/chromium/gpu/command_buffer/service/raster_decoder_context_state.h b/chromium/gpu/command_buffer/service/raster_decoder_context_state.h
deleted file mode 100644
index 91790319bf7..00000000000
--- a/chromium/gpu/command_buffer/service/raster_decoder_context_state.h
+++ /dev/null
@@ -1,85 +0,0 @@
-// Copyright 2018 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef GPU_COMMAND_BUFFER_SERVICE_RASTER_DECODER_CONTEXT_STATE_H_
-#define GPU_COMMAND_BUFFER_SERVICE_RASTER_DECODER_CONTEXT_STATE_H_
-
-#include "base/memory/memory_pressure_listener.h"
-#include "base/memory/ref_counted.h"
-#include "base/trace_event/memory_dump_provider.h"
-#include "gpu/command_buffer/common/skia_utils.h"
-#include "gpu/gpu_gles2_export.h"
-#include "third_party/skia/include/gpu/GrContext.h"
-#include "ui/gl/progress_reporter.h"
-
-namespace gl {
-class GLContext;
-class GLShareGroup;
-class GLSurface;
-} // namespace gl
-
-namespace viz {
-class VulkanContextProvider;
-} // namespace viz
-
-namespace gpu {
-class GpuDriverBugWorkarounds;
-class GpuProcessActivityFlags;
-class ServiceTransferCache;
-
-namespace raster {
-
-struct GPU_GLES2_EXPORT RasterDecoderContextState
- : public base::RefCounted<RasterDecoderContextState>,
- public base::trace_event::MemoryDumpProvider {
- public:
- // TODO: Refactor code to have seperate constructor for GL and Vulkan and not
- // initialize/use GL related info for vulkan and vice-versa.
- RasterDecoderContextState(
- scoped_refptr<gl::GLShareGroup> share_group,
- scoped_refptr<gl::GLSurface> surface,
- scoped_refptr<gl::GLContext> context,
- bool use_virtualized_gl_contexts,
- viz::VulkanContextProvider* vulkan_context_provider = nullptr);
-
- void InitializeGrContext(const GpuDriverBugWorkarounds& workarounds,
- GrContextOptions::PersistentCache* cache,
- GpuProcessActivityFlags* activity_flags = nullptr,
- gl::ProgressReporter* progress_reporter = nullptr);
- void PurgeMemory(
- base::MemoryPressureListener::MemoryPressureLevel memory_pressure_level);
-
- void PessimisticallyResetGrContext() const;
-
- scoped_refptr<gl::GLShareGroup> share_group;
- scoped_refptr<gl::GLSurface> surface;
- scoped_refptr<gl::GLContext> context;
- sk_sp<GrContext> owned_gr_context;
- std::unique_ptr<ServiceTransferCache> transfer_cache;
- const bool use_virtualized_gl_contexts = false;
- viz::VulkanContextProvider* vk_context_provider = nullptr;
- GrContext* gr_context = nullptr;
- const bool use_vulkan_gr_context = false;
- bool context_lost = false;
- size_t glyph_cache_max_texture_bytes = 0u;
-
- // |need_context_state_reset| is set whenever Skia may have altered the
- // driver's GL state. It signals the need to restore driver GL state to
- // |state_| before executing commands that do not
- // PermitsInconsistentContextState.
- bool need_context_state_reset = false;
-
- // base::trace_event::MemoryDumpProvider implementation.
- bool OnMemoryDump(const base::trace_event::MemoryDumpArgs& args,
- base::trace_event::ProcessMemoryDump* pmd) override;
-
- private:
- friend class base::RefCounted<RasterDecoderContextState>;
- ~RasterDecoderContextState() override;
-};
-
-} // namespace raster
-} // namespace gpu
-
-#endif // GPU_COMMAND_BUFFER_SERVICE_RASTER_DECODER_CONTEXT_STATE_H_
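
The RasterDecoderContextState struct deleted above is replaced by gpu::SharedContextState (note the new shared_context_state.h include in the unit test below), with the former public fields moved behind accessors. The following sketch shows the subset of that interface exercised in this patch, reconstructed from the call sites rather than from the real header, so the return types flagged below are assumptions.

#include <cstddef>
#include <cstdint>
#include <vector>

class GrContext;
namespace gl {
class GLContext;
class GLSurface;
}  // namespace gl

namespace gpu {
class ServiceTransferCache;

// Reconstructed from call sites in this diff; not the actual header.
class SharedContextState {
 public:
  gl::GLSurface* surface();
  gl::GLContext* context();
  GrContext* gr_context();
  ServiceTransferCache* transfer_cache();
  bool use_vulkan_gr_context() const;
  bool context_lost() const;
  bool MakeCurrent(gl::GLSurface* surface);
  void set_need_context_state_reset(bool reset);
  bool need_context_state_reset() const;
  void PessimisticallyResetGrContext() const;
  size_t glyph_cache_max_texture_bytes() const;
  // Return type assumed; only the pointer is passed through to
  // cc::PaintOp::DeserializeOptions in DoRasterCHROMIUM.
  std::vector<uint8_t>* scratch_deserialization_buffer();
};

}  // namespace gpu
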
diff --git a/chromium/gpu/command_buffer/service/raster_decoder_mock.h b/chromium/gpu/command_buffer/service/raster_decoder_mock.h
index a70c669492b..6c5424cccf9 100644
--- a/chromium/gpu/command_buffer/service/raster_decoder_mock.h
+++ b/chromium/gpu/command_buffer/service/raster_decoder_mock.h
@@ -87,7 +87,7 @@ class MockRasterDecoder : public RasterDecoder {
MOCK_CONST_METHOD0(RestoreFramebufferBindings, void());
MOCK_CONST_METHOD0(RestoreProgramBindings, void());
MOCK_METHOD0(RestoreRenderbufferBindings, void());
- MOCK_CONST_METHOD1(RestoreTextureState, void(unsigned service_id));
+ MOCK_METHOD1(RestoreTextureState, void(unsigned service_id));
MOCK_CONST_METHOD1(RestoreTextureUnitBindings, void(unsigned unit));
MOCK_METHOD1(RestoreVertexAttribArray, void(unsigned index));
@@ -100,6 +100,7 @@ class MockRasterDecoder : public RasterDecoder {
MOCK_METHOD0(DecoderIdForTest, int());
MOCK_METHOD0(SetUpForRasterCHROMIUMForTest, void());
MOCK_METHOD0(SetOOMErrorForTest, void());
+ MOCK_METHOD0(DisableFlushWorkaroundForTest, void());
MOCK_METHOD4(DoCommands,
error::Error(unsigned int num_commands,
const volatile void* buffer,
diff --git a/chromium/gpu/command_buffer/service/raster_decoder_unittest.cc b/chromium/gpu/command_buffer/service/raster_decoder_unittest.cc
index 14488928c33..f9ec337a663 100644
--- a/chromium/gpu/command_buffer/service/raster_decoder_unittest.cc
+++ b/chromium/gpu/command_buffer/service/raster_decoder_unittest.cc
@@ -8,7 +8,6 @@
#include "base/command_line.h"
#include "base/memory/ptr_util.h"
-#include "build/build_config.h"
#include "gpu/command_buffer/common/gles2_cmd_utils.h"
#include "gpu/command_buffer/common/mailbox.h"
#include "gpu/command_buffer/common/raster_cmd_format.h"
@@ -16,8 +15,8 @@
#include "gpu/command_buffer/service/gles2_cmd_decoder.h"
#include "gpu/command_buffer/service/program_manager.h"
#include "gpu/command_buffer/service/query_manager.h"
-#include "gpu/command_buffer/service/raster_decoder_context_state.h"
#include "gpu/command_buffer/service/raster_decoder_unittest_base.h"
+#include "gpu/command_buffer/service/shared_context_state.h"
#include "gpu/command_buffer/service/shared_image_manager.h"
#include "gpu/command_buffer/service/test_helper.h"
#include "testing/gtest/include/gtest/gtest.h"
@@ -36,6 +35,17 @@ using namespace gpu::raster::cmds;
namespace gpu {
namespace raster {
+namespace {
+
+void CopyMailboxes(GLbyte (&output)[sizeof(Mailbox) * 2],
+ const Mailbox& source,
+ const Mailbox& dest) {
+ memcpy(output, source.name, sizeof(source.name));
+ memcpy(output + sizeof(source.name), dest.name, sizeof(dest.name));
+}
+
+} // anonymous namespace
+
class RasterDecoderTest : public RasterDecoderTestBase {
public:
RasterDecoderTest() = default;
@@ -138,11 +148,12 @@ TEST_P(RasterDecoderTest, BeginEndQueryEXTCommandsIssuedCHROMIUM) {
}
TEST_P(RasterDecoderTest, CopyTexSubImage2DTwiceClearsUnclearedTexture) {
+ shared_context_state_->set_need_context_state_reset(true);
// Create uninitialized source texture.
- GLuint source_texture_id = kNewClientId;
- CreateFakeTexture(source_texture_id, kNewServiceId,
- viz::ResourceFormat::RGBA_8888, /*width=*/2, /*height=*/2,
- /*cleared=*/false);
+ gpu::Mailbox source_texture_mailbox =
+ CreateFakeTexture(kNewServiceId, viz::ResourceFormat::RGBA_8888,
+ /*width=*/2, /*height=*/2,
+ /*cleared=*/false);
// This will initialize the top half of destination.
{
@@ -152,9 +163,11 @@ TEST_P(RasterDecoderTest, CopyTexSubImage2DTwiceClearsUnclearedTexture) {
GL_TEXTURE_2D, GL_TEXTURE_2D, 0, GL_RGBA,
GL_UNSIGNED_BYTE, 0, 0, 2, 2, 0);
SetScopedTextureBinderExpectations(GL_TEXTURE_2D);
- CopySubTexture cmd;
- cmd.Init(source_texture_id, client_texture_id_, 0, 0, 0, 0, 2, 1);
- EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ auto& cmd = *GetImmediateAs<CopySubTextureINTERNALImmediate>();
+ GLbyte mailboxes[sizeof(gpu::Mailbox) * 2];
+ CopyMailboxes(mailboxes, source_texture_mailbox, client_texture_mailbox_);
+ cmd.Init(0, 0, 0, 0, 2, 1, mailboxes);
+ EXPECT_EQ(error::kNoError, ExecuteImmediateCmd(cmd, sizeof(mailboxes)));
}
// This will initialize bottom right corner of the destination.
@@ -165,15 +178,15 @@ TEST_P(RasterDecoderTest, CopyTexSubImage2DTwiceClearsUnclearedTexture) {
GL_TEXTURE_2D, GL_TEXTURE_2D, 0, GL_RGBA,
GL_UNSIGNED_BYTE, 0, 1, 2, 1, 0);
SetScopedTextureBinderExpectations(GL_TEXTURE_2D);
- CopySubTexture cmd;
- cmd.Init(source_texture_id, client_texture_id_, 1, 1, 0, 0, 1, 1);
- EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ auto& cmd = *GetImmediateAs<CopySubTextureINTERNALImmediate>();
+ GLbyte mailboxes[sizeof(gpu::Mailbox) * 2];
+ CopyMailboxes(mailboxes, source_texture_mailbox, client_texture_mailbox_);
+ cmd.Init(1, 1, 0, 0, 1, 1, mailboxes);
+ EXPECT_EQ(error::kNoError, ExecuteImmediateCmd(cmd, sizeof(mailboxes)));
}
- gles2::TextureManager* manager = group().texture_manager();
- gles2::TextureRef* texture_ref = manager->GetTexture(client_texture_id_);
- ASSERT_TRUE(texture_ref != nullptr);
- gles2::Texture* texture = texture_ref->texture();
+ auto* texture = gles2::Texture::CheckedCast(
+ group().mailbox_manager()->ConsumeTexture(client_texture_mailbox_));
EXPECT_TRUE(texture->SafeToRenderFrom());
}
@@ -184,21 +197,20 @@ TEST_P(RasterDecoderManualInitTest, CopyTexSubImage2DValidateColorFormat) {
InitDecoder(init);
// Create dest texture.
- GLuint dest_texture_id = kNewClientId;
- CreateFakeTexture(dest_texture_id, kNewServiceId, viz::ResourceFormat::RED_8,
- /*width=*/2, /*height=*/2, /*cleared=*/true);
+ gpu::Mailbox dest_texture_mailbox =
+ CreateFakeTexture(kNewServiceId, viz::ResourceFormat::RED_8,
+ /*width=*/2, /*height=*/2, /*cleared=*/true);
SetScopedTextureBinderExpectations(GL_TEXTURE_2D);
- CopySubTexture copy_cmd;
- copy_cmd.Init(client_texture_id_, dest_texture_id, 0, 0, 0, 0, 2, 1);
- EXPECT_EQ(error::kNoError, ExecuteCmd(copy_cmd));
+ auto& copy_cmd = *GetImmediateAs<CopySubTextureINTERNALImmediate>();
+ GLbyte mailboxes[sizeof(gpu::Mailbox) * 2];
+ CopyMailboxes(mailboxes, client_texture_mailbox_, dest_texture_mailbox);
+ copy_cmd.Init(0, 0, 0, 0, 2, 1, mailboxes);
+ EXPECT_EQ(error::kNoError, ExecuteImmediateCmd(copy_cmd, sizeof(mailboxes)));
EXPECT_EQ(GL_INVALID_OPERATION, GetGLError());
}
TEST_P(RasterDecoderTest, YieldAfterEndRasterCHROMIUM) {
-#if defined(OS_MACOSX)
- EXPECT_CALL(*gl_, Flush()).RetiresOnSaturation();
-#endif
GetDecoder()->SetUpForRasterCHROMIUMForTest();
cmds::EndRasterCHROMIUM end_raster_cmd;
end_raster_cmd.Init();
@@ -213,6 +225,10 @@ class RasterDecoderOOPTest : public testing::Test, DecoderClient {
gl::GLSurfaceTestSupport::InitializeOneOff();
gpu::GpuDriverBugWorkarounds workarounds;
+ GpuFeatureInfo gpu_feature_info;
+ gpu_feature_info.status_values[GPU_FEATURE_TYPE_OOP_RASTERIZATION] =
+ kGpuFeatureStatusEnabled;
+
scoped_refptr<gl::GLShareGroup> share_group = new gl::GLShareGroup();
scoped_refptr<gl::GLSurface> surface =
gl::init::CreateOffscreenGLSurface(gfx::Size());
@@ -220,16 +236,15 @@ class RasterDecoderOOPTest : public testing::Test, DecoderClient {
share_group.get(), surface.get(), gl::GLContextAttribs());
ASSERT_TRUE(context->MakeCurrent(surface.get()));
- context_state_ = new raster::RasterDecoderContextState(
+ auto feature_info =
+ base::MakeRefCounted<gles2::FeatureInfo>(workarounds, gpu_feature_info);
+
+ context_state_ = base::MakeRefCounted<SharedContextState>(
std::move(share_group), std::move(surface), std::move(context),
- false /* use_virtualized_gl_contexts */);
+ false /* use_virtualized_gl_contexts */, base::DoNothing());
context_state_->InitializeGrContext(workarounds, nullptr);
+ context_state_->InitializeGL(GpuPreferences(), feature_info);
- GpuFeatureInfo gpu_feature_info;
- gpu_feature_info.status_values[GPU_FEATURE_TYPE_OOP_RASTERIZATION] =
- kGpuFeatureStatusEnabled;
- scoped_refptr<gles2::FeatureInfo> feature_info =
- new gles2::FeatureInfo(workarounds, gpu_feature_info);
group_ = new gles2::ContextGroup(
gpu_preferences_, false, &mailbox_manager_,
nullptr /* memory_tracker */, &shader_translator_cache_,
@@ -249,7 +264,6 @@ class RasterDecoderOOPTest : public testing::Test, DecoderClient {
void CacheShader(const std::string& key, const std::string& shader) override {
}
void OnFenceSyncRelease(uint64_t release) override {}
- bool OnWaitSyncToken(const gpu::SyncToken&) override { return false; }
void OnDescheduleUntilFinished() override {}
void OnRescheduleAfterFinished() override {}
void OnSwapBuffers(uint64_t swap_id, uint32_t flags) override {}
@@ -262,10 +276,10 @@ class RasterDecoderOOPTest : public testing::Test, DecoderClient {
ContextCreationAttribs attribs;
attribs.enable_oop_rasterization = true;
attribs.enable_raster_interface = true;
- CHECK_EQ(
- decoder->Initialize(context_state_->surface, context_state_->context,
- true, gles2::DisallowedFeatures(), attribs),
- ContextResult::kSuccess);
+ CHECK_EQ(decoder->Initialize(context_state_->surface(),
+ context_state_->context(), true,
+ gles2::DisallowedFeatures(), attribs),
+ ContextResult::kSuccess);
return decoder;
}
@@ -282,7 +296,7 @@ class RasterDecoderOOPTest : public testing::Test, DecoderClient {
protected:
gles2::TraceOutputter outputter_;
FakeCommandBufferServiceBase command_buffer_service_;
- scoped_refptr<RasterDecoderContextState> context_state_;
+ scoped_refptr<SharedContextState> context_state_;
GpuPreferences gpu_preferences_;
gles2::MailboxManagerImpl mailbox_manager_;
@@ -297,33 +311,26 @@ class RasterDecoderOOPTest : public testing::Test, DecoderClient {
TEST_F(RasterDecoderOOPTest, StateRestoreAcrossDecoders) {
// First decoder receives a skia command requiring context state reset.
auto decoder1 = CreateDecoder();
- EXPECT_FALSE(context_state_->need_context_state_reset);
+ EXPECT_FALSE(context_state_->need_context_state_reset());
decoder1->SetUpForRasterCHROMIUMForTest();
cmds::EndRasterCHROMIUM end_raster_cmd;
end_raster_cmd.Init();
EXPECT_FALSE(error::IsError(ExecuteCmd(decoder1.get(), end_raster_cmd)));
- EXPECT_TRUE(context_state_->need_context_state_reset);
+ EXPECT_TRUE(context_state_->need_context_state_reset());
  // Another decoder receives a command which does not require consistent
  // state; it should be processed without state restoration.
auto decoder2 = CreateDecoder();
decoder2->SetUpForRasterCHROMIUMForTest();
EXPECT_FALSE(error::IsError(ExecuteCmd(decoder2.get(), end_raster_cmd)));
- EXPECT_TRUE(context_state_->need_context_state_reset);
-
- // Now process a command which requires consistent state.
- LoseContextCHROMIUM lose_context_cmd;
- lose_context_cmd.Init(GL_GUILTY_CONTEXT_RESET_ARB,
- GL_INNOCENT_CONTEXT_RESET_ARB);
- EXPECT_FALSE(error::IsError(ExecuteCmd(decoder2.get(), lose_context_cmd)));
- EXPECT_FALSE(context_state_->need_context_state_reset);
+ EXPECT_TRUE(context_state_->need_context_state_reset());
decoder1->Destroy(true);
- context_state_->context->MakeCurrent(context_state_->surface.get());
+ context_state_->MakeCurrent(nullptr);
decoder2->Destroy(true);
// Make sure the context is preserved across decoders.
- EXPECT_FALSE(context_state_->gr_context->abandoned());
+ EXPECT_FALSE(context_state_->gr_context()->abandoned());
}
} // namespace raster
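
The tests above stop tracking client texture IDs and instead address textures through mailboxes: CreateFakeTexture() now publishes the texture under a generated gpu::Mailbox and returns it, and assertions consume the mailbox back into a Texture. A minimal sketch of that round trip, using only the service-side calls that appear in this patch (ProduceTexture/ConsumeTexture/CheckedCast); an illustration, not part of the change:

    // Sketch: publish a texture under a fresh mailbox, then resolve it back.
    gpu::Mailbox PublishTexture(gpu::gles2::MailboxManager* manager,
                                gpu::gles2::Texture* texture) {
      gpu::Mailbox mailbox = gpu::Mailbox::Generate();
      manager->ProduceTexture(mailbox, texture);
      return mailbox;
    }

    gpu::gles2::Texture* ResolveTexture(gpu::gles2::MailboxManager* manager,
                                        const gpu::Mailbox& mailbox) {
      // ConsumeTexture() returns the TextureBase registered for the mailbox;
      // CheckedCast() narrows it to the validating decoder's Texture type.
      return gpu::gles2::Texture::CheckedCast(
          manager->ConsumeTexture(mailbox));
    }
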
diff --git a/chromium/gpu/command_buffer/service/raster_decoder_unittest_1_autogen.h b/chromium/gpu/command_buffer/service/raster_decoder_unittest_1_autogen.h
index e544277de25..f6c322fe056 100644
--- a/chromium/gpu/command_buffer/service/raster_decoder_unittest_1_autogen.h
+++ b/chromium/gpu/command_buffer/service/raster_decoder_unittest_1_autogen.h
@@ -12,27 +12,6 @@
#ifndef GPU_COMMAND_BUFFER_SERVICE_RASTER_DECODER_UNITTEST_1_AUTOGEN_H_
#define GPU_COMMAND_BUFFER_SERVICE_RASTER_DECODER_UNITTEST_1_AUTOGEN_H_
-TEST_P(RasterDecoderTest1, DeleteTexturesImmediateValidArgs) {
- EXPECT_CALL(*gl_, DeleteTextures(1, Pointee(kServiceTextureId))).Times(1);
- cmds::DeleteTexturesImmediate& cmd =
- *GetImmediateAs<cmds::DeleteTexturesImmediate>();
- SpecializedSetup<cmds::DeleteTexturesImmediate, 0>(true);
- cmd.Init(1, &client_texture_id_);
- EXPECT_EQ(error::kNoError,
- ExecuteImmediateCmd(cmd, sizeof(client_texture_id_)));
- EXPECT_EQ(GL_NO_ERROR, GetGLError());
- EXPECT_TRUE(GetTexture(client_texture_id_) == nullptr);
-}
-
-TEST_P(RasterDecoderTest1, DeleteTexturesImmediateInvalidArgs) {
- cmds::DeleteTexturesImmediate& cmd =
- *GetImmediateAs<cmds::DeleteTexturesImmediate>();
- SpecializedSetup<cmds::DeleteTexturesImmediate, 0>(false);
- GLuint temp = kInvalidClientId;
- cmd.Init(1, &temp);
- EXPECT_EQ(error::kNoError, ExecuteImmediateCmd(cmd, sizeof(temp)));
-}
-
TEST_P(RasterDecoderTest1, FinishValidArgs) {
EXPECT_CALL(*gl_, Finish());
SpecializedSetup<cmds::Finish, 0>(true);
diff --git a/chromium/gpu/command_buffer/service/raster_decoder_unittest_base.cc b/chromium/gpu/command_buffer/service/raster_decoder_unittest_base.cc
index 833769c620d..7c953e1c84c 100644
--- a/chromium/gpu/command_buffer/service/raster_decoder_unittest_base.cc
+++ b/chromium/gpu/command_buffer/service/raster_decoder_unittest_base.cc
@@ -24,8 +24,8 @@
#include "gpu/command_buffer/service/logger.h"
#include "gpu/command_buffer/service/mailbox_manager.h"
#include "gpu/command_buffer/service/program_manager.h"
-#include "gpu/command_buffer/service/raster_decoder_context_state.h"
#include "gpu/command_buffer/service/service_utils.h"
+#include "gpu/command_buffer/service/shared_context_state.h"
#include "gpu/command_buffer/service/test_helper.h"
#include "gpu/command_buffer/service/vertex_attrib_manager.h"
#include "testing/gtest/include/gtest/gtest.h"
@@ -54,7 +54,6 @@ RasterDecoderTestBase::InitState::~InitState() = default;
RasterDecoderTestBase::RasterDecoderTestBase()
: surface_(nullptr),
context_(nullptr),
- client_texture_id_(106),
shared_memory_id_(0),
shared_memory_offset_(0),
shared_memory_address_(nullptr),
@@ -72,9 +71,6 @@ void RasterDecoderTestBase::OnConsoleMessage(int32_t id,
void RasterDecoderTestBase::CacheShader(const std::string& key,
const std::string& shader) {}
void RasterDecoderTestBase::OnFenceSyncRelease(uint64_t release) {}
-bool RasterDecoderTestBase::OnWaitSyncToken(const gpu::SyncToken&) {
- return false;
-}
void RasterDecoderTestBase::OnDescheduleUntilFinished() {}
void RasterDecoderTestBase::OnRescheduleAfterFinished() {}
void RasterDecoderTestBase::OnSwapBuffers(uint64_t swap_id, uint32_t flags) {}
@@ -145,15 +141,14 @@ void RasterDecoderTestBase::ExpectEnableDisable(GLenum cap, bool enable) {
}
}
-void RasterDecoderTestBase::CreateFakeTexture(
- GLuint client_id,
+gpu::Mailbox RasterDecoderTestBase::CreateFakeTexture(
GLuint service_id,
viz::ResourceFormat resource_format,
GLsizei width,
GLsizei height,
bool cleared) {
// Create texture and temporary ref.
- const GLuint kTempClientId = 271828;
+ const GLuint kTempClientId = next_fake_texture_client_id_++;
auto* temp_ref =
group_->texture_manager()->CreateTexture(kTempClientId, service_id);
group_->texture_manager()->SetTarget(temp_ref, GL_TEXTURE_2D);
@@ -164,23 +159,8 @@ void RasterDecoderTestBase::CreateFakeTexture(
cleared ? gfx::Rect(width, height) : gfx::Rect());
gpu::Mailbox mailbox = gpu::Mailbox::Generate();
group_->mailbox_manager()->ProduceTexture(mailbox, temp_ref->texture());
-
- // Consume texture to hold a permanent ref.
- cmds::CreateAndConsumeTextureINTERNALImmediate& cmd =
- *GetImmediateAs<cmds::CreateAndConsumeTextureINTERNALImmediate>();
- cmd.Init(client_id, false /* use_buffer */, gfx::BufferUsage::GPU_READ,
- resource_format, mailbox.name);
- EXPECT_EQ(error::kNoError, ExecuteImmediateCmd(cmd, sizeof(mailbox.name)));
-
- // Check that client_texture_id has appropriate attributes.
- auto* texture_ref = group().texture_manager()->GetTexture(client_id);
- ASSERT_NE(texture_ref, nullptr);
- auto* texture = texture_ref->texture();
- EXPECT_EQ(service_id, texture->service_id());
-
- // Release temporary ref.
- group_->texture_manager()->RemoveTexture(kTempClientId);
EXPECT_EQ(GL_NO_ERROR, GetGLError());
+ return mailbox;
}
void RasterDecoderTestBase::InitDecoder(const InitState& init) {
@@ -230,7 +210,10 @@ void RasterDecoderTestBase::InitDecoder(const InitState& init) {
// we can use the ContextGroup to figure out how the real RasterDecoder
// will initialize itself.
command_buffer_service_.reset(new FakeCommandBufferServiceBase());
- mock_decoder_.reset(new MockRasterDecoder(command_buffer_service_.get()));
+ command_buffer_service_for_mock_decoder_.reset(
+ new FakeCommandBufferServiceBase());
+ mock_decoder_.reset(
+ new MockRasterDecoder(command_buffer_service_for_mock_decoder_.get()));
EXPECT_EQ(group_->Initialize(mock_decoder_.get(), context_type,
gles2::DisallowedFeatures()),
@@ -250,24 +233,32 @@ void RasterDecoderTestBase::InitDecoder(const InitState& init) {
init.lose_context_when_out_of_memory;
attribs.context_type = context_type;
+ // Setup expectations for SharedContextState::InitializeGL().
+ EXPECT_CALL(*gl_, GetIntegerv(GL_MAX_VERTEX_ATTRIBS, _))
+ .WillOnce(SetArgPointee<1>(8u))
+ .RetiresOnSaturation();
SetupInitCapabilitiesExpectations(group_->feature_info()->IsES3Capable());
SetupInitStateExpectations(group_->feature_info()->IsES3Capable());
- scoped_refptr<raster::RasterDecoderContextState> context_state =
- new raster::RasterDecoderContextState(
- new gl::GLShareGroup(), surface_, context_,
- feature_info->workarounds().use_virtualized_gl_contexts);
+ shared_context_state_ = base::MakeRefCounted<SharedContextState>(
+ new gl::GLShareGroup(), surface_, context_,
+ feature_info->workarounds().use_virtualized_gl_contexts,
+ base::DoNothing());
+
+ shared_context_state_->InitializeGL(GpuPreferences(), feature_info);
+
decoder_.reset(RasterDecoder::Create(this, command_buffer_service_.get(),
&outputter_, group_.get(),
- std::move(context_state)));
+ shared_context_state_));
decoder_->SetIgnoreCachedStateForTest(ignore_cached_state_for_test_);
+ decoder_->DisableFlushWorkaroundForTest();
decoder_->GetLogger()->set_log_synthesized_gl_errors(false);
copy_texture_manager_ = new gles2::MockCopyTextureResourceManager();
decoder_->SetCopyTextureResourceManagerForTest(copy_texture_manager_);
- ASSERT_EQ(decoder_->Initialize(surface_, context_, true,
- gles2::DisallowedFeatures(), attribs),
+ ASSERT_EQ(decoder_->Initialize(surface_, shared_context_state_->context(),
+ true, gles2::DisallowedFeatures(), attribs),
gpu::ContextResult::kSuccess);
EXPECT_CALL(*context_, MakeCurrent(surface_.get())).WillOnce(Return(true));
@@ -278,9 +269,9 @@ void RasterDecoderTestBase::InitDecoder(const InitState& init) {
decoder_->MakeCurrent();
decoder_->BeginDecoding();
- CreateFakeTexture(client_texture_id_, kServiceTextureId,
- viz::ResourceFormat::RGBA_8888, /*width=*/2,
- /*height=*/2, /*cleared=*/false);
+ client_texture_mailbox_ = CreateFakeTexture(
+ kServiceTextureId, viz::ResourceFormat::RGBA_8888, /*width=*/2,
+ /*height=*/2, /*cleared=*/false);
}
void RasterDecoderTestBase::ResetDecoder() {
@@ -302,6 +293,7 @@ void RasterDecoderTestBase::ResetDecoder() {
decoder_.reset();
group_->Destroy(mock_decoder_.get(), false);
command_buffer_service_.reset();
+ command_buffer_service_for_mock_decoder_.reset();
::gl::MockGLInterface::SetGLInterface(nullptr);
gl_.reset();
gl::init::ShutdownGL(false);
@@ -376,28 +368,9 @@ void RasterDecoderTestBase::SetBucketAsCStrings(uint32_t bucket_id,
ClearSharedMemory();
}
-void RasterDecoderTestBase::DoDeleteTexture(GLuint client_id,
- GLuint service_id) {
- {
- InSequence s;
-
- // Calling DoDeleteTexture will unbind the texture from any texture units
- // it's currently bound to.
- EXPECT_CALL(*gl_, BindTexture(_, 0)).Times(AnyNumber());
-
- EXPECT_CALL(*gl_, DeleteTextures(1, Pointee(service_id)))
- .Times(1)
- .RetiresOnSaturation();
-
- GenHelper<cmds::DeleteTexturesImmediate>(client_id);
- }
-}
-
void RasterDecoderTestBase::SetScopedTextureBinderExpectations(GLenum target) {
// ScopedTextureBinder
- EXPECT_CALL(*gl_, ActiveTexture(_))
- .Times(Between(1, 2))
- .RetiresOnSaturation();
+ EXPECT_CALL(*gl_, ActiveTexture(_)).Times(1).RetiresOnSaturation();
EXPECT_CALL(*gl_, BindTexture(target, Ne(0U))).Times(1).RetiresOnSaturation();
EXPECT_CALL(*gl_, BindTexture(target, 0)).Times(1).RetiresOnSaturation();
}
@@ -415,11 +388,9 @@ void RasterDecoderTestBase::SetupClearTextureExpectations(
GLsizei width,
GLsizei height,
GLuint bound_pixel_unpack_buffer) {
- EXPECT_CALL(*gl_, BindTexture(bind_target, service_id))
- .Times(1)
- .RetiresOnSaturation();
+ SetScopedTextureBinderExpectations(bind_target);
EXPECT_CALL(*gl_, PixelStorei(GL_UNPACK_ALIGNMENT, _))
- .Times(2)
+ .Times(1)
.RetiresOnSaturation();
if (bound_pixel_unpack_buffer) {
EXPECT_CALL(*gl_, BindBuffer(GL_PIXEL_UNPACK_BUFFER, _))
diff --git a/chromium/gpu/command_buffer/service/raster_decoder_unittest_base.h b/chromium/gpu/command_buffer/service/raster_decoder_unittest_base.h
index f34c5db5f80..6ac4cf08acb 100644
--- a/chromium/gpu/command_buffer/service/raster_decoder_unittest_base.h
+++ b/chromium/gpu/command_buffer/service/raster_decoder_unittest_base.h
@@ -59,7 +59,6 @@ class RasterDecoderTestBase : public ::testing::TestWithParam<bool>,
void OnConsoleMessage(int32_t id, const std::string& message) override;
void CacheShader(const std::string& key, const std::string& shader) override;
void OnFenceSyncRelease(uint64_t release) override;
- bool OnWaitSyncToken(const gpu::SyncToken&) override;
void OnDescheduleUntilFinished() override;
void OnRescheduleAfterFinished() override;
void OnSwapBuffers(uint64_t swap_id, uint32_t flags) override;
@@ -181,12 +180,11 @@ class RasterDecoderTestBase : public ::testing::TestWithParam<bool>,
void SetupInitStateManualExpectationsForDoLineWidth(GLfloat width);
void ExpectEnableDisable(GLenum cap, bool enable);
- void CreateFakeTexture(GLuint client_id,
- GLuint service_id,
- viz::ResourceFormat resource_format,
- GLsizei width,
- GLsizei height,
- bool cleared);
+ gpu::Mailbox CreateFakeTexture(GLuint service_id,
+ viz::ResourceFormat resource_format,
+ GLsizei width,
+ GLsizei height,
+ bool cleared);
// Note that the error is returned as GLint instead of GLenum.
// This is because there is a mismatch in the types of GLenum and
@@ -197,7 +195,6 @@ class RasterDecoderTestBase : public ::testing::TestWithParam<bool>,
GLint GetGLError();
void DoBindTexture(GLenum target, GLuint client_id, GLuint service_id);
- void DoDeleteTexture(GLuint client_id, GLuint service_id);
void SetScopedTextureBinderExpectations(GLenum target);
void SetupClearTextureExpectations(GLuint service_id,
@@ -213,9 +210,7 @@ class RasterDecoderTestBase : public ::testing::TestWithParam<bool>,
GLsizei height,
GLuint bound_pixel_unpack_buffer);
- GLvoid* BufferOffset(unsigned i) {
- return static_cast<int8_t*>(nullptr) + (i);
- }
+ GLvoid* BufferOffset(unsigned i) { return reinterpret_cast<GLvoid*>(i); }
protected:
static const GLint kMaxTextureSize = 2048;
@@ -250,9 +245,11 @@ class RasterDecoderTestBase : public ::testing::TestWithParam<bool>,
std::unique_ptr<FakeCommandBufferServiceBase> command_buffer_service_;
gles2::TraceOutputter outputter_;
std::unique_ptr<MockRasterDecoder> mock_decoder_;
+ std::unique_ptr<FakeCommandBufferServiceBase>
+ command_buffer_service_for_mock_decoder_;
std::unique_ptr<RasterDecoder> decoder_;
- GLuint client_texture_id_;
+ gpu::Mailbox client_texture_mailbox_;
int32_t shared_memory_id_;
uint32_t shared_memory_offset_;
@@ -262,6 +259,7 @@ class RasterDecoderTestBase : public ::testing::TestWithParam<bool>,
uint32_t immediate_buffer_[64];
const bool ignore_cached_state_for_test_;
+ scoped_refptr<SharedContextState> shared_context_state_;
private:
GpuPreferences gpu_preferences_;
@@ -274,6 +272,7 @@ class RasterDecoderTestBase : public ::testing::TestWithParam<bool>,
scoped_refptr<gles2::ContextGroup> group_;
base::MessageLoop message_loop_;
gles2::MockCopyTextureResourceManager* copy_texture_manager_; // not owned
+ GLuint next_fake_texture_client_id_ = 271828;
};
class RasterDecoderManualInitTest : public RasterDecoderTestBase {
diff --git a/chromium/gpu/command_buffer/service/raster_decoder_unittest_context_lost.cc b/chromium/gpu/command_buffer/service/raster_decoder_unittest_context_lost.cc
index be57e397a32..3c47d6845da 100644
--- a/chromium/gpu/command_buffer/service/raster_decoder_unittest_context_lost.cc
+++ b/chromium/gpu/command_buffer/service/raster_decoder_unittest_context_lost.cc
@@ -145,6 +145,7 @@ TEST_P(RasterDecoderLostContextTest, LostFromMakeCurrent) {
EXPECT_CALL(*context_, MakeCurrent(surface_.get())).WillOnce(Return(false));
// Expect the group to be lost.
EXPECT_CALL(*mock_decoder_, MarkContextLost(error::kUnknown)).Times(1);
+ EXPECT_FALSE(decoder_->WasContextLost());
decoder_->MakeCurrent();
EXPECT_TRUE(decoder_->WasContextLost());
EXPECT_EQ(error::kMakeCurrentFailed, GetContextLostReason());
@@ -175,7 +176,7 @@ TEST_P(RasterDecoderLostContextTest, LostFromMakeCurrentWithRobustness) {
TEST_P(RasterDecoderLostContextTest, TextureDestroyAfterLostFromMakeCurrent) {
Init(/*has_robustness=*/true);
- CreateFakeTexture(kNewClientId, kNewServiceId, viz::ResourceFormat::RGBA_8888,
+ CreateFakeTexture(kNewServiceId, viz::ResourceFormat::RGBA_8888,
/*width=*/2, /*height=*/2,
/*cleared=*/false);
@@ -285,18 +286,6 @@ TEST_P(RasterDecoderLostContextTest, LoseInnocentFromGLError) {
EXPECT_EQ(error::kInnocent, GetContextLostReason());
}
-TEST_P(RasterDecoderLostContextTest, LoseVirtualContextWithRobustness) {
- InitWithVirtualContextsAndRobustness();
- EXPECT_CALL(*mock_decoder_, MarkContextLost(error::kUnknown)).Times(1);
- // Signal guilty....
- DoGetErrorWithContextLost(GL_GUILTY_CONTEXT_RESET_KHR);
- EXPECT_TRUE(decoder_->WasContextLost());
- EXPECT_TRUE(decoder_->WasContextLostByRobustnessExtension());
- // ...but make sure we don't pretend, since for virtual contexts we don't
- // know if this was really the guilty client.
- EXPECT_EQ(error::kUnknown, GetContextLostReason());
-}
-
TEST_P(RasterDecoderLostContextTest, LoseGroupFromRobustness) {
// If one context in a group is lost through robustness,
// the other ones should also get lost and query the reset status.
diff --git a/chromium/gpu/command_buffer/service/renderbuffer_manager.cc b/chromium/gpu/command_buffer/service/renderbuffer_manager.cc
index da6e87688df..43c420388c4 100644
--- a/chromium/gpu/command_buffer/service/renderbuffer_manager.cc
+++ b/chromium/gpu/command_buffer/service/renderbuffer_manager.cc
@@ -291,21 +291,13 @@ bool RenderbufferManager::ComputeEstimatedRenderbufferSize(
int internal_format,
uint32_t* size) const {
DCHECK(size);
-
- uint32_t temp = 0;
- if (!SafeMultiplyUint32(width, height, &temp)) {
- return false;
- }
- if (!SafeMultiplyUint32(temp, (samples == 0 ? 1 : samples), &temp)) {
- return false;
- }
GLenum impl_format = InternalRenderbufferFormatToImplFormat(internal_format);
- if (!SafeMultiplyUint32(
- temp, GLES2Util::RenderbufferBytesPerPixel(impl_format), &temp)) {
- return false;
- }
- *size = temp;
- return true;
+ uint32_t bytes_per_pixel = GLES2Util::RenderbufferBytesPerPixel(impl_format);
+ base::CheckedNumeric<uint32_t> checked_size = width;
+ checked_size *= height;
+ checked_size *= (samples == 0 ? 1 : samples);
+ checked_size *= bytes_per_pixel;
+ return checked_size.AssignIfValid(size);
}
GLenum RenderbufferManager::InternalRenderbufferFormatToImplFormat(
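
The rewrite above trades chained SafeMultiplyUint32() calls for base::CheckedNumeric, which carries overflow state through the whole expression and reports validity once at the end. A self-contained sketch of the same idiom, assuming only base/numerics/safe_math.h:

    #include <cstdint>

    #include "base/numerics/safe_math.h"

    // Overflow-checked width * height * samples * bytes_per_pixel.
    bool ComputeImageSizeBytes(uint32_t width,
                               uint32_t height,
                               uint32_t samples,
                               uint32_t bytes_per_pixel,
                               uint32_t* size) {
      base::CheckedNumeric<uint32_t> checked = width;
      checked *= height;
      checked *= (samples == 0 ? 1 : samples);
      checked *= bytes_per_pixel;
      // Returns false on overflow and leaves |*size| untouched.
      return checked.AssignIfValid(size);
    }
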
diff --git a/chromium/gpu/command_buffer/service/renderbuffer_manager.h b/chromium/gpu/command_buffer/service/renderbuffer_manager.h
index a647c5b5dfe..9e68d3ddef8 100644
--- a/chromium/gpu/command_buffer/service/renderbuffer_manager.h
+++ b/chromium/gpu/command_buffer/service/renderbuffer_manager.h
@@ -10,9 +10,9 @@
#include <memory>
#include <string>
+#include <unordered_map>
#include "base/containers/flat_set.h"
-#include "base/containers/hash_tables.h"
#include "base/macros.h"
#include "base/memory/ref_counted.h"
#include "gpu/command_buffer/service/gl_utils.h"
@@ -224,7 +224,8 @@ class GPU_GLES2_EXPORT RenderbufferManager
bool have_context_;
// Info for each renderbuffer in the system.
- typedef base::hash_map<GLuint, scoped_refptr<Renderbuffer> > RenderbufferMap;
+ typedef std::unordered_map<GLuint, scoped_refptr<Renderbuffer>>
+ RenderbufferMap;
RenderbufferMap renderbuffers_;
DISALLOW_COPY_AND_ASSIGN(RenderbufferManager);
diff --git a/chromium/gpu/command_buffer/service/sampler_manager.h b/chromium/gpu/command_buffer/service/sampler_manager.h
index 05d2958c877..0ea9ce42af6 100644
--- a/chromium/gpu/command_buffer/service/sampler_manager.h
+++ b/chromium/gpu/command_buffer/service/sampler_manager.h
@@ -5,9 +5,9 @@
#ifndef GPU_COMMAND_BUFFER_SERVICE_SAMPLER_MANAGER_H_
#define GPU_COMMAND_BUFFER_SERVICE_SAMPLER_MANAGER_H_
+#include <unordered_map>
#include <vector>
-#include "base/containers/hash_tables.h"
#include "base/logging.h"
#include "base/macros.h"
#include "base/memory/ref_counted.h"
@@ -160,7 +160,7 @@ class GPU_GLES2_EXPORT SamplerManager {
scoped_refptr<FeatureInfo> feature_info_;
// Info for each sampler in the system.
- typedef base::hash_map<GLuint, scoped_refptr<Sampler> > SamplerMap;
+ typedef std::unordered_map<GLuint, scoped_refptr<Sampler>> SamplerMap;
SamplerMap samplers_;
bool have_context_;
diff --git a/chromium/gpu/command_buffer/service/scheduler.cc b/chromium/gpu/command_buffer/service/scheduler.cc
index 677c410e1f1..112948d9381 100644
--- a/chromium/gpu/command_buffer/service/scheduler.cc
+++ b/chromium/gpu/command_buffer/service/scheduler.cc
@@ -103,8 +103,9 @@ void Scheduler::Sequence::UpdateSchedulingPriority() {
}
bool Scheduler::Sequence::NeedsRescheduling() const {
- return running_state_ != IDLE &&
- scheduling_state_.priority != current_priority();
+ return (running_state_ != IDLE &&
+ scheduling_state_.priority != current_priority()) ||
+ (running_state_ == SCHEDULED && !IsRunnable());
}
bool Scheduler::Sequence::IsRunnable() const {
@@ -122,16 +123,15 @@ bool Scheduler::Sequence::ShouldYieldTo(const Sequence* other) const {
void Scheduler::Sequence::SetEnabled(bool enabled) {
if (enabled_ == enabled)
return;
- DCHECK_EQ(running_state_, enabled ? IDLE : RUNNING);
enabled_ = enabled;
if (enabled) {
TRACE_EVENT_ASYNC_BEGIN1("gpu", "SequenceEnabled", this, "sequence_id",
sequence_id_.GetUnsafeValue());
- scheduler_->TryScheduleSequence(this);
} else {
TRACE_EVENT_ASYNC_END1("gpu", "SequenceEnabled", this, "sequence_id",
sequence_id_.GetUnsafeValue());
}
+ scheduler_->TryScheduleSequence(this);
}
Scheduler::SchedulingState Scheduler::Sequence::SetScheduled() {
@@ -293,6 +293,8 @@ Scheduler::Scheduler(scoped_refptr<base::SingleThreadTaskRunner> task_runner,
sync_point_manager_(sync_point_manager),
weak_factory_(this) {
DCHECK(thread_checker_.CalledOnValidThread());
+ // Store weak ptr separately because calling GetWeakPtr() is not thread safe.
+ weak_ptr_ = weak_factory_.GetWeakPtr();
}
Scheduler::~Scheduler() {
@@ -390,11 +392,11 @@ void Scheduler::ScheduleTaskHelper(Task task) {
Sequence* release_sequence = GetSequence(release_sequence_id);
if (!release_sequence)
continue;
- if (sync_point_manager_->Wait(
- sync_token, sequence_id, order_num,
- base::Bind(&Scheduler::SyncTokenFenceReleased,
- weak_factory_.GetWeakPtr(), sync_token, order_num,
- release_sequence_id, sequence_id))) {
+ if (sync_point_manager_->WaitNonThreadSafe(
+ sync_token, sequence_id, order_num, task_runner_,
+ base::BindOnce(&Scheduler::SyncTokenFenceReleased, weak_ptr_,
+ sync_token, order_num, release_sequence_id,
+ sequence_id))) {
sequence->AddWaitFence(sync_token, order_num, release_sequence_id,
release_sequence);
}
@@ -464,8 +466,8 @@ void Scheduler::TryScheduleSequence(Sequence* sequence) {
if (!running_) {
TRACE_EVENT_ASYNC_BEGIN0("gpu", "Scheduler::Running", this);
running_ = true;
- task_runner_->PostTask(FROM_HERE, base::Bind(&Scheduler::RunNextTask,
- weak_factory_.GetWeakPtr()));
+ task_runner_->PostTask(
+ FROM_HERE, base::BindOnce(&Scheduler::RunNextTask, weak_ptr_));
}
}
}
@@ -540,8 +542,8 @@ void Scheduler::RunNextTask() {
}
}
- task_runner_->PostTask(FROM_HERE, base::Bind(&Scheduler::RunNextTask,
- weak_factory_.GetWeakPtr()));
+ task_runner_->PostTask(FROM_HERE,
+ base::BindOnce(&Scheduler::RunNextTask, weak_ptr_));
}
} // namespace gpu
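
The scheduler now mints a single WeakPtr on the main thread at construction and reuses it in every BindOnce(), because WeakPtrFactory::GetWeakPtr() is not safe to call from other threads. A condensed sketch of that pattern, with a hypothetical Worker class standing in for Scheduler:

    #include <utility>

    #include "base/bind.h"
    #include "base/location.h"
    #include "base/memory/weak_ptr.h"
    #include "base/single_thread_task_runner.h"

    class Worker {
     public:
      explicit Worker(scoped_refptr<base::SingleThreadTaskRunner> task_runner)
          : task_runner_(std::move(task_runner)), weak_factory_(this) {
        // GetWeakPtr() is only safe on the owning thread, so mint the pointer
        // once here and copy it into posted callbacks later.
        weak_ptr_ = weak_factory_.GetWeakPtr();
      }

      void PostTick() {
        task_runner_->PostTask(FROM_HERE,
                               base::BindOnce(&Worker::Tick, weak_ptr_));
      }

     private:
      void Tick() {}

      scoped_refptr<base::SingleThreadTaskRunner> task_runner_;
      base::WeakPtr<Worker> weak_ptr_;
      base::WeakPtrFactory<Worker> weak_factory_;  // Must be declared last.
    };
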
diff --git a/chromium/gpu/command_buffer/service/scheduler.h b/chromium/gpu/command_buffer/service/scheduler.h
index 39b7ae5a045..41ac5cfca3d 100644
--- a/chromium/gpu/command_buffer/service/scheduler.h
+++ b/chromium/gpu/command_buffer/service/scheduler.h
@@ -333,6 +333,8 @@ class GPU_EXPORT Scheduler {
base::ThreadChecker thread_checker_;
+ // Invalidated on main thread.
+ base::WeakPtr<Scheduler> weak_ptr_;
base::WeakPtrFactory<Scheduler> weak_factory_;
private:
diff --git a/chromium/gpu/command_buffer/service/scheduler_unittest.cc b/chromium/gpu/command_buffer/service/scheduler_unittest.cc
index 96d34b6b0c8..e2b8a9555ed 100644
--- a/chromium/gpu/command_buffer/service/scheduler_unittest.cc
+++ b/chromium/gpu/command_buffer/service/scheduler_unittest.cc
@@ -431,8 +431,8 @@ TEST_F(SchedulerTest, ReentrantEnableSequenceShouldNotDeadlock) {
ran1 = true;
release_state1->Wait(
sync_token,
- base::Bind(&Scheduler::EnableSequence,
- base::Unretained(scheduler()), sequence_id1));
+ base::BindOnce(&Scheduler::EnableSequence,
+ base::Unretained(scheduler()), sequence_id1));
scheduler()->DisableSequence(sequence_id1);
}),
std::vector<SyncToken>()));
diff --git a/chromium/gpu/command_buffer/service/service_font_manager.cc b/chromium/gpu/command_buffer/service/service_font_manager.cc
index f4f40f98a40..53bcf2194cb 100644
--- a/chromium/gpu/command_buffer/service/service_font_manager.cc
+++ b/chromium/gpu/command_buffer/service/service_font_manager.cc
@@ -14,7 +14,7 @@ namespace gpu {
namespace {
class Deserializer {
public:
- Deserializer(const volatile char* memory, size_t memory_size)
+ Deserializer(const volatile char* memory, uint32_t memory_size)
: memory_(memory), memory_size_(memory_size) {}
~Deserializer() = default;
@@ -32,7 +32,7 @@ class Deserializer {
return true;
}
- bool ReadStrikeData(SkStrikeClient* strike_client, size_t size) {
+ bool ReadStrikeData(SkStrikeClient* strike_client, uint32_t size) {
if (size == 0u)
return true;
@@ -48,14 +48,20 @@ class Deserializer {
}
private:
- bool AlignMemory(size_t size, size_t alignment) {
+ bool AlignMemory(uint32_t size, size_t alignment) {
// Due to the math below, alignment must be a power of two.
DCHECK_GT(alignment, 0u);
DCHECK_EQ(alignment & (alignment - 1), 0u);
uintptr_t memory = reinterpret_cast<uintptr_t>(memory_);
size_t padding = ((memory + alignment - 1) & ~(alignment - 1)) - memory;
- if (bytes_read_ + size + padding > memory_size_)
+
+ base::CheckedNumeric<uint32_t> checked_padded_size = bytes_read_;
+ checked_padded_size += padding;
+ checked_padded_size += size;
+ uint32_t padded_size = 0;
+ if (!checked_padded_size.AssignIfValid(&padded_size) ||
+ padded_size > memory_size_)
return false;
memory_ += padding;
@@ -64,8 +70,8 @@ class Deserializer {
}
const volatile char* memory_;
- size_t memory_size_;
- size_t bytes_read_ = 0u;
+ uint32_t memory_size_;
+ uint32_t bytes_read_ = 0u;
};
} // namespace
@@ -125,7 +131,7 @@ void ServiceFontManager::Destroy() {
bool ServiceFontManager::Deserialize(
const volatile char* memory,
- size_t memory_size,
+ uint32_t memory_size,
std::vector<SkDiscardableHandleId>* locked_handles) {
base::AutoLock hold(lock_);
@@ -134,11 +140,11 @@ bool ServiceFontManager::Deserialize(
// All new handles.
Deserializer deserializer(memory, memory_size);
- uint64_t new_handles_created;
- if (!deserializer.Read<uint64_t>(&new_handles_created))
+ uint32_t new_handles_created;
+ if (!deserializer.Read<uint32_t>(&new_handles_created))
return false;
- for (size_t i = 0; i < new_handles_created; ++i) {
+ for (uint32_t i = 0; i < new_handles_created; ++i) {
SerializableSkiaHandle handle;
if (!deserializer.Read<SerializableSkiaHandle>(&handle))
return false;
@@ -156,19 +162,19 @@ bool ServiceFontManager::Deserialize(
}
// All locked handles
- uint64_t num_locked_handles;
- if (!deserializer.Read<uint64_t>(&num_locked_handles))
+ uint32_t num_locked_handles;
+ if (!deserializer.Read<uint32_t>(&num_locked_handles))
return false;
locked_handles->resize(num_locked_handles);
- for (size_t i = 0; i < num_locked_handles; ++i) {
+ for (uint32_t i = 0; i < num_locked_handles; ++i) {
if (!deserializer.Read<SkDiscardableHandleId>(&locked_handles->at(i)))
return false;
}
// Skia font data.
- uint64_t skia_data_size = 0u;
- if (!deserializer.Read<uint64_t>(&skia_data_size))
+ uint32_t skia_data_size = 0u;
+ if (!deserializer.Read<uint32_t>(&skia_data_size))
return false;
{
diff --git a/chromium/gpu/command_buffer/service/service_font_manager.h b/chromium/gpu/command_buffer/service/service_font_manager.h
index 5bc77d6f1ca..198c54fecb3 100644
--- a/chromium/gpu/command_buffer/service/service_font_manager.h
+++ b/chromium/gpu/command_buffer/service/service_font_manager.h
@@ -28,7 +28,7 @@ class GPU_GLES2_EXPORT ServiceFontManager
void Destroy();
bool Deserialize(const volatile char* memory,
- size_t memory_size,
+ uint32_t memory_size,
std::vector<SkDiscardableHandleId>* locked_handles);
bool Unlock(const std::vector<SkDiscardableHandleId>& handles);
SkStrikeClient* strike_client() { return strike_client_.get(); }
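
Deserialize() above now reads fixed-width uint32_t counts and guards its alignment math with checked arithmetic, so a compromised renderer cannot overflow bytes_read_ + padding + size into a small value that slips past the bounds check. A standalone sketch of that hardened check, assuming base/numerics/safe_math.h:

    #include <cstdint>

    #include "base/numerics/safe_math.h"

    // True if |size| more bytes (plus |padding|) fit in the shared buffer.
    bool FitsInBuffer(uint32_t bytes_read,
                      uint32_t padding,
                      uint32_t size,
                      uint32_t memory_size) {
      base::CheckedNumeric<uint32_t> end = bytes_read;
      end += padding;
      end += size;
      uint32_t end_value = 0;
      // Arithmetic overflow makes AssignIfValid() return false, which is
      // reported as out-of-bounds instead of wrapping to a small offset.
      return end.AssignIfValid(&end_value) && end_value <= memory_size;
    }
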
diff --git a/chromium/gpu/command_buffer/service/shader_manager.h b/chromium/gpu/command_buffer/service/shader_manager.h
index 579dddd14e1..251311a693a 100644
--- a/chromium/gpu/command_buffer/service/shader_manager.h
+++ b/chromium/gpu/command_buffer/service/shader_manager.h
@@ -6,8 +6,8 @@
#define GPU_COMMAND_BUFFER_SERVICE_SHADER_MANAGER_H_
#include <string>
+#include <unordered_map>
-#include "base/containers/hash_tables.h"
#include "base/logging.h"
#include "base/macros.h"
#include "base/memory/ref_counted.h"
@@ -316,7 +316,7 @@ class GPU_GLES2_EXPORT ShaderManager {
friend class Shader;
// Info for each shader by service side shader Id.
- typedef base::hash_map<GLuint, scoped_refptr<Shader> > ShaderMap;
+ typedef std::unordered_map<GLuint, scoped_refptr<Shader>> ShaderMap;
ShaderMap shaders_;
void RemoveShaderIfUnused(Shader* shader);
diff --git a/chromium/gpu/command_buffer/service/shader_translator.cc b/chromium/gpu/command_buffer/service/shader_translator.cc
index 628d2eea9d0..6182606b0c3 100644
--- a/chromium/gpu/command_buffer/service/shader_translator.cc
+++ b/chromium/gpu/command_buffer/service/shader_translator.cc
@@ -165,10 +165,10 @@ bool ShaderTranslator::Init(GLenum shader_type,
shader_output_language, resources);
}
- compile_options_ = SH_OBJECT_CODE | SH_VARIABLES |
- SH_ENFORCE_PACKING_RESTRICTIONS |
- SH_LIMIT_EXPRESSION_COMPLEXITY |
- SH_LIMIT_CALL_STACK_DEPTH | SH_CLAMP_INDIRECT_ARRAY_BOUNDS;
+ compile_options_ =
+ SH_OBJECT_CODE | SH_VARIABLES | SH_ENFORCE_PACKING_RESTRICTIONS |
+ SH_LIMIT_EXPRESSION_COMPLEXITY | SH_LIMIT_CALL_STACK_DEPTH |
+ SH_CLAMP_INDIRECT_ARRAY_BOUNDS | SH_EMULATE_GL_DRAW_ID;
if (gl_shader_interm_output)
compile_options_ |= SH_INTERMEDIATE_TREE;
compile_options_ |= driver_bug_workarounds;
diff --git a/chromium/gpu/command_buffer/service/shader_translator.h b/chromium/gpu/command_buffer/service/shader_translator.h
index 98ccdb37cdc..745b02728e7 100644
--- a/chromium/gpu/command_buffer/service/shader_translator.h
+++ b/chromium/gpu/command_buffer/service/shader_translator.h
@@ -6,8 +6,8 @@
#define GPU_COMMAND_BUFFER_SERVICE_SHADER_TRANSLATOR_H_
#include <string>
+#include <unordered_map>
-#include "base/containers/hash_tables.h"
#include "base/macros.h"
#include "base/memory/ref_counted.h"
#include "base/observer_list.h"
@@ -22,11 +22,11 @@ namespace gpu {
namespace gles2 {
// Mapping between variable name and info.
-typedef base::hash_map<std::string, sh::Attribute> AttributeMap;
+typedef std::unordered_map<std::string, sh::Attribute> AttributeMap;
typedef std::vector<sh::OutputVariable> OutputVariableList;
-typedef base::hash_map<std::string, sh::Uniform> UniformMap;
-typedef base::hash_map<std::string, sh::Varying> VaryingMap;
-typedef base::hash_map<std::string, sh::InterfaceBlock> InterfaceBlockMap;
+typedef std::unordered_map<std::string, sh::Uniform> UniformMap;
+typedef std::unordered_map<std::string, sh::Varying> VaryingMap;
+typedef std::unordered_map<std::string, sh::InterfaceBlock> InterfaceBlockMap;
typedef base::RefCountedData<std::string> OptionsAffectingCompilationString;
// Translates a GLSL ES 2.0 shader to desktop GLSL shader, or just
diff --git a/chromium/gpu/command_buffer/service/shared_context_state.cc b/chromium/gpu/command_buffer/service/shared_context_state.cc
new file mode 100644
index 00000000000..7b4ccdbe188
--- /dev/null
+++ b/chromium/gpu/command_buffer/service/shared_context_state.cc
@@ -0,0 +1,353 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "gpu/command_buffer/service/shared_context_state.h"
+
+#include "base/threading/thread_task_runner_handle.h"
+#include "base/trace_event/memory_dump_manager.h"
+#include "gpu/command_buffer/common/activity_flags.h"
+#include "gpu/command_buffer/service/context_state.h"
+#include "gpu/command_buffer/service/gl_context_virtual.h"
+#include "gpu/command_buffer/service/service_transfer_cache.h"
+#include "gpu/command_buffer/service/service_utils.h"
+#include "gpu/config/gpu_driver_bug_workarounds.h"
+#include "gpu/vulkan/buildflags.h"
+#include "ui/gl/gl_bindings.h"
+#include "ui/gl/gl_context.h"
+#include "ui/gl/gl_share_group.h"
+#include "ui/gl/gl_surface.h"
+#include "ui/gl/init/create_gr_gl_interface.h"
+
+#if BUILDFLAG(ENABLE_VULKAN)
+#include "components/viz/common/gpu/vulkan_context_provider.h"
+#endif
+
+namespace {
+static constexpr size_t kInitialScratchDeserializationBufferSize = 1024;
+}  // namespace
+
+namespace gpu {
+
+SharedContextState::SharedContextState(
+ scoped_refptr<gl::GLShareGroup> share_group,
+ scoped_refptr<gl::GLSurface> surface,
+ scoped_refptr<gl::GLContext> context,
+ bool use_virtualized_gl_contexts,
+ base::OnceClosure context_lost_callback,
+ viz::VulkanContextProvider* vulkan_context_provider)
+ : use_virtualized_gl_contexts_(use_virtualized_gl_contexts),
+ context_lost_callback_(std::move(context_lost_callback)),
+ vk_context_provider_(vulkan_context_provider),
+#if BUILDFLAG(ENABLE_VULKAN)
+ gr_context_(vk_context_provider_ ? vk_context_provider_->GetGrContext()
+ : nullptr),
+#endif
+ use_vulkan_gr_context_(!!vk_context_provider_),
+ share_group_(std::move(share_group)),
+ context_(context),
+ real_context_(std::move(context)),
+ surface_(std::move(surface)),
+ weak_ptr_factory_(this) {
+ if (use_vulkan_gr_context_) {
+ DCHECK(gr_context_);
+ use_virtualized_gl_contexts_ = false;
+ }
+ if (base::ThreadTaskRunnerHandle::IsSet()) {
+ base::trace_event::MemoryDumpManager::GetInstance()->RegisterDumpProvider(
+ this, "SharedContextState", base::ThreadTaskRunnerHandle::Get());
+ }
+ // Initialize the scratch buffer to some small initial size.
+ scratch_deserialization_buffer_.resize(
+ kInitialScratchDeserializationBufferSize);
+}
+
+SharedContextState::~SharedContextState() {
+ if (gr_context_)
+ gr_context_->abandonContext();
+ base::trace_event::MemoryDumpManager::GetInstance()->UnregisterDumpProvider(
+ this);
+}
+
+void SharedContextState::InitializeGrContext(
+ const GpuDriverBugWorkarounds& workarounds,
+ GrContextOptions::PersistentCache* cache,
+ GpuProcessActivityFlags* activity_flags,
+ gl::ProgressReporter* progress_reporter) {
+ if (!use_vulkan_gr_context_) {
+ DCHECK(context_->IsCurrent(nullptr));
+ sk_sp<GrGLInterface> interface(gl::init::CreateGrGLInterface(
+ *context_->GetVersionInfo(), workarounds.use_es2_for_oopr,
+ progress_reporter));
+ if (!interface) {
+ LOG(ERROR) << "OOP raster support disabled: GrGLInterface creation "
+ "failed.";
+ return;
+ }
+
+ if (activity_flags && cache) {
+      // |activity_flags| is safe to capture here since it must outlive
+      // this context state.
+ interface->fFunctions.fProgramBinary =
+ [activity_flags](GrGLuint program, GrGLenum binaryFormat,
+ void* binary, GrGLsizei length) {
+ GpuProcessActivityFlags::ScopedSetFlag scoped_set_flag(
+ activity_flags, ActivityFlagsBase::FLAG_LOADING_PROGRAM_BINARY);
+ glProgramBinary(program, binaryFormat, binary, length);
+ };
+ }
+
+ // If you make any changes to the GrContext::Options here that could
+ // affect text rendering, make sure to match the capabilities initialized
+    // in GetCapabilities and ensure that these are also used by the
+ // PaintOpBufferSerializer.
+ GrContextOptions options;
+ options.fDriverBugWorkarounds =
+ GrDriverBugWorkarounds(workarounds.ToIntSet());
+ options.fDisableCoverageCountingPaths = true;
+ size_t max_resource_cache_bytes = 0u;
+ raster::DetermineGrCacheLimitsFromAvailableMemory(
+ &max_resource_cache_bytes, &glyph_cache_max_texture_bytes_);
+ options.fGlyphCacheTextureMaximumBytes = glyph_cache_max_texture_bytes_;
+ options.fPersistentCache = cache;
+ options.fAvoidStencilBuffers = workarounds.avoid_stencil_buffers;
+ owned_gr_context_ = GrContext::MakeGL(std::move(interface), options);
+ gr_context_ = owned_gr_context_.get();
+ if (!gr_context_) {
+ LOG(ERROR) << "OOP raster support disabled: GrContext creation "
+ "failed.";
+ } else {
+ constexpr int kMaxGaneshResourceCacheCount = 16384;
+ gr_context_->setResourceCacheLimits(kMaxGaneshResourceCacheCount,
+ max_resource_cache_bytes);
+ }
+ }
+ transfer_cache_ = std::make_unique<ServiceTransferCache>();
+}
+
+bool SharedContextState::InitializeGL(
+ const GpuPreferences& gpu_preferences,
+ scoped_refptr<gles2::FeatureInfo> feature_info) {
+  // We still need to initialize GL when Vulkan is used, because RasterDecoder
+ // depends on GL.
+ // TODO(penghuang): don't initialize GL when RasterDecoder can work without
+ // GL.
+ if (IsGLInitialized()) {
+ DCHECK(feature_info == feature_info_);
+ DCHECK(context_state_);
+ return true;
+ }
+
+ DCHECK(context_->IsCurrent(nullptr));
+
+ feature_info_ = std::move(feature_info);
+ feature_info_->Initialize(gpu::CONTEXT_TYPE_OPENGLES2,
+ gpu_preferences.use_passthrough_cmd_decoder &&
+ gles2::PassthroughCommandDecoderSupported(),
+ gles2::DisallowedFeatures());
+
+ auto* api = gl::g_current_gl_context;
+ const GLint kGLES2RequiredMinimumVertexAttribs = 8u;
+ GLint max_vertex_attribs = 0;
+ api->glGetIntegervFn(GL_MAX_VERTEX_ATTRIBS, &max_vertex_attribs);
+ if (max_vertex_attribs < kGLES2RequiredMinimumVertexAttribs) {
+ feature_info_ = nullptr;
+ return false;
+ }
+
+ context_state_ = std::make_unique<gles2::ContextState>(
+ feature_info_.get(), false /* track_texture_and_sampler_units */);
+
+ context_state_->set_api(api);
+ context_state_->InitGenericAttribs(max_vertex_attribs);
+
+ // Set all the default state because some GL drivers get it wrong.
+ // TODO(backer): Not all of this state needs to be initialized. Reduce the set
+ // if perf becomes a problem.
+ context_state_->InitCapabilities(nullptr);
+ context_state_->InitState(nullptr);
+
+ if (use_virtualized_gl_contexts_) {
+ auto virtual_context = base::MakeRefCounted<GLContextVirtual>(
+ share_group_.get(), real_context_.get(),
+ weak_ptr_factory_.GetWeakPtr());
+ if (!virtual_context->Initialize(surface_.get(), gl::GLContextAttribs())) {
+ feature_info_ = nullptr;
+ context_state_ = nullptr;
+ return false;
+ }
+ context_ = std::move(virtual_context);
+ MakeCurrent(nullptr);
+ }
+ return true;
+}
+
+bool SharedContextState::MakeCurrent(gl::GLSurface* surface) {
+ if (use_vulkan_gr_context_)
+ return true;
+
+ if (context_lost_)
+ return false;
+
+ if (!context_->MakeCurrent(surface ? surface : surface_.get())) {
+ MarkContextLost();
+ return false;
+ }
+ return true;
+}
+
+void SharedContextState::MarkContextLost() {
+ if (!context_lost_) {
+ context_lost_ = true;
+ // context_state_ could be nullptr for some unittests.
+ if (context_state_)
+ context_state_->MarkContextLost();
+ if (gr_context_)
+ gr_context_->abandonContext();
+ std::move(context_lost_callback_).Run();
+ }
+}
+
+bool SharedContextState::IsCurrent(gl::GLSurface* surface) {
+ if (use_vulkan_gr_context_)
+ return true;
+ return context_->IsCurrent(surface);
+}
+
+bool SharedContextState::OnMemoryDump(
+ const base::trace_event::MemoryDumpArgs& args,
+ base::trace_event::ProcessMemoryDump* pmd) {
+ if (gr_context_)
+ raster::DumpGrMemoryStatistics(gr_context_, pmd, base::nullopt);
+ return true;
+}
+
+void SharedContextState::PurgeMemory(
+ base::MemoryPressureListener::MemoryPressureLevel memory_pressure_level) {
+ if (!gr_context_) {
+ DCHECK(!transfer_cache_);
+ return;
+ }
+
+ // Ensure the context is current before doing any GPU cleanup.
+ MakeCurrent(nullptr);
+
+ switch (memory_pressure_level) {
+ case base::MemoryPressureListener::MEMORY_PRESSURE_LEVEL_NONE:
+ // This function is only called with moderate or critical pressure.
+ NOTREACHED();
+ return;
+ case base::MemoryPressureListener::MEMORY_PRESSURE_LEVEL_MODERATE:
+ // With moderate pressure, clear any unlocked resources.
+ gr_context_->purgeUnlockedResources(true /* scratchResourcesOnly */);
+ scratch_deserialization_buffer_.resize(
+ kInitialScratchDeserializationBufferSize);
+ scratch_deserialization_buffer_.shrink_to_fit();
+ break;
+ case base::MemoryPressureListener::MEMORY_PRESSURE_LEVEL_CRITICAL:
+ // With critical pressure, purge as much as possible.
+ gr_context_->freeGpuResources();
+ scratch_deserialization_buffer_.resize(0u);
+ scratch_deserialization_buffer_.shrink_to_fit();
+ break;
+ }
+
+ transfer_cache_->PurgeMemory(memory_pressure_level);
+}
+
+void SharedContextState::PessimisticallyResetGrContext() const {
+ // Calling GrContext::resetContext() is very cheap, so we do it
+ // pessimistically. We could dirty less state if skia state setting
+ // performance becomes an issue.
+ if (gr_context_ && !use_vulkan_gr_context_)
+ gr_context_->resetContext();
+}
+
+bool SharedContextState::initialized() const {
+ return true;
+}
+
+const gles2::ContextState* SharedContextState::GetContextState() {
+ if (need_context_state_reset_) {
+    // Return nullptr to force full state restoration by the caller. We do
+ // this because GrContext changes to GL state are untracked in our
+ // context_state_.
+ return nullptr;
+ }
+ return context_state_.get();
+}
+
+void SharedContextState::RestoreState(const gles2::ContextState* prev_state) {
+ PessimisticallyResetGrContext();
+ context_state_->RestoreState(prev_state);
+ need_context_state_reset_ = false;
+}
+
+void SharedContextState::RestoreGlobalState() const {
+ PessimisticallyResetGrContext();
+ context_state_->RestoreGlobalState(nullptr);
+}
+void SharedContextState::ClearAllAttributes() const {}
+
+void SharedContextState::RestoreActiveTexture() const {
+ PessimisticallyResetGrContext();
+}
+
+void SharedContextState::RestoreAllTextureUnitAndSamplerBindings(
+ const gles2::ContextState* prev_state) const {
+ PessimisticallyResetGrContext();
+}
+
+void SharedContextState::RestoreActiveTextureUnitBinding(
+ unsigned int target) const {
+ PessimisticallyResetGrContext();
+}
+
+void SharedContextState::RestoreBufferBinding(unsigned int target) {
+ PessimisticallyResetGrContext();
+ if (target == GL_PIXEL_PACK_BUFFER) {
+ context_state_->UpdatePackParameters();
+ } else if (target == GL_PIXEL_UNPACK_BUFFER) {
+ context_state_->UpdateUnpackParameters();
+ }
+ context_state_->api()->glBindBufferFn(target, 0);
+}
+
+void SharedContextState::RestoreBufferBindings() const {
+ PessimisticallyResetGrContext();
+ context_state_->RestoreBufferBindings();
+}
+
+void SharedContextState::RestoreFramebufferBindings() const {
+ PessimisticallyResetGrContext();
+ context_state_->fbo_binding_for_scissor_workaround_dirty = true;
+ context_state_->stencil_state_changed_since_validation = true;
+}
+
+void SharedContextState::RestoreRenderbufferBindings() {
+ PessimisticallyResetGrContext();
+ context_state_->RestoreRenderbufferBindings();
+}
+
+void SharedContextState::RestoreProgramBindings() const {
+ PessimisticallyResetGrContext();
+ context_state_->RestoreProgramSettings(nullptr, false);
+}
+
+void SharedContextState::RestoreTextureUnitBindings(unsigned unit) const {
+ PessimisticallyResetGrContext();
+}
+
+void SharedContextState::RestoreVertexAttribArray(unsigned index) {
+ NOTIMPLEMENTED();
+}
+
+void SharedContextState::RestoreAllExternalTextureBindingsIfNeeded() {
+ PessimisticallyResetGrContext();
+}
+
+QueryManager* SharedContextState::GetQueryManager() {
+ return nullptr;
+}
+
+} // namespace gpu
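
shared_context_state.cc is new in this patch. Callers construct the state, initialize the GrContext, and then (unless running on Vulkan) initialize GL, as the updated RasterDecoderOOPTest above does. A condensed usage fragment under the same assumptions as that test (a current GL context, and workarounds/feature_info already built); a sketch, not part of the change:

    // Fragment: the construction/initialization sequence used by the tests.
    auto context_state = base::MakeRefCounted<gpu::SharedContextState>(
        std::move(share_group), std::move(surface), std::move(context),
        /*use_virtualized_gl_contexts=*/false,
        base::DoNothing() /* context_lost_callback */);
    context_state->InitializeGrContext(workarounds, /*cache=*/nullptr);
    if (!context_state->InitializeGL(gpu::GpuPreferences(), feature_info))
      LOG(ERROR) << "SharedContextState GL initialization failed.";
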
diff --git a/chromium/gpu/command_buffer/service/shared_context_state.h b/chromium/gpu/command_buffer/service/shared_context_state.h
new file mode 100644
index 00000000000..51a28b2ebc5
--- /dev/null
+++ b/chromium/gpu/command_buffer/service/shared_context_state.h
@@ -0,0 +1,164 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef GPU_COMMAND_BUFFER_SERVICE_SHARED_CONTEXT_STATE_H_
+#define GPU_COMMAND_BUFFER_SERVICE_SHARED_CONTEXT_STATE_H_
+
+#include <memory>
+#include <vector>
+
+#include "base/memory/memory_pressure_listener.h"
+#include "base/memory/ref_counted.h"
+#include "base/memory/weak_ptr.h"
+#include "base/trace_event/memory_dump_provider.h"
+#include "gpu/command_buffer/common/skia_utils.h"
+#include "gpu/command_buffer/service/gl_context_virtual_delegate.h"
+#include "gpu/gpu_gles2_export.h"
+#include "third_party/skia/include/gpu/GrContext.h"
+#include "ui/gl/progress_reporter.h"
+
+namespace gl {
+class GLContext;
+class GLShareGroup;
+class GLSurface;
+} // namespace gl
+
+namespace viz {
+class VulkanContextProvider;
+} // namespace viz
+
+namespace gpu {
+class GpuDriverBugWorkarounds;
+class GpuProcessActivityFlags;
+class ServiceTransferCache;
+struct GpuPreferences;
+
+namespace gles2 {
+class FeatureInfo;
+struct ContextState;
+} // namespace gles2
+
+class GPU_GLES2_EXPORT SharedContextState
+ : public base::trace_event::MemoryDumpProvider,
+ public gpu::GLContextVirtualDelegate,
+ public base::RefCounted<SharedContextState> {
+ public:
+  // TODO: Refactor code to have separate constructors for GL and Vulkan, and
+  // not initialize/use GL related info for Vulkan and vice versa.
+ SharedContextState(
+ scoped_refptr<gl::GLShareGroup> share_group,
+ scoped_refptr<gl::GLSurface> surface,
+ scoped_refptr<gl::GLContext> context,
+ bool use_virtualized_gl_contexts,
+ base::OnceClosure context_lost_callback,
+ viz::VulkanContextProvider* vulkan_context_provider = nullptr);
+
+ void InitializeGrContext(const GpuDriverBugWorkarounds& workarounds,
+ GrContextOptions::PersistentCache* cache,
+ GpuProcessActivityFlags* activity_flags = nullptr,
+ gl::ProgressReporter* progress_reporter = nullptr);
+
+ bool InitializeGL(const GpuPreferences& gpu_preferences,
+ scoped_refptr<gles2::FeatureInfo> feature_info);
+ bool IsGLInitialized() const { return !!feature_info_; }
+
+ bool MakeCurrent(gl::GLSurface* surface);
+ void MarkContextLost();
+ bool IsCurrent(gl::GLSurface* surface);
+
+ void PurgeMemory(
+ base::MemoryPressureListener::MemoryPressureLevel memory_pressure_level);
+
+ void PessimisticallyResetGrContext() const;
+
+ gl::GLShareGroup* share_group() { return share_group_.get(); }
+ gl::GLContext* context() { return context_.get(); }
+ gl::GLContext* real_context() { return real_context_.get(); }
+ gl::GLSurface* surface() { return surface_.get(); }
+ viz::VulkanContextProvider* vk_context_provider() {
+ return vk_context_provider_;
+ }
+ GrContext* gr_context() { return gr_context_; }
+ gles2::FeatureInfo* feature_info() { return feature_info_.get(); }
+ gles2::ContextState* context_state() const { return context_state_.get(); }
+ bool context_lost() const { return context_lost_; }
+ bool need_context_state_reset() const { return need_context_state_reset_; }
+ void set_need_context_state_reset(bool reset) {
+ need_context_state_reset_ = reset;
+ }
+ ServiceTransferCache* transfer_cache() { return transfer_cache_.get(); }
+ std::vector<uint8_t>* scratch_deserialization_buffer() {
+ return &scratch_deserialization_buffer_;
+ }
+ bool use_vulkan_gr_context() const { return use_vulkan_gr_context_; }
+ size_t glyph_cache_max_texture_bytes() const {
+ return glyph_cache_max_texture_bytes_;
+ }
+ bool use_virtualized_gl_contexts() const {
+ return use_virtualized_gl_contexts_;
+ }
+
+ // base::trace_event::MemoryDumpProvider implementation.
+ bool OnMemoryDump(const base::trace_event::MemoryDumpArgs& args,
+ base::trace_event::ProcessMemoryDump* pmd) override;
+
+ private:
+ friend class base::RefCounted<SharedContextState>;
+
+ ~SharedContextState() override;
+
+ // gpu::GLContextVirtualDelegate implementation.
+ bool initialized() const override;
+ const gles2::ContextState* GetContextState() override;
+ void RestoreState(const gles2::ContextState* prev_state) override;
+ void RestoreGlobalState() const override;
+ void ClearAllAttributes() const override;
+ void RestoreActiveTexture() const override;
+ void RestoreAllTextureUnitAndSamplerBindings(
+ const gles2::ContextState* prev_state) const override;
+ void RestoreActiveTextureUnitBinding(unsigned int target) const override;
+ void RestoreBufferBinding(unsigned int target) override;
+ void RestoreBufferBindings() const override;
+ void RestoreFramebufferBindings() const override;
+ void RestoreRenderbufferBindings() override;
+ void RestoreProgramBindings() const override;
+ void RestoreTextureUnitBindings(unsigned unit) const override;
+ void RestoreVertexAttribArray(unsigned index) override;
+ void RestoreAllExternalTextureBindingsIfNeeded() override;
+ QueryManager* GetQueryManager() override;
+
+ bool use_virtualized_gl_contexts_ = false;
+ base::OnceClosure context_lost_callback_;
+ viz::VulkanContextProvider* vk_context_provider_ = nullptr;
+ GrContext* gr_context_ = nullptr;
+ const bool use_vulkan_gr_context_;
+
+ scoped_refptr<gl::GLShareGroup> share_group_;
+ scoped_refptr<gl::GLContext> context_;
+ scoped_refptr<gl::GLContext> real_context_;
+ scoped_refptr<gl::GLSurface> surface_;
+ scoped_refptr<gles2::FeatureInfo> feature_info_;
+
+ // raster decoders and display compositor share this context_state_.
+ std::unique_ptr<gles2::ContextState> context_state_;
+
+ sk_sp<GrContext> owned_gr_context_;
+ std::unique_ptr<ServiceTransferCache> transfer_cache_;
+ size_t glyph_cache_max_texture_bytes_ = 0u;
+ std::vector<uint8_t> scratch_deserialization_buffer_;
+
+ // |need_context_state_reset| is set whenever Skia may have altered the
+ // driver's GL state.
+ bool need_context_state_reset_ = false;
+
+ bool context_lost_ = false;
+
+ base::WeakPtrFactory<SharedContextState> weak_ptr_factory_;
+
+ DISALLOW_COPY_AND_ASSIGN(SharedContextState);
+};
+
+} // namespace gpu
+
+#endif // GPU_COMMAND_BUFFER_SERVICE_SHARED_CONTEXT_STATE_H_
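
A caller-side note on the need_context_state_reset contract declared above: once Skia may have touched untracked driver state, GetContextState() returns nullptr and a decoder must restore all state rather than diff against a cached snapshot. An illustrative fragment (the helper name is hypothetical; GetContextState() is reached through the GLContextVirtualDelegate interface this class implements):

    // Hypothetical helper: a null snapshot forces full state restoration,
    // since gles2::ContextState::RestoreState(nullptr) restores everything
    // while RestoreState(prev) only applies the differences.
    void RestoreForDecoder(gpu::GLContextVirtualDelegate* delegate,
                           gpu::gles2::ContextState* decoder_state) {
      const gpu::gles2::ContextState* prev = delegate->GetContextState();
      decoder_state->RestoreState(prev);
    }
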
diff --git a/chromium/gpu/command_buffer/service/shared_image_backing_factory.h b/chromium/gpu/command_buffer/service/shared_image_backing_factory.h
index f1ae9afb518..14dcb5bae91 100644
--- a/chromium/gpu/command_buffer/service/shared_image_backing_factory.h
+++ b/chromium/gpu/command_buffer/service/shared_image_backing_factory.h
@@ -32,6 +32,13 @@ class SharedImageBackingFactory {
uint32_t usage) = 0;
virtual std::unique_ptr<SharedImageBacking> CreateSharedImage(
const Mailbox& mailbox,
+ viz::ResourceFormat format,
+ const gfx::Size& size,
+ const gfx::ColorSpace& color_space,
+ uint32_t usage,
+ base::span<const uint8_t> pixel_data) = 0;
+ virtual std::unique_ptr<SharedImageBacking> CreateSharedImage(
+ const Mailbox& mailbox,
int client_id,
gfx::GpuMemoryBufferHandle handle,
gfx::BufferFormat format,
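
The hunk above adds a CreateSharedImage() overload that takes initial contents as base::span<const uint8_t>, so callers can upload pixel data at creation time instead of creating first and writing separately. A hedged call-site fragment; |factory| and the concrete format/size/usage values are assumptions for illustration:

    // Fragment: create a 4x4 RGBA shared image pre-filled with opaque white.
    std::vector<uint8_t> pixels(4 * 4 * 4, 0xFF);
    gpu::Mailbox mailbox = gpu::Mailbox::Generate();
    std::unique_ptr<gpu::SharedImageBacking> backing =
        factory->CreateSharedImage(mailbox, viz::ResourceFormat::RGBA_8888,
                                   gfx::Size(4, 4),
                                   gfx::ColorSpace::CreateSRGB(),
                                   /*usage=*/0, base::make_span(pixels));
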
diff --git a/chromium/gpu/command_buffer/service/shared_image_backing_factory_ahardwarebuffer.cc b/chromium/gpu/command_buffer/service/shared_image_backing_factory_ahardwarebuffer.cc
index 37b766eaeac..8f3f5057b29 100644
--- a/chromium/gpu/command_buffer/service/shared_image_backing_factory_ahardwarebuffer.cc
+++ b/chromium/gpu/command_buffer/service/shared_image_backing_factory_ahardwarebuffer.cc
@@ -4,9 +4,12 @@
#include "gpu/command_buffer/service/shared_image_backing_factory_ahardwarebuffer.h"
+#include <sync/sync.h>
+
#include "base/android/android_hardware_buffer_compat.h"
#include "base/android/scoped_hardware_buffer_handle.h"
#include "base/logging.h"
+#include "components/viz/common/gpu/vulkan_context_provider.h"
#include "components/viz/common/resources/resource_format_utils.h"
#include "components/viz/common/resources/resource_sizes.h"
#include "gpu/command_buffer/common/gles2_cmd_utils.h"
@@ -15,20 +18,88 @@
#include "gpu/command_buffer/service/gles2_cmd_decoder.h"
#include "gpu/command_buffer/service/mailbox_manager.h"
#include "gpu/command_buffer/service/memory_tracking.h"
+#include "gpu/command_buffer/service/shared_context_state.h"
#include "gpu/command_buffer/service/shared_image_backing.h"
#include "gpu/command_buffer/service/shared_image_representation.h"
#include "gpu/command_buffer/service/skia_utils.h"
#include "gpu/command_buffer/service/texture_manager.h"
+#include "gpu/ipc/common/android/android_image_reader_utils.h"
+#include "gpu/vulkan/vulkan_device_queue.h"
+#include "gpu/vulkan/vulkan_function_pointers.h"
+#include "gpu/vulkan/vulkan_implementation.h"
+#include "third_party/skia/include/core/SkPromiseImageTexture.h"
+#include "third_party/skia/include/gpu/GrBackendSemaphore.h"
#include "third_party/skia/include/gpu/GrBackendSurface.h"
#include "ui/gfx/color_space.h"
#include "ui/gfx/geometry/size.h"
#include "ui/gl/gl_context.h"
+#include "ui/gl/gl_fence_android_native_fence_sync.h"
#include "ui/gl/gl_gl_api_implementation.h"
#include "ui/gl/gl_image_ahardwarebuffer.h"
#include "ui/gl/gl_version_info.h"
namespace gpu {
+// Implementation of SharedImageBacking that holds an AHardwareBuffer. This
+// can be used to create a GL texture or a VK Image from the AHardwareBuffer
+// backing.
+class SharedImageBackingAHB : public SharedImageBacking {
+ public:
+ SharedImageBackingAHB(const Mailbox& mailbox,
+ viz::ResourceFormat format,
+ const gfx::Size& size,
+ const gfx::ColorSpace& color_space,
+ uint32_t usage,
+ base::android::ScopedHardwareBufferHandle handle,
+ size_t estimated_size,
+ SharedContextState* context_state);
+
+ ~SharedImageBackingAHB() override;
+
+ bool IsCleared() const override;
+ void SetCleared() override;
+ void Update() override;
+ bool ProduceLegacyMailbox(MailboxManager* mailbox_manager) override;
+ void Destroy() override;
+ SharedContextState* GetContextState() const;
+ base::ScopedFD TakeGLWriteSyncFd();
+ base::ScopedFD TakeVkReadSyncFd();
+ base::android::ScopedHardwareBufferHandle GetAhbHandle();
+ void SetGLWriteSyncFd(base::ScopedFD fd);
+ void SetVkReadSyncFd(base::ScopedFD fd);
+
+ protected:
+ std::unique_ptr<SharedImageRepresentationGLTexture> ProduceGLTexture(
+ SharedImageManager* manager,
+ MemoryTypeTracker* tracker) override;
+
+ std::unique_ptr<SharedImageRepresentationSkia> ProduceSkia(
+ SharedImageManager* manager,
+ MemoryTypeTracker* tracker) override;
+
+ private:
+ bool GenGLTexture();
+ base::android::ScopedHardwareBufferHandle hardware_buffer_handle_;
+
+  // This texture will be lazily initialized/created when ProduceGLTexture is
+  // called.
+ gles2::Texture* texture_ = nullptr;
+
+  // TODO(vikassoni): In the future, when we add begin/end write support, we
+  // will need to use this flag properly: pass the is_cleared_ information to
+  // the GL texture representation on begin write, and back to this class from
+  // the GL texture representation after end write. This is needed because
+  // this class cannot know whether SetCleared() arrives while a begin write
+  // is in progress on the GL texture representation.
+ bool is_cleared_ = false;
+ SharedContextState* context_state_ = nullptr;
+ base::ScopedFD gl_write_sync_fd_;
+ base::ScopedFD vk_read_sync_fd_;
+
+ sk_sp<SkPromiseImageTexture> cached_promise_texture_;
+ DISALLOW_COPY_AND_ASSIGN(SharedImageBackingAHB);
+};
+
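
The fence handoff that SharedImageBackingAHB implements above reduces to a
one-slot mailbox per direction: a producer deposits a sync fd, and the consumer
moves it out and waits on it exactly once. A minimal, self-contained sketch of
that ownership pattern follows; ScopedFd is a hypothetical stand-in for
base::ScopedFD, which behaves the same way.

#include <unistd.h>

#include <utility>

class ScopedFd {
 public:
  ScopedFd() = default;
  explicit ScopedFd(int fd) : fd_(fd) {}
  ScopedFd(ScopedFd&& other) : fd_(other.release()) {}
  ScopedFd& operator=(ScopedFd&& other) {
    reset(other.release());
    return *this;
  }
  ~ScopedFd() { reset(-1); }
  bool is_valid() const { return fd_ >= 0; }
  int get() const { return fd_; }
  int release() {
    int fd = fd_;
    fd_ = -1;
    return fd;
  }
  void reset(int fd) {
    if (fd_ >= 0)
      close(fd_);
    fd_ = fd;
  }

 private:
  int fd_ = -1;  // -1 means "no fence pending".
};

// Take*() moves the fd out and leaves the slot invalid, so a fence deposited
// by one EndAccess/EndWriteAccess is consumed by at most one waiter.
class FenceSlots {
 public:
  void SetGLWriteSyncFd(ScopedFd fd) { gl_write_sync_fd_ = std::move(fd); }
  ScopedFd TakeGLWriteSyncFd() { return std::move(gl_write_sync_fd_); }
  void SetVkReadSyncFd(ScopedFd fd) { vk_read_sync_fd_ = std::move(fd); }
  ScopedFd TakeVkReadSyncFd() { return std::move(vk_read_sync_fd_); }

 private:
  ScopedFd gl_write_sync_fd_;
  ScopedFd vk_read_sync_fd_;
};
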
// Representation of a SharedImageBackingAHB as a GL Texture.
class SharedImageRepresentationGLTextureAHB
: public SharedImageRepresentationGLTexture {
@@ -42,266 +113,516 @@ class SharedImageRepresentationGLTextureAHB
gles2::Texture* GetTexture() override { return texture_; }
+ bool BeginAccess(GLenum mode) override {
+    // TODO(vikassoni): Currently the Skia Vk backing never writes, so GL
+    // reads do not need to wait for a Vk write to finish. Eventually, when Vk
+    // starts writing, we will need to TakeVkWriteSyncFd() and wait on it for
+    // mode = GL_SHARED_IMAGE_ACCESS_MODE_READ_CHROMIUM.
+
+ // Wait on Vk read if GL is going to write.
+ // TODO(vikassoni): GL writes should wait on both Vk read and Vk writes.
+ if (mode == GL_SHARED_IMAGE_ACCESS_MODE_READWRITE_CHROMIUM) {
+ base::ScopedFD sync_fd = ahb_backing()->TakeVkReadSyncFd();
+
+ // Create an egl fence sync and do a server side wait.
+ if (!InsertEglFenceAndWait(std::move(sync_fd)))
+ return false;
+ }
+ mode_ = mode;
+ return true;
+ }
+
+ void EndAccess() override {
+    // TODO(vikassoni): Currently the Skia Vk backing never writes, so Vk does
+    // not need to wait for GL reads to finish. Eventually, when Vk starts
+    // writing, we will need to create and set a GLReadSyncFd for mode =
+    // GL_SHARED_IMAGE_ACCESS_MODE_READ_CHROMIUM for Vk to wait on.
+ if (mode_ == GL_SHARED_IMAGE_ACCESS_MODE_READWRITE_CHROMIUM) {
+ base::ScopedFD sync_fd = CreateEglFenceAndExportFd();
+ if (!sync_fd.is_valid())
+ return;
+
+ // Pass this fd to its backing.
+ ahb_backing()->SetGLWriteSyncFd(std::move(sync_fd));
+ }
+ }
+
private:
+ SharedImageBackingAHB* ahb_backing() {
+ return static_cast<SharedImageBackingAHB*>(backing());
+ }
+
gles2::Texture* texture_;
+ GLenum mode_ = GL_SHARED_IMAGE_ACCESS_MODE_READ_CHROMIUM;
DISALLOW_COPY_AND_ASSIGN(SharedImageRepresentationGLTextureAHB);
};
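
BeginAccess()/EndAccess() above lean on two helpers declared in
android_image_reader_utils.h: InsertEglFenceAndWait() and
CreateEglFenceAndExportFd(). A sketch of what the export side boils down to,
assuming EGL_ANDROID_native_fence_sync is available (these are extension entry
points, so they are resolved through eglGetProcAddress):

#include <EGL/egl.h>
#include <EGL/eglext.h>
#include <GLES2/gl2.h>

int CreateEglFenceAndExportFdSketch(EGLDisplay display) {
  auto create_sync = reinterpret_cast<PFNEGLCREATESYNCKHRPROC>(
      eglGetProcAddress("eglCreateSyncKHR"));
  auto destroy_sync = reinterpret_cast<PFNEGLDESTROYSYNCKHRPROC>(
      eglGetProcAddress("eglDestroySyncKHR"));
  auto dup_fence_fd = reinterpret_cast<PFNEGLDUPNATIVEFENCEFDANDROIDPROC>(
      eglGetProcAddress("eglDupNativeFenceFDANDROID"));
  if (!create_sync || !destroy_sync || !dup_fence_fd)
    return -1;

  EGLSyncKHR sync =
      create_sync(display, EGL_SYNC_NATIVE_FENCE_ANDROID, nullptr);
  if (sync == EGL_NO_SYNC_KHR)
    return -1;
  // The fence must reach the driver before its fd can be duped.
  glFlush();
  int fd = dup_fence_fd(display, sync);
  destroy_sync(display, sync);
  return fd;  // EGL_NO_NATIVE_FENCE_FD_ANDROID (-1) on failure.
}

The import side is symmetric: wrap the incoming fd in an EGLSyncKHR via
EGL_SYNC_NATIVE_FENCE_FD_ANDROID and issue a server-side eglWaitSyncKHR, which
is what "create an egl fence sync and do a server side wait" means in the
comments above.
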
// GL backed Skia representation of SharedImageBackingAHB.
-// TODO(vikassoni): Add follow up patch to add a vulkan backed skia
-// representation.
class SharedImageRepresentationSkiaGLAHB
: public SharedImageRepresentationSkia {
public:
- SharedImageRepresentationSkiaGLAHB(SharedImageManager* manager,
- SharedImageBacking* backing,
- MemoryTypeTracker* tracker,
- GLenum target,
- GLenum internal_format,
- GLenum driver_internal_format,
- GLuint service_id)
+ SharedImageRepresentationSkiaGLAHB(
+ SharedImageManager* manager,
+ SharedImageBacking* backing,
+ sk_sp<SkPromiseImageTexture> cached_promise_image_texture,
+ MemoryTypeTracker* tracker,
+ GLenum target,
+ GLuint service_id)
: SharedImageRepresentationSkia(manager, backing, tracker),
- target_(target),
- internal_format_(internal_format),
- driver_internal_format_(driver_internal_format),
- service_id_(service_id) {}
+ promise_texture_(cached_promise_image_texture) {
+#if DCHECK_IS_ON()
+ context_ = gl::GLContext::GetCurrent();
+#endif
+ }
~SharedImageRepresentationSkiaGLAHB() override { DCHECK(!write_surface_); }
sk_sp<SkSurface> BeginWriteAccess(
GrContext* gr_context,
int final_msaa_count,
- SkColorType color_type,
const SkSurfaceProps& surface_props) override {
+ CheckContext();
+    // If there is already a write_surface_, the previous BeginWriteAccess was
+    // never matched by a corresponding EndWriteAccess.
if (write_surface_)
return nullptr;
- GrBackendTexture backend_texture;
- if (!GetGrBackendTexture(target_, size(), internal_format_,
- driver_internal_format_, service_id_, color_type,
- &backend_texture)) {
+    // Synchronize this access with the Vk reads.
+    // TODO(vikassoni): SkiaGL writes should wait on both Vk reads and Vk
+    // writes.
+ base::ScopedFD sync_fd = ahb_backing()->TakeVkReadSyncFd();
+
+ // Create an egl fence sync and do a server side wait.
+ if (!InsertEglFenceAndWait(std::move(sync_fd)))
+ return nullptr;
+
+ if (!promise_texture_) {
return nullptr;
}
+
+ SkColorType sk_color_type = viz::ResourceFormatToClosestSkColorType(
+ /*gpu_compositing=*/true, format());
auto surface = SkSurface::MakeFromBackendTextureAsRenderTarget(
- gr_context, backend_texture, kTopLeft_GrSurfaceOrigin, final_msaa_count,
- color_type, nullptr, &surface_props);
+ gr_context, promise_texture_->backendTexture(),
+ kTopLeft_GrSurfaceOrigin, final_msaa_count, sk_color_type, nullptr,
+ &surface_props);
write_surface_ = surface.get();
return surface;
}
void EndWriteAccess(sk_sp<SkSurface> surface) override {
+ CheckContext();
DCHECK_EQ(surface.get(), write_surface_);
DCHECK(surface->unique());
// TODO(ericrk): Keep the surface around for re-use.
write_surface_ = nullptr;
+
+ // Insert a gl fence to signal the write completion. Vulkan representation
+ // needs to wait on this signal before it can read from this.
+ base::ScopedFD sync_fd = CreateEglFenceAndExportFd();
+ if (!sync_fd.is_valid())
+ return;
+
+ // Pass this fd to its backing.
+ ahb_backing()->SetGLWriteSyncFd(std::move(sync_fd));
}
- bool BeginReadAccess(SkColorType color_type,
- GrBackendTexture* backend_texture) override {
- if (!GetGrBackendTexture(target_, size(), internal_format_,
- driver_internal_format_, service_id_, color_type,
- backend_texture)) {
- return false;
- }
- return true;
+ sk_sp<SkPromiseImageTexture> BeginReadAccess(SkSurface* sk_surface) override {
+ CheckContext();
+    // TODO(vikassoni): Currently the Skia Vk backing never writes, so this
+    // read does not need to wait for a Vk write to finish. Eventually, when
+    // Vk starts writing, we might need to TakeVkWriteSyncFd() and wait on it.
+ return promise_texture_;
}
void EndReadAccess() override {
+ CheckContext();
+    // TODO(vikassoni): Currently the Skia Vk backing never writes, so Vk does
+    // not need to wait for this read to finish. Eventually, when Vk starts
+    // writing, we will need to create and set a SkiaGLReadSyncFd.
// TODO(ericrk): Handle begin/end correctness checks.
}
private:
- GLenum target_;
- GLenum internal_format_ = 0;
- GLenum driver_internal_format_ = 0;
- GLuint service_id_;
+ SharedImageBackingAHB* ahb_backing() {
+ return static_cast<SharedImageBackingAHB*>(backing());
+ }
+ void CheckContext() {
+#if DCHECK_IS_ON()
+ DCHECK(gl::GLContext::GetCurrent() == context_);
+#endif
+ }
+
+ sk_sp<SkPromiseImageTexture> promise_texture_;
SkSurface* write_surface_ = nullptr;
+#if DCHECK_IS_ON()
+ gl::GLContext* context_;
+#endif
};
-// Implementation of SharedImageBacking that holds an AHardwareBuffer. This
-// can be used to create a GL texture or a VK Image from the AHardwareBuffer
-// backing.
-class SharedImageBackingAHB : public SharedImageBacking {
+// Vk backed Skia representation of SharedImageBackingAHB.
+class SharedImageRepresentationSkiaVkAHB
+ : public SharedImageRepresentationSkia {
public:
- SharedImageBackingAHB(const Mailbox& mailbox,
- viz::ResourceFormat format,
- const gfx::Size& size,
- const gfx::ColorSpace& color_space,
- uint32_t usage,
- base::android::ScopedHardwareBufferHandle handle,
- size_t estimated_size)
- : SharedImageBacking(mailbox,
- format,
- size,
- color_space,
- usage,
- estimated_size),
- hardware_buffer_handle_(std::move(handle)) {
- DCHECK(hardware_buffer_handle_.is_valid());
+ SharedImageRepresentationSkiaVkAHB(SharedImageManager* manager,
+ SharedImageBacking* backing)
+ : SharedImageRepresentationSkia(manager, backing, nullptr) {
+ SharedImageBackingAHB* ahb_backing =
+ static_cast<SharedImageBackingAHB*>(backing);
+ DCHECK(ahb_backing);
+ SharedContextState* context_state = ahb_backing->GetContextState();
+ DCHECK(context_state);
+ DCHECK(context_state->vk_context_provider());
+
+ vk_device_ = context_state->vk_context_provider()
+ ->GetDeviceQueue()
+ ->GetVulkanDevice();
+ vk_phy_device_ = context_state->vk_context_provider()
+ ->GetDeviceQueue()
+ ->GetVulkanPhysicalDevice();
+ vk_implementation_ =
+ context_state->vk_context_provider()->GetVulkanImplementation();
}
- ~SharedImageBackingAHB() override {
- // Check to make sure buffer is explicitly destroyed using Destroy() api
- // before this destructor is called.
- DCHECK(!hardware_buffer_handle_.is_valid());
- DCHECK(!texture_);
- }
+ ~SharedImageRepresentationSkiaVkAHB() override { DCHECK(!read_surface_); }
- bool IsCleared() const override {
- if (texture_)
- return texture_->IsLevelCleared(texture_->target(), 0);
- return is_cleared_;
+ sk_sp<SkSurface> BeginWriteAccess(
+ GrContext* gr_context,
+ int final_msaa_count,
+ const SkSurfaceProps& surface_props) override {
+ NOTIMPLEMENTED();
+ return nullptr;
}
- void SetCleared() override {
- if (texture_)
- texture_->SetLevelCleared(texture_->target(), 0, true);
- is_cleared_ = true;
- }
+ void EndWriteAccess(sk_sp<SkSurface> surface) override { NOTIMPLEMENTED(); }
- void Update() override {}
+ sk_sp<SkPromiseImageTexture> BeginReadAccess(SkSurface* sk_surface) override {
+    // Fail if the previous read access has not ended yet.
+ if (read_surface_)
+ return nullptr;
+ DCHECK(sk_surface);
+
+    // Synchronize the read access with the GL writes.
+ base::ScopedFD sync_fd = ahb_backing()->TakeGLWriteSyncFd();
+
+ // We need to wait only if there is a valid fd.
+ if (sync_fd.is_valid()) {
+ // Do a client side wait for now.
+      // TODO(vikassoni): There currently seems to be a Skia bug
+      // (https://bugs.chromium.org/p/chromium/issues/detail?id=916812) where
+      // wait() on the SkSurface crashes. Remove the sync_wait() and apply the
+      // CL mentioned in the bug once the issue is fixed.
+      static const int kInfiniteSyncWaitTimeout = -1;
+      if (sync_wait(sync_fd.get(), kInfiniteSyncWaitTimeout) < 0) {
+ LOG(ERROR) << "Failed while waiting on GL Write sync fd";
+ return nullptr;
+ }
+ }
- bool ProduceLegacyMailbox(MailboxManager* mailbox_manager) override {
- DCHECK(hardware_buffer_handle_.is_valid());
- if (!GenGLTexture())
- return false;
- DCHECK(texture_);
- mailbox_manager->ProduceTexture(mailbox(), texture_);
- return true;
+ // Create a VkImage and import AHB.
+ VkImage vk_image;
+ VkImageCreateInfo vk_image_info;
+ VkDeviceMemory vk_device_memory;
+ VkDeviceSize mem_allocation_size;
+ if (!vk_implementation_->CreateVkImageAndImportAHB(
+ vk_device_, vk_phy_device_, size(), ahb_backing()->GetAhbHandle(),
+ &vk_image, &vk_image_info, &vk_device_memory,
+ &mem_allocation_size)) {
+ return nullptr;
+ }
+
+ // Create backend texture from the VkImage.
+ GrVkAlloc alloc = {vk_device_memory, 0, mem_allocation_size, 0};
+ GrVkImageInfo vk_info = {vk_image,
+ alloc,
+ vk_image_info.tiling,
+ vk_image_info.initialLayout,
+ vk_image_info.format,
+ vk_image_info.mipLevels};
+ // TODO(bsalomon): Determine whether it makes sense to attempt to reuse this
+ // if the vk_info stays the same on subsequent calls.
+ auto promise_texture = SkPromiseImageTexture::Make(
+ GrBackendTexture(size().width(), size().height(), vk_info));
+ if (!promise_texture) {
+ vkDestroyImage(vk_device_, vk_image, nullptr);
+ vkFreeMemory(vk_device_, vk_device_memory, nullptr);
+ return nullptr;
+ }
+
+    // Cache the SkSurface in the representation so that it can be used in
+    // EndReadAccess(). The previous read_surface_ must already have been
+    // consumed by an EndReadAccess() call.
+ read_surface_ = sk_surface;
+ return promise_texture;
}
- void Destroy() override {
- DCHECK(hardware_buffer_handle_.is_valid());
- if (texture_) {
- texture_->RemoveLightweightRef(have_context());
- texture_ = nullptr;
+ void EndReadAccess() override {
+ // There should be a read_surface_ from the BeginReadAccess().
+ DCHECK(read_surface_);
+
+ // Create a vk semaphore which can be exported.
+ VkExportSemaphoreCreateInfo export_info;
+ export_info.sType = VK_STRUCTURE_TYPE_EXPORT_SEMAPHORE_CREATE_INFO;
+ export_info.pNext = nullptr;
+ export_info.handleTypes = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT;
+
+ VkSemaphore vk_semaphore;
+ VkSemaphoreCreateInfo sem_info;
+ sem_info.sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO;
+ sem_info.pNext = &export_info;
+ sem_info.flags = 0;
+    VkResult result =
+        vkCreateSemaphore(vk_device_, &sem_info, nullptr, &vk_semaphore);
+ if (result != VK_SUCCESS) {
+      // TODO(vikassoni): Add more error handling rather than just returning?
+ LOG(ERROR) << "vkCreateSemaphore failed";
+ read_surface_ = nullptr;
+ return;
}
- hardware_buffer_handle_.reset();
+ GrBackendSemaphore gr_semaphore;
+ gr_semaphore.initVulkan(vk_semaphore);
+
+ // If GrSemaphoresSubmitted::kNo is returned, the GPU back-end did not
+ // create or add any semaphores to signal on the GPU; the caller should not
+ // instruct the GPU to wait on any of the semaphores.
+ if (read_surface_->flushAndSignalSemaphores(1, &gr_semaphore) ==
+ GrSemaphoresSubmitted::kNo) {
+ vkDestroySemaphore(vk_device_, vk_semaphore, nullptr);
+ read_surface_ = nullptr;
+ return;
+ }
+ read_surface_ = nullptr;
+
+    // At this point all pending SkSurface commands have been issued to the
+    // GPU-backed API and any SkSurface MSAA is resolved; once they execute,
+    // the GPU signals the semaphores that were passed in. The caller must
+    // delete the semaphores it created.
+    // Export a sync fd from the semaphore.
+ base::ScopedFD sync_fd;
+ vk_implementation_->GetSemaphoreFdKHR(vk_device_, vk_semaphore, &sync_fd);
+
+    // Pass this sync fd to the backing.
+ ahb_backing()->SetVkReadSyncFd(std::move(sync_fd));
+
+    // TODO(vikassoni): We need to wait for the queue submission to complete
+    // before we can destroy the semaphore, which hurts performance. A future
+    // patch should handle this more efficiently: keep semaphores in an STL
+    // queue instead of destroying them, use a fence to check whether the
+    // batch that references a semaphore has finished executing, and delete
+    // the semaphore once its fence is signaled.
+ vkDeviceWaitIdle(vk_device_);
+ vkDestroySemaphore(vk_device_, vk_semaphore, nullptr);
}
- protected:
- std::unique_ptr<SharedImageRepresentationGLTexture> ProduceGLTexture(
- SharedImageManager* manager,
- MemoryTypeTracker* tracker) override {
- // Use same texture for all the texture representations generated from same
- // backing.
- if (!GenGLTexture())
- return nullptr;
+ private:
+ SharedImageBackingAHB* ahb_backing() {
+ return static_cast<SharedImageBackingAHB*>(backing());
+ }
+
+ SkSurface* read_surface_ = nullptr;
+ gpu::VulkanImplementation* vk_implementation_ = nullptr;
+ VkDevice vk_device_ = VK_NULL_HANDLE;
+ VkPhysicalDevice vk_phy_device_ = VK_NULL_HANDLE;
+};
+
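
EndReadAccess() above exports a sync fd from the signaled semaphore through
gpu::VulkanImplementation::GetSemaphoreFdKHR(). A standalone sketch of that
export using raw Vulkan, assuming a device created with
VK_KHR_external_semaphore_fd enabled:

#include <vulkan/vulkan.h>

int ExportSemaphoreSyncFd(VkDevice device, VkSemaphore semaphore) {
  VkSemaphoreGetFdInfoKHR get_fd_info = {};
  get_fd_info.sType = VK_STRUCTURE_TYPE_SEMAPHORE_GET_FD_INFO_KHR;
  get_fd_info.semaphore = semaphore;
  get_fd_info.handleType = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT;

  // Extension entry points are resolved through vkGetDeviceProcAddr.
  auto get_semaphore_fd = reinterpret_cast<PFN_vkGetSemaphoreFdKHR>(
      vkGetDeviceProcAddr(device, "vkGetSemaphoreFdKHR"));
  int fd = -1;
  if (!get_semaphore_fd ||
      get_semaphore_fd(device, &get_fd_info, &fd) != VK_SUCCESS) {
    return -1;
  }
  return fd;  // Caller owns the fd.
}

Note that with VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT the export is only
valid once the semaphore's signal operation has been submitted, which is why
the code above exports only after flushAndSignalSemaphores().
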
+SharedImageBackingAHB::SharedImageBackingAHB(
+ const Mailbox& mailbox,
+ viz::ResourceFormat format,
+ const gfx::Size& size,
+ const gfx::ColorSpace& color_space,
+ uint32_t usage,
+ base::android::ScopedHardwareBufferHandle handle,
+ size_t estimated_size,
+ SharedContextState* context_state)
+ : SharedImageBacking(mailbox,
+ format,
+ size,
+ color_space,
+ usage,
+ estimated_size),
+ hardware_buffer_handle_(std::move(handle)),
+ context_state_(context_state) {
+ DCHECK(hardware_buffer_handle_.is_valid());
+}
+
+SharedImageBackingAHB::~SharedImageBackingAHB() {
+ // Check to make sure buffer is explicitly destroyed using Destroy() api
+ // before this destructor is called.
+ DCHECK(!hardware_buffer_handle_.is_valid());
+ DCHECK(!texture_);
+}
+
+bool SharedImageBackingAHB::IsCleared() const {
+ if (texture_)
+ return texture_->IsLevelCleared(texture_->target(), 0);
+ return is_cleared_;
+}
+
+void SharedImageBackingAHB::SetCleared() {
+ if (texture_)
+ texture_->SetLevelCleared(texture_->target(), 0, true);
+ is_cleared_ = true;
+}
+
+void SharedImageBackingAHB::Update() {}
+
+bool SharedImageBackingAHB::ProduceLegacyMailbox(
+ MailboxManager* mailbox_manager) {
+ DCHECK(hardware_buffer_handle_.is_valid());
+ if (!GenGLTexture())
+ return false;
+ DCHECK(texture_);
+ mailbox_manager->ProduceTexture(mailbox(), texture_);
+ return true;
+}
- DCHECK(texture_);
- return std::make_unique<SharedImageRepresentationGLTextureAHB>(
- manager, this, tracker, texture_);
+void SharedImageBackingAHB::Destroy() {
+ DCHECK(hardware_buffer_handle_.is_valid());
+ if (texture_) {
+ texture_->RemoveLightweightRef(have_context());
+ texture_ = nullptr;
}
+ hardware_buffer_handle_.reset();
+}
- std::unique_ptr<SharedImageRepresentationSkia> ProduceSkia(
- SharedImageManager* manager,
- MemoryTypeTracker* tracker) override {
- // TODO(vikassoni): Currently we only have a GL backed skia representation.
- // Follow up patch will add support to check whether we are in Vulkan mode
- // OR GL mode and accordingly create Skia representation.
- if (!GenGLTexture())
- return nullptr;
+SharedContextState* SharedImageBackingAHB::GetContextState() const {
+ return context_state_;
+}
- DCHECK(texture_);
- return std::make_unique<SharedImageRepresentationSkiaGLAHB>(
- manager, this, tracker, texture_->target(), internal_format_,
- driver_internal_format_, texture_->service_id());
+base::ScopedFD SharedImageBackingAHB::TakeGLWriteSyncFd() {
+ return std::move(gl_write_sync_fd_);
+}
+
+void SharedImageBackingAHB::SetGLWriteSyncFd(base::ScopedFD fd) {
+ gl_write_sync_fd_ = std::move(fd);
+}
+
+base::ScopedFD SharedImageBackingAHB::TakeVkReadSyncFd() {
+ return std::move(vk_read_sync_fd_);
+}
+
+void SharedImageBackingAHB::SetVkReadSyncFd(base::ScopedFD fd) {
+ vk_read_sync_fd_ = std::move(fd);
+}
+
+base::android::ScopedHardwareBufferHandle
+SharedImageBackingAHB::GetAhbHandle() {
+ return hardware_buffer_handle_.Clone();
+}
+
+std::unique_ptr<SharedImageRepresentationGLTexture>
+SharedImageBackingAHB::ProduceGLTexture(SharedImageManager* manager,
+ MemoryTypeTracker* tracker) {
+  // Use the same texture for all texture representations generated from the
+  // same backing.
+ if (!GenGLTexture())
+ return nullptr;
+
+ DCHECK(texture_);
+ return std::make_unique<SharedImageRepresentationGLTextureAHB>(
+ manager, this, tracker, texture_);
+}
+
+std::unique_ptr<SharedImageRepresentationSkia>
+SharedImageBackingAHB::ProduceSkia(SharedImageManager* manager,
+ MemoryTypeTracker* tracker) {
+ DCHECK(context_state_);
+
+  // Check whether we are in Vulkan or GL mode and create the corresponding
+  // Skia representation.
+ if (context_state_->use_vulkan_gr_context()) {
+ return std::make_unique<SharedImageRepresentationSkiaVkAHB>(manager, this);
}
- private:
- bool GenGLTexture() {
- if (texture_)
- return true;
-
- DCHECK(hardware_buffer_handle_.is_valid());
-
- // Target for AHB backed egl images.
- // Note that we are not using GL_TEXTURE_EXTERNAL_OES target since sksurface
- // doesnt supports it. As per the egl documentation -
- // https://www.khronos.org/registry/OpenGL/extensions/OES/OES_EGL_image_external.txt
- // if GL_OES_EGL_image is supported then <target> may also be TEXTURE_2D.
- GLenum target = GL_TEXTURE_2D;
- GLenum get_target = GL_TEXTURE_BINDING_2D;
-
- // Create a gles2 texture using the AhardwareBuffer.
- gl::GLApi* api = gl::g_current_gl_context;
- GLuint service_id = 0;
- api->glGenTexturesFn(1, &service_id);
- GLint old_texture_binding = 0;
- api->glGetIntegervFn(get_target, &old_texture_binding);
- api->glBindTextureFn(target, service_id);
- api->glTexParameteriFn(target, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
- api->glTexParameteriFn(target, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
- api->glTexParameteriFn(target, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
- api->glTexParameteriFn(target, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
-
- // Create an egl image using AHardwareBuffer.
- auto egl_image = base::MakeRefCounted<gl::GLImageAHardwareBuffer>(size());
- if (!egl_image->Initialize(hardware_buffer_handle_.get(), false)) {
- LOG(ERROR) << "Failed to create EGL image ";
- api->glBindTextureFn(target, old_texture_binding);
- api->glDeleteTexturesFn(1, &service_id);
- return false;
- }
- if (!egl_image->BindTexImage(target)) {
- LOG(ERROR) << "Failed to bind egl image";
- api->glBindTextureFn(target, old_texture_binding);
- api->glDeleteTexturesFn(1, &service_id);
- return false;
- }
+ if (!GenGLTexture())
+ return nullptr;
- // Create a gles2 Texture.
- texture_ = new gles2::Texture(service_id);
- texture_->SetLightweightRef();
- texture_->SetTarget(target, 1);
- texture_->sampler_state_.min_filter = GL_LINEAR;
- texture_->sampler_state_.mag_filter = GL_LINEAR;
- texture_->sampler_state_.wrap_t = GL_CLAMP_TO_EDGE;
- texture_->sampler_state_.wrap_s = GL_CLAMP_TO_EDGE;
-
- // If the backing is already cleared, no need to clear it again.
- gfx::Rect cleared_rect;
- if (is_cleared_)
- cleared_rect = gfx::Rect(size());
-
- GLenum gl_format = viz::GLDataFormat(format());
- GLenum gl_type = viz::GLDataType(format());
- texture_->SetLevelInfo(target, 0, egl_image->GetInternalFormat(),
- size().width(), size().height(), 1, 0, gl_format,
- gl_type, cleared_rect);
- texture_->SetLevelImage(target, 0, egl_image.get(), gles2::Texture::BOUND);
- texture_->SetImmutable(true);
- api->glBindTextureFn(target, old_texture_binding);
- internal_format_ = egl_image->GetInternalFormat();
- driver_internal_format_ = gl::GetInternalFormat(
- gl::GLContext::GetCurrent()->GetVersionInfo(), internal_format_);
- return true;
+ if (!cached_promise_texture_) {
+ GrBackendTexture backend_texture;
+ GetGrBackendTexture(gl::GLContext::GetCurrent()->GetVersionInfo(),
+ texture_->target(), size(), texture_->service_id(),
+ format(), &backend_texture);
+ cached_promise_texture_ = SkPromiseImageTexture::Make(backend_texture);
}
+ DCHECK(texture_);
+ return std::make_unique<SharedImageRepresentationSkiaGLAHB>(
+ manager, this, cached_promise_texture_, tracker, texture_->target(),
+ texture_->service_id());
+}
- base::android::ScopedHardwareBufferHandle hardware_buffer_handle_;
+bool SharedImageBackingAHB::GenGLTexture() {
+ if (texture_)
+ return true;
- // This texture will be lazily initialised/created when ProduceGLTexture is
- // called.
- gles2::Texture* texture_ = nullptr;
- GLenum internal_format_ = 0;
- GLenum driver_internal_format_ = 0;
+ DCHECK(hardware_buffer_handle_.is_valid());
- // TODO(vikassoni): In future when we add begin/end write support, we will
- // need to properly use this flag to pass the is_cleared_ information to
- // the GL texture representation while begin write and back to this class from
- // the GL texture represntation after end write. This is because this class
- // will not know if SetCleared() arrives during begin write happening on GL
- // texture representation.
- bool is_cleared_ = false;
+  // Target for AHB-backed EGL images.
+  // Note that we are not using the GL_TEXTURE_EXTERNAL_OES target since
+  // SkSurface doesn't support it. As per the EGL documentation -
+  // https://www.khronos.org/registry/OpenGL/extensions/OES/OES_EGL_image_external.txt
+  // - if GL_OES_EGL_image is supported then <target> may also be TEXTURE_2D.
+ GLenum target = GL_TEXTURE_2D;
+ GLenum get_target = GL_TEXTURE_BINDING_2D;
- DISALLOW_COPY_AND_ASSIGN(SharedImageBackingAHB);
-};
+  // Create a gles2 texture using the AHardwareBuffer.
+ gl::GLApi* api = gl::g_current_gl_context;
+ GLuint service_id = 0;
+ api->glGenTexturesFn(1, &service_id);
+ GLint old_texture_binding = 0;
+ api->glGetIntegervFn(get_target, &old_texture_binding);
+ api->glBindTextureFn(target, service_id);
+ api->glTexParameteriFn(target, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
+ api->glTexParameteriFn(target, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
+ api->glTexParameteriFn(target, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
+ api->glTexParameteriFn(target, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
+
+ // Create an egl image using AHardwareBuffer.
+ auto egl_image = base::MakeRefCounted<gl::GLImageAHardwareBuffer>(size());
+ if (!egl_image->Initialize(hardware_buffer_handle_.get(), false)) {
+ LOG(ERROR) << "Failed to create EGL image ";
+ api->glBindTextureFn(target, old_texture_binding);
+ api->glDeleteTexturesFn(1, &service_id);
+ return false;
+ }
+ if (!egl_image->BindTexImage(target)) {
+ LOG(ERROR) << "Failed to bind egl image";
+ api->glBindTextureFn(target, old_texture_binding);
+ api->glDeleteTexturesFn(1, &service_id);
+ return false;
+ }
+
+ // Create a gles2 Texture.
+ texture_ = new gles2::Texture(service_id);
+ texture_->SetLightweightRef();
+ texture_->SetTarget(target, 1);
+ texture_->sampler_state_.min_filter = GL_LINEAR;
+ texture_->sampler_state_.mag_filter = GL_LINEAR;
+ texture_->sampler_state_.wrap_t = GL_CLAMP_TO_EDGE;
+ texture_->sampler_state_.wrap_s = GL_CLAMP_TO_EDGE;
+
+ // If the backing is already cleared, no need to clear it again.
+ gfx::Rect cleared_rect;
+ if (is_cleared_)
+ cleared_rect = gfx::Rect(size());
+
+ GLenum gl_format = viz::GLDataFormat(format());
+ GLenum gl_type = viz::GLDataType(format());
+ texture_->SetLevelInfo(target, 0, egl_image->GetInternalFormat(),
+ size().width(), size().height(), 1, 0, gl_format,
+ gl_type, cleared_rect);
+ texture_->SetLevelImage(target, 0, egl_image.get(), gles2::Texture::BOUND);
+ texture_->SetImmutable(true);
+ api->glBindTextureFn(target, old_texture_binding);
+ DCHECK_EQ(egl_image->GetInternalFormat(), gl_format);
+ return true;
+}
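
For reference, a sketch of the EGLImage plumbing that gl::GLImageAHardwareBuffer
performs inside Initialize()/BindTexImage() above, written against raw EGL/GLES
and assuming EGL_ANDROID_get_native_client_buffer, EGL_KHR_image_base and
GL_OES_EGL_image (in practice these entry points may also need an
eglGetProcAddress lookup):

#include <EGL/egl.h>
#include <EGL/eglext.h>
#include <GLES2/gl2.h>
#include <GLES2/gl2ext.h>
#include <android/hardware_buffer.h>

bool BindAhbToCurrentTexture(EGLDisplay display, AHardwareBuffer* buffer) {
  EGLClientBuffer client_buffer = eglGetNativeClientBufferANDROID(buffer);
  EGLImageKHR image =
      eglCreateImageKHR(display, EGL_NO_CONTEXT, EGL_NATIVE_BUFFER_ANDROID,
                        client_buffer, nullptr);
  if (image == EGL_NO_IMAGE_KHR)
    return false;
  // Attach the image to the texture currently bound to GL_TEXTURE_2D; the
  // texture keeps the underlying buffer alive as an EGLImage sibling.
  glEGLImageTargetTexture2DOES(GL_TEXTURE_2D, image);
  return glGetError() == GL_NO_ERROR;
}
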
SharedImageBackingFactoryAHB::SharedImageBackingFactoryAHB(
const GpuDriverBugWorkarounds& workarounds,
- const GpuFeatureInfo& gpu_feature_info) {
+ const GpuFeatureInfo& gpu_feature_info,
+ SharedContextState* context_state)
+ : context_state_(context_state) {
scoped_refptr<gles2::FeatureInfo> feature_info =
new gles2::FeatureInfo(workarounds, gpu_feature_info);
feature_info->Initialize(ContextType::CONTEXT_TYPE_OPENGLES2, false,
@@ -324,7 +645,8 @@ SharedImageBackingFactoryAHB::SharedImageBackingFactoryAHB(
info.ahb_format = AHardwareBufferFormat(format);
// TODO(vikassoni): In future when we use GL_TEXTURE_EXTERNAL_OES target
- // with AHB, we need to check if oes_egl_image_external is supported or not.
+ // with AHB, we need to check if oes_egl_image_external is supported or
+ // not.
if (!is_egl_image_supported)
continue;
@@ -335,11 +657,8 @@ SharedImageBackingFactoryAHB::SharedImageBackingFactoryAHB(
GLenum gl_format = viz::GLDataFormat(format);
GLenum gl_type = viz::GLDataType(format);
- // GLImageAHardwareBuffer currently supports internal format GL_RGBA only.
- // TODO(vikassoni): Pass the AHBuffer format while GLImageAHardwareBuffer
- // creation and based on that return the equivalent internal format as
- // GL_RGBA or GL_RGB.
- if (internal_format != GL_RGBA)
+ // GLImageAHardwareBuffer supports internal format GL_RGBA and GL_RGB.
+ if (internal_format != GL_RGBA && internal_format != GL_RGB)
continue;
// Validate if GL format, type and internal format is supported.
@@ -354,12 +673,13 @@ SharedImageBackingFactoryAHB::SharedImageBackingFactoryAHB(
}
// TODO(vikassoni): We are using below GL api calls for now as Vulkan mode
// doesn't exist. Once we have vulkan support, we shouldn't query GL in this
- // code until we are asked to make a GL representation (or allocate a backing
- // for import into GL)? We may use an AHardwareBuffer exclusively with Vulkan,
- // where there is no need to require that a GL context is current. Maybe we
- // can lazy init this if someone tries to create an AHardwareBuffer with
- // SHARED_IMAGE_USAGE_GLES2 || !gpu_preferences.enable_vulkan. When in Vulkan
- // mode, we should only need this with GLES2.
+ // code until we are asked to make a GL representation (or allocate a
+ // backing for import into GL)? We may use an AHardwareBuffer exclusively
+ // with Vulkan, where there is no need to require that a GL context is
+ // current. Maybe we can lazy init this if someone tries to create an
+ // AHardwareBuffer with SHARED_IMAGE_USAGE_GLES2 ||
+ // !gpu_preferences.enable_vulkan. When in Vulkan mode, we should only need
+ // this with GLES2.
gl::GLApi* api = gl::g_current_gl_context;
api->glGetIntegervFn(GL_MAX_TEXTURE_SIZE, &max_gl_texture_size_);
@@ -391,16 +711,16 @@ SharedImageBackingFactoryAHB::CreateSharedImage(
}
// SHARED_IMAGE_USAGE_RASTER is set when we want to write on Skia
- // representation and SHARED_IMAGE_USAGE_DISPLAY is used for cases we want to
- // read from skia representation.
- // TODO(vikassoni): Also check gpu_preferences.enable_vulkan to figure out if
- // skia is using vulkan backing or GL backing.
+ // representation and SHARED_IMAGE_USAGE_DISPLAY is used for cases we want
+ // to read from skia representation.
+ // TODO(vikassoni): Also check gpu_preferences.enable_vulkan to figure out
+ // if skia is using vulkan backing or GL backing.
const bool use_gles2 =
(usage & (SHARED_IMAGE_USAGE_GLES2 | SHARED_IMAGE_USAGE_RASTER |
SHARED_IMAGE_USAGE_DISPLAY));
- // If usage flags indicated this backing can be used as a GL texture, then do
- // below gl related checks.
+ // If usage flags indicated this backing can be used as a GL texture, then
+ // do below gl related checks.
if (use_gles2) {
// Check if the GL texture can be created from AHB with this format.
if (!format_info.gl_supported) {
@@ -459,10 +779,23 @@ SharedImageBackingFactoryAHB::CreateSharedImage(
auto backing = std::make_unique<SharedImageBackingAHB>(
mailbox, format, size, color_space, usage,
- base::android::ScopedHardwareBufferHandle::Adopt(buffer), estimated_size);
+ base::android::ScopedHardwareBufferHandle::Adopt(buffer), estimated_size,
+ context_state_);
return backing;
}
+std::unique_ptr<SharedImageBacking>
+SharedImageBackingFactoryAHB::CreateSharedImage(
+ const Mailbox& mailbox,
+ viz::ResourceFormat format,
+ const gfx::Size& size,
+ const gfx::ColorSpace& color_space,
+ uint32_t usage,
+ base::span<const uint8_t> pixel_data) {
+ NOTIMPLEMENTED();
+ return nullptr;
+}
+
SharedImageBackingFactoryAHB::FormatInfo::FormatInfo() = default;
SharedImageBackingFactoryAHB::FormatInfo::~FormatInfo() = default;
diff --git a/chromium/gpu/command_buffer/service/shared_image_backing_factory_ahardwarebuffer.h b/chromium/gpu/command_buffer/service/shared_image_backing_factory_ahardwarebuffer.h
index 75625455bc9..e733b341f63 100644
--- a/chromium/gpu/command_buffer/service/shared_image_backing_factory_ahardwarebuffer.h
+++ b/chromium/gpu/command_buffer/service/shared_image_backing_factory_ahardwarebuffer.h
@@ -17,6 +17,7 @@ class ColorSpace;
} // namespace gfx
namespace gpu {
+class SharedContextState;
class SharedImageBacking;
class GpuDriverBugWorkarounds;
struct GpuFeatureInfo;
@@ -28,7 +29,8 @@ class GPU_GLES2_EXPORT SharedImageBackingFactoryAHB
: public SharedImageBackingFactory {
public:
SharedImageBackingFactoryAHB(const GpuDriverBugWorkarounds& workarounds,
- const GpuFeatureInfo& gpu_feature_info);
+ const GpuFeatureInfo& gpu_feature_info,
+ SharedContextState* context_state);
~SharedImageBackingFactoryAHB() override;
// SharedImageBackingFactory implementation.
@@ -40,6 +42,13 @@ class GPU_GLES2_EXPORT SharedImageBackingFactoryAHB
uint32_t usage) override;
std::unique_ptr<SharedImageBacking> CreateSharedImage(
const Mailbox& mailbox,
+ viz::ResourceFormat format,
+ const gfx::Size& size,
+ const gfx::ColorSpace& color_space,
+ uint32_t usage,
+ base::span<const uint8_t> pixel_data) override;
+ std::unique_ptr<SharedImageBacking> CreateSharedImage(
+ const Mailbox& mailbox,
int client_id,
gfx::GpuMemoryBufferHandle handle,
gfx::BufferFormat format,
@@ -70,6 +79,7 @@ class GPU_GLES2_EXPORT SharedImageBackingFactoryAHB
// Used to limit the max size of AHardwareBuffer.
int32_t max_gl_texture_size_ = 0;
+ SharedContextState* context_state_ = nullptr;
DISALLOW_COPY_AND_ASSIGN(SharedImageBackingFactoryAHB);
};
diff --git a/chromium/gpu/command_buffer/service/shared_image_backing_factory_ahardwarebuffer_unittest.cc b/chromium/gpu/command_buffer/service/shared_image_backing_factory_ahardwarebuffer_unittest.cc
index 20ab75f9d58..839e44aa003 100644
--- a/chromium/gpu/command_buffer/service/shared_image_backing_factory_ahardwarebuffer_unittest.cc
+++ b/chromium/gpu/command_buffer/service/shared_image_backing_factory_ahardwarebuffer_unittest.cc
@@ -5,18 +5,22 @@
#include "gpu/command_buffer/service/shared_image_backing_factory_ahardwarebuffer.h"
#include "base/android/android_hardware_buffer_compat.h"
+#include "base/bind_helpers.h"
#include "gpu/command_buffer/common/mailbox.h"
#include "gpu/command_buffer/common/shared_image_usage.h"
#include "gpu/command_buffer/service/mailbox_manager_impl.h"
-#include "gpu/command_buffer/service/raster_decoder_context_state.h"
+#include "gpu/command_buffer/service/shared_context_state.h"
#include "gpu/command_buffer/service/shared_image_backing.h"
#include "gpu/command_buffer/service/shared_image_factory.h"
#include "gpu/command_buffer/service/shared_image_manager.h"
#include "gpu/command_buffer/service/shared_image_representation.h"
#include "gpu/command_buffer/service/texture_manager.h"
#include "gpu/config/gpu_driver_bug_workarounds.h"
+#include "gpu/config/gpu_feature_info.h"
+#include "gpu/config/gpu_preferences.h"
#include "testing/gtest/include/gtest/gtest.h"
#include "third_party/skia/include/core/SkImage.h"
+#include "third_party/skia/include/core/SkPromiseImageTexture.h"
#include "third_party/skia/include/core/SkSurface.h"
#include "third_party/skia/include/gpu/GrBackendSurface.h"
#include "ui/gfx/color_space.h"
@@ -47,14 +51,18 @@ class SharedImageBackingFactoryAHBTest : public testing::Test {
GpuDriverBugWorkarounds workarounds;
workarounds.max_texture_size = INT_MAX - 1;
- backing_factory_ = std::make_unique<SharedImageBackingFactoryAHB>(
- workarounds, GpuFeatureInfo());
scoped_refptr<gl::GLShareGroup> share_group = new gl::GLShareGroup();
- context_state_ = new raster::RasterDecoderContextState(
+ context_state_ = base::MakeRefCounted<SharedContextState>(
std::move(share_group), surface_, context_,
- false /* use_virtualized_gl_contexts */);
+ false /* use_virtualized_gl_contexts */, base::DoNothing());
context_state_->InitializeGrContext(workarounds, nullptr);
+ auto feature_info =
+ base::MakeRefCounted<gles2::FeatureInfo>(workarounds, GpuFeatureInfo());
+ context_state_->InitializeGL(GpuPreferences(), std::move(feature_info));
+
+ backing_factory_ = std::make_unique<SharedImageBackingFactoryAHB>(
+ workarounds, GpuFeatureInfo(), context_state_.get());
memory_type_tracker_ = std::make_unique<MemoryTypeTracker>(nullptr);
shared_image_representation_factory_ =
@@ -62,12 +70,12 @@ class SharedImageBackingFactoryAHBTest : public testing::Test {
&shared_image_manager_, nullptr);
}
- GrContext* gr_context() { return context_state_->gr_context; }
+ GrContext* gr_context() { return context_state_->gr_context(); }
protected:
scoped_refptr<gl::GLSurface> surface_;
scoped_refptr<gl::GLContext> context_;
- scoped_refptr<raster::RasterDecoderContextState> context_state_;
+ scoped_refptr<SharedContextState> context_state_;
std::unique_ptr<SharedImageBackingFactoryAHB> backing_factory_;
gles2::MailboxManagerImpl mailbox_manager_;
SharedImageManager shared_image_manager_;
@@ -138,18 +146,19 @@ TEST_F(SharedImageBackingFactoryAHBTest, Basic) {
shared_image_representation_factory_->ProduceSkia(mailbox);
EXPECT_TRUE(skia_representation);
auto surface = skia_representation->BeginWriteAccess(
- gr_context(), 0, kRGBA_8888_SkColorType,
- SkSurfaceProps(0, kUnknown_SkPixelGeometry));
+ gr_context(), 0, SkSurfaceProps(0, kUnknown_SkPixelGeometry));
EXPECT_TRUE(surface);
EXPECT_EQ(size.width(), surface->width());
EXPECT_EQ(size.height(), surface->height());
skia_representation->EndWriteAccess(std::move(surface));
- GrBackendTexture backend_texture;
- EXPECT_TRUE(skia_representation->BeginReadAccess(
-
- kRGBA_8888_SkColorType, &backend_texture));
- EXPECT_EQ(size.width(), backend_texture.width());
- EXPECT_EQ(size.width(), backend_texture.width());
+ auto promise_texture = skia_representation->BeginReadAccess(nullptr);
+ EXPECT_TRUE(promise_texture);
+ if (promise_texture) {
+ GrBackendTexture backend_texture = promise_texture->backendTexture();
+ EXPECT_TRUE(backend_texture.isValid());
+ EXPECT_EQ(size.width(), backend_texture.width());
+ EXPECT_EQ(size.height(), backend_texture.height());
+ }
skia_representation->EndReadAccess();
skia_representation.reset();
@@ -206,15 +215,18 @@ TEST_F(SharedImageBackingFactoryAHBTest, GLSkiaGL) {
auto skia_representation =
shared_image_representation_factory_->ProduceSkia(mailbox);
EXPECT_TRUE(skia_representation);
- GrBackendTexture backend_texture;
- EXPECT_TRUE(skia_representation->BeginReadAccess(kRGBA_8888_SkColorType,
- &backend_texture));
- EXPECT_EQ(size.width(), backend_texture.width());
- EXPECT_EQ(size.width(), backend_texture.width());
+ auto promise_texture = skia_representation->BeginReadAccess(nullptr);
+ EXPECT_TRUE(promise_texture);
+ if (promise_texture) {
+ GrBackendTexture backend_texture = promise_texture->backendTexture();
+ EXPECT_TRUE(backend_texture.isValid());
+ EXPECT_EQ(size.width(), backend_texture.width());
+ EXPECT_EQ(size.height(), backend_texture.height());
+ }
// Create an Sk Image from GrBackendTexture.
auto sk_image = SkImage::MakeFromTexture(
- gr_context(), backend_texture, kTopLeft_GrSurfaceOrigin,
+ gr_context(), promise_texture->backendTexture(), kTopLeft_GrSurfaceOrigin,
kRGBA_8888_SkColorType, kOpaque_SkAlphaType, nullptr);
SkImageInfo dst_info =
diff --git a/chromium/gpu/command_buffer/service/shared_image_backing_factory_gl_texture.cc b/chromium/gpu/command_buffer/service/shared_image_backing_factory_gl_texture.cc
index 9fa3c3c2cc8..71542287e4c 100644
--- a/chromium/gpu/command_buffer/service/shared_image_backing_factory_gl_texture.cc
+++ b/chromium/gpu/command_buffer/service/shared_image_backing_factory_gl_texture.cc
@@ -8,6 +8,7 @@
#include "base/trace_event/memory_dump_manager.h"
#include "base/trace_event/trace_event.h"
#include "components/viz/common/resources/resource_format_utils.h"
+#include "gpu/command_buffer/common/gles2_cmd_utils.h"
#include "gpu/command_buffer/common/gpu_memory_buffer_support.h"
#include "gpu/command_buffer/common/shared_image_trace_utils.h"
#include "gpu/command_buffer/common/shared_image_usage.h"
@@ -20,7 +21,7 @@
#include "gpu/command_buffer/service/skia_utils.h"
#include "gpu/config/gpu_finch_features.h"
#include "gpu/config/gpu_preferences.h"
-#include "third_party/skia/include/gpu/GrBackendSurface.h"
+#include "third_party/skia/include/core/SkPromiseImageTexture.h"
#include "ui/gfx/color_space.h"
#include "ui/gfx/geometry/size.h"
#include "ui/gl/gl_bindings.h"
@@ -33,6 +34,106 @@ namespace gpu {
namespace {
+class ScopedResetAndRestoreUnpackState {
+ public:
+ ScopedResetAndRestoreUnpackState(gl::GLApi* api,
+ bool es3_capable,
+ bool desktop_gl,
+ bool supports_unpack_subimage,
+ bool uploading_data)
+ : api_(api) {
+ if (es3_capable) {
+ // Need to unbind any GL_PIXEL_UNPACK_BUFFER for the nullptr in
+ // glTexImage2D to mean "no pixels" (as opposed to offset 0 in the
+ // buffer).
+ api_->glGetIntegervFn(GL_PIXEL_UNPACK_BUFFER_BINDING, &unpack_buffer_);
+ if (unpack_buffer_)
+ api_->glBindBufferFn(GL_PIXEL_UNPACK_BUFFER, 0);
+ }
+ if (uploading_data) {
+ api_->glGetIntegervFn(GL_UNPACK_ALIGNMENT, &unpack_alignment_);
+ if (unpack_alignment_ != 4)
+ api_->glPixelStoreiFn(GL_UNPACK_ALIGNMENT, 4);
+
+ if (es3_capable || supports_unpack_subimage) {
+ api_->glGetIntegervFn(GL_UNPACK_ROW_LENGTH, &unpack_row_length_);
+ if (unpack_row_length_)
+ api_->glPixelStoreiFn(GL_UNPACK_ROW_LENGTH, 0);
+ api_->glGetIntegervFn(GL_UNPACK_SKIP_ROWS, &unpack_skip_rows_);
+ if (unpack_skip_rows_)
+ api_->glPixelStoreiFn(GL_UNPACK_SKIP_ROWS, 0);
+ api_->glGetIntegervFn(GL_UNPACK_SKIP_PIXELS, &unpack_skip_pixels_);
+ if (unpack_skip_pixels_)
+ api_->glPixelStoreiFn(GL_UNPACK_SKIP_PIXELS, 0);
+ }
+
+ if (es3_capable) {
+ api_->glGetIntegervFn(GL_UNPACK_SKIP_IMAGES, &unpack_skip_images_);
+ if (unpack_skip_images_)
+ api_->glPixelStoreiFn(GL_UNPACK_SKIP_IMAGES, 0);
+ api_->glGetIntegervFn(GL_UNPACK_IMAGE_HEIGHT, &unpack_image_height_);
+ if (unpack_image_height_)
+ api_->glPixelStoreiFn(GL_UNPACK_IMAGE_HEIGHT, 0);
+ }
+
+ if (desktop_gl) {
+ api->glGetBooleanvFn(GL_UNPACK_SWAP_BYTES, &unpack_swap_bytes_);
+ if (unpack_swap_bytes_)
+ api->glPixelStoreiFn(GL_UNPACK_SWAP_BYTES, GL_FALSE);
+ api->glGetBooleanvFn(GL_UNPACK_LSB_FIRST, &unpack_lsb_first_);
+ if (unpack_lsb_first_)
+ api->glPixelStoreiFn(GL_UNPACK_LSB_FIRST, GL_FALSE);
+ }
+ }
+ }
+
+ ~ScopedResetAndRestoreUnpackState() {
+ if (unpack_buffer_)
+ api_->glBindBufferFn(GL_PIXEL_UNPACK_BUFFER, unpack_buffer_);
+ if (unpack_alignment_ != 4)
+ api_->glPixelStoreiFn(GL_UNPACK_ALIGNMENT, unpack_alignment_);
+ if (unpack_row_length_)
+ api_->glPixelStoreiFn(GL_UNPACK_ROW_LENGTH, unpack_row_length_);
+ if (unpack_image_height_)
+ api_->glPixelStoreiFn(GL_UNPACK_IMAGE_HEIGHT, unpack_image_height_);
+ if (unpack_skip_rows_)
+ api_->glPixelStoreiFn(GL_UNPACK_SKIP_ROWS, unpack_skip_rows_);
+ if (unpack_skip_images_)
+ api_->glPixelStoreiFn(GL_UNPACK_SKIP_IMAGES, unpack_skip_images_);
+ if (unpack_skip_pixels_)
+ api_->glPixelStoreiFn(GL_UNPACK_SKIP_PIXELS, unpack_skip_pixels_);
+ if (unpack_swap_bytes_)
+ api_->glPixelStoreiFn(GL_UNPACK_SWAP_BYTES, unpack_swap_bytes_);
+ if (unpack_lsb_first_)
+ api_->glPixelStoreiFn(GL_UNPACK_LSB_FIRST, unpack_lsb_first_);
+ }
+
+ private:
+ gl::GLApi* const api_;
+
+ // Always used if |es3_capable|.
+ GLint unpack_buffer_ = 0;
+
+ // Always used when |uploading_data|.
+ GLint unpack_alignment_ = 4;
+
+  // Used when |uploading_data| and (|es3_capable| or
+ // |supports_unpack_subimage|).
+ GLint unpack_row_length_ = 0;
+ GLint unpack_skip_pixels_ = 0;
+ GLint unpack_skip_rows_ = 0;
+
+ // Used when |uploading_data| and |es3_capable|.
+ GLint unpack_skip_images_ = 0;
+ GLint unpack_image_height_ = 0;
+
+ // Used when |desktop_gl|.
+ GLboolean unpack_swap_bytes_ = GL_FALSE;
+ GLboolean unpack_lsb_first_ = GL_FALSE;
+
+ DISALLOW_COPY_AND_ASSIGN(ScopedResetAndRestoreUnpackState);
+};
+
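
A usage sketch of the guard above, mirroring the CreateSharedImage() call sites
later in this file (|api| and the capability flags are hypothetical locals
here): unpack state is reset in the constructor and restored when the guard
leaves scope, so an initial-data upload cannot leak GL_UNPACK_* state into
later GL work.

{
  ScopedResetAndRestoreUnpackState scoped_unpack_state(
      api, es3_capable, desktop_gl, supports_unpack_subimage,
      /*uploading_data=*/true);
  api->glTexImage2DFn(target, 0, internal_format, width, height, 0, gl_format,
                      gl_type, pixel_data.data());
}  // All modified GL_UNPACK_* values are restored here.
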
class ScopedRestoreTexture {
public:
ScopedRestoreTexture(gl::GLApi* api, GLenum target)
@@ -128,38 +229,45 @@ class SharedImageRepresentationGLTexturePassthroughImpl
class SharedImageRepresentationSkiaImpl : public SharedImageRepresentationSkia {
public:
- SharedImageRepresentationSkiaImpl(SharedImageManager* manager,
- SharedImageBacking* backing,
- MemoryTypeTracker* tracker,
- GLenum target,
- GLenum internal_format,
- GLenum driver_internal_format,
- GLuint service_id)
+ SharedImageRepresentationSkiaImpl(
+ SharedImageManager* manager,
+ SharedImageBacking* backing,
+ sk_sp<SkPromiseImageTexture> cached_promise_texture,
+ MemoryTypeTracker* tracker,
+ GLenum target,
+ GLuint service_id)
: SharedImageRepresentationSkia(manager, backing, tracker),
- target_(target),
- internal_format_(internal_format),
- driver_internal_format_(driver_internal_format),
- service_id_(service_id) {}
+ promise_texture_(cached_promise_texture) {
+ if (!promise_texture_) {
+ GrBackendTexture backend_texture;
+ GetGrBackendTexture(gl::GLContext::GetCurrent()->GetVersionInfo(), target,
+ size(), service_id, format(), &backend_texture);
+ promise_texture_ = SkPromiseImageTexture::Make(backend_texture);
+ }
+#if DCHECK_IS_ON()
+ context_ = gl::GLContext::GetCurrent();
+#endif
+ }
~SharedImageRepresentationSkiaImpl() override { DCHECK(!write_surface_); }
sk_sp<SkSurface> BeginWriteAccess(
GrContext* gr_context,
int final_msaa_count,
- SkColorType color_type,
const SkSurfaceProps& surface_props) override {
+ CheckContext();
if (write_surface_)
return nullptr;
- GrBackendTexture backend_texture;
- if (!GetGrBackendTexture(target_, size(), internal_format_,
- driver_internal_format_, service_id_, color_type,
- &backend_texture)) {
+ if (!promise_texture_) {
return nullptr;
}
+ SkColorType sk_color_type = viz::ResourceFormatToClosestSkColorType(
+ /*gpu_compositing=*/true, format());
auto surface = SkSurface::MakeFromBackendTextureAsRenderTarget(
- gr_context, backend_texture, kTopLeft_GrSurfaceOrigin, final_msaa_count,
- color_type, nullptr, &surface_props);
+ gr_context, promise_texture_->backendTexture(),
+ kTopLeft_GrSurfaceOrigin, final_msaa_count, sk_color_type, nullptr,
+ &surface_props);
write_surface_ = surface.get();
return surface;
}
@@ -167,31 +275,35 @@ class SharedImageRepresentationSkiaImpl : public SharedImageRepresentationSkia {
void EndWriteAccess(sk_sp<SkSurface> surface) override {
DCHECK_EQ(surface.get(), write_surface_);
DCHECK(surface->unique());
+ CheckContext();
// TODO(ericrk): Keep the surface around for re-use.
write_surface_ = nullptr;
}
- bool BeginReadAccess(SkColorType color_type,
- GrBackendTexture* backend_texture) override {
- if (!GetGrBackendTexture(target_, size(), internal_format_,
- driver_internal_format_, service_id_, color_type,
- backend_texture)) {
- return false;
- }
- return true;
+ sk_sp<SkPromiseImageTexture> BeginReadAccess(SkSurface* sk_surface) override {
+ CheckContext();
+ return promise_texture_;
}
void EndReadAccess() override {
// TODO(ericrk): Handle begin/end correctness checks.
}
+ sk_sp<SkPromiseImageTexture> promise_texture() { return promise_texture_; }
+
private:
- GLenum target_;
- GLenum internal_format_ = 0;
- GLenum driver_internal_format_ = 0;
- GLuint service_id_;
+ void CheckContext() {
+#if DCHECK_IS_ON()
+ DCHECK(gl::GLContext::GetCurrent() == context_);
+#endif
+ }
+
+ sk_sp<SkPromiseImageTexture> promise_texture_;
SkSurface* write_surface_ = nullptr;
+#if DCHECK_IS_ON()
+ gl::GLContext* context_;
+#endif
};
// Implementation of SharedImageBacking that creates a GL Texture and stores it
@@ -203,18 +315,14 @@ class SharedImageBackingGLTexture : public SharedImageBacking {
const gfx::Size& size,
const gfx::ColorSpace& color_space,
uint32_t usage,
- gles2::Texture* texture,
- GLenum internal_format,
- GLenum driver_internal_format)
+ gles2::Texture* texture)
: SharedImageBacking(mailbox,
format,
size,
color_space,
usage,
texture->estimated_size()),
- texture_(texture),
- internal_format_(internal_format),
- driver_internal_format_(driver_internal_format) {
+ texture_(texture) {
DCHECK(texture_);
}
@@ -288,15 +396,16 @@ class SharedImageBackingGLTexture : public SharedImageBacking {
std::unique_ptr<SharedImageRepresentationSkia> ProduceSkia(
SharedImageManager* manager,
MemoryTypeTracker* tracker) override {
- return std::make_unique<SharedImageRepresentationSkiaImpl>(
- manager, this, tracker, texture_->target(), internal_format_,
- driver_internal_format_, texture_->service_id());
+ auto result = std::make_unique<SharedImageRepresentationSkiaImpl>(
+ manager, this, cached_promise_texture_, tracker, texture_->target(),
+ texture_->service_id());
+ cached_promise_texture_ = result->promise_texture();
+ return result;
}
private:
gles2::Texture* texture_ = nullptr;
- GLenum internal_format_ = 0;
- GLenum driver_internal_format_ = 0;
+ sk_sp<SkPromiseImageTexture> cached_promise_texture_;
};
// Implementation of SharedImageBacking that creates a GL Texture and stores it
@@ -311,8 +420,7 @@ class SharedImageBackingPassthroughGLTexture : public SharedImageBacking {
const gfx::ColorSpace& color_space,
uint32_t usage,
scoped_refptr<gles2::TexturePassthrough> passthrough_texture,
- GLenum internal_format,
- GLenum driver_internal_format)
+ bool is_cleared)
: SharedImageBacking(mailbox,
format,
size,
@@ -320,8 +428,7 @@ class SharedImageBackingPassthroughGLTexture : public SharedImageBacking {
usage,
passthrough_texture->estimated_size()),
texture_passthrough_(std::move(passthrough_texture)),
- internal_format_(internal_format),
- driver_internal_format_(driver_internal_format) {
+ is_cleared_(is_cleared) {
DCHECK(texture_passthrough_);
}
@@ -359,6 +466,26 @@ class SharedImageBackingPassthroughGLTexture : public SharedImageBacking {
texture_passthrough_.reset();
}
+ void OnMemoryDump(const std::string& dump_name,
+ base::trace_event::MemoryAllocatorDump* dump,
+ base::trace_event::ProcessMemoryDump* pmd,
+ uint64_t client_tracing_id) override {
+ // Add a |service_guid| which expresses shared ownership between the
+ // various GPU dumps.
+ auto client_guid = GetSharedImageGUIDForTracing(mailbox());
+ auto service_guid = gl::GetGLTextureServiceGUIDForTracing(
+ texture_passthrough_->service_id());
+ pmd->CreateSharedGlobalAllocatorDump(service_guid);
+
+ int importance = 2; // This client always owns the ref.
+ pmd->AddOwnershipEdge(client_guid, service_guid, importance);
+
+ auto* gl_image = texture_passthrough_->GetLevelImage(
+ texture_passthrough_->target(), /*level=*/0);
+ if (gl_image)
+ gl_image->OnMemoryDump(pmd, client_tracing_id, dump_name);
+ }
+
protected:
std::unique_ptr<SharedImageRepresentationGLTexturePassthrough>
ProduceGLTexturePassthrough(SharedImageManager* manager,
@@ -369,16 +496,17 @@ class SharedImageBackingPassthroughGLTexture : public SharedImageBacking {
std::unique_ptr<SharedImageRepresentationSkia> ProduceSkia(
SharedImageManager* manager,
MemoryTypeTracker* tracker) override {
- return std::make_unique<SharedImageRepresentationSkiaImpl>(
- manager, this, tracker, texture_passthrough_->target(),
- internal_format_, driver_internal_format_,
- texture_passthrough_->service_id());
+ auto result = std::make_unique<SharedImageRepresentationSkiaImpl>(
+ manager, this, cached_promise_texture_, tracker,
+ texture_passthrough_->target(), texture_passthrough_->service_id());
+ cached_promise_texture_ = result->promise_texture();
+ return result;
}
private:
scoped_refptr<gles2::TexturePassthrough> texture_passthrough_;
- GLenum internal_format_ = 0;
- GLenum driver_internal_format_ = 0;
+ sk_sp<SkPromiseImageTexture> cached_promise_texture_;
+
bool is_cleared_ = false;
};
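
Both ProduceSkia() overrides above memoize the promise texture: the first Skia
representation creates it, and the backing hands the same sk_sp to every later
representation, so Skia sees a stable backend-texture identity for the backing.
The pattern, reduced to a self-contained sketch (PromiseTexture is a
hypothetical stand-in for SkPromiseImageTexture):

#include <memory>

struct PromiseTexture {};

class BackingSketch {
 public:
  std::shared_ptr<PromiseTexture> ProduceSkia() {
    // Created on first use, shared by every subsequent representation.
    if (!cached_promise_texture_)
      cached_promise_texture_ = std::make_shared<PromiseTexture>();
    return cached_promise_texture_;
  }

 private:
  std::shared_ptr<PromiseTexture> cached_promise_texture_;
};
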
@@ -406,6 +534,12 @@ SharedImageBackingFactoryGLTexture::SharedImageBackingFactoryGLTexture(
feature_info->feature_flags().gpu_memory_buffer_formats;
texture_usage_angle_ = feature_info->feature_flags().angle_texture_usage;
es3_capable_ = feature_info->IsES3Capable();
+ desktop_gl_ = !feature_info->gl_version_info().is_es;
+  // Can't use the value from feature_info, as we unconditionally enable this
+  // extension, and assume it can't be used if PBOs are not used (which isn't
+  // true for Skia used directly against GL).
+ supports_unpack_subimage_ =
+ gl::g_current_gl_driver->ext.b_GL_EXT_unpack_subimage;
bool enable_texture_storage =
feature_info->feature_flags().ext_texture_storage;
bool enable_scanout_images =
@@ -414,17 +548,20 @@ SharedImageBackingFactoryGLTexture::SharedImageBackingFactoryGLTexture(
for (int i = 0; i <= viz::RESOURCE_FORMAT_MAX; ++i) {
auto format = static_cast<viz::ResourceFormat>(i);
FormatInfo& info = format_info_[i];
- // TODO(piman): do we need to support ETC1?
- if (!viz::GLSupportsFormat(format) ||
- viz::IsResourceFormatCompressed(format))
+ if (!viz::GLSupportsFormat(format))
continue;
GLuint image_internal_format = viz::GLInternalFormat(format);
GLenum gl_format = viz::GLDataFormat(format);
GLenum gl_type = viz::GLDataType(format);
- if (validators->texture_internal_format.IsValid(image_internal_format) &&
- validators->texture_format.IsValid(gl_format) &&
+ bool uncompressed_format_valid =
+ validators->texture_internal_format.IsValid(image_internal_format) &&
+ validators->texture_format.IsValid(gl_format);
+ bool compressed_format_valid =
+ validators->compressed_texture_format.IsValid(image_internal_format);
+ if ((uncompressed_format_valid || compressed_format_valid) &&
validators->pixel_type.IsValid(gl_type)) {
info.enabled = true;
+ info.is_compressed = compressed_format_valid;
info.gl_format = gl_format;
info.gl_type = gl_type;
info.swizzle = gles2::TextureManager::GetCompatibilitySwizzle(
@@ -437,7 +574,7 @@ SharedImageBackingFactoryGLTexture::SharedImageBackingFactoryGLTexture(
}
if (!info.enabled)
continue;
- if (enable_texture_storage) {
+ if (enable_texture_storage && !info.is_compressed) {
GLuint storage_internal_format = viz::TextureStorageFormat(format);
if (validators->texture_internal_format_storage.IsValid(
storage_internal_format)) {
@@ -481,6 +618,18 @@ SharedImageBackingFactoryGLTexture::CreateSharedImage(
const gfx::Size& size,
const gfx::ColorSpace& color_space,
uint32_t usage) {
+ return CreateSharedImage(mailbox, format, size, color_space, usage,
+ base::span<const uint8_t>());
+}
+
+std::unique_ptr<SharedImageBacking>
+SharedImageBackingFactoryGLTexture::CreateSharedImage(
+ const Mailbox& mailbox,
+ viz::ResourceFormat format,
+ const gfx::Size& size,
+ const gfx::ColorSpace& color_space,
+ uint32_t usage,
+ base::span<const uint8_t> pixel_data) {
const FormatInfo& format_info = format_info_[format];
if (!format_info.enabled) {
LOG(ERROR) << "CreateSharedImage: invalid format";
@@ -501,6 +650,54 @@ SharedImageBackingFactoryGLTexture::CreateSharedImage(
GLenum target = use_buffer ? format_info.target_for_scanout : GL_TEXTURE_2D;
+ // If we have initial data to upload, ensure it is sized appropriately.
+ if (!pixel_data.empty()) {
+ if (format_info.is_compressed) {
+ const char* error_message = "unspecified";
+ if (!gles2::ValidateCompressedTexDimensions(
+ target, 0 /* level */, size.width(), size.height(), 1 /* depth */,
+ format_info.image_internal_format, false /* restrict_for_webgl */,
+ &error_message)) {
+ LOG(ERROR) << "CreateSharedImage: "
+ "ValidateCompressedTexDimensionsFailed with error: "
+ << error_message;
+ return nullptr;
+ }
+
+ GLsizei bytes_required = 0;
+ if (!gles2::GetCompressedTexSizeInBytes(
+ nullptr /* function_name */, size.width(), size.height(),
+ 1 /* depth */, format_info.image_internal_format, &bytes_required,
+ nullptr /* error_state */)) {
+ LOG(ERROR) << "CreateSharedImage: Unable to compute required size for "
+ "initial texture upload.";
+ return nullptr;
+ }
+
+ if (bytes_required < 0 ||
+ pixel_data.size() != static_cast<size_t>(bytes_required)) {
+ LOG(ERROR) << "CreateSharedImage: Initial data does not have expected "
+ "size.";
+ return nullptr;
+ }
+ } else {
+ uint32_t bytes_required;
+ if (!gles2::GLES2Util::ComputeImageDataSizes(
+ size.width(), size.height(), 1 /* depth */, format_info.gl_format,
+ format_info.gl_type, 4 /* alignment */, &bytes_required, nullptr,
+ nullptr)) {
+ LOG(ERROR) << "CreateSharedImage: Unable to compute required size for "
+ "initial texture upload.";
+ return nullptr;
+ }
+ if (pixel_data.size() != bytes_required) {
+ LOG(ERROR) << "CreateSharedImage: Initial data does not have expected "
+ "size.";
+ return nullptr;
+ }
+ }
+ }
+
gl::GLApi* api = gl::g_current_gl_context;
ScopedRestoreTexture scoped_restore(api, target);
@@ -516,6 +713,7 @@ SharedImageBackingFactoryGLTexture::CreateSharedImage(
// the internal format in the LevelInfo. https://crbug.com/628064
GLuint level_info_internal_format = format_info.gl_format;
bool is_cleared = false;
+ bool needs_subimage_upload = false;
if (use_buffer) {
image = image_factory_->CreateAnonymousImage(
size, format_info.buffer_format, gfx::BufferUsage::SCANOUT,
@@ -528,32 +726,44 @@ SharedImageBackingFactoryGLTexture::CreateSharedImage(
level_info_internal_format = image->GetInternalFormat();
if (color_space.IsValid())
image->SetColorSpace(color_space);
+ needs_subimage_upload = !pixel_data.empty();
} else if (format_info.supports_storage) {
api->glTexStorage2DEXTFn(target, 1, format_info.storage_internal_format,
size.width(), size.height());
+ needs_subimage_upload = !pixel_data.empty();
+ } else if (format_info.is_compressed) {
+ ScopedResetAndRestoreUnpackState scoped_unpack_state(
+ api, es3_capable_, desktop_gl_, supports_unpack_subimage_,
+ !pixel_data.empty());
+ api->glCompressedTexImage2DFn(target, 0, format_info.image_internal_format,
+ size.width(), size.height(), 0,
+ pixel_data.size(), pixel_data.data());
} else {
- // Need to unbind any GL_PIXEL_UNPACK_BUFFER for the nullptr in
- // glTexImage2D to mean "no pixels" (as opposed to offset 0 in the
- // buffer).
- GLint bound_pixel_unpack_buffer = 0;
- if (es3_capable_) {
- api->glGetIntegervFn(GL_PIXEL_UNPACK_BUFFER_BINDING,
- &bound_pixel_unpack_buffer);
- if (bound_pixel_unpack_buffer)
- api->glBindBufferFn(GL_PIXEL_UNPACK_BUFFER, 0);
- }
+ ScopedResetAndRestoreUnpackState scoped_unpack_state(
+ api, es3_capable_, desktop_gl_, supports_unpack_subimage_,
+ !pixel_data.empty());
api->glTexImage2DFn(target, 0, format_info.image_internal_format,
size.width(), size.height(), 0,
format_info.adjusted_format, format_info.gl_type,
- nullptr);
- if (bound_pixel_unpack_buffer)
- api->glBindBufferFn(GL_PIXEL_UNPACK_BUFFER, bound_pixel_unpack_buffer);
+ pixel_data.data());
+ }
+
+ // If we are using a buffer or TexStorage API but have data to upload, do so
+ // now via TexSubImage2D.
+ if (needs_subimage_upload) {
+ ScopedResetAndRestoreUnpackState scoped_unpack_state(
+ api, es3_capable_, desktop_gl_, supports_unpack_subimage_,
+ !pixel_data.empty());
+ api->glTexSubImage2DFn(target, 0, 0, 0, size.width(), size.height(),
+ format_info.adjusted_format, format_info.gl_type,
+ pixel_data.data());
}
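+ // A backing seeded with initial pixel data is fully initialized, so it is
+ // always reported as cleared below.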
return MakeBacking(mailbox, target, service_id, image, gles2::Texture::BOUND,
level_info_internal_format, format_info.gl_format,
- format_info.gl_type, format_info.swizzle, is_cleared,
- format, size, color_space, usage);
+ format_info.gl_type, format_info.swizzle,
+ pixel_data.empty() ? is_cleared : true, format, size,
+ color_space, usage);
}
std::unique_ptr<SharedImageBacking>
@@ -608,15 +818,11 @@ SharedImageBackingFactoryGLTexture::CreateSharedImage(
image_state = gles2::Texture::COPIED;
}
- // TODO(piman): this is consistent with
- // GLES2DecoderImpl::BindTexImage2DCHROMIUMImpl or
- // RasterDecoderImpl::DoBindTexImage2DCHROMIUM but seems wrong:
- //
- // - internalformat might be sized, which is wrong for format
- // - gl_type shouldn't be GL_UNSIGNED_BYTE for RGBA4444 for example.
GLuint internal_format = image->GetInternalFormat();
- GLenum gl_format = internal_format;
- GLenum gl_type = GL_UNSIGNED_BYTE;
+ GLenum gl_format =
+ gles2::TextureManager::ExtractFormatFromStorageFormat(internal_format);
+ GLenum gl_type =
+ gles2::TextureManager::ExtractTypeFromStorageFormat(internal_format);
return MakeBacking(mailbox, target, service_id, image, image_state,
internal_format, gl_format, gl_type, nullptr, true, format,
@@ -629,24 +835,23 @@ scoped_refptr<gl::GLImage> SharedImageBackingFactoryGLTexture::MakeGLImage(
gfx::BufferFormat format,
SurfaceHandle surface_handle,
const gfx::Size& size) {
+ if (handle.type == gfx::SHARED_MEMORY_BUFFER) {
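+ // Shared memory handles are wrapped directly; the ImageFactory is only
+ // needed for the native buffer path below.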
+ if (!base::IsValueInRangeForNumericType<size_t>(handle.stride))
+ return nullptr;
+ auto image = base::MakeRefCounted<gl::GLImageSharedMemory>(size);
+ if (!image->Initialize(handle.region, handle.id, format, handle.offset,
+ handle.stride)) {
+ return nullptr;
+ }
+
+ return image;
+ }
+
if (!image_factory_)
return nullptr;
- switch (handle.type) {
- case gfx::SHARED_MEMORY_BUFFER: {
- if (!base::IsValueInRangeForNumericType<size_t>(handle.stride))
- return nullptr;
- auto image = base::MakeRefCounted<gl::GLImageSharedMemory>(size);
- if (!image->Initialize(handle.region, handle.id, format, handle.offset,
- handle.stride)) {
- return nullptr;
- }
- return image;
- }
- default:
- return image_factory_->CreateImageForGpuMemoryBuffer(
- std::move(handle), size, format, client_id, surface_handle);
- }
+ return image_factory_->CreateImageForGpuMemoryBuffer(
+ std::move(handle), size, format, client_id, surface_handle);
}
std::unique_ptr<SharedImageBacking>
@@ -665,12 +870,6 @@ SharedImageBackingFactoryGLTexture::MakeBacking(
const gfx::Size& size,
const gfx::ColorSpace& color_space,
uint32_t usage) {
- // Calculate |driver_internal_format| here rather than caching on
- // format_info, as we need to use the |level_info_internal_format| which may
- // depend on the generated |image|.
- GLenum driver_internal_format =
- gl::GetInternalFormat(gl::GLContext::GetCurrent()->GetVersionInfo(),
- level_info_internal_format);
if (use_passthrough_) {
scoped_refptr<gles2::TexturePassthrough> passthrough_texture =
base::MakeRefCounted<gles2::TexturePassthrough>(service_id, target);
@@ -686,8 +885,7 @@ SharedImageBackingFactoryGLTexture::MakeBacking(
return std::make_unique<SharedImageBackingPassthroughGLTexture>(
mailbox, format, size, color_space, usage,
- std::move(passthrough_texture), level_info_internal_format,
- driver_internal_format);
+ std::move(passthrough_texture), is_cleared);
} else {
gles2::Texture* texture = new gles2::Texture(service_id);
texture->SetLightweightRef();
@@ -706,8 +904,7 @@ SharedImageBackingFactoryGLTexture::MakeBacking(
texture->SetImmutable(true);
return std::make_unique<SharedImageBackingGLTexture>(
- mailbox, format, size, color_space, usage, texture,
- level_info_internal_format, driver_internal_format);
+ mailbox, format, size, color_space, usage, texture);
}
}
diff --git a/chromium/gpu/command_buffer/service/shared_image_backing_factory_gl_texture.h b/chromium/gpu/command_buffer/service/shared_image_backing_factory_gl_texture.h
index 6fec6d23f61..5705b92bf1b 100644
--- a/chromium/gpu/command_buffer/service/shared_image_backing_factory_gl_texture.h
+++ b/chromium/gpu/command_buffer/service/shared_image_backing_factory_gl_texture.h
@@ -50,6 +50,13 @@ class GPU_GLES2_EXPORT SharedImageBackingFactoryGLTexture
uint32_t usage) override;
std::unique_ptr<SharedImageBacking> CreateSharedImage(
const Mailbox& mailbox,
+ viz::ResourceFormat format,
+ const gfx::Size& size,
+ const gfx::ColorSpace& color_space,
+ uint32_t usage,
+ base::span<const uint8_t> pixel_data) override;
+ std::unique_ptr<SharedImageBacking> CreateSharedImage(
+ const Mailbox& mailbox,
int client_id,
gfx::GpuMemoryBufferHandle handle,
gfx::BufferFormat format,
@@ -92,6 +99,9 @@ class GPU_GLES2_EXPORT SharedImageBackingFactoryGLTexture
// Whether to allow SHARED_IMAGE_USAGE_SCANOUT.
bool allow_scanout = false;
+ // Whether the texture is a compressed type.
+ bool is_compressed = false;
+
GLenum gl_format = 0;
GLenum gl_type = 0;
const gles2::Texture::CompatibilitySwizzle* swizzle = nullptr;
@@ -126,6 +136,8 @@ class GPU_GLES2_EXPORT SharedImageBackingFactoryGLTexture
int32_t max_texture_size_ = 0;
bool texture_usage_angle_ = false;
bool es3_capable_ = false;
+ bool desktop_gl_ = false;
+ bool supports_unpack_subimage_ = false;
};
} // namespace gpu
diff --git a/chromium/gpu/command_buffer/service/shared_image_backing_factory_gl_texture_unittest.cc b/chromium/gpu/command_buffer/service/shared_image_backing_factory_gl_texture_unittest.cc
index 2810dffbd44..a1ee348f004 100644
--- a/chromium/gpu/command_buffer/service/shared_image_backing_factory_gl_texture_unittest.cc
+++ b/chromium/gpu/command_buffer/service/shared_image_backing_factory_gl_texture_unittest.cc
@@ -4,13 +4,15 @@
#include "gpu/command_buffer/service/shared_image_backing_factory_gl_texture.h"
+#include "base/bind_helpers.h"
#include "components/viz/common/resources/resource_format_utils.h"
+#include "components/viz/common/resources/resource_sizes.h"
#include "gpu/command_buffer/common/gpu_memory_buffer_support.h"
#include "gpu/command_buffer/common/mailbox.h"
#include "gpu/command_buffer/common/shared_image_usage.h"
#include "gpu/command_buffer/service/mailbox_manager_impl.h"
-#include "gpu/command_buffer/service/raster_decoder_context_state.h"
#include "gpu/command_buffer/service/service_utils.h"
+#include "gpu/command_buffer/service/shared_context_state.h"
#include "gpu/command_buffer/service/shared_image_backing.h"
#include "gpu/command_buffer/service/shared_image_factory.h"
#include "gpu/command_buffer/service/shared_image_manager.h"
@@ -21,6 +23,7 @@
#include "gpu/config/gpu_feature_info.h"
#include "gpu/config/gpu_preferences.h"
#include "testing/gtest/include/gtest/gtest.h"
+#include "third_party/skia/include/core/SkPromiseImageTexture.h"
#include "third_party/skia/include/core/SkSurface.h"
#include "third_party/skia/include/gpu/GrBackendSurface.h"
#include "ui/gfx/buffer_format_util.h"
@@ -54,33 +57,43 @@ class SharedImageBackingFactoryGLTextureTestBase
preferences, workarounds, GpuFeatureInfo(), factory);
scoped_refptr<gl::GLShareGroup> share_group = new gl::GLShareGroup();
- context_state_ = new raster::RasterDecoderContextState(
+ auto feature_info =
+ base::MakeRefCounted<gles2::FeatureInfo>(workarounds, GpuFeatureInfo());
+ context_state_ = base::MakeRefCounted<SharedContextState>(
std::move(share_group), surface_, context_,
- false /* use_virtualized_gl_contexts */);
+ false /* use_virtualized_gl_contexts */, base::DoNothing());
context_state_->InitializeGrContext(workarounds, nullptr);
+ context_state_->InitializeGL(GpuPreferences(), feature_info);
memory_type_tracker_ = std::make_unique<MemoryTypeTracker>(nullptr);
shared_image_representation_factory_ =
std::make_unique<SharedImageRepresentationFactory>(
&shared_image_manager_, nullptr);
+
+ supports_etc1_ =
+ feature_info->validators()->compressed_texture_format.IsValid(
+ GL_ETC1_RGB8_OES);
}
bool use_passthrough() {
return GetParam() && gles2::PassthroughCommandDecoderSupported();
}
- GrContext* gr_context() { return context_state_->gr_context; }
+ bool supports_etc1() { return supports_etc1_; }
+
+ GrContext* gr_context() { return context_state_->gr_context(); }
protected:
scoped_refptr<gl::GLSurface> surface_;
scoped_refptr<gl::GLContext> context_;
- scoped_refptr<raster::RasterDecoderContextState> context_state_;
+ scoped_refptr<SharedContextState> context_state_;
std::unique_ptr<SharedImageBackingFactoryGLTexture> backing_factory_;
gles2::MailboxManagerImpl mailbox_manager_;
SharedImageManager shared_image_manager_;
std::unique_ptr<MemoryTypeTracker> memory_type_tracker_;
std::unique_ptr<SharedImageRepresentationFactory>
shared_image_representation_factory_;
+ bool supports_etc1_ = false;
};
class SharedImageBackingFactoryGLTextureTest
@@ -97,7 +110,7 @@ class SharedImageBackingFactoryGLTextureTest
};
TEST_P(SharedImageBackingFactoryGLTextureTest, Basic) {
- auto mailbox = Mailbox::Generate();
+ auto mailbox = Mailbox::GenerateForSharedImage();
auto format = viz::ResourceFormat::RGBA_8888;
gfx::Size size(256, 256);
auto color_space = gfx::ColorSpace::CreateSRGB();
@@ -168,18 +181,19 @@ TEST_P(SharedImageBackingFactoryGLTextureTest, Basic) {
shared_image_representation_factory_->ProduceSkia(mailbox);
EXPECT_TRUE(skia_representation);
auto surface = skia_representation->BeginWriteAccess(
- gr_context(), 0, kRGBA_8888_SkColorType,
- SkSurfaceProps(0, kUnknown_SkPixelGeometry));
+ gr_context(), 0, SkSurfaceProps(0, kUnknown_SkPixelGeometry));
EXPECT_TRUE(surface);
EXPECT_EQ(size.width(), surface->width());
EXPECT_EQ(size.height(), surface->height());
skia_representation->EndWriteAccess(std::move(surface));
- GrBackendTexture backend_texture;
- EXPECT_TRUE(skia_representation->BeginReadAccess(
- kRGBA_8888_SkColorType, &backend_texture));
- EXPECT_EQ(size.width(), backend_texture.width());
- EXPECT_EQ(size.width(), backend_texture.width());
+ auto promise_texture = skia_representation->BeginReadAccess(nullptr);
+ EXPECT_TRUE(promise_texture);
+ if (promise_texture) {
+ GrBackendTexture backend_texture = promise_texture->backendTexture();
+ EXPECT_TRUE(backend_texture.isValid());
+ EXPECT_EQ(size.width(), backend_texture.width());
+ EXPECT_EQ(size.height(), backend_texture.height());
+ }
skia_representation->EndReadAccess();
skia_representation.reset();
@@ -188,7 +202,7 @@ TEST_P(SharedImageBackingFactoryGLTextureTest, Basic) {
}
TEST_P(SharedImageBackingFactoryGLTextureTest, Image) {
- auto mailbox = Mailbox::Generate();
+ auto mailbox = Mailbox::GenerateForSharedImage();
auto format = viz::ResourceFormat::RGBA_8888;
gfx::Size size(256, 256);
auto color_space = gfx::ColorSpace::CreateSRGB();
@@ -255,18 +269,19 @@ TEST_P(SharedImageBackingFactoryGLTextureTest, Image) {
shared_image_representation_factory_->ProduceSkia(mailbox);
EXPECT_TRUE(skia_representation);
auto surface = skia_representation->BeginWriteAccess(
- gr_context(), 0, kRGBA_8888_SkColorType,
- SkSurfaceProps(0, kUnknown_SkPixelGeometry));
+ gr_context(), 0, SkSurfaceProps(0, kUnknown_SkPixelGeometry));
EXPECT_TRUE(surface);
EXPECT_EQ(size.width(), surface->width());
EXPECT_EQ(size.height(), surface->height());
skia_representation->EndWriteAccess(std::move(surface));
- GrBackendTexture backend_texture;
- EXPECT_TRUE(skia_representation->BeginReadAccess(
- kRGBA_8888_SkColorType, &backend_texture));
- EXPECT_EQ(size.width(), backend_texture.width());
- EXPECT_EQ(size.width(), backend_texture.width());
+ auto promise_texture = skia_representation->BeginReadAccess(nullptr);
+ EXPECT_TRUE(promise_texture);
+ if (promise_texture) {
+ GrBackendTexture backend_texture = promise_texture->backendTexture();
+ EXPECT_TRUE(backend_texture.isValid());
+ EXPECT_EQ(size.width(), backend_texture.width());
+ EXPECT_EQ(size.height(), backend_texture.height());
+ }
skia_representation->EndReadAccess();
skia_representation.reset();
@@ -297,8 +312,117 @@ TEST_P(SharedImageBackingFactoryGLTextureTest, Image) {
}
}
+TEST_P(SharedImageBackingFactoryGLTextureTest, InitialData) {
+ for (auto format :
+ {viz::ResourceFormat::RGBA_8888, viz::ResourceFormat::ETC1}) {
+ if (format == viz::ResourceFormat::ETC1 && !supports_etc1())
+ continue;
+
+ auto mailbox = Mailbox::GenerateForSharedImage();
+ gfx::Size size(256, 256);
+ auto color_space = gfx::ColorSpace::CreateSRGB();
+ uint32_t usage = SHARED_IMAGE_USAGE_GLES2;
+ std::vector<uint8_t> initial_data(
+ viz::ResourceSizes::CheckedSizeInBytes<unsigned int>(size, format));
+ auto backing = backing_factory_->CreateSharedImage(
+ mailbox, format, size, color_space, usage, initial_data);
+ EXPECT_TRUE(backing);
+ EXPECT_TRUE(backing->IsCleared());
+
+ // Validate via a SharedImageRepresentationGLTexture(Passthrough).
+ std::unique_ptr<SharedImageRepresentationFactoryRef> shared_image =
+ shared_image_manager_.Register(std::move(backing),
+ memory_type_tracker_.get());
+ EXPECT_TRUE(shared_image);
+ GLenum expected_target = GL_TEXTURE_2D;
+ if (!use_passthrough()) {
+ auto gl_representation =
+ shared_image_representation_factory_->ProduceGLTexture(mailbox);
+ EXPECT_TRUE(gl_representation);
+ EXPECT_TRUE(gl_representation->GetTexture()->service_id());
+ EXPECT_EQ(expected_target, gl_representation->GetTexture()->target());
+ EXPECT_EQ(size, gl_representation->size());
+ EXPECT_EQ(format, gl_representation->format());
+ EXPECT_EQ(color_space, gl_representation->color_space());
+ EXPECT_EQ(usage, gl_representation->usage());
+ gl_representation.reset();
+ } else {
+ auto gl_representation =
+ shared_image_representation_factory_->ProduceGLTexturePassthrough(
+ mailbox);
+ EXPECT_TRUE(gl_representation);
+ EXPECT_TRUE(gl_representation->GetTexturePassthrough()->service_id());
+ EXPECT_EQ(expected_target,
+ gl_representation->GetTexturePassthrough()->target());
+ EXPECT_EQ(size, gl_representation->size());
+ EXPECT_EQ(format, gl_representation->format());
+ EXPECT_EQ(color_space, gl_representation->color_space());
+ EXPECT_EQ(usage, gl_representation->usage());
+ gl_representation.reset();
+ }
+
+ shared_image.reset();
+ EXPECT_FALSE(mailbox_manager_.ConsumeTexture(mailbox));
+ }
+}
+
+TEST_P(SharedImageBackingFactoryGLTextureTest, InitialDataImage) {
+ auto mailbox = Mailbox::GenerateForSharedImage();
+ auto format = viz::ResourceFormat::RGBA_8888;
+ gfx::Size size(256, 256);
+ auto color_space = gfx::ColorSpace::CreateSRGB();
+ uint32_t usage = SHARED_IMAGE_USAGE_GLES2 | SHARED_IMAGE_USAGE_SCANOUT;
+ std::vector<uint8_t> initial_data(256 * 256 * 4);
+ auto backing = backing_factory_->CreateSharedImage(
+ mailbox, format, size, color_space, usage, initial_data);
+
+ // Validate via a SharedImageRepresentationGLTexture(Passthrough).
+ std::unique_ptr<SharedImageRepresentationFactoryRef> shared_image =
+ shared_image_manager_.Register(std::move(backing),
+ memory_type_tracker_.get());
+ EXPECT_TRUE(shared_image);
+ if (!use_passthrough()) {
+ auto gl_representation =
+ shared_image_representation_factory_->ProduceGLTexture(mailbox);
+ EXPECT_TRUE(gl_representation);
+ EXPECT_TRUE(gl_representation->GetTexture()->service_id());
+ EXPECT_EQ(size, gl_representation->size());
+ EXPECT_EQ(format, gl_representation->format());
+ EXPECT_EQ(color_space, gl_representation->color_space());
+ EXPECT_EQ(usage, gl_representation->usage());
+ gl_representation.reset();
+ } else {
+ auto gl_representation =
+ shared_image_representation_factory_->ProduceGLTexturePassthrough(
+ mailbox);
+ EXPECT_TRUE(gl_representation);
+ EXPECT_TRUE(gl_representation->GetTexturePassthrough()->service_id());
+ EXPECT_EQ(size, gl_representation->size());
+ EXPECT_EQ(format, gl_representation->format());
+ EXPECT_EQ(color_space, gl_representation->color_space());
+ EXPECT_EQ(usage, gl_representation->usage());
+ gl_representation.reset();
+ }
+}
+
+TEST_P(SharedImageBackingFactoryGLTextureTest, InitialDataWrongSize) {
+ auto mailbox = Mailbox::GenerateForSharedImage();
+ auto format = viz::ResourceFormat::RGBA_8888;
+ gfx::Size size(256, 256);
+ auto color_space = gfx::ColorSpace::CreateSRGB();
+ uint32_t usage = SHARED_IMAGE_USAGE_GLES2;
+ std::vector<uint8_t> initial_data_small(256 * 128 * 4);
+ std::vector<uint8_t> initial_data_large(256 * 512 * 4);
+ auto backing = backing_factory_->CreateSharedImage(
+ mailbox, format, size, color_space, usage, initial_data_small);
+ EXPECT_FALSE(backing);
+ backing = backing_factory_->CreateSharedImage(
+ mailbox, format, size, color_space, usage, initial_data_large);
+ EXPECT_FALSE(backing);
+}
+
TEST_P(SharedImageBackingFactoryGLTextureTest, InvalidFormat) {
- auto mailbox = Mailbox::Generate();
+ auto mailbox = Mailbox::GenerateForSharedImage();
auto format = viz::ResourceFormat::UYVY_422;
gfx::Size size(256, 256);
auto color_space = gfx::ColorSpace::CreateSRGB();
@@ -309,7 +433,7 @@ TEST_P(SharedImageBackingFactoryGLTextureTest, InvalidFormat) {
}
TEST_P(SharedImageBackingFactoryGLTextureTest, InvalidSize) {
- auto mailbox = Mailbox::Generate();
+ auto mailbox = Mailbox::GenerateForSharedImage();
auto format = viz::ResourceFormat::RGBA_8888;
gfx::Size size(0, 0);
auto color_space = gfx::ColorSpace::CreateSRGB();
@@ -325,7 +449,7 @@ TEST_P(SharedImageBackingFactoryGLTextureTest, InvalidSize) {
}
TEST_P(SharedImageBackingFactoryGLTextureTest, EstimatedSize) {
- auto mailbox = Mailbox::Generate();
+ auto mailbox = Mailbox::GenerateForSharedImage();
auto format = viz::ResourceFormat::RGBA_8888;
gfx::Size size(256, 256);
auto color_space = gfx::ColorSpace::CreateSRGB();
@@ -467,7 +591,7 @@ class SharedImageBackingFactoryGLTextureWithGMBTest
TEST_P(SharedImageBackingFactoryGLTextureWithGMBTest,
GpuMemoryBufferImportEmpty) {
- auto mailbox = Mailbox::Generate();
+ auto mailbox = Mailbox::GenerateForSharedImage();
gfx::Size size(256, 256);
gfx::BufferFormat format = gfx::BufferFormat::RGBA_8888;
auto color_space = gfx::ColorSpace::CreateSRGB();
@@ -482,7 +606,7 @@ TEST_P(SharedImageBackingFactoryGLTextureWithGMBTest,
TEST_P(SharedImageBackingFactoryGLTextureWithGMBTest,
GpuMemoryBufferImportNative) {
- auto mailbox = Mailbox::Generate();
+ auto mailbox = Mailbox::GenerateForSharedImage();
gfx::Size size(256, 256);
gfx::BufferFormat format = gfx::BufferFormat::RGBA_8888;
auto color_space = gfx::ColorSpace::CreateSRGB();
@@ -510,7 +634,7 @@ TEST_P(SharedImageBackingFactoryGLTextureWithGMBTest,
TEST_P(SharedImageBackingFactoryGLTextureWithGMBTest,
GpuMemoryBufferImportSharedMemory) {
- auto mailbox = Mailbox::Generate();
+ auto mailbox = Mailbox::GenerateForSharedImage();
gfx::Size size(256, 256);
gfx::BufferFormat format = gfx::BufferFormat::RGBA_8888;
auto color_space = gfx::ColorSpace::CreateSRGB();
diff --git a/chromium/gpu/command_buffer/service/shared_image_factory.cc b/chromium/gpu/command_buffer/service/shared_image_factory.cc
index 91fbc519783..e848d6c53e1 100644
--- a/chromium/gpu/command_buffer/service/shared_image_factory.cc
+++ b/chromium/gpu/command_buffer/service/shared_image_factory.cc
@@ -16,6 +16,7 @@
#include "gpu/command_buffer/service/image_factory.h"
#include "gpu/command_buffer/service/mailbox_manager.h"
#include "gpu/command_buffer/service/service_utils.h"
+#include "gpu/command_buffer/service/shared_context_state.h"
#include "gpu/command_buffer/service/shared_image_backing.h"
#include "gpu/command_buffer/service/shared_image_backing_factory_gl_texture.h"
#include "gpu/command_buffer/service/shared_image_manager.h"
@@ -47,7 +48,7 @@ SharedImageFactory::SharedImageFactory(
const GpuPreferences& gpu_preferences,
const GpuDriverBugWorkarounds& workarounds,
const GpuFeatureInfo& gpu_feature_info,
- raster::RasterDecoderContextState* context_state,
+ SharedContextState* context_state,
MailboxManager* mailbox_manager,
SharedImageManager* shared_image_manager,
ImageFactory* image_factory,
@@ -55,6 +56,7 @@ SharedImageFactory::SharedImageFactory(
: mailbox_manager_(mailbox_manager),
shared_image_manager_(shared_image_manager),
memory_tracker_(std::make_unique<MemoryTypeTracker>(memory_tracker)),
+ using_vulkan_(context_state && context_state->use_vulkan_gr_context()),
backing_factory_(
std::make_unique<SharedImageBackingFactoryGLTexture>(gpu_preferences,
workarounds,
@@ -89,6 +91,28 @@ bool SharedImageFactory::CreateSharedImage(const Mailbox& mailbox,
}
bool SharedImageFactory::CreateSharedImage(const Mailbox& mailbox,
+ viz::ResourceFormat format,
+ const gfx::Size& size,
+ const gfx::ColorSpace& color_space,
+ uint32_t usage,
+ base::span<const uint8_t> data) {
+ std::unique_ptr<SharedImageBacking> backing;
+ bool vulkan_data_upload = using_vulkan_ && !data.empty();
+ bool oop_rasterization = usage & SHARED_IMAGE_USAGE_OOP_RASTERIZATION;
+ bool using_wrapped_sk_image =
+ (wrapped_sk_image_factory_ && (vulkan_data_upload || oop_rasterization));
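+ // Prefer WrappedSkImage where the GL backing cannot serve the request:
+ // initial-data uploads under a Vulkan GrContext, or images used for OOP
+ // rasterization.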
+ if (using_wrapped_sk_image) {
+ backing = wrapped_sk_image_factory_->CreateSharedImage(
+ mailbox, format, size, color_space, usage, data);
+ } else {
+ backing = backing_factory_->CreateSharedImage(mailbox, format, size,
+ color_space, usage, data);
+ }
+
+ return RegisterBacking(std::move(backing), !using_wrapped_sk_image);
+}
+
+bool SharedImageFactory::CreateSharedImage(const Mailbox& mailbox,
int client_id,
gfx::GpuMemoryBufferHandle handle,
gfx::BufferFormat format,
@@ -155,18 +179,15 @@ bool SharedImageFactory::RegisterBacking(
return false;
}
- Mailbox mailbox = backing->mailbox();
- if (shared_image_manager_->IsSharedImage(mailbox)) {
- LOG(ERROR) << "CreateSharedImage: mailbox is already associated with a "
- "SharedImage";
- backing->Destroy();
- return false;
- }
-
std::unique_ptr<SharedImageRepresentationFactoryRef> shared_image =
shared_image_manager_->Register(std::move(backing),
memory_tracker_.get());
+ if (!shared_image) {
+ LOG(ERROR) << "CreateSharedImage: could not register backing.";
+ return false;
+ }
+
// TODO(ericrk): Remove this once no legacy cases remain.
if (legacy_mailbox && !shared_image->ProduceLegacyMailbox(mailbox_manager_)) {
LOG(ERROR) << "CreateSharedImage: could not convert shared_image to legacy "
diff --git a/chromium/gpu/command_buffer/service/shared_image_factory.h b/chromium/gpu/command_buffer/service/shared_image_factory.h
index 78c79330428..8968052f892 100644
--- a/chromium/gpu/command_buffer/service/shared_image_factory.h
+++ b/chromium/gpu/command_buffer/service/shared_image_factory.h
@@ -20,6 +20,7 @@
#include "ui/gl/gl_bindings.h"
namespace gpu {
+class SharedContextState;
class GpuDriverBugWorkarounds;
class ImageFactory;
class MailboxManager;
@@ -30,7 +31,6 @@ class MemoryTracker;
namespace raster {
class WrappedSkImageFactory;
-struct RasterDecoderContextState;
} // namespace raster
// TODO(ericrk): Make this a very thin wrapper around SharedImageManager like
@@ -40,7 +40,7 @@ class GPU_GLES2_EXPORT SharedImageFactory {
SharedImageFactory(const GpuPreferences& gpu_preferences,
const GpuDriverBugWorkarounds& workarounds,
const GpuFeatureInfo& gpu_feature_info,
- raster::RasterDecoderContextState* context_state,
+ SharedContextState* context_state,
MailboxManager* mailbox_manager,
SharedImageManager* manager,
ImageFactory* image_factory,
@@ -53,6 +53,12 @@ class GPU_GLES2_EXPORT SharedImageFactory {
const gfx::ColorSpace& color_space,
uint32_t usage);
bool CreateSharedImage(const Mailbox& mailbox,
+ viz::ResourceFormat format,
+ const gfx::Size& size,
+ const gfx::ColorSpace& color_space,
+ uint32_t usage,
+ base::span<const uint8_t> pixel_data);
+ bool CreateSharedImage(const Mailbox& mailbox,
int client_id,
gfx::GpuMemoryBufferHandle handle,
gfx::BufferFormat format,
@@ -75,6 +81,7 @@ class GPU_GLES2_EXPORT SharedImageFactory {
MailboxManager* mailbox_manager_;
SharedImageManager* shared_image_manager_;
std::unique_ptr<MemoryTypeTracker> memory_tracker_;
+ const bool using_vulkan_;
// The set of SharedImages which have been created (and are being kept alive)
// by this factory.
@@ -95,10 +102,6 @@ class GPU_GLES2_EXPORT SharedImageRepresentationFactory {
MemoryTracker* tracker);
~SharedImageRepresentationFactory();
- bool IsSharedImage(const Mailbox& mailbox) const {
- return manager_->IsSharedImage(mailbox);
- }
-
// Helpers which call similar classes on SharedImageManager, providing a
// MemoryTypeTracker.
std::unique_ptr<SharedImageRepresentationGLTexture> ProduceGLTexture(
diff --git a/chromium/gpu/command_buffer/service/shared_image_factory_unittest.cc b/chromium/gpu/command_buffer/service/shared_image_factory_unittest.cc
index c6e82086bd7..fbc5770bff9 100644
--- a/chromium/gpu/command_buffer/service/shared_image_factory_unittest.cc
+++ b/chromium/gpu/command_buffer/service/shared_image_factory_unittest.cc
@@ -58,7 +58,7 @@ class SharedImageFactoryTest : public testing::Test {
};
TEST_F(SharedImageFactoryTest, Basic) {
- auto mailbox = Mailbox::Generate();
+ auto mailbox = Mailbox::GenerateForSharedImage();
auto format = viz::ResourceFormat::RGBA_8888;
gfx::Size size(256, 256);
auto color_space = gfx::ColorSpace::CreateSRGB();
@@ -74,7 +74,7 @@ TEST_F(SharedImageFactoryTest, Basic) {
}
TEST_F(SharedImageFactoryTest, DuplicateMailbox) {
- auto mailbox = Mailbox::Generate();
+ auto mailbox = Mailbox::GenerateForSharedImage();
auto format = viz::ResourceFormat::RGBA_8888;
gfx::Size size(256, 256);
auto color_space = gfx::ColorSpace::CreateSRGB();
@@ -95,7 +95,7 @@ TEST_F(SharedImageFactoryTest, DuplicateMailbox) {
}
TEST_F(SharedImageFactoryTest, DestroyInexistentMailbox) {
- auto mailbox = Mailbox::Generate();
+ auto mailbox = Mailbox::GenerateForSharedImage();
EXPECT_FALSE(factory_->DestroySharedImage(mailbox));
}
diff --git a/chromium/gpu/command_buffer/service/shared_image_manager.cc b/chromium/gpu/command_buffer/service/shared_image_manager.cc
index 162caf8231d..d9abf9b9d75 100644
--- a/chromium/gpu/command_buffer/service/shared_image_manager.cc
+++ b/chromium/gpu/command_buffer/service/shared_image_manager.cc
@@ -41,6 +41,13 @@ SharedImageManager::~SharedImageManager() {
std::unique_ptr<SharedImageRepresentationFactoryRef>
SharedImageManager::Register(std::unique_ptr<SharedImageBacking> backing,
MemoryTypeTracker* tracker) {
+ DCHECK(backing->mailbox().IsSharedImage());
+ if (images_.find(backing->mailbox()) != images_.end()) {
+ LOG(ERROR) << "ShraedImageManager::Register: Trying to register an "
+ "already registered mailbox.";
+ backing->Destroy();
+ return nullptr;
+ }
auto factory_ref = std::make_unique<SharedImageRepresentationFactoryRef>(
this, backing.get(), tracker);
images_.emplace(std::move(backing));
@@ -58,11 +65,6 @@ void SharedImageManager::OnContextLost(const Mailbox& mailbox) {
(*found)->OnContextLost();
}
-bool SharedImageManager::IsSharedImage(const Mailbox& mailbox) {
- auto found = images_.find(mailbox);
- return found != images_.end();
-}
-
std::unique_ptr<SharedImageRepresentationGLTexture>
SharedImageManager::ProduceGLTexture(const Mailbox& mailbox,
MemoryTypeTracker* tracker) {
diff --git a/chromium/gpu/command_buffer/service/shared_image_manager.h b/chromium/gpu/command_buffer/service/shared_image_manager.h
index 682eba3ca2b..51134762305 100644
--- a/chromium/gpu/command_buffer/service/shared_image_manager.h
+++ b/chromium/gpu/command_buffer/service/shared_image_manager.h
@@ -28,9 +28,6 @@ class GPU_GLES2_EXPORT SharedImageManager {
// Marks the backing associated with a mailbox as context lost.
void OnContextLost(const Mailbox& mailbox);
- // Indicates whether a mailbox is associated with a SharedImage.
- bool IsSharedImage(const Mailbox& mailbox);
-
// Accessors which return a SharedImageRepresentation. Representations also
// take a ref on the mailbox, releasing it when the representation is
// destroyed.
diff --git a/chromium/gpu/command_buffer/service/shared_image_manager_unittest.cc b/chromium/gpu/command_buffer/service/shared_image_manager_unittest.cc
index 277b048ce42..c0d3defd9d7 100644
--- a/chromium/gpu/command_buffer/service/shared_image_manager_unittest.cc
+++ b/chromium/gpu/command_buffer/service/shared_image_manager_unittest.cc
@@ -76,7 +76,7 @@ TEST(SharedImageManagerTest, BasicRefCounting) {
SharedImageManager manager;
auto tracker = std::make_unique<MemoryTypeTracker>(nullptr);
- auto mailbox = Mailbox::Generate();
+ auto mailbox = Mailbox::GenerateForSharedImage();
auto format = viz::ResourceFormat::RGBA_8888;
gfx::Size size(256, 256);
auto color_space = gfx::ColorSpace::CreateSRGB();
@@ -116,7 +116,7 @@ TEST(SharedImageManagerTest, TransferRefSameTracker) {
SharedImageManager manager;
auto tracker = std::make_unique<MemoryTypeTracker>(nullptr);
- auto mailbox = Mailbox::Generate();
+ auto mailbox = Mailbox::GenerateForSharedImage();
auto format = viz::ResourceFormat::RGBA_8888;
gfx::Size size(256, 256);
auto color_space = gfx::ColorSpace::CreateSRGB();
@@ -148,7 +148,7 @@ TEST(SharedImageManagerTest, TransferRefNewTracker) {
auto tracker = std::make_unique<MemoryTypeTracker>(nullptr);
auto tracker2 = std::make_unique<MemoryTypeTracker>(nullptr);
- auto mailbox = Mailbox::Generate();
+ auto mailbox = Mailbox::GenerateForSharedImage();
auto format = viz::ResourceFormat::RGBA_8888;
gfx::Size size(256, 256);
auto color_space = gfx::ColorSpace::CreateSRGB();
diff --git a/chromium/gpu/command_buffer/service/shared_image_representation.h b/chromium/gpu/command_buffer/service/shared_image_representation.h
index 1e2fd6fa3b0..d9a3464e116 100644
--- a/chromium/gpu/command_buffer/service/shared_image_representation.h
+++ b/chromium/gpu/command_buffer/service/shared_image_representation.h
@@ -16,6 +16,7 @@
class GrContext;
typedef unsigned int GLenum;
+class SkPromiseImageTexture;
namespace gpu {
namespace gles2 {
@@ -110,11 +111,10 @@ class SharedImageRepresentationSkia : public SharedImageRepresentation {
virtual sk_sp<SkSurface> BeginWriteAccess(
GrContext* gr_context,
int final_msaa_count,
- SkColorType color_type,
const SkSurfaceProps& surface_props) = 0;
virtual void EndWriteAccess(sk_sp<SkSurface> surface) = 0;
- virtual bool BeginReadAccess(SkColorType color_type,
- GrBackendTexture* backend_texture_out) = 0;
+ virtual sk_sp<SkPromiseImageTexture> BeginReadAccess(
+ SkSurface* sk_surface) = 0;
virtual void EndReadAccess() = 0;
};
diff --git a/chromium/gpu/command_buffer/service/skia_utils.cc b/chromium/gpu/command_buffer/service/skia_utils.cc
index 5143beec0d7..5c4466b1619 100644
--- a/chromium/gpu/command_buffer/service/skia_utils.cc
+++ b/chromium/gpu/command_buffer/service/skia_utils.cc
@@ -5,19 +5,21 @@
#include "gpu/command_buffer/service/skia_utils.h"
#include "base/logging.h"
+#include "components/viz/common/resources/resource_format_utils.h"
#include "third_party/skia/include/gpu/GrBackendSurface.h"
#include "third_party/skia/include/gpu/gl/GrGLTypes.h"
#include "ui/gfx/geometry/size.h"
#include "ui/gl/gl_bindings.h"
+#include "ui/gl/gl_gl_api_implementation.h"
+#include "ui/gl/gl_version_info.h"
namespace gpu {
-bool GetGrBackendTexture(GLenum target,
+bool GetGrBackendTexture(const gl::GLVersionInfo* version_info,
+ GLenum target,
const gfx::Size& size,
- GLenum internal_format,
- GLenum driver_internal_format,
GLuint service_id,
- GLint sk_color_type,
+ viz::ResourceFormat resource_format,
GrBackendTexture* gr_texture) {
if (target != GL_TEXTURE_2D && target != GL_TEXTURE_RECTANGLE_ARB) {
LOG(ERROR) << "GetGrBackendTexture: invalid texture target.";
@@ -27,58 +29,8 @@ bool GetGrBackendTexture(GLenum target,
GrGLTextureInfo texture_info;
texture_info.fID = service_id;
texture_info.fTarget = target;
-
- // |driver_internal_format| may be a base internal format but Skia requires a
- // sized internal format. So this may be adjusted below.
- texture_info.fFormat = driver_internal_format;
- switch (sk_color_type) {
- case kARGB_4444_SkColorType:
- if (internal_format != GL_RGBA4 && internal_format != GL_RGBA) {
- LOG(ERROR)
- << "GetGrBackendTexture: color type mismatch. internal_format=0x"
- << std::hex << internal_format;
- return false;
- }
- if (texture_info.fFormat == GL_RGBA)
- texture_info.fFormat = GL_RGBA4;
- break;
- case kRGBA_8888_SkColorType:
- if (internal_format != GL_RGBA8_OES && internal_format != GL_RGBA) {
- LOG(ERROR)
- << "GetGrBackendTexture: color type mismatch. internal_format=0x"
- << std::hex << internal_format;
- return false;
- }
- if (texture_info.fFormat == GL_RGBA)
- texture_info.fFormat = GL_RGBA8_OES;
- break;
- case kRGB_888x_SkColorType:
- if (internal_format != GL_RGB8_OES && internal_format != GL_RGB) {
- LOG(ERROR)
- << "GetGrBackendTexture: color type mismatch. internal_format=0x"
- << std::hex << internal_format;
- return false;
- }
- if (texture_info.fFormat == GL_RGB)
- texture_info.fFormat = GL_RGB8_OES;
- break;
- case kBGRA_8888_SkColorType:
- if (internal_format != GL_BGRA_EXT && internal_format != GL_BGRA8_EXT) {
- LOG(ERROR)
- << "GetGrBackendTexture: color type mismatch. internal_format=0x"
- << std::hex << internal_format;
- return false;
- }
- if (texture_info.fFormat == GL_BGRA_EXT)
- texture_info.fFormat = GL_BGRA8_EXT;
- if (texture_info.fFormat == GL_RGBA)
- texture_info.fFormat = GL_RGBA8_OES;
- break;
- default:
- LOG(ERROR) << "GetGrBackendTexture: unsupported color type.";
- return false;
- }
-
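+  // Skia requires a sized internal format, so derive it from the
+  // ResourceFormat and let gl::GetInternalFormat adjust it for the current
+  // GL version.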
+ texture_info.fFormat = gl::GetInternalFormat(
+ version_info, viz::TextureStorageFormat(resource_format));
*gr_texture = GrBackendTexture(size.width(), size.height(), GrMipMapped::kNo,
texture_info);
return true;
diff --git a/chromium/gpu/command_buffer/service/skia_utils.h b/chromium/gpu/command_buffer/service/skia_utils.h
index 301da43ac71..166314616bc 100644
--- a/chromium/gpu/command_buffer/service/skia_utils.h
+++ b/chromium/gpu/command_buffer/service/skia_utils.h
@@ -5,6 +5,7 @@
#ifndef GPU_COMMAND_BUFFER_SERVICE_SKIA_UTILS_H_
#define GPU_COMMAND_BUFFER_SERVICE_SKIA_UTILS_H_
+#include "components/viz/common/resources/resource_format.h"
#include "gpu/gpu_gles2_export.h"
// Forward-declare a few GL types to avoid including GL header files.
@@ -18,15 +19,18 @@ namespace gfx {
class Size;
} // namespace gfx
+namespace gl {
+struct GLVersionInfo;
+} // namespace gl
+
namespace gpu {
// Creates a GrBackendTexture from a service ID. Skia does not take ownership.
// Returns true on success.
-GPU_GLES2_EXPORT bool GetGrBackendTexture(GLenum target,
+GPU_GLES2_EXPORT bool GetGrBackendTexture(const gl::GLVersionInfo* version_info,
+ GLenum target,
const gfx::Size& size,
- GLenum internal_format,
- GLenum driver_internal_format,
GLuint service_id,
- GLint sk_color_type,
+ viz::ResourceFormat resource_format,
GrBackendTexture* gr_texture);
} // namespace gpu
diff --git a/chromium/gpu/command_buffer/service/sync_point_manager.cc b/chromium/gpu/command_buffer/service/sync_point_manager.cc
index 09a39933953..740ffaa6e71 100644
--- a/chromium/gpu/command_buffer/service/sync_point_manager.cc
+++ b/chromium/gpu/command_buffer/service/sync_point_manager.cc
@@ -13,6 +13,7 @@
#include "base/logging.h"
#include "base/memory/ref_counted.h"
#include "base/single_thread_task_runner.h"
+#include "base/stl_util.h"
namespace gpu {
@@ -362,7 +363,8 @@ SyncPointManager::CreateSyncPointClientState(
{
base::AutoLock auto_lock(lock_);
DCHECK_GE(namespace_id, 0);
- DCHECK_LT(static_cast<size_t>(namespace_id), arraysize(client_state_maps_));
+ DCHECK_LT(static_cast<size_t>(namespace_id),
+ base::size(client_state_maps_));
DCHECK(!client_state_maps_[namespace_id].count(command_buffer_id));
client_state_maps_[namespace_id].insert(
std::make_pair(command_buffer_id, client_state));
@@ -376,7 +378,7 @@ void SyncPointManager::DestroyedSyncPointClientState(
CommandBufferId command_buffer_id) {
base::AutoLock auto_lock(lock_);
DCHECK_GE(namespace_id, 0);
- DCHECK_LT(static_cast<size_t>(namespace_id), arraysize(client_state_maps_));
+ DCHECK_LT(static_cast<size_t>(namespace_id), base::size(client_state_maps_));
DCHECK(client_state_maps_[namespace_id].count(command_buffer_id));
client_state_maps_[namespace_id].erase(command_buffer_id);
}
@@ -466,7 +468,8 @@ scoped_refptr<SyncPointClientState> SyncPointManager::GetSyncPointClientState(
CommandBufferNamespace namespace_id,
CommandBufferId command_buffer_id) {
if (namespace_id >= 0) {
- DCHECK_LT(static_cast<size_t>(namespace_id), arraysize(client_state_maps_));
+ DCHECK_LT(static_cast<size_t>(namespace_id),
+ base::size(client_state_maps_));
base::AutoLock auto_lock(lock_);
ClientStateMap& client_state_map = client_state_maps_[namespace_id];
auto it = client_state_map.find(command_buffer_id);
diff --git a/chromium/gpu/command_buffer/service/sync_point_manager_unittest.cc b/chromium/gpu/command_buffer/service/sync_point_manager_unittest.cc
index 5ed87c75ff1..dc141e86937 100644
--- a/chromium/gpu/command_buffer/service/sync_point_manager_unittest.cc
+++ b/chromium/gpu/command_buffer/service/sync_point_manager_unittest.cc
@@ -168,8 +168,8 @@ TEST_F(SyncPointManagerTest, BasicFenceSyncWaitRelease) {
wait_stream.BeginProcessing();
int test_num = 10;
bool valid_wait = wait_stream.client_state->Wait(
- sync_token,
- base::Bind(&SyncPointManagerTest::SetIntegerFunction, &test_num, 123));
+ sync_token, base::BindOnce(&SyncPointManagerTest::SetIntegerFunction,
+ &test_num, 123));
EXPECT_TRUE(valid_wait);
EXPECT_EQ(10, test_num);
EXPECT_FALSE(sync_point_manager_->IsSyncTokenReleased(sync_token));
@@ -199,8 +199,8 @@ TEST_F(SyncPointManagerTest, WaitOnSelfFails) {
wait_stream.BeginProcessing();
int test_num = 10;
bool valid_wait = wait_stream.client_state->Wait(
- sync_token,
- base::Bind(&SyncPointManagerTest::SetIntegerFunction, &test_num, 123));
+ sync_token, base::BindOnce(&SyncPointManagerTest::SetIntegerFunction,
+ &test_num, 123));
EXPECT_FALSE(valid_wait);
EXPECT_EQ(10, test_num);
EXPECT_FALSE(sync_point_manager_->IsSyncTokenReleased(sync_token));
@@ -226,8 +226,8 @@ TEST_F(SyncPointManagerTest, OutOfOrderRelease) {
wait_stream.BeginProcessing();
int test_num = 10;
bool valid_wait = wait_stream.client_state->Wait(
- sync_token,
- base::Bind(&SyncPointManagerTest::SetIntegerFunction, &test_num, 123));
+ sync_token, base::BindOnce(&SyncPointManagerTest::SetIntegerFunction,
+ &test_num, 123));
EXPECT_FALSE(valid_wait);
EXPECT_EQ(10, test_num);
EXPECT_FALSE(sync_point_manager_->IsSyncTokenReleased(sync_token));
@@ -259,8 +259,8 @@ TEST_F(SyncPointManagerTest, HigherOrderNumberRelease) {
wait_stream.BeginProcessing();
int test_num = 10;
bool valid_wait = wait_stream.client_state->Wait(
- sync_token,
- base::Bind(&SyncPointManagerTest::SetIntegerFunction, &test_num, 123));
+ sync_token, base::BindOnce(&SyncPointManagerTest::SetIntegerFunction,
+ &test_num, 123));
EXPECT_FALSE(valid_wait);
EXPECT_EQ(10, test_num);
EXPECT_TRUE(sync_point_manager_->IsSyncTokenReleased(sync_token));
@@ -286,8 +286,8 @@ TEST_F(SyncPointManagerTest, DestroyedClientRelease) {
int test_num = 10;
bool valid_wait = wait_stream.client_state->Wait(
- sync_token,
- base::Bind(&SyncPointManagerTest::SetIntegerFunction, &test_num, 123));
+ sync_token, base::BindOnce(&SyncPointManagerTest::SetIntegerFunction,
+ &test_num, 123));
EXPECT_TRUE(valid_wait);
EXPECT_EQ(10, test_num);
@@ -322,8 +322,8 @@ TEST_F(SyncPointManagerTest, NonExistentRelease) {
wait_stream.BeginProcessing();
int test_num = 10;
bool valid_wait = wait_stream.client_state->Wait(
- sync_token,
- base::Bind(&SyncPointManagerTest::SetIntegerFunction, &test_num, 123));
+ sync_token, base::BindOnce(&SyncPointManagerTest::SetIntegerFunction,
+ &test_num, 123));
EXPECT_TRUE(valid_wait);
EXPECT_EQ(10, test_num);
EXPECT_FALSE(sync_point_manager_->IsSyncTokenReleased(sync_token));
@@ -368,8 +368,8 @@ TEST_F(SyncPointManagerTest, NonExistentRelease2) {
EXPECT_EQ(3u, wait_stream.order_data->current_order_num());
int test_num = 10;
bool valid_wait = wait_stream.client_state->Wait(
- sync_token,
- base::Bind(&SyncPointManagerTest::SetIntegerFunction, &test_num, 123));
+ sync_token, base::BindOnce(&SyncPointManagerTest::SetIntegerFunction,
+ &test_num, 123));
EXPECT_TRUE(valid_wait);
EXPECT_EQ(10, test_num);
EXPECT_FALSE(sync_point_manager_->IsSyncTokenReleased(sync_token));
@@ -425,8 +425,8 @@ TEST_F(SyncPointManagerTest, NonExistentOrderNumRelease) {
EXPECT_EQ(3u, wait_stream.order_data->current_order_num());
int test_num = 10;
bool valid_wait = wait_stream.client_state->Wait(
- sync_token,
- base::Bind(&SyncPointManagerTest::SetIntegerFunction, &test_num, 123));
+ sync_token, base::BindOnce(&SyncPointManagerTest::SetIntegerFunction,
+ &test_num, 123));
EXPECT_TRUE(valid_wait);
EXPECT_EQ(10, test_num);
@@ -467,7 +467,8 @@ TEST_F(SyncPointManagerTest, WaitOnSameSequenceFails) {
bool valid_wait = sync_point_manager_->Wait(
sync_token, stream.order_data->sequence_id(),
stream.order_data->unprocessed_order_num(),
- base::Bind(&SyncPointManagerTest::SetIntegerFunction, &test_num, 123));
+ base::BindOnce(&SyncPointManagerTest::SetIntegerFunction, &test_num,
+ 123));
EXPECT_FALSE(valid_wait);
EXPECT_EQ(10, test_num);
EXPECT_FALSE(sync_point_manager_->IsSyncTokenReleased(sync_token));
@@ -498,7 +499,8 @@ TEST_F(SyncPointManagerTest, HandleInvalidWaitOrderNumber) {
bool valid_wait = sync_point_manager_->Wait(
SyncToken(kNamespaceId, kCmdBufferId1, 1),
stream2.order_data->sequence_id(), 3,
- base::Bind(&SyncPointManagerTest::SetIntegerFunction, &test_num, 123));
+ base::BindOnce(&SyncPointManagerTest::SetIntegerFunction, &test_num,
+ 123));
EXPECT_FALSE(valid_wait);
EXPECT_EQ(10, test_num);
}
@@ -522,7 +524,8 @@ TEST_F(SyncPointManagerTest, RetireInvalidWaitAfterOrderNumberPasses) {
bool valid_wait = sync_point_manager_->Wait(
SyncToken(kNamespaceId, kCmdBufferId1, 1),
stream2.order_data->sequence_id(), 3,
- base::Bind(&SyncPointManagerTest::SetIntegerFunction, &test_num, 123));
+ base::BindOnce(&SyncPointManagerTest::SetIntegerFunction, &test_num,
+ 123));
EXPECT_TRUE(valid_wait);
EXPECT_EQ(10, test_num);
@@ -560,7 +563,8 @@ TEST_F(SyncPointManagerTest, HandleInvalidCyclicWaits) {
bool valid_wait = sync_point_manager_->Wait(
SyncToken(kNamespaceId, kCmdBufferId1, 1),
stream2.order_data->sequence_id(), 2,
- base::Bind(&SyncPointManagerTest::SetIntegerFunction, &test_num1, 123));
+ base::BindOnce(&SyncPointManagerTest::SetIntegerFunction, &test_num1,
+ 123));
EXPECT_TRUE(valid_wait);
EXPECT_EQ(10, test_num1);
@@ -569,7 +573,8 @@ TEST_F(SyncPointManagerTest, HandleInvalidCyclicWaits) {
valid_wait = sync_point_manager_->Wait(
SyncToken(kNamespaceId, kCmdBufferId2, 1),
stream1.order_data->sequence_id(), 3,
- base::Bind(&SyncPointManagerTest::SetIntegerFunction, &test_num2, 123));
+ base::BindOnce(&SyncPointManagerTest::SetIntegerFunction, &test_num2,
+ 123));
EXPECT_TRUE(valid_wait);
EXPECT_EQ(10, test_num2);
diff --git a/chromium/gpu/command_buffer/service/test_helper.cc b/chromium/gpu/command_buffer/service/test_helper.cc
index 9a3cfc79afe..86c8fab2f14 100644
--- a/chromium/gpu/command_buffer/service/test_helper.cc
+++ b/chromium/gpu/command_buffer/service/test_helper.cc
@@ -10,6 +10,7 @@
#include <algorithm>
#include <string>
+#include "base/stl_util.h"
#include "base/strings/string_number_conversions.h"
#include "gpu/command_buffer/service/buffer_manager.h"
#include "gpu/command_buffer/service/error_state_mock.h"
@@ -174,7 +175,7 @@ void TestHelper::SetupTextureInitializationExpectations(
GL_TEXTURE_CUBE_MAP_POSITIVE_Z,
GL_TEXTURE_CUBE_MAP_NEGATIVE_Z,
};
- for (size_t ii = 0; ii < arraysize(faces); ++ii) {
+ for (size_t ii = 0; ii < base::size(faces); ++ii) {
EXPECT_CALL(*gl, TexImage2D(faces[ii], 0, GL_RGBA, 1, 1, 0, GL_RGBA,
GL_UNSIGNED_BYTE, _))
.Times(1)
@@ -954,7 +955,7 @@ void TestHelper::SetupProgramSuccessExpectations(
static const GLenum kPropsArray[] = {GL_LOCATION, GL_TYPE,
GL_ARRAY_SIZE};
- static const size_t kPropsSize = arraysize(kPropsArray);
+ static const size_t kPropsSize = base::size(kPropsArray);
EXPECT_CALL(
*gl, GetProgramResourceiv(
service_id, GL_FRAGMENT_INPUT_NV, ii, kPropsSize,
diff --git a/chromium/gpu/command_buffer/service/texture_manager.cc b/chromium/gpu/command_buffer/service/texture_manager.cc
index 552bd187d66..c8632ba30b3 100644
--- a/chromium/gpu/command_buffer/service/texture_manager.cc
+++ b/chromium/gpu/command_buffer/service/texture_manager.cc
@@ -16,6 +16,7 @@
#include "base/format_macros.h"
#include "base/lazy_instance.h"
#include "base/metrics/histogram_macros.h"
+#include "base/stl_util.h"
#include "base/strings/stringprintf.h"
#include "base/threading/thread_task_runner_handle.h"
#include "base/trace_event/memory_dump_manager.h"
@@ -31,6 +32,7 @@
#include "gpu/command_buffer/service/service_discardable_manager.h"
#include "gpu/command_buffer/service/shared_image_representation.h"
#include "ui/gl/gl_context.h"
+#include "ui/gl/gl_enums.h"
#include "ui/gl/gl_implementation.h"
#include "ui/gl/gl_state_restorer.h"
#include "ui/gl/gl_version_info.h"
@@ -258,11 +260,11 @@ class FormatTypeValidator {
{GL_RED, GL_RED, GL_UNSIGNED_SHORT},
};
- for (size_t ii = 0; ii < arraysize(kSupportedFormatTypes); ++ii) {
+ for (size_t ii = 0; ii < base::size(kSupportedFormatTypes); ++ii) {
supported_combinations_.insert(kSupportedFormatTypes[ii]);
}
- for (size_t ii = 0; ii < arraysize(kSupportedFormatTypesES2Only); ++ii) {
+ for (size_t ii = 0; ii < base::size(kSupportedFormatTypesES2Only); ++ii) {
supported_combinations_es2_only_.insert(kSupportedFormatTypesES2Only[ii]);
}
}
@@ -312,7 +314,7 @@ static const Texture::CompatibilitySwizzle kSwizzledFormats[] = {
const Texture::CompatibilitySwizzle* GetCompatibilitySwizzleInternal(
GLenum format) {
- size_t count = arraysize(kSwizzledFormats);
+ size_t count = base::size(kSwizzledFormats);
for (size_t i = 0; i < count; ++i) {
if (kSwizzledFormats[i].format == format)
return &kSwizzledFormats[i];
@@ -494,7 +496,7 @@ void TextureManager::Destroy() {
}
if (have_context_) {
- glDeleteTextures(arraysize(black_texture_ids_), black_texture_ids_);
+ glDeleteTextures(base::size(black_texture_ids_), black_texture_ids_);
}
DCHECK_EQ(0u, memory_type_tracker_->GetMemRepresented());
@@ -794,8 +796,7 @@ void Texture::AddToSignature(
DCHECK(signature);
DCHECK_GE(level, 0);
size_t face_index = GLES2Util::GLTargetToFaceIndex(target);
- DCHECK_LT(static_cast<size_t>(face_index),
- face_infos_.size());
+ DCHECK_LT(face_index, face_infos_.size());
DCHECK_LT(static_cast<size_t>(level),
face_infos_[face_index].level_infos.size());
@@ -1017,8 +1018,7 @@ void Texture::SetLevelClearedRect(GLenum target,
const gfx::Rect& cleared_rect) {
DCHECK_GE(level, 0);
size_t face_index = GLES2Util::GLTargetToFaceIndex(target);
- DCHECK_LT(static_cast<size_t>(face_index),
- face_infos_.size());
+ DCHECK_LT(face_index, face_infos_.size());
DCHECK_LT(static_cast<size_t>(level),
face_infos_[face_index].level_infos.size());
Texture::LevelInfo& info =
@@ -1030,7 +1030,7 @@ void Texture::SetLevelClearedRect(GLenum target,
void Texture::SetLevelCleared(GLenum target, GLint level, bool cleared) {
DCHECK_GE(level, 0);
size_t face_index = GLES2Util::GLTargetToFaceIndex(target);
- DCHECK_LT(static_cast<size_t>(face_index), face_infos_.size());
+ DCHECK_LT(face_index, face_infos_.size());
DCHECK_LT(static_cast<size_t>(level),
face_infos_[face_index].level_infos.size());
Texture::LevelInfo& info = face_infos_[face_index].level_infos[level];
@@ -1126,12 +1126,14 @@ void Texture::IncAllFramebufferStateChangeCount() {
(*it)->manager()->IncFramebufferStateChangeCount();
}
-void Texture::UpdateBaseLevel(GLint base_level) {
+void Texture::UpdateBaseLevel(GLint base_level,
+ const FeatureInfo* feature_info) {
if (unclamped_base_level_ == base_level)
return;
unclamped_base_level_ = base_level;
UpdateNumMipLevels();
+ ApplyFormatWorkarounds(feature_info);
}
void Texture::UpdateMaxLevel(GLint max_level) {
@@ -1175,6 +1177,15 @@ void Texture::UpdateNumMipLevels() {
UpdateCanRenderCondition();
}
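+// Applies the clamped GL_TEXTURE_BASE_LEVEL/GL_TEXTURE_MAX_LEVEL values to
+// the driver when they differ from the unclamped values.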
+void Texture::ApplyClampedBaseLevelAndMaxLevelToDriver() {
+ if (base_level_ != unclamped_base_level_) {
+ glTexParameteri(target_, GL_TEXTURE_BASE_LEVEL, base_level_);
+ }
+ if (max_level_ != unclamped_max_level_) {
+ glTexParameteri(target_, GL_TEXTURE_MAX_LEVEL, max_level_);
+ }
+}
+
void Texture::SetLevelInfo(GLenum target,
GLint level,
GLenum internal_format,
@@ -1187,8 +1198,7 @@ void Texture::SetLevelInfo(GLenum target,
const gfx::Rect& cleared_rect) {
DCHECK_GE(level, 0);
size_t face_index = GLES2Util::GLTargetToFaceIndex(target);
- DCHECK_LT(static_cast<size_t>(face_index),
- face_infos_.size());
+ DCHECK_LT(face_index, face_infos_.size());
DCHECK_LT(static_cast<size_t>(level),
face_infos_[face_index].level_infos.size());
DCHECK_GE(width, 0);
@@ -1292,8 +1302,7 @@ void Texture::SetStreamTextureServiceId(GLuint service_id) {
void Texture::MarkLevelAsInternalWorkaround(GLenum target, GLint level) {
DCHECK_GE(level, 0);
size_t face_index = GLES2Util::GLTargetToFaceIndex(target);
- DCHECK_LT(static_cast<size_t>(face_index),
- face_infos_.size());
+ DCHECK_LT(face_index, face_infos_.size());
DCHECK_LT(static_cast<size_t>(level),
face_infos_[face_index].level_infos.size());
Texture::LevelInfo& info =
@@ -1320,18 +1329,12 @@ bool Texture::ValidForTexture(
int32_t max_x;
int32_t max_y;
int32_t max_z;
- return xoffset >= 0 &&
- yoffset >= 0 &&
- zoffset >= 0 &&
- width >= 0 &&
- height >= 0 &&
- depth >= 0 &&
- SafeAddInt32(xoffset, width, &max_x) &&
- SafeAddInt32(yoffset, height, &max_y) &&
- SafeAddInt32(zoffset, depth, &max_z) &&
- max_x <= info.width &&
- max_y <= info.height &&
- max_z <= info.depth;
+ return xoffset >= 0 && yoffset >= 0 && zoffset >= 0 && width >= 0 &&
+ height >= 0 && depth >= 0 &&
+ base::CheckAdd(xoffset, width).AssignIfValid(&max_x) &&
+ base::CheckAdd(yoffset, height).AssignIfValid(&max_y) &&
+ base::CheckAdd(zoffset, depth).AssignIfValid(&max_z) &&
+ max_x <= info.width && max_y <= info.height && max_z <= info.depth;
}
return false;
}
@@ -1440,7 +1443,7 @@ GLenum Texture::SetParameteri(
if (param < 0) {
return GL_INVALID_VALUE;
}
- UpdateBaseLevel(param);
+ UpdateBaseLevel(param, feature_info);
break;
case GL_TEXTURE_MAX_LEVEL:
if (param < 0) {
@@ -1774,7 +1777,7 @@ void Texture::SetLevelImageInternal(GLenum target,
DCHECK(!stream_texture_image || stream_texture_image == image);
DCHECK_GE(level, 0);
size_t face_index = GLES2Util::GLTargetToFaceIndex(target);
- DCHECK_LT(static_cast<size_t>(face_index), face_infos_.size());
+ DCHECK_LT(face_index, face_infos_.size());
DCHECK_LT(static_cast<size_t>(level),
face_infos_[face_index].level_infos.size());
Texture::LevelInfo& info = face_infos_[face_index].level_infos[level];
@@ -1809,7 +1812,7 @@ void Texture::SetLevelStreamTextureImage(GLenum target,
void Texture::SetLevelImageState(GLenum target, GLint level, ImageState state) {
DCHECK_GE(level, 0);
size_t face_index = GLES2Util::GLTargetToFaceIndex(target);
- DCHECK_LT(static_cast<size_t>(face_index), face_infos_.size());
+ DCHECK_LT(face_index, face_infos_.size());
DCHECK_LT(static_cast<size_t>(level),
face_infos_[face_index].level_infos.size());
Texture::LevelInfo& info = face_infos_[face_index].level_infos[level];
@@ -1937,7 +1940,7 @@ void Texture::SetCompatibilitySwizzle(const CompatibilitySwizzle* swizzle) {
GetSwizzleForChannel(swizzle_a_, swizzle));
}
-void Texture::ApplyFormatWorkarounds(FeatureInfo* feature_info) {
+void Texture::ApplyFormatWorkarounds(const FeatureInfo* feature_info) {
if (feature_info->gl_version_info().NeedsLuminanceAlphaEmulation()) {
if (static_cast<size_t>(base_level_) >= face_infos_[0].level_infos.size())
return;
@@ -2202,6 +2205,13 @@ bool TextureManager::ClearTextureLevel(DecoderContext* decoder,
GLint level) {
DCHECK(ref);
Texture* texture = ref->texture();
+ return ClearTextureLevel(decoder, texture, target, level);
+}
+
+bool TextureManager::ClearTextureLevel(DecoderContext* decoder,
+ Texture* texture,
+ GLenum target,
+ GLint level) {
if (texture->num_uncleared_mips() == 0) {
return true;
}
@@ -2603,12 +2613,11 @@ TextureRef* TextureManager::GetTextureInfoForTargetUnlessDefault(
return texture;
}
-bool TextureManager::ValidateTexImage(
- ContextState* state,
- const char* function_name,
- const DoTexImageArguments& args,
- TextureRef** texture_ref) {
- ErrorState* error_state = state->GetErrorState();
+bool TextureManager::ValidateTexImage(ContextState* state,
+ ErrorState* error_state,
+ const char* function_name,
+ const DoTexImageArguments& args,
+ TextureRef** texture_ref) {
const Validators* validators = feature_info_->validators();
if (((args.command_type == DoTexImageArguments::kTexImage2D) &&
!validators->texture_target.IsValid(args.target)) ||
@@ -2707,7 +2716,7 @@ bool TextureManager::ValidateTexImage(
"pixel unpack buffer is not large enough");
return false;
}
- size_t type_size = GLES2Util::GetGLTypeSizeForTextures(args.type);
+ uint32_t type_size = GLES2Util::GetGLTypeSizeForTextures(args.type);
DCHECK_LT(0u, type_size);
if (offset % type_size != 0) {
ERRORSTATE_SET_GL_ERROR(
@@ -2725,6 +2734,7 @@ bool TextureManager::ValidateTexImage(
void TextureManager::DoCubeMapWorkaround(
DecoderTextureState* texture_state,
ContextState* state,
+ ErrorState* error_state,
DecoderFramebufferState* framebuffer_state,
TextureRef* texture_ref,
const char* function_name,
@@ -2759,7 +2769,7 @@ void TextureManager::DoCubeMapWorkaround(
for (GLenum face : undefined_faces) {
new_args.target = face;
new_args.pixels = zero.get();
- DoTexImage(texture_state, state, framebuffer_state,
+ DoTexImage(texture_state, state, error_state, framebuffer_state,
function_name, texture_ref, new_args);
texture->MarkLevelAsInternalWorkaround(face, args.level);
}
@@ -2769,11 +2779,13 @@ void TextureManager::DoCubeMapWorkaround(
void TextureManager::ValidateAndDoTexImage(
DecoderTextureState* texture_state,
ContextState* state,
+ ErrorState* error_state,
DecoderFramebufferState* framebuffer_state,
const char* function_name,
const DoTexImageArguments& args) {
TextureRef* texture_ref;
- if (!ValidateTexImage(state, function_name, args, &texture_ref)) {
+ if (!ValidateTexImage(state, error_state, function_name, args,
+ &texture_ref)) {
return;
}
@@ -2797,7 +2809,7 @@ void TextureManager::ValidateAndDoTexImage(
GL_SRGB));
if (need_cube_map_workaround && !buffer) {
- DoCubeMapWorkaround(texture_state, state, framebuffer_state,
+ DoCubeMapWorkaround(texture_state, state, error_state, framebuffer_state,
texture_ref, function_name, args);
}
@@ -2813,8 +2825,9 @@ void TextureManager::ValidateAndDoTexImage(
// The rows overlap in unpack memory. Upload the texture row by row to
// work around driver bug.
- ReserveTexImageToBeFilled(texture_state, state, framebuffer_state,
- function_name, texture_ref, args);
+ ReserveTexImageToBeFilled(texture_state, state, error_state,
+ framebuffer_state, function_name, texture_ref,
+ args);
DoTexSubImageArguments sub_args = {
args.target, args.level, 0, 0, 0, args.width, args.height, args.depth,
@@ -2837,8 +2850,9 @@ void TextureManager::ValidateAndDoTexImage(
const PixelStoreParams unpack_params(state->GetUnpackParams(dimension));
if (unpack_params.image_height != 0 &&
unpack_params.image_height != args.height) {
- ReserveTexImageToBeFilled(texture_state, state, framebuffer_state,
- function_name, texture_ref, args);
+ ReserveTexImageToBeFilled(texture_state, state, error_state,
+ framebuffer_state, function_name, texture_ref,
+ args);
DoTexSubImageArguments sub_args = {
args.target,
@@ -2869,8 +2883,9 @@ void TextureManager::ValidateAndDoTexImage(
if (buffer_size - args.pixels_size - ToGLuint(args.pixels) < args.padding) {
// In ValidateTexImage(), we already made sure buffer size is no less
// than offset + pixels_size.
- ReserveTexImageToBeFilled(texture_state, state, framebuffer_state,
- function_name, texture_ref, args);
+ ReserveTexImageToBeFilled(texture_state, state, error_state,
+ framebuffer_state, function_name, texture_ref,
+ args);
DoTexSubImageArguments sub_args = {
args.target, args.level, 0, 0, 0, args.width, args.height, args.depth,
@@ -2884,13 +2899,14 @@ void TextureManager::ValidateAndDoTexImage(
return;
}
}
- DoTexImage(texture_state, state, framebuffer_state,
+ DoTexImage(texture_state, state, error_state, framebuffer_state,
function_name, texture_ref, args);
}
void TextureManager::ReserveTexImageToBeFilled(
DecoderTextureState* texture_state,
ContextState* state,
+ ErrorState* error_state,
DecoderFramebufferState* framebuffer_state,
const char* function_name,
TextureRef* texture_ref,
@@ -2901,17 +2917,17 @@ void TextureManager::ReserveTexImageToBeFilled(
DoTexImageArguments new_args = args;
new_args.pixels = nullptr;
// pixels_size might be incorrect, but it's not used in this case.
- DoTexImage(texture_state, state, framebuffer_state, function_name,
- texture_ref, new_args);
+ DoTexImage(texture_state, state, error_state, framebuffer_state,
+ function_name, texture_ref, new_args);
glBindBuffer(GL_PIXEL_UNPACK_BUFFER, buffer->service_id());
state->SetBoundBuffer(GL_PIXEL_UNPACK_BUFFER, buffer);
}
bool TextureManager::ValidateTexSubImage(ContextState* state,
+ ErrorState* error_state,
const char* function_name,
const DoTexSubImageArguments& args,
TextureRef** texture_ref) {
- ErrorState* error_state = state->GetErrorState();
const Validators* validators = feature_info_->validators();
if ((args.command_type == DoTexSubImageArguments::kTexSubImage2D &&
@@ -2997,7 +3013,7 @@ bool TextureManager::ValidateTexSubImage(ContextState* state,
"pixel unpack buffer is not large enough");
return false;
}
- size_t type_size = GLES2Util::GetGLTypeSizeForTextures(args.type);
+ uint32_t type_size = GLES2Util::GetGLTypeSizeForTextures(args.type);
DCHECK_LT(0u, type_size);
if (offset % type_size != 0) {
ERRORSTATE_SET_GL_ERROR(
@@ -3022,13 +3038,14 @@ void TextureManager::ValidateAndDoTexSubImage(
DecoderContext* decoder,
DecoderTextureState* texture_state,
ContextState* state,
+ ErrorState* error_state,
DecoderFramebufferState* framebuffer_state,
const char* function_name,
const DoTexSubImageArguments& args) {
TRACE_EVENT0("gpu", "TextureManager::ValidateAndDoTexSubImage");
- ErrorState* error_state = state->GetErrorState();
TextureRef* texture_ref;
- if (!ValidateTexSubImage(state, function_name, args, &texture_ref)) {
+ if (!ValidateTexSubImage(state, error_state, function_name, args,
+ &texture_ref)) {
return;
}
@@ -3429,14 +3446,13 @@ GLenum TextureManager::AdjustTexStorageFormat(
return format;
}
-void TextureManager::DoTexImage(
- DecoderTextureState* texture_state,
- ContextState* state,
- DecoderFramebufferState* framebuffer_state,
- const char* function_name,
- TextureRef* texture_ref,
- const DoTexImageArguments& args) {
- ErrorState* error_state = state->GetErrorState();
+void TextureManager::DoTexImage(DecoderTextureState* texture_state,
+ ContextState* state,
+ ErrorState* error_state,
+ DecoderFramebufferState* framebuffer_state,
+ const char* function_name,
+ TextureRef* texture_ref,
+ const DoTexImageArguments& args) {
Texture* texture = texture_ref->texture();
GLsizei tex_width = 0;
GLsizei tex_height = 0;
@@ -3656,6 +3672,9 @@ GLenum TextureManager::ExtractFormatFromStorageFormat(GLenum internalformat) {
case GL_RGB9_E5:
case GL_RGB16F:
case GL_RGB32F:
+ case GL_RGB_YCBCR_420V_CHROMIUM:
+ case GL_RGB_YCBCR_422_CHROMIUM:
+ case GL_RGB_YCRCB_420_CHROMIUM:
return GL_RGB;
case GL_RGB8UI:
case GL_RGB8I:
@@ -3897,5 +3916,121 @@ void TextureManager::IncrementServiceIdGeneration() {
current_service_id_generation_++;
}
+const Texture::LevelInfo* Texture::GetBaseLevelInfo() const {
+ if (face_infos_.empty() ||
+ static_cast<size_t>(base_level_) >= face_infos_[0].level_infos.size()) {
+ return nullptr;
+ }
+ return &face_infos_[0].level_infos[base_level_];
+}
+
+GLenum Texture::GetInternalFormatOfBaseLevel() const {
+ const LevelInfo* level_info = GetBaseLevelInfo();
+ return level_info ? level_info->internal_format : GL_NONE;
+}
+
+bool Texture::CompatibleWithSamplerUniformType(GLenum type) const {
+ enum {
+ SAMPLER_INVALID,
+ SAMPLER_FLOAT,
+ SAMPLER_UNSIGNED,
+ SAMPLER_SIGNED,
+ SAMPLER_SHADOW,
+ } category = SAMPLER_INVALID;
+
+ switch (type) {
+ case GL_SAMPLER_2D:
+ case GL_SAMPLER_2D_RECT_ARB:
+ case GL_SAMPLER_CUBE:
+ case GL_SAMPLER_EXTERNAL_OES:
+ case GL_SAMPLER_3D:
+ case GL_SAMPLER_2D_ARRAY:
+ category = SAMPLER_FLOAT;
+ break;
+ case GL_INT_SAMPLER_2D:
+ case GL_INT_SAMPLER_3D:
+ case GL_INT_SAMPLER_CUBE:
+ case GL_INT_SAMPLER_2D_ARRAY:
+ category = SAMPLER_SIGNED;
+ break;
+ case GL_UNSIGNED_INT_SAMPLER_2D:
+ case GL_UNSIGNED_INT_SAMPLER_3D:
+ case GL_UNSIGNED_INT_SAMPLER_CUBE:
+ case GL_UNSIGNED_INT_SAMPLER_2D_ARRAY:
+ category = SAMPLER_UNSIGNED;
+ break;
+ case GL_SAMPLER_2D_SHADOW:
+ case GL_SAMPLER_2D_ARRAY_SHADOW:
+ case GL_SAMPLER_CUBE_SHADOW:
+ case GL_SAMPLER_2D_RECT_SHADOW_ARB:
+ category = SAMPLER_SHADOW;
+ break;
+ default:
+ NOTREACHED();
+ }
+
+ const LevelInfo* level_info = GetBaseLevelInfo();
+ if (!level_info) {
+ return false;
+ }
+ if ((level_info->format == GL_DEPTH_COMPONENT ||
+ level_info->format == GL_DEPTH_STENCIL) &&
+ sampler_state_.compare_mode != GL_NONE) {
+ // If TEXTURE_COMPARE_MODE is set, then depth textures can only be sampled
+ // by shadow samplers.
+ return category == SAMPLER_SHADOW;
+ }
+
+ if (level_info->type == GL_NONE && level_info->format == GL_NONE &&
+ level_info->internal_format != GL_NONE) {
+ // This is probably a compressed texture format. All compressed formats are
+ // sampled as float.
+ return category == SAMPLER_FLOAT;
+ }
+
+ bool normalized =
+ level_info->format == GL_RED || level_info->format == GL_RG ||
+ level_info->format == GL_RGB || level_info->format == GL_RGBA ||
+ level_info->format == GL_DEPTH_COMPONENT ||
+ level_info->format == GL_DEPTH_STENCIL ||
+ level_info->format == GL_LUMINANCE_ALPHA ||
+ level_info->format == GL_LUMINANCE || level_info->format == GL_ALPHA ||
+ level_info->format == GL_BGRA_EXT || level_info->format == GL_SRGB_EXT;
+ if (normalized) {
+ // All normalized texture formats are sampled as float.
+ return category == SAMPLER_FLOAT;
+ }
+
+ switch (level_info->type) {
+ case GL_HALF_FLOAT:
+ case GL_FLOAT:
+ case GL_FLOAT_32_UNSIGNED_INT_24_8_REV:
+ // Float formats.
+ return category == SAMPLER_FLOAT;
+ case GL_BYTE:
+ case GL_SHORT:
+ case GL_INT:
+ // Signed integer formats.
+ return category == SAMPLER_SIGNED;
+ case GL_UNSIGNED_BYTE:
+ case GL_UNSIGNED_SHORT:
+ case GL_UNSIGNED_INT:
+ case GL_UNSIGNED_SHORT_5_5_5_1:
+ case GL_UNSIGNED_INT_2_10_10_10_REV:
+ case GL_UNSIGNED_INT_10F_11F_11F_REV:
+ case GL_UNSIGNED_INT_5_9_9_9_REV:
+ case GL_UNSIGNED_INT_24_8:
+ // Unsigned integer formats.
+ return category == SAMPLER_UNSIGNED;
+ default:
+ NOTREACHED() << "Type: " << GLES2Util::GetStringEnum(level_info->type)
+ << " Format: "
+ << GLES2Util::GetStringEnum(level_info->format)
+ << " Internal format: "
+ << GLES2Util::GetStringEnum(level_info->internal_format);
+ }
+ return false;
+}
+
} // namespace gles2
} // namespace gpu
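
The new CompatibleWithSamplerUniformType() above reduces both the sampler uniform type and the texture's storage format to one of four categories (float, signed, unsigned, shadow) and then compares them. A minimal standalone sketch of that category-matching pattern — the enum and helper names are illustrative stand-ins, not the Chromium definitions, though the constants are the standard GLES3 sampler enums:

#include <cstdint>

enum class SamplerCategory { kInvalid, kFloat, kSigned, kUnsigned, kShadow };

// Standard GLES3 sampler type enums, one representative per category.
constexpr uint32_t kSampler2D = 0x8B5E;             // GL_SAMPLER_2D
constexpr uint32_t kIntSampler2D = 0x8DCA;          // GL_INT_SAMPLER_2D
constexpr uint32_t kUnsignedIntSampler2D = 0x8DD2;  // GL_UNSIGNED_INT_SAMPLER_2D
constexpr uint32_t kSampler2DShadow = 0x8B62;       // GL_SAMPLER_2D_SHADOW

SamplerCategory CategorizeSampler(uint32_t type) {
  switch (type) {
    case kSampler2D:            return SamplerCategory::kFloat;
    case kIntSampler2D:         return SamplerCategory::kSigned;
    case kUnsignedIntSampler2D: return SamplerCategory::kUnsigned;
    case kSampler2DShadow:      return SamplerCategory::kShadow;
    default:                    return SamplerCategory::kInvalid;
  }
}

// A texture is compatible when its storage category matches the sampler's;
// e.g. a normalized RGBA8 texture pairs only with a float sampler.
bool Compatible(SamplerCategory texture_storage, uint32_t sampler_type) {
  return CategorizeSampler(sampler_type) == texture_storage;
}

The same shape appears in the real function: one switch buckets the sampler type, a second set of checks buckets the level's format/type, and a depth texture with TEXTURE_COMPARE_MODE set is forced into the shadow bucket.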
diff --git a/chromium/gpu/command_buffer/service/texture_manager.h b/chromium/gpu/command_buffer/service/texture_manager.h
index 6f5daa0f183..fe981ad524b 100644
--- a/chromium/gpu/command_buffer/service/texture_manager.h
+++ b/chromium/gpu/command_buffer/service/texture_manager.h
@@ -13,9 +13,9 @@
#include <memory>
#include <set>
#include <string>
+#include <unordered_map>
#include <vector>
-#include "base/containers/hash_tables.h"
#include "base/macros.h"
#include "base/memory/ref_counted.h"
#include "base/memory/weak_ptr.h"
@@ -253,6 +253,7 @@ class GPU_GLES2_EXPORT Texture final : public TextureBase {
// Set the ImageState for the image bound to the given level.
void SetLevelImageState(GLenum target, GLint level, ImageState state);
+ bool CompatibleWithSamplerUniformType(GLenum type) const;
// Get the image associated with a particular level. Returns NULL if level
// does not exist.
@@ -312,6 +313,11 @@ class GPU_GLES2_EXPORT Texture final : public TextureBase {
// rectangle if level does not exist.
gfx::Rect GetLevelClearedRect(GLenum target, GLint level) const;
+ // Marks a |rect| of a particular level as cleared.
+ void SetLevelClearedRect(GLenum target,
+ GLint level,
+ const gfx::Rect& cleared_rect);
+
// Whether a particular level/face is cleared.
bool IsLevelCleared(GLenum target, GLint level) const;
// Whether a particular level/face is partially cleared.
@@ -329,7 +335,7 @@ class GPU_GLES2_EXPORT Texture final : public TextureBase {
uint64_t client_tracing_id,
const std::string& dump_name) const;
- void ApplyFormatWorkarounds(FeatureInfo* feature_info);
+ void ApplyFormatWorkarounds(const FeatureInfo* feature_info);
bool EmulatingRGB();
@@ -348,8 +354,13 @@ class GPU_GLES2_EXPORT Texture final : public TextureBase {
// Marks a particular level as cleared or uncleared.
void SetLevelCleared(GLenum target, GLint level, bool cleared);
+ void ApplyClampedBaseLevelAndMaxLevelToDriver();
+
MemoryTypeTracker* GetMemTracker();
+ // Returns GL_NONE on error.
+ GLenum GetInternalFormatOfBaseLevel() const;
+
private:
friend class MailboxManagerSync;
friend class MailboxManagerTest;
@@ -424,6 +435,8 @@ class GPU_GLES2_EXPORT Texture final : public TextureBase {
// Returns the LevelInfo for |target| and |level| if it's set, else NULL.
const LevelInfo* GetLevelInfo(GLint target, GLint level) const;
+ // Returns NULL if the base level is not defined.
+ const LevelInfo* GetBaseLevelInfo() const;
// Set the info for a particular level.
void SetLevelInfo(GLenum target,
@@ -457,11 +470,6 @@ class GPU_GLES2_EXPORT Texture final : public TextureBase {
return npot_;
}
- // Marks a |rect| of a particular level as cleared.
- void SetLevelClearedRect(GLenum target,
- GLint level,
- const gfx::Rect& cleared_rect);
-
// Updates the cleared flag for this texture by inspecting all the mips.
void UpdateCleared();
@@ -575,7 +583,7 @@ class GPU_GLES2_EXPORT Texture final : public TextureBase {
// referencing this texture.
void IncAllFramebufferStateChangeCount();
- void UpdateBaseLevel(GLint base_level);
+ void UpdateBaseLevel(GLint base_level, const FeatureInfo* feature_info);
void UpdateMaxLevel(GLint max_level);
void UpdateNumMipLevels();
@@ -932,6 +940,11 @@ class GPU_GLES2_EXPORT TextureManager
GLenum target,
GLint level);
+ bool ClearTextureLevel(DecoderContext* decoder,
+ Texture* texture,
+ GLenum target,
+ GLint level);
+
// Creates a new texture info.
TextureRef* CreateTexture(GLuint client_id, GLuint service_id);
@@ -1069,20 +1082,20 @@ class GPU_GLES2_EXPORT TextureManager
TexImageCommandType command_type;
};
- bool ValidateTexImage(
- ContextState* state,
- const char* function_name,
- const DoTexImageArguments& args,
- // Pointer to TextureRef filled in if validation successful.
- // Presumes the pointer is valid.
- TextureRef** texture_ref);
-
- void ValidateAndDoTexImage(
- DecoderTextureState* texture_state,
- ContextState* state,
- DecoderFramebufferState* framebuffer_state,
- const char* function_name,
- const DoTexImageArguments& args);
+ bool ValidateTexImage(ContextState* state,
+ ErrorState* error_state,
+ const char* function_name,
+ const DoTexImageArguments& args,
+ // Pointer to TextureRef filled in if validation
+ // successful. Presumes the pointer is valid.
+ TextureRef** texture_ref);
+
+ void ValidateAndDoTexImage(DecoderTextureState* texture_state,
+ ContextState* state,
+ ErrorState* error_state,
+ DecoderFramebufferState* framebuffer_state,
+ const char* function_name,
+ const DoTexImageArguments& args);
struct DoTexSubImageArguments {
enum TexSubImageCommandType {
@@ -1106,17 +1119,18 @@ class GPU_GLES2_EXPORT TextureManager
TexSubImageCommandType command_type;
};
- bool ValidateTexSubImage(
- ContextState* state,
- const char* function_name,
- const DoTexSubImageArguments& args,
- // Pointer to TextureRef filled in if validation successful.
- // Presumes the pointer is valid.
- TextureRef** texture_ref);
+ bool ValidateTexSubImage(ContextState* state,
+ ErrorState* error_state,
+ const char* function_name,
+ const DoTexSubImageArguments& args,
+ // Pointer to TextureRef filled in if validation
+ // successful. Presumes the pointer is valid.
+ TextureRef** texture_ref);
void ValidateAndDoTexSubImage(DecoderContext* decoder,
DecoderTextureState* texture_state,
ContextState* state,
+ ErrorState* error_state,
DecoderFramebufferState* framebuffer_state,
const char* function_name,
const DoTexSubImageArguments& args);
@@ -1162,14 +1176,14 @@ class GPU_GLES2_EXPORT TextureManager
static GLenum AdjustTexStorageFormat(const gles2::FeatureInfo* feature_info,
GLenum format);
- void WorkaroundCopyTexImageCubeMap(
- DecoderTextureState* texture_state,
- ContextState* state,
- DecoderFramebufferState* framebuffer_state,
- TextureRef* texture_ref,
- const char* function_name,
- const DoTexImageArguments& args) {
- DoCubeMapWorkaround(texture_state, state, framebuffer_state,
+ void WorkaroundCopyTexImageCubeMap(DecoderTextureState* texture_state,
+ ContextState* state,
+ ErrorState* error_state,
+ DecoderFramebufferState* framebuffer_state,
+ TextureRef* texture_ref,
+ const char* function_name,
+ const DoTexImageArguments& args) {
+ DoCubeMapWorkaround(texture_state, state, error_state, framebuffer_state,
texture_ref, function_name, args);
}
@@ -1182,19 +1196,20 @@ class GPU_GLES2_EXPORT TextureManager
GLenum target,
GLuint* black_texture);
- void DoTexImage(
- DecoderTextureState* texture_state,
- ContextState* state,
- DecoderFramebufferState* framebuffer_state,
- const char* function_name,
- TextureRef* texture_ref,
- const DoTexImageArguments& args);
+ void DoTexImage(DecoderTextureState* texture_state,
+ ContextState* state,
+ ErrorState* error_state,
+ DecoderFramebufferState* framebuffer_state,
+ const char* function_name,
+ TextureRef* texture_ref,
+ const DoTexImageArguments& args);
// Reserve memory for the texture and set its attributes so it can be filled
// with TexSubImage. The image contents are undefined after this function,
// so make sure it's subsequently filled in its entirety.
void ReserveTexImageToBeFilled(DecoderTextureState* texture_state,
ContextState* state,
+ ErrorState* error_state,
DecoderFramebufferState* framebuffer_state,
const char* function_name,
TextureRef* texture_ref,
@@ -1216,13 +1231,13 @@ class GPU_GLES2_EXPORT TextureManager
const DoTexSubImageArguments& args,
const PixelStoreParams& unpack_params);
- void DoCubeMapWorkaround(
- DecoderTextureState* texture_state,
- ContextState* state,
- DecoderFramebufferState* framebuffer_state,
- TextureRef* texture_ref,
- const char* function_name,
- const DoTexImageArguments& args);
+ void DoCubeMapWorkaround(DecoderTextureState* texture_state,
+ ContextState* state,
+ ErrorState* error_state,
+ DecoderFramebufferState* framebuffer_state,
+ TextureRef* texture_ref,
+ const char* function_name,
+ const DoTexImageArguments& args);
void StartTracking(TextureRef* texture);
void StopTracking(TextureRef* texture);
@@ -1247,7 +1262,7 @@ class GPU_GLES2_EXPORT TextureManager
std::vector<FramebufferManager*> framebuffer_managers_;
// Info for each texture in the system.
- typedef base::hash_map<GLuint, scoped_refptr<TextureRef> > TextureMap;
+ typedef std::unordered_map<GLuint, scoped_refptr<TextureRef>> TextureMap;
TextureMap textures_;
GLsizei max_texture_size_;
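
A recurring theme in this header change is that ValidateTexImage(), DoTexImage(), and the related helpers now take an ErrorState* parameter instead of fetching it internally via state->GetErrorState(). A minimal sketch of that dependency-injection pattern, with illustrative stand-in types rather than the real Chromium classes:

// The validator no longer needs the full ContextState just to report errors;
// callers pass the error sink explicitly.
struct ErrorState {
  void SetGLError(int error, const char* function_name, const char* msg) {
    // Record the error; body elided in this sketch.
  }
};

bool ValidateSomething(ErrorState* error_state,
                       const char* function_name,
                       int value) {
  if (value < 0) {
    error_state->SetGLError(/*GL_INVALID_VALUE=*/0x0501, function_name,
                            "value < 0");
    return false;
  }
  return true;
}

The practical benefit is that a caller (such as a decoder that owns its own ErrorState) can route errors without the texture manager depending on where ContextState keeps them.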
diff --git a/chromium/gpu/command_buffer/service/transform_feedback_manager.cc b/chromium/gpu/command_buffer/service/transform_feedback_manager.cc
index b38076b05c5..95f7a57cae0 100644
--- a/chromium/gpu/command_buffer/service/transform_feedback_manager.cc
+++ b/chromium/gpu/command_buffer/service/transform_feedback_manager.cc
@@ -102,11 +102,13 @@ void TransformFeedback::DoResumeTransformFeedback() {
bool TransformFeedback::GetVerticesNeededForDraw(GLenum mode,
GLsizei count,
GLsizei primcount,
+ GLsizei pending_vertices_drawn,
GLsizei* vertices_out) const {
// Transform feedback only outputs complete primitives, so we need to round
// down to the nearest complete primitive before multiplying by the number of
// instances.
- base::CheckedNumeric<GLsizei> checked_vertices = vertices_drawn_;
+ base::CheckedNumeric<GLsizei> checked_vertices =
+ vertices_drawn_ + pending_vertices_drawn;
base::CheckedNumeric<GLsizei> checked_count = count;
base::CheckedNumeric<GLsizei> checked_primcount = primcount;
switch (mode) {
@@ -131,14 +133,9 @@ bool TransformFeedback::GetVerticesNeededForDraw(GLenum mode,
return checked_vertices.IsValid();
}
-void TransformFeedback::OnVerticesDrawn(GLenum mode,
- GLsizei count,
- GLsizei primcount) {
+void TransformFeedback::OnVerticesDrawn(GLsizei vertices_drawn) {
if (active_ && !paused_) {
- GLsizei vertices = 0;
- bool valid = GetVerticesNeededForDraw(mode, count, primcount, &vertices);
- DCHECK(valid);
- vertices_drawn_ = vertices;
+ vertices_drawn_ = vertices_drawn;
}
}
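
With this change OnVerticesDrawn() no longer recomputes anything: callers validate each draw with GetVerticesNeededForDraw(), threading the running total through |pending_vertices_drawn|, and commit the final count once every draw has passed. A standalone sketch of that validate-then-commit pattern, with plain int arithmetic standing in for base::CheckedNumeric and hypothetical names throughout:

#include <limits>

// Overflow-checked add; counts are assumed non-negative here.
bool CheckedAdd(int a, int b, int* out) {
  if (b > 0 && a > std::numeric_limits<int>::max() - b)
    return false;  // Would overflow; reject.
  *out = a + b;
  return true;
}

// Validate a whole batch of draws against the running vertex total before
// committing any of them; on success the caller makes a single
// OnVerticesDrawn(*total_out) call.
bool ValidateDraws(const int* counts, int n, int already_drawn,
                   int* total_out) {
  int total = already_drawn;
  for (int i = 0; i < n; ++i) {
    if (!CheckedAdd(total, counts[i], &total))
      return false;  // One overflow rejects the entire batch.
  }
  *total_out = total;
  return true;
}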
diff --git a/chromium/gpu/command_buffer/service/transform_feedback_manager.h b/chromium/gpu/command_buffer/service/transform_feedback_manager.h
index d4b0540399b..dc550793673 100644
--- a/chromium/gpu/command_buffer/service/transform_feedback_manager.h
+++ b/chromium/gpu/command_buffer/service/transform_feedback_manager.h
@@ -5,9 +5,9 @@
#ifndef GPU_COMMAND_BUFFER_SERVICE_TRANSFORM_FEEDBACK_MANAGER_H_
#define GPU_COMMAND_BUFFER_SERVICE_TRANSFORM_FEEDBACK_MANAGER_H_
+#include <unordered_map>
#include <vector>
-#include "base/containers/hash_tables.h"
#include "base/memory/ref_counted.h"
#include "gpu/command_buffer/service/gl_utils.h"
#include "gpu/command_buffer/service/indexed_buffer_binding_host.h"
@@ -64,17 +64,20 @@ class GPU_GLES2_EXPORT TransformFeedback : public IndexedBufferBindingHost {
// Calculates the number of vertices that this draw call will write to the
// transform feedback buffer, plus the number of vertices that were previously
// written since the last call to BeginTransformFeedback (because vertices are
- // written starting just after the last vertex written by the previous draw).
+ // written starting just after the last vertex written by the previous draw),
+ // plus |pending_vertices_drawn|. The pending vertices are used to iteratively
+ // validate and accumulate the number of vertices drawn for multiple draws.
// This is used to calculate whether there is enough space in the transform
// feedback buffers. Returns false on integer overflow.
bool GetVerticesNeededForDraw(GLenum mode,
GLsizei count,
GLsizei primcount,
+ GLsizei pending_vertices_drawn,
GLsizei* vertices_out) const;
// This must be called every time a transform feedback draw happens to keep
// track of how many vertices have been written to the transform feedback
// buffers.
- void OnVerticesDrawn(GLenum mode, GLsizei count, GLsizei primcount);
+ void OnVerticesDrawn(GLsizei vertices_drawn);
private:
~TransformFeedback() override;
@@ -135,8 +138,8 @@ class GPU_GLES2_EXPORT TransformFeedbackManager {
private:
// Info for each transform feedback in the system.
- base::hash_map<GLuint,
- scoped_refptr<TransformFeedback> > transform_feedbacks_;
+ std::unordered_map<GLuint, scoped_refptr<TransformFeedback>>
+ transform_feedbacks_;
GLuint max_transform_feedback_separate_attribs_;
diff --git a/chromium/gpu/command_buffer/service/vertex_array_manager.h b/chromium/gpu/command_buffer/service/vertex_array_manager.h
index 29cc3d67888..2053fb2342b 100644
--- a/chromium/gpu/command_buffer/service/vertex_array_manager.h
+++ b/chromium/gpu/command_buffer/service/vertex_array_manager.h
@@ -7,7 +7,8 @@
#include <stdint.h>
-#include "base/containers/hash_tables.h"
+#include <unordered_map>
+
#include "base/logging.h"
#include "base/macros.h"
#include "base/memory/ref_counted.h"
@@ -56,7 +57,7 @@ class GPU_GLES2_EXPORT VertexArrayManager {
void StopTracking(VertexAttribManager* vertex_attrib_manager);
// Info for each vertex array in the system.
- typedef base::hash_map<GLuint, scoped_refptr<VertexAttribManager> >
+ typedef std::unordered_map<GLuint, scoped_refptr<VertexAttribManager>>
VertexAttribManagerMap;
VertexAttribManagerMap client_vertex_attrib_managers_;
diff --git a/chromium/gpu/command_buffer/service/webgpu_decoder.cc b/chromium/gpu/command_buffer/service/webgpu_decoder.cc
index 50388ac8b21..6e1cb4e7d64 100644
--- a/chromium/gpu/command_buffer/service/webgpu_decoder.cc
+++ b/chromium/gpu/command_buffer/service/webgpu_decoder.cc
@@ -67,7 +67,7 @@ class WebGPUDecoderImpl final : public WebGPUDecoder {
void RestoreFramebufferBindings() const override { NOTREACHED(); }
void RestoreRenderbufferBindings() override { NOTREACHED(); }
void RestoreProgramBindings() const override { NOTREACHED(); }
- void RestoreTextureState(unsigned service_id) const override { NOTREACHED(); }
+ void RestoreTextureState(unsigned service_id) override { NOTREACHED(); }
void RestoreTextureUnitBindings(unsigned unit) const override {
NOTREACHED();
}
diff --git a/chromium/gpu/command_buffer/service/wrapped_sk_image.cc b/chromium/gpu/command_buffer/service/wrapped_sk_image.cc
index 911e9171dc6..b551da60f9d 100644
--- a/chromium/gpu/command_buffer/service/wrapped_sk_image.cc
+++ b/chromium/gpu/command_buffer/service/wrapped_sk_image.cc
@@ -12,9 +12,10 @@
#include "components/viz/common/resources/resource_format_utils.h"
#include "gpu/command_buffer/common/shared_image_trace_utils.h"
#include "gpu/command_buffer/service/mailbox_manager.h"
-#include "gpu/command_buffer/service/raster_decoder_context_state.h"
+#include "gpu/command_buffer/service/shared_context_state.h"
#include "gpu/command_buffer/service/shared_image_backing.h"
#include "gpu/command_buffer/service/shared_image_representation.h"
+#include "third_party/skia/include/core/SkPromiseImageTexture.h"
#include "third_party/skia/include/core/SkSurface.h"
#include "third_party/skia/include/core/SkSurfaceProps.h"
#include "third_party/skia/include/gpu/GrBackendSurface.h"
@@ -30,10 +31,10 @@ namespace {
class WrappedSkImage : public SharedImageBacking {
public:
~WrappedSkImage() override {
- DCHECK(context_state_->context_lost ||
- context_state_->context->IsCurrent(nullptr));
- if (!context_state_->context_lost)
- context_state_->need_context_state_reset = true;
+ DCHECK(context_state_->context_lost() ||
+ context_state_->IsCurrent(nullptr));
+ if (!context_state_->context_lost())
+ context_state_->set_need_context_state_reset(true);
}
// SharedImageBacking implementation.
@@ -70,22 +71,18 @@ class WrappedSkImage : public SharedImageBacking {
sk_sp<SkSurface> GetSkSurface(int final_msaa_count,
SkColorType color_type,
const SkSurfaceProps& surface_props) {
- if (context_state_->context_lost)
+ if (context_state_->context_lost())
return nullptr;
- DCHECK(context_state_->context->IsCurrent(nullptr));
+ DCHECK(context_state_->IsCurrent(nullptr));
GrBackendTexture gr_texture =
image_->getBackendTexture(/*flushPendingGrContextIO=*/true);
DCHECK(gr_texture.isValid());
return SkSurface::MakeFromBackendTextureAsRenderTarget(
- context_state_->gr_context, gr_texture, kTopLeft_GrSurfaceOrigin,
+ context_state_->gr_context(), gr_texture, kTopLeft_GrSurfaceOrigin,
final_msaa_count, color_type, /*colorSpace=*/nullptr, &surface_props);
}
- bool GetGrBackendTexture(GrBackendTexture* gr_texture) const {
- context_state_->need_context_state_reset = true;
- *gr_texture = image_->getBackendTexture(/*flushPendingGrContextIO=*/true);
- return gr_texture->isValid();
- }
+ sk_sp<SkPromiseImageTexture> promise_texture() { return promise_texture_; }
protected:
std::unique_ptr<SharedImageRepresentationSkia> ProduceSkia(
@@ -101,7 +98,7 @@ class WrappedSkImage : public SharedImageBacking {
const gfx::ColorSpace& color_space,
uint32_t usage,
size_t estimated_size,
- raster::RasterDecoderContextState* context_state)
+ SharedContextState* context_state)
: SharedImageBacking(mailbox,
format,
size,
@@ -112,19 +109,33 @@ class WrappedSkImage : public SharedImageBacking {
DCHECK(!!context_state_);
}
- bool Initialize(const SkImageInfo& info) {
- if (context_state_->context_lost)
+ bool Initialize(const SkImageInfo& info, base::span<const uint8_t> data) {
+ if (context_state_->context_lost())
return false;
- DCHECK(context_state_->context->IsCurrent(nullptr));
+ DCHECK(context_state_->IsCurrent(nullptr));
- context_state_->need_context_state_reset = true;
+ context_state_->set_need_context_state_reset(true);
- auto surface = SkSurface::MakeRenderTarget(context_state_->gr_context,
- SkBudgeted::kNo, info);
- if (!surface)
- return false;
+ if (data.empty()) {
+ auto surface = SkSurface::MakeRenderTarget(context_state_->gr_context(),
+ SkBudgeted::kNo, info);
+ if (!surface)
+ return false;
+
+ image_ = surface->makeImageSnapshot();
+ } else {
+ SkBitmap bitmap;
+ if (!bitmap.installPixels(info, const_cast<uint8_t*>(data.data()),
+ info.minRowBytes())) {
+ return false;
+ }
+ sk_sp<SkImage> image = SkImage::MakeFromBitmap(bitmap);
+ if (!image)
+ return false;
+ image_ = image->makeTextureImage(context_state_->gr_context(),
+ image->colorSpace());
+ }
- image_ = surface->makeImageSnapshot();
if (!image_ || !image_->isTextureBacked())
return false;
@@ -132,6 +143,7 @@ class WrappedSkImage : public SharedImageBacking {
image_->getBackendTexture(/*flushPendingGrContextIO=*/false);
if (!gr_texture.isValid())
return false;
+ promise_texture_ = SkPromiseImageTexture::Make(gr_texture);
switch (gr_texture.backend()) {
case GrBackendApi::kOpenGL: {
@@ -153,9 +165,11 @@ class WrappedSkImage : public SharedImageBacking {
return true;
}
- RasterDecoderContextState* const context_state_;
+ SharedContextState* const context_state_;
sk_sp<SkImage> image_;
+ sk_sp<SkPromiseImageTexture> promise_texture_;
+
bool cleared_ = false;
uint64_t tracing_id_ = 0;
@@ -175,10 +189,12 @@ class WrappedSkImageRepresentation : public SharedImageRepresentationSkia {
sk_sp<SkSurface> BeginWriteAccess(
GrContext* gr_context,
int final_msaa_count,
- SkColorType color_type,
const SkSurfaceProps& surface_props) override {
- auto surface = wrapped_sk_image()->GetSkSurface(final_msaa_count,
- color_type, surface_props);
+ SkColorType sk_color_type = viz::ResourceFormatToClosestSkColorType(
+ /*gpu_compositing=*/true, format());
+
+ auto surface = wrapped_sk_image()->GetSkSurface(
+ final_msaa_count, sk_color_type, surface_props);
write_surface_ = surface.get();
return surface;
}
@@ -189,11 +205,8 @@ class WrappedSkImageRepresentation : public SharedImageRepresentationSkia {
write_surface_ = nullptr;
}
- bool BeginReadAccess(SkColorType color_type,
- GrBackendTexture* backend_texture) override {
- if (!wrapped_sk_image()->GetGrBackendTexture(backend_texture))
- return false;
- return true;
+ sk_sp<SkPromiseImageTexture> BeginReadAccess(SkSurface* sk_surface) override {
+ return wrapped_sk_image()->promise_texture();
}
void EndReadAccess() override {
@@ -210,8 +223,7 @@ class WrappedSkImageRepresentation : public SharedImageRepresentationSkia {
} // namespace
-WrappedSkImageFactory::WrappedSkImageFactory(
- RasterDecoderContextState* context_state)
+WrappedSkImageFactory::WrappedSkImageFactory(SharedContextState* context_state)
: context_state_(context_state) {}
WrappedSkImageFactory::~WrappedSkImageFactory() = default;
@@ -222,6 +234,17 @@ std::unique_ptr<SharedImageBacking> WrappedSkImageFactory::CreateSharedImage(
const gfx::Size& size,
const gfx::ColorSpace& color_space,
uint32_t usage) {
+ return CreateSharedImage(mailbox, format, size, color_space, usage,
+ base::span<uint8_t>());
+}
+
+std::unique_ptr<SharedImageBacking> WrappedSkImageFactory::CreateSharedImage(
+ const Mailbox& mailbox,
+ viz::ResourceFormat format,
+ const gfx::Size& size,
+ const gfx::ColorSpace& color_space,
+ uint32_t usage,
+ base::span<const uint8_t> data) {
auto info = SkImageInfo::Make(size.width(), size.height(),
ResourceFormatToClosestSkColorType(
/*gpu_compositing=*/true, format),
@@ -230,7 +253,7 @@ std::unique_ptr<SharedImageBacking> WrappedSkImageFactory::CreateSharedImage(
std::unique_ptr<WrappedSkImage> texture(
new WrappedSkImage(mailbox, format, size, color_space, usage,
estimated_size, context_state_));
- if (!texture->Initialize(info))
+ if (!texture->Initialize(info, data))
return nullptr;
return texture;
}
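
Initialize() now supports seeding the backing with pixel data: empty data keeps the old path (snapshot of a fresh render target), while non-empty data is wrapped in an SkBitmap and uploaded as a texture-backed image. A condensed sketch of the two paths, assuming the usual Skia headers (SkSurface.h, SkImage.h, SkBitmap.h) and omitting the Chromium plumbing and context-state bookkeeping:

sk_sp<SkImage> MakeBackingImage(GrContext* gr_context,
                                const SkImageInfo& info,
                                const uint8_t* data, size_t size) {
  if (size == 0) {
    // Uninitialized backing: snapshot an empty GPU render target.
    sk_sp<SkSurface> surface =
        SkSurface::MakeRenderTarget(gr_context, SkBudgeted::kNo, info);
    return surface ? surface->makeImageSnapshot() : nullptr;
  }
  // Initialized backing: wrap the caller's bytes (no copy) and upload.
  SkBitmap bitmap;
  if (!bitmap.installPixels(info, const_cast<uint8_t*>(data),
                            info.minRowBytes()))
    return nullptr;
  sk_sp<SkImage> image = SkImage::MakeFromBitmap(bitmap);
  return image ? image->makeTextureImage(gr_context, image->colorSpace())
               : nullptr;
}

Either way the result must end up texture-backed, which is why the real code follows up with isTextureBacked() and getBackendTexture() checks before wrapping the texture in an SkPromiseImageTexture.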
diff --git a/chromium/gpu/command_buffer/service/wrapped_sk_image.h b/chromium/gpu/command_buffer/service/wrapped_sk_image.h
index 02458e51710..ca108bc35c0 100644
--- a/chromium/gpu/command_buffer/service/wrapped_sk_image.h
+++ b/chromium/gpu/command_buffer/service/wrapped_sk_image.h
@@ -16,14 +16,15 @@
#include "ui/gfx/geometry/size.h"
namespace gpu {
-namespace raster {
-struct RasterDecoderContextState;
+class SharedContextState;
+
+namespace raster {
class GPU_GLES2_EXPORT WrappedSkImageFactory
: public gpu::SharedImageBackingFactory {
public:
- explicit WrappedSkImageFactory(RasterDecoderContextState* context_state);
+ explicit WrappedSkImageFactory(SharedContextState* context_state);
~WrappedSkImageFactory() override;
// SharedImageBackingFactory implementation:
@@ -35,6 +36,13 @@ class GPU_GLES2_EXPORT WrappedSkImageFactory
uint32_t usage) override;
std::unique_ptr<SharedImageBacking> CreateSharedImage(
const Mailbox& mailbox,
+ viz::ResourceFormat format,
+ const gfx::Size& size,
+ const gfx::ColorSpace& color_space,
+ uint32_t usage,
+ base::span<const uint8_t> pixel_data) override;
+ std::unique_ptr<SharedImageBacking> CreateSharedImage(
+ const Mailbox& mailbox,
int client_id,
gfx::GpuMemoryBufferHandle handle,
gfx::BufferFormat format,
@@ -44,7 +52,7 @@ class GPU_GLES2_EXPORT WrappedSkImageFactory
uint32_t usage) override;
private:
- RasterDecoderContextState* const context_state_;
+ SharedContextState* const context_state_;
DISALLOW_COPY_AND_ASSIGN(WrappedSkImageFactory);
};
diff --git a/chromium/gpu/config/BUILD.gn b/chromium/gpu/config/BUILD.gn
index 495a9eef9f7..70df68296d4 100644
--- a/chromium/gpu/config/BUILD.gn
+++ b/chromium/gpu/config/BUILD.gn
@@ -157,7 +157,7 @@ jumbo_source_set("config_sources") {
"//media:media_buildflags",
"//third_party/re2",
"//ui/gl",
- "//ui/gl:gl_features",
+ "//ui/gl:buildflags",
"//ui/gl/init",
]
diff --git a/chromium/gpu/config/gpu_control_list.cc b/chromium/gpu/config/gpu_control_list.cc
index b3c3456e0db..4e28d955d44 100644
--- a/chromium/gpu/config/gpu_control_list.cc
+++ b/chromium/gpu/config/gpu_control_list.cc
@@ -310,6 +310,22 @@ bool GpuControlList::More::Contains(const GPUInfo& gpu_info) const {
!pixel_shader_version.Contains(gpu_info.pixel_shader_version)) {
return false;
}
+ switch (hardware_overlay) {
+ case kDontCare:
+ break;
+ case kSupported:
+#if defined(OS_WIN)
+ if (!gpu_info.supports_overlays)
+ return false;
+#endif // OS_WIN
+ break;
+ case kUnsupported:
+#if defined(OS_WIN)
+ if (gpu_info.supports_overlays)
+ return false;
+#endif // OS_WIN
+ break;
+ }
return true;
}
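
The new hardware_overlay condition is a tri-state: kDontCare never filters, while kSupported and kUnsupported require the GPUInfo flag to match (and only on Windows, where supports_overlays is populated). A standalone sketch of the matching rule, with stand-in types rather than the Chromium ones:

enum SupportedOrNot { kSupported, kUnsupported, kDontCare };

// Returns true when the control-list entry's overlay requirement is
// consistent with what the GPU reports.
bool HardwareOverlayMatches(SupportedOrNot entry_requirement,
                            bool gpu_supports_overlays) {
  switch (entry_requirement) {
    case kDontCare:
      return true;  // Entry does not constrain overlay support.
    case kSupported:
      return gpu_supports_overlays;
    case kUnsupported:
      return !gpu_supports_overlays;
  }
  return true;
}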
diff --git a/chromium/gpu/config/gpu_control_list.h b/chromium/gpu/config/gpu_control_list.h
index 64ac36f2ea5..e84b9625f44 100644
--- a/chromium/gpu/config/gpu_control_list.h
+++ b/chromium/gpu/config/gpu_control_list.h
@@ -9,9 +9,9 @@
#include <set>
#include <string>
+#include <unordered_map>
#include <vector>
-#include "base/containers/hash_tables.h"
#include "base/values.h"
#include "gpu/config/gpu_info.h"
#include "gpu/gpu_export.h"
@@ -22,7 +22,7 @@ struct GPUInfo;
class GPU_EXPORT GpuControlList {
public:
- typedef base::hash_map<int, std::string> FeatureMap;
+ typedef std::unordered_map<int, std::string> FeatureMap;
enum OsType {
kOsLinux,
@@ -85,6 +85,12 @@ class GPU_EXPORT GpuControlList {
kVersionStyleUnknown
};
+ enum SupportedOrNot {
+ kSupported,
+ kUnsupported,
+ kDontCare,
+ };
+
struct GPU_EXPORT Version {
NumericOp op;
VersionStyle style;
@@ -149,6 +155,7 @@ class GPU_EXPORT GpuControlList {
uint32_t gl_reset_notification_strategy;
bool direct_rendering;
Version gpu_count;
+ SupportedOrNot hardware_overlay;
uint32_t test_group;
diff --git a/chromium/gpu/config/gpu_control_list_entry_unittest.cc b/chromium/gpu/config/gpu_control_list_entry_unittest.cc
index 1d1b47c9128..6214f409816 100644
--- a/chromium/gpu/config/gpu_control_list_entry_unittest.cc
+++ b/chromium/gpu/config/gpu_control_list_entry_unittest.cc
@@ -4,6 +4,8 @@
#include <stddef.h>
+#include "base/stl_util.h"
+#include "build/build_config.h"
#include "gpu/config/gpu_control_list.h"
#include "gpu/config/gpu_control_list_testing_data.h"
#include "gpu/config/gpu_info.h"
@@ -86,7 +88,7 @@ TEST_F(GpuControlListEntryTest, VendorOnAllOsEntry) {
EXPECT_EQ(kOsAny, entry.conditions.os_type);
const GpuControlList::OsType os_type[] = {kOsMacosx, kOsWin, kOsLinux,
kOsChromeOS, kOsAndroid};
- for (size_t i = 0; i < arraysize(os_type); ++i)
+ for (size_t i = 0; i < base::size(os_type); ++i)
EXPECT_TRUE(entry.Contains(os_type[i], "10.6", gpu_info()));
}
@@ -95,7 +97,7 @@ TEST_F(GpuControlListEntryTest, VendorOnLinuxEntry) {
EXPECT_EQ(kOsLinux, entry.conditions.os_type);
const GpuControlList::OsType os_type[] = {kOsMacosx, kOsWin, kOsChromeOS,
kOsAndroid};
- for (size_t i = 0; i < arraysize(os_type); ++i)
+ for (size_t i = 0; i < base::size(os_type); ++i)
EXPECT_FALSE(entry.Contains(os_type[i], "10.6", gpu_info()));
EXPECT_TRUE(entry.Contains(kOsLinux, "10.6", gpu_info()));
}
@@ -106,7 +108,7 @@ TEST_F(GpuControlListEntryTest, AllExceptNVidiaOnLinuxEntry) {
EXPECT_EQ(kOsLinux, entry.conditions.os_type);
const GpuControlList::OsType os_type[] = {kOsMacosx, kOsWin, kOsLinux,
kOsChromeOS, kOsAndroid};
- for (size_t i = 0; i < arraysize(os_type); ++i)
+ for (size_t i = 0; i < base::size(os_type); ++i)
EXPECT_FALSE(entry.Contains(os_type[i], "10.6", gpu_info()));
}
@@ -116,7 +118,7 @@ TEST_F(GpuControlListEntryTest, AllExceptIntelOnLinuxEntry) {
EXPECT_EQ(kOsLinux, entry.conditions.os_type);
const GpuControlList::OsType os_type[] = {kOsMacosx, kOsWin, kOsChromeOS,
kOsAndroid};
- for (size_t i = 0; i < arraysize(os_type); ++i)
+ for (size_t i = 0; i < base::size(os_type); ++i)
EXPECT_FALSE(entry.Contains(os_type[i], "10.6", gpu_info()));
EXPECT_TRUE(entry.Contains(kOsLinux, "10.6", gpu_info()));
}
@@ -138,7 +140,7 @@ TEST_F(GpuControlListEntryTest, MultipleDevicesEntry) {
EXPECT_EQ(kOsAny, entry.conditions.os_type);
const GpuControlList::OsType os_type[] = {kOsMacosx, kOsWin, kOsLinux,
kOsChromeOS, kOsAndroid};
- for (size_t i = 0; i < arraysize(os_type); ++i)
+ for (size_t i = 0; i < base::size(os_type); ++i)
EXPECT_TRUE(entry.Contains(os_type[i], "10.6", gpu_info()));
}
@@ -147,7 +149,7 @@ TEST_F(GpuControlListEntryTest, ChromeOSEntry) {
EXPECT_EQ(kOsChromeOS, entry.conditions.os_type);
const GpuControlList::OsType os_type[] = {kOsMacosx, kOsWin, kOsLinux,
kOsAndroid};
- for (size_t i = 0; i < arraysize(os_type); ++i)
+ for (size_t i = 0; i < base::size(os_type); ++i)
EXPECT_FALSE(entry.Contains(os_type[i], "10.6", gpu_info()));
EXPECT_TRUE(entry.Contains(kOsChromeOS, "10.6", gpu_info()));
}
@@ -547,7 +549,7 @@ TEST_F(GpuControlListEntryTest, OsComparison) {
const Entry& entry = GetEntry(kGpuControlListEntryTest_OsComparisonAny);
const GpuControlList::OsType os_type[] = {kOsWin, kOsLinux, kOsMacosx,
kOsChromeOS, kOsAndroid};
- for (size_t i = 0; i < arraysize(os_type); ++i) {
+ for (size_t i = 0; i < base::size(os_type); ++i) {
EXPECT_TRUE(entry.Contains(os_type[i], std::string(), gpu_info()));
EXPECT_TRUE(entry.Contains(os_type[i], "7.8", gpu_info()));
}
@@ -868,4 +870,17 @@ TEST_F(GpuControlListEntryTest, MultipleDrivers) {
EXPECT_TRUE(entry.Contains(kOsWin, "10.0", gpu_info));
}
+#if defined(OS_WIN)
+TEST_F(GpuControlListEntryTest, HardwareOverlay) {
+ const Entry& entry = GetEntry(kGpuControlListEntryTest_HardwareOverlay);
+ GPUInfo gpu_info;
+ gpu_info.gpu.vendor_id = 0x8086;
+ gpu_info.supports_overlays = true;
+ EXPECT_FALSE(entry.Contains(kOsWin, "10.0", gpu_info));
+
+ gpu_info.supports_overlays = false;
+ EXPECT_TRUE(entry.Contains(kOsWin, "10.0", gpu_info));
+}
+#endif // OS_WIN
+
} // namespace gpu
diff --git a/chromium/gpu/config/gpu_control_list_format.txt b/chromium/gpu/config/gpu_control_list_format.txt
index 1cfd6522896..e9ff9a1b84e 100644
--- a/chromium/gpu/config/gpu_control_list_format.txt
+++ b/chromium/gpu/config/gpu_control_list_format.txt
@@ -73,6 +73,10 @@
// "intel_haswell", "intel_cherryview", "intel_broadwell",
// "intel_apollolake", "intel_skylake", "intel_geminilake",
// "intel_kabylake", "intel_coffeelake".
+// 30. "hardware_overlay" is either "supported" or "unsupported". Currently it
+//     only applies on Windows, where hardware overlays may be supported on
+//     certain Intel GPUs. The default is "dont_care", which does not need to
+//     be specified.
//
// VERSION includes "op", "style", "value", and "value2". "op" can be any of
// the following values: "=", "<", "<=", ">", ">=", "any", "between". "style"
diff --git a/chromium/gpu/config/gpu_control_list_testing.json b/chromium/gpu/config/gpu_control_list_testing.json
index 114173c9c78..3c4cd7764dd 100644
--- a/chromium/gpu/config/gpu_control_list_testing.json
+++ b/chromium/gpu/config/gpu_control_list_testing.json
@@ -814,6 +814,15 @@
"features": [
"test_feature_0"
]
+ },
+ {
+ "id": 68,
+ "description": "GpuControlListEntryTest.HardwareOverlay",
+ "vendor_id": "0x8086",
+ "hardware_overlay": "unsupported",
+ "features": [
+ "test_feature_0"
+ ]
}
]
}
diff --git a/chromium/gpu/config/gpu_control_list_testing_arrays_and_structs_autogen.h b/chromium/gpu/config/gpu_control_list_testing_arrays_and_structs_autogen.h
index 96573345737..f543d980110 100644
--- a/chromium/gpu/config/gpu_control_list_testing_arrays_and_structs_autogen.h
+++ b/chromium/gpu/config/gpu_control_list_testing_arrays_and_structs_autogen.h
@@ -19,11 +19,13 @@ const int kFeatureListForGpuControlTestingEntry1[1] = {
};
const char* const kDisabledExtensionsForEntry1[2] = {
- "test_extension1", "test_extension2",
+ "test_extension1",
+ "test_extension2",
};
const uint32_t kCrBugsForGpuControlTestingEntry1[2] = {
- 1024, 678,
+ 1024,
+ 678,
};
const uint32_t kDeviceIDsForGpuControlTestingEntry1[1] = {
@@ -71,7 +73,8 @@ const int kFeatureListForGpuControlTestingEntry7[1] = {
};
const uint32_t kDeviceIDsForGpuControlTestingEntry7[2] = {
- 0x1023, 0x0640,
+ 0x1023,
+ 0x0640,
};
const int kFeatureListForGpuControlTestingEntry8[1] = {
@@ -92,8 +95,9 @@ const GpuControlList::More kMoreForEntry9 = {
0, // gl_reset_notification_strategy
true, // direct_rendering
{GpuControlList::kUnknown, GpuControlList::kVersionStyleNumerical, nullptr,
- nullptr}, // gpu_count
- 0, // test_group
+ nullptr}, // gpu_count
+ GpuControlList::kDontCare, // hardware_overlay
+ 0, // test_group
};
const int kFeatureListForGpuControlTestingEntry10[1] = {
@@ -110,8 +114,9 @@ const GpuControlList::More kMoreForEntry10 = {
0, // gl_reset_notification_strategy
true, // direct_rendering
{GpuControlList::kUnknown, GpuControlList::kVersionStyleNumerical, nullptr,
- nullptr}, // gpu_count
- 0, // test_group
+ nullptr}, // gpu_count
+ GpuControlList::kDontCare, // hardware_overlay
+ 0, // test_group
};
const int kFeatureListForGpuControlTestingEntry11[1] = {
@@ -128,8 +133,9 @@ const GpuControlList::More kMoreForEntry11 = {
0, // gl_reset_notification_strategy
true, // direct_rendering
{GpuControlList::kUnknown, GpuControlList::kVersionStyleNumerical, nullptr,
- nullptr}, // gpu_count
- 0, // test_group
+ nullptr}, // gpu_count
+ GpuControlList::kDontCare, // hardware_overlay
+ 0, // test_group
};
const int kFeatureListForGpuControlTestingEntry12[1] = {
@@ -137,7 +143,10 @@ const int kFeatureListForGpuControlTestingEntry12[1] = {
};
const GpuControlList::GLStrings kGLStringsForGpuControlTestingEntry12 = {
- "NVIDIA", nullptr, nullptr, nullptr,
+ "NVIDIA",
+ nullptr,
+ nullptr,
+ nullptr,
};
const int kFeatureListForGpuControlTestingEntry13[1] = {
@@ -145,7 +154,10 @@ const int kFeatureListForGpuControlTestingEntry13[1] = {
};
const GpuControlList::GLStrings kGLStringsForGpuControlTestingEntry13 = {
- "X\\.Org.*", nullptr, nullptr, nullptr,
+ "X\\.Org.*",
+ nullptr,
+ nullptr,
+ nullptr,
};
const int kFeatureListForGpuControlTestingEntry14[1] = {
@@ -153,7 +165,10 @@ const int kFeatureListForGpuControlTestingEntry14[1] = {
};
const GpuControlList::GLStrings kGLStringsForGpuControlTestingEntry14 = {
- nullptr, ".*GeForce.*", nullptr, nullptr,
+ nullptr,
+ ".*GeForce.*",
+ nullptr,
+ nullptr,
};
const int kFeatureListForGpuControlTestingEntry15[1] = {
@@ -161,7 +176,10 @@ const int kFeatureListForGpuControlTestingEntry15[1] = {
};
const GpuControlList::GLStrings kGLStringsForGpuControlTestingEntry15 = {
- nullptr, "(?i).*software.*", nullptr, nullptr,
+ nullptr,
+ "(?i).*software.*",
+ nullptr,
+ nullptr,
};
const int kFeatureListForGpuControlTestingEntry16[1] = {
@@ -169,7 +187,10 @@ const int kFeatureListForGpuControlTestingEntry16[1] = {
};
const GpuControlList::GLStrings kGLStringsForGpuControlTestingEntry16 = {
- nullptr, nullptr, ".*GL_SUN_slice_accum", nullptr,
+ nullptr,
+ nullptr,
+ ".*GL_SUN_slice_accum",
+ nullptr,
};
const int kFeatureListForGpuControlTestingEntry17[1] = {
@@ -222,7 +243,10 @@ const int kFeatureListForGpuControlTestingEntry22[1] = {
const GpuControlList::GLStrings
kGLStringsForGpuControlTestingEntry22Exception0 = {
- nullptr, ".*mesa.*", nullptr, nullptr,
+ nullptr,
+ ".*mesa.*",
+ nullptr,
+ nullptr,
};
const int kFeatureListForGpuControlTestingEntry23[1] = {
@@ -239,16 +263,20 @@ const GpuControlList::More kMoreForEntry23 = {
0, // gl_reset_notification_strategy
true, // direct_rendering
{GpuControlList::kUnknown, GpuControlList::kVersionStyleNumerical, nullptr,
- nullptr}, // gpu_count
- 0, // test_group
+ nullptr}, // gpu_count
+ GpuControlList::kDontCare, // hardware_overlay
+ 0, // test_group
};
const int kFeatureListForGpuControlTestingEntry24[3] = {
- TEST_FEATURE_0, TEST_FEATURE_1, TEST_FEATURE_2,
+ TEST_FEATURE_0,
+ TEST_FEATURE_1,
+ TEST_FEATURE_2,
};
const int kFeatureListForGpuControlTestingEntry25[2] = {
- TEST_FEATURE_1, TEST_FEATURE_2,
+ TEST_FEATURE_1,
+ TEST_FEATURE_2,
};
const int kFeatureListForGpuControlTestingEntry26[1] = {
@@ -264,7 +292,10 @@ const int kFeatureListForGpuControlTestingEntry27[1] = {
};
const char* const kMachineModelNameForEntry27[4] = {
- "Nexus 4", "XT1032", "GT-.*", "SCH-.*",
+ "Nexus 4",
+ "XT1032",
+ "GT-.*",
+ "SCH-.*",
};
const GpuControlList::MachineModelInfo kMachineModelInfoForEntry27 = {
@@ -372,7 +403,8 @@ const int kFeatureListForGpuControlTestingEntry36[1] = {
};
const uint32_t kDeviceIDsForGpuControlTestingEntry36[2] = {
- 0x0166, 0x0168,
+ 0x0166,
+ 0x0168,
};
const int kFeatureListForGpuControlTestingEntry37[1] = {
@@ -405,8 +437,9 @@ const GpuControlList::More kMoreForEntry40 = {
0, // gl_reset_notification_strategy
true, // direct_rendering
{GpuControlList::kUnknown, GpuControlList::kVersionStyleNumerical, nullptr,
- nullptr}, // gpu_count
- 0, // test_group
+ nullptr}, // gpu_count
+ GpuControlList::kDontCare, // hardware_overlay
+ 0, // test_group
};
const int kFeatureListForGpuControlTestingEntry41[1] = {
@@ -477,8 +510,9 @@ const GpuControlList::More kMoreForEntry48 = {
0, // gl_reset_notification_strategy
true, // direct_rendering
{GpuControlList::kUnknown, GpuControlList::kVersionStyleNumerical, nullptr,
- nullptr}, // gpu_count
- 0, // test_group
+ nullptr}, // gpu_count
+ GpuControlList::kDontCare, // hardware_overlay
+ 0, // test_group
};
const int kFeatureListForGpuControlTestingEntry49[1] = {
@@ -515,7 +549,10 @@ const int kFeatureListForGpuControlTestingEntry52[1] = {
const GpuControlList::GLStrings
kGLStringsForGpuControlTestingEntry52Exception0 = {
- nullptr, ".*mesa.*", nullptr, nullptr,
+ nullptr,
+ ".*mesa.*",
+ nullptr,
+ nullptr,
};
const int kFeatureListForGpuControlTestingEntry53[1] = {
@@ -535,11 +572,13 @@ const GpuControlList::DriverInfo kDriverInfoForGpuControlTestingEntry54 = {
};
const char* const kDisabledExtensionsForEntry55[2] = {
- "test_extension2", "test_extension1",
+ "test_extension2",
+ "test_extension1",
};
const char* const kDisabledExtensionsForEntry56[2] = {
- "test_extension3", "test_extension2",
+ "test_extension3",
+ "test_extension2",
};
const int kFeatureListForGpuControlTestingEntry57[1] = {
@@ -556,8 +595,9 @@ const GpuControlList::More kMoreForEntry57 = {
0, // gl_reset_notification_strategy
false, // direct_rendering
{GpuControlList::kUnknown, GpuControlList::kVersionStyleNumerical, nullptr,
- nullptr}, // gpu_count
- 0, // test_group
+ nullptr}, // gpu_count
+ GpuControlList::kDontCare, // hardware_overlay
+ 0, // test_group
};
const int kFeatureListForGpuControlTestingEntry58[1] = {
@@ -578,8 +618,9 @@ const GpuControlList::More kMoreForEntry59 = {
0, // gl_reset_notification_strategy
true, // direct_rendering
{GpuControlList::kUnknown, GpuControlList::kVersionStyleNumerical, nullptr,
- nullptr}, // gpu_count
- 1, // test_group
+ nullptr}, // gpu_count
+ GpuControlList::kDontCare, // hardware_overlay
+ 1, // test_group
};
const int kFeatureListForGpuControlTestingEntry60[1] = {
@@ -596,8 +637,9 @@ const GpuControlList::More kMoreForEntry60 = {
0, // gl_reset_notification_strategy
true, // direct_rendering
{GpuControlList::kUnknown, GpuControlList::kVersionStyleNumerical, nullptr,
- nullptr}, // gpu_count
- 2, // test_group
+ nullptr}, // gpu_count
+ GpuControlList::kDontCare, // hardware_overlay
+ 2, // test_group
};
const int kFeatureListForGpuControlTestingEntry61[1] = {
@@ -605,7 +647,8 @@ const int kFeatureListForGpuControlTestingEntry61[1] = {
};
const GpuSeriesType kGpuSeriesForEntry61[2] = {
- GpuSeriesType::kIntelSkyLake, GpuSeriesType::kIntelKabyLake,
+ GpuSeriesType::kIntelSkyLake,
+ GpuSeriesType::kIntelKabyLake,
};
const int kFeatureListForGpuControlTestingEntry62[1] = {
@@ -660,6 +703,25 @@ const GpuControlList::DriverInfo kDriverInfoForGpuControlTestingEntry67 = {
nullptr}, // driver_date
};
+const int kFeatureListForGpuControlTestingEntry68[1] = {
+ TEST_FEATURE_0,
+};
+
+const GpuControlList::More kMoreForEntry68 = {
+ GpuControlList::kGLTypeNone, // gl_type
+ {GpuControlList::kUnknown, GpuControlList::kVersionStyleNumerical, nullptr,
+ nullptr}, // gl_version
+ {GpuControlList::kUnknown, GpuControlList::kVersionStyleNumerical, nullptr,
+ nullptr}, // pixel_shader_version
+ false, // in_process_gpu
+ 0, // gl_reset_notification_strategy
+ true, // direct_rendering
+ {GpuControlList::kUnknown, GpuControlList::kVersionStyleNumerical, nullptr,
+ nullptr}, // gpu_count
+ GpuControlList::kUnsupported, // hardware_overlay
+ 0, // test_group
+};
+
} // namespace gpu
#endif // GPU_CONFIG_GPU_CONTROL_LIST_TESTING_ARRAYS_AND_STRUCTS_AUTOGEN_H_
diff --git a/chromium/gpu/config/gpu_control_list_testing_autogen.cc b/chromium/gpu/config/gpu_control_list_testing_autogen.cc
index ad00d6ac5a2..7601e14f721 100644
--- a/chromium/gpu/config/gpu_control_list_testing_autogen.cc
+++ b/chromium/gpu/config/gpu_control_list_testing_autogen.cc
@@ -2036,6 +2036,36 @@ const GpuControlList::Entry kGpuControlListTestingEntries[] = {
0, // exceptions count
nullptr, // exceptions
},
+ {
+ 68, // id
+ "GpuControlListEntryTest.HardwareOverlay",
+ base::size(kFeatureListForGpuControlTestingEntry68), // features size
+ kFeatureListForGpuControlTestingEntry68, // features
+ 0, // DisabledExtensions size
+ nullptr, // DisabledExtensions
+ 0, // DisabledWebGLExtensions size
+ nullptr, // DisabledWebGLExtensions
+ 0, // CrBugs size
+ nullptr, // CrBugs
+ {
+ GpuControlList::kOsAny, // os_type
+ {GpuControlList::kUnknown, GpuControlList::kVersionStyleNumerical,
+ nullptr, nullptr}, // os_version
+ 0x8086, // vendor_id
+ 0, // DeviceIDs size
+ nullptr, // DeviceIDs
+ GpuControlList::kMultiGpuCategoryNone, // multi_gpu_category
+ GpuControlList::kMultiGpuStyleNone, // multi_gpu_style
+ nullptr, // driver info
+ nullptr, // GL strings
+ nullptr, // machine model info
+ 0, // gpu_series size
+ nullptr, // gpu_series
+ &kMoreForEntry68, // more data
+ },
+ 0, // exceptions count
+ nullptr, // exceptions
+ },
};
-const size_t kGpuControlListTestingEntryCount = 67;
+const size_t kGpuControlListTestingEntryCount = 68;
} // namespace gpu
diff --git a/chromium/gpu/config/gpu_control_list_testing_entry_enums_autogen.h b/chromium/gpu/config/gpu_control_list_testing_entry_enums_autogen.h
index d896d1ad1ba..ea120e72f99 100644
--- a/chromium/gpu/config/gpu_control_list_testing_entry_enums_autogen.h
+++ b/chromium/gpu/config/gpu_control_list_testing_entry_enums_autogen.h
@@ -80,6 +80,7 @@ enum GpuControlListTestingEntryEnum {
kGpuControlListEntryTest_GpuSeriesSecondary = 64,
kGpuControlListEntryTest_GpuSeriesInException = 65,
kGpuControlListEntryTest_MultipleDrivers = 66,
+ kGpuControlListEntryTest_HardwareOverlay = 67,
};
} // namespace gpu
diff --git a/chromium/gpu/config/gpu_driver_bug_list.cc b/chromium/gpu/config/gpu_driver_bug_list.cc
index 6e828f7e3a9..af5331d274c 100644
--- a/chromium/gpu/config/gpu_driver_bug_list.cc
+++ b/chromium/gpu/config/gpu_driver_bug_list.cc
@@ -5,6 +5,7 @@
#include "gpu/config/gpu_driver_bug_list.h"
#include "base/logging.h"
+#include "base/stl_util.h"
#include "gpu/config/gpu_driver_bug_list_autogen.h"
#include "gpu/config/gpu_driver_bug_workaround_type.h"
#include "gpu/config/gpu_switches.h"
@@ -44,7 +45,7 @@ std::unique_ptr<GpuDriverBugList> GpuDriverBugList::Create(
const GpuControlListData& data) {
std::unique_ptr<GpuDriverBugList> list(new GpuDriverBugList(data));
- DCHECK_EQ(static_cast<int>(arraysize(kFeatureList)),
+ DCHECK_EQ(static_cast<int>(base::size(kFeatureList)),
NUMBER_OF_GPU_DRIVER_BUG_WORKAROUND_TYPES);
for (int i = 0; i < NUMBER_OF_GPU_DRIVER_BUG_WORKAROUND_TYPES; ++i) {
list->AddSupportedFeature(kFeatureList[i].name,
diff --git a/chromium/gpu/config/gpu_driver_bug_list.json b/chromium/gpu/config/gpu_driver_bug_list.json
index 17f109e36c2..2ac3cf79c5e 100644
--- a/chromium/gpu/config/gpu_driver_bug_list.json
+++ b/chromium/gpu/config/gpu_driver_bug_list.json
@@ -217,22 +217,6 @@
]
},
{
- "id": 36,
- "cr_bugs": [163464, 233612],
- "description": "Share-group related crashes on Qualcomm drivers",
- "os": {
- "type": "android",
- "version": {
- "op": "<",
- "value": "4.3"
- }
- },
- "gl_vendor": "Qualcomm.*",
- "features": [
- "use_virtualized_gl_contexts"
- ]
- },
- {
"id": 37,
"cr_bugs": [286468],
"description": "Program link fails in NVIDIA Linux if gl_Position is not set",
@@ -247,7 +231,7 @@
},
{
"id": 38,
- "cr_bugs": [289461],
+ "cr_bugs": [163464, 233612, 289461],
"description": "Non-virtual contexts on Qualcomm sometimes cause out-of-order frames",
"os": {
"type": "android"
@@ -2419,7 +2403,7 @@
{
"id": 233,
"description": "Delayed copy NV12 displays incorrect colors on NVIDIA drivers.",
- "cr_bugs": [728670],
+ "cr_bugs": [727216],
"os": {
"type": "win"
},
@@ -3042,6 +3026,47 @@
"features": [
"exit_on_context_lost"
]
+ },
+ {
+ "id": 284,
+ "cr_bugs": [913301],
+ "description": "Clamp texture's BASE_LEVEL/MAX_LEVEL for GenerateMipmap",
+ "os": {
+ "type": "macosx"
+ },
+ "features": [
+ "clamp_texture_base_level_and_max_level"
+ ]
+ },
+ {
+ "id": 285,
+ "cr_bugs": [914976],
+ "description": "Context flush ordering doesn't seem to work on AMD",
+ "vendor_id": "0x1002",
+ "os": {
+ "type": "linux"
+ },
+ "features": [
+ "use_virtualized_gl_contexts"
+ ]
+ },
+ {
+ "id": 286,
+ "cr_bugs": [908069],
+ "description": "Video corruption on Intel HD 530 without hardware overlay support",
+ "vendor_id": "0x8086",
+ "device_id": ["0x1912"],
+ "os": {
+ "type": "win"
+ },
+ "driver_version": {
+ "op": ">=",
+ "value": "24"
+ },
+ "hardware_overlay": "unsupported",
+ "features": [
+ "disable_direct_composition"
+ ]
}
]
}
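
Entry 284 introduces the clamp_texture_base_level_and_max_level workaround that pairs with the new ApplyClampedBaseLevelAndMaxLevelToDriver() in texture_manager.h. The exact decoder behavior is not shown in this diff, so the following is a speculative sketch of what the workaround's name suggests — clamping the requested BASE_LEVEL/MAX_LEVEL to the levels that actually exist before handing them to the driver; all names and semantics here are assumptions:

#include <algorithm>

struct LevelRange { int base_level; int max_level; };

// Hypothetical clamp: keep base in [0, last_defined] and max in
// [base, last_defined] so GenerateMipmap never sees an out-of-range level.
LevelRange ClampLevels(int requested_base, int requested_max,
                       int num_defined_levels) {
  const int last = num_defined_levels - 1;
  LevelRange r;
  r.base_level = std::min(std::max(requested_base, 0), last);
  r.max_level = std::min(std::max(requested_max, r.base_level), last);
  return r;
}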
diff --git a/chromium/gpu/config/gpu_dx_diagnostics_win.cc b/chromium/gpu/config/gpu_dx_diagnostics_win.cc
index cd2396094c8..9e85f16c96e 100644
--- a/chromium/gpu/config/gpu_dx_diagnostics_win.cc
+++ b/chromium/gpu/config/gpu_dx_diagnostics_win.cc
@@ -9,6 +9,7 @@
#include <dxdiag.h>
#include <windows.h>
+#include "base/stl_util.h"
#include "base/strings/string_number_conversions.h"
#include "base/strings/utf_string_conversions.h"
#include "base/win/com_init_util.h"
@@ -34,7 +35,7 @@ void RecurseDiagnosticTree(DxDiagNode* output,
if (SUCCEEDED(hr)) {
for (DWORD i = 0; i < prop_count; i++) {
WCHAR prop_name16[256];
- hr = container->EnumPropNames(i, prop_name16, arraysize(prop_name16));
+ hr = container->EnumPropNames(i, prop_name16, base::size(prop_name16));
if (SUCCEEDED(hr)) {
std::string prop_name8 = base::WideToUTF8(prop_name16);
@@ -70,9 +71,8 @@ void RecurseDiagnosticTree(DxDiagNode* output,
if (SUCCEEDED(hr)) {
for (DWORD i = 0; i < child_count; i++) {
WCHAR child_name16[256];
- hr = container->EnumChildContainerNames(i,
- child_name16,
- arraysize(child_name16));
+ hr = container->EnumChildContainerNames(i, child_name16,
+ base::size(child_name16));
if (SUCCEEDED(hr)) {
std::string child_name8 = base::WideToUTF8(child_name16);
DxDiagNode* output_child = &output->children[child_name8];
diff --git a/chromium/gpu/config/gpu_finch_features.cc b/chromium/gpu/config/gpu_finch_features.cc
index 84efe199beb..a8748066ea1 100644
--- a/chromium/gpu/config/gpu_finch_features.cc
+++ b/chromium/gpu/config/gpu_finch_features.cc
@@ -56,6 +56,11 @@ const base::Feature kUseDCOverlaysForSoftwareProtectedVideo{
"UseDCOverlaysForSoftwareProtectedVideo",
base::FEATURE_DISABLED_BY_DEFAULT};
+// Use decode swap chain created from compatible video decoder buffers.
+const base::Feature kDirectCompositionUseNV12DecodeSwapChain{
+ "DirectCompositionUseNV12DecodeSwapChain",
+ base::FEATURE_DISABLED_BY_DEFAULT};
+
// Controls the decode acceleration of JPEG images (as opposed to camera
// captures) in Chrome OS using the VA-API.
// TODO(andrescj): remove or enable by default in Chrome OS once
diff --git a/chromium/gpu/config/gpu_finch_features.h b/chromium/gpu/config/gpu_finch_features.h
index 2c70110d63d..833ec88c081 100644
--- a/chromium/gpu/config/gpu_finch_features.h
+++ b/chromium/gpu/config/gpu_finch_features.h
@@ -33,6 +33,8 @@ GPU_EXPORT extern const base::Feature kSharedImageManager;
GPU_EXPORT extern const base::Feature kUseDCOverlaysForSoftwareProtectedVideo;
+GPU_EXPORT extern const base::Feature kDirectCompositionUseNV12DecodeSwapChain;
+
GPU_EXPORT extern const base::Feature kVaapiJpegImageDecodeAcceleration;
} // namespace features
diff --git a/chromium/gpu/config/gpu_info_collector.cc b/chromium/gpu/config/gpu_info_collector.cc
index af06601ebde..c47e7f39393 100644
--- a/chromium/gpu/config/gpu_info_collector.cc
+++ b/chromium/gpu/config/gpu_info_collector.cc
@@ -14,6 +14,7 @@
#include "base/command_line.h"
#include "base/logging.h"
#include "base/metrics/histogram_functions.h"
+#include "base/stl_util.h"
#include "base/strings/string_number_conversions.h"
#include "base/strings/string_piece.h"
#include "base/strings/string_split.h"
@@ -294,16 +295,16 @@ void IdentifyActiveGPU(GPUInfo* gpu_info) {
uint32_t active_vendor_id = 0;
if (!gpu_info->gl_vendor.empty()) {
std::string gl_vendor_lower = base::ToLowerASCII(gpu_info->gl_vendor);
- int index = StringContainsName(
- gl_vendor_lower, kVendorNames, arraysize(kVendorNames));
+ int index = StringContainsName(gl_vendor_lower, kVendorNames,
+ base::size(kVendorNames));
if (index >= 0) {
active_vendor_id = kVendorIDs[index];
}
}
if (active_vendor_id == 0 && !gpu_info->gl_renderer.empty()) {
std::string gl_renderer_lower = base::ToLowerASCII(gpu_info->gl_renderer);
- int index = StringContainsName(
- gl_renderer_lower, kVendorNames, arraysize(kVendorNames));
+ int index = StringContainsName(gl_renderer_lower, kVendorNames,
+ base::size(kVendorNames));
if (index >= 0) {
active_vendor_id = kVendorIDs[index];
}
diff --git a/chromium/gpu/config/gpu_lists_version.h b/chromium/gpu/config/gpu_lists_version.h
index f6df02870fc..545fc54a310 100644
--- a/chromium/gpu/config/gpu_lists_version.h
+++ b/chromium/gpu/config/gpu_lists_version.h
@@ -3,6 +3,6 @@
#ifndef GPU_CONFIG_GPU_LISTS_VERSION_H_
#define GPU_CONFIG_GPU_LISTS_VERSION_H_
-#define GPU_LISTS_VERSION "38c6381358a1ec7a9fc161a038fb9d7407334aa4"
+#define GPU_LISTS_VERSION "8cd51bd20244a019d86ed5092017a118e2ad962a"
#endif // GPU_CONFIG_GPU_LISTS_VERSION_H_
diff --git a/chromium/gpu/config/gpu_preferences.h b/chromium/gpu/config/gpu_preferences.h
index 1d5d6c0cbd6..96f525f0624 100644
--- a/chromium/gpu/config/gpu_preferences.h
+++ b/chromium/gpu/config/gpu_preferences.h
@@ -101,6 +101,9 @@ struct GPU_EXPORT GpuPreferences {
bool log_gpu_control_list_decisions = false;
+ // Enable exporting of events to ETW (on Windows).
+ bool enable_trace_export_events_to_etw = false;
+
// ===================================
// Settings from //gpu/command_buffer/service/gpu_switches.cc
@@ -202,6 +205,7 @@ struct GPU_EXPORT GpuPreferences {
bool enable_oop_rasterization_ddl = false;
bool enable_raster_to_sk_image = false;
+ bool enable_passthrough_raster_decoder = false;
// Start the watchdog suspended, as the app is already backgrounded and won't
// send a background/suspend signal.
diff --git a/chromium/gpu/config/gpu_preferences_unittest.cc b/chromium/gpu/config/gpu_preferences_unittest.cc
index d31ea56d87f..260ef9ce36f 100644
--- a/chromium/gpu/config/gpu_preferences_unittest.cc
+++ b/chromium/gpu/config/gpu_preferences_unittest.cc
@@ -35,6 +35,8 @@ void CheckGpuPreferencesEqual(GpuPreferences left, GpuPreferences right) {
right.disable_software_rasterizer);
EXPECT_EQ(left.log_gpu_control_list_decisions,
right.log_gpu_control_list_decisions);
+ EXPECT_EQ(left.enable_trace_export_events_to_etw,
+ right.enable_trace_export_events_to_etw);
EXPECT_EQ(left.compile_shader_always_succeeds,
right.compile_shader_always_succeeds);
EXPECT_EQ(left.disable_gl_error_limit, right.disable_gl_error_limit);
@@ -125,6 +127,7 @@ TEST(GpuPreferencesTest, EncodeDecode) {
GPU_PREFERENCES_FIELD(enable_media_foundation_vea_on_windows7, true)
GPU_PREFERENCES_FIELD(disable_software_rasterizer, true)
GPU_PREFERENCES_FIELD(log_gpu_control_list_decisions, true)
+ GPU_PREFERENCES_FIELD(enable_trace_export_events_to_etw, true)
GPU_PREFERENCES_FIELD(compile_shader_always_succeeds, true)
GPU_PREFERENCES_FIELD(disable_gl_error_limit, true)
GPU_PREFERENCES_FIELD(disable_glsl_translator, true)
diff --git a/chromium/gpu/config/gpu_test_config.cc b/chromium/gpu/config/gpu_test_config.cc
index a3389df6fc5..dea06bd2be6 100644
--- a/chromium/gpu/config/gpu_test_config.cc
+++ b/chromium/gpu/config/gpu_test_config.cc
@@ -75,6 +75,8 @@ GPUTestConfig::OS GetCurrentOS() {
}
#elif defined(OS_ANDROID)
return GPUTestConfig::kOsAndroid;
+#elif defined(OS_FUCHSIA)
+ return GPUTestConfig::kOsFuchsia;
#endif
return GPUTestConfig::kOsUnknown;
}
@@ -92,7 +94,8 @@ GPUTestConfig::GPUTestConfig(const GPUTestConfig& other) = default;
GPUTestConfig::~GPUTestConfig() = default;
void GPUTestConfig::set_os(int32_t os) {
- DCHECK_EQ(0, os & ~(kOsAndroid | kOsWin | kOsMac | kOsLinux | kOsChromeOS));
+ DCHECK_EQ(0, os & ~(kOsAndroid | kOsWin | kOsMac | kOsLinux | kOsChromeOS |
+ kOsFuchsia));
os_ = os;
}
@@ -194,6 +197,7 @@ bool GPUTestBotConfig::IsValid() const {
case kOsLinux:
case kOsChromeOS:
case kOsAndroid:
+ case kOsFuchsia:
break;
default:
return false;
diff --git a/chromium/gpu/config/gpu_test_config.h b/chromium/gpu/config/gpu_test_config.h
index e4f3b894d64..79a3529a92c 100644
--- a/chromium/gpu/config/gpu_test_config.h
+++ b/chromium/gpu/config/gpu_test_config.h
@@ -43,6 +43,7 @@ class GPU_EXPORT GPUTestConfig {
kOsAndroid = 1 << 16,
kOsWin10 = 1 << 17,
kOsWin = kOsWinXP | kOsWinVista | kOsWin7 | kOsWin8 | kOsWin10,
+ kOsFuchsia = 1 << 18,
};
enum BuildType {
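
Note: kOsFuchsia takes the next free bit (1 << 18; 1 << 17 is already kOsWin10), so a single config can target several OSes through one mask, which is why set_os() in gpu_test_config.cc widens its DCHECK. A minimal sketch of the flag scheme; only the bit positions shown in the diff are real, the helper is hypothetical:

#include <cstdint>

enum OsFlag : int32_t {
  kOsAndroid = 1 << 16,  // positions as in gpu_test_config.h above
  kOsWin10 = 1 << 17,
  kOsFuchsia = 1 << 18,
};

// Hypothetical helper: does a config whose OS mask is |mask| apply to |os|?
bool MaskContains(int32_t mask, OsFlag os) {
  return (mask & os) != 0;
}
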
diff --git a/chromium/gpu/config/gpu_util.cc b/chromium/gpu/config/gpu_util.cc
index 32dfbd41ec1..c4f4b72d2c8 100644
--- a/chromium/gpu/config/gpu_util.cc
+++ b/chromium/gpu/config/gpu_util.cc
@@ -25,7 +25,7 @@
#include "gpu/config/gpu_preferences.h"
#include "gpu/config/gpu_switches.h"
#include "ui/gfx/extension_set.h"
-#include "ui/gl/gl_features.h"
+#include "ui/gl/buildflags.h"
#include "ui/gl/gl_switches.h"
#if defined(OS_ANDROID)
@@ -94,6 +94,10 @@ GpuFeatureStatus GetOopRasterizationFeatureStatus(
if (!gpu_info.oop_rasterization_supported)
return kGpuFeatureStatusDisabled;
+ if (gpu_preferences.use_passthrough_cmd_decoder &&
+ !gpu_preferences.enable_passthrough_raster_decoder)
+ return kGpuFeatureStatusDisabled;
+
if (gpu_preferences.disable_oop_rasterization)
return kGpuFeatureStatusDisabled;
else if (gpu_preferences.enable_oop_rasterization)
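
Note: the new check slots into an early-return ladder: with the passthrough command decoder active, OOP rasterization now also requires the dedicated enable_passthrough_raster_decoder preference. A condensed sketch of the decision order; the tail of the real function is omitted here:

enum GpuFeatureStatus { kGpuFeatureStatusEnabled, kGpuFeatureStatusDisabled };

struct RasterPrefs {  // subset of GpuPreferences used by this decision
  bool use_passthrough_cmd_decoder = false;
  bool enable_passthrough_raster_decoder = false;
  bool disable_oop_rasterization = false;
  bool enable_oop_rasterization = false;
};

GpuFeatureStatus OopRasterStatus(const RasterPrefs& p, bool hw_supported) {
  if (!hw_supported)
    return kGpuFeatureStatusDisabled;
  // New in this change: the passthrough decoder needs an explicit opt-in.
  if (p.use_passthrough_cmd_decoder && !p.enable_passthrough_raster_decoder)
    return kGpuFeatureStatusDisabled;
  if (p.disable_oop_rasterization)
    return kGpuFeatureStatusDisabled;
  if (p.enable_oop_rasterization)
    return kGpuFeatureStatusEnabled;
  return kGpuFeatureStatusDisabled;  // the real function has further checks
}
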
diff --git a/chromium/gpu/config/gpu_workaround_list.txt b/chromium/gpu/config/gpu_workaround_list.txt
index ca22a3c9a92..826c74a5e00 100644
--- a/chromium/gpu/config/gpu_workaround_list.txt
+++ b/chromium/gpu/config/gpu_workaround_list.txt
@@ -4,6 +4,7 @@ avoid_egl_image_target_texture_reuse
avoid_one_component_egl_images
avoid_stencil_buffers
broken_egl_image_ref_counting
+clamp_texture_base_level_and_max_level
clear_pixel_unpack_buffer_before_copyteximage
clear_to_zero_or_one_broken
clear_uniforms_before_first_program_use
diff --git a/chromium/gpu/config/process_json.py b/chromium/gpu/config/process_json.py
index 17c942f15b7..c4faaf27ab0 100755
--- a/chromium/gpu/config/process_json.py
+++ b/chromium/gpu/config/process_json.py
@@ -343,6 +343,19 @@ def write_gl_type(gl_type, data_file):
data_file.write('GpuControlList::kGLType%s, // gl_type\n' % map[gl_type])
+def write_supported_or_not(feature_value, feature_name, data_file):
+ if feature_value is None:
+ feature_value = 'dont_care'
+ map = {
+ 'supported': 'Supported',
+ 'unsupported': 'Unsupported',
+ 'dont_care': 'DontCare',
+ }
+ assert map.has_key(feature_value)
+ data_file.write('GpuControlList::k%s, // %s\n' %
+ (map[feature_value], feature_name))
+
+
def write_conditions(entry_id, is_exception, exception_id, entry,
unique_symbol_id, data_file, data_helper_file,
_data_exception_file):
@@ -367,6 +380,7 @@ def write_conditions(entry_id, is_exception, exception_id, entry,
gl_reset_notification_strategy = None
direct_rendering = True
gpu_count = None
+ hardware_overlay = None
test_group = 0
machine_model_name = None
machine_model_version = None
@@ -442,6 +456,8 @@ def write_conditions(entry_id, is_exception, exception_id, entry,
direct_rendering = False
elif key == 'gpu_count':
gpu_count = entry[key]
+ elif key == 'hardware_overlay':
+ hardware_overlay = entry[key]
elif key == 'test_group':
assert entry[key] > 0
test_group = entry[key]
@@ -487,11 +503,12 @@ def write_conditions(entry_id, is_exception, exception_id, entry,
# group a bunch of less used conditions
if (gl_version != None or pixel_shader_version != None or in_process_gpu or
gl_reset_notification_strategy != None or (not direct_rendering) or
- gpu_count != None or test_group != 0):
+ gpu_count != None or hardware_overlay != None or test_group != 0):
write_entry_more_data(entry_id, is_exception, exception_id, gl_type,
gl_version, pixel_shader_version, in_process_gpu,
gl_reset_notification_strategy, direct_rendering,
- gpu_count, test_group, data_file, data_helper_file)
+ gpu_count, hardware_overlay, test_group, data_file,
+ data_helper_file)
else:
data_file.write('nullptr, // more conditions\n')
@@ -533,7 +550,8 @@ def write_gpu_series_list(entry_id, is_exception, exception_id, gpu_series_list,
def write_entry_more_data(entry_id, is_exception, exception_id, gl_type,
gl_version, pixel_shader_version, in_process_gpu,
gl_reset_notification_strategy, direct_rendering,
- gpu_count, test_group, data_file, data_helper_file):
+ gpu_count, hardware_overlay, test_group, data_file,
+ data_helper_file):
# write more data
var_name = 'kMoreForEntry' + str(entry_id)
if is_exception:
@@ -549,6 +567,7 @@ def write_entry_more_data(entry_id, is_exception, exception_id, gl_type,
gl_reset_notification_strategy)
write_boolean_value(direct_rendering, 'direct_rendering', data_helper_file)
write_version(gpu_count, 'gpu_count', data_helper_file)
+ write_supported_or_not(hardware_overlay, 'hardware_overlay', data_helper_file)
write_integer_value(test_group, 'test_group', data_helper_file)
data_helper_file.write('};\n\n')
# reference more data in entry
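
Note: write_supported_or_not() maps the JSON values supported / unsupported / dont_care (or an absent key) onto a tri-state enum initializer in the generated data. Roughly what the emitted C++ looks like for one entry; the enum and variable names are hypothetical, only the GpuControlList::k... constants follow the script's mapping:

namespace GpuControlList {
enum SupportedOrNot { kSupported, kUnsupported, kDontCare };
}  // namespace GpuControlList

// Generated for an entry with "hardware_overlay": "unsupported"; an entry
// without the key would get kDontCare, the default in the script.
const GpuControlList::SupportedOrNot kEntry42HardwareOverlay =
    GpuControlList::kUnsupported;
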
diff --git a/chromium/gpu/config/software_rendering_list.json b/chromium/gpu/config/software_rendering_list.json
index 0203333b985..abb9a898f3a 100644
--- a/chromium/gpu/config/software_rendering_list.json
+++ b/chromium/gpu/config/software_rendering_list.json
@@ -822,6 +822,12 @@
"type": "android"
},
"gl_renderer": ".*Google.*"
+ },
+ {
+ "os": {
+ "type": "android"
+ },
+ "gl_renderer": "ANGLE.*"
}
],
"features": [
@@ -1358,7 +1364,7 @@
},
{
"id": 137,
- "description": "GPU rasterization on CrOS is blacklisted on anything but Intel, Imagination, or AMD GPUs for now.",
+ "description": "GPU rasterization on CrOS is blacklisted on anything but Intel, Mali, Imagination, or AMD GPUs for now.",
"cr_bugs": [684094],
"os": {
"type": "chromeos"
@@ -1368,6 +1374,7 @@
],
"exceptions": [
{ "vendor_id": "0x8086" },
+ { "gl_renderer": "Mali-T.*" },
{ "gl_renderer": "PowerVR.*" },
{ "vendor_id": "0x1002" }
]
@@ -1650,7 +1657,7 @@
]
},
{
- "id":156,
+ "id": 156,
"cr_bugs": [870964],
"description": "Frequent crashes on Adreno (TM) on L and below",
"os": {
@@ -1681,6 +1688,42 @@
"accelerated_webgl",
"accelerated_webgl2"
]
+ },
+ {
+ "id": 158,
+ "cr_bugs": [829435],
+ "description": "OOP rasterization on top of ANGLE not supported",
+ "gl_renderer": "ANGLE.*",
+ "features": [
+ "oop_rasterization"
+ ]
+ },
+ {
+ "id": 159,
+ "cr_bugs": [902247],
+ "description": "Disallow OpenGL use on Mac with old NVIDIA GPUs",
+ "os": {
+ "type": "macosx"
+ },
+ "vendor_id": "0x10de",
+ "device_id": ["0x0861", "0x0866", "0x0867", "0x0869", "0x08a0", "0x08a2",
+ "0x08a4", "0x0a29"],
+ "features": [
+ "all"
+ ]
+ },
+ {
+ "id": 160,
+ "cr_bugs": [902247],
+ "description": "Disallow OpenGL use on Mac with old AMD GPUs",
+ "os": {
+ "type": "macosx"
+ },
+ "vendor_id": "0x1002",
+ "device_id": ["0x944a", "0x9488", "0x94c8", "0x9583"],
+ "features": [
+ "all"
+ ]
}
]
}
diff --git a/chromium/gpu/gles2_conform_support/egl/context.cc b/chromium/gpu/gles2_conform_support/egl/context.cc
index 9009b10b996..06f61d4c10c 100644
--- a/chromium/gpu/gles2_conform_support/egl/context.cc
+++ b/chromium/gpu/gles2_conform_support/egl/context.cc
@@ -172,38 +172,38 @@ const gpu::Capabilities& Context::GetCapabilities() const {
}
int32_t Context::CreateImage(ClientBuffer buffer, size_t width, size_t height) {
- NOTIMPLEMENTED();
+ NOTREACHED();
return -1;
}
void Context::DestroyImage(int32_t id) {
- NOTIMPLEMENTED();
+ NOTREACHED();
}
void Context::SignalQuery(uint32_t query, base::OnceClosure callback) {
- NOTIMPLEMENTED();
+ NOTREACHED();
}
void Context::CreateGpuFence(uint32_t gpu_fence_id, ClientGpuFence source) {
- NOTIMPLEMENTED();
+ NOTREACHED();
}
void Context::GetGpuFence(
uint32_t gpu_fence_id,
base::OnceCallback<void(std::unique_ptr<gfx::GpuFence>)> callback) {
- NOTIMPLEMENTED();
+ NOTREACHED();
}
void Context::SetLock(base::Lock*) {
- NOTIMPLEMENTED();
+ NOTREACHED();
}
void Context::EnsureWorkVisible() {
- // This is only relevant for out-of-process command buffers.
+ NOTREACHED();
}
gpu::CommandBufferNamespace Context::GetNamespaceID() const {
- return gpu::CommandBufferNamespace::IN_PROCESS;
+ return gpu::CommandBufferNamespace::INVALID;
}
gpu::CommandBufferId Context::GetCommandBufferID() const {
@@ -211,24 +211,27 @@ gpu::CommandBufferId Context::GetCommandBufferID() const {
}
void Context::FlushPendingWork() {
- // This is only relevant for out-of-process command buffers.
+ NOTREACHED();
}
uint64_t Context::GenerateFenceSyncRelease() {
- return display_->GenerateFenceSyncRelease();
+ NOTREACHED();
+ return 0;
}
bool Context::IsFenceSyncReleased(uint64_t release) {
- NOTIMPLEMENTED();
+ NOTREACHED();
return false;
}
void Context::SignalSyncToken(const gpu::SyncToken& sync_token,
base::OnceClosure callback) {
- NOTIMPLEMENTED();
+ NOTREACHED();
}
-void Context::WaitSyncTokenHint(const gpu::SyncToken& sync_token) {}
+void Context::WaitSyncToken(const gpu::SyncToken& sync_token) {
+ NOTREACHED();
+}
bool Context::CanWaitUnverifiedSyncToken(const gpu::SyncToken& sync_token) {
return false;
@@ -351,11 +354,11 @@ void Context::DestroyService() {
gl_context_ = nullptr;
transfer_buffer_.reset();
+ gles2_cmd_helper_.reset();
+ command_buffer_.reset();
if (decoder_)
decoder_->Destroy(have_context);
decoder_.reset();
- gles2_cmd_helper_.reset();
- command_buffer_.reset();
}
bool Context::HasService() const {
diff --git a/chromium/gpu/gles2_conform_support/egl/context.h b/chromium/gpu/gles2_conform_support/egl/context.h
index 31234010f41..914d3bd0918 100644
--- a/chromium/gpu/gles2_conform_support/egl/context.h
+++ b/chromium/gpu/gles2_conform_support/egl/context.h
@@ -80,7 +80,7 @@ class Context : public base::RefCountedThreadSafe<Context>,
bool IsFenceSyncReleased(uint64_t release) override;
void SignalSyncToken(const gpu::SyncToken& sync_token,
base::OnceClosure callback) override;
- void WaitSyncTokenHint(const gpu::SyncToken& sync_token) override;
+ void WaitSyncToken(const gpu::SyncToken& sync_token) override;
bool CanWaitUnverifiedSyncToken(const gpu::SyncToken& sync_token) override;
// Called by ThreadState to set the needed global variables when this context
diff --git a/chromium/gpu/gles2_conform_support/egl/display.cc b/chromium/gpu/gles2_conform_support/egl/display.cc
index 5e95436935f..89741016fa4 100644
--- a/chromium/gpu/gles2_conform_support/egl/display.cc
+++ b/chromium/gpu/gles2_conform_support/egl/display.cc
@@ -4,6 +4,7 @@
#include "gpu/gles2_conform_support/egl/display.h"
+#include "base/stl_util.h"
#include "build/build_config.h"
#include "gpu/gles2_conform_support/egl/config.h"
#include "gpu/gles2_conform_support/egl/context.h"
@@ -86,7 +87,7 @@ EGLBoolean Display::ChooseConfig(ThreadState* ts,
if (!configs)
config_size = 0;
*num_config = 0;
- for (size_t i = 0; i < arraysize(configs_); ++i) {
+ for (size_t i = 0; i < base::size(configs_); ++i) {
if (configs_[i]->Matches(attrib_list)) {
if (*num_config < config_size) {
configs[*num_config] = configs_[i].get();
@@ -109,9 +110,9 @@ EGLBoolean Display::GetConfigs(ThreadState* ts,
InitializeConfigsIfNeeded();
if (!configs)
config_size = 0;
- *num_config = arraysize(configs_);
+ *num_config = base::size(configs_);
size_t count =
- std::min(arraysize(configs_), static_cast<size_t>(config_size));
+ std::min(base::size(configs_), static_cast<size_t>(config_size));
for (size_t i = 0; i < count; ++i)
configs[i] = configs_[i].get();
return ts->ReturnSuccess(EGL_TRUE);
diff --git a/chromium/gpu/gles2_conform_support/gles2_conform_test.cc b/chromium/gpu/gles2_conform_support/gles2_conform_test.cc
index 22a23d30c69..40ea39108ea 100644
--- a/chromium/gpu/gles2_conform_support/gles2_conform_test.cc
+++ b/chromium/gpu/gles2_conform_support/gles2_conform_test.cc
@@ -132,8 +132,6 @@ int main(int argc, char** argv) {
::testing::InitGoogleTest(&argc, argv);
base::TestSuite test_suite(argc, argv);
int rt = base::LaunchUnitTestsSerially(
- argc,
- argv,
- base::Bind(&RunHelper, base::Unretained(&test_suite)));
+ argc, argv, base::BindOnce(&RunHelper, base::Unretained(&test_suite)));
return rt;
}
diff --git a/chromium/gpu/ipc/client/command_buffer_proxy_impl.cc b/chromium/gpu/ipc/client/command_buffer_proxy_impl.cc
index 0345c33b863..57827e5ffc2 100644
--- a/chromium/gpu/ipc/client/command_buffer_proxy_impl.cc
+++ b/chromium/gpu/ipc/client/command_buffer_proxy_impl.cc
@@ -270,10 +270,6 @@ void CommandBufferProxyImpl::OrderingBarrierHelper(int32_t put_offset) {
last_put_offset_ = put_offset;
last_flush_id_ = channel_->OrderingBarrier(
route_id_, put_offset, std::move(pending_sync_token_fences_));
-
- pending_sync_token_fences_.clear();
-
- flushed_fence_sync_release_ = next_fence_sync_release_ - 1;
}
void CommandBufferProxyImpl::SetUpdateVSyncParametersCallback(
@@ -360,13 +356,13 @@ void CommandBufferProxyImpl::SetGetBuffer(int32_t shm_id) {
}
scoped_refptr<gpu::Buffer> CommandBufferProxyImpl::CreateTransferBuffer(
- size_t size,
+ uint32_t size,
int32_t* id) {
CheckLock();
base::AutoLock lock(last_state_lock_);
*id = -1;
- int32_t new_id = channel_->ReserveTransferBufferId();
+ int32_t new_id = GetNextBufferId();
base::UnsafeSharedMemoryRegion shared_memory_region;
base::WritableSharedMemoryMapping shared_memory_mapping;
@@ -377,6 +373,7 @@ scoped_refptr<gpu::Buffer> CommandBufferProxyImpl::CreateTransferBuffer(
OnClientError(gpu::error::kOutOfBounds);
return nullptr;
}
+ DCHECK_LE(shared_memory_mapping.size(), static_cast<size_t>(UINT32_MAX));
if (last_state_.error == gpu::error::kNoError) {
base::UnsafeSharedMemoryRegion region =
@@ -436,13 +433,9 @@ int32_t CommandBufferProxyImpl::CreateImage(ClientBuffer buffer,
bool requires_sync_token = handle.type == gfx::IO_SURFACE_BUFFER;
uint64_t image_fence_sync = 0;
- if (requires_sync_token) {
+ if (requires_sync_token)
image_fence_sync = GenerateFenceSyncRelease();
- // Make sure fence syncs were flushed before CreateImage() was called.
- DCHECK_EQ(image_fence_sync, flushed_fence_sync_release_ + 1);
- }
-
DCHECK(gpu::IsImageFromGpuMemoryBufferFormatSupported(
gpu_memory_buffer->GetFormat(), capabilities_));
DCHECK(gpu::IsImageSizeValidForGpuMemoryBufferFormat(
@@ -548,8 +541,7 @@ void CommandBufferProxyImpl::SignalSyncToken(const gpu::SyncToken& sync_token,
signal_tasks_.insert(std::make_pair(signal_id, std::move(callback)));
}
-void CommandBufferProxyImpl::WaitSyncTokenHint(
- const gpu::SyncToken& sync_token) {
+void CommandBufferProxyImpl::WaitSyncToken(const gpu::SyncToken& sync_token) {
CheckLock();
base::AutoLock lock(last_state_lock_);
if (last_state_.error != gpu::error::kNoError)
@@ -656,8 +648,9 @@ void CommandBufferProxyImpl::ReturnFrontBuffer(const gpu::Mailbox& mailbox,
if (last_state_.error != gpu::error::kNoError)
return;
- Send(new GpuCommandBufferMsg_WaitSyncToken(route_id_, sync_token));
- Send(new GpuCommandBufferMsg_ReturnFrontBuffer(route_id_, mailbox, is_lost));
+ last_flush_id_ = channel_->EnqueueDeferredMessage(
+ GpuCommandBufferMsg_ReturnFrontBuffer(route_id_, mailbox, is_lost),
+ {sync_token});
}
bool CommandBufferProxyImpl::Send(IPC::Message* msg) {
@@ -746,8 +739,8 @@ void CommandBufferProxyImpl::TryUpdateStateThreadSafe() {
if (last_state_.error != gpu::error::kNoError) {
callback_thread_->PostTask(
FROM_HERE,
- base::Bind(&CommandBufferProxyImpl::LockAndDisconnectChannel,
- weak_ptr_factory_.GetWeakPtr()));
+ base::BindOnce(&CommandBufferProxyImpl::LockAndDisconnectChannel,
+ weak_ptr_factory_.GetWeakPtr()));
}
}
}
@@ -838,8 +831,9 @@ void CommandBufferProxyImpl::DisconnectChannelInFreshCallStack() {
// stack in case things will use it, and give the GpuChannelClient a chance to
// act fully on the lost context.
callback_thread_->PostTask(
- FROM_HERE, base::Bind(&CommandBufferProxyImpl::LockAndDisconnectChannel,
- weak_ptr_factory_.GetWeakPtr()));
+ FROM_HERE,
+ base::BindOnce(&CommandBufferProxyImpl::LockAndDisconnectChannel,
+ weak_ptr_factory_.GetWeakPtr()));
}
void CommandBufferProxyImpl::LockAndDisconnectChannel() {
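
Note: the base::Bind-to-base::BindOnce conversions in this file follow the standard pattern for one-shot tasks bound to a weak pointer: BindOnce states in the type that the callback runs at most once, and the WeakPtr cancels it if the proxy dies before the posted task runs. A minimal sketch of the shape, with names abbreviated and the task-runner plumbing simplified:

#include <utility>

#include "base/bind.h"
#include "base/location.h"
#include "base/memory/weak_ptr.h"
#include "base/single_thread_task_runner.h"

class ProxySketch {
 public:
  explicit ProxySketch(scoped_refptr<base::SingleThreadTaskRunner> runner)
      : runner_(std::move(runner)) {}

  void PostDisconnect() {
    // If |this| is destroyed first, the weak pointer invalidates the callback
    // and the posted task becomes a no-op instead of touching freed memory.
    runner_->PostTask(FROM_HERE,
                      base::BindOnce(&ProxySketch::Disconnect,
                                     weak_ptr_factory_.GetWeakPtr()));
  }

 private:
  void Disconnect() {}

  scoped_refptr<base::SingleThreadTaskRunner> runner_;
  base::WeakPtrFactory<ProxySketch> weak_ptr_factory_{this};
};
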
diff --git a/chromium/gpu/ipc/client/command_buffer_proxy_impl.h b/chromium/gpu/ipc/client/command_buffer_proxy_impl.h
index 6f74b8a5fce..6b0629bf466 100644
--- a/chromium/gpu/ipc/client/command_buffer_proxy_impl.h
+++ b/chromium/gpu/ipc/client/command_buffer_proxy_impl.h
@@ -12,11 +12,11 @@
#include <memory>
#include <queue>
#include <string>
+#include <unordered_map>
#include "base/callback.h"
#include "base/compiler_specific.h"
#include "base/containers/flat_map.h"
-#include "base/containers/hash_tables.h"
#include "base/macros.h"
#include "base/memory/ref_counted.h"
#include "base/memory/shared_memory_mapping.h"
@@ -72,9 +72,6 @@ class GPU_EXPORT CommandBufferProxyImpl : public gpu::CommandBuffer,
virtual ~DeletionObserver() = default;
};
- typedef base::Callback<void(const std::string& msg, int id)>
- GpuConsoleMessageCallback;
-
CommandBufferProxyImpl(
scoped_refptr<GpuChannelHost> channel,
GpuMemoryBufferManager* gpu_memory_buffer_manager,
@@ -102,7 +99,7 @@ class GPU_EXPORT CommandBufferProxyImpl : public gpu::CommandBuffer,
int32_t start,
int32_t end) override;
void SetGetBuffer(int32_t shm_id) override;
- scoped_refptr<gpu::Buffer> CreateTransferBuffer(size_t size,
+ scoped_refptr<gpu::Buffer> CreateTransferBuffer(uint32_t size,
int32_t* id) override;
void DestroyTransferBuffer(int32_t id) override;
@@ -128,7 +125,7 @@ class GPU_EXPORT CommandBufferProxyImpl : public gpu::CommandBuffer,
bool IsFenceSyncReleased(uint64_t release) override;
void SignalSyncToken(const gpu::SyncToken& sync_token,
base::OnceClosure callback) override;
- void WaitSyncTokenHint(const gpu::SyncToken& sync_token) override;
+ void WaitSyncToken(const gpu::SyncToken& sync_token) override;
bool CanWaitUnverifiedSyncToken(const gpu::SyncToken& sync_token) override;
void TakeFrontBuffer(const gpu::Mailbox& mailbox);
void ReturnFrontBuffer(const gpu::Mailbox& mailbox,
@@ -141,7 +138,8 @@ class GPU_EXPORT CommandBufferProxyImpl : public gpu::CommandBuffer,
bool EnsureBackbuffer();
using UpdateVSyncParametersCallback =
- base::Callback<void(base::TimeTicks timebase, base::TimeDelta interval)>;
+ base::RepeatingCallback<void(base::TimeTicks timebase,
+ base::TimeDelta interval)>;
void SetUpdateVSyncParametersCallback(
const UpdateVSyncParametersCallback& callback);
@@ -156,7 +154,7 @@ class GPU_EXPORT CommandBufferProxyImpl : public gpu::CommandBuffer,
private:
typedef std::map<int32_t, scoped_refptr<gpu::Buffer>> TransferBufferMap;
- typedef base::hash_map<uint32_t, base::OnceClosure> SignalTaskMap;
+ typedef std::unordered_map<uint32_t, base::OnceClosure> SignalTaskMap;
void CheckLock() {
if (lock_) {
@@ -262,14 +260,6 @@ class GPU_EXPORT CommandBufferProxyImpl : public gpu::CommandBuffer,
// Sync token waits that haven't been flushed yet.
std::vector<SyncToken> pending_sync_token_fences_;
- // Last flushed fence sync release, same as last item in queue if not empty.
- uint64_t flushed_fence_sync_release_ = 0;
-
- // Last verified fence sync.
- uint64_t verified_fence_sync_release_ = 0;
-
- GpuConsoleMessageCallback console_message_callback_;
-
// Tasks to be invoked in SignalSyncPoint responses.
uint32_t next_signal_id_ = 0;
SignalTaskMap signal_tasks_;
diff --git a/chromium/gpu/ipc/client/gpu_channel_host.cc b/chromium/gpu/ipc/client/gpu_channel_host.cc
index b4477e99385..be5892f1809 100644
--- a/chromium/gpu/ipc/client/gpu_channel_host.cc
+++ b/chromium/gpu/ipc/client/gpu_channel_host.cc
@@ -24,12 +24,6 @@
using base::AutoLock;
namespace gpu {
-namespace {
-
-// Global atomic to generate unique transfer buffer IDs.
-base::AtomicSequenceNumber g_next_transfer_buffer_id;
-
-} // namespace
GpuChannelHost::GpuChannelHost(int channel_id,
const gpu::GPUInfo& gpu_info,
@@ -160,7 +154,8 @@ void GpuChannelHost::EnqueuePendingOrderingBarrier() {
deferred_message.message = GpuCommandBufferMsg_AsyncFlush(
pending_ordering_barrier_->route_id,
pending_ordering_barrier_->put_offset,
- pending_ordering_barrier_->deferred_message_id);
+ pending_ordering_barrier_->deferred_message_id,
+ pending_ordering_barrier_->sync_token_fences);
deferred_message.sync_token_fences =
std::move(pending_ordering_barrier_->sync_token_fences);
deferred_messages_.push_back(std::move(deferred_message));
@@ -199,16 +194,16 @@ void GpuChannelHost::AddRouteWithTaskRunner(
int route_id,
base::WeakPtr<IPC::Listener> listener,
scoped_refptr<base::SingleThreadTaskRunner> task_runner) {
- io_thread_->PostTask(FROM_HERE,
- base::Bind(&GpuChannelHost::Listener::AddRoute,
- base::Unretained(listener_.get()), route_id,
- listener, task_runner));
+ io_thread_->PostTask(
+ FROM_HERE, base::BindOnce(&GpuChannelHost::Listener::AddRoute,
+ base::Unretained(listener_.get()), route_id,
+ listener, task_runner));
}
void GpuChannelHost::RemoveRoute(int route_id) {
- io_thread_->PostTask(FROM_HERE,
- base::Bind(&GpuChannelHost::Listener::RemoveRoute,
- base::Unretained(listener_.get()), route_id));
+ io_thread_->PostTask(
+ FROM_HERE, base::BindOnce(&GpuChannelHost::Listener::RemoveRoute,
+ base::Unretained(listener_.get()), route_id));
}
base::SharedMemoryHandle GpuChannelHost::ShareToGpuProcess(
@@ -227,14 +222,6 @@ base::UnsafeSharedMemoryRegion GpuChannelHost::ShareToGpuProcess(
return source_region.Duplicate();
}
-int32_t GpuChannelHost::ReserveTransferBufferId() {
- // 0 is a reserved value.
- int32_t id = g_next_transfer_buffer_id.GetNext();
- if (id)
- return id;
- return g_next_transfer_buffer_id.GetNext();
-}
-
int32_t GpuChannelHost::ReserveImageId() {
return next_image_id_.GetNext();
}
@@ -307,7 +294,8 @@ void GpuChannelHost::Listener::AddRoute(
if (lost_) {
info.task_runner->PostTask(
- FROM_HERE, base::Bind(&IPC::Listener::OnChannelError, info.listener));
+ FROM_HERE,
+ base::BindOnce(&IPC::Listener::OnChannelError, info.listener));
}
}
@@ -338,8 +326,8 @@ bool GpuChannelHost::Listener::OnMessageReceived(const IPC::Message& message) {
const RouteInfo& info = it->second;
info.task_runner->PostTask(
FROM_HERE,
- base::Bind(base::IgnoreResult(&IPC::Listener::OnMessageReceived),
- info.listener, message));
+ base::BindOnce(base::IgnoreResult(&IPC::Listener::OnMessageReceived),
+ info.listener, message));
return true;
}
@@ -364,7 +352,8 @@ void GpuChannelHost::Listener::OnChannelError() {
for (const auto& kv : routes_) {
const RouteInfo& info = kv.second;
info.task_runner->PostTask(
- FROM_HERE, base::Bind(&IPC::Listener::OnChannelError, info.listener));
+ FROM_HERE,
+ base::BindOnce(&IPC::Listener::OnChannelError, info.listener));
}
routes_.clear();
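
Note: the deleted ReserveTransferBufferId() shows why a process-global base::AtomicSequenceNumber needed a skip-zero dance (GetNext() hands out 0 first, and 0 is a reserved buffer id); transfer buffer ids now come from the proxy's own GetNextBufferId() instead. For reference, the removed idiom:

#include <cstdint>

#include "base/atomic_sequence_num.h"

base::AtomicSequenceNumber g_next_transfer_buffer_id;

int32_t ReserveNonZeroId() {
  // GetNext() returns 0 on its first call (and again on wraparound); draw
  // once more in that case so the reserved value 0 is never handed out.
  int32_t id = g_next_transfer_buffer_id.GetNext();
  return id ? id : g_next_transfer_buffer_id.GetNext();
}
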
diff --git a/chromium/gpu/ipc/client/gpu_channel_host.h b/chromium/gpu/ipc/client/gpu_channel_host.h
index 69d7e741522..cc0c56ca3ce 100644
--- a/chromium/gpu/ipc/client/gpu_channel_host.h
+++ b/chromium/gpu/ipc/client/gpu_channel_host.h
@@ -10,6 +10,7 @@
#include <memory>
#include <string>
+#include <unordered_map>
#include <vector>
#include "base/atomic_sequence_num.h"
@@ -132,9 +133,6 @@ class GPU_EXPORT GpuChannelHost
base::UnsafeSharedMemoryRegion ShareToGpuProcess(
const base::UnsafeSharedMemoryRegion& source_region);
- // Reserve one unused transfer buffer ID.
- int32_t ReserveTransferBufferId();
-
// Reserve one unused image ID.
int32_t ReserveImageId();
@@ -208,7 +206,7 @@ class GPU_EXPORT GpuChannelHost
// Threading notes: most fields are only accessed on the IO thread, except
// for lost_ which is protected by |lock_|.
- base::hash_map<int32_t, RouteInfo> routes_;
+ std::unordered_map<int32_t, RouteInfo> routes_;
std::unique_ptr<IPC::ChannelMojo> channel_;
base::flat_map<int, IPC::PendingSyncMsg*> pending_syncs_;
diff --git a/chromium/gpu/ipc/client/gpu_context_tests.h b/chromium/gpu/ipc/client/gpu_context_tests.h
index b5b40cc4729..9c5d563bab6 100644
--- a/chromium/gpu/ipc/client/gpu_context_tests.h
+++ b/chromium/gpu/ipc/client/gpu_context_tests.h
@@ -23,12 +23,6 @@ namespace {
class SignalTest : public ContextTestBase {
public:
- static void RunOnlyOnce(base::Closure cb, int* tmp) {
- CHECK_EQ(*tmp, 0);
- ++*tmp;
- cb.Run();
- }
-
// These tests should time out if the callback doesn't get called.
void TestSignalSyncToken(const gpu::SyncToken& sync_token) {
base::RunLoop run_loop;
@@ -39,9 +33,7 @@ class SignalTest : public ContextTestBase {
// These tests should time out if the callback doesn't get called.
void TestSignalQuery(GLuint query) {
base::RunLoop run_loop;
- context_support_->SignalQuery(
- query, base::Bind(&RunOnlyOnce, run_loop.QuitClosure(),
- base::Owned(new int(0))));
+ context_support_->SignalQuery(query, run_loop.QuitClosure());
run_loop.Run();
}
};
diff --git a/chromium/gpu/ipc/client/raster_in_process_context_tests.cc b/chromium/gpu/ipc/client/raster_in_process_context_tests.cc
index 712d5768136..f6d52f2d719 100644
--- a/chromium/gpu/ipc/client/raster_in_process_context_tests.cc
+++ b/chromium/gpu/ipc/client/raster_in_process_context_tests.cc
@@ -7,7 +7,6 @@
#include "build/build_config.h"
#include "cc/paint/color_space_transfer_cache_entry.h"
#include "components/viz/common/resources/resource_format.h"
-#include "components/viz/common/resources/resource_format_utils.h"
#include "components/viz/test/test_gpu_memory_buffer_manager.h"
#include "gpu/command_buffer/client/raster_implementation.h"
#include "gpu/command_buffer/client/shared_image_interface.h"
@@ -109,15 +108,13 @@ TEST_F(RasterInProcessCommandBufferTest,
cc::RasterColorSpace raster_color_space(color_space, 0);
ri_->BeginRasterCHROMIUM(/*sk_color=*/0, /*msaa_sample_count=*/0,
/*can_use_lcd_text=*/false,
- viz::ResourceFormatToClosestSkColorType(
- /*gpu_compositing=*/true, kResourceFormat),
raster_color_space, mailbox.name);
EXPECT_EQ(static_cast<GLenum>(GL_NO_ERROR), ri_->GetError());
// Should flag an error, as this command is not allowed between a Begin and
// EndRasterCHROMIUM.
SyncToken sync_token;
- ri_->GenSyncTokenCHROMIUM(sync_token.GetData());
+ ri_->GenUnverifiedSyncTokenCHROMIUM(sync_token.GetData());
EXPECT_EQ(static_cast<GLenum>(GL_INVALID_OPERATION), ri_->GetError());
// Confirm that we skip over without error.
diff --git a/chromium/gpu/ipc/client/shared_image_interface_proxy.cc b/chromium/gpu/ipc/client/shared_image_interface_proxy.cc
index 7dccdbf4043..57be23080de 100644
--- a/chromium/gpu/ipc/client/shared_image_interface_proxy.cc
+++ b/chromium/gpu/ipc/client/shared_image_interface_proxy.cc
@@ -4,27 +4,64 @@
#include "gpu/ipc/client/shared_image_interface_proxy.h"
+#include "base/bits.h"
#include "gpu/command_buffer/client/gpu_memory_buffer_manager.h"
#include "gpu/command_buffer/common/gpu_memory_buffer_support.h"
#include "gpu/ipc/client/gpu_channel_host.h"
#include "gpu/ipc/common/command_buffer_id.h"
#include "gpu/ipc/common/gpu_messages.h"
#include "gpu/ipc/common/gpu_param_traits_macros.h"
+#include "mojo/public/cpp/base/shared_memory_utils.h"
namespace gpu {
+namespace {
+
+bool SafeIncrementAndAlign(size_t aligned_value,
+ size_t increment,
+ size_t alignment,
+ size_t* result) {
+ base::CheckedNumeric<size_t> sum = aligned_value;
+ sum += increment;
+ // Taken from base::bits::Align.
+ // TODO(ericrk): Update base::bits::Align to handle CheckedNumeric.
+ DCHECK(base::bits::IsPowerOfTwo(alignment));
+ sum = (sum + alignment - 1) & ~(alignment - 1);
+ return sum.AssignIfValid(result);
+}
+
+size_t GetRemainingSize(const base::MappedReadOnlyRegion& region,
+ size_t offset) {
+ if (offset > region.mapping.size())
+ return 0;
+ return region.mapping.size() - offset;
+}
+
+void* GetDataAddress(const base::MappedReadOnlyRegion& region,
+ size_t offset,
+ size_t size) {
+ base::CheckedNumeric<size_t> safe_end = offset;
+ safe_end += size;
+ size_t end;
+ if (!safe_end.AssignIfValid(&end) || end > region.mapping.size())
+ return nullptr;
+ return region.mapping.GetMemoryAs<uint8_t>() + offset;
+}
+
+} // namespace
SharedImageInterfaceProxy::SharedImageInterfaceProxy(GpuChannelHost* host,
int32_t route_id)
: host_(host), route_id_(route_id) {}
SharedImageInterfaceProxy::~SharedImageInterfaceProxy() = default;
+
Mailbox SharedImageInterfaceProxy::CreateSharedImage(
viz::ResourceFormat format,
const gfx::Size& size,
const gfx::ColorSpace& color_space,
uint32_t usage) {
GpuChannelMsg_CreateSharedImage_Params params;
- params.mailbox = Mailbox::Generate();
+ params.mailbox = Mailbox::GenerateForSharedImage();
params.format = format;
params.size = size;
params.color_space = color_space;
@@ -41,13 +78,54 @@ Mailbox SharedImageInterfaceProxy::CreateSharedImage(
}
Mailbox SharedImageInterfaceProxy::CreateSharedImage(
+ viz::ResourceFormat format,
+ const gfx::Size& size,
+ const gfx::ColorSpace& color_space,
+ uint32_t usage,
+ base::span<const uint8_t> pixel_data) {
+ // Pixel data's size must fit into a uint32_t to be sent via
+ // GpuChannelMsg_CreateSharedImageWithData_Params.
+ if (!base::IsValueInRangeForNumericType<uint32_t>(pixel_data.size())) {
+ LOG(ERROR)
+ << "CreateSharedImage: SharedImage upload data overflows uint32_t";
+ return Mailbox();
+ }
+
+ // Hold the lock for the rest of this function, as we need to ensure that SHM
+ // reallocation / registration and the following use of that SHM via deferred
+ // message are not interrupted by a SHM allocation on another thread.
+ base::AutoLock lock(lock_);
+
+ bool done_with_shm;
+ size_t shm_offset;
+ if (!GetSHMForPixelData(pixel_data, &shm_offset, &done_with_shm)) {
+ LOG(ERROR) << "CreateSharedImage: Could not get SHM for data upload.";
+ return Mailbox();
+ }
+
+ GpuChannelMsg_CreateSharedImageWithData_Params params;
+ params.mailbox = Mailbox::GenerateForSharedImage();
+ params.format = format;
+ params.size = size;
+ params.color_space = color_space;
+ params.usage = usage;
+ params.pixel_data_offset = shm_offset;
+ params.pixel_data_size = pixel_data.size();
+ params.done_with_shm = done_with_shm;
+ params.release_id = ++next_release_id_;
+ last_flush_id_ = host_->EnqueueDeferredMessage(
+ GpuChannelMsg_CreateSharedImageWithData(route_id_, params));
+ return params.mailbox;
+}
+
+Mailbox SharedImageInterfaceProxy::CreateSharedImage(
gfx::GpuMemoryBuffer* gpu_memory_buffer,
GpuMemoryBufferManager* gpu_memory_buffer_manager,
const gfx::ColorSpace& color_space,
uint32_t usage) {
DCHECK(gpu_memory_buffer_manager);
GpuChannelMsg_CreateGMBSharedImage_Params params;
- params.mailbox = Mailbox::Generate();
+ params.mailbox = Mailbox::GenerateForSharedImage();
params.handle = gpu_memory_buffer->CloneHandle();
params.size = gpu_memory_buffer->GetSize();
params.format = gpu_memory_buffer->GetFormat();
@@ -123,12 +201,11 @@ void SharedImageInterfaceProxy::DestroySharedImage(const SyncToken& sync_token,
new_token.SetVerifyFlush();
}
}
- uint32_t flush_id = host_->EnqueueDeferredMessage(
- GpuChannelMsg_DestroySharedImage(route_id_, mailbox),
- std::move(dependencies));
{
base::AutoLock lock(lock_);
- last_flush_id_ = flush_id;
+ last_flush_id_ = host_->EnqueueDeferredMessage(
+ GpuChannelMsg_DestroySharedImage(route_id_, mailbox),
+ std::move(dependencies));
}
}
@@ -140,4 +217,67 @@ SyncToken SharedImageInterfaceProxy::GenUnverifiedSyncToken() {
next_release_id_);
}
+bool SharedImageInterfaceProxy::GetSHMForPixelData(
+ base::span<const uint8_t> pixel_data,
+ size_t* shm_offset,
+ bool* done_with_shm) {
+ const size_t kUploadBufferSize = 1 * 1024 * 1024; // 1MB
+ *shm_offset = 0;
+ *done_with_shm = false;
+
+ lock_.AssertAcquired();
+ if (!upload_buffer_.IsValid() ||
+ GetRemainingSize(upload_buffer_, upload_buffer_offset_) <
+ pixel_data.size()) {
+ size_t size_to_alloc = std::max(kUploadBufferSize, pixel_data.size());
+ auto shm = mojo::CreateReadOnlySharedMemoryRegion(size_to_alloc);
+ if (!shm.IsValid())
+ return false;
+
+ // Duplicate the buffer for sharing to the GPU process.
+ base::ReadOnlySharedMemoryRegion shared_shm = shm.region.Duplicate();
+ if (!shared_shm.IsValid())
+ return false;
+
+ // Share the SHM to the GPU process. In order to ensure that any deferred
+ // messages which rely on the previous SHM have a chance to execute before
+ // it is replaced, flush before sending.
+ host_->EnsureFlush(last_flush_id_);
+ host_->Send(new GpuChannelMsg_RegisterSharedImageUploadBuffer(
+ route_id_, std::move(shared_shm)));
+
+ upload_buffer_ = std::move(shm);
+ upload_buffer_offset_ = 0;
+ }
+
+ // We now have an |upload_buffer_| that fits our data.
+
+ void* target =
+ GetDataAddress(upload_buffer_, upload_buffer_offset_, pixel_data.size());
+ DCHECK(target);
+ memcpy(target, pixel_data.data(), pixel_data.size());
+ *shm_offset = upload_buffer_offset_;
+
+ // Now that we've successfully used up a portion of our buffer, increase our
+ // |upload_buffer_offset_|. If our |upload_buffer_offset_| is at the end (or
+ // past the end with rounding), we discard the current buffer. We'll allocate
+ // a new buffer the next time we enter this function.
+ bool discard_buffer = false;
+ if (SafeIncrementAndAlign(upload_buffer_offset_, pixel_data.size(),
+ 4 /* alignment */, &upload_buffer_offset_)) {
+ discard_buffer =
+ GetRemainingSize(upload_buffer_, upload_buffer_offset_) == 0;
+ } else {
+ discard_buffer = true;
+ }
+
+ if (discard_buffer) {
+ *done_with_shm = true;
+ upload_buffer_ = base::MappedReadOnlyRegion();
+ upload_buffer_offset_ = 0;
+ }
+
+ return true;
+}
+
} // namespace gpu
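
Note: SafeIncrementAndAlign() combines a checked add with the classic power-of-two round-up ((x + a - 1) & ~(a - 1)). The same arithmetic as a standalone sketch, with explicit wraparound checks in place of base::CheckedNumeric:

#include <cassert>
#include <cstddef>

// Rounds offset + increment up to the next multiple of a power-of-two
// alignment, failing instead of wrapping on overflow (mirrors the helper
// added in shared_image_interface_proxy.cc above).
bool SafeIncrementAndAlignSketch(size_t offset, size_t increment,
                                 size_t alignment, size_t* result) {
  assert(alignment != 0 && (alignment & (alignment - 1)) == 0);
  size_t sum = offset + increment;
  if (sum < offset)  // unsigned wraparound check for the add
    return false;
  size_t aligned = (sum + alignment - 1) & ~(alignment - 1);
  if (aligned < sum)  // the round-up itself can also wrap
    return false;
  *result = aligned;
  return true;
}
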
diff --git a/chromium/gpu/ipc/client/shared_image_interface_proxy.h b/chromium/gpu/ipc/client/shared_image_interface_proxy.h
index 1a833f09245..47178991cde 100644
--- a/chromium/gpu/ipc/client/shared_image_interface_proxy.h
+++ b/chromium/gpu/ipc/client/shared_image_interface_proxy.h
@@ -5,9 +5,11 @@
#ifndef GPU_IPC_CLIENT_SHARED_IMAGE_INTERFACE_PROXY_H_
#define GPU_IPC_CLIENT_SHARED_IMAGE_INTERFACE_PROXY_H_
+#include "base/memory/read_only_shared_memory_region.h"
#include "base/synchronization/lock.h"
#include "base/thread_annotations.h"
#include "gpu/command_buffer/client/shared_image_interface.h"
+#include "gpu/command_buffer/common/buffer.h"
namespace gpu {
class GpuChannelHost;
@@ -22,6 +24,11 @@ class SharedImageInterfaceProxy : public SharedImageInterface {
const gfx::Size& size,
const gfx::ColorSpace& color_space,
uint32_t usage) override;
+ Mailbox CreateSharedImage(viz::ResourceFormat format,
+ const gfx::Size& size,
+ const gfx::ColorSpace& color_space,
+ uint32_t usage,
+ base::span<const uint8_t> pixel_data) override;
Mailbox CreateSharedImage(gfx::GpuMemoryBuffer* gpu_memory_buffer,
GpuMemoryBufferManager* gpu_memory_buffer_manager,
const gfx::ColorSpace& color_space,
@@ -34,11 +41,20 @@ class SharedImageInterfaceProxy : public SharedImageInterface {
SyncToken GenUnverifiedSyncToken() override;
private:
+ bool GetSHMForPixelData(base::span<const uint8_t> pixel_data,
+ size_t* shm_offset,
+ bool* done_with_shm) EXCLUSIVE_LOCKS_REQUIRED(lock_);
+
GpuChannelHost* const host_;
const int32_t route_id_;
base::Lock lock_;
uint32_t next_release_id_ GUARDED_BY(lock_) = 0;
uint32_t last_flush_id_ GUARDED_BY(lock_) = 0;
+
+ // A buffer used to upload initial data during SharedImage creation.
+ base::MappedReadOnlyRegion upload_buffer_ GUARDED_BY(lock_);
+ // The offset into |upload_buffer_| at which data is no longer used.
+ size_t upload_buffer_offset_ GUARDED_BY(lock_) = 0;
};
} // namespace gpu
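
Note: GetSHMForPixelData()'s EXCLUSIVE_LOCKS_REQUIRED(lock_) and the GUARDED_BY(lock_) fields are Clang thread-safety annotations from base/thread_annotations.h: with -Wthread-safety, the compiler rejects code that touches the guarded state without holding the lock. A minimal sketch of the pattern on a hypothetical class:

#include <cstddef>

#include "base/synchronization/lock.h"
#include "base/thread_annotations.h"

class UploadStateSketch {
 public:
  void Append(size_t n) {
    base::AutoLock hold(lock_);
    AppendLocked(n);  // OK: lock_ is held on this path
  }

 private:
  // Callers must hold lock_; calling this without it is flagged at compile
  // time under -Wthread-safety rather than becoming a latent data race.
  void AppendLocked(size_t n) EXCLUSIVE_LOCKS_REQUIRED(lock_) {
    offset_ += n;
  }

  base::Lock lock_;
  size_t offset_ GUARDED_BY(lock_) = 0;
};
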
diff --git a/chromium/gpu/ipc/command_buffer_task_executor.cc b/chromium/gpu/ipc/command_buffer_task_executor.cc
index 56567e905c5..fb1b239ebe7 100644
--- a/chromium/gpu/ipc/command_buffer_task_executor.cc
+++ b/chromium/gpu/ipc/command_buffer_task_executor.cc
@@ -8,6 +8,7 @@
#include "gpu/command_buffer/service/mailbox_manager_factory.h"
#include "gpu/command_buffer/service/memory_program_cache.h"
#include "gpu/command_buffer/service/program_cache.h"
+#include "ui/gl/gl_bindings.h"
#include "ui/gl/gl_share_group.h"
namespace gpu {
@@ -18,19 +19,20 @@ CommandBufferTaskExecutor::CommandBufferTaskExecutor(
SyncPointManager* sync_point_manager,
MailboxManager* mailbox_manager,
scoped_refptr<gl::GLShareGroup> share_group,
- gl::GLSurfaceFormat share_group_surface_format)
+ gl::GLSurfaceFormat share_group_surface_format,
+ SharedImageManager* shared_image_manager,
+ gles2::ProgramCache* program_cache)
: gpu_preferences_(gpu_preferences),
gpu_feature_info_(gpu_feature_info),
sync_point_manager_(sync_point_manager),
mailbox_manager_(mailbox_manager),
share_group_(share_group),
share_group_surface_format_(share_group_surface_format),
- shader_translator_cache_(gpu_preferences_) {
- if (!mailbox_manager_) {
- // TODO(piman): have embedders own the mailbox manager.
- owned_mailbox_manager_ = gles2::CreateMailboxManager(gpu_preferences_);
- mailbox_manager_ = owned_mailbox_manager_.get();
- }
+ program_cache_(program_cache),
+ shader_translator_cache_(gpu_preferences_),
+ shared_image_manager_(shared_image_manager) {
+ DCHECK(mailbox_manager_);
+ DCHECK(shared_image_manager_);
}
CommandBufferTaskExecutor::~CommandBufferTaskExecutor() = default;
@@ -57,13 +59,14 @@ gles2::ProgramCache* CommandBufferTaskExecutor::program_cache() {
bool disable_disk_cache =
gpu_preferences_.disable_gpu_shader_disk_cache ||
gpu_feature_info_.IsWorkaroundEnabled(gpu::DISABLE_PROGRAM_DISK_CACHE);
- program_cache_ = std::make_unique<gles2::MemoryProgramCache>(
+ owned_program_cache_ = std::make_unique<gles2::MemoryProgramCache>(
gpu_preferences_.gpu_program_cache_size, disable_disk_cache,
gpu_feature_info_.IsWorkaroundEnabled(
gpu::DISABLE_PROGRAM_CACHING_FOR_TRANSFORM_FEEDBACK),
&activity_flags_);
+ program_cache_ = owned_program_cache_.get();
}
- return program_cache_.get();
+ return program_cache_;
}
} // namespace gpu
diff --git a/chromium/gpu/ipc/command_buffer_task_executor.h b/chromium/gpu/ipc/command_buffer_task_executor.h
index 0e593ae78e8..455a4125319 100644
--- a/chromium/gpu/ipc/command_buffer_task_executor.h
+++ b/chromium/gpu/ipc/command_buffer_task_executor.h
@@ -6,6 +6,7 @@
#define GPU_IPC_COMMAND_BUFFER_TASK_EXECUTOR_H_
#include <memory>
+#include <vector>
#include "base/callback.h"
#include "base/macros.h"
@@ -57,9 +58,6 @@ class GL_IN_PROCESS_CONTEXT_EXPORT CommandBufferTaskExecutor
// Returns true if sequence should yield while running its current task.
virtual bool ShouldYield() = 0;
- // Enables or disables further execution of tasks in this sequence.
- virtual void SetEnabled(bool enabled) = 0;
-
// Schedule a task with provided sync token dependencies. The dependencies
// are hints for sync token waits within the task, and can be ignored by the
// implementation.
@@ -75,7 +73,9 @@ class GL_IN_PROCESS_CONTEXT_EXPORT CommandBufferTaskExecutor
SyncPointManager* sync_point_manager,
MailboxManager* mailbox_manager,
scoped_refptr<gl::GLShareGroup> share_group,
- gl::GLSurfaceFormat share_group_surface_format);
+ gl::GLSurfaceFormat share_group_surface_format,
+ SharedImageManager* shared_image_manager,
+ gles2::ProgramCache* program_cache);
// Always use virtualized GL contexts if this returns true.
virtual bool ForceVirtualizedGLContexts() const = 0;
@@ -83,10 +83,6 @@ class GL_IN_PROCESS_CONTEXT_EXPORT CommandBufferTaskExecutor
// Creates a memory tracker for the context group if this returns true.
virtual bool ShouldCreateMemoryTracker() const = 0;
- // Block thread when a WaitSyncToken command is encountered instead of calling
- // OnWaitSyncToken().
- virtual bool BlockThreadOnWaitSyncToken() const = 0;
-
// Schedules |task| to run out of order with respect to other sequenced tasks.
virtual void ScheduleOutOfOrderTask(base::OnceClosure task) = 0;
@@ -119,7 +115,7 @@ class GL_IN_PROCESS_CONTEXT_EXPORT CommandBufferTaskExecutor
gles2::FramebufferCompletenessCache* framebuffer_completeness_cache() {
return &framebuffer_completeness_cache_;
}
- SharedImageManager* shared_image_manager() { return &shared_image_manager_; }
+ SharedImageManager* shared_image_manager() { return shared_image_manager_; }
// These methods construct accessed fields if not already initialized.
scoped_refptr<gl::GLShareGroup> share_group();
@@ -134,19 +130,19 @@ class GL_IN_PROCESS_CONTEXT_EXPORT CommandBufferTaskExecutor
private:
const GpuPreferences gpu_preferences_;
const GpuFeatureInfo gpu_feature_info_;
- std::unique_ptr<MailboxManager> owned_mailbox_manager_;
SyncPointManager* sync_point_manager_;
MailboxManager* mailbox_manager_;
std::unique_ptr<gles2::Outputter> outputter_;
scoped_refptr<gl::GLShareGroup> share_group_;
gl::GLSurfaceFormat share_group_surface_format_;
- std::unique_ptr<gles2::ProgramCache> program_cache_;
+ std::unique_ptr<gles2::ProgramCache> owned_program_cache_;
+ gles2::ProgramCache* program_cache_;
gles2::ImageManager image_manager_;
ServiceDiscardableManager discardable_manager_;
PassthroughDiscardableManager passthrough_discardable_manager_;
gles2::ShaderTranslatorCache shader_translator_cache_;
gles2::FramebufferCompletenessCache framebuffer_completeness_cache_;
- SharedImageManager shared_image_manager_;
+ SharedImageManager* shared_image_manager_;
// No-op default initialization is used in in-process mode.
GpuProcessActivityFlags activity_flags_;
diff --git a/chromium/gpu/ipc/common/BUILD.gn b/chromium/gpu/ipc/common/BUILD.gn
index 94cbe34879c..a25a403500b 100644
--- a/chromium/gpu/ipc/common/BUILD.gn
+++ b/chromium/gpu/ipc/common/BUILD.gn
@@ -139,8 +139,6 @@ source_set("ipc_common_sources") {
if (is_android) {
sources += [
- "android/android_image_reader_utils.cc",
- "android/android_image_reader_utils.h",
"android/scoped_surface_request_conduit.cc",
"android/scoped_surface_request_conduit.h",
"android/surface_owner_android.cc",
@@ -151,6 +149,7 @@ source_set("ipc_common_sources") {
"gpu_surface_tracker.h",
]
libs = [ "android" ]
+ deps += [ ":android_image_reader_utils" ]
}
if (use_ozone) {
@@ -158,6 +157,24 @@ source_set("ipc_common_sources") {
}
}
+if (is_android) {
+ source_set("android_image_reader_utils") {
+ sources = [
+ "android/android_image_reader_utils.cc",
+ "android/android_image_reader_utils.h",
+ ]
+ configs += [ "//gpu:gpu_implementation" ]
+ deps = [
+ "//base",
+ "//ui/gl",
+ ]
+ visibility = [
+ "//gpu/*",
+ "//media/gpu:gpu",
+ ]
+ }
+}
+
# Depend on this to use surface_handle.h without pulling in all of gpu ipc.
source_set("surface_handle_type") {
public = [
diff --git a/chromium/gpu/ipc/common/android/android_image_reader_utils.cc b/chromium/gpu/ipc/common/android/android_image_reader_utils.cc
index 7bd2381d95b..e2d7b4ea77e 100644
--- a/chromium/gpu/ipc/common/android/android_image_reader_utils.cc
+++ b/chromium/gpu/ipc/common/android/android_image_reader_utils.cc
@@ -12,31 +12,38 @@
namespace gpu {
-bool DeleteAImageAsync(AImage* image,
- base::android::AndroidImageReader* loader) {
- // If there is no image to delete, there is no need to insert fence.
- if (image == nullptr)
- return true;
-
+base::ScopedFD CreateEglFenceAndExportFd() {
std::unique_ptr<gl::GLFenceAndroidNativeFenceSync> android_native_fence =
gl::GLFenceAndroidNativeFenceSync::CreateForGpuFence();
if (!android_native_fence) {
LOG(ERROR) << "Failed to create android native fence sync object.";
- return false;
+ return base::ScopedFD();
}
std::unique_ptr<gfx::GpuFence> gpu_fence =
android_native_fence->GetGpuFence();
if (!gpu_fence) {
LOG(ERROR) << "Unable to get a gpu fence object.";
- return false;
+ return base::ScopedFD();
}
gfx::GpuFenceHandle fence_handle =
gfx::CloneHandleForIPC(gpu_fence->GetGpuFenceHandle());
if (fence_handle.is_null()) {
LOG(ERROR) << "Gpu fence handle is null";
- return false;
+ return base::ScopedFD();
}
- base::ScopedFD fence_fd(fence_handle.native_fd.fd);
+ return base::ScopedFD(fence_handle.native_fd.fd);
+}
+
+bool DeleteAImageAsync(AImage* image,
+ base::android::AndroidImageReader* loader) {
+ // If there is no image to delete, there is no need to insert fence.
+ if (image == nullptr)
+ return true;
+
+ // Create an EGL fence and export a sync fd from it.
+ base::ScopedFD fence_fd = CreateEglFenceAndExportFd();
+ if (!fence_fd.is_valid())
+ return false;
// Delete the image asynchronously. Release the fence_fd, as the API below
// will own it and ensure that the file descriptor is closed properly.
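
Note: factoring fence creation into CreateEglFenceAndExportFd() makes base::ScopedFD the error channel: failure returns a default (invalid) fd, and success hands the caller sole ownership. A small sketch of the release()-based handoff the comment above describes; the consumer callback stands in for the real AImage delete API:

#include "base/files/scoped_file.h"

// The ScopedFD owns the sync fd until release(); after that the consuming
// API is responsible for closing it, so there is no double close.
void PassFenceFd(base::ScopedFD fence_fd, void (*consume_owned_fd)(int)) {
  if (!fence_fd.is_valid())
    return;  // creation failed upstream; nothing to hand off
  consume_owned_fd(fence_fd.release());
}
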
diff --git a/chromium/gpu/ipc/common/android/android_image_reader_utils.h b/chromium/gpu/ipc/common/android/android_image_reader_utils.h
index 8365a1dfc84..fae7adf019f 100644
--- a/chromium/gpu/ipc/common/android/android_image_reader_utils.h
+++ b/chromium/gpu/ipc/common/android/android_image_reader_utils.h
@@ -9,16 +9,20 @@
#include "base/android/android_image_reader_compat.h"
#include "base/files/scoped_file.h"
+#include "gpu/gpu_export.h"
#include "ui/gl/gl_bindings.h"
namespace gpu {
+// Creates and inserts an EGL fence and exports a ScopedFD from it.
+GPU_EXPORT base::ScopedFD CreateEglFenceAndExportFd();
+
// Delete the AImage asynchronously by inserting an android native fence sync.
bool DeleteAImageAsync(AImage* image,
base::android::AndroidImageReader* loader);
-// Create and insert an EGL fence using the provided fence fd.
-bool InsertEglFenceAndWait(base::ScopedFD acquire_fence_fd);
+// Creates and inserts an EGL fence, importing the provided fence fd.
+GPU_EXPORT bool InsertEglFenceAndWait(base::ScopedFD acquire_fence_fd);
// Create an EGL image from the AImage via AHardwarebuffer. Bind this EGL image
// to the texture target target_id. This changes the texture binding on the
diff --git a/chromium/gpu/ipc/common/gpu_memory_buffer_impl.cc b/chromium/gpu/ipc/common/gpu_memory_buffer_impl.cc
index 5cf38ddb010..31ac2095674 100644
--- a/chromium/gpu/ipc/common/gpu_memory_buffer_impl.cc
+++ b/chromium/gpu/ipc/common/gpu_memory_buffer_impl.cc
@@ -12,17 +12,17 @@ namespace gpu {
GpuMemoryBufferImpl::GpuMemoryBufferImpl(gfx::GpuMemoryBufferId id,
const gfx::Size& size,
gfx::BufferFormat format,
- const DestructionCallback& callback)
+ DestructionCallback callback)
: id_(id),
size_(size),
format_(format),
- callback_(callback),
+ callback_(std::move(callback)),
mapped_(false) {}
GpuMemoryBufferImpl::~GpuMemoryBufferImpl() {
DCHECK(!mapped_);
if (!callback_.is_null())
- callback_.Run(destruction_sync_token_);
+ std::move(callback_).Run(destruction_sync_token_);
}
gfx::Size GpuMemoryBufferImpl::GetSize() const {
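
Note: switching DestructionCallback to base::OnceCallback encodes the run-at-most-once contract in the type: Run() consumes the callback, so it must be invoked through std::move, as the destructor above now does. The shape in isolation, with a simplified callback signature:

#include <cstdint>
#include <utility>

#include "base/callback.h"

class BufferSketch {
 public:
  using DestructionCallback = base::OnceCallback<void(uint64_t sync_point)>;

  explicit BufferSketch(DestructionCallback cb) : callback_(std::move(cb)) {}

  ~BufferSketch() {
    // is_null() guards the no-callback case; std::move(...).Run() consumes
    // the callback so it cannot accidentally be run a second time.
    if (!callback_.is_null())
      std::move(callback_).Run(0);
  }

 private:
  DestructionCallback callback_;  // not const: must be movable to Run()
};
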
diff --git a/chromium/gpu/ipc/common/gpu_memory_buffer_impl.h b/chromium/gpu/ipc/common/gpu_memory_buffer_impl.h
index 62e70e44fd7..9045a44c895 100644
--- a/chromium/gpu/ipc/common/gpu_memory_buffer_impl.h
+++ b/chromium/gpu/ipc/common/gpu_memory_buffer_impl.h
@@ -21,7 +21,7 @@ namespace gpu {
// TODO(reveman): Rename to GpuMemoryBufferBase.
class GPU_EXPORT GpuMemoryBufferImpl : public gfx::GpuMemoryBuffer {
public:
- typedef base::Callback<void(const gpu::SyncToken& sync)> DestructionCallback;
+ using DestructionCallback = base::OnceCallback<void(const gpu::SyncToken&)>;
~GpuMemoryBufferImpl() override;
@@ -44,12 +44,12 @@ class GPU_EXPORT GpuMemoryBufferImpl : public gfx::GpuMemoryBuffer {
GpuMemoryBufferImpl(gfx::GpuMemoryBufferId id,
const gfx::Size& size,
gfx::BufferFormat format,
- const DestructionCallback& callback);
+ DestructionCallback callback);
const gfx::GpuMemoryBufferId id_;
const gfx::Size size_;
const gfx::BufferFormat format_;
- const DestructionCallback callback_;
+ DestructionCallback callback_;
bool mapped_;
gpu::SyncToken destruction_sync_token_;
diff --git a/chromium/gpu/ipc/common/gpu_memory_buffer_impl_android_hardware_buffer.cc b/chromium/gpu/ipc/common/gpu_memory_buffer_impl_android_hardware_buffer.cc
index 9a7d17c2ac2..02154f9badf 100644
--- a/chromium/gpu/ipc/common/gpu_memory_buffer_impl_android_hardware_buffer.cc
+++ b/chromium/gpu/ipc/common/gpu_memory_buffer_impl_android_hardware_buffer.cc
@@ -11,7 +11,6 @@
#include "base/bind_helpers.h"
#include "base/logging.h"
#include "base/memory/ptr_util.h"
-#include "gpu/ipc/common/gpu_memory_buffer_impl_android_hardware_buffer.h"
#include "gpu/ipc/common/gpu_memory_buffer_support.h"
#include "ui/gfx/geometry/size.h"
@@ -59,9 +58,9 @@ GpuMemoryBufferImplAndroidHardwareBuffer::
gfx::GpuMemoryBufferId id,
const gfx::Size& size,
gfx::BufferFormat format,
- const DestructionCallback& callback,
+ DestructionCallback callback,
base::android::ScopedHardwareBufferHandle handle)
- : GpuMemoryBufferImpl(id, size, format, callback),
+ : GpuMemoryBufferImpl(id, size, format, std::move(callback)),
hardware_buffer_handle_(std::move(handle)) {}
GpuMemoryBufferImplAndroidHardwareBuffer::
@@ -69,12 +68,11 @@ GpuMemoryBufferImplAndroidHardwareBuffer::
// static
std::unique_ptr<GpuMemoryBufferImplAndroidHardwareBuffer>
-GpuMemoryBufferImplAndroidHardwareBuffer::Create(
- gfx::GpuMemoryBufferId id,
- const gfx::Size& size,
- gfx::BufferFormat format,
- gfx::BufferUsage usage,
- const DestructionCallback& callback) {
+GpuMemoryBufferImplAndroidHardwareBuffer::Create(gfx::GpuMemoryBufferId id,
+ const gfx::Size& size,
+ gfx::BufferFormat format,
+ gfx::BufferUsage usage,
+ DestructionCallback callback) {
DCHECK(base::AndroidHardwareBufferCompat::IsSupportAvailable());
AHardwareBuffer* buffer = nullptr;
@@ -86,7 +84,7 @@ GpuMemoryBufferImplAndroidHardwareBuffer::Create(
}
return base::WrapUnique(new GpuMemoryBufferImplAndroidHardwareBuffer(
- id, size, format, callback,
+ id, size, format, std::move(callback),
base::android::ScopedHardwareBufferHandle::Adopt(buffer)));
}
@@ -97,10 +95,10 @@ GpuMemoryBufferImplAndroidHardwareBuffer::CreateFromHandle(
const gfx::Size& size,
gfx::BufferFormat format,
gfx::BufferUsage usage,
- const DestructionCallback& callback) {
+ DestructionCallback callback) {
DCHECK(handle.android_hardware_buffer.is_valid());
return base::WrapUnique(new GpuMemoryBufferImplAndroidHardwareBuffer(
- handle.id, size, format, callback,
+ handle.id, size, format, std::move(callback),
std::move(handle.android_hardware_buffer)));
}
@@ -133,7 +131,7 @@ GpuMemoryBufferImplAndroidHardwareBuffer::CloneHandle() const {
}
// static
-base::Closure GpuMemoryBufferImplAndroidHardwareBuffer::AllocateForTesting(
+base::OnceClosure GpuMemoryBufferImplAndroidHardwareBuffer::AllocateForTesting(
const gfx::Size& size,
gfx::BufferFormat format,
gfx::BufferUsage usage,
diff --git a/chromium/gpu/ipc/common/gpu_memory_buffer_impl_android_hardware_buffer.h b/chromium/gpu/ipc/common/gpu_memory_buffer_impl_android_hardware_buffer.h
index eae2bf19d49..e7d04d34972 100644
--- a/chromium/gpu/ipc/common/gpu_memory_buffer_impl_android_hardware_buffer.h
+++ b/chromium/gpu/ipc/common/gpu_memory_buffer_impl_android_hardware_buffer.h
@@ -25,19 +25,20 @@ class GPU_EXPORT GpuMemoryBufferImplAndroidHardwareBuffer
const gfx::Size& size,
gfx::BufferFormat format,
gfx::BufferUsage usage,
- const DestructionCallback& callback);
+ DestructionCallback callback);
static std::unique_ptr<GpuMemoryBufferImplAndroidHardwareBuffer>
CreateFromHandle(gfx::GpuMemoryBufferHandle handle,
const gfx::Size& size,
gfx::BufferFormat format,
gfx::BufferUsage usage,
- const DestructionCallback& callback);
+ DestructionCallback callback);
- static base::Closure AllocateForTesting(const gfx::Size& size,
- gfx::BufferFormat format,
- gfx::BufferUsage usage,
- gfx::GpuMemoryBufferHandle* handle);
+ static base::OnceClosure AllocateForTesting(
+ const gfx::Size& size,
+ gfx::BufferFormat format,
+ gfx::BufferUsage usage,
+ gfx::GpuMemoryBufferHandle* handle);
// Overridden from gfx::GpuMemoryBuffer:
bool Map() override;
@@ -52,7 +53,7 @@ class GPU_EXPORT GpuMemoryBufferImplAndroidHardwareBuffer
gfx::GpuMemoryBufferId id,
const gfx::Size& size,
gfx::BufferFormat format,
- const DestructionCallback& callback,
+ DestructionCallback callback,
base::android::ScopedHardwareBufferHandle handle);
base::android::ScopedHardwareBufferHandle hardware_buffer_handle_;
diff --git a/chromium/gpu/ipc/common/gpu_memory_buffer_impl_dxgi.cc b/chromium/gpu/ipc/common/gpu_memory_buffer_impl_dxgi.cc
index 9f653bf3da0..59c7a15611d 100644
--- a/chromium/gpu/ipc/common/gpu_memory_buffer_impl_dxgi.cc
+++ b/chromium/gpu/ipc/common/gpu_memory_buffer_impl_dxgi.cc
@@ -24,14 +24,14 @@ GpuMemoryBufferImplDXGI::CreateFromHandle(
const gfx::Size& size,
gfx::BufferFormat format,
gfx::BufferUsage usage,
- const DestructionCallback& callback) {
+ DestructionCallback callback) {
DCHECK(handle.dxgi_handle.IsValid());
return base::WrapUnique(new GpuMemoryBufferImplDXGI(
- handle.id, size, format, callback,
+ handle.id, size, format, std::move(callback),
base::win::ScopedHandle(handle.dxgi_handle.GetHandle())));
}
-base::Closure GpuMemoryBufferImplDXGI::AllocateForTesting(
+base::OnceClosure GpuMemoryBufferImplDXGI::AllocateForTesting(
const gfx::Size& size,
gfx::BufferFormat format,
gfx::BufferUsage usage,
@@ -123,9 +123,9 @@ GpuMemoryBufferImplDXGI::GpuMemoryBufferImplDXGI(
gfx::GpuMemoryBufferId id,
const gfx::Size& size,
gfx::BufferFormat format,
- const DestructionCallback& callback,
+ DestructionCallback callback,
base::win::ScopedHandle dxgi_handle)
- : GpuMemoryBufferImpl(id, size, format, callback),
+ : GpuMemoryBufferImpl(id, size, format, std::move(callback)),
dxgi_handle_(std::move(dxgi_handle)) {}
} // namespace gpu
diff --git a/chromium/gpu/ipc/common/gpu_memory_buffer_impl_dxgi.h b/chromium/gpu/ipc/common/gpu_memory_buffer_impl_dxgi.h
index d143da1771c..233e3a5c4a7 100644
--- a/chromium/gpu/ipc/common/gpu_memory_buffer_impl_dxgi.h
+++ b/chromium/gpu/ipc/common/gpu_memory_buffer_impl_dxgi.h
@@ -30,12 +30,13 @@ class GPU_EXPORT GpuMemoryBufferImplDXGI : public GpuMemoryBufferImpl {
const gfx::Size& size,
gfx::BufferFormat format,
gfx::BufferUsage usage,
- const DestructionCallback& callback);
+ DestructionCallback callback);
- static base::Closure AllocateForTesting(const gfx::Size& size,
- gfx::BufferFormat format,
- gfx::BufferUsage usage,
- gfx::GpuMemoryBufferHandle* handle);
+ static base::OnceClosure AllocateForTesting(
+ const gfx::Size& size,
+ gfx::BufferFormat format,
+ gfx::BufferUsage usage,
+ gfx::GpuMemoryBufferHandle* handle);
bool Map() override;
void* memory(size_t plane) override;
@@ -48,7 +49,7 @@ class GPU_EXPORT GpuMemoryBufferImplDXGI : public GpuMemoryBufferImpl {
GpuMemoryBufferImplDXGI(gfx::GpuMemoryBufferId id,
const gfx::Size& size,
gfx::BufferFormat format,
- const DestructionCallback& callback,
+ DestructionCallback callback,
base::win::ScopedHandle dxgi_handle);
base::win::ScopedHandle dxgi_handle_;
diff --git a/chromium/gpu/ipc/common/gpu_memory_buffer_impl_io_surface.cc b/chromium/gpu/ipc/common/gpu_memory_buffer_impl_io_surface.cc
index e12dadafd35..56862fe4e26 100644
--- a/chromium/gpu/ipc/common/gpu_memory_buffer_impl_io_surface.cc
+++ b/chromium/gpu/ipc/common/gpu_memory_buffer_impl_io_surface.cc
@@ -43,10 +43,10 @@ GpuMemoryBufferImplIOSurface::GpuMemoryBufferImplIOSurface(
gfx::GpuMemoryBufferId id,
const gfx::Size& size,
gfx::BufferFormat format,
- const DestructionCallback& callback,
+ DestructionCallback callback,
IOSurfaceRef io_surface,
uint32_t lock_flags)
- : GpuMemoryBufferImpl(id, size, format, callback),
+ : GpuMemoryBufferImpl(id, size, format, std::move(callback)),
io_surface_(io_surface),
lock_flags_(lock_flags) {}
@@ -59,7 +59,7 @@ GpuMemoryBufferImplIOSurface::CreateFromHandle(
const gfx::Size& size,
gfx::BufferFormat format,
gfx::BufferUsage usage,
- const DestructionCallback& callback) {
+ DestructionCallback callback) {
if (!handle.mach_port) {
LOG(ERROR) << "Invalid IOSurface mach port returned to client.";
return nullptr;
@@ -77,13 +77,13 @@ GpuMemoryBufferImplIOSurface::CreateFromHandle(
return nullptr;
}
- return base::WrapUnique(
- new GpuMemoryBufferImplIOSurface(handle.id, size, format, callback,
- io_surface.release(), LockFlags(usage)));
+ return base::WrapUnique(new GpuMemoryBufferImplIOSurface(
+ handle.id, size, format, std::move(callback), io_surface.release(),
+ LockFlags(usage)));
}
// static
-base::Closure GpuMemoryBufferImplIOSurface::AllocateForTesting(
+base::OnceClosure GpuMemoryBufferImplIOSurface::AllocateForTesting(
const gfx::Size& size,
gfx::BufferFormat format,
gfx::BufferUsage usage,
diff --git a/chromium/gpu/ipc/common/gpu_memory_buffer_impl_io_surface.h b/chromium/gpu/ipc/common/gpu_memory_buffer_impl_io_surface.h
index d88f93042da..977ff47d077 100644
--- a/chromium/gpu/ipc/common/gpu_memory_buffer_impl_io_surface.h
+++ b/chromium/gpu/ipc/common/gpu_memory_buffer_impl_io_surface.h
@@ -32,12 +32,13 @@ class GPU_EXPORT GpuMemoryBufferImplIOSurface : public GpuMemoryBufferImpl {
const gfx::Size& size,
gfx::BufferFormat format,
gfx::BufferUsage usage,
- const DestructionCallback& callback);
+ DestructionCallback callback);
- static base::Closure AllocateForTesting(const gfx::Size& size,
- gfx::BufferFormat format,
- gfx::BufferUsage usage,
- gfx::GpuMemoryBufferHandle* handle);
+ static base::OnceClosure AllocateForTesting(
+ const gfx::Size& size,
+ gfx::BufferFormat format,
+ gfx::BufferUsage usage,
+ gfx::GpuMemoryBufferHandle* handle);
// Overridden from gfx::GpuMemoryBuffer:
bool Map() override;
@@ -52,7 +53,7 @@ class GPU_EXPORT GpuMemoryBufferImplIOSurface : public GpuMemoryBufferImpl {
GpuMemoryBufferImplIOSurface(gfx::GpuMemoryBufferId id,
const gfx::Size& size,
gfx::BufferFormat format,
- const DestructionCallback& callback,
+ DestructionCallback callback,
IOSurfaceRef io_surface,
uint32_t lock_flags);
diff --git a/chromium/gpu/ipc/common/gpu_memory_buffer_impl_native_pixmap.cc b/chromium/gpu/ipc/common/gpu_memory_buffer_impl_native_pixmap.cc
index 38c2569c82f..b9252d505be 100644
--- a/chromium/gpu/ipc/common/gpu_memory_buffer_impl_native_pixmap.cc
+++ b/chromium/gpu/ipc/common/gpu_memory_buffer_impl_native_pixmap.cc
@@ -32,11 +32,11 @@ GpuMemoryBufferImplNativePixmap::GpuMemoryBufferImplNativePixmap(
gfx::GpuMemoryBufferId id,
const gfx::Size& size,
gfx::BufferFormat format,
- const DestructionCallback& callback,
+ DestructionCallback callback,
std::unique_ptr<gfx::ClientNativePixmap> pixmap,
const std::vector<gfx::NativePixmapPlane>& planes,
base::ScopedFD fd)
- : GpuMemoryBufferImpl(id, size, format, callback),
+ : GpuMemoryBufferImpl(id, size, format, std::move(callback)),
pixmap_(std::move(pixmap)),
planes_(planes),
fd_(std::move(fd)) {}
@@ -51,7 +51,7 @@ GpuMemoryBufferImplNativePixmap::CreateFromHandle(
const gfx::Size& size,
gfx::BufferFormat format,
gfx::BufferUsage usage,
- const DestructionCallback& callback) {
+ DestructionCallback callback) {
// GpuMemoryBufferImpl needs the FD to implement GetHandle() but
// gfx::ClientNativePixmapFactory::ImportFromHandle is expected to take
// ownership of the FD passed in the handle so we have to dup it here in
@@ -86,12 +86,12 @@ GpuMemoryBufferImplNativePixmap::CreateFromHandle(
DCHECK(native_pixmap);
return base::WrapUnique(new GpuMemoryBufferImplNativePixmap(
- handle.id, size, format, callback, std::move(native_pixmap),
+ handle.id, size, format, std::move(callback), std::move(native_pixmap),
handle.native_pixmap_handle.planes, std::move(scoped_fd)));
}
// static
-base::Closure GpuMemoryBufferImplNativePixmap::AllocateForTesting(
+base::OnceClosure GpuMemoryBufferImplNativePixmap::AllocateForTesting(
const gfx::Size& size,
gfx::BufferFormat format,
gfx::BufferUsage usage,
@@ -109,7 +109,7 @@ base::Closure GpuMemoryBufferImplNativePixmap::AllocateForTesting(
NOTIMPLEMENTED();
#endif
handle->type = gfx::NATIVE_PIXMAP;
- return base::Bind(&FreeNativePixmapForTesting, pixmap);
+ return base::BindOnce(&FreeNativePixmapForTesting, pixmap);
}
bool GpuMemoryBufferImplNativePixmap::Map() {
diff --git a/chromium/gpu/ipc/common/gpu_memory_buffer_impl_native_pixmap.h b/chromium/gpu/ipc/common/gpu_memory_buffer_impl_native_pixmap.h
index ede8d509823..ddc63f5e540 100644
--- a/chromium/gpu/ipc/common/gpu_memory_buffer_impl_native_pixmap.h
+++ b/chromium/gpu/ipc/common/gpu_memory_buffer_impl_native_pixmap.h
@@ -33,12 +33,13 @@ class GPU_EXPORT GpuMemoryBufferImplNativePixmap : public GpuMemoryBufferImpl {
const gfx::Size& size,
gfx::BufferFormat format,
gfx::BufferUsage usage,
- const DestructionCallback& callback);
+ DestructionCallback callback);
- static base::Closure AllocateForTesting(const gfx::Size& size,
- gfx::BufferFormat format,
- gfx::BufferUsage usage,
- gfx::GpuMemoryBufferHandle* handle);
+ static base::OnceClosure AllocateForTesting(
+ const gfx::Size& size,
+ gfx::BufferFormat format,
+ gfx::BufferUsage usage,
+ gfx::GpuMemoryBufferHandle* handle);
// Overridden from gfx::GpuMemoryBuffer:
bool Map() override;
@@ -53,7 +54,7 @@ class GPU_EXPORT GpuMemoryBufferImplNativePixmap : public GpuMemoryBufferImpl {
gfx::GpuMemoryBufferId id,
const gfx::Size& size,
gfx::BufferFormat format,
- const DestructionCallback& callback,
+ DestructionCallback callback,
std::unique_ptr<gfx::ClientNativePixmap> native_pixmap,
const std::vector<gfx::NativePixmapPlane>& planes,
base::ScopedFD fd);
diff --git a/chromium/gpu/ipc/common/gpu_memory_buffer_impl_shared_memory.cc b/chromium/gpu/ipc/common/gpu_memory_buffer_impl_shared_memory.cc
index 152197f1ac2..3ea3fa8996a 100644
--- a/chromium/gpu/ipc/common/gpu_memory_buffer_impl_shared_memory.cc
+++ b/chromium/gpu/ipc/common/gpu_memory_buffer_impl_shared_memory.cc
@@ -26,12 +26,12 @@ GpuMemoryBufferImplSharedMemory::GpuMemoryBufferImplSharedMemory(
const gfx::Size& size,
gfx::BufferFormat format,
gfx::BufferUsage usage,
- const DestructionCallback& callback,
+ DestructionCallback callback,
base::UnsafeSharedMemoryRegion shared_memory_region,
base::WritableSharedMemoryMapping shared_memory_mapping,
size_t offset,
int stride)
- : GpuMemoryBufferImpl(id, size, format, callback),
+ : GpuMemoryBufferImpl(id, size, format, std::move(callback)),
shared_memory_region_(std::move(shared_memory_region)),
shared_memory_mapping_(std::move(shared_memory_mapping)),
offset_(offset),
@@ -48,7 +48,7 @@ GpuMemoryBufferImplSharedMemory::Create(gfx::GpuMemoryBufferId id,
const gfx::Size& size,
gfx::BufferFormat format,
gfx::BufferUsage usage,
- const DestructionCallback& callback) {
+ DestructionCallback callback) {
if (!IsUsageSupported(usage))
return nullptr;
size_t buffer_size = 0u;
@@ -62,8 +62,8 @@ GpuMemoryBufferImplSharedMemory::Create(gfx::GpuMemoryBufferId id,
return nullptr;
return base::WrapUnique(new GpuMemoryBufferImplSharedMemory(
- id, size, format, usage, callback, std::move(shared_memory_region),
- std::move(shared_memory_mapping), 0,
+ id, size, format, usage, std::move(callback),
+ std::move(shared_memory_region), std::move(shared_memory_mapping), 0,
gfx::RowSizeForBufferFormat(size.width(), format, 0)));
}
@@ -102,12 +102,13 @@ GpuMemoryBufferImplSharedMemory::CreateFromHandle(
const gfx::Size& size,
gfx::BufferFormat format,
gfx::BufferUsage usage,
- const DestructionCallback& callback) {
+ DestructionCallback callback) {
DCHECK(handle.region.IsValid());
return base::WrapUnique(new GpuMemoryBufferImplSharedMemory(
- handle.id, size, format, usage, callback, std::move(handle.region),
- base::WritableSharedMemoryMapping(), handle.offset, handle.stride));
+ handle.id, size, format, usage, std::move(callback),
+ std::move(handle.region), base::WritableSharedMemoryMapping(),
+ handle.offset, handle.stride));
}
// static
@@ -172,7 +173,7 @@ bool GpuMemoryBufferImplSharedMemory::IsSizeValidForFormat(
}
// static
-base::Closure GpuMemoryBufferImplSharedMemory::AllocateForTesting(
+base::OnceClosure GpuMemoryBufferImplSharedMemory::AllocateForTesting(
const gfx::Size& size,
gfx::BufferFormat format,
gfx::BufferUsage usage,
diff --git a/chromium/gpu/ipc/common/gpu_memory_buffer_impl_shared_memory.h b/chromium/gpu/ipc/common/gpu_memory_buffer_impl_shared_memory.h
index a5791cab195..d67583eecb2 100644
--- a/chromium/gpu/ipc/common/gpu_memory_buffer_impl_shared_memory.h
+++ b/chromium/gpu/ipc/common/gpu_memory_buffer_impl_shared_memory.h
@@ -28,7 +28,7 @@ class GPU_EXPORT GpuMemoryBufferImplSharedMemory : public GpuMemoryBufferImpl {
const gfx::Size& size,
gfx::BufferFormat format,
gfx::BufferUsage usage,
- const DestructionCallback& callback);
+ DestructionCallback callback);
static gfx::GpuMemoryBufferHandle CreateGpuMemoryBuffer(
gfx::GpuMemoryBufferId id,
@@ -41,7 +41,7 @@ class GPU_EXPORT GpuMemoryBufferImplSharedMemory : public GpuMemoryBufferImpl {
const gfx::Size& size,
gfx::BufferFormat format,
gfx::BufferUsage usage,
- const DestructionCallback& callback);
+ DestructionCallback callback);
static bool IsUsageSupported(gfx::BufferUsage usage);
static bool IsConfigurationSupported(gfx::BufferFormat format,
@@ -49,10 +49,11 @@ class GPU_EXPORT GpuMemoryBufferImplSharedMemory : public GpuMemoryBufferImpl {
static bool IsSizeValidForFormat(const gfx::Size& size,
gfx::BufferFormat format);
- static base::Closure AllocateForTesting(const gfx::Size& size,
- gfx::BufferFormat format,
- gfx::BufferUsage usage,
- gfx::GpuMemoryBufferHandle* handle);
+ static base::OnceClosure AllocateForTesting(
+ const gfx::Size& size,
+ gfx::BufferFormat format,
+ gfx::BufferUsage usage,
+ gfx::GpuMemoryBufferHandle* handle);
// Overridden from gfx::GpuMemoryBuffer:
bool Map() override;
@@ -76,7 +77,7 @@ class GPU_EXPORT GpuMemoryBufferImplSharedMemory : public GpuMemoryBufferImpl {
const gfx::Size& size,
gfx::BufferFormat format,
gfx::BufferUsage usage,
- const DestructionCallback& callback,
+ DestructionCallback callback,
base::UnsafeSharedMemoryRegion shared_memory_region,
base::WritableSharedMemoryMapping shared_memory_mapping,
size_t offset,
diff --git a/chromium/gpu/ipc/common/gpu_memory_buffer_impl_test_template.h b/chromium/gpu/ipc/common/gpu_memory_buffer_impl_test_template.h
index 5899c542090..5b3a9a0c34a 100644
--- a/chromium/gpu/ipc/common/gpu_memory_buffer_impl_test_template.h
+++ b/chromium/gpu/ipc/common/gpu_memory_buffer_impl_test_template.h
@@ -39,11 +39,11 @@ class GpuMemoryBufferImplTest : public testing::Test {
gfx::BufferUsage usage,
gfx::GpuMemoryBufferHandle* handle,
bool* destroyed) {
- return base::Bind(&GpuMemoryBufferImplTest::FreeGpuMemoryBuffer,
- base::Unretained(this),
- GpuMemoryBufferImplType::AllocateForTesting(
- size, format, usage, handle),
- base::Unretained(destroyed));
+ return base::BindOnce(&GpuMemoryBufferImplTest::FreeGpuMemoryBuffer,
+ base::Unretained(this),
+ GpuMemoryBufferImplType::AllocateForTesting(
+ size, format, usage, handle),
+ base::Unretained(destroyed));
}
GpuMemoryBufferSupport* gpu_memory_buffer_support() {
@@ -59,10 +59,10 @@ class GpuMemoryBufferImplTest : public testing::Test {
private:
GpuMemoryBufferSupport gpu_memory_buffer_support_;
- void FreeGpuMemoryBuffer(const base::Closure& free_callback,
+ void FreeGpuMemoryBuffer(base::OnceClosure free_callback,
bool* destroyed,
const gpu::SyncToken& sync_token) {
- free_callback.Run();
+ std::move(free_callback).Run();
if (destroyed)
*destroyed = true;
}
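With the free callback now a base::OnceClosure, the test helper must consume it with std::move(...).Run() instead of calling Run() on a const reference, and the wrapper itself is built with base::BindOnce. A reduced sketch of the same shape, with FreeBuffer and MakeDestroyCallback as hypothetical names:

    #include <utility>
    #include "base/bind.h"
    #include "base/callback.h"

    void FreeBuffer(base::OnceClosure free_callback, bool* destroyed) {
      std::move(free_callback).Run();  // Consumes the closure; cannot rerun.
      *destroyed = true;
    }

    base::OnceClosure MakeDestroyCallback(base::OnceClosure allocation_cleanup,
                                          bool* destroyed) {
      // base::Unretained is safe only while |destroyed| outlives the closure,
      // which holds for a test-fixture local observed within the test body.
      return base::BindOnce(&FreeBuffer, std::move(allocation_cleanup),
                            base::Unretained(destroyed));
    }

Note that move-only types such as a OnceClosure can themselves be bound as arguments of BindOnce, which is what lets the cleanup closure ride along inside the destruction callback here.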
@@ -108,9 +108,9 @@ TYPED_TEST_P(GpuMemoryBufferImplTest, CreateFromHandle) {
&handle, &destroyed);
std::unique_ptr<GpuMemoryBufferImpl> buffer(
TestFixture::gpu_memory_buffer_support()
- ->CreateGpuMemoryBufferImplFromHandle(std::move(handle),
- kBufferSize, format, usage,
- destroy_callback));
+ ->CreateGpuMemoryBufferImplFromHandle(
+ std::move(handle), kBufferSize, format, usage,
+ std::move(destroy_callback)));
ASSERT_TRUE(buffer);
EXPECT_EQ(buffer->GetFormat(), format);
@@ -141,7 +141,8 @@ TYPED_TEST_P(GpuMemoryBufferImplTest, Map) {
TestFixture::gpu_memory_buffer_support()
->CreateGpuMemoryBufferImplFromHandle(
std::move(handle), kBufferSize, format,
- gfx::BufferUsage::GPU_READ_CPU_READ_WRITE, destroy_callback));
+ gfx::BufferUsage::GPU_READ_CPU_READ_WRITE,
+ std::move(destroy_callback)));
ASSERT_TRUE(buffer);
const size_t num_planes = gfx::NumberOfPlanesForBufferFormat(format);
@@ -196,7 +197,7 @@ TYPED_TEST_P(GpuMemoryBufferImplTest, PersistentMap) {
->CreateGpuMemoryBufferImplFromHandle(
std::move(handle), kBufferSize, format,
gfx::BufferUsage::GPU_READ_CPU_READ_WRITE_PERSISTENT,
- destroy_callback));
+ std::move(destroy_callback)));
ASSERT_TRUE(buffer);
// Map buffer into user space.
@@ -282,9 +283,9 @@ TYPED_TEST_P(GpuMemoryBufferImplTest, SerializeAndDeserialize) {
std::unique_ptr<GpuMemoryBufferImpl> buffer(
TestFixture::gpu_memory_buffer_support()
- ->CreateGpuMemoryBufferImplFromHandle(std::move(output_handle),
- kBufferSize, format, usage,
- destroy_callback));
+ ->CreateGpuMemoryBufferImplFromHandle(
+ std::move(output_handle), kBufferSize, format, usage,
+ std::move(destroy_callback)));
ASSERT_TRUE(buffer);
EXPECT_EQ(buffer->GetFormat(), format);
@@ -317,7 +318,7 @@ TYPED_TEST_P(GpuMemoryBufferImplCreateTest, Create) {
bool destroyed = false;
std::unique_ptr<TypeParam> buffer(TypeParam::Create(
kBufferId, kBufferSize, format, usage,
- base::Bind(
+ base::BindOnce(
[](bool* destroyed, const gpu::SyncToken&) { *destroyed = true; },
base::Unretained(&destroyed))));
ASSERT_TRUE(buffer);
diff --git a/chromium/gpu/ipc/common/gpu_memory_buffer_support.cc b/chromium/gpu/ipc/common/gpu_memory_buffer_support.cc
index 66ac7872d17..e2b9183004d 100644
--- a/chromium/gpu/ipc/common/gpu_memory_buffer_support.cc
+++ b/chromium/gpu/ipc/common/gpu_memory_buffer_support.cc
@@ -156,31 +156,31 @@ GpuMemoryBufferSupport::CreateGpuMemoryBufferImplFromHandle(
const gfx::Size& size,
gfx::BufferFormat format,
gfx::BufferUsage usage,
- const GpuMemoryBufferImpl::DestructionCallback& callback) {
+ GpuMemoryBufferImpl::DestructionCallback callback) {
switch (handle.type) {
case gfx::SHARED_MEMORY_BUFFER:
return GpuMemoryBufferImplSharedMemory::CreateFromHandle(
- std::move(handle), size, format, usage, callback);
+ std::move(handle), size, format, usage, std::move(callback));
#if defined(OS_MACOSX)
case gfx::IO_SURFACE_BUFFER:
return GpuMemoryBufferImplIOSurface::CreateFromHandle(
- std::move(handle), size, format, usage, callback);
+ std::move(handle), size, format, usage, std::move(callback));
#endif
#if defined(OS_LINUX)
case gfx::NATIVE_PIXMAP:
return GpuMemoryBufferImplNativePixmap::CreateFromHandle(
client_native_pixmap_factory(), std::move(handle), size, format,
- usage, callback);
+ usage, std::move(callback));
#endif
#if defined(OS_WIN)
case gfx::DXGI_SHARED_HANDLE:
- return GpuMemoryBufferImplDXGI::CreateFromHandle(std::move(handle), size,
- format, usage, callback);
+ return GpuMemoryBufferImplDXGI::CreateFromHandle(
+ std::move(handle), size, format, usage, std::move(callback));
#endif
#if defined(OS_ANDROID)
case gfx::ANDROID_HARDWARE_BUFFER:
return GpuMemoryBufferImplAndroidHardwareBuffer::CreateFromHandle(
- std::move(handle), size, format, usage, callback);
+ std::move(handle), size, format, usage, std::move(callback));
#endif
default:
// TODO(dcheng): Remove default case (https://crbug.com/676224).
diff --git a/chromium/gpu/ipc/common/gpu_memory_buffer_support.h b/chromium/gpu/ipc/common/gpu_memory_buffer_support.h
index 277ffd5afce..7e051c3f551 100644
--- a/chromium/gpu/ipc/common/gpu_memory_buffer_support.h
+++ b/chromium/gpu/ipc/common/gpu_memory_buffer_support.h
@@ -58,7 +58,7 @@ class GPU_EXPORT GpuMemoryBufferSupport {
const gfx::Size& size,
gfx::BufferFormat format,
gfx::BufferUsage usage,
- const GpuMemoryBufferImpl::DestructionCallback& callback);
+ GpuMemoryBufferImpl::DestructionCallback callback);
private:
#if defined(OS_LINUX) || defined(USE_OZONE)
diff --git a/chromium/gpu/ipc/common/gpu_messages.h b/chromium/gpu/ipc/common/gpu_messages.h
index fa20b5ab226..4d8771b9ee6 100644
--- a/chromium/gpu/ipc/common/gpu_messages.h
+++ b/chromium/gpu/ipc/common/gpu_messages.h
@@ -83,6 +83,18 @@ IPC_STRUCT_BEGIN(GpuChannelMsg_CreateSharedImage_Params)
IPC_STRUCT_MEMBER(uint32_t, release_id)
IPC_STRUCT_END()
+IPC_STRUCT_BEGIN(GpuChannelMsg_CreateSharedImageWithData_Params)
+ IPC_STRUCT_MEMBER(gpu::Mailbox, mailbox)
+ IPC_STRUCT_MEMBER(viz::ResourceFormat, format)
+ IPC_STRUCT_MEMBER(gfx::Size, size)
+ IPC_STRUCT_MEMBER(gfx::ColorSpace, color_space)
+ IPC_STRUCT_MEMBER(uint32_t, usage)
+ IPC_STRUCT_MEMBER(uint32_t, release_id)
+ IPC_STRUCT_MEMBER(uint32_t, pixel_data_offset)
+ IPC_STRUCT_MEMBER(uint32_t, pixel_data_size)
+ IPC_STRUCT_MEMBER(bool, done_with_shm)
+IPC_STRUCT_END()
+
IPC_STRUCT_BEGIN(GpuChannelMsg_CreateGMBSharedImage_Params)
IPC_STRUCT_MEMBER(gpu::Mailbox, mailbox)
IPC_STRUCT_MEMBER(gfx::GpuMemoryBufferHandle, handle)
@@ -136,12 +148,16 @@ IPC_MESSAGE_CONTROL1(GpuChannelMsg_FlushDeferredMessages,
IPC_MESSAGE_ROUTED1(GpuChannelMsg_CreateSharedImage,
GpuChannelMsg_CreateSharedImage_Params /* params */)
+IPC_MESSAGE_ROUTED1(GpuChannelMsg_CreateSharedImageWithData,
+ GpuChannelMsg_CreateSharedImageWithData_Params /* params */)
IPC_MESSAGE_ROUTED1(GpuChannelMsg_CreateGMBSharedImage,
GpuChannelMsg_CreateGMBSharedImage_Params /* params */)
IPC_MESSAGE_ROUTED2(GpuChannelMsg_UpdateSharedImage,
gpu::Mailbox /* id */,
uint32_t /* release_id */)
IPC_MESSAGE_ROUTED1(GpuChannelMsg_DestroySharedImage, gpu::Mailbox /* id */)
+IPC_MESSAGE_ROUTED1(GpuChannelMsg_RegisterSharedImageUploadBuffer,
+ base::ReadOnlySharedMemoryRegion /* shm */)
// Schedules a hardware-accelerated image decode in the GPU process. Renderers
// should use gpu::ImageDecodeAcceleratorProxy to schedule decode requests which
@@ -216,9 +232,10 @@ IPC_SYNC_MESSAGE_ROUTED3_1(GpuCommandBufferMsg_WaitForGetOffsetInRange,
// TODO(sunnyps): This is an internal implementation detail of the gpu service
// and is not sent by the client. Remove this once the non-scheduler code path
// is removed.
-IPC_MESSAGE_ROUTED2(GpuCommandBufferMsg_AsyncFlush,
+IPC_MESSAGE_ROUTED3(GpuCommandBufferMsg_AsyncFlush,
int32_t /* put_offset */,
- uint32_t /* flush_id */)
+ uint32_t /* flush_id */,
+ std::vector<gpu::SyncToken> /* sync_token_fences */)
// Sent by the GPU process to display messages in the console.
IPC_MESSAGE_ROUTED1(GpuCommandBufferMsg_ConsoleMsg,
@@ -248,10 +265,6 @@ IPC_MESSAGE_ROUTED2(GpuCommandBufferMsg_BufferPresented,
uint64_t, /* swap_id */
gfx::PresentationFeedback /* feedback */)
-// The receiver will stop processing messages until the Synctoken is signaled.
-IPC_MESSAGE_ROUTED1(GpuCommandBufferMsg_WaitSyncToken,
- gpu::SyncToken /* sync_token */)
-
// The receiver will asynchronously wait until the SyncToken is signaled, and
// then return a GpuCommandBufferMsg_SignalAck message.
IPC_MESSAGE_ROUTED2(GpuCommandBufferMsg_SignalSyncToken,
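The new GpuChannelMsg_CreateSharedImageWithData message pairs a params struct with a read-only shared-memory upload buffer registered beforehand via GpuChannelMsg_RegisterSharedImageUploadBuffer; the pixel payload is referenced by offset and size into that region rather than inlined. A hypothetical sender-side sketch, where route_id_, Send(), usage, next_release_id_, and pixel_bytes are assumed from the surrounding IPC::Sender context:

    GpuChannelMsg_CreateSharedImageWithData_Params params;
    params.mailbox = mailbox;
    params.format = viz::RGBA_8888;
    params.size = gfx::Size(256, 256);
    params.color_space = gfx::ColorSpace::CreateSRGB();
    params.usage = usage;
    params.release_id = next_release_id_++;
    params.pixel_data_offset = 0;          // Offset into the registered region.
    params.pixel_data_size = pixel_bytes;  // Length of the pixel payload.
    params.done_with_shm = true;           // Service may release the region.
    Send(new GpuChannelMsg_CreateSharedImageWithData(route_id_, params));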
diff --git a/chromium/gpu/ipc/common/gpu_preferences.mojom b/chromium/gpu/ipc/common/gpu_preferences.mojom
index 61d6150afe4..c80521f496c 100644
--- a/chromium/gpu/ipc/common/gpu_preferences.mojom
+++ b/chromium/gpu/ipc/common/gpu_preferences.mojom
@@ -34,6 +34,7 @@ struct GpuPreferences {
bool enable_media_foundation_vea_on_windows7;
bool disable_software_rasterizer;
bool log_gpu_control_list_decisions;
+ bool enable_trace_export_events_to_etw;
bool compile_shader_always_succeeds;
bool disable_gl_error_limit;
@@ -65,6 +66,7 @@ struct GpuPreferences {
bool disable_oop_rasterization;
bool enable_oop_rasterization_ddl;
bool enable_raster_to_sk_image;
+ bool enable_passthrough_raster_decoder;
bool watchdog_starts_backgrounded;
bool enable_vulkan;
bool enable_gpu_benchmarking_extension;
diff --git a/chromium/gpu/ipc/common/gpu_preferences_struct_traits.h b/chromium/gpu/ipc/common/gpu_preferences_struct_traits.h
index 22e7524a3da..3b3b52d0add 100644
--- a/chromium/gpu/ipc/common/gpu_preferences_struct_traits.h
+++ b/chromium/gpu/ipc/common/gpu_preferences_struct_traits.h
@@ -76,6 +76,8 @@ struct StructTraits<gpu::mojom::GpuPreferencesDataView, gpu::GpuPreferences> {
out->disable_software_rasterizer = prefs.disable_software_rasterizer();
out->log_gpu_control_list_decisions =
prefs.log_gpu_control_list_decisions();
+ out->enable_trace_export_events_to_etw =
+ prefs.enable_trace_export_events_to_etw();
out->compile_shader_always_succeeds =
prefs.compile_shader_always_succeeds();
out->disable_gl_error_limit = prefs.disable_gl_error_limit();
@@ -122,6 +124,8 @@ struct StructTraits<gpu::mojom::GpuPreferencesDataView, gpu::GpuPreferences> {
out->disable_oop_rasterization = prefs.disable_oop_rasterization();
out->enable_oop_rasterization_ddl = prefs.enable_oop_rasterization_ddl();
out->enable_raster_to_sk_image = prefs.enable_raster_to_sk_image();
+ out->enable_passthrough_raster_decoder =
+ prefs.enable_passthrough_raster_decoder();
out->watchdog_starts_backgrounded = prefs.watchdog_starts_backgrounded();
out->enable_vulkan = prefs.enable_vulkan();
out->enable_gpu_benchmarking_extension =
@@ -177,6 +181,10 @@ struct StructTraits<gpu::mojom::GpuPreferencesDataView, gpu::GpuPreferences> {
static bool log_gpu_control_list_decisions(const gpu::GpuPreferences& prefs) {
return prefs.log_gpu_control_list_decisions;
}
+ static bool enable_trace_export_events_to_etw(
+ const gpu::GpuPreferences& prefs) {
+ return prefs.enable_trace_export_events_to_etw;
+ }
static bool compile_shader_always_succeeds(const gpu::GpuPreferences& prefs) {
return prefs.compile_shader_always_succeeds;
}
@@ -269,6 +277,10 @@ struct StructTraits<gpu::mojom::GpuPreferencesDataView, gpu::GpuPreferences> {
static bool enable_raster_to_sk_image(const gpu::GpuPreferences& prefs) {
return prefs.enable_raster_to_sk_image;
}
+ static bool enable_passthrough_raster_decoder(
+ const gpu::GpuPreferences& prefs) {
+ return prefs.enable_passthrough_raster_decoder;
+ }
static bool watchdog_starts_backgrounded(const gpu::GpuPreferences& prefs) {
return prefs.watchdog_starts_backgrounded;
}
diff --git a/chromium/gpu/ipc/common/surface_handle.h b/chromium/gpu/ipc/common/surface_handle.h
index cb6f279ce66..391d7540aba 100644
--- a/chromium/gpu/ipc/common/surface_handle.h
+++ b/chromium/gpu/ipc/common/surface_handle.h
@@ -21,11 +21,11 @@ namespace gpu {
// SurfaceHandle is the native type used to reference a native surface in the
// GPU process so that we can create "view" contexts on it.
-// On Windows, Linux and Chrome OS, we can use a AcceleratedWidget across
+// On Windows, Mac, Linux and Chrome OS, we can use an AcceleratedWidget across

// processes, so SurfaceHandle is exactly that.
-// On Mac and Android, there is no type we can directly access across processes,
-// so we go through the GpuSurfaceTracker, and SurfaceHandle is a (scalar)
-// handle generated by that.
+// On Android, there is no type we can directly access across processes, so we
+// go through the GpuSurfaceTracker, and SurfaceHandle is a (scalar) handle
+// generated by that.
// On NaCl, we don't have native surfaces per se, but we need SurfaceHandle to
// be defined, because some APIs that use it are referenced there.
//
@@ -33,8 +33,7 @@ namespace gpu {
#if defined(GPU_SURFACE_HANDLE_IS_ACCELERATED_WINDOW)
using SurfaceHandle = gfx::AcceleratedWidget;
constexpr SurfaceHandle kNullSurfaceHandle = gfx::kNullAcceleratedWidget;
-#elif defined(OS_MACOSX) || defined(OS_ANDROID) || defined(OS_NACL) || \
- defined(OS_FUCHSIA)
+#elif defined(OS_ANDROID) || defined(OS_NACL) || defined(OS_FUCHSIA)
using SurfaceHandle = int32_t;
constexpr SurfaceHandle kNullSurfaceHandle = 0;
#else
diff --git a/chromium/gpu/ipc/gpu_in_process_thread_service.cc b/chromium/gpu/ipc/gpu_in_process_thread_service.cc
index 3d3c7737b08..9dca37074f6 100644
--- a/chromium/gpu/ipc/gpu_in_process_thread_service.cc
+++ b/chromium/gpu/ipc/gpu_in_process_thread_service.cc
@@ -4,6 +4,9 @@
#include "gpu/ipc/gpu_in_process_thread_service.h"
+#include <utility>
+#include <vector>
+
#include "base/threading/thread_task_runner_handle.h"
#include "gpu/command_buffer/service/scheduler.h"
@@ -28,13 +31,6 @@ class SchedulerSequence : public CommandBufferTaskExecutor::Sequence {
bool ShouldYield() override { return scheduler_->ShouldYield(sequence_id_); }
- void SetEnabled(bool enabled) override {
- if (enabled)
- scheduler_->EnableSequence(sequence_id_);
- else
- scheduler_->DisableSequence(sequence_id_);
- }
-
void ScheduleTask(base::OnceClosure task,
std::vector<SyncToken> sync_token_fences) override {
scheduler_->ScheduleTask(Scheduler::Task(sequence_id_, std::move(task),
@@ -62,13 +58,17 @@ GpuInProcessThreadService::GpuInProcessThreadService(
scoped_refptr<gl::GLShareGroup> share_group,
gl::GLSurfaceFormat share_group_surface_format,
const GpuFeatureInfo& gpu_feature_info,
- const GpuPreferences& gpu_preferences)
+ const GpuPreferences& gpu_preferences,
+ SharedImageManager* shared_image_manager,
+ gles2::ProgramCache* program_cache)
: CommandBufferTaskExecutor(gpu_preferences,
gpu_feature_info,
sync_point_manager,
mailbox_manager,
share_group,
- share_group_surface_format),
+ share_group_surface_format,
+ shared_image_manager,
+ program_cache),
task_runner_(task_runner),
scheduler_(scheduler) {}
@@ -82,10 +82,6 @@ bool GpuInProcessThreadService::ShouldCreateMemoryTracker() const {
return true;
}
-bool GpuInProcessThreadService::BlockThreadOnWaitSyncToken() const {
- return false;
-}
-
std::unique_ptr<CommandBufferTaskExecutor::Sequence>
GpuInProcessThreadService::CreateSequence() {
return std::make_unique<SchedulerSequence>(scheduler_);
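SetEnabled() and BlockThreadOnWaitSyncToken() disappear because the blocking wait path is gone: rather than disabling a sequence while a sync token is pending, callers hand the fences to ScheduleTask() and the Scheduler runs the task only after all fences are released. A hedged sketch against the Sequence interface shown above, where |sequence|, |fence|, and DoWorkAfterFence are hypothetical:

    // |sequence| came from CommandBufferTaskExecutor::CreateSequence().
    std::vector<gpu::SyncToken> fences;
    fences.push_back(fence);  // Released by some other context later.
    sequence->ScheduleTask(base::BindOnce(&DoWorkAfterFence),
                           std::move(fences));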
diff --git a/chromium/gpu/ipc/gpu_in_process_thread_service.h b/chromium/gpu/ipc/gpu_in_process_thread_service.h
index 15272304cb6..35116b6ad7b 100644
--- a/chromium/gpu/ipc/gpu_in_process_thread_service.h
+++ b/chromium/gpu/ipc/gpu_in_process_thread_service.h
@@ -5,6 +5,8 @@
#ifndef GPU_IPC_GPU_IN_PROCESS_THREAD_SERVICE_H_
#define GPU_IPC_GPU_IN_PROCESS_THREAD_SERVICE_H_
+#include <memory>
+
#include "base/compiler_specific.h"
#include "base/single_thread_task_runner.h"
#include "gpu/command_buffer/service/mailbox_manager.h"
@@ -14,9 +16,12 @@
#include "ui/gl/gl_share_group.h"
namespace gpu {
-
class Scheduler;
+namespace gles2 {
+class ProgramCache;
+} // namespace gles2
+
// Default Service class when no service is specified. GpuInProcessThreadService
// is used by Mus and unit tests.
class GL_IN_PROCESS_CONTEXT_EXPORT GpuInProcessThreadService
@@ -30,12 +35,13 @@ class GL_IN_PROCESS_CONTEXT_EXPORT GpuInProcessThreadService
scoped_refptr<gl::GLShareGroup> share_group,
gl::GLSurfaceFormat share_group_surface_format,
const GpuFeatureInfo& gpu_feature_info,
- const GpuPreferences& gpu_preferences);
+ const GpuPreferences& gpu_preferences,
+ SharedImageManager* shared_image_manager,
+ gles2::ProgramCache* program_cache);
// CommandBufferTaskExecutor implementation.
bool ForceVirtualizedGLContexts() const override;
bool ShouldCreateMemoryTracker() const override;
- bool BlockThreadOnWaitSyncToken() const override;
std::unique_ptr<CommandBufferTaskExecutor::Sequence> CreateSequence()
override;
void ScheduleOutOfOrderTask(base::OnceClosure task) override;
diff --git a/chromium/gpu/ipc/host/gpu_memory_buffer_support.h b/chromium/gpu/ipc/host/gpu_memory_buffer_support.h
index 8e177e8a54f..b35087a644d 100644
--- a/chromium/gpu/ipc/host/gpu_memory_buffer_support.h
+++ b/chromium/gpu/ipc/host/gpu_memory_buffer_support.h
@@ -5,10 +5,10 @@
#ifndef GPU_IPC_HOST_GPU_MEMORY_BUFFER_SUPPORT_H_
#define GPU_IPC_HOST_GPU_MEMORY_BUFFER_SUPPORT_H_
+#include <unordered_set>
#include <utility>
#include <vector>
-#include "base/containers/hash_tables.h"
#include "base/hash.h"
#include "ui/gfx/buffer_types.h"
@@ -17,11 +17,11 @@ namespace gpu {
using GpuMemoryBufferConfigurationKey =
std::pair<gfx::BufferFormat, gfx::BufferUsage>;
using GpuMemoryBufferConfigurationSet =
- base::hash_set<GpuMemoryBufferConfigurationKey>;
+ std::unordered_set<GpuMemoryBufferConfigurationKey>;
} // namespace gpu
-namespace BASE_HASH_NAMESPACE {
+namespace std {
template <>
struct hash<gpu::GpuMemoryBufferConfigurationKey> {
@@ -31,7 +31,7 @@ struct hash<gpu::GpuMemoryBufferConfigurationKey> {
}
};
-} // namespace BASE_HASH_NAMESPACE
+} // namespace std
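Dropping base::hash_set in favor of std::unordered_set is what forces the std::hash specialization for the pair-based key. The hunk elides the unchanged hash body, but the base/hash.h include above suggests a base::HashInts-style combiner; a plausible standalone reconstruction:

    #include <cstddef>
    #include <unordered_set>
    #include <utility>
    #include "base/hash.h"
    #include "ui/gfx/buffer_types.h"

    using Key = std::pair<gfx::BufferFormat, gfx::BufferUsage>;

    namespace std {
    template <>
    struct hash<Key> {
      size_t operator()(const Key& key) const {
        // Fold the two enum values into a single hash value.
        return base::HashInts(static_cast<int>(key.first),
                              static_cast<int>(key.second));
      }
    };
    }  // namespace std

    using KeySet = std::unordered_set<Key>;  // = GpuMemoryBufferConfigurationSet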
namespace gpu {
diff --git a/chromium/gpu/ipc/in_process_command_buffer.cc b/chromium/gpu/ipc/in_process_command_buffer.cc
index 734ddfd5cc6..5fc6dec47cb 100644
--- a/chromium/gpu/ipc/in_process_command_buffer.cc
+++ b/chromium/gpu/ipc/in_process_command_buffer.cc
@@ -27,7 +27,6 @@
#include "base/trace_event/memory_dump_manager.h"
#include "base/trace_event/trace_event.h"
#include "build/build_config.h"
-#include "components/viz/common/features.h"
#include "gpu/command_buffer/client/gpu_control_client.h"
#include "gpu/command_buffer/client/gpu_memory_buffer_manager.h"
#include "gpu/command_buffer/client/shared_image_interface.h"
@@ -50,9 +49,9 @@
#include "gpu/command_buffer/service/memory_tracking.h"
#include "gpu/command_buffer/service/query_manager.h"
#include "gpu/command_buffer/service/raster_decoder.h"
-#include "gpu/command_buffer/service/raster_decoder_context_state.h"
#include "gpu/command_buffer/service/scheduler.h"
#include "gpu/command_buffer/service/service_utils.h"
+#include "gpu/command_buffer/service/shared_context_state.h"
#include "gpu/command_buffer/service/shared_image_factory.h"
#include "gpu/command_buffer/service/sync_point_manager.h"
#include "gpu/command_buffer/service/transfer_buffer_manager.h"
@@ -89,7 +88,6 @@ namespace {
base::AtomicSequenceNumber g_next_route_id;
base::AtomicSequenceNumber g_next_image_id;
-base::AtomicSequenceNumber g_next_transfer_buffer_id;
CommandBufferId NextCommandBufferId() {
return CommandBufferIdFromChannelAndRoute(kInProcessCommandBufferClientId,
@@ -134,7 +132,7 @@ class InProcessCommandBuffer::SharedImageInterface
const gfx::Size& size,
const gfx::ColorSpace& color_space,
uint32_t usage) override {
- auto mailbox = Mailbox::Generate();
+ auto mailbox = Mailbox::GenerateForSharedImage();
{
base::AutoLock lock(lock_);
// Note: we enqueue the task under the lock to guarantee monotonicity of
@@ -149,6 +147,28 @@ class InProcessCommandBuffer::SharedImageInterface
return mailbox;
}
+ Mailbox CreateSharedImage(viz::ResourceFormat format,
+ const gfx::Size& size,
+ const gfx::ColorSpace& color_space,
+ uint32_t usage,
+ base::span<const uint8_t> pixel_data) override {
+ auto mailbox = Mailbox::GenerateForSharedImage();
+ std::vector<uint8_t> pixel_data_copy(pixel_data.begin(), pixel_data.end());
+ {
+ base::AutoLock lock(lock_);
+ // Note: we enqueue the task under the lock to guarantee monotonicity of
+ // the release ids as seen by the service. Unretained is safe because
+ // InProcessCommandBuffer synchronizes with the GPU thread at destruction
+ // time, cancelling tasks, before |this| is destroyed.
+ parent_->ScheduleGpuTask(base::BindOnce(
+ &InProcessCommandBuffer::CreateSharedImageWithDataOnGpuThread,
+ gpu_thread_weak_ptr_, mailbox, format, size, color_space, usage,
+ MakeSyncToken(next_fence_sync_release_++),
+ std::move(pixel_data_copy)));
+ }
+ return mailbox;
+ }
+
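The pixel-data overload copies the caller's base::span into a std::vector before binding it into the cross-thread task: a span is non-owning, so the bytes must be captured by value for the GPU thread to use them safely after the call returns. A sketch of the idiom, with UploadOnGpuThread and the free ScheduleGpuTask standing in for the member calls above:

    #include <cstdint>
    #include <utility>
    #include <vector>
    #include "base/bind.h"
    #include "base/containers/span.h"

    void UploadOnGpuThread(std::vector<uint8_t> pixels);  // Runs later.
    void ScheduleGpuTask(base::OnceClosure task);         // Posts cross-thread.

    void ScheduleUpload(base::span<const uint8_t> pixel_data) {
      // Copy now: |pixel_data| only views memory owned by the caller and may
      // be dangling by the time the GPU thread runs the task.
      std::vector<uint8_t> copy(pixel_data.begin(), pixel_data.end());
      ScheduleGpuTask(base::BindOnce(&UploadOnGpuThread, std::move(copy)));
    }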
Mailbox CreateSharedImage(gfx::GpuMemoryBuffer* gpu_memory_buffer,
GpuMemoryBufferManager* gpu_memory_buffer_manager,
const gfx::ColorSpace& color_space,
@@ -159,7 +179,7 @@ class InProcessCommandBuffer::SharedImageInterface
DCHECK(gpu::IsImageSizeValidForGpuMemoryBufferFormat(
gpu_memory_buffer->GetSize(), gpu_memory_buffer->GetFormat()));
- auto mailbox = Mailbox::Generate();
+ auto mailbox = Mailbox::GenerateForSharedImage();
gfx::GpuMemoryBufferHandle handle = gpu_memory_buffer->CloneHandle();
bool requires_sync_token = handle.type == gfx::IO_SURFACE_BUFFER;
SyncToken sync_token;
@@ -288,6 +308,17 @@ bool InProcessCommandBuffer::MakeCurrent() {
return true;
}
+base::Optional<gles2::ProgramCache::ScopedCacheUse>
+InProcessCommandBuffer::CreateCacheUse() {
+ base::Optional<gles2::ProgramCache::ScopedCacheUse> cache_use;
+ if (context_group_->has_program_cache()) {
+ cache_use.emplace(context_group_->get_program_cache(),
+ base::BindRepeating(&DecoderClient::CacheShader,
+ base::Unretained(this)));
+ }
+ return cache_use;
+}
+
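CreateCacheUse() gives the in-process path the same scoped program-cache attribution that the IPC command-buffer stub gains later in this diff: the base::Optional is engaged only when the context group actually has a program cache, so the ScopedCacheUse RAII object exists exactly for the duration of the GPU work. The call sites added below reduce to this shape:

    // On the GPU thread, after MakeCurrent() succeeds:
    base::Optional<gles2::ProgramCache::ScopedCacheUse> cache_use =
        CreateCacheUse();
    // ... run decoder work; program-cache loads and stores are attributed to
    // this client's CacheShader callback while |cache_use| is engaged ...
    // The scope ends when |cache_use| is destroyed at the end of the block.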
gpu::ContextResult InProcessCommandBuffer::Initialize(
scoped_refptr<gl::GLSurface> surface,
bool is_offscreen,
@@ -351,6 +382,13 @@ gpu::ContextResult InProcessCommandBuffer::InitializeOnGpuThread(
DCHECK_CALLED_ON_VALID_SEQUENCE(gpu_sequence_checker_);
TRACE_EVENT0("gpu", "InProcessCommandBuffer::InitializeOnGpuThread")
+ if (gpu_channel_manager_delegate_ &&
+ gpu_channel_manager_delegate_->IsExiting()) {
+ LOG(ERROR) << "ContextResult::kTransientFailure: trying to create command "
+ "buffer during process shutdown.";
+ return gpu::ContextResult::kTransientFailure;
+ }
+
// TODO(crbug.com/832243): This could use the TransferBufferManager owned by
// |context_group_| instead.
transfer_buffer_manager_ = std::make_unique<TransferBufferManager>(nullptr);
@@ -405,19 +443,6 @@ gpu::ContextResult InProcessCommandBuffer::InitializeOnGpuThread(
use_virtualized_gl_context_ |=
context_group_->feature_info()->workarounds().use_virtualized_gl_contexts;
- const auto& gpu_feature_info = task_executor_->gpu_feature_info();
- const bool use_oop_rasterization =
- gpu_feature_info.status_values[GPU_FEATURE_TYPE_OOP_RASTERIZATION] ==
- gpu::kGpuFeatureStatusEnabled;
-
- // With OOP-R, SkiaRenderer and Skia DDL, we will only have one GLContext
- // and share it with RasterDecoders and DisplayCompositor. So it is not
- // necessary to use virtualized gl context anymore.
- // TODO(penghuang): Make virtualized gl context work with SkiaRenderer + DDL +
- // OOPR. https://crbug.com/838899
- if (features::IsUsingSkiaDeferredDisplayList() && use_oop_rasterization)
- use_virtualized_gl_context_ = false;
-
// TODO(sunnyps): Should this use ScopedCrashKey instead?
crash_keys::gpu_gl_context_is_virtual.Set(use_virtualized_gl_context_ ? "1"
: "0");
@@ -557,11 +582,17 @@ gpu::ContextResult InProcessCommandBuffer::InitializeOnGpuThread(
bool use_passthrough_cmd_decoder =
task_executor_->gpu_preferences().use_passthrough_cmd_decoder &&
gles2::PassthroughCommandDecoderSupported();
- if (!use_passthrough_cmd_decoder &&
- params.attribs.enable_raster_interface &&
+ bool allow_raster_decoder =
+ !use_passthrough_cmd_decoder ||
+ task_executor_->gpu_preferences().enable_passthrough_raster_decoder;
+
+ if (allow_raster_decoder && params.attribs.enable_raster_interface &&
!params.attribs.enable_gles2_interface) {
- context_state_ = base::MakeRefCounted<raster::RasterDecoderContextState>(
- gl_share_group_, surface_, real_context, use_virtualized_gl_context_);
+ context_state_ = base::MakeRefCounted<SharedContextState>(
+ gl_share_group_, surface_, real_context, use_virtualized_gl_context_,
+ base::DoNothing());
+ context_state_->InitializeGL(task_executor_->gpu_preferences(),
+ context_group_->feature_info());
gr_shader_cache_ = params.gr_shader_cache;
context_state_->InitializeGrContext(workarounds, params.gr_shader_cache,
params.activity_flags);
@@ -581,18 +612,22 @@ gpu::ContextResult InProcessCommandBuffer::InitializeOnGpuThread(
}
if (use_virtualized_gl_context_) {
- context_ = base::MakeRefCounted<GLContextVirtual>(
- gl_share_group_.get(), real_context.get(), decoder_->AsWeakPtr());
- if (!context_->Initialize(
- surface_.get(),
- GenerateGLContextAttribs(params.attribs, context_group_.get()))) {
- // TODO(piman): This might not be fatal, we could recurse into
- // CreateGLContext to get more info, tho it should be exceedingly
- // rare and may not be recoverable anyway.
- DestroyOnGpuThread();
- LOG(ERROR) << "ContextResult::kFatalFailure: "
- "Failed to initialize virtual GL context.";
- return gpu::ContextResult::kFatalFailure;
+ if (context_state_) {
+ context_ = context_state_->context();
+ } else {
+ context_ = base::MakeRefCounted<GLContextVirtual>(
+ gl_share_group_.get(), real_context.get(), decoder_->AsWeakPtr());
+ if (!context_->Initialize(surface_.get(),
+ GenerateGLContextAttribs(
+ params.attribs, context_group_.get()))) {
+ // TODO(piman): This might not be fatal, we could recurse into
+ // CreateGLContext to get more info, tho it should be exceedingly
+ // rare and may not be recoverable anyway.
+ DestroyOnGpuThread();
+ LOG(ERROR) << "ContextResult::kFatalFailure: "
+ "Failed to initialize virtual GL context.";
+ return gpu::ContextResult::kFatalFailure;
+ }
}
if (!context_->MakeCurrent(surface_.get())) {
@@ -676,6 +711,9 @@ bool InProcessCommandBuffer::DestroyOnGpuThread() {
bool have_context = context_.get() && context_->MakeCurrent(surface_.get());
if (shared_image_factory_)
shared_image_factory_->DestroyAllSharedImages(have_context);
+ base::Optional<gles2::ProgramCache::ScopedCacheUse> cache_use;
+ if (have_context)
+ cache_use = CreateCacheUse();
// Prepare to destroy the surface while the context is still current, because
// some surface destructors make GL calls.
@@ -721,6 +759,19 @@ void InProcessCommandBuffer::OnParseError() {
// error and make the race benign.
UpdateLastStateOnGpuThread();
+ bool was_lost_by_robustness =
+ decoder_ && decoder_->WasContextLostByRobustnessExtension();
+
+ // Work around issues with recovery by allowing a new GPU process to launch.
+ if (was_lost_by_robustness) {
+ GpuDriverBugWorkarounds workarounds(
+ GetGpuFeatureInfo().enabled_gpu_driver_bug_workarounds);
+ if (workarounds.exit_on_context_lost && gpu_channel_manager_delegate_)
+ gpu_channel_manager_delegate_->MaybeExitOnContextLost();
+
+ // TODO(crbug.com/924148): Check if we should force lose all contexts too.
+ }
+
PostOrRunClientCallback(
base::BindOnce(&InProcessCommandBuffer::OnContextLost,
client_thread_weak_ptr_factory_.GetWeakPtr()));
@@ -788,20 +839,34 @@ bool InProcessCommandBuffer::HasUnprocessedCommandsOnGpuThread() {
return false;
}
-void InProcessCommandBuffer::FlushOnGpuThread(int32_t put_offset) {
+void InProcessCommandBuffer::FlushOnGpuThread(
+ int32_t put_offset,
+ const std::vector<SyncToken>& sync_token_fences) {
DCHECK_CALLED_ON_VALID_SEQUENCE(gpu_sequence_checker_);
TRACE_EVENT1("gpu", "InProcessCommandBuffer::FlushOnGpuThread", "put_offset",
put_offset);
ScopedEvent handle_flush(&flush_event_);
+ // Check if sync token waits are invalid or already complete. Do not use
+ // SyncPointManager::IsSyncTokenReleased() as it can't say if the wait is
+ // invalid.
+ for (const auto& sync_token : sync_token_fences)
+ DCHECK(!sync_point_client_state_->Wait(sync_token, base::DoNothing()));
if (!MakeCurrent())
return;
+ auto cache_use = CreateCacheUse();
+
+ MailboxManager* mailbox_manager = context_group_->mailbox_manager();
+ if (mailbox_manager->UsesSync()) {
+ for (const auto& sync_token : sync_token_fences)
+ mailbox_manager->PullTextureUpdates(sync_token);
+ }
{
- base::Optional<raster::GrShaderCache::ScopedCacheUse> cache_use;
+ base::Optional<raster::GrShaderCache::ScopedCacheUse> gr_cache_use;
if (gr_shader_cache_)
- cache_use.emplace(gr_shader_cache_, kInProcessCommandBufferClientId);
+ gr_cache_use.emplace(gr_shader_cache_, kInProcessCommandBufferClientId);
command_buffer_->Flush(put_offset, decoder_.get());
}
// Update state before signaling the flush event.
@@ -810,10 +875,9 @@ void InProcessCommandBuffer::FlushOnGpuThread(int32_t put_offset) {
bool has_unprocessed_commands = HasUnprocessedCommandsOnGpuThread();
if (!command_buffer_->scheduled() || has_unprocessed_commands) {
- DCHECK(!task_executor_->BlockThreadOnWaitSyncToken());
ContinueGpuTask(base::BindOnce(&InProcessCommandBuffer::FlushOnGpuThread,
gpu_thread_weak_ptr_factory_.GetWeakPtr(),
- put_offset));
+ put_offset, sync_token_fences));
}
// If we've processed all pending commands but still have pending queries,
@@ -831,6 +895,7 @@ void InProcessCommandBuffer::PerformDelayedWorkOnGpuThread() {
crash_keys::gpu_gl_context_is_virtual.Set(use_virtualized_gl_context_ ? "1"
: "0");
if (MakeCurrent()) {
+ auto cache_use = CreateCacheUse();
decoder_->PerformIdleWork();
decoder_->ProcessPendingQueries(false);
if (decoder_->HasMoreIdleWork() || decoder_->HasPendingQueries()) {
@@ -845,8 +910,8 @@ void InProcessCommandBuffer::ScheduleDelayedWorkOnGpuThread() {
return;
delayed_work_pending_ = true;
task_executor_->ScheduleDelayedWork(
- base::Bind(&InProcessCommandBuffer::PerformDelayedWorkOnGpuThread,
- gpu_thread_weak_ptr_factory_.GetWeakPtr()));
+ base::BindOnce(&InProcessCommandBuffer::PerformDelayedWorkOnGpuThread,
+ gpu_thread_weak_ptr_factory_.GetWeakPtr()));
}
void InProcessCommandBuffer::Flush(int32_t put_offset) {
@@ -860,15 +925,16 @@ void InProcessCommandBuffer::Flush(int32_t put_offset) {
put_offset);
last_put_offset_ = put_offset;
- flushed_fence_sync_release_ = next_fence_sync_release_ - 1;
std::vector<SyncToken> sync_token_fences;
next_flush_sync_token_fences_.swap(sync_token_fences);
- ScheduleGpuTask(
- base::BindOnce(&InProcessCommandBuffer::FlushOnGpuThread,
- gpu_thread_weak_ptr_factory_.GetWeakPtr(), put_offset),
- std::move(sync_token_fences));
+ // Don't use std::move() for |sync_token_fences| because evaluation order for
+ // arguments is not defined.
+ ScheduleGpuTask(base::BindOnce(&InProcessCommandBuffer::FlushOnGpuThread,
+ gpu_thread_weak_ptr_factory_.GetWeakPtr(),
+ put_offset, sync_token_fences),
+ sync_token_fences);
}
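The comment above about std::move() points at a classic C++ trap: the evaluation order of function arguments is unspecified, so moving from |sync_token_fences| in one argument while reading it in another could hand the task an already-emptied vector. A minimal illustration with hypothetical names:

    void RunAfterFences(std::vector<gpu::SyncToken> fences);  // hypothetical
    void Schedule(base::OnceClosure task,
                  std::vector<gpu::SyncToken> fences);        // hypothetical

    void Flush(std::vector<gpu::SyncToken> fences) {
      // BAD: both arguments read |fences|, and C++ does not specify which is
      // evaluated first, so one of them may observe a moved-from vector:
      //
      //   Schedule(base::BindOnce(&RunAfterFences, std::move(fences)),
      //            std::move(fences));
      //
      // SAFE, and what the hunk above does: let BindOnce take a copy, then
      // pass the still-intact vector; only one extra copy is paid.
      Schedule(base::BindOnce(&RunAfterFences, fences), fences);
    }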
void InProcessCommandBuffer::OrderingBarrier(int32_t put_offset) {
@@ -931,10 +997,10 @@ void InProcessCommandBuffer::SetGetBufferOnGpuThread(
}
scoped_refptr<Buffer> InProcessCommandBuffer::CreateTransferBuffer(
- size_t size,
+ uint32_t size,
int32_t* id) {
scoped_refptr<Buffer> buffer = MakeMemoryBuffer(size);
- *id = g_next_transfer_buffer_id.GetNext() + 1;
+ *id = GetNextBufferId();
ScheduleGpuTask(
base::BindOnce(&InProcessCommandBuffer::RegisterTransferBufferOnGpuThread,
gpu_thread_weak_ptr_factory_.GetWeakPtr(), *id, buffer));
@@ -992,13 +1058,9 @@ int32_t InProcessCommandBuffer::CreateImage(ClientBuffer buffer,
bool requires_sync_point = handle.type == gfx::IO_SURFACE_BUFFER;
uint64_t fence_sync = 0;
- if (requires_sync_point) {
+ if (requires_sync_point)
fence_sync = GenerateFenceSyncRelease();
- // Previous fence syncs should be flushed already.
- DCHECK_EQ(fence_sync - 1, flushed_fence_sync_release_);
- }
-
ScheduleGpuTask(base::BindOnce(
&InProcessCommandBuffer::CreateImageOnGpuThread,
gpu_thread_weak_ptr_factory_.GetWeakPtr(), new_id, std::move(handle),
@@ -1007,7 +1069,6 @@ int32_t InProcessCommandBuffer::CreateImage(ClientBuffer buffer,
gpu_memory_buffer->GetFormat(), fence_sync));
if (fence_sync) {
- flushed_fence_sync_release_ = fence_sync;
SyncToken sync_token(GetNamespaceID(), GetCommandBufferID(), fence_sync);
sync_token.SetVerifyFlush();
gpu_memory_buffer_manager_->SetDestructionSyncToken(gpu_memory_buffer,
@@ -1106,58 +1167,13 @@ void InProcessCommandBuffer::OnFenceSyncRelease(uint64_t release) {
DCHECK_CALLED_ON_VALID_SEQUENCE(gpu_sequence_checker_);
SyncToken sync_token(GetNamespaceID(), GetCommandBufferID(), release);
- context_group_->mailbox_manager()->PushTextureUpdates(sync_token);
- sync_point_client_state_->ReleaseFenceSync(release);
-}
-
-// TODO(sunnyps): Remove the wait command once all sync tokens are passed as
-// task dependencies.
-bool InProcessCommandBuffer::OnWaitSyncToken(const SyncToken& sync_token) {
- DCHECK_CALLED_ON_VALID_SEQUENCE(gpu_sequence_checker_);
- DCHECK(!waiting_for_sync_point_);
- TRACE_EVENT0("gpu", "InProcessCommandBuffer::OnWaitSyncToken");
-
- SyncPointManager* sync_point_manager = task_executor_->sync_point_manager();
- DCHECK(sync_point_manager);
MailboxManager* mailbox_manager = context_group_->mailbox_manager();
- DCHECK(mailbox_manager);
-
- if (task_executor_->BlockThreadOnWaitSyncToken()) {
- // Wait if sync point wait is valid.
- if (sync_point_client_state_->Wait(
- sync_token,
- base::Bind(&base::WaitableEvent::Signal,
- base::Unretained(&fence_sync_wait_event_)))) {
- fence_sync_wait_event_.Wait();
- }
-
- mailbox_manager->PullTextureUpdates(sync_token);
- return false;
- }
-
- waiting_for_sync_point_ = sync_point_client_state_->Wait(
- sync_token,
- base::Bind(&InProcessCommandBuffer::OnWaitSyncTokenCompleted,
- gpu_thread_weak_ptr_factory_.GetWeakPtr(), sync_token));
- if (!waiting_for_sync_point_) {
- mailbox_manager->PullTextureUpdates(sync_token);
- return false;
- }
+ if (mailbox_manager->UsesSync())
+ mailbox_manager->PushTextureUpdates(sync_token);
- command_buffer_->SetScheduled(false);
- task_sequence_->SetEnabled(false);
- return true;
-}
-
-void InProcessCommandBuffer::OnWaitSyncTokenCompleted(
- const SyncToken& sync_token) {
- DCHECK_CALLED_ON_VALID_SEQUENCE(gpu_sequence_checker_);
- DCHECK(waiting_for_sync_point_);
- context_group_->mailbox_manager()->PullTextureUpdates(sync_token);
- waiting_for_sync_point_ = false;
- command_buffer_->SetScheduled(true);
- task_sequence_->SetEnabled(true);
+ command_buffer_->SetReleaseCount(release);
+ sync_point_client_state_->ReleaseFenceSync(release);
}
void InProcessCommandBuffer::OnDescheduleUntilFinished() {
@@ -1335,6 +1351,30 @@ void InProcessCommandBuffer::CreateSharedImageOnGpuThread(
shared_image_client_state_->ReleaseFenceSync(sync_token.release_count());
}
+void InProcessCommandBuffer::CreateSharedImageWithDataOnGpuThread(
+ const Mailbox& mailbox,
+ viz::ResourceFormat format,
+ const gfx::Size& size,
+ const gfx::ColorSpace& color_space,
+ uint32_t usage,
+ const SyncToken& sync_token,
+ std::vector<uint8_t> pixel_data) {
+ DCHECK_CALLED_ON_VALID_SEQUENCE(gpu_sequence_checker_);
+ // |shared_image_factory_| never writes to the surface, so skip unnecessary
+ // MakeCurrent to improve performance. https://crbug.com/457431
+ if (!context_->IsCurrent(nullptr) && !MakeCurrent())
+ return;
+ LazyCreateSharedImageFactory();
+ if (!shared_image_factory_->CreateSharedImage(
+ mailbox, format, size, color_space, usage, pixel_data)) {
+ // Signal errors by losing the command buffer.
+ command_buffer_->SetParseError(error::kLostContext);
+ return;
+ }
+ context_group_->mailbox_manager()->PushTextureUpdates(sync_token);
+ shared_image_client_state_->ReleaseFenceSync(sync_token.release_count());
+}
+
void InProcessCommandBuffer::CreateGMBSharedImageOnGpuThread(
const Mailbox& mailbox,
gfx::GpuMemoryBufferHandle handle,
@@ -1419,7 +1459,7 @@ bool InProcessCommandBuffer::IsFenceSyncReleased(uint64_t release) {
return release <= GetLastState().release_count;
}
-void InProcessCommandBuffer::WaitSyncTokenHint(const SyncToken& sync_token) {
+void InProcessCommandBuffer::WaitSyncToken(const SyncToken& sync_token) {
next_flush_sync_token_fences_.push_back(sync_token);
}
@@ -1477,12 +1517,10 @@ void InProcessCommandBuffer::BufferPresented(
SwapBufferParams params = pending_presented_params_.front();
pending_presented_params_.pop_front();
- if (ShouldSendBufferPresented(params.flags, feedback.flags)) {
- PostOrRunClientCallback(
- base::BindOnce(&InProcessCommandBuffer::BufferPresentedOnOriginThread,
- client_thread_weak_ptr_factory_.GetWeakPtr(),
- params.swap_id, params.flags, feedback));
- }
+ PostOrRunClientCallback(
+ base::BindOnce(&InProcessCommandBuffer::BufferPresentedOnOriginThread,
+ client_thread_weak_ptr_factory_.GetWeakPtr(),
+ params.swap_id, params.flags, feedback));
}
void InProcessCommandBuffer::AddFilter(IPC::MessageFilter* message_filter) {
diff --git a/chromium/gpu/ipc/in_process_command_buffer.h b/chromium/gpu/ipc/in_process_command_buffer.h
index 038ddf37673..a2ac2361a34 100644
--- a/chromium/gpu/ipc/in_process_command_buffer.h
+++ b/chromium/gpu/ipc/in_process_command_buffer.h
@@ -19,6 +19,7 @@
#include "base/macros.h"
#include "base/memory/scoped_refptr.h"
#include "base/memory/weak_ptr.h"
+#include "base/optional.h"
#include "base/single_thread_task_runner.h"
#include "base/synchronization/lock.h"
#include "base/synchronization/waitable_event.h"
@@ -31,6 +32,7 @@
#include "gpu/command_buffer/service/decoder_client.h"
#include "gpu/command_buffer/service/decoder_context.h"
#include "gpu/command_buffer/service/gr_cache_controller.h"
+#include "gpu/command_buffer/service/program_cache.h"
#include "gpu/command_buffer/service/service_discardable_manager.h"
#include "gpu/command_buffer/service/service_transfer_cache.h"
#include "gpu/config/gpu_feature_info.h"
@@ -54,6 +56,7 @@ class Size;
}
namespace gpu {
+class SharedContextState;
class GpuChannelManagerDelegate;
class GpuProcessActivityFlags;
class GpuMemoryBufferManager;
@@ -67,7 +70,6 @@ struct SwapBuffersCompleteParams;
namespace raster {
class GrShaderCache;
-struct RasterDecoderContextState;
}
// This class provides a thread-safe interface to the global GPU service (for
@@ -114,7 +116,8 @@ class GL_IN_PROCESS_CONTEXT_EXPORT InProcessCommandBuffer
int32_t start,
int32_t end) override;
void SetGetBuffer(int32_t shm_id) override;
- scoped_refptr<Buffer> CreateTransferBuffer(size_t size, int32_t* id) override;
+ scoped_refptr<Buffer> CreateTransferBuffer(uint32_t size,
+ int32_t* id) override;
void DestroyTransferBuffer(int32_t id) override;
// GpuControl implementation (called on client thread):
@@ -139,7 +142,7 @@ class GL_IN_PROCESS_CONTEXT_EXPORT InProcessCommandBuffer
bool IsFenceSyncReleased(uint64_t release) override;
void SignalSyncToken(const SyncToken& sync_token,
base::OnceClosure callback) override;
- void WaitSyncTokenHint(const SyncToken& sync_token) override;
+ void WaitSyncToken(const SyncToken& sync_token) override;
bool CanWaitUnverifiedSyncToken(const SyncToken& sync_token) override;
// CommandBufferServiceClient implementation (called on gpu thread):
@@ -150,7 +153,6 @@ class GL_IN_PROCESS_CONTEXT_EXPORT InProcessCommandBuffer
void OnConsoleMessage(int32_t id, const std::string& message) override;
void CacheShader(const std::string& key, const std::string& shader) override;
void OnFenceSyncRelease(uint64_t release) override;
- bool OnWaitSyncToken(const SyncToken& sync_token) override;
void OnDescheduleUntilFinished() override;
void OnRescheduleAfterFinished() override;
void OnSwapBuffers(uint64_t swap_id, uint32_t flags) override;
@@ -174,7 +176,8 @@ class GL_IN_PROCESS_CONTEXT_EXPORT InProcessCommandBuffer
const GpuFeatureInfo& GetGpuFeatureInfo() const;
using UpdateVSyncParametersCallback =
- base::Callback<void(base::TimeTicks timebase, base::TimeDelta interval)>;
+ base::RepeatingCallback<void(base::TimeTicks timebase,
+ base::TimeDelta interval)>;
void SetUpdateVSyncParametersCallback(
const UpdateVSyncParametersCallback& callback);
@@ -227,7 +230,8 @@ class GL_IN_PROCESS_CONTEXT_EXPORT InProcessCommandBuffer
// Flush up to put_offset. If execution is deferred either by yielding, or due
// to a sync token wait, HasUnprocessedCommandsOnGpuThread() returns true.
- void FlushOnGpuThread(int32_t put_offset);
+ void FlushOnGpuThread(int32_t put_offset,
+ const std::vector<SyncToken>& sync_token_fences);
bool HasUnprocessedCommandsOnGpuThread();
void UpdateLastStateOnGpuThread();
@@ -235,6 +239,8 @@ class GL_IN_PROCESS_CONTEXT_EXPORT InProcessCommandBuffer
bool MakeCurrent();
+ base::Optional<gles2::ProgramCache::ScopedCacheUse> CreateCacheUse();
+
// Client callbacks are posted back to |origin_task_runner_|, or run
// synchronously if there's no task runner or message loop.
void PostOrRunClientCallback(base::OnceClosure callback);
@@ -275,6 +281,13 @@ class GL_IN_PROCESS_CONTEXT_EXPORT InProcessCommandBuffer
const gfx::ColorSpace& color_space,
uint32_t usage,
const SyncToken& sync_token);
+ void CreateSharedImageWithDataOnGpuThread(const Mailbox& mailbox,
+ viz::ResourceFormat format,
+ const gfx::Size& size,
+ const gfx::ColorSpace& color_space,
+ uint32_t usage,
+ const SyncToken& sync_token,
+ std::vector<uint8_t> pixel_data);
void CreateGMBSharedImageOnGpuThread(const Mailbox& mailbox,
gfx::GpuMemoryBufferHandle handle,
gfx::BufferFormat format,
@@ -288,7 +301,6 @@ class GL_IN_PROCESS_CONTEXT_EXPORT InProcessCommandBuffer
// Callbacks on the gpu thread.
void PerformDelayedWorkOnGpuThread();
- void OnWaitSyncTokenCompleted(const SyncToken& sync_token);
// Callback implementations on the client thread.
void OnContextLost();
@@ -301,7 +313,6 @@ class GL_IN_PROCESS_CONTEXT_EXPORT InProcessCommandBuffer
// Members accessed on the gpu thread (possibly with the exception of
// creation):
- bool waiting_for_sync_point_ = false;
bool use_virtualized_gl_context_ = false;
raster::GrShaderCache* gr_shader_cache_ = nullptr;
scoped_refptr<base::SingleThreadTaskRunner> origin_task_runner_;
@@ -333,7 +344,6 @@ class GL_IN_PROCESS_CONTEXT_EXPORT InProcessCommandBuffer
Capabilities capabilities_;
GpuMemoryBufferManager* gpu_memory_buffer_manager_ = nullptr;
uint64_t next_fence_sync_release_ = 1;
- uint64_t flushed_fence_sync_release_ = 0;
std::vector<SyncToken> next_flush_sync_token_fences_;
// Sequence checker for client sequence used for initialization, destruction,
// callbacks, such as context loss, and methods which provide such callbacks,
@@ -363,7 +373,7 @@ class GL_IN_PROCESS_CONTEXT_EXPORT InProcessCommandBuffer
base::circular_deque<SwapBufferParams> pending_presented_params_;
base::circular_deque<SwapBufferParams> pending_swap_completed_params_;
- scoped_refptr<raster::RasterDecoderContextState> context_state_;
+ scoped_refptr<SharedContextState> context_state_;
base::WeakPtrFactory<InProcessCommandBuffer> client_thread_weak_ptr_factory_;
base::WeakPtrFactory<InProcessCommandBuffer> gpu_thread_weak_ptr_factory_;
diff --git a/chromium/gpu/ipc/in_process_gpu_thread_holder.cc b/chromium/gpu/ipc/in_process_gpu_thread_holder.cc
index 6aa45dfbd0c..24545c06412 100644
--- a/chromium/gpu/ipc/in_process_gpu_thread_holder.cc
+++ b/chromium/gpu/ipc/in_process_gpu_thread_holder.cc
@@ -7,8 +7,10 @@
#include "base/command_line.h"
#include "base/synchronization/waitable_event.h"
#include "base/threading/thread_task_runner_handle.h"
+#include "gpu/command_buffer/service/mailbox_manager_factory.h"
#include "gpu/command_buffer/service/scheduler.h"
#include "gpu/command_buffer/service/service_utils.h"
+#include "gpu/command_buffer/service/shared_image_manager.h"
#include "gpu/command_buffer/service/sync_point_manager.h"
#include "gpu/config/gpu_info_collector.h"
#include "gpu/config/gpu_util.h"
@@ -66,9 +68,12 @@ void InProcessGpuThreadHolder::InitializeOnGpuThread(
sync_point_manager_ = std::make_unique<SyncPointManager>();
scheduler_ =
std::make_unique<Scheduler>(task_runner(), sync_point_manager_.get());
+ mailbox_manager_ = gles2::CreateMailboxManager(gpu_preferences_);
+ shared_image_manager_ = std::make_unique<SharedImageManager>();
task_executor_ = base::MakeRefCounted<GpuInProcessThreadService>(
- task_runner(), scheduler_.get(), sync_point_manager_.get(), nullptr,
- nullptr, gl::GLSurfaceFormat(), gpu_feature_info_, gpu_preferences_);
+ task_runner(), scheduler_.get(), sync_point_manager_.get(),
+ mailbox_manager_.get(), nullptr, gl::GLSurfaceFormat(), gpu_feature_info_,
+ gpu_preferences_, shared_image_manager_.get(), nullptr);
completion->Signal();
}
diff --git a/chromium/gpu/ipc/in_process_gpu_thread_holder.h b/chromium/gpu/ipc/in_process_gpu_thread_holder.h
index cdc98c3d113..7674d455753 100644
--- a/chromium/gpu/ipc/in_process_gpu_thread_holder.h
+++ b/chromium/gpu/ipc/in_process_gpu_thread_holder.h
@@ -16,7 +16,9 @@
namespace gpu {
class CommandBufferTaskExecutor;
+class MailboxManager;
class Scheduler;
+class SharedImageManager;
class SyncPointManager;
// Starts a GPU thread and task executor that runs tasks on the GPU thread. This
@@ -50,6 +52,8 @@ class COMPONENT_EXPORT(GPU_THREAD_HOLDER) InProcessGpuThreadHolder
std::unique_ptr<SyncPointManager> sync_point_manager_;
std::unique_ptr<Scheduler> scheduler_;
+ std::unique_ptr<MailboxManager> mailbox_manager_;
+ std::unique_ptr<SharedImageManager> shared_image_manager_;
scoped_refptr<CommandBufferTaskExecutor> task_executor_;
DISALLOW_COPY_AND_ASSIGN(InProcessGpuThreadHolder);
diff --git a/chromium/gpu/ipc/service/BUILD.gn b/chromium/gpu/ipc/service/BUILD.gn
index d72bdb17446..9316a1e3cc6 100644
--- a/chromium/gpu/ipc/service/BUILD.gn
+++ b/chromium/gpu/ipc/service/BUILD.gn
@@ -2,10 +2,10 @@
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
-import("//testing/test.gni")
import("//build/config/jumbo.gni")
import("//build/config/ui.gni")
import("//gpu/vulkan/features.gni")
+import("//testing/test.gni")
if (is_mac) {
import("//build/config/mac/mac_sdk.gni")
}
@@ -31,6 +31,7 @@ jumbo_component("service") {
"gpu_watchdog_thread.h",
"image_decode_accelerator_stub.cc",
"image_decode_accelerator_stub.h",
+ "image_decode_accelerator_worker.h",
"image_transport_surface.h",
"image_transport_surface_delegate.h",
"pass_through_image_transport_surface.cc",
@@ -55,7 +56,7 @@ jumbo_component("service") {
"//ui/gfx",
"//ui/gfx/geometry",
"//ui/gl",
- "//ui/gl:gl_features",
+ "//ui/gl:buildflags",
"//ui/gl/init",
"//url",
]
diff --git a/chromium/gpu/ipc/service/child_window_win.cc b/chromium/gpu/ipc/service/child_window_win.cc
index 8bf56a44543..fb6e7dccc7f 100644
--- a/chromium/gpu/ipc/service/child_window_win.cc
+++ b/chromium/gpu/ipc/service/child_window_win.cc
@@ -141,8 +141,8 @@ bool ChildWindowWin::Initialize() {
thread_->task_runner()->PostTask(
FROM_HERE,
- base::Bind(&CreateWindowsOnThread, gfx::Rect(window_rect).size(), &event,
- &window_, &initial_parent_window_));
+ base::BindOnce(&CreateWindowsOnThread, gfx::Rect(window_rect).size(),
+ &event, &window_, &initial_parent_window_));
event.Wait();
delegate_->DidCreateAcceleratedSurfaceChildWindow(parent_window_, window_);
diff --git a/chromium/gpu/ipc/service/command_buffer_stub.cc b/chromium/gpu/ipc/service/command_buffer_stub.cc
index 8797b25e045..2ed549a6bdb 100644
--- a/chromium/gpu/ipc/service/command_buffer_stub.cc
+++ b/chromium/gpu/ipc/service/command_buffer_stub.cc
@@ -150,7 +150,6 @@ CommandBufferStub::CommandBufferStub(
stream_id_(stream_id),
route_id_(route_id),
last_flush_id_(0),
- waiting_for_sync_point_(false),
previous_processed_num_(0),
wait_set_get_buffer_count_(0) {}
@@ -166,6 +165,7 @@ bool CommandBufferStub::OnMessageReceived(const IPC::Message& message) {
crash_keys::gpu_gl_context_is_virtual.Set(use_virtualized_gl_context_ ? "1"
: "0");
bool have_context = false;
+ base::Optional<gles2::ProgramCache::ScopedCacheUse> cache_use;
// Ensure the appropriate GL context is current before handling any IPC
// messages directed at the command buffer. This ensures that the message
// handler can assume that the context is current (not necessary for
@@ -176,11 +176,11 @@ bool CommandBufferStub::OnMessageReceived(const IPC::Message& message) {
message.type() != GpuCommandBufferMsg_WaitForGetOffsetInRange::ID &&
message.type() != GpuCommandBufferMsg_RegisterTransferBuffer::ID &&
message.type() != GpuCommandBufferMsg_DestroyTransferBuffer::ID &&
- message.type() != GpuCommandBufferMsg_WaitSyncToken::ID &&
message.type() != GpuCommandBufferMsg_SignalSyncToken::ID &&
message.type() != GpuCommandBufferMsg_SignalQuery::ID) {
if (!MakeCurrent())
return false;
+ cache_use.emplace(CreateCacheUse());
have_context = true;
}
@@ -201,7 +201,6 @@ bool CommandBufferStub::OnMessageReceived(const IPC::Message& message) {
OnRegisterTransferBuffer);
IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_DestroyTransferBuffer,
OnDestroyTransferBuffer);
- IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_WaitSyncToken, OnWaitSyncToken)
IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_SignalSyncToken, OnSignalSyncToken)
IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_SignalQuery, OnSignalQuery)
IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_CreateImage, OnCreateImage);
@@ -243,7 +242,7 @@ void CommandBufferStub::PollWork() {
DCHECK(!process_delayed_work_time_.is_null());
if (process_delayed_work_time_ > current_time) {
channel_->task_runner()->PostDelayedTask(
- FROM_HERE, base::Bind(&CommandBufferStub::PollWork, AsWeakPtr()),
+ FROM_HERE, base::BindOnce(&CommandBufferStub::PollWork, AsWeakPtr()),
process_delayed_work_time_ - current_time);
return;
}
@@ -260,6 +259,7 @@ void CommandBufferStub::PerformWork() {
: "0");
if (decoder_context_.get() && !MakeCurrent())
return;
+ auto cache_use = CreateCacheUse();
if (decoder_context_) {
uint32_t current_unprocessed_num =
@@ -336,7 +336,8 @@ void CommandBufferStub::ScheduleDelayedWork(base::TimeDelta delay) {
process_delayed_work_time_ = current_time + delay;
channel_->task_runner()->PostDelayedTask(
- FROM_HERE, base::Bind(&CommandBufferStub::PollWork, AsWeakPtr()), delay);
+ FROM_HERE, base::BindOnce(&CommandBufferStub::PollWork, AsWeakPtr()),
+ delay);
}
bool CommandBufferStub::MakeCurrent() {
@@ -348,6 +349,12 @@ bool CommandBufferStub::MakeCurrent() {
return false;
}
+gles2::ProgramCache::ScopedCacheUse CommandBufferStub::CreateCacheUse() {
+ return gles2::ProgramCache::ScopedCacheUse(
+ channel_->gpu_channel_manager()->program_cache(),
+ base::BindRepeating(&DecoderClient::CacheShader, base::Unretained(this)));
+}
+
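The base::Optional<ScopedCacheUse> pattern used above is worth spelling out: construction of the RAII object is deferred until the GL context is known to be current, and its destructor runs at the end of the handler scope either way. A minimal sketch under that reading, with std::optional and a hypothetical callback standing in for the real ProgramCache plumbing:

    #include <functional>
    #include <optional>
    #include <utility>

    class ScopedCacheUse {
     public:
      // The real class enters the program cache here and carries a
      // CacheShader callback; this stand-in only models the lifetime.
      explicit ScopedCacheUse(std::function<void()> on_release)
          : on_release_(std::move(on_release)) {}
      ~ScopedCacheUse() { on_release_(); }

     private:
      std::function<void()> on_release_;
    };

    void HandleMessage(bool have_context) {
      std::optional<ScopedCacheUse> cache_use;
      if (have_context)  // i.e. MakeCurrent() succeeded
        cache_use.emplace([] { /* leave the cache */ });
      // ... dispatch the message; the destructor runs when the optional
      // goes out of scope, whether or not emplace() ever happened.
    }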
void CommandBufferStub::Destroy() {
FastSetActiveURL(active_url_, active_url_hash_, channel_);
// TODO(sunnyps): Should this use ScopedCrashKey instead?
@@ -370,7 +377,7 @@ void CommandBufferStub::Destroy() {
// might bypass the 3D API blocking logic.
if ((surface_handle_ == gpu::kNullSurfaceHandle) &&
!active_url_.is_empty() &&
- !gpu_channel_manager->is_exiting_for_lost_context()) {
+ !gpu_channel_manager->delegate()->IsExiting()) {
gpu_channel_manager->delegate()->DidDestroyOffscreenContext(active_url_);
}
}
@@ -387,6 +394,11 @@ void CommandBufferStub::Destroy() {
have_context =
decoder_context_->GetGLContext()->MakeCurrent(surface_.get());
}
+
+ base::Optional<gles2::ProgramCache::ScopedCacheUse> cache_use;
+ if (have_context)
+ cache_use.emplace(CreateCacheUse());
+
for (auto& observer : destruction_observers_)
observer.OnWillDestroyStub(have_context);
@@ -517,7 +529,10 @@ void CommandBufferStub::CheckCompleteWaits() {
}
}
-void CommandBufferStub::OnAsyncFlush(int32_t put_offset, uint32_t flush_id) {
+void CommandBufferStub::OnAsyncFlush(
+ int32_t put_offset,
+ uint32_t flush_id,
+ const std::vector<SyncToken>& sync_token_fences) {
TRACE_EVENT1("gpu", "CommandBufferStub::OnAsyncFlush", "put_offset",
put_offset);
DCHECK(command_buffer_);
@@ -525,11 +540,22 @@ void CommandBufferStub::OnAsyncFlush(int32_t put_offset, uint32_t flush_id) {
// to catch regressions. Ignore the message.
DVLOG_IF(0, flush_id - last_flush_id_ >= 0x8000000U)
<< "Received a Flush message out-of-order";
+ // Check if sync token waits are invalid or already complete. Do not use
+ // SyncPointManager::IsSyncTokenReleased() as it can't say if the wait is
+ // invalid.
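+ // (Wait() returns true only when it registers a pending wait, so a false
+ // return here means each fence was invalid or already released, exactly
+ // what the scheduler guarantees once fences arrive as task dependencies.)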
+ for (const auto& sync_token : sync_token_fences)
+ DCHECK(!sync_point_client_state_->Wait(sync_token, base::DoNothing()));
last_flush_id_ = flush_id;
CommandBuffer::State pre_state = command_buffer_->GetState();
FastSetActiveURL(active_url_, active_url_hash_, channel_);
+ MailboxManager* mailbox_manager = context_group_->mailbox_manager();
+ if (mailbox_manager->UsesSync()) {
+ for (const auto& sync_token : sync_token_fences)
+ mailbox_manager->PullTextureUpdates(sync_token);
+ }
+
{
auto* gr_shader_cache = channel_->gpu_channel_manager()->gr_shader_cache();
base::Optional<raster::GrShaderCache::ScopedCacheUse> cache_use;
@@ -556,7 +582,7 @@ void CommandBufferStub::OnRegisterTransferBuffer(
// Map the shared memory into this process.
base::WritableSharedMemoryMapping mapping = transfer_buffer.Map();
- if (!mapping.IsValid()) {
+ if (!mapping.IsValid() || (mapping.size() > UINT32_MAX)) {
DVLOG(0) << "Failed to map shared memory.";
return;
}
@@ -581,9 +607,10 @@ void CommandBufferStub::ReportState() {
void CommandBufferStub::OnSignalSyncToken(const SyncToken& sync_token,
uint32_t id) {
+ auto callback =
+ base::BindOnce(&CommandBufferStub::OnSignalAck, this->AsWeakPtr(), id);
if (!sync_point_client_state_->WaitNonThreadSafe(
- sync_token, channel_->task_runner(),
- base::Bind(&CommandBufferStub::OnSignalAck, this->AsWeakPtr(), id))) {
+ sync_token, channel_->task_runner(), std::move(callback))) {
OnSignalAck(id);
}
}
@@ -680,43 +707,6 @@ void CommandBufferStub::ScheduleGrContextCleanup() {
channel_->gpu_channel_manager()->ScheduleGrContextCleanup();
}
-// TODO(sunnyps): Remove the wait command once all sync tokens are passed as
-// task dependencies.
-bool CommandBufferStub::OnWaitSyncToken(const SyncToken& sync_token) {
- DCHECK(!waiting_for_sync_point_);
- DCHECK(command_buffer_->scheduled());
- TRACE_EVENT_ASYNC_BEGIN1("gpu", "WaitSyncToken", this, "CommandBufferStub",
- this);
-
- waiting_for_sync_point_ = sync_point_client_state_->WaitNonThreadSafe(
- sync_token, channel_->task_runner(),
- base::Bind(&CommandBufferStub::OnWaitSyncTokenCompleted, AsWeakPtr(),
- sync_token));
-
- if (waiting_for_sync_point_) {
- command_buffer_->SetScheduled(false);
- channel_->OnCommandBufferDescheduled(this);
- return true;
- }
-
- MailboxManager* mailbox_manager = context_group_->mailbox_manager();
- if (mailbox_manager->UsesSync() && MakeCurrent())
- mailbox_manager->PullTextureUpdates(sync_token);
- return false;
-}
-
-void CommandBufferStub::OnWaitSyncTokenCompleted(const SyncToken& sync_token) {
- DCHECK(waiting_for_sync_point_);
- TRACE_EVENT_ASYNC_END1("gpu", "WaitSyncToken", this, "CommandBufferStub",
- this);
- // Don't call PullTextureUpdates here because we can't MakeCurrent if we're
- // executing commands on another context. The WaitSyncToken command will run
- // again and call PullTextureUpdates once this command buffer gets scheduled.
- waiting_for_sync_point_ = false;
- command_buffer_->SetScheduled(true);
- channel_->OnCommandBufferScheduled(this);
-}
-
void CommandBufferStub::OnCreateImage(
GpuCommandBufferMsg_CreateImage_Params params) {
TRACE_EVENT0("gpu", "CommandBufferStub::OnCreateImage");
@@ -812,20 +802,7 @@ bool CommandBufferStub::CheckContextLost() {
bool was_lost_by_robustness =
decoder_context_ &&
decoder_context_->WasContextLostByRobustnessExtension();
-
- // Work around issues with recovery by allowing a new GPU process to launch.
- if ((was_lost_by_robustness ||
- context_group_->feature_info()->workarounds().exit_on_context_lost)) {
- channel_->gpu_channel_manager()->MaybeExitOnContextLost();
- }
-
- // Lose all other contexts if the reset was triggered by the robustness
- // extension instead of being synthetic.
- if (was_lost_by_robustness &&
- (gl::GLContext::LosesAllContextsOnContextLost() ||
- use_virtualized_gl_context_)) {
- channel_->LoseAllContexts();
- }
+ channel_->gpu_channel_manager()->OnContextLost(!was_lost_by_robustness);
}
CheckCompleteWaits();
diff --git a/chromium/gpu/ipc/service/command_buffer_stub.h b/chromium/gpu/ipc/service/command_buffer_stub.h
index 32a979f23e7..a1fb4bed007 100644
--- a/chromium/gpu/ipc/service/command_buffer_stub.h
+++ b/chromium/gpu/ipc/service/command_buffer_stub.h
@@ -23,6 +23,7 @@
#include "gpu/command_buffer/service/command_buffer_service.h"
#include "gpu/command_buffer/service/context_group.h"
#include "gpu/command_buffer/service/decoder_client.h"
+#include "gpu/command_buffer/service/program_cache.h"
#include "gpu/command_buffer/service/sequence_id.h"
#include "gpu/ipc/common/surface_handle.h"
#include "gpu/ipc/service/gpu_ipc_service_export.h"
@@ -97,7 +98,6 @@ class GPU_IPC_SERVICE_EXPORT CommandBufferStub
void OnConsoleMessage(int32_t id, const std::string& message) override;
void CacheShader(const std::string& key, const std::string& shader) override;
void OnFenceSyncRelease(uint64_t release) override;
- bool OnWaitSyncToken(const SyncToken& sync_token) override;
void OnDescheduleUntilFinished() override;
void OnRescheduleAfterFinished() override;
void ScheduleGrContextCleanup() override;
@@ -177,6 +177,8 @@ class GPU_IPC_SERVICE_EXPORT CommandBufferStub
bool MakeCurrent();
+ gles2::ProgramCache::ScopedCacheUse CreateCacheUse();
+
// Message handlers:
void OnSetGetBuffer(int32_t shm_id);
virtual void OnTakeFrontBuffer(const Mailbox& mailbox) = 0;
@@ -189,7 +191,9 @@ class GPU_IPC_SERVICE_EXPORT CommandBufferStub
int32_t start,
int32_t end,
IPC::Message* reply_message);
- void OnAsyncFlush(int32_t put_offset, uint32_t flush_id);
+ void OnAsyncFlush(int32_t put_offset,
+ uint32_t flush_id,
+ const std::vector<SyncToken>& sync_token_fences);
void OnRegisterTransferBuffer(int32_t id,
base::UnsafeSharedMemoryRegion transfer_buffer);
void OnDestroyTransferBuffer(int32_t id);
@@ -204,8 +208,6 @@ class GPU_IPC_SERVICE_EXPORT CommandBufferStub
const gfx::GpuFenceHandle& handle);
void OnGetGpuFenceHandle(uint32_t gpu_fence_id);
- void OnWaitSyncTokenCompleted(const SyncToken& sync_token);
-
void OnCreateImage(GpuCommandBufferMsg_CreateImage_Params params);
void OnDestroyImage(int32_t id);
void OnCreateStreamTexture(uint32_t texture_id,
@@ -237,8 +239,6 @@ class GPU_IPC_SERVICE_EXPORT CommandBufferStub
base::ObserverList<DestructionObserver>::Unchecked destruction_observers_;
- bool waiting_for_sync_point_;
-
base::TimeTicks process_delayed_work_time_;
uint32_t previous_processed_num_;
base::TimeTicks last_idle_time_;
diff --git a/chromium/gpu/ipc/service/direct_composition_child_surface_win.cc b/chromium/gpu/ipc/service/direct_composition_child_surface_win.cc
index 7b2b492f856..886dff6d36d 100644
--- a/chromium/gpu/ipc/service/direct_composition_child_surface_win.cc
+++ b/chromium/gpu/ipc/service/direct_composition_child_surface_win.cc
@@ -11,6 +11,7 @@
#include "base/metrics/histogram_macros.h"
#include "base/synchronization/waitable_event.h"
#include "base/trace_event/trace_event.h"
+#include "base/win/windows_version.h"
#include "ui/display/display_switches.h"
#include "ui/gfx/native_widget_types.h"
#include "ui/gl/egl_util.h"
@@ -36,27 +37,64 @@ namespace {
// here which IDCompositionSurface is being rendered into. If another context
// is made current, then this surface will be suspended.
IDCompositionSurface* g_current_surface;
+
+// Returns true if swap chain tearing is supported.
+bool IsSwapChainTearingSupported() {
+ static const bool supported = [] {
+ // Swap chain tearing is used only if vsync is disabled explicitly.
+ if (!base::CommandLine::ForCurrentProcess()->HasSwitch(
+ switches::kDisableGpuVsync))
+ return false;
+
+ // Swap chain tearing is supported only on Windows 10 Anniversary Edition
+ // (Redstone 1) and above.
+ if (base::win::GetVersion() < base::win::VERSION_WIN10_RS1)
+ return false;
+
+ Microsoft::WRL::ComPtr<ID3D11Device> d3d11_device =
+ gl::QueryD3D11DeviceObjectFromANGLE();
+ if (!d3d11_device) {
+ DLOG(ERROR) << "Not using swap chain tearing because failed to retrieve "
+ "D3D11 device from ANGLE";
+ return false;
+ }
+ Microsoft::WRL::ComPtr<IDXGIDevice> dxgi_device;
+ d3d11_device.CopyTo(dxgi_device.GetAddressOf());
+ DCHECK(dxgi_device);
+ Microsoft::WRL::ComPtr<IDXGIAdapter> dxgi_adapter;
+ dxgi_device->GetAdapter(dxgi_adapter.GetAddressOf());
+ DCHECK(dxgi_adapter);
+ Microsoft::WRL::ComPtr<IDXGIFactory5> dxgi_factory;
+ if (FAILED(dxgi_adapter->GetParent(
+ IID_PPV_ARGS(dxgi_factory.GetAddressOf())))) {
+ DLOG(ERROR) << "Not using swap chain tearing because failed to retrieve "
+ "IDXGIFactory5 interface";
+ return false;
+ }
+
+ BOOL present_allow_tearing = FALSE;
+ DCHECK(dxgi_factory);
+ if (FAILED(dxgi_factory->CheckFeatureSupport(
+ DXGI_FEATURE_PRESENT_ALLOW_TEARING, &present_allow_tearing,
+ sizeof(present_allow_tearing)))) {
+ DLOG(ERROR)
+ << "Not using swap chain tearing because CheckFeatureSupport failed";
+ return false;
+ }
+ return !!present_allow_tearing;
+ }();
+ return supported;
}
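The probe above goes through ANGLE's D3D11 device; for reference, the same DXGI_FEATURE_PRESENT_ALLOW_TEARING query can be made against a directly created factory. A minimal sketch, assuming a Windows 10 SDK with dxgi1_5.h and dxgi.lib linked:

    #include <dxgi1_5.h>
    #include <wrl/client.h>

    bool QueryTearingSupport() {
      Microsoft::WRL::ComPtr<IDXGIFactory5> factory;
      if (FAILED(CreateDXGIFactory1(IID_PPV_ARGS(factory.GetAddressOf()))))
        return false;
      BOOL allow_tearing = FALSE;
      if (FAILED(factory->CheckFeatureSupport(
              DXGI_FEATURE_PRESENT_ALLOW_TEARING, &allow_tearing,
              sizeof(allow_tearing))))
        return false;
      return !!allow_tearing;
    }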
-DirectCompositionChildSurfaceWin::DirectCompositionChildSurfaceWin(
- const gfx::Size& size,
- bool is_hdr,
- bool has_alpha,
- bool use_dcomp_surface,
- bool allow_tearing)
- : gl::GLSurfaceEGL(),
- size_(size),
- is_hdr_(is_hdr),
- has_alpha_(has_alpha),
- use_dcomp_surface_(use_dcomp_surface),
- allow_tearing_(allow_tearing) {}
+} // namespace
+
+DirectCompositionChildSurfaceWin::DirectCompositionChildSurfaceWin() = default;
DirectCompositionChildSurfaceWin::~DirectCompositionChildSurfaceWin() {
Destroy();
}
bool DirectCompositionChildSurfaceWin::Initialize(gl::GLSurfaceFormat format) {
- ui::ScopedReleaseCurrent release_current;
d3d11_device_ = gl::QueryD3D11DeviceObjectFromANGLE();
dcomp_device_ = gl::QueryDirectCompositionDevice(d3d11_device_);
if (!dcomp_device_)
@@ -85,67 +123,11 @@ bool DirectCompositionChildSurfaceWin::Initialize(gl::GLSurfaceFormat format) {
return true;
}
-bool DirectCompositionChildSurfaceWin::InitializeSurface() {
- TRACE_EVENT1("gpu", "DirectCompositionChildSurfaceWin::InitializeSurface()",
- "use_dcomp_surface_", use_dcomp_surface_);
- if (!ReleaseDrawTexture(true /* will_discard */))
- return false;
- dcomp_surface_.Reset();
- swap_chain_.Reset();
-
- DXGI_FORMAT output_format =
- is_hdr_ ? DXGI_FORMAT_R16G16B16A16_FLOAT : DXGI_FORMAT_B8G8R8A8_UNORM;
- if (use_dcomp_surface_) {
- // Always treat as premultiplied, because an underlay could cause it to
- // become transparent.
- HRESULT hr = dcomp_device_->CreateSurface(
- size_.width(), size_.height(), output_format,
- DXGI_ALPHA_MODE_PREMULTIPLIED, dcomp_surface_.GetAddressOf());
- has_been_rendered_to_ = false;
- if (FAILED(hr)) {
- DLOG(ERROR) << "CreateSurface failed with error " << std::hex << hr;
- return false;
- }
- } else {
- DXGI_ALPHA_MODE alpha_mode =
- has_alpha_ ? DXGI_ALPHA_MODE_PREMULTIPLIED : DXGI_ALPHA_MODE_IGNORE;
- Microsoft::WRL::ComPtr<IDXGIDevice> dxgi_device;
- d3d11_device_.CopyTo(dxgi_device.GetAddressOf());
- DCHECK(dxgi_device);
- Microsoft::WRL::ComPtr<IDXGIAdapter> dxgi_adapter;
- dxgi_device->GetAdapter(dxgi_adapter.GetAddressOf());
- DCHECK(dxgi_adapter);
- Microsoft::WRL::ComPtr<IDXGIFactory2> dxgi_factory;
- dxgi_adapter->GetParent(IID_PPV_ARGS(dxgi_factory.GetAddressOf()));
- DCHECK(dxgi_factory);
-
- DXGI_SWAP_CHAIN_DESC1 desc = {};
- desc.Width = size_.width();
- desc.Height = size_.height();
- desc.Format = output_format;
- desc.Stereo = FALSE;
- desc.SampleDesc.Count = 1;
- desc.BufferCount = 2;
- desc.BufferUsage = DXGI_USAGE_RENDER_TARGET_OUTPUT;
- desc.Scaling = DXGI_SCALING_STRETCH;
- desc.SwapEffect = DXGI_SWAP_EFFECT_FLIP_SEQUENTIAL;
- desc.AlphaMode = alpha_mode;
- desc.Flags = allow_tearing_ ? DXGI_SWAP_CHAIN_FLAG_ALLOW_TEARING : 0;
- HRESULT hr = dxgi_factory->CreateSwapChainForComposition(
- d3d11_device_.Get(), &desc, nullptr, swap_chain_.GetAddressOf());
- has_been_rendered_to_ = false;
- first_swap_ = true;
- if (FAILED(hr)) {
- DLOG(ERROR) << "CreateSwapChainForComposition failed with error "
- << std::hex << hr;
- return false;
- }
- }
- return true;
-}
-
bool DirectCompositionChildSurfaceWin::ReleaseDrawTexture(bool will_discard) {
- DCHECK(!gl::GLContext::GetCurrent());
+ // At the end we'll MakeCurrent the same surface but its handle will be
+ // |default_surface_|.
+ ui::ScopedReleaseCurrent release_current;
+
if (real_surface_) {
eglDestroySurface(GetDisplay(), real_surface_);
real_surface_ = nullptr;
@@ -164,8 +146,9 @@ bool DirectCompositionChildSurfaceWin::ReleaseDrawTexture(bool will_discard) {
}
dcomp_surface_serial_++;
} else if (!will_discard) {
- UINT interval = first_swap_ || !vsync_enabled_ || allow_tearing_ ? 0 : 1;
- UINT flags = allow_tearing_ ? DXGI_PRESENT_ALLOW_TEARING : 0;
+ bool allow_tearing = IsSwapChainTearingSupported();
+ UINT interval = first_swap_ || !vsync_enabled_ || allow_tearing ? 0 : 1;
+ UINT flags = allow_tearing ? DXGI_PRESENT_ALLOW_TEARING : 0;
DXGI_PRESENT_PARAMETERS params = {};
RECT dirty_rect = swap_rect_.ToRECT();
params.DirtyRectsCount = 1;
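These parameters feed the swap chain's Present1 call that follows the hunk. A reduced sketch of how the interval, the tearing flag, and the dirty rect combine (the names are illustrative, not the members above; assumes a Windows 10 SDK for DXGI_PRESENT_ALLOW_TEARING):

    #include <dxgi1_5.h>

    HRESULT PresentDirty(IDXGISwapChain1* swap_chain, RECT dirty,
                         bool first_swap, bool vsync_enabled,
                         bool allow_tearing) {
      // Tearing requires sync interval 0; vsync off or a first swap also
      // presents without waiting for vblank.
      UINT interval =
          (first_swap || !vsync_enabled || allow_tearing) ? 0 : 1;
      UINT flags = allow_tearing ? DXGI_PRESENT_ALLOW_TEARING : 0;
      DXGI_PRESENT_PARAMETERS params = {};
      params.DirtyRectsCount = 1;
      params.pDirtyRects = &dirty;
      return swap_chain->Present1(interval, flags, &params);
    }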
@@ -237,7 +220,6 @@ gfx::SwapResult DirectCompositionChildSurfaceWin::SwapBuffers(
// PresentationCallback is handled by DirectCompositionSurfaceWin. The child
// surface doesn't need to provide presentation feedback.
DCHECK(!callback);
- ui::ScopedReleaseCurrent release_current;
if (!ReleaseDrawTexture(false /* will_discard */))
return gfx::SwapResult::SWAP_FAILED;
return gfx::SwapResult::SWAP_ACK;
@@ -280,34 +262,80 @@ bool DirectCompositionChildSurfaceWin::SupportsDCLayers() const {
bool DirectCompositionChildSurfaceWin::SetDrawRectangle(
const gfx::Rect& rectangle) {
if (!gfx::Rect(size_).Contains(rectangle)) {
- DLOG(ERROR) << "Draw rectangle must be contained within size of surface";
+ VLOG(1) << "Draw rectangle must be contained within size of surface";
return false;
}
if (draw_texture_) {
- DLOG(ERROR) << "SetDrawRectangle must be called only once per swap buffers";
+ VLOG(1) << "SetDrawRectangle must be called only once per swap buffers";
return false;
}
-
DCHECK(!real_surface_);
+ DCHECK(!g_current_surface);
+
+ if (gfx::Rect(size_) != rectangle && !swap_chain_ && !dcomp_surface_) {
+ VLOG(1) << "First draw to surface must draw to everything";
+ return false;
+ }
+ // At the end we'll MakeCurrent the same surface but its handle will be
+ // |real_surface_|.
ui::ScopedReleaseCurrent release_current;
- if ((use_dcomp_surface_ && !dcomp_surface_) ||
- (!use_dcomp_surface_ && !swap_chain_)) {
- if (!InitializeSurface()) {
- DLOG(ERROR) << "InitializeSurface failed";
+ DXGI_FORMAT output_format =
+ is_hdr_ ? DXGI_FORMAT_R16G16B16A16_FLOAT : DXGI_FORMAT_B8G8R8A8_UNORM;
+ if (enable_dc_layers_ && !dcomp_surface_) {
+ TRACE_EVENT2("gpu", "DirectCompositionChildSurfaceWin::CreateSurface",
+ "width", size_.width(), "height", size_.height());
+ swap_chain_.Reset();
+ // Always treat as premultiplied, because an underlay could cause it to
+ // become transparent.
+ HRESULT hr = dcomp_device_->CreateSurface(
+ size_.width(), size_.height(), output_format,
+ DXGI_ALPHA_MODE_PREMULTIPLIED, dcomp_surface_.GetAddressOf());
+ if (FAILED(hr)) {
+ VLOG(1) << "CreateSurface failed with error " << std::hex << hr;
return false;
}
- }
+ } else if (!enable_dc_layers_ && !swap_chain_) {
+ TRACE_EVENT2("gpu", "DirectCompositionChildSurfaceWin::CreateSwapChain",
+ "width", size_.width(), "height", size_.height());
+ dcomp_surface_.Reset();
- // Check this after reinitializing the surface because we reset state there.
- if (gfx::Rect(size_) != rectangle && !has_been_rendered_to_) {
- DLOG(ERROR) << "First draw to surface must draw to everything";
- return false;
- }
+ DXGI_ALPHA_MODE alpha_mode =
+ has_alpha_ ? DXGI_ALPHA_MODE_PREMULTIPLIED : DXGI_ALPHA_MODE_IGNORE;
+ Microsoft::WRL::ComPtr<IDXGIDevice> dxgi_device;
+ d3d11_device_.CopyTo(dxgi_device.GetAddressOf());
+ DCHECK(dxgi_device);
+ Microsoft::WRL::ComPtr<IDXGIAdapter> dxgi_adapter;
+ dxgi_device->GetAdapter(dxgi_adapter.GetAddressOf());
+ DCHECK(dxgi_adapter);
+ Microsoft::WRL::ComPtr<IDXGIFactory2> dxgi_factory;
+ dxgi_adapter->GetParent(IID_PPV_ARGS(dxgi_factory.GetAddressOf()));
+ DCHECK(dxgi_factory);
- DCHECK(!g_current_surface);
+ DXGI_SWAP_CHAIN_DESC1 desc = {};
+ desc.Width = size_.width();
+ desc.Height = size_.height();
+ desc.Format = output_format;
+ desc.Stereo = FALSE;
+ desc.SampleDesc.Count = 1;
+ desc.BufferCount = 2;
+ desc.BufferUsage = DXGI_USAGE_RENDER_TARGET_OUTPUT;
+ desc.Scaling = DXGI_SCALING_STRETCH;
+ desc.SwapEffect = DXGI_SWAP_EFFECT_FLIP_SEQUENTIAL;
+ desc.AlphaMode = alpha_mode;
+ desc.Flags =
+ IsSwapChainTearingSupported() ? DXGI_SWAP_CHAIN_FLAG_ALLOW_TEARING : 0;
+ HRESULT hr = dxgi_factory->CreateSwapChainForComposition(
+ d3d11_device_.Get(), &desc, nullptr, swap_chain_.GetAddressOf());
+ first_swap_ = true;
+ if (FAILED(hr)) {
+ VLOG(1) << "CreateSwapChainForComposition failed with error " << std::hex
+ << hr;
+ return false;
+ }
+ }
swap_rect_ = rectangle;
draw_offset_ = gfx::Vector2d();
@@ -318,7 +346,7 @@ bool DirectCompositionChildSurfaceWin::SetDrawRectangle(
HRESULT hr = dcomp_surface_->BeginDraw(
&rect, IID_PPV_ARGS(draw_texture_.GetAddressOf()), &update_offset);
if (FAILED(hr)) {
- DLOG(ERROR) << "BeginDraw failed with error " << std::hex << hr;
+ VLOG(1) << "BeginDraw failed with error " << std::hex << hr;
return false;
}
draw_offset_ = gfx::Point(update_offset) - rectangle.origin();
@@ -326,7 +354,6 @@ bool DirectCompositionChildSurfaceWin::SetDrawRectangle(
swap_chain_->GetBuffer(0, IID_PPV_ARGS(draw_texture_.GetAddressOf()));
}
DCHECK(draw_texture_);
- has_been_rendered_to_ = true;
g_current_surface = dcomp_surface_.Get();
@@ -346,8 +373,8 @@ bool DirectCompositionChildSurfaceWin::SetDrawRectangle(
eglCreatePbufferFromClientBuffer(GetDisplay(), EGL_D3D_TEXTURE_ANGLE,
buffer, GetConfig(), pbuffer_attribs);
if (!real_surface_) {
- DLOG(ERROR) << "eglCreatePbufferFromClientBuffer failed with error "
- << ui::GetLastEGLErrorString();
+ VLOG(1) << "eglCreatePbufferFromClientBuffer failed with error "
+ << ui::GetLastEGLErrorString();
return false;
}
@@ -362,4 +389,56 @@ void DirectCompositionChildSurfaceWin::SetVSyncEnabled(bool enabled) {
vsync_enabled_ = enabled;
}
+bool DirectCompositionChildSurfaceWin::Resize(const gfx::Size& size,
+ float scale_factor,
+ ColorSpace color_space,
+ bool has_alpha) {
+ bool size_changed = size != size_;
+ bool is_hdr = color_space == ColorSpace::SCRGB_LINEAR;
+ bool hdr_changed = is_hdr != is_hdr_;
+ bool alpha_changed = has_alpha != has_alpha_;
+ if (!size_changed && !hdr_changed && !alpha_changed)
+ return true;
+
+ // This will release indirect references to swap chain (|real_surface_|) by
+ // binding |default_surface_| as the default framebuffer.
+ if (!ReleaseDrawTexture(true /* will_discard */))
+ return false;
+
+ size_ = size;
+ is_hdr_ = is_hdr;
+ has_alpha_ = has_alpha;
+
+ // ResizeBuffers can't change alpha blending mode.
+ if (swap_chain_ && !alpha_changed) {
+ DXGI_FORMAT format =
+ is_hdr_ ? DXGI_FORMAT_R16G16B16A16_FLOAT : DXGI_FORMAT_B8G8R8A8_UNORM;
+ UINT flags =
+ IsSwapChainTearingSupported() ? DXGI_SWAP_CHAIN_FLAG_ALLOW_TEARING : 0;
+ HRESULT hr = swap_chain_->ResizeBuffers(2 /* BufferCount */, size.width(),
+ size.height(), format, flags);
+ UMA_HISTOGRAM_BOOLEAN("GPU.DirectComposition.SwapChainResizeResult",
+ SUCCEEDED(hr));
+ if (SUCCEEDED(hr))
+ return true;
+ DLOG(ERROR) << "ResizeBuffers failed with error 0x" << std::hex << hr;
+ }
+ // Next SetDrawRectangle call will recreate the swap chain or surface.
+ swap_chain_.Reset();
+ dcomp_surface_.Reset();
+ return true;
+}
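The resize-or-recreate policy above, reduced to its DXGI core as a sketch: ResizeBuffers keeps the swap chain but cannot change attributes such as the alpha mode, so those changes drop the swap chain and let the next SetDrawRectangle recreate it. Names and the fallback shape are illustrative:

    #include <dxgi1_2.h>
    #include <wrl/client.h>

    bool ResizeOrDrop(Microsoft::WRL::ComPtr<IDXGISwapChain1>& swap_chain,
                      UINT width, UINT height, DXGI_FORMAT format,
                      UINT flags) {
      if (swap_chain) {
        HRESULT hr = swap_chain->ResizeBuffers(2 /* BufferCount */, width,
                                               height, format, flags);
        if (SUCCEEDED(hr))
          return true;  // same swap chain, reallocated buffers
      }
      swap_chain.Reset();  // recreated lazily on the next draw
      return false;
    }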
+
+bool DirectCompositionChildSurfaceWin::SetEnableDCLayers(bool enable) {
+ if (enable_dc_layers_ == enable)
+ return true;
+ enable_dc_layers_ = enable;
+ // Next SetDrawRectangle call will recreate the swap chain or surface.
+ if (!ReleaseDrawTexture(true /* will_discard */))
+ return false;
+ swap_chain_.Reset();
+ dcomp_surface_.Reset();
+ return true;
+}
+
} // namespace gpu
diff --git a/chromium/gpu/ipc/service/direct_composition_child_surface_win.h b/chromium/gpu/ipc/service/direct_composition_child_surface_win.h
index 21846047f0a..a91071e594b 100644
--- a/chromium/gpu/ipc/service/direct_composition_child_surface_win.h
+++ b/chromium/gpu/ipc/service/direct_composition_child_surface_win.h
@@ -18,14 +18,9 @@ namespace gpu {
class GPU_IPC_SERVICE_EXPORT DirectCompositionChildSurfaceWin
: public gl::GLSurfaceEGL {
public:
- DirectCompositionChildSurfaceWin(const gfx::Size& size,
- bool is_hdr,
- bool has_alpha,
- bool use_dcomp_surface,
- bool allow_tearing);
+ DirectCompositionChildSurfaceWin();
// GLSurfaceEGL implementation.
- using GLSurface::Initialize;
bool Initialize(gl::GLSurfaceFormat format) override;
void Destroy() override;
gfx::Size GetSize() override;
@@ -39,6 +34,11 @@ class GPU_IPC_SERVICE_EXPORT DirectCompositionChildSurfaceWin
bool SetDrawRectangle(const gfx::Rect& rect) override;
gfx::Vector2d GetDrawOffset() const override;
void SetVSyncEnabled(bool enabled) override;
+ bool Resize(const gfx::Size& size,
+ float scale_factor,
+ ColorSpace color_space,
+ bool has_alpha) override;
+ bool SetEnableDCLayers(bool enable) override;
const Microsoft::WRL::ComPtr<IDCompositionSurface>& dcomp_surface() const {
return dcomp_surface_;
@@ -54,14 +54,17 @@ class GPU_IPC_SERVICE_EXPORT DirectCompositionChildSurfaceWin
~DirectCompositionChildSurfaceWin() override;
private:
- // Releases previous surface or swap chain, and initializes new surface or
- // swap chain.
- bool InitializeSurface();
// Release the texture that's currently being drawn to. If will_discard is
// true then the surface should be discarded without swapping any contents
// to it. Returns false if this fails.
bool ReleaseDrawTexture(bool will_discard);
+ gfx::Size size_ = gfx::Size(1, 1);
+ bool enable_dc_layers_ = false;
+ bool is_hdr_ = false;
+ bool has_alpha_ = true;
+ bool vsync_enabled_ = true;
+
// This is a placeholder surface used when not rendering to the
// DirectComposition surface.
EGLSurface default_surface_ = 0;
@@ -70,14 +73,8 @@ class GPU_IPC_SERVICE_EXPORT DirectCompositionChildSurfaceWin
// outside of a BeginDraw/EndDraw pair.
EGLSurface real_surface_ = 0;
bool first_swap_ = true;
- const gfx::Size size_;
- const bool is_hdr_;
- const bool has_alpha_;
- const bool use_dcomp_surface_;
- const bool allow_tearing_;
gfx::Rect swap_rect_;
gfx::Vector2d draw_offset_;
- bool vsync_enabled_ = true;
// This is a number that increments once for every EndDraw on a surface, and
// is used to determine when the contents have changed so Commit() needs to
@@ -90,10 +87,6 @@ class GPU_IPC_SERVICE_EXPORT DirectCompositionChildSurfaceWin
Microsoft::WRL::ComPtr<IDXGISwapChain1> swap_chain_;
Microsoft::WRL::ComPtr<ID3D11Texture2D> draw_texture_;
- // Keep track of whether the texture has been rendered to, as the first draw
- // to it must overwrite the entire thing.
- bool has_been_rendered_to_ = false;
-
DISALLOW_COPY_AND_ASSIGN(DirectCompositionChildSurfaceWin);
};
diff --git a/chromium/gpu/ipc/service/direct_composition_surface_win.cc b/chromium/gpu/ipc/service/direct_composition_surface_win.cc
index 4161187932f..46710ba1e6e 100644
--- a/chromium/gpu/ipc/service/direct_composition_surface_win.cc
+++ b/chromium/gpu/ipc/service/direct_composition_surface_win.cc
@@ -9,14 +9,13 @@
#include <dxgi1_6.h>
#include "base/containers/circular_deque.h"
-#include "base/debug/alias.h"
-#include "base/debug/dump_without_crashing.h"
#include "base/feature_list.h"
#include "base/metrics/histogram_functions.h"
#include "base/metrics/histogram_macros.h"
#include "base/synchronization/waitable_event.h"
#include "base/trace_event/trace_event.h"
#include "base/win/scoped_handle.h"
+#include "base/win/windows_types.h"
#include "base/win/windows_version.h"
#include "gpu/command_buffer/service/feature_info.h"
#include "gpu/config/gpu_finch_features.h"
@@ -54,6 +53,10 @@ bool SizeContains(const gfx::Size& a, const gfx::Size& b) {
return gfx::Rect(a).Contains(gfx::Rect(b));
}
+bool IsProtectedVideo(ui::ProtectedVideoType protected_video_type) {
+ return protected_video_type != ui::ProtectedVideoType::kClear;
+}
+
// This keeps track of whether the previous 30 frames used Overlays or GPU
// composition to present.
class PresentationHistory {
@@ -123,12 +126,11 @@ bool g_supports_scaled_overlays = true;
// Used for workaround limiting overlay size to monitor size.
gfx::Size g_overlay_monitor_size;
-// Overridden when NV12 is supported, and kDirectCompositionPreferNV12Overlays
-// finch feature is enabled. Default value is set to YUY2 so that we use a valid
-// format for swap chains when forced to enable overlay code path but hardware
-// overlays are not supported.
-OverlayFormat g_overlay_format_used = OverlayFormat::kYUY2;
-DXGI_FORMAT g_overlay_dxgi_format_used = DXGI_FORMAT_YUY2;
+// Preferred overlay format set when detecting hardware overlay support during
+// initialization. Set to NV12 by default so that it's used when enabling
+// overlays using command line flags.
+OverlayFormat g_overlay_format_used = OverlayFormat::kNV12;
+DXGI_FORMAT g_overlay_dxgi_format_used = DXGI_FORMAT_NV12;
// This is the raw support info, which shouldn't depend on field trial state, or
// command line flags. Ordered by most preferred to least preferred format.
@@ -138,6 +140,20 @@ OverlaySupportInfo g_overlay_support_info[] = {
{OverlayFormat::kBGRA, DXGI_FORMAT_B8G8R8A8_UNORM, 0},
};
+const char* ProtectedVideoTypeToString(ui::ProtectedVideoType type) {
+ switch (type) {
+ case ui::ProtectedVideoType::kClear:
+ return "Clear";
+ case ui::ProtectedVideoType::kSoftwareProtected:
+ if (g_supports_overlays)
+ return "SoftwareProtected.HasOverlaySupport";
+ else
+ return "SoftwareProtected.NoOverlaySupport";
+ case ui::ProtectedVideoType::kHardwareProtected:
+ return "HardwareProtected";
+ }
+}
+
void InitializeHardwareOverlaySupport() {
if (g_overlay_support_initialized)
return;
@@ -461,9 +477,9 @@ class DCLayerTree::SwapChainPresenter {
// Releases resources that might hold indirect references to the swap chain.
void ReleaseSwapChainResources();
- // Recreate swap chain using given size. Use preferred YUV format if |yuv| is
- // true, or BGRA otherwise. Sets flags based on |protected_video_type|.
- // Returns true on success.
+ // Recreate swap chain using given size. Use preferred YUV format if
+ // |use_yuv_swap_chain| is true, or BGRA otherwise. Sets flags based on
+ // |protected_video_type|. Returns true on success.
bool ReallocateSwapChain(const gfx::Size& swap_chain_size,
bool use_yuv_swap_chain,
ui::ProtectedVideoType protected_video_type);
@@ -475,13 +491,13 @@ class DCLayerTree::SwapChainPresenter {
// Perform a blit using video processor from given input texture to swap chain
// backbuffer. |input_texture| is the input texture (array), and |input_level|
// is the index of the texture in the texture array. |keyed_mutex| is
- // optional, and is used to lock the resource for reading. |input_size| is
- // the size of the input texture, and |src_color_space| is the color space
- // of the video.
+ // optional, and is used to lock the resource for reading. |content_rect| is
+ // subrectangle of the input texture that should be blitted to swap chain, and
+ // |src_color_space| is the color space of the video.
bool VideoProcessorBlt(Microsoft::WRL::ComPtr<ID3D11Texture2D> input_texture,
UINT input_level,
Microsoft::WRL::ComPtr<IDXGIKeyedMutex> keyed_mutex,
- const gfx::Size& input_size,
+ const gfx::Rect& content_rect,
const gfx::ColorSpace& src_color_space);
// Returns optimal swap chain size for given layer.
@@ -492,10 +508,13 @@ class DCLayerTree::SwapChainPresenter {
bool UpdateVisuals(const ui::DCRendererLayerParams& params,
const gfx::Size& swap_chain_size);
- // Whether the video is protected
- bool IsProtectedVideo(ui::ProtectedVideoType protected_video_type) const {
- return (protected_video_type != ui::ProtectedVideoType::kClear);
- }
+ // Present to a decode swap chain created from compatible video decoder
+ // buffers using given |image_dxgi| with destination size |swap_chain_size|.
+ // Sets |needs_commit| to true if a commit is needed. Returns true on success.
+ bool PresentToDecodeSwapChain(gl::GLImageDXGI* image_dxgi,
+ const gfx::Rect& content_rect,
+ const gfx::Size& swap_chain_size,
+ bool* needs_commit);
// Layer tree instance that owns this swap chain presenter.
DCLayerTree* layer_tree_;
@@ -506,6 +525,9 @@ class DCLayerTree::SwapChainPresenter {
// Whether the current swap chain is using the preferred YUV format.
bool is_yuv_swapchain_ = false;
+ // Whether the swap chain was reallocated, and next present will be the first.
+ bool first_present_ = false;
+
// Whether the current swap chain is presenting protected video, and whether
// the protection is software or hardware.
ui::ProtectedVideoType protected_video_type_ = ui::ProtectedVideoType::kClear;
@@ -517,6 +539,10 @@ class DCLayerTree::SwapChainPresenter {
// Whether creating a YUV swap chain failed.
bool failed_to_create_yuv_swapchain_ = false;
+ // Set to true when PresentToDecodeSwapChain fails for the first time after
+ // which we won't attempt to use decode swap chain again.
+ bool failed_to_present_decode_swapchain_ = false;
+
// Number of frames since we switched from YUV to BGRA swap chain, or
// vice-versa.
int frames_since_color_space_change_ = 0;
@@ -539,8 +565,9 @@ class DCLayerTree::SwapChainPresenter {
// |content_visual_|, and root of the visual tree for this layer.
Microsoft::WRL::ComPtr<IDCompositionVisual2> clip_visual_;
- // These are the GLImages that were presented in the last frame.
- std::vector<scoped_refptr<gl::GLImage>> last_gl_images_;
+ // GLImages that were presented in the last frame.
+ scoped_refptr<gl::GLImage> last_y_image_;
+ scoped_refptr<gl::GLImage> last_uv_image_;
// NV12 staging texture used for software decoded YUV buffers. Mapped to CPU
// for copying from YUV buffers.
@@ -551,14 +578,18 @@ class DCLayerTree::SwapChainPresenter {
Microsoft::WRL::ComPtr<IDCompositionDevice2> dcomp_device_;
Microsoft::WRL::ComPtr<IDXGISwapChain1> swap_chain_;
- // Handle returned by DCompositionCreateSurfaceHandle() used to create swap
- // chain that can be used for direct composition.
+ // Handle returned by DCompositionCreateSurfaceHandle() used to create YUV
+ // swap chain that can be used for direct composition.
base::win::ScopedHandle swap_chain_handle_;
// Video processor output view created from swap chain back buffer. Must be
// cached for performance reasons.
Microsoft::WRL::ComPtr<ID3D11VideoProcessorOutputView> output_view_;
+ Microsoft::WRL::ComPtr<IDXGIResource> decode_resource_;
+ Microsoft::WRL::ComPtr<IDXGIDecodeSwapChain> decode_swap_chain_;
+ Microsoft::WRL::ComPtr<IUnknown> decode_surface_;
+
DISALLOW_COPY_AND_ASSIGN(SwapChainPresenter);
};
@@ -779,16 +810,30 @@ bool DCLayerTree::SwapChainPresenter::UploadVideoImages(
gfx::Size DCLayerTree::SwapChainPresenter::CalculateSwapChainSize(
const ui::DCRendererLayerParams& params) {
- // Swap chain size is the minimum of the on-screen size and the source
- // size so the video processor can do the minimal amount of work and
- // the overlay has to read the minimal amount of data.
- // DWM is also less likely to promote a surface to an overlay if it's
- // much larger than its area on-screen.
+ // Swap chain size is the minimum of the on-screen size and the source size so
+ // the video processor can do the minimal amount of work and the overlay has
+ // to read the minimal amount of data. DWM is also less likely to promote a
+ // surface to an overlay if it's much larger than its area on-screen.
+ gfx::Size swap_chain_size = params.content_rect.size();
+
+ // If transform isn't a scale or translation then swap chain can't be promoted
+ // to an overlay so avoid blitting to a large surface unnecessarily. Also,
+ // after the video rotation fix (crbug.com/904035), using rotated size for
+ // swap chain size will cause stretching since there's no squashing factor in
+ // the transform to counteract.
+ // TODO(sunnyps): Support 90/180/270 deg rotations using video context.
+ if (params.transform.IsScaleOrTranslation()) {
+ gfx::RectF bounds(params.quad_rect);
+ params.transform.TransformRect(&bounds);
+ swap_chain_size = gfx::ToEnclosingRect(bounds).size();
+ }
- // display_rect is the rect on screen
- gfx::RectF transformed_rect = gfx::RectF(params.rect);
- params.transform.TransformRect(&transformed_rect);
- gfx::Rect display_rect = gfx::ToEnclosingRect(transformed_rect);
+ if (g_supports_scaled_overlays) {
+ // Downscaling doesn't work on Intel display HW, and so DWM will perform an
+ // extra BLT to avoid HW downscaling. This prevents the use of hardware
+ // overlays especially for protected video.
+ swap_chain_size.SetToMin(params.content_rect.size());
+ }
if (layer_tree_->workarounds().disable_larger_than_screen_overlays &&
!g_overlay_monitor_size.IsEmpty()) {
@@ -803,32 +848,19 @@ gfx::Size DCLayerTree::SwapChainPresenter::CalculateSwapChainSize(
// TODO(jbauman): Remove when http://crbug.com/668278 is fixed.
const int kOversizeMargin = 3;
- if ((display_rect.x() >= 0) &&
- (display_rect.width() > g_overlay_monitor_size.width()) &&
- (display_rect.width() <=
+ if ((swap_chain_size.width() > g_overlay_monitor_size.width()) &&
+ (swap_chain_size.width() <=
g_overlay_monitor_size.width() + kOversizeMargin)) {
- display_rect.set_width(g_overlay_monitor_size.width());
+ swap_chain_size.set_width(g_overlay_monitor_size.width());
}
- if ((display_rect.y() >= 0) &&
- (display_rect.height() > g_overlay_monitor_size.height()) &&
- (display_rect.height() <=
+ if ((swap_chain_size.height() > g_overlay_monitor_size.height()) &&
+ (swap_chain_size.height() <=
g_overlay_monitor_size.height() + kOversizeMargin)) {
- display_rect.set_height(g_overlay_monitor_size.height());
+ swap_chain_size.set_height(g_overlay_monitor_size.height());
}
}
- // Downscaling doesn't work on Intel display HW, and so DWM will perform
- // an extra BLT to avoid HW downscaling. This prevents the use of hardware
- // overlays especially for protected video.
- gfx::Size swap_chain_size = display_rect.size();
-
- if (g_supports_scaled_overlays) {
- gfx::Size ceiled_input_size =
- gfx::ToCeiledSize(params.contents_rect.size());
- swap_chain_size.SetToMin(ceiled_input_size);
- }
-
// 4:2:2 subsampled formats like YUY2 must have an even width, and 4:2:0
// subsampled formats like NV12 must have an even width and height.
if (swap_chain_size.width() % 2 == 1)
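A sketch of the subsampling constraint the comment states; the direction of the adjustment (rounding up here) is illustrative, what matters is that 4:2:2 formats need an even width and 4:2:0 formats an even width and height. A plain struct stands in for gfx::Size:

    struct Size {
      int width;
      int height;
    };

    // YUY2 (4:2:2) needs even width; NV12 (4:2:0) needs even width and
    // height. Rounding up avoids collapsing a 1-pixel dimension to zero.
    Size AlignFor420(Size s) {
      if (s.width % 2 == 1)
        ++s.width;
      if (s.height % 2 == 1)
        ++s.height;
      return s;
    }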
@@ -854,26 +886,25 @@ bool DCLayerTree::SwapChainPresenter::UpdateVisuals(
needs_commit = true;
}
- // This is the scale from the swapchain size to the size of the contents
- // onscreen.
- float swap_chain_scale_x =
- params.rect.width() * 1.0f / swap_chain_size.width();
- float swap_chain_scale_y =
- params.rect.height() * 1.0f / swap_chain_size.height();
+ // Visual offset is applied before transform so it behaves similar to how the
+ // compositor uses transform to map quad rect in layer space to target space.
+ gfx::Point offset = params.quad_rect.origin();
gfx::Transform transform = params.transform;
- gfx::Transform scale_transform;
- scale_transform.Scale(swap_chain_scale_x, swap_chain_scale_y);
- transform.PreconcatTransform(scale_transform);
- transform.Transpose();
- // Offset is in layer space, and is applied before transform.
- // TODO(magchen): We should consider recalculating offset when it's non-zero.
- // Have not seen non-zero params.rect.x() and y() so far.
- gfx::Point offset(params.rect.x(), params.rect.y());
+ // Transform is correct for scaling up |quad_rect| to on screen bounds, but
+ // doesn't include scaling transform from |swap_chain_size| to |quad_rect|.
+ // Since |swap_chain_size| could be equal to on screen bounds, and therefore
+ // possibly larger than |quad_rect|, this scaling could be downscaling, but
+ // only to the extent that it would cancel upscaling already in the transform.
+ float swap_chain_scale_x =
+ params.quad_rect.width() * 1.0f / swap_chain_size.width();
+ float swap_chain_scale_y =
+ params.quad_rect.height() * 1.0f / swap_chain_size.height();
+ transform.Scale(swap_chain_scale_x, swap_chain_scale_y);
- if (visual_info_.transform != transform || visual_info_.offset != offset) {
- visual_info_.transform = transform;
+ if (visual_info_.offset != offset || visual_info_.transform != transform) {
visual_info_.offset = offset;
+ visual_info_.transform = transform;
needs_commit = true;
content_visual_->SetOffsetX(offset.x());
@@ -882,10 +913,11 @@ bool DCLayerTree::SwapChainPresenter::UpdateVisuals(
Microsoft::WRL::ComPtr<IDCompositionMatrixTransform> dcomp_transform;
dcomp_device_->CreateMatrixTransform(dcomp_transform.GetAddressOf());
DCHECK(dcomp_transform);
+ // SkMatrix44 is column-major, but D2D_MATRIX_3X2_F is row-major.
D2D_MATRIX_3X2_F d2d_matrix = {
- {{transform.matrix().get(0, 0), transform.matrix().get(0, 1),
- transform.matrix().get(1, 0), transform.matrix().get(1, 1),
- transform.matrix().get(3, 0), transform.matrix().get(3, 1)}}};
+ {{transform.matrix().get(0, 0), transform.matrix().get(1, 0),
+ transform.matrix().get(0, 1), transform.matrix().get(1, 1),
+ transform.matrix().get(0, 3), transform.matrix().get(1, 3)}}};
dcomp_transform->SetMatrix(d2d_matrix);
content_visual_->SetTransform(dcomp_transform.Get());
}
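The index mapping behind that initializer, sketched with stand-in matrix types (assuming get(row, col) addressing on a column-major 4x4, as with SkMatrix44, and the row-major _11.._32 layout of D2D_MATRIX_3X2_F):

    struct Mat4 {  // column-major storage, like SkMatrix44
      float m[4][4];  // m[col][row]
      float get(int row, int col) const { return m[col][row]; }
    };

    struct Mat3x2 {  // row-major 3x2 affine, like D2D_MATRIX_3X2_F
      float _11, _12, _21, _22, _31, _32;
    };

    // D2D maps a row vector: x' = x*_11 + y*_21 + _31. Matching that to
    // row 0 of the 4x4 transposes the 2x2 linear block and takes the
    // translation from column 3.
    Mat3x2 ToAffine2D(const Mat4& t) {
      return {t.get(0, 0), t.get(1, 0),
              t.get(0, 1), t.get(1, 1),
              t.get(0, 3), t.get(1, 3)};
    }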
@@ -895,18 +927,17 @@ bool DCLayerTree::SwapChainPresenter::UpdateVisuals(
visual_info_.is_clipped = params.is_clipped;
visual_info_.clip_rect = params.clip_rect;
needs_commit = true;
- // DirectComposition clips happen in the pre-transform visual
- // space, while cc/ clips happen post-transform. So the clip needs
- // to go on a separate parent visual that's untransformed.
+ // DirectComposition clips happen in the pre-transform visual space, while
+ // cc/ clips happen post-transform. So the clip needs to go on a separate
+ // parent visual that's untransformed.
if (params.is_clipped) {
Microsoft::WRL::ComPtr<IDCompositionRectangleClip> clip;
dcomp_device_->CreateRectangleClip(clip.GetAddressOf());
DCHECK(clip);
- const gfx::Rect& offset_clip = params.clip_rect;
- clip->SetLeft(offset_clip.x());
- clip->SetRight(offset_clip.right());
- clip->SetBottom(offset_clip.bottom());
- clip->SetTop(offset_clip.y());
+ clip->SetLeft(params.clip_rect.x());
+ clip->SetRight(params.clip_rect.right());
+ clip->SetBottom(params.clip_rect.bottom());
+ clip->SetTop(params.clip_rect.y());
clip_visual_->SetClip(clip.Get());
} else {
clip_visual_->SetClip(nullptr);
@@ -915,23 +946,146 @@ bool DCLayerTree::SwapChainPresenter::UpdateVisuals(
return needs_commit;
}
+bool DCLayerTree::SwapChainPresenter::PresentToDecodeSwapChain(
+ gl::GLImageDXGI* image_dxgi,
+ const gfx::Rect& content_rect,
+ const gfx::Size& swap_chain_size,
+ bool* needs_commit) {
+ DCHECK(!swap_chain_size.IsEmpty());
+
+ Microsoft::WRL::ComPtr<IDXGIResource> decode_resource;
+ image_dxgi->texture()->QueryInterface(
+ IID_PPV_ARGS(decode_resource.GetAddressOf()));
+ DCHECK(decode_resource);
+
+ if (!decode_swap_chain_ || decode_resource_ != decode_resource) {
+ ReleaseSwapChainResources();
+
+ decode_resource_ = decode_resource;
+
+ HANDLE handle = INVALID_HANDLE_VALUE;
+ if (!CreateSurfaceHandleHelper(&handle))
+ return false;
+ swap_chain_handle_.Set(handle);
+
+ Microsoft::WRL::ComPtr<IDXGIDevice> dxgi_device;
+ d3d11_device_.CopyTo(dxgi_device.GetAddressOf());
+ DCHECK(dxgi_device);
+ Microsoft::WRL::ComPtr<IDXGIAdapter> dxgi_adapter;
+ dxgi_device->GetAdapter(dxgi_adapter.GetAddressOf());
+ DCHECK(dxgi_adapter);
+ Microsoft::WRL::ComPtr<IDXGIFactoryMedia> media_factory;
+ dxgi_adapter->GetParent(IID_PPV_ARGS(media_factory.GetAddressOf()));
+ DCHECK(media_factory);
+
+ DXGI_DECODE_SWAP_CHAIN_DESC desc = {};
+ desc.Flags = 0;
+ HRESULT hr =
+ media_factory->CreateDecodeSwapChainForCompositionSurfaceHandle(
+ d3d11_device_.Get(), swap_chain_handle_.Get(), &desc,
+ decode_resource_.Get(), nullptr,
+ decode_swap_chain_.ReleaseAndGetAddressOf());
+ base::UmaHistogramSparse(
+ "GPU.DirectComposition.DecodeSwapChainCreationResult", hr);
+ if (FAILED(hr)) {
+ DLOG(ERROR) << "CreateDecodeSwapChainForCompositionSurfaceHandle failed "
+ "with error 0x"
+ << std::hex << hr;
+ return false;
+ }
+ DCHECK(decode_swap_chain_);
+
+ Microsoft::WRL::ComPtr<IDCompositionDesktopDevice> desktop_device;
+ dcomp_device_.CopyTo(desktop_device.GetAddressOf());
+ DCHECK(desktop_device);
+
+ desktop_device->CreateSurfaceFromHandle(
+ swap_chain_handle_.Get(), decode_surface_.ReleaseAndGetAddressOf());
+ base::UmaHistogramSparse(
+ "GPU.DirectComposition.DecodeSwapChainSurfaceCreationResult", hr);
+ if (FAILED(hr)) {
+ DLOG(ERROR) << "CreateSurfaceFromHandle failed with error 0x" << std::hex
+ << hr;
+ return false;
+ }
+ DCHECK(decode_surface_);
+
+ content_visual_->SetContent(decode_surface_.Get());
+ *needs_commit = true;
+ } else if (last_y_image_ == image_dxgi && last_uv_image_ == image_dxgi &&
+ swap_chain_size_ == swap_chain_size) {
+ // Early out if we're presenting the same image again.
+ return true;
+ }
+
+ RECT source_rect = content_rect.ToRECT();
+ decode_swap_chain_->SetSourceRect(&source_rect);
+
+ decode_swap_chain_->SetDestSize(swap_chain_size.width(),
+ swap_chain_size.height());
+ RECT target_rect = gfx::Rect(swap_chain_size).ToRECT();
+ decode_swap_chain_->SetTargetRect(&target_rect);
+
+ gfx::ColorSpace color_space = image_dxgi->color_space();
+ if (!color_space.IsValid())
+ color_space = gfx::ColorSpace::CreateREC709();
+
+ // TODO(sunnyps): Move this to gfx::ColorSpaceWin helper where we can access
+ // internal color space state and do a better job.
+ // Common color spaces have primaries and transfer function similar to BT 709
+ // and there are no other choices anyway.
+ int flags = DXGI_MULTIPLANE_OVERLAY_YCbCr_FLAG_BT709;
+ // Proper Rec 709 and 601 have limited or nominal color range.
+ if (color_space == gfx::ColorSpace::CreateREC709() ||
+ color_space == gfx::ColorSpace::CreateREC601()) {
+ flags |= DXGI_MULTIPLANE_OVERLAY_YCbCr_FLAG_NOMINAL_RANGE;
+ }
+ // xvYCC allows values outside the nominal range, which can encode
+ // out-of-gamut (negative) colors and thus a wider gamut.
+ if (color_space.FullRangeEncodedValues()) {
+ flags |= DXGI_MULTIPLANE_OVERLAY_YCbCr_FLAG_xvYCC;
+ }
+ decode_swap_chain_->SetColorSpace(
+ static_cast<DXGI_MULTIPLANE_OVERLAY_YCbCr_FLAGS>(flags));
+
+ HRESULT hr = decode_swap_chain_->PresentBuffer(image_dxgi->level(), 1, 0);
+ base::UmaHistogramSparse("GPU.DirectComposition.DecodeSwapChainPresentResult",
+ hr);
+ if (FAILED(hr)) {
+ DLOG(ERROR) << "PresentBuffer failed with error 0x" << std::hex << hr;
+ return false;
+ }
+
+ last_y_image_ = image_dxgi;
+ last_uv_image_ = image_dxgi;
+ swap_chain_size_ = swap_chain_size;
+ if (is_yuv_swapchain_) {
+ frames_since_color_space_change_++;
+ } else {
+ UMA_HISTOGRAM_COUNTS_1000(
+ "GPU.DirectComposition.FramesSinceColorSpaceChange",
+ frames_since_color_space_change_);
+ frames_since_color_space_change_ = 0;
+ is_yuv_swapchain_ = true;
+ }
+ return true;
+}
+
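The color-space reasoning above, factored into a sketch. The DXGI flag values are real (dxgi1_3.h); the two booleans stand in for the gfx::ColorSpace comparisons made in the code:

    #include <dxgi1_3.h>

    DXGI_MULTIPLANE_OVERLAY_YCbCr_FLAGS YCbCrFlagsFor(
        bool limited_range_709_or_601, bool full_range_encoded) {
      // Common video color spaces are close enough to BT.709 primaries.
      int flags = DXGI_MULTIPLANE_OVERLAY_YCbCr_FLAG_BT709;
      if (limited_range_709_or_601)
        flags |= DXGI_MULTIPLANE_OVERLAY_YCbCr_FLAG_NOMINAL_RANGE;
      if (full_range_encoded)
        flags |= DXGI_MULTIPLANE_OVERLAY_YCbCr_FLAG_xvYCC;
      return static_cast<DXGI_MULTIPLANE_OVERLAY_YCbCr_FLAGS>(flags);
    }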
bool DCLayerTree::SwapChainPresenter::PresentToSwapChain(
const ui::DCRendererLayerParams& params,
bool* needs_commit) {
*needs_commit = false;
- gl::GLImageDXGIBase* image_dxgi =
- gl::GLImageDXGIBase::FromGLImage(params.image[0].get());
- gl::GLImageMemory* y_image_memory = nullptr;
- gl::GLImageMemory* uv_image_memory = nullptr;
- if (params.image.size() >= 2) {
- y_image_memory = gl::GLImageMemory::FromGLImage(params.image[0].get());
- uv_image_memory = gl::GLImageMemory::FromGLImage(params.image[1].get());
- }
+ gl::GLImageDXGI* image_dxgi =
+ gl::GLImageDXGI::FromGLImage(params.y_image.get());
+ gl::GLImageMemory* y_image_memory =
+ gl::GLImageMemory::FromGLImage(params.y_image.get());
+ gl::GLImageMemory* uv_image_memory =
+ gl::GLImageMemory::FromGLImage(params.uv_image.get());
if (!image_dxgi && (!y_image_memory || !uv_image_memory)) {
DLOG(ERROR) << "Video GLImages are missing";
- last_gl_images_.clear();
+ // No need to release resources as context will be lost soon.
return false;
}
@@ -950,31 +1104,59 @@ bool DCLayerTree::SwapChainPresenter::PresentToSwapChain(
if (UpdateVisuals(params, swap_chain_size))
*needs_commit = true;
+ bool use_decode_swap_chain =
+ base::FeatureList::IsEnabled(
+ features::kDirectCompositionUseNV12DecodeSwapChain) &&
+ g_overlay_format_used == OverlayFormat::kNV12 &&
+ !failed_to_present_decode_swapchain_;
+ // TODO(sunnyps): Try using decode swap chain for uploaded video images.
+ if (image_dxgi && use_decode_swap_chain) {
+ D3D11_TEXTURE2D_DESC texture_desc = {};
+ image_dxgi->texture()->GetDesc(&texture_desc);
+ bool is_decoder_texture = texture_desc.BindFlags & D3D11_BIND_DECODER;
+ // Decode swap chains do not support shared resources.
+ // TODO(sunnyps): Find a workaround for when the decoder moves to its own
+ // thread and D3D device. See https://crbug.com/911847
+ bool is_shared_texture =
+ texture_desc.MiscFlags &
+ (D3D11_RESOURCE_MISC_SHARED | D3D11_RESOURCE_MISC_SHARED_KEYEDMUTEX |
+ D3D11_RESOURCE_MISC_SHARED_NTHANDLE);
+ if (is_decoder_texture && !is_shared_texture) {
+ if (PresentToDecodeSwapChain(image_dxgi, params.content_rect,
+ swap_chain_size, needs_commit)) {
+ return true;
+ }
+ ReleaseSwapChainResources();
+ failed_to_present_decode_swapchain_ = true;
+ DLOG(ERROR)
+ << "Present to decode swap chain failed - falling back to blit";
+ }
+ }
+
bool swap_chain_resized = swap_chain_size_ != swap_chain_size;
bool use_yuv_swap_chain = ShouldUseYUVSwapChain(params.protected_video_type);
bool toggle_yuv_swapchain = use_yuv_swap_chain != is_yuv_swapchain_;
bool toggle_protected_video =
protected_video_type_ != params.protected_video_type;
- bool first_present = false;
-
+ // Try reallocating swap chain if resizing fails.
if (!swap_chain_ || swap_chain_resized || toggle_yuv_swapchain ||
toggle_protected_video) {
if (!ReallocateSwapChain(swap_chain_size, use_yuv_swap_chain,
params.protected_video_type)) {
return false;
}
- first_present = true;
content_visual_->SetContent(swap_chain_.Get());
*needs_commit = true;
- } else if (last_gl_images_ == params.image) {
+ } else if (last_y_image_ == params.y_image &&
+ last_uv_image_ == params.uv_image) {
// The swap chain is presenting the same images as last swap, which means
// that the images were never returned to the video decoder and should
// have the same contents as last time. It shouldn't need to be redrawn.
return true;
}
-
- last_gl_images_ = params.image;
+ last_y_image_ = params.y_image;
+ last_uv_image_ = params.uv_image;
Microsoft::WRL::ComPtr<ID3D11Texture2D> input_texture;
UINT input_level;
@@ -1004,13 +1186,14 @@ bool DCLayerTree::SwapChainPresenter::PresentToSwapChain(
if (image_dxgi && image_dxgi->color_space().IsValid())
src_color_space = image_dxgi->color_space();
- gfx::Size input_size = gfx::ToCeiledSize(params.contents_rect.size());
-
- if (!VideoProcessorBlt(input_texture, input_level, keyed_mutex, input_size,
- src_color_space))
+ if (!VideoProcessorBlt(input_texture, input_level, keyed_mutex,
+ params.content_rect, src_color_space)) {
return false;
+ }
+
+ if (first_present_) {
+ first_present_ = false;
- if (first_present) {
HRESULT hr = swap_chain_->Present(0, 0);
if (FAILED(hr)) {
DLOG(ERROR) << "Present failed with error 0x" << std::hex << hr;
@@ -1076,11 +1259,12 @@ bool DCLayerTree::SwapChainPresenter::VideoProcessorBlt(
Microsoft::WRL::ComPtr<ID3D11Texture2D> input_texture,
UINT input_level,
Microsoft::WRL::ComPtr<IDXGIKeyedMutex> keyed_mutex,
- const gfx::Size& input_size,
+ const gfx::Rect& content_rect,
const gfx::ColorSpace& src_color_space) {
- if (!layer_tree_->InitializeVideoProcessor(input_size, swap_chain_size_))
+ if (!layer_tree_->InitializeVideoProcessor(content_rect.size(),
+ swap_chain_size_)) {
return false;
-
+ }
Microsoft::WRL::ComPtr<ID3D11VideoContext> video_context =
layer_tree_->video_context();
Microsoft::WRL::ComPtr<ID3D11VideoProcessor> video_processor =
@@ -1173,7 +1357,7 @@ bool DCLayerTree::SwapChainPresenter::VideoProcessorBlt(
TRUE, &dest_rect);
video_context->VideoProcessorSetStreamDestRect(video_processor.Get(), 0,
TRUE, &dest_rect);
- RECT source_rect = gfx::Rect(input_size).ToRECT();
+ RECT source_rect = content_rect.ToRECT();
video_context->VideoProcessorSetStreamSourceRect(video_processor.Get(), 0,
TRUE, &source_rect);
@@ -1210,6 +1394,9 @@ bool DCLayerTree::SwapChainPresenter::VideoProcessorBlt(
void DCLayerTree::SwapChainPresenter::ReleaseSwapChainResources() {
output_view_.Reset();
swap_chain_.Reset();
+ decode_surface_.Reset();
+ decode_swap_chain_.Reset();
+ decode_resource_.Reset();
swap_chain_handle_.Close();
}
@@ -1217,22 +1404,40 @@ bool DCLayerTree::SwapChainPresenter::ReallocateSwapChain(
const gfx::Size& swap_chain_size,
bool use_yuv_swap_chain,
ui::ProtectedVideoType protected_video_type) {
- TRACE_EVENT0("gpu", "DCLayerTree::SwapChainPresenter::ReallocateSwapChain");
-
- // TODO(sunnyps): Remove after debugging NV12 create swap chain failure.
- bool swap_chain_resized =
- swap_chain_ && (swap_chain_size_ != swap_chain_size);
- bool swap_chain_toggled_yuv =
- swap_chain_ && (is_yuv_swapchain_ != use_yuv_swap_chain);
- base::debug::Alias(&swap_chain_resized);
- base::debug::Alias(&swap_chain_toggled_yuv);
-
- ReleaseSwapChainResources();
+ TRACE_EVENT2("gpu", "DCLayerTree::SwapChainPresenter::ReallocateSwapChain",
+ "size", swap_chain_size.ToString(), "yuv", use_yuv_swap_chain);
DCHECK(!swap_chain_size.IsEmpty());
swap_chain_size_ = swap_chain_size;
+
+ // ResizeBuffers can't change YUV flags so only attempt it when size changes.
+ if (swap_chain_ && (is_yuv_swapchain_ == use_yuv_swap_chain) &&
+ (protected_video_type_ == protected_video_type)) {
+ output_view_.Reset();
+ DXGI_SWAP_CHAIN_DESC1 desc = {};
+ swap_chain_->GetDesc1(&desc);
+ HRESULT hr = swap_chain_->ResizeBuffers(
+ desc.BufferCount, swap_chain_size.width(), swap_chain_size.height(),
+ desc.Format, desc.Flags);
+ UMA_HISTOGRAM_BOOLEAN("GPU.DirectComposition.SwapChainResizeResult",
+ SUCCEEDED(hr));
+ if (SUCCEEDED(hr))
+ return true;
+ DLOG(ERROR) << "ResizeBuffers failed with error 0x" << std::hex << hr;
+ }
+
protected_video_type_ = protected_video_type;
+ if (is_yuv_swapchain_ != use_yuv_swap_chain) {
+ UMA_HISTOGRAM_COUNTS_1000(
+ "GPU.DirectComposition.FramesSinceColorSpaceChange",
+ frames_since_color_space_change_);
+ frames_since_color_space_change_ = 0;
+ }
+ is_yuv_swapchain_ = false;
+
+ ReleaseSwapChainResources();
+
Microsoft::WRL::ComPtr<IDXGIDevice> dxgi_device;
d3d11_device_.CopyTo(dxgi_device.GetAddressOf());
DCHECK(dxgi_device);
@@ -1243,6 +1448,15 @@ bool DCLayerTree::SwapChainPresenter::ReallocateSwapChain(
dxgi_adapter->GetParent(IID_PPV_ARGS(media_factory.GetAddressOf()));
DCHECK(media_factory);
+ // The composition surface handle is only used to create YUV swap chains since
+ // CreateSwapChainForComposition can't do that.
+ HANDLE handle = INVALID_HANDLE_VALUE;
+ if (!CreateSurfaceHandleHelper(&handle))
+ return false;
+ swap_chain_handle_.Set(handle);
+
+ first_present_ = true;
+
DXGI_SWAP_CHAIN_DESC1 desc = {};
desc.Width = swap_chain_size_.width();
desc.Height = swap_chain_size_.height();
@@ -1253,52 +1467,35 @@ bool DCLayerTree::SwapChainPresenter::ReallocateSwapChain(
desc.BufferUsage = DXGI_USAGE_RENDER_TARGET_OUTPUT;
desc.Scaling = DXGI_SCALING_STRETCH;
desc.SwapEffect = DXGI_SWAP_EFFECT_FLIP_SEQUENTIAL;
- desc.AlphaMode = DXGI_ALPHA_MODE_IGNORE;
desc.Flags =
DXGI_SWAP_CHAIN_FLAG_YUV_VIDEO | DXGI_SWAP_CHAIN_FLAG_FULLSCREEN_VIDEO;
if (IsProtectedVideo(protected_video_type))
desc.Flags |= DXGI_SWAP_CHAIN_FLAG_DISPLAY_ONLY;
if (protected_video_type == ui::ProtectedVideoType::kHardwareProtected)
desc.Flags |= DXGI_SWAP_CHAIN_FLAG_HW_PROTECTED;
-
- // The composition surface handle is only used to create YUV swap chains since
- // CreateSwapChainForComposition can't do that.
- HANDLE handle;
- if (!CreateSurfaceHandleHelper(&handle))
- return false;
- swap_chain_handle_.Set(handle);
-
- if (is_yuv_swapchain_ != use_yuv_swap_chain) {
- UMA_HISTOGRAM_COUNTS_1000(
- "GPU.DirectComposition.FramesSinceColorSpaceChange",
- frames_since_color_space_change_);
- }
-
- frames_since_color_space_change_ = 0;
+ desc.AlphaMode = DXGI_ALPHA_MODE_IGNORE;
const std::string kSwapChainCreationResultUmaPrefix =
"GPU.DirectComposition.SwapChainCreationResult.";
- is_yuv_swapchain_ = false;
+ const std::string kSwapChainCreationResultUmaPrefix3 =
+ "GPU.DirectComposition.SwapChainCreationResult3.";
+ const std::string protected_video_type_string =
+ ProtectedVideoTypeToString(protected_video_type);
- // TODO(sunnyps): Remove after debugging NV12 create swap chain failure.
- HRESULT hr = S_OK;
- VisualInfo visual_info = visual_info_;
- base::debug::Alias(&hr);
- base::debug::Alias(&desc);
- base::debug::Alias(&visual_info);
if (use_yuv_swap_chain) {
- hr = media_factory->CreateSwapChainForCompositionSurfaceHandle(
+ HRESULT hr = media_factory->CreateSwapChainForCompositionSurfaceHandle(
d3d11_device_.Get(), swap_chain_handle_.Get(), &desc, nullptr,
swap_chain_.GetAddressOf());
is_yuv_swapchain_ = SUCCEEDED(hr);
failed_to_create_yuv_swapchain_ = !is_yuv_swapchain_;
- base::UmaHistogramBoolean(kSwapChainCreationResultUmaPrefix +
- OverlayFormatToString(g_overlay_format_used),
- SUCCEEDED(hr));
- if (FAILED(hr)) {
- // TODO(sunnyps): Remove after debugging NV12 create swap chain failure.
- base::debug::DumpWithoutCrashing();
+ UMA_HISTOGRAM_BOOLEAN(kSwapChainCreationResultUmaPrefix +
+ OverlayFormatToString(g_overlay_format_used),
+ SUCCEEDED(hr));
+ base::UmaHistogramSparse(
+ kSwapChainCreationResultUmaPrefix3 + protected_video_type_string, hr);
+
+ if (FAILED(hr)) {
DLOG(ERROR) << "Failed to create "
<< OverlayFormatToString(g_overlay_format_used)
<< " swap chain of size " << swap_chain_size.ToString()
@@ -1306,7 +1503,6 @@ bool DCLayerTree::SwapChainPresenter::ReallocateSwapChain(
<< "\nFalling back to BGRA";
}
}
-
if (!is_yuv_swapchain_) {
desc.Format = DXGI_FORMAT_B8G8R8A8_UNORM;
desc.Flags = 0;
@@ -1317,9 +1513,13 @@ bool DCLayerTree::SwapChainPresenter::ReallocateSwapChain(
HRESULT hr = media_factory->CreateSwapChainForCompositionSurfaceHandle(
d3d11_device_.Get(), swap_chain_handle_.Get(), &desc, nullptr,
swap_chain_.GetAddressOf());
- base::UmaHistogramBoolean(kSwapChainCreationResultUmaPrefix +
- OverlayFormatToString(OverlayFormat::kBGRA),
- SUCCEEDED(hr));
+
+ UMA_HISTOGRAM_BOOLEAN(kSwapChainCreationResultUmaPrefix +
+ OverlayFormatToString(OverlayFormat::kBGRA),
+ SUCCEEDED(hr));
+ base::UmaHistogramSparse(
+ kSwapChainCreationResultUmaPrefix3 + protected_video_type_string, hr);
+
if (FAILED(hr)) {
DLOG(ERROR) << "Failed to create BGRA swap chain of size "
<< swap_chain_size.ToString() << " with error 0x" << std::hex
@@ -1444,9 +1644,12 @@ DirectCompositionSurfaceWin::DirectCompositionSurfaceWin(
HWND parent_window)
: gl::GLSurfaceEGL(),
child_window_(delegate, parent_window),
- vsync_provider_(std::move(vsync_provider)),
+ root_surface_(new DirectCompositionChildSurfaceWin()),
layer_tree_(std::make_unique<DCLayerTree>(
- delegate->GetFeatureInfo()->workarounds())) {}
+ delegate->GetFeatureInfo()->workarounds())),
+ vsync_provider_(std::move(vsync_provider)),
+ presentation_helper_(std::make_unique<gl::GLSurfacePresentationHelper>(
+ vsync_provider_.get())) {}
DirectCompositionSurfaceWin::~DirectCompositionSurfaceWin() {
Destroy();
@@ -1553,8 +1756,7 @@ bool DirectCompositionSurfaceWin::IsHDRSupported() {
HRESULT hr = S_OK;
Microsoft::WRL::ComPtr<IDXGIFactory> factory;
- hr = CreateDXGIFactory(__uuidof(IDXGIFactory),
- reinterpret_cast<void**>(factory.GetAddressOf()));
+ hr = CreateDXGIFactory(IID_PPV_ARGS(factory.GetAddressOf()));
if (FAILED(hr)) {
DLOG(ERROR) << "Failed to create DXGI factory.";
return false;
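
This hunk and the one below swap the manual __uuidof/reinterpret_cast pairing for IID_PPV_ARGS, which derives the IID from the static type of the out-pointer and supplies the void** cast itself, so the two can no longer drift apart:

    // Before: the IID and the cast target must be kept in sync by hand.
    hr = CreateDXGIFactory(__uuidof(IDXGIFactory),
                           reinterpret_cast<void**>(factory.GetAddressOf()));
    // After: the macro expands to the matching IID/void** pair automatically.
    hr = CreateDXGIFactory(IID_PPV_ARGS(factory.GetAddressOf()));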
@@ -1582,9 +1784,7 @@ bool DirectCompositionSurfaceWin::IsHDRSupported() {
}
Microsoft::WRL::ComPtr<IDXGIOutput6> output6;
- hr = output->QueryInterface(
- __uuidof(IDXGIOutput6),
- reinterpret_cast<void**>(output6.GetAddressOf()));
+ hr = output->QueryInterface(IID_PPV_ARGS(output6.GetAddressOf()));
if (FAILED(hr)) {
DLOG(WARNING) << "IDXGIOutput6 is required for HDR detection.";
continue;
@@ -1610,69 +1810,6 @@ bool DirectCompositionSurfaceWin::IsHDRSupported() {
}
// static
-bool DirectCompositionSurfaceWin::IsSwapChainTearingSupported() {
- static bool initialized = false;
- static bool supported = false;
-
- if (initialized)
- return supported;
-
- initialized = true;
-
- // Swap chain tearing is used only if vsync is disabled explicitly.
- if (!base::CommandLine::ForCurrentProcess()->HasSwitch(
- switches::kDisableGpuVsync))
- return false;
-
- // Swap chain tearing is supported only on Windows 10 Anniversary Edition
- // (Redstone 1) and above.
- if (base::win::GetVersion() < base::win::VERSION_WIN10_RS1)
- return false;
-
- Microsoft::WRL::ComPtr<ID3D11Device> d3d11_device =
- gl::QueryD3D11DeviceObjectFromANGLE();
- if (!d3d11_device) {
- DLOG(ERROR) << "Not using swap chain tearing because failed to retrieve "
- "D3D11 device from ANGLE";
- return false;
- }
- Microsoft::WRL::ComPtr<IDXGIDevice> dxgi_device;
- d3d11_device.CopyTo(dxgi_device.GetAddressOf());
- DCHECK(dxgi_device);
- Microsoft::WRL::ComPtr<IDXGIAdapter> dxgi_adapter;
- dxgi_device->GetAdapter(dxgi_adapter.GetAddressOf());
- DCHECK(dxgi_adapter);
- Microsoft::WRL::ComPtr<IDXGIFactory5> dxgi_factory;
- if (FAILED(
- dxgi_adapter->GetParent(IID_PPV_ARGS(dxgi_factory.GetAddressOf())))) {
- DLOG(ERROR) << "Not using swap chain tearing because failed to retrieve "
- "IDXGIFactory5 interface";
- return false;
- }
-
- // BOOL instead of bool because we want a well defined sized type.
- BOOL present_allow_tearing = FALSE;
- DCHECK(dxgi_factory);
- if (FAILED(dxgi_factory->CheckFeatureSupport(
- DXGI_FEATURE_PRESENT_ALLOW_TEARING, &present_allow_tearing,
- sizeof(present_allow_tearing)))) {
- DLOG(ERROR)
- << "Not using swap chain tearing because CheckFeatureSupport failed";
- return false;
- }
- supported = !!present_allow_tearing;
- return supported;
-}
-
-bool DirectCompositionSurfaceWin::InitializeNativeWindow() {
- if (window_)
- return true;
-
- bool result = child_window_.Initialize();
- window_ = child_window_.window();
- return result;
-}
-
bool DirectCompositionSurfaceWin::Initialize(gl::GLSurfaceFormat format) {
d3d11_device_ = gl::QueryD3D11DeviceObjectFromANGLE();
if (!d3d11_device_) {
@@ -1687,60 +1824,29 @@ bool DirectCompositionSurfaceWin::Initialize(gl::GLSurfaceFormat format) {
return false;
}
- EGLDisplay display = GetDisplay();
- if (!window_) {
- if (!InitializeNativeWindow()) {
- DLOG(ERROR) << "Failed to initialize native window";
- return false;
- }
+ if (!child_window_.Initialize()) {
+ DLOG(ERROR) << "Failed to initialize native window";
+ return false;
}
+ window_ = child_window_.window();
if (!layer_tree_->Initialize(window_, d3d11_device_, dcomp_device_))
return false;
- EGLint pbuffer_attribs[] = {
- EGL_WIDTH,
- 1,
- EGL_HEIGHT,
- 1,
- EGL_FLEXIBLE_SURFACE_COMPATIBILITY_SUPPORTED_ANGLE,
- EGL_TRUE,
- EGL_NONE,
- };
-
- default_surface_ =
- eglCreatePbufferSurface(display, GetConfig(), pbuffer_attribs);
- if (!default_surface_) {
- DLOG(ERROR) << "eglCreatePbufferSurface failed with error "
- << ui::GetLastEGLErrorString();
- return false;
- }
-
- if (!RecreateRootSurface())
+ if (!root_surface_->Initialize(gl::GLSurfaceFormat()))
return false;
- presentation_helper_ =
- std::make_unique<gl::GLSurfacePresentationHelper>(vsync_provider_.get());
return true;
}
void DirectCompositionSurfaceWin::Destroy() {
+ // Destroy presentation helper first because its dtor calls GetHandle.
presentation_helper_ = nullptr;
- if (default_surface_) {
- if (!eglDestroySurface(GetDisplay(), default_surface_)) {
- DLOG(ERROR) << "eglDestroySurface failed with error "
- << ui::GetLastEGLErrorString();
- }
- default_surface_ = nullptr;
- }
- if (root_surface_) {
- root_surface_->Destroy();
- root_surface_ = nullptr;
- }
+ root_surface_->Destroy();
}
gfx::Size DirectCompositionSurfaceWin::GetSize() {
- return size_;
+ return root_surface_->GetSize();
}
bool DirectCompositionSurfaceWin::IsOffscreen() {
@@ -1748,27 +1854,20 @@ bool DirectCompositionSurfaceWin::IsOffscreen() {
}
void* DirectCompositionSurfaceWin::GetHandle() {
- return root_surface_ ? root_surface_->GetHandle() : default_surface_;
+ return root_surface_->GetHandle();
}
bool DirectCompositionSurfaceWin::Resize(const gfx::Size& size,
float scale_factor,
ColorSpace color_space,
bool has_alpha) {
- bool is_hdr = color_space == ColorSpace::SCRGB_LINEAR;
- if (size == GetSize() && has_alpha == has_alpha_ && is_hdr == is_hdr_)
- return true;
-
// Force a resize and redraw (but not a move, activate, etc.).
if (!SetWindowPos(window_, nullptr, 0, 0, size.width(), size.height(),
SWP_NOMOVE | SWP_NOACTIVATE | SWP_NOCOPYBITS |
SWP_NOOWNERZORDER | SWP_NOZORDER)) {
return false;
}
- size_ = size;
- is_hdr_ = is_hdr;
- has_alpha_ = has_alpha;
- return RecreateRootSurface();
+ return root_surface_->Resize(size, scale_factor, color_space, has_alpha);
}
gfx::SwapResult DirectCompositionSurfaceWin::SwapBuffers(
@@ -1777,7 +1876,6 @@ gfx::SwapResult DirectCompositionSurfaceWin::SwapBuffers(
presentation_helper_.get(), callback);
bool succeeded = true;
- DCHECK(root_surface_);
if (root_surface_->SwapBuffers(PresentationCallback()) ==
gfx::SwapResult::SWAP_FAILED)
succeeded = false;
@@ -1813,9 +1911,7 @@ gfx::VSyncProvider* DirectCompositionSurfaceWin::GetVSyncProvider() {
}
void DirectCompositionSurfaceWin::SetVSyncEnabled(bool enabled) {
- vsync_enabled_ = enabled;
- if (root_surface_)
- root_surface_->SetVSyncEnabled(enabled);
+ root_surface_->SetVSyncEnabled(enabled);
}
bool DirectCompositionSurfaceWin::ScheduleDCLayer(
@@ -1824,10 +1920,7 @@ bool DirectCompositionSurfaceWin::ScheduleDCLayer(
}
bool DirectCompositionSurfaceWin::SetEnableDCLayers(bool enable) {
- if (enable_dc_layers_ == enable)
- return true;
- enable_dc_layers_ = enable;
- return RecreateRootSurface();
+ return root_surface_->SetEnableDCLayers(enable);
}
bool DirectCompositionSurfaceWin::FlipsVertically() const {
@@ -1845,9 +1938,7 @@ bool DirectCompositionSurfaceWin::SupportsPostSubBuffer() {
bool DirectCompositionSurfaceWin::OnMakeCurrent(gl::GLContext* context) {
if (presentation_helper_)
presentation_helper_->OnMakeCurrent(context, this);
- if (root_surface_)
- return root_surface_->OnMakeCurrent(context);
- return true;
+ return root_surface_->OnMakeCurrent(context);
}
bool DirectCompositionSurfaceWin::SupportsDCLayers() const {
@@ -1865,23 +1956,11 @@ bool DirectCompositionSurfaceWin::SupportsProtectedVideo() const {
}
bool DirectCompositionSurfaceWin::SetDrawRectangle(const gfx::Rect& rectangle) {
- if (root_surface_)
- return root_surface_->SetDrawRectangle(rectangle);
- return false;
+ return root_surface_->SetDrawRectangle(rectangle);
}
gfx::Vector2d DirectCompositionSurfaceWin::GetDrawOffset() const {
- if (root_surface_)
- return root_surface_->GetDrawOffset();
- return gfx::Vector2d();
-}
-
-bool DirectCompositionSurfaceWin::RecreateRootSurface() {
- root_surface_ = new DirectCompositionChildSurfaceWin(
- size_, is_hdr_, has_alpha_, enable_dc_layers_,
- IsSwapChainTearingSupported());
- root_surface_->SetVSyncEnabled(vsync_enabled_);
- return root_surface_->Initialize();
+ return root_surface_->GetDrawOffset();
}
scoped_refptr<base::TaskRunner>
@@ -1896,9 +1975,7 @@ DirectCompositionSurfaceWin::GetLayerSwapChainForTesting(size_t index) const {
Microsoft::WRL::ComPtr<IDXGISwapChain1>
DirectCompositionSurfaceWin::GetBackbufferSwapChainForTesting() const {
- if (root_surface_)
- return root_surface_->swap_chain();
- return nullptr;
+ return root_surface_->swap_chain();
}
} // namespace gpu
diff --git a/chromium/gpu/ipc/service/direct_composition_surface_win.h b/chromium/gpu/ipc/service/direct_composition_surface_win.h
index eb8c7db95e8..703e4ab5138 100644
--- a/chromium/gpu/ipc/service/direct_composition_surface_win.h
+++ b/chromium/gpu/ipc/service/direct_composition_surface_win.h
@@ -54,10 +54,6 @@ class GPU_IPC_SERVICE_EXPORT DirectCompositionSurfaceWin
// Returns true if there is an HDR capable display connected.
static bool IsHDRSupported();
- // Returns true if swap chain tearing is supported for variable refresh rate
- // displays. Tearing is only used if vsync is also disabled via command line.
- static bool IsSwapChainTearingSupported();
-
static void SetScaledOverlaysSupportedForTesting(bool value);
static void SetPreferNV12OverlaysForTesting();
@@ -65,7 +61,6 @@ class GPU_IPC_SERVICE_EXPORT DirectCompositionSurfaceWin
bool InitializeNativeWindow();
// GLSurfaceEGL implementation.
- using GLSurface::Initialize;
bool Initialize(gl::GLSurfaceFormat format) override;
void Destroy() override;
gfx::Size GetSize() override;
@@ -113,24 +108,13 @@ class GPU_IPC_SERVICE_EXPORT DirectCompositionSurfaceWin
~DirectCompositionSurfaceWin() override;
private:
- bool RecreateRootSurface();
-
+ HWND window_ = nullptr;
ChildWindowWin child_window_;
- HWND window_ = nullptr;
- // This is a placeholder surface used when not rendering to the
- // DirectComposition surface.
- EGLSurface default_surface_ = 0;
-
- gfx::Size size_ = gfx::Size(1, 1);
- bool enable_dc_layers_ = false;
- bool is_hdr_ = false;
- bool has_alpha_ = true;
- bool vsync_enabled_ = true;
- std::unique_ptr<gfx::VSyncProvider> vsync_provider_;
- std::unique_ptr<gl::GLSurfacePresentationHelper> presentation_helper_;
scoped_refptr<DirectCompositionChildSurfaceWin> root_surface_;
std::unique_ptr<DCLayerTree> layer_tree_;
+ std::unique_ptr<gfx::VSyncProvider> vsync_provider_;
+ std::unique_ptr<gl::GLSurfacePresentationHelper> presentation_helper_;
Microsoft::WRL::ComPtr<ID3D11Device> d3d11_device_;
Microsoft::WRL::ComPtr<IDCompositionDevice2> dcomp_device_;
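
Note the member reordering above: C++ destroys non-static members in reverse declaration order, so presentation_helper_, whose destructor calls GetHandle(), is torn down before root_surface_, consistent with the explicit reset in Destroy(). A self-contained illustration of the rule, with hypothetical names:

    #include <cstdio>

    struct RootSurface {
      ~RootSurface() { std::puts("RootSurface destroyed"); }
    };
    struct PresentationHelper {
      ~PresentationHelper() { std::puts("PresentationHelper destroyed"); }
    };
    struct Surface {
      RootSurface root;           // declared first, destroyed last
      PresentationHelper helper;  // declared last, destroyed first
    };

    int main() {
      Surface s;
      // Prints "PresentationHelper destroyed", then "RootSurface destroyed".
    }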
diff --git a/chromium/gpu/ipc/service/direct_composition_surface_win_unittest.cc b/chromium/gpu/ipc/service/direct_composition_surface_win_unittest.cc
index 9265d50bdbf..aca23603b69 100644
--- a/chromium/gpu/ipc/service/direct_composition_surface_win_unittest.cc
+++ b/chromium/gpu/ipc/service/direct_composition_surface_win_unittest.cc
@@ -155,7 +155,7 @@ TEST(DirectCompositionSurfaceTest, TestMakeCurrent) {
scoped_refptr<DirectCompositionSurfaceWin> surface1(
new DirectCompositionSurfaceWin(nullptr, delegate.AsWeakPtr(),
ui::GetHiddenWindow()));
- EXPECT_TRUE(surface1->Initialize());
+ EXPECT_TRUE(surface1->Initialize(gl::GLSurfaceFormat()));
scoped_refptr<gl::GLContext> context1 = gl::init::CreateGLContext(
nullptr, surface1.get(), gl::GLContextAttribs());
@@ -191,7 +191,7 @@ TEST(DirectCompositionSurfaceTest, TestMakeCurrent) {
scoped_refptr<DirectCompositionSurfaceWin> surface2(
new DirectCompositionSurfaceWin(nullptr, delegate.AsWeakPtr(),
ui::GetHiddenWindow()));
- EXPECT_TRUE(surface2->Initialize());
+ EXPECT_TRUE(surface2->Initialize(gl::GLSurfaceFormat()));
scoped_refptr<gl::GLContext> context2 = gl::init::CreateGLContext(
nullptr, surface2.get(), gl::GLContextAttribs());
@@ -225,7 +225,7 @@ TEST(DirectCompositionSurfaceTest, DXGIDCLayerSwitch) {
scoped_refptr<DirectCompositionSurfaceWin> surface(
new DirectCompositionSurfaceWin(nullptr, delegate.AsWeakPtr(),
ui::GetHiddenWindow()));
- EXPECT_TRUE(surface->Initialize());
+ EXPECT_TRUE(surface->Initialize(gl::GLSurfaceFormat()));
scoped_refptr<gl::GLContext> context =
gl::init::CreateGLContext(nullptr, surface.get(), gl::GLContextAttribs());
@@ -235,39 +235,38 @@ TEST(DirectCompositionSurfaceTest, DXGIDCLayerSwitch) {
gl::GLSurface::ColorSpace::UNSPECIFIED, true));
EXPECT_FALSE(surface->GetBackbufferSwapChainForTesting());
- // First SetDrawRectangle must be full size of surface for DXGI
- // swapchain.
+ // First SetDrawRectangle must be full size of surface for DXGI swapchain.
EXPECT_FALSE(surface->SetDrawRectangle(gfx::Rect(0, 0, 50, 50)));
EXPECT_TRUE(surface->SetDrawRectangle(gfx::Rect(0, 0, 100, 100)));
EXPECT_TRUE(surface->GetBackbufferSwapChainForTesting());
- // SetDrawRectangle can't be called again until swap.
+ // SetDrawRectangle and SetEnableDCLayers can't be called again until swap.
EXPECT_FALSE(surface->SetDrawRectangle(gfx::Rect(0, 0, 100, 100)));
- EXPECT_TRUE(context->MakeCurrent(surface.get()));
EXPECT_EQ(gfx::SwapResult::SWAP_ACK, surface->SwapBuffers(base::DoNothing()));
-
EXPECT_TRUE(context->IsCurrent(surface.get()));
surface->SetEnableDCLayers(true);
- // Surface switched to use IDCompositionSurface, so must draw to
- // entire surface.
+ // Surface switched to use IDCompositionSurface, so must draw to entire
+ // surface.
EXPECT_FALSE(surface->SetDrawRectangle(gfx::Rect(0, 0, 50, 50)));
EXPECT_TRUE(surface->SetDrawRectangle(gfx::Rect(0, 0, 100, 100)));
- EXPECT_TRUE(context->IsCurrent(surface.get()));
EXPECT_FALSE(surface->GetBackbufferSwapChainForTesting());
- surface->SetEnableDCLayers(false);
-
EXPECT_EQ(gfx::SwapResult::SWAP_ACK, surface->SwapBuffers(base::DoNothing()));
+ EXPECT_TRUE(context->IsCurrent(surface.get()));
- // Surface switched to use IDXGISwapChain, so must draw to entire
- // surface.
+ surface->SetEnableDCLayers(false);
+
+ // Surface switched to use IDXGISwapChain, so must draw to entire surface.
EXPECT_FALSE(surface->SetDrawRectangle(gfx::Rect(0, 0, 50, 50)));
EXPECT_TRUE(surface->SetDrawRectangle(gfx::Rect(0, 0, 100, 100)));
EXPECT_TRUE(surface->GetBackbufferSwapChainForTesting());
+ EXPECT_EQ(gfx::SwapResult::SWAP_ACK, surface->SwapBuffers(base::DoNothing()));
+ EXPECT_TRUE(context->IsCurrent(surface.get()));
+
context = nullptr;
DestroySurface(std::move(surface));
}
@@ -282,7 +281,7 @@ TEST(DirectCompositionSurfaceTest, SwitchAlpha) {
scoped_refptr<DirectCompositionSurfaceWin> surface(
new DirectCompositionSurfaceWin(nullptr, delegate.AsWeakPtr(),
ui::GetHiddenWindow()));
- EXPECT_TRUE(surface->Initialize());
+ EXPECT_TRUE(surface->Initialize(gl::GLSurfaceFormat()));
scoped_refptr<gl::GLContext> context =
gl::init::CreateGLContext(nullptr, surface.get(), gl::GLContextAttribs());
@@ -329,7 +328,7 @@ TEST(DirectCompositionSurfaceTest, NoPresentTwice) {
scoped_refptr<DirectCompositionSurfaceWin> surface(
new DirectCompositionSurfaceWin(nullptr, delegate.AsWeakPtr(),
ui::GetHiddenWindow()));
- EXPECT_TRUE(surface->Initialize());
+ EXPECT_TRUE(surface->Initialize(gl::GLSurfaceFormat()));
scoped_refptr<gl::GLContext> context =
gl::init::CreateGLContext(nullptr, surface.get(), gl::GLContextAttribs());
@@ -349,12 +348,11 @@ TEST(DirectCompositionSurfaceTest, NoPresentTwice) {
image_dxgi->SetTexture(texture, 0);
image_dxgi->SetColorSpace(gfx::ColorSpace::CreateREC709());
- gfx::Size window_size(100, 100);
- ui::DCRendererLayerParams params(
- false, gfx::Rect(), 1, gfx::Transform(),
- std::vector<scoped_refptr<gl::GLImage>>{image_dxgi},
- gfx::RectF(gfx::Rect(texture_size)), gfx::Rect(window_size), 0, 0, 1.0, 0,
- ui::ProtectedVideoType::kClear);
+ ui::DCRendererLayerParams params;
+ params.y_image = image_dxgi;
+ params.uv_image = image_dxgi;
+ params.content_rect = gfx::Rect(texture_size);
+ params.quad_rect = gfx::Rect(100, 100);
surface->ScheduleDCLayer(params);
Microsoft::WRL::ComPtr<IDXGISwapChain1> swap_chain =
@@ -390,12 +388,9 @@ TEST(DirectCompositionSurfaceTest, NoPresentTwice) {
image_dxgi2->SetTexture(texture, 0);
image_dxgi2->SetColorSpace(gfx::ColorSpace::CreateREC709());
- ui::DCRendererLayerParams params2(
- false, gfx::Rect(), 1, gfx::Transform(),
- std::vector<scoped_refptr<gl::GLImage>>{image_dxgi2},
- gfx::RectF(gfx::Rect(texture_size)), gfx::Rect(window_size), 0, 0, 1.0, 0,
- ui::ProtectedVideoType::kClear);
- surface->ScheduleDCLayer(params2);
+ params.y_image = image_dxgi2;
+ params.uv_image = image_dxgi2;
+ surface->ScheduleDCLayer(params);
EXPECT_EQ(gfx::SwapResult::SWAP_ACK, surface->SwapBuffers(base::DoNothing()));
@@ -420,7 +415,7 @@ TEST(DirectCompositionSurfaceTest, SwapchainSizeWithScaledOverlays) {
scoped_refptr<DirectCompositionSurfaceWin> surface(
new DirectCompositionSurfaceWin(nullptr, delegate.AsWeakPtr(),
ui::GetHiddenWindow()));
- EXPECT_TRUE(surface->Initialize());
+ EXPECT_TRUE(surface->Initialize(gl::GLSurfaceFormat()));
scoped_refptr<gl::GLContext> context =
gl::init::CreateGLContext(nullptr, surface.get(), gl::GLContextAttribs());
@@ -443,14 +438,14 @@ TEST(DirectCompositionSurfaceTest, SwapchainSizeWithScaledOverlays) {
// HW supports scaled overlays
// The input texture size is smaller than the window size.
surface->SetScaledOverlaysSupportedForTesting(true);
- gfx::Size window_size(100, 100);
- ui::DCRendererLayerParams params(
- false, gfx::Rect(), 1, gfx::Transform(),
- std::vector<scoped_refptr<gl::GLImage>>{image_dxgi},
- gfx::RectF(gfx::Rect(texture_size)), gfx::Rect(window_size), 0, 0, 1.0, 0,
- ui::ProtectedVideoType::kClear);
+ ui::DCRendererLayerParams params;
+ params.y_image = image_dxgi;
+ params.uv_image = image_dxgi;
+ params.content_rect = gfx::Rect(texture_size);
+ params.quad_rect = gfx::Rect(100, 100);
surface->ScheduleDCLayer(params);
+
EXPECT_EQ(gfx::SwapResult::SWAP_ACK, surface->SwapBuffers(base::DoNothing()));
Microsoft::WRL::ComPtr<IDXGISwapChain1> swap_chain =
surface->GetLayerSwapChainForTesting(0);
@@ -467,14 +462,9 @@ TEST(DirectCompositionSurfaceTest, SwapchainSizeWithScaledOverlays) {
EXPECT_EQ(gfx::SwapResult::SWAP_ACK, surface->SwapBuffers(base::DoNothing()));
// The input texture size is bigger than the window size.
- window_size = gfx::Size(32, 48);
- ui::DCRendererLayerParams params2(
- false, gfx::Rect(), 1, gfx::Transform(),
- std::vector<scoped_refptr<gl::GLImage>>{image_dxgi},
- gfx::RectF(gfx::Rect(texture_size)), gfx::Rect(window_size), 0, 0, 1.0, 0,
- ui::ProtectedVideoType::kClear);
-
- surface->ScheduleDCLayer(params2);
+ params.quad_rect = gfx::Rect(32, 48);
+
+ surface->ScheduleDCLayer(params);
EXPECT_EQ(gfx::SwapResult::SWAP_ACK, surface->SwapBuffers(base::DoNothing()));
Microsoft::WRL::ComPtr<IDXGISwapChain1> swap_chain2 =
@@ -482,8 +472,8 @@ TEST(DirectCompositionSurfaceTest, SwapchainSizeWithScaledOverlays) {
ASSERT_TRUE(swap_chain2);
EXPECT_TRUE(SUCCEEDED(swap_chain2->GetDesc(&Desc)));
- EXPECT_EQ((int)Desc.BufferDesc.Width, window_size.width());
- EXPECT_EQ((int)Desc.BufferDesc.Height, window_size.height());
+ EXPECT_EQ((int)Desc.BufferDesc.Width, params.quad_rect.width());
+ EXPECT_EQ((int)Desc.BufferDesc.Height, params.quad_rect.height());
context = nullptr;
DestroySurface(std::move(surface));
@@ -499,7 +489,7 @@ TEST(DirectCompositionSurfaceTest, SwapchainSizeWithoutScaledOverlays) {
scoped_refptr<DirectCompositionSurfaceWin> surface(
new DirectCompositionSurfaceWin(nullptr, delegate.AsWeakPtr(),
ui::GetHiddenWindow()));
- EXPECT_TRUE(surface->Initialize());
+ EXPECT_TRUE(surface->Initialize(gl::GLSurfaceFormat()));
scoped_refptr<gl::GLContext> context =
gl::init::CreateGLContext(nullptr, surface.get(), gl::GLContextAttribs());
@@ -522,14 +512,14 @@ TEST(DirectCompositionSurfaceTest, SwapchainSizeWithoutScaledOverlays) {
// HW doesn't support scaled overlays
// The input texture size is bigger than the window size.
surface->SetScaledOverlaysSupportedForTesting(false);
- gfx::Size window_size(42, 42);
- ui::DCRendererLayerParams params(
- false, gfx::Rect(), 1, gfx::Transform(),
- std::vector<scoped_refptr<gl::GLImage>>{image_dxgi},
- gfx::RectF(gfx::Rect(texture_size)), gfx::Rect(window_size), 0, 0, 1.0, 0,
- ui::ProtectedVideoType::kClear);
+ ui::DCRendererLayerParams params;
+ params.y_image = image_dxgi;
+ params.uv_image = image_dxgi;
+ params.content_rect = gfx::Rect(texture_size);
+ params.quad_rect = gfx::Rect(42, 42);
surface->ScheduleDCLayer(params);
+
EXPECT_EQ(gfx::SwapResult::SWAP_ACK, surface->SwapBuffers(base::DoNothing()));
Microsoft::WRL::ComPtr<IDXGISwapChain1> swap_chain =
surface->GetLayerSwapChainForTesting(0);
@@ -537,18 +527,13 @@ TEST(DirectCompositionSurfaceTest, SwapchainSizeWithoutScaledOverlays) {
DXGI_SWAP_CHAIN_DESC desc;
EXPECT_TRUE(SUCCEEDED(swap_chain->GetDesc(&desc)));
- EXPECT_EQ((int)desc.BufferDesc.Width, window_size.width());
- EXPECT_EQ((int)desc.BufferDesc.Height, window_size.height());
+ EXPECT_EQ((int)desc.BufferDesc.Width, params.quad_rect.width());
+ EXPECT_EQ((int)desc.BufferDesc.Height, params.quad_rect.height());
// The input texture size is smaller than the window size.
- window_size = gfx::Size(124, 136);
- ui::DCRendererLayerParams params2(
- false, gfx::Rect(), 1, gfx::Transform(),
- std::vector<scoped_refptr<gl::GLImage>>{image_dxgi},
- gfx::RectF(gfx::Rect(texture_size)), gfx::Rect(window_size), 0, 0, 1.0, 0,
- ui::ProtectedVideoType::kClear);
-
- surface->ScheduleDCLayer(params2);
+ params.quad_rect = gfx::Rect(124, 136);
+
+ surface->ScheduleDCLayer(params);
EXPECT_EQ(gfx::SwapResult::SWAP_ACK, surface->SwapBuffers(base::DoNothing()));
Microsoft::WRL::ComPtr<IDXGISwapChain1> swap_chain2 =
@@ -556,19 +541,106 @@ TEST(DirectCompositionSurfaceTest, SwapchainSizeWithoutScaledOverlays) {
ASSERT_TRUE(swap_chain2);
EXPECT_TRUE(SUCCEEDED(swap_chain2->GetDesc(&desc)));
- EXPECT_EQ((int)desc.BufferDesc.Width, window_size.width());
- EXPECT_EQ((int)desc.BufferDesc.Height, window_size.height());
+ EXPECT_EQ((int)desc.BufferDesc.Width, params.quad_rect.width());
+ EXPECT_EQ((int)desc.BufferDesc.Height, params.quad_rect.height());
context = nullptr;
DestroySurface(std::move(surface));
}
-SkColor ReadBackWindowPixel(HWND window, const gfx::Point& point) {
+// Test protected video flags
+TEST(DirectCompositionSurfaceTest, ProtectedVideos) {
+ if (!CheckIfDCSupported())
+ return;
+
+ TestImageTransportSurfaceDelegate delegate;
+ scoped_refptr<DirectCompositionSurfaceWin> surface(
+ new DirectCompositionSurfaceWin(nullptr, delegate.AsWeakPtr(),
+ ui::GetHiddenWindow()));
+ EXPECT_TRUE(surface->Initialize(gl::GLSurfaceFormat()));
+
+ scoped_refptr<gl::GLContext> context =
+ gl::init::CreateGLContext(nullptr, surface.get(), gl::GLContextAttribs());
+ EXPECT_TRUE(context->MakeCurrent(surface.get()));
+
+ surface->SetEnableDCLayers(true);
+
+ Microsoft::WRL::ComPtr<ID3D11Device> d3d11_device =
+ gl::QueryD3D11DeviceObjectFromANGLE();
+
+ gfx::Size texture_size(1280, 720);
+ Microsoft::WRL::ComPtr<ID3D11Texture2D> texture =
+ CreateNV12Texture(d3d11_device, texture_size, false);
+
+ scoped_refptr<gl::GLImageDXGI> image_dxgi(
+ new gl::GLImageDXGI(texture_size, nullptr));
+ image_dxgi->SetTexture(texture, 0);
+ image_dxgi->SetColorSpace(gfx::ColorSpace::CreateREC709());
+ gfx::Size window_size(640, 360);
+
+ // Clear video
+ {
+ ui::DCRendererLayerParams params;
+ params.y_image = image_dxgi;
+ params.uv_image = image_dxgi;
+ params.quad_rect = gfx::Rect(window_size);
+ params.content_rect = gfx::Rect(texture_size);
+ params.protected_video_type = ui::ProtectedVideoType::kClear;
+
+ surface->ScheduleDCLayer(params);
+ EXPECT_EQ(gfx::SwapResult::SWAP_ACK,
+ surface->SwapBuffers(base::DoNothing()));
+ Microsoft::WRL::ComPtr<IDXGISwapChain1> swap_chain =
+ surface->GetLayerSwapChainForTesting(0);
+ ASSERT_TRUE(swap_chain);
+
+ DXGI_SWAP_CHAIN_DESC Desc;
+ EXPECT_TRUE(SUCCEEDED(swap_chain->GetDesc(&Desc)));
+ unsigned display_only_flag = Desc.Flags & DXGI_SWAP_CHAIN_FLAG_DISPLAY_ONLY;
+ unsigned hw_protected_flag = Desc.Flags & DXGI_SWAP_CHAIN_FLAG_HW_PROTECTED;
+ EXPECT_EQ(display_only_flag, (unsigned)0);
+ EXPECT_EQ(hw_protected_flag, (unsigned)0);
+ }
+
+ // Software protected video
+ {
+ ui::DCRendererLayerParams params;
+ params.y_image = image_dxgi;
+ params.uv_image = image_dxgi;
+ params.quad_rect = gfx::Rect(window_size);
+ params.content_rect = gfx::Rect(texture_size);
+ params.protected_video_type = ui::ProtectedVideoType::kSoftwareProtected;
+
+ surface->ScheduleDCLayer(params);
+ EXPECT_EQ(gfx::SwapResult::SWAP_ACK,
+ surface->SwapBuffers(base::DoNothing()));
+ Microsoft::WRL::ComPtr<IDXGISwapChain1> swap_chain =
+ surface->GetLayerSwapChainForTesting(0);
+ ASSERT_TRUE(swap_chain);
+
+ DXGI_SWAP_CHAIN_DESC Desc;
+ EXPECT_TRUE(SUCCEEDED(swap_chain->GetDesc(&Desc)));
+ unsigned display_only_flag = Desc.Flags & DXGI_SWAP_CHAIN_FLAG_DISPLAY_ONLY;
+ unsigned hw_protected_flag = Desc.Flags & DXGI_SWAP_CHAIN_FLAG_HW_PROTECTED;
+ EXPECT_EQ(display_only_flag, (unsigned)DXGI_SWAP_CHAIN_FLAG_DISPLAY_ONLY);
+ EXPECT_EQ(hw_protected_flag, (unsigned)0);
+ }
+
+  // TODO(magchen): Add a hardware protected video test when hardware protected
+  // video support is enabled by default in the Intel driver and Chrome.
+
+ context = nullptr;
+ DestroySurface(std::move(surface));
+}
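
The flag assertions in this test follow from the mapping ReallocateSwapChain applies earlier in this diff. A condensed sketch of that mapping, assuming IsProtectedVideo() means any non-clear type (the helper name here is illustrative):

    // Clear video gets no protection flags; software- and hardware-protected
    // video become display-only, and hardware protection adds HW_PROTECTED.
    UINT ProtectedVideoSwapChainFlags(ui::ProtectedVideoType type) {
      UINT flags = 0;
      if (type != ui::ProtectedVideoType::kClear)
        flags |= DXGI_SWAP_CHAIN_FLAG_DISPLAY_ONLY;
      if (type == ui::ProtectedVideoType::kHardwareProtected)
        flags |= DXGI_SWAP_CHAIN_FLAG_HW_PROTECTED;
      return flags;
    }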
+
+std::vector<SkColor> ReadBackWindow(HWND window, const gfx::Size& size) {
base::win::ScopedCreateDC mem_hdc(::CreateCompatibleDC(nullptr));
- void* bits = nullptr;
- BITMAPV4HEADER hdr;
- gfx::CreateBitmapV4Header(point.x() + 1, point.y() + 1, &hdr);
DCHECK(mem_hdc.IsValid());
+
+ BITMAPV4HEADER hdr;
+ gfx::CreateBitmapV4Header(size.width(), size.height(), &hdr);
+
+ void* bits = nullptr;
base::win::ScopedBitmap bitmap(
::CreateDIBSection(mem_hdc.Get(), reinterpret_cast<BITMAPINFO*>(&hdr),
DIB_RGB_COLORS, &bits, nullptr, 0));
@@ -588,16 +660,21 @@ SkColor ReadBackWindowPixel(HWND window, const gfx::Point& point) {
GdiFlush();
- uint32_t pixel_value =
- static_cast<uint32_t*>(bits)[hdr.bV4Width * point.y() + point.x()];
+ std::vector<SkColor> pixels(size.width() * size.height());
+ memcpy(pixels.data(), bits, pixels.size() * sizeof(SkColor));
+ return pixels;
+}
- return static_cast<SkColor>(pixel_value);
+SkColor ReadBackWindowPixel(HWND window, const gfx::Point& point) {
+ gfx::Size size(point.x() + 1, point.y() + 1);
+ auto pixels = ReadBackWindow(window, size);
+ return pixels[size.width() * point.y() + point.x()];
}
class DirectCompositionPixelTest : public testing::Test {
public:
DirectCompositionPixelTest()
- : window_(&platform_delegate_, gfx::Rect(0, 0, 100, 100)) {}
+ : window_(&platform_delegate_, gfx::Rect(100, 100)) {}
~DirectCompositionPixelTest() override {
context_ = nullptr;
@@ -611,7 +688,7 @@ class DirectCompositionPixelTest : public testing::Test {
surface_ = new DirectCompositionSurfaceWin(nullptr, delegate_.AsWeakPtr(),
window_.hwnd());
- EXPECT_TRUE(surface_->Initialize());
+ EXPECT_TRUE(surface_->Initialize(gl::GLSurfaceFormat()));
context_ = gl::init::CreateGLContext(nullptr, surface_.get(),
gl::GLContextAttribs());
EXPECT_TRUE(context_->MakeCurrent(surface_.get()));
@@ -699,11 +776,11 @@ class DirectCompositionVideoPixelTest : public DirectCompositionPixelTest {
image_dxgi->SetTexture(texture, 0);
image_dxgi->SetColorSpace(color_space);
- ui::DCRendererLayerParams params(
- false, gfx::Rect(), 1, gfx::Transform(),
- std::vector<scoped_refptr<gl::GLImage>>{image_dxgi},
- gfx::RectF(gfx::Rect(texture_size)), gfx::Rect(texture_size), 0, 0, 1.0,
- 0, ui::ProtectedVideoType::kClear);
+ ui::DCRendererLayerParams params;
+ params.y_image = image_dxgi;
+ params.uv_image = image_dxgi;
+ params.content_rect = gfx::Rect(texture_size);
+ params.quad_rect = gfx::Rect(texture_size);
surface_->ScheduleDCLayer(params);
EXPECT_EQ(gfx::SwapResult::SWAP_ACK,
@@ -711,13 +788,9 @@ class DirectCompositionVideoPixelTest : public DirectCompositionPixelTest {
// Scaling up the swapchain with the same image should cause it to be
// transformed again, but not presented again.
- ui::DCRendererLayerParams params2(
- false, gfx::Rect(), 1, gfx::Transform(),
- std::vector<scoped_refptr<gl::GLImage>>{image_dxgi},
- gfx::RectF(gfx::Rect(texture_size)), gfx::Rect(window_size), 0, 0, 1.0,
- 0, ui::ProtectedVideoType::kClear);
- surface_->ScheduleDCLayer(params2);
+ params.quad_rect = gfx::Rect(window_size);
+ surface_->ScheduleDCLayer(params);
EXPECT_EQ(gfx::SwapResult::SWAP_ACK,
surface_->SwapBuffers(base::DoNothing()));
Sleep(1000);
@@ -787,11 +860,11 @@ TEST_F(DirectCompositionPixelTest, SoftwareVideoSwapchain) {
gfx::BufferFormat::RG_88);
y_image->SetColorSpace(gfx::ColorSpace::CreateREC709());
- ui::DCRendererLayerParams params(
- false, gfx::Rect(), 1, gfx::Transform(),
- std::vector<scoped_refptr<gl::GLImage>>{y_image, uv_image},
- gfx::RectF(gfx::Rect(y_size)), gfx::Rect(window_size), 0, 0, 1.0, 0,
- ui::ProtectedVideoType::kClear);
+ ui::DCRendererLayerParams params;
+ params.y_image = y_image;
+ params.uv_image = uv_image;
+ params.content_rect = gfx::Rect(y_size);
+ params.quad_rect = gfx::Rect(window_size);
surface_->ScheduleDCLayer(params);
EXPECT_EQ(gfx::SwapResult::SWAP_ACK,
@@ -828,15 +901,16 @@ TEST_F(DirectCompositionPixelTest, VideoHandleSwapchain) {
resource->CreateSharedHandle(nullptr, DXGI_SHARED_RESOURCE_READ, nullptr,
&handle);
// The format doesn't matter, since we aren't binding.
- scoped_refptr<gl::GLImageDXGIHandle> image_dxgi(
- new gl::GLImageDXGIHandle(texture_size, 0, gfx::BufferFormat::RGBA_8888));
- ASSERT_TRUE(image_dxgi->Initialize(base::win::ScopedHandle(handle)));
-
- ui::DCRendererLayerParams params(
- false, gfx::Rect(), 1, gfx::Transform(),
- std::vector<scoped_refptr<gl::GLImage>>{image_dxgi},
- gfx::RectF(gfx::Rect(texture_size)), gfx::Rect(window_size), 0, 0, 1.0, 0,
- ui::ProtectedVideoType::kClear);
+ scoped_refptr<gl::GLImageDXGI> image_dxgi(
+ new gl::GLImageDXGI(texture_size, nullptr));
+ ASSERT_TRUE(image_dxgi->InitializeHandle(base::win::ScopedHandle(handle), 0,
+ gfx::BufferFormat::RGBA_8888));
+
+ ui::DCRendererLayerParams params;
+ params.y_image = image_dxgi;
+ params.uv_image = image_dxgi;
+ params.content_rect = gfx::Rect(texture_size);
+ params.quad_rect = gfx::Rect(window_size);
surface_->ScheduleDCLayer(params);
EXPECT_EQ(gfx::SwapResult::SWAP_ACK,
@@ -878,17 +952,16 @@ TEST_F(DirectCompositionPixelTest, SkipVideoLayerEmptyBoundsRect) {
resource->CreateSharedHandle(nullptr, DXGI_SHARED_RESOURCE_READ, nullptr,
&handle);
// The format doesn't matter, since we aren't binding.
- scoped_refptr<gl::GLImageDXGIHandle> image_dxgi(
- new gl::GLImageDXGIHandle(texture_size, 0, gfx::BufferFormat::RGBA_8888));
- ASSERT_TRUE(image_dxgi->Initialize(base::win::ScopedHandle(handle)));
+ scoped_refptr<gl::GLImageDXGI> image_dxgi(
+ new gl::GLImageDXGI(texture_size, nullptr));
+ ASSERT_TRUE(image_dxgi->InitializeHandle(base::win::ScopedHandle(handle), 0,
+ gfx::BufferFormat::RGBA_8888));
// Layer with empty bounds rect.
- gfx::Rect bounds_rect;
- ui::DCRendererLayerParams params(
- false, gfx::Rect(), 1, gfx::Transform(),
- std::vector<scoped_refptr<gl::GLImage>>{image_dxgi},
- gfx::RectF(gfx::Rect(texture_size)), bounds_rect, 0, 0, 1.0, 0,
- ui::ProtectedVideoType::kClear);
+ ui::DCRendererLayerParams params;
+ params.y_image = image_dxgi;
+ params.uv_image = image_dxgi;
+ params.content_rect = gfx::Rect(texture_size);
surface_->ScheduleDCLayer(params);
EXPECT_EQ(gfx::SwapResult::SWAP_ACK,
@@ -935,16 +1008,16 @@ TEST_F(DirectCompositionPixelTest, SkipVideoLayerEmptyContentsRect) {
resource->CreateSharedHandle(nullptr, DXGI_SHARED_RESOURCE_READ, nullptr,
&handle);
// The format doesn't matter, since we aren't binding.
- scoped_refptr<gl::GLImageDXGIHandle> image_dxgi(
- new gl::GLImageDXGIHandle(texture_size, 0, gfx::BufferFormat::RGBA_8888));
- ASSERT_TRUE(image_dxgi->Initialize(base::win::ScopedHandle(handle)));
+ scoped_refptr<gl::GLImageDXGI> image_dxgi(
+ new gl::GLImageDXGI(texture_size, nullptr));
+ ASSERT_TRUE(image_dxgi->InitializeHandle(base::win::ScopedHandle(handle), 0,
+ gfx::BufferFormat::RGBA_8888));
// Layer with empty content rect.
- gfx::RectF contents_rect;
- ui::DCRendererLayerParams params(
- false, gfx::Rect(), 1, gfx::Transform(),
- std::vector<scoped_refptr<gl::GLImage>>{image_dxgi}, contents_rect,
- gfx::Rect(window_size), 0, 0, 1.0, 0, ui::ProtectedVideoType::kClear);
+ ui::DCRendererLayerParams params;
+ params.y_image = image_dxgi;
+ params.uv_image = image_dxgi;
+ params.quad_rect = gfx::Rect(window_size);
surface_->ScheduleDCLayer(params);
EXPECT_EQ(gfx::SwapResult::SWAP_ACK,
@@ -990,17 +1063,18 @@ TEST_F(DirectCompositionPixelTest, NV12SwapChain) {
resource->CreateSharedHandle(nullptr, DXGI_SHARED_RESOURCE_READ, nullptr,
&handle);
// The format doesn't matter, since we aren't binding.
- scoped_refptr<gl::GLImageDXGIHandle> image_dxgi(
- new gl::GLImageDXGIHandle(texture_size, 0, gfx::BufferFormat::RGBA_8888));
- ASSERT_TRUE(image_dxgi->Initialize(base::win::ScopedHandle(handle)));
+ scoped_refptr<gl::GLImageDXGI> image_dxgi(
+ new gl::GLImageDXGI(texture_size, nullptr));
+ ASSERT_TRUE(image_dxgi->InitializeHandle(base::win::ScopedHandle(handle), 0,
+ gfx::BufferFormat::RGBA_8888));
// Pass content rect with odd width and height. Surface should round up width
// and height when creating swap chain.
- gfx::RectF contents_rect(0, 0, 49, 49);
- ui::DCRendererLayerParams params(
- false, gfx::Rect(), 1, gfx::Transform(),
- std::vector<scoped_refptr<gl::GLImage>>{image_dxgi}, contents_rect,
- gfx::Rect(window_size), 0, 0, 1.0, 0, ui::ProtectedVideoType::kClear);
+ ui::DCRendererLayerParams params;
+ params.y_image = image_dxgi;
+ params.uv_image = image_dxgi;
+ params.content_rect = gfx::Rect(0, 0, 49, 49);
+ params.quad_rect = gfx::Rect(window_size);
surface_->ScheduleDCLayer(params);
EXPECT_EQ(gfx::SwapResult::SWAP_ACK,
@@ -1026,87 +1100,75 @@ TEST_F(DirectCompositionPixelTest, NV12SwapChain) {
<< actual_color;
}
-// Test protected video flags
-TEST(DirectCompositionSurfaceTest, ProtectedVideos) {
+TEST_F(DirectCompositionPixelTest, NonZeroBoundsOffset) {
if (!CheckIfDCSupported())
return;
+ InitializeSurface();
+ // Swap chain size is overridden to content rect size only if scaled overlays
+ // are supported.
+ DirectCompositionSurfaceWin::SetScaledOverlaysSupportedForTesting(true);
+ surface_->SetEnableDCLayers(true);
- TestImageTransportSurfaceDelegate delegate;
- scoped_refptr<DirectCompositionSurfaceWin> surface(
- new DirectCompositionSurfaceWin(nullptr, delegate.AsWeakPtr(),
- ui::GetHiddenWindow()));
- EXPECT_TRUE(surface->Initialize());
-
- scoped_refptr<gl::GLContext> context =
- gl::init::CreateGLContext(nullptr, surface.get(), gl::GLContextAttribs());
- EXPECT_TRUE(context->MakeCurrent(surface.get()));
+ gfx::Size window_size(100, 100);
+ EXPECT_TRUE(surface_->Resize(window_size, 1.0,
+ gl::GLSurface::ColorSpace::UNSPECIFIED, true));
+ EXPECT_TRUE(surface_->SetDrawRectangle(gfx::Rect(window_size)));
- surface->SetEnableDCLayers(true);
+ glClearColor(0.0, 0.0, 0.0, 1.0);
+ glClear(GL_COLOR_BUFFER_BIT);
Microsoft::WRL::ComPtr<ID3D11Device> d3d11_device =
gl::QueryD3D11DeviceObjectFromANGLE();
- gfx::Size texture_size(1280, 720);
+ gfx::Size texture_size(50, 50);
Microsoft::WRL::ComPtr<ID3D11Texture2D> texture =
- CreateNV12Texture(d3d11_device, texture_size, false);
-
+ CreateNV12Texture(d3d11_device, texture_size, true);
+ Microsoft::WRL::ComPtr<IDXGIResource1> resource;
+ texture.CopyTo(resource.GetAddressOf());
+ HANDLE handle = 0;
+ resource->CreateSharedHandle(nullptr, DXGI_SHARED_RESOURCE_READ, nullptr,
+ &handle);
+ // The format doesn't matter, since we aren't binding.
scoped_refptr<gl::GLImageDXGI> image_dxgi(
new gl::GLImageDXGI(texture_size, nullptr));
- image_dxgi->SetTexture(texture, 0);
- image_dxgi->SetColorSpace(gfx::ColorSpace::CreateREC709());
- gfx::Size window_size(640, 360);
-
- // Clear video
- {
- ui::DCRendererLayerParams params(
- false, gfx::Rect(), 1, gfx::Transform(),
- std::vector<scoped_refptr<gl::GLImage>>{image_dxgi},
- gfx::RectF(gfx::Rect(texture_size)), gfx::Rect(window_size), 0, 0, 1.0,
- 0, ui::ProtectedVideoType::kClear);
-
- surface->ScheduleDCLayer(params);
- EXPECT_EQ(gfx::SwapResult::SWAP_ACK,
- surface->SwapBuffers(base::DoNothing()));
- Microsoft::WRL::ComPtr<IDXGISwapChain1> swap_chain =
- surface->GetLayerSwapChainForTesting(0);
- ASSERT_TRUE(swap_chain);
-
- DXGI_SWAP_CHAIN_DESC Desc;
- EXPECT_TRUE(SUCCEEDED(swap_chain->GetDesc(&Desc)));
- unsigned display_only_flag = Desc.Flags & DXGI_SWAP_CHAIN_FLAG_DISPLAY_ONLY;
- unsigned hw_protected_flag = Desc.Flags & DXGI_SWAP_CHAIN_FLAG_HW_PROTECTED;
- EXPECT_EQ(display_only_flag, (unsigned)0);
- EXPECT_EQ(hw_protected_flag, (unsigned)0);
- }
+ ASSERT_TRUE(image_dxgi->InitializeHandle(base::win::ScopedHandle(handle), 0,
+ gfx::BufferFormat::RGBA_8888));
+
+ ui::DCRendererLayerParams params;
+ params.y_image = image_dxgi;
+ params.uv_image = image_dxgi;
+ params.content_rect = gfx::Rect(texture_size);
+ params.quad_rect = gfx::Rect(gfx::Point(25, 25), texture_size);
+ surface_->ScheduleDCLayer(params);
- // Software protected video
- {
- ui::DCRendererLayerParams params(
- false, gfx::Rect(), 1, gfx::Transform(),
- std::vector<scoped_refptr<gl::GLImage>>{image_dxgi},
- gfx::RectF(gfx::Rect(texture_size)), gfx::Rect(window_size), 0, 0, 1.0,
- 0, ui::ProtectedVideoType::kSoftwareProtected);
+ EXPECT_EQ(gfx::SwapResult::SWAP_ACK,
+ surface_->SwapBuffers(base::DoNothing()));
- surface->ScheduleDCLayer(params);
- EXPECT_EQ(gfx::SwapResult::SWAP_ACK,
- surface->SwapBuffers(base::DoNothing()));
- Microsoft::WRL::ComPtr<IDXGISwapChain1> swap_chain =
- surface->GetLayerSwapChainForTesting(0);
- ASSERT_TRUE(swap_chain);
+ Sleep(1000);
- DXGI_SWAP_CHAIN_DESC Desc;
- EXPECT_TRUE(SUCCEEDED(swap_chain->GetDesc(&Desc)));
- unsigned display_only_flag = Desc.Flags & DXGI_SWAP_CHAIN_FLAG_DISPLAY_ONLY;
- unsigned hw_protected_flag = Desc.Flags & DXGI_SWAP_CHAIN_FLAG_HW_PROTECTED;
- EXPECT_EQ(display_only_flag, (unsigned)DXGI_SWAP_CHAIN_FLAG_DISPLAY_ONLY);
- EXPECT_EQ(hw_protected_flag, (unsigned)0);
+ SkColor video_color = SkColorSetRGB(0xe1, 0x90, 0xeb);
+ struct {
+ gfx::Point point;
+ SkColor expected_color;
+ } test_cases[] = {
+ // Outside bounds
+ {{24, 24}, SK_ColorBLACK},
+ {{75, 75}, SK_ColorBLACK},
+ // Inside bounds
+ {{25, 25}, video_color},
+ {{74, 74}, video_color},
+ };
+
+ auto pixels = ReadBackWindow(window_.hwnd(), window_size);
+
+ for (const auto& test_case : test_cases) {
+ const auto& point = test_case.point;
+ const auto& expected_color = test_case.expected_color;
+ SkColor actual_color = pixels[window_size.width() * point.y() + point.x()];
+ EXPECT_TRUE(AreColorsSimilar(expected_color, actual_color))
+ << std::hex << "Expected " << expected_color << " Actual "
+ << actual_color << " at " << point.ToString();
}
-
-  // TODO(magchen): Add a hardware protected video test when hardware protected
-  // video support is enabled by default in the Intel driver and Chrome.
-
- context = nullptr;
- DestroySurface(std::move(surface));
}
TEST_F(DirectCompositionPixelTest, ResizeVideoLayer) {
@@ -1135,53 +1197,50 @@ TEST_F(DirectCompositionPixelTest, ResizeVideoLayer) {
resource->CreateSharedHandle(nullptr, DXGI_SHARED_RESOURCE_READ, nullptr,
&handle);
// The format doesn't matter, since we aren't binding.
- scoped_refptr<gl::GLImageDXGIHandle> image_dxgi(
- new gl::GLImageDXGIHandle(texture_size, 0, gfx::BufferFormat::RGBA_8888));
- ASSERT_TRUE(image_dxgi->Initialize(base::win::ScopedHandle(handle)));
+ scoped_refptr<gl::GLImageDXGI> image_dxgi(
+ new gl::GLImageDXGI(texture_size, nullptr));
+ ASSERT_TRUE(image_dxgi->InitializeHandle(base::win::ScopedHandle(handle), 0,
+ gfx::BufferFormat::RGBA_8888));
{
- gfx::RectF contents_rect = gfx::RectF(gfx::Rect(texture_size));
- ui::DCRendererLayerParams params(
- false, gfx::Rect(), 1, gfx::Transform(),
- std::vector<scoped_refptr<gl::GLImage>>{image_dxgi}, contents_rect,
- gfx::Rect(window_size), 0, 0, 1.0, 0, ui::ProtectedVideoType::kClear);
-
+ ui::DCRendererLayerParams params;
+ params.y_image = image_dxgi;
+ params.uv_image = image_dxgi;
+ params.content_rect = gfx::Rect(texture_size);
+ params.quad_rect = gfx::Rect(window_size);
surface_->ScheduleDCLayer(params);
EXPECT_EQ(gfx::SwapResult::SWAP_ACK,
surface_->SwapBuffers(base::DoNothing()));
+ }
- Microsoft::WRL::ComPtr<IDXGISwapChain1> swap_chain =
- surface_->GetLayerSwapChainForTesting(0);
- ASSERT_TRUE(swap_chain);
+ Microsoft::WRL::ComPtr<IDXGISwapChain1> swap_chain =
+ surface_->GetLayerSwapChainForTesting(0);
+ ASSERT_TRUE(swap_chain);
- DXGI_SWAP_CHAIN_DESC1 desc;
- EXPECT_TRUE(SUCCEEDED(swap_chain->GetDesc1(&desc)));
- EXPECT_EQ(desc.Width, 50u);
- EXPECT_EQ(desc.Height, 50u);
- }
+ DXGI_SWAP_CHAIN_DESC1 desc;
+ EXPECT_TRUE(SUCCEEDED(swap_chain->GetDesc1(&desc)));
+ EXPECT_EQ(desc.Width, 50u);
+ EXPECT_EQ(desc.Height, 50u);
{
- gfx::RectF contents_rect(30, 30);
- ui::DCRendererLayerParams params(
- false, gfx::Rect(), 1, gfx::Transform(),
- std::vector<scoped_refptr<gl::GLImage>>{image_dxgi}, contents_rect,
- gfx::Rect(window_size), 0, 0, 1.0, 0, ui::ProtectedVideoType::kClear);
-
+ ui::DCRendererLayerParams params;
+ params.y_image = image_dxgi;
+ params.uv_image = image_dxgi;
+ params.content_rect = gfx::Rect(30, 30);
+ params.quad_rect = gfx::Rect(window_size);
surface_->ScheduleDCLayer(params);
EXPECT_EQ(gfx::SwapResult::SWAP_ACK,
surface_->SwapBuffers(base::DoNothing()));
-
- Microsoft::WRL::ComPtr<IDXGISwapChain1> swap_chain =
- surface_->GetLayerSwapChainForTesting(0).Get();
- ASSERT_TRUE(swap_chain);
-
- DXGI_SWAP_CHAIN_DESC1 desc;
- EXPECT_TRUE(SUCCEEDED(swap_chain->GetDesc1(&desc)));
- EXPECT_EQ(desc.Width, 30u);
- EXPECT_EQ(desc.Height, 30u);
}
+
+ // Swap chain isn't recreated on resize.
+ ASSERT_TRUE(surface_->GetLayerSwapChainForTesting(0));
+ EXPECT_EQ(swap_chain.Get(), surface_->GetLayerSwapChainForTesting(0).Get());
+ EXPECT_TRUE(SUCCEEDED(swap_chain->GetDesc1(&desc)));
+ EXPECT_EQ(desc.Width, 30u);
+ EXPECT_EQ(desc.Height, 30u);
}
} // namespace
diff --git a/chromium/gpu/ipc/service/gles2_command_buffer_stub.cc b/chromium/gpu/ipc/service/gles2_command_buffer_stub.cc
index 24043527235..d1f37958d4d 100644
--- a/chromium/gpu/ipc/service/gles2_command_buffer_stub.cc
+++ b/chromium/gpu/ipc/service/gles2_command_buffer_stub.cc
@@ -19,7 +19,6 @@
#include "base/time/time.h"
#include "base/trace_event/trace_event.h"
#include "build/build_config.h"
-#include "components/viz/common/features.h"
#include "gpu/command_buffer/common/constants.h"
#include "gpu/command_buffer/common/gpu_memory_buffer_support.h"
#include "gpu/command_buffer/common/mailbox.h"
@@ -125,19 +124,6 @@ gpu::ContextResult GLES2CommandBufferStub::Initialize(
// only a single context. See crbug.com/510243 for details.
use_virtualized_gl_context_ |= manager->mailbox_manager()->UsesSync();
- const auto& gpu_feature_info = manager->gpu_feature_info();
- const bool use_oop_rasterization =
- gpu_feature_info.status_values[GPU_FEATURE_TYPE_OOP_RASTERIZATION] ==
- gpu::kGpuFeatureStatusEnabled;
-
- // With OOP-R, SkiaRenderer and Skia DDL, we will only have one GLContext
- // and share it with RasterDecoders and DisplayCompositor. So it is not
- // necessary to use virtualized gl context anymore.
- // TODO(penghuang): Make virtualized gl context work with SkiaRenderer + DDL +
- // OOPR. https://crbug.com/838899
- if (features::IsUsingSkiaDeferredDisplayList() && use_oop_rasterization)
- use_virtualized_gl_context_ = false;
-
bool offscreen = (surface_handle_ == kNullSurfaceHandle);
gl::GLSurface* default_surface = manager->default_offscreen_surface();
// On low-spec Android devices, the default offscreen surface is
@@ -244,6 +230,9 @@ gpu::ContextResult GLES2CommandBufferStub::Initialize(
}
if (context_group_->use_passthrough_cmd_decoder()) {
+ // Virtualized contexts don't work with passthrough command decoder.
+ // See https://crbug.com/914976
+ use_virtualized_gl_context_ = false;
// When using the passthrough command decoder, only share with other
// contexts in the explicitly requested share group
if (share_command_buffer_stub) {
@@ -414,11 +403,8 @@ void GLES2CommandBufferStub::BufferPresented(
const gfx::PresentationFeedback& feedback) {
SwapBufferParams params = pending_presented_params_.front();
pending_presented_params_.pop_front();
-
- if (ShouldSendBufferPresented(params.flags, feedback.flags)) {
- Send(new GpuCommandBufferMsg_BufferPresented(route_id_, params.swap_id,
- feedback));
- }
+ Send(new GpuCommandBufferMsg_BufferPresented(route_id_, params.swap_id,
+ feedback));
}
void GLES2CommandBufferStub::AddFilter(IPC::MessageFilter* message_filter) {
@@ -441,6 +427,8 @@ void GLES2CommandBufferStub::OnTakeFrontBuffer(const Mailbox& mailbox) {
void GLES2CommandBufferStub::OnReturnFrontBuffer(const Mailbox& mailbox,
bool is_lost) {
+ // No need to pull texture updates.
+ DCHECK(!context_group_->mailbox_manager()->UsesSync());
gles2_decoder_->ReturnFrontBuffer(mailbox, is_lost);
}
diff --git a/chromium/gpu/ipc/service/gpu_channel.cc b/chromium/gpu/ipc/service/gpu_channel.cc
index abba347c15a..c966dfe9f20 100644
--- a/chromium/gpu/ipc/service/gpu_channel.cc
+++ b/chromium/gpu/ipc/service/gpu_channel.cc
@@ -81,6 +81,7 @@ class GPU_IPC_SERVICE_EXPORT GpuChannelMessageFilter
GpuChannelMessageFilter(
GpuChannel* gpu_channel,
Scheduler* scheduler,
+ ImageDecodeAcceleratorWorker* image_decode_accelerator_worker,
scoped_refptr<base::SingleThreadTaskRunner> main_task_runner);
// Methods called on main thread.
@@ -105,6 +106,10 @@ class GPU_IPC_SERVICE_EXPORT GpuChannelMessageFilter
private:
~GpuChannelMessageFilter() override;
+ SequenceId GetSequenceId(int32_t route_id) const;
+
+ bool HandleFlushMessage(const IPC::Message& message);
+
bool MessageErrorHandler(const IPC::Message& message, const char* error_msg);
IPC::Channel* ipc_channel_ = nullptr;
@@ -128,12 +133,14 @@ class GPU_IPC_SERVICE_EXPORT GpuChannelMessageFilter
GpuChannelMessageFilter::GpuChannelMessageFilter(
GpuChannel* gpu_channel,
Scheduler* scheduler,
+ ImageDecodeAcceleratorWorker* image_decode_accelerator_worker,
scoped_refptr<base::SingleThreadTaskRunner> main_task_runner)
: gpu_channel_(gpu_channel),
scheduler_(scheduler),
main_task_runner_(std::move(main_task_runner)),
image_decode_accelerator_stub_(
base::MakeRefCounted<ImageDecodeAcceleratorStub>(
+ image_decode_accelerator_worker,
gpu_channel,
static_cast<int32_t>(
GpuChannelReservedRoutes::kImageDecodeAccelerator))) {
@@ -226,6 +233,17 @@ bool GpuChannelMessageFilter::OnMessageReceived(const IPC::Message& message) {
if (message.should_unblock() || message.is_reply())
return MessageErrorHandler(message, "Unexpected message type");
+ switch (message.type()) {
+ case GpuCommandBufferMsg_AsyncFlush::ID:
+ case GpuCommandBufferMsg_DestroyTransferBuffer::ID:
+ case GpuCommandBufferMsg_ReturnFrontBuffer::ID:
+ case GpuChannelMsg_CreateSharedImage::ID:
+ case GpuChannelMsg_DestroySharedImage::ID:
+ return MessageErrorHandler(message, "Invalid message");
+ default:
+ break;
+ }
+
if (message.type() == GpuChannelMsg_Nop::ID) {
IPC::Message* reply = IPC::SyncMessage::GenerateReply(&message);
ipc_channel_->Send(reply);
@@ -241,68 +259,77 @@ bool GpuChannelMessageFilter::OnMessageReceived(const IPC::Message& message) {
if (!gpu_channel_)
return MessageErrorHandler(message, "Channel destroyed");
- switch (message.type()) {
- case GpuCommandBufferMsg_AsyncFlush::ID:
- case GpuCommandBufferMsg_DestroyTransferBuffer::ID:
- case GpuChannelMsg_CreateSharedImage::ID:
- case GpuChannelMsg_DestroySharedImage::ID:
- return MessageErrorHandler(message, "Invalid message");
- default:
- break;
+ // Handle flush first so that it doesn't get handled out of order.
+ if (message.type() == GpuChannelMsg_FlushDeferredMessages::ID)
+ return HandleFlushMessage(message);
+
+ if (message.routing_id() ==
+ static_cast<int32_t>(GpuChannelReservedRoutes::kImageDecodeAccelerator)) {
+ if (!image_decode_accelerator_stub_->OnMessageReceived(message))
+ return MessageErrorHandler(message, "Invalid image decode request");
}
- if (message.type() == GpuChannelMsg_FlushDeferredMessages::ID) {
- GpuChannelMsg_FlushDeferredMessages::Param params;
+ bool handle_out_of_order =
+ message.routing_id() == MSG_ROUTING_CONTROL ||
+ message.type() == GpuCommandBufferMsg_WaitForTokenInRange::ID ||
+ message.type() == GpuCommandBufferMsg_WaitForGetOffsetInRange::ID;
- if (!GpuChannelMsg_FlushDeferredMessages::Read(&message, &params))
- return MessageErrorHandler(message, "Invalid flush message");
+ if (handle_out_of_order) {
+ // It's OK to post task that may never run even for sync messages, because
+ // if the channel is destroyed, the client Send will fail.
+ main_task_runner_->PostTask(
+ FROM_HERE, base::BindOnce(&GpuChannel::HandleOutOfOrderMessage,
+ gpu_channel_->AsWeakPtr(), message));
+ return true;
+ }
- std::vector<GpuDeferredMessage> deferred_messages =
- std::get<0>(std::move(params));
- std::vector<Scheduler::Task> tasks;
- tasks.reserve(deferred_messages.size());
+ // Messages which do not have sync token dependencies.
+ SequenceId sequence_id = GetSequenceId(message.routing_id());
+ if (sequence_id.is_null())
+ return MessageErrorHandler(message, "Invalid route id");
- for (auto& deferred_message : deferred_messages) {
- auto it = route_sequences_.find(deferred_message.message.routing_id());
- if (it == route_sequences_.end()) {
- DLOG(ERROR) << "Invalid route id in flush list";
- continue;
- }
+ scheduler_->ScheduleTask(
+ Scheduler::Task(sequence_id,
+ base::BindOnce(&GpuChannel::HandleMessage,
+ gpu_channel_->AsWeakPtr(), message),
+ std::vector<SyncToken>()));
+ return true;
+}
- tasks.emplace_back(
- it->second /* sequence_id */,
- base::BindOnce(&GpuChannel::HandleMessage, gpu_channel_->AsWeakPtr(),
- std::move(deferred_message.message)),
- std::move(deferred_message.sync_token_fences));
- }
+SequenceId GpuChannelMessageFilter::GetSequenceId(int32_t route_id) const {
+ gpu_channel_lock_.AssertAcquired();
+ auto it = route_sequences_.find(route_id);
+ if (it == route_sequences_.end())
+ return SequenceId();
+ return it->second;
+}
- scheduler_->ScheduleTasks(std::move(tasks));
- } else if (message.routing_id() ==
- static_cast<int32_t>(
- GpuChannelReservedRoutes::kImageDecodeAccelerator)) {
- if (!image_decode_accelerator_stub_->OnMessageReceived(message))
- return MessageErrorHandler(message, "Invalid image decode request");
- } else if (message.routing_id() == MSG_ROUTING_CONTROL ||
- message.type() == GpuCommandBufferMsg_WaitForTokenInRange::ID ||
- message.type() ==
- GpuCommandBufferMsg_WaitForGetOffsetInRange::ID) {
- // It's OK to post task that may never run even for sync messages, because
- // if the channel is destroyed, the client Send will fail.
- main_task_runner_->PostTask(FROM_HERE,
- base::Bind(&GpuChannel::HandleOutOfOrderMessage,
- gpu_channel_->AsWeakPtr(), message));
- } else {
- auto it = route_sequences_.find(message.routing_id());
- if (it == route_sequences_.end())
- return MessageErrorHandler(message, "Invalid route id");
-
- scheduler_->ScheduleTask(
- Scheduler::Task(it->second /* sequence_id */,
- base::BindOnce(&GpuChannel::HandleMessage,
- gpu_channel_->AsWeakPtr(), message),
- std::vector<SyncToken>()));
- }
+bool GpuChannelMessageFilter::HandleFlushMessage(const IPC::Message& message) {
+ DCHECK_EQ(message.type(), GpuChannelMsg_FlushDeferredMessages::ID);
+ gpu_channel_lock_.AssertAcquired();
+
+ GpuChannelMsg_FlushDeferredMessages::Param params;
+ if (!GpuChannelMsg_FlushDeferredMessages::Read(&message, &params))
+ return MessageErrorHandler(message, "Invalid flush message");
+
+ std::vector<GpuDeferredMessage> deferred_messages =
+ std::get<0>(std::move(params));
+ std::vector<Scheduler::Task> tasks;
+ tasks.reserve(deferred_messages.size());
+ for (auto& deferred_message : deferred_messages) {
+ auto it = route_sequences_.find(deferred_message.message.routing_id());
+ if (it == route_sequences_.end()) {
+ DLOG(ERROR) << "Invalid route id in flush list";
+ continue;
+ }
+ tasks.emplace_back(
+ it->second /* sequence_id */,
+ base::BindOnce(&GpuChannel::HandleMessage, gpu_channel_->AsWeakPtr(),
+ std::move(deferred_message.message)),
+ std::move(deferred_message.sync_token_fences));
+ }
+ scheduler_->ScheduleTasks(std::move(tasks));
return true;
}
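
Taken together, the refactored filter dispatches in three tiers: flush messages are unpacked inline to preserve their relative order, control and wait messages bypass the scheduler entirely, and everything else is scheduled on its route's sequence. A compilable classification sketch with illustrative constants (it ignores the image-decode route handled above):

    #include <cstdint>

    enum class Tier { kFlushInline, kMainThreadOutOfOrder, kScheduledOnSequence };

    // Stand-ins for the real message IDs and routes (values illustrative).
    constexpr uint32_t kFlushDeferredMessages = 1;
    constexpr uint32_t kWaitForTokenInRange = 2;
    constexpr uint32_t kWaitForGetOffsetInRange = 3;
    constexpr int32_t kControlRoute = -1;

    // Classify a message the way the refactored filter dispatches it.
    Tier Classify(uint32_t type, int32_t routing_id) {
      if (type == kFlushDeferredMessages)
        return Tier::kFlushInline;           // handled first, kept in order
      if (routing_id == kControlRoute || type == kWaitForTokenInRange ||
          type == kWaitForGetOffsetInRange)
        return Tier::kMainThreadOutOfOrder;  // posted to the main thread
      return Tier::kScheduledOnSequence;     // per-route scheduler task
    }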
@@ -326,7 +353,8 @@ GpuChannel::GpuChannel(
scoped_refptr<base::SingleThreadTaskRunner> io_task_runner,
int32_t client_id,
uint64_t client_tracing_id,
- bool is_gpu_host)
+ bool is_gpu_host,
+ ImageDecodeAcceleratorWorker* image_decode_accelerator_worker)
: gpu_channel_manager_(gpu_channel_manager),
scheduler_(scheduler),
sync_point_manager_(sync_point_manager),
@@ -340,7 +368,8 @@ GpuChannel::GpuChannel(
weak_factory_(this) {
DCHECK(gpu_channel_manager_);
DCHECK(client_id_);
- filter_ = new GpuChannelMessageFilter(this, scheduler, task_runner);
+ filter_ = new GpuChannelMessageFilter(
+ this, scheduler, image_decode_accelerator_worker, task_runner);
// SharedImageInterfaceProxy/Stub is a singleton per channel, using a reserved
// route.
const int32_t shared_image_route_id =
@@ -454,10 +483,6 @@ bool GpuChannel::HasActiveWebGLContext() const {
return false;
}
-void GpuChannel::LoseAllContexts() {
- gpu_channel_manager_->LoseAllContexts();
-}
-
void GpuChannel::MarkAllContextsLost() {
for (auto& kv : stubs_)
kv.second->MarkContextLost();
@@ -503,8 +528,7 @@ void GpuChannel::HandleMessage(const IPC::Message& msg) {
// If we get descheduled or yield while processing a message.
if (stub && (stub->HasUnprocessedCommands() || !stub->IsScheduled())) {
- DCHECK((uint32_t)GpuCommandBufferMsg_AsyncFlush::ID == msg.type() ||
- (uint32_t)GpuCommandBufferMsg_WaitSyncToken::ID == msg.type());
+ DCHECK_EQ(GpuCommandBufferMsg_AsyncFlush::ID, msg.type());
scheduler_->ContinueTask(
stub->sequence_id(),
base::BindOnce(&GpuChannel::HandleMessage, AsWeakPtr(), msg));
@@ -572,7 +596,7 @@ void GpuChannel::OnCreateCommandBuffer(
return;
}
- if (gpu_channel_manager_->is_exiting_for_lost_context()) {
+ if (gpu_channel_manager_->delegate()->IsExiting()) {
LOG(ERROR) << "ContextResult::kTransientFailure: trying to create command "
"buffer during process shutdown.";
*result = gpu::ContextResult::kTransientFailure;
@@ -623,6 +647,9 @@ void GpuChannel::OnCreateCommandBuffer(
bool use_passthrough_cmd_decoder =
gpu_channel_manager_->gpu_preferences().use_passthrough_cmd_decoder &&
gles2::PassthroughCommandDecoderSupported();
+ bool allow_raster_decoder =
+ !use_passthrough_cmd_decoder ||
+ gpu_channel_manager_->gpu_preferences().enable_passthrough_raster_decoder;
if (init_params.attribs.context_type == CONTEXT_TYPE_WEBGPU) {
if (!gpu_channel_manager_->gpu_preferences().enable_webgpu) {
@@ -632,7 +659,7 @@ void GpuChannel::OnCreateCommandBuffer(
stub = std::make_unique<WebGPUCommandBufferStub>(
this, init_params, command_buffer_id, sequence_id, stream_id, route_id);
- } else if (!use_passthrough_cmd_decoder &&
+ } else if (allow_raster_decoder &&
init_params.attribs.enable_raster_interface &&
!init_params.attribs.enable_gles2_interface) {
stub = std::make_unique<RasterCommandBufferStub>(
@@ -698,14 +725,14 @@ void GpuChannel::CacheShader(const std::string& key,
void GpuChannel::AddFilter(IPC::MessageFilter* filter) {
io_task_runner_->PostTask(
- FROM_HERE, base::Bind(&GpuChannelMessageFilter::AddChannelFilter, filter_,
- base::RetainedRef(filter)));
+ FROM_HERE, base::BindOnce(&GpuChannelMessageFilter::AddChannelFilter,
+ filter_, base::RetainedRef(filter)));
}
void GpuChannel::RemoveFilter(IPC::MessageFilter* filter) {
io_task_runner_->PostTask(
- FROM_HERE, base::Bind(&GpuChannelMessageFilter::RemoveChannelFilter,
- filter_, base::RetainedRef(filter)));
+ FROM_HERE, base::BindOnce(&GpuChannelMessageFilter::RemoveChannelFilter,
+ filter_, base::RetainedRef(filter)));
}
uint64_t GpuChannel::GetMemoryUsage() const {
diff --git a/chromium/gpu/ipc/service/gpu_channel.h b/chromium/gpu/ipc/service/gpu_channel.h
index aa4dc9b188a..2937600df96 100644
--- a/chromium/gpu/ipc/service/gpu_channel.h
+++ b/chromium/gpu/ipc/service/gpu_channel.h
@@ -44,6 +44,7 @@ namespace gpu {
class GpuChannelManager;
class GpuChannelMessageFilter;
+class ImageDecodeAcceleratorWorker;
class Scheduler;
class SharedImageStub;
class SyncPointManager;
@@ -62,7 +63,8 @@ class GPU_IPC_SERVICE_EXPORT GpuChannel : public IPC::Listener,
scoped_refptr<base::SingleThreadTaskRunner> io_task_runner,
int32_t client_id,
uint64_t client_tracing_id,
- bool is_gpu_host);
+ bool is_gpu_host,
+ ImageDecodeAcceleratorWorker* image_decode_accelerator_worker);
~GpuChannel() override;
// Init() sets up the underlying IPC channel. Use a separate method because
@@ -121,7 +123,6 @@ class GPU_IPC_SERVICE_EXPORT GpuChannel : public IPC::Listener,
CommandBufferStub* LookupCommandBuffer(int32_t route_id);
bool HasActiveWebGLContext() const;
- void LoseAllContexts();
void MarkAllContextsLost();
// Called to add a listener for a particular message routing ID.
diff --git a/chromium/gpu/ipc/service/gpu_channel_manager.cc b/chromium/gpu/ipc/service/gpu_channel_manager.cc
index da7d7a571c6..ebf60ad5aad 100644
--- a/chromium/gpu/ipc/service/gpu_channel_manager.cc
+++ b/chromium/gpu/ipc/service/gpu_channel_manager.cc
@@ -64,6 +64,7 @@ GpuChannelManager::GpuChannelManager(
const GpuFeatureInfo& gpu_feature_info,
GpuProcessActivityFlags activity_flags,
scoped_refptr<gl::GLSurface> default_offscreen_surface,
+ ImageDecodeAcceleratorWorker* image_decode_accelerator_worker,
viz::VulkanContextProvider* vulkan_context_provider)
: task_runner_(task_runner),
io_task_runner_(io_task_runner),
@@ -80,24 +81,25 @@ GpuChannelManager::GpuChannelManager(
default_offscreen_surface_(std::move(default_offscreen_surface)),
gpu_memory_buffer_factory_(gpu_memory_buffer_factory),
gpu_feature_info_(gpu_feature_info),
- exiting_for_lost_context_(false),
+ image_decode_accelerator_worker_(image_decode_accelerator_worker),
activity_flags_(std::move(activity_flags)),
memory_pressure_listener_(
- base::Bind(&GpuChannelManager::HandleMemoryPressure,
- base::Unretained(this))),
+ base::BindRepeating(&GpuChannelManager::HandleMemoryPressure,
+ base::Unretained(this))),
vulkan_context_provider_(vulkan_context_provider),
weak_factory_(this) {
DCHECK(task_runner->BelongsToCurrentThread());
DCHECK(io_task_runner);
DCHECK(scheduler);
- const bool enable_raster_transport =
- gpu_feature_info_.status_values[GPU_FEATURE_TYPE_OOP_RASTERIZATION] ==
- gpu::kGpuFeatureStatusEnabled;
+ const bool enable_gr_shader_cache =
+ (gpu_feature_info_.status_values[GPU_FEATURE_TYPE_OOP_RASTERIZATION] ==
+ gpu::kGpuFeatureStatusEnabled) ||
+ features::IsUsingSkiaRenderer();
const bool disable_disk_cache =
gpu_preferences_.disable_gpu_shader_disk_cache ||
gpu_driver_bug_workarounds_.disable_program_disk_cache;
- if (enable_raster_transport && !disable_disk_cache)
+ if (enable_gr_shader_cache && !disable_disk_cache)
gr_shader_cache_.emplace(gpu_preferences.gpu_program_cache_size, this);
}
@@ -157,7 +159,8 @@ GpuChannel* GpuChannelManager::EstablishChannel(int client_id,
std::unique_ptr<GpuChannel> gpu_channel = std::make_unique<GpuChannel>(
this, scheduler_, sync_point_manager_, share_group_, task_runner_,
- io_task_runner_, client_id, client_tracing_id, is_gpu_host);
+ io_task_runner_, client_id, client_tracing_id, is_gpu_host,
+ image_decode_accelerator_worker_);
GpuChannel* gpu_channel_ptr = gpu_channel.get();
gpu_channels_[client_id] = std::move(gpu_channel);
@@ -167,15 +170,6 @@ GpuChannel* GpuChannelManager::EstablishChannel(int client_id,
void GpuChannelManager::InternalDestroyGpuMemoryBuffer(
gfx::GpuMemoryBufferId id,
int client_id) {
- io_task_runner_->PostTask(
- FROM_HERE,
- base::Bind(&GpuChannelManager::InternalDestroyGpuMemoryBufferOnIO,
- base::Unretained(this), id, client_id));
-}
-
-void GpuChannelManager::InternalDestroyGpuMemoryBufferOnIO(
- gfx::GpuMemoryBufferId id,
- int client_id) {
gpu_memory_buffer_factory_->DestroyGpuMemoryBuffer(id, client_id);
}
@@ -184,8 +178,8 @@ void GpuChannelManager::DestroyGpuMemoryBuffer(gfx::GpuMemoryBufferId id,
const SyncToken& sync_token) {
if (!sync_point_manager_->WaitOutOfOrder(
sync_token,
- base::Bind(&GpuChannelManager::InternalDestroyGpuMemoryBuffer,
- base::Unretained(this), id, client_id))) {
+ base::BindOnce(&GpuChannelManager::InternalDestroyGpuMemoryBuffer,
+ base::Unretained(this), id, client_id))) {
// No sync token or invalid sync token, destroy immediately.
InternalDestroyGpuMemoryBuffer(id, client_id);
}
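
As the hunk above shows, SyncPointManager::WaitOutOfOrder() returns false when no wait could be registered (no sync token, or the token is invalid or already released), in which case the caller performs the work inline. A minimal usage sketch (DestroyBuffer is a hypothetical callback):

    // True: |callback| was queued and runs once |sync_token| is released.
    // False: nothing was queued, so the caller must act immediately.
    if (!sync_point_manager->WaitOutOfOrder(
            sync_token, base::BindOnce(&DestroyBuffer, id, client_id))) {
      DestroyBuffer(id, client_id);
    }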
@@ -209,17 +203,8 @@ void GpuChannelManager::LoseAllContexts() {
kv.second->MarkAllContextsLost();
}
task_runner_->PostTask(FROM_HERE,
- base::Bind(&GpuChannelManager::DestroyAllChannels,
- weak_factory_.GetWeakPtr()));
-}
-
-void GpuChannelManager::MaybeExitOnContextLost() {
- if (!gpu_preferences().single_process && !gpu_preferences().in_process_gpu) {
- LOG(ERROR) << "Exiting GPU process because some drivers cannot recover"
- << " from problems.";
- exiting_for_lost_context_ = true;
- delegate_->ExitProcess();
- }
+ base::BindOnce(&GpuChannelManager::DestroyAllChannels,
+ weak_factory_.GetWeakPtr()));
}
void GpuChannelManager::DestroyAllChannels() {
@@ -275,8 +260,9 @@ void GpuChannelManager::ScheduleWakeUpGpu() {
DoWakeUpGpu();
base::ThreadTaskRunnerHandle::Get()->PostDelayedTask(
- FROM_HERE, base::Bind(&GpuChannelManager::ScheduleWakeUpGpu,
- weak_factory_.GetWeakPtr()),
+ FROM_HERE,
+ base::BindOnce(&GpuChannelManager::ScheduleWakeUpGpu,
+ weak_factory_.GetWeakPtr()),
base::TimeDelta::FromMilliseconds(kMaxGpuIdleTimeMs));
}
@@ -314,10 +300,10 @@ void GpuChannelManager::OnBackgroundCleanup() {
if (program_cache_)
program_cache_->Trim(0u);
- if (raster_decoder_context_state_) {
+ if (shared_context_state_) {
gr_cache_controller_.reset();
- raster_decoder_context_state_->context_lost = true;
- raster_decoder_context_state_.reset();
+ shared_context_state_->MarkContextLost();
+ shared_context_state_.reset();
}
SkGraphics::PurgeAllCaches();
@@ -325,8 +311,8 @@ void GpuChannelManager::OnBackgroundCleanup() {
#endif
void GpuChannelManager::OnApplicationBackgrounded() {
- if (raster_decoder_context_state_) {
- raster_decoder_context_state_->PurgeMemory(
+ if (shared_context_state_) {
+ shared_context_state_->PurgeMemory(
base::MemoryPressureListener::MemoryPressureLevel::
MEMORY_PRESSURE_LEVEL_CRITICAL);
}
@@ -341,18 +327,17 @@ void GpuChannelManager::HandleMemoryPressure(
program_cache_->HandleMemoryPressure(memory_pressure_level);
discardable_manager_.HandleMemoryPressure(memory_pressure_level);
passthrough_discardable_manager_.HandleMemoryPressure(memory_pressure_level);
- if (raster_decoder_context_state_)
- raster_decoder_context_state_->PurgeMemory(memory_pressure_level);
+ if (shared_context_state_)
+ shared_context_state_->PurgeMemory(memory_pressure_level);
if (gr_shader_cache_)
gr_shader_cache_->PurgeMemory(memory_pressure_level);
}
-scoped_refptr<raster::RasterDecoderContextState>
-GpuChannelManager::GetRasterDecoderContextState(ContextResult* result) {
- if (raster_decoder_context_state_ &&
- !raster_decoder_context_state_->context_lost) {
+scoped_refptr<SharedContextState> GpuChannelManager::GetSharedContextState(
+ ContextResult* result) {
+ if (shared_context_state_ && !shared_context_state_->context_lost()) {
*result = ContextResult::kSuccess;
- return raster_decoder_context_state_;
+ return shared_context_state_;
}
scoped_refptr<gl::GLSurface> surface = default_offscreen_surface();
@@ -369,18 +354,6 @@ GpuChannelManager::GetRasterDecoderContextState(ContextResult* result) {
// only a single context. See crbug.com/510243 for details.
use_virtualized_gl_contexts |= mailbox_manager_->UsesSync();
- const bool use_oop_rasterization =
- gpu_feature_info_.status_values[GPU_FEATURE_TYPE_OOP_RASTERIZATION] ==
- gpu::kGpuFeatureStatusEnabled;
-
- // With OOP-R, SkiaRenderer and Skia DDL, we will only have one GLContext
- // and share it with RasterDecoders and DisplayCompositor. So it is not
- // necessary to use virtualized gl context anymore.
- // TODO(penghuang): Make virtualized gl context work with SkiaRenderer + DDL +
- // OOPR. https://crbug.com/838899
- if (features::IsUsingSkiaDeferredDisplayList() && use_oop_rasterization)
- use_virtualized_gl_contexts = false;
-
const bool use_passthrough_decoder =
gles2::PassthroughCommandDecoderSupported() &&
gpu_preferences_.use_passthrough_cmd_decoder;
@@ -432,24 +405,56 @@ GpuChannelManager::GetRasterDecoderContextState(ContextResult* result) {
}
// TODO(penghuang): https://crbug.com/899735 Handle device lost for Vulkan.
- raster_decoder_context_state_ = new raster::RasterDecoderContextState(
+ shared_context_state_ = base::MakeRefCounted<SharedContextState>(
std::move(share_group), std::move(surface), std::move(context),
- use_virtualized_gl_contexts, vulkan_context_provider_);
+ use_virtualized_gl_contexts,
+ base::BindOnce(&GpuChannelManager::OnContextLost, base::Unretained(this),
+ /*synthetic_loss=*/false),
+ vulkan_context_provider_);
- const bool enable_raster_transport =
+  // OOP-R needs a GrContext to rasterize tiles.
+ bool need_gr_context =
gpu_feature_info_.status_values[GPU_FEATURE_TYPE_OOP_RASTERIZATION] ==
gpu::kGpuFeatureStatusEnabled;
- if (enable_raster_transport || features::IsUsingSkiaDeferredDisplayList()) {
- raster_decoder_context_state_->InitializeGrContext(
- gpu_driver_bug_workarounds_, gr_shader_cache(), &activity_flags_,
- watchdog_);
+
+  // SkiaRenderer needs a GrContext to composite the output surface.
+ need_gr_context |= features::IsUsingSkiaRenderer();
+
+ if (need_gr_context) {
+ if (!vulkan_context_provider_) {
+ auto feature_info = base::MakeRefCounted<gles2::FeatureInfo>(
+ gpu_driver_bug_workarounds(), gpu_feature_info());
+ if (!shared_context_state_->InitializeGL(gpu_preferences_,
+ feature_info.get())) {
+        shared_context_state_ = nullptr;
+        *result = ContextResult::kFatalFailure;
+        return nullptr;
+ }
+ }
+ shared_context_state_->InitializeGrContext(gpu_driver_bug_workarounds_,
+ gr_shader_cache(),
+ &activity_flags_, watchdog_);
}
- gr_cache_controller_.emplace(raster_decoder_context_state_.get(),
- task_runner_);
+ gr_cache_controller_.emplace(shared_context_state_.get(), task_runner_);
*result = ContextResult::kSuccess;
- return raster_decoder_context_state_;
+ return shared_context_state_;
+}
+
+void GpuChannelManager::OnContextLost(bool synthetic_loss) {
+ if (synthetic_loss)
+ return;
+
+ // Work around issues with recovery by allowing a new GPU process to launch.
+ if (gpu_driver_bug_workarounds_.exit_on_context_lost)
+ delegate_->MaybeExitOnContextLost();
+
+ // Lose all other contexts.
+ if (gl::GLContext::LosesAllContextsOnContextLost() ||
+ (shared_context_state_ &&
+ shared_context_state_->use_virtualized_gl_contexts())) {
+ LoseAllContexts();
+ }
}
void GpuChannelManager::ScheduleGrContextCleanup() {
@@ -462,4 +467,10 @@ void GpuChannelManager::StoreShader(const std::string& key,
delegate_->StoreShaderToDisk(kGrShaderCacheClientId, key, shader);
}
+void GpuChannelManager::SetImageDecodeAcceleratorWorkerForTesting(
+ ImageDecodeAcceleratorWorker* worker) {
+ DCHECK(gpu_channels_.empty());
+ image_decode_accelerator_worker_ = worker;
+}
+
} // namespace gpu
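
Callers of the renamed GetSharedContextState() follow the pattern that appears later in this patch (see RasterCommandBufferStub::Initialize): a usage sketch, assuming a GpuChannelManager* |manager| on the GPU thread:

    ContextResult result;
    scoped_refptr<SharedContextState> state =
        manager->GetSharedContextState(&result);
    if (!state) {
      // |result| tells the caller whether the failure was fatal or transient.
      return result;
    }
    // Holding the scoped_refptr keeps the shared context alive even if the
    // manager drops it after a context loss (see the header comment below).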
diff --git a/chromium/gpu/ipc/service/gpu_channel_manager.h b/chromium/gpu/ipc/service/gpu_channel_manager.h
index 24bc4fd1349..cae718b113e 100644
--- a/chromium/gpu/ipc/service/gpu_channel_manager.h
+++ b/chromium/gpu/ipc/service/gpu_channel_manager.h
@@ -24,9 +24,9 @@
#include "gpu/command_buffer/service/gr_cache_controller.h"
#include "gpu/command_buffer/service/gr_shader_cache.h"
#include "gpu/command_buffer/service/passthrough_discardable_manager.h"
-#include "gpu/command_buffer/service/raster_decoder_context_state.h"
#include "gpu/command_buffer/service/service_discardable_manager.h"
#include "gpu/command_buffer/service/shader_translator_cache.h"
+#include "gpu/command_buffer/service/shared_context_state.h"
#include "gpu/command_buffer/service/shared_image_manager.h"
#include "gpu/config/gpu_driver_bug_workarounds.h"
#include "gpu/config/gpu_feature_info.h"
@@ -50,6 +50,7 @@ class GpuChannel;
class GpuChannelManagerDelegate;
class GpuMemoryBufferFactory;
class GpuWatchdogThread;
+class ImageDecodeAcceleratorWorker;
class MailboxManager;
class Scheduler;
class SyncPointManager;
@@ -78,6 +79,7 @@ class GPU_IPC_SERVICE_EXPORT GpuChannelManager
const GpuFeatureInfo& gpu_feature_info,
GpuProcessActivityFlags activity_flags,
scoped_refptr<gl::GLSurface> default_offscreen_surface,
+ ImageDecodeAcceleratorWorker* image_decode_accelerator_worker,
viz::VulkanContextProvider* vulkan_context_provider = nullptr);
~GpuChannelManager() override;
@@ -103,8 +105,7 @@ class GPU_IPC_SERVICE_EXPORT GpuChannelManager
// Remove the channel for a particular renderer.
void RemoveChannel(int client_id);
- void LoseAllContexts();
- void MaybeExitOnContextLost();
+ void OnContextLost(bool synthetic_loss);
const GpuPreferences& gpu_preferences() const { return gpu_preferences_; }
const GpuDriverBugWorkarounds& gpu_driver_bug_workarounds() const {
@@ -143,8 +144,6 @@ class GPU_IPC_SERVICE_EXPORT GpuChannelManager
void OnApplicationBackgrounded();
- bool is_exiting_for_lost_context() { return exiting_for_lost_context_; }
-
MailboxManager* mailbox_manager() { return mailbox_manager_.get(); }
gl::GLShareGroup* share_group() const { return share_group_.get(); }
@@ -157,7 +156,7 @@ class GPU_IPC_SERVICE_EXPORT GpuChannelManager
void GetVideoMemoryUsageStats(
VideoMemoryUsageStats* video_memory_usage_stats) const;
- scoped_refptr<raster::RasterDecoderContextState> GetRasterDecoderContextState(
+ scoped_refptr<SharedContextState> GetSharedContextState(
ContextResult* result);
void ScheduleGrContextCleanup();
raster::GrShaderCache* gr_shader_cache() {
@@ -167,9 +166,12 @@ class GPU_IPC_SERVICE_EXPORT GpuChannelManager
// raster::GrShaderCache::Client implementation.
void StoreShader(const std::string& key, const std::string& shader) override;
+ void SetImageDecodeAcceleratorWorkerForTesting(
+ ImageDecodeAcceleratorWorker* worker);
+
+ private:
void InternalDestroyGpuMemoryBuffer(gfx::GpuMemoryBufferId id, int client_id);
- void InternalDestroyGpuMemoryBufferOnIO(gfx::GpuMemoryBufferId id,
- int client_id);
+
#if defined(OS_ANDROID)
void ScheduleWakeUpGpu();
void DoWakeUpGpu();
@@ -178,6 +180,8 @@ class GPU_IPC_SERVICE_EXPORT GpuChannelManager
void HandleMemoryPressure(
base::MemoryPressureListener::MemoryPressureLevel memory_pressure_level);
+ void LoseAllContexts();
+
// These objects manage channels to individual renderer processes. There is
// one channel for each renderer process that has connected to this GPU
// process.
@@ -216,8 +220,7 @@ class GPU_IPC_SERVICE_EXPORT GpuChannelManager
base::TimeTicks begin_wake_up_time_;
#endif
- // Set during intentional GPU process shutdown.
- bool exiting_for_lost_context_;
+ ImageDecodeAcceleratorWorker* image_decode_accelerator_worker_ = nullptr;
// Flags which indicate GPU process activity. Read by the browser process
// on GPU process crash.
@@ -225,20 +228,19 @@ class GPU_IPC_SERVICE_EXPORT GpuChannelManager
base::MemoryPressureListener memory_pressure_listener_;
- // The RasterDecoderContextState is shared across all RasterDecoders. Note
+ // The SharedContextState is shared across all RasterDecoders. Note
// that this class needs to be ref-counted to conveniently manage the lifetime
// of the shared context in the case of a context loss. While the
// GpuChannelManager strictly outlives the RasterDecoders, in the event of a
// context loss the clients need to re-create the GpuChannel and command
// buffers once notified. In this interim state we can have multiple instances
- // of the RasterDecoderContextState, for the lost and recovered clients. In
+ // of the SharedContextState, for the lost and recovered clients. In
// order to avoid having the GpuChannelManager keep the lost context state
// alive until all clients have recovered, we use a ref-counted object and
// allow the decoders to manage its lifetime.
base::Optional<raster::GrShaderCache> gr_shader_cache_;
base::Optional<raster::GrCacheController> gr_cache_controller_;
- scoped_refptr<raster::RasterDecoderContextState>
- raster_decoder_context_state_;
+ scoped_refptr<SharedContextState> shared_context_state_;
// With --enable-vulkan, the vulkan_context_provider_ will be set from
// viz::GpuServiceImpl. The raster decoders will use it for rasterization.
diff --git a/chromium/gpu/ipc/service/gpu_channel_manager_delegate.h b/chromium/gpu/ipc/service/gpu_channel_manager_delegate.h
index 6f0688d33ee..a1294724805 100644
--- a/chromium/gpu/ipc/service/gpu_channel_manager_delegate.h
+++ b/chromium/gpu/ipc/service/gpu_channel_manager_delegate.h
@@ -12,6 +12,8 @@ class GURL;
namespace gpu {
+// TODO(kylechar): Rename this class. It's used to provide GpuServiceImpl
+// functionality to multiple classes in src/gpu/, so "delegate" is inaccurate.
class GpuChannelManagerDelegate {
public:
// Called on any successful context creation.
@@ -40,8 +42,14 @@ class GpuChannelManagerDelegate {
const std::string& key,
const std::string& shader) = 0;
- // Cleanly exits the GPU process in response to an unrecoverable error.
- virtual void ExitProcess() = 0;
+  // Cleanly exits the GPU process in response to an error. This does not exit
+  // when the GPU runs in-process, as that would also exit the browser. This
+  // can only be called from the GPU thread.
+ virtual void MaybeExitOnContextLost() = 0;
+
+ // Returns true if the GPU process is exiting. This can be called from any
+ // thread.
+ virtual bool IsExiting() const = 0;
#if defined(OS_WIN)
// Tells the delegate that |child_window| was created in the GPU process and
diff --git a/chromium/gpu/ipc/service/gpu_channel_manager_unittest.cc b/chromium/gpu/ipc/service/gpu_channel_manager_unittest.cc
index 104d6ea852c..25dbb562c5b 100644
--- a/chromium/gpu/ipc/service/gpu_channel_manager_unittest.cc
+++ b/chromium/gpu/ipc/service/gpu_channel_manager_unittest.cc
@@ -47,7 +47,7 @@ class GpuChannelManagerTest : public GpuChannelTestCommon {
EXPECT_EQ(result, gpu::ContextResult::kSuccess);
auto raster_decoder_state =
- channel_manager()->GetRasterDecoderContextState(&result);
+ channel_manager()->GetSharedContextState(&result);
EXPECT_EQ(result, ContextResult::kSuccess);
ASSERT_TRUE(raster_decoder_state);
@@ -64,7 +64,7 @@ class GpuChannelManagerTest : public GpuChannelTestCommon {
}
// We should always clear the shared raster state on background cleanup.
- ASSERT_NE(channel_manager()->GetRasterDecoderContextState(&result).get(),
+ ASSERT_NE(channel_manager()->GetSharedContextState(&result).get(),
raster_decoder_state.get());
}
#endif
diff --git a/chromium/gpu/ipc/service/gpu_channel_test_common.cc b/chromium/gpu/ipc/service/gpu_channel_test_common.cc
index 09aa7233883..ab91f435031 100644
--- a/chromium/gpu/ipc/service/gpu_channel_test_common.cc
+++ b/chromium/gpu/ipc/service/gpu_channel_test_common.cc
@@ -36,13 +36,16 @@ class TestGpuChannelManagerDelegate : public GpuChannelManagerDelegate {
void StoreShaderToDisk(int32_t client_id,
const std::string& key,
const std::string& shader) override {}
- void ExitProcess() override {}
+ void MaybeExitOnContextLost() override { is_exiting_ = true; }
+ bool IsExiting() const override { return is_exiting_; }
#if defined(OS_WIN)
void SendCreatedChildWindow(SurfaceHandle parent_window,
SurfaceHandle child_window) override {}
#endif
private:
+ bool is_exiting_ = false;
+
DISALLOW_COPY_AND_ASSIGN(TestGpuChannelManagerDelegate);
};
@@ -68,7 +71,8 @@ GpuChannelTestCommon::GpuChannelTestCommon(
task_runner_.get(), io_task_runner_.get(), scheduler_.get(),
sync_point_manager_.get(), nullptr, /* gpu_memory_buffer_factory */
std::move(feature_info), GpuProcessActivityFlags(),
- gl::init::CreateOffscreenGLSurface(gfx::Size())));
+ gl::init::CreateOffscreenGLSurface(gfx::Size()),
+ nullptr /* image_decode_accelerator_worker */));
}
GpuChannelTestCommon::~GpuChannelTestCommon() {
diff --git a/chromium/gpu/ipc/service/gpu_channel_test_common.h b/chromium/gpu/ipc/service/gpu_channel_test_common.h
index 8208e8a9376..cb2ccce443e 100644
--- a/chromium/gpu/ipc/service/gpu_channel_test_common.h
+++ b/chromium/gpu/ipc/service/gpu_channel_test_common.h
@@ -35,8 +35,11 @@ class GpuChannelTestCommon : public testing::Test {
~GpuChannelTestCommon() override;
protected:
- GpuChannelManager* channel_manager() { return channel_manager_.get(); }
- base::TestSimpleTaskRunner* task_runner() { return task_runner_.get(); }
+ GpuChannelManager* channel_manager() const { return channel_manager_.get(); }
+ base::TestSimpleTaskRunner* task_runner() const { return task_runner_.get(); }
+ base::TestSimpleTaskRunner* io_task_runner() const {
+ return io_task_runner_.get();
+ }
GpuChannel* CreateChannel(int32_t client_id, bool is_gpu_host);
diff --git a/chromium/gpu/ipc/service/gpu_channel_unittest.cc b/chromium/gpu/ipc/service/gpu_channel_unittest.cc
index 8a8845280da..b290851e535 100644
--- a/chromium/gpu/ipc/service/gpu_channel_unittest.cc
+++ b/chromium/gpu/ipc/service/gpu_channel_unittest.cc
@@ -243,7 +243,7 @@ TEST_F(GpuChannelExitForContextLostTest, CreateFailsDuringLostContextShutdown) {
ASSERT_TRUE(channel);
// Put channel manager into shutdown state.
- channel_manager()->MaybeExitOnContextLost();
+ channel_manager()->OnContextLost(false /* synthetic_loss */);
// Try to create a context.
int32_t kRouteId =
diff --git a/chromium/gpu/ipc/service/gpu_init.cc b/chromium/gpu/ipc/service/gpu_init.cc
index 90c497304c9..24e258d2967 100644
--- a/chromium/gpu/ipc/service/gpu_init.cc
+++ b/chromium/gpu/ipc/service/gpu_init.cc
@@ -23,7 +23,7 @@
#include "gpu/ipc/service/gpu_watchdog_thread.h"
#include "ui/base/ui_base_features.h"
#include "ui/gfx/switches.h"
-#include "ui/gl/gl_features.h"
+#include "ui/gl/buildflags.h"
#include "ui/gl/gl_implementation.h"
#include "ui/gl/gl_surface.h"
#include "ui/gl/gl_switches.h"
@@ -519,4 +519,8 @@ void GpuInit::AdjustInfoToSwiftShader() {
CollectContextGraphicsInfo(&gpu_info_, gpu_preferences_);
}
+scoped_refptr<gl::GLSurface> GpuInit::TakeDefaultOffscreenSurface() {
+ return std::move(default_offscreen_surface_);
+}
+
} // namespace gpu
diff --git a/chromium/gpu/ipc/service/gpu_init.h b/chromium/gpu/ipc/service/gpu_init.h
index eeba59ed61d..0889c59d6e9 100644
--- a/chromium/gpu/ipc/service/gpu_init.h
+++ b/chromium/gpu/ipc/service/gpu_init.h
@@ -66,9 +66,7 @@ class GPU_IPC_SERVICE_EXPORT GpuInit {
std::unique_ptr<GpuWatchdogThread> TakeWatchdogThread() {
return std::move(watchdog_thread_);
}
- scoped_refptr<gl::GLSurface> TakeDefaultOffscreenSurface() {
- return std::move(default_offscreen_surface_);
- }
+ scoped_refptr<gl::GLSurface> TakeDefaultOffscreenSurface();
bool init_successful() const { return init_successful_; }
#if BUILDFLAG(ENABLE_VULKAN)
VulkanImplementation* vulkan_implementation() {
diff --git a/chromium/gpu/ipc/service/gpu_memory_buffer_factory_dxgi.cc b/chromium/gpu/ipc/service/gpu_memory_buffer_factory_dxgi.cc
index f373a08bda6..25a5e44d7e1 100644
--- a/chromium/gpu/ipc/service/gpu_memory_buffer_factory_dxgi.cc
+++ b/chromium/gpu/ipc/service/gpu_memory_buffer_factory_dxgi.cc
@@ -106,11 +106,11 @@ GpuMemoryBufferFactoryDXGI::CreateImageForGpuMemoryBuffer(
SurfaceHandle surface_handle) {
if (handle.type != gfx::DXGI_SHARED_HANDLE)
return nullptr;
- // Transfer ownership of handle to GLImageDXGIHandle.
+ // Transfer ownership of handle to GLImageDXGI.
base::win::ScopedHandle handle_owner;
handle_owner.Set(handle.dxgi_handle.GetHandle());
- auto image = base::MakeRefCounted<gl::GLImageDXGIHandle>(size, 0, format);
- if (!image->Initialize(std::move(handle_owner)))
+ auto image = base::MakeRefCounted<gl::GLImageDXGI>(size, nullptr);
+ if (!image->InitializeHandle(std::move(handle_owner), 0, format))
return nullptr;
return image;
}
diff --git a/chromium/gpu/ipc/service/gpu_memory_buffer_factory_dxgi.h b/chromium/gpu/ipc/service/gpu_memory_buffer_factory_dxgi.h
index 38c196e15e8..3ae6fa02a45 100644
--- a/chromium/gpu/ipc/service/gpu_memory_buffer_factory_dxgi.h
+++ b/chromium/gpu/ipc/service/gpu_memory_buffer_factory_dxgi.h
@@ -10,7 +10,6 @@
#include <D3D11.h>
#include <DXGI.h>
-#include "base/containers/hash_tables.h"
#include "base/macros.h"
#include "base/memory/ref_counted.h"
#include "base/synchronization/lock.h"
diff --git a/chromium/gpu/ipc/service/gpu_memory_buffer_factory_io_surface.h b/chromium/gpu/ipc/service/gpu_memory_buffer_factory_io_surface.h
index 1ec67a3d4ad..f5e73346b39 100644
--- a/chromium/gpu/ipc/service/gpu_memory_buffer_factory_io_surface.h
+++ b/chromium/gpu/ipc/service/gpu_memory_buffer_factory_io_surface.h
@@ -5,11 +5,11 @@
#ifndef GPU_IPC_SERVICE_GPU_MEMORY_BUFFER_FACTORY_IO_SURFACE_H_
#define GPU_IPC_SERVICE_GPU_MEMORY_BUFFER_FACTORY_IO_SURFACE_H_
+#include <unordered_map>
#include <utility>
#include <IOSurface/IOSurface.h>
-#include "base/containers/hash_tables.h"
#include "base/mac/scoped_cftyperef.h"
#include "base/macros.h"
#include "base/memory/ref_counted.h"
@@ -63,7 +63,8 @@ class GPU_IPC_SERVICE_EXPORT GpuMemoryBufferFactoryIOSurface
private:
typedef std::pair<gfx::IOSurfaceId, int> IOSurfaceMapKey;
- typedef base::hash_map<IOSurfaceMapKey, base::ScopedCFTypeRef<IOSurfaceRef>>
+ typedef std::unordered_map<IOSurfaceMapKey,
+ base::ScopedCFTypeRef<IOSurfaceRef>>
IOSurfaceMap;
// TODO(reveman): Remove |io_surfaces_| and allow IOSurface backed GMBs to be
// used with any GPU process by passing a mach_port to CreateImageCHROMIUM.
diff --git a/chromium/gpu/ipc/service/gpu_memory_buffer_factory_native_pixmap.cc b/chromium/gpu/ipc/service/gpu_memory_buffer_factory_native_pixmap.cc
index 837c62a8892..658441f11e0 100644
--- a/chromium/gpu/ipc/service/gpu_memory_buffer_factory_native_pixmap.cc
+++ b/chromium/gpu/ipc/service/gpu_memory_buffer_factory_native_pixmap.cc
@@ -124,13 +124,10 @@ GpuMemoryBufferFactoryNativePixmap::CreateImageForGpuMemoryBuffer(
}
}
- unsigned internalformat = gpu::InternalFormatForGpuMemoryBufferFormat(format);
- scoped_refptr<gl::GLImageNativePixmap> image(
- new gl::GLImageNativePixmap(size, internalformat));
- if (!image->Initialize(pixmap.get(), format)) {
+ auto image = base::MakeRefCounted<gl::GLImageNativePixmap>(size, format);
+ if (!image->Initialize(pixmap.get())) {
LOG(ERROR) << "Failed to create GLImage " << size.ToString() << ", "
- << gfx::BufferFormatToString(format) << ", |internalformat|: "
- << gl::GLEnums::GetStringEnum(internalformat);
+ << gfx::BufferFormatToString(format);
return nullptr;
}
return image;
@@ -164,13 +161,10 @@ GpuMemoryBufferFactoryNativePixmap::CreateAnonymousImage(
<< gfx::BufferFormatToString(format);
return nullptr;
}
- unsigned internalformat = gpu::InternalFormatForGpuMemoryBufferFormat(format);
- scoped_refptr<gl::GLImageNativePixmap> image(
- new gl::GLImageNativePixmap(size, internalformat));
- if (!image->Initialize(pixmap.get(), format)) {
+ auto image = base::MakeRefCounted<gl::GLImageNativePixmap>(size, format);
+ if (!image->Initialize(pixmap.get())) {
LOG(ERROR) << "Failed to create GLImage " << size.ToString() << ", "
- << gfx::BufferFormatToString(format) << ", |internalformat|: "
- << gl::GLEnums::GetStringEnum(internalformat);
+ << gfx::BufferFormatToString(format);
return nullptr;
}
*is_cleared = true;
diff --git a/chromium/gpu/ipc/service/gpu_watchdog_thread.cc b/chromium/gpu/ipc/service/gpu_watchdog_thread.cc
index 7a546b7364c..00e0b66c274 100644
--- a/chromium/gpu/ipc/service/gpu_watchdog_thread.cc
+++ b/chromium/gpu/ipc/service/gpu_watchdog_thread.cc
@@ -15,12 +15,12 @@
#include "base/files/file_util.h"
#include "base/format_macros.h"
#include "base/location.h"
-#include "base/macros.h"
#include "base/memory/ptr_util.h"
#include "base/message_loop/message_loop_current.h"
#include "base/power_monitor/power_monitor.h"
#include "base/process/process.h"
#include "base/single_thread_task_runner.h"
+#include "base/stl_util.h"
#include "base/strings/stringprintf.h"
#include "base/threading/platform_thread.h"
#include "base/threading/thread_task_runner_handle.h"
@@ -39,9 +39,12 @@ namespace {
#if defined(CYGPROFILE_INSTRUMENTATION)
const int kGpuTimeout = 30000;
-#elif defined(OS_WIN)
+#elif defined(OS_WIN) || defined(OS_MACOSX)
// Use a slightly longer timeout on Windows due to prevalence of slow and
// infected machines.
+
+// Also use a slightly longer timeout on MacOSX to avoid GPU process hangs
+// seen at context creation during startup. See https://crbug.com/918490.
const int kGpuTimeout = 15000;
#else
const int kGpuTimeout = 10000;
@@ -115,9 +118,9 @@ void GpuWatchdogThread::CheckArmed() {
// Called on the monitored thread. Responds with OnAcknowledge. Cannot use
// the method factory. As we stop the task runner before destroying this
// class, the unretained reference will always outlive the task.
- task_runner()->PostTask(
- FROM_HERE,
- base::Bind(&GpuWatchdogThread::OnAcknowledge, base::Unretained(this)));
+ task_runner()->PostTask(FROM_HERE,
+ base::BindOnce(&GpuWatchdogThread::OnAcknowledge,
+ base::Unretained(this)));
}
}
@@ -288,8 +291,9 @@ void GpuWatchdogThread::OnAcknowledge() {
// The monitored thread has responded. Post a task to check it again.
task_runner()->PostDelayedTask(
- FROM_HERE, base::Bind(&GpuWatchdogThread::OnCheck,
- weak_factory_.GetWeakPtr(), was_suspended),
+ FROM_HERE,
+ base::BindOnce(&GpuWatchdogThread::OnCheck, weak_factory_.GetWeakPtr(),
+ was_suspended),
0.5 * timeout_);
}
@@ -329,10 +333,11 @@ void GpuWatchdogThread::OnCheck(bool after_suspend) {
// Post a task to the watchdog thread to exit if the monitored thread does
// not respond in time.
- task_runner()->PostDelayedTask(FROM_HERE,
- base::Bind(&GpuWatchdogThread::OnCheckTimeout,
- weak_factory_.GetWeakPtr()),
- timeout);
+ task_runner()->PostDelayedTask(
+ FROM_HERE,
+ base::BindOnce(&GpuWatchdogThread::OnCheckTimeout,
+ weak_factory_.GetWeakPtr()),
+ timeout);
}
void GpuWatchdogThread::OnCheckTimeout() {
@@ -358,8 +363,9 @@ void GpuWatchdogThread::OnCheckTimeout() {
// Continue with the termination after an additional delay.
task_runner()->PostDelayedTask(
FROM_HERE,
- base::Bind(&GpuWatchdogThread::DeliberatelyTerminateToRecoverFromHang,
- weak_factory_.GetWeakPtr()),
+ base::BindOnce(
+ &GpuWatchdogThread::DeliberatelyTerminateToRecoverFromHang,
+ weak_factory_.GetWeakPtr()),
0.5 * timeout_);
// Post a task that does nothing on the watched thread to bump its priority
@@ -389,8 +395,9 @@ void GpuWatchdogThread::DeliberatelyTerminateToRecoverFromHang() {
if (use_thread_cpu_time_ && (time_since_arm < timeout_)) {
task_runner()->PostDelayedTask(
FROM_HERE,
- base::Bind(&GpuWatchdogThread::DeliberatelyTerminateToRecoverFromHang,
- weak_factory_.GetWeakPtr()),
+ base::BindOnce(
+ &GpuWatchdogThread::DeliberatelyTerminateToRecoverFromHang,
+ weak_factory_.GetWeakPtr()),
timeout_ - time_since_arm);
return;
}
@@ -525,7 +532,7 @@ void GpuWatchdogThread::SetupXServer() {
void GpuWatchdogThread::SetupXChangeProp() {
DCHECK(display_);
XChangeProperty(display_, window_, atom_, XA_STRING, 8, PropModeReplace, text,
- (arraysize(text) - 1));
+ (base::size(text) - 1));
}
bool GpuWatchdogThread::MatchXEventAtom(XEvent* event) {
@@ -541,8 +548,8 @@ void GpuWatchdogThread::AddPowerObserver() {
// As we stop the task runner before destroying this class, the unretained
// reference will always outlive the task.
task_runner()->PostTask(FROM_HERE,
- base::Bind(&GpuWatchdogThread::OnAddPowerObserver,
- base::Unretained(this)));
+ base::BindOnce(&GpuWatchdogThread::OnAddPowerObserver,
+ base::Unretained(this)));
}
void GpuWatchdogThread::OnAddPowerObserver() {
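
The repeated base::Bind conversions in this file (and across the patch) follow Chromium's split of callback types: base::BindOnce for callbacks that run at most once, such as anything handed to PostTask()/PostDelayedTask(), and base::BindRepeating for callbacks that may run many times, such as the MemoryPressureListener earlier in the patch. A sketch of the two forms (receiver names are hypothetical):

    // One-shot: the bound state is consumed when the callback runs.
    task_runner->PostTask(
        FROM_HERE, base::BindOnce(&Watchdog::OnCheck, weak_ptr));

    // Repeating: invoked on every memory pressure notification.
    base::MemoryPressureListener listener(base::BindRepeating(
        &Manager::HandleMemoryPressure, base::Unretained(manager)));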
diff --git a/chromium/gpu/ipc/service/image_decode_accelerator_stub.cc b/chromium/gpu/ipc/service/image_decode_accelerator_stub.cc
index c1c0c9a786f..917d56edad2 100644
--- a/chromium/gpu/ipc/service/image_decode_accelerator_stub.cc
+++ b/chromium/gpu/ipc/service/image_decode_accelerator_stub.cc
@@ -4,25 +4,31 @@
#include "gpu/ipc/service/image_decode_accelerator_stub.h"
+#include "base/bind.h"
#include "base/feature_list.h"
+#include "base/location.h"
#include "base/logging.h"
#include "base/single_thread_task_runner.h"
#include "gpu/command_buffer/common/constants.h"
#include "gpu/command_buffer/common/scheduling_priority.h"
+#include "gpu/command_buffer/common/sync_token.h"
#include "gpu/command_buffer/service/scheduler.h"
#include "gpu/command_buffer/service/sync_point_manager.h"
#include "gpu/config/gpu_finch_features.h"
#include "gpu/ipc/common/command_buffer_id.h"
-#include "gpu/ipc/common/gpu_messages.h"
#include "gpu/ipc/service/gpu_channel.h"
+#include "gpu/ipc/service/image_decode_accelerator_worker.h"
#include "ipc/ipc_message.h"
#include "ipc/ipc_message_macros.h"
namespace gpu {
-ImageDecodeAcceleratorStub::ImageDecodeAcceleratorStub(GpuChannel* channel,
- int32_t route_id)
- : channel_(channel),
+ImageDecodeAcceleratorStub::ImageDecodeAcceleratorStub(
+ ImageDecodeAcceleratorWorker* worker,
+ GpuChannel* channel,
+ int32_t route_id)
+ : worker_(worker),
+ channel_(channel),
sequence_(channel->scheduler()->CreateSequence(SchedulingPriority::kLow)),
sync_point_client_state_(
channel->sync_point_manager()->CreateSyncPointClientState(
@@ -31,7 +37,12 @@ ImageDecodeAcceleratorStub::ImageDecodeAcceleratorStub(GpuChannel* channel,
route_id),
sequence_)),
main_task_runner_(channel->task_runner()),
- io_task_runner_(channel->io_task_runner()) {}
+ io_task_runner_(channel->io_task_runner()) {
+ // We need the sequence to be initially disabled so that when we schedule a
+ // task to release the decode sync token, it doesn't run immediately (we want
+ // it to run when the decode is done).
+ channel_->scheduler()->DisableSequence(sequence_);
+}
bool ImageDecodeAcceleratorStub::OnMessageReceived(const IPC::Message& msg) {
DCHECK(io_task_runner_->BelongsToCurrentThread());
@@ -70,8 +81,102 @@ void ImageDecodeAcceleratorStub::OnScheduleImageDecode(
// The channel is no longer available, so don't schedule a decode.
return;
}
- // TODO(andrescj): schedule the release of the decode sync token and start the
- // decode.
+
+ // Make sure the decode sync token is ordered with respect to the last decode
+ // request.
+ if (release_count <= last_release_count_) {
+ DLOG(ERROR) << "Out-of-order decode sync token";
+ OnError();
+ return;
+ }
+ last_release_count_ = release_count;
+
+  // Reject empty output dimensions (zero width or height).
+  if (decode_params.output_size.IsEmpty()) {
+    DLOG(ERROR) << "Empty output dimensions";
+ OnError();
+ return;
+ }
+
+ // Start the actual decode.
+ worker_->Decode(std::move(decode_params.encoded_data),
+ decode_params.output_size,
+ base::BindOnce(&ImageDecodeAcceleratorStub::OnDecodeCompleted,
+ base::WrapRefCounted(this)));
+
+ // Schedule a task to eventually release the decode sync token. Note that this
+ // task won't run until the sequence is re-enabled when a decode completes.
+ channel_->scheduler()->ScheduleTask(Scheduler::Task(
+ sequence_,
+ base::BindOnce(&ImageDecodeAcceleratorStub::ProcessCompletedDecode,
+ base::WrapRefCounted(this), std::move(decode_params),
+ release_count),
+ std::vector<SyncToken>()));
+}
+
+void ImageDecodeAcceleratorStub::ProcessCompletedDecode(
+ GpuChannelMsg_ScheduleImageDecode_Params params,
+ uint64_t decode_release_count) {
+ DCHECK(main_task_runner_->BelongsToCurrentThread());
+ base::AutoLock lock(lock_);
+ if (!channel_) {
+ // The channel is no longer available, so don't do anything.
+ return;
+ }
+
+ // TODO(andrescj): create the transfer cache entry. Doing so will also upload
+ // the decoded image to a GPU texture.
+
+ sync_point_client_state_->ReleaseFenceSync(decode_release_count);
+
+ // If there are no more completed decodes to be processed, we can disable the
+ // sequence: when the next decode is completed, the sequence will be
+ // re-enabled.
+ pending_completed_decodes_.pop();
+ if (pending_completed_decodes_.empty())
+ channel_->scheduler()->DisableSequence(sequence_);
+}
+
+void ImageDecodeAcceleratorStub::OnDecodeCompleted(
+ std::vector<uint8_t> rgba_output) {
+ base::AutoLock lock(lock_);
+ if (!channel_) {
+ // The channel is no longer available, so don't do anything.
+ return;
+ }
+
+ if (!accepting_completed_decodes_) {
+ // We're still waiting for the channel to be destroyed because of an earlier
+ // failure, so don't do anything.
+ return;
+ }
+
+ if (rgba_output.empty()) {
+ DLOG(ERROR) << "The decode failed";
+ OnError();
+ return;
+ }
+
+ pending_completed_decodes_.push(std::move(rgba_output));
+
+ // We only need to enable the sequence when the number of pending completed
+ // decodes is 1. If there are more, the sequence should already be enabled.
+ if (pending_completed_decodes_.size() == 1u)
+ channel_->scheduler()->EnableSequence(sequence_);
+}
+
+void ImageDecodeAcceleratorStub::OnError() {
+ DCHECK(channel_);
+
+ // Trigger the destruction of the channel and stop processing further
+ // completed decodes, even if they're successful. We can't call
+ // GpuChannel::OnChannelError() directly because that will end up calling
+ // ImageDecodeAcceleratorStub::Shutdown() while |lock_| is still acquired. So,
+ // we post a task to the main thread instead.
+ accepting_completed_decodes_ = false;
+ channel_->task_runner()->PostTask(
+ FROM_HERE,
+ base::BindOnce(&GpuChannel::OnChannelError, channel_->AsWeakPtr()));
}
} // namespace gpu
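
The stub's ordering guarantee rests on the scheduler's sequence gating: tasks scheduled on a disabled sequence queue up and run, in order, only once the sequence is enabled. A condensed sketch of the lifecycle implemented above (identifiers as in the stub):

    scheduler->DisableSequence(sequence_);    // constructor: gate closed
    scheduler->ScheduleTask(Scheduler::Task(  // per request: queue the
        sequence_,                            // sync-token-release task
        base::BindOnce(&ImageDecodeAcceleratorStub::ProcessCompletedDecode,
                       stub, std::move(params), release_count),
        std::vector<SyncToken>()));
    scheduler->EnableSequence(sequence_);     // worker callback: open the gate
    // ProcessCompletedDecode(): close the gate again once the queue drains.
    if (pending_completed_decodes_.empty())
      scheduler->DisableSequence(sequence_);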
diff --git a/chromium/gpu/ipc/service/image_decode_accelerator_stub.h b/chromium/gpu/ipc/service/image_decode_accelerator_stub.h
index ba767275ed1..a8b207ac802 100644
--- a/chromium/gpu/ipc/service/image_decode_accelerator_stub.h
+++ b/chromium/gpu/ipc/service/image_decode_accelerator_stub.h
@@ -5,14 +5,16 @@
#ifndef GPU_IPC_SERVICE_IMAGE_DECODE_ACCELERATOR_STUB_H_
#define GPU_IPC_SERVICE_IMAGE_DECODE_ACCELERATOR_STUB_H_
+#include <vector>
+
+#include "base/containers/queue.h"
#include "base/macros.h"
#include "base/memory/ref_counted.h"
#include "base/memory/scoped_refptr.h"
#include "base/synchronization/lock.h"
#include "base/thread_annotations.h"
#include "gpu/command_buffer/service/sequence_id.h"
-
-struct GpuChannelMsg_ScheduleImageDecode_Params;
+#include "gpu/ipc/common/gpu_messages.h"
namespace base {
class SingleThreadTaskRunner;
@@ -24,6 +26,7 @@ class Message;
namespace gpu {
class GpuChannel;
+class ImageDecodeAcceleratorWorker;
class SyncPointClientState;
// Processes incoming image decode requests from renderers: it schedules the
@@ -43,7 +46,12 @@ class SyncPointClientState;
class ImageDecodeAcceleratorStub
: public base::RefCountedThreadSafe<ImageDecodeAcceleratorStub> {
public:
- ImageDecodeAcceleratorStub(GpuChannel* channel, int32_t route_id);
+ // TODO(andrescj): right now, we only accept one worker to be used for JPEG
+ // decoding. If we want to use multiple workers, we need to ensure that sync
+ // tokens are released in order.
+ ImageDecodeAcceleratorStub(ImageDecodeAcceleratorWorker* worker,
+ GpuChannel* channel,
+ int32_t route_id);
// Processes a message from the renderer. Should be called on the IO thread.
bool OnMessageReceived(const IPC::Message& msg);
@@ -60,11 +68,34 @@ class ImageDecodeAcceleratorStub
const GpuChannelMsg_ScheduleImageDecode_Params& params,
uint64_t release_count);
+ // Creates the service-side cache entry for a completed decode and releases
+ // the decode sync token.
+ void ProcessCompletedDecode(GpuChannelMsg_ScheduleImageDecode_Params params,
+ uint64_t decode_release_count);
+
+ // The |worker_| calls this when a decode is completed. If the decode is
+ // successful (i.e., |rgba_output| is not empty), |sequence_| will be enabled
+ // so that ProcessCompletedDecode() is called. If the decode is not
+ // successful, we destroy the channel (see OnError()).
+ void OnDecodeCompleted(std::vector<uint8_t> rgba_output);
+
+  // Triggers the destruction of the channel asynchronously and stops the stub
+  // from accepting further completed decodes. On entry, |channel_| must not be
+ // nullptr.
+ void OnError() EXCLUSIVE_LOCKS_REQUIRED(lock_);
+
+ // The object to which the actual decoding can be delegated.
+ ImageDecodeAcceleratorWorker* worker_ = nullptr;
+
base::Lock lock_;
- GpuChannel* channel_ GUARDED_BY(lock_);
+ GpuChannel* channel_ GUARDED_BY(lock_) = nullptr;
SequenceId sequence_ GUARDED_BY(lock_);
scoped_refptr<SyncPointClientState> sync_point_client_state_
GUARDED_BY(lock_);
+ base::queue<std::vector<uint8_t>> pending_completed_decodes_
+ GUARDED_BY(lock_);
+ bool accepting_completed_decodes_ GUARDED_BY(lock_) = true;
+ uint64_t last_release_count_ GUARDED_BY(lock_) = 0;
scoped_refptr<base::SingleThreadTaskRunner> main_task_runner_;
scoped_refptr<base::SingleThreadTaskRunner> io_task_runner_;
diff --git a/chromium/gpu/ipc/service/image_decode_accelerator_stub_unittest.cc b/chromium/gpu/ipc/service/image_decode_accelerator_stub_unittest.cc
new file mode 100644
index 00000000000..8b738c6fa80
--- /dev/null
+++ b/chromium/gpu/ipc/service/image_decode_accelerator_stub_unittest.cc
@@ -0,0 +1,326 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/containers/queue.h"
+#include "base/macros.h"
+#include "base/numerics/checked_math.h"
+#include "base/test/scoped_feature_list.h"
+#include "base/test/test_simple_task_runner.h"
+#include "gpu/command_buffer/common/constants.h"
+#include "gpu/command_buffer/common/sync_token.h"
+#include "gpu/command_buffer/service/sync_point_manager.h"
+#include "gpu/config/gpu_finch_features.h"
+#include "gpu/ipc/common/command_buffer_id.h"
+#include "gpu/ipc/common/gpu_messages.h"
+#include "gpu/ipc/service/gpu_channel_manager.h"
+#include "gpu/ipc/service/gpu_channel_test_common.h"
+#include "gpu/ipc/service/image_decode_accelerator_worker.h"
+#include "testing/gmock/include/gmock/gmock.h"
+#include "ui/gfx/color_space.h"
+#include "ui/gfx/geometry/size.h"
+
+using testing::InSequence;
+using testing::StrictMock;
+
+namespace gpu {
+class GpuChannel;
+
+// This mock allows individual tests to decide asynchronously when to finish a
+// decode by using the FinishOneDecode() method.
+class MockImageDecodeAcceleratorWorker : public ImageDecodeAcceleratorWorker {
+ public:
+ MockImageDecodeAcceleratorWorker() {}
+
+  void Decode(std::vector<uint8_t> encoded_data,
+              const gfx::Size& output_size,
+              base::OnceCallback<void(std::vector<uint8_t>)> decode_cb)
+      override {
+ pending_decodes_.push(PendingDecode{output_size, std::move(decode_cb)});
+ DoDecode(output_size);
+ }
+
+ void FinishOneDecode(bool success) {
+ if (pending_decodes_.empty())
+ return;
+ PendingDecode next_decode = std::move(pending_decodes_.front());
+ pending_decodes_.pop();
+ if (success) {
+ base::CheckedNumeric<size_t> rgba_bytes = 4u;
+ rgba_bytes *= next_decode.output_size.width();
+ rgba_bytes *= next_decode.output_size.height();
+ std::vector<uint8_t> rgba_output(rgba_bytes.ValueOrDie(), 0u);
+ std::move(next_decode.decode_cb).Run(std::move(rgba_output));
+ } else {
+ std::move(next_decode.decode_cb).Run(std::vector<uint8_t>());
+ }
+ }
+
+ MOCK_METHOD1(DoDecode, void(const gfx::Size&));
+
+ private:
+ struct PendingDecode {
+ gfx::Size output_size;
+ base::OnceCallback<void(std::vector<uint8_t>)> decode_cb;
+ };
+
+ base::queue<PendingDecode> pending_decodes_;
+
+ DISALLOW_COPY_AND_ASSIGN(MockImageDecodeAcceleratorWorker);
+};
+
+const int kChannelId = 1;
+
+// Test fixture: the general strategy is to use the GPU channel test
+// infrastructure (provided by GpuChannelTestCommon), ask the channel to handle
+// decode requests, and expect sync token releases and invocations of the
+// ImageDecodeAcceleratorWorker functionality.
+class ImageDecodeAcceleratorStubTest : public GpuChannelTestCommon {
+ public:
+ ImageDecodeAcceleratorStubTest() : GpuChannelTestCommon() {}
+ ~ImageDecodeAcceleratorStubTest() override = default;
+
+ SyncPointManager* sync_point_manager() const {
+ return channel_manager()->sync_point_manager();
+ }
+
+ void SetUp() override {
+ GpuChannelTestCommon::SetUp();
+ // TODO(andrescj): get rid of the |feature_list_| when the feature is
+ // enabled by default.
+ feature_list_.InitAndEnableFeature(
+ features::kVaapiJpegImageDecodeAcceleration);
+ channel_manager()->SetImageDecodeAcceleratorWorkerForTesting(
+ &image_decode_accelerator_worker_);
+ ASSERT_TRUE(CreateChannel(kChannelId, false /* is_gpu_host */));
+ }
+
+ void TearDown() override {
+ // Make sure the channel is destroyed before the
+ // |image_decode_accelerator_worker_| is destroyed.
+ channel_manager()->DestroyAllChannels();
+ }
+
+ SyncToken SendDecodeRequest(const gfx::Size& output_size,
+ uint64_t release_count) {
+ GpuChannel* channel = channel_manager()->LookupChannel(kChannelId);
+ if (!channel) {
+ // It's possible that the channel was destroyed as part of an earlier
+ // SendDecodeRequest() call. This would happen if
+ // ImageDecodeAcceleratorStub::OnScheduleImageDecode decides to destroy
+ // the channel.
+ return SyncToken();
+ }
+
+ SyncToken decode_sync_token(
+ CommandBufferNamespace::GPU_IO,
+ CommandBufferIdFromChannelAndRoute(
+ kChannelId, static_cast<int32_t>(
+ GpuChannelReservedRoutes::kImageDecodeAccelerator)),
+ release_count);
+ GpuChannelMsg_ScheduleImageDecode_Params decode_params;
+ decode_params.encoded_data = std::vector<uint8_t>();
+ decode_params.output_size = output_size;
+ decode_params.raster_decoder_route_id = 1;
+ decode_params.transfer_cache_entry_id = 1u;
+ decode_params.discardable_handle_shm_id = 0;
+ decode_params.discardable_handle_shm_offset = 0u;
+ decode_params.target_color_space = gfx::ColorSpace();
+ decode_params.needs_mips = false;
+
+ HandleMessage(
+ channel,
+ new GpuChannelMsg_ScheduleImageDecode(
+ static_cast<int32_t>(
+ GpuChannelReservedRoutes::kImageDecodeAccelerator),
+ std::move(decode_params), decode_sync_token.release_count()));
+ return decode_sync_token;
+ }
+
+ void RunTasksUntilIdle() {
+ while (task_runner()->HasPendingTask() ||
+ io_task_runner()->HasPendingTask()) {
+ task_runner()->RunUntilIdle();
+ io_task_runner()->RunUntilIdle();
+ }
+ }
+
+ protected:
+ StrictMock<MockImageDecodeAcceleratorWorker> image_decode_accelerator_worker_;
+
+ private:
+ base::test::ScopedFeatureList feature_list_;
+
+ DISALLOW_COPY_AND_ASSIGN(ImageDecodeAcceleratorStubTest);
+};
+
+// Tests the following flow: two decode requests are sent. The first decode is
+// completed. This should cause one sync token to be released and the scheduler
+// sequence to be disabled. Then, the second decode is completed. This should
+// cause the other sync token to be released.
+TEST_F(ImageDecodeAcceleratorStubTest,
+ MultipleDecodesCompletedAfterSequenceIsDisabled) {
+ {
+ InSequence call_sequence;
+ EXPECT_CALL(image_decode_accelerator_worker_, DoDecode(gfx::Size(100, 100)))
+ .Times(1);
+ EXPECT_CALL(image_decode_accelerator_worker_, DoDecode(gfx::Size(200, 200)))
+ .Times(1);
+ }
+ const SyncToken decode1_sync_token = SendDecodeRequest(
+ gfx::Size(100, 100) /* output_size */, 1u /* release_count */);
+ const SyncToken decode2_sync_token = SendDecodeRequest(
+ gfx::Size(200, 200) /* output_size */, 2u /* release_count */);
+
+ // A decode sync token should not be released before a decode is finished.
+ RunTasksUntilIdle();
+ EXPECT_FALSE(sync_point_manager()->IsSyncTokenReleased(decode1_sync_token));
+ EXPECT_FALSE(sync_point_manager()->IsSyncTokenReleased(decode2_sync_token));
+
+ // Only the first decode sync token should be released after the first decode
+ // is finished.
+ image_decode_accelerator_worker_.FinishOneDecode(true);
+ RunTasksUntilIdle();
+ EXPECT_TRUE(sync_point_manager()->IsSyncTokenReleased(decode1_sync_token));
+ EXPECT_FALSE(sync_point_manager()->IsSyncTokenReleased(decode2_sync_token));
+
+ // The second decode sync token should be released after the second decode is
+ // finished.
+ image_decode_accelerator_worker_.FinishOneDecode(true);
+ RunTasksUntilIdle();
+ EXPECT_TRUE(sync_point_manager()->IsSyncTokenReleased(decode1_sync_token));
+ EXPECT_TRUE(sync_point_manager()->IsSyncTokenReleased(decode2_sync_token));
+
+ // The channel should still exist at the end.
+ EXPECT_TRUE(channel_manager()->LookupChannel(kChannelId));
+}
+
+// Tests the following flow: three decode requests are sent. The first decode
+// completes, which should cause the scheduler sequence to be enabled. Right
+// after that (while the sequence is still enabled), the other two decodes
+// complete. At the end, all the sync tokens should be released.
+TEST_F(ImageDecodeAcceleratorStubTest,
+ MultipleDecodesCompletedWhileSequenceIsEnabled) {
+ {
+ InSequence call_sequence;
+ EXPECT_CALL(image_decode_accelerator_worker_, DoDecode(gfx::Size(100, 100)))
+ .Times(1);
+ EXPECT_CALL(image_decode_accelerator_worker_, DoDecode(gfx::Size(200, 200)))
+ .Times(1);
+ EXPECT_CALL(image_decode_accelerator_worker_, DoDecode(gfx::Size(300, 300)))
+ .Times(1);
+ }
+ const SyncToken decode1_sync_token = SendDecodeRequest(
+ gfx::Size(100, 100) /* output_size */, 1u /* release_count */);
+ const SyncToken decode2_sync_token = SendDecodeRequest(
+ gfx::Size(200, 200) /* output_size */, 2u /* release_count */);
+ const SyncToken decode3_sync_token = SendDecodeRequest(
+ gfx::Size(300, 300) /* output_size */, 3u /* release_count */);
+
+ // A decode sync token should not be released before a decode is finished.
+ RunTasksUntilIdle();
+ EXPECT_FALSE(sync_point_manager()->IsSyncTokenReleased(decode1_sync_token));
+ EXPECT_FALSE(sync_point_manager()->IsSyncTokenReleased(decode2_sync_token));
+ EXPECT_FALSE(sync_point_manager()->IsSyncTokenReleased(decode3_sync_token));
+
+ // All decode sync tokens should be released after completing all the decodes.
+ image_decode_accelerator_worker_.FinishOneDecode(true);
+ image_decode_accelerator_worker_.FinishOneDecode(true);
+ image_decode_accelerator_worker_.FinishOneDecode(true);
+ RunTasksUntilIdle();
+ EXPECT_TRUE(sync_point_manager()->IsSyncTokenReleased(decode1_sync_token));
+ EXPECT_TRUE(sync_point_manager()->IsSyncTokenReleased(decode2_sync_token));
+ EXPECT_TRUE(sync_point_manager()->IsSyncTokenReleased(decode3_sync_token));
+
+ // The channel should still exist at the end.
+ EXPECT_TRUE(channel_manager()->LookupChannel(kChannelId));
+}
+
+// Tests the following flow: three decode requests are sent. The first decode
+// fails, which should trigger the destruction of the channel. The second
+// succeeds and the third one fails. Regardless, the channel should still be
+// destroyed and all sync tokens should be released.
+TEST_F(ImageDecodeAcceleratorStubTest, FailedDecodes) {
+ {
+ InSequence call_sequence;
+ EXPECT_CALL(image_decode_accelerator_worker_, DoDecode(gfx::Size(100, 100)))
+ .Times(1);
+ EXPECT_CALL(image_decode_accelerator_worker_, DoDecode(gfx::Size(200, 200)))
+ .Times(1);
+ EXPECT_CALL(image_decode_accelerator_worker_, DoDecode(gfx::Size(300, 300)))
+ .Times(1);
+ }
+ const SyncToken decode1_sync_token = SendDecodeRequest(
+ gfx::Size(100, 100) /* output_size */, 1u /* release_count */);
+ const SyncToken decode2_sync_token = SendDecodeRequest(
+ gfx::Size(200, 200) /* output_size */, 2u /* release_count */);
+ const SyncToken decode3_sync_token = SendDecodeRequest(
+ gfx::Size(300, 300) /* output_size */, 3u /* release_count */);
+
+ // A decode sync token should not be released before a decode is finished.
+ RunTasksUntilIdle();
+ EXPECT_FALSE(sync_point_manager()->IsSyncTokenReleased(decode1_sync_token));
+ EXPECT_FALSE(sync_point_manager()->IsSyncTokenReleased(decode2_sync_token));
+ EXPECT_FALSE(sync_point_manager()->IsSyncTokenReleased(decode3_sync_token));
+ image_decode_accelerator_worker_.FinishOneDecode(false);
+ image_decode_accelerator_worker_.FinishOneDecode(true);
+ image_decode_accelerator_worker_.FinishOneDecode(false);
+
+ // We expect the destruction of the ImageDecodeAcceleratorStub, which also
+ // implies that all decode sync tokens should be released.
+ RunTasksUntilIdle();
+ EXPECT_FALSE(channel_manager()->LookupChannel(kChannelId));
+ EXPECT_TRUE(sync_point_manager()->IsSyncTokenReleased(decode1_sync_token));
+ EXPECT_TRUE(sync_point_manager()->IsSyncTokenReleased(decode2_sync_token));
+ EXPECT_TRUE(sync_point_manager()->IsSyncTokenReleased(decode3_sync_token));
+}
+
+TEST_F(ImageDecodeAcceleratorStubTest, OutOfOrderSyncTokens) {
+ EXPECT_CALL(image_decode_accelerator_worker_, DoDecode(gfx::Size(100, 100)))
+ .Times(1);
+ const SyncToken decode1_sync_token = SendDecodeRequest(
+ gfx::Size(100, 100) /* output_size */, 2u /* release_count */);
+ const SyncToken decode2_sync_token = SendDecodeRequest(
+ gfx::Size(200, 200) /* output_size */, 1u /* release_count */);
+
+ // We expect the destruction of the ImageDecodeAcceleratorStub, which also
+ // implies that all decode sync tokens should be released.
+ RunTasksUntilIdle();
+ EXPECT_FALSE(channel_manager()->LookupChannel(kChannelId));
+ EXPECT_TRUE(sync_point_manager()->IsSyncTokenReleased(decode1_sync_token));
+ EXPECT_TRUE(sync_point_manager()->IsSyncTokenReleased(decode2_sync_token));
+}
+
+TEST_F(ImageDecodeAcceleratorStubTest, ZeroReleaseCountSyncToken) {
+ const SyncToken decode_sync_token = SendDecodeRequest(
+ gfx::Size(100, 100) /* output_size */, 0u /* release_count */);
+
+ // We expect the destruction of the ImageDecodeAcceleratorStub, which also
+ // implies that all decode sync tokens should be released.
+ RunTasksUntilIdle();
+ EXPECT_FALSE(channel_manager()->LookupChannel(kChannelId));
+ EXPECT_TRUE(sync_point_manager()->IsSyncTokenReleased(decode_sync_token));
+}
+
+TEST_F(ImageDecodeAcceleratorStubTest, ZeroWidthOutputSize) {
+ const SyncToken decode_sync_token = SendDecodeRequest(
+ gfx::Size(0, 100) /* output_size */, 1u /* release_count */);
+
+ // We expect the destruction of the ImageDecodeAcceleratorStub, which also
+ // implies that all decode sync tokens should be released.
+ RunTasksUntilIdle();
+ EXPECT_FALSE(channel_manager()->LookupChannel(kChannelId));
+ EXPECT_TRUE(sync_point_manager()->IsSyncTokenReleased(decode_sync_token));
+}
+
+TEST_F(ImageDecodeAcceleratorStubTest, ZeroHeightOutputSize) {
+ const SyncToken decode_sync_token = SendDecodeRequest(
+ gfx::Size(100, 0) /* output_size */, 1u /* release_count */);
+
+ // We expect the destruction of the ImageDecodeAcceleratorStub, which also
+ // implies that all decode sync tokens should be released.
+ RunTasksUntilIdle();
+ EXPECT_FALSE(channel_manager()->LookupChannel(kChannelId));
+ EXPECT_TRUE(sync_point_manager()->IsSyncTokenReleased(decode_sync_token));
+}
+
+} // namespace gpu
diff --git a/chromium/gpu/ipc/service/image_decode_accelerator_worker.h b/chromium/gpu/ipc/service/image_decode_accelerator_worker.h
new file mode 100644
index 00000000000..66efb3090b5
--- /dev/null
+++ b/chromium/gpu/ipc/service/image_decode_accelerator_worker.h
@@ -0,0 +1,38 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef GPU_IPC_SERVICE_IMAGE_DECODE_ACCELERATOR_WORKER_H_
+#define GPU_IPC_SERVICE_IMAGE_DECODE_ACCELERATOR_WORKER_H_
+
+#include <vector>
+
+#include "base/callback.h"
+
+namespace gfx {
+class Size;
+} // namespace gfx
+
+namespace gpu {
+
+// An ImageDecodeAcceleratorWorker handles the actual hardware-accelerated
+// decode of an image of a specific type (e.g., JPEG or WebP).
+class ImageDecodeAcceleratorWorker {
+ public:
+ virtual ~ImageDecodeAcceleratorWorker() {}
+
+  // Enqueues a decode of |encoded_data|. When the decode completes,
+  // |decode_cb| is called asynchronously with a vector containing the decoded
+  // image in RGBA format (the output stride is |output_size|.width() * 4). If
+  // the decode fails, |decode_cb| is called asynchronously with an empty
+  // vector. Callbacks must be invoked in the same order in which this method
+  // is called.
+ virtual void Decode(
+ std::vector<uint8_t> encoded_data,
+ const gfx::Size& output_size,
+ base::OnceCallback<void(std::vector<uint8_t>)> decode_cb) = 0;
+};
+
+} // namespace gpu
+
+#endif // GPU_IPC_SERVICE_IMAGE_DECODE_ACCELERATOR_WORKER_H_
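
A minimal sketch of a worker that satisfies the contract above: RGBA output
with a stride of |output_size|.width() * 4, an empty vector on failure, and
callbacks delivered asynchronously in call order. The decode itself is a
placeholder rather than a real accelerator, and the failure condition is
invented for illustration:

    #include <stdint.h>

    #include <utility>
    #include <vector>

    #include "base/bind.h"
    #include "base/location.h"
    #include "base/threading/thread_task_runner_handle.h"
    #include "gpu/ipc/service/image_decode_accelerator_worker.h"
    #include "ui/gfx/geometry/size.h"

    class FakeDecodeWorker : public gpu::ImageDecodeAcceleratorWorker {
     public:
      void Decode(std::vector<uint8_t> encoded_data,
                  const gfx::Size& output_size,
                  base::OnceCallback<void(std::vector<uint8_t>)> decode_cb)
          override {
        // |encoded_data| is ignored by this fake.
        std::vector<uint8_t> rgba;
        if (!output_size.IsEmpty()) {
          // "Decode" to an opaque black image of the requested size.
          rgba.resize(4u * output_size.width() * output_size.height(), 0u);
          for (size_t i = 3; i < rgba.size(); i += 4)
            rgba[i] = 0xFF;  // Alpha.
        }
        // Posting to the current sequence keeps the callback asynchronous
        // and preserves call order, as the interface requires.
        base::ThreadTaskRunnerHandle::Get()->PostTask(
            FROM_HERE, base::BindOnce(std::move(decode_cb), std::move(rgba)));
      }
    };
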
diff --git a/chromium/gpu/ipc/service/image_transport_surface_fuchsia.cc b/chromium/gpu/ipc/service/image_transport_surface_fuchsia.cc
index f2feaed4c02..636ded4ef60 100644
--- a/chromium/gpu/ipc/service/image_transport_surface_fuchsia.cc
+++ b/chromium/gpu/ipc/service/image_transport_surface_fuchsia.cc
@@ -5,6 +5,7 @@
#include "gpu/ipc/service/image_transport_surface.h"
#include "base/logging.h"
+#include "gpu/ipc/service/pass_through_image_transport_surface.h"
#include "ui/gl/gl_surface.h"
#include "ui/gl/gl_surface_stub.h"
#include "ui/gl/init/gl_factory.h"
@@ -21,7 +22,13 @@ scoped_refptr<gl::GLSurface> ImageTransportSurface::CreateNativeSurface(
return new gl::GLSurfaceStub;
}
- return gl::init::CreateViewGLSurface(surface_handle);
+ scoped_refptr<gl::GLSurface> surface =
+ gl::init::CreateViewGLSurface(surface_handle);
+
+ if (!surface)
+    return nullptr;
+ return base::MakeRefCounted<PassThroughImageTransportSurface>(
+ delegate, surface.get(), false);
}
} // namespace gpu
diff --git a/chromium/gpu/ipc/service/image_transport_surface_overlay_mac.mm b/chromium/gpu/ipc/service/image_transport_surface_overlay_mac.mm
index 9655417c802..3884df87a49 100644
--- a/chromium/gpu/ipc/service/image_transport_surface_overlay_mac.mm
+++ b/chromium/gpu/ipc/service/image_transport_surface_overlay_mac.mm
@@ -303,8 +303,8 @@ void ImageTransportSurfaceOverlayMac::OnGpuSwitched() {
// transport surface that is observing the GPU switch.
base::ThreadTaskRunnerHandle::Get()->PostTask(
FROM_HERE,
- base::Bind(
- base::DoNothing::Repeatedly<scoped_refptr<ui::IOSurfaceContext>>(),
+ base::BindOnce(
+ base::DoNothing::Once<scoped_refptr<ui::IOSurfaceContext>>(),
context_on_new_gpu));
}
diff --git a/chromium/gpu/ipc/service/image_transport_surface_win.cc b/chromium/gpu/ipc/service/image_transport_surface_win.cc
index 8c3b9e78384..5306c000412 100644
--- a/chromium/gpu/ipc/service/image_transport_surface_win.cc
+++ b/chromium/gpu/ipc/service/image_transport_surface_win.cc
@@ -37,7 +37,7 @@ scoped_refptr<gl::GLSurface> ImageTransportSurface::CreateNativeSurface(
if (DirectCompositionSurfaceWin::IsDirectCompositionSupported()) {
surface = base::MakeRefCounted<DirectCompositionSurfaceWin>(
std::move(vsync_provider), delegate, surface_handle);
- if (!surface->Initialize())
+ if (!surface->Initialize(gl::GLSurfaceFormat()))
return nullptr;
} else {
surface = gl::InitializeGLSurface(
diff --git a/chromium/gpu/ipc/service/raster_command_buffer_stub.cc b/chromium/gpu/ipc/service/raster_command_buffer_stub.cc
index 78adb5527ff..f673526d6ef 100644
--- a/chromium/gpu/ipc/service/raster_command_buffer_stub.cc
+++ b/chromium/gpu/ipc/service/raster_command_buffer_stub.cc
@@ -93,15 +93,35 @@ gpu::ContextResult RasterCommandBufferStub::Initialize(
return ContextResult::kFatalFailure;
}
- auto feature_info = base::MakeRefCounted<gles2::FeatureInfo>(
- manager->gpu_driver_bug_workarounds(), manager->gpu_feature_info());
+ ContextResult result;
+ auto shared_context_state = manager->GetSharedContextState(&result);
+ if (!shared_context_state) {
+ LOG(ERROR) << "ContextResult::kFatalFailure: "
+                  "Failed to create shared context state.";
+ DCHECK_NE(result, gpu::ContextResult::kSuccess);
+ return result;
+ }
+
+ if (!shared_context_state->IsGLInitialized()) {
+ if (!shared_context_state->MakeCurrent(nullptr) ||
+ !shared_context_state->InitializeGL(
+ manager->gpu_preferences(),
+ base::MakeRefCounted<gles2::FeatureInfo>(
+ manager->gpu_driver_bug_workarounds(),
+ manager->gpu_feature_info()))) {
+      LOG(ERROR) << "Failed to initialize GL for SharedContextState";
+ return ContextResult::kFatalFailure;
+ }
+ }
+
gpu::GpuMemoryBufferFactory* gmb_factory =
manager->gpu_memory_buffer_factory();
context_group_ = base::MakeRefCounted<gles2::ContextGroup>(
manager->gpu_preferences(), gles2::PassthroughCommandDecoderSupported(),
manager->mailbox_manager(), CreateMemoryTracker(init_params),
manager->shader_translator_cache(),
- manager->framebuffer_completeness_cache(), std::move(feature_info),
+ manager->framebuffer_completeness_cache(),
+ shared_context_state->feature_info(),
init_params.attribs.bind_generates_resource, channel_->image_manager(),
gmb_factory ? gmb_factory->AsImageFactory() : nullptr,
/*progress_reporter=*/manager->watchdog(), manager->gpu_feature_info(),
@@ -109,26 +129,16 @@ gpu::ContextResult RasterCommandBufferStub::Initialize(
manager->passthrough_discardable_manager(),
manager->shared_image_manager());
- ContextResult result;
- auto raster_decoder_context_state =
- manager->GetRasterDecoderContextState(&result);
- if (!raster_decoder_context_state) {
- LOG(ERROR) << "ContextResult::kFatalFailure: "
- "Failed to create raster decoder state.";
- DCHECK_NE(result, gpu::ContextResult::kSuccess);
- return result;
- }
-
- surface_ = raster_decoder_context_state->surface;
- share_group_ = raster_decoder_context_state->share_group;
+ surface_ = shared_context_state->surface();
+ share_group_ = shared_context_state->share_group();
use_virtualized_gl_context_ =
- raster_decoder_context_state->use_virtualized_gl_contexts;
+ shared_context_state->use_virtualized_gl_contexts();
command_buffer_ = std::make_unique<CommandBufferService>(
this, context_group_->transfer_buffer_manager());
std::unique_ptr<raster::RasterDecoder> decoder(raster::RasterDecoder::Create(
this, command_buffer_.get(), manager->outputter(), context_group_.get(),
- raster_decoder_context_state));
+ shared_context_state));
sync_point_client_state_ =
channel_->sync_point_manager()->CreateSyncPointClientState(
@@ -138,26 +148,8 @@ gpu::ContextResult RasterCommandBufferStub::Initialize(
crash_keys::gpu_gl_context_is_virtual.Set(use_virtualized_gl_context_ ? "1"
: "0");
- scoped_refptr<gl::GLContext> context = raster_decoder_context_state->context;
- if (use_virtualized_gl_context_) {
- context = base::MakeRefCounted<GLContextVirtual>(
- share_group_.get(), context.get(), decoder->AsWeakPtr());
- if (!context->Initialize(surface_.get(),
- GenerateGLContextAttribs(init_params.attribs,
- context_group_.get()))) {
- // The real context created above for the default offscreen surface
- // might not be compatible with this surface.
- context = nullptr;
- // TODO(piman): This might not be fatal, we could recurse into
- // CreateGLContext to get more info, tho it should be exceedingly
- // rare and may not be recoverable anyway.
- LOG(ERROR) << "ContextResult::kFatalFailure: "
- "Failed to initialize virtual GL context.";
- return gpu::ContextResult::kFatalFailure;
- }
- }
-
- if (!context->MakeCurrent(surface_.get())) {
+ scoped_refptr<gl::GLContext> context = shared_context_state->context();
+ if (!shared_context_state->MakeCurrent(nullptr)) {
LOG(ERROR) << "ContextResult::kTransientFailure: "
"Failed to make context current.";
return gpu::ContextResult::kTransientFailure;
@@ -196,22 +188,6 @@ gpu::ContextResult RasterCommandBufferStub::Initialize(
if (!active_url_.is_empty())
manager->delegate()->DidCreateOffscreenContext(active_url_);
- if (use_virtualized_gl_context_) {
- // If virtualized GL contexts are in use, then real GL context state
- // is in an indeterminate state, since the GLStateRestorer was not
- // initialized at the time the GLContextVirtual was made current. In
- // the case that this command decoder is the next one to be
- // processed, force a "full virtual" MakeCurrent to be performed.
- // Note that GpuChannel's initialization of the gpu::Capabilities
- // expects the context to be left current.
- context->ForceReleaseVirtuallyCurrent();
- if (!context->MakeCurrent(surface_.get())) {
- LOG(ERROR) << "ContextResult::kTransientFailure: "
- "Failed to make context current after initialization.";
- return gpu::ContextResult::kTransientFailure;
- }
- }
-
manager->delegate()->DidCreateContextSuccessfully();
initialized_ = true;
return gpu::ContextResult::kSuccess;
diff --git a/chromium/gpu/ipc/service/shared_image_stub.cc b/chromium/gpu/ipc/service/shared_image_stub.cc
index 86747de159c..6ee676a8016 100644
--- a/chromium/gpu/ipc/service/shared_image_stub.cc
+++ b/chromium/gpu/ipc/service/shared_image_stub.cc
@@ -47,10 +47,14 @@ bool SharedImageStub::OnMessageReceived(const IPC::Message& msg) {
bool handled = true;
IPC_BEGIN_MESSAGE_MAP(SharedImageStub, msg)
IPC_MESSAGE_HANDLER(GpuChannelMsg_CreateSharedImage, OnCreateSharedImage)
+ IPC_MESSAGE_HANDLER(GpuChannelMsg_CreateSharedImageWithData,
+ OnCreateSharedImageWithData)
IPC_MESSAGE_HANDLER(GpuChannelMsg_CreateGMBSharedImage,
OnCreateGMBSharedImage)
IPC_MESSAGE_HANDLER(GpuChannelMsg_UpdateSharedImage, OnUpdateSharedImage)
IPC_MESSAGE_HANDLER(GpuChannelMsg_DestroySharedImage, OnDestroySharedImage)
+ IPC_MESSAGE_HANDLER(GpuChannelMsg_RegisterSharedImageUploadBuffer,
+ OnRegisterSharedImageUploadBuffer)
IPC_MESSAGE_UNHANDLED(handled = false)
IPC_END_MESSAGE_MAP()
return handled;
@@ -60,6 +64,13 @@ void SharedImageStub::OnCreateSharedImage(
const GpuChannelMsg_CreateSharedImage_Params& params) {
TRACE_EVENT2("gpu", "SharedImageStub::OnCreateSharedImage", "width",
params.size.width(), "height", params.size.height());
+ if (!params.mailbox.IsSharedImage()) {
+ LOG(ERROR) << "SharedImageStub: Trying to create a SharedImage with a "
+ "non-SharedImage mailbox.";
+ OnError();
+ return;
+ }
+
if (!MakeContextCurrentAndCreateFactory()) {
OnError();
return;
@@ -80,10 +91,75 @@ void SharedImageStub::OnCreateSharedImage(
sync_point_client_state_->ReleaseFenceSync(params.release_id);
}
+void SharedImageStub::OnCreateSharedImageWithData(
+ const GpuChannelMsg_CreateSharedImageWithData_Params& params) {
+ TRACE_EVENT2("gpu", "SharedImageStub::OnCreateSharedImageWithData", "width",
+ params.size.width(), "height", params.size.height());
+ if (!params.mailbox.IsSharedImage()) {
+ LOG(ERROR) << "SharedImageStub: Trying to create a SharedImage with a "
+ "non-SharedImage mailbox.";
+ OnError();
+ return;
+ }
+
+ if (!MakeContextCurrentAndCreateFactory()) {
+ OnError();
+ return;
+ }
+
+ base::CheckedNumeric<size_t> safe_required_span_size =
+ params.pixel_data_offset;
+ safe_required_span_size += params.pixel_data_size;
+ size_t required_span_size;
+ if (!safe_required_span_size.AssignIfValid(&required_span_size)) {
+ LOG(ERROR) << "SharedImageStub: upload data size and offset is invalid";
+ OnError();
+ return;
+ }
+
+ auto memory =
+ upload_memory_mapping_.GetMemoryAsSpan<uint8_t>(required_span_size);
+ if (memory.empty()) {
+ LOG(ERROR) << "SharedImageStub: upload data does not have expected size";
+ OnError();
+ return;
+ }
+
+ auto subspan =
+ memory.subspan(params.pixel_data_offset, params.pixel_data_size);
+
+ if (!factory_->CreateSharedImage(params.mailbox, params.format, params.size,
+ params.color_space, params.usage, subspan)) {
+ LOG(ERROR) << "SharedImageStub: Unable to create shared image";
+ OnError();
+ return;
+ }
+
+ // If this is the last upload using a given buffer, release it.
+ if (params.done_with_shm) {
+ upload_memory_mapping_ = base::ReadOnlySharedMemoryMapping();
+ upload_memory_ = base::ReadOnlySharedMemoryRegion();
+ }
+
+ SyncToken sync_token(sync_point_client_state_->namespace_id(),
+ sync_point_client_state_->command_buffer_id(),
+ params.release_id);
+ auto* mailbox_manager = channel_->gpu_channel_manager()->mailbox_manager();
+ mailbox_manager->PushTextureUpdates(sync_token);
+ sync_point_client_state_->ReleaseFenceSync(params.release_id);
+}
+
void SharedImageStub::OnCreateGMBSharedImage(
GpuChannelMsg_CreateGMBSharedImage_Params params) {
TRACE_EVENT2("gpu", "SharedImageStub::OnCreateSharedImage", "width",
params.size.width(), "height", params.size.height());
+ if (!params.mailbox.IsSharedImage()) {
+ LOG(ERROR) << "SharedImageStub: Trying to create a SharedImage with a "
+ "non-SharedImage mailbox.";
+ OnError();
+ return;
+ }
+
if (!MakeContextCurrentAndCreateFactory()) {
OnError();
return;
@@ -111,6 +187,13 @@ void SharedImageStub::OnCreateGMBSharedImage(
void SharedImageStub::OnUpdateSharedImage(const Mailbox& mailbox,
uint32_t release_id) {
TRACE_EVENT0("gpu", "SharedImageStub::OnDestroySharedImage");
+ if (!mailbox.IsSharedImage()) {
+ LOG(ERROR) << "SharedImageStub: Trying to access a SharedImage with a "
+ "non-SharedImage mailbox.";
+ OnError();
+ return;
+ }
+
if (!MakeContextCurrentAndCreateFactory()) {
OnError();
return;
@@ -132,6 +215,13 @@ void SharedImageStub::OnUpdateSharedImage(const Mailbox& mailbox,
void SharedImageStub::OnDestroySharedImage(const Mailbox& mailbox) {
TRACE_EVENT0("gpu", "SharedImageStub::OnDestroySharedImage");
+ if (!mailbox.IsSharedImage()) {
+ LOG(ERROR) << "SharedImageStub: Trying to destroy a SharedImage with a "
+ "non-SharedImage mailbox.";
+ OnError();
+ return;
+ }
+
if (!MakeContextCurrentAndCreateFactory()) {
OnError();
return;
@@ -144,18 +234,31 @@ void SharedImageStub::OnDestroySharedImage(const Mailbox& mailbox) {
}
}
+void SharedImageStub::OnRegisterSharedImageUploadBuffer(
+ base::ReadOnlySharedMemoryRegion shm) {
+ TRACE_EVENT0("gpu", "SharedImageStub::OnRegisterSharedImageUploadBuffer");
+ upload_memory_ = std::move(shm);
+ upload_memory_mapping_ = upload_memory_.Map();
+ if (!upload_memory_mapping_.IsValid()) {
+ LOG(ERROR)
+ << "SharedImageStub: Unable to map shared memory for upload data";
+ OnError();
+ return;
+ }
+}
+
bool SharedImageStub::MakeContextCurrent() {
DCHECK(context_state_);
- DCHECK(!context_state_->context_lost);
+ DCHECK(!context_state_->context_lost());
- // |factory_| never writes to the surface, so skip unnecessary MakeCurrent to
+  // |factory_| never writes to the surface, so skip a redundant MakeCurrent to
// improve performance. https://crbug.com/457431
- if (context_state_->context->IsCurrent(nullptr))
- return true;
-
- if (context_state_->context->MakeCurrent(context_state_->surface.get())) {
+ auto* context = context_state_->real_context();
+ if (context->IsCurrent(nullptr) ||
+      context->MakeCurrent(context_state_->surface())) {
return true;
} else {
+ context_state_->MarkContextLost();
LOG(ERROR) << "SharedImageStub: MakeCurrent failed";
return false;
}
@@ -166,13 +269,13 @@ bool SharedImageStub::MakeContextCurrentAndCreateFactory() {
auto* channel_manager = channel_->gpu_channel_manager();
DCHECK(!context_state_);
ContextResult result;
- context_state_ = channel_manager->GetRasterDecoderContextState(&result);
+ context_state_ = channel_manager->GetSharedContextState(&result);
if (result != ContextResult::kSuccess) {
LOG(ERROR) << "SharedImageStub: unable to create context";
return false;
}
DCHECK(context_state_);
- DCHECK(!context_state_->context_lost);
+ DCHECK(!context_state_->context_lost());
if (!MakeContextCurrent())
return false;
gpu::GpuMemoryBufferFactory* gmb_factory =
@@ -187,13 +290,12 @@ bool SharedImageStub::MakeContextCurrentAndCreateFactory() {
return true;
} else {
DCHECK(context_state_);
- if (context_state_->context_lost) {
+ if (context_state_->context_lost()) {
LOG(ERROR) << "SharedImageStub: context already lost";
return false;
} else {
if (MakeContextCurrent())
return true;
- context_state_->context_lost = true;
return false;
}
}
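
The offset/size validation in OnCreateSharedImageWithData follows a standard
Chromium pattern for untrusted IPC arithmetic: accumulate in a
base::CheckedNumeric and only use the sum if AssignIfValid() succeeds. The
same check, reduced to a standalone sketch:

    #include <stddef.h>

    #include "base/numerics/checked_math.h"

    bool SpanFitsInMapping(size_t mapping_size, size_t offset, size_t size) {
      base::CheckedNumeric<size_t> end = offset;
      end += size;  // Overflow poisons |end| instead of wrapping.
      size_t required;
      // AssignIfValid() is false if offset + size overflowed size_t.
      return end.AssignIfValid(&required) && required <= mapping_size;
    }
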
diff --git a/chromium/gpu/ipc/service/shared_image_stub.h b/chromium/gpu/ipc/service/shared_image_stub.h
index 4b931644f4a..e1fabff45d1 100644
--- a/chromium/gpu/ipc/service/shared_image_stub.h
+++ b/chromium/gpu/ipc/service/shared_image_stub.h
@@ -13,14 +13,11 @@
#include "ipc/ipc_listener.h"
namespace gpu {
+class SharedContextState;
struct Mailbox;
class GpuChannel;
class SharedImageFactory;
-namespace raster {
-struct RasterDecoderContextState;
-}
-
class SharedImageStub : public IPC::Listener,
public MemoryTracker,
public base::trace_event::MemoryDumpProvider {
@@ -47,9 +44,12 @@ class SharedImageStub : public IPC::Listener,
private:
void OnCreateSharedImage(
const GpuChannelMsg_CreateSharedImage_Params& params);
+ void OnCreateSharedImageWithData(
+ const GpuChannelMsg_CreateSharedImageWithData_Params& params);
void OnCreateGMBSharedImage(GpuChannelMsg_CreateGMBSharedImage_Params params);
void OnUpdateSharedImage(const Mailbox& mailbox, uint32_t release_id);
void OnDestroySharedImage(const Mailbox& mailbox);
+ void OnRegisterSharedImageUploadBuffer(base::ReadOnlySharedMemoryRegion shm);
bool MakeContextCurrent();
bool MakeContextCurrentAndCreateFactory();
void OnError();
@@ -57,9 +57,12 @@ class SharedImageStub : public IPC::Listener,
GpuChannel* channel_;
SequenceId sequence_;
scoped_refptr<gpu::SyncPointClientState> sync_point_client_state_;
- scoped_refptr<raster::RasterDecoderContextState> context_state_;
+ scoped_refptr<SharedContextState> context_state_;
std::unique_ptr<SharedImageFactory> factory_;
uint64_t size_ = 0;
+ // Holds shared memory used in initial data uploads.
+ base::ReadOnlySharedMemoryRegion upload_memory_;
+ base::ReadOnlySharedMemoryMapping upload_memory_mapping_;
};
} // namespace gpu
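
The two new members encode a small protocol: the client registers one
read-only region, the stub maps it once, and later
GpuChannelMsg_CreateSharedImageWithData messages reference (offset, size)
ranges within it until |done_with_shm| drops both. A standalone sketch of
creating and re-mapping such a region with the base API:

    #include <utility>

    #include "base/logging.h"
    #include "base/memory/read_only_shared_memory_region.h"

    void UploadBufferSketch() {
      // Producer side: a writable mapping plus a read-only region handle.
      base::MappedReadOnlyRegion mapped =
          base::ReadOnlySharedMemoryRegion::Create(64 * 1024);
      CHECK(mapped.IsValid());
      // The region is what would travel over IPC to the stub...
      base::ReadOnlySharedMemoryRegion region = std::move(mapped.region);
      // ...which maps it once and serves every later upload from the mapping.
      base::ReadOnlySharedMemoryMapping read_mapping = region.Map();
      CHECK(read_mapping.IsValid());
    }
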
diff --git a/chromium/gpu/perftests/run_all_tests.cc b/chromium/gpu/perftests/run_all_tests.cc
index 7d35e61f2b0..3e958d0828d 100644
--- a/chromium/gpu/perftests/run_all_tests.cc
+++ b/chromium/gpu/perftests/run_all_tests.cc
@@ -29,7 +29,6 @@ int main(int argc, char** argv) {
// Always run the perf tests serially, to avoid distorting
// perf measurements with randomness resulting from running
// in parallel.
- const auto& run_test_suite =
- base::Bind(&RunHelper, base::Unretained(&test_suite));
- return base::LaunchUnitTestsSerially(argc, argv, run_test_suite);
+ return base::LaunchUnitTestsSerially(
+ argc, argv, base::BindOnce(&RunHelper, base::Unretained(&test_suite)));
}
diff --git a/chromium/gpu/skia_bindings/gl_bindings_skia_cmd_buffer.cc b/chromium/gpu/skia_bindings/gl_bindings_skia_cmd_buffer.cc
index de7ad594dc8..a6d8044934d 100644
--- a/chromium/gpu/skia_bindings/gl_bindings_skia_cmd_buffer.cc
+++ b/chromium/gpu/skia_bindings/gl_bindings_skia_cmd_buffer.cc
@@ -252,6 +252,10 @@ sk_sp<GrGLInterface> CreateGLES2InterfaceBindings(
gles_bind(&GLES2Interface::StencilOpSeparate, impl, context_support);
functions->fTexImage2D =
gles_bind(&GLES2Interface::TexImage2D, impl, context_support);
+ functions->fTexParameterf =
+ gles_bind(&GLES2Interface::TexParameterf, impl, context_support);
+ functions->fTexParameterfv =
+ gles_bind(&GLES2Interface::TexParameterfv, impl, context_support);
functions->fTexParameteri =
gles_bind(&GLES2Interface::TexParameteri, impl, context_support);
functions->fTexParameteriv =
diff --git a/chromium/gpu/tools/compositor_model_bench/compositor_model_bench.cc b/chromium/gpu/tools/compositor_model_bench/compositor_model_bench.cc
index 6b2ee211c3e..78425e1c8b0 100644
--- a/chromium/gpu/tools/compositor_model_bench/compositor_model_bench.cc
+++ b/chromium/gpu/tools/compositor_model_bench/compositor_model_bench.cc
@@ -118,7 +118,7 @@ class Simulator {
message_loop_.task_runner()->PostTask(
FROM_HERE,
- base::Bind(&Simulator::ProcessEvents, weak_factory_.GetWeakPtr()));
+ base::BindOnce(&Simulator::ProcessEvents, weak_factory_.GetWeakPtr()));
run_loop_.Run();
}
@@ -254,7 +254,7 @@ class Simulator {
base::ThreadTaskRunnerHandle::Get()->PostTask(
FROM_HERE,
- base::Bind(&Simulator::UpdateLoop, weak_factory_.GetWeakPtr()));
+ base::BindOnce(&Simulator::UpdateLoop, weak_factory_.GetWeakPtr()));
}
void DumpOutput() {
diff --git a/chromium/gpu/vulkan/BUILD.gn b/chromium/gpu/vulkan/BUILD.gn
index 3e8862decbb..5b69e428a04 100644
--- a/chromium/gpu/vulkan/BUILD.gn
+++ b/chromium/gpu/vulkan/BUILD.gn
@@ -2,11 +2,12 @@
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
-import("features.gni")
+import("//build/buildflag_header.gni")
+import("//build/config/dcheck_always_on.gni")
import("//build/config/jumbo.gni")
import("//build/config/ui.gni")
-import("//build/buildflag_header.gni")
import("//testing/test.gni")
+import("features.gni")
# Generate a buildflag header for compile-time checking of Vulkan support.
buildflag_header("buildflags") {
@@ -40,6 +41,9 @@ if (enable_vulkan) {
configs += [ "//build/config:precompiled_headers" ]
defines = [ "VULKAN_IMPLEMENTATION" ]
+ if (is_android) {
+ defines += [ "VK_USE_PLATFORM_ANDROID_KHR" ]
+ }
all_dependent_configs = [ "//third_party/vulkan:vulkan_config" ]
@@ -50,7 +54,15 @@ if (enable_vulkan) {
data_deps = []
if (is_fuchsia) {
- data_deps += [ "//third_party/fuchsia-sdk:vulkan_layers" ]
+ data_deps += [ "//third_party/fuchsia-sdk:vulkan_base" ]
+
+      # VulkanInstance enables the validation layers in Debug builds and when
+      # DCHECKs are enabled in Release builds. In these cases the validation
+      # layer libraries and configs need to be included in the generated
+      # Fuchsia package.
+ if (is_debug || dcheck_always_on) {
+ data_deps += [ "//third_party/fuchsia-sdk:vulkan_validation" ]
+ }
}
}
diff --git a/chromium/gpu/vulkan/android/vulkan_android_unittests.cc b/chromium/gpu/vulkan/android/vulkan_android_unittests.cc
index 61618a10260..a27c8e089af 100644
--- a/chromium/gpu/vulkan/android/vulkan_android_unittests.cc
+++ b/chromium/gpu/vulkan/android/vulkan_android_unittests.cc
@@ -4,6 +4,8 @@
#include <sys/eventfd.h>
+#include "base/android/android_hardware_buffer_compat.h"
+#include "base/android/scoped_hardware_buffer_handle.h"
#include "base/files/scoped_file.h"
#include "components/viz/common/gpu/vulkan_in_process_context_provider.h"
#include "gpu/vulkan/android/vulkan_implementation_android.h"
@@ -18,20 +20,33 @@ class VulkanImplementationAndroidTest : public testing::Test {
// Create a vulkan implementation.
vk_implementation_ = std::make_unique<VulkanImplementationAndroid>();
ASSERT_TRUE(vk_implementation_);
- ASSERT_TRUE(vk_implementation_->InitializeVulkanInstance());
- // Create vulkan context provider.
+    // This call checks for all required instance extensions. Let the test
+    // pass if this call fails, since many bots do not have these extensions
+    // present.
+ if (!vk_implementation_->InitializeVulkanInstance())
+ return;
+
+    // Create the Vulkan context provider. This call checks for all required
+    // device extensions. Let the test pass if this call fails, since many
+    // bots do not have these extensions present.
vk_context_provider_ =
viz::VulkanInProcessContextProvider::Create(vk_implementation_.get());
- ASSERT_TRUE(vk_context_provider_);
+ if (!vk_context_provider_)
+ return;
// Get the VkDevice.
vk_device_ = vk_context_provider_->GetDeviceQueue()->GetVulkanDevice();
ASSERT_TRUE(vk_device_);
+
+ // Get the physical device.
+ vk_phy_device_ =
+ vk_context_provider_->GetDeviceQueue()->GetVulkanPhysicalDevice();
+ ASSERT_TRUE(vk_phy_device_);
}
void TearDown() override {
- vk_context_provider_->Destroy();
+ if (vk_context_provider_)
+ vk_context_provider_->Destroy();
vk_device_ = VK_NULL_HANDLE;
}
@@ -39,9 +54,13 @@ class VulkanImplementationAndroidTest : public testing::Test {
std::unique_ptr<VulkanImplementationAndroid> vk_implementation_;
scoped_refptr<viz::VulkanInProcessContextProvider> vk_context_provider_;
VkDevice vk_device_;
+ VkPhysicalDevice vk_phy_device_;
};
TEST_F(VulkanImplementationAndroidTest, ExportImportSyncFd) {
+ if (!vk_implementation_ || !vk_context_provider_)
+ return;
+
// Create a vk semaphore which can be exported.
// To create a semaphore whose payload can be exported to external handles,
// add the VkExportSemaphoreCreateInfo structure to the pNext chain of the
@@ -93,4 +112,41 @@ TEST_F(VulkanImplementationAndroidTest, ExportImportSyncFd) {
vkDestroySemaphore(vk_device_, semaphore2, nullptr);
}
+TEST_F(VulkanImplementationAndroidTest, CreateVkImageFromAHB) {
+ if (!vk_implementation_ || !vk_context_provider_)
+ return;
+
+  // Describe the AHardwareBuffer to allocate.
+ AHardwareBuffer* buffer = nullptr;
+ AHardwareBuffer_Desc hwb_desc;
+ hwb_desc.width = 128;
+ hwb_desc.height = 128;
+ hwb_desc.format = AHARDWAREBUFFER_FORMAT_R8G8B8A8_UNORM;
+ hwb_desc.usage = AHARDWAREBUFFER_USAGE_GPU_SAMPLED_IMAGE |
+ AHARDWAREBUFFER_USAGE_GPU_COLOR_OUTPUT;
+ hwb_desc.layers = 1;
+ hwb_desc.stride = 0;
+ hwb_desc.rfu0 = 0;
+ hwb_desc.rfu1 = 0;
+
+ // Allocate an AHardwareBuffer.
+ base::AndroidHardwareBufferCompat::GetInstance().Allocate(&hwb_desc, &buffer);
+ EXPECT_TRUE(buffer);
+
+  // Create a VkImage and import the AHB into it.
+ const gfx::Size size(hwb_desc.width, hwb_desc.height);
+ VkImage vk_image;
+ VkImageCreateInfo vk_image_info;
+ VkDeviceMemory vk_device_memory;
+ VkDeviceSize mem_allocation_size;
+ EXPECT_TRUE(vk_implementation_->CreateVkImageAndImportAHB(
+ vk_device_, vk_phy_device_, size,
+ base::android::ScopedHardwareBufferHandle::Adopt(buffer), &vk_image,
+ &vk_image_info, &vk_device_memory, &mem_allocation_size));
+
+ // Free up resources.
+ vkDestroyImage(vk_device_, vk_image, nullptr);
+ vkFreeMemory(vk_device_, vk_device_memory, nullptr);
+}
+
} // namespace gpu
diff --git a/chromium/gpu/vulkan/android/vulkan_implementation_android.cc b/chromium/gpu/vulkan/android/vulkan_implementation_android.cc
index 54cb9050ab6..275fb419a90 100644
--- a/chromium/gpu/vulkan/android/vulkan_implementation_android.cc
+++ b/chromium/gpu/vulkan/android/vulkan_implementation_android.cc
@@ -4,6 +4,7 @@
#include "gpu/vulkan/android/vulkan_implementation_android.h"
+#include "base/android/android_hardware_buffer_compat.h"
#include "base/bind_helpers.h"
#include "base/files/file_path.h"
#include "base/logging.h"
@@ -22,7 +23,6 @@ VulkanImplementationAndroid::~VulkanImplementationAndroid() = default;
bool VulkanImplementationAndroid::InitializeVulkanInstance() {
std::vector<const char*> required_extensions = {
VK_KHR_SURFACE_EXTENSION_NAME, VK_KHR_ANDROID_SURFACE_EXTENSION_NAME,
- VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME,
VK_KHR_EXTERNAL_MEMORY_CAPABILITIES_EXTENSION_NAME,
VK_KHR_EXTERNAL_SEMAPHORE_CAPABILITIES_EXTENSION_NAME};
@@ -35,20 +35,15 @@ bool VulkanImplementationAndroid::InitializeVulkanInstance() {
if (!vulkan_function_pointers->vulkan_loader_library_)
return false;
- if (!vulkan_instance_.Initialize(required_extensions, {})) {
- vulkan_instance_.Destroy();
+ if (!vulkan_instance_.Initialize(required_extensions, {}))
return false;
- }
// Initialize platform function pointers
vkCreateAndroidSurfaceKHR_ =
reinterpret_cast<PFN_vkCreateAndroidSurfaceKHR>(vkGetInstanceProcAddr(
vulkan_instance_.vk_instance(), "vkCreateAndroidSurfaceKHR"));
- if (!vkCreateAndroidSurfaceKHR_) {
- LOG(ERROR) << "vkCreateAndroidSurfaceKHR not found";
- vulkan_instance_.Destroy();
+ if (!vkCreateAndroidSurfaceKHR_)
return false;
- }
return true;
}
@@ -70,8 +65,7 @@ std::unique_ptr<VulkanSurface> VulkanImplementationAndroid::CreateViewSurface(
return nullptr;
}
- return std::make_unique<VulkanSurface>(GetVulkanInstance(), surface,
- base::DoNothing());
+ return std::make_unique<VulkanSurface>(GetVulkanInstance(), surface);
}
bool VulkanImplementationAndroid::GetPhysicalDevicePresentationSupport(
@@ -86,9 +80,15 @@ bool VulkanImplementationAndroid::GetPhysicalDevicePresentationSupport(
std::vector<const char*>
VulkanImplementationAndroid::GetRequiredDeviceExtensions() {
+ // VK_KHR_DEDICATED_ALLOCATION_EXTENSION_NAME also requires
+ // VK_KHR_GET_MEMORY_REQUIREMENTS_2_EXTENSION_NAME as per spec.
return {VK_KHR_SWAPCHAIN_EXTENSION_NAME,
VK_KHR_EXTERNAL_SEMAPHORE_EXTENSION_NAME,
- VK_KHR_EXTERNAL_SEMAPHORE_FD_EXTENSION_NAME};
+ VK_KHR_EXTERNAL_SEMAPHORE_FD_EXTENSION_NAME,
+ VK_ANDROID_EXTERNAL_MEMORY_ANDROID_HARDWARE_BUFFER_EXTENSION_NAME,
+ VK_KHR_EXTERNAL_MEMORY_EXTENSION_NAME,
+ VK_KHR_DEDICATED_ALLOCATION_EXTENSION_NAME,
+ VK_KHR_GET_MEMORY_REQUIREMENTS_2_EXTENSION_NAME};
}
VkFence VulkanImplementationAndroid::CreateVkFenceForGpuFence(
@@ -165,4 +165,195 @@ bool VulkanImplementationAndroid::GetSemaphoreFdKHR(VkDevice vk_device,
return true;
}
+bool VulkanImplementationAndroid::CreateVkImageAndImportAHB(
+ const VkDevice& vk_device,
+ const VkPhysicalDevice& vk_physical_device,
+ const gfx::Size& size,
+ base::android::ScopedHardwareBufferHandle ahb_handle,
+ VkImage* vk_image,
+ VkImageCreateInfo* vk_image_info,
+ VkDeviceMemory* vk_device_memory,
+ VkDeviceSize* mem_allocation_size) {
+ DCHECK(ahb_handle.is_valid());
+ DCHECK(vk_image);
+ DCHECK(vk_image_info);
+ DCHECK(vk_device_memory);
+ DCHECK(mem_allocation_size);
+
+ // To obtain format properties of an Android hardware buffer, include an
+ // instance of VkAndroidHardwareBufferFormatPropertiesANDROID in the pNext
+ // chain of the VkAndroidHardwareBufferPropertiesANDROID instance passed to
+ // vkGetAndroidHardwareBufferPropertiesANDROID.
+ VkAndroidHardwareBufferFormatPropertiesANDROID ahb_format_props;
+ ahb_format_props.sType =
+ VK_STRUCTURE_TYPE_ANDROID_HARDWARE_BUFFER_FORMAT_PROPERTIES_ANDROID;
+ ahb_format_props.pNext = nullptr;
+
+ VkAndroidHardwareBufferPropertiesANDROID ahb_props;
+ ahb_props.sType =
+ VK_STRUCTURE_TYPE_ANDROID_HARDWARE_BUFFER_PROPERTIES_ANDROID;
+ ahb_props.pNext = &ahb_format_props;
+
+  VkResult result = vkGetAndroidHardwareBufferPropertiesANDROID(
+ vk_device, ahb_handle.get(), &ahb_props);
+ if (result != VK_SUCCESS) {
+ LOG(ERROR) << "GetAndroidHardwareBufferProperties failed : " << result;
+ return false;
+ }
+
+ // To create an image with an external format, include an instance of
+ // VkExternalFormatANDROID in the pNext chain of VkImageCreateInfo.
+ VkExternalFormatANDROID external_format;
+ external_format.sType = VK_STRUCTURE_TYPE_EXTERNAL_FORMAT_ANDROID;
+ external_format.pNext = nullptr;
+
+ // If externalFormat is zero, the effect is as if the VkExternalFormatANDROID
+ // structure was not present. Otherwise, the image will have the specified
+ // external format.
+ external_format.externalFormat = 0;
+
+  // If the image has an external format, its format must be
+  // VK_FORMAT_UNDEFINED.
+ if (ahb_format_props.format == VK_FORMAT_UNDEFINED) {
+ // externalFormat must be 0 or a value returned in the externalFormat member
+ // of VkAndroidHardwareBufferFormatPropertiesANDROID by an earlier call to
+ // vkGetAndroidHardwareBufferPropertiesANDROID.
+ external_format.externalFormat = ahb_format_props.externalFormat;
+ }
+
+ // To define a set of external memory handle types that may be used as backing
+ // store for an image, add a VkExternalMemoryImageCreateInfo structure to the
+ // pNext chain of the VkImageCreateInfo structure.
+ VkExternalMemoryImageCreateInfo external_memory_image_info;
+ external_memory_image_info.sType =
+ VK_STRUCTURE_TYPE_EXTERNAL_MEMORY_IMAGE_CREATE_INFO;
+ external_memory_image_info.pNext = &external_format;
+ external_memory_image_info.handleTypes =
+ VK_EXTERNAL_MEMORY_HANDLE_TYPE_ANDROID_HARDWARE_BUFFER_BIT_ANDROID;
+
+ // Intended usage of the image.
+ VkImageUsageFlags usage_flags = 0;
+
+ // Get the AHB description.
+ AHardwareBuffer_Desc ahb_desc = {};
+ base::AndroidHardwareBufferCompat::GetInstance().Describe(ahb_handle.get(),
+ &ahb_desc);
+
+  // Map the AHB usage bits to the equivalent Vulkan image usage flags.
+ if (ahb_desc.usage & AHARDWAREBUFFER_USAGE_GPU_SAMPLED_IMAGE) {
+    usage_flags |=
+        VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT;
+ }
+ if (ahb_desc.usage & AHARDWAREBUFFER_USAGE_GPU_COLOR_OUTPUT) {
+ usage_flags |= VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT;
+ }
+
+  // VK_IMAGE_CREATE_PROTECTED_BIT is a create flag rather than a usage flag,
+  // so route it to VkImageCreateInfo::flags instead of |usage_flags|.
+  VkImageCreateFlags create_flags = 0;
+  if (ahb_desc.usage & AHARDWAREBUFFER_USAGE_PROTECTED_CONTENT) {
+    create_flags |= VK_IMAGE_CREATE_PROTECTED_BIT;
+  }
+
+  // TODO(vikassoni): AHARDWAREBUFFER_USAGE_GPU_CUBE_MAP is supported from API
+  // level 28, which is not part of the current android_ndk version in
+  // chromium. Add the equivalent VK usage later.
+
+ if (!usage_flags) {
+ LOG(ERROR) << "No valid usage flags found";
+ return false;
+ }
+
+ // Find the first set bit to use as memoryTypeIndex.
+ uint32_t memory_type_bits = ahb_props.memoryTypeBits;
+ int32_t type_index = -1;
+  for (uint32_t i = 0; memory_type_bits; memory_type_bits >>= 1, ++i) {
+ if (memory_type_bits & 0x1) {
+ type_index = i;
+ break;
+ }
+ }
+ if (type_index == -1) {
+ LOG(ERROR) << "No valid memoryTypeIndex found";
+ return false;
+ }
+
+ // Populate VkImageCreateInfo.
+ vk_image_info->sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO;
+ vk_image_info->pNext = &external_memory_image_info;
+  vk_image_info->flags = create_flags;
+ vk_image_info->imageType = VK_IMAGE_TYPE_2D;
+ vk_image_info->format = ahb_format_props.format;
+ vk_image_info->extent = {static_cast<uint32_t>(size.width()),
+ static_cast<uint32_t>(size.height()), 1};
+ vk_image_info->mipLevels = 1;
+ vk_image_info->arrayLayers = 1;
+ vk_image_info->samples = VK_SAMPLE_COUNT_1_BIT;
+ vk_image_info->tiling = VK_IMAGE_TILING_OPTIMAL;
+ vk_image_info->usage = usage_flags;
+ vk_image_info->sharingMode = VK_SHARING_MODE_EXCLUSIVE;
+ vk_image_info->queueFamilyIndexCount = 0;
+ vk_image_info->pQueueFamilyIndices = 0;
+ vk_image_info->initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
+
+ // Create Vk Image.
+ result = vkCreateImage(vk_device, vk_image_info, nullptr, vk_image);
+ if (result != VK_SUCCESS) {
+ LOG(ERROR) << "vkCreateImage failed : " << result;
+ return false;
+ }
+
+ // To import memory created outside of the current Vulkan instance from an
+ // Android hardware buffer, add a VkImportAndroidHardwareBufferInfoANDROID
+ // structure to the pNext chain of the VkMemoryAllocateInfo structure.
+ VkImportAndroidHardwareBufferInfoANDROID ahb_import_info;
+ ahb_import_info.sType =
+ VK_STRUCTURE_TYPE_IMPORT_ANDROID_HARDWARE_BUFFER_INFO_ANDROID;
+ ahb_import_info.pNext = nullptr;
+ ahb_import_info.buffer = ahb_handle.get();
+
+ // If the VkMemoryAllocateInfo pNext chain includes a
+ // VkMemoryDedicatedAllocateInfo structure, then that structure includes a
+ // handle of the sole buffer or image resource that the memory can be bound
+ // to.
+ VkMemoryDedicatedAllocateInfo dedicated_alloc_info;
+ dedicated_alloc_info.sType = VK_STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO;
+ dedicated_alloc_info.pNext = &ahb_import_info;
+ dedicated_alloc_info.image = *vk_image;
+ dedicated_alloc_info.buffer = VK_NULL_HANDLE;
+
+ // An instance of the VkMemoryAllocateInfo structure defines a memory import
+ // operation.
+ VkMemoryAllocateInfo mem_alloc_info;
+ mem_alloc_info.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO;
+ mem_alloc_info.pNext = &dedicated_alloc_info;
+
+ // If the parameters define an import operation and the external handle type
+ // is VK_EXTERNAL_MEMORY_HANDLE_TYPE_ANDROID_HARDWARE_BUFFER_BIT_ANDROID,
+ // allocationSize must be the size returned by
+ // vkGetAndroidHardwareBufferPropertiesANDROID for the Android hardware
+ // buffer.
+ mem_alloc_info.allocationSize = ahb_props.allocationSize;
+ mem_alloc_info.memoryTypeIndex = type_index;
+
+ // A Vulkan device operates on data in device memory via memory objects that
+ // are represented in the API by a VkDeviceMemory handle.
+ // Allocate memory.
+ result =
+ vkAllocateMemory(vk_device, &mem_alloc_info, nullptr, vk_device_memory);
+ if (result != VK_SUCCESS) {
+ LOG(ERROR) << "vkAllocateMemory failed : " << result;
+ vkDestroyImage(vk_device, *vk_image, nullptr);
+ return false;
+ }
+
+ // Attach memory to the image object.
+ result = vkBindImageMemory(vk_device, *vk_image, *vk_device_memory, 0);
+ if (result != VK_SUCCESS) {
+ LOG(ERROR) << "vkBindImageMemory failed : " << result;
+ vkDestroyImage(vk_device, *vk_image, nullptr);
+ vkFreeMemory(vk_device, *vk_device_memory, nullptr);
+ return false;
+ }
+
+ *mem_allocation_size = mem_alloc_info.allocationSize;
+ return true;
+}
+
} // namespace gpu
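
One detail worth calling out in the import path: memoryTypeIndex is chosen as
the index of the lowest set bit in
VkAndroidHardwareBufferPropertiesANDROID::memoryTypeBits. A compact
equivalent of that scan, sketched with base/bits.h:

    #include <stdint.h>

    #include "base/bits.h"

    // Index of the lowest set bit, or -1 if the buffer exposes no importable
    // memory type. Behaves the same as the loop in CreateVkImageAndImportAHB.
    int32_t LowestMemoryTypeIndex(uint32_t memory_type_bits) {
      if (!memory_type_bits)
        return -1;
      return static_cast<int32_t>(
          base::bits::CountTrailingZeroBits(memory_type_bits));
    }
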
diff --git a/chromium/gpu/vulkan/android/vulkan_implementation_android.h b/chromium/gpu/vulkan/android/vulkan_implementation_android.h
index af97c76b4e7..145320defe2 100644
--- a/chromium/gpu/vulkan/android/vulkan_implementation_android.h
+++ b/chromium/gpu/vulkan/android/vulkan_implementation_android.h
@@ -7,6 +7,7 @@
#include <memory>
+#include "base/android/scoped_hardware_buffer_handle.h"
#include "base/component_export.h"
#include "gpu/vulkan/vulkan_implementation.h"
#include "gpu/vulkan/vulkan_instance.h"
@@ -40,6 +41,15 @@ class COMPONENT_EXPORT(VULKAN_ANDROID) VulkanImplementationAndroid
bool GetSemaphoreFdKHR(VkDevice vk_device,
VkSemaphore vk_semaphore,
base::ScopedFD* sync_fd) override;
+ bool CreateVkImageAndImportAHB(
+ const VkDevice& vk_device,
+ const VkPhysicalDevice& vk_physical_device,
+ const gfx::Size& size,
+ base::android::ScopedHardwareBufferHandle ahb_handle,
+ VkImage* vk_image,
+ VkImageCreateInfo* vk_image_info,
+ VkDeviceMemory* vk_device_memory,
+ VkDeviceSize* mem_allocation_size) override;
private:
VulkanInstance vulkan_instance_;
diff --git a/chromium/gpu/vulkan/demo/vulkan_demo.cc b/chromium/gpu/vulkan/demo/vulkan_demo.cc
index 2c0f2a08cd2..ed67c365646 100644
--- a/chromium/gpu/vulkan/demo/vulkan_demo.cc
+++ b/chromium/gpu/vulkan/demo/vulkan_demo.cc
@@ -12,6 +12,7 @@
#include "gpu/vulkan/vulkan_implementation.h"
#include "gpu/vulkan/vulkan_surface.h"
#include "third_party/skia/include/core/SkCanvas.h"
+#include "third_party/skia/include/core/SkFont.h"
#include "third_party/skia/include/core/SkSurface.h"
#include "third_party/skia/include/effects/SkGradientShader.h"
#include "third_party/skia/include/gpu/GrBackendSurface.h"
@@ -161,9 +162,11 @@ void VulkanDemo::Draw(SkCanvas* canvas, float fraction) {
}
// Draw a message with a nice black paint
- paint.setSubpixelText(true);
paint.setColor(SK_ColorBLACK);
- paint.setTextSize(32);
+
+ SkFont font;
+ font.setSize(32);
+ font.setSubpixel(true);
static const char message[] = "Hello Vulkan";
@@ -176,7 +179,7 @@ void VulkanDemo::Draw(SkCanvas* canvas, float fraction) {
canvas->rotate(rotation_angle_);
// Draw the text
- canvas->drawText(message, strlen(message), 0, 0, paint);
+ canvas->drawString(message, 0, 0, font, paint);
canvas->restore();
canvas->flush();
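
The demo change tracks Skia's text API split: size and subpixel positioning
moved from SkPaint to SkFont, and drawText was replaced by drawString taking
both objects. The new idiom in isolation (assuming a valid SkCanvas*):

    #include "third_party/skia/include/core/SkCanvas.h"
    #include "third_party/skia/include/core/SkFont.h"
    #include "third_party/skia/include/core/SkPaint.h"

    void DrawLabel(SkCanvas* canvas) {
      SkPaint paint;
      paint.setColor(SK_ColorBLACK);  // Color stays on SkPaint.
      SkFont font;
      font.setSize(32);        // Size and subpixel positioning now live
      font.setSubpixel(true);  // on SkFont rather than SkPaint.
      canvas->drawString("Hello Vulkan", 0, 0, font, paint);
    }
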
diff --git a/chromium/gpu/vulkan/features.gni b/chromium/gpu/vulkan/features.gni
index d97fadb7009..d55eddeb0b3 100644
--- a/chromium/gpu/vulkan/features.gni
+++ b/chromium/gpu/vulkan/features.gni
@@ -8,5 +8,5 @@ import("//build/config/ui.gni")
# For details see declare_args() in build/config/BUILDCONFIG.gn.
declare_args() {
# Enable experimental vulkan backend.
- enable_vulkan = is_linux || is_fuchsia
+ enable_vulkan = is_linux || is_android || is_fuchsia
}
diff --git a/chromium/gpu/vulkan/generate_bindings.py b/chromium/gpu/vulkan/generate_bindings.py
index eefd051c928..66ecfa1698f 100755
--- a/chromium/gpu/vulkan/generate_bindings.py
+++ b/chromium/gpu/vulkan/generate_bindings.py
@@ -39,11 +39,14 @@ VULKAN_PHYSICAL_DEVICE_FUNCTIONS = [
VULKAN_DEVICE_FUNCTIONS = [
{ 'name': 'vkAllocateCommandBuffers' },
{ 'name': 'vkAllocateDescriptorSets' },
+{ 'name': 'vkAllocateMemory' },
+{ 'name': 'vkBindImageMemory' },
{ 'name': 'vkCreateCommandPool' },
{ 'name': 'vkCreateDescriptorPool' },
{ 'name': 'vkCreateDescriptorSetLayout' },
{ 'name': 'vkCreateFence' },
{ 'name': 'vkCreateFramebuffer' },
+{ 'name': 'vkCreateImage' },
{ 'name': 'vkCreateImageView' },
{ 'name': 'vkCreateRenderPass' },
{ 'name': 'vkCreateSampler' },
@@ -74,6 +77,7 @@ VULKAN_DEVICE_FUNCTIONS = [
VULKAN_DEVICE_FUNCTIONS_ANDROID = [
{ 'name': 'vkImportSemaphoreFdKHR' },
+{ 'name': 'vkGetAndroidHardwareBufferPropertiesANDROID' },
{ 'name': 'vkGetSemaphoreFdKHR' },
]
@@ -155,14 +159,14 @@ struct VulkanFunctionPointers {
VulkanFunctionPointers();
~VulkanFunctionPointers();
- bool BindUnassociatedFunctionPointers();
+ VULKAN_EXPORT bool BindUnassociatedFunctionPointers();
// These functions assume that vkGetInstanceProcAddr has been populated.
- bool BindInstanceFunctionPointers(VkInstance vk_instance);
- bool BindPhysicalDeviceFunctionPointers(VkInstance vk_instance);
+ VULKAN_EXPORT bool BindInstanceFunctionPointers(VkInstance vk_instance);
+ VULKAN_EXPORT bool BindPhysicalDeviceFunctionPointers(VkInstance vk_instance);
// These functions assume that vkGetDeviceProcAddr has been populated.
- bool BindDeviceFunctionPointers(VkDevice vk_device);
+ VULKAN_EXPORT bool BindDeviceFunctionPointers(VkDevice vk_device);
bool BindSwapchainFunctionPointers(VkDevice vk_device);
base::NativeLibrary vulkan_loader_library_ = nullptr;
diff --git a/chromium/gpu/vulkan/vulkan_function_pointers.cc b/chromium/gpu/vulkan/vulkan_function_pointers.cc
index b1524be022b..ef19bfbb3fb 100644
--- a/chromium/gpu/vulkan/vulkan_function_pointers.cc
+++ b/chromium/gpu/vulkan/vulkan_function_pointers.cc
@@ -111,6 +111,16 @@ bool VulkanFunctionPointers::BindDeviceFunctionPointers(VkDevice vk_device) {
if (!vkAllocateDescriptorSetsFn)
return false;
+ vkAllocateMemoryFn = reinterpret_cast<PFN_vkAllocateMemory>(
+ vkGetDeviceProcAddrFn(vk_device, "vkAllocateMemory"));
+ if (!vkAllocateMemoryFn)
+ return false;
+
+ vkBindImageMemoryFn = reinterpret_cast<PFN_vkBindImageMemory>(
+ vkGetDeviceProcAddrFn(vk_device, "vkBindImageMemory"));
+ if (!vkBindImageMemoryFn)
+ return false;
+
vkCreateCommandPoolFn = reinterpret_cast<PFN_vkCreateCommandPool>(
vkGetDeviceProcAddrFn(vk_device, "vkCreateCommandPool"));
if (!vkCreateCommandPoolFn)
@@ -137,6 +147,11 @@ bool VulkanFunctionPointers::BindDeviceFunctionPointers(VkDevice vk_device) {
if (!vkCreateFramebufferFn)
return false;
+ vkCreateImageFn = reinterpret_cast<PFN_vkCreateImage>(
+ vkGetDeviceProcAddrFn(vk_device, "vkCreateImage"));
+ if (!vkCreateImageFn)
+ return false;
+
vkCreateImageViewFn = reinterpret_cast<PFN_vkCreateImageView>(
vkGetDeviceProcAddrFn(vk_device, "vkCreateImageView"));
if (!vkCreateImageViewFn)
@@ -275,6 +290,13 @@ bool VulkanFunctionPointers::BindDeviceFunctionPointers(VkDevice vk_device) {
if (!vkImportSemaphoreFdKHRFn)
return false;
+ vkGetAndroidHardwareBufferPropertiesANDROIDFn =
+ reinterpret_cast<PFN_vkGetAndroidHardwareBufferPropertiesANDROID>(
+ vkGetDeviceProcAddrFn(vk_device,
+ "vkGetAndroidHardwareBufferPropertiesANDROID"));
+ if (!vkGetAndroidHardwareBufferPropertiesANDROIDFn)
+ return false;
+
vkGetSemaphoreFdKHRFn = reinterpret_cast<PFN_vkGetSemaphoreFdKHR>(
vkGetDeviceProcAddrFn(vk_device, "vkGetSemaphoreFdKHR"));
if (!vkGetSemaphoreFdKHRFn)
diff --git a/chromium/gpu/vulkan/vulkan_function_pointers.h b/chromium/gpu/vulkan/vulkan_function_pointers.h
index 6c6b0ce9a25..abc4a37cf88 100644
--- a/chromium/gpu/vulkan/vulkan_function_pointers.h
+++ b/chromium/gpu/vulkan/vulkan_function_pointers.h
@@ -27,14 +27,14 @@ struct VulkanFunctionPointers {
VulkanFunctionPointers();
~VulkanFunctionPointers();
- bool BindUnassociatedFunctionPointers();
+ VULKAN_EXPORT bool BindUnassociatedFunctionPointers();
// These functions assume that vkGetInstanceProcAddr has been populated.
- bool BindInstanceFunctionPointers(VkInstance vk_instance);
- bool BindPhysicalDeviceFunctionPointers(VkInstance vk_instance);
+ VULKAN_EXPORT bool BindInstanceFunctionPointers(VkInstance vk_instance);
+ VULKAN_EXPORT bool BindPhysicalDeviceFunctionPointers(VkInstance vk_instance);
// These functions assume that vkGetDeviceProcAddr has been populated.
- bool BindDeviceFunctionPointers(VkDevice vk_device);
+ VULKAN_EXPORT bool BindDeviceFunctionPointers(VkDevice vk_device);
bool BindSwapchainFunctionPointers(VkDevice vk_device);
base::NativeLibrary vulkan_loader_library_ = nullptr;
@@ -69,11 +69,14 @@ struct VulkanFunctionPointers {
// Device functions
PFN_vkAllocateCommandBuffers vkAllocateCommandBuffersFn = nullptr;
PFN_vkAllocateDescriptorSets vkAllocateDescriptorSetsFn = nullptr;
+ PFN_vkAllocateMemory vkAllocateMemoryFn = nullptr;
+ PFN_vkBindImageMemory vkBindImageMemoryFn = nullptr;
PFN_vkCreateCommandPool vkCreateCommandPoolFn = nullptr;
PFN_vkCreateDescriptorPool vkCreateDescriptorPoolFn = nullptr;
PFN_vkCreateDescriptorSetLayout vkCreateDescriptorSetLayoutFn = nullptr;
PFN_vkCreateFence vkCreateFenceFn = nullptr;
PFN_vkCreateFramebuffer vkCreateFramebufferFn = nullptr;
+ PFN_vkCreateImage vkCreateImageFn = nullptr;
PFN_vkCreateImageView vkCreateImageViewFn = nullptr;
PFN_vkCreateRenderPass vkCreateRenderPassFn = nullptr;
PFN_vkCreateSampler vkCreateSamplerFn = nullptr;
@@ -101,9 +104,11 @@ struct VulkanFunctionPointers {
PFN_vkUpdateDescriptorSets vkUpdateDescriptorSetsFn = nullptr;
PFN_vkWaitForFences vkWaitForFencesFn = nullptr;
-// Android only device functions.
+  // Android-only device functions.
#if defined(OS_ANDROID)
PFN_vkImportSemaphoreFdKHR vkImportSemaphoreFdKHRFn = nullptr;
+ PFN_vkGetAndroidHardwareBufferPropertiesANDROID
+ vkGetAndroidHardwareBufferPropertiesANDROIDFn = nullptr;
PFN_vkGetSemaphoreFdKHR vkGetSemaphoreFdKHRFn = nullptr;
#endif
@@ -167,6 +172,8 @@ struct VulkanFunctionPointers {
gpu::GetVulkanFunctionPointers()->vkAllocateCommandBuffersFn
#define vkAllocateDescriptorSets \
gpu::GetVulkanFunctionPointers()->vkAllocateDescriptorSetsFn
+#define vkAllocateMemory gpu::GetVulkanFunctionPointers()->vkAllocateMemoryFn
+#define vkBindImageMemory gpu::GetVulkanFunctionPointers()->vkBindImageMemoryFn
#define vkCreateCommandPool \
gpu::GetVulkanFunctionPointers()->vkCreateCommandPoolFn
#define vkCreateDescriptorPool \
@@ -176,6 +183,7 @@ struct VulkanFunctionPointers {
#define vkCreateFence gpu::GetVulkanFunctionPointers()->vkCreateFenceFn
#define vkCreateFramebuffer \
gpu::GetVulkanFunctionPointers()->vkCreateFramebufferFn
+#define vkCreateImage gpu::GetVulkanFunctionPointers()->vkCreateImageFn
#define vkCreateImageView gpu::GetVulkanFunctionPointers()->vkCreateImageViewFn
#define vkCreateRenderPass \
gpu::GetVulkanFunctionPointers()->vkCreateRenderPassFn
@@ -219,6 +227,9 @@ struct VulkanFunctionPointers {
#if defined(OS_ANDROID)
#define vkImportSemaphoreFdKHR \
gpu::GetVulkanFunctionPointers()->vkImportSemaphoreFdKHRFn
+#define vkGetAndroidHardwareBufferPropertiesANDROID \
+ gpu::GetVulkanFunctionPointers() \
+ ->vkGetAndroidHardwareBufferPropertiesANDROIDFn
#define vkGetSemaphoreFdKHR \
gpu::GetVulkanFunctionPointers()->vkGetSemaphoreFdKHRFn
#endif
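
The #define layer above lets call sites keep writing plain Vulkan calls while
dispatching through the per-process table that BindDeviceFunctionPointers()
fills in. The mechanism, reduced to a generic sketch (illustrative names, not
the Chromium table):

    #include <vulkan/vulkan.h>

    // Stand-in for the process-wide pointer table.
    struct FunctionTable {
      PFN_vkCreateImage vkCreateImageFn = nullptr;
    };

    FunctionTable* GetTable() {
      static FunctionTable table;
      return &table;
    }

    // After this, an ordinary-looking call such as
    //   vkCreateImage(device, &info, nullptr, &image);
    // resolves to whatever pointer was bound at runtime.
    #define vkCreateImage GetTable()->vkCreateImageFn
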
diff --git a/chromium/gpu/vulkan/vulkan_implementation.h b/chromium/gpu/vulkan/vulkan_implementation.h
index 2cb6be5934a..78470bc5c86 100644
--- a/chromium/gpu/vulkan/vulkan_implementation.h
+++ b/chromium/gpu/vulkan/vulkan_implementation.h
@@ -14,6 +14,12 @@
#include "gpu/vulkan/vulkan_export.h"
#include "ui/gfx/native_widget_types.h"
+#if defined(OS_ANDROID)
+#include "base/android/scoped_hardware_buffer_handle.h"
+#include "base/files/scoped_file.h"
+#include "ui/gfx/geometry/size.h"
+#endif
+
namespace gfx {
class GpuFence;
}
@@ -69,6 +75,18 @@ class VULKAN_EXPORT VulkanImplementation {
virtual bool GetSemaphoreFdKHR(VkDevice vk_device,
VkSemaphore vk_semaphore,
base::ScopedFD* sync_fd) = 0;
+
+  // Creates a VkImage, imports an Android AHardwareBuffer created outside of
+  // the Vulkan device into a Vulkan memory object, and binds that memory to
+  // the VkImage.
+ virtual bool CreateVkImageAndImportAHB(
+ const VkDevice& vk_device,
+ const VkPhysicalDevice& vk_physical_device,
+ const gfx::Size& size,
+ base::android::ScopedHardwareBufferHandle ahb_handle,
+ VkImage* vk_image,
+ VkImageCreateInfo* vk_image_info,
+ VkDeviceMemory* vk_device_memory,
+ VkDeviceSize* mem_allocation_size) = 0;
#endif
private:
diff --git a/chromium/gpu/vulkan/vulkan_instance.cc b/chromium/gpu/vulkan/vulkan_instance.cc
index 9c1bc7b0adb..c733e65604e 100644
--- a/chromium/gpu/vulkan/vulkan_instance.cc
+++ b/chromium/gpu/vulkan/vulkan_instance.cc
@@ -61,7 +61,7 @@ bool VulkanInstance::Initialize(
VkApplicationInfo app_info = {};
app_info.sType = VK_STRUCTURE_TYPE_APPLICATION_INFO;
app_info.pApplicationName = "Chromium";
- app_info.apiVersion = VK_MAKE_VERSION(1, 0, 2);
+ app_info.apiVersion = VK_MAKE_VERSION(1, 1, 0);
std::vector<const char*> enabled_extensions;
enabled_extensions.insert(std::end(enabled_extensions),
@@ -228,21 +228,25 @@ void VulkanInstance::Destroy() {
vkGetInstanceProcAddr(vk_instance_,
"vkDestroyDebugReportCallbackEXT"));
DCHECK(vkDestroyDebugReportCallbackEXT);
- if (error_callback_ != VK_NULL_HANDLE)
+ if (error_callback_ != VK_NULL_HANDLE) {
vkDestroyDebugReportCallbackEXT(vk_instance_, error_callback_, nullptr);
- if (warning_callback_ != VK_NULL_HANDLE)
+ error_callback_ = VK_NULL_HANDLE;
+ }
+ if (warning_callback_ != VK_NULL_HANDLE) {
vkDestroyDebugReportCallbackEXT(vk_instance_, warning_callback_, nullptr);
+ warning_callback_ = VK_NULL_HANDLE;
+ }
}
#endif
if (vk_instance_ != VK_NULL_HANDLE) {
vkDestroyInstance(vk_instance_, nullptr);
+ vk_instance_ = VK_NULL_HANDLE;
}
VulkanFunctionPointers* vulkan_function_pointers =
gpu::GetVulkanFunctionPointers();
if (vulkan_function_pointers->vulkan_loader_library_)
base::UnloadNativeLibrary(vulkan_function_pointers->vulkan_loader_library_);
vulkan_function_pointers->vulkan_loader_library_ = nullptr;
- vk_instance_ = VK_NULL_HANDLE;
}
} // namespace gpu
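
The Destroy() edits follow a single rule: null each handle immediately after
destroying it, so teardown becomes idempotent. The rule in miniature (a
sketch, not the class itself):

    #include <vulkan/vulkan.h>

    void DestroyInstanceOnce(VkInstance* instance) {
      if (*instance != VK_NULL_HANDLE) {
        vkDestroyInstance(*instance, nullptr);
        *instance = VK_NULL_HANDLE;  // A repeated call is now a no-op.
      }
    }
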
diff --git a/chromium/gpu/vulkan/vulkan_instance.h b/chromium/gpu/vulkan/vulkan_instance.h
index 31a2a477001..19ba467f761 100644
--- a/chromium/gpu/vulkan/vulkan_instance.h
+++ b/chromium/gpu/vulkan/vulkan_instance.h
@@ -29,8 +29,6 @@ class VULKAN_EXPORT VulkanInstance {
bool Initialize(const std::vector<const char*>& required_extensions,
const std::vector<const char*>& required_layers);
- void Destroy();
-
const gfx::ExtensionSet& enabled_extensions() const {
return enabled_extensions_;
}
@@ -38,6 +36,8 @@ class VULKAN_EXPORT VulkanInstance {
VkInstance vk_instance() { return vk_instance_; }
private:
+ void Destroy();
+
VkInstance vk_instance_ = VK_NULL_HANDLE;
gfx::ExtensionSet enabled_extensions_;
bool debug_report_enabled_ = false;
diff --git a/chromium/gpu/vulkan/vulkan_surface.cc b/chromium/gpu/vulkan/vulkan_surface.cc
index 97b4999c68e..d8d42742575 100644
--- a/chromium/gpu/vulkan/vulkan_surface.cc
+++ b/chromium/gpu/vulkan/vulkan_surface.cc
@@ -30,12 +30,8 @@ VulkanSurface::~VulkanSurface() {
DCHECK_EQ(static_cast<VkSurfaceKHR>(VK_NULL_HANDLE), surface_);
}
-VulkanSurface::VulkanSurface(VkInstance vk_instance,
- VkSurfaceKHR surface,
- base::OnceClosure destruction_callback)
- : vk_instance_(vk_instance),
- surface_(surface),
- destruction_callback_(std::move(destruction_callback)) {
+VulkanSurface::VulkanSurface(VkInstance vk_instance, VkSurfaceKHR surface)
+ : vk_instance_(vk_instance), surface_(surface) {
DCHECK_NE(static_cast<VkSurfaceKHR>(VK_NULL_HANDLE), surface_);
}
@@ -118,7 +114,6 @@ void VulkanSurface::Destroy() {
swap_chain_->Destroy();
vkDestroySurfaceKHR(vk_instance_, surface_, nullptr);
surface_ = VK_NULL_HANDLE;
- std::move(destruction_callback_).Run();
}
gfx::SwapResult VulkanSurface::SwapBuffers() {
diff --git a/chromium/gpu/vulkan/vulkan_surface.h b/chromium/gpu/vulkan/vulkan_surface.h
index 860dc63cad6..89ff83aa865 100644
--- a/chromium/gpu/vulkan/vulkan_surface.h
+++ b/chromium/gpu/vulkan/vulkan_surface.h
@@ -30,9 +30,7 @@ class VULKAN_EXPORT VulkanSurface {
DEFAULT_SURFACE_FORMAT = FORMAT_RGBA_32
};
- VulkanSurface(VkInstance vk_instance,
- VkSurfaceKHR surface,
- base::OnceClosure destruction_callback);
+ VulkanSurface(VkInstance vk_instance, VkSurfaceKHR surface);
~VulkanSurface();
@@ -58,9 +56,6 @@ class VULKAN_EXPORT VulkanSurface {
VulkanDeviceQueue* device_queue_ = nullptr;
std::unique_ptr<VulkanSwapChain> swap_chain_;
- // Called after destruction to clean up platform state, if any.
- base::OnceClosure destruction_callback_;
-
DISALLOW_COPY_AND_ASSIGN(VulkanSurface);
};
diff --git a/chromium/gpu/vulkan/vulkan_swap_chain.cc b/chromium/gpu/vulkan/vulkan_swap_chain.cc
index cd21927a021..84c9d388159 100644
--- a/chromium/gpu/vulkan/vulkan_swap_chain.cc
+++ b/chromium/gpu/vulkan/vulkan_swap_chain.cc
@@ -210,7 +210,7 @@ bool VulkanSwapChain::InitializeSwapChain(
swap_chain_create_info.sType = VK_STRUCTURE_TYPE_SWAPCHAIN_CREATE_INFO_KHR;
swap_chain_create_info.surface = surface;
swap_chain_create_info.minImageCount =
- std::max(2u, surface_caps.minImageCount);
+ std::max(3u, surface_caps.minImageCount);
swap_chain_create_info.imageFormat = surface_format.format;
swap_chain_create_info.imageColorSpace = surface_format.colorSpace;
swap_chain_create_info.imageExtent = surface_caps.currentExtent;
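
Raising the floor from two to three images requests triple buffering, but
VkSurfaceCapabilitiesKHR also publishes an upper bound: maxImageCount, where
zero means unbounded. A defensive sketch that honors both ends of the range:

    #include <algorithm>

    #include <vulkan/vulkan.h>

    uint32_t ChooseSwapChainImageCount(const VkSurfaceCapabilitiesKHR& caps) {
      uint32_t count = std::max(3u, caps.minImageCount);
      if (caps.maxImageCount != 0)  // 0 means "no limit".
        count = std::min(count, caps.maxImageCount);
      return count;
    }
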
diff --git a/chromium/gpu/vulkan/win32/vulkan_implementation_win32.cc b/chromium/gpu/vulkan/win32/vulkan_implementation_win32.cc
index 6d874755aa6..2530aafd5d3 100644
--- a/chromium/gpu/vulkan/win32/vulkan_implementation_win32.cc
+++ b/chromium/gpu/vulkan/win32/vulkan_implementation_win32.cc
@@ -30,10 +30,8 @@ bool VulkanImplementationWin32::InitializeVulkanInstance() {
if (!vulkan_function_pointers->vulkan_loader_library_)
return false;
- if (!vulkan_instance_.Initialize(required_extensions, {})) {
- vulkan_instance_.Destroy();
+ if (!vulkan_instance_.Initialize(required_extensions, {}))
return false;
- }
// Initialize platform function pointers
vkGetPhysicalDeviceWin32PresentationSupportKHR_ =
@@ -43,7 +41,6 @@ bool VulkanImplementationWin32::InitializeVulkanInstance() {
"vkGetPhysicalDeviceWin32PresentationSupportKHR"));
if (!vkGetPhysicalDeviceWin32PresentationSupportKHR_) {
LOG(ERROR) << "vkGetPhysicalDeviceWin32PresentationSupportKHR not found";
- vulkan_instance_.Destroy();
return false;
}
@@ -52,7 +49,6 @@ bool VulkanImplementationWin32::InitializeVulkanInstance() {
vulkan_instance_.vk_instance(), "vkCreateWin32SurfaceKHR"));
if (!vkCreateWin32SurfaceKHR_) {
LOG(ERROR) << "vkCreateWin32SurfaceKHR not found";
- vulkan_instance_.Destroy();
return false;
}
@@ -78,8 +74,7 @@ std::unique_ptr<VulkanSurface> VulkanImplementationWin32::CreateViewSurface(
return nullptr;
}
- return std::make_unique<VulkanSurface>(GetVulkanInstance(), surface,
- base::DoNothing());
+ return std::make_unique<VulkanSurface>(GetVulkanInstance(), surface);
}
bool VulkanImplementationWin32::GetPhysicalDevicePresentationSupport(
diff --git a/chromium/gpu/vulkan/x/vulkan_implementation_x11.cc b/chromium/gpu/vulkan/x/vulkan_implementation_x11.cc
index 3f29fa81e9b..1f390e7c99e 100644
--- a/chromium/gpu/vulkan/x/vulkan_implementation_x11.cc
+++ b/chromium/gpu/vulkan/x/vulkan_implementation_x11.cc
@@ -36,10 +36,8 @@ bool VulkanImplementationX11::InitializeVulkanInstance() {
if (!vulkan_function_pointers->vulkan_loader_library_)
return false;
- if (!vulkan_instance_.Initialize(required_extensions, {})) {
- vulkan_instance_.Destroy();
+ if (!vulkan_instance_.Initialize(required_extensions, {}))
return false;
- }
// Initialize platform function pointers
vkGetPhysicalDeviceXlibPresentationSupportKHR_ =
@@ -49,7 +47,6 @@ bool VulkanImplementationX11::InitializeVulkanInstance() {
"vkGetPhysicalDeviceXlibPresentationSupportKHR"));
if (!vkGetPhysicalDeviceXlibPresentationSupportKHR_) {
LOG(ERROR) << "vkGetPhysicalDeviceXlibPresentationSupportKHR not found";
- vulkan_instance_.Destroy();
return false;
}
@@ -58,7 +55,6 @@ bool VulkanImplementationX11::InitializeVulkanInstance() {
vulkan_instance_.vk_instance(), "vkCreateXlibSurfaceKHR"));
if (!vkCreateXlibSurfaceKHR_) {
LOG(ERROR) << "vkCreateXlibSurfaceKHR not found";
- vulkan_instance_.Destroy();
return false;
}
@@ -83,8 +79,7 @@ std::unique_ptr<VulkanSurface> VulkanImplementationX11::CreateViewSurface(
return nullptr;
}
- return std::make_unique<VulkanSurface>(GetVulkanInstance(), surface,
- base::DoNothing());
+ return std::make_unique<VulkanSurface>(GetVulkanInstance(), surface);
}
bool VulkanImplementationX11::GetPhysicalDevicePresentationSupport(