author     Allan Sandfeld Jensen <allan.jensen@qt.io>    2020-07-16 11:45:35 +0200
committer  Allan Sandfeld Jensen <allan.jensen@qt.io>    2020-07-17 08:59:23 +0000
commit     552906b0f222c5d5dd11b9fd73829d510980461a (patch)
tree       3a11e6ed0538a81dd83b20cf3a4783e297f26d91 /chromium/gpu
parent     1b05827804eaf047779b597718c03e7d38344261 (diff)
download   qtwebengine-chromium-552906b0f222c5d5dd11b9fd73829d510980461a.tar.gz
BASELINE: Update Chromium to 83.0.4103.122
Change-Id: Ie3a82f5bb0076eec2a7c6a6162326b4301ee291e
Reviewed-by: Michael Brüning <michael.bruning@qt.io>
Diffstat (limited to 'chromium/gpu')
-rw-r--r--  chromium/gpu/BUILD.gn | 97
-rw-r--r--  chromium/gpu/GLES2/extensions/ANGLE/EGL_ANGLE_sync_control_rate.txt | 110
-rw-r--r--  chromium/gpu/GLES2/extensions/CHROMIUM/CHROMIUM_copy_texture.txt | 10
-rw-r--r--  chromium/gpu/GLES2/extensions/CHROMIUM/CHROMIUM_path_rendering.txt | 1404
-rw-r--r--  chromium/gpu/GLES2/extensions/CHROMIUM/CHROMIUM_resize.txt | 30
-rw-r--r--  chromium/gpu/GLES2/extensions/CHROMIUM/CHROMIUM_shared_image.txt | 4
-rw-r--r--  chromium/gpu/GLES2/extensions/CHROMIUM/EGL_CHROMIUM_sync_control.txt | 8
-rw-r--r--  chromium/gpu/GLES2/gl2chromium_autogen.h | 37
-rw-r--r--  chromium/gpu/GLES2/gl2extchromium.h | 373
-rw-r--r--  chromium/gpu/OWNERS | 9
-rw-r--r--  chromium/gpu/angle_end2end_tests_main.cc | 2
-rw-r--r--  chromium/gpu/command_buffer/OWNERS | 7
-rw-r--r--  chromium/gpu/command_buffer/build_cmd_buffer_lib.py | 55
-rwxr-xr-x  chromium/gpu/command_buffer/build_gles2_cmd_buffer.py | 258
-rwxr-xr-x  chromium/gpu/command_buffer/build_raster_cmd_buffer.py | 4
-rwxr-xr-x  chromium/gpu/command_buffer/build_webgpu_cmd_buffer.py | 20
-rw-r--r--  chromium/gpu/command_buffer/client/BUILD.gn | 50
-rw-r--r--  chromium/gpu/command_buffer/client/gl_helper.cc | 861
-rw-r--r--  chromium/gpu/command_buffer/client/gl_helper.h | 463
-rw-r--r--  chromium/gpu/command_buffer/client/gl_helper_scaling.cc | 1335
-rw-r--r--  chromium/gpu/command_buffer/client/gl_helper_scaling.h | 197
-rw-r--r--  chromium/gpu/command_buffer/client/gles2_c_lib_autogen.h | 284
-rw-r--r--  chromium/gpu/command_buffer/client/gles2_cmd_helper_autogen.h | 307
-rw-r--r--  chromium/gpu/command_buffer/client/gles2_implementation.cc | 580
-rw-r--r--  chromium/gpu/command_buffer/client/gles2_implementation.h | 13
-rw-r--r--  chromium/gpu/command_buffer/client/gles2_implementation_autogen.h | 120
-rw-r--r--  chromium/gpu/command_buffer/client/gles2_implementation_impl_autogen.h | 156
-rw-r--r--  chromium/gpu/command_buffer/client/gles2_implementation_unittest_autogen.h | 190
-rw-r--r--  chromium/gpu/command_buffer/client/gles2_interface.h | 2
-rw-r--r--  chromium/gpu/command_buffer/client/gles2_interface_autogen.h | 99
-rw-r--r--  chromium/gpu/command_buffer/client/gles2_interface_stub_autogen.h | 95
-rw-r--r--  chromium/gpu/command_buffer/client/gles2_interface_stub_impl_autogen.h | 116
-rw-r--r--  chromium/gpu/command_buffer/client/gles2_trace_implementation_autogen.h | 95
-rw-r--r--  chromium/gpu/command_buffer/client/gles2_trace_implementation_impl_autogen.h | 238
-rw-r--r--  chromium/gpu/command_buffer/client/raster_cmd_helper_autogen.h | 5
-rw-r--r--  chromium/gpu/command_buffer/client/raster_implementation.cc | 61
-rw-r--r--  chromium/gpu/command_buffer/client/raster_implementation.h | 38
-rw-r--r--  chromium/gpu/command_buffer/client/raster_implementation_gles.cc | 228
-rw-r--r--  chromium/gpu/command_buffer/client/raster_implementation_gles.h | 61
-rw-r--r--  chromium/gpu/command_buffer/client/raster_implementation_gles_unittest.cc | 4
-rw-r--r--  chromium/gpu/command_buffer/client/raster_interface.h | 59
-rw-r--r--  chromium/gpu/command_buffer/client/shared_image_interface.cc | 13
-rw-r--r--  chromium/gpu/command_buffer/client/shared_image_interface.h | 35
-rw-r--r--  chromium/gpu/command_buffer/client/webgpu_cmd_helper.h | 2
-rw-r--r--  chromium/gpu/command_buffer/client/webgpu_cmd_helper_autogen.h | 34
-rw-r--r--  chromium/gpu/command_buffer/client/webgpu_implementation.cc | 475
-rw-r--r--  chromium/gpu/command_buffer/client/webgpu_implementation.h | 86
-rw-r--r--  chromium/gpu/command_buffer/client/webgpu_implementation_autogen.h | 6
-rw-r--r--  chromium/gpu/command_buffer/client/webgpu_implementation_impl_autogen.h | 27
-rw-r--r--  chromium/gpu/command_buffer/client/webgpu_implementation_unittest.cc | 2
-rw-r--r--  chromium/gpu/command_buffer/client/webgpu_implementation_unittest_autogen.h | 26
-rw-r--r--  chromium/gpu/command_buffer/client/webgpu_interface.h | 13
-rw-r--r--  chromium/gpu/command_buffer/client/webgpu_interface_autogen.h | 5
-rw-r--r--  chromium/gpu/command_buffer/client/webgpu_interface_stub.cc | 12
-rw-r--r--  chromium/gpu/command_buffer/client/webgpu_interface_stub.h | 11
-rw-r--r--  chromium/gpu/command_buffer/client/webgpu_interface_stub_autogen.h | 6
-rw-r--r--  chromium/gpu/command_buffer/client/webgpu_interface_stub_impl_autogen.h | 5
-rw-r--r--  chromium/gpu/command_buffer/common/BUILD.gn | 42
-rw-r--r--  chromium/gpu/command_buffer/common/capabilities.h | 13
-rw-r--r--  chromium/gpu/command_buffer/common/context_creation_attribs.h | 1
-rw-r--r--  chromium/gpu/command_buffer/common/gles2_cmd_copy_texture_chromium_utils.cc | 17
-rw-r--r--  chromium/gpu/command_buffer/common/gles2_cmd_copy_texture_chromium_utils.h | 20
-rw-r--r--  chromium/gpu/command_buffer/common/gles2_cmd_format.h | 5
-rw-r--r--  chromium/gpu/command_buffer/common/gles2_cmd_format_autogen.h | 1521
-rw-r--r--  chromium/gpu/command_buffer/common/gles2_cmd_format_test_autogen.h | 459
-rw-r--r--  chromium/gpu/command_buffer/common/gles2_cmd_ids_autogen.h | 77
-rw-r--r--  chromium/gpu/command_buffer/common/gles2_cmd_utils.cc | 108
-rw-r--r--  chromium/gpu/command_buffer/common/gles2_cmd_utils.h | 9
-rw-r--r--  chromium/gpu/command_buffer/common/gles2_cmd_utils_autogen.h | 9
-rw-r--r--  chromium/gpu/command_buffer/common/gles2_cmd_utils_implementation_autogen.h | 150
-rw-r--r--  chromium/gpu/command_buffer/common/gpu_memory_buffer_support.cc | 24
-rw-r--r--  chromium/gpu/command_buffer/common/gpu_memory_buffer_support.h | 6
-rw-r--r--  chromium/gpu/command_buffer/common/raster_cmd_format_autogen.h | 22
-rw-r--r--  chromium/gpu/command_buffer/common/raster_cmd_format_test_autogen.h | 5
-rw-r--r--  chromium/gpu/command_buffer/common/shared_image_usage.h | 4
-rw-r--r--  chromium/gpu/command_buffer/common/skia_utils.cc | 57
-rw-r--r--  chromium/gpu/command_buffer/common/skia_utils.h | 8
-rw-r--r--  chromium/gpu/command_buffer/common/webgpu_cmd_enums.h | 1
-rw-r--r--  chromium/gpu/command_buffer/common/webgpu_cmd_format.h | 60
-rw-r--r--  chromium/gpu/command_buffer/common/webgpu_cmd_format_autogen.h | 187
-rw-r--r--  chromium/gpu/command_buffer/common/webgpu_cmd_format_test_autogen.h | 52
-rw-r--r--  chromium/gpu/command_buffer/common/webgpu_cmd_ids.h | 3
-rw-r--r--  chromium/gpu/command_buffer/common/webgpu_cmd_ids_autogen.h | 3
-rw-r--r--  chromium/gpu/command_buffer/gles2_cmd_buffer_functions.txt | 31
-rw-r--r--  chromium/gpu/command_buffer/raster_cmd_buffer_functions.txt | 2
-rw-r--r--  chromium/gpu/command_buffer/service/BUILD.gn | 53
-rw-r--r--  chromium/gpu/command_buffer/service/abstract_texture_impl_shared_context_state.cc | 25
-rw-r--r--  chromium/gpu/command_buffer/service/ahardwarebuffer_utils.cc | 7
-rw-r--r--  chromium/gpu/command_buffer/service/command_buffer_direct.cc | 11
-rw-r--r--  chromium/gpu/command_buffer/service/command_buffer_direct.h | 2
-rw-r--r--  chromium/gpu/command_buffer/service/common_decoder.cc | 28
-rw-r--r--  chromium/gpu/command_buffer/service/common_decoder.h | 11
-rw-r--r--  chromium/gpu/command_buffer/service/context_group.cc | 58
-rw-r--r--  chromium/gpu/command_buffer/service/context_group.h | 11
-rw-r--r--  chromium/gpu/command_buffer/service/context_group_unittest.cc | 4
-rw-r--r--  chromium/gpu/command_buffer/service/context_state_autogen.h | 5
-rw-r--r--  chromium/gpu/command_buffer/service/context_state_impl_autogen.h | 127
-rw-r--r--  chromium/gpu/command_buffer/service/context_state_test_helpers_autogen.h | 15
-rw-r--r--  chromium/gpu/command_buffer/service/decoder_context.h | 9
-rw-r--r--  chromium/gpu/command_buffer/service/external_vk_image_backing.cc | 536
-rw-r--r--  chromium/gpu/command_buffer/service/external_vk_image_backing.h | 63
-rw-r--r--  chromium/gpu/command_buffer/service/external_vk_image_dawn_representation.cc | 33
-rw-r--r--  chromium/gpu/command_buffer/service/external_vk_image_dawn_representation.h | 9
-rw-r--r--  chromium/gpu/command_buffer/service/external_vk_image_factory.cc | 63
-rw-r--r--  chromium/gpu/command_buffer/service/external_vk_image_factory.h | 6
-rw-r--r--  chromium/gpu/command_buffer/service/external_vk_image_gl_representation.cc | 36
-rw-r--r--  chromium/gpu/command_buffer/service/external_vk_image_skia_representation.cc | 2
-rw-r--r--  chromium/gpu/command_buffer/service/feature_info.cc | 164
-rw-r--r--  chromium/gpu/command_buffer/service/feature_info.h | 9
-rw-r--r--  chromium/gpu/command_buffer/service/feature_info_unittest.cc | 70
-rw-r--r--  chromium/gpu/command_buffer/service/framebuffer_manager_unittest.cc | 7
-rw-r--r--  chromium/gpu/command_buffer/service/gl_context_virtual_unittest.cc | 6
-rw-r--r--  chromium/gpu/command_buffer/service/gl_surface_mock.h | 2
-rw-r--r--  chromium/gpu/command_buffer/service/gl_utils.cc | 267
-rw-r--r--  chromium/gpu/command_buffer/service/gl_utils.h | 8
-rw-r--r--  chromium/gpu/command_buffer/service/gles2_cmd_copy_texture_chromium.cc | 149
-rw-r--r--  chromium/gpu/command_buffer/service/gles2_cmd_copy_texture_chromium.h | 1
-rw-r--r--  chromium/gpu/command_buffer/service/gles2_cmd_decoder.cc | 1420
-rw-r--r--  chromium/gpu/command_buffer/service/gles2_cmd_decoder_autogen.h | 113
-rw-r--r--  chromium/gpu/command_buffer/service/gles2_cmd_decoder_mock.h | 8
-rw-r--r--  chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough.cc | 289
-rw-r--r--  chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough.h | 77
-rw-r--r--  chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough_doer_prototypes.h | 4
-rw-r--r--  chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough_doers.cc | 350
-rw-r--r--  chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough_handlers.cc | 610
-rw-r--r--  chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough_handlers_autogen.cc | 113
-rw-r--r--  chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough_unittest_textures.cc | 226
-rw-r--r--  chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest.cc | 3
-rw-r--r--  chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_base.cc | 12
-rw-r--r--  chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_extensions.cc | 1613
-rw-r--r--  chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_extensions_autogen.h | 55
-rw-r--r--  chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_framebuffers.cc | 2
-rw-r--r--  chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_textures.cc | 135
-rw-r--r--  chromium/gpu/command_buffer/service/gles2_cmd_validation_autogen.h | 66
-rw-r--r--  chromium/gpu/command_buffer/service/gles2_cmd_validation_implementation_autogen.h | 126
-rw-r--r--  chromium/gpu/command_buffer/service/gpu_command_buffer_memory_tracker.cc | 8
-rw-r--r--  chromium/gpu/command_buffer/service/gpu_command_buffer_memory_tracker.h | 2
-rw-r--r--  chromium/gpu/command_buffer/service/gpu_switches.cc | 17
-rw-r--r--  chromium/gpu/command_buffer/service/gpu_switches.h | 8
-rw-r--r--  chromium/gpu/command_buffer/service/gpu_tracer.cc | 49
-rw-r--r--  chromium/gpu/command_buffer/service/gpu_tracer.h | 15
-rw-r--r--  chromium/gpu/command_buffer/service/gpu_tracer_unittest.cc | 4
-rw-r--r--  chromium/gpu/command_buffer/service/gr_cache_controller_unittest.cc | 2
-rw-r--r--  chromium/gpu/command_buffer/service/gr_shader_cache.cc | 2
-rw-r--r--  chromium/gpu/command_buffer/service/image_factory.cc | 1
-rw-r--r--  chromium/gpu/command_buffer/service/image_factory.h | 1
-rw-r--r--  chromium/gpu/command_buffer/service/image_reader_gl_owner.cc | 21
-rw-r--r--  chromium/gpu/command_buffer/service/image_reader_gl_owner_unittest.cc | 12
-rw-r--r--  chromium/gpu/command_buffer/service/mailbox_manager_dummy.cc | 27
-rw-r--r--  chromium/gpu/command_buffer/service/mailbox_manager_dummy.h | 38
-rw-r--r--  chromium/gpu/command_buffer/service/mailbox_manager_factory.cc | 14
-rw-r--r--  chromium/gpu/command_buffer/service/memory_program_cache.cc | 34
-rw-r--r--  chromium/gpu/command_buffer/service/memory_tracking.h | 19
-rw-r--r--  chromium/gpu/command_buffer/service/mocks.h | 2
-rw-r--r--  chromium/gpu/command_buffer/service/passthrough_discardable_manager.cc | 8
-rw-r--r--  chromium/gpu/command_buffer/service/passthrough_discardable_manager.h | 3
-rw-r--r--  chromium/gpu/command_buffer/service/path_manager.cc | 251
-rw-r--r--  chromium/gpu/command_buffer/service/path_manager.h | 68
-rw-r--r--  chromium/gpu/command_buffer/service/path_manager_unittest.cc | 165
-rw-r--r--  chromium/gpu/command_buffer/service/program_cache.cc | 14
-rw-r--r--  chromium/gpu/command_buffer/service/program_manager.cc | 195
-rw-r--r--  chromium/gpu/command_buffer/service/program_manager.h | 32
-rw-r--r--  chromium/gpu/command_buffer/service/program_manager_unittest.cc | 149
-rw-r--r--  chromium/gpu/command_buffer/service/raster_cmd_validation_implementation_autogen.h | 2
-rw-r--r--  chromium/gpu/command_buffer/service/raster_decoder.cc | 270
-rw-r--r--  chromium/gpu/command_buffer/service/raster_decoder.h | 3
-rw-r--r--  chromium/gpu/command_buffer/service/raster_decoder_autogen.h | 6
-rw-r--r--  chromium/gpu/command_buffer/service/raster_decoder_unittest.cc | 82
-rw-r--r--  chromium/gpu/command_buffer/service/raster_decoder_unittest_base.cc | 2
-rw-r--r--  chromium/gpu/command_buffer/service/service_discardable_manager.cc | 15
-rw-r--r--  chromium/gpu/command_buffer/service/service_discardable_manager.h | 3
-rw-r--r--  chromium/gpu/command_buffer/service/service_discardable_manager_unittest.cc | 3
-rw-r--r--  chromium/gpu/command_buffer/service/service_font_manager.cc | 6
-rw-r--r--  chromium/gpu/command_buffer/service/service_transfer_cache.cc | 50
-rw-r--r--  chromium/gpu/command_buffer/service/service_transfer_cache.h | 6
-rw-r--r--  chromium/gpu/command_buffer/service/service_transfer_cache_unittest.cc | 7
-rw-r--r--  chromium/gpu/command_buffer/service/service_utils.cc | 96
-rw-r--r--  chromium/gpu/command_buffer/service/service_utils.h | 13
-rw-r--r--  chromium/gpu/command_buffer/service/shared_context_state.cc | 192
-rw-r--r--  chromium/gpu/command_buffer/service/shared_context_state.h | 40
-rw-r--r--  chromium/gpu/command_buffer/service/shared_image_backing.cc | 55
-rw-r--r--  chromium/gpu/command_buffer/service/shared_image_backing.h | 82
-rw-r--r--  chromium/gpu/command_buffer/service/shared_image_backing_d3d.cc | 222
-rw-r--r--  chromium/gpu/command_buffer/service/shared_image_backing_d3d.h | 113
-rw-r--r--  chromium/gpu/command_buffer/service/shared_image_backing_egl_image.cc | 331
-rw-r--r--  chromium/gpu/command_buffer/service/shared_image_backing_egl_image.h | 113
-rw-r--r--  chromium/gpu/command_buffer/service/shared_image_backing_factory.h | 1
-rw-r--r--  chromium/gpu/command_buffer/service/shared_image_backing_factory_ahardwarebuffer.cc | 394
-rw-r--r--  chromium/gpu/command_buffer/service/shared_image_backing_factory_ahardwarebuffer.h | 2
-rw-r--r--  chromium/gpu/command_buffer/service/shared_image_backing_factory_ahardwarebuffer_unittest.cc | 296
-rw-r--r--  chromium/gpu/command_buffer/service/shared_image_backing_factory_d3d.cc | 496
-rw-r--r--  chromium/gpu/command_buffer/service/shared_image_backing_factory_d3d.h | 6
-rw-r--r--  chromium/gpu/command_buffer/service/shared_image_backing_factory_d3d_unittest.cc | 402
-rw-r--r--  chromium/gpu/command_buffer/service/shared_image_backing_factory_gl_texture.cc | 501
-rw-r--r--  chromium/gpu/command_buffer/service/shared_image_backing_factory_gl_texture.h | 36
-rw-r--r--  chromium/gpu/command_buffer/service/shared_image_backing_factory_gl_texture_unittest.cc | 642
-rw-r--r--  chromium/gpu/command_buffer/service/shared_image_backing_factory_iosurface.h | 1
-rw-r--r--  chromium/gpu/command_buffer/service/shared_image_backing_factory_iosurface.mm | 145
-rw-r--r--  chromium/gpu/command_buffer/service/shared_image_backing_factory_iosurface_unittest.cc | 490
-rw-r--r--  chromium/gpu/command_buffer/service/shared_image_backing_factory_ozone.cc | 74
-rw-r--r--  chromium/gpu/command_buffer/service/shared_image_backing_factory_ozone.h | 66
-rw-r--r--  chromium/gpu/command_buffer/service/shared_image_backing_ozone.cc | 84
-rw-r--r--  chromium/gpu/command_buffer/service/shared_image_backing_ozone.h | 28
-rw-r--r--  chromium/gpu/command_buffer/service/shared_image_batch_access_manager.cc | 80
-rw-r--r--  chromium/gpu/command_buffer/service/shared_image_batch_access_manager.h | 48
-rw-r--r--  chromium/gpu/command_buffer/service/shared_image_factory.cc | 98
-rw-r--r--  chromium/gpu/command_buffer/service/shared_image_factory.h | 10
-rw-r--r--  chromium/gpu/command_buffer/service/shared_image_factory_unittest.cc | 18
-rw-r--r--  chromium/gpu/command_buffer/service/shared_image_manager.cc | 58
-rw-r--r--  chromium/gpu/command_buffer/service/shared_image_manager.h | 42
-rw-r--r--  chromium/gpu/command_buffer/service/shared_image_manager_unittest.cc | 67
-rw-r--r--  chromium/gpu/command_buffer/service/shared_image_representation.cc | 202
-rw-r--r--  chromium/gpu/command_buffer/service/shared_image_representation.h | 294
-rw-r--r--  chromium/gpu/command_buffer/service/shared_image_representation_d3d.cc | 136
-rw-r--r--  chromium/gpu/command_buffer/service/shared_image_representation_d3d.h | 69
-rw-r--r--  chromium/gpu/command_buffer/service/shared_image_representation_dawn_ozone.cc | 110
-rw-r--r--  chromium/gpu/command_buffer/service/shared_image_representation_dawn_ozone.h | 54
-rw-r--r--  chromium/gpu/command_buffer/service/shared_image_representation_gl_ozone.cc | 111
-rw-r--r--  chromium/gpu/command_buffer/service/shared_image_representation_gl_ozone.h | 54
-rw-r--r--  chromium/gpu/command_buffer/service/shared_image_representation_skia_gl.cc | 109
-rw-r--r--  chromium/gpu/command_buffer/service/shared_image_representation_skia_gl.h | 27
-rw-r--r--  chromium/gpu/command_buffer/service/shared_image_representation_unittest.cc | 248
-rw-r--r--  chromium/gpu/command_buffer/service/shared_image_test_utils.cc | 64
-rw-r--r--  chromium/gpu/command_buffer/service/shared_image_test_utils.h | 28
-rw-r--r--  chromium/gpu/command_buffer/service/shared_image_video.cc | 186
-rw-r--r--  chromium/gpu/command_buffer/service/shared_image_video.h | 5
-rw-r--r--  chromium/gpu/command_buffer/service/skia_utils.cc | 43
-rw-r--r--  chromium/gpu/command_buffer/service/skia_utils.h | 6
-rw-r--r--  chromium/gpu/command_buffer/service/stream_texture_shared_image_interface.h | 7
-rw-r--r--  chromium/gpu/command_buffer/service/surface_texture_gl_owner_unittest.cc | 3
-rw-r--r--  chromium/gpu/command_buffer/service/sync_point_manager.cc | 2
-rw-r--r--  chromium/gpu/command_buffer/service/sync_point_manager.h | 2
-rw-r--r--  chromium/gpu/command_buffer/service/test_helper.cc | 44
-rw-r--r--  chromium/gpu/command_buffer/service/test_shared_image_backing.cc | 243
-rw-r--r--  chromium/gpu/command_buffer/service/test_shared_image_backing.h | 78
-rw-r--r--  chromium/gpu/command_buffer/service/texture_manager.cc | 213
-rw-r--r--  chromium/gpu/command_buffer/service/texture_manager.h | 17
-rw-r--r--  chromium/gpu/command_buffer/service/texture_manager_unittest.cc | 28
-rw-r--r--  chromium/gpu/command_buffer/service/webgpu_decoder_impl.cc | 533
-rw-r--r--  chromium/gpu/command_buffer/service/webgpu_decoder_unittest.cc | 78
-rw-r--r--  chromium/gpu/command_buffer/service/wrapped_sk_image.cc | 81
-rw-r--r--  chromium/gpu/command_buffer/service/wrapped_sk_image.h | 1
-rw-r--r--  chromium/gpu/command_buffer/webgpu_cmd_buffer_functions.txt | 11
-rw-r--r--  chromium/gpu/config/BUILD.gn | 33
-rw-r--r--  chromium/gpu/config/PRESUBMIT.py | 36
-rw-r--r--  chromium/gpu/config/device_perf_info.cc | 32
-rw-r--r--  chromium/gpu/config/device_perf_info.h | 66
-rw-r--r--  chromium/gpu/config/gpu_control_list.cc | 104
-rw-r--r--  chromium/gpu/config/gpu_control_list.h | 16
-rw-r--r--  chromium/gpu/config/gpu_control_list_entry_unittest.cc | 59
-rw-r--r--  chromium/gpu/config/gpu_control_list_testing.json | 65
-rw-r--r--  chromium/gpu/config/gpu_control_list_testing_arrays_and_structs_autogen.h | 111
-rw-r--r--  chromium/gpu/config/gpu_control_list_testing_autogen.cc | 412
-rw-r--r--  chromium/gpu/config/gpu_control_list_testing_entry_enums_autogen.h | 3
-rw-r--r--  chromium/gpu/config/gpu_control_list_testing_exceptions_autogen.h | 67
-rw-r--r--  chromium/gpu/config/gpu_crash_keys.cc | 1
-rw-r--r--  chromium/gpu/config/gpu_crash_keys.h | 2
-rw-r--r--  chromium/gpu/config/gpu_driver_bug_list.json | 187
-rw-r--r--  chromium/gpu/config/gpu_extra_info.h | 12
-rw-r--r--  chromium/gpu/config/gpu_finch_features.cc | 78
-rw-r--r--  chromium/gpu/config/gpu_finch_features.h | 14
-rw-r--r--  chromium/gpu/config/gpu_info.cc | 66
-rw-r--r--  chromium/gpu/config/gpu_info.h | 87
-rw-r--r--  chromium/gpu/config/gpu_info_collector.cc | 49
-rw-r--r--  chromium/gpu/config/gpu_info_collector.h | 20
-rw-r--r--  chromium/gpu/config/gpu_info_collector_linux.cc | 9
-rw-r--r--  chromium/gpu/config/gpu_info_collector_mac.mm | 5
-rw-r--r--  chromium/gpu/config/gpu_info_collector_win.cc | 156
-rw-r--r--  chromium/gpu/config/gpu_info_unittest.cc | 6
-rw-r--r--  chromium/gpu/config/gpu_lists_version.h | 2
-rw-r--r--  chromium/gpu/config/gpu_mode.h | 13
-rw-r--r--  chromium/gpu/config/gpu_preferences.h | 26
-rw-r--r--  chromium/gpu/config/gpu_preferences_unittest.cc | 17
-rw-r--r--  chromium/gpu/config/gpu_switches.cc | 5
-rw-r--r--  chromium/gpu/config/gpu_test_config.cc | 5
-rw-r--r--  chromium/gpu/config/gpu_util.cc | 272
-rw-r--r--  chromium/gpu/config/gpu_util.h | 21
-rw-r--r--  chromium/gpu/config/gpu_workaround_list.txt | 26
-rwxr-xr-x  chromium/gpu/config/process_json.py | 114
-rw-r--r--  chromium/gpu/config/skia_limits.cc | 58
-rw-r--r--  chromium/gpu/config/skia_limits.h | 25
-rw-r--r--  chromium/gpu/config/software_rendering_list.json | 86
-rw-r--r--  chromium/gpu/config/vulkan_info.h | 2
-rw-r--r--  chromium/gpu/gles2_conform_support/BUILD.gn | 16
-rw-r--r--  chromium/gpu/gles2_conform_support/egl/context.cc | 2
-rw-r--r--  chromium/gpu/ipc/BUILD.gn | 8
-rw-r--r--  chromium/gpu/ipc/client/BUILD.gn | 14
-rw-r--r--  chromium/gpu/ipc/client/DEPS | 2
-rw-r--r--  chromium/gpu/ipc/client/OWNERS | 1
-rw-r--r--  chromium/gpu/ipc/client/client_shared_image_interface.cc | 137
-rw-r--r--  chromium/gpu/ipc/client/client_shared_image_interface.h | 77
-rw-r--r--  chromium/gpu/ipc/client/command_buffer_proxy_impl.cc | 15
-rw-r--r--  chromium/gpu/ipc/client/command_buffer_proxy_impl.h | 27
-rw-r--r--  chromium/gpu/ipc/client/gpu_channel_host.cc | 24
-rw-r--r--  chromium/gpu/ipc/client/gpu_channel_host.h | 6
-rw-r--r--  chromium/gpu/ipc/client/image_decode_accelerator_proxy.cc | 3
-rw-r--r--  chromium/gpu/ipc/client/image_decode_accelerator_proxy_unittest.cc | 124
-rw-r--r--  chromium/gpu/ipc/client/shared_image_interface_proxy.cc | 44
-rw-r--r--  chromium/gpu/ipc/client/shared_image_interface_proxy.h | 52
-rw-r--r--  chromium/gpu/ipc/command_buffer_task_executor.cc | 16
-rw-r--r--  chromium/gpu/ipc/command_buffer_task_executor.h | 29
-rw-r--r--  chromium/gpu/ipc/common/BUILD.gn | 74
-rw-r--r--  chromium/gpu/ipc/common/device_perf_info.mojom | 40
-rw-r--r--  chromium/gpu/ipc/common/device_perf_info.typemap | 13
-rw-r--r--  chromium/gpu/ipc/common/device_perf_info_mojom_traits.cc | 131
-rw-r--r--  chromium/gpu/ipc/common/device_perf_info_mojom_traits.h | 67
-rwxr-xr-x  chromium/gpu/ipc/common/generate_vulkan_types.py | 24
-rw-r--r--  chromium/gpu/ipc/common/gpu_command_buffer_traits_multi.h | 11
-rw-r--r--  chromium/gpu/ipc/common/gpu_extra_info.mojom | 9
-rw-r--r--  chromium/gpu/ipc/common/gpu_extra_info_mojom_traits.cc | 12
-rw-r--r--  chromium/gpu/ipc/common/gpu_extra_info_mojom_traits.h | 15
-rw-r--r--  chromium/gpu/ipc/common/gpu_info.mojom | 24
-rw-r--r--  chromium/gpu/ipc/common/gpu_info.typemap | 5
-rw-r--r--  chromium/gpu/ipc/common/gpu_info_mojom_traits.cc | 28
-rw-r--r--  chromium/gpu/ipc/common/gpu_info_mojom_traits.h | 64
-rw-r--r--  chromium/gpu/ipc/common/gpu_memory_buffer_impl_dxgi.cc | 21
-rw-r--r--  chromium/gpu/ipc/common/gpu_memory_buffer_impl_dxgi.h | 2
-rw-r--r--  chromium/gpu/ipc/common/gpu_memory_buffer_impl_shared_memory.cc | 4
-rw-r--r--  chromium/gpu/ipc/common/gpu_memory_buffer_impl_test_template.h | 36
-rw-r--r--  chromium/gpu/ipc/common/gpu_memory_buffer_support.cc | 26
-rw-r--r--  chromium/gpu/ipc/common/gpu_memory_buffer_support.h | 6
-rw-r--r--  chromium/gpu/ipc/common/gpu_peak_memory.h | 21
-rw-r--r--  chromium/gpu/ipc/common/gpu_peak_memory.mojom | 16
-rw-r--r--  chromium/gpu/ipc/common/gpu_peak_memory.typemap | 9
-rw-r--r--  chromium/gpu/ipc/common/gpu_peak_memory_mojom_traits.h | 61
-rw-r--r--  chromium/gpu/ipc/common/gpu_preferences.mojom | 9
-rw-r--r--  chromium/gpu/ipc/common/gpu_preferences_mojom_traits.h | 35
-rw-r--r--  chromium/gpu/ipc/common/gpu_watchdog_timeout.h | 9
-rw-r--r--  chromium/gpu/ipc/common/mojom_traits_unittest.cc | 28
-rw-r--r--  chromium/gpu/ipc/common/typemaps.gni | 4
-rw-r--r--  chromium/gpu/ipc/common/vulkan_info.mojom | 2
-rw-r--r--  chromium/gpu/ipc/common/vulkan_info_mojom_traits.h | 6
-rw-r--r--  chromium/gpu/ipc/common/vulkan_types_mojom_traits.cc | 2
-rw-r--r--  chromium/gpu/ipc/gl_in_process_context.cc | 10
-rw-r--r--  chromium/gpu/ipc/gpu_in_process_thread_service.cc | 24
-rw-r--r--  chromium/gpu/ipc/gpu_in_process_thread_service.h | 24
-rw-r--r--  chromium/gpu/ipc/gpu_task_scheduler_helper.cc | 70
-rw-r--r--  chromium/gpu/ipc/gpu_task_scheduler_helper.h | 100
-rw-r--r--  chromium/gpu/ipc/host/gpu_memory_buffer_support.cc | 10
-rw-r--r--  chromium/gpu/ipc/host/gpu_memory_buffer_support.h | 7
-rw-r--r--  chromium/gpu/ipc/in_process_command_buffer.cc | 384
-rw-r--r--  chromium/gpu/ipc/in_process_command_buffer.h | 71
-rw-r--r--  chromium/gpu/ipc/in_process_gpu_thread_holder.cc | 56
-rw-r--r--  chromium/gpu/ipc/in_process_gpu_thread_holder.h | 14
-rw-r--r--  chromium/gpu/ipc/raster_in_process_context.cc | 4
-rw-r--r--  chromium/gpu/ipc/scheduler_sequence.h | 6
-rw-r--r--  chromium/gpu/ipc/service/BUILD.gn | 9
-rw-r--r--  chromium/gpu/ipc/service/DEPS | 6
-rw-r--r--  chromium/gpu/ipc/service/gles2_command_buffer_stub.cc | 4
-rw-r--r--  chromium/gpu/ipc/service/gpu_channel.cc | 4
-rw-r--r--  chromium/gpu/ipc/service/gpu_channel.h | 2
-rw-r--r--  chromium/gpu/ipc/service/gpu_channel_manager.cc | 289
-rw-r--r--  chromium/gpu/ipc/service/gpu_channel_manager.h | 65
-rw-r--r--  chromium/gpu/ipc/service/gpu_channel_manager_delegate.h | 10
-rw-r--r--  chromium/gpu/ipc/service/gpu_channel_manager_unittest.cc | 139
-rw-r--r--  chromium/gpu/ipc/service/gpu_channel_test_common.cc | 7
-rw-r--r--  chromium/gpu/ipc/service/gpu_channel_test_common.h | 4
-rw-r--r--  chromium/gpu/ipc/service/gpu_init.cc | 117
-rw-r--r--  chromium/gpu/ipc/service/gpu_init.h | 7
-rw-r--r--  chromium/gpu/ipc/service/gpu_memory_buffer_factory_dxgi.cc | 6
-rw-r--r--  chromium/gpu/ipc/service/gpu_memory_buffer_factory_io_surface.cc | 10
-rw-r--r--  chromium/gpu/ipc/service/gpu_memory_buffer_factory_io_surface.h | 1
-rw-r--r--  chromium/gpu/ipc/service/gpu_memory_buffer_factory_native_pixmap.cc | 67
-rw-r--r--  chromium/gpu/ipc/service/gpu_memory_buffer_factory_native_pixmap.h | 1
-rw-r--r--  chromium/gpu/ipc/service/gpu_memory_buffer_factory_test_template.h | 5
-rw-r--r--  chromium/gpu/ipc/service/gpu_watchdog_thread.cc | 94
-rw-r--r--  chromium/gpu/ipc/service/gpu_watchdog_thread.h | 43
-rw-r--r--  chromium/gpu/ipc/service/gpu_watchdog_thread_unittest.cc | 51
-rw-r--r--  chromium/gpu/ipc/service/gpu_watchdog_thread_v2.cc | 350
-rw-r--r--  chromium/gpu/ipc/service/gpu_watchdog_thread_v2.h | 95
-rw-r--r--  chromium/gpu/ipc/service/image_decode_accelerator_stub.cc | 13
-rw-r--r--  chromium/gpu/ipc/service/image_decode_accelerator_stub_unittest.cc | 351
-rw-r--r--  chromium/gpu/ipc/service/image_transport_surface_overlay_mac.h | 2
-rw-r--r--  chromium/gpu/ipc/service/image_transport_surface_overlay_mac.mm | 2
-rw-r--r--  chromium/gpu/ipc/service/image_transport_surface_win.cc | 11
-rw-r--r--  chromium/gpu/ipc/service/raster_command_buffer_stub.cc | 9
-rw-r--r--  chromium/gpu/ipc/service/shared_image_stub.cc | 12
-rw-r--r--  chromium/gpu/ipc/service/shared_image_stub.h | 2
-rw-r--r--  chromium/gpu/ipc/service/stream_texture_android.cc | 15
-rw-r--r--  chromium/gpu/ipc/service/stream_texture_android.h | 2
-rw-r--r--  chromium/gpu/ipc/shared_image_interface_in_process.cc | 427
-rw-r--r--  chromium/gpu/ipc/shared_image_interface_in_process.h | 223
-rw-r--r--  chromium/gpu/ipc/webgpu_in_process_context.cc | 4
-rw-r--r--  chromium/gpu/ipc/webgpu_in_process_context.h | 3
-rw-r--r--  chromium/gpu/khronos_glcts_support/BUILD.gn | 52
-rw-r--r--  chromium/gpu/skia_bindings/gl_bindings_skia_cmd_buffer.cc | 52
-rw-r--r--  chromium/gpu/skia_bindings/gles2_implementation_with_grcontext_support.cc | 15
-rw-r--r--  chromium/gpu/skia_bindings/gles2_implementation_with_grcontext_support.h | 5
-rw-r--r--  chromium/gpu/vulkan/BUILD.gn | 82
-rw-r--r--  chromium/gpu/vulkan/android/BUILD.gn | 1
-rw-r--r--  chromium/gpu/vulkan/android/vulkan_android_unittests.cc | 22
-rw-r--r--  chromium/gpu/vulkan/android/vulkan_implementation_android.cc | 295
-rw-r--r--  chromium/gpu/vulkan/android/vulkan_implementation_android.h | 21
-rw-r--r--  chromium/gpu/vulkan/demo/BUILD.gn | 4
-rw-r--r--  chromium/gpu/vulkan/demo/vulkan_demo.cc | 2
-rw-r--r--  chromium/gpu/vulkan/features.gni | 7
-rwxr-xr-x  chromium/gpu/vulkan/generate_bindings.py | 83
-rw-r--r--  chromium/gpu/vulkan/init/vulkan_factory.cc | 4
-rw-r--r--  chromium/gpu/vulkan/vulkan_command_buffer.cc | 13
-rw-r--r--  chromium/gpu/vulkan/vulkan_command_buffer.h | 9
-rw-r--r--  chromium/gpu/vulkan/vulkan_device_queue.cc | 61
-rw-r--r--  chromium/gpu/vulkan/vulkan_device_queue.h | 1
-rw-r--r--  chromium/gpu/vulkan/vulkan_fence_helper.cc | 6
-rw-r--r--  chromium/gpu/vulkan/vulkan_fence_helper.h | 2
-rw-r--r--  chromium/gpu/vulkan/vulkan_fence_helper_unittest.cc | 3
-rw-r--r--  chromium/gpu/vulkan/vulkan_function_pointers.cc | 310
-rw-r--r--  chromium/gpu/vulkan/vulkan_function_pointers.h | 326
-rw-r--r--  chromium/gpu/vulkan/vulkan_image.cc | 367
-rw-r--r--  chromium/gpu/vulkan/vulkan_image.h | 146
-rw-r--r--  chromium/gpu/vulkan/vulkan_image_android.cc | 141
-rw-r--r--  chromium/gpu/vulkan/vulkan_image_fuchsia.cc | 21
-rw-r--r--  chromium/gpu/vulkan/vulkan_image_linux.cc | 73
-rw-r--r--  chromium/gpu/vulkan/vulkan_image_win.cc | 21
-rw-r--r--  chromium/gpu/vulkan/vulkan_implementation.cc | 4
-rw-r--r--  chromium/gpu/vulkan/vulkan_implementation.h | 27
-rw-r--r--  chromium/gpu/vulkan/vulkan_instance.cc | 105
-rw-r--r--  chromium/gpu/vulkan/vulkan_instance.h | 2
-rw-r--r--  chromium/gpu/vulkan/vulkan_surface.cc | 44
-rw-r--r--  chromium/gpu/vulkan/vulkan_surface.h | 5
-rw-r--r--  chromium/gpu/vulkan/vulkan_swap_chain.cc | 145
-rw-r--r--  chromium/gpu/vulkan/vulkan_swap_chain.h | 21
-rw-r--r--  chromium/gpu/vulkan/vulkan_util.cc | 4
-rw-r--r--  chromium/gpu/vulkan/win32/BUILD.gn | 4
-rw-r--r--  chromium/gpu/vulkan/win32/vulkan_implementation_win32.cc | 65
-rw-r--r--  chromium/gpu/vulkan/win32/vulkan_implementation_win32.h | 19
-rw-r--r--  chromium/gpu/vulkan/x/BUILD.gn | 10
-rw-r--r--  chromium/gpu/vulkan/x/vulkan_implementation_x11.cc | 63
-rw-r--r--  chromium/gpu/vulkan/x/vulkan_implementation_x11.h | 11
-rw-r--r--  chromium/gpu/vulkan/x/vulkan_surface_x11.cc | 44
429 files changed, 20239 insertions, 18303 deletions
diff --git a/chromium/gpu/BUILD.gn b/chromium/gpu/BUILD.gn
index 8d9d5eb6be5..2cc0c63f37f 100644
--- a/chromium/gpu/BUILD.gn
+++ b/chromium/gpu/BUILD.gn
@@ -70,15 +70,11 @@ component("gles2") {
}
component("raster") {
- public_deps = [
- "//gpu/command_buffer/client:raster_sources",
- ]
+ public_deps = [ "//gpu/command_buffer/client:raster_sources" ]
}
component("webgpu") {
- public_deps = [
- "//gpu/command_buffer/client:webgpu_sources",
- ]
+ public_deps = [ "//gpu/command_buffer/client:webgpu_sources" ]
}
if (!use_static_angle) {
@@ -185,6 +181,8 @@ jumbo_static_library("test_support") {
"command_buffer/service/mocks.h",
"command_buffer/service/test_helper.cc",
"command_buffer/service/test_helper.h",
+ "command_buffer/service/test_shared_image_backing.cc",
+ "command_buffer/service/test_shared_image_backing.h",
"ipc/raster_in_process_context.cc",
"ipc/raster_in_process_context.h",
"ipc/service/gpu_memory_buffer_factory_test_template.h",
@@ -219,14 +217,10 @@ jumbo_static_library("test_support") {
if (!is_android && !is_fuchsia && !is_chromeos) {
proto_library("gl_lpm_fuzzer_proto") {
- sources = [
- "command_buffer/tests/lpm/gl_lpm_fuzzer.proto",
- ]
+ sources = [ "command_buffer/tests/lpm/gl_lpm_fuzzer.proto" ]
use_protobuf_full = true
- deps = [
- "//third_party/protobuf:protobuf_full",
- ]
+ deps = [ "//third_party/protobuf:protobuf_full" ]
}
static_library("gl_lpm_shader_to_string") {
@@ -242,9 +236,7 @@ if (!is_android && !is_fuchsia && !is_chromeos) {
}
test("gl_lpm_shader_to_string_unittest") {
- sources = [
- "command_buffer/tests/lpm/gl_lpm_shader_to_string_unittest.cc",
- ]
+ sources = [ "command_buffer/tests/lpm/gl_lpm_shader_to_string_unittest.cc" ]
deps = [
":gl_lpm_shader_to_string",
@@ -293,13 +285,15 @@ test("gl_tests") {
"command_buffer/service/shared_image_backing_factory_gl_texture_unittest.cc",
"command_buffer/service/shared_image_factory_unittest.cc",
"command_buffer/service/shared_image_manager_unittest.cc",
+ "command_buffer/service/shared_image_representation_unittest.cc",
+ "command_buffer/service/shared_image_test_utils.cc",
+ "command_buffer/service/shared_image_test_utils.h",
"command_buffer/tests/compressed_texture_test.cc",
"command_buffer/tests/es3_misc_functions_unittest.cc",
"command_buffer/tests/gl_bgra_mipmap_unittest.cc",
"command_buffer/tests/gl_bind_uniform_location_unittest.cc",
"command_buffer/tests/gl_chromium_framebuffer_mixed_samples_unittest.cc",
"command_buffer/tests/gl_chromium_framebuffer_multisample_unittest.cc",
- "command_buffer/tests/gl_chromium_path_rendering_unittest.cc",
"command_buffer/tests/gl_clear_framebuffer_unittest.cc",
"command_buffer/tests/gl_copy_tex_image_2d_workaround_unittest.cc",
"command_buffer/tests/gl_copy_texture_CHROMIUM_unittest.cc",
@@ -313,6 +307,7 @@ test("gl_tests") {
"command_buffer/tests/gl_ext_srgb_unittest.cc",
"command_buffer/tests/gl_ext_window_rectangles_unittest.cc",
"command_buffer/tests/gl_gpu_memory_buffer_unittest.cc",
+ "command_buffer/tests/gl_helper_unittest.cc",
"command_buffer/tests/gl_iosurface_readback_workaround_unittest.cc",
"command_buffer/tests/gl_lose_context_chromium_unittest.cc",
"command_buffer/tests/gl_manager.cc",
@@ -424,6 +419,8 @@ test("gl_tests") {
"//third_party/dawn/src/dawn:libdawn_proc",
]
}
+
+ data_deps = [ "//testing/buildbot/filters:gl_tests_filters" ]
}
test("gpu_unittests") {
@@ -515,7 +512,6 @@ test("gpu_unittests") {
"command_buffer/service/memory_program_cache_unittest.cc",
"command_buffer/service/multi_draw_manager_unittest.cc",
"command_buffer/service/passthrough_program_cache_unittest.cc",
- "command_buffer/service/path_manager_unittest.cc",
"command_buffer/service/program_cache_unittest.cc",
"command_buffer/service/program_manager_unittest.cc",
"command_buffer/service/query_manager_unittest.cc",
@@ -743,9 +739,7 @@ test("command_buffer_perftests") {
}
fuzzer_test("gpu_fuzzer") {
- sources = [
- "command_buffer/tests/fuzzer_main.cc",
- ]
+ sources = [ "command_buffer/tests/fuzzer_main.cc" ]
deps = [
":gles2",
@@ -763,9 +757,7 @@ fuzzer_test("gpu_fuzzer") {
if (is_linux) {
fuzzer_test("gpu_angle_fuzzer") {
- sources = [
- "command_buffer/tests/fuzzer_main.cc",
- ]
+ sources = [ "command_buffer/tests/fuzzer_main.cc" ]
defines = [ "GPU_FUZZER_USE_ANGLE" ]
@@ -783,9 +775,7 @@ if (is_linux) {
}
fuzzer_test("gpu_angle_passthrough_fuzzer") {
- sources = [
- "command_buffer/tests/fuzzer_main.cc",
- ]
+ sources = [ "command_buffer/tests/fuzzer_main.cc" ]
defines = [
"GPU_FUZZER_USE_ANGLE",
@@ -806,9 +796,7 @@ if (is_linux) {
}
fuzzer_test("gpu_swiftshader_fuzzer") {
- sources = [
- "command_buffer/tests/fuzzer_main.cc",
- ]
+ sources = [ "command_buffer/tests/fuzzer_main.cc" ]
defines = [ "GPU_FUZZER_USE_SWIFTSHADER" ]
@@ -826,9 +814,7 @@ if (is_linux) {
}
fuzzer_test("gpu_raster_fuzzer") {
- sources = [
- "command_buffer/tests/fuzzer_main.cc",
- ]
+ sources = [ "command_buffer/tests/fuzzer_main.cc" ]
defines = [ "GPU_FUZZER_USE_RASTER_DECODER" ]
@@ -846,9 +832,7 @@ if (is_linux) {
}
fuzzer_test("gpu_raster_passthrough_fuzzer") {
- sources = [
- "command_buffer/tests/fuzzer_main.cc",
- ]
+ sources = [ "command_buffer/tests/fuzzer_main.cc" ]
defines = [
"GPU_FUZZER_USE_ANGLE",
@@ -870,9 +854,7 @@ if (is_linux) {
}
fuzzer_test("gpu_raster_swiftshader_fuzzer") {
- sources = [
- "command_buffer/tests/fuzzer_main.cc",
- ]
+ sources = [ "command_buffer/tests/fuzzer_main.cc" ]
defines = [
"GPU_FUZZER_USE_RASTER_DECODER",
@@ -893,9 +875,7 @@ if (is_linux) {
}
fuzzer_test("gpu_raster_angle_fuzzer") {
- sources = [
- "command_buffer/tests/fuzzer_main.cc",
- ]
+ sources = [ "command_buffer/tests/fuzzer_main.cc" ]
defines = [
"GPU_FUZZER_USE_RASTER_DECODER",
@@ -915,3 +895,38 @@ if (is_linux) {
libfuzzer_options = [ "max_len=16384" ]
}
}
+
+# Microbenchmark to measure performance of GLHelper code, for use in
+# debugging, profiling, and optimizing.
+test("gpu_benchmark") {
+ sources = [ "command_buffer/tests/gl_helper_benchmark.cc" ]
+
+ configs += [
+ "//build/config/compiler:no_size_t_to_int_warning",
+ "//build/config:precompiled_headers",
+ "//third_party/khronos:khronos_headers",
+ ]
+
+ if (!is_debug && (is_win || is_android)) {
+ configs -= [ "//build/config/compiler:default_optimization" ]
+ configs += [ "//build/config/compiler:optimize_max" ]
+ }
+
+ deps = [
+ "//base",
+ "//base/test:test_support",
+ "//components/test:run_all_unittests",
+ "//components/viz/common",
+ "//components/viz/test:test_support",
+ "//gpu/command_buffer/client",
+ "//gpu/command_buffer/client:gles2_implementation",
+ "//gpu/ipc:gl_in_process_context",
+ "//gpu/ipc:gpu_thread_holder",
+ "//skia",
+ "//testing/gmock",
+ "//testing/gtest",
+ "//ui/gfx",
+ ]
+
+ data_deps = [ "//third_party/mesa_headers" ]
+}
diff --git a/chromium/gpu/GLES2/extensions/ANGLE/EGL_ANGLE_sync_control_rate.txt b/chromium/gpu/GLES2/extensions/ANGLE/EGL_ANGLE_sync_control_rate.txt
new file mode 100644
index 00000000000..2056ae88882
--- /dev/null
+++ b/chromium/gpu/GLES2/extensions/ANGLE/EGL_ANGLE_sync_control_rate.txt
@@ -0,0 +1,110 @@
+Name
+
+ ANGLE_sync_control_rate
+
+Name Strings
+
+ EGL_ANGLE_sync_control_rate
+
+Contact
+
+ Jonah Ryan-Davis, Google (jonahr 'at' google.com)
+
+Status
+
+ Draft.
+
+Version
+
+ Version 1, 2020-03-24
+
+ Based on GLX_OML_sync_control Revision 6.0
+
+Number
+
+ ???
+
+Dependencies
+
+ The extension is written against the EGL 1.2 Specification, although it
+ should work on other versions of these specifications. This extension
+ also requires an operating system which supports CLOCK_MONOTONIC.
+
+Overview
+
+ This extension provides counters which let applications know about the
+ timing of the last vertical retrace. By looking at the system clock, as
+ well as the refresh rate of the monitor, this should enable applications
+ to predict the position of future retraces so as to schedule an optimal
+ workload.
+
+ This extension incorporates the use of three counters that provide
+ the necessary synchronization. The Unadjusted System Time (or UST)
+ is the 64-bit CLOCK_MONOTONIC clock; in particular this lets the
+ application schedule future vertical retraces by querying this clock.
+ The graphics Media Stream Counter (or graphics MSC) is a counter
+ that is unique to the graphics subsystem and increments for each
+ vertical retrace that occurs. The Swap Buffer Counter (SBC) is an
+ attribute of an EGLSurface and is incremented each time a swap
+ buffer action is performed on the associated surface.
+
+ The use of these three counters allows the application to
+ synchronize graphics rendering to vertical retraces and/or swap
+ buffer actions. For example, by querying the synchronization values for
+ a given surface, the application can accurately predict the timing for
+ the next vertical retraces and schedule rendering accordingly.
+
+Issues
+
+ None.
+
+IP Status
+
+ No known issues.
+
+New Procedures and Functions
+
+ Bool eglGetMscRateANGLE(EGLDisplay* dpy,
+ EGLSurface surface,
+ int32_t* numerator,
+ int32_t* denominator)
+
+New Tokens
+
+ None
+
+Additions to the EGL 1.3 Specification
+
+ The graphics MSC value is incremented once for each screen refresh.
+ For a non-interlaced display, this means that the graphics MSC value
+ is incremented for each frame. For an interlaced display, it means
+ that it will be incremented for each field. For a multi-monitor
+ system, the monitor used to determine MSC is the one where the surface
+ is located. If the surface spans multiple monitors, the monitor used
+ to determine MSC is the one with the biggest coverage in pixels.
+
+ eglGetMscRateANGLE returns the rate at which the MSC will be incremented
+ for the display associated with <hdc>. The rate is expressed in Hertz
+ as <numerator> / <denominator>. If the MSC rate in Hertz is an
+ integer, then <denominator> will be 1 and <numerator> will be
+ the MSC rate.
+
+ The function eglGetMscRateANGLE will return TRUE if the function
+ completed successfully, FALSE otherwise.
+
+ Each time eglSwapBuffer succeeds, the SBC will be increased within a
+ finite time period.
+
+Errors
+
+ The function eglGetMscRateANGLE will return FALSE if there is no
+ current EGLContext.
+
+New Implementation Dependent State
+
+ None
+
+Revision History
+
+ Version 1, 2020-03-24 (Jonah Ryan-Davis)
+ - Initial draft, based on GLX_OML_sync_control revision 6.0.
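For orientation, here is a minimal C sketch of how a client might exercise the entry point declared above, assuming it is resolved through eglGetProcAddress. The function-pointer typedef is ours and mirrors the prototype exactly as written in this extension text (including the EGLDisplay* parameter), so the real ANGLE header may declare it differently; treat this purely as an illustration.

    #include <EGL/egl.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Local typedef for this sketch; mirrors the prototype in the spec above. */
    typedef EGLBoolean (*GetMscRateANGLEProc)(EGLDisplay* dpy,
                                              EGLSurface surface,
                                              int32_t* numerator,
                                              int32_t* denominator);

    static void print_msc_rate(EGLDisplay dpy, EGLSurface surface) {
      GetMscRateANGLEProc get_rate =
          (GetMscRateANGLEProc)eglGetProcAddress("eglGetMscRateANGLE");
      if (!get_rate)
        return;  /* Extension not exposed by this EGL implementation. */
      int32_t num = 0, den = 1;
      if (get_rate(&dpy, surface, &num, &den) && den != 0) {
        /* The rate is expressed in Hertz as numerator / denominator, e.g. 60/1. */
        printf("MSC rate: %.3f Hz\n", (double)num / (double)den);
      }
    }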
diff --git a/chromium/gpu/GLES2/extensions/CHROMIUM/CHROMIUM_copy_texture.txt b/chromium/gpu/GLES2/extensions/CHROMIUM/CHROMIUM_copy_texture.txt
index 8613f3cc06a..808208569b9 100644
--- a/chromium/gpu/GLES2/extensions/CHROMIUM/CHROMIUM_copy_texture.txt
+++ b/chromium/gpu/GLES2/extensions/CHROMIUM/CHROMIUM_copy_texture.txt
@@ -136,6 +136,11 @@ Additions to the OpenGL ES 2.0 Specification
INVALID_VALUE is generated if <sourceLevel> of the source texture is not
defined.
+ INVALID_OPERATION is generated on ES 3.0 if <sourceId> refers to an
+ external texture (OES_EGL_image_external), <destId> refers to a texture
+ with an integer-type internal format, and the underlying context does not
+ support OES_EGL_image_external_essl3.
+
The command
CopySubTextureCHROMIUM
@@ -175,6 +180,11 @@ Additions to the OpenGL ES 2.0 Specification
INVALID_VALUE is generated if (<xoffset> + <width>) > destWidth,
or (<yoffset> + <height>) > destHeight.
+ INVALID_OPERATION is generated on ES 2.0 if <sourceId> refers to an
+ external texture (OES_EGL_image_external), <destId> refers to a texture
+ with an integer-type internal format, and the underlying context does not
+ support OES_EGL_image_external_essl3.
+
Table 1.0 Valid internal formats for CopyTextureCHROMIUM:
<internalFormat>
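As a small, hypothetical illustration of the guard a client could apply before relying on the case the two new error clauses describe: copying from an external (OES_EGL_image_external) source into an integer-format destination is only valid when the context also exposes OES_EGL_image_external_essl3. The helper below only inspects the extension string and is not part of the extension itself.

    #include <string.h>
    #include <GLES2/gl2.h>

    /* Returns nonzero when an external-texture source can be copied into an
       integer-format destination without hitting the INVALID_OPERATION case
       described above. */
    static int can_copy_external_to_integer_format(void) {
      const char* ext = (const char*)glGetString(GL_EXTENSIONS);
      return ext != NULL && strstr(ext, "GL_OES_EGL_image_external_essl3") != NULL;
    }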
diff --git a/chromium/gpu/GLES2/extensions/CHROMIUM/CHROMIUM_path_rendering.txt b/chromium/gpu/GLES2/extensions/CHROMIUM/CHROMIUM_path_rendering.txt
deleted file mode 100644
index 74f76329726..00000000000
--- a/chromium/gpu/GLES2/extensions/CHROMIUM/CHROMIUM_path_rendering.txt
+++ /dev/null
@@ -1,1404 +0,0 @@
-Name
-
- CHROMIUM_path_rendering
-
-Name Strings
-
- GL_CHROMIUM_path_rendering
-
-Version
-
- Last Modified Date: August 14, 2014
-
-Dependencies
-
- OpenGL ES 3.0 is required.
-
-Overview
-
- This extension implements path rendering using the
- OpenGL API.
-
-New Tokens
-
- Accepted by the <matrixMode> parameter of MatrixLoadfCHROMIUM and
- MatrixLoadIdentityCHROMIUM:
- PATH_MODELVIEW_CHROMIUM 0x1700
- PATH_PROJECTION_CHROMIUM 0x1701
-
- Accepted in elements of the <commands> array parameter of
- PathCommandsCHROMIUM:
- CLOSE_PATH_CHROMIUM 0x00
- MOVE_TO_CHROMIUM 0x02
- LINE_TO_CHROMIUM 0x04
- QUADRATIC_CURVE_TO_CHROMIUM 0x0A
- CUBIC_CURVE_TO_CHROMIUM 0x0C
- CONIC_CURVE_TO_CHROMIUM 0x1A
-
- Accepted by the <pname> parameter of GetIntegerv,
- GetFloatv:
- PATH_MODELVIEW_MATRIX_CHROMIUM 0x0BA6
- PATH_PROJECTION_MATRIX_CHROMIUM 0x0BA7
-
- Accepted by the <pname> parameter of PathParameter{if}CHROMIUM:
- PATH_STROKE_WIDTH_CHROMIUM 0x9075
- PATH_END_CAPS_CHROMIUM 0x9076
- PATH_JOIN_STYLE_CHROMIUM 0x9079
- PATH_MITER_LIMIT_CHROMIUM 0x907a
- PATH_STROKE_BOUND_CHROMIUM 0x9086
-
- Accepted by the <value> parameter of PathParameter{if}CHROMIUM:
- FLAT_CHROMIUM 0x1D00
- SQUARE_CHROMIUM 0x90a3
- ROUND_CHROMIUM 0x90a4
- BEVEL_CHROMIUM 0x90A6
- MITER_REVERT_CHROMIUM 0x90A7
-
- Accepted by the <fillMode> parameter of StencilFillPathCHROMIUM
- StencilFillPathInstancedCHROMIUM and
- StencilThenCoverFillPathInstancedCHROMIUM:
- COUNT_UP_CHROMIUM 0x9088
- COUNT_DOWN_CHROMIUM 0x9089
-
- Accepted by the <coverMode> parameter of CoverFillPathCHROMIUM,
- CoverStrokePath, StencilThenCoverFillPathCHROMIUM and
- StencilThenCoverStrokePathCHROMIUM:
- CONVEX_HULL_CHROMIUM 0x908B
- BOUNDING_BOX_CHROMIUM 0x908D
-
- Accepted by the <coverMode> parameter of CoverFillPathInstancedCHROMIUM,
- CoverStrokePathInstanced, StencilThenCoverFillPathInstancedCHROMIUM and
- StencilThenCoverStrokePathInstancedCHROMIUM:
- CONVEX_HULL_CHROMIUM see above
- BOUNDING_BOX_CHROMIUM see above
- BOUNDING_BOX_OF_BOUNDING_BOXES_CHROMIUM 0x909C
-
- Accepted by the <genMode> parameter of ProgramPathFragmentInputGen:
- EYE_LINEAR_CHROMIUM 0x2400
- OBJECT_LINEAR_CHROMIUM 0x2401
- CONSTANT_CHROMIUM 0x8576
-
- Accepted by the <transformType> parameter of
- StencilFillPathInstancedCHROMIUM, StencilStrokePathInstancedCHROMIUM,
- CoverFillPathInstancedCHROMIUM, CoverStrokePathInstancedCHROMIUM,
- StencilThenCoverFillPathInstancedCHROMIUM and
- StencilThenCoverStrokePathInstancedCHROMIUM:
- TRANSLATE_X_CHROMIUM 0x908E
- TRANSLATE_Y_CHROMIUM 0x908F
- TRANSLATE_2D_CHROMIUM 0x9090
- TRANSLATE_3D_CHROMIUM 0x9091
- AFFINE_2D_CHROMIUM 0x9092
- AFFINE_3D_CHROMIUM 0x9094
- TRANSPOSE_AFFINE_2D_CHROMIUM 0x9096
- TRANSPOSE_AFFINE_3D_CHROMIUM 0x9098
-
-New Procedures and Functions
-
- void MatrixLoadfCHROMIUM(enum matrixMode, float* matrix)
-
- Takes a pointer to a 4x4 matrix stored in column-major order as 16
- consecutive floating-point values. The matrixMode specifies which
- matrix, PATH_MODELVIEW_CHROMIUM or PATH_PROJECTION_CHROMIUM is used.
-
- The function specifies either modelview or projection matrix
- to be used with path rendering API calls.
-
- void MatrixLoadIdentityCHROMIUM(enum matrixMode)
-
- Effectively calls MatrixLoadf with the identity matrix.
-
- uint GenPathsCHROMIUM(sizei range)
-
- Returns an integer /n/ such that names /n/, ..., /n+range-1/ are
- previously unused (i.e. there are /range/ previously unused path object
- names starting at /n/). These names are marked as used, for the
- purposes of subsequent GenPathsCHROMIUM only, but they do not acquire
- path object state until each particular name is used to specify
- a path object.
-
- Returns 0 if no new path name was marked as used. Reasons for this
- include lack of free path names or range being 0 or a GL error
- was generated.
-
- INVALID_VALUE error is generated if range is negative.
-
- INVALID_OPERATION error is generated if range does not fit in
- 32-bit uint.
-
- void DeletePathsCHROMIUM(uint path, sizei range)
-
- Deletes a path object where /path/ contains /range/ names of path objects to
- be deleted. After a path object is deleted, its name is again unused.
- Unused names in /paths/ are silently ignored.
-
- INVALID_VALUE error is generated if /range/ is negative.
-
- INVALID_OPERATION error is generated if /range/ does not
- fit in 32-bit uint.
-
- INVALID_OPERATION error is generated if /path/ + /range/ does not fit
- 32-bit uint.
-
- boolean IsPathCHROMIUM(uint path);
-
- The query returns TRUE if /path/ is the name of a path object. If path is
- not the name of a path object, or if an error condition occurs,
- IsPathCHROMIUM returns FALSE. A name returned by GenPathsCHROMIUM, but
- without a path specified for it yet, is not the name of a path object.
-
- void PathCommandsCHROMIUM(uint path, sizei numCommands,
- const ubyte* commands, sizei numCoords,
- enum coordType, const GLvoid* coords)
-
- Specifies a path object commands for /path/ where /numCommands/
- indicates the number of path commands, read from the array
- /commands/, with which to initialize that path's command sequence.
- The type of the coordinates read from the /coords/ array is
- determined by the /coordType/ parameter which must be one of BYTE,
- UNSIGNED_BYTE, SHORT, UNSIGNED_SHORT, or FLOAT, otherwise the
- INVALID_ENUM error is generated. These path commands reference
- coordinates read sequentially from the /coords/ array.
-
- The /numCommands/ elements of the /commands/ array must be tokens
- in Table 5.pathCommands. The command sequence matches
- the element order of the /commands/ array. Each command references
- a number of coordinates specified by "Coordinate count" column of
- Table 5.pathCommands, starting with the first (zero) element of
- the /coords/ array and advancing by the coordinate count for each
- command. If any of these /numCommands/ command values are not
- listed in the "Token" column of Table
- 5.pathCommands, the INVALID_ENUM error is generated.
-
- The INVALID_OPERATION error is generated if /numCoords/ does not
- equal the number of coordinates referenced by the command sequence
- specified by /numCommands/ and /commands/ (so /numCoords/ provides a
- sanity check that the /coords/ array is being interpreted properly).
- The error INVALID_VALUE is generated if either /numCommands/ or
- /numCoords/ is negative.
-
- The error INVALID_OPERATION is generated if /path/ is
- not an existing path object.
-
- The error INVALID_OPERATION is generated if
- /numCommands/ + (size of /coordType/ data type) * /numCoords/
- does not fit in 32-bit uint.
-
- If the PathCommandsCHROMIUM command results in an error, the path object
- named /path/ is not changed; if there is no error, the prior contents
- of /path/, if /path/ was an existent path object, are lost and the
- path object name /path/ becomes used.
-
- void PathParameterfCHROMIUM(uint path, enum pname, float value)
- void PathParameteriCHROMIUM(uint path, enum pname, int value)
-
- The commands specify the value of path parameters for the specified path
- object named /path/. The error INVALID_OPERATION is generated if /path/ is
- not an existing path object.
-
- Each parameter has a single (scalar) value.
-
- /pname/ must be one of the tokens in the "Name" column of
- Table 5.pathParameters.
- The required values or range of each allowed parameter name token
- is listed in Table 5.pathParameter's "Required Values/Range" column.
-
- For values of /pname/ listed in Table 5.pathsParameters, the specified
- parameter is specified by /value/ when /value/ is a float or int,
- or if /value/ is a pointer to a float or int, accessed through that
- pointer. The error INVALID_VALUE is generated if the specified
- value is negative for parameters required to be non-negative in
- Table 5.pathParameters.
-
- The error INVALID_VALUE is generated if the specified parameter value
- is not within the required range for parameters typed float or integer.
- The error INVALID_ENUM is generated if the specified parameter value
- is not one of the listed tokens for parameters typed enum.
-
- void PathStencilFuncCHROMIUM(enum func, int ref, uint mask)
-
- Configures the stencil function, stencil reference value, and stencil read
- mask to be used by the StencilFillPathCHROMIUM and StencilStrokePathCHROMIUM
- commands described subsequently. The parameters accept the same values
- allowed by the StencilFunc command.
-
- void StencilFillPathCHROMIUM(uint path, enum fillMode, uint mask)
-
- The function transforms into window space the outline of the path object
- named /path/ based on the current modelview, projection and viewport,
- transforms (ignoring any vertex and/or geometry shader or program that might
- be active/enabled) and then updates the stencil values of all /accessible
- samples/ (explained below) in the framebuffer. Each sample's stencil buffer
- value is updated based on the winding number of that sample with respect to
- the transformed outline of the path object with any non-closed subpath
- forced closed and the specified /fillMode/.
-
- If /path/ does not name an existing path object, the command does
- nothing (and no error is generated).
-
- If the path's command sequence specifies unclosed subpaths (so not
- contours) due to MOVE_TO_CHROMIUM commands, such subpaths are trivially
- closed by connecting with a line segment the initial and terminal
- control points of each such path command subsequence.
-
- Transformation of a path's outline works by taking all positions on the
- path's outline in 2D path space (x,y) and constructing an object space
- position (x,y,0,1) that is then used similar to as with the (xo,yo,zo,wo)
- position in section 2.12 ("Fixed-Function Vertex Transformation") of OpenGL
- 3.2 (unabridged) Specification (Special Functions) to compute corresponding
- eye-space coordinates (xe,ye,ze,we) and clip-space coordinates
- (xc,yc,zc,wc). A path outline's clip-space coordinates are further
- transformed into window space similar to as described in section 2.16
- ("Coordinate Transformations"). This process provides a mapping 2D path
- coordinates to 2D window coordinates. The resulting 2D window coordinates
- are undefined if any of the transformations involved are singular or may be
- inaccurate if any of the transformations (or their combination) are
- ill-conditioned.
-
- The winding number for a sample with respect to the path outline,
- transformed into window space, is computed by counting the (signed)
- number of revolutions around the sample point when traversing each
- (trivially closed if necessary) contour once in the transformed path.
- This traversal is performed in the order of the path's command
- sequence. Starting from an initially zero winding count, each
- counterclockwise revolution when the front face mode is CCW (or
- clockwise revolution when the front face mode is CW) around the sample
- point increments the winding count by one; while each clockwise
- revolution when the front face mode is CCW (or counterclockwise
- revolution when the front face mode is CW) around the sample point
- decrements the winding count by one.
-
- The /mask/ parameter controls what subset of stencil bits are affected
- by the command.
-
- The /fillMode/ parameter must be one of INVERT, COUNT_UP_CHROMIUM
- or COUNT_DOWN_CHROMIUM; otherwise the INVALID_ENUM error
- is generated. INVERT inverts the bits set in the effective /mask/
- value for each sample's stencil value if the winding number for the
- given sample is odd. COUNT_UP_CHROMIUM adds with modulo n arithmetic the
- winding number of each sample with the sample's prior stencil buffer
- value; the result of this addition is written into the sample's
- stencil value but the bits of the stencil value not set in the
- effective /mask/ value are left unchanged. COUNT_DOWN_CHROMIUM subtracts
- with modulo /n/ arithmetic the winding number of each sample with the
- sample's prior stencil buffer value; the result of this subtraction is
- written into the sample's stencil value but the bits of the stencil
- value not set in the effective /mask/ value are left unchanged.
-
- The value of /n/ for the modulo /n/ arithmetic used by COUNT_UP_CHROMIUM
- and COUNT_DOWN_CHROMIUM is the effective /mask/+1. The error INVALID_VALUE
- is generated if /fillMode/ is COUNT_UP_CHROMIUM or COUNT_DOWN_CHROMIUM and
- the effective /mask/+1 is not an integer power of two.
-
- ACCESSIBLE SAMPLES WITH RESPECT TO A TRANSFORMED PATH
-
- The accessible samples of a transformed path that are updated are
- the samples that remain after discarding the following samples:
-
- * Any sample that would be clipped similar to as specified in section
- 2.22 ("Primitive Clipping") of OpenGL 3.2 (unabridged) Specification
- (Special Functions) because its corresponding position in clip space
- (xc,yc,zc,wc) or (xe,ye,ze,we) would be clipped by the clip volume
- or enabled client-defined clip planes.
-
- * Any sample that would fail the pixel ownership test (section
- 4.1.1) if rasterized.
-
- * Any sample that would fail the scissor test (section 4.1.2)
- if SCISSOR_TEST is enabled.
-
- And for the StencilFillPathCHROMIUM and StencilStrokePathCHROMIUM commands
- (so not applicable to the CoverFillPathCHROMIUM and CoverStrokePathCHROMIUM
- commands):
- * Any sample that would fail the (implicitly enabled) stencil test
- with the stencil function configured based on the path stencil
- function state configured by PathStencilFuncCHROMIUM. In the case
- of the StencilFillPathCHROMIUM and StencilStrokePathCHROMIUM
- commands and their instanced versions, the effective stencil read
- mask for the stencil mask is treated as the value of
- PATH_STENCIL_VALUE_MASK bit-wise ANDed with the bit-invert of the
- effective /mask/ parameter value; otherwise, for the cover commands,
- the stencil test operates normally. In the case the stencil test
- fails during a path stencil operation, the stencil fail operation is
- ignored and the pixel's stencil value is left undisturbed (as if the
- stencil operation was KEEP).
-
- * The state of the face culling (CULL_FACE) enable is ignored.
-
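A two-line sketch of the stenciling step just described, followed by the corresponding cover call, assuming a path created as in the earlier sketch and an 8-bit stencil buffer. The 0xFF mask keeps the effective mask + 1 a power of two as required above; the token names are assumed to be the GL_-prefixed forms of the tokens listed at the top of this file.

    /* Count winding numbers modulo 256 into the stencil buffer, then cover
       the filled region using the path's bounding box. */
    glStencilFillPathCHROMIUM(path, GL_COUNT_UP_CHROMIUM, 0xFF);
    glCoverFillPathCHROMIUM(path, GL_BOUNDING_BOX_CHROMIUM);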
- void StencilStrokePathCHROMIUM(uint path, int reference, uint mask)
-
- Transforms into window space the stroked region of the path object named
- /path/ based on the current modelview, projection and viewport transforms
- (ignoring any vertex and/or geometry shader or program that might be
- active/enabled) and then updates the stencil values of a subset of the
- accessible samples (see above) in the framebuffer.
-
- If /path/ does not name an existing path object, the command does
- nothing (and no error is generated).
-
- The path object's specified stroke width (in path space) determines
- the width of the path's stroked region.
-
- The stroke of a transformed path's outline
- is the region of window space defined by the union of:
-
- * Sweeping an orthogonal centered line segment of the (above
- determined) effective stroke width along each path segment
- in the path's transformed outline.
-
- * End cap regions (explained below) appended to the initial
- and terminal control points of non-closed command sequences
- in the path. For a sequence of commands that form a closed
- contour, the end cap regions are ignored.
-
- * Join style regions (explained below) where connected path
- segments meet.
-
- Any accessible samples within the union of these three regions are
- considered within the path object's stroke.
-
- If the stroke width is zero, each of the regions in the union will
- be empty and there are no accessible samples within the stroke.
-
- The /mask/ parameter controls what subset of stencil bits are affected
- by the command.
-
- A sample's stencil bits that are set in the effective /mask/ value
- are updated with the specified stencil /reference/ value if the
- sample is accessible (as specified above) and within the stroke of
- the transformed path's outline.
-
- Every path object has an end caps parameter
- (PATH_END_CAPS_CHROMIUM) that is one of FLAT_CHROMIUM,
- SQUARE_CHROMIUM or ROUND_CHROMIUM. This parameter defines the
- initial and terminal caps type. There are no samples within a
- FLAT_CHROMIUM cap. The SQUARE_CHROMIUM cap extends centered and
- tangent to the given end (initial or terminal) of the subpath for
- half the effective stroke width; in other words, a square cap is a
- half-square that kisses watertightly the end of a subpath. The
- ROUND_CHROMIUM cap appends a semi-circle, centered and tangent,
- with the diameter of the effective stroke width to the given end
- (initial or terminal) of the subpath; in other words, a round cap
- is a semi-circle that kisses watertightly the end of a subpath.
-
- Every path object has a join style that is one of BEVEL_CHROMIUM,
- ROUND_CHROMIUM or MITER_REVERT_CHROMIUM. Each path object also has a miter
- limit value. The BEVEL_CHROMIUM join style inserts a triangle with two
- vertices at the outside corners where two connected path segments join and a
- third vertex at the common end point shared by the two path segments. The
- ROUND_CHROMIUM join style inserts a wedge-shaped portion of a circle
- centered at the common end point shared by the two path segments; the radius
- of the circle is half the effective stroke width. The MITER_REVERT_CHROMIUM
- join style inserts a quadrilateral with two opposite vertices at the outside
- corners where the two connected path segments join and two opposite vertices
- with one on the path's junction between the two joining path segments and
- the other at the common end point shared by the two path segments. However,
- the MITER_REVERT_CHROMIUM join style behaves as the BEVEL_CHROMIUM style if
- the sine of half the angle between the two joined segments is less than the
- path object's PATH_STROKE_WIDTH value divided by the path's
- PATH_MITER_LIMIT_CHROMIUM value.
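-
- As a non-normative sketch, the stroke width, end caps, join style and
- miter limit of a path object can be set with the path parameter
- commands (the values here are illustrative):
-
-     glPathParameterfCHROMIUM(path, GL_PATH_STROKE_WIDTH_CHROMIUM, 5.0f);
-     glPathParameteriCHROMIUM(path, GL_PATH_END_CAPS_CHROMIUM, GL_ROUND_CHROMIUM);
-     glPathParameteriCHROMIUM(path, GL_PATH_JOIN_STYLE_CHROMIUM, GL_MITER_REVERT_CHROMIUM);
-     glPathParameterfCHROMIUM(path, GL_PATH_MITER_LIMIT_CHROMIUM, 4.0f);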
-
- Every path object has a stroke approximation bound parameter
- (PATH_STROKE_BOUND_CHROMIUM) that is a floating-point value /sab/ clamped
- between 0.0 and 1.0 and set and queried with the PATH_STROKE_BOUND_CHROMIUM
- path parameter. Exact determination of the samples swept by an
- orthogonal centered line segment along cubic Bezier segments and
- rational quadratic Bezier curves (i.e. non-circular partial elliptical arcs) is
- intractable for real-time rendering so an approximation is required;
- /sab/ intuitively bounds the approximation error as a percentage of
- the path object's stroke width. Specifically, this path parameter
- requests the implementation to stencil any samples within /sweep/
- object space units of the exact sweep of the path's cubic Bezier
- segments or partial elliptical arcs to be sampled by the stroke where
-
- sweep = ((1-sab)*sw)/2
-
- where /sw/ is the path object's stroke width. The initial value
- of /sab/ when a path is created is 0.2. In practical terms, this
- initial value means that sample positions within 80%
- (100%-20%) of the stroke width of cubic and rational quadratic stroke
- segments should be stenciled.
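-
- For example (non-normative), with a stroke width /sw/ of 4.0 and
-
-     glPathParameterfCHROMIUM(path, GL_PATH_STROKE_BOUND_CHROMIUM, 0.1f);
-
- the requested bound is sweep = ((1-0.1)*4.0)/2 = 1.8 object space units.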
-
-
- void CoverFillPathCHROMIUM(uint path, enum coverMode)
-
- The command transforms into window space the outline of the path object
- named /path/ based on the current modelview, projection and viewport
- transforms (ignoring any vertex and/or geometry shader or program that might
- be active/enabled) and rasterizes a subset of the accessible samples in the
- framebuffer guaranteed to include all samples that would have a net
- stencil value change if StencilFillPathCHROMIUM were issued with the same
- modelview, projection, and viewport state. During this rasterization, the
- stencil test operates normally and as configured; the expectation is the
- stencil test will be used to discard samples not determined "covered" by a
- prior StencilFillPathCHROMIUM command.
-
- If /path/ does not name an existing path object, the command does
- nothing (and no error is generated).
-
- /coverMode/ must be one of CONVEX_HULL_CHROMIUM or BOUNDING_BOX_CHROMIUM.
- Otherwise, INVALID_ENUM error is generated.
-
- The subset of accessible pixels that are rasterized is within a bounding
- box (expected to be reasonably tight) surrounding all the samples guaranteed
- to be rasterized by CoverFillPathCHROMIUM. The bounding box must be
- orthogonally aligned to the path space coordinate system. (The area of the
- bounding box in path space is guaranteed to be greater than or equal to the
- area of the convex hull in path space.) Each rasterized sample will be
- rasterized exactly once.
-
- While samples with a net stencil change /must/ be rasterized,
- implementations are explicitly allowed to vary in the rasterization
- of samples for which StencilFillPathCHROMIUM would /not/ change a sample's
- net stencil value. This means implementations are allowed to (and,
- in fact, are expected to) conservatively "exceed" the region strictly
- stenciled by the path object.
-
- CoverFillPathCHROMIUM /requires/ the following rasterization invariance:
- calling CoverFillPathCHROMIUM for the same (unchanged) path object with
- fixed (unchanged) modelview, projection, and viewport transform state
- with the same (unchanged) set of accessible samples will rasterize
- the exact same set of samples with identical interpolated values
- for respective fragment/sample locations.
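-
- A minimal, non-normative "stencil, then cover" sequence for filling a
- path might look like the following; the stencil test discards samples
- whose net stencil value is still zero, and the cover pass resets the
- touched stencil values back to zero:
-
-     glEnable(GL_STENCIL_TEST);
-     glPathStencilFuncCHROMIUM(GL_ALWAYS, 0, 0xFF);
-     glStencilFillPathCHROMIUM(path, GL_COUNT_UP_CHROMIUM, 0xFF);
-     glStencilFunc(GL_NOTEQUAL, 0, 0xFF);
-     glStencilOp(GL_KEEP, GL_KEEP, GL_ZERO);
-     glCoverFillPathCHROMIUM(path, GL_BOUNDING_BOX_CHROMIUM);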
-
- void CoverStrokePathCHROMIUM(uint path, enum coverMode)
-
- The command operates in the same manner as CoverFillPathCHROMIUM except the
- region guaranteed to be rasterized is, rather than the region within
- /path/'s filled outline, instead the region within the /path/'s stroked
- region as determined by StencilStrokePathCHROMIUM. During this
- rasterization, the stencil test operates normally and as configured; the
- expectation is the stencil test will be used to discard samples not
- determined "covered" by a prior StencilStrokePathCHROMIUM command.
-
- If /path/ does not name an existing path object, the command does
- nothing (and no error is generated).
-
- /coverMode/ must be one of CONVEX_HULL_CHROMIUM or BOUNDING_BOX_CHROMIUM.
- Otherwise, INVALID_ENUM error is generated.
-
- Analogous to the rasterization guarantee of CoverFillPathCHROMIUM with
- respect to StencilFillPathCHROMIUM, CoverStrokePathCHROMIUM guarantees that
- all samples rasterized by StencilStrokePathCHROMIUM, given the same
- transforms and accessible pixels and stroke width, will also be rasterized
- by the corresponding CoverStrokePathCHROMIUM.
-
- CoverStrokePathCHROMIUM /requires/ the following rasterization invariance:
- calling CoverStrokePathCHROMIUM for the same (unchanged) path object with
- fixed (unchanged) modelview, projection, and viewport transform state and
- with the same (unchanged) set of accessible samples will rasterize the exact
- same set of samples with identical interpolated values for respective
- fragment/sample locations.
-
- void StencilThenCoverFillPathCHROMIUM(uint path, enum fillMode, uint mask, enum coverMode)
-
- The command is equivalent to the two commands
-
- StencilFillPathCHROMIUM(path, fillMode, mask);
- CoverFillPathCHROMIUM(path, coverMode);
-
- unless either command would generate an error; for any such error
- other than OUT_OF_MEMORY, only that error is generated.
-
- void StencilThenCoverStrokePathCHROMIUM(uint path, int reference, uint mask, enum coverMode)
-
- The command is equivalent to the two commands
-
- StencilStrokePathCHROMIUM(path, reference, mask);
- CoverStrokePathCHROMIUM(path, coverMode);
-
- unless either command would generate an error; for any such error
- other than OUT_OF_MEMORY, only that error is generated.
-
- void StencilFillPathInstancedCHROMIUM(sizei numPaths,
- enum pathNameType,
- const void *paths,
- uint pathBase,
- enum fillMode, uint mask,
- enum transformType,
- const float *transformValues);
-
- The command stencils a sequence of filled paths.
-
- The /numPaths/ must be >= 0. Otherwise an INVALID_VALUE error is
- generated.
-
- The /numPaths/ must fit in a 32-bit uint. Otherwise
- INVALID_OPERATION is generated.
-
- The /pathNameType/ determines the type of elements of the /paths/
- array and must be one of UNSIGNED_BYTE, BYTE, UNSIGNED_SHORT, SHORT,
- UNSIGNED_INT or INT. Otherwise INVALID_ENUM error is generated.
-
- The /pathBase/ is an offset added to each of the /numPaths/ path names
- read from the /paths/ array. Each result is a 2's complement integer
- and is cast to a uint path name.
-
- The /transformType/ must be one of NONE, TRANSLATE_X_CHROMIUM,
- TRANSLATE_Y_CHROMIUM, TRANSLATE_2D_CHROMIUM, TRANSLATE_3D_CHROMIUM,
- AFFINE_2D_CHROMIUM, AFFINE_3D_CHROMIUM, TRANSPOSE_AFFINE_2D_CHROMIUM, or
- TRANSPOSE_AFFINE_3D_CHROMIUM. Otherwise INVALID_ENUM error is generated.
-
- The /fillMode/ and /mask/ are validated identically to the same-named
- parameters of StencilFillPathCHROMIUM.
-
- The /numPaths/ * (size of /pathNameType/ data type) + /numPaths/ *
- (size of float) * (component count of /transformType/) must fit into a
- 32-bit uint. Otherwise INVALID_OPERATION is generated.
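-
- For example (non-normative), /numPaths/ = 1000 with /pathNameType/
- UNSIGNED_INT (4 bytes) and /transformType/ TRANSLATE_2D_CHROMIUM
- (2 components) requires 1000*4 + 1000*4*2 = 12000 bytes, which fits.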
-
- The StencilFillPathInstancedCHROMIUM command is equivalent to:
-
- float dm[16];
- GetFloatv(PATH_MODELVIEW_MATRIX, dm);
- const float *v = transformValues;
- for (int i = 0; i<numPaths; i++) {
- if (!applyPathTransform(dm, transformType, &v)) {
- return;
- }
- uint pathName;
- if (!getPathName(pathNameType, &paths, pathBase, &pathName)) {
- return;
- }
- if (IsPathCHROMIUM(pathName)) {
- StencilFillPathCHROMIUM(pathName, fillMode, mask);
- }
- }
- glMatrixLoadfCHROMIUM(PATH_MODELVIEW_CHROMIUM, dm);
-
- assuming these helper functions for applyPathTransform and
- getPathName:
-
- bool applyPathTransform(const float dm[], enum transformType, const float** v)
- {
- float m[16] = { 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1 };
-
- switch (transformType) {
- case NONE:
- break;
- case TRANSLATE_X_CHROMIUM:
- m[12] = (*v)[0];
- *v += 1;
- break;
- case TRANSLATE_Y_CHROMIUM:
- m[13] = (*v)[0];
- *v += 1;
- break;
- case TRANSLATE_2D_CHROMIUM:
- m[12] = (*v)[0];
- m[13] = (*v)[1];
- *v += 2;
- break;
- case TRANSLATE_3D_CHROMIUM:
- m[12] = (*v)[0];
- m[13] = (*v)[1];
- m[14] = (*v)[2];
- *v += 3;
- break;
- case AFFINE_2D_CHROMIUM:
- m[0] =(*v)[0]; m[4] =(*v)[2]; m[8] =0; m[12]=(*v)[4];
- m[1] =(*v)[1]; m[5] =(*v)[3]; m[9] =0; m[13]=(*v)[5];
- m[2] =0 ; m[6] =0; m[10]=1; m[14]=0;
- m[3] =0; m[7] =0; m[11]=0; m[15]=1;
- *v += 6;
- break;
- case TRANSPOSE_AFFINE_2D_CHROMIUM:
- m[0] =(*v)[0]; m[4] =(*v)[1]; m[8] =0; m[12]=(*v)[2];
- m[1] =(*v)[3]; m[5] =(*v)[4]; m[9] =0; m[13]=(*v)[5];
- m[2] =0; m[6] =0; m[10]=1; m[14]=0;
- m[3] =0; m[7] =0; m[11]=0; m[15]=1;
- *v += 6;
- break;
- case AFFINE_3D_CHROMIUM:
- m[0] =(*v)[0]; m[4] =(*v)[3]; m[8] =(*v)[6]; m[12]=(*v)[9];
- m[1] =(*v)[1]; m[5] =(*v)[4]; m[9] =(*v)[7]; m[13]=(*v)[10];
- m[2] =(*v)[2]; m[6] =(*v)[5]; m[10]=(*v)[8]; m[14]=(*v)[11];
- m[3] =0; m[7] =0; m[11]=1; m[15]=0;
- *v += 12;
- break;
- case TRANSPOSE_AFFINE_3D_CHROMIUM:
- m[0] =(*v)[0]; m[4] =(*v)[1]; m[8] =(*v)[2]; m[12]=(*v)[3];
- m[1] =(*v)[4]; m[5] =(*v)[5]; m[9] =(*v)[6]; m[13]=(*v)[7];
- m[2] =(*v)[8]; m[6] =(*v)[9]; m[10]=(*v)[10]; m[14]=(*v)[11];
- m[3] =0; m[7] =0; m[11]=1; m[15]=0;
- *v += 12;
- break;
- default:
- setError(INVALID_ENUM);
- return FALSE;
- }
- multiplyMatrix(dm, m, m); // Multiplies dm and m and stores result to m.
- glMatrixLoadfCHROMIUM(PATH_MODELVIEW_CHROMIUM, m);
- return TRUE;
- }
-
- bool getPathName(enum pathNameType, const void** paths,
- uint pathBase, uint* pathName)
- {
- switch (pathNameType) {
- case BYTE:
- {
- const byte *p = (const byte*)*paths;
- *pathName = pathBase + p[0];
- *paths = p+1;
- break;
- }
- case UNSIGNED_BYTE:
- {
- const ubyte *p = (const ubyte*)*paths;
- *pathName = pathBase + p[0];
- *paths = p+1;
- break;
- }
- case SHORT:
- {
- const short *p = (const short*)*paths;
- *pathName = pathBase + p[0];
- *paths = p+1;
- break;
- }
- case UNSIGNED_SHORT:
- {
- const ushort *p = (const ushort*)*paths;
- *pathName = pathBase + p[0];
- *paths = p+1;
- break;
- }
- case INT:
- {
- const int *p = (const int*)*paths;
- *pathName = pathBase + p[0];
- *paths = p+1;
- break;
- }
- case UNSIGNED_INT:
- {
- const uint *p = (const uint*)*paths;
- *pathName = pathBase + p[0];
- *paths = p+1;
- break;
- }
- default:
- setError(INVALID_ENUM);
- return FALSE;
- }
- return TRUE;
- }
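-
- As a non-normative sketch (firstGlyphPath and the index/translation
- arrays are illustrative), a run of three path "glyphs" can be
- stenciled and covered with per-path 2D translations:
-
-     const GLuint indices[3] = {7, 4, 2};   /* added to firstGlyphPath */
-     const GLfloat xy[6] = {0.0f, 0.0f, 32.0f, 0.0f, 64.0f, 0.0f};
-     glStencilFillPathInstancedCHROMIUM(3, GL_UNSIGNED_INT, indices,
-                                        firstGlyphPath, GL_COUNT_UP_CHROMIUM, 0xFF,
-                                        GL_TRANSLATE_2D_CHROMIUM, xy);
-     glCoverFillPathInstancedCHROMIUM(3, GL_UNSIGNED_INT, indices,
-                                      firstGlyphPath,
-                                      GL_BOUNDING_BOX_OF_BOUNDING_BOXES_CHROMIUM,
-                                      GL_TRANSLATE_2D_CHROMIUM, xy);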
-
-
- void StencilStrokePathInstancedCHROMIUM(sizei numPaths,
- enum pathNameType,
- const void *paths,
- uint pathBase,
- int reference, uint mask,
- enum transformType,
- const float *transformValues);
-
- The command stencils a sequence of stroked paths.
-
- The command verifies /numPaths/, /pathNameType/ and
- /transformType/ similarly to StencilFillPathInstancedCHROMIUM.
-
- The command is equivalent to:
-
- float dm[16];
- GetFloatv(PATH_MODELVIEW_MATRIX, dm);
- const float *v = transformValues;
- for (int i = 0; i<numPaths; i++) {
- if (!applyPathTransform(dm, transformType, &v)) {
- return;
- }
- uint pathName;
- if (!getPathName(pathNameType, &paths, pathBase, &pathName)) {
- return;
- }
- if (IsPathCHROMIUM(pathName)) {
- StencilStrokePathCHROMIUM(pathName, reference, mask);
- }
- }
- glMatrixLoadfCHROMIUM(PATH_MODELVIEW_CHROMIUM, dm);
-
- assuming the helper functions applyPathTransform and
- getPathName defined above.
-
- void CoverFillPathInstancedCHROMIUM(sizei numPaths,
- enum pathNameType,
- const void *paths,
- uint pathBase,
- enum coverMode,
- enum transformType,
- const float *transformValues);
-
- The command covers a sequence of filled paths.
-
- The command verifies /numPaths/, /pathNameType/ and
- /transformType/ similarly to StencilFillPathInstancedCHROMIUM.
-
- The command is equivalent to:
-
- if (coverMode == BOUNDING_BOX_OF_BOUNDING_BOXES_CHROMIUM) {
- renderBoundingBox(FALSE,
- numPaths,
- pathNameType,
- paths,
- pathBase,
- transformType, transformValues);
- } else if (coverMode == CONVEX_HULL_CHROMIUM || coverMode == BOUNDING_BOX_CHROMIUM) {
- float dm[16];
- GetFloatv(PATH_MODELVIEW_MATRIX, dm);
- const float *v = transformValues;
- for (int i = 0; i<numPaths; i++) {
- if (!applyPathTransform(dm, transformType, &v)) {
- return;
- }
- uint pathName;
- if (!getPathName(pathNameType, &paths, pathBase, &pathName)) {
- return;
- }
- if (IsPathCHROMIUM(pathName)) {
- CoverFillPathCHROMIUM(pathName, coverMode);
- }
- }
- glMatrixLoadfCHROMIUM(PATH_MODELVIEW_CHROMIUM, dm);
- } else {
- setError(INVALID_ENUM);
- }
-
-
- assuming the helper functions applyPathTransform and
- getPathName defined above, as well as:
-
- void renderBoundingBox(bool shouldRenderStroke,
- sizei numPaths,
- enum pathNameType,
- const uint *paths,
- uint pathBase,
- enum transformType,
- const float *transformValues)
- {
- boolean hasBounds = FALSE;
- float boundsUnion[4], bounds[4];
-
- const float *v = transformValues;
- for (int i = 0; i<numPaths; i++) {
- uint pathName;
- if (!getPathName(pathNameType, paths, pathBase, &pathName)) {
- return;
- }
- if (IsPathCHROMIUM(pathName)) {
- GetPathBoundingBox(pathName, shouldRenderStroke, bounds);
- switch (transformType) {
- case NONE:
- break;
- case TRANSLATE_X_CHROMIUM:
- bounds[0] += v[0];
- bounds[2] += v[0];
- v += 1;
- break;
- case TRANSLATE_Y_CHROMIUM:
- bounds[1] += v[0];
- bounds[3] += v[0];
- v += 1;
- break;
- case TRANSLATE_2D_CHROMIUM:
- bounds[0] += v[0];
- bounds[1] += v[1];
- bounds[2] += v[0];
- bounds[3] += v[1];
- v += 2;
- break;
- case TRANSLATE_3D_CHROMIUM: // ignores v[2]
- bounds[0] += v[0];
- bounds[1] += v[1];
- bounds[2] += v[0];
- bounds[3] += v[1];
- v += 3;
- break;
- case AFFINE_2D_CHROMIUM:
- bounds[0] = bounds[0]*v[0] + bounds[0]*v[2] + v[4];
- bounds[1] = bounds[1]*v[1] + bounds[1]*v[3] + v[5];
- bounds[2] = bounds[2]*v[0] + bounds[2]*v[2] + v[4];
- bounds[3] = bounds[3]*v[1] + bounds[3]*v[3] + v[5];
- v += 6;
- break;
- case TRANSPOSE_AFFINE_2D_CHROMIUM:
- bounds[0] = bounds[0]*v[0] + bounds[0]*v[1] + v[2];
- bounds[1] = bounds[1]*v[3] + bounds[1]*v[4] + v[5];
- bounds[2] = bounds[2]*v[0] + bounds[2]*v[1] + v[2];
- bounds[3] = bounds[3]*v[3] + bounds[3]*v[4] + v[5];
- v += 6;
- break;
- case AFFINE_3D_CHROMIUM: // ignores v[2], v[5], v[6..8], v[11]
- bounds[0] = bounds[0]*v[0] + bounds[0]*v[3] + v[9];
- bounds[1] = bounds[1]*v[1] + bounds[1]*v[4] + v[10];
- bounds[2] = bounds[2]*v[0] + bounds[2]*v[3] + v[9];
- bounds[3] = bounds[3]*v[1] + bounds[3]*v[4] + v[10];
- v += 12;
- break;
- case TRANSPOSE_AFFINE_3D_CHROMIUM: // ignores v[2], v[6], v[8..11]
- bounds[0] = bounds[0]*v[0] + bounds[0]*v[1] + v[3];
- bounds[1] = bounds[1]*v[4] + bounds[1]*v[5] + v[7];
- bounds[2] = bounds[2]*v[0] + bounds[2]*v[1] + v[3];
- bounds[3] = bounds[3]*v[4] + bounds[3]*v[5] + v[7];
- v += 12;
- break;
- default:
- setError(INVALID_ENUM);
- return;
- }
- if (bounds[0] > bounds[2]) {
- float t = bounds[2];
- bounds[2] = bounds[0];
- bounds[0] = t;
- }
- if (bounds[1] > bounds[3]) {
- float t = bounds[3];
- bounds[3] = bounds[1];
- bounds[1] = t;
- }
- if (hasBounds) {
- if (bounds[0] < boundsUnion[0]) {
- boundsUnion[0] = bounds[0];
- }
- if (bounds[1] < boundsUnion[1]) {
- boundsUnion[1] = bounds[1];
- }
- if (bounds[2] > boundsUnion[2]) {
- boundsUnion[2] = bounds[2];
- }
- if (bounds[3] > boundsUnion[3]) {
- boundsUnion[3] = bounds[3];
- }
- } else {
- for (int i=0; i<4; i++) {
- boundsUnion[i] = bounds[i];
- }
- hasBounds = TRUE;
- }
- }
- }
- if (hasBounds) {
- Rectf(boundsUnion[0], boundsUnion[1], boundsUnion[2], boundsUnion[3]);
- }
- }
-
- Where the helper GetPathBoundingBox returns the bounding box for the path
- with or without stroking, and Rectf renders a rectangle.
-
- /coverMode/ must be one of CONVEX_HULL_CHROMIUM, BOUNDING_BOX_CHROMIUM or
- BOUNDING_BOX_OF_BOUNDING_BOXES_CHROMIUM. Otherwise, an INVALID_ENUM error is generated.
-
- void CoverStrokePathInstancedCHROMIUM(sizei numPaths,
- enum pathNameType,
- const void *paths,
- uint pathBase,
- enum coverMode,
- enum transformType,
- const float *transformValues);
-
- The command covers a sequence of stroked paths.
-
- The command verifies /numPaths/, /pathNameType/ and
- /transformType/ similarly to StencilFillPathInstancedCHROMIUM.
-
- The command is equivalent to:
-
- if (coverMode == BOUNDING_BOX_OF_BOUNDING_BOXES_CHROMIUM) {
- renderBoundingBox(TRUE,
- numPaths,
- pathNameType, paths,
- pathBase,
- transformType, transformValues);
- } else if (coverMode == CONVEX_HULL_CHROMIUM || coverMode == BOUNDING_BOX_CHROMIUM) {
- float dm[16];
- GetFloatv(PATH_MODELVIEW_MATRIX, dm);
- const float *v = transformValues;
- for (int i = 0; i<numPaths; i++) {
- if (!applyPathTransform(dm, transformType, &v)) {
- return;
- }
- uint pathName;
- if (!getPathName(pathNameType, &paths, pathBase, &pathName)) {
- return;
- }
- if (IsPathCHROMIUM(pathName)) {
- CoverStrokePathCHROMIUM(pathName, coverMode);
- }
- }
- glMatrixLoadfCHROMIUM(PATH_MODELVIEW_CHROMIUM, dm);
- } else {
- setError(INVALID_ENUM);
- }
-
- assuming the helper functions defined above.
-
- /coverMode/ must be one of CONVEX_HULL_CHROMIUM, BOUNDING_BOX_CHROMIUM or
- BOUNDING_BOX_OF_BOUNDING_BOXES_CHROMIUM. Otherwise, an INVALID_ENUM error is generated.
-
-
- void StencilThenCoverFillPathInstancedCHROMIUM(sizei numPaths,
- enum pathNameType,
- const void *paths,
- uint pathBase,
- enum coverMode,
- enum fillMode,
- uint mask,
- enum transformType,
- const float *transformValues);
-
- The command is equivalent to the two commands
-
- StencilFillPathInstancedCHROMIUM(numPaths,
- pathNameType,
- paths,
- pathBase,
- fillMode,
- mask,
- transformType,
- transformValues);
- CoverFillPathInstancedCHROMIUM(numPaths,
- pathNameType,
- paths,
- pathBase,
- coverMode,
- transformType,
- transformValues);
-
- unless either command would generate an error; for any such error
- other than OUT_OF_MEMORY, only that error is generated.
-
-
- void StencilThenCoverStrokePathInstancedCHROMIUM(sizei numPaths,
- enum pathNameType,
- const void *paths,
- uint pathBase,
- enum coverMode,
- int reference,
- uint mask,
- enum transformType,
- const float *transformValues);
-
- The command is equivalent to the two commands
-
- StencilStrokePathInstancedCHROMIUM(numPaths,
- pathNameType,
- paths,
- pathBase,
- reference,
- mask,
- transformType,
- transformValues);
- CoverStrokePathInstancedCHROMIUM(numPaths,
- pathNameType,
- paths,
- pathBase,
- coverMode,
- transformType,
- transformValues);
-
- unless either command would generate an error; for any such error
- other than OUT_OF_MEMORY, only that error is generated.
-
- void BindFragmentInputLocationCHROMIUM(uint program, int location,
- const char* name);
-
- The call specifies that the fragment shader input varying named
- /name/ in program /program/ should be bound to location
- /location/ when the program is next linked. If /name/ was bound
- previously, its assigned binding is replaced with /location/. The
- /name/ must be a null terminated string. The error INVALID_VALUE
- is generated if /location/ is equal to or greater than
-
- MAX_VARYING_VECTORS * 4
-
- or less than 0. BindFragmentInputLocation has no effect until the
- program is linked. In particular, it doesn't modify the bindings of active
- fragment input variables in a program that has already been linked.
-
- The error INVALID_OPERATION is generated if /program/ is not the name of
- a program object.
-
- The error INVALID_OPERATION is generated if name starts with the reserved
- "gl_" prefix.
-
- When a program is linked, any active fragment inputs without a binding
- specified through BindFragmentInputLocation will automatically be bound to
- locations by the GL. Such bindings cannot be queried.
-
- BindFragmentInputLocation may be issued before any shader objects are
- attached to a program object. Hence it is allowed to bind any name (except
- a name starting with "gl_") to an index, including a name that is never used
- as a varying in the fragment shader object. Assigned bindings for varying
- variables that do not exist or are not active are ignored. Using such bindings
- behaves as if passed location was -1.
-
- It is possible for an application to bind more than one fragment
- input name to the same location. This is referred to as aliasing.
- This will only work if only one of the aliased fragment inputs is
- active in the executable program, or if no path through the shader
- consumes more than one fragment input of a set of fragment inputs
- aliased to the same location. If two statically used fragment
- inputs in a program are bound to the same location, link must
- fail.
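-
- A minimal, non-normative sketch (the varying name "v_texcoord" and
- location 2 are illustrative):
-
-     glBindFragmentInputLocationCHROMIUM(program, 2, "v_texcoord");
-     glLinkProgram(program);  /* the binding takes effect at link time */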
-
- void ProgramPathFragmentInputGenCHROMIUM(uint program,
- int location,
- enum genMode,
- int components,
- const float *coeffs);
-
- The command controls how a user-defined (non-built-in) fragment input of
- a GLSL program object is computed for fragment shading operations that occur
- as a result of CoverFillPathCHROMIUM or CoverStrokePathCHROMIUM.
-
- /program/ names a GLSL program object. If /program/ has not been
- successfully linked, the error INVALID_OPERATION is generated.
-
- The given fragment input generation state is loaded into the fragment
- input variable location identified by /location/. This location
- is a value bound with BindFragmentInputLocation.
-
- If the value of /location/ is -1, the ProgramPathFragmentInputGenCHROMIUM
- command is silently ignored, and the program's path fragment
- input generation state is not changed.
-
- If any of the following conditions occur, an INVALID_OPERATION error is
- generated by the ProgramPathFragmentInputGenCHROMIUM, and no state is
- changed:
-
- * if the size indicated by the /components/ parameter of the
- ProgramPathFragmentInputGenCHROMIUM command does not match the
- size of the fragment input scalar or vector declared in the
- shader,
-
- * if the fragment input declared in the shader is not a
- single-precision floating-point scalar or vector, or
-
- * if no fragment input variable with a location of /location/
- exists in the program object named by /program/ and location
- is not -1, or
-
- * if the fragment input declared in the shader is a built-in
- variable (i.e. prefixed by "gl_").
-
- When covering paths, fragment input variables are interpolated at
- each shaded fragment based on the corresponding fragment input
- generation state specified by ProgramPathFragmentInputGenCHROMIUM for
- each respective fragment input.
-
- The /genMode/, /components/, and /coeffs/ parameters are used to
- generate the fragment input variable values. This is described in
- subsection FRAGMENT INPUT GENERATION FOR PATH COVER COMMANDS.
-
- When covering paths, if a fragment input variable has not had its
- path fragment input generation state successfully generated, it is as
- if the values of this variable are always initialized to zero when
- the fragment shader is executing.
-
- FRAGMENT INPUT GENERATION FOR PATH COVER COMMANDS
-
- The /genMode/, /components/, and /coeffs/ parameters of
- ProgramPathFragmentInputGenCHROMIUM control how fragment inputs are computed
- for fragment shading operations that occur as a result of
- CoverFillPathCHROMIUM and CoverStrokePathCHROMIUM and their StencilThenCover
- and instanced variants.
-
- /genMode/ must be one of NONE, OBJECT_LINEAR_CHROMIUM, EYE_LINEAR_CHROMIUM
- or CONSTANT_CHROMIUM; otherwise INVALID_ENUM is generated.
-
- NONE means that the fragment input is not generated. OBJECT_LINEAR_CHROMIUM
- means that the specified input is generated from a linear combination of the
- 2D path coordinates (x,y). EYE_LINEAR_CHROMIUM means the specified input is
- generated from a linear combination of path's 2D coordinates transformed in
- eye space, with (xe, ye, ze, we) calculated as in section 2.12
- ("Fixed-Function Vertex Transformation") of OpenGL 3.2 (unabridged)
- Specification (Special Functions). CONSTANT_CHROMIUM means that the
- specified input is set to the corresponding constant value.
-
- /components/ must be 0 if /genMode/ is NONE or for other allowed /genMode/
- values must be one of 1, 2, 3, or 4; otherwise INVALID_VALUE is generated.
- /components/ determines how many fragment input components, how many
- coefficients read from the /coeffs/ array, and the linear equations used to
- generate the s, t, r, and q coordinates of the fragment input specified by
- /location/.
-
- In the following equations, coeffs[i] is the /i/th element (base zero) of
- the /coeffs/ array; x, y, z, and w are determined by the /genMode/.
-
- When /genMode/ is EYE_LINEAR_CHROMIUM, xcoeffs[i] is the /i/th element (base
- zero) of an /xcoeffs/ array generated by multiplying each respective vector
- of four elements of coeffs by the current inverse modelview matrix when
- ProgramPathFragmentInputGenCHROMIUM is called.
-
- xcoeffs[0..3] = coeffs[0..3] * MV^-1
- xcoeffs[4..7] = coeffs[4..7] * MV^-1
- xcoeffs[8..11] = coeffs[8..11] * MV^-1
- xcoeffs[12..15] = coeffs[12..15] * MV^-1
-
- [[ NOTATION:
-
- xxx[0..3] is a vector form from xxx[0], xxx[1], xxx[2], and xxx[3]
-
- MV^-1 is the inverse of the current PATH_MODELVIEW_CHROMIUM matrix when
- ProgramPathFragmentInputGenCHROMIUM happens.
-
- ]]
-
- If the /components/ is 0, no values from the /coeffs/ array are
- accessed and the s, t, r, and q coordinates of a covered fragment's
- fragment input for /location/ are computed:
-
- s = 0
- t = 0
- r = 0
- q = 0
-
- If the /components/ is 1 and /genMode/ is OBJECT_LINEAR_CHROMIUM,
- 3 values from the /coeffs/ array are
- accessed and the s, t, r, and q coordinates of a covered fragment's
- fragment input for /location/ are computed:
-
- s = coeffs[0] * x + coeffs[1] * y + coeffs[2]
- t = 0
- r = 0
- q = 0
-
- Alternatively if the /genMode/ is EYE_LINEAR_CHROMIUM, then 4 values are
- accessed and the fragment input coordinates for /location/ are
- computed:
-
- s = xcoeffs[0] * xe + xcoeffs[1] * ye + xcoeffs[2] * ze + xcoeffs[3] * we
- t = 0
- r = 0
- q = 0
-
- Alternatively if the /genMode/ is CONSTANT_CHROMIUM, then:
-
- s = xcoeffs[0]
- t = 0
- r = 0
- q = 0
-
- If the /components/ is 2 and /genMode/ is OBJECT_LINEAR_CHROMIUM,
- 6 values from the /coeffs/ array are accessed and the
- s, t, r, and q coordinates of a covered fragment's fragment input
- coordinates are computed:
-
- s = coeffs[0] * x + coeffs[1] * y + coeffs[2]
- t = coeffs[3] * x + coeffs[4] * y + coeffs[5]
- r = 0
- q = 0
-
- Alternatively if the /genMode/ is EYE_LINEAR_CHROMIUM, then 8 values are
- accessed and the fragment input coordinates are computed:
-
- s = xcoeffs[0] * xe + xcoeffs[1] * ye + xcoeffs[2] * ze + xcoeffs[3] * we
- t = xcoeffs[4] * xe + xcoeffs[5] * ye + xcoeffs[6] * ze + xcoeffs[7] * we
- r = 0
- q = 0
-
- Alternatively if the /genMode/ is CONSTANT_CHROMIUM, then:
-
- s = xcoeffs[0]
- t = xcoeffs[1]
- r = 0
- q = 0
-
- If the /components/ is 3 and /genMode/ is OBJECT_LINEAR_CHROMIUM, 9 values
- from the /coeffs/ array are accessed and the s, t, r, and q coordinates of a
- covered fragment's fragment input coordinates for /location/ are computed:
-
- s = coeffs[0] * x + coeffs[1] * y + coeffs[2]
- t = coeffs[3] * x + coeffs[4] * y + coeffs[5]
- r = coeffs[6] * x + coeffs[7] * y + coeffs[8]
- q = 0
-
- Alternatively if the /genMode/ is CONSTANT_CHROMIUM, then:
-
- s = xcoeffs[0]
- t = xcoeffs[1]
- r = xcoeffs[2]
- q = 0
-
- Alternatively if the /genMode/ is EYE_LINEAR_CHROMIUM, then 12 values are
- accessed and the fragment input coordinates for /location/ are computed:
-
- s = xcoeffs[0] * xe + xcoeffs[1] * ye + xcoeffs[2] * ze + xcoeffs[3] * we
- t = xcoeffs[4] * xe + xcoeffs[5] * ye + xcoeffs[6] * ze + xcoeffs[7] * we
- r = xcoeffs[8] * xe + xcoeffs[9] * ye + xcoeffs[10] * ze + xcoeffs[11] * we
- q = 0
-
- If the /components/ is 4 and /genMode/ is OBJECT_LINEAR_CHROMIUM,
- 12 values from the /coeffs/ array are accessed and the
- s, t, r, and q coordinates of a covered fragment's fragment input
- coordinates for /location/ are computed:
-
- s = coeffs[0] * x + coeffs[1] * y + coeffs[2]
- t = coeffs[3] * x + coeffs[4] * y + coeffs[5]
- r = coeffs[6] * x + coeffs[7] * y + coeffs[8]
- q = coeffs[9] * x + coeffs[10] * y + coeffs[11]
-
- Alternatively if the /genMode/ is EYE_LINEAR_CHROMIUM, then 16 values are
- accessed and the fragment input coordinates for /location/ are
- computed:
-
- s = xcoeffs[0] * xe + xcoeffs[1] * ye + xcoeffs[2] * ze + xcoeffs[3] * we
- t = xcoeffs[4] * xe + xcoeffs[5] * ye + xcoeffs[6] * ze + xcoeffs[7] * we
- r = xcoeffs[8] * xe + xcoeffs[9] * ye + xcoeffs[10] * ze + xcoeffs[11] * we
- q = xcoeffs[12] * xe + xcoeffs[13] * ye + xcoeffs[14] * ze + xcoeffs[15] * we
-
- Alternatively if the /genMode/ is CONSTANT_CHROMIUM, then:
-
- s = xcoeffs[0]
- t = xcoeffs[1]
- r = xcoeffs[2]
- q = xcoeffs[3]
-
- The initial mode is NONE and the coefficients are all initially zero.
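-
- For instance (non-normative), to generate a two-component fragment
- input at /location/ directly from the path-space coordinates, so that
- s = x and t = y:
-
-     static const GLfloat coeffs[6] = {1, 0, 0,   /* s = 1*x + 0*y + 0 */
-                                       0, 1, 0};  /* t = 0*x + 1*y + 0 */
-     glProgramPathFragmentInputGenCHROMIUM(program, location,
-                                           GL_OBJECT_LINEAR_CHROMIUM, 2, coeffs);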
-
- PATH COVERING RASTERIZATION DETAILS
-
- The GL processes fragments rasterized by path cover commands in
- much the same manner as fragments generated by conventional polygon
- rasterization. However path rendering /ignores/ the following
- operations:
-
- * Interpolation of per-vertex data (section 3.6.1). Path
- primitives have neither conventional vertices nor per-vertex
- data. Instead fragments generate interpolated per-fragment
- colors and texture coordinate sets as a
- linear function of object-space or eye-space path coordinates,
- or using the current color or texture coordinate set state
- directly.
-
- Depth offset (section 3.6.2) and polygon multisample rasterization
- (3.6.3) do apply to path covering.
-
- Front and back face determination (explained in section 3.6.1 for
- polygons) operates somewhat differently for transformed paths than
- polygons. The path's convex hull or bounding box
- (depending on the /coverMode/) is specified to wind counterclockwise
- in object space, though the transformation of the convex hull into
- window space could reverse this winding. Whether the GL's front face
- state is CW or CCW (as set by the FrontFace command) determines
- if the path is front facing or not. Because the specific vertices
- that belong to the covering geometry are implementation-dependent,
- when the signed area of the covering geometry (computed with equation
- 3.6) is sufficiently near zero, the facingness of the path in such
- situations is ill-defined.
-
- The determination of whether a path transformed into window space is
- front facing or not affects face culling if enabled (section 3.6.1),
- the gl_FrontFacing built-in variable (section 3.9.2), and separate
- (two-sided) stencil testing (section 4.1.4).
-
-Errors
-
- None.
-
-New State
-
- Get Value Type Get Command Initial Description
- ----------------------------- ----- ------------ -------- -------------------
- PATH_MODELVIEW_MATRIX_CHROMIUM 16xR GetFloatv all 0's Current modelview
- matrix for path rendering
- PATH_PROJECTION_MATRIX_CHROMIUM 16xR GetFloatv all 0's Current projection
- matrix for path rendering
- PATH_STENCIL_FUNC_CHROMIUM Z8 GetIntegerv ALWAYS path stenciling function
- PATH_STENCIL_REF_CHROMIUM Z+ GetIntegerv 0 path stenciling
- reference value
- PATH_STENCIL_VALUE_MASK_CHROMIUM Z+ GetIntegerv 1's path stencil read
- mask
-
-Tables
- Table 5.pathCommands: Path Commands
-
- Coordinate
- Token Description count
- ========================== ===================== ==========
- MOVE_TO_CHROMIUM Absolute move 2
- current point
- -------------------------- --------------------- ----------
- CLOSE_PATH_CHROMIUM Close path 0
- -------------------------- --------------------- ----------
- LINE_TO_CHROMIUM Absolute line 2
- -------------------------- --------------------- ----------
- QUADRATIC_CURVE_TO_CHROMIUM Absolute quadratic 4
- Bezier segment
- -------------------------- --------------------- ----------
- CUBIC_CURVE_TO_CHROMIUM Absolute cubic 6
- Bezier segment
- -------------------------- --------------------- ----------
- CONIC_CURVE_TO_CHROMIUM Absolute conic 5
- (rational Bezier)
- segment
-
-
- Table 5.pathParameters
- Name Type Required Values or Range
- ------------------------------- ------- -----------------------------------------------
- PATH_STROKE_WIDTH_CHROMIUM float non-negative
- PATH_END_CAPS_CHROMIUM enum FLAT_CHROMIUM, SQUARE_CHROMIUM, ROUND_CHROMIUM
- PATH_JOIN_STYLE_CHROMIUM enum MITER_REVERT_CHROMIUM, BEVEL_CHROMIUM, ROUND_CHROMIUM
- PATH_MITER_LIMIT_CHROMIUM float non-negative
- PATH_STROKE_BOUND_CHROMIUM float will be clamped to [0, 1.0], initially 0.2 (20%)
-
-
-Issues
-
- 1. Should there be a distinct stencil function state for path
- stenciling?
-
- RESOLVED: YES. glPathStencilFunc sets the state. How the
- stencil state needs to be configured for path covering is
- different from how the stencil function is typically configured
- for path stenciling.
-
- For example, stencil covering might use
- StencilFunc(NOT_EQUAL,0,~0) while path stenciling would
- use ALWAYS for the path stenciling stencil test.
-
- However there are other situations such as path clipping where it
- is useful to have the path stencil function configured differently
- such as PathStencilFunc(NOT_EQUAL, 0x00, 0x80) or other
- similar path clipping test.
-
- 2. Since Cover*Path* skips the vertex shader, what does it mean exactly
- wrt a fully linked program? What happens to the fragment shader's input
- varyings that are not filled by the vertex shader + rasterizer?
-
- It is possible that input varyings from a shader may not be written
- as output varyings of a preceding shader. In this case, the unwritten
- input varying values are set to constant zeros.
-
- 3. What is the defined behavior when stroking if PATH_STROKE_WIDTH is
- zero?
-
- There will not be any samples within the stroke. I.e. the stroke does
- not produce any visible results.
-
- 4. How do you define a program that's valid to use with these calls?
-
- There is no change with respect to validity of the programs. All
- programs that are valid before this extension are valid after.
- All programs that are invalid before this extension remain invalid
- after.
-
- 5. Can same programs be used to render regular GL primitives as well
- as in covering paths?
-
- Yes.
-
- 6. How is the fragment shader called when covering paths, and with
- which values for the inputs?
-
- gl_FragCoord: Interpolated coordinate of the path coverage.
-
- gl_FrontFacing:
- * Paths wind by default counterclockwise
- * Window space transform can reverse this winding
- * GL front face state CW/CCW selects whether the variable is true
- or false
-
- user-defined varyings: constant zeros.
-
-Revision History
-
- 14/8/2014 Documented the extension
diff --git a/chromium/gpu/GLES2/extensions/CHROMIUM/CHROMIUM_resize.txt b/chromium/gpu/GLES2/extensions/CHROMIUM/CHROMIUM_resize.txt
index 0b7daa4b0dd..cabda8e9c2f 100644
--- a/chromium/gpu/GLES2/extensions/CHROMIUM/CHROMIUM_resize.txt
+++ b/chromium/gpu/GLES2/extensions/CHROMIUM/CHROMIUM_resize.txt
@@ -23,15 +23,6 @@ Issues
None
-New Tokens
-
- Accepted by the <color_space> parameter of glResizeCHROMIUM:
- GL_COLOR_SPACE_UNSPECIFIED_CHROMIUM 0x8AF1
- GL_COLOR_SPACE_SCRGB_LINEAR_CHROMIUM 0x8AF2
- GL_COLOR_SPACE_SRGB_CHROMIUM 0x8AF3
- GL_COLOR_SPACE_DISPLAY_P3_CHROMIUM 0x8AF4
- GL_COLOR_SPACE_HDR10_CHROMIUM 0x8AF5
-
New Procedures and Functions
The command
@@ -39,7 +30,7 @@ New Procedures and Functions
glResizeCHROMIUM(GLint width,
GLint height,
GLfloat scale_factor,
- GLenum color_space,
+ GLcolorSpace color_space,
GLboolean alpha);
changes the current output surface to be changed.
@@ -47,22 +38,9 @@ New Procedures and Functions
<scale_factor> specifies the device scale factor for the surface.
<color_space> specifies the color space in which the pixels of the surface
should be interpreted by the display system. Note that this value does not
- impact blending. All blending will be done on the raw pixel values.
- The values have the following interpretation:
- GL_COLOR_SPACE_UNSPECIFIED_CHROMIUM: Indicates that the display system
- should use whatever its default interpretation of color values is.
- GL_COLOR_SPACE_SRGB_CHROMIUM: Indicates that the display system should
- interpret output colors as being sRGB values. On EGL-based systems this
- corresponds to using the default value, EGL_GL_COLORSPACE_LINEAR_KHR,
- for the EGL_GL_COLORSPACE_KHR property in EGL_KHR_gl_colorspace.
- GL_COLOR_SPACE_DISPLAY_P3_CHROMIUM: Indicates that the display system
- should interpret output colors as being in P3 D65 color space. As above,
- this corresponds to the EGL_GL_COLORSPACE_DISPLAY_P3_LINEAR_EXT value
- from EGL_EXT_gl_colorspace_display_p3_linear.
- GL_COLOR_SPACE_SCRGB_LINEAR_CHROMIUM: Indicates that the display system
- should interpret output colors as being in linear-gamma extended scRGB
- color space. On Windows, this will result in HDR being enabled for the
- surface, when possible.
+ impact blending. All blending will be done on the raw pixel values. It is
+ valid to specify nullptr for <color_space>, which will be interpreted as
+ the default (invalid) color space.
<alpha> indicates whether or not the surface must allocate an alpha channel
Errors
diff --git a/chromium/gpu/GLES2/extensions/CHROMIUM/CHROMIUM_shared_image.txt b/chromium/gpu/GLES2/extensions/CHROMIUM/CHROMIUM_shared_image.txt
index def75e9bf6c..d5403074f9a 100644
--- a/chromium/gpu/GLES2/extensions/CHROMIUM/CHROMIUM_shared_image.txt
+++ b/chromium/gpu/GLES2/extensions/CHROMIUM/CHROMIUM_shared_image.txt
@@ -81,6 +81,10 @@ New Procedures and Functions
mode - the access mode with which to begin access.
+ This function indicates that the calling context will access the SharedImage
+ bound to <texture> until glEndSharedImageAccessDirectCHROMIUM is called, or
+ the calling context deletes <texture>.
+
INVALID_OPERATION is generated if the texture id indicated is not
backed by a shared image.
diff --git a/chromium/gpu/GLES2/extensions/CHROMIUM/EGL_CHROMIUM_sync_control.txt b/chromium/gpu/GLES2/extensions/CHROMIUM/EGL_CHROMIUM_sync_control.txt
index 91a301909b3..f699f617a98 100644
--- a/chromium/gpu/GLES2/extensions/CHROMIUM/EGL_CHROMIUM_sync_control.txt
+++ b/chromium/gpu/GLES2/extensions/CHROMIUM/EGL_CHROMIUM_sync_control.txt
@@ -16,7 +16,7 @@ Status
Version
- Version 2, 2017-05-05
+ Version 2, 2015-05-05
Based on GLX_OML_sync_control Revision 6.0
@@ -70,7 +70,6 @@ New Procedures and Functions
int64_t* msc,
int64_t* sbc)
-
New Tokens
None
@@ -115,8 +114,9 @@ Additions to the EGL 1.3 Specification
Errors
- eglGetSyncValuesCHROMIUM will return FALSE if there is no current
- EGLContext.
+ The function eglGetSyncValuesCHROMIUM will return FALSE if there is no
+ current EGLContext.
+
New State
diff --git a/chromium/gpu/GLES2/gl2chromium_autogen.h b/chromium/gpu/GLES2/gl2chromium_autogen.h
index d3bcadd5c69..2d5d43fc670 100644
--- a/chromium/gpu/GLES2/gl2chromium_autogen.h
+++ b/chromium/gpu/GLES2/gl2chromium_autogen.h
@@ -357,39 +357,6 @@
#define glGetLastFlushIdCHROMIUM GLES2_GET_FUN(GetLastFlushIdCHROMIUM)
#define glScheduleDCLayerCHROMIUM GLES2_GET_FUN(ScheduleDCLayerCHROMIUM)
#define glSetActiveURLCHROMIUM GLES2_GET_FUN(SetActiveURLCHROMIUM)
-#define glMatrixLoadfCHROMIUM GLES2_GET_FUN(MatrixLoadfCHROMIUM)
-#define glMatrixLoadIdentityCHROMIUM GLES2_GET_FUN(MatrixLoadIdentityCHROMIUM)
-#define glGenPathsCHROMIUM GLES2_GET_FUN(GenPathsCHROMIUM)
-#define glDeletePathsCHROMIUM GLES2_GET_FUN(DeletePathsCHROMIUM)
-#define glIsPathCHROMIUM GLES2_GET_FUN(IsPathCHROMIUM)
-#define glPathCommandsCHROMIUM GLES2_GET_FUN(PathCommandsCHROMIUM)
-#define glPathParameterfCHROMIUM GLES2_GET_FUN(PathParameterfCHROMIUM)
-#define glPathParameteriCHROMIUM GLES2_GET_FUN(PathParameteriCHROMIUM)
-#define glPathStencilFuncCHROMIUM GLES2_GET_FUN(PathStencilFuncCHROMIUM)
-#define glStencilFillPathCHROMIUM GLES2_GET_FUN(StencilFillPathCHROMIUM)
-#define glStencilStrokePathCHROMIUM GLES2_GET_FUN(StencilStrokePathCHROMIUM)
-#define glCoverFillPathCHROMIUM GLES2_GET_FUN(CoverFillPathCHROMIUM)
-#define glCoverStrokePathCHROMIUM GLES2_GET_FUN(CoverStrokePathCHROMIUM)
-#define glStencilThenCoverFillPathCHROMIUM \
- GLES2_GET_FUN(StencilThenCoverFillPathCHROMIUM)
-#define glStencilThenCoverStrokePathCHROMIUM \
- GLES2_GET_FUN(StencilThenCoverStrokePathCHROMIUM)
-#define glStencilFillPathInstancedCHROMIUM \
- GLES2_GET_FUN(StencilFillPathInstancedCHROMIUM)
-#define glStencilStrokePathInstancedCHROMIUM \
- GLES2_GET_FUN(StencilStrokePathInstancedCHROMIUM)
-#define glCoverFillPathInstancedCHROMIUM \
- GLES2_GET_FUN(CoverFillPathInstancedCHROMIUM)
-#define glCoverStrokePathInstancedCHROMIUM \
- GLES2_GET_FUN(CoverStrokePathInstancedCHROMIUM)
-#define glStencilThenCoverFillPathInstancedCHROMIUM \
- GLES2_GET_FUN(StencilThenCoverFillPathInstancedCHROMIUM)
-#define glStencilThenCoverStrokePathInstancedCHROMIUM \
- GLES2_GET_FUN(StencilThenCoverStrokePathInstancedCHROMIUM)
-#define glBindFragmentInputLocationCHROMIUM \
- GLES2_GET_FUN(BindFragmentInputLocationCHROMIUM)
-#define glProgramPathFragmentInputGenCHROMIUM \
- GLES2_GET_FUN(ProgramPathFragmentInputGenCHROMIUM)
#define glContextVisibilityHintCHROMIUM \
GLES2_GET_FUN(ContextVisibilityHintCHROMIUM)
#define glCoverageModulationCHROMIUM GLES2_GET_FUN(CoverageModulationCHROMIUM)
@@ -435,5 +402,9 @@
GLES2_GET_FUN(BeginSharedImageAccessDirectCHROMIUM)
#define glEndSharedImageAccessDirectCHROMIUM \
GLES2_GET_FUN(EndSharedImageAccessDirectCHROMIUM)
+#define glBeginBatchReadAccessSharedImageCHROMIUM \
+ GLES2_GET_FUN(BeginBatchReadAccessSharedImageCHROMIUM)
+#define glEndBatchReadAccessSharedImageCHROMIUM \
+ GLES2_GET_FUN(EndBatchReadAccessSharedImageCHROMIUM)
#endif // GPU_GLES2_GL2CHROMIUM_AUTOGEN_H_
diff --git a/chromium/gpu/GLES2/gl2extchromium.h b/chromium/gpu/GLES2/gl2extchromium.h
index dc411eccedf..b67d61d70c8 100644
--- a/chromium/gpu/GLES2/gl2extchromium.h
+++ b/chromium/gpu/GLES2/gl2extchromium.h
@@ -614,40 +614,20 @@ typedef void (GL_APIENTRYP PFNGLDRAWBUFFERSEXTPROC) (
/* GL_CHROMIUM_resize */
#ifndef GL_CHROMIUM_resize
#define GL_CHROMIUM_resize 1
+typedef const struct _GLcolorSpace* GLcolorSpace;
#ifdef GL_GLEXT_PROTOTYPES
GL_APICALL void GL_APIENTRY glResizeCHROMIUM(GLuint width,
GLuint height,
GLfloat scale_factor,
- GLenum color_space,
+ GLcolorSpace color_space,
GLboolean alpha);
#endif
typedef void(GL_APIENTRYP PFNGLRESIZECHROMIUMPROC)(GLuint width,
GLuint height,
GLfloat scale_factor,
- GLenum color_space,
+ GLcolorSpace color_space,
GLboolean alpha);
-
-#ifndef GL_COLOR_SPACE_UNSPECIFIED_CHROMIUM
-#define GL_COLOR_SPACE_UNSPECIFIED_CHROMIUM 0x8AF1
-#endif
-
-#ifndef GL_COLOR_SPACE_SCRGB_LINEAR_CHROMIUM
-#define GL_COLOR_SPACE_SCRGB_LINEAR_CHROMIUM 0x8AF2
-#endif
-
-#ifndef GL_COLOR_SPACE_SRGB_CHROMIUM
-#define GL_COLOR_SPACE_SRGB_CHROMIUM 0x8AF3
-#endif
-
-#ifndef GL_COLOR_SPACE_DISPLAY_P3_CHROMIUM
-#define GL_COLOR_SPACE_DISPLAY_P3_CHROMIUM 0x8AF4
-#endif
-
-#ifndef GL_COLOR_SPACE_HDR10_CHROMIUM
-#define GL_COLOR_SPACE_HDR10_CHROMIUM 0x8AF5
-#endif
-
#endif /* GL_CHROMIUM_resize */
/* GL_CHROMIUM_get_multiple */
@@ -834,348 +814,6 @@ typedef void(GL_APIENTRYP PFNGLSCHEDULECALAYERINUSEQUERYCHROMIUMPROC)(
#endif
#endif /* GL_CHROMIUM_nonblocking_readback */
-#ifndef GL_CHROMIUM_path_rendering
-#define GL_CHROMIUM_path_rendering 1
-
-#ifdef GL_GLEXT_PROTOTYPES
-GL_APICALL void GL_APIENTRY
- glMatrixLoadfCHROMIUM(GLenum mode, const GLfloat* m);
-GL_APICALL void GL_APIENTRY glMatrixLoadIdentityCHROMIUM(GLenum mode);
-GL_APICALL GLuint GL_APIENTRY glGenPathsCHROMIUM(GLsizei range);
-GL_APICALL void GL_APIENTRY glDeletePathsCHROMIUM(GLuint path, GLsizei range);
-GL_APICALL GLboolean GL_APIENTRY glIsPathCHROMIUM(GLuint path);
-GL_APICALL void GL_APIENTRY glPathCommandsCHROMIUM(GLuint path,
- GLsizei numCommands,
- const GLubyte* commands,
- GLsizei numCoords,
- GLenum coordType,
- const void* coords);
-GL_APICALL void GL_APIENTRY
-glPathParameteriCHROMIUM(GLuint path, GLenum pname, GLint value);
-GL_APICALL void GL_APIENTRY
-glPathParameterfCHROMIUM(GLuint path, GLenum pname, GLfloat value);
-GL_APICALL void GL_APIENTRY
-glPathStencilFuncCHROMIUM(GLenum func, GLint ref, GLuint mask);
-GL_APICALL void GL_APIENTRY
-glStencilFillPathCHROMIUM(GLuint path, GLenum fillMode, GLuint mask);
-GL_APICALL void GL_APIENTRY
-glStencilStrokePathCHROMIUM(GLuint path, GLint reference, GLuint mask);
-GL_APICALL void GL_APIENTRY
-glCoverFillPathCHROMIUM(GLuint path, GLenum coverMode);
-GL_APICALL void GL_APIENTRY
-glCoverStrokePathCHROMIUM(GLuint name, GLenum coverMode);
-GL_APICALL void GL_APIENTRY
-glStencilThenCoverFillPathCHROMIUM(GLuint path,
- GLenum fillMode,
- GLuint mask,
- GLenum coverMode);
-GL_APICALL void GL_APIENTRY
-glStencilThenCoverStrokePathCHROMIUM(GLuint path,
- GLint reference,
- GLuint mask,
- GLenum coverMode);
-
-GL_APICALL void GL_APIENTRY
-glStencilFillPathInstancedCHROMIUM(GLsizei numPaths,
- GLenum pathNameType,
- const GLvoid* paths,
- GLuint pathBase,
- GLenum fillMode,
- GLuint mask,
- GLenum transformType,
- const GLfloat* transformValues);
-GL_APICALL void GL_APIENTRY
-glStencilStrokePathInstancedCHROMIUM(GLsizei numPaths,
- GLenum pathNameType,
- const GLvoid* paths,
- GLuint pathBase,
- GLint ref,
- GLuint mask,
- GLenum transformType,
- const GLfloat* transformValues);
-GL_APICALL void GL_APIENTRY
-glCoverFillPathInstancedCHROMIUM(GLsizei numPaths,
- GLenum pathNameType,
- const GLvoid* paths,
- GLuint pathBase,
- GLenum coverMode,
- GLenum transformType,
- const GLfloat* transformValues);
-GL_APICALL void GL_APIENTRY
-glCoverStrokePathInstancedCHROMIUM(GLsizei numPaths,
- GLenum pathNameType,
- const GLvoid* paths,
- GLuint pathBase,
- GLenum coverMode,
- GLenum transformType,
- const GLfloat* transformValues);
-GL_APICALL void GL_APIENTRY
-glStencilThenCoverFillPathInstancedCHROMIUM(GLsizei numPaths,
- GLenum pathNameType,
- const GLvoid* paths,
- GLuint pathBase,
- GLenum fillMode,
- GLuint mask,
- GLenum coverMode,
- GLenum transformType,
- const GLfloat* transformValues);
-GL_APICALL void GL_APIENTRY
-glStencilThenCoverStrokePathInstancedCHROMIUM(GLsizei numPaths,
- GLenum pathNameType,
- const GLvoid* paths,
- GLuint pathBase,
- GLint ref,
- GLuint mask,
- GLenum coverMode,
- GLenum transformType,
- const GLfloat* transformValues);
-GL_APICALL void GL_APIENTRY
-glBindFragmentInputLocationCHROMIUM(GLuint program,
- GLint location,
- const char* name);
-GL_APICALL void GL_APIENTRY
-glProgramPathFragmentInputGenCHROMIUM(GLuint program,
- GLint location,
- GLenum genMode,
- GLint components,
- const GLfloat* coeffs);
-
-#endif
-
-typedef void(GL_APIENTRYP PFNGLMATRIXLOADFCHROMIUMPROC)(GLenum matrixMode,
- const GLfloat* m);
-typedef void(GL_APIENTRYP PFNGLMATRIXLOADIDENTITYCHROMIUMPROC)(
- GLenum matrixMode);
-typedef GLuint(GL_APIENTRYP* PFNGLGENPATHSCHROMIUMPROC)(GLsizei range);
-typedef void(GL_APIENTRYP* PFNGLDELETEPATHSCHROMIUMPROC)(GLuint path,
- GLsizei range);
-typedef GLboolean(GL_APIENTRYP* PFNGLISPATHCHROMIUMPROC)(GLuint path);
-typedef void(GL_APIENTRYP* PFNGLPATHCOMMANDSCHROMIUMPROC)(
- GLuint path,
- GLsizei numCommands,
- const GLubyte* commands,
- GLsizei numCoords,
- GLenum coordType,
- const GLvoid* coords);
-typedef void(GL_APIENTRYP* PFNGLPATHPARAMETERICHROMIUMPROC)(GLuint path,
- GLenum pname,
- GLint value);
-typedef void(GL_APIENTRYP* PFNGLPATHPARAMETERFCHROMIUMPROC)(GLuint path,
- GLenum pname,
- GLfloat value);
-typedef void(GL_APIENTRYP* PFNGLPATHSTENCILFUNCCHROMIUMPROC)(GLenum func,
- GLint ref,
- GLuint mask);
-typedef void(GL_APIENTRYP* PFNGLSTENCILFILLPATHCHROMIUMPROC)(GLuint path,
- GLenum fillMode,
- GLuint mask);
-typedef void(GL_APIENTRYP* PFNGLSTENCILSTROKEPATHCHROMIUMPROC)(GLuint path,
- GLint reference,
- GLuint mask);
-typedef void(GL_APIENTRYP* PFNGLCOVERFILLPATHCHROMIUMPROC)(GLuint path,
- GLenum coverMode);
-typedef void(GL_APIENTRYP* PFNGLCOVERSTROKEPATHCHROMIUMPROC)(GLuint name,
- GLenum coverMode);
-
-typedef void(GL_APIENTRYP* PFNGLSTENCILTHENCOVERFILLPATHCHROMIUMPROC)(
- GLuint path,
- GLenum fillMode,
- GLuint mask,
- GLenum coverMode);
-typedef void(GL_APIENTRYP* PFNGLSTENCILTHENCOVERSTROKEPATHCHROMIUMPROC)(
- GLuint path,
- GLint reference,
- GLuint mask,
- GLenum coverMode);
-typedef void(GL_APIENTRYP PFNGLSTENCILFILLPATHINSTANCEDCHROMIUMPROC)(
- GLsizei numPaths,
- GLenum pathNameType,
- const GLvoid* paths,
- GLuint pathBase,
- GLenum fillMode,
- GLuint mask,
- GLenum transformType,
- const GLfloat* transformValues);
-typedef void(GL_APIENTRYP PFNGLSTENCILSTROKEPATHINSTANCEDCHROMIUMPROC)(
- GLsizei numPaths,
- GLenum pathNameType,
- const GLvoid* paths,
- GLuint pathBase,
- GLint reference,
- GLuint mask,
- GLenum transformType,
- const GLfloat* transformValues);
-typedef void(GL_APIENTRYP PFNGLCOVERFILLPATHINSTANCEDCHROMIUMPROC)(
- GLsizei numPaths,
- GLenum pathNameType,
- const GLvoid* paths,
- GLuint pathBase,
- GLenum coverMode,
- GLenum transformType,
- const GLfloat* transformValues);
-typedef void(GL_APIENTRYP PFNGLCOVERSTROKEPATHINSTANCEDCHROMIUMPROC)(
- GLsizei numPaths,
- GLenum pathNameType,
- const GLvoid* paths,
- GLuint pathBase,
- GLenum coverMode,
- GLenum transformType,
- const GLfloat* transformValues);
-typedef void(GL_APIENTRYP PFNGLSTENCILTHENCOVERFILLPATHINSTANCEDCHROMIUMPROC)(
- GLsizei numPaths,
- GLenum pathNameType,
- const GLvoid* paths,
- GLuint pathBase,
- GLenum fillMode,
- GLuint mask,
- GLenum coverMode,
- GLenum transformType,
- const GLfloat* transformValues);
-typedef void(GL_APIENTRYP PFNGLSTENCILTHENCOVERSTROKEPATHINSTANCEDCHROMIUMPROC)(
- GLsizei numPaths,
- GLenum pathNameType,
- const GLvoid* paths,
- GLuint pathBase,
- GLint reference,
- GLuint mask,
- GLenum coverMode,
- GLenum transformType,
- const GLfloat* transformValues);
-typedef void(GL_APIENTRYP PFNGLBINDFRAGMENTINPUTLOCATIONCHROMIUMPROC)(
- GLuint program,
- GLint location,
- const char* name);
-typedef void(GL_APIENTRYP PFNGLPROGRAMPATHFRAGMENTINPUTGENCHROMIUMPROC)(
- GLuint program,
- GLint location,
- GLenum genMode,
- GLint components,
- const GLfloat* coeffs);
-
-#ifndef GL_CLOSE_PATH_CHROMIUM
-#define GL_CLOSE_PATH_CHROMIUM 0x00
-#endif
-#ifndef GL_MOVE_TO_CHROMIUM
-#define GL_MOVE_TO_CHROMIUM 0x02
-#endif
-#ifndef GL_LINE_TO_CHROMIUM
-#define GL_LINE_TO_CHROMIUM 0x04
-#endif
-#ifndef GL_QUADRATIC_CURVE_TO_CHROMIUM
-#define GL_QUADRATIC_CURVE_TO_CHROMIUM 0x0A
-#endif
-#ifndef GL_CUBIC_CURVE_TO_CHROMIUM
-#define GL_CUBIC_CURVE_TO_CHROMIUM 0x0C
-#endif
-#ifndef GL_CONIC_CURVE_TO_CHROMIUM
-#define GL_CONIC_CURVE_TO_CHROMIUM 0x1A
-#endif
-#ifndef GL_PATH_MODELVIEW_MATRIX_CHROMIUM
-#define GL_PATH_MODELVIEW_MATRIX_CHROMIUM 0x0BA6
-#endif
-#ifndef GL_PATH_PROJECTION_MATRIX_CHROMIUM
-#define GL_PATH_PROJECTION_MATRIX_CHROMIUM 0x0BA7
-#endif
-#ifndef GL_PATH_MODELVIEW_CHROMIUM
-#define GL_PATH_MODELVIEW_CHROMIUM 0x1700
-#endif
-#ifndef GL_PATH_PROJECTION_CHROMIUM
-#define GL_PATH_PROJECTION_CHROMIUM 0x1701
-#endif
-#ifndef GL_FLAT_CHROMIUM
-#define GL_FLAT_CHROMIUM 0x1D00
-#endif
-#ifndef GL_EYE_LINEAR_CHROMIUM
-#define GL_EYE_LINEAR_CHROMIUM 0x2400
-#endif
-#ifndef GL_OBJECT_LINEAR_CHROMIUM
-#define GL_OBJECT_LINEAR_CHROMIUM 0x2401
-#endif
-#ifndef GL_CONSTANT_CHROMIUM
-#define GL_CONSTANT_CHROMIUM 0x8576
-#endif
-#ifndef GL_PATH_STROKE_WIDTH_CHROMIUM
-#define GL_PATH_STROKE_WIDTH_CHROMIUM 0x9075
-#endif
-#ifndef GL_PATH_END_CAPS_CHROMIUM
-#define GL_PATH_END_CAPS_CHROMIUM 0x9076
-#endif
-#ifndef GL_PATH_JOIN_STYLE_CHROMIUM
-#define GL_PATH_JOIN_STYLE_CHROMIUM 0x9079
-#endif
-#ifndef GL_PATH_MITER_LIMIT_CHROMIUM
-#define GL_PATH_MITER_LIMIT_CHROMIUM 0x907a
-#endif
-#ifndef GL_PATH_STROKE_BOUND_CHROMIUM
-#define GL_PATH_STROKE_BOUND_CHROMIUM 0x9086
-#endif
-#ifndef GL_COUNT_UP_CHROMIUM
-#define GL_COUNT_UP_CHROMIUM 0x9088
-#endif
-#ifndef GL_COUNT_DOWN_CHROMIUM
-#define GL_COUNT_DOWN_CHROMIUM 0x9089
-#endif
-#ifndef GL_CONVEX_HULL_CHROMIUM
-#define GL_CONVEX_HULL_CHROMIUM 0x908B
-#endif
-#ifndef GL_BOUNDING_BOX_CHROMIUM
-#define GL_BOUNDING_BOX_CHROMIUM 0x908D
-#endif
-#ifndef GL_TRANSLATE_X_CHROMIUM
-#define GL_TRANSLATE_X_CHROMIUM 0x908E
-#endif
-#ifndef GL_TRANSLATE_Y_CHROMIUM
-#define GL_TRANSLATE_Y_CHROMIUM 0x908F
-#endif
-#ifndef GL_TRANSLATE_2D_CHROMIUM
-#define GL_TRANSLATE_2D_CHROMIUM 0x9090
-#endif
-#ifndef GL_TRANSLATE_3D_CHROMIUM
-#define GL_TRANSLATE_3D_CHROMIUM 0x9091
-#endif
-#ifndef GL_AFFINE_2D_CHROMIUM
-#define GL_AFFINE_2D_CHROMIUM 0x9092
-#endif
-#ifndef GL_AFFINE_3D_CHROMIUM
-#define GL_AFFINE_3D_CHROMIUM 0x9094
-#endif
-#ifndef GL_TRANSPOSE_AFFINE_2D_CHROMIUM
-#define GL_TRANSPOSE_AFFINE_2D_CHROMIUM 0x9096
-#endif
-#ifndef GL_TRANSPOSE_AFFINE_3D_CHROMIUM
-#define GL_TRANSPOSE_AFFINE_3D_CHROMIUM 0x9098
-#endif
-#ifndef GL_SQUARE_CHROMIUM
-#define GL_SQUARE_CHROMIUM 0x90a3
-#endif
-#ifndef GL_ROUND_CHROMIUM
-#define GL_ROUND_CHROMIUM 0x90a4
-#endif
-#ifndef GL_ROUND_CHROMIUM
-#define GL_ROUND_CHROMIUM 0x90A4
-#endif
-#ifndef GL_BEVEL_CHROMIUM
-#define GL_BEVEL_CHROMIUM 0x90A6
-#endif
-#ifndef GL_MITER_REVERT_CHROMIUM
-#define GL_MITER_REVERT_CHROMIUM 0x90A7
-#endif
-#ifndef GL_PATH_STENCIL_FUNC_CHROMIUM
-#define GL_PATH_STENCIL_FUNC_CHROMIUM 0x90B7
-#endif
-#ifndef GL_PATH_STENCIL_REF_CHROMIUM
-#define GL_PATH_STENCIL_REF_CHROMIUM 0x90B8
-#endif
-#ifndef GL_PATH_STENCIL_VALUE_MASK_CHROMIUM
-#define GL_PATH_STENCIL_VALUE_MASK_CHROMIUM 0x90B9
-#endif
-#ifndef GL_BOUNDING_BOX_OF_BOUNDING_BOXES_CHROMIUM
-#define GL_BOUNDING_BOX_OF_BOUNDING_BOXES_CHROMIUM 0x909C
-#endif
-
-#endif /* GL_CHROMIUM_path_rendering */
-
-
#ifndef GL_EXT_multisample_compatibility
#define GL_EXT_multisample_compatibility 1
#define GL_MULTISAMPLE_EXT 0x809D
@@ -1255,14 +893,13 @@ typedef void(GL_APIENTRYP PFNGLTEXSTORAGE2DIMAGECHROMIUM)(GLenum target,
#ifndef GL_CHROMIUM_color_space_metadata
#define GL_CHROMIUM_color_space_metadata 1
-typedef struct _GLColorSpace* GLColorSpace;
#ifdef GL_GLEXT_PROTOTYPES
GL_APICALL void GL_APIENTRY
-glSetColorSpaceMetadataCHROMIUM(GLuint texture_id, GLColorSpace color_space);
+glSetColorSpaceMetadataCHROMIUM(GLuint texture_id, GLcolorSpace color_space);
#endif
typedef void(GL_APIENTRYP PFNGLSETCOLORSPACEMETADATACHROMIUM)(
GLuint texture_id,
- GLColorSpace color_space);
+ GLcolorSpace color_space);
#endif /* GL_CHROMIUM_color_space_metadata */
/* GL_CHROMIUM_dither_and_premultiply_copy */
diff --git a/chromium/gpu/OWNERS b/chromium/gpu/OWNERS
index 8d32d1c1998..138fdcc4f18 100644
--- a/chromium/gpu/OWNERS
+++ b/chromium/gpu/OWNERS
@@ -1,22 +1,19 @@
backer@chromium.org
ericrk@chromium.org
kbr@chromium.org
+khushalsagar@chromium.org
vmiura@chromium.org
zmo@chromium.org
sunnyps@chromium.org
penghuang@chromium.org
# GPU memory buffer implementations.
-per-file *gpu_memory_buffer*=reveman@chromium.org
per-file *gpu_memory_buffer*=dcastagna@chromium.org
# Passthrough command decoder
per-file *passthrough*=geofflang@chromium.org
-# Shared Image and Media on Android
-per-file *shared_image_video*=vikassoni@chromium.org
-per-file *android*=vikassoni@chromium.org
-per-file *image_reader*=vikassoni@chromium.org
-per-file *hardwarebuffer*=vikassoni@chromium.org
+# For SharedImages
+vikassoni@chromium.org
# COMPONENT: Internals>GPU>Internals
diff --git a/chromium/gpu/angle_end2end_tests_main.cc b/chromium/gpu/angle_end2end_tests_main.cc
index 02997fdc6d1..75b40ed1ef1 100644
--- a/chromium/gpu/angle_end2end_tests_main.cc
+++ b/chromium/gpu/angle_end2end_tests_main.cc
@@ -21,11 +21,13 @@ int RunHelper(base::TestSuite* test_suite) {
// Located in third_party/angle/src/tests/test_utils/ANGLETest.cpp.
// Defined here so we can avoid depending on the ANGLE headers.
void ANGLEProcessTestArgs(int *argc, char *argv[]);
+void RegisterContextCompatibilityTests();
int main(int argc, char** argv) {
base::CommandLine::Init(argc, argv);
ANGLEProcessTestArgs(&argc, argv);
testing::InitGoogleMock(&argc, argv);
+ RegisterContextCompatibilityTests();
base::TestSuite test_suite(argc, argv);
int rt = base::LaunchUnitTestsWithOptions(
argc, argv,
diff --git a/chromium/gpu/command_buffer/OWNERS b/chromium/gpu/command_buffer/OWNERS
index fba89df3fe6..5bdc5e2bd3c 100644
--- a/chromium/gpu/command_buffer/OWNERS
+++ b/chromium/gpu/command_buffer/OWNERS
@@ -2,9 +2,14 @@ bajones@chromium.org
geofflang@chromium.org
vmiura@chromium.org
zmo@chromium.org
+jdarpinian@chromium.org
# GPU memory buffer tests.
-per-file *gpu_memory_buffer*=reveman@chromium.org
per-file *gpu_memory_buffer*=dcastagna@chromium.org
+# For Dawn / WebGPU changes
+cwallez@chromium.org
+enga@chromium.org
+kainino@chromium.org
+
# COMPONENT: Internals>GPU>Internals
diff --git a/chromium/gpu/command_buffer/build_cmd_buffer_lib.py b/chromium/gpu/command_buffer/build_cmd_buffer_lib.py
index 14004259867..2538596fb95 100644
--- a/chromium/gpu/command_buffer/build_cmd_buffer_lib.py
+++ b/chromium/gpu/command_buffer/build_cmd_buffer_lib.py
@@ -626,61 +626,6 @@ _STATE_INFO = {
},
],
},
- 'MatrixValuesCHROMIUM': {
- 'type': 'NamedParameter',
- 'func': 'MatrixLoadfEXT',
- 'states': [
- { 'enum': 'GL_PATH_MODELVIEW_MATRIX_CHROMIUM',
- 'enum_set': 'GL_PATH_MODELVIEW_CHROMIUM',
- 'name': 'modelview_matrix',
- 'type': 'GLfloat',
- 'default': [
- '1.0f', '0.0f','0.0f','0.0f',
- '0.0f', '1.0f','0.0f','0.0f',
- '0.0f', '0.0f','1.0f','0.0f',
- '0.0f', '0.0f','0.0f','1.0f',
- ],
- 'extension_flag': 'chromium_path_rendering',
- },
- { 'enum': 'GL_PATH_PROJECTION_MATRIX_CHROMIUM',
- 'enum_set': 'GL_PATH_PROJECTION_CHROMIUM',
- 'name': 'projection_matrix',
- 'type': 'GLfloat',
- 'default': [
- '1.0f', '0.0f','0.0f','0.0f',
- '0.0f', '1.0f','0.0f','0.0f',
- '0.0f', '0.0f','1.0f','0.0f',
- '0.0f', '0.0f','0.0f','1.0f',
- ],
- 'extension_flag': 'chromium_path_rendering',
- },
- ],
- },
- 'PathStencilFuncCHROMIUM': {
- 'type': 'Normal',
- 'func': 'PathStencilFuncNV',
- 'extension_flag': 'chromium_path_rendering',
- 'states': [
- {
- 'name': 'stencil_path_func',
- 'type': 'GLenum',
- 'enum': 'GL_PATH_STENCIL_FUNC_CHROMIUM',
- 'default': 'GL_ALWAYS',
- },
- {
- 'name': 'stencil_path_ref',
- 'type': 'GLint',
- 'enum': 'GL_PATH_STENCIL_REF_CHROMIUM',
- 'default': '0',
- },
- {
- 'name': 'stencil_path_mask',
- 'type': 'GLuint',
- 'enum': 'GL_PATH_STENCIL_VALUE_MASK_CHROMIUM',
- 'default': '0xFFFFFFFFU',
- },
- ],
- },
'WindowRectanglesEXT': {
'type': 'Normal',
'func': 'WindowRectanglesEXT',
diff --git a/chromium/gpu/command_buffer/build_gles2_cmd_buffer.py b/chromium/gpu/command_buffer/build_gles2_cmd_buffer.py
index 7705e56fc3f..653016c3ae2 100755
--- a/chromium/gpu/command_buffer/build_gles2_cmd_buffer.py
+++ b/chromium/gpu/command_buffer/build_gles2_cmd_buffer.py
@@ -684,14 +684,6 @@ _NAMED_TYPE_INFO = {
'type': 'GLenum',
'valid' : [],
},
- 'MatrixMode': {
- 'type': 'GLenum',
- 'is_complete': True,
- 'valid': [
- 'GL_PATH_PROJECTION_CHROMIUM',
- 'GL_PATH_MODELVIEW_CHROMIUM',
- ],
- },
'ProgramParameter': {
'type': 'GLenum',
'valid': [
@@ -1056,109 +1048,6 @@ _NAMED_TYPE_INFO = {
'GL_UNSIGNED_BYTE_3_3_2',
],
},
- 'PathCoordType': {
- 'type': 'GLenum',
- 'is_complete': True,
- 'valid': [
- 'GL_BYTE',
- 'GL_UNSIGNED_BYTE',
- 'GL_SHORT',
- 'GL_UNSIGNED_SHORT',
- 'GL_FLOAT',
- ],
- },
- 'PathCoverMode': {
- 'type': 'GLenum',
- 'is_complete': True,
- 'valid': [
- 'GL_CONVEX_HULL_CHROMIUM',
- 'GL_BOUNDING_BOX_CHROMIUM',
- ],
- },
- 'PathFillMode': {
- 'type': 'GLenum',
- 'is_complete': True,
- 'valid': [
- 'GL_INVERT',
- 'GL_COUNT_UP_CHROMIUM',
- 'GL_COUNT_DOWN_CHROMIUM',
- ],
- },
- 'PathInstancedCoverMode': {
- 'type': 'GLenum',
- 'is_complete': True,
- 'valid': [
- 'GL_CONVEX_HULL_CHROMIUM',
- 'GL_BOUNDING_BOX_CHROMIUM',
- 'GL_BOUNDING_BOX_OF_BOUNDING_BOXES_CHROMIUM',
- ],
- },
- 'PathNameType': {
- 'type': 'GLenum',
- 'is_complete': True,
- 'valid': [
- 'GL_UNSIGNED_BYTE',
- 'GL_BYTE',
- 'GL_UNSIGNED_SHORT',
- 'GL_SHORT',
- 'GL_UNSIGNED_INT',
- 'GL_INT',
- ],
- },
- 'PathParameter': {
- 'type': 'GLenum',
- 'is_complete': True,
- 'valid': [
- 'GL_PATH_STROKE_WIDTH_CHROMIUM',
- 'GL_PATH_END_CAPS_CHROMIUM',
- 'GL_PATH_JOIN_STYLE_CHROMIUM',
- 'GL_PATH_MITER_LIMIT_CHROMIUM',
- 'GL_PATH_STROKE_BOUND_CHROMIUM',
- ]
- },
- 'PathParameterCapValues': {
- 'type': 'GLint',
- 'is_complete': True,
- 'valid': [
- 'GL_FLAT',
- 'GL_SQUARE_CHROMIUM',
- 'GL_ROUND_CHROMIUM',
- ]
- },
- 'PathParameterJoinValues': {
- 'type': 'GLint',
- 'is_complete': True,
- 'valid': [
- 'GL_MITER_REVERT_CHROMIUM',
- 'GL_BEVEL_CHROMIUM',
- 'GL_ROUND_CHROMIUM',
- ]
- },
- 'PathTransformType': {
- 'type': 'GLenum',
- 'is_complete': True,
- 'valid': [
- 'GL_NONE',
- 'GL_TRANSLATE_X_CHROMIUM',
- 'GL_TRANSLATE_Y_CHROMIUM',
- 'GL_TRANSLATE_2D_CHROMIUM',
- 'GL_TRANSLATE_3D_CHROMIUM',
- 'GL_AFFINE_2D_CHROMIUM',
- 'GL_AFFINE_3D_CHROMIUM',
- 'GL_TRANSPOSE_AFFINE_2D_CHROMIUM',
- 'GL_TRANSPOSE_AFFINE_3D_CHROMIUM',
- ],
- },
- 'PathFragmentInputGenMode': {
- 'type': 'GLenum',
- 'is_complete': True,
- 'valid': [
- 'GL_NONE',
- 'GL_EYE_LINEAR_CHROMIUM',
- 'GL_OBJECT_LINEAR_CHROMIUM',
- 'GL_CONSTANT_CHROMIUM',
- ],
- },
'ReadPixelType': {
'type': 'GLenum',
'valid': [
@@ -3652,6 +3541,10 @@ _FUNCTION_INFO = {
'ResizeCHROMIUM': {
'type': 'Custom',
'impl_func': False,
+ 'client_test': False,
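+    # The serialized color space data is passed through shared memory via the
+    # shm_id / shm_offset / color_space_size arguments below.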
+ 'cmd_args': 'GLint width, GLint height, GLfloat scale_factor, GLboolean '
+ 'alpha, GLuint shm_id, GLuint shm_offset, GLsizei '
+ 'color_space_size',
'extension': True,
'trace_level': 1,
},
@@ -4049,138 +3942,6 @@ _FUNCTION_INFO = {
'client_test': False,
'extension': 'CHROMIUM_commit_overlay_planes',
},
- 'MatrixLoadfCHROMIUM': {
- 'type': 'PUT',
- 'count': 16,
- 'data_type': 'GLfloat',
- 'decoder_func': 'DoMatrixLoadfCHROMIUM',
- 'gl_test_func': 'glMatrixLoadfEXT',
- 'extension': 'CHROMIUM_path_rendering',
- 'extension_flag': 'chromium_path_rendering',
- },
- 'MatrixLoadIdentityCHROMIUM': {
- 'decoder_func': 'DoMatrixLoadIdentityCHROMIUM',
- 'gl_test_func': 'glMatrixLoadIdentityEXT',
- 'extension': 'CHROMIUM_path_rendering',
- 'extension_flag': 'chromium_path_rendering',
- },
- 'GenPathsCHROMIUM': {
- 'type': 'Custom',
- 'cmd_args': 'GLuint first_client_id, GLsizei range',
- 'extension': 'CHROMIUM_path_rendering',
- 'extension_flag': 'chromium_path_rendering',
- },
- 'DeletePathsCHROMIUM': {
- 'type': 'Custom',
- 'cmd_args': 'GLuint first_client_id, GLsizei range',
- 'impl_func': False,
- 'extension': 'CHROMIUM_path_rendering',
- 'extension_flag': 'chromium_path_rendering',
- },
- 'IsPathCHROMIUM': {
- 'type': 'Is',
- 'decoder_func': 'DoIsPathCHROMIUM',
- 'gl_test_func': 'glIsPathNV',
- 'extension': 'CHROMIUM_path_rendering',
- 'extension_flag': 'chromium_path_rendering',
- },
- 'PathCommandsCHROMIUM': {
- 'type': 'Custom',
- 'impl_func': False,
- 'extension': 'CHROMIUM_path_rendering',
- 'extension_flag': 'chromium_path_rendering',
- },
- 'PathParameterfCHROMIUM': {
- 'type': 'Custom',
- 'extension': 'CHROMIUM_path_rendering',
- 'extension_flag': 'chromium_path_rendering',
- },
- 'PathParameteriCHROMIUM': {
- 'type': 'Custom',
- 'extension': 'CHROMIUM_path_rendering',
- 'extension_flag': 'chromium_path_rendering',
- },
- 'PathStencilFuncCHROMIUM': {
- 'type': 'StateSet',
- 'state': 'PathStencilFuncCHROMIUM',
- 'decoder_func': 'glPathStencilFuncNV',
- 'extension': 'CHROMIUM_path_rendering',
- 'extension_flag': 'chromium_path_rendering',
- },
- 'StencilFillPathCHROMIUM': {
- 'type': 'Custom',
- 'extension': 'CHROMIUM_path_rendering',
- 'extension_flag': 'chromium_path_rendering',
- },
- 'StencilStrokePathCHROMIUM': {
- 'type': 'Custom',
- 'extension': 'CHROMIUM_path_rendering',
- 'extension_flag': 'chromium_path_rendering',
- },
- 'CoverFillPathCHROMIUM': {
- 'type': 'Custom',
- 'extension': 'CHROMIUM_path_rendering',
- 'extension_flag': 'chromium_path_rendering',
- },
- 'CoverStrokePathCHROMIUM': {
- 'type': 'Custom',
- 'extension': 'CHROMIUM_path_rendering',
- 'extension_flag': 'chromium_path_rendering',
- },
- 'StencilThenCoverFillPathCHROMIUM': {
- 'type': 'Custom',
- 'extension': 'CHROMIUM_path_rendering',
- 'extension_flag': 'chromium_path_rendering',
- },
- 'StencilThenCoverStrokePathCHROMIUM': {
- 'type': 'Custom',
- 'extension': 'CHROMIUM_path_rendering',
- 'extension_flag': 'chromium_path_rendering',
- },
- 'StencilFillPathInstancedCHROMIUM': {
- 'type': 'Custom',
- 'extension': 'CHROMIUM_path_rendering',
- 'extension_flag': 'chromium_path_rendering',
- },
- 'StencilStrokePathInstancedCHROMIUM': {
- 'type': 'Custom',
- 'extension': 'CHROMIUM_path_rendering',
- 'extension_flag': 'chromium_path_rendering',
- },
- 'CoverFillPathInstancedCHROMIUM': {
- 'type': 'Custom',
- 'extension': 'CHROMIUM_path_rendering',
- 'extension_flag': 'chromium_path_rendering',
- },
- 'CoverStrokePathInstancedCHROMIUM': {
- 'type': 'Custom',
- 'extension': 'CHROMIUM_path_rendering',
- 'extension_flag': 'chromium_path_rendering',
- },
- 'StencilThenCoverFillPathInstancedCHROMIUM': {
- 'type': 'Custom',
- 'extension': 'CHROMIUM_path_rendering',
- 'extension_flag': 'chromium_path_rendering',
- },
- 'StencilThenCoverStrokePathInstancedCHROMIUM': {
- 'type': 'Custom',
- 'extension': 'CHROMIUM_path_rendering',
- 'extension_flag': 'chromium_path_rendering',
- },
- 'BindFragmentInputLocationCHROMIUM': {
- 'type': 'GLchar',
- 'data_transfer_methods': ['bucket'],
- 'needs_size': True,
- 'gl_test_func': 'DoBindFragmentInputLocationCHROMIUM',
- 'extension': 'CHROMIUM_path_rendering',
- 'extension_flag': 'chromium_path_rendering',
- },
- 'ProgramPathFragmentInputGenCHROMIUM': {
- 'type': 'Custom',
- 'data_transfer_methods': ['shm'],
- 'extension': 'CHROMIUM_path_rendering',
- 'extension_flag': 'chromium_path_rendering',
- },
'SetDrawRectangleCHROMIUM': {
'decoder_func': 'DoSetDrawRectangleCHROMIUM',
'unit_test': False,
@@ -4411,7 +4172,18 @@ _FUNCTION_INFO = {
'decoder_func': 'DoEndSharedImageAccessDirectCHROMIUM',
'extension': 'CHROMIUM_shared_image',
'unit_test': False,
+ },
+ 'BeginBatchReadAccessSharedImageCHROMIUM': {
+ 'decoder_func': 'DoBeginBatchReadAccessSharedImageCHROMIUM',
+ 'extension': 'CHROMIUM_shared_image',
+ 'unit_test': False,
+ },
+ 'EndBatchReadAccessSharedImageCHROMIUM': {
+ 'decoder_func': 'DoEndBatchReadAccessSharedImageCHROMIUM',
+ 'extension': 'CHROMIUM_shared_image',
+ 'unit_test': False,
}
+
}
diff --git a/chromium/gpu/command_buffer/build_raster_cmd_buffer.py b/chromium/gpu/command_buffer/build_raster_cmd_buffer.py
index 4e18014175e..6308f352c0c 100755
--- a/chromium/gpu/command_buffer/build_raster_cmd_buffer.py
+++ b/chromium/gpu/command_buffer/build_raster_cmd_buffer.py
@@ -131,8 +131,8 @@ _NAMED_TYPE_INFO = {
'viz::ResourceFormat::R16_EXT',
'viz::ResourceFormat::RGBX_8888',
'viz::ResourceFormat::BGRX_8888',
- 'viz::ResourceFormat::RGBX_1010102',
- 'viz::ResourceFormat::BGRX_1010102',
+ 'viz::ResourceFormat::RGBA_1010102',
+ 'viz::ResourceFormat::BGRA_1010102',
'viz::ResourceFormat::YVU_420',
'viz::ResourceFormat::YUV_420_BIPLANAR',
diff --git a/chromium/gpu/command_buffer/build_webgpu_cmd_buffer.py b/chromium/gpu/command_buffer/build_webgpu_cmd_buffer.py
index bafd94dc683..2a106764900 100755
--- a/chromium/gpu/command_buffer/build_webgpu_cmd_buffer.py
+++ b/chromium/gpu/command_buffer/build_webgpu_cmd_buffer.py
@@ -47,30 +47,33 @@ _FUNCTION_INFO = {
'impl_func': False,
'internal': True,
'data_transfer_methods': ['shm'],
- 'cmd_args': 'uint32_t commands_shm_id, uint32_t commands_shm_offset, '
- 'uint32_t size',
+ 'cmd_args': 'uint64_t device_client_id, uint32_t commands_shm_id, '
+ 'uint32_t commands_shm_offset, uint32_t size',
'size_args': {
'commands': 'size * sizeof(char)',
},
},
'AssociateMailbox': {
+ 'impl_func': False,
+ 'client_test': False,
'type': 'PUT',
'count': 16, # GL_MAILBOX_SIZE_CHROMIUM
- 'trace_level': 1,
},
'DissociateMailbox': {
- 'trace_level': 1,
+ 'impl_func': False,
+ 'client_test': False,
},
'RequestAdapter': {
'impl_func': False,
'internal': True,
- 'cmd_args': 'uint32_t request_adapter_serial, uint32_t power_preference'
+ 'cmd_args': 'uint64_t request_adapter_serial, uint32_t power_preference'
},
'RequestDevice': {
'impl_func': False,
'internal': True,
'data_transfer_methods': ['shm'],
- 'cmd_args': 'uint32_t adapter_service_id, '
+ 'cmd_args': 'uint64_t device_client_id, '
+ 'uint32_t adapter_service_id, '
'uint32_t request_device_properties_shm_id, '
'uint32_t request_device_properties_shm_offset, '
'uint32_t request_device_properties_size',
@@ -79,6 +82,11 @@ _FUNCTION_INFO = {
'request_device_properties_size * sizeof(char)',
},
},
+ 'RemoveDevice': {
+ 'impl_func': False,
+ 'internal': True,
+ 'cmd_args': 'uint64_t device_client_id'
+ },
}
def main(argv):
diff --git a/chromium/gpu/command_buffer/client/BUILD.gn b/chromium/gpu/command_buffer/client/BUILD.gn
index 883c1932e7f..79eb3f50a75 100644
--- a/chromium/gpu/command_buffer/client/BUILD.gn
+++ b/chromium/gpu/command_buffer/client/BUILD.gn
@@ -15,49 +15,33 @@ declare_args() {
# separate static libraries in non-component build.
group("client") {
if (is_component_build) {
- public_deps = [
- "//gpu",
- ]
+ public_deps = [ "//gpu" ]
} else {
- public_deps = [
- ":client_sources",
- ]
+ public_deps = [ ":client_sources" ]
}
}
group("gles2_cmd_helper") {
if (is_component_build) {
- public_deps = [
- "//gpu:gles2",
- ]
+ public_deps = [ "//gpu:gles2" ]
} else {
- public_deps = [
- ":gles2_cmd_helper_sources",
- ]
+ public_deps = [ ":gles2_cmd_helper_sources" ]
}
}
group("raster") {
if (is_component_build) {
- public_deps = [
- "//gpu:raster",
- ]
+ public_deps = [ "//gpu:raster" ]
} else {
- public_deps = [
- ":raster_sources",
- ]
+ public_deps = [ ":raster_sources" ]
}
}
group("webgpu") {
if (is_component_build) {
- public_deps = [
- "//gpu:webgpu",
- ]
+ public_deps = [ "//gpu:webgpu" ]
} else {
- public_deps = [
- ":webgpu_sources",
- ]
+ public_deps = [ ":webgpu_sources" ]
}
}
@@ -75,6 +59,10 @@ jumbo_source_set("client_sources") {
"cmd_buffer_helper.h",
"fenced_allocator.cc",
"fenced_allocator.h",
+ "gl_helper.cc",
+ "gl_helper.h",
+ "gl_helper_scaling.cc",
+ "gl_helper_scaling.h",
"gpu_control.h",
"gpu_memory_buffer_manager.cc",
"gpu_memory_buffer_manager.h",
@@ -83,6 +71,7 @@ jumbo_source_set("client_sources") {
"mapped_memory.h",
"ring_buffer.cc",
"ring_buffer.h",
+ "shared_image_interface.cc",
"shared_image_interface.h",
"transfer_buffer.cc",
"transfer_buffer.h",
@@ -98,6 +87,7 @@ jumbo_source_set("client_sources") {
"//components/viz/common:resource_format",
]
deps = [
+ ":gles2_interface",
"//gpu/command_buffer/common:common_sources",
"//gpu/ipc/common:surface_handle_type",
"//ui/gfx:memory_buffer",
@@ -171,13 +161,9 @@ gles2_implementation_source_files = [
]
source_set("interface_base") {
- sources = [
- "interface_base.h",
- ]
+ sources = [ "interface_base.h" ]
public_configs = [ "//third_party/khronos:khronos_headers" ]
- deps = [
- "//base",
- ]
+ deps = [ "//base" ]
}
# Provides GLES2 interface, but does not cause any implementation to be linked
@@ -301,9 +287,7 @@ source_set("webgpu_sources") {
":webgpu_interface",
"//gpu/command_buffer/common:webgpu",
]
- public_deps = [
- "//third_party/dawn/src/dawn:dawn_headers",
- ]
+ public_deps = [ "//third_party/dawn/src/dawn:dawn_headers" ]
sources = [
"webgpu_cmd_helper.cc",
"webgpu_cmd_helper.h",
diff --git a/chromium/gpu/command_buffer/client/gl_helper.cc b/chromium/gpu/command_buffer/client/gl_helper.cc
new file mode 100644
index 00000000000..47c531d4c8e
--- /dev/null
+++ b/chromium/gpu/command_buffer/client/gl_helper.cc
@@ -0,0 +1,861 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "gpu/command_buffer/client/gl_helper.h"
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include <string>
+#include <utility>
+
+#include "base/bind.h"
+#include "base/bind_helpers.h"
+#include "base/containers/queue.h"
+#include "base/lazy_instance.h"
+#include "base/logging.h"
+#include "base/macros.h"
+#include "base/memory/ref_counted.h"
+#include "base/numerics/safe_conversions.h"
+#include "base/strings/string_util.h"
+#include "base/time/time.h"
+#include "base/trace_event/trace_event.h"
+#include "gpu/GLES2/gl2extchromium.h"
+#include "gpu/command_buffer/client/context_support.h"
+#include "gpu/command_buffer/client/gl_helper_scaling.h"
+#include "ui/gfx/geometry/point.h"
+#include "ui/gfx/geometry/rect.h"
+#include "ui/gfx/geometry/size.h"
+#include "ui/gfx/geometry/vector2d.h"
+
+namespace gpu {
+
+using gles2::GLES2Interface;
+
+namespace {
+
+class ScopedFlush {
+ public:
+ explicit ScopedFlush(gles2::GLES2Interface* gl) : gl_(gl) {}
+
+ ~ScopedFlush() { gl_->Flush(); }
+
+ private:
+ gles2::GLES2Interface* gl_;
+
+ DISALLOW_COPY_AND_ASSIGN(ScopedFlush);
+};
+
+// Helper class for allocating and holding an RGBA texture of a given
+// size.
+class TextureHolder {
+ public:
+ TextureHolder(GLES2Interface* gl, gfx::Size size)
+ : texture_(gl), size_(size) {
+ ScopedTextureBinder<GL_TEXTURE_2D> texture_binder(gl, texture_);
+ gl->TexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, size.width(), size.height(), 0,
+ GL_RGBA, GL_UNSIGNED_BYTE, nullptr);
+ }
+
+ GLuint texture() const { return texture_.id(); }
+ gfx::Size size() const { return size_; }
+
+ private:
+ ScopedTexture texture_;
+ gfx::Size size_;
+
+ DISALLOW_COPY_AND_ASSIGN(TextureHolder);
+};
+
+class I420ConverterImpl : public I420Converter {
+ public:
+ I420ConverterImpl(GLES2Interface* gl,
+ GLHelperScaling* scaler_impl,
+ bool flipped_source,
+ bool flip_output,
+ bool swizzle,
+ bool use_mrt);
+
+ ~I420ConverterImpl() override;
+
+ void Convert(GLuint src_texture,
+ const gfx::Size& src_texture_size,
+ const gfx::Vector2dF& src_offset,
+ GLHelper::ScalerInterface* optional_scaler,
+ const gfx::Rect& output_rect,
+ GLuint y_plane_texture,
+ GLuint u_plane_texture,
+ GLuint v_plane_texture) override;
+
+ bool IsSamplingFlippedSource() const override;
+ bool IsFlippingOutput() const override;
+ GLenum GetReadbackFormat() const override;
+
+ protected:
+ // Returns true if the planerizer should use the faster, two-pass shaders
+ // to generate the YUV planar outputs. If false, the source will be
+ // scanned three times, once for each Y/U/V plane.
+ bool use_mrt() const { return !v_planerizer_; }
+
+ // Reallocates the intermediate and plane textures, if needed.
+ void EnsureTexturesSizedFor(const gfx::Size& scaler_output_size,
+ const gfx::Size& y_texture_size,
+ const gfx::Size& chroma_texture_size,
+ GLuint y_plane_texture,
+ GLuint u_plane_texture,
+ GLuint v_plane_texture);
+
+ GLES2Interface* const gl_;
+
+ private:
+ // These generate the Y/U/V planes. If MRT is being used, |y_planerizer_|
+ // generates the Y and interim UV plane, |u_planerizer_| generates the
+ // final U and V planes, and |v_planerizer_| is unused. If MRT is not
+ // being used, each of these generates only one of the Y/U/V planes.
+ const std::unique_ptr<GLHelper::ScalerInterface> y_planerizer_;
+ const std::unique_ptr<GLHelper::ScalerInterface> u_planerizer_;
+ const std::unique_ptr<GLHelper::ScalerInterface> v_planerizer_;
+
+ // Intermediate texture, holding the scaler's output.
+ base::Optional<TextureHolder> intermediate_;
+
+ // Intermediate texture, holding the UV interim output (if the MRT shader
+ // is being used).
+ base::Optional<ScopedTexture> uv_;
+
+ DISALLOW_COPY_AND_ASSIGN(I420ConverterImpl);
+};
+
+} // namespace
+
+// Implements texture consumption/readback and encapsulates
+// the data needed for it.
+class GLHelper::CopyTextureToImpl
+ : public base::SupportsWeakPtr<GLHelper::CopyTextureToImpl> {
+ public:
+ CopyTextureToImpl(GLES2Interface* gl,
+ ContextSupport* context_support,
+ GLHelper* helper)
+ : gl_(gl),
+ context_support_(context_support),
+ helper_(helper),
+ flush_(gl) {}
+ ~CopyTextureToImpl() { CancelRequests(); }
+
+ void ReadbackTextureAsync(GLuint texture,
+ GLenum texture_target,
+ const gfx::Size& dst_size,
+ unsigned char* out,
+ GLenum format,
+ base::OnceCallback<void(bool)> callback);
+
+ // Reads back bytes from the currently bound frame buffer.
+  // Note that |dst_size| is specified in pixels, while |bytes_per_row| and
+  // |row_stride_bytes| are specified in bytes.
+ void ReadbackAsync(const gfx::Size& dst_size,
+ size_t bytes_per_row, // generally dst_size.width() * 4
+ size_t row_stride_bytes, // generally dst_size.width() * 4
+ unsigned char* out,
+ GLenum format,
+ GLenum type,
+ size_t bytes_per_pixel,
+ base::OnceCallback<void(bool)> callback);
+
+ void ReadbackPlane(const gfx::Size& texture_size,
+ int row_stride_bytes,
+ unsigned char* data,
+ int size_shift,
+ const gfx::Rect& paste_rect,
+ ReadbackSwizzle swizzle,
+ base::OnceCallback<void(bool)> callback);
+
+ std::unique_ptr<ReadbackYUVInterface> CreateReadbackPipelineYUV(
+ bool flip_vertically,
+ bool use_mrt);
+
+ private:
+ // Represents the state of a single readback request.
+  // The main thread can cancel the request, before it's handled by the helper
+  // thread, by resetting the texture and pixels fields. Alternatively, the
+  // helper thread marks that it is handling the request by resetting the
+  // pixels field (meaning it guarantees that the callback will be called).
+  // In either case, the callback must be called exactly once, and the texture
+  // must be deleted by the main thread's GL context.
+ struct Request {
+ Request(const gfx::Size& size_,
+ size_t bytes_per_row_,
+ size_t row_stride_bytes_,
+ unsigned char* pixels_,
+ base::OnceCallback<void(bool)> callback_)
+ : done(false),
+ size(size_),
+ bytes_per_row(bytes_per_row_),
+ row_stride_bytes(row_stride_bytes_),
+ pixels(pixels_),
+ callback(std::move(callback_)),
+ buffer(0),
+ query(0) {}
+
+ bool done;
+ bool result;
+ gfx::Size size;
+ size_t bytes_per_row;
+ size_t row_stride_bytes;
+ unsigned char* pixels;
+ base::OnceCallback<void(bool)> callback;
+ GLuint buffer;
+ GLuint query;
+ };
+
+ // We must take care to call the callbacks last, as they may
+ // end up destroying the gl_helper and make *this invalid.
+ // We stick the finished requests in a stack object that calls
+ // the callbacks when it goes out of scope.
+ class FinishRequestHelper {
+ public:
+ FinishRequestHelper() {}
+ ~FinishRequestHelper() {
+ while (!requests_.empty()) {
+ Request* request = requests_.front();
+ requests_.pop();
+ std::move(request->callback).Run(request->result);
+ delete request;
+ }
+ }
+ void Add(Request* r) { requests_.push(r); }
+
+ private:
+ base::queue<Request*> requests_;
+ DISALLOW_COPY_AND_ASSIGN(FinishRequestHelper);
+ };
+
+ // A readback pipeline that also converts the data to YUV before
+ // reading it back.
+ class ReadbackYUVImpl : public I420ConverterImpl,
+ public ReadbackYUVInterface {
+ public:
+ ReadbackYUVImpl(GLES2Interface* gl,
+ CopyTextureToImpl* copy_impl,
+ GLHelperScaling* scaler_impl,
+ bool flip_vertically,
+ ReadbackSwizzle swizzle,
+ bool use_mrt);
+
+ ~ReadbackYUVImpl() override;
+
+ void SetScaler(std::unique_ptr<GLHelper::ScalerInterface> scaler) override;
+
+ GLHelper::ScalerInterface* scaler() const override;
+
+ bool IsFlippingOutput() const override;
+
+ void ReadbackYUV(GLuint texture,
+ const gfx::Size& src_texture_size,
+ const gfx::Rect& output_rect,
+ int y_plane_row_stride_bytes,
+ unsigned char* y_plane_data,
+ int u_plane_row_stride_bytes,
+ unsigned char* u_plane_data,
+ int v_plane_row_stride_bytes,
+ unsigned char* v_plane_data,
+ const gfx::Point& paste_location,
+ base::OnceCallback<void(bool)> callback) override;
+
+ private:
+ GLES2Interface* gl_;
+ CopyTextureToImpl* copy_impl_;
+ ReadbackSwizzle swizzle_;
+
+ // May be null if no scaling is required. This can be changed between
+ // calls to ReadbackYUV().
+ std::unique_ptr<GLHelper::ScalerInterface> scaler_;
+
+ // These are the output textures for each Y/U/V plane.
+ ScopedTexture y_;
+ ScopedTexture u_;
+ ScopedTexture v_;
+
+ // Framebuffers used by ReadbackPlane(). They are cached here so as to not
+ // be re-allocated for every frame of video.
+ ScopedFramebuffer y_readback_framebuffer_;
+ ScopedFramebuffer u_readback_framebuffer_;
+ ScopedFramebuffer v_readback_framebuffer_;
+
+ DISALLOW_COPY_AND_ASSIGN(ReadbackYUVImpl);
+ };
+
+ void ReadbackDone(Request* request, size_t bytes_per_pixel);
+ void FinishRequest(Request* request,
+ bool result,
+ FinishRequestHelper* helper);
+ void CancelRequests();
+
+ bool IsBGRAReadbackSupported();
+
+ GLES2Interface* gl_;
+ ContextSupport* context_support_;
+ GLHelper* helper_;
+
+ // A scoped flush that will ensure all resource deletions are flushed when
+ // this object is destroyed. Must be declared before other Scoped* fields.
+ ScopedFlush flush_;
+
+ base::queue<Request*> request_queue_;
+
+ // Lazily set by IsBGRAReadbackSupported().
+ enum {
+ BGRA_SUPPORT_UNKNOWN,
+ BGRA_SUPPORTED,
+ BGRA_NOT_SUPPORTED
+ } bgra_support_ = BGRA_SUPPORT_UNKNOWN;
+
+  // A run-once test is lazily executed in CreateReadbackPipelineYUV(), to
+ // determine whether the GL_BGRA_EXT format is preferred for readback.
+ enum {
+ BGRA_PREFERENCE_UNKNOWN,
+ BGRA_PREFERRED,
+ BGRA_NOT_PREFERRED
+ } bgra_preference_ = BGRA_PREFERENCE_UNKNOWN;
+};
+
+std::unique_ptr<GLHelper::ScalerInterface> GLHelper::CreateScaler(
+ ScalerQuality quality,
+ const gfx::Vector2d& scale_from,
+ const gfx::Vector2d& scale_to,
+ bool flipped_source,
+ bool flip_output,
+ bool swizzle) {
+ InitScalerImpl();
+ return scaler_impl_->CreateScaler(quality, scale_from, scale_to,
+ flipped_source, flip_output, swizzle);
+}
+
+void GLHelper::CopyTextureToImpl::ReadbackAsync(
+ const gfx::Size& dst_size,
+ size_t bytes_per_row,
+ size_t row_stride_bytes,
+ unsigned char* out,
+ GLenum format,
+ GLenum type,
+ size_t bytes_per_pixel,
+ base::OnceCallback<void(bool)> callback) {
+ TRACE_EVENT0("gpu.capture", "GLHelper::CopyTextureToImpl::ReadbackAsync");
+ Request* request = new Request(dst_size, bytes_per_row, row_stride_bytes, out,
+ std::move(callback));
+ request_queue_.push(request);
+ request->buffer = 0u;
+
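+  // Asynchronous readback: with a buffer bound to
+  // GL_PIXEL_PACK_TRANSFER_BUFFER_CHROMIUM, ReadPixels() with a null pointer
+  // writes into that buffer rather than into client memory. The query below is
+  // signaled once the transfer completes, at which point ReadbackDone() maps
+  // the buffer and copies the pixels out.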
+ gl_->GenBuffers(1, &request->buffer);
+ gl_->BindBuffer(GL_PIXEL_PACK_TRANSFER_BUFFER_CHROMIUM, request->buffer);
+ gl_->BufferData(GL_PIXEL_PACK_TRANSFER_BUFFER_CHROMIUM,
+ bytes_per_pixel * dst_size.GetArea(), nullptr,
+ GL_STREAM_READ);
+
+ request->query = 0u;
+ gl_->GenQueriesEXT(1, &request->query);
+ gl_->BeginQueryEXT(GL_ASYNC_PIXEL_PACK_COMPLETED_CHROMIUM, request->query);
+ gl_->ReadPixels(0, 0, dst_size.width(), dst_size.height(), format, type,
+ nullptr);
+ gl_->EndQueryEXT(GL_ASYNC_PIXEL_PACK_COMPLETED_CHROMIUM);
+ gl_->BindBuffer(GL_PIXEL_PACK_TRANSFER_BUFFER_CHROMIUM, 0);
+ context_support_->SignalQuery(
+ request->query, base::BindOnce(&CopyTextureToImpl::ReadbackDone,
+ AsWeakPtr(), request, bytes_per_pixel));
+}
+
+void GLHelper::CopyTextureToImpl::ReadbackTextureAsync(
+ GLuint texture,
+ GLenum texture_target,
+ const gfx::Size& dst_size,
+ unsigned char* out,
+ GLenum format,
+ base::OnceCallback<void(bool)> callback) {
+ constexpr size_t kBytesPerPixel = 4;
+
+ // Note: It's possible the GL implementation supports other readback
+ // types. However, as of this writing, no caller of this method will
+ // request a different |color_type| (i.e., requiring using some other GL
+ // format).
+ if (format != GL_RGBA &&
+ (format != GL_BGRA_EXT || !IsBGRAReadbackSupported())) {
+ std::move(callback).Run(false);
+ return;
+ }
+
+ ScopedFramebuffer dst_framebuffer(gl_);
+ ScopedFramebufferBinder<GL_FRAMEBUFFER> framebuffer_binder(gl_,
+ dst_framebuffer);
+ gl_->BindTexture(texture_target, texture);
+ gl_->FramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0,
+ texture_target, texture, 0);
+ ReadbackAsync(dst_size, dst_size.width() * kBytesPerPixel,
+ dst_size.width() * kBytesPerPixel, out, format,
+ GL_UNSIGNED_BYTE, kBytesPerPixel, std::move(callback));
+ gl_->BindTexture(texture_target, 0);
+}
+
+void GLHelper::CopyTextureToImpl::ReadbackDone(Request* finished_request,
+ size_t bytes_per_pixel) {
+ TRACE_EVENT0("gpu.capture",
+ "GLHelper::CopyTextureToImpl::CheckReadbackFramebufferComplete");
+ finished_request->done = true;
+
+ FinishRequestHelper finish_request_helper;
+
+ // We process transfer requests in the order they were received, regardless
+ // of the order we get the callbacks in.
+ while (!request_queue_.empty()) {
+ Request* request = request_queue_.front();
+ if (!request->done) {
+ break;
+ }
+
+ bool result = false;
+ if (request->buffer != 0) {
+ gl_->BindBuffer(GL_PIXEL_PACK_TRANSFER_BUFFER_CHROMIUM, request->buffer);
+ unsigned char* data = static_cast<unsigned char*>(gl_->MapBufferCHROMIUM(
+ GL_PIXEL_PACK_TRANSFER_BUFFER_CHROMIUM, GL_READ_ONLY));
+ if (data) {
+ result = true;
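+        // When the caller's layout matches the tightly packed rows returned by
+        // the readback (no padding, full rows), the whole block is copied with
+        // a single memcpy; otherwise it is copied row by row, honoring
+        // |bytes_per_row| and |row_stride_bytes|.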
+ if (request->bytes_per_row == request->size.width() * bytes_per_pixel &&
+ request->bytes_per_row == request->row_stride_bytes) {
+ memcpy(request->pixels, data,
+ request->size.GetArea() * bytes_per_pixel);
+ } else {
+ unsigned char* out = request->pixels;
+ for (int y = 0; y < request->size.height(); y++) {
+ memcpy(out, data, request->bytes_per_row);
+ out += request->row_stride_bytes;
+ data += request->size.width() * bytes_per_pixel;
+ }
+ }
+ gl_->UnmapBufferCHROMIUM(GL_PIXEL_PACK_TRANSFER_BUFFER_CHROMIUM);
+ }
+ gl_->BindBuffer(GL_PIXEL_PACK_TRANSFER_BUFFER_CHROMIUM, 0);
+ }
+ FinishRequest(request, result, &finish_request_helper);
+ }
+}
+
+void GLHelper::CopyTextureToImpl::FinishRequest(
+ Request* request,
+ bool result,
+ FinishRequestHelper* finish_request_helper) {
+ TRACE_EVENT0("gpu.capture", "GLHelper::CopyTextureToImpl::FinishRequest");
+ DCHECK(request_queue_.front() == request);
+ request_queue_.pop();
+ request->result = result;
+ ScopedFlush flush(gl_);
+ if (request->query != 0) {
+ gl_->DeleteQueriesEXT(1, &request->query);
+ request->query = 0;
+ }
+ if (request->buffer != 0) {
+ gl_->DeleteBuffers(1, &request->buffer);
+ request->buffer = 0;
+ }
+ finish_request_helper->Add(request);
+}
+
+void GLHelper::CopyTextureToImpl::CancelRequests() {
+ FinishRequestHelper finish_request_helper;
+ while (!request_queue_.empty()) {
+ Request* request = request_queue_.front();
+ FinishRequest(request, false, &finish_request_helper);
+ }
+}
+
+bool GLHelper::CopyTextureToImpl::IsBGRAReadbackSupported() {
+  if (bgra_support_ == BGRA_SUPPORT_UNKNOWN) {
+ bgra_support_ = BGRA_NOT_SUPPORTED;
+ if (auto* extensions = gl_->GetString(GL_EXTENSIONS)) {
+ const std::string extensions_string =
+ " " + std::string(reinterpret_cast<const char*>(extensions)) + " ";
+ if (extensions_string.find(" GL_EXT_read_format_bgra ") !=
+ std::string::npos) {
+ bgra_support_ = BGRA_SUPPORTED;
+ }
+ }
+ }
+
+ return bgra_support_ == BGRA_SUPPORTED;
+}
+
+GLHelper::GLHelper(GLES2Interface* gl, ContextSupport* context_support)
+ : gl_(gl), context_support_(context_support) {}
+
+GLHelper::~GLHelper() {}
+
+void GLHelper::ReadbackTextureAsync(GLuint texture,
+ GLenum texture_target,
+ const gfx::Size& dst_size,
+ unsigned char* out,
+ GLenum format,
+ base::OnceCallback<void(bool)> callback) {
+ InitCopyTextToImpl();
+ copy_texture_to_impl_->ReadbackTextureAsync(texture, texture_target, dst_size,
+ out, format, std::move(callback));
+}
+
+void GLHelper::InitCopyTextToImpl() {
+ // Lazily initialize |copy_texture_to_impl_|
+ if (!copy_texture_to_impl_)
+ copy_texture_to_impl_.reset(
+ new CopyTextureToImpl(gl_, context_support_, this));
+}
+
+void GLHelper::InitScalerImpl() {
+ // Lazily initialize |scaler_impl_|
+ if (!scaler_impl_)
+ scaler_impl_.reset(new GLHelperScaling(gl_, this));
+}
+
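+// The result is memoized in |max_draw_buffers_| (-1 means "not queried yet"),
+// so the GL implementation is only consulted on the first call.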
+GLint GLHelper::MaxDrawBuffers() {
+ if (max_draw_buffers_ < 0) {
+ max_draw_buffers_ = 0;
+ const GLubyte* extensions = gl_->GetString(GL_EXTENSIONS);
+ if (extensions) {
+ const std::string extensions_string =
+ " " + std::string(reinterpret_cast<const char*>(extensions)) + " ";
+ if (extensions_string.find(" GL_EXT_draw_buffers ") !=
+ std::string::npos) {
+ gl_->GetIntegerv(GL_MAX_DRAW_BUFFERS_EXT, &max_draw_buffers_);
+ DCHECK_GE(max_draw_buffers_, 0);
+ }
+ }
+ }
+
+ return max_draw_buffers_;
+}
+
+void GLHelper::CopyTextureToImpl::ReadbackPlane(
+ const gfx::Size& texture_size,
+ int row_stride_bytes,
+ unsigned char* data,
+ int size_shift,
+ const gfx::Rect& paste_rect,
+ ReadbackSwizzle swizzle,
+ base::OnceCallback<void(bool)> callback) {
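+  // |size_shift| is 0 when reading back the Y plane and 1 for the
+  // half-resolution U and V planes; paste coordinates and the copied row width
+  // are shifted right accordingly.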
+ const size_t offset = row_stride_bytes * (paste_rect.y() >> size_shift) +
+ (paste_rect.x() >> size_shift);
+ ReadbackAsync(texture_size, paste_rect.width() >> size_shift,
+ row_stride_bytes, data + offset,
+ (swizzle == kSwizzleBGRA) ? GL_BGRA_EXT : GL_RGBA,
+ GL_UNSIGNED_BYTE, 4, std::move(callback));
+}
+
+I420Converter::I420Converter() = default;
+I420Converter::~I420Converter() = default;
+
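+// Each RGBA texel of an output plane packs four horizontally adjacent
+// single-channel pixels, so the Y plane texture is 1/4 of the output width and
+// the chroma plane textures are 1/8 of the output width (and half the height,
+// per I420 subsampling).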
+// static
+gfx::Size I420Converter::GetYPlaneTextureSize(const gfx::Size& output_size) {
+ return gfx::Size((output_size.width() + 3) / 4, output_size.height());
+}
+
+// static
+gfx::Size I420Converter::GetChromaPlaneTextureSize(
+ const gfx::Size& output_size) {
+ return gfx::Size((output_size.width() + 7) / 8,
+ (output_size.height() + 1) / 2);
+}
+
+namespace {
+
+I420ConverterImpl::I420ConverterImpl(GLES2Interface* gl,
+ GLHelperScaling* scaler_impl,
+ bool flipped_source,
+ bool flip_output,
+ bool swizzle,
+ bool use_mrt)
+ : gl_(gl),
+ y_planerizer_(
+ use_mrt ? scaler_impl->CreateI420MrtPass1Planerizer(flipped_source,
+ flip_output,
+ swizzle)
+ : scaler_impl->CreateI420Planerizer(0,
+ flipped_source,
+ flip_output,
+ swizzle)),
+ u_planerizer_(use_mrt ? scaler_impl->CreateI420MrtPass2Planerizer(swizzle)
+ : scaler_impl->CreateI420Planerizer(1,
+ flipped_source,
+ flip_output,
+ swizzle)),
+ v_planerizer_(use_mrt ? nullptr
+ : scaler_impl->CreateI420Planerizer(2,
+ flipped_source,
+ flip_output,
+ swizzle)) {}
+
+I420ConverterImpl::~I420ConverterImpl() = default;
+
+void I420ConverterImpl::Convert(GLuint src_texture,
+ const gfx::Size& src_texture_size,
+ const gfx::Vector2dF& src_offset,
+ GLHelper::ScalerInterface* optional_scaler,
+ const gfx::Rect& output_rect,
+ GLuint y_plane_texture,
+ GLuint u_plane_texture,
+ GLuint v_plane_texture) {
+ const gfx::Size scaler_output_size =
+ optional_scaler ? output_rect.size() : gfx::Size();
+ const gfx::Size y_texture_size = GetYPlaneTextureSize(output_rect.size());
+ const gfx::Size chroma_texture_size =
+ GetChromaPlaneTextureSize(output_rect.size());
+ EnsureTexturesSizedFor(scaler_output_size, y_texture_size,
+ chroma_texture_size, y_plane_texture, u_plane_texture,
+ v_plane_texture);
+
+ // Scale first, if needed.
+ if (optional_scaler) {
+ // The scaler should not be configured to do any swizzling.
+ DCHECK_EQ(optional_scaler->GetReadbackFormat(),
+ static_cast<GLenum>(GL_RGBA));
+ optional_scaler->Scale(src_texture, src_texture_size, src_offset,
+ intermediate_->texture(), output_rect);
+ }
+
+ // Convert the intermediate (or source) texture into Y, U and V planes.
+ const GLuint texture =
+ optional_scaler ? intermediate_->texture() : src_texture;
+ const gfx::Size texture_size =
+ optional_scaler ? intermediate_->size() : src_texture_size;
+ const gfx::Vector2dF offset = optional_scaler ? gfx::Vector2dF() : src_offset;
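+  // With MRT, the first pass renders the Y plane and an interim packed UV
+  // texture in a single scan of the source; the second pass then splits UV
+  // into the final U and V planes. Without MRT, the source is planerized three
+  // times, once per plane.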
+ if (use_mrt()) {
+ y_planerizer_->ScaleToMultipleOutputs(texture, texture_size, offset,
+ y_plane_texture, uv_->id(),
+ gfx::Rect(y_texture_size));
+ u_planerizer_->ScaleToMultipleOutputs(
+ uv_->id(), y_texture_size, gfx::Vector2dF(), u_plane_texture,
+ v_plane_texture, gfx::Rect(chroma_texture_size));
+ } else {
+ y_planerizer_->Scale(texture, texture_size, offset, y_plane_texture,
+ gfx::Rect(y_texture_size));
+ u_planerizer_->Scale(texture, texture_size, offset, u_plane_texture,
+ gfx::Rect(chroma_texture_size));
+ v_planerizer_->Scale(texture, texture_size, offset, v_plane_texture,
+ gfx::Rect(chroma_texture_size));
+ }
+}
+
+bool I420ConverterImpl::IsSamplingFlippedSource() const {
+ return y_planerizer_->IsSamplingFlippedSource();
+}
+
+bool I420ConverterImpl::IsFlippingOutput() const {
+ return y_planerizer_->IsFlippingOutput();
+}
+
+GLenum I420ConverterImpl::GetReadbackFormat() const {
+ return y_planerizer_->GetReadbackFormat();
+}
+
+void I420ConverterImpl::EnsureTexturesSizedFor(
+ const gfx::Size& scaler_output_size,
+ const gfx::Size& y_texture_size,
+ const gfx::Size& chroma_texture_size,
+ GLuint y_plane_texture,
+ GLuint u_plane_texture,
+ GLuint v_plane_texture) {
+ // Reallocate the intermediate texture, if needed.
+ if (!scaler_output_size.IsEmpty()) {
+ if (!intermediate_ || intermediate_->size() != scaler_output_size)
+ intermediate_.emplace(gl_, scaler_output_size);
+ } else {
+ intermediate_ = base::nullopt;
+ }
+
+ // Size the interim UV plane and the three output planes.
+ const auto SetRGBATextureSize = [this](const gfx::Size& size) {
+ gl_->TexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, size.width(), size.height(), 0,
+ GL_RGBA, GL_UNSIGNED_BYTE, nullptr);
+ };
+ if (use_mrt()) {
+ uv_.emplace(gl_);
+ gl_->BindTexture(GL_TEXTURE_2D, uv_->id());
+ SetRGBATextureSize(y_texture_size);
+ }
+ gl_->BindTexture(GL_TEXTURE_2D, y_plane_texture);
+ SetRGBATextureSize(y_texture_size);
+ gl_->BindTexture(GL_TEXTURE_2D, u_plane_texture);
+ SetRGBATextureSize(chroma_texture_size);
+ gl_->BindTexture(GL_TEXTURE_2D, v_plane_texture);
+ SetRGBATextureSize(chroma_texture_size);
+}
+
+} // namespace
+
+GLHelper::CopyTextureToImpl::ReadbackYUVImpl::ReadbackYUVImpl(
+ GLES2Interface* gl,
+ CopyTextureToImpl* copy_impl,
+ GLHelperScaling* scaler_impl,
+ bool flip_vertically,
+ ReadbackSwizzle swizzle,
+ bool use_mrt)
+ : I420ConverterImpl(gl,
+ scaler_impl,
+ false,
+ flip_vertically,
+ swizzle == kSwizzleBGRA,
+ use_mrt),
+ gl_(gl),
+ copy_impl_(copy_impl),
+ swizzle_(swizzle),
+ y_(gl_),
+ u_(gl_),
+ v_(gl_),
+ y_readback_framebuffer_(gl_),
+ u_readback_framebuffer_(gl_),
+ v_readback_framebuffer_(gl_) {}
+
+GLHelper::CopyTextureToImpl::ReadbackYUVImpl::~ReadbackYUVImpl() = default;
+
+void GLHelper::CopyTextureToImpl::ReadbackYUVImpl::SetScaler(
+ std::unique_ptr<GLHelper::ScalerInterface> scaler) {
+ scaler_ = std::move(scaler);
+}
+
+GLHelper::ScalerInterface*
+GLHelper::CopyTextureToImpl::ReadbackYUVImpl::scaler() const {
+ return scaler_.get();
+}
+
+bool GLHelper::CopyTextureToImpl::ReadbackYUVImpl::IsFlippingOutput() const {
+ return I420ConverterImpl::IsFlippingOutput();
+}
+
+void GLHelper::CopyTextureToImpl::ReadbackYUVImpl::ReadbackYUV(
+ GLuint texture,
+ const gfx::Size& src_texture_size,
+ const gfx::Rect& output_rect,
+ int y_plane_row_stride_bytes,
+ unsigned char* y_plane_data,
+ int u_plane_row_stride_bytes,
+ unsigned char* u_plane_data,
+ int v_plane_row_stride_bytes,
+ unsigned char* v_plane_data,
+ const gfx::Point& paste_location,
+ base::OnceCallback<void(bool)> callback) {
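+  // The paste location must be even-aligned: the U and V planes are subsampled
+  // by two in each dimension, so an odd offset cannot be represented in the
+  // chroma planes.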
+ DCHECK(!(paste_location.x() & 1));
+ DCHECK(!(paste_location.y() & 1));
+
+ I420ConverterImpl::Convert(texture, src_texture_size, gfx::Vector2dF(),
+ scaler_.get(), output_rect, y_, u_, v_);
+
+  // Read back planes, one at a time. The caller must keep the destination
+  // plane memory alive until |callback| has run.
+ const gfx::Rect paste_rect(paste_location, output_rect.size());
+ const auto SetUpAndBindFramebuffer = [this](GLuint framebuffer,
+ GLuint texture) {
+ gl_->BindFramebuffer(GL_FRAMEBUFFER, framebuffer);
+ gl_->FramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0,
+ GL_TEXTURE_2D, texture, 0);
+ };
+ SetUpAndBindFramebuffer(y_readback_framebuffer_, y_);
+ copy_impl_->ReadbackPlane(
+ GetYPlaneTextureSize(output_rect.size()), y_plane_row_stride_bytes,
+ y_plane_data, 0, paste_rect, swizzle_, base::DoNothing::Once<bool>());
+ SetUpAndBindFramebuffer(u_readback_framebuffer_, u_);
+ const gfx::Size chroma_texture_size =
+ GetChromaPlaneTextureSize(output_rect.size());
+ copy_impl_->ReadbackPlane(chroma_texture_size, u_plane_row_stride_bytes,
+ u_plane_data, 1, paste_rect, swizzle_,
+ base::DoNothing::Once<bool>());
+ SetUpAndBindFramebuffer(v_readback_framebuffer_, v_);
+ copy_impl_->ReadbackPlane(chroma_texture_size, v_plane_row_stride_bytes,
+ v_plane_data, 1, paste_rect, swizzle_,
+ std::move(callback));
+ gl_->BindFramebuffer(GL_FRAMEBUFFER, 0);
+}
+
+std::unique_ptr<I420Converter> GLHelper::CreateI420Converter(
+ bool flipped_source,
+ bool flip_output,
+ bool swizzle,
+ bool use_mrt) {
+ InitCopyTextToImpl();
+ InitScalerImpl();
+ return std::make_unique<I420ConverterImpl>(
+ gl_, scaler_impl_.get(), flipped_source, flip_output, swizzle,
+ use_mrt && (MaxDrawBuffers() >= 2));
+}
+
+std::unique_ptr<ReadbackYUVInterface>
+GLHelper::CopyTextureToImpl::CreateReadbackPipelineYUV(bool flip_vertically,
+ bool use_mrt) {
+ helper_->InitScalerImpl();
+
+ if (bgra_preference_ == BGRA_PREFERENCE_UNKNOWN) {
+ if (IsBGRAReadbackSupported()) {
+      // Test whether GL_BGRA_EXT is preferred for readback by creating a test
+ // texture, binding it to a framebuffer as a color attachment, and then
+ // querying the implementation for the framebuffer's readback format.
+ constexpr int kTestSize = 64;
+ GLuint texture = 0;
+ gl_->GenTextures(1, &texture);
+ gl_->BindTexture(GL_TEXTURE_2D, texture);
+ gl_->TexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
+ gl_->TexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
+ gl_->TexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
+ gl_->TexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
+ gl_->TexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, kTestSize, kTestSize, 0,
+ GL_RGBA, GL_UNSIGNED_BYTE, nullptr);
+ GLuint framebuffer = 0;
+ gl_->GenFramebuffers(1, &framebuffer);
+ gl_->BindFramebuffer(GL_FRAMEBUFFER, framebuffer);
+ gl_->FramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0,
+ GL_TEXTURE_2D, texture, 0);
+ GLint readback_format = 0;
+ GLint readback_type = 0;
+ gl_->GetIntegerv(GL_IMPLEMENTATION_COLOR_READ_FORMAT, &readback_format);
+ gl_->GetIntegerv(GL_IMPLEMENTATION_COLOR_READ_TYPE, &readback_type);
+ if (readback_format == GL_BGRA_EXT && readback_type == GL_UNSIGNED_BYTE) {
+ bgra_preference_ = BGRA_PREFERRED;
+ } else {
+ bgra_preference_ = BGRA_NOT_PREFERRED;
+ }
+ if (framebuffer != 0)
+ gl_->DeleteFramebuffers(1, &framebuffer);
+ if (texture != 0)
+ gl_->DeleteTextures(1, &texture);
+ } else {
+ bgra_preference_ = BGRA_NOT_PREFERRED;
+ }
+ }
+
+ const ReadbackSwizzle swizzle =
+ (bgra_preference_ == BGRA_PREFERRED) ? kSwizzleBGRA : kSwizzleNone;
+ return std::make_unique<ReadbackYUVImpl>(
+ gl_, this, helper_->scaler_impl_.get(), flip_vertically, swizzle,
+ use_mrt && (helper_->MaxDrawBuffers() >= 2));
+}
+
+std::unique_ptr<ReadbackYUVInterface> GLHelper::CreateReadbackPipelineYUV(
+ bool flip_vertically,
+ bool use_mrt) {
+ InitCopyTextToImpl();
+ return copy_texture_to_impl_->CreateReadbackPipelineYUV(flip_vertically,
+ use_mrt);
+}
+
+ReadbackYUVInterface* GLHelper::GetReadbackPipelineYUV(
+ bool vertically_flip_texture) {
+ ReadbackYUVInterface* yuv_reader = nullptr;
+ if (vertically_flip_texture) {
+ if (!shared_readback_yuv_flip_) {
+ shared_readback_yuv_flip_ = CreateReadbackPipelineYUV(
+ vertically_flip_texture, true /* use_mrt */);
+ }
+ yuv_reader = shared_readback_yuv_flip_.get();
+ } else {
+ if (!shared_readback_yuv_noflip_) {
+ shared_readback_yuv_noflip_ = CreateReadbackPipelineYUV(
+ vertically_flip_texture, true /* use_mrt */);
+ }
+ yuv_reader = shared_readback_yuv_noflip_.get();
+ }
+ DCHECK(!yuv_reader->scaler());
+ return yuv_reader;
+}
+
+} // namespace gpu
diff --git a/chromium/gpu/command_buffer/client/gl_helper.h b/chromium/gpu/command_buffer/client/gl_helper.h
new file mode 100644
index 00000000000..379c4566079
--- /dev/null
+++ b/chromium/gpu/command_buffer/client/gl_helper.h
@@ -0,0 +1,463 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef GPU_COMMAND_BUFFER_CLIENT_GL_HELPER_H_
+#define GPU_COMMAND_BUFFER_CLIENT_GL_HELPER_H_
+
+#include <memory>
+
+#include "base/callback.h"
+#include "base/macros.h"
+#include "gpu/command_buffer/client/gles2_interface.h"
+#include "gpu/gpu_export.h"
+#include "ui/gfx/geometry/size.h"
+
+namespace gfx {
+class Point;
+class Rect;
+class Vector2d;
+class Vector2dF;
+} // namespace gfx
+
+namespace gpu {
+
+class ContextSupport;
+class GLHelperScaling;
+
+class ScopedGLuint {
+ public:
+ typedef void (gles2::GLES2Interface::*GenFunc)(GLsizei n, GLuint* ids);
+ typedef void (gles2::GLES2Interface::*DeleteFunc)(GLsizei n,
+ const GLuint* ids);
+ ScopedGLuint(gles2::GLES2Interface* gl,
+ GenFunc gen_func,
+ DeleteFunc delete_func)
+ : gl_(gl), id_(0u), delete_func_(delete_func) {
+ (gl_->*gen_func)(1, &id_);
+ }
+
+ operator GLuint() const { return id_; }
+
+ GLuint id() const { return id_; }
+
+ ~ScopedGLuint() {
+ if (id_ != 0) {
+ (gl_->*delete_func_)(1, &id_);
+ }
+ }
+
+ private:
+ gles2::GLES2Interface* gl_;
+ GLuint id_;
+ DeleteFunc delete_func_;
+
+ DISALLOW_COPY_AND_ASSIGN(ScopedGLuint);
+};
+
+class ScopedBuffer : public ScopedGLuint {
+ public:
+ explicit ScopedBuffer(gles2::GLES2Interface* gl)
+ : ScopedGLuint(gl,
+ &gles2::GLES2Interface::GenBuffers,
+ &gles2::GLES2Interface::DeleteBuffers) {}
+};
+
+class ScopedFramebuffer : public ScopedGLuint {
+ public:
+ explicit ScopedFramebuffer(gles2::GLES2Interface* gl)
+ : ScopedGLuint(gl,
+ &gles2::GLES2Interface::GenFramebuffers,
+ &gles2::GLES2Interface::DeleteFramebuffers) {}
+};
+
+class ScopedTexture : public ScopedGLuint {
+ public:
+ explicit ScopedTexture(gles2::GLES2Interface* gl)
+ : ScopedGLuint(gl,
+ &gles2::GLES2Interface::GenTextures,
+ &gles2::GLES2Interface::DeleteTextures) {}
+};
+
+template <GLenum Target>
+class ScopedBinder {
+ public:
+ typedef void (gles2::GLES2Interface::*BindFunc)(GLenum target, GLuint id);
+ ScopedBinder(gles2::GLES2Interface* gl, GLuint id, BindFunc bind_func)
+ : gl_(gl), bind_func_(bind_func) {
+ (gl_->*bind_func_)(Target, id);
+ }
+
+ virtual ~ScopedBinder() { (gl_->*bind_func_)(Target, 0); }
+
+ private:
+ gles2::GLES2Interface* gl_;
+ BindFunc bind_func_;
+
+ DISALLOW_COPY_AND_ASSIGN(ScopedBinder);
+};
+
+template <GLenum Target>
+class ScopedBufferBinder : ScopedBinder<Target> {
+ public:
+ ScopedBufferBinder(gles2::GLES2Interface* gl, GLuint id)
+ : ScopedBinder<Target>(gl, id, &gles2::GLES2Interface::BindBuffer) {}
+};
+
+template <GLenum Target>
+class ScopedFramebufferBinder : ScopedBinder<Target> {
+ public:
+ ScopedFramebufferBinder(gles2::GLES2Interface* gl, GLuint id)
+ : ScopedBinder<Target>(gl, id, &gles2::GLES2Interface::BindFramebuffer) {}
+};
+
+template <GLenum Target>
+class ScopedTextureBinder : ScopedBinder<Target> {
+ public:
+ ScopedTextureBinder(gles2::GLES2Interface* gl, GLuint id)
+ : ScopedBinder<Target>(gl, id, &gles2::GLES2Interface::BindTexture) {}
+};
+
+class I420Converter;
+class ReadbackYUVInterface;
+
+// Provides higher level operations on top of the gles2::GLES2Interface
+// interfaces.
+//
+// TODO(crbug.com/870036): DEPRECATED. Please contact the crbug owner before
+// adding any new dependencies on this code.
+class GPU_EXPORT GLHelper {
+ public:
+ GLHelper(gles2::GLES2Interface* gl, ContextSupport* context_support);
+ ~GLHelper();
+
+ enum ScalerQuality {
+ // Bilinear single pass, fastest possible.
+ SCALER_QUALITY_FAST = 1,
+
+ // Bilinear upscale + N * 50% bilinear downscales.
+    // This is still fast enough for most purposes, and
+    // image quality is nearly as good as the BEST option.
+ SCALER_QUALITY_GOOD = 2,
+
+ // Bicubic upscale + N * 50% bicubic downscales.
+ // Produces very good quality scaled images, but it's
+ // 2-8x slower than the "GOOD" quality, so it's not always
+ // worth it.
+ SCALER_QUALITY_BEST = 3,
+ };
+
+ // Copies the texture data out of |texture| into |out|. |dst_size| is the
+ // size of the texture. No post processing is applied to the pixels. The
+ // texture is assumed to have a format of GL_RGBA or GL_BGRA_EXT with a pixel
+ // type of GL_UNSIGNED_BYTE.
+ //
+ // TODO(crbug.com/870036): DEPRECATED. This will be moved to be closer to its
+ // one caller soon.
+ void ReadbackTextureAsync(GLuint texture,
+ GLenum texture_target,
+ const gfx::Size& dst_size,
+ unsigned char* out,
+ GLenum format,
+ base::OnceCallback<void(bool)> callback);
+
+ // Caches all intermediate textures and programs needed to scale any subset of
+ // a source texture at a fixed scaling ratio.
+ class ScalerInterface {
+ public:
+ virtual ~ScalerInterface() {}
+
+ // Scales a portion of |src_texture| and draws the result into
+ // |dest_texture| at offset (0, 0).
+ //
+ // |src_texture_size| is the full, allocated size of the |src_texture|. This
+ // is required for computing texture coordinate transforms (and only because
+ // the OpenGL ES 2.0 API lacks the ability to query this info).
+ //
+ // |src_offset| is the offset in the source texture corresponding to point
+ // (0,0) in the source/output coordinate spaces. This prevents the need for
+ // extra texture copies just to re-position the source coordinate system.
+ // TODO(crbug.com/775740): This must be set to whole-numbered values for
+ // now, until the implementation is modified to handle fractional offsets.
+ //
+ // |output_rect| selects the region to draw (in the scaled, not the source,
+ // coordinate space). This is used to save work in cases where only a
+ // portion needs to be re-scaled. The implementation will back-compute,
+ // internally, to determine the region of the |src_texture| to sample.
+ //
+ // WARNING: The output will always be placed at (0, 0) in the
+ // |dest_texture|, and not at |output_rect.origin()|.
+ //
+ // Note that the src_texture will have the min/mag filter set to GL_LINEAR
+ // and wrap_s/t set to CLAMP_TO_EDGE in this call.
+ void Scale(GLuint src_texture,
+ const gfx::Size& src_texture_size,
+ const gfx::Vector2dF& src_offset,
+ GLuint dest_texture,
+ const gfx::Rect& output_rect) {
+ ScaleToMultipleOutputs(src_texture, src_texture_size, src_offset,
+ dest_texture, 0, output_rect);
+ }
+
+ // Same as above, but for shaders that output to two textures at once.
+ virtual void ScaleToMultipleOutputs(GLuint src_texture,
+ const gfx::Size& src_texture_size,
+ const gfx::Vector2dF& src_offset,
+ GLuint dest_texture_0,
+ GLuint dest_texture_1,
+ const gfx::Rect& output_rect) = 0;
+
+ // Given the |src_texture_size|, |src_offset| and |output_rect| arguments
+ // that would be passed to Scale(), compute the region of pixels in the
+ // source texture that would be sampled to produce a scaled result. The
+ // result is stored in |sampling_rect|, along with the |offset| to the (0,0)
+ // point relative to |sampling_rect|'s origin.
+ //
+ // This is used by clients that need to know the minimal portion of a source
+ // buffer that must be copied without affecting Scale()'s results. This
+ // method also accounts for vertical flipping.
+ virtual void ComputeRegionOfInfluence(const gfx::Size& src_texture_size,
+ const gfx::Vector2dF& src_offset,
+ const gfx::Rect& output_rect,
+ gfx::Rect* sampling_rect,
+ gfx::Vector2dF* offset) const = 0;
+
+ // Returns true if from:to represent the same scale ratio as that provided
+ // by this scaler.
+ virtual bool IsSameScaleRatio(const gfx::Vector2d& from,
+ const gfx::Vector2d& to) const = 0;
+
+ // Returns true if the scaler is assuming the source texture's content is
+ // vertically flipped.
+ virtual bool IsSamplingFlippedSource() const = 0;
+
+ // Returns true if the scaler will vertically-flip the output. Note that if
+ // both this method and IsSamplingFlippedSource() return true, then the
+ // scaler output will be right-side up.
+ virtual bool IsFlippingOutput() const = 0;
+
+ // Returns the format to use when calling glReadPixels() to read-back the
+ // output texture(s). This indicates whether the 0th and 2nd bytes in each
+ // RGBA quad have been swapped. If no swapping has occurred, this will
+ // return GL_RGBA. Otherwise, it will return GL_BGRA_EXT.
+ virtual GLenum GetReadbackFormat() const = 0;
+
+ protected:
+ ScalerInterface() {}
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(ScalerInterface);
+ };
+
+ // Create a scaler that upscales or downscales at the given ratio
+ // (scale_from:scale_to). Returns null on invalid arguments.
+ //
+ // If |flipped_source| is true, then the scaler will assume the content of the
+ // source texture is vertically-flipped. This is required so that the scaler
+ // can correctly compute the sampling region.
+ //
+ // If |flip_output| is true, then the scaler will vertically-flip its output
+ // result. This is used when the output texture will be read-back into system
+ // memory, so that the rows do not have to be copied in reverse.
+ //
+ // If |swizzle| is true, the 0th and 2nd elements in each RGBA quad will be
+ // swapped. This is beneficial for optimizing read-back into system memory.
+ //
+ // WARNING: The returned scaler assumes both this GLHelper and its
+ // GLES2Interface/ContextSupport will outlive it!
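+  //
+  // A minimal usage sketch (the texture ids, sizes and |gl_helper| pointer
+  // below are hypothetical):
+  //   std::unique_ptr<ScalerInterface> scaler = gl_helper->CreateScaler(
+  //       GLHelper::SCALER_QUALITY_GOOD, gfx::Vector2d(src_width, src_height),
+  //       gfx::Vector2d(dst_width, dst_height), /*flipped_source=*/false,
+  //       /*flip_output=*/true, /*swizzle=*/false);
+  //   scaler->Scale(src_texture, gfx::Size(src_width, src_height),
+  //                 gfx::Vector2dF(), dest_texture,
+  //                 gfx::Rect(dst_width, dst_height));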
+ std::unique_ptr<ScalerInterface> CreateScaler(ScalerQuality quality,
+ const gfx::Vector2d& scale_from,
+ const gfx::Vector2d& scale_to,
+ bool flipped_source,
+ bool flip_output,
+ bool swizzle);
+
+ // Create a pipeline that will (optionally) scale a source texture, and then
+ // convert it to I420 (YUV) planar form, delivering results in three separate
+ // output textures (one for each plane; see I420Converter::Convert()).
+ //
+ // Due to limitations in the OpenGL ES 2.0 API, the output textures will have
+ // a format of GL_RGBA. However, each RGBA "pixel" in these textures actually
+ // carries 4 consecutive pixels for the single-color-channel result plane.
+ // Therefore, when using the OpenGL APIs to read-back the image into system
+ // memory, note that a width 1/4 the actual |output_rect.width()| must be
+ // used.
+ //
+ // |flipped_source|, |flip_output|, and |swizzle| have the same meaning as
+ // that explained in the method comments for CreateScaler().
+ //
+ // If |use_mrt| is true, the pipeline will try to optimize the YUV conversion
+ // using the multi-render-target extension, if the platform is capable.
+ // |use_mrt| should only be set to false for testing.
+ //
+ // The benefit of using this pipeline is seen when these output textures are
+ // read back from GPU to CPU memory: The I420 format reduces the amount of
+ // data read back by a factor of ~2.6 (32bpp → 12bpp) which can greatly
+ // improve performance, for things like video screen capture, on platforms
+ // with slow GPU read-back performance.
+ //
+ // WARNING: The returned I420Converter instance assumes both this GLHelper and
+ // its GLES2Interface/ContextSupport will outlive it!
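+  //
+  // A minimal usage sketch (the texture ids, sizes and |gl_helper| pointer
+  // below are hypothetical):
+  //   std::unique_ptr<I420Converter> converter =
+  //       gl_helper->CreateI420Converter(/*flipped_source=*/false,
+  //                                      /*flip_output=*/true,
+  //                                      /*swizzle=*/false, /*use_mrt=*/true);
+  //   converter->Convert(src_texture, gfx::Size(src_width, src_height),
+  //                      gfx::Vector2dF(), /*optional_scaler=*/nullptr,
+  //                      gfx::Rect(dst_width, dst_height), y_texture,
+  //                      u_texture, v_texture);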
+ std::unique_ptr<I420Converter> CreateI420Converter(bool flipped_source,
+ bool flip_output,
+ bool swizzle,
+ bool use_mrt);
+
+ // Create a readback pipeline that will (optionally) scale a source texture,
+  // then convert it to YUV420 planar form, and finally read it back. This
+ // reduces the amount of memory read from GPU to CPU memory by a factor of 2.6
+ // (32bpp → 12bpp), which can be quite handy since readbacks have very limited
+ // speed on some platforms.
+ //
+ // If |use_mrt| is true, the pipeline will try to optimize the YUV conversion
+ // using the multi-render-target extension, if the platform is capable.
+ // |use_mrt| should only be set to false for testing.
+ //
+ // WARNING: The returned ReadbackYUVInterface instance assumes both this
+ // GLHelper and its GLES2Interface/ContextSupport will outlive it!
+ //
+ // TODO(crbug.com/870036): DEPRECATED. This will be removed soon, in favor of
+ // CreateI420Converter().
+ std::unique_ptr<ReadbackYUVInterface> CreateReadbackPipelineYUV(
+ bool vertically_flip_texture,
+ bool use_mrt);
+
+ // Returns a ReadbackYUVInterface instance that is lazily created and owned by
+ // this class. |use_mrt| is always true for these instances.
+ //
+ // TODO(crbug.com/870036): DEPRECATED. This will be moved to be closer to its
+ // one caller soon.
+ ReadbackYUVInterface* GetReadbackPipelineYUV(bool vertically_flip_texture);
+
+  // Returns the maximum number of draw buffers available, or 0 if
+  // GL_EXT_draw_buffers is not available.
+ GLint MaxDrawBuffers();
+
+ protected:
+ class CopyTextureToImpl;
+
+ // Creates |copy_texture_to_impl_| if nullptr.
+ void InitCopyTextToImpl();
+ // Creates |scaler_impl_| if nullptr.
+ void InitScalerImpl();
+
+ enum ReadbackSwizzle { kSwizzleNone = 0, kSwizzleBGRA };
+
+ gles2::GLES2Interface* gl_;
+ ContextSupport* context_support_;
+ std::unique_ptr<CopyTextureToImpl> copy_texture_to_impl_;
+ std::unique_ptr<GLHelperScaling> scaler_impl_;
+ std::unique_ptr<ReadbackYUVInterface> shared_readback_yuv_flip_;
+ std::unique_ptr<ReadbackYUVInterface> shared_readback_yuv_noflip_;
+
+ // Memoized result for MaxDrawBuffers(), if >= 0. Otherwise, MaxDrawBuffers()
+ // will need to query the GL implementation.
+ GLint max_draw_buffers_ = -1;
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(GLHelper);
+};
+
+// Splits an RGBA source texture's image into separate Y, U, and V planes. The U
+// and V planes are half-width and half-height, according to the I420 standard.
+class GPU_EXPORT I420Converter {
+ public:
+ I420Converter();
+ virtual ~I420Converter();
+
+  // Transforms an RGBA |src_texture| into three textures, each containing bytes
+ // in I420 planar form. See the GLHelper::ScalerInterface::Scale() method
+ // comments for the meaning/semantics of |src_texture_size|, |src_offset| and
+ // |output_rect|. If |optional_scaler| is not null, it will first be used to
+ // scale the source texture into an intermediate texture before generating the
+ // Y+U+V planes.
+ //
+ // See notes for CreateI420Converter() regarding the semantics of the output
+ // textures.
+ virtual void Convert(GLuint src_texture,
+ const gfx::Size& src_texture_size,
+ const gfx::Vector2dF& src_offset,
+ GLHelper::ScalerInterface* optional_scaler,
+ const gfx::Rect& output_rect,
+ GLuint y_plane_texture,
+ GLuint u_plane_texture,
+ GLuint v_plane_texture) = 0;
+
+ // Returns true if the converter is assuming the source texture's content is
+ // vertically flipped.
+ virtual bool IsSamplingFlippedSource() const = 0;
+
+ // Returns true if the converter will vertically-flip the output.
+ virtual bool IsFlippingOutput() const = 0;
+
+ // Returns the format to use when calling glReadPixels() to read-back the
+ // output textures. This indicates whether the 0th and 2nd bytes in each RGBA
+ // quad have been swapped. If no swapping has occurred, this will return
+ // GL_RGBA. Otherwise, it will return GL_BGRA_EXT.
+ virtual GLenum GetReadbackFormat() const = 0;
+
+ // Returns the texture size of the Y plane texture, based on the size of the
+ // |output_rect| that was given to Convert(). This will have a width of
+ // CEIL(output_rect_size.width() / 4), and the same height.
+ static gfx::Size GetYPlaneTextureSize(const gfx::Size& output_rect_size);
+
+  // Like GetYPlaneTextureSize(), except the returned size will have a width of
+  // CEIL(output_rect_size.width() / 8) and a height of
+  // CEIL(output_rect_size.height() / 2), because the chroma planes are half-
+  // resolution in both dimensions in the I420 format.
+ static gfx::Size GetChromaPlaneTextureSize(const gfx::Size& output_rect_size);
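+
+  // Worked example (illustrative arithmetic only): for an |output_rect| of
+  // size 640x480, GetYPlaneTextureSize() returns 160x480 (CEIL(640 / 4) by
+  // 480), and GetChromaPlaneTextureSize() returns 80x240 (CEIL(640 / 8) by
+  // CEIL(480 / 2)), since each RGBA texel packs 4 consecutive single-channel
+  // result pixels.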
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(I420Converter);
+};
+
+// Similar to a ScalerInterface, a YUV readback pipeline will cache a scaler and
+// all intermediate textures and frame buffers needed to scale, crop, letterbox
+// and read back a texture from the GPU into CPU-accessible RAM. A single
+// readback pipeline can handle multiple outstanding readbacks at the same time.
+//
+// TODO(crbug.com/870036): DEPRECATED. This will be removed soon, in favor of
+// I420Converter and readback implementation in GLRendererCopier.
+class GPU_EXPORT ReadbackYUVInterface {
+ public:
+ ReadbackYUVInterface() {}
+ virtual ~ReadbackYUVInterface() {}
+
+ // Optional behavior: This sets a scaler to use to scale the inputs before
+ // planarizing. If null (or never called), then no scaling is performed.
+ virtual void SetScaler(std::unique_ptr<GLHelper::ScalerInterface> scaler) = 0;
+
+ // Returns the currently-set scaler, or null.
+ virtual GLHelper::ScalerInterface* scaler() const = 0;
+
+ // Returns true if the converter will vertically-flip the output.
+ virtual bool IsFlippingOutput() const = 0;
+
+  // Transforms an RGBA texture into I420 planar form, and then reads it back
+ // from the GPU into system memory. See the GLHelper::ScalerInterface::Scale()
+ // method comments for the meaning/semantics of |src_texture_size| and
+ // |output_rect|. The process is:
+ //
+ // 1. Scale the source texture to an intermediate texture.
+ // 2. Planarize, producing textures containing the Y, U, and V planes.
+ // 3. Read-back the planar data, copying it into the given output
+  //    destination. |paste_location| specifies where to place the output
+ // pixels: Rect(paste_location.origin(), output_rect.size()).
+ // 4. Run |callback| with true on success, false on failure (with no output
+ // modified).
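+  //
+  // A minimal usage sketch (illustrative only; |readback|, the texture id, the
+  // plane buffers/strides, and OnReadbackDone() are hypothetical; the plane
+  // buffers must remain valid until |callback| runs):
+  //
+  //   readback->ReadbackYUV(texture, gfx::Size(640, 480),
+  //                         gfx::Rect(0, 0, 640, 480),
+  //                         640, y_data, 320, u_data, 320, v_data,
+  //                         gfx::Point(0, 0),
+  //                         base::BindOnce(&OnReadbackDone));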
+ virtual void ReadbackYUV(GLuint texture,
+ const gfx::Size& src_texture_size,
+ const gfx::Rect& output_rect,
+ int y_plane_row_stride_bytes,
+ unsigned char* y_plane_data,
+ int u_plane_row_stride_bytes,
+ unsigned char* u_plane_data,
+ int v_plane_row_stride_bytes,
+ unsigned char* v_plane_data,
+ const gfx::Point& paste_location,
+ base::OnceCallback<void(bool)> callback) = 0;
+};
+
+} // namespace gpu
+
+#endif // GPU_COMMAND_BUFFER_CLIENT_GL_HELPER_H_
diff --git a/chromium/gpu/command_buffer/client/gl_helper_scaling.cc b/chromium/gpu/command_buffer/client/gl_helper_scaling.cc
new file mode 100644
index 00000000000..a6ac94340fc
--- /dev/null
+++ b/chromium/gpu/command_buffer/client/gl_helper_scaling.cc
@@ -0,0 +1,1335 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "gpu/command_buffer/client/gl_helper_scaling.h"
+
+#include <stddef.h>
+
+#include <memory>
+#include <string>
+#include <utility>
+#include <vector>
+
+#include "base/bind.h"
+#include "base/containers/circular_deque.h"
+#include "base/lazy_instance.h"
+#include "base/logging.h"
+#include "base/macros.h"
+#include "base/memory/ref_counted.h"
+#include "base/optional.h"
+#include "base/time/time.h"
+#include "base/trace_event/trace_event.h"
+#include "gpu/GLES2/gl2extchromium.h"
+#include "gpu/command_buffer/client/gles2_interface.h"
+#include "ui/gfx/geometry/rect.h"
+#include "ui/gfx/geometry/rect_conversions.h"
+#include "ui/gfx/geometry/rect_f.h"
+#include "ui/gfx/geometry/size.h"
+#include "ui/gfx/geometry/vector2d_f.h"
+
+namespace gpu {
+using gles2::GLES2Interface;
+
+namespace {
+
+// Linear translation from RGB to grayscale.
+const GLfloat kRGBtoGrayscaleColorWeights[4] = {0.213f, 0.715f, 0.072f, 0.0f};
+
+// Linear translation from RGB to YUV color space.
+// TODO(miu): This needs to stop being hardcoded...and needs to identify the
+// to/from color spaces.
+const GLfloat kRGBtoYColorWeights[4] = {0.257f, 0.504f, 0.098f, 0.0625f};
+const GLfloat kRGBtoUColorWeights[4] = {-0.148f, -0.291f, 0.439f, 0.5f};
+const GLfloat kRGBtoVColorWeights[4] = {0.439f, -0.368f, -0.071f, 0.5f};
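+// For instance, full-scale white (R = G = B = 1.0) maps to
+// Y = 0.257 + 0.504 + 0.098 + 0.0625 ~= 0.92 (~235/255): the fourth component
+// of each weight vector is the limited-range ("studio swing") offset, which
+// the shader programs pick up by dotting against vec4(rgb, 1.0).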
+
+// Returns true iff a_num/a_denom == b_num/b_denom.
+bool AreRatiosEqual(int32_t a_num,
+ int32_t a_denom,
+ int32_t b_num,
+ int32_t b_denom) {
+ // The math (for each dimension):
+ // If: a_num/a_denom == b_num/b_denom
+ // Then: a_num*b_denom == b_num*a_denom
+ //
+ // ...and cast to int64_t to guarantee no overflow from the multiplications.
+ return (static_cast<int64_t>(a_num) * b_denom) ==
+ (static_cast<int64_t>(b_num) * a_denom);
+}
+
+} // namespace
+
+GLHelperScaling::GLHelperScaling(GLES2Interface* gl, GLHelper* helper)
+ : gl_(gl), helper_(helper), vertex_attributes_buffer_(gl_) {
+ InitBuffer();
+}
+
+GLHelperScaling::~GLHelperScaling() {}
+
+// Used to keep track of a generated shader program. The program
+// is passed in as text through Setup and is used by calling
+// UseProgram() with the right parameters. Note that |gl_|
+// and |helper_| are assumed to live longer than this program.
+class ShaderProgram : public base::RefCounted<ShaderProgram> {
+ public:
+ ShaderProgram(GLES2Interface* gl,
+ GLHelper* helper,
+ GLHelperScaling::ShaderType shader)
+ : gl_(gl),
+ helper_(helper),
+ shader_(shader),
+ program_(gl_->CreateProgram()),
+ position_location_(-1),
+        texcoord_location_(-1),
+        texture_location_(-1),
+ src_rect_location_(-1),
+ src_pixelsize_location_(-1),
+ scaling_vector_location_(-1),
+ rgb_to_plane0_location_(-1),
+ rgb_to_plane1_location_(-1),
+ rgb_to_plane2_location_(-1) {}
+
+ // Compile shader program.
+ void Setup(const GLchar* vertex_shader_text,
+ const GLchar* fragment_shader_text);
+
+ // UseProgram must be called with GL_ARRAY_BUFFER bound to a vertex attribute
+ // buffer. |src_texture_size| is the size of the entire source texture,
+ // regardless of which region is to be sampled. |src_rect| is the source
+ // region not including overscan pixels past the edges. The program produces a
+ // scaled image placed at Rect(0, 0, dst_size.width(), dst_size.height()) in
+ // the destination texture(s).
+ void UseProgram(const gfx::Size& src_texture_size,
+ const gfx::RectF& src_rect,
+ const gfx::Size& dst_size,
+ bool scale_x,
+ bool flip_y,
+ const GLfloat color_weights[3][4]);
+
+ bool Initialized() const { return position_location_ != -1; }
+
+ private:
+ friend class base::RefCounted<ShaderProgram>;
+ ~ShaderProgram() { gl_->DeleteProgram(program_); }
+
+ GLES2Interface* gl_;
+ GLHelper* helper_;
+ const GLHelperScaling::ShaderType shader_;
+
+ // A program for copying a source texture into a destination texture.
+ GLuint program_;
+
+ // The location of the position in the program.
+ GLint position_location_;
+ // The location of the texture coordinate in the program.
+ GLint texcoord_location_;
+ // The location of the source texture in the program.
+ GLint texture_location_;
+ // The location of the texture coordinate of the source rectangle in the
+ // program.
+ GLint src_rect_location_;
+ // Location of size of source image in pixels.
+ GLint src_pixelsize_location_;
+ // Location of vector for scaling ratio between source and dest textures.
+ GLint scaling_vector_location_;
+ // Location of color weights, for programs that convert from interleaved to
+ // planar pixel orderings/formats.
+ GLint rgb_to_plane0_location_;
+ GLint rgb_to_plane1_location_;
+ GLint rgb_to_plane2_location_;
+
+ DISALLOW_COPY_AND_ASSIGN(ShaderProgram);
+};
+
+// Implementation of a single stage in a scaler pipeline. If the pipeline has
+// multiple stages, it calls Scale() on the subscaler, then further scales the
+// output. Caches textures and framebuffers to avoid allocating/deleting
+// them once per frame, which can be expensive on some drivers.
+class ScalerImpl : public GLHelper::ScalerInterface {
+ public:
+ // |gl| and |scaler_helper| are expected to live longer than this object.
+ ScalerImpl(GLES2Interface* gl,
+ GLHelperScaling* scaler_helper,
+ const GLHelperScaling::ScalerStage& scaler_stage,
+ std::unique_ptr<ScalerImpl> subscaler)
+ : gl_(gl),
+ scaler_helper_(scaler_helper),
+ spec_(scaler_stage),
+ intermediate_texture_(0),
+ dst_framebuffer_(gl),
+ subscaler_(std::move(subscaler)) {
+ shader_program_ =
+ scaler_helper_->GetShaderProgram(spec_.shader, spec_.swizzle);
+ }
+
+ ~ScalerImpl() override {
+ if (intermediate_texture_) {
+ gl_->DeleteTextures(1, &intermediate_texture_);
+ }
+ }
+
+ void SetColorWeights(int plane, const GLfloat color_weights[4]) {
+ DCHECK(plane >= 0 && plane < 3);
+ color_weights_[plane][0] = color_weights[0];
+ color_weights_[plane][1] = color_weights[1];
+ color_weights_[plane][2] = color_weights[2];
+ color_weights_[plane][3] = color_weights[3];
+ }
+
+ void ScaleToMultipleOutputs(GLuint src_texture,
+ const gfx::Size& src_texture_size,
+ const gfx::Vector2dF& src_offset,
+ GLuint dest_texture_0,
+ GLuint dest_texture_1,
+ const gfx::Rect& output_rect) override {
+ // TODO(crbug.com/775740): Do not accept non-whole-numbered offsets
+ // until the shader programs produce the correct output for them.
+ DCHECK_EQ(src_offset.x(), std::floor(src_offset.x()));
+ DCHECK_EQ(src_offset.y(), std::floor(src_offset.y()));
+
+ if (output_rect.IsEmpty())
+ return; // No work to do.
+ gfx::RectF src_rect = ToSourceRect(output_rect);
+
+    // Ensure conflicting GL capabilities are disabled. The following explicitly
+ // disables those known to possibly be enabled in GL compositing code, while
+ // the helper method call will DCHECK a wider set.
+ gl_->Disable(GL_SCISSOR_TEST);
+ gl_->Disable(GL_STENCIL_TEST);
+ gl_->Disable(GL_BLEND);
+ DCheckNoConflictingCapabilitiesAreEnabled();
+
+ if (subscaler_) {
+ gfx::RectF overscan_rect = src_rect;
+ PadForOverscan(&overscan_rect);
+ const auto intermediate = subscaler_->GenerateIntermediateTexture(
+ src_texture, src_texture_size, src_offset,
+ gfx::ToEnclosingRect(overscan_rect));
+ src_rect -= intermediate.second.OffsetFromOrigin();
+ Execute(intermediate.first, intermediate.second.size(), src_rect,
+ dest_texture_0, dest_texture_1, output_rect.size());
+ } else {
+ if (spec_.flipped_source) {
+ src_rect.set_x(src_rect.x() + src_offset.x());
+ src_rect.set_y(src_texture_size.height() - src_rect.bottom() -
+ src_offset.y());
+ } else {
+ src_rect += src_offset;
+ }
+ Execute(src_texture, src_texture_size, src_rect, dest_texture_0,
+ dest_texture_1, output_rect.size());
+ }
+ }
+
+ void ComputeRegionOfInfluence(const gfx::Size& src_texture_size,
+ const gfx::Vector2dF& src_offset,
+ const gfx::Rect& output_rect,
+ gfx::Rect* sampling_rect,
+ gfx::Vector2dF* offset) const override {
+ // This mimics the recursive behavior of GenerateIntermediateTexture(),
+ // computing the size of the intermediate texture required by each scaler
+ // in the chain.
+ gfx::Rect intermediate_rect = output_rect;
+ const ScalerImpl* scaler = this;
+ while (scaler->subscaler_) {
+ gfx::RectF overscan_rect = scaler->ToSourceRect(intermediate_rect);
+ scaler->PadForOverscan(&overscan_rect);
+ intermediate_rect = gfx::ToEnclosingRect(overscan_rect);
+ scaler = scaler->subscaler_.get();
+ }
+
+ // At this point, |scaler| points to the first scaler in the chain. Compute
+ // the source rect that would have been used with the shader program, and
+ // then pad that to account for the shader program's overscan pixels.
+ const auto rects = scaler->ComputeBaseCaseRects(
+ src_texture_size, src_offset, intermediate_rect);
+ gfx::RectF src_overscan_rect = rects.first;
+ scaler->PadForOverscan(&src_overscan_rect);
+
+ // Provide a whole-numbered Rect result along with the offset to the origin
+ // point.
+ *sampling_rect = gfx::ToEnclosingRect(src_overscan_rect);
+ sampling_rect->Intersect(gfx::Rect(src_texture_size));
+ *offset = gfx::ScaleVector2d(
+ output_rect.OffsetFromOrigin(),
+ static_cast<float>(chain_properties_->scale_from.x()) /
+ chain_properties_->scale_to.x(),
+ static_cast<float>(chain_properties_->scale_from.y()) /
+ chain_properties_->scale_to.y());
+ if (scaler->spec_.flipped_source) {
+ offset->set_x(offset->x() - sampling_rect->x());
+ offset->set_y(offset->y() -
+ (src_texture_size.height() - sampling_rect->bottom()));
+ } else {
+ *offset -= sampling_rect->OffsetFromOrigin();
+ }
+ }
+
+ // Sets the overall scale ratio and swizzle for the entire chain of Scalers.
+ void SetChainProperties(const gfx::Vector2d& from,
+ const gfx::Vector2d& to,
+ bool swizzle) {
+ chain_properties_.emplace(ChainProperties{
+ from, to, static_cast<GLenum>(swizzle ? GL_BGRA_EXT : GL_RGBA)});
+ }
+
+ // WARNING: This method should only be called by external clients, since they
+  // are using it to compare against the overall scale ratio (of the entire
+  // chain of Scalers).
+ bool IsSameScaleRatio(const gfx::Vector2d& from,
+ const gfx::Vector2d& to) const override {
+ const gfx::Vector2d& overall_from = chain_properties_->scale_from;
+ const gfx::Vector2d& overall_to = chain_properties_->scale_to;
+ return AreRatiosEqual(overall_from.x(), overall_to.x(), from.x(), to.x()) &&
+ AreRatiosEqual(overall_from.y(), overall_to.y(), from.y(), to.y());
+ }
+
+ bool IsSamplingFlippedSource() const override {
+ const ScalerImpl* scaler = this;
+ while (scaler->subscaler_) {
+ DCHECK(!scaler->spec_.flipped_source);
+ scaler = scaler->subscaler_.get();
+ }
+ return scaler->spec_.flipped_source;
+ }
+
+ bool IsFlippingOutput() const override {
+ bool flipped_overall = false;
+ const ScalerImpl* scaler = this;
+ while (scaler) {
+ flipped_overall = (flipped_overall != scaler->spec_.flip_output);
+ scaler = scaler->subscaler_.get();
+ }
+ return flipped_overall;
+ }
+
+ GLenum GetReadbackFormat() const override {
+ return chain_properties_->readback_format;
+ }
+
+ private:
+ // In DCHECK-enabled builds, this checks that no conflicting GL capability is
+ // currently enabled in the GL context. Any of these might cause problems when
+ // the shader draw operations are executed.
+ void DCheckNoConflictingCapabilitiesAreEnabled() const {
+ DCHECK_NE(gl_->IsEnabled(GL_BLEND), GL_TRUE);
+ DCHECK_NE(gl_->IsEnabled(GL_CULL_FACE), GL_TRUE);
+ DCHECK_NE(gl_->IsEnabled(GL_DEPTH_TEST), GL_TRUE);
+ DCHECK_NE(gl_->IsEnabled(GL_POLYGON_OFFSET_FILL), GL_TRUE);
+ DCHECK_NE(gl_->IsEnabled(GL_SAMPLE_ALPHA_TO_COVERAGE), GL_TRUE);
+ DCHECK_NE(gl_->IsEnabled(GL_SAMPLE_COVERAGE), GL_TRUE);
+ DCHECK_NE(gl_->IsEnabled(GL_SCISSOR_TEST), GL_TRUE);
+ DCHECK_NE(gl_->IsEnabled(GL_STENCIL_TEST), GL_TRUE);
+ }
+
+ // Expands the given |sampling_rect| to account for the extra pixels bordering
+ // it that will be sampled by the shaders.
+ void PadForOverscan(gfx::RectF* sampling_rect) const {
+ // Room for optimization: These are conservative calculations. Some of the
+ // shaders actually require fewer overscan pixels.
+ float overscan_x = 0;
+ float overscan_y = 0;
+ switch (spec_.shader) {
+ case GLHelperScaling::SHADER_BILINEAR:
+ case GLHelperScaling::SHADER_BILINEAR2:
+ case GLHelperScaling::SHADER_BILINEAR3:
+ case GLHelperScaling::SHADER_BILINEAR4:
+ case GLHelperScaling::SHADER_BILINEAR2X2:
+ case GLHelperScaling::SHADER_PLANAR:
+ case GLHelperScaling::SHADER_YUV_MRT_PASS1:
+ case GLHelperScaling::SHADER_YUV_MRT_PASS2:
+ overscan_x =
+ static_cast<float>(spec_.scale_from.x()) / spec_.scale_to.x();
+ overscan_y =
+ static_cast<float>(spec_.scale_from.y()) / spec_.scale_to.y();
+ break;
+
+ case GLHelperScaling::SHADER_BICUBIC_UPSCALE:
+ DCHECK_LE(spec_.scale_from.x(), spec_.scale_to.x());
+ DCHECK_LE(spec_.scale_from.y(), spec_.scale_to.y());
+ // This shader always reads a radius of 2 pixels about the sampling
+ // point.
+ overscan_x = 2.0f;
+ overscan_y = 2.0f;
+ break;
+
+ case GLHelperScaling::SHADER_BICUBIC_HALF_1D: {
+ DCHECK_GE(spec_.scale_from.x(), spec_.scale_to.x());
+ DCHECK_GE(spec_.scale_from.y(), spec_.scale_to.y());
+ // kLobeDist is the largest pixel read offset in the shader program.
+ constexpr float kLobeDist = 11.0f / 4.0f;
+ overscan_x = kLobeDist * spec_.scale_from.x() / spec_.scale_to.x();
+ overscan_y = kLobeDist * spec_.scale_from.y() / spec_.scale_to.y();
+ break;
+ }
+ }
+ // Because the texture sampler sometimes reads between pixels, an extra one
+ // must be accounted for.
+ sampling_rect->Inset(-(overscan_x + 1.0f), -(overscan_y + 1.0f));
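+    // (Worked example: a bilinear 2:1 downscale in both dimensions yields an
+    // overscan of 2.0 per axis, so the rect is expanded by 3.0 on each side
+    // once the extra between-pixel sample is added.)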
+ }
+
+ // Returns the given |rect| in source coordinates.
+ gfx::RectF ToSourceRect(const gfx::Rect& rect) const {
+ return gfx::ScaleRect(
+ gfx::RectF(rect),
+ static_cast<float>(spec_.scale_from.x()) / spec_.scale_to.x(),
+ static_cast<float>(spec_.scale_from.y()) / spec_.scale_to.y());
+ }
+
+ // Returns the given |rect| in output coordinates, enlarged to whole-number
+ // coordinates.
+ gfx::Rect ToOutputRect(const gfx::RectF& rect) const {
+ return gfx::ToEnclosingRect(gfx::ScaleRect(
+ rect, static_cast<float>(spec_.scale_to.x()) / spec_.scale_from.x(),
+ static_cast<float>(spec_.scale_to.y()) / spec_.scale_from.y()));
+ }
+
+ // Returns the source and output rects to use with the shader program,
+ // assuming this scaler is the "base case" (i.e., it has no subscaler). The
+ // returned output rect is clamped according to what the source texture can
+ // provide.
+ std::pair<gfx::RectF, gfx::Rect> ComputeBaseCaseRects(
+ const gfx::Size& src_texture_size,
+ const gfx::Vector2dF& src_offset,
+ const gfx::Rect& requested_output_rect) const {
+ DCHECK(!subscaler_);
+
+ // Determine what the requested source rect is, and clamp to the texture's
+ // bounds.
+ gfx::RectF src_rect = ToSourceRect(requested_output_rect);
+ src_rect += src_offset;
+ if (spec_.flipped_source)
+ src_rect.set_y(src_texture_size.height() - src_rect.bottom());
+ src_rect.Intersect(gfx::RectF(gfx::SizeF(src_texture_size)));
+
+ // From the clamped source rect, re-compute the output rect that will be
+ // provided to the next scaler stage. This will either be all of what was
+ // requested or a smaller rect. See comments in
+ // GenerateIntermediateTexture().
+ if (spec_.flipped_source)
+ src_rect.set_y(src_texture_size.height() - src_rect.bottom());
+ src_rect -= src_offset;
+ const gfx::Rect output_rect = ToOutputRect(src_rect);
+
+ // Once again, compute the source rect from the output rect, which might
+    // spill over the texture's bounds slightly (but only by the minimal amount
+ // necessary). Apply the |src_offset| and vertically-flip this source rect,
+ // if necessary, as this is what will be provided directly to the shader
+ // program.
+ src_rect = ToSourceRect(output_rect);
+ src_rect += src_offset;
+ if (spec_.flipped_source)
+ src_rect.set_y(src_texture_size.height() - src_rect.bottom());
+
+ return std::make_pair(src_rect, output_rect);
+ }
+
+ // Generates the intermediate texture and/or re-defines it if its size has
+ // changed.
+ void EnsureIntermediateTextureDefined(const gfx::Size& size) {
+ // Reallocate a new texture, if needed.
+ if (!intermediate_texture_)
+ gl_->GenTextures(1, &intermediate_texture_);
+ if (intermediate_texture_size_ != size) {
+ gl_->BindTexture(GL_TEXTURE_2D, intermediate_texture_);
+ gl_->TexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, size.width(), size.height(), 0,
+ GL_RGBA, GL_UNSIGNED_BYTE, nullptr);
+ intermediate_texture_size_ = size;
+ }
+ }
+
+ // Returns a texture of this intermediate scaling step. The caller does NOT
+ // own the returned texture. The texture may be smaller than the
+ // |requested_output_rect.size()|, if that eliminates data redundancy that
+ // GL_CLAMP_TO_EDGE will correct for.
+ std::pair<GLuint, gfx::Rect> GenerateIntermediateTexture(
+ GLuint src_texture,
+ const gfx::Size& src_texture_size,
+ const gfx::Vector2dF& src_offset,
+ const gfx::Rect& requested_output_rect) {
+ // Base case: If there is no subscaler, render the intermediate texture from
+ // the |src_texture| and return it.
+ if (!subscaler_) {
+ const auto rects = ComputeBaseCaseRects(src_texture_size, src_offset,
+ requested_output_rect);
+ EnsureIntermediateTextureDefined(rects.second.size());
+ Execute(src_texture, src_texture_size, rects.first, intermediate_texture_,
+ 0, rects.second.size());
+ return std::make_pair(intermediate_texture_, rects.second);
+ }
+
+ // Recursive case: Output from the subscaler is needed to generate this
+ // scaler's intermediate texture. Compute the region of pixels that will be
+ // sampled, and request those pixels from the subscaler.
+ gfx::RectF sampling_rect = ToSourceRect(requested_output_rect);
+ PadForOverscan(&sampling_rect);
+ const auto intermediate = subscaler_->GenerateIntermediateTexture(
+ src_texture, src_texture_size, src_offset,
+ gfx::ToEnclosingRect(sampling_rect));
+ const GLuint& sampling_texture = intermediate.first;
+ const gfx::Rect& sampling_bounds = intermediate.second;
+
+ // The subscaler might not have provided pixels for the entire requested
+ // |sampling_rect| because they would be redundant (i.e., GL_CLAMP_TO_EDGE
+ // behavior will generate the redundant pixel values in the rendering step,
+ // below). Thus, re-compute |requested_output_rect| and |sampling_rect| when
+ // this has occurred.
+ gfx::Rect output_rect;
+ if (sampling_bounds.Contains(gfx::ToEnclosingRect(sampling_rect))) {
+ output_rect = requested_output_rect;
+ } else {
+ sampling_rect.Intersect(gfx::RectF(sampling_bounds));
+ output_rect = ToOutputRect(sampling_rect);
+ // The new sampling rect might exceed the bounds slightly, but only by the
+ // minimal amount necessary to populate the entire output.
+ sampling_rect = ToSourceRect(output_rect);
+ }
+
+    // Render the output, but do not account for |src_offset| or vertical
+    // flipping, because those were already handled in the base case.
+ EnsureIntermediateTextureDefined(output_rect.size());
+ DCHECK(!spec_.flipped_source);
+ Execute(sampling_texture, sampling_bounds.size(),
+ sampling_rect - sampling_bounds.OffsetFromOrigin(),
+ intermediate_texture_, 0, output_rect.size());
+ return std::make_pair(intermediate_texture_, output_rect);
+ }
+
+ // Executes the scale, mapping pixels from |src_texture| to one or two
+ // outputs, transforming the source pixels in |src_rect| to produce a
+ // result of the given size. |src_texture_size| is the size of the entire
+ // |src_texture|, regardless of the sampled region.
+ void Execute(GLuint src_texture,
+ const gfx::Size& src_texture_size,
+ const gfx::RectF& src_rect,
+ GLuint dest_texture_0,
+ GLuint dest_texture_1,
+ const gfx::Size& result_size) {
+ // Attach output texture(s) to the framebuffer.
+ ScopedFramebufferBinder<GL_FRAMEBUFFER> framebuffer_binder(
+ gl_, dst_framebuffer_);
+ gl_->FramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0,
+ GL_TEXTURE_2D, dest_texture_0, 0);
+ if (dest_texture_1 > 0) {
+ gl_->FramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0 + 1,
+ GL_TEXTURE_2D, dest_texture_1, 0);
+ }
+
+    // Use GL_NEAREST for copies between rectangles of exactly the same size, to
+ // reduce errors on low-precision GPUs. Use bilinear filtering otherwise.
+ //
+ // This is a workaround for Mali-G72 GPU (b/141898654) that uses lower
+ // precision than expected for interpolation.
+ GLint filter = (src_rect.IsExpressibleAsRect() &&
+ src_rect.size() == gfx::SizeF(result_size))
+ ? GL_NEAREST
+ : GL_LINEAR;
+    // Bind to the source texture and set the filtering and clamping to the edge,
+ // as required by all shader programs.
+ ScopedTextureBinder<GL_TEXTURE_2D> texture_binder(gl_, src_texture);
+ gl_->TexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, filter);
+ gl_->TexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, filter);
+ gl_->TexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
+ gl_->TexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
+
+ // Prepare the shader program for drawing.
+ ScopedBufferBinder<GL_ARRAY_BUFFER> buffer_binder(
+ gl_, scaler_helper_->vertex_attributes_buffer_);
+ shader_program_->UseProgram(src_texture_size, src_rect, result_size,
+ spec_.scale_x, spec_.flip_output,
+ color_weights_);
+
+ // Execute the draw.
+ gl_->Viewport(0, 0, result_size.width(), result_size.height());
+ const GLenum buffers[] = {GL_COLOR_ATTACHMENT0, GL_COLOR_ATTACHMENT0 + 1};
+ if (dest_texture_1 > 0) {
+ DCHECK_LE(2, scaler_helper_->helper_->MaxDrawBuffers());
+ gl_->DrawBuffersEXT(2, buffers);
+ }
+ gl_->DrawArrays(GL_TRIANGLE_STRIP, 0, 4);
+ if (dest_texture_1 > 0) {
+ // Set the draw buffers back to not disrupt external operations.
+ gl_->DrawBuffersEXT(1, buffers);
+ }
+ }
+
+ GLES2Interface* gl_;
+ GLHelperScaling* scaler_helper_;
+ GLHelperScaling::ScalerStage spec_;
+ GLfloat color_weights_[3][4]; // A vec4 for each plane.
+ GLuint intermediate_texture_;
+ gfx::Size intermediate_texture_size_;
+ scoped_refptr<ShaderProgram> shader_program_;
+ ScopedFramebuffer dst_framebuffer_;
+ std::unique_ptr<ScalerImpl> subscaler_;
+
+ // This last member is only set on ScalerImpls that are exposed to external
+ // modules. This is so the client can query the overall scale ratio and
+ // swizzle provided by a chain of ScalerImpls.
+ struct ChainProperties {
+ gfx::Vector2d scale_from;
+ gfx::Vector2d scale_to;
+ GLenum readback_format;
+ };
+ base::Optional<ChainProperties> chain_properties_;
+};
+
+// The important inputs to this function are |x_ops| and |y_ops|. They represent
+// scaling operations to be done on a source image of relative size
+// |scale_from|. If |quality| is SCALER_QUALITY_BEST, then interpret these scale
+// operations literally and create one scaler stage for each ScaleOp. However,
+// if |quality| is SCALER_QUALITY_GOOD, then enable some optimizations that
+// combine two or more ScaleOps into a single scaler stage. Normally, the
+// ScaleOps from |y_ops| are processed first and |x_ops| after all the |y_ops|,
+// but sometimes it is possible to combine one or more operations from both
+// queues essentially for free. This is the reason why |x_ops| and |y_ops|
+// aren't just one single queue.
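+// For example (a sketch of the GOOD-quality optimization): downscaling by
+// exactly 4x in both dimensions yields two halving ScaleOps per axis; the two
+// Y ops are merged into a SHADER_BILINEAR2 stage, which is then merged with
+// both X ops into a single SHADER_BILINEAR2X2 stage, so the whole scale runs
+// in one pass.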
+// static
+void GLHelperScaling::ConvertScalerOpsToScalerStages(
+ GLHelper::ScalerQuality quality,
+ gfx::Vector2d scale_from,
+ base::circular_deque<GLHelperScaling::ScaleOp>* x_ops,
+ base::circular_deque<GLHelperScaling::ScaleOp>* y_ops,
+ std::vector<ScalerStage>* scaler_stages) {
+ while (!x_ops->empty() || !y_ops->empty()) {
+ gfx::Vector2d intermediate_scale = scale_from;
+ base::circular_deque<ScaleOp>* current_queue = nullptr;
+
+ if (!y_ops->empty()) {
+ current_queue = y_ops;
+ } else {
+ current_queue = x_ops;
+ }
+
+ ShaderType current_shader = SHADER_BILINEAR;
+ switch (current_queue->front().scale_factor) {
+ case 0:
+ if (quality == GLHelper::SCALER_QUALITY_BEST) {
+ current_shader = SHADER_BICUBIC_UPSCALE;
+ }
+ break;
+ case 2:
+ if (quality == GLHelper::SCALER_QUALITY_BEST) {
+ current_shader = SHADER_BICUBIC_HALF_1D;
+ }
+ break;
+ case 3:
+ DCHECK(quality != GLHelper::SCALER_QUALITY_BEST);
+ current_shader = SHADER_BILINEAR3;
+ break;
+ default:
+ NOTREACHED();
+ }
+ bool scale_x = current_queue->front().scale_x;
+ current_queue->front().UpdateScale(&intermediate_scale);
+ current_queue->pop_front();
+
+ // Optimization: Sometimes we can combine 2-4 scaling operations into
+ // one operation.
+ if (quality == GLHelper::SCALER_QUALITY_GOOD) {
+ if (!current_queue->empty() && current_shader == SHADER_BILINEAR) {
+ // Combine two steps in the same dimension.
+ current_queue->front().UpdateScale(&intermediate_scale);
+ current_queue->pop_front();
+ current_shader = SHADER_BILINEAR2;
+ if (!current_queue->empty()) {
+ // Combine three steps in the same dimension.
+ current_queue->front().UpdateScale(&intermediate_scale);
+ current_queue->pop_front();
+ current_shader = SHADER_BILINEAR4;
+ }
+ }
+ // Check if we can combine some steps in the other dimension as well.
+ // Since all shaders currently use GL_LINEAR, we can easily scale up
+ // or scale down by exactly 2x at the same time as we do another
+ // operation. Currently, the following mergers are supported:
+ // * 1 bilinear Y-pass with 1 bilinear X-pass (up or down)
+ // * 2 bilinear Y-passes with 2 bilinear X-passes
+      //   * 1 bilinear Y-pass with N bilinear X-passes
+ // * N bilinear Y-passes with 1 bilinear X-pass (down only)
+ // Measurements indicate that generalizing this for 3x3 and 4x4
+ // makes it slower on some platforms, such as the Pixel.
+ if (!scale_x && x_ops->size() > 0 && x_ops->front().scale_factor <= 2) {
+ int x_passes = 0;
+ if (current_shader == SHADER_BILINEAR2 && x_ops->size() >= 2) {
+ // 2y + 2x passes
+ x_passes = 2;
+ current_shader = SHADER_BILINEAR2X2;
+ } else if (current_shader == SHADER_BILINEAR) {
+ // 1y + Nx passes
+ scale_x = true;
+ switch (x_ops->size()) {
+ case 0:
+ NOTREACHED();
+ break;
+ case 1:
+ if (x_ops->front().scale_factor == 3) {
+ current_shader = SHADER_BILINEAR3;
+ }
+ x_passes = 1;
+ break;
+ case 2:
+ x_passes = 2;
+ current_shader = SHADER_BILINEAR2;
+ break;
+ default:
+ x_passes = 3;
+ current_shader = SHADER_BILINEAR4;
+ break;
+ }
+ } else if (x_ops->front().scale_factor == 2) {
+ // Ny + 1x-downscale
+ x_passes = 1;
+ }
+
+ for (int i = 0; i < x_passes; i++) {
+ x_ops->front().UpdateScale(&intermediate_scale);
+ x_ops->pop_front();
+ }
+ }
+ }
+
+ scaler_stages->emplace_back(ScalerStage{current_shader, scale_from,
+ intermediate_scale, scale_x, false,
+ false, false});
+ scale_from = intermediate_scale;
+ }
+}
+
+// static
+void GLHelperScaling::ComputeScalerStages(
+ GLHelper::ScalerQuality quality,
+ const gfx::Vector2d& scale_from,
+ const gfx::Vector2d& scale_to,
+ bool flipped_source,
+ bool flip_output,
+ bool swizzle,
+ std::vector<ScalerStage>* scaler_stages) {
+ if (quality == GLHelper::SCALER_QUALITY_FAST || scale_from == scale_to) {
+ scaler_stages->emplace_back(ScalerStage{SHADER_BILINEAR, scale_from,
+ scale_to, false, flipped_source,
+ flip_output, swizzle});
+ return;
+ }
+
+ base::circular_deque<GLHelperScaling::ScaleOp> x_ops, y_ops;
+ GLHelperScaling::ScaleOp::AddOps(scale_from.x(), scale_to.x(), true,
+ quality == GLHelper::SCALER_QUALITY_GOOD,
+ &x_ops);
+ GLHelperScaling::ScaleOp::AddOps(scale_from.y(), scale_to.y(), false,
+ quality == GLHelper::SCALER_QUALITY_GOOD,
+ &y_ops);
+ DCHECK_GT(x_ops.size() + y_ops.size(), 0u);
+ ConvertScalerOpsToScalerStages(quality, scale_from, &x_ops, &y_ops,
+ scaler_stages);
+ DCHECK_EQ(x_ops.size() + y_ops.size(), 0u);
+ DCHECK(!scaler_stages->empty());
+
+ // If the source content is flipped, the first scaler stage will perform math
+ // to account for this. It also will flip the content during scaling so that
+ // all following stages may assume the content is not flipped. Then, the final
+ // stage must ensure the final output is correctly flipped-back (or not) based
+ // on what the first stage did PLUS what is being requested by the client
+ // code.
+ if (flipped_source) {
+ scaler_stages->front().flipped_source = true;
+ scaler_stages->front().flip_output = true;
+ }
+ if (flipped_source != flip_output) {
+ scaler_stages->back().flip_output = !scaler_stages->back().flip_output;
+ }
+
+ scaler_stages->back().swizzle = swizzle;
+}
+
+std::unique_ptr<GLHelper::ScalerInterface> GLHelperScaling::CreateScaler(
+ GLHelper::ScalerQuality quality,
+ const gfx::Vector2d& scale_from,
+ const gfx::Vector2d& scale_to,
+ bool flipped_source,
+ bool flip_output,
+ bool swizzle) {
+ if (scale_from.x() == 0 || scale_from.y() == 0 || scale_to.x() == 0 ||
+ scale_to.y() == 0) {
+ // Invalid arguments: Cannot scale from or to a relative size of 0.
+ return nullptr;
+ }
+
+ std::vector<ScalerStage> scaler_stages;
+ ComputeScalerStages(quality, scale_from, scale_to, flipped_source,
+ flip_output, swizzle, &scaler_stages);
+
+ std::unique_ptr<ScalerImpl> ret;
+ for (unsigned int i = 0; i < scaler_stages.size(); i++) {
+ ret = std::make_unique<ScalerImpl>(gl_, this, scaler_stages[i],
+ std::move(ret));
+ }
+ ret->SetChainProperties(scale_from, scale_to, swizzle);
+ return std::move(ret);
+}
+
+std::unique_ptr<GLHelper::ScalerInterface>
+GLHelperScaling::CreateGrayscalePlanerizer(bool flipped_source,
+ bool flip_output,
+ bool swizzle) {
+ const ScalerStage stage = {
+ SHADER_PLANAR, gfx::Vector2d(4, 1), gfx::Vector2d(1, 1),
+ true, flipped_source, flip_output,
+ swizzle};
+ auto result = std::make_unique<ScalerImpl>(gl_, this, stage, nullptr);
+ result->SetColorWeights(0, kRGBtoGrayscaleColorWeights);
+ result->SetChainProperties(stage.scale_from, stage.scale_to, swizzle);
+ return std::move(result);
+}
+
+std::unique_ptr<GLHelper::ScalerInterface>
+GLHelperScaling::CreateI420Planerizer(int plane,
+ bool flipped_source,
+ bool flip_output,
+ bool swizzle) {
+ const ScalerStage stage = {
+ SHADER_PLANAR,
+ plane == 0 ? gfx::Vector2d(4, 1) : gfx::Vector2d(8, 2),
+ gfx::Vector2d(1, 1),
+ true,
+ flipped_source,
+ flip_output,
+ swizzle};
+ auto result = std::make_unique<ScalerImpl>(gl_, this, stage, nullptr);
+ switch (plane) {
+ case 0:
+ result->SetColorWeights(0, kRGBtoYColorWeights);
+ break;
+ case 1:
+ result->SetColorWeights(0, kRGBtoUColorWeights);
+ break;
+ case 2:
+ result->SetColorWeights(0, kRGBtoVColorWeights);
+ break;
+ default:
+ NOTREACHED();
+ }
+ result->SetChainProperties(stage.scale_from, stage.scale_to, swizzle);
+ return std::move(result);
+}
+
+std::unique_ptr<GLHelper::ScalerInterface>
+GLHelperScaling::CreateI420MrtPass1Planerizer(bool flipped_source,
+ bool flip_output,
+ bool swizzle) {
+ const ScalerStage stage = {SHADER_YUV_MRT_PASS1,
+ gfx::Vector2d(4, 1),
+ gfx::Vector2d(1, 1),
+ true,
+ flipped_source,
+ flip_output,
+ swizzle};
+ auto result = std::make_unique<ScalerImpl>(gl_, this, stage, nullptr);
+ result->SetColorWeights(0, kRGBtoYColorWeights);
+ result->SetColorWeights(1, kRGBtoUColorWeights);
+ result->SetColorWeights(2, kRGBtoVColorWeights);
+ result->SetChainProperties(stage.scale_from, stage.scale_to, swizzle);
+ return std::move(result);
+}
+
+std::unique_ptr<GLHelper::ScalerInterface>
+GLHelperScaling::CreateI420MrtPass2Planerizer(bool swizzle) {
+ const ScalerStage stage = {SHADER_YUV_MRT_PASS2,
+ gfx::Vector2d(2, 2),
+ gfx::Vector2d(1, 1),
+ true,
+ false,
+ false,
+ swizzle};
+ auto result = std::make_unique<ScalerImpl>(gl_, this, stage, nullptr);
+ result->SetChainProperties(stage.scale_from, stage.scale_to, swizzle);
+ return std::move(result);
+}
+
+// Triangle strip coordinates, used to sweep the entire source area when
+// executing the shader programs. The first two columns correspond to
+// values interpolated to produce |a_position| values in the shader programs,
+// while the latter two columns relate to the |a_texcoord| values; respectively,
+// the first pair are the vertex coordinates in object space, and the second
+// pair are the corresponding source texture coordinates.
+const GLfloat GLHelperScaling::kVertexAttributes[] = {
+ -1.0f, -1.0f, 0.0f, 0.0f, // vertex 0
+ 1.0f, -1.0f, 1.0f, 0.0f, // vertex 1
+ -1.0f, 1.0f, 0.0f, 1.0f, // vertex 2
+    1.0f, 1.0f, 1.0f, 1.0f,  // vertex 3
+};
+
+void GLHelperScaling::InitBuffer() {
+ ScopedBufferBinder<GL_ARRAY_BUFFER> buffer_binder(gl_,
+ vertex_attributes_buffer_);
+ gl_->BufferData(GL_ARRAY_BUFFER, sizeof(kVertexAttributes), kVertexAttributes,
+ GL_STATIC_DRAW);
+}
+
+scoped_refptr<ShaderProgram> GLHelperScaling::GetShaderProgram(ShaderType type,
+ bool swizzle) {
+ ShaderProgramKeyType key(type, swizzle);
+ scoped_refptr<ShaderProgram>& cache_entry(shader_programs_[key]);
+ if (!cache_entry) {
+ cache_entry = new ShaderProgram(gl_, helper_, type);
+ std::basic_string<GLchar> vertex_program;
+ std::basic_string<GLchar> fragment_program;
+ std::basic_string<GLchar> vertex_header;
+ std::basic_string<GLchar> fragment_directives;
+ std::basic_string<GLchar> fragment_header;
+ std::basic_string<GLchar> shared_variables;
+
+ vertex_header.append(
+ "precision highp float;\n"
+ "attribute vec2 a_position;\n"
+ "attribute vec2 a_texcoord;\n"
+ "uniform vec4 src_rect;\n");
+
+ fragment_header.append(
+ "precision mediump float;\n"
+ "uniform sampler2D s_texture;\n");
+
+ vertex_program.append(
+ " gl_Position = vec4(a_position, 0.0, 1.0);\n"
+ " vec2 texcoord = src_rect.xy + a_texcoord * src_rect.zw;\n");
+
+ switch (type) {
+ case SHADER_BILINEAR:
+ shared_variables.append("varying vec2 v_texcoord;\n");
+ vertex_program.append(" v_texcoord = texcoord;\n");
+ fragment_program.append(
+ " gl_FragColor = texture2D(s_texture, v_texcoord);\n");
+ break;
+
+ case SHADER_BILINEAR2:
+        // This is equivalent to two passes of the BILINEAR shader above.
+ // It can be used to scale an image down 1.0x-2.0x in either dimension,
+ // or exactly 4x.
+ shared_variables.append(
+ "varying vec4 v_texcoords;\n"); // 2 texcoords packed in one quad
+ vertex_header.append("uniform vec2 scaling_vector;\n");
+ vertex_program.append(
+ " vec2 step = scaling_vector / 4.0;\n"
+ " v_texcoords.xy = texcoord + step;\n"
+ " v_texcoords.zw = texcoord - step;\n");
+
+ fragment_program.append(
+ " gl_FragColor = (texture2D(s_texture, v_texcoords.xy) +\n"
+ " texture2D(s_texture, v_texcoords.zw)) / 2.0;\n");
+ break;
+
+ case SHADER_BILINEAR3:
+ // This is kind of like doing 1.5 passes of the BILINEAR shader.
+ // It can be used to scale an image down 1.5x-3.0x, or exactly 6x.
+ shared_variables.append(
+ "varying vec4 v_texcoords1;\n" // 2 texcoords packed in one quad
+ "varying vec2 v_texcoords2;\n");
+ vertex_header.append("uniform vec2 scaling_vector;\n");
+ vertex_program.append(
+ " vec2 step = scaling_vector / 3.0;\n"
+ " v_texcoords1.xy = texcoord + step;\n"
+ " v_texcoords1.zw = texcoord;\n"
+ " v_texcoords2 = texcoord - step;\n");
+ fragment_program.append(
+ " gl_FragColor = (texture2D(s_texture, v_texcoords1.xy) +\n"
+ " texture2D(s_texture, v_texcoords1.zw) +\n"
+ " texture2D(s_texture, v_texcoords2)) / 3.0;\n");
+ break;
+
+ case SHADER_BILINEAR4:
+        // This is equivalent to three passes of the BILINEAR shader above.
+        // It can be used to scale an image down 2.0x-4.0x or exactly 8x.
+ shared_variables.append("varying vec4 v_texcoords[2];\n");
+ vertex_header.append("uniform vec2 scaling_vector;\n");
+ vertex_program.append(
+ " vec2 step = scaling_vector / 8.0;\n"
+ " v_texcoords[0].xy = texcoord - step * 3.0;\n"
+ " v_texcoords[0].zw = texcoord - step;\n"
+ " v_texcoords[1].xy = texcoord + step;\n"
+ " v_texcoords[1].zw = texcoord + step * 3.0;\n");
+ fragment_program.append(
+ " gl_FragColor = (\n"
+ " texture2D(s_texture, v_texcoords[0].xy) +\n"
+ " texture2D(s_texture, v_texcoords[0].zw) +\n"
+ " texture2D(s_texture, v_texcoords[1].xy) +\n"
+ " texture2D(s_texture, v_texcoords[1].zw)) / 4.0;\n");
+ break;
+
+ case SHADER_BILINEAR2X2:
+        // This is equivalent to four passes of the BILINEAR shader above.
+ // Two in each dimension. It can be used to scale an image down
+ // 1.0x-2.0x in both X and Y directions. Or, it could be used to
+ // scale an image down by exactly 4x in both dimensions.
+ shared_variables.append("varying vec4 v_texcoords[2];\n");
+ vertex_header.append("uniform vec2 scaling_vector;\n");
+ vertex_program.append(
+ " vec2 step = scaling_vector / 4.0;\n"
+ " v_texcoords[0].xy = texcoord + vec2(step.x, step.y);\n"
+ " v_texcoords[0].zw = texcoord + vec2(step.x, -step.y);\n"
+ " v_texcoords[1].xy = texcoord + vec2(-step.x, step.y);\n"
+ " v_texcoords[1].zw = texcoord + vec2(-step.x, -step.y);\n");
+ fragment_program.append(
+ " gl_FragColor = (\n"
+ " texture2D(s_texture, v_texcoords[0].xy) +\n"
+ " texture2D(s_texture, v_texcoords[0].zw) +\n"
+ " texture2D(s_texture, v_texcoords[1].xy) +\n"
+ " texture2D(s_texture, v_texcoords[1].zw)) / 4.0;\n");
+ break;
+
+ case SHADER_BICUBIC_HALF_1D:
+        // This scales down the texture by exactly half in one dimension, in a
+        // single pass. We use bilinear lookups to reduce the number of texture
+        // reads from 8 to 4.
+ shared_variables.append(
+ "const float CenterDist = 99.0 / 140.0;\n"
+ "const float LobeDist = 11.0 / 4.0;\n"
+ "const float CenterWeight = 35.0 / 64.0;\n"
+ "const float LobeWeight = -3.0 / 64.0;\n"
+ "varying vec4 v_texcoords[2];\n");
+ vertex_header.append("uniform vec2 scaling_vector;\n");
+ vertex_program.append(
+ " vec2 step = scaling_vector / 2.0;\n"
+ " v_texcoords[0].xy = texcoord - LobeDist * step;\n"
+ " v_texcoords[0].zw = texcoord - CenterDist * step;\n"
+ " v_texcoords[1].xy = texcoord + CenterDist * step;\n"
+ " v_texcoords[1].zw = texcoord + LobeDist * step;\n");
+ fragment_program.append(
+ " gl_FragColor = \n"
+ // Lobe pixels
+ " (texture2D(s_texture, v_texcoords[0].xy) +\n"
+ " texture2D(s_texture, v_texcoords[1].zw)) *\n"
+ " LobeWeight +\n"
+ // Center pixels
+ " (texture2D(s_texture, v_texcoords[0].zw) +\n"
+ " texture2D(s_texture, v_texcoords[1].xy)) *\n"
+ " CenterWeight;\n");
+ break;
+
+ case SHADER_BICUBIC_UPSCALE:
+        // When scaling up, we need 4 texture reads, but we can save some
+        // instructions because we know in which range of the bicubic function
+        // each call will fall.
+ // Also, when sampling the bicubic function like this, the sum
+ // is always exactly one, so we can skip normalization as well.
+ shared_variables.append("varying vec2 v_texcoord;\n");
+ vertex_program.append(" v_texcoord = texcoord;\n");
+ fragment_header.append(
+ "uniform vec2 src_pixelsize;\n"
+ "uniform vec2 scaling_vector;\n"
+ "const float a = -0.5;\n"
+            // This function is equivalent to calling the bicubic
+ // function with x-1, x, 1-x and 2-x
+ // (assuming 0 <= x < 1)
+ "vec4 filt4(float x) {\n"
+ " return vec4(x * x * x, x * x, x, 1) *\n"
+ " mat4( a, -2.0 * a, a, 0.0,\n"
+ " a + 2.0, -a - 3.0, 0.0, 1.0,\n"
+ " -a - 2.0, 3.0 + 2.0 * a, -a, 0.0,\n"
+ " -a, a, 0.0, 0.0);\n"
+ "}\n"
+ "mat4 pixels_x(vec2 pos, vec2 step) {\n"
+ " return mat4(\n"
+ " texture2D(s_texture, pos - step),\n"
+ " texture2D(s_texture, pos),\n"
+ " texture2D(s_texture, pos + step),\n"
+ " texture2D(s_texture, pos + step * 2.0));\n"
+ "}\n");
+ fragment_program.append(
+ " vec2 pixel_pos = v_texcoord * src_pixelsize - \n"
+ " scaling_vector / 2.0;\n"
+ " float frac = fract(dot(pixel_pos, scaling_vector));\n"
+ " vec2 base = (floor(pixel_pos) + vec2(0.5)) / src_pixelsize;\n"
+ " vec2 step = scaling_vector / src_pixelsize;\n"
+ " gl_FragColor = pixels_x(base, step) * filt4(frac);\n");
+ break;
+
+ case SHADER_PLANAR:
+ // Converts four RGBA pixels into one pixel. Each RGBA
+ // pixel will be dot-multiplied with the color weights and
+ // then placed into a component of the output. This is used to
+ // convert RGBA textures into Y, U and V textures. We do this
+ // because single-component textures are not renderable on all
+ // architectures.
+ shared_variables.append("varying vec4 v_texcoords[2];\n");
+ vertex_header.append("uniform vec2 scaling_vector;\n");
+ vertex_program.append(
+ " vec2 step = scaling_vector / 4.0;\n"
+ " v_texcoords[0].xy = texcoord - step * 1.5;\n"
+ " v_texcoords[0].zw = texcoord - step * 0.5;\n"
+ " v_texcoords[1].xy = texcoord + step * 0.5;\n"
+ " v_texcoords[1].zw = texcoord + step * 1.5;\n");
+ fragment_header.append("uniform vec4 rgb_to_plane0;\n");
+ fragment_program.append(
+ " gl_FragColor = rgb_to_plane0 * mat4(\n"
+ " vec4(texture2D(s_texture, v_texcoords[0].xy).rgb, 1.0),\n"
+ " vec4(texture2D(s_texture, v_texcoords[0].zw).rgb, 1.0),\n"
+ " vec4(texture2D(s_texture, v_texcoords[1].xy).rgb, 1.0),\n"
+ " vec4(texture2D(s_texture, v_texcoords[1].zw).rgb, 1.0));\n");
+ break;
+
+ case SHADER_YUV_MRT_PASS1:
+ // RGB24 to YV12 in two passes; writing two 8888 targets each pass.
+ //
+ // YV12 is full-resolution luma and half-resolution blue/red chroma.
+ //
+ // (original)
+ // RGBX RGBX RGBX RGBX RGBX RGBX RGBX RGBX
+ // RGBX RGBX RGBX RGBX RGBX RGBX RGBX RGBX
+ // RGBX RGBX RGBX RGBX RGBX RGBX RGBX RGBX
+ // RGBX RGBX RGBX RGBX RGBX RGBX RGBX RGBX
+ // RGBX RGBX RGBX RGBX RGBX RGBX RGBX RGBX
+ // RGBX RGBX RGBX RGBX RGBX RGBX RGBX RGBX
+ // |
+ // | (y plane) (temporary)
+ // | YYYY YYYY UUVV UUVV
+ // +--> { YYYY YYYY + UUVV UUVV }
+ // YYYY YYYY UUVV UUVV
+ // First YYYY YYYY UUVV UUVV
+ // pass YYYY YYYY UUVV UUVV
+ // YYYY YYYY UUVV UUVV
+ // |
+ // | (u plane) (v plane)
+ // Second | UUUU VVVV
+ // pass +--> { UUUU + VVVV }
+ // UUUU VVVV
+ //
+ shared_variables.append("varying vec4 v_texcoords[2];\n");
+ vertex_header.append("uniform vec2 scaling_vector;\n");
+ vertex_program.append(
+ " vec2 step = scaling_vector / 4.0;\n"
+ " v_texcoords[0].xy = texcoord - step * 1.5;\n"
+ " v_texcoords[0].zw = texcoord - step * 0.5;\n"
+ " v_texcoords[1].xy = texcoord + step * 0.5;\n"
+ " v_texcoords[1].zw = texcoord + step * 1.5;\n");
+ fragment_directives.append("#extension GL_EXT_draw_buffers : enable\n");
+ fragment_header.append(
+ "uniform vec4 rgb_to_plane0;\n" // RGB-to-Y
+ "uniform vec4 rgb_to_plane1;\n" // RGB-to-U
+ "uniform vec4 rgb_to_plane2;\n"); // RGB-to-V
+ fragment_program.append(
+ " vec4 pixel1 = vec4(texture2D(s_texture, v_texcoords[0].xy).rgb, "
+ " 1.0);\n"
+ " vec4 pixel2 = vec4(texture2D(s_texture, v_texcoords[0].zw).rgb, "
+ " 1.0);\n"
+ " vec4 pixel3 = vec4(texture2D(s_texture, v_texcoords[1].xy).rgb, "
+ " 1.0);\n"
+ " vec4 pixel4 = vec4(texture2D(s_texture, v_texcoords[1].zw).rgb, "
+ " 1.0);\n"
+ " vec4 pixel12 = (pixel1 + pixel2) / 2.0;\n"
+ " vec4 pixel34 = (pixel3 + pixel4) / 2.0;\n"
+ " gl_FragData[0] = vec4(dot(pixel1, rgb_to_plane0),\n"
+ " dot(pixel2, rgb_to_plane0),\n"
+ " dot(pixel3, rgb_to_plane0),\n"
+ " dot(pixel4, rgb_to_plane0));\n"
+ " gl_FragData[1] = vec4(dot(pixel12, rgb_to_plane1),\n"
+ " dot(pixel34, rgb_to_plane1),\n"
+ " dot(pixel12, rgb_to_plane2),\n"
+ " dot(pixel34, rgb_to_plane2));\n");
+ break;
+
+ case SHADER_YUV_MRT_PASS2:
+ // We're just sampling two pixels and unswizzling them. There's
+ // no need to do vertical scaling with math, since bilinear
+ // interpolation in the sampler takes care of that.
+ shared_variables.append("varying vec4 v_texcoords;\n");
+ vertex_header.append("uniform vec2 scaling_vector;\n");
+ vertex_program.append(
+ " vec2 step = scaling_vector / 2.0;\n"
+ " v_texcoords.xy = texcoord - step * 0.5;\n"
+ " v_texcoords.zw = texcoord + step * 0.5;\n");
+ fragment_directives.append("#extension GL_EXT_draw_buffers : enable\n");
+ fragment_program.append(
+ " vec4 lo_uuvv = texture2D(s_texture, v_texcoords.xy);\n"
+ " vec4 hi_uuvv = texture2D(s_texture, v_texcoords.zw);\n"
+ " gl_FragData[0] = vec4(lo_uuvv.rg, hi_uuvv.rg);\n"
+ " gl_FragData[1] = vec4(lo_uuvv.ba, hi_uuvv.ba);\n");
+ break;
+ }
+ if (swizzle) {
+ switch (type) {
+ case SHADER_YUV_MRT_PASS1:
+ fragment_program.append(" gl_FragData[0] = gl_FragData[0].bgra;\n");
+ break;
+ case SHADER_YUV_MRT_PASS2:
+ fragment_program.append(" gl_FragData[0] = gl_FragData[0].bgra;\n");
+ fragment_program.append(" gl_FragData[1] = gl_FragData[1].bgra;\n");
+ break;
+ default:
+ fragment_program.append(" gl_FragColor = gl_FragColor.bgra;\n");
+ break;
+ }
+ }
+
+ vertex_program = vertex_header + shared_variables + "void main() {\n" +
+ vertex_program + "}\n";
+
+ fragment_program = fragment_directives + fragment_header +
+ shared_variables + "void main() {\n" + fragment_program +
+ "}\n";
+
+ cache_entry->Setup(vertex_program.c_str(), fragment_program.c_str());
+ }
+ return cache_entry;
+}
+
+namespace {
+GLuint CompileShaderFromSource(GLES2Interface* gl,
+ const GLchar* source,
+ GLenum type) {
+ GLuint shader = gl->CreateShader(type);
+ GLint length = base::checked_cast<GLint>(strlen(source));
+ gl->ShaderSource(shader, 1, &source, &length);
+ gl->CompileShader(shader);
+ GLint compile_status = 0;
+ gl->GetShaderiv(shader, GL_COMPILE_STATUS, &compile_status);
+ if (!compile_status) {
+ GLint log_length = 0;
+ gl->GetShaderiv(shader, GL_INFO_LOG_LENGTH, &log_length);
+ if (log_length) {
+ std::unique_ptr<GLchar[]> log(new GLchar[log_length]);
+ GLsizei returned_log_length = 0;
+ gl->GetShaderInfoLog(shader, log_length, &returned_log_length, log.get());
+ LOG(ERROR) << std::string(log.get(), returned_log_length);
+ }
+ gl->DeleteShader(shader);
+ return 0;
+ }
+ return shader;
+}
+} // namespace
+
+void ShaderProgram::Setup(const GLchar* vertex_shader_text,
+ const GLchar* fragment_shader_text) {
+ // Shaders to map the source texture to |dst_texture_|.
+ const GLuint vertex_shader =
+ CompileShaderFromSource(gl_, vertex_shader_text, GL_VERTEX_SHADER);
+ if (vertex_shader == 0)
+ return;
+
+ gl_->AttachShader(program_, vertex_shader);
+ gl_->DeleteShader(vertex_shader);
+
+ const GLuint fragment_shader =
+ CompileShaderFromSource(gl_, fragment_shader_text, GL_FRAGMENT_SHADER);
+ if (fragment_shader == 0)
+ return;
+ gl_->AttachShader(program_, fragment_shader);
+ gl_->DeleteShader(fragment_shader);
+
+ gl_->LinkProgram(program_);
+
+ GLint link_status = 0;
+ gl_->GetProgramiv(program_, GL_LINK_STATUS, &link_status);
+ if (!link_status)
+ return;
+
+ position_location_ = gl_->GetAttribLocation(program_, "a_position");
+ texcoord_location_ = gl_->GetAttribLocation(program_, "a_texcoord");
+ texture_location_ = gl_->GetUniformLocation(program_, "s_texture");
+ src_rect_location_ = gl_->GetUniformLocation(program_, "src_rect");
+ src_pixelsize_location_ = gl_->GetUniformLocation(program_, "src_pixelsize");
+ scaling_vector_location_ =
+ gl_->GetUniformLocation(program_, "scaling_vector");
+ rgb_to_plane0_location_ = gl_->GetUniformLocation(program_, "rgb_to_plane0");
+ rgb_to_plane1_location_ = gl_->GetUniformLocation(program_, "rgb_to_plane1");
+ rgb_to_plane2_location_ = gl_->GetUniformLocation(program_, "rgb_to_plane2");
+ // The only reason fetching these attribute locations should fail is
+ // if the context was spontaneously lost (i.e., because the GPU
+ // process crashed, perhaps deliberately for testing).
+ DCHECK(Initialized() || gl_->GetGraphicsResetStatusKHR() != GL_NO_ERROR);
+}
+
+void ShaderProgram::UseProgram(const gfx::Size& src_texture_size,
+ const gfx::RectF& src_rect,
+ const gfx::Size& dst_size,
+ bool scale_x,
+ bool flip_y,
+ const GLfloat color_weights[3][4]) {
+ gl_->UseProgram(program_);
+
+ // OpenGL defines the last parameter to VertexAttribPointer as type
+ // "const GLvoid*" even though it is actually an offset into the buffer
+ // object's data store and not a pointer to the client's address space.
+ const void* offsets[2] = {nullptr,
+ reinterpret_cast<const void*>(2 * sizeof(GLfloat))};
+
+ gl_->VertexAttribPointer(position_location_, 2, GL_FLOAT, GL_FALSE,
+ 4 * sizeof(GLfloat), offsets[0]);
+ gl_->EnableVertexAttribArray(position_location_);
+
+ gl_->VertexAttribPointer(texcoord_location_, 2, GL_FLOAT, GL_FALSE,
+ 4 * sizeof(GLfloat), offsets[1]);
+ gl_->EnableVertexAttribArray(texcoord_location_);
+
+ gl_->Uniform1i(texture_location_, 0);
+
+ // Convert |src_rect| from pixel coordinates to texture coordinates. The
+ // source texture coordinates are in the range [0.0,1.0] for each dimension,
+ // but the sampling rect may slightly "spill" outside that range (e.g., for
+ // scaler overscan).
+ GLfloat src_rect_texcoord[4] = {
+ src_rect.x() / src_texture_size.width(),
+ src_rect.y() / src_texture_size.height(),
+ src_rect.width() / src_texture_size.width(),
+ src_rect.height() / src_texture_size.height(),
+ };
+ if (flip_y) {
+ src_rect_texcoord[1] += src_rect_texcoord[3];
+ src_rect_texcoord[3] *= -1.0f;
+ }
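+  // For example, with |flip_y| set, a full-texture rect maps from
+  // {0, 0, 1, 1} to {0, 1, 1, -1}, so interpolation across the quad walks the
+  // source rows in reverse.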
+ gl_->Uniform4fv(src_rect_location_, 1, src_rect_texcoord);
+
+ // Set shader-specific uniform inputs. The |scaling_vector| is the ratio of
+  // source pixels sampled per destination pixel output. It is used by
+ // the shader programs to locate distinct texels from the source texture, and
+ // sample them at the appropriate offset to produce each output texel. In many
+ // cases, |scaling_vector| also selects whether scaling will happen only in
+ // the X or the Y dimension.
+ switch (shader_) {
+ case GLHelperScaling::SHADER_BILINEAR:
+ break;
+
+ case GLHelperScaling::SHADER_BILINEAR2:
+ case GLHelperScaling::SHADER_BILINEAR3:
+ case GLHelperScaling::SHADER_BILINEAR4:
+ case GLHelperScaling::SHADER_BICUBIC_HALF_1D:
+ case GLHelperScaling::SHADER_PLANAR:
+ case GLHelperScaling::SHADER_YUV_MRT_PASS1:
+ case GLHelperScaling::SHADER_YUV_MRT_PASS2:
+ if (scale_x) {
+ gl_->Uniform2f(scaling_vector_location_,
+ src_rect_texcoord[2] / dst_size.width(), 0.0);
+ } else {
+ gl_->Uniform2f(scaling_vector_location_, 0.0,
+ src_rect_texcoord[3] / dst_size.height());
+ }
+ break;
+
+ case GLHelperScaling::SHADER_BILINEAR2X2:
+ gl_->Uniform2f(scaling_vector_location_,
+ src_rect_texcoord[2] / dst_size.width(),
+ src_rect_texcoord[3] / dst_size.height());
+ break;
+
+ case GLHelperScaling::SHADER_BICUBIC_UPSCALE:
+ gl_->Uniform2f(src_pixelsize_location_, src_texture_size.width(),
+ src_texture_size.height());
+ // For this shader program, the |scaling_vector| has an alternate meaning:
+ // It is only being used to select whether sampling is stepped in the X or
+ // the Y direction.
+ gl_->Uniform2f(scaling_vector_location_, scale_x ? 1.0 : 0.0,
+ scale_x ? 0.0 : 1.0);
+ break;
+ }
+
+ if (rgb_to_plane0_location_ != -1) {
+ gl_->Uniform4fv(rgb_to_plane0_location_, 1, &color_weights[0][0]);
+ if (rgb_to_plane1_location_ != -1) {
+ DCHECK_NE(rgb_to_plane2_location_, -1);
+ gl_->Uniform4fv(rgb_to_plane1_location_, 1, &color_weights[1][0]);
+ gl_->Uniform4fv(rgb_to_plane2_location_, 1, &color_weights[2][0]);
+ }
+ }
+}
+
+} // namespace gpu
diff --git a/chromium/gpu/command_buffer/client/gl_helper_scaling.h b/chromium/gpu/command_buffer/client/gl_helper_scaling.h
new file mode 100644
index 00000000000..821b37fe624
--- /dev/null
+++ b/chromium/gpu/command_buffer/client/gl_helper_scaling.h
@@ -0,0 +1,197 @@
+// Copyright (c) 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef GPU_COMMAND_BUFFER_CLIENT_GL_HELPER_SCALING_H_
+#define GPU_COMMAND_BUFFER_CLIENT_GL_HELPER_SCALING_H_
+
+#include <map>
+#include <vector>
+
+#include "base/containers/circular_deque.h"
+#include "base/macros.h"
+#include "gpu/command_buffer/client/gl_helper.h"
+#include "gpu/gpu_export.h"
+#include "ui/gfx/geometry/vector2d.h"
+
+namespace gpu {
+
+class GLHelperTest;
+class ScalerImpl;
+class ShaderProgram;
+
+// Implements GPU texture scaling methods.
+// Note that you should probably not use this class directly.
+// See gl_helper.cc::CreateScaler instead.
+class GPU_EXPORT GLHelperScaling {
+ public:
+ enum ShaderType {
+ SHADER_BILINEAR,
+ SHADER_BILINEAR2,
+ SHADER_BILINEAR3,
+ SHADER_BILINEAR4,
+ SHADER_BILINEAR2X2,
+ SHADER_BICUBIC_UPSCALE,
+ SHADER_BICUBIC_HALF_1D,
+ SHADER_PLANAR,
+ SHADER_YUV_MRT_PASS1,
+ SHADER_YUV_MRT_PASS2,
+ };
+
+ using ShaderProgramKeyType = std::pair<ShaderType, bool>;
+
+ GLHelperScaling(gles2::GLES2Interface* gl, GLHelper* helper);
+ ~GLHelperScaling();
+ void InitBuffer();
+
+ // Returns null on invalid arguments.
+ std::unique_ptr<GLHelper::ScalerInterface> CreateScaler(
+ GLHelper::ScalerQuality quality,
+ const gfx::Vector2d& scale_from,
+ const gfx::Vector2d& scale_to,
+ bool flipped_source,
+ bool flip_output,
+ bool swizzle);
+
+ // These convert source textures with RGBA pixel data into a single-color-
+ // channel planar format. Used for grayscale and I420 format conversion.
+ //
+ // While these output RGBA pixels in the destination texture(s), each RGBA
+ // pixel is actually a container for 4 consecutive pixels in the result.
+ std::unique_ptr<GLHelper::ScalerInterface> CreateGrayscalePlanerizer(
+ bool flipped_source,
+ bool flip_output,
+ bool swizzle);
+ std::unique_ptr<GLHelper::ScalerInterface> CreateI420Planerizer(
+ int plane, // 0=Y, 1=U, 2=V
+ bool flipped_source,
+ bool flip_output,
+ bool swizzle);
+
+ // These are a faster path to I420 planerization, if the platform supports
+ // it. The first pass draws to two outputs simultaneously: the Y plane and an
+ // interim UV plane that is used as the input to the second pass. Then, the
+ // second pass splits the UV plane, drawing to two outputs: the final U plane
+ // and final V plane. Thus, clients should call ScaleToMultipleOutputs() on
+ // the returned instance.
+ std::unique_ptr<GLHelper::ScalerInterface> CreateI420MrtPass1Planerizer(
+ bool flipped_source,
+ bool flip_output,
+ bool swizzle);
+ std::unique_ptr<GLHelper::ScalerInterface> CreateI420MrtPass2Planerizer(
+ bool swizzle);
+
+ private:
+ // A ScaleOp represents a pass in a scaler pipeline, in one dimension.
+ // Note that when quality is GOOD, multiple scaler passes will be
+ // combined into one operation for increased performance.
+ // Exposed in the header file for testing purposes.
+ struct ScaleOp {
+ ScaleOp(int factor, bool x, int size)
+ : scale_factor(factor), scale_x(x), scale_size(size) {}
+
+    // Calculates the sequence of ScaleOps needed to convert an image of
+ // relative size |src| into an image of relative size |dst|. If |scale_x| is
+ // true, then the calculations are for the X axis of the image, otherwise Y.
+ // If |allow3| is true, we can use a SHADER_BILINEAR3 to replace
+ // a scale up and scale down with a 3-tap bilinear scale.
+ // The calculated ScaleOps are added to |ops|.
+ static void AddOps(int src,
+ int dst,
+ bool scale_x,
+ bool allow3,
+ base::circular_deque<ScaleOp>* ops) {
+ int num_downscales = 0;
+ if (allow3 && dst * 3 >= src && dst * 2 < src) {
+ // Technically, this should be a scale up and then a
+ // scale down, but it makes the optimization code more
+ // complicated.
+ ops->push_back(ScaleOp(3, scale_x, dst));
+ return;
+ }
+ while ((dst << num_downscales) < src) {
+ num_downscales++;
+ }
+ if ((dst << num_downscales) != src) {
+ ops->push_back(ScaleOp(0, scale_x, dst << num_downscales));
+ }
+ while (num_downscales) {
+ num_downscales--;
+ ops->push_back(ScaleOp(2, scale_x, dst << num_downscales));
+ }
+ }
+
+    // Update either the X or Y component of |scale| to match the relative
+ // result size of this ScaleOp.
+ void UpdateScale(gfx::Vector2d* scale) {
+ if (scale_x) {
+ scale->set_x(scale_size);
+ } else {
+ scale->set_y(scale_size);
+ }
+ }
+
+ // A scale factor of 0 means upscale
+ // 2 means 50% scale
+ // 3 means 33% scale, etc.
+ int scale_factor;
+ bool scale_x; // Otherwise y
+ int scale_size; // Size to scale to.
+ };
+
+ // Full specification for a single scaling stage.
+ struct ScalerStage {
+ ShaderType shader;
+ gfx::Vector2d scale_from;
+ gfx::Vector2d scale_to;
+ bool scale_x;
+ bool flipped_source;
+ bool flip_output;
+ bool swizzle;
+ };
+
+ // Compute a vector of scaler stages for a particular
+ // set of input/output parameters.
+ static void ComputeScalerStages(GLHelper::ScalerQuality quality,
+ const gfx::Vector2d& scale_from,
+ const gfx::Vector2d& scale_to,
+ bool flipped_source,
+ bool flip_output,
+ bool swizzle,
+ std::vector<ScalerStage>* scaler_stages);
+
+ // Take two queues of ScaleOp structs and generate a
+ // vector of scaler stages. This is the second half of
+ // ComputeScalerStages.
+ static void ConvertScalerOpsToScalerStages(
+ GLHelper::ScalerQuality quality,
+ gfx::Vector2d scale_from,
+ base::circular_deque<GLHelperScaling::ScaleOp>* x_ops,
+ base::circular_deque<GLHelperScaling::ScaleOp>* y_ops,
+ std::vector<ScalerStage>* scaler_stages);
+
+ scoped_refptr<ShaderProgram> GetShaderProgram(ShaderType type, bool swizzle);
+
+  // Interleaved array of 2-dimensional vertex positions (x, y) and
+  // 2-dimensional texture coordinates (s, t).
+ static const GLfloat kVertexAttributes[];
+
+ gles2::GLES2Interface* gl_;
+ GLHelper* helper_;
+
+ // The buffer that holds the vertices and the texture coordinates data for
+ // drawing a quad.
+ ScopedBuffer vertex_attributes_buffer_;
+
+ std::map<ShaderProgramKeyType, scoped_refptr<ShaderProgram>> shader_programs_;
+
+ friend class ShaderProgram;
+ friend class ScalerImpl;
+ friend class GLHelperBenchmark;
+ friend class GLHelperTest;
+ DISALLOW_COPY_AND_ASSIGN(GLHelperScaling);
+};
+
+} // namespace gpu
+
+#endif // GPU_COMMAND_BUFFER_CLIENT_GL_HELPER_SCALING_H_
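
The ScaleOp::AddOps helper above decomposes a one-dimensional resize into at most one upscale followed by repeated 50% passes, or a single 3-tap pass when |allow3| applies. The standalone mirror below (a local Op struct rather than the real ScaleOp, for illustration only) prints the sequence produced for a 100 -> 30 downscale.

#include <deque>
#include <iostream>

struct Op {
  int factor;  // 0 = upscale, 2 = 50% pass, 3 = 3-tap (one-third) pass
  int size;    // relative size after this pass
};

void AddOps(int src, int dst, bool allow3, std::deque<Op>* ops) {
  if (allow3 && dst * 3 >= src && dst * 2 < src) {
    ops->push_back({3, dst});  // single 3-tap bilinear pass
    return;
  }
  int num_downscales = 0;
  while ((dst << num_downscales) < src)
    ++num_downscales;
  if ((dst << num_downscales) != src)
    ops->push_back({0, dst << num_downscales});  // upscale to a power-of-two multiple of dst
  while (num_downscales) {
    --num_downscales;
    ops->push_back({2, dst << num_downscales});  // repeated 50% passes
  }
}

int main() {
  std::deque<Op> ops;
  AddOps(100, 30, /*allow3=*/false, &ops);
  for (const Op& op : ops)
    std::cout << op.factor << " -> " << op.size << "\n";
  // Prints: 0 -> 120, 2 -> 60, 2 -> 30
  // i.e. upscale 100 to 120, then two halving passes down to 30.
}
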
diff --git a/chromium/gpu/command_buffer/client/gles2_c_lib_autogen.h b/chromium/gpu/command_buffer/client/gles2_c_lib_autogen.h
index d6e0485819b..76546b8dd53 100644
--- a/chromium/gpu/command_buffer/client/gles2_c_lib_autogen.h
+++ b/chromium/gpu/command_buffer/client/gles2_c_lib_autogen.h
@@ -1413,7 +1413,7 @@ void GL_APIENTRY GLES2UnmapTexSubImage2DCHROMIUM(const void* mem) {
void GL_APIENTRY GLES2ResizeCHROMIUM(GLuint width,
GLuint height,
GLfloat scale_factor,
- GLenum color_space,
+ GLcolorSpace color_space,
GLboolean alpha) {
gles2::GetGLContext()->ResizeCHROMIUM(width, height, scale_factor,
color_space, alpha);
@@ -1692,168 +1692,6 @@ void GL_APIENTRY GLES2ScheduleDCLayerCHROMIUM(GLuint texture_0,
void GL_APIENTRY GLES2SetActiveURLCHROMIUM(const char* url) {
gles2::GetGLContext()->SetActiveURLCHROMIUM(url);
}
-void GL_APIENTRY GLES2MatrixLoadfCHROMIUM(GLenum matrixMode, const GLfloat* m) {
- gles2::GetGLContext()->MatrixLoadfCHROMIUM(matrixMode, m);
-}
-void GL_APIENTRY GLES2MatrixLoadIdentityCHROMIUM(GLenum matrixMode) {
- gles2::GetGLContext()->MatrixLoadIdentityCHROMIUM(matrixMode);
-}
-GLuint GL_APIENTRY GLES2GenPathsCHROMIUM(GLsizei range) {
- return gles2::GetGLContext()->GenPathsCHROMIUM(range);
-}
-void GL_APIENTRY GLES2DeletePathsCHROMIUM(GLuint path, GLsizei range) {
- gles2::GetGLContext()->DeletePathsCHROMIUM(path, range);
-}
-GLboolean GL_APIENTRY GLES2IsPathCHROMIUM(GLuint path) {
- return gles2::GetGLContext()->IsPathCHROMIUM(path);
-}
-void GL_APIENTRY GLES2PathCommandsCHROMIUM(GLuint path,
- GLsizei numCommands,
- const GLubyte* commands,
- GLsizei numCoords,
- GLenum coordType,
- const GLvoid* coords) {
- gles2::GetGLContext()->PathCommandsCHROMIUM(path, numCommands, commands,
- numCoords, coordType, coords);
-}
-void GL_APIENTRY GLES2PathParameterfCHROMIUM(GLuint path,
- GLenum pname,
- GLfloat value) {
- gles2::GetGLContext()->PathParameterfCHROMIUM(path, pname, value);
-}
-void GL_APIENTRY GLES2PathParameteriCHROMIUM(GLuint path,
- GLenum pname,
- GLint value) {
- gles2::GetGLContext()->PathParameteriCHROMIUM(path, pname, value);
-}
-void GL_APIENTRY GLES2PathStencilFuncCHROMIUM(GLenum func,
- GLint ref,
- GLuint mask) {
- gles2::GetGLContext()->PathStencilFuncCHROMIUM(func, ref, mask);
-}
-void GL_APIENTRY GLES2StencilFillPathCHROMIUM(GLuint path,
- GLenum fillMode,
- GLuint mask) {
- gles2::GetGLContext()->StencilFillPathCHROMIUM(path, fillMode, mask);
-}
-void GL_APIENTRY GLES2StencilStrokePathCHROMIUM(GLuint path,
- GLint reference,
- GLuint mask) {
- gles2::GetGLContext()->StencilStrokePathCHROMIUM(path, reference, mask);
-}
-void GL_APIENTRY GLES2CoverFillPathCHROMIUM(GLuint path, GLenum coverMode) {
- gles2::GetGLContext()->CoverFillPathCHROMIUM(path, coverMode);
-}
-void GL_APIENTRY GLES2CoverStrokePathCHROMIUM(GLuint path, GLenum coverMode) {
- gles2::GetGLContext()->CoverStrokePathCHROMIUM(path, coverMode);
-}
-void GL_APIENTRY GLES2StencilThenCoverFillPathCHROMIUM(GLuint path,
- GLenum fillMode,
- GLuint mask,
- GLenum coverMode) {
- gles2::GetGLContext()->StencilThenCoverFillPathCHROMIUM(path, fillMode, mask,
- coverMode);
-}
-void GL_APIENTRY GLES2StencilThenCoverStrokePathCHROMIUM(GLuint path,
- GLint reference,
- GLuint mask,
- GLenum coverMode) {
- gles2::GetGLContext()->StencilThenCoverStrokePathCHROMIUM(path, reference,
- mask, coverMode);
-}
-void GL_APIENTRY
-GLES2StencilFillPathInstancedCHROMIUM(GLsizei numPaths,
- GLenum pathNameType,
- const GLvoid* paths,
- GLuint pathBase,
- GLenum fillMode,
- GLuint mask,
- GLenum transformType,
- const GLfloat* transformValues) {
- gles2::GetGLContext()->StencilFillPathInstancedCHROMIUM(
- numPaths, pathNameType, paths, pathBase, fillMode, mask, transformType,
- transformValues);
-}
-void GL_APIENTRY
-GLES2StencilStrokePathInstancedCHROMIUM(GLsizei numPaths,
- GLenum pathNameType,
- const GLvoid* paths,
- GLuint pathBase,
- GLint reference,
- GLuint mask,
- GLenum transformType,
- const GLfloat* transformValues) {
- gles2::GetGLContext()->StencilStrokePathInstancedCHROMIUM(
- numPaths, pathNameType, paths, pathBase, reference, mask, transformType,
- transformValues);
-}
-void GL_APIENTRY
-GLES2CoverFillPathInstancedCHROMIUM(GLsizei numPaths,
- GLenum pathNameType,
- const GLvoid* paths,
- GLuint pathBase,
- GLenum coverMode,
- GLenum transformType,
- const GLfloat* transformValues) {
- gles2::GetGLContext()->CoverFillPathInstancedCHROMIUM(
- numPaths, pathNameType, paths, pathBase, coverMode, transformType,
- transformValues);
-}
-void GL_APIENTRY
-GLES2CoverStrokePathInstancedCHROMIUM(GLsizei numPaths,
- GLenum pathNameType,
- const GLvoid* paths,
- GLuint pathBase,
- GLenum coverMode,
- GLenum transformType,
- const GLfloat* transformValues) {
- gles2::GetGLContext()->CoverStrokePathInstancedCHROMIUM(
- numPaths, pathNameType, paths, pathBase, coverMode, transformType,
- transformValues);
-}
-void GL_APIENTRY
-GLES2StencilThenCoverFillPathInstancedCHROMIUM(GLsizei numPaths,
- GLenum pathNameType,
- const GLvoid* paths,
- GLuint pathBase,
- GLenum fillMode,
- GLuint mask,
- GLenum coverMode,
- GLenum transformType,
- const GLfloat* transformValues) {
- gles2::GetGLContext()->StencilThenCoverFillPathInstancedCHROMIUM(
- numPaths, pathNameType, paths, pathBase, fillMode, mask, coverMode,
- transformType, transformValues);
-}
-void GL_APIENTRY GLES2StencilThenCoverStrokePathInstancedCHROMIUM(
- GLsizei numPaths,
- GLenum pathNameType,
- const GLvoid* paths,
- GLuint pathBase,
- GLint reference,
- GLuint mask,
- GLenum coverMode,
- GLenum transformType,
- const GLfloat* transformValues) {
- gles2::GetGLContext()->StencilThenCoverStrokePathInstancedCHROMIUM(
- numPaths, pathNameType, paths, pathBase, reference, mask, coverMode,
- transformType, transformValues);
-}
-void GL_APIENTRY GLES2BindFragmentInputLocationCHROMIUM(GLuint program,
- GLint location,
- const char* name) {
- gles2::GetGLContext()->BindFragmentInputLocationCHROMIUM(program, location,
- name);
-}
-void GL_APIENTRY
-GLES2ProgramPathFragmentInputGenCHROMIUM(GLuint program,
- GLint location,
- GLenum genMode,
- GLint components,
- const GLfloat* coeffs) {
- gles2::GetGLContext()->ProgramPathFragmentInputGenCHROMIUM(
- program, location, genMode, components, coeffs);
-}
void GL_APIENTRY GLES2ContextVisibilityHintCHROMIUM(GLboolean visibility) {
gles2::GetGLContext()->ContextVisibilityHintCHROMIUM(visibility);
}
@@ -1932,7 +1770,7 @@ void GL_APIENTRY GLES2TexStorage2DImageCHROMIUM(GLenum target,
bufferUsage, width, height);
}
void GL_APIENTRY GLES2SetColorSpaceMetadataCHROMIUM(GLuint texture_id,
- GLColorSpace color_space) {
+ GLcolorSpace color_space) {
gles2::GetGLContext()->SetColorSpaceMetadataCHROMIUM(texture_id, color_space);
}
void GL_APIENTRY GLES2WindowRectanglesEXT(GLenum mode,
@@ -1988,6 +1826,12 @@ void GL_APIENTRY GLES2BeginSharedImageAccessDirectCHROMIUM(GLuint texture,
void GL_APIENTRY GLES2EndSharedImageAccessDirectCHROMIUM(GLuint texture) {
gles2::GetGLContext()->EndSharedImageAccessDirectCHROMIUM(texture);
}
+void GL_APIENTRY GLES2BeginBatchReadAccessSharedImageCHROMIUM() {
+ gles2::GetGLContext()->BeginBatchReadAccessSharedImageCHROMIUM();
+}
+void GL_APIENTRY GLES2EndBatchReadAccessSharedImageCHROMIUM() {
+ gles2::GetGLContext()->EndBatchReadAccessSharedImageCHROMIUM();
+}
namespace gles2 {
@@ -3296,108 +3140,6 @@ extern const NameToFunc g_gles2_function_table[] = {
reinterpret_cast<GLES2FunctionPointer>(glSetActiveURLCHROMIUM),
},
{
- "glMatrixLoadfCHROMIUM",
- reinterpret_cast<GLES2FunctionPointer>(glMatrixLoadfCHROMIUM),
- },
- {
- "glMatrixLoadIdentityCHROMIUM",
- reinterpret_cast<GLES2FunctionPointer>(glMatrixLoadIdentityCHROMIUM),
- },
- {
- "glGenPathsCHROMIUM",
- reinterpret_cast<GLES2FunctionPointer>(glGenPathsCHROMIUM),
- },
- {
- "glDeletePathsCHROMIUM",
- reinterpret_cast<GLES2FunctionPointer>(glDeletePathsCHROMIUM),
- },
- {
- "glIsPathCHROMIUM",
- reinterpret_cast<GLES2FunctionPointer>(glIsPathCHROMIUM),
- },
- {
- "glPathCommandsCHROMIUM",
- reinterpret_cast<GLES2FunctionPointer>(glPathCommandsCHROMIUM),
- },
- {
- "glPathParameterfCHROMIUM",
- reinterpret_cast<GLES2FunctionPointer>(glPathParameterfCHROMIUM),
- },
- {
- "glPathParameteriCHROMIUM",
- reinterpret_cast<GLES2FunctionPointer>(glPathParameteriCHROMIUM),
- },
- {
- "glPathStencilFuncCHROMIUM",
- reinterpret_cast<GLES2FunctionPointer>(glPathStencilFuncCHROMIUM),
- },
- {
- "glStencilFillPathCHROMIUM",
- reinterpret_cast<GLES2FunctionPointer>(glStencilFillPathCHROMIUM),
- },
- {
- "glStencilStrokePathCHROMIUM",
- reinterpret_cast<GLES2FunctionPointer>(glStencilStrokePathCHROMIUM),
- },
- {
- "glCoverFillPathCHROMIUM",
- reinterpret_cast<GLES2FunctionPointer>(glCoverFillPathCHROMIUM),
- },
- {
- "glCoverStrokePathCHROMIUM",
- reinterpret_cast<GLES2FunctionPointer>(glCoverStrokePathCHROMIUM),
- },
- {
- "glStencilThenCoverFillPathCHROMIUM",
- reinterpret_cast<GLES2FunctionPointer>(
- glStencilThenCoverFillPathCHROMIUM),
- },
- {
- "glStencilThenCoverStrokePathCHROMIUM",
- reinterpret_cast<GLES2FunctionPointer>(
- glStencilThenCoverStrokePathCHROMIUM),
- },
- {
- "glStencilFillPathInstancedCHROMIUM",
- reinterpret_cast<GLES2FunctionPointer>(
- glStencilFillPathInstancedCHROMIUM),
- },
- {
- "glStencilStrokePathInstancedCHROMIUM",
- reinterpret_cast<GLES2FunctionPointer>(
- glStencilStrokePathInstancedCHROMIUM),
- },
- {
- "glCoverFillPathInstancedCHROMIUM",
- reinterpret_cast<GLES2FunctionPointer>(
- glCoverFillPathInstancedCHROMIUM),
- },
- {
- "glCoverStrokePathInstancedCHROMIUM",
- reinterpret_cast<GLES2FunctionPointer>(
- glCoverStrokePathInstancedCHROMIUM),
- },
- {
- "glStencilThenCoverFillPathInstancedCHROMIUM",
- reinterpret_cast<GLES2FunctionPointer>(
- glStencilThenCoverFillPathInstancedCHROMIUM),
- },
- {
- "glStencilThenCoverStrokePathInstancedCHROMIUM",
- reinterpret_cast<GLES2FunctionPointer>(
- glStencilThenCoverStrokePathInstancedCHROMIUM),
- },
- {
- "glBindFragmentInputLocationCHROMIUM",
- reinterpret_cast<GLES2FunctionPointer>(
- glBindFragmentInputLocationCHROMIUM),
- },
- {
- "glProgramPathFragmentInputGenCHROMIUM",
- reinterpret_cast<GLES2FunctionPointer>(
- glProgramPathFragmentInputGenCHROMIUM),
- },
- {
"glContextVisibilityHintCHROMIUM",
reinterpret_cast<GLES2FunctionPointer>(glContextVisibilityHintCHROMIUM),
},
@@ -3525,6 +3267,16 @@ extern const NameToFunc g_gles2_function_table[] = {
glEndSharedImageAccessDirectCHROMIUM),
},
{
+ "glBeginBatchReadAccessSharedImageCHROMIUM",
+ reinterpret_cast<GLES2FunctionPointer>(
+ glBeginBatchReadAccessSharedImageCHROMIUM),
+ },
+ {
+ "glEndBatchReadAccessSharedImageCHROMIUM",
+ reinterpret_cast<GLES2FunctionPointer>(
+ glEndBatchReadAccessSharedImageCHROMIUM),
+ },
+ {
nullptr,
nullptr,
},
diff --git a/chromium/gpu/command_buffer/client/gles2_cmd_helper_autogen.h b/chromium/gpu/command_buffer/client/gles2_cmd_helper_autogen.h
index 4c50bbfd008..7aa80690359 100644
--- a/chromium/gpu/command_buffer/client/gles2_cmd_helper_autogen.h
+++ b/chromium/gpu/command_buffer/client/gles2_cmd_helper_autogen.h
@@ -2701,14 +2701,17 @@ void FlushMappedBufferRange(GLenum target, GLintptr offset, GLsizeiptr size) {
}
}
-void ResizeCHROMIUM(GLuint width,
- GLuint height,
+void ResizeCHROMIUM(GLint width,
+ GLint height,
GLfloat scale_factor,
- GLenum color_space,
- GLboolean alpha) {
+ GLboolean alpha,
+ GLuint shm_id,
+ GLuint shm_offset,
+ GLsizei color_space_size) {
gles2::cmds::ResizeCHROMIUM* c = GetCmdSpace<gles2::cmds::ResizeCHROMIUM>();
if (c) {
- c->Init(width, height, scale_factor, color_space, alpha);
+ c->Init(width, height, scale_factor, alpha, shm_id, shm_offset,
+ color_space_size);
}
}
@@ -3136,284 +3139,6 @@ void SetActiveURLCHROMIUM(GLuint url_bucket_id) {
}
}
-void MatrixLoadfCHROMIUMImmediate(GLenum matrixMode, const GLfloat* m) {
- const uint32_t size =
- gles2::cmds::MatrixLoadfCHROMIUMImmediate::ComputeSize();
- gles2::cmds::MatrixLoadfCHROMIUMImmediate* c =
- GetImmediateCmdSpaceTotalSize<gles2::cmds::MatrixLoadfCHROMIUMImmediate>(
- size);
- if (c) {
- c->Init(matrixMode, m);
- }
-}
-
-void MatrixLoadIdentityCHROMIUM(GLenum matrixMode) {
- gles2::cmds::MatrixLoadIdentityCHROMIUM* c =
- GetCmdSpace<gles2::cmds::MatrixLoadIdentityCHROMIUM>();
- if (c) {
- c->Init(matrixMode);
- }
-}
-
-void GenPathsCHROMIUM(GLuint first_client_id, GLsizei range) {
- gles2::cmds::GenPathsCHROMIUM* c =
- GetCmdSpace<gles2::cmds::GenPathsCHROMIUM>();
- if (c) {
- c->Init(first_client_id, range);
- }
-}
-
-void DeletePathsCHROMIUM(GLuint first_client_id, GLsizei range) {
- gles2::cmds::DeletePathsCHROMIUM* c =
- GetCmdSpace<gles2::cmds::DeletePathsCHROMIUM>();
- if (c) {
- c->Init(first_client_id, range);
- }
-}
-
-void IsPathCHROMIUM(GLuint path,
- uint32_t result_shm_id,
- uint32_t result_shm_offset) {
- gles2::cmds::IsPathCHROMIUM* c = GetCmdSpace<gles2::cmds::IsPathCHROMIUM>();
- if (c) {
- c->Init(path, result_shm_id, result_shm_offset);
- }
-}
-
-void PathCommandsCHROMIUM(GLuint path,
- GLsizei numCommands,
- uint32_t commands_shm_id,
- uint32_t commands_shm_offset,
- GLsizei numCoords,
- GLenum coordType,
- uint32_t coords_shm_id,
- uint32_t coords_shm_offset) {
- gles2::cmds::PathCommandsCHROMIUM* c =
- GetCmdSpace<gles2::cmds::PathCommandsCHROMIUM>();
- if (c) {
- c->Init(path, numCommands, commands_shm_id, commands_shm_offset, numCoords,
- coordType, coords_shm_id, coords_shm_offset);
- }
-}
-
-void PathParameterfCHROMIUM(GLuint path, GLenum pname, GLfloat value) {
- gles2::cmds::PathParameterfCHROMIUM* c =
- GetCmdSpace<gles2::cmds::PathParameterfCHROMIUM>();
- if (c) {
- c->Init(path, pname, value);
- }
-}
-
-void PathParameteriCHROMIUM(GLuint path, GLenum pname, GLint value) {
- gles2::cmds::PathParameteriCHROMIUM* c =
- GetCmdSpace<gles2::cmds::PathParameteriCHROMIUM>();
- if (c) {
- c->Init(path, pname, value);
- }
-}
-
-void PathStencilFuncCHROMIUM(GLenum func, GLint ref, GLuint mask) {
- gles2::cmds::PathStencilFuncCHROMIUM* c =
- GetCmdSpace<gles2::cmds::PathStencilFuncCHROMIUM>();
- if (c) {
- c->Init(func, ref, mask);
- }
-}
-
-void StencilFillPathCHROMIUM(GLuint path, GLenum fillMode, GLuint mask) {
- gles2::cmds::StencilFillPathCHROMIUM* c =
- GetCmdSpace<gles2::cmds::StencilFillPathCHROMIUM>();
- if (c) {
- c->Init(path, fillMode, mask);
- }
-}
-
-void StencilStrokePathCHROMIUM(GLuint path, GLint reference, GLuint mask) {
- gles2::cmds::StencilStrokePathCHROMIUM* c =
- GetCmdSpace<gles2::cmds::StencilStrokePathCHROMIUM>();
- if (c) {
- c->Init(path, reference, mask);
- }
-}
-
-void CoverFillPathCHROMIUM(GLuint path, GLenum coverMode) {
- gles2::cmds::CoverFillPathCHROMIUM* c =
- GetCmdSpace<gles2::cmds::CoverFillPathCHROMIUM>();
- if (c) {
- c->Init(path, coverMode);
- }
-}
-
-void CoverStrokePathCHROMIUM(GLuint path, GLenum coverMode) {
- gles2::cmds::CoverStrokePathCHROMIUM* c =
- GetCmdSpace<gles2::cmds::CoverStrokePathCHROMIUM>();
- if (c) {
- c->Init(path, coverMode);
- }
-}
-
-void StencilThenCoverFillPathCHROMIUM(GLuint path,
- GLenum fillMode,
- GLuint mask,
- GLenum coverMode) {
- gles2::cmds::StencilThenCoverFillPathCHROMIUM* c =
- GetCmdSpace<gles2::cmds::StencilThenCoverFillPathCHROMIUM>();
- if (c) {
- c->Init(path, fillMode, mask, coverMode);
- }
-}
-
-void StencilThenCoverStrokePathCHROMIUM(GLuint path,
- GLint reference,
- GLuint mask,
- GLenum coverMode) {
- gles2::cmds::StencilThenCoverStrokePathCHROMIUM* c =
- GetCmdSpace<gles2::cmds::StencilThenCoverStrokePathCHROMIUM>();
- if (c) {
- c->Init(path, reference, mask, coverMode);
- }
-}
-
-void StencilFillPathInstancedCHROMIUM(GLsizei numPaths,
- GLenum pathNameType,
- uint32_t paths_shm_id,
- uint32_t paths_shm_offset,
- GLuint pathBase,
- GLenum fillMode,
- GLuint mask,
- GLenum transformType,
- uint32_t transformValues_shm_id,
- uint32_t transformValues_shm_offset) {
- gles2::cmds::StencilFillPathInstancedCHROMIUM* c =
- GetCmdSpace<gles2::cmds::StencilFillPathInstancedCHROMIUM>();
- if (c) {
- c->Init(numPaths, pathNameType, paths_shm_id, paths_shm_offset, pathBase,
- fillMode, mask, transformType, transformValues_shm_id,
- transformValues_shm_offset);
- }
-}
-
-void StencilStrokePathInstancedCHROMIUM(GLsizei numPaths,
- GLenum pathNameType,
- uint32_t paths_shm_id,
- uint32_t paths_shm_offset,
- GLuint pathBase,
- GLint reference,
- GLuint mask,
- GLenum transformType,
- uint32_t transformValues_shm_id,
- uint32_t transformValues_shm_offset) {
- gles2::cmds::StencilStrokePathInstancedCHROMIUM* c =
- GetCmdSpace<gles2::cmds::StencilStrokePathInstancedCHROMIUM>();
- if (c) {
- c->Init(numPaths, pathNameType, paths_shm_id, paths_shm_offset, pathBase,
- reference, mask, transformType, transformValues_shm_id,
- transformValues_shm_offset);
- }
-}
-
-void CoverFillPathInstancedCHROMIUM(GLsizei numPaths,
- GLenum pathNameType,
- uint32_t paths_shm_id,
- uint32_t paths_shm_offset,
- GLuint pathBase,
- GLenum coverMode,
- GLenum transformType,
- uint32_t transformValues_shm_id,
- uint32_t transformValues_shm_offset) {
- gles2::cmds::CoverFillPathInstancedCHROMIUM* c =
- GetCmdSpace<gles2::cmds::CoverFillPathInstancedCHROMIUM>();
- if (c) {
- c->Init(numPaths, pathNameType, paths_shm_id, paths_shm_offset, pathBase,
- coverMode, transformType, transformValues_shm_id,
- transformValues_shm_offset);
- }
-}
-
-void CoverStrokePathInstancedCHROMIUM(GLsizei numPaths,
- GLenum pathNameType,
- uint32_t paths_shm_id,
- uint32_t paths_shm_offset,
- GLuint pathBase,
- GLenum coverMode,
- GLenum transformType,
- uint32_t transformValues_shm_id,
- uint32_t transformValues_shm_offset) {
- gles2::cmds::CoverStrokePathInstancedCHROMIUM* c =
- GetCmdSpace<gles2::cmds::CoverStrokePathInstancedCHROMIUM>();
- if (c) {
- c->Init(numPaths, pathNameType, paths_shm_id, paths_shm_offset, pathBase,
- coverMode, transformType, transformValues_shm_id,
- transformValues_shm_offset);
- }
-}
-
-void StencilThenCoverFillPathInstancedCHROMIUM(
- GLsizei numPaths,
- GLenum pathNameType,
- uint32_t paths_shm_id,
- uint32_t paths_shm_offset,
- GLuint pathBase,
- GLenum fillMode,
- GLuint mask,
- GLenum coverMode,
- GLenum transformType,
- uint32_t transformValues_shm_id,
- uint32_t transformValues_shm_offset) {
- gles2::cmds::StencilThenCoverFillPathInstancedCHROMIUM* c =
- GetCmdSpace<gles2::cmds::StencilThenCoverFillPathInstancedCHROMIUM>();
- if (c) {
- c->Init(numPaths, pathNameType, paths_shm_id, paths_shm_offset, pathBase,
- fillMode, mask, coverMode, transformType, transformValues_shm_id,
- transformValues_shm_offset);
- }
-}
-
-void StencilThenCoverStrokePathInstancedCHROMIUM(
- GLsizei numPaths,
- GLenum pathNameType,
- uint32_t paths_shm_id,
- uint32_t paths_shm_offset,
- GLuint pathBase,
- GLint reference,
- GLuint mask,
- GLenum coverMode,
- GLenum transformType,
- uint32_t transformValues_shm_id,
- uint32_t transformValues_shm_offset) {
- gles2::cmds::StencilThenCoverStrokePathInstancedCHROMIUM* c =
- GetCmdSpace<gles2::cmds::StencilThenCoverStrokePathInstancedCHROMIUM>();
- if (c) {
- c->Init(numPaths, pathNameType, paths_shm_id, paths_shm_offset, pathBase,
- reference, mask, coverMode, transformType, transformValues_shm_id,
- transformValues_shm_offset);
- }
-}
-
-void BindFragmentInputLocationCHROMIUMBucket(GLuint program,
- GLint location,
- uint32_t name_bucket_id) {
- gles2::cmds::BindFragmentInputLocationCHROMIUMBucket* c =
- GetCmdSpace<gles2::cmds::BindFragmentInputLocationCHROMIUMBucket>();
- if (c) {
- c->Init(program, location, name_bucket_id);
- }
-}
-
-void ProgramPathFragmentInputGenCHROMIUM(GLuint program,
- GLint location,
- GLenum genMode,
- GLint components,
- uint32_t coeffs_shm_id,
- uint32_t coeffs_shm_offset) {
- gles2::cmds::ProgramPathFragmentInputGenCHROMIUM* c =
- GetCmdSpace<gles2::cmds::ProgramPathFragmentInputGenCHROMIUM>();
- if (c) {
- c->Init(program, location, genMode, components, coeffs_shm_id,
- coeffs_shm_offset);
- }
-}
-
void ContextVisibilityHintCHROMIUM(GLboolean visibility) {
gles2::cmds::ContextVisibilityHintCHROMIUM* c =
GetCmdSpace<gles2::cmds::ContextVisibilityHintCHROMIUM>();
@@ -3674,4 +3399,20 @@ void EndSharedImageAccessDirectCHROMIUM(GLuint texture) {
}
}
+void BeginBatchReadAccessSharedImageCHROMIUM() {
+ gles2::cmds::BeginBatchReadAccessSharedImageCHROMIUM* c =
+ GetCmdSpace<gles2::cmds::BeginBatchReadAccessSharedImageCHROMIUM>();
+ if (c) {
+ c->Init();
+ }
+}
+
+void EndBatchReadAccessSharedImageCHROMIUM() {
+ gles2::cmds::EndBatchReadAccessSharedImageCHROMIUM* c =
+ GetCmdSpace<gles2::cmds::EndBatchReadAccessSharedImageCHROMIUM>();
+ if (c) {
+ c->Init();
+ }
+}
+
#endif // GPU_COMMAND_BUFFER_CLIENT_GLES2_CMD_HELPER_AUTOGEN_H_
diff --git a/chromium/gpu/command_buffer/client/gles2_implementation.cc b/chromium/gpu/command_buffer/client/gles2_implementation.cc
index 18298a136e5..c5d65e65595 100644
--- a/chromium/gpu/command_buffer/client/gles2_implementation.cc
+++ b/chromium/gpu/command_buffer/client/gles2_implementation.cc
@@ -5658,18 +5658,18 @@ void GLES2Implementation::ScheduleCALayerCHROMIUM(GLuint contents_texture_id,
void GLES2Implementation::SetColorSpaceMetadataCHROMIUM(
GLuint texture_id,
- GLColorSpace color_space) {
+ GLcolorSpace color_space) {
#if defined(__native_client__)
// Including gfx::ColorSpace would bring Skia and a lot of other code into
// NaCl's IRT.
SetGLError(GL_INVALID_VALUE, "GLES2::SetColorSpaceMetadataCHROMIUM",
"not supported");
#else
- gfx::ColorSpace* gfx_color_space =
- reinterpret_cast<gfx::ColorSpace*>(color_space);
+ gfx::ColorSpace gfx_color_space;
+ if (color_space)
+ gfx_color_space = *reinterpret_cast<const gfx::ColorSpace*>(color_space);
base::Pickle color_space_data;
- IPC::ParamTraits<gfx::ColorSpace>::Write(&color_space_data, *gfx_color_space);
-
+ IPC::ParamTraits<gfx::ColorSpace>::Write(&color_space_data, gfx_color_space);
ScopedTransferBufferPtr buffer(color_space_data.size(), helper_,
transfer_buffer_);
if (!buffer.valid() || buffer.size() < color_space_data.size()) {
@@ -6027,12 +6027,35 @@ void GLES2Implementation::UnmapTexSubImage2DCHROMIUM(const void* mem) {
void GLES2Implementation::ResizeCHROMIUM(GLuint width,
GLuint height,
float scale_factor,
- GLenum color_space,
+ GLcolorSpace gl_color_space,
GLboolean alpha) {
GPU_CLIENT_SINGLE_THREAD_CHECK();
GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glResizeCHROMIUM(" << width << ", "
<< height << ", " << scale_factor << ", " << alpha << ")");
- helper_->ResizeCHROMIUM(width, height, scale_factor, color_space, alpha);
+ // Including gfx::ColorSpace would bring Skia and a lot of other code into
+ // NaCl's IRT, so just leave the color space unspecified.
+#if !defined(__native_client__)
+ if (gl_color_space) {
+ gfx::ColorSpace gfx_color_space =
+ *reinterpret_cast<const gfx::ColorSpace*>(gl_color_space);
+ base::Pickle color_space_data;
+ IPC::ParamTraits<gfx::ColorSpace>::Write(&color_space_data,
+ gfx_color_space);
+ ScopedTransferBufferPtr buffer(color_space_data.size(), helper_,
+ transfer_buffer_);
+ if (!buffer.valid() || buffer.size() < color_space_data.size()) {
+      SetGLError(GL_OUT_OF_MEMORY, "GLES2::ResizeCHROMIUM", "out of memory");
+ return;
+ }
+ memcpy(buffer.address(), color_space_data.data(), color_space_data.size());
+ helper_->ResizeCHROMIUM(width, height, scale_factor, alpha, buffer.shm_id(),
+ buffer.offset(), color_space_data.size());
+ CheckGLError();
+ return;
+ }
+#endif
+ helper_->ResizeCHROMIUM(width, height, scale_factor, alpha, 0, 0, 0);
CheckGLError();
}
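
The new ResizeCHROMIUM client path above follows a common command-buffer pattern: serialize the optional gfx::ColorSpace into a pickle, copy the bytes into a transfer buffer, and pass a (shm_id, shm_offset, size) triple to the service, or zeros when no color space is supplied. A rough sketch of that control flow is shown below; PickledBytes, TransferChunk, and SendResize are invented stand-ins for base::Pickle, ScopedTransferBufferPtr, and the command helper, so only the flow mirrors the real code.

#include <cstdint>
#include <cstring>
#include <vector>

using PickledBytes = std::vector<uint8_t>;

struct TransferChunk {
  std::vector<uint8_t> storage;  // stands in for the shared-memory region
  uint32_t shm_id = 1;           // stands in for buffer.shm_id()
  uint32_t shm_offset = 0;       // stands in for buffer.offset()
  explicit TransferChunk(size_t size) : storage(size) {}
  bool valid() const { return !storage.empty(); }
};

// Stands in for helper_->ResizeCHROMIUM(...).
void SendResize(uint32_t width, uint32_t height, float scale_factor,
                bool alpha, uint32_t shm_id, uint32_t shm_offset,
                uint32_t color_space_size) {}

void Resize(uint32_t width, uint32_t height, float scale_factor, bool alpha,
            const PickledBytes* pickled_color_space) {
  if (pickled_color_space && !pickled_color_space->empty()) {
    TransferChunk chunk(pickled_color_space->size());
    if (!chunk.valid() || chunk.storage.size() < pickled_color_space->size())
      return;  // The real code raises GL_OUT_OF_MEMORY here.
    std::memcpy(chunk.storage.data(), pickled_color_space->data(),
                pickled_color_space->size());
    SendResize(width, height, scale_factor, alpha, chunk.shm_id,
               chunk.shm_offset,
               static_cast<uint32_t>(pickled_color_space->size()));
    return;
  }
  // No color space supplied (or NaCl build): pass zeros so the service
  // leaves the color space unspecified.
  SendResize(width, height, scale_factor, alpha, 0, 0, 0);
}
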
@@ -7040,7 +7063,7 @@ bool CreateImageValidInternalFormat(GLenum internalformat,
case GL_R16_EXT:
return capabilities.texture_norm16;
case GL_RGB10_A2_EXT:
- return capabilities.image_xr30 || capabilities.image_xb30;
+ return capabilities.image_ar30 || capabilities.image_ab30;
case GL_RGB_YCBCR_P010_CHROMIUM:
return capabilities.image_ycbcr_p010;
case GL_RED:
@@ -7347,547 +7370,6 @@ void GLES2Implementation::GetInternalformativ(GLenum target,
CheckGLError();
}
-GLuint GLES2Implementation::GenPathsCHROMIUM(GLsizei range) {
- GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glGenPathsCHROMIUM(" << range
- << ")");
- GPU_CLIENT_SINGLE_THREAD_CHECK();
- static const char kFunctionName[] = "glGenPathsCHROMIUM";
- if (range < 0) {
- SetGLError(GL_INVALID_VALUE, kFunctionName, "range < 0");
- return 0;
- }
- if (!base::IsValueInRangeForNumericType<int32_t>(range)) {
- SetGLError(GL_INVALID_OPERATION, kFunctionName, "range more than 32-bit");
- return 0;
- }
- if (range == 0)
- return 0;
-
- GLuint first_client_id = 0;
- GetRangeIdHandler(id_namespaces::kPaths)
- ->MakeIdRange(this, range, &first_client_id);
-
- if (first_client_id == 0) {
- // Ran out of id space. Is not specified to raise any gl errors.
- return 0;
- }
-
- helper_->GenPathsCHROMIUM(first_client_id, range);
-
- GPU_CLIENT_LOG_CODE_BLOCK({
- for (GLsizei i = 0; i < range; ++i) {
- GPU_CLIENT_LOG(" " << i << ": " << (first_client_id + i));
- }
- });
- CheckGLError();
- return first_client_id;
-}
-
-void GLES2Implementation::DeletePathsCHROMIUM(GLuint first_client_id,
- GLsizei range) {
- GPU_CLIENT_SINGLE_THREAD_CHECK();
- GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glDeletePathsCHROMIUM("
- << first_client_id << ", " << range << ")");
- static const char kFunctionName[] = "glDeletePathsCHROMIUM";
-
- if (range < 0) {
- SetGLError(GL_INVALID_VALUE, kFunctionName, "range < 0");
- return;
- }
- if (!base::IsValueInRangeForNumericType<int32_t>(range)) {
- SetGLError(GL_INVALID_OPERATION, kFunctionName, "range more than 32-bit");
- return;
- }
- if (range == 0)
- return;
-
- GLuint last_client_id;
- if (!base::CheckAdd(first_client_id, range - 1)
- .AssignIfValid(&last_client_id)) {
- SetGLError(GL_INVALID_OPERATION, kFunctionName, "overflow");
- return;
- }
-
- GetRangeIdHandler(id_namespaces::kPaths)
- ->FreeIdRange(this, first_client_id, range,
- &GLES2Implementation::DeletePathsCHROMIUMStub);
- CheckGLError();
-}
-
-void GLES2Implementation::DeletePathsCHROMIUMStub(GLuint first_client_id,
- GLsizei range) {
- helper_->DeletePathsCHROMIUM(first_client_id, range);
-}
-
-void GLES2Implementation::PathCommandsCHROMIUM(GLuint path,
- GLsizei num_commands,
- const GLubyte* commands,
- GLsizei num_coords,
- GLenum coord_type,
- const void* coords) {
- GPU_CLIENT_SINGLE_THREAD_CHECK();
- GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glPathCommandsCHROMIUM(" << path
- << ", " << num_commands << ", " << commands << ", "
- << num_coords << ", " << coords << ")");
- static const char kFunctionName[] = "glPathCommandsCHROMIUM";
- if (path == 0) {
- SetGLError(GL_INVALID_VALUE, kFunctionName, "invalid path object");
- return;
- }
- if (num_commands < 0) {
- SetGLError(GL_INVALID_VALUE, kFunctionName, "numCommands < 0");
- return;
- }
- if (num_commands != 0 && !commands) {
- SetGLError(GL_INVALID_VALUE, kFunctionName, "missing commands");
- return;
- }
- if (num_coords < 0) {
- SetGLError(GL_INVALID_VALUE, kFunctionName, "numCoords < 0");
- return;
- }
- if (num_coords != 0 && !coords) {
- SetGLError(GL_INVALID_VALUE, kFunctionName, "missing coords");
- return;
- }
- uint32_t coord_type_size =
- GLES2Util::GetGLTypeSizeForPathCoordType(coord_type);
- if (coord_type_size == 0) {
- SetGLError(GL_INVALID_ENUM, kFunctionName, "invalid coordType");
- return;
- }
- if (num_commands == 0) {
- // No commands must mean no coords, thus nothing to memcpy. Let
- // the service validate the call. Validate coord_type above, so
- // that the parameters will be checked the in the same order
- // regardless of num_commands.
- helper_->PathCommandsCHROMIUM(path, num_commands, 0, 0, num_coords,
- coord_type, 0, 0);
- CheckGLError();
- return;
- }
-
- uint32_t coords_size;
- if (!base::CheckMul(num_coords, coord_type_size)
- .AssignIfValid(&coords_size)) {
- SetGLError(GL_INVALID_OPERATION, kFunctionName, "overflow");
- return;
- }
-
- uint32_t required_buffer_size;
- if (!base::CheckAdd(coords_size, num_commands)
- .AssignIfValid(&required_buffer_size)) {
- SetGLError(GL_INVALID_OPERATION, kFunctionName, "overflow");
- return;
- }
-
- ScopedTransferBufferPtr buffer(required_buffer_size, helper_,
- transfer_buffer_);
- if (!buffer.valid() || buffer.size() < required_buffer_size) {
- SetGLError(GL_OUT_OF_MEMORY, kFunctionName, "too large");
- return;
- }
-
- uint32_t coords_shm_id = 0;
- uint32_t coords_shm_offset = 0;
- // Copy coords first because they need more strict alignment.
- if (coords_size > 0) {
- unsigned char* coords_addr = static_cast<unsigned char*>(buffer.address());
- memcpy(coords_addr, coords, coords_size);
- coords_shm_id = buffer.shm_id();
- coords_shm_offset = buffer.offset();
- }
-
- DCHECK_GT(num_commands, 0);
- unsigned char* commands_addr =
- static_cast<unsigned char*>(buffer.address()) + coords_size;
- memcpy(commands_addr, commands, num_commands);
-
- helper_->PathCommandsCHROMIUM(path, num_commands, buffer.shm_id(),
- buffer.offset() + coords_size, num_coords,
- coord_type, coords_shm_id, coords_shm_offset);
- CheckGLError();
-}
-
-bool GLES2Implementation::PrepareInstancedPathCommand(
- const char* function_name,
- GLsizei num_paths,
- GLenum path_name_type,
- const void* paths,
- GLenum transform_type,
- const GLfloat* transform_values,
- ScopedTransferBufferPtr* buffer,
- uint32_t* out_paths_shm_id,
- uint32_t* out_paths_offset,
- uint32_t* out_transforms_shm_id,
- uint32_t* out_transforms_offset) {
- if (num_paths < 0) {
- SetGLError(GL_INVALID_VALUE, function_name, "numPaths < 0");
- return false;
- }
- uint32_t path_name_size =
- GLES2Util::GetGLTypeSizeForGLPathNameType(path_name_type);
-
- if (path_name_size == 0) {
- SetGLError(GL_INVALID_ENUM, function_name, "invalid pathNameType");
- return false;
- }
-
- uint32_t transforms_component_count =
- GLES2Util::GetComponentCountForGLTransformType(transform_type);
-
- if (transform_type != GL_NONE && transforms_component_count == 0) {
- SetGLError(GL_INVALID_ENUM, function_name, "invalid transformType");
- return false;
- }
-
- if (num_paths == 0) {
- // This might still be a valid or an invalid GL call. Make an empty call to
- // the service side to check the rest of the parameters. We check the above
- // parameters client-side, in order to get same GL errors whether num_paths
- // == 0 or not. We do not check the parameters below, as they are anyway
- // checked by the service side. We can not check all the parameters
- // client-side, since the validators are not available.
- *out_paths_shm_id = 0;
- *out_paths_offset = 0;
- *out_transforms_shm_id = 0;
- *out_transforms_offset = 0;
- return true;
- }
-
- if (!paths) {
- SetGLError(GL_INVALID_VALUE, function_name, "missing paths");
- return false;
- }
-
- if (transform_type != GL_NONE && !transform_values) {
- SetGLError(GL_INVALID_VALUE, function_name, "missing transforms");
- return false;
- }
-
- uint32_t paths_size;
- if (!base::CheckMul(path_name_size, num_paths).AssignIfValid(&paths_size)) {
- SetGLError(GL_INVALID_OPERATION, function_name, "overflow");
- return false;
- }
-
- // The multiplication below will not overflow.
- DCHECK_LE(transforms_component_count, 12U);
- uint32_t one_transform_size = sizeof(GLfloat) * transforms_component_count;
-
- uint32_t transforms_size;
- if (!base::CheckMul(one_transform_size, num_paths)
- .AssignIfValid(&transforms_size)) {
- SetGLError(GL_INVALID_OPERATION, function_name, "overflow");
- return false;
- }
-
- uint32_t required_buffer_size;
- if (!base::CheckAdd(transforms_size, paths_size)
- .AssignIfValid(&required_buffer_size)) {
- SetGLError(GL_INVALID_OPERATION, function_name, "overflow");
- return false;
- }
-
- buffer->Reset(required_buffer_size);
-
- if (!buffer->valid() || buffer->size() < required_buffer_size) {
- SetGLError(GL_OUT_OF_MEMORY, function_name, "too large");
- return false;
- }
-
- // Copy transforms first, they may have more strict alignment.
- if (transforms_size > 0) {
- unsigned char* transforms_addr =
- static_cast<unsigned char*>(buffer->address());
- memcpy(transforms_addr, transform_values, transforms_size);
- *out_transforms_shm_id = buffer->shm_id();
- *out_transforms_offset = buffer->offset();
- } else {
- *out_transforms_shm_id = 0;
- *out_transforms_offset = 0;
- }
-
- DCHECK_GT(paths_size, 0U);
- unsigned char* paths_addr =
- static_cast<unsigned char*>(buffer->address()) + transforms_size;
- memcpy(paths_addr, paths, paths_size);
- *out_paths_shm_id = buffer->shm_id();
- *out_paths_offset = buffer->offset() + transforms_size;
-
- return true;
-}
-
-void GLES2Implementation::StencilFillPathInstancedCHROMIUM(
- GLsizei num_paths,
- GLenum path_name_type,
- const GLvoid* paths,
- GLuint path_base,
- GLenum fill_mode,
- GLuint mask,
- GLenum transform_type,
- const GLfloat* transform_values) {
- GPU_CLIENT_SINGLE_THREAD_CHECK();
- GPU_CLIENT_LOG("[" << GetLogPrefix()
- << "] glStencilFillPathInstancedCHROMIUM(" << num_paths
- << ", " << path_name_type << ", " << paths << ", "
- << path_base << ", " << fill_mode << ", " << mask << ", "
- << transform_type << ", " << transform_values << ")");
-
- ScopedTransferBufferPtr buffer(helper_, transfer_buffer_);
- uint32_t paths_shm_id = 0;
- uint32_t paths_offset = 0;
- uint32_t transforms_shm_id = 0;
- uint32_t transforms_offset = 0;
- if (!PrepareInstancedPathCommand(
- "glStencilFillPathInstancedCHROMIUM", num_paths, path_name_type,
- paths, transform_type, transform_values, &buffer, &paths_shm_id,
- &paths_offset, &transforms_shm_id, &transforms_offset)) {
- return;
- }
-
- helper_->StencilFillPathInstancedCHROMIUM(
- num_paths, path_name_type, paths_shm_id, paths_offset, path_base,
- fill_mode, mask, transform_type, transforms_shm_id, transforms_offset);
-
- CheckGLError();
-}
-
-void GLES2Implementation::StencilStrokePathInstancedCHROMIUM(
- GLsizei num_paths,
- GLenum path_name_type,
- const GLvoid* paths,
- GLuint path_base,
- GLint ref,
- GLuint mask,
- GLenum transform_type,
- const GLfloat* transform_values) {
- GPU_CLIENT_SINGLE_THREAD_CHECK();
- GPU_CLIENT_LOG("[" << GetLogPrefix()
- << "] glStencilStrokePathInstancedCHROMIUM(" << num_paths
- << ", " << path_name_type << ", " << paths << ", "
- << path_base << ", " << ref << ", " << mask << ", "
- << transform_type << ", " << transform_values << ")");
-
- ScopedTransferBufferPtr buffer(helper_, transfer_buffer_);
- uint32_t paths_shm_id = 0;
- uint32_t paths_offset = 0;
- uint32_t transforms_shm_id = 0;
- uint32_t transforms_offset = 0;
- if (!PrepareInstancedPathCommand(
- "glStencilStrokePathInstancedCHROMIUM", num_paths, path_name_type,
- paths, transform_type, transform_values, &buffer, &paths_shm_id,
- &paths_offset, &transforms_shm_id, &transforms_offset)) {
- return;
- }
-
- helper_->StencilStrokePathInstancedCHROMIUM(
- num_paths, path_name_type, paths_shm_id, paths_offset, path_base, ref,
- mask, transform_type, transforms_shm_id, transforms_offset);
-
- CheckGLError();
-}
-
-void GLES2Implementation::CoverFillPathInstancedCHROMIUM(
- GLsizei num_paths,
- GLenum path_name_type,
- const GLvoid* paths,
- GLuint path_base,
- GLenum cover_mode,
- GLenum transform_type,
- const GLfloat* transform_values) {
- GPU_CLIENT_SINGLE_THREAD_CHECK();
- GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glCoverFillPathInstancedCHROMIUM("
- << num_paths << ", " << path_name_type << ", " << paths
- << ", " << path_base << ", " << cover_mode << ", "
- << transform_type << ", " << transform_values << ")");
-
- ScopedTransferBufferPtr buffer(helper_, transfer_buffer_);
- uint32_t paths_shm_id = 0;
- uint32_t paths_offset = 0;
- uint32_t transforms_shm_id = 0;
- uint32_t transforms_offset = 0;
- if (!PrepareInstancedPathCommand(
- "glCoverFillPathInstancedCHROMIUM", num_paths, path_name_type, paths,
- transform_type, transform_values, &buffer, &paths_shm_id,
- &paths_offset, &transforms_shm_id, &transforms_offset)) {
- return;
- }
-
- helper_->CoverFillPathInstancedCHROMIUM(
- num_paths, path_name_type, paths_shm_id, paths_offset, path_base,
- cover_mode, transform_type, transforms_shm_id, transforms_offset);
-
- CheckGLError();
-}
-
-void GLES2Implementation::CoverStrokePathInstancedCHROMIUM(
- GLsizei num_paths,
- GLenum path_name_type,
- const GLvoid* paths,
- GLuint path_base,
- GLenum cover_mode,
- GLenum transform_type,
- const GLfloat* transform_values) {
- GPU_CLIENT_SINGLE_THREAD_CHECK();
- GPU_CLIENT_LOG("[" << GetLogPrefix()
- << "] glCoverStrokePathInstancedCHROMIUM(" << num_paths
- << ", " << path_name_type << ", " << paths << ", "
- << path_base << ", " << cover_mode << ", "
- << transform_type << ", " << transform_values << ")");
-
- ScopedTransferBufferPtr buffer(helper_, transfer_buffer_);
- uint32_t paths_shm_id = 0;
- uint32_t paths_offset = 0;
- uint32_t transforms_shm_id = 0;
- uint32_t transforms_offset = 0;
- if (!PrepareInstancedPathCommand(
- "glCoverStrokePathInstancedCHROMIUM", num_paths, path_name_type,
- paths, transform_type, transform_values, &buffer, &paths_shm_id,
- &paths_offset, &transforms_shm_id, &transforms_offset)) {
- return;
- }
-
- helper_->CoverStrokePathInstancedCHROMIUM(
- num_paths, path_name_type, paths_shm_id, paths_offset, path_base,
- cover_mode, transform_type, transforms_shm_id, transforms_offset);
-
- CheckGLError();
-}
-
-void GLES2Implementation::StencilThenCoverFillPathInstancedCHROMIUM(
- GLsizei num_paths,
- GLenum path_name_type,
- const GLvoid* paths,
- GLuint path_base,
- GLenum fill_mode,
- GLuint mask,
- GLenum cover_mode,
- GLenum transform_type,
- const GLfloat* transform_values) {
- GPU_CLIENT_SINGLE_THREAD_CHECK();
- GPU_CLIENT_LOG(
- "[" << GetLogPrefix() << "] glStencilThenCoverFillPathInstancedCHROMIUM("
- << num_paths << ", " << path_name_type << ", " << paths << ", "
- << path_base << ", " << cover_mode << ", " << fill_mode << ", "
- << mask << ", " << transform_type << ", " << transform_values << ")");
-
- ScopedTransferBufferPtr buffer(helper_, transfer_buffer_);
- uint32_t paths_shm_id = 0;
- uint32_t paths_offset = 0;
- uint32_t transforms_shm_id = 0;
- uint32_t transforms_offset = 0;
- if (!PrepareInstancedPathCommand(
- "glStencilThenCoverFillPathInstancedCHROMIUM", num_paths,
- path_name_type, paths, transform_type, transform_values, &buffer,
- &paths_shm_id, &paths_offset, &transforms_shm_id,
- &transforms_offset)) {
- return;
- }
-
- helper_->StencilThenCoverFillPathInstancedCHROMIUM(
- num_paths, path_name_type, paths_shm_id, paths_offset, path_base,
- fill_mode, mask, cover_mode, transform_type, transforms_shm_id,
- transforms_offset);
-
- CheckGLError();
-}
-
-void GLES2Implementation::StencilThenCoverStrokePathInstancedCHROMIUM(
- GLsizei num_paths,
- GLenum path_name_type,
- const GLvoid* paths,
- GLuint path_base,
- GLint ref,
- GLuint mask,
- GLenum cover_mode,
- GLenum transform_type,
- const GLfloat* transform_values) {
- GPU_CLIENT_SINGLE_THREAD_CHECK();
- GPU_CLIENT_LOG("[" << GetLogPrefix()
- << "] glStencilThenCoverStrokePathInstancedCHROMIUM("
- << num_paths << ", " << path_name_type << ", " << paths
- << ", " << path_base << ", " << cover_mode << ", " << ref
- << ", " << mask << ", " << transform_type << ", "
- << transform_values << ")");
-
- ScopedTransferBufferPtr buffer(helper_, transfer_buffer_);
- uint32_t paths_shm_id = 0;
- uint32_t paths_offset = 0;
- uint32_t transforms_shm_id = 0;
- uint32_t transforms_offset = 0;
- if (!PrepareInstancedPathCommand(
- "glStencilThenCoverStrokePathInstancedCHROMIUM", num_paths,
- path_name_type, paths, transform_type, transform_values, &buffer,
- &paths_shm_id, &paths_offset, &transforms_shm_id,
- &transforms_offset)) {
- return;
- }
-
- helper_->StencilThenCoverStrokePathInstancedCHROMIUM(
- num_paths, path_name_type, paths_shm_id, paths_offset, path_base, ref,
- mask, cover_mode, transform_type, transforms_shm_id, transforms_offset);
-
- CheckGLError();
-}
-
-void GLES2Implementation::BindFragmentInputLocationCHROMIUM(GLuint program,
- GLint location,
- const char* name) {
- GPU_CLIENT_SINGLE_THREAD_CHECK();
- GPU_CLIENT_LOG("[" << GetLogPrefix()
- << "] glBindFragmentInputLocationCHROMIUM(" << program
- << ", " << location << ", " << name << ")");
- SetBucketAsString(kResultBucketId, name);
- helper_->BindFragmentInputLocationCHROMIUMBucket(program, location,
- kResultBucketId);
- helper_->SetBucketSize(kResultBucketId, 0);
- CheckGLError();
-}
-
-void GLES2Implementation::ProgramPathFragmentInputGenCHROMIUM(
- GLuint program,
- GLint location,
- GLenum gen_mode,
- GLint components,
- const GLfloat* coeffs) {
- GPU_CLIENT_SINGLE_THREAD_CHECK();
- GPU_CLIENT_LOG("[" << GetLogPrefix()
- << "] glProgramPathFragmentInputGenCHROMIUM(" << program
- << ", " << gen_mode << ", " << components << ", " << coeffs
- << ")");
-
- uint32_t coeffs_per_component =
- GLES2Util::GetCoefficientCountForGLPathFragmentInputGenMode(gen_mode);
-
- if (components <= 0 || components > 4 || gen_mode == GL_NONE ||
- coeffs_per_component == 0 || location == -1) {
- helper_->ProgramPathFragmentInputGenCHROMIUM(program, location, gen_mode,
- components, 0, 0);
- } else {
- // The multiplication below will not overflow.
- DCHECK(coeffs_per_component > 0 && coeffs_per_component <= 4);
- DCHECK(components > 0 && components <= 4);
- uint32_t coeffs_size = sizeof(GLfloat) * coeffs_per_component * components;
-
- ScopedTransferBufferPtr buffer(coeffs_size, helper_, transfer_buffer_);
- if (!buffer.valid() || buffer.size() < coeffs_size) {
- SetGLError(GL_OUT_OF_MEMORY, "glProgramPathFragmentInputGenCHROMIUM",
- "no room in transfer buffer");
- return;
- }
-
- DCHECK_GT(coeffs_size, 0U);
- unsigned char* addr = static_cast<unsigned char*>(buffer.address());
- memcpy(addr, coeffs, coeffs_size);
-
- helper_->ProgramPathFragmentInputGenCHROMIUM(program, location, gen_mode,
- components, buffer.shm_id(),
- buffer.offset());
- }
- CheckGLError();
-}
-
void GLES2Implementation::InitializeDiscardableTextureCHROMIUM(
GLuint texture_id) {
ClientDiscardableTextureManager* manager =
diff --git a/chromium/gpu/command_buffer/client/gles2_implementation.h b/chromium/gpu/command_buffer/client/gles2_implementation.h
index ea262ee7c7c..e0db2688e6b 100644
--- a/chromium/gpu/command_buffer/client/gles2_implementation.h
+++ b/chromium/gpu/command_buffer/client/gles2_implementation.h
@@ -489,7 +489,6 @@ class GLES2_IMPL_EXPORT GLES2Implementation : public GLES2Interface,
void DeleteBuffersStub(GLsizei n, const GLuint* buffers);
void DeleteRenderbuffersStub(GLsizei n, const GLuint* renderbuffers);
void DeleteTexturesStub(GLsizei n, const GLuint* textures);
- void DeletePathsCHROMIUMStub(GLuint first_client_id, GLsizei range);
void DeleteProgramStub(GLsizei n, const GLuint* programs);
void DeleteShaderStub(GLsizei n, const GLuint* shaders);
void DeleteSamplersStub(GLsizei n, const GLuint* samplers);
@@ -671,18 +670,6 @@ class GLES2_IMPL_EXPORT GLES2Implementation : public GLES2Interface,
const std::string& GetLogPrefix() const;
- bool PrepareInstancedPathCommand(const char* function_name,
- GLsizei num_paths,
- GLenum path_name_type,
- const void* paths,
- GLenum transform_type,
- const GLfloat* transform_values,
- ScopedTransferBufferPtr* buffer,
- uint32_t* out_paths_shm_id,
- uint32_t* out_paths_offset,
- uint32_t* out_transforms_shm_id,
- uint32_t* out_transforms_offset);
-
// Set to 1 to have the client fail when a GL error is generated.
// This helps find bugs in the renderer since the debugger stops on the error.
#if DCHECK_IS_ON()
diff --git a/chromium/gpu/command_buffer/client/gles2_implementation_autogen.h b/chromium/gpu/command_buffer/client/gles2_implementation_autogen.h
index 109e29e43c0..6fb5046d5de 100644
--- a/chromium/gpu/command_buffer/client/gles2_implementation_autogen.h
+++ b/chromium/gpu/command_buffer/client/gles2_implementation_autogen.h
@@ -994,7 +994,7 @@ void UnmapTexSubImage2DCHROMIUM(const void* mem) override;
void ResizeCHROMIUM(GLuint width,
GLuint height,
GLfloat scale_factor,
- GLenum color_space,
+ GLcolorSpace color_space,
GLboolean alpha) override;
const GLchar* GetRequestableExtensionsCHROMIUM() override;
@@ -1198,118 +1198,6 @@ void ScheduleDCLayerCHROMIUM(GLuint texture_0,
void SetActiveURLCHROMIUM(const char* url) override;
-void MatrixLoadfCHROMIUM(GLenum matrixMode, const GLfloat* m) override;
-
-void MatrixLoadIdentityCHROMIUM(GLenum matrixMode) override;
-
-GLuint GenPathsCHROMIUM(GLsizei range) override;
-
-void DeletePathsCHROMIUM(GLuint path, GLsizei range) override;
-
-GLboolean IsPathCHROMIUM(GLuint path) override;
-
-void PathCommandsCHROMIUM(GLuint path,
- GLsizei numCommands,
- const GLubyte* commands,
- GLsizei numCoords,
- GLenum coordType,
- const GLvoid* coords) override;
-
-void PathParameterfCHROMIUM(GLuint path, GLenum pname, GLfloat value) override;
-
-void PathParameteriCHROMIUM(GLuint path, GLenum pname, GLint value) override;
-
-void PathStencilFuncCHROMIUM(GLenum func, GLint ref, GLuint mask) override;
-
-void StencilFillPathCHROMIUM(GLuint path,
- GLenum fillMode,
- GLuint mask) override;
-
-void StencilStrokePathCHROMIUM(GLuint path,
- GLint reference,
- GLuint mask) override;
-
-void CoverFillPathCHROMIUM(GLuint path, GLenum coverMode) override;
-
-void CoverStrokePathCHROMIUM(GLuint path, GLenum coverMode) override;
-
-void StencilThenCoverFillPathCHROMIUM(GLuint path,
- GLenum fillMode,
- GLuint mask,
- GLenum coverMode) override;
-
-void StencilThenCoverStrokePathCHROMIUM(GLuint path,
- GLint reference,
- GLuint mask,
- GLenum coverMode) override;
-
-void StencilFillPathInstancedCHROMIUM(GLsizei numPaths,
- GLenum pathNameType,
- const GLvoid* paths,
- GLuint pathBase,
- GLenum fillMode,
- GLuint mask,
- GLenum transformType,
- const GLfloat* transformValues) override;
-
-void StencilStrokePathInstancedCHROMIUM(
- GLsizei numPaths,
- GLenum pathNameType,
- const GLvoid* paths,
- GLuint pathBase,
- GLint reference,
- GLuint mask,
- GLenum transformType,
- const GLfloat* transformValues) override;
-
-void CoverFillPathInstancedCHROMIUM(GLsizei numPaths,
- GLenum pathNameType,
- const GLvoid* paths,
- GLuint pathBase,
- GLenum coverMode,
- GLenum transformType,
- const GLfloat* transformValues) override;
-
-void CoverStrokePathInstancedCHROMIUM(GLsizei numPaths,
- GLenum pathNameType,
- const GLvoid* paths,
- GLuint pathBase,
- GLenum coverMode,
- GLenum transformType,
- const GLfloat* transformValues) override;
-
-void StencilThenCoverFillPathInstancedCHROMIUM(
- GLsizei numPaths,
- GLenum pathNameType,
- const GLvoid* paths,
- GLuint pathBase,
- GLenum fillMode,
- GLuint mask,
- GLenum coverMode,
- GLenum transformType,
- const GLfloat* transformValues) override;
-
-void StencilThenCoverStrokePathInstancedCHROMIUM(
- GLsizei numPaths,
- GLenum pathNameType,
- const GLvoid* paths,
- GLuint pathBase,
- GLint reference,
- GLuint mask,
- GLenum coverMode,
- GLenum transformType,
- const GLfloat* transformValues) override;
-
-void BindFragmentInputLocationCHROMIUM(GLuint program,
- GLint location,
- const char* name) override;
-
-void ProgramPathFragmentInputGenCHROMIUM(GLuint program,
- GLint location,
- GLenum genMode,
- GLint components,
- const GLfloat* coeffs) override;
-
void ContextVisibilityHintCHROMIUM(GLboolean visibility) override;
void CoverageModulationCHROMIUM(GLenum components) override;
@@ -1366,7 +1254,7 @@ void TexStorage2DImageCHROMIUM(GLenum target,
GLsizei height) override;
void SetColorSpaceMetadataCHROMIUM(GLuint texture_id,
- GLColorSpace color_space) override;
+ GLcolorSpace color_space) override;
void WindowRectanglesEXT(GLenum mode, GLsizei count, const GLint* box) override;
@@ -1399,4 +1287,8 @@ void BeginSharedImageAccessDirectCHROMIUM(GLuint texture, GLenum mode) override;
void EndSharedImageAccessDirectCHROMIUM(GLuint texture) override;
+void BeginBatchReadAccessSharedImageCHROMIUM() override;
+
+void EndBatchReadAccessSharedImageCHROMIUM() override;
+
#endif // GPU_COMMAND_BUFFER_CLIENT_GLES2_IMPLEMENTATION_AUTOGEN_H_
diff --git a/chromium/gpu/command_buffer/client/gles2_implementation_impl_autogen.h b/chromium/gpu/command_buffer/client/gles2_implementation_impl_autogen.h
index ea02c67be96..69cd2bd09f6 100644
--- a/chromium/gpu/command_buffer/client/gles2_implementation_impl_autogen.h
+++ b/chromium/gpu/command_buffer/client/gles2_implementation_impl_autogen.h
@@ -3516,144 +3516,6 @@ void GLES2Implementation::ScheduleDCLayerCHROMIUM(GLuint texture_0,
CheckGLError();
}
-void GLES2Implementation::MatrixLoadfCHROMIUM(GLenum matrixMode,
- const GLfloat* m) {
- GPU_CLIENT_SINGLE_THREAD_CHECK();
- GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glMatrixLoadfCHROMIUM("
- << GLES2Util::GetStringMatrixMode(matrixMode) << ", "
- << static_cast<const void*>(m) << ")");
- uint32_t count = 16;
- for (uint32_t ii = 0; ii < count; ++ii)
- GPU_CLIENT_LOG("value[" << ii << "]: " << m[ii]);
- helper_->MatrixLoadfCHROMIUMImmediate(matrixMode, m);
- CheckGLError();
-}
-
-void GLES2Implementation::MatrixLoadIdentityCHROMIUM(GLenum matrixMode) {
- GPU_CLIENT_SINGLE_THREAD_CHECK();
- GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glMatrixLoadIdentityCHROMIUM("
- << GLES2Util::GetStringMatrixMode(matrixMode) << ")");
- helper_->MatrixLoadIdentityCHROMIUM(matrixMode);
- CheckGLError();
-}
-
-GLboolean GLES2Implementation::IsPathCHROMIUM(GLuint path) {
- GPU_CLIENT_SINGLE_THREAD_CHECK();
- TRACE_EVENT0("gpu", "GLES2Implementation::IsPathCHROMIUM");
- GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glIsPathCHROMIUM(" << path << ")");
- typedef cmds::IsPathCHROMIUM::Result Result;
- ScopedResultPtr<Result> result = GetResultAs<Result>();
- if (!result) {
- return GL_FALSE;
- }
- *result = 0;
- helper_->IsPathCHROMIUM(path, GetResultShmId(), result.offset());
- WaitForCmd();
- GLboolean result_value = *result != 0;
- GPU_CLIENT_LOG("returned " << result_value);
- CheckGLError();
- return result_value;
-}
-
-void GLES2Implementation::PathParameterfCHROMIUM(GLuint path,
- GLenum pname,
- GLfloat value) {
- GPU_CLIENT_SINGLE_THREAD_CHECK();
- GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glPathParameterfCHROMIUM(" << path
- << ", " << GLES2Util::GetStringPathParameter(pname) << ", "
- << value << ")");
- helper_->PathParameterfCHROMIUM(path, pname, value);
- CheckGLError();
-}
-
-void GLES2Implementation::PathParameteriCHROMIUM(GLuint path,
- GLenum pname,
- GLint value) {
- GPU_CLIENT_SINGLE_THREAD_CHECK();
- GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glPathParameteriCHROMIUM(" << path
- << ", " << GLES2Util::GetStringPathParameter(pname) << ", "
- << value << ")");
- helper_->PathParameteriCHROMIUM(path, pname, value);
- CheckGLError();
-}
-
-void GLES2Implementation::PathStencilFuncCHROMIUM(GLenum func,
- GLint ref,
- GLuint mask) {
- GPU_CLIENT_SINGLE_THREAD_CHECK();
- GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glPathStencilFuncCHROMIUM("
- << GLES2Util::GetStringCmpFunction(func) << ", " << ref
- << ", " << mask << ")");
- helper_->PathStencilFuncCHROMIUM(func, ref, mask);
- CheckGLError();
-}
-
-void GLES2Implementation::StencilFillPathCHROMIUM(GLuint path,
- GLenum fillMode,
- GLuint mask) {
- GPU_CLIENT_SINGLE_THREAD_CHECK();
- GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glStencilFillPathCHROMIUM(" << path
- << ", " << GLES2Util::GetStringPathFillMode(fillMode)
- << ", " << mask << ")");
- helper_->StencilFillPathCHROMIUM(path, fillMode, mask);
- CheckGLError();
-}
-
-void GLES2Implementation::StencilStrokePathCHROMIUM(GLuint path,
- GLint reference,
- GLuint mask) {
- GPU_CLIENT_SINGLE_THREAD_CHECK();
- GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glStencilStrokePathCHROMIUM("
- << path << ", " << reference << ", " << mask << ")");
- helper_->StencilStrokePathCHROMIUM(path, reference, mask);
- CheckGLError();
-}
-
-void GLES2Implementation::CoverFillPathCHROMIUM(GLuint path, GLenum coverMode) {
- GPU_CLIENT_SINGLE_THREAD_CHECK();
- GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glCoverFillPathCHROMIUM(" << path
- << ", " << GLES2Util::GetStringPathCoverMode(coverMode)
- << ")");
- helper_->CoverFillPathCHROMIUM(path, coverMode);
- CheckGLError();
-}
-
-void GLES2Implementation::CoverStrokePathCHROMIUM(GLuint path,
- GLenum coverMode) {
- GPU_CLIENT_SINGLE_THREAD_CHECK();
- GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glCoverStrokePathCHROMIUM(" << path
- << ", " << GLES2Util::GetStringPathCoverMode(coverMode)
- << ")");
- helper_->CoverStrokePathCHROMIUM(path, coverMode);
- CheckGLError();
-}
-
-void GLES2Implementation::StencilThenCoverFillPathCHROMIUM(GLuint path,
- GLenum fillMode,
- GLuint mask,
- GLenum coverMode) {
- GPU_CLIENT_SINGLE_THREAD_CHECK();
- GPU_CLIENT_LOG(
- "[" << GetLogPrefix() << "] glStencilThenCoverFillPathCHROMIUM(" << path
- << ", " << GLES2Util::GetStringPathFillMode(fillMode) << ", " << mask
- << ", " << GLES2Util::GetStringPathCoverMode(coverMode) << ")");
- helper_->StencilThenCoverFillPathCHROMIUM(path, fillMode, mask, coverMode);
- CheckGLError();
-}
-
-void GLES2Implementation::StencilThenCoverStrokePathCHROMIUM(GLuint path,
- GLint reference,
- GLuint mask,
- GLenum coverMode) {
- GPU_CLIENT_SINGLE_THREAD_CHECK();
- GPU_CLIENT_LOG("[" << GetLogPrefix()
- << "] glStencilThenCoverStrokePathCHROMIUM(" << path
- << ", " << reference << ", " << mask << ", "
- << GLES2Util::GetStringPathCoverMode(coverMode) << ")");
- helper_->StencilThenCoverStrokePathCHROMIUM(path, reference, mask, coverMode);
- CheckGLError();
-}
-
void GLES2Implementation::ContextVisibilityHintCHROMIUM(GLboolean visibility) {
GPU_CLIENT_SINGLE_THREAD_CHECK();
GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glContextVisibilityHintCHROMIUM("
@@ -3851,4 +3713,22 @@ void GLES2Implementation::EndSharedImageAccessDirectCHROMIUM(GLuint texture) {
CheckGLError();
}
+void GLES2Implementation::BeginBatchReadAccessSharedImageCHROMIUM() {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix()
+ << "] glBeginBatchReadAccessSharedImageCHROMIUM("
+ << ")");
+ helper_->BeginBatchReadAccessSharedImageCHROMIUM();
+ CheckGLError();
+}
+
+void GLES2Implementation::EndBatchReadAccessSharedImageCHROMIUM() {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix()
+ << "] glEndBatchReadAccessSharedImageCHROMIUM("
+ << ")");
+ helper_->EndBatchReadAccessSharedImageCHROMIUM();
+ CheckGLError();
+}
+
#endif // GPU_COMMAND_BUFFER_CLIENT_GLES2_IMPLEMENTATION_IMPL_AUTOGEN_H_
diff --git a/chromium/gpu/command_buffer/client/gles2_implementation_unittest_autogen.h b/chromium/gpu/command_buffer/client/gles2_implementation_unittest_autogen.h
index b83aaedf4cf..5205f3e98f7 100644
--- a/chromium/gpu/command_buffer/client/gles2_implementation_unittest_autogen.h
+++ b/chromium/gpu/command_buffer/client/gles2_implementation_unittest_autogen.h
@@ -2786,17 +2786,6 @@ TEST_F(GLES2ImplementationTest, FlushMappedBufferRange) {
EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
}
-TEST_F(GLES2ImplementationTest, ResizeCHROMIUM) {
- struct Cmds {
- cmds::ResizeCHROMIUM cmd;
- };
- Cmds expected;
- expected.cmd.Init(1, 2, 3, 4, true);
-
- gl_->ResizeCHROMIUM(1, 2, 3, 4, true);
- EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
-}
-
TEST_F(GLES2ImplementationTest, DescheduleUntilFinishedCHROMIUM) {
struct Cmds {
cmds::DescheduleUntilFinishedCHROMIUM cmd;
@@ -3001,163 +2990,6 @@ TEST_F(GLES2ImplementationTest, ScheduleDCLayerCHROMIUM) {
EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
}
-TEST_F(GLES2ImplementationTest, MatrixLoadfCHROMIUM) {
- GLfloat data[16] = {0};
- struct Cmds {
- cmds::MatrixLoadfCHROMIUMImmediate cmd;
- GLfloat data[16];
- };
-
- for (int jj = 0; jj < 16; ++jj) {
- data[jj] = static_cast<GLfloat>(jj);
- }
- Cmds expected;
- expected.cmd.Init(GL_PATH_PROJECTION_CHROMIUM, &data[0]);
- gl_->MatrixLoadfCHROMIUM(GL_PATH_PROJECTION_CHROMIUM, &data[0]);
- EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
-}
-
-TEST_F(GLES2ImplementationTest, MatrixLoadIdentityCHROMIUM) {
- struct Cmds {
- cmds::MatrixLoadIdentityCHROMIUM cmd;
- };
- Cmds expected;
- expected.cmd.Init(GL_PATH_PROJECTION_CHROMIUM);
-
- gl_->MatrixLoadIdentityCHROMIUM(GL_PATH_PROJECTION_CHROMIUM);
- EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
-}
-
-TEST_F(GLES2ImplementationTest, DeletePathsCHROMIUM) {
- struct Cmds {
- cmds::DeletePathsCHROMIUM cmd;
- };
- Cmds expected;
- expected.cmd.Init(1, 2);
-
- gl_->DeletePathsCHROMIUM(1, 2);
- EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
-}
-
-TEST_F(GLES2ImplementationTest, IsPathCHROMIUM) {
- struct Cmds {
- cmds::IsPathCHROMIUM cmd;
- };
-
- Cmds expected;
- ExpectedMemoryInfo result1 =
- GetExpectedResultMemory(sizeof(cmds::IsPathCHROMIUM::Result));
- expected.cmd.Init(1, result1.id, result1.offset);
-
- EXPECT_CALL(*command_buffer(), OnFlush())
- .WillOnce(SetMemory(result1.ptr, uint32_t(GL_TRUE)))
- .RetiresOnSaturation();
-
- GLboolean result = gl_->IsPathCHROMIUM(1);
- EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
- EXPECT_TRUE(result);
-}
-
-TEST_F(GLES2ImplementationTest, PathParameterfCHROMIUM) {
- struct Cmds {
- cmds::PathParameterfCHROMIUM cmd;
- };
- Cmds expected;
- expected.cmd.Init(1, GL_PATH_STROKE_WIDTH_CHROMIUM, 3);
-
- gl_->PathParameterfCHROMIUM(1, GL_PATH_STROKE_WIDTH_CHROMIUM, 3);
- EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
-}
-
-TEST_F(GLES2ImplementationTest, PathParameteriCHROMIUM) {
- struct Cmds {
- cmds::PathParameteriCHROMIUM cmd;
- };
- Cmds expected;
- expected.cmd.Init(1, GL_PATH_STROKE_WIDTH_CHROMIUM, 3);
-
- gl_->PathParameteriCHROMIUM(1, GL_PATH_STROKE_WIDTH_CHROMIUM, 3);
- EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
-}
-
-TEST_F(GLES2ImplementationTest, PathStencilFuncCHROMIUM) {
- struct Cmds {
- cmds::PathStencilFuncCHROMIUM cmd;
- };
- Cmds expected;
- expected.cmd.Init(GL_NEVER, 2, 3);
-
- gl_->PathStencilFuncCHROMIUM(GL_NEVER, 2, 3);
- EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
-}
-
-TEST_F(GLES2ImplementationTest, StencilFillPathCHROMIUM) {
- struct Cmds {
- cmds::StencilFillPathCHROMIUM cmd;
- };
- Cmds expected;
- expected.cmd.Init(1, GL_INVERT, 3);
-
- gl_->StencilFillPathCHROMIUM(1, GL_INVERT, 3);
- EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
-}
-
-TEST_F(GLES2ImplementationTest, StencilStrokePathCHROMIUM) {
- struct Cmds {
- cmds::StencilStrokePathCHROMIUM cmd;
- };
- Cmds expected;
- expected.cmd.Init(1, 2, 3);
-
- gl_->StencilStrokePathCHROMIUM(1, 2, 3);
- EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
-}
-
-TEST_F(GLES2ImplementationTest, CoverFillPathCHROMIUM) {
- struct Cmds {
- cmds::CoverFillPathCHROMIUM cmd;
- };
- Cmds expected;
- expected.cmd.Init(1, GL_CONVEX_HULL_CHROMIUM);
-
- gl_->CoverFillPathCHROMIUM(1, GL_CONVEX_HULL_CHROMIUM);
- EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
-}
-
-TEST_F(GLES2ImplementationTest, CoverStrokePathCHROMIUM) {
- struct Cmds {
- cmds::CoverStrokePathCHROMIUM cmd;
- };
- Cmds expected;
- expected.cmd.Init(1, GL_CONVEX_HULL_CHROMIUM);
-
- gl_->CoverStrokePathCHROMIUM(1, GL_CONVEX_HULL_CHROMIUM);
- EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
-}
-
-TEST_F(GLES2ImplementationTest, StencilThenCoverFillPathCHROMIUM) {
- struct Cmds {
- cmds::StencilThenCoverFillPathCHROMIUM cmd;
- };
- Cmds expected;
- expected.cmd.Init(1, GL_INVERT, 3, GL_CONVEX_HULL_CHROMIUM);
-
- gl_->StencilThenCoverFillPathCHROMIUM(1, GL_INVERT, 3,
- GL_CONVEX_HULL_CHROMIUM);
- EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
-}
-
-TEST_F(GLES2ImplementationTest, StencilThenCoverStrokePathCHROMIUM) {
- struct Cmds {
- cmds::StencilThenCoverStrokePathCHROMIUM cmd;
- };
- Cmds expected;
- expected.cmd.Init(1, 2, 3, GL_CONVEX_HULL_CHROMIUM);
-
- gl_->StencilThenCoverStrokePathCHROMIUM(1, 2, 3, GL_CONVEX_HULL_CHROMIUM);
- EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
-}
-
TEST_F(GLES2ImplementationTest, CoverageModulationCHROMIUM) {
struct Cmds {
cmds::CoverageModulationCHROMIUM cmd;
@@ -3270,4 +3102,26 @@ TEST_F(GLES2ImplementationTest, EndSharedImageAccessDirectCHROMIUM) {
gl_->EndSharedImageAccessDirectCHROMIUM(1);
EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
}
+
+TEST_F(GLES2ImplementationTest, BeginBatchReadAccessSharedImageCHROMIUM) {
+ struct Cmds {
+ cmds::BeginBatchReadAccessSharedImageCHROMIUM cmd;
+ };
+ Cmds expected;
+ expected.cmd.Init();
+
+ gl_->BeginBatchReadAccessSharedImageCHROMIUM();
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+}
+
+TEST_F(GLES2ImplementationTest, EndBatchReadAccessSharedImageCHROMIUM) {
+ struct Cmds {
+ cmds::EndBatchReadAccessSharedImageCHROMIUM cmd;
+ };
+ Cmds expected;
+ expected.cmd.Init();
+
+ gl_->EndBatchReadAccessSharedImageCHROMIUM();
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+}
#endif // GPU_COMMAND_BUFFER_CLIENT_GLES2_IMPLEMENTATION_UNITTEST_AUTOGEN_H_
diff --git a/chromium/gpu/command_buffer/client/gles2_interface.h b/chromium/gpu/command_buffer/client/gles2_interface.h
index ec6728c279d..f6f925dd907 100644
--- a/chromium/gpu/command_buffer/client/gles2_interface.h
+++ b/chromium/gpu/command_buffer/client/gles2_interface.h
@@ -27,8 +27,8 @@ enum class GpuPreference;
}
extern "C" typedef struct _ClientBuffer* ClientBuffer;
-extern "C" typedef struct _GLColorSpace* GLColorSpace;
extern "C" typedef struct _ClientGpuFence* ClientGpuFence;
+extern "C" typedef const struct _GLcolorSpace* GLcolorSpace;
namespace gpu {
namespace gles2 {
diff --git a/chromium/gpu/command_buffer/client/gles2_interface_autogen.h b/chromium/gpu/command_buffer/client/gles2_interface_autogen.h
index 7d055f33c22..11954f6a4c4 100644
--- a/chromium/gpu/command_buffer/client/gles2_interface_autogen.h
+++ b/chromium/gpu/command_buffer/client/gles2_interface_autogen.h
@@ -737,7 +737,7 @@ virtual void UnmapTexSubImage2DCHROMIUM(const void* mem) = 0;
virtual void ResizeCHROMIUM(GLuint width,
GLuint height,
GLfloat scale_factor,
- GLenum color_space,
+ GLcolorSpace color_space,
GLboolean alpha) = 0;
virtual const GLchar* GetRequestableExtensionsCHROMIUM() = 0;
virtual void RequestExtensionCHROMIUM(const char* extension) = 0;
@@ -901,99 +901,6 @@ virtual void ScheduleDCLayerCHROMIUM(GLuint texture_0,
GLint clip_height,
GLuint protected_video_type) = 0;
virtual void SetActiveURLCHROMIUM(const char* url) = 0;
-virtual void MatrixLoadfCHROMIUM(GLenum matrixMode, const GLfloat* m) = 0;
-virtual void MatrixLoadIdentityCHROMIUM(GLenum matrixMode) = 0;
-virtual GLuint GenPathsCHROMIUM(GLsizei range) = 0;
-virtual void DeletePathsCHROMIUM(GLuint path, GLsizei range) = 0;
-virtual GLboolean IsPathCHROMIUM(GLuint path) = 0;
-virtual void PathCommandsCHROMIUM(GLuint path,
- GLsizei numCommands,
- const GLubyte* commands,
- GLsizei numCoords,
- GLenum coordType,
- const GLvoid* coords) = 0;
-virtual void PathParameterfCHROMIUM(GLuint path,
- GLenum pname,
- GLfloat value) = 0;
-virtual void PathParameteriCHROMIUM(GLuint path, GLenum pname, GLint value) = 0;
-virtual void PathStencilFuncCHROMIUM(GLenum func, GLint ref, GLuint mask) = 0;
-virtual void StencilFillPathCHROMIUM(GLuint path,
- GLenum fillMode,
- GLuint mask) = 0;
-virtual void StencilStrokePathCHROMIUM(GLuint path,
- GLint reference,
- GLuint mask) = 0;
-virtual void CoverFillPathCHROMIUM(GLuint path, GLenum coverMode) = 0;
-virtual void CoverStrokePathCHROMIUM(GLuint path, GLenum coverMode) = 0;
-virtual void StencilThenCoverFillPathCHROMIUM(GLuint path,
- GLenum fillMode,
- GLuint mask,
- GLenum coverMode) = 0;
-virtual void StencilThenCoverStrokePathCHROMIUM(GLuint path,
- GLint reference,
- GLuint mask,
- GLenum coverMode) = 0;
-virtual void StencilFillPathInstancedCHROMIUM(
- GLsizei numPaths,
- GLenum pathNameType,
- const GLvoid* paths,
- GLuint pathBase,
- GLenum fillMode,
- GLuint mask,
- GLenum transformType,
- const GLfloat* transformValues) = 0;
-virtual void StencilStrokePathInstancedCHROMIUM(
- GLsizei numPaths,
- GLenum pathNameType,
- const GLvoid* paths,
- GLuint pathBase,
- GLint reference,
- GLuint mask,
- GLenum transformType,
- const GLfloat* transformValues) = 0;
-virtual void CoverFillPathInstancedCHROMIUM(GLsizei numPaths,
- GLenum pathNameType,
- const GLvoid* paths,
- GLuint pathBase,
- GLenum coverMode,
- GLenum transformType,
- const GLfloat* transformValues) = 0;
-virtual void CoverStrokePathInstancedCHROMIUM(
- GLsizei numPaths,
- GLenum pathNameType,
- const GLvoid* paths,
- GLuint pathBase,
- GLenum coverMode,
- GLenum transformType,
- const GLfloat* transformValues) = 0;
-virtual void StencilThenCoverFillPathInstancedCHROMIUM(
- GLsizei numPaths,
- GLenum pathNameType,
- const GLvoid* paths,
- GLuint pathBase,
- GLenum fillMode,
- GLuint mask,
- GLenum coverMode,
- GLenum transformType,
- const GLfloat* transformValues) = 0;
-virtual void StencilThenCoverStrokePathInstancedCHROMIUM(
- GLsizei numPaths,
- GLenum pathNameType,
- const GLvoid* paths,
- GLuint pathBase,
- GLint reference,
- GLuint mask,
- GLenum coverMode,
- GLenum transformType,
- const GLfloat* transformValues) = 0;
-virtual void BindFragmentInputLocationCHROMIUM(GLuint program,
- GLint location,
- const char* name) = 0;
-virtual void ProgramPathFragmentInputGenCHROMIUM(GLuint program,
- GLint location,
- GLenum genMode,
- GLint components,
- const GLfloat* coeffs) = 0;
virtual void ContextVisibilityHintCHROMIUM(GLboolean visibility) = 0;
virtual void CoverageModulationCHROMIUM(GLenum components) = 0;
virtual GLenum GetGraphicsResetStatusKHR() = 0;
@@ -1034,7 +941,7 @@ virtual void TexStorage2DImageCHROMIUM(GLenum target,
GLsizei width,
GLsizei height) = 0;
virtual void SetColorSpaceMetadataCHROMIUM(GLuint texture_id,
- GLColorSpace color_space) = 0;
+ GLcolorSpace color_space) = 0;
virtual void WindowRectanglesEXT(GLenum mode,
GLsizei count,
const GLint* box) = 0;
@@ -1058,4 +965,6 @@ virtual GLuint CreateAndTexStorage2DSharedImageWithInternalFormatCHROMIUM(
virtual void BeginSharedImageAccessDirectCHROMIUM(GLuint texture,
GLenum mode) = 0;
virtual void EndSharedImageAccessDirectCHROMIUM(GLuint texture) = 0;
+virtual void BeginBatchReadAccessSharedImageCHROMIUM() = 0;
+virtual void EndBatchReadAccessSharedImageCHROMIUM() = 0;
#endif // GPU_COMMAND_BUFFER_CLIENT_GLES2_INTERFACE_AUTOGEN_H_
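
ResizeCHROMIUM now takes the opaque GLcolorSpace handle declared in gles2_interface.h instead of a GLenum. A minimal sketch of a call site, under the assumption (not stated in this patch) that the handle points at a caller-owned gfx::ColorSpace; `gl`, `width` and `height` are placeholders.

#include "gpu/command_buffer/client/gles2_interface.h"
#include "ui/gfx/color_space.h"

void ResizeWithColorSpace(gpu::gles2::GLES2Interface* gl,
                          GLuint width,
                          GLuint height) {
  // Assumption: the opaque handle wraps a gfx::ColorSpace owned by the caller.
  gfx::ColorSpace color_space = gfx::ColorSpace::CreateSRGB();
  gl->ResizeCHROMIUM(width, height, /*scale_factor=*/1.0f,
                     reinterpret_cast<GLcolorSpace>(&color_space),
                     /*alpha=*/GL_TRUE);
}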
diff --git a/chromium/gpu/command_buffer/client/gles2_interface_stub_autogen.h b/chromium/gpu/command_buffer/client/gles2_interface_stub_autogen.h
index 637c3afdb81..af3f5723eba 100644
--- a/chromium/gpu/command_buffer/client/gles2_interface_stub_autogen.h
+++ b/chromium/gpu/command_buffer/client/gles2_interface_stub_autogen.h
@@ -713,7 +713,7 @@ void UnmapTexSubImage2DCHROMIUM(const void* mem) override;
void ResizeCHROMIUM(GLuint width,
GLuint height,
GLfloat scale_factor,
- GLenum color_space,
+ GLcolorSpace color_space,
GLboolean alpha) override;
const GLchar* GetRequestableExtensionsCHROMIUM() override;
void RequestExtensionCHROMIUM(const char* extension) override;
@@ -875,95 +875,6 @@ void ScheduleDCLayerCHROMIUM(GLuint texture_0,
GLint clip_height,
GLuint protected_video_type) override;
void SetActiveURLCHROMIUM(const char* url) override;
-void MatrixLoadfCHROMIUM(GLenum matrixMode, const GLfloat* m) override;
-void MatrixLoadIdentityCHROMIUM(GLenum matrixMode) override;
-GLuint GenPathsCHROMIUM(GLsizei range) override;
-void DeletePathsCHROMIUM(GLuint path, GLsizei range) override;
-GLboolean IsPathCHROMIUM(GLuint path) override;
-void PathCommandsCHROMIUM(GLuint path,
- GLsizei numCommands,
- const GLubyte* commands,
- GLsizei numCoords,
- GLenum coordType,
- const GLvoid* coords) override;
-void PathParameterfCHROMIUM(GLuint path, GLenum pname, GLfloat value) override;
-void PathParameteriCHROMIUM(GLuint path, GLenum pname, GLint value) override;
-void PathStencilFuncCHROMIUM(GLenum func, GLint ref, GLuint mask) override;
-void StencilFillPathCHROMIUM(GLuint path,
- GLenum fillMode,
- GLuint mask) override;
-void StencilStrokePathCHROMIUM(GLuint path,
- GLint reference,
- GLuint mask) override;
-void CoverFillPathCHROMIUM(GLuint path, GLenum coverMode) override;
-void CoverStrokePathCHROMIUM(GLuint path, GLenum coverMode) override;
-void StencilThenCoverFillPathCHROMIUM(GLuint path,
- GLenum fillMode,
- GLuint mask,
- GLenum coverMode) override;
-void StencilThenCoverStrokePathCHROMIUM(GLuint path,
- GLint reference,
- GLuint mask,
- GLenum coverMode) override;
-void StencilFillPathInstancedCHROMIUM(GLsizei numPaths,
- GLenum pathNameType,
- const GLvoid* paths,
- GLuint pathBase,
- GLenum fillMode,
- GLuint mask,
- GLenum transformType,
- const GLfloat* transformValues) override;
-void StencilStrokePathInstancedCHROMIUM(
- GLsizei numPaths,
- GLenum pathNameType,
- const GLvoid* paths,
- GLuint pathBase,
- GLint reference,
- GLuint mask,
- GLenum transformType,
- const GLfloat* transformValues) override;
-void CoverFillPathInstancedCHROMIUM(GLsizei numPaths,
- GLenum pathNameType,
- const GLvoid* paths,
- GLuint pathBase,
- GLenum coverMode,
- GLenum transformType,
- const GLfloat* transformValues) override;
-void CoverStrokePathInstancedCHROMIUM(GLsizei numPaths,
- GLenum pathNameType,
- const GLvoid* paths,
- GLuint pathBase,
- GLenum coverMode,
- GLenum transformType,
- const GLfloat* transformValues) override;
-void StencilThenCoverFillPathInstancedCHROMIUM(
- GLsizei numPaths,
- GLenum pathNameType,
- const GLvoid* paths,
- GLuint pathBase,
- GLenum fillMode,
- GLuint mask,
- GLenum coverMode,
- GLenum transformType,
- const GLfloat* transformValues) override;
-void StencilThenCoverStrokePathInstancedCHROMIUM(
- GLsizei numPaths,
- GLenum pathNameType,
- const GLvoid* paths,
- GLuint pathBase,
- GLint reference,
- GLuint mask,
- GLenum coverMode,
- GLenum transformType,
- const GLfloat* transformValues) override;
-void BindFragmentInputLocationCHROMIUM(GLuint program,
- GLint location,
- const char* name) override;
-void ProgramPathFragmentInputGenCHROMIUM(GLuint program,
- GLint location,
- GLenum genMode,
- GLint components,
- const GLfloat* coeffs) override;
void ContextVisibilityHintCHROMIUM(GLboolean visibility) override;
void CoverageModulationCHROMIUM(GLenum components) override;
GLenum GetGraphicsResetStatusKHR() override;
@@ -1004,7 +915,7 @@ void TexStorage2DImageCHROMIUM(GLenum target,
GLsizei width,
GLsizei height) override;
void SetColorSpaceMetadataCHROMIUM(GLuint texture_id,
- GLColorSpace color_space) override;
+ GLcolorSpace color_space) override;
void WindowRectanglesEXT(GLenum mode, GLsizei count, const GLint* box) override;
GLuint CreateGpuFenceCHROMIUM() override;
GLuint CreateClientGpuFenceCHROMIUM(ClientGpuFence source) override;
@@ -1024,4 +935,6 @@ GLuint CreateAndTexStorage2DSharedImageWithInternalFormatCHROMIUM(
GLenum internalformat) override;
void BeginSharedImageAccessDirectCHROMIUM(GLuint texture, GLenum mode) override;
void EndSharedImageAccessDirectCHROMIUM(GLuint texture) override;
+void BeginBatchReadAccessSharedImageCHROMIUM() override;
+void EndBatchReadAccessSharedImageCHROMIUM() override;
#endif // GPU_COMMAND_BUFFER_CLIENT_GLES2_INTERFACE_STUB_AUTOGEN_H_
diff --git a/chromium/gpu/command_buffer/client/gles2_interface_stub_impl_autogen.h b/chromium/gpu/command_buffer/client/gles2_interface_stub_impl_autogen.h
index 20857b8c343..1ba3ccf0850 100644
--- a/chromium/gpu/command_buffer/client/gles2_interface_stub_impl_autogen.h
+++ b/chromium/gpu/command_buffer/client/gles2_interface_stub_impl_autogen.h
@@ -965,7 +965,7 @@ void GLES2InterfaceStub::UnmapTexSubImage2DCHROMIUM(const void* /* mem */) {}
void GLES2InterfaceStub::ResizeCHROMIUM(GLuint /* width */,
GLuint /* height */,
GLfloat /* scale_factor */,
- GLenum /* color_space */,
+ GLcolorSpace /* color_space */,
GLboolean /* alpha */) {}
const GLchar* GLES2InterfaceStub::GetRequestableExtensionsCHROMIUM() {
return 0;
@@ -1156,116 +1156,6 @@ void GLES2InterfaceStub::ScheduleDCLayerCHROMIUM(
GLint /* clip_height */,
GLuint /* protected_video_type */) {}
void GLES2InterfaceStub::SetActiveURLCHROMIUM(const char* /* url */) {}
-void GLES2InterfaceStub::MatrixLoadfCHROMIUM(GLenum /* matrixMode */,
- const GLfloat* /* m */) {}
-void GLES2InterfaceStub::MatrixLoadIdentityCHROMIUM(GLenum /* matrixMode */) {}
-GLuint GLES2InterfaceStub::GenPathsCHROMIUM(GLsizei /* range */) {
- return 0;
-}
-void GLES2InterfaceStub::DeletePathsCHROMIUM(GLuint /* path */,
- GLsizei /* range */) {}
-GLboolean GLES2InterfaceStub::IsPathCHROMIUM(GLuint /* path */) {
- return 0;
-}
-void GLES2InterfaceStub::PathCommandsCHROMIUM(GLuint /* path */,
- GLsizei /* numCommands */,
- const GLubyte* /* commands */,
- GLsizei /* numCoords */,
- GLenum /* coordType */,
- const GLvoid* /* coords */) {}
-void GLES2InterfaceStub::PathParameterfCHROMIUM(GLuint /* path */,
- GLenum /* pname */,
- GLfloat /* value */) {}
-void GLES2InterfaceStub::PathParameteriCHROMIUM(GLuint /* path */,
- GLenum /* pname */,
- GLint /* value */) {}
-void GLES2InterfaceStub::PathStencilFuncCHROMIUM(GLenum /* func */,
- GLint /* ref */,
- GLuint /* mask */) {}
-void GLES2InterfaceStub::StencilFillPathCHROMIUM(GLuint /* path */,
- GLenum /* fillMode */,
- GLuint /* mask */) {}
-void GLES2InterfaceStub::StencilStrokePathCHROMIUM(GLuint /* path */,
- GLint /* reference */,
- GLuint /* mask */) {}
-void GLES2InterfaceStub::CoverFillPathCHROMIUM(GLuint /* path */,
- GLenum /* coverMode */) {}
-void GLES2InterfaceStub::CoverStrokePathCHROMIUM(GLuint /* path */,
- GLenum /* coverMode */) {}
-void GLES2InterfaceStub::StencilThenCoverFillPathCHROMIUM(
- GLuint /* path */,
- GLenum /* fillMode */,
- GLuint /* mask */,
- GLenum /* coverMode */) {}
-void GLES2InterfaceStub::StencilThenCoverStrokePathCHROMIUM(
- GLuint /* path */,
- GLint /* reference */,
- GLuint /* mask */,
- GLenum /* coverMode */) {}
-void GLES2InterfaceStub::StencilFillPathInstancedCHROMIUM(
- GLsizei /* numPaths */,
- GLenum /* pathNameType */,
- const GLvoid* /* paths */,
- GLuint /* pathBase */,
- GLenum /* fillMode */,
- GLuint /* mask */,
- GLenum /* transformType */,
- const GLfloat* /* transformValues */) {}
-void GLES2InterfaceStub::StencilStrokePathInstancedCHROMIUM(
- GLsizei /* numPaths */,
- GLenum /* pathNameType */,
- const GLvoid* /* paths */,
- GLuint /* pathBase */,
- GLint /* reference */,
- GLuint /* mask */,
- GLenum /* transformType */,
- const GLfloat* /* transformValues */) {}
-void GLES2InterfaceStub::CoverFillPathInstancedCHROMIUM(
- GLsizei /* numPaths */,
- GLenum /* pathNameType */,
- const GLvoid* /* paths */,
- GLuint /* pathBase */,
- GLenum /* coverMode */,
- GLenum /* transformType */,
- const GLfloat* /* transformValues */) {}
-void GLES2InterfaceStub::CoverStrokePathInstancedCHROMIUM(
- GLsizei /* numPaths */,
- GLenum /* pathNameType */,
- const GLvoid* /* paths */,
- GLuint /* pathBase */,
- GLenum /* coverMode */,
- GLenum /* transformType */,
- const GLfloat* /* transformValues */) {}
-void GLES2InterfaceStub::StencilThenCoverFillPathInstancedCHROMIUM(
- GLsizei /* numPaths */,
- GLenum /* pathNameType */,
- const GLvoid* /* paths */,
- GLuint /* pathBase */,
- GLenum /* fillMode */,
- GLuint /* mask */,
- GLenum /* coverMode */,
- GLenum /* transformType */,
- const GLfloat* /* transformValues */) {}
-void GLES2InterfaceStub::StencilThenCoverStrokePathInstancedCHROMIUM(
- GLsizei /* numPaths */,
- GLenum /* pathNameType */,
- const GLvoid* /* paths */,
- GLuint /* pathBase */,
- GLint /* reference */,
- GLuint /* mask */,
- GLenum /* coverMode */,
- GLenum /* transformType */,
- const GLfloat* /* transformValues */) {}
-void GLES2InterfaceStub::BindFragmentInputLocationCHROMIUM(
- GLuint /* program */,
- GLint /* location */,
- const char* /* name */) {}
-void GLES2InterfaceStub::ProgramPathFragmentInputGenCHROMIUM(
- GLuint /* program */,
- GLint /* location */,
- GLenum /* genMode */,
- GLint /* components */,
- const GLfloat* /* coeffs */) {}
void GLES2InterfaceStub::ContextVisibilityHintCHROMIUM(
GLboolean /* visibility */) {}
void GLES2InterfaceStub::CoverageModulationCHROMIUM(GLenum /* components */) {}
@@ -1321,7 +1211,7 @@ void GLES2InterfaceStub::TexStorage2DImageCHROMIUM(GLenum /* target */,
GLsizei /* height */) {}
void GLES2InterfaceStub::SetColorSpaceMetadataCHROMIUM(
GLuint /* texture_id */,
- GLColorSpace /* color_space */) {}
+ GLcolorSpace /* color_space */) {}
void GLES2InterfaceStub::WindowRectanglesEXT(GLenum /* mode */,
GLsizei /* count */,
const GLint* /* box */) {}
@@ -1359,4 +1249,6 @@ void GLES2InterfaceStub::BeginSharedImageAccessDirectCHROMIUM(
GLenum /* mode */) {}
void GLES2InterfaceStub::EndSharedImageAccessDirectCHROMIUM(
GLuint /* texture */) {}
+void GLES2InterfaceStub::BeginBatchReadAccessSharedImageCHROMIUM() {}
+void GLES2InterfaceStub::EndBatchReadAccessSharedImageCHROMIUM() {}
#endif // GPU_COMMAND_BUFFER_CLIENT_GLES2_INTERFACE_STUB_IMPL_AUTOGEN_H_
diff --git a/chromium/gpu/command_buffer/client/gles2_trace_implementation_autogen.h b/chromium/gpu/command_buffer/client/gles2_trace_implementation_autogen.h
index 75637ef77b0..5091689bd0e 100644
--- a/chromium/gpu/command_buffer/client/gles2_trace_implementation_autogen.h
+++ b/chromium/gpu/command_buffer/client/gles2_trace_implementation_autogen.h
@@ -713,7 +713,7 @@ void UnmapTexSubImage2DCHROMIUM(const void* mem) override;
void ResizeCHROMIUM(GLuint width,
GLuint height,
GLfloat scale_factor,
- GLenum color_space,
+ GLcolorSpace color_space,
GLboolean alpha) override;
const GLchar* GetRequestableExtensionsCHROMIUM() override;
void RequestExtensionCHROMIUM(const char* extension) override;
@@ -875,95 +875,6 @@ void ScheduleDCLayerCHROMIUM(GLuint texture_0,
GLint clip_height,
GLuint protected_video_type) override;
void SetActiveURLCHROMIUM(const char* url) override;
-void MatrixLoadfCHROMIUM(GLenum matrixMode, const GLfloat* m) override;
-void MatrixLoadIdentityCHROMIUM(GLenum matrixMode) override;
-GLuint GenPathsCHROMIUM(GLsizei range) override;
-void DeletePathsCHROMIUM(GLuint path, GLsizei range) override;
-GLboolean IsPathCHROMIUM(GLuint path) override;
-void PathCommandsCHROMIUM(GLuint path,
- GLsizei numCommands,
- const GLubyte* commands,
- GLsizei numCoords,
- GLenum coordType,
- const GLvoid* coords) override;
-void PathParameterfCHROMIUM(GLuint path, GLenum pname, GLfloat value) override;
-void PathParameteriCHROMIUM(GLuint path, GLenum pname, GLint value) override;
-void PathStencilFuncCHROMIUM(GLenum func, GLint ref, GLuint mask) override;
-void StencilFillPathCHROMIUM(GLuint path,
- GLenum fillMode,
- GLuint mask) override;
-void StencilStrokePathCHROMIUM(GLuint path,
- GLint reference,
- GLuint mask) override;
-void CoverFillPathCHROMIUM(GLuint path, GLenum coverMode) override;
-void CoverStrokePathCHROMIUM(GLuint path, GLenum coverMode) override;
-void StencilThenCoverFillPathCHROMIUM(GLuint path,
- GLenum fillMode,
- GLuint mask,
- GLenum coverMode) override;
-void StencilThenCoverStrokePathCHROMIUM(GLuint path,
- GLint reference,
- GLuint mask,
- GLenum coverMode) override;
-void StencilFillPathInstancedCHROMIUM(GLsizei numPaths,
- GLenum pathNameType,
- const GLvoid* paths,
- GLuint pathBase,
- GLenum fillMode,
- GLuint mask,
- GLenum transformType,
- const GLfloat* transformValues) override;
-void StencilStrokePathInstancedCHROMIUM(
- GLsizei numPaths,
- GLenum pathNameType,
- const GLvoid* paths,
- GLuint pathBase,
- GLint reference,
- GLuint mask,
- GLenum transformType,
- const GLfloat* transformValues) override;
-void CoverFillPathInstancedCHROMIUM(GLsizei numPaths,
- GLenum pathNameType,
- const GLvoid* paths,
- GLuint pathBase,
- GLenum coverMode,
- GLenum transformType,
- const GLfloat* transformValues) override;
-void CoverStrokePathInstancedCHROMIUM(GLsizei numPaths,
- GLenum pathNameType,
- const GLvoid* paths,
- GLuint pathBase,
- GLenum coverMode,
- GLenum transformType,
- const GLfloat* transformValues) override;
-void StencilThenCoverFillPathInstancedCHROMIUM(
- GLsizei numPaths,
- GLenum pathNameType,
- const GLvoid* paths,
- GLuint pathBase,
- GLenum fillMode,
- GLuint mask,
- GLenum coverMode,
- GLenum transformType,
- const GLfloat* transformValues) override;
-void StencilThenCoverStrokePathInstancedCHROMIUM(
- GLsizei numPaths,
- GLenum pathNameType,
- const GLvoid* paths,
- GLuint pathBase,
- GLint reference,
- GLuint mask,
- GLenum coverMode,
- GLenum transformType,
- const GLfloat* transformValues) override;
-void BindFragmentInputLocationCHROMIUM(GLuint program,
- GLint location,
- const char* name) override;
-void ProgramPathFragmentInputGenCHROMIUM(GLuint program,
- GLint location,
- GLenum genMode,
- GLint components,
- const GLfloat* coeffs) override;
void ContextVisibilityHintCHROMIUM(GLboolean visibility) override;
void CoverageModulationCHROMIUM(GLenum components) override;
GLenum GetGraphicsResetStatusKHR() override;
@@ -1004,7 +915,7 @@ void TexStorage2DImageCHROMIUM(GLenum target,
GLsizei width,
GLsizei height) override;
void SetColorSpaceMetadataCHROMIUM(GLuint texture_id,
- GLColorSpace color_space) override;
+ GLcolorSpace color_space) override;
void WindowRectanglesEXT(GLenum mode, GLsizei count, const GLint* box) override;
GLuint CreateGpuFenceCHROMIUM() override;
GLuint CreateClientGpuFenceCHROMIUM(ClientGpuFence source) override;
@@ -1024,4 +935,6 @@ GLuint CreateAndTexStorage2DSharedImageWithInternalFormatCHROMIUM(
GLenum internalformat) override;
void BeginSharedImageAccessDirectCHROMIUM(GLuint texture, GLenum mode) override;
void EndSharedImageAccessDirectCHROMIUM(GLuint texture) override;
+void BeginBatchReadAccessSharedImageCHROMIUM() override;
+void EndBatchReadAccessSharedImageCHROMIUM() override;
#endif // GPU_COMMAND_BUFFER_CLIENT_GLES2_TRACE_IMPLEMENTATION_AUTOGEN_H_
diff --git a/chromium/gpu/command_buffer/client/gles2_trace_implementation_impl_autogen.h b/chromium/gpu/command_buffer/client/gles2_trace_implementation_impl_autogen.h
index 7f13e0254e4..ce0e76e739b 100644
--- a/chromium/gpu/command_buffer/client/gles2_trace_implementation_impl_autogen.h
+++ b/chromium/gpu/command_buffer/client/gles2_trace_implementation_impl_autogen.h
@@ -2038,7 +2038,7 @@ void GLES2TraceImplementation::UnmapTexSubImage2DCHROMIUM(const void* mem) {
void GLES2TraceImplementation::ResizeCHROMIUM(GLuint width,
GLuint height,
GLfloat scale_factor,
- GLenum color_space,
+ GLcolorSpace color_space,
GLboolean alpha) {
TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::ResizeCHROMIUM");
gl_->ResizeCHROMIUM(width, height, scale_factor, color_space, alpha);
@@ -2421,228 +2421,6 @@ void GLES2TraceImplementation::SetActiveURLCHROMIUM(const char* url) {
gl_->SetActiveURLCHROMIUM(url);
}
-void GLES2TraceImplementation::MatrixLoadfCHROMIUM(GLenum matrixMode,
- const GLfloat* m) {
- TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::MatrixLoadfCHROMIUM");
- gl_->MatrixLoadfCHROMIUM(matrixMode, m);
-}
-
-void GLES2TraceImplementation::MatrixLoadIdentityCHROMIUM(GLenum matrixMode) {
- TRACE_EVENT_BINARY_EFFICIENT0("gpu",
- "GLES2Trace::MatrixLoadIdentityCHROMIUM");
- gl_->MatrixLoadIdentityCHROMIUM(matrixMode);
-}
-
-GLuint GLES2TraceImplementation::GenPathsCHROMIUM(GLsizei range) {
- TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::GenPathsCHROMIUM");
- return gl_->GenPathsCHROMIUM(range);
-}
-
-void GLES2TraceImplementation::DeletePathsCHROMIUM(GLuint path, GLsizei range) {
- TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::DeletePathsCHROMIUM");
- gl_->DeletePathsCHROMIUM(path, range);
-}
-
-GLboolean GLES2TraceImplementation::IsPathCHROMIUM(GLuint path) {
- TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::IsPathCHROMIUM");
- return gl_->IsPathCHROMIUM(path);
-}
-
-void GLES2TraceImplementation::PathCommandsCHROMIUM(GLuint path,
- GLsizei numCommands,
- const GLubyte* commands,
- GLsizei numCoords,
- GLenum coordType,
- const GLvoid* coords) {
- TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::PathCommandsCHROMIUM");
- gl_->PathCommandsCHROMIUM(path, numCommands, commands, numCoords, coordType,
- coords);
-}
-
-void GLES2TraceImplementation::PathParameterfCHROMIUM(GLuint path,
- GLenum pname,
- GLfloat value) {
- TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::PathParameterfCHROMIUM");
- gl_->PathParameterfCHROMIUM(path, pname, value);
-}
-
-void GLES2TraceImplementation::PathParameteriCHROMIUM(GLuint path,
- GLenum pname,
- GLint value) {
- TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::PathParameteriCHROMIUM");
- gl_->PathParameteriCHROMIUM(path, pname, value);
-}
-
-void GLES2TraceImplementation::PathStencilFuncCHROMIUM(GLenum func,
- GLint ref,
- GLuint mask) {
- TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::PathStencilFuncCHROMIUM");
- gl_->PathStencilFuncCHROMIUM(func, ref, mask);
-}
-
-void GLES2TraceImplementation::StencilFillPathCHROMIUM(GLuint path,
- GLenum fillMode,
- GLuint mask) {
- TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::StencilFillPathCHROMIUM");
- gl_->StencilFillPathCHROMIUM(path, fillMode, mask);
-}
-
-void GLES2TraceImplementation::StencilStrokePathCHROMIUM(GLuint path,
- GLint reference,
- GLuint mask) {
- TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::StencilStrokePathCHROMIUM");
- gl_->StencilStrokePathCHROMIUM(path, reference, mask);
-}
-
-void GLES2TraceImplementation::CoverFillPathCHROMIUM(GLuint path,
- GLenum coverMode) {
- TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::CoverFillPathCHROMIUM");
- gl_->CoverFillPathCHROMIUM(path, coverMode);
-}
-
-void GLES2TraceImplementation::CoverStrokePathCHROMIUM(GLuint path,
- GLenum coverMode) {
- TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::CoverStrokePathCHROMIUM");
- gl_->CoverStrokePathCHROMIUM(path, coverMode);
-}
-
-void GLES2TraceImplementation::StencilThenCoverFillPathCHROMIUM(
- GLuint path,
- GLenum fillMode,
- GLuint mask,
- GLenum coverMode) {
- TRACE_EVENT_BINARY_EFFICIENT0("gpu",
- "GLES2Trace::StencilThenCoverFillPathCHROMIUM");
- gl_->StencilThenCoverFillPathCHROMIUM(path, fillMode, mask, coverMode);
-}
-
-void GLES2TraceImplementation::StencilThenCoverStrokePathCHROMIUM(
- GLuint path,
- GLint reference,
- GLuint mask,
- GLenum coverMode) {
- TRACE_EVENT_BINARY_EFFICIENT0(
- "gpu", "GLES2Trace::StencilThenCoverStrokePathCHROMIUM");
- gl_->StencilThenCoverStrokePathCHROMIUM(path, reference, mask, coverMode);
-}
-
-void GLES2TraceImplementation::StencilFillPathInstancedCHROMIUM(
- GLsizei numPaths,
- GLenum pathNameType,
- const GLvoid* paths,
- GLuint pathBase,
- GLenum fillMode,
- GLuint mask,
- GLenum transformType,
- const GLfloat* transformValues) {
- TRACE_EVENT_BINARY_EFFICIENT0("gpu",
- "GLES2Trace::StencilFillPathInstancedCHROMIUM");
- gl_->StencilFillPathInstancedCHROMIUM(numPaths, pathNameType, paths, pathBase,
- fillMode, mask, transformType,
- transformValues);
-}
-
-void GLES2TraceImplementation::StencilStrokePathInstancedCHROMIUM(
- GLsizei numPaths,
- GLenum pathNameType,
- const GLvoid* paths,
- GLuint pathBase,
- GLint reference,
- GLuint mask,
- GLenum transformType,
- const GLfloat* transformValues) {
- TRACE_EVENT_BINARY_EFFICIENT0(
- "gpu", "GLES2Trace::StencilStrokePathInstancedCHROMIUM");
- gl_->StencilStrokePathInstancedCHROMIUM(numPaths, pathNameType, paths,
- pathBase, reference, mask,
- transformType, transformValues);
-}
-
-void GLES2TraceImplementation::CoverFillPathInstancedCHROMIUM(
- GLsizei numPaths,
- GLenum pathNameType,
- const GLvoid* paths,
- GLuint pathBase,
- GLenum coverMode,
- GLenum transformType,
- const GLfloat* transformValues) {
- TRACE_EVENT_BINARY_EFFICIENT0("gpu",
- "GLES2Trace::CoverFillPathInstancedCHROMIUM");
- gl_->CoverFillPathInstancedCHROMIUM(numPaths, pathNameType, paths, pathBase,
- coverMode, transformType,
- transformValues);
-}
-
-void GLES2TraceImplementation::CoverStrokePathInstancedCHROMIUM(
- GLsizei numPaths,
- GLenum pathNameType,
- const GLvoid* paths,
- GLuint pathBase,
- GLenum coverMode,
- GLenum transformType,
- const GLfloat* transformValues) {
- TRACE_EVENT_BINARY_EFFICIENT0("gpu",
- "GLES2Trace::CoverStrokePathInstancedCHROMIUM");
- gl_->CoverStrokePathInstancedCHROMIUM(numPaths, pathNameType, paths, pathBase,
- coverMode, transformType,
- transformValues);
-}
-
-void GLES2TraceImplementation::StencilThenCoverFillPathInstancedCHROMIUM(
- GLsizei numPaths,
- GLenum pathNameType,
- const GLvoid* paths,
- GLuint pathBase,
- GLenum fillMode,
- GLuint mask,
- GLenum coverMode,
- GLenum transformType,
- const GLfloat* transformValues) {
- TRACE_EVENT_BINARY_EFFICIENT0(
- "gpu", "GLES2Trace::StencilThenCoverFillPathInstancedCHROMIUM");
- gl_->StencilThenCoverFillPathInstancedCHROMIUM(
- numPaths, pathNameType, paths, pathBase, fillMode, mask, coverMode,
- transformType, transformValues);
-}
-
-void GLES2TraceImplementation::StencilThenCoverStrokePathInstancedCHROMIUM(
- GLsizei numPaths,
- GLenum pathNameType,
- const GLvoid* paths,
- GLuint pathBase,
- GLint reference,
- GLuint mask,
- GLenum coverMode,
- GLenum transformType,
- const GLfloat* transformValues) {
- TRACE_EVENT_BINARY_EFFICIENT0(
- "gpu", "GLES2Trace::StencilThenCoverStrokePathInstancedCHROMIUM");
- gl_->StencilThenCoverStrokePathInstancedCHROMIUM(
- numPaths, pathNameType, paths, pathBase, reference, mask, coverMode,
- transformType, transformValues);
-}
-
-void GLES2TraceImplementation::BindFragmentInputLocationCHROMIUM(
- GLuint program,
- GLint location,
- const char* name) {
- TRACE_EVENT_BINARY_EFFICIENT0(
- "gpu", "GLES2Trace::BindFragmentInputLocationCHROMIUM");
- gl_->BindFragmentInputLocationCHROMIUM(program, location, name);
-}
-
-void GLES2TraceImplementation::ProgramPathFragmentInputGenCHROMIUM(
- GLuint program,
- GLint location,
- GLenum genMode,
- GLint components,
- const GLfloat* coeffs) {
- TRACE_EVENT_BINARY_EFFICIENT0(
- "gpu", "GLES2Trace::ProgramPathFragmentInputGenCHROMIUM");
- gl_->ProgramPathFragmentInputGenCHROMIUM(program, location, genMode,
- components, coeffs);
-}
-
void GLES2TraceImplementation::ContextVisibilityHintCHROMIUM(
GLboolean visibility) {
TRACE_EVENT_BINARY_EFFICIENT0("gpu",
@@ -2767,7 +2545,7 @@ void GLES2TraceImplementation::TexStorage2DImageCHROMIUM(GLenum target,
void GLES2TraceImplementation::SetColorSpaceMetadataCHROMIUM(
GLuint texture_id,
- GLColorSpace color_space) {
+ GLcolorSpace color_space) {
TRACE_EVENT_BINARY_EFFICIENT0("gpu",
"GLES2Trace::SetColorSpaceMetadataCHROMIUM");
gl_->SetColorSpaceMetadataCHROMIUM(texture_id, color_space);
@@ -2861,4 +2639,16 @@ void GLES2TraceImplementation::EndSharedImageAccessDirectCHROMIUM(
gl_->EndSharedImageAccessDirectCHROMIUM(texture);
}
+void GLES2TraceImplementation::BeginBatchReadAccessSharedImageCHROMIUM() {
+ TRACE_EVENT_BINARY_EFFICIENT0(
+ "gpu", "GLES2Trace::BeginBatchReadAccessSharedImageCHROMIUM");
+ gl_->BeginBatchReadAccessSharedImageCHROMIUM();
+}
+
+void GLES2TraceImplementation::EndBatchReadAccessSharedImageCHROMIUM() {
+ TRACE_EVENT_BINARY_EFFICIENT0(
+ "gpu", "GLES2Trace::EndBatchReadAccessSharedImageCHROMIUM");
+ gl_->EndBatchReadAccessSharedImageCHROMIUM();
+}
+
#endif // GPU_COMMAND_BUFFER_CLIENT_GLES2_TRACE_IMPLEMENTATION_IMPL_AUTOGEN_H_
diff --git a/chromium/gpu/command_buffer/client/raster_cmd_helper_autogen.h b/chromium/gpu/command_buffer/client/raster_cmd_helper_autogen.h
index efc7328ed89..42ee264931e 100644
--- a/chromium/gpu/command_buffer/client/raster_cmd_helper_autogen.h
+++ b/chromium/gpu/command_buffer/client/raster_cmd_helper_autogen.h
@@ -190,6 +190,8 @@ void CopySubTextureINTERNALImmediate(GLint xoffset,
GLint y,
GLsizei width,
GLsizei height,
+ GLboolean unpack_flip_y,
+ GLboolean unpack_premultiply_alpha,
const GLbyte* mailboxes) {
const uint32_t size =
raster::cmds::CopySubTextureINTERNALImmediate::ComputeSize();
@@ -197,7 +199,8 @@ void CopySubTextureINTERNALImmediate(GLint xoffset,
GetImmediateCmdSpaceTotalSize<
raster::cmds::CopySubTextureINTERNALImmediate>(size);
if (c) {
- c->Init(xoffset, yoffset, x, y, width, height, mailboxes);
+ c->Init(xoffset, yoffset, x, y, width, height, unpack_flip_y,
+ unpack_premultiply_alpha, mailboxes);
}
}
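
The immediate command now carries the unpack_flip_y and unpack_premultiply_alpha flags through to the service. From the client they travel in via RasterInterface::CopySubTexture, whose widened signature appears later in this patch; the sketch below assumes the leading parameters (source and destination mailboxes, destination target, offsets) keep the order used by the implementation hunks, and `ri` plus the mailboxes are placeholders.

#include "gpu/GLES2/gl2extchromium.h"
#include "gpu/command_buffer/client/raster_interface.h"
#include "gpu/command_buffer/common/mailbox.h"

// Copies |width| x |height| texels, flipping vertically during unpack.
void CopyFlipped(gpu::raster::RasterInterface* ri,
                 const gpu::Mailbox& source_mailbox,
                 const gpu::Mailbox& dest_mailbox,
                 GLsizei width,
                 GLsizei height) {
  ri->CopySubTexture(source_mailbox, dest_mailbox, GL_TEXTURE_2D,
                     /*xoffset=*/0, /*yoffset=*/0, /*x=*/0, /*y=*/0,
                     width, height,
                     /*unpack_flip_y=*/GL_TRUE,
                     /*unpack_premultiply_alpha=*/GL_FALSE);
}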
diff --git a/chromium/gpu/command_buffer/client/raster_implementation.cc b/chromium/gpu/command_buffer/client/raster_implementation.cc
index 87e03d9e090..5279574c7d0 100644
--- a/chromium/gpu/command_buffer/client/raster_implementation.cc
+++ b/chromium/gpu/command_buffer/client/raster_implementation.cc
@@ -1045,7 +1045,9 @@ void RasterImplementation::CopySubTexture(const gpu::Mailbox& source_mailbox,
GLint x,
GLint y,
GLsizei width,
- GLsizei height) {
+ GLsizei height,
+ GLboolean unpack_flip_y,
+ GLboolean unpack_premultiply_alpha) {
GPU_CLIENT_SINGLE_THREAD_CHECK();
GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glCopySubTexture("
<< source_mailbox.ToDebugString() << ", "
@@ -1065,10 +1067,20 @@ void RasterImplementation::CopySubTexture(const gpu::Mailbox& source_mailbox,
memcpy(mailboxes + sizeof(source_mailbox.name), dest_mailbox.name,
sizeof(dest_mailbox.name));
helper_->CopySubTextureINTERNALImmediate(xoffset, yoffset, x, y, width,
- height, mailboxes);
+ height, unpack_flip_y,
+ unpack_premultiply_alpha, mailboxes);
CheckGLError();
}
+void RasterImplementation::WritePixels(const gpu::Mailbox& dest_mailbox,
+ int dst_x_offset,
+ int dst_y_offset,
+ GLenum texture_target,
+ const SkImageInfo& src_info,
+ const void* src_pixels) {
+ NOTREACHED();
+}
+
void RasterImplementation::BeginRasterCHROMIUM(
GLuint sk_color,
GLuint msaa_sample_count,
@@ -1142,8 +1154,7 @@ void RasterImplementation::RasterCHROMIUM(const cc::DisplayItemList* list,
GetOrCreatePaintCache(), font_manager_.strike_server(),
raster_properties_->color_space, raster_properties_->can_use_lcd_text,
capabilities().context_supports_distance_field_text,
- capabilities().max_texture_size,
- capabilities().glyph_cache_max_texture_bytes);
+ capabilities().max_texture_size);
serializer.Serialize(&list->paint_op_buffer_, &temp_raster_offsets_,
preamble);
// TODO(piman): raise error if !serializer.valid()?
@@ -1181,6 +1192,34 @@ SyncToken RasterImplementation::ScheduleImageDecode(
return decode_sync_token;
}
+void RasterImplementation::ReadbackARGBPixelsAsync(
+ const gpu::Mailbox& source_mailbox,
+ GLenum source_target,
+ const gfx::Size& dst_size,
+ unsigned char* out,
+ GLenum format,
+ base::OnceCallback<void(bool)> readback_done) {
+ NOTREACHED();
+}
+
+void RasterImplementation::ReadbackYUVPixelsAsync(
+ const gpu::Mailbox& source_mailbox,
+ GLenum source_target,
+ const gfx::Size& source_size,
+ const gfx::Rect& output_rect,
+ bool vertically_flip_texture,
+ int y_plane_row_stride_bytes,
+ unsigned char* y_plane_data,
+ int u_plane_row_stride_bytes,
+ unsigned char* u_plane_data,
+ int v_plane_row_stride_bytes,
+ unsigned char* v_plane_data,
+ const gfx::Point& paste_location,
+ base::OnceCallback<void()> release_mailbox,
+ base::OnceCallback<void(bool)> readback_done) {
+ NOTREACHED();
+}
+
void RasterImplementation::IssueImageDecodeCacheEntryCreation(
base::span<const uint8_t> encoded_data,
const gfx::Size& output_size,
@@ -1230,6 +1269,20 @@ void RasterImplementation::EndSharedImageAccessDirectCHROMIUM(GLuint texture) {
NOTREACHED();
}
+void RasterImplementation::InitializeDiscardableTextureCHROMIUM(
+ GLuint texture) {
+ NOTREACHED();
+}
+
+void RasterImplementation::UnlockDiscardableTextureCHROMIUM(GLuint texture) {
+ NOTREACHED();
+}
+
+bool RasterImplementation::LockDiscardableTextureCHROMIUM(GLuint texture) {
+ NOTREACHED();
+ return false;
+}
+
void RasterImplementation::TraceBeginCHROMIUM(const char* category_name,
const char* trace_name) {
GPU_CLIENT_SINGLE_THREAD_CHECK();
diff --git a/chromium/gpu/command_buffer/client/raster_implementation.h b/chromium/gpu/command_buffer/client/raster_implementation.h
index 4661b207f66..920e96b90cd 100644
--- a/chromium/gpu/command_buffer/client/raster_implementation.h
+++ b/chromium/gpu/command_buffer/client/raster_implementation.h
@@ -31,7 +31,6 @@
#include "gpu/command_buffer/common/id_allocator.h"
#include "gpu/command_buffer/common/raster_cmd_format.h"
#include "gpu/raster_export.h"
-#include "third_party/skia/include/core/SkColor.h"
namespace cc {
class TransferCacheSerializeHelper;
@@ -119,7 +118,16 @@ class RASTER_EXPORT RasterImplementation : public RasterInterface,
GLint x,
GLint y,
GLsizei width,
- GLsizei height) override;
+ GLsizei height,
+ GLboolean unpack_flip_y,
+ GLboolean unpack_premultiply_alpha) override;
+
+ void WritePixels(const gpu::Mailbox& dest_mailbox,
+ int dst_x_offset,
+ int dst_y_offset,
+ GLenum texture_target,
+ const SkImageInfo& src_info,
+ const void* src_pixels) override;
void BeginRasterCHROMIUM(GLuint sk_color,
GLuint msaa_sample_count,
@@ -140,6 +148,28 @@ class RASTER_EXPORT RasterImplementation : public RasterInterface,
uint32_t transfer_cache_entry_id,
const gfx::ColorSpace& target_color_space,
bool needs_mips) override;
+ void ReadbackARGBPixelsAsync(
+ const gpu::Mailbox& source_mailbox,
+ GLenum source_target,
+ const gfx::Size& dst_size,
+ unsigned char* out,
+ GLenum format,
+ base::OnceCallback<void(bool)> readback_done) override;
+ void ReadbackYUVPixelsAsync(
+ const gpu::Mailbox& source_mailbox,
+ GLenum source_target,
+ const gfx::Size& source_size,
+ const gfx::Rect& output_rect,
+ bool vertically_flip_texture,
+ int y_plane_row_stride_bytes,
+ unsigned char* y_plane_data,
+ int u_plane_row_stride_bytes,
+ unsigned char* u_plane_data,
+ int v_plane_row_stride_bytes,
+ unsigned char* v_plane_data,
+ const gfx::Point& paste_location,
+ base::OnceCallback<void()> release_mailbox,
+ base::OnceCallback<void(bool)> readback_done) override;
GLuint CreateAndConsumeForGpuRaster(const gpu::Mailbox& mailbox) override;
void DeleteGpuRasterTexture(GLuint texture) override;
void BeginGpuRaster() override;
@@ -148,6 +178,10 @@ class RASTER_EXPORT RasterImplementation : public RasterInterface,
GLenum mode) override;
void EndSharedImageAccessDirectCHROMIUM(GLuint texture) override;
+ void InitializeDiscardableTextureCHROMIUM(GLuint texture) override;
+ void UnlockDiscardableTextureCHROMIUM(GLuint texture) override;
+ bool LockDiscardableTextureCHROMIUM(GLuint texture) override;
+
// ContextSupport implementation.
void SetAggressivelyFreeResources(bool aggressively_free_resources) override;
void Swap(uint32_t flags,
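
The new asynchronous readback entry points are declared on this class but kept as NOTREACHED stubs in raster_implementation.cc; the working implementation lands in the GLES-backed class below. A usage sketch for the ARGB variant; `ri`, the mailbox and the size are placeholders, and the buffer assumes 4 bytes per pixel for GL_RGBA.

#include <memory>
#include "base/bind.h"
#include "base/logging.h"
#include "gpu/command_buffer/client/raster_interface.h"
#include "ui/gfx/geometry/size.h"

void ReadbackToBuffer(gpu::raster::RasterInterface* ri,
                      const gpu::Mailbox& source_mailbox,
                      const gfx::Size& size) {
  auto pixels =
      std::make_unique<unsigned char[]>(4u * size.width() * size.height());
  unsigned char* out = pixels.get();
  ri->ReadbackARGBPixelsAsync(
      source_mailbox, GL_TEXTURE_2D, size, out, GL_RGBA,
      base::BindOnce(
          [](std::unique_ptr<unsigned char[]> owned, bool success) {
            // |owned| keeps the destination buffer alive until readback ends.
            DLOG_IF(ERROR, !success) << "ARGB readback failed";
          },
          std::move(pixels)));
}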
diff --git a/chromium/gpu/command_buffer/client/raster_implementation_gles.cc b/chromium/gpu/command_buffer/client/raster_implementation_gles.cc
index bf10d7a7db3..3d5e3070b00 100644
--- a/chromium/gpu/command_buffer/client/raster_implementation_gles.cc
+++ b/chromium/gpu/command_buffer/client/raster_implementation_gles.cc
@@ -19,6 +19,8 @@
#include "cc/paint/transfer_cache_serialize_helper.h"
#include "components/viz/common/resources/resource_format_utils.h"
#include "gpu/GLES2/gl2extchromium.h"
+#include "gpu/command_buffer/client/gl_helper.h"
+#include "gpu/command_buffer/client/gles2_implementation.h"
#include "gpu/command_buffer/client/gles2_interface.h"
#include "gpu/command_buffer/common/capabilities.h"
#include "gpu/command_buffer/common/gpu_memory_buffer_support.h"
@@ -29,10 +31,41 @@
namespace gpu {
namespace raster {
-RasterImplementationGLES::RasterImplementationGLES(gles2::GLES2Interface* gl)
- : gl_(gl) {}
+namespace {
+
+GLenum SkColorTypeToGLDataFormat(SkColorType color_type) {
+ switch (color_type) {
+ case kRGBA_8888_SkColorType:
+ return GL_RGBA;
+ case kBGRA_8888_SkColorType:
+ return GL_BGRA_EXT;
+ default:
+ DLOG(ERROR) << "Unknown SkColorType " << color_type;
+ }
+ NOTREACHED();
+ return 0;
+}
+
+GLenum SkColorTypeToGLDataType(SkColorType color_type) {
+ switch (color_type) {
+ case kRGBA_8888_SkColorType:
+ case kBGRA_8888_SkColorType:
+ return GL_UNSIGNED_BYTE;
+ default:
+ DLOG(ERROR) << "Unknown SkColorType " << color_type;
+ }
+ NOTREACHED();
+ return 0;
+}
-RasterImplementationGLES::~RasterImplementationGLES() {}
+} // namespace
+
+RasterImplementationGLES::RasterImplementationGLES(
+ gles2::GLES2Interface* gl,
+ ContextSupport* context_support)
+ : gl_(gl), context_support_(context_support) {}
+
+RasterImplementationGLES::~RasterImplementationGLES() = default;
void RasterImplementationGLES::Finish() {
gl_->Finish();
@@ -105,20 +138,52 @@ void RasterImplementationGLES::CopySubTexture(
GLint x,
GLint y,
GLsizei width,
- GLsizei height) {
+ GLsizei height,
+ GLboolean unpack_flip_y,
+ GLboolean unpack_premultiply_alpha) {
GLuint texture_ids[2] = {
- gl_->CreateAndConsumeTextureCHROMIUM(source_mailbox.name),
- gl_->CreateAndConsumeTextureCHROMIUM(dest_mailbox.name),
+ CreateAndConsumeForGpuRaster(source_mailbox),
+ CreateAndConsumeForGpuRaster(dest_mailbox),
};
DCHECK(texture_ids[0]);
DCHECK(texture_ids[1]);
+ BeginSharedImageAccessDirectCHROMIUM(
+ texture_ids[0], GL_SHARED_IMAGE_ACCESS_MODE_READ_CHROMIUM);
+ BeginSharedImageAccessDirectCHROMIUM(
+ texture_ids[1], GL_SHARED_IMAGE_ACCESS_MODE_READWRITE_CHROMIUM);
+
gl_->CopySubTextureCHROMIUM(texture_ids[0], 0, dest_target, texture_ids[1], 0,
- xoffset, yoffset, x, y, width, height, false,
- false, false);
+ xoffset, yoffset, x, y, width, height,
+                              unpack_flip_y, unpack_premultiply_alpha,
+                              false /* unpack_unmultiply_alpha */);
+
+ EndSharedImageAccessDirectCHROMIUM(texture_ids[0]);
+ EndSharedImageAccessDirectCHROMIUM(texture_ids[1]);
gl_->DeleteTextures(2, texture_ids);
}
+void RasterImplementationGLES::WritePixels(const gpu::Mailbox& dest_mailbox,
+ int dst_x_offset,
+ int dst_y_offset,
+ GLenum texture_target,
+ const SkImageInfo& src_info,
+ const void* src_pixels) {
+ GLuint texture_id = CreateAndConsumeForGpuRaster(dest_mailbox);
+ BeginSharedImageAccessDirectCHROMIUM(
+ texture_id, GL_SHARED_IMAGE_ACCESS_MODE_READWRITE_CHROMIUM);
+
+ gl_->BindTexture(texture_target, texture_id);
+ gl_->TexSubImage2D(texture_target, 0, dst_x_offset, dst_y_offset,
+ src_info.width(), src_info.height(),
+ SkColorTypeToGLDataFormat(src_info.colorType()),
+ SkColorTypeToGLDataType(src_info.colorType()), src_pixels);
+ gl_->BindTexture(texture_target, 0);
+
+ EndSharedImageAccessDirectCHROMIUM(texture_id);
+ DeleteGpuRasterTexture(texture_id);
+}
+
void RasterImplementationGLES::BeginRasterCHROMIUM(
GLuint sk_color,
GLuint msaa_sample_count,
@@ -159,10 +224,129 @@ SyncToken RasterImplementationGLES::ScheduleImageDecode(
return SyncToken();
}
+void RasterImplementationGLES::ReadbackARGBPixelsAsync(
+ const gpu::Mailbox& source_mailbox,
+ GLenum source_target,
+ const gfx::Size& dst_size,
+ unsigned char* out,
+ GLenum format,
+ base::OnceCallback<void(bool)> readback_done) {
+ DCHECK(!readback_done.is_null());
+
+ GLuint texture_id = CreateAndConsumeForGpuRaster(source_mailbox);
+ BeginSharedImageAccessDirectCHROMIUM(
+ texture_id, GL_SHARED_IMAGE_ACCESS_MODE_READ_CHROMIUM);
+
+ GetGLHelper()->ReadbackTextureAsync(
+ texture_id, source_target, dst_size, out, format,
+ base::BindOnce(&RasterImplementationGLES::OnReadARGBPixelsAsync,
+ weak_ptr_factory_.GetWeakPtr(), texture_id,
+ std::move(readback_done)));
+}
+
+void RasterImplementationGLES::OnReadARGBPixelsAsync(
+ GLuint texture_id,
+ base::OnceCallback<void(bool)> readback_done,
+ bool success) {
+ DCHECK(texture_id);
+ EndSharedImageAccessDirectCHROMIUM(texture_id);
+ DeleteGpuRasterTexture(texture_id);
+
+ std::move(readback_done).Run(success);
+}
+
+void RasterImplementationGLES::ReadbackYUVPixelsAsync(
+ const gpu::Mailbox& source_mailbox,
+ GLenum source_target,
+ const gfx::Size& source_size,
+ const gfx::Rect& output_rect,
+ bool vertically_flip_texture,
+ int y_plane_row_stride_bytes,
+ unsigned char* y_plane_data,
+ int u_plane_row_stride_bytes,
+ unsigned char* u_plane_data,
+ int v_plane_row_stride_bytes,
+ unsigned char* v_plane_data,
+ const gfx::Point& paste_location,
+ base::OnceCallback<void()> release_mailbox,
+ base::OnceCallback<void(bool)> readback_done) {
+ GLuint shared_texture_id = CreateAndConsumeForGpuRaster(source_mailbox);
+ BeginSharedImageAccessDirectCHROMIUM(
+ shared_texture_id, GL_SHARED_IMAGE_ACCESS_MODE_READ_CHROMIUM);
+ base::OnceCallback<void()> on_release_mailbox =
+ base::BindOnce(&RasterImplementationGLES::OnReleaseMailbox,
+ weak_ptr_factory_.GetWeakPtr(), shared_texture_id,
+ std::move(release_mailbox));
+
+ // The YUV readback path only works for 2D textures.
+ GLuint texture_for_readback = shared_texture_id;
+ GLuint copy_texture_id = 0;
+ if (source_target != GL_TEXTURE_2D) {
+ int width = source_size.width();
+ int height = source_size.height();
+
+ gl_->GenTextures(1, &copy_texture_id);
+ gl_->BindTexture(GL_TEXTURE_2D, copy_texture_id);
+ gl_->TexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
+ gl_->TexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
+ gl_->TexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
+ gl_->TexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
+ gl_->TexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, width, height, 0, GL_RGBA,
+ GL_UNSIGNED_BYTE, nullptr);
+ gl_->CopyTextureCHROMIUM(shared_texture_id, 0, GL_TEXTURE_2D,
+ copy_texture_id, 0, GL_RGBA, GL_UNSIGNED_BYTE, 0,
+ 0, 0);
+ texture_for_readback = copy_texture_id;
+
+  // |copy_texture_id| now contains the texture we want to copy, so release
+  // the pinned mailbox.
+ std::move(on_release_mailbox).Run();
+ }
+
+ DCHECK(GetGLHelper());
+ gpu::ReadbackYUVInterface* const yuv_reader =
+ GetGLHelper()->GetReadbackPipelineYUV(vertically_flip_texture);
+ yuv_reader->ReadbackYUV(
+ texture_for_readback, source_size, gfx::Rect(source_size),
+ y_plane_row_stride_bytes, y_plane_data, u_plane_row_stride_bytes,
+ u_plane_data, v_plane_row_stride_bytes, v_plane_data, paste_location,
+ base::BindOnce(&RasterImplementationGLES::OnReadYUVPixelsAsync,
+ weak_ptr_factory_.GetWeakPtr(), copy_texture_id,
+ std::move(on_release_mailbox), std::move(readback_done)));
+}
+
+void RasterImplementationGLES::OnReadYUVPixelsAsync(
+ GLuint copy_texture_id,
+ base::OnceCallback<void()> on_release_mailbox,
+ base::OnceCallback<void(bool)> readback_done,
+ bool success) {
+ if (copy_texture_id) {
+ DCHECK(on_release_mailbox.is_null());
+ gl_->DeleteTextures(1, &copy_texture_id);
+ } else {
+ DCHECK(!on_release_mailbox.is_null());
+ std::move(on_release_mailbox).Run();
+ }
+
+ std::move(readback_done).Run(success);
+}
+
+void RasterImplementationGLES::OnReleaseMailbox(
+ GLuint shared_texture_id,
+ base::OnceCallback<void()> release_mailbox) {
+ DCHECK(shared_texture_id);
+ DCHECK(!release_mailbox.is_null());
+
+ EndSharedImageAccessDirectCHROMIUM(shared_texture_id);
+ DeleteGpuRasterTexture(shared_texture_id);
+ std::move(release_mailbox).Run();
+}
+
GLuint RasterImplementationGLES::CreateAndConsumeForGpuRaster(
const gpu::Mailbox& mailbox) {
- DCHECK(mailbox.IsSharedImage());
- return gl_->CreateAndTexStorage2DSharedImageCHROMIUM(mailbox.name);
+ return mailbox.IsSharedImage()
+ ? gl_->CreateAndTexStorage2DSharedImageCHROMIUM(mailbox.name)
+ : gl_->CreateAndConsumeTextureCHROMIUM(mailbox.name);
}
void RasterImplementationGLES::DeleteGpuRasterTexture(GLuint texture) {
@@ -198,6 +382,20 @@ void RasterImplementationGLES::EndSharedImageAccessDirectCHROMIUM(
gl_->EndSharedImageAccessDirectCHROMIUM(texture);
}
+void RasterImplementationGLES::InitializeDiscardableTextureCHROMIUM(
+ GLuint texture) {
+ gl_->InitializeDiscardableTextureCHROMIUM(texture);
+}
+
+void RasterImplementationGLES::UnlockDiscardableTextureCHROMIUM(
+ GLuint texture) {
+ gl_->UnlockDiscardableTextureCHROMIUM(texture);
+}
+
+bool RasterImplementationGLES::LockDiscardableTextureCHROMIUM(GLuint texture) {
+ return gl_->LockDiscardableTextureCHROMIUM(texture);
+}
+
void RasterImplementationGLES::TraceBeginCHROMIUM(const char* category_name,
const char* trace_name) {
gl_->TraceBeginCHROMIUM(category_name, trace_name);
@@ -223,5 +421,15 @@ void RasterImplementationGLES::WaitSyncTokenCHROMIUM(const GLbyte* sync_token) {
gl_->WaitSyncTokenCHROMIUM(sync_token);
}
+GLHelper* RasterImplementationGLES::GetGLHelper() {
+ if (!gl_helper_) {
+ DCHECK(gl_);
+ DCHECK(context_support_);
+ gl_helper_ = std::make_unique<GLHelper>(gl_, context_support_);
+ }
+
+ return gl_helper_.get();
+}
+
} // namespace raster
} // namespace gpu
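
WritePixels on this GLES-backed path uploads caller-provided pixels into the shared image with TexSubImage2D, as shown above; the color type must be one the SkColorTypeTo* helpers accept (kRGBA_8888 or kBGRA_8888). A small usage sketch; `ri`, the destination mailbox and the pixel buffer are placeholders.

#include "gpu/command_buffer/client/raster_interface.h"
#include "gpu/command_buffer/common/mailbox.h"
#include "third_party/skia/include/core/SkImageInfo.h"

void UploadPixels(gpu::raster::RasterInterface* ri,
                  const gpu::Mailbox& dest_mailbox,
                  int width,
                  int height,
                  const void* rgba_pixels) {
  SkImageInfo info = SkImageInfo::Make(width, height, kRGBA_8888_SkColorType,
                                       kPremul_SkAlphaType);
  ri->WritePixels(dest_mailbox, /*dst_x_offset=*/0, /*dst_y_offset=*/0,
                  GL_TEXTURE_2D, info, rgba_pixels);
}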
diff --git a/chromium/gpu/command_buffer/client/raster_implementation_gles.h b/chromium/gpu/command_buffer/client/raster_implementation_gles.h
index 5d93204409b..4295a366f06 100644
--- a/chromium/gpu/command_buffer/client/raster_implementation_gles.h
+++ b/chromium/gpu/command_buffer/client/raster_implementation_gles.h
@@ -15,16 +15,20 @@
#include "gpu/command_buffer/client/raster_interface.h"
#include "gpu/command_buffer/common/capabilities.h"
#include "gpu/raster_export.h"
-#include "third_party/skia/include/core/SkColor.h"
#include "third_party/skia/include/core/SkColorSpace.h"
namespace gpu {
+
+class ContextSupport;
+class GLHelper;
+
namespace raster {
// An implementation of RasterInterface on top of GLES2Interface.
class RASTER_EXPORT RasterImplementationGLES : public RasterInterface {
public:
- explicit RasterImplementationGLES(gles2::GLES2Interface* gl);
+ explicit RasterImplementationGLES(gles2::GLES2Interface* gl,
+ ContextSupport* context_support);
~RasterImplementationGLES() override;
// Command buffer Flush / Finish.
@@ -61,7 +65,16 @@ class RASTER_EXPORT RasterImplementationGLES : public RasterInterface {
GLint x,
GLint y,
GLsizei width,
- GLsizei height) override;
+ GLsizei height,
+ GLboolean unpack_flip_y,
+ GLboolean unpack_premultiply_alpha) override;
+
+ void WritePixels(const gpu::Mailbox& dest_mailbox,
+ int dst_x_offset,
+ int dst_y_offset,
+ GLenum texture_target,
+ const SkImageInfo& src_info,
+ const void* src_pixels) override;
// OOP-Raster
void BeginRasterCHROMIUM(GLuint sk_color,
@@ -87,6 +100,30 @@ class RASTER_EXPORT RasterImplementationGLES : public RasterInterface {
const gfx::ColorSpace& target_color_space,
bool needs_mips) override;
+ void ReadbackARGBPixelsAsync(
+ const gpu::Mailbox& source_mailbox,
+ GLenum source_target,
+ const gfx::Size& dst_size,
+ unsigned char* out,
+ GLenum format,
+ base::OnceCallback<void(bool)> callback) override;
+
+ void ReadbackYUVPixelsAsync(
+ const gpu::Mailbox& source_mailbox,
+ GLenum source_target,
+ const gfx::Size& source_size,
+ const gfx::Rect& output_rect,
+ bool vertically_flip_texture,
+ int y_plane_row_stride_bytes,
+ unsigned char* y_plane_data,
+ int u_plane_row_stride_bytes,
+ unsigned char* u_plane_data,
+ int v_plane_row_stride_bytes,
+ unsigned char* v_plane_data,
+ const gfx::Point& paste_location,
+ base::OnceCallback<void()> release_mailbox,
+ base::OnceCallback<void(bool)> readback_done) override;
+
// Raster via GrContext.
GLuint CreateAndConsumeForGpuRaster(const gpu::Mailbox& mailbox) override;
void DeleteGpuRasterTexture(GLuint texture) override;
@@ -96,6 +133,10 @@ class RASTER_EXPORT RasterImplementationGLES : public RasterInterface {
GLenum mode) override;
void EndSharedImageAccessDirectCHROMIUM(GLuint texture) override;
+ void InitializeDiscardableTextureCHROMIUM(GLuint texture) override;
+ void UnlockDiscardableTextureCHROMIUM(GLuint texture) override;
+ bool LockDiscardableTextureCHROMIUM(GLuint texture) override;
+
void TraceBeginCHROMIUM(const char* category_name,
const char* trace_name) override;
void TraceEndCHROMIUM() override;
@@ -109,7 +150,21 @@ class RASTER_EXPORT RasterImplementationGLES : public RasterInterface {
void WaitSyncTokenCHROMIUM(const GLbyte* sync_token) override;
private:
+ GLHelper* GetGLHelper();
+ void OnReadARGBPixelsAsync(GLuint texture_id,
+ base::OnceCallback<void(bool)> callback,
+ bool success);
+ void OnReadYUVPixelsAsync(GLuint copy_texture_id,
+ base::OnceCallback<void()> on_release_mailbox,
+ base::OnceCallback<void(bool)> readback_done,
+ bool success);
+ void OnReleaseMailbox(GLuint shared_texture_id,
+ base::OnceCallback<void()> release_mailbox);
+
gles2::GLES2Interface* gl_;
+ ContextSupport* context_support_;
+ std::unique_ptr<GLHelper> gl_helper_;
+ base::WeakPtrFactory<RasterImplementationGLES> weak_ptr_factory_{this};
DISALLOW_COPY_AND_ASSIGN(RasterImplementationGLES);
};
diff --git a/chromium/gpu/command_buffer/client/raster_implementation_gles_unittest.cc b/chromium/gpu/command_buffer/client/raster_implementation_gles_unittest.cc
index 93285232dce..fafce6d83ce 100644
--- a/chromium/gpu/command_buffer/client/raster_implementation_gles_unittest.cc
+++ b/chromium/gpu/command_buffer/client/raster_implementation_gles_unittest.cc
@@ -82,7 +82,7 @@ class RasterMockGLES2Interface : public gles2::GLES2InterfaceStub {
MOCK_METHOD1(ActiveTexture, void(GLenum texture));
MOCK_METHOD1(GenerateMipmap, void(GLenum target));
MOCK_METHOD2(SetColorSpaceMetadataCHROMIUM,
- void(GLuint texture_id, GLColorSpace color_space));
+ void(GLuint texture_id, GLcolorSpace color_space));
MOCK_METHOD3(TexParameteri, void(GLenum target, GLenum pname, GLint param));
// Mailboxes.
@@ -260,7 +260,7 @@ class RasterImplementationGLESTest : public testing::Test {
void SetUp() override {
gl_ = std::make_unique<RasterMockGLES2Interface>();
- ri_ = std::make_unique<RasterImplementationGLES>(gl_.get());
+ ri_ = std::make_unique<RasterImplementationGLES>(gl_.get(), &support_);
}
void TearDown() override {}
diff --git a/chromium/gpu/command_buffer/client/raster_interface.h b/chromium/gpu/command_buffer/client/raster_interface.h
index 250272d867e..209dd6552d8 100644
--- a/chromium/gpu/command_buffer/client/raster_interface.h
+++ b/chromium/gpu/command_buffer/client/raster_interface.h
@@ -12,6 +12,8 @@
#include "gpu/command_buffer/client/interface_base.h"
#include "gpu/command_buffer/common/sync_token.h"
+struct SkImageInfo;
+
namespace cc {
class DisplayItemList;
class ImageProvider;
@@ -19,6 +21,7 @@ class ImageProvider;
namespace gfx {
class ColorSpace;
+class Point;
class Rect;
class Size;
class Vector2dF;
@@ -26,7 +29,7 @@ enum class BufferUsage;
} // namespace gfx
extern "C" typedef struct _ClientBuffer* ClientBuffer;
-extern "C" typedef struct _GLColorSpace* GLColorSpace;
+extern "C" typedef const struct _GLcolorSpace* GLcolorSpace;
namespace gpu {
@@ -49,7 +52,17 @@ class RasterInterface : public InterfaceBase {
GLint x,
GLint y,
GLsizei width,
- GLsizei height) = 0;
+ GLsizei height,
+ GLboolean unpack_flip_y,
+ GLboolean unpack_premultiply_alpha) = 0;
+
+ virtual void WritePixels(const gpu::Mailbox& dest_mailbox,
+ int dst_x_offset,
+ int dst_y_offset,
+ GLenum texture_target,
+ const SkImageInfo& src_info,
+ const void* src_pixels) = 0;
+
// OOP-Raster
virtual void BeginRasterCHROMIUM(GLuint sk_color,
GLuint msaa_sample_count,
@@ -81,6 +94,44 @@ class RasterInterface : public InterfaceBase {
const gfx::ColorSpace& target_color_space,
bool needs_mips) = 0;
+ // Starts an asynchronous readback of |source_mailbox| into caller-owned
+  // memory |out|. Currently supports the GL_RGBA format, and GL_BGRA_EXT when
+  // the GL_EXT_read_format_bgra extension is available. |out| must remain valid
+ // until |readback_done| is called with a bool indicating if the readback was
+ // successful. On success |out| will contain the pixel data copied back from
+ // the GPU process.
+ virtual void ReadbackARGBPixelsAsync(
+ const gpu::Mailbox& source_mailbox,
+ GLenum source_target,
+ const gfx::Size& dst_size,
+ unsigned char* out,
+ GLenum format,
+ base::OnceCallback<void(bool)> readback_done) = 0;
+
+  // Starts an asynchronous readback and translation of RGBA |source_mailbox|
+ // into caller-owned |[yuv]_plane_data|. All provided pointers must remain
+ // valid until |readback_done| is called with a bool indicating if readback
+ // was successful. On success the provided memory will contain pixel data in
+ // I420 format copied from |source_mailbox| in the GPU process.
+ // |release_mailbox| is called when all operations requiring a valid mailbox
+ // have completed, indicating that the caller can perform any necessary
+ // cleanup.
+ virtual void ReadbackYUVPixelsAsync(
+ const gpu::Mailbox& source_mailbox,
+ GLenum source_target,
+ const gfx::Size& source_size,
+ const gfx::Rect& output_rect,
+ bool vertically_flip_texture,
+ int y_plane_row_stride_bytes,
+ unsigned char* y_plane_data,
+ int u_plane_row_stride_bytes,
+ unsigned char* u_plane_data,
+ int v_plane_row_stride_bytes,
+ unsigned char* v_plane_data,
+ const gfx::Point& paste_location,
+ base::OnceCallback<void()> release_mailbox,
+ base::OnceCallback<void(bool)> readback_done) = 0;
+
// Raster via GrContext.
virtual GLuint CreateAndConsumeForGpuRaster(const gpu::Mailbox& mailbox) = 0;
virtual void DeleteGpuRasterTexture(GLuint texture) = 0;
@@ -90,6 +141,10 @@ class RasterInterface : public InterfaceBase {
GLenum mode) = 0;
virtual void EndSharedImageAccessDirectCHROMIUM(GLuint texture) = 0;
+ virtual void InitializeDiscardableTextureCHROMIUM(GLuint texture) = 0;
+ virtual void UnlockDiscardableTextureCHROMIUM(GLuint texture) = 0;
+ virtual bool LockDiscardableTextureCHROMIUM(GLuint texture) = 0;
+
// Include the auto-generated part of this class. We split this because
// it means we can easily edit the non-auto generated parts right here in
// this file instead of having to edit some template or the code generator.
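Sketch (not part of the patch): the I420 plane sizing a caller of
ReadbackYUVPixelsAsync is expected to provide, per the comment above. The
source dimensions, rounding, and container choice are illustrative assumptions.

// Y is full resolution; U and V are subsampled by two in both dimensions.
const gfx::Size source_size(640, 480);
const int y_stride = source_size.width();
const int uv_stride = (source_size.width() + 1) / 2;
const int uv_height = (source_size.height() + 1) / 2;
std::vector<unsigned char> y_plane(y_stride * source_size.height());
std::vector<unsigned char> u_plane(uv_stride * uv_height);
std::vector<unsigned char> v_plane(uv_stride * uv_height);
// All three buffers must remain valid until |readback_done| is invoked;
// |release_mailbox| fires separately once the mailbox itself is no longer
// needed.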
diff --git a/chromium/gpu/command_buffer/client/shared_image_interface.cc b/chromium/gpu/command_buffer/client/shared_image_interface.cc
new file mode 100644
index 00000000000..8b340c00ca3
--- /dev/null
+++ b/chromium/gpu/command_buffer/client/shared_image_interface.cc
@@ -0,0 +1,13 @@
+// Copyright 2020 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "gpu/command_buffer/client/shared_image_interface.h"
+
+namespace gpu {
+
+uint32_t SharedImageInterface::UsageForMailbox(const Mailbox& mailbox) {
+ return 0u;
+}
+
+} // namespace gpu
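Note (not part of the patch): the default UsageForMailbox() above returns 0 so
existing SharedImageInterface implementations keep compiling. A hedged sketch
of an override; MySharedImageInterface and |usage_map_| are illustrative names,
not something this patch adds.

uint32_t MySharedImageInterface::UsageForMailbox(const Mailbox& mailbox) {
  // Return the usage flags recorded when the shared image was created.
  auto it = usage_map_.find(mailbox);
  return it == usage_map_.end() ? 0u : it->second;
}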
diff --git a/chromium/gpu/command_buffer/client/shared_image_interface.h b/chromium/gpu/command_buffer/client/shared_image_interface.h
index a18d0dfbca8..18369b51acd 100644
--- a/chromium/gpu/command_buffer/client/shared_image_interface.h
+++ b/chromium/gpu/command_buffer/client/shared_image_interface.h
@@ -7,12 +7,19 @@
#include "base/compiler_specific.h"
#include "base/containers/span.h"
+#include "base/memory/scoped_refptr.h"
#include "build/build_config.h"
#include "components/viz/common/resources/resource_format.h"
#include "gpu/command_buffer/common/mailbox.h"
#include "gpu/command_buffer/common/sync_token.h"
+#include "gpu/gpu_export.h"
+#include "gpu/ipc/common/surface_handle.h"
#include "ui/gfx/buffer_types.h"
+
+#if !defined(OS_NACL)
+#include "ui/gfx/native_pixmap.h"
#include "ui/gfx/native_pixmap_handle.h"
+#endif
#if defined(OS_FUCHSIA)
#include <lib/zx/channel.h>
@@ -33,7 +40,7 @@ class GpuMemoryBufferManager;
// It is asynchronous in the same sense as GLES2Interface or RasterInterface in
// that commands are executed asynchronously on the service side, but can be
// synchronized using SyncTokens. See //docs/design/gpu_synchronization.md.
-class SharedImageInterface {
+class GPU_EXPORT SharedImageInterface {
public:
virtual ~SharedImageInterface() {}
@@ -48,10 +55,12 @@ class SharedImageInterface {
// The |SharedImageInterface| keeps ownership of the image until
// |DestroySharedImage| is called or the interface itself is destroyed (e.g.
// the GPU channel is lost).
- virtual Mailbox CreateSharedImage(viz::ResourceFormat format,
- const gfx::Size& size,
- const gfx::ColorSpace& color_space,
- uint32_t usage) = 0;
+ virtual Mailbox CreateSharedImage(
+ viz::ResourceFormat format,
+ const gfx::Size& size,
+ const gfx::ColorSpace& color_space,
+ uint32_t usage,
+ gpu::SurfaceHandle surface_handle = gpu::kNullSurfaceHandle) = 0;
// Same behavior as the above, except that this version takes |pixel_data|
// which is used to populate the SharedImage. |pixel_data| should have the
@@ -156,6 +165,22 @@ class SharedImageInterface {
// Flush the SharedImageInterface, issuing any deferred IPCs.
virtual void Flush() = 0;
+
+#if !defined(OS_NACL)
+  // Returns the NativePixmap backing |mailbox|. This is a privileged API: only
+  // callers living inside the GPU process can retrieve the NativePixmap;
+  // otherwise null is returned. Also returns null if the SharedImage doesn't
+  // exist or is not backed by a NativePixmap. The caller is
+ // not expected to read from or write into the provided NativePixmap because
+ // it can be modified at any time. The primary purpose of this method is to
+ // facilitate pageflip testing on the viz thread.
+ virtual scoped_refptr<gfx::NativePixmap> GetNativePixmap(
+ const gpu::Mailbox& mailbox) = 0;
+#endif
+
+  // Returns the usage flags for the given |mailbox|, which must have been
+  // created using a SharedImageInterface on the same channel.
+ virtual uint32_t UsageForMailbox(const Mailbox& mailbox);
};
} // namespace gpu
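Sketch (not part of the patch): calling the widened CreateSharedImage().
Existing call sites keep compiling because |surface_handle| defaults to
gpu::kNullSurfaceHandle. The usage flag, size, and color space below are
illustrative assumptions.

gpu::Mailbox mailbox = sii->CreateSharedImage(
    viz::ResourceFormat::RGBA_8888, gfx::Size(256, 256),
    gfx::ColorSpace::CreateSRGB(), gpu::SHARED_IMAGE_USAGE_DISPLAY,
    surface_handle);  // e.g. gpu::kNullSurfaceHandle for non-displayable use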
diff --git a/chromium/gpu/command_buffer/client/webgpu_cmd_helper.h b/chromium/gpu/command_buffer/client/webgpu_cmd_helper.h
index 6a2390a5ba0..ea85536aec3 100644
--- a/chromium/gpu/command_buffer/client/webgpu_cmd_helper.h
+++ b/chromium/gpu/command_buffer/client/webgpu_cmd_helper.h
@@ -15,7 +15,7 @@
namespace gpu {
namespace webgpu {
-// A class that helps write GL command buffers.
+// A class that helps write WebGPU command buffers.
class WEBGPU_EXPORT WebGPUCmdHelper : public CommandBufferHelper {
public:
explicit WebGPUCmdHelper(CommandBuffer* command_buffer);
diff --git a/chromium/gpu/command_buffer/client/webgpu_cmd_helper_autogen.h b/chromium/gpu/command_buffer/client/webgpu_cmd_helper_autogen.h
index 9d0bec45260..750268fc635 100644
--- a/chromium/gpu/command_buffer/client/webgpu_cmd_helper_autogen.h
+++ b/chromium/gpu/command_buffer/client/webgpu_cmd_helper_autogen.h
@@ -11,16 +11,17 @@
#ifndef GPU_COMMAND_BUFFER_CLIENT_WEBGPU_CMD_HELPER_AUTOGEN_H_
#define GPU_COMMAND_BUFFER_CLIENT_WEBGPU_CMD_HELPER_AUTOGEN_H_
-void DawnCommands(uint32_t commands_shm_id,
+void DawnCommands(uint64_t device_client_id,
+ uint32_t commands_shm_id,
uint32_t commands_shm_offset,
uint32_t size) {
webgpu::cmds::DawnCommands* c = GetCmdSpace<webgpu::cmds::DawnCommands>();
if (c) {
- c->Init(commands_shm_id, commands_shm_offset, size);
+ c->Init(device_client_id, commands_shm_id, commands_shm_offset, size);
}
}
-void AssociateMailboxImmediate(GLuint device_id,
+void AssociateMailboxImmediate(GLuint64 device_client_id,
GLuint device_generation,
GLuint id,
GLuint generation,
@@ -31,19 +32,22 @@ void AssociateMailboxImmediate(GLuint device_id,
GetImmediateCmdSpaceTotalSize<webgpu::cmds::AssociateMailboxImmediate>(
size);
if (c) {
- c->Init(device_id, device_generation, id, generation, usage, mailbox);
+ c->Init(device_client_id, device_generation, id, generation, usage,
+ mailbox);
}
}
-void DissociateMailbox(GLuint texture_id, GLuint texture_generation) {
+void DissociateMailbox(GLuint64 device_client_id,
+ GLuint texture_id,
+ GLuint texture_generation) {
webgpu::cmds::DissociateMailbox* c =
GetCmdSpace<webgpu::cmds::DissociateMailbox>();
if (c) {
- c->Init(texture_id, texture_generation);
+ c->Init(device_client_id, texture_id, texture_generation);
}
}
-void RequestAdapter(uint32_t request_adapter_serial,
+void RequestAdapter(uint64_t request_adapter_serial,
uint32_t power_preference) {
webgpu::cmds::RequestAdapter* c = GetCmdSpace<webgpu::cmds::RequestAdapter>();
if (c) {
@@ -51,15 +55,23 @@ void RequestAdapter(uint32_t request_adapter_serial,
}
}
-void RequestDevice(uint32_t adapter_service_id,
+void RequestDevice(uint64_t device_client_id,
+ uint32_t adapter_service_id,
uint32_t request_device_properties_shm_id,
uint32_t request_device_properties_shm_offset,
uint32_t request_device_properties_size) {
webgpu::cmds::RequestDevice* c = GetCmdSpace<webgpu::cmds::RequestDevice>();
if (c) {
- c->Init(adapter_service_id, request_device_properties_shm_id,
- request_device_properties_shm_offset,
- request_device_properties_size);
+ c->Init(
+ device_client_id, adapter_service_id, request_device_properties_shm_id,
+ request_device_properties_shm_offset, request_device_properties_size);
+ }
+}
+
+void RemoveDevice(uint64_t device_client_id) {
+ webgpu::cmds::RemoveDevice* c = GetCmdSpace<webgpu::cmds::RemoveDevice>();
+ if (c) {
+ c->Init(device_client_id);
}
}
diff --git a/chromium/gpu/command_buffer/client/webgpu_implementation.cc b/chromium/gpu/command_buffer/client/webgpu_implementation.cc
index 185b3bd9935..b43f4fa1d6d 100644
--- a/chromium/gpu/command_buffer/client/webgpu_implementation.cc
+++ b/chromium/gpu/command_buffer/client/webgpu_implementation.cc
@@ -18,6 +18,149 @@
namespace gpu {
namespace webgpu {
+#if BUILDFLAG(USE_DAWN)
+WebGPUCommandSerializer::WebGPUCommandSerializer(
+ DawnDeviceClientID device_client_id,
+ WebGPUCmdHelper* helper,
+ DawnClientMemoryTransferService* memory_transfer_service)
+ : device_client_id_(device_client_id),
+ helper_(helper),
+ memory_transfer_service_(memory_transfer_service) {
+ c2s_transfer_buffer_ = std::make_unique<TransferBuffer>(helper_);
+
+ const SharedMemoryLimits& limits = SharedMemoryLimits::ForWebGPUContext();
+ c2s_transfer_buffer_->Initialize(
+ limits.start_transfer_buffer_size, ImplementationBase::kStartingOffset,
+ limits.min_transfer_buffer_size, limits.max_transfer_buffer_size,
+ ImplementationBase::kAlignment);
+ DCHECK(helper_);
+ c2s_buffer_ = std::make_unique<ScopedTransferBufferPtr>(
+ helper_, c2s_transfer_buffer_.get());
+
+ c2s_buffer_default_size_ = limits.start_transfer_buffer_size;
+ DCHECK_GT(c2s_buffer_default_size_, 0u);
+
+ DCHECK(memory_transfer_service_);
+ dawn_wire::WireClientDescriptor descriptor = {};
+ descriptor.serializer = this;
+ descriptor.memoryTransferService = memory_transfer_service_;
+ wire_client_ = std::make_unique<dawn_wire::WireClient>(descriptor);
+}
+
+WebGPUCommandSerializer::~WebGPUCommandSerializer() {}
+
+// This function may only be called once per WebGPUCommandSerializer object,
+// and must be called before any call to GetCmdSpace().
+void WebGPUCommandSerializer::RequestDeviceCreation(
+ uint32_t requested_adapter_id,
+ const WGPUDeviceProperties& requested_device_properties) {
+ DCHECK(!c2s_buffer_->valid());
+ DCHECK_EQ(0u, c2s_put_offset_);
+
+ size_t serialized_device_properties_size =
+ dawn_wire::SerializedWGPUDevicePropertiesSize(
+ &requested_device_properties);
+ DCHECK_NE(0u, serialized_device_properties_size);
+
+ DCHECK_LE(serialized_device_properties_size,
+ c2s_transfer_buffer_->GetMaxSize());
+ c2s_buffer_->Reset(serialized_device_properties_size);
+
+ dawn_wire::SerializeWGPUDeviceProperties(
+ &requested_device_properties,
+ reinterpret_cast<char*>(c2s_buffer_->address()));
+
+ helper_->RequestDevice(device_client_id_, requested_adapter_id,
+ c2s_buffer_->shm_id(), c2s_buffer_->offset(),
+ serialized_device_properties_size);
+ c2s_buffer_->Release();
+
+ helper_->Flush();
+}
+
+void* WebGPUCommandSerializer::GetCmdSpace(size_t size) {
+ // The buffer size must be initialized before any commands are serialized.
+ if (c2s_buffer_default_size_ == 0u) {
+ NOTREACHED();
+ return nullptr;
+ }
+
+ base::CheckedNumeric<uint32_t> checked_next_offset(c2s_put_offset_);
+ checked_next_offset += size;
+
+ uint32_t next_offset;
+ bool next_offset_valid = checked_next_offset.AssignIfValid(&next_offset);
+
+ // If the buffer does not have enough space, or if the buffer is not
+ // initialized, flush and reset the command stream.
+ if (!next_offset_valid || next_offset > c2s_buffer_->size() ||
+ !c2s_buffer_->valid()) {
+ Flush();
+
+ uint32_t max_allocation = c2s_transfer_buffer_->GetMaxSize();
+ // TODO(crbug.com/951558): Handle command chunking or ensure commands aren't
+ // this large.
+ CHECK_LE(size, max_allocation);
+
+ uint32_t allocation_size =
+ std::max(c2s_buffer_default_size_, static_cast<uint32_t>(size));
+ TRACE_EVENT1(TRACE_DISABLED_BY_DEFAULT("gpu.dawn"),
+ "WebGPUCommandSerializer::GetCmdSpace", "bytes",
+ allocation_size);
+ c2s_buffer_->Reset(allocation_size);
+ c2s_put_offset_ = 0;
+ next_offset = size;
+
+ // TODO(crbug.com/951558): Handle OOM.
+ CHECK(c2s_buffer_->valid());
+ CHECK_LE(size, c2s_buffer_->size());
+ }
+
+ DCHECK(c2s_buffer_->valid());
+ uint8_t* ptr = static_cast<uint8_t*>(c2s_buffer_->address());
+ ptr += c2s_put_offset_;
+
+ c2s_put_offset_ = next_offset;
+ return ptr;
+}
+
+bool WebGPUCommandSerializer::Flush() {
+ if (c2s_buffer_->valid()) {
+ TRACE_EVENT1(TRACE_DISABLED_BY_DEFAULT("gpu.dawn"),
+ "WebGPUCommandSerializer::Flush", "bytes", c2s_put_offset_);
+
+ TRACE_EVENT_FLOW_BEGIN0(
+ TRACE_DISABLED_BY_DEFAULT("gpu.dawn"), "DawnCommands",
+ (static_cast<uint64_t>(c2s_buffer_->shm_id()) << 32) +
+ c2s_buffer_->offset());
+
+ c2s_buffer_->Shrink(c2s_put_offset_);
+ helper_->DawnCommands(device_client_id_, c2s_buffer_->shm_id(),
+ c2s_buffer_->offset(), c2s_put_offset_);
+ c2s_put_offset_ = 0;
+ c2s_buffer_->Release();
+ }
+
+ memory_transfer_service_->FreeHandlesPendingToken(helper_->InsertToken());
+ return true;
+}
+
+WGPUDevice WebGPUCommandSerializer::GetDevice() const {
+ return wire_client_->GetDevice();
+}
+
+ReservedTexture WebGPUCommandSerializer::ReserveTexture() {
+ dawn_wire::ReservedTexture reservation =
+ wire_client_->ReserveTexture(GetDevice());
+ return {reservation.texture, reservation.id, reservation.generation};
+}
+
+bool WebGPUCommandSerializer::HandleCommands(const char* commands,
+ size_t command_size) {
+ return wire_client_->HandleCommands(commands, command_size);
+}
+#endif
+
// Include the auto-generated part of this file. We split this because it means
// we can easily edit the non-auto generated parts right here in this file
// instead of having to edit some template or the code generator.
@@ -28,19 +171,20 @@ WebGPUImplementation::WebGPUImplementation(
TransferBufferInterface* transfer_buffer,
GpuControl* gpu_control)
: ImplementationBase(helper, transfer_buffer, gpu_control),
- helper_(helper),
- c2s_buffer_(helper, transfer_buffer) {
-}
+ helper_(helper) {}
WebGPUImplementation::~WebGPUImplementation() {
+#if BUILDFLAG(USE_DAWN)
// Wait for all commands to finish or we may free shared memory while
// commands are still in flight.
- Flush();
+ FlushAllCommandSerializers();
+#endif
+
helper_->Finish();
#if BUILDFLAG(USE_DAWN)
// Now that commands are finished, free the wire client.
- wire_client_.reset();
+ ClearAllCommandSerializers();
// All client-side Dawn objects are now destroyed.
// Shared memory allocations for buffers that were still mapped at the time
@@ -58,20 +202,11 @@ gpu::ContextResult WebGPUImplementation::Initialize(
return result;
}
- c2s_buffer_default_size_ = limits.start_transfer_buffer_size;
- DCHECK_GT(c2s_buffer_default_size_, 0u);
-
#if BUILDFLAG(USE_DAWN)
- memory_transfer_service_.reset(
- new DawnClientMemoryTransferService(mapped_memory_.get()));
-
- dawn_wire::WireClientDescriptor descriptor = {};
- descriptor.serializer = this;
- descriptor.memoryTransferService = memory_transfer_service_.get();
+ memory_transfer_service_ =
+ std::make_unique<DawnClientMemoryTransferService>(mapped_memory_.get());
- wire_client_.reset(new dawn_wire::WireClient(descriptor));
-
- procs_ = wire_client_->GetProcs();
+ procs_ = dawn_wire::WireClient::GetProcs();
#endif
return gpu::ContextResult::kSuccess;
@@ -208,11 +343,19 @@ void WebGPUImplementation::SetGLError(GLenum error,
}
// GpuControlClient implementation.
+// TODO(jiawei.shao@intel.com): do other clean-ups when the context is lost.
void WebGPUImplementation::OnGpuControlLostContext() {
- NOTIMPLEMENTED();
+ OnGpuControlLostContextMaybeReentrant();
+
+ // This should never occur more than once.
+ DCHECK(!lost_context_callback_run_);
+ lost_context_callback_run_ = true;
+ if (!lost_context_callback_.is_null()) {
+ std::move(lost_context_callback_).Run();
+ }
}
void WebGPUImplementation::OnGpuControlLostContextMaybeReentrant() {
- NOTIMPLEMENTED();
+ lost_ = true;
}
void WebGPUImplementation::OnGpuControlErrorMessage(const char* message,
int32_t id) {
@@ -242,30 +385,51 @@ void WebGPUImplementation::OnGpuControlReturnData(
if (data.size() <= sizeof(cmds::DawnReturnDataHeader)) {
// TODO(jiawei.shao@intel.com): Lose the context.
NOTREACHED();
+ return;
}
const cmds::DawnReturnDataHeader& dawnReturnDataHeader =
*reinterpret_cast<const cmds::DawnReturnDataHeader*>(data.data());
- const uint8_t* dawnReturnDataBody =
- data.data() + sizeof(cmds::DawnReturnDataHeader);
- size_t dawnReturnDataSize = data.size() - sizeof(cmds::DawnReturnDataHeader);
-
switch (dawnReturnDataHeader.return_data_type) {
- case DawnReturnDataType::kDawnCommands:
- if (!wire_client_->HandleCommands(
- reinterpret_cast<const char*>(dawnReturnDataBody),
- dawnReturnDataSize)) {
+ case DawnReturnDataType::kDawnCommands: {
+ if (data.size() < sizeof(cmds::DawnReturnCommandsInfo)) {
+ // TODO(jiawei.shao@intel.com): Lose the context.
+ NOTREACHED();
+ break;
+ }
+
+ const cmds::DawnReturnCommandsInfo* dawn_return_commands_info =
+ reinterpret_cast<const cmds::DawnReturnCommandsInfo*>(data.data());
+ DawnDeviceClientID device_client_id =
+ dawn_return_commands_info->header.device_client_id;
+ WebGPUCommandSerializer* command_serializer =
+ GetCommandSerializerWithDeviceClientID(device_client_id);
+ if (!command_serializer) {
+ // TODO(jiawei.shao@intel.com): Lose the context.
+ NOTREACHED();
+ break;
+ }
+ if (!command_serializer->HandleCommands(
+ reinterpret_cast<const char*>(
+ dawn_return_commands_info->deserialized_buffer),
+ data.size() - offsetof(cmds::DawnReturnCommandsInfo,
+ deserialized_buffer))) {
// TODO(enga): Lose the context.
NOTREACHED();
}
- break;
+ } break;
case DawnReturnDataType::kRequestedDawnAdapterProperties: {
+ if (data.size() < sizeof(cmds::DawnReturnAdapterInfo)) {
+ // TODO(jiawei.shao@intel.com): Lose the context.
+ NOTREACHED();
+ break;
+ }
+
const cmds::DawnReturnAdapterInfo* returned_adapter_info =
- reinterpret_cast<const cmds::DawnReturnAdapterInfo*>(
- dawnReturnDataBody);
+ reinterpret_cast<const cmds::DawnReturnAdapterInfo*>(data.data());
- GLuint request_adapter_serial =
- returned_adapter_info->adapter_ids.request_adapter_serial;
+ DawnRequestAdapterSerial request_adapter_serial =
+ returned_adapter_info->header.request_adapter_serial;
auto request_callback_iter =
request_adapter_callback_map_.find(request_adapter_serial);
if (request_callback_iter == request_adapter_callback_map_.end()) {
@@ -275,7 +439,7 @@ void WebGPUImplementation::OnGpuControlReturnData(
}
auto& request_callback = request_callback_iter->second;
GLuint adapter_service_id =
- returned_adapter_info->adapter_ids.adapter_service_id;
+ returned_adapter_info->header.adapter_service_id;
WGPUDeviceProperties adapter_properties = {};
const volatile char* deserialized_buffer =
reinterpret_cast<const volatile char*>(
@@ -285,6 +449,38 @@ void WebGPUImplementation::OnGpuControlReturnData(
std::move(request_callback).Run(adapter_service_id, adapter_properties);
request_adapter_callback_map_.erase(request_callback_iter);
} break;
+ case DawnReturnDataType::kRequestedDeviceReturnInfo: {
+ if (data.size() < sizeof(cmds::DawnReturnRequestDeviceInfo)) {
+ // TODO(jiawei.shao@intel.com): Lose the context.
+ NOTREACHED();
+ break;
+ }
+
+ const cmds::DawnReturnRequestDeviceInfo* returned_request_device_info =
+ reinterpret_cast<const cmds::DawnReturnRequestDeviceInfo*>(
+ data.data());
+
+ DawnDeviceClientID device_client_id =
+ returned_request_device_info->device_client_id;
+ auto request_callback_iter =
+ request_device_callback_map_.find(device_client_id);
+ if (request_callback_iter == request_device_callback_map_.end()) {
+ // TODO(jiawei.shao@intel.com): Lose the context.
+ NOTREACHED();
+ break;
+ }
+ auto& request_callback = request_callback_iter->second;
+ bool is_request_device_success =
+ returned_request_device_info->is_request_device_success;
+ if (!is_request_device_success) {
+ auto iter = command_serializers_.find(device_client_id);
+ DCHECK(iter != command_serializers_.end());
+ command_serializers_.erase(iter);
+ }
+ std::move(request_callback)
+ .Run(is_request_device_success, device_client_id);
+ request_device_callback_map_.erase(request_callback_iter);
+ } break;
default:
// TODO(jiawei.shao@intel.com): Lose the context.
NOTREACHED();
@@ -293,105 +489,69 @@ void WebGPUImplementation::OnGpuControlReturnData(
#endif
}
-void* WebGPUImplementation::GetCmdSpace(size_t size) {
- // The buffer size must be initialized before any commands are serialized.
- if (c2s_buffer_default_size_ == 0u) {
- NOTREACHED();
- return nullptr;
- }
-
- base::CheckedNumeric<uint32_t> checked_next_offset(c2s_put_offset_);
- checked_next_offset += size;
-
- uint32_t next_offset;
- bool next_offset_valid = checked_next_offset.AssignIfValid(&next_offset);
-
- // If the buffer does not have enough space, or if the buffer is not
- // initialized, flush and reset the command stream.
- if (!next_offset_valid || next_offset > c2s_buffer_.size() ||
- !c2s_buffer_.valid()) {
- Flush();
-
- uint32_t max_allocation = transfer_buffer_->GetMaxSize();
- // TODO(crbug.com/951558): Handle command chunking or ensure commands aren't
- // this large.
- CHECK_LE(size, max_allocation);
-
- uint32_t allocation_size =
- std::max(c2s_buffer_default_size_, static_cast<uint32_t>(size));
- TRACE_EVENT1(TRACE_DISABLED_BY_DEFAULT("gpu.dawn"),
- "WebGPUImplementation::GetCmdSpace", "bytes", allocation_size);
- c2s_buffer_.Reset(allocation_size);
- c2s_put_offset_ = 0;
- next_offset = size;
+const DawnProcTable& WebGPUImplementation::GetProcs() const {
+#if !BUILDFLAG(USE_DAWN)
+ NOTREACHED();
+#endif
+ return procs_;
+}
- // TODO(crbug.com/951558): Handle OOM.
- CHECK(c2s_buffer_.valid());
- CHECK_LE(size, c2s_buffer_.size());
+#if BUILDFLAG(USE_DAWN)
+WebGPUCommandSerializer*
+WebGPUImplementation::GetCommandSerializerWithDeviceClientID(
+ DawnDeviceClientID device_client_id) const {
+ auto command_serializer = command_serializers_.find(device_client_id);
+ if (command_serializer == command_serializers_.end()) {
+ return nullptr;
}
-
- DCHECK(c2s_buffer_.valid());
- uint8_t* ptr = static_cast<uint8_t*>(c2s_buffer_.address());
- ptr += c2s_put_offset_;
-
- c2s_put_offset_ = next_offset;
- return ptr;
+ return command_serializer->second.get();
}
-bool WebGPUImplementation::Flush() {
- if (c2s_buffer_.valid()) {
- TRACE_EVENT1(TRACE_DISABLED_BY_DEFAULT("gpu.dawn"),
- "WebGPUImplementation::Flush", "bytes", c2s_put_offset_);
-
- TRACE_EVENT_FLOW_BEGIN0(
- TRACE_DISABLED_BY_DEFAULT("gpu.dawn"), "DawnCommands",
- (static_cast<uint64_t>(c2s_buffer_.shm_id()) << 32) +
- c2s_buffer_.offset());
-
- c2s_buffer_.Shrink(c2s_put_offset_);
- helper_->DawnCommands(c2s_buffer_.shm_id(), c2s_buffer_.offset(),
- c2s_put_offset_);
- c2s_put_offset_ = 0;
- c2s_buffer_.Release();
+void WebGPUImplementation::FlushAllCommandSerializers() {
+ for (auto& iter : command_serializers_) {
+ iter.second->Flush();
}
-#if BUILDFLAG(USE_DAWN)
- memory_transfer_service_->FreeHandlesPendingToken(helper_->InsertToken());
-#endif
- return true;
}
-const DawnProcTable& WebGPUImplementation::GetProcs() const {
-#if !BUILDFLAG(USE_DAWN)
- NOTREACHED();
-#endif
- return procs_;
+void WebGPUImplementation::ClearAllCommandSerializers() {
+ command_serializers_.clear();
}
+#endif
void WebGPUImplementation::FlushCommands() {
- Flush();
+#if BUILDFLAG(USE_DAWN)
+ FlushAllCommandSerializers();
+#endif
helper_->Flush();
}
-WGPUDevice WebGPUImplementation::GetDefaultDevice() {
+WGPUDevice WebGPUImplementation::GetDevice(
+ DawnDeviceClientID device_client_id) {
#if BUILDFLAG(USE_DAWN)
- return wire_client_->GetDevice();
+ WebGPUCommandSerializer* command_serializer =
+ GetCommandSerializerWithDeviceClientID(device_client_id);
+ DCHECK(command_serializer);
+ return command_serializer->GetDevice();
#else
NOTREACHED();
return {};
#endif
}
-ReservedTexture WebGPUImplementation::ReserveTexture(WGPUDevice device) {
+ReservedTexture WebGPUImplementation::ReserveTexture(
+ DawnDeviceClientID device_client_id) {
#if BUILDFLAG(USE_DAWN)
- dawn_wire::ReservedTexture reservation = wire_client_->ReserveTexture(device);
- return {reservation.texture, reservation.id, reservation.generation};
+ WebGPUCommandSerializer* command_serializer =
+ GetCommandSerializerWithDeviceClientID(device_client_id);
+ DCHECK(command_serializer);
+ return command_serializer->ReserveTexture();
#else
NOTREACHED();
return {};
#endif
}
-uint32_t WebGPUImplementation::NextRequestAdapterSerial() {
+DawnRequestAdapterSerial WebGPUImplementation::NextRequestAdapterSerial() {
return ++request_adapter_serial_;
}
@@ -399,14 +559,16 @@ bool WebGPUImplementation::RequestAdapterAsync(
PowerPreference power_preference,
base::OnceCallback<void(uint32_t, const WGPUDeviceProperties&)>
request_adapter_callback) {
- uint32_t request_adapter_serial = NextRequestAdapterSerial();
-
- // Avoid the overflow of request_adapter_serial and old slot being reused.
- if (request_adapter_callback_map_.find(request_adapter_serial) !=
- request_adapter_callback_map_.end()) {
+ if (lost_) {
return false;
}
+  // request_adapter_serial is a uint64 that is only ever incremented by one,
+  // so in practice it cannot overflow and reuse an old map slot.
+ DawnRequestAdapterSerial request_adapter_serial = NextRequestAdapterSerial();
+ DCHECK(request_adapter_callback_map_.find(request_adapter_serial) ==
+ request_adapter_callback_map_.end());
+
helper_->RequestAdapter(request_adapter_serial,
static_cast<uint32_t>(power_preference));
helper_->Flush();
@@ -417,33 +579,37 @@ bool WebGPUImplementation::RequestAdapterAsync(
return true;
}
-bool WebGPUImplementation::RequestDevice(
+DawnDeviceClientID WebGPUImplementation::NextDeviceClientID() {
+ return ++device_client_id_;
+}
+
+bool WebGPUImplementation::RequestDeviceAsync(
uint32_t requested_adapter_id,
- const WGPUDeviceProperties* requested_device_properties) {
+ const WGPUDeviceProperties& requested_device_properties,
+ base::OnceCallback<void(bool, DawnDeviceClientID)>
+ request_device_callback) {
#if BUILDFLAG(USE_DAWN)
- if (!requested_device_properties) {
- helper_->RequestDevice(requested_adapter_id, 0, 0, 0);
- return true;
+ if (lost_) {
+ return false;
}
- size_t serialized_device_properties_size =
- dawn_wire::SerializedWGPUDevicePropertiesSize(
- requested_device_properties);
- DCHECK_NE(0u, serialized_device_properties_size);
+  // device_client_id is a uint64 that is only ever incremented by one, so in
+  // practice it cannot overflow and reuse an old map slot.
+ DawnDeviceClientID device_client_id = NextDeviceClientID();
+ DCHECK(request_device_callback_map_.find(device_client_id) ==
+ request_device_callback_map_.end());
+
+ DCHECK(command_serializers_.find(device_client_id) ==
+ command_serializers_.end());
+ command_serializers_[device_client_id] =
+ std::make_unique<WebGPUCommandSerializer>(device_client_id, helper_,
+ memory_transfer_service_.get());
+ request_device_callback_map_[device_client_id] =
+ std::move(request_device_callback);
+
+ command_serializers_[device_client_id]->RequestDeviceCreation(
+ requested_adapter_id, requested_device_properties);
- // Both transfer_buffer and c2s_buffer_ are created with transfer_buffer_,
- // so we need to make c2s_buffer_ clean before transferring
- // requested_device_properties with transfer_buffer.
- Flush();
- ScopedTransferBufferPtr transfer_buffer(serialized_device_properties_size,
- helper_, transfer_buffer_);
- dawn_wire::SerializeWGPUDeviceProperties(
- requested_device_properties,
- reinterpret_cast<char*>(transfer_buffer.address()));
- helper_->RequestDevice(requested_adapter_id, transfer_buffer.shm_id(),
- transfer_buffer.offset(),
- serialized_device_properties_size);
- transfer_buffer.Release();
return true;
#else
NOTREACHED();
@@ -451,5 +617,50 @@ bool WebGPUImplementation::RequestDevice(
#endif
}
+void WebGPUImplementation::AssociateMailbox(GLuint64 device_client_id,
+ GLuint device_generation,
+ GLuint id,
+ GLuint generation,
+ GLuint usage,
+ const GLbyte* mailbox) {
+#if BUILDFLAG(USE_DAWN)
+ // Flush previous Dawn commands as they may manipulate texture object IDs
+ // and need to be resolved prior to the AssociateMailbox command. Otherwise
+  // the service side might not know, for example, that the previous texture
+  // using that ID has been released.
+ WebGPUCommandSerializer* command_serializer =
+ GetCommandSerializerWithDeviceClientID(device_client_id);
+ DCHECK(command_serializer);
+ command_serializer->Flush();
+
+ helper_->AssociateMailboxImmediate(device_client_id, device_generation, id,
+ generation, usage, mailbox);
+#endif
+}
+
+void WebGPUImplementation::DissociateMailbox(GLuint64 device_client_id,
+ GLuint texture_id,
+ GLuint texture_generation) {
+#if BUILDFLAG(USE_DAWN)
+  // Flush previous Dawn commands that might be rendering to the texture, prior
+  // to dissociating the shared image from that texture.
+ WebGPUCommandSerializer* command_serializer =
+ GetCommandSerializerWithDeviceClientID(device_client_id);
+ DCHECK(command_serializer);
+ command_serializer->Flush();
+
+ helper_->DissociateMailbox(device_client_id, texture_id, texture_generation);
+#endif
+}
+
+void WebGPUImplementation::RemoveDevice(DawnDeviceClientID device_client_id) {
+#if BUILDFLAG(USE_DAWN)
+ auto it = command_serializers_.find(device_client_id);
+ DCHECK(it != command_serializers_.end());
+ helper_->RemoveDevice(device_client_id);
+ command_serializers_.erase(it);
+#endif
+}
+
} // namespace webgpu
} // namespace gpu
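Sketch (not part of the patch): the per-device flow introduced above, where a
device is requested asynchronously and then addressed by its
DawnDeviceClientID. |webgpu|, |adapter_service_id|, and
|requested_device_properties| are illustrative placeholders, and the qualified
type names are assumptions.

webgpu->RequestDeviceAsync(
    adapter_service_id, requested_device_properties,
    base::BindOnce(
        [](gpu::webgpu::WebGPUInterface* webgpu, bool success,
           gpu::webgpu::DawnDeviceClientID device_client_id) {
          if (!success)
            return;
          WGPUDevice device = webgpu->GetDevice(device_client_id);
          // ... record Dawn commands against |device| via GetProcs() ...
          webgpu->RemoveDevice(device_client_id);
        },
        webgpu));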
diff --git a/chromium/gpu/command_buffer/client/webgpu_implementation.h b/chromium/gpu/command_buffer/client/webgpu_implementation.h
index 3d703010cf6..6dfd3c5d12a 100644
--- a/chromium/gpu/command_buffer/client/webgpu_implementation.h
+++ b/chromium/gpu/command_buffer/client/webgpu_implementation.h
@@ -26,12 +26,47 @@ namespace webgpu {
class DawnClientMemoryTransferService;
-class WEBGPU_EXPORT WebGPUImplementation final
- : public dawn_wire::CommandSerializer,
- public WebGPUInterface,
- public ImplementationBase {
- friend class WireClientCommandSerializer;
+#if BUILDFLAG(USE_DAWN)
+class WebGPUCommandSerializer final : public dawn_wire::CommandSerializer {
+ public:
+ WebGPUCommandSerializer(
+ DawnDeviceClientID device_client_id,
+ WebGPUCmdHelper* helper,
+ DawnClientMemoryTransferService* memory_transfer_service);
+ ~WebGPUCommandSerializer() override;
+
+  // Sends WGPUDeviceProperties to the service side. Note that this function
+  // should only be called once per WebGPUCommandSerializer object.
+ void RequestDeviceCreation(
+ uint32_t requested_adapter_id,
+ const WGPUDeviceProperties& requested_device_properties);
+
+ // dawn_wire::CommandSerializer implementation
+ void* GetCmdSpace(size_t size) final;
+ bool Flush() final;
+
+ // For the WebGPUInterface implementation of WebGPUImplementation
+ WGPUDevice GetDevice() const;
+ ReservedTexture ReserveTexture();
+ bool HandleCommands(const char* commands, size_t command_size);
+ private:
+ DawnDeviceClientID device_client_id_;
+ WebGPUCmdHelper* helper_;
+ DawnClientMemoryTransferService* memory_transfer_service_;
+
+ std::unique_ptr<dawn_wire::WireClient> wire_client_;
+
+ uint32_t c2s_buffer_default_size_ = 0;
+ uint32_t c2s_put_offset_ = 0;
+ std::unique_ptr<TransferBuffer> c2s_transfer_buffer_;
+ std::unique_ptr<ScopedTransferBufferPtr> c2s_buffer_;
+};
+#endif
+
+class WEBGPU_EXPORT WebGPUImplementation final : public WebGPUInterface,
+ public ImplementationBase {
public:
explicit WebGPUImplementation(WebGPUCmdHelper* helper,
TransferBufferInterface* transfer_buffer,
@@ -110,46 +145,55 @@ class WEBGPU_EXPORT WebGPUImplementation final
const gfx::PresentationFeedback& feedback) final;
void OnGpuControlReturnData(base::span<const uint8_t> data) final;
- // dawn_wire::CommandSerializer implementation
- void* GetCmdSpace(size_t size) final;
- bool Flush() final;
-
// WebGPUInterface implementation
const DawnProcTable& GetProcs() const override;
void FlushCommands() override;
- WGPUDevice GetDefaultDevice() override;
- ReservedTexture ReserveTexture(WGPUDevice device) override;
+ WGPUDevice GetDevice(DawnDeviceClientID device_client_id) override;
+ ReservedTexture ReserveTexture(DawnDeviceClientID device_client_id) override;
bool RequestAdapterAsync(
PowerPreference power_preference,
base::OnceCallback<void(uint32_t, const WGPUDeviceProperties&)>
request_adapter_callback) override;
- bool RequestDevice(
+ bool RequestDeviceAsync(
uint32_t requested_adapter_id,
- const WGPUDeviceProperties* requested_device_properties) override;
+ const WGPUDeviceProperties& requested_device_properties,
+ base::OnceCallback<void(bool, DawnDeviceClientID)>
+ request_device_callback) override;
+ void RemoveDevice(DawnDeviceClientID device_client_id) override;
private:
const char* GetLogPrefix() const { return "webgpu"; }
void CheckGLError() {}
- uint32_t NextRequestAdapterSerial();
+ DawnRequestAdapterSerial NextRequestAdapterSerial();
+ DawnDeviceClientID NextDeviceClientID();
WebGPUCmdHelper* helper_;
#if BUILDFLAG(USE_DAWN)
std::unique_ptr<DawnClientMemoryTransferService> memory_transfer_service_;
- std::unique_ptr<dawn_wire::WireClient> wire_client_;
+
+ WebGPUCommandSerializer* GetCommandSerializerWithDeviceClientID(
+ DawnDeviceClientID device_client_id) const;
+ void FlushAllCommandSerializers();
+ void ClearAllCommandSerializers();
+ base::flat_map<DawnDeviceClientID, std::unique_ptr<WebGPUCommandSerializer>>
+ command_serializers_;
#endif
DawnProcTable procs_ = {};
- uint32_t c2s_buffer_default_size_ = 0;
- uint32_t c2s_put_offset_ = 0;
- ScopedTransferBufferPtr c2s_buffer_;
-
LogSettings log_settings_;
base::flat_map<
- uint32_t,
+ DawnRequestAdapterSerial,
base::OnceCallback<void(uint32_t, const WGPUDeviceProperties&)>>
request_adapter_callback_map_;
- uint32_t request_adapter_serial_ = 0;
+ DawnRequestAdapterSerial request_adapter_serial_ = 0;
+
+ base::flat_map<DawnDeviceClientID,
+ base::OnceCallback<void(bool, DawnDeviceClientID)>>
+ request_device_callback_map_;
+ DawnDeviceClientID device_client_id_ = 0;
+
+ std::atomic_bool lost_{false};
DISALLOW_COPY_AND_ASSIGN(WebGPUImplementation);
};
diff --git a/chromium/gpu/command_buffer/client/webgpu_implementation_autogen.h b/chromium/gpu/command_buffer/client/webgpu_implementation_autogen.h
index 1e299de566d..8458b850917 100644
--- a/chromium/gpu/command_buffer/client/webgpu_implementation_autogen.h
+++ b/chromium/gpu/command_buffer/client/webgpu_implementation_autogen.h
@@ -13,13 +13,15 @@
#ifndef GPU_COMMAND_BUFFER_CLIENT_WEBGPU_IMPLEMENTATION_AUTOGEN_H_
#define GPU_COMMAND_BUFFER_CLIENT_WEBGPU_IMPLEMENTATION_AUTOGEN_H_
-void AssociateMailbox(GLuint device_id,
+void AssociateMailbox(GLuint64 device_client_id,
GLuint device_generation,
GLuint id,
GLuint generation,
GLuint usage,
const GLbyte* mailbox) override;
-void DissociateMailbox(GLuint texture_id, GLuint texture_generation) override;
+void DissociateMailbox(GLuint64 device_client_id,
+ GLuint texture_id,
+ GLuint texture_generation) override;
#endif // GPU_COMMAND_BUFFER_CLIENT_WEBGPU_IMPLEMENTATION_AUTOGEN_H_
diff --git a/chromium/gpu/command_buffer/client/webgpu_implementation_impl_autogen.h b/chromium/gpu/command_buffer/client/webgpu_implementation_impl_autogen.h
index c5f555eca66..06f94d450ad 100644
--- a/chromium/gpu/command_buffer/client/webgpu_implementation_impl_autogen.h
+++ b/chromium/gpu/command_buffer/client/webgpu_implementation_impl_autogen.h
@@ -13,31 +13,4 @@
#ifndef GPU_COMMAND_BUFFER_CLIENT_WEBGPU_IMPLEMENTATION_IMPL_AUTOGEN_H_
#define GPU_COMMAND_BUFFER_CLIENT_WEBGPU_IMPLEMENTATION_IMPL_AUTOGEN_H_
-void WebGPUImplementation::AssociateMailbox(GLuint device_id,
- GLuint device_generation,
- GLuint id,
- GLuint generation,
- GLuint usage,
- const GLbyte* mailbox) {
- GPU_CLIENT_SINGLE_THREAD_CHECK();
- GPU_CLIENT_LOG("[" << GetLogPrefix() << "] wgAssociateMailbox(" << device_id
- << ", " << device_generation << ", " << id << ", "
- << generation << ", " << usage << ", "
- << static_cast<const void*>(mailbox) << ")");
- uint32_t count = 16;
- for (uint32_t ii = 0; ii < count; ++ii)
- GPU_CLIENT_LOG("value[" << ii << "]: " << mailbox[ii]);
- helper_->AssociateMailboxImmediate(device_id, device_generation, id,
- generation, usage, mailbox);
- CheckGLError();
-}
-
-void WebGPUImplementation::DissociateMailbox(GLuint texture_id,
- GLuint texture_generation) {
- GPU_CLIENT_SINGLE_THREAD_CHECK();
- GPU_CLIENT_LOG("[" << GetLogPrefix() << "] wgDissociateMailbox(" << texture_id
- << ", " << texture_generation << ")");
- helper_->DissociateMailbox(texture_id, texture_generation);
-}
-
#endif // GPU_COMMAND_BUFFER_CLIENT_WEBGPU_IMPLEMENTATION_IMPL_AUTOGEN_H_
diff --git a/chromium/gpu/command_buffer/client/webgpu_implementation_unittest.cc b/chromium/gpu/command_buffer/client/webgpu_implementation_unittest.cc
index f3b37d43254..f0ba7951d90 100644
--- a/chromium/gpu/command_buffer/client/webgpu_implementation_unittest.cc
+++ b/chromium/gpu/command_buffer/client/webgpu_implementation_unittest.cc
@@ -93,7 +93,7 @@ class WebGPUImplementationTest : public testing::Test {
void SetUp() override { ASSERT_TRUE(Initialize()); }
void TearDown() override {
- gl_->Flush();
+ gl_->FlushCommands();
Mock::VerifyAndClear(gl_.get());
EXPECT_CALL(*command_buffer_, OnFlush()).Times(AnyNumber());
// For command buffer.
diff --git a/chromium/gpu/command_buffer/client/webgpu_implementation_unittest_autogen.h b/chromium/gpu/command_buffer/client/webgpu_implementation_unittest_autogen.h
index 66a91a31401..12aa0d1f975 100644
--- a/chromium/gpu/command_buffer/client/webgpu_implementation_unittest_autogen.h
+++ b/chromium/gpu/command_buffer/client/webgpu_implementation_unittest_autogen.h
@@ -13,30 +13,4 @@
#ifndef GPU_COMMAND_BUFFER_CLIENT_WEBGPU_IMPLEMENTATION_UNITTEST_AUTOGEN_H_
#define GPU_COMMAND_BUFFER_CLIENT_WEBGPU_IMPLEMENTATION_UNITTEST_AUTOGEN_H_
-TEST_F(WebGPUImplementationTest, AssociateMailbox) {
- GLbyte data[16] = {0};
- struct Cmds {
- cmds::AssociateMailboxImmediate cmd;
- GLbyte data[16];
- };
-
- for (int jj = 0; jj < 16; ++jj) {
- data[jj] = static_cast<GLbyte>(jj);
- }
- Cmds expected;
- expected.cmd.Init(1, 2, 3, 4, 5, &data[0]);
- gl_->AssociateMailbox(1, 2, 3, 4, 5, &data[0]);
- EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
-}
-
-TEST_F(WebGPUImplementationTest, DissociateMailbox) {
- struct Cmds {
- cmds::DissociateMailbox cmd;
- };
- Cmds expected;
- expected.cmd.Init(1, 2);
-
- gl_->DissociateMailbox(1, 2);
- EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
-}
#endif // GPU_COMMAND_BUFFER_CLIENT_WEBGPU_IMPLEMENTATION_UNITTEST_AUTOGEN_H_
diff --git a/chromium/gpu/command_buffer/client/webgpu_interface.h b/chromium/gpu/command_buffer/client/webgpu_interface.h
index 28f519527da..f68cbe25bfa 100644
--- a/chromium/gpu/command_buffer/client/webgpu_interface.h
+++ b/chromium/gpu/command_buffer/client/webgpu_interface.h
@@ -11,6 +11,7 @@
#include "base/callback.h"
#include "gpu/command_buffer/client/interface_base.h"
#include "gpu/command_buffer/common/webgpu_cmd_enums.h"
+#include "gpu/command_buffer/common/webgpu_cmd_ids.h"
namespace gpu {
namespace webgpu {
@@ -28,15 +29,19 @@ class WebGPUInterface : public InterfaceBase {
virtual const DawnProcTable& GetProcs() const = 0;
virtual void FlushCommands() = 0;
- virtual WGPUDevice GetDefaultDevice() = 0;
- virtual ReservedTexture ReserveTexture(WGPUDevice device) = 0;
+ virtual WGPUDevice GetDevice(DawnDeviceClientID device_client_id) = 0;
+ virtual ReservedTexture ReserveTexture(
+ DawnDeviceClientID device_client_id) = 0;
virtual bool RequestAdapterAsync(
PowerPreference power_preference,
base::OnceCallback<void(uint32_t, const WGPUDeviceProperties&)>
request_adapter_callback) = 0;
- virtual bool RequestDevice(
+ virtual bool RequestDeviceAsync(
uint32_t adapter_service_id,
- const WGPUDeviceProperties* requested_device_properties) = 0;
+ const WGPUDeviceProperties& requested_device_properties,
+ base::OnceCallback<void(bool, DawnDeviceClientID)>
+ request_device_callback) = 0;
+ virtual void RemoveDevice(DawnDeviceClientID device_client_id) = 0;
// Include the auto-generated part of this class. We split this because
// it means we can easily edit the non-auto generated parts right here in
diff --git a/chromium/gpu/command_buffer/client/webgpu_interface_autogen.h b/chromium/gpu/command_buffer/client/webgpu_interface_autogen.h
index 92784474d30..10af23f0a48 100644
--- a/chromium/gpu/command_buffer/client/webgpu_interface_autogen.h
+++ b/chromium/gpu/command_buffer/client/webgpu_interface_autogen.h
@@ -13,12 +13,13 @@
#ifndef GPU_COMMAND_BUFFER_CLIENT_WEBGPU_INTERFACE_AUTOGEN_H_
#define GPU_COMMAND_BUFFER_CLIENT_WEBGPU_INTERFACE_AUTOGEN_H_
-virtual void AssociateMailbox(GLuint device_id,
+virtual void AssociateMailbox(GLuint64 device_client_id,
GLuint device_generation,
GLuint id,
GLuint generation,
GLuint usage,
const GLbyte* mailbox) = 0;
-virtual void DissociateMailbox(GLuint texture_id,
+virtual void DissociateMailbox(GLuint64 device_client_id,
+ GLuint texture_id,
GLuint texture_generation) = 0;
#endif // GPU_COMMAND_BUFFER_CLIENT_WEBGPU_INTERFACE_AUTOGEN_H_
diff --git a/chromium/gpu/command_buffer/client/webgpu_interface_stub.cc b/chromium/gpu/command_buffer/client/webgpu_interface_stub.cc
index f2ce5c9da9f..e38d43134fd 100644
--- a/chromium/gpu/command_buffer/client/webgpu_interface_stub.cc
+++ b/chromium/gpu/command_buffer/client/webgpu_interface_stub.cc
@@ -23,10 +23,11 @@ const DawnProcTable& WebGPUInterfaceStub::GetProcs() const {
return null_procs_;
}
void WebGPUInterfaceStub::FlushCommands() {}
-WGPUDevice WebGPUInterfaceStub::GetDefaultDevice() {
+WGPUDevice WebGPUInterfaceStub::GetDevice(DawnDeviceClientID device_client_id) {
return nullptr;
}
-ReservedTexture WebGPUInterfaceStub::ReserveTexture(WGPUDevice device) {
+ReservedTexture WebGPUInterfaceStub::ReserveTexture(
+ DawnDeviceClientID device_client_id) {
return {nullptr, 0, 0};
}
bool WebGPUInterfaceStub::RequestAdapterAsync(
@@ -35,11 +36,14 @@ bool WebGPUInterfaceStub::RequestAdapterAsync(
request_adapter_callback) {
return false;
}
-bool WebGPUInterfaceStub::RequestDevice(
+bool WebGPUInterfaceStub::RequestDeviceAsync(
uint32_t adapter_service_id,
- const WGPUDeviceProperties* requested_device_properties) {
+ const WGPUDeviceProperties& requested_device_properties,
+ base::OnceCallback<void(bool, DawnDeviceClientID)>
+ request_device_callback) {
return false;
}
+void WebGPUInterfaceStub::RemoveDevice(DawnDeviceClientID device_client_id) {}
// Include the auto-generated part of this class. We split this because
// it means we can easily edit the non-auto generated parts right here in
diff --git a/chromium/gpu/command_buffer/client/webgpu_interface_stub.h b/chromium/gpu/command_buffer/client/webgpu_interface_stub.h
index efeae734f76..c03b35328c5 100644
--- a/chromium/gpu/command_buffer/client/webgpu_interface_stub.h
+++ b/chromium/gpu/command_buffer/client/webgpu_interface_stub.h
@@ -25,15 +25,18 @@ class WebGPUInterfaceStub : public WebGPUInterface {
// WebGPUInterface implementation
const DawnProcTable& GetProcs() const override;
void FlushCommands() override;
- WGPUDevice GetDefaultDevice() override;
- ReservedTexture ReserveTexture(WGPUDevice device) override;
+ WGPUDevice GetDevice(DawnDeviceClientID device_client_id) override;
+ ReservedTexture ReserveTexture(DawnDeviceClientID device_client_id) override;
bool RequestAdapterAsync(
PowerPreference power_preference,
base::OnceCallback<void(uint32_t, const WGPUDeviceProperties&)>
request_adapter_callback) override;
- bool RequestDevice(
+ bool RequestDeviceAsync(
uint32_t adapter_service_id,
- const WGPUDeviceProperties* requested_device_properties) override;
+ const WGPUDeviceProperties& requested_device_properties,
+ base::OnceCallback<void(bool, DawnDeviceClientID)>
+ request_device_callback) override;
+ void RemoveDevice(DawnDeviceClientID device_client_id) override;
// Include the auto-generated part of this class. We split this because
// it means we can easily edit the non-auto generated parts right here in
diff --git a/chromium/gpu/command_buffer/client/webgpu_interface_stub_autogen.h b/chromium/gpu/command_buffer/client/webgpu_interface_stub_autogen.h
index 6d23a6873e3..18a083a2dcf 100644
--- a/chromium/gpu/command_buffer/client/webgpu_interface_stub_autogen.h
+++ b/chromium/gpu/command_buffer/client/webgpu_interface_stub_autogen.h
@@ -12,11 +12,13 @@
#ifndef GPU_COMMAND_BUFFER_CLIENT_WEBGPU_INTERFACE_STUB_AUTOGEN_H_
#define GPU_COMMAND_BUFFER_CLIENT_WEBGPU_INTERFACE_STUB_AUTOGEN_H_
-void AssociateMailbox(GLuint device_id,
+void AssociateMailbox(GLuint64 device_client_id,
GLuint device_generation,
GLuint id,
GLuint generation,
GLuint usage,
const GLbyte* mailbox) override;
-void DissociateMailbox(GLuint texture_id, GLuint texture_generation) override;
+void DissociateMailbox(GLuint64 device_client_id,
+ GLuint texture_id,
+ GLuint texture_generation) override;
#endif // GPU_COMMAND_BUFFER_CLIENT_WEBGPU_INTERFACE_STUB_AUTOGEN_H_
diff --git a/chromium/gpu/command_buffer/client/webgpu_interface_stub_impl_autogen.h b/chromium/gpu/command_buffer/client/webgpu_interface_stub_impl_autogen.h
index 0d89b6896a4..cf164130484 100644
--- a/chromium/gpu/command_buffer/client/webgpu_interface_stub_impl_autogen.h
+++ b/chromium/gpu/command_buffer/client/webgpu_interface_stub_impl_autogen.h
@@ -12,12 +12,13 @@
#ifndef GPU_COMMAND_BUFFER_CLIENT_WEBGPU_INTERFACE_STUB_IMPL_AUTOGEN_H_
#define GPU_COMMAND_BUFFER_CLIENT_WEBGPU_INTERFACE_STUB_IMPL_AUTOGEN_H_
-void WebGPUInterfaceStub::AssociateMailbox(GLuint /* device_id */,
+void WebGPUInterfaceStub::AssociateMailbox(GLuint64 /* device_client_id */,
GLuint /* device_generation */,
GLuint /* id */,
GLuint /* generation */,
GLuint /* usage */,
const GLbyte* /* mailbox */) {}
-void WebGPUInterfaceStub::DissociateMailbox(GLuint /* texture_id */,
+void WebGPUInterfaceStub::DissociateMailbox(GLuint64 /* device_client_id */,
+ GLuint /* texture_id */,
GLuint /* texture_generation */) {}
#endif // GPU_COMMAND_BUFFER_CLIENT_WEBGPU_INTERFACE_STUB_IMPL_AUTOGEN_H_
diff --git a/chromium/gpu/command_buffer/common/BUILD.gn b/chromium/gpu/command_buffer/common/BUILD.gn
index 8ebd59e38f2..b1d05d401ec 100644
--- a/chromium/gpu/command_buffer/common/BUILD.gn
+++ b/chromium/gpu/command_buffer/common/BUILD.gn
@@ -12,44 +12,30 @@ import("//ui/gl/features.gni")
group("common") {
if (is_component_build) {
- public_deps = [
- "//gpu",
- ]
+ public_deps = [ "//gpu" ]
} else {
- public_deps = [
- ":common_sources",
- ]
+ public_deps = [ ":common_sources" ]
}
}
group("gles2") {
if (is_component_build) {
- public_deps = [
- "//gpu:gles2",
- ]
+ public_deps = [ "//gpu:gles2" ]
} else {
- public_deps = [
- ":gles2_sources",
- ]
+ public_deps = [ ":gles2_sources" ]
}
}
group("raster") {
if (is_component_build) {
- public_deps = [
- "//gpu:gles2",
- ]
+ public_deps = [ "//gpu:gles2" ]
} else {
- public_deps = [
- ":raster_sources",
- ]
+ public_deps = [ ":raster_sources" ]
}
}
group("webgpu") {
- public_deps = [
- ":webgpu_sources",
- ]
+ public_deps = [ ":webgpu_sources" ]
}
jumbo_source_set("common_sources") {
@@ -112,9 +98,7 @@ jumbo_source_set("common_sources") {
"//ui/gfx/geometry",
]
- deps = [
- "//base",
- ]
+ deps = [ "//base" ]
# TODO(piman): needed for gpu_memory_buffer_support.cc. Split common vs gles2
# specifics.
@@ -141,9 +125,7 @@ source_set("gles2_sources") {
":gles2_utils",
"//base",
]
- public_deps = [
- ":common",
- ]
+ public_deps = [ ":common" ]
}
source_set("raster_sources") {
@@ -206,6 +188,8 @@ source_set("webgpu_sources") {
component("gles2_utils") {
sources = [
+ "gles2_cmd_copy_texture_chromium_utils.cc",
+ "gles2_cmd_copy_texture_chromium_utils.h",
"gles2_cmd_utils.cc",
"gles2_cmd_utils.h",
"gles2_utils_export.h",
@@ -213,9 +197,7 @@ component("gles2_utils") {
defines = [ "GLES2_UTILS_IMPLEMENTATION" ]
- deps = [
- "//base",
- ]
+ deps = [ "//base" ]
all_dependent_configs = [ "//third_party/khronos:khronos_headers" ]
}
diff --git a/chromium/gpu/command_buffer/common/capabilities.h b/chromium/gpu/command_buffer/common/capabilities.h
index f7947cc8c10..7fe983e2272 100644
--- a/chromium/gpu/command_buffer/common/capabilities.h
+++ b/chromium/gpu/command_buffer/common/capabilities.h
@@ -11,6 +11,7 @@
#include "gpu/command_buffer/common/gpu_memory_buffer_support.h"
#include "gpu/gpu_export.h"
#include "ui/gfx/buffer_types.h"
+#include "ui/gfx/surface_origin.h"
// From gl2.h. We want to avoid including gl headers because client-side and
// service-side headers conflict.
@@ -132,6 +133,7 @@ struct GPU_EXPORT Capabilities {
bool swap_buffers_with_bounds = false;
bool commit_overlay_planes = false;
bool egl_image_external = false;
+ bool egl_image_external_essl3 = false;
bool texture_format_astc = false;
bool texture_format_atc = false;
bool texture_format_bgra8888 = false;
@@ -154,15 +156,15 @@ struct GPU_EXPORT Capabilities {
bool image_ycbcr_422 = false;
bool image_ycbcr_420v = false;
bool image_ycbcr_420v_disabled_for_video_frames = false;
- bool image_xr30 = false;
- bool image_xb30 = false;
+ bool image_ar30 = false;
+ bool image_ab30 = false;
bool image_ycbcr_p010 = false;
bool render_buffer_format_bgra8888 = false;
bool occlusion_query = false;
bool occlusion_query_boolean = false;
bool timer_queries = false;
bool surfaceless = false;
- bool flips_vertically = false;
+ gfx::SurfaceOrigin surface_origin = gfx::SurfaceOrigin::kBottomLeft;
bool msaa_is_slow = false;
bool disable_one_component_textures = false;
bool gpu_rasterization = false;
@@ -170,7 +172,6 @@ struct GPU_EXPORT Capabilities {
bool multisample_compatibility = false;
// True if DirectComposition layers are enabled.
bool dc_layers = false;
- bool use_dc_overlays_for_video = false;
bool protected_video_swap_chain = false;
bool gpu_vsync = false;
bool shared_image_swap_chain = false;
@@ -182,9 +183,6 @@ struct GPU_EXPORT Capabilities {
// details.
bool chromium_image_rgb_emulation = false;
- // When true, non-empty post sub buffer calls are unsupported.
- bool disable_non_empty_post_sub_buffers = false;
-
bool disable_2d_canvas_copy_on_write = false;
bool texture_npot = false;
@@ -210,7 +208,6 @@ struct GPU_EXPORT Capabilities {
// Used by OOP raster.
bool context_supports_distance_field_text = true;
- uint64_t glyph_cache_max_texture_bytes = 0.f;
GpuMemoryBufferFormatSet gpu_memory_buffer_formats = {
gfx::BufferFormat::BGR_565, gfx::BufferFormat::RGBA_4444,
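
A minimal caller-side sketch of the Capabilities change above: flips_vertically is replaced by a gfx::SurfaceOrigin field whose default, kBottomLeft, matches the old default of false. The function name, the kTopLeft enumerator, and the mapping to the old true case are assumptions inferred from those defaults; only kBottomLeft appears in this diff.

    #include "gpu/command_buffer/common/capabilities.h"
    #include "ui/gfx/surface_origin.h"

    // Sketch: replacement for code that used to read caps.flips_vertically.
    // Assumes gfx::SurfaceOrigin also has a kTopLeft enumerator and that it
    // corresponds to the old flips_vertically == true case.
    bool OutputSurfaceIsFlipped(const gpu::Capabilities& caps) {
      return caps.surface_origin == gfx::SurfaceOrigin::kTopLeft;
    }
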
diff --git a/chromium/gpu/command_buffer/common/context_creation_attribs.h b/chromium/gpu/command_buffer/common/context_creation_attribs.h
index a6516327472..aed80cc9ddc 100644
--- a/chromium/gpu/command_buffer/common/context_creation_attribs.h
+++ b/chromium/gpu/command_buffer/common/context_creation_attribs.h
@@ -62,6 +62,7 @@ struct GPU_EXPORT ContextCreationAttribs {
bool own_offscreen_surface = false;
bool single_buffer = false;
bool enable_gles2_interface = true;
+ bool enable_grcontext = false;
bool enable_raster_interface = false;
bool enable_oop_rasterization = false;
bool enable_swap_timestamps_if_supported = false;
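
A minimal sketch of how a client might set the new enable_grcontext flag alongside the neighbouring fields shown in this hunk; the helper function name is made up for illustration.

    #include "gpu/command_buffer/common/context_creation_attribs.h"

    gpu::ContextCreationAttribs MakeGrContextAttribs() {
      gpu::ContextCreationAttribs attribs;
      attribs.enable_gles2_interface = true;   // existing field (context line above)
      attribs.enable_raster_interface = true;  // existing field (context line above)
      attribs.enable_grcontext = true;         // field introduced by this change
      return attribs;
    }
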
diff --git a/chromium/gpu/command_buffer/common/gles2_cmd_copy_texture_chromium_utils.cc b/chromium/gpu/command_buffer/common/gles2_cmd_copy_texture_chromium_utils.cc
new file mode 100644
index 00000000000..3e5cfd937a5
--- /dev/null
+++ b/chromium/gpu/command_buffer/common/gles2_cmd_copy_texture_chromium_utils.cc
@@ -0,0 +1,17 @@
+// Copyright (c) 2019 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "gpu/command_buffer/common/gles2_cmd_copy_texture_chromium_utils.h"
+
+#include "gpu/command_buffer/common/gles2_cmd_utils.h"
+
+namespace gpu {
+namespace gles2 {
+
+bool CopyTextureCHROMIUMNeedsESSL3(uint32_t dest_format) {
+ return gpu::gles2::GLES2Util::IsIntegerFormat(dest_format);
+}
+
+} // namespace gles2
+} // namespace gpu
diff --git a/chromium/gpu/command_buffer/common/gles2_cmd_copy_texture_chromium_utils.h b/chromium/gpu/command_buffer/common/gles2_cmd_copy_texture_chromium_utils.h
new file mode 100644
index 00000000000..753542bf62c
--- /dev/null
+++ b/chromium/gpu/command_buffer/common/gles2_cmd_copy_texture_chromium_utils.h
@@ -0,0 +1,20 @@
+// Copyright (c) 2019 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef GPU_COMMAND_BUFFER_COMMON_GLES2_CMD_COPY_TEXTURE_CHROMIUM_UTILS_H_
+#define GPU_COMMAND_BUFFER_COMMON_GLES2_CMD_COPY_TEXTURE_CHROMIUM_UTILS_H_
+
+#include <stdint.h>
+
+#include "gpu/command_buffer/common/gles2_utils_export.h"
+
+namespace gpu {
+namespace gles2 {
+
+bool GLES2_UTILS_EXPORT CopyTextureCHROMIUMNeedsESSL3(uint32_t dest_format);
+
+} // namespace gles2
+} // namespace gpu
+
+#endif // GPU_COMMAND_BUFFER_COMMON_GLES2_CMD_COPY_TEXTURE_CHROMIUM_UTILS_H_
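
A minimal usage sketch for the newly exported helper, assuming the caller is choosing a shader variant for CopyTextureCHROMIUM; the function name PickCopyShaderVersion and the shader strings are illustrative only. With an integer destination format such as GL_RGBA8UI (0x8D7C) the helper would be expected to return true, since it simply defers to GLES2Util::IsIntegerFormat.

    #include <cstdint>

    #include "gpu/command_buffer/common/gles2_cmd_copy_texture_chromium_utils.h"

    // Integer destination formats need an ESSL 3.00 copy shader; everything
    // else can keep the ESSL 1.00 path.
    const char* PickCopyShaderVersion(uint32_t dest_format) {
      return gpu::gles2::CopyTextureCHROMIUMNeedsESSL3(dest_format)
                 ? "#version 300 es"
                 : "#version 100";
    }
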
diff --git a/chromium/gpu/command_buffer/common/gles2_cmd_format.h b/chromium/gpu/command_buffer/common/gles2_cmd_format.h
index e5a18782bb1..6bcbe51c0f3 100644
--- a/chromium/gpu/command_buffer/common/gles2_cmd_format.h
+++ b/chromium/gpu/command_buffer/common/gles2_cmd_format.h
@@ -55,7 +55,7 @@ enum class IdNamespaces {
kNumIdNamespaces
};
-enum RangeIdNamespaces { kPaths, kNumRangeIdNamespaces };
+enum RangeIdNamespaces { kNumRangeIdNamespaces = 1 };
// These numbers must not change
static_assert(static_cast<int>(SharedIdNamespaces::kBuffers) == 0,
@@ -80,7 +80,8 @@ static_assert(static_cast<int>(IdNamespaces::kTransformFeedbacks) == 3,
"kTransformFeedbacks should equal 3");
static_assert(static_cast<int>(IdNamespaces::kGpuFences) == 4,
"kGpuFences should equal 4");
-static_assert(kPaths == 0, "kPaths should equal 0");
+static_assert(kNumRangeIdNamespaces == 1,
+ "kNumRangeIdNamespaces should equal 1");
} // namespace id_namespaces
diff --git a/chromium/gpu/command_buffer/common/gles2_cmd_format_autogen.h b/chromium/gpu/command_buffer/common/gles2_cmd_format_autogen.h
index 3635c8ae565..05a48970424 100644
--- a/chromium/gpu/command_buffer/common/gles2_cmd_format_autogen.h
+++ b/chromium/gpu/command_buffer/common/gles2_cmd_format_autogen.h
@@ -13483,40 +13483,48 @@ struct ResizeCHROMIUM {
void SetHeader() { header.SetCmd<ValueType>(); }
- void Init(GLuint _width,
- GLuint _height,
+ void Init(GLint _width,
+ GLint _height,
GLfloat _scale_factor,
- GLenum _color_space,
- GLboolean _alpha) {
+ GLboolean _alpha,
+ GLuint _shm_id,
+ GLuint _shm_offset,
+ GLsizei _color_space_size) {
SetHeader();
width = _width;
height = _height;
scale_factor = _scale_factor;
- color_space = _color_space;
alpha = _alpha;
+ shm_id = _shm_id;
+ shm_offset = _shm_offset;
+ color_space_size = _color_space_size;
}
void* Set(void* cmd,
- GLuint _width,
- GLuint _height,
+ GLint _width,
+ GLint _height,
GLfloat _scale_factor,
- GLenum _color_space,
- GLboolean _alpha) {
- static_cast<ValueType*>(cmd)->Init(_width, _height, _scale_factor,
- _color_space, _alpha);
+ GLboolean _alpha,
+ GLuint _shm_id,
+ GLuint _shm_offset,
+ GLsizei _color_space_size) {
+ static_cast<ValueType*>(cmd)->Init(_width, _height, _scale_factor, _alpha,
+ _shm_id, _shm_offset, _color_space_size);
return NextCmdAddress<ValueType>(cmd);
}
gpu::CommandHeader header;
- uint32_t width;
- uint32_t height;
+ int32_t width;
+ int32_t height;
float scale_factor;
- uint32_t color_space;
uint32_t alpha;
+ uint32_t shm_id;
+ uint32_t shm_offset;
+ int32_t color_space_size;
};
-static_assert(sizeof(ResizeCHROMIUM) == 24,
- "size of ResizeCHROMIUM should be 24");
+static_assert(sizeof(ResizeCHROMIUM) == 32,
+ "size of ResizeCHROMIUM should be 32");
static_assert(offsetof(ResizeCHROMIUM, header) == 0,
"offset of ResizeCHROMIUM header should be 0");
static_assert(offsetof(ResizeCHROMIUM, width) == 4,
@@ -13525,10 +13533,14 @@ static_assert(offsetof(ResizeCHROMIUM, height) == 8,
"offset of ResizeCHROMIUM height should be 8");
static_assert(offsetof(ResizeCHROMIUM, scale_factor) == 12,
"offset of ResizeCHROMIUM scale_factor should be 12");
-static_assert(offsetof(ResizeCHROMIUM, color_space) == 16,
- "offset of ResizeCHROMIUM color_space should be 16");
-static_assert(offsetof(ResizeCHROMIUM, alpha) == 20,
- "offset of ResizeCHROMIUM alpha should be 20");
+static_assert(offsetof(ResizeCHROMIUM, alpha) == 16,
+ "offset of ResizeCHROMIUM alpha should be 16");
+static_assert(offsetof(ResizeCHROMIUM, shm_id) == 20,
+ "offset of ResizeCHROMIUM shm_id should be 20");
+static_assert(offsetof(ResizeCHROMIUM, shm_offset) == 24,
+ "offset of ResizeCHROMIUM shm_offset should be 24");
+static_assert(offsetof(ResizeCHROMIUM, color_space_size) == 28,
+ "offset of ResizeCHROMIUM color_space_size should be 28");
struct GetRequestableExtensionsCHROMIUM {
typedef GetRequestableExtensionsCHROMIUM ValueType;
@@ -15525,1419 +15537,6 @@ static_assert(offsetof(SetActiveURLCHROMIUM, header) == 0,
static_assert(offsetof(SetActiveURLCHROMIUM, url_bucket_id) == 4,
"offset of SetActiveURLCHROMIUM url_bucket_id should be 4");
-struct MatrixLoadfCHROMIUMImmediate {
- typedef MatrixLoadfCHROMIUMImmediate ValueType;
- static const CommandId kCmdId = kMatrixLoadfCHROMIUMImmediate;
- static const cmd::ArgFlags kArgFlags = cmd::kAtLeastN;
- static const uint8_t cmd_flags = CMD_FLAG_SET_TRACE_LEVEL(3);
-
- static uint32_t ComputeDataSize() {
- return static_cast<uint32_t>(sizeof(GLfloat) * 16);
- }
-
- static uint32_t ComputeSize() {
- return static_cast<uint32_t>(sizeof(ValueType) + ComputeDataSize());
- }
-
- void SetHeader() { header.SetCmdByTotalSize<ValueType>(ComputeSize()); }
-
- void Init(GLenum _matrixMode, const GLfloat* _m) {
- SetHeader();
- matrixMode = _matrixMode;
- memcpy(ImmediateDataAddress(this), _m, ComputeDataSize());
- }
-
- void* Set(void* cmd, GLenum _matrixMode, const GLfloat* _m) {
- static_cast<ValueType*>(cmd)->Init(_matrixMode, _m);
- const uint32_t size = ComputeSize();
- return NextImmediateCmdAddressTotalSize<ValueType>(cmd, size);
- }
-
- gpu::CommandHeader header;
- uint32_t matrixMode;
-};
-
-static_assert(sizeof(MatrixLoadfCHROMIUMImmediate) == 8,
- "size of MatrixLoadfCHROMIUMImmediate should be 8");
-static_assert(offsetof(MatrixLoadfCHROMIUMImmediate, header) == 0,
- "offset of MatrixLoadfCHROMIUMImmediate header should be 0");
-static_assert(offsetof(MatrixLoadfCHROMIUMImmediate, matrixMode) == 4,
- "offset of MatrixLoadfCHROMIUMImmediate matrixMode should be 4");
-
-struct MatrixLoadIdentityCHROMIUM {
- typedef MatrixLoadIdentityCHROMIUM ValueType;
- static const CommandId kCmdId = kMatrixLoadIdentityCHROMIUM;
- static const cmd::ArgFlags kArgFlags = cmd::kFixed;
- static const uint8_t cmd_flags = CMD_FLAG_SET_TRACE_LEVEL(3);
-
- static uint32_t ComputeSize() {
- return static_cast<uint32_t>(sizeof(ValueType)); // NOLINT
- }
-
- void SetHeader() { header.SetCmd<ValueType>(); }
-
- void Init(GLenum _matrixMode) {
- SetHeader();
- matrixMode = _matrixMode;
- }
-
- void* Set(void* cmd, GLenum _matrixMode) {
- static_cast<ValueType*>(cmd)->Init(_matrixMode);
- return NextCmdAddress<ValueType>(cmd);
- }
-
- gpu::CommandHeader header;
- uint32_t matrixMode;
-};
-
-static_assert(sizeof(MatrixLoadIdentityCHROMIUM) == 8,
- "size of MatrixLoadIdentityCHROMIUM should be 8");
-static_assert(offsetof(MatrixLoadIdentityCHROMIUM, header) == 0,
- "offset of MatrixLoadIdentityCHROMIUM header should be 0");
-static_assert(offsetof(MatrixLoadIdentityCHROMIUM, matrixMode) == 4,
- "offset of MatrixLoadIdentityCHROMIUM matrixMode should be 4");
-
-struct GenPathsCHROMIUM {
- typedef GenPathsCHROMIUM ValueType;
- static const CommandId kCmdId = kGenPathsCHROMIUM;
- static const cmd::ArgFlags kArgFlags = cmd::kFixed;
- static const uint8_t cmd_flags = CMD_FLAG_SET_TRACE_LEVEL(3);
-
- static uint32_t ComputeSize() {
- return static_cast<uint32_t>(sizeof(ValueType)); // NOLINT
- }
-
- void SetHeader() { header.SetCmd<ValueType>(); }
-
- void Init(GLuint _first_client_id, GLsizei _range) {
- SetHeader();
- first_client_id = _first_client_id;
- range = _range;
- }
-
- void* Set(void* cmd, GLuint _first_client_id, GLsizei _range) {
- static_cast<ValueType*>(cmd)->Init(_first_client_id, _range);
- return NextCmdAddress<ValueType>(cmd);
- }
-
- gpu::CommandHeader header;
- uint32_t first_client_id;
- int32_t range;
-};
-
-static_assert(sizeof(GenPathsCHROMIUM) == 12,
- "size of GenPathsCHROMIUM should be 12");
-static_assert(offsetof(GenPathsCHROMIUM, header) == 0,
- "offset of GenPathsCHROMIUM header should be 0");
-static_assert(offsetof(GenPathsCHROMIUM, first_client_id) == 4,
- "offset of GenPathsCHROMIUM first_client_id should be 4");
-static_assert(offsetof(GenPathsCHROMIUM, range) == 8,
- "offset of GenPathsCHROMIUM range should be 8");
-
-struct DeletePathsCHROMIUM {
- typedef DeletePathsCHROMIUM ValueType;
- static const CommandId kCmdId = kDeletePathsCHROMIUM;
- static const cmd::ArgFlags kArgFlags = cmd::kFixed;
- static const uint8_t cmd_flags = CMD_FLAG_SET_TRACE_LEVEL(3);
-
- static uint32_t ComputeSize() {
- return static_cast<uint32_t>(sizeof(ValueType)); // NOLINT
- }
-
- void SetHeader() { header.SetCmd<ValueType>(); }
-
- void Init(GLuint _first_client_id, GLsizei _range) {
- SetHeader();
- first_client_id = _first_client_id;
- range = _range;
- }
-
- void* Set(void* cmd, GLuint _first_client_id, GLsizei _range) {
- static_cast<ValueType*>(cmd)->Init(_first_client_id, _range);
- return NextCmdAddress<ValueType>(cmd);
- }
-
- gpu::CommandHeader header;
- uint32_t first_client_id;
- int32_t range;
-};
-
-static_assert(sizeof(DeletePathsCHROMIUM) == 12,
- "size of DeletePathsCHROMIUM should be 12");
-static_assert(offsetof(DeletePathsCHROMIUM, header) == 0,
- "offset of DeletePathsCHROMIUM header should be 0");
-static_assert(offsetof(DeletePathsCHROMIUM, first_client_id) == 4,
- "offset of DeletePathsCHROMIUM first_client_id should be 4");
-static_assert(offsetof(DeletePathsCHROMIUM, range) == 8,
- "offset of DeletePathsCHROMIUM range should be 8");
-
-struct IsPathCHROMIUM {
- typedef IsPathCHROMIUM ValueType;
- static const CommandId kCmdId = kIsPathCHROMIUM;
- static const cmd::ArgFlags kArgFlags = cmd::kFixed;
- static const uint8_t cmd_flags = CMD_FLAG_SET_TRACE_LEVEL(3);
-
- typedef uint32_t Result;
-
- static uint32_t ComputeSize() {
- return static_cast<uint32_t>(sizeof(ValueType)); // NOLINT
- }
-
- void SetHeader() { header.SetCmd<ValueType>(); }
-
- void Init(GLuint _path,
- uint32_t _result_shm_id,
- uint32_t _result_shm_offset) {
- SetHeader();
- path = _path;
- result_shm_id = _result_shm_id;
- result_shm_offset = _result_shm_offset;
- }
-
- void* Set(void* cmd,
- GLuint _path,
- uint32_t _result_shm_id,
- uint32_t _result_shm_offset) {
- static_cast<ValueType*>(cmd)->Init(_path, _result_shm_id,
- _result_shm_offset);
- return NextCmdAddress<ValueType>(cmd);
- }
-
- gpu::CommandHeader header;
- uint32_t path;
- uint32_t result_shm_id;
- uint32_t result_shm_offset;
-};
-
-static_assert(sizeof(IsPathCHROMIUM) == 16,
- "size of IsPathCHROMIUM should be 16");
-static_assert(offsetof(IsPathCHROMIUM, header) == 0,
- "offset of IsPathCHROMIUM header should be 0");
-static_assert(offsetof(IsPathCHROMIUM, path) == 4,
- "offset of IsPathCHROMIUM path should be 4");
-static_assert(offsetof(IsPathCHROMIUM, result_shm_id) == 8,
- "offset of IsPathCHROMIUM result_shm_id should be 8");
-static_assert(offsetof(IsPathCHROMIUM, result_shm_offset) == 12,
- "offset of IsPathCHROMIUM result_shm_offset should be 12");
-
-struct PathCommandsCHROMIUM {
- typedef PathCommandsCHROMIUM ValueType;
- static const CommandId kCmdId = kPathCommandsCHROMIUM;
- static const cmd::ArgFlags kArgFlags = cmd::kFixed;
- static const uint8_t cmd_flags = CMD_FLAG_SET_TRACE_LEVEL(3);
-
- static uint32_t ComputeSize() {
- return static_cast<uint32_t>(sizeof(ValueType)); // NOLINT
- }
-
- void SetHeader() { header.SetCmd<ValueType>(); }
-
- void Init(GLuint _path,
- GLsizei _numCommands,
- uint32_t _commands_shm_id,
- uint32_t _commands_shm_offset,
- GLsizei _numCoords,
- GLenum _coordType,
- uint32_t _coords_shm_id,
- uint32_t _coords_shm_offset) {
- SetHeader();
- path = _path;
- numCommands = _numCommands;
- commands_shm_id = _commands_shm_id;
- commands_shm_offset = _commands_shm_offset;
- numCoords = _numCoords;
- coordType = _coordType;
- coords_shm_id = _coords_shm_id;
- coords_shm_offset = _coords_shm_offset;
- }
-
- void* Set(void* cmd,
- GLuint _path,
- GLsizei _numCommands,
- uint32_t _commands_shm_id,
- uint32_t _commands_shm_offset,
- GLsizei _numCoords,
- GLenum _coordType,
- uint32_t _coords_shm_id,
- uint32_t _coords_shm_offset) {
- static_cast<ValueType*>(cmd)->Init(
- _path, _numCommands, _commands_shm_id, _commands_shm_offset, _numCoords,
- _coordType, _coords_shm_id, _coords_shm_offset);
- return NextCmdAddress<ValueType>(cmd);
- }
-
- gpu::CommandHeader header;
- uint32_t path;
- int32_t numCommands;
- uint32_t commands_shm_id;
- uint32_t commands_shm_offset;
- int32_t numCoords;
- uint32_t coordType;
- uint32_t coords_shm_id;
- uint32_t coords_shm_offset;
-};
-
-static_assert(sizeof(PathCommandsCHROMIUM) == 36,
- "size of PathCommandsCHROMIUM should be 36");
-static_assert(offsetof(PathCommandsCHROMIUM, header) == 0,
- "offset of PathCommandsCHROMIUM header should be 0");
-static_assert(offsetof(PathCommandsCHROMIUM, path) == 4,
- "offset of PathCommandsCHROMIUM path should be 4");
-static_assert(offsetof(PathCommandsCHROMIUM, numCommands) == 8,
- "offset of PathCommandsCHROMIUM numCommands should be 8");
-static_assert(offsetof(PathCommandsCHROMIUM, commands_shm_id) == 12,
- "offset of PathCommandsCHROMIUM commands_shm_id should be 12");
-static_assert(
- offsetof(PathCommandsCHROMIUM, commands_shm_offset) == 16,
- "offset of PathCommandsCHROMIUM commands_shm_offset should be 16");
-static_assert(offsetof(PathCommandsCHROMIUM, numCoords) == 20,
- "offset of PathCommandsCHROMIUM numCoords should be 20");
-static_assert(offsetof(PathCommandsCHROMIUM, coordType) == 24,
- "offset of PathCommandsCHROMIUM coordType should be 24");
-static_assert(offsetof(PathCommandsCHROMIUM, coords_shm_id) == 28,
- "offset of PathCommandsCHROMIUM coords_shm_id should be 28");
-static_assert(offsetof(PathCommandsCHROMIUM, coords_shm_offset) == 32,
- "offset of PathCommandsCHROMIUM coords_shm_offset should be 32");
-
-struct PathParameterfCHROMIUM {
- typedef PathParameterfCHROMIUM ValueType;
- static const CommandId kCmdId = kPathParameterfCHROMIUM;
- static const cmd::ArgFlags kArgFlags = cmd::kFixed;
- static const uint8_t cmd_flags = CMD_FLAG_SET_TRACE_LEVEL(3);
-
- static uint32_t ComputeSize() {
- return static_cast<uint32_t>(sizeof(ValueType)); // NOLINT
- }
-
- void SetHeader() { header.SetCmd<ValueType>(); }
-
- void Init(GLuint _path, GLenum _pname, GLfloat _value) {
- SetHeader();
- path = _path;
- pname = _pname;
- value = _value;
- }
-
- void* Set(void* cmd, GLuint _path, GLenum _pname, GLfloat _value) {
- static_cast<ValueType*>(cmd)->Init(_path, _pname, _value);
- return NextCmdAddress<ValueType>(cmd);
- }
-
- gpu::CommandHeader header;
- uint32_t path;
- uint32_t pname;
- float value;
-};
-
-static_assert(sizeof(PathParameterfCHROMIUM) == 16,
- "size of PathParameterfCHROMIUM should be 16");
-static_assert(offsetof(PathParameterfCHROMIUM, header) == 0,
- "offset of PathParameterfCHROMIUM header should be 0");
-static_assert(offsetof(PathParameterfCHROMIUM, path) == 4,
- "offset of PathParameterfCHROMIUM path should be 4");
-static_assert(offsetof(PathParameterfCHROMIUM, pname) == 8,
- "offset of PathParameterfCHROMIUM pname should be 8");
-static_assert(offsetof(PathParameterfCHROMIUM, value) == 12,
- "offset of PathParameterfCHROMIUM value should be 12");
-
-struct PathParameteriCHROMIUM {
- typedef PathParameteriCHROMIUM ValueType;
- static const CommandId kCmdId = kPathParameteriCHROMIUM;
- static const cmd::ArgFlags kArgFlags = cmd::kFixed;
- static const uint8_t cmd_flags = CMD_FLAG_SET_TRACE_LEVEL(3);
-
- static uint32_t ComputeSize() {
- return static_cast<uint32_t>(sizeof(ValueType)); // NOLINT
- }
-
- void SetHeader() { header.SetCmd<ValueType>(); }
-
- void Init(GLuint _path, GLenum _pname, GLint _value) {
- SetHeader();
- path = _path;
- pname = _pname;
- value = _value;
- }
-
- void* Set(void* cmd, GLuint _path, GLenum _pname, GLint _value) {
- static_cast<ValueType*>(cmd)->Init(_path, _pname, _value);
- return NextCmdAddress<ValueType>(cmd);
- }
-
- gpu::CommandHeader header;
- uint32_t path;
- uint32_t pname;
- int32_t value;
-};
-
-static_assert(sizeof(PathParameteriCHROMIUM) == 16,
- "size of PathParameteriCHROMIUM should be 16");
-static_assert(offsetof(PathParameteriCHROMIUM, header) == 0,
- "offset of PathParameteriCHROMIUM header should be 0");
-static_assert(offsetof(PathParameteriCHROMIUM, path) == 4,
- "offset of PathParameteriCHROMIUM path should be 4");
-static_assert(offsetof(PathParameteriCHROMIUM, pname) == 8,
- "offset of PathParameteriCHROMIUM pname should be 8");
-static_assert(offsetof(PathParameteriCHROMIUM, value) == 12,
- "offset of PathParameteriCHROMIUM value should be 12");
-
-struct PathStencilFuncCHROMIUM {
- typedef PathStencilFuncCHROMIUM ValueType;
- static const CommandId kCmdId = kPathStencilFuncCHROMIUM;
- static const cmd::ArgFlags kArgFlags = cmd::kFixed;
- static const uint8_t cmd_flags = CMD_FLAG_SET_TRACE_LEVEL(3);
-
- static uint32_t ComputeSize() {
- return static_cast<uint32_t>(sizeof(ValueType)); // NOLINT
- }
-
- void SetHeader() { header.SetCmd<ValueType>(); }
-
- void Init(GLenum _func, GLint _ref, GLuint _mask) {
- SetHeader();
- func = _func;
- ref = _ref;
- mask = _mask;
- }
-
- void* Set(void* cmd, GLenum _func, GLint _ref, GLuint _mask) {
- static_cast<ValueType*>(cmd)->Init(_func, _ref, _mask);
- return NextCmdAddress<ValueType>(cmd);
- }
-
- gpu::CommandHeader header;
- uint32_t func;
- int32_t ref;
- uint32_t mask;
-};
-
-static_assert(sizeof(PathStencilFuncCHROMIUM) == 16,
- "size of PathStencilFuncCHROMIUM should be 16");
-static_assert(offsetof(PathStencilFuncCHROMIUM, header) == 0,
- "offset of PathStencilFuncCHROMIUM header should be 0");
-static_assert(offsetof(PathStencilFuncCHROMIUM, func) == 4,
- "offset of PathStencilFuncCHROMIUM func should be 4");
-static_assert(offsetof(PathStencilFuncCHROMIUM, ref) == 8,
- "offset of PathStencilFuncCHROMIUM ref should be 8");
-static_assert(offsetof(PathStencilFuncCHROMIUM, mask) == 12,
- "offset of PathStencilFuncCHROMIUM mask should be 12");
-
-struct StencilFillPathCHROMIUM {
- typedef StencilFillPathCHROMIUM ValueType;
- static const CommandId kCmdId = kStencilFillPathCHROMIUM;
- static const cmd::ArgFlags kArgFlags = cmd::kFixed;
- static const uint8_t cmd_flags = CMD_FLAG_SET_TRACE_LEVEL(3);
-
- static uint32_t ComputeSize() {
- return static_cast<uint32_t>(sizeof(ValueType)); // NOLINT
- }
-
- void SetHeader() { header.SetCmd<ValueType>(); }
-
- void Init(GLuint _path, GLenum _fillMode, GLuint _mask) {
- SetHeader();
- path = _path;
- fillMode = _fillMode;
- mask = _mask;
- }
-
- void* Set(void* cmd, GLuint _path, GLenum _fillMode, GLuint _mask) {
- static_cast<ValueType*>(cmd)->Init(_path, _fillMode, _mask);
- return NextCmdAddress<ValueType>(cmd);
- }
-
- gpu::CommandHeader header;
- uint32_t path;
- uint32_t fillMode;
- uint32_t mask;
-};
-
-static_assert(sizeof(StencilFillPathCHROMIUM) == 16,
- "size of StencilFillPathCHROMIUM should be 16");
-static_assert(offsetof(StencilFillPathCHROMIUM, header) == 0,
- "offset of StencilFillPathCHROMIUM header should be 0");
-static_assert(offsetof(StencilFillPathCHROMIUM, path) == 4,
- "offset of StencilFillPathCHROMIUM path should be 4");
-static_assert(offsetof(StencilFillPathCHROMIUM, fillMode) == 8,
- "offset of StencilFillPathCHROMIUM fillMode should be 8");
-static_assert(offsetof(StencilFillPathCHROMIUM, mask) == 12,
- "offset of StencilFillPathCHROMIUM mask should be 12");
-
-struct StencilStrokePathCHROMIUM {
- typedef StencilStrokePathCHROMIUM ValueType;
- static const CommandId kCmdId = kStencilStrokePathCHROMIUM;
- static const cmd::ArgFlags kArgFlags = cmd::kFixed;
- static const uint8_t cmd_flags = CMD_FLAG_SET_TRACE_LEVEL(3);
-
- static uint32_t ComputeSize() {
- return static_cast<uint32_t>(sizeof(ValueType)); // NOLINT
- }
-
- void SetHeader() { header.SetCmd<ValueType>(); }
-
- void Init(GLuint _path, GLint _reference, GLuint _mask) {
- SetHeader();
- path = _path;
- reference = _reference;
- mask = _mask;
- }
-
- void* Set(void* cmd, GLuint _path, GLint _reference, GLuint _mask) {
- static_cast<ValueType*>(cmd)->Init(_path, _reference, _mask);
- return NextCmdAddress<ValueType>(cmd);
- }
-
- gpu::CommandHeader header;
- uint32_t path;
- int32_t reference;
- uint32_t mask;
-};
-
-static_assert(sizeof(StencilStrokePathCHROMIUM) == 16,
- "size of StencilStrokePathCHROMIUM should be 16");
-static_assert(offsetof(StencilStrokePathCHROMIUM, header) == 0,
- "offset of StencilStrokePathCHROMIUM header should be 0");
-static_assert(offsetof(StencilStrokePathCHROMIUM, path) == 4,
- "offset of StencilStrokePathCHROMIUM path should be 4");
-static_assert(offsetof(StencilStrokePathCHROMIUM, reference) == 8,
- "offset of StencilStrokePathCHROMIUM reference should be 8");
-static_assert(offsetof(StencilStrokePathCHROMIUM, mask) == 12,
- "offset of StencilStrokePathCHROMIUM mask should be 12");
-
-struct CoverFillPathCHROMIUM {
- typedef CoverFillPathCHROMIUM ValueType;
- static const CommandId kCmdId = kCoverFillPathCHROMIUM;
- static const cmd::ArgFlags kArgFlags = cmd::kFixed;
- static const uint8_t cmd_flags = CMD_FLAG_SET_TRACE_LEVEL(3);
-
- static uint32_t ComputeSize() {
- return static_cast<uint32_t>(sizeof(ValueType)); // NOLINT
- }
-
- void SetHeader() { header.SetCmd<ValueType>(); }
-
- void Init(GLuint _path, GLenum _coverMode) {
- SetHeader();
- path = _path;
- coverMode = _coverMode;
- }
-
- void* Set(void* cmd, GLuint _path, GLenum _coverMode) {
- static_cast<ValueType*>(cmd)->Init(_path, _coverMode);
- return NextCmdAddress<ValueType>(cmd);
- }
-
- gpu::CommandHeader header;
- uint32_t path;
- uint32_t coverMode;
-};
-
-static_assert(sizeof(CoverFillPathCHROMIUM) == 12,
- "size of CoverFillPathCHROMIUM should be 12");
-static_assert(offsetof(CoverFillPathCHROMIUM, header) == 0,
- "offset of CoverFillPathCHROMIUM header should be 0");
-static_assert(offsetof(CoverFillPathCHROMIUM, path) == 4,
- "offset of CoverFillPathCHROMIUM path should be 4");
-static_assert(offsetof(CoverFillPathCHROMIUM, coverMode) == 8,
- "offset of CoverFillPathCHROMIUM coverMode should be 8");
-
-struct CoverStrokePathCHROMIUM {
- typedef CoverStrokePathCHROMIUM ValueType;
- static const CommandId kCmdId = kCoverStrokePathCHROMIUM;
- static const cmd::ArgFlags kArgFlags = cmd::kFixed;
- static const uint8_t cmd_flags = CMD_FLAG_SET_TRACE_LEVEL(3);
-
- static uint32_t ComputeSize() {
- return static_cast<uint32_t>(sizeof(ValueType)); // NOLINT
- }
-
- void SetHeader() { header.SetCmd<ValueType>(); }
-
- void Init(GLuint _path, GLenum _coverMode) {
- SetHeader();
- path = _path;
- coverMode = _coverMode;
- }
-
- void* Set(void* cmd, GLuint _path, GLenum _coverMode) {
- static_cast<ValueType*>(cmd)->Init(_path, _coverMode);
- return NextCmdAddress<ValueType>(cmd);
- }
-
- gpu::CommandHeader header;
- uint32_t path;
- uint32_t coverMode;
-};
-
-static_assert(sizeof(CoverStrokePathCHROMIUM) == 12,
- "size of CoverStrokePathCHROMIUM should be 12");
-static_assert(offsetof(CoverStrokePathCHROMIUM, header) == 0,
- "offset of CoverStrokePathCHROMIUM header should be 0");
-static_assert(offsetof(CoverStrokePathCHROMIUM, path) == 4,
- "offset of CoverStrokePathCHROMIUM path should be 4");
-static_assert(offsetof(CoverStrokePathCHROMIUM, coverMode) == 8,
- "offset of CoverStrokePathCHROMIUM coverMode should be 8");
-
-struct StencilThenCoverFillPathCHROMIUM {
- typedef StencilThenCoverFillPathCHROMIUM ValueType;
- static const CommandId kCmdId = kStencilThenCoverFillPathCHROMIUM;
- static const cmd::ArgFlags kArgFlags = cmd::kFixed;
- static const uint8_t cmd_flags = CMD_FLAG_SET_TRACE_LEVEL(3);
-
- static uint32_t ComputeSize() {
- return static_cast<uint32_t>(sizeof(ValueType)); // NOLINT
- }
-
- void SetHeader() { header.SetCmd<ValueType>(); }
-
- void Init(GLuint _path, GLenum _fillMode, GLuint _mask, GLenum _coverMode) {
- SetHeader();
- path = _path;
- fillMode = _fillMode;
- mask = _mask;
- coverMode = _coverMode;
- }
-
- void* Set(void* cmd,
- GLuint _path,
- GLenum _fillMode,
- GLuint _mask,
- GLenum _coverMode) {
- static_cast<ValueType*>(cmd)->Init(_path, _fillMode, _mask, _coverMode);
- return NextCmdAddress<ValueType>(cmd);
- }
-
- gpu::CommandHeader header;
- uint32_t path;
- uint32_t fillMode;
- uint32_t mask;
- uint32_t coverMode;
-};
-
-static_assert(sizeof(StencilThenCoverFillPathCHROMIUM) == 20,
- "size of StencilThenCoverFillPathCHROMIUM should be 20");
-static_assert(offsetof(StencilThenCoverFillPathCHROMIUM, header) == 0,
- "offset of StencilThenCoverFillPathCHROMIUM header should be 0");
-static_assert(offsetof(StencilThenCoverFillPathCHROMIUM, path) == 4,
- "offset of StencilThenCoverFillPathCHROMIUM path should be 4");
-static_assert(
- offsetof(StencilThenCoverFillPathCHROMIUM, fillMode) == 8,
- "offset of StencilThenCoverFillPathCHROMIUM fillMode should be 8");
-static_assert(offsetof(StencilThenCoverFillPathCHROMIUM, mask) == 12,
- "offset of StencilThenCoverFillPathCHROMIUM mask should be 12");
-static_assert(
- offsetof(StencilThenCoverFillPathCHROMIUM, coverMode) == 16,
- "offset of StencilThenCoverFillPathCHROMIUM coverMode should be 16");
-
-struct StencilThenCoverStrokePathCHROMIUM {
- typedef StencilThenCoverStrokePathCHROMIUM ValueType;
- static const CommandId kCmdId = kStencilThenCoverStrokePathCHROMIUM;
- static const cmd::ArgFlags kArgFlags = cmd::kFixed;
- static const uint8_t cmd_flags = CMD_FLAG_SET_TRACE_LEVEL(3);
-
- static uint32_t ComputeSize() {
- return static_cast<uint32_t>(sizeof(ValueType)); // NOLINT
- }
-
- void SetHeader() { header.SetCmd<ValueType>(); }
-
- void Init(GLuint _path, GLint _reference, GLuint _mask, GLenum _coverMode) {
- SetHeader();
- path = _path;
- reference = _reference;
- mask = _mask;
- coverMode = _coverMode;
- }
-
- void* Set(void* cmd,
- GLuint _path,
- GLint _reference,
- GLuint _mask,
- GLenum _coverMode) {
- static_cast<ValueType*>(cmd)->Init(_path, _reference, _mask, _coverMode);
- return NextCmdAddress<ValueType>(cmd);
- }
-
- gpu::CommandHeader header;
- uint32_t path;
- int32_t reference;
- uint32_t mask;
- uint32_t coverMode;
-};
-
-static_assert(sizeof(StencilThenCoverStrokePathCHROMIUM) == 20,
- "size of StencilThenCoverStrokePathCHROMIUM should be 20");
-static_assert(
- offsetof(StencilThenCoverStrokePathCHROMIUM, header) == 0,
- "offset of StencilThenCoverStrokePathCHROMIUM header should be 0");
-static_assert(offsetof(StencilThenCoverStrokePathCHROMIUM, path) == 4,
- "offset of StencilThenCoverStrokePathCHROMIUM path should be 4");
-static_assert(
- offsetof(StencilThenCoverStrokePathCHROMIUM, reference) == 8,
- "offset of StencilThenCoverStrokePathCHROMIUM reference should be 8");
-static_assert(offsetof(StencilThenCoverStrokePathCHROMIUM, mask) == 12,
- "offset of StencilThenCoverStrokePathCHROMIUM mask should be 12");
-static_assert(
- offsetof(StencilThenCoverStrokePathCHROMIUM, coverMode) == 16,
- "offset of StencilThenCoverStrokePathCHROMIUM coverMode should be 16");
-
-struct StencilFillPathInstancedCHROMIUM {
- typedef StencilFillPathInstancedCHROMIUM ValueType;
- static const CommandId kCmdId = kStencilFillPathInstancedCHROMIUM;
- static const cmd::ArgFlags kArgFlags = cmd::kFixed;
- static const uint8_t cmd_flags = CMD_FLAG_SET_TRACE_LEVEL(3);
-
- static uint32_t ComputeSize() {
- return static_cast<uint32_t>(sizeof(ValueType)); // NOLINT
- }
-
- void SetHeader() { header.SetCmd<ValueType>(); }
-
- void Init(GLsizei _numPaths,
- GLenum _pathNameType,
- uint32_t _paths_shm_id,
- uint32_t _paths_shm_offset,
- GLuint _pathBase,
- GLenum _fillMode,
- GLuint _mask,
- GLenum _transformType,
- uint32_t _transformValues_shm_id,
- uint32_t _transformValues_shm_offset) {
- SetHeader();
- numPaths = _numPaths;
- pathNameType = _pathNameType;
- paths_shm_id = _paths_shm_id;
- paths_shm_offset = _paths_shm_offset;
- pathBase = _pathBase;
- fillMode = _fillMode;
- mask = _mask;
- transformType = _transformType;
- transformValues_shm_id = _transformValues_shm_id;
- transformValues_shm_offset = _transformValues_shm_offset;
- }
-
- void* Set(void* cmd,
- GLsizei _numPaths,
- GLenum _pathNameType,
- uint32_t _paths_shm_id,
- uint32_t _paths_shm_offset,
- GLuint _pathBase,
- GLenum _fillMode,
- GLuint _mask,
- GLenum _transformType,
- uint32_t _transformValues_shm_id,
- uint32_t _transformValues_shm_offset) {
- static_cast<ValueType*>(cmd)->Init(
- _numPaths, _pathNameType, _paths_shm_id, _paths_shm_offset, _pathBase,
- _fillMode, _mask, _transformType, _transformValues_shm_id,
- _transformValues_shm_offset);
- return NextCmdAddress<ValueType>(cmd);
- }
-
- gpu::CommandHeader header;
- int32_t numPaths;
- uint32_t pathNameType;
- uint32_t paths_shm_id;
- uint32_t paths_shm_offset;
- uint32_t pathBase;
- uint32_t fillMode;
- uint32_t mask;
- uint32_t transformType;
- uint32_t transformValues_shm_id;
- uint32_t transformValues_shm_offset;
-};
-
-static_assert(sizeof(StencilFillPathInstancedCHROMIUM) == 44,
- "size of StencilFillPathInstancedCHROMIUM should be 44");
-static_assert(offsetof(StencilFillPathInstancedCHROMIUM, header) == 0,
- "offset of StencilFillPathInstancedCHROMIUM header should be 0");
-static_assert(
- offsetof(StencilFillPathInstancedCHROMIUM, numPaths) == 4,
- "offset of StencilFillPathInstancedCHROMIUM numPaths should be 4");
-static_assert(
- offsetof(StencilFillPathInstancedCHROMIUM, pathNameType) == 8,
- "offset of StencilFillPathInstancedCHROMIUM pathNameType should be 8");
-static_assert(
- offsetof(StencilFillPathInstancedCHROMIUM, paths_shm_id) == 12,
- "offset of StencilFillPathInstancedCHROMIUM paths_shm_id should be 12");
-static_assert(
- offsetof(StencilFillPathInstancedCHROMIUM, paths_shm_offset) == 16,
- "offset of StencilFillPathInstancedCHROMIUM paths_shm_offset should be 16");
-static_assert(
- offsetof(StencilFillPathInstancedCHROMIUM, pathBase) == 20,
- "offset of StencilFillPathInstancedCHROMIUM pathBase should be 20");
-static_assert(
- offsetof(StencilFillPathInstancedCHROMIUM, fillMode) == 24,
- "offset of StencilFillPathInstancedCHROMIUM fillMode should be 24");
-static_assert(offsetof(StencilFillPathInstancedCHROMIUM, mask) == 28,
- "offset of StencilFillPathInstancedCHROMIUM mask should be 28");
-static_assert(
- offsetof(StencilFillPathInstancedCHROMIUM, transformType) == 32,
- "offset of StencilFillPathInstancedCHROMIUM transformType should be 32");
-static_assert(offsetof(StencilFillPathInstancedCHROMIUM,
- transformValues_shm_id) == 36,
- "offset of StencilFillPathInstancedCHROMIUM "
- "transformValues_shm_id should be 36");
-static_assert(offsetof(StencilFillPathInstancedCHROMIUM,
- transformValues_shm_offset) == 40,
- "offset of StencilFillPathInstancedCHROMIUM "
- "transformValues_shm_offset should be 40");
-
-struct StencilStrokePathInstancedCHROMIUM {
- typedef StencilStrokePathInstancedCHROMIUM ValueType;
- static const CommandId kCmdId = kStencilStrokePathInstancedCHROMIUM;
- static const cmd::ArgFlags kArgFlags = cmd::kFixed;
- static const uint8_t cmd_flags = CMD_FLAG_SET_TRACE_LEVEL(3);
-
- static uint32_t ComputeSize() {
- return static_cast<uint32_t>(sizeof(ValueType)); // NOLINT
- }
-
- void SetHeader() { header.SetCmd<ValueType>(); }
-
- void Init(GLsizei _numPaths,
- GLenum _pathNameType,
- uint32_t _paths_shm_id,
- uint32_t _paths_shm_offset,
- GLuint _pathBase,
- GLint _reference,
- GLuint _mask,
- GLenum _transformType,
- uint32_t _transformValues_shm_id,
- uint32_t _transformValues_shm_offset) {
- SetHeader();
- numPaths = _numPaths;
- pathNameType = _pathNameType;
- paths_shm_id = _paths_shm_id;
- paths_shm_offset = _paths_shm_offset;
- pathBase = _pathBase;
- reference = _reference;
- mask = _mask;
- transformType = _transformType;
- transformValues_shm_id = _transformValues_shm_id;
- transformValues_shm_offset = _transformValues_shm_offset;
- }
-
- void* Set(void* cmd,
- GLsizei _numPaths,
- GLenum _pathNameType,
- uint32_t _paths_shm_id,
- uint32_t _paths_shm_offset,
- GLuint _pathBase,
- GLint _reference,
- GLuint _mask,
- GLenum _transformType,
- uint32_t _transformValues_shm_id,
- uint32_t _transformValues_shm_offset) {
- static_cast<ValueType*>(cmd)->Init(
- _numPaths, _pathNameType, _paths_shm_id, _paths_shm_offset, _pathBase,
- _reference, _mask, _transformType, _transformValues_shm_id,
- _transformValues_shm_offset);
- return NextCmdAddress<ValueType>(cmd);
- }
-
- gpu::CommandHeader header;
- int32_t numPaths;
- uint32_t pathNameType;
- uint32_t paths_shm_id;
- uint32_t paths_shm_offset;
- uint32_t pathBase;
- int32_t reference;
- uint32_t mask;
- uint32_t transformType;
- uint32_t transformValues_shm_id;
- uint32_t transformValues_shm_offset;
-};
-
-static_assert(sizeof(StencilStrokePathInstancedCHROMIUM) == 44,
- "size of StencilStrokePathInstancedCHROMIUM should be 44");
-static_assert(
- offsetof(StencilStrokePathInstancedCHROMIUM, header) == 0,
- "offset of StencilStrokePathInstancedCHROMIUM header should be 0");
-static_assert(
- offsetof(StencilStrokePathInstancedCHROMIUM, numPaths) == 4,
- "offset of StencilStrokePathInstancedCHROMIUM numPaths should be 4");
-static_assert(
- offsetof(StencilStrokePathInstancedCHROMIUM, pathNameType) == 8,
- "offset of StencilStrokePathInstancedCHROMIUM pathNameType should be 8");
-static_assert(
- offsetof(StencilStrokePathInstancedCHROMIUM, paths_shm_id) == 12,
- "offset of StencilStrokePathInstancedCHROMIUM paths_shm_id should be 12");
-static_assert(offsetof(StencilStrokePathInstancedCHROMIUM, paths_shm_offset) ==
- 16,
- "offset of StencilStrokePathInstancedCHROMIUM paths_shm_offset "
- "should be 16");
-static_assert(
- offsetof(StencilStrokePathInstancedCHROMIUM, pathBase) == 20,
- "offset of StencilStrokePathInstancedCHROMIUM pathBase should be 20");
-static_assert(
- offsetof(StencilStrokePathInstancedCHROMIUM, reference) == 24,
- "offset of StencilStrokePathInstancedCHROMIUM reference should be 24");
-static_assert(offsetof(StencilStrokePathInstancedCHROMIUM, mask) == 28,
- "offset of StencilStrokePathInstancedCHROMIUM mask should be 28");
-static_assert(
- offsetof(StencilStrokePathInstancedCHROMIUM, transformType) == 32,
- "offset of StencilStrokePathInstancedCHROMIUM transformType should be 32");
-static_assert(offsetof(StencilStrokePathInstancedCHROMIUM,
- transformValues_shm_id) == 36,
- "offset of StencilStrokePathInstancedCHROMIUM "
- "transformValues_shm_id should be 36");
-static_assert(offsetof(StencilStrokePathInstancedCHROMIUM,
- transformValues_shm_offset) == 40,
- "offset of StencilStrokePathInstancedCHROMIUM "
- "transformValues_shm_offset should be 40");
-
-struct CoverFillPathInstancedCHROMIUM {
- typedef CoverFillPathInstancedCHROMIUM ValueType;
- static const CommandId kCmdId = kCoverFillPathInstancedCHROMIUM;
- static const cmd::ArgFlags kArgFlags = cmd::kFixed;
- static const uint8_t cmd_flags = CMD_FLAG_SET_TRACE_LEVEL(3);
-
- static uint32_t ComputeSize() {
- return static_cast<uint32_t>(sizeof(ValueType)); // NOLINT
- }
-
- void SetHeader() { header.SetCmd<ValueType>(); }
-
- void Init(GLsizei _numPaths,
- GLenum _pathNameType,
- uint32_t _paths_shm_id,
- uint32_t _paths_shm_offset,
- GLuint _pathBase,
- GLenum _coverMode,
- GLenum _transformType,
- uint32_t _transformValues_shm_id,
- uint32_t _transformValues_shm_offset) {
- SetHeader();
- numPaths = _numPaths;
- pathNameType = _pathNameType;
- paths_shm_id = _paths_shm_id;
- paths_shm_offset = _paths_shm_offset;
- pathBase = _pathBase;
- coverMode = _coverMode;
- transformType = _transformType;
- transformValues_shm_id = _transformValues_shm_id;
- transformValues_shm_offset = _transformValues_shm_offset;
- }
-
- void* Set(void* cmd,
- GLsizei _numPaths,
- GLenum _pathNameType,
- uint32_t _paths_shm_id,
- uint32_t _paths_shm_offset,
- GLuint _pathBase,
- GLenum _coverMode,
- GLenum _transformType,
- uint32_t _transformValues_shm_id,
- uint32_t _transformValues_shm_offset) {
- static_cast<ValueType*>(cmd)->Init(_numPaths, _pathNameType, _paths_shm_id,
- _paths_shm_offset, _pathBase, _coverMode,
- _transformType, _transformValues_shm_id,
- _transformValues_shm_offset);
- return NextCmdAddress<ValueType>(cmd);
- }
-
- gpu::CommandHeader header;
- int32_t numPaths;
- uint32_t pathNameType;
- uint32_t paths_shm_id;
- uint32_t paths_shm_offset;
- uint32_t pathBase;
- uint32_t coverMode;
- uint32_t transformType;
- uint32_t transformValues_shm_id;
- uint32_t transformValues_shm_offset;
-};
-
-static_assert(sizeof(CoverFillPathInstancedCHROMIUM) == 40,
- "size of CoverFillPathInstancedCHROMIUM should be 40");
-static_assert(offsetof(CoverFillPathInstancedCHROMIUM, header) == 0,
- "offset of CoverFillPathInstancedCHROMIUM header should be 0");
-static_assert(offsetof(CoverFillPathInstancedCHROMIUM, numPaths) == 4,
- "offset of CoverFillPathInstancedCHROMIUM numPaths should be 4");
-static_assert(
- offsetof(CoverFillPathInstancedCHROMIUM, pathNameType) == 8,
- "offset of CoverFillPathInstancedCHROMIUM pathNameType should be 8");
-static_assert(
- offsetof(CoverFillPathInstancedCHROMIUM, paths_shm_id) == 12,
- "offset of CoverFillPathInstancedCHROMIUM paths_shm_id should be 12");
-static_assert(
- offsetof(CoverFillPathInstancedCHROMIUM, paths_shm_offset) == 16,
- "offset of CoverFillPathInstancedCHROMIUM paths_shm_offset should be 16");
-static_assert(offsetof(CoverFillPathInstancedCHROMIUM, pathBase) == 20,
- "offset of CoverFillPathInstancedCHROMIUM pathBase should be 20");
-static_assert(
- offsetof(CoverFillPathInstancedCHROMIUM, coverMode) == 24,
- "offset of CoverFillPathInstancedCHROMIUM coverMode should be 24");
-static_assert(
- offsetof(CoverFillPathInstancedCHROMIUM, transformType) == 28,
- "offset of CoverFillPathInstancedCHROMIUM transformType should be 28");
-static_assert(offsetof(CoverFillPathInstancedCHROMIUM,
- transformValues_shm_id) == 32,
- "offset of CoverFillPathInstancedCHROMIUM transformValues_shm_id "
- "should be 32");
-static_assert(offsetof(CoverFillPathInstancedCHROMIUM,
- transformValues_shm_offset) == 36,
- "offset of CoverFillPathInstancedCHROMIUM "
- "transformValues_shm_offset should be 36");
-
-struct CoverStrokePathInstancedCHROMIUM {
- typedef CoverStrokePathInstancedCHROMIUM ValueType;
- static const CommandId kCmdId = kCoverStrokePathInstancedCHROMIUM;
- static const cmd::ArgFlags kArgFlags = cmd::kFixed;
- static const uint8_t cmd_flags = CMD_FLAG_SET_TRACE_LEVEL(3);
-
- static uint32_t ComputeSize() {
- return static_cast<uint32_t>(sizeof(ValueType)); // NOLINT
- }
-
- void SetHeader() { header.SetCmd<ValueType>(); }
-
- void Init(GLsizei _numPaths,
- GLenum _pathNameType,
- uint32_t _paths_shm_id,
- uint32_t _paths_shm_offset,
- GLuint _pathBase,
- GLenum _coverMode,
- GLenum _transformType,
- uint32_t _transformValues_shm_id,
- uint32_t _transformValues_shm_offset) {
- SetHeader();
- numPaths = _numPaths;
- pathNameType = _pathNameType;
- paths_shm_id = _paths_shm_id;
- paths_shm_offset = _paths_shm_offset;
- pathBase = _pathBase;
- coverMode = _coverMode;
- transformType = _transformType;
- transformValues_shm_id = _transformValues_shm_id;
- transformValues_shm_offset = _transformValues_shm_offset;
- }
-
- void* Set(void* cmd,
- GLsizei _numPaths,
- GLenum _pathNameType,
- uint32_t _paths_shm_id,
- uint32_t _paths_shm_offset,
- GLuint _pathBase,
- GLenum _coverMode,
- GLenum _transformType,
- uint32_t _transformValues_shm_id,
- uint32_t _transformValues_shm_offset) {
- static_cast<ValueType*>(cmd)->Init(_numPaths, _pathNameType, _paths_shm_id,
- _paths_shm_offset, _pathBase, _coverMode,
- _transformType, _transformValues_shm_id,
- _transformValues_shm_offset);
- return NextCmdAddress<ValueType>(cmd);
- }
-
- gpu::CommandHeader header;
- int32_t numPaths;
- uint32_t pathNameType;
- uint32_t paths_shm_id;
- uint32_t paths_shm_offset;
- uint32_t pathBase;
- uint32_t coverMode;
- uint32_t transformType;
- uint32_t transformValues_shm_id;
- uint32_t transformValues_shm_offset;
-};
-
-static_assert(sizeof(CoverStrokePathInstancedCHROMIUM) == 40,
- "size of CoverStrokePathInstancedCHROMIUM should be 40");
-static_assert(offsetof(CoverStrokePathInstancedCHROMIUM, header) == 0,
- "offset of CoverStrokePathInstancedCHROMIUM header should be 0");
-static_assert(
- offsetof(CoverStrokePathInstancedCHROMIUM, numPaths) == 4,
- "offset of CoverStrokePathInstancedCHROMIUM numPaths should be 4");
-static_assert(
- offsetof(CoverStrokePathInstancedCHROMIUM, pathNameType) == 8,
- "offset of CoverStrokePathInstancedCHROMIUM pathNameType should be 8");
-static_assert(
- offsetof(CoverStrokePathInstancedCHROMIUM, paths_shm_id) == 12,
- "offset of CoverStrokePathInstancedCHROMIUM paths_shm_id should be 12");
-static_assert(
- offsetof(CoverStrokePathInstancedCHROMIUM, paths_shm_offset) == 16,
- "offset of CoverStrokePathInstancedCHROMIUM paths_shm_offset should be 16");
-static_assert(
- offsetof(CoverStrokePathInstancedCHROMIUM, pathBase) == 20,
- "offset of CoverStrokePathInstancedCHROMIUM pathBase should be 20");
-static_assert(
- offsetof(CoverStrokePathInstancedCHROMIUM, coverMode) == 24,
- "offset of CoverStrokePathInstancedCHROMIUM coverMode should be 24");
-static_assert(
- offsetof(CoverStrokePathInstancedCHROMIUM, transformType) == 28,
- "offset of CoverStrokePathInstancedCHROMIUM transformType should be 28");
-static_assert(offsetof(CoverStrokePathInstancedCHROMIUM,
- transformValues_shm_id) == 32,
- "offset of CoverStrokePathInstancedCHROMIUM "
- "transformValues_shm_id should be 32");
-static_assert(offsetof(CoverStrokePathInstancedCHROMIUM,
- transformValues_shm_offset) == 36,
- "offset of CoverStrokePathInstancedCHROMIUM "
- "transformValues_shm_offset should be 36");
-
-struct StencilThenCoverFillPathInstancedCHROMIUM {
- typedef StencilThenCoverFillPathInstancedCHROMIUM ValueType;
- static const CommandId kCmdId = kStencilThenCoverFillPathInstancedCHROMIUM;
- static const cmd::ArgFlags kArgFlags = cmd::kFixed;
- static const uint8_t cmd_flags = CMD_FLAG_SET_TRACE_LEVEL(3);
-
- static uint32_t ComputeSize() {
- return static_cast<uint32_t>(sizeof(ValueType)); // NOLINT
- }
-
- void SetHeader() { header.SetCmd<ValueType>(); }
-
- void Init(GLsizei _numPaths,
- GLenum _pathNameType,
- uint32_t _paths_shm_id,
- uint32_t _paths_shm_offset,
- GLuint _pathBase,
- GLenum _fillMode,
- GLuint _mask,
- GLenum _coverMode,
- GLenum _transformType,
- uint32_t _transformValues_shm_id,
- uint32_t _transformValues_shm_offset) {
- SetHeader();
- numPaths = _numPaths;
- pathNameType = _pathNameType;
- paths_shm_id = _paths_shm_id;
- paths_shm_offset = _paths_shm_offset;
- pathBase = _pathBase;
- fillMode = _fillMode;
- mask = _mask;
- coverMode = _coverMode;
- transformType = _transformType;
- transformValues_shm_id = _transformValues_shm_id;
- transformValues_shm_offset = _transformValues_shm_offset;
- }
-
- void* Set(void* cmd,
- GLsizei _numPaths,
- GLenum _pathNameType,
- uint32_t _paths_shm_id,
- uint32_t _paths_shm_offset,
- GLuint _pathBase,
- GLenum _fillMode,
- GLuint _mask,
- GLenum _coverMode,
- GLenum _transformType,
- uint32_t _transformValues_shm_id,
- uint32_t _transformValues_shm_offset) {
- static_cast<ValueType*>(cmd)->Init(
- _numPaths, _pathNameType, _paths_shm_id, _paths_shm_offset, _pathBase,
- _fillMode, _mask, _coverMode, _transformType, _transformValues_shm_id,
- _transformValues_shm_offset);
- return NextCmdAddress<ValueType>(cmd);
- }
-
- gpu::CommandHeader header;
- int32_t numPaths;
- uint32_t pathNameType;
- uint32_t paths_shm_id;
- uint32_t paths_shm_offset;
- uint32_t pathBase;
- uint32_t fillMode;
- uint32_t mask;
- uint32_t coverMode;
- uint32_t transformType;
- uint32_t transformValues_shm_id;
- uint32_t transformValues_shm_offset;
-};
-
-static_assert(sizeof(StencilThenCoverFillPathInstancedCHROMIUM) == 48,
- "size of StencilThenCoverFillPathInstancedCHROMIUM should be 48");
-static_assert(
- offsetof(StencilThenCoverFillPathInstancedCHROMIUM, header) == 0,
- "offset of StencilThenCoverFillPathInstancedCHROMIUM header should be 0");
-static_assert(
- offsetof(StencilThenCoverFillPathInstancedCHROMIUM, numPaths) == 4,
- "offset of StencilThenCoverFillPathInstancedCHROMIUM numPaths should be 4");
-static_assert(offsetof(StencilThenCoverFillPathInstancedCHROMIUM,
- pathNameType) == 8,
- "offset of StencilThenCoverFillPathInstancedCHROMIUM "
- "pathNameType should be 8");
-static_assert(offsetof(StencilThenCoverFillPathInstancedCHROMIUM,
- paths_shm_id) == 12,
- "offset of StencilThenCoverFillPathInstancedCHROMIUM "
- "paths_shm_id should be 12");
-static_assert(offsetof(StencilThenCoverFillPathInstancedCHROMIUM,
- paths_shm_offset) == 16,
- "offset of StencilThenCoverFillPathInstancedCHROMIUM "
- "paths_shm_offset should be 16");
-static_assert(offsetof(StencilThenCoverFillPathInstancedCHROMIUM, pathBase) ==
- 20,
- "offset of StencilThenCoverFillPathInstancedCHROMIUM pathBase "
- "should be 20");
-static_assert(offsetof(StencilThenCoverFillPathInstancedCHROMIUM, fillMode) ==
- 24,
- "offset of StencilThenCoverFillPathInstancedCHROMIUM fillMode "
- "should be 24");
-static_assert(
- offsetof(StencilThenCoverFillPathInstancedCHROMIUM, mask) == 28,
- "offset of StencilThenCoverFillPathInstancedCHROMIUM mask should be 28");
-static_assert(offsetof(StencilThenCoverFillPathInstancedCHROMIUM, coverMode) ==
- 32,
- "offset of StencilThenCoverFillPathInstancedCHROMIUM coverMode "
- "should be 32");
-static_assert(offsetof(StencilThenCoverFillPathInstancedCHROMIUM,
- transformType) == 36,
- "offset of StencilThenCoverFillPathInstancedCHROMIUM "
- "transformType should be 36");
-static_assert(offsetof(StencilThenCoverFillPathInstancedCHROMIUM,
- transformValues_shm_id) == 40,
- "offset of StencilThenCoverFillPathInstancedCHROMIUM "
- "transformValues_shm_id should be 40");
-static_assert(offsetof(StencilThenCoverFillPathInstancedCHROMIUM,
- transformValues_shm_offset) == 44,
- "offset of StencilThenCoverFillPathInstancedCHROMIUM "
- "transformValues_shm_offset should be 44");
-
-struct StencilThenCoverStrokePathInstancedCHROMIUM {
- typedef StencilThenCoverStrokePathInstancedCHROMIUM ValueType;
- static const CommandId kCmdId = kStencilThenCoverStrokePathInstancedCHROMIUM;
- static const cmd::ArgFlags kArgFlags = cmd::kFixed;
- static const uint8_t cmd_flags = CMD_FLAG_SET_TRACE_LEVEL(3);
-
- static uint32_t ComputeSize() {
- return static_cast<uint32_t>(sizeof(ValueType)); // NOLINT
- }
-
- void SetHeader() { header.SetCmd<ValueType>(); }
-
- void Init(GLsizei _numPaths,
- GLenum _pathNameType,
- uint32_t _paths_shm_id,
- uint32_t _paths_shm_offset,
- GLuint _pathBase,
- GLint _reference,
- GLuint _mask,
- GLenum _coverMode,
- GLenum _transformType,
- uint32_t _transformValues_shm_id,
- uint32_t _transformValues_shm_offset) {
- SetHeader();
- numPaths = _numPaths;
- pathNameType = _pathNameType;
- paths_shm_id = _paths_shm_id;
- paths_shm_offset = _paths_shm_offset;
- pathBase = _pathBase;
- reference = _reference;
- mask = _mask;
- coverMode = _coverMode;
- transformType = _transformType;
- transformValues_shm_id = _transformValues_shm_id;
- transformValues_shm_offset = _transformValues_shm_offset;
- }
-
- void* Set(void* cmd,
- GLsizei _numPaths,
- GLenum _pathNameType,
- uint32_t _paths_shm_id,
- uint32_t _paths_shm_offset,
- GLuint _pathBase,
- GLint _reference,
- GLuint _mask,
- GLenum _coverMode,
- GLenum _transformType,
- uint32_t _transformValues_shm_id,
- uint32_t _transformValues_shm_offset) {
- static_cast<ValueType*>(cmd)->Init(
- _numPaths, _pathNameType, _paths_shm_id, _paths_shm_offset, _pathBase,
- _reference, _mask, _coverMode, _transformType, _transformValues_shm_id,
- _transformValues_shm_offset);
- return NextCmdAddress<ValueType>(cmd);
- }
-
- gpu::CommandHeader header;
- int32_t numPaths;
- uint32_t pathNameType;
- uint32_t paths_shm_id;
- uint32_t paths_shm_offset;
- uint32_t pathBase;
- int32_t reference;
- uint32_t mask;
- uint32_t coverMode;
- uint32_t transformType;
- uint32_t transformValues_shm_id;
- uint32_t transformValues_shm_offset;
-};
-
-static_assert(
- sizeof(StencilThenCoverStrokePathInstancedCHROMIUM) == 48,
- "size of StencilThenCoverStrokePathInstancedCHROMIUM should be 48");
-static_assert(
- offsetof(StencilThenCoverStrokePathInstancedCHROMIUM, header) == 0,
- "offset of StencilThenCoverStrokePathInstancedCHROMIUM header should be 0");
-static_assert(offsetof(StencilThenCoverStrokePathInstancedCHROMIUM, numPaths) ==
- 4,
- "offset of StencilThenCoverStrokePathInstancedCHROMIUM numPaths "
- "should be 4");
-static_assert(offsetof(StencilThenCoverStrokePathInstancedCHROMIUM,
- pathNameType) == 8,
- "offset of StencilThenCoverStrokePathInstancedCHROMIUM "
- "pathNameType should be 8");
-static_assert(offsetof(StencilThenCoverStrokePathInstancedCHROMIUM,
- paths_shm_id) == 12,
- "offset of StencilThenCoverStrokePathInstancedCHROMIUM "
- "paths_shm_id should be 12");
-static_assert(offsetof(StencilThenCoverStrokePathInstancedCHROMIUM,
- paths_shm_offset) == 16,
- "offset of StencilThenCoverStrokePathInstancedCHROMIUM "
- "paths_shm_offset should be 16");
-static_assert(offsetof(StencilThenCoverStrokePathInstancedCHROMIUM, pathBase) ==
- 20,
- "offset of StencilThenCoverStrokePathInstancedCHROMIUM pathBase "
- "should be 20");
-static_assert(offsetof(StencilThenCoverStrokePathInstancedCHROMIUM,
- reference) == 24,
- "offset of StencilThenCoverStrokePathInstancedCHROMIUM reference "
- "should be 24");
-static_assert(
- offsetof(StencilThenCoverStrokePathInstancedCHROMIUM, mask) == 28,
- "offset of StencilThenCoverStrokePathInstancedCHROMIUM mask should be 28");
-static_assert(offsetof(StencilThenCoverStrokePathInstancedCHROMIUM,
- coverMode) == 32,
- "offset of StencilThenCoverStrokePathInstancedCHROMIUM coverMode "
- "should be 32");
-static_assert(offsetof(StencilThenCoverStrokePathInstancedCHROMIUM,
- transformType) == 36,
- "offset of StencilThenCoverStrokePathInstancedCHROMIUM "
- "transformType should be 36");
-static_assert(offsetof(StencilThenCoverStrokePathInstancedCHROMIUM,
- transformValues_shm_id) == 40,
- "offset of StencilThenCoverStrokePathInstancedCHROMIUM "
- "transformValues_shm_id should be 40");
-static_assert(offsetof(StencilThenCoverStrokePathInstancedCHROMIUM,
- transformValues_shm_offset) == 44,
- "offset of StencilThenCoverStrokePathInstancedCHROMIUM "
- "transformValues_shm_offset should be 44");
-
-struct BindFragmentInputLocationCHROMIUMBucket {
- typedef BindFragmentInputLocationCHROMIUMBucket ValueType;
- static const CommandId kCmdId = kBindFragmentInputLocationCHROMIUMBucket;
- static const cmd::ArgFlags kArgFlags = cmd::kFixed;
- static const uint8_t cmd_flags = CMD_FLAG_SET_TRACE_LEVEL(3);
-
- static uint32_t ComputeSize() {
- return static_cast<uint32_t>(sizeof(ValueType)); // NOLINT
- }
-
- void SetHeader() { header.SetCmd<ValueType>(); }
-
- void Init(GLuint _program, GLint _location, uint32_t _name_bucket_id) {
- SetHeader();
- program = _program;
- location = _location;
- name_bucket_id = _name_bucket_id;
- }
-
- void* Set(void* cmd,
- GLuint _program,
- GLint _location,
- uint32_t _name_bucket_id) {
- static_cast<ValueType*>(cmd)->Init(_program, _location, _name_bucket_id);
- return NextCmdAddress<ValueType>(cmd);
- }
-
- gpu::CommandHeader header;
- uint32_t program;
- int32_t location;
- uint32_t name_bucket_id;
-};
-
-static_assert(sizeof(BindFragmentInputLocationCHROMIUMBucket) == 16,
- "size of BindFragmentInputLocationCHROMIUMBucket should be 16");
-static_assert(
- offsetof(BindFragmentInputLocationCHROMIUMBucket, header) == 0,
- "offset of BindFragmentInputLocationCHROMIUMBucket header should be 0");
-static_assert(
- offsetof(BindFragmentInputLocationCHROMIUMBucket, program) == 4,
- "offset of BindFragmentInputLocationCHROMIUMBucket program should be 4");
-static_assert(
- offsetof(BindFragmentInputLocationCHROMIUMBucket, location) == 8,
- "offset of BindFragmentInputLocationCHROMIUMBucket location should be 8");
-static_assert(offsetof(BindFragmentInputLocationCHROMIUMBucket,
- name_bucket_id) == 12,
- "offset of BindFragmentInputLocationCHROMIUMBucket "
- "name_bucket_id should be 12");
-
-struct ProgramPathFragmentInputGenCHROMIUM {
- typedef ProgramPathFragmentInputGenCHROMIUM ValueType;
- static const CommandId kCmdId = kProgramPathFragmentInputGenCHROMIUM;
- static const cmd::ArgFlags kArgFlags = cmd::kFixed;
- static const uint8_t cmd_flags = CMD_FLAG_SET_TRACE_LEVEL(3);
-
- static uint32_t ComputeSize() {
- return static_cast<uint32_t>(sizeof(ValueType)); // NOLINT
- }
-
- void SetHeader() { header.SetCmd<ValueType>(); }
-
- void Init(GLuint _program,
- GLint _location,
- GLenum _genMode,
- GLint _components,
- uint32_t _coeffs_shm_id,
- uint32_t _coeffs_shm_offset) {
- SetHeader();
- program = _program;
- location = _location;
- genMode = _genMode;
- components = _components;
- coeffs_shm_id = _coeffs_shm_id;
- coeffs_shm_offset = _coeffs_shm_offset;
- }
-
- void* Set(void* cmd,
- GLuint _program,
- GLint _location,
- GLenum _genMode,
- GLint _components,
- uint32_t _coeffs_shm_id,
- uint32_t _coeffs_shm_offset) {
- static_cast<ValueType*>(cmd)->Init(_program, _location, _genMode,
- _components, _coeffs_shm_id,
- _coeffs_shm_offset);
- return NextCmdAddress<ValueType>(cmd);
- }
-
- gpu::CommandHeader header;
- uint32_t program;
- int32_t location;
- uint32_t genMode;
- int32_t components;
- uint32_t coeffs_shm_id;
- uint32_t coeffs_shm_offset;
-};
-
-static_assert(sizeof(ProgramPathFragmentInputGenCHROMIUM) == 28,
- "size of ProgramPathFragmentInputGenCHROMIUM should be 28");
-static_assert(
- offsetof(ProgramPathFragmentInputGenCHROMIUM, header) == 0,
- "offset of ProgramPathFragmentInputGenCHROMIUM header should be 0");
-static_assert(
- offsetof(ProgramPathFragmentInputGenCHROMIUM, program) == 4,
- "offset of ProgramPathFragmentInputGenCHROMIUM program should be 4");
-static_assert(
- offsetof(ProgramPathFragmentInputGenCHROMIUM, location) == 8,
- "offset of ProgramPathFragmentInputGenCHROMIUM location should be 8");
-static_assert(
- offsetof(ProgramPathFragmentInputGenCHROMIUM, genMode) == 12,
- "offset of ProgramPathFragmentInputGenCHROMIUM genMode should be 12");
-static_assert(
- offsetof(ProgramPathFragmentInputGenCHROMIUM, components) == 16,
- "offset of ProgramPathFragmentInputGenCHROMIUM components should be 16");
-static_assert(
- offsetof(ProgramPathFragmentInputGenCHROMIUM, coeffs_shm_id) == 20,
- "offset of ProgramPathFragmentInputGenCHROMIUM coeffs_shm_id should be 20");
-static_assert(offsetof(ProgramPathFragmentInputGenCHROMIUM,
- coeffs_shm_offset) == 24,
- "offset of ProgramPathFragmentInputGenCHROMIUM coeffs_shm_offset "
- "should be 24");
-
struct ContextVisibilityHintCHROMIUM {
typedef ContextVisibilityHintCHROMIUM ValueType;
static const CommandId kCmdId = kContextVisibilityHintCHROMIUM;
@@ -18122,4 +16721,60 @@ static_assert(
offsetof(EndSharedImageAccessDirectCHROMIUM, texture) == 4,
"offset of EndSharedImageAccessDirectCHROMIUM texture should be 4");
+struct BeginBatchReadAccessSharedImageCHROMIUM {
+ typedef BeginBatchReadAccessSharedImageCHROMIUM ValueType;
+ static const CommandId kCmdId = kBeginBatchReadAccessSharedImageCHROMIUM;
+ static const cmd::ArgFlags kArgFlags = cmd::kFixed;
+ static const uint8_t cmd_flags = CMD_FLAG_SET_TRACE_LEVEL(3);
+
+ static uint32_t ComputeSize() {
+ return static_cast<uint32_t>(sizeof(ValueType)); // NOLINT
+ }
+
+ void SetHeader() { header.SetCmd<ValueType>(); }
+
+ void Init() { SetHeader(); }
+
+ void* Set(void* cmd) {
+ static_cast<ValueType*>(cmd)->Init();
+ return NextCmdAddress<ValueType>(cmd);
+ }
+
+ gpu::CommandHeader header;
+};
+
+static_assert(sizeof(BeginBatchReadAccessSharedImageCHROMIUM) == 4,
+ "size of BeginBatchReadAccessSharedImageCHROMIUM should be 4");
+static_assert(
+ offsetof(BeginBatchReadAccessSharedImageCHROMIUM, header) == 0,
+ "offset of BeginBatchReadAccessSharedImageCHROMIUM header should be 0");
+
+struct EndBatchReadAccessSharedImageCHROMIUM {
+ typedef EndBatchReadAccessSharedImageCHROMIUM ValueType;
+ static const CommandId kCmdId = kEndBatchReadAccessSharedImageCHROMIUM;
+ static const cmd::ArgFlags kArgFlags = cmd::kFixed;
+ static const uint8_t cmd_flags = CMD_FLAG_SET_TRACE_LEVEL(3);
+
+ static uint32_t ComputeSize() {
+ return static_cast<uint32_t>(sizeof(ValueType)); // NOLINT
+ }
+
+ void SetHeader() { header.SetCmd<ValueType>(); }
+
+ void Init() { SetHeader(); }
+
+ void* Set(void* cmd) {
+ static_cast<ValueType*>(cmd)->Init();
+ return NextCmdAddress<ValueType>(cmd);
+ }
+
+ gpu::CommandHeader header;
+};
+
+static_assert(sizeof(EndBatchReadAccessSharedImageCHROMIUM) == 4,
+ "size of EndBatchReadAccessSharedImageCHROMIUM should be 4");
+static_assert(
+ offsetof(EndBatchReadAccessSharedImageCHROMIUM, header) == 0,
+ "offset of EndBatchReadAccessSharedImageCHROMIUM header should be 0");
+
#endif // GPU_COMMAND_BUFFER_COMMON_GLES2_CMD_FORMAT_AUTOGEN_H_
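The two commands added above (BeginBatchReadAccessSharedImageCHROMIUM and EndBatchReadAccessSharedImageCHROMIUM) are zero-argument, fixed-size entries. A minimal sketch of how the client side typically serializes such a command, assuming the CommandBufferHelper-style GetCmdSpace<T>() allocator used by the generated helpers; the function name and includes below are illustrative, not part of this commit:

  // Sketch only; names and includes are illustrative, not from this commit.
  #include "gpu/command_buffer/client/cmd_buffer_helper.h"
  #include "gpu/command_buffer/common/gles2_cmd_format.h"

  void EmitBeginBatchReadAccess(gpu::CommandBufferHelper* helper) {
    using Cmd = gpu::gles2::cmds::BeginBatchReadAccessSharedImageCHROMIUM;
    // Reserves sizeof(Cmd) bytes (one 4-byte entry) in the ring buffer.
    Cmd* c = helper->GetCmdSpace<Cmd>();
    if (c) {
      c->Init();  // Init() only calls SetHeader(): command id + size.
    }
  }

The matching End command would be emitted the same way once the batched reads are finished.
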
diff --git a/chromium/gpu/command_buffer/common/gles2_cmd_format_test_autogen.h b/chromium/gpu/command_buffer/common/gles2_cmd_format_test_autogen.h
index 43c019660e3..791783b0664 100644
--- a/chromium/gpu/command_buffer/common/gles2_cmd_format_test_autogen.h
+++ b/chromium/gpu/command_buffer/common/gles2_cmd_format_test_autogen.h
@@ -4443,17 +4443,20 @@ TEST_F(GLES2FormatTest, FlushMappedBufferRange) {
TEST_F(GLES2FormatTest, ResizeCHROMIUM) {
cmds::ResizeCHROMIUM& cmd = *GetBufferAs<cmds::ResizeCHROMIUM>();
- void* next_cmd = cmd.Set(&cmd, static_cast<GLuint>(11),
- static_cast<GLuint>(12), static_cast<GLfloat>(13),
- static_cast<GLenum>(14), static_cast<GLboolean>(15));
+ void* next_cmd = cmd.Set(&cmd, static_cast<GLint>(11), static_cast<GLint>(12),
+ static_cast<GLfloat>(13), static_cast<GLboolean>(14),
+ static_cast<GLuint>(15), static_cast<GLuint>(16),
+ static_cast<GLsizei>(17));
EXPECT_EQ(static_cast<uint32_t>(cmds::ResizeCHROMIUM::kCmdId),
cmd.header.command);
EXPECT_EQ(sizeof(cmd), cmd.header.size * 4u);
- EXPECT_EQ(static_cast<GLuint>(11), cmd.width);
- EXPECT_EQ(static_cast<GLuint>(12), cmd.height);
+ EXPECT_EQ(static_cast<GLint>(11), cmd.width);
+ EXPECT_EQ(static_cast<GLint>(12), cmd.height);
EXPECT_EQ(static_cast<GLfloat>(13), cmd.scale_factor);
- EXPECT_EQ(static_cast<GLenum>(14), cmd.color_space);
- EXPECT_EQ(static_cast<GLboolean>(15), cmd.alpha);
+ EXPECT_EQ(static_cast<GLboolean>(14), cmd.alpha);
+ EXPECT_EQ(static_cast<GLuint>(15), cmd.shm_id);
+ EXPECT_EQ(static_cast<GLuint>(16), cmd.shm_offset);
+ EXPECT_EQ(static_cast<GLsizei>(17), cmd.color_space_size);
CheckBytesWrittenMatchesExpectedSize(next_cmd, sizeof(cmd));
}
@@ -5103,426 +5106,6 @@ TEST_F(GLES2FormatTest, SetActiveURLCHROMIUM) {
CheckBytesWrittenMatchesExpectedSize(next_cmd, sizeof(cmd));
}
-TEST_F(GLES2FormatTest, MatrixLoadfCHROMIUMImmediate) {
- const int kSomeBaseValueToTestWith = 51;
- static GLfloat data[] = {
- static_cast<GLfloat>(kSomeBaseValueToTestWith + 0),
- static_cast<GLfloat>(kSomeBaseValueToTestWith + 1),
- static_cast<GLfloat>(kSomeBaseValueToTestWith + 2),
- static_cast<GLfloat>(kSomeBaseValueToTestWith + 3),
- static_cast<GLfloat>(kSomeBaseValueToTestWith + 4),
- static_cast<GLfloat>(kSomeBaseValueToTestWith + 5),
- static_cast<GLfloat>(kSomeBaseValueToTestWith + 6),
- static_cast<GLfloat>(kSomeBaseValueToTestWith + 7),
- static_cast<GLfloat>(kSomeBaseValueToTestWith + 8),
- static_cast<GLfloat>(kSomeBaseValueToTestWith + 9),
- static_cast<GLfloat>(kSomeBaseValueToTestWith + 10),
- static_cast<GLfloat>(kSomeBaseValueToTestWith + 11),
- static_cast<GLfloat>(kSomeBaseValueToTestWith + 12),
- static_cast<GLfloat>(kSomeBaseValueToTestWith + 13),
- static_cast<GLfloat>(kSomeBaseValueToTestWith + 14),
- static_cast<GLfloat>(kSomeBaseValueToTestWith + 15),
- };
- cmds::MatrixLoadfCHROMIUMImmediate& cmd =
- *GetBufferAs<cmds::MatrixLoadfCHROMIUMImmediate>();
- void* next_cmd = cmd.Set(&cmd, static_cast<GLenum>(11), data);
- EXPECT_EQ(static_cast<uint32_t>(cmds::MatrixLoadfCHROMIUMImmediate::kCmdId),
- cmd.header.command);
- EXPECT_EQ(sizeof(cmd) + RoundSizeToMultipleOfEntries(sizeof(data)),
- cmd.header.size * 4u);
- EXPECT_EQ(static_cast<GLenum>(11), cmd.matrixMode);
- CheckBytesWrittenMatchesExpectedSize(
- next_cmd, sizeof(cmd) + RoundSizeToMultipleOfEntries(sizeof(data)));
-}
-
-TEST_F(GLES2FormatTest, MatrixLoadIdentityCHROMIUM) {
- cmds::MatrixLoadIdentityCHROMIUM& cmd =
- *GetBufferAs<cmds::MatrixLoadIdentityCHROMIUM>();
- void* next_cmd = cmd.Set(&cmd, static_cast<GLenum>(11));
- EXPECT_EQ(static_cast<uint32_t>(cmds::MatrixLoadIdentityCHROMIUM::kCmdId),
- cmd.header.command);
- EXPECT_EQ(sizeof(cmd), cmd.header.size * 4u);
- EXPECT_EQ(static_cast<GLenum>(11), cmd.matrixMode);
- CheckBytesWrittenMatchesExpectedSize(next_cmd, sizeof(cmd));
-}
-
-TEST_F(GLES2FormatTest, GenPathsCHROMIUM) {
- cmds::GenPathsCHROMIUM& cmd = *GetBufferAs<cmds::GenPathsCHROMIUM>();
- void* next_cmd =
- cmd.Set(&cmd, static_cast<GLuint>(11), static_cast<GLsizei>(12));
- EXPECT_EQ(static_cast<uint32_t>(cmds::GenPathsCHROMIUM::kCmdId),
- cmd.header.command);
- EXPECT_EQ(sizeof(cmd), cmd.header.size * 4u);
- EXPECT_EQ(static_cast<GLuint>(11), cmd.first_client_id);
- EXPECT_EQ(static_cast<GLsizei>(12), cmd.range);
- CheckBytesWrittenMatchesExpectedSize(next_cmd, sizeof(cmd));
-}
-
-TEST_F(GLES2FormatTest, DeletePathsCHROMIUM) {
- cmds::DeletePathsCHROMIUM& cmd = *GetBufferAs<cmds::DeletePathsCHROMIUM>();
- void* next_cmd =
- cmd.Set(&cmd, static_cast<GLuint>(11), static_cast<GLsizei>(12));
- EXPECT_EQ(static_cast<uint32_t>(cmds::DeletePathsCHROMIUM::kCmdId),
- cmd.header.command);
- EXPECT_EQ(sizeof(cmd), cmd.header.size * 4u);
- EXPECT_EQ(static_cast<GLuint>(11), cmd.first_client_id);
- EXPECT_EQ(static_cast<GLsizei>(12), cmd.range);
- CheckBytesWrittenMatchesExpectedSize(next_cmd, sizeof(cmd));
-}
-
-TEST_F(GLES2FormatTest, IsPathCHROMIUM) {
- cmds::IsPathCHROMIUM& cmd = *GetBufferAs<cmds::IsPathCHROMIUM>();
- void* next_cmd =
- cmd.Set(&cmd, static_cast<GLuint>(11), static_cast<uint32_t>(12),
- static_cast<uint32_t>(13));
- EXPECT_EQ(static_cast<uint32_t>(cmds::IsPathCHROMIUM::kCmdId),
- cmd.header.command);
- EXPECT_EQ(sizeof(cmd), cmd.header.size * 4u);
- EXPECT_EQ(static_cast<GLuint>(11), cmd.path);
- EXPECT_EQ(static_cast<uint32_t>(12), cmd.result_shm_id);
- EXPECT_EQ(static_cast<uint32_t>(13), cmd.result_shm_offset);
- CheckBytesWrittenMatchesExpectedSize(next_cmd, sizeof(cmd));
-}
-
-TEST_F(GLES2FormatTest, PathCommandsCHROMIUM) {
- cmds::PathCommandsCHROMIUM& cmd = *GetBufferAs<cmds::PathCommandsCHROMIUM>();
- void* next_cmd =
- cmd.Set(&cmd, static_cast<GLuint>(11), static_cast<GLsizei>(12),
- static_cast<uint32_t>(13), static_cast<uint32_t>(14),
- static_cast<GLsizei>(15), static_cast<GLenum>(16),
- static_cast<uint32_t>(17), static_cast<uint32_t>(18));
- EXPECT_EQ(static_cast<uint32_t>(cmds::PathCommandsCHROMIUM::kCmdId),
- cmd.header.command);
- EXPECT_EQ(sizeof(cmd), cmd.header.size * 4u);
- EXPECT_EQ(static_cast<GLuint>(11), cmd.path);
- EXPECT_EQ(static_cast<GLsizei>(12), cmd.numCommands);
- EXPECT_EQ(static_cast<uint32_t>(13), cmd.commands_shm_id);
- EXPECT_EQ(static_cast<uint32_t>(14), cmd.commands_shm_offset);
- EXPECT_EQ(static_cast<GLsizei>(15), cmd.numCoords);
- EXPECT_EQ(static_cast<GLenum>(16), cmd.coordType);
- EXPECT_EQ(static_cast<uint32_t>(17), cmd.coords_shm_id);
- EXPECT_EQ(static_cast<uint32_t>(18), cmd.coords_shm_offset);
- CheckBytesWrittenMatchesExpectedSize(next_cmd, sizeof(cmd));
-}
-
-TEST_F(GLES2FormatTest, PathParameterfCHROMIUM) {
- cmds::PathParameterfCHROMIUM& cmd =
- *GetBufferAs<cmds::PathParameterfCHROMIUM>();
- void* next_cmd = cmd.Set(&cmd, static_cast<GLuint>(11),
- static_cast<GLenum>(12), static_cast<GLfloat>(13));
- EXPECT_EQ(static_cast<uint32_t>(cmds::PathParameterfCHROMIUM::kCmdId),
- cmd.header.command);
- EXPECT_EQ(sizeof(cmd), cmd.header.size * 4u);
- EXPECT_EQ(static_cast<GLuint>(11), cmd.path);
- EXPECT_EQ(static_cast<GLenum>(12), cmd.pname);
- EXPECT_EQ(static_cast<GLfloat>(13), cmd.value);
- CheckBytesWrittenMatchesExpectedSize(next_cmd, sizeof(cmd));
-}
-
-TEST_F(GLES2FormatTest, PathParameteriCHROMIUM) {
- cmds::PathParameteriCHROMIUM& cmd =
- *GetBufferAs<cmds::PathParameteriCHROMIUM>();
- void* next_cmd = cmd.Set(&cmd, static_cast<GLuint>(11),
- static_cast<GLenum>(12), static_cast<GLint>(13));
- EXPECT_EQ(static_cast<uint32_t>(cmds::PathParameteriCHROMIUM::kCmdId),
- cmd.header.command);
- EXPECT_EQ(sizeof(cmd), cmd.header.size * 4u);
- EXPECT_EQ(static_cast<GLuint>(11), cmd.path);
- EXPECT_EQ(static_cast<GLenum>(12), cmd.pname);
- EXPECT_EQ(static_cast<GLint>(13), cmd.value);
- CheckBytesWrittenMatchesExpectedSize(next_cmd, sizeof(cmd));
-}
-
-TEST_F(GLES2FormatTest, PathStencilFuncCHROMIUM) {
- cmds::PathStencilFuncCHROMIUM& cmd =
- *GetBufferAs<cmds::PathStencilFuncCHROMIUM>();
- void* next_cmd = cmd.Set(&cmd, static_cast<GLenum>(11),
- static_cast<GLint>(12), static_cast<GLuint>(13));
- EXPECT_EQ(static_cast<uint32_t>(cmds::PathStencilFuncCHROMIUM::kCmdId),
- cmd.header.command);
- EXPECT_EQ(sizeof(cmd), cmd.header.size * 4u);
- EXPECT_EQ(static_cast<GLenum>(11), cmd.func);
- EXPECT_EQ(static_cast<GLint>(12), cmd.ref);
- EXPECT_EQ(static_cast<GLuint>(13), cmd.mask);
- CheckBytesWrittenMatchesExpectedSize(next_cmd, sizeof(cmd));
-}
-
-TEST_F(GLES2FormatTest, StencilFillPathCHROMIUM) {
- cmds::StencilFillPathCHROMIUM& cmd =
- *GetBufferAs<cmds::StencilFillPathCHROMIUM>();
- void* next_cmd = cmd.Set(&cmd, static_cast<GLuint>(11),
- static_cast<GLenum>(12), static_cast<GLuint>(13));
- EXPECT_EQ(static_cast<uint32_t>(cmds::StencilFillPathCHROMIUM::kCmdId),
- cmd.header.command);
- EXPECT_EQ(sizeof(cmd), cmd.header.size * 4u);
- EXPECT_EQ(static_cast<GLuint>(11), cmd.path);
- EXPECT_EQ(static_cast<GLenum>(12), cmd.fillMode);
- EXPECT_EQ(static_cast<GLuint>(13), cmd.mask);
- CheckBytesWrittenMatchesExpectedSize(next_cmd, sizeof(cmd));
-}
-
-TEST_F(GLES2FormatTest, StencilStrokePathCHROMIUM) {
- cmds::StencilStrokePathCHROMIUM& cmd =
- *GetBufferAs<cmds::StencilStrokePathCHROMIUM>();
- void* next_cmd = cmd.Set(&cmd, static_cast<GLuint>(11),
- static_cast<GLint>(12), static_cast<GLuint>(13));
- EXPECT_EQ(static_cast<uint32_t>(cmds::StencilStrokePathCHROMIUM::kCmdId),
- cmd.header.command);
- EXPECT_EQ(sizeof(cmd), cmd.header.size * 4u);
- EXPECT_EQ(static_cast<GLuint>(11), cmd.path);
- EXPECT_EQ(static_cast<GLint>(12), cmd.reference);
- EXPECT_EQ(static_cast<GLuint>(13), cmd.mask);
- CheckBytesWrittenMatchesExpectedSize(next_cmd, sizeof(cmd));
-}
-
-TEST_F(GLES2FormatTest, CoverFillPathCHROMIUM) {
- cmds::CoverFillPathCHROMIUM& cmd =
- *GetBufferAs<cmds::CoverFillPathCHROMIUM>();
- void* next_cmd =
- cmd.Set(&cmd, static_cast<GLuint>(11), static_cast<GLenum>(12));
- EXPECT_EQ(static_cast<uint32_t>(cmds::CoverFillPathCHROMIUM::kCmdId),
- cmd.header.command);
- EXPECT_EQ(sizeof(cmd), cmd.header.size * 4u);
- EXPECT_EQ(static_cast<GLuint>(11), cmd.path);
- EXPECT_EQ(static_cast<GLenum>(12), cmd.coverMode);
- CheckBytesWrittenMatchesExpectedSize(next_cmd, sizeof(cmd));
-}
-
-TEST_F(GLES2FormatTest, CoverStrokePathCHROMIUM) {
- cmds::CoverStrokePathCHROMIUM& cmd =
- *GetBufferAs<cmds::CoverStrokePathCHROMIUM>();
- void* next_cmd =
- cmd.Set(&cmd, static_cast<GLuint>(11), static_cast<GLenum>(12));
- EXPECT_EQ(static_cast<uint32_t>(cmds::CoverStrokePathCHROMIUM::kCmdId),
- cmd.header.command);
- EXPECT_EQ(sizeof(cmd), cmd.header.size * 4u);
- EXPECT_EQ(static_cast<GLuint>(11), cmd.path);
- EXPECT_EQ(static_cast<GLenum>(12), cmd.coverMode);
- CheckBytesWrittenMatchesExpectedSize(next_cmd, sizeof(cmd));
-}
-
-TEST_F(GLES2FormatTest, StencilThenCoverFillPathCHROMIUM) {
- cmds::StencilThenCoverFillPathCHROMIUM& cmd =
- *GetBufferAs<cmds::StencilThenCoverFillPathCHROMIUM>();
- void* next_cmd =
- cmd.Set(&cmd, static_cast<GLuint>(11), static_cast<GLenum>(12),
- static_cast<GLuint>(13), static_cast<GLenum>(14));
- EXPECT_EQ(
- static_cast<uint32_t>(cmds::StencilThenCoverFillPathCHROMIUM::kCmdId),
- cmd.header.command);
- EXPECT_EQ(sizeof(cmd), cmd.header.size * 4u);
- EXPECT_EQ(static_cast<GLuint>(11), cmd.path);
- EXPECT_EQ(static_cast<GLenum>(12), cmd.fillMode);
- EXPECT_EQ(static_cast<GLuint>(13), cmd.mask);
- EXPECT_EQ(static_cast<GLenum>(14), cmd.coverMode);
- CheckBytesWrittenMatchesExpectedSize(next_cmd, sizeof(cmd));
-}
-
-TEST_F(GLES2FormatTest, StencilThenCoverStrokePathCHROMIUM) {
- cmds::StencilThenCoverStrokePathCHROMIUM& cmd =
- *GetBufferAs<cmds::StencilThenCoverStrokePathCHROMIUM>();
- void* next_cmd =
- cmd.Set(&cmd, static_cast<GLuint>(11), static_cast<GLint>(12),
- static_cast<GLuint>(13), static_cast<GLenum>(14));
- EXPECT_EQ(
- static_cast<uint32_t>(cmds::StencilThenCoverStrokePathCHROMIUM::kCmdId),
- cmd.header.command);
- EXPECT_EQ(sizeof(cmd), cmd.header.size * 4u);
- EXPECT_EQ(static_cast<GLuint>(11), cmd.path);
- EXPECT_EQ(static_cast<GLint>(12), cmd.reference);
- EXPECT_EQ(static_cast<GLuint>(13), cmd.mask);
- EXPECT_EQ(static_cast<GLenum>(14), cmd.coverMode);
- CheckBytesWrittenMatchesExpectedSize(next_cmd, sizeof(cmd));
-}
-
-TEST_F(GLES2FormatTest, StencilFillPathInstancedCHROMIUM) {
- cmds::StencilFillPathInstancedCHROMIUM& cmd =
- *GetBufferAs<cmds::StencilFillPathInstancedCHROMIUM>();
- void* next_cmd =
- cmd.Set(&cmd, static_cast<GLsizei>(11), static_cast<GLenum>(12),
- static_cast<uint32_t>(13), static_cast<uint32_t>(14),
- static_cast<GLuint>(15), static_cast<GLenum>(16),
- static_cast<GLuint>(17), static_cast<GLenum>(18),
- static_cast<uint32_t>(19), static_cast<uint32_t>(20));
- EXPECT_EQ(
- static_cast<uint32_t>(cmds::StencilFillPathInstancedCHROMIUM::kCmdId),
- cmd.header.command);
- EXPECT_EQ(sizeof(cmd), cmd.header.size * 4u);
- EXPECT_EQ(static_cast<GLsizei>(11), cmd.numPaths);
- EXPECT_EQ(static_cast<GLenum>(12), cmd.pathNameType);
- EXPECT_EQ(static_cast<uint32_t>(13), cmd.paths_shm_id);
- EXPECT_EQ(static_cast<uint32_t>(14), cmd.paths_shm_offset);
- EXPECT_EQ(static_cast<GLuint>(15), cmd.pathBase);
- EXPECT_EQ(static_cast<GLenum>(16), cmd.fillMode);
- EXPECT_EQ(static_cast<GLuint>(17), cmd.mask);
- EXPECT_EQ(static_cast<GLenum>(18), cmd.transformType);
- EXPECT_EQ(static_cast<uint32_t>(19), cmd.transformValues_shm_id);
- EXPECT_EQ(static_cast<uint32_t>(20), cmd.transformValues_shm_offset);
- CheckBytesWrittenMatchesExpectedSize(next_cmd, sizeof(cmd));
-}
-
-TEST_F(GLES2FormatTest, StencilStrokePathInstancedCHROMIUM) {
- cmds::StencilStrokePathInstancedCHROMIUM& cmd =
- *GetBufferAs<cmds::StencilStrokePathInstancedCHROMIUM>();
- void* next_cmd =
- cmd.Set(&cmd, static_cast<GLsizei>(11), static_cast<GLenum>(12),
- static_cast<uint32_t>(13), static_cast<uint32_t>(14),
- static_cast<GLuint>(15), static_cast<GLint>(16),
- static_cast<GLuint>(17), static_cast<GLenum>(18),
- static_cast<uint32_t>(19), static_cast<uint32_t>(20));
- EXPECT_EQ(
- static_cast<uint32_t>(cmds::StencilStrokePathInstancedCHROMIUM::kCmdId),
- cmd.header.command);
- EXPECT_EQ(sizeof(cmd), cmd.header.size * 4u);
- EXPECT_EQ(static_cast<GLsizei>(11), cmd.numPaths);
- EXPECT_EQ(static_cast<GLenum>(12), cmd.pathNameType);
- EXPECT_EQ(static_cast<uint32_t>(13), cmd.paths_shm_id);
- EXPECT_EQ(static_cast<uint32_t>(14), cmd.paths_shm_offset);
- EXPECT_EQ(static_cast<GLuint>(15), cmd.pathBase);
- EXPECT_EQ(static_cast<GLint>(16), cmd.reference);
- EXPECT_EQ(static_cast<GLuint>(17), cmd.mask);
- EXPECT_EQ(static_cast<GLenum>(18), cmd.transformType);
- EXPECT_EQ(static_cast<uint32_t>(19), cmd.transformValues_shm_id);
- EXPECT_EQ(static_cast<uint32_t>(20), cmd.transformValues_shm_offset);
- CheckBytesWrittenMatchesExpectedSize(next_cmd, sizeof(cmd));
-}
-
-TEST_F(GLES2FormatTest, CoverFillPathInstancedCHROMIUM) {
- cmds::CoverFillPathInstancedCHROMIUM& cmd =
- *GetBufferAs<cmds::CoverFillPathInstancedCHROMIUM>();
- void* next_cmd = cmd.Set(
- &cmd, static_cast<GLsizei>(11), static_cast<GLenum>(12),
- static_cast<uint32_t>(13), static_cast<uint32_t>(14),
- static_cast<GLuint>(15), static_cast<GLenum>(16), static_cast<GLenum>(17),
- static_cast<uint32_t>(18), static_cast<uint32_t>(19));
- EXPECT_EQ(static_cast<uint32_t>(cmds::CoverFillPathInstancedCHROMIUM::kCmdId),
- cmd.header.command);
- EXPECT_EQ(sizeof(cmd), cmd.header.size * 4u);
- EXPECT_EQ(static_cast<GLsizei>(11), cmd.numPaths);
- EXPECT_EQ(static_cast<GLenum>(12), cmd.pathNameType);
- EXPECT_EQ(static_cast<uint32_t>(13), cmd.paths_shm_id);
- EXPECT_EQ(static_cast<uint32_t>(14), cmd.paths_shm_offset);
- EXPECT_EQ(static_cast<GLuint>(15), cmd.pathBase);
- EXPECT_EQ(static_cast<GLenum>(16), cmd.coverMode);
- EXPECT_EQ(static_cast<GLenum>(17), cmd.transformType);
- EXPECT_EQ(static_cast<uint32_t>(18), cmd.transformValues_shm_id);
- EXPECT_EQ(static_cast<uint32_t>(19), cmd.transformValues_shm_offset);
- CheckBytesWrittenMatchesExpectedSize(next_cmd, sizeof(cmd));
-}
-
-TEST_F(GLES2FormatTest, CoverStrokePathInstancedCHROMIUM) {
- cmds::CoverStrokePathInstancedCHROMIUM& cmd =
- *GetBufferAs<cmds::CoverStrokePathInstancedCHROMIUM>();
- void* next_cmd = cmd.Set(
- &cmd, static_cast<GLsizei>(11), static_cast<GLenum>(12),
- static_cast<uint32_t>(13), static_cast<uint32_t>(14),
- static_cast<GLuint>(15), static_cast<GLenum>(16), static_cast<GLenum>(17),
- static_cast<uint32_t>(18), static_cast<uint32_t>(19));
- EXPECT_EQ(
- static_cast<uint32_t>(cmds::CoverStrokePathInstancedCHROMIUM::kCmdId),
- cmd.header.command);
- EXPECT_EQ(sizeof(cmd), cmd.header.size * 4u);
- EXPECT_EQ(static_cast<GLsizei>(11), cmd.numPaths);
- EXPECT_EQ(static_cast<GLenum>(12), cmd.pathNameType);
- EXPECT_EQ(static_cast<uint32_t>(13), cmd.paths_shm_id);
- EXPECT_EQ(static_cast<uint32_t>(14), cmd.paths_shm_offset);
- EXPECT_EQ(static_cast<GLuint>(15), cmd.pathBase);
- EXPECT_EQ(static_cast<GLenum>(16), cmd.coverMode);
- EXPECT_EQ(static_cast<GLenum>(17), cmd.transformType);
- EXPECT_EQ(static_cast<uint32_t>(18), cmd.transformValues_shm_id);
- EXPECT_EQ(static_cast<uint32_t>(19), cmd.transformValues_shm_offset);
- CheckBytesWrittenMatchesExpectedSize(next_cmd, sizeof(cmd));
-}
-
-TEST_F(GLES2FormatTest, StencilThenCoverFillPathInstancedCHROMIUM) {
- cmds::StencilThenCoverFillPathInstancedCHROMIUM& cmd =
- *GetBufferAs<cmds::StencilThenCoverFillPathInstancedCHROMIUM>();
- void* next_cmd = cmd.Set(
- &cmd, static_cast<GLsizei>(11), static_cast<GLenum>(12),
- static_cast<uint32_t>(13), static_cast<uint32_t>(14),
- static_cast<GLuint>(15), static_cast<GLenum>(16), static_cast<GLuint>(17),
- static_cast<GLenum>(18), static_cast<GLenum>(19),
- static_cast<uint32_t>(20), static_cast<uint32_t>(21));
- EXPECT_EQ(static_cast<uint32_t>(
- cmds::StencilThenCoverFillPathInstancedCHROMIUM::kCmdId),
- cmd.header.command);
- EXPECT_EQ(sizeof(cmd), cmd.header.size * 4u);
- EXPECT_EQ(static_cast<GLsizei>(11), cmd.numPaths);
- EXPECT_EQ(static_cast<GLenum>(12), cmd.pathNameType);
- EXPECT_EQ(static_cast<uint32_t>(13), cmd.paths_shm_id);
- EXPECT_EQ(static_cast<uint32_t>(14), cmd.paths_shm_offset);
- EXPECT_EQ(static_cast<GLuint>(15), cmd.pathBase);
- EXPECT_EQ(static_cast<GLenum>(16), cmd.fillMode);
- EXPECT_EQ(static_cast<GLuint>(17), cmd.mask);
- EXPECT_EQ(static_cast<GLenum>(18), cmd.coverMode);
- EXPECT_EQ(static_cast<GLenum>(19), cmd.transformType);
- EXPECT_EQ(static_cast<uint32_t>(20), cmd.transformValues_shm_id);
- EXPECT_EQ(static_cast<uint32_t>(21), cmd.transformValues_shm_offset);
- CheckBytesWrittenMatchesExpectedSize(next_cmd, sizeof(cmd));
-}
-
-TEST_F(GLES2FormatTest, StencilThenCoverStrokePathInstancedCHROMIUM) {
- cmds::StencilThenCoverStrokePathInstancedCHROMIUM& cmd =
- *GetBufferAs<cmds::StencilThenCoverStrokePathInstancedCHROMIUM>();
- void* next_cmd = cmd.Set(
- &cmd, static_cast<GLsizei>(11), static_cast<GLenum>(12),
- static_cast<uint32_t>(13), static_cast<uint32_t>(14),
- static_cast<GLuint>(15), static_cast<GLint>(16), static_cast<GLuint>(17),
- static_cast<GLenum>(18), static_cast<GLenum>(19),
- static_cast<uint32_t>(20), static_cast<uint32_t>(21));
- EXPECT_EQ(static_cast<uint32_t>(
- cmds::StencilThenCoverStrokePathInstancedCHROMIUM::kCmdId),
- cmd.header.command);
- EXPECT_EQ(sizeof(cmd), cmd.header.size * 4u);
- EXPECT_EQ(static_cast<GLsizei>(11), cmd.numPaths);
- EXPECT_EQ(static_cast<GLenum>(12), cmd.pathNameType);
- EXPECT_EQ(static_cast<uint32_t>(13), cmd.paths_shm_id);
- EXPECT_EQ(static_cast<uint32_t>(14), cmd.paths_shm_offset);
- EXPECT_EQ(static_cast<GLuint>(15), cmd.pathBase);
- EXPECT_EQ(static_cast<GLint>(16), cmd.reference);
- EXPECT_EQ(static_cast<GLuint>(17), cmd.mask);
- EXPECT_EQ(static_cast<GLenum>(18), cmd.coverMode);
- EXPECT_EQ(static_cast<GLenum>(19), cmd.transformType);
- EXPECT_EQ(static_cast<uint32_t>(20), cmd.transformValues_shm_id);
- EXPECT_EQ(static_cast<uint32_t>(21), cmd.transformValues_shm_offset);
- CheckBytesWrittenMatchesExpectedSize(next_cmd, sizeof(cmd));
-}
-
-TEST_F(GLES2FormatTest, BindFragmentInputLocationCHROMIUMBucket) {
- cmds::BindFragmentInputLocationCHROMIUMBucket& cmd =
- *GetBufferAs<cmds::BindFragmentInputLocationCHROMIUMBucket>();
- void* next_cmd = cmd.Set(&cmd, static_cast<GLuint>(11),
- static_cast<GLint>(12), static_cast<uint32_t>(13));
- EXPECT_EQ(static_cast<uint32_t>(
- cmds::BindFragmentInputLocationCHROMIUMBucket::kCmdId),
- cmd.header.command);
- EXPECT_EQ(sizeof(cmd), cmd.header.size * 4u);
- EXPECT_EQ(static_cast<GLuint>(11), cmd.program);
- EXPECT_EQ(static_cast<GLint>(12), cmd.location);
- EXPECT_EQ(static_cast<uint32_t>(13), cmd.name_bucket_id);
- CheckBytesWrittenMatchesExpectedSize(next_cmd, sizeof(cmd));
-}
-
-TEST_F(GLES2FormatTest, ProgramPathFragmentInputGenCHROMIUM) {
- cmds::ProgramPathFragmentInputGenCHROMIUM& cmd =
- *GetBufferAs<cmds::ProgramPathFragmentInputGenCHROMIUM>();
- void* next_cmd =
- cmd.Set(&cmd, static_cast<GLuint>(11), static_cast<GLint>(12),
- static_cast<GLenum>(13), static_cast<GLint>(14),
- static_cast<uint32_t>(15), static_cast<uint32_t>(16));
- EXPECT_EQ(
- static_cast<uint32_t>(cmds::ProgramPathFragmentInputGenCHROMIUM::kCmdId),
- cmd.header.command);
- EXPECT_EQ(sizeof(cmd), cmd.header.size * 4u);
- EXPECT_EQ(static_cast<GLuint>(11), cmd.program);
- EXPECT_EQ(static_cast<GLint>(12), cmd.location);
- EXPECT_EQ(static_cast<GLenum>(13), cmd.genMode);
- EXPECT_EQ(static_cast<GLint>(14), cmd.components);
- EXPECT_EQ(static_cast<uint32_t>(15), cmd.coeffs_shm_id);
- EXPECT_EQ(static_cast<uint32_t>(16), cmd.coeffs_shm_offset);
- CheckBytesWrittenMatchesExpectedSize(next_cmd, sizeof(cmd));
-}
-
TEST_F(GLES2FormatTest, ContextVisibilityHintCHROMIUM) {
cmds::ContextVisibilityHintCHROMIUM& cmd =
*GetBufferAs<cmds::ContextVisibilityHintCHROMIUM>();
@@ -5953,4 +5536,26 @@ TEST_F(GLES2FormatTest, EndSharedImageAccessDirectCHROMIUM) {
CheckBytesWrittenMatchesExpectedSize(next_cmd, sizeof(cmd));
}
+TEST_F(GLES2FormatTest, BeginBatchReadAccessSharedImageCHROMIUM) {
+ cmds::BeginBatchReadAccessSharedImageCHROMIUM& cmd =
+ *GetBufferAs<cmds::BeginBatchReadAccessSharedImageCHROMIUM>();
+ void* next_cmd = cmd.Set(&cmd);
+ EXPECT_EQ(static_cast<uint32_t>(
+ cmds::BeginBatchReadAccessSharedImageCHROMIUM::kCmdId),
+ cmd.header.command);
+ EXPECT_EQ(sizeof(cmd), cmd.header.size * 4u);
+ CheckBytesWrittenMatchesExpectedSize(next_cmd, sizeof(cmd));
+}
+
+TEST_F(GLES2FormatTest, EndBatchReadAccessSharedImageCHROMIUM) {
+ cmds::EndBatchReadAccessSharedImageCHROMIUM& cmd =
+ *GetBufferAs<cmds::EndBatchReadAccessSharedImageCHROMIUM>();
+ void* next_cmd = cmd.Set(&cmd);
+ EXPECT_EQ(static_cast<uint32_t>(
+ cmds::EndBatchReadAccessSharedImageCHROMIUM::kCmdId),
+ cmd.header.command);
+ EXPECT_EQ(sizeof(cmd), cmd.header.size * 4u);
+ CheckBytesWrittenMatchesExpectedSize(next_cmd, sizeof(cmd));
+}
+
#endif // GPU_COMMAND_BUFFER_COMMON_GLES2_CMD_FORMAT_TEST_AUTOGEN_H_
diff --git a/chromium/gpu/command_buffer/common/gles2_cmd_ids_autogen.h b/chromium/gpu/command_buffer/common/gles2_cmd_ids_autogen.h
index e2b7aa68549..5ed67e6e384 100644
--- a/chromium/gpu/command_buffer/common/gles2_cmd_ids_autogen.h
+++ b/chromium/gpu/command_buffer/common/gles2_cmd_ids_autogen.h
@@ -319,55 +319,34 @@
OP(FlushDriverCachesCHROMIUM) /* 560 */ \
OP(ScheduleDCLayerCHROMIUM) /* 561 */ \
OP(SetActiveURLCHROMIUM) /* 562 */ \
- OP(MatrixLoadfCHROMIUMImmediate) /* 563 */ \
- OP(MatrixLoadIdentityCHROMIUM) /* 564 */ \
- OP(GenPathsCHROMIUM) /* 565 */ \
- OP(DeletePathsCHROMIUM) /* 566 */ \
- OP(IsPathCHROMIUM) /* 567 */ \
- OP(PathCommandsCHROMIUM) /* 568 */ \
- OP(PathParameterfCHROMIUM) /* 569 */ \
- OP(PathParameteriCHROMIUM) /* 570 */ \
- OP(PathStencilFuncCHROMIUM) /* 571 */ \
- OP(StencilFillPathCHROMIUM) /* 572 */ \
- OP(StencilStrokePathCHROMIUM) /* 573 */ \
- OP(CoverFillPathCHROMIUM) /* 574 */ \
- OP(CoverStrokePathCHROMIUM) /* 575 */ \
- OP(StencilThenCoverFillPathCHROMIUM) /* 576 */ \
- OP(StencilThenCoverStrokePathCHROMIUM) /* 577 */ \
- OP(StencilFillPathInstancedCHROMIUM) /* 578 */ \
- OP(StencilStrokePathInstancedCHROMIUM) /* 579 */ \
- OP(CoverFillPathInstancedCHROMIUM) /* 580 */ \
- OP(CoverStrokePathInstancedCHROMIUM) /* 581 */ \
- OP(StencilThenCoverFillPathInstancedCHROMIUM) /* 582 */ \
- OP(StencilThenCoverStrokePathInstancedCHROMIUM) /* 583 */ \
- OP(BindFragmentInputLocationCHROMIUMBucket) /* 584 */ \
- OP(ProgramPathFragmentInputGenCHROMIUM) /* 585 */ \
- OP(ContextVisibilityHintCHROMIUM) /* 586 */ \
- OP(CoverageModulationCHROMIUM) /* 587 */ \
- OP(BlendBarrierKHR) /* 588 */ \
- OP(BindFragDataLocationIndexedEXTBucket) /* 589 */ \
- OP(BindFragDataLocationEXTBucket) /* 590 */ \
- OP(GetFragDataIndexEXT) /* 591 */ \
- OP(UniformMatrix4fvStreamTextureMatrixCHROMIUMImmediate) /* 592 */ \
- OP(OverlayPromotionHintCHROMIUM) /* 593 */ \
- OP(SwapBuffersWithBoundsCHROMIUMImmediate) /* 594 */ \
- OP(SetDrawRectangleCHROMIUM) /* 595 */ \
- OP(SetEnableDCLayersCHROMIUM) /* 596 */ \
- OP(InitializeDiscardableTextureCHROMIUM) /* 597 */ \
- OP(UnlockDiscardableTextureCHROMIUM) /* 598 */ \
- OP(LockDiscardableTextureCHROMIUM) /* 599 */ \
- OP(TexStorage2DImageCHROMIUM) /* 600 */ \
- OP(SetColorSpaceMetadataCHROMIUM) /* 601 */ \
- OP(WindowRectanglesEXTImmediate) /* 602 */ \
- OP(CreateGpuFenceINTERNAL) /* 603 */ \
- OP(WaitGpuFenceCHROMIUM) /* 604 */ \
- OP(DestroyGpuFenceCHROMIUM) /* 605 */ \
- OP(SetReadbackBufferShadowAllocationINTERNAL) /* 606 */ \
- OP(FramebufferTextureMultiviewOVR) /* 607 */ \
- OP(MaxShaderCompilerThreadsKHR) /* 608 */ \
- OP(CreateAndTexStorage2DSharedImageINTERNALImmediate) /* 609 */ \
- OP(BeginSharedImageAccessDirectCHROMIUM) /* 610 */ \
- OP(EndSharedImageAccessDirectCHROMIUM) /* 611 */
+ OP(ContextVisibilityHintCHROMIUM) /* 563 */ \
+ OP(CoverageModulationCHROMIUM) /* 564 */ \
+ OP(BlendBarrierKHR) /* 565 */ \
+ OP(BindFragDataLocationIndexedEXTBucket) /* 566 */ \
+ OP(BindFragDataLocationEXTBucket) /* 567 */ \
+ OP(GetFragDataIndexEXT) /* 568 */ \
+ OP(UniformMatrix4fvStreamTextureMatrixCHROMIUMImmediate) /* 569 */ \
+ OP(OverlayPromotionHintCHROMIUM) /* 570 */ \
+ OP(SwapBuffersWithBoundsCHROMIUMImmediate) /* 571 */ \
+ OP(SetDrawRectangleCHROMIUM) /* 572 */ \
+ OP(SetEnableDCLayersCHROMIUM) /* 573 */ \
+ OP(InitializeDiscardableTextureCHROMIUM) /* 574 */ \
+ OP(UnlockDiscardableTextureCHROMIUM) /* 575 */ \
+ OP(LockDiscardableTextureCHROMIUM) /* 576 */ \
+ OP(TexStorage2DImageCHROMIUM) /* 577 */ \
+ OP(SetColorSpaceMetadataCHROMIUM) /* 578 */ \
+ OP(WindowRectanglesEXTImmediate) /* 579 */ \
+ OP(CreateGpuFenceINTERNAL) /* 580 */ \
+ OP(WaitGpuFenceCHROMIUM) /* 581 */ \
+ OP(DestroyGpuFenceCHROMIUM) /* 582 */ \
+ OP(SetReadbackBufferShadowAllocationINTERNAL) /* 583 */ \
+ OP(FramebufferTextureMultiviewOVR) /* 584 */ \
+ OP(MaxShaderCompilerThreadsKHR) /* 585 */ \
+ OP(CreateAndTexStorage2DSharedImageINTERNALImmediate) /* 586 */ \
+ OP(BeginSharedImageAccessDirectCHROMIUM) /* 587 */ \
+ OP(EndSharedImageAccessDirectCHROMIUM) /* 588 */ \
+ OP(BeginBatchReadAccessSharedImageCHROMIUM) /* 589 */ \
+ OP(EndBatchReadAccessSharedImageCHROMIUM) /* 590 */
enum CommandId {
kOneBeforeStartPoint =
diff --git a/chromium/gpu/command_buffer/common/gles2_cmd_utils.cc b/chromium/gpu/command_buffer/common/gles2_cmd_utils.cc
index 7f0bdb39000..109097bd431 100644
--- a/chromium/gpu/command_buffer/common/gles2_cmd_utils.cc
+++ b/chromium/gpu/command_buffer/common/gles2_cmd_utils.cc
@@ -933,77 +933,6 @@ uint32_t GLES2Util::GetGroupSizeForBufferType(uint32_t count, uint32_t type) {
return type_size * count;
}
-uint32_t GLES2Util::GetComponentCountForGLTransformType(uint32_t type) {
- switch (type) {
- case GL_TRANSLATE_X_CHROMIUM:
- case GL_TRANSLATE_Y_CHROMIUM:
- return 1;
- case GL_TRANSLATE_2D_CHROMIUM:
- return 2;
- case GL_TRANSLATE_3D_CHROMIUM:
- return 3;
- case GL_AFFINE_2D_CHROMIUM:
- case GL_TRANSPOSE_AFFINE_2D_CHROMIUM:
- return 6;
- case GL_AFFINE_3D_CHROMIUM:
- case GL_TRANSPOSE_AFFINE_3D_CHROMIUM:
- return 12;
- default:
- return 0;
- }
-}
-
-uint32_t GLES2Util::GetCoefficientCountForGLPathFragmentInputGenMode(
- uint32_t gen_mode) {
- switch (gen_mode) {
- case GL_EYE_LINEAR_CHROMIUM:
- return 4;
- case GL_OBJECT_LINEAR_CHROMIUM:
- return 3;
- case GL_CONSTANT_CHROMIUM:
- return 1;
- case GL_NONE:
- default:
- return 0;
- }
-}
-
-uint32_t GLES2Util::GetGLTypeSizeForPathCoordType(uint32_t type) {
- switch (type) {
- case GL_BYTE:
- return sizeof(GLbyte); // NOLINT
- case GL_UNSIGNED_BYTE:
- return sizeof(GLubyte); // NOLINT
- case GL_SHORT:
- return sizeof(GLshort); // NOLINT
- case GL_UNSIGNED_SHORT:
- return sizeof(GLushort); // NOLINT
- case GL_FLOAT:
- return sizeof(GLfloat); // NOLINT
- default:
- return 0;
- }
-}
-
-uint32_t GLES2Util::GetGLTypeSizeForGLPathNameType(uint32_t type) {
- switch (type) {
- case GL_BYTE:
- return sizeof(GLbyte); // NOLINT
- case GL_UNSIGNED_BYTE:
- return sizeof(GLubyte); // NOLINT
- case GL_SHORT:
- return sizeof(GLshort); // NOLINT
- case GL_UNSIGNED_SHORT:
- return sizeof(GLushort); // NOLINT
- case GL_INT:
- return sizeof(GLint); // NOLINT
- case GL_UNSIGNED_INT:
- return sizeof(GLuint); // NOLINT
- default:
- return 0;
- }
-}
-
uint32_t GLES2Util::GLErrorToErrorBit(uint32_t error) {
switch (error) {
case GL_INVALID_ENUM:
@@ -1163,6 +1092,10 @@ uint32_t GLES2Util::GetGLReadPixelsImplementationType(uint32_t internal_format,
case GL_RGBA16UI:
case GL_RGB10_A2:
case GL_RGB10_A2UI:
+ case GL_R16_EXT:
+ case GL_RG16_EXT:
+ case GL_RGB16_EXT:
+ case GL_RGBA16_EXT:
return GL_UNSIGNED_SHORT;
case GL_R32UI:
case GL_RG32UI:
@@ -1175,6 +1108,10 @@ uint32_t GLES2Util::GetGLReadPixelsImplementationType(uint32_t internal_format,
case GL_R16I:
case GL_RG16I:
case GL_RGBA16I:
+ case GL_R16_SNORM_EXT:
+ case GL_RG16_SNORM_EXT:
+ case GL_RGB16_SNORM_EXT:
+ case GL_RGBA16_SNORM_EXT:
return GL_SHORT;
case GL_R32I:
case GL_RG32I:
@@ -1223,6 +1160,8 @@ uint32_t GLES2Util::GetChannelsForFormat(int format) {
case GL_RGB16I:
case GL_RGB32UI:
case GL_RGB32I:
+ case GL_RGB16_EXT:
+ case GL_RGB16_SNORM_EXT:
return kRGB;
case GL_BGRA_EXT:
case GL_BGRA8_EXT:
@@ -1243,6 +1182,8 @@ uint32_t GLES2Util::GetChannelsForFormat(int format) {
case GL_RGBA16I:
case GL_RGBA32UI:
case GL_RGBA32I:
+ case GL_RGBA16_EXT:
+ case GL_RGBA16_SNORM_EXT:
return kRGBA;
case GL_DEPTH_COMPONENT32_OES:
case GL_DEPTH_COMPONENT24_OES:
@@ -1267,6 +1208,8 @@ uint32_t GLES2Util::GetChannelsForFormat(int format) {
case GL_R16I:
case GL_R32UI:
case GL_R32I:
+ case GL_R16_EXT:
+ case GL_R16_SNORM_EXT:
return kRed;
case GL_RG_EXT:
case GL_RG8:
@@ -1279,6 +1222,8 @@ uint32_t GLES2Util::GetChannelsForFormat(int format) {
case GL_RG16I:
case GL_RG32UI:
case GL_RG32I:
+ case GL_RG16_EXT:
+ case GL_RG16_SNORM_EXT:
return kRed | kGreen;
default:
return 0x0000;
@@ -1290,6 +1235,8 @@ bool GLES2Util::IsSizedColorFormat(uint32_t internal_format) {
case GL_ALPHA16F_EXT:
case GL_ALPHA32F_EXT:
case GL_RGB8:
+ case GL_RGB16_EXT:
+ case GL_RGB16_SNORM_EXT:
case GL_RGB565:
case GL_RGB16F:
case GL_RGB32F:
@@ -1307,6 +1254,8 @@ bool GLES2Util::IsSizedColorFormat(uint32_t internal_format) {
case GL_RGBA16F:
case GL_RGBA32F:
case GL_RGBA8_OES:
+ case GL_RGBA16_EXT:
+ case GL_RGBA16_SNORM_EXT:
case GL_RGBA4:
case GL_RGB5_A1:
case GL_SRGB8_ALPHA8:
@@ -1321,6 +1270,8 @@ bool GLES2Util::IsSizedColorFormat(uint32_t internal_format) {
case GL_RGBA32I:
case GL_R8:
case GL_R8_SNORM:
+ case GL_R16_EXT:
+ case GL_R16_SNORM_EXT:
case GL_R16F:
case GL_R32F:
case GL_R8UI:
@@ -1331,6 +1282,8 @@ bool GLES2Util::IsSizedColorFormat(uint32_t internal_format) {
case GL_R32I:
case GL_RG8:
case GL_RG8_SNORM:
+ case GL_RG16_EXT:
+ case GL_RG16_SNORM_EXT:
case GL_RG16F:
case GL_RG32F:
case GL_RG8UI:
@@ -1434,6 +1387,8 @@ void GLES2Util::GetColorFormatComponentSizes(
case GL_RGB16F:
case GL_RGB16UI:
case GL_RGB16I:
+ case GL_RGB16_EXT:
+ case GL_RGB16_SNORM_EXT:
*r = 16;
*g = 16;
*b = 16;
@@ -1469,6 +1424,8 @@ void GLES2Util::GetColorFormatComponentSizes(
case GL_RGBA16F_EXT:
case GL_RGBA16UI:
case GL_RGBA16I:
+ case GL_RGBA16_EXT:
+ case GL_RGBA16_SNORM_EXT:
*r = 16;
*g = 16;
*b = 16;
@@ -1511,6 +1468,7 @@ void GLES2Util::GetColorFormatComponentSizes(
case GL_R16UI:
case GL_R16I:
case GL_R16_EXT:
+ case GL_R16_SNORM_EXT:
*r = 16;
break;
case GL_R32F:
@@ -1528,6 +1486,8 @@ void GLES2Util::GetColorFormatComponentSizes(
case GL_RG16F:
case GL_RG16UI:
case GL_RG16I:
+ case GL_RG16_EXT:
+ case GL_RG16_SNORM_EXT:
*r = 16;
*g = 16;
break;
@@ -1792,6 +1752,8 @@ uint32_t GLES2Util::ConvertToSizedFormat(uint32_t format, uint32_t type) {
return GL_RGB16F;
case GL_FLOAT:
return GL_RGB32F;
+ case GL_UNSIGNED_SHORT:
+ return GL_RGB16_EXT;
default:
NOTREACHED();
break;
@@ -1809,6 +1771,8 @@ uint32_t GLES2Util::ConvertToSizedFormat(uint32_t format, uint32_t type) {
return GL_RGBA16F;
case GL_FLOAT:
return GL_RGBA32F;
+ case GL_UNSIGNED_SHORT:
+ return GL_RGBA16_EXT;
default:
NOTREACHED();
break;
@@ -1850,6 +1814,8 @@ uint32_t GLES2Util::ConvertToSizedFormat(uint32_t format, uint32_t type) {
return GL_RG16F;
case GL_FLOAT:
return GL_RG32F;
+ case GL_UNSIGNED_SHORT:
+ return GL_RG16_EXT;
default:
NOTREACHED();
break;
diff --git a/chromium/gpu/command_buffer/common/gles2_cmd_utils.h b/chromium/gpu/command_buffer/common/gles2_cmd_utils.h
index 7ec260bc20c..238fce0975b 100644
--- a/chromium/gpu/command_buffer/common/gles2_cmd_utils.h
+++ b/chromium/gpu/command_buffer/common/gles2_cmd_utils.h
@@ -157,15 +157,6 @@ class GLES2_UTILS_EXPORT GLES2Util {
static uint32_t GetGroupSizeForBufferType(uint32_t count, uint32_t type);
- static uint32_t GetComponentCountForGLTransformType(uint32_t type);
-
- static uint32_t GetCoefficientCountForGLPathFragmentInputGenMode(
- uint32_t gen_mode);
-
- static uint32_t GetGLTypeSizeForPathCoordType(uint32_t type);
-
- static uint32_t GetGLTypeSizeForGLPathNameType(uint32_t type);
-
static uint32_t GLErrorToErrorBit(uint32_t gl_error);
static uint32_t GLErrorBitToGLError(uint32_t error_bit);
diff --git a/chromium/gpu/command_buffer/common/gles2_cmd_utils_autogen.h b/chromium/gpu/command_buffer/common/gles2_cmd_utils_autogen.h
index 9a32ac2208a..652ff7bd177 100644
--- a/chromium/gpu/command_buffer/common/gles2_cmd_utils_autogen.h
+++ b/chromium/gpu/command_buffer/common/gles2_cmd_utils_autogen.h
@@ -49,15 +49,6 @@ static std::string GetStringIndexedGLState(uint32_t value);
static std::string GetStringIndexedStringType(uint32_t value);
static std::string GetStringInternalFormatParameter(uint32_t value);
static std::string GetStringMapBufferAccess(uint32_t value);
-static std::string GetStringMatrixMode(uint32_t value);
-static std::string GetStringPathCoordType(uint32_t value);
-static std::string GetStringPathCoverMode(uint32_t value);
-static std::string GetStringPathFillMode(uint32_t value);
-static std::string GetStringPathFragmentInputGenMode(uint32_t value);
-static std::string GetStringPathInstancedCoverMode(uint32_t value);
-static std::string GetStringPathNameType(uint32_t value);
-static std::string GetStringPathParameter(uint32_t value);
-static std::string GetStringPathTransformType(uint32_t value);
static std::string GetStringPixelStore(uint32_t value);
static std::string GetStringPixelType(uint32_t value);
static std::string GetStringProgramParameter(uint32_t value);
diff --git a/chromium/gpu/command_buffer/common/gles2_cmd_utils_implementation_autogen.h b/chromium/gpu/command_buffer/common/gles2_cmd_utils_implementation_autogen.h
index 82920a6c65a..f7510c53ddd 100644
--- a/chromium/gpu/command_buffer/common/gles2_cmd_utils_implementation_autogen.h
+++ b/chromium/gpu/command_buffer/common/gles2_cmd_utils_implementation_autogen.h
@@ -829,10 +829,6 @@ static const GLES2Util::EnumToString enum_to_string_table[] = {
"GL_FILL_NV",
},
{
- 0x1D00,
- "GL_FLAT_CHROMIUM",
- },
- {
0x1E00,
"GL_KEEP",
},
@@ -877,14 +873,6 @@ static const GLES2Util::EnumToString enum_to_string_table[] = {
"GL_FONT_NUM_GLYPH_INDICES_BIT_NV",
},
{
- 0x2400,
- "GL_EYE_LINEAR_CHROMIUM",
- },
- {
- 0x2401,
- "GL_OBJECT_LINEAR_CHROMIUM",
- },
- {
0x2600,
"GL_NEAREST",
},
@@ -2053,10 +2041,6 @@ static const GLES2Util::EnumToString enum_to_string_table[] = {
"GL_MAX_CUBE_MAP_TEXTURE_SIZE",
},
{
- 0x8576,
- "GL_CONSTANT_CHROMIUM",
- },
- {
0x8589,
"GL_SRC1_ALPHA_EXT",
},
@@ -2693,26 +2677,6 @@ static const GLES2Util::EnumToString enum_to_string_table[] = {
"GL_TEXTURE_FILTERING_HINT_CHROMIUM",
},
{
- 0x8AF1,
- "GL_COLOR_SPACE_UNSPECIFIED_CHROMIUM",
- },
- {
- 0x8AF2,
- "GL_COLOR_SPACE_SCRGB_LINEAR_CHROMIUM",
- },
- {
- 0x8AF3,
- "GL_COLOR_SPACE_SRGB_CHROMIUM",
- },
- {
- 0x8AF4,
- "GL_COLOR_SPACE_DISPLAY_P3_CHROMIUM",
- },
- {
- 0x8AF5,
- "GL_COLOR_SPACE_HDR10_CHROMIUM",
- },
- {
0x8AF6,
"GL_SHARED_IMAGE_ACCESS_MODE_READ_CHROMIUM",
},
@@ -4649,10 +4613,6 @@ static const GLES2Util::EnumToString enum_to_string_table[] = {
"GL_PATH_CLIENT_LENGTH_NV",
},
{
- 0x907a,
- "GL_PATH_MITER_LIMIT_CHROMIUM",
- },
- {
0x9080,
"GL_PATH_FILL_MODE_NV",
},
@@ -4673,10 +4633,6 @@ static const GLES2Util::EnumToString enum_to_string_table[] = {
"GL_PATH_STROKE_MASK_NV",
},
{
- 0x9086,
- "GL_PATH_STROKE_BOUND_CHROMIUM",
- },
- {
0x9088,
"GL_COUNT_UP_NV",
},
@@ -4985,14 +4941,6 @@ static const GLES2Util::EnumToString enum_to_string_table[] = {
"GL_CONTEXT_ROBUST_ACCESS_KHR",
},
{
- 0x90a3,
- "GL_SQUARE_CHROMIUM",
- },
- {
- 0x90a4,
- "GL_ROUND_CHROMIUM",
- },
- {
0x9100,
"GL_TEXTURE_2D_MULTISAMPLE",
},
@@ -7534,104 +7482,6 @@ std::string GLES2Util::GetStringMapBufferAccess(uint32_t value) {
base::size(string_table), value);
}
-std::string GLES2Util::GetStringMatrixMode(uint32_t value) {
- static const EnumToString string_table[] = {
- {GL_PATH_PROJECTION_CHROMIUM, "GL_PATH_PROJECTION_CHROMIUM"},
- {GL_PATH_MODELVIEW_CHROMIUM, "GL_PATH_MODELVIEW_CHROMIUM"},
- };
- return GLES2Util::GetQualifiedEnumString(string_table,
- base::size(string_table), value);
-}
-
-std::string GLES2Util::GetStringPathCoordType(uint32_t value) {
- static const EnumToString string_table[] = {
- {GL_BYTE, "GL_BYTE"}, {GL_UNSIGNED_BYTE, "GL_UNSIGNED_BYTE"},
- {GL_SHORT, "GL_SHORT"}, {GL_UNSIGNED_SHORT, "GL_UNSIGNED_SHORT"},
- {GL_FLOAT, "GL_FLOAT"},
- };
- return GLES2Util::GetQualifiedEnumString(string_table,
- base::size(string_table), value);
-}
-
-std::string GLES2Util::GetStringPathCoverMode(uint32_t value) {
- static const EnumToString string_table[] = {
- {GL_CONVEX_HULL_CHROMIUM, "GL_CONVEX_HULL_CHROMIUM"},
- {GL_BOUNDING_BOX_CHROMIUM, "GL_BOUNDING_BOX_CHROMIUM"},
- };
- return GLES2Util::GetQualifiedEnumString(string_table,
- base::size(string_table), value);
-}
-
-std::string GLES2Util::GetStringPathFillMode(uint32_t value) {
- static const EnumToString string_table[] = {
- {GL_INVERT, "GL_INVERT"},
- {GL_COUNT_UP_CHROMIUM, "GL_COUNT_UP_CHROMIUM"},
- {GL_COUNT_DOWN_CHROMIUM, "GL_COUNT_DOWN_CHROMIUM"},
- };
- return GLES2Util::GetQualifiedEnumString(string_table,
- base::size(string_table), value);
-}
-
-std::string GLES2Util::GetStringPathFragmentInputGenMode(uint32_t value) {
- static const EnumToString string_table[] = {
- {GL_NONE, "GL_NONE"},
- {GL_EYE_LINEAR_CHROMIUM, "GL_EYE_LINEAR_CHROMIUM"},
- {GL_OBJECT_LINEAR_CHROMIUM, "GL_OBJECT_LINEAR_CHROMIUM"},
- {GL_CONSTANT_CHROMIUM, "GL_CONSTANT_CHROMIUM"},
- };
- return GLES2Util::GetQualifiedEnumString(string_table,
- base::size(string_table), value);
-}
-
-std::string GLES2Util::GetStringPathInstancedCoverMode(uint32_t value) {
- static const EnumToString string_table[] = {
- {GL_CONVEX_HULL_CHROMIUM, "GL_CONVEX_HULL_CHROMIUM"},
- {GL_BOUNDING_BOX_CHROMIUM, "GL_BOUNDING_BOX_CHROMIUM"},
- {GL_BOUNDING_BOX_OF_BOUNDING_BOXES_CHROMIUM,
- "GL_BOUNDING_BOX_OF_BOUNDING_BOXES_CHROMIUM"},
- };
- return GLES2Util::GetQualifiedEnumString(string_table,
- base::size(string_table), value);
-}
-
-std::string GLES2Util::GetStringPathNameType(uint32_t value) {
- static const EnumToString string_table[] = {
- {GL_UNSIGNED_BYTE, "GL_UNSIGNED_BYTE"}, {GL_BYTE, "GL_BYTE"},
- {GL_UNSIGNED_SHORT, "GL_UNSIGNED_SHORT"}, {GL_SHORT, "GL_SHORT"},
- {GL_UNSIGNED_INT, "GL_UNSIGNED_INT"}, {GL_INT, "GL_INT"},
- };
- return GLES2Util::GetQualifiedEnumString(string_table,
- base::size(string_table), value);
-}
-
-std::string GLES2Util::GetStringPathParameter(uint32_t value) {
- static const EnumToString string_table[] = {
- {GL_PATH_STROKE_WIDTH_CHROMIUM, "GL_PATH_STROKE_WIDTH_CHROMIUM"},
- {GL_PATH_END_CAPS_CHROMIUM, "GL_PATH_END_CAPS_CHROMIUM"},
- {GL_PATH_JOIN_STYLE_CHROMIUM, "GL_PATH_JOIN_STYLE_CHROMIUM"},
- {GL_PATH_MITER_LIMIT_CHROMIUM, "GL_PATH_MITER_LIMIT_CHROMIUM"},
- {GL_PATH_STROKE_BOUND_CHROMIUM, "GL_PATH_STROKE_BOUND_CHROMIUM"},
- };
- return GLES2Util::GetQualifiedEnumString(string_table,
- base::size(string_table), value);
-}
-
-std::string GLES2Util::GetStringPathTransformType(uint32_t value) {
- static const EnumToString string_table[] = {
- {GL_NONE, "GL_NONE"},
- {GL_TRANSLATE_X_CHROMIUM, "GL_TRANSLATE_X_CHROMIUM"},
- {GL_TRANSLATE_Y_CHROMIUM, "GL_TRANSLATE_Y_CHROMIUM"},
- {GL_TRANSLATE_2D_CHROMIUM, "GL_TRANSLATE_2D_CHROMIUM"},
- {GL_TRANSLATE_3D_CHROMIUM, "GL_TRANSLATE_3D_CHROMIUM"},
- {GL_AFFINE_2D_CHROMIUM, "GL_AFFINE_2D_CHROMIUM"},
- {GL_AFFINE_3D_CHROMIUM, "GL_AFFINE_3D_CHROMIUM"},
- {GL_TRANSPOSE_AFFINE_2D_CHROMIUM, "GL_TRANSPOSE_AFFINE_2D_CHROMIUM"},
- {GL_TRANSPOSE_AFFINE_3D_CHROMIUM, "GL_TRANSPOSE_AFFINE_3D_CHROMIUM"},
- };
- return GLES2Util::GetQualifiedEnumString(string_table,
- base::size(string_table), value);
-}
-
std::string GLES2Util::GetStringPixelStore(uint32_t value) {
static const EnumToString string_table[] = {
{GL_PACK_ALIGNMENT, "GL_PACK_ALIGNMENT"},
diff --git a/chromium/gpu/command_buffer/common/gpu_memory_buffer_support.cc b/chromium/gpu/command_buffer/common/gpu_memory_buffer_support.cc
index fa6f40c260c..79ea620a228 100644
--- a/chromium/gpu/command_buffer/common/gpu_memory_buffer_support.cc
+++ b/chromium/gpu/command_buffer/common/gpu_memory_buffer_support.cc
@@ -14,6 +14,10 @@
namespace gpu {
+#if defined(OS_MACOSX)
+static uint32_t macos_specific_texture_target = GL_TEXTURE_RECTANGLE_ARB;
+#endif // defined(OS_MACOSX)
+
bool IsImageFromGpuMemoryBufferFormatSupported(
gfx::BufferFormat format,
const gpu::Capabilities& capabilities) {
@@ -32,8 +36,8 @@ bool IsImageSizeValidForGpuMemoryBufferFormat(const gfx::Size& size,
case gfx::BufferFormat::RGBX_8888:
case gfx::BufferFormat::BGRA_8888:
case gfx::BufferFormat::BGRX_8888:
- case gfx::BufferFormat::BGRX_1010102:
- case gfx::BufferFormat::RGBX_1010102:
+ case gfx::BufferFormat::BGRA_1010102:
+ case gfx::BufferFormat::RGBA_1010102:
case gfx::BufferFormat::RGBA_F16:
return true;
case gfx::BufferFormat::YVU_420:
@@ -49,7 +53,7 @@ bool IsImageSizeValidForGpuMemoryBufferFormat(const gfx::Size& size,
uint32_t GetPlatformSpecificTextureTarget() {
#if defined(OS_MACOSX)
- return GL_TEXTURE_RECTANGLE_ARB;
+ return macos_specific_texture_target;
#elif defined(OS_ANDROID) || defined(OS_LINUX)
return GL_TEXTURE_EXTERNAL_OES;
#elif defined(OS_WIN) || defined(OS_FUCHSIA)
@@ -62,6 +66,14 @@ uint32_t GetPlatformSpecificTextureTarget() {
#endif
}
+#if defined(OS_MACOSX)
+GPU_EXPORT void SetMacOSSpecificTextureTarget(uint32_t texture_target) {
+ DCHECK(texture_target == GL_TEXTURE_2D ||
+ texture_target == GL_TEXTURE_RECTANGLE_ARB);
+ macos_specific_texture_target = texture_target;
+}
+#endif // defined(OS_MACOSX)
+
GPU_EXPORT uint32_t GetBufferTextureTarget(gfx::BufferUsage usage,
gfx::BufferFormat format,
const Capabilities& capabilities) {
@@ -72,14 +84,16 @@ GPU_EXPORT uint32_t GetBufferTextureTarget(gfx::BufferUsage usage,
GPU_EXPORT bool NativeBufferNeedsPlatformSpecificTextureTarget(
gfx::BufferFormat format) {
-#if defined(USE_OZONE)
+#if defined(USE_OZONE) || defined(OS_LINUX)
// Always use GL_TEXTURE_2D as the target for RGB textures.
// https://crbug.com/916728
if (format == gfx::BufferFormat::R_8 || format == gfx::BufferFormat::RG_88 ||
format == gfx::BufferFormat::RGBA_8888 ||
format == gfx::BufferFormat::BGRA_8888 ||
format == gfx::BufferFormat::RGBX_8888 ||
- format == gfx::BufferFormat::BGRX_8888) {
+ format == gfx::BufferFormat::BGRX_8888 ||
+ format == gfx::BufferFormat::RGBA_1010102 ||
+ format == gfx::BufferFormat::BGRA_1010102) {
return false;
}
#elif defined(OS_ANDROID)
diff --git a/chromium/gpu/command_buffer/common/gpu_memory_buffer_support.h b/chromium/gpu/command_buffer/common/gpu_memory_buffer_support.h
index 7d51d660af3..7da3fb92798 100644
--- a/chromium/gpu/command_buffer/common/gpu_memory_buffer_support.h
+++ b/chromium/gpu/command_buffer/common/gpu_memory_buffer_support.h
@@ -5,6 +5,7 @@
#ifndef GPU_COMMAND_BUFFER_COMMON_GPU_MEMORY_BUFFER_SUPPORT_H_
#define GPU_COMMAND_BUFFER_COMMON_GPU_MEMORY_BUFFER_SUPPORT_H_
+#include "build/build_config.h"
#include "gpu/gpu_export.h"
#include "ui/gfx/buffer_types.h"
#include "ui/gfx/geometry/size.h"
@@ -53,6 +54,11 @@ GPU_EXPORT bool IsImageSizeValidForGpuMemoryBufferFormat(
// Returns the texture target to use with native GpuMemoryBuffers.
GPU_EXPORT uint32_t GetPlatformSpecificTextureTarget();
+#if defined(OS_MACOSX)
+// Set the texture target to use with MacOS native GpuMemoryBuffers.
+GPU_EXPORT void SetMacOSSpecificTextureTarget(uint32_t texture_target);
+#endif // defined(OS_MACOSX)
+
// Returns the texture target to be used for the given |usage| and |format|
// based on |capabilities|.
GPU_EXPORT uint32_t GetBufferTextureTarget(gfx::BufferUsage usage,
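The SetMacOSSpecificTextureTarget() entry point declared above lets callers switch native GpuMemoryBuffers on macOS between GL_TEXTURE_RECTANGLE_ARB (the default) and GL_TEXTURE_2D. A hedged usage sketch; the call site, parameter, and includes are assumptions, not part of this commit:

  // Sketch only; the caller and includes are assumed, not from this commit.
  #include "gpu/command_buffer/common/gpu_memory_buffer_support.h"
  #include "ui/gl/gl_bindings.h"  // assumed source of the GL_* constants

  void ConfigureMacGpuMemoryBufferTarget(bool prefer_texture_2d) {
  #if defined(OS_MACOSX)
    // Only GL_TEXTURE_2D and GL_TEXTURE_RECTANGLE_ARB pass the DCHECK in
    // SetMacOSSpecificTextureTarget().
    gpu::SetMacOSSpecificTextureTarget(
        prefer_texture_2d ? GL_TEXTURE_2D : GL_TEXTURE_RECTANGLE_ARB);
  #endif
    // Subsequent gpu::GetPlatformSpecificTextureTarget() calls return the
    // overridden value on macOS.
  }
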
diff --git a/chromium/gpu/command_buffer/common/raster_cmd_format_autogen.h b/chromium/gpu/command_buffer/common/raster_cmd_format_autogen.h
index efef0464127..dc1ae621aef 100644
--- a/chromium/gpu/command_buffer/common/raster_cmd_format_autogen.h
+++ b/chromium/gpu/command_buffer/common/raster_cmd_format_autogen.h
@@ -816,6 +816,8 @@ struct CopySubTextureINTERNALImmediate {
GLint _y,
GLsizei _width,
GLsizei _height,
+ GLboolean _unpack_flip_y,
+ GLboolean _unpack_premultiply_alpha,
const GLbyte* _mailboxes) {
SetHeader();
xoffset = _xoffset;
@@ -824,6 +826,8 @@ struct CopySubTextureINTERNALImmediate {
y = _y;
width = _width;
height = _height;
+ unpack_flip_y = _unpack_flip_y;
+ unpack_premultiply_alpha = _unpack_premultiply_alpha;
memcpy(ImmediateDataAddress(this), _mailboxes, ComputeDataSize());
}
@@ -834,9 +838,12 @@ struct CopySubTextureINTERNALImmediate {
GLint _y,
GLsizei _width,
GLsizei _height,
+ GLboolean _unpack_flip_y,
+ GLboolean _unpack_premultiply_alpha,
const GLbyte* _mailboxes) {
static_cast<ValueType*>(cmd)->Init(_xoffset, _yoffset, _x, _y, _width,
- _height, _mailboxes);
+ _height, _unpack_flip_y,
+ _unpack_premultiply_alpha, _mailboxes);
const uint32_t size = ComputeSize();
return NextImmediateCmdAddressTotalSize<ValueType>(cmd, size);
}
@@ -848,10 +855,12 @@ struct CopySubTextureINTERNALImmediate {
int32_t y;
int32_t width;
int32_t height;
+ uint32_t unpack_flip_y;
+ uint32_t unpack_premultiply_alpha;
};
-static_assert(sizeof(CopySubTextureINTERNALImmediate) == 28,
- "size of CopySubTextureINTERNALImmediate should be 28");
+static_assert(sizeof(CopySubTextureINTERNALImmediate) == 36,
+ "size of CopySubTextureINTERNALImmediate should be 36");
static_assert(offsetof(CopySubTextureINTERNALImmediate, header) == 0,
"offset of CopySubTextureINTERNALImmediate header should be 0");
static_assert(offsetof(CopySubTextureINTERNALImmediate, xoffset) == 4,
@@ -866,6 +875,13 @@ static_assert(offsetof(CopySubTextureINTERNALImmediate, width) == 20,
"offset of CopySubTextureINTERNALImmediate width should be 20");
static_assert(offsetof(CopySubTextureINTERNALImmediate, height) == 24,
"offset of CopySubTextureINTERNALImmediate height should be 24");
+static_assert(
+ offsetof(CopySubTextureINTERNALImmediate, unpack_flip_y) == 28,
+ "offset of CopySubTextureINTERNALImmediate unpack_flip_y should be 28");
+static_assert(offsetof(CopySubTextureINTERNALImmediate,
+ unpack_premultiply_alpha) == 32,
+ "offset of CopySubTextureINTERNALImmediate "
+ "unpack_premultiply_alpha should be 32");
struct TraceBeginCHROMIUM {
typedef TraceBeginCHROMIUM ValueType;
diff --git a/chromium/gpu/command_buffer/common/raster_cmd_format_test_autogen.h b/chromium/gpu/command_buffer/common/raster_cmd_format_test_autogen.h
index 68675bc339a..d665f875e99 100644
--- a/chromium/gpu/command_buffer/common/raster_cmd_format_test_autogen.h
+++ b/chromium/gpu/command_buffer/common/raster_cmd_format_test_autogen.h
@@ -341,7 +341,8 @@ TEST_F(RasterFormatTest, CopySubTextureINTERNALImmediate) {
void* next_cmd =
cmd.Set(&cmd, static_cast<GLint>(11), static_cast<GLint>(12),
static_cast<GLint>(13), static_cast<GLint>(14),
- static_cast<GLsizei>(15), static_cast<GLsizei>(16), data);
+ static_cast<GLsizei>(15), static_cast<GLsizei>(16),
+ static_cast<GLboolean>(17), static_cast<GLboolean>(18), data);
EXPECT_EQ(
static_cast<uint32_t>(cmds::CopySubTextureINTERNALImmediate::kCmdId),
cmd.header.command);
@@ -353,6 +354,8 @@ TEST_F(RasterFormatTest, CopySubTextureINTERNALImmediate) {
EXPECT_EQ(static_cast<GLint>(14), cmd.y);
EXPECT_EQ(static_cast<GLsizei>(15), cmd.width);
EXPECT_EQ(static_cast<GLsizei>(16), cmd.height);
+ EXPECT_EQ(static_cast<GLboolean>(17), cmd.unpack_flip_y);
+ EXPECT_EQ(static_cast<GLboolean>(18), cmd.unpack_premultiply_alpha);
CheckBytesWrittenMatchesExpectedSize(
next_cmd, sizeof(cmd) + RoundSizeToMultipleOfEntries(sizeof(data)));
}
diff --git a/chromium/gpu/command_buffer/common/shared_image_usage.h b/chromium/gpu/command_buffer/common/shared_image_usage.h
index 76cdd80ef92..ba0148e6f65 100644
--- a/chromium/gpu/command_buffer/common/shared_image_usage.h
+++ b/chromium/gpu/command_buffer/common/shared_image_usage.h
@@ -35,6 +35,10 @@ enum SharedImageUsage : uint32_t {
// TODO(crbug.com/969114): This usage is currently not supported in GL/Vulkan
// interop cases.
SHARED_IMAGE_USAGE_CONCURRENT_READ_WRITE = 1 << 9,
+ // Image will be used for video decode acceleration on Chrome OS.
+ SHARED_IMAGE_USAGE_VIDEO_DECODE = 1 << 10,
+  // Image will be used as a WebGPU swap chain texture.
+ SHARED_IMAGE_USAGE_WEBGPU_SWAP_CHAIN_TEXTURE = 1 << 11,
};
} // namespace gpu
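The two usage bits added above extend the SharedImageUsage bitmask. A short sketch of composing a mask with the new flags; the consuming call (for example SharedImageInterface::CreateSharedImage) is outside this diff and is not shown:

  // Sketch only: request a shared image usable by the Chrome OS video
  // decoder and as a WebGPU swap chain texture.
  uint32_t usage = gpu::SHARED_IMAGE_USAGE_VIDEO_DECODE |
                   gpu::SHARED_IMAGE_USAGE_WEBGPU_SWAP_CHAIN_TEXTURE;
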
diff --git a/chromium/gpu/command_buffer/common/skia_utils.cc b/chromium/gpu/command_buffer/common/skia_utils.cc
index e5cbb0b86f9..07070c76a56 100644
--- a/chromium/gpu/command_buffer/common/skia_utils.cc
+++ b/chromium/gpu/command_buffer/common/skia_utils.cc
@@ -7,7 +7,6 @@
#include <inttypes.h>
#include "base/strings/stringprintf.h"
-#include "base/system/sys_info.h"
#include "base/trace_event/memory_dump_manager.h"
#include "base/trace_event/process_memory_dump.h"
#include "build/build_config.h"
@@ -42,6 +41,12 @@ class SkiaGpuTraceMemoryDump : public SkTraceMemoryDump {
auto* dump = GetOrCreateAllocatorDump(dump_name);
dump->AddScalar(value_name, units, value);
}
+ void dumpStringValue(const char* dump_name,
+ const char* value_name,
+ const char* value) override {
+ auto* dump = GetOrCreateAllocatorDump(dump_name);
+ dump->AddString(value_name, "", value);
+ }
void setMemoryBacking(const char* dump_name,
const char* backing_type,
@@ -130,56 +135,6 @@ class SkiaGpuTraceMemoryDump : public SkTraceMemoryDump {
} // namespace
-void DetermineGrCacheLimitsFromAvailableMemory(
- size_t* max_resource_cache_bytes,
- size_t* max_glyph_cache_texture_bytes) {
- // Default limits.
-#if defined(OS_FUCHSIA)
- // Reduce protected budget on fuchsia due to https://fxb/36620.
- constexpr size_t kMaxGaneshResourceCacheBytes = 24 * 1024 * 1024;
-#else
- constexpr size_t kMaxGaneshResourceCacheBytes = 96 * 1024 * 1024;
-#endif // defined(OS_FUCHSIA)
- constexpr size_t kMaxDefaultGlyphCacheTextureBytes = 2048 * 1024 * 4;
-
- *max_resource_cache_bytes = kMaxGaneshResourceCacheBytes;
- *max_glyph_cache_texture_bytes = kMaxDefaultGlyphCacheTextureBytes;
-
-// We can't call AmountOfPhysicalMemory under NACL, so leave the default.
-#if !defined(OS_NACL)
- // The limit of the bytes allocated toward GPU resources in the GrContext's
- // GPU cache.
-#if defined(OS_FUCHSIA)
- // Reduce protected budget on fuchsia due to https://fxb/36620.
- constexpr size_t kMaxLowEndGaneshResourceCacheBytes = 24 * 1024 * 1024;
-#else
- constexpr size_t kMaxLowEndGaneshResourceCacheBytes = 48 * 1024 * 1024;
-#endif // defined(OS_FUCHSIA)
- constexpr size_t kMaxHighEndGaneshResourceCacheBytes = 256 * 1024 * 1024;
- // Limits for glyph cache textures.
- constexpr size_t kMaxLowEndGlyphCacheTextureBytes = 1024 * 512 * 4;
- // High-end / low-end memory cutoffs.
- constexpr int64_t kHighEndMemoryThreshold = (int64_t)4096 * 1024 * 1024;
- constexpr int64_t kLowEndMemoryThreshold = (int64_t)512 * 1024 * 1024;
-
- int64_t amount_of_physical_memory = base::SysInfo::AmountOfPhysicalMemory();
- if (amount_of_physical_memory <= kLowEndMemoryThreshold) {
- *max_resource_cache_bytes = kMaxLowEndGaneshResourceCacheBytes;
- *max_glyph_cache_texture_bytes = kMaxLowEndGlyphCacheTextureBytes;
- } else if (amount_of_physical_memory >= kHighEndMemoryThreshold) {
- *max_resource_cache_bytes = kMaxHighEndGaneshResourceCacheBytes;
- }
-#endif
-}
-
-void DefaultGrCacheLimitsForTests(size_t* max_resource_cache_bytes,
- size_t* max_glyph_cache_texture_bytes) {
- constexpr size_t kDefaultGlyphCacheTextureBytes = 2048 * 1024 * 4;
- constexpr size_t kDefaultGaneshResourceCacheBytes = 96 * 1024 * 1024;
- *max_resource_cache_bytes = kDefaultGaneshResourceCacheBytes;
- *max_glyph_cache_texture_bytes = kDefaultGlyphCacheTextureBytes;
-}
-
void DumpGrMemoryStatistics(const GrContext* context,
base::trace_event::ProcessMemoryDump* pmd,
base::Optional<uint64_t> tracing_guid) {
diff --git a/chromium/gpu/command_buffer/common/skia_utils.h b/chromium/gpu/command_buffer/common/skia_utils.h
index 9cd38ed5a0b..f523af3ffbc 100644
--- a/chromium/gpu/command_buffer/common/skia_utils.h
+++ b/chromium/gpu/command_buffer/common/skia_utils.h
@@ -21,14 +21,6 @@ class ProcessMemoryDump;
namespace gpu {
namespace raster {
-RASTER_EXPORT void DetermineGrCacheLimitsFromAvailableMemory(
- size_t* max_resource_cache_bytes,
- size_t* max_glyph_cache_texture_bytes);
-
-RASTER_EXPORT void DefaultGrCacheLimitsForTests(
- size_t* max_resource_cache_bytes,
- size_t* max_glyph_cache_texture_bytes);
-
// Dumps memory usage from the |context| to |pmd|. A |tracing_guid| can be used
// if these resources are referenced across processes for sharing across dumps.
RASTER_EXPORT void DumpGrMemoryStatistics(
diff --git a/chromium/gpu/command_buffer/common/webgpu_cmd_enums.h b/chromium/gpu/command_buffer/common/webgpu_cmd_enums.h
index b630fbbeab8..83615b30c8a 100644
--- a/chromium/gpu/command_buffer/common/webgpu_cmd_enums.h
+++ b/chromium/gpu/command_buffer/common/webgpu_cmd_enums.h
@@ -18,6 +18,7 @@ enum class PowerPreference : uint32_t {
enum class DawnReturnDataType : uint32_t {
kDawnCommands,
kRequestedDawnAdapterProperties,
+ kRequestedDeviceReturnInfo,
kNumDawnReturnDataType
};
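
The new kRequestedDeviceReturnInfo value is inserted before kNumDawnReturnDataType, which keeps the last enumerator usable as a count/sentinel. A minimal validation sketch built on that convention (IsKnownReturnDataType is hypothetical, not part of the Chromium code):

#include <cstdint>

enum class DawnReturnDataType : uint32_t {
  kDawnCommands,
  kRequestedDawnAdapterProperties,
  kRequestedDeviceReturnInfo,
  kNumDawnReturnDataType  // sentinel: must stay last
};

// Reject any type value this build of the service does not know about before
// interpreting the rest of the return-data blob.
bool IsKnownReturnDataType(uint32_t raw) {
  return raw <
         static_cast<uint32_t>(DawnReturnDataType::kNumDawnReturnDataType);
}
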
diff --git a/chromium/gpu/command_buffer/common/webgpu_cmd_format.h b/chromium/gpu/command_buffer/common/webgpu_cmd_format.h
index 781a76abb46..ce892da7bfc 100644
--- a/chromium/gpu/command_buffer/common/webgpu_cmd_format.h
+++ b/chromium/gpu/command_buffer/common/webgpu_cmd_format.h
@@ -5,18 +5,10 @@
#ifndef GPU_COMMAND_BUFFER_COMMON_WEBGPU_CMD_FORMAT_H_
#define GPU_COMMAND_BUFFER_COMMON_WEBGPU_CMD_FORMAT_H_
-#include <stddef.h>
-#include <stdint.h>
-#include <string.h>
-
-#include "base/atomicops.h"
-#include "base/logging.h"
-#include "base/macros.h"
-#include "gpu/command_buffer/common/common_cmd_format.h"
#include "gpu/command_buffer/common/gl2_types.h"
+#include "gpu/command_buffer/common/gles2_cmd_utils.h"
#include "gpu/command_buffer/common/webgpu_cmd_enums.h"
#include "gpu/command_buffer/common/webgpu_cmd_ids.h"
-#include "ui/gfx/buffer_types.h"
namespace gpu {
namespace webgpu {
@@ -31,25 +23,49 @@ static_assert(
sizeof(DawnReturnDataHeader) % GPU_DAWN_RETURN_DATA_ALIGNMENT == 0,
"DawnReturnDataHeader must align to GPU_DAWN_RETURN_DATA_ALIGNMENT");
-struct alignas(GPU_DAWN_RETURN_DATA_ALIGNMENT) DawnReturnAdapterIDs {
- uint32_t request_adapter_serial;
+struct DawnReturnCommandsInfoHeader {
+ DawnReturnDataHeader return_data_header = {DawnReturnDataType::kDawnCommands};
+ DawnDeviceClientID device_client_id;
+};
+
+static_assert(offsetof(DawnReturnCommandsInfoHeader, return_data_header) == 0,
+ "The offset of return_data_header must be 0");
+
+struct DawnReturnCommandsInfo {
+ DawnReturnCommandsInfoHeader header;
+ alignas(GPU_DAWN_RETURN_DATA_ALIGNMENT) char deserialized_buffer[];
+};
+
+static_assert(offsetof(DawnReturnCommandsInfo, header) == 0,
+ "The offset of header must be 0");
+
+struct DawnReturnAdapterInfoHeader {
+ DawnReturnDataHeader return_data_header = {
+ DawnReturnDataType::kRequestedDawnAdapterProperties};
+ DawnRequestAdapterSerial request_adapter_serial;
uint32_t adapter_service_id;
};
-static_assert(
- sizeof(DawnReturnAdapterIDs) % GPU_DAWN_RETURN_DATA_ALIGNMENT == 0,
- "DawnReturnAdapterIDs must align to GPU_DAWN_RETURN_DATA_ALIGNMENT");
+static_assert(offsetof(DawnReturnAdapterInfoHeader, return_data_header) == 0,
+ "The offset of return_data_header must be 0");
+
+struct DawnReturnAdapterInfo {
+ DawnReturnAdapterInfoHeader header;
+ alignas(GPU_DAWN_RETURN_DATA_ALIGNMENT) char deserialized_buffer[];
+};
+
+static_assert(offsetof(DawnReturnAdapterInfo, header) == 0,
+ "The offset of header must be 0");
-struct alignas(GPU_DAWN_RETURN_DATA_ALIGNMENT) DawnReturnAdapterInfo {
- DawnReturnAdapterIDs adapter_ids;
- char deserialized_buffer[];
+struct DawnReturnRequestDeviceInfo {
+ DawnReturnDataHeader return_data_header = {
+ DawnReturnDataType::kRequestedDeviceReturnInfo};
+ DawnDeviceClientID device_client_id;
+ bool is_request_device_success;
};
-static_assert(offsetof(DawnReturnAdapterInfo, deserialized_buffer) %
- GPU_DAWN_RETURN_DATA_ALIGNMENT ==
- 0,
- "The offset of deserialized_buffer must align to "
- "GPU_DAWN_RETURN_DATA_ALIGNMENT");
+static_assert(offsetof(DawnReturnRequestDeviceInfo, return_data_header) == 0,
+ "The offset of return_data_header must be 0");
// Command buffer is GPU_COMMAND_BUFFER_ENTRY_ALIGNMENT byte aligned.
#pragma pack(push, 4)
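
Taken together, the reworked structs above follow one rule: every return-data blob begins with a DawnReturnDataHeader at offset 0 identifying its DawnReturnDataType, so the receiving side can dispatch on the header before interpreting the payload. A minimal dispatch sketch under that assumption; the header's field name and HandleReturnData itself are illustrative, not the real wire-decode code:

#include <cstddef>
#include <cstdint>
#include <cstring>

enum class DawnReturnDataType : uint32_t {
  kDawnCommands,
  kRequestedDawnAdapterProperties,
  kRequestedDeviceReturnInfo,
};

struct DawnReturnDataHeader {
  DawnReturnDataType return_data_type;  // field name assumed
};

// Read the leading header, then branch on its type before touching the rest
// of the payload. The static_asserts above guarantee every return struct
// starts with this header at offset 0, so this is safe once the size check
// passes.
void HandleReturnData(const char* data, size_t size) {
  if (size < sizeof(DawnReturnDataHeader))
    return;  // malformed blob
  DawnReturnDataHeader header;
  std::memcpy(&header, data, sizeof(header));
  switch (header.return_data_type) {
    case DawnReturnDataType::kDawnCommands:
      // Treat |data| as a DawnReturnCommandsInfo and replay the commands.
      break;
    case DawnReturnDataType::kRequestedDawnAdapterProperties:
      // Treat |data| as a DawnReturnAdapterInfo.
      break;
    case DawnReturnDataType::kRequestedDeviceReturnInfo:
      // Treat |data| as a DawnReturnRequestDeviceInfo.
      break;
  }
}
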
diff --git a/chromium/gpu/command_buffer/common/webgpu_cmd_format_autogen.h b/chromium/gpu/command_buffer/common/webgpu_cmd_format_autogen.h
index 006325bfefd..5a7f561c47b 100644
--- a/chromium/gpu/command_buffer/common/webgpu_cmd_format_autogen.h
+++ b/chromium/gpu/command_buffer/common/webgpu_cmd_format_autogen.h
@@ -25,45 +25,51 @@ struct DawnCommands {
void SetHeader() { header.SetCmd<ValueType>(); }
- void Init(uint32_t _commands_shm_id,
+ void Init(uint64_t _device_client_id,
+ uint32_t _commands_shm_id,
uint32_t _commands_shm_offset,
uint32_t _size) {
SetHeader();
+ device_client_id = _device_client_id;
commands_shm_id = _commands_shm_id;
commands_shm_offset = _commands_shm_offset;
size = _size;
}
void* Set(void* cmd,
+ uint64_t _device_client_id,
uint32_t _commands_shm_id,
uint32_t _commands_shm_offset,
uint32_t _size) {
- static_cast<ValueType*>(cmd)->Init(_commands_shm_id, _commands_shm_offset,
- _size);
+ static_cast<ValueType*>(cmd)->Init(_device_client_id, _commands_shm_id,
+ _commands_shm_offset, _size);
return NextCmdAddress<ValueType>(cmd);
}
gpu::CommandHeader header;
+ uint32_t device_client_id;
uint32_t commands_shm_id;
uint32_t commands_shm_offset;
uint32_t size;
};
-static_assert(sizeof(DawnCommands) == 16, "size of DawnCommands should be 16");
+static_assert(sizeof(DawnCommands) == 20, "size of DawnCommands should be 20");
static_assert(offsetof(DawnCommands, header) == 0,
"offset of DawnCommands header should be 0");
-static_assert(offsetof(DawnCommands, commands_shm_id) == 4,
- "offset of DawnCommands commands_shm_id should be 4");
-static_assert(offsetof(DawnCommands, commands_shm_offset) == 8,
- "offset of DawnCommands commands_shm_offset should be 8");
-static_assert(offsetof(DawnCommands, size) == 12,
- "offset of DawnCommands size should be 12");
+static_assert(offsetof(DawnCommands, device_client_id) == 4,
+ "offset of DawnCommands device_client_id should be 4");
+static_assert(offsetof(DawnCommands, commands_shm_id) == 8,
+ "offset of DawnCommands commands_shm_id should be 8");
+static_assert(offsetof(DawnCommands, commands_shm_offset) == 12,
+ "offset of DawnCommands commands_shm_offset should be 12");
+static_assert(offsetof(DawnCommands, size) == 16,
+ "offset of DawnCommands size should be 16");
struct AssociateMailboxImmediate {
typedef AssociateMailboxImmediate ValueType;
static const CommandId kCmdId = kAssociateMailboxImmediate;
static const cmd::ArgFlags kArgFlags = cmd::kAtLeastN;
- static const uint8_t cmd_flags = CMD_FLAG_SET_TRACE_LEVEL(1);
+ static const uint8_t cmd_flags = CMD_FLAG_SET_TRACE_LEVEL(3);
static uint32_t ComputeDataSize() {
return static_cast<uint32_t>(sizeof(GLbyte) * 16);
@@ -75,14 +81,16 @@ struct AssociateMailboxImmediate {
void SetHeader() { header.SetCmdByTotalSize<ValueType>(ComputeSize()); }
- void Init(GLuint _device_id,
+ void Init(GLuint64 _device_client_id,
GLuint _device_generation,
GLuint _id,
GLuint _generation,
GLuint _usage,
const GLbyte* _mailbox) {
SetHeader();
- device_id = _device_id;
+ gles2::GLES2Util::MapUint64ToTwoUint32(
+ static_cast<uint64_t>(_device_client_id), &device_client_id_0,
+ &device_client_id_1);
device_generation = _device_generation;
id = _id;
generation = _generation;
@@ -91,47 +99,57 @@ struct AssociateMailboxImmediate {
}
void* Set(void* cmd,
- GLuint _device_id,
+ GLuint64 _device_client_id,
GLuint _device_generation,
GLuint _id,
GLuint _generation,
GLuint _usage,
const GLbyte* _mailbox) {
- static_cast<ValueType*>(cmd)->Init(_device_id, _device_generation, _id,
- _generation, _usage, _mailbox);
+ static_cast<ValueType*>(cmd)->Init(_device_client_id, _device_generation,
+ _id, _generation, _usage, _mailbox);
const uint32_t size = ComputeSize();
return NextImmediateCmdAddressTotalSize<ValueType>(cmd, size);
}
+ GLuint64 device_client_id() const volatile {
+ return static_cast<GLuint64>(gles2::GLES2Util::MapTwoUint32ToUint64(
+ device_client_id_0, device_client_id_1));
+ }
+
gpu::CommandHeader header;
- uint32_t device_id;
+ uint32_t device_client_id_0;
+ uint32_t device_client_id_1;
uint32_t device_generation;
uint32_t id;
uint32_t generation;
uint32_t usage;
};
-static_assert(sizeof(AssociateMailboxImmediate) == 24,
- "size of AssociateMailboxImmediate should be 24");
+static_assert(sizeof(AssociateMailboxImmediate) == 28,
+ "size of AssociateMailboxImmediate should be 28");
static_assert(offsetof(AssociateMailboxImmediate, header) == 0,
"offset of AssociateMailboxImmediate header should be 0");
-static_assert(offsetof(AssociateMailboxImmediate, device_id) == 4,
- "offset of AssociateMailboxImmediate device_id should be 4");
static_assert(
- offsetof(AssociateMailboxImmediate, device_generation) == 8,
- "offset of AssociateMailboxImmediate device_generation should be 8");
-static_assert(offsetof(AssociateMailboxImmediate, id) == 12,
- "offset of AssociateMailboxImmediate id should be 12");
-static_assert(offsetof(AssociateMailboxImmediate, generation) == 16,
- "offset of AssociateMailboxImmediate generation should be 16");
-static_assert(offsetof(AssociateMailboxImmediate, usage) == 20,
- "offset of AssociateMailboxImmediate usage should be 20");
+ offsetof(AssociateMailboxImmediate, device_client_id_0) == 4,
+ "offset of AssociateMailboxImmediate device_client_id_0 should be 4");
+static_assert(
+ offsetof(AssociateMailboxImmediate, device_client_id_1) == 8,
+ "offset of AssociateMailboxImmediate device_client_id_1 should be 8");
+static_assert(
+ offsetof(AssociateMailboxImmediate, device_generation) == 12,
+ "offset of AssociateMailboxImmediate device_generation should be 12");
+static_assert(offsetof(AssociateMailboxImmediate, id) == 16,
+ "offset of AssociateMailboxImmediate id should be 16");
+static_assert(offsetof(AssociateMailboxImmediate, generation) == 20,
+ "offset of AssociateMailboxImmediate generation should be 20");
+static_assert(offsetof(AssociateMailboxImmediate, usage) == 24,
+ "offset of AssociateMailboxImmediate usage should be 24");
struct DissociateMailbox {
typedef DissociateMailbox ValueType;
static const CommandId kCmdId = kDissociateMailbox;
static const cmd::ArgFlags kArgFlags = cmd::kFixed;
- static const uint8_t cmd_flags = CMD_FLAG_SET_TRACE_LEVEL(1);
+ static const uint8_t cmd_flags = CMD_FLAG_SET_TRACE_LEVEL(3);
static uint32_t ComputeSize() {
return static_cast<uint32_t>(sizeof(ValueType)); // NOLINT
@@ -139,30 +157,50 @@ struct DissociateMailbox {
void SetHeader() { header.SetCmd<ValueType>(); }
- void Init(GLuint _texture_id, GLuint _texture_generation) {
+ void Init(GLuint64 _device_client_id,
+ GLuint _texture_id,
+ GLuint _texture_generation) {
SetHeader();
+ gles2::GLES2Util::MapUint64ToTwoUint32(
+ static_cast<uint64_t>(_device_client_id), &device_client_id_0,
+ &device_client_id_1);
texture_id = _texture_id;
texture_generation = _texture_generation;
}
- void* Set(void* cmd, GLuint _texture_id, GLuint _texture_generation) {
- static_cast<ValueType*>(cmd)->Init(_texture_id, _texture_generation);
+ void* Set(void* cmd,
+ GLuint64 _device_client_id,
+ GLuint _texture_id,
+ GLuint _texture_generation) {
+ static_cast<ValueType*>(cmd)->Init(_device_client_id, _texture_id,
+ _texture_generation);
return NextCmdAddress<ValueType>(cmd);
}
+ GLuint64 device_client_id() const volatile {
+ return static_cast<GLuint64>(gles2::GLES2Util::MapTwoUint32ToUint64(
+ device_client_id_0, device_client_id_1));
+ }
+
gpu::CommandHeader header;
+ uint32_t device_client_id_0;
+ uint32_t device_client_id_1;
uint32_t texture_id;
uint32_t texture_generation;
};
-static_assert(sizeof(DissociateMailbox) == 12,
- "size of DissociateMailbox should be 12");
+static_assert(sizeof(DissociateMailbox) == 20,
+ "size of DissociateMailbox should be 20");
static_assert(offsetof(DissociateMailbox, header) == 0,
"offset of DissociateMailbox header should be 0");
-static_assert(offsetof(DissociateMailbox, texture_id) == 4,
- "offset of DissociateMailbox texture_id should be 4");
-static_assert(offsetof(DissociateMailbox, texture_generation) == 8,
- "offset of DissociateMailbox texture_generation should be 8");
+static_assert(offsetof(DissociateMailbox, device_client_id_0) == 4,
+ "offset of DissociateMailbox device_client_id_0 should be 4");
+static_assert(offsetof(DissociateMailbox, device_client_id_1) == 8,
+ "offset of DissociateMailbox device_client_id_1 should be 8");
+static_assert(offsetof(DissociateMailbox, texture_id) == 12,
+ "offset of DissociateMailbox texture_id should be 12");
+static_assert(offsetof(DissociateMailbox, texture_generation) == 16,
+ "offset of DissociateMailbox texture_generation should be 16");
struct RequestAdapter {
typedef RequestAdapter ValueType;
@@ -176,14 +214,14 @@ struct RequestAdapter {
void SetHeader() { header.SetCmd<ValueType>(); }
- void Init(uint32_t _request_adapter_serial, uint32_t _power_preference) {
+ void Init(uint64_t _request_adapter_serial, uint32_t _power_preference) {
SetHeader();
request_adapter_serial = _request_adapter_serial;
power_preference = _power_preference;
}
void* Set(void* cmd,
- uint32_t _request_adapter_serial,
+ uint64_t _request_adapter_serial,
uint32_t _power_preference) {
static_cast<ValueType*>(cmd)->Init(_request_adapter_serial,
_power_preference);
@@ -216,11 +254,13 @@ struct RequestDevice {
void SetHeader() { header.SetCmd<ValueType>(); }
- void Init(uint32_t _adapter_service_id,
+ void Init(uint64_t _device_client_id,
+ uint32_t _adapter_service_id,
uint32_t _request_device_properties_shm_id,
uint32_t _request_device_properties_shm_offset,
uint32_t _request_device_properties_size) {
SetHeader();
+ device_client_id = _device_client_id;
adapter_service_id = _adapter_service_id;
request_device_properties_shm_id = _request_device_properties_shm_id;
request_device_properties_shm_offset =
@@ -229,38 +269,75 @@ struct RequestDevice {
}
void* Set(void* cmd,
+ uint64_t _device_client_id,
uint32_t _adapter_service_id,
uint32_t _request_device_properties_shm_id,
uint32_t _request_device_properties_shm_offset,
uint32_t _request_device_properties_size) {
- static_cast<ValueType*>(cmd)->Init(
- _adapter_service_id, _request_device_properties_shm_id,
- _request_device_properties_shm_offset, _request_device_properties_size);
+ static_cast<ValueType*>(cmd)->Init(_device_client_id, _adapter_service_id,
+ _request_device_properties_shm_id,
+ _request_device_properties_shm_offset,
+ _request_device_properties_size);
return NextCmdAddress<ValueType>(cmd);
}
gpu::CommandHeader header;
+ uint32_t device_client_id;
uint32_t adapter_service_id;
uint32_t request_device_properties_shm_id;
uint32_t request_device_properties_shm_offset;
uint32_t request_device_properties_size;
};
-static_assert(sizeof(RequestDevice) == 20,
- "size of RequestDevice should be 20");
+static_assert(sizeof(RequestDevice) == 24,
+ "size of RequestDevice should be 24");
static_assert(offsetof(RequestDevice, header) == 0,
"offset of RequestDevice header should be 0");
-static_assert(offsetof(RequestDevice, adapter_service_id) == 4,
- "offset of RequestDevice adapter_service_id should be 4");
+static_assert(offsetof(RequestDevice, device_client_id) == 4,
+ "offset of RequestDevice device_client_id should be 4");
+static_assert(offsetof(RequestDevice, adapter_service_id) == 8,
+ "offset of RequestDevice adapter_service_id should be 8");
static_assert(
- offsetof(RequestDevice, request_device_properties_shm_id) == 8,
- "offset of RequestDevice request_device_properties_shm_id should be 8");
+ offsetof(RequestDevice, request_device_properties_shm_id) == 12,
+ "offset of RequestDevice request_device_properties_shm_id should be 12");
static_assert(offsetof(RequestDevice, request_device_properties_shm_offset) ==
- 12,
+ 16,
"offset of RequestDevice request_device_properties_shm_offset "
- "should be 12");
+ "should be 16");
static_assert(
- offsetof(RequestDevice, request_device_properties_size) == 16,
- "offset of RequestDevice request_device_properties_size should be 16");
+ offsetof(RequestDevice, request_device_properties_size) == 20,
+ "offset of RequestDevice request_device_properties_size should be 20");
+
+struct RemoveDevice {
+ typedef RemoveDevice ValueType;
+ static const CommandId kCmdId = kRemoveDevice;
+ static const cmd::ArgFlags kArgFlags = cmd::kFixed;
+ static const uint8_t cmd_flags = CMD_FLAG_SET_TRACE_LEVEL(3);
+
+ static uint32_t ComputeSize() {
+ return static_cast<uint32_t>(sizeof(ValueType)); // NOLINT
+ }
+
+ void SetHeader() { header.SetCmd<ValueType>(); }
+
+ void Init(uint64_t _device_client_id) {
+ SetHeader();
+ device_client_id = _device_client_id;
+ }
+
+ void* Set(void* cmd, uint64_t _device_client_id) {
+ static_cast<ValueType*>(cmd)->Init(_device_client_id);
+ return NextCmdAddress<ValueType>(cmd);
+ }
+
+ gpu::CommandHeader header;
+ uint32_t device_client_id;
+};
+
+static_assert(sizeof(RemoveDevice) == 8, "size of RemoveDevice should be 8");
+static_assert(offsetof(RemoveDevice, header) == 0,
+ "offset of RemoveDevice header should be 0");
+static_assert(offsetof(RemoveDevice, device_client_id) == 4,
+ "offset of RemoveDevice device_client_id should be 4");
#endif // GPU_COMMAND_BUFFER_COMMON_WEBGPU_CMD_FORMAT_AUTOGEN_H_
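
AssociateMailboxImmediate and DissociateMailbox above take a 64-bit device_client_id but store it as two uint32 fields and rebuild it in a device_client_id() accessor, because command-buffer entries are defined in 4-byte units. A minimal sketch of split/join helpers in the spirit of gles2::GLES2Util::MapUint64ToTwoUint32 / MapTwoUint32ToUint64 (only the names appear in the diff; the word order here is an assumption):

#include <cstdint>

// Split a 64-bit id into two 32-bit command-buffer fields (low word first by
// assumption; the real helpers define the exact order).
void MapUint64ToTwoUint32(uint64_t value, uint32_t* lo, uint32_t* hi) {
  *lo = static_cast<uint32_t>(value & 0xFFFFFFFFu);
  *hi = static_cast<uint32_t>(value >> 32);
}

// Reassemble the id on the service side, as the device_client_id() accessors
// above do.
uint64_t MapTwoUint32ToUint64(uint32_t lo, uint32_t hi) {
  return static_cast<uint64_t>(lo) | (static_cast<uint64_t>(hi) << 32);
}

With that layout each struct remains a whole number of 4-byte command-buffer entries, which is exactly what the revised sizeof/offsetof static_asserts check.
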
diff --git a/chromium/gpu/command_buffer/common/webgpu_cmd_format_test_autogen.h b/chromium/gpu/command_buffer/common/webgpu_cmd_format_test_autogen.h
index d2285ae44ac..589388bf348 100644
--- a/chromium/gpu/command_buffer/common/webgpu_cmd_format_test_autogen.h
+++ b/chromium/gpu/command_buffer/common/webgpu_cmd_format_test_autogen.h
@@ -17,14 +17,15 @@
TEST_F(WebGPUFormatTest, DawnCommands) {
cmds::DawnCommands& cmd = *GetBufferAs<cmds::DawnCommands>();
void* next_cmd =
- cmd.Set(&cmd, static_cast<uint32_t>(11), static_cast<uint32_t>(12),
- static_cast<uint32_t>(13));
+ cmd.Set(&cmd, static_cast<uint64_t>(11), static_cast<uint32_t>(12),
+ static_cast<uint32_t>(13), static_cast<uint32_t>(14));
EXPECT_EQ(static_cast<uint32_t>(cmds::DawnCommands::kCmdId),
cmd.header.command);
EXPECT_EQ(sizeof(cmd), cmd.header.size * 4u);
- EXPECT_EQ(static_cast<uint32_t>(11), cmd.commands_shm_id);
- EXPECT_EQ(static_cast<uint32_t>(12), cmd.commands_shm_offset);
- EXPECT_EQ(static_cast<uint32_t>(13), cmd.size);
+ EXPECT_EQ(static_cast<uint64_t>(11), cmd.device_client_id);
+ EXPECT_EQ(static_cast<uint32_t>(12), cmd.commands_shm_id);
+ EXPECT_EQ(static_cast<uint32_t>(13), cmd.commands_shm_offset);
+ EXPECT_EQ(static_cast<uint32_t>(14), cmd.size);
CheckBytesWrittenMatchesExpectedSize(next_cmd, sizeof(cmd));
}
@@ -51,14 +52,14 @@ TEST_F(WebGPUFormatTest, AssociateMailboxImmediate) {
cmds::AssociateMailboxImmediate& cmd =
*GetBufferAs<cmds::AssociateMailboxImmediate>();
void* next_cmd =
- cmd.Set(&cmd, static_cast<GLuint>(11), static_cast<GLuint>(12),
+ cmd.Set(&cmd, static_cast<GLuint64>(11), static_cast<GLuint>(12),
static_cast<GLuint>(13), static_cast<GLuint>(14),
static_cast<GLuint>(15), data);
EXPECT_EQ(static_cast<uint32_t>(cmds::AssociateMailboxImmediate::kCmdId),
cmd.header.command);
EXPECT_EQ(sizeof(cmd) + RoundSizeToMultipleOfEntries(sizeof(data)),
cmd.header.size * 4u);
- EXPECT_EQ(static_cast<GLuint>(11), cmd.device_id);
+ EXPECT_EQ(static_cast<GLuint64>(11), cmd.device_client_id());
EXPECT_EQ(static_cast<GLuint>(12), cmd.device_generation);
EXPECT_EQ(static_cast<GLuint>(13), cmd.id);
EXPECT_EQ(static_cast<GLuint>(14), cmd.generation);
@@ -69,24 +70,25 @@ TEST_F(WebGPUFormatTest, AssociateMailboxImmediate) {
TEST_F(WebGPUFormatTest, DissociateMailbox) {
cmds::DissociateMailbox& cmd = *GetBufferAs<cmds::DissociateMailbox>();
- void* next_cmd =
- cmd.Set(&cmd, static_cast<GLuint>(11), static_cast<GLuint>(12));
+ void* next_cmd = cmd.Set(&cmd, static_cast<GLuint64>(11),
+ static_cast<GLuint>(12), static_cast<GLuint>(13));
EXPECT_EQ(static_cast<uint32_t>(cmds::DissociateMailbox::kCmdId),
cmd.header.command);
EXPECT_EQ(sizeof(cmd), cmd.header.size * 4u);
- EXPECT_EQ(static_cast<GLuint>(11), cmd.texture_id);
- EXPECT_EQ(static_cast<GLuint>(12), cmd.texture_generation);
+ EXPECT_EQ(static_cast<GLuint64>(11), cmd.device_client_id());
+ EXPECT_EQ(static_cast<GLuint>(12), cmd.texture_id);
+ EXPECT_EQ(static_cast<GLuint>(13), cmd.texture_generation);
CheckBytesWrittenMatchesExpectedSize(next_cmd, sizeof(cmd));
}
TEST_F(WebGPUFormatTest, RequestAdapter) {
cmds::RequestAdapter& cmd = *GetBufferAs<cmds::RequestAdapter>();
void* next_cmd =
- cmd.Set(&cmd, static_cast<uint32_t>(11), static_cast<uint32_t>(12));
+ cmd.Set(&cmd, static_cast<uint64_t>(11), static_cast<uint32_t>(12));
EXPECT_EQ(static_cast<uint32_t>(cmds::RequestAdapter::kCmdId),
cmd.header.command);
EXPECT_EQ(sizeof(cmd), cmd.header.size * 4u);
- EXPECT_EQ(static_cast<uint32_t>(11), cmd.request_adapter_serial);
+ EXPECT_EQ(static_cast<uint64_t>(11), cmd.request_adapter_serial);
EXPECT_EQ(static_cast<uint32_t>(12), cmd.power_preference);
CheckBytesWrittenMatchesExpectedSize(next_cmd, sizeof(cmd));
}
@@ -94,16 +96,28 @@ TEST_F(WebGPUFormatTest, RequestAdapter) {
TEST_F(WebGPUFormatTest, RequestDevice) {
cmds::RequestDevice& cmd = *GetBufferAs<cmds::RequestDevice>();
void* next_cmd =
- cmd.Set(&cmd, static_cast<uint32_t>(11), static_cast<uint32_t>(12),
- static_cast<uint32_t>(13), static_cast<uint32_t>(14));
+ cmd.Set(&cmd, static_cast<uint64_t>(11), static_cast<uint32_t>(12),
+ static_cast<uint32_t>(13), static_cast<uint32_t>(14),
+ static_cast<uint32_t>(15));
EXPECT_EQ(static_cast<uint32_t>(cmds::RequestDevice::kCmdId),
cmd.header.command);
EXPECT_EQ(sizeof(cmd), cmd.header.size * 4u);
- EXPECT_EQ(static_cast<uint32_t>(11), cmd.adapter_service_id);
- EXPECT_EQ(static_cast<uint32_t>(12), cmd.request_device_properties_shm_id);
- EXPECT_EQ(static_cast<uint32_t>(13),
+ EXPECT_EQ(static_cast<uint64_t>(11), cmd.device_client_id);
+ EXPECT_EQ(static_cast<uint32_t>(12), cmd.adapter_service_id);
+ EXPECT_EQ(static_cast<uint32_t>(13), cmd.request_device_properties_shm_id);
+ EXPECT_EQ(static_cast<uint32_t>(14),
cmd.request_device_properties_shm_offset);
- EXPECT_EQ(static_cast<uint32_t>(14), cmd.request_device_properties_size);
+ EXPECT_EQ(static_cast<uint32_t>(15), cmd.request_device_properties_size);
+ CheckBytesWrittenMatchesExpectedSize(next_cmd, sizeof(cmd));
+}
+
+TEST_F(WebGPUFormatTest, RemoveDevice) {
+ cmds::RemoveDevice& cmd = *GetBufferAs<cmds::RemoveDevice>();
+ void* next_cmd = cmd.Set(&cmd, static_cast<uint64_t>(11));
+ EXPECT_EQ(static_cast<uint32_t>(cmds::RemoveDevice::kCmdId),
+ cmd.header.command);
+ EXPECT_EQ(sizeof(cmd), cmd.header.size * 4u);
+ EXPECT_EQ(static_cast<uint64_t>(11), cmd.device_client_id);
CheckBytesWrittenMatchesExpectedSize(next_cmd, sizeof(cmd));
}
diff --git a/chromium/gpu/command_buffer/common/webgpu_cmd_ids.h b/chromium/gpu/command_buffer/common/webgpu_cmd_ids.h
index 5e3e659d6ec..6a5a107a835 100644
--- a/chromium/gpu/command_buffer/common/webgpu_cmd_ids.h
+++ b/chromium/gpu/command_buffer/common/webgpu_cmd_ids.h
@@ -16,6 +16,9 @@ namespace webgpu {
const char* GetCommandName(CommandId command_id);
+using DawnRequestAdapterSerial = uint64_t;
+using DawnDeviceClientID = uint64_t;
+
} // namespace webgpu
} // namespace gpu
diff --git a/chromium/gpu/command_buffer/common/webgpu_cmd_ids_autogen.h b/chromium/gpu/command_buffer/common/webgpu_cmd_ids_autogen.h
index 3e706909d44..6ebfffd4fc3 100644
--- a/chromium/gpu/command_buffer/common/webgpu_cmd_ids_autogen.h
+++ b/chromium/gpu/command_buffer/common/webgpu_cmd_ids_autogen.h
@@ -16,7 +16,8 @@
OP(AssociateMailboxImmediate) /* 257 */ \
OP(DissociateMailbox) /* 258 */ \
OP(RequestAdapter) /* 259 */ \
- OP(RequestDevice) /* 260 */
+ OP(RequestDevice) /* 260 */ \
+ OP(RemoveDevice) /* 261 */
enum CommandId {
kOneBeforeStartPoint =
diff --git a/chromium/gpu/command_buffer/gles2_cmd_buffer_functions.txt b/chromium/gpu/command_buffer/gles2_cmd_buffer_functions.txt
index 94aee8014c2..6115cb03744 100644
--- a/chromium/gpu/command_buffer/gles2_cmd_buffer_functions.txt
+++ b/chromium/gpu/command_buffer/gles2_cmd_buffer_functions.txt
@@ -303,7 +303,7 @@ GL_APICALL GLboolean GL_APIENTRY glUnmapBuffer (GLenumBufferTarget target);
GL_APICALL void GL_APIENTRY glFlushMappedBufferRange (GLenumBufferTarget target, GLintptrNotNegative offset, GLsizeiptr size);
GL_APICALL void* GL_APIENTRY glMapTexSubImage2DCHROMIUM (GLenum target, GLint level, GLint xoffset, GLint yoffset, GLsizei width, GLsizei height, GLenum format, GLenum type, GLenum access);
GL_APICALL void GL_APIENTRY glUnmapTexSubImage2DCHROMIUM (const void* mem);
-GL_APICALL void GL_APIENTRY glResizeCHROMIUM (GLuint width, GLuint height, GLfloat scale_factor, GLenum color_space, GLboolean alpha);
+GL_APICALL void GL_APIENTRY glResizeCHROMIUM (GLuint width, GLuint height, GLfloat scale_factor, GLcolorSpace color_space, GLboolean alpha);
GL_APICALL const GLchar* GL_APIENTRY glGetRequestableExtensionsCHROMIUM (void);
GL_APICALL void GL_APIENTRY glRequestExtensionCHROMIUM (const char* extension);
GL_APICALL void GL_APIENTRY glGetProgramInfoCHROMIUM (GLidProgram program, GLsizeiNotNegative bufsize, GLsizei* size, void* info);
@@ -346,31 +346,6 @@ GL_APICALL GLuint GL_APIENTRY glGetLastFlushIdCHROMIUM (void);
GL_APICALL void GL_APIENTRY glScheduleDCLayerCHROMIUM (GLuint texture_0, GLuint texture_1, GLint z_order, GLint content_x, GLint content_y, GLint content_width, GLint content_height, GLint quad_x, GLint quad_y, GLint quad_width, GLint quad_height, GLfloat transform_c1r1, GLfloat transform_c2r1, GLfloat transform_c1r2, GLfloat transform_c2r2, GLfloat transform_tx, GLfloat transform_ty, GLboolean is_clipped, GLint clip_x, GLint clip_y, GLint clip_width, GLint clip_height, GLuint protected_video_type);
GL_APICALL void GL_APIENTRY glSetActiveURLCHROMIUM (const char* url);
-// Extension CHROMIUM_path_rendering.
-GL_APICALL void GL_APIENTRY glMatrixLoadfCHROMIUM (GLenumMatrixMode matrixMode, const GLfloat* m);
-GL_APICALL void GL_APIENTRY glMatrixLoadIdentityCHROMIUM (GLenumMatrixMode matrixMode);
-GL_APICALL GLuint GL_APIENTRY glGenPathsCHROMIUM (GLsizei range);
-GL_APICALL void GL_APIENTRY glDeletePathsCHROMIUM (GLidPath path, GLsizei range);
-GL_APICALL GLboolean GL_APIENTRY glIsPathCHROMIUM (GLidPath path);
-GL_APICALL void GL_APIENTRY glPathCommandsCHROMIUM (GLidPath path, GLsizei numCommands, const GLubyte* commands, GLsizei numCoords, GLenumPathCoordType coordType, const GLvoid* coords);
-GL_APICALL void GL_APIENTRY glPathParameterfCHROMIUM (GLidPath path, GLenumPathParameter pname, GLfloat value);
-GL_APICALL void GL_APIENTRY glPathParameteriCHROMIUM (GLidPath path, GLenumPathParameter pname, GLint value);
-GL_APICALL void GL_APIENTRY glPathStencilFuncCHROMIUM (GLenumCmpFunction func, GLint ref, GLuint mask);
-GL_APICALL void GL_APIENTRY glStencilFillPathCHROMIUM (GLidPath path, GLenumPathFillMode fillMode, GLuint mask);
-GL_APICALL void GL_APIENTRY glStencilStrokePathCHROMIUM (GLidPath path, GLint reference, GLuint mask);
-GL_APICALL void GL_APIENTRY glCoverFillPathCHROMIUM (GLidPath path, GLenumPathCoverMode coverMode);
-GL_APICALL void GL_APIENTRY glCoverStrokePathCHROMIUM (GLidPath path, GLenumPathCoverMode coverMode);
-GL_APICALL void GL_APIENTRY glStencilThenCoverFillPathCHROMIUM (GLidPath path, GLenumPathFillMode fillMode, GLuint mask, GLenumPathCoverMode coverMode);
-GL_APICALL void GL_APIENTRY glStencilThenCoverStrokePathCHROMIUM (GLidPath path, GLint reference, GLuint mask, GLenumPathCoverMode coverMode);
-GL_APICALL void GL_APIENTRY glStencilFillPathInstancedCHROMIUM (GLsizei numPaths, GLenum pathNameType, const GLvoid* paths, GLuint pathBase, GLenumPathFillMode fillMode, GLuint mask, GLenumPathTransformType transformType, const GLfloat* transformValues);
-GL_APICALL void GL_APIENTRY glStencilStrokePathInstancedCHROMIUM (GLsizei numPaths, GLenum pathNameType, const GLvoid* paths, GLuint pathBase, GLint reference, GLuint mask, GLenumPathTransformType transformType, const GLfloat* transformValues);
-GL_APICALL void GL_APIENTRY glCoverFillPathInstancedCHROMIUM (GLsizei numPaths, GLenum pathNameType, const GLvoid* paths, GLuint pathBase, GLenumPathInstancedCoverMode coverMode, GLenumPathTransformType transformType, const GLfloat* transformValues);
-GL_APICALL void GL_APIENTRY glCoverStrokePathInstancedCHROMIUM (GLsizei numPaths, GLenum pathNameType, const GLvoid* paths, GLuint pathBase, GLenumPathInstancedCoverMode coverMode, GLenumPathTransformType transformType, const GLfloat* transformValues);
-GL_APICALL void GL_APIENTRY glStencilThenCoverFillPathInstancedCHROMIUM (GLsizei numPaths, GLenum pathNameType, const GLvoid* paths, GLuint pathBase, GLenumPathFillMode fillMode, GLuint mask, GLenumPathInstancedCoverMode coverMode, GLenumPathTransformType transformType, const GLfloat* transformValues);
-GL_APICALL void GL_APIENTRY glStencilThenCoverStrokePathInstancedCHROMIUM (GLsizei numPaths, GLenum pathNameType, const GLvoid* paths, GLuint pathBase, GLint reference, GLuint mask, GLenumPathInstancedCoverMode coverMode, GLenumPathTransformType transformType, const GLfloat* transformValues);
-GL_APICALL void GL_APIENTRY glBindFragmentInputLocationCHROMIUM (GLidProgram program, GLint location, const char* name);
-GL_APICALL void GL_APIENTRY glProgramPathFragmentInputGenCHROMIUM (GLidProgram program, GLint location, GLenumPathFragmentInputGenMode genMode, GLint components, const GLfloat* coeffs);
-
// Extension CHROMIUM_context_visibility_hint
GL_APICALL void GL_APIENTRY glContextVisibilityHintCHROMIUM (GLboolean visibility);
@@ -409,7 +384,7 @@ GL_APICALL bool GL_APIENTRY glLockDiscardableTextureCHROMIUM (GLuint tex
GL_APICALL void GL_APIENTRY glTexStorage2DImageCHROMIUM (GLenumTextureBindTarget target, GLenumTextureInternalFormatStorage internalFormat, GLenumClientBufferUsage bufferUsage, GLsizei width, GLsizei height);
// Extension CHROMIUM_color_space_metadata
-GL_APICALL void GL_APIENTRY glSetColorSpaceMetadataCHROMIUM (GLuint texture_id, GLColorSpace color_space);
+GL_APICALL void GL_APIENTRY glSetColorSpaceMetadataCHROMIUM (GLuint texture_id, GLcolorSpace color_space);
// Extension EXT_window_rectangles
GL_APICALL void GL_APIENTRY glWindowRectanglesEXT (GLenumWindowRectanglesMode mode, GLsizeiNotNegative count, const GLint* box);
@@ -438,3 +413,5 @@ GL_APICALL GLuint GL_APIENTRY glCreateAndTexStorage2DSharedImageWithIntern
GL_APICALL void GL_APIENTRY glCreateAndTexStorage2DSharedImageINTERNAL (GLuint texture, GLenum internalformat, const GLbyte* mailbox);
GL_APICALL void GL_APIENTRY glBeginSharedImageAccessDirectCHROMIUM (GLuint texture, GLenumSharedImageAccessMode mode);
GL_APICALL void GL_APIENTRY glEndSharedImageAccessDirectCHROMIUM (GLuint texture);
+GL_APICALL void GL_APIENTRY glBeginBatchReadAccessSharedImageCHROMIUM (void);
+GL_APICALL void GL_APIENTRY glEndBatchReadAccessSharedImageCHROMIUM (void);
diff --git a/chromium/gpu/command_buffer/raster_cmd_buffer_functions.txt b/chromium/gpu/command_buffer/raster_cmd_buffer_functions.txt
index 64893624e5b..39a67517866 100644
--- a/chromium/gpu/command_buffer/raster_cmd_buffer_functions.txt
+++ b/chromium/gpu/command_buffer/raster_cmd_buffer_functions.txt
@@ -36,7 +36,7 @@ GL_APICALL void GL_APIENTRY glClearPaintCacheINTERNAL (void);
// TODO(backer): Remove GL encoding. These are not GL functions.
// |mailboxes| argument is the concatenation of the source mailbox and the destination mailbox (32 bytes total)
-GL_APICALL void GL_APIENTRY glCopySubTextureINTERNAL (GLint xoffset, GLint yoffset, GLint x, GLint y, GLsizei width, GLsizei height, const GLbyte* mailboxes);
+GL_APICALL void GL_APIENTRY glCopySubTextureINTERNAL (GLint xoffset, GLint yoffset, GLint x, GLint y, GLsizei width, GLsizei height, GLboolean unpack_flip_y, GLboolean unpack_premultiply_alpha, const GLbyte* mailboxes);
GL_APICALL void GL_APIENTRY glTraceBeginCHROMIUM (const char* category_name, const char* trace_name);
GL_APICALL void GL_APIENTRY glTraceEndCHROMIUM (void);
GL_APICALL void GL_APIENTRY glSetActiveURLCHROMIUM (const char* url);
diff --git a/chromium/gpu/command_buffer/service/BUILD.gn b/chromium/gpu/command_buffer/service/BUILD.gn
index 2709ff8f555..ccf4bff1bb3 100644
--- a/chromium/gpu/command_buffer/service/BUILD.gn
+++ b/chromium/gpu/command_buffer/service/BUILD.gn
@@ -11,25 +11,17 @@ import("//ui/gl/features.gni")
group("service") {
if (is_component_build) {
- public_deps = [
- "//gpu",
- ]
+ public_deps = [ "//gpu" ]
} else {
- public_deps = [
- ":service_sources",
- ]
+ public_deps = [ ":service_sources" ]
}
}
group("gles2") {
if (is_component_build) {
- public_deps = [
- "//gpu:gles2",
- ]
+ public_deps = [ "//gpu:gles2" ]
} else {
- public_deps = [
- ":gles2_sources",
- ]
+ public_deps = [ ":gles2_sources" ]
}
}
@@ -89,9 +81,11 @@ target(link_target_type, "service_sources") {
"//base/third_party/dynamic_annotations",
"//gpu/command_buffer/client:client_sources",
"//gpu/config:config_sources",
+ "//gpu/ipc/common:ipc_common_sources",
"//gpu/ipc/common:surface_handle_type",
"//ui/gfx",
"//ui/gfx/geometry",
+ "//ui/gfx/ipc/color",
"//ui/gl",
]
@@ -178,6 +172,8 @@ target(link_target_type, "gles2_sources") {
"indexed_buffer_binding_host.h",
"logger.cc",
"logger.h",
+ "mailbox_manager_dummy.cc",
+ "mailbox_manager_dummy.h",
"mailbox_manager_factory.cc",
"mailbox_manager_factory.h",
"mailbox_manager_impl.cc",
@@ -194,8 +190,6 @@ target(link_target_type, "gles2_sources") {
"passthrough_discardable_manager.h",
"passthrough_program_cache.cc",
"passthrough_program_cache.h",
- "path_manager.cc",
- "path_manager.h",
"program_cache.cc",
"program_cache.h",
"program_manager.cc",
@@ -316,20 +310,16 @@ target(link_target_type, "gles2_sources") {
"//third_party/angle:translator",
"//third_party/protobuf:protobuf_lite",
"//third_party/re2",
- "//third_party/smhasher:cityhash",
"//third_party/zlib",
"//ui/gfx",
"//ui/gfx/geometry",
- "//ui/gfx/ipc/color",
"//ui/gl",
"//ui/gl:buildflags",
"//ui/gl/init",
]
if (use_ozone) {
- deps += [
- "//ui/ozone",
- ]
+ deps += [ "//ui/ozone" ]
}
if (enable_vulkan) {
@@ -338,7 +328,7 @@ target(link_target_type, "gles2_sources") {
"//gpu/vulkan",
]
- if (is_linux || is_fuchsia) {
+ if (is_linux || is_fuchsia || is_android || is_win) {
sources += [
"external_vk_image_backing.cc",
"external_vk_image_backing.h",
@@ -353,8 +343,12 @@ target(link_target_type, "gles2_sources") {
if (use_ozone) {
sources += [
+ "shared_image_backing_factory_ozone.cc",
+ "shared_image_backing_factory_ozone.h",
"shared_image_backing_ozone.cc",
"shared_image_backing_ozone.h",
+ "shared_image_representation_gl_ozone.cc",
+ "shared_image_representation_gl_ozone.h",
]
}
@@ -364,6 +358,13 @@ target(link_target_type, "gles2_sources") {
"external_vk_image_dawn_representation.h",
]
}
+
+ if (use_ozone && use_dawn) {
+ sources += [
+ "shared_image_representation_dawn_ozone.cc",
+ "shared_image_representation_dawn_ozone.h",
+ ]
+ }
}
if (use_dawn) {
@@ -404,8 +405,12 @@ target(link_target_type, "gles2_sources") {
"ahardwarebuffer_utils.h",
"image_reader_gl_owner.cc",
"image_reader_gl_owner.h",
+ "shared_image_backing_egl_image.cc",
+ "shared_image_backing_egl_image.h",
"shared_image_backing_factory_ahardwarebuffer.cc",
"shared_image_backing_factory_ahardwarebuffer.h",
+ "shared_image_batch_access_manager.cc",
+ "shared_image_batch_access_manager.h",
"shared_image_video.cc",
"shared_image_video.h",
"stream_texture_shared_image_interface.h",
@@ -431,16 +436,18 @@ target(link_target_type, "gles2_sources") {
if (is_win) {
sources += [
+ "shared_image_backing_d3d.cc",
+ "shared_image_backing_d3d.h",
"shared_image_backing_factory_d3d.cc",
"shared_image_backing_factory_d3d.h",
+ "shared_image_representation_d3d.cc",
+ "shared_image_representation_d3d.h",
]
}
}
proto_library("disk_cache_proto") {
- sources = [
- "disk_cache_proto.proto",
- ]
+ sources = [ "disk_cache_proto.proto" ]
}
if (is_android) {
diff --git a/chromium/gpu/command_buffer/service/abstract_texture_impl_shared_context_state.cc b/chromium/gpu/command_buffer/service/abstract_texture_impl_shared_context_state.cc
index aed8ac6d9a6..9d0d7f74222 100644
--- a/chromium/gpu/command_buffer/service/abstract_texture_impl_shared_context_state.cc
+++ b/chromium/gpu/command_buffer/service/abstract_texture_impl_shared_context_state.cc
@@ -9,6 +9,7 @@
#include "gpu/command_buffer/service/context_state.h"
#include "gpu/command_buffer/service/texture_manager.h"
#include "ui/gl/gl_context.h"
+#include "ui/gl/gl_surface.h"
#include "ui/gl/scoped_binders.h"
#include "ui/gl/scoped_make_current.h"
@@ -155,6 +156,30 @@ AbstractTextureImplOnSharedContextPassthrough::
~AbstractTextureImplOnSharedContextPassthrough() {
if (cleanup_cb_)
std::move(cleanup_cb_).Run(this);
+
+ // Save the current context and make it current again after deleting the
+ // |texture_|.
+ scoped_refptr<gl::GLContext> previous_context = gl::GLContext::GetCurrent();
+ scoped_refptr<gl::GLSurface> previous_surface = gl::GLSurface::GetCurrent();
+
+ // If the shared context is lost, |shared_context_state_| will be null and the
+ // |texture_| is already marked to have lost its context.
+ if (shared_context_state_) {
+ // Make the |shared_context_state_|'s context current before destroying the
+ // |texture_|, since the destructor is not guaranteed to be called on the
+ // context on which the |texture_| was created.
+ if (!shared_context_state_->IsCurrent(nullptr)) {
+ shared_context_state_->MakeCurrent(shared_context_state_->surface(),
+ true /* needs_gl */);
+ }
+ shared_context_state_->RemoveContextLostObserver(this);
+ }
+ texture_.reset();
+
+ // Make the previous context current again.
+ if (!previous_context->IsCurrent(previous_surface.get()))
+ previous_context->MakeCurrent(previous_surface.get());
}
TextureBase* AbstractTextureImplOnSharedContextPassthrough::GetTextureBase()
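
The destructor change above performs the switch by hand: capture the current context and surface, make the shared context current, drop the texture, then restore the previous pairing. A minimal RAII sketch of the same save-and-restore idea, using simplified stand-in types rather than the real gl::GLContext / gl::GLSurface API (a scoped helper along these lines already exists in ui/gl/scoped_make_current.h, which this file includes):

// Illustrative stand-ins for gl::GLContext / gl::GLSurface; the real classes
// are refcounted and wrap platform GL state.
struct Surface {};

class Context {
 public:
  bool MakeCurrent(Surface* surface) {
    current_context_ = this;
    current_surface_ = surface;
    return true;
  }
  bool IsCurrent(Surface* surface) const {
    return current_context_ == this && current_surface_ == surface;
  }
  static Context* GetCurrent() { return current_context_; }
  static Surface* GetCurrentSurface() { return current_surface_; }

 private:
  static Context* current_context_;
  static Surface* current_surface_;
};

Context* Context::current_context_ = nullptr;
Surface* Context::current_surface_ = nullptr;

// Make |target| current for the current scope, then restore whatever was
// current before, mirroring the explicit save/restore in the destructor above.
class ScopedMakeCurrent {
 public:
  ScopedMakeCurrent(Context* target, Surface* surface)
      : previous_context_(Context::GetCurrent()),
        previous_surface_(Context::GetCurrentSurface()) {
    if (!target->IsCurrent(surface))
      target->MakeCurrent(surface);
  }
  ~ScopedMakeCurrent() {
    if (previous_context_ && !previous_context_->IsCurrent(previous_surface_))
      previous_context_->MakeCurrent(previous_surface_);
  }

 private:
  Context* previous_context_;
  Surface* previous_surface_;
};
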
diff --git a/chromium/gpu/command_buffer/service/ahardwarebuffer_utils.cc b/chromium/gpu/command_buffer/service/ahardwarebuffer_utils.cc
index 2c061d514c2..6fa17d7c512 100644
--- a/chromium/gpu/command_buffer/service/ahardwarebuffer_utils.cc
+++ b/chromium/gpu/command_buffer/service/ahardwarebuffer_utils.cc
@@ -14,9 +14,10 @@ bool AHardwareBufferSupportedFormat(viz::ResourceFormat format) {
switch (format) {
case viz::RGBA_8888:
case viz::RGB_565:
+ case viz::BGR_565:
case viz::RGBA_F16:
case viz::RGBX_8888:
- case viz::RGBX_1010102:
+ case viz::RGBA_1010102:
return true;
default:
return false;
@@ -30,11 +31,13 @@ unsigned int AHardwareBufferFormat(viz::ResourceFormat format) {
return AHARDWAREBUFFER_FORMAT_R8G8B8A8_UNORM;
case viz::RGB_565:
return AHARDWAREBUFFER_FORMAT_R5G6B5_UNORM;
+ case viz::BGR_565:
+ return AHARDWAREBUFFER_FORMAT_R5G6B5_UNORM;
case viz::RGBA_F16:
return AHARDWAREBUFFER_FORMAT_R16G16B16A16_FLOAT;
case viz::RGBX_8888:
return AHARDWAREBUFFER_FORMAT_R8G8B8X8_UNORM;
- case viz::RGBX_1010102:
+ case viz::RGBA_1010102:
return AHARDWAREBUFFER_FORMAT_R10G10B10A2_UNORM;
default:
NOTREACHED();
diff --git a/chromium/gpu/command_buffer/service/command_buffer_direct.cc b/chromium/gpu/command_buffer/service/command_buffer_direct.cc
index 5b9e2a7c177..b5a604d98d4 100644
--- a/chromium/gpu/command_buffer/service/command_buffer_direct.cc
+++ b/chromium/gpu/command_buffer/service/command_buffer_direct.cc
@@ -10,16 +10,7 @@
namespace gpu {
-namespace {
-
-uint64_t g_next_command_buffer_id = 1;
-
-} // anonymous namespace
-
-CommandBufferDirect::CommandBufferDirect()
- : service_(this, nullptr),
- command_buffer_id_(
- CommandBufferId::FromUnsafeValue(g_next_command_buffer_id++)) {}
+CommandBufferDirect::CommandBufferDirect() : service_(this, nullptr) {}
CommandBufferDirect::~CommandBufferDirect() = default;
diff --git a/chromium/gpu/command_buffer/service/command_buffer_direct.h b/chromium/gpu/command_buffer/service/command_buffer_direct.h
index 523336507eb..44842ba534e 100644
--- a/chromium/gpu/command_buffer/service/command_buffer_direct.h
+++ b/chromium/gpu/command_buffer/service/command_buffer_direct.h
@@ -6,7 +6,6 @@
#define GPU_COMMAND_BUFFER_SERVICE_COMMAND_BUFFER_DIRECT_H_
#include "base/callback.h"
-#include "gpu/command_buffer/common/command_buffer_id.h"
#include "gpu/command_buffer/common/constants.h"
#include "gpu/command_buffer/service/command_buffer_service.h"
#include "gpu/command_buffer/service/decoder_client.h"
@@ -62,7 +61,6 @@ class GPU_EXPORT CommandBufferDirect : public CommandBuffer,
private:
CommandBufferService service_;
AsyncAPIInterface* handler_ = nullptr;
- const CommandBufferId command_buffer_id_;
};
} // namespace gpu
diff --git a/chromium/gpu/command_buffer/service/common_decoder.cc b/chromium/gpu/command_buffer/service/common_decoder.cc
index 909d97350ea..e47dbf5d516 100644
--- a/chromium/gpu/command_buffer/service/common_decoder.cc
+++ b/chromium/gpu/command_buffer/service/common_decoder.cc
@@ -13,6 +13,7 @@
#include "base/stl_util.h"
#include "gpu/command_buffer/service/command_buffer_service.h"
#include "gpu/command_buffer/service/decoder_client.h"
+#include "ui/gfx/ipc/color/gfx_param_traits.h"
namespace gpu {
namespace {
@@ -381,4 +382,31 @@ error::Error CommonDecoder::HandleInsertFenceSync(
return error::kNoError;
}
+bool CommonDecoder::ReadColorSpace(uint32_t shm_id,
+ uint32_t shm_offset,
+ uint32_t color_space_size,
+ gfx::ColorSpace* color_space) {
+ // Use the default (invalid) color space if no space was serialized.
+ if (!shm_id && !shm_offset && !color_space_size) {
+ *color_space = gfx::ColorSpace();
+ return true;
+ }
+
+ const char* data = static_cast<const char*>(
+ GetAddressAndCheckSize(shm_id, shm_offset, color_space_size));
+ if (!data) {
+ return false;
+ }
+
+ // Make a copy to reduce the risk of a time-of-check to time-of-use attack.
+ std::vector<char> color_space_data(data, data + color_space_size);
+ base::Pickle color_space_pickle(color_space_data.data(), color_space_size);
+ base::PickleIterator iterator(color_space_pickle);
+ if (!IPC::ParamTraits<gfx::ColorSpace>::Read(&color_space_pickle, &iterator,
+ color_space)) {
+ return false;
+ }
+ return true;
+}
+
} // namespace gpu
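
ReadColorSpace above copies the serialized color space out of shared memory before parsing it, so the client cannot change the bytes between the size check and the deserialization. A minimal sketch of that copy-then-parse pattern; Payload, ParsePayload, and ReadFromSharedMemory are hypothetical stand-ins for the gfx::ColorSpace pickle path:

#include <cstddef>
#include <cstdint>
#include <cstring>
#include <vector>

struct Payload {
  uint32_t primaries = 0;
  uint32_t transfer = 0;
};

// Stand-in parser; the real decoder runs
// IPC::ParamTraits<gfx::ColorSpace>::Read over a base::Pickle built from the
// copied bytes.
bool ParsePayload(const char* data, size_t size, Payload* out) {
  if (size < sizeof(Payload))
    return false;
  std::memcpy(out, data, sizeof(Payload));
  return true;
}

bool ReadFromSharedMemory(const char* shm, size_t size, Payload* out) {
  // Copy into process-private memory first, so the sending process cannot
  // rewrite the bytes between validation and use (time-of-check/time-of-use).
  std::vector<char> copy(shm, shm + size);
  return ParsePayload(copy.data(), copy.size(), out);
}
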
diff --git a/chromium/gpu/command_buffer/service/common_decoder.h b/chromium/gpu/command_buffer/service/common_decoder.h
index b635baa6c0e..3132b8f3356 100644
--- a/chromium/gpu/command_buffer/service/common_decoder.h
+++ b/chromium/gpu/command_buffer/service/common_decoder.h
@@ -22,6 +22,10 @@
typedef int GLsizei;
typedef int GLint;
+namespace gfx {
+class ColorSpace;
+} // namespace gfx
+
namespace gpu {
class CommandBufferServiceBase;
@@ -193,6 +197,13 @@ class GPU_EXPORT CommonDecoder {
// watchdog checks in CommandExecutor().
virtual void ExitCommandProcessingEarly() {}
+ // Read a serialized gfx::ColorSpace. Return true on success and false if the
+ // serialization was invalid.
+ bool ReadColorSpace(uint32_t shm_id,
+ uint32_t shm_offset,
+ uint32_t color_space_size,
+ gfx::ColorSpace* color_space);
+
private:
// Generate a member function prototype for each command in an automated and
// typesafe way.
diff --git a/chromium/gpu/command_buffer/service/context_group.cc b/chromium/gpu/command_buffer/service/context_group.cc
index 57035f6dac3..83759c17a22 100644
--- a/chromium/gpu/command_buffer/service/context_group.cc
+++ b/chromium/gpu/command_buffer/service/context_group.cc
@@ -16,7 +16,6 @@
#include "gpu/command_buffer/service/framebuffer_manager.h"
#include "gpu/command_buffer/service/gles2_cmd_decoder_passthrough.h"
#include "gpu/command_buffer/service/passthrough_discardable_manager.h"
-#include "gpu/command_buffer/service/path_manager.h"
#include "gpu/command_buffer/service/program_manager.h"
#include "gpu/command_buffer/service/renderbuffer_manager.h"
#include "gpu/command_buffer/service/sampler_manager.h"
@@ -126,7 +125,8 @@ ContextGroup::ContextGroup(
shared_image_representation_factory_(
std::make_unique<SharedImageRepresentationFactory>(
shared_image_manager,
- memory_tracker_.get())) {
+ memory_tracker_.get())),
+ shared_image_manager_(shared_image_manager) {
DCHECK(discardable_manager);
DCHECK(feature_info_);
DCHECK(mailbox_manager_);
@@ -262,13 +262,17 @@ gpu::ContextResult ContextGroup::Initialize(
&uniform_buffer_offset_alignment_);
}
- buffer_manager_ = std::make_unique<BufferManager>(memory_tracker_.get(),
- feature_info_.get());
- renderbuffer_manager_ = std::make_unique<RenderbufferManager>(
- memory_tracker_.get(), max_renderbuffer_size, max_samples,
- feature_info_.get());
- shader_manager_ = std::make_unique<ShaderManager>(progress_reporter_);
- sampler_manager_ = std::make_unique<SamplerManager>(feature_info_.get());
+ // Managers are not used by the passthrough command decoder. Save memory by
+ // not allocating them.
+ if (!use_passthrough_cmd_decoder_) {
+ buffer_manager_ = std::make_unique<BufferManager>(memory_tracker_.get(),
+ feature_info_.get());
+ renderbuffer_manager_ = std::make_unique<RenderbufferManager>(
+ memory_tracker_.get(), max_renderbuffer_size, max_samples,
+ feature_info_.get());
+ shader_manager_ = std::make_unique<ShaderManager>(progress_reporter_);
+ sampler_manager_ = std::make_unique<SamplerManager>(feature_info_.get());
+ }
// Lookup GL things we need to know.
const GLint kGLES2RequiredMinimumVertexAttribs = 8u;
@@ -388,11 +392,15 @@ gpu::ContextResult ContextGroup::Initialize(
feature_info_->workarounds().max_3d_array_texture_size);
}
- texture_manager_.reset(new TextureManager(
- memory_tracker_.get(), feature_info_.get(), max_texture_size,
- max_cube_map_texture_size, max_rectangle_texture_size,
- max_3d_texture_size, max_array_texture_layers, bind_generates_resource_,
- progress_reporter_, discardable_manager_));
+ // Managers are not used by the passthrough command decoder. Save memory by
+ // not allocating them.
+ if (!use_passthrough_cmd_decoder_) {
+ texture_manager_.reset(new TextureManager(
+ memory_tracker_.get(), feature_info_.get(), max_texture_size,
+ max_cube_map_texture_size, max_rectangle_texture_size,
+ max_3d_texture_size, max_array_texture_layers, bind_generates_resource_,
+ progress_reporter_, discardable_manager_));
+ }
const GLint kMinTextureImageUnits = 8;
const GLint kMinVertexTextureImageUnits = 0;
@@ -522,14 +530,16 @@ gpu::ContextResult ContextGroup::Initialize(
}
}
- path_manager_ = std::make_unique<PathManager>();
+ // Managers are not used by the passthrough command decoder. Save memory by
+ // not allocating them.
+ if (!use_passthrough_cmd_decoder_) {
+ program_manager_ = std::make_unique<ProgramManager>(
+ program_cache_, max_varying_vectors_, max_draw_buffers_,
+ max_dual_source_draw_buffers_, max_vertex_attribs_, gpu_preferences_,
+ feature_info_.get(), progress_reporter_);
- program_manager_ = std::make_unique<ProgramManager>(
- program_cache_, max_varying_vectors_, max_draw_buffers_,
- max_dual_source_draw_buffers_, max_vertex_attribs_, gpu_preferences_,
- feature_info_.get(), progress_reporter_);
-
- texture_manager_->Initialize();
+ texture_manager_->Initialize();
+ }
decoders_.push_back(decoder->AsWeakPtr());
return gpu::ContextResult::kSuccess;
@@ -597,12 +607,6 @@ void ContextGroup::Destroy(DecoderContext* decoder, bool have_context) {
ReportProgress();
}
- if (path_manager_ != nullptr) {
- path_manager_->Destroy(have_context);
- path_manager_.reset();
- ReportProgress();
- }
-
if (program_manager_ != nullptr) {
program_manager_->Destroy(have_context);
program_manager_.reset();
diff --git a/chromium/gpu/command_buffer/service/context_group.h b/chromium/gpu/command_buffer/service/context_group.h
index 7ff6e3d8270..deb51afd79f 100644
--- a/chromium/gpu/command_buffer/service/context_group.h
+++ b/chromium/gpu/command_buffer/service/context_group.h
@@ -46,7 +46,6 @@ class ProgramCache;
class BufferManager;
class ImageManager;
class RenderbufferManager;
-class PathManager;
class ProgramManager;
class SamplerManager;
class ShaderManager;
@@ -90,6 +89,10 @@ class GPU_GLES2_EXPORT ContextGroup : public base::RefCounted<ContextGroup> {
MailboxManager* mailbox_manager() const { return mailbox_manager_; }
+ gpu::SharedImageManager* shared_image_manager() const {
+ return shared_image_manager_;
+ }
+
MemoryTracker* memory_tracker() const { return memory_tracker_.get(); }
ShaderTranslatorCache* shader_translator_cache() const {
@@ -180,8 +183,6 @@ class GPU_GLES2_EXPORT ContextGroup : public base::RefCounted<ContextGroup> {
return texture_manager_.get();
}
- PathManager* path_manager() const { return path_manager_.get(); }
-
ProgramManager* program_manager() const {
return program_manager_.get();
}
@@ -301,8 +302,6 @@ class GPU_GLES2_EXPORT ContextGroup : public base::RefCounted<ContextGroup> {
std::unique_ptr<TextureManager> texture_manager_;
- std::unique_ptr<PathManager> path_manager_;
-
std::unique_ptr<ProgramManager> program_manager_;
std::unique_ptr<ShaderManager> shader_manager_;
@@ -336,6 +335,8 @@ class GPU_GLES2_EXPORT ContextGroup : public base::RefCounted<ContextGroup> {
std::unique_ptr<SharedImageRepresentationFactory>
shared_image_representation_factory_;
+ gpu::SharedImageManager* shared_image_manager_ = nullptr;
+
DISALLOW_COPY_AND_ASSIGN(ContextGroup);
};
diff --git a/chromium/gpu/command_buffer/service/context_group_unittest.cc b/chromium/gpu/command_buffer/service/context_group_unittest.cc
index 5ed2ee7af6d..a3c4c70be78 100644
--- a/chromium/gpu/command_buffer/service/context_group_unittest.cc
+++ b/chromium/gpu/command_buffer/service/context_group_unittest.cc
@@ -40,7 +40,7 @@ class ContextGroupTest : public GpuServiceTest {
public:
static const bool kBindGeneratesResource = false;
- ContextGroupTest() = default;
+ ContextGroupTest() : discardable_manager_(gpu_preferences_) {}
protected:
void SetUp() override {
@@ -171,5 +171,3 @@ TEST_F(ContextGroupTest, MultipleContexts) {
} // namespace gles2
} // namespace gpu
-
-
diff --git a/chromium/gpu/command_buffer/service/context_state_autogen.h b/chromium/gpu/command_buffer/service/context_state_autogen.h
index 966fa290525..4f42f966bb3 100644
--- a/chromium/gpu/command_buffer/service/context_state_autogen.h
+++ b/chromium/gpu/command_buffer/service/context_state_autogen.h
@@ -80,11 +80,6 @@ GLenum hint_generate_mipmap;
GLenum hint_fragment_shader_derivative;
GLenum hint_texture_filtering;
GLfloat line_width;
-GLfloat modelview_matrix[16];
-GLfloat projection_matrix[16];
-GLenum stencil_path_func;
-GLint stencil_path_ref;
-GLuint stencil_path_mask;
GLint pack_alignment;
GLint unpack_alignment;
GLint pack_row_length;
diff --git a/chromium/gpu/command_buffer/service/context_state_impl_autogen.h b/chromium/gpu/command_buffer/service/context_state_impl_autogen.h
index 64b2938d6a4..d3f7e2223cd 100644
--- a/chromium/gpu/command_buffer/service/context_state_impl_autogen.h
+++ b/chromium/gpu/command_buffer/service/context_state_impl_autogen.h
@@ -79,41 +79,6 @@ void ContextState::Initialize() {
hint_fragment_shader_derivative = GL_DONT_CARE;
hint_texture_filtering = GL_NICEST;
line_width = 1.0f;
- modelview_matrix[0] = 1.0f;
- modelview_matrix[1] = 0.0f;
- modelview_matrix[2] = 0.0f;
- modelview_matrix[3] = 0.0f;
- modelview_matrix[4] = 0.0f;
- modelview_matrix[5] = 1.0f;
- modelview_matrix[6] = 0.0f;
- modelview_matrix[7] = 0.0f;
- modelview_matrix[8] = 0.0f;
- modelview_matrix[9] = 0.0f;
- modelview_matrix[10] = 1.0f;
- modelview_matrix[11] = 0.0f;
- modelview_matrix[12] = 0.0f;
- modelview_matrix[13] = 0.0f;
- modelview_matrix[14] = 0.0f;
- modelview_matrix[15] = 1.0f;
- projection_matrix[0] = 1.0f;
- projection_matrix[1] = 0.0f;
- projection_matrix[2] = 0.0f;
- projection_matrix[3] = 0.0f;
- projection_matrix[4] = 0.0f;
- projection_matrix[5] = 1.0f;
- projection_matrix[6] = 0.0f;
- projection_matrix[7] = 0.0f;
- projection_matrix[8] = 0.0f;
- projection_matrix[9] = 0.0f;
- projection_matrix[10] = 1.0f;
- projection_matrix[11] = 0.0f;
- projection_matrix[12] = 0.0f;
- projection_matrix[13] = 0.0f;
- projection_matrix[14] = 0.0f;
- projection_matrix[15] = 1.0f;
- stencil_path_func = GL_ALWAYS;
- stencil_path_ref = 0;
- stencil_path_mask = 0xFFFFFFFFU;
pack_alignment = 4;
unpack_alignment = 4;
pack_row_length = 0;
@@ -314,25 +279,6 @@ void ContextState::InitState(const ContextState* prev_state) const {
}
if ((line_width != prev_state->line_width))
DoLineWidth(line_width);
- if (feature_info_->feature_flags().chromium_path_rendering) {
- if (memcmp(prev_state->modelview_matrix, modelview_matrix,
- sizeof(GLfloat) * 16)) {
- api()->glMatrixLoadfEXTFn(GL_PATH_MODELVIEW_CHROMIUM, modelview_matrix);
- }
- }
- if (feature_info_->feature_flags().chromium_path_rendering) {
- if (memcmp(prev_state->projection_matrix, projection_matrix,
- sizeof(GLfloat) * 16)) {
- api()->glMatrixLoadfEXTFn(GL_PATH_PROJECTION_CHROMIUM,
- projection_matrix);
- }
- }
- if (feature_info_->feature_flags().chromium_path_rendering)
- if ((stencil_path_func != prev_state->stencil_path_func) ||
- (stencil_path_ref != prev_state->stencil_path_ref) ||
- (stencil_path_mask != prev_state->stencil_path_mask))
- api()->glPathStencilFuncNVFn(stencil_path_func, stencil_path_ref,
- stencil_path_mask);
if (prev_state->pack_alignment != pack_alignment) {
api()->glPixelStoreiFn(GL_PACK_ALIGNMENT, pack_alignment);
}
@@ -415,15 +361,6 @@ void ContextState::InitState(const ContextState* prev_state) const {
hint_texture_filtering);
}
DoLineWidth(line_width);
- if (feature_info_->feature_flags().chromium_path_rendering) {
- api()->glMatrixLoadfEXTFn(GL_PATH_MODELVIEW_CHROMIUM, modelview_matrix);
- }
- if (feature_info_->feature_flags().chromium_path_rendering) {
- api()->glMatrixLoadfEXTFn(GL_PATH_PROJECTION_CHROMIUM, projection_matrix);
- }
- if (feature_info_->feature_flags().chromium_path_rendering)
- api()->glPathStencilFuncNVFn(stencil_path_func, stencil_path_ref,
- stencil_path_mask);
api()->glPixelStoreiFn(GL_PACK_ALIGNMENT, pack_alignment);
api()->glPixelStoreiFn(GL_UNPACK_ALIGNMENT, unpack_alignment);
api()->glPolygonOffsetFn(polygon_offset_factor, polygon_offset_units);
@@ -622,40 +559,6 @@ bool ContextState::GetStateAsGLint(GLenum pname,
params[0] = static_cast<GLint>(line_width);
}
return true;
- case GL_PATH_MODELVIEW_MATRIX_CHROMIUM:
- *num_written = 16;
- if (params) {
- for (size_t i = 0; i < 16; ++i) {
- params[i] = static_cast<GLint>(round(modelview_matrix[i]));
- }
- }
- return true;
- case GL_PATH_PROJECTION_MATRIX_CHROMIUM:
- *num_written = 16;
- if (params) {
- for (size_t i = 0; i < 16; ++i) {
- params[i] = static_cast<GLint>(round(projection_matrix[i]));
- }
- }
- return true;
- case GL_PATH_STENCIL_FUNC_CHROMIUM:
- *num_written = 1;
- if (params) {
- params[0] = static_cast<GLint>(stencil_path_func);
- }
- return true;
- case GL_PATH_STENCIL_REF_CHROMIUM:
- *num_written = 1;
- if (params) {
- params[0] = static_cast<GLint>(stencil_path_ref);
- }
- return true;
- case GL_PATH_STENCIL_VALUE_MASK_CHROMIUM:
- *num_written = 1;
- if (params) {
- params[0] = static_cast<GLint>(stencil_path_mask);
- }
- return true;
case GL_PACK_ALIGNMENT:
*num_written = 1;
if (params) {
@@ -1084,36 +987,6 @@ bool ContextState::GetStateAsGLfloat(GLenum pname,
params[0] = static_cast<GLfloat>(line_width);
}
return true;
- case GL_PATH_MODELVIEW_MATRIX_CHROMIUM:
- *num_written = 16;
- if (params) {
- memcpy(params, modelview_matrix, sizeof(GLfloat) * 16);
- }
- return true;
- case GL_PATH_PROJECTION_MATRIX_CHROMIUM:
- *num_written = 16;
- if (params) {
- memcpy(params, projection_matrix, sizeof(GLfloat) * 16);
- }
- return true;
- case GL_PATH_STENCIL_FUNC_CHROMIUM:
- *num_written = 1;
- if (params) {
- params[0] = static_cast<GLfloat>(stencil_path_func);
- }
- return true;
- case GL_PATH_STENCIL_REF_CHROMIUM:
- *num_written = 1;
- if (params) {
- params[0] = static_cast<GLfloat>(stencil_path_ref);
- }
- return true;
- case GL_PATH_STENCIL_VALUE_MASK_CHROMIUM:
- *num_written = 1;
- if (params) {
- params[0] = static_cast<GLfloat>(stencil_path_mask);
- }
- return true;
case GL_PACK_ALIGNMENT:
*num_written = 1;
if (params) {
diff --git a/chromium/gpu/command_buffer/service/context_state_test_helpers_autogen.h b/chromium/gpu/command_buffer/service/context_state_test_helpers_autogen.h
index 6fa1c4930bd..cd9d25ebfd2 100644
--- a/chromium/gpu/command_buffer/service/context_state_test_helpers_autogen.h
+++ b/chromium/gpu/command_buffer/service/context_state_test_helpers_autogen.h
@@ -83,21 +83,6 @@ void ContextStateTestHelpers::SetupInitStateExpectations(
.RetiresOnSaturation();
}
SetupInitStateManualExpectationsForDoLineWidth(gl, 1.0f);
- if (feature_info->feature_flags().chromium_path_rendering) {
- EXPECT_CALL(*gl, MatrixLoadfEXT(GL_PATH_MODELVIEW_CHROMIUM, _))
- .Times(1)
- .RetiresOnSaturation();
- }
- if (feature_info->feature_flags().chromium_path_rendering) {
- EXPECT_CALL(*gl, MatrixLoadfEXT(GL_PATH_PROJECTION_CHROMIUM, _))
- .Times(1)
- .RetiresOnSaturation();
- }
- if (feature_info->feature_flags().chromium_path_rendering) {
- EXPECT_CALL(*gl, PathStencilFuncNV(GL_ALWAYS, 0, 0xFFFFFFFFU))
- .Times(1)
- .RetiresOnSaturation();
- }
EXPECT_CALL(*gl, PixelStorei(GL_PACK_ALIGNMENT, 4))
.Times(1)
.RetiresOnSaturation();
diff --git a/chromium/gpu/command_buffer/service/decoder_context.h b/chromium/gpu/command_buffer/service/decoder_context.h
index ac8af444a17..3b6a708b913 100644
--- a/chromium/gpu/command_buffer/service/decoder_context.h
+++ b/chromium/gpu/command_buffer/service/decoder_context.h
@@ -193,6 +193,15 @@ class GPU_GLES2_EXPORT DecoderContext : public AsyncAPIInterface,
unsigned format,
int width,
int height) = 0;
+ // Clears a level sub area of a compressed 3D texture.
+ // Returns false if a GL error should be generated.
+ virtual bool ClearCompressedTextureLevel3D(gles2::Texture* texture,
+ unsigned target,
+ int level,
+ unsigned format,
+ int width,
+ int height,
+ int depth) = 0;
// Clears a level of a 3D texture.
// Returns false if a GL error should be generated.
virtual bool ClearLevel3D(gles2::Texture* texture,
diff --git a/chromium/gpu/command_buffer/service/external_vk_image_backing.cc b/chromium/gpu/command_buffer/service/external_vk_image_backing.cc
index b99410e49d8..64a0a6c35da 100644
--- a/chromium/gpu/command_buffer/service/external_vk_image_backing.cc
+++ b/chromium/gpu/command_buffer/service/external_vk_image_backing.cc
@@ -7,11 +7,13 @@
#include <utility>
#include <vector>
+#include "base/memory/ptr_util.h"
#include "base/memory/unsafe_shared_memory_region.h"
#include "base/posix/eintr_wrapper.h"
#include "base/stl_util.h"
#include "base/system/sys_info.h"
#include "build/build_config.h"
+#include "components/viz/common/resources/resource_format_utils.h"
#include "components/viz/common/resources/resource_sizes.h"
#include "gpu/command_buffer/service/external_vk_image_gl_representation.h"
#include "gpu/command_buffer/service/external_vk_image_skia_representation.h"
@@ -21,6 +23,7 @@
#include "gpu/vulkan/vulkan_command_pool.h"
#include "gpu/vulkan/vulkan_fence_helper.h"
#include "gpu/vulkan/vulkan_function_pointers.h"
+#include "gpu/vulkan/vulkan_image.h"
#include "gpu/vulkan/vulkan_util.h"
#include "ui/gfx/buffer_format_util.h"
#include "ui/gl/buildflags.h"
@@ -34,10 +37,20 @@
#include "gpu/vulkan/fuchsia/vulkan_fuchsia_ext.h"
#endif
-#if defined(OS_LINUX)
+#if defined(OS_LINUX) || defined(OS_ANDROID) || defined(OS_FUCHSIA)
+#define GL_DEDICATED_MEMORY_OBJECT_EXT 0x9581
+#define GL_TEXTURE_TILING_EXT 0x9580
+#define GL_TILING_TYPES_EXT 0x9583
+#define GL_OPTIMAL_TILING_EXT 0x9584
+#define GL_LINEAR_TILING_EXT 0x9585
#define GL_HANDLE_TYPE_OPAQUE_FD_EXT 0x9586
#endif
+#if defined(OS_FUCHSIA)
+#define GL_HANDLE_TYPE_ZIRCON_VMO_ANGLE 0x93AE
+#define GL_HANDLE_TYPE_ZIRCON_EVENT_ANGLE 0x93AF
+#endif
+
namespace gpu {
namespace {
@@ -62,8 +75,8 @@ static const struct {
{GL_RED, GL_UNSIGNED_SHORT, 2}, // R16_EXT
{GL_RGBA, GL_UNSIGNED_BYTE, 4}, // RGBX_8888
{GL_BGRA, GL_UNSIGNED_BYTE, 4}, // BGRX_8888
- {GL_RGBA, GL_UNSIGNED_INT_2_10_10_10_REV, 4}, // RGBX_1010102
- {GL_BGRA, GL_UNSIGNED_INT_2_10_10_10_REV, 4}, // BGRX_1010102
+ {GL_RGBA, GL_UNSIGNED_INT_2_10_10_10_REV, 4}, // RGBA_1010102
+ {GL_BGRA, GL_UNSIGNED_INT_2_10_10_10_REV, 4}, // BGRA_1010102
{GL_ZERO, GL_ZERO, 0}, // YVU_420
{GL_ZERO, GL_ZERO, 0}, // YUV_420_BIPLANAR
{GL_ZERO, GL_ZERO, 0}, // P010
@@ -71,62 +84,6 @@ static const struct {
static_assert(base::size(kFormatTable) == (viz::RESOURCE_FORMAT_MAX + 1),
"kFormatTable does not handle all cases.");
-GrVkImageInfo CreateGrVkImageInfo(
- VkImage image,
- VkFormat vk_format,
- VkDeviceMemory memory,
- size_t memory_size,
- bool use_protected_memory,
- const GrVkYcbcrConversionInfo& gr_ycbcr_info) {
- GrVkAlloc alloc(memory, 0 /* offset */, memory_size, 0 /* flags */);
- return GrVkImageInfo(
- image, alloc, VK_IMAGE_TILING_OPTIMAL, VK_IMAGE_LAYOUT_UNDEFINED,
- vk_format, 1 /* levelCount */, VK_QUEUE_FAMILY_IGNORED,
- use_protected_memory ? GrProtected::kYes : GrProtected::kNo,
- gr_ycbcr_info);
-}
-
-VkResult CreateVkImage(SharedContextState* context_state,
- VkFormat format,
- const gfx::Size& size,
- bool is_transfer_dst,
- bool is_external,
- bool use_protected_memory,
- VkImage* image) {
- VkExternalMemoryImageCreateInfoKHR external_info = {
- .sType = VK_STRUCTURE_TYPE_EXTERNAL_MEMORY_IMAGE_CREATE_INFO_KHR,
- .handleTypes = context_state->vk_context_provider()
- ->GetVulkanImplementation()
- ->GetExternalImageHandleType(),
- };
-
- auto usage = VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | VK_IMAGE_USAGE_SAMPLED_BIT;
- if (is_transfer_dst)
- usage |= VK_IMAGE_USAGE_TRANSFER_DST_BIT;
-
- VkImageCreateInfo create_info = {
- .sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO,
- .pNext = is_external ? &external_info : nullptr,
- .flags = use_protected_memory ? VK_IMAGE_CREATE_PROTECTED_BIT : 0,
- .imageType = VK_IMAGE_TYPE_2D,
- .format = format,
- .extent = {size.width(), size.height(), 1},
- .mipLevels = 1,
- .arrayLayers = 1,
- .samples = VK_SAMPLE_COUNT_1_BIT,
- .tiling = VK_IMAGE_TILING_OPTIMAL,
- .usage = usage,
- .sharingMode = VK_SHARING_MODE_EXCLUSIVE,
- .queueFamilyIndexCount = 0,
- .pQueueFamilyIndices = nullptr,
- .initialLayout = VK_IMAGE_LAYOUT_UNDEFINED,
- };
-
- VkDevice device =
- context_state->vk_context_provider()->GetDeviceQueue()->GetVulkanDevice();
- return vkCreateImage(device, &create_info, nullptr, image);
-}
-
uint32_t FindMemoryTypeIndex(SharedContextState* context_state,
const VkMemoryRequirements& requirements,
VkMemoryPropertyFlags flags) {
@@ -169,23 +126,6 @@ class ScopedPixelStore {
DISALLOW_COPY_AND_ASSIGN(ScopedPixelStore);
};
-base::Optional<WGPUTextureFormat> GetWGPUFormat(viz::ResourceFormat format) {
- switch (format) {
- case viz::RED_8:
- case viz::ALPHA_8:
- case viz::LUMINANCE_8:
- return WGPUTextureFormat_R8Unorm;
- case viz::RG_88:
- return WGPUTextureFormat_RG8Unorm;
- case viz::RGBA_8888:
- return WGPUTextureFormat_RGBA8Unorm;
- case viz::BGRA_8888:
- return WGPUTextureFormat_BGRA8Unorm;
- default:
- return {};
- }
-}
-
} // namespace
// static
@@ -197,75 +137,50 @@ std::unique_ptr<ExternalVkImageBacking> ExternalVkImageBacking::Create(
const gfx::Size& size,
const gfx::ColorSpace& color_space,
uint32_t usage,
+ const VulkanImageUsageCache* image_usage_cache,
base::span<const uint8_t> pixel_data,
bool using_gmb) {
- VkDevice device =
- context_state->vk_context_provider()->GetDeviceQueue()->GetVulkanDevice();
- VkFormat vk_format = ToVkFormat(format);
- VkImage image;
bool is_external = context_state->support_vulkan_external_object();
bool is_transfer_dst = using_gmb || !pixel_data.empty() || !is_external;
- if (context_state->vk_context_provider()
- ->GetVulkanImplementation()
- ->enforce_protected_memory()) {
- usage |= SHARED_IMAGE_USAGE_PROTECTED;
- }
- VkResult result =
- CreateVkImage(context_state, vk_format, size, is_transfer_dst,
- is_external, usage & SHARED_IMAGE_USAGE_PROTECTED, &image);
- if (result != VK_SUCCESS) {
- DLOG(ERROR) << "Failed to create external VkImage: " << result;
- return nullptr;
- }
-
- VkMemoryRequirements requirements;
- vkGetImageMemoryRequirements(device, image, &requirements);
-
- if (!requirements.memoryTypeBits) {
- DLOG(ERROR)
- << "Unable to find appropriate memory type for external VkImage";
- vkDestroyImage(device, image, nullptr);
- return nullptr;
- }
- VkExportMemoryAllocateInfoKHR external_info = {
- .sType = VK_STRUCTURE_TYPE_EXPORT_MEMORY_ALLOCATE_INFO_KHR,
- .handleTypes = context_state->vk_context_provider()
- ->GetVulkanImplementation()
- ->GetExternalImageHandleType(),
- };
+ auto* device_queue = context_state->vk_context_provider()->GetDeviceQueue();
+ VkFormat vk_format = ToVkFormat(format);
+ VkImageUsageFlags vk_usage =
+ VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | VK_IMAGE_USAGE_SAMPLED_BIT;
+ if (is_transfer_dst)
+ vk_usage |= VK_IMAGE_USAGE_TRANSFER_DST_BIT;
- VkMemoryAllocateInfo mem_alloc_info = {
- .sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO,
- .pNext = is_external ? &external_info : nullptr,
- .allocationSize = requirements.size,
- .memoryTypeIndex = FindMemoryTypeIndex(
- context_state, requirements, VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT),
- };
+ // Requested usage flags must be supported.
+ DCHECK_EQ(vk_usage & image_usage_cache->optimal_tiling_usage[format],
+ vk_usage);
- VkDeviceMemory memory;
- // TODO(crbug.com/932286): Allocating a separate piece of memory for every
- // VkImage might have too much overhead. It is recommended that one large
- // VkDeviceMemory be sub-allocated to multiple VkImages instead.
- result = vkAllocateMemory(device, &mem_alloc_info, nullptr, &memory);
- if (result != VK_SUCCESS) {
- DLOG(ERROR) << "Failed to allocate memory for external VkImage: " << result;
- vkDestroyImage(device, image, nullptr);
- return nullptr;
+ if (is_external && (usage & SHARED_IMAGE_USAGE_GLES2)) {
+    // Must request all available image usage flags if aliasing a GL texture.
+    // This is a spec requirement.
+ vk_usage |= image_usage_cache->optimal_tiling_usage[format];
}
- result = vkBindImageMemory(device, image, memory, 0);
- if (result != VK_SUCCESS) {
- DLOG(ERROR) << "Failed to bind memory to external VkImage: " << result;
- vkFreeMemory(device, memory, nullptr);
- vkDestroyImage(device, image, nullptr);
- return nullptr;
+ auto* vulkan_implementation =
+ context_state->vk_context_provider()->GetVulkanImplementation();
+ VkImageCreateFlags vk_flags =
+ vulkan_implementation->enforce_protected_memory()
+ ? VK_IMAGE_CREATE_PROTECTED_BIT
+ : 0;
+ std::unique_ptr<VulkanImage> image;
+ if (is_external) {
+ image = VulkanImage::CreateWithExternalMemory(device_queue, size, vk_format,
+ vk_usage, vk_flags,
+ VK_IMAGE_TILING_OPTIMAL);
+ } else {
+ image = VulkanImage::Create(device_queue, size, vk_format, vk_usage,
+ vk_flags, VK_IMAGE_TILING_OPTIMAL);
}
+ if (!image)
+ return nullptr;
- auto backing = base::WrapUnique(new ExternalVkImageBacking(
- mailbox, format, size, color_space, usage, context_state, image, memory,
- requirements.size, vk_format, command_pool, GrVkYcbcrConversionInfo(),
- GetWGPUFormat(format), mem_alloc_info.memoryTypeIndex));
+ auto backing = std::make_unique<ExternalVkImageBacking>(
+ util::PassKey<ExternalVkImageBacking>(), mailbox, format, size,
+ color_space, usage, context_state, std::move(image), command_pool);
if (!pixel_data.empty()) {
backing->WritePixels(
@@ -287,7 +202,8 @@ std::unique_ptr<ExternalVkImageBacking> ExternalVkImageBacking::CreateFromGMB(
gfx::BufferFormat buffer_format,
const gfx::Size& size,
const gfx::ColorSpace& color_space,
- uint32_t usage) {
+ uint32_t usage,
+ const VulkanImageUsageCache* image_usage_cache) {
if (!gpu::IsImageSizeValidForGpuMemoryBufferFormat(size, buffer_format)) {
DLOG(ERROR) << "Invalid image size for format.";
return nullptr;
@@ -297,40 +213,20 @@ std::unique_ptr<ExternalVkImageBacking> ExternalVkImageBacking::CreateFromGMB(
context_state->vk_context_provider()->GetVulkanImplementation();
auto resource_format = viz::GetResourceFormat(buffer_format);
if (vulkan_implementation->CanImportGpuMemoryBuffer(handle.type)) {
- VkDevice vk_device = context_state->vk_context_provider()
- ->GetDeviceQueue()
- ->GetVulkanDevice();
- VkImage vk_image = VK_NULL_HANDLE;
- VkImageCreateInfo vk_image_info = {VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO};
- VkDeviceMemory vk_device_memory = VK_NULL_HANDLE;
- VkDeviceSize memory_size = 0;
- base::Optional<VulkanYCbCrInfo> ycbcr_info;
-
- if (!vulkan_implementation->CreateImageFromGpuMemoryHandle(
- vk_device, std::move(handle), size, &vk_image, &vk_image_info,
- &vk_device_memory, &memory_size, &ycbcr_info)) {
+ auto* device_queue = context_state->vk_context_provider()->GetDeviceQueue();
+ VkFormat vk_format = ToVkFormat(resource_format);
+ auto image = vulkan_implementation->CreateImageFromGpuMemoryHandle(
+ device_queue, std::move(handle), size, vk_format);
+ if (!image) {
DLOG(ERROR) << "Failed to create VkImage from GpuMemoryHandle.";
return nullptr;
}
- VkFormat expected_format = ToVkFormat(resource_format);
- if (expected_format != vk_image_info.format) {
- DLOG(ERROR) << "BufferFormat doesn't match the buffer ";
- vkFreeMemory(vk_device, vk_device_memory, nullptr);
- vkDestroyImage(vk_device, vk_image, nullptr);
- return nullptr;
- }
-
- GrVkYcbcrConversionInfo gr_ycbcr_info =
- CreateGrVkYcbcrConversionInfo(context_state->vk_context_provider()
- ->GetDeviceQueue()
- ->GetVulkanPhysicalDevice(),
- vk_image_info.tiling, ycbcr_info);
-
- return base::WrapUnique(new ExternalVkImageBacking(
- mailbox, resource_format, size, color_space, usage, context_state,
- vk_image, vk_device_memory, memory_size, vk_image_info.format,
- command_pool, gr_ycbcr_info, GetWGPUFormat(resource_format), {}));
+ auto backing = std::make_unique<ExternalVkImageBacking>(
+ util::PassKey<ExternalVkImageBacking>(), mailbox, resource_format, size,
+ color_space, usage, context_state, std::move(image), command_pool);
+ backing->SetCleared();
+ return backing;
}
if (gfx::NumberOfPlanesForLinearBufferFormat(buffer_format) != 1) {
@@ -408,8 +304,8 @@ std::unique_ptr<ExternalVkImageBacking> ExternalVkImageBacking::CreateFromGMB(
}
auto backing = Create(context_state, command_pool, mailbox, resource_format,
- size, color_space, usage, base::span<const uint8_t>(),
- true /* using_gmb */);
+ size, color_space, usage, image_usage_cache,
+ base::span<const uint8_t>(), true /* using_gmb */);
if (!backing)
return nullptr;
@@ -419,42 +315,58 @@ std::unique_ptr<ExternalVkImageBacking> ExternalVkImageBacking::CreateFromGMB(
}
ExternalVkImageBacking::ExternalVkImageBacking(
+ util::PassKey<ExternalVkImageBacking>,
const Mailbox& mailbox,
viz::ResourceFormat format,
const gfx::Size& size,
const gfx::ColorSpace& color_space,
uint32_t usage,
SharedContextState* context_state,
- VkImage image,
- VkDeviceMemory memory,
- size_t memory_size,
- VkFormat vk_format,
- VulkanCommandPool* command_pool,
- const GrVkYcbcrConversionInfo& ycbcr_info,
- base::Optional<WGPUTextureFormat> wgpu_format,
- base::Optional<uint32_t> memory_type_index)
- : SharedImageBacking(mailbox,
- format,
- size,
- color_space,
- usage,
- memory_size,
- false /* is_thread_safe */),
+ std::unique_ptr<VulkanImage> image,
+ VulkanCommandPool* command_pool)
+ : ClearTrackingSharedImageBacking(mailbox,
+ format,
+ size,
+ color_space,
+ usage,
+ image->device_size(),
+ false /* is_thread_safe */),
context_state_(context_state),
+ image_(std::move(image)),
backend_texture_(size.width(),
size.height(),
- CreateGrVkImageInfo(image,
- vk_format,
- memory,
- memory_size,
- usage & SHARED_IMAGE_USAGE_PROTECTED,
- ycbcr_info)),
- command_pool_(command_pool),
- wgpu_format_(wgpu_format),
- memory_type_index_(memory_type_index) {}
+ CreateGrVkImageInfo(image_.get())),
+ command_pool_(command_pool) {}
ExternalVkImageBacking::~ExternalVkImageBacking() {
- DCHECK(!backend_texture_.isValid());
+ GrVkImageInfo image_info;
+ bool result = backend_texture_.getVkImageInfo(&image_info);
+ DCHECK(result);
+
+ auto* fence_helper = context_state()
+ ->vk_context_provider()
+ ->GetDeviceQueue()
+ ->GetFenceHelper();
+ fence_helper->EnqueueVulkanObjectCleanupForSubmittedWork(std::move(image_));
+ backend_texture_ = GrBackendTexture();
+
+ if (texture_) {
+ // Ensure that a context is current before removing the ref and calling
+ // glDeleteTextures.
+ if (!gl::GLContext::GetCurrent())
+ context_state()->MakeCurrent(nullptr, true /* need_gl */);
+ texture_->RemoveLightweightRef(have_context());
+ }
+
+ if (texture_passthrough_) {
+    // Ensure that a context is current before releasing
+    // |texture_passthrough_|, because its release calls glDeleteTextures.
+ if (!gl::GLContext::GetCurrent())
+ context_state()->MakeCurrent(nullptr, true /* need_gl */);
+ if (!have_context())
+ texture_passthrough_->MarkContextLost();
+ texture_passthrough_ = nullptr;
+ }
}
bool ExternalVkImageBacking::BeginAccess(
@@ -466,12 +378,110 @@ bool ExternalVkImageBacking::BeginAccess(
if (texture_)
UpdateContent(kInGLTexture);
}
- return BeginAccessInternal(readonly, semaphore_handles);
+ if (!BeginAccessInternal(readonly, semaphore_handles))
+ return false;
+
+ if (!is_gl)
+ return true;
+
+ if (use_separate_gl_texture())
+ return true;
+
+ DCHECK(need_sychronization());
+
+ auto command_buffer = command_pool_->CreatePrimaryCommandBuffer();
+ {
+ ScopedSingleUseCommandBufferRecorder recorder(*command_buffer);
+ GrVkImageInfo image_info;
+ bool success = backend_texture_.getVkImageInfo(&image_info);
+ DCHECK(success);
+ auto image_layout = image_info.fImageLayout;
+ if (image_layout == VK_IMAGE_LAYOUT_UNDEFINED) {
+ // dst_image_layout cannot be VK_IMAGE_LAYOUT_UNDEFINED, so we set it to
+ // VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL.
+ image_layout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL;
+ command_buffer->TransitionImageLayout(
+ image_info.fImage, image_info.fImageLayout, image_layout);
+ // Update backend_texture_ image layout.
+ backend_texture_.setVkImageLayout(image_layout);
+ }
+ uint32_t vulkan_queue_index = context_state_->vk_context_provider()
+ ->GetDeviceQueue()
+ ->GetVulkanQueueIndex();
+    // Transfer image queue family ownership to external, so the image can be
+ // used by GL.
+ command_buffer->TransitionImageLayout(image_info.fImage, image_layout,
+ image_layout, vulkan_queue_index,
+ VK_QUEUE_FAMILY_EXTERNAL);
+ }
+
+ std::vector<VkSemaphore> wait_semaphores;
+ wait_semaphores.reserve(semaphore_handles->size());
+ for (auto& handle : *semaphore_handles) {
+ VkSemaphore semaphore = vulkan_implementation()->ImportSemaphoreHandle(
+ device(), std::move(handle));
+ wait_semaphores.emplace_back(semaphore);
+ }
+ semaphore_handles->clear();
+
+ VkSemaphore signal_semaphore =
+ vulkan_implementation()->CreateExternalSemaphore(device());
+ // TODO(penghuang): ask skia to do it for us to avoid this queue submission.
+ command_buffer->Submit(wait_semaphores.size(), wait_semaphores.data(), 1,
+ &signal_semaphore);
+  auto end_access_semaphore_handle =
+      vulkan_implementation()->GetSemaphoreHandle(device(), signal_semaphore);
+  semaphore_handles->push_back(std::move(end_access_semaphore_handle));
+
+ auto* fence_helper =
+ context_state_->vk_context_provider()->GetDeviceQueue()->GetFenceHelper();
+ fence_helper->EnqueueVulkanObjectCleanupForSubmittedWork(
+ std::move(command_buffer));
+ wait_semaphores.emplace_back(signal_semaphore);
+ fence_helper->EnqueueSemaphoresCleanupForSubmittedWork(
+ std::move(wait_semaphores));
+
+ return true;
}
void ExternalVkImageBacking::EndAccess(bool readonly,
SemaphoreHandle semaphore_handle,
bool is_gl) {
+ if (is_gl && !use_separate_gl_texture()) {
+ auto command_buffer = command_pool_->CreatePrimaryCommandBuffer();
+ {
+ ScopedSingleUseCommandBufferRecorder recorder(*command_buffer);
+ GrVkImageInfo image_info;
+ bool success = backend_texture_.getVkImageInfo(&image_info);
+ DCHECK(success);
+ uint32_t vulkan_queue_index = context_state_->vk_context_provider()
+ ->GetDeviceQueue()
+ ->GetVulkanQueueIndex();
+
+      // After GL access, transfer image queue family ownership back so the
+      // image can be used by Vulkan.
+ command_buffer->TransitionImageLayout(
+ image_info.fImage, image_info.fImageLayout, image_info.fImageLayout,
+ VK_QUEUE_FAMILY_EXTERNAL, vulkan_queue_index);
+ }
+
+ VkSemaphore semaphore = vulkan_implementation()->ImportSemaphoreHandle(
+ device(), std::move(semaphore_handle));
+ VkSemaphore end_access_semaphore =
+ vulkan_implementation()->CreateExternalSemaphore(device());
+ // TODO(penghuang): ask skia to do it for us to avoid this queue submission.
+ command_buffer->Submit(1, &semaphore, 1, &end_access_semaphore);
+ semaphore_handle = vulkan_implementation()->GetSemaphoreHandle(
+ device(), end_access_semaphore);
+ auto* fence_helper = context_state_->vk_context_provider()
+ ->GetDeviceQueue()
+ ->GetFenceHelper();
+ fence_helper->EnqueueVulkanObjectCleanupForSubmittedWork(
+ std::move(command_buffer));
+ fence_helper->EnqueueSemaphoresCleanupForSubmittedWork(
+ {semaphore, end_access_semaphore});
+ }
+
EndAccessInternal(readonly, std::move(semaphore_handle));
if (!readonly) {
if (use_separate_gl_texture()) {
@@ -482,47 +492,12 @@ void ExternalVkImageBacking::EndAccess(bool readonly,
}
}
-bool ExternalVkImageBacking::IsCleared() const {
- return is_cleared_;
-}
-
-void ExternalVkImageBacking::SetCleared() {
- is_cleared_ = true;
-}
-
void ExternalVkImageBacking::Update(std::unique_ptr<gfx::GpuFence> in_fence) {
DCHECK(!in_fence);
latest_content_ = kInSharedMemory;
SetCleared();
}
-void ExternalVkImageBacking::Destroy() {
- GrVkImageInfo image_info;
- bool result = backend_texture_.getVkImageInfo(&image_info);
- DCHECK(result);
-
- auto* fence_helper = context_state()
- ->vk_context_provider()
- ->GetDeviceQueue()
- ->GetFenceHelper();
- fence_helper->EnqueueImageCleanupForSubmittedWork(image_info.fImage,
- image_info.fAlloc.fMemory);
- backend_texture_ = GrBackendTexture();
-
- if (texture_) {
- // Ensure that a context is current before removing the ref and calling
- // glDeleteTextures.
- if (!gl::GLContext::GetCurrent())
- context_state()->MakeCurrent(nullptr, true /* need_gl */);
- texture_->RemoveLightweightRef(have_context());
- }
- if (texture_passthrough_) {
- if (!have_context())
- texture_passthrough_->MarkContextLost();
- texture_passthrough_ = nullptr;
- }
-}
-
bool ExternalVkImageBacking::ProduceLegacyMailbox(
MailboxManager* mailbox_manager) {
// It is not safe to produce a legacy mailbox because it would bypass the
@@ -536,13 +511,10 @@ ExternalVkImageBacking::ProduceDawn(SharedImageManager* manager,
MemoryTypeTracker* tracker,
WGPUDevice wgpuDevice) {
#if defined(OS_LINUX) && BUILDFLAG(USE_DAWN)
- if (!wgpu_format_) {
- DLOG(ERROR) << "Format not supported for Dawn";
- return nullptr;
- }
+ auto wgpu_format = viz::ToWGPUFormat(format());
- if (!memory_type_index_) {
- DLOG(ERROR) << "No type index info provided";
+ if (wgpu_format == WGPUTextureFormat_Undefined) {
+ DLOG(ERROR) << "Format not supported for Dawn";
return nullptr;
}
@@ -550,14 +522,13 @@ ExternalVkImageBacking::ProduceDawn(SharedImageManager* manager,
bool result = backend_texture_.getVkImageInfo(&image_info);
DCHECK(result);
- int memory_fd = GetMemoryFd(image_info);
- if (memory_fd < 0) {
+ auto memory_fd = image_->GetMemoryFd();
+ if (!memory_fd.is_valid()) {
return nullptr;
}
return std::make_unique<ExternalVkImageDawnRepresentation>(
- manager, this, tracker, wgpuDevice, wgpu_format_.value(), memory_fd,
- image_info.fAlloc.fSize, memory_type_index_.value());
+ manager, this, tracker, wgpuDevice, wgpu_format, std::move(memory_fd));
#else // !defined(OS_LINUX) || !BUILDFLAG(USE_DAWN)
NOTIMPLEMENTED_LOG_ONCE();
return nullptr;
@@ -565,7 +536,8 @@ ExternalVkImageBacking::ProduceDawn(SharedImageManager* manager,
}
GLuint ExternalVkImageBacking::ProduceGLTextureInternal() {
-#if defined(OS_LINUX)
+#if defined(OS_LINUX) || defined(OS_ANDROID) || defined(OS_FUCHSIA) || \
+ defined(OS_WIN)
GrVkImageInfo image_info;
bool result = backend_texture_.getVkImageInfo(&image_info);
DCHECK(result);
@@ -573,14 +545,38 @@ GLuint ExternalVkImageBacking::ProduceGLTextureInternal() {
gl::GLApi* api = gl::g_current_gl_context;
GLuint memory_object = 0;
if (!use_separate_gl_texture()) {
- int memory_fd = GetMemoryFd(image_info);
- if (memory_fd < 0) {
+#if defined(OS_LINUX) || defined(OS_ANDROID)
+ auto memory_fd = image_->GetMemoryFd();
+ if (!memory_fd.is_valid()) {
return 0;
}
api->glCreateMemoryObjectsEXTFn(1, &memory_object);
+ int dedicated = GL_TRUE;
+ api->glMemoryObjectParameterivEXTFn(
+ memory_object, GL_DEDICATED_MEMORY_OBJECT_EXT, &dedicated);
api->glImportMemoryFdEXTFn(memory_object, image_info.fAlloc.fSize,
- GL_HANDLE_TYPE_OPAQUE_FD_EXT, memory_fd);
+ GL_HANDLE_TYPE_OPAQUE_FD_EXT,
+ memory_fd.release());
+#elif defined(OS_FUCHSIA)
+ zx::vmo vmo = image_->GetMemoryZirconHandle();
+ if (!vmo)
+ return 0;
+
+ api->glCreateMemoryObjectsEXTFn(1, &memory_object);
+    // ANGLE doesn't implement glMemoryObjectParameterivEXT yet. Avoid
+    // calling it on Fuchsia until it's implemented.
+    // TODO(spang): Implement glMemoryObjectParameterivEXT in ANGLE.
+ api->glImportMemoryZirconHandleANGLEFn(
+ memory_object, image_info.fAlloc.fSize, GL_HANDLE_TYPE_ZIRCON_VMO_ANGLE,
+ vmo.release());
+#elif defined(OS_WIN)
+ // TODO(penghuang): support interop on Windows
+ NOTIMPLEMENTED();
+ return 0;
+#else
+#error Unsupported OS
+#endif
}
GLuint internal_format = viz::TextureStorageFormat(format());
@@ -598,24 +594,21 @@ GLuint ExternalVkImageBacking::ProduceGLTextureInternal() {
size().height());
} else {
DCHECK(memory_object);
- if (internal_format == GL_BGRA8_EXT) {
- // BGRA8 internal format is not well supported, so use RGBA8 instead.
- api->glTexStorageMem2DEXTFn(GL_TEXTURE_2D, 1, GL_RGBA8, size().width(),
- size().height(), memory_object, 0);
+    bool is_bgra8 = (internal_format == GL_BGRA8_EXT);
+    if (is_bgra8)
+ internal_format = GL_RGBA8;
+ api->glTexStorageMem2DEXTFn(GL_TEXTURE_2D, 1, internal_format,
+ size().width(), size().height(), memory_object,
+ 0);
+ api->glDeleteMemoryObjectsEXTFn(1, &memory_object);
+    if (is_bgra8) {
api->glTexParameteriFn(GL_TEXTURE_2D, GL_TEXTURE_SWIZZLE_B, GL_RED);
api->glTexParameteriFn(GL_TEXTURE_2D, GL_TEXTURE_SWIZZLE_R, GL_BLUE);
- } else {
- api->glTexStorageMem2DEXTFn(GL_TEXTURE_2D, 1, internal_format,
- size().width(), size().height(),
- memory_object, 0);
}
}
api->glBindTextureFn(GL_TEXTURE_2D, old_texture_binding);
return texture_service_id;
-#elif defined(OS_FUCHSIA)
- NOTIMPLEMENTED_LOG_ONCE();
- return 0;
-#else // !defined(OS_LINUX) && !defined(OS_FUCHSIA)
+#else // !defined(OS_LINUX) && !defined(OS_ANDROID) && !defined(OS_FUCHSIA)
#error Unsupported OS
#endif
}
@@ -629,7 +622,8 @@ ExternalVkImageBacking::ProduceGLTexture(SharedImageManager* manager,
return nullptr;
}
-#if defined(OS_LINUX)
+#if defined(OS_LINUX) || defined(OS_ANDROID) || defined(OS_FUCHSIA) || \
+ defined(OS_WIN)
if (!texture_) {
GLuint texture_service_id = ProduceGLTextureInternal();
if (!texture_service_id)
@@ -647,7 +641,7 @@ ExternalVkImageBacking::ProduceGLTexture(SharedImageManager* manager,
texture_->sampler_state_.wrap_s = GL_CLAMP_TO_EDGE;
// If the backing is already cleared, no need to clear it again.
gfx::Rect cleared_rect;
- if (is_cleared_)
+ if (IsCleared())
cleared_rect = gfx::Rect(size());
texture_->SetLevelInfo(GL_TEXTURE_2D, 0, internal_format, size().width(),
@@ -657,10 +651,7 @@ ExternalVkImageBacking::ProduceGLTexture(SharedImageManager* manager,
}
return std::make_unique<ExternalVkImageGLRepresentation>(
manager, this, tracker, texture_, texture_->service_id());
-#elif defined(OS_FUCHSIA)
- NOTIMPLEMENTED_LOG_ONCE();
- return nullptr;
-#else // !defined(OS_LINUX) && !defined(OS_FUCHSIA)
+#else
#error Unsupported OS
#endif
}
@@ -675,7 +666,8 @@ ExternalVkImageBacking::ProduceGLTexturePassthrough(
return nullptr;
}
-#if defined(OS_LINUX)
+#if defined(OS_LINUX) || defined(OS_ANDROID) || defined(OS_FUCHSIA) || \
+ defined(OS_WIN)
if (!texture_passthrough_) {
GLuint texture_service_id = ProduceGLTextureInternal();
if (!texture_service_id)
@@ -692,10 +684,7 @@ ExternalVkImageBacking::ProduceGLTexturePassthrough(
return std::make_unique<ExternalVkImageGLPassthroughRepresentation>(
manager, this, tracker, texture_passthrough_->service_id());
-#elif defined(OS_FUCHSIA)
- NOTIMPLEMENTED_LOG_ONCE();
- return nullptr;
-#else // !defined(OS_LINUX) && !defined(OS_FUCHSIA)
+#else
#error Unsupported OS
#endif
}
@@ -713,23 +702,6 @@ ExternalVkImageBacking::ProduceSkia(
tracker);
}
-#ifdef OS_LINUX
-int ExternalVkImageBacking::GetMemoryFd(const GrVkImageInfo& image_info) {
- VkMemoryGetFdInfoKHR get_fd_info;
- get_fd_info.sType = VK_STRUCTURE_TYPE_MEMORY_GET_FD_INFO_KHR;
- get_fd_info.pNext = nullptr;
- get_fd_info.memory = image_info.fAlloc.fMemory;
- get_fd_info.handleType = VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT_KHR;
-
- int memory_fd = -1;
- vkGetMemoryFdKHR(device(), &get_fd_info, &memory_fd);
- if (memory_fd < 0) {
- DLOG(ERROR) << "Unable to extract file descriptor out of external VkImage";
- }
- return memory_fd;
-}
-#endif
-
void ExternalVkImageBacking::InstallSharedMemory(
base::WritableSharedMemoryMapping shared_memory_mapping,
size_t stride,
diff --git a/chromium/gpu/command_buffer/service/external_vk_image_backing.h b/chromium/gpu/command_buffer/service/external_vk_image_backing.h
index 6bab8393dd2..6a612aaddc4 100644
--- a/chromium/gpu/command_buffer/service/external_vk_image_backing.h
+++ b/chromium/gpu/command_buffer/service/external_vk_image_backing.h
@@ -11,6 +11,8 @@
#include "base/memory/scoped_refptr.h"
#include "base/memory/shared_memory_mapping.h"
#include "base/optional.h"
+#include "base/util/type_safety/pass_key.h"
+#include "build/build_config.h"
#include "components/viz/common/gpu/vulkan_context_provider.h"
#include "gpu/command_buffer/common/shared_image_usage.h"
#include "gpu/command_buffer/service/shared_context_state.h"
@@ -23,8 +25,14 @@
namespace gpu {
class VulkanCommandPool;
+class VulkanImage;
-class ExternalVkImageBacking final : public SharedImageBacking {
+struct VulkanImageUsageCache {
+  // Maximal usage flags for VK_IMAGE_TILING_OPTIMAL for each ResourceFormat.
+ VkImageUsageFlags optimal_tiling_usage[viz::RESOURCE_FORMAT_MAX + 1];
+};
+
+class ExternalVkImageBacking final : public ClearTrackingSharedImageBacking {
public:
static std::unique_ptr<ExternalVkImageBacking> Create(
SharedContextState* context_state,
@@ -34,6 +42,7 @@ class ExternalVkImageBacking final : public SharedImageBacking {
const gfx::Size& size,
const gfx::ColorSpace& color_space,
uint32_t usage,
+ const VulkanImageUsageCache* image_usage_cache,
base::span<const uint8_t> pixel_data,
bool using_gmb = false);
@@ -45,12 +54,24 @@ class ExternalVkImageBacking final : public SharedImageBacking {
gfx::BufferFormat buffer_format,
const gfx::Size& size,
const gfx::ColorSpace& color_space,
- uint32_t usage);
+ uint32_t usage,
+ const VulkanImageUsageCache* image_usage_cache);
+
+ ExternalVkImageBacking(util::PassKey<ExternalVkImageBacking>,
+ const Mailbox& mailbox,
+ viz::ResourceFormat format,
+ const gfx::Size& size,
+ const gfx::ColorSpace& color_space,
+ uint32_t usage,
+ SharedContextState* context_state,
+ std::unique_ptr<VulkanImage> image,
+ VulkanCommandPool* command_pool);
~ExternalVkImageBacking() override;
SharedContextState* context_state() const { return context_state_; }
const GrBackendTexture& backend_texture() const { return backend_texture_; }
+ VulkanImage* image() const { return image_.get(); }
const scoped_refptr<gles2::TexturePassthrough>& GetTexturePassthrough()
const {
return texture_passthrough_;
@@ -91,13 +112,22 @@ class ExternalVkImageBacking final : public SharedImageBacking {
void EndAccess(bool readonly, SemaphoreHandle semaphore_handle, bool is_gl);
// SharedImageBacking implementation.
- bool IsCleared() const override;
- void SetCleared() override;
void Update(std::unique_ptr<gfx::GpuFence> in_fence) override;
- void Destroy() override;
bool ProduceLegacyMailbox(MailboxManager* mailbox_manager) override;
protected:
+ static std::unique_ptr<ExternalVkImageBacking> CreateInternal(
+ SharedContextState* context_state,
+ VulkanCommandPool* command_pool,
+ const Mailbox& mailbox,
+ viz::ResourceFormat format,
+ const gfx::Size& size,
+ const gfx::ColorSpace& color_space,
+ uint32_t usage,
+ const VulkanImageUsageCache* image_usage_cache,
+ base::span<const uint8_t> pixel_data,
+ bool using_gmb);
+
void UpdateContent(uint32_t content_flags);
bool BeginAccessInternal(bool readonly,
std::vector<SemaphoreHandle>* semaphore_handles);
@@ -120,22 +150,8 @@ class ExternalVkImageBacking final : public SharedImageBacking {
scoped_refptr<SharedContextState> context_state) override;
private:
- ExternalVkImageBacking(const Mailbox& mailbox,
- viz::ResourceFormat format,
- const gfx::Size& size,
- const gfx::ColorSpace& color_space,
- uint32_t usage,
- SharedContextState* context_state,
- VkImage image,
- VkDeviceMemory memory,
- size_t memory_size,
- VkFormat vk_format,
- VulkanCommandPool* command_pool,
- const GrVkYcbcrConversionInfo& ycbcr_info,
- base::Optional<WGPUTextureFormat> wgpu_format,
- base::Optional<uint32_t> memory_type_index);
-
-#ifdef OS_LINUX
+
+#if defined(OS_LINUX) || defined(OS_ANDROID)
// Extract file descriptor from image
int GetMemoryFd(const GrVkImageInfo& image_info);
#endif
@@ -156,12 +172,12 @@ class ExternalVkImageBacking final : public SharedImageBacking {
void CopyPixelsFromShmToGLTexture();
SharedContextState* const context_state_;
+ std::unique_ptr<VulkanImage> image_;
GrBackendTexture backend_texture_;
VulkanCommandPool* const command_pool_;
SemaphoreHandle write_semaphore_handle_;
std::vector<SemaphoreHandle> read_semaphore_handles_;
- bool is_cleared_ = false;
bool is_write_in_progress_ = false;
uint32_t reads_in_progress_ = 0;
@@ -180,9 +196,6 @@ class ExternalVkImageBacking final : public SharedImageBacking {
};
uint32_t latest_content_ = 0;
- base::Optional<WGPUTextureFormat> wgpu_format_;
- base::Optional<uint32_t> memory_type_index_;
-
DISALLOW_COPY_AND_ASSIGN(ExternalVkImageBacking);
};
diff --git a/chromium/gpu/command_buffer/service/external_vk_image_dawn_representation.cc b/chromium/gpu/command_buffer/service/external_vk_image_dawn_representation.cc
index f7f0427f6ec..9ac9c29ea1f 100644
--- a/chromium/gpu/command_buffer/service/external_vk_image_dawn_representation.cc
+++ b/chromium/gpu/command_buffer/service/external_vk_image_dawn_representation.cc
@@ -13,6 +13,7 @@
#include "base/posix/eintr_wrapper.h"
#include "build/build_config.h"
#include "gpu/vulkan/vulkan_function_pointers.h"
+#include "gpu/vulkan/vulkan_image.h"
#include "gpu/vulkan/vulkan_implementation.h"
#include "gpu/vulkan/vulkan_instance.h"
#include "ui/gl/buildflags.h"
@@ -25,15 +26,11 @@ ExternalVkImageDawnRepresentation::ExternalVkImageDawnRepresentation(
MemoryTypeTracker* tracker,
WGPUDevice device,
WGPUTextureFormat wgpu_format,
- int memory_fd,
- VkDeviceSize allocation_size,
- uint32_t memory_type_index)
+ base::ScopedFD memory_fd)
: SharedImageRepresentationDawn(manager, backing, tracker),
device_(device),
wgpu_format_(wgpu_format),
- memory_fd_(memory_fd),
- allocation_size_(allocation_size),
- memory_type_index_(memory_type_index),
+ memory_fd_(std::move(memory_fd)),
dawn_procs_(dawn_native::GetProcs()) {
DCHECK(device_);
@@ -67,11 +64,10 @@ WGPUTexture ExternalVkImageDawnRepresentation::BeginAccess(
dawn_native::vulkan::ExternalImageDescriptorOpaqueFD descriptor = {};
descriptor.cTextureDescriptor = &texture_descriptor;
- descriptor.isCleared = true;
- descriptor.allocationSize = allocation_size_;
- descriptor.memoryTypeIndex = memory_type_index_;
- descriptor.memoryFD = memory_fd_;
- descriptor.waitFDs = {};
+ descriptor.isCleared = IsCleared();
+ descriptor.allocationSize = backing_impl()->image()->device_size();
+ descriptor.memoryTypeIndex = backing_impl()->image()->memory_type_index();
+ descriptor.memoryFD = dup(memory_fd_.get());
// TODO(http://crbug.com/dawn/200): We may not be obeying all of the rules
// specified by Vulkan for external queue transfer barriers. Investigate this.
@@ -87,14 +83,6 @@ WGPUTexture ExternalVkImageDawnRepresentation::BeginAccess(
// Keep a reference to the texture so that it stays valid (its content
// might be destroyed).
dawn_procs_.textureReference(texture_);
-
- // Assume that the user of this representation will write to the texture
- // so set the cleared flag so that other representations don't overwrite
- // the result.
- // TODO(cwallez@chromium.org): This is incorrect and allows reading
- // uninitialized data. When !IsCleared we should tell dawn_native to
- // consider the texture lazy-cleared.
- SetCleared();
}
return texture_;
@@ -105,13 +93,14 @@ void ExternalVkImageDawnRepresentation::EndAccess() {
return;
}
- // TODO(cwallez@chromium.org): query dawn_native to know if the texture was
- // cleared and set IsCleared appropriately.
-
// Grab the signal semaphore from dawn
int signal_semaphore_fd =
dawn_native::vulkan::ExportSignalSemaphoreOpaqueFD(device_, texture_);
+ if (dawn_native::IsTextureSubresourceInitialized(texture_, 0, 1, 0, 1)) {
+ SetCleared();
+ }
+
// Wrap file descriptor in a handle
SemaphoreHandle signal_semaphore(
VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT_KHR,
diff --git a/chromium/gpu/command_buffer/service/external_vk_image_dawn_representation.h b/chromium/gpu/command_buffer/service/external_vk_image_dawn_representation.h
index f26e0d12f06..03032411caa 100644
--- a/chromium/gpu/command_buffer/service/external_vk_image_dawn_representation.h
+++ b/chromium/gpu/command_buffer/service/external_vk_image_dawn_representation.h
@@ -5,6 +5,7 @@
#ifndef GPU_COMMAND_BUFFER_SERVICE_EXTERNAL_VK_IMAGE_DAWN_REPRESENTATION_H_
#define GPU_COMMAND_BUFFER_SERVICE_EXTERNAL_VK_IMAGE_DAWN_REPRESENTATION_H_
+#include "base/files/scoped_file.h"
#include "gpu/command_buffer/service/external_vk_image_backing.h"
#include "gpu/command_buffer/service/shared_image_representation.h"
@@ -17,9 +18,7 @@ class ExternalVkImageDawnRepresentation : public SharedImageRepresentationDawn {
MemoryTypeTracker* tracker,
WGPUDevice device,
WGPUTextureFormat dawn_format,
- int memory_fd,
- VkDeviceSize allocation_size,
- uint32_t memory_type_index);
+ base::ScopedFD memory_fd);
~ExternalVkImageDawnRepresentation() override;
WGPUTexture BeginAccess(WGPUTextureUsage usage) override;
@@ -28,9 +27,7 @@ class ExternalVkImageDawnRepresentation : public SharedImageRepresentationDawn {
private:
const WGPUDevice device_;
const WGPUTextureFormat wgpu_format_;
- const int memory_fd_;
- const VkDeviceSize allocation_size_;
- const uint32_t memory_type_index_;
+ base::ScopedFD memory_fd_;
WGPUTexture texture_ = nullptr;
diff --git a/chromium/gpu/command_buffer/service/external_vk_image_factory.cc b/chromium/gpu/command_buffer/service/external_vk_image_factory.cc
index 5831d6e6e87..86441a1659f 100644
--- a/chromium/gpu/command_buffer/service/external_vk_image_factory.cc
+++ b/chromium/gpu/command_buffer/service/external_vk_image_factory.cc
@@ -4,9 +4,8 @@
#include "gpu/command_buffer/service/external_vk_image_factory.h"
-#include <unistd.h>
-
#include "components/viz/common/gpu/vulkan_context_provider.h"
+#include "components/viz/common/resources/resource_format_utils.h"
#include "gpu/command_buffer/service/external_vk_image_backing.h"
#include "gpu/command_buffer/service/shared_image_representation.h"
#include "gpu/vulkan/vulkan_command_buffer.h"
@@ -18,12 +17,59 @@
namespace gpu {
+namespace {
+
+VkImageUsageFlags GetMaximalImageUsageFlags(
+ VkFormatFeatureFlags feature_flags) {
+ VkImageUsageFlags usage_flags = 0;
+ if (feature_flags & VK_FORMAT_FEATURE_SAMPLED_IMAGE_BIT)
+ usage_flags |= VK_IMAGE_USAGE_SAMPLED_BIT;
+ if (feature_flags & VK_FORMAT_FEATURE_STORAGE_IMAGE_BIT)
+ usage_flags |= VK_IMAGE_USAGE_STORAGE_BIT;
+ if (feature_flags & VK_FORMAT_FEATURE_COLOR_ATTACHMENT_BIT)
+ usage_flags |= VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT;
+ if (feature_flags & VK_FORMAT_FEATURE_DEPTH_STENCIL_ATTACHMENT_BIT)
+ usage_flags |= VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT;
+ if (feature_flags & VK_FORMAT_FEATURE_TRANSFER_SRC_BIT)
+ usage_flags |= VK_IMAGE_USAGE_TRANSFER_SRC_BIT;
+ if (feature_flags & VK_FORMAT_FEATURE_TRANSFER_DST_BIT)
+ usage_flags |= VK_IMAGE_USAGE_TRANSFER_DST_BIT;
+ usage_flags |= VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT;
+ return usage_flags;
+}
+
+VulkanImageUsageCache CreateImageUsageCache(
+ VkPhysicalDevice vk_physical_device) {
+ VulkanImageUsageCache image_usage_cache = {};
+
+ for (int i = 0; i <= static_cast<int>(viz::RESOURCE_FORMAT_MAX); ++i) {
+ viz::ResourceFormat format = static_cast<viz::ResourceFormat>(i);
+ if (!viz::HasVkFormat(format))
+ continue;
+ VkFormat vk_format = viz::ToVkFormat(format);
+ DCHECK_NE(vk_format, VK_FORMAT_UNDEFINED);
+ VkFormatProperties format_props = {};
+ vkGetPhysicalDeviceFormatProperties(vk_physical_device, vk_format,
+ &format_props);
+ image_usage_cache.optimal_tiling_usage[format] =
+ GetMaximalImageUsageFlags(format_props.optimalTilingFeatures);
+ }
+
+ return image_usage_cache;
+}
+
+} // namespace
+
ExternalVkImageFactory::ExternalVkImageFactory(
SharedContextState* context_state)
: context_state_(context_state),
command_pool_(context_state_->vk_context_provider()
->GetDeviceQueue()
- ->CreateCommandPool()) {}
+ ->CreateCommandPool()),
+ image_usage_cache_(
+ CreateImageUsageCache(context_state_->vk_context_provider()
+ ->GetDeviceQueue()
+ ->GetVulkanPhysicalDevice())) {}
ExternalVkImageFactory::~ExternalVkImageFactory() {
if (command_pool_) {
@@ -37,14 +83,15 @@ ExternalVkImageFactory::~ExternalVkImageFactory() {
std::unique_ptr<SharedImageBacking> ExternalVkImageFactory::CreateSharedImage(
const Mailbox& mailbox,
viz::ResourceFormat format,
+ SurfaceHandle surface_handle,
const gfx::Size& size,
const gfx::ColorSpace& color_space,
uint32_t usage,
bool is_thread_safe) {
DCHECK(!is_thread_safe);
- return ExternalVkImageBacking::Create(context_state_, command_pool_.get(),
- mailbox, format, size, color_space,
- usage, base::span<const uint8_t>());
+ return ExternalVkImageBacking::Create(
+ context_state_, command_pool_.get(), mailbox, format, size, color_space,
+ usage, &image_usage_cache_, base::span<const uint8_t>());
}
std::unique_ptr<SharedImageBacking> ExternalVkImageFactory::CreateSharedImage(
@@ -56,7 +103,7 @@ std::unique_ptr<SharedImageBacking> ExternalVkImageFactory::CreateSharedImage(
base::span<const uint8_t> pixel_data) {
return ExternalVkImageBacking::Create(context_state_, command_pool_.get(),
mailbox, format, size, color_space,
- usage, pixel_data);
+ usage, &image_usage_cache_, pixel_data);
}
std::unique_ptr<SharedImageBacking> ExternalVkImageFactory::CreateSharedImage(
@@ -71,7 +118,7 @@ std::unique_ptr<SharedImageBacking> ExternalVkImageFactory::CreateSharedImage(
DCHECK(CanImportGpuMemoryBuffer(handle.type));
return ExternalVkImageBacking::CreateFromGMB(
context_state_, command_pool_.get(), mailbox, std::move(handle),
- buffer_format, size, color_space, usage);
+ buffer_format, size, color_space, usage, &image_usage_cache_);
}
bool ExternalVkImageFactory::CanImportGpuMemoryBuffer(
diff --git a/chromium/gpu/command_buffer/service/external_vk_image_factory.h b/chromium/gpu/command_buffer/service/external_vk_image_factory.h
index 383b2df6706..6f1a57a1bee 100644
--- a/chromium/gpu/command_buffer/service/external_vk_image_factory.h
+++ b/chromium/gpu/command_buffer/service/external_vk_image_factory.h
@@ -8,6 +8,7 @@
#include <vulkan/vulkan.h>
#include <memory>
+#include "gpu/command_buffer/service/external_vk_image_backing.h"
#include "gpu/command_buffer/service/shared_image_backing_factory.h"
namespace gpu {
@@ -21,13 +22,14 @@ class VulkanCommandPool;
// that allow it to be exported out and shared with GL.
class ExternalVkImageFactory : public SharedImageBackingFactory {
public:
- ExternalVkImageFactory(SharedContextState* context_state);
+ explicit ExternalVkImageFactory(SharedContextState* context_state);
~ExternalVkImageFactory() override;
// SharedImageBackingFactory implementation.
std::unique_ptr<SharedImageBacking> CreateSharedImage(
const Mailbox& mailbox,
viz::ResourceFormat format,
+ SurfaceHandle surface_handle,
const gfx::Size& size,
const gfx::ColorSpace& color_space,
uint32_t usage,
@@ -61,6 +63,8 @@ class ExternalVkImageFactory : public SharedImageBackingFactory {
SharedContextState* const context_state_;
std::unique_ptr<VulkanCommandPool> command_pool_;
+ const VulkanImageUsageCache image_usage_cache_;
+
DISALLOW_COPY_AND_ASSIGN(ExternalVkImageFactory);
};
diff --git a/chromium/gpu/command_buffer/service/external_vk_image_gl_representation.cc b/chromium/gpu/command_buffer/service/external_vk_image_gl_representation.cc
index 58f60deabe2..a1a5bbe1448 100644
--- a/chromium/gpu/command_buffer/service/external_vk_image_gl_representation.cc
+++ b/chromium/gpu/command_buffer/service/external_vk_image_gl_representation.cc
@@ -11,6 +11,7 @@
#include "build/build_config.h"
#include "gpu/vulkan/vulkan_function_pointers.h"
#include "gpu/vulkan/vulkan_implementation.h"
+#include "gpu/vulkan/vulkan_util.h"
#define GL_LAYOUT_GENERAL_EXT 0x958D
#define GL_LAYOUT_COLOR_ATTACHMENT_EXT 0x958E
@@ -24,6 +25,11 @@
#define GL_HANDLE_TYPE_OPAQUE_FD_EXT 0x9586
+#if defined(OS_FUCHSIA)
+#define GL_HANDLE_TYPE_ZIRCON_VMO_ANGLE 0x93AE
+#define GL_HANDLE_TYPE_ZIRCON_EVENT_ANGLE 0x93AF
+#endif
+
namespace gpu {
namespace {
@@ -166,6 +172,12 @@ void ExternalVkImageGLRepresentationShared::EndAccess() {
api()->glSignalSemaphoreEXTFn(gl_semaphore, 0, nullptr, 1,
&texture_service_id_, &dst_layout);
api()->glDeleteSemaphoresEXTFn(1, &gl_semaphore);
+  // Per the spec, the glSignalSemaphoreEXT() call just inserts a signal
+  // semaphore command into the GL context. It may or may not flush the
+  // context, depending on the implementation. To be safe, we always call
+  // glFlush() here. If the implementation does flush in the
+  // glSignalSemaphoreEXT() call, the glFlush() call should be a no-op.
+ api()->glFlushFn();
}
backing_impl()->EndAccess(readonly, std::move(semaphore_handle),
@@ -176,10 +188,7 @@ GLuint ExternalVkImageGLRepresentationShared::ImportVkSemaphoreIntoGL(
SemaphoreHandle handle) {
if (!handle.is_valid())
return 0;
-#if defined(OS_FUCHSIA)
- NOTIMPLEMENTED_LOG_ONCE();
- return 0;
-#elif defined(OS_LINUX)
+#if defined(OS_LINUX) || defined(OS_ANDROID)
if (handle.vk_handle_type() !=
VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT) {
DLOG(ERROR) << "Importing semaphore handle of unexpected type:"
@@ -194,7 +203,24 @@ GLuint ExternalVkImageGLRepresentationShared::ImportVkSemaphoreIntoGL(
fd.release());
return gl_semaphore;
-#else // !defined(OS_FUCHSIA) && !defined(OS_LINUX)
+#elif defined(OS_FUCHSIA)
+ if (handle.vk_handle_type() !=
+ VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_TEMP_ZIRCON_EVENT_BIT_FUCHSIA) {
+ DLOG(ERROR) << "Importing semaphore handle of unexpected type:"
+ << handle.vk_handle_type();
+ return 0;
+ }
+ zx::event event = handle.TakeHandle();
+ gl::GLApi* api = gl::g_current_gl_context;
+ GLuint gl_semaphore;
+ api->glGenSemaphoresEXTFn(1, &gl_semaphore);
+ api->glImportSemaphoreZirconHandleANGLEFn(
+ gl_semaphore, GL_HANDLE_TYPE_ZIRCON_EVENT_ANGLE, event.release());
+ return gl_semaphore;
+#elif defined(OS_WIN)
+ NOTIMPLEMENTED();
+ return 0;
+#else
#error Unsupported OS
#endif
}
diff --git a/chromium/gpu/command_buffer/service/external_vk_image_skia_representation.cc b/chromium/gpu/command_buffer/service/external_vk_image_skia_representation.cc
index d4faa916394..fd1b9fb6d2c 100644
--- a/chromium/gpu/command_buffer/service/external_vk_image_skia_representation.cc
+++ b/chromium/gpu/command_buffer/service/external_vk_image_skia_representation.cc
@@ -140,4 +140,4 @@ void ExternalVkImageSkiaRepresentation::EndAccess(bool readonly) {
backing_impl()->EndAccess(readonly, std::move(handle), false /* is_gl */);
}
-} // namespace gpu \ No newline at end of file
+} // namespace gpu
diff --git a/chromium/gpu/command_buffer/service/feature_info.cc b/chromium/gpu/command_buffer/service/feature_info.cc
index 4381c016645..a47b5da7fe7 100644
--- a/chromium/gpu/command_buffer/service/feature_info.cc
+++ b/chromium/gpu/command_buffer/service/feature_info.cc
@@ -579,12 +579,68 @@ void FeatureInfo::InitializeFeatures() {
GL_COMPRESSED_RGBA_S3TC_DXT5_EXT);
}
+ bool have_bptc =
+ gfx::HasExtension(extensions, "GL_EXT_texture_compression_bptc") ||
+ gl_version_info_->IsAtLeastGL(4, 2) ||
+ gfx::HasExtension(extensions, "GL_ARB_texture_compression_bptc");
+ if (have_bptc) {
+ feature_flags_.ext_texture_compression_bptc = true;
+ AddExtensionString("GL_EXT_texture_compression_bptc");
+ validators_.compressed_texture_format.AddValue(
+ GL_COMPRESSED_RGBA_BPTC_UNORM_EXT);
+ validators_.compressed_texture_format.AddValue(
+ GL_COMPRESSED_SRGB_ALPHA_BPTC_UNORM_EXT);
+ validators_.compressed_texture_format.AddValue(
+ GL_COMPRESSED_RGB_BPTC_SIGNED_FLOAT_EXT);
+ validators_.compressed_texture_format.AddValue(
+ GL_COMPRESSED_RGB_BPTC_UNSIGNED_FLOAT_EXT);
+ validators_.texture_internal_format_storage.AddValue(
+ GL_COMPRESSED_RGBA_BPTC_UNORM_EXT);
+ validators_.texture_internal_format_storage.AddValue(
+ GL_COMPRESSED_SRGB_ALPHA_BPTC_UNORM_EXT);
+ validators_.texture_internal_format_storage.AddValue(
+ GL_COMPRESSED_RGB_BPTC_SIGNED_FLOAT_EXT);
+ validators_.texture_internal_format_storage.AddValue(
+ GL_COMPRESSED_RGB_BPTC_UNSIGNED_FLOAT_EXT);
+ }
+
+ bool have_rgtc =
+ gfx::HasExtension(extensions, "GL_EXT_texture_compression_rgtc") ||
+ gl_version_info_->IsAtLeastGL(3, 0) ||
+ gfx::HasExtension(extensions, "GL_ARB_texture_compression_rgtc");
+ if (have_rgtc) {
+ feature_flags_.ext_texture_compression_rgtc = true;
+ AddExtensionString("GL_EXT_texture_compression_rgtc");
+ validators_.compressed_texture_format.AddValue(GL_COMPRESSED_RED_RGTC1_EXT);
+ validators_.compressed_texture_format.AddValue(
+ GL_COMPRESSED_SIGNED_RED_RGTC1_EXT);
+ validators_.compressed_texture_format.AddValue(
+ GL_COMPRESSED_RED_GREEN_RGTC2_EXT);
+ validators_.compressed_texture_format.AddValue(
+ GL_COMPRESSED_SIGNED_RED_GREEN_RGTC2_EXT);
+ validators_.texture_internal_format_storage.AddValue(
+ GL_COMPRESSED_RED_RGTC1_EXT);
+ validators_.texture_internal_format_storage.AddValue(
+ GL_COMPRESSED_SIGNED_RED_RGTC1_EXT);
+ validators_.texture_internal_format_storage.AddValue(
+ GL_COMPRESSED_RED_GREEN_RGTC2_EXT);
+ validators_.texture_internal_format_storage.AddValue(
+ GL_COMPRESSED_SIGNED_RED_GREEN_RGTC2_EXT);
+ }
+
bool have_astc =
gfx::HasExtension(extensions, "GL_KHR_texture_compression_astc_ldr");
if (have_astc) {
feature_flags_.ext_texture_format_astc = true;
AddExtensionString("GL_KHR_texture_compression_astc_ldr");
+ bool have_astc_hdr =
+ gfx::HasExtension(extensions, "GL_KHR_texture_compression_astc_hdr");
+ if (have_astc_hdr) {
+ feature_flags_.ext_texture_format_astc_hdr = true;
+ AddExtensionString("GL_KHR_texture_compression_astc_hdr");
+ }
+
// GL_COMPRESSED_RGBA_ASTC(0x93B0 ~ 0x93BD)
GLint astc_format_it = GL_COMPRESSED_RGBA_ASTC_4x4_KHR;
GLint astc_format_max = GL_COMPRESSED_RGBA_ASTC_12x12_KHR;
@@ -762,7 +818,7 @@ void FeatureInfo::InitializeFeatures() {
if (gl_version_info_->is_es3)
has_srgb_framebuffer_support = true;
- if (has_srgb_framebuffer_support && !IsWebGLContext()) {
+ if (has_srgb_framebuffer_support) {
// GL_FRAMEBUFFER_SRGB_EXT is exposed by the GLES extension
// GL_EXT_sRGB_write_control (which is not part of the core, even in GLES3),
// and the desktop extension GL_ARB_framebuffer_sRGB (part of the core in
@@ -770,8 +826,12 @@ void FeatureInfo::InitializeFeatures() {
if (feature_flags_.desktop_srgb_support ||
gfx::HasExtension(extensions, "GL_EXT_sRGB_write_control")) {
feature_flags_.ext_srgb_write_control = true;
- AddExtensionString("GL_EXT_sRGB_write_control");
- validators_.capability.AddValue(GL_FRAMEBUFFER_SRGB_EXT);
+
+ // Do not expose this extension to WebGL.
+ if (!IsWebGLContext()) {
+ AddExtensionString("GL_EXT_sRGB_write_control");
+ validators_.capability.AddValue(GL_FRAMEBUFFER_SRGB_EXT);
+ }
}
}
@@ -1032,6 +1092,15 @@ void FeatureInfo::InitializeFeatures() {
if (gfx::HasExtension(extensions, "GL_OES_EGL_image_external")) {
AddExtensionString("GL_OES_EGL_image_external");
feature_flags_.oes_egl_image_external = true;
+
+ // In many places we check oes_egl_image_external to know whether
+ // TEXTURE_EXTERNAL_OES is valid. Drivers with the _essl3 version *should*
+ // have both. But to be safe, only enable the _essl3 version if the
+ // non-_essl3 version is available.
+ if (gfx::HasExtension(extensions, "GL_OES_EGL_image_external_essl3")) {
+ AddExtensionString("GL_OES_EGL_image_external_essl3");
+ feature_flags_.oes_egl_image_external_essl3 = true;
+ }
}
if (gfx::HasExtension(extensions, "GL_NV_EGL_stream_consumer_external")) {
AddExtensionString("GL_NV_EGL_stream_consumer_external");
@@ -1125,31 +1194,31 @@ void FeatureInfo::InitializeFeatures() {
}
#if defined(OS_MACOSX)
- // Mac can create GLImages out of XR30 IOSurfaces only after High Sierra.
- feature_flags_.chromium_image_xr30 = base::mac::IsAtLeastOS10_13();
+ // Mac can create GLImages out of AR30 IOSurfaces only after High Sierra.
+ feature_flags_.chromium_image_ar30 = base::mac::IsAtLeastOS10_13();
#elif !defined(OS_WIN)
// TODO(mcasas): connect in Windows, https://crbug.com/803451
// XB30 support was introduced in GLES 3.0/ OpenGL 3.3, before that it was
// signalled via a specific extension.
- feature_flags_.chromium_image_xb30 =
+ feature_flags_.chromium_image_ab30 =
gl_version_info_->IsAtLeastGL(3, 3) ||
gl_version_info_->IsAtLeastGLES(3, 0) ||
gfx::HasExtension(extensions, "GL_EXT_texture_type_2_10_10_10_REV");
#endif
- if (feature_flags_.chromium_image_xr30 ||
- feature_flags_.chromium_image_xb30) {
+ if (feature_flags_.chromium_image_ar30 ||
+ feature_flags_.chromium_image_ab30) {
validators_.texture_internal_format.AddValue(GL_RGB10_A2_EXT);
validators_.render_buffer_format.AddValue(GL_RGB10_A2_EXT);
validators_.texture_internal_format_storage.AddValue(GL_RGB10_A2_EXT);
validators_.pixel_type.AddValue(GL_UNSIGNED_INT_2_10_10_10_REV);
}
- if (feature_flags_.chromium_image_xr30) {
+ if (feature_flags_.chromium_image_ar30) {
feature_flags_.gpu_memory_buffer_formats.Add(
- gfx::BufferFormat::BGRX_1010102);
+ gfx::BufferFormat::BGRA_1010102);
}
- if (feature_flags_.chromium_image_xb30) {
+ if (feature_flags_.chromium_image_ab30) {
feature_flags_.gpu_memory_buffer_formats.Add(
- gfx::BufferFormat::RGBX_1010102);
+ gfx::BufferFormat::RGBA_1010102);
}
if (feature_flags_.chromium_image_ycbcr_p010) {
@@ -1350,26 +1419,6 @@ void FeatureInfo::InitializeFeatures() {
validators_.g_l_state.AddValue(GL_COVERAGE_MODULATION_CHROMIUM);
}
- if (gfx::HasExtension(extensions, "GL_NV_path_rendering")) {
- bool has_dsa = gl_version_info_->IsAtLeastGL(4, 5) ||
- gfx::HasExtension(extensions, "GL_EXT_direct_state_access");
- bool has_piq =
- gl_version_info_->IsAtLeastGL(4, 3) ||
- gfx::HasExtension(extensions, "GL_ARB_program_interface_query");
- bool has_fms = feature_flags_.chromium_framebuffer_mixed_samples;
- if ((gl_version_info_->IsAtLeastGLES(3, 1) ||
- (gl_version_info_->IsAtLeastGL(3, 2) && has_dsa && has_piq)) &&
- has_fms) {
- AddExtensionString("GL_CHROMIUM_path_rendering");
- feature_flags_.chromium_path_rendering = true;
- validators_.g_l_state.AddValue(GL_PATH_MODELVIEW_MATRIX_CHROMIUM);
- validators_.g_l_state.AddValue(GL_PATH_PROJECTION_MATRIX_CHROMIUM);
- validators_.g_l_state.AddValue(GL_PATH_STENCIL_FUNC_CHROMIUM);
- validators_.g_l_state.AddValue(GL_PATH_STENCIL_REF_CHROMIUM);
- validators_.g_l_state.AddValue(GL_PATH_STENCIL_VALUE_MASK_CHROMIUM);
- }
- }
-
if ((gl_version_info_->is_es3 || gl_version_info_->is_desktop_core_profile ||
gfx::HasExtension(extensions, "GL_EXT_texture_rg") ||
gfx::HasExtension(extensions, "GL_ARB_texture_rg")) &&
@@ -1402,23 +1451,60 @@ void FeatureInfo::InitializeFeatures() {
}
UMA_HISTOGRAM_BOOLEAN("GPU.TextureRG", feature_flags_.ext_texture_rg);
- if (gl_version_info_->is_desktop_core_profile ||
- (gl_version_info_->IsAtLeastGL(2, 1) &&
- gfx::HasExtension(extensions, "GL_ARB_texture_rg")) ||
- gfx::HasExtension(extensions, "GL_EXT_texture_norm16")) {
- // TODO(hubbe): Rename ext_texture_norm16 to texture_r16
+ if (IsWebGL2OrES3OrHigherContext() &&
+ (gl_version_info_->is_desktop_core_profile ||
+ (gl_version_info_->IsAtLeastGL(2, 1) &&
+ gfx::HasExtension(extensions, "GL_ARB_texture_rg")) ||
+ gfx::HasExtension(extensions, "GL_EXT_texture_norm16"))) {
+ AddExtensionString("GL_EXT_texture_norm16");
feature_flags_.ext_texture_norm16 = true;
g_r16_is_present = true;
- // Note: EXT_texture_norm16 is not exposed through WebGL API so we validate
- // only the combinations used internally.
validators_.pixel_type.AddValue(GL_UNSIGNED_SHORT);
+ validators_.pixel_type.AddValue(GL_SHORT);
+
validators_.texture_format.AddValue(GL_RED_EXT);
+ validators_.texture_format.AddValue(GL_RG_EXT);
+
validators_.texture_internal_format.AddValue(GL_R16_EXT);
+ validators_.texture_internal_format.AddValue(GL_RG16_EXT);
+ validators_.texture_internal_format.AddValue(GL_RGB16_EXT);
+ validators_.texture_internal_format.AddValue(GL_RGBA16_EXT);
+ validators_.texture_internal_format.AddValue(GL_R16_SNORM_EXT);
+ validators_.texture_internal_format.AddValue(GL_RG16_SNORM_EXT);
+ validators_.texture_internal_format.AddValue(GL_RGB16_SNORM_EXT);
+ validators_.texture_internal_format.AddValue(GL_RGBA16_SNORM_EXT);
validators_.texture_internal_format.AddValue(GL_RED_EXT);
+
+ validators_.read_pixel_format.AddValue(GL_R16_EXT);
+ validators_.read_pixel_format.AddValue(GL_RG16_EXT);
+ validators_.read_pixel_format.AddValue(GL_RGBA16_EXT);
+
+ validators_.render_buffer_format.AddValue(GL_R16_EXT);
+ validators_.render_buffer_format.AddValue(GL_RG16_EXT);
+ validators_.render_buffer_format.AddValue(GL_RGBA16_EXT);
+
validators_.texture_unsized_internal_format.AddValue(GL_RED_EXT);
+ validators_.texture_unsized_internal_format.AddValue(GL_RG_EXT);
+
validators_.texture_internal_format_storage.AddValue(GL_R16_EXT);
+ validators_.texture_internal_format_storage.AddValue(GL_RG16_EXT);
+ validators_.texture_internal_format_storage.AddValue(GL_RGB16_EXT);
+ validators_.texture_internal_format_storage.AddValue(GL_RGBA16_EXT);
+ validators_.texture_internal_format_storage.AddValue(GL_R16_SNORM_EXT);
+ validators_.texture_internal_format_storage.AddValue(GL_RG16_SNORM_EXT);
+ validators_.texture_internal_format_storage.AddValue(GL_RGB16_SNORM_EXT);
+ validators_.texture_internal_format_storage.AddValue(GL_RGBA16_SNORM_EXT);
+
+ validators_.texture_sized_color_renderable_internal_format.AddValue(
+ GL_R16_EXT);
+ validators_.texture_sized_color_renderable_internal_format.AddValue(
+ GL_RG16_EXT);
+ validators_.texture_sized_color_renderable_internal_format.AddValue(
+ GL_RGBA16_EXT);
+    // TODO(shrekshao): gpu_memory_buffer_formats is not used by WebGL, so
+    // not all buffer formats are exposed here.
feature_flags_.gpu_memory_buffer_formats.Add(gfx::BufferFormat::R_16);
}
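
The validator entries added above are what let a GL_EXT_texture_norm16 context accept 16-bit normalized textures through the command buffer. A minimal client-side sketch of an allocation and upload these entries permit (a sketch only, assuming an ES3 context where the extension string was exposed and the GLES3 headers are included):

    GLuint tex = 0;
    glGenTextures(1, &tex);
    glBindTexture(GL_TEXTURE_2D, tex);
    // GL_R16_EXT is now accepted as a sized storage format...
    glTexStorage2D(GL_TEXTURE_2D, 1, GL_R16_EXT, 64, 64);
    // ...and GL_RED / GL_UNSIGNED_SHORT as the upload format/type pair.
    GLushort pixels[64 * 64] = {};
    glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, 64, 64, GL_RED, GL_UNSIGNED_SHORT,
                    pixels);

Per the render_buffer_format and read_pixel_format additions, the R16/RG16/RGBA16 formats also become color-renderable, and the decoder change further down accepts GL_RGBA / GL_UNSIGNED_SHORT for them in ReadPixels.
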
diff --git a/chromium/gpu/command_buffer/service/feature_info.h b/chromium/gpu/command_buffer/service/feature_info.h
index 2ca859e9cd8..4a3255b6b6e 100644
--- a/chromium/gpu/command_buffer/service/feature_info.h
+++ b/chromium/gpu/command_buffer/service/feature_info.h
@@ -53,6 +53,7 @@ class GPU_GLES2_EXPORT FeatureInfo : public base::RefCounted<FeatureInfo> {
bool use_chromium_screen_space_antialiasing_via_shaders = false;
bool oes_standard_derivatives = false;
bool oes_egl_image_external = false;
+ bool oes_egl_image_external_essl3 = false;
bool nv_egl_stream_consumer_external = false;
bool oes_depth24 = false;
bool oes_compressed_etc1_rgb8_texture = false;
@@ -73,6 +74,7 @@ class GPU_GLES2_EXPORT FeatureInfo : public base::RefCounted<FeatureInfo> {
bool use_arb_occlusion_query_for_occlusion_query_boolean = false;
bool native_vertex_array_object = false;
bool ext_texture_format_astc = false;
+ bool ext_texture_format_astc_hdr = false;
bool ext_texture_format_atc = false;
bool ext_texture_format_bgra8888 = false;
bool ext_texture_format_dxt1 = false;
@@ -92,7 +94,6 @@ class GPU_GLES2_EXPORT FeatureInfo : public base::RefCounted<FeatureInfo> {
bool chromium_texture_filtering_hint = false;
bool angle_texture_usage = false;
bool ext_texture_storage = false;
- bool chromium_path_rendering = false;
bool chromium_raster_transport = false;
bool chromium_framebuffer_mixed_samples = false;
bool blend_equation_advanced = false;
@@ -101,8 +102,8 @@ class GPU_GLES2_EXPORT FeatureInfo : public base::RefCounted<FeatureInfo> {
bool ext_texture_norm16 = false;
bool chromium_image_ycbcr_420v = false;
bool chromium_image_ycbcr_422 = false;
- bool chromium_image_xr30 = false;
- bool chromium_image_xb30 = false;
+ bool chromium_image_ar30 = false;
+ bool chromium_image_ab30 = false;
bool chromium_image_ycbcr_p010 = false;
bool emulate_primitive_restart_fixed_index = false;
bool ext_render_buffer_format_bgra8888 = false;
@@ -148,6 +149,8 @@ class GPU_GLES2_EXPORT FeatureInfo : public base::RefCounted<FeatureInfo> {
bool oes_fbo_render_mipmap = false;
bool webgl_draw_instanced_base_vertex_base_instance = false;
bool webgl_multi_draw_instanced_base_vertex_base_instance = false;
+ bool ext_texture_compression_bptc = false;
+ bool ext_texture_compression_rgtc = false;
};
FeatureInfo();
diff --git a/chromium/gpu/command_buffer/service/feature_info_unittest.cc b/chromium/gpu/command_buffer/service/feature_info_unittest.cc
index 0865a9bde21..e032a4dc398 100644
--- a/chromium/gpu/command_buffer/service/feature_info_unittest.cc
+++ b/chromium/gpu/command_buffer/service/feature_info_unittest.cc
@@ -1596,49 +1596,6 @@ TEST_P(FeatureInfoTest, BlendEquationAdvancedDisabled) {
EXPECT_FALSE(info_->feature_flags().blend_equation_advanced_coherent);
}
-TEST_P(FeatureInfoTest, InitializeCHROMIUM_path_rendering) {
- SetupInitExpectationsWithGLVersion(
- "GL_ARB_compatibility GL_NV_path_rendering GL_EXT_direct_state_access "
- "GL_NV_framebuffer_mixed_samples",
- "", "4.3");
- EXPECT_TRUE(info_->feature_flags().chromium_path_rendering);
- EXPECT_TRUE(
- gfx::HasExtension(info_->extensions(), "GL_CHROMIUM_path_rendering"));
-}
-
-TEST_P(FeatureInfoTest, InitializeCHROMIUM_path_rendering2) {
- SetupInitExpectationsWithGLVersion(
- "GL_NV_path_rendering GL_NV_framebuffer_mixed_samples", "",
- "OpenGL ES 3.1");
- EXPECT_TRUE(info_->feature_flags().chromium_path_rendering);
- EXPECT_TRUE(
- gfx::HasExtension(info_->extensions(), "GL_CHROMIUM_path_rendering"));
-}
-
-TEST_P(FeatureInfoTest, InitializeNoCHROMIUM_path_rendering) {
- SetupInitExpectationsWithGLVersion("GL_ARB_compatibility", "", "4.3");
- EXPECT_FALSE(info_->feature_flags().chromium_path_rendering);
- EXPECT_FALSE(
- gfx::HasExtension(info_->extensions(), "GL_CHROMIUM_path_rendering"));
-}
-
-TEST_P(FeatureInfoTest, InitializeNoCHROMIUM_path_rendering2) {
- SetupInitExpectationsWithGLVersion(
- "GL_ARB_compatibility GL_NV_path_rendering", "", "4.3");
- EXPECT_FALSE(info_->feature_flags().chromium_path_rendering);
- EXPECT_FALSE(
- gfx::HasExtension(info_->extensions(), "GL_CHROMIUM_path_rendering"));
-}
-
-TEST_P(FeatureInfoTest, InitializeNoCHROMIUM_path_rendering3) {
- // Missing framebuffer mixed samples.
- SetupInitExpectationsWithGLVersion("GL_NV_path_rendering", "",
- "OpenGL ES 3.1");
- EXPECT_FALSE(info_->feature_flags().chromium_path_rendering);
- EXPECT_FALSE(
- gfx::HasExtension(info_->extensions(), "GL_CHROMIUM_path_rendering"));
-}
-
TEST_P(FeatureInfoTest, InitializeNoKHR_blend_equation_advanced) {
SetupInitExpectationsWithGLVersion("GL_ARB_compatibility", "", "4.3");
EXPECT_FALSE(info_->feature_flags().blend_equation_advanced);
@@ -1706,13 +1663,38 @@ TEST_P(FeatureInfoTest, InitializeARB_texture_rgNoFloat) {
TEST_P(FeatureInfoTest, InitializeEXT_texture_norm16) {
SetupInitExpectations("GL_EXT_texture_norm16");
+
+ if (!info_->IsWebGL2OrES3OrHigherContext()) {
+ return;
+ }
+
EXPECT_TRUE(info_->feature_flags().ext_texture_norm16);
EXPECT_TRUE(info_->validators()->texture_format.IsValid(GL_RED_EXT));
+ EXPECT_TRUE(info_->validators()->texture_format.IsValid(GL_RG_EXT));
+ EXPECT_TRUE(info_->validators()->texture_format.IsValid(GL_RGB));
+ EXPECT_TRUE(info_->validators()->texture_format.IsValid(GL_RGBA));
EXPECT_TRUE(info_->validators()->texture_internal_format.IsValid(GL_R16_EXT));
- EXPECT_TRUE(info_->validators()->texture_internal_format.IsValid(GL_RED_EXT));
+ EXPECT_TRUE(
+ info_->validators()->texture_internal_format.IsValid(GL_RG16_EXT));
+ EXPECT_TRUE(
+ info_->validators()->texture_internal_format.IsValid(GL_RGB16_EXT));
+ EXPECT_TRUE(
+ info_->validators()->texture_internal_format.IsValid(GL_RGBA16_EXT));
+ EXPECT_TRUE(info_->validators()->read_pixel_format.IsValid(GL_R16_EXT));
+ EXPECT_TRUE(info_->validators()->read_pixel_format.IsValid(GL_RG16_EXT));
+ EXPECT_TRUE(info_->validators()->read_pixel_format.IsValid(GL_RGBA16_EXT));
+ EXPECT_TRUE(info_->validators()->render_buffer_format.IsValid(GL_R16_EXT));
+ EXPECT_TRUE(info_->validators()->render_buffer_format.IsValid(GL_RG16_EXT));
+ EXPECT_TRUE(info_->validators()->render_buffer_format.IsValid(GL_RGBA16_EXT));
EXPECT_TRUE(
info_->validators()->texture_internal_format_storage.IsValid(GL_R16_EXT));
+ EXPECT_TRUE(info_->validators()->texture_internal_format_storage.IsValid(
+ GL_RG16_EXT));
+ EXPECT_TRUE(info_->validators()->texture_internal_format_storage.IsValid(
+ GL_RGB16_EXT));
+ EXPECT_TRUE(info_->validators()->texture_internal_format_storage.IsValid(
+ GL_RGBA16_EXT));
}
TEST_P(FeatureInfoTest, InitializeCHROMIUM_unpremultiply_and_dither_copy) {
diff --git a/chromium/gpu/command_buffer/service/framebuffer_manager_unittest.cc b/chromium/gpu/command_buffer/service/framebuffer_manager_unittest.cc
index 22be5a54c8c..5cf6f4f9f5d 100644
--- a/chromium/gpu/command_buffer/service/framebuffer_manager_unittest.cc
+++ b/chromium/gpu/command_buffer/service/framebuffer_manager_unittest.cc
@@ -17,6 +17,7 @@
#include "gpu/command_buffer/service/service_discardable_manager.h"
#include "gpu/command_buffer/service/test_helper.h"
#include "gpu/command_buffer/service/texture_manager.h"
+#include "gpu/config/gpu_preferences.h"
#include "testing/gtest/include/gtest/gtest.h"
#include "ui/gl/gl_mock.h"
@@ -44,7 +45,8 @@ class FramebufferManagerTest : public GpuServiceTest {
public:
FramebufferManagerTest()
: manager_(1, 1, nullptr),
- feature_info_(new FeatureInfo()) {
+ feature_info_(new FeatureInfo()),
+ discardable_manager_(GpuPreferences()) {
texture_manager_.reset(new TextureManager(
nullptr, feature_info_.get(), kMaxTextureSize, kMaxCubemapSize,
kMaxRectangleTextureSize, kMax3DTextureSize, kMaxArrayTextureLayers,
@@ -123,7 +125,8 @@ class FramebufferInfoTestBase : public GpuServiceTest {
manager_(kMaxDrawBuffers,
kMaxColorAttachments,
&framebuffer_completeness_cache_),
- feature_info_(new FeatureInfo()) {
+ feature_info_(new FeatureInfo()),
+ discardable_manager_(GpuPreferences()) {
texture_manager_.reset(new TextureManager(
nullptr, feature_info_.get(), kMaxTextureSize, kMaxCubemapSize,
kMaxRectangleTextureSize, kMax3DTextureSize, kMaxArrayTextureLayers,
diff --git a/chromium/gpu/command_buffer/service/gl_context_virtual_unittest.cc b/chromium/gpu/command_buffer/service/gl_context_virtual_unittest.cc
index 67975fb4078..d8defe0a608 100644
--- a/chromium/gpu/command_buffer/service/gl_context_virtual_unittest.cc
+++ b/chromium/gpu/command_buffer/service/gl_context_virtual_unittest.cc
@@ -49,7 +49,7 @@ TEST_F(GLContextVirtualTest, Reinitialize) {
{
auto base_context = base::MakeRefCounted<gl::GLContextStub>();
gl::GLShareGroup* share_group = base_context->share_group();
- share_group->SetSharedContext(GetGLSurface(), base_context.get());
+ share_group->SetSharedContext(base_context.get());
auto context = base::MakeRefCounted<GLContextVirtual>(
share_group, base_context.get(), decoder_->AsWeakPtr());
EXPECT_TRUE(context->Initialize(GetGLSurface(), gl::GLContextAttribs()));
@@ -58,7 +58,7 @@ TEST_F(GLContextVirtualTest, Reinitialize) {
{
auto base_context = base::MakeRefCounted<gl::GLContextStub>();
gl::GLShareGroup* share_group = base_context->share_group();
- share_group->SetSharedContext(GetGLSurface(), base_context.get());
+ share_group->SetSharedContext(base_context.get());
auto context = base::MakeRefCounted<GLContextVirtual>(
share_group, base_context.get(), decoder_->AsWeakPtr());
EXPECT_TRUE(context->Initialize(GetGLSurface(), gl::GLContextAttribs()));
@@ -77,7 +77,7 @@ TEST_F(GLContextVirtualTest, CheckStickyGraphicsResetStatus) {
base_context->SetExtensionsString(gl_extensions);
gl::GLShareGroup* share_group = base_context->share_group();
- share_group->SetSharedContext(GetGLSurface(), base_context.get());
+ share_group->SetSharedContext(base_context.get());
auto context = base::MakeRefCounted<GLContextVirtual>(
share_group, base_context.get(), decoder_->AsWeakPtr());
EXPECT_TRUE(context->Initialize(GetGLSurface(), gl::GLContextAttribs()));
diff --git a/chromium/gpu/command_buffer/service/gl_surface_mock.h b/chromium/gpu/command_buffer/service/gl_surface_mock.h
index 7812ab7fe4f..0c9ed3b2f17 100644
--- a/chromium/gpu/command_buffer/service/gl_surface_mock.h
+++ b/chromium/gpu/command_buffer/service/gl_surface_mock.h
@@ -21,7 +21,7 @@ class GLSurfaceMock : public gl::GLSurface {
MOCK_METHOD4(Resize,
bool(const gfx::Size& size,
float scale_factor,
- ColorSpace color_space,
+ const gfx::ColorSpace& color_space,
bool alpha));
MOCK_METHOD0(IsOffscreen, bool());
MOCK_METHOD1(SwapBuffers, gfx::SwapResult(PresentationCallback callback));
diff --git a/chromium/gpu/command_buffer/service/gl_utils.cc b/chromium/gpu/command_buffer/service/gl_utils.cc
index 87ceecf75a1..cfd3e07f460 100644
--- a/chromium/gpu/command_buffer/service/gl_utils.cc
+++ b/chromium/gpu/command_buffer/service/gl_utils.cc
@@ -7,7 +7,6 @@
#include <algorithm>
#include <unordered_set>
-#include "base/metrics/histogram.h"
#include "gpu/command_buffer/common/capabilities.h"
#include "gpu/command_buffer/service/error_state.h"
#include "gpu/command_buffer/service/feature_info.h"
@@ -27,6 +26,10 @@ const int kS3TCBlockHeight = 4;
const int kS3TCDXT1BlockSize = 8;
const int kS3TCDXT3AndDXT5BlockSize = 16;
const int kEACAndETC2BlockSize = 4;
+const int kBPTCBlockWidth = 4;
+const int kBPTCBlockHeight = 4;
+const int kRGTCBlockWidth = 4;
+const int kRGTCBlockHeight = 4;
typedef struct {
int blockWidth;
@@ -107,18 +110,6 @@ const char* GetDebugSeverityString(GLenum severity) {
}
} // namespace
-std::vector<int> GetAllGLErrors() {
- int gl_errors[] = {
- GL_NO_ERROR,
- GL_INVALID_ENUM,
- GL_INVALID_VALUE,
- GL_INVALID_OPERATION,
- GL_INVALID_FRAMEBUFFER_OPERATION,
- GL_OUT_OF_MEMORY,
- };
- return base::CustomHistogram::ArrayToCustomEnumRanges(gl_errors);
-}
-
bool PrecisionMeetsSpecForHighpFloat(GLint rangeMin,
GLint rangeMax,
GLint precision) {
@@ -503,6 +494,29 @@ bool GetCompressedTexSizeInBytes(const char* function_name,
bytes_required *= 16;
bytes_required *= depth;
break;
+ case GL_COMPRESSED_SRGB_ALPHA_BPTC_UNORM_EXT:
+ case GL_COMPRESSED_RGBA_BPTC_UNORM_EXT:
+ case GL_COMPRESSED_RGB_BPTC_SIGNED_FLOAT_EXT:
+ case GL_COMPRESSED_RGB_BPTC_UNSIGNED_FLOAT_EXT:
+ bytes_required = (width + kBPTCBlockWidth - 1) / kBPTCBlockWidth;
+ bytes_required *= (height + kBPTCBlockHeight - 1) / kBPTCBlockHeight;
+ bytes_required *= 16;
+ bytes_required *= depth;
+ break;
+ case GL_COMPRESSED_RED_RGTC1_EXT:
+ case GL_COMPRESSED_SIGNED_RED_RGTC1_EXT:
+ bytes_required = (width + kRGTCBlockWidth - 1) / kRGTCBlockWidth;
+ bytes_required *= (height + kRGTCBlockHeight - 1) / kRGTCBlockHeight;
+ bytes_required *= 8;
+ bytes_required *= depth;
+ break;
+ case GL_COMPRESSED_RED_GREEN_RGTC2_EXT:
+ case GL_COMPRESSED_SIGNED_RED_GREEN_RGTC2_EXT:
+ bytes_required = (width + kRGTCBlockWidth - 1) / kRGTCBlockWidth;
+ bytes_required *= (height + kRGTCBlockHeight - 1) / kRGTCBlockHeight;
+ bytes_required *= 16;
+ bytes_required *= depth;
+ break;
default:
if (function_name && error_state) {
ERRORSTATE_SET_GL_ERROR_INVALID_ENUM(error_state, function_name, format,
@@ -523,6 +537,47 @@ bool GetCompressedTexSizeInBytes(const char* function_name,
return true;
}
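
The BPTC and RGTC cases added above use the standard 4x4 block layout: 16 bytes per block for BPTC and RGTC2, 8 bytes per block for RGTC1. A self-contained sketch of the same arithmetic (not the service code, just a sanity check of the ceil-divide):

    // Bytes for one level of a 4x4-block compressed format.
    // block_bytes: 8 for RGTC1, 16 for BPTC and RGTC2.
    int CompressedLevelBytes(int width, int height, int depth, int block_bytes) {
      int blocks_x = (width + 3) / 4;
      int blocks_y = (height + 3) / 4;
      return blocks_x * blocks_y * block_bytes * depth;
    }
    // A 10x10 BPTC level: ceil(10/4) * ceil(10/4) * 16 = 3 * 3 * 16 = 144 bytes.
    // A 10x10 RGTC1 level: 3 * 3 * 8 = 72 bytes.
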
+bool ValidateCompressedFormatTarget(GLenum target, GLenum format) {
+ if (target == GL_TEXTURE_3D) {
+    // Formats that do not support 3D textures:
+ switch (format) {
+ // ES 3.1, Section 8.7, page 169.
+ case GL_COMPRESSED_R11_EAC:
+ case GL_COMPRESSED_SIGNED_R11_EAC:
+ case GL_COMPRESSED_RG11_EAC:
+ case GL_COMPRESSED_SIGNED_RG11_EAC:
+ case GL_COMPRESSED_RGB8_ETC2:
+ case GL_COMPRESSED_SRGB8_ETC2:
+ case GL_COMPRESSED_RGB8_PUNCHTHROUGH_ALPHA1_ETC2:
+ case GL_COMPRESSED_SRGB8_PUNCHTHROUGH_ALPHA1_ETC2:
+ case GL_COMPRESSED_RGBA8_ETC2_EAC:
+ case GL_COMPRESSED_SRGB8_ALPHA8_ETC2_EAC:
+ // GL_EXT_texture_compression_s3tc
+ case GL_COMPRESSED_RGB_S3TC_DXT1_EXT:
+ case GL_COMPRESSED_RGBA_S3TC_DXT1_EXT:
+ case GL_COMPRESSED_RGBA_S3TC_DXT3_EXT:
+ case GL_COMPRESSED_RGBA_S3TC_DXT5_EXT:
+ case GL_COMPRESSED_SRGB_S3TC_DXT1_EXT:
+ case GL_COMPRESSED_SRGB_ALPHA_S3TC_DXT1_EXT:
+ case GL_COMPRESSED_SRGB_ALPHA_S3TC_DXT3_EXT:
+ case GL_COMPRESSED_SRGB_ALPHA_S3TC_DXT5_EXT:
+ // GL_EXT_texture_compression_rgtc
+ case GL_COMPRESSED_RED_RGTC1_EXT:
+ case GL_COMPRESSED_SIGNED_RED_RGTC1_EXT:
+ case GL_COMPRESSED_RED_GREEN_RGTC2_EXT:
+ case GL_COMPRESSED_SIGNED_RED_GREEN_RGTC2_EXT:
+ return false;
+      // GL_KHR_texture_compression_astc_hdr: TEXTURE_3D is not supported
+      // without the HDR profile. That case is guaranteed to be validated in
+      // GLES2DecoderImpl::TexStorageImpl before this function is called.
+ default:
+ break;
+ }
+ }
+
+ return true;
+}
+
bool ValidateCompressedTexSubDimensions(GLenum target,
GLint level,
GLint xoffset,
@@ -534,6 +589,11 @@ bool ValidateCompressedTexSubDimensions(GLenum target,
GLenum format,
Texture* texture,
const char** error_message) {
+ if (!ValidateCompressedFormatTarget(target, format)) {
+ *error_message = "target invalid for format";
+ return false;
+ }
+
if (xoffset < 0 || yoffset < 0 || zoffset < 0) {
*error_message = "x/y/z offset < 0";
return false;
@@ -668,7 +728,15 @@ bool ValidateCompressedTexSubDimensions(GLenum target,
case GL_COMPRESSED_RGB8_PUNCHTHROUGH_ALPHA1_ETC2:
case GL_COMPRESSED_SRGB8_PUNCHTHROUGH_ALPHA1_ETC2:
case GL_COMPRESSED_RGBA8_ETC2_EAC:
- case GL_COMPRESSED_SRGB8_ALPHA8_ETC2_EAC: {
+ case GL_COMPRESSED_SRGB8_ALPHA8_ETC2_EAC:
+ case GL_COMPRESSED_SRGB_ALPHA_BPTC_UNORM_EXT:
+ case GL_COMPRESSED_RGBA_BPTC_UNORM_EXT:
+ case GL_COMPRESSED_RGB_BPTC_SIGNED_FLOAT_EXT:
+ case GL_COMPRESSED_RGB_BPTC_UNSIGNED_FLOAT_EXT:
+ case GL_COMPRESSED_RED_RGTC1_EXT:
+ case GL_COMPRESSED_SIGNED_RED_RGTC1_EXT:
+ case GL_COMPRESSED_RED_GREEN_RGTC2_EXT:
+ case GL_COMPRESSED_SIGNED_RED_GREEN_RGTC2_EXT: {
const int kBlockSize = 4;
GLsizei tex_width, tex_height;
if (target == GL_TEXTURE_3D ||
@@ -696,6 +764,10 @@ bool ValidateCompressedTexDimensions(GLenum target,
GLsizei depth,
GLenum format,
const char** error_message) {
+ if (!ValidateCompressedFormatTarget(target, format)) {
+ *error_message = "target invalid for format";
+ return false;
+ }
switch (format) {
case GL_COMPRESSED_RGB_S3TC_DXT1_EXT:
case GL_COMPRESSED_RGBA_S3TC_DXT1_EXT:
@@ -705,7 +777,6 @@ bool ValidateCompressedTexDimensions(GLenum target,
case GL_COMPRESSED_SRGB_ALPHA_S3TC_DXT1_EXT:
case GL_COMPRESSED_SRGB_ALPHA_S3TC_DXT3_EXT:
case GL_COMPRESSED_SRGB_ALPHA_S3TC_DXT5_EXT:
- DCHECK_EQ(1, depth); // 2D formats.
if (!IsValidS3TCSizeForWebGLAndANGLE(level, width) ||
!IsValidS3TCSizeForWebGLAndANGLE(level, height)) {
*error_message = "width or height invalid for level";
@@ -744,7 +815,6 @@ bool ValidateCompressedTexDimensions(GLenum target,
case GL_ATC_RGBA_EXPLICIT_ALPHA_AMD:
case GL_ATC_RGBA_INTERPOLATED_ALPHA_AMD:
case GL_ETC1_RGB8_OES:
- DCHECK_EQ(1, depth); // 2D formats.
if (width <= 0 || height <= 0) {
*error_message = "width or height invalid for level";
return false;
@@ -754,7 +824,6 @@ bool ValidateCompressedTexDimensions(GLenum target,
case GL_COMPRESSED_RGB_PVRTC_2BPPV1_IMG:
case GL_COMPRESSED_RGBA_PVRTC_4BPPV1_IMG:
case GL_COMPRESSED_RGBA_PVRTC_2BPPV1_IMG:
- DCHECK_EQ(1, depth); // 2D formats.
if (!IsValidPVRTCSize(level, width) || !IsValidPVRTCSize(level, height)) {
*error_message = "width or height invalid for level";
return false;
@@ -776,8 +845,30 @@ bool ValidateCompressedTexDimensions(GLenum target,
*error_message = "width, height, or depth invalid";
return false;
}
- if (target == GL_TEXTURE_3D) {
- *error_message = "target invalid for format";
+ return true;
+ case GL_COMPRESSED_SRGB_ALPHA_BPTC_UNORM_EXT:
+ case GL_COMPRESSED_RGBA_BPTC_UNORM_EXT:
+ case GL_COMPRESSED_RGB_BPTC_SIGNED_FLOAT_EXT:
+ case GL_COMPRESSED_RGB_BPTC_UNSIGNED_FLOAT_EXT:
+ if (width < 0 || height < 0 || depth < 0) {
+ *error_message = "width, height, or depth invalid";
+ return false;
+ }
+ if (!(width % kBPTCBlockWidth == 0 && height % kBPTCBlockHeight == 0)) {
+ *error_message = "width or height is not a multiple of four";
+ return false;
+ }
+ return true;
+ case GL_COMPRESSED_RED_RGTC1_EXT:
+ case GL_COMPRESSED_SIGNED_RED_RGTC1_EXT:
+ case GL_COMPRESSED_RED_GREEN_RGTC2_EXT:
+ case GL_COMPRESSED_SIGNED_RED_GREEN_RGTC2_EXT:
+ if (width < 0 || height < 0 || depth < 0) {
+ *error_message = "width, height, or depth invalid";
+ return false;
+ }
+ if (!(width % kRGTCBlockWidth == 0 && height % kRGTCBlockHeight == 0)) {
+ *error_message = "width or height is not a multiple of four";
return false;
}
return true;
@@ -933,8 +1024,21 @@ CopyTextureMethod GetCopyTextureCHROMIUMMethod(const FeatureInfo* feature_info,
if (source_target == GL_TEXTURE_2D &&
(dest_target == GL_TEXTURE_2D || dest_target == GL_TEXTURE_CUBE_MAP) &&
source_format_color_renderable && copy_tex_image_format_valid &&
- source_level == 0 && !flip_y && !premultiply_alpha_change && !dither)
- return CopyTextureMethod::DIRECT_COPY;
+ source_level == 0 && !flip_y && !premultiply_alpha_change && !dither) {
+ auto source_texture_type = GLES2Util::GetGLReadPixelsImplementationType(
+ source_internal_format, source_target);
+ auto dest_texture_type = GLES2Util::GetGLReadPixelsImplementationType(
+ dest_internal_format, dest_target);
+ if (source_texture_type != GL_UNSIGNED_SHORT ||
+ source_texture_type == dest_texture_type) {
+      // https://crbug.com/1042239. The latest OpenGL ES 3.2 spec (Oct 22,
+      // 2019) makes it optional for implementations to support conversion
+      // between mismatched source and dest effective internal formats.
+      // R16-to-R16F direct copies fail on Android Nvidia Shield devices, so
+      // DIRECT_COPY is not used in that case.
+ return CopyTextureMethod::DIRECT_COPY;
+ }
+ }
if (dest_format_color_renderable && dest_level == 0 &&
dest_target != GL_TEXTURE_CUBE_MAP)
return CopyTextureMethod::DIRECT_DRAW;
@@ -1027,6 +1131,8 @@ bool ValidateCopyTextureCHROMIUMInternalFormats(const FeatureInfo* feature_info,
source_internal_format == GL_RGB_YCBCR_422_CHROMIUM ||
source_internal_format == GL_RGB_YCBCR_P010_CHROMIUM ||
source_internal_format == GL_R16_EXT ||
+ source_internal_format == GL_RG16_EXT ||
+ source_internal_format == GL_RGBA16_EXT ||
source_internal_format == GL_RGB10_A2;
if (!valid_source_format) {
*output_error_msg = "invalid source internal format " +
@@ -1114,5 +1220,124 @@ bool GetGFXBufferUsage(GLenum buffer_usage, gfx::BufferUsage* out_usage) {
}
}
+bool IsASTCFormat(GLenum internal_format) {
+ switch (internal_format) {
+ case GL_COMPRESSED_RGBA_ASTC_4x4_KHR:
+ case GL_COMPRESSED_RGBA_ASTC_5x4_KHR:
+ case GL_COMPRESSED_RGBA_ASTC_5x5_KHR:
+ case GL_COMPRESSED_RGBA_ASTC_6x5_KHR:
+ case GL_COMPRESSED_RGBA_ASTC_6x6_KHR:
+ case GL_COMPRESSED_RGBA_ASTC_8x5_KHR:
+ case GL_COMPRESSED_RGBA_ASTC_8x6_KHR:
+ case GL_COMPRESSED_RGBA_ASTC_8x8_KHR:
+ case GL_COMPRESSED_RGBA_ASTC_10x5_KHR:
+ case GL_COMPRESSED_RGBA_ASTC_10x6_KHR:
+ case GL_COMPRESSED_RGBA_ASTC_10x8_KHR:
+ case GL_COMPRESSED_RGBA_ASTC_10x10_KHR:
+ case GL_COMPRESSED_RGBA_ASTC_12x10_KHR:
+ case GL_COMPRESSED_RGBA_ASTC_12x12_KHR:
+ case GL_COMPRESSED_SRGB8_ALPHA8_ASTC_4x4_KHR:
+ case GL_COMPRESSED_SRGB8_ALPHA8_ASTC_5x4_KHR:
+ case GL_COMPRESSED_SRGB8_ALPHA8_ASTC_5x5_KHR:
+ case GL_COMPRESSED_SRGB8_ALPHA8_ASTC_6x5_KHR:
+ case GL_COMPRESSED_SRGB8_ALPHA8_ASTC_6x6_KHR:
+ case GL_COMPRESSED_SRGB8_ALPHA8_ASTC_8x5_KHR:
+ case GL_COMPRESSED_SRGB8_ALPHA8_ASTC_8x6_KHR:
+ case GL_COMPRESSED_SRGB8_ALPHA8_ASTC_8x8_KHR:
+ case GL_COMPRESSED_SRGB8_ALPHA8_ASTC_10x5_KHR:
+ case GL_COMPRESSED_SRGB8_ALPHA8_ASTC_10x6_KHR:
+ case GL_COMPRESSED_SRGB8_ALPHA8_ASTC_10x8_KHR:
+ case GL_COMPRESSED_SRGB8_ALPHA8_ASTC_10x10_KHR:
+ case GL_COMPRESSED_SRGB8_ALPHA8_ASTC_12x10_KHR:
+ case GL_COMPRESSED_SRGB8_ALPHA8_ASTC_12x12_KHR:
+ return true;
+ default:
+ break;
+ }
+ return false;
+}
+
+// This is only called from Texture::SetLevelInfo in texture_manager.cc,
+// where there is no direct access to decoder->IsCompressedTextureFormat()
+// or feature_info->validators()->compressed_texture_format.IsValid().
+bool IsCompressedTextureFormat(GLenum internal_format) {
+ switch (internal_format) {
+ // S3TC
+ case GL_COMPRESSED_RGB_S3TC_DXT1_EXT:
+ case GL_COMPRESSED_RGBA_S3TC_DXT1_EXT:
+ case GL_COMPRESSED_SRGB_S3TC_DXT1_EXT:
+ case GL_COMPRESSED_SRGB_ALPHA_S3TC_DXT1_EXT:
+ case GL_COMPRESSED_RGBA_S3TC_DXT3_EXT:
+ case GL_COMPRESSED_RGBA_S3TC_DXT5_EXT:
+ case GL_COMPRESSED_SRGB_ALPHA_S3TC_DXT3_EXT:
+ case GL_COMPRESSED_SRGB_ALPHA_S3TC_DXT5_EXT:
+ // ASTC
+ case GL_COMPRESSED_RGBA_ASTC_4x4_KHR:
+ case GL_COMPRESSED_RGBA_ASTC_5x4_KHR:
+ case GL_COMPRESSED_RGBA_ASTC_5x5_KHR:
+ case GL_COMPRESSED_RGBA_ASTC_6x5_KHR:
+ case GL_COMPRESSED_RGBA_ASTC_6x6_KHR:
+ case GL_COMPRESSED_RGBA_ASTC_8x5_KHR:
+ case GL_COMPRESSED_RGBA_ASTC_8x6_KHR:
+ case GL_COMPRESSED_RGBA_ASTC_8x8_KHR:
+ case GL_COMPRESSED_RGBA_ASTC_10x5_KHR:
+ case GL_COMPRESSED_RGBA_ASTC_10x6_KHR:
+ case GL_COMPRESSED_RGBA_ASTC_10x8_KHR:
+ case GL_COMPRESSED_RGBA_ASTC_10x10_KHR:
+ case GL_COMPRESSED_RGBA_ASTC_12x10_KHR:
+ case GL_COMPRESSED_RGBA_ASTC_12x12_KHR:
+ case GL_COMPRESSED_SRGB8_ALPHA8_ASTC_4x4_KHR:
+ case GL_COMPRESSED_SRGB8_ALPHA8_ASTC_5x4_KHR:
+ case GL_COMPRESSED_SRGB8_ALPHA8_ASTC_5x5_KHR:
+ case GL_COMPRESSED_SRGB8_ALPHA8_ASTC_6x5_KHR:
+ case GL_COMPRESSED_SRGB8_ALPHA8_ASTC_6x6_KHR:
+ case GL_COMPRESSED_SRGB8_ALPHA8_ASTC_8x5_KHR:
+ case GL_COMPRESSED_SRGB8_ALPHA8_ASTC_8x6_KHR:
+ case GL_COMPRESSED_SRGB8_ALPHA8_ASTC_8x8_KHR:
+ case GL_COMPRESSED_SRGB8_ALPHA8_ASTC_10x5_KHR:
+ case GL_COMPRESSED_SRGB8_ALPHA8_ASTC_10x6_KHR:
+ case GL_COMPRESSED_SRGB8_ALPHA8_ASTC_10x8_KHR:
+ case GL_COMPRESSED_SRGB8_ALPHA8_ASTC_10x10_KHR:
+ case GL_COMPRESSED_SRGB8_ALPHA8_ASTC_12x10_KHR:
+ case GL_COMPRESSED_SRGB8_ALPHA8_ASTC_12x12_KHR:
+ // BPTC
+ case GL_COMPRESSED_SRGB_ALPHA_BPTC_UNORM_EXT:
+ case GL_COMPRESSED_RGBA_BPTC_UNORM_EXT:
+ case GL_COMPRESSED_RGB_BPTC_SIGNED_FLOAT_EXT:
+ case GL_COMPRESSED_RGB_BPTC_UNSIGNED_FLOAT_EXT:
+ // RGTC
+ case GL_COMPRESSED_RED_RGTC1_EXT:
+ case GL_COMPRESSED_SIGNED_RED_RGTC1_EXT:
+ case GL_COMPRESSED_RED_GREEN_RGTC2_EXT:
+ case GL_COMPRESSED_SIGNED_RED_GREEN_RGTC2_EXT:
+ // ETC2/EAC
+ case GL_COMPRESSED_R11_EAC:
+ case GL_COMPRESSED_SIGNED_R11_EAC:
+ case GL_COMPRESSED_RGB8_ETC2:
+ case GL_COMPRESSED_SRGB8_ETC2:
+ case GL_COMPRESSED_RGB8_PUNCHTHROUGH_ALPHA1_ETC2:
+ case GL_COMPRESSED_SRGB8_PUNCHTHROUGH_ALPHA1_ETC2:
+ case GL_COMPRESSED_RG11_EAC:
+ case GL_COMPRESSED_SIGNED_RG11_EAC:
+ case GL_COMPRESSED_RGBA8_ETC2_EAC:
+ case GL_COMPRESSED_SRGB8_ALPHA8_ETC2_EAC:
+ // ETC1
+ case GL_ETC1_RGB8_OES:
+ // PVRTC
+ case GL_COMPRESSED_RGB_PVRTC_4BPPV1_IMG:
+ case GL_COMPRESSED_RGBA_PVRTC_4BPPV1_IMG:
+ case GL_COMPRESSED_RGB_PVRTC_2BPPV1_IMG:
+ case GL_COMPRESSED_RGBA_PVRTC_2BPPV1_IMG:
+ // ATC
+ case GL_ATC_RGB_AMD:
+ case GL_ATC_RGBA_EXPLICIT_ALPHA_AMD:
+ case GL_ATC_RGBA_INTERPOLATED_ALPHA_AMD:
+ return true;
+ default:
+ break;
+ }
+ return false;
+}
+
} // namespace gles2
} // namespace gpu
diff --git a/chromium/gpu/command_buffer/service/gl_utils.h b/chromium/gpu/command_buffer/service/gl_utils.h
index 9e76d767954..b96b5a4a66b 100644
--- a/chromium/gpu/command_buffer/service/gl_utils.h
+++ b/chromium/gpu/command_buffer/service/gl_utils.h
@@ -9,7 +9,6 @@
#define GPU_COMMAND_BUFFER_SERVICE_GL_UTILS_H_
#include <string>
-#include <vector>
#include "build/build_config.h"
#include "gpu/command_buffer/common/constants.h"
@@ -64,8 +63,6 @@ struct CALayerSharedState {
gfx::Transform transform;
};
-std::vector<int> GetAllGLErrors();
-
bool PrecisionMeetsSpecForHighpFloat(GLint rangeMin,
GLint rangeMax,
GLint precision);
@@ -110,6 +107,8 @@ bool GetCompressedTexSizeInBytes(const char* function_name,
GLsizei* size_in_bytes,
ErrorState* error_state);
+bool ValidateCompressedFormatTarget(GLenum target, GLenum format);
+
bool ValidateCompressedTexSubDimensions(GLenum target,
GLint level,
GLint xoffset,
@@ -161,6 +160,9 @@ gfx::OverlayTransform GetGFXOverlayTransform(GLenum plane_transform);
bool GetGFXBufferFormat(GLenum internal_format, gfx::BufferFormat* out_format);
bool GetGFXBufferUsage(GLenum buffer_usage, gfx::BufferUsage* out_usage);
+bool IsASTCFormat(GLenum internal_format);
+bool IsCompressedTextureFormat(GLenum internal_format);
+
} // namespace gles2
} // namespace gpu
diff --git a/chromium/gpu/command_buffer/service/gles2_cmd_copy_texture_chromium.cc b/chromium/gpu/command_buffer/service/gles2_cmd_copy_texture_chromium.cc
index 4c459e41aa6..2eb24507574 100644
--- a/chromium/gpu/command_buffer/service/gles2_cmd_copy_texture_chromium.cc
+++ b/chromium/gpu/command_buffer/service/gles2_cmd_copy_texture_chromium.cc
@@ -9,6 +9,7 @@
#include <algorithm>
#include <unordered_map>
+#include "gpu/command_buffer/common/gles2_cmd_copy_texture_chromium_utils.h"
#include "gpu/command_buffer/service/context_state.h"
#include "gpu/command_buffer/service/decoder_context.h"
#include "gpu/command_buffer/service/gl_utils.h"
@@ -35,6 +36,7 @@ enum {
S_FORMAT_LUMINANCE,
S_FORMAT_LUMINANCE_ALPHA,
S_FORMAT_RED,
+ S_FORMAT_RG,
S_FORMAT_RGB,
S_FORMAT_RGBA,
S_FORMAT_RGB8,
@@ -83,36 +85,34 @@ enum {
NUM_D_FORMAT
};
+enum {
+ GLSL_ESSL100_OR_COMPATIBILITY_PROFILE,
+ GLSL_ESSL300,
+ GLSL_CORE_PROFILE,
+ NUM_GLSL
+};
+
const unsigned kAlphaSize = 4;
const unsigned kDitherSize = 2;
-const unsigned kNumVertexShaders = NUM_SAMPLERS;
-const unsigned kNumFragmentShaders =
- kAlphaSize * kDitherSize * NUM_SAMPLERS * NUM_S_FORMAT * NUM_D_FORMAT;
+const unsigned kNumVertexShaders = NUM_GLSL;
+
+static_assert(std::numeric_limits<unsigned>::max() / NUM_GLSL / NUM_D_FORMAT /
+ NUM_S_FORMAT / NUM_SAMPLERS / kDitherSize / kAlphaSize >
+ 0,
+ "ShaderId would overflow");
+const unsigned kNumFragmentShaders = kAlphaSize * kDitherSize * NUM_SAMPLERS *
+ NUM_S_FORMAT * NUM_D_FORMAT * NUM_GLSL;
typedef unsigned ShaderId;
-ShaderId GetVertexShaderId(GLenum target) {
- ShaderId id = 0;
- switch (target) {
- case GL_TEXTURE_2D:
- id = SAMPLER_2D;
- break;
- case GL_TEXTURE_RECTANGLE_ARB:
- id = SAMPLER_RECTANGLE_ARB;
- break;
- case GL_TEXTURE_EXTERNAL_OES:
- id = SAMPLER_EXTERNAL_OES;
- break;
- default:
- NOTREACHED();
- break;
- }
- return id;
+ShaderId GetVertexShaderId(unsigned glslVersion) {
+ return glslVersion;
}
// Returns the correct fragment shader id to evaluate the copy operation for
// the premultiply alpha pixel store settings and target.
-ShaderId GetFragmentShaderId(bool premultiply_alpha,
+ShaderId GetFragmentShaderId(unsigned glslVersion,
+ bool premultiply_alpha,
bool unpremultiply_alpha,
bool dither,
GLenum target,
@@ -157,10 +157,14 @@ ShaderId GetFragmentShaderId(bool premultiply_alpha,
case GL_R16_EXT:
sourceFormatIndex = S_FORMAT_RED;
break;
+ case GL_RG16_EXT:
+ sourceFormatIndex = S_FORMAT_RG;
+ break;
case GL_RGB:
sourceFormatIndex = S_FORMAT_RGB;
break;
case GL_RGBA:
+ case GL_RGBA16_EXT:
sourceFormatIndex = S_FORMAT_RGBA;
break;
case GL_RGB8:
@@ -297,11 +301,14 @@ ShaderId GetFragmentShaderId(bool premultiply_alpha,
break;
}
- return alphaIndex + ditherIndex * kAlphaSize +
- targetIndex * kAlphaSize * kDitherSize +
- sourceFormatIndex * kAlphaSize * kDitherSize * NUM_SAMPLERS +
- destFormatIndex * kAlphaSize * kDitherSize * NUM_SAMPLERS *
- NUM_S_FORMAT;
+ ShaderId id = 0;
+ id = id * NUM_GLSL + glslVersion;
+ id = id * NUM_D_FORMAT + destFormatIndex;
+ id = id * NUM_S_FORMAT + sourceFormatIndex;
+ id = id * NUM_SAMPLERS + targetIndex;
+ id = id * kDitherSize + ditherIndex;
+ id = id * kAlphaSize + alphaIndex;
+ return id;
}
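
The shader id returned above is a mixed-radix packing: each index is folded in with id = id * RADIX + index, which is why the static_assert earlier in this hunk checks that the product of all the radices fits in unsigned. A sketch of the encode together with an illustrative decode (the format/sampler radix values below are placeholders; kAlphaSize = 4, kDitherSize = 2 and NUM_GLSL = 3 match the constants above, and the service code never actually needs to unpack an id):

    constexpr unsigned kGlsl = 3, kDFmt = 26, kSFmt = 24, kSamp = 3,
                       kDith = 2, kAlpha = 4;

    unsigned Encode(unsigned glsl, unsigned d, unsigned s, unsigned t,
                    unsigned dither, unsigned alpha) {
      unsigned id = 0;
      id = id * kGlsl + glsl;
      id = id * kDFmt + d;
      id = id * kSFmt + s;
      id = id * kSamp + t;
      id = id * kDith + dither;
      id = id * kAlpha + alpha;
      return id;
    }

    // Decode unpacks in the reverse order, least-significant field first.
    void Decode(unsigned id, unsigned out[6]) {
      out[5] = id % kAlpha;  id /= kAlpha;
      out[4] = id % kDith;   id /= kDith;
      out[3] = id % kSamp;   id /= kSamp;
      out[2] = id % kSFmt;   id /= kSFmt;
      out[1] = id % kDFmt;   id /= kDFmt;
      out[0] = id % kGlsl;
    }
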
const char* kShaderPrecisionPreamble =
@@ -317,23 +324,35 @@ const char* kShaderPrecisionPreamble =
"#define TexCoordPrecision\n"
"#endif\n";
-std::string GetVertexShaderSource(const gl::GLVersionInfo& gl_version_info,
- GLenum target) {
+void InsertVersionDirective(std::string* source, unsigned glslVersion) {
+ if (glslVersion == GLSL_CORE_PROFILE) {
+ *source += "#version 150\n";
+ } else if (glslVersion == GLSL_ESSL300) {
+ *source += "#version 300 es\n";
+ }
+}
+
+unsigned ChooseGLSLVersion(const gl::GLVersionInfo& gl_version_info,
+ GLenum dest_format) {
+ bool use_essl300_features = CopyTextureCHROMIUMNeedsESSL3(dest_format);
+ if (use_essl300_features && gl_version_info.is_es) {
+ return GLSL_ESSL300;
+ } else if (gl_version_info.IsAtLeastGL(3, 2)) {
+ return GLSL_CORE_PROFILE;
+ } else {
+ return GLSL_ESSL100_OR_COMPATIBILITY_PROFILE;
+ }
+}
+
+std::string GetVertexShaderSource(unsigned glslVersion) {
std::string source;
+ InsertVersionDirective(&source, glslVersion);
- if (gl_version_info.is_es || gl_version_info.IsLowerThanGL(3, 2)) {
- if (gl_version_info.is_es3 && target != GL_TEXTURE_EXTERNAL_OES) {
- source += "#version 300 es\n";
- source +=
- "#define ATTRIBUTE in\n"
- "#define VARYING out\n";
- } else {
- source +=
- "#define ATTRIBUTE attribute\n"
- "#define VARYING varying\n";
- }
+ if (glslVersion == GLSL_ESSL100_OR_COMPATIBILITY_PROFILE) {
+ source +=
+ "#define ATTRIBUTE attribute\n"
+ "#define VARYING varying\n";
} else {
- source += "#version 150\n";
source +=
"#define ATTRIBUTE in\n"
"#define VARYING out\n";
@@ -357,7 +376,7 @@ std::string GetVertexShaderSource(const gl::GLVersionInfo& gl_version_info,
return source;
}
-std::string GetFragmentShaderSource(const gl::GLVersionInfo& gl_version_info,
+std::string GetFragmentShaderSource(unsigned glslVersion,
bool premultiply_alpha,
bool unpremultiply_alpha,
bool dither,
@@ -366,21 +385,20 @@ std::string GetFragmentShaderSource(const gl::GLVersionInfo& gl_version_info,
GLenum source_format,
GLenum dest_format) {
std::string source;
-
- // Preamble for core and compatibility mode.
- if (gl_version_info.is_es || gl_version_info.IsLowerThanGL(3, 2)) {
- if (gl_version_info.is_es3 && target != GL_TEXTURE_EXTERNAL_OES) {
- source += "#version 300 es\n";
- }
- if (target == GL_TEXTURE_EXTERNAL_OES) {
+ InsertVersionDirective(&source, glslVersion);
+
+ // #extension directives
+ if (target == GL_TEXTURE_EXTERNAL_OES) {
+ // If target is TEXTURE_EXTERNAL_OES, API must be ES.
+ if (glslVersion == GLSL_ESSL300) {
+ source += "#extension GL_OES_EGL_image_external_essl3 : enable\n";
+ } else { // ESSL100
source += "#extension GL_OES_EGL_image_external : enable\n";
+ }
- if (nv_egl_stream_consumer_external) {
- source += "#extension GL_NV_EGL_stream_consumer_external : enable\n";
- }
+ if (nv_egl_stream_consumer_external) {
+ source += "#extension GL_NV_EGL_stream_consumer_external : enable\n";
}
- } else {
- source += "#version 150\n";
}
// Preamble for texture precision.
@@ -390,6 +408,7 @@ std::string GetFragmentShaderSource(const gl::GLVersionInfo& gl_version_info,
// format or unsigned normalized fixed-point format. |source_format| can only
// be unsigned normalized fixed-point format.
if (gpu::gles2::GLES2Util::IsUnsignedIntegerFormat(dest_format)) {
+ DCHECK(glslVersion == GLSL_ESSL300 || glslVersion == GLSL_CORE_PROFILE);
source += "#define TextureType uvec4\n";
source += "#define ZERO 0u\n";
source += "#define MAX_COLOR 255u\n";
@@ -401,8 +420,11 @@ std::string GetFragmentShaderSource(const gl::GLVersionInfo& gl_version_info,
source += "#define MAX_COLOR 1.0\n";
source += "#define ScaleValue 1.0\n";
}
- if (gl_version_info.is_es2 || gl_version_info.IsLowerThanGL(3, 2) ||
- target == GL_TEXTURE_EXTERNAL_OES) {
+
+ if (glslVersion == GLSL_ESSL100_OR_COMPATIBILITY_PROFILE) {
+ source +=
+ "#define VARYING varying\n"
+ "#define FRAGCOLOR gl_FragColor\n";
switch (target) {
case GL_TEXTURE_2D:
case GL_TEXTURE_EXTERNAL_OES:
@@ -415,10 +437,6 @@ std::string GetFragmentShaderSource(const gl::GLVersionInfo& gl_version_info,
NOTREACHED();
break;
}
-
- source +=
- "#define VARYING varying\n"
- "#define FRAGCOLOR gl_FragColor\n";
} else {
source +=
"#define VARYING in\n"
@@ -1395,11 +1413,13 @@ void CopyTextureResourceManagerImpl::DoCopyTextureInternal(
glVertexAttribPointer(kVertexPositionAttrib, 2, GL_FLOAT, GL_FALSE, 0, 0);
}
- ShaderId vertex_shader_id = GetVertexShaderId(source_target);
+ unsigned glslVersion = ChooseGLSLVersion(gl_version_info, dest_format);
+
+ ShaderId vertex_shader_id = GetVertexShaderId(glslVersion);
DCHECK_LT(static_cast<size_t>(vertex_shader_id), vertex_shaders_.size());
ShaderId fragment_shader_id =
- GetFragmentShaderId(premultiply_alpha, unpremultiply_alpha, dither,
- source_target, source_format, dest_format);
+ GetFragmentShaderId(glslVersion, premultiply_alpha, unpremultiply_alpha,
+ dither, source_target, source_format, dest_format);
DCHECK_LT(static_cast<size_t>(fragment_shader_id), fragment_shaders_.size());
ProgramMapKey key(fragment_shader_id);
@@ -1410,8 +1430,7 @@ void CopyTextureResourceManagerImpl::DoCopyTextureInternal(
GLuint* vertex_shader = &vertex_shaders_[vertex_shader_id];
if (!*vertex_shader) {
*vertex_shader = glCreateShader(GL_VERTEX_SHADER);
- std::string source =
- GetVertexShaderSource(gl_version_info, source_target);
+ std::string source = GetVertexShaderSource(glslVersion);
CompileShaderWithLog(*vertex_shader, source.c_str());
}
glAttachShader(info->program, *vertex_shader);
@@ -1419,7 +1438,7 @@ void CopyTextureResourceManagerImpl::DoCopyTextureInternal(
if (!*fragment_shader) {
*fragment_shader = glCreateShader(GL_FRAGMENT_SHADER);
std::string source = GetFragmentShaderSource(
- gl_version_info, premultiply_alpha, unpremultiply_alpha, dither,
+ glslVersion, premultiply_alpha, unpremultiply_alpha, dither,
nv_egl_stream_consumer_external_, source_target, source_format,
dest_format);
CompileShaderWithLog(*fragment_shader, source.c_str());
diff --git a/chromium/gpu/command_buffer/service/gles2_cmd_copy_texture_chromium.h b/chromium/gpu/command_buffer/service/gles2_cmd_copy_texture_chromium.h
index c0952dbc2e4..351e181a635 100644
--- a/chromium/gpu/command_buffer/service/gles2_cmd_copy_texture_chromium.h
+++ b/chromium/gpu/command_buffer/service/gles2_cmd_copy_texture_chromium.h
@@ -8,6 +8,7 @@
#include <vector>
#include "base/macros.h"
+#include "gpu/command_buffer/common/gles2_cmd_utils.h"
#include "gpu/command_buffer/service/feature_info.h"
#include "gpu/gpu_gles2_export.h"
diff --git a/chromium/gpu/command_buffer/service/gles2_cmd_decoder.cc b/chromium/gpu/command_buffer/service/gles2_cmd_decoder.cc
index 8de442a6b01..d729023a0b9 100644
--- a/chromium/gpu/command_buffer/service/gles2_cmd_decoder.cc
+++ b/chromium/gpu/command_buffer/service/gles2_cmd_decoder.cc
@@ -28,6 +28,7 @@
#include "base/containers/span.h"
#include "base/debug/alias.h"
#include "base/debug/dump_without_crashing.h"
+#include "base/hash/legacy_hash.h"
#include "base/logging.h"
#include "base/metrics/histogram_macros.h"
#include "base/numerics/ranges.h"
@@ -39,6 +40,7 @@
#include "base/trace_event/trace_event.h"
#include "build/build_config.h"
#include "gpu/command_buffer/common/debug_marker_manager.h"
+#include "gpu/command_buffer/common/gles2_cmd_copy_texture_chromium_utils.h"
#include "gpu/command_buffer/common/gles2_cmd_format.h"
#include "gpu/command_buffer/common/gles2_cmd_utils.h"
#include "gpu/command_buffer/common/gpu_memory_buffer_support.h"
@@ -70,7 +72,6 @@
#include "gpu/command_buffer/service/mailbox_manager.h"
#include "gpu/command_buffer/service/memory_tracking.h"
#include "gpu/command_buffer/service/multi_draw_manager.h"
-#include "gpu/command_buffer/service/path_manager.h"
#include "gpu/command_buffer/service/program_manager.h"
#include "gpu/command_buffer/service/renderbuffer_manager.h"
#include "gpu/command_buffer/service/sampler_manager.h"
@@ -85,7 +86,6 @@
#include "gpu/command_buffer/service/vertex_array_manager.h"
#include "gpu/command_buffer/service/vertex_attrib_manager.h"
#include "gpu/config/gpu_preferences.h"
-#include "third_party/smhasher/src/City.h"
#include "ui/gfx/buffer_types.h"
#include "ui/gfx/color_space.h"
#include "ui/gfx/geometry/point.h"
@@ -94,7 +94,6 @@
#include "ui/gfx/geometry/size.h"
#include "ui/gfx/gpu_fence.h"
#include "ui/gfx/gpu_memory_buffer.h"
-#include "ui/gfx/ipc/color/gfx_param_traits.h"
#include "ui/gfx/transform.h"
#include "ui/gl/ca_renderer_layer_params.h"
#include "ui/gl/dc_renderer_layer_params.h"
@@ -120,10 +119,6 @@
#include <OpenGL/CGLIOSurface.h>
#endif // OS_MACOSX
-#if defined(OS_WIN)
-#include "gpu/command_buffer/service/shared_image_backing_factory_d3d.h"
-#endif // OS_WIN
-
// Note: this undefs far and near so include this after other Windows headers.
#include "third_party/angle/src/image_util/loadimage.h"
@@ -802,8 +797,6 @@ class GLES2DecoderImpl : public GLES2Decoder,
ForcedMultisampleMode mode);
bool RegenerateRenderbufferIfNeeded(Renderbuffer* renderbuffer);
- PathManager* path_manager() { return group_->path_manager(); }
-
void SetCopyTextureResourceManagerForTest(
CopyTextureCHROMIUMResourceManager* copy_texture_resource_manager)
override {
@@ -837,6 +830,25 @@ class GLES2DecoderImpl : public GLES2Decoder,
kBindBufferRange
};
+ // Helper class to ensure that GLES2DecoderImpl::Destroy() is always called
+ // unless we specifically call OnSuccess().
+ class DestroyOnFailure {
+ public:
+ DestroyOnFailure(GLES2DecoderImpl* decoder) : decoder_(decoder) {}
+ ~DestroyOnFailure() {
+ if (!success_)
+ decoder_->Destroy(has_context_);
+ }
+
+ void OnSuccess() { success_ = true; }
+ void LoseContext() { has_context_ = false; }
+
+ private:
+ GLES2DecoderImpl* decoder_ = nullptr;
+ bool success_ = false;
+ bool has_context_ = true;
+ };
+
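
The DestroyOnFailure guard defined above replaces the explicit Destroy(true) calls removed from the early-return paths of Initialize() further down: once the guard is constructed, any return that does not first call OnSuccess() runs Destroy(), and LoseContext() switches it to Destroy(false). A minimal sketch of the pattern (SomeStepSucceeds and ContextWasLost are hypothetical stand-ins for the real checks in Initialize()):

    gpu::ContextResult Initialize(/* ... */) {
      set_initialized();
      DestroyOnFailure destroy_on_failure(this);  // armed from here on

      if (!SomeStepSucceeds())
        return gpu::ContextResult::kFatalFailure;  // guard runs Destroy(true)

      if (ContextWasLost()) {
        destroy_on_failure.LoseContext();          // guard runs Destroy(false)
        return gpu::ContextResult::kTransientFailure;
      }

      destroy_on_failure.OnSuccess();              // disarmed; no Destroy()
      return gpu::ContextResult::kSuccess;
    }
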
const char* GetCommandName(unsigned int command_id) const;
// Initialize or re-initialize the shader translator.
@@ -860,8 +872,6 @@ class GLES2DecoderImpl : public GLES2Decoder,
bool GenVertexArraysOESHelper(GLsizei n, const GLuint* client_ids);
void DeleteVertexArraysOESHelper(GLsizei n,
const volatile GLuint* client_ids);
- bool GenPathsCHROMIUMHelper(GLuint first_client_id, GLsizei range);
- bool DeletePathsCHROMIUMHelper(GLuint first_client_id, GLsizei range);
bool GenSamplersHelper(GLsizei n, const GLuint* client_ids);
void DeleteSamplersHelper(GLsizei n, const volatile GLuint* client_ids);
bool GenTransformFeedbacksHelper(GLsizei n, const GLuint* client_ids);
@@ -1187,6 +1197,9 @@ class GLES2DecoderImpl : public GLES2Decoder,
void DoBeginSharedImageAccessDirectCHROMIUM(GLuint client_id, GLenum mode);
void DoEndSharedImageAccessDirectCHROMIUM(GLuint client_id);
+ void DoBeginBatchReadAccessSharedImageCHROMIUM();
+ void DoEndBatchReadAccessSharedImageCHROMIUM();
+
void BindImage(uint32_t client_texture_id,
uint32_t texture_target,
gl::GLImage* image,
@@ -1214,9 +1227,6 @@ class GLES2DecoderImpl : public GLES2Decoder,
void DoFlushDriverCachesCHROMIUM(void);
- void DoMatrixLoadfCHROMIUM(GLenum matrix_mode,
- const volatile GLfloat* matrix);
- void DoMatrixLoadIdentityCHROMIUM(GLenum matrix_mode);
void DoScheduleCALayerInUseQueryCHROMIUM(GLsizei count,
const volatile GLuint* textures);
@@ -1482,6 +1492,13 @@ class GLES2DecoderImpl : public GLES2Decoder,
unsigned format,
int width,
int height) override;
+ bool ClearCompressedTextureLevel3D(Texture* texture,
+ unsigned target,
+ int level,
+ unsigned format,
+ int width,
+ int height,
+ int depth) override;
bool IsCompressedTextureFormat(unsigned format) override;
// overridden from GLES2Decoder
@@ -1906,7 +1923,6 @@ class GLES2DecoderImpl : public GLES2Decoder,
bool DoIsSampler(GLuint client_id);
bool DoIsTransformFeedback(GLuint client_id);
bool DoIsVertexArrayOES(GLuint client_id);
- bool DoIsPathCHROMIUM(GLuint client_id);
bool DoIsSync(GLuint client_id);
void DoLineWidth(GLfloat width);
@@ -2462,10 +2478,6 @@ class GLES2DecoderImpl : public GLES2Decoder,
// Checks to see if the inserted fence has completed.
void ProcessDescheduleUntilFinished();
- void DoBindFragmentInputLocationCHROMIUM(GLuint program_id,
- GLint location,
- const std::string& name);
-
// If |texture_manager_version_| doesn't match the current version, then this
// will rebind all external textures to match their current service_id.
void RestoreAllExternalTextureBindingsIfNeeded() override;
@@ -3180,6 +3192,8 @@ void BackTexture::Invalidate() {
texture_ref_->ForceContextLost();
texture_ref_ = nullptr;
}
+ memory_tracker_.TrackMemFree(bytes_allocated_);
+ bytes_allocated_ = 0;
}
GLenum BackTexture::Target() {
@@ -3214,7 +3228,8 @@ bool BackTexture::AllocateNativeGpuMemoryBuffer(const gfx::Size& size,
}
scoped_refptr<gl::GLImage> image =
decoder_->GetContextGroup()->image_factory()->CreateAnonymousImage(
- size, buffer_format, gfx::BufferUsage::SCANOUT, &is_cleared);
+ size, buffer_format, gfx::BufferUsage::SCANOUT,
+ gpu::kNullSurfaceHandle, &is_cleared);
if (!image)
return false;
DCHECK_EQ(format, image->GetDataFormat());
@@ -3360,6 +3375,8 @@ void BackRenderbuffer::Destroy() {
void BackRenderbuffer::Invalidate() {
id_ = 0;
+ memory_tracker_.TrackMemFree(bytes_allocated_);
+ bytes_allocated_ = 0;
}
BackFramebuffer::BackFramebuffer(GLES2DecoderImpl* decoder)
@@ -3535,6 +3552,10 @@ gpu::ContextResult GLES2DecoderImpl::Initialize(
surfaceless_ = surface->IsSurfaceless() && !offscreen;
set_initialized();
+ // At this point we are partially initialized and must Destroy() in any
+ // failure case.
+ DestroyOnFailure destroy_on_failure(this);
+
gpu_state_tracer_ = GPUStateTracer::Create(&state_);
if (group_->gpu_preferences().enable_gpu_debugging)
@@ -3577,7 +3598,6 @@ gpu::ContextResult GLES2DecoderImpl::Initialize(
feature_info_->feature_flags().is_swiftshader_for_webgl) {
// Must not destroy ContextGroup if it is not initialized.
group_ = nullptr;
- Destroy(true);
LOG(ERROR) << "ContextResult::kFatalFailure: "
"fail_if_major_perf_caveat + swiftshader";
return gpu::ContextResult::kFatalFailure;
@@ -3587,7 +3607,6 @@ gpu::ContextResult GLES2DecoderImpl::Initialize(
if (attrib_helper.context_type == CONTEXT_TYPE_WEBGL2_COMPUTE) {
// Must not destroy ContextGroup if it is not initialized.
group_ = nullptr;
- Destroy(true);
LOG(ERROR)
<< "ContextResult::kFatalFailure: "
"webgl2-compute is not supported on validating command decoder.";
@@ -3599,7 +3618,6 @@ gpu::ContextResult GLES2DecoderImpl::Initialize(
if (result != gpu::ContextResult::kSuccess) {
// Must not destroy ContextGroup if it is not initialized.
group_ = nullptr;
- Destroy(true);
return result;
}
CHECK_GL_ERROR();
@@ -3626,7 +3644,6 @@ gpu::ContextResult GLES2DecoderImpl::Initialize(
}
if (!supported) {
- Destroy(true);
LOG(ERROR) << "ContextResult::kFatalFailure: "
"native gmb format not supported";
return gpu::ContextResult::kFatalFailure;
@@ -4003,7 +4020,6 @@ gpu::ContextResult GLES2DecoderImpl::Initialize(
// of the frame buffers is okay.
if (!ResizeOffscreenFramebuffer(
gfx::Size(state_.viewport_width, state_.viewport_height))) {
- Destroy(true);
LOG(ERROR) << "ContextResult::kFatalFailure: "
"Could not allocate offscreen buffer storage.";
return gpu::ContextResult::kFatalFailure;
@@ -4011,15 +4027,16 @@ gpu::ContextResult GLES2DecoderImpl::Initialize(
if (!offscreen_single_buffer_) {
// Allocate the offscreen saved color texture.
DCHECK(offscreen_saved_color_format_);
+      // Use 64x64 instead of 1x1 to satisfy the minimum framebuffer size
+      // requirement on some platforms: b/151774454.
offscreen_saved_color_texture_->AllocateStorage(
- gfx::Size(1, 1), offscreen_saved_color_format_, true);
+ gfx::Size(64, 64), offscreen_saved_color_format_, true);
offscreen_saved_frame_buffer_->AttachRenderTexture(
offscreen_saved_color_texture_.get());
if (offscreen_saved_frame_buffer_->CheckStatus() !=
GL_FRAMEBUFFER_COMPLETE) {
bool was_lost = CheckResetStatus();
- Destroy(true);
LOG(ERROR) << (was_lost ? "ContextResult::kTransientFailure: "
: "ContextResult::kFatalFailure: ")
<< "Offscreen saved FBO was incomplete.";
@@ -4113,9 +4130,11 @@ gpu::ContextResult GLES2DecoderImpl::Initialize(
LOG(ERROR)
<< " GLES2DecoderImpl: Context reset detected after initialization.";
group_->LoseContexts(error::kUnknown);
+ destroy_on_failure.LoseContext();
return gpu::ContextResult::kTransientFailure;
}
+ destroy_on_failure.OnSuccess();
return gpu::ContextResult::kSuccess;
}
@@ -4230,6 +4249,8 @@ Capabilities GLES2DecoderImpl::GetCapabilities() {
caps.egl_image_external =
feature_info_->feature_flags().oes_egl_image_external;
+ caps.egl_image_external_essl3 =
+ feature_info_->feature_flags().oes_egl_image_external_essl3;
caps.texture_format_astc =
feature_info_->feature_flags().ext_texture_format_astc;
caps.texture_format_atc =
@@ -4268,19 +4289,15 @@ Capabilities GLES2DecoderImpl::GetCapabilities() {
caps.commit_overlay_planes = supports_commit_overlay_planes_;
caps.surfaceless = surfaceless_;
bool is_offscreen = !!offscreen_target_frame_buffer_.get();
- caps.flips_vertically = !is_offscreen && surface_->FlipsVertically();
+ caps.surface_origin =
+ !is_offscreen ? surface_->GetOrigin() : gfx::SurfaceOrigin::kBottomLeft;
caps.msaa_is_slow = workarounds().msaa_is_slow;
caps.avoid_stencil_buffers = workarounds().avoid_stencil_buffers;
caps.multisample_compatibility =
feature_info_->feature_flags().ext_multisample_compatibility;
caps.dc_layers = supports_dc_layers_;
- caps.use_dc_overlays_for_video = surface_->UseOverlaysForVideo();
caps.protected_video_swap_chain = surface_->SupportsProtectedVideo();
caps.gpu_vsync = surface_->SupportsGpuVSync();
-#if defined(OS_WIN)
- caps.shared_image_swap_chain =
- SharedImageBackingFactoryD3D::IsSwapChainSupported();
-#endif // OS_WIN
caps.blend_equation_advanced =
feature_info_->feature_flags().blend_equation_advanced;
caps.blend_equation_advanced_coherent =
@@ -4299,8 +4316,8 @@ Capabilities GLES2DecoderImpl::GetCapabilities() {
caps.image_ycbcr_420v_disabled_for_video_frames =
group_->gpu_preferences()
.disable_biplanar_gpu_memory_buffers_for_video_frames;
- caps.image_xr30 = feature_info_->feature_flags().chromium_image_xr30;
- caps.image_xb30 = feature_info_->feature_flags().chromium_image_xb30;
+ caps.image_ar30 = feature_info_->feature_flags().chromium_image_ar30;
+ caps.image_ab30 = feature_info_->feature_flags().chromium_image_ab30;
caps.image_ycbcr_p010 =
feature_info_->feature_flags().chromium_image_ycbcr_p010;
caps.max_copy_texture_chromium_size =
@@ -4315,10 +4332,6 @@ Capabilities GLES2DecoderImpl::GetCapabilities() {
group_->gpu_feature_info()
.status_values[GPU_FEATURE_TYPE_GPU_RASTERIZATION] ==
kGpuFeatureStatusEnabled;
- if (workarounds().disable_non_empty_post_sub_buffers_for_onscreen_surfaces &&
- !surface_->IsOffscreen()) {
- caps.disable_non_empty_post_sub_buffers = true;
- }
if (workarounds().broken_egl_image_ref_counting &&
group_->gpu_preferences().enable_threaded_texture_mailboxes) {
caps.disable_2d_canvas_copy_on_write = true;
@@ -4493,10 +4506,17 @@ bool GLES2DecoderImpl::InitializeShaderTranslator() {
if (((shader_spec == SH_WEBGL_SPEC || shader_spec == SH_WEBGL2_SPEC) &&
features().enable_shader_name_hashing) ||
- force_shader_name_hashing_for_test)
- resources.HashFunction = &CityHash64;
- else
+ force_shader_name_hashing_for_test) {
+ // TODO(https://crbug.com/902789): In theory, it should be OK to change this
+ // hash. However, in practice, this seems to cause some tests to fail. See
+ // https://crbug.com/963889.
+ resources.HashFunction = +[](const char* data, size_t size) {
+ return base::legacy::CityHash64(
+ base::as_bytes(base::make_span(data, size)));
+ };
+ } else {
resources.HashFunction = nullptr;
+ }
ShCompileOptions driver_bug_workarounds = 0;
if (workarounds().init_gl_position_in_vertex_shader)
@@ -4654,44 +4674,6 @@ bool GLES2DecoderImpl::GenTransformFeedbacksHelper(
return true;
}
-bool GLES2DecoderImpl::GenPathsCHROMIUMHelper(GLuint first_client_id,
- GLsizei range) {
- GLuint last_client_id;
- if (range < 1 || !base::CheckAdd(first_client_id, range - 1)
- .AssignIfValid(&last_client_id))
- return false;
-
- if (path_manager()->HasPathsInRange(first_client_id, last_client_id))
- return false;
-
- GLuint first_service_id = api()->glGenPathsNVFn(range);
- if (first_service_id == 0) {
- // We have to fail the connection here, because client has already
- // succeeded in allocating the ids. This happens if we allocate
- // the whole path id space (two allocations of 0x7FFFFFFF paths, for
- // example).
- return false;
- }
- // GenPathsNV does not wrap.
- DCHECK(first_service_id + range - 1 >= first_service_id);
-
- path_manager()->CreatePathRange(first_client_id, last_client_id,
- first_service_id);
-
- return true;
-}
-
-bool GLES2DecoderImpl::DeletePathsCHROMIUMHelper(GLuint first_client_id,
- GLsizei range) {
- GLuint last_client_id;
- if (range < 1 || !base::CheckAdd(first_client_id, range - 1)
- .AssignIfValid(&last_client_id))
- return false;
-
- path_manager()->RemovePaths(first_client_id, last_client_id);
- return true;
-}
-
void GLES2DecoderImpl::DeleteBuffersHelper(GLsizei n,
const volatile GLuint* client_ids) {
for (GLsizei ii = 0; ii < n; ++ii) {
@@ -5856,8 +5838,12 @@ error::Error GLES2DecoderImpl::HandleResizeCHROMIUM(
GLuint width = static_cast<GLuint>(c.width);
GLuint height = static_cast<GLuint>(c.height);
GLfloat scale_factor = c.scale_factor;
- GLenum color_space = c.color_space;
GLboolean has_alpha = c.alpha;
+ gfx::ColorSpace color_space;
+ if (!ReadColorSpace(c.shm_id, c.shm_offset, c.color_space_size,
+ &color_space)) {
+ return error::kOutOfBounds;
+ }
TRACE_EVENT2("gpu", "glResizeChromium", "width", width, "height", height);
// gfx::Size uses integers, make sure width and height do not overflow
@@ -5867,29 +5853,6 @@ error::Error GLES2DecoderImpl::HandleResizeCHROMIUM(
width = base::ClampToRange(width, 1U, kMaxDimension);
height = base::ClampToRange(height, 1U, kMaxDimension);
- gl::GLSurface::ColorSpace surface_color_space =
- gl::GLSurface::ColorSpace::UNSPECIFIED;
- switch (color_space) {
- case GL_COLOR_SPACE_UNSPECIFIED_CHROMIUM:
- surface_color_space = gl::GLSurface::ColorSpace::UNSPECIFIED;
- break;
- case GL_COLOR_SPACE_SCRGB_LINEAR_CHROMIUM:
- surface_color_space = gl::GLSurface::ColorSpace::SCRGB_LINEAR;
- break;
- case GL_COLOR_SPACE_HDR10_CHROMIUM:
- surface_color_space = gl::GLSurface::ColorSpace::HDR10;
- break;
- case GL_COLOR_SPACE_SRGB_CHROMIUM:
- surface_color_space = gl::GLSurface::ColorSpace::SRGB;
- break;
- case GL_COLOR_SPACE_DISPLAY_P3_CHROMIUM:
- surface_color_space = gl::GLSurface::ColorSpace::DISPLAY_P3;
- break;
- default:
- LOG(ERROR) << "GLES2DecoderImpl: Context lost because specified color"
- << "space was invalid.";
- return error::kLostContext;
- }
bool is_offscreen = !!offscreen_target_frame_buffer_.get();
if (is_offscreen) {
if (!ResizeOffscreenFramebuffer(gfx::Size(width, height))) {
@@ -5898,8 +5861,8 @@ error::Error GLES2DecoderImpl::HandleResizeCHROMIUM(
return error::kLostContext;
}
} else {
- if (!surface_->Resize(gfx::Size(width, height), scale_factor,
- surface_color_space, !!has_alpha)) {
+ if (!surface_->Resize(gfx::Size(width, height), scale_factor, color_space,
+ !!has_alpha)) {
LOG(ERROR) << "GLES2DecoderImpl: Context lost because resize failed.";
return error::kLostContext;
}
@@ -9274,7 +9237,7 @@ void GLES2DecoderImpl::DoBlitFramebufferCHROMIUM(
!feature_info_->feature_flags().desktop_srgb_support ||
gl_version_info().IsAtLeastGL(4, 4) ||
(gl_version_info().IsAtLeastGL(4, 2) && encode_srgb_only)) {
- if (enable_srgb && gl_version_info().IsAtLeastGL(4, 2)) {
+ if (enable_srgb && feature_info_->feature_flags().ext_srgb_write_control) {
state_.EnableDisableFramebufferSRGB(enable_srgb);
}
@@ -13328,6 +13291,12 @@ error::Error GLES2DecoderImpl::HandleReadPixels(uint32_t immediate_data_size,
accepted_formats.push_back(GL_RGBA);
accepted_types.push_back(GL_UNSIGNED_INT_2_10_10_10_REV);
break;
+ case GL_R16_EXT:
+ case GL_RG16_EXT:
+ case GL_RGBA16_EXT:
+ accepted_formats.push_back(GL_RGBA);
+ accepted_types.push_back(GL_UNSIGNED_SHORT);
+ break;
default:
accepted_formats.push_back(GL_RGBA);
{
@@ -13846,20 +13815,11 @@ error::Error GLES2DecoderImpl::HandleSetColorSpaceMetadataCHROMIUM(
cmd_data);
GLuint texture_id = c.texture_id;
- GLsizei color_space_size = c.color_space_size;
- const char* data = static_cast<const char*>(
- GetAddressAndCheckSize(c.shm_id, c.shm_offset, color_space_size));
- if (!data)
- return error::kOutOfBounds;
-
- // Make a copy to reduce the risk of a time of check to time of use attack.
- std::vector<char> color_space_data(data, data + color_space_size);
- base::Pickle color_space_pickle(color_space_data.data(), color_space_size);
- base::PickleIterator iterator(color_space_pickle);
gfx::ColorSpace color_space;
- if (!IPC::ParamTraits<gfx::ColorSpace>::Read(&color_space_pickle, &iterator,
- &color_space))
+ if (!ReadColorSpace(c.shm_id, c.shm_offset, c.color_space_size,
+ &color_space)) {
return error::kOutOfBounds;
+ }
TextureRef* ref = texture_manager()->GetTexture(texture_id);
if (!ref) {
@@ -14488,6 +14448,53 @@ bool GLES2DecoderImpl::ClearCompressedTextureLevel(Texture* texture,
return true;
}
+bool GLES2DecoderImpl::ClearCompressedTextureLevel3D(Texture* texture,
+ unsigned target,
+ int level,
+ unsigned format,
+ int width,
+ int height,
+ int depth) {
+ DCHECK(target == GL_TEXTURE_3D || target == GL_TEXTURE_2D_ARRAY);
+ // This code path can only be called if the texture was originally
+ // allocated via TexStorage3D. Note that TexStorage3D is exposed
+ // internally for ES 2.0 contexts, but compressed texture support is
+ // not part of that exposure.
+ DCHECK(feature_info_->IsWebGL2OrES3Context());
+
+ GLsizei bytes_required = 0;
+ if (!GetCompressedTexSizeInBytes("ClearCompressedTextureLevel3D", width,
+ height, 1, format, &bytes_required,
+ error_state_.get())) {
+ return false;
+ }
+
+ TRACE_EVENT1("gpu", "GLES2DecoderImpl::ClearCompressedTextureLevel3D",
+ "bytes_required", bytes_required);
+
+ api()->glBindBufferFn(GL_PIXEL_UNPACK_BUFFER, 0);
+ {
+    // Extra scope so that |zero| and the buffer it owns are destroyed
+    // right after use.
+ std::unique_ptr<char[]> zero(new char[bytes_required]);
+ memset(zero.get(), 0, bytes_required);
+ api()->glBindTextureFn(texture->target(), texture->service_id());
+ api()->glCompressedTexSubImage3DFn(target, level, 0, 0, 0, width, height,
+ depth, format, bytes_required,
+ zero.get());
+ }
+ TextureRef* bound_texture =
+ texture_manager()->GetTextureInfoForTarget(&state_, texture->target());
+ api()->glBindTextureFn(texture->target(),
+ bound_texture ? bound_texture->service_id() : 0);
+ Buffer* bound_buffer =
+ buffer_manager()->GetBufferInfoForTarget(&state_, GL_PIXEL_UNPACK_BUFFER);
+ if (bound_buffer) {
+ api()->glBindBufferFn(GL_PIXEL_UNPACK_BUFFER, bound_buffer->service_id());
+ }
+ return true;
+}
+
bool GLES2DecoderImpl::IsCompressedTextureFormat(unsigned format) {
return feature_info_->validators()->compressed_texture_format.IsValid(
format);
@@ -17929,12 +17936,6 @@ bool GLES2DecoderImpl::DoIsVertexArrayOES(GLuint client_id) {
return vao && vao->IsValid() && !vao->IsDeleted();
}
-bool GLES2DecoderImpl::DoIsPathCHROMIUM(GLuint client_id) {
- GLuint service_id = 0;
- return path_manager()->GetPath(client_id, &service_id) &&
- api()->glIsPathNVFn(service_id) == GL_TRUE;
-}
-
bool GLES2DecoderImpl::DoIsSync(GLuint client_id) {
GLsync service_sync = 0;
return group_->GetSyncServiceId(client_id, &service_sync);
@@ -18081,6 +18082,15 @@ void GLES2DecoderImpl::DoCopyTextureCHROMIUM(
return;
}
+ if (source_target == GL_TEXTURE_EXTERNAL_OES &&
+ CopyTextureCHROMIUMNeedsESSL3(internal_format) &&
+ !feature_info_->feature_flags().oes_egl_image_external_essl3) {
+ LOCAL_SET_GL_ERROR(GL_INVALID_OPERATION, kFunctionName,
+ "Copy*TextureCHROMIUM from EXTERNAL_OES to integer "
+ "format requires OES_EGL_image_external_essl3");
+ return;
+ }
+
if (feature_info_->feature_flags().desktop_srgb_support) {
bool enable_framebuffer_srgb =
GLES2Util::GetColorEncodingFromInternalFormat(source_internal_format) ==
@@ -18344,6 +18354,15 @@ void GLES2DecoderImpl::CopySubTextureHelper(const char* function_name,
return;
}
+ if (source_target == GL_TEXTURE_EXTERNAL_OES &&
+ CopyTextureCHROMIUMNeedsESSL3(dest_internal_format) &&
+ !feature_info_->feature_flags().oes_egl_image_external_essl3) {
+ LOCAL_SET_GL_ERROR(GL_INVALID_OPERATION, function_name,
+ "Copy*TextureCHROMIUM from EXTERNAL_OES to integer "
+ "format requires OES_EGL_image_external_essl3");
+ return;
+ }
+
if (feature_info_->feature_flags().desktop_srgb_support) {
bool enable_framebuffer_srgb =
GLES2Util::GetColorEncodingFromInternalFormat(source_internal_format) ==
@@ -18538,23 +18557,38 @@ void GLES2DecoderImpl::TexStorageImpl(GLenum target,
return;
}
bool is_compressed_format = IsCompressedTextureFormat(internal_format);
- if (is_compressed_format && target == GL_TEXTURE_3D) {
- LOCAL_SET_GL_ERROR(
- GL_INVALID_OPERATION, function_name, "target invalid for format");
- return;
+ if (is_compressed_format) {
+ if (target == GL_TEXTURE_3D &&
+ !feature_info_->feature_flags().ext_texture_format_astc_hdr &&
+ ::gpu::gles2::IsASTCFormat(internal_format)) {
+ LOCAL_SET_GL_ERROR(GL_INVALID_OPERATION, function_name,
+ "target invalid for format");
+ return;
+ }
+ if (!::gpu::gles2::ValidateCompressedFormatTarget(target,
+ internal_format)) {
+ LOCAL_SET_GL_ERROR(GL_INVALID_OPERATION, function_name,
+ "target invalid for format");
+ return;
+ }
}
// The glTexStorage entry points require width, height, and depth to be
// at least 1, but the other texture entry points (those which use
// ValidForTarget) do not. So we have to add an extra check here.
bool is_invalid_texstorage_size = width < 1 || height < 1 || depth < 1;
if (!texture_manager()->ValidForTarget(target, 0, width, height, depth) ||
- is_invalid_texstorage_size ||
- TextureManager::ComputeMipMapCount(target, width, height, depth) <
- levels) {
+ is_invalid_texstorage_size) {
LOCAL_SET_GL_ERROR(
GL_INVALID_VALUE, function_name, "dimensions out of range");
return;
}
+  // glTexStorage generates GL_INVALID_OPERATION for an out-of-bounds level
+  // count, unlike most GL calls, which generate GL_INVALID_VALUE.
+ if (TextureManager::ComputeMipMapCount(target, width, height, depth) <
+ levels) {
+ LOCAL_SET_GL_ERROR(GL_INVALID_OPERATION, function_name, "too many levels");
+ return;
+ }
TextureRef* texture_ref = texture_manager()->GetTextureInfoForTarget(
&state_, target);
if (!texture_ref) {
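
The rewritten validation above now reports too many mip levels with GL_INVALID_OPERATION, matching the glTexStorage spec, while the dimension checks keep raising GL_INVALID_VALUE. A minimal sketch of the level bound being enforced, assuming TextureManager::ComputeMipMapCount follows the standard full-mip-chain formula (the real helper also accounts for target-specific rules, e.g. array depth not contributing to the chain length):

#include <algorithm>
#include <cmath>
#include <iostream>

// Full mip chain length for a width x height x depth level-0 size:
// 1 + floor(log2(largest dimension)). Illustrative stand-in only.
int MaxMipLevels(int width, int height, int depth) {
  int largest = std::max({width, height, depth});
  return 1 + static_cast<int>(std::floor(std::log2(largest)));
}

int main() {
  std::cout << MaxMipLevels(256, 256, 1) << "\n";  // 9 (256 down to 1)
  std::cout << MaxMipLevels(640, 480, 1) << "\n";  // 10 (driven by the 640 axis)
  // Requesting more levels than this now generates GL_INVALID_OPERATION
  // in the validating decoder's TexStorage path.
}
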
@@ -18765,7 +18799,7 @@ void GLES2DecoderImpl::DoTexStorage2DImageCHROMIUM(GLenum target,
scoped_refptr<gl::GLImage> image =
GetContextGroup()->image_factory()->CreateAnonymousImage(
gfx::Size(width, height), buffer_format, gfx_buffer_usage,
- &is_cleared);
+ gpu::kNullSurfaceHandle, &is_cleared);
if (!image || !image->BindTexImage(target)) {
LOCAL_SET_GL_ERROR(GL_INVALID_OPERATION, "glTexStorage2DImageCHROMIUM",
"Failed to create or bind GL Image");
@@ -18963,6 +18997,28 @@ void GLES2DecoderImpl::DoEndSharedImageAccessDirectCHROMIUM(GLuint client_id) {
texture_ref->EndAccessSharedImage();
}
+void GLES2DecoderImpl::DoBeginBatchReadAccessSharedImageCHROMIUM() {
+ DCHECK(group_->shared_image_manager());
+
+ if (!group_->shared_image_manager()->BeginBatchReadAccess()) {
+ LOCAL_SET_GL_ERROR(GL_INVALID_OPERATION,
+ "DoBeginBatchReadAccessSharedImageCHROMIUM",
+ "shared image begin batch read access failed ");
+ return;
+ }
+}
+
+void GLES2DecoderImpl::DoEndBatchReadAccessSharedImageCHROMIUM() {
+ DCHECK(group_->shared_image_manager());
+
+ if (!group_->shared_image_manager()->EndBatchReadAccess()) {
+ LOCAL_SET_GL_ERROR(GL_INVALID_OPERATION,
+ "DoEndBatchReadAccessSharedImageCHROMIUM",
+ "shared image end batch read access failed ");
+ return;
+ }
+}
+
void GLES2DecoderImpl::DoInsertEventMarkerEXT(
GLsizei length, const GLchar* marker) {
if (!marker) {
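
The Do{Begin,End}BatchReadAccessSharedImageCHROMIUM entry points above let a client bracket a group of read-only shared-image accesses so SharedImageManager can batch the underlying synchronization. A hedged client-side sketch, assuming the generated GLES2 interface exposes matching no-argument BeginBatchReadAccessSharedImageCHROMIUM / EndBatchReadAccessSharedImageCHROMIUM methods (inferred from these handlers) alongside the existing Begin/EndSharedImageAccessDirectCHROMIUM calls:

#include <vector>

#include "gpu/GLES2/gl2extchromium.h"
#include "gpu/command_buffer/client/gles2_interface.h"

// Sketch only: batch several read-only shared-image accesses for one frame.
// |gl| is a live gpu::gles2::GLES2Interface; error handling is elided.
void DrawFromSharedImages(gpu::gles2::GLES2Interface* gl,
                          const std::vector<GLuint>& texture_ids) {
  gl->BeginBatchReadAccessSharedImageCHROMIUM();
  for (GLuint id : texture_ids) {
    gl->BeginSharedImageAccessDirectCHROMIUM(
        id, GL_SHARED_IMAGE_ACCESS_MODE_READ_CHROMIUM);
    // ... bind |id| and issue draws that sample from it ...
    gl->EndSharedImageAccessDirectCHROMIUM(id);
  }
  gl->EndBatchReadAccessSharedImageCHROMIUM();
}
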
@@ -19212,34 +19268,6 @@ void GLES2DecoderImpl::DoFlushDriverCachesCHROMIUM(void) {
}
}
-void GLES2DecoderImpl::DoMatrixLoadfCHROMIUM(GLenum matrix_mode,
- const volatile GLfloat* matrix) {
- DCHECK(matrix_mode == GL_PATH_PROJECTION_CHROMIUM ||
- matrix_mode == GL_PATH_MODELVIEW_CHROMIUM);
-
- GLfloat* target_matrix = matrix_mode == GL_PATH_PROJECTION_CHROMIUM
- ? state_.projection_matrix
- : state_.modelview_matrix;
- memcpy(target_matrix, const_cast<const GLfloat*>(matrix),
- sizeof(GLfloat) * 16);
- // The matrix_mode is either GL_PATH_MODELVIEW_NV or GL_PATH_PROJECTION_NV
- // since the values of the _NV and _CHROMIUM tokens match.
- api()->glMatrixLoadfEXTFn(matrix_mode, target_matrix);
-}
-
-void GLES2DecoderImpl::DoMatrixLoadIdentityCHROMIUM(GLenum matrix_mode) {
- DCHECK(matrix_mode == GL_PATH_PROJECTION_CHROMIUM ||
- matrix_mode == GL_PATH_MODELVIEW_CHROMIUM);
-
- GLfloat* target_matrix = matrix_mode == GL_PATH_PROJECTION_CHROMIUM
- ? state_.projection_matrix
- : state_.modelview_matrix;
- memcpy(target_matrix, kIdentityMatrix, sizeof(kIdentityMatrix));
- // The matrix_mode is either GL_PATH_MODELVIEW_NV or GL_PATH_PROJECTION_NV
- // since the values of the _NV and _CHROMIUM tokens match.
- api()->glMatrixLoadIdentityEXTFn(matrix_mode);
-}
-
error::Error GLES2DecoderImpl::HandleUniformBlockBinding(
uint32_t immediate_data_size,
const volatile void* cmd_data) {
@@ -19832,945 +19860,6 @@ void GLES2DecoderImpl::OnOutOfMemoryError() {
}
}
-// Class to validate path rendering command parameters. Contains validation
-// for the common parameters that are used in multiple different commands.
-// The individual functions are needed in order to control the order of the
-// validation.
-// The Get* functions will return false if the function call should be stopped.
-// In this case, PathCommandValidatorContext::error() will return the command
-// buffer error that should be returned. The decoder error state will be set to
-// appropriate GL error if needed.
-// The Get* functions will return true if the function call should
-// continue as-is.
-class PathCommandValidatorContext {
- public:
- PathCommandValidatorContext(GLES2DecoderImpl* decoder,
- const char* function_name)
- : decoder_(decoder),
- error_state_(decoder->GetErrorState()),
- validators_(decoder->GetContextGroup()->feature_info()->validators()),
- function_name_(function_name),
- error_(error::kNoError) {}
-
- error::Error error() const { return error_; }
-
- template <typename Cmd>
- bool GetPathRange(const Cmd& cmd, GLsizei* out_range) {
- GLsizei range = static_cast<GLsizei>(cmd.range);
- if (range < 0) {
- ERRORSTATE_SET_GL_ERROR(error_state_, GL_INVALID_VALUE, function_name_,
- "range < 0");
- return false;
- }
- *out_range = range;
- return true;
- }
- template <typename Cmd>
- bool GetPathCountAndType(const Cmd& cmd,
- GLuint* out_num_paths,
- GLenum* out_path_name_type) {
- int32_t numPaths = cmd.numPaths;
- if (numPaths < 0) {
- ERRORSTATE_SET_GL_ERROR(error_state_, GL_INVALID_VALUE, function_name_,
- "numPaths < 0");
- return false;
- }
- GLenum path_name_type = static_cast<GLenum>(cmd.pathNameType);
- if (!validators_->path_name_type.IsValid(path_name_type)) {
- ERRORSTATE_SET_GL_ERROR_INVALID_ENUM(error_state_, function_name_,
- path_name_type, "pathNameType");
- return false;
- }
- *out_num_paths = static_cast<GLsizei>(numPaths);
- *out_path_name_type = path_name_type;
- return true;
- }
- template <typename Cmd>
- bool GetFillMode(const Cmd& cmd, GLenum* out_fill_mode) {
- GLenum fill_mode = static_cast<GLenum>(cmd.fillMode);
- if (!validators_->path_fill_mode.IsValid(fill_mode)) {
- ERRORSTATE_SET_GL_ERROR_INVALID_ENUM(error_state_, function_name_,
- fill_mode, "fillMode");
- return false;
- }
- *out_fill_mode = fill_mode;
- return true;
- }
- template <typename Cmd>
- bool GetFillModeAndMask(const Cmd& cmd,
- GLenum* out_fill_mode,
- GLuint* out_mask) {
- GLenum fill_mode;
- if (!GetFillMode(cmd, &fill_mode))
- return false;
- GLuint mask = static_cast<GLuint>(cmd.mask);
- /* The error INVALID_VALUE is generated if /fillMode/ is COUNT_UP_CHROMIUM
- or COUNT_DOWN_CHROMIUM and the effective /mask/+1 is not an integer
- power of two */
- if ((fill_mode == GL_COUNT_UP_CHROMIUM ||
- fill_mode == GL_COUNT_DOWN_CHROMIUM) &&
- GLES2Util::IsNPOT(mask + 1)) {
- ERRORSTATE_SET_GL_ERROR(error_state_, GL_INVALID_VALUE, function_name_,
- "mask+1 is not power of two");
- return false;
- }
- *out_fill_mode = fill_mode;
- *out_mask = mask;
- return true;
- }
- template <typename Cmd>
- bool GetTransformType(const Cmd& cmd, GLenum* out_transform_type) {
- GLenum transform_type = static_cast<GLenum>(cmd.transformType);
- if (!validators_->path_transform_type.IsValid(transform_type)) {
- ERRORSTATE_SET_GL_ERROR_INVALID_ENUM(error_state_, function_name_,
- transform_type, "transformType");
- return false;
- }
- *out_transform_type = transform_type;
- return true;
- }
- template <typename Cmd>
- bool GetPathNameData(const Cmd& cmd,
- GLuint num_paths,
- GLenum path_name_type,
- std::unique_ptr<GLuint[]>* out_buffer) {
- DCHECK(validators_->path_name_type.IsValid(path_name_type));
- GLuint path_base = static_cast<GLuint>(cmd.pathBase);
- uint32_t shm_id = static_cast<uint32_t>(cmd.paths_shm_id);
- uint32_t shm_offset = static_cast<uint32_t>(cmd.paths_shm_offset);
- if (shm_id == 0 && shm_offset == 0) {
- error_ = error::kOutOfBounds;
- return false;
- }
- switch (path_name_type) {
- case GL_BYTE:
- return GetPathNameDataImpl<GLbyte>(num_paths, path_base, shm_id,
- shm_offset, out_buffer);
- case GL_UNSIGNED_BYTE:
- return GetPathNameDataImpl<GLubyte>(num_paths, path_base, shm_id,
- shm_offset, out_buffer);
- case GL_SHORT:
- return GetPathNameDataImpl<GLshort>(num_paths, path_base, shm_id,
- shm_offset, out_buffer);
- case GL_UNSIGNED_SHORT:
- return GetPathNameDataImpl<GLushort>(num_paths, path_base, shm_id,
- shm_offset, out_buffer);
- case GL_INT:
- return GetPathNameDataImpl<GLint>(num_paths, path_base, shm_id,
- shm_offset, out_buffer);
- case GL_UNSIGNED_INT:
- return GetPathNameDataImpl<GLuint>(num_paths, path_base, shm_id,
- shm_offset, out_buffer);
- default:
- break;
- }
- NOTREACHED();
- error_ = error::kOutOfBounds;
- return false;
- }
- template <typename Cmd>
- bool GetTransforms(const Cmd& cmd,
- GLuint num_paths,
- GLenum transform_type,
- const GLfloat** out_transforms) {
- if (transform_type == GL_NONE) {
- *out_transforms = nullptr;
- return true;
- }
- uint32_t transforms_shm_id =
- static_cast<uint32_t>(cmd.transformValues_shm_id);
- uint32_t transforms_shm_offset =
- static_cast<uint32_t>(cmd.transformValues_shm_offset);
- uint32_t transforms_component_count =
- GLES2Util::GetComponentCountForGLTransformType(transform_type);
- // Below multiplication will not overflow.
- DCHECK_LE(transforms_component_count, 12U);
- uint32_t one_transform_size = sizeof(GLfloat) * transforms_component_count;
- uint32_t transforms_size = 0;
- if (!base::CheckMul(one_transform_size, num_paths)
- .AssignIfValid(&transforms_size)) {
- error_ = error::kOutOfBounds;
- return false;
- }
- const GLfloat* transforms = nullptr;
- if (transforms_shm_id != 0 || transforms_shm_offset != 0)
- transforms = decoder_->GetSharedMemoryAs<const GLfloat*>(
- transforms_shm_id, transforms_shm_offset, transforms_size);
- if (!transforms) {
- error_ = error::kOutOfBounds;
- return false;
- }
- *out_transforms = transforms;
- return true;
- }
- template <typename Cmd>
- bool GetCoverMode(const Cmd& cmd, GLenum* out_cover_mode) {
- GLenum cover_mode = static_cast<GLuint>(cmd.coverMode);
- if (!validators_->path_instanced_cover_mode.IsValid(cover_mode)) {
- ERRORSTATE_SET_GL_ERROR_INVALID_ENUM(error_state_, function_name_,
- cover_mode, "coverMode");
- return false;
- }
- *out_cover_mode = cover_mode;
- return true;
- }
-
- private:
- template <typename T>
- bool GetPathNameDataImpl(GLuint num_paths,
- GLuint path_base,
- uint32_t shm_id,
- uint32_t shm_offset,
- std::unique_ptr<GLuint[]>* out_buffer) {
- uint32_t paths_size = 0;
- if (!base::CheckMul(num_paths, sizeof(T)).AssignIfValid(&paths_size)) {
- error_ = error::kOutOfBounds;
- return false;
- }
- T* paths = decoder_->GetSharedMemoryAs<T*>(shm_id, shm_offset, paths_size);
- if (!paths) {
- error_ = error::kOutOfBounds;
- return false;
- }
- std::unique_ptr<GLuint[]> result_paths(new GLuint[num_paths]);
- bool has_paths = false;
- for (GLuint i = 0; i < num_paths; ++i) {
- GLuint service_id = 0;
- // The below addition is ok even with over- and underflows.
- // There is no difference if client passes:
- // * base==4, T=GLbyte, paths[0]==0xfa (-6)
- // * base==0xffffffff, T=GLuint, paths[0]==0xffffffff
- // * base==0, T=GLuint, paths[0]==0xfffffffe
- // For the all the cases, the interpretation is that
- // client intends to use the path 0xfffffffe.
- // The client_id verification is only done after the addition.
- uint32_t client_id = path_base + paths[i];
- if (decoder_->path_manager()->GetPath(client_id, &service_id))
- has_paths = true;
- // Will use path 0 if the path is not found. This is in line
- // of the spec: missing paths will produce nothing, let
- // the instanced draw continue.
- result_paths[i] = service_id;
- }
- out_buffer->reset(result_paths.release());
-
- return has_paths;
- }
- GLES2DecoderImpl* decoder_;
- ErrorState* error_state_;
- const Validators* validators_;
- const char* function_name_;
- error::Error error_;
-};
-
-error::Error GLES2DecoderImpl::HandleGenPathsCHROMIUM(
- uint32_t immediate_data_size,
- const volatile void* cmd_data) {
- const volatile gles2::cmds::GenPathsCHROMIUM& c =
- *static_cast<const volatile gles2::cmds::GenPathsCHROMIUM*>(cmd_data);
- if (!features().chromium_path_rendering)
- return error::kUnknownCommand;
-
- PathCommandValidatorContext v(this, "glGenPathsCHROMIUM");
- GLsizei range = 0;
- if (!v.GetPathRange(c, &range))
- return v.error();
-
- GLuint first_client_id = static_cast<GLuint>(c.first_client_id);
- if (first_client_id == 0)
- return error::kInvalidArguments;
-
- if (range == 0)
- return error::kNoError;
-
- if (!GenPathsCHROMIUMHelper(first_client_id, range))
- return error::kInvalidArguments;
-
- return error::kNoError;
-}
-error::Error GLES2DecoderImpl::HandleDeletePathsCHROMIUM(
- uint32_t immediate_data_size,
- const volatile void* cmd_data) {
- const volatile gles2::cmds::DeletePathsCHROMIUM& c =
- *static_cast<const volatile gles2::cmds::DeletePathsCHROMIUM*>(cmd_data);
- if (!features().chromium_path_rendering)
- return error::kUnknownCommand;
-
- PathCommandValidatorContext v(this, "glDeletePathsCHROMIUM");
- GLsizei range = 0;
- if (!v.GetPathRange(c, &range))
- return v.error();
-
- if (range == 0)
- return error::kNoError;
-
- GLuint first_client_id = c.first_client_id;
- // first_client_id can be 0, because non-existing path ids are skipped.
-
- if (!DeletePathsCHROMIUMHelper(first_client_id, range))
- return error::kInvalidArguments;
-
- return error::kNoError;
-}
-
-error::Error GLES2DecoderImpl::HandlePathCommandsCHROMIUM(
- uint32_t immediate_data_size,
- const volatile void* cmd_data) {
- static const char kFunctionName[] = "glPathCommandsCHROMIUM";
- const volatile gles2::cmds::PathCommandsCHROMIUM& c =
- *static_cast<const volatile gles2::cmds::PathCommandsCHROMIUM*>(cmd_data);
- if (!features().chromium_path_rendering)
- return error::kUnknownCommand;
-
- GLuint service_id = 0;
- if (!path_manager()->GetPath(static_cast<GLuint>(c.path), &service_id)) {
- LOCAL_SET_GL_ERROR(GL_INVALID_OPERATION, kFunctionName,
- "invalid path name");
- return error::kNoError;
- }
-
- GLsizei num_commands = static_cast<GLsizei>(c.numCommands);
- if (num_commands < 0) {
- LOCAL_SET_GL_ERROR(GL_INVALID_VALUE, kFunctionName, "numCommands < 0");
- return error::kNoError;
- }
-
- GLsizei num_coords = static_cast<uint32_t>(c.numCoords);
- if (num_coords < 0) {
- LOCAL_SET_GL_ERROR(GL_INVALID_VALUE, kFunctionName, "numCoords < 0");
- return error::kNoError;
- }
-
- GLenum coord_type = static_cast<uint32_t>(c.coordType);
- if (!validators_->path_coord_type.IsValid(static_cast<GLint>(coord_type))) {
- LOCAL_SET_GL_ERROR(GL_INVALID_ENUM, kFunctionName, "invalid coordType");
- return error::kNoError;
- }
-
- std::unique_ptr<GLubyte[]> commands;
- base::CheckedNumeric<GLsizei> num_coords_expected = 0;
-
- if (num_commands > 0) {
- uint32_t commands_shm_id = static_cast<uint32_t>(c.commands_shm_id);
- uint32_t commands_shm_offset = static_cast<uint32_t>(c.commands_shm_offset);
- if (commands_shm_id != 0 || commands_shm_offset != 0) {
- const GLubyte* shared_commands = GetSharedMemoryAs<const GLubyte*>(
- commands_shm_id, commands_shm_offset, num_commands);
- if (shared_commands) {
- commands.reset(new GLubyte[num_commands]);
- memcpy(commands.get(), shared_commands, num_commands);
- }
- }
- if (!commands)
- return error::kOutOfBounds;
-
- for (GLsizei i = 0; i < num_commands; ++i) {
- switch (commands[i]) {
- case GL_CLOSE_PATH_CHROMIUM:
- // Close has no coords.
- break;
- case GL_MOVE_TO_CHROMIUM:
- // Fallthrough.
- case GL_LINE_TO_CHROMIUM:
- num_coords_expected += 2;
- break;
- case GL_QUADRATIC_CURVE_TO_CHROMIUM:
- num_coords_expected += 4;
- break;
- case GL_CUBIC_CURVE_TO_CHROMIUM:
- num_coords_expected += 6;
- break;
- case GL_CONIC_CURVE_TO_CHROMIUM:
- num_coords_expected += 5;
- break;
- default:
- LOCAL_SET_GL_ERROR(GL_INVALID_ENUM, kFunctionName, "invalid command");
- return error::kNoError;
- }
- }
- }
-
- if (!num_coords_expected.IsValid() ||
- num_coords != num_coords_expected.ValueOrDefault(0)) {
- LOCAL_SET_GL_ERROR(GL_INVALID_OPERATION, kFunctionName,
- "numCoords does not match commands");
- return error::kNoError;
- }
-
- const void* coords = nullptr;
-
- if (num_coords > 0) {
- uint32_t coords_size = 0;
- uint32_t coord_type_size =
- GLES2Util::GetGLTypeSizeForPathCoordType(coord_type);
- if (!base::CheckMul(num_coords, coord_type_size)
- .AssignIfValid(&coords_size))
- return error::kOutOfBounds;
-
- uint32_t coords_shm_id = static_cast<uint32_t>(c.coords_shm_id);
- uint32_t coords_shm_offset = static_cast<uint32_t>(c.coords_shm_offset);
- if (coords_shm_id != 0 || coords_shm_offset != 0)
- coords = GetSharedMemoryAs<const void*>(coords_shm_id, coords_shm_offset,
- coords_size);
-
- if (!coords)
- return error::kOutOfBounds;
- }
-
- api()->glPathCommandsNVFn(service_id, num_commands, commands.get(),
- num_coords, coord_type, coords);
-
- return error::kNoError;
-}
-
-error::Error GLES2DecoderImpl::HandlePathParameterfCHROMIUM(
- uint32_t immediate_data_size,
- const volatile void* cmd_data) {
- static const char kFunctionName[] = "glPathParameterfCHROMIUM";
- const volatile gles2::cmds::PathParameterfCHROMIUM& c =
- *static_cast<const volatile gles2::cmds::PathParameterfCHROMIUM*>(
- cmd_data);
- if (!features().chromium_path_rendering)
- return error::kUnknownCommand;
-
- GLuint service_id = 0;
- if (!path_manager()->GetPath(static_cast<GLuint>(c.path), &service_id)) {
- LOCAL_SET_GL_ERROR(GL_INVALID_OPERATION, kFunctionName,
- "invalid path name");
- return error::kNoError;
- }
-
- GLenum pname = static_cast<GLenum>(c.pname);
- GLfloat value = static_cast<GLfloat>(c.value);
- bool hasValueError = false;
-
- switch (pname) {
- case GL_PATH_STROKE_WIDTH_CHROMIUM:
- case GL_PATH_MITER_LIMIT_CHROMIUM:
- hasValueError = std::isnan(value) || !std::isfinite(value) || value < 0;
- break;
- case GL_PATH_STROKE_BOUND_CHROMIUM:
- value = base::ClampToRange(value, 0.0f, 1.0f);
- break;
- case GL_PATH_END_CAPS_CHROMIUM:
- hasValueError = !validators_->path_parameter_cap_values.IsValid(
- static_cast<GLint>(value));
- break;
- case GL_PATH_JOIN_STYLE_CHROMIUM:
- hasValueError = !validators_->path_parameter_join_values.IsValid(
- static_cast<GLint>(value));
- break;
- default:
- DCHECK(!validators_->path_parameter.IsValid(pname));
- LOCAL_SET_GL_ERROR_INVALID_ENUM(kFunctionName, pname, "pname");
- return error::kNoError;
- }
- DCHECK(validators_->path_parameter.IsValid(pname));
-
- if (hasValueError) {
- LOCAL_SET_GL_ERROR(GL_INVALID_VALUE, kFunctionName, "value not correct");
- return error::kNoError;
- }
-
- api()->glPathParameterfNVFn(service_id, pname, value);
- return error::kNoError;
-}
-
-error::Error GLES2DecoderImpl::HandlePathParameteriCHROMIUM(
- uint32_t immediate_data_size,
- const volatile void* cmd_data) {
- static const char kFunctionName[] = "glPathParameteriCHROMIUM";
- const volatile gles2::cmds::PathParameteriCHROMIUM& c =
- *static_cast<const volatile gles2::cmds::PathParameteriCHROMIUM*>(
- cmd_data);
- if (!features().chromium_path_rendering)
- return error::kUnknownCommand;
-
- GLuint service_id = 0;
- if (!path_manager()->GetPath(static_cast<GLuint>(c.path), &service_id)) {
- LOCAL_SET_GL_ERROR(GL_INVALID_OPERATION, kFunctionName,
- "invalid path name");
- return error::kNoError;
- }
-
- GLenum pname = static_cast<GLenum>(c.pname);
- GLint value = static_cast<GLint>(c.value);
- bool hasValueError = false;
-
- switch (pname) {
- case GL_PATH_STROKE_WIDTH_CHROMIUM:
- case GL_PATH_MITER_LIMIT_CHROMIUM:
- hasValueError = value < 0;
- break;
- case GL_PATH_STROKE_BOUND_CHROMIUM:
- value = base::ClampToRange(value, 0, 1);
- break;
- case GL_PATH_END_CAPS_CHROMIUM:
- hasValueError = !validators_->path_parameter_cap_values.IsValid(value);
- break;
- case GL_PATH_JOIN_STYLE_CHROMIUM:
- hasValueError = !validators_->path_parameter_join_values.IsValid(value);
- break;
- default:
- DCHECK(!validators_->path_parameter.IsValid(pname));
- LOCAL_SET_GL_ERROR_INVALID_ENUM(kFunctionName, pname, "pname");
- return error::kNoError;
- }
- DCHECK(validators_->path_parameter.IsValid(pname));
-
- if (hasValueError) {
- LOCAL_SET_GL_ERROR(GL_INVALID_VALUE, kFunctionName, "value not correct");
- return error::kNoError;
- }
-
- api()->glPathParameteriNVFn(service_id, pname, value);
- return error::kNoError;
-}
-
-error::Error GLES2DecoderImpl::HandleStencilFillPathCHROMIUM(
- uint32_t immediate_data_size,
- const volatile void* cmd_data) {
- static const char kFunctionName[] = "glStencilFillPathCHROMIUM";
- const volatile gles2::cmds::StencilFillPathCHROMIUM& c =
- *static_cast<const volatile gles2::cmds::StencilFillPathCHROMIUM*>(
- cmd_data);
- if (!features().chromium_path_rendering)
- return error::kUnknownCommand;
- PathCommandValidatorContext v(this, kFunctionName);
- GLenum fill_mode = GL_COUNT_UP_CHROMIUM;
- GLuint mask = 0;
- if (!v.GetFillModeAndMask(c, &fill_mode, &mask))
- return v.error();
- GLuint service_id = 0;
- if (!path_manager()->GetPath(static_cast<GLuint>(c.path), &service_id)) {
- // "If /path/ does not name an existing path object, the command does
- // nothing (and no error is generated)."
- // This holds for other rendering functions, too.
- return error::kNoError;
- }
- if (!CheckBoundDrawFramebufferValid(kFunctionName))
- return error::kNoError;
- ApplyDirtyState();
- api()->glStencilFillPathNVFn(service_id, fill_mode, mask);
- return error::kNoError;
-}
-
-error::Error GLES2DecoderImpl::HandleStencilStrokePathCHROMIUM(
- uint32_t immediate_data_size,
- const volatile void* cmd_data) {
- static const char kFunctionName[] = "glStencilStrokePathCHROMIUM";
- const volatile gles2::cmds::StencilStrokePathCHROMIUM& c =
- *static_cast<const volatile gles2::cmds::StencilStrokePathCHROMIUM*>(
- cmd_data);
- if (!features().chromium_path_rendering)
- return error::kUnknownCommand;
-
- GLuint service_id = 0;
- if (!path_manager()->GetPath(static_cast<GLuint>(c.path), &service_id)) {
- return error::kNoError;
- }
- GLint reference = static_cast<GLint>(c.reference);
- GLuint mask = static_cast<GLuint>(c.mask);
- if (!CheckBoundDrawFramebufferValid(kFunctionName))
- return error::kNoError;
- ApplyDirtyState();
- api()->glStencilStrokePathNVFn(service_id, reference, mask);
- return error::kNoError;
-}
-
-error::Error GLES2DecoderImpl::HandleCoverFillPathCHROMIUM(
- uint32_t immediate_data_size,
- const volatile void* cmd_data) {
- static const char kFunctionName[] = "glCoverFillPathCHROMIUM";
- const volatile gles2::cmds::CoverFillPathCHROMIUM& c =
- *static_cast<const volatile gles2::cmds::CoverFillPathCHROMIUM*>(
- cmd_data);
- if (!features().chromium_path_rendering)
- return error::kUnknownCommand;
-
- PathCommandValidatorContext v(this, kFunctionName);
- GLenum cover_mode = GL_BOUNDING_BOX_CHROMIUM;
- if (!v.GetCoverMode(c, &cover_mode))
- return v.error();
-
- GLuint service_id = 0;
- if (!path_manager()->GetPath(static_cast<GLuint>(c.path), &service_id))
- return error::kNoError;
- if (!CheckBoundDrawFramebufferValid(kFunctionName))
- return error::kNoError;
- ApplyDirtyState();
- api()->glCoverFillPathNVFn(service_id, cover_mode);
- return error::kNoError;
-}
-
-error::Error GLES2DecoderImpl::HandleCoverStrokePathCHROMIUM(
- uint32_t immediate_data_size,
- const volatile void* cmd_data) {
- static const char kFunctionName[] = "glCoverStrokePathCHROMIUM";
- const volatile gles2::cmds::CoverStrokePathCHROMIUM& c =
- *static_cast<const volatile gles2::cmds::CoverStrokePathCHROMIUM*>(
- cmd_data);
- if (!features().chromium_path_rendering)
- return error::kUnknownCommand;
-
- PathCommandValidatorContext v(this, kFunctionName);
- GLenum cover_mode = GL_BOUNDING_BOX_CHROMIUM;
- if (!v.GetCoverMode(c, &cover_mode))
- return v.error();
-
- GLuint service_id = 0;
- if (!path_manager()->GetPath(static_cast<GLuint>(c.path), &service_id))
- return error::kNoError;
-
- if (!CheckBoundDrawFramebufferValid(kFunctionName))
- return error::kNoError;
- ApplyDirtyState();
- api()->glCoverStrokePathNVFn(service_id, cover_mode);
- return error::kNoError;
-}
-
-error::Error GLES2DecoderImpl::HandleStencilThenCoverFillPathCHROMIUM(
- uint32_t immediate_data_size,
- const volatile void* cmd_data) {
- static const char kFunctionName[] = "glStencilThenCoverFillPathCHROMIUM";
- const volatile gles2::cmds::StencilThenCoverFillPathCHROMIUM& c =
- *static_cast<
- const volatile gles2::cmds::StencilThenCoverFillPathCHROMIUM*>(
- cmd_data);
- if (!features().chromium_path_rendering)
- return error::kUnknownCommand;
-
- PathCommandValidatorContext v(this, kFunctionName);
- GLenum fill_mode = GL_COUNT_UP_CHROMIUM;
- GLuint mask = 0;
- GLenum cover_mode = GL_BOUNDING_BOX_CHROMIUM;
- if (!v.GetFillModeAndMask(c, &fill_mode, &mask) ||
- !v.GetCoverMode(c, &cover_mode))
- return v.error();
-
- GLuint service_id = 0;
- if (!path_manager()->GetPath(static_cast<GLuint>(c.path), &service_id))
- return error::kNoError;
-
- if (!CheckBoundDrawFramebufferValid(kFunctionName))
- return error::kNoError;
- ApplyDirtyState();
- api()->glStencilThenCoverFillPathNVFn(service_id, fill_mode, mask,
- cover_mode);
- return error::kNoError;
-}
-
-error::Error GLES2DecoderImpl::HandleStencilThenCoverStrokePathCHROMIUM(
- uint32_t immediate_data_size,
- const volatile void* cmd_data) {
- static const char kFunctionName[] = "glStencilThenCoverStrokePathCHROMIUM";
- const volatile gles2::cmds::StencilThenCoverStrokePathCHROMIUM& c =
- *static_cast<
- const volatile gles2::cmds::StencilThenCoverStrokePathCHROMIUM*>(
- cmd_data);
- if (!features().chromium_path_rendering)
- return error::kUnknownCommand;
-
- PathCommandValidatorContext v(this, kFunctionName);
- GLenum cover_mode = GL_BOUNDING_BOX_CHROMIUM;
- if (!v.GetCoverMode(c, &cover_mode))
- return v.error();
-
- GLuint service_id = 0;
- if (!path_manager()->GetPath(static_cast<GLuint>(c.path), &service_id))
- return error::kNoError;
-
- GLint reference = static_cast<GLint>(c.reference);
- GLuint mask = static_cast<GLuint>(c.mask);
-
- if (!CheckBoundDrawFramebufferValid(kFunctionName))
- return error::kNoError;
- ApplyDirtyState();
- api()->glStencilThenCoverStrokePathNVFn(service_id, reference, mask,
- cover_mode);
- return error::kNoError;
-}
-
-error::Error GLES2DecoderImpl::HandleStencilFillPathInstancedCHROMIUM(
- uint32_t immediate_data_size,
- const volatile void* cmd_data) {
- static const char kFunctionName[] = "glStencilFillPathInstancedCHROMIUM";
- const volatile gles2::cmds::StencilFillPathInstancedCHROMIUM& c =
- *static_cast<
- const volatile gles2::cmds::StencilFillPathInstancedCHROMIUM*>(
- cmd_data);
- if (!features().chromium_path_rendering)
- return error::kUnknownCommand;
-
- PathCommandValidatorContext v(this, kFunctionName);
- GLuint num_paths = 0;
- GLenum path_name_type = GL_NONE;
- GLenum fill_mode = GL_COUNT_UP_CHROMIUM;
- GLuint mask = 0;
- GLenum transform_type = GL_NONE;
- if (!v.GetPathCountAndType(c, &num_paths, &path_name_type) ||
- !v.GetFillModeAndMask(c, &fill_mode, &mask) ||
- !v.GetTransformType(c, &transform_type))
- return v.error();
-
- if (num_paths == 0)
- return error::kNoError;
-
- std::unique_ptr<GLuint[]> paths;
- if (!v.GetPathNameData(c, num_paths, path_name_type, &paths))
- return v.error();
-
- const GLfloat* transforms = nullptr;
- if (!v.GetTransforms(c, num_paths, transform_type, &transforms))
- return v.error();
-
- if (!CheckBoundDrawFramebufferValid(kFunctionName))
- return error::kNoError;
- ApplyDirtyState();
- api()->glStencilFillPathInstancedNVFn(num_paths, GL_UNSIGNED_INT, paths.get(),
- 0, fill_mode, mask, transform_type,
- transforms);
- return error::kNoError;
-}
-
-error::Error GLES2DecoderImpl::HandleStencilStrokePathInstancedCHROMIUM(
- uint32_t immediate_data_size,
- const volatile void* cmd_data) {
- static const char kFunctionName[] = "glStencilStrokePathInstancedCHROMIUM";
- const volatile gles2::cmds::StencilStrokePathInstancedCHROMIUM& c =
- *static_cast<
- const volatile gles2::cmds::StencilStrokePathInstancedCHROMIUM*>(
- cmd_data);
- if (!features().chromium_path_rendering)
- return error::kUnknownCommand;
-
- PathCommandValidatorContext v(this, kFunctionName);
- GLuint num_paths = 0;
- GLenum path_name_type = GL_NONE;
- GLenum transform_type = GL_NONE;
- if (!v.GetPathCountAndType(c, &num_paths, &path_name_type) ||
- !v.GetTransformType(c, &transform_type))
- return v.error();
-
- if (num_paths == 0)
- return error::kNoError;
-
- std::unique_ptr<GLuint[]> paths;
- if (!v.GetPathNameData(c, num_paths, path_name_type, &paths))
- return v.error();
-
- const GLfloat* transforms = nullptr;
- if (!v.GetTransforms(c, num_paths, transform_type, &transforms))
- return v.error();
-
- GLint reference = static_cast<GLint>(c.reference);
- GLuint mask = static_cast<GLuint>(c.mask);
- if (!CheckBoundDrawFramebufferValid(kFunctionName))
- return error::kNoError;
- ApplyDirtyState();
- api()->glStencilStrokePathInstancedNVFn(num_paths, GL_UNSIGNED_INT,
- paths.get(), 0, reference, mask,
- transform_type, transforms);
- return error::kNoError;
-}
-
-error::Error GLES2DecoderImpl::HandleCoverFillPathInstancedCHROMIUM(
- uint32_t immediate_data_size,
- const volatile void* cmd_data) {
- static const char kFunctionName[] = "glCoverFillPathInstancedCHROMIUM";
- const volatile gles2::cmds::CoverFillPathInstancedCHROMIUM& c =
- *static_cast<const volatile gles2::cmds::CoverFillPathInstancedCHROMIUM*>(
- cmd_data);
- if (!features().chromium_path_rendering)
- return error::kUnknownCommand;
-
- PathCommandValidatorContext v(this, kFunctionName);
- GLuint num_paths = 0;
- GLenum path_name_type = GL_NONE;
- GLenum cover_mode = GL_BOUNDING_BOX_OF_BOUNDING_BOXES_CHROMIUM;
- GLenum transform_type = GL_NONE;
- if (!v.GetPathCountAndType(c, &num_paths, &path_name_type) ||
- !v.GetCoverMode(c, &cover_mode) ||
- !v.GetTransformType(c, &transform_type))
- return v.error();
-
- if (num_paths == 0)
- return error::kNoError;
-
- std::unique_ptr<GLuint[]> paths;
- if (!v.GetPathNameData(c, num_paths, path_name_type, &paths))
- return v.error();
-
- const GLfloat* transforms = nullptr;
- if (!v.GetTransforms(c, num_paths, transform_type, &transforms))
- return v.error();
-
- if (!CheckBoundDrawFramebufferValid(kFunctionName))
- return error::kNoError;
- ApplyDirtyState();
- api()->glCoverFillPathInstancedNVFn(num_paths, GL_UNSIGNED_INT, paths.get(),
- 0, cover_mode, transform_type,
- transforms);
- return error::kNoError;
-}
-
-error::Error GLES2DecoderImpl::HandleCoverStrokePathInstancedCHROMIUM(
- uint32_t immediate_data_size,
- const volatile void* cmd_data) {
- static const char kFunctionName[] = "glCoverStrokePathInstancedCHROMIUM";
- const volatile gles2::cmds::CoverStrokePathInstancedCHROMIUM& c =
- *static_cast<
- const volatile gles2::cmds::CoverStrokePathInstancedCHROMIUM*>(
- cmd_data);
- if (!features().chromium_path_rendering)
- return error::kUnknownCommand;
-
- PathCommandValidatorContext v(this, kFunctionName);
- GLuint num_paths = 0;
- GLenum path_name_type = GL_NONE;
- GLenum cover_mode = GL_BOUNDING_BOX_OF_BOUNDING_BOXES_CHROMIUM;
- GLenum transform_type = GL_NONE;
- if (!v.GetPathCountAndType(c, &num_paths, &path_name_type) ||
- !v.GetCoverMode(c, &cover_mode) ||
- !v.GetTransformType(c, &transform_type))
- return v.error();
-
- if (num_paths == 0)
- return error::kNoError;
-
- std::unique_ptr<GLuint[]> paths;
- if (!v.GetPathNameData(c, num_paths, path_name_type, &paths))
- return v.error();
-
- const GLfloat* transforms = nullptr;
- if (!v.GetTransforms(c, num_paths, transform_type, &transforms))
- return v.error();
-
- if (!CheckBoundDrawFramebufferValid(kFunctionName))
- return error::kNoError;
- ApplyDirtyState();
- api()->glCoverStrokePathInstancedNVFn(num_paths, GL_UNSIGNED_INT, paths.get(),
- 0, cover_mode, transform_type,
- transforms);
- return error::kNoError;
-}
-
-error::Error GLES2DecoderImpl::HandleStencilThenCoverFillPathInstancedCHROMIUM(
- uint32_t immediate_data_size,
- const volatile void* cmd_data) {
- static const char kFunctionName[] =
- "glStencilThenCoverFillPathInstancedCHROMIUM";
- const volatile gles2::cmds::StencilThenCoverFillPathInstancedCHROMIUM& c =
- *static_cast<const volatile gles2::cmds::
- StencilThenCoverFillPathInstancedCHROMIUM*>(cmd_data);
- if (!features().chromium_path_rendering)
- return error::kUnknownCommand;
- PathCommandValidatorContext v(this, kFunctionName);
-
- GLuint num_paths = 0;
- GLenum path_name_type = GL_NONE;
- GLenum fill_mode = GL_COUNT_UP_CHROMIUM;
- GLuint mask = 0;
- GLenum cover_mode = GL_BOUNDING_BOX_OF_BOUNDING_BOXES_CHROMIUM;
- GLenum transform_type = GL_NONE;
- if (!v.GetPathCountAndType(c, &num_paths, &path_name_type) ||
- !v.GetFillModeAndMask(c, &fill_mode, &mask) ||
- !v.GetCoverMode(c, &cover_mode) ||
- !v.GetTransformType(c, &transform_type))
- return v.error();
-
- if (num_paths == 0)
- return error::kNoError;
-
- std::unique_ptr<GLuint[]> paths;
- if (!v.GetPathNameData(c, num_paths, path_name_type, &paths))
- return v.error();
-
- const GLfloat* transforms = nullptr;
- if (!v.GetTransforms(c, num_paths, transform_type, &transforms))
- return v.error();
-
- if (!CheckBoundDrawFramebufferValid(kFunctionName))
- return error::kNoError;
- ApplyDirtyState();
- api()->glStencilThenCoverFillPathInstancedNVFn(
- num_paths, GL_UNSIGNED_INT, paths.get(), 0, fill_mode, mask, cover_mode,
- transform_type, transforms);
- return error::kNoError;
-}
-
-error::Error
-GLES2DecoderImpl::HandleStencilThenCoverStrokePathInstancedCHROMIUM(
- uint32_t immediate_data_size,
- const volatile void* cmd_data) {
- static const char kFunctionName[] =
- "glStencilThenCoverStrokeInstancedCHROMIUM";
- const volatile gles2::cmds::StencilThenCoverStrokePathInstancedCHROMIUM& c =
- *static_cast<const volatile gles2::cmds::
- StencilThenCoverStrokePathInstancedCHROMIUM*>(cmd_data);
- if (!features().chromium_path_rendering)
- return error::kUnknownCommand;
- PathCommandValidatorContext v(this, kFunctionName);
- GLuint num_paths = 0;
- GLenum path_name_type = GL_NONE;
- GLenum cover_mode = GL_BOUNDING_BOX_OF_BOUNDING_BOXES_CHROMIUM;
- GLenum transform_type = GL_NONE;
- if (!v.GetPathCountAndType(c, &num_paths, &path_name_type) ||
- !v.GetCoverMode(c, &cover_mode) ||
- !v.GetTransformType(c, &transform_type))
- return v.error();
-
- if (num_paths == 0)
- return error::kNoError;
-
- std::unique_ptr<GLuint[]> paths;
- if (!v.GetPathNameData(c, num_paths, path_name_type, &paths))
- return v.error();
-
- const GLfloat* transforms = nullptr;
- if (!v.GetTransforms(c, num_paths, transform_type, &transforms))
- return v.error();
-
- GLint reference = static_cast<GLint>(c.reference);
- GLuint mask = static_cast<GLuint>(c.mask);
-
- if (!CheckBoundDrawFramebufferValid(kFunctionName))
- return error::kNoError;
- ApplyDirtyState();
- api()->glStencilThenCoverStrokePathInstancedNVFn(
- num_paths, GL_UNSIGNED_INT, paths.get(), 0, reference, mask, cover_mode,
- transform_type, transforms);
- return error::kNoError;
-}
-
-void GLES2DecoderImpl::DoBindFragmentInputLocationCHROMIUM(
- GLuint program_id,
- GLint location,
- const std::string& name) {
- static const char kFunctionName[] = "glBindFragmentInputLocationCHROMIUM";
- if (!StringIsValidForGLES(name)) {
- LOCAL_SET_GL_ERROR(GL_INVALID_VALUE, kFunctionName, "invalid character");
- return;
- }
- if (ProgramManager::HasBuiltInPrefix(name)) {
- LOCAL_SET_GL_ERROR(GL_INVALID_OPERATION, kFunctionName, "reserved prefix");
- return;
- }
- Program* program = GetProgram(program_id);
- if (!program || program->IsDeleted()) {
- LOCAL_SET_GL_ERROR(GL_INVALID_OPERATION, kFunctionName, "invalid program");
- return;
- }
- if (location < 0 ||
- static_cast<uint32_t>(location) >= group_->max_varying_vectors() * 4) {
- LOCAL_SET_GL_ERROR(GL_INVALID_VALUE, kFunctionName,
- "location out of range");
- return;
- }
-
- program->SetFragmentInputLocationBinding(name, location);
-}
-
const SamplerState& GLES2DecoderImpl::GetSamplerStateForTextureUnit(
GLenum target, GLuint unit) {
if (features().enable_samplers) {
@@ -20847,137 +19936,6 @@ void GLES2DecoderImpl::ClearFramebufferForWorkaround(GLbitfield mask) {
state_.color_clear_alpha, state_.depth_clear, state_.stencil_clear);
}
-error::Error GLES2DecoderImpl::HandleBindFragmentInputLocationCHROMIUMBucket(
- uint32_t immediate_data_size,
- const volatile void* cmd_data) {
- const volatile gles2::cmds::BindFragmentInputLocationCHROMIUMBucket& c =
- *static_cast<
- const volatile gles2::cmds::BindFragmentInputLocationCHROMIUMBucket*>(
- cmd_data);
- if (!features().chromium_path_rendering) {
- return error::kUnknownCommand;
- }
-
- GLuint program = static_cast<GLuint>(c.program);
- GLint location = static_cast<GLint>(c.location);
- Bucket* bucket = GetBucket(c.name_bucket_id);
- if (!bucket || bucket->size() == 0) {
- return error::kInvalidArguments;
- }
- std::string name_str;
- if (!bucket->GetAsString(&name_str)) {
- return error::kInvalidArguments;
- }
- DoBindFragmentInputLocationCHROMIUM(program, location, name_str);
- return error::kNoError;
-}
-
-error::Error GLES2DecoderImpl::HandleProgramPathFragmentInputGenCHROMIUM(
- uint32_t immediate_data_size,
- const volatile void* cmd_data) {
- static const char kFunctionName[] = "glProgramPathFragmentInputGenCHROMIUM";
- const volatile gles2::cmds::ProgramPathFragmentInputGenCHROMIUM& c =
- *static_cast<
- const volatile gles2::cmds::ProgramPathFragmentInputGenCHROMIUM*>(
- cmd_data);
- if (!features().chromium_path_rendering) {
- return error::kUnknownCommand;
- }
-
- GLint program_id = static_cast<GLint>(c.program);
-
- Program* program = GetProgram(program_id);
- if (!program || !program->IsValid() || program->IsDeleted()) {
- LOCAL_SET_GL_ERROR(GL_INVALID_OPERATION, kFunctionName, "invalid program");
- return error::kNoError;
- }
-
- GLenum gen_mode = static_cast<GLint>(c.genMode);
- if (!validators_->path_fragment_input_gen_mode.IsValid(gen_mode)) {
- LOCAL_SET_GL_ERROR_INVALID_ENUM(kFunctionName, gen_mode, "genMode");
- return error::kNoError;
- }
-
- GLint components = static_cast<GLint>(c.components);
- if (components < 0 || components > 4) {
- LOCAL_SET_GL_ERROR(GL_INVALID_VALUE, kFunctionName,
- "components out of range");
- return error::kNoError;
- }
-
- if ((components != 0 && gen_mode == GL_NONE) ||
- (components == 0 && gen_mode != GL_NONE)) {
- LOCAL_SET_GL_ERROR(GL_INVALID_VALUE, kFunctionName,
- "components and genMode do not match");
- return error::kNoError;
- }
-
- GLint location = static_cast<GLint>(c.location);
- if (program->IsInactiveFragmentInputLocationByFakeLocation(location))
- return error::kNoError;
-
- const Program::FragmentInputInfo* fragment_input_info =
- program->GetFragmentInputInfoByFakeLocation(location);
- if (!fragment_input_info) {
- LOCAL_SET_GL_ERROR(GL_INVALID_OPERATION, kFunctionName, "unknown location");
- return error::kNoError;
- }
- GLint real_location = fragment_input_info->location;
-
- const GLfloat* coeffs = nullptr;
-
- if (components > 0) {
- GLint components_needed = -1;
-
- switch (fragment_input_info->type) {
- case GL_FLOAT:
- components_needed = 1;
- break;
- case GL_FLOAT_VEC2:
- components_needed = 2;
- break;
- case GL_FLOAT_VEC3:
- components_needed = 3;
- break;
- case GL_FLOAT_VEC4:
- components_needed = 4;
- break;
- default:
- LOCAL_SET_GL_ERROR(GL_INVALID_OPERATION, kFunctionName,
- "fragment input type is not single-precision "
- "floating-point scalar or vector");
- return error::kNoError;
- }
-
- if (components_needed != components) {
- LOCAL_SET_GL_ERROR(GL_INVALID_OPERATION, kFunctionName,
- "components does not match fragment input type");
- return error::kNoError;
- }
- uint32_t coeffs_per_component =
- GLES2Util::GetCoefficientCountForGLPathFragmentInputGenMode(gen_mode);
- // The multiplication below will not overflow.
- DCHECK(coeffs_per_component > 0 && coeffs_per_component <= 4);
- DCHECK(components > 0 && components <= 4);
- uint32_t coeffs_size = sizeof(GLfloat) * coeffs_per_component * components;
-
- uint32_t coeffs_shm_id = static_cast<uint32_t>(c.coeffs_shm_id);
- uint32_t coeffs_shm_offset = static_cast<uint32_t>(c.coeffs_shm_offset);
-
- if (coeffs_shm_id != 0 || coeffs_shm_offset != 0) {
- coeffs = GetSharedMemoryAs<const GLfloat*>(
- coeffs_shm_id, coeffs_shm_offset, coeffs_size);
- }
-
- if (!coeffs) {
- return error::kOutOfBounds;
- }
- }
- api()->glProgramPathFragmentInputGenNVFn(program->service_id(), real_location,
- gen_mode, components, coeffs);
- return error::kNoError;
-}
-
void GLES2DecoderImpl::RestoreAllExternalTextureBindingsIfNeeded() {
if (texture_manager()->GetServiceIdGeneration() ==
texture_manager_service_id_generation_)
diff --git a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_autogen.h b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_autogen.h
index 7cccbf52378..f8be401bf99 100644
--- a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_autogen.h
+++ b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_autogen.h
@@ -5263,105 +5263,6 @@ error::Error GLES2DecoderImpl::HandleScheduleDCLayerCHROMIUM(
return error::kNoError;
}
-error::Error GLES2DecoderImpl::HandleMatrixLoadfCHROMIUMImmediate(
- uint32_t immediate_data_size,
- const volatile void* cmd_data) {
- const volatile gles2::cmds::MatrixLoadfCHROMIUMImmediate& c =
- *static_cast<const volatile gles2::cmds::MatrixLoadfCHROMIUMImmediate*>(
- cmd_data);
- if (!features().chromium_path_rendering) {
- return error::kUnknownCommand;
- }
-
- GLenum matrixMode = static_cast<GLenum>(c.matrixMode);
- uint32_t m_size;
- if (!GLES2Util::ComputeDataSize<GLfloat, 16>(1, &m_size)) {
- return error::kOutOfBounds;
- }
- if (m_size > immediate_data_size) {
- return error::kOutOfBounds;
- }
- volatile const GLfloat* m = GetImmediateDataAs<volatile const GLfloat*>(
- c, m_size, immediate_data_size);
- if (!validators_->matrix_mode.IsValid(matrixMode)) {
- LOCAL_SET_GL_ERROR_INVALID_ENUM("glMatrixLoadfCHROMIUM", matrixMode,
- "matrixMode");
- return error::kNoError;
- }
- if (m == nullptr) {
- return error::kOutOfBounds;
- }
- DoMatrixLoadfCHROMIUM(matrixMode, m);
- return error::kNoError;
-}
-
-error::Error GLES2DecoderImpl::HandleMatrixLoadIdentityCHROMIUM(
- uint32_t immediate_data_size,
- const volatile void* cmd_data) {
- const volatile gles2::cmds::MatrixLoadIdentityCHROMIUM& c =
- *static_cast<const volatile gles2::cmds::MatrixLoadIdentityCHROMIUM*>(
- cmd_data);
- if (!features().chromium_path_rendering) {
- return error::kUnknownCommand;
- }
-
- GLenum matrixMode = static_cast<GLenum>(c.matrixMode);
- if (!validators_->matrix_mode.IsValid(matrixMode)) {
- LOCAL_SET_GL_ERROR_INVALID_ENUM("glMatrixLoadIdentityCHROMIUM", matrixMode,
- "matrixMode");
- return error::kNoError;
- }
- DoMatrixLoadIdentityCHROMIUM(matrixMode);
- return error::kNoError;
-}
-
-error::Error GLES2DecoderImpl::HandleIsPathCHROMIUM(
- uint32_t immediate_data_size,
- const volatile void* cmd_data) {
- const volatile gles2::cmds::IsPathCHROMIUM& c =
- *static_cast<const volatile gles2::cmds::IsPathCHROMIUM*>(cmd_data);
- if (!features().chromium_path_rendering) {
- return error::kUnknownCommand;
- }
-
- GLuint path = c.path;
- typedef cmds::IsPathCHROMIUM::Result Result;
- Result* result_dst = GetSharedMemoryAs<Result*>(
- c.result_shm_id, c.result_shm_offset, sizeof(*result_dst));
- if (!result_dst) {
- return error::kOutOfBounds;
- }
- *result_dst = DoIsPathCHROMIUM(path);
- return error::kNoError;
-}
-
-error::Error GLES2DecoderImpl::HandlePathStencilFuncCHROMIUM(
- uint32_t immediate_data_size,
- const volatile void* cmd_data) {
- const volatile gles2::cmds::PathStencilFuncCHROMIUM& c =
- *static_cast<const volatile gles2::cmds::PathStencilFuncCHROMIUM*>(
- cmd_data);
- if (!features().chromium_path_rendering) {
- return error::kUnknownCommand;
- }
-
- GLenum func = static_cast<GLenum>(c.func);
- GLint ref = static_cast<GLint>(c.ref);
- GLuint mask = static_cast<GLuint>(c.mask);
- if (!validators_->cmp_function.IsValid(func)) {
- LOCAL_SET_GL_ERROR_INVALID_ENUM("glPathStencilFuncCHROMIUM", func, "func");
- return error::kNoError;
- }
- if (state_.stencil_path_func != func || state_.stencil_path_ref != ref ||
- state_.stencil_path_mask != mask) {
- state_.stencil_path_func = func;
- state_.stencil_path_ref = ref;
- state_.stencil_path_mask = mask;
- glPathStencilFuncNV(func, ref, mask);
- }
- return error::kNoError;
-}
-
error::Error GLES2DecoderImpl::HandleContextVisibilityHintCHROMIUM(
uint32_t immediate_data_size,
const volatile void* cmd_data) {
@@ -5702,6 +5603,20 @@ error::Error GLES2DecoderImpl::HandleEndSharedImageAccessDirectCHROMIUM(
return error::kNoError;
}
+error::Error GLES2DecoderImpl::HandleBeginBatchReadAccessSharedImageCHROMIUM(
+ uint32_t immediate_data_size,
+ const volatile void* cmd_data) {
+ DoBeginBatchReadAccessSharedImageCHROMIUM();
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderImpl::HandleEndBatchReadAccessSharedImageCHROMIUM(
+ uint32_t immediate_data_size,
+ const volatile void* cmd_data) {
+ DoEndBatchReadAccessSharedImageCHROMIUM();
+ return error::kNoError;
+}
+
bool GLES2DecoderImpl::SetCapabilityState(GLenum cap, bool enabled) {
switch (cap) {
case GL_BLEND:
diff --git a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_mock.h b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_mock.h
index be417a0d848..d740594a696 100644
--- a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_mock.h
+++ b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_mock.h
@@ -129,6 +129,14 @@ class MockGLES2Decoder : public GLES2Decoder {
unsigned format,
int width,
int height));
+ MOCK_METHOD7(ClearCompressedTextureLevel3D,
+ bool(Texture* texture,
+ unsigned target,
+ int level,
+ unsigned format,
+ int width,
+ int height,
+ int depth));
MOCK_METHOD1(IsCompressedTextureFormat,
bool(unsigned format));
MOCK_METHOD8(ClearLevel3D,
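
The mock gains a matching ClearCompressedTextureLevel3D entry so decoder unit tests can stub the new clear path. A hedged gmock sketch (the fixture name and mock_decoder_ member are illustrative, not taken from the test suite):

// Illustrative only: expect one clear of a 256x256x6 GL_TEXTURE_2D_ARRAY
// level 0 and report success. MockGLES2Decoder is in gles2_cmd_decoder_mock.h.
using ::testing::_;
using ::testing::Return;

TEST_F(SomeDecoderTest, ClearsCompressed3DLevelOnce) {
  EXPECT_CALL(*mock_decoder_,
              ClearCompressedTextureLevel3D(_, GL_TEXTURE_2D_ARRAY, 0, _, 256,
                                            256, 6))
      .WillOnce(Return(true));
  // ... exercise the code path that is expected to trigger the clear ...
}
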
diff --git a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough.cc b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough.cc
index e3afefb6d2f..111663ba855 100644
--- a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough.cc
+++ b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough.cc
@@ -34,6 +34,135 @@ namespace gpu {
namespace gles2 {
namespace {
+GLenum GetterForTextureTarget(GLenum target) {
+ switch (target) {
+ case GL_TEXTURE_2D:
+ return GL_TEXTURE_BINDING_2D;
+ case GL_TEXTURE_EXTERNAL_OES:
+ return GL_TEXTURE_BINDING_EXTERNAL_OES;
+ case GL_TEXTURE_RECTANGLE_ARB:
+ return GL_TEXTURE_BINDING_RECTANGLE_ARB;
+ default:
+ // Other targets not currently used.
+ NOTIMPLEMENTED();
+ return GL_TEXTURE_2D;
+ }
+}
+
+class ScopedFramebufferBindingReset {
+ public:
+ ScopedFramebufferBindingReset(gl::GLApi* api,
+ bool supports_separate_fbo_bindings)
+ : api_(api),
+ supports_separate_fbo_bindings_(supports_separate_fbo_bindings),
+ draw_framebuffer_(0),
+ read_framebuffer_(0) {
+ if (supports_separate_fbo_bindings_) {
+ api_->glGetIntegervFn(GL_DRAW_FRAMEBUFFER_BINDING, &draw_framebuffer_);
+ api_->glGetIntegervFn(GL_READ_FRAMEBUFFER_BINDING, &read_framebuffer_);
+ } else {
+ api_->glGetIntegervFn(GL_FRAMEBUFFER_BINDING, &draw_framebuffer_);
+ }
+ }
+
+ ~ScopedFramebufferBindingReset() {
+ if (supports_separate_fbo_bindings_) {
+ api_->glBindFramebufferEXTFn(GL_DRAW_FRAMEBUFFER, draw_framebuffer_);
+ api_->glBindFramebufferEXTFn(GL_READ_FRAMEBUFFER, read_framebuffer_);
+ } else {
+ api_->glBindFramebufferEXTFn(GL_FRAMEBUFFER, draw_framebuffer_);
+ }
+ }
+
+ private:
+ gl::GLApi* api_;
+ bool supports_separate_fbo_bindings_;
+ GLint draw_framebuffer_;
+ GLint read_framebuffer_;
+};
+
+class ScopedRenderbufferBindingReset {
+ public:
+ explicit ScopedRenderbufferBindingReset(gl::GLApi* api)
+ : api_(api), renderbuffer_(0) {
+ api_->glGetIntegervFn(GL_RENDERBUFFER_BINDING, &renderbuffer_);
+ }
+
+ ~ScopedRenderbufferBindingReset() {
+ api_->glBindRenderbufferEXTFn(GL_RENDERBUFFER, renderbuffer_);
+ }
+
+ private:
+ gl::GLApi* api_;
+ GLint renderbuffer_;
+};
+
+class ScopedTextureBindingReset {
+ public:
+ // |texture_target| only supports GL_TEXTURE_2D, GL_TEXTURE_EXTERNAL_OES, and
+ // GL_TEXTURE_RECTANGLE_ARB.
+ ScopedTextureBindingReset(gl::GLApi* api, GLenum texture_target)
+ : api_(api), texture_target_(texture_target), texture_(0) {
+ api_->glGetIntegervFn(GetterForTextureTarget(texture_target_), &texture_);
+ }
+
+ ~ScopedTextureBindingReset() {
+ api_->glBindTextureFn(texture_target_, texture_);
+ }
+
+ private:
+ gl::GLApi* api_;
+ GLenum texture_target_;
+ GLint texture_;
+};
+
+class ScopedClearColorReset {
+ public:
+ explicit ScopedClearColorReset(gl::GLApi* api) : api_(api) {
+ api_->glGetFloatvFn(GL_COLOR_CLEAR_VALUE, clear_color_);
+ }
+ ~ScopedClearColorReset() {
+ api_->glClearColorFn(clear_color_[0], clear_color_[1], clear_color_[2],
+ clear_color_[3]);
+ }
+
+ private:
+ gl::GLApi* api_;
+ GLfloat clear_color_[4];
+};
+
+class ScopedColorMaskReset {
+ public:
+ explicit ScopedColorMaskReset(gl::GLApi* api) : api_(api) {
+ api_->glGetBooleanvFn(GL_COLOR_WRITEMASK, color_mask_);
+ }
+ ~ScopedColorMaskReset() {
+ api_->glColorMaskFn(color_mask_[0], color_mask_[1], color_mask_[2],
+ color_mask_[3]);
+ }
+
+ private:
+ gl::GLApi* api_;
+ GLboolean color_mask_[4];
+};
+
+class ScopedScissorTestReset {
+ public:
+ explicit ScopedScissorTestReset(gl::GLApi* api) : api_(api) {
+ api_->glGetBooleanvFn(GL_SCISSOR_TEST, &scissor_test_);
+ }
+ ~ScopedScissorTestReset() {
+ if (scissor_test_)
+ api_->glEnableFn(GL_SCISSOR_TEST);
+ else
+ api_->glDisableFn(GL_SCISSOR_TEST);
+ }
+
+ private:
+ gl::GLApi* api_;
+ GLboolean scissor_test_;
+};
+
template <typename ClientType, typename ServiceType, typename DeleteFunction>
void DeleteServiceObjects(ClientServiceMap<ClientType, ServiceType>* id_map,
bool have_context,
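
The Scoped*Reset helpers above all use the same RAII pattern: snapshot a piece of GL state in the constructor and restore it in the destructor, so the temporary framebuffer, texture, and clear-state changes made while clearing an uncleared shared image cannot leak into the decoder's tracked state. A generic, self-contained sketch of the idiom with placeholder getter/setter callbacks instead of GL calls:

#include <functional>
#include <iostream>
#include <utility>

// Save/restore guard in the spirit of ScopedClearColorReset et al.
class ScopedValueReset {
 public:
  ScopedValueReset(std::function<int()> get, std::function<void(int)> set)
      : set_(std::move(set)), saved_(get()) {}
  ~ScopedValueReset() { set_(saved_); }  // Restore on scope exit.

 private:
  std::function<void(int)> set_;
  int saved_;
};

int main() {
  int state = 7;  // Stands in for a GL binding or flag.
  {
    ScopedValueReset reset([&] { return state; }, [&](int v) { state = v; });
    state = 42;  // Temporary change, e.g. binding a scratch framebuffer.
  }
  std::cout << state << "\n";  // Prints 7: the original value was restored.
}
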
@@ -259,46 +388,58 @@ operator=(SharedImageData&& other) {
return *this;
}
-ScopedFramebufferBindingReset::ScopedFramebufferBindingReset(
- gl::GLApi* api,
- bool supports_separate_fbo_bindings)
- : api_(api),
- supports_separate_fbo_bindings_(supports_separate_fbo_bindings),
- draw_framebuffer_(0),
- read_framebuffer_(0) {
- if (supports_separate_fbo_bindings_) {
- api_->glGetIntegervFn(GL_DRAW_FRAMEBUFFER_BINDING, &draw_framebuffer_);
- api_->glGetIntegervFn(GL_READ_FRAMEBUFFER_BINDING, &read_framebuffer_);
- } else {
- api_->glGetIntegervFn(GL_FRAMEBUFFER_BINDING, &draw_framebuffer_);
+bool PassthroughResources::SharedImageData::BeginAccess(GLenum mode,
+ gl::GLApi* api) {
+ DCHECK(!is_being_accessed());
+  // When importing a texture for use in the passthrough cmd decoder, always
+  // allow uncleared access. We ensure the texture is cleared below.
+ scoped_access_ = representation_->BeginScopedAccess(
+ mode, SharedImageRepresentation::AllowUnclearedAccess::kYes);
+ if (!scoped_access_) {
+ return false;
}
-}
+ // ANGLE does not handle clear tracking in an interoperable way. Clear
+ // any uncleared SharedImage before using it with ANGLE.
+ //
+ // NOTE: This will not introduce extra clears in common cases. The default GL
+ // SharedImage, which is exclusively used with ANGLE, always returns true
+ // from IsCleared, allowing ANGLE to manage clearing internally. This path is
+ // only run for interop shared image backings.
+ if (!representation_->IsCleared()) {
+ auto texture = representation_->GetTexturePassthrough();
+
+ // Back up all state we are about to change.
+ ScopedFramebufferBindingReset fbo_reset(
+        api, false /* supports_separate_fbo_bindings */);
+ ScopedTextureBindingReset texture_reset(api, texture->target());
+ ScopedClearColorReset clear_color_reset(api);
+ ScopedColorMaskReset color_mask_reset(api);
+ ScopedScissorTestReset scissor_test_reset(api);
+
+ // Generate a new framebuffer and bind the shared image's uncleared texture
+ // to it.
+ GLuint fbo = 0;
+ api->glGenFramebuffersEXTFn(1, &fbo);
+ api->glBindFramebufferEXTFn(GL_FRAMEBUFFER, fbo);
+ api->glBindTextureFn(texture->target(), texture->service_id());
+ api->glFramebufferTexture2DEXTFn(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0,
+ texture->target(), texture->service_id(),
+ 0);
+ // Clear the bound framebuffer.
+ api->glClearColorFn(0, 0, 0, 0);
+ api->glColorMaskFn(true, true, true, true);
+ api->glDisableFn(GL_SCISSOR_TEST);
+ api->glClearFn(GL_COLOR_BUFFER_BIT);
+
+ // Delete the generated framebuffer.
+ api->glFramebufferTexture2DEXTFn(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0,
+ texture->target(), 0, 0);
+ api->glDeleteFramebuffersEXTFn(1, &fbo);
-ScopedFramebufferBindingReset::~ScopedFramebufferBindingReset() {
- if (supports_separate_fbo_bindings_) {
- api_->glBindFramebufferEXTFn(GL_DRAW_FRAMEBUFFER, draw_framebuffer_);
- api_->glBindFramebufferEXTFn(GL_READ_FRAMEBUFFER, read_framebuffer_);
- } else {
- api_->glBindFramebufferEXTFn(GL_FRAMEBUFFER, draw_framebuffer_);
+ // Mark the shared image as cleared.
+ representation_->SetCleared();
}
-}
-
-ScopedRenderbufferBindingReset::ScopedRenderbufferBindingReset(gl::GLApi* api)
- : api_(api), renderbuffer_(0) {
- api_->glGetIntegervFn(GL_RENDERBUFFER_BINDING, &renderbuffer_);
-}
-
-ScopedRenderbufferBindingReset::~ScopedRenderbufferBindingReset() {
- api_->glBindRenderbufferEXTFn(GL_RENDERBUFFER, renderbuffer_);
-}
-
-ScopedTexture2DBindingReset::ScopedTexture2DBindingReset(gl::GLApi* api)
- : api_(api), texture_(0) {
- api_->glGetIntegervFn(GL_TEXTURE_2D_BINDING_EXT, &texture_);
-}
-
-ScopedTexture2DBindingReset::~ScopedTexture2DBindingReset() {
- api_->glBindTextureFn(GL_TEXTURE_2D, texture_);
+ return true;
}
GLES2DecoderPassthroughImpl::PendingQuery::PendingQuery() = default;
@@ -352,7 +493,7 @@ GLES2DecoderPassthroughImpl::EmulatedColorBuffer::EmulatedColorBuffer(
gl::GLApi* api,
const EmulatedDefaultFramebufferFormat& format_in)
: api(api), format(format_in) {
- ScopedTexture2DBindingReset scoped_texture_reset(api);
+ ScopedTextureBindingReset scoped_texture_reset(api, GL_TEXTURE_2D);
GLuint color_buffer_texture = 0;
api->glGenTexturesFn(1, &color_buffer_texture);
@@ -373,7 +514,7 @@ void GLES2DecoderPassthroughImpl::EmulatedColorBuffer::Resize(
return;
size = new_size;
- ScopedTexture2DBindingReset scoped_texture_reset(api);
+ ScopedTextureBindingReset scoped_texture_reset(api, GL_TEXTURE_2D);
DCHECK(texture);
DCHECK(texture->target() == GL_TEXTURE_2D);
@@ -467,8 +608,9 @@ GLES2DecoderPassthroughImpl::EmulatedDefaultFramebuffer::
std::unique_ptr<GLES2DecoderPassthroughImpl::EmulatedColorBuffer>
GLES2DecoderPassthroughImpl::EmulatedDefaultFramebuffer::SetColorBuffer(
std::unique_ptr<EmulatedColorBuffer> new_color_buffer) {
- DCHECK(color_texture != nullptr && new_color_buffer != nullptr);
- DCHECK(color_texture->size == new_color_buffer->size);
+ DCHECK(color_texture != nullptr);
+ DCHECK(new_color_buffer != nullptr);
+ DCHECK_EQ(color_texture->size, new_color_buffer->size);
std::unique_ptr<EmulatedColorBuffer> old_buffer(std::move(color_texture));
color_texture = std::move(new_color_buffer);
@@ -486,7 +628,7 @@ GLES2DecoderPassthroughImpl::EmulatedDefaultFramebuffer::SetColorBuffer(
void GLES2DecoderPassthroughImpl::EmulatedDefaultFramebuffer::Blit(
EmulatedColorBuffer* target) {
DCHECK(target != nullptr);
- DCHECK(target->size == size);
+ DCHECK_EQ(target->size, size);
ScopedFramebufferBindingReset scoped_fbo_reset(
api, supports_separate_fbo_bindings);
@@ -784,9 +926,11 @@ gpu::ContextResult GLES2DecoderPassthroughImpl::Initialize(
gl::GetRequestableGLExtensionsFromCurrentContext());
static constexpr const char* kRequiredFunctionalityExtensions[] = {
+ "GL_ANGLE_framebuffer_blit",
"GL_ANGLE_memory_size",
"GL_ANGLE_native_id",
"GL_ANGLE_texture_storage_external",
+ "GL_ANGLE_texture_usage",
"GL_CHROMIUM_bind_uniform_location",
"GL_CHROMIUM_sync_query",
"GL_EXT_debug_marker",
@@ -810,14 +954,12 @@ gpu::ContextResult GLES2DecoderPassthroughImpl::Initialize(
if (request_optional_extensions_) {
static constexpr const char* kOptionalFunctionalityExtensions[] = {
"GL_ANGLE_depth_texture",
- "GL_ANGLE_framebuffer_blit",
"GL_ANGLE_framebuffer_multisample",
"GL_ANGLE_instanced_arrays",
"GL_ANGLE_pack_reverse_row_order",
"GL_ANGLE_texture_compression_dxt1",
"GL_ANGLE_texture_compression_dxt3",
"GL_ANGLE_texture_compression_dxt5",
- "GL_ANGLE_texture_usage",
"GL_ANGLE_translated_shader_source",
"GL_CHROMIUM_framebuffer_mixed_samples",
"GL_CHROMIUM_path_rendering",
@@ -1057,6 +1199,16 @@ gpu::ContextResult GLES2DecoderPassthroughImpl::Initialize(
api()->glGetIntegervFn(GL_SCISSOR_BOX, scissor_);
ApplySurfaceDrawOffset();
+#if defined(OS_MACOSX)
+ // On mac we need the ANGLE_texture_rectangle extension to support IOSurface
+ // backbuffers, but we don't want it exposed to WebGL user shaders. This
+ // disables support for it in the shader compiler. We then enable it
+ // temporarily when necessary; see
+ // ScopedEnableTextureRectangleInShaderCompiler.
+ if (feature_info_->IsWebGLContext())
+ api()->glDisableFn(GL_TEXTURE_RECTANGLE_ANGLE);
+#endif
+
set_initialized();
return gpu::ContextResult::kSuccess;
}
@@ -1105,10 +1257,15 @@ void GLES2DecoderPassthroughImpl::Destroy(bool have_context) {
api()->glDeleteTransformFeedbacksFn(
1, &transform_feedback);
});
- DeleteServiceObjects(&query_id_map_, have_context,
- [this](GLuint client_id, GLuint query) {
- api()->glDeleteQueriesFn(1, &query);
- });
+ DeleteServiceObjects(
+ &query_id_map_, have_context, [this](GLuint client_id, GLuint query) {
+          // glDeleteQueries is only loaded when GL_EXT_occlusion_query_boolean
+          // is present; without the extension every query is emulated, so
+          // there is nothing to delete.
+ if (feature_info_->feature_flags().occlusion_query_boolean) {
+ api()->glDeleteQueriesFn(1, &query);
+ }
+ });
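
DeleteServiceObjects, used throughout Destroy here, walks a client-id to service-id map and hands each entry to the given deleter; its exact have_context handling lives outside this diff, so the sketch below is an assumption about its shape, not the Chromium implementation:

    #include <unordered_map>

    // Illustrative sketch: pass each (client_id, service_id) pair to a
    // deleter while a GL context is still available, then drop all entries.
    template <typename Deleter>
    void DeleteServiceObjectsSketch(
        std::unordered_map<unsigned, unsigned>* id_map,
        bool have_context,
        Deleter deleter) {
      if (have_context) {
        for (const auto& entry : *id_map)
          deleter(entry.first, entry.second);
      }
      id_map->clear();
    }
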
DeleteServiceObjects(&vertex_array_id_map_, have_context,
[this](GLuint client_id, GLuint vertex_array) {
api()->glDeleteVertexArraysOESFn(1, &vertex_array);
@@ -1166,6 +1323,10 @@ void GLES2DecoderPassthroughImpl::Destroy(bool have_context) {
group_ = nullptr;
}
+ if (have_context) {
+ api()->glDebugMessageCallbackFn(nullptr, nullptr);
+ }
+
if (context_.get()) {
context_->ReleaseCurrent(nullptr);
context_ = nullptr;
@@ -1292,7 +1453,7 @@ bool GLES2DecoderPassthroughImpl::ResizeOffscreenFramebuffer(
// Destroy all the available color textures, they should not be the same size
// as the back buffer
for (auto& available_color_texture : available_color_textures_) {
- DCHECK(available_color_texture->size != size);
+ DCHECK_NE(available_color_texture->size, size);
available_color_texture->Destroy(true);
}
available_color_textures_.clear();
@@ -1367,6 +1528,8 @@ gpu::Capabilities GLES2DecoderPassthroughImpl::GetCapabilities() {
caps.egl_image_external =
feature_info_->feature_flags().oes_egl_image_external;
+ caps.egl_image_external_essl3 =
+ feature_info_->feature_flags().oes_egl_image_external_essl3;
caps.texture_format_astc =
feature_info_->feature_flags().ext_texture_format_astc;
caps.texture_format_atc =
@@ -1408,8 +1571,8 @@ gpu::Capabilities GLES2DecoderPassthroughImpl::GetCapabilities() {
caps.image_ycbcr_420v_disabled_for_video_frames =
group_->gpu_preferences()
.disable_biplanar_gpu_memory_buffers_for_video_frames;
- caps.image_xr30 = feature_info_->feature_flags().chromium_image_xr30;
- caps.image_xb30 = feature_info_->feature_flags().chromium_image_xb30;
+ caps.image_ar30 = feature_info_->feature_flags().chromium_image_ar30;
+ caps.image_ab30 = feature_info_->feature_flags().chromium_image_ab30;
caps.image_ycbcr_p010 =
feature_info_->feature_flags().chromium_image_ycbcr_p010;
caps.max_copy_texture_chromium_size =
@@ -1426,7 +1589,8 @@ gpu::Capabilities GLES2DecoderPassthroughImpl::GetCapabilities() {
caps.post_sub_buffer = surface_->SupportsPostSubBuffer();
caps.swap_buffers_with_bounds = surface_->SupportsSwapBuffersWithBounds();
caps.surfaceless = !offscreen_ && surface_->IsSurfaceless();
- caps.flips_vertically = !offscreen_ && surface_->FlipsVertically();
+ caps.surface_origin =
+ !offscreen_ ? surface_->GetOrigin() : gfx::SurfaceOrigin::kBottomLeft;
caps.msaa_is_slow = feature_info_->workarounds().msaa_is_slow;
caps.avoid_stencil_buffers =
feature_info_->workarounds().avoid_stencil_buffers;
@@ -1434,7 +1598,6 @@ gpu::Capabilities GLES2DecoderPassthroughImpl::GetCapabilities() {
feature_info_->feature_flags().ext_multisample_compatibility;
caps.dc_layers = !offscreen_ && surface_->SupportsDCLayers();
caps.commit_overlay_planes = surface_->SupportsCommitOverlayPlanes();
- caps.use_dc_overlays_for_video = surface_->UseOverlaysForVideo();
caps.protected_video_swap_chain = surface_->SupportsProtectedVideo();
caps.gpu_vsync = surface_->SupportsGpuVSync();
#if defined(OS_WIN)
@@ -1614,6 +1777,17 @@ bool GLES2DecoderPassthroughImpl::ClearCompressedTextureLevel(Texture* texture,
return true;
}
+bool GLES2DecoderPassthroughImpl::ClearCompressedTextureLevel3D(
+ Texture* texture,
+ unsigned target,
+ int level,
+ unsigned format,
+ int width,
+ int height,
+ int depth) {
+ return true;
+}
+
bool GLES2DecoderPassthroughImpl::IsCompressedTextureFormat(unsigned format) {
return false;
}
@@ -1864,13 +2038,6 @@ void GLES2DecoderPassthroughImpl::InitializeFeatureInfo(
}
}
-void* GLES2DecoderPassthroughImpl::GetScratchMemory(size_t size) {
- if (scratch_memory_.size() < size) {
- scratch_memory_.resize(size, 0);
- }
- return scratch_memory_.data();
-}
-
template <typename T>
error::Error GLES2DecoderPassthroughImpl::PatchGetNumericResults(GLenum pname,
GLsizei length,
@@ -2355,7 +2522,7 @@ void GLES2DecoderPassthroughImpl::ReadBackBuffersIntoShadowCopies(
if (!resources_->buffer_id_map.GetServiceID(client_id, &service_id)) {
// Buffer no longer exists, this shadow update should have been removed by
// DoDeleteBuffers
- DCHECK(false);
+ NOTREACHED();
continue;
}
diff --git a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough.h b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough.h
index e782addd70e..ade661ee2af 100644
--- a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough.h
+++ b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough.h
@@ -107,15 +107,7 @@ struct PassthroughResources {
return representation_.get();
}
- bool BeginAccess(GLenum mode) {
- DCHECK(!is_being_accessed());
- scoped_access_.emplace(representation_.get(), mode);
- if (!scoped_access_->success()) {
- scoped_access_.reset();
- return false;
- }
- return true;
- }
+ bool BeginAccess(GLenum mode, gl::GLApi* api);
void EndAccess() {
DCHECK(is_being_accessed());
@@ -127,7 +119,7 @@ struct PassthroughResources {
private:
std::unique_ptr<SharedImageRepresentationGLTexturePassthrough>
representation_;
- base::Optional<SharedImageRepresentationGLTexturePassthrough::ScopedAccess>
+ std::unique_ptr<SharedImageRepresentationGLTexturePassthrough::ScopedAccess>
scoped_access_;
DISALLOW_COPY_AND_ASSIGN(SharedImageData);
};
@@ -147,39 +139,6 @@ struct PassthroughResources {
std::unordered_map<GLuint, MappedBuffer> mapped_buffer_map;
};
-class ScopedFramebufferBindingReset {
- public:
- explicit ScopedFramebufferBindingReset(gl::GLApi* api,
- bool supports_separate_fbo_bindings);
- ~ScopedFramebufferBindingReset();
-
- private:
- gl::GLApi* api_;
- bool supports_separate_fbo_bindings_;
- GLint draw_framebuffer_;
- GLint read_framebuffer_;
-};
-
-class ScopedRenderbufferBindingReset {
- public:
- explicit ScopedRenderbufferBindingReset(gl::GLApi* api);
- ~ScopedRenderbufferBindingReset();
-
- private:
- gl::GLApi* api_;
- GLint renderbuffer_;
-};
-
-class ScopedTexture2DBindingReset {
- public:
- explicit ScopedTexture2DBindingReset(gl::GLApi* api);
- ~ScopedTexture2DBindingReset();
-
- private:
- gl::GLApi* api_;
- GLint texture_;
-};
-
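
The three scoped-reset helpers removed here are superseded by shared equivalents; the call sites earlier in this diff now construct ScopedTextureBindingReset(api, GL_TEXTURE_2D) instead of the 2D-only class. A hedged sketch of what the generalized texture helper presumably looks like, extrapolated from the deleted ScopedTexture2DBindingReset (the target-to-binding-query mapping is an assumption, and this is not the shared implementation):

    // Sketch only: remember the texture bound to |target| on construction
    // and rebind it on destruction.
    class ScopedTextureBindingResetSketch {
     public:
      ScopedTextureBindingResetSketch(gl::GLApi* api, GLenum target)
          : api_(api), target_(target), texture_(0) {
        const GLenum binding_query = target == GL_TEXTURE_2D
                                         ? GL_TEXTURE_BINDING_2D
                                         : GL_TEXTURE_BINDING_CUBE_MAP;
        api_->glGetIntegervFn(binding_query, &texture_);
      }
      ~ScopedTextureBindingResetSketch() {
        api_->glBindTextureFn(target_, texture_);
      }

     private:
      gl::GLApi* api_;
      GLenum target_;
      GLint texture_;
    };
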
class GPU_GLES2_EXPORT GLES2DecoderPassthroughImpl : public GLES2Decoder {
public:
GLES2DecoderPassthroughImpl(DecoderClient* client,
@@ -335,6 +294,16 @@ class GPU_GLES2_EXPORT GLES2DecoderPassthroughImpl : public GLES2Decoder {
int width,
int height) override;
+ // Clears a level sub area of a compressed 3D texture.
+ // Returns false if a GL error should be generated.
+ bool ClearCompressedTextureLevel3D(Texture* texture,
+ unsigned target,
+ int level,
+ unsigned format,
+ int width,
+ int height,
+ int depth) override;
+
// Indicates whether a given internal format is one for a compressed
// texture.
bool IsCompressedTextureFormat(unsigned format) override;
@@ -419,34 +388,21 @@ class GPU_GLES2_EXPORT GLES2DecoderPassthroughImpl : public GLES2Decoder {
const DisallowedFeatures& disallowed_features,
bool force_reinitialize);
- void* GetScratchMemory(size_t size);
-
- template <typename T>
- T* GetTypedScratchMemory(size_t count) {
- return reinterpret_cast<T*>(GetScratchMemory(count * sizeof(T)));
- }
-
template <typename T, typename GLGetFunction>
error::Error GetNumericHelper(GLenum pname,
GLsizei bufsize,
GLsizei* length,
T* params,
GLGetFunction get_call) {
- // Get a scratch buffer to hold the result of the query
- T* scratch_params = GetTypedScratchMemory<T>(bufsize);
- get_call(pname, bufsize, length, scratch_params);
+ get_call(pname, bufsize, length, params);
// Update the results of the query, if needed
- error::Error error = PatchGetNumericResults(pname, *length, scratch_params);
+ const error::Error error = PatchGetNumericResults(pname, *length, params);
if (error != error::kNoError) {
*length = 0;
return error;
}
- // Copy into the destination
- DCHECK(*length <= bufsize);
- std::copy(scratch_params, scratch_params + *length, params);
-
return error::kNoError;
}
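
With the scratch buffer gone, GetNumericHelper now writes the robust query result straight into the caller-supplied buffer and patches it in place. A hypothetical caller would look roughly like the sketch below; the robust getter name follows the *RobustANGLEFn convention visible elsewhere in this diff and is an assumption, not a quoted API:

    // Hypothetical usage sketch of GetNumericHelper, not actual decoder code.
    error::Error GLES2DecoderPassthroughImpl::DoGetIntegervSketch(
        GLenum pname, GLsizei bufsize, GLsizei* length, GLint* params) {
      return GetNumericHelper(
          pname, bufsize, length, params,
          [this](GLenum pname, GLsizei bufsize, GLsizei* length,
                 GLint* params) {
            // Assumed robust getter, by analogy with the
            // glGetFramebufferAttachmentParameterivRobustANGLEFn call used
            // elsewhere in this file.
            api()->glGetIntegervRobustANGLEFn(pname, bufsize, length, params);
          });
    }
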
@@ -896,9 +852,6 @@ class GPU_GLES2_EXPORT GLES2DecoderPassthroughImpl : public GLES2Decoder {
bool reset_by_robustness_extension_;
bool lose_context_when_out_of_memory_;
- // Cache of scratch memory
- std::vector<uint8_t> scratch_memory_;
-
// After a second fence is inserted, both the GpuChannelMessageQueue and
// CommandExecutor are descheduled. Once the first fence has completed, both
// get rescheduled.
@@ -911,6 +864,8 @@ class GPU_GLES2_EXPORT GLES2DecoderPassthroughImpl : public GLES2Decoder {
base::WeakPtrFactory<GLES2DecoderPassthroughImpl> weak_ptr_factory_{this};
+ class ScopedEnableTextureRectangleInShaderCompiler;
+
// Include the prototypes of all the doer functions from a separate header to
// keep this file clean.
#include "gpu/command_buffer/service/gles2_cmd_decoder_passthrough_doer_prototypes.h"
diff --git a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough_doer_prototypes.h b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough_doer_prototypes.h
index 2f790cbf1fd..be04a014d68 100644
--- a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough_doer_prototypes.h
+++ b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough_doer_prototypes.h
@@ -805,7 +805,7 @@ error::Error DoUnmapBuffer(GLenum target);
error::Error DoResizeCHROMIUM(GLuint width,
GLuint height,
GLfloat scale_factor,
- GLenum color_space,
+ gfx::ColorSpace color_space,
GLboolean alpha);
error::Error DoGetRequestableExtensionsCHROMIUM(const char** extensions);
error::Error DoRequestExtensionCHROMIUM(const char* extension);
@@ -1141,4 +1141,6 @@ error::Error DoCreateAndTexStorage2DSharedImageINTERNAL(
error::Error DoBeginSharedImageAccessDirectCHROMIUM(GLuint client_id,
GLenum mode);
error::Error DoEndSharedImageAccessDirectCHROMIUM(GLuint client_id);
+error::Error DoBeginBatchReadAccessSharedImageCHROMIUM(void);
+error::Error DoEndBatchReadAccessSharedImageCHROMIUM(void);
#endif // GPU_COMMAND_BUFFER_SERVICE_GLES2_CMD_DECODER_PASSTHROUGH_DOER_PROTOTYPES_H_
diff --git a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough_doers.cc b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough_doers.cc
index c6891934ba9..ecad6a45c14 100644
--- a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough_doers.cc
+++ b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough_doers.cc
@@ -27,6 +27,42 @@
namespace gpu {
namespace gles2 {
+// Temporarily allows compilation of shaders that use the
+// ARB_texture_rectangle/ANGLE_texture_rectangle extension. We don't want to
+// expose the extension to WebGL user shaders but we still need to use it for
+// parts of the implementation on macOS. Note that the extension is always
+// enabled on macOS and this only controls shader compilation.
+class GLES2DecoderPassthroughImpl::
+ ScopedEnableTextureRectangleInShaderCompiler {
+ public:
+ ScopedEnableTextureRectangleInShaderCompiler(
+ const ScopedEnableTextureRectangleInShaderCompiler&) = delete;
+ ScopedEnableTextureRectangleInShaderCompiler& operator=(
+ const ScopedEnableTextureRectangleInShaderCompiler&) = delete;
+
+ // This class is a no-op except on macOS.
+#if !defined(OS_MACOSX)
+ explicit ScopedEnableTextureRectangleInShaderCompiler(
+ GLES2DecoderPassthroughImpl* decoder) {}
+
+ private:
+#else
+ explicit ScopedEnableTextureRectangleInShaderCompiler(
+ GLES2DecoderPassthroughImpl* decoder)
+ : decoder_(decoder) {
+ if (decoder_->feature_info_->IsWebGLContext())
+ decoder_->api_->glEnableFn(GL_TEXTURE_RECTANGLE_ANGLE);
+ }
+ ~ScopedEnableTextureRectangleInShaderCompiler() {
+ if (decoder_->feature_info_->IsWebGLContext())
+ decoder_->api_->glDisableFn(GL_TEXTURE_RECTANGLE_ANGLE);
+ }
+
+ private:
+ GLES2DecoderPassthroughImpl* decoder_;
+#endif
+};
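
The guard above is later placed at the top of DoCopyTextureCHROMIUM and DoCopySubTextureCHROMIUM, replacing the explicit enable/disable pair that used to live in DoCompileShader. The underlying idiom is a plain RAII toggle; a minimal standalone sketch of that idiom (names here are illustrative, not Chromium API):

    #include <functional>
    #include <utility>

    // Illustrative RAII toggle: enable a capability for the lifetime of a
    // scope and disable it again on exit, even on early return.
    class ScopedCapability {
     public:
      explicit ScopedCapability(std::function<void(bool)> set_enabled)
          : set_enabled_(std::move(set_enabled)) {
        set_enabled_(true);
      }
      ~ScopedCapability() { set_enabled_(false); }
      ScopedCapability(const ScopedCapability&) = delete;
      ScopedCapability& operator=(const ScopedCapability&) = delete;

     private:
      std::function<void(bool)> set_enabled_;
    };
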
+
namespace {
template <typename ClientType, typename ServiceType, typename GenFunction>
@@ -709,18 +745,7 @@ error::Error GLES2DecoderPassthroughImpl::DoColorMask(GLboolean red,
}
error::Error GLES2DecoderPassthroughImpl::DoCompileShader(GLuint shader) {
-#if defined(OS_MACOSX)
- // On mac we need this extension to support IOSurface backbuffers, but we
- // don't want it exposed to WebGL user shaders. Temporarily disable it during
- // shader compilation.
- if (feature_info_->IsWebGLContext())
- api()->glDisableExtensionANGLEFn("GL_ANGLE_texture_rectangle");
-#endif
api()->glCompileShaderFn(GetShaderServiceID(shader, resources_));
-#if defined(OS_MACOSX)
- if (feature_info_->IsWebGLContext())
- api()->glRequestExtensionANGLEFn("GL_ANGLE_texture_rectangle");
-#endif
return error::kNoError;
}
@@ -1655,10 +1680,8 @@ error::Error GLES2DecoderPassthroughImpl::DoGetFramebufferAttachmentParameteriv(
CheckErrorCallbackState();
- // Get a scratch buffer to hold the result of the query
- GLint* scratch_params = GetTypedScratchMemory<GLint>(bufsize);
api()->glGetFramebufferAttachmentParameterivRobustANGLEFn(
- target, updated_attachment, pname, bufsize, length, scratch_params);
+ target, updated_attachment, pname, bufsize, length, params);
if (CheckErrorCallbackState()) {
DCHECK(*length == 0);
@@ -1666,17 +1689,13 @@ error::Error GLES2DecoderPassthroughImpl::DoGetFramebufferAttachmentParameteriv(
}
// Update the results of the query, if needed
- error::Error error = PatchGetFramebufferAttachmentParameter(
- target, updated_attachment, pname, *length, scratch_params);
+ const error::Error error = PatchGetFramebufferAttachmentParameter(
+ target, updated_attachment, pname, *length, params);
if (error != error::kNoError) {
*length = 0;
return error;
}
- // Copy into the destination
- DCHECK(*length < bufsize);
- std::copy(scratch_params, scratch_params + *length, params);
-
return error::kNoError;
}
@@ -3363,7 +3382,8 @@ error::Error GLES2DecoderPassthroughImpl::DoTexStorage2DImageCHROMIUM(
bool is_cleared;
scoped_refptr<gl::GLImage> image =
GetContextGroup()->image_factory()->CreateAnonymousImage(
- gfx::Size(width, height), buffer_format, buffer_usage, &is_cleared);
+ gfx::Size(width, height), buffer_format, buffer_usage,
+ gpu::kNullSurfaceHandle, &is_cleared);
if (!image || !image->BindTexImage(target)) {
InsertError(GL_INVALID_OPERATION, "Failed to create or bind GL Image");
return error::kNoError;
@@ -3734,7 +3754,7 @@ error::Error GLES2DecoderPassthroughImpl::DoSwapBuffers(uint64_t swap_id,
}
}
- DCHECK(emulated_front_buffer_->size == emulated_back_buffer_->size);
+ DCHECK_EQ(emulated_front_buffer_->size, emulated_back_buffer_->size);
if (emulated_default_framebuffer_format_.samples > 0) {
// Resolve the multisampled renderbuffer into the emulated_front_buffer_
@@ -3892,11 +3912,12 @@ error::Error GLES2DecoderPassthroughImpl::DoUnmapBuffer(GLenum target) {
return error::kNoError;
}
-error::Error GLES2DecoderPassthroughImpl::DoResizeCHROMIUM(GLuint width,
- GLuint height,
- GLfloat scale_factor,
- GLenum color_space,
- GLboolean alpha) {
+error::Error GLES2DecoderPassthroughImpl::DoResizeCHROMIUM(
+ GLuint width,
+ GLuint height,
+ GLfloat scale_factor,
+ gfx::ColorSpace color_space,
+ GLboolean alpha) {
// gfx::Size uses integers, make sure width and height do not overflow
static_assert(sizeof(GLuint) >= sizeof(int), "Unexpected GLuint size.");
static const GLuint kMaxDimension =
@@ -3910,31 +3931,7 @@ error::Error GLES2DecoderPassthroughImpl::DoResizeCHROMIUM(GLuint width,
return error::kLostContext;
}
} else {
- gl::GLSurface::ColorSpace surface_color_space =
- gl::GLSurface::ColorSpace::UNSPECIFIED;
- switch (color_space) {
- case GL_COLOR_SPACE_UNSPECIFIED_CHROMIUM:
- surface_color_space = gl::GLSurface::ColorSpace::UNSPECIFIED;
- break;
- case GL_COLOR_SPACE_SCRGB_LINEAR_CHROMIUM:
- surface_color_space = gl::GLSurface::ColorSpace::SCRGB_LINEAR;
- break;
- case GL_COLOR_SPACE_HDR10_CHROMIUM:
- surface_color_space = gl::GLSurface::ColorSpace::HDR10;
- break;
- case GL_COLOR_SPACE_SRGB_CHROMIUM:
- surface_color_space = gl::GLSurface::ColorSpace::SRGB;
- break;
- case GL_COLOR_SPACE_DISPLAY_P3_CHROMIUM:
- surface_color_space = gl::GLSurface::ColorSpace::DISPLAY_P3;
- break;
- default:
- LOG(ERROR) << "GLES2DecoderPassthroughImpl: Context lost because "
- "specified color space was invalid.";
- return error::kLostContext;
- }
- if (!surface_->Resize(safe_size, scale_factor, surface_color_space,
- !!alpha)) {
+ if (!surface_->Resize(safe_size, scale_factor, color_space, !!alpha)) {
LOG(ERROR)
<< "GLES2DecoderPassthroughImpl: Context lost because resize failed.";
return error::kLostContext;
@@ -4410,6 +4407,7 @@ error::Error GLES2DecoderPassthroughImpl::DoCopyTextureCHROMIUM(
GLboolean unpack_flip_y,
GLboolean unpack_premultiply_alpha,
GLboolean unpack_unmultiply_alpha) {
+ ScopedEnableTextureRectangleInShaderCompiler enable(this);
BindPendingImageForClientIDIfNeeded(source_id);
api()->glCopyTextureCHROMIUMFn(
GetTextureServiceID(api(), source_id, resources_, false), source_level,
@@ -4437,6 +4435,7 @@ error::Error GLES2DecoderPassthroughImpl::DoCopySubTextureCHROMIUM(
GLboolean unpack_flip_y,
GLboolean unpack_premultiply_alpha,
GLboolean unpack_unmultiply_alpha) {
+ ScopedEnableTextureRectangleInShaderCompiler enable(this);
BindPendingImageForClientIDIfNeeded(source_id);
api()->glCopySubTextureCHROMIUMFn(
GetTextureServiceID(api(), source_id, resources_, false), source_level,
@@ -4686,6 +4685,11 @@ error::Error GLES2DecoderPassthroughImpl::DoDescheduleUntilFinishedCHROMIUM() {
error::Error GLES2DecoderPassthroughImpl::DoDrawBuffersEXT(
GLsizei count,
const volatile GLenum* bufs) {
+ if (!feature_info_->feature_flags().ext_draw_buffers &&
+ !feature_info_->gl_version_info().is_es3) {
+ return error::kUnknownCommand;
+ }
+
// Validate that count is non-negative before allocating a vector
if (count < 0) {
InsertError(GL_INVALID_VALUE, "count cannot be negative.");
@@ -5004,232 +5008,6 @@ error::Error GLES2DecoderPassthroughImpl::DoFlushDriverCachesCHROMIUM() {
return error::kNoError;
}
-error::Error GLES2DecoderPassthroughImpl::DoMatrixLoadfCHROMIUM(
- GLenum matrixMode,
- const volatile GLfloat* m) {
- NOTIMPLEMENTED();
- return error::kNoError;
-}
-
-error::Error GLES2DecoderPassthroughImpl::DoMatrixLoadIdentityCHROMIUM(
- GLenum matrixMode) {
- NOTIMPLEMENTED();
- return error::kNoError;
-}
-
-error::Error GLES2DecoderPassthroughImpl::DoGenPathsCHROMIUM(GLuint path,
- GLsizei range) {
- NOTIMPLEMENTED();
- return error::kNoError;
-}
-
-error::Error GLES2DecoderPassthroughImpl::DoDeletePathsCHROMIUM(GLuint path,
- GLsizei range) {
- NOTIMPLEMENTED();
- return error::kNoError;
-}
-
-error::Error GLES2DecoderPassthroughImpl::DoIsPathCHROMIUM(GLuint path,
- uint32_t* result) {
- NOTIMPLEMENTED();
- return error::kNoError;
-}
-
-error::Error GLES2DecoderPassthroughImpl::DoPathCommandsCHROMIUM(
- GLuint path,
- GLsizei numCommands,
- const GLubyte* commands,
- GLsizei numCoords,
- GLenum coordType,
- const GLvoid* coords,
- GLsizei coords_bufsize) {
- NOTIMPLEMENTED();
- return error::kNoError;
-}
-
-error::Error GLES2DecoderPassthroughImpl::DoPathParameterfCHROMIUM(
- GLuint path,
- GLenum pname,
- GLfloat value) {
- NOTIMPLEMENTED();
- return error::kNoError;
-}
-
-error::Error GLES2DecoderPassthroughImpl::DoPathParameteriCHROMIUM(
- GLuint path,
- GLenum pname,
- GLint value) {
- NOTIMPLEMENTED();
- return error::kNoError;
-}
-
-error::Error GLES2DecoderPassthroughImpl::DoPathStencilFuncCHROMIUM(
- GLenum func,
- GLint ref,
- GLuint mask) {
- NOTIMPLEMENTED();
- return error::kNoError;
-}
-
-error::Error GLES2DecoderPassthroughImpl::DoStencilFillPathCHROMIUM(
- GLuint path,
- GLenum fillMode,
- GLuint mask) {
- NOTIMPLEMENTED();
- return error::kNoError;
-}
-
-error::Error GLES2DecoderPassthroughImpl::DoStencilStrokePathCHROMIUM(
- GLuint path,
- GLint reference,
- GLuint mask) {
- NOTIMPLEMENTED();
- return error::kNoError;
-}
-
-error::Error GLES2DecoderPassthroughImpl::DoCoverFillPathCHROMIUM(
- GLuint path,
- GLenum coverMode) {
- NOTIMPLEMENTED();
- return error::kNoError;
-}
-
-error::Error GLES2DecoderPassthroughImpl::DoCoverStrokePathCHROMIUM(
- GLuint path,
- GLenum coverMode) {
- NOTIMPLEMENTED();
- return error::kNoError;
-}
-
-error::Error GLES2DecoderPassthroughImpl::DoStencilThenCoverFillPathCHROMIUM(
- GLuint path,
- GLenum fillMode,
- GLuint mask,
- GLenum coverMode) {
- NOTIMPLEMENTED();
- return error::kNoError;
-}
-
-error::Error GLES2DecoderPassthroughImpl::DoStencilThenCoverStrokePathCHROMIUM(
- GLuint path,
- GLint reference,
- GLuint mask,
- GLenum coverMode) {
- NOTIMPLEMENTED();
- return error::kNoError;
-}
-
-error::Error GLES2DecoderPassthroughImpl::DoStencilFillPathInstancedCHROMIUM(
- GLsizei numPaths,
- GLenum pathNameType,
- const GLvoid* paths,
- GLsizei pathsBufsize,
- GLuint pathBase,
- GLenum fillMode,
- GLuint mask,
- GLenum transformType,
- const GLfloat* transformValues,
- GLsizei transformValuesBufsize) {
- NOTIMPLEMENTED();
- return error::kNoError;
-}
-
-error::Error GLES2DecoderPassthroughImpl::DoStencilStrokePathInstancedCHROMIUM(
- GLsizei numPaths,
- GLenum pathNameType,
- const GLvoid* paths,
- GLsizei pathsBufsize,
- GLuint pathBase,
- GLint reference,
- GLuint mask,
- GLenum transformType,
- const GLfloat* transformValues,
- GLsizei transformValuesBufsize) {
- NOTIMPLEMENTED();
- return error::kNoError;
-}
-
-error::Error GLES2DecoderPassthroughImpl::DoCoverFillPathInstancedCHROMIUM(
- GLsizei numPaths,
- GLenum pathNameType,
- const GLvoid* paths,
- GLsizei pathsBufsize,
- GLuint pathBase,
- GLenum coverMode,
- GLenum transformType,
- const GLfloat* transformValues,
- GLsizei transformValuesBufsize) {
- NOTIMPLEMENTED();
- return error::kNoError;
-}
-
-error::Error GLES2DecoderPassthroughImpl::DoCoverStrokePathInstancedCHROMIUM(
- GLsizei numPaths,
- GLenum pathNameType,
- const GLvoid* paths,
- GLsizei pathsBufsize,
- GLuint pathBase,
- GLenum coverMode,
- GLenum transformType,
- const GLfloat* transformValues,
- GLsizei transformValuesBufsize) {
- NOTIMPLEMENTED();
- return error::kNoError;
-}
-
-error::Error
-GLES2DecoderPassthroughImpl::DoStencilThenCoverFillPathInstancedCHROMIUM(
- GLsizei numPaths,
- GLenum pathNameType,
- const GLvoid* paths,
- GLsizei pathsBufsize,
- GLuint pathBase,
- GLenum fillMode,
- GLuint mask,
- GLenum coverMode,
- GLenum transformType,
- const GLfloat* transformValues,
- GLsizei transformValuesBufsize) {
- NOTIMPLEMENTED();
- return error::kNoError;
-}
-
-error::Error
-GLES2DecoderPassthroughImpl::DoStencilThenCoverStrokePathInstancedCHROMIUM(
- GLsizei numPaths,
- GLenum pathNameType,
- const GLvoid* paths,
- GLsizei pathsBufsize,
- GLuint pathBase,
- GLint reference,
- GLuint mask,
- GLenum coverMode,
- GLenum transformType,
- const GLfloat* transformValues,
- GLsizei transformValuesBufsize) {
- NOTIMPLEMENTED();
- return error::kNoError;
-}
-
-error::Error GLES2DecoderPassthroughImpl::DoBindFragmentInputLocationCHROMIUM(
- GLuint program,
- GLint location,
- const char* name) {
- NOTIMPLEMENTED();
- return error::kNoError;
-}
-
-error::Error GLES2DecoderPassthroughImpl::DoProgramPathFragmentInputGenCHROMIUM(
- GLuint program,
- GLint location,
- GLenum genMode,
- GLint components,
- const GLfloat* coeffs,
- GLsizei coeffsBufsize) {
- NOTIMPLEMENTED();
- return error::kNoError;
-}
-
error::Error GLES2DecoderPassthroughImpl::DoCoverageModulationCHROMIUM(
GLenum components) {
NOTIMPLEMENTED();
@@ -5645,7 +5423,7 @@ GLES2DecoderPassthroughImpl::DoBeginSharedImageAccessDirectCHROMIUM(
return error::kNoError;
}
- if (!found->second.BeginAccess(mode)) {
+ if (!found->second.BeginAccess(mode, api())) {
InsertError(GL_INVALID_OPERATION, "unable to begin access");
return error::kNoError;
}
@@ -5668,5 +5446,19 @@ error::Error GLES2DecoderPassthroughImpl::DoEndSharedImageAccessDirectCHROMIUM(
return error::kNoError;
}
+error::Error
+GLES2DecoderPassthroughImpl::DoBeginBatchReadAccessSharedImageCHROMIUM() {
+ DCHECK(group_->shared_image_manager());
+ group_->shared_image_manager()->BeginBatchReadAccess();
+ return error::kNoError;
+}
+
+error::Error
+GLES2DecoderPassthroughImpl::DoEndBatchReadAccessSharedImageCHROMIUM() {
+ DCHECK(group_->shared_image_manager());
+ group_->shared_image_manager()->EndBatchReadAccess();
+ return error::kNoError;
+}
+
} // namespace gles2
} // namespace gpu
diff --git a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough_handlers.cc b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough_handlers.cc
index f9761ec8bc5..d681ee61fee 100644
--- a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough_handlers.cc
+++ b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough_handlers.cc
@@ -6,7 +6,6 @@
#include "gpu/command_buffer/common/discardable_handle.h"
#include "gpu/command_buffer/service/multi_draw_manager.h"
-#include "ui/gfx/ipc/color/gfx_param_traits.h"
namespace gpu {
namespace gles2 {
@@ -128,6 +127,9 @@ error::Error GLES2DecoderPassthroughImpl::HandleDrawArrays(
error::Error GLES2DecoderPassthroughImpl::HandleDrawArraysIndirect(
uint32_t immediate_data_size,
const volatile void* cmd_data) {
+ if (!feature_info_->IsWebGL2ComputeContext()) {
+ return error::kUnknownCommand;
+ }
const volatile gles2::cmds::DrawArraysIndirect& c =
*static_cast<const volatile gles2::cmds::DrawArraysIndirect*>(cmd_data);
GLenum mode = static_cast<GLenum>(c.mode);
@@ -154,6 +156,9 @@ error::Error GLES2DecoderPassthroughImpl::HandleDrawElements(
error::Error GLES2DecoderPassthroughImpl::HandleDrawElementsIndirect(
uint32_t immediate_data_size,
const volatile void* cmd_data) {
+ if (!feature_info_->IsWebGL2ComputeContext()) {
+ return error::kUnknownCommand;
+ }
const volatile gles2::cmds::DrawElementsIndirect& c =
*static_cast<const volatile gles2::cmds::DrawElementsIndirect*>(cmd_data);
GLenum mode = static_cast<GLenum>(c.mode);
@@ -1559,9 +1564,12 @@ error::Error GLES2DecoderPassthroughImpl::HandleResizeCHROMIUM(
GLuint width = static_cast<GLuint>(c.width);
GLuint height = static_cast<GLuint>(c.height);
GLfloat scale_factor = static_cast<GLfloat>(c.scale_factor);
- GLenum color_space = static_cast<GLenum>(c.color_space);
GLboolean has_alpha = static_cast<GLboolean>(c.alpha);
-
+ gfx::ColorSpace color_space;
+ if (!ReadColorSpace(c.shm_id, c.shm_offset, c.color_space_size,
+ &color_space)) {
+ return error::kOutOfBounds;
+ }
return DoResizeCHROMIUM(width, height, scale_factor, color_space, has_alpha);
}
@@ -2300,606 +2308,14 @@ error::Error GLES2DecoderPassthroughImpl::HandleSetColorSpaceMetadataCHROMIUM(
*static_cast<const volatile gles2::cmds::SetColorSpaceMetadataCHROMIUM*>(
cmd_data);
GLuint texture_id = c.texture_id;
- GLsizei color_space_size = c.color_space_size;
- const char* data = static_cast<const char*>(
- GetAddressAndCheckSize(c.shm_id, c.shm_offset, color_space_size));
- if (!data) {
- return error::kOutOfBounds;
- }
-
- // Make a copy to reduce the risk of a time of check to time of use attack.
- std::vector<char> color_space_data(data, data + color_space_size);
- base::Pickle color_space_pickle(color_space_data.data(), color_space_size);
- base::PickleIterator iterator(color_space_pickle);
gfx::ColorSpace color_space;
- if (!IPC::ParamTraits<gfx::ColorSpace>::Read(&color_space_pickle, &iterator,
- &color_space)) {
+ if (!ReadColorSpace(c.shm_id, c.shm_offset, c.color_space_size,
+ &color_space)) {
return error::kOutOfBounds;
}
-
return DoSetColorSpaceMetadataCHROMIUM(texture_id, color_space);
}
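
Both HandleResizeCHROMIUM and HandleSetColorSpaceMetadataCHROMIUM now defer to a ReadColorSpace helper whose definition is outside this diff. Judging from the inline pickle-parsing it replaces just above, the helper presumably does something along these lines; the name and exact signature below are assumptions:

    // Hedged sketch of ReadColorSpace, modeled on the removed inline code:
    // copy the serialized bytes out of shared memory, then deserialize a
    // gfx::ColorSpace from a base::Pickle via IPC::ParamTraits.
    bool GLES2DecoderPassthroughImpl::ReadColorSpaceSketch(
        uint32_t shm_id, uint32_t shm_offset, uint32_t size,
        gfx::ColorSpace* color_space) {
      const char* data = static_cast<const char*>(
          GetAddressAndCheckSize(shm_id, shm_offset, size));
      if (!data)
        return false;
      // Copy first to reduce the risk of a time-of-check/time-of-use attack.
      std::vector<char> color_space_data(data, data + size);
      base::Pickle pickle(color_space_data.data(), size);
      base::PickleIterator iterator(pickle);
      return IPC::ParamTraits<gfx::ColorSpace>::Read(&pickle, &iterator,
                                                     color_space);
    }
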
-error::Error GLES2DecoderPassthroughImpl::HandleGenPathsCHROMIUM(
- uint32_t immediate_data_size,
- const volatile void* cmd_data) {
- if (!features().chromium_path_rendering) {
- return error::kUnknownCommand;
- }
- const volatile gles2::cmds::GenPathsCHROMIUM& c =
- *static_cast<const volatile gles2::cmds::GenPathsCHROMIUM*>(cmd_data);
- GLuint path = static_cast<GLuint>(c.first_client_id);
- GLsizei range = static_cast<GLsizei>(c.range);
-
- return DoGenPathsCHROMIUM(path, range);
-}
-
-error::Error GLES2DecoderPassthroughImpl::HandleDeletePathsCHROMIUM(
- uint32_t immediate_data_size,
- const volatile void* cmd_data) {
- if (!features().chromium_path_rendering) {
- return error::kUnknownCommand;
- }
- const volatile gles2::cmds::DeletePathsCHROMIUM& c =
- *static_cast<const volatile gles2::cmds::DeletePathsCHROMIUM*>(cmd_data);
- GLuint path = static_cast<GLuint>(c.first_client_id);
- GLsizei range = static_cast<GLsizei>(c.range);
-
- return DoDeletePathsCHROMIUM(path, range);
-}
-
-error::Error GLES2DecoderPassthroughImpl::HandlePathCommandsCHROMIUM(
- uint32_t immediate_data_size,
- const volatile void* cmd_data) {
- if (!features().chromium_path_rendering) {
- return error::kUnknownCommand;
- }
- const volatile gles2::cmds::PathCommandsCHROMIUM& c =
- *static_cast<const volatile gles2::cmds::PathCommandsCHROMIUM*>(cmd_data);
- GLuint path = static_cast<GLuint>(c.path);
- GLsizei num_commands = static_cast<GLsizei>(c.numCommands);
- GLsizei num_coords = static_cast<GLsizei>(c.numCoords);
- GLenum coord_type = static_cast<GLenum>(c.coordType);
- uint32_t commands_shm_id = c.commands_shm_id;
- uint32_t commands_shm_offset = c.commands_shm_offset;
- uint32_t coords_shm_id = c.coords_shm_id;
- uint32_t coords_shm_offset = c.coords_shm_offset;
-
- const GLubyte* commands = nullptr;
- if (num_commands > 0) {
- if (commands_shm_id != 0 || commands_shm_offset != 0) {
- commands = GetSharedMemoryAs<const GLubyte*>(
- commands_shm_id, commands_shm_offset, num_commands);
- }
- if (!commands) {
- return error::kOutOfBounds;
- }
- }
- const GLvoid* coords = nullptr;
- GLsizei coords_bufsize = 0;
- if (num_coords > 0) {
- if (coords_shm_id != 0 || coords_shm_offset != 0) {
- unsigned int memory_size = 0;
- coords = GetSharedMemoryAndSizeAs<const GLvoid*>(
- coords_shm_id, coords_shm_offset, 0, &memory_size);
- coords_bufsize = static_cast<GLsizei>(memory_size);
- }
-
- if (!coords) {
- return error::kOutOfBounds;
- }
- }
-
- return DoPathCommandsCHROMIUM(path, num_commands, commands, num_coords,
- coord_type, coords, coords_bufsize);
-}
-
-error::Error GLES2DecoderPassthroughImpl::HandlePathParameterfCHROMIUM(
- uint32_t immediate_data_size,
- const volatile void* cmd_data) {
- if (!features().chromium_path_rendering) {
- return error::kUnknownCommand;
- }
- const volatile gles2::cmds::PathParameterfCHROMIUM& c =
- *static_cast<const volatile gles2::cmds::PathParameterfCHROMIUM*>(
- cmd_data);
- GLuint path = static_cast<GLuint>(c.path);
- GLenum pname = static_cast<GLenum>(c.pname);
- GLfloat value = static_cast<GLfloat>(c.value);
-
- return DoPathParameterfCHROMIUM(path, pname, value);
-}
-
-error::Error GLES2DecoderPassthroughImpl::HandlePathParameteriCHROMIUM(
- uint32_t immediate_data_size,
- const volatile void* cmd_data) {
- if (!features().chromium_path_rendering) {
- return error::kUnknownCommand;
- }
- const volatile gles2::cmds::PathParameteriCHROMIUM& c =
- *static_cast<const volatile gles2::cmds::PathParameteriCHROMIUM*>(
- cmd_data);
- GLuint path = static_cast<GLuint>(c.path);
- GLenum pname = static_cast<GLenum>(c.pname);
- GLint value = static_cast<GLint>(c.value);
-
- return DoPathParameteriCHROMIUM(path, pname, value);
-}
-
-error::Error GLES2DecoderPassthroughImpl::HandleStencilFillPathCHROMIUM(
- uint32_t immediate_data_size,
- const volatile void* cmd_data) {
- if (!features().chromium_path_rendering)
- return error::kUnknownCommand;
- const volatile gles2::cmds::StencilFillPathCHROMIUM& c =
- *static_cast<const volatile gles2::cmds::StencilFillPathCHROMIUM*>(
- cmd_data);
- GLuint path = static_cast<GLuint>(c.path);
- GLenum fill_mode = static_cast<GLenum>(c.fillMode);
- GLuint mask = static_cast<GLuint>(c.mask);
-
- return DoStencilFillPathCHROMIUM(path, fill_mode, mask);
-}
-
-error::Error GLES2DecoderPassthroughImpl::HandleStencilStrokePathCHROMIUM(
- uint32_t immediate_data_size,
- const volatile void* cmd_data) {
- if (!features().chromium_path_rendering) {
- return error::kUnknownCommand;
- }
- const volatile gles2::cmds::StencilStrokePathCHROMIUM& c =
- *static_cast<const volatile gles2::cmds::StencilStrokePathCHROMIUM*>(
- cmd_data);
- GLuint path = static_cast<GLuint>(c.path);
- GLint reference = static_cast<GLint>(c.reference);
- GLuint mask = static_cast<GLuint>(c.mask);
-
- return DoStencilStrokePathCHROMIUM(path, reference, mask);
-}
-
-error::Error GLES2DecoderPassthroughImpl::HandleCoverFillPathCHROMIUM(
- uint32_t immediate_data_size,
- const volatile void* cmd_data) {
- if (!features().chromium_path_rendering)
- return error::kUnknownCommand;
- const volatile gles2::cmds::CoverFillPathCHROMIUM& c =
- *static_cast<const volatile gles2::cmds::CoverFillPathCHROMIUM*>(
- cmd_data);
- GLuint path = static_cast<GLuint>(c.path);
- GLenum cover_mode = static_cast<GLenum>(c.coverMode);
-
- return DoCoverFillPathCHROMIUM(path, cover_mode);
-}
-
-error::Error GLES2DecoderPassthroughImpl::HandleCoverStrokePathCHROMIUM(
- uint32_t immediate_data_size,
- const volatile void* cmd_data) {
- if (!features().chromium_path_rendering) {
- return error::kUnknownCommand;
- }
- const volatile gles2::cmds::CoverStrokePathCHROMIUM& c =
- *static_cast<const volatile gles2::cmds::CoverStrokePathCHROMIUM*>(
- cmd_data);
- GLuint path = static_cast<GLuint>(c.path);
- GLenum cover_mode = static_cast<GLenum>(c.coverMode);
-
- return DoCoverStrokePathCHROMIUM(path, cover_mode);
-}
-
-error::Error
-GLES2DecoderPassthroughImpl::HandleStencilThenCoverFillPathCHROMIUM(
- uint32_t immediate_data_size,
- const volatile void* cmd_data) {
- if (!features().chromium_path_rendering)
- return error::kUnknownCommand;
- const volatile gles2::cmds::StencilThenCoverFillPathCHROMIUM& c =
- *static_cast<
- const volatile gles2::cmds::StencilThenCoverFillPathCHROMIUM*>(
- cmd_data);
- GLuint path = static_cast<GLuint>(c.path);
- GLenum fill_mode = static_cast<GLenum>(c.fillMode);
- GLuint mask = static_cast<GLuint>(c.mask);
- GLenum cover_mode = static_cast<GLenum>(c.coverMode);
-
- return DoStencilThenCoverFillPathCHROMIUM(path, fill_mode, mask, cover_mode);
-}
-
-error::Error
-GLES2DecoderPassthroughImpl::HandleStencilThenCoverStrokePathCHROMIUM(
- uint32_t immediate_data_size,
- const volatile void* cmd_data) {
- if (!features().chromium_path_rendering) {
- return error::kUnknownCommand;
- }
- const volatile gles2::cmds::StencilThenCoverStrokePathCHROMIUM& c =
- *static_cast<
- const volatile gles2::cmds::StencilThenCoverStrokePathCHROMIUM*>(
- cmd_data);
- GLuint path = static_cast<GLuint>(c.path);
- GLint reference = static_cast<GLint>(c.reference);
- GLuint mask = static_cast<GLuint>(c.mask);
- GLenum cover_mode = static_cast<GLenum>(c.coverMode);
-
- return DoStencilThenCoverStrokePathCHROMIUM(path, reference, mask,
- cover_mode);
-}
-
-error::Error
-GLES2DecoderPassthroughImpl::HandleStencilFillPathInstancedCHROMIUM(
- uint32_t immediate_data_size,
- const volatile void* cmd_data) {
- if (!features().chromium_path_rendering) {
- return error::kUnknownCommand;
- }
- const volatile gles2::cmds::StencilFillPathInstancedCHROMIUM& c =
- *static_cast<
- const volatile gles2::cmds::StencilFillPathInstancedCHROMIUM*>(
- cmd_data);
- GLsizei num_paths = static_cast<GLsizei>(c.numPaths);
- GLenum path_name_type = static_cast<GLuint>(c.pathNameType);
- GLuint path_base = static_cast<GLuint>(c.pathBase);
- GLenum fill_mode = static_cast<GLenum>(c.fillMode);
- GLuint mask = static_cast<GLuint>(c.mask);
- GLenum transform_type = static_cast<GLuint>(c.transformType);
- uint32_t paths_shm_id = c.paths_shm_id;
- uint32_t paths_shm_offset = c.paths_shm_offset;
- uint32_t transform_values_shm_id = c.transformValues_shm_id;
- uint32_t transform_values_shm_offset = c.transformValues_shm_offset;
-
- const GLvoid* paths = nullptr;
- GLsizei paths_bufsize = 0;
- if (num_paths > 0) {
- if (paths_shm_id != 0 || paths_shm_offset != 0) {
- unsigned int memory_size = 0;
- paths = GetSharedMemoryAndSizeAs<const GLvoid*>(
- paths_shm_id, paths_shm_offset, 0, &memory_size);
- paths_bufsize = static_cast<GLsizei>(memory_size);
- }
-
- if (!paths) {
- return error::kOutOfBounds;
- }
- }
- const GLfloat* transform_values = nullptr;
- GLsizei transform_values_bufsize = 0;
- if (transform_values_shm_id != 0 || transform_values_shm_offset != 0) {
- unsigned int memory_size = 0;
- transform_values = GetSharedMemoryAndSizeAs<const GLfloat*>(
- transform_values_shm_id, transform_values_shm_offset, 0, &memory_size);
- transform_values_bufsize = static_cast<GLsizei>(memory_size);
- }
- if (!transform_values) {
- return error::kOutOfBounds;
- }
-
- return DoStencilFillPathInstancedCHROMIUM(
- num_paths, path_name_type, paths, paths_bufsize, path_base, fill_mode,
- mask, transform_type, transform_values, transform_values_bufsize);
-}
-
-error::Error
-GLES2DecoderPassthroughImpl::HandleStencilStrokePathInstancedCHROMIUM(
- uint32_t immediate_data_size,
- const volatile void* cmd_data) {
- if (!features().chromium_path_rendering) {
- return error::kUnknownCommand;
- }
- const volatile gles2::cmds::StencilStrokePathInstancedCHROMIUM& c =
- *static_cast<
- const volatile gles2::cmds::StencilStrokePathInstancedCHROMIUM*>(
- cmd_data);
- GLsizei num_paths = static_cast<GLsizei>(c.numPaths);
- GLenum path_name_type = static_cast<GLuint>(c.pathNameType);
- GLuint path_base = static_cast<GLuint>(c.pathBase);
- GLint reference = static_cast<GLint>(c.reference);
- GLuint mask = static_cast<GLuint>(c.mask);
- GLenum transform_type = static_cast<GLuint>(c.transformType);
- uint32_t paths_shm_id = c.paths_shm_id;
- uint32_t paths_shm_offset = c.paths_shm_offset;
- uint32_t transform_values_shm_id = c.transformValues_shm_id;
- uint32_t transform_values_shm_offset = c.transformValues_shm_offset;
-
- const GLvoid* paths = nullptr;
- GLsizei paths_bufsize = 0;
- if (num_paths > 0) {
- if (paths_shm_id != 0 || paths_shm_offset != 0) {
- unsigned int memory_size = 0;
- paths = GetSharedMemoryAndSizeAs<const GLvoid*>(
- paths_shm_id, paths_shm_offset, 0, &memory_size);
- paths_bufsize = static_cast<GLsizei>(memory_size);
- }
-
- if (!paths) {
- return error::kOutOfBounds;
- }
- }
- const GLfloat* transform_values = nullptr;
- GLsizei transform_values_bufsize = 0;
- if (transform_values_shm_id != 0 || transform_values_shm_offset != 0) {
- unsigned int memory_size = 0;
- transform_values = GetSharedMemoryAndSizeAs<const GLfloat*>(
- transform_values_shm_id, transform_values_shm_offset, 0, &memory_size);
- transform_values_bufsize = static_cast<GLsizei>(memory_size);
- }
- if (!transform_values) {
- return error::kOutOfBounds;
- }
-
- return DoStencilStrokePathInstancedCHROMIUM(
- num_paths, path_name_type, paths, paths_bufsize, path_base, reference,
- mask, transform_type, transform_values, transform_values_bufsize);
-}
-
-error::Error GLES2DecoderPassthroughImpl::HandleCoverFillPathInstancedCHROMIUM(
- uint32_t immediate_data_size,
- const volatile void* cmd_data) {
- if (!features().chromium_path_rendering) {
- return error::kUnknownCommand;
- }
- const volatile gles2::cmds::CoverFillPathInstancedCHROMIUM& c =
- *static_cast<const volatile gles2::cmds::CoverFillPathInstancedCHROMIUM*>(
- cmd_data);
- GLsizei num_paths = static_cast<GLsizei>(c.numPaths);
- GLenum path_name_type = static_cast<GLuint>(c.pathNameType);
- GLuint path_base = static_cast<GLuint>(c.pathBase);
- GLenum cover_mode = static_cast<GLenum>(c.coverMode);
- GLenum transform_type = static_cast<GLuint>(c.transformType);
- uint32_t paths_shm_id = c.paths_shm_id;
- uint32_t paths_shm_offset = c.paths_shm_offset;
- uint32_t transform_values_shm_id = c.transformValues_shm_id;
- uint32_t transform_values_shm_offset = c.transformValues_shm_offset;
-
- const GLvoid* paths = nullptr;
- GLsizei paths_bufsize = 0;
- if (num_paths > 0) {
- if (paths_shm_id != 0 || paths_shm_offset != 0) {
- unsigned int memory_size = 0;
- paths = GetSharedMemoryAndSizeAs<const GLvoid*>(
- paths_shm_id, paths_shm_offset, 0, &memory_size);
- paths_bufsize = static_cast<GLsizei>(memory_size);
- }
-
- if (!paths) {
- return error::kOutOfBounds;
- }
- }
- const GLfloat* transform_values = nullptr;
- GLsizei transform_values_bufsize = 0;
- if (transform_values_shm_id != 0 || transform_values_shm_offset != 0) {
- unsigned int memory_size = 0;
- transform_values = GetSharedMemoryAndSizeAs<const GLfloat*>(
- transform_values_shm_id, transform_values_shm_offset, 0, &memory_size);
- transform_values_bufsize = static_cast<GLsizei>(memory_size);
- }
- if (!transform_values) {
- return error::kOutOfBounds;
- }
-
- return DoCoverFillPathInstancedCHROMIUM(
- num_paths, path_name_type, paths, paths_bufsize, path_base, cover_mode,
- transform_type, transform_values, transform_values_bufsize);
-}
-
-error::Error
-GLES2DecoderPassthroughImpl::HandleCoverStrokePathInstancedCHROMIUM(
- uint32_t immediate_data_size,
- const volatile void* cmd_data) {
- if (!features().chromium_path_rendering) {
- return error::kUnknownCommand;
- }
- const volatile gles2::cmds::CoverStrokePathInstancedCHROMIUM& c =
- *static_cast<
- const volatile gles2::cmds::CoverStrokePathInstancedCHROMIUM*>(
- cmd_data);
- GLsizei num_paths = static_cast<GLsizei>(c.numPaths);
- GLenum path_name_type = static_cast<GLuint>(c.pathNameType);
- GLuint path_base = static_cast<GLuint>(c.pathBase);
- GLenum cover_mode = static_cast<GLenum>(c.coverMode);
- GLenum transform_type = static_cast<GLuint>(c.transformType);
- uint32_t paths_shm_id = c.paths_shm_id;
- uint32_t paths_shm_offset = c.paths_shm_offset;
- uint32_t transform_values_shm_id = c.transformValues_shm_id;
- uint32_t transform_values_shm_offset = c.transformValues_shm_offset;
-
- const GLvoid* paths = nullptr;
- GLsizei paths_bufsize = 0;
- if (num_paths > 0) {
- if (paths_shm_id != 0 || paths_shm_offset != 0) {
- unsigned int memory_size = 0;
- paths = GetSharedMemoryAndSizeAs<const GLvoid*>(
- paths_shm_id, paths_shm_offset, 0, &memory_size);
- paths_bufsize = static_cast<GLsizei>(memory_size);
- }
-
- if (!paths) {
- return error::kOutOfBounds;
- }
- }
- const GLfloat* transform_values = nullptr;
- GLsizei transform_values_bufsize = 0;
- if (transform_values_shm_id != 0 || transform_values_shm_offset != 0) {
- unsigned int memory_size = 0;
- transform_values = GetSharedMemoryAndSizeAs<const GLfloat*>(
- transform_values_shm_id, transform_values_shm_offset, 0, &memory_size);
- transform_values_bufsize = static_cast<GLsizei>(memory_size);
- }
- if (!transform_values) {
- return error::kOutOfBounds;
- }
-
- return DoCoverStrokePathInstancedCHROMIUM(
- num_paths, path_name_type, paths, paths_bufsize, path_base, cover_mode,
- transform_type, transform_values, transform_values_bufsize);
-}
-
-error::Error
-GLES2DecoderPassthroughImpl::HandleStencilThenCoverFillPathInstancedCHROMIUM(
- uint32_t immediate_data_size,
- const volatile void* cmd_data) {
- if (!features().chromium_path_rendering) {
- return error::kUnknownCommand;
- }
- const volatile gles2::cmds::StencilThenCoverFillPathInstancedCHROMIUM& c =
- *static_cast<const volatile gles2::cmds::
- StencilThenCoverFillPathInstancedCHROMIUM*>(cmd_data);
- GLsizei num_paths = static_cast<GLsizei>(c.numPaths);
- GLenum path_name_type = static_cast<GLuint>(c.pathNameType);
- GLuint path_base = static_cast<GLuint>(c.pathBase);
- GLenum cover_mode = static_cast<GLenum>(c.coverMode);
- GLenum fill_mode = static_cast<GLenum>(c.fillMode);
- GLuint mask = static_cast<GLuint>(c.mask);
- GLenum transform_type = static_cast<GLuint>(c.transformType);
- uint32_t paths_shm_id = c.paths_shm_id;
- uint32_t paths_shm_offset = c.paths_shm_offset;
- uint32_t transform_values_shm_id = c.transformValues_shm_id;
- uint32_t transform_values_shm_offset = c.transformValues_shm_offset;
-
- const GLvoid* paths = nullptr;
- GLsizei paths_bufsize = 0;
- if (num_paths > 0) {
- if (paths_shm_id != 0 || paths_shm_offset != 0) {
- unsigned int memory_size = 0;
- paths = GetSharedMemoryAndSizeAs<const GLvoid*>(
- paths_shm_id, paths_shm_offset, 0, &memory_size);
- paths_bufsize = static_cast<GLsizei>(memory_size);
- }
-
- if (!paths) {
- return error::kOutOfBounds;
- }
- }
- const GLfloat* transform_values = nullptr;
- GLsizei transform_values_bufsize = 0;
- if (transform_values_shm_id != 0 || transform_values_shm_offset != 0) {
- unsigned int memory_size = 0;
- transform_values = GetSharedMemoryAndSizeAs<const GLfloat*>(
- transform_values_shm_id, transform_values_shm_offset, 0, &memory_size);
- transform_values_bufsize = static_cast<GLsizei>(memory_size);
- }
- if (!transform_values) {
- return error::kOutOfBounds;
- }
-
- return DoStencilThenCoverFillPathInstancedCHROMIUM(
- num_paths, path_name_type, paths, paths_bufsize, path_base, cover_mode,
- fill_mode, mask, transform_type, transform_values,
- transform_values_bufsize);
-}
-
-error::Error
-GLES2DecoderPassthroughImpl::HandleStencilThenCoverStrokePathInstancedCHROMIUM(
- uint32_t immediate_data_size,
- const volatile void* cmd_data) {
- if (!features().chromium_path_rendering) {
- return error::kUnknownCommand;
- }
- const volatile gles2::cmds::StencilThenCoverStrokePathInstancedCHROMIUM& c =
- *static_cast<const volatile gles2::cmds::
- StencilThenCoverStrokePathInstancedCHROMIUM*>(cmd_data);
- GLsizei num_paths = static_cast<GLsizei>(c.numPaths);
- GLenum path_name_type = static_cast<GLuint>(c.pathNameType);
- GLuint path_base = static_cast<GLuint>(c.pathBase);
- GLenum cover_mode = static_cast<GLenum>(c.coverMode);
- GLint reference = static_cast<GLint>(c.reference);
- GLuint mask = static_cast<GLuint>(c.mask);
- GLenum transform_type = static_cast<GLuint>(c.transformType);
- uint32_t paths_shm_id = c.paths_shm_id;
- uint32_t paths_shm_offset = c.paths_shm_offset;
- uint32_t transform_values_shm_id = c.transformValues_shm_id;
- uint32_t transform_values_shm_offset = c.transformValues_shm_offset;
-
- const GLvoid* paths = nullptr;
- GLsizei paths_bufsize = 0;
- if (num_paths > 0) {
- if (paths_shm_id != 0 || paths_shm_offset != 0) {
- unsigned int memory_size = 0;
- paths = GetSharedMemoryAndSizeAs<const GLvoid*>(
- paths_shm_id, paths_shm_offset, 0, &memory_size);
- paths_bufsize = static_cast<GLsizei>(memory_size);
- }
-
- if (!paths) {
- return error::kOutOfBounds;
- }
- }
- const GLfloat* transform_values = nullptr;
- GLsizei transform_values_bufsize = 0;
- if (transform_values_shm_id != 0 || transform_values_shm_offset != 0) {
- unsigned int memory_size = 0;
- transform_values = GetSharedMemoryAndSizeAs<const GLfloat*>(
- transform_values_shm_id, transform_values_shm_offset, 0, &memory_size);
- transform_values_bufsize = static_cast<GLsizei>(memory_size);
- }
- if (!transform_values) {
- return error::kOutOfBounds;
- }
-
- return DoStencilThenCoverStrokePathInstancedCHROMIUM(
- num_paths, path_name_type, paths, paths_bufsize, path_base, cover_mode,
- reference, mask, transform_type, transform_values,
- transform_values_bufsize);
-}
-
-error::Error
-GLES2DecoderPassthroughImpl::HandleBindFragmentInputLocationCHROMIUMBucket(
- uint32_t immediate_data_size,
- const volatile void* cmd_data) {
- if (!features().chromium_path_rendering) {
- return error::kUnknownCommand;
- }
- const volatile gles2::cmds::BindFragmentInputLocationCHROMIUMBucket& c =
- *static_cast<
- const volatile gles2::cmds::BindFragmentInputLocationCHROMIUMBucket*>(
- cmd_data);
- GLuint program = static_cast<GLuint>(c.program);
- GLint location = static_cast<GLint>(c.location);
- uint32_t name_bucket_id = c.name_bucket_id;
-
- Bucket* bucket = GetBucket(name_bucket_id);
- if (!bucket || bucket->size() == 0) {
- return error::kInvalidArguments;
- }
- std::string name_str;
- if (!bucket->GetAsString(&name_str)) {
- return error::kInvalidArguments;
- }
- return DoBindFragmentInputLocationCHROMIUM(program, location,
- name_str.c_str());
-}
-
-error::Error
-GLES2DecoderPassthroughImpl::HandleProgramPathFragmentInputGenCHROMIUM(
- uint32_t immediate_data_size,
- const volatile void* cmd_data) {
- if (!features().chromium_path_rendering) {
- return error::kUnknownCommand;
- }
- const volatile gles2::cmds::ProgramPathFragmentInputGenCHROMIUM& c =
- *static_cast<
- const volatile gles2::cmds::ProgramPathFragmentInputGenCHROMIUM*>(
- cmd_data);
- GLint program = static_cast<GLint>(c.program);
- GLint location = static_cast<GLint>(c.location);
- GLenum gen_mode = static_cast<GLint>(c.genMode);
- GLint components = static_cast<GLint>(c.components);
- uint32_t coeffs_shm_id = c.coeffs_shm_id;
- uint32_t coeffs_shm_offset = c.coeffs_shm_offset;
-
- const GLfloat* coeffs = nullptr;
- GLsizei coeffs_bufsize = 0;
- if (coeffs_shm_id != 0 || coeffs_shm_offset != 0) {
- unsigned int memory_size = 0;
- coeffs = GetSharedMemoryAndSizeAs<const GLfloat*>(
- coeffs_shm_id, coeffs_shm_offset, 0, &memory_size);
- coeffs_bufsize = static_cast<GLsizei>(memory_size);
- }
- if (!coeffs) {
- return error::kOutOfBounds;
- }
- return DoProgramPathFragmentInputGenCHROMIUM(
- program, location, gen_mode, components, coeffs, coeffs_bufsize);
-}
-
error::Error
GLES2DecoderPassthroughImpl::HandleBindFragDataLocationIndexedEXTBucket(
uint32_t immediate_data_size,
diff --git a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough_handlers_autogen.cc b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough_handlers_autogen.cc
index caac9cfa69f..7e7ca53b9c9 100644
--- a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough_handlers_autogen.cc
+++ b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough_handlers_autogen.cc
@@ -4522,97 +4522,6 @@ error::Error GLES2DecoderPassthroughImpl::HandleScheduleDCLayerCHROMIUM(
return error::kNoError;
}
-error::Error GLES2DecoderPassthroughImpl::HandleMatrixLoadfCHROMIUMImmediate(
- uint32_t immediate_data_size,
- const volatile void* cmd_data) {
- const volatile gles2::cmds::MatrixLoadfCHROMIUMImmediate& c =
- *static_cast<const volatile gles2::cmds::MatrixLoadfCHROMIUMImmediate*>(
- cmd_data);
- if (!features().chromium_path_rendering) {
- return error::kUnknownCommand;
- }
-
- GLenum matrixMode = static_cast<GLenum>(c.matrixMode);
- uint32_t m_size;
- if (!GLES2Util::ComputeDataSize<GLfloat, 16>(1, &m_size)) {
- return error::kOutOfBounds;
- }
- if (m_size > immediate_data_size) {
- return error::kOutOfBounds;
- }
- volatile const GLfloat* m = GetImmediateDataAs<volatile const GLfloat*>(
- c, m_size, immediate_data_size);
- if (m == nullptr) {
- return error::kOutOfBounds;
- }
- error::Error error = DoMatrixLoadfCHROMIUM(matrixMode, m);
- if (error != error::kNoError) {
- return error;
- }
- return error::kNoError;
-}
-
-error::Error GLES2DecoderPassthroughImpl::HandleMatrixLoadIdentityCHROMIUM(
- uint32_t immediate_data_size,
- const volatile void* cmd_data) {
- const volatile gles2::cmds::MatrixLoadIdentityCHROMIUM& c =
- *static_cast<const volatile gles2::cmds::MatrixLoadIdentityCHROMIUM*>(
- cmd_data);
- if (!features().chromium_path_rendering) {
- return error::kUnknownCommand;
- }
-
- GLenum matrixMode = static_cast<GLenum>(c.matrixMode);
- error::Error error = DoMatrixLoadIdentityCHROMIUM(matrixMode);
- if (error != error::kNoError) {
- return error;
- }
- return error::kNoError;
-}
-
-error::Error GLES2DecoderPassthroughImpl::HandleIsPathCHROMIUM(
- uint32_t immediate_data_size,
- const volatile void* cmd_data) {
- const volatile gles2::cmds::IsPathCHROMIUM& c =
- *static_cast<const volatile gles2::cmds::IsPathCHROMIUM*>(cmd_data);
- if (!features().chromium_path_rendering) {
- return error::kUnknownCommand;
- }
-
- GLuint path = c.path;
- typedef cmds::IsPathCHROMIUM::Result Result;
- Result* result = GetSharedMemoryAs<Result*>(
- c.result_shm_id, c.result_shm_offset, sizeof(*result));
- if (!result) {
- return error::kOutOfBounds;
- }
- error::Error error = DoIsPathCHROMIUM(path, result);
- if (error != error::kNoError) {
- return error;
- }
- return error::kNoError;
-}
-
-error::Error GLES2DecoderPassthroughImpl::HandlePathStencilFuncCHROMIUM(
- uint32_t immediate_data_size,
- const volatile void* cmd_data) {
- const volatile gles2::cmds::PathStencilFuncCHROMIUM& c =
- *static_cast<const volatile gles2::cmds::PathStencilFuncCHROMIUM*>(
- cmd_data);
- if (!features().chromium_path_rendering) {
- return error::kUnknownCommand;
- }
-
- GLenum func = static_cast<GLenum>(c.func);
- GLint ref = static_cast<GLint>(c.ref);
- GLuint mask = static_cast<GLuint>(c.mask);
- error::Error error = DoPathStencilFuncCHROMIUM(func, ref, mask);
- if (error != error::kNoError) {
- return error;
- }
- return error::kNoError;
-}
-
error::Error GLES2DecoderPassthroughImpl::HandleContextVisibilityHintCHROMIUM(
uint32_t immediate_data_size,
const volatile void* cmd_data) {
@@ -4957,5 +4866,27 @@ GLES2DecoderPassthroughImpl::HandleEndSharedImageAccessDirectCHROMIUM(
return error::kNoError;
}
+error::Error
+GLES2DecoderPassthroughImpl::HandleBeginBatchReadAccessSharedImageCHROMIUM(
+ uint32_t immediate_data_size,
+ const volatile void* cmd_data) {
+ error::Error error = DoBeginBatchReadAccessSharedImageCHROMIUM();
+ if (error != error::kNoError) {
+ return error;
+ }
+ return error::kNoError;
+}
+
+error::Error
+GLES2DecoderPassthroughImpl::HandleEndBatchReadAccessSharedImageCHROMIUM(
+ uint32_t immediate_data_size,
+ const volatile void* cmd_data) {
+ error::Error error = DoEndBatchReadAccessSharedImageCHROMIUM();
+ if (error != error::kNoError) {
+ return error;
+ }
+ return error::kNoError;
+}
+
} // namespace gles2
} // namespace gpu
diff --git a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough_unittest_textures.cc b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough_unittest_textures.cc
index 97d5900f914..f2fffe2c56d 100644
--- a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough_unittest_textures.cc
+++ b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough_unittest_textures.cc
@@ -4,101 +4,43 @@
#include <stdint.h>
+#include "components/viz/common/resources/resource_format_utils.h"
#include "gpu/command_buffer/service/gles2_cmd_decoder.h"
#include "gpu/command_buffer/service/gles2_cmd_decoder_unittest.h"
#include "gpu/command_buffer/service/shared_image_representation.h"
+#include "gpu/command_buffer/service/test_shared_image_backing.h"
namespace gpu {
namespace gles2 {
namespace {
-static const uint32_t kNewServiceId = 431;
-
-class TestSharedImageBackingPassthrough : public SharedImageBacking {
- public:
- class TestSharedImageRepresentationPassthrough
- : public SharedImageRepresentationGLTexturePassthrough {
- public:
- TestSharedImageRepresentationPassthrough(
- SharedImageManager* manager,
- SharedImageBacking* backing,
- MemoryTypeTracker* tracker,
- scoped_refptr<TexturePassthrough>& texture_passthrough)
- : SharedImageRepresentationGLTexturePassthrough(manager,
- backing,
- tracker),
- texture_passthrough_(texture_passthrough) {}
-
- const scoped_refptr<TexturePassthrough>& GetTexturePassthrough() override {
- return texture_passthrough_;
- }
-
- void set_can_access(bool can_access) { can_access_ = can_access; }
- bool BeginAccess(GLenum mode) override { return can_access_; }
-
- private:
- const scoped_refptr<TexturePassthrough>& texture_passthrough_;
- bool can_access_ = true;
- };
-
- TestSharedImageBackingPassthrough(const Mailbox& mailbox,
- viz::ResourceFormat format,
- const gfx::Size& size,
- const gfx::ColorSpace& color_space,
- uint32_t usage,
- GLuint texture_id)
- : SharedImageBacking(mailbox,
- format,
- size,
- color_space,
- usage,
- 0 /* estimated_size */,
- false /* is_thread_safe */) {
- texture_passthrough_ =
- base::MakeRefCounted<TexturePassthrough>(texture_id, GL_TEXTURE_2D);
- }
-
- bool IsCleared() const override { return false; }
-
- void SetCleared() override {}
-
- void Update(std::unique_ptr<gfx::GpuFence> in_fence) override {
- DCHECK(!in_fence);
- }
-
- bool ProduceLegacyMailbox(MailboxManager* mailbox_manager) override {
- return false;
- }
-
- void Destroy() override { texture_passthrough_.reset(); }
-
- void OnMemoryDump(const std::string& dump_name,
- base::trace_event::MemoryAllocatorDump* dump,
- base::trace_event::ProcessMemoryDump* pmd,
- uint64_t client_tracing_id) override {}
-
- protected:
- std::unique_ptr<SharedImageRepresentationGLTexturePassthrough>
- ProduceGLTexturePassthrough(SharedImageManager* manager,
- MemoryTypeTracker* tracker) override {
- return std::make_unique<TestSharedImageRepresentationPassthrough>(
- manager, this, tracker, texture_passthrough_);
- }
-
- private:
- scoped_refptr<TexturePassthrough> texture_passthrough_;
-};
+std::unique_ptr<TestSharedImageBacking> AllocateTextureAndCreateSharedImage(
+ const Mailbox& mailbox,
+ viz::ResourceFormat format,
+ const gfx::Size& size,
+ const gfx::ColorSpace& color_space,
+ uint32_t usage) {
+ GLuint service_id;
+ glGenTextures(1, &service_id);
+ glBindTexture(GL_TEXTURE_2D, service_id);
+ glTexImage2D(GL_TEXTURE_2D, 0, GLInternalFormat(format), size.width(),
+ size.height(), 0, GLDataFormat(format), GLDataType(format),
+ nullptr /* data */);
+ return std::make_unique<TestSharedImageBacking>(
+ mailbox, format, size, color_space, usage, 0 /* estimated_size */,
+ service_id);
+}
} // namespace
TEST_F(GLES2DecoderPassthroughTest, CreateAndTexStorage2DSharedImageCHROMIUM) {
MemoryTypeTracker memory_tracker(nullptr);
Mailbox mailbox = Mailbox::GenerateForSharedImage();
+ auto backing = AllocateTextureAndCreateSharedImage(
+ mailbox, viz::ResourceFormat::RGBA_8888, gfx::Size(10, 10),
+ gfx::ColorSpace(), 0);
+ GLuint service_id = backing->service_id();
std::unique_ptr<SharedImageRepresentationFactoryRef> shared_image =
- GetSharedImageManager()->Register(
- std::make_unique<TestSharedImageBackingPassthrough>(
- mailbox, viz::ResourceFormat::RGBA_8888, gfx::Size(10, 10),
- gfx::ColorSpace(), 0, kNewServiceId),
- &memory_tracker);
+ GetSharedImageManager()->Register(std::move(backing), &memory_tracker);
auto& cmd = *GetImmediateAs<
cmds::CreateAndTexStorage2DSharedImageINTERNALImmediate>();
@@ -110,11 +52,11 @@ TEST_F(GLES2DecoderPassthroughTest, CreateAndTexStorage2DSharedImageCHROMIUM) {
uint32_t found_service_id = 0;
EXPECT_TRUE(GetPassthroughResources()->texture_id_map.GetServiceID(
kNewClientId, &found_service_id));
- EXPECT_EQ(found_service_id, kNewServiceId);
+ EXPECT_EQ(found_service_id, service_id);
scoped_refptr<TexturePassthrough> found_texture_passthrough;
EXPECT_TRUE(GetPassthroughResources()->texture_object_map.GetServiceID(
kNewClientId, &found_texture_passthrough));
- EXPECT_EQ(found_texture_passthrough->service_id(), kNewServiceId);
+ EXPECT_EQ(found_texture_passthrough->service_id(), service_id);
found_texture_passthrough.reset();
EXPECT_EQ(1u, GetPassthroughResources()->texture_shared_image_map.count(
kNewClientId));
@@ -158,9 +100,9 @@ TEST_F(GLES2DecoderPassthroughTest,
Mailbox mailbox = Mailbox::GenerateForSharedImage();
std::unique_ptr<SharedImageRepresentationFactoryRef> shared_image =
GetSharedImageManager()->Register(
- std::make_unique<TestSharedImageBackingPassthrough>(
+ AllocateTextureAndCreateSharedImage(
mailbox, viz::ResourceFormat::RGBA_8888, gfx::Size(10, 10),
- gfx::ColorSpace(), 0, kNewServiceId),
+ gfx::ColorSpace(), 0),
&memory_tracker);
{
@@ -193,9 +135,9 @@ TEST_F(GLES2DecoderPassthroughTest, BeginEndSharedImageAccessCRHOMIUM) {
Mailbox mailbox = Mailbox::GenerateForSharedImage();
std::unique_ptr<SharedImageRepresentationFactoryRef> shared_image =
GetSharedImageManager()->Register(
- std::make_unique<TestSharedImageBackingPassthrough>(
+ AllocateTextureAndCreateSharedImage(
mailbox, viz::ResourceFormat::RGBA_8888, gfx::Size(10, 10),
- gfx::ColorSpace(), 0, kNewServiceId),
+ gfx::ColorSpace(), 0),
&memory_tracker);
shared_images.emplace_back(std::move(shared_image));
@@ -224,17 +166,10 @@ TEST_F(GLES2DecoderPassthroughTest, BeginEndSharedImageAccessCRHOMIUM) {
EXPECT_EQ(GL_NO_ERROR, GetGLError());
cmds::EndSharedImageAccessDirectCHROMIUM readwrite_end_cmd;
readwrite_end_cmd.Init(client_id);
- // EXPECT_EQ(error::kNoError, ExecuteCmd(readwrite_end_cmd));
- // EXPECT_EQ(GL_NO_ERROR, GetGLError());
- }
-
- for (int i = 20; i > 10; --i) {
- cmds::EndSharedImageAccessDirectCHROMIUM readwrite_end_cmd;
- readwrite_end_cmd.Init(kNewClientId + i);
EXPECT_EQ(error::kNoError, ExecuteCmd(readwrite_end_cmd));
EXPECT_EQ(GL_NO_ERROR, GetGLError());
- DoDeleteTexture(kNewClientId + i);
- fprintf(stderr, "EEEE DoDeleteTexture() i=%d\n", i);
+
+ DoDeleteTexture(client_id);
}
// Cleanup
@@ -265,12 +200,14 @@ TEST_F(GLES2DecoderPassthroughTest,
// Create a shared image.
MemoryTypeTracker memory_tracker(nullptr);
Mailbox mailbox = Mailbox::GenerateForSharedImage();
+ auto shared_image_backing = AllocateTextureAndCreateSharedImage(
+ mailbox, viz::ResourceFormat::RGBA_8888, gfx::Size(10, 10),
+ gfx::ColorSpace(), 0);
+ // Set the shared image to fail BeginAccess.
+ shared_image_backing->set_can_access(false);
std::unique_ptr<SharedImageRepresentationFactoryRef> shared_image =
- GetSharedImageManager()->Register(
- std::make_unique<TestSharedImageBackingPassthrough>(
- mailbox, viz::ResourceFormat::RGBA_8888, gfx::Size(10, 10),
- gfx::ColorSpace(), 0, kNewServiceId),
- &memory_tracker);
+ GetSharedImageManager()->Register(std::move(shared_image_backing),
+ &memory_tracker);
auto& cmd = *GetImmediateAs<
cmds::CreateAndTexStorage2DSharedImageINTERNALImmediate>();
@@ -280,14 +217,6 @@ TEST_F(GLES2DecoderPassthroughTest,
// Try to begin access with a shared image representation that fails
// BeginAccess.
- auto found =
- GetPassthroughResources()->texture_shared_image_map.find(kNewClientId);
- ASSERT_TRUE(found !=
- GetPassthroughResources()->texture_shared_image_map.end());
- static_cast<TestSharedImageBackingPassthrough::
- TestSharedImageRepresentationPassthrough*>(
- found->second.representation())
- ->set_can_access(false);
cmds::BeginSharedImageAccessDirectCHROMIUM read_access_cmd;
read_access_cmd.Init(kNewClientId, GL_SHARED_IMAGE_ACCESS_MODE_READ_CHROMIUM);
EXPECT_EQ(error::kNoError, ExecuteCmd(read_access_cmd));
@@ -307,5 +236,84 @@ TEST_F(GLES2DecoderPassthroughTest,
EXPECT_EQ(GL_INVALID_OPERATION, GetGLError());
}
+TEST_F(GLES2DecoderPassthroughTest,
+ BeginSharedImageAccessDirectCHROMIUMClearUncleared) {
+ // Create an uncleared shared image.
+ MemoryTypeTracker memory_tracker(nullptr);
+ Mailbox mailbox = Mailbox::GenerateForSharedImage();
+ std::unique_ptr<SharedImageRepresentationFactoryRef> shared_image =
+ GetSharedImageManager()->Register(
+ AllocateTextureAndCreateSharedImage(
+ mailbox, viz::ResourceFormat::RGBA_8888, gfx::Size(10, 10),
+ gfx::ColorSpace(), 0),
+ &memory_tracker);
+
+ auto& cmd = *GetImmediateAs<
+ cmds::CreateAndTexStorage2DSharedImageINTERNALImmediate>();
+ cmd.Init(kNewClientId, GL_NONE, mailbox.name);
+ EXPECT_EQ(error::kNoError, ExecuteImmediateCmd(cmd, sizeof(mailbox.name)));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+
+ // Backing should be initially uncleared.
+ EXPECT_FALSE(shared_image->IsCleared());
+
+ // Set various pieces of state to ensure the texture clear correctly restores
+ // them.
+ GLboolean color_mask[4] = {true, false, false, true};
+ glColorMask(color_mask[0], color_mask[1], color_mask[2], color_mask[3]);
+ GLfloat clear_color[4] = {0.5f, 0.7f, 0.3f, 0.8f};
+ glClearColor(clear_color[0], clear_color[1], clear_color[2], clear_color[3]);
+ GLuint dummy_fbo;
+ glGenFramebuffersEXT(1, &dummy_fbo);
+ glBindFramebufferEXT(GL_FRAMEBUFFER, dummy_fbo);
+ GLuint dummy_texture;
+ glGenTextures(1, &dummy_texture);
+ glBindTexture(GL_TEXTURE_2D, dummy_texture);
+ glEnable(GL_SCISSOR_TEST);
+
+ // Begin access. We should clear the backing.
+ {
+ cmds::BeginSharedImageAccessDirectCHROMIUM read_access_cmd;
+ read_access_cmd.Init(kNewClientId,
+ GL_SHARED_IMAGE_ACCESS_MODE_READ_CHROMIUM);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(read_access_cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+ EXPECT_TRUE(shared_image->IsCleared());
+ }
+
+ // Our state should not be modified.
+ GLboolean test_color_mask[4];
+ glGetBooleanv(GL_COLOR_WRITEMASK, test_color_mask);
+ EXPECT_TRUE(0 ==
+ memcmp(test_color_mask, color_mask, sizeof(test_color_mask)));
+ GLfloat test_clear_color[4];
+ glGetFloatv(GL_COLOR_CLEAR_VALUE, test_clear_color);
+ EXPECT_TRUE(0 ==
+ memcmp(test_clear_color, clear_color, sizeof(test_clear_color)));
+ GLint test_fbo;
+ glGetIntegerv(GL_FRAMEBUFFER_BINDING, &test_fbo);
+ EXPECT_EQ(test_fbo, static_cast<GLint>(dummy_fbo));
+ GLint test_texture;
+ glGetIntegerv(GL_TEXTURE_BINDING_2D, &test_texture);
+ EXPECT_EQ(test_texture, static_cast<GLint>(dummy_texture));
+ GLboolean test_scissor;
+ glGetBooleanv(GL_SCISSOR_TEST, &test_scissor);
+ EXPECT_TRUE(test_scissor);
+
+ // End access.
+ {
+ cmds::EndSharedImageAccessDirectCHROMIUM end_access_cmd;
+ end_access_cmd.Init(kNewClientId);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(end_access_cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+ }
+
+ // Cleanup
+ glDeleteFramebuffersEXT(1, &dummy_fbo);
+ glDeleteTextures(1, &dummy_texture);
+ DoDeleteTexture(kNewClientId);
+ shared_image.reset();
+}
+
} // namespace gles2
} // namespace gpu
diff --git a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest.cc b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest.cc
index 3e4071b35d8..e93c320c478 100644
--- a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest.cc
+++ b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest.cc
@@ -1321,7 +1321,8 @@ class SizeOnlyMemoryTracker : public MemoryTracker {
}
~SizeOnlyMemoryTracker() override = default;
- void TrackMemoryAllocatedChange(uint64_t delta) override {
+ void TrackMemoryAllocatedChange(int64_t delta) override {
+ DCHECK(delta >= 0 || pool_info_.size >= static_cast<uint64_t>(-delta));
pool_info_.size += delta;
}
diff --git a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_base.cc b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_base.cc
index 45fae069c90..44fb80e0c4a 100644
--- a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_base.cc
+++ b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_base.cc
@@ -132,7 +132,8 @@ GLES2DecoderTestBase::GLES2DecoderTestBase()
cached_stencil_front_mask_(static_cast<GLuint>(-1)),
cached_stencil_back_mask_(static_cast<GLuint>(-1)),
shader_language_version_(100),
- shader_translator_cache_(gpu_preferences_) {
+ shader_translator_cache_(gpu_preferences_),
+ discardable_manager_(gpu_preferences_) {
memset(immediate_buffer_, 0xEE, sizeof(immediate_buffer_));
}
@@ -517,7 +518,8 @@ ContextResult GLES2DecoderTestBase::MaybeInitDecoderWithWorkarounds(
gpu::ContextResult result = decoder_->Initialize(
surface_, context_, false, DisallowedFeatures(), attribs);
if (result != gpu::ContextResult::kSuccess) {
- decoder_->Destroy(false /* have_context */);
+    // GLES2CmdDecoder::Destroy() should already be handled by Initialize() in
+    // all failure cases, so it is not called again here.
decoder_.reset();
group_->Destroy(mock_decoder_.get(), false);
return result;
@@ -2394,7 +2396,9 @@ GpuPreferences GenerateGpuPreferencesForPassthroughTests() {
GLES2DecoderPassthroughTestBase::GLES2DecoderPassthroughTestBase(
ContextType context_type)
: gpu_preferences_(GenerateGpuPreferencesForPassthroughTests()),
- shader_translator_cache_(gpu_preferences_) {
+ shader_translator_cache_(gpu_preferences_),
+ discardable_manager_(gpu_preferences_),
+ passthrough_discardable_manager_(gpu_preferences_) {
context_creation_attribs_.context_type = context_type;
}
@@ -2430,7 +2434,7 @@ void GLES2DecoderPassthroughTestBase::SetUp() {
gl::init::InitializeStaticGLBindingsImplementation(
gl::kGLImplementationEGLANGLE, false);
- gl::init::InitializeGLOneOffPlatformImplementation(false, false, false, true);
+ gl::init::InitializeGLOneOffPlatformImplementation(false, false, true);
scoped_refptr<gles2::FeatureInfo> feature_info = new gles2::FeatureInfo();
group_ = new gles2::ContextGroup(
diff --git a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_extensions.cc b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_extensions.cc
index 6477d349b71..978a837f568 100644
--- a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_extensions.cc
+++ b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_extensions.cc
@@ -34,453 +34,6 @@ INSTANTIATE_TEST_SUITE_P(Service,
GLES2DecoderTestDisabledExtensions,
::testing::Bool());
-TEST_P(GLES2DecoderTestDisabledExtensions, CHROMIUMPathRenderingDisabled) {
- const GLuint kClientPathId = 0;
- {
- cmds::MatrixLoadfCHROMIUMImmediate& cmd =
- *GetImmediateAs<cmds::MatrixLoadfCHROMIUMImmediate>();
- GLfloat temp[16] = {
- 0,
- };
- cmd.Init(GL_PATH_MODELVIEW_CHROMIUM, temp);
- EXPECT_EQ(error::kUnknownCommand, ExecuteImmediateCmd(cmd, sizeof(temp)));
- }
- {
- cmds::MatrixLoadIdentityCHROMIUM cmd;
- cmd.Init(GL_PATH_PROJECTION_CHROMIUM);
- EXPECT_EQ(error::kUnknownCommand, ExecuteCmd(cmd));
- }
- {
- cmds::GenPathsCHROMIUM cmd;
- cmd.Init(0, 0);
- EXPECT_EQ(error::kUnknownCommand, ExecuteCmd(cmd));
- }
- {
- cmds::DeletePathsCHROMIUM cmd;
- cmd.Init(0, 0);
- EXPECT_EQ(error::kUnknownCommand, ExecuteCmd(cmd));
- }
- {
- cmds::IsPathCHROMIUM cmd;
- cmd.Init(kClientPathId, shared_memory_id_, shared_memory_offset_);
- EXPECT_EQ(error::kUnknownCommand, ExecuteCmd(cmd));
- }
- {
- cmds::PathCommandsCHROMIUM cmd;
- cmd.Init(kClientPathId, 0, 0, 0, 0, GL_FLOAT, 0, 0);
- EXPECT_EQ(error::kUnknownCommand, ExecuteCmd(cmd));
- }
- {
- cmds::PathParameterfCHROMIUM cmd;
- cmd.Init(kClientPathId, GL_PATH_STROKE_WIDTH_CHROMIUM, 1.0f);
- EXPECT_EQ(error::kUnknownCommand, ExecuteCmd(cmd));
- }
- {
- cmds::PathParameteriCHROMIUM cmd;
- cmd.Init(kClientPathId, GL_PATH_STROKE_WIDTH_CHROMIUM, 1);
- EXPECT_EQ(error::kUnknownCommand, ExecuteCmd(cmd));
- }
- {
- cmds::PathStencilFuncCHROMIUM cmd;
- cmd.Init(GL_NEVER, 2, 3);
- EXPECT_EQ(error::kUnknownCommand, ExecuteCmd(cmd));
- }
- {
- cmds::StencilFillPathCHROMIUM cmd;
- cmd.Init(kClientPathId, GL_COUNT_UP_CHROMIUM, 1);
- EXPECT_EQ(error::kUnknownCommand, ExecuteCmd(cmd));
- }
- {
- cmds::StencilStrokePathCHROMIUM cmd;
- cmd.Init(kClientPathId, 1, 2);
- EXPECT_EQ(error::kUnknownCommand, ExecuteCmd(cmd));
- }
- {
- cmds::CoverFillPathCHROMIUM cmd;
- cmd.Init(kClientPathId, GL_BOUNDING_BOX_CHROMIUM);
- EXPECT_EQ(error::kUnknownCommand, ExecuteCmd(cmd));
- }
- {
- cmds::CoverStrokePathCHROMIUM cmd;
- cmd.Init(kClientPathId, GL_BOUNDING_BOX_CHROMIUM);
- EXPECT_EQ(error::kUnknownCommand, ExecuteCmd(cmd));
- }
- {
- cmds::StencilThenCoverFillPathCHROMIUM cmd;
- cmd.Init(kClientPathId, GL_COUNT_UP_CHROMIUM, 1, GL_BOUNDING_BOX_CHROMIUM);
- EXPECT_EQ(error::kUnknownCommand, ExecuteCmd(cmd));
- }
- {
- cmds::StencilThenCoverStrokePathCHROMIUM cmd;
- cmd.Init(kClientPathId, 1, 2, GL_BOUNDING_BOX_CHROMIUM);
- EXPECT_EQ(error::kUnknownCommand, ExecuteCmd(cmd));
- }
- {
- cmds::StencilFillPathInstancedCHROMIUM cmd;
- GLuint* paths = GetSharedMemoryAs<GLuint*>();
- paths[0] = kClientPathId;
- cmd.Init(1, GL_UNSIGNED_INT, shared_memory_id_, shared_memory_offset_, 0,
- GL_COUNT_UP_CHROMIUM, 1, GL_NONE, 0, 0);
- EXPECT_EQ(error::kUnknownCommand, ExecuteCmd(cmd));
- }
- {
- cmds::StencilStrokePathInstancedCHROMIUM cmd;
- GLuint* paths = GetSharedMemoryAs<GLuint*>();
- paths[0] = kClientPathId;
- cmd.Init(1, GL_UNSIGNED_INT, shared_memory_id_, shared_memory_offset_, 0,
- 0x80, 0x80, GL_NONE, 0, 0);
- EXPECT_EQ(error::kUnknownCommand, ExecuteCmd(cmd));
- }
- {
- cmds::CoverFillPathInstancedCHROMIUM cmd;
- GLuint* paths = GetSharedMemoryAs<GLuint*>();
- paths[0] = kClientPathId;
- cmd.Init(1, GL_UNSIGNED_INT, shared_memory_id_, shared_memory_offset_, 0,
- GL_BOUNDING_BOX_OF_BOUNDING_BOXES_CHROMIUM, GL_NONE, 0, 0);
- EXPECT_EQ(error::kUnknownCommand, ExecuteCmd(cmd));
- }
- {
- cmds::CoverStrokePathInstancedCHROMIUM cmd;
- GLuint* paths = GetSharedMemoryAs<GLuint*>();
- paths[0] = kClientPathId;
- cmd.Init(1, GL_UNSIGNED_INT, shared_memory_id_, shared_memory_offset_, 0,
- GL_BOUNDING_BOX_OF_BOUNDING_BOXES_CHROMIUM, GL_NONE, 0, 0);
- EXPECT_EQ(error::kUnknownCommand, ExecuteCmd(cmd));
- }
- {
- cmds::StencilThenCoverFillPathInstancedCHROMIUM cmd;
- GLuint* paths = GetSharedMemoryAs<GLuint*>();
- paths[0] = kClientPathId;
- cmd.Init(1, GL_UNSIGNED_INT, shared_memory_id_, shared_memory_offset_, 0,
- GL_COUNT_UP_CHROMIUM, 1,
- GL_BOUNDING_BOX_OF_BOUNDING_BOXES_CHROMIUM, GL_NONE, 0, 0);
- EXPECT_EQ(error::kUnknownCommand, ExecuteCmd(cmd));
- }
- {
- cmds::StencilThenCoverStrokePathInstancedCHROMIUM cmd;
- GLuint* paths = GetSharedMemoryAs<GLuint*>();
- paths[0] = kClientPathId;
- cmd.Init(1, GL_UNSIGNED_INT, shared_memory_id_, shared_memory_offset_, 0,
- 0x80, 0x80, GL_BOUNDING_BOX_OF_BOUNDING_BOXES_CHROMIUM, GL_NONE, 0,
- 0);
- EXPECT_EQ(error::kUnknownCommand, ExecuteCmd(cmd));
- }
- {
- cmds::BindFragmentInputLocationCHROMIUMBucket cmd;
- const uint32_t kBucketId = 123;
- const GLint kLocation = 2;
- const char* kName = "testing";
- SetBucketAsCString(kBucketId, kName);
- cmd.Init(client_program_id_, kLocation, kBucketId);
- EXPECT_EQ(error::kUnknownCommand, ExecuteCmd(cmd));
- }
- {
- cmds::ProgramPathFragmentInputGenCHROMIUM cmd;
- const GLint kLocation = 2;
- cmd.Init(client_program_id_, kLocation, 0, GL_NONE, 0, 0);
- EXPECT_EQ(error::kUnknownCommand, ExecuteCmd(cmd));
- }
-}
-
-class GLES2DecoderTestWithCHROMIUMPathRendering : public GLES2DecoderTest {
- public:
- GLES2DecoderTestWithCHROMIUMPathRendering() : client_path_id_(125) {}
-
- void SetUp() override {
- InitState init;
- init.gl_version = "OpenGL ES 3.1";
- init.has_alpha = true;
- init.has_depth = true;
- init.request_alpha = true;
- init.request_depth = true;
- init.bind_generates_resource = true;
- init.extensions = "GL_NV_path_rendering GL_NV_framebuffer_mixed_samples";
- InitDecoder(init);
-
- EXPECT_CALL(*gl_, GenPathsNV(1))
- .WillOnce(Return(kServicePathId))
- .RetiresOnSaturation();
- cmds::GenPathsCHROMIUM cmd;
- cmd.Init(client_path_id_, 1);
- EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
-
- // The tests use client_path_id_ to test all sorts of drawing. The NVPR API
-    // behaves differently with a path name that is "used" but which does not
-    // "allocate path object state" and a path name that names a real path
-    // object. Drawing with the former causes a GL error while the latter works
- // ok, even if there is nothing in the actual path object. To remain
- // compatible with the API, we allocate path object state even when using
- // the mock API.
- EXPECT_CALL(
- *gl_, PathCommandsNV(kServicePathId, 0, nullptr, 0, GL_FLOAT, nullptr))
- .RetiresOnSaturation();
- cmds::PathCommandsCHROMIUM pcmd;
- pcmd.Init(client_path_id_, 0, 0, 0, 0, GL_FLOAT, 0, 0);
- EXPECT_EQ(error::kNoError, ExecuteCmd(pcmd));
- }
-
- protected:
- template <typename TypeParam>
- void TestPathCommandsCHROMIUMCoordTypes();
-
- struct InstancedTestcase {
- GLsizei num_paths;
- GLenum path_name_type;
- const void* paths;
- GLuint path_base;
- GLenum fill_mode;
- GLuint reference;
- GLuint mask;
- GLenum transform_type;
- const GLfloat* transform_values;
- size_t sizeof_paths; // Used for copying to shm buffer.
- size_t sizeof_transform_values; // Used for copying to shm buffer.
- error::Error expected_error;
- GLint expected_gl_error;
- bool expect_gl_call;
- };
-
- void CallAllInstancedCommands(const InstancedTestcase& testcase) {
-    // Note: for testcases that expect a call, we do not compare the 'paths'
-    // array during EXPECT_CALL due to it being void*. Instead, we rely on the
-    // fact that if the path base was not added correctly, the paths wouldn't
-    // exist and the call wouldn't come through.
-
- bool copy_paths = false; // Paths are copied for each call that has paths,
- // since the implementation modifies the memory
- // area.
- void* paths = nullptr;
- uint32_t paths_shm_id = 0;
- uint32_t paths_shm_offset = 0;
- GLfloat* transforms = nullptr;
- uint32_t transforms_shm_id = 0;
- uint32_t transforms_shm_offset = 0;
-
- if (testcase.transform_values) {
- transforms = GetSharedMemoryAs<GLfloat*>();
- transforms_shm_id = shared_memory_id_;
- transforms_shm_offset = shared_memory_offset_;
- memcpy(transforms, testcase.transform_values,
- testcase.sizeof_transform_values);
- } else {
- DCHECK(testcase.sizeof_transform_values == 0);
- }
- if (testcase.paths) {
- paths =
- GetSharedMemoryAsWithOffset<void*>(testcase.sizeof_transform_values);
- paths_shm_id = shared_memory_id_;
- paths_shm_offset =
- shared_memory_offset_ + testcase.sizeof_transform_values;
- copy_paths = true;
- } else {
- DCHECK(testcase.sizeof_paths == 0);
- }
-
- if (testcase.expect_gl_call) {
- EXPECT_CALL(*gl_, StencilFillPathInstancedNV(
- testcase.num_paths, GL_UNSIGNED_INT, _, 0,
- testcase.fill_mode, testcase.mask,
- testcase.transform_type, transforms))
- .RetiresOnSaturation();
- }
- if (copy_paths) {
- memcpy(paths, testcase.paths, testcase.sizeof_paths);
- }
- {
- cmds::StencilFillPathInstancedCHROMIUM sfi_cmd;
- sfi_cmd.Init(testcase.num_paths, testcase.path_name_type, paths_shm_id,
- paths_shm_offset, testcase.path_base, testcase.fill_mode,
- testcase.mask, testcase.transform_type, transforms_shm_id,
- transforms_shm_offset);
- EXPECT_EQ(testcase.expected_error, ExecuteCmd(sfi_cmd));
- EXPECT_EQ(testcase.expected_gl_error, GetGLError());
- }
-
- if (testcase.expect_gl_call) {
- EXPECT_CALL(*gl_, StencilStrokePathInstancedNV(
- testcase.num_paths, GL_UNSIGNED_INT, _, 0,
- testcase.reference, testcase.mask,
- testcase.transform_type, transforms))
- .RetiresOnSaturation();
- }
- if (copy_paths) {
- memcpy(paths, testcase.paths, testcase.sizeof_paths);
- }
- {
- cmds::StencilStrokePathInstancedCHROMIUM ssi_cmd;
- ssi_cmd.Init(testcase.num_paths, testcase.path_name_type, paths_shm_id,
- paths_shm_offset, testcase.path_base, testcase.reference,
- testcase.mask, testcase.transform_type, transforms_shm_id,
- transforms_shm_offset);
- EXPECT_EQ(testcase.expected_error, ExecuteCmd(ssi_cmd));
- EXPECT_EQ(testcase.expected_gl_error, GetGLError());
- }
-
- if (testcase.expect_gl_call) {
- EXPECT_CALL(*gl_, CoverFillPathInstancedNV(
- testcase.num_paths, GL_UNSIGNED_INT, _, 0,
- GL_BOUNDING_BOX_OF_BOUNDING_BOXES_NV,
- testcase.transform_type, transforms))
- .RetiresOnSaturation();
- }
- if (copy_paths) {
- memcpy(paths, testcase.paths, testcase.sizeof_paths);
- }
- {
- cmds::CoverFillPathInstancedCHROMIUM cfi_cmd;
- cfi_cmd.Init(testcase.num_paths, testcase.path_name_type, paths_shm_id,
- paths_shm_offset, testcase.path_base,
- GL_BOUNDING_BOX_OF_BOUNDING_BOXES_CHROMIUM,
- testcase.transform_type, transforms_shm_id,
- transforms_shm_offset);
- EXPECT_EQ(testcase.expected_error, ExecuteCmd(cfi_cmd));
- EXPECT_EQ(testcase.expected_gl_error, GetGLError());
- }
- if (testcase.expect_gl_call) {
- EXPECT_CALL(*gl_, CoverStrokePathInstancedNV(
- testcase.num_paths, GL_UNSIGNED_INT, _, 0,
- GL_BOUNDING_BOX_OF_BOUNDING_BOXES_NV,
- testcase.transform_type, transforms))
- .RetiresOnSaturation();
- }
- if (copy_paths) {
- memcpy(paths, testcase.paths, testcase.sizeof_paths);
- }
-
- {
- cmds::CoverStrokePathInstancedCHROMIUM csi_cmd;
- csi_cmd.Init(testcase.num_paths, testcase.path_name_type, paths_shm_id,
- paths_shm_offset, testcase.path_base,
- GL_BOUNDING_BOX_OF_BOUNDING_BOXES_CHROMIUM,
- testcase.transform_type, transforms_shm_id,
- transforms_shm_offset);
- EXPECT_EQ(testcase.expected_error, ExecuteCmd(csi_cmd));
- EXPECT_EQ(testcase.expected_gl_error, GetGLError());
- }
-
- if (testcase.expect_gl_call) {
- EXPECT_CALL(*gl_, StencilThenCoverFillPathInstancedNV(
- testcase.num_paths, GL_UNSIGNED_INT, _, 0,
- testcase.fill_mode, testcase.mask,
- GL_BOUNDING_BOX_OF_BOUNDING_BOXES_NV,
- testcase.transform_type, transforms))
- .RetiresOnSaturation();
- }
- if (copy_paths) {
- memcpy(paths, testcase.paths, testcase.sizeof_paths);
- }
- {
- cmds::StencilThenCoverFillPathInstancedCHROMIUM stcfi_cmd;
- stcfi_cmd.Init(testcase.num_paths, testcase.path_name_type, paths_shm_id,
- paths_shm_offset, testcase.path_base, testcase.fill_mode,
- testcase.mask, GL_BOUNDING_BOX_OF_BOUNDING_BOXES_CHROMIUM,
- testcase.transform_type, transforms_shm_id,
- transforms_shm_offset);
- EXPECT_EQ(testcase.expected_error, ExecuteCmd(stcfi_cmd));
- EXPECT_EQ(testcase.expected_gl_error, GetGLError());
- }
-
- if (testcase.expect_gl_call) {
- EXPECT_CALL(*gl_, StencilThenCoverStrokePathInstancedNV(
- testcase.num_paths, GL_UNSIGNED_INT, _, 0,
- testcase.reference, testcase.mask,
- GL_BOUNDING_BOX_OF_BOUNDING_BOXES_NV,
- testcase.transform_type, transforms))
- .RetiresOnSaturation();
- }
- if (copy_paths) {
- memcpy(paths, testcase.paths, testcase.sizeof_paths);
- }
- {
- cmds::StencilThenCoverStrokePathInstancedCHROMIUM stcsi_cmd;
- stcsi_cmd.Init(testcase.num_paths, testcase.path_name_type, paths_shm_id,
- paths_shm_offset, testcase.path_base, testcase.reference,
- testcase.mask, GL_BOUNDING_BOX_OF_BOUNDING_BOXES_CHROMIUM,
- testcase.transform_type, transforms_shm_id,
- transforms_shm_offset);
- EXPECT_EQ(testcase.expected_error, ExecuteCmd(stcsi_cmd));
- EXPECT_EQ(testcase.expected_gl_error, GetGLError());
- }
- }
-
- void CallAllInstancedCommandsWithInvalidSHM(GLsizei num_paths,
- const GLuint* paths,
- GLuint* paths_shm,
- uint32_t paths_shm_id,
- uint32_t paths_shm_offset,
- uint32_t transforms_shm_id,
- uint32_t transforms_shm_offset) {
- const GLuint kPathBase = 0;
- const GLenum kFillMode = GL_INVERT;
- const GLuint kMask = 0x80;
- const GLuint kReference = 0xFF;
- const GLuint kTransformType = GL_AFFINE_3D_CHROMIUM;
- memcpy(paths_shm, paths, sizeof(GLuint) * num_paths);
- {
- cmds::StencilFillPathInstancedCHROMIUM sfi_cmd;
- sfi_cmd.Init(num_paths, GL_UNSIGNED_INT, paths_shm_id, paths_shm_offset,
- kPathBase, kFillMode, kMask, kTransformType,
- transforms_shm_id, transforms_shm_offset);
- EXPECT_EQ(error::kOutOfBounds, ExecuteCmd(sfi_cmd));
- EXPECT_EQ(GL_NO_ERROR, GetGLError());
- }
- memcpy(paths_shm, paths, sizeof(GLuint) * num_paths);
- {
- cmds::StencilStrokePathInstancedCHROMIUM ssi_cmd;
- ssi_cmd.Init(num_paths, GL_UNSIGNED_INT, paths_shm_id, paths_shm_offset,
- kPathBase, kReference, kMask, kTransformType,
- transforms_shm_id, transforms_shm_offset);
- EXPECT_EQ(error::kOutOfBounds, ExecuteCmd(ssi_cmd));
- EXPECT_EQ(GL_NO_ERROR, GetGLError());
- }
- memcpy(paths_shm, paths, sizeof(GLuint) * num_paths);
- {
- cmds::CoverFillPathInstancedCHROMIUM cfi_cmd;
- cfi_cmd.Init(num_paths, GL_UNSIGNED_INT, paths_shm_id, paths_shm_offset,
- kPathBase, GL_BOUNDING_BOX_OF_BOUNDING_BOXES_CHROMIUM,
- kTransformType, transforms_shm_id, transforms_shm_offset);
- EXPECT_EQ(error::kOutOfBounds, ExecuteCmd(cfi_cmd));
- EXPECT_EQ(GL_NO_ERROR, GetGLError());
- }
- memcpy(paths_shm, paths, sizeof(GLuint) * num_paths);
- {
- cmds::CoverStrokePathInstancedCHROMIUM csi_cmd;
- csi_cmd.Init(num_paths, GL_UNSIGNED_INT, paths_shm_id, paths_shm_offset,
- kPathBase, GL_BOUNDING_BOX_OF_BOUNDING_BOXES_CHROMIUM,
- kTransformType, transforms_shm_id, transforms_shm_offset);
- EXPECT_EQ(error::kOutOfBounds, ExecuteCmd(csi_cmd));
- EXPECT_EQ(GL_NO_ERROR, GetGLError());
- }
- memcpy(paths_shm, paths, sizeof(GLuint) * num_paths);
- {
- cmds::StencilThenCoverFillPathInstancedCHROMIUM stcfi_cmd;
- stcfi_cmd.Init(num_paths, GL_UNSIGNED_INT, paths_shm_id, paths_shm_offset,
- kPathBase, kFillMode, kMask,
- GL_BOUNDING_BOX_OF_BOUNDING_BOXES_CHROMIUM, kTransformType,
- transforms_shm_id, transforms_shm_offset);
- EXPECT_EQ(error::kOutOfBounds, ExecuteCmd(stcfi_cmd));
- EXPECT_EQ(GL_NO_ERROR, GetGLError());
- }
- memcpy(paths_shm, paths, sizeof(GLuint) * num_paths);
- {
- cmds::StencilThenCoverStrokePathInstancedCHROMIUM stcsi_cmd;
- stcsi_cmd.Init(num_paths, GL_UNSIGNED_INT, paths_shm_id, paths_shm_offset,
- kPathBase, kReference, kMask,
- GL_BOUNDING_BOX_OF_BOUNDING_BOXES_CHROMIUM, kTransformType,
- transforms_shm_id, transforms_shm_offset);
- EXPECT_EQ(error::kOutOfBounds, ExecuteCmd(stcsi_cmd));
- EXPECT_EQ(GL_NO_ERROR, GetGLError());
- }
- }
-
- GLuint client_path_id_;
- static const GLuint kServicePathId = 311;
-};
-
-INSTANTIATE_TEST_SUITE_P(Service,
- GLES2DecoderTestWithCHROMIUMPathRendering,
- ::testing::Bool());
-
class GLES2DecoderTestWithBlendEquationAdvanced : public GLES2DecoderTest {
public:
GLES2DecoderTestWithBlendEquationAdvanced() = default;
@@ -553,7 +106,7 @@ class GLES2DecoderTestWithCHROMIUMFramebufferMixedSamples
init.request_alpha = true;
init.request_depth = true;
init.bind_generates_resource = true;
- init.extensions = "GL_NV_path_rendering GL_NV_framebuffer_mixed_samples ";
+ init.extensions = "GL_NV_framebuffer_mixed_samples ";
InitDecoder(init);
}
};
@@ -562,1170 +115,6 @@ INSTANTIATE_TEST_SUITE_P(Service,
GLES2DecoderTestWithCHROMIUMFramebufferMixedSamples,
::testing::Bool());
-TEST_P(GLES2DecoderTestWithCHROMIUMPathRendering, GenDeletePaths) {
- static GLuint kFirstClientID = client_path_id_ + 88;
- static GLsizei kPathCount = 58;
- static GLuint kFirstCreatedServiceID = 8000;
-
- // GenPaths range 0 causes no calls.
- cmds::GenPathsCHROMIUM gen_cmd;
- gen_cmd.Init(kFirstClientID, 0);
- EXPECT_EQ(error::kNoError, ExecuteCmd(gen_cmd));
- EXPECT_EQ(GL_NO_ERROR, GetGLError());
-
- // DeletePaths range 0 causes no calls.
- cmds::DeletePathsCHROMIUM delete_cmd;
- delete_cmd.Init(kFirstClientID, 0);
- EXPECT_EQ(error::kNoError, ExecuteCmd(delete_cmd));
- EXPECT_EQ(GL_NO_ERROR, GetGLError());
-
- // DeletePaths client id 0 causes no calls and no errors.
- delete_cmd.Init(0, 1);
- EXPECT_EQ(error::kNoError, ExecuteCmd(delete_cmd));
- EXPECT_EQ(GL_NO_ERROR, GetGLError());
-
- // DeletePaths with a big range should not cause any deletes.
- delete_cmd.Init(client_path_id_ + 1,
- std::numeric_limits<GLsizei>::max() - client_path_id_ - 1);
- EXPECT_EQ(error::kNoError, ExecuteCmd(delete_cmd));
- EXPECT_EQ(GL_NO_ERROR, GetGLError());
-
- delete_cmd.Init(static_cast<GLuint>(std::numeric_limits<GLsizei>::max()) + 1,
- std::numeric_limits<GLsizei>::max());
- EXPECT_EQ(error::kNoError, ExecuteCmd(delete_cmd));
- EXPECT_EQ(GL_NO_ERROR, GetGLError());
-
- // Normal Gen and Delete should cause the normal calls.
- EXPECT_CALL(*gl_, GenPathsNV(kPathCount))
- .WillOnce(Return(kFirstCreatedServiceID))
- .RetiresOnSaturation();
-
- gen_cmd.Init(kFirstClientID, kPathCount);
- EXPECT_EQ(error::kNoError, ExecuteCmd(gen_cmd));
- EXPECT_EQ(GL_NO_ERROR, GetGLError());
-
- EXPECT_CALL(*gl_, DeletePathsNV(kFirstCreatedServiceID, kPathCount))
- .RetiresOnSaturation();
-
- delete_cmd.Init(kFirstClientID, kPathCount);
- EXPECT_EQ(error::kNoError, ExecuteCmd(delete_cmd));
- EXPECT_EQ(GL_NO_ERROR, GetGLError());
-}
-
-TEST_P(GLES2DecoderTestWithCHROMIUMPathRendering, GenDeleteRanges) {
- static GLuint kFirstClientID = client_path_id_ + 77;
- static GLsizei kPathCount = 5800;
- static GLuint kFirstCreatedServiceID = 8000;
-
- // Create a range of path names, delete one in middle and then
- // the rest. Expect 3 DeletePath calls.
- EXPECT_CALL(*gl_, GenPathsNV(kPathCount))
- .WillOnce(Return(kFirstCreatedServiceID))
- .RetiresOnSaturation();
- cmds::GenPathsCHROMIUM gen_cmd;
- gen_cmd.Init(kFirstClientID, kPathCount);
- EXPECT_EQ(error::kNoError, ExecuteCmd(gen_cmd));
- EXPECT_EQ(GL_NO_ERROR, GetGLError());
-
- EXPECT_CALL(*gl_, DeletePathsNV(kFirstCreatedServiceID + (kPathCount / 2), 1))
- .RetiresOnSaturation();
-
- cmds::DeletePathsCHROMIUM delete_cmd;
- delete_cmd.Init(kFirstClientID + (kPathCount / 2), 1);
- EXPECT_EQ(error::kNoError, ExecuteCmd(delete_cmd));
- EXPECT_EQ(GL_NO_ERROR, GetGLError());
-
- EXPECT_CALL(*gl_, DeletePathsNV(kFirstCreatedServiceID, (kPathCount / 2)))
- .RetiresOnSaturation();
- EXPECT_CALL(*gl_, DeletePathsNV(kFirstCreatedServiceID + (kPathCount / 2) + 1,
- (kPathCount / 2) - 1)).RetiresOnSaturation();
-
- delete_cmd.Init(kFirstClientID, kPathCount);
- EXPECT_EQ(error::kNoError, ExecuteCmd(delete_cmd));
- EXPECT_EQ(GL_NO_ERROR, GetGLError());
-}
-
-TEST_P(GLES2DecoderTestWithCHROMIUMPathRendering, GenDeleteManyPaths) {
- static GLuint kFirstClientID = client_path_id_ + 1;
- static GLsizei kPathCount = std::numeric_limits<GLsizei>::max();
- static GLuint kFirstCreatedServiceID = 8000;
-
- EXPECT_CALL(*gl_, GenPathsNV(kPathCount))
- .WillOnce(Return(kFirstCreatedServiceID))
- .RetiresOnSaturation();
-
- // GenPaths with big range.
- cmds::GenPathsCHROMIUM gen_cmd;
- gen_cmd.Init(kFirstClientID, kPathCount);
- EXPECT_EQ(error::kNoError, ExecuteCmd(gen_cmd));
- EXPECT_EQ(GL_NO_ERROR, GetGLError());
-
- // Path range wraps, so we get connection error.
- gen_cmd.Init(kFirstClientID + kPathCount, kPathCount);
- EXPECT_EQ(error::kInvalidArguments, ExecuteCmd(gen_cmd));
- EXPECT_EQ(GL_NO_ERROR, GetGLError());
-
- EXPECT_CALL(*gl_, DeletePathsNV(kFirstCreatedServiceID, kPathCount))
- .RetiresOnSaturation();
-
- cmds::DeletePathsCHROMIUM delete_cmd;
- delete_cmd.Init(kFirstClientID, kPathCount);
- EXPECT_EQ(error::kNoError, ExecuteCmd(delete_cmd));
- EXPECT_EQ(GL_NO_ERROR, GetGLError());
-
- // Delete every possible path.
- // We run into the one created for client_path_id_.
- EXPECT_CALL(*gl_, DeletePathsNV(kServicePathId, 1)).RetiresOnSaturation();
-
- delete_cmd.Init(1u, kPathCount);
- EXPECT_EQ(error::kNoError, ExecuteCmd(delete_cmd));
- EXPECT_EQ(GL_NO_ERROR, GetGLError());
-
- delete_cmd.Init(static_cast<GLuint>(kPathCount) + 1u, kPathCount);
- EXPECT_EQ(error::kNoError, ExecuteCmd(delete_cmd));
- EXPECT_EQ(GL_NO_ERROR, GetGLError());
-
-  // Allocate every possible path, delete a few, allocate them back, and
-  // expect a minimal number of calls.
- EXPECT_CALL(*gl_, GenPathsNV(kPathCount))
- .WillOnce(Return(static_cast<GLuint>(1u)))
- .WillOnce(Return(static_cast<GLuint>(kPathCount) + 1u))
- .RetiresOnSaturation();
-
- gen_cmd.Init(1u, kPathCount);
- EXPECT_EQ(error::kNoError, ExecuteCmd(gen_cmd));
- EXPECT_EQ(GL_NO_ERROR, GetGLError());
-
- gen_cmd.Init(static_cast<GLuint>(kPathCount) + 1u, kPathCount);
- EXPECT_EQ(error::kNoError, ExecuteCmd(gen_cmd));
- EXPECT_EQ(GL_NO_ERROR, GetGLError());
-
- gen_cmd.Init(static_cast<GLuint>(kPathCount) * 2u + 2u, kPathCount);
- EXPECT_EQ(error::kInvalidArguments, ExecuteCmd(gen_cmd));
- EXPECT_EQ(GL_NO_ERROR, GetGLError());
-
- EXPECT_CALL(*gl_, DeletePathsNV(kFirstClientID, 4)).RetiresOnSaturation();
-
- delete_cmd.Init(kFirstClientID, 4);
- EXPECT_EQ(error::kNoError, ExecuteCmd(delete_cmd));
- EXPECT_EQ(GL_NO_ERROR, GetGLError());
-
- EXPECT_CALL(*gl_, DeletePathsNV(kFirstClientID * 3, 1)).RetiresOnSaturation();
-
- delete_cmd.Init(kFirstClientID * 3, 1);
- EXPECT_EQ(error::kNoError, ExecuteCmd(delete_cmd));
- EXPECT_EQ(GL_NO_ERROR, GetGLError());
-
- EXPECT_CALL(*gl_, GenPathsNV(1))
- .WillOnce(Return(kFirstClientID))
- .WillOnce(Return(kFirstClientID + 1))
- .WillOnce(Return(kFirstClientID + 2))
- .WillOnce(Return(kFirstClientID + 3))
- .RetiresOnSaturation();
-
- for (int i = 0; i < 4; ++i) {
- gen_cmd.Init(kFirstClientID + i, 1);
- EXPECT_EQ(error::kNoError, ExecuteCmd(gen_cmd));
- EXPECT_EQ(GL_NO_ERROR, GetGLError());
- }
-
- EXPECT_CALL(*gl_, GenPathsNV(1))
- .WillOnce(Return(kFirstClientID * 3))
- .RetiresOnSaturation();
- gen_cmd.Init(kFirstClientID * 3, 1);
- EXPECT_EQ(error::kNoError, ExecuteCmd(gen_cmd));
- EXPECT_EQ(GL_NO_ERROR, GetGLError());
-
- EXPECT_CALL(*gl_, DeletePathsNV(1u, kPathCount)).RetiresOnSaturation();
- EXPECT_CALL(*gl_, DeletePathsNV(static_cast<GLuint>(kPathCount) + 1u,
- kPathCount)).RetiresOnSaturation();
-
- delete_cmd.Init(1u, kPathCount);
- EXPECT_EQ(error::kNoError, ExecuteCmd(delete_cmd));
- EXPECT_EQ(GL_NO_ERROR, GetGLError());
-
- delete_cmd.Init(static_cast<GLuint>(kPathCount) + 1u, kPathCount);
- EXPECT_EQ(error::kNoError, ExecuteCmd(delete_cmd));
- EXPECT_EQ(GL_NO_ERROR, GetGLError());
-
- // Cleanup: return the client_path_id_ as a path.
- EXPECT_CALL(*gl_, GenPathsNV(1))
- .WillOnce(Return(static_cast<GLuint>(kServicePathId)))
- .RetiresOnSaturation();
-
- gen_cmd.Init(client_path_id_, 1);
- EXPECT_EQ(error::kNoError, ExecuteCmd(gen_cmd));
- EXPECT_EQ(GL_NO_ERROR, GetGLError());
-}
-
-TEST_P(GLES2DecoderTestWithCHROMIUMPathRendering,
- GenPathsCHROMIUMInvalidCalls) {
- static GLuint kFirstClientID = client_path_id_ + 88;
- static GLsizei kPathCount = 5800;
- static GLuint kFirstCreatedServiceID = 8000;
-
-  // Range < 0 causes a GL error.
- cmds::GenPathsCHROMIUM gen_cmd;
- gen_cmd.Init(kFirstClientID, -1);
- EXPECT_EQ(error::kNoError, ExecuteCmd(gen_cmd));
- EXPECT_EQ(GL_INVALID_VALUE, GetGLError());
-
- // Path 0 is invalid client id, connection error.
- gen_cmd.Init(0, kPathCount);
- EXPECT_EQ(error::kInvalidArguments, ExecuteCmd(gen_cmd));
- EXPECT_EQ(GL_NO_ERROR, GetGLError());
-
- // Too big range causes client id to wrap, connection error.
- gen_cmd.Init(static_cast<GLuint>(std::numeric_limits<GLsizei>::max()) + 3,
- std::numeric_limits<GLsizei>::max());
- EXPECT_EQ(error::kInvalidArguments, ExecuteCmd(gen_cmd));
- EXPECT_EQ(GL_NO_ERROR, GetGLError());
-
- // Creating duplicate client_ids cause connection error.
- EXPECT_CALL(*gl_, GenPathsNV(kPathCount))
- .WillOnce(Return(kFirstCreatedServiceID))
- .RetiresOnSaturation();
-
- gen_cmd.Init(kFirstClientID, kPathCount);
- EXPECT_EQ(error::kNoError, ExecuteCmd(gen_cmd));
- EXPECT_EQ(GL_NO_ERROR, GetGLError());
-
- // Create duplicate by executing the same cmd.
- EXPECT_EQ(error::kInvalidArguments, ExecuteCmd(gen_cmd));
- EXPECT_EQ(GL_NO_ERROR, GetGLError());
-
- // Create duplicate by creating a range that contains
- // an already existing client path id.
- gen_cmd.Init(kFirstClientID - 1, 2);
- EXPECT_EQ(error::kInvalidArguments, ExecuteCmd(gen_cmd));
- EXPECT_EQ(GL_NO_ERROR, GetGLError());
-
- // Cleanup.
- EXPECT_CALL(*gl_, DeletePathsNV(kFirstCreatedServiceID, kPathCount))
- .RetiresOnSaturation();
- cmds::DeletePathsCHROMIUM delete_cmd;
- delete_cmd.Init(kFirstClientID, kPathCount);
- EXPECT_EQ(error::kNoError, ExecuteCmd(delete_cmd));
- EXPECT_EQ(GL_NO_ERROR, GetGLError());
-}
-
-TEST_P(GLES2DecoderTestWithCHROMIUMPathRendering,
- DeletePathsCHROMIUMInvalidCalls) {
- static GLuint kFirstClientID = client_path_id_ + 88;
-
-  // Range < 0 causes a GL error.
- cmds::DeletePathsCHROMIUM delete_cmd;
- delete_cmd.Init(kFirstClientID, -1);
- EXPECT_EQ(error::kNoError, ExecuteCmd(delete_cmd));
- EXPECT_EQ(GL_INVALID_VALUE, GetGLError());
-
- // Too big range causes client id to wrap, connection error.
- delete_cmd.Init(static_cast<GLuint>(std::numeric_limits<GLsizei>::max()) + 3,
- std::numeric_limits<GLsizei>::max());
- EXPECT_EQ(error::kInvalidArguments, ExecuteCmd(delete_cmd));
- EXPECT_EQ(GL_NO_ERROR, GetGLError());
-}
-
-TEST_P(GLES2DecoderTestWithCHROMIUMPathRendering,
- PathCommandsCHROMIUMInvalidCalls) {
- static const GLsizei kCorrectCoordCount = 19;
- static const GLsizei kCorrectCommandCount = 6;
- static const GLenum kInvalidCoordType = GL_NONE;
-
- GLfloat* coords = GetSharedMemoryAs<GLfloat*>();
- unsigned commands_offset = sizeof(GLfloat) * kCorrectCoordCount;
- GLubyte* commands = GetSharedMemoryAsWithOffset<GLubyte*>(commands_offset);
- for (int i = 0; i < kCorrectCoordCount; ++i) {
- coords[i] = 5.0f * i;
- }
- commands[0] = GL_MOVE_TO_CHROMIUM;
- commands[1] = GL_CLOSE_PATH_CHROMIUM;
- commands[2] = GL_LINE_TO_CHROMIUM;
- commands[3] = GL_QUADRATIC_CURVE_TO_CHROMIUM;
- commands[4] = GL_CUBIC_CURVE_TO_CHROMIUM;
- commands[5] = GL_CONIC_CURVE_TO_CHROMIUM;
-
- EXPECT_CALL(*gl_, PathCommandsNV(kServicePathId, kCorrectCommandCount, _,
- kCorrectCoordCount, GL_FLOAT, coords))
- .RetiresOnSaturation();
-
- cmds::PathCommandsCHROMIUM cmd;
-
- // Reference call -- this succeeds.
- cmd.Init(client_path_id_, kCorrectCommandCount, shared_memory_id_,
- shared_memory_offset_ + commands_offset, kCorrectCoordCount,
- GL_FLOAT, shared_memory_id_, shared_memory_offset_);
- EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
- EXPECT_EQ(GL_NO_ERROR, GetGLError());
-
- EXPECT_CALL(*gl_, PathCommandsNV(_, _, _, _, _, _)).Times(0);
-
- // Invalid client id fails.
- cmd.Init(client_path_id_ - 1, kCorrectCommandCount, shared_memory_id_,
- shared_memory_offset_, kCorrectCoordCount, GL_FLOAT,
- shared_memory_id_, shared_memory_offset_);
- EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
- EXPECT_EQ(GL_INVALID_OPERATION, GetGLError());
-
- // The numCommands < 0.
- cmd.Init(client_path_id_, -1, shared_memory_id_, shared_memory_offset_,
- kCorrectCoordCount, GL_FLOAT, shared_memory_id_,
- shared_memory_offset_);
- EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
- EXPECT_EQ(GL_INVALID_VALUE, GetGLError());
-
- // The numCoords < 0.
- cmd.Init(client_path_id_, kCorrectCommandCount, shared_memory_id_,
- shared_memory_offset_, -1, GL_FLOAT, shared_memory_id_,
- shared_memory_offset_);
- EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
- EXPECT_EQ(GL_INVALID_VALUE, GetGLError());
-
- // Invalid coordType fails.
- cmd.Init(client_path_id_, kCorrectCommandCount, shared_memory_id_,
- shared_memory_offset_, kCorrectCoordCount, kInvalidCoordType,
- shared_memory_id_, shared_memory_offset_);
- EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
- EXPECT_EQ(GL_INVALID_ENUM, GetGLError());
-
- // Big command counts.
- cmd.Init(client_path_id_, std::numeric_limits<GLsizei>::max(),
- shared_memory_id_, shared_memory_offset_ + commands_offset,
- kCorrectCoordCount, GL_FLOAT, shared_memory_id_,
- shared_memory_offset_);
- EXPECT_EQ(error::kOutOfBounds, ExecuteCmd(cmd));
- EXPECT_EQ(GL_NO_ERROR, GetGLError());
-
- // Invalid SHM cases.
- cmd.Init(client_path_id_, kCorrectCommandCount, kInvalidSharedMemoryId,
- shared_memory_offset_ + commands_offset, kCorrectCoordCount,
- GL_FLOAT, shared_memory_id_, shared_memory_offset_);
- EXPECT_EQ(error::kOutOfBounds, ExecuteCmd(cmd));
- EXPECT_EQ(GL_NO_ERROR, GetGLError());
-
- cmd.Init(client_path_id_, kCorrectCommandCount, shared_memory_id_,
- kInvalidSharedMemoryOffset, kCorrectCoordCount, GL_FLOAT,
- shared_memory_id_, shared_memory_offset_);
- EXPECT_EQ(error::kOutOfBounds, ExecuteCmd(cmd));
- EXPECT_EQ(GL_NO_ERROR, GetGLError());
-
- cmd.Init(client_path_id_, kCorrectCommandCount, shared_memory_id_,
- shared_memory_offset_ + commands_offset, kCorrectCoordCount,
- GL_FLOAT, kInvalidSharedMemoryId, shared_memory_offset_);
- EXPECT_EQ(error::kOutOfBounds, ExecuteCmd(cmd));
- EXPECT_EQ(GL_NO_ERROR, GetGLError());
-
- cmd.Init(client_path_id_, kCorrectCommandCount, shared_memory_id_,
- shared_memory_offset_ + commands_offset, kCorrectCoordCount,
- GL_FLOAT, shared_memory_id_, kInvalidSharedMemoryOffset);
- EXPECT_EQ(error::kOutOfBounds, ExecuteCmd(cmd));
- EXPECT_EQ(GL_NO_ERROR, GetGLError());
-
- // NULL shm command id with non-zero command count.
- cmd.Init(client_path_id_, kCorrectCommandCount, 0, 0, kCorrectCoordCount,
- GL_FLOAT, shared_memory_id_, shared_memory_offset_);
- EXPECT_EQ(error::kOutOfBounds, ExecuteCmd(cmd));
- EXPECT_EQ(GL_NO_ERROR, GetGLError());
-
- // NULL shm coord id with non-zero coord count.
- cmd.Init(client_path_id_, kCorrectCommandCount, shared_memory_id_,
- shared_memory_offset_ + commands_offset, kCorrectCoordCount,
- GL_FLOAT, 0, 0);
- EXPECT_EQ(error::kOutOfBounds, ExecuteCmd(cmd));
- EXPECT_EQ(GL_NO_ERROR, GetGLError());
-
- // The coordCount not matching what is in commands.
- // Expects kCorrectCoordCount+2 coords.
- commands[1] = GL_MOVE_TO_CHROMIUM;
- cmd.Init(client_path_id_, kCorrectCommandCount, shared_memory_id_,
- shared_memory_offset_ + commands_offset, kCorrectCoordCount,
- GL_FLOAT, shared_memory_id_, shared_memory_offset_);
- EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
- EXPECT_EQ(GL_INVALID_OPERATION, GetGLError());
-
- // The coordCount not matching what is in commands.
- // Expects kCorrectCoordCount-2 coords.
- commands[0] = GL_CLOSE_PATH_CHROMIUM;
- commands[1] = GL_CLOSE_PATH_CHROMIUM;
- EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
- EXPECT_EQ(GL_INVALID_OPERATION, GetGLError());
-
- // NULL shm coord ids. Currently causes gl error, though client should not let
- // this through.
- cmd.Init(client_path_id_, kCorrectCommandCount, shared_memory_id_,
- shared_memory_offset_ + commands_offset, kCorrectCoordCount,
- GL_FLOAT, 0, 0);
- EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
- EXPECT_EQ(GL_INVALID_OPERATION, GetGLError());
-}
-
-TEST_P(GLES2DecoderTestWithCHROMIUMPathRendering,
- PathCommandsCHROMIUMEmptyCommands) {
- EXPECT_CALL(*gl_,
- PathCommandsNV(kServicePathId, 0, nullptr, 0, GL_FLOAT, nullptr))
- .RetiresOnSaturation();
- cmds::PathCommandsCHROMIUM cmd;
- cmd.Init(client_path_id_, 0, 0, 0, 0, GL_FLOAT, 0, 0);
- EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
- EXPECT_EQ(GL_NO_ERROR, GetGLError());
-}
-
-TEST_P(GLES2DecoderTestWithCHROMIUMPathRendering,
- PathCommandsCHROMIUMInvalidCommands) {
- EXPECT_CALL(*gl_, PathCommandsNV(_, _, _, _, _, _)).Times(0);
-
- cmds::PathCommandsCHROMIUM cmd;
-
- {
- const GLsizei kCoordCount = 2;
- const GLsizei kCommandCount = 2;
- GLfloat* coords = GetSharedMemoryAs<GLfloat*>();
- unsigned commands_offset = sizeof(GLfloat) * kCoordCount;
- GLubyte* commands = GetSharedMemoryAsWithOffset<GLubyte*>(commands_offset);
-
- coords[0] = 5.0f;
- coords[1] = 5.0f;
- commands[0] = 0x3; // Token MOVE_TO_RELATIVE in NV_path_rendering.
- commands[1] = GL_CLOSE_PATH_CHROMIUM;
-
- cmd.Init(client_path_id_ - 1, kCommandCount, shared_memory_id_,
- shared_memory_offset_, kCoordCount, GL_FLOAT, shared_memory_id_,
- shared_memory_offset_);
- EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
- EXPECT_EQ(GL_INVALID_OPERATION, GetGLError());
- }
- {
- const GLsizei kCoordCount = 8;
- const GLsizei kCommandCount = 4;
- GLfloat* coords = GetSharedMemoryAs<GLfloat*>();
- unsigned commands_offset = sizeof(GLfloat) * kCoordCount;
- GLubyte* commands = GetSharedMemoryAsWithOffset<GLubyte*>(commands_offset);
-
- for (int i = 0; i < kCoordCount; ++i) {
- coords[i] = 5.0f * i;
- }
- commands[0] = GL_MOVE_TO_CHROMIUM;
- commands[1] = GL_MOVE_TO_CHROMIUM;
- commands[2] = 'M'; // Synonym to MOVE_TO in NV_path_rendering.
- commands[3] = GL_MOVE_TO_CHROMIUM;
-
- cmd.Init(client_path_id_ - 1, kCommandCount, shared_memory_id_,
- shared_memory_offset_, kCoordCount, GL_FLOAT, shared_memory_id_,
- shared_memory_offset_);
- EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
- EXPECT_EQ(GL_INVALID_OPERATION, GetGLError());
- }
-}
-
-TEST_P(GLES2DecoderTestWithCHROMIUMPathRendering, PathParameterXCHROMIUM) {
- static GLuint kFirstClientID = client_path_id_ + 88;
- static GLsizei kPathCount = 2;
- static GLuint kFirstCreatedServiceID = 8000;
-
-  // Create paths so that we do not modify client_path_id_.
- EXPECT_CALL(*gl_, GenPathsNV(kPathCount))
- .WillOnce(Return(kFirstCreatedServiceID))
- .RetiresOnSaturation();
- cmds::GenPathsCHROMIUM gen_cmd;
- gen_cmd.Init(kFirstClientID, kPathCount);
- EXPECT_EQ(error::kNoError, ExecuteCmd(gen_cmd));
- EXPECT_EQ(GL_NO_ERROR, GetGLError());
-
- cmds::PathParameterfCHROMIUM fcmd;
- cmds::PathParameteriCHROMIUM icmd;
- const struct {
- GLenum pname;
- GLfloat value;
- GLfloat expected_value;
- } kTestcases[] = {
- {GL_PATH_STROKE_WIDTH_CHROMIUM, 1.0f, 1.0f},
- {GL_PATH_STROKE_WIDTH_CHROMIUM, 0.0f, 0.0f},
- {GL_PATH_MITER_LIMIT_CHROMIUM, 500.0f, 500.0f},
- {GL_PATH_STROKE_BOUND_CHROMIUM, .80f, .80f},
- {GL_PATH_STROKE_BOUND_CHROMIUM, 1.80f, 1.0f},
- {GL_PATH_STROKE_BOUND_CHROMIUM, -1.0f, 0.0f},
- {GL_PATH_END_CAPS_CHROMIUM, GL_FLAT_CHROMIUM, GL_FLAT_CHROMIUM},
- {GL_PATH_END_CAPS_CHROMIUM, GL_SQUARE_CHROMIUM, GL_SQUARE_CHROMIUM},
- {GL_PATH_JOIN_STYLE_CHROMIUM,
- GL_MITER_REVERT_CHROMIUM,
- GL_MITER_REVERT_CHROMIUM},
- };
-
- for (auto& testcase : kTestcases) {
- EXPECT_CALL(*gl_, PathParameterfNV(kFirstCreatedServiceID, testcase.pname,
- testcase.expected_value))
- .Times(1)
- .RetiresOnSaturation();
- fcmd.Init(kFirstClientID, testcase.pname, testcase.value);
- EXPECT_EQ(error::kNoError, ExecuteCmd(fcmd));
- EXPECT_EQ(GL_NO_ERROR, GetGLError());
-
- EXPECT_CALL(*gl_,
- PathParameteriNV(kFirstCreatedServiceID + 1, testcase.pname,
- static_cast<GLint>(testcase.expected_value)))
- .Times(1)
- .RetiresOnSaturation();
- icmd.Init(kFirstClientID + 1, testcase.pname,
- static_cast<GLint>(testcase.value));
- EXPECT_EQ(error::kNoError, ExecuteCmd(icmd));
- EXPECT_EQ(GL_NO_ERROR, GetGLError());
- }
-
- // Cleanup.
- EXPECT_CALL(*gl_, DeletePathsNV(kFirstCreatedServiceID, kPathCount))
- .RetiresOnSaturation();
-
- cmds::DeletePathsCHROMIUM delete_cmd;
- delete_cmd.Init(kFirstClientID, kPathCount);
- EXPECT_EQ(error::kNoError, ExecuteCmd(delete_cmd));
- EXPECT_EQ(GL_NO_ERROR, GetGLError());
-}
-
-TEST_P(GLES2DecoderTestWithCHROMIUMPathRendering,
- PathParameterXCHROMIUMInvalidArgs) {
- static GLuint kFirstClientID = client_path_id_ + 88;
- static GLsizei kPathCount = 2;
- static GLuint kFirstCreatedServiceID = 8000;
-
-  // Create paths so that we do not modify client_path_id_.
- EXPECT_CALL(*gl_, GenPathsNV(kPathCount))
- .WillOnce(Return(kFirstCreatedServiceID))
- .RetiresOnSaturation();
- cmds::GenPathsCHROMIUM gen_cmd;
- gen_cmd.Init(kFirstClientID, kPathCount);
- EXPECT_EQ(error::kNoError, ExecuteCmd(gen_cmd));
- EXPECT_EQ(GL_NO_ERROR, GetGLError());
-
- cmds::PathParameterfCHROMIUM fcmd;
- cmds::PathParameteriCHROMIUM icmd;
- const struct {
- GLenum pname;
- GLfloat value;
- bool try_int_version;
- GLint error;
- } kTestcases[] = {
- {GL_PATH_STROKE_WIDTH_CHROMIUM, -1.0f, true, GL_INVALID_VALUE},
- {GL_PATH_MITER_LIMIT_CHROMIUM,
- std::numeric_limits<float>::infinity(),
- false,
- GL_INVALID_VALUE},
- {GL_PATH_MITER_LIMIT_CHROMIUM,
- std::numeric_limits<float>::quiet_NaN(),
- false,
- GL_INVALID_VALUE},
- {GL_PATH_END_CAPS_CHROMIUM, 0x4, true, GL_INVALID_VALUE},
- {GL_PATH_END_CAPS_CHROMIUM,
- GL_MITER_REVERT_CHROMIUM,
- true,
- GL_INVALID_VALUE},
- {GL_PATH_JOIN_STYLE_CHROMIUM, GL_FLAT_CHROMIUM, true, GL_INVALID_VALUE},
- {GL_PATH_MODELVIEW_CHROMIUM, GL_FLAT_CHROMIUM, true, GL_INVALID_ENUM},
- };
-
- EXPECT_CALL(*gl_, PathParameterfNV(_, _, _)).Times(0);
- EXPECT_CALL(*gl_, PathParameteriNV(_, _, _)).Times(0);
-
- for (auto& testcase : kTestcases) {
- fcmd.Init(kFirstClientID, testcase.pname, testcase.value);
- EXPECT_EQ(error::kNoError, ExecuteCmd(fcmd));
- EXPECT_EQ(testcase.error, GetGLError());
- if (!testcase.try_int_version)
- continue;
-
- icmd.Init(kFirstClientID + 1, testcase.pname,
- static_cast<GLint>(testcase.value));
- EXPECT_EQ(error::kNoError, ExecuteCmd(icmd));
- EXPECT_EQ(testcase.error, GetGLError());
- }
-
- // Cleanup.
- EXPECT_CALL(*gl_, DeletePathsNV(kFirstCreatedServiceID, kPathCount))
- .RetiresOnSaturation();
-
- cmds::DeletePathsCHROMIUM delete_cmd;
- delete_cmd.Init(kFirstClientID, kPathCount);
- EXPECT_EQ(error::kNoError, ExecuteCmd(delete_cmd));
- EXPECT_EQ(GL_NO_ERROR, GetGLError());
-}
-
-TEST_P(GLES2DecoderTestWithCHROMIUMPathRendering, StencilFillPathCHROMIUM) {
- SetupExpectationsForApplyingDefaultDirtyState();
-
- cmds::StencilFillPathCHROMIUM cmd;
- cmds::StencilThenCoverFillPathCHROMIUM tcmd;
-
- static const GLenum kFillModes[] = {
- GL_INVERT, GL_COUNT_UP_CHROMIUM, GL_COUNT_DOWN_CHROMIUM};
- static const GLuint kMask = 0x7F;
-
- for (auto& fill_mode : kFillModes) {
- EXPECT_CALL(*gl_, StencilFillPathNV(kServicePathId, fill_mode, kMask))
- .RetiresOnSaturation();
- cmd.Init(client_path_id_, fill_mode, kMask);
- EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
- EXPECT_EQ(GL_NO_ERROR, GetGLError());
-
- EXPECT_CALL(*gl_, StencilThenCoverFillPathNV(kServicePathId, fill_mode,
- kMask, GL_BOUNDING_BOX_NV))
- .RetiresOnSaturation();
- tcmd.Init(client_path_id_, fill_mode, kMask, GL_BOUNDING_BOX_CHROMIUM);
- EXPECT_EQ(error::kNoError, ExecuteCmd(tcmd));
- EXPECT_EQ(GL_NO_ERROR, GetGLError());
- }
-
- // Non-existent path: no error, no call.
- cmd.Init(client_path_id_ - 1, GL_INVERT, 0x80);
- EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
- EXPECT_EQ(GL_NO_ERROR, GetGLError());
-
- tcmd.Init(client_path_id_ - 1, GL_INVERT, 0x80, GL_BOUNDING_BOX_CHROMIUM);
- EXPECT_EQ(error::kNoError, ExecuteCmd(tcmd));
- EXPECT_EQ(GL_NO_ERROR, GetGLError());
-}
-
-TEST_P(GLES2DecoderTestWithCHROMIUMPathRendering,
- StencilFillPathCHROMIUMInvalidArgs) {
- EXPECT_CALL(*gl_, StencilFillPathNV(_, _, _)).Times(0);
- EXPECT_CALL(*gl_, StencilThenCoverFillPathNV(_, _, _, GL_BOUNDING_BOX_NV))
- .Times(0);
-
- cmds::StencilFillPathCHROMIUM cmd;
- cmds::StencilThenCoverFillPathCHROMIUM tcmd;
-
- cmd.Init(client_path_id_, GL_INVERT - 1, 0x80);
- EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
- EXPECT_EQ(GL_INVALID_ENUM, GetGLError());
-
- tcmd.Init(client_path_id_, GL_INVERT - 1, 0x80, GL_BOUNDING_BOX_CHROMIUM);
- EXPECT_EQ(error::kNoError, ExecuteCmd(tcmd));
- EXPECT_EQ(GL_INVALID_ENUM, GetGLError());
-
- // The /mask/+1 is not power of two -> invalid value.
- cmd.Init(client_path_id_, GL_COUNT_UP_CHROMIUM, 0x80);
- EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
- EXPECT_EQ(GL_INVALID_VALUE, GetGLError());
-
- tcmd.Init(client_path_id_, GL_COUNT_UP_CHROMIUM, 0x80,
- GL_BOUNDING_BOX_CHROMIUM);
- EXPECT_EQ(error::kNoError, ExecuteCmd(tcmd));
- EXPECT_EQ(GL_INVALID_VALUE, GetGLError());
-
- cmd.Init(client_path_id_, GL_COUNT_DOWN_CHROMIUM, 5);
- EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
- EXPECT_EQ(GL_INVALID_VALUE, GetGLError());
-
- tcmd.Init(client_path_id_, GL_COUNT_DOWN_CHROMIUM, 5,
- GL_BOUNDING_BOX_CHROMIUM);
- EXPECT_EQ(error::kNoError, ExecuteCmd(tcmd));
- EXPECT_EQ(GL_INVALID_VALUE, GetGLError());
-}
-
-TEST_P(GLES2DecoderTestWithCHROMIUMPathRendering, StencilStrokePathCHROMIUM) {
- SetupExpectationsForApplyingDefaultDirtyState();
-
- EXPECT_CALL(*gl_, StencilStrokePathNV(kServicePathId, 1, 0x80))
- .RetiresOnSaturation();
- EXPECT_CALL(*gl_, StencilThenCoverStrokePathNV(kServicePathId, 1, 0x80,
- GL_BOUNDING_BOX_NV))
- .RetiresOnSaturation();
-
- cmds::StencilStrokePathCHROMIUM cmd;
- cmds::StencilThenCoverStrokePathCHROMIUM tcmd;
-
- cmd.Init(client_path_id_, 1, 0x80);
- EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
- EXPECT_EQ(GL_NO_ERROR, GetGLError());
-
- tcmd.Init(client_path_id_, 1, 0x80, GL_BOUNDING_BOX_CHROMIUM);
- EXPECT_EQ(error::kNoError, ExecuteCmd(tcmd));
- EXPECT_EQ(GL_NO_ERROR, GetGLError());
-
- EXPECT_CALL(*gl_, StencilThenCoverStrokePathNV(kServicePathId, 1, 0x80,
- GL_CONVEX_HULL_NV))
- .RetiresOnSaturation();
-
- tcmd.Init(client_path_id_, 1, 0x80, GL_CONVEX_HULL_CHROMIUM);
- EXPECT_EQ(error::kNoError, ExecuteCmd(tcmd));
- EXPECT_EQ(GL_NO_ERROR, GetGLError());
-
- // Non-existent path: no error, no call.
- cmd.Init(client_path_id_ - 1, 1, 0x80);
- EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
- EXPECT_EQ(GL_NO_ERROR, GetGLError());
-
- tcmd.Init(client_path_id_ - 1, 1, 0x80, GL_BOUNDING_BOX_CHROMIUM);
- EXPECT_EQ(error::kNoError, ExecuteCmd(tcmd));
- EXPECT_EQ(GL_NO_ERROR, GetGLError());
-}
-
-TEST_P(GLES2DecoderTestWithCHROMIUMPathRendering, CoverFillPathCHROMIUM) {
- SetupExpectationsForApplyingDefaultDirtyState();
-
- EXPECT_CALL(*gl_, CoverFillPathNV(kServicePathId, GL_BOUNDING_BOX_NV))
- .RetiresOnSaturation();
- cmds::CoverFillPathCHROMIUM cmd;
- cmd.Init(client_path_id_, GL_BOUNDING_BOX_CHROMIUM);
- EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
- EXPECT_EQ(GL_NO_ERROR, GetGLError());
-
- EXPECT_CALL(*gl_, CoverFillPathNV(kServicePathId, GL_CONVEX_HULL_NV))
- .RetiresOnSaturation();
- cmd.Init(client_path_id_, GL_CONVEX_HULL_CHROMIUM);
- EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
- EXPECT_EQ(GL_NO_ERROR, GetGLError());
-
- // Non-existent path: no error, no call.
- cmd.Init(client_path_id_ - 1, GL_BOUNDING_BOX_CHROMIUM);
- EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
- EXPECT_EQ(GL_NO_ERROR, GetGLError());
-}
-
-TEST_P(GLES2DecoderTestWithCHROMIUMPathRendering, CoverStrokePathCHROMIUM) {
- SetupExpectationsForApplyingDefaultDirtyState();
- EXPECT_CALL(*gl_, CoverStrokePathNV(kServicePathId, GL_BOUNDING_BOX_NV))
- .RetiresOnSaturation();
- cmds::CoverStrokePathCHROMIUM cmd;
- cmd.Init(client_path_id_, GL_BOUNDING_BOX_CHROMIUM);
- EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
- EXPECT_EQ(GL_NO_ERROR, GetGLError());
-
- EXPECT_CALL(*gl_, CoverStrokePathNV(kServicePathId, GL_CONVEX_HULL_NV))
- .RetiresOnSaturation();
- cmd.Init(client_path_id_, GL_CONVEX_HULL_CHROMIUM);
- EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
- EXPECT_EQ(GL_NO_ERROR, GetGLError());
-
- // Non-existent path: no error, no call.
- cmd.Init(client_path_id_ - 1, GL_BOUNDING_BOX_CHROMIUM);
- EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
- EXPECT_EQ(GL_NO_ERROR, GetGLError());
-}
-
-namespace {
-template <typename T>
-struct gl_type_enum {};
-template <>
-struct gl_type_enum<GLbyte> {
- enum { kGLType = GL_BYTE };
-};
-template <>
-struct gl_type_enum<GLubyte> {
- enum { kGLType = GL_UNSIGNED_BYTE };
-};
-template <>
-struct gl_type_enum<GLshort> {
- enum { kGLType = GL_SHORT };
-};
-template <>
-struct gl_type_enum<GLushort> {
- enum { kGLType = GL_UNSIGNED_SHORT };
-};
-template <>
-struct gl_type_enum<GLfloat> {
- enum { kGLType = GL_FLOAT };
-};
-}
-
-template <typename TypeParam>
-void GLES2DecoderTestWithCHROMIUMPathRendering::
- TestPathCommandsCHROMIUMCoordTypes() {
- static const GLsizei kCorrectCoordCount = 19;
- static const GLsizei kCorrectCommandCount = 6;
-
- TypeParam* coords = GetSharedMemoryAs<TypeParam*>();
- unsigned commands_offset = sizeof(TypeParam) * kCorrectCoordCount;
- GLubyte* commands = GetSharedMemoryAsWithOffset<GLubyte*>(commands_offset);
- for (int i = 0; i < kCorrectCoordCount; ++i) {
- coords[i] = static_cast<TypeParam>(5 * i);
- }
- commands[0] = GL_MOVE_TO_CHROMIUM;
- commands[1] = GL_CLOSE_PATH_CHROMIUM;
- commands[2] = GL_LINE_TO_CHROMIUM;
- commands[3] = GL_QUADRATIC_CURVE_TO_CHROMIUM;
- commands[4] = GL_CUBIC_CURVE_TO_CHROMIUM;
- commands[5] = GL_CONIC_CURVE_TO_CHROMIUM;
-
- EXPECT_CALL(*gl_, PathCommandsNV(kServicePathId, kCorrectCommandCount, _,
- kCorrectCoordCount,
- gl_type_enum<TypeParam>::kGLType, coords))
- .RetiresOnSaturation();
-
- cmds::PathCommandsCHROMIUM cmd;
-
- cmd.Init(client_path_id_, kCorrectCommandCount, shared_memory_id_,
- shared_memory_offset_ + commands_offset, kCorrectCoordCount,
- gl_type_enum<TypeParam>::kGLType, shared_memory_id_,
- shared_memory_offset_);
- EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
- EXPECT_EQ(GL_NO_ERROR, GetGLError());
-}
-
-TEST_P(GLES2DecoderTestWithCHROMIUMPathRendering,
- PathCommandsCHROMIUMCoordTypes) {
-  // Not using a typed test case, because the base class is already a
-  // parameterized test case and uses GetParam.
- TestPathCommandsCHROMIUMCoordTypes<GLbyte>();
- TestPathCommandsCHROMIUMCoordTypes<GLubyte>();
- TestPathCommandsCHROMIUMCoordTypes<GLshort>();
- TestPathCommandsCHROMIUMCoordTypes<GLushort>();
- TestPathCommandsCHROMIUMCoordTypes<GLfloat>();
-}
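
For reference, the kCorrectCoordCount of 19 used by this test follows from the per-command coordinate counts in CHROMIUM_path_rendering. A minimal sketch, assuming the counts from the extension text (2 for a move, 0 for a close, 2 for a line, 4 for a quadratic, 6 for a cubic and 5 for a conic segment):

#include <cstddef>

// Per-command coordinate counts assumed from the CHROMIUM_path_rendering /
// NV_path_rendering command table.
constexpr size_t kMoveToCoords = 2;            // GL_MOVE_TO_CHROMIUM
constexpr size_t kClosePathCoords = 0;         // GL_CLOSE_PATH_CHROMIUM
constexpr size_t kLineToCoords = 2;            // GL_LINE_TO_CHROMIUM
constexpr size_t kQuadraticCurveToCoords = 4;  // GL_QUADRATIC_CURVE_TO_CHROMIUM
constexpr size_t kCubicCurveToCoords = 6;      // GL_CUBIC_CURVE_TO_CHROMIUM
constexpr size_t kConicCurveToCoords = 5;      // GL_CONIC_CURVE_TO_CHROMIUM

static_assert(kMoveToCoords + kClosePathCoords + kLineToCoords +
                      kQuadraticCurveToCoords + kCubicCurveToCoords +
                      kConicCurveToCoords == 19,
              "the six commands used by the test consume 19 coordinates");
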
-
-TEST_P(GLES2DecoderTestWithCHROMIUMPathRendering,
- StencilXFillPathInstancedCHROMIUMInvalidArgs) {
- cmds::StencilFillPathInstancedCHROMIUM sfi_cmd;
- cmds::StencilThenCoverFillPathInstancedCHROMIUM stcfi_cmd;
-
- const GLuint kPaths[] = {client_path_id_, client_path_id_ + 5,
- client_path_id_, client_path_id_ + 18};
- const GLsizei kPathCount = base::size(kPaths);
-
- struct {
- GLenum fill_mode;
- GLuint mask;
- GLint expected_error;
- } testcases[] = {
- // Using invalid fill mode produces invalid enum.
- {GL_COUNT_UP_CHROMIUM - 1, 0x7F, GL_INVALID_ENUM},
- {GL_COUNT_DOWN_CHROMIUM + 1, 0x7F, GL_INVALID_ENUM},
-      // Using a /mask/ for which /mask/+1 is not a power of two produces an
-      // invalid value error.
- {GL_COUNT_UP_CHROMIUM, 0x80, GL_INVALID_VALUE},
- {GL_COUNT_DOWN_CHROMIUM, 4, GL_INVALID_VALUE}};
-
- GLuint* paths = GetSharedMemoryAs<GLuint*>();
-
- for (size_t i = 0; i < base::size(testcases); ++i) {
- memcpy(paths, kPaths, sizeof(kPaths));
- sfi_cmd.Init(kPathCount, GL_UNSIGNED_INT, shared_memory_id_,
- shared_memory_offset_, 0, testcases[i].fill_mode,
- testcases[i].mask, GL_NONE, 0, 0);
- EXPECT_EQ(error::kNoError, ExecuteCmd(sfi_cmd));
- EXPECT_EQ(testcases[i].expected_error, GetGLError());
-
- memcpy(paths, kPaths, sizeof(kPaths));
- stcfi_cmd.Init(kPathCount, GL_UNSIGNED_INT, shared_memory_id_,
- shared_memory_offset_, 0, testcases[i].fill_mode,
- testcases[i].mask,
- GL_BOUNDING_BOX_OF_BOUNDING_BOXES_CHROMIUM, GL_NONE, 0, 0);
- EXPECT_EQ(error::kNoError, ExecuteCmd(stcfi_cmd));
- EXPECT_EQ(testcases[i].expected_error, GetGLError());
- }
-}
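
The invalid-argument cases above hinge on the rule that, for the counting fill modes, /mask/ + 1 has to be a power of two. A minimal standalone sketch of that check, assuming this simplified form rather than the decoder's actual validation code:

#include <cstdint>

// Returns true when /mask/ is acceptable for GL_COUNT_UP_CHROMIUM /
// GL_COUNT_DOWN_CHROMIUM, i.e. mask + 1 is a power of two.
bool IsCountFillMaskValid(uint32_t mask) {
  const uint32_t n = mask + 1;
  return n != 0 && (n & (n - 1)) == 0;
}

// IsCountFillMaskValid(0x7F) -> true; IsCountFillMaskValid(0x80) and
// IsCountFillMaskValid(4) -> false, matching the GL_INVALID_VALUE rows above.
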
-
-TEST_P(GLES2DecoderTestWithCHROMIUMPathRendering,
- StencilXFillPathInstancedCHROMIUMFillMode) {
- SetupExpectationsForApplyingDefaultDirtyState();
-
- // Test different fill modes.
- cmds::StencilFillPathInstancedCHROMIUM sfi_cmd;
- cmds::StencilThenCoverFillPathInstancedCHROMIUM stcfi_cmd;
-
- const GLuint kPaths[] = {client_path_id_, client_path_id_ + 5,
- client_path_id_, client_path_id_ + 18};
- const GLsizei kPathCount = base::size(kPaths);
-
- static const GLenum kFillModes[] = {GL_INVERT, GL_COUNT_UP_CHROMIUM,
- GL_COUNT_DOWN_CHROMIUM};
- const GLuint kMask = 0x7F;
-
- GLuint* paths = GetSharedMemoryAs<GLuint*>();
-
- for (size_t i = 0; i < base::size(kFillModes); ++i) {
- memcpy(paths, kPaths, sizeof(kPaths));
- EXPECT_CALL(*gl_, StencilFillPathInstancedNV(kPathCount, GL_UNSIGNED_INT, _,
- 0, kFillModes[i], kMask,
- GL_NONE, nullptr))
- .RetiresOnSaturation();
- sfi_cmd.Init(kPathCount, GL_UNSIGNED_INT, shared_memory_id_,
- shared_memory_offset_, 0, kFillModes[i], kMask, GL_NONE, 0, 0);
- EXPECT_EQ(error::kNoError, ExecuteCmd(sfi_cmd));
- EXPECT_EQ(GL_NO_ERROR, GetGLError());
-
- memcpy(paths, kPaths, sizeof(kPaths));
- EXPECT_CALL(*gl_,
- StencilThenCoverFillPathInstancedNV(
- kPathCount, GL_UNSIGNED_INT, _, 0, kFillModes[i], kMask,
- GL_BOUNDING_BOX_OF_BOUNDING_BOXES_NV, GL_NONE, nullptr))
- .RetiresOnSaturation();
- stcfi_cmd.Init(kPathCount, GL_UNSIGNED_INT, shared_memory_id_,
- shared_memory_offset_, 0, kFillModes[i], kMask,
- GL_BOUNDING_BOX_OF_BOUNDING_BOXES_CHROMIUM, GL_NONE, 0, 0);
- EXPECT_EQ(error::kNoError, ExecuteCmd(stcfi_cmd));
- EXPECT_EQ(GL_NO_ERROR, GetGLError());
- }
-}
-
-TEST_P(GLES2DecoderTestWithCHROMIUMPathRendering, InstancedCalls) {
- SetupExpectationsForApplyingDefaultDirtyState();
-
- const GLuint kPaths[] = {0, client_path_id_, 15, client_path_id_};
- const GLsizei kPathCount = base::size(kPaths);
-
- // The path base will be client_path_id_, and so 0 is a
- // valid path.
- const GLuint kPathBase = client_path_id_;
- const GLuint kPathsWithBase[] = {0, 5, 0, 18};
-
- const GLshort kShortPathBase = client_path_id_ * 2;
- const GLshort kShortPathsWithBase[] = {
- -static_cast<GLshort>(client_path_id_), 5,
- -static_cast<GLshort>(client_path_id_), 18};
-
- const GLenum kFillMode = GL_INVERT;
- const GLuint kMask = 0x80;
- const GLuint kReference = 0xFF;
-
- GLfloat transform_values[12 * kPathCount];
- for (GLsizei i = 0; i < kPathCount; ++i) {
- for (int j = 0; j < 12; ++j) {
- transform_values[i * 12 + j] = 0.1f * j + i;
- }
- }
-
- // Path name overflows to correct path.
- const GLuint kBigPathBase = std::numeric_limits<GLuint>::max();
- const GLuint kPathsWithBigBase[] = {client_path_id_ + 1, 5,
- client_path_id_ + 1, 18};
-
-  // Path name underflows. As a technical limitation, we can not reach the
-  // correct path, so the test only checks that there is no GL error.
- const GLuint kNegativePathBase = 1;
- const GLbyte kNegativePathsWithBaseByte[] = {-1, -2, -5, -18};
- const GLint kNegativePathsWithBaseInt[] = {-2, -3, -4, -5};
-
- InstancedTestcase testcases[] = {
- // Test a normal call.
- {kPathCount, GL_UNSIGNED_INT, kPaths, 0, kFillMode, kReference, kMask,
- GL_NONE, nullptr, sizeof(kPaths), 0, error::kNoError, GL_NO_ERROR, true},
- // Test that the path base is applied correctly for each instanced call.
- {kPathCount, GL_UNSIGNED_INT, kPathsWithBase, kPathBase, kFillMode,
- kReference, kMask, GL_NONE, nullptr, sizeof(kPaths), 0, error::kNoError,
- GL_NO_ERROR, true},
- {kPathCount, GL_UNSIGNED_INT, kPathsWithBase, kPathBase, kFillMode,
- kReference, kMask,
-
-      // Test all possible transform types. The float array is big enough for
-      // all the variants. The contents of the array in the call are not
-      // checked, though.
- GL_TRANSLATE_X_CHROMIUM, transform_values, sizeof(kPaths),
- sizeof(transform_values), error::kNoError, GL_NO_ERROR, true},
- {kPathCount, GL_UNSIGNED_INT, kPathsWithBase, kPathBase, kFillMode,
- kReference, kMask, GL_TRANSLATE_Y_CHROMIUM, transform_values,
- sizeof(kPaths), sizeof(transform_values), error::kNoError, GL_NO_ERROR,
- true},
- {kPathCount, GL_UNSIGNED_INT, kPathsWithBase, kPathBase, kFillMode,
- kReference, kMask, GL_TRANSLATE_2D_CHROMIUM, transform_values,
- sizeof(kPaths), sizeof(transform_values), error::kNoError, GL_NO_ERROR,
- true},
- {kPathCount, GL_UNSIGNED_INT, kPathsWithBase, kPathBase, kFillMode,
- kReference, kMask, GL_TRANSLATE_3D_CHROMIUM, transform_values,
- sizeof(kPaths), sizeof(transform_values), error::kNoError, GL_NO_ERROR,
- true},
- {kPathCount, GL_UNSIGNED_INT, kPathsWithBase, kPathBase, kFillMode,
- kReference, kMask, GL_AFFINE_2D_CHROMIUM, transform_values,
- sizeof(kPaths), sizeof(transform_values), error::kNoError, GL_NO_ERROR,
- true},
- {kPathCount, GL_UNSIGNED_INT, kPathsWithBase, kPathBase, kFillMode,
- kReference, kMask, GL_AFFINE_3D_CHROMIUM, transform_values,
- sizeof(kPaths), sizeof(transform_values), error::kNoError, GL_NO_ERROR,
- true},
- {kPathCount, GL_UNSIGNED_INT, kPathsWithBase, kPathBase, kFillMode,
- kReference, kMask, GL_TRANSPOSE_AFFINE_2D_CHROMIUM, transform_values,
- sizeof(kPaths), sizeof(transform_values), error::kNoError, GL_NO_ERROR,
- true},
- {kPathCount, GL_UNSIGNED_INT, kPathsWithBase, kPathBase, kFillMode,
- kReference, kMask, GL_TRANSPOSE_AFFINE_3D_CHROMIUM, transform_values,
- sizeof(kPaths), sizeof(transform_values), error::kNoError, GL_NO_ERROR,
- true},
- {kPathCount, GL_SHORT, kShortPathsWithBase, kShortPathBase, kFillMode,
- kReference, kMask, GL_TRANSPOSE_AFFINE_3D_CHROMIUM, transform_values,
- sizeof(kShortPathsWithBase), sizeof(transform_values), error::kNoError,
- GL_NO_ERROR, true},
-
- // Test that if using path base causes path id to overflow, we get no
- // error.
- {kPathCount, GL_UNSIGNED_INT, kPathsWithBigBase, kBigPathBase, kFillMode,
- kReference, kMask, GL_TRANSLATE_X_CHROMIUM, transform_values,
- sizeof(kPathsWithBigBase), sizeof(transform_values), error::kNoError,
- GL_NO_ERROR, true},
- // Test that if using path base causes path id to underflow, we get no
- // error.
- {kPathCount, GL_BYTE, kNegativePathsWithBaseByte, kNegativePathBase,
- kFillMode, kReference, kMask, GL_TRANSLATE_X_CHROMIUM, transform_values,
- sizeof(kNegativePathsWithBaseByte), sizeof(transform_values),
- error::kNoError, GL_NO_ERROR, false},
- {kPathCount, GL_INT, kNegativePathsWithBaseInt, kNegativePathBase,
- kFillMode, kReference, kMask, GL_TRANSLATE_X_CHROMIUM, transform_values,
- sizeof(kNegativePathsWithBaseInt), sizeof(transform_values),
- error::kNoError, GL_NO_ERROR, false},
-
- };
-
- for (size_t i = 0; i < base::size(testcases); ++i) {
- SCOPED_TRACE(testing::Message() << "InstancedCalls testcase " << i);
- CallAllInstancedCommands(testcases[i]);
- }
-}
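
The overflow and underflow rows above rely on the path base being added to each name with plain unsigned arithmetic, so a huge base wraps back onto a valid path. A sketch of the assumed name resolution, not the decoder's actual helper:

#include <cstdint>

// Assumed semantics: the instanced commands resolve each name by adding the
// path base modulo 2^32.
uint32_t ResolvePathName(uint32_t path_base, uint32_t name) {
  return path_base + name;  // unsigned wrap-around on overflow
}

// With path_base == 0xFFFFFFFF and name == client_path_id_ + 1 the sum wraps
// back to client_path_id_, which is why the big-base testcase expects calls.
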
-
-TEST_P(GLES2DecoderTestWithCHROMIUMPathRendering, InstancedNoCalls) {
- const GLuint kPaths[] = {1, client_path_id_, 5, client_path_id_};
- const GLsizei kPathCount = base::size(kPaths);
-
- const GLenum kFillMode = GL_INVERT;
- const GLuint kMask = 0x80;
- const GLuint kReference = 0xFF;
- GLfloat transform_values[12 * kPathCount];
- for (GLsizei i = 0; i < kPathCount; ++i) {
- for (int j = 0; j < 12; ++j) {
- transform_values[i * 12 + j] = 0.1f * j + i;
- }
- }
-
- // The path base will be client_path_id_, and so 0 is a valid path and others
- // should be invalid.
- const GLuint kInvalidPathBase = client_path_id_;
- const GLuint kInvalidPathsWithBase[] = {1, client_path_id_, 5, 18};
-
- InstancedTestcase testcases[] = {
- // Zero path count produces no error, no call.
- {0, GL_UNSIGNED_INT, nullptr, 0, kFillMode, kReference, kMask, GL_NONE,
- nullptr, 0, 0, error::kNoError, GL_NO_ERROR, false},
-
- // Zero path count, even with path data, produces no error, no call.
- {0, GL_UNSIGNED_INT, kPaths, 0, kFillMode, kReference, kMask,
- GL_TRANSLATE_X_CHROMIUM, transform_values, sizeof(kPaths),
- sizeof(transform_values), error::kNoError, GL_NO_ERROR, false},
-
- // Negative path count produces error.
- {-1, GL_UNSIGNED_INT, kPaths, 0, kFillMode, kReference, kMask,
- GL_TRANSLATE_X_CHROMIUM, transform_values, sizeof(kPaths),
- sizeof(transform_values), error::kNoError, GL_INVALID_VALUE, false},
-
- // Passing paths count but not having the shm data is a connection error.
- {kPathCount, GL_UNSIGNED_INT, nullptr, 0, kFillMode, kReference, kMask,
- GL_TRANSLATE_X_CHROMIUM, transform_values, 0, sizeof(transform_values),
- error::kOutOfBounds, GL_NO_ERROR, false},
-
-      // A huge path count would require a huge transfer buffer, so it does
-      // not go through.
- {std::numeric_limits<GLsizei>::max() - 3, GL_UNSIGNED_INT, kPaths, 0,
- kFillMode, kReference, kMask, GL_TRANSLATE_X_CHROMIUM, transform_values,
- sizeof(kPaths), sizeof(transform_values), error::kOutOfBounds,
- GL_NO_ERROR, false},
-
- // Test that the path base is applied correctly for each instanced call.
- // In this case no path is marked as used, and so no GL function should be
- // called and no error should be generated.
- {kPathCount, GL_UNSIGNED_INT, kInvalidPathsWithBase, kInvalidPathBase,
- kFillMode, kReference, kMask, GL_TRANSLATE_X_CHROMIUM, transform_values,
- sizeof(kInvalidPathsWithBase), sizeof(transform_values), error::kNoError,
- GL_NO_ERROR, false},
-
- // Test that using correct paths but invalid transform type produces
- // invalid enum.
- {kPathCount, GL_UNSIGNED_INT, kPaths, 0, kFillMode, kReference, kMask,
- GL_TRANSLATE_X_CHROMIUM - 1, transform_values, sizeof(kPaths),
- sizeof(transform_values), error::kNoError, GL_INVALID_ENUM, false},
-
-      // Test that if we have a transform, not having the shm data is a
-      // connection error.
- {kPathCount, GL_UNSIGNED_INT, kPaths, 0, kFillMode, kReference, kMask,
- GL_TRANSLATE_X_CHROMIUM, nullptr, sizeof(kPaths), 0, error::kOutOfBounds,
- GL_NO_ERROR, false},
-
- };
- for (size_t i = 0; i < base::size(testcases); ++i) {
- SCOPED_TRACE(testing::Message() << "InstancedNoCalls testcase " << i);
- CallAllInstancedCommands(testcases[i]);
- }
-}
-
-TEST_P(GLES2DecoderTestWithCHROMIUMPathRendering, InstancedInvalidSHMValues) {
- const GLuint kPaths[] = {1, client_path_id_, 5, client_path_id_};
- const GLsizei kPathCount = base::size(kPaths);
- GLfloat transform_values[12 * kPathCount];
- for (GLsizei i = 0; i < kPathCount; ++i) {
- for (int j = 0; j < 12; ++j) {
- transform_values[i * 12 + j] = 0.1f * j + i;
- }
- }
- enum {
- kPathsSHMIdInvalid = 1,
- kPathsSHMOffsetInvalid = 1 << 1,
- kTransformsHMIdInvalid = 1 << 2,
- kTransformsHMOffsetInvalid = 1 << 3,
- kFirstTestcase = kPathsSHMIdInvalid,
- kLastTestcase = kTransformsHMOffsetInvalid
- };
-
- for (int testcase = kFirstTestcase; testcase <= kLastTestcase; ++testcase) {
- GLfloat* transforms = GetSharedMemoryAs<GLfloat*>();
- uint32_t transforms_shm_id = shared_memory_id_;
- uint32_t transforms_shm_offset = shared_memory_offset_;
- memcpy(transforms, transform_values, sizeof(transform_values));
-
- GLuint* paths =
- GetSharedMemoryAsWithOffset<GLuint*>(sizeof(transform_values));
- uint32_t paths_shm_id = shared_memory_id_;
- uint32_t paths_shm_offset =
- shared_memory_offset_ + sizeof(transform_values);
-
- if (testcase & kPathsSHMIdInvalid) {
- paths_shm_id = kInvalidSharedMemoryId;
- }
- if (testcase & kPathsSHMOffsetInvalid) {
- paths_shm_offset = kInvalidSharedMemoryOffset;
- }
- if (testcase & kTransformsHMIdInvalid) {
- transforms_shm_id = kInvalidSharedMemoryId;
- }
- if (testcase & kTransformsHMOffsetInvalid) {
- transforms_shm_offset = kInvalidSharedMemoryOffset;
- }
- SCOPED_TRACE(testing::Message() << "InstancedInvalidSHMValues testcase "
- << testcase);
- CallAllInstancedCommandsWithInvalidSHM(
- kPathCount, kPaths, paths, paths_shm_id, paths_shm_offset,
- transforms_shm_id, transforms_shm_offset);
- }
-}
-
-TEST_P(GLES2DecoderTestWithCHROMIUMPathRendering,
- BindFragmentInputLocationCHROMIUM) {
- const uint32_t kBucketId = 123;
- const GLint kLocation = 2;
- const char* kName = "testing";
- const char* kBadName1 = "gl_testing";
-
- SetBucketAsCString(kBucketId, kName);
- cmds::BindFragmentInputLocationCHROMIUMBucket cmd;
- cmd.Init(client_program_id_, kLocation, kBucketId);
- EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
- EXPECT_EQ(GL_NO_ERROR, GetGLError());
- // Check negative location.
- SetBucketAsCString(kBucketId, kName);
- cmd.Init(client_program_id_, -1, kBucketId);
- EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
- EXPECT_EQ(GL_INVALID_VALUE, GetGLError());
- // Check the highest location.
- SetBucketAsCString(kBucketId, kName);
- const GLint kMaxLocation = kMaxVaryingVectors * 4 - 1;
- cmd.Init(client_program_id_, kMaxLocation, kBucketId);
- EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
- EXPECT_EQ(GL_NO_ERROR, GetGLError());
- // Check too high location.
- SetBucketAsCString(kBucketId, kName);
- cmd.Init(client_program_id_, kMaxLocation + 1, kBucketId);
- EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
- EXPECT_EQ(GL_INVALID_VALUE, GetGLError());
- // Check bad name "gl_...".
- SetBucketAsCString(kBucketId, kBadName1);
- cmd.Init(client_program_id_, kLocation, kBucketId);
- EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
- EXPECT_EQ(GL_INVALID_OPERATION, GetGLError());
-}
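
This test pins down the accepted location range, [0, kMaxVaryingVectors * 4 - 1], and the reserved "gl_" name prefix rule. A hedged sketch of those two checks, as an illustrative helper rather than the decoder's own code:

#include <string>

bool IsFragmentInputBindingValid(int location,
                                 int max_varying_vectors,
                                 const std::string& name) {
  const int max_location = max_varying_vectors * 4 - 1;
  if (location < 0 || location > max_location)
    return false;  // the GL_INVALID_VALUE cases above
  if (name.compare(0, 3, "gl_") == 0)
    return false;  // the reserved-name GL_INVALID_OPERATION case
  return true;
}
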
-
class GLES2DecoderTestWithCHROMIUMRasterTransport : public GLES2DecoderTest {
public:
GLES2DecoderTestWithCHROMIUMRasterTransport() = default;
diff --git a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_extensions_autogen.h b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_extensions_autogen.h
index 4bf38b436fe..3f767a9d409 100644
--- a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_extensions_autogen.h
+++ b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_extensions_autogen.h
@@ -12,61 +12,6 @@
#ifndef GPU_COMMAND_BUFFER_SERVICE_GLES2_CMD_DECODER_UNITTEST_EXTENSIONS_AUTOGEN_H_
#define GPU_COMMAND_BUFFER_SERVICE_GLES2_CMD_DECODER_UNITTEST_EXTENSIONS_AUTOGEN_H_
-TEST_P(GLES2DecoderTestWithCHROMIUMPathRendering,
- MatrixLoadfCHROMIUMImmediateValidArgs) {
- cmds::MatrixLoadfCHROMIUMImmediate& cmd =
- *GetImmediateAs<cmds::MatrixLoadfCHROMIUMImmediate>();
- SpecializedSetup<cmds::MatrixLoadfCHROMIUMImmediate, 0>(true);
- GLfloat temp[16] = {
- 0,
- };
- cmd.Init(GL_PATH_PROJECTION_CHROMIUM, &temp[0]);
- EXPECT_CALL(*gl_, MatrixLoadfEXT(GL_PATH_PROJECTION_CHROMIUM,
- PointsToArray(temp, 16)));
- EXPECT_EQ(error::kNoError, ExecuteImmediateCmd(cmd, sizeof(temp)));
- EXPECT_EQ(GL_NO_ERROR, GetGLError());
-}
-
-TEST_P(GLES2DecoderTestWithCHROMIUMPathRendering,
- MatrixLoadIdentityCHROMIUMValidArgs) {
- EXPECT_CALL(*gl_, MatrixLoadIdentityEXT(GL_PATH_PROJECTION_CHROMIUM));
- SpecializedSetup<cmds::MatrixLoadIdentityCHROMIUM, 0>(true);
- cmds::MatrixLoadIdentityCHROMIUM cmd;
- cmd.Init(GL_PATH_PROJECTION_CHROMIUM);
- EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
- EXPECT_EQ(GL_NO_ERROR, GetGLError());
-}
-
-TEST_P(GLES2DecoderTestWithCHROMIUMPathRendering, IsPathCHROMIUMValidArgs) {
- EXPECT_CALL(*gl_, IsPathNV(kServicePathId));
- SpecializedSetup<cmds::IsPathCHROMIUM, 0>(true);
- cmds::IsPathCHROMIUM cmd;
- cmd.Init(client_path_id_, shared_memory_id_, shared_memory_offset_);
- EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
- EXPECT_EQ(GL_NO_ERROR, GetGLError());
-}
-
-TEST_P(GLES2DecoderTestWithCHROMIUMPathRendering,
- IsPathCHROMIUMInvalidArgsBadSharedMemoryId) {
- EXPECT_CALL(*gl_, IsPathNV(kServicePathId)).Times(0);
- SpecializedSetup<cmds::IsPathCHROMIUM, 0>(false);
- cmds::IsPathCHROMIUM cmd;
- cmd.Init(client_path_id_, kInvalidSharedMemoryId, shared_memory_offset_);
- EXPECT_EQ(error::kOutOfBounds, ExecuteCmd(cmd));
- cmd.Init(client_path_id_, shared_memory_id_, kInvalidSharedMemoryOffset);
- EXPECT_EQ(error::kOutOfBounds, ExecuteCmd(cmd));
-}
-
-TEST_P(GLES2DecoderTestWithCHROMIUMPathRendering,
- PathStencilFuncCHROMIUMValidArgs) {
- EXPECT_CALL(*gl_, PathStencilFuncNV(GL_NEVER, 2, 3));
- SpecializedSetup<cmds::PathStencilFuncCHROMIUM, 0>(true);
- cmds::PathStencilFuncCHROMIUM cmd;
- cmd.Init(GL_NEVER, 2, 3);
- EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
- EXPECT_EQ(GL_NO_ERROR, GetGLError());
-}
-
TEST_P(GLES2DecoderTestWithCHROMIUMFramebufferMixedSamples,
CoverageModulationCHROMIUMValidArgs) {
EXPECT_CALL(*gl_, CoverageModulationNV(GL_RGB));
diff --git a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_framebuffers.cc b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_framebuffers.cc
index 9b55c973e31..04ecdd606c9 100644
--- a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_framebuffers.cc
+++ b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_framebuffers.cc
@@ -3310,7 +3310,7 @@ TEST_P(GLES2DecoderTest, ClearBackbufferBitsOnFlipSwap) {
EXPECT_CALL(*gl_, Finish()).Times(AnyNumber());
auto& resize_cmd = *GetImmediateAs<cmds::ResizeCHROMIUM>();
- resize_cmd.Init(1, 1, 1.0f, GL_COLOR_SPACE_UNSPECIFIED_CHROMIUM, GL_TRUE);
+ resize_cmd.Init(1, 1, 1.0f, GL_TRUE, 0, 0, 0);
EXPECT_EQ(error::kNoError, ExecuteCmd(resize_cmd));
EXPECT_EQ(GL_NO_ERROR, GetGLError());
EXPECT_EQ(static_cast<uint32_t>(GL_COLOR_BUFFER_BIT),
diff --git a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_textures.cc b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_textures.cc
index 76f6ab40a98..4b7cd4187ae 100644
--- a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_textures.cc
+++ b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_textures.cc
@@ -10,6 +10,7 @@
#include "base/command_line.h"
#include "base/stl_util.h"
#include "base/strings/string_number_conversions.h"
+#include "components/viz/common/resources/resource_format_utils.h"
#include "gpu/command_buffer/common/gles2_cmd_format.h"
#include "gpu/command_buffer/common/gles2_cmd_utils.h"
#include "gpu/command_buffer/common/id_allocator.h"
@@ -24,6 +25,7 @@
#include "gpu/command_buffer/service/service_discardable_manager.h"
#include "gpu/command_buffer/service/shared_image_representation.h"
#include "gpu/command_buffer/service/test_helper.h"
+#include "gpu/command_buffer/service/test_shared_image_backing.h"
#include "gpu/config/gpu_switches.h"
#include "testing/gtest/include/gtest/gtest.h"
#include "ui/gl/gl_image_stub.h"
@@ -3111,80 +3113,6 @@ TEST_P(GLES2DecoderTest, CreateAndConsumeTextureCHROMIUMInvalidTexture) {
EXPECT_EQ(GL_INVALID_OPERATION, GetGLError());
}
-class TestSharedImageBacking : public SharedImageBacking {
- public:
- class TestSharedImageRepresentation
- : public SharedImageRepresentationGLTexture {
- public:
- TestSharedImageRepresentation(SharedImageManager* manager,
- SharedImageBacking* backing,
- MemoryTypeTracker* tracker,
- gles2::Texture* texture)
- : SharedImageRepresentationGLTexture(manager, backing, tracker),
- texture_(texture) {}
-
- gles2::Texture* GetTexture() override { return texture_; }
-
- void set_can_access(bool can_access) { can_access_ = can_access; }
- bool BeginAccess(GLenum mode) override { return can_access_; }
-
- private:
- gles2::Texture* texture_;
- bool can_access_ = true;
- };
-
- TestSharedImageBacking(const Mailbox& mailbox,
- viz::ResourceFormat format,
- const gfx::Size& size,
- const gfx::ColorSpace& color_space,
- uint32_t usage,
- MemoryTypeTracker* memory_tracker,
- GLuint texture_id)
- : SharedImageBacking(mailbox,
- format,
- size,
- color_space,
- usage,
- 0 /* estimated_size */,
- false /* is_thread_safe */) {
- texture_ = new gles2::Texture(texture_id);
- texture_->SetLightweightRef();
- }
-
- bool IsCleared() const override { return false; }
-
- void SetCleared() override {}
-
- void Update(std::unique_ptr<gfx::GpuFence> in_fence) override {
- DCHECK(!in_fence);
- }
-
- bool ProduceLegacyMailbox(MailboxManager* mailbox_manager) override {
- return false;
- }
-
- void Destroy() override {
- texture_->RemoveLightweightRef(have_context());
- texture_ = nullptr;
- }
-
- void OnMemoryDump(const std::string& dump_name,
- base::trace_event::MemoryAllocatorDump* dump,
- base::trace_event::ProcessMemoryDump* pmd,
- uint64_t client_tracing_id) override {}
-
- protected:
- std::unique_ptr<SharedImageRepresentationGLTexture> ProduceGLTexture(
- SharedImageManager* manager,
- MemoryTypeTracker* tracker) override {
- return std::make_unique<TestSharedImageRepresentation>(manager, this,
- tracker, texture_);
- }
-
- private:
- gles2::Texture* texture_;
-};
-
TEST_P(GLES2DecoderTest, CreateAndTexStorage2DSharedImageCHROMIUM) {
MemoryTypeTracker memory_tracker(memory_tracker_.get());
Mailbox mailbox = Mailbox::GenerateForSharedImage();
@@ -3192,7 +3120,7 @@ TEST_P(GLES2DecoderTest, CreateAndTexStorage2DSharedImageCHROMIUM) {
GetSharedImageManager()->Register(
std::make_unique<TestSharedImageBacking>(
mailbox, viz::ResourceFormat::RGBA_8888, gfx::Size(10, 10),
- gfx::ColorSpace(), 0, &memory_tracker, kNewServiceId),
+ gfx::ColorSpace(), 0, 0, kNewServiceId),
&memory_tracker);
auto& cmd = *GetImmediateAs<
@@ -3253,7 +3181,7 @@ TEST_P(GLES2DecoderTest,
GetSharedImageManager()->Register(
std::make_unique<TestSharedImageBacking>(
mailbox, viz::ResourceFormat::RGBA_8888, gfx::Size(10, 10),
- gfx::ColorSpace(), 0, &memory_tracker, kNewServiceId),
+ gfx::ColorSpace(), 0, 0, kNewServiceId),
&memory_tracker);
auto& cmd = *GetImmediateAs<
@@ -3276,7 +3204,7 @@ TEST_P(GLES2DecoderTest, BeginEndSharedImageAccessCRHOMIUM) {
GetSharedImageManager()->Register(
std::make_unique<TestSharedImageBacking>(
mailbox, viz::ResourceFormat::RGBA_8888, gfx::Size(10, 10),
- gfx::ColorSpace(), 0, &memory_tracker, kNewServiceId),
+ gfx::ColorSpace(), 0, 0, kNewServiceId),
&memory_tracker);
auto& cmd = *GetImmediateAs<
@@ -3332,12 +3260,14 @@ TEST_P(GLES2DecoderTest, BeginSharedImageAccessDirectCHROMIUMCantBeginAccess) {
// Create a shared image.
MemoryTypeTracker memory_tracker(memory_tracker_.get());
Mailbox mailbox = Mailbox::GenerateForSharedImage();
+ auto shared_image_backing = std::make_unique<TestSharedImageBacking>(
+ mailbox, viz::ResourceFormat::RGBA_8888, gfx::Size(10, 10),
+ gfx::ColorSpace(), 0, 0, kNewServiceId);
+ // Set the shared image to fail BeginAccess.
+ shared_image_backing->set_can_access(false);
std::unique_ptr<SharedImageRepresentationFactoryRef> shared_image =
- GetSharedImageManager()->Register(
- std::make_unique<TestSharedImageBacking>(
- mailbox, viz::ResourceFormat::RGBA_8888, gfx::Size(10, 10),
- gfx::ColorSpace(), 0, &memory_tracker, kNewServiceId),
- &memory_tracker);
+ GetSharedImageManager()->Register(std::move(shared_image_backing),
+ &memory_tracker);
auto& cmd = *GetImmediateAs<
cmds::CreateAndTexStorage2DSharedImageINTERNALImmediate>();
@@ -3347,12 +3277,6 @@ TEST_P(GLES2DecoderTest, BeginSharedImageAccessDirectCHROMIUMCantBeginAccess) {
// Try to begin access with a shared image representation that fails
// BeginAccess.
- auto* texture_ref = group().texture_manager()->GetTexture(kNewClientId);
- ASSERT_NE(texture_ref, nullptr);
- ASSERT_NE(texture_ref->shared_image(), nullptr);
- static_cast<TestSharedImageBacking::TestSharedImageRepresentation*>(
- texture_ref->shared_image())
- ->set_can_access(false);
cmds::BeginSharedImageAccessDirectCHROMIUM read_access_cmd;
read_access_cmd.Init(kNewClientId, GL_SHARED_IMAGE_ACCESS_MODE_READ_CHROMIUM);
EXPECT_EQ(error::kNoError, ExecuteCmd(read_access_cmd));
@@ -4096,24 +4020,21 @@ TEST_P(GLES2DecoderManualInitTest, TexImage2DFloatConvertsFormatDesktop) {
GL_LUMINANCE_ALPHA32F_ARB);
}
-TEST_P(GLES2DecoderManualInitTest, TexImage2Dnorm16OnGLES2) {
- InitState init;
- init.extensions = "GL_EXT_texture_norm16";
- init.gl_version = "OpenGL ES 2.0";
- InitDecoder(init);
- DoBindTexture(GL_TEXTURE_2D, client_texture_id_, kServiceTextureId);
- DoTexImage2D(GL_TEXTURE_2D, 0, GL_RED, 16, 17, 0, GL_RED, GL_UNSIGNED_SHORT,
- 0, 0);
-}
-
TEST_P(GLES2DecoderManualInitTest, TexImage2Dnorm16OnGLES3) {
InitState init;
init.extensions = "GL_EXT_texture_norm16";
init.gl_version = "OpenGL ES 3.0";
+ init.context_type = CONTEXT_TYPE_OPENGLES3;
InitDecoder(init);
DoBindTexture(GL_TEXTURE_2D, client_texture_id_, kServiceTextureId);
DoTexImage2D(GL_TEXTURE_2D, 0, GL_R16_EXT, 16, 17, 0, GL_RED,
GL_UNSIGNED_SHORT, 0, 0);
+ DoTexImage2D(GL_TEXTURE_2D, 0, GL_RG16_EXT, 16, 17, 0, GL_RG,
+ GL_UNSIGNED_SHORT, 0, 0);
+ DoTexImage2D(GL_TEXTURE_2D, 0, GL_RGB16_EXT, 16, 17, 0, GL_RGB,
+ GL_UNSIGNED_SHORT, 0, 0);
+ DoTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA16_EXT, 16, 17, 0, GL_RGBA,
+ GL_UNSIGNED_SHORT, 0, 0);
}
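
The updated test now also walks the RG, RGB and RGBA variants of GL_EXT_texture_norm16 on an ES3 context. For reference, the raw GL calls the DoTexImage2D helper stands in for would look roughly like this (illustrative only, assuming the extension is exposed):

#include <GLES3/gl3.h>
#include <GLES2/gl2ext.h>

void UploadNorm16Textures() {
  // 16x17 textures with no initial data, one call per sized norm16 format.
  glTexImage2D(GL_TEXTURE_2D, 0, GL_R16_EXT, 16, 17, 0, GL_RED,
               GL_UNSIGNED_SHORT, nullptr);
  glTexImage2D(GL_TEXTURE_2D, 0, GL_RG16_EXT, 16, 17, 0, GL_RG,
               GL_UNSIGNED_SHORT, nullptr);
  glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB16_EXT, 16, 17, 0, GL_RGB,
               GL_UNSIGNED_SHORT, nullptr);
  glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA16_EXT, 16, 17, 0, GL_RGBA,
               GL_UNSIGNED_SHORT, nullptr);
}
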
class GLES2DecoderCompressedFormatsTest : public GLES2DecoderManualInitTest {
@@ -4234,6 +4155,22 @@ TEST_P(GLES2DecoderCompressedFormatsTest, GetCompressedTextureFormatsASTC) {
CheckFormats("GL_KHR_texture_compression_astc_ldr", formats, 28);
}
+TEST_P(GLES2DecoderCompressedFormatsTest, GetCompressedTextureFormatsBPTC) {
+ const GLenum formats[] = {GL_COMPRESSED_RGBA_BPTC_UNORM_EXT,
+ GL_COMPRESSED_SRGB_ALPHA_BPTC_UNORM_EXT,
+ GL_COMPRESSED_RGB_BPTC_SIGNED_FLOAT_EXT,
+ GL_COMPRESSED_RGB_BPTC_UNSIGNED_FLOAT_EXT};
+ CheckFormats("GL_EXT_texture_compression_bptc", formats, 4);
+}
+
+TEST_P(GLES2DecoderCompressedFormatsTest, GetCompressedTextureFormatsRGTC) {
+ const GLenum formats[] = {GL_COMPRESSED_RED_RGTC1_EXT,
+ GL_COMPRESSED_SIGNED_RED_RGTC1_EXT,
+ GL_COMPRESSED_RED_GREEN_RGTC2_EXT,
+ GL_COMPRESSED_SIGNED_RED_GREEN_RGTC2_EXT};
+ CheckFormats("GL_EXT_texture_compression_rgtc", formats, 4);
+}
+
TEST_P(GLES2DecoderManualInitTest, GetNoCompressedTextureFormats) {
InitState init;
init.bind_generates_resource = true;
@@ -4280,7 +4217,7 @@ TEST_P(GLES2DecoderManualInitTest, TexStorageInvalidLevels) {
cmds::TexStorage2DEXT cmd;
cmd.Init(GL_TEXTURE_RECTANGLE_ARB, 2, GL_RGBA8, 4, 4);
EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
- EXPECT_EQ(GL_INVALID_VALUE, GetGLError());
+ EXPECT_EQ(GL_INVALID_OPERATION, GetGLError());
}
TEST_P(GLES2DecoderManualInitTest, TexStorageInvalidSize) {
diff --git a/chromium/gpu/command_buffer/service/gles2_cmd_validation_autogen.h b/chromium/gpu/command_buffer/service/gles2_cmd_validation_autogen.h
index 6673feae8e0..9b0a4798a72 100644
--- a/chromium/gpu/command_buffer/service/gles2_cmd_validation_autogen.h
+++ b/chromium/gpu/command_buffer/service/gles2_cmd_validation_autogen.h
@@ -152,72 +152,6 @@ class MapBufferAccessValidator {
};
MapBufferAccessValidator map_buffer_access;
-class MatrixModeValidator {
- public:
- bool IsValid(const GLenum value) const;
-};
-MatrixModeValidator matrix_mode;
-
-class PathCoordTypeValidator {
- public:
- bool IsValid(const GLenum value) const;
-};
-PathCoordTypeValidator path_coord_type;
-
-class PathCoverModeValidator {
- public:
- bool IsValid(const GLenum value) const;
-};
-PathCoverModeValidator path_cover_mode;
-
-class PathFillModeValidator {
- public:
- bool IsValid(const GLenum value) const;
-};
-PathFillModeValidator path_fill_mode;
-
-class PathFragmentInputGenModeValidator {
- public:
- bool IsValid(const GLenum value) const;
-};
-PathFragmentInputGenModeValidator path_fragment_input_gen_mode;
-
-class PathInstancedCoverModeValidator {
- public:
- bool IsValid(const GLenum value) const;
-};
-PathInstancedCoverModeValidator path_instanced_cover_mode;
-
-class PathNameTypeValidator {
- public:
- bool IsValid(const GLenum value) const;
-};
-PathNameTypeValidator path_name_type;
-
-class PathParameterValidator {
- public:
- bool IsValid(const GLenum value) const;
-};
-PathParameterValidator path_parameter;
-
-class PathParameterCapValuesValidator {
- public:
- bool IsValid(const GLint value) const;
-};
-PathParameterCapValuesValidator path_parameter_cap_values;
-
-class PathParameterJoinValuesValidator {
- public:
- bool IsValid(const GLint value) const;
-};
-PathParameterJoinValuesValidator path_parameter_join_values;
-
-class PathTransformTypeValidator {
- public:
- bool IsValid(const GLenum value) const;
-};
-PathTransformTypeValidator path_transform_type;
-
ValueValidator<GLenum> pixel_store;
class PixelStoreAlignmentValidator {
public:
diff --git a/chromium/gpu/command_buffer/service/gles2_cmd_validation_implementation_autogen.h b/chromium/gpu/command_buffer/service/gles2_cmd_validation_implementation_autogen.h
index efc505e5507..5284cc802b9 100644
--- a/chromium/gpu/command_buffer/service/gles2_cmd_validation_implementation_autogen.h
+++ b/chromium/gpu/command_buffer/service/gles2_cmd_validation_implementation_autogen.h
@@ -537,132 +537,6 @@ bool Validators::MapBufferAccessValidator::IsValid(const GLenum value) const {
return false;
}
-bool Validators::MatrixModeValidator::IsValid(const GLenum value) const {
- switch (value) {
- case GL_PATH_PROJECTION_CHROMIUM:
- case GL_PATH_MODELVIEW_CHROMIUM:
- return true;
- }
- return false;
-}
-
-bool Validators::PathCoordTypeValidator::IsValid(const GLenum value) const {
- switch (value) {
- case GL_BYTE:
- case GL_UNSIGNED_BYTE:
- case GL_SHORT:
- case GL_UNSIGNED_SHORT:
- case GL_FLOAT:
- return true;
- }
- return false;
-}
-
-bool Validators::PathCoverModeValidator::IsValid(const GLenum value) const {
- switch (value) {
- case GL_CONVEX_HULL_CHROMIUM:
- case GL_BOUNDING_BOX_CHROMIUM:
- return true;
- }
- return false;
-}
-
-bool Validators::PathFillModeValidator::IsValid(const GLenum value) const {
- switch (value) {
- case GL_INVERT:
- case GL_COUNT_UP_CHROMIUM:
- case GL_COUNT_DOWN_CHROMIUM:
- return true;
- }
- return false;
-}
-
-bool Validators::PathFragmentInputGenModeValidator::IsValid(
- const GLenum value) const {
- switch (value) {
- case GL_NONE:
- case GL_EYE_LINEAR_CHROMIUM:
- case GL_OBJECT_LINEAR_CHROMIUM:
- case GL_CONSTANT_CHROMIUM:
- return true;
- }
- return false;
-}
-
-bool Validators::PathInstancedCoverModeValidator::IsValid(
- const GLenum value) const {
- switch (value) {
- case GL_CONVEX_HULL_CHROMIUM:
- case GL_BOUNDING_BOX_CHROMIUM:
- case GL_BOUNDING_BOX_OF_BOUNDING_BOXES_CHROMIUM:
- return true;
- }
- return false;
-}
-
-bool Validators::PathNameTypeValidator::IsValid(const GLenum value) const {
- switch (value) {
- case GL_UNSIGNED_BYTE:
- case GL_BYTE:
- case GL_UNSIGNED_SHORT:
- case GL_SHORT:
- case GL_UNSIGNED_INT:
- case GL_INT:
- return true;
- }
- return false;
-}
-
-bool Validators::PathParameterValidator::IsValid(const GLenum value) const {
- switch (value) {
- case GL_PATH_STROKE_WIDTH_CHROMIUM:
- case GL_PATH_END_CAPS_CHROMIUM:
- case GL_PATH_JOIN_STYLE_CHROMIUM:
- case GL_PATH_MITER_LIMIT_CHROMIUM:
- case GL_PATH_STROKE_BOUND_CHROMIUM:
- return true;
- }
- return false;
-}
-
-bool Validators::PathParameterCapValuesValidator::IsValid(
- const GLint value) const {
- switch (value) {
- case GL_FLAT:
- case GL_SQUARE_CHROMIUM:
- case GL_ROUND_CHROMIUM:
- return true;
- }
- return false;
-}
-
-bool Validators::PathParameterJoinValuesValidator::IsValid(
- const GLint value) const {
- switch (value) {
- case GL_MITER_REVERT_CHROMIUM:
- case GL_BEVEL_CHROMIUM:
- case GL_ROUND_CHROMIUM:
- return true;
- }
- return false;
-}
-
-bool Validators::PathTransformTypeValidator::IsValid(const GLenum value) const {
- switch (value) {
- case GL_NONE:
- case GL_TRANSLATE_X_CHROMIUM:
- case GL_TRANSLATE_Y_CHROMIUM:
- case GL_TRANSLATE_2D_CHROMIUM:
- case GL_TRANSLATE_3D_CHROMIUM:
- case GL_AFFINE_2D_CHROMIUM:
- case GL_AFFINE_3D_CHROMIUM:
- case GL_TRANSPOSE_AFFINE_2D_CHROMIUM:
- case GL_TRANSPOSE_AFFINE_3D_CHROMIUM:
- return true;
- }
- return false;
-}
-
static const GLenum valid_pixel_store_table[] = {
GL_PACK_ALIGNMENT,
GL_UNPACK_ALIGNMENT,
diff --git a/chromium/gpu/command_buffer/service/gpu_command_buffer_memory_tracker.cc b/chromium/gpu/command_buffer/service/gpu_command_buffer_memory_tracker.cc
index fb9aa52d2c6..fa686c24432 100644
--- a/chromium/gpu/command_buffer/service/gpu_command_buffer_memory_tracker.cc
+++ b/chromium/gpu/command_buffer/service/gpu_command_buffer_memory_tracker.cc
@@ -9,6 +9,7 @@
#include "base/bind.h"
#include "base/metrics/histogram_macros.h"
#include "gpu/ipc/common/command_buffer_id.h"
+#include "gpu/ipc/common/gpu_peak_memory.h"
// Macro to reduce code duplication when logging memory in
// GpuCommandBufferMemoryTracker. This is needed as the UMA_HISTOGRAM_* macros
@@ -61,11 +62,14 @@ GpuCommandBufferMemoryTracker::~GpuCommandBufferMemoryTracker() {
LogMemoryStatsShutdown();
}
-void GpuCommandBufferMemoryTracker::TrackMemoryAllocatedChange(uint64_t delta) {
+void GpuCommandBufferMemoryTracker::TrackMemoryAllocatedChange(int64_t delta) {
+ DCHECK(delta >= 0 || size_ >= static_cast<uint64_t>(-delta));
uint64_t old_size = size_;
size_ += delta;
if (observer_)
- observer_->OnMemoryAllocatedChange(command_buffer_id_, old_size, size_);
+ observer_->OnMemoryAllocatedChange(
+ command_buffer_id_, old_size, size_,
+ GpuPeakMemoryAllocationSource::COMMAND_BUFFER);
}
uint64_t GpuCommandBufferMemoryTracker::GetSize() const {
diff --git a/chromium/gpu/command_buffer/service/gpu_command_buffer_memory_tracker.h b/chromium/gpu/command_buffer/service/gpu_command_buffer_memory_tracker.h
index bf0d0a681f5..731b0b10ff5 100644
--- a/chromium/gpu/command_buffer/service/gpu_command_buffer_memory_tracker.h
+++ b/chromium/gpu/command_buffer/service/gpu_command_buffer_memory_tracker.h
@@ -30,7 +30,7 @@ class GPU_GLES2_EXPORT GpuCommandBufferMemoryTracker : public MemoryTracker {
~GpuCommandBufferMemoryTracker() override;
// MemoryTracker implementation.
- void TrackMemoryAllocatedChange(uint64_t delta) override;
+ void TrackMemoryAllocatedChange(int64_t delta) override;
uint64_t GetSize() const override;
uint64_t ClientTracingId() const override;
int ClientId() const override;
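
The tracker now takes a signed delta and asserts that frees never exceed the currently tracked size. A minimal standalone sketch of that bookkeeping, assuming this simplified shape rather than the real MemoryTracker interface:

#include <cassert>
#include <cstdint>

class SizeTracker {
 public:
  void TrackMemoryAllocatedChange(int64_t delta) {
    // A negative delta must not free more than is currently tracked.
    assert(delta >= 0 || size_ >= static_cast<uint64_t>(-delta));
    size_ += delta;  // guarded above, so the unsigned addition stays correct
  }
  uint64_t GetSize() const { return size_; }

 private:
  uint64_t size_ = 0;
};
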
diff --git a/chromium/gpu/command_buffer/service/gpu_switches.cc b/chromium/gpu/command_buffer/service/gpu_switches.cc
index f9aadc74c57..886e0f039f5 100644
--- a/chromium/gpu/command_buffer/service/gpu_switches.cc
+++ b/chromium/gpu/command_buffer/service/gpu_switches.cc
@@ -43,6 +43,13 @@ const char kEnforceGLMinimums[] = "enforce-gl-minimums";
// Sets the total amount of memory that may be allocated for GPU resources
const char kForceGpuMemAvailableMb[] = "force-gpu-mem-available-mb";
+// Sets the maximum GPU memory to use for discardable caches.
+const char kForceGpuMemDiscardableLimitMb[] =
+ "force-gpu-mem-discardable-limit-mb";
+
+// Sets the maximum texture size in pixels.
+const char kForceMaxTextureSize[] = "force-max-texture-size";
+
// Sets the maximum size of the in-memory gpu program cache, in kb
const char kGpuProgramCacheSizeKb[] = "gpu-program-cache-size-kb";
@@ -62,14 +69,10 @@ const char kGLShaderIntermOutput[] = "gl-shader-interm-output";
// round intermediate values in ANGLE.
const char kEmulateShaderPrecision[] = "emulate-shader-precision";
-// Selects the type of the GrContext.
-const char kGrContextType[] = "gr-context-type";
-const char kGrContextTypeGL[] = "gl";
-const char kGrContextTypeVulkan[] = "vulkan";
-const char kGrContextTypeMetal[] = "metal";
-const char kGrContextTypeDawn[] = "dawn";
// Enable Vulkan support and select Vulkan implementation, must also have
-// ENABLE_VULKAN defined.
+// ENABLE_VULKAN defined. This only initializes Vulkan; the flag
+// --enable-features=Vulkan must also be used to select Vulkan for compositing
+// and rasterization.
const char kUseVulkan[] = "use-vulkan";
const char kVulkanImplementationNameNative[] = "native";
const char kVulkanImplementationNameSwiftshader[] = "swiftshader";
diff --git a/chromium/gpu/command_buffer/service/gpu_switches.h b/chromium/gpu/command_buffer/service/gpu_switches.h
index ee702988014..1eca57d6eaf 100644
--- a/chromium/gpu/command_buffer/service/gpu_switches.h
+++ b/chromium/gpu/command_buffer/service/gpu_switches.h
@@ -23,17 +23,13 @@ GPU_EXPORT extern const char kEnableGPUDriverDebugLogging[];
GPU_EXPORT extern const char kDisableGpuProgramCache[];
GPU_EXPORT extern const char kEnforceGLMinimums[];
GPU_EXPORT extern const char kForceGpuMemAvailableMb[];
+GPU_EXPORT extern const char kForceGpuMemDiscardableLimitMb[];
+GPU_EXPORT extern const char kForceMaxTextureSize[];
GPU_EXPORT extern const char kGpuProgramCacheSizeKb[];
GPU_EXPORT extern const char kDisableGpuShaderDiskCache[];
GPU_EXPORT extern const char kEnableThreadedTextureMailboxes[];
GPU_EXPORT extern const char kGLShaderIntermOutput[];
GPU_EXPORT extern const char kEmulateShaderPrecision[];
-GPU_EXPORT extern const char kGrContextType[];
-GPU_EXPORT extern const char kGrContextTypeGL[];
-GPU_EXPORT extern const char kGrContextTypeVulkan[];
-GPU_EXPORT extern const char kGrContextTypeMetal[];
-GPU_EXPORT extern const char kGrContextTypeDawn[];
-GPU_EXPORT extern const char kVulkanImplementationNameNative[];
GPU_EXPORT extern const char kUseVulkan[];
GPU_EXPORT extern const char kVulkanImplementationNameNative[];
GPU_EXPORT extern const char kVulkanImplementationNameSwiftshader[];
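
The two new switches declared above would normally be read through base::CommandLine; the helper below is only an illustration of that, not the code that actually consumes them in the GPU process:

#include <string>

#include "base/command_line.h"
#include "base/strings/string_number_conversions.h"
#include "gpu/command_buffer/service/gpu_switches.h"

int GetForcedMaxTextureSize(int default_size) {
  const base::CommandLine* cmd = base::CommandLine::ForCurrentProcess();
  if (!cmd->HasSwitch(switches::kForceMaxTextureSize))
    return default_size;
  int forced = 0;
  const std::string value =
      cmd->GetSwitchValueASCII(switches::kForceMaxTextureSize);
  return base::StringToInt(value, &forced) ? forced : default_size;
}
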
diff --git a/chromium/gpu/command_buffer/service/gpu_tracer.cc b/chromium/gpu/command_buffer/service/gpu_tracer.cc
index 17a670920ec..f6c29308767 100644
--- a/chromium/gpu/command_buffer/service/gpu_tracer.cc
+++ b/chromium/gpu/command_buffer/service/gpu_tracer.cc
@@ -168,21 +168,24 @@ void GPUTrace::Process() {
}
}
-GPUTracer::GPUTracer(DecoderContext* decoder)
- : gpu_trace_srv_category(TRACE_EVENT_API_GET_CATEGORY_GROUP_ENABLED(
+GPUTracer::GPUTracer(DecoderContext* decoder, bool context_is_gl)
+ : gpu_trace_srv_category_(TRACE_EVENT_API_GET_CATEGORY_GROUP_ENABLED(
TRACE_DISABLED_BY_DEFAULT("gpu.service"))),
- gpu_trace_dev_category(TRACE_EVENT_API_GET_CATEGORY_GROUP_ENABLED(
+ gpu_trace_dev_category_(TRACE_EVENT_API_GET_CATEGORY_GROUP_ENABLED(
TRACE_DISABLED_BY_DEFAULT("gpu.device"))),
decoder_(decoder) {
DCHECK(decoder_);
- gl::GLContext* context = decoder_->GetGLContext();
- if (context)
- gpu_timing_client_ = context->CreateGPUTimingClient();
- else
- gpu_timing_client_ = new gl::GPUTimingClient();
-
+ gl::GLContext* gl_context = decoder_->GetGLContext();
+ if (context_is_gl && gl_context) {
+ can_trace_dev_ = true;
+ gpu_timing_client_ = gl_context->CreateGPUTimingClient();
+ disjoint_time_ = gpu_timing_client_->GetCurrentCPUTime();
+ } else {
+ can_trace_dev_ = false;
+ // TODO(crbug.com/1018725): GPUTiming should support backends other than GL.
+ gpu_timing_client_ = nullptr;
+ }
outputter_ = decoder_->outputter();
- disjoint_time_ = gpu_timing_client_->GetCurrentCPUTime();
}
GPUTracer::~GPUTracer() = default;
@@ -201,14 +204,13 @@ bool GPUTracer::BeginDecoding() {
// Begin a Trace for all active markers
for (int n = 0; n < NUM_TRACER_SOURCES; n++) {
for (size_t i = 0; i < markers_[n].size(); i++) {
- began_device_traces_ |= (*gpu_trace_dev_category != 0);
+ began_device_traces_ |= is_gpu_device_tracing_enabled();
TraceMarker& trace_marker = markers_[n][i];
- trace_marker.trace_ =
- new GPUTrace(outputter_, gpu_timing_client_.get(),
- static_cast<GpuTracerSource>(n),
- trace_marker.category_, trace_marker.name_,
- *gpu_trace_srv_category != 0,
- *gpu_trace_dev_category != 0);
+ trace_marker.trace_ = new GPUTrace(
+ outputter_, gpu_timing_client_.get(),
+ static_cast<GpuTracerSource>(n), trace_marker.category_,
+ trace_marker.name_, is_gpu_service_tracing_enabled(),
+ is_gpu_device_tracing_enabled());
trace_marker.trace_->Start();
}
}
@@ -253,11 +255,10 @@ bool GPUTracer::Begin(const std::string& category, const std::string& name,
// Create trace
if (IsTracing()) {
- began_device_traces_ |= (*gpu_trace_dev_category != 0);
+ began_device_traces_ |= is_gpu_device_tracing_enabled();
scoped_refptr<GPUTrace> trace = new GPUTrace(
outputter_, gpu_timing_client_.get(), source, category, name,
- *gpu_trace_srv_category != 0,
- *gpu_trace_dev_category != 0);
+ is_gpu_service_tracing_enabled(), is_gpu_device_tracing_enabled());
trace->Start();
markers_[source].back().trace_ = trace;
}
@@ -293,7 +294,7 @@ bool GPUTracer::HasTracesToProcess() {
}
void GPUTracer::ProcessTraces() {
- if (!gpu_timing_client_->IsAvailable()) {
+ if (gpu_timing_client_ && !gpu_timing_client_->IsAvailable()) {
while (!finished_traces_.empty()) {
finished_traces_.front()->Destroy(false);
finished_traces_.pop_front();
@@ -335,7 +336,7 @@ void GPUTracer::ProcessTraces() {
}
bool GPUTracer::IsTracing() {
- return (*gpu_trace_srv_category != 0) || (*gpu_trace_dev_category != 0);
+ return is_gpu_service_tracing_enabled() || is_gpu_device_tracing_enabled();
}
const std::string& GPUTracer::CurrentCategory(GpuTracerSource source) const {
@@ -357,10 +358,10 @@ const std::string& GPUTracer::CurrentName(GpuTracerSource source) const {
}
bool GPUTracer::CheckDisjointStatus() {
- const int64_t current_time = gpu_timing_client_->GetCurrentCPUTime();
- if (*gpu_trace_dev_category == 0)
+ if (!is_gpu_device_tracing_enabled())
return false;
+ const int64_t current_time = gpu_timing_client_->GetCurrentCPUTime();
bool status = gpu_timing_client_->CheckAndResetTimerErrors();
if (status && began_device_traces_) {
// Log disjoint event if we have active traces.
diff --git a/chromium/gpu/command_buffer/service/gpu_tracer.h b/chromium/gpu/command_buffer/service/gpu_tracer.h
index 72b5c2f01c9..bc512e4725c 100644
--- a/chromium/gpu/command_buffer/service/gpu_tracer.h
+++ b/chromium/gpu/command_buffer/service/gpu_tracer.h
@@ -57,7 +57,7 @@ struct TraceMarker {
// Traces GPU Commands.
class GPU_GLES2_EXPORT GPUTracer {
public:
- explicit GPUTracer(DecoderContext* decoder);
+ explicit GPUTracer(DecoderContext* decoder, bool context_is_gl = true);
virtual ~GPUTracer();
void Destroy(bool have_context);
@@ -86,9 +86,18 @@ class GPU_GLES2_EXPORT GPUTracer {
const std::string& CurrentName(GpuTracerSource source) const;
protected:
+ bool is_gpu_service_tracing_enabled() {
+ return *gpu_trace_srv_category_ != 0;
+ }
+ bool is_gpu_device_tracing_enabled() {
+ return *gpu_trace_dev_category_ != 0 && can_trace_dev_;
+ }
+
scoped_refptr<gl::GPUTimingClient> gpu_timing_client_;
- const unsigned char* gpu_trace_srv_category;
- const unsigned char* gpu_trace_dev_category;
+ const unsigned char* gpu_trace_srv_category_;
+ const unsigned char* gpu_trace_dev_category_;
+ // Disable gpu.device tracing if context is corrupted or not GL.
+ bool can_trace_dev_;
private:
bool CheckDisjointStatus();
diff --git a/chromium/gpu/command_buffer/service/gpu_tracer_unittest.cc b/chromium/gpu/command_buffer/service/gpu_tracer_unittest.cc
index 8d6248c177e..f53aebe21a5 100644
--- a/chromium/gpu/command_buffer/service/gpu_tracer_unittest.cc
+++ b/chromium/gpu/command_buffer/service/gpu_tracer_unittest.cc
@@ -56,8 +56,8 @@ class GPUTracerTester : public GPUTracer {
gpu_timing_client_->SetCpuTimeForTesting(base::BindRepeating(&FakeCpuTime));
// Force tracing to be dependent on our mock variable here.
- gpu_trace_srv_category = &tracing_enabled_;
- gpu_trace_dev_category = &tracing_enabled_;
+ gpu_trace_srv_category_ = &tracing_enabled_;
+ gpu_trace_dev_category_ = &tracing_enabled_;
}
~GPUTracerTester() override = default;
diff --git a/chromium/gpu/command_buffer/service/gr_cache_controller_unittest.cc b/chromium/gpu/command_buffer/service/gr_cache_controller_unittest.cc
index f70344e1454..724f94b2ffc 100644
--- a/chromium/gpu/command_buffer/service/gr_cache_controller_unittest.cc
+++ b/chromium/gpu/command_buffer/service/gr_cache_controller_unittest.cc
@@ -40,7 +40,7 @@ class GrCacheControllerTest : public testing::Test {
context_state_ = base::MakeRefCounted<SharedContextState>(
std::move(share_group), std::move(surface), std::move(context),
false /* use_virtualized_gl_contexts */, base::DoNothing());
- context_state_->InitializeGrContext(workarounds, nullptr);
+ context_state_->InitializeGrContext(GpuPreferences(), workarounds, nullptr);
auto feature_info =
base::MakeRefCounted<gles2::FeatureInfo>(workarounds, GpuFeatureInfo());
context_state_->InitializeGL(GpuPreferences(), std::move(feature_info));
diff --git a/chromium/gpu/command_buffer/service/gr_shader_cache.cc b/chromium/gpu/command_buffer/service/gr_shader_cache.cc
index bbc8b148c32..7cd7a872b6f 100644
--- a/chromium/gpu/command_buffer/service/gr_shader_cache.cc
+++ b/chromium/gpu/command_buffer/service/gr_shader_cache.cc
@@ -126,8 +126,6 @@ void GrShaderCache::PurgeMemory(
switch (memory_pressure_level) {
case base::MemoryPressureListener::MEMORY_PRESSURE_LEVEL_NONE:
- // This function is only called with moderate or critical pressure.
- NOTREACHED();
return;
case base::MemoryPressureListener::MEMORY_PRESSURE_LEVEL_MODERATE:
cache_size_limit_ = cache_size_limit_ / 4;
diff --git a/chromium/gpu/command_buffer/service/image_factory.cc b/chromium/gpu/command_buffer/service/image_factory.cc
index 385c8f2fc32..86db9bad782 100644
--- a/chromium/gpu/command_buffer/service/image_factory.cc
+++ b/chromium/gpu/command_buffer/service/image_factory.cc
@@ -20,6 +20,7 @@ scoped_refptr<gl::GLImage> ImageFactory::CreateAnonymousImage(
const gfx::Size& size,
gfx::BufferFormat format,
gfx::BufferUsage usage,
+ SurfaceHandle surface_handle,
bool* is_cleared) {
NOTREACHED();
return nullptr;
diff --git a/chromium/gpu/command_buffer/service/image_factory.h b/chromium/gpu/command_buffer/service/image_factory.h
index 339ad39ecbe..7cfda32b203 100644
--- a/chromium/gpu/command_buffer/service/image_factory.h
+++ b/chromium/gpu/command_buffer/service/image_factory.h
@@ -38,6 +38,7 @@ class GPU_EXPORT ImageFactory {
const gfx::Size& size,
gfx::BufferFormat format,
gfx::BufferUsage usage,
+ SurfaceHandle surface_handle,
bool* is_cleared);
// An image can only be bound to a texture with the appropriate type.
diff --git a/chromium/gpu/command_buffer/service/image_reader_gl_owner.cc b/chromium/gpu/command_buffer/service/image_reader_gl_owner.cc
index 0e7617f23d1..be8c59c8b06 100644
--- a/chromium/gpu/command_buffer/service/image_reader_gl_owner.cc
+++ b/chromium/gpu/command_buffer/service/image_reader_gl_owner.cc
@@ -93,13 +93,13 @@ ImageReaderGLOwner::ImageReaderGLOwner(
int32_t width = 1, height = 1;
// This should be as small as possible to limit the memory usage.
- // ImageReader needs 2 images to mimic the behavior of SurfaceTexture. For
- // SurfaceControl we need 3 images instead of 2 since 1 frame(and hence image
- // associated with it) will be with system compositor and 2 frames will be in
- // flight. Also note that we always acquire an image before deleting the
- // previous acquired image. This causes 2 acquired images to be in flight at
- // the image acquisition point until the previous image is deleted.
- max_images_ = IsSurfaceControl(mode) ? 3 : 2;
+ // ImageReader needs 1 image to mimic the behavior of SurfaceTexture. Ideally
+ // it should be 2 but that doesn't work on some devices
+ // (see crbug.com/1051705).
+  // For SurfaceControl we need 3 images instead of 2, since 1 frame (and
+  // hence the image associated with it) will be with the system compositor
+  // and 2 frames will be in flight.
+ max_images_ = IsSurfaceControl(mode) ? 3 : 1;
AIMAGE_FORMATS format = mode == Mode::kAImageReaderSecureSurfaceControl
? AIMAGE_FORMAT_PRIVATE
: AIMAGE_FORMAT_YUV_420_888;
@@ -221,7 +221,12 @@ void ImageReaderGLOwner::UpdateTexImage() {
DCHECK(image_reader_);
- // Acquire the latest image asynchronously
+ // Acquire the latest image asynchronously. We must release the current image
+  // before acquiring a new one if the ImageReader was initialized with at
+  // most one outstanding image.
+ if (max_images_ == 1)
+ current_image_ref_.reset();
+
AImage* image = nullptr;
int acquire_fence_fd = -1;
media_status_t return_code = AMEDIA_OK;
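
The comment above describes the release-before-acquire rule for a reader configured with a single outstanding image. A minimal sketch of that rule in isolation, assuming this simplified stand-in rather than ImageReaderGLOwner itself:

#include <memory>

struct AcquiredImage {};  // stand-in for the per-frame image wrapper

class SingleImageReader {
 public:
  void UpdateTexImage() {
    // With only one outstanding image allowed, drop the current image first;
    // otherwise the acquire below could not succeed.
    if (max_images_ == 1)
      current_image_.reset();
    current_image_ = std::make_unique<AcquiredImage>();  // acquire the latest
  }

 private:
  const int max_images_ = 1;
  std::unique_ptr<AcquiredImage> current_image_;
};
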
diff --git a/chromium/gpu/command_buffer/service/image_reader_gl_owner_unittest.cc b/chromium/gpu/command_buffer/service/image_reader_gl_owner_unittest.cc
index 58d8096c0d2..4c28a31e6ac 100644
--- a/chromium/gpu/command_buffer/service/image_reader_gl_owner_unittest.cc
+++ b/chromium/gpu/command_buffer/service/image_reader_gl_owner_unittest.cc
@@ -8,12 +8,10 @@
#include <memory>
#include <utility>
-#include "base/test/scoped_feature_list.h"
#include "base/test/task_environment.h"
#include "gpu/command_buffer/service/abstract_texture.h"
#include "gpu/command_buffer/service/image_reader_gl_owner.h"
#include "gpu/command_buffer/service/mock_abstract_texture.h"
-#include "media/base/media_switches.h"
#include "testing/gtest/include/gtest/gtest.h"
#include "ui/gl/gl_bindings.h"
#include "ui/gl/gl_context_egl.h"
@@ -32,12 +30,9 @@ class ImageReaderGLOwnerTest : public testing::Test {
if (!IsImageReaderSupported())
return;
- scoped_feature_list_.InitAndEnableFeature(media::kAImageReaderVideoOutput);
-
gl::init::InitializeStaticGLBindingsImplementation(
gl::kGLImplementationEGLGLES2, false);
- gl::init::InitializeGLOneOffPlatformImplementation(false, false, false,
- true);
+ gl::init::InitializeGLOneOffPlatformImplementation(false, false, true);
surface_ = new gl::PbufferGLSurfaceEGL(gfx::Size(320, 240));
surface_->Initialize();
@@ -74,7 +69,6 @@ class ImageReaderGLOwnerTest : public testing::Test {
return base::android::AndroidImageReader::GetInstance().IsSupported();
}
- base::test::ScopedFeatureList scoped_feature_list_;
scoped_refptr<TextureOwner> image_reader_;
GLuint texture_id_ = 0;
@@ -147,14 +141,14 @@ TEST_F(ImageReaderGLOwnerTest, DestructionWorksWithWrongContext) {
new_surface = nullptr;
}
-// The max number of images used by the ImageReader must be 2 for non-Surface
+// The max number of images used by the ImageReader must be 1 for non-Surface
// control.
TEST_F(ImageReaderGLOwnerTest, MaxImageExpectation) {
if (!IsImageReaderSupported())
return;
EXPECT_EQ(static_cast<ImageReaderGLOwner*>(image_reader_.get())
->max_images_for_testing(),
- 2);
+ 1);
}
class ImageReaderGLOwnerSecureSurfaceControlTest
diff --git a/chromium/gpu/command_buffer/service/mailbox_manager_dummy.cc b/chromium/gpu/command_buffer/service/mailbox_manager_dummy.cc
new file mode 100644
index 00000000000..72908bae086
--- /dev/null
+++ b/chromium/gpu/command_buffer/service/mailbox_manager_dummy.cc
@@ -0,0 +1,27 @@
+// Copyright 2019 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "gpu/command_buffer/service/mailbox_manager_dummy.h"
+
+#include "base/logging.h"
+#include "gpu/command_buffer/service/texture_base.h"
+
+namespace gpu {
+namespace gles2 {
+
+MailboxManagerDummy::MailboxManagerDummy() = default;
+
+MailboxManagerDummy::~MailboxManagerDummy() = default;
+
+bool MailboxManagerDummy::UsesSync() {
+ return false;
+}
+
+TextureBase* MailboxManagerDummy::ConsumeTexture(const Mailbox& mailbox) {
+ NOTREACHED();
+ return nullptr;
+}
+
+} // namespace gles2
+} // namespace gpu
diff --git a/chromium/gpu/command_buffer/service/mailbox_manager_dummy.h b/chromium/gpu/command_buffer/service/mailbox_manager_dummy.h
new file mode 100644
index 00000000000..32fc1722dc8
--- /dev/null
+++ b/chromium/gpu/command_buffer/service/mailbox_manager_dummy.h
@@ -0,0 +1,38 @@
+// Copyright 2019 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef GPU_COMMAND_BUFFER_SERVICE_MAILBOX_MANAGER_DUMMY_H_
+#define GPU_COMMAND_BUFFER_SERVICE_MAILBOX_MANAGER_DUMMY_H_
+
+#include "gpu/command_buffer/common/mailbox.h"
+#include "gpu/command_buffer/service/mailbox_manager.h"
+#include "gpu/gpu_gles2_export.h"
+
+namespace gpu {
+namespace gles2 {
+
+// Dummy implementation to be used instead of MailboxManagerSync when shared
+// images are enabled on webview. None of the methods below needs to do any
+// work when shared images are enabled, so all of them are no-ops.
+class GPU_GLES2_EXPORT MailboxManagerDummy : public MailboxManager {
+ public:
+ MailboxManagerDummy();
+ ~MailboxManagerDummy() override;
+
+ // MailboxManager implementation:
+ TextureBase* ConsumeTexture(const Mailbox& mailbox) override;
+ void ProduceTexture(const Mailbox& mailbox, TextureBase* texture) override {}
+ bool UsesSync() override;
+ void PushTextureUpdates(const SyncToken& token) override {}
+ void PullTextureUpdates(const SyncToken& token) override {}
+ void TextureDeleted(TextureBase* texture) override {}
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(MailboxManagerDummy);
+};
+
+} // namespace gles2
+} // namespace gpu
+
+#endif // GPU_COMMAND_BUFFER_SERVICE_MAILBOX_MANAGER_DUMMY_H_
diff --git a/chromium/gpu/command_buffer/service/mailbox_manager_factory.cc b/chromium/gpu/command_buffer/service/mailbox_manager_factory.cc
index b8bb0fdea8d..8075a0c36e2 100644
--- a/chromium/gpu/command_buffer/service/mailbox_manager_factory.cc
+++ b/chromium/gpu/command_buffer/service/mailbox_manager_factory.cc
@@ -5,8 +5,11 @@
#include "gpu/command_buffer/service/mailbox_manager_factory.h"
#include "base/command_line.h"
+#include "base/feature_list.h"
+#include "gpu/command_buffer/service/mailbox_manager_dummy.h"
#include "gpu/command_buffer/service/mailbox_manager_impl.h"
#include "gpu/command_buffer/service/mailbox_manager_sync.h"
+#include "gpu/config/gpu_finch_features.h"
#include "gpu/config/gpu_preferences.h"
namespace gpu {
@@ -14,8 +17,15 @@ namespace gles2 {
std::unique_ptr<MailboxManager> CreateMailboxManager(
const GpuPreferences& gpu_preferences) {
- if (gpu_preferences.enable_threaded_texture_mailboxes)
- return std::make_unique<MailboxManagerSync>();
+  // TODO(vikassoni): Once shared images have been completely tested and are
+  // stable on webview, remove MailboxManagerSync and MailboxManagerDummy.
+ if (gpu_preferences.enable_threaded_texture_mailboxes) {
+ if (base::FeatureList::IsEnabled(features::kEnableSharedImageForWebview)) {
+ return std::make_unique<MailboxManagerDummy>();
+ } else {
+ return std::make_unique<MailboxManagerSync>();
+ }
+ }
return std::make_unique<MailboxManagerImpl>();
}
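A minimal sketch (not part of the patch) of how the selection above behaves for a WebView-style configuration; the caller code is hypothetical, while CreateMailboxManager, GpuPreferences and kEnableSharedImageForWebview are the names used in the diff:

    gpu::GpuPreferences prefs;
    prefs.enable_threaded_texture_mailboxes = true;  // WebView-style setup.
    std::unique_ptr<gpu::gles2::MailboxManager> manager =
        gpu::gles2::CreateMailboxManager(prefs);
    // With features::kEnableSharedImageForWebview enabled this returns a
    // MailboxManagerDummy whose Produce/Push/Pull methods are no-ops and whose
    // ConsumeTexture() hits NOTREACHED(), since lookups go through shared
    // images instead. With the feature disabled, MailboxManagerSync is used.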
diff --git a/chromium/gpu/command_buffer/service/memory_program_cache.cc b/chromium/gpu/command_buffer/service/memory_program_cache.cc
index 734436a8ab0..688f4861741 100644
--- a/chromium/gpu/command_buffer/service/memory_program_cache.cc
+++ b/chromium/gpu/command_buffer/service/memory_program_cache.cc
@@ -26,16 +26,6 @@
#include "third_party/zlib/zlib.h"
#include "ui/gl/gl_bindings.h"
-// Macro to help with logging times under 10ms.
-#define UMA_HISTOGRAM_VERY_SHORT_TIMES(name, time_delta) \
- UMA_HISTOGRAM_CUSTOM_COUNTS( \
- name, \
- static_cast<base::HistogramBase::Sample>((time_delta).InMicroseconds()), \
- 1, \
- static_cast<base::HistogramBase::Sample>( \
- base::TimeDelta::FromMilliseconds(10).InMicroseconds()), \
- 50);
-
namespace gpu {
namespace gles2 {
@@ -224,7 +214,6 @@ bool ProgramBinaryExtensionsAvailable() {
// Returns an empty vector if compression fails.
std::vector<uint8_t> CompressData(const std::vector<uint8_t>& data) {
- auto start_time = base::TimeTicks::Now();
uLongf compressed_size = compressBound(data.size());
std::vector<uint8_t> compressed_data(compressed_size);
// Level indicates a trade-off between compression and speed. Level 1
@@ -234,18 +223,12 @@ std::vector<uint8_t> CompressData(const std::vector<uint8_t>& data) {
// It should be impossible for compression to fail with the provided
// parameters.
bool success = Z_OK == result;
- UMA_HISTOGRAM_BOOLEAN("GPU.ProgramCache.CompressDataSuccess", success);
if (!success)
return std::vector<uint8_t>();
compressed_data.resize(compressed_size);
compressed_data.shrink_to_fit();
- UMA_HISTOGRAM_VERY_SHORT_TIMES("GPU.ProgramCache.CompressDataTime",
- base::TimeTicks::Now() - start_time);
- UMA_HISTOGRAM_PERCENTAGE("GPU.ProgramCache.CompressionPercentage",
- (100 * compressed_size) / data.size());
-
return compressed_data;
}
@@ -253,7 +236,6 @@ std::vector<uint8_t> CompressData(const std::vector<uint8_t>& data) {
std::vector<uint8_t> DecompressData(const std::vector<uint8_t>& data,
size_t decompressed_size,
size_t max_size_bytes) {
- auto start_time = base::TimeTicks::Now();
std::vector<uint8_t> decompressed_data(decompressed_size);
uLongf decompressed_size_out =
static_cast<uLongf>(decompressed_size);
@@ -262,13 +244,9 @@ std::vector<uint8_t> DecompressData(const std::vector<uint8_t>& data,
bool success =
result == Z_OK && decompressed_data.size() == decompressed_size_out;
- UMA_HISTOGRAM_BOOLEAN("GPU.ProgramCache.DecompressDataSuccess", success);
if (!success)
return std::vector<uint8_t>();
- UMA_HISTOGRAM_VERY_SHORT_TIMES("GPU.ProgramCache.DecompressDataTime",
- base::TimeTicks::Now() - start_time);
-
return decompressed_data;
}
@@ -428,9 +406,6 @@ void MemoryProgramCache::SaveLinkedProgram(
if (binary.size() > max_size_bytes())
return;
- UMA_HISTOGRAM_COUNTS_1M("GPU.ProgramCache.ProgramBinarySizeBytes",
- binary.size());
-
char a_sha[kHashLength];
char b_sha[kHashLength];
DCHECK(shader_a && !shader_a->last_compiled_source().empty() &&
@@ -449,9 +424,6 @@ void MemoryProgramCache::SaveLinkedProgram(
sha);
const std::string sha_string(sha, sizeof(sha));
- UMA_HISTOGRAM_COUNTS_1M("GPU.ProgramCache.MemorySizeBeforeKb",
- curr_size_bytes_ / 1024);
-
// Evict any cached program with the same key in favor of the least recently
// accessed.
ProgramMRUCache::iterator existing = store_.Peek(sha_string);
@@ -486,9 +458,6 @@ void MemoryProgramCache::SaveLinkedProgram(
shader_b->uniform_map(), shader_b->varying_map(),
shader_b->output_variable_list(), shader_b->interface_block_map(),
this));
-
- UMA_HISTOGRAM_COUNTS_1M("GPU.ProgramCache.MemorySizeAfterKb",
- curr_size_bytes_ / 1024);
}
void MemoryProgramCache::LoadProgram(const std::string& key,
@@ -565,9 +534,6 @@ void MemoryProgramCache::LoadProgram(const std::string& key,
vertex_interface_blocks, proto->fragment_shader().sha().c_str(),
fragment_attribs, fragment_uniforms, fragment_varyings,
fragment_output_variables, fragment_interface_blocks, this));
-
- UMA_HISTOGRAM_COUNTS_1M("GPU.ProgramCache.MemorySizeAfterKb",
- curr_size_bytes_ / 1024);
} else {
DVLOG(2) << "Failed to parse proto file.";
}
diff --git a/chromium/gpu/command_buffer/service/memory_tracking.h b/chromium/gpu/command_buffer/service/memory_tracking.h
index 3ce58ed998b..ea211deddf6 100644
--- a/chromium/gpu/command_buffer/service/memory_tracking.h
+++ b/chromium/gpu/command_buffer/service/memory_tracking.h
@@ -14,6 +14,7 @@
#include "base/memory/ref_counted.h"
#include "base/trace_event/trace_event.h"
#include "gpu/command_buffer/common/command_buffer_id.h"
+#include "gpu/ipc/common/gpu_peak_memory.h"
namespace gpu {
@@ -27,16 +28,18 @@ class MemoryTracker {
Observer() = default;
virtual ~Observer() = default;
- virtual void OnMemoryAllocatedChange(CommandBufferId id,
- uint64_t old_size,
- uint64_t new_size) = 0;
+ virtual void OnMemoryAllocatedChange(
+ CommandBufferId id,
+ uint64_t old_size,
+ uint64_t new_size,
+ GpuPeakMemoryAllocationSource source) = 0;
private:
DISALLOW_COPY_AND_ASSIGN(Observer);
};
virtual ~MemoryTracker() = default;
- virtual void TrackMemoryAllocatedChange(uint64_t delta) = 0;
+ virtual void TrackMemoryAllocatedChange(int64_t delta) = 0;
virtual uint64_t GetSize() const = 0;
// Raw ID identifying the GPU client for whom memory is being allocated.
@@ -58,20 +61,20 @@ class MemoryTypeTracker {
explicit MemoryTypeTracker(MemoryTracker* memory_tracker)
: memory_tracker_(memory_tracker) {}
- ~MemoryTypeTracker() = default;
+ ~MemoryTypeTracker() { DCHECK(!mem_represented_); }
void TrackMemAlloc(size_t bytes) {
+ DCHECK(bytes >= 0);
mem_represented_ += bytes;
if (memory_tracker_ && bytes)
memory_tracker_->TrackMemoryAllocatedChange(bytes);
}
void TrackMemFree(size_t bytes) {
- DCHECK(bytes <= mem_represented_);
+ DCHECK(bytes >= 0 && bytes <= mem_represented_);
mem_represented_ -= bytes;
if (memory_tracker_ && bytes) {
- memory_tracker_->TrackMemoryAllocatedChange(
- -static_cast<uint64_t>(bytes));
+ memory_tracker_->TrackMemoryAllocatedChange(-static_cast<int64_t>(bytes));
}
}
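A minimal sketch (not part of the patch) of the signed-delta contract introduced above; |memory_tracker| stands for any MemoryTracker implementation and the byte counts are hypothetical:

    gpu::MemoryTypeTracker type_tracker(memory_tracker);
    type_tracker.TrackMemAlloc(4096);  // forwards +4096 to the tracker
    type_tracker.TrackMemFree(4096);   // now forwards -4096 as an int64_t delta
    // The new destructor DCHECKs that allocations and frees balanced out
    // (mem_represented_ == 0) before the tracker is destroyed.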
diff --git a/chromium/gpu/command_buffer/service/mocks.h b/chromium/gpu/command_buffer/service/mocks.h
index b55ae6cda81..d01e9b6c4bd 100644
--- a/chromium/gpu/command_buffer/service/mocks.h
+++ b/chromium/gpu/command_buffer/service/mocks.h
@@ -152,7 +152,7 @@ class MockMemoryTracker : public MemoryTracker {
MockMemoryTracker();
~MockMemoryTracker() override;
- MOCK_METHOD1(TrackMemoryAllocatedChange, void(uint64_t delta));
+ MOCK_METHOD1(TrackMemoryAllocatedChange, void(int64_t delta));
uint64_t GetSize() const override { return 0; }
MOCK_CONST_METHOD0(ClientTracingId, uint64_t());
MOCK_CONST_METHOD0(ClientId, int());
diff --git a/chromium/gpu/command_buffer/service/passthrough_discardable_manager.cc b/chromium/gpu/command_buffer/service/passthrough_discardable_manager.cc
index 631aaa46d24..82d433d3de3 100644
--- a/chromium/gpu/command_buffer/service/passthrough_discardable_manager.cc
+++ b/chromium/gpu/command_buffer/service/passthrough_discardable_manager.cc
@@ -7,6 +7,7 @@
#include "gpu/command_buffer/service/context_group.h"
#include "gpu/command_buffer/service/gles2_cmd_decoder_passthrough.h"
#include "gpu/command_buffer/service/service_discardable_manager.h"
+#include "gpu/config/gpu_preferences.h"
namespace gpu {
@@ -25,9 +26,12 @@ PassthroughDiscardableManager::DiscardableCacheValue::operator=(
PassthroughDiscardableManager::DiscardableCacheValue::~DiscardableCacheValue() =
default;
-PassthroughDiscardableManager::PassthroughDiscardableManager()
+PassthroughDiscardableManager::PassthroughDiscardableManager(
+ const GpuPreferences& preferences)
: cache_(DiscardableCache::NO_AUTO_EVICT),
- cache_size_limit_(DiscardableCacheSizeLimit()) {}
+ cache_size_limit_(preferences.force_gpu_mem_discardable_limit_bytes
+ ? preferences.force_gpu_mem_discardable_limit_bytes
+ : DiscardableCacheSizeLimit()) {}
PassthroughDiscardableManager::~PassthroughDiscardableManager() {
DCHECK(cache_.empty());
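A minimal sketch (not part of the patch) of the new constructor parameter; it assumes force_gpu_mem_discardable_limit_bytes defaults to 0, in which case the platform DiscardableCacheSizeLimit() is used:

    GpuPreferences prefs;
    PassthroughDiscardableManager default_limit(prefs);   // platform default

    prefs.force_gpu_mem_discardable_limit_bytes = 4 * 1024 * 1024;
    PassthroughDiscardableManager forced_limit(prefs);    // capped at 4 MB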
diff --git a/chromium/gpu/command_buffer/service/passthrough_discardable_manager.h b/chromium/gpu/command_buffer/service/passthrough_discardable_manager.h
index e4174a97603..c8a566a1830 100644
--- a/chromium/gpu/command_buffer/service/passthrough_discardable_manager.h
+++ b/chromium/gpu/command_buffer/service/passthrough_discardable_manager.h
@@ -11,6 +11,7 @@
#include "gpu/gpu_gles2_export.h"
namespace gpu {
+struct GpuPreferences;
namespace gles2 {
class TexturePassthrough;
class ContextGroup;
@@ -18,7 +19,7 @@ class ContextGroup;
class GPU_GLES2_EXPORT PassthroughDiscardableManager {
public:
- PassthroughDiscardableManager();
+ explicit PassthroughDiscardableManager(const GpuPreferences& preferences);
~PassthroughDiscardableManager();
void InitializeTexture(uint32_t client_id,
diff --git a/chromium/gpu/command_buffer/service/path_manager.cc b/chromium/gpu/command_buffer/service/path_manager.cc
deleted file mode 100644
index e2155270fab..00000000000
--- a/chromium/gpu/command_buffer/service/path_manager.cc
+++ /dev/null
@@ -1,251 +0,0 @@
-// Copyright (c) 2015 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "gpu/command_buffer/service/path_manager.h"
-
-#include <algorithm>
-
-#include "base/logging.h"
-#include "gpu/command_buffer/common/gles2_cmd_utils.h"
-#include "ui/gl/gl_bindings.h"
-
-namespace gpu {
-namespace gles2 {
-
-namespace {
-void CallDeletePaths(GLuint first_id, GLuint range) {
- while (range > 0) {
- GLsizei irange;
- if (range > static_cast<GLuint>(std::numeric_limits<GLsizei>::max()))
- irange = std::numeric_limits<GLsizei>::max();
- else
- irange = static_cast<GLsizei>(range);
-
- glDeletePathsNV(first_id, irange);
- range -= irange;
- first_id += irange;
- }
-}
-
-template <typename RangeIterator>
-static GLuint RangeSize(const RangeIterator& it) {
- return it->second.last_client_id - it->first + 1;
-}
-template <typename RangeIterator>
-static GLuint FirstClientId(const RangeIterator& it) {
- return it->first;
-}
-template <typename RangeIterator>
-static GLuint FirstServiceId(const RangeIterator& it) {
- return it->second.first_service_id;
-}
-template <typename RangeIterator>
-static GLuint LastServiceId(const RangeIterator& it) {
- return FirstServiceId(it) + RangeSize(it) - 1;
-}
-static GLuint LastClientId(PathManager::PathRangeMap::const_iterator& it) {
- return it->second.last_client_id;
-}
-// Note: this one can be assigned to.
-static GLuint& LastClientId(PathManager::PathRangeMap::iterator& it) {
- return it->second.last_client_id;
-}
-
-template <typename T>
-struct IteratorSelector {
- typedef typename T::iterator iterator;
-};
-template <typename T>
-struct IteratorSelector<const T> {
- typedef typename T::const_iterator iterator;
-};
-
-// Returns the range position that contains |client_id| or
-// |PathRangeMap::iterator::end()| if |client_id| is not found.
-template <typename MapType>
-static typename IteratorSelector<MapType>::iterator GetContainingRange(
- MapType& path_map,
- GLuint client_id) {
- auto it = path_map.lower_bound(client_id);
- if (it != path_map.end() && FirstClientId(it) == client_id)
- return it;
- if (it != path_map.begin()) {
- --it;
- if (LastClientId(it) >= client_id)
- return it;
- }
- return path_map.end();
-}
-
-// Returns the range position that contains |client_id|. If that is
-// not available, returns the range that has smallest
-// |first_client_id| that is bigger than |client_id|. Returns
-// |PathRangeMap::iterator::end()| if there is no such range.
-template <typename MapType>
-static typename IteratorSelector<MapType>::iterator GetContainingOrNextRange(
- MapType& path_map,
- GLuint client_id) {
- auto it = path_map.lower_bound(client_id);
- if (it != path_map.end() && FirstClientId(it) == client_id) {
- return it;
- }
- if (it != path_map.begin()) {
- --it;
- if (LastClientId(it) >= client_id)
- return it;
- ++it;
- }
- return it;
-}
-
-} // anonymous namespace
-
-PathManager::PathManager() = default;
-
-PathManager::~PathManager() {
- DCHECK(path_map_.empty());
-}
-
-void PathManager::Destroy(bool have_context) {
- if (have_context) {
- for (PathRangeMap::const_iterator it = path_map_.begin();
- it != path_map_.end(); ++it)
- CallDeletePaths(FirstServiceId(it), RangeSize(it));
- }
- path_map_.clear();
-}
-
-void PathManager::CreatePathRange(GLuint first_client_id,
- GLuint last_client_id,
- GLuint first_service_id) {
- DCHECK(first_service_id > 0u);
- DCHECK(first_client_id > 0u);
- DCHECK(!HasPathsInRange(first_client_id, last_client_id));
- DCHECK(CheckConsistency());
-
- PathRangeMap::iterator range =
- GetContainingRange(path_map_, first_client_id - 1u);
-
- if (range != path_map_.end() &&
- LastServiceId(range) == first_service_id - 1u) {
- DCHECK_EQ(LastClientId(range), first_client_id - 1u);
- LastClientId(range) = last_client_id;
- } else {
- auto result = path_map_.insert(
- std::make_pair(first_client_id,
- PathRangeDescription(last_client_id, first_service_id)));
- DCHECK(result.second);
- range = result.first;
- }
-
- PathRangeMap::iterator next_range = range;
- ++next_range;
- if (next_range != path_map_.end()) {
- if (LastClientId(range) == FirstClientId(next_range) - 1u &&
- LastServiceId(range) == FirstServiceId(next_range) - 1u) {
- LastClientId(range) = LastClientId(next_range);
- path_map_.erase(next_range);
- }
- }
- DCHECK(CheckConsistency());
-}
-
-bool PathManager::HasPathsInRange(GLuint first_client_id,
- GLuint last_client_id) const {
- PathRangeMap::const_iterator it =
- GetContainingOrNextRange(path_map_, first_client_id);
- if (it == path_map_.end())
- return false;
-
- return FirstClientId(it) <= last_client_id;
-}
-
-bool PathManager::GetPath(GLuint client_id, GLuint* service_id) const {
- PathRangeMap::const_iterator range = GetContainingRange(path_map_, client_id);
- if (range == path_map_.end())
- return false;
-
- *service_id = FirstServiceId(range) + client_id - FirstClientId(range);
- return true;
-}
-
-void PathManager::RemovePaths(GLuint first_client_id, GLuint last_client_id) {
- DCHECK(CheckConsistency());
- PathRangeMap::iterator it =
- GetContainingOrNextRange(path_map_, first_client_id);
-
- while (it != path_map_.end() && FirstClientId(it) <= last_client_id) {
- GLuint delete_first_client_id =
- std::max(first_client_id, FirstClientId(it));
- GLuint delete_last_client_id = std::min(last_client_id, LastClientId(it));
- GLuint delete_first_service_id =
- FirstServiceId(it) + delete_first_client_id - FirstClientId(it);
- GLuint delete_range = delete_last_client_id - delete_first_client_id + 1u;
-
- CallDeletePaths(delete_first_service_id, delete_range);
-
- PathRangeMap::iterator current = it;
- ++it;
-
- GLuint current_last_client_id = LastClientId(current);
-
- if (FirstClientId(current) < delete_first_client_id)
- LastClientId(current) = delete_first_client_id - 1u;
- else
- path_map_.erase(current);
-
- if (current_last_client_id > delete_last_client_id) {
- path_map_.insert(std::make_pair(
- delete_last_client_id + 1u,
- PathRangeDescription(current_last_client_id,
- delete_first_service_id + delete_range)));
- DCHECK(delete_last_client_id == last_client_id);
- // This is necessarily the last range to check. Return early due to
- // consistency. Iterator increment would skip the inserted range. The
- // algorithm would work ok, but it looks weird.
- DCHECK(CheckConsistency());
- return;
- }
- }
- DCHECK(CheckConsistency());
-}
-
-bool PathManager::CheckConsistency() {
- GLuint prev_first_client_id = 0u;
- GLuint prev_last_client_id = 0u;
- GLuint prev_first_service_id = 0u;
- for (PathRangeMap::iterator range = path_map_.begin();
- range != path_map_.end(); ++range) {
- // Code relies on ranges not starting at 0. Also, the above initialization
- // is only
- // correct then.
- if (FirstClientId(range) == 0u || FirstServiceId(range) == 0u)
- return false;
-
- // Each range is consistent.
- if (FirstClientId(range) > LastClientId(range))
- return false;
-
- if (prev_first_client_id != 0u) {
- // No overlapping ranges. (The iteration is sorted).
- if (FirstClientId(range) <= prev_last_client_id)
- return false;
-
- // No mergeable ranges.
- bool is_mergeable_client =
- FirstClientId(range) - 1u == prev_last_client_id;
- bool is_mergeable_service =
- FirstServiceId(range) - 1u == prev_first_service_id;
- if (is_mergeable_client && is_mergeable_service)
- return false;
- }
- prev_first_client_id = FirstClientId(range);
- prev_last_client_id = LastClientId(range);
- prev_first_service_id = FirstServiceId(range);
- }
- return true;
-}
-
-} // namespace gles2
-} // namespace gpu
diff --git a/chromium/gpu/command_buffer/service/path_manager.h b/chromium/gpu/command_buffer/service/path_manager.h
deleted file mode 100644
index 840f6217252..00000000000
--- a/chromium/gpu/command_buffer/service/path_manager.h
+++ /dev/null
@@ -1,68 +0,0 @@
-// Copyright (c) 2015 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef GPU_COMMAND_BUFFER_SERVICE_PATH_MANAGER_H_
-#define GPU_COMMAND_BUFFER_SERVICE_PATH_MANAGER_H_
-
-#include <map>
-
-#include "base/macros.h"
-#include "gpu/command_buffer/service/gl_utils.h"
-#include "gpu/gpu_gles2_export.h"
-
-namespace gpu {
-namespace gles2 {
-
-// This class keeps track of paths and their client and service ids
-// so we can correctly clear them.
-class GPU_GLES2_EXPORT PathManager {
- public:
- PathManager();
- ~PathManager();
-
- // Must call before destruction.
- void Destroy(bool have_context);
-
- // Creates a path mapping between closed intervals
- // [first_client_id, last_client_id] -> [first_service_id, ...].
- void CreatePathRange(GLuint first_client_id,
- GLuint last_client_id,
- GLuint first_service_id);
-
- // Returns true if any path exists in the closed interval
- // [first_client_id, last_client_id].
- bool HasPathsInRange(GLuint first_client_id, GLuint last_client_id) const;
-
- // Gets the path id corresponding the client path id.
- // Returns false if no such service path id was not found.
- bool GetPath(GLuint client_id, GLuint* service_id) const;
-
- // Removes a closed interval of paths [first_client_id, last_client_id].
- void RemovePaths(GLuint first_client_id, GLuint last_client_id);
-
- // Mapping between client id and service id.
- // Should be used only by the implementation.
- struct PathRangeDescription {
- PathRangeDescription(GLuint last_client, GLuint first_service)
- : last_client_id(last_client), first_service_id(first_service) {}
- GLuint last_client_id;
- GLuint first_service_id;
- typedef GLuint ServiceIdType;
- };
- typedef std::map<GLuint, PathRangeDescription> PathRangeMap;
-
- private:
- // Checks for consistency inside the book-keeping structures. Used as
- // DCHECK pre/post condition in mutating functions.
- bool CheckConsistency();
-
- PathRangeMap path_map_;
-
- DISALLOW_COPY_AND_ASSIGN(PathManager);
-};
-
-} // namespace gles2
-} // namespace gpu
-
-#endif // GPU_COMMAND_BUFFER_SERVICE_PATH_MANAGER_H_
diff --git a/chromium/gpu/command_buffer/service/path_manager_unittest.cc b/chromium/gpu/command_buffer/service/path_manager_unittest.cc
deleted file mode 100644
index e1173d2b7df..00000000000
--- a/chromium/gpu/command_buffer/service/path_manager_unittest.cc
+++ /dev/null
@@ -1,165 +0,0 @@
-// Copyright (c) 2015 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "gpu/command_buffer/service/path_manager.h"
-
-#include <memory>
-
-#include "gpu/command_buffer/service/gpu_service_test.h"
-#include "gpu/command_buffer/service/mocks.h"
-#include "testing/gtest/include/gtest/gtest.h"
-#include "ui/gl/gl_mock.h"
-
-namespace gpu {
-namespace gles2 {
-
-class PathManagerTest : public GpuServiceTest {
- public:
- PathManagerTest() = default;
-
- protected:
- void SetUp() override {
- SetUpWithGLVersion("3.0", "GL_NV_path_rendering");
- manager_.reset(new PathManager());
- }
-
- void TearDown() override {
- manager_->Destroy(true);
- manager_.reset();
- GpuServiceTest::TearDown();
- }
-
- std::unique_ptr<PathManager> manager_;
-};
-
-TEST_F(PathManagerTest, Basic) {
- const GLuint kClient1Id = 1;
- const GLuint kService1Id = 11;
- const GLuint kClient2Id = 2;
- GLuint service_id = 0;
- manager_->CreatePathRange(kClient1Id, kClient1Id, kService1Id);
- ASSERT_TRUE(manager_->HasPathsInRange(kClient1Id, kClient1Id));
- EXPECT_TRUE(manager_->GetPath(kClient1Id, &service_id));
- EXPECT_EQ(kService1Id, service_id);
-
- // Check we get nothing for a non-existent path.
- service_id = 123u;
- ASSERT_FALSE(manager_->HasPathsInRange(kClient2Id, kClient2Id));
- EXPECT_FALSE(manager_->GetPath(kClient2Id, &service_id));
- EXPECT_EQ(123u, service_id);
-
- // Check trying to remove non-existent paths does not crash.
- manager_->RemovePaths(kClient2Id, kClient2Id);
-
- // Check that it gets deleted when the last reference is released.
- EXPECT_CALL(*gl_, DeletePathsNV(kService1Id, 1))
- .Times(1)
- .RetiresOnSaturation();
-
- // Check we can't get the path after we remove it.
- manager_->RemovePaths(kClient1Id, kClient1Id);
- ASSERT_FALSE(manager_->HasPathsInRange(kClient1Id, kClient1Id));
- EXPECT_FALSE(manager_->GetPath(kClient1Id, &service_id));
-}
-
-// Tests that path manager does not merge ranges that contain service ids that
-// prevent the merging. Path ranges A and B can be merged if
-// * client ids of B start immediately after the last client id of A
-// * service ids of B start immediately after the last service id of A
-// and similarly for the 'before' case.
-TEST_F(PathManagerTest, NonContiguousServiceIds) {
- const GLuint kMergeCheckRange = 54;
-
- const struct {
- GLuint first_client_id;
- GLuint last_client_id;
- GLuint first_service_id;
- } kIdRanges[] = {{500, 1000, 900}, {1001, 1155, 1}, {200, 499, 4888}};
- for (auto& range : kIdRanges) {
- manager_->CreatePathRange(range.first_client_id, range.last_client_id,
- range.first_service_id);
- ASSERT_TRUE(manager_->HasPathsInRange(range.first_client_id,
- range.first_client_id));
- ASSERT_TRUE(
- manager_->HasPathsInRange(range.last_client_id, range.last_client_id));
- ASSERT_TRUE(
- manager_->HasPathsInRange(range.first_client_id, range.last_client_id));
- GLuint service_id = 0u;
- EXPECT_TRUE(manager_->GetPath(range.first_client_id + 5u, &service_id));
- EXPECT_EQ(range.first_service_id + 5u, service_id);
- }
-
- // Insert a mergeable range last, to check that merges
- // work. Otherwise the test could succeed because merges were not
- // working.
- auto& merge_candidate = kIdRanges[1];
- GLuint merge_candidate_range =
- merge_candidate.last_client_id - merge_candidate.first_client_id + 1;
- manager_->CreatePathRange(
- merge_candidate.last_client_id + 1,
- merge_candidate.last_client_id + kMergeCheckRange,
- merge_candidate.first_service_id + merge_candidate_range);
-
- // We detect that ranges were not merged accidentally by detecting individual
- // deletes.
- for (auto& range : kIdRanges) {
- if (&range == &merge_candidate)
- continue;
- GLsizei range_amount = range.last_client_id - range.first_client_id + 1;
- EXPECT_CALL(*gl_, DeletePathsNV(range.first_service_id, range_amount))
- .Times(1)
- .RetiresOnSaturation();
- }
-
- // Just a check that merges work.
- EXPECT_CALL(*gl_, DeletePathsNV(merge_candidate.first_service_id,
- merge_candidate_range + kMergeCheckRange))
- .Times(1)
- .RetiresOnSaturation();
-
- // Remove all ids. This should cause the expected amount of DeletePathsNV
- // calls.
- manager_->RemovePaths(1, std::numeric_limits<GLsizei>::max());
-
- for (auto& range : kIdRanges) {
- ASSERT_FALSE(
- manager_->HasPathsInRange(range.first_client_id, range.last_client_id));
- }
-}
-
-TEST_F(PathManagerTest, DeleteBigRange) {
- // Allocates two ranges which in path manager end up merging as one
- // big range. The range will be too big to fit in one DeletePaths
- // call. Test that the range is deleted correctly with two calls.
- const GLuint kFirstClientId1 = 1;
- const GLsizei kRange1 = std::numeric_limits<GLsizei>::max() - 3;
- const GLuint kLastClientId1 = kFirstClientId1 + kRange1 - 1;
- const GLuint kFirstServiceId1 = 77;
- const GLuint kLastServiceId1 = kFirstServiceId1 + kRange1 - 1;
-
- const GLuint kFirstClientId2 = kLastClientId1 + 1;
- const GLsizei kRange2 = 15;
- const GLuint kLastClientId2 = kFirstClientId2 + kRange2 - 1;
- const GLuint kFirstServiceId2 = kLastServiceId1 + 1;
-
- const GLsizei kFirstDeleteRange = std::numeric_limits<GLsizei>::max();
- const GLsizei kSecondDeleteRange = kRange2 - (kFirstDeleteRange - kRange1);
- const GLuint kSecondDeleteFirstServiceId =
- kFirstServiceId1 + kFirstDeleteRange;
-
- EXPECT_CALL(*gl_, DeletePathsNV(kFirstServiceId1,
- std::numeric_limits<GLsizei>::max()))
- .RetiresOnSaturation();
-
- EXPECT_CALL(*gl_, DeletePathsNV(kSecondDeleteFirstServiceId,
- kSecondDeleteRange)).RetiresOnSaturation();
-
- manager_->CreatePathRange(kFirstClientId1, kLastClientId1, kFirstServiceId1);
- manager_->CreatePathRange(kFirstClientId2, kLastClientId2, kFirstServiceId2);
- manager_->RemovePaths(0, std::numeric_limits<GLuint>::max());
-}
-
-} // namespace gles2
-
-} // namespace gpu
diff --git a/chromium/gpu/command_buffer/service/program_cache.cc b/chromium/gpu/command_buffer/service/program_cache.cc
index 7e76fdae38d..50858c6b082 100644
--- a/chromium/gpu/command_buffer/service/program_cache.cc
+++ b/chromium/gpu/command_buffer/service/program_cache.cc
@@ -200,9 +200,10 @@ void ProgramCache::ComputeProgramHash(
void ProgramCache::HandleMemoryPressure(
base::MemoryPressureListener::MemoryPressureLevel memory_pressure_level) {
- // This is only called with moderate or critical pressure.
- DCHECK_NE(memory_pressure_level,
- base::MemoryPressureListener::MEMORY_PRESSURE_LEVEL_NONE);
+ if (memory_pressure_level ==
+ base::MemoryPressureListener::MEMORY_PRESSURE_LEVEL_NONE) {
+ return;
+ }
// Set a low limit on cache size for MEMORY_PRESSURE_LEVEL_MODERATE.
size_t limit = max_size_bytes_ / 4;
@@ -211,12 +212,7 @@ void ProgramCache::HandleMemoryPressure(
limit = 0;
}
- size_t bytes_freed = Trim(limit);
- if (bytes_freed > 0) {
- UMA_HISTOGRAM_COUNTS_100000(
- "GPU.ProgramCache.MemoryReleasedOnPressure",
- static_cast<base::HistogramBase::Sample>(bytes_freed) / 1024);
- }
+ Trim(limit);
}
} // namespace gles2
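A worked example (not part of the patch) of the trim targets above, assuming a hypothetical max_size_bytes_ of 2 MB:

    // MEMORY_PRESSURE_LEVEL_NONE     -> early return, nothing trimmed
    // MEMORY_PRESSURE_LEVEL_MODERATE -> Trim(max_size_bytes_ / 4) == Trim(512 KB)
    // MEMORY_PRESSURE_LEVEL_CRITICAL -> Trim(0), the cache is emptied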
diff --git a/chromium/gpu/command_buffer/service/program_manager.cc b/chromium/gpu/command_buffer/service/program_manager.cc
index 5f82dcb65b0..4574d33f211 100644
--- a/chromium/gpu/command_buffer/service/program_manager.cc
+++ b/chromium/gpu/command_buffer/service/program_manager.cc
@@ -430,8 +430,6 @@ void Program::Reset() {
attrib_infos_.clear();
uniform_infos_.clear();
uniform_locations_.clear();
- fragment_input_infos_.clear();
- fragment_input_locations_.clear();
program_output_infos_.clear();
sampler_indices_.clear();
attrib_location_to_index_map_.clear();
@@ -853,7 +851,6 @@ void Program::Update() {
}
}
- UpdateFragmentInputs();
UpdateProgramOutputs();
UpdateFragmentOutputBaseTypes();
UpdateVertexInputBaseTypes();
@@ -1042,134 +1039,6 @@ bool Program::UpdateUniforms() {
return true;
}
-void Program::UpdateFragmentInputs() {
- if (!feature_info().feature_flags().chromium_path_rendering)
- return;
- for (const auto& binding : bind_fragment_input_location_map_) {
- if (binding.second < 0)
- continue;
- size_t client_location = static_cast<size_t>(binding.second);
- if (fragment_input_locations_.size() <= client_location)
- fragment_input_locations_.resize(client_location + 1);
- fragment_input_locations_[client_location].SetInactive();
- }
-
- GLint num_fragment_inputs = 0;
- glGetProgramInterfaceiv(service_id_, GL_FRAGMENT_INPUT_NV,
- GL_ACTIVE_RESOURCES, &num_fragment_inputs);
- if (num_fragment_inputs <= 0)
- return;
-
- GLint max_len = 0;
- glGetProgramInterfaceiv(service_id_, GL_FRAGMENT_INPUT_NV, GL_MAX_NAME_LENGTH,
- &max_len);
- DCHECK(max_len > 0);
-
- std::unique_ptr<char[]> name_buffer(new char[max_len]);
-
- Shader* fragment_shader =
- shaders_from_last_successful_link_[ShaderTypeToIndex(GL_FRAGMENT_SHADER)]
- .get();
-
- const GLenum kQueryProperties[] = {GL_LOCATION, GL_TYPE, GL_ARRAY_SIZE};
-
- std::vector<size_t> client_location_indices;
- for (GLint ii = 0; ii < num_fragment_inputs; ++ii) {
- GLsizei name_length = 0;
- glGetProgramResourceName(service_id_, GL_FRAGMENT_INPUT_NV, ii, max_len,
- &name_length, name_buffer.get());
- DCHECK(name_length < max_len);
- DCHECK(name_length == 0 || name_buffer[name_length] == '\0');
- // A fragment shader can have gl_FragCoord, gl_FrontFacing or gl_PointCoord
- // built-ins as its input, as well as custom varyings. We are interested in
- // custom varyings, client is allowed to bind only them.
- std::string service_name(name_buffer.get(), name_length);
- if (ProgramManager::HasBuiltInPrefix(service_name))
- continue;
- // Unlike when binding uniforms, we expect the driver to give correct
- // names: "name" for simple variable, "name[0]" for an array.
- GLsizei query_length = 0;
- GLint query_results[base::size(kQueryProperties)] = {
- 0,
- };
- glGetProgramResourceiv(service_id_, GL_FRAGMENT_INPUT_NV, ii,
- base::size(kQueryProperties), kQueryProperties,
- base::size(query_results), &query_length,
- query_results);
- DCHECK(query_length == base::size(kQueryProperties));
-
- GLenum type = static_cast<GLenum>(query_results[1]);
- GLsizei size = static_cast<GLsizei>(query_results[2]);
- std::string client_name;
-
- const sh::Varying* varying = fragment_shader->GetVaryingInfo(service_name);
- const sh::ShaderVariable* info = nullptr;
- if (varying &&
- varying->findInfoByMappedName(service_name, &info, &client_name)) {
- type = info->type;
- size = std::max(1u, info->getOutermostArraySize());
- } else {
- // Should only happen if there are major bugs in the driver, ANGLE or if
- // the shader translator is disabled.
- DCHECK(feature_info().disable_shader_translator());
- client_name = service_name;
- if (size <= 0)
- continue;
- }
-
- auto it = bind_fragment_input_location_map_.find(client_name);
- if (it != bind_fragment_input_location_map_.end() && it->second >= 0 &&
- query_results[0] >= 0) {
- size_t client_location = static_cast<size_t>(it->second);
- GLuint service_location = static_cast<GLuint>(query_results[0]);
- fragment_input_infos_.push_back(
- FragmentInputInfo(type, service_location));
- client_location_indices.push_back(client_location);
- }
-
- if (size <= 1)
- continue;
- GLSLArrayName parsed_client_name(client_name);
- GLSLArrayName parsed_service_name(service_name);
- if (!parsed_client_name.IsArrayName() ||
- parsed_client_name.element_index() != 0 ||
- !parsed_service_name.IsArrayName() ||
- parsed_service_name.element_index() != 0) {
- NOTREACHED() << "GLSL array variable names should end with \"[0]\". "
- "Likely driver or ANGLE error.";
- continue;
- }
-
- for (GLsizei jj = 1; jj < size; ++jj) {
- std::string array_spec(std::string("[") + base::NumberToString(jj) + "]");
- std::string client_element_name =
- parsed_client_name.base_name() + array_spec;
-
- it = bind_fragment_input_location_map_.find(client_element_name);
- if (it != bind_fragment_input_location_map_.end() && it->second >= 0) {
- size_t client_location = static_cast<size_t>(it->second);
- std::string service_element_name =
- parsed_service_name.base_name() + array_spec;
- GLint service_location = glGetProgramResourceLocation(
- service_id_, GL_FRAGMENT_INPUT_NV, service_element_name.c_str());
- if (service_location >= 0) {
- fragment_input_infos_.push_back(
- FragmentInputInfo(type, static_cast<GLuint>(service_location)));
- client_location_indices.push_back(client_location);
- }
- }
- }
- }
- for (size_t i = 0; i < client_location_indices.size(); ++i) {
- size_t client_location = client_location_indices[i];
- // Before linking, we already validated that no two statically used fragment
- // inputs are bound to the same location.
- DCHECK(!fragment_input_locations_[client_location].IsActive());
- fragment_input_locations_[client_location].SetActive(
- &fragment_input_infos_[i]);
- }
-}
-
void Program::UpdateProgramOutputs() {
if (!feature_info().gl_version_info().is_es3_capable ||
feature_info().disable_shader_translator())
@@ -1398,7 +1267,6 @@ bool Program::Link(ShaderManager* manager,
&bind_attrib_location_map_, transform_feedback_varyings_,
transform_feedback_buffer_mode_, client);
link = success != ProgramCache::PROGRAM_LOAD_SUCCESS;
- UMA_HISTOGRAM_BOOLEAN("GPU.ProgramCache.LoadBinarySuccess", !link);
}
}
@@ -1444,10 +1312,6 @@ bool Program::Link(ShaderManager* manager,
set_log_info(ProcessLogInfo(info_log).c_str());
return false;
}
- if (DetectFragmentInputLocationBindingConflicts()) {
- set_log_info("glBindFragmentInputLocationCHROMIUM() conflicts");
- return false;
- }
if (DetectProgramOutputLocationBindingConflicts()) {
set_log_info("glBindFragDataLocation() conflicts");
return false;
@@ -1687,28 +1551,6 @@ const sh::InterfaceBlock* Program::GetInterfaceBlockInfo(
return nullptr;
}
-const Program::FragmentInputInfo* Program::GetFragmentInputInfoByFakeLocation(
- GLint fake_location) const {
- if (fake_location < 0)
- return nullptr;
- size_t location_index = static_cast<size_t>(fake_location);
- if (location_index >= fragment_input_locations_.size())
- return nullptr;
- if (!fragment_input_locations_[location_index].IsActive())
- return nullptr;
- return fragment_input_locations_[location_index].shader_variable();
-}
-
-bool Program::IsInactiveFragmentInputLocationByFakeLocation(
- GLint fake_location) const {
- if (fake_location < 0)
- return true;
- size_t location_index = static_cast<size_t>(fake_location);
- if (location_index >= fragment_input_locations_.size())
- return false;
- return fragment_input_locations_[location_index].IsInactive();
-}
-
bool Program::SetUniformLocationBinding(
const std::string& name, GLint location) {
std::string short_name;
@@ -1721,21 +1563,6 @@ bool Program::SetUniformLocationBinding(
return true;
}
-void Program::SetFragmentInputLocationBinding(const std::string& name,
- GLint location) {
- // The client wants to bind either "name" or "name[0]".
- // GL ES 3.1 spec refers to active array names with language such as:
- // "if the string identifies the base name of an active array, where the
- // string would exactly match the name of the variable if the suffix "[0]"
- // were appended to the string".
-
- // At this point we can not know if the string identifies a simple variable,
- // a base name of an array, or nothing. Store both, so if user overwrites
- // either, both still work correctly.
- bind_fragment_input_location_map_[name] = location;
- bind_fragment_input_location_map_[name + "[0]"] = location;
-}
-
void Program::SetProgramOutputLocationBinding(const std::string& name,
GLuint color_name) {
SetProgramOutputLocationIndexedBinding(name, color_name, 0);
@@ -2068,28 +1895,6 @@ bool Program::DetectVaryingsMismatch(std::string* conflicting_name) const {
return false;
}
-bool Program::DetectFragmentInputLocationBindingConflicts() const {
- auto* shader = attached_shaders_[ShaderTypeToIndex(GL_FRAGMENT_SHADER)].get();
- if (!shader || !shader->valid())
- return false;
-
- std::set<GLint> location_binding_used;
- for (auto it : bind_fragment_input_location_map_) {
- // Find out if an fragment input is statically used in this program's
- // shaders.
- const std::string* mapped_name = shader->GetVaryingMappedName(it.first);
- if (!mapped_name)
- continue;
- const sh::Varying* fragment_input = shader->GetVaryingInfo(*mapped_name);
- if (fragment_input && fragment_input->staticUse) {
- auto result = location_binding_used.insert(it.second);
- if (!result.second)
- return true;
- }
- }
- return false;
-}
-
bool Program::DetectProgramOutputLocationBindingConflicts() const {
if (feature_info().disable_shader_translator()) {
return false;
diff --git a/chromium/gpu/command_buffer/service/program_manager.h b/chromium/gpu/command_buffer/service/program_manager.h
index 6c7d57bcfb0..800de937f5d 100644
--- a/chromium/gpu/command_buffer/service/program_manager.h
+++ b/chromium/gpu/command_buffer/service/program_manager.h
@@ -77,13 +77,6 @@ class GPU_GLES2_EXPORT Program : public base::RefCounted<Program> {
enum VaryingsPackingOption { kCountOnlyStaticallyUsed, kCountAll };
- struct FragmentInputInfo {
- FragmentInputInfo(GLenum _type, GLuint _location)
- : type(_type), location(_location) {}
- FragmentInputInfo() : type(GL_NONE), location(0) {}
- GLenum type;
- GLuint location;
- };
struct ProgramOutputInfo {
ProgramOutputInfo(GLuint _color_name,
GLuint _index,
@@ -195,9 +188,6 @@ class GPU_GLES2_EXPORT Program : public base::RefCounted<Program> {
typedef std::vector<ShaderVariableLocationEntry<UniformInfo>>
UniformLocationVector;
typedef std::vector<VertexAttrib> AttribInfoVector;
- typedef std::vector<FragmentInputInfo> FragmentInputInfoVector;
- typedef std::vector<ShaderVariableLocationEntry<FragmentInputInfo>>
- FragmentInputLocationVector;
typedef std::vector<ProgramOutputInfo> ProgramOutputInfoVector;
typedef std::vector<int> SamplerIndices;
typedef std::map<std::string, GLint> LocationMap;
@@ -258,11 +248,6 @@ class GPU_GLES2_EXPORT Program : public base::RefCounted<Program> {
const sh::InterfaceBlock* GetInterfaceBlockInfo(
const std::string& hashed_name) const;
- const FragmentInputInfo* GetFragmentInputInfoByFakeLocation(
- GLint fake_location) const;
-
- bool IsInactiveFragmentInputLocationByFakeLocation(GLint fake_location) const;
-
// Gets the fake location of a uniform by name.
GLint GetUniformFakeLocation(const std::string& name) const;
@@ -360,10 +345,6 @@ class GPU_GLES2_EXPORT Program : public base::RefCounted<Program> {
// Detects if the shader version combination is not valid.
bool DetectShaderVersionMismatch() const;
- // Sets fragment input-location binding from a
- // glBindFragmentInputLocationCHROMIUM() call.
- void SetFragmentInputLocationBinding(const std::string& name, GLint location);
-
// Sets program output variable location. Also sets color index to zero.
void SetProgramOutputLocationBinding(const std::string& name,
GLuint colorName);
@@ -397,11 +378,6 @@ class GPU_GLES2_EXPORT Program : public base::RefCounted<Program> {
// is not declared in vertex shader.
bool DetectVaryingsMismatch(std::string* conflicting_name) const;
- // Detects if there are fragment input location conflicts from
- // glBindFragmentInputLocationCHROMIUM() calls.
- // We only consider the statically used fragment inputs in the program.
- bool DetectFragmentInputLocationBindingConflicts() const;
-
// Detects if there are program output location conflicts from
// glBindFragDataLocation and ..LocationIndexedEXT calls.
// We only consider the statically used program outputs in the program.
@@ -510,7 +486,6 @@ class GPU_GLES2_EXPORT Program : public base::RefCounted<Program> {
// Updates the program info after a successful link.
void Update();
bool UpdateUniforms();
- void UpdateFragmentInputs();
void UpdateProgramOutputs();
void UpdateFragmentOutputBaseTypes();
void UpdateVertexInputBaseTypes();
@@ -589,9 +564,6 @@ class GPU_GLES2_EXPORT Program : public base::RefCounted<Program> {
// The indices of the uniforms that are samplers.
SamplerIndices sampler_indices_;
- FragmentInputInfoVector fragment_input_infos_;
- FragmentInputLocationVector fragment_input_locations_;
-
ProgramOutputInfoVector program_output_infos_;
// The program this Program is tracking.
@@ -642,10 +614,6 @@ class GPU_GLES2_EXPORT Program : public base::RefCounted<Program> {
// is calculated at DrawArrays{Instanced} time by multiplying vertex count.
std::vector<GLsizeiptr> transform_feedback_data_size_per_vertex_;
- // Fragment input-location binding map from
- // glBindFragmentInputLocationCHROMIUM() calls.
- LocationMap bind_fragment_input_location_map_;
-
// output variable - (location,index) binding map from
// glBindFragDataLocation() and ..IndexedEXT() calls.
LocationIndexMap bind_program_output_location_index_map_;
diff --git a/chromium/gpu/command_buffer/service/program_manager_unittest.cc b/chromium/gpu/command_buffer/service/program_manager_unittest.cc
index adc65411643..a3476cb36a3 100644
--- a/chromium/gpu/command_buffer/service/program_manager_unittest.cc
+++ b/chromium/gpu/command_buffer/service/program_manager_unittest.cc
@@ -2402,140 +2402,6 @@ TEST_F(ProgramManagerWithCacheTest, RelinkOnChangedCompileOptions) {
EXPECT_TRUE(program_->Link(nullptr, Program::kCountOnlyStaticallyUsed, this));
}
-class ProgramManagerWithPathRenderingTest
- : public ProgramManagerWithShaderTest,
- public testing::WithParamInterface<
- testing::tuple<const char*, const char*>> {
- protected:
- void SetUp() override {
- SetUpBase(testing::get<0>(GetParam()), testing::get<1>(GetParam()));
- }
- static const char* kFragmentInput1Name;
- static const char* kFragmentInput2Name;
- // Name that GL reports for input 2. Needed because input 2 is an
- // array.
- static const char* kFragmentInput2GLName;
- static const char* kFragmentInput3Name;
- static const char* kFragmentInput3GLName;
- static const GLint kFragmentInput1Size = 1;
- static const GLint kFragmentInput2Size = 3;
- static const GLint kFragmentInput3Size = 2;
- static const int kFragmentInput1Precision = GL_LOW_FLOAT;
- static const int kFragmentInput2Precision = GL_MEDIUM_INT;
- static const int kFragmentInput3Precision = GL_HIGH_FLOAT;
- static const int kFragmentInput1StaticUse = 1;
- static const int kFragmentInput2StaticUse = 1;
- static const int kFragmentInput3StaticUse = 1;
- static const GLint kFragmentInput1FakeLocation = 0;
- static const GLint kFragmentInput2FakeLocation = 1;
- static const GLint kFragmentInput3FakeLocation = 2;
- static const GLint kFragmentInput1RealLocation = 11;
- static const GLint kFragmentInput2RealLocation = 22;
- static const GLint kFragmentInput3RealLocation = 33;
- static const GLenum kFragmentInput1Type = GL_FLOAT_VEC4;
- static const GLenum kFragmentInput2Type = GL_INT_VEC2;
- static const GLenum kFragmentInput3Type = GL_FLOAT_VEC3;
-};
-#ifndef COMPILER_MSVC
-const GLint ProgramManagerWithPathRenderingTest::kFragmentInput1Size;
-const GLint ProgramManagerWithPathRenderingTest::kFragmentInput2Size;
-const GLint ProgramManagerWithPathRenderingTest::kFragmentInput3Size;
-const int ProgramManagerWithPathRenderingTest::kFragmentInput1Precision;
-const int ProgramManagerWithPathRenderingTest::kFragmentInput2Precision;
-const int ProgramManagerWithPathRenderingTest::kFragmentInput3Precision;
-const int ProgramManagerWithPathRenderingTest::kFragmentInput1StaticUse;
-const int ProgramManagerWithPathRenderingTest::kFragmentInput2StaticUse;
-const int ProgramManagerWithPathRenderingTest::kFragmentInput3StaticUse;
-const GLint ProgramManagerWithPathRenderingTest::kFragmentInput1FakeLocation;
-const GLint ProgramManagerWithPathRenderingTest::kFragmentInput2FakeLocation;
-const GLint ProgramManagerWithPathRenderingTest::kFragmentInput3FakeLocation;
-const GLint ProgramManagerWithPathRenderingTest::kFragmentInput1RealLocation;
-const GLint ProgramManagerWithPathRenderingTest::kFragmentInput2RealLocation;
-const GLint ProgramManagerWithPathRenderingTest::kFragmentInput3RealLocation;
-const GLenum ProgramManagerWithPathRenderingTest::kFragmentInput1Type;
-const GLenum ProgramManagerWithPathRenderingTest::kFragmentInput2Type;
-const GLenum ProgramManagerWithPathRenderingTest::kFragmentInput3Type;
-#endif
-
-const char* ProgramManagerWithPathRenderingTest::kFragmentInput1Name = "color1";
-const char* ProgramManagerWithPathRenderingTest::kFragmentInput2Name = "color2";
-const char* ProgramManagerWithPathRenderingTest::kFragmentInput2GLName =
- "color2[0]";
-const char* ProgramManagerWithPathRenderingTest::kFragmentInput3Name = "color3";
-const char* ProgramManagerWithPathRenderingTest::kFragmentInput3GLName =
- "color3[0]";
-
-TEST_P(ProgramManagerWithPathRenderingTest, BindFragmentInputLocation) {
- const GLint kFragmentInput1DesiredLocation = 10;
- const GLint kFragmentInput2DesiredLocation = -1;
- const GLint kFragmentInput3DesiredLocation = 5;
-
- Shader* vshader = shader_manager_.CreateShader(
- kVertexShaderClientId, kVertexShaderServiceId, GL_VERTEX_SHADER);
- ASSERT_TRUE(vshader != nullptr);
- Shader* fshader = shader_manager_.CreateShader(
- kFragmentShaderClientId, kFragmentShaderServiceId, GL_FRAGMENT_SHADER);
- ASSERT_TRUE(fshader != nullptr);
- VaryingMap varying_map;
- varying_map[kFragmentInput1Name] = TestHelper::ConstructVarying(
- kFragmentInput1Type, kFragmentInput1Size, kFragmentInput1Precision,
- kFragmentInput1StaticUse, kFragmentInput1Name);
- varying_map[kFragmentInput2Name] = TestHelper::ConstructVarying(
- kFragmentInput2Type, kFragmentInput2Size, kFragmentInput2Precision,
- kFragmentInput2StaticUse, kFragmentInput2Name);
- varying_map[kFragmentInput3Name] = TestHelper::ConstructVarying(
- kFragmentInput3Type, kFragmentInput3Size, kFragmentInput3Precision,
- kFragmentInput3StaticUse, kFragmentInput3Name);
- TestHelper::SetShaderStates(gl_.get(), vshader, true, nullptr, nullptr,
- nullptr, nullptr, nullptr, &varying_map, nullptr,
- nullptr, nullptr);
- TestHelper::SetShaderStates(gl_.get(), fshader, true, nullptr, nullptr,
- nullptr, nullptr, nullptr, &varying_map, nullptr,
- nullptr, nullptr);
- Program* program =
- manager_->CreateProgram(kClientProgramId, kServiceProgramId);
- ASSERT_TRUE(program != nullptr);
- EXPECT_TRUE(program->AttachShader(&shader_manager_, vshader));
- EXPECT_TRUE(program->AttachShader(&shader_manager_, fshader));
- program->SetFragmentInputLocationBinding(kFragmentInput1Name,
- kFragmentInput1DesiredLocation);
- program->SetFragmentInputLocationBinding(kFragmentInput3Name,
- kFragmentInput3DesiredLocation);
- TestHelper::VaryingInfo kFragmentInputExpectationInfos[] = {
- {
- kFragmentInput1Name, kFragmentInput1Size, kFragmentInput1Type,
- kFragmentInput1FakeLocation, kFragmentInput1RealLocation,
- kFragmentInput1DesiredLocation,
- },
- {
- kFragmentInput2GLName, kFragmentInput2Size, kFragmentInput2Type,
- kFragmentInput2FakeLocation, kFragmentInput2RealLocation,
- kFragmentInput2DesiredLocation,
- },
- {
- kFragmentInput3GLName, kFragmentInput3Size, kFragmentInput3Type,
- kFragmentInput3FakeLocation, kFragmentInput3RealLocation,
- kFragmentInput3DesiredLocation,
- },
- };
- TestHelper::SetupShaderExpectationsWithVaryings(
- gl_.get(), feature_info_.get(), nullptr, 0, nullptr, 0,
- kFragmentInputExpectationInfos,
- base::size(kFragmentInputExpectationInfos), nullptr, 0,
- kServiceProgramId);
- program->Link(nullptr, Program::kCountOnlyStaticallyUsed, this);
- const Program::FragmentInputInfo* info1 =
- program->GetFragmentInputInfoByFakeLocation(
- kFragmentInput1DesiredLocation);
- ASSERT_NE(info1, nullptr);
- EXPECT_EQ(kFragmentInput1RealLocation, static_cast<GLint>(info1->location));
- const Program::FragmentInputInfo* info3 =
- program->GetFragmentInputInfoByFakeLocation(
- kFragmentInput3DesiredLocation);
- ASSERT_NE(info3, nullptr);
- EXPECT_EQ(kFragmentInput3RealLocation, static_cast<GLint>(info3->location));
-}
-
// For some compilers, using make_tuple("a", "bb") would end up
// instantiating make_tuple<char[1], char[2]>. This does not work.
namespace {
@@ -2546,21 +2412,6 @@ testing::tuple<const char*, const char*> make_gl_ext_tuple(
}
}
-INSTANTIATE_TEST_SUITE_P(
- SupportedContexts,
- ProgramManagerWithPathRenderingTest,
- testing::Values(
- make_gl_ext_tuple("3.2",
- "GL_ARB_program_interface_query "
- "GL_EXT_direct_state_access GL_NV_path_rendering "
- "GL_NV_framebuffer_mixed_samples"),
- make_gl_ext_tuple("4.5",
- "GL_NV_path_rendering "
- "GL_NV_framebuffer_mixed_samples"),
- make_gl_ext_tuple("OpenGL ES 3.1",
- "GL_NV_path_rendering "
- "GL_NV_framebuffer_mixed_samples")));
-
class ProgramManagerDualSourceBlendingTest
: public ProgramManagerWithShaderTest,
public testing::WithParamInterface<
diff --git a/chromium/gpu/command_buffer/service/raster_cmd_validation_implementation_autogen.h b/chromium/gpu/command_buffer/service/raster_cmd_validation_implementation_autogen.h
index 030b62fa323..863853ddcb8 100644
--- a/chromium/gpu/command_buffer/service/raster_cmd_validation_implementation_autogen.h
+++ b/chromium/gpu/command_buffer/service/raster_cmd_validation_implementation_autogen.h
@@ -79,7 +79,7 @@ static const viz::ResourceFormat valid_viz_resource_format_table[] = {
viz::ResourceFormat::RG_88, viz::ResourceFormat::LUMINANCE_F16,
viz::ResourceFormat::RGBA_F16, viz::ResourceFormat::R16_EXT,
viz::ResourceFormat::RGBX_8888, viz::ResourceFormat::BGRX_8888,
- viz::ResourceFormat::RGBX_1010102, viz::ResourceFormat::BGRX_1010102,
+ viz::ResourceFormat::RGBA_1010102, viz::ResourceFormat::BGRA_1010102,
viz::ResourceFormat::YVU_420, viz::ResourceFormat::YUV_420_BIPLANAR,
};
diff --git a/chromium/gpu/command_buffer/service/raster_decoder.cc b/chromium/gpu/command_buffer/service/raster_decoder.cc
index 6ad44fdc6c8..9b3bf4b725c 100644
--- a/chromium/gpu/command_buffer/service/raster_decoder.cc
+++ b/chromium/gpu/command_buffer/service/raster_decoder.cc
@@ -216,7 +216,8 @@ class RasterDecoderImpl final : public RasterDecoder,
const GpuPreferences& gpu_preferences,
MemoryTracker* memory_tracker,
SharedImageManager* shared_image_manager,
- scoped_refptr<SharedContextState> shared_context_state);
+ scoped_refptr<SharedContextState> shared_context_state,
+ bool is_privileged);
~RasterDecoderImpl() override;
gles2::GLES2Util* GetGLES2Util() override { return &util_; }
@@ -321,6 +322,13 @@ class RasterDecoderImpl final : public RasterDecoder,
unsigned format,
int width,
int height) override;
+ bool ClearCompressedTextureLevel3D(gles2::Texture* texture,
+ unsigned target,
+ int level,
+ unsigned format,
+ int width,
+ int height,
+ int depth) override;
bool ClearLevel3D(gles2::Texture* texture,
unsigned target,
int level,
@@ -429,6 +437,8 @@ class RasterDecoderImpl final : public RasterDecoder,
GLint y,
GLsizei width,
GLsizei height,
+ GLboolean unpack_flip_y,
+ GLboolean unpack_premultiply_alpha,
const volatile GLbyte* mailboxes);
void DoCopySubTextureINTERNALGLPassthrough(GLint xoffset,
GLint yoffset,
@@ -436,6 +446,8 @@ class RasterDecoderImpl final : public RasterDecoder,
GLint y,
GLsizei width,
GLsizei height,
+ GLboolean unpack_flip_y,
+ GLboolean unpack_premultiply_alpha,
const Mailbox& source_mailbox,
const Mailbox& dest_mailbox);
void DoCopySubTextureINTERNALGL(GLint xoffset,
@@ -444,6 +456,8 @@ class RasterDecoderImpl final : public RasterDecoder,
GLint y,
GLsizei width,
GLsizei height,
+ GLboolean unpack_flip_y,
+ GLboolean unpack_premultiply_alpha,
const Mailbox& source_mailbox,
const Mailbox& dest_mailbox);
void DoCopySubTextureINTERNALSkia(GLint xoffset,
@@ -452,6 +466,8 @@ class RasterDecoderImpl final : public RasterDecoder,
GLint y,
GLsizei width,
GLsizei height,
+ GLboolean unpack_flip_y,
+ GLboolean unpack_premultiply_alpha,
const Mailbox& source_mailbox,
const Mailbox& dest_mailbox);
void DoLoseContextCHROMIUM(GLenum current, GLenum other) { NOTIMPLEMENTED(); }
@@ -552,8 +568,6 @@ class RasterDecoderImpl final : public RasterDecoder,
// only if not returning an error.
error::Error current_decoder_error_ = error::kNoError;
- scoped_refptr<gl::GLContext> context_;
-
GpuPreferences gpu_preferences_;
gles2::DebugMarkerManager debug_marker_manager_;
@@ -585,7 +599,7 @@ class RasterDecoderImpl final : public RasterDecoder,
// Raster helpers.
scoped_refptr<ServiceFontManager> font_manager_;
std::unique_ptr<SharedImageRepresentationSkia> shared_image_;
- base::Optional<SharedImageRepresentationSkia::ScopedWriteAccess>
+ std::unique_ptr<SharedImageRepresentationSkia::ScopedWriteAccess>
scoped_shared_image_write_;
SkSurface* sk_surface_ = nullptr;
sk_sp<SkSurface> sk_surface_for_testing_;
@@ -605,6 +619,8 @@ class RasterDecoderImpl final : public RasterDecoder,
bool in_copy_sub_texture_ = false;
bool reset_texture_state_ = false;
+ bool is_privileged_ = false;
+
gl::GLApi* api_ = nullptr;
base::WeakPtrFactory<DecoderContext> weak_ptr_factory_{this};
@@ -633,11 +649,12 @@ RasterDecoder* RasterDecoder::Create(
const GpuPreferences& gpu_preferences,
MemoryTracker* memory_tracker,
SharedImageManager* shared_image_manager,
- scoped_refptr<SharedContextState> shared_context_state) {
+ scoped_refptr<SharedContextState> shared_context_state,
+ bool is_privileged) {
return new RasterDecoderImpl(client, command_buffer_service, outputter,
gpu_feature_info, gpu_preferences,
memory_tracker, shared_image_manager,
- std::move(shared_context_state));
+ std::move(shared_context_state), is_privileged);
}
RasterDecoder::RasterDecoder(DecoderClient* client,
@@ -689,7 +706,8 @@ RasterDecoderImpl::RasterDecoderImpl(
const GpuPreferences& gpu_preferences,
MemoryTracker* memory_tracker,
SharedImageManager* shared_image_manager,
- scoped_refptr<SharedContextState> shared_context_state)
+ scoped_refptr<SharedContextState> shared_context_state,
+ bool is_privileged)
: RasterDecoder(client, command_buffer_service, outputter),
raster_decoder_id_(g_raster_decoder_id.GetNext() + 1),
supports_gpu_raster_(
@@ -710,7 +728,8 @@ RasterDecoderImpl::RasterDecoderImpl(
memory_tracker),
gpu_decoder_category_(TRACE_EVENT_API_GET_CATEGORY_GROUP_ENABLED(
TRACE_DISABLED_BY_DEFAULT("gpu.decoder"))),
- font_manager_(base::MakeRefCounted<ServiceFontManager>(this)) {
+ font_manager_(base::MakeRefCounted<ServiceFontManager>(this)),
+ is_privileged_(is_privileged) {
DCHECK(shared_context_state_);
}
@@ -727,8 +746,7 @@ ContextResult RasterDecoderImpl::Initialize(
const gles2::DisallowedFeatures& disallowed_features,
const ContextCreationAttribs& attrib_helper) {
TRACE_EVENT0("gpu", "RasterDecoderImpl::Initialize");
- DCHECK(shared_context_state_->IsCurrent(surface.get()));
- DCHECK(!context_.get());
+ DCHECK(shared_context_state_->IsCurrent(nullptr));
api_ = gl::g_current_gl_context;
@@ -746,10 +764,10 @@ ContextResult RasterDecoderImpl::Initialize(
DCHECK_EQ(surface.get(), shared_context_state_->surface());
DCHECK_EQ(context.get(), shared_context_state_->context());
- context_ = context;
// Create GPU Tracer for timing values.
- gpu_tracer_.reset(new gles2::GPUTracer(this));
+ gpu_tracer_.reset(
+ new gles2::GPUTracer(this, shared_context_state_->GrContextIsGL()));
// Save the loseContextWhenOutOfMemory context creation attribute.
lose_context_when_out_of_memory_ =
@@ -831,13 +849,6 @@ void RasterDecoderImpl::Destroy(bool have_context) {
query_manager_.reset();
}
- // Destroy the surface before the context, some surface destructors make GL
- // calls.
- if (context_.get()) {
- context_->ReleaseCurrent(nullptr);
- context_ = nullptr;
- }
-
font_manager_->Destroy();
font_manager_.reset();
}
@@ -847,9 +858,6 @@ bool RasterDecoderImpl::MakeCurrent() {
if (!shared_context_state_->GrContextIsGL())
return true;
- if (!context_.get())
- return false;
-
if (context_lost_) {
LOG(ERROR) << " RasterDecoderImpl: Trying to make lost context current.";
return false;
@@ -877,11 +885,15 @@ bool RasterDecoderImpl::MakeCurrent() {
}
gl::GLContext* RasterDecoderImpl::GetGLContext() {
- return context_.get();
+ return shared_context_state_->GrContextIsGL()
+ ? shared_context_state_->context()
+ : nullptr;
}
gl::GLSurface* RasterDecoderImpl::GetGLSurface() {
- return shared_context_state_->surface();
+ return shared_context_state_->GrContextIsGL()
+ ? shared_context_state_->surface()
+ : nullptr;
}
Capabilities RasterDecoderImpl::GetCapabilities() {
@@ -937,8 +949,6 @@ Capabilities RasterDecoderImpl::GetCapabilities() {
if (gr_context()) {
caps.context_supports_distance_field_text =
gr_context()->supportsDistanceFieldText();
- caps.glyph_cache_max_texture_bytes =
- shared_context_state_->glyph_cache_max_texture_bytes();
}
return caps;
}
@@ -1235,7 +1245,8 @@ error::Error RasterDecoderImpl::DoCommandsImpl(unsigned int num_commands,
if (DebugImpl && doing_gpu_trace)
gpu_tracer_->End(gles2::kTraceDecoder);
- if (DebugImpl && debug() && !WasContextLost()) {
+ if (DebugImpl && shared_context_state_->GrContextIsGL() && debug() &&
+ !WasContextLost()) {
GLenum error;
while ((error = api()->glGetErrorFn()) != GL_NO_ERROR) {
LOG(ERROR) << "[" << logger_.GetLogPrefix() << "] "
@@ -1419,6 +1430,17 @@ bool RasterDecoderImpl::ClearCompressedTextureLevel(gles2::Texture* texture,
return false;
}
+bool RasterDecoderImpl::ClearCompressedTextureLevel3D(gles2::Texture* texture,
+ unsigned target,
+ int level,
+ unsigned format,
+ int width,
+ int height,
+ int depth) {
+ NOTREACHED();
+ return false;
+}
+
int RasterDecoderImpl::GetRasterDecoderId() const {
return raster_decoder_id_;
}
@@ -1739,6 +1761,8 @@ void RasterDecoderImpl::DoCopySubTextureINTERNAL(
GLint y,
GLsizei width,
GLsizei height,
+ GLboolean unpack_flip_y,
+ GLboolean unpack_premultiply_alpha,
const volatile GLbyte* mailboxes) {
Mailbox source_mailbox = Mailbox::FromVolatile(
reinterpret_cast<const volatile Mailbox*>(mailboxes)[0]);
@@ -1758,12 +1782,15 @@ void RasterDecoderImpl::DoCopySubTextureINTERNAL(
if (!shared_context_state_->GrContextIsGL()) {
// Use Skia to copy texture if raster's gr_context() is not using GL.
DoCopySubTextureINTERNALSkia(xoffset, yoffset, x, y, width, height,
+ unpack_flip_y, unpack_premultiply_alpha,
source_mailbox, dest_mailbox);
} else if (use_passthrough_) {
- DoCopySubTextureINTERNALGLPassthrough(xoffset, yoffset, x, y, width, height,
- source_mailbox, dest_mailbox);
+ DoCopySubTextureINTERNALGLPassthrough(
+ xoffset, yoffset, x, y, width, height, unpack_flip_y,
+ unpack_premultiply_alpha, source_mailbox, dest_mailbox);
} else {
DoCopySubTextureINTERNALGL(xoffset, yoffset, x, y, width, height,
+ unpack_flip_y, unpack_premultiply_alpha,
source_mailbox, dest_mailbox);
}
}
@@ -1775,6 +1802,8 @@ void RasterDecoderImpl::DoCopySubTextureINTERNALGLPassthrough(
GLint y,
GLsizei width,
GLsizei height,
+ GLboolean unpack_flip_y,
+ GLboolean unpack_premultiply_alpha,
const Mailbox& source_mailbox,
const Mailbox& dest_mailbox) {
DCHECK(source_mailbox != dest_mailbox);
@@ -1793,22 +1822,42 @@ void RasterDecoderImpl::DoCopySubTextureINTERNALGLPassthrough(
return;
}
- SharedImageRepresentationGLTexturePassthrough::ScopedAccess source_access(
- source_shared_image.get(), GL_SHARED_IMAGE_ACCESS_MODE_READ_CHROMIUM);
- if (!source_access.success()) {
+ std::unique_ptr<SharedImageRepresentationGLTexturePassthrough::ScopedAccess>
+ source_access = source_shared_image->BeginScopedAccess(
+ GL_SHARED_IMAGE_ACCESS_MODE_READ_CHROMIUM,
+ SharedImageRepresentation::AllowUnclearedAccess::kNo);
+ if (!source_access) {
LOCAL_SET_GL_ERROR(GL_INVALID_VALUE, "glCopySubTexture",
"unable to access source for read");
return;
}
- SharedImageRepresentationGLTexturePassthrough::ScopedAccess dest_access(
- dest_shared_image.get(), GL_SHARED_IMAGE_ACCESS_MODE_READWRITE_CHROMIUM);
- if (!dest_access.success()) {
+ // Allow uncleared access, as we manually handle clear tracking.
+ std::unique_ptr<SharedImageRepresentationGLTexturePassthrough::ScopedAccess>
+ dest_access = dest_shared_image->BeginScopedAccess(
+ GL_SHARED_IMAGE_ACCESS_MODE_READWRITE_CHROMIUM,
+ SharedImageRepresentation::AllowUnclearedAccess::kYes);
+ if (!dest_access) {
LOCAL_SET_GL_ERROR(GL_INVALID_VALUE, "glCopySubTexture",
"unable to access destination for write");
return;
}
+ gfx::Rect new_cleared_rect;
+ gfx::Rect old_cleared_rect = dest_shared_image->ClearedRect();
+ gfx::Rect dest_rect(xoffset, yoffset, width, height);
+ if (gles2::TextureManager::CombineAdjacentRects(old_cleared_rect, dest_rect,
+ &new_cleared_rect)) {
+ DCHECK(old_cleared_rect.IsEmpty() ||
+ new_cleared_rect.Contains(old_cleared_rect));
+ } else {
+ // No users of RasterDecoder leverage this functionality. Clearing uncleared
+ // regions could be added here if needed.
+ LOCAL_SET_GL_ERROR(GL_INVALID_VALUE, "glCopySubTexture",
+ "Cannot clear non-combineable rects.");
+ return;
+ }
+
gles2::TexturePassthrough* source_texture =
source_shared_image->GetTexturePassthrough().get();
gles2::TexturePassthrough* dest_texture =
@@ -1819,10 +1868,14 @@ void RasterDecoderImpl::DoCopySubTextureINTERNALGLPassthrough(
api()->glCopySubTextureCHROMIUMFn(
source_texture->service_id(), /*source_level=*/0, dest_texture->target(),
dest_texture->service_id(),
- /*dest_level=*/0, xoffset, yoffset, x, y, width, height,
- /*unpack_flip_y=*/false, /*unpack_premultiply_alpha=*/false,
+ /*dest_level=*/0, xoffset, yoffset, x, y, width, height, unpack_flip_y,
+ unpack_premultiply_alpha,
/*unpack_unmultiply_alpha=*/false);
LOCAL_COPY_REAL_GL_ERRORS_TO_WRAPPER("glCopySubTexture");
+
+ if (!dest_shared_image->IsCleared()) {
+ dest_shared_image->SetClearedRect(new_cleared_rect);
+ }
}
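A minimal sketch, not part of the patch, of the cleared-rect bookkeeping used above. gles2::TextureManager::CombineAdjacentRects() only succeeds when the union of the two rects is itself a rectangle, so partial copies have to grow the cleared region edge by edge; the 2x2 example values are illustrative:

    gfx::Rect cleared;                  // nothing cleared yet
    gfx::Rect top(0, 0, 2, 1);          // top half of a 2x2 image
    gfx::Rect bottom(0, 1, 2, 1);       // bottom half
    gfx::Rect corner(1, 1, 1, 1);       // bottom-right quarter
    gfx::Rect first, second, rejected;
    gles2::TextureManager::CombineAdjacentRects(cleared, top, &first);
    // true: first == (0,0 2x1), so the copy is allowed.
    gles2::TextureManager::CombineAdjacentRects(first, bottom, &second);
    // true: second == (0,0 2x2), the destination is now fully cleared.
    gles2::TextureManager::CombineAdjacentRects(top, corner, &rejected);
    // false: the union is not a rectangle, so the decoder reports
    // GL_INVALID_VALUE instead of clearing the uncleared remainder.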
void RasterDecoderImpl::DoCopySubTextureINTERNALGL(
@@ -1832,6 +1885,8 @@ void RasterDecoderImpl::DoCopySubTextureINTERNALGL(
GLint y,
GLsizei width,
GLsizei height,
+ GLboolean unpack_flip_y,
+ GLboolean unpack_premultiply_alpha,
const Mailbox& source_mailbox,
const Mailbox& dest_mailbox) {
DCHECK(source_mailbox != dest_mailbox);
@@ -1846,9 +1901,11 @@ void RasterDecoderImpl::DoCopySubTextureINTERNALGL(
return;
}
- SharedImageRepresentationGLTexture::ScopedAccess source_access(
- source_shared_image.get(), GL_SHARED_IMAGE_ACCESS_MODE_READ_CHROMIUM);
- if (!source_access.success()) {
+ std::unique_ptr<SharedImageRepresentationGLTexture::ScopedAccess>
+ source_access = source_shared_image->BeginScopedAccess(
+ GL_SHARED_IMAGE_ACCESS_MODE_READ_CHROMIUM,
+ SharedImageRepresentation::AllowUnclearedAccess::kNo);
+ if (!source_access) {
LOCAL_SET_GL_ERROR(GL_INVALID_VALUE, "glCopySubTexture",
"unable to access source for read");
return;
@@ -1866,9 +1923,12 @@ void RasterDecoderImpl::DoCopySubTextureINTERNALGL(
return;
}
- SharedImageRepresentationGLTexture::ScopedAccess dest_access(
- dest_shared_image.get(), GL_SHARED_IMAGE_ACCESS_MODE_READWRITE_CHROMIUM);
- if (!dest_access.success()) {
+ // Allow uncleared access, as we manually handle clear tracking.
+ std::unique_ptr<SharedImageRepresentationGLTexture::ScopedAccess>
+ dest_access = dest_shared_image->BeginScopedAccess(
+ GL_SHARED_IMAGE_ACCESS_MODE_READWRITE_CHROMIUM,
+ SharedImageRepresentation::AllowUnclearedAccess::kYes);
+ if (!dest_access) {
LOCAL_SET_GL_ERROR(GL_INVALID_VALUE, "glCopySubTexture",
"unable to access destination for write");
return;
@@ -1927,14 +1987,11 @@ void RasterDecoderImpl::DoCopySubTextureINTERNALGL(
DCHECK(old_cleared_rect.IsEmpty() ||
new_cleared_rect.Contains(old_cleared_rect));
} else {
- // Otherwise clear part of texture level that is not already cleared.
- if (!gles2::TextureManager::ClearTextureLevel(this, dest_texture,
- dest_target, dest_level)) {
- LOCAL_SET_GL_ERROR(GL_OUT_OF_MEMORY, "glCopySubTexture",
- "destination texture dimensions too big");
- return;
- }
- new_cleared_rect = gfx::Rect(dest_size);
+ // No users of RasterDecoder leverage this functionality. Clearing uncleared
+ // regions could be added here if needed.
+ LOCAL_SET_GL_ERROR(GL_INVALID_VALUE, "glCopySubTexture",
+ "Cannot clear non-combineable rects.");
+ return;
}
ScopedTextureBinder binder(state(), dest_target, dest_texture->service_id(),
@@ -1997,8 +2054,7 @@ void RasterDecoderImpl::DoCopySubTextureINTERNALGL(
source_internal_format, dest_target, dest_texture->service_id(),
dest_level, dest_internal_format, xoffset, yoffset, x, y, width,
height, dest_size.width(), dest_size.height(), source_size.width(),
- source_size.height(), false /* unpack_flip_y */,
- false /* unpack_premultiply_alpha */,
+ source_size.height(), unpack_flip_y, unpack_premultiply_alpha,
false /* unpack_unmultiply_alpha */, false /* dither */,
transform_matrix, copy_tex_image_blit_.get());
dest_texture->SetLevelClearedRect(dest_target, dest_level,
@@ -2009,9 +2065,9 @@ void RasterDecoderImpl::DoCopySubTextureINTERNALGL(
gles2::CopyTextureMethod method = GetCopyTextureCHROMIUMMethod(
GetFeatureInfo(), source_target, source_level, source_internal_format,
- source_type, dest_target, dest_level, dest_internal_format,
- false /* unpack_flip_y */, false /* unpack_premultiply_alpha */,
- false /* unpack_unmultiply_alpha */, false /* dither */);
+ source_type, dest_target, dest_level, dest_internal_format, unpack_flip_y,
+ unpack_premultiply_alpha, false /* unpack_unmultiply_alpha */,
+ false /* dither */);
#if defined(OS_CHROMEOS) && defined(ARCH_CPU_X86_FAMILY)
// glDrawArrays is faster than glCopyTexSubImage2D on IA Mesa driver,
// although opposite in Android.
@@ -2031,9 +2087,9 @@ void RasterDecoderImpl::DoCopySubTextureINTERNALGL(
source_internal_format, dest_target, dest_texture->service_id(),
dest_level, dest_internal_format, xoffset, yoffset, x, y, width, height,
dest_size.width(), dest_size.height(), source_size.width(),
- source_size.height(), false /* unpack_flip_y */,
- false /* unpack_premultiply_alpha */, false /* unpack_unmultiply_alpha */,
- false /* dither */, method, copy_tex_image_blit_.get());
+ source_size.height(), unpack_flip_y, unpack_premultiply_alpha,
+ false /* unpack_unmultiply_alpha */, false /* dither */, method,
+ copy_tex_image_blit_.get());
dest_texture->SetLevelClearedRect(dest_target, dest_level, new_cleared_rect);
in_copy_sub_texture_ = false;
if (reset_texture_state_) {
@@ -2059,6 +2115,8 @@ void RasterDecoderImpl::DoCopySubTextureINTERNALSkia(
GLint y,
GLsizei width,
GLsizei height,
+ GLboolean unpack_flip_y,
+ GLboolean unpack_premultiply_alpha,
const Mailbox& source_mailbox,
const Mailbox& dest_mailbox) {
DCHECK(source_mailbox != dest_mailbox);
@@ -2092,43 +2150,71 @@ void RasterDecoderImpl::DoCopySubTextureINTERNALSkia(
std::vector<GrBackendSemaphore> begin_semaphores;
std::vector<GrBackendSemaphore> end_semaphores;
- SharedImageRepresentationSkia::ScopedWriteAccess dest_scoped_access(
- dest_shared_image.get(), &begin_semaphores, &end_semaphores);
- if (!dest_scoped_access.success()) {
+ // Allow uncleared access, as we manually handle clear tracking.
+ std::unique_ptr<SharedImageRepresentationSkia::ScopedWriteAccess>
+ dest_scoped_access = dest_shared_image->BeginScopedWriteAccess(
+ &begin_semaphores, &end_semaphores,
+ SharedImageRepresentation::AllowUnclearedAccess::kYes);
+ if (!dest_scoped_access) {
LOCAL_SET_GL_ERROR(GL_INVALID_VALUE, "glCopySubTexture",
"Dest shared image is not writable");
return;
}
+ gfx::Rect new_cleared_rect;
+ gfx::Rect old_cleared_rect = dest_shared_image->ClearedRect();
+ if (gles2::TextureManager::CombineAdjacentRects(old_cleared_rect, dest_rect,
+ &new_cleared_rect)) {
+ DCHECK(old_cleared_rect.IsEmpty() ||
+ new_cleared_rect.Contains(old_cleared_rect));
+ } else {
+ // No users of RasterDecoder leverage this functionality. Clearing uncleared
+ // regions could be added here if needed.
+ LOCAL_SET_GL_ERROR(GL_INVALID_VALUE, "glCopySubTexture",
+ "Cannot clear non-combineable rects.");
+ return;
+ }
+
// With OneCopyRasterBufferProvider, source_shared_image->BeginReadAccess()
// will copy pixels from SHM GMB to the texture in |source_shared_image|,
// and then use drawImageRect() to draw that texture to the target
// |dest_shared_image|. We can save one copy by drawing the SHM GMB to the
// target |dest_shared_image| directly.
// TODO(penghuang): get rid of the one extra copy. https://crbug.com/984045
- SharedImageRepresentationSkia::ScopedReadAccess source_scoped_access(
- source_shared_image.get(), &begin_semaphores, &end_semaphores);
+ std::unique_ptr<SharedImageRepresentationSkia::ScopedReadAccess>
+ source_scoped_access = source_shared_image->BeginScopedReadAccess(
+ &begin_semaphores, &end_semaphores);
if (!begin_semaphores.empty()) {
- bool result = dest_scoped_access.surface()->wait(begin_semaphores.size(),
- begin_semaphores.data());
+ bool result = dest_scoped_access->surface()->wait(begin_semaphores.size(),
+ begin_semaphores.data());
DCHECK(result);
}
- if (!source_scoped_access.success()) {
+ if (!source_scoped_access) {
LOCAL_SET_GL_ERROR(GL_INVALID_VALUE, "glCopySubTexture",
"Source shared image is not accessable");
} else {
auto color_type = viz::ResourceFormatToClosestSkColorType(
true /* gpu_compositing */, source_shared_image->format());
+
+ // TODO(http://crbug.com/1034086): We should initialize alpha_type and
+ // origin using metadata stored with the shared image.
+ SkAlphaType alpha_type = kPremul_SkAlphaType;
+ if (unpack_premultiply_alpha)
+ alpha_type = kUnpremul_SkAlphaType;
auto source_image = SkImage::MakeFromTexture(
shared_context_state_->gr_context(),
- source_scoped_access.promise_image_texture()->backendTexture(),
- kTopLeft_GrSurfaceOrigin, color_type, kUnpremul_SkAlphaType,
+ source_scoped_access->promise_image_texture()->backendTexture(),
+ kTopLeft_GrSurfaceOrigin, color_type, alpha_type,
nullptr /* colorSpace */);
- auto* canvas = dest_scoped_access.surface()->getCanvas();
+ auto* canvas = dest_scoped_access->surface()->getCanvas();
SkPaint paint;
+ if (unpack_flip_y) {
+ canvas->scale(1, -1);
+ canvas->translate(0, -height);
+ }
paint.setBlendMode(SkBlendMode::kSrc);
canvas->drawImageRect(source_image, gfx::RectToSkRect(source_rect),
gfx::RectToSkRect(dest_rect), &paint);
@@ -2144,8 +2230,12 @@ void RasterDecoderImpl::DoCopySubTextureINTERNALSkia(
};
gpu::AddVulkanCleanupTaskForSkiaFlush(
shared_context_state_->vk_context_provider(), &flush_info);
- dest_scoped_access.surface()->flush(
+ dest_scoped_access->surface()->flush(
SkSurface::BackendSurfaceAccess::kNoAccess, flush_info);
+
+ if (!dest_shared_image->IsCleared()) {
+ dest_shared_image->SetClearedRect(new_cleared_rect);
+ }
}
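A stand-alone sketch, assuming 2020-era Skia APIs, of the unpack_flip_y handling in the Skia path above; the function name is hypothetical. Mirroring the y axis and translating by the copy height makes drawImageRect() write the source upside-down into the same destination rectangle:

    void DrawMaybeFlipped(SkCanvas* canvas, sk_sp<SkImage> source,
                          const SkRect& source_rect, const SkRect& dest_rect,
                          bool unpack_flip_y, int height) {
      SkPaint paint;
      paint.setBlendMode(SkBlendMode::kSrc);  // replace, do not blend
      if (unpack_flip_y) {
        canvas->scale(1, -1);           // mirror the y axis
        canvas->translate(0, -height);  // shift the flip back onto [0, height)
      }
      canvas->drawImageRect(source, source_rect, dest_rect, &paint);
    }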
namespace {
@@ -2282,9 +2372,18 @@ void RasterDecoderImpl::DoBeginRasterCHROMIUM(
std::vector<GrBackendSemaphore> begin_semaphores;
DCHECK(end_semaphores_.empty());
DCHECK(!scoped_shared_image_write_);
- scoped_shared_image_write_.emplace(shared_image_.get(), final_msaa_count,
- surface_props, &begin_semaphores,
- &end_semaphores_);
+ // Allow uncleared access, as raster specifically handles uncleared images by
+ // clearing them before writing.
+ scoped_shared_image_write_ = shared_image_->BeginScopedWriteAccess(
+ final_msaa_count, surface_props, &begin_semaphores, &end_semaphores_,
+ SharedImageRepresentation::AllowUnclearedAccess::kYes);
+ if (!scoped_shared_image_write_) {
+ LOCAL_SET_GL_ERROR(GL_INVALID_OPERATION, "glBeginRasterCHROMIUM",
+ "failed to create surface");
+ shared_image_.reset();
+ return;
+ }
+
sk_surface_ = scoped_shared_image_write_->surface();
if (!begin_semaphores.empty()) {
@@ -2293,14 +2392,6 @@ void RasterDecoderImpl::DoBeginRasterCHROMIUM(
DCHECK(result);
}
- if (!sk_surface_) {
- LOCAL_SET_GL_ERROR(GL_INVALID_OPERATION, "glBeginRasterCHROMIUM",
- "failed to create surface");
- scoped_shared_image_write_.reset();
- shared_image_.reset();
- return;
- }
-
if (use_ddl_) {
SkSurfaceCharacterization characterization;
bool result = sk_surface_->characterize(&characterization);
@@ -2386,7 +2477,7 @@ void RasterDecoderImpl::DoRasterCHROMIUM(GLuint raster_shm_id,
TransferCacheDeserializeHelperImpl impl(raster_decoder_id_, transfer_cache());
cc::PaintOp::DeserializeOptions options(
&impl, paint_cache_.get(), font_manager_->strike_client(),
- shared_context_state_->scratch_deserialization_buffer());
+ shared_context_state_->scratch_deserialization_buffer(), is_privileged_);
options.crash_dump_on_failure = true;
size_t paint_buffer_size = raster_shm_size;
@@ -2423,10 +2514,13 @@ void RasterDecoderImpl::DoEndRasterCHROMIUM() {
raster_canvas_ = nullptr;
+ // The DDL pins memory for the recorded ops, so it must be kept alive until
+ // it is flushed.
+ std::unique_ptr<SkDeferredDisplayList> ddl;
if (use_ddl_) {
TRACE_EVENT0("gpu",
"RasterDecoderImpl::DoEndRasterCHROMIUM::DetachAndDrawDDL");
- auto ddl = recorder_->detach();
+ ddl = recorder_->detach();
recorder_ = nullptr;
sk_surface_->draw(ddl.get());
}
@@ -2449,8 +2543,10 @@ void RasterDecoderImpl::DoEndRasterCHROMIUM() {
flush_info);
DCHECK(result == GrSemaphoresSubmitted::kYes || end_semaphores_.empty());
end_semaphores_.clear();
+ ddl.reset();
}
+ shared_context_state_->UpdateSkiaOwnedMemorySize();
sk_surface_ = nullptr;
if (!shared_image_) {
// Test only path for SetUpForRasterCHROMIUMForTest.
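A minimal sketch, with illustrative variable names, of why |ddl| is hoisted out of the if-block in DoEndRasterCHROMIUM above: the deferred display list owns the memory backing the recorded paint ops, so it must outlive the flush that actually executes them:

    std::unique_ptr<SkDeferredDisplayList> ddl = recorder->detach();
    sk_surface->draw(ddl.get());  // schedules the recorded ops on the surface
    sk_surface->flush();          // executes them on the GPU
    ddl.reset();                  // only now is it safe to free the DDL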
@@ -2504,6 +2600,12 @@ void RasterDecoderImpl::DoCreateTransferCacheEntryINTERNAL(
return;
}
+ if (entry_type == cc::TransferCacheEntryType::kSkottie && !is_privileged_) {
+ LOCAL_SET_GL_ERROR(GL_INVALID_VALUE, "glCreateTransferCacheEntryINTERNAL",
+ "Attempt to use skottie on a non privileged channel");
+ return;
+ }
+
uint8_t* data_memory =
GetSharedMemoryAs<uint8_t*>(data_shm_id, data_shm_offset, data_size);
if (!data_memory) {
diff --git a/chromium/gpu/command_buffer/service/raster_decoder.h b/chromium/gpu/command_buffer/service/raster_decoder.h
index 64c90a8f9c4..dbfdd18ac8a 100644
--- a/chromium/gpu/command_buffer/service/raster_decoder.h
+++ b/chromium/gpu/command_buffer/service/raster_decoder.h
@@ -43,7 +43,8 @@ class GPU_GLES2_EXPORT RasterDecoder : public DecoderContext,
const GpuPreferences& gpu_preferences,
MemoryTracker* memory_tracker,
SharedImageManager* shared_image_manager,
- scoped_refptr<SharedContextState> shared_context_state);
+ scoped_refptr<SharedContextState> shared_context_state,
+ bool is_privileged);
~RasterDecoder() override;
diff --git a/chromium/gpu/command_buffer/service/raster_decoder_autogen.h b/chromium/gpu/command_buffer/service/raster_decoder_autogen.h
index 1a0c03a200f..ea83f087e79 100644
--- a/chromium/gpu/command_buffer/service/raster_decoder_autogen.h
+++ b/chromium/gpu/command_buffer/service/raster_decoder_autogen.h
@@ -268,6 +268,9 @@ error::Error RasterDecoderImpl::HandleCopySubTextureINTERNALImmediate(
GLint y = static_cast<GLint>(c.y);
GLsizei width = static_cast<GLsizei>(c.width);
GLsizei height = static_cast<GLsizei>(c.height);
+ GLboolean unpack_flip_y = static_cast<GLboolean>(c.unpack_flip_y);
+ GLboolean unpack_premultiply_alpha =
+ static_cast<GLboolean>(c.unpack_premultiply_alpha);
uint32_t mailboxes_size;
if (!gles2::GLES2Util::ComputeDataSize<GLbyte, 32>(1, &mailboxes_size)) {
return error::kOutOfBounds;
@@ -291,7 +294,8 @@ error::Error RasterDecoderImpl::HandleCopySubTextureINTERNALImmediate(
if (mailboxes == nullptr) {
return error::kOutOfBounds;
}
- DoCopySubTextureINTERNAL(xoffset, yoffset, x, y, width, height, mailboxes);
+ DoCopySubTextureINTERNAL(xoffset, yoffset, x, y, width, height, unpack_flip_y,
+ unpack_premultiply_alpha, mailboxes);
return error::kNoError;
}
diff --git a/chromium/gpu/command_buffer/service/raster_decoder_unittest.cc b/chromium/gpu/command_buffer/service/raster_decoder_unittest.cc
index 52844f907d5..b1b9c630cf6 100644
--- a/chromium/gpu/command_buffer/service/raster_decoder_unittest.cc
+++ b/chromium/gpu/command_buffer/service/raster_decoder_unittest.cc
@@ -21,6 +21,7 @@
#include "gpu/command_buffer/service/shared_image_factory.h"
#include "gpu/command_buffer/service/shared_image_manager.h"
#include "gpu/command_buffer/service/test_helper.h"
+#include "gpu/config/gpu_preferences.h"
#include "testing/gtest/include/gtest/gtest.h"
#include "ui/gl/gl_image_stub.h"
#include "ui/gl/gl_mock.h"
@@ -187,7 +188,7 @@ TEST_P(RasterDecoderTest, CopyTexSubImage2DSizeMismatch) {
// This will initialize the bottom right corner of destination.
SetScopedTextureBinderExpectations(GL_TEXTURE_2D);
auto& cmd = *GetImmediateAs<cmds::CopySubTextureINTERNALImmediate>();
- cmd.Init(1, 1, 0, 0, 1, 1, mailboxes);
+ cmd.Init(1, 1, 0, 0, 1, 1, false, false, mailboxes);
EXPECT_EQ(error::kNoError, ExecuteImmediateCmd(cmd, sizeof(mailboxes)));
EXPECT_EQ(GL_NO_ERROR, GetGLError());
EXPECT_EQ(dest_texture->GetLevelClearedRect(GL_TEXTURE_2D, 0),
@@ -197,7 +198,7 @@ TEST_P(RasterDecoderTest, CopyTexSubImage2DSizeMismatch) {
{
// Dest rect outside of dest bounds
auto& cmd = *GetImmediateAs<cmds::CopySubTextureINTERNALImmediate>();
- cmd.Init(2, 2, 0, 0, 1, 1, mailboxes);
+ cmd.Init(2, 2, 0, 0, 1, 1, false, false, mailboxes);
EXPECT_EQ(error::kNoError, ExecuteImmediateCmd(cmd, sizeof(mailboxes)));
EXPECT_EQ(GL_INVALID_VALUE, GetGLError());
EXPECT_EQ(dest_texture->GetLevelClearedRect(GL_TEXTURE_2D, 0),
@@ -207,7 +208,7 @@ TEST_P(RasterDecoderTest, CopyTexSubImage2DSizeMismatch) {
{
// Source rect outside of source bounds
auto& cmd = *GetImmediateAs<cmds::CopySubTextureINTERNALImmediate>();
- cmd.Init(0, 0, 0, 0, 2, 2, mailboxes);
+ cmd.Init(0, 0, 0, 0, 2, 2, false, false, mailboxes);
EXPECT_EQ(error::kNoError, ExecuteImmediateCmd(cmd, sizeof(mailboxes)));
EXPECT_EQ(GL_INVALID_VALUE, GetGLError());
EXPECT_EQ(dest_texture->GetLevelClearedRect(GL_TEXTURE_2D, 0),
@@ -221,40 +222,74 @@ TEST_P(RasterDecoderTest, CopyTexSubImage2DTwiceClearsUnclearedTexture) {
gpu::Mailbox source_texture_mailbox =
CreateFakeTexture(kNewServiceId, viz::ResourceFormat::RGBA_8888,
/*width=*/2, /*height=*/2,
- /*cleared=*/false);
+ /*cleared=*/true);
GLbyte mailboxes[sizeof(gpu::Mailbox) * 2];
CopyMailboxes(mailboxes, source_texture_mailbox, client_texture_mailbox_);
+ SharedImageRepresentationFactory repr_factory(shared_image_manager(),
+ nullptr);
+ auto representation = repr_factory.ProduceGLTexture(client_texture_mailbox_);
+ EXPECT_FALSE(representation->IsCleared());
+
// This will initialize the top half of destination.
{
- // Source is undefined, so first call to CopySubTexture will clear the
- // source.
- SetupClearTextureExpectations(kNewServiceId, kServiceTextureId,
- GL_TEXTURE_2D, GL_TEXTURE_2D, 0, GL_RGBA,
- GL_UNSIGNED_BYTE, 0, 0, 2, 2, 0);
SetScopedTextureBinderExpectations(GL_TEXTURE_2D);
auto& cmd = *GetImmediateAs<cmds::CopySubTextureINTERNALImmediate>();
- cmd.Init(0, 0, 0, 0, 2, 1, mailboxes);
+ cmd.Init(0, 0, 0, 0, 2, 1, false, false, mailboxes);
EXPECT_EQ(error::kNoError, ExecuteImmediateCmd(cmd, sizeof(mailboxes)));
}
+ EXPECT_EQ(gfx::Rect(0, 0, 2, 1), representation->ClearedRect());
+ EXPECT_FALSE(representation->IsCleared());
- // This will initialize bottom right corner of the destination.
- // CopySubTexture will clear the bottom half of the destination because a
- // single rectangle is insufficient to keep track of the initialized area.
+ // This will initialize bottom half of the destination.
{
- SetupClearTextureExpectations(kServiceTextureId, kServiceTextureId,
- GL_TEXTURE_2D, GL_TEXTURE_2D, 0, GL_RGBA,
- GL_UNSIGNED_BYTE, 0, 1, 2, 1, 0);
SetScopedTextureBinderExpectations(GL_TEXTURE_2D);
auto& cmd = *GetImmediateAs<cmds::CopySubTextureINTERNALImmediate>();
- cmd.Init(1, 1, 0, 0, 1, 1, mailboxes);
+ cmd.Init(0, 1, 0, 0, 2, 1, false, false, mailboxes);
EXPECT_EQ(error::kNoError, ExecuteImmediateCmd(cmd, sizeof(mailboxes)));
}
+ EXPECT_TRUE(representation->IsCleared());
+}
+
+// Unlike the GLES2 version, RasterInterface's CopySubTexture does not allow
+// initializing a texture in parts *unless* the rectangles being cleared
+// can be trivially combined into a larger rectangle.
+TEST_P(RasterDecoderTest, CopyTexSubImage2DPartialFailsWithUnalignedRect) {
+ shared_context_state_->set_need_context_state_reset(true);
+ // Create a cleared source texture.
+ gpu::Mailbox source_texture_mailbox =
+ CreateFakeTexture(kNewServiceId, viz::ResourceFormat::RGBA_8888,
+ /*width=*/2, /*height=*/2,
+ /*cleared=*/true);
+ GLbyte mailboxes[sizeof(gpu::Mailbox) * 2];
+ CopyMailboxes(mailboxes, source_texture_mailbox, client_texture_mailbox_);
SharedImageRepresentationFactory repr_factory(shared_image_manager(),
nullptr);
auto representation = repr_factory.ProduceGLTexture(client_texture_mailbox_);
- EXPECT_TRUE(representation->GetTexture()->SafeToRenderFrom());
+ EXPECT_FALSE(representation->IsCleared());
+
+ // This will initialize the top half of destination.
+ {
+ SetScopedTextureBinderExpectations(GL_TEXTURE_2D);
+ auto& cmd = *GetImmediateAs<cmds::CopySubTextureINTERNALImmediate>();
+ cmd.Init(0, 0, 0, 0, 2, 1, false, false, mailboxes);
+ EXPECT_EQ(error::kNoError, ExecuteImmediateCmd(cmd, sizeof(mailboxes)));
+ }
+ EXPECT_EQ(gfx::Rect(0, 0, 2, 1), representation->ClearedRect());
+ EXPECT_FALSE(representation->IsCleared());
+
+ // This will attempt to initialize the bottom corner of the destination. As
+ // the new rect cannot be trivially combined with the previous cleared rect,
+ // this will fail.
+ {
+ auto& cmd = *GetImmediateAs<cmds::CopySubTextureINTERNALImmediate>();
+ cmd.Init(1, 1, 0, 0, 1, 1, false, false, mailboxes);
+ EXPECT_EQ(error::kNoError, ExecuteImmediateCmd(cmd, sizeof(mailboxes)));
+ EXPECT_EQ(GL_INVALID_VALUE, GetGLError());
+ }
+ EXPECT_EQ(gfx::Rect(0, 0, 2, 1), representation->ClearedRect());
+ EXPECT_FALSE(representation->IsCleared());
}
TEST_P(RasterDecoderManualInitTest, CopyTexSubImage2DValidateColorFormat) {
@@ -263,6 +298,11 @@ TEST_P(RasterDecoderManualInitTest, CopyTexSubImage2DValidateColorFormat) {
init.extensions.push_back("GL_EXT_texture_rg");
InitDecoder(init);
+ // Recreate |client_texture_mailbox_| as a cleared mailbox.
+ client_texture_mailbox_ = CreateFakeTexture(
+ kServiceTextureId, viz::ResourceFormat::RGBA_8888, /*width=*/2,
+ /*height=*/2, /*cleared=*/true);
+
// Create dest texture.
gpu::Mailbox dest_texture_mailbox =
CreateFakeTexture(kNewServiceId, viz::ResourceFormat::RED_8,
@@ -271,7 +311,7 @@ TEST_P(RasterDecoderManualInitTest, CopyTexSubImage2DValidateColorFormat) {
auto& copy_cmd = *GetImmediateAs<cmds::CopySubTextureINTERNALImmediate>();
GLbyte mailboxes[sizeof(gpu::Mailbox) * 2];
CopyMailboxes(mailboxes, client_texture_mailbox_, dest_texture_mailbox);
- copy_cmd.Init(0, 0, 0, 0, 2, 1, mailboxes);
+ copy_cmd.Init(0, 0, 0, 0, 2, 1, false, false, mailboxes);
EXPECT_EQ(error::kNoError, ExecuteImmediateCmd(copy_cmd, sizeof(mailboxes)));
EXPECT_EQ(GL_INVALID_OPERATION, GetGLError());
}
@@ -298,7 +338,7 @@ class RasterDecoderOOPTest : public testing::Test, DecoderClient {
std::move(share_group), std::move(surface), std::move(context),
false /* use_virtualized_gl_contexts */, base::DoNothing(),
GpuPreferences().gr_context_type);
- context_state_->InitializeGrContext(workarounds, nullptr);
+ context_state_->InitializeGrContext(GpuPreferences(), workarounds, nullptr);
context_state_->InitializeGL(GpuPreferences(), feature_info);
}
void TearDown() override {
@@ -322,7 +362,7 @@ class RasterDecoderOOPTest : public testing::Test, DecoderClient {
auto decoder = base::WrapUnique(RasterDecoder::Create(
this, &command_buffer_service_, &outputter_, gpu_feature_info_,
GpuPreferences(), nullptr /* memory_tracker */, &shared_image_manager_,
- context_state_));
+ context_state_, true /* is_privileged */));
ContextCreationAttribs attribs;
attribs.enable_oop_rasterization = true;
attribs.enable_raster_interface = true;
diff --git a/chromium/gpu/command_buffer/service/raster_decoder_unittest_base.cc b/chromium/gpu/command_buffer/service/raster_decoder_unittest_base.cc
index 135dc082464..f445c2deb92 100644
--- a/chromium/gpu/command_buffer/service/raster_decoder_unittest_base.cc
+++ b/chromium/gpu/command_buffer/service/raster_decoder_unittest_base.cc
@@ -192,7 +192,7 @@ void RasterDecoderTestBase::InitDecoder(const InitState& init) {
decoder_.reset(RasterDecoder::Create(
this, command_buffer_service_.get(), &outputter_, gpu_feature_info,
gpu_preferences_, nullptr /* memory_tracker */, &shared_image_manager_,
- shared_context_state_));
+ shared_context_state_, true /* is_privileged */));
decoder_->SetIgnoreCachedStateForTest(ignore_cached_state_for_test_);
decoder_->DisableFlushWorkaroundForTest();
decoder_->GetLogger()->set_log_synthesized_gl_errors(false);
diff --git a/chromium/gpu/command_buffer/service/service_discardable_manager.cc b/chromium/gpu/command_buffer/service/service_discardable_manager.cc
index 52fe92be8ba..94bae9556f5 100644
--- a/chromium/gpu/command_buffer/service/service_discardable_manager.cc
+++ b/chromium/gpu/command_buffer/service/service_discardable_manager.cc
@@ -6,13 +6,17 @@
#include <inttypes.h>
+#include "base/command_line.h"
#include "base/memory/singleton.h"
+#include "base/strings/string_number_conversions.h"
#include "base/strings/stringprintf.h"
#include "base/system/sys_info.h"
#include "base/threading/thread_task_runner_handle.h"
#include "base/trace_event/memory_dump_manager.h"
#include "build/build_config.h"
+#include "gpu/command_buffer/service/gpu_switches.h"
#include "gpu/command_buffer/service/texture_manager.h"
+#include "gpu/config/gpu_preferences.h"
namespace gpu {
@@ -21,7 +25,7 @@ size_t DiscardableCacheSizeLimit() {
// sizes for 1-1.5 renderers. These will be updated as more types of data are
// moved to this cache.
#if defined(OS_ANDROID)
- const size_t kLowEndCacheSizeBytes = 512 * 1024;
+ const size_t kLowEndCacheSizeBytes = 1024 * 1024;
const size_t kNormalCacheSizeBytes = 128 * 1024 * 1024;
#else
const size_t kNormalCacheSizeBytes = 192 * 1024 * 1024;
@@ -53,8 +57,6 @@ size_t DiscardableCacheSizeLimitForPressure(
base::MemoryPressureListener::MemoryPressureLevel memory_pressure_level) {
switch (memory_pressure_level) {
case base::MemoryPressureListener::MEMORY_PRESSURE_LEVEL_NONE:
- // This function is only called with moderate or critical pressure.
- NOTREACHED();
return base_cache_limit;
case base::MemoryPressureListener::MEMORY_PRESSURE_LEVEL_MODERATE:
// With moderate pressure, shrink to 1/4 our normal size.
@@ -80,9 +82,12 @@ ServiceDiscardableManager::GpuDiscardableEntry::GpuDiscardableEntry(
ServiceDiscardableManager::GpuDiscardableEntry::~GpuDiscardableEntry() =
default;
-ServiceDiscardableManager::ServiceDiscardableManager()
+ServiceDiscardableManager::ServiceDiscardableManager(
+ const GpuPreferences& preferences)
: entries_(EntryCache::NO_AUTO_EVICT),
- cache_size_limit_(DiscardableCacheSizeLimit()) {
+ cache_size_limit_(preferences.force_gpu_mem_discardable_limit_bytes
+ ? preferences.force_gpu_mem_discardable_limit_bytes
+ : DiscardableCacheSizeLimit()) {
// In certain cases, ThreadTaskRunnerHandle isn't set (Android Webview).
// Don't register a dump provider in these cases.
if (base::ThreadTaskRunnerHandle::IsSet()) {
diff --git a/chromium/gpu/command_buffer/service/service_discardable_manager.h b/chromium/gpu/command_buffer/service/service_discardable_manager.h
index 4a54490b40b..522b4a16129 100644
--- a/chromium/gpu/command_buffer/service/service_discardable_manager.h
+++ b/chromium/gpu/command_buffer/service/service_discardable_manager.h
@@ -14,6 +14,7 @@
#include "gpu/gpu_gles2_export.h"
namespace gpu {
+struct GpuPreferences;
namespace gles2 {
class TextureManager;
class TextureRef;
@@ -27,7 +28,7 @@ GPU_GLES2_EXPORT size_t DiscardableCacheSizeLimitForPressure(
class GPU_GLES2_EXPORT ServiceDiscardableManager
: public base::trace_event::MemoryDumpProvider {
public:
- ServiceDiscardableManager();
+ explicit ServiceDiscardableManager(const GpuPreferences& preferences);
~ServiceDiscardableManager() override;
// base::trace_event::MemoryDumpProvider implementation.
diff --git a/chromium/gpu/command_buffer/service/service_discardable_manager_unittest.cc b/chromium/gpu/command_buffer/service/service_discardable_manager_unittest.cc
index 1a50b05af10..6d1aa65cbe1 100644
--- a/chromium/gpu/command_buffer/service/service_discardable_manager_unittest.cc
+++ b/chromium/gpu/command_buffer/service/service_discardable_manager_unittest.cc
@@ -15,6 +15,7 @@
#include "gpu/command_buffer/service/shared_image_manager.h"
#include "gpu/command_buffer/service/test_helper.h"
#include "gpu/command_buffer/service/texture_manager.h"
+#include "gpu/config/gpu_preferences.h"
#include "testing/gtest/include/gtest/gtest.h"
#include "ui/gl/gl_image_stub.h"
#include "ui/gl/gl_mock.h"
@@ -65,7 +66,7 @@ static const size_t kSmallTextureSize = 4 * kSmallTextureDim * kSmallTextureDim;
class ServiceDiscardableManagerTest : public GpuServiceTest {
public:
- ServiceDiscardableManagerTest() = default;
+ ServiceDiscardableManagerTest() : discardable_manager_(GpuPreferences()) {}
~ServiceDiscardableManagerTest() override = default;
protected:
diff --git a/chromium/gpu/command_buffer/service/service_font_manager.cc b/chromium/gpu/command_buffer/service/service_font_manager.cc
index f025d0ce966..50a1b4a74e2 100644
--- a/chromium/gpu/command_buffer/service/service_font_manager.cc
+++ b/chromium/gpu/command_buffer/service/service_font_manager.cc
@@ -106,11 +106,7 @@ class ServiceFontManager::SkiaDiscardableManager
// it can be fixed.
NOTREACHED();
- const bool no_fallback = (type == SkStrikeClient::kGlyphMetrics ||
- type == SkStrikeClient::kGlyphPath ||
- type == SkStrikeClient::kGlyphImage);
-
- if (no_fallback && dump_count_ < kMaxDumps && base::RandInt(1, 100) == 1) {
+ if (dump_count_ < kMaxDumps && base::RandInt(1, 100) == 1) {
++dump_count_;
base::debug::DumpWithoutCrashing();
}
diff --git a/chromium/gpu/command_buffer/service/service_transfer_cache.cc b/chromium/gpu/command_buffer/service/service_transfer_cache.cc
index c15e9b632a4..25d7e01a985 100644
--- a/chromium/gpu/command_buffer/service/service_transfer_cache.cc
+++ b/chromium/gpu/command_buffer/service/service_transfer_cache.cc
@@ -8,6 +8,7 @@
#include <utility>
+#include "base/auto_reset.h"
#include "base/bind.h"
#include "base/strings/stringprintf.h"
#include "base/system/sys_info.h"
@@ -74,20 +75,26 @@ void DumpMemoryForYUVImageTransferCacheEntry(
DCHECK(entry->is_yuv());
std::vector<size_t> plane_sizes = entry->GetPlaneCachedSizes();
+ if (plane_sizes.empty()) {
+ // This entry corresponds to an unmipped hardware decoded image.
+ MemoryAllocatorDump* dump = pmd->CreateAllocatorDump(
+ dump_base_name + base::StringPrintf("/dma_buf"));
+ dump->AddScalar(MemoryAllocatorDump::kNameSize,
+ MemoryAllocatorDump::kUnitsBytes, entry->CachedSize());
+ // We don't need to establish shared ownership of the dump with Skia: the
+ // reason is that Skia doesn't own the textures for hardware decoded images,
+ // so it won't count them in its memory dump (because
+ // SkiaGpuTraceMemoryDump::shouldDumpWrappedObjects() returns false).
+ return;
+ }
+
for (size_t i = 0u; i < entry->num_planes(); ++i) {
MemoryAllocatorDump* dump = pmd->CreateAllocatorDump(
dump_base_name +
base::StringPrintf("/plane_%0u", base::checked_cast<uint32_t>(i)));
- if (plane_sizes.empty()) {
- // Hardware-decoded image case.
- dump->AddScalar(MemoryAllocatorDump::kNameSize,
- MemoryAllocatorDump::kUnitsBytes,
- (i == SkYUVAIndex::kY_Index) ? entry->CachedSize() : 0u);
- } else {
- DCHECK_EQ(plane_sizes.size(), entry->num_planes());
- dump->AddScalar(MemoryAllocatorDump::kNameSize,
- MemoryAllocatorDump::kUnitsBytes, plane_sizes.at(i));
- }
+ DCHECK_EQ(plane_sizes.size(), entry->num_planes());
+ dump->AddScalar(MemoryAllocatorDump::kNameSize,
+ MemoryAllocatorDump::kUnitsBytes, plane_sizes.at(i));
// If entry->image() is backed by multiple textures,
// getBackendTexture() would end up flattening them to RGB, which is
@@ -123,9 +130,11 @@ ServiceTransferCache::CacheEntryInternal&
ServiceTransferCache::CacheEntryInternal::operator=(
CacheEntryInternal&& other) = default;
-ServiceTransferCache::ServiceTransferCache()
+ServiceTransferCache::ServiceTransferCache(const GpuPreferences& preferences)
: entries_(EntryCache::NO_AUTO_EVICT),
- cache_size_limit_(DiscardableCacheSizeLimit()),
+ cache_size_limit_(preferences.force_gpu_mem_discardable_limit_bytes
+ ? preferences.force_gpu_mem_discardable_limit_bytes
+ : DiscardableCacheSizeLimit()),
max_cache_entries_(kMaxCacheEntries) {
// In certain cases, ThreadTaskRunnerHandle isn't set (Android Webview).
// Don't register a dump provider in these cases.
@@ -249,21 +258,10 @@ void ServiceTransferCache::EnforceLimits() {
void ServiceTransferCache::PurgeMemory(
base::MemoryPressureListener::MemoryPressureLevel memory_pressure_level) {
- switch (memory_pressure_level) {
- case base::MemoryPressureListener::MEMORY_PRESSURE_LEVEL_NONE:
- // This function is only called with moderate or critical pressure.
- NOTREACHED();
- return;
- case base::MemoryPressureListener::MEMORY_PRESSURE_LEVEL_MODERATE:
- cache_size_limit_ = cache_size_limit_ / 4;
- break;
- case base::MemoryPressureListener::MEMORY_PRESSURE_LEVEL_CRITICAL:
- cache_size_limit_ = 0u;
- break;
- }
-
+ base::AutoReset<size_t> reset_limit(
+ &cache_size_limit_, DiscardableCacheSizeLimitForPressure(
+ cache_size_limit_, memory_pressure_level));
EnforceLimits();
- cache_size_limit_ = DiscardableCacheSizeLimit();
}
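A small illustration of the base::AutoReset idiom adopted above: it installs a temporary value for the lifetime of a scope and restores the previous value on destruction, so the cache limit is only lowered while EnforceLimits() runs. The byte values are made up:

    size_t cache_size_limit = 128 * 1024 * 1024;
    {
      base::AutoReset<size_t> reset_limit(&cache_size_limit,
                                          cache_size_limit / 4);
      // cache_size_limit is 32 MB here; eviction runs against the reduced
      // limit for the duration of this scope.
    }
    // cache_size_limit is back to 128 MB once the scope exits.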
void ServiceTransferCache::DeleteAllEntriesForDecoder(int decoder_id) {
diff --git a/chromium/gpu/command_buffer/service/service_transfer_cache.h b/chromium/gpu/command_buffer/service/service_transfer_cache.h
index 990152b3072..c51b3367896 100644
--- a/chromium/gpu/command_buffer/service/service_transfer_cache.h
+++ b/chromium/gpu/command_buffer/service/service_transfer_cache.h
@@ -48,7 +48,7 @@ class GPU_GLES2_EXPORT ServiceTransferCache
uint32_t entry_id;
};
- ServiceTransferCache();
+ explicit ServiceTransferCache(const GpuPreferences& preferences);
~ServiceTransferCache() override;
bool CreateLockedEntry(const EntryKey& key,
@@ -137,10 +137,10 @@ class GPU_GLES2_EXPORT ServiceTransferCache
int total_image_count_ = 0;
// The limit above which the cache will start evicting resources.
- size_t cache_size_limit_ = 0;
+ size_t cache_size_limit_;
// The max number of entries we will hold in the cache.
- size_t max_cache_entries_ = 0;
+ size_t max_cache_entries_;
DISALLOW_COPY_AND_ASSIGN(ServiceTransferCache);
};
diff --git a/chromium/gpu/command_buffer/service/service_transfer_cache_unittest.cc b/chromium/gpu/command_buffer/service/service_transfer_cache_unittest.cc
index ff4412ccd77..4308b549cb4 100644
--- a/chromium/gpu/command_buffer/service/service_transfer_cache_unittest.cc
+++ b/chromium/gpu/command_buffer/service/service_transfer_cache_unittest.cc
@@ -5,6 +5,7 @@
#include "gpu/command_buffer/service/service_transfer_cache.h"
#include "cc/paint/raw_memory_transfer_cache_entry.h"
+#include "gpu/config/gpu_preferences.h"
#include "testing/gtest/include/gtest/gtest.h"
namespace gpu {
@@ -21,7 +22,7 @@ std::unique_ptr<cc::ServiceTransferCacheEntry> CreateEntry(size_t size) {
}
TEST(ServiceTransferCacheTest, EnforcesOnPurgeMemory) {
- ServiceTransferCache cache;
+ ServiceTransferCache cache{GpuPreferences()};
uint32_t entry_id = 0u;
size_t entry_size = 1024u;
uint32_t number_of_entry = 4u;
@@ -57,7 +58,7 @@ TEST(ServiceTransferCacheTest, EnforcesOnPurgeMemory) {
}
TEST(ServiceTransferCache, MultipleDecoderUse) {
- ServiceTransferCache cache;
+ ServiceTransferCache cache{GpuPreferences()};
const uint32_t entry_id = 0u;
const size_t entry_size = 1024u;
@@ -84,7 +85,7 @@ TEST(ServiceTransferCache, MultipleDecoderUse) {
}
TEST(ServiceTransferCache, DeleteEntriesForDecoder) {
- ServiceTransferCache cache;
+ ServiceTransferCache cache{GpuPreferences()};
const size_t entry_size = 1024u;
const size_t cache_size = 4 * entry_size;
cache.SetCacheSizeLimitForTesting(cache_size);
diff --git a/chromium/gpu/command_buffer/service/service_utils.cc b/chromium/gpu/command_buffer/service/service_utils.cc
index 0a7df7e9292..5c17236c511 100644
--- a/chromium/gpu/command_buffer/service/service_utils.cc
+++ b/chromium/gpu/command_buffer/service/service_utils.cc
@@ -8,6 +8,7 @@
#include "base/command_line.h"
#include "base/strings/string_number_conversions.h"
+#include "build/build_config.h"
#include "gpu/command_buffer/common/gles2_cmd_utils.h"
#include "gpu/command_buffer/service/context_group.h"
#include "gpu/command_buffer/service/gpu_switches.h"
@@ -129,9 +130,16 @@ GpuPreferences ParseGpuPreferences(const base::CommandLine* command_line) {
gpu_preferences.enforce_gl_minimums =
command_line->HasSwitch(switches::kEnforceGLMinimums);
if (GetUintFromSwitch(command_line, switches::kForceGpuMemAvailableMb,
- &gpu_preferences.force_gpu_mem_available)) {
- gpu_preferences.force_gpu_mem_available *= 1024 * 1024;
+ &gpu_preferences.force_gpu_mem_available_bytes)) {
+ gpu_preferences.force_gpu_mem_available_bytes *= 1024 * 1024;
}
+ if (GetUintFromSwitch(
+ command_line, switches::kForceGpuMemDiscardableLimitMb,
+ &gpu_preferences.force_gpu_mem_discardable_limit_bytes)) {
+ gpu_preferences.force_gpu_mem_discardable_limit_bytes *= 1024 * 1024;
+ }
+ GetUintFromSwitch(command_line, switches::kForceMaxTextureSize,
+ &gpu_preferences.force_max_texture_size);
if (GetUintFromSwitch(command_line, switches::kGpuProgramCacheSizeKb,
&gpu_preferences.gpu_program_cache_size)) {
gpu_preferences.gpu_program_cache_size *= 1024;
@@ -154,63 +162,49 @@ GpuPreferences ParseGpuPreferences(const base::CommandLine* command_line) {
command_line->HasSwitch(switches::kIgnoreGpuBlacklist);
gpu_preferences.enable_webgpu =
command_line->HasSwitch(switches::kEnableUnsafeWebGPU);
- if (command_line->HasSwitch(switches::kUseVulkan)) {
- auto value = command_line->GetSwitchValueASCII(switches::kUseVulkan);
- if (value.empty() || value == switches::kVulkanImplementationNameNative) {
- gpu_preferences.use_vulkan = VulkanImplementationName::kForcedNative;
- } else if (value == switches::kVulkanImplementationNameSwiftshader) {
- gpu_preferences.use_vulkan = VulkanImplementationName::kSwiftshader;
- } else {
- gpu_preferences.use_vulkan = VulkanImplementationName::kNone;
- }
- }
+ gpu_preferences.gr_context_type = ParseGrContextType();
+ gpu_preferences.use_vulkan = ParseVulkanImplementationName(
+ command_line, gpu_preferences.gr_context_type);
gpu_preferences.disable_vulkan_surface =
command_line->HasSwitch(switches::kDisableVulkanSurface);
- if (command_line->HasSwitch(switches::kGrContextType)) {
- auto value = command_line->GetSwitchValueASCII(switches::kGrContextType);
- if (value == switches::kGrContextTypeGL) {
- gpu_preferences.gr_context_type = GrContextType::kGL;
- } else if (value == switches::kGrContextTypeVulkan) {
- gpu_preferences.gr_context_type = GrContextType::kVulkan;
- } else if (value == switches::kGrContextTypeMetal) {
-#if defined(OS_MACOSX)
- DCHECK(base::FeatureList::IsEnabled(features::kMetal))
- << "GrContextType is Metal, but Metal is not enabled.";
- gpu_preferences.gr_context_type = GrContextType::kMetal;
-#endif
+
+ gpu_preferences.enable_gpu_blocked_time_metric =
+ command_line->HasSwitch(switches::kEnableGpuBlockedTime);
+
+ return gpu_preferences;
+}
+
+GrContextType ParseGrContextType() {
#if BUILDFLAG(SKIA_USE_DAWN)
- } else if (value == switches::kGrContextTypeDawn) {
- gpu_preferences.gr_context_type = GrContextType::kDawn;
+ if (base::FeatureList::IsEnabled(features::kSkiaDawn))
+ return GrContextType::kDawn;
#endif
- } else {
- NOTREACHED() << "Invalid GrContextType.";
- gpu_preferences.gr_context_type = GrContextType::kGL;
- }
- } else {
#if defined(OS_MACOSX)
- gpu_preferences.gr_context_type =
- base::FeatureList::IsEnabled(features::kMetal) ?
- GrContextType::kMetal :
- GrContextType::kGL;
+ return base::FeatureList::IsEnabled(features::kMetal) ? GrContextType::kMetal
+ : GrContextType::kGL;
#else
- if (base::FeatureList::IsEnabled(features::kVulkan)) {
- gpu_preferences.gr_context_type = GrContextType::kVulkan;
- } else {
- gpu_preferences.gr_context_type = GrContextType::kGL;
- }
+ return base::FeatureList::IsEnabled(features::kVulkan)
+ ? GrContextType::kVulkan
+ : GrContextType::kGL;
#endif
- }
- if (gpu_preferences.gr_context_type == GrContextType::kVulkan &&
- gpu_preferences.use_vulkan == gpu::VulkanImplementationName::kNone) {
- // If gpu_preferences.use_vulkan is not set from --use-vulkan, the native
- // vulkan implementation will be used by default.
- gpu_preferences.use_vulkan = gpu::VulkanImplementationName::kNative;
- }
-
- gpu_preferences.enable_gpu_blocked_time_metric =
- command_line->HasSwitch(switches::kEnableGpuBlockedTime);
+}
- return gpu_preferences;
+VulkanImplementationName ParseVulkanImplementationName(
+ const base::CommandLine* command_line,
+ GrContextType gr_context_type) {
+ if (command_line->HasSwitch(switches::kUseVulkan)) {
+ auto value = command_line->GetSwitchValueASCII(switches::kUseVulkan);
+ if (value.empty() || value == switches::kVulkanImplementationNameNative) {
+ return VulkanImplementationName::kForcedNative;
+ } else if (value == switches::kVulkanImplementationNameSwiftshader) {
+ return VulkanImplementationName::kSwiftshader;
+ }
+ }
+ // If the vulkan implementation is not set from --use-vulkan, the native
+ // vulkan implementation will be used by default.
+ return gr_context_type == GrContextType::kVulkan
+ ? VulkanImplementationName::kNative
+ : VulkanImplementationName::kNone;
}
} // namespace gles2
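A hedged summary of how the two new helpers above compose, assuming |command_line| is the process command line; as far as the code above shows, the resulting behavior is:

    GrContextType type = ParseGrContextType();  // driven by feature flags
    VulkanImplementationName impl =
        ParseVulkanImplementationName(command_line, type);
    // --use-vulkan or --use-vulkan=native   -> kForcedNative
    // --use-vulkan=swiftshader              -> kSwiftshader
    // no switch and type == kVulkan         -> kNative
    // no switch and any other type          -> kNone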
diff --git a/chromium/gpu/command_buffer/service/service_utils.h b/chromium/gpu/command_buffer/service/service_utils.h
index 1d5d523ef7a..76a802e433c 100644
--- a/chromium/gpu/command_buffer/service/service_utils.h
+++ b/chromium/gpu/command_buffer/service/service_utils.h
@@ -6,12 +6,12 @@
#define GPU_COMMAND_BUFFER_SERVICE_SERVICE_UTILS_H_
#include "base/command_line.h"
+#include "gpu/config/gpu_preferences.h"
#include "gpu/gpu_gles2_export.h"
#include "ui/gl/gl_context.h"
namespace gpu {
struct ContextCreationAttribs;
-struct GpuPreferences;
namespace gles2 {
class ContextGroup;
@@ -34,6 +34,17 @@ GPU_GLES2_EXPORT bool PassthroughCommandDecoderSupported();
GPU_GLES2_EXPORT GpuPreferences
ParseGpuPreferences(const base::CommandLine* command_line);
+// Determine which Skia GrContext backend will be used for GPU compositing and
+// rasterization (if enabled) by checking the feature flags for Vulkan and
+// Metal. If they are not enabled, default to GL.
+GPU_GLES2_EXPORT GrContextType ParseGrContextType();
+
+// Parse the value of --use-vulkan from the command line. If unspecified and
+// a Vulkan GrContext is going to be used, default to the native implementation.
+GPU_GLES2_EXPORT VulkanImplementationName
+ParseVulkanImplementationName(const base::CommandLine* command_line,
+ GrContextType gr_context_type);
+
} // namespace gles2
} // namespace gpu
diff --git a/chromium/gpu/command_buffer/service/shared_context_state.cc b/chromium/gpu/command_buffer/service/shared_context_state.cc
index 295242470fd..a20500f4f54 100644
--- a/chromium/gpu/command_buffer/service/shared_context_state.cc
+++ b/chromium/gpu/command_buffer/service/shared_context_state.cc
@@ -4,14 +4,17 @@
#include "gpu/command_buffer/service/shared_context_state.h"
+#include "base/strings/stringprintf.h"
#include "base/threading/thread_task_runner_handle.h"
#include "base/trace_event/memory_dump_manager.h"
+#include "components/crash/core/common/crash_key.h"
#include "gpu/command_buffer/common/activity_flags.h"
#include "gpu/command_buffer/service/context_state.h"
#include "gpu/command_buffer/service/gl_context_virtual.h"
#include "gpu/command_buffer/service/service_transfer_cache.h"
#include "gpu/command_buffer/service/service_utils.h"
#include "gpu/config/gpu_driver_bug_workarounds.h"
+#include "gpu/config/skia_limits.h"
#include "gpu/vulkan/buildflags.h"
#include "skia/buildflags.h"
#include "ui/gl/gl_bindings.h"
@@ -23,6 +26,11 @@
#if BUILDFLAG(ENABLE_VULKAN)
#include "components/viz/common/gpu/vulkan_context_provider.h"
+#include "gpu/vulkan/vulkan_device_queue.h"
+#endif
+
+#if defined(OS_FUCHSIA)
+#include "gpu/vulkan/fuchsia/vulkan_fuchsia_ext.h"
#endif
#if defined(OS_MACOSX)
@@ -49,7 +57,7 @@ void SharedContextState::compileError(const char* shader, const char* errors) {
}
SharedContextState::MemoryTracker::MemoryTracker(
- gpu::MemoryTracker::Observer* peak_memory_monitor)
+ base::WeakPtr<gpu::MemoryTracker::Observer> peak_memory_monitor)
: peak_memory_monitor_(peak_memory_monitor) {}
SharedContextState::MemoryTracker::~MemoryTracker() {
@@ -59,16 +67,15 @@ SharedContextState::MemoryTracker::~MemoryTracker() {
void SharedContextState::MemoryTracker::OnMemoryAllocatedChange(
CommandBufferId id,
uint64_t old_size,
- uint64_t new_size) {
- uint64_t delta = new_size - old_size;
- old_size = size_;
- size_ += delta;
- if (peak_memory_monitor_)
- peak_memory_monitor_->OnMemoryAllocatedChange(id, old_size, size_);
-}
-
-uint64_t SharedContextState::MemoryTracker::GetMemoryUsage() const {
- return size_;
+ uint64_t new_size,
+ GpuPeakMemoryAllocationSource source) {
+ size_ += new_size - old_size;
+ if (source == GpuPeakMemoryAllocationSource::UNKNOWN)
+ source = GpuPeakMemoryAllocationSource::SHARED_CONTEXT_STATE;
+ if (peak_memory_monitor_) {
+ peak_memory_monitor_->OnMemoryAllocatedChange(id, old_size, new_size,
+ source);
+ }
}
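A tiny sketch of the delta bookkeeping in OnMemoryAllocatedChange() above: accumulating new_size - old_size keeps the running total equal to the sum of all live allocations, while the peak monitor still receives each (old, new) pair. Values are illustrative:

    uint64_t size = 0;
    auto on_change = [&size](uint64_t old_size, uint64_t new_size) {
      size += new_size - old_size;
    };
    on_change(0, 100);   // an allocation grows from 0 to 100 -> size == 100
    on_change(100, 40);  // the same allocation shrinks to 40 -> size == 40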
SharedContextState::SharedContextState(
@@ -81,7 +88,7 @@ SharedContextState::SharedContextState(
viz::VulkanContextProvider* vulkan_context_provider,
viz::MetalContextProvider* metal_context_provider,
viz::DawnContextProvider* dawn_context_provider,
- gpu::MemoryTracker::Observer* peak_memory_monitor)
+ base::WeakPtr<gpu::MemoryTracker::Observer> peak_memory_monitor)
: use_virtualized_gl_contexts_(use_virtualized_gl_contexts),
context_lost_callback_(std::move(context_lost_callback)),
gr_context_type_(gr_context_type),
@@ -93,8 +100,6 @@ SharedContextState::SharedContextState(
context_(context),
real_context_(std::move(context)),
surface_(std::move(surface)) {
- raster::DetermineGrCacheLimitsFromAvailableMemory(
- &max_resource_cache_bytes_, &glyph_cache_max_texture_bytes_);
if (GrContextIsVulkan()) {
#if BUILDFLAG(ENABLE_VULKAN)
gr_context_ = vk_context_provider_->GetGrContext();
@@ -124,6 +129,10 @@ SharedContextState::SharedContextState(
// Initialize the scratch buffer to some small initial size.
scratch_deserialization_buffer_.resize(
kInitialScratchDeserializationBufferSize);
+
+ static crash_reporter::CrashKeyString<16> crash_key("gr-context-type");
+ crash_key.Set(
+ base::StringPrintf("%u", static_cast<uint32_t>(gr_context_type_)));
}
SharedContextState::~SharedContextState() {
@@ -141,6 +150,15 @@ SharedContextState::~SharedContextState() {
// initialized.
DCHECK(!owned_gr_context_ || owned_gr_context_->unique());
+ // All GPU memory allocations tracked by this memory_tracker_, except
+ // skia_gr_cache_size_, should have been released by this point.
+ DCHECK_EQ(skia_gr_cache_size_, memory_tracker_.GetMemoryUsage());
+ // gr_context_ and all resources owned by it will be released soon, so set it
+ // to null, and UpdateSkiaOwnedMemorySize() will update skia memory usage to
+ // 0, to ensure that PeakGpuMemoryMonitor sees 0 allocated memory.
+ gr_context_ = nullptr;
+ UpdateSkiaOwnedMemorySize();
+
// Delete the GrContext. This will either do cleanup if the context is
// current, or the GrContext was already abandoned if the GLContext was lost.
owned_gr_context_.reset();
@@ -152,6 +170,7 @@ SharedContextState::~SharedContextState() {
}
void SharedContextState::InitializeGrContext(
+ const GpuPreferences& gpu_preferences,
const GpuDriverBugWorkarounds& workarounds,
GrContextOptions::PersistentCache* cache,
GpuProcessActivityFlags* activity_flags,
@@ -163,11 +182,25 @@ void SharedContextState::InitializeGrContext(
metal_context_provider_->SetProgressReporter(progress_reporter);
#endif
+ size_t max_resource_cache_bytes;
+ size_t glyph_cache_max_texture_bytes;
+ DetermineGrCacheLimitsFromAvailableMemory(&max_resource_cache_bytes,
+ &glyph_cache_max_texture_bytes);
+
if (GrContextIsGL()) {
DCHECK(context_->IsCurrent(nullptr));
+
+ std::vector<const char*> blacklisted_extensions;
+ constexpr char kQualcommTiledRendering[] = "GL_QCOM_tiled_rendering";
+ // We rely on |enable_threaded_texture_mailboxes| to limit the
+ // workaround to webview only.
+ if (workarounds.disable_qcomm_tiled_rendering &&
+ gpu_preferences.enable_threaded_texture_mailboxes) {
+ blacklisted_extensions.push_back(kQualcommTiledRendering);
+ }
sk_sp<GrGLInterface> interface(gl::init::CreateGrGLInterface(
*context_->GetVersionInfo(), workarounds.use_es2_for_oopr,
- progress_reporter));
+ progress_reporter, blacklisted_extensions));
if (!interface) {
LOG(ERROR) << "OOP raster support disabled: GrGLInterface creation "
"failed.";
@@ -196,7 +229,7 @@ void SharedContextState::InitializeGrContext(
options.fDriverBugWorkarounds =
GrDriverBugWorkarounds(workarounds.ToIntSet());
options.fDisableCoverageCountingPaths = true;
- options.fGlyphCacheTextureMaximumBytes = glyph_cache_max_texture_bytes_;
+ options.fGlyphCacheTextureMaximumBytes = glyph_cache_max_texture_bytes;
options.fPersistentCache = cache;
options.fAvoidStencilBuffers = workarounds.avoid_stencil_buffers;
if (workarounds.disable_program_disk_cache) {
@@ -204,6 +237,8 @@ void SharedContextState::InitializeGrContext(
GrContextOptions::ShaderCacheStrategy::kBackendSource;
}
options.fShaderErrorHandler = this;
+ if (gpu_preferences.force_max_texture_size)
+ options.fMaxTextureSizeOverride = gpu_preferences.force_max_texture_size;
// TODO(csmartdalton): enable internal multisampling after the related Skia
// rolls are in.
options.fInternalMultisampleCount = 0;
@@ -215,9 +250,9 @@ void SharedContextState::InitializeGrContext(
LOG(ERROR) << "OOP raster support disabled: GrContext creation "
"failed.";
} else {
- gr_context_->setResourceCacheLimit(max_resource_cache_bytes_);
+ gr_context_->setResourceCacheLimit(max_resource_cache_bytes);
}
- transfer_cache_ = std::make_unique<ServiceTransferCache>();
+ transfer_cache_ = std::make_unique<ServiceTransferCache>(gpu_preferences);
}
bool SharedContextState::InitializeGL(
@@ -252,6 +287,10 @@ bool SharedContextState::InitializeGL(
GLint max_vertex_attribs = 0;
api->glGetIntegervFn(GL_MAX_VERTEX_ATTRIBS, &max_vertex_attribs);
if (max_vertex_attribs < kGLES2RequiredMinimumVertexAttribs) {
+ LOG(ERROR)
+ << "SharedContextState::InitializeGL failure max_vertex_attribs : "
<< max_vertex_attribs << " is less than the minimum required : "
+ << kGLES2RequiredMinimumVertexAttribs;
feature_info_ = nullptr;
return false;
}
@@ -275,6 +314,8 @@ bool SharedContextState::InitializeGL(
// inconsistent between various ContextStates on the same underlying real
// GL context. Make sure to report the failure early, to not allow
// virtualized context switches in that case.
+ LOG(ERROR) << "SharedContextState::InitializeGL failure driver error : "
+ << driver_status;
feature_info_ = nullptr;
context_state_ = nullptr;
return false;
@@ -285,6 +326,8 @@ bool SharedContextState::InitializeGL(
share_group_.get(), real_context_.get(),
weak_ptr_factory_.GetWeakPtr());
if (!virtual_context->Initialize(surface_.get(), gl::GLContextAttribs())) {
+ LOG(ERROR) << "SharedContextState::InitializeGL failure Initialize "
+ "virtual context failed";
feature_info_ = nullptr;
context_state_ = nullptr;
return false;
@@ -300,38 +343,103 @@ bool SharedContextState::InitializeGL(
// Swiftshader GL and Vulkan report supporting external objects extensions,
// but they don't.
+ bool gl_supports_memory_object =
+ gl::g_current_gl_driver->ext.b_GL_EXT_memory_object_fd ||
+ gl::g_current_gl_driver->ext.b_GL_ANGLE_memory_object_fuchsia;
+ bool gl_supports_semaphore =
+ gl::g_current_gl_driver->ext.b_GL_EXT_semaphore_fd ||
+ gl::g_current_gl_driver->ext.b_GL_ANGLE_semaphore_fuchsia;
+ bool vk_supports_external_memory = false;
+ bool vk_supports_external_semaphore = false;
+#if BUILDFLAG(ENABLE_VULKAN)
+ if (vk_context_provider_) {
+ const auto& extensions =
+ vk_context_provider_->GetDeviceQueue()->enabled_extensions();
+#if !defined(OS_FUCHSIA)
+ vk_supports_external_memory =
+ gfx::HasExtension(extensions, VK_KHR_EXTERNAL_MEMORY_EXTENSION_NAME) &&
+ gfx::HasExtension(extensions, VK_KHR_EXTERNAL_MEMORY_FD_EXTENSION_NAME);
+ vk_supports_external_semaphore =
+ gfx::HasExtension(extensions,
+ VK_KHR_EXTERNAL_SEMAPHORE_EXTENSION_NAME) &&
+ gfx::HasExtension(extensions,
+ VK_KHR_EXTERNAL_SEMAPHORE_FD_EXTENSION_NAME);
+#else
+ vk_supports_external_memory =
+ gfx::HasExtension(extensions, VK_KHR_EXTERNAL_MEMORY_EXTENSION_NAME) &&
+ gfx::HasExtension(extensions,
+ VK_FUCHSIA_EXTERNAL_MEMORY_EXTENSION_NAME);
+ vk_supports_external_semaphore =
+ gfx::HasExtension(extensions,
+ VK_KHR_EXTERNAL_SEMAPHORE_EXTENSION_NAME) &&
+ gfx::HasExtension(extensions,
+ VK_FUCHSIA_EXTERNAL_SEMAPHORE_EXTENSION_NAME);
+#endif
+ }
+#endif // BUILDFLAG(ENABLE_VULKAN)
+
support_vulkan_external_object_ =
!gl::g_current_gl_version->is_swiftshader && is_native_vulkan &&
- gl::g_current_gl_driver->ext.b_GL_EXT_memory_object_fd &&
- gl::g_current_gl_driver->ext.b_GL_EXT_semaphore_fd;
+ gl_supports_memory_object && gl_supports_semaphore &&
+ vk_supports_external_memory && vk_supports_external_semaphore;
return true;
}
bool SharedContextState::MakeCurrent(gl::GLSurface* surface, bool needs_gl) {
+ if (context_lost_)
+ return false;
+
+ if (gr_context_ && gr_context_->abandoned()) {
+ MarkContextLost();
+ return false;
+ }
+
if (!GrContextIsGL() && !needs_gl)
return true;
- if (context_lost_)
- return false;
+ gl::GLSurface* dont_care_surface =
+ last_current_surface_ ? last_current_surface_ : surface_.get();
+ surface = surface ? surface : dont_care_surface;
- if (!context_->MakeCurrent(surface ? surface : surface_.get())) {
+ if (!context_->MakeCurrent(surface)) {
MarkContextLost();
return false;
}
+ last_current_surface_ = surface;
+
return true;
}
+void SharedContextState::ReleaseCurrent(gl::GLSurface* surface) {
+ if (!surface)
+ surface = last_current_surface_;
+
+ if (surface != last_current_surface_)
+ return;
+
+ last_current_surface_ = nullptr;
+ if (!context_lost_)
+ context_->ReleaseCurrent(surface);
+}
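A sketch, with a hypothetical helper, of how MakeCurrent() and the new ReleaseCurrent() above pair up: MakeCurrent() caches the surface it bound so a later MakeCurrent(nullptr) rebinds the same one, and ReleaseCurrent(nullptr) releases whichever surface is currently cached:

    void DoGpuWork(SharedContextState* state, gl::GLSurface* surface) {
      if (!state->MakeCurrent(surface, /*needs_gl=*/true))
        return;  // context lost or GrContext abandoned
      // ... issue GL work against |surface| ...
      state->ReleaseCurrent(nullptr);  // releases |surface|, clears the cache
    }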
+
void SharedContextState::MarkContextLost() {
- DCHECK(GrContextIsGL());
if (!context_lost_) {
scoped_refptr<SharedContextState> prevent_last_ref_drop = this;
context_lost_ = true;
// context_state_ could be nullptr for some unittests.
if (context_state_)
context_state_->MarkContextLost();
- if (gr_context_)
- gr_context_->abandonContext();
+ // Only abandon the GrContext if it is owned by SharedContextState, because
+ // the passed in GrContext will be reused.
+ // TODO(https://crbug.com/1048692): always abandon GrContext to release all
+ // resources when Chrome goes into the background on low-end devices.
+ if (owned_gr_context_) {
+ owned_gr_context_->abandonContext();
+ owned_gr_context_.reset();
+ gr_context_ = nullptr;
+ }
+ UpdateSkiaOwnedMemorySize();
std::move(context_lost_callback_).Run();
for (auto& observer : context_lost_observers_)
observer.OnContextLost();
@@ -378,16 +486,16 @@ void SharedContextState::PurgeMemory(
}
// Ensure the context is current before doing any GPU cleanup.
- MakeCurrent(nullptr);
+ if (!MakeCurrent(nullptr))
+ return;
switch (memory_pressure_level) {
case base::MemoryPressureListener::MEMORY_PRESSURE_LEVEL_NONE:
- // This function is only called with moderate or critical pressure.
- NOTREACHED();
return;
case base::MemoryPressureListener::MEMORY_PRESSURE_LEVEL_MODERATE:
// With moderate pressure, clear any unlocked resources.
gr_context_->purgeUnlockedResources(true /* scratchResourcesOnly */);
+ UpdateSkiaOwnedMemorySize();
scratch_deserialization_buffer_.resize(
kInitialScratchDeserializationBufferSize);
scratch_deserialization_buffer_.shrink_to_fit();
@@ -395,12 +503,36 @@ void SharedContextState::PurgeMemory(
case base::MemoryPressureListener::MEMORY_PRESSURE_LEVEL_CRITICAL:
// With critical pressure, purge as much as possible.
gr_context_->freeGpuResources();
+ UpdateSkiaOwnedMemorySize();
scratch_deserialization_buffer_.resize(0u);
scratch_deserialization_buffer_.shrink_to_fit();
break;
}
- transfer_cache_->PurgeMemory(memory_pressure_level);
+ if (transfer_cache_)
+ transfer_cache_->PurgeMemory(memory_pressure_level);
+}
+
+uint64_t SharedContextState::GetMemoryUsage() {
+ UpdateSkiaOwnedMemorySize();
+ return memory_tracker_.GetMemoryUsage();
+}
+
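+// Reports Skia's current GPU resource cache usage to |memory_tracker_|,
+// replacing the previously reported |skia_gr_cache_size_|. If the GrContext
+// has been destroyed, the previously reported size is released.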
+void SharedContextState::UpdateSkiaOwnedMemorySize() {
+ if (!gr_context_) {
+ memory_tracker_.OnMemoryAllocatedChange(CommandBufferId(),
+ skia_gr_cache_size_, 0u);
+ skia_gr_cache_size_ = 0u;
+ return;
+ }
+ size_t new_size;
+ gr_context_->getResourceCacheUsage(nullptr /* resourceCount */, &new_size);
+ // Skia does not have a CommandBufferId. PeakMemoryMonitor currently does not
+ // use CommandBufferId to identify the source, so use zero here to prevent
+ // confusion.
+ memory_tracker_.OnMemoryAllocatedChange(
+ CommandBufferId(), skia_gr_cache_size_, static_cast<uint64_t>(new_size));
+ skia_gr_cache_size_ = static_cast<uint64_t>(new_size);
}
void SharedContextState::PessimisticallyResetGrContext() const {
diff --git a/chromium/gpu/command_buffer/service/shared_context_state.h b/chromium/gpu/command_buffer/service/shared_context_state.h
index 65adbeaf5e4..5c83ed2955e 100644
--- a/chromium/gpu/command_buffer/service/shared_context_state.h
+++ b/chromium/gpu/command_buffer/service/shared_context_state.h
@@ -19,6 +19,7 @@
#include "gpu/command_buffer/service/memory_tracking.h"
#include "gpu/config/gpu_preferences.h"
#include "gpu/gpu_gles2_export.h"
+#include "gpu/ipc/common/gpu_peak_memory.h"
#include "third_party/skia/include/gpu/GrContext.h"
#include "ui/gl/progress_reporter.h"
@@ -62,9 +63,11 @@ class GPU_GLES2_EXPORT SharedContextState
viz::VulkanContextProvider* vulkan_context_provider = nullptr,
viz::MetalContextProvider* metal_context_provider = nullptr,
viz::DawnContextProvider* dawn_context_provider = nullptr,
- gpu::MemoryTracker::Observer* peak_memory_monitor = nullptr);
+ base::WeakPtr<gpu::MemoryTracker::Observer> peak_memory_monitor =
+ nullptr);
- void InitializeGrContext(const GpuDriverBugWorkarounds& workarounds,
+ void InitializeGrContext(const GpuPreferences& gpu_preferences,
+ const GpuDriverBugWorkarounds& workarounds,
GrContextOptions::PersistentCache* cache,
GpuProcessActivityFlags* activity_flags = nullptr,
gl::ProgressReporter* progress_reporter = nullptr);
@@ -86,13 +89,15 @@ class GPU_GLES2_EXPORT SharedContextState
bool IsGLInitialized() const { return !!feature_info_; }
bool MakeCurrent(gl::GLSurface* surface, bool needs_gl = false);
+ void ReleaseCurrent(gl::GLSurface* surface);
void MarkContextLost();
bool IsCurrent(gl::GLSurface* surface);
void PurgeMemory(
base::MemoryPressureListener::MemoryPressureLevel memory_pressure_level);
- uint64_t GetMemoryUsage() const { return memory_tracker_.GetMemoryUsage(); }
+ void UpdateSkiaOwnedMemorySize();
+ uint64_t GetMemoryUsage();
void PessimisticallyResetGrContext() const;
@@ -124,10 +129,6 @@ class GPU_GLES2_EXPORT SharedContextState
std::vector<uint8_t>* scratch_deserialization_buffer() {
return &scratch_deserialization_buffer_;
}
- size_t max_resource_cache_bytes() const { return max_resource_cache_bytes_; }
- size_t glyph_cache_max_texture_bytes() const {
- return glyph_cache_max_texture_bytes_;
- }
bool use_virtualized_gl_contexts() const {
return use_virtualized_gl_contexts_;
}
@@ -158,22 +159,26 @@ class GPU_GLES2_EXPORT SharedContextState
// shared image, and forward information to both histograms and task manager.
class GPU_GLES2_EXPORT MemoryTracker : public gpu::MemoryTracker::Observer {
public:
- MemoryTracker(gpu::MemoryTracker::Observer* peak_memory_monitor);
+ explicit MemoryTracker(
+ base::WeakPtr<gpu::MemoryTracker::Observer> peak_memory_monitor);
MemoryTracker(MemoryTracker&) = delete;
MemoryTracker& operator=(MemoryTracker&) = delete;
~MemoryTracker() override;
// gpu::MemoryTracker::Observer implementation:
- void OnMemoryAllocatedChange(CommandBufferId id,
- uint64_t old_size,
- uint64_t new_size) override;
+ void OnMemoryAllocatedChange(
+ CommandBufferId id,
+ uint64_t old_size,
+ uint64_t new_size,
+ GpuPeakMemoryAllocationSource source =
+ GpuPeakMemoryAllocationSource::UNKNOWN) override;
// Reports to GpuServiceImpl::GetVideoMemoryUsageStats()
- uint64_t GetMemoryUsage() const;
+ uint64_t GetMemoryUsage() const { return size_; }
private:
uint64_t size_ = 0;
- gpu::MemoryTracker::Observer* const peak_memory_monitor_;
+ base::WeakPtr<gpu::MemoryTracker::Observer> const peak_memory_monitor_;
};
~SharedContextState() override;
@@ -212,6 +217,12 @@ class GPU_GLES2_EXPORT SharedContextState
scoped_refptr<gl::GLContext> context_;
scoped_refptr<gl::GLContext> real_context_;
scoped_refptr<gl::GLSurface> surface_;
+
+ // Most recent surface that this SharedContextState was made current with.
+ // Avoids a redundant MakeCurrent call with a different surface when we
+ // don't care which surface is current.
+ gl::GLSurface* last_current_surface_ = nullptr;
+
scoped_refptr<gles2::FeatureInfo> feature_info_;
// raster decoders and display compositor share this context_state_.
@@ -220,8 +231,7 @@ class GPU_GLES2_EXPORT SharedContextState
gl::ProgressReporter* progress_reporter_ = nullptr;
sk_sp<GrContext> owned_gr_context_;
std::unique_ptr<ServiceTransferCache> transfer_cache_;
- size_t max_resource_cache_bytes_ = 0u;
- size_t glyph_cache_max_texture_bytes_ = 0u;
+ uint64_t skia_gr_cache_size_ = 0;
std::vector<uint8_t> scratch_deserialization_buffer_;
// |need_context_state_reset| is set whenever Skia may have altered the
diff --git a/chromium/gpu/command_buffer/service/shared_image_backing.cc b/chromium/gpu/command_buffer/service/shared_image_backing.cc
index bddce86bb50..dcef9d4ac7b 100644
--- a/chromium/gpu/command_buffer/service/shared_image_backing.cc
+++ b/chromium/gpu/command_buffer/service/shared_image_backing.cc
@@ -7,6 +7,7 @@
#include "gpu/command_buffer/service/memory_tracking.h"
#include "gpu/command_buffer/service/shared_context_state.h"
#include "gpu/command_buffer/service/shared_image_representation.h"
+#include "gpu/command_buffer/service/texture_manager.h"
namespace gpu {
@@ -109,9 +110,6 @@ void SharedImageBacking::ReleaseRef(SharedImageRepresentation* representation) {
refs_[0]->tracker()->TrackMemAlloc(estimated_size_);
return;
}
-
- // Last ref deleted, clean up.
- Destroy();
}
bool SharedImageBacking::HasAnyRefs() const {
@@ -121,6 +119,7 @@ bool SharedImageBacking::HasAnyRefs() const {
}
void SharedImageBacking::OnReadSucceeded() {
+ AutoLock auto_lock(this);
if (scoped_write_uma_) {
scoped_write_uma_->SetConsumed();
scoped_write_uma_.reset();
@@ -128,6 +127,7 @@ void SharedImageBacking::OnReadSucceeded() {
}
void SharedImageBacking::OnWriteSucceeded() {
+ AutoLock auto_lock(this);
scoped_write_uma_.emplace();
}
@@ -136,18 +136,9 @@ size_t SharedImageBacking::EstimatedSizeForMemTracking() const {
}
bool SharedImageBacking::have_context() const {
- AssertLockedIfNecessary();
-
- DCHECK(refs_.empty());
-
return have_context_;
}
-void SharedImageBacking::AssertLockedIfNecessary() const {
- if (lock_)
- lock_->AssertAcquired();
-}
-
SharedImageBacking::AutoLock::AutoLock(
const SharedImageBacking* shared_image_backing)
: auto_lock_(InitializeLock(shared_image_backing)) {}
@@ -162,4 +153,44 @@ base::Lock* SharedImageBacking::AutoLock::InitializeLock(
return &shared_image_backing->lock_.value();
}
+ClearTrackingSharedImageBacking::ClearTrackingSharedImageBacking(
+ const Mailbox& mailbox,
+ viz::ResourceFormat format,
+ const gfx::Size& size,
+ const gfx::ColorSpace& color_space,
+ uint32_t usage,
+ size_t estimated_size,
+ bool is_thread_safe)
+ : SharedImageBacking(mailbox,
+ format,
+ size,
+ color_space,
+ usage,
+ estimated_size,
+ is_thread_safe) {}
+
+gfx::Rect ClearTrackingSharedImageBacking::ClearedRect() const {
+ AutoLock auto_lock(this);
+ return ClearedRectInternal();
+}
+
+void ClearTrackingSharedImageBacking::SetClearedRect(
+ const gfx::Rect& cleared_rect) {
+ AutoLock auto_lock(this);
+ SetClearedRectInternal(cleared_rect);
+}
+
+gfx::Rect ClearTrackingSharedImageBacking::ClearedRectInternal() const {
+ return cleared_rect_;
+}
+
+void ClearTrackingSharedImageBacking::SetClearedRectInternal(
+ const gfx::Rect& cleared_rect) {
+ cleared_rect_ = cleared_rect;
+}
+
+scoped_refptr<gfx::NativePixmap> SharedImageBacking::GetNativePixmap() {
+ return nullptr;
+}
+
} // namespace gpu
diff --git a/chromium/gpu/command_buffer/service/shared_image_backing.h b/chromium/gpu/command_buffer/service/shared_image_backing.h
index 9e6f95cc53f..ace5705712e 100644
--- a/chromium/gpu/command_buffer/service/shared_image_backing.h
+++ b/chromium/gpu/command_buffer/service/shared_image_backing.h
@@ -18,7 +18,9 @@
#include "gpu/command_buffer/common/mailbox.h"
#include "gpu/gpu_gles2_export.h"
#include "ui/gfx/color_space.h"
+#include "ui/gfx/geometry/rect.h"
#include "ui/gfx/geometry/size.h"
+#include "ui/gfx/native_pixmap.h"
namespace base {
namespace trace_event {
@@ -64,6 +66,7 @@ class GPU_GLES2_EXPORT SharedImageBacking {
uint32_t usage() const { return usage_; }
const Mailbox& mailbox() const { return mailbox_; }
size_t estimated_size() const { return estimated_size_; }
+ bool is_thread_safe() const { return !!lock_; }
void OnContextLost();
// Concrete functions to manage a ref count.
@@ -76,21 +79,18 @@ class GPU_GLES2_EXPORT SharedImageBacking {
// Notify the backing that a write access succeeded.
void OnWriteSucceeded();
- // Tracks whether the backing has ever been cleared, or whether it may contain
- // uninitialized pixels.
- virtual bool IsCleared() const = 0;
+ // Returns the initialized / cleared region of the SharedImage.
+ virtual gfx::Rect ClearedRect() const = 0;
- // Marks the backing as cleared, after which point it is assumed to contain no
- // unintiailized pixels.
- virtual void SetCleared() = 0;
+ // Marks the provided rect as cleared.
+ virtual void SetClearedRect(const gfx::Rect& cleared_rect) = 0;
virtual void Update(std::unique_ptr<gfx::GpuFence> in_fence) = 0;
- // Destroys the underlying backing. Must be called before destruction.
- virtual void Destroy() = 0;
-
virtual bool PresentSwapChain();
+ virtual void MarkForDestruction() {}
+
// Allows the backing to attach additional data to the dump or dump
// additional sub paths.
virtual void OnMemoryDump(const std::string& dump_name,
@@ -106,6 +106,16 @@ class GPU_GLES2_EXPORT SharedImageBacking {
// tracking.
virtual size_t EstimatedSizeForMemTracking() const;
+ // Returns the NativePixmap backing the SharedImageBacking. Returns null if
+ // the SharedImage is not backed by a NativePixmap.
+ virtual scoped_refptr<gfx::NativePixmap> GetNativePixmap();
+
+ // Helper to determine if the entire SharedImage is cleared.
+ bool IsCleared() const { return ClearedRect() == gfx::Rect(size()); }
+
+ // Helper function which clears the entire image.
+ void SetCleared() { SetClearedRect(gfx::Rect(size())); }
+
protected:
// Used by SharedImageManager.
friend class SharedImageManager;
@@ -130,15 +140,15 @@ class GPU_GLES2_EXPORT SharedImageBacking {
SharedImageManager* manager,
MemoryTypeTracker* tracker);
- // Used by subclasses in Destroy.
- bool have_context() const;
+ // Used by subclasses during destruction.
+ bool have_context() const EXCLUSIVE_LOCKS_REQUIRED(lock_);
- void AssertLockedIfNecessary() const;
-
- class GPU_GLES2_EXPORT AutoLock {
+ // Helper class used by subclasses to acquire |lock_| if it exists.
+ class SCOPED_LOCKABLE GPU_GLES2_EXPORT AutoLock {
public:
- explicit AutoLock(const SharedImageBacking* shared_image_backing);
- ~AutoLock();
+ explicit AutoLock(const SharedImageBacking* shared_image_backing)
+ EXCLUSIVE_LOCK_FUNCTION(shared_image_backing->lock_);
+ ~AutoLock() UNLOCK_FUNCTION();
AutoLock(const AutoLock&) = delete;
AutoLock& operator=(const AutoLock&) = delete;
@@ -150,6 +160,11 @@ class GPU_GLES2_EXPORT SharedImageBacking {
base::AutoLockMaybe auto_lock_;
};
+ // Protects non-const members here and in derived classes. Protected access
+ // to allow GUARDED_BY macros in derived classes. Should not be used
+ // directly. Use AutoLock instead.
+ mutable base::Optional<base::Lock> lock_;
+
private:
class ScopedWriteUMA {
public:
@@ -174,18 +189,41 @@ class GPU_GLES2_EXPORT SharedImageBacking {
const uint32_t usage_;
const size_t estimated_size_;
- // Protects non-const members here and in derived classes.
- mutable base::Optional<base::Lock> lock_;
-
- bool have_context_ = true;
+ bool have_context_ GUARDED_BY(lock_) = true;
// A scoped object for recording write UMA.
- base::Optional<ScopedWriteUMA> scoped_write_uma_;
+ base::Optional<ScopedWriteUMA> scoped_write_uma_ GUARDED_BY(lock_);
// A vector of SharedImageRepresentations which hold references to this
// backing. The first reference is considered the owner, and the vector is
// ordered by the order in which references were taken.
- std::vector<SharedImageRepresentation*> refs_;
+ std::vector<SharedImageRepresentation*> refs_ GUARDED_BY(lock_);
+};
+
+// Helper implementation of SharedImageBacking which tracks a simple
+// rectangular clear region. Classes which do not need more complex
+// implementations of SetClearedRect and ClearedRect can inherit from this.
+class GPU_GLES2_EXPORT ClearTrackingSharedImageBacking
+ : public SharedImageBacking {
+ public:
+ ClearTrackingSharedImageBacking(const Mailbox& mailbox,
+ viz::ResourceFormat format,
+ const gfx::Size& size,
+ const gfx::ColorSpace& color_space,
+ uint32_t usage,
+ size_t estimated_size,
+ bool is_thread_safe);
+
+ gfx::Rect ClearedRect() const override;
+ void SetClearedRect(const gfx::Rect& cleared_rect) override;
+
+ protected:
+ gfx::Rect ClearedRectInternal() const EXCLUSIVE_LOCKS_REQUIRED(lock_);
+ void SetClearedRectInternal(const gfx::Rect& cleared_rect)
+ EXCLUSIVE_LOCKS_REQUIRED(lock_);
+
+ private:
+ gfx::Rect cleared_rect_ GUARDED_BY(lock_);
};
} // namespace gpu
diff --git a/chromium/gpu/command_buffer/service/shared_image_backing_d3d.cc b/chromium/gpu/command_buffer/service/shared_image_backing_d3d.cc
new file mode 100644
index 00000000000..cf99b4119f5
--- /dev/null
+++ b/chromium/gpu/command_buffer/service/shared_image_backing_d3d.cc
@@ -0,0 +1,222 @@
+// Copyright 2019 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "gpu/command_buffer/service/shared_image_backing_d3d.h"
+
+#include "base/trace_event/memory_dump_manager.h"
+#include "components/viz/common/resources/resource_format_utils.h"
+#include "gpu/command_buffer/common/shared_image_trace_utils.h"
+#include "gpu/command_buffer/service/shared_image_representation_d3d.h"
+#include "gpu/command_buffer/service/shared_image_representation_skia_gl.h"
+#include "ui/gl/trace_util.h"
+
+namespace gpu {
+
+namespace {
+
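+// Saves the current GL_TEXTURE_2D binding on construction and restores it on
+// destruction, so temporarily binding the swap chain texture does not leak GL
+// state to the caller.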
+class ScopedRestoreTexture2D {
+ public:
+ explicit ScopedRestoreTexture2D(gl::GLApi* api) : api_(api) {
+ GLint binding = 0;
+ api->glGetIntegervFn(GL_TEXTURE_BINDING_2D, &binding);
+ prev_binding_ = binding;
+ }
+
+ ~ScopedRestoreTexture2D() {
+ api_->glBindTextureFn(GL_TEXTURE_2D, prev_binding_);
+ }
+
+ private:
+ gl::GLApi* const api_;
+ GLuint prev_binding_ = 0;
+ DISALLOW_COPY_AND_ASSIGN(ScopedRestoreTexture2D);
+};
+
+} // anonymous namespace
+
+SharedImageBackingD3D::SharedImageBackingD3D(
+ const Mailbox& mailbox,
+ viz::ResourceFormat format,
+ const gfx::Size& size,
+ const gfx::ColorSpace& color_space,
+ uint32_t usage,
+ Microsoft::WRL::ComPtr<IDXGISwapChain1> swap_chain,
+ scoped_refptr<gles2::TexturePassthrough> texture,
+ scoped_refptr<gl::GLImageD3D> image,
+ size_t buffer_index,
+ Microsoft::WRL::ComPtr<ID3D11Texture2D> d3d11_texture,
+ base::win::ScopedHandle shared_handle,
+ Microsoft::WRL::ComPtr<IDXGIKeyedMutex> dxgi_keyed_mutex)
+ : ClearTrackingSharedImageBacking(mailbox,
+ format,
+ size,
+ color_space,
+ usage,
+ texture->estimated_size(),
+ false /* is_thread_safe */),
+ swap_chain_(std::move(swap_chain)),
+ texture_(std::move(texture)),
+ image_(std::move(image)),
+ buffer_index_(buffer_index),
+ d3d11_texture_(std::move(d3d11_texture)),
+ shared_handle_(std::move(shared_handle)),
+ dxgi_keyed_mutex_(std::move(dxgi_keyed_mutex)) {
+ DCHECK(d3d11_texture_);
+ DCHECK(texture_);
+}
+
+SharedImageBackingD3D::~SharedImageBackingD3D() {
+ if (!have_context())
+ texture_->MarkContextLost();
+ texture_ = nullptr;
+ swap_chain_ = nullptr;
+ d3d11_texture_.Reset();
+ dxgi_keyed_mutex_.Reset();
+ keyed_mutex_acquire_key_ = 0;
+ keyed_mutex_acquired_ = false;
+ shared_handle_.Close();
+}
+
+void SharedImageBackingD3D::Update(std::unique_ptr<gfx::GpuFence> in_fence) {
+ DLOG(ERROR) << "SharedImageBackingD3D::Update : Trying to update "
+ "Shared Images associated with swap chain.";
+}
+
+bool SharedImageBackingD3D::ProduceLegacyMailbox(
+ MailboxManager* mailbox_manager) {
+ mailbox_manager->ProduceTexture(mailbox(), texture_.get());
+ return true;
+}
+
+std::unique_ptr<SharedImageRepresentationDawn>
+SharedImageBackingD3D::ProduceDawn(SharedImageManager* manager,
+ MemoryTypeTracker* tracker,
+ WGPUDevice device) {
+#if BUILDFLAG(USE_DAWN)
+ return std::make_unique<SharedImageRepresentationDawnD3D>(manager, this,
+ tracker, device);
+#else
+ return nullptr;
+#endif // BUILDFLAG(USE_DAWN)
+}
+
+void SharedImageBackingD3D::OnMemoryDump(
+ const std::string& dump_name,
+ base::trace_event::MemoryAllocatorDump* dump,
+ base::trace_event::ProcessMemoryDump* pmd,
+ uint64_t client_tracing_id) {
+ // Add a |service_guid| which expresses shared ownership between the
+ // various GPU dumps.
+ auto client_guid = GetSharedImageGUIDForTracing(mailbox());
+ base::trace_event::MemoryAllocatorDumpGuid service_guid =
+ gl::GetGLTextureServiceGUIDForTracing(texture_->service_id());
+ pmd->CreateSharedGlobalAllocatorDump(service_guid);
+
+ int importance = 2; // This client always owns the ref.
+ pmd->AddOwnershipEdge(client_guid, service_guid, importance);
+
+ // Swap chain textures only have one level backed by an image.
+ image_->OnMemoryDump(pmd, client_tracing_id, dump_name);
+}
+
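+// Hands out the key a D3D12 client should use to acquire the keyed mutex and
+// increments it for the subsequent release/acquire. Recursive BeginAccess is
+// rejected.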
+bool SharedImageBackingD3D::BeginAccessD3D12(uint64_t* acquire_key) {
+ if (keyed_mutex_acquired_) {
+ DLOG(ERROR) << "Recursive BeginAccess not supported";
+ return false;
+ }
+ *acquire_key = keyed_mutex_acquire_key_;
+ keyed_mutex_acquire_key_++;
+ keyed_mutex_acquired_ = true;
+ return true;
+}
+
+void SharedImageBackingD3D::EndAccessD3D12() {
+ keyed_mutex_acquired_ = false;
+}
+
+bool SharedImageBackingD3D::BeginAccessD3D11() {
+ if (dxgi_keyed_mutex_) {
+ if (keyed_mutex_acquired_) {
+ DLOG(ERROR) << "Recursive BeginAccess not supported";
+ return false;
+ }
+ const HRESULT hr =
+ dxgi_keyed_mutex_->AcquireSync(keyed_mutex_acquire_key_, INFINITE);
+ if (FAILED(hr)) {
+ DLOG(ERROR) << "Unable to acquire the keyed mutex " << std::hex << hr;
+ return false;
+ }
+ keyed_mutex_acquire_key_++;
+ keyed_mutex_acquired_ = true;
+ }
+ return true;
+}
+void SharedImageBackingD3D::EndAccessD3D11() {
+ if (dxgi_keyed_mutex_) {
+ const HRESULT hr = dxgi_keyed_mutex_->ReleaseSync(keyed_mutex_acquire_key_);
+ if (FAILED(hr)) {
+ DLOG(ERROR) << "Unable to release the keyed mutex " << std::hex << hr;
+ return;
+ }
+ keyed_mutex_acquired_ = false;
+ }
+}
+
+HANDLE SharedImageBackingD3D::GetSharedHandle() const {
+ return shared_handle_.Get();
+}
+
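+// Presents the swap chain; only the back buffer backing (|buffer_index_| == 0)
+// may do this. After Present1() the GLImage is re-bound to the texture and the
+// device context is flushed through ANGLE.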
+bool SharedImageBackingD3D::PresentSwapChain() {
+ TRACE_EVENT0("gpu", "SharedImageBackingD3D::PresentSwapChain");
+ if (buffer_index_ != 0) {
+ DLOG(ERROR) << "Swap chain backing does not correspond to back buffer";
+ return false;
+ }
+
+ DXGI_PRESENT_PARAMETERS params = {};
+ params.DirtyRectsCount = 0;
+ params.pDirtyRects = nullptr;
+
+ UINT flags = DXGI_PRESENT_ALLOW_TEARING;
+
+ HRESULT hr = swap_chain_->Present1(0 /* interval */, flags, &params);
+ if (FAILED(hr)) {
+ DLOG(ERROR) << "Present1 failed with error " << std::hex << hr;
+ return false;
+ }
+
+ gl::GLApi* const api = gl::g_current_gl_context;
+ ScopedRestoreTexture2D scoped_restore(api);
+
+ api->glBindTextureFn(GL_TEXTURE_2D, texture_->service_id());
+ if (!image_->BindTexImage(GL_TEXTURE_2D)) {
+ DLOG(ERROR) << "GLImageD3D::BindTexImage failed";
+ return false;
+ }
+
+ TRACE_EVENT0("gpu", "SharedImageBackingD3D::PresentSwapChain::Flush");
+ // Flush the device context through ANGLE, otherwise the present could be
+ // deferred.
+ api->glFlushFn();
+ return true;
+}
+
+std::unique_ptr<SharedImageRepresentationGLTexturePassthrough>
+SharedImageBackingD3D::ProduceGLTexturePassthrough(SharedImageManager* manager,
+ MemoryTypeTracker* tracker) {
+ TRACE_EVENT0("gpu", "SharedImageBackingD3D::ProduceGLTexturePassthrough");
+ return std::make_unique<SharedImageRepresentationGLTexturePassthroughD3D>(
+ manager, this, tracker, texture_);
+}
+
+std::unique_ptr<SharedImageRepresentationSkia>
+SharedImageBackingD3D::ProduceSkia(
+ SharedImageManager* manager,
+ MemoryTypeTracker* tracker,
+ scoped_refptr<SharedContextState> context_state) {
+ return SharedImageRepresentationSkiaGL::Create(
+ ProduceGLTexturePassthrough(manager, tracker), std::move(context_state),
+ manager, this, tracker);
+}
+
+} // namespace gpu
diff --git a/chromium/gpu/command_buffer/service/shared_image_backing_d3d.h b/chromium/gpu/command_buffer/service/shared_image_backing_d3d.h
new file mode 100644
index 00000000000..ecf44221054
--- /dev/null
+++ b/chromium/gpu/command_buffer/service/shared_image_backing_d3d.h
@@ -0,0 +1,113 @@
+// Copyright 2019 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef GPU_COMMAND_BUFFER_SERVICE_SHARED_IMAGE_BACKING_D3D_H_
+#define GPU_COMMAND_BUFFER_SERVICE_SHARED_IMAGE_BACKING_D3D_H_
+
+#include <d3d11.h>
+#include <dxgi1_2.h>
+#include <windows.h>
+#include <wrl/client.h>
+
+#include "base/macros.h"
+#include "components/viz/common/resources/resource_format.h"
+#include "gpu/command_buffer/service/mailbox_manager.h"
+#include "gpu/command_buffer/service/memory_tracking.h"
+#include "gpu/command_buffer/service/shared_context_state.h"
+#include "gpu/command_buffer/service/shared_image_manager.h"
+#include "gpu/command_buffer/service/shared_image_representation.h"
+#include "gpu/command_buffer/service/texture_manager.h"
+#include "ui/gl/gl_image_d3d.h"
+
+namespace gfx {
+class Size;
+class ColorSpace;
+} // namespace gfx
+
+namespace gpu {
+class SharedImageBacking;
+struct Mailbox;
+
+// Implementation of SharedImageBacking that holds a swap chain buffer texture
+// (front buffer or back buffer, as gles2::Texture/gles2::TexturePassthrough)
+// and a reference to the swap chain it was created from.
+class SharedImageBackingD3D : public ClearTrackingSharedImageBacking {
+ public:
+ SharedImageBackingD3D(
+ const Mailbox& mailbox,
+ viz::ResourceFormat format,
+ const gfx::Size& size,
+ const gfx::ColorSpace& color_space,
+ uint32_t usage,
+ Microsoft::WRL::ComPtr<IDXGISwapChain1> swap_chain,
+ scoped_refptr<gles2::TexturePassthrough> texture,
+ scoped_refptr<gl::GLImageD3D> image,
+ size_t buffer_index,
+ Microsoft::WRL::ComPtr<ID3D11Texture2D> d3d11_texture,
+ base::win::ScopedHandle shared_handle,
+ Microsoft::WRL::ComPtr<IDXGIKeyedMutex> dxgi_keyed_mutex);
+
+ ~SharedImageBackingD3D() override;
+
+ void Update(std::unique_ptr<gfx::GpuFence> in_fence) override;
+
+ bool ProduceLegacyMailbox(MailboxManager* mailbox_manager) override;
+
+ std::unique_ptr<SharedImageRepresentationDawn> ProduceDawn(
+ SharedImageManager* manager,
+ MemoryTypeTracker* tracker,
+ WGPUDevice device) override;
+
+ void OnMemoryDump(const std::string& dump_name,
+ base::trace_event::MemoryAllocatorDump* dump,
+ base::trace_event::ProcessMemoryDump* pmd,
+ uint64_t client_tracing_id) override;
+
+ bool BeginAccessD3D12(uint64_t* acquire_key);
+ void EndAccessD3D12();
+
+ bool BeginAccessD3D11();
+ void EndAccessD3D11();
+
+ HANDLE GetSharedHandle() const;
+
+ bool PresentSwapChain() override;
+
+ protected:
+ std::unique_ptr<SharedImageRepresentationGLTexturePassthrough>
+ ProduceGLTexturePassthrough(SharedImageManager* manager,
+ MemoryTypeTracker* tracker) override;
+
+ std::unique_ptr<SharedImageRepresentationSkia> ProduceSkia(
+ SharedImageManager* manager,
+ MemoryTypeTracker* tracker,
+ scoped_refptr<SharedContextState> context_state) override;
+
+ private:
+ Microsoft::WRL::ComPtr<IDXGISwapChain1> swap_chain_;
+ scoped_refptr<gles2::TexturePassthrough> texture_;
+ scoped_refptr<gl::GLImageD3D> image_;
+ const size_t buffer_index_;
+ Microsoft::WRL::ComPtr<ID3D11Texture2D> d3d11_texture_;
+
+ // If d3d11_texture_ has a keyed mutex, it will be stored in
+ // dxgi_keyed_mutex. The keyed mutex is used to synchronize
+ // D3D11 and D3D12 Chromium components.
+ // dxgi_keyed_mutex_ is the D3D11 side of the keyed mutex.
+ // To create the corresponding D3D12 interface, pass the handle
+ // stored in shared_handle_ to ID3D12Device::OpenSharedHandle.
+ // Only one component is allowed to read/write to the texture
+ // at a time. keyed_mutex_acquire_key_ is incremented on every
+ // Acquire/Release usage.
+ base::win::ScopedHandle shared_handle_;
+ Microsoft::WRL::ComPtr<IDXGIKeyedMutex> dxgi_keyed_mutex_;
+ uint64_t keyed_mutex_acquire_key_ = 0;
+ bool keyed_mutex_acquired_ = false;
+
+ DISALLOW_COPY_AND_ASSIGN(SharedImageBackingD3D);
+};
+
+} // namespace gpu
+
+#endif // GPU_COMMAND_BUFFER_SERVICE_SHARED_IMAGE_BACKING_D3D_H_
diff --git a/chromium/gpu/command_buffer/service/shared_image_backing_egl_image.cc b/chromium/gpu/command_buffer/service/shared_image_backing_egl_image.cc
new file mode 100644
index 00000000000..3117ba53f49
--- /dev/null
+++ b/chromium/gpu/command_buffer/service/shared_image_backing_egl_image.cc
@@ -0,0 +1,331 @@
+// Copyright 2019 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "gpu/command_buffer/service/shared_image_backing_egl_image.h"
+
+#include "gpu/command_buffer/service/shared_context_state.h"
+#include "gpu/command_buffer/service/shared_image_batch_access_manager.h"
+#include "gpu/command_buffer/service/shared_image_representation.h"
+#include "gpu/command_buffer/service/shared_image_representation_skia_gl.h"
+#include "gpu/command_buffer/service/texture_definition.h"
+#include "gpu/command_buffer/service/texture_manager.h"
+#include "ui/gl/gl_fence_egl.h"
+#include "ui/gl/gl_utils.h"
+#include "ui/gl/scoped_binders.h"
+#include "ui/gl/shared_gl_fence_egl.h"
+
+namespace gpu {
+
+// Implementation of SharedImageRepresentationGLTexture that uses a GL texture
+// which is an EGLImage sibling.
+class SharedImageRepresentationEglImageGLTexture
+ : public SharedImageRepresentationGLTexture {
+ public:
+ SharedImageRepresentationEglImageGLTexture(SharedImageManager* manager,
+ SharedImageBacking* backing,
+ MemoryTypeTracker* tracker,
+ gles2::Texture* texture)
+ : SharedImageRepresentationGLTexture(manager, backing, tracker),
+ texture_(texture) {}
+
+ ~SharedImageRepresentationEglImageGLTexture() override {
+ EndAccess();
+
+ if (texture_)
+ texture_->RemoveLightweightRef(has_context());
+ }
+
+ bool BeginAccess(GLenum mode) override {
+ if (mode == GL_SHARED_IMAGE_ACCESS_MODE_READ_CHROMIUM) {
+ if (!egl_backing()->BeginRead(this))
+ return false;
+ mode_ = RepresentationAccessMode::kRead;
+ } else if (mode == GL_SHARED_IMAGE_ACCESS_MODE_READWRITE_CHROMIUM) {
+ if (!egl_backing()->BeginWrite())
+ return false;
+ mode_ = RepresentationAccessMode::kWrite;
+ } else {
+ NOTREACHED();
+ }
+ return true;
+ }
+
+ void EndAccess() override {
+ if (mode_ == RepresentationAccessMode::kNone)
+ return;
+
+ // Pass this fence to its backing.
+ if (mode_ == RepresentationAccessMode::kRead) {
+ egl_backing()->EndRead(this);
+ } else if (mode_ == RepresentationAccessMode::kWrite) {
+ egl_backing()->EndWrite();
+ } else {
+ NOTREACHED();
+ }
+ mode_ = RepresentationAccessMode::kNone;
+ }
+
+ gles2::Texture* GetTexture() override { return texture_; }
+
+ bool SupportsMultipleConcurrentReadAccess() override { return true; }
+
+ private:
+ SharedImageBackingEglImage* egl_backing() {
+ return static_cast<SharedImageBackingEglImage*>(backing());
+ }
+
+ gles2::Texture* texture_;
+ RepresentationAccessMode mode_ = RepresentationAccessMode::kNone;
+ DISALLOW_COPY_AND_ASSIGN(SharedImageRepresentationEglImageGLTexture);
+};
+
+SharedImageBackingEglImage::SharedImageBackingEglImage(
+ const Mailbox& mailbox,
+ viz::ResourceFormat format,
+ const gfx::Size& size,
+ const gfx::ColorSpace& color_space,
+ uint32_t usage,
+ size_t estimated_size,
+ GLuint gl_format,
+ GLuint gl_type,
+ SharedImageBatchAccessManager* batch_access_manager,
+ const GpuDriverBugWorkarounds& workarounds)
+ : ClearTrackingSharedImageBacking(mailbox,
+ format,
+ size,
+ color_space,
+ usage,
+ estimated_size,
+ true /*is_thread_safe*/),
+ gl_format_(gl_format),
+ gl_type_(gl_type),
+ batch_access_manager_(batch_access_manager) {
+ DCHECK(batch_access_manager_);
+#if DCHECK_IS_ON()
+ created_on_context_ = gl::g_current_gl_context;
+#endif
+ // On some GPUs (NVIDIA), keeping a reference to the EGLImage itself is not
+ // enough; we must keep a reference to at least one sibling.
+ if (workarounds.dont_delete_source_texture_for_egl_image) {
+ source_texture_ = GenEGLImageSibling();
+ }
+}
+
+SharedImageBackingEglImage::~SharedImageBackingEglImage() {
+ // Unregister this backing from the |batch_access_manager_|.
+ batch_access_manager_->UnregisterEglBacking(this);
+ DCHECK(!source_texture_);
+}
+
+void SharedImageBackingEglImage::Update(
+ std::unique_ptr<gfx::GpuFence> in_fence) {
+ NOTREACHED();
+}
+
+bool SharedImageBackingEglImage::ProduceLegacyMailbox(
+ MailboxManager* mailbox_manager) {
+ // This backing does not support the legacy mailbox system.
+ return false;
+}
+
+std::unique_ptr<SharedImageRepresentationGLTexture>
+SharedImageBackingEglImage::ProduceGLTexture(SharedImageManager* manager,
+ MemoryTypeTracker* tracker) {
+ auto* texture = GenEGLImageSibling();
+ if (!texture)
+ return nullptr;
+ return std::make_unique<SharedImageRepresentationEglImageGLTexture>(
+ manager, this, tracker, texture);
+}
+
+std::unique_ptr<SharedImageRepresentationSkia>
+SharedImageBackingEglImage::ProduceSkia(
+ SharedImageManager* manager,
+ MemoryTypeTracker* tracker,
+ scoped_refptr<SharedContextState> context_state) {
+ auto* texture = GenEGLImageSibling();
+ if (!texture)
+ return nullptr;
+
+ auto gl_representation =
+ std::make_unique<SharedImageRepresentationEglImageGLTexture>(
+ manager, this, tracker, std::move(texture));
+ return SharedImageRepresentationSkiaGL::Create(std::move(gl_representation),
+ std::move(context_state),
+ manager, this, tracker);
+}
+
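+// Write access is exclusive: it fails while any reader or another writer is
+// active, and it waits on all outstanding read and write fences before the
+// caller may write.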
+bool SharedImageBackingEglImage::BeginWrite() {
+ AutoLock auto_lock(this);
+
+ if (is_writing_ || !active_readers_.empty()) {
+ DLOG(ERROR) << "BeginWrite should only be called when there are no other "
+ "readers or writers";
+ return false;
+ }
+ is_writing_ = true;
+
+ // When multiple threads want to write to the same backing, the writer needs
+ // to wait for previous reads and writes to finish.
+ if (!read_fences_.empty()) {
+ for (const auto& read_fence : read_fences_) {
+ read_fence.second->ServerWait();
+ }
+ // Once all the read fences have been waited upon, it's safe to clear all of
+ // them. Note that when there is an active writer, no one can read and hence
+ // cannot update |read_fences_|.
+ read_fences_.clear();
+ }
+
+ if (write_fence_)
+ write_fence_->ServerWait();
+
+ return true;
+}
+
+void SharedImageBackingEglImage::EndWrite() {
+ AutoLock auto_lock(this);
+
+ if (!is_writing_) {
+ DLOG(ERROR) << "Attempt to end write to a SharedImageBacking without a "
+ "successful begin write";
+ return;
+ }
+
+ is_writing_ = false;
+ write_fence_ = gl::GLFenceEGL::Create();
+}
+
+bool SharedImageBackingEglImage::BeginRead(
+ const SharedImageRepresentation* reader) {
+ AutoLock auto_lock(this);
+
+ if (is_writing_) {
+ DLOG(ERROR) << "BeginRead should only be called when there are no writers";
+ return false;
+ }
+
+ if (active_readers_.contains(reader)) {
+ LOG(ERROR) << "BeginRead was called twice on the same representation";
+ return false;
+ }
+ active_readers_.insert(reader);
+ if (write_fence_)
+ write_fence_->ServerWait();
+
+ return true;
+}
+
+void SharedImageBackingEglImage::EndRead(
+ const SharedImageRepresentation* reader) {
+ {
+ AutoLock auto_lock(this);
+
+ if (!active_readers_.contains(reader)) {
+ DLOG(ERROR) << "Attempt to end read to a SharedImageBacking without a "
+ "successful begin read";
+ return;
+ }
+ active_readers_.erase(reader);
+ }
+
+ // For batch reads, we only need to create one fence after the last EndRead()
+ // of the whole batch. Hence we register this backing with the
+ // |batch_access_manager_| so that it can set an end-read fence on this
+ // backing after the last read of the batch. This improves performance
+ // because creating and inserting GL fences is costly. For non-batch
+ // (regular) reads, we create one fence per EndRead().
+ if (batch_access_manager_->IsDoingBatchReads()) {
+ batch_access_manager_->RegisterEglBackingForEndReadFence(this);
+ return;
+ }
+ AutoLock auto_lock(this);
+ read_fences_[gl::g_current_gl_context] =
+ base::MakeRefCounted<gl::SharedGLFenceEGL>();
+}
+
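+// Creates a new gles2::Texture that shares this backing's EGLImage. The first
+// call also allocates the texture storage and creates |egl_image_buffer_|;
+// subsequent calls bind the existing buffer to the new texture to make it an
+// EGLImage sibling.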
+gles2::Texture* SharedImageBackingEglImage::GenEGLImageSibling() {
+ // Create a gles2::texture.
+ GLenum target = GL_TEXTURE_2D;
+ gl::GLApi* api = gl::g_current_gl_context;
+ GLuint service_id = 0;
+ api->glGenTexturesFn(1, &service_id);
+
+ gl::ScopedTextureBinder texture_binder(target, service_id);
+
+ api->glTexParameteriFn(target, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
+ api->glTexParameteriFn(target, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
+ api->glTexParameteriFn(target, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
+ api->glTexParameteriFn(target, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
+
+ auto* texture = new gles2::Texture(service_id);
+ texture->SetLightweightRef();
+ texture->SetTarget(target, 1 /*max_levels*/);
+ texture->sampler_state_.min_filter = GL_LINEAR;
+ texture->sampler_state_.mag_filter = GL_LINEAR;
+ texture->sampler_state_.wrap_t = GL_CLAMP_TO_EDGE;
+ texture->sampler_state_.wrap_s = GL_CLAMP_TO_EDGE;
+
+ // If the backing is already cleared, no need to clear it again.
+ gfx::Rect cleared_rect;
+ if (IsCleared())
+ cleared_rect = gfx::Rect(size());
+
+ // Set the level info.
+ texture->SetLevelInfo(target, 0, gl_format_, size().width(), size().height(),
+ 1, 0, gl_format_, gl_type_, cleared_rect);
+
+ // Note that we use the |bind_egl_image| flag and some additional logic to
+ // keep the locks more granular, since BindToTexture() does not need to be
+ // behind the lock. We don't need to bind the |egl_image_buffer_| the first
+ // time, when it has just been created.
+ bool bind_egl_image = true;
+ scoped_refptr<gles2::NativeImageBuffer> buffer;
+ {
+ AutoLock auto_lock(this);
+ if (!egl_image_buffer_) {
+ // Allocate memory for the texture object if this is the first EGLImage
+ // target/sibling. Memory for the EGLImage will not be created if we don't
+ // allocate memory for the texture object.
+ api->glTexImage2DFn(target, 0, gl_format_, size().width(),
+ size().height(), 0, gl_format_, gl_type_, nullptr);
+
+ // Use service id of the texture as a source to create the native buffer.
+ egl_image_buffer_ = gles2::NativeImageBuffer::Create(service_id);
+ if (!egl_image_buffer_) {
+ texture->RemoveLightweightRef(have_context());
+ return nullptr;
+ }
+ bind_egl_image = false;
+ }
+ buffer = egl_image_buffer_;
+ }
+ if (bind_egl_image) {
+ // If we already have the |egl_image_buffer_|, just bind it to the new
+ // texture to make it an EGLImage sibling.
+ buffer->BindToTexture(target);
+ }
+
+ texture->SetImmutable(true /*immutable*/, false /*immutable_storage*/);
+ return texture;
+}
+
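+// Called by SharedImageBatchAccessManager after the last read in a batch to
+// record a single shared end-read fence for the current GL context.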
+void SharedImageBackingEglImage::SetEndReadFence(
+ scoped_refptr<gl::SharedGLFenceEGL> shared_egl_fence) {
+ AutoLock auto_lock(this);
+ read_fences_[gl::g_current_gl_context] = std::move(shared_egl_fence);
+}
+
+void SharedImageBackingEglImage::MarkForDestruction() {
+ AutoLock auto_lock(this);
+#if DCHECK_IS_ON()
+ DCHECK(!have_context() || created_on_context_ == gl::g_current_gl_context);
+#endif
+ if (source_texture_) {
+ source_texture_->RemoveLightweightRef(have_context());
+ source_texture_ = nullptr;
+ }
+}
+
+} // namespace gpu
diff --git a/chromium/gpu/command_buffer/service/shared_image_backing_egl_image.h b/chromium/gpu/command_buffer/service/shared_image_backing_egl_image.h
new file mode 100644
index 00000000000..29f992265c1
--- /dev/null
+++ b/chromium/gpu/command_buffer/service/shared_image_backing_egl_image.h
@@ -0,0 +1,113 @@
+// Copyright 2019 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef GPU_COMMAND_BUFFER_SERVICE_SHARED_IMAGE_BACKING_EGL_IMAGE_H_
+#define GPU_COMMAND_BUFFER_SERVICE_SHARED_IMAGE_BACKING_EGL_IMAGE_H_
+
+#include "base/memory/scoped_refptr.h"
+#include "components/viz/common/resources/resource_format.h"
+#include "gpu/command_buffer/service/shared_image_backing.h"
+#include "ui/gfx/buffer_types.h"
+#include "ui/gl/gl_bindings.h"
+
+namespace gl {
+class GLFenceEGL;
+class SharedGLFenceEGL;
+} // namespace gl
+
+namespace gpu {
+class GpuDriverBugWorkarounds;
+class SharedImageRepresentationGLTexture;
+class SharedImageRepresentationSkia;
+class SharedImageBatchAccessManager;
+struct Mailbox;
+
+namespace gles2 {
+class NativeImageBuffer;
+class Texture;
+} // namespace gles2
+
+// Implementation of SharedImageBacking that is used to create EGLImage targets
+// from the same EGLImage object. Hence all the representations created from
+// this backing use EGLImage siblings. This backing is thread-safe across
+// different threads running different GL contexts that are not part of the
+// same share group. This is achieved by using locks and fences for proper
+// synchronization.
+class SharedImageBackingEglImage : public ClearTrackingSharedImageBacking {
+ public:
+ SharedImageBackingEglImage(
+ const Mailbox& mailbox,
+ viz::ResourceFormat format,
+ const gfx::Size& size,
+ const gfx::ColorSpace& color_space,
+ uint32_t usage,
+ size_t estimated_size,
+ GLuint gl_format,
+ GLuint gl_type,
+ SharedImageBatchAccessManager* batch_access_manager,
+ const GpuDriverBugWorkarounds& workarounds);
+
+ ~SharedImageBackingEglImage() override;
+
+ void Update(std::unique_ptr<gfx::GpuFence> in_fence) override;
+ bool ProduceLegacyMailbox(MailboxManager* mailbox_manager) override;
+ void MarkForDestruction() override;
+
+ bool BeginWrite();
+ void EndWrite();
+ bool BeginRead(const SharedImageRepresentation* reader);
+ void EndRead(const SharedImageRepresentation* reader);
+
+ protected:
+ std::unique_ptr<SharedImageRepresentationGLTexture> ProduceGLTexture(
+ SharedImageManager* manager,
+ MemoryTypeTracker* tracker) override;
+
+ std::unique_ptr<SharedImageRepresentationSkia> ProduceSkia(
+ SharedImageManager* manager,
+ MemoryTypeTracker* tracker,
+ scoped_refptr<SharedContextState> context_state) override;
+
+ private:
+ friend class SharedImageBatchAccessManager;
+ friend class SharedImageRepresentationEglImageGLTexture;
+
+ // Used to create an EGLImage texture target from the same EGLImage object.
+ gles2::Texture* GenEGLImageSibling();
+
+ void SetEndReadFence(scoped_refptr<gl::SharedGLFenceEGL> shared_egl_fence);
+
+ const GLuint gl_format_;
+ const GLuint gl_type_;
+ gles2::Texture* source_texture_ = nullptr;
+
+#if DCHECK_IS_ON()
+ gl::GLApi* created_on_context_;
+#endif
+
+ // This class encapsulates the EGLImage object for Android.
+ scoped_refptr<gles2::NativeImageBuffer> egl_image_buffer_ GUARDED_BY(lock_);
+
+ // All reads and writes must wait for existing writes to complete.
+ // TODO(vikassoni): Use SharedGLFenceEGL instead of GLFenceEGL for
+ // |write_fence_| in the future, once SharedGLFenceEGL has the capability to
+ // support multiple GLContexts.
+ std::unique_ptr<gl::GLFenceEGL> write_fence_ GUARDED_BY(lock_);
+ bool is_writing_ GUARDED_BY(lock_) = false;
+
+ // All writes must wait for existing reads to complete. For a given GL
+ // context, we only need to keep the most recent fence. Waiting on the most
+ // recent read fence is enough to make sure all past read fences have been
+ // signalled.
+ base::flat_map<gl::GLApi*, scoped_refptr<gl::SharedGLFenceEGL>> read_fences_
+ GUARDED_BY(lock_);
+ base::flat_set<const SharedImageRepresentation*> active_readers_
+ GUARDED_BY(lock_);
+ SharedImageBatchAccessManager* batch_access_manager_ = nullptr;
+
+ DISALLOW_COPY_AND_ASSIGN(SharedImageBackingEglImage);
+};
+
+} // namespace gpu
+
+#endif // GPU_COMMAND_BUFFER_SERVICE_SHARED_IMAGE_BACKING_EGL_IMAGE_H_
diff --git a/chromium/gpu/command_buffer/service/shared_image_backing_factory.h b/chromium/gpu/command_buffer/service/shared_image_backing_factory.h
index 406b13c85a3..575c80666f1 100644
--- a/chromium/gpu/command_buffer/service/shared_image_backing_factory.h
+++ b/chromium/gpu/command_buffer/service/shared_image_backing_factory.h
@@ -27,6 +27,7 @@ class SharedImageBackingFactory {
virtual std::unique_ptr<SharedImageBacking> CreateSharedImage(
const Mailbox& mailbox,
viz::ResourceFormat format,
+ SurfaceHandle surface_handle,
const gfx::Size& size,
const gfx::ColorSpace& color_space,
uint32_t usage,
diff --git a/chromium/gpu/command_buffer/service/shared_image_backing_factory_ahardwarebuffer.cc b/chromium/gpu/command_buffer/service/shared_image_backing_factory_ahardwarebuffer.cc
index b3aa8b6ae72..c31a2d6d2e1 100644
--- a/chromium/gpu/command_buffer/service/shared_image_backing_factory_ahardwarebuffer.cc
+++ b/chromium/gpu/command_buffer/service/shared_image_backing_factory_ahardwarebuffer.cc
@@ -13,6 +13,7 @@
#include <vector>
#include "base/android/android_hardware_buffer_compat.h"
+#include "base/android/scoped_hardware_buffer_fence_sync.h"
#include "base/android/scoped_hardware_buffer_handle.h"
#include "base/containers/flat_set.h"
#include "base/logging.h"
@@ -37,6 +38,7 @@
#include "gpu/vulkan/vulkan_device_queue.h"
#include "gpu/vulkan/vulkan_fence_helper.h"
#include "gpu/vulkan/vulkan_function_pointers.h"
+#include "gpu/vulkan/vulkan_image.h"
#include "gpu/vulkan/vulkan_implementation.h"
#include "gpu/vulkan/vulkan_util.h"
#include "third_party/skia/include/core/SkPromiseImageTexture.h"
@@ -55,72 +57,72 @@
namespace gpu {
namespace {
-sk_sp<SkPromiseImageTexture> CreatePromiseTextureAHB(
- viz::VulkanContextProvider* context_provider,
- base::android::ScopedHardwareBufferHandle ahb_handle,
- gfx::Size size,
- viz::ResourceFormat format) {
- VulkanImplementation* vk_implementation =
- context_provider->GetVulkanImplementation();
- VkDevice vk_device = context_provider->GetDeviceQueue()->GetVulkanDevice();
- VkPhysicalDevice vk_physical_device =
- context_provider->GetDeviceQueue()->GetVulkanPhysicalDevice();
-
- // Create a VkImage and import AHB.
- VkImage vk_image;
- VkImageCreateInfo vk_image_info;
- VkDeviceMemory vk_device_memory;
- VkDeviceSize mem_allocation_size;
- if (!vk_implementation->CreateVkImageAndImportAHB(
- vk_device, vk_physical_device, size, std::move(ahb_handle), &vk_image,
- &vk_image_info, &vk_device_memory, &mem_allocation_size)) {
- return nullptr;
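+// gl::GLImage wrapper over the AHardwareBuffer handed to the overlay
+// controller. It carries a begin-read fence to wait on before scan-out and an
+// end-read fence that is set once scan-out completes.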
+class OverlayImage final : public gl::GLImage {
+ public:
+ explicit OverlayImage(AHardwareBuffer* buffer)
+ : handle_(base::android::ScopedHardwareBufferHandle::Create(buffer)) {}
+
+ void SetBeginFence(base::ScopedFD fence_fd) {
+ DCHECK(!end_read_fence_.is_valid());
+ DCHECK(!begin_read_fence_.is_valid());
+ begin_read_fence_ = std::move(fence_fd);
}
- // Create backend texture from the VkImage.
- GrVkAlloc alloc = {vk_device_memory, 0, mem_allocation_size, 0};
- GrVkImageInfo vk_info = {vk_image,
- alloc,
- vk_image_info.tiling,
- vk_image_info.initialLayout,
- vk_image_info.format,
- vk_image_info.mipLevels,
- VK_QUEUE_FAMILY_EXTERNAL};
- // TODO(bsalomon): Determine whether it makes sense to attempt to reuse this
- // if the vk_info stays the same on subsequent calls.
- auto promise_texture = SkPromiseImageTexture::Make(
- GrBackendTexture(size.width(), size.height(), vk_info));
- if (!promise_texture) {
- vkDestroyImage(vk_device, vk_image, nullptr);
- vkFreeMemory(vk_device, vk_device_memory, nullptr);
- return nullptr;
+ base::ScopedFD TakeEndFence() {
+ DCHECK(!begin_read_fence_.is_valid());
+ return std::move(end_read_fence_);
}
- return promise_texture;
-}
+ // gl::GLImage:
+ std::unique_ptr<base::android::ScopedHardwareBufferFenceSync>
+ GetAHardwareBuffer() override {
+ return std::make_unique<ScopedHardwareBufferFenceSyncImpl>(
+ this, base::android::ScopedHardwareBufferHandle::Create(handle_.get()),
+ std::move(begin_read_fence_));
+ }
-void DestroyVkPromiseTextureAHB(viz::VulkanContextProvider* context_provider,
- sk_sp<SkPromiseImageTexture> promise_texture) {
- DCHECK(promise_texture);
- DCHECK(promise_texture->unique());
+ protected:
+ ~OverlayImage() override = default;
- GrVkImageInfo vk_image_info;
- bool result =
- promise_texture->backendTexture().getVkImageInfo(&vk_image_info);
- DCHECK(result);
+ private:
+ class ScopedHardwareBufferFenceSyncImpl
+ : public base::android::ScopedHardwareBufferFenceSync {
+ public:
+ ScopedHardwareBufferFenceSyncImpl(
+ scoped_refptr<OverlayImage> image,
+ base::android::ScopedHardwareBufferHandle handle,
+ base::ScopedFD fence_fd)
+ : ScopedHardwareBufferFenceSync(std::move(handle), std::move(fence_fd)),
+ image_(std::move(image)) {}
+ ~ScopedHardwareBufferFenceSyncImpl() override = default;
+
+ void SetReadFence(base::ScopedFD fence_fd, bool has_context) override {
+ DCHECK(!image_->begin_read_fence_.is_valid());
+ DCHECK(!image_->end_read_fence_.is_valid());
+ image_->end_read_fence_ = std::move(fence_fd);
+ }
- VulkanFenceHelper* fence_helper =
- context_provider->GetDeviceQueue()->GetFenceHelper();
- fence_helper->EnqueueImageCleanupForSubmittedWork(
- vk_image_info.fImage, vk_image_info.fAlloc.fMemory);
-}
+ private:
+ scoped_refptr<OverlayImage> image_;
+ };
+
+ base::android::ScopedHardwareBufferHandle handle_;
+
+ // The fence for overlay controller to wait on before scanning out.
+ base::ScopedFD begin_read_fence_;
+
+ // The fence for overlay controller to set to indicate scanning out
+ // completion. The image content should not be modified before passing this
+ // fence.
+ base::ScopedFD end_read_fence_;
+};
} // namespace
// Implementation of SharedImageBacking that holds an AHardwareBuffer. This
// can be used to create a GL texture or a VK Image from the AHardwareBuffer
// backing.
-class SharedImageBackingAHB : public SharedImageBacking {
+class SharedImageBackingAHB : public ClearTrackingSharedImageBacking {
public:
SharedImageBackingAHB(const Mailbox& mailbox,
viz::ResourceFormat format,
@@ -134,11 +136,14 @@ class SharedImageBackingAHB : public SharedImageBacking {
~SharedImageBackingAHB() override;
- bool IsCleared() const override;
- void SetCleared() override;
void Update(std::unique_ptr<gfx::GpuFence> in_fence) override;
- bool ProduceLegacyMailbox(MailboxManager* mailbox_manager) override;
- void Destroy() override;
+ // We never generate LegacyMailboxes in threadsafe mode, so exclude this
+ // function from thread safety analysis.
+ bool ProduceLegacyMailbox(MailboxManager* mailbox_manager)
+ NO_THREAD_SAFETY_ANALYSIS override;
+ gfx::Rect ClearedRect() const override;
+ void SetClearedRect(const gfx::Rect& cleared_rect) override;
+
base::android::ScopedHardwareBufferHandle GetAhbHandle() const;
bool BeginWrite(base::ScopedFD* fd_to_wait_on);
@@ -147,6 +152,8 @@ class SharedImageBackingAHB : public SharedImageBacking {
base::ScopedFD* fd_to_wait_on);
void EndRead(const SharedImageRepresentation* reader,
base::ScopedFD end_read_fd);
+ gl::GLImage* BeginOverlayAccess();
+ void EndOverlayAccess();
protected:
std::unique_ptr<SharedImageRepresentationGLTexture> ProduceGLTexture(
@@ -158,21 +165,29 @@ class SharedImageBackingAHB : public SharedImageBacking {
MemoryTypeTracker* tracker,
scoped_refptr<SharedContextState> context_state) override;
+ std::unique_ptr<SharedImageRepresentationOverlay> ProduceOverlay(
+ SharedImageManager* manager,
+ MemoryTypeTracker* tracker) override;
+
private:
gles2::Texture* GenGLTexture();
- base::android::ScopedHardwareBufferHandle hardware_buffer_handle_;
+ const base::android::ScopedHardwareBufferHandle hardware_buffer_handle_;
+ // Not guarded by |lock_| as we do not use legacy_texture_ in threadsafe
+ // mode.
gles2::Texture* legacy_texture_ = nullptr;
- bool is_cleared_ = false;
-
// All reads and writes must wait for existing writes to complete.
- base::ScopedFD write_sync_fd_;
- bool is_writing_ = false;
+ base::ScopedFD write_sync_fd_ GUARDED_BY(lock_);
+ bool is_writing_ GUARDED_BY(lock_) = false;
// All writes must wait for existing reads to complete.
- base::ScopedFD read_sync_fd_;
- base::flat_set<const SharedImageRepresentation*> active_readers_;
+ base::ScopedFD read_sync_fd_ GUARDED_BY(lock_);
+ base::flat_set<const SharedImageRepresentation*> active_readers_
+ GUARDED_BY(lock_);
+
+ scoped_refptr<OverlayImage> overlay_image_ GUARDED_BY(lock_);
+ bool is_overlay_accessing_ GUARDED_BY(lock_) = false;
DISALLOW_COPY_AND_ASSIGN(SharedImageBackingAHB);
};
@@ -231,11 +246,6 @@ class SharedImageRepresentationGLTextureAHB
ahb_backing()->EndRead(this, std::move(sync_fd));
} else if (mode_ == RepresentationAccessMode::kWrite) {
ahb_backing()->EndWrite(std::move(sync_fd));
-
- if (texture_) {
- if (texture_->IsLevelCleared(texture_->target(), 0))
- backing()->SetCleared();
- }
}
mode_ = RepresentationAccessMode::kNone;
@@ -259,21 +269,31 @@ class SharedImageRepresentationSkiaVkAHB
SharedImageManager* manager,
SharedImageBacking* backing,
scoped_refptr<SharedContextState> context_state,
- sk_sp<SkPromiseImageTexture> promise_texture,
+ std::unique_ptr<VulkanImage> vulkan_image,
MemoryTypeTracker* tracker)
: SharedImageRepresentationSkia(manager, backing, tracker),
- promise_texture_(std::move(promise_texture)),
+ vulkan_image_(std::move(vulkan_image)),
context_state_(std::move(context_state)) {
- DCHECK(promise_texture_);
+ DCHECK(vulkan_image_);
DCHECK(context_state_);
DCHECK(context_state_->vk_context_provider());
+ // TODO(bsalomon): Determine whether it makes sense to attempt to reuse this
+ // if the vk_info stays the same on subsequent calls.
+ promise_texture_ = SkPromiseImageTexture::Make(
+ GrBackendTexture(size().width(), size().height(),
+ CreateGrVkImageInfo(vulkan_image_.get())));
+ DCHECK(promise_texture_);
}
~SharedImageRepresentationSkiaVkAHB() override {
- DestroyVkPromiseTextureAHB(context_state_->vk_context_provider(),
- std::move(promise_texture_));
DCHECK_EQ(mode_, RepresentationAccessMode::kNone);
- DCHECK(!surface_);
+ surface_.reset();
+ DCHECK(vulkan_image_);
+ VulkanFenceHelper* fence_helper = context_state_->vk_context_provider()
+ ->GetDeviceQueue()
+ ->GetFenceHelper();
+ fence_helper->EnqueueVulkanObjectCleanupForSubmittedWork(
+ std::move(vulkan_image_));
}
sk_sp<SkSurface> BeginWriteAccess(
@@ -282,29 +302,31 @@ class SharedImageRepresentationSkiaVkAHB
std::vector<GrBackendSemaphore>* begin_semaphores,
std::vector<GrBackendSemaphore>* end_semaphores) override {
DCHECK_EQ(mode_, RepresentationAccessMode::kNone);
- DCHECK(!surface_);
if (!BeginAccess(false /* readonly */, begin_semaphores, end_semaphores))
return nullptr;
- SkColorType sk_color_type = viz::ResourceFormatToClosestSkColorType(
- /*gpu_compositing=*/true, format());
- auto surface = SkSurface::MakeFromBackendTextureAsRenderTarget(
- context_state_->gr_context(), promise_texture_->backendTexture(),
- kTopLeft_GrSurfaceOrigin, final_msaa_count, sk_color_type,
- color_space().ToSkColorSpace(), &surface_props);
- DCHECK(surface);
- surface_ = surface.get();
- return surface;
+ if (!surface_) {
+ SkColorType sk_color_type = viz::ResourceFormatToClosestSkColorType(
+ /*gpu_compositing=*/true, format());
+ surface_ = SkSurface::MakeFromBackendTextureAsRenderTarget(
+ context_state_->gr_context(), promise_texture_->backendTexture(),
+ kTopLeft_GrSurfaceOrigin, final_msaa_count, sk_color_type,
+ color_space().ToSkColorSpace(), &surface_props);
+ }
+
+ DCHECK(surface_);
+ return surface_;
}
void EndWriteAccess(sk_sp<SkSurface> surface) override {
DCHECK_EQ(mode_, RepresentationAccessMode::kWrite);
- DCHECK_EQ(surface.get(), surface_);
- DCHECK(surface->unique());
+ DCHECK_EQ(surface.get(), surface_.get());
+
+ surface.reset();
+ DCHECK(surface_->unique());
EndAccess(false /* readonly */);
- surface_ = nullptr;
}
sk_sp<SkPromiseImageTexture> BeginReadAccess(
@@ -426,13 +448,51 @@ class SharedImageRepresentationSkiaVkAHB
mode_ = RepresentationAccessMode::kNone;
}
+ std::unique_ptr<VulkanImage> vulkan_image_;
sk_sp<SkPromiseImageTexture> promise_texture_;
RepresentationAccessMode mode_ = RepresentationAccessMode::kNone;
- SkSurface* surface_ = nullptr;
+ sk_sp<SkSurface> surface_;
scoped_refptr<SharedContextState> context_state_;
VkSemaphore end_access_semaphore_ = VK_NULL_HANDLE;
};
+class SharedImageRepresentationOverlayAHB
+ : public SharedImageRepresentationOverlay {
+ public:
+ SharedImageRepresentationOverlayAHB(SharedImageManager* manager,
+ SharedImageBacking* backing,
+ MemoryTypeTracker* tracker)
+ : SharedImageRepresentationOverlay(manager, backing, tracker) {}
+
+ ~SharedImageRepresentationOverlayAHB() override { EndReadAccess(); }
+
+ private:
+ SharedImageBackingAHB* ahb_backing() {
+ return static_cast<SharedImageBackingAHB*>(backing());
+ }
+
+ void NotifyOverlayPromotion(bool promotion,
+ const gfx::Rect& bounds) override {
+ NOTREACHED();
+ }
+
+ bool BeginReadAccess() override {
+ gl_image_ = ahb_backing()->BeginOverlayAccess();
+ return !!gl_image_;
+ }
+
+ void EndReadAccess() override {
+ if (gl_image_) {
+ ahb_backing()->EndOverlayAccess();
+ gl_image_ = nullptr;
+ }
+ }
+
+ gl::GLImage* GetGLImage() override { return gl_image_; }
+
+ gl::GLImage* gl_image_ = nullptr;
+};
+
SharedImageBackingAHB::SharedImageBackingAHB(
const Mailbox& mailbox,
viz::ResourceFormat format,
@@ -443,38 +503,52 @@ SharedImageBackingAHB::SharedImageBackingAHB(
size_t estimated_size,
bool is_thread_safe,
base::ScopedFD initial_upload_fd)
- : SharedImageBacking(mailbox,
- format,
- size,
- color_space,
- usage,
- estimated_size,
- is_thread_safe),
+ : ClearTrackingSharedImageBacking(mailbox,
+ format,
+ size,
+ color_space,
+ usage,
+ estimated_size,
+ is_thread_safe),
hardware_buffer_handle_(std::move(handle)),
write_sync_fd_(std::move(initial_upload_fd)) {
DCHECK(hardware_buffer_handle_.is_valid());
}
SharedImageBackingAHB::~SharedImageBackingAHB() {
- // Check to make sure buffer is explicitly destroyed using Destroy() api
- // before this destructor is called.
- DCHECK(!hardware_buffer_handle_.is_valid());
+ // Locking here in destructor since we are accessing member variable
+ // |have_context_| via have_context().
+ AutoLock auto_lock(this);
+ DCHECK(hardware_buffer_handle_.is_valid());
+ if (legacy_texture_) {
+ legacy_texture_->RemoveLightweightRef(have_context());
+ legacy_texture_ = nullptr;
+ }
}
-bool SharedImageBackingAHB::IsCleared() const {
+gfx::Rect SharedImageBackingAHB::ClearedRect() const {
AutoLock auto_lock(this);
-
- return is_cleared_;
+ // If a |legacy_texture_| exists, defer to that. Once created,
+ // |legacy_texture_| is never destroyed, so no need to synchronize with
+ // ClearedRectInternal.
+ if (legacy_texture_) {
+ return legacy_texture_->GetLevelClearedRect(legacy_texture_->target(), 0);
+ } else {
+ return ClearedRectInternal();
+ }
}
-void SharedImageBackingAHB::SetCleared() {
- // TODO(cblume): We could avoid this lock if we instead pass a flag to clear
- // into EndWrite() or BeginRead()
+void SharedImageBackingAHB::SetClearedRect(const gfx::Rect& cleared_rect) {
AutoLock auto_lock(this);
-
- if (legacy_texture_)
- legacy_texture_->SetLevelCleared(legacy_texture_->target(), 0, true);
- is_cleared_ = true;
+ // If a |legacy_texture_| exists, defer to that. Once created,
+ // |legacy_texture_| is never destroyed, so no need to synchronize with
+ // SetClearedRectInternal.
+ if (legacy_texture_) {
+ legacy_texture_->SetLevelClearedRect(legacy_texture_->target(), 0,
+ cleared_rect);
+ } else {
+ SetClearedRectInternal(cleared_rect);
+ }
}
void SharedImageBackingAHB::Update(std::unique_ptr<gfx::GpuFence> in_fence) {
@@ -483,6 +557,10 @@ void SharedImageBackingAHB::Update(std::unique_ptr<gfx::GpuFence> in_fence) {
bool SharedImageBackingAHB::ProduceLegacyMailbox(
MailboxManager* mailbox_manager) {
+ // Legacy mailboxes cannot be used safely in thread-safe mode.
+ if (is_thread_safe())
+ return false;
+
// This doesn't need to take a lock because it is only called at creation
// time.
DCHECK(!is_writing_);
@@ -491,23 +569,15 @@ bool SharedImageBackingAHB::ProduceLegacyMailbox(
legacy_texture_ = GenGLTexture();
if (!legacy_texture_)
return false;
+ // Make sure our |legacy_texture_| has the right initial cleared rect.
+ legacy_texture_->SetLevelClearedRect(legacy_texture_->target(), 0,
+ ClearedRectInternal());
mailbox_manager->ProduceTexture(mailbox(), legacy_texture_);
return true;
}
-void SharedImageBackingAHB::Destroy() {
- DCHECK(hardware_buffer_handle_.is_valid());
- if (legacy_texture_) {
- legacy_texture_->RemoveLightweightRef(have_context());
- legacy_texture_ = nullptr;
- }
- hardware_buffer_handle_.reset();
-}
-
base::android::ScopedHardwareBufferHandle SharedImageBackingAHB::GetAhbHandle()
const {
- AutoLock auto_lock(this);
-
return hardware_buffer_handle_.Clone();
}
@@ -534,12 +604,16 @@ SharedImageBackingAHB::ProduceSkia(
// Check whether we are in Vulkan or GL mode and create the Skia
// representation accordingly.
if (context_state->GrContextIsVulkan()) {
- sk_sp<SkPromiseImageTexture> promise_texture = CreatePromiseTextureAHB(
- context_state->vk_context_provider(), GetAhbHandle(), size(), format());
- if (!promise_texture)
+ auto* device_queue = context_state->vk_context_provider()->GetDeviceQueue();
+ gfx::GpuMemoryBufferHandle gmb_handle(GetAhbHandle());
+ auto vulkan_image = VulkanImage::CreateFromGpuMemoryBufferHandle(
+ device_queue, std::move(gmb_handle), size(), ToVkFormat(format()),
+ 0 /* usage */);
+ if (!vulkan_image)
return nullptr;
+
return std::make_unique<SharedImageRepresentationSkiaVkAHB>(
- manager, this, std::move(context_state), std::move(promise_texture),
+ manager, this, std::move(context_state), std::move(vulkan_image),
tracker);
}
DCHECK(context_state->GrContextIsGL());
@@ -555,10 +629,17 @@ SharedImageBackingAHB::ProduceSkia(
manager, this, tracker);
}
+std::unique_ptr<SharedImageRepresentationOverlay>
+SharedImageBackingAHB::ProduceOverlay(SharedImageManager* manager,
+ MemoryTypeTracker* tracker) {
+ return std::make_unique<SharedImageRepresentationOverlayAHB>(manager, this,
+ tracker);
+}
+
bool SharedImageBackingAHB::BeginWrite(base::ScopedFD* fd_to_wait_on) {
AutoLock auto_lock(this);
- if (is_writing_ || !active_readers_.empty()) {
+ if (is_writing_ || !active_readers_.empty() || is_overlay_accessing_) {
LOG(ERROR) << "BeginWrite should only be called when there are no other "
"readers or writers";
return false;
@@ -626,6 +707,42 @@ void SharedImageBackingAHB::EndRead(const SharedImageRepresentation* reader,
gl::MergeFDs(std::move(read_sync_fd_), std::move(end_read_fd));
}
+gl::GLImage* SharedImageBackingAHB::BeginOverlayAccess() {
+ AutoLock auto_lock(this);
+
+ DCHECK(!is_overlay_accessing_);
+
+ if (is_writing_) {
+ LOG(ERROR)
+ << "BeginOverlayAccess should only be called when there are no writers";
+ return nullptr;
+ }
+
+ if (!overlay_image_) {
+ overlay_image_ =
+ base::MakeRefCounted<OverlayImage>(hardware_buffer_handle_.get());
+ overlay_image_->SetColorSpace(color_space());
+ }
+
+ if (write_sync_fd_.is_valid()) {
+ base::ScopedFD fence_fd(HANDLE_EINTR(dup(write_sync_fd_.get())));
+ overlay_image_->SetBeginFence(std::move(fence_fd));
+ }
+
+ is_overlay_accessing_ = true;
+ return overlay_image_.get();
+}
+
+void SharedImageBackingAHB::EndOverlayAccess() {
+ AutoLock auto_lock(this);
+
+ DCHECK(is_overlay_accessing_);
+ is_overlay_accessing_ = false;
+
+ auto fence_fd = overlay_image_->TakeEndFence();
+ read_sync_fd_ = gl::MergeFDs(std::move(read_sync_fd_), std::move(fence_fd));
+}
+
gles2::Texture* SharedImageBackingAHB::GenGLTexture() {
DCHECK(hardware_buffer_handle_.is_valid());
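For orientation, a condensed consumer-side sketch of the overlay path added above; the scoped read access forwards to BeginOverlayAccess()/EndOverlayAccess(), and |representation_factory| and |mailbox| are placeholders mirroring the unit test later in this diff:

  // Sketch only (not part of the patch): acquire the backing's GLImage for
  // scanout, then release it so the end fence is merged into the read fence.
  auto overlay = representation_factory->ProduceOverlay(mailbox);
  auto access = overlay->BeginScopedReadAccess(true /* needs_gl_image */);
  if (access) {
    gl::GLImage* image = access->gl_image();    // wraps the AHardwareBuffer
    auto buffer = image->GetAHardwareBuffer();  // handed to the overlay system
    DCHECK(buffer);
  }
  // Releasing the access runs EndOverlayAccess(), which merges the overlay's
  // end fence into read_sync_fd_.
  access.reset();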
@@ -663,6 +780,7 @@ gles2::Texture* SharedImageBackingAHB::GenGLTexture() {
api->glDeleteTexturesFn(1, &service_id);
return nullptr;
}
+ egl_image->SetColorSpace(color_space());
// Create a gles2 Texture.
auto* texture = new gles2::Texture(service_id);
@@ -673,19 +791,10 @@ gles2::Texture* SharedImageBackingAHB::GenGLTexture() {
texture->sampler_state_.wrap_t = GL_CLAMP_TO_EDGE;
texture->sampler_state_.wrap_s = GL_CLAMP_TO_EDGE;
- // If the backing is already cleared, no need to clear it again.
- gfx::Rect cleared_rect;
- {
- AutoLock auto_lock(this);
-
- if (is_cleared_)
- cleared_rect = gfx::Rect(size());
- }
-
texture->SetLevelInfo(target, 0, egl_image->GetInternalFormat(),
size().width(), size().height(), 1, 0,
egl_image->GetDataFormat(), egl_image->GetDataType(),
- cleared_rect);
+ ClearedRect());
texture->SetLevelImage(target, 0, egl_image.get(), gles2::Texture::BOUND);
texture->SetImmutable(true, false);
api->glBindTextureFn(target, old_texture_binding);
@@ -883,7 +992,6 @@ std::unique_ptr<SharedImageBacking> SharedImageBackingFactoryAHB::MakeBacking(
AHardwareBuffer_Desc hwb_info;
base::AndroidHardwareBufferCompat::GetInstance().Describe(buffer,
&hwb_info);
-
void* address = nullptr;
if (int error = base::AndroidHardwareBufferCompat::GetInstance().Lock(
buffer, AHARDWAREBUFFER_USAGE_CPU_WRITE_RARELY, -1, 0, &address)) {
@@ -912,6 +1020,11 @@ std::unique_ptr<SharedImageBacking> SharedImageBackingFactoryAHB::MakeBacking(
auto backing = std::make_unique<SharedImageBackingAHB>(
mailbox, format, size, color_space, usage, std::move(handle),
estimated_size, is_thread_safe, std::move(initial_upload_fd));
+
+ // If we uploaded initial data, set the backing as cleared.
+ if (!pixel_data.empty())
+ backing->SetCleared();
+
return backing;
}
@@ -919,6 +1032,7 @@ std::unique_ptr<SharedImageBacking>
SharedImageBackingFactoryAHB::CreateSharedImage(
const Mailbox& mailbox,
viz::ResourceFormat format,
+ SurfaceHandle surface_handle,
const gfx::Size& size,
const gfx::ColorSpace& color_space,
uint32_t usage,
@@ -947,6 +1061,14 @@ bool SharedImageBackingFactoryAHB::CanImportGpuMemoryBuffer(
return memory_buffer_type == gfx::ANDROID_HARDWARE_BUFFER;
}
+bool SharedImageBackingFactoryAHB::IsFormatSupported(
+ viz::ResourceFormat format) {
+ DCHECK_GE(format, 0);
+ DCHECK_LE(format, viz::RESOURCE_FORMAT_MAX);
+
+ return format_info_[format].ahb_supported;
+}
+
SharedImageBackingFactoryAHB::FormatInfo::FormatInfo() = default;
SharedImageBackingFactoryAHB::FormatInfo::~FormatInfo() = default;
diff --git a/chromium/gpu/command_buffer/service/shared_image_backing_factory_ahardwarebuffer.h b/chromium/gpu/command_buffer/service/shared_image_backing_factory_ahardwarebuffer.h
index dea781e0371..294e8ddd80b 100644
--- a/chromium/gpu/command_buffer/service/shared_image_backing_factory_ahardwarebuffer.h
+++ b/chromium/gpu/command_buffer/service/shared_image_backing_factory_ahardwarebuffer.h
@@ -35,6 +35,7 @@ class GPU_GLES2_EXPORT SharedImageBackingFactoryAHB
std::unique_ptr<SharedImageBacking> CreateSharedImage(
const Mailbox& mailbox,
viz::ResourceFormat format,
+ SurfaceHandle surface_handle,
const gfx::Size& size,
const gfx::ColorSpace& color_space,
uint32_t usage,
@@ -57,6 +58,7 @@ class GPU_GLES2_EXPORT SharedImageBackingFactoryAHB
uint32_t usage) override;
bool CanImportGpuMemoryBuffer(
gfx::GpuMemoryBufferType memory_buffer_type) override;
+ bool IsFormatSupported(viz::ResourceFormat format);
private:
bool ValidateUsage(uint32_t usage,
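For reference, a call-site sketch of the widened CreateSharedImage() entry point above; the values mirror the unit-test call sites later in this diff, and |backing_factory| is a placeholder for a SharedImageBackingFactoryAHB instance:

  // Sketch only: the new SurfaceHandle parameter is threaded through even
  // when no surface is involved, as in the tests below.
  auto mailbox = gpu::Mailbox::GenerateForSharedImage();
  auto backing = backing_factory->CreateSharedImage(
      mailbox, viz::ResourceFormat::RGBA_8888,
      gpu::kNullSurfaceHandle /* surface_handle */, gfx::Size(256, 256),
      gfx::ColorSpace::CreateSRGB(),
      SHARED_IMAGE_USAGE_GLES2 | SHARED_IMAGE_USAGE_DISPLAY,
      false /* is_thread_safe */);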
diff --git a/chromium/gpu/command_buffer/service/shared_image_backing_factory_ahardwarebuffer_unittest.cc b/chromium/gpu/command_buffer/service/shared_image_backing_factory_ahardwarebuffer_unittest.cc
index 6647d31d95d..ca4028c89c9 100644
--- a/chromium/gpu/command_buffer/service/shared_image_backing_factory_ahardwarebuffer_unittest.cc
+++ b/chromium/gpu/command_buffer/service/shared_image_backing_factory_ahardwarebuffer_unittest.cc
@@ -5,6 +5,7 @@
#include "gpu/command_buffer/service/shared_image_backing_factory_ahardwarebuffer.h"
#include "base/android/android_hardware_buffer_compat.h"
+#include "base/android/scoped_hardware_buffer_fence_sync.h"
#include "base/bind_helpers.h"
#include "gpu/command_buffer/common/mailbox.h"
#include "gpu/command_buffer/common/shared_image_usage.h"
@@ -14,6 +15,7 @@
#include "gpu/command_buffer/service/shared_image_factory.h"
#include "gpu/command_buffer/service/shared_image_manager.h"
#include "gpu/command_buffer/service/shared_image_representation.h"
+#include "gpu/command_buffer/service/shared_image_test_utils.h"
#include "gpu/command_buffer/service/texture_manager.h"
#include "gpu/config/gpu_driver_bug_workarounds.h"
#include "gpu/config/gpu_feature_info.h"
@@ -57,7 +59,7 @@ class SharedImageBackingFactoryAHBTest : public testing::Test {
context_state_ = base::MakeRefCounted<SharedContextState>(
std::move(share_group), surface_, context_,
false /* use_virtualized_gl_contexts */, base::DoNothing());
- context_state_->InitializeGrContext(workarounds, nullptr);
+ context_state_->InitializeGrContext(GpuPreferences(), workarounds, nullptr);
auto feature_info =
base::MakeRefCounted<gles2::FeatureInfo>(workarounds, GpuFeatureInfo());
context_state_->InitializeGL(GpuPreferences(), std::move(feature_info));
@@ -73,47 +75,6 @@ class SharedImageBackingFactoryAHBTest : public testing::Test {
GrContext* gr_context() { return context_state_->gr_context(); }
- std::vector<uint8_t> ReadPixels(Mailbox mailbox, gfx::Size size) {
- auto skia_representation =
- shared_image_representation_factory_->ProduceSkia(mailbox,
- context_state_.get());
- EXPECT_TRUE(skia_representation);
- std::vector<GrBackendSemaphore> begin_semaphores;
- std::vector<GrBackendSemaphore> end_semaphores;
- base::Optional<SharedImageRepresentationSkia::ScopedReadAccess>
- scoped_read_access;
- scoped_read_access.emplace(skia_representation.get(), &begin_semaphores,
- &end_semaphores);
- auto* promise_texture = scoped_read_access->promise_image_texture();
- EXPECT_EQ(0u, begin_semaphores.size());
- EXPECT_EQ(0u, end_semaphores.size());
- EXPECT_TRUE(promise_texture);
- GrBackendTexture backend_texture = promise_texture->backendTexture();
- EXPECT_TRUE(backend_texture.isValid());
- EXPECT_EQ(size.width(), backend_texture.width());
- EXPECT_EQ(size.height(), backend_texture.height());
-
- // Create an Sk Image from GrBackendTexture.
- auto sk_image = SkImage::MakeFromTexture(
- gr_context(), promise_texture->backendTexture(),
- kTopLeft_GrSurfaceOrigin, kRGBA_8888_SkColorType, kOpaque_SkAlphaType,
- nullptr);
-
- SkImageInfo dst_info =
- SkImageInfo::Make(size.width(), size.height(), kRGBA_8888_SkColorType,
- kOpaque_SkAlphaType, nullptr);
-
- const int num_pixels = size.width() * size.height();
- std::vector<uint8_t> dst_pixels(num_pixels * 4);
-
- // Read back pixels from Sk Image.
- EXPECT_TRUE(sk_image->readPixels(dst_info, dst_pixels.data(),
- dst_info.minRowBytes(), 0, 0));
- scoped_read_access.reset();
-
- return dst_pixels;
- }
-
protected:
scoped_refptr<gl::GLSurface> surface_;
scoped_refptr<gl::GLContext> context_;
@@ -164,11 +125,12 @@ TEST_F(SharedImageBackingFactoryAHBTest, Basic) {
EXPECT_TRUE(skia_representation);
std::vector<GrBackendSemaphore> begin_semaphores;
std::vector<GrBackendSemaphore> end_semaphores;
- base::Optional<SharedImageRepresentationSkia::ScopedWriteAccess>
+ std::unique_ptr<SharedImageRepresentationSkia::ScopedWriteAccess>
scoped_write_access;
- scoped_write_access.emplace(skia_representation.get(), &begin_semaphores,
- &end_semaphores);
- EXPECT_TRUE(scoped_write_access->success());
+ scoped_write_access = skia_representation->BeginScopedWriteAccess(
+ &begin_semaphores, &end_semaphores,
+ SharedImageRepresentation::AllowUnclearedAccess::kYes);
+ EXPECT_TRUE(scoped_write_access);
auto* surface = scoped_write_access->surface();
EXPECT_TRUE(surface);
EXPECT_EQ(gl_legacy_shared_image.size().width(), surface->width());
@@ -177,10 +139,11 @@ TEST_F(SharedImageBackingFactoryAHBTest, Basic) {
EXPECT_EQ(0u, end_semaphores.size());
scoped_write_access.reset();
- base::Optional<SharedImageRepresentationSkia::ScopedReadAccess>
+ std::unique_ptr<SharedImageRepresentationSkia::ScopedReadAccess>
scoped_read_access;
- scoped_read_access.emplace(skia_representation.get(), &begin_semaphores,
- &end_semaphores);
+ scoped_read_access = skia_representation->BeginScopedReadAccess(
+ &begin_semaphores, &end_semaphores);
+ EXPECT_TRUE(scoped_read_access);
auto* promise_texture = scoped_read_access->promise_image_texture();
EXPECT_EQ(0u, begin_semaphores.size());
EXPECT_EQ(0u, end_semaphores.size());
@@ -206,8 +169,10 @@ TEST_F(SharedImageBackingFactoryAHBTest, GLSkiaGL) {
gfx::Size size(1, 1);
auto color_space = gfx::ColorSpace::CreateSRGB();
uint32_t usage = SHARED_IMAGE_USAGE_GLES2 | SHARED_IMAGE_USAGE_DISPLAY;
+ gpu::SurfaceHandle surface_handle = gpu::kNullSurfaceHandle;
auto backing = backing_factory_->CreateSharedImage(
- mailbox, format, size, color_space, usage, false /* is_thread_safe */);
+ mailbox, format, surface_handle, size, color_space, usage,
+ false /* is_thread_safe */);
EXPECT_TRUE(backing);
GLenum expected_target = GL_TEXTURE_2D;
@@ -236,9 +201,13 @@ TEST_F(SharedImageBackingFactoryAHBTest, GLSkiaGL) {
// Set the clear color to green.
api->glClearColorFn(0.0f, 1.0f, 0.0f, 1.0f);
api->glClearFn(GL_COLOR_BUFFER_BIT);
+
+ // Mark the representation as cleared.
+ gl_representation->SetCleared();
gl_representation.reset();
- auto dst_pixels = ReadPixels(mailbox, size);
+ auto dst_pixels = ReadPixels(mailbox, size, context_state_.get(),
+ shared_image_representation_factory_.get());
// Compare the pixel values.
EXPECT_EQ(dst_pixels[0], 0);
@@ -274,7 +243,8 @@ TEST_F(SharedImageBackingFactoryAHBTest, InitialData) {
shared_image_manager_.Register(std::move(backing),
memory_type_tracker_.get());
- auto dst_pixels = ReadPixels(mailbox, size);
+ auto dst_pixels = ReadPixels(mailbox, size, context_state_.get(),
+ shared_image_representation_factory_.get());
// Compare the pixel values.
DCHECK(dst_pixels.size() == initial_data.size());
@@ -293,9 +263,11 @@ TEST_F(SharedImageBackingFactoryAHBTest, InvalidFormat) {
auto format = viz::ResourceFormat::YUV_420_BIPLANAR;
gfx::Size size(256, 256);
auto color_space = gfx::ColorSpace::CreateSRGB();
+ gpu::SurfaceHandle surface_handle = gpu::kNullSurfaceHandle;
uint32_t usage = SHARED_IMAGE_USAGE_GLES2;
auto backing = backing_factory_->CreateSharedImage(
- mailbox, format, size, color_space, usage, false /* is_thread_safe */);
+ mailbox, format, surface_handle, size, color_space, usage,
+ false /* is_thread_safe */);
EXPECT_FALSE(backing);
}
@@ -308,14 +280,17 @@ TEST_F(SharedImageBackingFactoryAHBTest, InvalidSize) {
auto format = viz::ResourceFormat::RGBA_8888;
gfx::Size size(0, 0);
auto color_space = gfx::ColorSpace::CreateSRGB();
+ gpu::SurfaceHandle surface_handle = gpu::kNullSurfaceHandle;
uint32_t usage = SHARED_IMAGE_USAGE_GLES2;
auto backing = backing_factory_->CreateSharedImage(
- mailbox, format, size, color_space, usage, false /* is_thread_safe */);
+ mailbox, format, surface_handle, size, color_space, usage,
+ false /* is_thread_safe */);
EXPECT_FALSE(backing);
size = gfx::Size(INT_MAX, INT_MAX);
- backing = backing_factory_->CreateSharedImage(
- mailbox, format, size, color_space, usage, false /* is_thread_safe */);
+ backing = backing_factory_->CreateSharedImage(mailbox, format, surface_handle,
+ size, color_space, usage,
+ false /* is_thread_safe */);
EXPECT_FALSE(backing);
}
@@ -327,9 +302,11 @@ TEST_F(SharedImageBackingFactoryAHBTest, EstimatedSize) {
auto format = viz::ResourceFormat::RGBA_8888;
gfx::Size size(256, 256);
auto color_space = gfx::ColorSpace::CreateSRGB();
+ gpu::SurfaceHandle surface_handle = gpu::kNullSurfaceHandle;
uint32_t usage = SHARED_IMAGE_USAGE_GLES2;
auto backing = backing_factory_->CreateSharedImage(
- mailbox, format, size, color_space, usage, false /* is_thread_safe */);
+ mailbox, format, surface_handle, size, color_space, usage,
+ false /* is_thread_safe */);
EXPECT_TRUE(backing);
size_t backing_estimated_size = backing->estimated_size();
@@ -359,11 +336,12 @@ TEST_F(SharedImageBackingFactoryAHBTest, DISABLED_OnlyOneWriter) {
std::vector<GrBackendSemaphore> begin_semaphores;
std::vector<GrBackendSemaphore> end_semaphores;
- base::Optional<SharedImageRepresentationSkia::ScopedWriteAccess>
+ std::unique_ptr<SharedImageRepresentationSkia::ScopedWriteAccess>
scoped_write_access;
- scoped_write_access.emplace(skia_representation.get(), &begin_semaphores,
- &end_semaphores);
- EXPECT_TRUE(scoped_write_access->success());
+ scoped_write_access = skia_representation->BeginScopedWriteAccess(
+ &begin_semaphores, &end_semaphores,
+ SharedImageRepresentation::AllowUnclearedAccess::kYes);
+ EXPECT_TRUE(scoped_write_access);
EXPECT_EQ(0u, begin_semaphores.size());
EXPECT_EQ(0u, end_semaphores.size());
@@ -371,11 +349,12 @@ TEST_F(SharedImageBackingFactoryAHBTest, DISABLED_OnlyOneWriter) {
gl_legacy_shared_image.mailbox(), context_state_.get());
std::vector<GrBackendSemaphore> begin_semaphores2;
std::vector<GrBackendSemaphore> end_semaphores2;
- base::Optional<SharedImageRepresentationSkia::ScopedWriteAccess>
+ std::unique_ptr<SharedImageRepresentationSkia::ScopedWriteAccess>
scoped_write_access2;
- scoped_write_access2.emplace(skia_representation2.get(), &begin_semaphores2,
- &end_semaphores2);
- EXPECT_FALSE(scoped_write_access->success());
+ scoped_write_access2 = skia_representation2->BeginScopedWriteAccess(
+ &begin_semaphores2, &end_semaphores2,
+ SharedImageRepresentation::AllowUnclearedAccess::kYes);
+ EXPECT_FALSE(scoped_write_access);
EXPECT_EQ(0u, begin_semaphores2.size());
EXPECT_EQ(0u, end_semaphores2.size());
skia_representation2.reset();
@@ -401,19 +380,19 @@ TEST_F(SharedImageBackingFactoryAHBTest, CanHaveMultipleReaders) {
std::vector<GrBackendSemaphore> begin_semaphores;
std::vector<GrBackendSemaphore> end_semaphores;
- base::Optional<SharedImageRepresentationSkia::ScopedReadAccess>
+ std::unique_ptr<SharedImageRepresentationSkia::ScopedReadAccess>
scoped_read_access;
- scoped_read_access.emplace(skia_representation.get(), &begin_semaphores,
- &end_semaphores);
- EXPECT_TRUE(scoped_read_access->success());
+ scoped_read_access = skia_representation->BeginScopedReadAccess(
+ &begin_semaphores, &end_semaphores);
+ EXPECT_TRUE(scoped_read_access);
EXPECT_EQ(0u, begin_semaphores.size());
EXPECT_EQ(0u, end_semaphores.size());
- base::Optional<SharedImageRepresentationSkia::ScopedReadAccess>
+ std::unique_ptr<SharedImageRepresentationSkia::ScopedReadAccess>
scoped_read_access2;
- scoped_read_access2.emplace(skia_representation2.get(), &begin_semaphores,
- &end_semaphores);
- EXPECT_TRUE(scoped_read_access2->success());
+ scoped_read_access2 = skia_representation2->BeginScopedReadAccess(
+ &begin_semaphores, &end_semaphores);
+ EXPECT_TRUE(scoped_read_access2);
EXPECT_EQ(0u, begin_semaphores.size());
EXPECT_EQ(0u, end_semaphores.size());
@@ -439,11 +418,11 @@ TEST_F(SharedImageBackingFactoryAHBTest, CannotWriteWhileReading) {
std::vector<GrBackendSemaphore> begin_semaphores;
std::vector<GrBackendSemaphore> end_semaphores;
- base::Optional<SharedImageRepresentationSkia::ScopedReadAccess>
+ std::unique_ptr<SharedImageRepresentationSkia::ScopedReadAccess>
scoped_read_access;
- scoped_read_access.emplace(skia_representation.get(), &begin_semaphores,
- &end_semaphores);
- EXPECT_TRUE(scoped_read_access->success());
+ scoped_read_access = skia_representation->BeginScopedReadAccess(
+ &begin_semaphores, &end_semaphores);
+ EXPECT_TRUE(scoped_read_access);
EXPECT_EQ(0u, begin_semaphores.size());
EXPECT_EQ(0u, end_semaphores.size());
@@ -453,11 +432,12 @@ TEST_F(SharedImageBackingFactoryAHBTest, CannotWriteWhileReading) {
std::vector<GrBackendSemaphore> begin_semaphores2;
std::vector<GrBackendSemaphore> end_semaphores2;
- base::Optional<SharedImageRepresentationSkia::ScopedWriteAccess>
+ std::unique_ptr<SharedImageRepresentationSkia::ScopedWriteAccess>
scoped_write_access;
- scoped_write_access.emplace(skia_representation2.get(), &begin_semaphores2,
- &end_semaphores2);
- EXPECT_FALSE(scoped_write_access->success());
+ scoped_write_access = skia_representation2->BeginScopedWriteAccess(
+ &begin_semaphores2, &end_semaphores2,
+ SharedImageRepresentation::AllowUnclearedAccess::kYes);
+ EXPECT_FALSE(scoped_write_access);
EXPECT_EQ(0u, begin_semaphores2.size());
EXPECT_EQ(0u, end_semaphores2.size());
skia_representation2.reset();
@@ -480,11 +460,12 @@ TEST_F(SharedImageBackingFactoryAHBTest, CannotReadWhileWriting) {
gl_legacy_shared_image.mailbox(), context_state_.get());
std::vector<GrBackendSemaphore> begin_semaphores;
std::vector<GrBackendSemaphore> end_semaphores;
- base::Optional<SharedImageRepresentationSkia::ScopedWriteAccess>
+ std::unique_ptr<SharedImageRepresentationSkia::ScopedWriteAccess>
scoped_write_access;
- scoped_write_access.emplace(skia_representation.get(), &begin_semaphores,
- &end_semaphores);
- EXPECT_TRUE(scoped_write_access->success());
+ scoped_write_access = skia_representation->BeginScopedWriteAccess(
+ &begin_semaphores, &end_semaphores,
+ SharedImageRepresentation::AllowUnclearedAccess::kYes);
+ EXPECT_TRUE(scoped_write_access);
EXPECT_EQ(0u, begin_semaphores.size());
EXPECT_EQ(0u, end_semaphores.size());
@@ -492,11 +473,11 @@ TEST_F(SharedImageBackingFactoryAHBTest, CannotReadWhileWriting) {
gl_legacy_shared_image.mailbox(), context_state_.get());
std::vector<GrBackendSemaphore> begin_semaphores2;
std::vector<GrBackendSemaphore> end_semaphores2;
- base::Optional<SharedImageRepresentationSkia::ScopedReadAccess>
+ std::unique_ptr<SharedImageRepresentationSkia::ScopedReadAccess>
scoped_read_access;
- scoped_read_access.emplace(skia_representation2.get(), &begin_semaphores2,
- &end_semaphores2);
- EXPECT_FALSE(scoped_read_access->success());
+ scoped_read_access = skia_representation2->BeginScopedReadAccess(
+ &begin_semaphores2, &end_semaphores2);
+ EXPECT_FALSE(scoped_read_access);
EXPECT_EQ(0u, begin_semaphores2.size());
EXPECT_EQ(0u, end_semaphores2.size());
skia_representation2.reset();
@@ -505,6 +486,63 @@ TEST_F(SharedImageBackingFactoryAHBTest, CannotReadWhileWriting) {
skia_representation.reset();
}
+// Test that clearing state stays synchronized between the legacy mailbox's
+// texture and the shared image representation.
+TEST_F(SharedImageBackingFactoryAHBTest, LegacyClearing) {
+ if (!base::AndroidHardwareBufferCompat::IsSupportAvailable())
+ return;
+
+ GlLegacySharedImage gl_legacy_shared_image{
+ backing_factory_.get(), false /* is_thread_safe */,
+ &mailbox_manager_, &shared_image_manager_,
+ memory_type_tracker_.get(), shared_image_representation_factory_.get()};
+
+ TextureBase* texture_base =
+ mailbox_manager_.ConsumeTexture(gl_legacy_shared_image.mailbox());
+ auto* texture = gles2::Texture::CheckedCast(texture_base);
+ EXPECT_TRUE(texture);
+ GLenum target = texture->target();
+
+ auto skia_representation = shared_image_representation_factory_->ProduceSkia(
+ gl_legacy_shared_image.mailbox(), context_state_.get());
+ EXPECT_TRUE(skia_representation);
+
+ // Check initial state.
+ EXPECT_TRUE(texture->IsLevelCleared(target, 0));
+ EXPECT_TRUE(skia_representation->IsCleared());
+
+ // Un-clear the representation.
+ skia_representation->SetClearedRect(gfx::Rect());
+ EXPECT_FALSE(texture->IsLevelCleared(target, 0));
+ EXPECT_FALSE(skia_representation->IsCleared());
+
+ // Partially clear the representation.
+ gfx::Rect partial_clear_rect(0, 0, 128, 128);
+ skia_representation->SetClearedRect(partial_clear_rect);
+ EXPECT_EQ(partial_clear_rect, texture->GetLevelClearedRect(target, 0));
+ EXPECT_EQ(partial_clear_rect, skia_representation->ClearedRect());
+
+ // Fully clear the representation.
+ skia_representation->SetCleared();
+ EXPECT_TRUE(texture->IsLevelCleared(target, 0));
+ EXPECT_TRUE(skia_representation->IsCleared());
+
+ // Un-clear the texture.
+ texture->SetLevelClearedRect(target, 0, gfx::Rect());
+ EXPECT_FALSE(texture->IsLevelCleared(target, 0));
+ EXPECT_FALSE(skia_representation->IsCleared());
+
+ // Partially clear the texture.
+ texture->SetLevelClearedRect(target, 0, partial_clear_rect);
+ EXPECT_EQ(partial_clear_rect, texture->GetLevelClearedRect(target, 0));
+ EXPECT_EQ(partial_clear_rect, skia_representation->ClearedRect());
+
+ // Fully clear the texture.
+ texture->SetLevelCleared(target, 0, true);
+ EXPECT_TRUE(texture->IsLevelCleared(target, 0));
+ EXPECT_TRUE(skia_representation->IsCleared());
+}
+
GlLegacySharedImage::GlLegacySharedImage(
SharedImageBackingFactoryAHB* backing_factory,
bool is_thread_safe,
@@ -516,6 +554,7 @@ GlLegacySharedImage::GlLegacySharedImage(
mailbox_ = Mailbox::GenerateForSharedImage();
auto format = viz::ResourceFormat::RGBA_8888;
auto color_space = gfx::ColorSpace::CreateSRGB();
+ gpu::SurfaceHandle surface_handle = gpu::kNullSurfaceHandle;
// SHARED_IMAGE_USAGE_DISPLAY for skia read and SHARED_IMAGE_USAGE_RASTER for
// skia write.
@@ -523,7 +562,8 @@ GlLegacySharedImage::GlLegacySharedImage(
if (!is_thread_safe)
usage |= SHARED_IMAGE_USAGE_DISPLAY;
backing_ = backing_factory->CreateSharedImage(
- mailbox_, format, size_, color_space, usage, is_thread_safe);
+ mailbox_, format, surface_handle, size_, color_space, usage,
+ is_thread_safe);
EXPECT_TRUE(backing_);
// Check clearing.
@@ -532,24 +572,31 @@ GlLegacySharedImage::GlLegacySharedImage(
EXPECT_TRUE(backing_->IsCleared());
}
- // First, validate via a legacy mailbox.
GLenum expected_target = GL_TEXTURE_2D;
- EXPECT_TRUE(backing_->ProduceLegacyMailbox(mailbox_manager_));
-
- TextureBase* texture_base = mailbox_manager_->ConsumeTexture(mailbox_);
- // Currently there is no support for passthrough texture on android and hence
- // in AHB backing. So the TextureBase* should be pointing to a Texture object.
- auto* texture = gles2::Texture::CheckedCast(texture_base);
- EXPECT_TRUE(texture);
- EXPECT_EQ(texture->target(), expected_target);
- EXPECT_TRUE(texture->IsImmutable());
- int width, height, depth;
- bool has_level =
- texture->GetLevelSize(GL_TEXTURE_2D, 0, &width, &height, &depth);
- EXPECT_TRUE(has_level);
- EXPECT_EQ(width, size_.width());
- EXPECT_EQ(height, size_.height());
+ // First, validate via a legacy mailbox (only available when not
+ // |is_thread_safe|).
+ if (!is_thread_safe) {
+ EXPECT_TRUE(backing_->ProduceLegacyMailbox(mailbox_manager_));
+
+ TextureBase* texture_base = mailbox_manager_->ConsumeTexture(mailbox_);
+
+ // Passthrough textures are currently unsupported on Android, and hence in
+ // the AHB backing, so the TextureBase* should point to a gles2::Texture
+ // object.
+ auto* texture = gles2::Texture::CheckedCast(texture_base);
+ EXPECT_TRUE(texture);
+ EXPECT_EQ(texture->target(), expected_target);
+ EXPECT_TRUE(texture->IsImmutable());
+ int width, height, depth;
+ bool has_level =
+ texture->GetLevelSize(GL_TEXTURE_2D, 0, &width, &height, &depth);
+ EXPECT_TRUE(has_level);
+ EXPECT_EQ(width, size_.width());
+ EXPECT_EQ(height, size_.height());
+ } else {
+ EXPECT_FALSE(backing_->ProduceLegacyMailbox(mailbox_manager_));
+ }
shared_image_ =
shared_image_manager->Register(std::move(backing_), memory_type_tracker);
@@ -572,5 +619,42 @@ GlLegacySharedImage::~GlLegacySharedImage() {
EXPECT_FALSE(mailbox_manager_->ConsumeTexture(mailbox_));
}
+TEST_F(SharedImageBackingFactoryAHBTest, Overlay) {
+ if (!base::AndroidHardwareBufferCompat::IsSupportAvailable())
+ return;
+
+ GlLegacySharedImage gl_legacy_shared_image{
+ backing_factory_.get(), false /* is_thread_safe */,
+ &mailbox_manager_, &shared_image_manager_,
+ memory_type_tracker_.get(), shared_image_representation_factory_.get()};
+
+ auto skia_representation = shared_image_representation_factory_->ProduceSkia(
+ gl_legacy_shared_image.mailbox(), context_state_.get());
+
+ std::vector<GrBackendSemaphore> begin_semaphores;
+ std::vector<GrBackendSemaphore> end_semaphores;
+ auto scoped_write_access = skia_representation->BeginScopedWriteAccess(
+ &begin_semaphores, &end_semaphores,
+ SharedImageRepresentation::AllowUnclearedAccess::kYes);
+ EXPECT_TRUE(scoped_write_access);
+ EXPECT_EQ(0u, begin_semaphores.size());
+ EXPECT_EQ(0u, end_semaphores.size());
+ scoped_write_access.reset();
+
+ auto overlay_representation =
+ shared_image_representation_factory_->ProduceOverlay(
+ gl_legacy_shared_image.mailbox());
+ EXPECT_TRUE(overlay_representation);
+
+ auto scoped_read_access =
+ overlay_representation->BeginScopedReadAccess(true /* needs_gl_image */);
+ EXPECT_TRUE(scoped_read_access);
+ EXPECT_TRUE(scoped_read_access->gl_image());
+ auto buffer = scoped_read_access->gl_image()->GetAHardwareBuffer();
+ DCHECK(buffer);
+ scoped_read_access.reset();
+ skia_representation.reset();
+}
+
} // anonymous namespace
} // namespace gpu
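The test changes above all replace the emplaced ScopedWriteAccess/ScopedReadAccess (checked via success()) with Begin* helpers that return a std::unique_ptr and signal failure with null. A condensed sketch of the new pattern, reusing names from the diff:

  // Sketch only: the scoped-access object now doubles as the success signal.
  std::vector<GrBackendSemaphore> begin_semaphores;
  std::vector<GrBackendSemaphore> end_semaphores;
  auto write_access = skia_representation->BeginScopedWriteAccess(
      &begin_semaphores, &end_semaphores,
      SharedImageRepresentation::AllowUnclearedAccess::kYes);
  if (write_access) {
    SkSurface* surface = write_access->surface();
    // ... draw into |surface| ...
    skia_representation->SetCleared();  // mark the contents as valid
  }
  write_access.reset();  // end the write before any read begins

  auto read_access = skia_representation->BeginScopedReadAccess(
      &begin_semaphores, &end_semaphores);
  if (read_access) {
    auto* promise_texture = read_access->promise_image_texture();
    // ... wrap |promise_texture| in an SkImage and read pixels back ...
  }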
diff --git a/chromium/gpu/command_buffer/service/shared_image_backing_factory_d3d.cc b/chromium/gpu/command_buffer/service/shared_image_backing_factory_d3d.cc
index 2888f5fc5d6..6529f07f8da 100644
--- a/chromium/gpu/command_buffer/service/shared_image_backing_factory_d3d.cc
+++ b/chromium/gpu/command_buffer/service/shared_image_backing_factory_d3d.cc
@@ -4,29 +4,12 @@
#include "gpu/command_buffer/service/shared_image_backing_factory_d3d.h"
-#include "base/trace_event/memory_dump_manager.h"
#include "components/viz/common/resources/resource_format_utils.h"
-#include "gpu/command_buffer/common/shared_image_trace_utils.h"
-#include "gpu/command_buffer/service/mailbox_manager.h"
-#include "gpu/command_buffer/service/shared_context_state.h"
-#include "gpu/command_buffer/service/shared_image_backing.h"
-#include "gpu/command_buffer/service/shared_image_manager.h"
-#include "gpu/command_buffer/service/shared_image_representation.h"
-#include "gpu/command_buffer/service/shared_image_representation_skia_gl.h"
-#include "gpu/command_buffer/service/texture_manager.h"
+#include "gpu/command_buffer/service/shared_image_backing_d3d.h"
#include "ui/gfx/buffer_format_util.h"
-#include "ui/gl/buildflags.h"
#include "ui/gl/direct_composition_surface_win.h"
#include "ui/gl/gl_angle_util_win.h"
#include "ui/gl/gl_bindings.h"
-#include "ui/gl/gl_image_d3d.h"
-#include "ui/gl/trace_util.h"
-
-// Usage of BUILDFLAG(USE_DAWN) needs to be after the include for
-// ui/gl/buildflags.h
-#if BUILDFLAG(USE_DAWN)
-#include <dawn_native/D3D12Backend.h>
-#endif // BUILDFLAG(USE_DAWN)
namespace gpu {
@@ -34,7 +17,7 @@ namespace {
class ScopedRestoreTexture2D {
public:
- ScopedRestoreTexture2D(gl::GLApi* api) : api_(api) {
+ explicit ScopedRestoreTexture2D(gl::GLApi* api) : api_(api) {
GLint binding = 0;
api->glGetIntegervFn(GL_TEXTURE_BINDING_2D, &binding);
prev_binding_ = binding;
@@ -94,443 +77,10 @@ base::Optional<DXGI_FORMAT> VizFormatToDXGIFormat(
}
}
-#if BUILDFLAG(USE_DAWN)
-base::Optional<WGPUTextureFormat> VizResourceFormatToWGPUTextureFormat(
- viz::ResourceFormat viz_resource_format) {
- switch (viz_resource_format) {
- case viz::RGBA_F16:
- return WGPUTextureFormat_RGBA16Float;
- case viz::BGRA_8888:
- return WGPUTextureFormat_BGRA8Unorm;
- case viz::RGBA_8888:
- return WGPUTextureFormat_RGBA8Unorm;
- default:
- NOTREACHED();
- return {};
- }
-}
-#endif // BUILDFLAG(USE_DAWN)
-
} // anonymous namespace
-// Representation of a SharedImageBackingD3D as a GL Texture.
-class SharedImageRepresentationGLTextureD3D
- : public SharedImageRepresentationGLTexture {
- public:
- SharedImageRepresentationGLTextureD3D(SharedImageManager* manager,
- SharedImageBacking* backing,
- MemoryTypeTracker* tracker,
- gles2::Texture* texture)
- : SharedImageRepresentationGLTexture(manager, backing, tracker),
- texture_(texture) {}
-
- gles2::Texture* GetTexture() override { return texture_; }
-
- private:
- gles2::Texture* const texture_;
-};
-
-// Representation of a SharedImageBackingD3D as a GL
-// TexturePassthrough.
-class SharedImageRepresentationGLTexturePassthroughD3D
- : public SharedImageRepresentationGLTexturePassthrough {
- public:
- SharedImageRepresentationGLTexturePassthroughD3D(
- SharedImageManager* manager,
- SharedImageBacking* backing,
- MemoryTypeTracker* tracker,
- scoped_refptr<gles2::TexturePassthrough> texture_passthrough)
- : SharedImageRepresentationGLTexturePassthrough(manager,
- backing,
- tracker),
- texture_passthrough_(std::move(texture_passthrough)) {}
-
- const scoped_refptr<gles2::TexturePassthrough>& GetTexturePassthrough()
- override {
- return texture_passthrough_;
- }
-
- private:
- bool BeginAccess(GLenum mode) override;
- void EndAccess() override;
-
- scoped_refptr<gles2::TexturePassthrough> texture_passthrough_;
-};
-
-// Representation of a SharedImageBackingD3D as a Dawn Texture
-#if BUILDFLAG(USE_DAWN)
-class SharedImageRepresentationDawnD3D : public SharedImageRepresentationDawn {
- public:
- SharedImageRepresentationDawnD3D(SharedImageManager* manager,
- SharedImageBacking* backing,
- MemoryTypeTracker* tracker,
- WGPUDevice device)
- : SharedImageRepresentationDawn(manager, backing, tracker),
- device_(device),
- dawn_procs_(dawn_native::GetProcs()) {
- DCHECK(device_);
-
- // Keep a reference to the device so that it stays valid (it might become
- // lost in which case operations will be noops).
- dawn_procs_.deviceReference(device_);
- }
-
- ~SharedImageRepresentationDawnD3D() override {
- EndAccess();
- dawn_procs_.deviceRelease(device_);
- }
-
- WGPUTexture BeginAccess(WGPUTextureUsage usage) override;
- void EndAccess() override;
-
- private:
- WGPUDevice device_;
- WGPUTexture texture_ = nullptr;
-
- // TODO(cwallez@chromium.org): Load procs only once when the factory is
- // created and pass a pointer to them around?
- DawnProcTable dawn_procs_;
-};
-#endif // BUILDFLAG(USE_DAWN)
-
-// Implementation of SharedImageBacking that holds buffer (front buffer/back
-// buffer of swap chain) texture (as gles2::Texture/gles2::TexturePassthrough)
-// and a reference to created swap chain.
-class SharedImageBackingD3D : public SharedImageBacking {
- public:
- SharedImageBackingD3D(
- const Mailbox& mailbox,
- viz::ResourceFormat format,
- const gfx::Size& size,
- const gfx::ColorSpace& color_space,
- uint32_t usage,
- Microsoft::WRL::ComPtr<IDXGISwapChain1> swap_chain,
- gles2::Texture* texture,
- scoped_refptr<gles2::TexturePassthrough> texture_passthrough,
- scoped_refptr<gl::GLImageD3D> image,
- size_t buffer_index,
- Microsoft::WRL::ComPtr<ID3D11Texture2D> d3d11_texture,
- base::win::ScopedHandle shared_handle,
- Microsoft::WRL::ComPtr<IDXGIKeyedMutex> dxgi_keyed_mutex)
- : SharedImageBacking(mailbox,
- format,
- size,
- color_space,
- usage,
- texture ? texture->estimated_size()
- : texture_passthrough->estimated_size(),
- false /* is_thread_safe */),
- swap_chain_(std::move(swap_chain)),
- texture_(texture),
- texture_passthrough_(std::move(texture_passthrough)),
- image_(std::move(image)),
- buffer_index_(buffer_index),
- d3d11_texture_(std::move(d3d11_texture)),
- shared_handle_(std::move(shared_handle)),
- dxgi_keyed_mutex_(std::move(dxgi_keyed_mutex)) {
- DCHECK(d3d11_texture_);
- DCHECK((texture_ && !texture_passthrough_) ||
- (!texture_ && texture_passthrough_));
- }
-
- ~SharedImageBackingD3D() override {
- // Destroy() is safe to call even if it's already been called.
- Destroy();
- }
-
- // Texture is cleared on initialization.
- bool IsCleared() const override { return true; }
-
- void SetCleared() override {}
-
- void Update(std::unique_ptr<gfx::GpuFence> in_fence) override {
- DLOG(ERROR) << "SharedImageBackingD3D::Update : Trying to update "
- "Shared Images associated with swap chain.";
- }
-
- bool ProduceLegacyMailbox(MailboxManager* mailbox_manager) override {
- if (texture_) {
- mailbox_manager->ProduceTexture(mailbox(), texture_);
- } else {
- mailbox_manager->ProduceTexture(mailbox(), texture_passthrough_.get());
- }
- return true;
- }
-
- std::unique_ptr<SharedImageRepresentationDawn> ProduceDawn(
- SharedImageManager* manager,
- MemoryTypeTracker* tracker,
- WGPUDevice device) override {
-#if BUILDFLAG(USE_DAWN)
- return std::make_unique<SharedImageRepresentationDawnD3D>(manager, this,
- tracker, device);
-#else
- return nullptr;
-#endif // BUILDFLAG(USE_DAWN)
- }
-
- void Destroy() override {
- if (texture_) {
- texture_->RemoveLightweightRef(have_context());
- texture_ = nullptr;
- } else if (texture_passthrough_) {
- if (!have_context())
- texture_passthrough_->MarkContextLost();
- texture_passthrough_ = nullptr;
- }
- swap_chain_ = nullptr;
- d3d11_texture_.Reset();
- dxgi_keyed_mutex_.Reset();
- keyed_mutex_acquire_key_ = 0;
- keyed_mutex_acquired_ = false;
- shared_handle_.Close();
- }
-
- void OnMemoryDump(const std::string& dump_name,
- base::trace_event::MemoryAllocatorDump* dump,
- base::trace_event::ProcessMemoryDump* pmd,
- uint64_t client_tracing_id) override {
- // Add a |service_guid| which expresses shared ownership between the
- // various GPU dumps.
- auto client_guid = GetSharedImageGUIDForTracing(mailbox());
- GLuint service_id =
- texture_ ? texture_->service_id() : texture_passthrough_->service_id();
- base::trace_event::MemoryAllocatorDumpGuid service_guid =
- gl::GetGLTextureServiceGUIDForTracing(service_id);
- pmd->CreateSharedGlobalAllocatorDump(service_guid);
-
- int importance = 2; // This client always owns the ref.
- pmd->AddOwnershipEdge(client_guid, service_guid, importance);
-
- // Swap chain textures only have one level backed by an image.
- image_->OnMemoryDump(pmd, client_tracing_id, dump_name);
- }
-
- bool BeginAccessD3D12(uint64_t* acquire_key) {
- if (keyed_mutex_acquired_) {
- DLOG(ERROR) << "Recursive BeginAccess not supported";
- return false;
- }
- *acquire_key = keyed_mutex_acquire_key_;
- keyed_mutex_acquire_key_++;
- keyed_mutex_acquired_ = true;
- return true;
- }
-
- void EndAccessD3D12() { keyed_mutex_acquired_ = false; }
-
- bool BeginAccessD3D11() {
- if (dxgi_keyed_mutex_) {
- if (keyed_mutex_acquired_) {
- DLOG(ERROR) << "Recursive BeginAccess not supported";
- return false;
- }
- const HRESULT hr =
- dxgi_keyed_mutex_->AcquireSync(keyed_mutex_acquire_key_, INFINITE);
- if (FAILED(hr)) {
- DLOG(ERROR) << "Unable to acquire the keyed mutex " << std::hex << hr;
- return false;
- }
- keyed_mutex_acquire_key_++;
- keyed_mutex_acquired_ = true;
- }
- return true;
- }
- void EndAccessD3D11() {
- if (dxgi_keyed_mutex_) {
- const HRESULT hr =
- dxgi_keyed_mutex_->ReleaseSync(keyed_mutex_acquire_key_);
- if (FAILED(hr)) {
- DLOG(ERROR) << "Unable to release the keyed mutex " << std::hex << hr;
- return;
- }
- keyed_mutex_acquired_ = false;
- }
- }
-
- HANDLE GetSharedHandle() const { return shared_handle_.Get(); }
-
- bool PresentSwapChain() override {
- TRACE_EVENT0("gpu", "SharedImageBackingD3D::PresentSwapChain");
- if (buffer_index_ != 0) {
- DLOG(ERROR) << "Swap chain backing does not correspond to back buffer";
- return false;
- }
-
- DXGI_PRESENT_PARAMETERS params = {};
- params.DirtyRectsCount = 0;
- params.pDirtyRects = nullptr;
-
- UINT flags = DXGI_PRESENT_ALLOW_TEARING;
-
- HRESULT hr = swap_chain_->Present1(0 /* interval */, flags, &params);
- if (FAILED(hr)) {
- DLOG(ERROR) << "Present1 failed with error " << std::hex << hr;
- return false;
- }
-
- gl::GLApi* const api = gl::g_current_gl_context;
- ScopedRestoreTexture2D scoped_restore(api);
-
- const GLenum target = GL_TEXTURE_2D;
- const GLuint service_id =
- texture_ ? texture_->service_id() : texture_passthrough_->service_id();
- api->glBindTextureFn(target, service_id);
-
- if (!image_->BindTexImage(target)) {
- DLOG(ERROR) << "GLImageD3D::BindTexImage failed";
- return false;
- }
-
- TRACE_EVENT0("gpu", "SharedImageBackingD3D::PresentSwapChain::Flush");
- // Flush device context through ANGLE otherwise present could be deferred.
- api->glFlushFn();
- return true;
- }
-
- protected:
- std::unique_ptr<SharedImageRepresentationGLTexture> ProduceGLTexture(
- SharedImageManager* manager,
- MemoryTypeTracker* tracker) override {
- DCHECK(texture_);
- TRACE_EVENT0("gpu", "SharedImageBackingD3D::ProduceGLTexture");
- return std::make_unique<SharedImageRepresentationGLTextureD3D>(
- manager, this, tracker, texture_);
- }
-
- std::unique_ptr<SharedImageRepresentationGLTexturePassthrough>
- ProduceGLTexturePassthrough(SharedImageManager* manager,
- MemoryTypeTracker* tracker) override {
- DCHECK(texture_passthrough_);
- TRACE_EVENT0("gpu", "SharedImageBackingD3D::ProduceGLTexturePassthrough");
- return std::make_unique<SharedImageRepresentationGLTexturePassthroughD3D>(
- manager, this, tracker, texture_passthrough_);
- }
-
- std::unique_ptr<SharedImageRepresentationSkia> ProduceSkia(
- SharedImageManager* manager,
- MemoryTypeTracker* tracker,
- scoped_refptr<SharedContextState> context_state) override {
- return SharedImageRepresentationSkiaGL::CreateForPassthrough(
- ProduceGLTexturePassthrough(manager, tracker), std::move(context_state),
- manager, this, tracker);
- }
-
- private:
- Microsoft::WRL::ComPtr<IDXGISwapChain1> swap_chain_;
- gles2::Texture* texture_ = nullptr;
- scoped_refptr<gles2::TexturePassthrough> texture_passthrough_;
- scoped_refptr<gl::GLImageD3D> image_;
- const size_t buffer_index_;
- Microsoft::WRL::ComPtr<ID3D11Texture2D> d3d11_texture_;
-
- // If d3d11_texture_ has a keyed mutex, it will be stored in
- // dxgi_keyed_mutex. The keyed mutex is used to synchronize
- // D3D11 and D3D12 Chromium components.
- // dxgi_keyed_mutex_ is the D3D11 side of the keyed mutex.
- // To create the corresponding D3D12 interface, pass the handle
- // stored in shared_handle_ to ID3D12Device::OpenSharedHandle.
- // Only one component is allowed to read/write to the texture
- // at a time. keyed_mutex_acquire_key_ is incremented on every
- // Acquire/Release usage.
- base::win::ScopedHandle shared_handle_;
- Microsoft::WRL::ComPtr<IDXGIKeyedMutex> dxgi_keyed_mutex_;
- uint64_t keyed_mutex_acquire_key_ = 0;
- bool keyed_mutex_acquired_ = false;
-
- DISALLOW_COPY_AND_ASSIGN(SharedImageBackingD3D);
-};
-
-#if BUILDFLAG(USE_DAWN)
-WGPUTexture SharedImageRepresentationDawnD3D::BeginAccess(
- WGPUTextureUsage usage) {
- SharedImageBackingD3D* d3d_image_backing =
- static_cast<SharedImageBackingD3D*>(backing());
-
- const HANDLE shared_handle = d3d_image_backing->GetSharedHandle();
- const viz::ResourceFormat viz_resource_format = d3d_image_backing->format();
- const base::Optional<WGPUTextureFormat> wgpu_texture_format =
- VizResourceFormatToWGPUTextureFormat(viz_resource_format);
- if (!wgpu_texture_format.has_value()) {
- DLOG(ERROR) << "Unsupported viz format found: " << viz_resource_format;
- return nullptr;
- }
-
- uint64_t shared_mutex_acquire_key;
- if (!d3d_image_backing->BeginAccessD3D12(&shared_mutex_acquire_key)) {
- return nullptr;
- }
-
- WGPUTextureDescriptor desc;
- desc.nextInChain = nullptr;
- desc.format = wgpu_texture_format.value();
- desc.usage = usage;
- desc.dimension = WGPUTextureDimension_2D;
- desc.size = {size().width(), size().height(), 1};
- desc.arrayLayerCount = 1;
- desc.mipLevelCount = 1;
- desc.sampleCount = 1;
-
- texture_ = dawn_native::d3d12::WrapSharedHandle(device_, &desc, shared_handle,
- shared_mutex_acquire_key);
- if (texture_) {
- // Keep a reference to the texture so that it stays valid (its content
- // might be destroyed).
- dawn_procs_.textureReference(texture_);
-
- // Assume that the user of this representation will write to the texture
- // so set the cleared flag so that other representations don't overwrite
- // the result.
- // TODO(cwallez@chromium.org): This is incorrect and allows reading
- // uninitialized data. When !IsCleared we should tell dawn_native to
- // consider the texture lazy-cleared.
- SetCleared();
- } else {
- d3d_image_backing->EndAccessD3D12();
- }
-
- return texture_;
-}
-
-void SharedImageRepresentationDawnD3D::EndAccess() {
- if (!texture_) {
- return;
- }
-
- SharedImageBackingD3D* d3d_image_backing =
- static_cast<SharedImageBackingD3D*>(backing());
-
- // TODO(cwallez@chromium.org): query dawn_native to know if the texture was
- // cleared and set IsCleared appropriately.
-
- // All further operations on the textures are errors (they would be racy
- // with other backings).
- dawn_procs_.textureDestroy(texture_);
-
- dawn_procs_.textureRelease(texture_);
- texture_ = nullptr;
-
- d3d_image_backing->EndAccessD3D12();
-}
-#endif // BUILDFLAG(USE_DAWN)
-
-bool SharedImageRepresentationGLTexturePassthroughD3D::BeginAccess(
- GLenum mode) {
- SharedImageBackingD3D* d3d_image_backing =
- static_cast<SharedImageBackingD3D*>(backing());
- return d3d_image_backing->BeginAccessD3D11();
-}
-
-void SharedImageRepresentationGLTexturePassthroughD3D::EndAccess() {
- SharedImageBackingD3D* d3d_image_backing =
- static_cast<SharedImageBackingD3D*>(backing());
- d3d_image_backing->EndAccessD3D11();
-}
-
-SharedImageBackingFactoryD3D::SharedImageBackingFactoryD3D(bool use_passthrough)
- : use_passthrough_(use_passthrough),
- d3d11_device_(gl::QueryD3D11DeviceObjectFromANGLE()) {
-}
+SharedImageBackingFactoryD3D::SharedImageBackingFactoryD3D()
+ : d3d11_device_(gl::QueryD3D11DeviceObjectFromANGLE()) {}
SharedImageBackingFactoryD3D::~SharedImageBackingFactoryD3D() = default;
@@ -616,36 +166,17 @@ std::unique_ptr<SharedImageBacking> SharedImageBackingFactoryD3D::MakeBacking(
return nullptr;
}
- gles2::Texture* texture = nullptr;
- scoped_refptr<gles2::TexturePassthrough> texture_passthrough;
-
- if (use_passthrough_) {
- texture_passthrough =
- base::MakeRefCounted<gles2::TexturePassthrough>(service_id, target);
- texture_passthrough->SetLevelImage(target, 0, image.get());
- GLint texture_memory_size = 0;
- api->glGetTexParameterivFn(target, GL_MEMORY_SIZE_ANGLE,
- &texture_memory_size);
- texture_passthrough->SetEstimatedSize(texture_memory_size);
- } else {
- texture = new gles2::Texture(service_id);
- texture->SetLightweightRef();
- texture->SetTarget(target, 1);
- texture->sampler_state_.min_filter = GL_LINEAR;
- texture->sampler_state_.mag_filter = GL_LINEAR;
- texture->sampler_state_.wrap_s = GL_CLAMP_TO_EDGE;
- texture->sampler_state_.wrap_t = GL_CLAMP_TO_EDGE;
- texture->SetLevelInfo(target, 0 /* level */, internal_format, size.width(),
- size.height(), 1 /* depth */, 0 /* border */,
- data_format, data_type, gfx::Rect(size));
- texture->SetLevelImage(target, 0 /* level */, image.get(),
- gles2::Texture::BOUND);
- texture->SetImmutable(true, false);
- }
+ scoped_refptr<gles2::TexturePassthrough> texture =
+ base::MakeRefCounted<gles2::TexturePassthrough>(service_id, target);
+ texture->SetLevelImage(target, 0, image.get());
+ GLint texture_memory_size = 0;
+ api->glGetTexParameterivFn(target, GL_MEMORY_SIZE_ANGLE,
+ &texture_memory_size);
+ texture->SetEstimatedSize(texture_memory_size);
return std::make_unique<SharedImageBackingD3D>(
- mailbox, format, size, color_space, usage, std::move(swap_chain), texture,
- std::move(texture_passthrough), std::move(image), buffer_index,
+ mailbox, format, size, color_space, usage, std::move(swap_chain),
+ std::move(texture), std::move(image), buffer_index,
std::move(d3d11_texture), std::move(shared_handle),
std::move(dxgi_keyed_mutex));
}
@@ -750,6 +281,7 @@ std::unique_ptr<SharedImageBacking>
SharedImageBackingFactoryD3D::CreateSharedImage(
const Mailbox& mailbox,
viz::ResourceFormat format,
+ SurfaceHandle surface_handle,
const gfx::Size& size,
const gfx::ColorSpace& color_space,
uint32_t usage,
diff --git a/chromium/gpu/command_buffer/service/shared_image_backing_factory_d3d.h b/chromium/gpu/command_buffer/service/shared_image_backing_factory_d3d.h
index 632b70a7328..d82838384a9 100644
--- a/chromium/gpu/command_buffer/service/shared_image_backing_factory_d3d.h
+++ b/chromium/gpu/command_buffer/service/shared_image_backing_factory_d3d.h
@@ -29,7 +29,7 @@ struct Mailbox;
class GPU_GLES2_EXPORT SharedImageBackingFactoryD3D
: public SharedImageBackingFactory {
public:
- explicit SharedImageBackingFactoryD3D(bool use_passthrough);
+ SharedImageBackingFactoryD3D();
~SharedImageBackingFactoryD3D() override;
// Returns true if DXGI swap chain shared images for overlays are supported.
@@ -61,6 +61,7 @@ class GPU_GLES2_EXPORT SharedImageBackingFactoryD3D
std::unique_ptr<SharedImageBacking> CreateSharedImage(
const Mailbox& mailbox,
viz::ResourceFormat format,
+ SurfaceHandle surface_handle,
const gfx::Size& size,
const gfx::ColorSpace& color_space,
uint32_t usage,
@@ -102,9 +103,6 @@ class GPU_GLES2_EXPORT SharedImageBackingFactoryD3D
const Microsoft::WRL::ComPtr<ID3D11Texture2D> d3d11_texture,
base::win::ScopedHandle shared_handle);
- // Whether we're using the passthrough command decoder and should generate
- // passthrough textures.
- const bool use_passthrough_ = false;
Microsoft::WRL::ComPtr<ID3D11Device> d3d11_device_;
DISALLOW_COPY_AND_ASSIGN(SharedImageBackingFactoryD3D);
};
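The D3D unit-test changes below adopt the same unique_ptr-based scoped access for the GL passthrough representation; a condensed sketch, with |representation_factory| standing in for the test's factory member:

  // Sketch only: passthrough GL access under the new scoped-access API.
  auto gl_representation =
      representation_factory->ProduceGLTexturePassthrough(mailbox);
  auto access = gl_representation->BeginScopedAccess(
      GL_SHARED_IMAGE_ACCESS_MODE_READWRITE_CHROMIUM,
      SharedImageRepresentation::AllowUnclearedAccess::kYes);
  if (access) {
    // ... attach GetTexturePassthrough()->service_id() to an FBO and draw ...
    gl_representation->SetCleared();
  }
  access.reset();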
diff --git a/chromium/gpu/command_buffer/service/shared_image_backing_factory_d3d_unittest.cc b/chromium/gpu/command_buffer/service/shared_image_backing_factory_d3d_unittest.cc
index f4b9a1cbb6c..08ffa2ac4ac 100644
--- a/chromium/gpu/command_buffer/service/shared_image_backing_factory_d3d_unittest.cc
+++ b/chromium/gpu/command_buffer/service/shared_image_backing_factory_d3d_unittest.cc
@@ -75,12 +75,9 @@ bool IsD3DSharedImageSupported() {
return true;
}
-class SharedImageBackingFactoryD3DTestBase
- : public testing::TestWithParam<bool> {
+class SharedImageBackingFactoryD3DTestBase : public testing::Test {
public:
void SetUp() override {
- use_passthrough_texture_ = GetParam();
-
surface_ = gl::init::CreateOffscreenGLSurface(gfx::Size());
ASSERT_TRUE(surface_);
context_ = gl::init::CreateGLContext(nullptr, surface_.get(),
@@ -93,12 +90,10 @@ class SharedImageBackingFactoryD3DTestBase
shared_image_representation_factory_ =
std::make_unique<SharedImageRepresentationFactory>(
&shared_image_manager_, nullptr);
- shared_image_factory_ = std::make_unique<SharedImageBackingFactoryD3D>(
- use_passthrough_texture_);
+ shared_image_factory_ = std::make_unique<SharedImageBackingFactoryD3D>();
}
protected:
- bool use_passthrough_texture_ = false;
scoped_refptr<gl::GLSurface> surface_;
scoped_refptr<gl::GLContext> context_;
SharedImageManager shared_image_manager_;
@@ -118,7 +113,7 @@ class SharedImageBackingFactoryD3DTestSwapChain
}
};
-TEST_P(SharedImageBackingFactoryD3DTestSwapChain, InvalidFormat) {
+TEST_F(SharedImageBackingFactoryD3DTestSwapChain, InvalidFormat) {
if (!SharedImageBackingFactoryD3D::IsSwapChainSupported())
return;
@@ -134,8 +129,6 @@ TEST_P(SharedImageBackingFactoryD3DTestSwapChain, InvalidFormat) {
color_space, usage);
EXPECT_TRUE(backings.front_buffer);
EXPECT_TRUE(backings.back_buffer);
- backings.front_buffer->Destroy();
- backings.back_buffer->Destroy();
}
{
auto valid_format = viz::BGRA_8888;
@@ -144,8 +137,6 @@ TEST_P(SharedImageBackingFactoryD3DTestSwapChain, InvalidFormat) {
color_space, usage);
EXPECT_TRUE(backings.front_buffer);
EXPECT_TRUE(backings.back_buffer);
- backings.front_buffer->Destroy();
- backings.back_buffer->Destroy();
}
{
auto valid_format = viz::RGBA_F16;
@@ -154,8 +145,6 @@ TEST_P(SharedImageBackingFactoryD3DTestSwapChain, InvalidFormat) {
color_space, usage);
EXPECT_TRUE(backings.front_buffer);
EXPECT_TRUE(backings.back_buffer);
- backings.front_buffer->Destroy();
- backings.back_buffer->Destroy();
}
{
auto invalid_format = viz::RGBA_4444;
@@ -167,7 +156,7 @@ TEST_P(SharedImageBackingFactoryD3DTestSwapChain, InvalidFormat) {
}
}
-TEST_P(SharedImageBackingFactoryD3DTestSwapChain, CreateAndPresentSwapChain) {
+TEST_F(SharedImageBackingFactoryD3DTestSwapChain, CreateAndPresentSwapChain) {
if (!SharedImageBackingFactoryD3D::IsSwapChainSupported())
return;
@@ -196,59 +185,30 @@ TEST_P(SharedImageBackingFactoryD3DTestSwapChain, CreateAndPresentSwapChain) {
GLuint back_texture_id, front_texture_id = 0u;
gl::GLImageD3D *back_image, *front_image = 0u;
- if (use_passthrough_texture_) {
- auto back_texture = shared_image_representation_factory_
- ->ProduceGLTexturePassthrough(back_buffer_mailbox)
- ->GetTexturePassthrough();
- ASSERT_TRUE(back_texture);
- EXPECT_EQ(back_texture->target(), static_cast<unsigned>(GL_TEXTURE_2D));
-
- back_texture_id = back_texture->service_id();
- EXPECT_NE(back_texture_id, 0u);
-
- back_image = gl::GLImageD3D::FromGLImage(
- back_texture->GetLevelImage(GL_TEXTURE_2D, 0));
-
- auto front_texture = shared_image_representation_factory_
- ->ProduceGLTexturePassthrough(front_buffer_mailbox)
- ->GetTexturePassthrough();
- ASSERT_TRUE(front_texture);
- EXPECT_EQ(front_texture->target(), static_cast<unsigned>(GL_TEXTURE_2D));
-
- front_texture_id = front_texture->service_id();
- EXPECT_NE(front_texture_id, 0u);
-
- front_image = gl::GLImageD3D::FromGLImage(
- front_texture->GetLevelImage(GL_TEXTURE_2D, 0));
- } else {
- auto* back_texture = shared_image_representation_factory_
- ->ProduceGLTexture(back_buffer_mailbox)
- ->GetTexture();
- ASSERT_TRUE(back_texture);
- EXPECT_EQ(back_texture->target(), static_cast<unsigned>(GL_TEXTURE_2D));
-
- back_texture_id = back_texture->service_id();
- EXPECT_NE(back_texture_id, 0u);
-
- gles2::Texture::ImageState image_state = gles2::Texture::UNBOUND;
- back_image = gl::GLImageD3D::FromGLImage(
- back_texture->GetLevelImage(GL_TEXTURE_2D, 0, &image_state));
- EXPECT_EQ(image_state, gles2::Texture::BOUND);
-
- auto* front_texture = shared_image_representation_factory_
- ->ProduceGLTexture(front_buffer_mailbox)
- ->GetTexture();
- ASSERT_TRUE(front_texture);
- EXPECT_EQ(front_texture->target(), static_cast<unsigned>(GL_TEXTURE_2D));
-
- front_texture_id = front_texture->service_id();
- EXPECT_NE(front_texture_id, 0u);
-
- image_state = gles2::Texture::UNBOUND;
- front_image = gl::GLImageD3D::FromGLImage(
- front_texture->GetLevelImage(GL_TEXTURE_2D, 0, &image_state));
- EXPECT_EQ(image_state, gles2::Texture::BOUND);
- }
+
+ auto back_texture = shared_image_representation_factory_
+ ->ProduceGLTexturePassthrough(back_buffer_mailbox)
+ ->GetTexturePassthrough();
+ ASSERT_TRUE(back_texture);
+ EXPECT_EQ(back_texture->target(), static_cast<unsigned>(GL_TEXTURE_2D));
+
+ back_texture_id = back_texture->service_id();
+ EXPECT_NE(back_texture_id, 0u);
+
+ back_image = gl::GLImageD3D::FromGLImage(
+ back_texture->GetLevelImage(GL_TEXTURE_2D, 0));
+
+ auto front_texture = shared_image_representation_factory_
+ ->ProduceGLTexturePassthrough(front_buffer_mailbox)
+ ->GetTexturePassthrough();
+ ASSERT_TRUE(front_texture);
+ EXPECT_EQ(front_texture->target(), static_cast<unsigned>(GL_TEXTURE_2D));
+
+ front_texture_id = front_texture->service_id();
+ EXPECT_NE(front_texture_id, 0u);
+
+ front_image = gl::GLImageD3D::FromGLImage(
+ front_texture->GetLevelImage(GL_TEXTURE_2D, 0));
ASSERT_TRUE(back_image);
EXPECT_EQ(back_image->ShouldBindOrCopy(), gl::GLImage::BIND);
@@ -427,13 +387,12 @@ class SharedImageBackingFactoryD3DTest
return;
SharedImageBackingFactoryD3DTestBase::SetUp();
- ASSERT_TRUE(use_passthrough_texture_);
GpuDriverBugWorkarounds workarounds;
scoped_refptr<gl::GLShareGroup> share_group = new gl::GLShareGroup();
context_state_ = base::MakeRefCounted<SharedContextState>(
std::move(share_group), surface_, context_,
/*use_virtualized_gl_contexts=*/false, base::DoNothing());
- context_state_->InitializeGrContext(workarounds, nullptr);
+ context_state_->InitializeGrContext(GpuPreferences(), workarounds, nullptr);
auto feature_info =
base::MakeRefCounted<gles2::FeatureInfo>(workarounds, GpuFeatureInfo());
context_state_->InitializeGL(GpuPreferences(), std::move(feature_info));
@@ -442,16 +401,20 @@ class SharedImageBackingFactoryD3DTest
protected:
GrContext* gr_context() const { return context_state_->gr_context(); }
- void CheckSkiaPixels(const Mailbox& mailbox, const gfx::Size& size) const {
+ void CheckSkiaPixels(const Mailbox& mailbox,
+ const gfx::Size& size,
+ const std::vector<uint8_t> expected_color) const {
auto skia_representation =
shared_image_representation_factory_->ProduceSkia(mailbox,
context_state_);
ASSERT_NE(skia_representation, nullptr);
- SharedImageRepresentationSkia::ScopedReadAccess scoped_read_access(
- skia_representation.get(), nullptr, nullptr);
+ std::unique_ptr<SharedImageRepresentationSkia::ScopedReadAccess>
+ scoped_read_access =
+ skia_representation->BeginScopedReadAccess(nullptr, nullptr);
+ EXPECT_TRUE(scoped_read_access);
- auto* promise_texture = scoped_read_access.promise_image_texture();
+ auto* promise_texture = scoped_read_access->promise_image_texture();
GrBackendTexture backend_texture = promise_texture->backendTexture();
EXPECT_TRUE(backend_texture.isValid());
@@ -477,10 +440,10 @@ class SharedImageBackingFactoryD3DTest
for (int i = 0; i < num_pixels; i++) {
// Compare the pixel values.
const uint8_t* pixel = dst_pixels.data() + (i * 4);
- EXPECT_EQ(pixel[0], 0);
- EXPECT_EQ(pixel[1], 255);
- EXPECT_EQ(pixel[2], 0);
- EXPECT_EQ(pixel[3], 255);
+ EXPECT_EQ(pixel[0], expected_color[0]);
+ EXPECT_EQ(pixel[1], expected_color[1]);
+ EXPECT_EQ(pixel[2], expected_color[2]);
+ EXPECT_EQ(pixel[3], expected_color[3]);
}
}
@@ -490,7 +453,7 @@ class SharedImageBackingFactoryD3DTest
// Test to check interaction between Gl and skia GL representations.
// We write to a GL texture using gl representation and then read from skia
// representation.
-TEST_P(SharedImageBackingFactoryD3DTest, GL_SkiaGL) {
+TEST_F(SharedImageBackingFactoryD3DTest, GL_SkiaGL) {
if (!IsD3DSharedImageSupported())
return;
@@ -500,8 +463,10 @@ TEST_P(SharedImageBackingFactoryD3DTest, GL_SkiaGL) {
const gfx::Size size(1, 1);
const auto color_space = gfx::ColorSpace::CreateSRGB();
const uint32_t usage = SHARED_IMAGE_USAGE_GLES2 | SHARED_IMAGE_USAGE_DISPLAY;
+ const gpu::SurfaceHandle surface_handle = gpu::kNullSurfaceHandle;
auto backing = shared_image_factory_->CreateSharedImage(
- mailbox, format, size, color_space, usage, false /* is_thread_safe */);
+ mailbox, format, surface_handle, size, color_space, usage,
+ false /* is_thread_safe */);
ASSERT_NE(backing, nullptr);
GLenum expected_target = GL_TEXTURE_2D;
@@ -516,9 +481,11 @@ TEST_P(SharedImageBackingFactoryD3DTest, GL_SkiaGL) {
EXPECT_EQ(expected_target,
gl_representation->GetTexturePassthrough()->target());
- SharedImageRepresentationGLTexturePassthrough::ScopedAccess scoped_access(
- gl_representation.get(), GL_SHARED_IMAGE_ACCESS_MODE_READWRITE_CHROMIUM);
- EXPECT_TRUE(scoped_access.success());
+ std::unique_ptr<SharedImageRepresentationGLTexturePassthrough::ScopedAccess>
+ scoped_access = gl_representation->BeginScopedAccess(
+ GL_SHARED_IMAGE_ACCESS_MODE_READWRITE_CHROMIUM,
+ SharedImageRepresentation::AllowUnclearedAccess::kYes);
+ EXPECT_TRUE(scoped_access);
// Create an FBO.
GLuint fbo = 0;
@@ -535,17 +502,19 @@ TEST_P(SharedImageBackingFactoryD3DTest, GL_SkiaGL) {
// Set the clear color to green.
api->glClearColorFn(0.0f, 1.0f, 0.0f, 1.0f);
api->glClearFn(GL_COLOR_BUFFER_BIT);
+ gl_representation->SetCleared();
+
scoped_access.reset();
gl_representation.reset();
- CheckSkiaPixels(mailbox, size);
+ CheckSkiaPixels(mailbox, size, {0, 255, 0, 255});
factory_ref.reset();
}
#if BUILDFLAG(USE_DAWN)
// Test to check interaction between Dawn and skia GL representations.
-TEST_P(SharedImageBackingFactoryD3DTest, Dawn_SkiaGL) {
+TEST_F(SharedImageBackingFactoryD3DTest, Dawn_SkiaGL) {
if (!IsD3DSharedImageSupported())
return;
@@ -569,9 +538,11 @@ TEST_P(SharedImageBackingFactoryD3DTest, Dawn_SkiaGL) {
const auto format = viz::ResourceFormat::RGBA_8888;
const gfx::Size size(1, 1);
const auto color_space = gfx::ColorSpace::CreateSRGB();
+ const gpu::SurfaceHandle surface_handle = gpu::kNullSurfaceHandle;
const uint32_t usage = SHARED_IMAGE_USAGE_WEBGPU | SHARED_IMAGE_USAGE_DISPLAY;
auto backing = shared_image_factory_->CreateSharedImage(
- mailbox, format, size, color_space, usage, false /* is_thread_safe */);
+ mailbox, format, surface_handle, size, color_space, usage,
+ false /* is_thread_safe */);
ASSERT_NE(backing, nullptr);
std::unique_ptr<SharedImageRepresentationFactoryRef> factory_ref =
@@ -584,9 +555,14 @@ TEST_P(SharedImageBackingFactoryD3DTest, Dawn_SkiaGL) {
auto dawn_representation =
shared_image_representation_factory_->ProduceDawn(mailbox,
device.Get());
+ ASSERT_TRUE(dawn_representation);
+
+ auto scoped_access = dawn_representation->BeginScopedAccess(
+ WGPUTextureUsage_OutputAttachment,
+ SharedImageRepresentation::AllowUnclearedAccess::kYes);
+ ASSERT_TRUE(scoped_access);
- wgpu::Texture texture = wgpu::Texture::Acquire(
- dawn_representation->BeginAccess(WGPUTextureUsage_OutputAttachment));
+ wgpu::Texture texture = wgpu::Texture::Acquire(scoped_access->texture());
wgpu::RenderPassColorAttachmentDescriptor color_desc;
color_desc.attachment = texture.CreateView();
@@ -607,11 +583,126 @@ TEST_P(SharedImageBackingFactoryD3DTest, Dawn_SkiaGL) {
wgpu::Queue queue = device.CreateQueue();
queue.Submit(1, &commands);
+ }
+
+ CheckSkiaPixels(mailbox, size, {0, 255, 0, 255});
+
+ // Shut down Dawn
+ device = wgpu::Device();
+ dawnProcSetProcs(nullptr);
- dawn_representation->EndAccess();
+ factory_ref.reset();
+}
+
+// 1. Draw a color to the texture through GL.
+// 2. Do not call SetCleared so we can test Dawn lazy clearing.
+// 3. Begin a render pass in Dawn, but do not do anything.
+// 4. Verify through CheckSkiaPixels that the GL-drawn color is not seen.
+TEST_F(SharedImageBackingFactoryD3DTest, GL_Dawn_Skia_UnclearTexture) {
+ if (!IsD3DSharedImageSupported())
+ return;
+
+ // Create a backing using mailbox.
+ auto mailbox = Mailbox::GenerateForSharedImage();
+ const auto format = viz::ResourceFormat::RGBA_8888;
+ const gfx::Size size(1, 1);
+ const auto color_space = gfx::ColorSpace::CreateSRGB();
+ const uint32_t usage = SHARED_IMAGE_USAGE_GLES2 | SHARED_IMAGE_USAGE_DISPLAY |
+ SHARED_IMAGE_USAGE_WEBGPU;
+ const gpu::SurfaceHandle surface_handle = gpu::kNullSurfaceHandle;
+ auto backing = shared_image_factory_->CreateSharedImage(
+ mailbox, format, surface_handle, size, color_space, usage,
+ false /* is_thread_safe */);
+ ASSERT_NE(backing, nullptr);
+
+ GLenum expected_target = GL_TEXTURE_2D;
+ std::unique_ptr<SharedImageRepresentationFactoryRef> factory_ref =
+ shared_image_manager_.Register(std::move(backing),
+ memory_type_tracker_.get());
+ {
+ // Create a SharedImageRepresentationGLTexture.
+ auto gl_representation =
+ shared_image_representation_factory_->ProduceGLTexturePassthrough(
+ mailbox);
+ EXPECT_EQ(expected_target,
+ gl_representation->GetTexturePassthrough()->target());
+
+ std::unique_ptr<SharedImageRepresentationGLTexturePassthrough::ScopedAccess>
+ gl_scoped_access = gl_representation->BeginScopedAccess(
+ GL_SHARED_IMAGE_ACCESS_MODE_READWRITE_CHROMIUM,
+ SharedImageRepresentation::AllowUnclearedAccess::kYes);
+ EXPECT_TRUE(gl_scoped_access);
+
+ // Create an FBO.
+ GLuint fbo = 0;
+ gl::GLApi* api = gl::g_current_gl_context;
+ api->glGenFramebuffersEXTFn(1, &fbo);
+ api->glBindFramebufferEXTFn(GL_FRAMEBUFFER, fbo);
+
+ // Attach the texture to FBO.
+ api->glFramebufferTexture2DEXTFn(
+ GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0,
+ gl_representation->GetTexturePassthrough()->target(),
+ gl_representation->GetTexturePassthrough()->service_id(), 0);
+
+ // Set the clear color to green.
+ api->glClearColorFn(0.0f, 1.0f, 0.0f, 1.0f);
+ api->glClearFn(GL_COLOR_BUFFER_BIT);
+
+    // Don't call SetCleared; we want to see if Dawn will lazily clear the texture.
+ EXPECT_FALSE(factory_ref->IsCleared());
}
- CheckSkiaPixels(mailbox, size);
+ // Create a Dawn D3D12 device
+ dawn_native::Instance instance;
+ instance.DiscoverDefaultAdapters();
+
+ std::vector<dawn_native::Adapter> adapters = instance.GetAdapters();
+ auto adapter_it = std::find_if(
+ adapters.begin(), adapters.end(), [](dawn_native::Adapter adapter) {
+ return adapter.GetBackendType() == dawn_native::BackendType::D3D12;
+ });
+ ASSERT_NE(adapter_it, adapters.end());
+
+ wgpu::Device device = wgpu::Device::Acquire(adapter_it->CreateDevice());
+ DawnProcTable procs = dawn_native::GetProcs();
+ dawnProcSetProcs(&procs);
+ {
+ auto dawn_representation =
+ shared_image_representation_factory_->ProduceDawn(mailbox,
+ device.Get());
+ ASSERT_TRUE(dawn_representation);
+
+ auto dawn_scoped_access = dawn_representation->BeginScopedAccess(
+ WGPUTextureUsage_OutputAttachment,
+ SharedImageRepresentation::AllowUnclearedAccess::kYes);
+ ASSERT_TRUE(dawn_scoped_access);
+
+ wgpu::Texture texture =
+ wgpu::Texture::Acquire(dawn_scoped_access->texture());
+ wgpu::RenderPassColorAttachmentDescriptor color_desc;
+ color_desc.attachment = texture.CreateView();
+ color_desc.resolveTarget = nullptr;
+ color_desc.loadOp = wgpu::LoadOp::Load;
+ color_desc.storeOp = wgpu::StoreOp::Store;
+
+ wgpu::RenderPassDescriptor renderPassDesc;
+ renderPassDesc.colorAttachmentCount = 1;
+ renderPassDesc.colorAttachments = &color_desc;
+ renderPassDesc.depthStencilAttachment = nullptr;
+
+ wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+ wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPassDesc);
+ pass.EndPass();
+ wgpu::CommandBuffer commands = encoder.Finish();
+
+ wgpu::Queue queue = device.CreateQueue();
+ queue.Submit(1, &commands);
+ }
+
+  // Check that the Skia pixels are black since the texture was lazily cleared in Dawn.
+ EXPECT_TRUE(factory_ref->IsCleared());
+ CheckSkiaPixels(mailbox, size, {0, 0, 0, 0});
// Shut down Dawn
device = wgpu::Device();
@@ -619,13 +710,132 @@ TEST_P(SharedImageBackingFactoryD3DTest, Dawn_SkiaGL) {
factory_ref.reset();
}
+
+// 1. Draw a color to the texture through Dawn.
+// 2. Set the render pass storeOp = Clear.
+// 3. The texture in Dawn will stay uninitialized.
+// 4. Expect Skia to fail to access the texture because the texture is not
+//    initialized.
+TEST_F(SharedImageBackingFactoryD3DTest, UnclearDawn_SkiaFails) {
+ if (!IsD3DSharedImageSupported())
+ return;
+
+ // Create a backing using mailbox.
+ auto mailbox = Mailbox::GenerateForSharedImage();
+ const auto format = viz::ResourceFormat::RGBA_8888;
+ const gfx::Size size(1, 1);
+ const auto color_space = gfx::ColorSpace::CreateSRGB();
+ const uint32_t usage = SHARED_IMAGE_USAGE_GLES2 | SHARED_IMAGE_USAGE_DISPLAY |
+ SHARED_IMAGE_USAGE_WEBGPU;
+ const gpu::SurfaceHandle surface_handle = gpu::kNullSurfaceHandle;
+ auto backing = shared_image_factory_->CreateSharedImage(
+ mailbox, format, surface_handle, size, color_space, usage,
+ false /* is_thread_safe */);
+ ASSERT_NE(backing, nullptr);
+
+ std::unique_ptr<SharedImageRepresentationFactoryRef> factory_ref =
+ shared_image_manager_.Register(std::move(backing),
+ memory_type_tracker_.get());
+
+ // Create dawn device
+ dawn_native::Instance instance;
+ instance.DiscoverDefaultAdapters();
+
+ std::vector<dawn_native::Adapter> adapters = instance.GetAdapters();
+ auto adapter_it = std::find_if(
+ adapters.begin(), adapters.end(), [](dawn_native::Adapter adapter) {
+ return adapter.GetBackendType() == dawn_native::BackendType::D3D12;
+ });
+ ASSERT_NE(adapter_it, adapters.end());
+
+ wgpu::Device device = wgpu::Device::Acquire(adapter_it->CreateDevice());
+ DawnProcTable procs = dawn_native::GetProcs();
+ dawnProcSetProcs(&procs);
+ {
+ auto dawn_representation =
+ shared_image_representation_factory_->ProduceDawn(mailbox,
+ device.Get());
+ ASSERT_TRUE(dawn_representation);
+
+ auto dawn_scoped_access = dawn_representation->BeginScopedAccess(
+ WGPUTextureUsage_OutputAttachment,
+ SharedImageRepresentation::AllowUnclearedAccess::kYes);
+ ASSERT_TRUE(dawn_scoped_access);
+
+ wgpu::Texture texture =
+ wgpu::Texture::Acquire(dawn_scoped_access->texture());
+ wgpu::RenderPassColorAttachmentDescriptor color_desc;
+ color_desc.attachment = texture.CreateView();
+ color_desc.resolveTarget = nullptr;
+ color_desc.loadOp = wgpu::LoadOp::Clear;
+ color_desc.storeOp = wgpu::StoreOp::Clear;
+ color_desc.clearColor = {0, 255, 0, 255};
+
+ wgpu::RenderPassDescriptor renderPassDesc;
+ renderPassDesc.colorAttachmentCount = 1;
+ renderPassDesc.colorAttachments = &color_desc;
+ renderPassDesc.depthStencilAttachment = nullptr;
+
+ wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+ wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPassDesc);
+ pass.EndPass();
+ wgpu::CommandBuffer commands = encoder.Finish();
+
+ wgpu::Queue queue = device.CreateQueue();
+ queue.Submit(1, &commands);
+ }
+
+ // Shut down Dawn
+ device = wgpu::Device();
+ dawnProcSetProcs(nullptr);
+
+ EXPECT_FALSE(factory_ref->IsCleared());
+
+ // Produce skia representation
+ auto skia_representation = shared_image_representation_factory_->ProduceSkia(
+ mailbox, context_state_);
+ ASSERT_NE(skia_representation, nullptr);
+
+  // Expect BeginScopedReadAccess to fail because the shared image is uninitialized.
+ std::unique_ptr<SharedImageRepresentationSkia::ScopedReadAccess>
+ scoped_read_access =
+ skia_representation->BeginScopedReadAccess(nullptr, nullptr);
+ EXPECT_EQ(scoped_read_access, nullptr);
+}
#endif // BUILDFLAG(USE_DAWN)
-INSTANTIATE_TEST_SUITE_P(/* no prefix */,
- SharedImageBackingFactoryD3DTestSwapChain,
- testing::Bool());
-INSTANTIATE_TEST_SUITE_P(/* no prefix */,
- SharedImageBackingFactoryD3DTest,
- testing::Values(true));
+// Test that Skia access to an uninitialized SharedImage fails.
+TEST_F(SharedImageBackingFactoryD3DTest, SkiaAccessFirstFails) {
+ if (!IsD3DSharedImageSupported())
+ return;
+
+ // Create a mailbox.
+ auto mailbox = Mailbox::GenerateForSharedImage();
+ const auto format = viz::ResourceFormat::RGBA_8888;
+ const gfx::Size size(1, 1);
+ const auto color_space = gfx::ColorSpace::CreateSRGB();
+ const uint32_t usage = SHARED_IMAGE_USAGE_GLES2 | SHARED_IMAGE_USAGE_DISPLAY;
+ const gpu::SurfaceHandle surface_handle = gpu::kNullSurfaceHandle;
+ auto backing = shared_image_factory_->CreateSharedImage(
+ mailbox, format, surface_handle, size, color_space, usage,
+ false /* is_thread_safe */);
+ ASSERT_NE(backing, nullptr);
+
+ std::unique_ptr<SharedImageRepresentationFactoryRef> factory_ref =
+ shared_image_manager_.Register(std::move(backing),
+ memory_type_tracker_.get());
+
+ // Produce skia representation
+ auto skia_representation = shared_image_representation_factory_->ProduceSkia(
+ mailbox, context_state_);
+ ASSERT_NE(skia_representation, nullptr);
+ EXPECT_FALSE(skia_representation->IsCleared());
+
+  // Expect BeginScopedReadAccess to fail because the shared image is uninitialized.
+ std::unique_ptr<SharedImageRepresentationSkia::ScopedReadAccess>
+ scoped_read_access =
+ skia_representation->BeginScopedReadAccess(nullptr, nullptr);
+ EXPECT_EQ(scoped_read_access, nullptr);
+}
} // anonymous namespace
} // namespace gpu
diff --git a/chromium/gpu/command_buffer/service/shared_image_backing_factory_gl_texture.cc b/chromium/gpu/command_buffer/service/shared_image_backing_factory_gl_texture.cc
index 07981081e05..a7292c01c10 100644
--- a/chromium/gpu/command_buffer/service/shared_image_backing_factory_gl_texture.cc
+++ b/chromium/gpu/command_buffer/service/shared_image_backing_factory_gl_texture.cc
@@ -5,13 +5,16 @@
#include "gpu/command_buffer/service/shared_image_backing_factory_gl_texture.h"
#include <algorithm>
+#include <list>
#include <string>
#include <utility>
#include "base/feature_list.h"
#include "base/trace_event/memory_dump_manager.h"
#include "base/trace_event/trace_event.h"
+#include "build/build_config.h"
#include "components/viz/common/resources/resource_format_utils.h"
+#include "components/viz/common/resources/resource_sizes.h"
#include "gpu/command_buffer/common/gles2_cmd_utils.h"
#include "gpu/command_buffer/common/shared_image_trace_utils.h"
#include "gpu/command_buffer/common/shared_image_usage.h"
@@ -33,10 +36,19 @@
#include "ui/gl/gl_bindings.h"
#include "ui/gl/gl_fence.h"
#include "ui/gl/gl_gl_api_implementation.h"
+#include "ui/gl/gl_image_native_pixmap.h"
#include "ui/gl/gl_image_shared_memory.h"
+#include "ui/gl/gl_implementation.h"
#include "ui/gl/gl_version_info.h"
+#include "ui/gl/scoped_binders.h"
+#include "ui/gl/shared_gl_fence_egl.h"
#include "ui/gl/trace_util.h"
+#if defined(OS_ANDROID)
+#include "gpu/command_buffer/service/shared_image_backing_egl_image.h"
+#include "gpu/command_buffer/service/shared_image_batch_access_manager.h"
+#endif
+
namespace gpu {
namespace {
@@ -231,6 +243,19 @@ class SharedImageRepresentationGLTexturePassthroughImpl
return texture_passthrough_;
}
+ void EndAccess() override {
+ GLenum target = texture_passthrough_->target();
+ gl::GLImage* image = texture_passthrough_->GetLevelImage(target, 0);
+ if (!image)
+ return;
+ if (image->ShouldBindOrCopy() == gl::GLImage::BIND) {
+ gl::ScopedTextureBinder binder(target,
+ texture_passthrough_->service_id());
+ image->ReleaseTexImage(target);
+ image->BindTexImage(target);
+ }
+ }
+
private:
scoped_refptr<gles2::TexturePassthrough> texture_passthrough_;
};
@@ -330,6 +355,8 @@ class SharedImageRepresentationSkiaImpl : public SharedImageRepresentationSkia {
// TODO(ericrk): Handle begin/end correctness checks.
}
+ bool SupportsMultipleConcurrentReadAccess() override { return true; }
+
sk_sp<SkPromiseImageTexture> promise_texture() { return promise_texture_; }
private:
@@ -369,19 +396,29 @@ class SharedImageBackingGLTexture : public SharedImageBackingWithReadAccess {
texture_(texture),
attribs_(attribs) {
DCHECK(texture_);
+ gl::GLImage* image =
+ texture_->GetLevelImage(texture_->target(), 0, nullptr);
+ if (image)
+ native_pixmap_ = image->GetNativePixmap();
}
~SharedImageBackingGLTexture() override {
- DCHECK(!texture_);
- DCHECK(!rgb_emulation_texture_);
+ DCHECK(texture_);
+ texture_->RemoveLightweightRef(have_context());
+ texture_ = nullptr;
+
+ if (rgb_emulation_texture_) {
+ rgb_emulation_texture_->RemoveLightweightRef(have_context());
+ rgb_emulation_texture_ = nullptr;
+ }
}
- bool IsCleared() const override {
- return texture_->IsLevelCleared(texture_->target(), 0);
+ gfx::Rect ClearedRect() const override {
+ return texture_->GetLevelClearedRect(texture_->target(), 0);
}
- void SetCleared() override {
- texture_->SetLevelCleared(texture_->target(), 0, true);
+ void SetClearedRect(const gfx::Rect& cleared_rect) override {
+ texture_->SetLevelClearedRect(texture_->target(), 0, cleared_rect);
}
void Update(std::unique_ptr<gfx::GpuFence> in_fence) override {
@@ -421,17 +458,6 @@ class SharedImageBackingGLTexture : public SharedImageBackingWithReadAccess {
return true;
}
- void Destroy() override {
- DCHECK(texture_);
- texture_->RemoveLightweightRef(have_context());
- texture_ = nullptr;
-
- if (rgb_emulation_texture_) {
- rgb_emulation_texture_->RemoveLightweightRef(have_context());
- rgb_emulation_texture_ = nullptr;
- }
- }
-
void OnMemoryDump(const std::string& dump_name,
base::trace_event::MemoryAllocatorDump* dump,
base::trace_event::ProcessMemoryDump* pmd,
@@ -475,6 +501,10 @@ class SharedImageBackingGLTexture : public SharedImageBackingWithReadAccess {
}
}
+ scoped_refptr<gfx::NativePixmap> GetNativePixmap() override {
+ return native_pixmap_;
+ }
+
protected:
std::unique_ptr<SharedImageRepresentationGLTexture> ProduceGLTexture(
SharedImageManager* manager,
@@ -553,6 +583,7 @@ class SharedImageBackingGLTexture : public SharedImageBackingWithReadAccess {
gles2::Texture* rgb_emulation_texture_ = nullptr;
sk_sp<SkPromiseImageTexture> cached_promise_texture_;
const UnpackStateAttribs attribs_;
+ scoped_refptr<gfx::NativePixmap> native_pixmap_;
};
// Implementation of SharedImageBacking that creates a GL Texture and stores it
@@ -567,8 +598,7 @@ class SharedImageBackingPassthroughGLTexture
const gfx::Size& size,
const gfx::ColorSpace& color_space,
uint32_t usage,
- scoped_refptr<gles2::TexturePassthrough> passthrough_texture,
- bool is_cleared)
+ scoped_refptr<gles2::TexturePassthrough> passthrough_texture)
: SharedImageBackingWithReadAccess(mailbox,
format,
size,
@@ -576,17 +606,24 @@ class SharedImageBackingPassthroughGLTexture
usage,
passthrough_texture->estimated_size(),
false /* is_thread_safe */),
- texture_passthrough_(std::move(passthrough_texture)),
- is_cleared_(is_cleared) {
+ texture_passthrough_(std::move(passthrough_texture)) {
DCHECK(texture_passthrough_);
}
~SharedImageBackingPassthroughGLTexture() override {
- DCHECK(!texture_passthrough_);
+ DCHECK(texture_passthrough_);
+ if (!have_context())
+ texture_passthrough_->MarkContextLost();
+ texture_passthrough_.reset();
+ }
+
+ gfx::Rect ClearedRect() const override {
+ // This backing is used exclusively with ANGLE which handles clear tracking
+ // internally. Act as though the texture is always cleared.
+ return gfx::Rect(size());
}
- bool IsCleared() const override { return is_cleared_; }
- void SetCleared() override { is_cleared_ = true; }
+ void SetClearedRect(const gfx::Rect& cleared_rect) override {}
void Update(std::unique_ptr<gfx::GpuFence> in_fence) override {
GLenum target = texture_passthrough_->target();
@@ -610,13 +647,6 @@ class SharedImageBackingPassthroughGLTexture
return true;
}
- void Destroy() override {
- DCHECK(texture_passthrough_);
- if (!have_context())
- texture_passthrough_->MarkContextLost();
- texture_passthrough_.reset();
- }
-
void OnMemoryDump(const std::string& dump_name,
base::trace_event::MemoryAllocatorDump* dump,
base::trace_event::ProcessMemoryDump* pmd,
@@ -661,18 +691,21 @@ class SharedImageBackingPassthroughGLTexture
private:
scoped_refptr<gles2::TexturePassthrough> texture_passthrough_;
sk_sp<SkPromiseImageTexture> cached_promise_texture_;
-
- bool is_cleared_ = false;
};
SharedImageBackingFactoryGLTexture::SharedImageBackingFactoryGLTexture(
const GpuPreferences& gpu_preferences,
const GpuDriverBugWorkarounds& workarounds,
const GpuFeatureInfo& gpu_feature_info,
- ImageFactory* image_factory)
+ ImageFactory* image_factory,
+ SharedImageBatchAccessManager* batch_access_manager)
: use_passthrough_(gpu_preferences.use_passthrough_cmd_decoder &&
gles2::PassthroughCommandDecoderSupported()),
- image_factory_(image_factory) {
+ image_factory_(image_factory),
+ workarounds_(workarounds) {
+#if defined(OS_ANDROID)
+ batch_access_manager_ = batch_access_manager;
+#endif
gl::GLApi* api = gl::g_current_gl_context;
api->glGetIntegervFn(GL_MAX_TEXTURE_SIZE, &max_texture_size_);
// When the passthrough command decoder is used, the max_texture_size
@@ -713,13 +746,13 @@ SharedImageBackingFactoryGLTexture::SharedImageBackingFactoryGLTexture(
FormatInfo& info = format_info_[i];
if (!viz::GLSupportsFormat(format))
continue;
- GLuint image_internal_format = viz::GLInternalFormat(format);
- GLenum gl_format = viz::GLDataFormat(format);
- GLenum gl_type = viz::GLDataType(format);
- bool uncompressed_format_valid =
+ const GLuint image_internal_format = viz::GLInternalFormat(format);
+ const GLenum gl_format = viz::GLDataFormat(format);
+ const GLenum gl_type = viz::GLDataType(format);
+ const bool uncompressed_format_valid =
validators->texture_internal_format.IsValid(image_internal_format) &&
validators->texture_format.IsValid(gl_format);
- bool compressed_format_valid =
+ const bool compressed_format_valid =
validators->compressed_texture_format.IsValid(image_internal_format);
if ((uncompressed_format_valid || compressed_format_valid) &&
validators->pixel_type.IsValid(gl_type)) {
@@ -748,26 +781,32 @@ SharedImageBackingFactoryGLTexture::SharedImageBackingFactoryGLTexture(
}
}
if (!info.enabled || !enable_scanout_images ||
- !IsGpuMemoryBufferFormatSupported(format))
+ !IsGpuMemoryBufferFormatSupported(format)) {
continue;
- gfx::BufferFormat buffer_format = viz::BufferFormat(format);
+ }
+ const gfx::BufferFormat buffer_format = viz::BufferFormat(format);
switch (buffer_format) {
case gfx::BufferFormat::RGBA_8888:
case gfx::BufferFormat::BGRA_8888:
case gfx::BufferFormat::RGBA_F16:
case gfx::BufferFormat::R_8:
+ case gfx::BufferFormat::BGRA_1010102:
+ case gfx::BufferFormat::RGBA_1010102:
break;
default:
continue;
}
+ if (!gpu_memory_buffer_formats_.Has(buffer_format))
+ continue;
info.allow_scanout = true;
info.buffer_format = buffer_format;
- DCHECK_EQ(info.gl_format,
+ DCHECK_EQ(info.image_internal_format,
gl::BufferFormatToGLInternalFormat(buffer_format));
if (base::Contains(gpu_preferences.texture_target_exception_list,
gfx::BufferUsageAndFormat(gfx::BufferUsage::SCANOUT,
- buffer_format)))
+ buffer_format))) {
info.target_for_scanout = gpu::GetPlatformSpecificTextureTarget();
+ }
}
}
@@ -778,13 +817,18 @@ std::unique_ptr<SharedImageBacking>
SharedImageBackingFactoryGLTexture::CreateSharedImage(
const Mailbox& mailbox,
viz::ResourceFormat format,
+ SurfaceHandle surface_handle,
const gfx::Size& size,
const gfx::ColorSpace& color_space,
uint32_t usage,
bool is_thread_safe) {
- DCHECK(!is_thread_safe);
- return CreateSharedImage(mailbox, format, size, color_space, usage,
- base::span<const uint8_t>());
+ if (is_thread_safe) {
+ return MakeEglImageBacking(mailbox, format, size, color_space, usage);
+ } else {
+ return CreateSharedImageInternal(mailbox, format, surface_handle, size,
+ color_space, usage,
+ base::span<const uint8_t>());
+ }
}
std::unique_ptr<SharedImageBacking>
@@ -795,151 +839,8 @@ SharedImageBackingFactoryGLTexture::CreateSharedImage(
const gfx::ColorSpace& color_space,
uint32_t usage,
base::span<const uint8_t> pixel_data) {
- const FormatInfo& format_info = format_info_[format];
- if (!format_info.enabled) {
- LOG(ERROR) << "CreateSharedImage: invalid format";
- return nullptr;
- }
-
- const bool use_buffer = usage & SHARED_IMAGE_USAGE_SCANOUT;
- if (use_buffer && !format_info.allow_scanout) {
- LOG(ERROR) << "CreateSharedImage: SCANOUT shared images unavailable";
- return nullptr;
- }
-
- if (size.width() < 1 || size.height() < 1 ||
- size.width() > max_texture_size_ || size.height() > max_texture_size_) {
- LOG(ERROR) << "CreateSharedImage: invalid size";
- return nullptr;
- }
-
- GLenum target = use_buffer ? format_info.target_for_scanout : GL_TEXTURE_2D;
-
- // If we have initial data to upload, ensure it is sized appropriately.
- if (!pixel_data.empty()) {
- if (format_info.is_compressed) {
- const char* error_message = "unspecified";
- if (!gles2::ValidateCompressedTexDimensions(
- target, 0 /* level */, size.width(), size.height(), 1 /* depth */,
- format_info.image_internal_format, &error_message)) {
- LOG(ERROR) << "CreateSharedImage: "
- "ValidateCompressedTexDimensionsFailed with error: "
- << error_message;
- return nullptr;
- }
-
- GLsizei bytes_required = 0;
- if (!gles2::GetCompressedTexSizeInBytes(
- nullptr /* function_name */, size.width(), size.height(),
- 1 /* depth */, format_info.image_internal_format, &bytes_required,
- nullptr /* error_state */)) {
- LOG(ERROR) << "CreateSharedImage: Unable to compute required size for "
- "initial texture upload.";
- return nullptr;
- }
-
- if (bytes_required < 0 ||
- pixel_data.size() != static_cast<size_t>(bytes_required)) {
- LOG(ERROR) << "CreateSharedImage: Initial data does not have expected "
- "size.";
- return nullptr;
- }
- } else {
- uint32_t bytes_required;
- if (!gles2::GLES2Util::ComputeImageDataSizes(
- size.width(), size.height(), 1 /* depth */, format_info.gl_format,
- format_info.gl_type, 4 /* alignment */, &bytes_required, nullptr,
- nullptr)) {
- LOG(ERROR) << "CreateSharedImage: Unable to compute required size for "
- "initial texture upload.";
- return nullptr;
- }
- if (pixel_data.size() != bytes_required) {
- LOG(ERROR) << "CreateSharedImage: Initial data does not have expected "
- "size.";
- return nullptr;
- }
- }
- }
-
- gl::GLApi* api = gl::g_current_gl_context;
- ScopedRestoreTexture scoped_restore(api, target);
-
- const bool for_framebuffer_attachment =
- (usage & (SHARED_IMAGE_USAGE_RASTER |
- SHARED_IMAGE_USAGE_GLES2_FRAMEBUFFER_HINT)) != 0;
- GLuint service_id = MakeTextureAndSetParameters(
- api, target, for_framebuffer_attachment && texture_usage_angle_);
-
- scoped_refptr<gl::GLImage> image;
- // TODO(piman): We pretend the texture was created in an ES2 context, so that
- // it can be used in other ES2 contexts, and so we have to pass gl_format as
- // the internal format in the LevelInfo. https://crbug.com/628064
- GLuint level_info_internal_format = format_info.gl_format;
- bool is_cleared = false;
- bool needs_subimage_upload = false;
- bool has_immutable_storage = false;
- if (use_buffer) {
- image = image_factory_->CreateAnonymousImage(
- size, format_info.buffer_format, gfx::BufferUsage::SCANOUT,
- &is_cleared);
- // Scanout images have different constraints than GL images and might fail
- // to allocate even if GL images can be created.
- if (!image) {
- // TODO(dcastagna): Use BufferUsage::GPU_READ_WRITE instead
- // BufferUsage::GPU_READ once we add it.
- image = image_factory_->CreateAnonymousImage(
- size, format_info.buffer_format, gfx::BufferUsage::GPU_READ,
- &is_cleared);
- }
- // The allocated image should not require copy.
- if (!image || image->ShouldBindOrCopy() != gl::GLImage::BIND ||
- !image->BindTexImage(target)) {
- LOG(ERROR) << "CreateSharedImage: Failed to "
- << (image ? "bind" : "create") << " image";
- api->glDeleteTexturesFn(1, &service_id);
- return nullptr;
- }
- level_info_internal_format = image->GetInternalFormat();
- if (color_space.IsValid())
- image->SetColorSpace(color_space);
- needs_subimage_upload = !pixel_data.empty();
- } else if (format_info.supports_storage) {
- api->glTexStorage2DEXTFn(target, 1, format_info.storage_internal_format,
- size.width(), size.height());
- has_immutable_storage = true;
- needs_subimage_upload = !pixel_data.empty();
- } else if (format_info.is_compressed) {
- ScopedResetAndRestoreUnpackState scoped_unpack_state(api, attribs,
- !pixel_data.empty());
- api->glCompressedTexImage2DFn(target, 0, format_info.image_internal_format,
- size.width(), size.height(), 0,
- pixel_data.size(), pixel_data.data());
- } else {
- ScopedResetAndRestoreUnpackState scoped_unpack_state(api, attribs,
- !pixel_data.empty());
- api->glTexImage2DFn(target, 0, format_info.image_internal_format,
- size.width(), size.height(), 0,
- format_info.adjusted_format, format_info.gl_type,
- pixel_data.data());
- }
-
- // If we are using a buffer or TexStorage API but have data to upload, do so
- // now via TexSubImage2D.
- if (needs_subimage_upload) {
- ScopedResetAndRestoreUnpackState scoped_unpack_state(api, attribs,
- !pixel_data.empty());
- api->glTexSubImage2DFn(target, 0, 0, 0, size.width(), size.height(),
- format_info.adjusted_format, format_info.gl_type,
- pixel_data.data());
- }
-
- return MakeBacking(
- use_passthrough_, mailbox, target, service_id, image,
- gles2::Texture::BOUND, level_info_internal_format, format_info.gl_format,
- format_info.gl_type, format_info.swizzle,
- pixel_data.empty() ? is_cleared : true, has_immutable_storage, format,
- size, color_space, usage, attribs);
+ return CreateSharedImageInternal(mailbox, format, kNullSurfaceHandle, size,
+ color_space, usage, pixel_data);
}
std::unique_ptr<SharedImageBacking>
@@ -982,8 +883,17 @@ SharedImageBackingFactoryGLTexture::CreateSharedImage(
// bindable. Currently NativeBufferNeedsPlatformSpecificTextureTarget can
// only return false on Chrome OS where GLImageNativePixmap is used which is
// always bindable.
+#if DCHECK_IS_ON()
+ bool texture_2d_support = false;
+#if defined(OS_MACOSX)
+ // If the PlatformSpecificTextureTarget on Mac is GL_TEXTURE_2D, this is
+ // supported.
+ texture_2d_support =
+ (gpu::GetPlatformSpecificTextureTarget() == GL_TEXTURE_2D);
+#endif // defined(OS_MACOSX)
DCHECK(handle.type == gfx::SHARED_MEMORY_BUFFER || target != GL_TEXTURE_2D ||
- image->ShouldBindOrCopy() == gl::GLImage::BIND);
+ texture_2d_support || image->ShouldBindOrCopy() == gl::GLImage::BIND);
+#endif // DCHECK_IS_ON()
if (color_space.IsValid())
image->SetColorSpace(color_space);
@@ -1114,7 +1024,7 @@ SharedImageBackingFactoryGLTexture::MakeBacking(
return std::make_unique<SharedImageBackingPassthroughGLTexture>(
mailbox, format, size, color_space, usage,
- std::move(passthrough_texture), is_cleared);
+ std::move(passthrough_texture));
} else {
gles2::Texture* texture = new gles2::Texture(service_id);
texture->SetLightweightRef();
@@ -1137,6 +1047,209 @@ SharedImageBackingFactoryGLTexture::MakeBacking(
}
}
+std::unique_ptr<SharedImageBacking>
+SharedImageBackingFactoryGLTexture::MakeEglImageBacking(
+ const Mailbox& mailbox,
+ viz::ResourceFormat format,
+ const gfx::Size& size,
+ const gfx::ColorSpace& color_space,
+ uint32_t usage) {
+#if defined(OS_ANDROID)
+ const FormatInfo& format_info = format_info_[format];
+ if (!format_info.enabled) {
+ DLOG(ERROR) << "MakeEglImageBacking: invalid format";
+ return nullptr;
+ }
+
+ DCHECK(!(usage & SHARED_IMAGE_USAGE_SCANOUT));
+
+ if (size.width() < 1 || size.height() < 1 ||
+ size.width() > max_texture_size_ || size.height() > max_texture_size_) {
+ DLOG(ERROR) << "MakeEglImageBacking: Invalid size";
+ return nullptr;
+ }
+
+ // Calculate SharedImage size in bytes.
+ size_t estimated_size;
+ if (!viz::ResourceSizes::MaybeSizeInBytes(size, format, &estimated_size)) {
+ DLOG(ERROR) << "MakeEglImageBacking: Failed to calculate SharedImage size";
+ return nullptr;
+ }
+
+ return std::make_unique<SharedImageBackingEglImage>(
+ mailbox, format, size, color_space, usage, estimated_size,
+ format_info.gl_format, format_info.gl_type, batch_access_manager_,
+ workarounds_);
+#else
+ return nullptr;
+#endif
+}
+
+std::unique_ptr<SharedImageBacking>
+SharedImageBackingFactoryGLTexture::CreateSharedImageInternal(
+ const Mailbox& mailbox,
+ viz::ResourceFormat format,
+ SurfaceHandle surface_handle,
+ const gfx::Size& size,
+ const gfx::ColorSpace& color_space,
+ uint32_t usage,
+ base::span<const uint8_t> pixel_data) {
+ const FormatInfo& format_info = format_info_[format];
+ if (!format_info.enabled) {
+ LOG(ERROR) << "CreateSharedImage: invalid format";
+ return nullptr;
+ }
+
+ const bool use_buffer = usage & SHARED_IMAGE_USAGE_SCANOUT;
+ if (use_buffer && !format_info.allow_scanout) {
+ LOG(ERROR) << "CreateSharedImage: SCANOUT shared images unavailable";
+ return nullptr;
+ }
+
+ if (size.width() < 1 || size.height() < 1 ||
+ size.width() > max_texture_size_ || size.height() > max_texture_size_) {
+ LOG(ERROR) << "CreateSharedImage: invalid size";
+ return nullptr;
+ }
+
+ GLenum target = use_buffer ? format_info.target_for_scanout : GL_TEXTURE_2D;
+
+ // If we have initial data to upload, ensure it is sized appropriately.
+ if (!pixel_data.empty()) {
+ if (format_info.is_compressed) {
+ const char* error_message = "unspecified";
+ if (!gles2::ValidateCompressedTexDimensions(
+ target, 0 /* level */, size.width(), size.height(), 1 /* depth */,
+ format_info.image_internal_format, &error_message)) {
+ LOG(ERROR) << "CreateSharedImage: "
+ "ValidateCompressedTexDimensionsFailed with error: "
+ << error_message;
+ return nullptr;
+ }
+
+ GLsizei bytes_required = 0;
+ if (!gles2::GetCompressedTexSizeInBytes(
+ nullptr /* function_name */, size.width(), size.height(),
+ 1 /* depth */, format_info.image_internal_format, &bytes_required,
+ nullptr /* error_state */)) {
+ LOG(ERROR) << "CreateSharedImage: Unable to compute required size for "
+ "initial texture upload.";
+ return nullptr;
+ }
+
+ if (bytes_required < 0 ||
+ pixel_data.size() != static_cast<size_t>(bytes_required)) {
+ LOG(ERROR) << "CreateSharedImage: Initial data does not have expected "
+ "size.";
+ return nullptr;
+ }
+ } else {
+ uint32_t bytes_required;
+ uint32_t unpadded_row_size = 0u;
+ uint32_t padded_row_size = 0u;
+ if (!gles2::GLES2Util::ComputeImageDataSizes(
+ size.width(), size.height(), 1 /* depth */, format_info.gl_format,
+ format_info.gl_type, 4 /* alignment */, &bytes_required,
+ &unpadded_row_size, &padded_row_size)) {
+ LOG(ERROR) << "CreateSharedImage: Unable to compute required size for "
+ "initial texture upload.";
+ return nullptr;
+ }
+
+ // The GL spec, used in the computation for required bytes in the function
+ // above, assumes no padding is required for the last row in the image.
+ // But the client data does include this padding, so we add it for the
+ // data validation check here.
+ uint32_t padding = padded_row_size - unpadded_row_size;
+ bytes_required += padding;
+ if (pixel_data.size() != bytes_required) {
+ LOG(ERROR) << "CreateSharedImage: Initial data does not have expected "
+ "size.";
+ return nullptr;
+ }
+ }
+ }
+
+ gl::GLApi* api = gl::g_current_gl_context;
+ ScopedRestoreTexture scoped_restore(api, target);
+
+ const bool for_framebuffer_attachment =
+ (usage & (SHARED_IMAGE_USAGE_RASTER |
+ SHARED_IMAGE_USAGE_GLES2_FRAMEBUFFER_HINT)) != 0;
+ GLuint service_id = MakeTextureAndSetParameters(
+ api, target, for_framebuffer_attachment && texture_usage_angle_);
+
+ scoped_refptr<gl::GLImage> image;
+ // TODO(piman): We pretend the texture was created in an ES2 context, so that
+ // it can be used in other ES2 contexts, and so we have to pass gl_format as
+ // the internal format in the LevelInfo. https://crbug.com/628064
+ GLuint level_info_internal_format = format_info.gl_format;
+ bool is_cleared = false;
+ bool needs_subimage_upload = false;
+ bool has_immutable_storage = false;
+ if (use_buffer) {
+ image = image_factory_->CreateAnonymousImage(
+ size, format_info.buffer_format, gfx::BufferUsage::SCANOUT,
+ surface_handle, &is_cleared);
+ // Scanout images have different constraints than GL images and might fail
+ // to allocate even if GL images can be created.
+ if (!image) {
+ // TODO(dcastagna): Use BufferUsage::GPU_READ_WRITE instead
+ // BufferUsage::GPU_READ once we add it.
+ image = image_factory_->CreateAnonymousImage(
+ size, format_info.buffer_format, gfx::BufferUsage::GPU_READ,
+ surface_handle, &is_cleared);
+ }
+ // The allocated image should not require copy.
+ if (!image || image->ShouldBindOrCopy() != gl::GLImage::BIND ||
+ !image->BindTexImage(target)) {
+ LOG(ERROR) << "CreateSharedImage: Failed to "
+ << (image ? "bind" : "create") << " image";
+ api->glDeleteTexturesFn(1, &service_id);
+ return nullptr;
+ }
+ level_info_internal_format = image->GetInternalFormat();
+ if (color_space.IsValid())
+ image->SetColorSpace(color_space);
+ needs_subimage_upload = !pixel_data.empty();
+ } else if (format_info.supports_storage) {
+ api->glTexStorage2DEXTFn(target, 1, format_info.storage_internal_format,
+ size.width(), size.height());
+ has_immutable_storage = true;
+ needs_subimage_upload = !pixel_data.empty();
+ } else if (format_info.is_compressed) {
+ ScopedResetAndRestoreUnpackState scoped_unpack_state(api, attribs,
+ !pixel_data.empty());
+ api->glCompressedTexImage2DFn(target, 0, format_info.image_internal_format,
+ size.width(), size.height(), 0,
+ pixel_data.size(), pixel_data.data());
+ } else {
+ ScopedResetAndRestoreUnpackState scoped_unpack_state(api, attribs,
+ !pixel_data.empty());
+ api->glTexImage2DFn(target, 0, format_info.image_internal_format,
+ size.width(), size.height(), 0,
+ format_info.adjusted_format, format_info.gl_type,
+ pixel_data.data());
+ }
+
+ // If we are using a buffer or TexStorage API but have data to upload, do so
+ // now via TexSubImage2D.
+ if (needs_subimage_upload) {
+ ScopedResetAndRestoreUnpackState scoped_unpack_state(api, attribs,
+ !pixel_data.empty());
+ api->glTexSubImage2DFn(target, 0, 0, 0, size.width(), size.height(),
+ format_info.adjusted_format, format_info.gl_type,
+ pixel_data.data());
+ }
+
+ return MakeBacking(
+ use_passthrough_, mailbox, target, service_id, image,
+ gles2::Texture::BOUND, level_info_internal_format, format_info.gl_format,
+ format_info.gl_type, format_info.swizzle,
+ pixel_data.empty() ? is_cleared : true, has_immutable_storage, format,
+ size, color_space, usage, attribs);
+}
+
SharedImageBackingFactoryGLTexture::FormatInfo::FormatInfo() = default;
SharedImageBackingFactoryGLTexture::FormatInfo::~FormatInfo() = default;
diff --git a/chromium/gpu/command_buffer/service/shared_image_backing_factory_gl_texture.h b/chromium/gpu/command_buffer/service/shared_image_backing_factory_gl_texture.h
index bf49d246fcb..257cca42041 100644
--- a/chromium/gpu/command_buffer/service/shared_image_backing_factory_gl_texture.h
+++ b/chromium/gpu/command_buffer/service/shared_image_backing_factory_gl_texture.h
@@ -8,6 +8,7 @@
#include <memory>
#include "base/memory/scoped_refptr.h"
+#include "build/build_config.h"
#include "components/viz/common/resources/resource_format.h"
#include "gpu/command_buffer/service/shared_image_backing_factory.h"
#include "gpu/command_buffer/service/texture_manager.h"
@@ -22,6 +23,7 @@ class ColorSpace;
namespace gpu {
class SharedImageBacking;
+class SharedImageBatchAccessManager;
class GpuDriverBugWorkarounds;
struct GpuFeatureInfo;
struct GpuPreferences;
@@ -41,16 +43,19 @@ class GPU_GLES2_EXPORT SharedImageBackingFactoryGLTexture
bool supports_unpack_subimage = false;
};
- SharedImageBackingFactoryGLTexture(const GpuPreferences& gpu_preferences,
- const GpuDriverBugWorkarounds& workarounds,
- const GpuFeatureInfo& gpu_feature_info,
- ImageFactory* image_factory);
+ SharedImageBackingFactoryGLTexture(
+ const GpuPreferences& gpu_preferences,
+ const GpuDriverBugWorkarounds& workarounds,
+ const GpuFeatureInfo& gpu_feature_info,
+ ImageFactory* image_factory,
+ SharedImageBatchAccessManager* batch_access_manager);
~SharedImageBackingFactoryGLTexture() override;
// SharedImageBackingFactory implementation.
std::unique_ptr<SharedImageBacking> CreateSharedImage(
const Mailbox& mailbox,
viz::ResourceFormat format,
+ SurfaceHandle surface_handle,
const gfx::Size& size,
const gfx::ColorSpace& color_space,
uint32_t usage,
@@ -108,6 +113,24 @@ class GPU_GLES2_EXPORT SharedImageBackingFactoryGLTexture
uint32_t usage,
const UnpackStateAttribs& attribs);
+  // This is meant to be used only on Android. Returns nullptr on other
+  // platforms.
+ std::unique_ptr<SharedImageBacking> MakeEglImageBacking(
+ const Mailbox& mailbox,
+ viz::ResourceFormat format,
+ const gfx::Size& size,
+ const gfx::ColorSpace& color_space,
+ uint32_t usage);
+
+ std::unique_ptr<SharedImageBacking> CreateSharedImageInternal(
+ const Mailbox& mailbox,
+ viz::ResourceFormat format,
+ SurfaceHandle surface_handle,
+ const gfx::Size& size,
+ const gfx::ColorSpace& color_space,
+ uint32_t usage,
+ base::span<const uint8_t> pixel_data);
+
struct FormatInfo {
FormatInfo();
~FormatInfo();
@@ -158,6 +181,11 @@ class GPU_GLES2_EXPORT SharedImageBackingFactoryGLTexture
int32_t max_texture_size_ = 0;
bool texture_usage_angle_ = false;
UnpackStateAttribs attribs;
+ GpuDriverBugWorkarounds workarounds_;
+
+#if defined(OS_ANDROID)
+ SharedImageBatchAccessManager* batch_access_manager_ = nullptr;
+#endif
};
} // namespace gpu
diff --git a/chromium/gpu/command_buffer/service/shared_image_backing_factory_gl_texture_unittest.cc b/chromium/gpu/command_buffer/service/shared_image_backing_factory_gl_texture_unittest.cc
index 7741f5627fb..fb37ea94ee4 100644
--- a/chromium/gpu/command_buffer/service/shared_image_backing_factory_gl_texture_unittest.cc
+++ b/chromium/gpu/command_buffer/service/shared_image_backing_factory_gl_texture_unittest.cc
@@ -4,8 +4,12 @@
#include "gpu/command_buffer/service/shared_image_backing_factory_gl_texture.h"
+#include <thread>
+
#include "base/bind_helpers.h"
#include "base/optional.h"
+#include "base/strings/stringprintf.h"
+#include "build/build_config.h"
#include "components/viz/common/resources/resource_format_utils.h"
#include "components/viz/common/resources/resource_sizes.h"
#include "gpu/command_buffer/common/gpu_memory_buffer_support.h"
@@ -18,6 +22,7 @@
#include "gpu/command_buffer/service/shared_image_factory.h"
#include "gpu/command_buffer/service/shared_image_manager.h"
#include "gpu/command_buffer/service/shared_image_representation.h"
+#include "gpu/command_buffer/service/shared_image_test_utils.h"
#include "gpu/command_buffer/service/texture_manager.h"
#include "gpu/command_buffer/tests/texture_image_factory.h"
#include "gpu/config/gpu_driver_bug_workarounds.h"
@@ -41,48 +46,77 @@
namespace gpu {
namespace {
+void CreateSharedContext(const GpuDriverBugWorkarounds& workarounds,
+ scoped_refptr<gl::GLSurface>& surface,
+ scoped_refptr<gl::GLContext>& context,
+ scoped_refptr<SharedContextState>& context_state,
+ scoped_refptr<gles2::FeatureInfo>& feature_info) {
+ surface = gl::init::CreateOffscreenGLSurface(gfx::Size());
+ ASSERT_TRUE(surface);
+ context =
+ gl::init::CreateGLContext(nullptr, surface.get(), gl::GLContextAttribs());
+ ASSERT_TRUE(context);
+ bool result = context->MakeCurrent(surface.get());
+ ASSERT_TRUE(result);
+
+ scoped_refptr<gl::GLShareGroup> share_group = new gl::GLShareGroup();
+ feature_info =
+ base::MakeRefCounted<gles2::FeatureInfo>(workarounds, GpuFeatureInfo());
+ context_state = base::MakeRefCounted<SharedContextState>(
+ std::move(share_group), surface, context,
+ false /* use_virtualized_gl_contexts */, base::DoNothing());
+ context_state->InitializeGrContext(GpuPreferences(), workarounds, nullptr);
+ context_state->InitializeGL(GpuPreferences(), feature_info);
+}
+
+bool IsAndroid() {
+#if defined(OS_ANDROID)
+ return true;
+#else
+ return false;
+#endif
+}
+
class SharedImageBackingFactoryGLTextureTestBase
- : public testing::TestWithParam<bool> {
+ : public testing::TestWithParam<std::tuple<bool, viz::ResourceFormat>> {
public:
+ SharedImageBackingFactoryGLTextureTestBase(bool is_thread_safe)
+ : shared_image_manager_(
+ std::make_unique<SharedImageManager>(is_thread_safe)) {}
+ ~SharedImageBackingFactoryGLTextureTestBase() {
+ // |context_state_| must be destroyed on its own context.
+ context_state_->MakeCurrent(surface_.get(), true /* needs_gl */);
+ }
+
void SetUpBase(const GpuDriverBugWorkarounds& workarounds,
ImageFactory* factory) {
- surface_ = gl::init::CreateOffscreenGLSurface(gfx::Size());
- ASSERT_TRUE(surface_);
- context_ = gl::init::CreateGLContext(nullptr, surface_.get(),
- gl::GLContextAttribs());
- ASSERT_TRUE(context_);
- bool result = context_->MakeCurrent(surface_.get());
- ASSERT_TRUE(result);
+ scoped_refptr<gles2::FeatureInfo> feature_info;
+ CreateSharedContext(workarounds, surface_, context_, context_state_,
+ feature_info);
+ supports_etc1_ =
+ feature_info->validators()->compressed_texture_format.IsValid(
+ GL_ETC1_RGB8_OES);
+ supports_ar30_ = feature_info->feature_flags().chromium_image_ar30;
+ supports_ab30_ = feature_info->feature_flags().chromium_image_ab30;
GpuPreferences preferences;
preferences.use_passthrough_cmd_decoder = use_passthrough();
backing_factory_ = std::make_unique<SharedImageBackingFactoryGLTexture>(
- preferences, workarounds, GpuFeatureInfo(), factory);
-
- scoped_refptr<gl::GLShareGroup> share_group = new gl::GLShareGroup();
- auto feature_info =
- base::MakeRefCounted<gles2::FeatureInfo>(workarounds, GpuFeatureInfo());
- context_state_ = base::MakeRefCounted<SharedContextState>(
- std::move(share_group), surface_, context_,
- false /* use_virtualized_gl_contexts */, base::DoNothing());
- context_state_->InitializeGrContext(workarounds, nullptr);
- context_state_->InitializeGL(GpuPreferences(), feature_info);
+ preferences, workarounds, GpuFeatureInfo(), factory,
+ shared_image_manager_->batch_access_manager());
memory_type_tracker_ = std::make_unique<MemoryTypeTracker>(nullptr);
shared_image_representation_factory_ =
std::make_unique<SharedImageRepresentationFactory>(
- &shared_image_manager_, nullptr);
-
- supports_etc1_ =
- feature_info->validators()->compressed_texture_format.IsValid(
- GL_ETC1_RGB8_OES);
+ shared_image_manager_.get(), nullptr);
}
bool use_passthrough() {
- return GetParam() && gles2::PassthroughCommandDecoderSupported();
+ return std::get<0>(GetParam()) &&
+ gles2::PassthroughCommandDecoderSupported();
}
- bool supports_etc1() { return supports_etc1_; }
+ viz::ResourceFormat get_format() { return std::get<1>(GetParam()); }
GrContext* gr_context() { return context_state_->gr_context(); }
@@ -92,35 +126,103 @@ class SharedImageBackingFactoryGLTextureTestBase
scoped_refptr<SharedContextState> context_state_;
std::unique_ptr<SharedImageBackingFactoryGLTexture> backing_factory_;
gles2::MailboxManagerImpl mailbox_manager_;
- SharedImageManager shared_image_manager_;
+ std::unique_ptr<SharedImageManager> shared_image_manager_;
std::unique_ptr<MemoryTypeTracker> memory_type_tracker_;
std::unique_ptr<SharedImageRepresentationFactory>
shared_image_representation_factory_;
bool supports_etc1_ = false;
+ bool supports_ar30_ = false;
+ bool supports_ab30_ = false;
};
class SharedImageBackingFactoryGLTextureTest
: public SharedImageBackingFactoryGLTextureTestBase {
public:
+ SharedImageBackingFactoryGLTextureTest()
+ : SharedImageBackingFactoryGLTextureTestBase(false) {}
+ void SetUp() override {
+ GpuDriverBugWorkarounds workarounds;
+ workarounds.max_texture_size = INT_MAX - 1;
+ SetUpBase(workarounds, &image_factory_);
+ }
+
+ protected:
+ TextureImageFactory image_factory_;
+};
+
+class SharedImageBackingFactoryGLTextureThreadSafeTest
+ : public SharedImageBackingFactoryGLTextureTestBase {
+ public:
+ SharedImageBackingFactoryGLTextureThreadSafeTest()
+ : SharedImageBackingFactoryGLTextureTestBase(true) {}
+ ~SharedImageBackingFactoryGLTextureThreadSafeTest() {
+ // |context_state2_| must be destroyed on its own context.
+ context_state2_->MakeCurrent(surface2_.get(), true /* needs_gl */);
+ }
void SetUp() override {
GpuDriverBugWorkarounds workarounds;
workarounds.max_texture_size = INT_MAX - 1;
SetUpBase(workarounds, &image_factory_);
+
+    // Create a 2nd context/context_state that is not part of the same share group.
+ scoped_refptr<gles2::FeatureInfo> feature_info;
+ CreateSharedContext(workarounds, surface2_, context2_, context_state2_,
+ feature_info);
+ feature_info.reset();
}
protected:
+ scoped_refptr<gl::GLSurface> surface2_;
+ scoped_refptr<gl::GLContext> context2_;
+ scoped_refptr<SharedContextState> context_state2_;
TextureImageFactory image_factory_;
};
+class CreateAndValidateSharedImageRepresentations {
+ public:
+ CreateAndValidateSharedImageRepresentations(
+ SharedImageBackingFactoryGLTexture* backing_factory,
+ viz::ResourceFormat format,
+ bool is_thread_safe,
+ gles2::MailboxManagerImpl* mailbox_manager,
+ SharedImageManager* shared_image_manager,
+ MemoryTypeTracker* memory_type_tracker,
+ SharedImageRepresentationFactory* shared_image_representation_factory,
+ SharedContextState* context_state);
+ ~CreateAndValidateSharedImageRepresentations();
+
+ gfx::Size size() { return size_; }
+ Mailbox mailbox() { return mailbox_; }
+
+ private:
+ gles2::MailboxManagerImpl* mailbox_manager_;
+ gfx::Size size_;
+ Mailbox mailbox_;
+ std::unique_ptr<SharedImageBacking> backing_;
+ std::unique_ptr<SharedImageRepresentationFactoryRef> shared_image_;
+};
+
TEST_P(SharedImageBackingFactoryGLTextureTest, Basic) {
auto mailbox = Mailbox::GenerateForSharedImage();
- auto format = viz::ResourceFormat::RGBA_8888;
+ auto format = get_format();
gfx::Size size(256, 256);
auto color_space = gfx::ColorSpace::CreateSRGB();
uint32_t usage = SHARED_IMAGE_USAGE_GLES2;
+ gpu::SurfaceHandle surface_handle = gpu::kNullSurfaceHandle;
auto backing = backing_factory_->CreateSharedImage(
- mailbox, format, size, color_space, usage, false /* is_thread_safe */);
- EXPECT_TRUE(backing);
+ mailbox, format, surface_handle, size, color_space, usage,
+ false /* is_thread_safe */);
+
+ // As long as either |chromium_image_ar30| or |chromium_image_ab30| is
+ // enabled, we can create a non-scanout SharedImage with format
+ // viz::ResourceFormat::{BGRA,RGBA}_1010102.
+ if ((format == viz::ResourceFormat::BGRA_1010102 ||
+ format == viz::ResourceFormat::RGBA_1010102) &&
+ !supports_ar30_ && !supports_ab30_) {
+ EXPECT_FALSE(backing);
+ return;
+ }
+ ASSERT_TRUE(backing);
// Check clearing.
if (!backing->IsCleared()) {
@@ -147,8 +249,8 @@ TEST_P(SharedImageBackingFactoryGLTextureTest, Basic) {
// Next, validate via a SharedImageRepresentationGLTexture.
std::unique_ptr<SharedImageRepresentationFactoryRef> shared_image =
- shared_image_manager_.Register(std::move(backing),
- memory_type_tracker_.get());
+ shared_image_manager_->Register(std::move(backing),
+ memory_type_tracker_.get());
EXPECT_TRUE(shared_image);
if (!use_passthrough()) {
auto gl_representation =
@@ -185,46 +287,66 @@ TEST_P(SharedImageBackingFactoryGLTextureTest, Basic) {
EXPECT_TRUE(skia_representation);
std::vector<GrBackendSemaphore> begin_semaphores;
std::vector<GrBackendSemaphore> end_semaphores;
- base::Optional<SharedImageRepresentationSkia::ScopedWriteAccess>
+ std::unique_ptr<SharedImageRepresentationSkia::ScopedWriteAccess>
scoped_write_access;
- scoped_write_access.emplace(skia_representation.get(), &begin_semaphores,
- &end_semaphores);
- auto* surface = scoped_write_access->surface();
- EXPECT_TRUE(surface);
- EXPECT_EQ(size.width(), surface->width());
- EXPECT_EQ(size.height(), surface->height());
+ scoped_write_access = skia_representation->BeginScopedWriteAccess(
+ &begin_semaphores, &end_semaphores,
+ SharedImageRepresentation::AllowUnclearedAccess::kYes);
+ // We use |supports_ar30_| and |supports_ab30_| to detect RGB10A2/BGR10A2
+ // support. It's possible Skia might support these formats even if the Chrome
+ // feature flags are false. We just check here that the feature flags don't
+ // allow Chrome to do something that Skia doesn't support.
+ if ((format != viz::ResourceFormat::BGRA_1010102 || supports_ar30_) &&
+ (format != viz::ResourceFormat::RGBA_1010102 || supports_ab30_)) {
+ ASSERT_TRUE(scoped_write_access);
+ auto* surface = scoped_write_access->surface();
+ ASSERT_TRUE(surface);
+ EXPECT_EQ(size.width(), surface->width());
+ EXPECT_EQ(size.height(), surface->height());
+ }
EXPECT_TRUE(begin_semaphores.empty());
EXPECT_TRUE(end_semaphores.empty());
scoped_write_access.reset();
- base::Optional<SharedImageRepresentationSkia::ScopedReadAccess>
+ std::unique_ptr<SharedImageRepresentationSkia::ScopedReadAccess>
scoped_read_access;
- scoped_read_access.emplace(skia_representation.get(), &begin_semaphores,
- &end_semaphores);
+ scoped_read_access = skia_representation->BeginScopedReadAccess(
+ &begin_semaphores, &end_semaphores);
auto* promise_texture = scoped_read_access->promise_image_texture();
EXPECT_TRUE(promise_texture);
EXPECT_TRUE(begin_semaphores.empty());
EXPECT_TRUE(end_semaphores.empty());
- GrBackendTexture backend_texture = promise_texture->backendTexture();
- EXPECT_TRUE(backend_texture.isValid());
- EXPECT_EQ(size.width(), backend_texture.width());
- EXPECT_EQ(size.height(), backend_texture.height());
- scoped_read_access.reset();
- skia_representation.reset();
+ GrBackendTexture backend_texture = promise_texture->backendTexture();
+ EXPECT_TRUE(backend_texture.isValid());
+ EXPECT_EQ(size.width(), backend_texture.width());
+ EXPECT_EQ(size.height(), backend_texture.height());
+ scoped_read_access.reset();
+ skia_representation.reset();
- shared_image.reset();
- EXPECT_FALSE(mailbox_manager_.ConsumeTexture(mailbox));
+ shared_image.reset();
+ EXPECT_FALSE(mailbox_manager_.ConsumeTexture(mailbox));
}
TEST_P(SharedImageBackingFactoryGLTextureTest, Image) {
auto mailbox = Mailbox::GenerateForSharedImage();
- auto format = viz::ResourceFormat::RGBA_8888;
+ auto format = get_format();
gfx::Size size(256, 256);
auto color_space = gfx::ColorSpace::CreateSRGB();
uint32_t usage = SHARED_IMAGE_USAGE_SCANOUT;
+ gpu::SurfaceHandle surface_handle = gpu::kNullSurfaceHandle;
auto backing = backing_factory_->CreateSharedImage(
- mailbox, format, size, color_space, usage, false /* is_thread_safe */);
- EXPECT_TRUE(backing);
+ mailbox, format, surface_handle, size, color_space, usage,
+ false /* is_thread_safe */);
+
+ // We can only create a scanout SharedImage with format
+ // viz::ResourceFormat::{BGRA,RGBA}_1010102 if the corresponding
+ // |chromium_image_ar30| or |chromium_image_ab30| is enabled.
+ if ((format == viz::ResourceFormat::BGRA_1010102 && !supports_ar30_) ||
+ (format == viz::ResourceFormat::RGBA_1010102 && !supports_ab30_)) {
+ EXPECT_FALSE(backing);
+ return;
+ }
+ ASSERT_TRUE(backing);
// Check clearing.
if (!backing->IsCleared()) {
@@ -250,8 +372,8 @@ TEST_P(SharedImageBackingFactoryGLTextureTest, Image) {
// Next, validate via a SharedImageRepresentationGLTexture.
std::unique_ptr<SharedImageRepresentationFactoryRef> shared_image =
- shared_image_manager_.Register(std::move(backing),
- memory_type_tracker_.get());
+ shared_image_manager_->Register(std::move(backing),
+ memory_type_tracker_.get());
EXPECT_TRUE(shared_image);
if (!use_passthrough()) {
auto gl_representation =
@@ -296,20 +418,21 @@ TEST_P(SharedImageBackingFactoryGLTextureTest, Image) {
EXPECT_TRUE(skia_representation);
std::vector<GrBackendSemaphore> begin_semaphores;
std::vector<GrBackendSemaphore> end_semaphores;
- base::Optional<SharedImageRepresentationSkia::ScopedWriteAccess>
+ std::unique_ptr<SharedImageRepresentationSkia::ScopedWriteAccess>
scoped_write_access;
- scoped_write_access.emplace(skia_representation.get(), &begin_semaphores,
- &end_semaphores);
+ scoped_write_access = skia_representation->BeginScopedWriteAccess(
+ &begin_semaphores, &end_semaphores,
+ SharedImageRepresentation::AllowUnclearedAccess::kYes);
auto* surface = scoped_write_access->surface();
EXPECT_TRUE(surface);
EXPECT_EQ(size.width(), surface->width());
EXPECT_EQ(size.height(), surface->height());
scoped_write_access.reset();
- base::Optional<SharedImageRepresentationSkia::ScopedReadAccess>
+ std::unique_ptr<SharedImageRepresentationSkia::ScopedReadAccess>
scoped_read_access;
- scoped_read_access.emplace(skia_representation.get(), &begin_semaphores,
- &end_semaphores);
+ scoped_read_access = skia_representation->BeginScopedReadAccess(
+ &begin_semaphores, &end_semaphores);
auto* promise_texture = scoped_read_access->promise_image_texture();
EXPECT_TRUE(promise_texture);
EXPECT_TRUE(begin_semaphores.empty());
@@ -328,15 +451,17 @@ TEST_P(SharedImageBackingFactoryGLTextureTest, Image) {
if (!use_passthrough() &&
context_state_->feature_info()->feature_flags().ext_texture_rg) {
- // Create a R-8 image texture, and check that the internal_format is that of
- // the image (GL_RGBA for TextureImageFactory). This only matters for the
- // validating decoder.
+ // Create a R-8 image texture, and check that the internal_format is that
+ // of the image (GL_RGBA for TextureImageFactory). This only matters for
+ // the validating decoder.
auto format = viz::ResourceFormat::RED_8;
+ gpu::SurfaceHandle surface_handle = gpu::kNullSurfaceHandle;
backing = backing_factory_->CreateSharedImage(
- mailbox, format, size, color_space, usage, false /* is_thread_safe */);
+ mailbox, format, surface_handle, size, color_space, usage,
+ false /* is_thread_safe */);
EXPECT_TRUE(backing);
- shared_image = shared_image_manager_.Register(std::move(backing),
- memory_type_tracker_.get());
+ shared_image = shared_image_manager_->Register(std::move(backing),
+ memory_type_tracker_.get());
auto gl_representation =
shared_image_representation_factory_->ProduceGLTexture(mailbox);
ASSERT_TRUE(gl_representation);
@@ -352,11 +477,11 @@ TEST_P(SharedImageBackingFactoryGLTextureTest, Image) {
}
TEST_P(SharedImageBackingFactoryGLTextureTest, InitialData) {
+  // TODO(andrescj): this loop over formats can be replaced by test
+  // parameters.
for (auto format :
- {viz::ResourceFormat::RGBA_8888, viz::ResourceFormat::ETC1}) {
- if (format == viz::ResourceFormat::ETC1 && !supports_etc1())
- continue;
-
+ {viz::ResourceFormat::RGBA_8888, viz::ResourceFormat::ETC1,
+ viz::ResourceFormat::BGRA_1010102, viz::ResourceFormat::RGBA_1010102}) {
auto mailbox = Mailbox::GenerateForSharedImage();
gfx::Size size(256, 256);
auto color_space = gfx::ColorSpace::CreateSRGB();
@@ -365,13 +490,29 @@ TEST_P(SharedImageBackingFactoryGLTextureTest, InitialData) {
viz::ResourceSizes::CheckedSizeInBytes<unsigned int>(size, format));
auto backing = backing_factory_->CreateSharedImage(
mailbox, format, size, color_space, usage, initial_data);
- EXPECT_TRUE(backing);
+
+ if (format == viz::ResourceFormat::ETC1 && !supports_etc1_) {
+ EXPECT_FALSE(backing);
+ continue;
+ }
+
+ // As long as either |chromium_image_ar30| or |chromium_image_ab30| is
+ // enabled, we can create a non-scanout SharedImage with format
+ // viz::ResourceFormat::{BGRA,RGBA}_1010102.
+ if ((format == viz::ResourceFormat::BGRA_1010102 ||
+ format == viz::ResourceFormat::RGBA_1010102) &&
+ !supports_ar30_ && !supports_ab30_) {
+ EXPECT_FALSE(backing);
+ continue;
+ }
+
+ ASSERT_TRUE(backing);
EXPECT_TRUE(backing->IsCleared());
// Validate via a SharedImageRepresentationGLTexture(Passthrough).
std::unique_ptr<SharedImageRepresentationFactoryRef> shared_image =
- shared_image_manager_.Register(std::move(backing),
- memory_type_tracker_.get());
+ shared_image_manager_->Register(std::move(backing),
+ memory_type_tracker_.get());
EXPECT_TRUE(shared_image);
GLenum expected_target = GL_TEXTURE_2D;
if (!use_passthrough()) {
@@ -407,7 +548,7 @@ TEST_P(SharedImageBackingFactoryGLTextureTest, InitialData) {
TEST_P(SharedImageBackingFactoryGLTextureTest, InitialDataImage) {
auto mailbox = Mailbox::GenerateForSharedImage();
- auto format = viz::ResourceFormat::RGBA_8888;
+ auto format = get_format();
gfx::Size size(256, 256);
auto color_space = gfx::ColorSpace::CreateSRGB();
uint32_t usage = SHARED_IMAGE_USAGE_GLES2 | SHARED_IMAGE_USAGE_SCANOUT;
@@ -415,10 +556,20 @@ TEST_P(SharedImageBackingFactoryGLTextureTest, InitialDataImage) {
auto backing = backing_factory_->CreateSharedImage(
mailbox, format, size, color_space, usage, initial_data);
+ // We can only create a scanout SharedImage with format
+ // viz::ResourceFormat::{BGRA,RGBA}_1010102 if the corresponding
+ // |chromium_image_ar30| or |chromium_image_ab30| is enabled.
+ if ((format == viz::ResourceFormat::BGRA_1010102 && !supports_ar30_) ||
+ (format == viz::ResourceFormat::RGBA_1010102 && !supports_ab30_)) {
+ EXPECT_FALSE(backing);
+ return;
+ }
+ ASSERT_TRUE(backing);
+
// Validate via a SharedImageRepresentationGLTexture(Passthrough).
std::unique_ptr<SharedImageRepresentationFactoryRef> shared_image =
- shared_image_manager_.Register(std::move(backing),
- memory_type_tracker_.get());
+ shared_image_manager_->Register(std::move(backing),
+ memory_type_tracker_.get());
EXPECT_TRUE(shared_image);
if (!use_passthrough()) {
auto gl_representation =
@@ -446,7 +597,7 @@ TEST_P(SharedImageBackingFactoryGLTextureTest, InitialDataImage) {
TEST_P(SharedImageBackingFactoryGLTextureTest, InitialDataWrongSize) {
auto mailbox = Mailbox::GenerateForSharedImage();
- auto format = viz::ResourceFormat::RGBA_8888;
+ auto format = get_format();
gfx::Size size(256, 256);
auto color_space = gfx::ColorSpace::CreateSRGB();
uint32_t usage = SHARED_IMAGE_USAGE_GLES2;
@@ -465,44 +616,61 @@ TEST_P(SharedImageBackingFactoryGLTextureTest, InvalidFormat) {
auto format = viz::ResourceFormat::YUV_420_BIPLANAR;
gfx::Size size(256, 256);
auto color_space = gfx::ColorSpace::CreateSRGB();
+ gpu::SurfaceHandle surface_handle = gpu::kNullSurfaceHandle;
uint32_t usage = SHARED_IMAGE_USAGE_GLES2;
auto backing = backing_factory_->CreateSharedImage(
- mailbox, format, size, color_space, usage, false /* is_thread_safe */);
+ mailbox, format, surface_handle, size, color_space, usage,
+ false /* is_thread_safe */);
EXPECT_FALSE(backing);
}
TEST_P(SharedImageBackingFactoryGLTextureTest, InvalidSize) {
auto mailbox = Mailbox::GenerateForSharedImage();
- auto format = viz::ResourceFormat::RGBA_8888;
+ auto format = get_format();
gfx::Size size(0, 0);
auto color_space = gfx::ColorSpace::CreateSRGB();
+ gpu::SurfaceHandle surface_handle = gpu::kNullSurfaceHandle;
uint32_t usage = SHARED_IMAGE_USAGE_GLES2;
auto backing = backing_factory_->CreateSharedImage(
- mailbox, format, size, color_space, usage, false /* is_thread_safe */);
+ mailbox, format, surface_handle, size, color_space, usage,
+ false /* is_thread_safe */);
EXPECT_FALSE(backing);
size = gfx::Size(INT_MAX, INT_MAX);
- backing = backing_factory_->CreateSharedImage(
- mailbox, format, size, color_space, usage, false /* is_thread_safe */);
+ backing = backing_factory_->CreateSharedImage(mailbox, format, surface_handle,
+ size, color_space, usage,
+ false /* is_thread_safe */);
EXPECT_FALSE(backing);
}
TEST_P(SharedImageBackingFactoryGLTextureTest, EstimatedSize) {
auto mailbox = Mailbox::GenerateForSharedImage();
- auto format = viz::ResourceFormat::RGBA_8888;
+ auto format = get_format();
gfx::Size size(256, 256);
auto color_space = gfx::ColorSpace::CreateSRGB();
+ gpu::SurfaceHandle surface_handle = gpu::kNullSurfaceHandle;
uint32_t usage = SHARED_IMAGE_USAGE_GLES2;
auto backing = backing_factory_->CreateSharedImage(
- mailbox, format, size, color_space, usage, false /* is_thread_safe */);
- EXPECT_TRUE(backing);
+ mailbox, format, surface_handle, size, color_space, usage,
+ false /* is_thread_safe */);
+
+ // As long as either |chromium_image_ar30| or |chromium_image_ab30| is
+ // enabled, we can create a non-scanout SharedImage with format
+ // viz::ResourceFormat::{BGRA,RGBA}_1010102.
+ if ((format == viz::ResourceFormat::BGRA_1010102 ||
+ format == viz::ResourceFormat::RGBA_1010102) &&
+ !supports_ar30_ && !supports_ab30_) {
+ EXPECT_FALSE(backing);
+ return;
+ }
+ ASSERT_TRUE(backing);
size_t backing_estimated_size = backing->estimated_size();
EXPECT_GT(backing_estimated_size, 0u);
std::unique_ptr<SharedImageRepresentationFactoryRef> shared_image =
- shared_image_manager_.Register(std::move(backing),
- memory_type_tracker_.get());
+ shared_image_manager_->Register(std::move(backing),
+ memory_type_tracker_.get());
EXPECT_EQ(backing_estimated_size, memory_type_tracker_->GetMemRepresented());
shared_image.reset();
@@ -608,6 +776,8 @@ class SharedImageBackingFactoryGLTextureWithGMBTest
: public SharedImageBackingFactoryGLTextureTestBase,
public gpu::ImageFactory {
public:
+ SharedImageBackingFactoryGLTextureWithGMBTest()
+ : SharedImageBackingFactoryGLTextureTestBase(false) {}
void SetUp() override { SetUpBase(GpuDriverBugWorkarounds(), this); }
scoped_refptr<gl::GLImage> GetImageFromMailbox(Mailbox mailbox) {
@@ -649,7 +819,7 @@ TEST_P(SharedImageBackingFactoryGLTextureWithGMBTest,
GpuMemoryBufferImportEmpty) {
auto mailbox = Mailbox::GenerateForSharedImage();
gfx::Size size(256, 256);
- gfx::BufferFormat format = gfx::BufferFormat::RGBA_8888;
+ gfx::BufferFormat format = viz::BufferFormat(get_format());
auto color_space = gfx::ColorSpace::CreateSRGB();
uint32_t usage = SHARED_IMAGE_USAGE_GLES2;
@@ -664,7 +834,7 @@ TEST_P(SharedImageBackingFactoryGLTextureWithGMBTest,
GpuMemoryBufferImportNative) {
auto mailbox = Mailbox::GenerateForSharedImage();
gfx::Size size(256, 256);
- gfx::BufferFormat format = gfx::BufferFormat::RGBA_8888;
+ gfx::BufferFormat format = viz::BufferFormat(get_format());
auto color_space = gfx::ColorSpace::CreateSRGB();
uint32_t usage = SHARED_IMAGE_USAGE_GLES2;
@@ -673,11 +843,20 @@ TEST_P(SharedImageBackingFactoryGLTextureWithGMBTest,
auto backing = backing_factory_->CreateSharedImage(
mailbox, kClientId, std::move(handle), format, kNullSurfaceHandle, size,
color_space, usage);
+
+ // We can only create a GMB SharedImage with format
+ // viz::ResourceFormat::{BGRA,RGBA}_1010102 if the corresponding
+ // |chromium_image_ar30| or |chromium_image_ab30| is enabled.
+ if ((get_format() == viz::ResourceFormat::BGRA_1010102 && !supports_ar30_) ||
+ (get_format() == viz::ResourceFormat::RGBA_1010102 && !supports_ab30_)) {
+ EXPECT_FALSE(backing);
+ return;
+ }
ASSERT_TRUE(backing);
std::unique_ptr<SharedImageRepresentationFactoryRef> ref =
- shared_image_manager_.Register(std::move(backing),
- memory_type_tracker_.get());
+ shared_image_manager_->Register(std::move(backing),
+ memory_type_tracker_.get());
scoped_refptr<gl::GLImage> image = GetImageFromMailbox(mailbox);
ASSERT_EQ(image->GetType(), gl::GLImage::Type::NONE);
auto* stub_image = static_cast<StubImage*>(image.get());
@@ -692,7 +871,7 @@ TEST_P(SharedImageBackingFactoryGLTextureWithGMBTest,
GpuMemoryBufferImportSharedMemory) {
auto mailbox = Mailbox::GenerateForSharedImage();
gfx::Size size(256, 256);
- gfx::BufferFormat format = gfx::BufferFormat::RGBA_8888;
+ gfx::BufferFormat format = viz::BufferFormat(get_format());
auto color_space = gfx::ColorSpace::CreateSRGB();
uint32_t usage = SHARED_IMAGE_USAGE_GLES2;
@@ -709,10 +888,20 @@ TEST_P(SharedImageBackingFactoryGLTextureWithGMBTest,
auto backing = backing_factory_->CreateSharedImage(
mailbox, kClientId, std::move(handle), format, kNullSurfaceHandle, size,
color_space, usage);
+
+ // We can only create a GMB SharedImage with format
+ // viz::ResourceFormat::{BGRA,RGBA}_1010102 if the corresponding
+ // |chromium_image_ar30| or |chromium_image_ab30| is enabled.
+ if ((get_format() == viz::ResourceFormat::BGRA_1010102 && !supports_ar30_) ||
+ (get_format() == viz::ResourceFormat::RGBA_1010102 && !supports_ab30_)) {
+ EXPECT_FALSE(backing);
+ return;
+ }
ASSERT_TRUE(backing);
+
std::unique_ptr<SharedImageRepresentationFactoryRef> ref =
- shared_image_manager_.Register(std::move(backing),
- memory_type_tracker_.get());
+ shared_image_manager_->Register(std::move(backing),
+ memory_type_tracker_.get());
scoped_refptr<gl::GLImage> image = GetImageFromMailbox(mailbox);
ASSERT_EQ(image->GetType(), gl::GLImage::Type::MEMORY);
auto* shm_image = static_cast<gl::GLImageSharedMemory*>(image.get());
@@ -726,7 +915,7 @@ TEST_P(SharedImageBackingFactoryGLTextureWithGMBTest,
return;
auto mailbox = Mailbox::GenerateForSharedImage();
gfx::Size size(256, 256);
- gfx::BufferFormat format = gfx::BufferFormat::RGBA_8888;
+ gfx::BufferFormat format = viz::BufferFormat(get_format());
auto color_space = gfx::ColorSpace::CreateSRGB();
uint32_t usage = SHARED_IMAGE_USAGE_GLES2;
@@ -735,11 +924,20 @@ TEST_P(SharedImageBackingFactoryGLTextureWithGMBTest,
auto backing = backing_factory_->CreateSharedImage(
mailbox, kClientId, std::move(handle), format, kNullSurfaceHandle, size,
color_space, usage);
+
+ // We can only create a GMB SharedImage with format
+ // viz::ResourceFormat::{BGRA,RGBA}_1010102 if the corresponding
+ // |chromium_image_ar30| or |chromium_image_ab30| is enabled.
+ if ((get_format() == viz::ResourceFormat::BGRA_1010102 && !supports_ar30_) ||
+ (get_format() == viz::ResourceFormat::RGBA_1010102 && !supports_ab30_)) {
+ EXPECT_FALSE(backing);
+ return;
+ }
ASSERT_TRUE(backing);
std::unique_ptr<SharedImageRepresentationFactoryRef> ref =
- shared_image_manager_.Register(std::move(backing),
- memory_type_tracker_.get());
+ shared_image_manager_->Register(std::move(backing),
+ memory_type_tracker_.get());
auto representation =
shared_image_representation_factory_->ProduceRGBEmulationGLTexture(
@@ -747,7 +945,7 @@ TEST_P(SharedImageBackingFactoryGLTextureWithGMBTest,
EXPECT_TRUE(representation);
EXPECT_TRUE(representation->GetTexture()->service_id());
EXPECT_EQ(size, representation->size());
- EXPECT_EQ(viz::ResourceFormat::RGBA_8888, representation->format());
+ EXPECT_EQ(get_format(), representation->format());
EXPECT_EQ(color_space, representation->color_space());
EXPECT_EQ(usage, representation->usage());
@@ -760,12 +958,256 @@ TEST_P(SharedImageBackingFactoryGLTextureWithGMBTest,
EXPECT_EQ(stub_image->update_counter(), 1);
}
+// Intent of this test is to create a thread-safe backing and test that all
+// representations work.
+TEST_P(SharedImageBackingFactoryGLTextureThreadSafeTest, BasicThreadSafe) {
+  // SharedImageBackingFactoryGLTextureThreadSafeTest tests are only meant for
+  // the Android platform.
+ if (!IsAndroid())
+ return;
+
+ CreateAndValidateSharedImageRepresentations shared_image(
+ backing_factory_.get(), get_format(), true /* is_thread_safe */,
+ &mailbox_manager_, shared_image_manager_.get(),
+ memory_type_tracker_.get(), shared_image_representation_factory_.get(),
+ context_state_.get());
+}
+
+// Intent of this test is to use the shared image mailbox system from two
+// different threads, each running its own GL context, where the contexts are
+// not part of the same share group. One thread writes to the backing and the
+// other thread reads from it.
+TEST_P(SharedImageBackingFactoryGLTextureThreadSafeTest, OneWriterOneReader) {
+ if (!IsAndroid())
+ return;
+
+ // Create it on 1st SharedContextState |context_state_|.
+ CreateAndValidateSharedImageRepresentations shared_image(
+ backing_factory_.get(), get_format(), true /* is_thread_safe */,
+ &mailbox_manager_, shared_image_manager_.get(),
+ memory_type_tracker_.get(), shared_image_representation_factory_.get(),
+ context_state_.get());
+
+ auto mailbox = shared_image.mailbox();
+ auto size = shared_image.size();
+
+  // The writer side writes to the backing: create a GLTexture representation
+  // and clear it to green.
+ auto gl_representation =
+ shared_image_representation_factory_->ProduceGLTexture(mailbox);
+ EXPECT_TRUE(gl_representation);
+
+ // Begin writing to the underlying texture of the backing via ScopedAccess.
+ std::unique_ptr<SharedImageRepresentationGLTexture::ScopedAccess>
+ writer_scoped_access = gl_representation->BeginScopedAccess(
+          GL_SHARED_IMAGE_ACCESS_MODE_READWRITE_CHROMIUM,
+ SharedImageRepresentation::AllowUnclearedAccess::kNo);
+
+ DCHECK(writer_scoped_access);
+
+ // Create an FBO.
+ GLuint fbo = 0;
+ gl::GLApi* api = gl::g_current_gl_context;
+ api->glGenFramebuffersEXTFn(1, &fbo);
+ api->glBindFramebufferEXTFn(GL_FRAMEBUFFER, fbo);
+
+ // Attach the texture to FBO.
+ api->glFramebufferTexture2DEXTFn(
+ GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0,
+ gl_representation->GetTexture()->target(),
+ gl_representation->GetTexture()->service_id(), 0);
+
+ // Set the clear color to green.
+ api->glClearColorFn(0.0f, 1.0f, 0.0f, 1.0f);
+ api->glClearFn(GL_COLOR_BUFFER_BIT);
+ gl_representation->GetTexture()->SetLevelCleared(
+ gl_representation->GetTexture()->target(), 0, true);
+
+ // End writing.
+ writer_scoped_access.reset();
+ gl_representation.reset();
+
+  // Read from the backing in a separate thread. The read is done via a
+  // SkiaGLRepresentation: ReadPixels() produces a SkiaGLRepresentation which,
+  // in GL mode, wraps a GLTextureRepresentation. Hence testing reads via
+  // SkiaGLRepresentation is equivalent to testing them via
+  // GLTextureRepresentation. (An illustrative sketch of such a helper follows
+  // this test.)
+ std::vector<uint8_t> dst_pixels;
+
+ // Launch 2nd thread.
+ std::thread second_thread([&]() {
+ // Do ReadPixels() on 2nd SharedContextState |context_state2_|.
+ dst_pixels = ReadPixels(mailbox, size, context_state2_.get(),
+ shared_image_representation_factory_.get());
+ });
+
+  // Wait for the second thread to finish.
+ second_thread.join();
+
+ // Compare the pixel values.
+ EXPECT_EQ(dst_pixels[0], 0);
+ EXPECT_EQ(dst_pixels[1], 255);
+ EXPECT_EQ(dst_pixels[2], 0);
+ EXPECT_EQ(dst_pixels[3], 255);
+}
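+
+// Illustrative sketch: a hypothetical ReadPixelsSketch() helper in the spirit
+// of the ReadPixels() call above (the real helper is defined elsewhere in this
+// file). It assumes an RGBA_8888 shared image and follows the same Skia
+// read-back pattern as CheckSkiaPixels() in the IOSurface tests: produce a
+// Skia representation, begin a scoped read access, wrap the backend texture in
+// an SkImage, and read the pixels back.
+std::vector<uint8_t> ReadPixelsSketch(
+    const Mailbox& mailbox,
+    const gfx::Size& size,
+    SharedContextState* context_state,
+    SharedImageRepresentationFactory* representation_factory) {
+  // Make the reading context current on this thread.
+  EXPECT_TRUE(context_state->MakeCurrent(context_state->surface(),
+                                         true /* needs_gl */));
+
+  auto skia_representation =
+      representation_factory->ProduceSkia(mailbox, context_state);
+  EXPECT_TRUE(skia_representation);
+
+  std::unique_ptr<SharedImageRepresentationSkia::ScopedReadAccess>
+      scoped_read_access =
+          skia_representation->BeginScopedReadAccess(nullptr, nullptr);
+  EXPECT_TRUE(scoped_read_access);
+
+  auto* promise_texture = scoped_read_access->promise_image_texture();
+  GrBackendTexture backend_texture = promise_texture->backendTexture();
+
+  // Wrap the backend texture in an SkImage and read back the pixels.
+  auto sk_image = SkImage::MakeFromTexture(
+      context_state->gr_context(), backend_texture, kTopLeft_GrSurfaceOrigin,
+      kRGBA_8888_SkColorType, kOpaque_SkAlphaType, nullptr);
+
+  const SkImageInfo dst_info =
+      SkImageInfo::Make(size.width(), size.height(), kRGBA_8888_SkColorType,
+                        kOpaque_SkAlphaType, nullptr);
+  std::vector<uint8_t> dst_pixels(size.width() * size.height() * 4);
+  EXPECT_TRUE(sk_image->readPixels(dst_info, dst_pixels.data(),
+                                   dst_info.minRowBytes(), 0, 0));
+  return dst_pixels;
+}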
+
+CreateAndValidateSharedImageRepresentations::
+ CreateAndValidateSharedImageRepresentations(
+ SharedImageBackingFactoryGLTexture* backing_factory,
+ viz::ResourceFormat format,
+ bool is_thread_safe,
+ gles2::MailboxManagerImpl* mailbox_manager,
+ SharedImageManager* shared_image_manager,
+ MemoryTypeTracker* memory_type_tracker,
+ SharedImageRepresentationFactory* shared_image_representation_factory,
+ SharedContextState* context_state)
+ : mailbox_manager_(mailbox_manager), size_(256, 256) {
+ // Make the context current.
+ DCHECK(context_state);
+ EXPECT_TRUE(
+ context_state->MakeCurrent(context_state->surface(), true /* needs_gl*/));
+ mailbox_ = Mailbox::GenerateForSharedImage();
+ auto color_space = gfx::ColorSpace::CreateSRGB();
+ gpu::SurfaceHandle surface_handle = gpu::kNullSurfaceHandle;
+
+ // SHARED_IMAGE_USAGE_DISPLAY for skia read and SHARED_IMAGE_USAGE_RASTER for
+ // skia write.
+ uint32_t usage = SHARED_IMAGE_USAGE_GLES2 | SHARED_IMAGE_USAGE_RASTER;
+ if (!is_thread_safe)
+ usage |= SHARED_IMAGE_USAGE_DISPLAY;
+ backing_ = backing_factory->CreateSharedImage(
+ mailbox_, format, surface_handle, size_, color_space, usage,
+ is_thread_safe);
+
+ // As long as either |chromium_image_ar30| or |chromium_image_ab30| is
+ // enabled, we can create a non-scanout SharedImage with format
+ // viz::ResourceFormat::{BGRA,RGBA}_1010102.
+ const bool supports_ar30 =
+ context_state->feature_info()->feature_flags().chromium_image_ar30;
+ const bool supports_ab30 =
+ context_state->feature_info()->feature_flags().chromium_image_ab30;
+ if ((format == viz::ResourceFormat::BGRA_1010102 ||
+ format == viz::ResourceFormat::RGBA_1010102) &&
+ !supports_ar30 && !supports_ab30) {
+ EXPECT_FALSE(backing_);
+ return;
+ }
+ EXPECT_TRUE(backing_);
+ if (!backing_)
+ return;
+
+ // Check clearing.
+ if (!backing_->IsCleared()) {
+ backing_->SetCleared();
+ EXPECT_TRUE(backing_->IsCleared());
+ }
+
+ GLenum expected_target = GL_TEXTURE_2D;
+ shared_image_ =
+ shared_image_manager->Register(std::move(backing_), memory_type_tracker);
+
+ // Create and validate GLTexture representation.
+ auto gl_representation =
+ shared_image_representation_factory->ProduceGLTexture(mailbox_);
+
+ EXPECT_TRUE(gl_representation);
+ EXPECT_TRUE(gl_representation->GetTexture()->service_id());
+ EXPECT_EQ(expected_target, gl_representation->GetTexture()->target());
+ EXPECT_EQ(size_, gl_representation->size());
+ EXPECT_EQ(format, gl_representation->format());
+ EXPECT_EQ(color_space, gl_representation->color_space());
+ EXPECT_EQ(usage, gl_representation->usage());
+ gl_representation.reset();
+
+ // Create and Validate Skia Representations.
+ auto skia_representation =
+ shared_image_representation_factory->ProduceSkia(mailbox_, context_state);
+ EXPECT_TRUE(skia_representation);
+ std::vector<GrBackendSemaphore> begin_semaphores;
+ std::vector<GrBackendSemaphore> end_semaphores;
+
+ std::unique_ptr<SharedImageRepresentationSkia::ScopedWriteAccess>
+ scoped_write_access;
+ scoped_write_access = skia_representation->BeginScopedWriteAccess(
+ &begin_semaphores, &end_semaphores,
+ SharedImageRepresentation::AllowUnclearedAccess::kNo);
+ // We use |supports_ar30| and |supports_ab30| to detect RGB10A2/BGR10A2
+ // support. It's possible Skia might support these formats even if the Chrome
+ // feature flags are false. We just check here that the feature flags don't
+ // allow Chrome to do something that Skia doesn't support.
+ if ((format != viz::ResourceFormat::BGRA_1010102 || supports_ar30) &&
+ (format != viz::ResourceFormat::RGBA_1010102 || supports_ab30)) {
+ EXPECT_TRUE(scoped_write_access);
+ if (!scoped_write_access)
+ return;
+ auto* surface = scoped_write_access->surface();
+ EXPECT_TRUE(surface);
+ if (!surface)
+ return;
+ EXPECT_EQ(size_.width(), surface->width());
+ EXPECT_EQ(size_.height(), surface->height());
+ }
+ EXPECT_TRUE(begin_semaphores.empty());
+ EXPECT_TRUE(end_semaphores.empty());
+ scoped_write_access.reset();
+
+ std::unique_ptr<SharedImageRepresentationSkia::ScopedReadAccess>
+ scoped_read_access;
+ scoped_read_access = skia_representation->BeginScopedReadAccess(
+ &begin_semaphores, &end_semaphores);
+ auto* promise_texture = scoped_read_access->promise_image_texture();
+ EXPECT_TRUE(promise_texture);
+ EXPECT_TRUE(begin_semaphores.empty());
+ EXPECT_TRUE(end_semaphores.empty());
+ GrBackendTexture backend_texture = promise_texture->backendTexture();
+ EXPECT_TRUE(backend_texture.isValid());
+ EXPECT_EQ(size_.width(), backend_texture.width());
+ EXPECT_EQ(size_.height(), backend_texture.height());
+ scoped_read_access.reset();
+ skia_representation.reset();
+}
+
+CreateAndValidateSharedImageRepresentations::
+ ~CreateAndValidateSharedImageRepresentations() {
+ shared_image_.reset();
+ EXPECT_FALSE(mailbox_manager_->ConsumeTexture(mailbox_));
+}
+
+#if !defined(OS_ANDROID)
+const auto kResourceFormats =
+ ::testing::Values(viz::ResourceFormat::RGBA_8888,
+ viz::ResourceFormat::BGRA_1010102,
+ viz::ResourceFormat::RGBA_1010102);
+#else
+// High bit depth rendering is not supported on Android.
+const auto kResourceFormats = ::testing::Values(viz::ResourceFormat::RGBA_8888);
+#endif
+
+std::string TestParamToString(
+ const testing::TestParamInfo<std::tuple<bool, viz::ResourceFormat>>&
+ param_info) {
+ const bool allow_passthrough = std::get<0>(param_info.param);
+ const viz::ResourceFormat format = std::get<1>(param_info.param);
+ return base::StringPrintf(
+ "%s_%s", (allow_passthrough ? "AllowPassthrough" : "DisallowPassthrough"),
+ gfx::BufferFormatToString(viz::BufferFormat(format)));
+}
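+
+// For example, the parameter tuple (true, viz::ResourceFormat::RGBA_8888)
+// should yield the suffix "AllowPassthrough_RGBA_8888", so an instantiated
+// test name reads something like
+//   Service/SharedImageBackingFactoryGLTextureTest.Image/AllowPassthrough_RGBA_8888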
+
INSTANTIATE_TEST_SUITE_P(Service,
SharedImageBackingFactoryGLTextureTest,
- ::testing::Bool());
+ ::testing::Combine(::testing::Bool(),
+ kResourceFormats),
+ TestParamToString);
+INSTANTIATE_TEST_SUITE_P(Service,
+ SharedImageBackingFactoryGLTextureThreadSafeTest,
+ ::testing::Combine(::testing::Bool(),
+ kResourceFormats),
+ TestParamToString);
INSTANTIATE_TEST_SUITE_P(Service,
SharedImageBackingFactoryGLTextureWithGMBTest,
- ::testing::Bool());
+ ::testing::Combine(::testing::Bool(),
+ kResourceFormats),
+ TestParamToString);
} // anonymous namespace
} // namespace gpu
diff --git a/chromium/gpu/command_buffer/service/shared_image_backing_factory_iosurface.h b/chromium/gpu/command_buffer/service/shared_image_backing_factory_iosurface.h
index c59b4c91f39..2fb768ad10e 100644
--- a/chromium/gpu/command_buffer/service/shared_image_backing_factory_iosurface.h
+++ b/chromium/gpu/command_buffer/service/shared_image_backing_factory_iosurface.h
@@ -38,6 +38,7 @@ class GPU_GLES2_EXPORT SharedImageBackingFactoryIOSurface
std::unique_ptr<SharedImageBacking> CreateSharedImage(
const Mailbox& mailbox,
viz::ResourceFormat format,
+ SurfaceHandle surface_handle,
const gfx::Size& size,
const gfx::ColorSpace& color_space,
uint32_t usage,
diff --git a/chromium/gpu/command_buffer/service/shared_image_backing_factory_iosurface.mm b/chromium/gpu/command_buffer/service/shared_image_backing_factory_iosurface.mm
index 0f51e0b817d..dbab51ac01e 100644
--- a/chromium/gpu/command_buffer/service/shared_image_backing_factory_iosurface.mm
+++ b/chromium/gpu/command_buffer/service/shared_image_backing_factory_iosurface.mm
@@ -68,7 +68,7 @@ GLFormatInfo GetGLFormatInfo(viz::ResourceFormat format) {
// Technically we should use GL_RGB but CGLTexImageIOSurface2D() (and
// OpenGL ES 3.0, for the case) support only GL_RGBA (the hardware ignores
// the alpha channel anyway), see https://crbug.com/797347.
- case viz::BGRX_1010102:
+ case viz::BGRA_1010102:
info.format = GL_RGBA;
info.internal_format = GL_RGBA;
break;
@@ -88,22 +88,6 @@ void FlushIOSurfaceGLOperations() {
api->glFlushFn();
}
-base::Optional<WGPUTextureFormat> GetWGPUFormat(viz::ResourceFormat format) {
- switch (format) {
- case viz::RED_8:
- case viz::ALPHA_8:
- case viz::LUMINANCE_8:
- return WGPUTextureFormat_R8Unorm;
- case viz::RG_88:
- return WGPUTextureFormat_RG8Unorm;
- case viz::RGBA_8888:
- case viz::BGRA_8888:
- return WGPUTextureFormat_BGRA8Unorm;
- default:
- return {};
- }
-}
-
base::Optional<WGPUTextureFormat> GetWGPUFormat(gfx::BufferFormat format) {
switch (format) {
case gfx::BufferFormat::R_8:
@@ -297,31 +281,28 @@ class SharedImageRepresentationDawnIOSurface
}
WGPUTexture BeginAccess(WGPUTextureUsage usage) final {
- WGPUTextureDescriptor desc;
- desc.nextInChain = nullptr;
- desc.format = wgpu_format_;
- desc.usage = usage;
- desc.dimension = WGPUTextureDimension_2D;
- desc.size = {size().width(), size().height(), 1};
- desc.arrayLayerCount = 1;
- desc.mipLevelCount = 1;
- desc.sampleCount = 1;
-
- texture_ =
- dawn_native::metal::WrapIOSurface(device_, &desc, io_surface_.get(), 0);
+ WGPUTextureDescriptor texture_descriptor;
+ texture_descriptor.nextInChain = nullptr;
+ texture_descriptor.format = wgpu_format_;
+ texture_descriptor.usage = usage;
+ texture_descriptor.dimension = WGPUTextureDimension_2D;
+ texture_descriptor.size = {size().width(), size().height(), 1};
+ texture_descriptor.arrayLayerCount = 1;
+ texture_descriptor.mipLevelCount = 1;
+ texture_descriptor.sampleCount = 1;
+
+ dawn_native::metal::ExternalImageDescriptorIOSurface descriptor;
+ descriptor.cTextureDescriptor = &texture_descriptor;
+ descriptor.isCleared = IsCleared();
+ descriptor.ioSurface = io_surface_.get();
+ descriptor.plane = 0;
+
+ texture_ = dawn_native::metal::WrapIOSurface(device_, &descriptor);
if (texture_) {
// Keep a reference to the texture so that it stays valid (its content
// might be destroyed).
dawn_procs_.textureReference(texture_);
-
- // Assume that the user of this representation will write to the texture
- // so set the cleared flag so that other representations don't overwrite
- // the result.
- // TODO(cwallez@chromium.org): This is incorrect and allows reading
- // uninitialized data. When !IsCleared we should tell dawn_native to
- // consider the texture lazy-cleared.
- SetCleared();
}
return texture_;
@@ -331,8 +312,10 @@ class SharedImageRepresentationDawnIOSurface
if (!texture_) {
return;
}
- // TODO(cwallez@chromium.org): query dawn_native to know if the texture was
- // cleared and set IsCleared appropriately.
+
+ if (dawn_native::IsTextureSubresourceInitialized(texture_, 0, 1, 0, 1)) {
+ SetCleared();
+ }
// All further operations on the textures are errors (they would be racy
// with other backings).
@@ -370,7 +353,7 @@ class SharedImageRepresentationDawnIOSurface
// guarded on the context provider already successfully using Metal.
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wunguarded-availability"
-class SharedImageBackingIOSurface : public SharedImageBacking {
+class SharedImageBackingIOSurface : public ClearTrackingSharedImageBacking {
public:
SharedImageBackingIOSurface(const Mailbox& mailbox,
viz::ResourceFormat format,
@@ -380,26 +363,50 @@ class SharedImageBackingIOSurface : public SharedImageBacking {
base::ScopedCFTypeRef<IOSurfaceRef> io_surface,
base::Optional<WGPUTextureFormat> dawn_format,
size_t estimated_size)
- : SharedImageBacking(mailbox,
- format,
- size,
- color_space,
- usage,
- estimated_size,
- false /* is_thread_safe */),
+ : ClearTrackingSharedImageBacking(mailbox,
+ format,
+ size,
+ color_space,
+ usage,
+ estimated_size,
+ false /* is_thread_safe */),
io_surface_(std::move(io_surface)),
dawn_format_(dawn_format) {
DCHECK(io_surface_);
}
- ~SharedImageBackingIOSurface() final { DCHECK(!io_surface_); }
+ ~SharedImageBackingIOSurface() final {
+ TRACE_EVENT0("gpu", "SharedImageBackingFactoryIOSurface::Destroy");
+ DCHECK(io_surface_);
- bool IsCleared() const final { return is_cleared_; }
- void SetCleared() final {
if (legacy_texture_) {
- legacy_texture_->SetLevelCleared(legacy_texture_->target(), 0, true);
+ legacy_texture_->RemoveLightweightRef(have_context());
+ legacy_texture_ = nullptr;
}
+ mtl_texture_.reset();
+ io_surface_.reset();
+ }
+
+ gfx::Rect ClearedRect() const final {
+ // If a |legacy_texture_| exists, defer to that. Once created,
+ // |legacy_texture_| is never destroyed, so no need to synchronize with
+ // ClearedRect.
+ if (legacy_texture_) {
+ return legacy_texture_->GetLevelClearedRect(legacy_texture_->target(), 0);
+ } else {
+ return ClearTrackingSharedImageBacking::ClearedRect();
+ }
+ }
- is_cleared_ = true;
+ void SetClearedRect(const gfx::Rect& cleared_rect) final {
+ // If a |legacy_texture_| exists, defer to that. Once created,
+ // |legacy_texture_| is never destroyed, so no need to synchronize with
+ // SetClearedRect.
+ if (legacy_texture_) {
+ legacy_texture_->SetLevelClearedRect(legacy_texture_->target(), 0,
+ cleared_rect);
+ } else {
+ ClearTrackingSharedImageBacking::SetClearedRect(cleared_rect);
+ }
}
void Update(std::unique_ptr<gfx::GpuFence> in_fence) final {}
@@ -412,20 +419,14 @@ class SharedImageBackingIOSurface : public SharedImageBacking {
return false;
}
+ // Make sure our |legacy_texture_| has the right initial cleared rect.
+ legacy_texture_->SetLevelClearedRect(
+ legacy_texture_->target(), 0,
+ ClearTrackingSharedImageBacking::ClearedRect());
+
mailbox_manager->ProduceTexture(mailbox(), legacy_texture_);
return true;
}
- void Destroy() final {
- TRACE_EVENT0("gpu", "SharedImageBackingFactoryIOSurface::Destroy");
- DCHECK(io_surface_);
-
- if (legacy_texture_) {
- legacy_texture_->RemoveLightweightRef(have_context());
- legacy_texture_ = nullptr;
- }
- mtl_texture_.reset();
- io_surface_.reset();
- }
protected:
std::unique_ptr<SharedImageRepresentationGLTexture> ProduceGLTexture(
@@ -534,10 +535,7 @@ class SharedImageBackingIOSurface : public SharedImageBacking {
}
// If the backing is already cleared, no need to clear it again.
- gfx::Rect cleared_rect;
- if (is_cleared_) {
- cleared_rect = gfx::Rect(size());
- }
+ gfx::Rect cleared_rect = ClearedRect();
// Manually create a gles2::Texture wrapping our driver texture.
gles2::Texture* texture = new gles2::Texture(service_id);
@@ -563,7 +561,6 @@ class SharedImageBackingIOSurface : public SharedImageBacking {
base::ScopedCFTypeRef<IOSurfaceRef> io_surface_;
base::Optional<WGPUTextureFormat> dawn_format_;
base::scoped_nsprotocol<id<MTLTexture>> mtl_texture_;
- bool is_cleared_ = false;
// A texture for the associated legacy mailbox.
gles2::Texture* legacy_texture_ = nullptr;
@@ -613,6 +610,7 @@ std::unique_ptr<SharedImageBacking>
SharedImageBackingFactoryIOSurface::CreateSharedImage(
const Mailbox& mailbox,
viz::ResourceFormat format,
+ SurfaceHandle surface_handle,
const gfx::Size& size,
const gfx::ColorSpace& color_space,
uint32_t usage,
@@ -643,9 +641,18 @@ SharedImageBackingFactoryIOSurface::CreateSharedImage(
gfx::IOSurfaceSetColorSpace(io_surface, color_space);
+ // OpenGL textures bound to IOSurfaces won't work on macOS unless the internal
+ // format is BGRA, so force a BGRA internal format for viz::RGBA_8888.
+ base::Optional<WGPUTextureFormat> wgpu_format =
+ format == viz::RGBA_8888 ? WGPUTextureFormat_BGRA8Unorm
+ : viz::ToWGPUFormat(format);
+ if (wgpu_format.value() == WGPUTextureFormat_Undefined) {
+ wgpu_format = base::nullopt;
+ }
+
return std::make_unique<SharedImageBackingIOSurface>(
mailbox, format, size, color_space, usage, std::move(io_surface),
- GetWGPUFormat(format), estimated_size);
+ wgpu_format, estimated_size);
}
std::unique_ptr<SharedImageBacking>
diff --git a/chromium/gpu/command_buffer/service/shared_image_backing_factory_iosurface_unittest.cc b/chromium/gpu/command_buffer/service/shared_image_backing_factory_iosurface_unittest.cc
index 6a8ccda5171..a9e0e608e95 100644
--- a/chromium/gpu/command_buffer/service/shared_image_backing_factory_iosurface_unittest.cc
+++ b/chromium/gpu/command_buffer/service/shared_image_backing_factory_iosurface_unittest.cc
@@ -53,7 +53,7 @@ class SharedImageBackingFactoryIOSurfaceTest : public testing::Test {
context_state_ = base::MakeRefCounted<SharedContextState>(
std::move(share_group), surface_, context_,
false /* use_virtualized_gl_contexts */, base::DoNothing());
- context_state_->InitializeGrContext(workarounds, nullptr);
+ context_state_->InitializeGrContext(GpuPreferences(), workarounds, nullptr);
auto feature_info =
base::MakeRefCounted<gles2::FeatureInfo>(workarounds, GpuFeatureInfo());
context_state_->InitializeGL(GpuPreferences(), std::move(feature_info));
@@ -79,6 +79,52 @@ class SharedImageBackingFactoryIOSurfaceTest : public testing::Test {
std::unique_ptr<MemoryTypeTracker> memory_type_tracker_;
std::unique_ptr<SharedImageRepresentationFactory>
shared_image_representation_factory_;
+
+ void CheckSkiaPixels(const Mailbox& mailbox,
+ const gfx::Size& size,
+ const std::vector<uint8_t> expected_color) {
+ auto skia_representation =
+ shared_image_representation_factory_->ProduceSkia(mailbox,
+ context_state_);
+ ASSERT_NE(skia_representation, nullptr);
+
+ std::unique_ptr<SharedImageRepresentationSkia::ScopedReadAccess>
+ scoped_read_access =
+ skia_representation->BeginScopedReadAccess(nullptr, nullptr);
+ EXPECT_TRUE(scoped_read_access);
+
+ auto* promise_texture = scoped_read_access->promise_image_texture();
+ GrBackendTexture backend_texture = promise_texture->backendTexture();
+
+ EXPECT_TRUE(backend_texture.isValid());
+ EXPECT_EQ(size.width(), backend_texture.width());
+ EXPECT_EQ(size.height(), backend_texture.height());
+
+    // Create an SkImage from the GrBackendTexture.
+ auto sk_image = SkImage::MakeFromTexture(
+ gr_context(), backend_texture, kTopLeft_GrSurfaceOrigin,
+ kRGBA_8888_SkColorType, kOpaque_SkAlphaType, nullptr);
+
+ const SkImageInfo dst_info =
+ SkImageInfo::Make(size.width(), size.height(), kRGBA_8888_SkColorType,
+ kOpaque_SkAlphaType, nullptr);
+
+ const int num_pixels = size.width() * size.height();
+ std::vector<uint8_t> dst_pixels(num_pixels * 4);
+
+    // Read back pixels from the SkImage.
+ EXPECT_TRUE(sk_image->readPixels(dst_info, dst_pixels.data(),
+ dst_info.minRowBytes(), 0, 0));
+
+ for (int i = 0; i < num_pixels; i++) {
+ // Compare the pixel values.
+ const uint8_t* pixel = dst_pixels.data() + (i * 4);
+ EXPECT_EQ(pixel[0], expected_color[0]);
+ EXPECT_EQ(pixel[1], expected_color[1]);
+ EXPECT_EQ(pixel[2], expected_color[2]);
+ EXPECT_EQ(pixel[3], expected_color[3]);
+ }
+ }
};
// Basic test to check creation and deletion of IOSurface backed shared image.
@@ -87,10 +133,12 @@ TEST_F(SharedImageBackingFactoryIOSurfaceTest, Basic) {
viz::ResourceFormat format = viz::ResourceFormat::RGBA_8888;
gfx::Size size(256, 256);
gfx::ColorSpace color_space = gfx::ColorSpace::CreateSRGB();
+ gpu::SurfaceHandle surface_handle = gpu::kNullSurfaceHandle;
uint32_t usage = SHARED_IMAGE_USAGE_GLES2 | SHARED_IMAGE_USAGE_DISPLAY;
auto backing = backing_factory_->CreateSharedImage(
- mailbox, format, size, color_space, usage, false /* is_thread_safe */);
+ mailbox, format, surface_handle, size, color_space, usage,
+ false /* is_thread_safe */);
EXPECT_TRUE(backing);
// Check clearing.
@@ -139,11 +187,12 @@ TEST_F(SharedImageBackingFactoryIOSurfaceTest, Basic) {
EXPECT_TRUE(skia_representation);
std::vector<GrBackendSemaphore> begin_semaphores;
std::vector<GrBackendSemaphore> end_semaphores;
- base::Optional<SharedImageRepresentationSkia::ScopedWriteAccess>
+ std::unique_ptr<SharedImageRepresentationSkia::ScopedWriteAccess>
scoped_write_access;
- scoped_write_access.emplace(skia_representation.get(), &begin_semaphores,
- &end_semaphores);
+ scoped_write_access = skia_representation->BeginScopedWriteAccess(
+ &begin_semaphores, &end_semaphores,
+ SharedImageRepresentation::AllowUnclearedAccess::kYes);
auto* surface = scoped_write_access->surface();
EXPECT_TRUE(surface);
EXPECT_EQ(size.width(), surface->width());
@@ -152,9 +201,10 @@ TEST_F(SharedImageBackingFactoryIOSurfaceTest, Basic) {
EXPECT_TRUE(end_semaphores.empty());
scoped_write_access.reset();
- base::Optional<SharedImageRepresentationSkia::ScopedReadAccess>
+ std::unique_ptr<SharedImageRepresentationSkia::ScopedReadAccess>
scoped_read_access;
- scoped_read_access.emplace(skia_representation.get(), nullptr, nullptr);
+ scoped_read_access =
+ skia_representation->BeginScopedReadAccess(nullptr, nullptr);
auto* promise_texture = scoped_read_access->promise_image_texture();
EXPECT_TRUE(promise_texture);
GrBackendTexture backend_texture = promise_texture->backendTexture();
@@ -177,9 +227,11 @@ TEST_F(SharedImageBackingFactoryIOSurfaceTest, GL_SkiaGL) {
auto format = viz::ResourceFormat::RGBA_8888;
gfx::Size size(1, 1);
auto color_space = gfx::ColorSpace::CreateSRGB();
+ gpu::SurfaceHandle surface_handle = gpu::kNullSurfaceHandle;
uint32_t usage = SHARED_IMAGE_USAGE_GLES2 | SHARED_IMAGE_USAGE_DISPLAY;
auto backing = backing_factory_->CreateSharedImage(
- mailbox, format, size, color_space, usage, false /* is_thread_safe */);
+ mailbox, format, surface_handle, size, color_space, usage,
+ false /* is_thread_safe */);
EXPECT_TRUE(backing);
GLenum expected_target = GL_TEXTURE_RECTANGLE;
@@ -188,68 +240,101 @@ TEST_F(SharedImageBackingFactoryIOSurfaceTest, GL_SkiaGL) {
memory_type_tracker_.get());
// Create a SharedImageRepresentationGLTexture.
- auto gl_representation =
- shared_image_representation_factory_->ProduceGLTexture(mailbox);
- EXPECT_TRUE(gl_representation);
- EXPECT_EQ(expected_target, gl_representation->GetTexture()->target());
-
- // Create an FBO.
- GLuint fbo = 0;
- gl::GLApi* api = gl::g_current_gl_context;
- api->glGenFramebuffersEXTFn(1, &fbo);
- api->glBindFramebufferEXTFn(GL_FRAMEBUFFER, fbo);
-
- // Attach the texture to FBO.
- api->glFramebufferTexture2DEXTFn(
- GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0,
- gl_representation->GetTexture()->target(),
- gl_representation->GetTexture()->service_id(), 0);
-
- // Set the clear color to green.
- api->glClearColorFn(0.0f, 1.0f, 0.0f, 1.0f);
- api->glClearFn(GL_COLOR_BUFFER_BIT);
- gl_representation.reset();
-
- // Next create a SharedImageRepresentationSkia to read back the texture data.
- auto skia_representation = shared_image_representation_factory_->ProduceSkia(
- mailbox, context_state_);
- EXPECT_TRUE(skia_representation);
- base::Optional<SharedImageRepresentationSkia::ScopedReadAccess>
- scoped_read_access;
- scoped_read_access.emplace(skia_representation.get(), nullptr, nullptr);
- auto* promise_texture = scoped_read_access->promise_image_texture();
- EXPECT_TRUE(promise_texture);
- GrBackendTexture backend_texture = promise_texture->backendTexture();
- EXPECT_TRUE(backend_texture.isValid());
- EXPECT_EQ(size.width(), backend_texture.width());
- EXPECT_EQ(size.height(), backend_texture.height());
-
- // Create an Sk Image from GrBackendTexture.
- auto sk_image = SkImage::MakeFromTexture(
- gr_context(), promise_texture->backendTexture(), kTopLeft_GrSurfaceOrigin,
- kRGBA_8888_SkColorType, kOpaque_SkAlphaType, nullptr);
-
- SkImageInfo dst_info =
- SkImageInfo::Make(size.width(), size.height(), kRGBA_8888_SkColorType,
- kOpaque_SkAlphaType, nullptr);
+ {
+ auto gl_representation =
+ shared_image_representation_factory_->ProduceGLTexture(mailbox);
+ EXPECT_TRUE(gl_representation);
+ EXPECT_EQ(expected_target, gl_representation->GetTexture()->target());
+
+    // Access the SharedImageRepresentationGLTexture.
+ auto scoped_write_access = gl_representation->BeginScopedAccess(
+ GL_SHARED_IMAGE_ACCESS_MODE_READWRITE_CHROMIUM,
+ SharedImageRepresentation::AllowUnclearedAccess::kYes);
+
+ // Create an FBO.
+ GLuint fbo = 0;
+ gl::GLApi* api = gl::g_current_gl_context;
+ api->glGenFramebuffersEXTFn(1, &fbo);
+ api->glBindFramebufferEXTFn(GL_FRAMEBUFFER, fbo);
+
+ // Attach the texture to FBO.
+ api->glFramebufferTexture2DEXTFn(
+ GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0,
+ gl_representation->GetTexture()->target(),
+ gl_representation->GetTexture()->service_id(), 0);
+
+ // Set the clear color to green.
+ api->glClearColorFn(0.0f, 1.0f, 0.0f, 1.0f);
+ api->glClearFn(GL_COLOR_BUFFER_BIT);
+
+ gl_representation->GetTexture()->SetLevelCleared(
+ gl_representation->GetTexture()->target(), 0, true);
+ }
- const int num_pixels = size.width() * size.height();
- std::unique_ptr<uint8_t[]> dst_pixels(new uint8_t[num_pixels * 4]());
+ CheckSkiaPixels(mailbox, size, {0, 255, 0, 255});
+ factory_ref.reset();
+ EXPECT_FALSE(mailbox_manager_.ConsumeTexture(mailbox));
+}
- // Read back pixels from Sk Image.
- EXPECT_TRUE(sk_image->readPixels(dst_info, dst_pixels.get(),
- dst_info.minRowBytes(), 0, 0));
- scoped_read_access.reset();
+// Test which ensures that legacy texture clear status is kept in sync with the
+// SharedImageBacking.
+TEST_F(SharedImageBackingFactoryIOSurfaceTest, LegacyClearing) {
+ Mailbox mailbox = Mailbox::GenerateForSharedImage();
+ viz::ResourceFormat format = viz::ResourceFormat::RGBA_8888;
+ gfx::Size size(256, 256);
+ gfx::ColorSpace color_space = gfx::ColorSpace::CreateSRGB();
+ uint32_t usage = SHARED_IMAGE_USAGE_GLES2 | SHARED_IMAGE_USAGE_DISPLAY;
+ gpu::SurfaceHandle surface_handle = gpu::kNullSurfaceHandle;
- // Compare the pixel values.
- EXPECT_EQ(dst_pixels[0], 0);
- EXPECT_EQ(dst_pixels[1], 255);
- EXPECT_EQ(dst_pixels[2], 0);
- EXPECT_EQ(dst_pixels[3], 255);
+ // Create a backing.
+ auto backing = backing_factory_->CreateSharedImage(
+ mailbox, format, surface_handle, size, color_space, usage,
+ false /* is_thread_safe */);
+ EXPECT_TRUE(backing);
+ backing->SetCleared();
+ EXPECT_TRUE(backing->IsCleared());
- skia_representation.reset();
- factory_ref.reset();
- EXPECT_FALSE(mailbox_manager_.ConsumeTexture(mailbox));
+ // Also create a legacy mailbox.
+ EXPECT_TRUE(backing->ProduceLegacyMailbox(&mailbox_manager_));
+ TextureBase* texture_base = mailbox_manager_.ConsumeTexture(mailbox);
+ auto* texture = gles2::Texture::CheckedCast(texture_base);
+ EXPECT_TRUE(texture);
+ GLenum target = texture->target();
+
+ // Check initial state.
+ EXPECT_TRUE(texture->IsLevelCleared(target, 0));
+ EXPECT_TRUE(backing->IsCleared());
+
+ // Un-clear the representation.
+ backing->SetClearedRect(gfx::Rect());
+ EXPECT_FALSE(texture->IsLevelCleared(target, 0));
+ EXPECT_FALSE(backing->IsCleared());
+
+ // Partially clear the representation.
+ gfx::Rect partial_clear_rect(0, 0, 128, 128);
+ backing->SetClearedRect(partial_clear_rect);
+ EXPECT_EQ(partial_clear_rect, texture->GetLevelClearedRect(target, 0));
+ EXPECT_EQ(partial_clear_rect, backing->ClearedRect());
+
+ // Fully clear the representation.
+ backing->SetCleared();
+ EXPECT_TRUE(texture->IsLevelCleared(target, 0));
+ EXPECT_TRUE(backing->IsCleared());
+
+ // Un-clear the texture.
+ texture->SetLevelClearedRect(target, 0, gfx::Rect());
+ EXPECT_FALSE(texture->IsLevelCleared(target, 0));
+ EXPECT_FALSE(backing->IsCleared());
+
+ // Partially clear the texture.
+ texture->SetLevelClearedRect(target, 0, partial_clear_rect);
+ EXPECT_EQ(partial_clear_rect, texture->GetLevelClearedRect(target, 0));
+ EXPECT_EQ(partial_clear_rect, backing->ClearedRect());
+
+ // Fully clear the representation.
+ texture->SetLevelCleared(target, 0, true);
+ EXPECT_TRUE(texture->IsLevelCleared(target, 0));
+ EXPECT_TRUE(backing->IsCleared());
}
#if BUILDFLAG(USE_DAWN)
@@ -275,9 +360,11 @@ TEST_F(SharedImageBackingFactoryIOSurfaceTest, Dawn_SkiaGL) {
auto format = viz::ResourceFormat::RGBA_8888;
gfx::Size size(1, 1);
auto color_space = gfx::ColorSpace::CreateSRGB();
+ gpu::SurfaceHandle surface_handle = gpu::kNullSurfaceHandle;
uint32_t usage = SHARED_IMAGE_USAGE_WEBGPU | SHARED_IMAGE_USAGE_DISPLAY;
auto backing = backing_factory_->CreateSharedImage(
- mailbox, format, size, color_space, usage, false /* is_thread_safe */);
+ mailbox, format, surface_handle, size, color_space, usage,
+ false /* is_thread_safe */);
EXPECT_TRUE(backing);
std::unique_ptr<SharedImageRepresentationFactoryRef> factory_ref =
@@ -291,8 +378,11 @@ TEST_F(SharedImageBackingFactoryIOSurfaceTest, Dawn_SkiaGL) {
// Clear the shared image to green using Dawn.
{
- wgpu::Texture texture = wgpu::Texture::Acquire(
- dawn_representation->BeginAccess(WGPUTextureUsage_OutputAttachment));
+ auto scoped_access = dawn_representation->BeginScopedAccess(
+ WGPUTextureUsage_OutputAttachment,
+ SharedImageRepresentation::AllowUnclearedAccess::kYes);
+ ASSERT_TRUE(scoped_access);
+ wgpu::Texture texture = wgpu::Texture::Acquire(scoped_access->texture());
wgpu::RenderPassColorAttachmentDescriptor color_desc;
color_desc.attachment = texture.CreateView();
@@ -315,54 +405,248 @@ TEST_F(SharedImageBackingFactoryIOSurfaceTest, Dawn_SkiaGL) {
queue.Submit(1, &commands);
}
- dawn_representation->EndAccess();
+ CheckSkiaPixels(mailbox, size, {0, 255, 0, 255});
- // Next create a SharedImageRepresentationSkia to read back the texture data.
- auto skia_representation = shared_image_representation_factory_->ProduceSkia(
- mailbox, context_state_);
- EXPECT_TRUE(skia_representation);
- base::Optional<SharedImageRepresentationSkia::ScopedReadAccess>
- scoped_read_access;
- scoped_read_access.emplace(skia_representation.get(), nullptr, nullptr);
- auto* promise_texture = scoped_read_access->promise_image_texture();
- EXPECT_TRUE(promise_texture);
- GrBackendTexture backend_texture = promise_texture->backendTexture();
- EXPECT_TRUE(backend_texture.isValid());
- EXPECT_EQ(size.width(), backend_texture.width());
- EXPECT_EQ(size.height(), backend_texture.height());
+ // Shut down Dawn
+ device = wgpu::Device();
+ dawnProcSetProcs(nullptr);
- // Create an Sk Image from GrBackendTexture.
- auto sk_image = SkImage::MakeFromTexture(
- gr_context(), promise_texture->backendTexture(), kTopLeft_GrSurfaceOrigin,
- kRGBA_8888_SkColorType, kOpaque_SkAlphaType, nullptr);
+ factory_ref.reset();
+ EXPECT_FALSE(mailbox_manager_.ConsumeTexture(mailbox));
+}
+
+// 1. Draw a color to the texture through GL.
+// 2. Do not call SetCleared so we can test Dawn's lazy clear.
+// 3. Begin a render pass in Dawn, but do not do anything.
+// 4. Verify through CheckSkiaPixels that the GL-drawn color is not seen.
+TEST_F(SharedImageBackingFactoryIOSurfaceTest, GL_Dawn_Skia_UnclearTexture) {
+ // Create a backing using mailbox.
+ auto mailbox = Mailbox::GenerateForSharedImage();
+ const auto format = viz::ResourceFormat::RGBA_8888;
+ const gfx::Size size(1, 1);
+ const auto color_space = gfx::ColorSpace::CreateSRGB();
+ const gpu::SurfaceHandle surface_handle = gpu::kNullSurfaceHandle;
+ const uint32_t usage = SHARED_IMAGE_USAGE_GLES2 | SHARED_IMAGE_USAGE_DISPLAY |
+ SHARED_IMAGE_USAGE_WEBGPU;
+ auto backing = backing_factory_->CreateSharedImage(
+ mailbox, format, surface_handle, size, color_space, usage,
+ false /* is_thread_safe */);
+ EXPECT_TRUE(backing);
- SkImageInfo dst_info =
- SkImageInfo::Make(size.width(), size.height(), kRGBA_8888_SkColorType,
- kOpaque_SkAlphaType, nullptr);
+ GLenum expected_target = GL_TEXTURE_RECTANGLE;
+ std::unique_ptr<SharedImageRepresentationFactoryRef> factory_ref =
+ shared_image_manager_.Register(std::move(backing),
+ memory_type_tracker_.get());
- const int num_pixels = size.width() * size.height();
- std::unique_ptr<uint8_t[]> dst_pixels(new uint8_t[num_pixels * 4]());
+ {
+ // Create a SharedImageRepresentationGLTexture.
+ auto gl_representation =
+ shared_image_representation_factory_->ProduceGLTexture(mailbox);
+ EXPECT_TRUE(gl_representation);
+ EXPECT_EQ(expected_target, gl_representation->GetTexture()->target());
+
+ std::unique_ptr<SharedImageRepresentationGLTexturePassthrough::ScopedAccess>
+ gl_scoped_access = gl_representation->BeginScopedAccess(
+ GL_SHARED_IMAGE_ACCESS_MODE_READWRITE_CHROMIUM,
+ SharedImageRepresentation::AllowUnclearedAccess::kYes);
+ EXPECT_TRUE(gl_scoped_access);
+
+ // Create an FBO.
+ GLuint fbo = 0;
+ gl::GLApi* api = gl::g_current_gl_context;
+ api->glGenFramebuffersEXTFn(1, &fbo);
+ api->glBindFramebufferEXTFn(GL_FRAMEBUFFER, fbo);
+
+ // Attach the texture to FBO.
+ api->glFramebufferTexture2DEXTFn(
+ GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0,
+ gl_representation->GetTexture()->target(),
+ gl_representation->GetTexture()->service_id(), 0);
+
+ // Set the clear color to green.
+ api->glClearColorFn(0.0f, 1.0f, 0.0f, 1.0f);
+ api->glClearFn(GL_COLOR_BUFFER_BIT);
+
+      // Don't set cleared; we want to see if Dawn lazy clears the texture.
+ EXPECT_FALSE(factory_ref->IsCleared());
+ }
- // Read back pixels from Sk Image.
- EXPECT_TRUE(sk_image->readPixels(dst_info, dst_pixels.get(),
- dst_info.minRowBytes(), 0, 0));
- scoped_read_access.reset();
+ // Create a Dawn Metal device
+ dawn_native::Instance instance;
+ instance.DiscoverDefaultAdapters();
- // Compare the pixel values.
- EXPECT_EQ(dst_pixels[0], 0);
- EXPECT_EQ(dst_pixels[1], 255);
- EXPECT_EQ(dst_pixels[2], 0);
- EXPECT_EQ(dst_pixels[3], 255);
+ std::vector<dawn_native::Adapter> adapters = instance.GetAdapters();
+ auto adapter_it = std::find_if(
+ adapters.begin(), adapters.end(), [](dawn_native::Adapter adapter) {
+ return adapter.GetBackendType() == dawn_native::BackendType::Metal;
+ });
+ ASSERT_NE(adapter_it, adapters.end());
+
+ wgpu::Device device = wgpu::Device::Acquire(adapter_it->CreateDevice());
+ DawnProcTable procs = dawn_native::GetProcs();
+ dawnProcSetProcs(&procs);
+ {
+ auto dawn_representation =
+ shared_image_representation_factory_->ProduceDawn(mailbox,
+ device.Get());
+ ASSERT_TRUE(dawn_representation);
+
+ auto dawn_scoped_access = dawn_representation->BeginScopedAccess(
+ WGPUTextureUsage_OutputAttachment,
+ SharedImageRepresentation::AllowUnclearedAccess::kYes);
+ ASSERT_TRUE(dawn_scoped_access);
+
+ wgpu::Texture texture =
+ wgpu::Texture::Acquire(dawn_scoped_access->texture());
+ wgpu::RenderPassColorAttachmentDescriptor color_desc;
+ color_desc.attachment = texture.CreateView();
+ color_desc.resolveTarget = nullptr;
+ color_desc.loadOp = wgpu::LoadOp::Load;
+ color_desc.storeOp = wgpu::StoreOp::Store;
+
+ wgpu::RenderPassDescriptor renderPassDesc;
+ renderPassDesc.colorAttachmentCount = 1;
+ renderPassDesc.colorAttachments = &color_desc;
+ renderPassDesc.depthStencilAttachment = nullptr;
+
+ wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+ wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPassDesc);
+ pass.EndPass();
+ wgpu::CommandBuffer commands = encoder.Finish();
+
+ wgpu::Queue queue = device.CreateQueue();
+ queue.Submit(1, &commands);
+ }
+
+  // Check that the Skia read-back is black, since the texture was lazy
+  // cleared in Dawn.
+ EXPECT_TRUE(factory_ref->IsCleared());
+ CheckSkiaPixels(mailbox, size, {0, 0, 0, 0});
// Shut down Dawn
device = wgpu::Device();
dawnProcSetProcs(nullptr);
- skia_representation.reset();
factory_ref.reset();
- EXPECT_FALSE(mailbox_manager_.ConsumeTexture(mailbox));
+}
+
+// 1. Draw a color to the texture through Dawn.
+// 2. Set the render pass storeOp = Clear.
+// 3. The texture in Dawn will stay uninitialized.
+// 4. Expect Skia to fail to access the texture because the texture is not
+//    initialized.
+TEST_F(SharedImageBackingFactoryIOSurfaceTest, UnclearDawn_SkiaFails) {
+ // Create a backing using mailbox.
+ auto mailbox = Mailbox::GenerateForSharedImage();
+ const auto format = viz::ResourceFormat::RGBA_8888;
+ const gfx::Size size(1, 1);
+ const auto color_space = gfx::ColorSpace::CreateSRGB();
+ const uint32_t usage = SHARED_IMAGE_USAGE_GLES2 | SHARED_IMAGE_USAGE_DISPLAY |
+ SHARED_IMAGE_USAGE_WEBGPU;
+ const gpu::SurfaceHandle surface_handle = gpu::kNullSurfaceHandle;
+ auto backing = backing_factory_->CreateSharedImage(
+ mailbox, format, surface_handle, size, color_space, usage,
+ false /* is_thread_safe */);
+ ASSERT_NE(backing, nullptr);
+
+ std::unique_ptr<SharedImageRepresentationFactoryRef> factory_ref =
+ shared_image_manager_.Register(std::move(backing),
+ memory_type_tracker_.get());
+
+ // Create dawn device
+ dawn_native::Instance instance;
+ instance.DiscoverDefaultAdapters();
+
+ std::vector<dawn_native::Adapter> adapters = instance.GetAdapters();
+ auto adapter_it = std::find_if(
+ adapters.begin(), adapters.end(), [](dawn_native::Adapter adapter) {
+ return adapter.GetBackendType() == dawn_native::BackendType::Metal;
+ });
+ ASSERT_NE(adapter_it, adapters.end());
+
+ wgpu::Device device = wgpu::Device::Acquire(adapter_it->CreateDevice());
+ DawnProcTable procs = dawn_native::GetProcs();
+ dawnProcSetProcs(&procs);
+ {
+ auto dawn_representation =
+ shared_image_representation_factory_->ProduceDawn(mailbox,
+ device.Get());
+ ASSERT_TRUE(dawn_representation);
+
+ auto dawn_scoped_access = dawn_representation->BeginScopedAccess(
+ WGPUTextureUsage_OutputAttachment,
+ SharedImageRepresentation::AllowUnclearedAccess::kYes);
+ ASSERT_TRUE(dawn_scoped_access);
+
+ wgpu::Texture texture =
+ wgpu::Texture::Acquire(dawn_scoped_access->texture());
+ wgpu::RenderPassColorAttachmentDescriptor color_desc;
+ color_desc.attachment = texture.CreateView();
+ color_desc.resolveTarget = nullptr;
+ color_desc.loadOp = wgpu::LoadOp::Clear;
+ color_desc.storeOp = wgpu::StoreOp::Clear;
+ color_desc.clearColor = {0, 255, 0, 255};
+
+ wgpu::RenderPassDescriptor renderPassDesc;
+ renderPassDesc.colorAttachmentCount = 1;
+ renderPassDesc.colorAttachments = &color_desc;
+ renderPassDesc.depthStencilAttachment = nullptr;
+
+ wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+ wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPassDesc);
+ pass.EndPass();
+ wgpu::CommandBuffer commands = encoder.Finish();
+
+ wgpu::Queue queue = device.CreateQueue();
+ queue.Submit(1, &commands);
+ }
+
+ // Shut down Dawn
+ device = wgpu::Device();
+ dawnProcSetProcs(nullptr);
+
+ EXPECT_FALSE(factory_ref->IsCleared());
+
+ // Produce skia representation
+ auto skia_representation = shared_image_representation_factory_->ProduceSkia(
+ mailbox, context_state_);
+ ASSERT_NE(skia_representation, nullptr);
+
+  // Expect BeginScopedReadAccess to fail because the SharedImage is
+  // uninitialized.
+ std::unique_ptr<SharedImageRepresentationSkia::ScopedReadAccess>
+ scoped_read_access =
+ skia_representation->BeginScopedReadAccess(nullptr, nullptr);
+ EXPECT_EQ(scoped_read_access, nullptr);
}
#endif // BUILDFLAG(USE_DAWN)
+// Test that Skia trying to access an uninitialized SharedImage will fail.
+TEST_F(SharedImageBackingFactoryIOSurfaceTest, SkiaAccessFirstFails) {
+ // Create a mailbox.
+ auto mailbox = Mailbox::GenerateForSharedImage();
+ const auto format = viz::ResourceFormat::RGBA_8888;
+ const gfx::Size size(1, 1);
+ const auto color_space = gfx::ColorSpace::CreateSRGB();
+ const uint32_t usage = SHARED_IMAGE_USAGE_GLES2 | SHARED_IMAGE_USAGE_DISPLAY;
+ const gpu::SurfaceHandle surface_handle = gpu::kNullSurfaceHandle;
+ auto backing = backing_factory_->CreateSharedImage(
+ mailbox, format, surface_handle, size, color_space, usage,
+ false /* is_thread_safe */);
+ ASSERT_NE(backing, nullptr);
+
+ std::unique_ptr<SharedImageRepresentationFactoryRef> factory_ref =
+ shared_image_manager_.Register(std::move(backing),
+ memory_type_tracker_.get());
+
+ auto skia_representation = shared_image_representation_factory_->ProduceSkia(
+ mailbox, context_state_);
+ ASSERT_NE(skia_representation, nullptr);
+ EXPECT_FALSE(skia_representation->IsCleared());
+
+ std::unique_ptr<SharedImageRepresentationSkia::ScopedReadAccess>
+ scoped_read_access =
+ skia_representation->BeginScopedReadAccess(nullptr, nullptr);
+  // Expect BeginScopedReadAccess to fail because the SharedImage is
+  // uninitialized.
+ EXPECT_EQ(scoped_read_access, nullptr);
+}
} // anonymous namespace
} // namespace gpu
diff --git a/chromium/gpu/command_buffer/service/shared_image_backing_factory_ozone.cc b/chromium/gpu/command_buffer/service/shared_image_backing_factory_ozone.cc
new file mode 100644
index 00000000000..f91847ed2b2
--- /dev/null
+++ b/chromium/gpu/command_buffer/service/shared_image_backing_factory_ozone.cc
@@ -0,0 +1,74 @@
+// Copyright 2020 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "gpu/command_buffer/service/shared_image_backing_factory_ozone.h"
+
+#include <dawn/dawn_proc_table.h>
+#include <dawn_native/DawnNative.h>
+
+#include "base/logging.h"
+#include "gpu/command_buffer/service/shared_image_backing_ozone.h"
+#include "ui/gl/buildflags.h"
+
+namespace gpu {
+
+SharedImageBackingFactoryOzone::SharedImageBackingFactoryOzone(
+ SharedContextState* shared_context_state)
+ : shared_context_state_(shared_context_state) {
+#if BUILDFLAG(USE_DAWN)
+ dawn_procs_ = base::MakeRefCounted<base::RefCountedData<DawnProcTable>>(
+ dawn_native::GetProcs());
+#endif // BUILDFLAG(USE_DAWN)
+}
+
+SharedImageBackingFactoryOzone::~SharedImageBackingFactoryOzone() = default;
+
+std::unique_ptr<SharedImageBacking>
+SharedImageBackingFactoryOzone::CreateSharedImage(
+ const Mailbox& mailbox,
+ viz::ResourceFormat format,
+ SurfaceHandle surface_handle,
+ const gfx::Size& size,
+ const gfx::ColorSpace& color_space,
+ uint32_t usage,
+ bool is_thread_safe) {
+ DCHECK(!is_thread_safe);
+ return SharedImageBackingOzone::Create(dawn_procs_, shared_context_state_,
+ mailbox, format, size, color_space,
+ usage, surface_handle);
+}
+
+std::unique_ptr<SharedImageBacking>
+SharedImageBackingFactoryOzone::CreateSharedImage(
+ const Mailbox& mailbox,
+ viz::ResourceFormat format,
+ const gfx::Size& size,
+ const gfx::ColorSpace& color_space,
+ uint32_t usage,
+ base::span<const uint8_t> pixel_data) {
+ NOTIMPLEMENTED_LOG_ONCE();
+ return nullptr;
+}
+
+std::unique_ptr<SharedImageBacking>
+SharedImageBackingFactoryOzone::CreateSharedImage(
+ const Mailbox& mailbox,
+ int client_id,
+ gfx::GpuMemoryBufferHandle handle,
+ gfx::BufferFormat buffer_format,
+ SurfaceHandle surface_handle,
+ const gfx::Size& size,
+ const gfx::ColorSpace& color_space,
+ uint32_t usage) {
+ NOTIMPLEMENTED_LOG_ONCE();
+ return nullptr;
+}
+
+bool SharedImageBackingFactoryOzone::CanImportGpuMemoryBuffer(
+ gfx::GpuMemoryBufferType memory_buffer_type) {
+ NOTIMPLEMENTED_LOG_ONCE();
+ return false;
+}
+
+} // namespace gpu
diff --git a/chromium/gpu/command_buffer/service/shared_image_backing_factory_ozone.h b/chromium/gpu/command_buffer/service/shared_image_backing_factory_ozone.h
new file mode 100644
index 00000000000..f843faa1bfd
--- /dev/null
+++ b/chromium/gpu/command_buffer/service/shared_image_backing_factory_ozone.h
@@ -0,0 +1,66 @@
+// Copyright 2020 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef GPU_COMMAND_BUFFER_SERVICE_SHARED_IMAGE_BACKING_FACTORY_OZONE_H_
+#define GPU_COMMAND_BUFFER_SERVICE_SHARED_IMAGE_BACKING_FACTORY_OZONE_H_
+
+#include "base/memory/ref_counted.h"
+#include "base/memory/scoped_refptr.h"
+#include "gpu/command_buffer/service/shared_image_backing_factory.h"
+#include "gpu/gpu_gles2_export.h"
+
+struct DawnProcTable;
+
+namespace gpu {
+
+class SharedContextState;
+
+// Implementation of SharedImageBackingFactory that produces NativePixmap
+// backed SharedImages.
+class GPU_GLES2_EXPORT SharedImageBackingFactoryOzone
+ : public SharedImageBackingFactory {
+ public:
+ SharedImageBackingFactoryOzone(SharedContextState* shared_context_state);
+
+ ~SharedImageBackingFactoryOzone() override;
+
+ // SharedImageBackingFactory implementation
+ std::unique_ptr<SharedImageBacking> CreateSharedImage(
+ const Mailbox& mailbox,
+ viz::ResourceFormat format,
+ SurfaceHandle surface_handle,
+ const gfx::Size& size,
+ const gfx::ColorSpace& color_space,
+ uint32_t usage,
+ bool is_thread_safe) override;
+
+ std::unique_ptr<SharedImageBacking> CreateSharedImage(
+ const Mailbox& mailbox,
+ viz::ResourceFormat format,
+ const gfx::Size& size,
+ const gfx::ColorSpace& color_space,
+ uint32_t usage,
+ base::span<const uint8_t> pixel_data) override;
+
+ std::unique_ptr<SharedImageBacking> CreateSharedImage(
+ const Mailbox& mailbox,
+ int client_id,
+ gfx::GpuMemoryBufferHandle handle,
+ gfx::BufferFormat format,
+ SurfaceHandle surface_handle,
+ const gfx::Size& size,
+ const gfx::ColorSpace& color_space,
+ uint32_t usage) override;
+
+ bool CanImportGpuMemoryBuffer(
+ gfx::GpuMemoryBufferType memory_buffer_type) override;
+
+ private:
+ SharedContextState* const shared_context_state_;
+ scoped_refptr<base::RefCountedData<DawnProcTable>> dawn_procs_ = nullptr;
+};
+
+} // namespace gpu
+
+#endif // GPU_COMMAND_BUFFER_SERVICE_SHARED_IMAGE_BACKING_FACTORY_OZONE_H_
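For orientation, a minimal sketch (not part of the patch) of how the new Ozone factory is meant to be exercised; it assumes an already-initialized SharedContextState pointer |context_state| and reuses only types and constants that appear elsewhere in this change.

  // Sketch only: create one NativePixmap-backed SharedImage via the factory.
  auto factory =
      std::make_unique<gpu::SharedImageBackingFactoryOzone>(context_state);
  auto mailbox = gpu::Mailbox::GenerateForSharedImage();
  std::unique_ptr<gpu::SharedImageBacking> backing = factory->CreateSharedImage(
      mailbox, viz::ResourceFormat::RGBA_8888, gpu::kNullSurfaceHandle,
      gfx::Size(256, 256), gfx::ColorSpace::CreateSRGB(),
      gpu::SHARED_IMAGE_USAGE_DISPLAY, /*is_thread_safe=*/false);
  // The pixel-data and GpuMemoryBuffer overloads are NOTIMPLEMENTED in this
  // factory and return nullptr.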
diff --git a/chromium/gpu/command_buffer/service/shared_image_backing_ozone.cc b/chromium/gpu/command_buffer/service/shared_image_backing_ozone.cc
index 84bf3dbee62..5354bf4a389 100644
--- a/chromium/gpu/command_buffer/service/shared_image_backing_ozone.cc
+++ b/chromium/gpu/command_buffer/service/shared_image_backing_ozone.cc
@@ -11,6 +11,7 @@
#include "base/logging.h"
#include "base/memory/ptr_util.h"
+#include "base/memory/ref_counted.h"
#include "base/memory/scoped_refptr.h"
#include "components/viz/common/gpu/vulkan_context_provider.h"
#include "components/viz/common/resources/resource_format.h"
@@ -22,6 +23,8 @@
#include "gpu/command_buffer/service/shared_context_state.h"
#include "gpu/command_buffer/service/shared_image_manager.h"
#include "gpu/command_buffer/service/shared_image_representation.h"
+#include "gpu/command_buffer/service/shared_image_representation_gl_ozone.h"
+#include "gpu/command_buffer/service/shared_image_representation_skia_gl.h"
#include "gpu/vulkan/vulkan_device_queue.h"
#include "ui/gfx/buffer_format_util.h"
#include "ui/gfx/buffer_types.h"
@@ -31,9 +34,14 @@
#include "ui/gfx/gpu_fence.h"
#include "ui/gfx/native_pixmap.h"
#include "ui/gfx/native_widget_types.h"
+#include "ui/gl/buildflags.h"
#include "ui/ozone/public/ozone_platform.h"
#include "ui/ozone/public/surface_factory_ozone.h"
+#if BUILDFLAG(USE_DAWN)
+#include "gpu/command_buffer/service/shared_image_representation_dawn_ozone.h"
+#endif // BUILDFLAG(USE_DAWN)
+
namespace gpu {
namespace {
@@ -57,12 +65,14 @@ gfx::BufferUsage GetBufferUsage(uint32_t usage) {
} // namespace
std::unique_ptr<SharedImageBackingOzone> SharedImageBackingOzone::Create(
+ scoped_refptr<base::RefCountedData<DawnProcTable>> dawn_procs,
SharedContextState* context_state,
const Mailbox& mailbox,
viz::ResourceFormat format,
const gfx::Size& size,
const gfx::ColorSpace& color_space,
- uint32_t usage) {
+ uint32_t usage,
+ SurfaceHandle surface_handle) {
gfx::BufferFormat buffer_format = viz::BufferFormat(format);
gfx::BufferUsage buffer_usage = GetBufferUsage(usage);
VkDevice vk_device = VK_NULL_HANDLE;
@@ -75,35 +85,23 @@ std::unique_ptr<SharedImageBackingOzone> SharedImageBackingOzone::Create(
ui::SurfaceFactoryOzone* surface_factory =
ui::OzonePlatform::GetInstance()->GetSurfaceFactoryOzone();
scoped_refptr<gfx::NativePixmap> pixmap = surface_factory->CreateNativePixmap(
- gfx::kNullAcceleratedWidget, vk_device, size, buffer_format,
- buffer_usage);
+ surface_handle, vk_device, size, buffer_format, buffer_usage);
if (!pixmap) {
return nullptr;
}
- return base::WrapUnique(
- new SharedImageBackingOzone(mailbox, format, size, color_space, usage,
- context_state, std::move(pixmap)));
+ return base::WrapUnique(new SharedImageBackingOzone(
+ mailbox, format, size, color_space, usage, context_state,
+ std::move(pixmap), std::move(dawn_procs)));
}
SharedImageBackingOzone::~SharedImageBackingOzone() = default;
-bool SharedImageBackingOzone::IsCleared() const {
- NOTIMPLEMENTED_LOG_ONCE();
- return false;
-}
-
-void SharedImageBackingOzone::SetCleared() {
- NOTIMPLEMENTED_LOG_ONCE();
-}
-
void SharedImageBackingOzone::Update(std::unique_ptr<gfx::GpuFence> in_fence) {
NOTIMPLEMENTED_LOG_ONCE();
return;
}
-void SharedImageBackingOzone::Destroy() {}
-
bool SharedImageBackingOzone::ProduceLegacyMailbox(
MailboxManager* mailbox_manager) {
NOTREACHED();
@@ -114,15 +112,24 @@ std::unique_ptr<SharedImageRepresentationDawn>
SharedImageBackingOzone::ProduceDawn(SharedImageManager* manager,
MemoryTypeTracker* tracker,
WGPUDevice device) {
- NOTIMPLEMENTED_LOG_ONCE();
+#if BUILDFLAG(USE_DAWN)
+ DCHECK(dawn_procs_);
+ WGPUTextureFormat webgpu_format = viz::ToWGPUFormat(format());
+ if (webgpu_format == WGPUTextureFormat_Undefined) {
+ return nullptr;
+ }
+ return std::make_unique<SharedImageRepresentationDawnOzone>(
+ manager, this, tracker, device, webgpu_format, pixmap_, dawn_procs_);
+#else // !BUILDFLAG(USE_DAWN)
return nullptr;
+#endif
}
std::unique_ptr<SharedImageRepresentationGLTexture>
SharedImageBackingOzone::ProduceGLTexture(SharedImageManager* manager,
MemoryTypeTracker* tracker) {
- NOTIMPLEMENTED_LOG_ONCE();
- return nullptr;
+ return SharedImageRepresentationGLOzone::Create(manager, this, tracker,
+ pixmap_, format());
}
std::unique_ptr<SharedImageRepresentationGLTexturePassthrough>
@@ -138,6 +145,23 @@ SharedImageBackingOzone::ProduceSkia(
SharedImageManager* manager,
MemoryTypeTracker* tracker,
scoped_refptr<SharedContextState> context_state) {
+ if (context_state->GrContextIsGL()) {
+ auto gl_representation = ProduceGLTexture(manager, tracker);
+ if (!gl_representation) {
+ LOG(ERROR) << "SharedImageBackingOzone::ProduceSkia failed to create GL "
+ "representation";
+ return nullptr;
+ }
+ auto skia_representation = SharedImageRepresentationSkiaGL::Create(
+ std::move(gl_representation), std::move(context_state), manager, this,
+ tracker);
+ if (!skia_representation) {
+ LOG(ERROR) << "SharedImageBackingOzone::ProduceSkia failed to create "
+ "Skia representation";
+ return nullptr;
+ }
+ return skia_representation;
+ }
NOTIMPLEMENTED_LOG_ONCE();
return nullptr;
}
@@ -156,14 +180,16 @@ SharedImageBackingOzone::SharedImageBackingOzone(
const gfx::ColorSpace& color_space,
uint32_t usage,
SharedContextState* context_state,
- scoped_refptr<gfx::NativePixmap> pixmap)
- : SharedImageBacking(mailbox,
- format,
- size,
- color_space,
- usage,
- GetPixmapSizeInBytes(*pixmap),
- false),
- pixmap_(std::move(pixmap)) {}
+ scoped_refptr<gfx::NativePixmap> pixmap,
+ scoped_refptr<base::RefCountedData<DawnProcTable>> dawn_procs)
+ : ClearTrackingSharedImageBacking(mailbox,
+ format,
+ size,
+ color_space,
+ usage,
+ GetPixmapSizeInBytes(*pixmap),
+ false),
+ pixmap_(std::move(pixmap)),
+ dawn_procs_(std::move(dawn_procs)) {}
} // namespace gpu
diff --git a/chromium/gpu/command_buffer/service/shared_image_backing_ozone.h b/chromium/gpu/command_buffer/service/shared_image_backing_ozone.h
index 08d93456272..4427150af02 100644
--- a/chromium/gpu/command_buffer/service/shared_image_backing_ozone.h
+++ b/chromium/gpu/command_buffer/service/shared_image_backing_ozone.h
@@ -10,6 +10,7 @@
#include <memory>
#include "base/macros.h"
+#include "base/memory/ref_counted.h"
#include "base/memory/scoped_refptr.h"
#include "components/viz/common/resources/resource_format.h"
#include "gpu/command_buffer/common/mailbox.h"
@@ -19,6 +20,7 @@
#include "gpu/command_buffer/service/shared_image_backing.h"
#include "gpu/command_buffer/service/shared_image_manager.h"
#include "gpu/command_buffer/service/shared_image_representation.h"
+#include "gpu/ipc/common/surface_handle.h"
#include "ui/gfx/color_space.h"
#include "ui/gfx/geometry/size.h"
#include "ui/gfx/gpu_fence.h"
@@ -29,22 +31,21 @@ namespace gpu {
// Implementation of SharedImageBacking that uses a NativePixmap created via
// an Ozone surface factory. The memory associated with the pixmap can be
// aliased by both GL and Vulkan for use in rendering or compositing.
-class SharedImageBackingOzone final : public SharedImageBacking {
+class SharedImageBackingOzone final : public ClearTrackingSharedImageBacking {
public:
static std::unique_ptr<SharedImageBackingOzone> Create(
+ scoped_refptr<base::RefCountedData<DawnProcTable>> dawn_procs,
SharedContextState* context_state,
const Mailbox& mailbox,
viz::ResourceFormat format,
const gfx::Size& size,
const gfx::ColorSpace& color_space,
- uint32_t usage);
+ uint32_t usage,
+ SurfaceHandle surface_handle);
~SharedImageBackingOzone() override;
// gpu::SharedImageBacking:
- bool IsCleared() const override;
- void SetCleared() override;
void Update(std::unique_ptr<gfx::GpuFence> in_fence) override;
- void Destroy() override;
bool ProduceLegacyMailbox(MailboxManager* mailbox_manager) override;
protected:
@@ -67,15 +68,18 @@ class SharedImageBackingOzone final : public SharedImageBacking {
MemoryTypeTracker* tracker) override;
private:
- SharedImageBackingOzone(const Mailbox& mailbox,
- viz::ResourceFormat format,
- const gfx::Size& size,
- const gfx::ColorSpace& color_space,
- uint32_t usage,
- SharedContextState* context_state,
- scoped_refptr<gfx::NativePixmap> pixmap);
+ SharedImageBackingOzone(
+ const Mailbox& mailbox,
+ viz::ResourceFormat format,
+ const gfx::Size& size,
+ const gfx::ColorSpace& color_space,
+ uint32_t usage,
+ SharedContextState* context_state,
+ scoped_refptr<gfx::NativePixmap> pixmap,
+ scoped_refptr<base::RefCountedData<DawnProcTable>> dawn_procs);
scoped_refptr<gfx::NativePixmap> pixmap_;
+ scoped_refptr<base::RefCountedData<DawnProcTable>> dawn_procs_;
DISALLOW_COPY_AND_ASSIGN(SharedImageBackingOzone);
};
diff --git a/chromium/gpu/command_buffer/service/shared_image_batch_access_manager.cc b/chromium/gpu/command_buffer/service/shared_image_batch_access_manager.cc
new file mode 100644
index 00000000000..6b70249884e
--- /dev/null
+++ b/chromium/gpu/command_buffer/service/shared_image_batch_access_manager.cc
@@ -0,0 +1,80 @@
+// Copyright 2020 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "gpu/command_buffer/service/shared_image_batch_access_manager.h"
+
+#include "gpu/command_buffer/service/shared_image_backing_egl_image.h"
+#include "ui/gl/shared_gl_fence_egl.h"
+
+namespace gpu {
+
+SharedImageBatchAccessManager::SharedImageBatchAccessManager() = default;
+SharedImageBatchAccessManager::~SharedImageBatchAccessManager() = default;
+
+bool SharedImageBatchAccessManager::IsDoingBatchReads() {
+ base::AutoLock lock(lock_);
+
+ auto it = backings_.find(gl::g_current_gl_context);
+ return (it != backings_.end());
+}
+
+void SharedImageBatchAccessManager::RegisterEglBackingForEndReadFence(
+ SharedImageBackingEglImage* egl_backing) {
+ base::AutoLock lock(lock_);
+
+ auto it = backings_.find(gl::g_current_gl_context);
+ DCHECK(it != backings_.end());
+
+ it->second.emplace(egl_backing);
+}
+
+void SharedImageBatchAccessManager::UnregisterEglBacking(
+ SharedImageBackingEglImage* egl_backing) {
+ base::AutoLock lock(lock_);
+
+  // Search for this backing on all contexts, since the backing could be
+  // destroyed from any context.
+ for (auto& it : backings_)
+ it.second.erase(egl_backing);
+}
+
+bool SharedImageBatchAccessManager::BeginBatchReadAccess() {
+ base::AutoLock lock(lock_);
+
+  // On a given context, only one batch access should be active, so return
+  // false if an entry for this context already exists.
+ if (backings_.find(gl::g_current_gl_context) != backings_.end())
+ return false;
+
+ backings_.emplace(gl::g_current_gl_context, SetOfBackings());
+ return true;
+}
+
+bool SharedImageBatchAccessManager::EndBatchReadAccess() {
+ base::AutoLock lock(lock_);
+
+  // A batch access should already be active on this context, started by the
+  // corresponding BeginBatchReadAccess().
+ auto it = backings_.find(gl::g_current_gl_context);
+ if (it == backings_.end())
+ return false;
+
+  // If there are registered backings, create the EGL fence and supply it to
+  // the backings.
+ if (!it->second.empty()) {
+    // Create a shared EGL fence.
+ auto shared_egl_fence = base::MakeRefCounted<gl::SharedGLFenceEGL>();
+
+ // Pass it to all the registered backings.
+ for (auto* registered_backing : it->second) {
+ registered_backing->SetEndReadFence(shared_egl_fence);
+ }
+ }
+
+ // Remove the entry for this context.
+ backings_.erase(it);
+ return true;
+}
+
+} // namespace gpu
diff --git a/chromium/gpu/command_buffer/service/shared_image_batch_access_manager.h b/chromium/gpu/command_buffer/service/shared_image_batch_access_manager.h
new file mode 100644
index 00000000000..af26af69860
--- /dev/null
+++ b/chromium/gpu/command_buffer/service/shared_image_batch_access_manager.h
@@ -0,0 +1,48 @@
+// Copyright 2020 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef GPU_COMMAND_BUFFER_SERVICE_SHARED_IMAGE_BATCH_ACCESS_MANAGER_H_
+#define GPU_COMMAND_BUFFER_SERVICE_SHARED_IMAGE_BATCH_ACCESS_MANAGER_H_
+
+#include <set>
+
+#include "base/containers/flat_map.h"
+#include "base/synchronization/lock.h"
+#include "base/thread_annotations.h"
+#include "gpu/gpu_gles2_export.h"
+#include "ui/gl/gl_bindings.h"
+
+namespace gpu {
+
+class SharedImageBackingEglImage;
+class SharedImageManager;
+
+class GPU_GLES2_EXPORT SharedImageBatchAccessManager {
+ public:
+ SharedImageBatchAccessManager();
+ ~SharedImageBatchAccessManager();
+
+ bool IsDoingBatchReads();
+
+ private:
+ friend class SharedImageManager;
+ friend class SharedImageBackingEglImage;
+
+ using SetOfBackings = std::set<SharedImageBackingEglImage*>;
+
+ void RegisterEglBackingForEndReadFence(
+ SharedImageBackingEglImage* egl_backing);
+ void UnregisterEglBacking(SharedImageBackingEglImage* egl_backing);
+ bool BeginBatchReadAccess();
+ bool EndBatchReadAccess();
+
+ base::Lock lock_;
+ base::flat_map<gl::GLApi*, SetOfBackings> backings_ GUARDED_BY(lock_);
+
+ DISALLOW_COPY_AND_ASSIGN(SharedImageBatchAccessManager);
+};
+
+} // namespace gpu
+
+#endif // GPU_COMMAND_BUFFER_SERVICE_SHARED_IMAGE_BATCH_ACCESS_MANAGER_H_
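A hedged sketch of the batch-read flow this manager enables (the call pattern is inferred from the code above, not prescribed by it): BeginBatchReadAccess() opens a batch on the current GL context, EGL-image backings register themselves while reads begin, and EndBatchReadAccess() hands one shared EGL fence to every registered backing.

  // Sketch only: expected usage on the service side of a GL context;
  // |shared_image_manager| is assumed to be a SharedImageManager*.
  if (shared_image_manager->BeginBatchReadAccess()) {
    // ... begin read access on several EGL-image backed SharedImages; each
    // backing calls RegisterEglBackingForEndReadFence() internally ...
    shared_image_manager->EndBatchReadAccess();  // one SharedGLFenceEGL for all
  }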
diff --git a/chromium/gpu/command_buffer/service/shared_image_factory.cc b/chromium/gpu/command_buffer/service/shared_image_factory.cc
index 8d2c5e131f9..e1848e8fed9 100644
--- a/chromium/gpu/command_buffer/service/shared_image_factory.cc
+++ b/chromium/gpu/command_buffer/service/shared_image_factory.cc
@@ -9,6 +9,7 @@
#include "base/strings/stringprintf.h"
#include "base/trace_event/memory_dump_manager.h"
#include "build/build_config.h"
+#include "components/viz/common/gpu/vulkan_context_provider.h"
#include "components/viz/common/resources/resource_format_utils.h"
#include "gpu/command_buffer/common/gpu_memory_buffer_support.h"
#include "gpu/command_buffer/common/shared_image_trace_utils.h"
@@ -27,12 +28,17 @@
#include "ui/gl/gl_implementation.h"
#include "ui/gl/trace_util.h"
-#if (defined(USE_X11) || defined(OS_FUCHSIA)) && BUILDFLAG(ENABLE_VULKAN)
+#if (defined(USE_X11) || defined(OS_FUCHSIA) || defined(OS_WIN)) && \
+ BUILDFLAG(ENABLE_VULKAN)
#include "gpu/command_buffer/service/external_vk_image_factory.h"
#elif defined(OS_ANDROID) && BUILDFLAG(ENABLE_VULKAN)
+#include "gpu/command_buffer/service/external_vk_image_factory.h"
#include "gpu/command_buffer/service/shared_image_backing_factory_ahardwarebuffer.h"
+#include "gpu/vulkan/vulkan_device_queue.h"
#elif defined(OS_MACOSX)
#include "gpu/command_buffer/service/shared_image_backing_factory_iosurface.h"
+#elif defined(OS_CHROMEOS)
+#include "gpu/command_buffer/service/shared_image_backing_factory_ozone.h"
#endif
#if defined(OS_WIN)
@@ -85,25 +91,46 @@ SharedImageFactory::SharedImageFactory(
bool use_gl = gl::GetGLImplementation() != gl::kGLImplementationNone;
if (use_gl) {
gl_backing_factory_ = std::make_unique<SharedImageBackingFactoryGLTexture>(
- gpu_preferences, workarounds, gpu_feature_info, image_factory);
+ gpu_preferences, workarounds, gpu_feature_info, image_factory,
+ shared_image_manager->batch_access_manager());
}
// For X11
-#if (defined(USE_X11) || defined(OS_FUCHSIA)) && BUILDFLAG(ENABLE_VULKAN)
+#if (defined(USE_X11) || defined(OS_FUCHSIA) || defined(OS_WIN)) && \
+ BUILDFLAG(ENABLE_VULKAN)
if (using_vulkan_) {
interop_backing_factory_ =
std::make_unique<ExternalVkImageFactory>(context_state);
}
#elif defined(OS_ANDROID) && BUILDFLAG(ENABLE_VULKAN)
// For Android
- interop_backing_factory_ = std::make_unique<SharedImageBackingFactoryAHB>(
- workarounds, gpu_feature_info);
+ if (using_vulkan_) {
+ external_vk_image_factory_ =
+ std::make_unique<ExternalVkImageFactory>(context_state);
+ const auto& enabled_extensions = context_state->vk_context_provider()
+ ->GetDeviceQueue()
+ ->enabled_extensions();
+ if (gfx::HasExtension(
+ enabled_extensions,
+ VK_ANDROID_EXTERNAL_MEMORY_ANDROID_HARDWARE_BUFFER_EXTENSION_NAME)) {
+ interop_backing_factory_ = std::make_unique<SharedImageBackingFactoryAHB>(
+ workarounds, gpu_feature_info);
+ }
+ } else {
+ interop_backing_factory_ = std::make_unique<SharedImageBackingFactoryAHB>(
+ workarounds, gpu_feature_info);
+ }
#elif defined(OS_MACOSX)
// OSX
DCHECK(!using_vulkan_);
interop_backing_factory_ =
std::make_unique<SharedImageBackingFactoryIOSurface>(
workarounds, gpu_feature_info, use_gl);
+#elif defined(OS_CHROMEOS)
+ if (context_state && context_state->vk_context_provider()) {
+ interop_backing_factory_ =
+ std::make_unique<SharedImageBackingFactoryOzone>(context_state);
+ }
#else
// Others
if (using_vulkan_)
@@ -120,8 +147,10 @@ SharedImageFactory::SharedImageFactory(
// For Windows
bool use_passthrough = gpu_preferences.use_passthrough_cmd_decoder &&
gles2::PassthroughCommandDecoderSupported();
- interop_backing_factory_ =
- std::make_unique<SharedImageBackingFactoryD3D>(use_passthrough);
+ if (use_passthrough) {
+ // Only supported for passthrough command decoder.
+ interop_backing_factory_ = std::make_unique<SharedImageBackingFactoryD3D>();
+ }
#endif // OS_WIN
#if defined(OS_FUCHSIA)
@@ -137,13 +166,15 @@ bool SharedImageFactory::CreateSharedImage(const Mailbox& mailbox,
viz::ResourceFormat format,
const gfx::Size& size,
const gfx::ColorSpace& color_space,
+ gpu::SurfaceHandle surface_handle,
uint32_t usage) {
bool allow_legacy_mailbox = false;
- auto* factory = GetFactoryByUsage(usage, &allow_legacy_mailbox);
+ auto* factory = GetFactoryByUsage(usage, format, &allow_legacy_mailbox);
if (!factory)
return false;
- auto backing = factory->CreateSharedImage(
- mailbox, format, size, color_space, usage, IsSharedBetweenThreads(usage));
+ auto backing = factory->CreateSharedImage(mailbox, format, surface_handle,
+ size, color_space, usage,
+ IsSharedBetweenThreads(usage));
return RegisterBacking(std::move(backing), allow_legacy_mailbox);
}
@@ -196,7 +227,9 @@ bool SharedImageFactory::CreateSharedImage(const Mailbox& mailbox,
// TODO(piman): depending on handle.type, choose platform-specific backing
// factory, e.g. SharedImageBackingFactoryAHB.
bool allow_legacy_mailbox = false;
- auto* factory = GetFactoryByUsage(usage, &allow_legacy_mailbox, handle.type);
+ auto resource_format = viz::GetResourceFormat(format);
+ auto* factory = GetFactoryByUsage(usage, resource_format,
+ &allow_legacy_mailbox, handle.type);
if (!factory)
return false;
auto backing =
@@ -334,12 +367,13 @@ void SharedImageFactory::RegisterSharedImageBackingFactoryForTesting(
bool SharedImageFactory::IsSharedBetweenThreads(uint32_t usage) {
// If |shared_image_manager_| is thread safe, it means the display is running
// on a separate thread (which uses a separate GL context or VkDeviceQueue).
- return shared_image_manager_->is_thread_safe() &&
+ return shared_image_manager_->display_context_on_another_thread() &&
(usage & SHARED_IMAGE_USAGE_DISPLAY);
}
SharedImageBackingFactory* SharedImageFactory::GetFactoryByUsage(
uint32_t usage,
+ viz::ResourceFormat format,
bool* allow_legacy_mailbox,
gfx::GpuMemoryBufferType gmb_type) {
if (backing_factory_for_testing_)
@@ -352,9 +386,10 @@ SharedImageBackingFactory* SharedImageFactory::GetFactoryByUsage(
using_metal_ && (usage & SHARED_IMAGE_USAGE_OOP_RASTERIZATION);
bool share_between_threads = IsSharedBetweenThreads(usage);
bool share_between_gl_vulkan = gl_usage && vulkan_usage;
- bool using_interop_factory = share_between_threads ||
- share_between_gl_vulkan || using_dawn ||
- share_between_gl_metal;
+ bool using_interop_factory = share_between_gl_vulkan || using_dawn ||
+ share_between_gl_metal ||
+ (usage & SHARED_IMAGE_USAGE_VIDEO_DECODE) ||
+ (share_between_threads && vulkan_usage);
// TODO(vasilyt): Android required AHB for overlays
// What about other platforms?
@@ -367,9 +402,9 @@ SharedImageBackingFactory* SharedImageFactory::GetFactoryByUsage(
constexpr auto kWrappedSkImageUsage = SHARED_IMAGE_USAGE_RASTER |
SHARED_IMAGE_USAGE_OOP_RASTERIZATION |
SHARED_IMAGE_USAGE_DISPLAY;
- bool using_wrapped_sk_image = wrapped_sk_image_factory_ &&
- (usage == kWrappedSkImageUsage) &&
- !using_interop_factory;
+ bool using_wrapped_sk_image =
+ wrapped_sk_image_factory_ && (usage == kWrappedSkImageUsage) &&
+ !using_interop_factory && !share_between_threads;
using_interop_factory |= vulkan_usage && !using_wrapped_sk_image;
if (gmb_type != gfx::EMPTY_BUFFER) {
@@ -389,17 +424,13 @@ SharedImageBackingFactory* SharedImageFactory::GetFactoryByUsage(
using_interop_factory |= interop_factory_supports_gmb;
}
- *allow_legacy_mailbox =
- !using_wrapped_sk_image && !using_interop_factory && !using_vulkan_;
+ *allow_legacy_mailbox = !using_wrapped_sk_image && !using_interop_factory &&
+ !using_vulkan_ && !share_between_threads;
if (using_wrapped_sk_image)
return wrapped_sk_image_factory_.get();
if (using_interop_factory) {
- LOG_IF(ERROR, !interop_backing_factory_)
- << "Unable to create SharedImage backing: GL / Vulkan interoperability "
- "is not supported on this platform";
-
// TODO(crbug.com/969114): Not all shared image factory implementations
// support concurrent read/write usage.
if (usage & SHARED_IMAGE_USAGE_CONCURRENT_READ_WRITE) {
@@ -408,7 +439,26 @@ SharedImageBackingFactory* SharedImageFactory::GetFactoryByUsage(
return nullptr;
}
+#if defined(OS_ANDROID)
+  // On Android, we sometimes choose the VkImage-based backing factory for
+  // interop if the format is not supported by the AHB backing factory.
+ auto* ahb_backing_factory = static_cast<SharedImageBackingFactoryAHB*>(
+ interop_backing_factory_.get());
+ if (ahb_backing_factory && ahb_backing_factory->IsFormatSupported(format))
+ return ahb_backing_factory;
+ if (share_between_threads) {
+ LOG(FATAL) << "ExternalVkImageFactory currently do not support "
+ "cross-thread usage.";
+ }
+ *allow_legacy_mailbox = false;
+ return external_vk_image_factory_.get();
+#else // defined(OS_ANDROID)
+ LOG_IF(ERROR, !interop_backing_factory_)
+ << "Unable to create SharedImage backing: GL / Vulkan interoperability "
+ "is not supported on this platform";
+
return interop_backing_factory_.get();
+#endif // !defined(OS_ANDROID)
}
return gl_backing_factory_.get();
diff --git a/chromium/gpu/command_buffer/service/shared_image_factory.h b/chromium/gpu/command_buffer/service/shared_image_factory.h
index 5dc0255ec89..0b91c32da13 100644
--- a/chromium/gpu/command_buffer/service/shared_image_factory.h
+++ b/chromium/gpu/command_buffer/service/shared_image_factory.h
@@ -25,6 +25,7 @@ class VulkanContextProvider;
} // namespace viz
namespace gpu {
+class ExternalVkImageFactory;
class GpuDriverBugWorkarounds;
class ImageFactory;
class MailboxManager;
@@ -62,6 +63,7 @@ class GPU_GLES2_EXPORT SharedImageFactory {
viz::ResourceFormat format,
const gfx::Size& size,
const gfx::ColorSpace& color_space,
+ gpu::SurfaceHandle surface_handle,
uint32_t usage);
bool CreateSharedImage(const Mailbox& mailbox,
viz::ResourceFormat format,
@@ -114,6 +116,7 @@ class GPU_GLES2_EXPORT SharedImageFactory {
bool IsSharedBetweenThreads(uint32_t usage);
SharedImageBackingFactory* GetFactoryByUsage(
uint32_t usage,
+ viz::ResourceFormat format,
bool* allow_legacy_mailbox,
gfx::GpuMemoryBufferType gmb_type = gfx::EMPTY_BUFFER);
MailboxManager* mailbox_manager_;
@@ -136,6 +139,13 @@ class GPU_GLES2_EXPORT SharedImageFactory {
// D3D12.
std::unique_ptr<SharedImageBackingFactory> interop_backing_factory_;
+#if defined(OS_ANDROID)
+  // On Android we have two interop factories, |interop_backing_factory_| and
+  // |external_vk_image_factory_|, and we choose one of them based on whether
+  // the AHB factory supports the requested format.
+ std::unique_ptr<ExternalVkImageFactory> external_vk_image_factory_;
+#endif
+
// Non-null if compositing with SkiaRenderer.
std::unique_ptr<raster::WrappedSkImageFactory> wrapped_sk_image_factory_;
diff --git a/chromium/gpu/command_buffer/service/shared_image_factory_unittest.cc b/chromium/gpu/command_buffer/service/shared_image_factory_unittest.cc
index 4bac263600c..22a310d87ef 100644
--- a/chromium/gpu/command_buffer/service/shared_image_factory_unittest.cc
+++ b/chromium/gpu/command_buffer/service/shared_image_factory_unittest.cc
@@ -63,9 +63,10 @@ TEST_F(SharedImageFactoryTest, Basic) {
auto format = viz::ResourceFormat::RGBA_8888;
gfx::Size size(256, 256);
auto color_space = gfx::ColorSpace::CreateSRGB();
+ gpu::SurfaceHandle surface_handle = gpu::kNullSurfaceHandle;
uint32_t usage = SHARED_IMAGE_USAGE_GLES2;
- EXPECT_TRUE(
- factory_->CreateSharedImage(mailbox, format, size, color_space, usage));
+ EXPECT_TRUE(factory_->CreateSharedImage(mailbox, format, size, color_space,
+ surface_handle, usage));
TextureBase* texture_base = mailbox_manager_.ConsumeTexture(mailbox);
// Validation of the produced backing/mailbox is handled in individual backing
// factory unittests.
@@ -79,11 +80,12 @@ TEST_F(SharedImageFactoryTest, DuplicateMailbox) {
auto format = viz::ResourceFormat::RGBA_8888;
gfx::Size size(256, 256);
auto color_space = gfx::ColorSpace::CreateSRGB();
+ gpu::SurfaceHandle surface_handle = gpu::kNullSurfaceHandle;
uint32_t usage = SHARED_IMAGE_USAGE_GLES2;
- EXPECT_TRUE(
- factory_->CreateSharedImage(mailbox, format, size, color_space, usage));
- EXPECT_FALSE(
- factory_->CreateSharedImage(mailbox, format, size, color_space, usage));
+ EXPECT_TRUE(factory_->CreateSharedImage(mailbox, format, size, color_space,
+ surface_handle, usage));
+ EXPECT_FALSE(factory_->CreateSharedImage(mailbox, format, size, color_space,
+ surface_handle, usage));
GpuPreferences preferences;
GpuDriverBugWorkarounds workarounds;
@@ -92,8 +94,8 @@ TEST_F(SharedImageFactoryTest, DuplicateMailbox) {
preferences, workarounds, GpuFeatureInfo(), nullptr, &mailbox_manager_,
&shared_image_manager_, &image_factory_, nullptr,
/*enable_wrapped_sk_image=*/false);
- EXPECT_FALSE(other_factory->CreateSharedImage(mailbox, format, size,
- color_space, usage));
+ EXPECT_FALSE(other_factory->CreateSharedImage(
+ mailbox, format, size, color_space, surface_handle, usage));
}
TEST_F(SharedImageFactoryTest, DestroyInexistentMailbox) {
diff --git a/chromium/gpu/command_buffer/service/shared_image_manager.cc b/chromium/gpu/command_buffer/service/shared_image_manager.cc
index 0ce71513416..e4f292e9ccb 100644
--- a/chromium/gpu/command_buffer/service/shared_image_manager.cc
+++ b/chromium/gpu/command_buffer/service/shared_image_manager.cc
@@ -10,6 +10,7 @@
#include <utility>
#include "base/logging.h"
+#include "base/metrics/histogram_macros.h"
#include "base/strings/stringprintf.h"
#include "base/trace_event/memory_dump_manager.h"
#include "base/trace_event/process_memory_dump.h"
@@ -19,6 +20,10 @@
#include "gpu/command_buffer/service/shared_image_representation.h"
#include "ui/gl/trace_util.h"
+#if defined(OS_ANDROID)
+#include "gpu/command_buffer/service/shared_image_batch_access_manager.h"
+#endif
+
#if DCHECK_IS_ON()
#define CALLED_ON_VALID_THREAD() \
do { \
@@ -46,23 +51,40 @@ bool operator<(const std::unique_ptr<SharedImageBacking>& lhs,
return lhs->mailbox() < rhs;
}
-class SharedImageManager::AutoLock {
+class SCOPED_LOCKABLE SharedImageManager::AutoLock {
public:
explicit AutoLock(SharedImageManager* manager)
- : auto_lock_(manager->is_thread_safe() ? &manager->lock_.value()
- : nullptr) {}
+ EXCLUSIVE_LOCK_FUNCTION(manager->lock_)
+ : start_time_(base::TimeTicks::Now()),
+ auto_lock_(manager->is_thread_safe() ? &manager->lock_.value()
+ : nullptr) {
+ if (manager->is_thread_safe()) {
+ UMA_HISTOGRAM_CUSTOM_MICROSECONDS_TIMES(
+ "GPU.SharedImageManager.TimeToAcquireLock",
+ base::TimeTicks::Now() - start_time_,
+ base::TimeDelta::FromMicroseconds(1), base::TimeDelta::FromSeconds(1),
+ 50);
+ }
+ }
- ~AutoLock() = default;
+ ~AutoLock() UNLOCK_FUNCTION() = default;
private:
+ base::TimeTicks start_time_;
base::AutoLockMaybe auto_lock_;
DISALLOW_COPY_AND_ASSIGN(AutoLock);
};
-SharedImageManager::SharedImageManager(bool thread_safe) {
+SharedImageManager::SharedImageManager(bool thread_safe,
+ bool display_context_on_another_thread)
+ : display_context_on_another_thread_(display_context_on_another_thread) {
+ DCHECK(!display_context_on_another_thread || thread_safe);
if (thread_safe)
lock_.emplace();
+#if defined(OS_ANDROID)
+ batch_access_manager_ = std::make_unique<SharedImageBatchAccessManager>();
+#endif
CALLED_ON_VALID_THREAD();
}
@@ -86,7 +108,6 @@ SharedImageManager::Register(std::unique_ptr<SharedImageBacking> backing,
(*lower_bound)->mailbox() == backing->mailbox()) {
LOG(ERROR) << "SharedImageManager::Register: Trying to register an "
"already registered mailbox.";
- backing->Destroy();
return nullptr;
}
@@ -314,4 +335,29 @@ void SharedImageManager::OnMemoryDump(const Mailbox& mailbox,
backing->OnMemoryDump(dump_name, dump, pmd, client_tracing_id);
}
+scoped_refptr<gfx::NativePixmap> SharedImageManager::GetNativePixmap(
+ const gpu::Mailbox& mailbox) {
+ AutoLock autolock(this);
+ auto found = images_.find(mailbox);
+ if (found == images_.end())
+ return nullptr;
+ return (*found)->GetNativePixmap();
+}
+
+bool SharedImageManager::BeginBatchReadAccess() {
+#if defined(OS_ANDROID)
+ return batch_access_manager_->BeginBatchReadAccess();
+#else
+ return true;
+#endif
+}
+
+bool SharedImageManager::EndBatchReadAccess() {
+#if defined(OS_ANDROID)
+ return batch_access_manager_->EndBatchReadAccess();
+#else
+ return true;
+#endif
+}
+
} // namespace gpu
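A small illustrative use of the new GetNativePixmap() accessor added above; |manager| and |mailbox| are assumed placeholders, with |mailbox| naming a NativePixmap-backed SharedImage.

  // Sketch only: checking whether |mailbox| can be used for pageflip testing.
  scoped_refptr<gfx::NativePixmap> pixmap = manager.GetNativePixmap(mailbox);
  if (!pixmap) {
    // Either the SharedImage does not exist or it is not NativePixmap-backed.
  }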
diff --git a/chromium/gpu/command_buffer/service/shared_image_manager.h b/chromium/gpu/command_buffer/service/shared_image_manager.h
index 5b34808b75e..34ce9ba3596 100644
--- a/chromium/gpu/command_buffer/service/shared_image_manager.h
+++ b/chromium/gpu/command_buffer/service/shared_image_manager.h
@@ -10,16 +10,26 @@
#include "base/optional.h"
#include "base/synchronization/lock.h"
#include "base/threading/thread_checker.h"
+#include "build/build_config.h"
#include "gpu/command_buffer/common/mailbox.h"
#include "gpu/command_buffer/service/shared_image_backing.h"
#include "gpu/gpu_gles2_export.h"
namespace gpu {
class SharedImageRepresentationFactoryRef;
+class SharedImageBatchAccessManager;
class GPU_GLES2_EXPORT SharedImageManager {
public:
- explicit SharedImageManager(bool thread_safe = false);
+ // If |thread_safe| is set, the manager itself can be safely accessed from
+ // other threads but the backings themselves may not be thread-safe so
+ // representations should not be created on other threads. When
+ // |display_context_on_another_thread| is set, we make sure that all
+ // SharedImages that will be used in the display context have thread-safe
+ // backings and therefore it is safe to create representations on the thread
+ // that holds the display context.
+ explicit SharedImageManager(bool thread_safe = false,
+ bool display_context_on_another_thread = false);
~SharedImageManager();
// Registers a SharedImageBacking with the manager and returns a
@@ -66,12 +76,40 @@ class GPU_GLES2_EXPORT SharedImageManager {
bool is_thread_safe() const { return !!lock_; }
+ bool display_context_on_another_thread() const {
+ return display_context_on_another_thread_;
+ }
+
+ // Returns the NativePixmap backing |mailbox|. Returns null if the SharedImage
+ // doesn't exist or is not backed by a NativePixmap. The caller is not
+ // expected to read from or write into the provided NativePixmap because it
+ // can be modified by the client at any time. The primary purpose of this
+ // method is to facilitate pageflip testing on the viz thread.
+ scoped_refptr<gfx::NativePixmap> GetNativePixmap(const gpu::Mailbox& mailbox);
+
+ SharedImageBatchAccessManager* batch_access_manager() const {
+#if defined(OS_ANDROID)
+ return batch_access_manager_.get();
+#else
+ return nullptr;
+#endif
+ }
+
+ bool BeginBatchReadAccess();
+ bool EndBatchReadAccess();
+
private:
class AutoLock;
// The lock for protecting |images_|.
base::Optional<base::Lock> lock_;
- base::flat_set<std::unique_ptr<SharedImageBacking>> images_;
+ base::flat_set<std::unique_ptr<SharedImageBacking>> images_ GUARDED_BY(lock_);
+
+ const bool display_context_on_another_thread_;
+
+#if defined(OS_ANDROID)
+ std::unique_ptr<SharedImageBatchAccessManager> batch_access_manager_;
+#endif
THREAD_CHECKER(thread_checker_);
diff --git a/chromium/gpu/command_buffer/service/shared_image_manager_unittest.cc b/chromium/gpu/command_buffer/service/shared_image_manager_unittest.cc
index f1b39a2986d..e068fe28615 100644
--- a/chromium/gpu/command_buffer/service/shared_image_manager_unittest.cc
+++ b/chromium/gpu/command_buffer/service/shared_image_manager_unittest.cc
@@ -11,6 +11,7 @@
#include "gpu/command_buffer/service/service_utils.h"
#include "gpu/command_buffer/service/shared_image_backing.h"
#include "gpu/command_buffer/service/shared_image_representation.h"
+#include "gpu/command_buffer/service/test_shared_image_backing.h"
#include "gpu/command_buffer/service/texture_manager.h"
#include "gpu/command_buffer/tests/texture_image_factory.h"
#include "gpu/config/gpu_driver_bug_workarounds.h"
@@ -24,54 +25,9 @@
#include "ui/gl/gl_surface.h"
#include "ui/gl/init/gl_factory.h"
-using ::testing::Return;
-using ::testing::StrictMock;
-
namespace gpu {
namespace {
-class MockSharedImageRepresentationGLTexture
- : public SharedImageRepresentationGLTexture {
- public:
- MockSharedImageRepresentationGLTexture(SharedImageManager* manager,
- SharedImageBacking* backing,
- MemoryTypeTracker* tracker)
- : SharedImageRepresentationGLTexture(manager, backing, tracker) {}
-
- MOCK_METHOD0(GetTexture, gles2::Texture*());
-};
-
-class MockSharedImageBacking : public SharedImageBacking {
- public:
- MockSharedImageBacking(const Mailbox& mailbox,
- viz::ResourceFormat format,
- const gfx::Size& size,
- const gfx::ColorSpace& color_space,
- uint32_t usage,
- size_t estimated_size)
- : SharedImageBacking(mailbox,
- format,
- size,
- color_space,
- usage,
- estimated_size,
- false /* is_thread_safe */) {}
-
- MOCK_CONST_METHOD0(IsCleared, bool());
- MOCK_METHOD0(SetCleared, void());
- MOCK_METHOD1(Update, void(std::unique_ptr<gfx::GpuFence>));
- MOCK_METHOD0(Destroy, void());
- MOCK_METHOD1(ProduceLegacyMailbox, bool(MailboxManager*));
-
- private:
- std::unique_ptr<SharedImageRepresentationGLTexture> ProduceGLTexture(
- SharedImageManager* manager,
- MemoryTypeTracker* tracker) {
- return std::make_unique<StrictMock<MockSharedImageRepresentationGLTexture>>(
- manager, this, tracker);
- }
-};
-
TEST(SharedImageManagerTest, BasicRefCounting) {
const size_t kSizeBytes = 1024;
SharedImageManager manager;
@@ -83,11 +39,10 @@ TEST(SharedImageManagerTest, BasicRefCounting) {
auto color_space = gfx::ColorSpace::CreateSRGB();
uint32_t usage = SHARED_IMAGE_USAGE_GLES2;
- auto mock_backing = std::make_unique<StrictMock<MockSharedImageBacking>>(
+ auto backing = std::make_unique<TestSharedImageBacking>(
mailbox, format, size, color_space, usage, kSizeBytes);
- auto* mock_backing_ptr = mock_backing.get();
- auto factory_ref = manager.Register(std::move(mock_backing), tracker.get());
+ auto factory_ref = manager.Register(std::move(backing), tracker.get());
EXPECT_EQ(kSizeBytes, tracker->GetMemRepresented());
// Taking/releasing an additional ref/representation with the same tracker
@@ -106,8 +61,6 @@ TEST(SharedImageManagerTest, BasicRefCounting) {
EXPECT_EQ(0u, tracker2->GetMemRepresented());
}
- // We should get one call to destroy when we release the factory ref.
- EXPECT_CALL(*mock_backing_ptr, Destroy());
factory_ref.reset();
EXPECT_EQ(0u, tracker->GetMemRepresented());
}
@@ -123,11 +76,10 @@ TEST(SharedImageManagerTest, TransferRefSameTracker) {
auto color_space = gfx::ColorSpace::CreateSRGB();
uint32_t usage = SHARED_IMAGE_USAGE_GLES2;
- auto mock_backing = std::make_unique<StrictMock<MockSharedImageBacking>>(
+ auto backing = std::make_unique<TestSharedImageBacking>(
mailbox, format, size, color_space, usage, kSizeBytes);
- auto* mock_backing_ptr = mock_backing.get();
- auto factory_ref = manager.Register(std::move(mock_backing), tracker.get());
+ auto factory_ref = manager.Register(std::move(backing), tracker.get());
EXPECT_EQ(kSizeBytes, tracker->GetMemRepresented());
// Take an additional ref/representation.
@@ -137,8 +89,6 @@ TEST(SharedImageManagerTest, TransferRefSameTracker) {
factory_ref.reset();
EXPECT_EQ(kSizeBytes, tracker->GetMemRepresented());
- // We should get one call to destroy when we release the gl representation.
- EXPECT_CALL(*mock_backing_ptr, Destroy());
gl_representation.reset();
EXPECT_EQ(0u, tracker->GetMemRepresented());
}
@@ -155,11 +105,10 @@ TEST(SharedImageManagerTest, TransferRefNewTracker) {
auto color_space = gfx::ColorSpace::CreateSRGB();
uint32_t usage = SHARED_IMAGE_USAGE_GLES2;
- auto mock_backing = std::make_unique<StrictMock<MockSharedImageBacking>>(
+ auto backing = std::make_unique<TestSharedImageBacking>(
mailbox, format, size, color_space, usage, kSizeBytes);
- auto* mock_backing_ptr = mock_backing.get();
- auto factory_ref = manager.Register(std::move(mock_backing), tracker.get());
+ auto factory_ref = manager.Register(std::move(backing), tracker.get());
EXPECT_EQ(kSizeBytes, tracker->GetMemRepresented());
// Take an additional ref/representation with a new tracker. Memory should
@@ -176,8 +125,6 @@ TEST(SharedImageManagerTest, TransferRefNewTracker) {
// We can now safely destroy the original tracker.
tracker.reset();
- // We should get one call to destroy when we release the gl representation.
- EXPECT_CALL(*mock_backing_ptr, Destroy());
gl_representation.reset();
EXPECT_EQ(0u, tracker2->GetMemRepresented());
}
diff --git a/chromium/gpu/command_buffer/service/shared_image_representation.cc b/chromium/gpu/command_buffer/service/shared_image_representation.cc
index 320c827563f..e17f9f5a175 100644
--- a/chromium/gpu/command_buffer/service/shared_image_representation.cc
+++ b/chromium/gpu/command_buffer/service/shared_image_representation.cc
@@ -4,6 +4,7 @@
#include "gpu/command_buffer/service/shared_image_representation.h"
+#include "gpu/command_buffer/service/texture_manager.h"
#include "third_party/skia/include/core/SkPromiseImageTexture.h"
namespace gpu {
@@ -18,61 +19,198 @@ SharedImageRepresentation::SharedImageRepresentation(
}
SharedImageRepresentation::~SharedImageRepresentation() {
+ // CHECK here as we'll crash later anyway, and this makes it clearer what the
+ // error is.
+ CHECK(!has_scoped_access_) << "Destroying a SharedImageRepresentation with "
+ "outstanding Scoped*Access objects.";
manager_->OnRepresentationDestroyed(backing_->mailbox(), this);
}
-bool SharedImageRepresentationGLTexture::BeginAccess(GLenum mode) {
- return true;
+std::unique_ptr<SharedImageRepresentationGLTexture::ScopedAccess>
+SharedImageRepresentationGLTextureBase::BeginScopedAccess(
+ GLenum mode,
+ AllowUnclearedAccess allow_uncleared) {
+ if (allow_uncleared != AllowUnclearedAccess::kYes && !IsCleared()) {
+ LOG(ERROR) << "Attempt to access an uninitialized SharedImage";
+ return nullptr;
+ }
+
+ if (!BeginAccess(mode))
+ return nullptr;
+
+ UpdateClearedStateOnBeginAccess();
+
+ constexpr GLenum kReadAccess = 0x8AF6;
+ if (mode == kReadAccess)
+ backing()->OnReadSucceeded();
+ else
+ backing()->OnWriteSucceeded();
+
+ return std::make_unique<ScopedAccess>(
+ util::PassKey<SharedImageRepresentationGLTextureBase>(), this);
}
-bool SharedImageRepresentationGLTexturePassthrough::BeginAccess(GLenum mode) {
+bool SharedImageRepresentationGLTextureBase::BeginAccess(GLenum mode) {
return true;
}
+bool SharedImageRepresentationGLTextureBase::
+ SupportsMultipleConcurrentReadAccess() {
+ return false;
+}
+
+gpu::TextureBase* SharedImageRepresentationGLTexture::GetTextureBase() {
+ return GetTexture();
+}
+
+void SharedImageRepresentationGLTexture::UpdateClearedStateOnEndAccess() {
+ auto* texture = GetTexture();
+ // Operations on the gles2::Texture may have cleared or uncleared it. Make
+ // sure this state is reflected back in the SharedImage.
+ gfx::Rect cleared_rect = texture->GetLevelClearedRect(texture->target(), 0);
+ if (cleared_rect != ClearedRect())
+ SetClearedRect(cleared_rect);
+}
+
+void SharedImageRepresentationGLTexture::UpdateClearedStateOnBeginAccess() {
+ auto* texture = GetTexture();
+ // Operations outside of the gles2::Texture may have cleared or uncleared it.
+ // Make sure this state is reflected back in gles2::Texture.
+ gfx::Rect cleared_rect = ClearedRect();
+ if (cleared_rect != texture->GetLevelClearedRect(texture->target(), 0))
+ texture->SetLevelClearedRect(texture->target(), 0, cleared_rect);
+}
+
+gpu::TextureBase*
+SharedImageRepresentationGLTexturePassthrough::GetTextureBase() {
+ return GetTexturePassthrough().get();
+}
+
+bool SharedImageRepresentationSkia::SupportsMultipleConcurrentReadAccess() {
+ return false;
+}
+
SharedImageRepresentationSkia::ScopedWriteAccess::ScopedWriteAccess(
+ util::PassKey<SharedImageRepresentationSkia> /* pass_key */,
SharedImageRepresentationSkia* representation,
+ sk_sp<SkSurface> surface)
+ : ScopedAccessBase(representation), surface_(std::move(surface)) {}
+
+SharedImageRepresentationSkia::ScopedWriteAccess::~ScopedWriteAccess() {
+ representation()->EndWriteAccess(std::move(surface_));
+}
+
+std::unique_ptr<SharedImageRepresentationSkia::ScopedWriteAccess>
+SharedImageRepresentationSkia::BeginScopedWriteAccess(
int final_msaa_count,
const SkSurfaceProps& surface_props,
std::vector<GrBackendSemaphore>* begin_semaphores,
- std::vector<GrBackendSemaphore>* end_semaphores)
- : representation_(representation),
- surface_(representation_->BeginWriteAccess(final_msaa_count,
- surface_props,
- begin_semaphores,
- end_semaphores)) {
- if (success())
- representation->backing()->OnWriteSucceeded();
+ std::vector<GrBackendSemaphore>* end_semaphores,
+ AllowUnclearedAccess allow_uncleared) {
+ if (allow_uncleared != AllowUnclearedAccess::kYes && !IsCleared()) {
+ LOG(ERROR) << "Attempt to write to an uninitialized SharedImage";
+ return nullptr;
+ }
+
+ sk_sp<SkSurface> surface = BeginWriteAccess(final_msaa_count, surface_props,
+ begin_semaphores, end_semaphores);
+ if (!surface)
+ return nullptr;
+
+ return std::make_unique<ScopedWriteAccess>(
+ util::PassKey<SharedImageRepresentationSkia>(), this, std::move(surface));
}
-SharedImageRepresentationSkia::ScopedWriteAccess::ScopedWriteAccess(
- SharedImageRepresentationSkia* representation,
+std::unique_ptr<SharedImageRepresentationSkia::ScopedWriteAccess>
+SharedImageRepresentationSkia::BeginScopedWriteAccess(
std::vector<GrBackendSemaphore>* begin_semaphores,
- std::vector<GrBackendSemaphore>* end_semaphores)
- : ScopedWriteAccess(representation,
- 0 /* final_msaa_count */,
- SkSurfaceProps(0 /* flags */, kUnknown_SkPixelGeometry),
- begin_semaphores,
- end_semaphores) {}
-
-SharedImageRepresentationSkia::ScopedWriteAccess::~ScopedWriteAccess() {
- if (success())
- representation_->EndWriteAccess(std::move(surface_));
+ std::vector<GrBackendSemaphore>* end_semaphores,
+ AllowUnclearedAccess allow_uncleared) {
+ return BeginScopedWriteAccess(
+ 0 /* final_msaa_count */,
+ SkSurfaceProps(0 /* flags */, kUnknown_SkPixelGeometry), begin_semaphores,
+ end_semaphores, allow_uncleared);
}
SharedImageRepresentationSkia::ScopedReadAccess::ScopedReadAccess(
+ util::PassKey<SharedImageRepresentationSkia> /* pass_key */,
SharedImageRepresentationSkia* representation,
+ sk_sp<SkPromiseImageTexture> promise_image_texture)
+ : ScopedAccessBase(representation),
+ promise_image_texture_(std::move(promise_image_texture)) {}
+
+SharedImageRepresentationSkia::ScopedReadAccess::~ScopedReadAccess() {
+ representation()->EndReadAccess();
+}
+
+std::unique_ptr<SharedImageRepresentationSkia::ScopedReadAccess>
+SharedImageRepresentationSkia::BeginScopedReadAccess(
std::vector<GrBackendSemaphore>* begin_semaphores,
- std::vector<GrBackendSemaphore>* end_semaphores)
- : representation_(representation),
- promise_image_texture_(
- representation_->BeginReadAccess(begin_semaphores, end_semaphores)) {
- if (success())
- representation->backing()->OnReadSucceeded();
+ std::vector<GrBackendSemaphore>* end_semaphores) {
+ if (!IsCleared()) {
+ LOG(ERROR) << "Attempt to read from an uninitialized SharedImage";
+ return nullptr;
+ }
+
+ sk_sp<SkPromiseImageTexture> promise_image_texture =
+ BeginReadAccess(begin_semaphores, end_semaphores);
+ if (!promise_image_texture)
+ return nullptr;
+
+ return std::make_unique<ScopedReadAccess>(
+ util::PassKey<SharedImageRepresentationSkia>(), this,
+ std::move(promise_image_texture));
}
-SharedImageRepresentationSkia::ScopedReadAccess::~ScopedReadAccess() {
- if (success())
- representation_->EndReadAccess();
+SharedImageRepresentationOverlay::ScopedReadAccess::ScopedReadAccess(
+ util::PassKey<SharedImageRepresentationOverlay> pass_key,
+ SharedImageRepresentationOverlay* representation,
+ gl::GLImage* gl_image)
+ : ScopedAccessBase(representation), gl_image_(gl_image) {}
+
+std::unique_ptr<SharedImageRepresentationOverlay::ScopedReadAccess>
+SharedImageRepresentationOverlay::BeginScopedReadAccess(bool needs_gl_image) {
+ if (!IsCleared()) {
+ LOG(ERROR) << "Attempt to read from an uninitialized SharedImage";
+ return nullptr;
+ }
+
+ if (!BeginReadAccess())
+ return nullptr;
+
+ return std::make_unique<ScopedReadAccess>(
+ util::PassKey<SharedImageRepresentationOverlay>(), this,
+ needs_gl_image ? GetGLImage() : nullptr);
+}
+
+SharedImageRepresentationDawn::ScopedAccess::ScopedAccess(
+ util::PassKey<SharedImageRepresentationDawn> /* pass_key */,
+ SharedImageRepresentationDawn* representation,
+ WGPUTexture texture)
+ : ScopedAccessBase(representation), texture_(texture) {}
+
+SharedImageRepresentationDawn::ScopedAccess::~ScopedAccess() {
+ representation()->EndAccess();
+}
+
+std::unique_ptr<SharedImageRepresentationDawn::ScopedAccess>
+SharedImageRepresentationDawn::BeginScopedAccess(
+ WGPUTextureUsage usage,
+ AllowUnclearedAccess allow_uncleared) {
+ if (allow_uncleared != AllowUnclearedAccess::kYes && !IsCleared()) {
+ LOG(ERROR) << "Attempt to access an uninitialized SharedImage";
+ return nullptr;
+ }
+
+ WGPUTexture texture = BeginAccess(usage);
+ if (!texture)
+ return nullptr;
+ return std::make_unique<ScopedAccess>(
+ util::PassKey<SharedImageRepresentationDawn>(), this, texture);
+}
+
+SharedImageRepresentationFactoryRef::~SharedImageRepresentationFactoryRef() {
+ backing()->MarkForDestruction();
}
} // namespace gpu
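These changes replace the old constructor-plus-success() pattern with factory-style BeginScoped*Access() calls that return an access object or nullptr. A hedged sketch of the new read path, assuming |skia_representation| already exists:

  // Sketch only: reading through a Skia representation with the new API.
  std::vector<GrBackendSemaphore> begin_semaphores, end_semaphores;
  auto read_access = skia_representation->BeginScopedReadAccess(
      &begin_semaphores, &end_semaphores);
  if (!read_access) {
    // Fails if the SharedImage is still uncleared (uninitialized).
    return;
  }
  SkPromiseImageTexture* texture = read_access->promise_image_texture();
  // ... hand |texture| to Skia ...
  // EndReadAccess() runs automatically when |read_access| goes out of scope.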
diff --git a/chromium/gpu/command_buffer/service/shared_image_representation.h b/chromium/gpu/command_buffer/service/shared_image_representation.h
index 53dce99f869..7d0d2411334 100644
--- a/chromium/gpu/command_buffer/service/shared_image_representation.h
+++ b/chromium/gpu/command_buffer/service/shared_image_representation.h
@@ -9,6 +9,7 @@
#include <dawn/webgpu.h>
#include "base/callback_helpers.h"
+#include "base/util/type_safety/pass_key.h"
#include "build/build_config.h"
#include "components/viz/common/resources/resource_format.h"
#include "gpu/command_buffer/service/shared_image_backing.h"
@@ -28,6 +29,8 @@ class GLImage;
}
namespace gpu {
+class TextureBase;
+
namespace gles2 {
class Texture;
class TexturePassthrough;
@@ -43,6 +46,9 @@ enum class RepresentationAccessMode {
// api.
class GPU_GLES2_EXPORT SharedImageRepresentation {
public:
+ // Used by derived classes.
+ enum class AllowUnclearedAccess { kYes, kNo };
+
SharedImageRepresentation(SharedImageManager* manager,
SharedImageBacking* backing,
MemoryTypeTracker* tracker);
@@ -55,6 +61,10 @@ class GPU_GLES2_EXPORT SharedImageRepresentation {
MemoryTypeTracker* tracker() { return tracker_; }
bool IsCleared() const { return backing_->IsCleared(); }
void SetCleared() { backing_->SetCleared(); }
+ gfx::Rect ClearedRect() const { return backing_->ClearedRect(); }
+ void SetClearedRect(const gfx::Rect& cleared_rect) {
+ backing_->SetClearedRect(cleared_rect);
+ }
// Indicates that the underlying graphics context has been lost, and the
// backing should be treated as destroyed.
@@ -68,11 +78,36 @@ class GPU_GLES2_EXPORT SharedImageRepresentation {
SharedImageBacking* backing() const { return backing_; }
bool has_context() const { return has_context_; }
+ // Helper class for derived classes' Scoped*Access objects. Has tracking to
+ // ensure a Scoped*Access does not outlive the representation it's associated
+ // with.
+ template <typename RepresentationClass>
+ class ScopedAccessBase {
+ public:
+ ScopedAccessBase(RepresentationClass* representation)
+ : representation_(representation) {
+ DCHECK(!representation_->has_scoped_access_);
+ representation_->has_scoped_access_ = true;
+ }
+ ~ScopedAccessBase() {
+ DCHECK(representation_->has_scoped_access_);
+ representation_->has_scoped_access_ = false;
+ }
+
+ RepresentationClass* representation() { return representation_; }
+
+ private:
+ RepresentationClass* const representation_;
+
+ DISALLOW_COPY_AND_ASSIGN(ScopedAccessBase);
+ };
+
private:
SharedImageManager* const manager_;
SharedImageBacking* const backing_;
MemoryTypeTracker* const tracker_;
bool has_context_ = true;
+ bool has_scoped_access_ = false;
};
class SharedImageRepresentationFactoryRef : public SharedImageRepresentation {
@@ -82,6 +117,8 @@ class SharedImageRepresentationFactoryRef : public SharedImageRepresentation {
MemoryTypeTracker* tracker)
: SharedImageRepresentation(manager, backing, tracker) {}
+ ~SharedImageRepresentationFactoryRef() override;
+
const Mailbox& mailbox() const { return backing()->mailbox(); }
void Update(std::unique_ptr<gfx::GpuFence> in_fence) {
backing()->Update(std::move(in_fence));
@@ -93,179 +130,113 @@ class SharedImageRepresentationFactoryRef : public SharedImageRepresentation {
bool PresentSwapChain() { return backing()->PresentSwapChain(); }
};
-class GPU_GLES2_EXPORT SharedImageRepresentationGLTexture
+class GPU_GLES2_EXPORT SharedImageRepresentationGLTextureBase
: public SharedImageRepresentation {
public:
- class ScopedAccess {
+ class ScopedAccess
+ : public ScopedAccessBase<SharedImageRepresentationGLTextureBase> {
public:
- ScopedAccess() = default;
-
- ScopedAccess(SharedImageRepresentationGLTexture* representation,
- GLenum mode)
- : representation_(representation),
- success_(representation_->BeginAccess(mode)) {
- constexpr GLenum kReadAccess = 0x8AF6;
- if (success()) {
- if (mode == kReadAccess)
- representation_->backing()->OnReadSucceeded();
- else
- representation_->backing()->OnWriteSucceeded();
- }
- }
-
- ScopedAccess(ScopedAccess&& other) { *this = std::move(other); }
-
- ~ScopedAccess() { reset(); }
-
- ScopedAccess& operator=(ScopedAccess&& other) {
- reset();
- representation_ = other.representation_;
- success_ = other.success_;
- other.representation_ = nullptr;
- other.success_ = false;
- return *this;
- }
-
- bool success() const { return success_; }
-
- void reset() {
- if (representation_ && success()) {
- representation_->EndAccess();
- representation_ = nullptr;
- success_ = false;
- }
+ ScopedAccess(util::PassKey<SharedImageRepresentationGLTextureBase> pass_key,
+ SharedImageRepresentationGLTextureBase* representation)
+ : ScopedAccessBase(representation) {}
+ ~ScopedAccess() {
+ representation()->UpdateClearedStateOnEndAccess();
+ representation()->EndAccess();
}
-
- private:
- SharedImageRepresentationGLTexture* representation_ = nullptr;
- bool success_ = false;
-
- DISALLOW_COPY_AND_ASSIGN(ScopedAccess);
};
- SharedImageRepresentationGLTexture(SharedImageManager* manager,
- SharedImageBacking* backing,
- MemoryTypeTracker* tracker)
+ SharedImageRepresentationGLTextureBase(SharedImageManager* manager,
+ SharedImageBacking* backing,
+ MemoryTypeTracker* tracker)
: SharedImageRepresentation(manager, backing, tracker) {}
- virtual gles2::Texture* GetTexture() = 0;
+ std::unique_ptr<ScopedAccess> BeginScopedAccess(
+ GLenum mode,
+ AllowUnclearedAccess allow_uncleared);
+
+ virtual gpu::TextureBase* GetTextureBase() = 0;
protected:
friend class SharedImageRepresentationSkiaGL;
+ friend class SharedImageRepresentationGLTextureImpl;
+
+ // Can be overridden to handle clear state tracking when GL access begins or
+ // ends.
+ virtual void UpdateClearedStateOnBeginAccess() {}
+ virtual void UpdateClearedStateOnEndAccess() {}
// TODO(ericrk): Make these pure virtual and ensure real implementations
// exist.
virtual bool BeginAccess(GLenum mode);
virtual void EndAccess() {}
+
+ virtual bool SupportsMultipleConcurrentReadAccess();
};
-class GPU_GLES2_EXPORT SharedImageRepresentationGLTexturePassthrough
- : public SharedImageRepresentation {
+class GPU_GLES2_EXPORT SharedImageRepresentationGLTexture
+ : public SharedImageRepresentationGLTextureBase {
public:
- class ScopedAccess {
- public:
- ScopedAccess() = default;
-
- ScopedAccess(SharedImageRepresentationGLTexturePassthrough* representation,
- GLenum mode)
- : representation_(representation),
- success_(representation_->BeginAccess(mode)) {
- constexpr GLenum kReadAccess = 0x8AF6;
- if (success()) {
- if (mode == kReadAccess)
- representation_->backing()->OnReadSucceeded();
- else
- representation_->backing()->OnWriteSucceeded();
- }
- }
-
- ScopedAccess(ScopedAccess&& other) { *this = std::move(other); }
-
- ~ScopedAccess() { reset(); }
-
- ScopedAccess& operator=(ScopedAccess&& other) {
- reset();
- representation_ = other.representation_;
- success_ = other.success_;
- other.representation_ = nullptr;
- other.success_ = false;
- return *this;
- }
+ SharedImageRepresentationGLTexture(SharedImageManager* manager,
+ SharedImageBacking* backing,
+ MemoryTypeTracker* tracker)
+ : SharedImageRepresentationGLTextureBase(manager, backing, tracker) {}
- bool success() const { return success_; }
+ // TODO(ericrk): Move this to the ScopedAccess object. crbug.com/1003686
+ virtual gles2::Texture* GetTexture() = 0;
- void reset() {
- if (representation_ && success())
- representation_->EndAccess();
- representation_ = nullptr;
- success_ = false;
- }
+ gpu::TextureBase* GetTextureBase() override;
- private:
- SharedImageRepresentationGLTexturePassthrough* representation_ = nullptr;
- bool success_ = false;
- DISALLOW_COPY_AND_ASSIGN(ScopedAccess);
- };
+ protected:
+ void UpdateClearedStateOnBeginAccess() override;
+ void UpdateClearedStateOnEndAccess() override;
+};
+class GPU_GLES2_EXPORT SharedImageRepresentationGLTexturePassthrough
+ : public SharedImageRepresentationGLTextureBase {
+ public:
SharedImageRepresentationGLTexturePassthrough(SharedImageManager* manager,
SharedImageBacking* backing,
MemoryTypeTracker* tracker)
- : SharedImageRepresentation(manager, backing, tracker) {}
+ : SharedImageRepresentationGLTextureBase(manager, backing, tracker) {}
+ // TODO(ericrk): Move this to the ScopedAccess object. crbug.com/1003686
virtual const scoped_refptr<gles2::TexturePassthrough>&
GetTexturePassthrough() = 0;
- protected:
- friend class SharedImageRepresentationSkiaGL;
-
- // TODO(ericrk): Make these pure virtual and ensure real implementations
- // exist.
- virtual bool BeginAccess(GLenum mode);
- virtual void EndAccess() {}
+ gpu::TextureBase* GetTextureBase() override;
};
class GPU_GLES2_EXPORT SharedImageRepresentationSkia
: public SharedImageRepresentation {
public:
- class GPU_GLES2_EXPORT ScopedWriteAccess {
+ class GPU_GLES2_EXPORT ScopedWriteAccess
+ : public ScopedAccessBase<SharedImageRepresentationSkia> {
public:
- ScopedWriteAccess(SharedImageRepresentationSkia* representation,
- int final_msaa_count,
- const SkSurfaceProps& surface_props,
- std::vector<GrBackendSemaphore>* begin_semaphores,
- std::vector<GrBackendSemaphore>* end_semaphores);
- ScopedWriteAccess(SharedImageRepresentationSkia* representation,
- std::vector<GrBackendSemaphore>* begin_semaphores,
- std::vector<GrBackendSemaphore>* end_semaphores);
+ ScopedWriteAccess(util::PassKey<SharedImageRepresentationSkia> pass_key,
+ SharedImageRepresentationSkia* representation,
+ sk_sp<SkSurface> surface);
~ScopedWriteAccess();
- bool success() const { return !!surface_; }
SkSurface* surface() const { return surface_.get(); }
private:
- SharedImageRepresentationSkia* const representation_;
sk_sp<SkSurface> surface_;
-
- DISALLOW_COPY_AND_ASSIGN(ScopedWriteAccess);
};
- class GPU_GLES2_EXPORT ScopedReadAccess {
+ class GPU_GLES2_EXPORT ScopedReadAccess
+ : public ScopedAccessBase<SharedImageRepresentationSkia> {
public:
- ScopedReadAccess(SharedImageRepresentationSkia* representation,
- std::vector<GrBackendSemaphore>* begin_semaphores,
- std::vector<GrBackendSemaphore>* end_semaphores);
+ ScopedReadAccess(util::PassKey<SharedImageRepresentationSkia> pass_key,
+ SharedImageRepresentationSkia* representation,
+ sk_sp<SkPromiseImageTexture> promise_image_texture);
~ScopedReadAccess();
- bool success() const { return !!promise_image_texture_; }
SkPromiseImageTexture* promise_image_texture() const {
return promise_image_texture_.get();
}
private:
- SharedImageRepresentationSkia* const representation_;
sk_sp<SkPromiseImageTexture> promise_image_texture_;
-
- DISALLOW_COPY_AND_ASSIGN(ScopedReadAccess);
};
SharedImageRepresentationSkia(SharedImageManager* manager,
@@ -273,6 +244,28 @@ class GPU_GLES2_EXPORT SharedImageRepresentationSkia
MemoryTypeTracker* tracker)
: SharedImageRepresentation(manager, backing, tracker) {}
+ // Note: See BeginWriteAccess below for a description of the semaphore
+ // parameters.
+ std::unique_ptr<ScopedWriteAccess> BeginScopedWriteAccess(
+ int final_msaa_count,
+ const SkSurfaceProps& surface_props,
+ std::vector<GrBackendSemaphore>* begin_semaphores,
+ std::vector<GrBackendSemaphore>* end_semaphores,
+ AllowUnclearedAccess allow_uncleared);
+
+ std::unique_ptr<ScopedWriteAccess> BeginScopedWriteAccess(
+ std::vector<GrBackendSemaphore>* begin_semaphores,
+ std::vector<GrBackendSemaphore>* end_semaphores,
+ AllowUnclearedAccess allow_uncleared);
+
+ // Note: See BeginReadAccess below for a description of the semaphore
+ // parameters.
+ std::unique_ptr<ScopedReadAccess> BeginScopedReadAccess(
+ std::vector<GrBackendSemaphore>* begin_semaphores,
+ std::vector<GrBackendSemaphore>* end_semaphores);
+
+ virtual bool SupportsMultipleConcurrentReadAccess();
+
protected:
// Begin the write access. The implementations should insert semaphores into
// begin_semaphores vector which client will wait on before writing the
@@ -301,56 +294,63 @@ class GPU_GLES2_EXPORT SharedImageRepresentationSkia
virtual void EndReadAccess() = 0;
};
-class SharedImageRepresentationDawn : public SharedImageRepresentation {
+class GPU_GLES2_EXPORT SharedImageRepresentationDawn
+ : public SharedImageRepresentation {
public:
SharedImageRepresentationDawn(SharedImageManager* manager,
SharedImageBacking* backing,
MemoryTypeTracker* tracker)
: SharedImageRepresentation(manager, backing, tracker) {}
- // TODO(penghuang): Add ScopedAccess helper class.
+ class GPU_GLES2_EXPORT ScopedAccess
+ : public ScopedAccessBase<SharedImageRepresentationDawn> {
+ public:
+ ScopedAccess(util::PassKey<SharedImageRepresentationDawn> pass_key,
+ SharedImageRepresentationDawn* representation,
+ WGPUTexture texture);
+ ~ScopedAccess();
+
+ WGPUTexture texture() const { return texture_; }
+
+ private:
+ WGPUTexture texture_ = 0;
+ };
+
+ // Calls BeginAccess and returns a ScopedAccess object which will EndAccess
+ // when it goes out of scope. The Representation must outlive the returned
+ // ScopedAccess.
+ std::unique_ptr<ScopedAccess> BeginScopedAccess(
+ WGPUTextureUsage usage,
+ AllowUnclearedAccess allow_uncleared);
+
+ private:
// This can return null in case of a Dawn validation error, for example if
// usage is invalid.
virtual WGPUTexture BeginAccess(WGPUTextureUsage usage) = 0;
virtual void EndAccess() = 0;
};
-class SharedImageRepresentationOverlay : public SharedImageRepresentation {
+class GPU_GLES2_EXPORT SharedImageRepresentationOverlay
+ : public SharedImageRepresentation {
public:
SharedImageRepresentationOverlay(SharedImageManager* manager,
SharedImageBacking* backing,
MemoryTypeTracker* tracker)
: SharedImageRepresentation(manager, backing, tracker) {}
- class ScopedReadAccess {
+ class ScopedReadAccess
+ : public ScopedAccessBase<SharedImageRepresentationOverlay> {
public:
- ScopedReadAccess(SharedImageRepresentationOverlay* representation,
- bool needs_gl_image)
- : representation_(representation) {
- representation_->BeginReadAccess();
- gl_image_ = needs_gl_image ? representation_->GetGLImage() : nullptr;
- }
- ScopedReadAccess(ScopedReadAccess&& other) { *this = std::move(other); }
- ~ScopedReadAccess() {
- if (representation_)
- representation_->EndReadAccess();
- }
-
- ScopedReadAccess& operator=(ScopedReadAccess&& other) {
- representation_ = other.representation_;
- other.representation_ = nullptr;
- gl_image_ = other.gl_image_;
- other.gl_image_ = nullptr;
- return *this;
- }
+ ScopedReadAccess(util::PassKey<SharedImageRepresentationOverlay> pass_key,
+ SharedImageRepresentationOverlay* representation,
+ gl::GLImage* gl_image);
+ ~ScopedReadAccess() { representation()->EndReadAccess(); }
gl::GLImage* gl_image() const {
- DCHECK(representation_);
return gl_image_;
}
private:
- SharedImageRepresentationOverlay* representation_;
gl::GLImage* gl_image_;
};
@@ -359,10 +359,12 @@ class SharedImageRepresentationOverlay : public SharedImageRepresentation {
const gfx::Rect& bounds) = 0;
#endif
+ std::unique_ptr<ScopedReadAccess> BeginScopedReadAccess(bool needs_gl_image);
+
protected:
// TODO(weiliangc): Currently this only handles Android pre-SurfaceControl
// case. Add appropriate fence later.
- virtual void BeginReadAccess() = 0;
+ virtual bool BeginReadAccess() = 0;
virtual void EndReadAccess() = 0;
// TODO(weiliangc): Add API to backing AHardwareBuffer.
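The refactor above replaces the hand-rolled, movable ScopedAccess helpers with BeginScopedAccess() factories that return a std::unique_ptr and fail when an uncleared image is accessed with AllowUnclearedAccess::kNo. A minimal sketch of the intended caller pattern, assuming a SharedImageRepresentationGLTextureBase* named |representation| (the variable name is illustrative, not from this patch):

  // Sketch only: acquire read/write GL access; EndAccess runs when the
  // scoped object is destroyed.
  auto scoped_access = representation->BeginScopedAccess(
      GL_SHARED_IMAGE_ACCESS_MODE_READWRITE_CHROMIUM,
      SharedImageRepresentation::AllowUnclearedAccess::kNo);
  if (!scoped_access)
    return;  // BeginAccess failed or the image is still uncleared.
  gpu::TextureBase* texture = representation->GetTextureBase();
  // ... issue GL commands against |texture| ...

As noted for the Dawn variant above, the representation must outlive the returned scoped object.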
diff --git a/chromium/gpu/command_buffer/service/shared_image_representation_d3d.cc b/chromium/gpu/command_buffer/service/shared_image_representation_d3d.cc
new file mode 100644
index 00000000000..c400fffed1e
--- /dev/null
+++ b/chromium/gpu/command_buffer/service/shared_image_representation_d3d.cc
@@ -0,0 +1,136 @@
+// Copyright 2019 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "gpu/command_buffer/service/shared_image_representation_d3d.h"
+
+#include "components/viz/common/resources/resource_format_utils.h"
+#include "gpu/command_buffer/common/shared_image_usage.h"
+#include "gpu/command_buffer/service/shared_image_backing_d3d.h"
+
+namespace gpu {
+
+SharedImageRepresentationGLTexturePassthroughD3D::
+ SharedImageRepresentationGLTexturePassthroughD3D(
+ SharedImageManager* manager,
+ SharedImageBacking* backing,
+ MemoryTypeTracker* tracker,
+ scoped_refptr<gles2::TexturePassthrough> texture)
+ : SharedImageRepresentationGLTexturePassthrough(manager, backing, tracker),
+ texture_(std::move(texture)) {}
+
+const scoped_refptr<gles2::TexturePassthrough>&
+SharedImageRepresentationGLTexturePassthroughD3D::GetTexturePassthrough() {
+ return texture_;
+}
+
+SharedImageRepresentationGLTexturePassthroughD3D::
+ ~SharedImageRepresentationGLTexturePassthroughD3D() = default;
+
+bool SharedImageRepresentationGLTexturePassthroughD3D::BeginAccess(
+ GLenum mode) {
+ SharedImageBackingD3D* d3d_image_backing =
+ static_cast<SharedImageBackingD3D*>(backing());
+ return d3d_image_backing->BeginAccessD3D11();
+}
+
+void SharedImageRepresentationGLTexturePassthroughD3D::EndAccess() {
+ SharedImageBackingD3D* d3d_image_backing =
+ static_cast<SharedImageBackingD3D*>(backing());
+ d3d_image_backing->EndAccessD3D11();
+}
+
+#if BUILDFLAG(USE_DAWN)
+SharedImageRepresentationDawnD3D::SharedImageRepresentationDawnD3D(
+ SharedImageManager* manager,
+ SharedImageBacking* backing,
+ MemoryTypeTracker* tracker,
+ WGPUDevice device)
+ : SharedImageRepresentationDawn(manager, backing, tracker),
+ device_(device),
+ dawn_procs_(dawn_native::GetProcs()) {
+ DCHECK(device_);
+
+ // Keep a reference to the device so that it stays valid (it might become
+  // lost, in which case operations will be noops).
+ dawn_procs_.deviceReference(device_);
+}
+
+SharedImageRepresentationDawnD3D::~SharedImageRepresentationDawnD3D() {
+ EndAccess();
+ dawn_procs_.deviceRelease(device_);
+}
+
+WGPUTexture SharedImageRepresentationDawnD3D::BeginAccess(
+ WGPUTextureUsage usage) {
+ SharedImageBackingD3D* d3d_image_backing =
+ static_cast<SharedImageBackingD3D*>(backing());
+
+ const HANDLE shared_handle = d3d_image_backing->GetSharedHandle();
+ const viz::ResourceFormat viz_resource_format = d3d_image_backing->format();
+ WGPUTextureFormat wgpu_format = viz::ToWGPUFormat(viz_resource_format);
+ if (wgpu_format == WGPUTextureFormat_Undefined) {
+ DLOG(ERROR) << "Unsupported viz format found: " << viz_resource_format;
+ return nullptr;
+ }
+
+ uint64_t shared_mutex_acquire_key;
+ if (!d3d_image_backing->BeginAccessD3D12(&shared_mutex_acquire_key)) {
+ return nullptr;
+ }
+
+ WGPUTextureDescriptor texture_descriptor;
+ texture_descriptor.nextInChain = nullptr;
+ texture_descriptor.format = wgpu_format;
+ texture_descriptor.usage = usage;
+ texture_descriptor.dimension = WGPUTextureDimension_2D;
+ texture_descriptor.size = {size().width(), size().height(), 1};
+ texture_descriptor.arrayLayerCount = 1;
+ texture_descriptor.mipLevelCount = 1;
+ texture_descriptor.sampleCount = 1;
+
+ dawn_native::d3d12::ExternalImageDescriptorDXGISharedHandle descriptor;
+ descriptor.cTextureDescriptor = &texture_descriptor;
+ descriptor.isCleared = IsCleared();
+ descriptor.sharedHandle = shared_handle;
+ descriptor.acquireMutexKey = shared_mutex_acquire_key;
+ descriptor.isSwapChainTexture =
+ (d3d_image_backing->usage() &
+ SHARED_IMAGE_USAGE_WEBGPU_SWAP_CHAIN_TEXTURE);
+
+ texture_ = dawn_native::d3d12::WrapSharedHandle(device_, &descriptor);
+ if (texture_) {
+ // Keep a reference to the texture so that it stays valid (its content
+ // might be destroyed).
+ dawn_procs_.textureReference(texture_);
+ } else {
+ d3d_image_backing->EndAccessD3D12();
+ }
+
+ return texture_;
+}
+
+void SharedImageRepresentationDawnD3D::EndAccess() {
+ if (!texture_) {
+ return;
+ }
+
+ SharedImageBackingD3D* d3d_image_backing =
+ static_cast<SharedImageBackingD3D*>(backing());
+
+ if (dawn_native::IsTextureSubresourceInitialized(texture_, 0, 1, 0, 1)) {
+ SetCleared();
+ }
+
+ // All further operations on the textures are errors (they would be racy
+ // with other backings).
+ dawn_procs_.textureDestroy(texture_);
+
+ dawn_procs_.textureRelease(texture_);
+ texture_ = nullptr;
+
+ d3d_image_backing->EndAccessD3D12();
+}
+#endif // BUILDFLAG(USE_DAWN)
+
+} // namespace gpu
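The Dawn representation follows the same pattern: BeginScopedAccess() wraps the virtual BeginAccess()/EndAccess() pair, and the wrapped WGPUTexture is destroyed on EndAccess so later use of it is an error. A hedged usage sketch, assuming this Dawn revision still names the sampled usage WGPUTextureUsage_Sampled and that |dawn_representation| was produced by a SharedImageManager (both names are illustrative):

  // Sketch only: sample the shared image from WebGPU.
  auto dawn_access = dawn_representation->BeginScopedAccess(
      WGPUTextureUsage_Sampled,
      SharedImageRepresentation::AllowUnclearedAccess::kYes);
  if (!dawn_access)
    return;  // Dawn validation failed or the shared handle was not wrapped.
  WGPUTexture wgpu_texture = dawn_access->texture();
  // ... encode WebGPU commands that read |wgpu_texture| ...
  dawn_access.reset();  // EndAccess: the texture is destroyed and released.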
diff --git a/chromium/gpu/command_buffer/service/shared_image_representation_d3d.h b/chromium/gpu/command_buffer/service/shared_image_representation_d3d.h
new file mode 100644
index 00000000000..72e41f183a1
--- /dev/null
+++ b/chromium/gpu/command_buffer/service/shared_image_representation_d3d.h
@@ -0,0 +1,69 @@
+// Copyright 2019 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef GPU_COMMAND_BUFFER_SERVICE_SHARED_IMAGE_REPRESENTATION_D3D_H_
+#define GPU_COMMAND_BUFFER_SERVICE_SHARED_IMAGE_REPRESENTATION_D3D_H_
+
+#include "gpu/command_buffer/service/memory_tracking.h"
+#include "gpu/command_buffer/service/shared_image_backing_factory.h"
+#include "gpu/command_buffer/service/shared_image_manager.h"
+#include "gpu/command_buffer/service/shared_image_representation.h"
+#include "gpu/command_buffer/service/texture_manager.h"
+#include "ui/gl/buildflags.h"
+
+// Usage of BUILDFLAG(USE_DAWN) needs to be after the include for
+// ui/gl/buildflags.h
+#if BUILDFLAG(USE_DAWN)
+#include <dawn_native/D3D12Backend.h>
+#endif // BUILDFLAG(USE_DAWN)
+
+namespace gpu {
+
+// Representation of a SharedImageBackingD3D as a GL TexturePassthrough.
+class SharedImageRepresentationGLTexturePassthroughD3D
+ : public SharedImageRepresentationGLTexturePassthrough {
+ public:
+ SharedImageRepresentationGLTexturePassthroughD3D(
+ SharedImageManager* manager,
+ SharedImageBacking* backing,
+ MemoryTypeTracker* tracker,
+ scoped_refptr<gles2::TexturePassthrough> texture);
+ ~SharedImageRepresentationGLTexturePassthroughD3D() override;
+
+ const scoped_refptr<gles2::TexturePassthrough>& GetTexturePassthrough()
+ override;
+
+ private:
+ bool BeginAccess(GLenum mode) override;
+ void EndAccess() override;
+
+ scoped_refptr<gles2::TexturePassthrough> texture_;
+};
+
+// Representation of a SharedImageBackingD3D as a Dawn Texture
+#if BUILDFLAG(USE_DAWN)
+class SharedImageRepresentationDawnD3D : public SharedImageRepresentationDawn {
+ public:
+ SharedImageRepresentationDawnD3D(SharedImageManager* manager,
+ SharedImageBacking* backing,
+ MemoryTypeTracker* tracker,
+ WGPUDevice device);
+
+ ~SharedImageRepresentationDawnD3D() override;
+
+ WGPUTexture BeginAccess(WGPUTextureUsage usage) override;
+ void EndAccess() override;
+
+ private:
+ WGPUDevice device_;
+ WGPUTexture texture_ = nullptr;
+
+ // TODO(cwallez@chromium.org): Load procs only once when the factory is
+ // created and pass a pointer to them around?
+ DawnProcTable dawn_procs_;
+};
+#endif // BUILDFLAG(USE_DAWN)
+
+} // namespace gpu
+#endif // GPU_COMMAND_BUFFER_SERVICE_SHARED_IMAGE_REPRESENTATION_D3D_H_
diff --git a/chromium/gpu/command_buffer/service/shared_image_representation_dawn_ozone.cc b/chromium/gpu/command_buffer/service/shared_image_representation_dawn_ozone.cc
new file mode 100644
index 00000000000..f8d6e811db2
--- /dev/null
+++ b/chromium/gpu/command_buffer/service/shared_image_representation_dawn_ozone.cc
@@ -0,0 +1,110 @@
+// Copyright 2019 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "gpu/command_buffer/service/shared_image_representation_dawn_ozone.h"
+
+#include <dawn_native/VulkanBackend.h>
+
+#include <vulkan/vulkan.h>
+#include "base/memory/ref_counted.h"
+#include "base/memory/scoped_refptr.h"
+#include "gpu/command_buffer/service/memory_tracking.h"
+#include "gpu/command_buffer/service/shared_image_backing.h"
+#include "gpu/command_buffer/service/shared_image_manager.h"
+#include "gpu/command_buffer/service/shared_image_representation.h"
+#include "ui/gfx/geometry/size.h"
+#include "ui/gfx/native_pixmap.h"
+
+namespace gpu {
+
+SharedImageRepresentationDawnOzone::SharedImageRepresentationDawnOzone(
+ SharedImageManager* manager,
+ SharedImageBacking* backing,
+ MemoryTypeTracker* tracker,
+ WGPUDevice device,
+ WGPUTextureFormat format,
+ scoped_refptr<gfx::NativePixmap> pixmap,
+ scoped_refptr<base::RefCountedData<DawnProcTable>> dawn_procs)
+ : SharedImageRepresentationDawn(manager, backing, tracker),
+ device_(device),
+ format_(format),
+ pixmap_(pixmap),
+ dawn_procs_(dawn_procs) {
+ DCHECK(device_);
+
+ // Keep a reference to the device so that it stays valid (it might become
+  // lost, in which case operations will be noops).
+ dawn_procs_->data.deviceReference(device_);
+}
+
+SharedImageRepresentationDawnOzone::~SharedImageRepresentationDawnOzone() {
+ EndAccess();
+ dawn_procs_->data.deviceRelease(device_);
+}
+
+WGPUTexture SharedImageRepresentationDawnOzone::BeginAccess(
+ WGPUTextureUsage usage) {
+ // It doesn't make sense to have two overlapping BeginAccess calls on the same
+ // representation.
+ if (texture_) {
+ return nullptr;
+ }
+ DCHECK(pixmap_->GetNumberOfPlanes() == 1)
+ << "Multi-plane formats are not supported.";
+ // TODO(hob): Synchronize access to the dma-buf by waiting on all semaphores
+ // tracked by SharedImageBackingOzone.
+ gfx::Size pixmap_size = pixmap_->GetBufferSize();
+ WGPUTextureDescriptor texture_descriptor = {};
+ texture_descriptor.nextInChain = nullptr;
+ texture_descriptor.format = format_;
+ texture_descriptor.usage = usage;
+ texture_descriptor.dimension = WGPUTextureDimension_2D;
+ texture_descriptor.size = {pixmap_size.width(), pixmap_size.height(), 1};
+ texture_descriptor.arrayLayerCount = 1;
+ texture_descriptor.mipLevelCount = 1;
+ texture_descriptor.sampleCount = 1;
+
+ dawn_native::vulkan::ExternalImageDescriptorDmaBuf descriptor = {};
+ descriptor.cTextureDescriptor = &texture_descriptor;
+ descriptor.isCleared = IsCleared();
+ // Import the dma-buf into Dawn via the Vulkan backend. As per the Vulkan
+ // documentation, importing memory from a file descriptor transfers
+ // ownership of the fd from the application to the Vulkan implementation.
+ // Thus, we need to dup the fd so the fd corresponding to the dmabuf isn't
+ // closed twice (once by ScopedFD and once by the Vulkan implementation).
+ int fd = dup(pixmap_->GetDmaBufFd(0));
+ descriptor.memoryFD = fd;
+ descriptor.stride = pixmap_->GetDmaBufPitch(0);
+ descriptor.drmModifier = pixmap_->GetBufferFormatModifier();
+ descriptor.waitFDs = {};
+
+ texture_ = dawn_native::vulkan::WrapVulkanImage(device_, &descriptor);
+ if (texture_) {
+ // Keep a reference to the texture so that it stays valid (its content
+ // might be destroyed).
+ dawn_procs_->data.textureReference(texture_);
+ } else {
+ close(fd);
+ }
+
+ return texture_;
+}
+
+void SharedImageRepresentationDawnOzone::EndAccess() {
+ if (!texture_) {
+ return;
+ }
+
+ if (dawn_native::IsTextureSubresourceInitialized(texture_, 0, 1, 0, 1)) {
+ SetCleared();
+ }
+
+ // TODO(hob): Synchronize access to the dma-buf by exporting the VkSemaphore
+ // from the WebGPU texture.
+ dawn_procs_->data.textureDestroy(texture_);
+ dawn_procs_->data.textureRelease(texture_);
+ texture_ = nullptr;
+}
+
+} // namespace gpu
diff --git a/chromium/gpu/command_buffer/service/shared_image_representation_dawn_ozone.h b/chromium/gpu/command_buffer/service/shared_image_representation_dawn_ozone.h
new file mode 100644
index 00000000000..526961212d1
--- /dev/null
+++ b/chromium/gpu/command_buffer/service/shared_image_representation_dawn_ozone.h
@@ -0,0 +1,54 @@
+// Copyright 2019 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef GPU_COMMAND_BUFFER_SERVICE_SHARED_IMAGE_REPRESENTATION_DAWN_OZONE_H_
+#define GPU_COMMAND_BUFFER_SERVICE_SHARED_IMAGE_REPRESENTATION_DAWN_OZONE_H_
+
+#include <dawn/dawn_proc_table.h>
+#include <dawn/webgpu.h>
+
+#include "base/macros.h"
+#include "base/memory/ref_counted.h"
+#include "base/memory/scoped_refptr.h"
+#include "gpu/command_buffer/service/shared_image_backing.h"
+#include "gpu/command_buffer/service/shared_image_manager.h"
+#include "gpu/command_buffer/service/shared_image_representation.h"
+#include "ui/gfx/native_pixmap.h"
+
+namespace gpu {
+
+// SharedImageRepresentation of an Ozone-backed SharedImage to be used by Dawn.
+// On access, the pixmap backing the SharedImage is imported into Dawn for
+// rendering.
+class SharedImageRepresentationDawnOzone
+ : public SharedImageRepresentationDawn {
+ public:
+ SharedImageRepresentationDawnOzone(
+ SharedImageManager* manager,
+ SharedImageBacking* backing,
+ MemoryTypeTracker* tracker,
+ WGPUDevice device,
+ WGPUTextureFormat format,
+ scoped_refptr<gfx::NativePixmap> pixmap,
+ scoped_refptr<base::RefCountedData<DawnProcTable>> dawn_procs);
+
+ ~SharedImageRepresentationDawnOzone() override;
+
+ WGPUTexture BeginAccess(WGPUTextureUsage usage) override;
+
+ void EndAccess() override;
+
+ private:
+ const WGPUDevice device_;
+ const WGPUTextureFormat format_;
+ scoped_refptr<gfx::NativePixmap> pixmap_;
+ WGPUTexture texture_ = nullptr;
+ scoped_refptr<base::RefCountedData<DawnProcTable>> dawn_procs_;
+
+ DISALLOW_COPY_AND_ASSIGN(SharedImageRepresentationDawnOzone);
+};
+
+} // namespace gpu
+
+#endif // GPU_COMMAND_BUFFER_SERVICE_SHARED_IMAGE_REPRESENTATION_DAWN_OZONE_H_
diff --git a/chromium/gpu/command_buffer/service/shared_image_representation_gl_ozone.cc b/chromium/gpu/command_buffer/service/shared_image_representation_gl_ozone.cc
new file mode 100644
index 00000000000..0e705ba9506
--- /dev/null
+++ b/chromium/gpu/command_buffer/service/shared_image_representation_gl_ozone.cc
@@ -0,0 +1,111 @@
+// Copyright 2020 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "gpu/command_buffer/service/shared_image_representation_gl_ozone.h"
+
+#include "base/memory/ptr_util.h"
+#include "base/memory/scoped_refptr.h"
+#include "components/viz/common/resources/resource_format.h"
+#include "components/viz/common/resources/resource_format_utils.h"
+#include "gpu/command_buffer/service/memory_tracking.h"
+#include "gpu/command_buffer/service/shared_image_backing.h"
+#include "gpu/command_buffer/service/shared_image_manager.h"
+#include "gpu/command_buffer/service/shared_image_representation.h"
+#include "gpu/command_buffer/service/texture_manager.h"
+#include "ui/gfx/native_pixmap.h"
+#include "ui/gl/buffer_format_utils.h"
+#include "ui/gl/gl_bindings.h"
+#include "ui/gl/gl_fence.h"
+#include "ui/gl/gl_gl_api_implementation.h"
+#include "ui/gl/gl_image_native_pixmap.h"
+#include "ui/gl/gl_image_shared_memory.h"
+#include "ui/gl/gl_version_info.h"
+#include "ui/gl/scoped_binders.h"
+#include "ui/gl/trace_util.h"
+
+namespace gpu {
+
+// static
+std::unique_ptr<SharedImageRepresentationGLOzone>
+SharedImageRepresentationGLOzone::Create(
+ SharedImageManager* manager,
+ SharedImageBacking* backing,
+ MemoryTypeTracker* tracker,
+ scoped_refptr<gfx::NativePixmap> pixmap,
+ viz::ResourceFormat format) {
+ gl::GLApi* api = gl::g_current_gl_context;
+ DCHECK(api);
+
+ GLuint internal_format = viz::TextureStorageFormat(format);
+
+ GLuint gl_texture_service_id;
+ api->glGenTexturesFn(1, &gl_texture_service_id);
+ gl::ScopedTextureBinder binder(GL_TEXTURE_2D, gl_texture_service_id);
+
+ gfx::BufferFormat buffer_format = viz::BufferFormat(format);
+ auto image = base::MakeRefCounted<gl::GLImageNativePixmap>(
+ pixmap->GetBufferSize(), buffer_format);
+ if (!image->Initialize(pixmap)) {
+ DLOG(ERROR) << "Unable to initialize EGL image from pixmap";
+ return nullptr;
+ }
+
+ api->glTexParameteriFn(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
+ api->glTexParameteriFn(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
+ api->glTexParameteriFn(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
+ api->glTexParameteriFn(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
+ if (!image->BindTexImage(GL_TEXTURE_2D)) {
+ DLOG(ERROR) << "Unable to bind EGL image to GL_TEXTURE_2D";
+ return nullptr;
+ }
+
+ gles2::Texture* texture = new gles2::Texture(gl_texture_service_id);
+ texture->SetLightweightRef();
+ texture->SetTarget(GL_TEXTURE_2D, 1 /*max_levels=*/);
+ texture->sampler_state_.min_filter = GL_LINEAR;
+ texture->sampler_state_.mag_filter = GL_LINEAR;
+ texture->sampler_state_.wrap_t = GL_CLAMP_TO_EDGE;
+ texture->sampler_state_.wrap_s = GL_CLAMP_TO_EDGE;
+
+ GLenum gl_format = viz::GLDataFormat(format);
+ GLenum gl_type = viz::GLDataType(format);
+ texture->SetLevelInfo(GL_TEXTURE_2D, 0, internal_format,
+ pixmap->GetBufferSize().width(),
+ pixmap->GetBufferSize().height(), 1, 0, gl_format,
+ gl_type, backing->ClearedRect());
+ texture->SetLevelImage(GL_TEXTURE_2D, 0, image.get(), gles2::Texture::BOUND);
+ texture->SetImmutable(true, true);
+
+ return base::WrapUnique<SharedImageRepresentationGLOzone>(
+ new SharedImageRepresentationGLOzone(manager, backing, tracker, texture));
+}
+
+SharedImageRepresentationGLOzone::SharedImageRepresentationGLOzone(
+ SharedImageManager* manager,
+ SharedImageBacking* backing,
+ MemoryTypeTracker* tracker,
+ gles2::Texture* texture)
+ : SharedImageRepresentationGLTexture(manager, backing, tracker),
+ texture_(texture) {}
+
+SharedImageRepresentationGLOzone::~SharedImageRepresentationGLOzone() {
+ texture_->RemoveLightweightRef(has_context());
+}
+
+gles2::Texture* SharedImageRepresentationGLOzone::GetTexture() {
+ return texture_;
+}
+
+bool SharedImageRepresentationGLOzone::BeginAccess(GLenum mode) {
+ // TODO(hob): Synchronize access to the dma-buf by waiting on all semaphores
+ // tracked by SharedImageBackingOzone.
+ return true;
+}
+
+void SharedImageRepresentationGLOzone::EndAccess() {
+ // TODO(hob): Synchronize access to the dma-buf by signaling completion via
+ // glSignalSemaphoreEXT.
+}
+
+} // namespace gpu
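SharedImageRepresentationGLOzone::Create() imports the dma-buf pixmap through gl::GLImageNativePixmap and wraps it in a gles2::Texture, returning nullptr if the EGL image cannot be initialized or bound. A hedged sketch of how an Ozone backing might produce this representation; SharedImageBackingOzone and its |pixmap_| and |format_| members are assumptions not shown in this patch:

  // Sketch only: hand the backing's NativePixmap to the GL representation.
  std::unique_ptr<SharedImageRepresentationGLTexture>
  SharedImageBackingOzone::ProduceGLTexture(SharedImageManager* manager,
                                            MemoryTypeTracker* tracker) {
    return SharedImageRepresentationGLOzone::Create(manager, this, tracker,
                                                    pixmap_, format_);
  }

Callers should treat a null return as failure and fall back or report an error.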
diff --git a/chromium/gpu/command_buffer/service/shared_image_representation_gl_ozone.h b/chromium/gpu/command_buffer/service/shared_image_representation_gl_ozone.h
new file mode 100644
index 00000000000..b5ebe1bff27
--- /dev/null
+++ b/chromium/gpu/command_buffer/service/shared_image_representation_gl_ozone.h
@@ -0,0 +1,54 @@
+// Copyright 2020 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef GPU_COMMAND_BUFFER_SERVICE_SHARED_IMAGE_REPRESENTATION_GL_OZONE_H_
+#define GPU_COMMAND_BUFFER_SERVICE_SHARED_IMAGE_REPRESENTATION_GL_OZONE_H_
+
+#include "base/macros.h"
+#include "base/memory/scoped_refptr.h"
+#include "components/viz/common/resources/resource_format.h"
+#include "gpu/command_buffer/service/memory_tracking.h"
+#include "gpu/command_buffer/service/shared_image_backing.h"
+#include "gpu/command_buffer/service/shared_image_manager.h"
+#include "gpu/command_buffer/service/shared_image_representation.h"
+#include "gpu/command_buffer/service/texture_manager.h"
+#include "ui/gfx/native_pixmap.h"
+
+namespace gpu {
+
+// Representation of an Ozone-backed SharedImage that can be accessed as a
+// GL texture.
+class SharedImageRepresentationGLOzone
+ : public SharedImageRepresentationGLTexture {
+ public:
+ // Creates and initializes a SharedImageRepresentationGLOzone. On failure,
+ // returns nullptr.
+ static std::unique_ptr<SharedImageRepresentationGLOzone> Create(
+ SharedImageManager* manager,
+ SharedImageBacking* backing,
+ MemoryTypeTracker* tracker,
+ scoped_refptr<gfx::NativePixmap> pixmap,
+ viz::ResourceFormat format);
+
+ ~SharedImageRepresentationGLOzone() override;
+
+ // SharedImageRepresentationGLTexture implementation.
+ gles2::Texture* GetTexture() override;
+ bool BeginAccess(GLenum mode) override;
+ void EndAccess() override;
+
+ private:
+ SharedImageRepresentationGLOzone(SharedImageManager* manager,
+ SharedImageBacking* backing,
+ MemoryTypeTracker* tracker,
+ gles2::Texture* texture);
+
+ gles2::Texture* texture_;
+
+ DISALLOW_COPY_AND_ASSIGN(SharedImageRepresentationGLOzone);
+};
+
+} // namespace gpu
+
+#endif // GPU_COMMAND_BUFFER_SERVICE_SHARED_IMAGE_REPRESENTATION_GL_OZONE_H_
diff --git a/chromium/gpu/command_buffer/service/shared_image_representation_skia_gl.cc b/chromium/gpu/command_buffer/service/shared_image_representation_skia_gl.cc
index 4577518adc6..d23d5358e2f 100644
--- a/chromium/gpu/command_buffer/service/shared_image_representation_skia_gl.cc
+++ b/chromium/gpu/command_buffer/service/shared_image_representation_skia_gl.cc
@@ -4,6 +4,7 @@
#include "gpu/command_buffer/service/shared_image_representation_skia_gl.h"
+#include "base/memory/ptr_util.h"
#include "components/viz/common/resources/resource_format_utils.h"
#include "gpu/command_buffer/service/shared_context_state.h"
#include "gpu/command_buffer/service/skia_utils.h"
@@ -32,16 +33,16 @@ std::ostream& operator<<(std::ostream& os, RepresentationAccessMode mode) {
// static method.
std::unique_ptr<SharedImageRepresentationSkiaGL>
SharedImageRepresentationSkiaGL::Create(
- std::unique_ptr<SharedImageRepresentationGLTexture> gl_representation,
+ std::unique_ptr<SharedImageRepresentationGLTextureBase> gl_representation,
scoped_refptr<SharedContextState> context_state,
SharedImageManager* manager,
SharedImageBacking* backing,
MemoryTypeTracker* tracker) {
GrBackendTexture backend_texture;
if (!GetGrBackendTexture(context_state->feature_info(),
- gl_representation->GetTexture()->target(),
+ gl_representation->GetTextureBase()->target(),
backing->size(),
- gl_representation->GetTexture()->service_id(),
+ gl_representation->GetTextureBase()->service_id(),
backing->format(), &backend_texture)) {
return nullptr;
}
@@ -53,33 +54,8 @@ SharedImageRepresentationSkiaGL::Create(
std::move(context_state), manager, backing, tracker));
}
-std::unique_ptr<SharedImageRepresentationSkiaGL>
-SharedImageRepresentationSkiaGL::CreateForPassthrough(
- std::unique_ptr<SharedImageRepresentationGLTexturePassthrough>
- passthrough_representation,
- scoped_refptr<SharedContextState> context_state,
- SharedImageManager* manager,
- SharedImageBacking* backing,
- MemoryTypeTracker* tracker) {
- GrBackendTexture backend_texture;
- if (!GetGrBackendTexture(
- context_state->feature_info(),
- passthrough_representation->GetTexturePassthrough()->target(),
- backing->size(),
- passthrough_representation->GetTexturePassthrough()->service_id(),
- backing->format(), &backend_texture)) {
- return nullptr;
- }
- auto promise_texture = SkPromiseImageTexture::Make(backend_texture);
- if (!promise_texture)
- return nullptr;
- return base::WrapUnique(new SharedImageRepresentationSkiaGL(
- std::move(passthrough_representation), std::move(promise_texture),
- std::move(context_state), manager, backing, tracker));
-}
-
SharedImageRepresentationSkiaGL::SharedImageRepresentationSkiaGL(
- std::unique_ptr<SharedImageRepresentationGLTexture> gl_representation,
+ std::unique_ptr<SharedImageRepresentationGLTextureBase> gl_representation,
sk_sp<SkPromiseImageTexture> promise_texture,
scoped_refptr<SharedContextState> context_state,
SharedImageManager* manager,
@@ -95,27 +71,9 @@ SharedImageRepresentationSkiaGL::SharedImageRepresentationSkiaGL(
#endif
}
-SharedImageRepresentationSkiaGL::SharedImageRepresentationSkiaGL(
- std::unique_ptr<SharedImageRepresentationGLTexturePassthrough>
- passthrough_representation,
- sk_sp<SkPromiseImageTexture> promise_texture,
- scoped_refptr<SharedContextState> context_state,
- SharedImageManager* manager,
- SharedImageBacking* backing,
- MemoryTypeTracker* tracker)
- : SharedImageRepresentationSkia(manager, backing, tracker),
- passthrough_representation_(std::move(passthrough_representation)),
- promise_texture_(std::move(promise_texture)),
- context_state_(std::move(context_state)) {
- DCHECK(passthrough_representation_);
-#if DCHECK_IS_ON()
- context_ = gl::GLContext::GetCurrent();
-#endif
-}
-
SharedImageRepresentationSkiaGL::~SharedImageRepresentationSkiaGL() {
DCHECK_EQ(RepresentationAccessMode::kNone, mode_);
- DCHECK(!surface_);
+ surface_.reset();
}
sk_sp<SkSurface> SharedImageRepresentationSkiaGL::BeginWriteAccess(
@@ -124,43 +82,41 @@ sk_sp<SkSurface> SharedImageRepresentationSkiaGL::BeginWriteAccess(
std::vector<GrBackendSemaphore>* begin_semaphores,
std::vector<GrBackendSemaphore>* end_semaphores) {
DCHECK_EQ(mode_, RepresentationAccessMode::kNone);
- DCHECK(!surface_);
CheckContext();
- if (gl_representation_ &&
- !gl_representation_->BeginAccess(
+ if (!gl_representation_->BeginAccess(
GL_SHARED_IMAGE_ACCESS_MODE_READWRITE_CHROMIUM)) {
return nullptr;
- } else if (passthrough_representation_ &&
- !passthrough_representation_->BeginAccess(
- GL_SHARED_IMAGE_ACCESS_MODE_READWRITE_CHROMIUM)) {
- return nullptr;
}
+ mode_ = RepresentationAccessMode::kWrite;
+
+ if (surface_)
+ return surface_;
+
SkColorType sk_color_type = viz::ResourceFormatToClosestSkColorType(
/*gpu_compositing=*/true, format());
- auto surface = SkSurface::MakeFromBackendTextureAsRenderTarget(
+ // TODO(https://crbug.com/1054033): Switch back to
+ // MakeFromBackendTextureAsRenderTarget once we no longer use GLRendererCopier
+ // with surfaceless surfaces.
+ auto surface = SkSurface::MakeFromBackendTexture(
context_state_->gr_context(), promise_texture_->backendTexture(),
kTopLeft_GrSurfaceOrigin, final_msaa_count, sk_color_type,
backing()->color_space().ToSkColorSpace(), &surface_props);
- surface_ = surface.get();
- mode_ = RepresentationAccessMode::kWrite;
+ surface_ = surface;
return surface;
}
void SharedImageRepresentationSkiaGL::EndWriteAccess(sk_sp<SkSurface> surface) {
DCHECK_EQ(mode_, RepresentationAccessMode::kWrite);
DCHECK(surface_);
- DCHECK_EQ(surface.get(), surface_);
- DCHECK(surface->unique());
+ DCHECK_EQ(surface.get(), surface_.get());
- if (gl_representation_) {
- gl_representation_->EndAccess();
- } else {
- passthrough_representation_->EndAccess();
- }
+ surface.reset();
+ DCHECK(surface_->unique());
+
+ gl_representation_->EndAccess();
mode_ = RepresentationAccessMode::kNone;
- surface_ = nullptr;
}
sk_sp<SkPromiseImageTexture> SharedImageRepresentationSkiaGL::BeginReadAccess(
@@ -169,12 +125,8 @@ sk_sp<SkPromiseImageTexture> SharedImageRepresentationSkiaGL::BeginReadAccess(
DCHECK_EQ(mode_, RepresentationAccessMode::kNone);
CheckContext();
- if (gl_representation_ && !gl_representation_->BeginAccess(
- GL_SHARED_IMAGE_ACCESS_MODE_READ_CHROMIUM)) {
- return nullptr;
- } else if (passthrough_representation_ &&
- !passthrough_representation_->BeginAccess(
- GL_SHARED_IMAGE_ACCESS_MODE_READ_CHROMIUM)) {
+ if (!gl_representation_->BeginAccess(
+ GL_SHARED_IMAGE_ACCESS_MODE_READ_CHROMIUM)) {
return nullptr;
}
mode_ = RepresentationAccessMode::kRead;
@@ -185,13 +137,8 @@ void SharedImageRepresentationSkiaGL::EndReadAccess() {
DCHECK_EQ(mode_, RepresentationAccessMode::kRead);
CheckContext();
- if (gl_representation_) {
- gl_representation_->EndAccess();
- } else {
- passthrough_representation_->EndAccess();
- }
+ gl_representation_->EndAccess();
mode_ = RepresentationAccessMode::kNone;
- surface_ = nullptr;
}
void SharedImageRepresentationSkiaGL::CheckContext() {
@@ -200,4 +147,8 @@ void SharedImageRepresentationSkiaGL::CheckContext() {
#endif
}
-}  // namespace gpu
\ No newline at end of file
+bool SharedImageRepresentationSkiaGL::SupportsMultipleConcurrentReadAccess() {
+ return gl_representation_->SupportsMultipleConcurrentReadAccess();
+}
+
+} // namespace gpu
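With the surface now cached as an sk_sp, the write path expects the caller to drop its own reference before EndWriteAccess so the uniqueness DCHECK holds. A sketch of the scoped write pattern exposed by shared_image_representation.h (variable names illustrative):

  // Sketch only: draw into the shared image through Skia.
  std::vector<GrBackendSemaphore> begin_semaphores, end_semaphores;
  auto scoped_write = skia_representation->BeginScopedWriteAccess(
      &begin_semaphores, &end_semaphores,
      SharedImageRepresentation::AllowUnclearedAccess::kYes);
  if (!scoped_write)
    return;
  scoped_write->surface()->getCanvas()->clear(SK_ColorTRANSPARENT);
  scoped_write.reset();  // EndWriteAccess runs; the cached surface is unique again.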
diff --git a/chromium/gpu/command_buffer/service/shared_image_representation_skia_gl.h b/chromium/gpu/command_buffer/service/shared_image_representation_skia_gl.h
index 5e11a7e0ca6..f3f1111fe36 100644
--- a/chromium/gpu/command_buffer/service/shared_image_representation_skia_gl.h
+++ b/chromium/gpu/command_buffer/service/shared_image_representation_skia_gl.h
@@ -19,14 +19,7 @@ class GPU_GLES2_EXPORT SharedImageRepresentationSkiaGL
: public SharedImageRepresentationSkia {
public:
static std::unique_ptr<SharedImageRepresentationSkiaGL> Create(
- std::unique_ptr<SharedImageRepresentationGLTexture> gl_representation,
- scoped_refptr<SharedContextState> context_state,
- SharedImageManager* manager,
- SharedImageBacking* backing,
- MemoryTypeTracker* tracker);
- static std::unique_ptr<SharedImageRepresentationSkiaGL> CreateForPassthrough(
- std::unique_ptr<SharedImageRepresentationGLTexturePassthrough>
- passthrough_representation,
+ std::unique_ptr<SharedImageRepresentationGLTextureBase> gl_representation,
scoped_refptr<SharedContextState> context_state,
SharedImageManager* manager,
SharedImageBacking* backing,
@@ -45,17 +38,11 @@ class GPU_GLES2_EXPORT SharedImageRepresentationSkiaGL
std::vector<GrBackendSemaphore>* end_semaphores) override;
void EndReadAccess() override;
+ bool SupportsMultipleConcurrentReadAccess() override;
+
private:
SharedImageRepresentationSkiaGL(
- std::unique_ptr<SharedImageRepresentationGLTexture> gl_representation,
- sk_sp<SkPromiseImageTexture> promise_texture,
- scoped_refptr<SharedContextState> context_state,
- SharedImageManager* manager,
- SharedImageBacking* backing,
- MemoryTypeTracker* tracker);
- SharedImageRepresentationSkiaGL(
- std::unique_ptr<SharedImageRepresentationGLTexturePassthrough>
- passthrough_representation,
+ std::unique_ptr<SharedImageRepresentationGLTextureBase> gl_representation,
sk_sp<SkPromiseImageTexture> promise_texture,
scoped_refptr<SharedContextState> context_state,
SharedImageManager* manager,
@@ -64,12 +51,10 @@ class GPU_GLES2_EXPORT SharedImageRepresentationSkiaGL
void CheckContext();
- std::unique_ptr<SharedImageRepresentationGLTexture> gl_representation_;
- std::unique_ptr<SharedImageRepresentationGLTexturePassthrough>
- passthrough_representation_;
+ std::unique_ptr<SharedImageRepresentationGLTextureBase> gl_representation_;
sk_sp<SkPromiseImageTexture> promise_texture_;
scoped_refptr<SharedContextState> context_state_;
- SkSurface* surface_ = nullptr;
+ sk_sp<SkSurface> surface_;
RepresentationAccessMode mode_ = RepresentationAccessMode::kNone;
#if DCHECK_IS_ON()
gl::GLContext* context_;
diff --git a/chromium/gpu/command_buffer/service/shared_image_representation_unittest.cc b/chromium/gpu/command_buffer/service/shared_image_representation_unittest.cc
new file mode 100644
index 00000000000..26b6f780000
--- /dev/null
+++ b/chromium/gpu/command_buffer/service/shared_image_representation_unittest.cc
@@ -0,0 +1,248 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "gpu/command_buffer/service/shared_image_representation.h"
+
+#include "components/viz/common/resources/resource_format_utils.h"
+#include "gpu/command_buffer/common/mailbox.h"
+#include "gpu/command_buffer/common/shared_image_usage.h"
+#include "gpu/command_buffer/service/shared_context_state.h"
+#include "gpu/command_buffer/service/shared_image_backing.h"
+#include "gpu/command_buffer/service/shared_image_representation.h"
+#include "gpu/command_buffer/service/test_shared_image_backing.h"
+#include "gpu/command_buffer/service/texture_manager.h"
+#include "testing/gmock/include/gmock/gmock.h"
+#include "testing/gtest/include/gtest/gtest.h"
+#include "third_party/skia/include/core/SkPromiseImageTexture.h"
+#include "third_party/skia/include/gpu/GrBackendSurface.h"
+
+namespace gpu {
+
+class SharedImageRepresentationTest : public ::testing::Test {
+ public:
+ void SetUp() override {
+ tracker_ = std::make_unique<MemoryTypeTracker>(nullptr);
+ mailbox_ = Mailbox::GenerateForSharedImage();
+ auto format = viz::ResourceFormat::RGBA_8888;
+ gfx::Size size(256, 256);
+ auto color_space = gfx::ColorSpace::CreateSRGB();
+ uint32_t usage = SHARED_IMAGE_USAGE_GLES2;
+
+ auto backing = std::make_unique<TestSharedImageBacking>(
+ mailbox_, format, size, color_space, usage, 0 /* estimated_size */);
+ factory_ref_ = manager_.Register(std::move(backing), tracker_.get());
+ }
+
+ protected:
+ gpu::Mailbox mailbox_;
+ SharedImageManager manager_;
+ std::unique_ptr<MemoryTypeTracker> tracker_;
+ std::unique_ptr<SharedImageRepresentationFactoryRef> factory_ref_;
+};
+
+TEST_F(SharedImageRepresentationTest, GLTextureClearing) {
+ auto representation = manager_.ProduceGLTexture(mailbox_, tracker_.get());
+ EXPECT_FALSE(representation->IsCleared());
+
+ // We should not be able to begin access when |allow_uncleared| is false.
+ {
+ auto scoped_access = representation->BeginScopedAccess(
+ GL_SHARED_IMAGE_ACCESS_MODE_READWRITE_CHROMIUM,
+ SharedImageRepresentation::AllowUnclearedAccess::kNo);
+ EXPECT_FALSE(scoped_access);
+ }
+ EXPECT_FALSE(representation->IsCleared());
+
+ // Begin/End access should not modify clear status on its own.
+ {
+ auto scoped_access = representation->BeginScopedAccess(
+ GL_SHARED_IMAGE_ACCESS_MODE_READWRITE_CHROMIUM,
+ SharedImageRepresentation::AllowUnclearedAccess::kYes);
+ EXPECT_TRUE(scoped_access);
+ }
+ EXPECT_FALSE(representation->IsCleared());
+
+ // Clearing underlying GL texture should clear the SharedImage.
+ {
+ auto scoped_access = representation->BeginScopedAccess(
+ GL_SHARED_IMAGE_ACCESS_MODE_READWRITE_CHROMIUM,
+ SharedImageRepresentation::AllowUnclearedAccess::kYes);
+ ASSERT_TRUE(scoped_access);
+ representation->GetTexture()->SetLevelCleared(GL_TEXTURE_2D, 0,
+ true /* cleared */);
+ }
+ EXPECT_TRUE(representation->IsCleared());
+
+ // We can now begin access with |allow_uncleared| == false.
+ {
+ auto scoped_access = representation->BeginScopedAccess(
+ GL_SHARED_IMAGE_ACCESS_MODE_READWRITE_CHROMIUM,
+ SharedImageRepresentation::AllowUnclearedAccess::kNo);
+ EXPECT_TRUE(scoped_access);
+ }
+
+ // Reset the representation to uncleared. This should unclear the texture on
+ // BeginAccess.
+ representation->SetClearedRect(gfx::Rect());
+ {
+ auto scoped_access = representation->BeginScopedAccess(
+ GL_SHARED_IMAGE_ACCESS_MODE_READWRITE_CHROMIUM,
+ SharedImageRepresentation::AllowUnclearedAccess::kYes);
+ ASSERT_TRUE(scoped_access);
+ EXPECT_FALSE(
+ representation->GetTexture()->IsLevelCleared(GL_TEXTURE_2D, 0));
+ }
+ EXPECT_FALSE(representation->IsCleared());
+}
+
+TEST_F(SharedImageRepresentationTest, GLTexturePassthroughClearing) {
+ auto representation =
+ manager_.ProduceGLTexturePassthrough(mailbox_, tracker_.get());
+ EXPECT_FALSE(representation->IsCleared());
+
+ // We should not be able to begin access when |allow_uncleared| is false.
+ {
+ auto scoped_access = representation->BeginScopedAccess(
+ GL_SHARED_IMAGE_ACCESS_MODE_READWRITE_CHROMIUM,
+ SharedImageRepresentation::AllowUnclearedAccess::kNo);
+ EXPECT_FALSE(scoped_access);
+ }
+ EXPECT_FALSE(representation->IsCleared());
+
+ // Begin/End access will not clear the representation on its own.
+ {
+ auto scoped_access = representation->BeginScopedAccess(
+ GL_SHARED_IMAGE_ACCESS_MODE_READWRITE_CHROMIUM,
+ SharedImageRepresentation::AllowUnclearedAccess::kYes);
+ EXPECT_TRUE(scoped_access);
+ }
+ EXPECT_FALSE(representation->IsCleared());
+
+ // Clear the SharedImage.
+ representation->SetCleared();
+ EXPECT_TRUE(representation->IsCleared());
+
+  // We can now begin access with |allow_uncleared| == false.
+ {
+ auto scoped_access = representation->BeginScopedAccess(
+ GL_SHARED_IMAGE_ACCESS_MODE_READWRITE_CHROMIUM,
+ SharedImageRepresentation::AllowUnclearedAccess::kNo);
+ EXPECT_TRUE(scoped_access);
+ }
+}
+
+TEST_F(SharedImageRepresentationTest, SkiaClearing) {
+ auto representation = manager_.ProduceSkia(mailbox_, tracker_.get(), nullptr);
+ EXPECT_FALSE(representation->IsCleared());
+
+ // We should not be able to begin read access.
+ {
+ auto scoped_access =
+ representation->BeginScopedReadAccess(nullptr, nullptr);
+ EXPECT_FALSE(scoped_access);
+ }
+ EXPECT_FALSE(representation->IsCleared());
+
+ // We should not be able to begin write access when |allow_uncleared| is
+ // false.
+ {
+ auto scoped_access = representation->BeginScopedWriteAccess(
+ nullptr, nullptr, SharedImageRepresentation::AllowUnclearedAccess::kNo);
+ EXPECT_FALSE(scoped_access);
+ }
+ EXPECT_FALSE(representation->IsCleared());
+
+ // We can begin write access when |allow_uncleared| is true.
+ {
+ auto scoped_access = representation->BeginScopedWriteAccess(
+ nullptr, nullptr,
+ SharedImageRepresentation::AllowUnclearedAccess::kYes);
+ EXPECT_TRUE(scoped_access);
+ }
+ EXPECT_FALSE(representation->IsCleared());
+
+ // Clear the SharedImage.
+ representation->SetCleared();
+ EXPECT_TRUE(representation->IsCleared());
+
+ // We can now begin read access.
+ {
+ auto scoped_access =
+ representation->BeginScopedReadAccess(nullptr, nullptr);
+ EXPECT_TRUE(scoped_access);
+ }
+ EXPECT_TRUE(representation->IsCleared());
+
+ // We can also begin write access with |allow_uncleared| == false.
+ {
+ auto scoped_access = representation->BeginScopedWriteAccess(
+ nullptr, nullptr, SharedImageRepresentation::AllowUnclearedAccess::kNo);
+ EXPECT_TRUE(scoped_access);
+ }
+ EXPECT_TRUE(representation->IsCleared());
+}
+
+TEST_F(SharedImageRepresentationTest, DawnClearing) {
+ auto representation =
+ manager_.ProduceDawn(mailbox_, tracker_.get(), nullptr /* device */);
+ EXPECT_FALSE(representation->IsCleared());
+
+ // We should not be able to begin access with |allow_uncleared| == false.
+ {
+ auto scoped_access = representation->BeginScopedAccess(
+ WGPUTextureUsage_None,
+ SharedImageRepresentation::AllowUnclearedAccess::kNo);
+ EXPECT_FALSE(scoped_access);
+ }
+ EXPECT_FALSE(representation->IsCleared());
+
+ // We can begin access when |allow_uncleared| is true.
+ {
+ auto scoped_access = representation->BeginScopedAccess(
+ WGPUTextureUsage_None,
+ SharedImageRepresentation::AllowUnclearedAccess::kYes);
+ EXPECT_TRUE(scoped_access);
+ }
+ EXPECT_FALSE(representation->IsCleared());
+
+ // Clear the SharedImage.
+ representation->SetCleared();
+ EXPECT_TRUE(representation->IsCleared());
+
+ // We can also begin access with |allow_uncleared| == false.
+ {
+ auto scoped_access = representation->BeginScopedAccess(
+ WGPUTextureUsage_None,
+ SharedImageRepresentation::AllowUnclearedAccess::kNo);
+ EXPECT_TRUE(scoped_access);
+ }
+ EXPECT_TRUE(representation->IsCleared());
+}
+
+TEST_F(SharedImageRepresentationTest, OverlayClearing) {
+ auto representation = manager_.ProduceOverlay(mailbox_, tracker_.get());
+ EXPECT_FALSE(representation->IsCleared());
+
+  // We should not be able to begin read access.
+ {
+ auto scoped_access =
+ representation->BeginScopedReadAccess(false /* needs_gl_image */);
+ EXPECT_FALSE(scoped_access);
+ }
+ EXPECT_FALSE(representation->IsCleared());
+
+ // Clear the SharedImage.
+ representation->SetCleared();
+ EXPECT_TRUE(representation->IsCleared());
+
+ // We can now begin read access.
+ {
+ auto scoped_access =
+ representation->BeginScopedReadAccess(false /* needs_gl_image */);
+ EXPECT_TRUE(scoped_access);
+ }
+ EXPECT_TRUE(representation->IsCleared());
+}
+
+} // namespace gpu
diff --git a/chromium/gpu/command_buffer/service/shared_image_test_utils.cc b/chromium/gpu/command_buffer/service/shared_image_test_utils.cc
new file mode 100644
index 00000000000..a19b15ab07d
--- /dev/null
+++ b/chromium/gpu/command_buffer/service/shared_image_test_utils.cc
@@ -0,0 +1,64 @@
+// Copyright 2019 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "gpu/command_buffer/service/shared_image_test_utils.h"
+
+#include "gpu/command_buffer/common/mailbox.h"
+#include "gpu/command_buffer/service/shared_context_state.h"
+#include "gpu/command_buffer/service/shared_image_factory.h"
+#include "testing/gtest/include/gtest/gtest.h"
+#include "third_party/skia/include/core/SkPromiseImageTexture.h"
+#include "third_party/skia/include/gpu/GrBackendSemaphore.h"
+#include "ui/gfx/geometry/size.h"
+
+namespace gpu {
+
+std::vector<uint8_t> ReadPixels(
+ Mailbox mailbox,
+ gfx::Size size,
+ SharedContextState* context_state,
+ SharedImageRepresentationFactory* representation_factory) {
+ DCHECK(context_state);
+ EXPECT_TRUE(
+ context_state->MakeCurrent(context_state->surface(), true /* needs_gl*/));
+ auto skia_representation =
+ representation_factory->ProduceSkia(mailbox, context_state);
+ EXPECT_TRUE(skia_representation);
+ std::vector<GrBackendSemaphore> begin_semaphores;
+ std::vector<GrBackendSemaphore> end_semaphores;
+ std::unique_ptr<SharedImageRepresentationSkia::ScopedReadAccess>
+ scoped_read_access;
+ scoped_read_access = skia_representation->BeginScopedReadAccess(
+ &begin_semaphores, &end_semaphores);
+ auto* promise_texture = scoped_read_access->promise_image_texture();
+ EXPECT_EQ(0u, begin_semaphores.size());
+ EXPECT_EQ(0u, end_semaphores.size());
+ EXPECT_TRUE(promise_texture);
+ GrBackendTexture backend_texture = promise_texture->backendTexture();
+ EXPECT_TRUE(backend_texture.isValid());
+ EXPECT_EQ(size.width(), backend_texture.width());
+ EXPECT_EQ(size.height(), backend_texture.height());
+
+ // Create an Sk Image from GrBackendTexture.
+ auto sk_image = SkImage::MakeFromTexture(
+ context_state->gr_context(), promise_texture->backendTexture(),
+ kTopLeft_GrSurfaceOrigin, kRGBA_8888_SkColorType, kOpaque_SkAlphaType,
+ nullptr);
+
+ SkImageInfo dst_info =
+ SkImageInfo::Make(size.width(), size.height(), kRGBA_8888_SkColorType,
+ kOpaque_SkAlphaType, nullptr);
+
+ const int num_pixels = size.width() * size.height();
+ std::vector<uint8_t> dst_pixels(num_pixels * 4);
+
+ // Read back pixels from Sk Image.
+ EXPECT_TRUE(sk_image->readPixels(dst_info, dst_pixels.data(),
+ dst_info.minRowBytes(), 0, 0));
+ scoped_read_access.reset();
+
+ return dst_pixels;
+}
+
+} // namespace gpu
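ReadPixels() is a test helper that round-trips a SharedImage through a Skia read access and SkImage::readPixels. A hedged sketch of its use in a test body, assuming a 256x256 RGBA image was already written through |mailbox| earlier in the test (names illustrative):

  // Sketch only: read back the shared image and check the first pixel.
  std::vector<uint8_t> pixels = ReadPixels(
      mailbox, gfx::Size(256, 256), context_state.get(), &rep_factory);
  ASSERT_EQ(256u * 256u * 4u, pixels.size());
  EXPECT_EQ(255, pixels[0]);  // Red channel of the top-left pixel.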
diff --git a/chromium/gpu/command_buffer/service/shared_image_test_utils.h b/chromium/gpu/command_buffer/service/shared_image_test_utils.h
new file mode 100644
index 00000000000..eefd28ebf4e
--- /dev/null
+++ b/chromium/gpu/command_buffer/service/shared_image_test_utils.h
@@ -0,0 +1,28 @@
+// Copyright 2019 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef GPU_COMMAND_BUFFER_SERVICE_SHARED_IMAGE_TEST_UTILS_H_
+#define GPU_COMMAND_BUFFER_SERVICE_SHARED_IMAGE_TEST_UTILS_H_
+
+#include <cstdint>
+#include <vector>
+
+namespace gfx {
+class Size;
+} // namespace gfx
+
+namespace gpu {
+class SharedContextState;
+class SharedImageRepresentationFactory;
+struct Mailbox;
+
+std::vector<uint8_t> ReadPixels(
+ Mailbox mailbox,
+ gfx::Size size,
+ SharedContextState* context_state,
+ SharedImageRepresentationFactory* representation_factory);
+
+} // namespace gpu
+
+#endif // GPU_COMMAND_BUFFER_SERVICE_SHARED_IMAGE_TEST_UTILS_H_
diff --git a/chromium/gpu/command_buffer/service/shared_image_video.cc b/chromium/gpu/command_buffer/service/shared_image_video.cc
index 990245d6b70..ba043e36a48 100644
--- a/chromium/gpu/command_buffer/service/shared_image_video.cc
+++ b/chromium/gpu/command_buffer/service/shared_image_video.cc
@@ -24,6 +24,7 @@
#include "gpu/vulkan/vulkan_device_queue.h"
#include "gpu/vulkan/vulkan_fence_helper.h"
#include "gpu/vulkan/vulkan_function_pointers.h"
+#include "gpu/vulkan/vulkan_image.h"
#include "gpu/vulkan/vulkan_implementation.h"
#include "gpu/vulkan/vulkan_util.h"
#include "third_party/skia/include/core/SkPromiseImageTexture.h"
@@ -32,86 +33,6 @@
namespace gpu {
-namespace {
-sk_sp<SkPromiseImageTexture> CreatePromiseTextureVideo(
- viz::VulkanContextProvider* context_provider,
- base::android::ScopedHardwareBufferHandle ahb_handle,
- gfx::Size size,
- viz::ResourceFormat format) {
- VulkanImplementation* vk_implementation =
- context_provider->GetVulkanImplementation();
- VkDevice vk_device = context_provider->GetDeviceQueue()->GetVulkanDevice();
- VkPhysicalDevice vk_physical_device =
- context_provider->GetDeviceQueue()->GetVulkanPhysicalDevice();
-
- // Create a VkImage and import AHB.
- VkImage vk_image;
- VkImageCreateInfo vk_image_info;
- VkDeviceMemory vk_device_memory;
- VkDeviceSize mem_allocation_size;
- VulkanYCbCrInfo ycbcr_info;
- if (!vk_implementation->CreateVkImageAndImportAHB(
- vk_device, vk_physical_device, size, std::move(ahb_handle), &vk_image,
- &vk_image_info, &vk_device_memory, &mem_allocation_size,
- &ycbcr_info)) {
- return nullptr;
- }
-
- GrVkYcbcrConversionInfo fYcbcrConversionInfo(
- static_cast<VkSamplerYcbcrModelConversion>(
- ycbcr_info.suggested_ycbcr_model),
- static_cast<VkSamplerYcbcrRange>(ycbcr_info.suggested_ycbcr_range),
- static_cast<VkChromaLocation>(ycbcr_info.suggested_xchroma_offset),
- static_cast<VkChromaLocation>(ycbcr_info.suggested_ychroma_offset),
- VK_FILTER_LINEAR, // VkFilter
- 0, // VkBool32 forceExplicitReconstruction
- ycbcr_info.external_format,
- static_cast<VkFormatFeatureFlags>(ycbcr_info.format_features));
-
- // Create backend texture from the VkImage.
- GrVkAlloc alloc = {vk_device_memory, 0, mem_allocation_size, 0};
- GrVkImageInfo vk_info = {vk_image,
- alloc,
- vk_image_info.tiling,
- vk_image_info.initialLayout,
- vk_image_info.format,
- vk_image_info.mipLevels,
- VK_QUEUE_FAMILY_EXTERNAL,
- GrProtected::kNo,
- fYcbcrConversionInfo};
-
- // TODO(bsalomon): Determine whether it makes sense to attempt to reuse this
- // if the vk_info stays the same on subsequent calls.
- auto promise_texture = SkPromiseImageTexture::Make(
- GrBackendTexture(size.width(), size.height(), vk_info));
- if (!promise_texture) {
- vkDestroyImage(vk_device, vk_image, nullptr);
- vkFreeMemory(vk_device, vk_device_memory, nullptr);
- return nullptr;
- }
-
- return promise_texture;
-}
-
-void DestroyVkPromiseTextureVideo(
- viz::VulkanContextProvider* context_provider,
- sk_sp<SkPromiseImageTexture> promise_texture) {
- DCHECK(promise_texture);
- DCHECK(promise_texture->unique());
-
- GrVkImageInfo vk_image_info;
- bool result =
- promise_texture->backendTexture().getVkImageInfo(&vk_image_info);
- DCHECK(result);
-
- VulkanFenceHelper* fence_helper =
- context_provider->GetDeviceQueue()->GetFenceHelper();
- fence_helper->EnqueueImageCleanupForSubmittedWork(
- vk_image_info.fImage, vk_image_info.fAlloc.fMemory);
-}
-
-} // namespace
-
SharedImageVideo::SharedImageVideo(
const Mailbox& mailbox,
const gfx::Size& size,
@@ -146,11 +67,14 @@ SharedImageVideo::~SharedImageVideo() {
context_state_->RemoveContextLostObserver(this);
}
-bool SharedImageVideo::IsCleared() const {
- return true;
+gfx::Rect SharedImageVideo::ClearedRect() const {
+ // SharedImageVideo objects are always created from pre-initialized textures
+ // provided by the media decoder. Always treat these as cleared (return the
+ // full rectangle).
+ return gfx::Rect(size());
}
-void SharedImageVideo::SetCleared() {}
+void SharedImageVideo::SetClearedRect(const gfx::Rect& cleared_rect) {}
void SharedImageVideo::Update(std::unique_ptr<gfx::GpuFence> in_fence) {
DCHECK(!in_fence);
@@ -163,8 +87,6 @@ bool SharedImageVideo::ProduceLegacyMailbox(MailboxManager* mailbox_manager) {
return true;
}
-void SharedImageVideo::Destroy() {}
-
size_t SharedImageVideo::EstimatedSizeForMemTracking() const {
  // This backing contributes to gpu memory only if it's bound to the texture and
// not when the backing is created.
@@ -296,10 +218,13 @@ class SharedImageRepresentationVideoSkiaVk
DCHECK(end_access_semaphore_ == VK_NULL_HANDLE);
    // |promise_texture_| could be null if a read was never begun.
- if (!promise_texture_)
+ if (!vulkan_image_)
return;
- DestroyVkPromiseTextureVideo(context_state_->vk_context_provider(),
- std::move(promise_texture_));
+ VulkanFenceHelper* fence_helper = context_state_->vk_context_provider()
+ ->GetDeviceQueue()
+ ->GetFenceHelper();
+ fence_helper->EnqueueVulkanObjectCleanupForSubmittedWork(
+ std::move(vulkan_image_));
}
sk_sp<SkSurface> BeginWriteAccess(
@@ -317,20 +242,19 @@ class SharedImageRepresentationVideoSkiaVk
sk_sp<SkPromiseImageTexture> BeginReadAccess(
std::vector<GrBackendSemaphore>* begin_semaphores,
std::vector<GrBackendSemaphore>* end_semaphores) override {
+ DCHECK(!scoped_hardware_buffer_);
+ auto* video_backing = static_cast<SharedImageVideo*>(backing());
+ DCHECK(video_backing);
+ auto* stream_texture_sii = video_backing->stream_texture_sii_.get();
+
+ // GetAHardwareBuffer() renders the latest image and gets AHardwareBuffer
+ // from it.
+ scoped_hardware_buffer_ = stream_texture_sii->GetAHardwareBuffer();
if (!scoped_hardware_buffer_) {
- auto* video_backing = static_cast<SharedImageVideo*>(backing());
- DCHECK(video_backing);
- auto* stream_texture_sii = video_backing->stream_texture_sii_.get();
-
- // GetAHardwareBuffer() renders the latest image and gets AHardwareBuffer
- // from it.
- scoped_hardware_buffer_ = stream_texture_sii->GetAHardwareBuffer();
- if (!scoped_hardware_buffer_) {
- LOG(ERROR) << "Failed to get the hardware buffer.";
- return nullptr;
- }
- DCHECK(scoped_hardware_buffer_->buffer());
+ LOG(ERROR) << "Failed to get the hardware buffer.";
+ return nullptr;
}
+ DCHECK(scoped_hardware_buffer_->buffer());
  // Wait on the sync fd attached to the buffer to make sure the buffer is
// ready before the read. This is done by inserting the sync fd semaphore
@@ -340,16 +264,41 @@ class SharedImageRepresentationVideoSkiaVk
return nullptr;
}
- if (!promise_texture_) {
- // Create the promise texture.
- promise_texture_ = CreatePromiseTextureVideo(
- context_state_->vk_context_provider(),
- scoped_hardware_buffer_->TakeBuffer(), size(), format());
+ if (!vulkan_image_) {
+ DCHECK(!promise_texture_);
+ gfx::GpuMemoryBufferHandle gmb_handle(
+ scoped_hardware_buffer_->TakeBuffer());
+ auto* device_queue =
+ context_state_->vk_context_provider()->GetDeviceQueue();
+ vulkan_image_ = VulkanImage::CreateFromGpuMemoryBufferHandle(
+ device_queue, std::move(gmb_handle), size(), ToVkFormat(format()),
+ 0 /* usage */);
+ if (!vulkan_image_)
+ return nullptr;
+
+ // We always use VK_IMAGE_TILING_OPTIMAL while creating the vk image in
+ // VulkanImplementationAndroid::CreateVkImageAndImportAHB. Hence pass the
+ // tiling parameter as VK_IMAGE_TILING_OPTIMAL to below call rather than
+ // passing |vk_image_info.tiling|. This is also to ensure that the promise
+      // image created here at [1] as well as the fulfill image created via the
+ // current function call are consistent and both are using
+ // VK_IMAGE_TILING_OPTIMAL. [1] -
+ // https://cs.chromium.org/chromium/src/components/viz/service/display_embedder/skia_output_surface_impl.cc?rcl=db5ffd448ba5d66d9d3c5c099754e5067c752465&l=789.
+ DCHECK_EQ(static_cast<int32_t>(vulkan_image_->image_tiling()),
+ static_cast<int32_t>(VK_IMAGE_TILING_OPTIMAL));
+
+ // TODO(bsalomon): Determine whether it makes sense to attempt to reuse
+ // this if the vk_info stays the same on subsequent calls.
+ promise_texture_ = SkPromiseImageTexture::Make(
+ GrBackendTexture(size().width(), size().height(),
+ CreateGrVkImageInfo(vulkan_image_.get())));
+ DCHECK(promise_texture_);
}
return promise_texture_;
}
void EndReadAccess() override {
+ DCHECK(scoped_hardware_buffer_);
DCHECK(end_access_semaphore_ != VK_NULL_HANDLE);
SemaphoreHandle semaphore_handle = vk_implementation()->GetSemaphoreHandle(
@@ -364,6 +313,7 @@ class SharedImageRepresentationVideoSkiaVk
fence_helper()->EnqueueSemaphoreCleanupForSubmittedWork(
end_access_semaphore_);
end_access_semaphore_ = VK_NULL_HANDLE;
+ scoped_hardware_buffer_ = nullptr;
}
private:
@@ -423,6 +373,7 @@ class SharedImageRepresentationVideoSkiaVk
->GetFenceHelper();
}
+ std::unique_ptr<VulkanImage> vulkan_image_;
sk_sp<SkPromiseImageTexture> promise_texture_;
scoped_refptr<SharedContextState> context_state_;
std::unique_ptr<base::android::ScopedHardwareBufferFenceSync>
@@ -494,14 +445,24 @@ std::unique_ptr<SharedImageRepresentationSkia> SharedImageVideo::ProduceSkia(
}
DCHECK(context_state->GrContextIsGL());
- auto* texture = stream_texture_sii_->GetTexture();
- DCHECK(texture);
+ auto* texture_base = stream_texture_sii_->GetTextureBase();
+ DCHECK(texture_base);
- // In GL mode, create the SharedImageRepresentationGLTextureVideo
+ // In GL mode, create the SharedImageRepresentationGLTexture*Video
// representation to use with SharedImageRepresentationVideoSkiaGL.
- auto gl_representation =
- std::make_unique<SharedImageRepresentationGLTextureVideo>(
- manager, this, tracker, texture);
+ std::unique_ptr<gpu::SharedImageRepresentationGLTextureBase>
+ gl_representation;
+ if (texture_base->GetType() == gpu::TextureBase::Type::kValidated) {
+ gl_representation =
+ std::make_unique<SharedImageRepresentationGLTextureVideo>(
+ manager, this, tracker, gles2::Texture::CheckedCast(texture_base));
+ } else {
+ gl_representation =
+ std::make_unique<SharedImageRepresentationGLTexturePassthroughVideo>(
+ manager, this, tracker,
+ gles2::TexturePassthrough::CheckedCast(texture_base));
+ }
+
return SharedImageRepresentationSkiaGL::Create(std::move(gl_representation),
std::move(context_state),
manager, this, tracker);
@@ -523,7 +484,7 @@ class SharedImageRepresentationOverlayVideo
stream_image_(backing->stream_texture_sii_) {}
protected:
- void BeginReadAccess() override {
+ bool BeginReadAccess() override {
// A |CodecImage| is already in a SurfaceView, render content to the
// overlay.
if (!stream_image_->HasTextureOwner()) {
@@ -531,6 +492,7 @@ class SharedImageRepresentationOverlayVideo
"SharedImageRepresentationOverlayVideo::BeginReadAccess");
stream_image_->RenderToOverlay();
}
+ return true;
}
void EndReadAccess() override {}
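
Tying together the SharedImageRepresentationVideoSkiaVk changes in this file: the read path now imports the decoder's AHardwareBuffer straight into Vulkan and only then wraps it for Skia, deferring destruction to the fence helper. A minimal sketch of that flow (error handling trimmed; names as in the representation above):

  // Sketch only; assumes |scoped_hardware_buffer_|, size() and format() as in
  // SharedImageRepresentationVideoSkiaVk above.
  gfx::GpuMemoryBufferHandle gmb_handle(scoped_hardware_buffer_->TakeBuffer());
  auto* device_queue = context_state_->vk_context_provider()->GetDeviceQueue();
  std::unique_ptr<VulkanImage> vulkan_image =
      VulkanImage::CreateFromGpuMemoryBufferHandle(
          device_queue, std::move(gmb_handle), size(), ToVkFormat(format()),
          /*usage=*/0);
  if (!vulkan_image)
    return nullptr;
  // CreateGrVkImageInfo() (added to skia_utils below) translates the
  // VulkanImage, including its YCbCr conversion info, into a GrVkImageInfo.
  auto promise_texture = SkPromiseImageTexture::Make(GrBackendTexture(
      size().width(), size().height(),
      CreateGrVkImageInfo(vulkan_image.get())));
  // On destruction the image is handed to the VulkanFenceHelper rather than
  // destroyed immediately, so submitted GPU work can complete first.
  device_queue->GetFenceHelper()->EnqueueVulkanObjectCleanupForSubmittedWork(
      std::move(vulkan_image));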
diff --git a/chromium/gpu/command_buffer/service/shared_image_video.h b/chromium/gpu/command_buffer/service/shared_image_video.h
index 58800e20eb5..22cb260eec9 100644
--- a/chromium/gpu/command_buffer/service/shared_image_video.h
+++ b/chromium/gpu/command_buffer/service/shared_image_video.h
@@ -42,11 +42,10 @@ class GPU_GLES2_EXPORT SharedImageVideo
~SharedImageVideo() override;
// SharedImageBacking implementation.
- bool IsCleared() const override;
- void SetCleared() override;
+ gfx::Rect ClearedRect() const override;
+ void SetClearedRect(const gfx::Rect& cleared_rect) override;
void Update(std::unique_ptr<gfx::GpuFence> in_fence) override;
bool ProduceLegacyMailbox(MailboxManager* mailbox_manager) override;
- void Destroy() override;
size_t EstimatedSizeForMemTracking() const override;
// SharedContextState::ContextLostObserver implementation.
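
The interface change above replaces the boolean IsCleared()/SetCleared() pair with a rectangle, so a backing can report partial clears. A hypothetical backing that actually tracks the region could look like this (sketch only; MyBacking and |cleared_rect_| are illustrative, not part of this patch):

  gfx::Rect MyBacking::ClearedRect() const {
    // gfx::Rect(size()) means "fully cleared"; an empty rect means uncleared.
    return cleared_rect_;
  }

  void MyBacking::SetClearedRect(const gfx::Rect& cleared_rect) {
    cleared_rect_ = cleared_rect;
  }

SharedImageVideo simply returns gfx::Rect(size()) and ignores SetClearedRect(), since decoded frames arrive fully initialized.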
diff --git a/chromium/gpu/command_buffer/service/skia_utils.cc b/chromium/gpu/command_buffer/service/skia_utils.cc
index f95190590e1..b79adbb0f6d 100644
--- a/chromium/gpu/command_buffer/service/skia_utils.cc
+++ b/chromium/gpu/command_buffer/service/skia_utils.cc
@@ -20,6 +20,7 @@
#include "gpu/vulkan/vulkan_device_queue.h"
#include "gpu/vulkan/vulkan_fence_helper.h"
#include "gpu/vulkan/vulkan_function_pointers.h"
+#include "gpu/vulkan/vulkan_image.h"
#endif
namespace gpu {
@@ -42,6 +43,11 @@ void CleanupAfterSkiaFlush(void* context) {
template <class T>
void DeleteSkObject(SharedContextState* context_state, sk_sp<T> sk_object) {
DCHECK(sk_object && sk_object->unique());
+
+ if (context_state->context_lost())
+ return;
+ DCHECK(!context_state->gr_context()->abandoned());
+
if (!context_state->GrContextIsVulkan())
return;
@@ -128,6 +134,11 @@ void AddVulkanCleanupTaskForSkiaFlush(
void DeleteGrBackendTexture(SharedContextState* context_state,
GrBackendTexture* backend_texture) {
DCHECK(backend_texture && backend_texture->isValid());
+
+ if (context_state->context_lost())
+ return;
+ DCHECK(!context_state->gr_context()->abandoned());
+
if (!context_state->GrContextIsVulkan()) {
context_state->gr_context()->deleteBackendTexture(
std::move(*backend_texture));
@@ -140,9 +151,8 @@ void DeleteGrBackendTexture(SharedContextState* context_state,
fence_helper->EnqueueCleanupTaskForSubmittedWork(base::BindOnce(
[](const sk_sp<GrContext>& gr_context, GrBackendTexture backend_texture,
gpu::VulkanDeviceQueue* device_queue, bool is_lost) {
- // If underlying Vulkan device is destroyed, gr_context should have been
- // abandoned, the deleteBackendTexture() should be noop.
- gr_context->deleteBackendTexture(std::move(backend_texture));
+ if (!gr_context->abandoned())
+ gr_context->deleteBackendTexture(std::move(backend_texture));
},
sk_ref_sp(context_state->gr_context()), std::move(*backend_texture)));
#endif
@@ -158,6 +168,20 @@ void DeleteSkSurface(SharedContextState* context_state,
}
#if BUILDFLAG(ENABLE_VULKAN)
+GrVkImageInfo CreateGrVkImageInfo(VulkanImage* image) {
+ DCHECK(image);
+ VkPhysicalDevice physical_device =
+ image->device_queue()->GetVulkanPhysicalDevice();
+ GrVkYcbcrConversionInfo gr_ycbcr_info = CreateGrVkYcbcrConversionInfo(
+ physical_device, image->image_tiling(), image->ycbcr_info());
+ GrVkAlloc alloc(image->device_memory(), /*offset=*/0, image->device_size(),
+ /*flags=*/0);
+ bool is_protected = image->flags() & VK_IMAGE_CREATE_PROTECTED_BIT;
+ return GrVkImageInfo(
+ image->image(), alloc, image->image_tiling(), image->image_layout(),
+ image->format(), /*levelCount=*/1, image->queue_family_index(),
+ is_protected ? GrProtected::kYes : GrProtected::kNo, gr_ycbcr_info);
+}
GrVkYcbcrConversionInfo CreateGrVkYcbcrConversionInfo(
VkPhysicalDevice physical_device,
@@ -185,6 +209,17 @@ GrVkYcbcrConversionInfo CreateGrVkYcbcrConversionInfo(
: format_props.optimalTilingFeatures;
}
+ // As per the spec here [1], if the format does not support
+ // VK_FORMAT_FEATURE_SAMPLED_IMAGE_YCBCR_CONVERSION_LINEAR_FILTER_BIT,
+ // chromaFilter must be VK_FILTER_NEAREST.
+ // [1] -
+ // https://www.khronos.org/registry/vulkan/specs/1.1-extensions/man/html/VkSamplerYcbcrConversionCreateInfo.html.
+ VkFilter chroma_filter =
+ (format_features &
+ VK_FORMAT_FEATURE_SAMPLED_IMAGE_YCBCR_CONVERSION_LINEAR_FILTER_BIT)
+ ? VK_FILTER_LINEAR
+ : VK_FILTER_NEAREST;
+
return GrVkYcbcrConversionInfo(
vk_format, ycbcr_info->external_format,
static_cast<VkSamplerYcbcrModelConversion>(
@@ -192,7 +227,7 @@ GrVkYcbcrConversionInfo CreateGrVkYcbcrConversionInfo(
static_cast<VkSamplerYcbcrRange>(ycbcr_info->suggested_ycbcr_range),
static_cast<VkChromaLocation>(ycbcr_info->suggested_xchroma_offset),
static_cast<VkChromaLocation>(ycbcr_info->suggested_ychroma_offset),
- static_cast<VkFilter>(VK_FILTER_LINEAR),
+ chroma_filter,
/*forceExplicitReconstruction=*/false, format_features);
}
diff --git a/chromium/gpu/command_buffer/service/skia_utils.h b/chromium/gpu/command_buffer/service/skia_utils.h
index a383163b37b..0208f72b4a5 100644
--- a/chromium/gpu/command_buffer/service/skia_utils.h
+++ b/chromium/gpu/command_buffer/service/skia_utils.h
@@ -32,6 +32,10 @@ class VulkanContextProvider;
namespace gpu {
+#if BUILDFLAG(ENABLE_VULKAN)
+class VulkanImage;
+#endif
+
namespace gles2 {
class FeatureInfo;
} // namespace gles2
@@ -74,6 +78,8 @@ GPU_GLES2_EXPORT void DeleteSkSurface(SharedContextState* context_state,
sk_sp<SkSurface> sk_surface);
#if BUILDFLAG(ENABLE_VULKAN)
+GPU_GLES2_EXPORT GrVkImageInfo CreateGrVkImageInfo(VulkanImage* image);
+
GPU_GLES2_EXPORT GrVkYcbcrConversionInfo CreateGrVkYcbcrConversionInfo(
VkPhysicalDevice physical_device,
VkImageTiling tiling,
diff --git a/chromium/gpu/command_buffer/service/stream_texture_shared_image_interface.h b/chromium/gpu/command_buffer/service/stream_texture_shared_image_interface.h
index 9c048f72d94..e1621e572f3 100644
--- a/chromium/gpu/command_buffer/service/stream_texture_shared_image_interface.h
+++ b/chromium/gpu/command_buffer/service/stream_texture_shared_image_interface.h
@@ -10,10 +10,7 @@
namespace gpu {
class TextureOwner;
-
-namespace gles2 {
-class Texture;
-} // namespace gles2
+class TextureBase;
// This class is a specialized GLImage that lets SharedImageVideo draw video
// frames.
@@ -32,7 +29,7 @@ class GPU_GLES2_EXPORT StreamTextureSharedImageInterface
// texture.
virtual void UpdateAndBindTexImage() = 0;
virtual bool HasTextureOwner() const = 0;
- virtual gles2::Texture* GetTexture() const = 0;
+ virtual TextureBase* GetTextureBase() const = 0;
// Notify the texture of overlay decision, When overlay promotion is true,
// this also sets the bounds of where the overlay is.
diff --git a/chromium/gpu/command_buffer/service/surface_texture_gl_owner_unittest.cc b/chromium/gpu/command_buffer/service/surface_texture_gl_owner_unittest.cc
index 2e1509fd461..fc6cd44b66b 100644
--- a/chromium/gpu/command_buffer/service/surface_texture_gl_owner_unittest.cc
+++ b/chromium/gpu/command_buffer/service/surface_texture_gl_owner_unittest.cc
@@ -36,8 +36,7 @@ class SurfaceTextureGLOwnerTest : public testing::Test {
void SetUp() override {
gl::init::InitializeStaticGLBindingsImplementation(
gl::kGLImplementationEGLGLES2, false);
- gl::init::InitializeGLOneOffPlatformImplementation(false, false, false,
- true);
+ gl::init::InitializeGLOneOffPlatformImplementation(false, false, true);
surface_ = new gl::PbufferGLSurfaceEGL(gfx::Size(320, 240));
surface_->Initialize();
diff --git a/chromium/gpu/command_buffer/service/sync_point_manager.cc b/chromium/gpu/command_buffer/service/sync_point_manager.cc
index 69856b51347..cede40713ae 100644
--- a/chromium/gpu/command_buffer/service/sync_point_manager.cc
+++ b/chromium/gpu/command_buffer/service/sync_point_manager.cc
@@ -342,7 +342,7 @@ SyncPointManager::~SyncPointManager() {
scoped_refptr<SyncPointOrderData> SyncPointManager::CreateSyncPointOrderData() {
base::AutoLock auto_lock(lock_);
- SequenceId sequence_id = SequenceId::FromUnsafeValue(next_sequence_id_++);
+ SequenceId sequence_id = sequence_id_generator_.GenerateNextId();
scoped_refptr<SyncPointOrderData> order_data =
new SyncPointOrderData(this, sequence_id);
DCHECK(!order_data_map_.count(sequence_id));
diff --git a/chromium/gpu/command_buffer/service/sync_point_manager.h b/chromium/gpu/command_buffer/service/sync_point_manager.h
index 8a44c181719..c6fe88dc0be 100644
--- a/chromium/gpu/command_buffer/service/sync_point_manager.h
+++ b/chromium/gpu/command_buffer/service/sync_point_manager.h
@@ -338,7 +338,7 @@ class GPU_EXPORT SyncPointManager {
// Map of sequence id to order data.
OrderDataMap order_data_map_;
- uint32_t next_sequence_id_ = 1;
+ SequenceId::Generator sequence_id_generator_;
mutable base::Lock lock_;
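
The raw uint32_t counter is replaced by the SequenceId type's own Generator, keeping id allocation next to the id type. A rough sketch of what such a generator boils down to (illustrative only; the real one ships with SequenceId, and its first generated value matches the old next_sequence_id_ = 1):

  class Generator {
   public:
    SequenceId GenerateNextId() {
      return SequenceId::FromUnsafeValue(next_value_++);
    }

   private:
    uint32_t next_value_ = 1;  // 0 is left for the default/invalid SequenceId.
  };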
diff --git a/chromium/gpu/command_buffer/service/test_helper.cc b/chromium/gpu/command_buffer/service/test_helper.cc
index 55ad0276ebc..75c6fa0130a 100644
--- a/chromium/gpu/command_buffer/service/test_helper.cc
+++ b/chromium/gpu/command_buffer/service/test_helper.cc
@@ -928,50 +928,6 @@ void TestHelper::SetupProgramSuccessExpectations(
}
}
- if (feature_info->feature_flags().chromium_path_rendering) {
- EXPECT_CALL(*gl, GetProgramInterfaceiv(service_id, GL_FRAGMENT_INPUT_NV,
- GL_ACTIVE_RESOURCES, _))
- .WillOnce(SetArgPointee<3>(int(num_varyings)))
- .RetiresOnSaturation();
- size_t max_varying_len = 0;
- for (size_t ii = 0; ii < num_varyings; ++ii) {
- size_t len = strlen(varyings[ii].name) + 1;
- max_varying_len = std::max(max_varying_len, len);
- }
- EXPECT_CALL(*gl, GetProgramInterfaceiv(service_id, GL_FRAGMENT_INPUT_NV,
- GL_MAX_NAME_LENGTH, _))
- .WillOnce(SetArgPointee<3>(int(max_varying_len)))
- .RetiresOnSaturation();
- for (size_t ii = 0; ii < num_varyings; ++ii) {
- VaryingInfo& info = varyings[ii];
- EXPECT_CALL(*gl, GetProgramResourceName(service_id, GL_FRAGMENT_INPUT_NV,
- ii, max_varying_len, _, _))
- .WillOnce(DoAll(SetArgPointee<4>(strlen(info.name)),
- SetArrayArgument<5>(
- info.name, info.name + strlen(info.name) + 1)))
- .RetiresOnSaturation();
- if (ProgramManager::HasBuiltInPrefix(info.name))
- continue;
-
- static const GLenum kPropsArray[] = {GL_LOCATION, GL_TYPE,
- GL_ARRAY_SIZE};
- static const size_t kPropsSize = base::size(kPropsArray);
- EXPECT_CALL(
- *gl, GetProgramResourceiv(
- service_id, GL_FRAGMENT_INPUT_NV, ii, kPropsSize,
- _ /*testing::ElementsAreArray(kPropsArray, kPropsSize)*/,
- kPropsSize, _, _))
- .WillOnce(testing::Invoke([info](GLuint, GLenum, GLuint, GLsizei,
- const GLenum*, GLsizei,
- GLsizei* length, GLint* params) {
- *length = kPropsSize;
- params[0] = info.real_location;
- params[1] = info.type;
- params[2] = info.size;
- }))
- .RetiresOnSaturation();
- }
- }
if (feature_info->gl_version_info().is_es3_capable &&
!feature_info->disable_shader_translator()) {
for (size_t ii = 0; ii < num_program_outputs; ++ii) {
diff --git a/chromium/gpu/command_buffer/service/test_shared_image_backing.cc b/chromium/gpu/command_buffer/service/test_shared_image_backing.cc
new file mode 100644
index 00000000000..d07cfd627d2
--- /dev/null
+++ b/chromium/gpu/command_buffer/service/test_shared_image_backing.cc
@@ -0,0 +1,243 @@
+// Copyright 2019 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "gpu/command_buffer/service/test_shared_image_backing.h"
+#include "build/build_config.h"
+#include "components/viz/common/resources/resource_format_utils.h"
+#include "gpu/command_buffer/service/shared_context_state.h"
+#include "third_party/skia/include/core/SkPromiseImageTexture.h"
+#include "third_party/skia/include/gpu/GrBackendSurface.h"
+#include "third_party/skia/include/gpu/mock/GrMockTypes.h"
+
+namespace gpu {
+namespace {
+class TestSharedImageRepresentationGLTexture
+ : public SharedImageRepresentationGLTexture {
+ public:
+ TestSharedImageRepresentationGLTexture(SharedImageManager* manager,
+ SharedImageBacking* backing,
+ MemoryTypeTracker* tracker,
+ gles2::Texture* texture)
+ : SharedImageRepresentationGLTexture(manager, backing, tracker),
+ texture_(texture) {}
+
+ gles2::Texture* GetTexture() override { return texture_; }
+ bool BeginAccess(GLenum mode) override {
+ return static_cast<TestSharedImageBacking*>(backing())->can_access();
+ }
+
+ private:
+ gles2::Texture* const texture_;
+};
+
+class TestSharedImageRepresentationGLTexturePassthrough
+ : public SharedImageRepresentationGLTexturePassthrough {
+ public:
+ TestSharedImageRepresentationGLTexturePassthrough(
+ SharedImageManager* manager,
+ SharedImageBacking* backing,
+ MemoryTypeTracker* tracker,
+ scoped_refptr<gles2::TexturePassthrough> texture)
+ : SharedImageRepresentationGLTexturePassthrough(manager,
+ backing,
+ tracker),
+ texture_(std::move(texture)) {}
+
+ const scoped_refptr<gles2::TexturePassthrough>& GetTexturePassthrough()
+ override {
+ return texture_;
+ }
+ bool BeginAccess(GLenum mode) override {
+ return static_cast<TestSharedImageBacking*>(backing())->can_access();
+ }
+
+ private:
+ const scoped_refptr<gles2::TexturePassthrough> texture_;
+};
+
+class TestSharedImageRepresentationSkia : public SharedImageRepresentationSkia {
+ public:
+ TestSharedImageRepresentationSkia(SharedImageManager* manager,
+ SharedImageBacking* backing,
+ MemoryTypeTracker* tracker)
+ : SharedImageRepresentationSkia(manager, backing, tracker) {}
+
+ protected:
+ sk_sp<SkSurface> BeginWriteAccess(
+ int final_msaa_count,
+ const SkSurfaceProps& surface_props,
+ std::vector<GrBackendSemaphore>* begin_semaphores,
+ std::vector<GrBackendSemaphore>* end_semaphores) override {
+ if (!static_cast<TestSharedImageBacking*>(backing())->can_access()) {
+ return nullptr;
+ }
+ return SkSurface::MakeRasterN32Premul(size().width(), size().height());
+ }
+ void EndWriteAccess(sk_sp<SkSurface> surface) override {}
+ sk_sp<SkPromiseImageTexture> BeginReadAccess(
+ std::vector<GrBackendSemaphore>* begin_semaphores,
+ std::vector<GrBackendSemaphore>* end_semaphores) override {
+ if (!static_cast<TestSharedImageBacking*>(backing())->can_access()) {
+ return nullptr;
+ }
+ GrBackendTexture backend_tex(size().width(), size().height(),
+ GrMipMapped::kNo, GrMockTextureInfo());
+ return SkPromiseImageTexture::Make(backend_tex);
+ }
+ void EndReadAccess() override {}
+};
+
+class TestSharedImageRepresentationDawn : public SharedImageRepresentationDawn {
+ public:
+ TestSharedImageRepresentationDawn(SharedImageManager* manager,
+ SharedImageBacking* backing,
+ MemoryTypeTracker* tracker)
+ : SharedImageRepresentationDawn(manager, backing, tracker) {}
+
+ WGPUTexture BeginAccess(WGPUTextureUsage usage) override {
+ if (!static_cast<TestSharedImageBacking*>(backing())->can_access()) {
+ return nullptr;
+ }
+
+ // Return a dummy value.
+ return reinterpret_cast<WGPUTexture>(203);
+ }
+
+ void EndAccess() override {}
+};
+
+class TestSharedImageRepresentationOverlay
+ : public SharedImageRepresentationOverlay {
+ public:
+ TestSharedImageRepresentationOverlay(SharedImageManager* manager,
+ SharedImageBacking* backing,
+ MemoryTypeTracker* tracker)
+ : SharedImageRepresentationOverlay(manager, backing, tracker) {}
+
+ bool BeginReadAccess() override { return true; }
+ void EndReadAccess() override {}
+ gl::GLImage* GetGLImage() override { return nullptr; }
+
+#if defined(OS_ANDROID)
+ void NotifyOverlayPromotion(bool promotion,
+ const gfx::Rect& bounds) override {}
+#endif
+};
+
+} // namespace
+
+TestSharedImageBacking::TestSharedImageBacking(
+ const Mailbox& mailbox,
+ viz::ResourceFormat format,
+ const gfx::Size& size,
+ const gfx::ColorSpace& color_space,
+ uint32_t usage,
+ size_t estimated_size,
+ GLuint texture_id)
+ : SharedImageBacking(mailbox,
+ format,
+ size,
+ color_space,
+ usage,
+ estimated_size,
+ false /* is_thread_safe */),
+ service_id_(texture_id) {
+ texture_ = new gles2::Texture(service_id_);
+ texture_->SetLightweightRef();
+ texture_->SetTarget(GL_TEXTURE_2D, 1);
+ texture_->sampler_state_.min_filter = GL_LINEAR;
+ texture_->sampler_state_.mag_filter = GL_LINEAR;
+ texture_->sampler_state_.wrap_s = GL_CLAMP_TO_EDGE;
+ texture_->sampler_state_.wrap_t = GL_CLAMP_TO_EDGE;
+ texture_->SetLevelInfo(GL_TEXTURE_2D, 0, GLInternalFormat(format),
+ size.width(), size.height(), 1, 0,
+ GLDataFormat(format), GLDataType(format), gfx::Rect());
+ texture_->SetImmutable(true, true);
+ texture_passthrough_ = base::MakeRefCounted<gles2::TexturePassthrough>(
+ service_id_, GL_TEXTURE_2D);
+}
+
+TestSharedImageBacking::TestSharedImageBacking(
+ const Mailbox& mailbox,
+ viz::ResourceFormat format,
+ const gfx::Size& size,
+ const gfx::ColorSpace& color_space,
+ uint32_t usage,
+ size_t estimated_size)
+ : TestSharedImageBacking(mailbox,
+ format,
+ size,
+ color_space,
+ usage,
+ estimated_size,
+ 203 /* texture_id */) {
+  // Since we are using a dummy |texture_id|, mark the context as lost so we
+  // don't do anything real with it.
+ OnContextLost();
+}
+
+TestSharedImageBacking::~TestSharedImageBacking() {
+ // Pretend our context is lost to avoid actual cleanup in |texture_| or
+  // |texture_passthrough_|.
+ texture_->RemoveLightweightRef(false /* have_context */);
+ texture_passthrough_->MarkContextLost();
+ texture_passthrough_.reset();
+
+ if (have_context())
+ glDeleteTextures(1, &service_id_);
+}
+
+gfx::Rect TestSharedImageBacking::ClearedRect() const {
+ return texture_->GetLevelClearedRect(texture_->target(), 0);
+}
+
+void TestSharedImageBacking::SetClearedRect(const gfx::Rect& cleared_rect) {
+ texture_->SetLevelClearedRect(texture_->target(), 0, cleared_rect);
+}
+
+bool TestSharedImageBacking::ProduceLegacyMailbox(
+ MailboxManager* mailbox_manager) {
+ return false;
+}
+
+std::unique_ptr<SharedImageRepresentationGLTexture>
+TestSharedImageBacking::ProduceGLTexture(SharedImageManager* manager,
+ MemoryTypeTracker* tracker) {
+ return std::make_unique<TestSharedImageRepresentationGLTexture>(
+ manager, this, tracker, texture_);
+}
+
+std::unique_ptr<SharedImageRepresentationGLTexturePassthrough>
+TestSharedImageBacking::ProduceGLTexturePassthrough(
+ SharedImageManager* manager,
+ MemoryTypeTracker* tracker) {
+ return std::make_unique<TestSharedImageRepresentationGLTexturePassthrough>(
+ manager, this, tracker, texture_passthrough_);
+}
+
+std::unique_ptr<SharedImageRepresentationSkia>
+TestSharedImageBacking::ProduceSkia(
+ SharedImageManager* manager,
+ MemoryTypeTracker* tracker,
+ scoped_refptr<SharedContextState> context_state) {
+ return std::make_unique<TestSharedImageRepresentationSkia>(manager, this,
+ tracker);
+}
+
+std::unique_ptr<SharedImageRepresentationDawn>
+TestSharedImageBacking::ProduceDawn(SharedImageManager* manager,
+ MemoryTypeTracker* tracker,
+ WGPUDevice device) {
+ return std::make_unique<TestSharedImageRepresentationDawn>(manager, this,
+ tracker);
+}
+
+std::unique_ptr<SharedImageRepresentationOverlay>
+TestSharedImageBacking::ProduceOverlay(SharedImageManager* manager,
+ MemoryTypeTracker* tracker) {
+ return std::make_unique<TestSharedImageRepresentationOverlay>(manager, this,
+ tracker);
+}
+
+} // namespace gpu
diff --git a/chromium/gpu/command_buffer/service/test_shared_image_backing.h b/chromium/gpu/command_buffer/service/test_shared_image_backing.h
new file mode 100644
index 00000000000..14d3654c67d
--- /dev/null
+++ b/chromium/gpu/command_buffer/service/test_shared_image_backing.h
@@ -0,0 +1,78 @@
+// Copyright 2019 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef GPU_COMMAND_BUFFER_SERVICE_TEST_SHARED_IMAGE_BACKING_H_
+#define GPU_COMMAND_BUFFER_SERVICE_TEST_SHARED_IMAGE_BACKING_H_
+
+#include "gpu/command_buffer/service/shared_image_backing.h"
+#include "gpu/command_buffer/service/texture_manager.h"
+
+namespace gpu {
+
+// Test implementation of a gles2::Texture backed backing.
+class TestSharedImageBacking : public SharedImageBacking {
+ public:
+ // Constructor which uses a dummy GL texture ID for the backing.
+ TestSharedImageBacking(const Mailbox& mailbox,
+ viz::ResourceFormat format,
+ const gfx::Size& size,
+ const gfx::ColorSpace& color_space,
+ uint32_t usage,
+ size_t estimated_size);
+ // Constructor which uses a provided GL texture ID for the backing.
+ TestSharedImageBacking(const Mailbox& mailbox,
+ viz::ResourceFormat format,
+ const gfx::Size& size,
+ const gfx::ColorSpace& color_space,
+ uint32_t usage,
+ size_t estimated_size,
+ GLuint texture_id);
+ ~TestSharedImageBacking() override;
+
+ gfx::Rect ClearedRect() const override;
+ void SetClearedRect(const gfx::Rect& cleared_rect) override;
+ void Update(std::unique_ptr<gfx::GpuFence> in_fence) override {}
+ bool ProduceLegacyMailbox(MailboxManager* mailbox_manager) override;
+ void OnMemoryDump(const std::string& dump_name,
+ base::trace_event::MemoryAllocatorDump* dump,
+ base::trace_event::ProcessMemoryDump* pmd,
+ uint64_t client_tracing_id) override {}
+
+ // Helper functions
+ GLuint service_id() const { return service_id_; }
+ void set_can_access(bool can_access) { can_access_ = can_access; }
+ bool can_access() const { return can_access_; }
+
+ protected:
+ std::unique_ptr<SharedImageRepresentationGLTexture> ProduceGLTexture(
+ SharedImageManager* manager,
+ MemoryTypeTracker* tracker) override;
+ std::unique_ptr<SharedImageRepresentationGLTexturePassthrough>
+ ProduceGLTexturePassthrough(SharedImageManager* manager,
+ MemoryTypeTracker* tracker) override;
+
+ // ProduceSkia/Dawn/Overlay all create dummy representations that
+ // don't link up to a real texture.
+ std::unique_ptr<SharedImageRepresentationSkia> ProduceSkia(
+ SharedImageManager* manager,
+ MemoryTypeTracker* tracker,
+ scoped_refptr<SharedContextState> context_state) override;
+ std::unique_ptr<SharedImageRepresentationDawn> ProduceDawn(
+ SharedImageManager* manager,
+ MemoryTypeTracker* tracker,
+ WGPUDevice device) override;
+ std::unique_ptr<SharedImageRepresentationOverlay> ProduceOverlay(
+ SharedImageManager* manager,
+ MemoryTypeTracker* tracker) override;
+
+ private:
+ const GLuint service_id_ = 0;
+ gles2::Texture* texture_ = nullptr;
+ scoped_refptr<gles2::TexturePassthrough> texture_passthrough_;
+ bool can_access_ = true;
+};
+
+} // namespace gpu
+
+#endif // GPU_COMMAND_BUFFER_SERVICE_TEST_SHARED_IMAGE_BACKING_H_
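
A hypothetical use of the new test backing (manager and tracker wiring elided); the set_can_access() switch is what lets tests drive the BeginAccess failure paths of callers:

  auto backing = std::make_unique<TestSharedImageBacking>(
      Mailbox::GenerateForSharedImage(), viz::ResourceFormat::RGBA_8888,
      gfx::Size(4, 4), gfx::ColorSpace::CreateSRGB(), /*usage=*/0,
      /*estimated_size=*/64);
  backing->set_can_access(false);
  // GL, passthrough, Skia and Dawn representations produced from this backing
  // will now fail BeginAccess/BeginReadAccess, exercising callers' error
  // handling. (The overlay representation above always succeeds.)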
diff --git a/chromium/gpu/command_buffer/service/texture_manager.cc b/chromium/gpu/command_buffer/service/texture_manager.cc
index 473f3511c18..b1e0575482f 100644
--- a/chromium/gpu/command_buffer/service/texture_manager.cc
+++ b/chromium/gpu/command_buffer/service/texture_manager.cc
@@ -15,7 +15,6 @@
#include "base/bits.h"
#include "base/format_macros.h"
#include "base/lazy_instance.h"
-#include "base/metrics/histogram_macros.h"
#include "base/stl_util.h"
#include "base/strings/stringprintf.h"
#include "base/threading/thread_task_runner_handle.h"
@@ -223,6 +222,13 @@ class FormatTypeValidator {
// Exposed by GL_EXT_texture_norm16
{GL_R16_EXT, GL_RED, GL_UNSIGNED_SHORT},
+ {GL_RG16_EXT, GL_RG, GL_UNSIGNED_SHORT},
+ {GL_RGB16_EXT, GL_RGB, GL_UNSIGNED_SHORT},
+ {GL_RGBA16_EXT, GL_RGBA, GL_UNSIGNED_SHORT},
+ {GL_R16_SNORM_EXT, GL_RED, GL_SHORT},
+ {GL_RG16_SNORM_EXT, GL_RG, GL_SHORT},
+ {GL_RGB16_SNORM_EXT, GL_RGB, GL_SHORT},
+ {GL_RGBA16_SNORM_EXT, GL_RGBA, GL_SHORT},
};
static const FormatType kSupportedFormatTypesES2Only[] = {
@@ -254,9 +260,6 @@ class FormatTypeValidator {
{GL_RG, GL_RG, GL_FLOAT},
{GL_RED, GL_RED, GL_HALF_FLOAT_OES},
{GL_RG, GL_RG, GL_HALF_FLOAT_OES},
-
- // Exposed by GL_EXT_texture_norm16
- {GL_RED, GL_RED, GL_UNSIGNED_SHORT},
};
for (size_t ii = 0; ii < base::size(kSupportedFormatTypes); ++ii) {
@@ -364,8 +367,8 @@ bool SizedFormatAvailable(const FeatureInfo* feature_info,
}
if (internal_format == GL_RGB10_A2_EXT &&
- (feature_info->feature_flags().chromium_image_xr30 ||
- feature_info->feature_flags().chromium_image_xb30)) {
+ (feature_info->feature_flags().chromium_image_ar30 ||
+ feature_info->feature_flags().chromium_image_ab30)) {
return true;
}
@@ -1364,16 +1367,16 @@ void Texture::SetLevelInfo(GLenum target,
ScopedMemTrackerChange change(this);
estimated_size_ -= info.estimated_size;
- if (format != GL_NONE) {
- // Uncompressed image
- GLES2Util::ComputeImageDataSizes(width, height, depth, format, type, 4,
- &info.estimated_size, nullptr, nullptr);
- } else if (internal_format != GL_NONE) {
+ if (::gpu::gles2::IsCompressedTextureFormat(internal_format)) {
// Compressed image
GLsizei compressed_size = 0;
GetCompressedTexSizeInBytes(nullptr, width, height, depth,
internal_format, &compressed_size, nullptr);
info.estimated_size = compressed_size;
+ } else if (format != GL_NONE) {
+ // Uncompressed image
+ GLES2Util::ComputeImageDataSizes(width, height, depth, format, type, 4,
+ &info.estimated_size, nullptr, nullptr);
} else {
// No image
info.estimated_size = 0;
@@ -1830,14 +1833,24 @@ bool Texture::ClearLevel(DecoderContext* decoder, GLenum target, GLint level) {
}
if (info.target == GL_TEXTURE_3D || info.target == GL_TEXTURE_2D_ARRAY) {
- // For 3D textures, we always clear the entire texture.
- DCHECK(info.cleared_rect == gfx::Rect());
- bool cleared = decoder->ClearLevel3D(
- this, info.target, info.level,
- TextureManager::AdjustTexFormat(decoder->GetFeatureInfo(), info.format),
- info.type, info.width, info.height, info.depth);
- if (!cleared)
- return false;
+ if (decoder->IsCompressedTextureFormat(info.internal_format)) {
+ DCHECK(IsImmutable());
+ bool cleared = decoder->ClearCompressedTextureLevel3D(
+ this, info.target, info.level, info.internal_format, info.width,
+ info.height, info.depth);
+ if (!cleared)
+ return false;
+ } else {
+ // For 3D textures, we always clear the entire texture.
+ DCHECK(info.cleared_rect == gfx::Rect());
+ bool cleared =
+ decoder->ClearLevel3D(this, info.target, info.level,
+ TextureManager::AdjustTexFormat(
+ decoder->GetFeatureInfo(), info.format),
+ info.type, info.width, info.height, info.depth);
+ if (!cleared)
+ return false;
+ }
} else {
if (decoder->IsCompressedTextureFormat(info.internal_format)) {
// An uncleared level of a compressed texture can only occur when
@@ -2107,16 +2120,27 @@ TextureRef::~TextureRef() {
}
bool TextureRef::BeginAccessSharedImage(GLenum mode) {
- shared_image_scoped_access_.emplace(shared_image_.get(), mode);
- if (!shared_image_scoped_access_->success()) {
- shared_image_scoped_access_.reset();
+ // When accessing through TextureManager, we are using legacy GL logic which
+  // tracks clearing internally. Always allow access to uncleared
+ // SharedImages.
+ shared_image_scoped_access_ = shared_image_->BeginScopedAccess(
+ mode, SharedImageRepresentation::AllowUnclearedAccess::kYes);
+ if (!shared_image_scoped_access_) {
return false;
}
+ // After beginning access, the returned gles2::Texture's cleared status
+ // should match the SharedImage's.
+ DCHECK_EQ(shared_image_->ClearedRect(),
+ texture_->GetLevelClearedRect(texture_->target(), 0));
return true;
}
void TextureRef::EndAccessSharedImage() {
shared_image_scoped_access_.reset();
+  // After ending access, the SharedImage's cleared rect should be synchronized
+ // with |texture_|'s.
+ DCHECK_EQ(shared_image_->ClearedRect(),
+ texture_->GetLevelClearedRect(texture_->target(), 0));
}
void TextureRef::ForceContextLost() {
@@ -3697,13 +3721,6 @@ void TextureManager::DoTexImage(DecoderTextureState* texture_state,
}
}
GLenum error = ERRORSTATE_PEEK_GL_ERROR(error_state, function_name);
- if (args.command_type == DoTexImageArguments::CommandType::kTexImage3D) {
- UMA_HISTOGRAM_CUSTOM_ENUMERATION("GPU.Error_TexImage3D", error,
- GetAllGLErrors());
- } else {
- UMA_HISTOGRAM_CUSTOM_ENUMERATION("GPU.Error_TexImage2D", error,
- GetAllGLErrors());
- }
if (error == GL_NO_ERROR) {
bool set_as_cleared = (args.pixels != nullptr || unpack_buffer_bound);
SetLevelInfo(
@@ -3819,12 +3836,15 @@ GLenum TextureManager::ExtractFormatFromStorageFormat(GLenum internalformat) {
switch (internalformat) {
case GL_COMPRESSED_R11_EAC:
case GL_COMPRESSED_SIGNED_R11_EAC:
+ case GL_COMPRESSED_RED_RGTC1_EXT:
+ case GL_COMPRESSED_SIGNED_RED_RGTC1_EXT:
case GL_RED:
case GL_R8:
case GL_R8_SNORM:
case GL_R16F:
case GL_R32F:
case GL_R16_EXT:
+ case GL_R16_SNORM_EXT:
return GL_RED;
case GL_R8UI:
case GL_R8I:
@@ -3835,9 +3855,13 @@ GLenum TextureManager::ExtractFormatFromStorageFormat(GLenum internalformat) {
return GL_RED_INTEGER;
case GL_COMPRESSED_RG11_EAC:
case GL_COMPRESSED_SIGNED_RG11_EAC:
+ case GL_COMPRESSED_RED_GREEN_RGTC2_EXT:
+ case GL_COMPRESSED_SIGNED_RED_GREEN_RGTC2_EXT:
case GL_RG:
case GL_RG8:
case GL_RG8_SNORM:
+ case GL_RG16:
+ case GL_RG16_SNORM:
case GL_RG16F:
case GL_RG32F:
return GL_RG;
@@ -3849,6 +3873,8 @@ GLenum TextureManager::ExtractFormatFromStorageFormat(GLenum internalformat) {
case GL_RG32I:
return GL_RG_INTEGER;
case GL_ATC_RGB_AMD:
+ case GL_COMPRESSED_RGB_BPTC_SIGNED_FLOAT_EXT:
+ case GL_COMPRESSED_RGB_BPTC_UNSIGNED_FLOAT_EXT:
case GL_COMPRESSED_RGB_PVRTC_2BPPV1_IMG:
case GL_COMPRESSED_RGB_PVRTC_4BPPV1_IMG:
case GL_COMPRESSED_RGB_S3TC_DXT1_EXT:
@@ -3859,9 +3885,11 @@ GLenum TextureManager::ExtractFormatFromStorageFormat(GLenum internalformat) {
case GL_RGB:
case GL_RGB8:
case GL_SRGB8:
+ case GL_RGB16:
case GL_R11F_G11F_B10F:
case GL_RGB565:
case GL_RGB8_SNORM:
+ case GL_RGB16_SNORM:
case GL_RGB9_E5:
case GL_RGB16F:
case GL_RGB32F:
@@ -3887,15 +3915,47 @@ GLenum TextureManager::ExtractFormatFromStorageFormat(GLenum internalformat) {
case GL_COMPRESSED_RGBA_S3TC_DXT1_EXT:
case GL_COMPRESSED_RGBA_S3TC_DXT3_EXT:
case GL_COMPRESSED_RGBA_S3TC_DXT5_EXT:
+ case GL_COMPRESSED_RGBA_BPTC_UNORM_EXT:
case GL_COMPRESSED_SRGB_ALPHA_S3TC_DXT1_EXT:
case GL_COMPRESSED_SRGB_ALPHA_S3TC_DXT3_EXT:
case GL_COMPRESSED_SRGB_ALPHA_S3TC_DXT5_EXT:
+ case GL_COMPRESSED_SRGB_ALPHA_BPTC_UNORM_EXT:
case GL_COMPRESSED_SRGB8_ALPHA8_ETC2_EAC:
case GL_COMPRESSED_SRGB8_PUNCHTHROUGH_ALPHA1_ETC2:
+ case GL_COMPRESSED_RGBA_ASTC_4x4_KHR:
+ case GL_COMPRESSED_RGBA_ASTC_5x4_KHR:
+ case GL_COMPRESSED_RGBA_ASTC_5x5_KHR:
+ case GL_COMPRESSED_RGBA_ASTC_6x5_KHR:
+ case GL_COMPRESSED_RGBA_ASTC_6x6_KHR:
+ case GL_COMPRESSED_RGBA_ASTC_8x5_KHR:
+ case GL_COMPRESSED_RGBA_ASTC_8x6_KHR:
+ case GL_COMPRESSED_RGBA_ASTC_8x8_KHR:
+ case GL_COMPRESSED_RGBA_ASTC_10x5_KHR:
+ case GL_COMPRESSED_RGBA_ASTC_10x6_KHR:
+ case GL_COMPRESSED_RGBA_ASTC_10x8_KHR:
+ case GL_COMPRESSED_RGBA_ASTC_10x10_KHR:
+ case GL_COMPRESSED_RGBA_ASTC_12x10_KHR:
+ case GL_COMPRESSED_RGBA_ASTC_12x12_KHR:
+ case GL_COMPRESSED_SRGB8_ALPHA8_ASTC_4x4_KHR:
+ case GL_COMPRESSED_SRGB8_ALPHA8_ASTC_5x4_KHR:
+ case GL_COMPRESSED_SRGB8_ALPHA8_ASTC_5x5_KHR:
+ case GL_COMPRESSED_SRGB8_ALPHA8_ASTC_6x5_KHR:
+ case GL_COMPRESSED_SRGB8_ALPHA8_ASTC_6x6_KHR:
+ case GL_COMPRESSED_SRGB8_ALPHA8_ASTC_8x5_KHR:
+ case GL_COMPRESSED_SRGB8_ALPHA8_ASTC_8x6_KHR:
+ case GL_COMPRESSED_SRGB8_ALPHA8_ASTC_8x8_KHR:
+ case GL_COMPRESSED_SRGB8_ALPHA8_ASTC_10x5_KHR:
+ case GL_COMPRESSED_SRGB8_ALPHA8_ASTC_10x6_KHR:
+ case GL_COMPRESSED_SRGB8_ALPHA8_ASTC_10x8_KHR:
+ case GL_COMPRESSED_SRGB8_ALPHA8_ASTC_10x10_KHR:
+ case GL_COMPRESSED_SRGB8_ALPHA8_ASTC_12x10_KHR:
+ case GL_COMPRESSED_SRGB8_ALPHA8_ASTC_12x12_KHR:
case GL_RGBA:
case GL_RGBA8:
+ case GL_RGBA16:
case GL_SRGB8_ALPHA8:
case GL_RGBA8_SNORM:
+ case GL_RGBA16_SNORM:
case GL_RGBA4:
case GL_RGB5_A1:
case GL_RGB10_A2:
@@ -3978,6 +4038,8 @@ GLenum TextureManager::ExtractTypeFromStorageFormat(GLenum internalformat) {
return GL_SHORT;
case GL_R16_EXT:
return GL_UNSIGNED_SHORT;
+ case GL_R16_SNORM_EXT:
+ return GL_SHORT;
case GL_R32UI:
return GL_UNSIGNED_INT;
case GL_R32I:
@@ -3998,6 +4060,10 @@ GLenum TextureManager::ExtractTypeFromStorageFormat(GLenum internalformat) {
return GL_UNSIGNED_SHORT;
case GL_RG16I:
return GL_SHORT;
+ case GL_RG16_EXT:
+ return GL_UNSIGNED_SHORT;
+ case GL_RG16_SNORM_EXT:
+ return GL_SHORT;
case GL_RG32UI:
return GL_UNSIGNED_INT;
case GL_RG32I:
@@ -4025,6 +4091,10 @@ GLenum TextureManager::ExtractTypeFromStorageFormat(GLenum internalformat) {
return GL_UNSIGNED_SHORT;
case GL_RGB16I:
return GL_SHORT;
+ case GL_RGB16_EXT:
+ return GL_UNSIGNED_SHORT;
+ case GL_RGB16_SNORM_EXT:
+ return GL_SHORT;
case GL_RGB32UI:
return GL_UNSIGNED_INT;
case GL_RGB32I:
@@ -4055,6 +4125,10 @@ GLenum TextureManager::ExtractTypeFromStorageFormat(GLenum internalformat) {
return GL_UNSIGNED_SHORT;
case GL_RGBA16I:
return GL_SHORT;
+ case GL_RGBA16_EXT:
+ return GL_UNSIGNED_SHORT;
+ case GL_RGBA16_SNORM_EXT:
+ return GL_SHORT;
case GL_RGBA32I:
return GL_INT;
case GL_RGBA32UI:
@@ -4089,6 +4163,78 @@ GLenum TextureManager::ExtractTypeFromStorageFormat(GLenum internalformat) {
return GL_HALF_FLOAT_OES;
case GL_BGRA8_EXT:
return GL_UNSIGNED_BYTE;
+ // Compressed Formats
+ // S3TC
+ case GL_COMPRESSED_RGB_S3TC_DXT1_EXT:
+ case GL_COMPRESSED_RGBA_S3TC_DXT1_EXT:
+ case GL_COMPRESSED_SRGB_S3TC_DXT1_EXT:
+ case GL_COMPRESSED_SRGB_ALPHA_S3TC_DXT1_EXT:
+ case GL_COMPRESSED_RGBA_S3TC_DXT3_EXT:
+ case GL_COMPRESSED_RGBA_S3TC_DXT5_EXT:
+ case GL_COMPRESSED_SRGB_ALPHA_S3TC_DXT3_EXT:
+ case GL_COMPRESSED_SRGB_ALPHA_S3TC_DXT5_EXT:
+ // ASTC
+ case GL_COMPRESSED_RGBA_ASTC_4x4_KHR:
+ case GL_COMPRESSED_RGBA_ASTC_5x4_KHR:
+ case GL_COMPRESSED_RGBA_ASTC_5x5_KHR:
+ case GL_COMPRESSED_RGBA_ASTC_6x5_KHR:
+ case GL_COMPRESSED_RGBA_ASTC_6x6_KHR:
+ case GL_COMPRESSED_RGBA_ASTC_8x5_KHR:
+ case GL_COMPRESSED_RGBA_ASTC_8x6_KHR:
+ case GL_COMPRESSED_RGBA_ASTC_8x8_KHR:
+ case GL_COMPRESSED_RGBA_ASTC_10x5_KHR:
+ case GL_COMPRESSED_RGBA_ASTC_10x6_KHR:
+ case GL_COMPRESSED_RGBA_ASTC_10x8_KHR:
+ case GL_COMPRESSED_RGBA_ASTC_10x10_KHR:
+ case GL_COMPRESSED_RGBA_ASTC_12x10_KHR:
+ case GL_COMPRESSED_RGBA_ASTC_12x12_KHR:
+ case GL_COMPRESSED_SRGB8_ALPHA8_ASTC_4x4_KHR:
+ case GL_COMPRESSED_SRGB8_ALPHA8_ASTC_5x4_KHR:
+ case GL_COMPRESSED_SRGB8_ALPHA8_ASTC_5x5_KHR:
+ case GL_COMPRESSED_SRGB8_ALPHA8_ASTC_6x5_KHR:
+ case GL_COMPRESSED_SRGB8_ALPHA8_ASTC_6x6_KHR:
+ case GL_COMPRESSED_SRGB8_ALPHA8_ASTC_8x5_KHR:
+ case GL_COMPRESSED_SRGB8_ALPHA8_ASTC_8x6_KHR:
+ case GL_COMPRESSED_SRGB8_ALPHA8_ASTC_8x8_KHR:
+ case GL_COMPRESSED_SRGB8_ALPHA8_ASTC_10x5_KHR:
+ case GL_COMPRESSED_SRGB8_ALPHA8_ASTC_10x6_KHR:
+ case GL_COMPRESSED_SRGB8_ALPHA8_ASTC_10x8_KHR:
+ case GL_COMPRESSED_SRGB8_ALPHA8_ASTC_10x10_KHR:
+ case GL_COMPRESSED_SRGB8_ALPHA8_ASTC_12x10_KHR:
+ case GL_COMPRESSED_SRGB8_ALPHA8_ASTC_12x12_KHR:
+ // BPTC
+ case GL_COMPRESSED_SRGB_ALPHA_BPTC_UNORM_EXT:
+ case GL_COMPRESSED_RGBA_BPTC_UNORM_EXT:
+ case GL_COMPRESSED_RGB_BPTC_SIGNED_FLOAT_EXT:
+ case GL_COMPRESSED_RGB_BPTC_UNSIGNED_FLOAT_EXT:
+ // RGTC
+ case GL_COMPRESSED_RED_RGTC1_EXT:
+ case GL_COMPRESSED_SIGNED_RED_RGTC1_EXT:
+ case GL_COMPRESSED_RED_GREEN_RGTC2_EXT:
+ case GL_COMPRESSED_SIGNED_RED_GREEN_RGTC2_EXT:
+ // ETC2/EAC
+ case GL_COMPRESSED_R11_EAC:
+ case GL_COMPRESSED_SIGNED_R11_EAC:
+ case GL_COMPRESSED_RGB8_ETC2:
+ case GL_COMPRESSED_SRGB8_ETC2:
+ case GL_COMPRESSED_RGB8_PUNCHTHROUGH_ALPHA1_ETC2:
+ case GL_COMPRESSED_SRGB8_PUNCHTHROUGH_ALPHA1_ETC2:
+ case GL_COMPRESSED_RG11_EAC:
+ case GL_COMPRESSED_SIGNED_RG11_EAC:
+ case GL_COMPRESSED_RGBA8_ETC2_EAC:
+ case GL_COMPRESSED_SRGB8_ALPHA8_ETC2_EAC:
+ // ETC1
+ case GL_ETC1_RGB8_OES:
+ // PVRTC
+ case GL_COMPRESSED_RGB_PVRTC_4BPPV1_IMG:
+ case GL_COMPRESSED_RGBA_PVRTC_4BPPV1_IMG:
+ case GL_COMPRESSED_RGB_PVRTC_2BPPV1_IMG:
+ case GL_COMPRESSED_RGBA_PVRTC_2BPPV1_IMG:
+ // ATC
+ case GL_ATC_RGB_AMD:
+ case GL_ATC_RGBA_EXPLICIT_ALPHA_AMD:
+ case GL_ATC_RGBA_INTERPOLATED_ALPHA_AMD:
+ return GL_UNSIGNED_BYTE;
default:
return GL_NONE;
}
@@ -4176,10 +4322,11 @@ bool Texture::CompatibleWithSamplerUniformType(
return category == SAMPLER_SHADOW;
}
- if (level_info->type == GL_NONE && level_info->format == GL_NONE &&
- level_info->internal_format != GL_NONE) {
- // This is probably a compressed texture format. All compressed formats are
- // sampled as float.
+ DCHECK(memory_tracking_ref_);
+ if (memory_tracking_ref_->manager()
+ ->feature_info_->validators()
+ ->compressed_texture_format.IsValid(level_info->internal_format)) {
+    // Compressed texture format. All compressed formats are sampled as float.
return category == SAMPLER_FLOAT;
}
diff --git a/chromium/gpu/command_buffer/service/texture_manager.h b/chromium/gpu/command_buffer/service/texture_manager.h
index aedf7ccef99..0fece4ee111 100644
--- a/chromium/gpu/command_buffer/service/texture_manager.h
+++ b/chromium/gpu/command_buffer/service/texture_manager.h
@@ -41,16 +41,18 @@ class ServiceDiscardableManager;
class SharedImageBackingGLTexture;
class SharedImageBackingFactoryGLTexture;
class SharedImageBackingAHB;
+class SharedImageBackingEglImage;
class SharedImageRepresentationGLTexture;
+class SharedImageRepresentationEglImageGLTexture;
class SharedImageRepresentationGLTextureAHB;
class SharedImageRepresentationSkiaGLAHB;
class SharedImageBackingIOSurface;
class SharedImageRepresentationGLTextureIOSurface;
class SharedImageRepresentationSkiaIOSurface;
-class SharedImageBackingD3D;
+class SharedImageRepresentationGLOzone;
class SharedImageVideo;
class StreamTexture;
-class SharedImageBackingFactoryD3D;
+class TestSharedImageBacking;
namespace gles2 {
class GLStreamTextureImage;
@@ -436,20 +438,21 @@ class GPU_GLES2_EXPORT Texture final : public TextureBase {
friend class gpu::SharedImageBackingGLTexture;
friend class gpu::SharedImageBackingFactoryGLTexture;
friend class gpu::SharedImageBackingAHB;
+ friend class gpu::SharedImageBackingEglImage;
friend class gpu::SharedImageRepresentationGLTextureAHB;
+ friend class gpu::SharedImageRepresentationEglImageGLTexture;
friend class gpu::SharedImageRepresentationSkiaGLAHB;
friend class gpu::SharedImageBackingIOSurface;
- friend class gpu::SharedImageBackingD3D;
- friend class gpu::SharedImageBackingFactoryD3D;
friend class gpu::SharedImageRepresentationGLTextureIOSurface;
friend class gpu::SharedImageRepresentationSkiaIOSurface;
+ friend class gpu::SharedImageRepresentationGLOzone;
friend class gpu::StreamTexture;
+ friend class gpu::TestSharedImageBacking;
friend class AbstractTextureImplOnSharedContext;
friend class TextureDefinition;
friend class TextureManager;
friend class TextureRef;
friend class TextureTestHelper;
- friend class TestSharedImageBacking;
FRIEND_TEST_ALL_PREFIXES(TextureMemoryTrackerTest, LightweightRef);
~Texture() override;
@@ -794,7 +797,7 @@ class GPU_GLES2_EXPORT TextureRef : public base::RefCounted<TextureRef> {
SharedImageRepresentationGLTexture* shared_image() const {
return shared_image_.get();
}
- const base::Optional<SharedImageRepresentationGLTexture::ScopedAccess>&
+ const std::unique_ptr<SharedImageRepresentationGLTexture::ScopedAccess>&
shared_image_scoped_access() const {
return shared_image_scoped_access_;
}
@@ -823,7 +826,7 @@ class GPU_GLES2_EXPORT TextureRef : public base::RefCounted<TextureRef> {
bool force_context_lost_;
std::unique_ptr<SharedImageRepresentationGLTexture> shared_image_;
- base::Optional<SharedImageRepresentationGLTexture::ScopedAccess>
+ std::unique_ptr<SharedImageRepresentationGLTexture::ScopedAccess>
shared_image_scoped_access_;
DISALLOW_COPY_AND_ASSIGN(TextureRef);
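
TextureRef now holds the shared-image scoped access as a unique_ptr, so a failed BeginScopedAccess() is just a null pointer rather than an engaged-but-failed base::Optional, and access is requested with AllowUnclearedAccess::kYes because the legacy GL path tracks clearing itself. The caller-side shape, as in BeginAccessSharedImage() above:

  shared_image_scoped_access_ = shared_image_->BeginScopedAccess(
      mode, SharedImageRepresentation::AllowUnclearedAccess::kYes);
  if (!shared_image_scoped_access_)
    return false;  // Access refused by the backing.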
diff --git a/chromium/gpu/command_buffer/service/texture_manager_unittest.cc b/chromium/gpu/command_buffer/service/texture_manager_unittest.cc
index 4a3de4b410c..7fe99835b97 100644
--- a/chromium/gpu/command_buffer/service/texture_manager_unittest.cc
+++ b/chromium/gpu/command_buffer/service/texture_manager_unittest.cc
@@ -71,7 +71,7 @@ class TextureManagerTest : public GpuServiceTest {
static const GLint kMax3dLevels = 10;
static const bool kUseDefaultTextures = false;
- TextureManagerTest() {
+ TextureManagerTest() : discardable_manager_(GpuPreferences()) {
GpuDriverBugWorkarounds gpu_driver_bug_workaround;
feature_info_ =
new FeatureInfo(gpu_driver_bug_workaround, GpuFeatureInfo());
@@ -638,8 +638,8 @@ class TextureTestBase : public GpuServiceTest {
static const bool kUseDefaultTextures = false;
TextureTestBase()
- : feature_info_(new FeatureInfo()) {
- }
+ : feature_info_(new FeatureInfo()),
+ discardable_manager_(GpuPreferences()) {}
~TextureTestBase() override { texture_ref_ = nullptr; }
protected:
@@ -2186,7 +2186,8 @@ class CountingMemoryTracker : public MemoryTracker {
}
~CountingMemoryTracker() override = default;
- void TrackMemoryAllocatedChange(uint64_t delta) override {
+ void TrackMemoryAllocatedChange(int64_t delta) override {
+ DCHECK(delta >= 0 || current_size_ >= static_cast<uint64_t>(-delta));
current_size_ += delta;
}
@@ -2207,7 +2208,9 @@ class SharedTextureTest : public GpuServiceTest {
public:
static const bool kUseDefaultTextures = false;
- SharedTextureTest() : feature_info_(new FeatureInfo()) {}
+ SharedTextureTest()
+ : feature_info_(new FeatureInfo()),
+ discardable_manager_(GpuPreferences()) {}
~SharedTextureTest() override = default;
@@ -2750,18 +2753,19 @@ TEST_F(TextureFormatTypeValidationTest, ES3Basic) {
ExpectInvalid(true, GL_RGB_INTEGER, GL_INT, GL_RGBA8);
}
-TEST_F(TextureFormatTypeValidationTest, ES2WithTextureNorm16) {
- SetupFeatureInfo("GL_EXT_texture_norm16", "OpenGL ES 2.0",
- CONTEXT_TYPE_OPENGLES2);
-
- ExpectValid(true, GL_RED, GL_UNSIGNED_SHORT, GL_RED);
-}
-
TEST_F(TextureFormatTypeValidationTest, ES3WithTextureNorm16) {
SetupFeatureInfo("GL_EXT_texture_norm16", "OpenGL ES 3.0",
CONTEXT_TYPE_OPENGLES3);
ExpectValid(true, GL_RED, GL_UNSIGNED_SHORT, GL_R16_EXT);
+ ExpectValid(true, GL_RG, GL_UNSIGNED_SHORT, GL_RG16_EXT);
+ ExpectValid(true, GL_RGB, GL_UNSIGNED_SHORT, GL_RGB16_EXT);
+ ExpectValid(true, GL_RGBA, GL_UNSIGNED_SHORT, GL_RGBA16_EXT);
+
+ ExpectValid(true, GL_RED, GL_SHORT, GL_R16_SNORM_EXT);
+ ExpectValid(true, GL_RG, GL_SHORT, GL_RG16_SNORM_EXT);
+ ExpectValid(true, GL_RGB, GL_SHORT, GL_RGB16_SNORM_EXT);
+ ExpectValid(true, GL_RGBA, GL_SHORT, GL_RGBA16_SNORM_EXT);
}
} // namespace gles2
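
With these validator additions, an ES3 context that exposes GL_EXT_texture_norm16 accepts the full set of 16-bit normalized uploads rather than only GL_R16_EXT. For example (client-side GLES sketch, assuming the extension is available):

  glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA16_EXT, width, height, 0,
               GL_RGBA, GL_UNSIGNED_SHORT, pixels);
  glTexImage2D(GL_TEXTURE_2D, 0, GL_RG16_SNORM_EXT, width, height, 0,
               GL_RG, GL_SHORT, snorm_pixels);

The ES2-only GL_RED/GL_UNSIGNED_SHORT unsized path is dropped at the same time, which is why the ES2WithTextureNorm16 test above is removed.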
diff --git a/chromium/gpu/command_buffer/service/webgpu_decoder_impl.cc b/chromium/gpu/command_buffer/service/webgpu_decoder_impl.cc
index 80bfe5eaff4..80b434f474c 100644
--- a/chromium/gpu/command_buffer/service/webgpu_decoder_impl.cc
+++ b/chromium/gpu/command_buffer/service/webgpu_decoder_impl.cc
@@ -14,12 +14,11 @@
#include "base/logging.h"
#include "base/macros.h"
+#include "base/numerics/checked_math.h"
#include "base/trace_event/trace_event.h"
#include "build/build_config.h"
#include "gpu/command_buffer/common/mailbox.h"
-#include "gpu/command_buffer/common/webgpu_cmd_enums.h"
#include "gpu/command_buffer/common/webgpu_cmd_format.h"
-#include "gpu/command_buffer/common/webgpu_cmd_ids.h"
#include "gpu/command_buffer/service/command_buffer_service.h"
#include "gpu/command_buffer/service/dawn_platform.h"
#include "gpu/command_buffer/service/dawn_service_memory_transfer_service.h"
@@ -41,13 +40,11 @@ constexpr size_t kMaxWireBufferSize =
class WireServerCommandSerializer : public dawn_wire::CommandSerializer {
public:
- explicit WireServerCommandSerializer(DecoderClient* client);
+ WireServerCommandSerializer(DecoderClient* client,
+ DawnDeviceClientID device_client_id);
~WireServerCommandSerializer() override = default;
void* GetCmdSpace(size_t size) final;
bool Flush() final;
- void SendAdapterProperties(uint32_t request_adapter_serial,
- uint32_t adapter_server_id,
- const dawn_native::Adapter& adapter);
private:
DecoderClient* client_;
@@ -55,19 +52,31 @@ class WireServerCommandSerializer : public dawn_wire::CommandSerializer {
size_t put_offset_;
};
-WireServerCommandSerializer::WireServerCommandSerializer(DecoderClient* client)
+WireServerCommandSerializer::WireServerCommandSerializer(
+ DecoderClient* client,
+ DawnDeviceClientID device_client_id)
: client_(client),
buffer_(kMaxWireBufferSize),
- put_offset_(sizeof(cmds::DawnReturnDataHeader)) {
- cmds::DawnReturnDataHeader* return_data_header =
- reinterpret_cast<cmds::DawnReturnDataHeader*>(&buffer_[0]);
- return_data_header->return_data_type = DawnReturnDataType::kDawnCommands;
+ put_offset_(offsetof(cmds::DawnReturnCommandsInfo, deserialized_buffer)) {
+ // We prepopulate the message with the header and keep it between flushes so
+ // we never need to write it again.
+ cmds::DawnReturnCommandsInfoHeader* header =
+ reinterpret_cast<cmds::DawnReturnCommandsInfoHeader*>(&buffer_[0]);
+ header->return_data_header.return_data_type =
+ DawnReturnDataType::kDawnCommands;
+ header->device_client_id = device_client_id;
}
void* WireServerCommandSerializer::GetCmdSpace(size_t size) {
// TODO(enga): Handle chunking commands if size +
- // sizeof(cmds::DawnReturnDataHeader)> kMaxWireBufferSize.
- if (size + sizeof(cmds::DawnReturnDataHeader) > kMaxWireBufferSize) {
+  // offsetof(cmds::DawnReturnCommandsInfo, deserialized_buffer) >
+ // kMaxWireBufferSize.
+ size_t total_wire_buffer_size =
+ (base::CheckedNumeric<size_t>(size) +
+ base::CheckedNumeric<size_t>(
+ offsetof(cmds::DawnReturnCommandsInfo, deserialized_buffer)))
+ .ValueOrDie();
+ if (total_wire_buffer_size > kMaxWireBufferSize) {
NOTREACHED();
return nullptr;
}
@@ -86,7 +95,8 @@ void* WireServerCommandSerializer::GetCmdSpace(size_t size) {
// TODO(enga): Keep track of how much command space the application is using
// and adjust the buffer size accordingly.
- DCHECK_EQ(put_offset_, sizeof(cmds::DawnReturnDataHeader));
+ DCHECK_EQ(put_offset_,
+ offsetof(cmds::DawnReturnCommandsInfo, deserialized_buffer));
next_offset = put_offset_ + size;
}
@@ -96,7 +106,8 @@ void* WireServerCommandSerializer::GetCmdSpace(size_t size) {
}
bool WireServerCommandSerializer::Flush() {
- if (put_offset_ > sizeof(cmds::DawnReturnDataHeader)) {
+ if (put_offset_ >
+ offsetof(cmds::DawnReturnCommandsInfo, deserialized_buffer)) {
TRACE_EVENT1(TRACE_DISABLED_BY_DEFAULT("gpu.dawn"),
"WireServerCommandSerializer::Flush", "bytes", put_offset_);
@@ -105,44 +116,11 @@ bool WireServerCommandSerializer::Flush() {
"DawnReturnCommands", return_trace_id++);
client_->HandleReturnData(base::make_span(buffer_.data(), put_offset_));
- put_offset_ = sizeof(cmds::DawnReturnDataHeader);
+ put_offset_ = offsetof(cmds::DawnReturnCommandsInfo, deserialized_buffer);
}
return true;
}
-void WireServerCommandSerializer::SendAdapterProperties(
- uint32_t request_adapter_serial,
- uint32_t adapter_service_id,
- const dawn_native::Adapter& adapter) {
- WGPUDeviceProperties adapter_properties = adapter.GetAdapterProperties();
-
- size_t serialized_adapter_properties_size =
- dawn_wire::SerializedWGPUDevicePropertiesSize(&adapter_properties);
- std::vector<char> serialized_buffer(sizeof(cmds::DawnReturnDataHeader) +
- sizeof(cmds::DawnReturnAdapterIDs) +
- serialized_adapter_properties_size);
-
- // Set Dawn return data header
- reinterpret_cast<cmds::DawnReturnDataHeader*>(serialized_buffer.data())
- ->return_data_type = DawnReturnDataType::kRequestedDawnAdapterProperties;
-
- // Set adapter ids
- cmds::DawnReturnAdapterInfo* return_adapter_info =
- reinterpret_cast<cmds::DawnReturnAdapterInfo*>(
- serialized_buffer.data() + sizeof(cmds::DawnReturnDataHeader));
- return_adapter_info->adapter_ids.request_adapter_serial =
- request_adapter_serial;
- return_adapter_info->adapter_ids.adapter_service_id = adapter_service_id;
-
- // Set serialized adapter properties
- dawn_wire::SerializeWGPUDeviceProperties(
- &adapter_properties, return_adapter_info->deserialized_buffer);
-
- client_->HandleReturnData(base::make_span(
- reinterpret_cast<const uint8_t*>(serialized_buffer.data()),
- serialized_buffer.size()));
-}
-
dawn_native::DeviceType PowerPreferenceToDawnDeviceType(
PowerPreference power_preference) {
switch (power_preference) {
@@ -159,6 +137,176 @@ dawn_native::DeviceType PowerPreferenceToDawnDeviceType(
}
}
+class DawnDeviceAndWireServer {
+ public:
+ DawnDeviceAndWireServer(
+ DecoderClient* client,
+ WGPUDevice wgpu_device,
+ DawnDeviceClientID device_client_id,
+ DawnServiceMemoryTransferService* memory_transfer_service);
+ ~DawnDeviceAndWireServer();
+
+ WGPUDevice GetWGPUDevice() const;
+ void PerformPollingWork();
+ error::Error HandleDawnCommands(const volatile char* dawn_commands,
+ size_t size);
+
+ error::Error AssociateMailbox(
+ SharedImageRepresentationFactory* shared_image_representation_factory,
+ const Mailbox& mailbox,
+ uint32_t texture_id,
+ uint32_t texture_generation,
+ uint32_t usage);
+ error::Error DissociateMailbox(uint32_t texture_id,
+ uint32_t texture_generation);
+
+ private:
+ WGPUDevice wgpu_device_ = nullptr;
+ std::unique_ptr<dawn_wire::WireServer> wire_server_;
+ std::unique_ptr<WireServerCommandSerializer> wire_serializer_;
+ const DawnProcTable dawn_procs_;
+
+ // Helper struct which holds a representation and its ScopedAccess, ensuring
+ // safe destruction order.
+ struct SharedImageRepresentationAndAccess {
+ std::unique_ptr<SharedImageRepresentationDawn> representation;
+ std::unique_ptr<SharedImageRepresentationDawn::ScopedAccess> access;
+ };
+
+ // Map from the <ID, generation> pair for a wire texture to the shared image
+ // representation and access for it.
+ base::flat_map<std::tuple<uint32_t, uint32_t>,
+ std::unique_ptr<SharedImageRepresentationAndAccess>>
+ associated_shared_image_map_;
+};
+
+DawnDeviceAndWireServer::DawnDeviceAndWireServer(
+ DecoderClient* client,
+ WGPUDevice wgpu_device,
+ DawnDeviceClientID device_client_id,
+ DawnServiceMemoryTransferService* memory_transfer_service)
+ : wgpu_device_(wgpu_device),
+ wire_serializer_(
+ std::make_unique<WireServerCommandSerializer>(client,
+ device_client_id)),
+ dawn_procs_(dawn_native::GetProcs()) {
+ DCHECK(client);
+ DCHECK(wgpu_device);
+ DCHECK(memory_transfer_service);
+
+ dawn_wire::WireServerDescriptor descriptor = {};
+ descriptor.device = wgpu_device_;
+ descriptor.procs = &dawn_procs_;
+ descriptor.serializer = wire_serializer_.get();
+ descriptor.memoryTransferService = memory_transfer_service;
+ wire_server_ = std::make_unique<dawn_wire::WireServer>(descriptor);
+}
+
+DawnDeviceAndWireServer::~DawnDeviceAndWireServer() {
+ associated_shared_image_map_.clear();
+
+ // Reset the wire server first so all objects are destroyed before the
+ // device.
+ // TODO(enga): Handle Device/Context lost.
+ wire_server_ = nullptr;
+ dawn_procs_.deviceRelease(wgpu_device_);
+}
+
+WGPUDevice DawnDeviceAndWireServer::GetWGPUDevice() const {
+ return wgpu_device_;
+}
+
+void DawnDeviceAndWireServer::PerformPollingWork() {
+ dawn_procs_.deviceTick(wgpu_device_);
+ wire_serializer_->Flush();
+}
+
+error::Error DawnDeviceAndWireServer::HandleDawnCommands(
+ const volatile char* dawn_commands,
+ size_t size) {
+ if (!wire_server_->HandleCommands(dawn_commands, size)) {
+ NOTREACHED();
+ return error::kLostContext;
+ }
+ wire_serializer_->Flush();
+ return error::kNoError;
+}
+
+error::Error DawnDeviceAndWireServer::AssociateMailbox(
+ SharedImageRepresentationFactory* shared_image_representation_factory,
+ const Mailbox& mailbox,
+ uint32_t texture_id,
+ uint32_t texture_generation,
+ uint32_t usage) {
+ static constexpr uint32_t kAllowedTextureUsages = static_cast<uint32_t>(
+ WGPUTextureUsage_CopySrc | WGPUTextureUsage_CopyDst |
+ WGPUTextureUsage_Sampled | WGPUTextureUsage_OutputAttachment);
+ if (usage & ~kAllowedTextureUsages) {
+ DLOG(ERROR) << "AssociateMailbox: Invalid usage";
+ return error::kInvalidArguments;
+ }
+ WGPUTextureUsage wgpu_usage = static_cast<WGPUTextureUsage>(usage);
+
+ // Create a WGPUTexture from the mailbox.
+ std::unique_ptr<SharedImageRepresentationDawn> shared_image =
+ shared_image_representation_factory->ProduceDawn(mailbox, wgpu_device_);
+ if (!shared_image) {
+ DLOG(ERROR) << "AssociateMailbox: Couldn't produce shared image";
+ return error::kInvalidArguments;
+ }
+
+ // TODO(cwallez@chromium.org): Handle texture clearing. We should either
+ // pre-clear textures, or implement a way to detect whether DAWN has cleared
+ // a texture. crbug.com/1036080
+ std::unique_ptr<SharedImageRepresentationDawn::ScopedAccess>
+ shared_image_access = shared_image->BeginScopedAccess(
+ wgpu_usage, SharedImageRepresentation::AllowUnclearedAccess::kYes);
+ if (!shared_image_access) {
+ DLOG(ERROR) << "AssociateMailbox: Couldn't begin shared image access";
+ return error::kInvalidArguments;
+ }
+
+ // Inject the texture in the dawn_wire::Server and remember which shared image
+ // it is associated with.
+ if (!wire_server_->InjectTexture(shared_image_access->texture(), texture_id,
+ texture_generation)) {
+ DLOG(ERROR) << "AssociateMailbox: Invalid texture ID";
+ return error::kInvalidArguments;
+ }
+
+ std::unique_ptr<SharedImageRepresentationAndAccess>
+ representation_and_access =
+ std::make_unique<SharedImageRepresentationAndAccess>();
+ representation_and_access->representation = std::move(shared_image);
+ representation_and_access->access = std::move(shared_image_access);
+
+ std::tuple<uint32_t, uint32_t> id_and_generation{texture_id,
+ texture_generation};
+ auto insertion = associated_shared_image_map_.emplace(
+ id_and_generation, std::move(representation_and_access));
+
+ // InjectTexture already validated that the (ID, generation) can't have been
+ // registered before.
+ DCHECK(insertion.second);
+
+ return error::kNoError;
+}
+
+error::Error DawnDeviceAndWireServer::DissociateMailbox(
+ uint32_t texture_id,
+ uint32_t texture_generation) {
+ std::tuple<uint32_t, uint32_t> id_and_generation{texture_id,
+ texture_generation};
+ auto it = associated_shared_image_map_.find(id_and_generation);
+ if (it == associated_shared_image_map_.end()) {
+ DLOG(ERROR) << "DissociateMailbox: Invalid texture ID";
+ return error::kInvalidArguments;
+ }
+
+ associated_shared_image_map_.erase(it);
+ return error::kNoError;
+}
+
} // namespace
class WebGPUDecoderImpl final : public WebGPUDecoder {
@@ -241,14 +389,11 @@ class WebGPUDecoderImpl final : public WebGPUDecoder {
bool HasPollingWork() const override { return true; }
void PerformPollingWork() override {
- DCHECK(wire_serializer_);
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("gpu.dawn"),
"WebGPUDecoderImpl::PerformPollingWork");
- // TODO(jiawei.shao@intel.com): support multiple Dawn devices.
- if (wgpu_device_) {
- dawn_procs_.deviceTick(wgpu_device_);
+ for (auto& iter : dawn_device_and_wire_servers_) {
+ iter.second->PerformPollingWork();
}
- wire_serializer_->Flush();
}
TextureBase* GetTextureBase(uint32_t client_id) override {
@@ -340,6 +485,16 @@ class WebGPUDecoderImpl final : public WebGPUDecoder {
NOTREACHED();
return false;
}
+ bool ClearCompressedTextureLevel3D(gles2::Texture* texture,
+ unsigned target,
+ int level,
+ unsigned format,
+ int width,
+ int height,
+ int depth) override {
+ NOTREACHED();
+ return false;
+ }
bool ClearLevel3D(gles2::Texture* texture,
unsigned target,
int level,
@@ -396,24 +551,25 @@ class WebGPUDecoderImpl final : public WebGPUDecoder {
error::Error InitDawnDeviceAndSetWireServer(
int32_t requested_adapter_index,
+ DawnDeviceClientID device_client_id,
const WGPUDeviceProperties& requested_device_properties);
+ void SendAdapterProperties(DawnRequestAdapterSerial request_adapter_serial,
+ uint32_t adapter_service_id,
+ const dawn_native::Adapter& adapter);
+ void SendRequestedDeviceInfo(DawnDeviceClientID device_client_id,
+ bool is_request_device_success);
+
std::unique_ptr<SharedImageRepresentationFactory>
shared_image_representation_factory_;
- // Map from the <ID, generation> pair for a wire texture to the shared image
- // representation for it.
- base::flat_map<std::tuple<uint32_t, uint32_t>,
- std::unique_ptr<SharedImageRepresentationDawn>>
- associated_shared_image_map_;
+
+ base::flat_map<DawnDeviceClientID, std::unique_ptr<DawnDeviceAndWireServer>>
+ dawn_device_and_wire_servers_;
std::unique_ptr<dawn_platform::Platform> dawn_platform_;
- std::unique_ptr<WireServerCommandSerializer> wire_serializer_;
std::unique_ptr<DawnServiceMemoryTransferService> memory_transfer_service_;
std::unique_ptr<dawn_native::Instance> dawn_instance_;
std::vector<dawn_native::Adapter> dawn_adapters_;
- DawnProcTable dawn_procs_;
- WGPUDevice wgpu_device_ = nullptr;
- std::unique_ptr<dawn_wire::WireServer> wire_server_;
DISALLOW_COPY_AND_ASSIGN(WebGPUDecoderImpl);
};
@@ -452,22 +608,13 @@ WebGPUDecoderImpl::WebGPUDecoderImpl(
shared_image_manager,
memory_tracker)),
dawn_platform_(new DawnPlatform()),
- wire_serializer_(new WireServerCommandSerializer(client)),
memory_transfer_service_(new DawnServiceMemoryTransferService(this)),
- dawn_instance_(new dawn_native::Instance()),
- dawn_procs_(dawn_native::GetProcs()) {
+ dawn_instance_(new dawn_native::Instance()) {
dawn_instance_->SetPlatform(dawn_platform_.get());
}
WebGPUDecoderImpl::~WebGPUDecoderImpl() {
- associated_shared_image_map_.clear();
-
- // Reset the wire server first so all objects are destroyed before the device.
- // TODO(enga): Handle Device/Context lost.
- wire_server_ = nullptr;
- if (wgpu_device_ != nullptr) {
- dawn_procs_.deviceRelease(wgpu_device_);
- }
+ dawn_device_and_wire_servers_.clear();
}
ContextResult WebGPUDecoderImpl::Initialize() {
@@ -477,35 +624,33 @@ ContextResult WebGPUDecoderImpl::Initialize() {
error::Error WebGPUDecoderImpl::InitDawnDeviceAndSetWireServer(
int32_t requested_adapter_index,
+ DawnDeviceClientID device_client_id,
const WGPUDeviceProperties& request_device_properties) {
DCHECK_LE(0, requested_adapter_index);
- // TODO(jiawei.shao@intel.com): support multiple Dawn devices.
- if (wgpu_device_ != nullptr) {
- DCHECK(wire_server_);
- return error::kNoError;
- }
-
DCHECK_LT(static_cast<size_t>(requested_adapter_index),
dawn_adapters_.size());
+ if (dawn_device_and_wire_servers_.find(device_client_id) !=
+ dawn_device_and_wire_servers_.end()) {
+ return error::kLostContext;
+ }
+
dawn_native::DeviceDescriptor device_descriptor;
if (request_device_properties.textureCompressionBC) {
device_descriptor.requiredExtensions.push_back("texture_compression_bc");
}
- wgpu_device_ = dawn_adapters_[requested_adapter_index].CreateDevice();
- if (wgpu_device_ == nullptr) {
- return error::kLostContext;
+ WGPUDevice wgpu_device =
+ dawn_adapters_[requested_adapter_index].CreateDevice(&device_descriptor);
+ if (wgpu_device == nullptr) {
+ return error::kInvalidArguments;
}
- dawn_wire::WireServerDescriptor descriptor = {};
- descriptor.device = wgpu_device_;
- descriptor.procs = &dawn_procs_;
- descriptor.serializer = wire_serializer_.get();
- descriptor.memoryTransferService = memory_transfer_service_.get();
-
- wire_server_ = std::make_unique<dawn_wire::WireServer>(descriptor);
+ dawn_device_and_wire_servers_[device_client_id] =
+ std::make_unique<DawnDeviceAndWireServer>(client(), wgpu_device,
+ device_client_id,
+ memory_transfer_service_.get());
return error::kNoError;
}
@@ -515,14 +660,14 @@ void WebGPUDecoderImpl::DiscoverAdapters() {
std::vector<dawn_native::Adapter> adapters = dawn_instance_->GetAdapters();
for (const dawn_native::Adapter& adapter : adapters) {
#if defined(OS_WIN)
- // On Windows 10, we pick D3D12 backend because the rest of Chromium renders
- // with D3D11. By the same token, we pick the first adapter because ANGLE also
- // picks the first adapter. Later, we'll need to centralize adapter picking
- // such that Dawn and ANGLE are told which adapter to use by Chromium. If we
- // decide to handle multiple adapters, code on the Chromium side will need to
- // change to do appropriate cross adapter copying to make this happen, either
- // manually or by using DirectComposition.
- if (adapter.GetBackendType() == dawn_native::BackendType::D3D12) {
+ // On Windows 10, we pick D3D12 backend because the rest of Chromium renders
+ // with D3D11. By the same token, we pick the first adapter because ANGLE
+ // also picks the first adapter. Later, we'll need to centralize adapter
+ // picking such that Dawn and ANGLE are told which adapter to use by
+ // Chromium. If we decide to handle multiple adapters, code on the Chromium
+ // side will need to change to do appropriate cross adapter copying to make
+ // this happen, either manually or by using DirectComposition.
+ if (adapter.GetBackendType() == dawn_native::BackendType::D3D12) {
#else
if (adapter.GetBackendType() != dawn_native::BackendType::Null &&
adapter.GetBackendType() != dawn_native::BackendType::OpenGL) {
@@ -531,7 +676,7 @@ void WebGPUDecoderImpl::DiscoverAdapters() {
#if defined(OS_WIN)
break;
#endif
- }
+ }
}
}
@@ -658,14 +803,62 @@ error::Error WebGPUDecoderImpl::DoCommands(unsigned int num_commands,
return result;
}
+void WebGPUDecoderImpl::SendAdapterProperties(
+ DawnRequestAdapterSerial request_adapter_serial,
+ uint32_t adapter_service_id,
+ const dawn_native::Adapter& adapter) {
+ WGPUDeviceProperties adapter_properties = adapter.GetAdapterProperties();
+
+ size_t serialized_adapter_properties_size =
+ dawn_wire::SerializedWGPUDevicePropertiesSize(&adapter_properties);
+ std::vector<char> serialized_buffer(
+ offsetof(cmds::DawnReturnAdapterInfo, deserialized_buffer) +
+ serialized_adapter_properties_size);
+
+ cmds::DawnReturnAdapterInfo* return_adapter_info =
+ reinterpret_cast<cmds::DawnReturnAdapterInfo*>(serialized_buffer.data());
+
+ // Set Dawn return data header
+ return_adapter_info->header = {};
+ DCHECK_EQ(DawnReturnDataType::kRequestedDawnAdapterProperties,
+ return_adapter_info->header.return_data_header.return_data_type);
+ return_adapter_info->header.request_adapter_serial = request_adapter_serial;
+ return_adapter_info->header.adapter_service_id = adapter_service_id;
+
+ // Set serialized adapter properties
+ dawn_wire::SerializeWGPUDeviceProperties(
+ &adapter_properties, return_adapter_info->deserialized_buffer);
+
+ client()->HandleReturnData(base::make_span(
+ reinterpret_cast<const uint8_t*>(serialized_buffer.data()),
+ serialized_buffer.size()));
+}
+
+void WebGPUDecoderImpl::SendRequestedDeviceInfo(
+ DawnDeviceClientID device_client_id,
+ bool is_request_device_success) {
+ cmds::DawnReturnRequestDeviceInfo return_request_device_info;
+ DCHECK_EQ(DawnReturnDataType::kRequestedDeviceReturnInfo,
+ return_request_device_info.return_data_header.return_data_type);
+ return_request_device_info.device_client_id = device_client_id;
+ return_request_device_info.is_request_device_success =
+ is_request_device_success;
+
+ client()->HandleReturnData(base::make_span(
+ reinterpret_cast<const uint8_t*>(&return_request_device_info),
+ sizeof(return_request_device_info)));
+}
+
error::Error WebGPUDecoderImpl::HandleRequestAdapter(
uint32_t immediate_data_size,
const volatile void* cmd_data) {
const volatile webgpu::cmds::RequestAdapter& c =
*static_cast<const volatile webgpu::cmds::RequestAdapter*>(cmd_data);
-
PowerPreference power_preference =
static_cast<PowerPreference>(c.power_preference);
+ DawnRequestAdapterSerial request_adapter_serial =
+ static_cast<DawnRequestAdapterSerial>(c.request_adapter_serial);
+
int32_t requested_adapter_index = GetPreferredAdapterIndex(power_preference);
if (requested_adapter_index < 0) {
return error::kLostContext;
@@ -676,9 +869,9 @@ error::Error WebGPUDecoderImpl::HandleRequestAdapter(
DCHECK_LT(static_cast<size_t>(requested_adapter_index),
dawn_adapters_.size());
const dawn_native::Adapter& adapter = dawn_adapters_[requested_adapter_index];
- wire_serializer_->SendAdapterProperties(
- static_cast<uint32_t>(c.request_adapter_serial),
- static_cast<uint32_t>(requested_adapter_index), adapter);
+ SendAdapterProperties(request_adapter_serial,
+ static_cast<uint32_t>(requested_adapter_index),
+ adapter);
return error::kNoError;
}
@@ -688,7 +881,8 @@ error::Error WebGPUDecoderImpl::HandleRequestDevice(
const volatile void* cmd_data) {
const volatile webgpu::cmds::RequestDevice& c =
*static_cast<const volatile webgpu::cmds::RequestDevice*>(cmd_data);
-
+ DawnDeviceClientID device_client_id =
+ static_cast<DawnDeviceClientID>(c.device_client_id);
uint32_t adapter_service_id = static_cast<uint32_t>(c.adapter_service_id);
uint32_t request_device_properties_shm_id =
static_cast<uint32_t>(c.request_device_properties_shm_id);
@@ -698,22 +892,25 @@ error::Error WebGPUDecoderImpl::HandleRequestDevice(
static_cast<uint32_t>(c.request_device_properties_size);
WGPUDeviceProperties device_properties = {};
- if (!request_device_properties_size) {
- return InitDawnDeviceAndSetWireServer(adapter_service_id,
- device_properties);
- }
+ if (request_device_properties_size) {
+ const volatile char* shm_device_properties =
+ GetSharedMemoryAs<const volatile char*>(
+ request_device_properties_shm_id,
+ request_device_properties_shm_offset,
+ request_device_properties_size);
+ if (!shm_device_properties) {
+ return error::kOutOfBounds;
+ }
- const volatile char* shm_device_properties =
- GetSharedMemoryAs<const volatile char*>(
- request_device_properties_shm_id,
- request_device_properties_shm_offset, request_device_properties_size);
- if (!shm_device_properties) {
- return error::kOutOfBounds;
+ dawn_wire::DeserializeWGPUDeviceProperties(&device_properties,
+ shm_device_properties);
}
- dawn_wire::DeserializeWGPUDeviceProperties(&device_properties,
- shm_device_properties);
- return InitDawnDeviceAndSetWireServer(adapter_service_id, device_properties);
+ error::Error init_dawn_device_error = InitDawnDeviceAndSetWireServer(
+ adapter_service_id, device_client_id, device_properties);
+ SendRequestedDeviceInfo(device_client_id,
+ !error::IsError(init_dawn_device_error));
+ return init_dawn_device_error;
}
error::Error WebGPUDecoderImpl::HandleDawnCommands(
@@ -721,10 +918,11 @@ error::Error WebGPUDecoderImpl::HandleDawnCommands(
const volatile void* cmd_data) {
const volatile webgpu::cmds::DawnCommands& c =
*static_cast<const volatile webgpu::cmds::DawnCommands*>(cmd_data);
-
uint32_t size = static_cast<uint32_t>(c.size);
uint32_t commands_shm_id = static_cast<uint32_t>(c.commands_shm_id);
uint32_t commands_shm_offset = static_cast<uint32_t>(c.commands_shm_offset);
+ DawnDeviceClientID device_client_id =
+ static_cast<DawnDeviceClientID>(c.device_client_id);
const volatile char* shm_commands = GetSharedMemoryAs<const volatile char*>(
commands_shm_id, commands_shm_offset, size);
@@ -738,12 +936,13 @@ error::Error WebGPUDecoderImpl::HandleDawnCommands(
TRACE_EVENT1(TRACE_DISABLED_BY_DEFAULT("gpu.dawn"),
"WebGPUDecoderImpl::HandleDawnCommands", "bytes", size);
- if (!wire_server_->HandleCommands(shm_commands, size)) {
- NOTREACHED();
- return error::kLostContext;
+
+ auto iter = dawn_device_and_wire_servers_.find(device_client_id);
+ if (iter == dawn_device_and_wire_servers_.end()) {
+ return error::kInvalidArguments;
}
- wire_serializer_->Flush();
- return error::kNoError;
+
+ return iter->second->HandleDawnCommands(shm_commands, size);
}
error::Error WebGPUDecoderImpl::HandleAssociateMailboxImmediate(
@@ -752,8 +951,8 @@ error::Error WebGPUDecoderImpl::HandleAssociateMailboxImmediate(
const volatile webgpu::cmds::AssociateMailboxImmediate& c =
*static_cast<const volatile webgpu::cmds::AssociateMailboxImmediate*>(
cmd_data);
-
- uint32_t device_id = static_cast<uint32_t>(c.device_id);
+ DawnDeviceClientID device_client_id =
+ static_cast<DawnDeviceClientID>(c.device_client_id());
uint32_t device_generation = static_cast<uint32_t>(c.device_generation);
uint32_t id = static_cast<uint32_t>(c.id);
uint32_t generation = static_cast<uint32_t>(c.generation);
@@ -774,52 +973,17 @@ error::Error WebGPUDecoderImpl::HandleAssociateMailboxImmediate(
DLOG_IF(ERROR, !mailbox.Verify())
<< "AssociateMailbox was passed an invalid mailbox";
- // TODO(cwallez@chromium.org): Use device_id/generation when the decoder
- // supports multiple devices.
- if (device_id != 0 || device_generation != 0) {
- DLOG(ERROR) << "AssociateMailbox: Invalid device ID";
- return error::kInvalidArguments;
- }
-
- static constexpr uint32_t kAllowedTextureUsages = static_cast<uint32_t>(
- WGPUTextureUsage_CopySrc | WGPUTextureUsage_CopyDst |
- WGPUTextureUsage_Sampled | WGPUTextureUsage_OutputAttachment);
- if (usage & ~kAllowedTextureUsages) {
- DLOG(ERROR) << "AssociateMailbox: Invalid usage";
- return error::kInvalidArguments;
- }
- WGPUTextureUsage wgpu_usage = static_cast<WGPUTextureUsage>(usage);
-
- // Create a WGPUTexture from the mailbox.
- std::unique_ptr<SharedImageRepresentationDawn> shared_image =
- shared_image_representation_factory_->ProduceDawn(mailbox, wgpu_device_);
- if (!shared_image) {
- DLOG(ERROR) << "AssociateMailbox: Couldn't produce shared image";
- return error::kInvalidArguments;
- }
-
- WGPUTexture texture = shared_image->BeginAccess(wgpu_usage);
- if (!texture) {
- DLOG(ERROR) << "AssociateMailbox: Couldn't begin shared image access";
+ // Get the correct DawnDeviceAndWireServer
+ auto iter = dawn_device_and_wire_servers_.find(device_client_id);
+ if (iter == dawn_device_and_wire_servers_.end() || device_generation != 0) {
+ DLOG(ERROR) << "AssociateMailbox: Invalid device client ID";
return error::kInvalidArguments;
}
+ DawnDeviceAndWireServer* dawn_device_and_wire_server = iter->second.get();
- // Inject the texture in the dawn_wire::Server and remember which shared image
- // it is associated with.
- if (!wire_server_->InjectTexture(texture, id, generation)) {
- DLOG(ERROR) << "AssociateMailbox: Invalid texture ID";
- return error::kInvalidArguments;
- }
-
- std::tuple<uint32_t, uint32_t> id_and_generation{id, generation};
- auto insertion = associated_shared_image_map_.emplace(
- id_and_generation, std::move(shared_image));
-
- // InjectTexture already validated that the (ID, generation) can't have been
- // registered before.
- DCHECK(insertion.second);
-
- return error::kNoError;
+ return dawn_device_and_wire_server->AssociateMailbox(
+ shared_image_representation_factory_.get(), mailbox, id, generation,
+ usage);
}
error::Error WebGPUDecoderImpl::HandleDissociateMailbox(
@@ -827,20 +991,37 @@ error::Error WebGPUDecoderImpl::HandleDissociateMailbox(
const volatile void* cmd_data) {
const volatile webgpu::cmds::DissociateMailbox& c =
*static_cast<const volatile webgpu::cmds::DissociateMailbox*>(cmd_data);
-
+ DawnDeviceClientID device_client_id =
+ static_cast<DawnDeviceClientID>(c.device_client_id());
uint32_t texture_id = static_cast<uint32_t>(c.texture_id);
uint32_t texture_generation = static_cast<uint32_t>(c.texture_generation);
- std::tuple<uint32_t, uint32_t> id_and_generation{texture_id,
- texture_generation};
- auto it = associated_shared_image_map_.find(id_and_generation);
- if (it == associated_shared_image_map_.end()) {
- DLOG(ERROR) << "DissociateMailbox: Invalid texture ID";
+ // Get the correct DawnDeviceAndWireServer
+ auto iter = dawn_device_and_wire_servers_.find(device_client_id);
+ if (iter == dawn_device_and_wire_servers_.end()) {
+ DLOG(ERROR) << "AssociateMailbox: Invalid device client ID";
return error::kInvalidArguments;
}
+ DawnDeviceAndWireServer* dawn_device_and_wire_server = iter->second.get();
- it->second->EndAccess();
- associated_shared_image_map_.erase(it);
+ return dawn_device_and_wire_server->DissociateMailbox(texture_id,
+ texture_generation);
+}
+
+error::Error WebGPUDecoderImpl::HandleRemoveDevice(
+ uint32_t immediate_data_size,
+ const volatile void* cmd_data) {
+ const volatile webgpu::cmds::RemoveDevice& c =
+ *static_cast<const volatile webgpu::cmds::RemoveDevice*>(cmd_data);
+ DawnDeviceClientID device_client_id =
+ static_cast<DawnDeviceClientID>(c.device_client_id);
+
+ auto it = dawn_device_and_wire_servers_.find(device_client_id);
+ if (it == dawn_device_and_wire_servers_.end()) {
+ return error::kInvalidArguments;
+ }
+
+ dawn_device_and_wire_servers_.erase(it);
return error::kNoError;
}
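Every per-device command handler added above follows the same lookup-then-dispatch shape: resolve the DawnDeviceClientID in dawn_device_and_wire_servers_, reject unknown IDs with kInvalidArguments, and otherwise forward to the matching DawnDeviceAndWireServer. A minimal standalone sketch of that routing, with the patch's types replaced by simplified, hypothetical stand-ins:

#include <cstdint>
#include <map>
#include <memory>

// Simplified stand-ins for the patch's types (illustration only).
using DawnDeviceClientID = uint64_t;
struct DeviceEntry {};  // would hold the wire server, serializer, etc.
enum class Error { kNoError, kInvalidArguments };

std::map<DawnDeviceClientID, std::unique_ptr<DeviceEntry>> devices;

// Unknown client IDs are rejected per command rather than losing the context.
Error DispatchToDevice(DawnDeviceClientID id) {
  auto it = devices.find(id);
  if (it == devices.end())
    return Error::kInvalidArguments;
  // ... hand the command to it->second ...
  return Error::kNoError;
}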
diff --git a/chromium/gpu/command_buffer/service/webgpu_decoder_unittest.cc b/chromium/gpu/command_buffer/service/webgpu_decoder_unittest.cc
index 788d0923d4f..055a3caa239 100644
--- a/chromium/gpu/command_buffer/service/webgpu_decoder_unittest.cc
+++ b/chromium/gpu/command_buffer/service/webgpu_decoder_unittest.cc
@@ -4,9 +4,9 @@
#include "gpu/command_buffer/service/webgpu_decoder.h"
+#include "build/build_config.h"
#include "gpu/command_buffer/client/client_test_helper.h"
#include "gpu/command_buffer/common/shared_image_usage.h"
-#include "gpu/command_buffer/common/webgpu_cmd_enums.h"
#include "gpu/command_buffer/common/webgpu_cmd_format.h"
#include "gpu/command_buffer/service/context_group.h"
#include "gpu/command_buffer/service/decoder_client.h"
@@ -35,7 +35,6 @@ class WebGPUDecoderTest : public ::testing::Test {
if (!WebGPUSupported()) {
return;
}
-
// Shared image factories for some backends take a dependency on GL.
// Failure to create a test context with a surface and making it current
// will result in a "NoContext" context being current that asserts on all
@@ -65,11 +64,17 @@ class WebGPUDecoderTest : public ::testing::Test {
constexpr uint32_t kAdapterServiceID = 0;
cmds::RequestDevice requestDeviceCmd;
- requestDeviceCmd.Init(kAdapterServiceID, 0, 0, 0);
+ requestDeviceCmd.Init(kDeviceClientID, kAdapterServiceID, 0, 0, 0);
ASSERT_EQ(error::kNoError, ExecuteCmd(requestDeviceCmd));
+ GpuPreferences gpu_preferences;
+#if defined(OS_WIN)
+ // D3D shared images are only supported with passthrough command decoder.
+ gpu_preferences.use_passthrough_cmd_decoder = true;
+#endif // OS_WIN
+
factory_ = std::make_unique<SharedImageFactory>(
- GpuPreferences(), GpuDriverBugWorkarounds(), GpuFeatureInfo(),
+ gpu_preferences, GpuDriverBugWorkarounds(), GpuFeatureInfo(),
/*context_state=*/nullptr, /*mailbox_manager=*/nullptr,
&shared_image_manager_, /*image_factory=*/nullptr, /*tracker=*/nullptr,
/*enable_wrapped_sk_image=*/false);
@@ -122,6 +127,8 @@ class WebGPUDecoderTest : public ::testing::Test {
std::unique_ptr<SharedImageFactory> factory_;
scoped_refptr<gl::GLSurface> gl_surface_;
scoped_refptr<gl::GLContext> gl_context_;
+
+ static const DawnDeviceClientID kDeviceClientID = 0u;
};
TEST_F(WebGPUDecoderTest, DawnCommands) {
@@ -131,7 +138,7 @@ TEST_F(WebGPUDecoderTest, DawnCommands) {
}
cmds::DawnCommands cmd;
- cmd.Init(0, 0, 0);
+ cmd.Init(kDeviceClientID, 0, 0, 0);
EXPECT_EQ(error::kOutOfBounds, ExecuteCmd(cmd));
}
@@ -149,21 +156,33 @@ TEST_F(WebGPUDecoderTest, AssociateMailbox) {
gpu::Mailbox mailbox = Mailbox::GenerateForSharedImage();
EXPECT_TRUE(factory_->CreateSharedImage(
mailbox, viz::ResourceFormat::RGBA_8888, {1, 1},
- gfx::ColorSpace::CreateSRGB(), SHARED_IMAGE_USAGE_WEBGPU));
+ gfx::ColorSpace::CreateSRGB(), gfx::kNullAcceleratedWidget,
+ SHARED_IMAGE_USAGE_WEBGPU));
// Error case: invalid mailbox
{
gpu::Mailbox bad_mailbox;
AssociateMailboxCmdStorage cmd;
- cmd.cmd.Init(0, 0, 1, 0, WGPUTextureUsage_Sampled, bad_mailbox.name);
+ cmd.cmd.Init(kDeviceClientID, 0, 1, 0, WGPUTextureUsage_Sampled,
+ bad_mailbox.name);
EXPECT_EQ(error::kInvalidArguments,
ExecuteImmediateCmd(cmd.cmd, sizeof(bad_mailbox.name)));
}
- // Error case: device doesn't exist.
+ // Error case: device client id doesn't exist.
+ {
+ AssociateMailboxCmdStorage cmd;
+ cmd.cmd.Init(kDeviceClientID + 1, 0, 1, 0, WGPUTextureUsage_Sampled,
+ mailbox.name);
+ EXPECT_EQ(error::kInvalidArguments,
+ ExecuteImmediateCmd(cmd.cmd, sizeof(mailbox.name)));
+ }
+
+ // Error case: device generation is invalid.
{
AssociateMailboxCmdStorage cmd;
- cmd.cmd.Init(42, 42, 1, 0, WGPUTextureUsage_Sampled, mailbox.name);
+ cmd.cmd.Init(kDeviceClientID, 42, 1, 0, WGPUTextureUsage_Sampled,
+ mailbox.name);
EXPECT_EQ(error::kInvalidArguments,
ExecuteImmediateCmd(cmd.cmd, sizeof(mailbox.name)));
}
@@ -171,7 +190,8 @@ TEST_F(WebGPUDecoderTest, AssociateMailbox) {
// Error case: texture ID invalid for the wire server.
{
AssociateMailboxCmdStorage cmd;
- cmd.cmd.Init(0, 0, 42, 42, WGPUTextureUsage_Sampled, mailbox.name);
+ cmd.cmd.Init(kDeviceClientID, 0, 42, 42, WGPUTextureUsage_Sampled,
+ mailbox.name);
EXPECT_EQ(error::kInvalidArguments,
ExecuteImmediateCmd(cmd.cmd, sizeof(mailbox.name)));
}
@@ -179,7 +199,8 @@ TEST_F(WebGPUDecoderTest, AssociateMailbox) {
// Error case: invalid usage.
{
AssociateMailboxCmdStorage cmd;
- cmd.cmd.Init(0, 0, 42, 42, WGPUTextureUsage_Sampled, mailbox.name);
+ cmd.cmd.Init(kDeviceClientID, 0, 42, 42, WGPUTextureUsage_Sampled,
+ mailbox.name);
EXPECT_EQ(error::kInvalidArguments,
ExecuteImmediateCmd(cmd.cmd, sizeof(mailbox.name)));
}
@@ -187,7 +208,8 @@ TEST_F(WebGPUDecoderTest, AssociateMailbox) {
// Error case: invalid texture usage.
{
AssociateMailboxCmdStorage cmd;
- cmd.cmd.Init(0, 0, 1, 0, WGPUTextureUsage_Force32, mailbox.name);
+ cmd.cmd.Init(kDeviceClientID, 0, 1, 0, WGPUTextureUsage_Force32,
+ mailbox.name);
EXPECT_EQ(error::kInvalidArguments,
ExecuteImmediateCmd(cmd.cmd, sizeof(mailbox.name)));
}
@@ -199,7 +221,8 @@ TEST_F(WebGPUDecoderTest, AssociateMailbox) {
// and generation invalid.
{
AssociateMailboxCmdStorage cmd;
- cmd.cmd.Init(0, 0, 1, 0, WGPUTextureUsage_Sampled, mailbox.name);
+ cmd.cmd.Init(kDeviceClientID, 0, 1, 0, WGPUTextureUsage_Sampled,
+ mailbox.name);
EXPECT_EQ(error::kNoError,
ExecuteImmediateCmd(cmd.cmd, sizeof(mailbox.name)));
}
@@ -207,7 +230,8 @@ TEST_F(WebGPUDecoderTest, AssociateMailbox) {
// Error case: associated to an already associated texture.
{
AssociateMailboxCmdStorage cmd;
- cmd.cmd.Init(0, 0, 1, 0, WGPUTextureUsage_Sampled, mailbox.name);
+ cmd.cmd.Init(kDeviceClientID, 0, 1, 0, WGPUTextureUsage_Sampled,
+ mailbox.name);
EXPECT_EQ(error::kInvalidArguments,
ExecuteImmediateCmd(cmd.cmd, sizeof(mailbox.name)));
}
@@ -215,7 +239,7 @@ TEST_F(WebGPUDecoderTest, AssociateMailbox) {
// Dissociate the image from the control case to remove its reference.
{
cmds::DissociateMailbox cmd;
- cmd.Init(1, 0);
+ cmd.Init(kDeviceClientID, 1, 0);
EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
}
}
@@ -229,12 +253,14 @@ TEST_F(WebGPUDecoderTest, DissociateMailbox) {
gpu::Mailbox mailbox = Mailbox::GenerateForSharedImage();
EXPECT_TRUE(factory_->CreateSharedImage(
mailbox, viz::ResourceFormat::RGBA_8888, {1, 1},
- gfx::ColorSpace::CreateSRGB(), SHARED_IMAGE_USAGE_WEBGPU));
+ gfx::ColorSpace::CreateSRGB(), kNullSurfaceHandle,
+ SHARED_IMAGE_USAGE_WEBGPU));
// Associate a mailbox so we can later dissociate it.
{
AssociateMailboxCmdStorage cmd;
- cmd.cmd.Init(0, 0, 1, 0, WGPUTextureUsage_Sampled, mailbox.name);
+ cmd.cmd.Init(kDeviceClientID, 0, 1, 0, WGPUTextureUsage_Sampled,
+ mailbox.name);
EXPECT_EQ(error::kNoError,
ExecuteImmediateCmd(cmd.cmd, sizeof(mailbox.name)));
}
@@ -242,14 +268,28 @@ TEST_F(WebGPUDecoderTest, DissociateMailbox) {
// Error case: wrong texture ID
{
cmds::DissociateMailbox cmd;
- cmd.Init(42, 42);
+ cmd.Init(kDeviceClientID, 42, 0);
+ EXPECT_EQ(error::kInvalidArguments, ExecuteCmd(cmd));
+ }
+
+ // Error case: wrong texture generation
+ {
+ cmds::DissociateMailbox cmd;
+ cmd.Init(kDeviceClientID, 1, 42);
+ EXPECT_EQ(error::kInvalidArguments, ExecuteCmd(cmd));
+ }
+
+ // Error case: invalid client device ID
+ {
+ cmds::DissociateMailbox cmd;
+ cmd.Init(kDeviceClientID + 1, 1, 0);
EXPECT_EQ(error::kInvalidArguments, ExecuteCmd(cmd));
}
// Success case
{
cmds::DissociateMailbox cmd;
- cmd.Init(1, 0);
+ cmd.Init(kDeviceClientID, 1, 0);
EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
}
}
diff --git a/chromium/gpu/command_buffer/service/wrapped_sk_image.cc b/chromium/gpu/command_buffer/service/wrapped_sk_image.cc
index a6d5768392e..11d8b4b8103 100644
--- a/chromium/gpu/command_buffer/service/wrapped_sk_image.cc
+++ b/chromium/gpu/command_buffer/service/wrapped_sk_image.cc
@@ -38,9 +38,13 @@ namespace raster {
namespace {
-class WrappedSkImage : public SharedImageBacking {
+class WrappedSkImage : public ClearTrackingSharedImageBacking {
public:
~WrappedSkImage() override {
+ promise_texture_.reset();
+ if (backend_texture_.isValid())
+ DeleteGrBackendTexture(context_state_, &backend_texture_);
+
DCHECK(context_state_->context_lost() ||
context_state_->IsCurrent(nullptr));
if (!context_state_->context_lost())
@@ -52,15 +56,6 @@ class WrappedSkImage : public SharedImageBacking {
return false;
}
- void Destroy() override {
- promise_texture_.reset();
- gpu::DeleteSkImage(context_state_, std::move(image_));
- }
-
- bool IsCleared() const override { return cleared_; }
-
- void SetCleared() override { cleared_ = true; }
-
void Update(std::unique_ptr<gfx::GpuFence> in_fence) override {}
void OnMemoryDump(const std::string& dump_name,
@@ -90,7 +85,7 @@ class WrappedSkImage : public SharedImageBacking {
DCHECK(context_state_->IsCurrent(nullptr));
return SkSurface::MakeFromBackendTexture(
- context_state_->gr_context(), image_->getBackendTexture(false),
+ context_state_->gr_context(), backend_texture_,
kTopLeft_GrSurfaceOrigin, final_msaa_count, GetSkColorType(),
color_space().ToSkColorSpace(), &surface_props);
}
@@ -113,13 +108,13 @@ class WrappedSkImage : public SharedImageBacking {
uint32_t usage,
size_t estimated_size,
SharedContextState* context_state)
- : SharedImageBacking(mailbox,
- format,
- size,
- color_space,
- usage,
- estimated_size,
- false /* is_thread_safe */),
+ : ClearTrackingSharedImageBacking(mailbox,
+ format,
+ size,
+ color_space,
+ usage,
+ estimated_size,
+ false /* is_thread_safe */),
context_state_(context_state) {
DCHECK(!!context_state_);
}
@@ -144,25 +139,24 @@ class WrappedSkImage : public SharedImageBacking {
if (!data.empty()) {
if (format() == viz::ResourceFormat::ETC1) {
- auto sk_data = SkData::MakeWithCopy(data.data(), data.size());
- image_ = SkImage::MakeFromCompressed(
- context_state_->gr_context(), sk_data, size().width(),
- size().height(), SkImage::kETC1_CompressionType);
+ backend_texture_ =
+ context_state_->gr_context()->createCompressedBackendTexture(
+ size().width(), size().height(), SkImage::kETC1_CompressionType,
+ data.data(), data.size(), GrMipMapped::kNo, is_protected);
} else {
SkBitmap bitmap;
if (!bitmap.installPixels(info, const_cast<uint8_t*>(data.data()),
info.minRowBytes())) {
return false;
}
- image_ = SkImage::MakeFromBitmap(bitmap);
- // Move image to GPU
- if (image_)
- image_ = image_->makeTextureImage(context_state_->gr_context());
+ backend_texture_ = context_state_->gr_context()->createBackendTexture(
+ bitmap.pixmap(), GrRenderable::kNo, is_protected);
}
- if (!image_)
+ if (!backend_texture_.isValid())
return false;
+ SetCleared();
OnWriteSucceeded();
} else {
// Initializing to bright green makes it obvious if the pixels are not
@@ -170,42 +164,41 @@ class WrappedSkImage : public SharedImageBacking {
// We don't do this on release builds because there is a slight overhead.
#if DCHECK_IS_ON()
- auto backend_texture = context_state_->gr_context()->createBackendTexture(
+ backend_texture_ = context_state_->gr_context()->createBackendTexture(
size().width(), size().height(), GetSkColorType(), SkColors::kBlue,
GrMipMapped::kNo, GrRenderable::kYes, is_protected);
#else
- auto backend_texture = context_state_->gr_context()->createBackendTexture(
+ backend_texture_ = context_state_->gr_context()->createBackendTexture(
size().width(), size().height(), GetSkColorType(), GrMipMapped::kNo,
GrRenderable::kYes, is_protected);
#endif
- image_ = SkImage::MakeFromAdoptedTexture(
- context_state_->gr_context(), backend_texture,
- GrSurfaceOrigin::kTopLeft_GrSurfaceOrigin, info.colorType(),
- info.alphaType(), color_space().ToSkColorSpace());
}
- auto backend_texture = image_->getBackendTexture(true);
- DCHECK(backend_texture.isValid());
+ if (!backend_texture_.isValid()) {
+ DLOG(ERROR) << "createBackendTexture() failed with SkColorType:"
+ << GetSkColorType();
+ return false;
+ }
- promise_texture_ = SkPromiseImageTexture::Make(backend_texture);
+ promise_texture_ = SkPromiseImageTexture::Make(backend_texture_);
- switch (backend_texture.backend()) {
+ switch (backend_texture_.backend()) {
case GrBackendApi::kOpenGL: {
GrGLTextureInfo tex_info;
- if (backend_texture.getGLTextureInfo(&tex_info))
+ if (backend_texture_.getGLTextureInfo(&tex_info))
tracing_id_ = tex_info.fID;
break;
}
case GrBackendApi::kVulkan: {
GrVkImageInfo image_info;
- if (backend_texture.getVkImageInfo(&image_info))
+ if (backend_texture_.getVkImageInfo(&image_info))
tracing_id_ = reinterpret_cast<uint64_t>(image_info.fImage);
break;
}
#if BUILDFLAG(SKIA_USE_DAWN)
case GrBackendApi::kDawn: {
GrDawnTextureInfo tex_info;
- if (backend_texture.getDawnTextureInfo(&tex_info))
+ if (backend_texture_.getDawnTextureInfo(&tex_info))
tracing_id_ = reinterpret_cast<uint64_t>(tex_info.fTexture.Get());
break;
}
@@ -220,11 +213,8 @@ class WrappedSkImage : public SharedImageBacking {
SharedContextState* const context_state_;
+ GrBackendTexture backend_texture_;
sk_sp<SkPromiseImageTexture> promise_texture_;
- // TODO(penghuang): manage texture directly with GrBackendTexture,
- sk_sp<SkImage> image_;
-
- bool cleared_ = false;
uint64_t tracing_id_ = 0;
@@ -269,6 +259,8 @@ class WrappedSkImageRepresentation : public SharedImageRepresentationSkia {
// TODO(ericrk): Handle begin/end correctness checks.
}
+ bool SupportsMultipleConcurrentReadAccess() override { return true; }
+
private:
WrappedSkImage* wrapped_sk_image() {
return static_cast<WrappedSkImage*>(backing());
@@ -287,6 +279,7 @@ WrappedSkImageFactory::~WrappedSkImageFactory() = default;
std::unique_ptr<SharedImageBacking> WrappedSkImageFactory::CreateSharedImage(
const Mailbox& mailbox,
viz::ResourceFormat format,
+ SurfaceHandle surface_handle,
const gfx::Size& size,
const gfx::ColorSpace& color_space,
uint32_t usage,
diff --git a/chromium/gpu/command_buffer/service/wrapped_sk_image.h b/chromium/gpu/command_buffer/service/wrapped_sk_image.h
index b549965f3af..9cb0df471d4 100644
--- a/chromium/gpu/command_buffer/service/wrapped_sk_image.h
+++ b/chromium/gpu/command_buffer/service/wrapped_sk_image.h
@@ -31,6 +31,7 @@ class GPU_GLES2_EXPORT WrappedSkImageFactory
std::unique_ptr<SharedImageBacking> CreateSharedImage(
const Mailbox& mailbox,
viz::ResourceFormat format,
+ SurfaceHandle surface_handle,
const gfx::Size& size,
const gfx::ColorSpace& color_space,
uint32_t usage,
diff --git a/chromium/gpu/command_buffer/webgpu_cmd_buffer_functions.txt b/chromium/gpu/command_buffer/webgpu_cmd_buffer_functions.txt
index 43dc08f0801..4f3488056a7 100644
--- a/chromium/gpu/command_buffer/webgpu_cmd_buffer_functions.txt
+++ b/chromium/gpu/command_buffer/webgpu_cmd_buffer_functions.txt
@@ -6,8 +6,9 @@
// WebGPU commands. Note the first 2 characters (usually 'wg') are
// completely ignored.
-GL_APICALL void GL_APIENTRY wgDawnCommands (const char* commands, size_t size);
-GL_APICALL void GL_APIENTRY wgAssociateMailbox (GLuint device_id, GLuint device_generation, GLuint id, GLuint generation, GLuint usage, const GLbyte* mailbox);
-GL_APICALL void GL_APIENTRY wgDissociateMailbox (GLuint texture_id, GLuint texture_generation);
-GL_APICALL void GL_APIENTRY wgRequestAdapter (GLuint request_adapter_serial, EnumClassPowerPreference power_preference = PowerPreference::kDefault);
-GL_APICALL void GL_APIENTRY wgRequestDevice (GLuint adapter_service_id, const char* dawn_request_device_properties, size_t request_device_properties_size);
+GL_APICALL void GL_APIENTRY wgDawnCommands (GLuint64 device_client_id, const char* commands, size_t size);
+GL_APICALL void GL_APIENTRY wgAssociateMailbox (GLuint64 device_client_id, GLuint device_generation, GLuint id, GLuint generation, GLuint usage, const GLbyte* mailbox);
+GL_APICALL void GL_APIENTRY wgDissociateMailbox (GLuint64 device_client_id, GLuint texture_id, GLuint texture_generation);
+GL_APICALL void GL_APIENTRY wgRequestAdapter (GLuint64 request_adapter_serial, EnumClassPowerPreference power_preference = PowerPreference::kDefault);
+GL_APICALL void GL_APIENTRY wgRequestDevice (GLuint64 device_client_id, GLuint adapter_service_id, const char* dawn_request_device_properties, size_t request_device_properties_size);
+GL_APICALL void GL_APIENTRY wgRemoveDevice (GLuint64 device_client_id);
diff --git a/chromium/gpu/config/BUILD.gn b/chromium/gpu/config/BUILD.gn
index d954e1ac669..a0e03e049c7 100644
--- a/chromium/gpu/config/BUILD.gn
+++ b/chromium/gpu/config/BUILD.gn
@@ -10,13 +10,9 @@ import("//gpu/vulkan/features.gni")
group("config") {
if (is_component_build) {
- public_deps = [
- "//gpu",
- ]
+ public_deps = [ "//gpu" ]
} else {
- public_deps = [
- ":config_sources",
- ]
+ public_deps = [ ":config_sources" ]
}
}
@@ -63,9 +59,7 @@ action("process_json") {
args += [ "linux" ]
}
- public_deps = [
- ":workaround_list",
- ]
+ public_deps = [ ":workaround_list" ]
}
action("workaround_list") {
@@ -77,9 +71,7 @@ action("workaround_list") {
skia_workaround_list,
]
- outputs = [
- "$target_gen_dir/gpu_driver_bug_workaround_autogen.h",
- ]
+ outputs = [ "$target_gen_dir/gpu_driver_bug_workaround_autogen.h" ]
args = [
"--output-file",
@@ -99,9 +91,7 @@ if (enable_vulkan) {
configs += [ "//gpu:gpu_implementation" ]
- deps = [
- "//gpu/ipc/common:vulkan_interface",
- ]
+ deps = [ "//gpu/ipc/common:vulkan_interface" ]
all_dependent_configs = [ "//third_party/vulkan:vulkan_config" ]
@@ -121,6 +111,8 @@ jumbo_source_set("config_sources") {
visibility = [ "//gpu/*" ]
sources = [
+ "device_perf_info.cc",
+ "device_perf_info.h",
"dx_diag_node.cc",
"dx_diag_node.h",
"gpu_blocklist.cc",
@@ -164,6 +156,8 @@ jumbo_source_set("config_sources") {
"gpu_test_expectations_parser.h",
"gpu_util.cc",
"gpu_util.h",
+ "skia_limits.cc",
+ "skia_limits.h",
]
if (is_fuchsia) {
@@ -174,9 +168,7 @@ jumbo_source_set("config_sources") {
configs += [ "//gpu:gpu_implementation" ]
- public_deps = [
- "//components/crash/core/common:crash_key",
- ]
+ public_deps = [ "//components/crash/core/common:crash_key" ]
if (enable_vulkan) {
public_deps += [ ":vulkan_info" ]
@@ -186,6 +178,8 @@ jumbo_source_set("config_sources") {
":process_json",
"//base",
"//build:branding_buildflags",
+ "//build:chromecast_buildflags",
+ "//gpu/command_buffer/common:common_sources",
"//gpu/ipc/common:gpu_preferences_interface",
"//gpu/vulkan:buildflags",
"//media:media_buildflags",
@@ -222,4 +216,7 @@ jumbo_source_set("config_sources") {
if (is_linux || is_mac) {
deps += [ "//third_party/angle:angle_gpu_info_util" ]
}
+ if (use_x11) {
+ deps += [ "//ui/gfx/linux:gpu_memory_buffer_support_x11" ]
+ }
}
diff --git a/chromium/gpu/config/PRESUBMIT.py b/chromium/gpu/config/PRESUBMIT.py
new file mode 100644
index 00000000000..9b8bc15a2af
--- /dev/null
+++ b/chromium/gpu/config/PRESUBMIT.py
@@ -0,0 +1,36 @@
+# Copyright (c) 2020 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Enforces workaround list is alphabetically sorted.
+
+See http://dev.chromium.org/developers/how-tos/depottools/presubmit-scripts
+for more details on the presubmit API built into depot_tools.
+"""
+
+import difflib
+import os.path
+
+def _CheckGPUWorkaroundListSorted(input_api, output_api):
+ """Check: gpu_workaround_list.txt feature list sorted alphabetically.
+ """
+ filename = os.path.join(input_api.PresubmitLocalPath(),
+ 'gpu_workaround_list.txt')
+
+ workaround_list = [line.rstrip('\n') for line in open(filename)]
+
+ workaround_list_sorted = sorted(workaround_list, key=lambda s: s.lower())
+ if workaround_list == workaround_list_sorted:
+ return []
+ # Diff the sorted/unsorted versions.
+ differ = difflib.Differ()
+ diff = differ.compare(workaround_list, workaround_list_sorted)
+ return [output_api.PresubmitError(
+ 'gpu_workaround_list.txt features must be sorted alphabetically. '
+ 'Diff of feature order follows:', long_text='\n'.join(diff))]
+
+def CheckChangeOnUpload(input_api, output_api):
+ return _CheckGPUWorkaroundListSorted(input_api, output_api)
+
+def CheckChangeOnCommit(input_api, output_api):
+ return _CheckGPUWorkaroundListSorted(input_api, output_api)
diff --git a/chromium/gpu/config/device_perf_info.cc b/chromium/gpu/config/device_perf_info.cc
new file mode 100644
index 00000000000..af290dca079
--- /dev/null
+++ b/chromium/gpu/config/device_perf_info.cc
@@ -0,0 +1,32 @@
+// Copyright 2020 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "gpu/config/device_perf_info.h"
+
+#include "base/no_destructor.h"
+#include "base/synchronization/lock.h"
+
+namespace gpu {
+
+namespace {
+// Global instance in browser process.
+base::Optional<DevicePerfInfo> g_device_perf_info;
+
+base::Lock& GetLock() {
+ static base::NoDestructor<base::Lock> lock;
+ return *lock;
+}
+} // namespace
+
+base::Optional<DevicePerfInfo> GetDevicePerfInfo() {
+ base::AutoLock lock(GetLock());
+ return g_device_perf_info;
+}
+
+void SetDevicePerfInfo(const DevicePerfInfo& device_perf_info) {
+ base::AutoLock lock(GetLock());
+ g_device_perf_info = device_perf_info;
+}
+
+} // namespace gpu
diff --git a/chromium/gpu/config/device_perf_info.h b/chromium/gpu/config/device_perf_info.h
new file mode 100644
index 00000000000..10cdaac4747
--- /dev/null
+++ b/chromium/gpu/config/device_perf_info.h
@@ -0,0 +1,66 @@
+// Copyright 2020 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef GPU_CONFIG_DEVICE_PERF_INFO_H_
+#define GPU_CONFIG_DEVICE_PERF_INFO_H_
+
+#include <string>
+#include <vector>
+
+#include "base/optional.h"
+#include "build/build_config.h"
+#include "gpu/gpu_export.h"
+
+#if defined(OS_WIN)
+#include <d3dcommon.h>
+#endif
+
+namespace gpu {
+
+enum class IntelGpuGeneration {
+ kNonIntel = 0,
+ kUnknownIntel = 1, // Intel GPU, but not one of the following generations.
+ // Don't care about Gen1 ~ Gen5. Detection code starts with Gen6.
+ kGen6 = 6,
+ kGen7 = 7,
+ kGen8 = 8,
+ kGen9 = 9,
+ kGen10 = 10,
+ kGen11 = 11,
+ kGen12 = 12,
+ kMaxValue = kGen12,
+};
+
+enum class HasDiscreteGpu {
+ kNo = 0,
+ kYes = 1,
+ kUnknown = 2,
+ kMaxValue = kUnknown,
+};
+
+struct GPU_EXPORT DevicePerfInfo {
+ uint32_t total_physical_memory_mb = 0u;
+ uint32_t total_disk_space_mb = 0u;
+ uint32_t hardware_concurrency = 0u;
+#if defined(OS_WIN)
+ // system commit limit (n pages) x page size.
+ uint32_t system_commit_limit_mb = 0u;
+ // If multiple GPUs are detected, this holds the highest feature level.
+ D3D_FEATURE_LEVEL d3d11_feature_level = D3D_FEATURE_LEVEL_1_0_CORE;
+ HasDiscreteGpu has_discrete_gpu = HasDiscreteGpu::kUnknown;
+#endif
+
+ // The following fields are only filled on the browser side. They don't
+ // need to be part of mojom.
+ IntelGpuGeneration intel_gpu_generation = IntelGpuGeneration::kNonIntel;
+ bool software_rendering = false;
+};
+
+// Thread-safe getter and setter of global instance of DevicePerfInfo.
+GPU_EXPORT base::Optional<DevicePerfInfo> GetDevicePerfInfo();
+GPU_EXPORT void SetDevicePerfInfo(const DevicePerfInfo& device_perf_info);
+
+} // namespace gpu
+
+#endif // GPU_CONFIG_DEVICE_PERF_INFO_H_
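For orientation only (a hedged usage sketch, not code from this patch), a browser-side caller would populate and read the new global roughly as follows; GetDevicePerfInfo() returns base::nullopt until SetDevicePerfInfo() has run:

#include "gpu/config/device_perf_info.h"

void RecordDevicePerfInfoExample() {  // hypothetical helper name
  gpu::DevicePerfInfo info;
  info.total_physical_memory_mb = 16384u;
  info.hardware_concurrency = 8u;
  gpu::SetDevicePerfInfo(info);

  base::Optional<gpu::DevicePerfInfo> current = gpu::GetDevicePerfInfo();
  if (current.has_value()) {
    // Read current->intel_gpu_generation, current->software_rendering, etc.
  }
}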
diff --git a/chromium/gpu/config/gpu_control_list.cc b/chromium/gpu/config/gpu_control_list.cc
index dac27c419fb..b4ab8204012 100644
--- a/chromium/gpu/config/gpu_control_list.cc
+++ b/chromium/gpu/config/gpu_control_list.cc
@@ -96,6 +96,14 @@ int CompareLexicalNumberStrings(
return 0;
}
+bool isOldIntelDriver(const std::vector<std::string>& version) {
+ DCHECK_EQ(4u, version.size());
+ unsigned value = 0;
+ bool valid = base::StringToUint(version[2], &value);
+ DCHECK(valid);
+ return value < 100;
+}
+
// A mismatch is identified only if both |input| and |pattern| are not empty.
bool StringMismatch(const std::string& input, const std::string& pattern) {
if (input.empty() || pattern.empty())
@@ -234,14 +242,59 @@ void GpuControlList::Entry::LogControlListMatch(
control_list_logging_name.c_str());
}
-bool GpuControlList::DriverInfo::Contains(const GPUInfo& gpu_info) const {
+bool GpuControlList::DriverInfo::Contains(const GPUInfo& gpu_info,
+ VersionSchema version_schema) const {
const GPUInfo::GPUDevice& active_gpu = gpu_info.active_gpu();
if (StringMismatch(active_gpu.driver_vendor, driver_vendor)) {
return false;
}
- if (driver_version.IsSpecified() && !active_gpu.driver_version.empty() &&
- !driver_version.Contains(active_gpu.driver_version)) {
- return false;
+ if (driver_version.IsSpecified() && !active_gpu.driver_version.empty()) {
+ if (version_schema == kCommon) {
+ if (!driver_version.Contains(active_gpu.driver_version))
+ return false;
+ } else if (version_schema == kIntelDriver) {
+ std::vector<std::string> version;
+ if (!ProcessVersionString(active_gpu.driver_version, '.', &version))
+ return false;
+ std::vector<std::string> ref_version, ref_version2;
+ bool valid = ProcessVersionString(driver_version.value1, '.',
+ &ref_version);
+ DCHECK(valid);
+ if (driver_version.value2) {
+ valid = ProcessVersionString(driver_version.value2, '.', &ref_version2);
+ DCHECK(valid);
+ }
+ // If either of the two versions doesn't match the Intel driver version
+ // schema, or they belong to different generation of version schema, they
+ // should not be compared.
+ if (version.size() != 4 || ref_version.size() != 4)
+ return false;
+ if (isOldIntelDriver(version) != isOldIntelDriver(ref_version))
+ return false;
+ if (!ref_version2.empty()) {
+ if (ref_version2.size() != 4
+ || isOldIntelDriver(ref_version) != isOldIntelDriver(ref_version2))
+ return false;
+ }
+
+ std::string build_num, ref_build_num, ref_build_num2;
+ if (isOldIntelDriver(version)) {
+ build_num = version[3];
+ ref_build_num = ref_version[3];
+ if (!ref_version2.empty())
+ ref_build_num2 = ref_version2[3];
+ } else {
+ build_num = version[2] + "." + version[3];
+ ref_build_num = ref_version[2] + "." + ref_version[3];
+ if (!ref_version2.empty())
+ ref_build_num2 = ref_version2[2] + "." + ref_version2[3];
+ }
+ Version ref_driver_version(driver_version);
+ ref_driver_version.value1 = ref_build_num.c_str();
+ if (!ref_build_num2.empty())
+ ref_driver_version.value2 = ref_build_num2.c_str();
+ if (!ref_driver_version.Contains(build_num))
+ return false;
+ }
}
return true;
}
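The Intel-specific branch above compares only the build-number portion of an AA.BB.CCC.DDDD driver version: the last field (DDDD) under the old schema, or the last two fields (CCC.DDDD) once CCC reaches 100. A minimal standalone sketch of that extraction (not the patch's exact code), with two worked values:

#include <string>
#include <vector>

// Assumes |version| already holds exactly four components, e.g.
// {"26", "20", "100", "7000"} for driver version "26.20.100.7000".
std::string IntelBuildNumber(const std::vector<std::string>& version) {
  bool old_schema = std::stoul(version[2]) < 100;  // mirrors isOldIntelDriver()
  return old_schema ? version[3] : version[2] + "." + version[3];
}
// IntelBuildNumber({"26", "20", "100", "7000"}) -> "100.7000" (new schema)
// IntelBuildNumber({"24", "20", "10", "6000"})  -> "6000"     (old schema)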
@@ -312,13 +365,13 @@ bool GpuControlList::More::Contains(const GPUInfo& gpu_info) const {
break;
case kSupported:
#if defined(OS_WIN)
- if (!gpu_info.supports_overlays)
+ if (!gpu_info.overlay_info.supports_overlays)
return false;
#endif // OS_WIN
break;
case kUnsupported:
#if defined(OS_WIN)
- if (gpu_info.supports_overlays)
+ if (gpu_info.overlay_info.supports_overlays)
return false;
#endif // OS_WIN
break;
@@ -342,7 +395,8 @@ bool GpuControlList::Conditions::Contains(OsType target_os_type,
if (os_version.IsSpecified() && !os_version.Contains(target_os_version))
return false;
}
- if (vendor_id != 0 || gpu_series_list_size > 0 || intel_gpu_generation.IsSpecified()) {
+ if (vendor_id != 0 || intel_gpu_series_list_size > 0 ||
+ intel_gpu_generation.IsSpecified()) {
std::vector<GPUInfo::GPUDevice> candidates;
switch (multi_gpu_category) {
case kMultiGpuCategoryPrimary:
@@ -369,14 +423,14 @@ bool GpuControlList::Conditions::Contains(OsType target_os_type,
}
bool found = false;
- if (gpu_series_list_size > 0) {
+ if (intel_gpu_series_list_size > 0) {
for (size_t ii = 0; !found && ii < candidates.size(); ++ii) {
- GpuSeriesType candidate_series = GetGpuSeriesType(
+ IntelGpuSeriesType candidate_series = GetIntelGpuSeriesType(
candidates[ii].vendor_id, candidates[ii].device_id);
- if (candidate_series == GpuSeriesType::kUnknown)
+ if (candidate_series == IntelGpuSeriesType::kUnknown)
continue;
- for (size_t jj = 0; jj < gpu_series_list_size; ++jj) {
- if (candidate_series == gpu_series_list[jj]) {
+ for (size_t jj = 0; jj < intel_gpu_series_list_size; ++jj) {
+ if (candidate_series == intel_gpu_series_list[jj]) {
found = true;
break;
}
@@ -447,8 +501,30 @@ bool GpuControlList::Conditions::Contains(OsType target_os_type,
case kMultiGpuStyleNone:
break;
}
- if (driver_info && !driver_info->Contains(gpu_info)) {
- return false;
+
+ if (driver_info) {
+ // On Windows, if either current gpu or the gpu condition is from Intel,
+ // the driver version should be compared based on the Intel graphics driver
+ // version schema.
+ // https://www.intel.com/content/www/us/en/support/articles/000005654/graphics-drivers.html
+ VersionSchema version_schema = kCommon;
+ if (target_os_type == kOsWin) {
+ if (vendor_id == 0x8086
+ || intel_gpu_series_list_size > 0
+ || intel_gpu_generation.IsSpecified()
+ || (driver_info->driver_vendor
+ && std::string(driver_info->driver_vendor).find("Intel")
+ != std::string::npos)) {
+ version_schema = kIntelDriver;
+ } else {
+ const GPUInfo::GPUDevice& active_gpu = gpu_info.active_gpu();
+ if (active_gpu.vendor_id == 0x8086
+ || active_gpu.driver_vendor.find("Intel") != std::string::npos)
+ version_schema = kIntelDriver;
+ }
+ }
+ if (!driver_info->Contains(gpu_info, version_schema))
+ return false;
}
if (gl_strings && !gl_strings->Contains(gpu_info)) {
return false;
diff --git a/chromium/gpu/config/gpu_control_list.h b/chromium/gpu/config/gpu_control_list.h
index 4b60cc8476d..063da31aece 100644
--- a/chromium/gpu/config/gpu_control_list.h
+++ b/chromium/gpu/config/gpu_control_list.h
@@ -85,6 +85,15 @@ class GPU_EXPORT GpuControlList {
kVersionStyleUnknown
};
+ enum VersionSchema {
+ // All digits are meaningful when distinguishing versions.
+ kCommon,
+ // The version format of Intel graphics driver is AA.BB.CCC.DDDD.
+ // DDDD(old schema) or CCC.DDDD(new schema) is the build number.
+ // That is, indicates the actual driver number.
+ kIntelDriver,
+ };
+
enum SupportedOrNot {
kSupported,
kUnsupported,
@@ -124,7 +133,8 @@ class GPU_EXPORT GpuControlList {
const char* driver_vendor;
Version driver_version;
- bool Contains(const GPUInfo& gpu_info) const;
+ bool Contains(const GPUInfo& gpu_info,
+ VersionSchema version_schema = kCommon) const;
};
struct GPU_EXPORT GLStrings {
@@ -182,8 +192,8 @@ class GPU_EXPORT GpuControlList {
const DriverInfo* driver_info;
const GLStrings* gl_strings;
const MachineModelInfo* machine_model_info;
- size_t gpu_series_list_size;
- const GpuSeriesType* gpu_series_list;
+ size_t intel_gpu_series_list_size;
+ const IntelGpuSeriesType* intel_gpu_series_list;
Version intel_gpu_generation;
const More* more;
diff --git a/chromium/gpu/config/gpu_control_list_entry_unittest.cc b/chromium/gpu/config/gpu_control_list_entry_unittest.cc
index eeb183b6c09..03f97c197fe 100644
--- a/chromium/gpu/config/gpu_control_list_entry_unittest.cc
+++ b/chromium/gpu/config/gpu_control_list_entry_unittest.cc
@@ -1052,10 +1052,10 @@ TEST_F(GpuControlListEntryTest, HardwareOverlay) {
const Entry& entry = GetEntry(kGpuControlListEntryTest_HardwareOverlay);
GPUInfo gpu_info;
gpu_info.gpu.vendor_id = 0x8086;
- gpu_info.supports_overlays = true;
+ gpu_info.overlay_info.supports_overlays = true;
EXPECT_FALSE(entry.Contains(kOsWin, "10.0", gpu_info));
- gpu_info.supports_overlays = false;
+ gpu_info.overlay_info.supports_overlays = false;
EXPECT_TRUE(entry.Contains(kOsWin, "10.0", gpu_info));
}
#endif // OS_WIN
@@ -1111,4 +1111,59 @@ TEST_F(GpuControlListEntryTest, TestSubpixelFontRenderingDontCare) {
EXPECT_TRUE(entry.Contains(kOsChromeOS, "10.0", gpu_info));
}
+TEST_F(GpuControlListEntryTest, IntelDriverVendorEntry) {
+ const Entry& entry =
+ GetEntry(kGpuControlListEntryTest_IntelDriverVendorEntry);
+ GPUInfo gpu_info;
+ gpu_info.gpu.driver_vendor = "Intel(R) UHD Graphics 630";
+ gpu_info.gpu.driver_version = "25.20.100.5000";
+ EXPECT_TRUE(entry.Contains(kOsLinux, "", gpu_info));
+ gpu_info.gpu.driver_version = "23.20.100.6500";
+ EXPECT_FALSE(entry.Contains(kOsLinux, "", gpu_info));
+
+ gpu_info.gpu.driver_version = "25.20.100.5000";
+ EXPECT_FALSE(entry.Contains(kOsWin, "", gpu_info));
+ gpu_info.gpu.driver_version = "23.20.100.6500";
+ EXPECT_TRUE(entry.Contains(kOsWin, "", gpu_info));
+}
+
+TEST_F(GpuControlListEntryTest, IntelDriverVersionEntry) {
+ const Entry& entry =
+ GetEntry(kGpuControlListEntryTest_IntelDriverVersionEntry);
+ GPUInfo gpu_info;
+ gpu_info.gpu.vendor_id = 0x8086;
+ gpu_info.gpu.driver_version = "23.20.100.8000";
+ EXPECT_TRUE(entry.Contains(kOsLinux, "", gpu_info));
+ gpu_info.gpu.driver_version = "25.20.100.6000";
+ EXPECT_FALSE(entry.Contains(kOsLinux, "", gpu_info));
+
+ gpu_info.gpu.driver_version = "23.20.100.8000";
+ EXPECT_FALSE(entry.Contains(kOsWin, "", gpu_info));
+ gpu_info.gpu.driver_version = "25.20.100.6000";
+ EXPECT_TRUE(entry.Contains(kOsWin, "", gpu_info));
+ gpu_info.gpu.driver_version = "24.20.99.6000";
+ EXPECT_FALSE(entry.Contains(kOsWin, "", gpu_info));
+ gpu_info.gpu.driver_version = "24.20.101.6000";
+ EXPECT_FALSE(entry.Contains(kOsWin, "", gpu_info));
+ gpu_info.gpu.driver_version = "25.20.100.7000";
+ EXPECT_TRUE(entry.Contains(kOsWin, "", gpu_info));
+}
+
+TEST_F(GpuControlListEntryTest, IntelOldDriverVersionEntry) {
+ const Entry& entry =
+ GetEntry(kGpuControlListEntryTest_IntelOldDriverVersionEntry);
+ GPUInfo gpu_info;
+ gpu_info.gpu.vendor_id = 0x8086;
+ gpu_info.gpu.driver_version = "23.20.10.8000";
+ EXPECT_FALSE(entry.Contains(kOsWin, "", gpu_info));
+ gpu_info.gpu.driver_version = "25.20.10.6000";
+ EXPECT_TRUE(entry.Contains(kOsWin, "", gpu_info));
+ gpu_info.gpu.driver_version = "24.20.100.6000";
+ EXPECT_FALSE(entry.Contains(kOsWin, "", gpu_info));
+ gpu_info.gpu.driver_version = "24.20.11.6000";
+ EXPECT_TRUE(entry.Contains(kOsWin, "", gpu_info));
+ gpu_info.gpu.driver_version = "25.20.9.7000";
+ EXPECT_TRUE(entry.Contains(kOsWin, "", gpu_info));
+}
+
} // namespace gpu
diff --git a/chromium/gpu/config/gpu_control_list_testing.json b/chromium/gpu/config/gpu_control_list_testing.json
index d32c8ab200d..8450540737c 100644
--- a/chromium/gpu/config/gpu_control_list_testing.json
+++ b/chromium/gpu/config/gpu_control_list_testing.json
@@ -224,7 +224,7 @@
"vendor_id": "0x8086",
"driver_version": {
"op": "<",
- "value": "10.7"
+ "value": "24.20.100.7000"
},
"features": [
"test_feature_1"
@@ -726,9 +726,9 @@
{
"id": 60,
"description": "GpuControlListEntryTest.GpuSeries",
- "gpu_series": [
- "intel_skylake",
- "intel_kabylake"
+ "intel_gpu_series": [
+ "skylake",
+ "kabylake"
],
"features": [
"test_feature_0"
@@ -737,8 +737,8 @@
{
"id": 61,
"description": "GpuControlListEntryTest.GpuSeriesActive",
- "gpu_series": [
- "intel_kabylake"
+ "intel_gpu_series": [
+ "kabylake"
],
"multi_gpu_category": "active",
"features": [
@@ -748,8 +748,8 @@
{
"id": 62,
"description": "GpuControlListEntryTest.GpuSeriesAny",
- "gpu_series": [
- "intel_kabylake"
+ "intel_gpu_series": [
+ "kabylake"
],
"multi_gpu_category": "any",
"features": [
@@ -759,8 +759,8 @@
{
"id": 63,
"description": "GpuControlListEntryTest.GpuSeriesPrimary",
- "gpu_series": [
- "intel_kabylake"
+ "intel_gpu_series": [
+ "kabylake"
],
"multi_gpu_category": "primary",
"features": [
@@ -770,8 +770,8 @@
{
"id": 64,
"description": "GpuControlListEntryTest.GpuSeriesSecondary",
- "gpu_series": [
- "intel_kabylake"
+ "intel_gpu_series": [
+ "kabylake"
],
"multi_gpu_category": "secondary",
"features": [
@@ -783,8 +783,8 @@
"description": "GpuControlListEntryTest.GpuSeriesInException",
"exceptions": [
{
- "gpu_series": [
- "intel_kabylake"
+ "intel_gpu_series": [
+ "kabylake"
]
}
],
@@ -899,6 +899,43 @@
"features": [
"test_feature_0"
]
+ },
+ {
+ "id": 75,
+ "description": "GpuControlListEntryTest.IntelDriverVendorEntry",
+ "driver_vendor": "Intel.*",
+ "driver_version": {
+ "op": "between",
+ "value": "24.20.100.6000",
+ "value2": "26.20.100.7000"
+ },
+ "features": [
+ "test_feature_0"
+ ]
+ },
+ {
+ "id": 76,
+ "description": "GpuControlListEntryTest.IntelDriverVersionEntry",
+ "vendor_id": "0x8086",
+ "driver_version": {
+ "op": "<=",
+ "value": "24.20.100.7000"
+ },
+ "features": [
+ "test_feature_0"
+ ]
+ },
+ {
+ "id": 77,
+ "description": "GpuControlListEntryTest.IntelOldDriverVersionEntry",
+ "vendor_id": "0x8086",
+ "driver_version": {
+ "op": "<=",
+ "value": "24.20.10.7000"
+ },
+ "features": [
+ "test_feature_0"
+ ]
}
]
}
diff --git a/chromium/gpu/config/gpu_control_list_testing_arrays_and_structs_autogen.h b/chromium/gpu/config/gpu_control_list_testing_arrays_and_structs_autogen.h
index 6564a31921b..167430b615f 100644
--- a/chromium/gpu/config/gpu_control_list_testing_arrays_and_structs_autogen.h
+++ b/chromium/gpu/config/gpu_control_list_testing_arrays_and_structs_autogen.h
@@ -525,8 +525,8 @@ const int kFeatureListForGpuControlTestingEntry20[1] = {
const GpuControlList::DriverInfo kDriverInfoForGpuControlTestingEntry20 = {
nullptr, // driver_vendor
- {GpuControlList::kLT, GpuControlList::kVersionStyleNumerical, "10.7",
- nullptr}, // driver_version
+ {GpuControlList::kLT, GpuControlList::kVersionStyleNumerical,
+ "24.20.100.7000", nullptr}, // driver_version
};
const GpuControlList::More kMoreForEntry20_1440601243 = {
@@ -1620,9 +1620,9 @@ const int kFeatureListForGpuControlTestingEntry60[1] = {
TEST_FEATURE_0,
};
-const GpuSeriesType kGpuSeriesForEntry60[2] = {
- GpuSeriesType::kIntelSkylake,
- GpuSeriesType::kIntelKabylake,
+const IntelGpuSeriesType kIntelGpuSeriesForEntry60[2] = {
+ IntelGpuSeriesType::kSkylake,
+ IntelGpuSeriesType::kKabylake,
};
const GpuControlList::More kMoreForEntry60_1440601243 = {
@@ -1646,8 +1646,8 @@ const int kFeatureListForGpuControlTestingEntry61[1] = {
TEST_FEATURE_0,
};
-const GpuSeriesType kGpuSeriesForEntry61[1] = {
- GpuSeriesType::kIntelKabylake,
+const IntelGpuSeriesType kIntelGpuSeriesForEntry61[1] = {
+ IntelGpuSeriesType::kKabylake,
};
const GpuControlList::More kMoreForEntry61_1440601243 = {
@@ -1671,8 +1671,8 @@ const int kFeatureListForGpuControlTestingEntry62[1] = {
TEST_FEATURE_0,
};
-const GpuSeriesType kGpuSeriesForEntry62[1] = {
- GpuSeriesType::kIntelKabylake,
+const IntelGpuSeriesType kIntelGpuSeriesForEntry62[1] = {
+ IntelGpuSeriesType::kKabylake,
};
const GpuControlList::More kMoreForEntry62_1440601243 = {
@@ -1696,8 +1696,8 @@ const int kFeatureListForGpuControlTestingEntry63[1] = {
TEST_FEATURE_0,
};
-const GpuSeriesType kGpuSeriesForEntry63[1] = {
- GpuSeriesType::kIntelKabylake,
+const IntelGpuSeriesType kIntelGpuSeriesForEntry63[1] = {
+ IntelGpuSeriesType::kKabylake,
};
const GpuControlList::More kMoreForEntry63_1440601243 = {
@@ -1721,8 +1721,8 @@ const int kFeatureListForGpuControlTestingEntry64[1] = {
TEST_FEATURE_0,
};
-const GpuSeriesType kGpuSeriesForEntry64[1] = {
- GpuSeriesType::kIntelKabylake,
+const IntelGpuSeriesType kIntelGpuSeriesForEntry64[1] = {
+ IntelGpuSeriesType::kKabylake,
};
const GpuControlList::More kMoreForEntry64_1440601243 = {
@@ -1763,8 +1763,8 @@ const GpuControlList::More kMoreForEntry65_1440601243 = {
GpuControlList::kDontCare, // subpixel_font_rendering
};
-const GpuSeriesType kGpuSeriesForEntry65Exception0[1] = {
- GpuSeriesType::kIntelKabylake,
+const IntelGpuSeriesType kIntelGpuSeriesForEntry65Exception0[1] = {
+ IntelGpuSeriesType::kKabylake,
};
const GpuControlList::More kMoreForEntry65_1440601243Exception0 = {
@@ -2061,6 +2061,87 @@ const GpuControlList::More kMoreForEntry74_1440601243 = {
GpuControlList::kDontCare, // subpixel_font_rendering
};
+const int kFeatureListForGpuControlTestingEntry75[1] = {
+ TEST_FEATURE_0,
+};
+
+const GpuControlList::DriverInfo kDriverInfoForGpuControlTestingEntry75 = {
+ "Intel.*", // driver_vendor
+ {GpuControlList::kBetween, GpuControlList::kVersionStyleNumerical,
+ "24.20.100.6000", "26.20.100.7000"}, // driver_version
+};
+
+const GpuControlList::More kMoreForEntry75_1440601243 = {
+ GpuControlList::kGLTypeNone, // gl_type
+ {GpuControlList::kUnknown, GpuControlList::kVersionStyleNumerical, nullptr,
+ nullptr}, // gl_version
+ {GpuControlList::kUnknown, GpuControlList::kVersionStyleNumerical, nullptr,
+ nullptr}, // pixel_shader_version
+ false, // in_process_gpu
+ 0, // gl_reset_notification_strategy
+ {GpuControlList::kUnknown, GpuControlList::kVersionStyleNumerical, nullptr,
+ nullptr}, // direct_rendering_version
+ {GpuControlList::kUnknown, GpuControlList::kVersionStyleNumerical, nullptr,
+ nullptr}, // gpu_count
+ GpuControlList::kDontCare, // hardware_overlay
+ 0, // test_group
+ GpuControlList::kDontCare, // subpixel_font_rendering
+};
+
+const int kFeatureListForGpuControlTestingEntry76[1] = {
+ TEST_FEATURE_0,
+};
+
+const GpuControlList::DriverInfo kDriverInfoForGpuControlTestingEntry76 = {
+ nullptr, // driver_vendor
+ {GpuControlList::kLE, GpuControlList::kVersionStyleNumerical,
+ "24.20.100.7000", nullptr}, // driver_version
+};
+
+const GpuControlList::More kMoreForEntry76_1440601243 = {
+ GpuControlList::kGLTypeNone, // gl_type
+ {GpuControlList::kUnknown, GpuControlList::kVersionStyleNumerical, nullptr,
+ nullptr}, // gl_version
+ {GpuControlList::kUnknown, GpuControlList::kVersionStyleNumerical, nullptr,
+ nullptr}, // pixel_shader_version
+ false, // in_process_gpu
+ 0, // gl_reset_notification_strategy
+ {GpuControlList::kUnknown, GpuControlList::kVersionStyleNumerical, nullptr,
+ nullptr}, // direct_rendering_version
+ {GpuControlList::kUnknown, GpuControlList::kVersionStyleNumerical, nullptr,
+ nullptr}, // gpu_count
+ GpuControlList::kDontCare, // hardware_overlay
+ 0, // test_group
+ GpuControlList::kDontCare, // subpixel_font_rendering
+};
+
+const int kFeatureListForGpuControlTestingEntry77[1] = {
+ TEST_FEATURE_0,
+};
+
+const GpuControlList::DriverInfo kDriverInfoForGpuControlTestingEntry77 = {
+ nullptr, // driver_vendor
+ {GpuControlList::kLE, GpuControlList::kVersionStyleNumerical,
+ "24.20.10.7000", nullptr}, // driver_version
+};
+
+const GpuControlList::More kMoreForEntry77_1440601243 = {
+ GpuControlList::kGLTypeNone, // gl_type
+ {GpuControlList::kUnknown, GpuControlList::kVersionStyleNumerical, nullptr,
+ nullptr}, // gl_version
+ {GpuControlList::kUnknown, GpuControlList::kVersionStyleNumerical, nullptr,
+ nullptr}, // pixel_shader_version
+ false, // in_process_gpu
+ 0, // gl_reset_notification_strategy
+ {GpuControlList::kUnknown, GpuControlList::kVersionStyleNumerical, nullptr,
+ nullptr}, // direct_rendering_version
+ {GpuControlList::kUnknown, GpuControlList::kVersionStyleNumerical, nullptr,
+ nullptr}, // gpu_count
+ GpuControlList::kDontCare, // hardware_overlay
+ 0, // test_group
+ GpuControlList::kDontCare, // subpixel_font_rendering
+};
+
} // namespace gpu
#endif // GPU_CONFIG_GPU_CONTROL_LIST_TESTING_ARRAYS_AND_STRUCTS_AUTOGEN_H_
diff --git a/chromium/gpu/config/gpu_control_list_testing_autogen.cc b/chromium/gpu/config/gpu_control_list_testing_autogen.cc
index dcb6daf8b92..32458385b4a 100644
--- a/chromium/gpu/config/gpu_control_list_testing_autogen.cc
+++ b/chromium/gpu/config/gpu_control_list_testing_autogen.cc
@@ -39,8 +39,8 @@ const GpuControlList::Entry kGpuControlListTestingEntries[] = {
&kDriverInfoForGpuControlTestingEntry1, // driver info
nullptr, // GL strings
nullptr, // machine model info
- 0, // gpu_series size
- nullptr, // gpu_series
+ 0, // intel_gpu_series size
+ nullptr, // intel_gpu_series
{GpuControlList::kUnknown, GpuControlList::kVersionStyleNumerical,
nullptr, nullptr}, // intel_gpu_generation
&kMoreForEntry1_1440601243, // more data
@@ -71,8 +71,8 @@ const GpuControlList::Entry kGpuControlListTestingEntries[] = {
nullptr, // driver info
nullptr, // GL strings
nullptr, // machine model info
- 0, // gpu_series size
- nullptr, // gpu_series
+ 0, // intel_gpu_series size
+ nullptr, // intel_gpu_series
{GpuControlList::kUnknown, GpuControlList::kVersionStyleNumerical,
nullptr, nullptr}, // intel_gpu_generation
&kMoreForEntry2_1440601243, // more data
@@ -103,8 +103,8 @@ const GpuControlList::Entry kGpuControlListTestingEntries[] = {
nullptr, // driver info
nullptr, // GL strings
nullptr, // machine model info
- 0, // gpu_series size
- nullptr, // gpu_series
+ 0, // intel_gpu_series size
+ nullptr, // intel_gpu_series
{GpuControlList::kUnknown, GpuControlList::kVersionStyleNumerical,
nullptr, nullptr}, // intel_gpu_generation
&kMoreForEntry3_1440601243, // more data
@@ -135,8 +135,8 @@ const GpuControlList::Entry kGpuControlListTestingEntries[] = {
nullptr, // driver info
nullptr, // GL strings
nullptr, // machine model info
- 0, // gpu_series size
- nullptr, // gpu_series
+ 0, // intel_gpu_series size
+ nullptr, // intel_gpu_series
{GpuControlList::kUnknown, GpuControlList::kVersionStyleNumerical,
nullptr, nullptr}, // intel_gpu_generation
&kMoreForEntry4_1440601243, // more data
@@ -167,8 +167,8 @@ const GpuControlList::Entry kGpuControlListTestingEntries[] = {
nullptr, // driver info
nullptr, // GL strings
nullptr, // machine model info
- 0, // gpu_series size
- nullptr, // gpu_series
+ 0, // intel_gpu_series size
+ nullptr, // intel_gpu_series
{GpuControlList::kUnknown, GpuControlList::kVersionStyleNumerical,
nullptr, nullptr}, // intel_gpu_generation
&kMoreForEntry5_1440601243, // more data
@@ -199,8 +199,8 @@ const GpuControlList::Entry kGpuControlListTestingEntries[] = {
nullptr, // driver info
nullptr, // GL strings
nullptr, // machine model info
- 0, // gpu_series size
- nullptr, // gpu_series
+ 0, // intel_gpu_series size
+ nullptr, // intel_gpu_series
{GpuControlList::kUnknown, GpuControlList::kVersionStyleNumerical,
nullptr, nullptr}, // intel_gpu_generation
&kMoreForEntry6_1440601243, // more data
@@ -231,8 +231,8 @@ const GpuControlList::Entry kGpuControlListTestingEntries[] = {
nullptr, // driver info
nullptr, // GL strings
nullptr, // machine model info
- 0, // gpu_series size
- nullptr, // gpu_series
+ 0, // intel_gpu_series size
+ nullptr, // intel_gpu_series
{GpuControlList::kUnknown, GpuControlList::kVersionStyleNumerical,
nullptr, nullptr}, // intel_gpu_generation
&kMoreForEntry7_1440601243, // more data
@@ -263,8 +263,8 @@ const GpuControlList::Entry kGpuControlListTestingEntries[] = {
nullptr, // driver info
nullptr, // GL strings
nullptr, // machine model info
- 0, // gpu_series size
- nullptr, // gpu_series
+ 0, // intel_gpu_series size
+ nullptr, // intel_gpu_series
{GpuControlList::kUnknown, GpuControlList::kVersionStyleNumerical,
nullptr, nullptr}, // intel_gpu_generation
&kMoreForEntry8_1440601243, // more data
@@ -295,8 +295,8 @@ const GpuControlList::Entry kGpuControlListTestingEntries[] = {
nullptr, // driver info
nullptr, // GL strings
nullptr, // machine model info
- 0, // gpu_series size
- nullptr, // gpu_series
+ 0, // intel_gpu_series size
+ nullptr, // intel_gpu_series
{GpuControlList::kUnknown, GpuControlList::kVersionStyleNumerical,
nullptr, nullptr}, // intel_gpu_generation
&kMoreForEntry9_1440601243, // more data
@@ -327,8 +327,8 @@ const GpuControlList::Entry kGpuControlListTestingEntries[] = {
nullptr, // driver info
nullptr, // GL strings
nullptr, // machine model info
- 0, // gpu_series size
- nullptr, // gpu_series
+ 0, // intel_gpu_series size
+ nullptr, // intel_gpu_series
{GpuControlList::kUnknown, GpuControlList::kVersionStyleNumerical,
nullptr, nullptr}, // intel_gpu_generation
&kMoreForEntry10_1440601243, // more data
@@ -359,8 +359,8 @@ const GpuControlList::Entry kGpuControlListTestingEntries[] = {
nullptr, // driver info
&kGLStringsForGpuControlTestingEntry11, // GL strings
nullptr, // machine model info
- 0, // gpu_series size
- nullptr, // gpu_series
+ 0, // intel_gpu_series size
+ nullptr, // intel_gpu_series
{GpuControlList::kUnknown, GpuControlList::kVersionStyleNumerical,
nullptr, nullptr}, // intel_gpu_generation
&kMoreForEntry11_1440601243, // more data
@@ -391,8 +391,8 @@ const GpuControlList::Entry kGpuControlListTestingEntries[] = {
nullptr, // driver info
&kGLStringsForGpuControlTestingEntry12, // GL strings
nullptr, // machine model info
- 0, // gpu_series size
- nullptr, // gpu_series
+ 0, // intel_gpu_series size
+ nullptr, // intel_gpu_series
{GpuControlList::kUnknown, GpuControlList::kVersionStyleNumerical,
nullptr, nullptr}, // intel_gpu_generation
&kMoreForEntry12_1440601243, // more data
@@ -423,8 +423,8 @@ const GpuControlList::Entry kGpuControlListTestingEntries[] = {
nullptr, // driver info
&kGLStringsForGpuControlTestingEntry13, // GL strings
nullptr, // machine model info
- 0, // gpu_series size
- nullptr, // gpu_series
+ 0, // intel_gpu_series size
+ nullptr, // intel_gpu_series
{GpuControlList::kUnknown, GpuControlList::kVersionStyleNumerical,
nullptr, nullptr}, // intel_gpu_generation
&kMoreForEntry13_1440601243, // more data
@@ -455,8 +455,8 @@ const GpuControlList::Entry kGpuControlListTestingEntries[] = {
nullptr, // driver info
&kGLStringsForGpuControlTestingEntry14, // GL strings
nullptr, // machine model info
- 0, // gpu_series size
- nullptr, // gpu_series
+ 0, // intel_gpu_series size
+ nullptr, // intel_gpu_series
{GpuControlList::kUnknown, GpuControlList::kVersionStyleNumerical,
nullptr, nullptr}, // intel_gpu_generation
&kMoreForEntry14_1440601243, // more data
@@ -487,8 +487,8 @@ const GpuControlList::Entry kGpuControlListTestingEntries[] = {
nullptr, // driver info
&kGLStringsForGpuControlTestingEntry15, // GL strings
nullptr, // machine model info
- 0, // gpu_series size
- nullptr, // gpu_series
+ 0, // intel_gpu_series size
+ nullptr, // intel_gpu_series
{GpuControlList::kUnknown, GpuControlList::kVersionStyleNumerical,
nullptr, nullptr}, // intel_gpu_generation
&kMoreForEntry15_1440601243, // more data
@@ -519,8 +519,8 @@ const GpuControlList::Entry kGpuControlListTestingEntries[] = {
nullptr, // driver info
nullptr, // GL strings
nullptr, // machine model info
- 0, // gpu_series size
- nullptr, // gpu_series
+ 0, // intel_gpu_series size
+ nullptr, // intel_gpu_series
{GpuControlList::kUnknown, GpuControlList::kVersionStyleNumerical,
nullptr, nullptr}, // intel_gpu_generation
&kMoreForEntry16_1440601243, // more data
@@ -551,8 +551,8 @@ const GpuControlList::Entry kGpuControlListTestingEntries[] = {
nullptr, // driver info
nullptr, // GL strings
nullptr, // machine model info
- 0, // gpu_series size
- nullptr, // gpu_series
+ 0, // intel_gpu_series size
+ nullptr, // intel_gpu_series
{GpuControlList::kUnknown, GpuControlList::kVersionStyleNumerical,
nullptr, nullptr}, // intel_gpu_generation
&kMoreForEntry17_1440601243, // more data
@@ -583,8 +583,8 @@ const GpuControlList::Entry kGpuControlListTestingEntries[] = {
&kDriverInfoForGpuControlTestingEntry18, // driver info
nullptr, // GL strings
nullptr, // machine model info
- 0, // gpu_series size
- nullptr, // gpu_series
+ 0, // intel_gpu_series size
+ nullptr, // intel_gpu_series
{GpuControlList::kUnknown, GpuControlList::kVersionStyleNumerical,
nullptr, nullptr}, // intel_gpu_generation
&kMoreForEntry18_1440601243, // more data
@@ -615,8 +615,8 @@ const GpuControlList::Entry kGpuControlListTestingEntries[] = {
&kDriverInfoForGpuControlTestingEntry19, // driver info
nullptr, // GL strings
nullptr, // machine model info
- 0, // gpu_series size
- nullptr, // gpu_series
+ 0, // intel_gpu_series size
+ nullptr, // intel_gpu_series
{GpuControlList::kUnknown, GpuControlList::kVersionStyleNumerical,
nullptr, nullptr}, // intel_gpu_generation
&kMoreForEntry19_1440601243, // more data
@@ -647,8 +647,8 @@ const GpuControlList::Entry kGpuControlListTestingEntries[] = {
&kDriverInfoForGpuControlTestingEntry20, // driver info
nullptr, // GL strings
nullptr, // machine model info
- 0, // gpu_series size
- nullptr, // gpu_series
+ 0, // intel_gpu_series size
+ nullptr, // intel_gpu_series
{GpuControlList::kUnknown, GpuControlList::kVersionStyleNumerical,
nullptr, nullptr}, // intel_gpu_generation
&kMoreForEntry20_1440601243, // more data
@@ -679,8 +679,8 @@ const GpuControlList::Entry kGpuControlListTestingEntries[] = {
nullptr, // driver info
nullptr, // GL strings
nullptr, // machine model info
- 0, // gpu_series size
- nullptr, // gpu_series
+ 0, // intel_gpu_series size
+ nullptr, // intel_gpu_series
{GpuControlList::kUnknown, GpuControlList::kVersionStyleNumerical,
nullptr, nullptr}, // intel_gpu_generation
&kMoreForEntry21_1440601243, // more data
@@ -711,8 +711,8 @@ const GpuControlList::Entry kGpuControlListTestingEntries[] = {
nullptr, // driver info
nullptr, // GL strings
nullptr, // machine model info
- 0, // gpu_series size
- nullptr, // gpu_series
+ 0, // intel_gpu_series size
+ nullptr, // intel_gpu_series
{GpuControlList::kUnknown, GpuControlList::kVersionStyleNumerical,
nullptr, nullptr}, // intel_gpu_generation
&kMoreForEntry22_1440601243, // more data
@@ -743,8 +743,8 @@ const GpuControlList::Entry kGpuControlListTestingEntries[] = {
nullptr, // driver info
nullptr, // GL strings
nullptr, // machine model info
- 0, // gpu_series size
- nullptr, // gpu_series
+ 0, // intel_gpu_series size
+ nullptr, // intel_gpu_series
{GpuControlList::kUnknown, GpuControlList::kVersionStyleNumerical,
nullptr, nullptr}, // intel_gpu_generation
&kMoreForEntry23_1440601243, // more data
@@ -775,8 +775,8 @@ const GpuControlList::Entry kGpuControlListTestingEntries[] = {
nullptr, // driver info
nullptr, // GL strings
nullptr, // machine model info
- 0, // gpu_series size
- nullptr, // gpu_series
+ 0, // intel_gpu_series size
+ nullptr, // intel_gpu_series
{GpuControlList::kUnknown, GpuControlList::kVersionStyleNumerical,
nullptr, nullptr}, // intel_gpu_generation
&kMoreForEntry24_1440601243, // more data
@@ -808,8 +808,8 @@ const GpuControlList::Entry kGpuControlListTestingEntries[] = {
nullptr, // driver info
nullptr, // GL strings
nullptr, // machine model info
- 0, // gpu_series size
- nullptr, // gpu_series
+ 0, // intel_gpu_series size
+ nullptr, // intel_gpu_series
{GpuControlList::kUnknown, GpuControlList::kVersionStyleNumerical,
nullptr, nullptr}, // intel_gpu_generation
&kMoreForEntry25_1440601243, // more data
@@ -840,8 +840,8 @@ const GpuControlList::Entry kGpuControlListTestingEntries[] = {
nullptr, // driver info
nullptr, // GL strings
&kMachineModelInfoForEntry26, // machine model info
- 0, // gpu_series size
- nullptr, // gpu_series
+ 0, // intel_gpu_series size
+ nullptr, // intel_gpu_series
{GpuControlList::kUnknown, GpuControlList::kVersionStyleNumerical,
nullptr, nullptr}, // intel_gpu_generation
&kMoreForEntry26_1440601243, // more data
@@ -872,8 +872,8 @@ const GpuControlList::Entry kGpuControlListTestingEntries[] = {
nullptr, // driver info
nullptr, // GL strings
nullptr, // machine model info
- 0, // gpu_series size
- nullptr, // gpu_series
+ 0, // intel_gpu_series size
+ nullptr, // intel_gpu_series
{GpuControlList::kUnknown, GpuControlList::kVersionStyleNumerical,
nullptr, nullptr}, // intel_gpu_generation
&kMoreForEntry27_1440601243, // more data
@@ -904,8 +904,8 @@ const GpuControlList::Entry kGpuControlListTestingEntries[] = {
nullptr, // driver info
nullptr, // GL strings
&kMachineModelInfoForEntry28, // machine model info
- 0, // gpu_series size
- nullptr, // gpu_series
+ 0, // intel_gpu_series size
+ nullptr, // intel_gpu_series
{GpuControlList::kUnknown, GpuControlList::kVersionStyleNumerical,
nullptr, nullptr}, // intel_gpu_generation
&kMoreForEntry28_1440601243, // more data
@@ -936,8 +936,8 @@ const GpuControlList::Entry kGpuControlListTestingEntries[] = {
nullptr, // driver info
nullptr, // GL strings
&kMachineModelInfoForEntry29, // machine model info
- 0, // gpu_series size
- nullptr, // gpu_series
+ 0, // intel_gpu_series size
+ nullptr, // intel_gpu_series
{GpuControlList::kUnknown, GpuControlList::kVersionStyleNumerical,
nullptr, nullptr}, // intel_gpu_generation
&kMoreForEntry29_1440601243, // more data
@@ -969,8 +969,8 @@ const GpuControlList::Entry kGpuControlListTestingEntries[] = {
nullptr, // driver info
nullptr, // GL strings
nullptr, // machine model info
- 0, // gpu_series size
- nullptr, // gpu_series
+ 0, // intel_gpu_series size
+ nullptr, // intel_gpu_series
{GpuControlList::kUnknown, GpuControlList::kVersionStyleNumerical,
nullptr, nullptr}, // intel_gpu_generation
&kMoreForEntry30_1440601243, // more data
@@ -1002,8 +1002,8 @@ const GpuControlList::Entry kGpuControlListTestingEntries[] = {
nullptr, // driver info
nullptr, // GL strings
nullptr, // machine model info
- 0, // gpu_series size
- nullptr, // gpu_series
+ 0, // intel_gpu_series size
+ nullptr, // intel_gpu_series
{GpuControlList::kUnknown, GpuControlList::kVersionStyleNumerical,
nullptr, nullptr}, // intel_gpu_generation
&kMoreForEntry31_1440601243, // more data
@@ -1035,8 +1035,8 @@ const GpuControlList::Entry kGpuControlListTestingEntries[] = {
nullptr, // driver info
nullptr, // GL strings
nullptr, // machine model info
- 0, // gpu_series size
- nullptr, // gpu_series
+ 0, // intel_gpu_series size
+ nullptr, // intel_gpu_series
{GpuControlList::kUnknown, GpuControlList::kVersionStyleNumerical,
nullptr, nullptr}, // intel_gpu_generation
&kMoreForEntry32_1440601243, // more data
@@ -1068,8 +1068,8 @@ const GpuControlList::Entry kGpuControlListTestingEntries[] = {
nullptr, // driver info
nullptr, // GL strings
nullptr, // machine model info
- 0, // gpu_series size
- nullptr, // gpu_series
+ 0, // intel_gpu_series size
+ nullptr, // intel_gpu_series
{GpuControlList::kUnknown, GpuControlList::kVersionStyleNumerical,
nullptr, nullptr}, // intel_gpu_generation
&kMoreForEntry33_1440601243, // more data
@@ -1101,8 +1101,8 @@ const GpuControlList::Entry kGpuControlListTestingEntries[] = {
nullptr, // driver info
nullptr, // GL strings
nullptr, // machine model info
- 0, // gpu_series size
- nullptr, // gpu_series
+ 0, // intel_gpu_series size
+ nullptr, // intel_gpu_series
{GpuControlList::kUnknown, GpuControlList::kVersionStyleNumerical,
nullptr, nullptr}, // intel_gpu_generation
&kMoreForEntry34_1440601243, // more data
@@ -1134,8 +1134,8 @@ const GpuControlList::Entry kGpuControlListTestingEntries[] = {
nullptr, // driver info
nullptr, // GL strings
nullptr, // machine model info
- 0, // gpu_series size
- nullptr, // gpu_series
+ 0, // intel_gpu_series size
+ nullptr, // intel_gpu_series
{GpuControlList::kUnknown, GpuControlList::kVersionStyleNumerical,
nullptr, nullptr}, // intel_gpu_generation
&kMoreForEntry35_1440601243, // more data
@@ -1166,8 +1166,8 @@ const GpuControlList::Entry kGpuControlListTestingEntries[] = {
nullptr, // driver info
nullptr, // GL strings
nullptr, // machine model info
- 0, // gpu_series size
- nullptr, // gpu_series
+ 0, // intel_gpu_series size
+ nullptr, // intel_gpu_series
{GpuControlList::kUnknown, GpuControlList::kVersionStyleNumerical,
nullptr, nullptr}, // intel_gpu_generation
&kMoreForEntry36_1440601243, // more data
@@ -1199,8 +1199,8 @@ const GpuControlList::Entry kGpuControlListTestingEntries[] = {
nullptr, // driver info
nullptr, // GL strings
nullptr, // machine model info
- 0, // gpu_series size
- nullptr, // gpu_series
+ 0, // intel_gpu_series size
+ nullptr, // intel_gpu_series
{GpuControlList::kUnknown, GpuControlList::kVersionStyleNumerical,
nullptr, nullptr}, // intel_gpu_generation
&kMoreForEntry37_1440601243, // more data
@@ -1231,8 +1231,8 @@ const GpuControlList::Entry kGpuControlListTestingEntries[] = {
nullptr, // driver info
nullptr, // GL strings
nullptr, // machine model info
- 0, // gpu_series size
- nullptr, // gpu_series
+ 0, // intel_gpu_series size
+ nullptr, // intel_gpu_series
{GpuControlList::kUnknown, GpuControlList::kVersionStyleNumerical,
nullptr, nullptr}, // intel_gpu_generation
&kMoreForEntry38_1440601243, // more data
@@ -1263,8 +1263,8 @@ const GpuControlList::Entry kGpuControlListTestingEntries[] = {
nullptr, // driver info
nullptr, // GL strings
nullptr, // machine model info
- 0, // gpu_series size
- nullptr, // gpu_series
+ 0, // intel_gpu_series size
+ nullptr, // intel_gpu_series
{GpuControlList::kUnknown, GpuControlList::kVersionStyleNumerical,
nullptr, nullptr}, // intel_gpu_generation
&kMoreForEntry39_1440601243, // more data
@@ -1295,8 +1295,8 @@ const GpuControlList::Entry kGpuControlListTestingEntries[] = {
nullptr, // driver info
nullptr, // GL strings
nullptr, // machine model info
- 0, // gpu_series size
- nullptr, // gpu_series
+ 0, // intel_gpu_series size
+ nullptr, // intel_gpu_series
{GpuControlList::kUnknown, GpuControlList::kVersionStyleNumerical,
nullptr, nullptr}, // intel_gpu_generation
&kMoreForEntry40_1440601243, // more data
@@ -1327,8 +1327,8 @@ const GpuControlList::Entry kGpuControlListTestingEntries[] = {
nullptr, // driver info
nullptr, // GL strings
nullptr, // machine model info
- 0, // gpu_series size
- nullptr, // gpu_series
+ 0, // intel_gpu_series size
+ nullptr, // intel_gpu_series
{GpuControlList::kUnknown, GpuControlList::kVersionStyleNumerical,
nullptr, nullptr}, // intel_gpu_generation
&kMoreForEntry41_1440601243, // more data
@@ -1359,8 +1359,8 @@ const GpuControlList::Entry kGpuControlListTestingEntries[] = {
nullptr, // driver info
nullptr, // GL strings
nullptr, // machine model info
- 0, // gpu_series size
- nullptr, // gpu_series
+ 0, // intel_gpu_series size
+ nullptr, // intel_gpu_series
{GpuControlList::kUnknown, GpuControlList::kVersionStyleNumerical,
nullptr, nullptr}, // intel_gpu_generation
&kMoreForEntry42_1440601243, // more data
@@ -1391,8 +1391,8 @@ const GpuControlList::Entry kGpuControlListTestingEntries[] = {
nullptr, // driver info
nullptr, // GL strings
nullptr, // machine model info
- 0, // gpu_series size
- nullptr, // gpu_series
+ 0, // intel_gpu_series size
+ nullptr, // intel_gpu_series
{GpuControlList::kUnknown, GpuControlList::kVersionStyleNumerical,
nullptr, nullptr}, // intel_gpu_generation
&kMoreForEntry43_1440601243, // more data
@@ -1423,8 +1423,8 @@ const GpuControlList::Entry kGpuControlListTestingEntries[] = {
nullptr, // driver info
nullptr, // GL strings
nullptr, // machine model info
- 0, // gpu_series size
- nullptr, // gpu_series
+ 0, // intel_gpu_series size
+ nullptr, // intel_gpu_series
{GpuControlList::kUnknown, GpuControlList::kVersionStyleNumerical,
nullptr, nullptr}, // intel_gpu_generation
&kMoreForEntry44_1440601243, // more data
@@ -1456,8 +1456,8 @@ const GpuControlList::Entry kGpuControlListTestingEntries[] = {
nullptr, // driver info
nullptr, // GL strings
nullptr, // machine model info
- 0, // gpu_series size
- nullptr, // gpu_series
+ 0, // intel_gpu_series size
+ nullptr, // intel_gpu_series
{GpuControlList::kUnknown, GpuControlList::kVersionStyleNumerical,
nullptr, nullptr}, // intel_gpu_generation
&kMoreForEntry45_1440601243, // more data
@@ -1489,8 +1489,8 @@ const GpuControlList::Entry kGpuControlListTestingEntries[] = {
nullptr, // driver info
nullptr, // GL strings
nullptr, // machine model info
- 0, // gpu_series size
- nullptr, // gpu_series
+ 0, // intel_gpu_series size
+ nullptr, // intel_gpu_series
{GpuControlList::kUnknown, GpuControlList::kVersionStyleNumerical,
nullptr, nullptr}, // intel_gpu_generation
&kMoreForEntry46_1440601243, // more data
@@ -1521,8 +1521,8 @@ const GpuControlList::Entry kGpuControlListTestingEntries[] = {
nullptr, // driver info
nullptr, // GL strings
nullptr, // machine model info
- 0, // gpu_series size
- nullptr, // gpu_series
+ 0, // intel_gpu_series size
+ nullptr, // intel_gpu_series
{GpuControlList::kUnknown, GpuControlList::kVersionStyleNumerical,
nullptr, nullptr}, // intel_gpu_generation
&kMoreForEntry47_1440601243, // more data
@@ -1553,8 +1553,8 @@ const GpuControlList::Entry kGpuControlListTestingEntries[] = {
nullptr, // driver info
nullptr, // GL strings
nullptr, // machine model info
- 0, // gpu_series size
- nullptr, // gpu_series
+ 0, // intel_gpu_series size
+ nullptr, // intel_gpu_series
{GpuControlList::kUnknown, GpuControlList::kVersionStyleNumerical,
nullptr, nullptr}, // intel_gpu_generation
&kMoreForEntry48_1440601243, // more data
@@ -1585,8 +1585,8 @@ const GpuControlList::Entry kGpuControlListTestingEntries[] = {
&kDriverInfoForGpuControlTestingEntry49, // driver info
nullptr, // GL strings
nullptr, // machine model info
- 0, // gpu_series size
- nullptr, // gpu_series
+ 0, // intel_gpu_series size
+ nullptr, // intel_gpu_series
{GpuControlList::kUnknown, GpuControlList::kVersionStyleNumerical,
nullptr, nullptr}, // intel_gpu_generation
&kMoreForEntry49_1440601243, // more data
@@ -1617,8 +1617,8 @@ const GpuControlList::Entry kGpuControlListTestingEntries[] = {
&kDriverInfoForGpuControlTestingEntry50, // driver info
nullptr, // GL strings
nullptr, // machine model info
- 0, // gpu_series size
- nullptr, // gpu_series
+ 0, // intel_gpu_series size
+ nullptr, // intel_gpu_series
{GpuControlList::kUnknown, GpuControlList::kVersionStyleNumerical,
nullptr, nullptr}, // intel_gpu_generation
&kMoreForEntry50_1440601243, // more data
@@ -1649,8 +1649,8 @@ const GpuControlList::Entry kGpuControlListTestingEntries[] = {
nullptr, // driver info
nullptr, // GL strings
nullptr, // machine model info
- 0, // gpu_series size
- nullptr, // gpu_series
+ 0, // intel_gpu_series size
+ nullptr, // intel_gpu_series
{GpuControlList::kUnknown, GpuControlList::kVersionStyleNumerical,
nullptr, nullptr}, // intel_gpu_generation
&kMoreForEntry51_1440601243, // more data
@@ -1681,8 +1681,8 @@ const GpuControlList::Entry kGpuControlListTestingEntries[] = {
nullptr, // driver info
nullptr, // GL strings
nullptr, // machine model info
- 0, // gpu_series size
- nullptr, // gpu_series
+ 0, // intel_gpu_series size
+ nullptr, // intel_gpu_series
{GpuControlList::kUnknown, GpuControlList::kVersionStyleNumerical,
nullptr, nullptr}, // intel_gpu_generation
&kMoreForEntry52_1440601243, // more data
@@ -1713,8 +1713,8 @@ const GpuControlList::Entry kGpuControlListTestingEntries[] = {
&kDriverInfoForGpuControlTestingEntry53, // driver info
nullptr, // GL strings
nullptr, // machine model info
- 0, // gpu_series size
- nullptr, // gpu_series
+ 0, // intel_gpu_series size
+ nullptr, // intel_gpu_series
{GpuControlList::kUnknown, GpuControlList::kVersionStyleNumerical,
nullptr, nullptr}, // intel_gpu_generation
&kMoreForEntry53_1440601243, // more data
@@ -1745,8 +1745,8 @@ const GpuControlList::Entry kGpuControlListTestingEntries[] = {
nullptr, // driver info
nullptr, // GL strings
nullptr, // machine model info
- 0, // gpu_series size
- nullptr, // gpu_series
+ 0, // intel_gpu_series size
+ nullptr, // intel_gpu_series
{GpuControlList::kUnknown, GpuControlList::kVersionStyleNumerical,
nullptr, nullptr}, // intel_gpu_generation
&kMoreForEntry54_1440601243, // more data
@@ -1777,8 +1777,8 @@ const GpuControlList::Entry kGpuControlListTestingEntries[] = {
nullptr, // driver info
nullptr, // GL strings
nullptr, // machine model info
- 0, // gpu_series size
- nullptr, // gpu_series
+ 0, // intel_gpu_series size
+ nullptr, // intel_gpu_series
{GpuControlList::kUnknown, GpuControlList::kVersionStyleNumerical,
nullptr, nullptr}, // intel_gpu_generation
&kMoreForEntry55_1440601243, // more data
@@ -1809,8 +1809,8 @@ const GpuControlList::Entry kGpuControlListTestingEntries[] = {
nullptr, // driver info
nullptr, // GL strings
nullptr, // machine model info
- 0, // gpu_series size
- nullptr, // gpu_series
+ 0, // intel_gpu_series size
+ nullptr, // intel_gpu_series
{GpuControlList::kUnknown, GpuControlList::kVersionStyleNumerical,
nullptr, nullptr}, // intel_gpu_generation
&kMoreForEntry56_1440601243, // more data
@@ -1841,8 +1841,8 @@ const GpuControlList::Entry kGpuControlListTestingEntries[] = {
nullptr, // driver info
nullptr, // GL strings
nullptr, // machine model info
- 0, // gpu_series size
- nullptr, // gpu_series
+ 0, // intel_gpu_series size
+ nullptr, // intel_gpu_series
{GpuControlList::kUnknown, GpuControlList::kVersionStyleNumerical,
nullptr, nullptr}, // intel_gpu_generation
&kMoreForEntry57_1440601243, // more data
@@ -1873,8 +1873,8 @@ const GpuControlList::Entry kGpuControlListTestingEntries[] = {
nullptr, // driver info
nullptr, // GL strings
nullptr, // machine model info
- 0, // gpu_series size
- nullptr, // gpu_series
+ 0, // intel_gpu_series size
+ nullptr, // intel_gpu_series
{GpuControlList::kUnknown, GpuControlList::kVersionStyleNumerical,
nullptr, nullptr}, // intel_gpu_generation
&kMoreForEntry58_1440601243, // more data
@@ -1905,8 +1905,8 @@ const GpuControlList::Entry kGpuControlListTestingEntries[] = {
nullptr, // driver info
nullptr, // GL strings
nullptr, // machine model info
- 0, // gpu_series size
- nullptr, // gpu_series
+ 0, // intel_gpu_series size
+ nullptr, // intel_gpu_series
{GpuControlList::kUnknown, GpuControlList::kVersionStyleNumerical,
nullptr, nullptr}, // intel_gpu_generation
&kMoreForEntry59_1440601243, // more data
@@ -1937,8 +1937,8 @@ const GpuControlList::Entry kGpuControlListTestingEntries[] = {
nullptr, // driver info
nullptr, // GL strings
nullptr, // machine model info
- base::size(kGpuSeriesForEntry60), // gpu_series size
- kGpuSeriesForEntry60, // gpu_series
+ base::size(kIntelGpuSeriesForEntry60), // intel_gpu_series size
+ kIntelGpuSeriesForEntry60, // intel_gpu_series
{GpuControlList::kUnknown, GpuControlList::kVersionStyleNumerical,
nullptr, nullptr}, // intel_gpu_generation
&kMoreForEntry60_1440601243, // more data
@@ -1969,8 +1969,8 @@ const GpuControlList::Entry kGpuControlListTestingEntries[] = {
nullptr, // driver info
nullptr, // GL strings
nullptr, // machine model info
- base::size(kGpuSeriesForEntry61), // gpu_series size
- kGpuSeriesForEntry61, // gpu_series
+ base::size(kIntelGpuSeriesForEntry61), // intel_gpu_series size
+ kIntelGpuSeriesForEntry61, // intel_gpu_series
{GpuControlList::kUnknown, GpuControlList::kVersionStyleNumerical,
nullptr, nullptr}, // intel_gpu_generation
&kMoreForEntry61_1440601243, // more data
@@ -1992,17 +1992,17 @@ const GpuControlList::Entry kGpuControlListTestingEntries[] = {
{
GpuControlList::kOsAny, // os_type
{GpuControlList::kUnknown, GpuControlList::kVersionStyleNumerical,
- nullptr, nullptr}, // os_version
- 0x00, // vendor_id
- 0, // DeviceIDs size
- nullptr, // DeviceIDs
- GpuControlList::kMultiGpuCategoryAny, // multi_gpu_category
- GpuControlList::kMultiGpuStyleNone, // multi_gpu_style
- nullptr, // driver info
- nullptr, // GL strings
- nullptr, // machine model info
- base::size(kGpuSeriesForEntry62), // gpu_series size
- kGpuSeriesForEntry62, // gpu_series
+ nullptr, nullptr}, // os_version
+ 0x00, // vendor_id
+ 0, // DeviceIDs size
+ nullptr, // DeviceIDs
+ GpuControlList::kMultiGpuCategoryAny, // multi_gpu_category
+ GpuControlList::kMultiGpuStyleNone, // multi_gpu_style
+ nullptr, // driver info
+ nullptr, // GL strings
+ nullptr, // machine model info
+ base::size(kIntelGpuSeriesForEntry62), // intel_gpu_series size
+ kIntelGpuSeriesForEntry62, // intel_gpu_series
{GpuControlList::kUnknown, GpuControlList::kVersionStyleNumerical,
nullptr, nullptr}, // intel_gpu_generation
&kMoreForEntry62_1440601243, // more data
@@ -2033,8 +2033,8 @@ const GpuControlList::Entry kGpuControlListTestingEntries[] = {
nullptr, // driver info
nullptr, // GL strings
nullptr, // machine model info
- base::size(kGpuSeriesForEntry63), // gpu_series size
- kGpuSeriesForEntry63, // gpu_series
+ base::size(kIntelGpuSeriesForEntry63), // intel_gpu_series size
+ kIntelGpuSeriesForEntry63, // intel_gpu_series
{GpuControlList::kUnknown, GpuControlList::kVersionStyleNumerical,
nullptr, nullptr}, // intel_gpu_generation
&kMoreForEntry63_1440601243, // more data
@@ -2065,8 +2065,8 @@ const GpuControlList::Entry kGpuControlListTestingEntries[] = {
nullptr, // driver info
nullptr, // GL strings
nullptr, // machine model info
- base::size(kGpuSeriesForEntry64), // gpu_series size
- kGpuSeriesForEntry64, // gpu_series
+ base::size(kIntelGpuSeriesForEntry64), // intel_gpu_series size
+ kIntelGpuSeriesForEntry64, // intel_gpu_series
{GpuControlList::kUnknown, GpuControlList::kVersionStyleNumerical,
nullptr, nullptr}, // intel_gpu_generation
&kMoreForEntry64_1440601243, // more data
@@ -2097,8 +2097,8 @@ const GpuControlList::Entry kGpuControlListTestingEntries[] = {
nullptr, // driver info
nullptr, // GL strings
nullptr, // machine model info
- 0, // gpu_series size
- nullptr, // gpu_series
+ 0, // intel_gpu_series size
+ nullptr, // intel_gpu_series
{GpuControlList::kUnknown, GpuControlList::kVersionStyleNumerical,
nullptr, nullptr}, // intel_gpu_generation
&kMoreForEntry65_1440601243, // more data
@@ -2129,8 +2129,8 @@ const GpuControlList::Entry kGpuControlListTestingEntries[] = {
&kDriverInfoForGpuControlTestingEntry66, // driver info
nullptr, // GL strings
nullptr, // machine model info
- 0, // gpu_series size
- nullptr, // gpu_series
+ 0, // intel_gpu_series size
+ nullptr, // intel_gpu_series
{GpuControlList::kUnknown, GpuControlList::kVersionStyleNumerical,
nullptr, nullptr}, // intel_gpu_generation
&kMoreForEntry66_1440601243, // more data
@@ -2161,8 +2161,8 @@ const GpuControlList::Entry kGpuControlListTestingEntries[] = {
nullptr, // driver info
nullptr, // GL strings
nullptr, // machine model info
- 0, // gpu_series size
- nullptr, // gpu_series
+ 0, // intel_gpu_series size
+ nullptr, // intel_gpu_series
{GpuControlList::kUnknown, GpuControlList::kVersionStyleNumerical,
nullptr, nullptr}, // intel_gpu_generation
&kMoreForEntry67_1440601243, // more data
@@ -2193,8 +2193,8 @@ const GpuControlList::Entry kGpuControlListTestingEntries[] = {
nullptr, // driver info
nullptr, // GL strings
nullptr, // machine model info
- 0, // gpu_series size
- nullptr, // gpu_series
+ 0, // intel_gpu_series size
+ nullptr, // intel_gpu_series
{GpuControlList::kLT, GpuControlList::kVersionStyleNumerical, "9",
nullptr}, // intel_gpu_generation
&kMoreForEntry68_1440601243, // more data
@@ -2225,8 +2225,8 @@ const GpuControlList::Entry kGpuControlListTestingEntries[] = {
nullptr, // driver info
nullptr, // GL strings
nullptr, // machine model info
- 0, // gpu_series size
- nullptr, // gpu_series
+ 0, // intel_gpu_series size
+ nullptr, // intel_gpu_series
{GpuControlList::kLT, GpuControlList::kVersionStyleNumerical, "9",
nullptr}, // intel_gpu_generation
&kMoreForEntry69_1440601243, // more data
@@ -2257,8 +2257,8 @@ const GpuControlList::Entry kGpuControlListTestingEntries[] = {
nullptr, // driver info
nullptr, // GL strings
nullptr, // machine model info
- 0, // gpu_series size
- nullptr, // gpu_series
+ 0, // intel_gpu_series size
+ nullptr, // intel_gpu_series
{GpuControlList::kLT, GpuControlList::kVersionStyleNumerical, "9",
nullptr}, // intel_gpu_generation
&kMoreForEntry70_1440601243, // more data
@@ -2289,8 +2289,8 @@ const GpuControlList::Entry kGpuControlListTestingEntries[] = {
nullptr, // driver info
nullptr, // GL strings
nullptr, // machine model info
- 0, // gpu_series size
- nullptr, // gpu_series
+ 0, // intel_gpu_series size
+ nullptr, // intel_gpu_series
{GpuControlList::kLT, GpuControlList::kVersionStyleNumerical, "9",
nullptr}, // intel_gpu_generation
&kMoreForEntry71_1440601243, // more data
@@ -2321,8 +2321,8 @@ const GpuControlList::Entry kGpuControlListTestingEntries[] = {
nullptr, // driver info
nullptr, // GL strings
nullptr, // machine model info
- 0, // gpu_series size
- nullptr, // gpu_series
+ 0, // intel_gpu_series size
+ nullptr, // intel_gpu_series
{GpuControlList::kLT, GpuControlList::kVersionStyleNumerical, "9",
nullptr}, // intel_gpu_generation
&kMoreForEntry72_1440601243, // more data
@@ -2353,8 +2353,8 @@ const GpuControlList::Entry kGpuControlListTestingEntries[] = {
nullptr, // driver info
nullptr, // GL strings
nullptr, // machine model info
- 0, // gpu_series size
- nullptr, // gpu_series
+ 0, // intel_gpu_series size
+ nullptr, // intel_gpu_series
{GpuControlList::kUnknown, GpuControlList::kVersionStyleNumerical,
nullptr, nullptr}, // intel_gpu_generation
&kMoreForEntry73_1440601243, // more data
@@ -2385,8 +2385,8 @@ const GpuControlList::Entry kGpuControlListTestingEntries[] = {
nullptr, // driver info
&kGLStringsForGpuControlTestingEntry74, // GL strings
nullptr, // machine model info
- 0, // gpu_series size
- nullptr, // gpu_series
+ 0, // intel_gpu_series size
+ nullptr, // intel_gpu_series
{GpuControlList::kUnknown, GpuControlList::kVersionStyleNumerical,
nullptr, nullptr}, // intel_gpu_generation
&kMoreForEntry74_1440601243, // more data
@@ -2394,6 +2394,102 @@ const GpuControlList::Entry kGpuControlListTestingEntries[] = {
0, // exceptions count
nullptr, // exceptions
},
+ {
+ 75, // id
+ "GpuControlListEntryTest.IntelDriverVendorEntry",
+ base::size(kFeatureListForGpuControlTestingEntry75), // features size
+ kFeatureListForGpuControlTestingEntry75, // features
+ 0, // DisabledExtensions size
+ nullptr, // DisabledExtensions
+ 0, // DisabledWebGLExtensions size
+ nullptr, // DisabledWebGLExtensions
+ 0, // CrBugs size
+ nullptr, // CrBugs
+ {
+ GpuControlList::kOsAny, // os_type
+ {GpuControlList::kUnknown, GpuControlList::kVersionStyleNumerical,
+ nullptr, nullptr}, // os_version
+ 0x00, // vendor_id
+ 0, // DeviceIDs size
+ nullptr, // DeviceIDs
+ GpuControlList::kMultiGpuCategoryNone, // multi_gpu_category
+ GpuControlList::kMultiGpuStyleNone, // multi_gpu_style
+ &kDriverInfoForGpuControlTestingEntry75, // driver info
+ nullptr, // GL strings
+ nullptr, // machine model info
+ 0, // intel_gpu_series size
+ nullptr, // intel_gpu_series
+ {GpuControlList::kUnknown, GpuControlList::kVersionStyleNumerical,
+ nullptr, nullptr}, // intel_gpu_generation
+ &kMoreForEntry75_1440601243, // more data
+ },
+ 0, // exceptions count
+ nullptr, // exceptions
+ },
+ {
+ 76, // id
+ "GpuControlListEntryTest.IntelDriverVersionEntry",
+ base::size(kFeatureListForGpuControlTestingEntry76), // features size
+ kFeatureListForGpuControlTestingEntry76, // features
+ 0, // DisabledExtensions size
+ nullptr, // DisabledExtensions
+ 0, // DisabledWebGLExtensions size
+ nullptr, // DisabledWebGLExtensions
+ 0, // CrBugs size
+ nullptr, // CrBugs
+ {
+ GpuControlList::kOsAny, // os_type
+ {GpuControlList::kUnknown, GpuControlList::kVersionStyleNumerical,
+ nullptr, nullptr}, // os_version
+ 0x8086, // vendor_id
+ 0, // DeviceIDs size
+ nullptr, // DeviceIDs
+ GpuControlList::kMultiGpuCategoryNone, // multi_gpu_category
+ GpuControlList::kMultiGpuStyleNone, // multi_gpu_style
+ &kDriverInfoForGpuControlTestingEntry76, // driver info
+ nullptr, // GL strings
+ nullptr, // machine model info
+ 0, // intel_gpu_series size
+ nullptr, // intel_gpu_series
+ {GpuControlList::kUnknown, GpuControlList::kVersionStyleNumerical,
+ nullptr, nullptr}, // intel_gpu_generation
+ &kMoreForEntry76_1440601243, // more data
+ },
+ 0, // exceptions count
+ nullptr, // exceptions
+ },
+ {
+ 77, // id
+ "GpuControlListEntryTest.IntelOldDriverVersionEntry",
+ base::size(kFeatureListForGpuControlTestingEntry77), // features size
+ kFeatureListForGpuControlTestingEntry77, // features
+ 0, // DisabledExtensions size
+ nullptr, // DisabledExtensions
+ 0, // DisabledWebGLExtensions size
+ nullptr, // DisabledWebGLExtensions
+ 0, // CrBugs size
+ nullptr, // CrBugs
+ {
+ GpuControlList::kOsAny, // os_type
+ {GpuControlList::kUnknown, GpuControlList::kVersionStyleNumerical,
+ nullptr, nullptr}, // os_version
+ 0x8086, // vendor_id
+ 0, // DeviceIDs size
+ nullptr, // DeviceIDs
+ GpuControlList::kMultiGpuCategoryNone, // multi_gpu_category
+ GpuControlList::kMultiGpuStyleNone, // multi_gpu_style
+ &kDriverInfoForGpuControlTestingEntry77, // driver info
+ nullptr, // GL strings
+ nullptr, // machine model info
+ 0, // intel_gpu_series size
+ nullptr, // intel_gpu_series
+ {GpuControlList::kUnknown, GpuControlList::kVersionStyleNumerical,
+ nullptr, nullptr}, // intel_gpu_generation
+ &kMoreForEntry77_1440601243, // more data
+ },
+ 0, // exceptions count
+ nullptr, // exceptions
+ },
};
-const size_t kGpuControlListTestingEntryCount = 74;
+const size_t kGpuControlListTestingEntryCount = 77;
} // namespace gpu
diff --git a/chromium/gpu/config/gpu_control_list_testing_entry_enums_autogen.h b/chromium/gpu/config/gpu_control_list_testing_entry_enums_autogen.h
index f70d8b3b2d4..fc12d787732 100644
--- a/chromium/gpu/config/gpu_control_list_testing_entry_enums_autogen.h
+++ b/chromium/gpu/config/gpu_control_list_testing_entry_enums_autogen.h
@@ -87,6 +87,9 @@ enum GpuControlListTestingEntryEnum {
kGpuControlListEntryTest_GpuGenerationSecondary = 71,
kGpuControlListEntryTest_SubpixelFontRendering = 72,
kGpuControlListEntryTest_SubpixelFontRenderingDontCare = 73,
+ kGpuControlListEntryTest_IntelDriverVendorEntry = 74,
+ kGpuControlListEntryTest_IntelDriverVersionEntry = 75,
+ kGpuControlListEntryTest_IntelOldDriverVersionEntry = 76,
};
} // namespace gpu
diff --git a/chromium/gpu/config/gpu_control_list_testing_exceptions_autogen.h b/chromium/gpu/config/gpu_control_list_testing_exceptions_autogen.h
index 0db38fff526..7ed250d24af 100644
--- a/chromium/gpu/config/gpu_control_list_testing_exceptions_autogen.h
+++ b/chromium/gpu/config/gpu_control_list_testing_exceptions_autogen.h
@@ -25,8 +25,8 @@ const GpuControlList::Conditions kExceptionsForEntry4[1] = {
nullptr, // driver info
nullptr, // GL strings
nullptr, // machine model info
- 0, // gpu_series size
- nullptr, // gpu_series
+ 0, // intel_gpu_series size
+ nullptr, // intel_gpu_series
{GpuControlList::kUnknown, GpuControlList::kVersionStyleNumerical,
nullptr, nullptr}, // intel_gpu_generation
&kMoreForEntry4_1440601243Exception0, // more data
@@ -46,8 +46,8 @@ const GpuControlList::Conditions kExceptionsForEntry5[1] = {
nullptr, // driver info
nullptr, // GL strings
nullptr, // machine model info
- 0, // gpu_series size
- nullptr, // gpu_series
+ 0, // intel_gpu_series size
+ nullptr, // intel_gpu_series
{GpuControlList::kUnknown, GpuControlList::kVersionStyleNumerical,
nullptr, nullptr}, // intel_gpu_generation
&kMoreForEntry5_1440601243Exception0, // more data
@@ -67,8 +67,8 @@ const GpuControlList::Conditions kExceptionsForEntry21[1] = {
nullptr, // driver info
&kGLStringsForGpuControlTestingEntry21Exception0, // GL strings
nullptr, // machine model info
- 0, // gpu_series size
- nullptr, // gpu_series
+ 0, // intel_gpu_series size
+ nullptr, // intel_gpu_series
{GpuControlList::kUnknown, GpuControlList::kVersionStyleNumerical,
nullptr, nullptr}, // intel_gpu_generation
&kMoreForEntry21_1440601243Exception0, // more data
@@ -88,8 +88,8 @@ const GpuControlList::Conditions kExceptionsForEntry27[1] = {
nullptr, // driver info
nullptr, // GL strings
&kMachineModelInfoForEntry27Exception0, // machine model info
- 0, // gpu_series size
- nullptr, // gpu_series
+ 0, // intel_gpu_series size
+ nullptr, // intel_gpu_series
{GpuControlList::kUnknown, GpuControlList::kVersionStyleNumerical,
nullptr, nullptr}, // intel_gpu_generation
&kMoreForEntry27_1440601243Exception0, // more data
@@ -109,8 +109,8 @@ const GpuControlList::Conditions kExceptionsForEntry29[1] = {
nullptr, // driver info
nullptr, // GL strings
&kMachineModelInfoForEntry29Exception0, // machine model info
- 0, // gpu_series size
- nullptr, // gpu_series
+ 0, // intel_gpu_series size
+ nullptr, // intel_gpu_series
{GpuControlList::kUnknown, GpuControlList::kVersionStyleNumerical,
nullptr, nullptr}, // intel_gpu_generation
&kMoreForEntry29_1440601243Exception0, // more data
@@ -131,8 +131,8 @@ const GpuControlList::Conditions kExceptionsForEntry44[2] = {
&kDriverInfoForGpuControlTestingEntry44Exception0, // driver info
nullptr, // GL strings
nullptr, // machine model info
- 0, // gpu_series size
- nullptr, // gpu_series
+ 0, // intel_gpu_series size
+ nullptr, // intel_gpu_series
{GpuControlList::kUnknown, GpuControlList::kVersionStyleNumerical,
nullptr, nullptr}, // intel_gpu_generation
&kMoreForEntry44_1440601243Exception0, // more data
@@ -150,8 +150,8 @@ const GpuControlList::Conditions kExceptionsForEntry44[2] = {
&kDriverInfoForGpuControlTestingEntry44Exception1, // driver info
nullptr, // GL strings
nullptr, // machine model info
- 0, // gpu_series size
- nullptr, // gpu_series
+ 0, // intel_gpu_series size
+ nullptr, // intel_gpu_series
{GpuControlList::kUnknown, GpuControlList::kVersionStyleNumerical,
nullptr, nullptr}, // intel_gpu_generation
&kMoreForEntry44_1440601243Exception1, // more data
@@ -171,8 +171,8 @@ const GpuControlList::Conditions kExceptionsForEntry51[1] = {
nullptr, // driver info
&kGLStringsForGpuControlTestingEntry51Exception0, // GL strings
nullptr, // machine model info
- 0, // gpu_series size
- nullptr, // gpu_series
+ 0, // intel_gpu_series size
+ nullptr, // intel_gpu_series
{GpuControlList::kUnknown, GpuControlList::kVersionStyleNumerical,
nullptr, nullptr}, // intel_gpu_generation
&kMoreForEntry51_1440601243Exception0, // more data
@@ -183,17 +183,18 @@ const GpuControlList::Conditions kExceptionsForEntry65[1] = {
{
GpuControlList::kOsAny, // os_type
{GpuControlList::kUnknown, GpuControlList::kVersionStyleNumerical,
- nullptr, nullptr}, // os_version
- 0x00, // vendor_id
- 0, // DeviceIDs size
- nullptr, // DeviceIDs
- GpuControlList::kMultiGpuCategoryNone, // multi_gpu_category
- GpuControlList::kMultiGpuStyleNone, // multi_gpu_style
- nullptr, // driver info
- nullptr, // GL strings
- nullptr, // machine model info
- base::size(kGpuSeriesForEntry65Exception0), // gpu_series size
- kGpuSeriesForEntry65Exception0, // gpu_series
+ nullptr, nullptr}, // os_version
+ 0x00, // vendor_id
+ 0, // DeviceIDs size
+ nullptr, // DeviceIDs
+ GpuControlList::kMultiGpuCategoryNone, // multi_gpu_category
+ GpuControlList::kMultiGpuStyleNone, // multi_gpu_style
+ nullptr, // driver info
+ nullptr, // GL strings
+ nullptr, // machine model info
+ base::size(
+ kIntelGpuSeriesForEntry65Exception0), // intel_gpu_series size
+ kIntelGpuSeriesForEntry65Exception0, // intel_gpu_series
{GpuControlList::kUnknown, GpuControlList::kVersionStyleNumerical,
nullptr, nullptr}, // intel_gpu_generation
&kMoreForEntry65_1440601243Exception0, // more data
@@ -213,8 +214,8 @@ const GpuControlList::Conditions kExceptionsForEntry73[3] = {
nullptr, // driver info
&kGLStringsForGpuControlTestingEntry73Exception0, // GL strings
nullptr, // machine model info
- 0, // gpu_series size
- nullptr, // gpu_series
+ 0, // intel_gpu_series size
+ nullptr, // intel_gpu_series
{GpuControlList::kUnknown, GpuControlList::kVersionStyleNumerical,
nullptr, nullptr}, // intel_gpu_generation
&kMoreForEntry73_1440601243Exception0, // more data
@@ -231,8 +232,8 @@ const GpuControlList::Conditions kExceptionsForEntry73[3] = {
nullptr, // driver info
&kGLStringsForGpuControlTestingEntry73Exception1, // GL strings
nullptr, // machine model info
- 0, // gpu_series size
- nullptr, // gpu_series
+ 0, // intel_gpu_series size
+ nullptr, // intel_gpu_series
{GpuControlList::kUnknown, GpuControlList::kVersionStyleNumerical,
nullptr, nullptr}, // intel_gpu_generation
&kMoreForEntry73_1440601243Exception1, // more data
@@ -249,8 +250,8 @@ const GpuControlList::Conditions kExceptionsForEntry73[3] = {
nullptr, // driver info
&kGLStringsForGpuControlTestingEntry73Exception2, // GL strings
nullptr, // machine model info
- 0, // gpu_series size
- nullptr, // gpu_series
+ 0, // intel_gpu_series size
+ nullptr, // intel_gpu_series
{GpuControlList::kUnknown, GpuControlList::kVersionStyleNumerical,
nullptr, nullptr}, // intel_gpu_generation
&kMoreForEntry73_1440601243Exception2, // more data
diff --git a/chromium/gpu/config/gpu_crash_keys.cc b/chromium/gpu/config/gpu_crash_keys.cc
index 00b84c58d38..a50a2213b52 100644
--- a/chromium/gpu/config/gpu_crash_keys.cc
+++ b/chromium/gpu/config/gpu_crash_keys.cc
@@ -34,6 +34,7 @@ crash_reporter::CrashKeyString<4> gpu_watchdog_kill_after_power_resume(
"gpu-watchdog-kill-after-power-resume");
crash_reporter::CrashKeyString<4> gpu_watchdog_crashed_in_gpu_init(
"gpu-watchdog-crashed-in-gpu-init");
+crash_reporter::CrashKeyString<16> num_of_processors("num-of-processors");
} // namespace crash_keys
} // namespace gpu
diff --git a/chromium/gpu/config/gpu_crash_keys.h b/chromium/gpu/config/gpu_crash_keys.h
index 0de342e49ab..23e33e01bb6 100644
--- a/chromium/gpu/config/gpu_crash_keys.h
+++ b/chromium/gpu/config/gpu_crash_keys.h
@@ -39,6 +39,8 @@ extern GPU_EXPORT crash_reporter::CrashKeyString<4>
gpu_watchdog_kill_after_power_resume;
extern GPU_EXPORT crash_reporter::CrashKeyString<4>
gpu_watchdog_crashed_in_gpu_init;
+extern GPU_EXPORT crash_reporter::CrashKeyString<16> num_of_processors;
+
} // namespace crash_keys
} // namespace gpu
diff --git a/chromium/gpu/config/gpu_driver_bug_list.json b/chromium/gpu/config/gpu_driver_bug_list.json
index a352cd89238..7fee20a6677 100644
--- a/chromium/gpu/config/gpu_driver_bug_list.json
+++ b/chromium/gpu/config/gpu_driver_bug_list.json
@@ -682,8 +682,8 @@
},
"vendor_id": "0x8086",
"driver_version": {
- "op": "<=",
- "value": "9.18.0.0"
+ "op": "<",
+ "value": "0.0.0.3000"
},
"features": [
"disable_d3d11"
@@ -1851,8 +1851,8 @@
"value": "10.13.2"
}
},
- "gpu_series" : [
- "intel_skylake"
+ "intel_gpu_series" : [
+ "skylake"
],
"features" : [
"emulate_isnan_on_float"
@@ -2283,10 +2283,10 @@
"os": {
"type": "win"
},
- "gpu_series": [
- "intel_broadwell",
- "intel_skylake",
- "intel_cherrytrail"
+ "intel_gpu_series": [
+ "broadwell",
+ "skylake",
+ "cherrytrail"
],
"features": [
"disable_accelerated_vpx_decode"
@@ -2518,10 +2518,10 @@
"os": {
"type": "macosx"
},
- "gpu_series": [
- "intel_skylake",
- "intel_kabylake",
- "intel_coffeelake"
+ "intel_gpu_series": [
+ "skylake",
+ "kabylake",
+ "coffeelake"
],
"multi_gpu_category": "any",
"features": [
@@ -2950,30 +2950,6 @@
]
},
{
- "id":281,
- "cr_bugs": [838725],
- "description": "Disable AImageReader on Imagination Technologies GPU as its buggy.",
- "os": {
- "type": "android"
- },
- "gl_vendor": "Imagination Technologies.*",
- "features": [
- "disable_aimagereader"
- ]
- },
- {
- "id":282,
- "cr_bugs": [838725],
- "description": "Disable AImageReader on NVIDIA GPU for as its buggy.",
- "os": {
- "type": "android"
- },
- "gl_vendor": "NVIDIA.*",
- "features": [
- "disable_aimagereader"
- ]
- },
- {
"id": 283,
"cr_bugs": [893177],
"description": "Some drivers can't recover after OUT_OF_MEM and context lost",
@@ -3026,8 +3002,8 @@
"type": "win"
},
"driver_version": {
- "op": ">=",
- "value": "24"
+ "op": ">",
+ "value": "0.0.100.0"
},
"hardware_overlay": "unsupported",
"features": [
@@ -3173,9 +3149,9 @@
"os": {
"type" : "win"
},
- "gpu_series": [
- "intel_sandybridge",
- "intel_ivybridge"
+ "intel_gpu_series": [
+ "sandybridge",
+ "ivybridge"
],
"features": [
"disable_direct_composition"
@@ -3208,7 +3184,7 @@
"vendor_id": "0x8086",
"driver_version": {
"op": "<",
- "value": "24.0.0.0"
+ "value": "0.0.0.9999"
},
"features": [
"disable_nv12_dynamic_textures"
@@ -3486,6 +3462,19 @@
]
},
{
+ "id": 326,
+ "description": "Migrating OpenGL contexts between low- and high-power GPUs is unreliable on Mac NVIDIA laptops",
+ "cr_bugs": [681341],
+ "os": {
+ "type": "macosx"
+ },
+ "vendor_id": "0x10de",
+ "multi_gpu_category": "any",
+ "features": [
+ "force_low_power_gpu"
+ ]
+ },
+ {
"id": 327,
"cr_bugs": [1027981],
"description": "Disable dual source blending support",
@@ -3500,6 +3489,120 @@
"features": [
"disable_dual_source_blending_support"
]
+ },
+ {
+ "id": 328,
+ "cr_bugs": [1041166],
+ "description": "Disable D3D11VideoDecoder due to crashes on NVIDIA",
+ "os": {
+ "type": "win"
+ },
+ "vendor_id": "0x10de",
+ "features": [
+ "disable_d3d11_video_decoder"
+ ]
+ },
+ {
+ "id": 329,
+ "cr_bugs": [1012464],
+ "description": "Limit D3D11VideoDecoder to 11.0 due to crashes on AMD",
+ "os": {
+ "type": "win"
+ },
+ "vendor_id": "0x1002",
+ "features": [
+ "limit_d3d11_video_decoder_to_11_0"
+ ]
+ },
+ {
+ "id": 331,
+ "cr_bugs": [1046462],
+ "description": "GL and Vulkan interop doesn't work with AMD GPU properly",
+ "os": {
+ "type": "linux"
+ },
+ "driver_vendor": "Mesa",
+ "driver_version": {
+ "op": "<",
+ "value": "19.3.5"
+ },
+ "vendor_id": "0x1002",
+ "disabled_extensions": [
+ "GL_EXT_memory_object_fd",
+ "GL_EXT_semaphore_fd"
+ ]
+ },
+ {
+ "id": 332,
+ "cr_bugs": [1045586],
+ "description": "Send empty video hdr metadata due to crashes on AMD",
+ "os": {
+ "type": "win"
+ },
+ "vendor_id": "0x1002",
+ "features": [
+ "use_empty_video_hdr_metadata"
+ ]
+ },
+ {
+ "id": 333,
+ "cr_bugs": [1042615],
+ "description": "Disable Qcomm_tiled_rendering extension for webview",
+ "os": {
+ "type": "android"
+ },
+ "gl_vendor": "Qualcomm.*",
+ "features": [
+ "disable_qcomm_tiled_rendering"
+ ]
+ },
+ {
+ "id": 334,
+ "description": "NVidia drivers seem corrupt memory when all siblings of eglImage are destroyed",
+ "cr_bugs": [1052114],
+ "os": {
+ "type": "android"
+ },
+ "gl_vendor": "NVIDIA.*",
+ "features": [
+ "dont_delete_source_texture_for_egl_image"
+ ]
+ },
+ {
+ "id": 336,
+ "cr_bugs": [625785],
+ "description": "DXVA video decoder crashes on some AMD GPUs",
+ "os": {
+ "type": "win"
+ },
+ "vendor_id": "0x1002",
+ "device_id": ["0x15d8", "0x15dd"],
+ "exceptions": [
+ {
+ "driver_version": {
+ "op": "between",
+ "value": "26.20.11030.2",
+ "value2": "26.20.11999.99999"
+ },
+ "driver_version": {
+ "op": "between",
+ "value": "26.20.12020.15",
+ "value2": "26.20.12999.99999"
+ },
+ "driver_version": {
+ "op": "between",
+ "value": "26.20.13001.27002",
+ "value2": "26.20.13999.99999"
+ },
+ "driver_version": {
+ "op": ">=",
+ "value": "26.20.14001.7001"
+ }
+ }
+ ],
+ "features": [
+ "disable_dxva_video_decoder"
+ ]
}
]
}
diff --git a/chromium/gpu/config/gpu_extra_info.h b/chromium/gpu/config/gpu_extra_info.h
index 39bce1eab21..24d45fdb127 100644
--- a/chromium/gpu/config/gpu_extra_info.h
+++ b/chromium/gpu/config/gpu_extra_info.h
@@ -9,6 +9,11 @@
#include <vector>
#include "gpu/gpu_export.h"
+#include "ui/gfx/buffer_types.h"
+
+#if defined(USE_X11)
+typedef unsigned long VisualID;
+#endif
namespace gpu {
@@ -52,6 +57,13 @@ struct GPU_EXPORT GpuExtraInfo {
// List of the currently available ANGLE features. May be empty if not
// applicable.
ANGLEFeatures angle_features;
+
+#if defined(USE_X11)
+ VisualID system_visual = 0;
+ VisualID rgba_visual = 0;
+
+ std::vector<gfx::BufferUsageAndFormat> gpu_memory_buffer_support_x11;
+#endif
};
} // namespace gpu
diff --git a/chromium/gpu/config/gpu_finch_features.cc b/chromium/gpu/config/gpu_finch_features.cc
index 4fe158b82fb..c920171a041 100644
--- a/chromium/gpu/config/gpu_finch_features.cc
+++ b/chromium/gpu/config/gpu_finch_features.cc
@@ -11,33 +11,11 @@
#endif
namespace features {
-namespace {
#if defined(OS_ANDROID)
-bool FieldIsInBlacklist(const char* current_value, std::string blacklist_str) {
- std::vector<std::string> blacklist = base::SplitString(
- blacklist_str, ",", base::KEEP_WHITESPACE, base::SPLIT_WANT_NONEMPTY);
- for (const std::string& value : blacklist) {
- if (value == current_value)
- return true;
- }
-
- return false;
-}
-#endif
-
-} // namespace
-
-#if defined(OS_ANDROID)
-// Use android AImageReader when playing videos with MediaPlayer.
-const base::Feature kAImageReaderMediaPlayer{"AImageReaderMediaPlayer",
- base::FEATURE_ENABLED_BY_DEFAULT};
-
-// Use android SurfaceControl API for managing display compositor's buffer queue
-// and using overlays on Android.
-// Note that the feature only works with VizDisplayCompositor enabled.
-const base::Feature kAndroidSurfaceControl{"AndroidSurfaceControl",
- base::FEATURE_ENABLED_BY_DEFAULT};
+// Used only by webview to disable SurfaceControl.
+const base::Feature kDisableSurfaceControlForWebview{
+ "DisableSurfaceControlForWebview", base::FEATURE_DISABLED_BY_DEFAULT};
#endif
// Enable GPU Rasterization by default. This can still be overridden by
@@ -55,7 +33,7 @@ const base::Feature kDefaultEnableGpuRasterization{
// Enable out of process rasterization by default. This can still be overridden
// by --enable-oop-rasterization or --disable-oop-rasterization.
-#if defined(OS_ANDROID)
+#if defined(OS_ANDROID) || defined(OS_CHROMEOS) || defined(OS_MACOSX)
const base::Feature kDefaultEnableOopRasterization{
"DefaultEnableOopRasterization", base::FEATURE_ENABLED_BY_DEFAULT};
#else
@@ -84,15 +62,17 @@ const base::Feature kGpuUseDisplayThreadPriority{
"GpuUseDisplayThreadPriority", base::FEATURE_DISABLED_BY_DEFAULT};
#endif
-// Allow GPU watchdog to keep waiting for ackowledgement if one is already
-// issued from the monitored thread.
-const base::Feature kGpuWatchdogNoTerminationAwaitingAcknowledge{
- "GpuWatchdogNoTerminationAwaitingAcknowledge",
- base::FEATURE_DISABLED_BY_DEFAULT};
-
// Gpu watchdog V2 to simplify the logic and reduce GPU hangs
const base::Feature kGpuWatchdogV2{"GpuWatchdogV2",
- base::FEATURE_DISABLED_BY_DEFAULT};
+ base::FEATURE_ENABLED_BY_DEFAULT};
+
+// Use a different set of watchdog timeouts on V1
+const base::Feature kGpuWatchdogV1NewTimeout{"GpuWatchdogV1NewTimeout",
+ base::FEATURE_ENABLED_BY_DEFAULT};
+
+// Use a different set of watchdog timeouts on V2
+const base::Feature kGpuWatchdogV2NewTimeout{"GpuWatchdogV2NewTimeout",
+ base::FEATURE_DISABLED_BY_DEFAULT};
#if defined(OS_MACOSX)
// Enable use of Metal for OOP rasterization.
@@ -125,32 +105,26 @@ const base::Feature kVaapiJpegImageDecodeAcceleration{
const base::Feature kVaapiWebPImageDecodeAcceleration{
"VaapiWebPImageDecodeAcceleration", base::FEATURE_DISABLED_BY_DEFAULT};
-// Enable Vulkan graphics backend if --use-vulkan flag is not used. Otherwise
+// Enable Vulkan graphics backend for compositing and rasterization. Defaults to
+// native implementation if --use-vulkan flag is not used. Otherwise
// --use-vulkan will be followed.
const base::Feature kVulkan{"Vulkan", base::FEATURE_DISABLED_BY_DEFAULT};
-#if defined(OS_ANDROID)
-bool IsAndroidSurfaceControlEnabled() {
- if (!gl::SurfaceControl::IsSupported())
- return false;
-
- if (!base::FeatureList::IsEnabled(kAndroidSurfaceControl))
- return false;
+// Enable SkiaRenderer Dawn graphics backend. On Windows this will use D3D12,
+// and on Linux this will use Vulkan.
+const base::Feature kSkiaDawn{"SkiaDawn", base::FEATURE_DISABLED_BY_DEFAULT};
- if (FieldIsInBlacklist(base::android::BuildInfo::GetInstance()->model(),
- base::GetFieldTrialParamValueByFeature(
- kAndroidSurfaceControl, "blacklisted_models"))) {
- return false;
- }
+// Used to enable shared image mailbox and disable legacy texture mailbox on
+// webview.
+const base::Feature kEnableSharedImageForWebview{
+ "EnableSharedImageForWebview", base::FEATURE_DISABLED_BY_DEFAULT};
- if (FieldIsInBlacklist(
- base::android::BuildInfo::GetInstance()->android_build_id(),
- base::GetFieldTrialParamValueByFeature(kAndroidSurfaceControl,
- "blacklisted_build_ids"))) {
+#if defined(OS_ANDROID)
+bool IsAndroidSurfaceControlEnabled() {
+ if (base::FeatureList::IsEnabled(kDisableSurfaceControlForWebview))
return false;
- }
- return true;
+ return gl::SurfaceControl::IsSupported();
}
#endif
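The changes above replace the AndroidSurfaceControl allow-feature with a webview-specific kill switch, but the declaration/check pattern stays the same. A minimal sketch of that base::Feature pattern is below; kExampleFeature is a hypothetical name, not something defined in this patch.

  #include "base/feature_list.h"

  // Hypothetical feature, used only to illustrate the pattern.
  const base::Feature kExampleFeature{"ExampleFeature",
                                      base::FEATURE_DISABLED_BY_DEFAULT};

  bool UseExampleFeature() {
    // Honors the compiled-in default, Finch field trials, and the
    // --enable-features/--disable-features command-line switches.
    return base::FeatureList::IsEnabled(kExampleFeature);
  }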
diff --git a/chromium/gpu/config/gpu_finch_features.h b/chromium/gpu/config/gpu_finch_features.h
index ed12b079d11..afd2201d075 100644
--- a/chromium/gpu/config/gpu_finch_features.h
+++ b/chromium/gpu/config/gpu_finch_features.h
@@ -17,8 +17,7 @@ namespace features {
// All features in alphabetical order. The features should be documented
// alongside the definition of their values in the .cc file.
#if defined(OS_ANDROID)
-GPU_EXPORT extern const base::Feature kAImageReaderMediaPlayer;
-GPU_EXPORT extern const base::Feature kAndroidSurfaceControl;
+GPU_EXPORT extern const base::Feature kDisableSurfaceControlForWebview;
#endif // defined(OS_ANDROID)
GPU_EXPORT extern const base::Feature kDefaultEnableGpuRasterization;
@@ -33,11 +32,12 @@ GPU_EXPORT extern const base::Feature kGpuProcessHighPriorityWin;
GPU_EXPORT extern const base::Feature kGpuUseDisplayThreadPriority;
-GPU_EXPORT extern const base::Feature
- kGpuWatchdogNoTerminationAwaitingAcknowledge;
-
GPU_EXPORT extern const base::Feature kGpuWatchdogV2;
+GPU_EXPORT extern const base::Feature kGpuWatchdogV1NewTimeout;
+
+GPU_EXPORT extern const base::Feature kGpuWatchdogV2NewTimeout;
+
#if defined(OS_MACOSX)
GPU_EXPORT extern const base::Feature kMetal;
#endif
@@ -52,6 +52,10 @@ GPU_EXPORT extern const base::Feature kVaapiWebPImageDecodeAcceleration;
GPU_EXPORT extern const base::Feature kVulkan;
+GPU_EXPORT extern const base::Feature kSkiaDawn;
+
+GPU_EXPORT extern const base::Feature kEnableSharedImageForWebview;
+
#if defined(OS_ANDROID)
GPU_EXPORT bool IsAndroidSurfaceControlEnabled();
#endif
diff --git a/chromium/gpu/config/gpu_info.cc b/chromium/gpu/config/gpu_info.cc
index da699bb0632..f063cd44e1f 100644
--- a/chromium/gpu/config/gpu_info.cc
+++ b/chromium/gpu/config/gpu_info.cc
@@ -4,9 +4,15 @@
#include <stdint.h>
+#include "gpu/command_buffer/common/gpu_memory_buffer_support.h"
#include "gpu/config/gpu_info.h"
#include "gpu/config/gpu_util.h"
+#if defined(OS_MACOSX)
+#include <GLES2/gl2.h>
+#include <GLES2/gl2extchromium.h>
+#endif // OS_MACOSX
+
namespace {
void EnumerateGPUDevice(const gpu::GPUInfo::GPUDevice& device,
@@ -115,6 +121,18 @@ void EnumerateDx12VulkanVersionInfo(const gpu::Dx12VulkanVersionInfo& info,
gpu::VulkanVersionToString(info.vulkan_version));
enumerator->EndDx12VulkanVersionInfo();
}
+
+void EnumerateOverlayInfo(const gpu::OverlayInfo& info,
+ gpu::GPUInfo::Enumerator* enumerator) {
+ enumerator->BeginOverlayInfo();
+ enumerator->AddBool("directComposition", info.direct_composition);
+ enumerator->AddBool("supportsOverlays", info.supports_overlays);
+ enumerator->AddString("yuy2OverlaySupport",
+ gpu::OverlaySupportToString(info.yuy2_overlay_support));
+ enumerator->AddString("nv12OverlaySupport",
+ gpu::OverlaySupportToString(info.nv12_overlay_support));
+ enumerator->EndOverlayInfo();
+}
#endif
} // namespace
@@ -134,6 +152,19 @@ const char* OverlaySupportToString(gpu::OverlaySupport support) {
}
#endif // OS_WIN
+#if defined(OS_MACOSX)
+GPU_EXPORT bool ValidateMacOSSpecificTextureTarget(int target) {
+ switch (target) {
+ case GL_TEXTURE_2D:
+ case GL_TEXTURE_RECTANGLE_ARB:
+ return true;
+
+ default:
+ return false;
+ }
+}
+#endif // OS_MACOSX
+
VideoDecodeAcceleratorCapabilities::VideoDecodeAcceleratorCapabilities()
: flags(0) {}
@@ -183,11 +214,10 @@ GPUInfo::GPUInfo()
sandboxed(false),
in_process_gpu(true),
passthrough_cmd_decoder(false),
+#if defined(OS_MACOSX)
+ macos_specific_texture_target(gpu::GetPlatformSpecificTextureTarget()),
+#endif // OS_MACOSX
jpeg_decode_accelerator_supported(false),
-#if defined(USE_X11)
- system_visual(0),
- rgba_visual(0),
-#endif
oop_rasterization_supported(false),
subpixel_font_rendering(true) {
}
@@ -242,13 +272,13 @@ void GPUInfo::EnumerateFields(Enumerator* enumerator) const {
bool in_process_gpu;
bool passthrough_cmd_decoder;
bool can_support_threaded_texture_mailbox;
+#if defined(OS_MACOSX)
+ uint32_t macos_specific_texture_target;
+#endif // OS_MACOSX
#if defined(OS_WIN)
- bool direct_composition;
- bool supports_overlays;
- OverlaySupport yuy2_overlay_support;
- OverlaySupport nv12_overlay_support;
DxDiagNode dx_diagnostics;
Dx12VulkanVersionInfo dx12_vulkan_version_info;
+ OverlayInfo overlay_info;
#endif
VideoDecodeAcceleratorCapabilities video_decode_accelerator_capabilities;
@@ -259,11 +289,6 @@ void GPUInfo::EnumerateFields(Enumerator* enumerator) const {
ImageDecodeAcceleratorSupportedProfiles
image_decode_accelerator_supported_profiles;
-#if defined(USE_X11)
- VisualID system_visual;
- VisualID rgba_visual;
-#endif
-
bool oop_rasterization_supported;
bool subpixel_font_rendering;
@@ -311,14 +336,13 @@ void GPUInfo::EnumerateFields(Enumerator* enumerator) const {
enumerator->AddBool("passthroughCmdDecoder", passthrough_cmd_decoder);
enumerator->AddBool("canSupportThreadedTextureMailbox",
can_support_threaded_texture_mailbox);
+#if defined(OS_MACOSX)
+ enumerator->AddInt("macOSSpecificTextureTarget",
+ macos_specific_texture_target);
+#endif // OS_MACOSX
// TODO(kbr): add dx_diagnostics on Windows.
#if defined(OS_WIN)
- enumerator->AddBool("directComposition", direct_composition);
- enumerator->AddBool("supportsOverlays", supports_overlays);
- enumerator->AddString("yuy2OverlaySupport",
- OverlaySupportToString(yuy2_overlay_support));
- enumerator->AddString("nv12OverlaySupport",
- OverlaySupportToString(nv12_overlay_support));
+ EnumerateOverlayInfo(overlay_info, enumerator);
EnumerateDx12VulkanVersionInfo(dx12_vulkan_version_info, enumerator);
#endif
enumerator->AddInt("videoDecodeAcceleratorFlags",
@@ -334,10 +358,6 @@ void GPUInfo::EnumerateFields(Enumerator* enumerator) const {
jpeg_decode_accelerator_supported);
for (const auto& profile : image_decode_accelerator_supported_profiles)
EnumerateImageDecodeAcceleratorSupportedProfile(profile, enumerator);
-#if defined(USE_X11)
- enumerator->AddInt64("systemVisual", system_visual);
- enumerator->AddInt64("rgbaVisual", rgba_visual);
-#endif
enumerator->AddBool("oopRasterizationSupported", oop_rasterization_supported);
enumerator->AddBool("subpixelFontRendering", subpixel_font_rendering);
#if BUILDFLAG(ENABLE_VULKAN)
diff --git a/chromium/gpu/config/gpu_info.h b/chromium/gpu/config/gpu_info.h
index cce6a59730c..50cec940d73 100644
--- a/chromium/gpu/config/gpu_info.h
+++ b/chromium/gpu/config/gpu_info.h
@@ -23,10 +23,6 @@
#include "gpu/vulkan/buildflags.h"
#include "ui/gfx/geometry/size.h"
-#if defined(USE_X11)
-typedef unsigned long VisualID;
-#endif
-
#if BUILDFLAG(ENABLE_VULKAN)
#include "gpu/config/vulkan_info.h"
#endif
@@ -35,31 +31,31 @@ namespace gpu {
// These values are persisted to logs. Entries should not be renumbered and
// numeric values should never be reused.
-enum class GpuSeriesType {
+enum class IntelGpuSeriesType {
kUnknown = 0,
// Intel 6th gen
- kIntelSandybridge = 1,
+ kSandybridge = 1,
// Intel 7th gen
- kIntelBaytrail = 2,
- kIntelIvybridge = 3,
- kIntelHaswell = 4,
+ kBaytrail = 2,
+ kIvybridge = 3,
+ kHaswell = 4,
// Intel 8th gen
- kIntelCherrytrail = 5,
- kIntelBroadwell = 6,
+ kCherrytrail = 5,
+ kBroadwell = 6,
// Intel 9th gen
- kIntelApollolake = 7,
- kIntelSkylake = 8,
- kIntelGeminilake = 9,
- kIntelKabylake = 10,
- kIntelCoffeelake = 11,
- kIntelWhiskeylake = 12,
- kIntelCometlake = 13,
+ kApollolake = 7,
+ kSkylake = 8,
+ kGeminilake = 9,
+ kKabylake = 10,
+ kCoffeelake = 11,
+ kWhiskeylake = 12,
+ kCometlake = 13,
// Intel 10th gen
- kIntelCannonlake = 14,
+ kCannonlake = 14,
// Intel 11th gen
- kIntelIcelake = 15,
+ kIcelake = 15,
// Please also update |gpu_series_map| in process_json.py.
- kMaxValue = kIntelIcelake,
+ kMaxValue = kIcelake,
};
// Video profile. This *must* match media::VideoCodecProfile.
@@ -190,8 +186,32 @@ struct GPU_EXPORT Dx12VulkanVersionInfo {
// The support Vulkan API version in the gpu driver;
uint32_t vulkan_version = 0;
};
+
+struct GPU_EXPORT OverlayInfo {
+ OverlayInfo& operator=(const OverlayInfo& other) = default;
+ bool operator==(const OverlayInfo& other) const {
+ return direct_composition == other.direct_composition &&
+ supports_overlays == other.supports_overlays &&
+ yuy2_overlay_support == other.yuy2_overlay_support &&
+ nv12_overlay_support == other.nv12_overlay_support;
+ }
+ bool operator!=(const OverlayInfo& other) const { return !(*this == other); }
+
+ // True if we use direct composition surface on Windows.
+ bool direct_composition = false;
+
+ // True if we use direct composition surface overlays on Windows.
+ bool supports_overlays = false;
+ OverlaySupport yuy2_overlay_support = OverlaySupport::kNone;
+ OverlaySupport nv12_overlay_support = OverlaySupport::kNone;
+};
+
#endif
+#if defined(OS_MACOSX)
+GPU_EXPORT bool ValidateMacOSSpecificTextureTarget(int target);
+#endif // OS_MACOSX
+
struct GPU_EXPORT GPUInfo {
struct GPU_EXPORT GPUDevice {
GPUDevice();
@@ -332,19 +352,20 @@ struct GPU_EXPORT GPUInfo {
// is only implemented on Android.
bool can_support_threaded_texture_mailbox = false;
-#if defined(OS_WIN)
- // True if we use direct composition surface on Windows.
- bool direct_composition = false;
-
- // True if we use direct composition surface overlays on Windows.
- bool supports_overlays = false;
- OverlaySupport yuy2_overlay_support = OverlaySupport::kNone;
- OverlaySupport nv12_overlay_support = OverlaySupport::kNone;
+#if defined(OS_MACOSX)
+ // Enum describing which texture target is used for native GpuMemoryBuffers on
+ // MacOS. Valid values are GL_TEXTURE_2D and GL_TEXTURE_RECTANGLE_ARB.
+ uint32_t macos_specific_texture_target;
+#endif // OS_MACOSX
+#if defined(OS_WIN)
// The information returned by the DirectX Diagnostics Tool.
DxDiagNode dx_diagnostics;
Dx12VulkanVersionInfo dx12_vulkan_version_info;
+
+ // The GPU hardware overlay info.
+ OverlayInfo overlay_info;
#endif
VideoDecodeAcceleratorCapabilities video_decode_accelerator_capabilities;
@@ -355,11 +376,6 @@ struct GPU_EXPORT GPUInfo {
ImageDecodeAcceleratorSupportedProfiles
image_decode_accelerator_supported_profiles;
-#if defined(USE_X11)
- VisualID system_visual;
- VisualID rgba_visual;
-#endif
-
bool oop_rasterization_supported;
bool subpixel_font_rendering;
@@ -417,6 +433,9 @@ struct GPU_EXPORT GPUInfo {
virtual void BeginDx12VulkanVersionInfo() = 0;
virtual void EndDx12VulkanVersionInfo() = 0;
+ virtual void BeginOverlayInfo() = 0;
+ virtual void EndOverlayInfo() = 0;
+
protected:
virtual ~Enumerator() = default;
};
diff --git a/chromium/gpu/config/gpu_info_collector.cc b/chromium/gpu/config/gpu_info_collector.cc
index cdc50836c75..59d409b5257 100644
--- a/chromium/gpu/config/gpu_info_collector.cc
+++ b/chromium/gpu/config/gpu_info_collector.cc
@@ -35,6 +35,8 @@
#include "ui/gl/init/gl_factory.h"
#if defined(USE_X11)
+#include "ui/gfx/linux/gpu_memory_buffer_support_x11.h"
+#include "ui/gfx/switches.h"
#include "ui/gl/gl_visual_picker_glx.h"
#endif
@@ -260,18 +262,11 @@ bool CollectGraphicsInfoGL(GPUInfo* gpu_info) {
gfx::HasExtension(extension_set, "GL_KHR_robustness") ||
gfx::HasExtension(extension_set, "GL_ARB_robustness");
if (supports_robustness) {
- glGetIntegerv(GL_RESET_NOTIFICATION_STRATEGY_ARB,
+ glGetIntegerv(
+ GL_RESET_NOTIFICATION_STRATEGY_ARB,
reinterpret_cast<GLint*>(&gpu_info->gl_reset_notification_strategy));
}
-#if defined(USE_X11)
- if (gl::GetGLImplementation() == gl::kGLImplementationDesktopGL) {
- gl::GLVisualPickerGLX* visual_picker = gl::GLVisualPickerGLX::GetInstance();
- gpu_info->system_visual = visual_picker->system_visual().visualid;
- gpu_info->rgba_visual = visual_picker->rgba_visual().visualid;
- }
-#endif
-
// Unconditionally check oop raster status regardless of preferences
// so that finch trials can turn it on.
gpu_info->oop_rasterization_supported = SupportsOOPRaster(gl_info);
@@ -403,7 +398,8 @@ void CollectGraphicsInfoForTesting(GPUInfo* gpu_info) {
#endif // OS_ANDROID
}
-bool CollectGpuExtraInfo(GpuExtraInfo* gpu_extra_info) {
+bool CollectGpuExtraInfo(GpuExtraInfo* gpu_extra_info,
+ const GpuPreferences& prefs) {
// Populate the list of ANGLE features by querying the functions exposed by
// EGL_ANGLE_feature_control if it's available.
if (gl::GLSurfaceEGL::IsANGLEFeatureControlSupported()) {
@@ -428,6 +424,39 @@ bool CollectGpuExtraInfo(GpuExtraInfo* gpu_extra_info) {
}
}
+#if defined(USE_X11)
+ // Create the GLVisualPickerGLX singleton now while the GbmSupportX11
+ // singleton is busy being created on another thread.
+ gl::GLVisualPickerGLX* visual_picker;
+ if (gl::GetGLImplementation() == gl::kGLImplementationDesktopGL)
+ visual_picker = gl::GLVisualPickerGLX::GetInstance();
+
+ // TODO(https://crbug.com/1031269): Enable by default.
+ if (prefs.enable_native_gpu_memory_buffers) {
+ gpu_extra_info->gpu_memory_buffer_support_x11 =
+ ui::GpuMemoryBufferSupportX11::GetInstance()->supported_configs();
+ }
+
+ if (gl::GetGLImplementation() == gl::kGLImplementationDesktopGL) {
+ gpu_extra_info->system_visual = visual_picker->system_visual().visualid;
+ gpu_extra_info->rgba_visual = visual_picker->rgba_visual().visualid;
+
+ // With GLX, only BGR(A) buffer formats are supported. EGL does not have
+ // this restriction.
+ gpu_extra_info->gpu_memory_buffer_support_x11.erase(
+ std::remove_if(gpu_extra_info->gpu_memory_buffer_support_x11.begin(),
+ gpu_extra_info->gpu_memory_buffer_support_x11.end(),
+ [&](gfx::BufferUsageAndFormat usage_and_format) {
+ return !visual_picker->GetFbConfigForFormat(
+ usage_and_format.format);
+ }),
+ gpu_extra_info->gpu_memory_buffer_support_x11.end());
+ } else if (gl::GetGLImplementation() == gl::kGLImplementationEGLANGLE) {
+ // ANGLE does not yet support EGL_EXT_image_dma_buf_import[_modifiers].
+ gpu_extra_info->gpu_memory_buffer_support_x11.clear();
+ }
+#endif
+
return true;
}
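The X11 block above filters gpu_memory_buffer_support_x11 with the erase–remove_if idiom, dropping formats for which GLX has no FbConfig. A generic, self-contained sketch of the idiom, with a stand-in predicate in place of GLVisualPickerGLX::GetFbConfigForFormat():

  #include <algorithm>
  #include <vector>

  struct Config {
    int format = 0;
  };

  void DropUnsupported(std::vector<Config>* configs) {
    auto unsupported = [](const Config& config) {
      return config.format % 2 != 0;  // Stand-in for "no FbConfig".
    };
    configs->erase(
        std::remove_if(configs->begin(), configs->end(), unsupported),
        configs->end());
  }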
diff --git a/chromium/gpu/config/gpu_info_collector.h b/chromium/gpu/config/gpu_info_collector.h
index 8edb8801b41..b2423096815 100644
--- a/chromium/gpu/config/gpu_info_collector.h
+++ b/chromium/gpu/config/gpu_info_collector.h
@@ -10,8 +10,13 @@
#include "build/build_config.h"
#include "gpu/config/gpu_extra_info.h"
#include "gpu/config/gpu_info.h"
+#include "gpu/config/gpu_preferences.h"
#include "gpu/gpu_export.h"
+#if defined(OS_WIN)
+#include <d3dcommon.h>
+#endif // OS_WIN
+
namespace angle {
struct SystemInfo;
}
@@ -42,6 +47,18 @@ GPU_EXPORT bool CollectContextGraphicsInfo(GPUInfo* gpu_info);
GPU_EXPORT bool GetDxDiagnostics(DxDiagNode* output);
GPU_EXPORT void RecordGpuSupportedRuntimeVersionHistograms(
Dx12VulkanVersionInfo* dx12_vulkan_version_info);
+
+// Iterate through all adapters and create a hardware D3D11 device on each
+// adapter. If device creation succeeds, query the highest feature level it
+// supports and whether it's a discrete GPU.
+// Set |d3d11_feature_level| to the highest level across all adapters.
+// Set |has_discrete_gpu| to true if one of the adapters is discrete.
+// Return false if info collection fails.
+GPU_EXPORT bool CollectD3D11FeatureInfo(D3D_FEATURE_LEVEL* d3d11_feature_level,
+ bool* has_discrete_gpu);
+
+// Collect the hardware overlay support flags.
+GPU_EXPORT void CollectHardwareOverlayInfo(OverlayInfo* overlay_info);
#endif // OS_WIN
// Create a GL context and collect GL strings and versions.
@@ -61,7 +78,8 @@ void FillGPUInfoFromSystemInfo(GPUInfo* gpu_info,
GPU_EXPORT void CollectGraphicsInfoForTesting(GPUInfo* gpu_info);
// Collect Graphics info related to the current process
-GPU_EXPORT bool CollectGpuExtraInfo(GpuExtraInfo* gpu_extra_info);
+GPU_EXPORT bool CollectGpuExtraInfo(GpuExtraInfo* gpu_extra_info,
+ const GpuPreferences& prefs);
} // namespace gpu
diff --git a/chromium/gpu/config/gpu_info_collector_linux.cc b/chromium/gpu/config/gpu_info_collector_linux.cc
index 2ecc1922ce5..86b76f9778a 100644
--- a/chromium/gpu/config/gpu_info_collector_linux.cc
+++ b/chromium/gpu/config/gpu_info_collector_linux.cc
@@ -3,6 +3,8 @@
// found in the LICENSE file.
#include "base/trace_event/trace_event.h"
+#include "build/build_config.h"
+#include "build/chromecast_buildflags.h"
#include "gpu/config/gpu_info_collector.h"
#include "third_party/angle/src/gpu_info_util/SystemInfo.h"
@@ -13,6 +15,13 @@ bool CollectContextGraphicsInfo(GPUInfo* gpu_info) {
TRACE_EVENT0("gpu", "gpu_info_collector::CollectGraphicsInfo");
+#if BUILDFLAG(IS_CHROMECAST)
+ // Chromecast doesn't go through CollectBasicGraphicsInfo() step, so
+ // this is the only place we need to set machine model.
+ // TODO(zmo): maybe distinguish different Chromecast products.
+ gpu_info->machine_model_name = "Chromecast";
+#endif // IS_CHROMECAST
+
return CollectGraphicsInfoGL(gpu_info);
}
diff --git a/chromium/gpu/config/gpu_info_collector_mac.mm b/chromium/gpu/config/gpu_info_collector_mac.mm
index 62b5c074de5..fc58409f497 100644
--- a/chromium/gpu/config/gpu_info_collector_mac.mm
+++ b/chromium/gpu/config/gpu_info_collector_mac.mm
@@ -5,6 +5,7 @@
#include "gpu/config/gpu_info_collector.h"
#include "base/trace_event/trace_event.h"
+#include "gpu/command_buffer/common/gpu_memory_buffer_support.h"
#include "third_party/angle/src/gpu_info_util/SystemInfo.h"
namespace gpu {
@@ -13,6 +14,10 @@ bool CollectContextGraphicsInfo(GPUInfo* gpu_info) {
DCHECK(gpu_info);
TRACE_EVENT0("gpu", "gpu_info_collector::CollectGraphicsInfo");
+
+ gpu_info->macos_specific_texture_target =
+ gpu::GetPlatformSpecificTextureTarget();
+
return CollectGraphicsInfoGL(gpu_info);
}
diff --git a/chromium/gpu/config/gpu_info_collector_win.cc b/chromium/gpu/config/gpu_info_collector_win.cc
index 34ef68895bb..6d5a302af48 100644
--- a/chromium/gpu/config/gpu_info_collector_win.cc
+++ b/chromium/gpu/config/gpu_info_collector_win.cc
@@ -14,6 +14,7 @@
#include <windows.h>
#include <d3d11.h>
+#include <d3d11_3.h>
#include <d3d12.h>
#include <dxgi.h>
#include <wrl/client.h>
@@ -33,6 +34,7 @@
#include "build/branding_buildflags.h"
#include "gpu/config/gpu_util.h"
#include "third_party/vulkan/include/vulkan/vulkan.h"
+#include "ui/gl/direct_composition_surface_win.h"
namespace gpu {
@@ -40,12 +42,15 @@ namespace {
// These values are persisted to logs. Entries should not be renumbered and
// numeric values should never be reused.
-// This should match enum D3DFeatureLevel in \tools\metrics\histograms\enums.xml
+// This should match enum D3D12FeatureLevel in
+// \tools\metrics\histograms\enums.xml
enum class D3D12FeatureLevel {
kD3DFeatureLevelUnknown = 0,
kD3DFeatureLevel_12_0 = 1,
kD3DFeatureLevel_12_1 = 2,
- kMaxValue = kD3DFeatureLevel_12_1,
+ kD3DFeatureLevel_11_0 = 3,
+ kD3DFeatureLevel_11_1 = 4,
+ kMaxValue = kD3DFeatureLevel_11_1,
};
inline D3D12FeatureLevel ConvertToHistogramFeatureLevel(
@@ -57,12 +62,24 @@ inline D3D12FeatureLevel ConvertToHistogramFeatureLevel(
return D3D12FeatureLevel::kD3DFeatureLevel_12_0;
case D3D_FEATURE_LEVEL_12_1:
return D3D12FeatureLevel::kD3DFeatureLevel_12_1;
+ case D3D_FEATURE_LEVEL_11_0:
+ return D3D12FeatureLevel::kD3DFeatureLevel_11_0;
+ case D3D_FEATURE_LEVEL_11_1:
+ return D3D12FeatureLevel::kD3DFeatureLevel_11_1;
default:
NOTREACHED();
return D3D12FeatureLevel::kD3DFeatureLevelUnknown;
}
}
+OverlaySupport FlagsToOverlaySupport(UINT flags) {
+ if (flags & DXGI_OVERLAY_SUPPORT_FLAG_SCALING)
+ return OverlaySupport::kScaling;
+ if (flags & DXGI_OVERLAY_SUPPORT_FLAG_DIRECT)
+ return OverlaySupport::kDirect;
+ return OverlaySupport::kNone;
+}
+
} // namespace
#if BUILDFLAG(GOOGLE_CHROME_BRANDING) && defined(OFFICIAL_BUILD)
@@ -79,6 +96,26 @@ bool GetAMDSwitchableInfo(bool* is_switchable,
}
#endif
+// This has to be called after a context is created, active GPU is identified,
+// and GPU driver bug workarounds are computed again. Otherwise the workaround
+// |disable_direct_composition| may not be correctly applied.
+// Also, this has to be called after the decision to fall back to SwiftShader
+// is finalized, because this function depends on whether GL is ANGLE's GLES.
+void CollectHardwareOverlayInfo(OverlayInfo* overlay_info) {
+ if (gl::GetGLImplementation() == gl::kGLImplementationEGLANGLE) {
+ overlay_info->direct_composition =
+ gl::DirectCompositionSurfaceWin::IsDirectCompositionSupported();
+ overlay_info->supports_overlays =
+ gl::DirectCompositionSurfaceWin::AreOverlaysSupported();
+ overlay_info->nv12_overlay_support = FlagsToOverlaySupport(
+ gl::DirectCompositionSurfaceWin::GetOverlaySupportFlags(
+ DXGI_FORMAT_NV12));
+ overlay_info->yuy2_overlay_support = FlagsToOverlaySupport(
+ gl::DirectCompositionSurfaceWin::GetOverlaySupportFlags(
+ DXGI_FORMAT_YUY2));
+ }
+}
+
bool CollectDriverInfoD3D(GPUInfo* gpu_info) {
TRACE_EVENT0("gpu", "CollectDriverInfoD3D");
@@ -162,19 +199,19 @@ void GetGpuSupportedD3D12Version(Dx12VulkanVersionInfo* info) {
info->supports_dx12 = false;
info->d3d12_feature_level = 0;
- base::NativeLibrary d3d12_library =
- base::LoadNativeLibrary(base::FilePath(L"d3d12.dll"), nullptr);
- if (!d3d12_library) {
+ base::ScopedNativeLibrary d3d12_library(
+ base::FilePath(FILE_PATH_LITERAL("d3d12.dll")));
+ if (!d3d12_library.is_valid())
return;
- }
// The order of feature levels to attempt to create in D3D CreateDevice
- const D3D_FEATURE_LEVEL feature_levels[] = {D3D_FEATURE_LEVEL_12_1,
- D3D_FEATURE_LEVEL_12_0};
+ const D3D_FEATURE_LEVEL feature_levels[] = {
+ D3D_FEATURE_LEVEL_12_1, D3D_FEATURE_LEVEL_12_0, D3D_FEATURE_LEVEL_11_1,
+ D3D_FEATURE_LEVEL_11_0};
PFN_D3D12_CREATE_DEVICE D3D12CreateDevice =
reinterpret_cast<PFN_D3D12_CREATE_DEVICE>(
- GetProcAddress(d3d12_library, "D3D12CreateDevice"));
+ d3d12_library.GetFunctionPointer("D3D12CreateDevice"));
if (D3D12CreateDevice) {
// For the default adapter only. (*pAdapter == nullptr)
// Check to see if the adapter supports Direct3D 12, but don't create the
@@ -183,13 +220,11 @@ void GetGpuSupportedD3D12Version(Dx12VulkanVersionInfo* info) {
if (SUCCEEDED(D3D12CreateDevice(nullptr, level, _uuidof(ID3D12Device),
nullptr))) {
info->d3d12_feature_level = level;
- info->supports_dx12 = true;
+ info->supports_dx12 = (level >= D3D_FEATURE_LEVEL_12_0) ? true : false;
break;
}
}
}
-
- base::UnloadNativeLibrary(d3d12_library);
}
bool BadAMDVulkanDriverVersion() {
@@ -263,19 +298,21 @@ bool InitVulkan(base::NativeLibrary* vulkan_library,
return true;
}
}
- base::UnloadNativeLibrary(*vulkan_library);
+
+ // From the crash reports, unloading the library here might cause a crash in
+ // the Vulkan loader or in the Vulkan driver. To work around it, don't
+ // explicitly unload the DLL. Instead, GPU process shutdown will unload all
+ // loaded DLLs.
+ // base::UnloadNativeLibrary(*vulkan_library);
return false;
}
bool InitVulkanInstanceProc(
const VkInstance& vk_instance,
const PFN_vkGetInstanceProcAddr& vkGetInstanceProcAddr,
- PFN_vkDestroyInstance* vkDestroyInstance,
PFN_vkEnumeratePhysicalDevices* vkEnumeratePhysicalDevices,
PFN_vkEnumerateDeviceExtensionProperties*
vkEnumerateDeviceExtensionProperties) {
- *vkDestroyInstance = reinterpret_cast<PFN_vkDestroyInstance>(
- vkGetInstanceProcAddr(vk_instance, "vkDestroyInstance"));
*vkEnumeratePhysicalDevices =
reinterpret_cast<PFN_vkEnumeratePhysicalDevices>(
@@ -286,7 +323,7 @@ bool InitVulkanInstanceProc(
vkGetInstanceProcAddr(vk_instance,
"vkEnumerateDeviceExtensionProperties"));
- if ((*vkDestroyInstance) && (*vkEnumeratePhysicalDevices) &&
+ if ((*vkEnumeratePhysicalDevices) &&
(*vkEnumerateDeviceExtensionProperties)) {
return true;
}
@@ -304,7 +341,6 @@ void GetGpuSupportedVulkanVersionAndExtensions(
PFN_vkCreateInstance vkCreateInstance;
PFN_vkEnumeratePhysicalDevices vkEnumeratePhysicalDevices;
PFN_vkEnumerateDeviceExtensionProperties vkEnumerateDeviceExtensionProperties;
- PFN_vkDestroyInstance vkDestroyInstance;
VkInstance vk_instance = VK_NULL_HANDLE;
uint32_t physical_device_count = 0;
info->supports_vulkan = false;
@@ -334,12 +370,12 @@ void GetGpuSupportedVulkanVersionAndExtensions(
create_info.pApplicationInfo = &app_info;
// Get the Vulkan API version supported in the GPU driver
- for (int minor_version = 1; minor_version >= 0; --minor_version) {
+ for (int minor_version = 2; minor_version >= 0; --minor_version) {
app_info.apiVersion = VK_MAKE_VERSION(1, minor_version, 0);
VkResult result = vkCreateInstance(&create_info, nullptr, &vk_instance);
if (result == VK_SUCCESS && vk_instance &&
InitVulkanInstanceProc(vk_instance, vkGetInstanceProcAddr,
- &vkDestroyInstance, &vkEnumeratePhysicalDevices,
+ &vkEnumeratePhysicalDevices,
&vkEnumerateDeviceExtensionProperties)) {
result = vkEnumeratePhysicalDevices(vk_instance, &physical_device_count,
nullptr);
@@ -348,7 +384,8 @@ void GetGpuSupportedVulkanVersionAndExtensions(
info->vulkan_version = app_info.apiVersion;
break;
} else {
- vkDestroyInstance(vk_instance, nullptr);
+ // Skip destroy here. GPU process shutdown will unload all loaded DLLs.
+ // vkDestroyInstance(vk_instance, nullptr);
vk_instance = VK_NULL_HANDLE;
}
}
@@ -383,11 +420,14 @@ void GetGpuSupportedVulkanVersionAndExtensions(
}
}
- if (vk_instance) {
- vkDestroyInstance(vk_instance, nullptr);
- }
-
- base::UnloadNativeLibrary(vulkan_library);
+ // From the crash reports, calling the following two functions might cause a
+ // crash in the Vulkan loader or in the Vulkan driver. To work around it,
+ // don't explicitly unload the DLL. Instead, GPU process shutdown will unload
+ // all loaded DLLs.
+ // if (vk_instance) {
+ // vkDestroyInstance(vk_instance, nullptr);
+ // }
+ // base::UnloadNativeLibrary(vulkan_library);
}
void RecordGpuSupportedRuntimeVersionHistograms(Dx12VulkanVersionInfo* info) {
@@ -418,6 +458,70 @@ void RecordGpuSupportedRuntimeVersionHistograms(Dx12VulkanVersionInfo* info) {
}
}
+bool CollectD3D11FeatureInfo(D3D_FEATURE_LEVEL* d3d11_feature_level,
+ bool* has_discrete_gpu) {
+ Microsoft::WRL::ComPtr<IDXGIFactory> dxgi_factory;
+ if (FAILED(::CreateDXGIFactory(IID_PPV_ARGS(&dxgi_factory))))
+ return false;
+
+ base::ScopedNativeLibrary d3d11_library(
+ base::FilePath(FILE_PATH_LITERAL("d3d11.dll")));
+ if (!d3d11_library.is_valid())
+ return false;
+ PFN_D3D11_CREATE_DEVICE D3D11CreateDevice =
+ reinterpret_cast<PFN_D3D11_CREATE_DEVICE>(
+ d3d11_library.GetFunctionPointer("D3D11CreateDevice"));
+ if (!D3D11CreateDevice)
+ return false;
+
+ // The order of feature levels to attempt to create in D3D CreateDevice
+ const D3D_FEATURE_LEVEL kFeatureLevels[] = {
+ D3D_FEATURE_LEVEL_12_1, D3D_FEATURE_LEVEL_12_0, D3D_FEATURE_LEVEL_11_1,
+ D3D_FEATURE_LEVEL_11_0, D3D_FEATURE_LEVEL_10_1, D3D_FEATURE_LEVEL_10_0,
+ D3D_FEATURE_LEVEL_9_3, D3D_FEATURE_LEVEL_9_2, D3D_FEATURE_LEVEL_9_1};
+
+ bool detected_discrete_gpu = false;
+ D3D_FEATURE_LEVEL max_level = D3D_FEATURE_LEVEL_1_0_CORE;
+ Microsoft::WRL::ComPtr<IDXGIAdapter> dxgi_adapter;
+ for (UINT ii = 0; SUCCEEDED(dxgi_factory->EnumAdapters(ii, &dxgi_adapter));
+ ++ii) {
+ DXGI_ADAPTER_DESC desc;
+ if (SUCCEEDED(dxgi_adapter->GetDesc(&desc)) && desc.VendorId == 0x1414) {
+ // Bypass Microsoft software renderer.
+ continue;
+ }
+ Microsoft::WRL::ComPtr<ID3D11Device> d3d11_device;
+ D3D_FEATURE_LEVEL returned_feature_level = D3D_FEATURE_LEVEL_1_0_CORE;
+ if (FAILED(D3D11CreateDevice(dxgi_adapter.Get(), D3D_DRIVER_TYPE_UNKNOWN,
+ /*Software=*/0,
+ /*Flags=*/0, kFeatureLevels,
+ _countof(kFeatureLevels), D3D11_SDK_VERSION,
+ &d3d11_device, &returned_feature_level,
+ /*ppImmediateContext=*/nullptr))) {
+ continue;
+ }
+ if (returned_feature_level > max_level)
+ max_level = returned_feature_level;
+ Microsoft::WRL::ComPtr<ID3D11Device3> d3d11_device_3;
+ if (FAILED(d3d11_device.As(&d3d11_device_3)))
+ continue;
+ D3D11_FEATURE_DATA_D3D11_OPTIONS2 data = {};
+ if (FAILED(d3d11_device_3->CheckFeatureSupport(D3D11_FEATURE_D3D11_OPTIONS2,
+ &data, sizeof(data)))) {
+ continue;
+ }
+ if (!data.UnifiedMemoryArchitecture)
+ detected_discrete_gpu = true;
+ }
+
+ if (max_level > D3D_FEATURE_LEVEL_1_0_CORE) {
+ *d3d11_feature_level = max_level;
+ *has_discrete_gpu = detected_discrete_gpu;
+ return true;
+ }
+ return false;
+}
+
bool CollectContextGraphicsInfo(GPUInfo* gpu_info) {
TRACE_EVENT0("gpu", "CollectGraphicsInfo");
diff --git a/chromium/gpu/config/gpu_info_unittest.cc b/chromium/gpu/config/gpu_info_unittest.cc
index f849760247a..13bb5d38eff 100644
--- a/chromium/gpu/config/gpu_info_unittest.cc
+++ b/chromium/gpu/config/gpu_info_unittest.cc
@@ -17,6 +17,7 @@ class TestGPUInfoEnumerator : public gpu::GPUInfo::Enumerator {
video_encode_accelerator_profile_active_(false),
image_decode_accelerator_profile_active_(false),
dx12_vulkan_version_info_active_(false),
+ overlay_info_active_(false),
aux_attributes_active_(false) {}
void AddInt64(const char* name, int64_t value) override {}
@@ -70,6 +71,10 @@ class TestGPUInfoEnumerator : public gpu::GPUInfo::Enumerator {
dx12_vulkan_version_info_active_ = false;
}
+ void BeginOverlayInfo() override { overlay_info_active_ = true; }
+
+ void EndOverlayInfo() override { overlay_info_active_ = false; }
+
void BeginAuxAttributes() override { aux_attributes_active_ = true; }
void EndAuxAttributes() override { aux_attributes_active_ = false; }
@@ -101,6 +106,7 @@ class TestGPUInfoEnumerator : public gpu::GPUInfo::Enumerator {
bool video_encode_accelerator_profile_active_;
bool image_decode_accelerator_profile_active_;
bool dx12_vulkan_version_info_active_;
+ bool overlay_info_active_;
bool aux_attributes_active_;
};
} // namespace
diff --git a/chromium/gpu/config/gpu_lists_version.h b/chromium/gpu/config/gpu_lists_version.h
index 410068f80c7..fc4c21b817c 100644
--- a/chromium/gpu/config/gpu_lists_version.h
+++ b/chromium/gpu/config/gpu_lists_version.h
@@ -3,6 +3,6 @@
#ifndef GPU_CONFIG_GPU_LISTS_VERSION_H_
#define GPU_CONFIG_GPU_LISTS_VERSION_H_
-#define GPU_LISTS_VERSION "e7fbe071abe9328cdce4ffedac9822435fbd3656"
+#define GPU_LISTS_VERSION "5277168d1691ce40fc04fb4adefd89d8eb39c81e"
#endif // GPU_CONFIG_GPU_LISTS_VERSION_H_
diff --git a/chromium/gpu/config/gpu_mode.h b/chromium/gpu/config/gpu_mode.h
index 368be7ad252..a14a9cb49ee 100644
--- a/chromium/gpu/config/gpu_mode.h
+++ b/chromium/gpu/config/gpu_mode.h
@@ -10,13 +10,18 @@ namespace gpu {
// What the GPU process is running for.
enum class GpuMode {
UNKNOWN,
- // The GPU process is running with hardare acceleration.
- HARDWARE_ACCELERATED,
+ // The GPU process is running with hardware acceleration, using only GL.
+ HARDWARE_GL,
+ // The GPU process is running with hardware acceleration, using Metal and GL.
+ HARDWARE_METAL,
+ // The GPU process is running with hardware acceleration, using Vulkan and GL.
+ HARDWARE_VULKAN,
// The GPU process is running for SwiftShader WebGL.
SWIFTSHADER,
- // The GPU process is running for the display compositor (OOP-D only).
+ // The GPU process is running for the display compositor.
DISPLAY_COMPOSITOR,
- // The GPU process is disabled and won't start (not OOP-D only).
+ // The GPU process is disabled and won't start. This is only used on Windows
+ // when GPU acceleration and SwiftShader are both disabled.
DISABLED,
};
diff --git a/chromium/gpu/config/gpu_preferences.h b/chromium/gpu/config/gpu_preferences.h
index b3bfe5256b7..b14f8e42186 100644
--- a/chromium/gpu/config/gpu_preferences.h
+++ b/chromium/gpu/config/gpu_preferences.h
@@ -135,8 +135,14 @@ struct GPU_EXPORT GpuPreferences {
// Enforce GL minimums.
bool enforce_gl_minimums = false;
- // Sets the total amount of memory that may be allocated for GPU resources
- uint32_t force_gpu_mem_available = 0;
+ // Sets the total amount of memory that may be allocated for GPU resources.
+ uint32_t force_gpu_mem_available_bytes = 0u;
+
+ // Sets the maximum discardable cache size limit for GPU resources.
+ uint32_t force_gpu_mem_discardable_limit_bytes = 0u;
+
+ // Sets maximum texture size.
+ uint32_t force_max_texture_size = 0u;
// Sets the maximum size of the in-memory gpu program cache, in kb
uint32_t gpu_program_cache_size = kDefaultMaxProgramCacheMemoryBytes;
@@ -233,11 +239,27 @@ struct GPU_EXPORT GpuPreferences {
// Enable measuring blocked time on GPU Main thread
bool enable_gpu_blocked_time_metric = false;
+ // Enable collecting perf data for device categorization purposes. Currently
+ // only enabled on Windows for the info collection GPU process.
+ bool enable_perf_data_collection = false;
+
#if defined(USE_OZONE)
// Determines message pump type for the GPU thread.
base::MessagePumpType message_pump_type = base::MessagePumpType::DEFAULT;
#endif
+ // ===================================
+ // Settings from //ui/gfx/switches.h
+
+ // Enable native CPU-mappable GPU memory buffer support on Linux.
+ bool enable_native_gpu_memory_buffers = false;
+
+ // ===================================
+ // Settings from //media/base/media_switches.h
+
+ // Forcibly disable the new accelerated VideoDecoder.
+ bool force_disable_new_accelerated_video_decoder = false;
+
// Please update gpu_preferences_unittest.cc when making additions or
// changes to this struct.
};
diff --git a/chromium/gpu/config/gpu_preferences_unittest.cc b/chromium/gpu/config/gpu_preferences_unittest.cc
index c24a7205d5b..90a6b72f5bb 100644
--- a/chromium/gpu/config/gpu_preferences_unittest.cc
+++ b/chromium/gpu/config/gpu_preferences_unittest.cc
@@ -46,7 +46,10 @@ void CheckGpuPreferencesEqual(GpuPreferences left, GpuPreferences right) {
right.enable_gpu_driver_debug_logging);
EXPECT_EQ(left.disable_gpu_program_cache, right.disable_gpu_program_cache);
EXPECT_EQ(left.enforce_gl_minimums, right.enforce_gl_minimums);
- EXPECT_EQ(left.force_gpu_mem_available, right.force_gpu_mem_available);
+ EXPECT_EQ(left.force_gpu_mem_available_bytes,
+ right.force_gpu_mem_available_bytes);
+ EXPECT_EQ(left.force_gpu_mem_discardable_limit_bytes,
+ right.force_gpu_mem_discardable_limit_bytes);
EXPECT_EQ(left.gpu_program_cache_size, right.gpu_program_cache_size);
EXPECT_EQ(left.disable_gpu_shader_disk_cache,
right.disable_gpu_shader_disk_cache);
@@ -74,9 +77,15 @@ void CheckGpuPreferencesEqual(GpuPreferences left, GpuPreferences right) {
EXPECT_EQ(left.enable_webgpu, right.enable_webgpu);
EXPECT_EQ(left.enable_gpu_blocked_time_metric,
right.enable_gpu_blocked_time_metric);
+ EXPECT_EQ(left.enable_perf_data_collection,
+ right.enable_perf_data_collection);
#if defined(USE_OZONE)
EXPECT_EQ(left.message_pump_type, right.message_pump_type);
#endif
+ EXPECT_EQ(left.enable_native_gpu_memory_buffers,
+ right.enable_native_gpu_memory_buffers);
+ EXPECT_EQ(left.force_disable_new_accelerated_video_decoder,
+ right.force_disable_new_accelerated_video_decoder);
}
} // namespace
@@ -138,7 +147,8 @@ TEST(GpuPreferencesTest, EncodeDecode) {
GPU_PREFERENCES_FIELD(enable_gpu_driver_debug_logging, true)
GPU_PREFERENCES_FIELD(disable_gpu_program_cache, true)
GPU_PREFERENCES_FIELD(enforce_gl_minimums, true)
- GPU_PREFERENCES_FIELD(force_gpu_mem_available, 4096)
+ GPU_PREFERENCES_FIELD(force_gpu_mem_available_bytes, 4096)
+ GPU_PREFERENCES_FIELD(force_gpu_mem_discardable_limit_bytes, 8092)
GPU_PREFERENCES_FIELD(gpu_program_cache_size,
kDefaultMaxProgramCacheMemoryBytes - 1)
GPU_PREFERENCES_FIELD(disable_gpu_shader_disk_cache, true)
@@ -162,10 +172,13 @@ TEST(GpuPreferencesTest, EncodeDecode) {
GPU_PREFERENCES_FIELD(enable_gpu_benchmarking_extension, true)
GPU_PREFERENCES_FIELD(enable_webgpu, true)
GPU_PREFERENCES_FIELD(enable_gpu_blocked_time_metric, true)
+ GPU_PREFERENCES_FIELD(enable_perf_data_collection, true)
#if defined(USE_OZONE)
GPU_PREFERENCES_FIELD_ENUM(message_pump_type, base::MessagePumpType::UI,
base::MessagePumpType::UI)
#endif
+ GPU_PREFERENCES_FIELD(enable_native_gpu_memory_buffers, true);
+ GPU_PREFERENCES_FIELD(force_disable_new_accelerated_video_decoder, true);
input_prefs.texture_target_exception_list.emplace_back(
gfx::BufferUsage::SCANOUT, gfx::BufferFormat::RGBA_8888);
diff --git a/chromium/gpu/config/gpu_switches.cc b/chromium/gpu/config/gpu_switches.cc
index a66259c4727..26a018cbd00 100644
--- a/chromium/gpu/config/gpu_switches.cc
+++ b/chromium/gpu/config/gpu_switches.cc
@@ -7,12 +7,11 @@
namespace switches {
// Disable GPU rasterization, i.e. rasterize on the CPU only.
-// Overrides the kEnableGpuRasterization and kForceGpuRasterization flags.
+// Overrides the kEnableGpuRasterization flag.
const char kDisableGpuRasterization[] = "disable-gpu-rasterization";
// Allow heuristics to determine when a layer tile should be drawn with the
-// Skia GPU backend. Only valid with GPU accelerated compositing +
-// impl-side painting.
+// Skia GPU backend. Only valid with GPU accelerated compositing.
const char kEnableGpuRasterization[] = "enable-gpu-rasterization";
// Select a different set of GPU blacklist entries with the specificed
diff --git a/chromium/gpu/config/gpu_test_config.cc b/chromium/gpu/config/gpu_test_config.cc
index 776cd489a91..50714b8675f 100644
--- a/chromium/gpu/config/gpu_test_config.cc
+++ b/chromium/gpu/config/gpu_test_config.cc
@@ -43,6 +43,7 @@ GPUTestConfig::OS GetCurrentOS() {
return GPUTestConfig::kOsWin8;
if (major_version == 10)
return GPUTestConfig::kOsWin10;
+ return GPUTestConfig::kOsUnknown;
#elif defined(OS_MACOSX)
int32_t major_version = 0;
int32_t minor_version = 0;
@@ -75,12 +76,14 @@ GPUTestConfig::OS GetCurrentOS() {
return GPUTestConfig::kOsMacCatalina;
}
}
+ return GPUTestConfig::kOsUnknown;
#elif defined(OS_ANDROID)
return GPUTestConfig::kOsAndroid;
#elif defined(OS_FUCHSIA)
return GPUTestConfig::kOsFuchsia;
+#else
+#error "unknown os"
#endif
- return GPUTestConfig::kOsUnknown;
}
} // namespace anonymous
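GetCurrentOS() above now returns kOsUnknown from inside each platform branch and turns an unsupported platform into a compile-time failure via #error, instead of silently falling through at run time. A tiny sketch of the pattern; Os and CurrentOs are illustrative names, not Chromium API.

  // Illustrative names only.
  enum class Os { kWin, kAndroid };

  Os CurrentOs() {
  #if defined(OS_WIN)
    return Os::kWin;
  #elif defined(OS_ANDROID)
    return Os::kAndroid;
  #else
  #error "unknown os"  // Unsupported platforms fail to compile.
  #endif
  }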
diff --git a/chromium/gpu/config/gpu_util.cc b/chromium/gpu/config/gpu_util.cc
index 05f01258f29..e5bfb157700 100644
--- a/chromium/gpu/config/gpu_util.cc
+++ b/chromium/gpu/config/gpu_util.cc
@@ -4,16 +4,28 @@
#include "gpu/config/gpu_util.h"
+#if defined(OS_WIN)
+#include <windows.h>
+// Must be included after windows.h.
+#include <psapi.h>
+#endif // OS_WIN
+
#include <memory>
#include <set>
#include <string>
+#include <thread>
#include <vector>
+#include "base/base_paths.h"
#include "base/command_line.h"
+#include "base/metrics/histogram_macros.h"
+#include "base/path_service.h"
#include "base/strings/string_number_conversions.h"
#include "base/strings/string_split.h"
#include "base/strings/stringprintf.h"
+#include "base/system/sys_info.h"
#include "build/build_config.h"
+#include "gpu/config/device_perf_info.h"
#include "gpu/config/gpu_blocklist.h"
#include "gpu/config/gpu_crash_keys.h"
#include "gpu/config/gpu_driver_bug_list.h"
@@ -34,6 +46,7 @@
#include "base/no_destructor.h"
#include "base/synchronization/lock.h"
#include "ui/gl/android/android_surface_control_compat.h"
+#include "ui/gl/gl_surface_egl.h"
#include "ui/gl/init/gl_factory.h"
#endif // OS_ANDROID
@@ -41,18 +54,71 @@ namespace gpu {
namespace {
+#if defined(OS_WIN)
+// These values are persisted to logs. Entries should not be renumbered and
+// numeric values should never be reused.
+// This should match enum D3D11FeatureLevel in
+// \tools\metrics\histograms\enums.xml
+enum class D3D11FeatureLevel {
+ kUnknown = 0,
+ k9_1 = 4,
+ k9_2 = 5,
+ k9_3 = 6,
+ k10_0 = 7,
+ k10_1 = 8,
+ k11_0 = 9,
+ k11_1 = 10,
+ k12_0 = 11,
+ k12_1 = 12,
+ kMaxValue = k12_1,
+};
+
+inline D3D11FeatureLevel ConvertToHistogramD3D11FeatureLevel(
+ D3D_FEATURE_LEVEL d3d11_feature_level) {
+ switch (d3d11_feature_level) {
+ case D3D_FEATURE_LEVEL_1_0_CORE:
+ return D3D11FeatureLevel::kUnknown;
+ case D3D_FEATURE_LEVEL_9_1:
+ return D3D11FeatureLevel::k9_1;
+ case D3D_FEATURE_LEVEL_9_2:
+ return D3D11FeatureLevel::k9_2;
+ case D3D_FEATURE_LEVEL_9_3:
+ return D3D11FeatureLevel::k9_3;
+ case D3D_FEATURE_LEVEL_10_0:
+ return D3D11FeatureLevel::k10_0;
+ case D3D_FEATURE_LEVEL_10_1:
+ return D3D11FeatureLevel::k10_1;
+ case D3D_FEATURE_LEVEL_11_0:
+ return D3D11FeatureLevel::k11_0;
+ case D3D_FEATURE_LEVEL_11_1:
+ return D3D11FeatureLevel::k11_1;
+ case D3D_FEATURE_LEVEL_12_0:
+ return D3D11FeatureLevel::k12_0;
+ case D3D_FEATURE_LEVEL_12_1:
+ return D3D11FeatureLevel::k12_1;
+ default:
+ NOTREACHED();
+ return D3D11FeatureLevel::kUnknown;
+ }
+}
+#endif // OS_WIN
+
GpuFeatureStatus GetAndroidSurfaceControlFeatureStatus(
const std::set<int>& blacklisted_features,
const GpuPreferences& gpu_preferences) {
#if !defined(OS_ANDROID)
return kGpuFeatureStatusDisabled;
#else
- if (blacklisted_features.count(GPU_FEATURE_TYPE_ANDROID_SURFACE_CONTROL))
- return kGpuFeatureStatusBlacklisted;
-
if (!gpu_preferences.enable_android_surface_control)
return kGpuFeatureStatusDisabled;
+ // SurfaceControl as used by Chrome requires using GpuFence for
+ // synchronization, this is based on Android native fence sync
+ // support. If that is unavailable, i.e. on emulator or SwiftShader,
+ // don't claim SurfaceControl support.
+ if (!gl::GLSurfaceEGL::IsAndroidNativeFenceSyncSupported())
+ return kGpuFeatureStatusDisabled;
+
DCHECK(gl::SurfaceControl::IsSupported());
return kGpuFeatureStatusEnabled;
#endif
@@ -291,13 +357,57 @@ void AdjustGpuFeatureStatusToWorkarounds(GpuFeatureInfo* gpu_feature_info) {
gpu_feature_info->status_values[GPU_FEATURE_TYPE_ACCELERATED_WEBGL2] =
kGpuFeatureStatusBlacklisted;
}
+}
- if (gpu_feature_info->IsWorkaroundEnabled(DISABLE_AIMAGEREADER)) {
- gpu_feature_info->status_values[GPU_FEATURE_TYPE_ANDROID_SURFACE_CONTROL] =
- kGpuFeatureStatusBlacklisted;
+// Roughly estimates the user's total disk space by summing the drives that
+// hold the executable, the temporary directory, and the user's home directory.
+// If two paths report the same total and free space, they are treated as
+// the same drive. In corner cases this estimate can be far from the
+// actual total disk space, but for histogram purposes a limited number
+// of outliers does not matter.
+uint32_t EstimateAmountOfTotalDiskSpaceMB() {
+ const base::BasePathKey kPathKeys[] = {base::DIR_EXE, base::DIR_TEMP,
+ base::DIR_HOME};
+ std::vector<uint32_t> total_space_vector, free_space_vector;
+ uint32_t sum = 0;
+ for (const auto& path_key : kPathKeys) {
+ base::FilePath path;
+ if (base::PathService::Get(path_key, &path)) {
+ uint32_t total_space = static_cast<uint32_t>(
+ base::SysInfo::AmountOfTotalDiskSpace(path) / 1024 / 1024);
+ uint32_t free_space = static_cast<uint32_t>(
+ base::SysInfo::AmountOfFreeDiskSpace(path) / 1024 / 1024);
+ bool duplicated = false;
+ for (size_t ii = 0; ii < total_space_vector.size(); ++ii) {
+ if (total_space == total_space_vector[ii] &&
+ free_space == free_space_vector[ii]) {
+ duplicated = true;
+ break;
+ }
+ }
+ if (!duplicated) {
+ total_space_vector.push_back(total_space);
+ free_space_vector.push_back(free_space);
+ sum += total_space;
+ }
+ }
}
+ return sum;
}
+#if defined(OS_WIN)
+uint32_t GetSystemCommitLimitMb() {
+ PERFORMANCE_INFORMATION perf_info = {sizeof(perf_info)};
+ if (::GetPerformanceInfo(&perf_info, sizeof(perf_info))) {
+ uint64_t limit = perf_info.CommitLimit;
+ limit *= perf_info.PageSize;
+ limit /= 1024 * 1024;
+ return static_cast<uint32_t>(limit);
+ }
+ return 0u;
+}
+#endif // OS_WIN
+
GPUInfo* g_gpu_info_cache = nullptr;
GpuFeatureInfo* g_gpu_feature_info_cache = nullptr;
@@ -681,7 +791,8 @@ bool EnableSwiftShaderIfNeeded(base::CommandLine* command_line,
#endif
}
-GpuSeriesType GetGpuSeriesType(uint32_t vendor_id, uint32_t device_id) {
+IntelGpuSeriesType GetIntelGpuSeriesType(uint32_t vendor_id,
+ uint32_t device_id) {
// Note that this function's output should only depend on vendor_id and
// device_id of a GPU. This is because we record a histogram on the output
// and we don't want to expose an extra bit other than the already recorded
@@ -699,87 +810,87 @@ GpuSeriesType GetGpuSeriesType(uint32_t vendor_id, uint32_t device_id) {
case 0x0100:
case 0x0110:
case 0x0120:
- return GpuSeriesType::kIntelSandybridge;
+ return IntelGpuSeriesType::kSandybridge;
case 0x0150:
if (device_id == 0x0155 || device_id == 0x0157)
- return GpuSeriesType::kIntelBaytrail;
+ return IntelGpuSeriesType::kBaytrail;
if (device_id == 0x0152 || device_id == 0x015A
|| device_id == 0x0156)
- return GpuSeriesType::kIntelIvybridge;
+ return IntelGpuSeriesType::kIvybridge;
break;
case 0x0160:
- return GpuSeriesType::kIntelIvybridge;
+ return IntelGpuSeriesType::kIvybridge;
default:
break;
}
break;
case 0x0F00:
- return GpuSeriesType::kIntelBaytrail;
+ return IntelGpuSeriesType::kBaytrail;
case 0x0400:
case 0x0A00:
case 0x0C00:
case 0x0D00:
- return GpuSeriesType::kIntelHaswell;
+ return IntelGpuSeriesType::kHaswell;
case 0x2200:
- return GpuSeriesType::kIntelCherrytrail;
+ return IntelGpuSeriesType::kCherrytrail;
case 0x1600:
- return GpuSeriesType::kIntelBroadwell;
+ return IntelGpuSeriesType::kBroadwell;
case 0x5A00:
if (device_id == 0x5A85 || device_id == 0x5A84)
- return GpuSeriesType::kIntelApollolake;
- return GpuSeriesType::kIntelCannonlake;
+ return IntelGpuSeriesType::kApollolake;
+ return IntelGpuSeriesType::kCannonlake;
case 0x1900:
- return GpuSeriesType::kIntelSkylake;
+ return IntelGpuSeriesType::kSkylake;
case 0x3100:
- return GpuSeriesType::kIntelGeminilake;
+ return IntelGpuSeriesType::kGeminilake;
case 0x5900:
- return GpuSeriesType::kIntelKabylake;
+ return IntelGpuSeriesType::kKabylake;
case 0x8700:
if (device_id == 0x87C0)
- return GpuSeriesType::kIntelKabylake;
+ return IntelGpuSeriesType::kKabylake;
if (device_id == 0x87CA)
- return GpuSeriesType::kIntelCoffeelake;
+ return IntelGpuSeriesType::kCoffeelake;
break;
case 0x3E00:
if (device_id == 0x3EA0 || device_id == 0x3EA1 || device_id == 0x3EA2
|| device_id == 0x3EA4 || device_id == 0x3EA3)
- return GpuSeriesType::kIntelWhiskeylake;
- return GpuSeriesType::kIntelCoffeelake;
+ return IntelGpuSeriesType::kWhiskeylake;
+ return IntelGpuSeriesType::kCoffeelake;
case 0x9B00:
- return GpuSeriesType::kIntelCometlake;
+ return IntelGpuSeriesType::kCometlake;
case 0x8A00:
- return GpuSeriesType::kIntelIcelake;
+ return IntelGpuSeriesType::kIcelake;
default:
break;
}
}
- return GpuSeriesType::kUnknown;
+ return IntelGpuSeriesType::kUnknown;
}
std::string GetIntelGpuGeneration(uint32_t vendor_id, uint32_t device_id) {
if (vendor_id == 0x8086) {
- GpuSeriesType gpu_series = GetGpuSeriesType(vendor_id, device_id);
+ IntelGpuSeriesType gpu_series = GetIntelGpuSeriesType(vendor_id, device_id);
switch (gpu_series) {
- case GpuSeriesType::kIntelSandybridge:
+ case IntelGpuSeriesType::kSandybridge:
return "6";
- case GpuSeriesType::kIntelBaytrail:
- case GpuSeriesType::kIntelIvybridge:
- case GpuSeriesType::kIntelHaswell:
+ case IntelGpuSeriesType::kBaytrail:
+ case IntelGpuSeriesType::kIvybridge:
+ case IntelGpuSeriesType::kHaswell:
return "7";
- case GpuSeriesType::kIntelCherrytrail:
- case GpuSeriesType::kIntelBroadwell:
+ case IntelGpuSeriesType::kCherrytrail:
+ case IntelGpuSeriesType::kBroadwell:
return "8";
- case GpuSeriesType::kIntelApollolake:
- case GpuSeriesType::kIntelSkylake:
- case GpuSeriesType::kIntelGeminilake:
- case GpuSeriesType::kIntelKabylake:
- case GpuSeriesType::kIntelCoffeelake:
- case GpuSeriesType::kIntelWhiskeylake:
- case GpuSeriesType::kIntelCometlake:
+ case IntelGpuSeriesType::kApollolake:
+ case IntelGpuSeriesType::kSkylake:
+ case IntelGpuSeriesType::kGeminilake:
+ case IntelGpuSeriesType::kKabylake:
+ case IntelGpuSeriesType::kCoffeelake:
+ case IntelGpuSeriesType::kWhiskeylake:
+ case IntelGpuSeriesType::kCometlake:
return "9";
- case GpuSeriesType::kIntelCannonlake:
+ case IntelGpuSeriesType::kCannonlake:
return "10";
- case GpuSeriesType::kIntelIcelake:
+ case IntelGpuSeriesType::kIcelake:
return "11";
default:
break;
@@ -788,6 +899,79 @@ std::string GetIntelGpuGeneration(uint32_t vendor_id, uint32_t device_id) {
return "";
}
+IntelGpuGeneration GetIntelGpuGeneration(const GPUInfo& gpu_info) {
+ const uint32_t kIntelVendorId = 0x8086;
+ IntelGpuGeneration latest = IntelGpuGeneration::kNonIntel;
+ std::vector<uint32_t> intel_device_ids;
+ if (gpu_info.gpu.vendor_id == kIntelVendorId)
+ intel_device_ids.push_back(gpu_info.gpu.device_id);
+ for (const auto& gpu : gpu_info.secondary_gpus) {
+ if (gpu.vendor_id == kIntelVendorId)
+ intel_device_ids.push_back(gpu.device_id);
+ }
+ if (intel_device_ids.empty())
+ return latest;
+ latest = IntelGpuGeneration::kUnknownIntel;
+ for (uint32_t device_id : intel_device_ids) {
+ std::string gen_str = gpu::GetIntelGpuGeneration(kIntelVendorId, device_id);
+ int gen_int = 0;
+ if (gen_str.empty() || !base::StringToInt(gen_str, &gen_int))
+ continue;
+ DCHECK_GE(gen_int, static_cast<int>(IntelGpuGeneration::kUnknownIntel));
+ DCHECK_LE(gen_int, static_cast<int>(IntelGpuGeneration::kMaxValue));
+ if (gen_int > static_cast<int>(latest))
+ latest = static_cast<IntelGpuGeneration>(gen_int);
+ }
+ return latest;
+}
+
+void CollectDevicePerfInfo(DevicePerfInfo* device_perf_info,
+ bool in_browser_process) {
+ DCHECK(device_perf_info);
+ device_perf_info->total_physical_memory_mb =
+ static_cast<uint32_t>(base::SysInfo::AmountOfPhysicalMemoryMB());
+ if (!in_browser_process)
+ device_perf_info->total_disk_space_mb = EstimateAmountOfTotalDiskSpaceMB();
+ device_perf_info->hardware_concurrency =
+ static_cast<uint32_t>(std::thread::hardware_concurrency());
+
+#if defined(OS_WIN)
+ device_perf_info->system_commit_limit_mb = GetSystemCommitLimitMb();
+ if (!in_browser_process) {
+ D3D_FEATURE_LEVEL d3d11_feature_level = D3D_FEATURE_LEVEL_1_0_CORE;
+ bool has_discrete_gpu = false;
+ if (CollectD3D11FeatureInfo(&d3d11_feature_level, &has_discrete_gpu)) {
+ device_perf_info->d3d11_feature_level = d3d11_feature_level;
+ device_perf_info->has_discrete_gpu =
+ has_discrete_gpu ? HasDiscreteGpu::kYes : HasDiscreteGpu::kNo;
+ }
+ }
+#endif
+}
+
+void RecordDevicePerfInfoHistograms() {
+ base::Optional<DevicePerfInfo> device_perf_info = GetDevicePerfInfo();
+ if (!device_perf_info.has_value())
+ return;
+ UMA_HISTOGRAM_COUNTS_1000("Hardware.TotalDiskSpace",
+ device_perf_info->total_disk_space_mb / 1024);
+ UMA_HISTOGRAM_COUNTS_100("Hardware.Concurrency",
+ device_perf_info->hardware_concurrency);
+#if defined(OS_WIN)
+ UMA_HISTOGRAM_COUNTS_100("Memory.Total.SystemCommitLimit",
+ device_perf_info->system_commit_limit_mb / 1024);
+ UMA_HISTOGRAM_ENUMERATION("GPU.D3D11FeatureLevel",
+ ConvertToHistogramD3D11FeatureLevel(
+ device_perf_info->d3d11_feature_level));
+ UMA_HISTOGRAM_ENUMERATION("GPU.HasDiscreteGpu",
+ device_perf_info->has_discrete_gpu);
+#endif // OS_WIN
+ UMA_HISTOGRAM_ENUMERATION("GPU.IntelGpuGeneration",
+ device_perf_info->intel_gpu_generation);
+ UMA_HISTOGRAM_BOOLEAN("GPU.SoftwareRendering",
+ device_perf_info->software_rendering);
+}
+
#if defined(OS_WIN)
std::string D3DFeatureLevelToString(uint32_t d3d_feature_level) {
if (d3d_feature_level == 0) {
@@ -819,6 +1003,8 @@ VulkanVersion ConvertToHistogramVulkanVersion(uint32_t vulkan_version) {
return VulkanVersion::kVulkanVersion_1_0_0;
case VK_MAKE_VERSION(1, 1, 0):
return VulkanVersion::kVulkanVersion_1_1_0;
+ case VK_MAKE_VERSION(1, 2, 0):
+ return VulkanVersion::kVulkanVersion_1_2_0;
default:
NOTREACHED();
return VulkanVersion::kVulkanVersionUnknown;
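The new VK_MAKE_VERSION(1, 2, 0) case above works because VK_MAKE_VERSION packs major/minor/patch into a single uint32_t, so each packed value is a distinct switch label. A worked sketch of the packing; MakeVulkanVersion is a local re-implementation for illustration, not the vulkan.h macro itself.

  #include <cstdint>

  // Same packing as Vulkan's VK_MAKE_VERSION:
  // (major << 22) | (minor << 12) | patch.
  constexpr uint32_t MakeVulkanVersion(uint32_t major, uint32_t minor,
                                       uint32_t patch) {
    return (major << 22) | (minor << 12) | patch;
  }

  static_assert(MakeVulkanVersion(1, 2, 0) == 0x402000u,
                "Vulkan 1.2.0 packs to 0x402000 (4202496).");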
diff --git a/chromium/gpu/config/gpu_util.h b/chromium/gpu/config/gpu_util.h
index 2e3c85026ec..b4086559520 100644
--- a/chromium/gpu/config/gpu_util.h
+++ b/chromium/gpu/config/gpu_util.h
@@ -15,9 +15,11 @@ class CommandLine;
namespace gpu {
+struct DevicePerfInfo;
struct GPUInfo;
struct GpuPreferences;
-enum class GpuSeriesType;
+enum class IntelGpuSeriesType;
+enum class IntelGpuGeneration;
// Set GPU feature status if hardware acceleration is disabled.
GPU_EXPORT GpuFeatureInfo
@@ -74,12 +76,22 @@ GPU_EXPORT bool EnableSwiftShaderIfNeeded(
bool disable_software_rasterizer,
bool blacklist_needs_more_info);
-GPU_EXPORT GpuSeriesType GetGpuSeriesType(uint32_t vendor_id,
- uint32_t device_id);
+GPU_EXPORT IntelGpuSeriesType GetIntelGpuSeriesType(uint32_t vendor_id,
+ uint32_t device_id);
GPU_EXPORT std::string GetIntelGpuGeneration(uint32_t vendor_id,
uint32_t device_id);
+// If multiple Intel GPUs are detected, this returns the latest generation.
+GPU_EXPORT IntelGpuGeneration GetIntelGpuGeneration(const GPUInfo& gpu_info);
+
+// If this function is called in the browser process (|in_browser_process| is
+// set to true), total disk space (which may block) and D3D-related info are
+// not collected.
+GPU_EXPORT void CollectDevicePerfInfo(DevicePerfInfo* device_perf_info,
+ bool in_browser_process);
+GPU_EXPORT void RecordDevicePerfInfoHistograms();
+
#if defined(OS_WIN)
GPU_EXPORT std::string D3DFeatureLevelToString(uint32_t d3d_feature_level);
GPU_EXPORT std::string VulkanVersionToString(uint32_t vulkan_version);
@@ -92,7 +104,8 @@ enum class VulkanVersion {
kVulkanVersionUnknown = 0,
kVulkanVersion_1_0_0 = 1,
kVulkanVersion_1_1_0 = 2,
- kMaxValue = kVulkanVersion_1_1_0,
+ kVulkanVersion_1_2_0 = 3,
+ kMaxValue = kVulkanVersion_1_2_0,
};
GPU_EXPORT VulkanVersion
diff --git a/chromium/gpu/config/gpu_workaround_list.txt b/chromium/gpu/config/gpu_workaround_list.txt
index 70751969b07..54637af55a9 100644
--- a/chromium/gpu/config/gpu_workaround_list.txt
+++ b/chromium/gpu/config/gpu_workaround_list.txt
@@ -13,24 +13,27 @@ decode_encode_srgb_for_generatemipmap
depth_stencil_renderbuffer_resize_emulation
disable_2d_canvas_auto_flush
disable_accelerated_vpx_decode
-disable_aimagereader
disable_async_readpixels
disable_av_sample_buffer_display_layer
disable_blend_equation_advanced
disable_chromium_framebuffer_multisample
disable_d3d11
+disable_d3d11_video_decoder
disable_delayed_copy_nv12
disable_depth_texture
disable_direct_composition
disable_direct_composition_video_overlays
disable_discard_framebuffer
+disable_dual_source_blending_support
disable_dxgi_zero_copy_video
+disable_dxva_video_decoder
disable_es3_gl_context
disable_es3_gl_context_for_testing
disable_ext_draw_buffers
disable_gl_rgb_format
+disable_half_float_for_gmb
disable_larger_than_screen_overlays
-disable_non_empty_post_sub_buffers_for_onscreen_surfaces
+disable_multisampling_color_mask_usage
disable_nv12_dxgi_video
disable_nv12_dynamic_textures
disable_overlay_ca_layers
@@ -38,22 +41,23 @@ disable_post_sub_buffers_for_onscreen_surfaces
disable_program_cache
disable_program_caching_for_transform_feedback
disable_program_disk_cache
+disable_qcomm_tiled_rendering
disable_software_to_accelerated_canvas_upgrade
disable_texture_cube_map_seamless
disable_texture_storage
disable_timestamp_queries
-disable_multisampling_color_mask_usage
disable_vp_scaling
disable_webgl_rgb_multisampling_usage
disallow_large_instanced_draw
+dont_delete_source_texture_for_egl_image
dont_disable_webgl_when_compositor_context_lost
dont_initialize_uninitialized_locals
dont_remove_invariant_for_fragment_input
dont_use_eglclientwaitsync_with_timeout
dont_use_loops_to_initialize_variables
-etc1_power_of_two_only
emulate_abs_int_function
emulate_isnan_on_float
+etc1_power_of_two_only
exit_on_context_lost
flush_on_framebuffer_change
force_cube_complete
@@ -71,6 +75,8 @@ init_one_cube_map_level_before_copyteximage
init_texture_max_anisotropy
init_two_cube_map_levels_before_copyteximage
init_vertex_attributes
+limit_d3d11_video_decoder_to_11_0
+max_3d_array_texture_size_1024
max_copy_texture_chromium_size_1048576
max_copy_texture_chromium_size_262144
max_msaa_sample_count_2
@@ -80,9 +86,11 @@ msaa_is_slow
multisample_renderbuffer_resize_emulation
needs_offscreen_buffer_workaround
pack_parameters_workaround_with_pack_buffer
+prefer_draw_to_copy
rebind_transform_feedback_before_resume
regenerate_struct_names
rely_on_implicit_sync_for_swap_buffers
+remove_dynamic_indexing_of_swizzled_vector
remove_invariant_and_centroid_for_essl3
remove_pow_with_constant_exponent
reset_base_mipmap_level_before_texstorage
@@ -103,6 +111,9 @@ unpack_alignment_workaround_with_unpack_buffer
unpack_image_height_workaround_with_unpack_buffer
unpack_overlapping_rows_separately_unpack_buffer
use_client_side_arrays_for_stream_buffers
+use_copyteximage2d_instead_of_readpixels_on_multisampled_textures
+use_empty_video_hdr_metadata
+use_eqaa_storage_samples_2
use_es2_for_oopr
use_gpu_driver_workaround_for_testing
use_intermediary_for_copy_texture_image
@@ -111,10 +122,3 @@ use_unused_standard_shared_blocks
use_virtualized_gl_contexts
validate_multisample_buffer_allocation
wake_up_gpu_before_drawing
-use_copyteximage2d_instead_of_readpixels_on_multisampled_textures
-use_eqaa_storage_samples_2
-max_3d_array_texture_size_1024
-disable_half_float_for_gmb
-prefer_draw_to_copy
-remove_dynamic_indexing_of_swizzled_vector
-disable_dual_source_blending_support
diff --git a/chromium/gpu/config/process_json.py b/chromium/gpu/config/process_json.py
index 1c527687812..15a5e4c3536 100755
--- a/chromium/gpu/config/process_json.py
+++ b/chromium/gpu/config/process_json.py
@@ -36,6 +36,38 @@ _OS_TYPE_MAP = {
'': 'kOsAny',
}
+INTEL_DRIVER_VERSION_SCHEMA = '''
+The version format of Intel graphics drivers is AA.BB.CCC.DDDD.
+DDDD (old schema) or CCC.DDDD (new schema) is the build number; that is,
+it identifies the actual driver build. Comparing a version in the old schema
+with one in the new schema is NOT valid. In that case the only comparison
+operator that returns true is "not equal".
+
+AA.BB: You are free to specify the real numbers here, but they are meaningless
+when comparing two version numbers. Usually it's okay to leave them as "0.0".
+
+CCC: It's required for the new schema. For the old schema, you can specify
+the real number or any number less than 100 in order to differentiate it from
+the new schema.
+
+DDDD: It's always meaningful. It must not be "0" under the old schema.
+
+Legal: "24.20.100.7000", "0.0.100.7000", "0.0.0.7000", "0.0.100.0"
+Illegal: "24.0.0.0", "24.20.0.0", "0.0.99.0"
+'''
+
+
+def check_intel_driver_version(version):
+ ver_list = version.split('.')
+ if len(ver_list) != 4:
+ return False
+ for ver in ver_list:
+ if not ver.isdigit():
+ return False
+ if int(ver_list[2]) < 100 and ver_list[3] == '0':
+ return False
+ return True
+
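The schema above boils down to a four-field numeric check plus one old-schema rule. A minimal sketch of the same rule restated in standalone C++, purely for illustration; the authoritative check remains check_intel_driver_version() in this script:

#include <cctype>
#include <string>
#include <vector>

// Accepts "AA.BB.CCC.DDDD" when all four fields are numeric and, for the old
// schema (CCC < 100), the build number DDDD is not "0".
bool IsValidIntelDriverVersion(const std::string& version) {
  std::vector<std::string> fields;
  std::string current;
  for (char c : version) {
    if (c == '.') {
      fields.push_back(current);
      current.clear();
    } else {
      current.push_back(c);
    }
  }
  fields.push_back(current);
  if (fields.size() != 4)
    return false;
  for (const std::string& field : fields) {
    if (field.empty())
      return false;
    for (char c : field) {
      if (!std::isdigit(static_cast<unsigned char>(c)))
        return false;
    }
  }
  if (std::stoi(fields[2]) < 100 && fields[3] == "0")
    return false;
  return true;
}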
def load_software_rendering_list_features(feature_type_filename):
header_file = open(feature_type_filename, 'r')
@@ -265,6 +297,7 @@ def write_machine_model_info(entry_id, is_exception, exception_id,
data_file, data_helper_file):
model_name_var_name = None
if machine_model_name:
+ assert isinstance(machine_model_name, list)
model_name_var_name = 'kMachineModelNameForEntry' + str(entry_id)
if is_exception:
model_name_var_name += 'Exception' + str(exception_id)
@@ -365,7 +398,7 @@ def write_conditions(entry_id, is_exception, exception_id, entry,
device_id = None
multi_gpu_category = ''
multi_gpu_style = ''
- gpu_series_list = None
+ intel_gpu_series_list = None
intel_gpu_generation = None
driver_vendor = ''
driver_version = None
@@ -425,8 +458,8 @@ def write_conditions(entry_id, is_exception, exception_id, entry,
multi_gpu_category = entry[key]
elif key == 'multi_gpu_style':
multi_gpu_style = entry[key]
- elif key == 'gpu_series':
- gpu_series_list = entry[key]
+ elif key == 'intel_gpu_series':
+ intel_gpu_series_list = entry[key]
elif key == 'intel_gpu_generation':
intel_gpu_generation = entry[key]
elif key == 'driver_vendor':
@@ -484,6 +517,15 @@ def write_conditions(entry_id, is_exception, exception_id, entry,
write_multi_gpu_style(multi_gpu_style, data_file)
# group driver info
if driver_vendor != '' or driver_version != None:
+ if driver_version and os_type == 'win':
+ if (format(vendor_id, '#04x') == '0x8086' or intel_gpu_series_list
+ or intel_gpu_generation or 'Intel' in driver_vendor):
+ if not check_intel_driver_version(driver_version['value']):
+ assert False, INTEL_DRIVER_VERSION_SCHEMA
+ if 'value2' in driver_version:
+ if not check_intel_driver_version(driver_version['value2']):
+ assert False, INTEL_DRIVER_VERSION_SCHEMA
+
write_driver_info(entry_id, is_exception, exception_id, driver_vendor,
driver_version, unique_symbol_id,
data_file, data_helper_file)
@@ -500,8 +542,9 @@ def write_conditions(entry_id, is_exception, exception_id, entry,
write_machine_model_info(entry_id, is_exception, exception_id,
machine_model_name, machine_model_version,
data_file, data_helper_file)
- write_gpu_series_list(entry_id, is_exception, exception_id, gpu_series_list,
- data_file, data_helper_file)
+ write_intel_gpu_series_list(entry_id, is_exception, exception_id,
+ intel_gpu_series_list,
+ data_file, data_helper_file)
write_version(intel_gpu_generation, 'intel_gpu_generation', data_file)
# group a bunch of less used conditions
if (gl_version != None or pixel_shader_version != None or in_process_gpu or
@@ -518,42 +561,43 @@ def write_conditions(entry_id, is_exception, exception_id, entry,
data_file.write('nullptr, // more conditions\n')
-def write_gpu_series_list(entry_id, is_exception, exception_id, gpu_series_list,
- data_file, data_helper_file):
- if gpu_series_list:
- var_name = 'kGpuSeriesForEntry' + str(entry_id)
+def write_intel_gpu_series_list(entry_id, is_exception, exception_id,
+ intel_gpu_series_list,
+ data_file, data_helper_file):
+ if intel_gpu_series_list:
+ var_name = 'kIntelGpuSeriesForEntry' + str(entry_id)
if is_exception:
var_name += 'Exception' + str(exception_id)
- data_helper_file.write('const GpuSeriesType %s[%d] = {\n' %
- (var_name, len(gpu_series_list)))
- gpu_series_map = {
- 'intel_sandybridge': 'kIntelSandybridge',
- 'intel_baytrail': 'kIntelBaytrail',
- 'intel_ivybridge': 'kIntelIvybridge',
- 'intel_haswell': 'kIntelHaswell',
- 'intel_cherrytrail': 'kIntelCherrytrail',
- 'intel_broadwell': 'kIntelBroadwell',
- 'intel_apollolake': 'kIntelApollolake',
- 'intel_skylake': 'kIntelSkylake',
- 'intel_geminilake': 'kIntelGeminilake',
- 'intel_kabylake': 'kIntelKabylake',
- 'intel_coffeelake': 'kIntelCoffeelake',
- 'intel_whiskeylake': 'kIntelWhiskeylake',
- 'intel_cometlake': 'kIntelCometlake',
- 'intel_cannonlake': 'kIntelCannonlake',
- 'intel_icelake': 'kIntelIcelake'
+ data_helper_file.write('const IntelGpuSeriesType %s[%d] = {\n' %
+ (var_name, len(intel_gpu_series_list)))
+ intel_gpu_series_map = {
+ 'sandybridge': 'kSandybridge',
+ 'baytrail': 'kBaytrail',
+ 'ivybridge': 'kIvybridge',
+ 'haswell': 'kHaswell',
+ 'cherrytrail': 'kCherrytrail',
+ 'broadwell': 'kBroadwell',
+ 'apollolake': 'kApollolake',
+ 'skylake': 'kSkylake',
+ 'geminilake': 'kGeminilake',
+ 'kabylake': 'kKabylake',
+ 'coffeelake': 'kCoffeelake',
+ 'whiskeylake': 'kWhiskeylake',
+ 'cometlake': 'kCometlake',
+ 'cannonlake': 'kCannonlake',
+ 'icelake': 'kIcelake'
}
- for series in gpu_series_list:
- assert series in gpu_series_map
- data_helper_file.write('GpuSeriesType::%s,\n' %
- gpu_series_map[series])
+ for series in intel_gpu_series_list:
+ assert series in intel_gpu_series_map
+ data_helper_file.write('IntelGpuSeriesType::%s,\n' %
+ intel_gpu_series_map[series])
data_helper_file.write('};\n\n')
- data_file.write('base::size(%s), // gpu_series size\n' % var_name)
- data_file.write('%s, // gpu_series\n' % var_name)
+ data_file.write('base::size(%s), // intel_gpu_series size\n' % var_name)
+ data_file.write('%s, // intel_gpu_series\n' % var_name)
else:
- data_file.write('0, // gpu_series size\n')
- data_file.write('nullptr, // gpu_series\n')
+ data_file.write('0, // intel_gpu_series size\n')
+ data_file.write('nullptr, // intel_gpu_series\n')
def write_entry_more_data(entry_id, is_exception, exception_id, gl_type,
diff --git a/chromium/gpu/config/skia_limits.cc b/chromium/gpu/config/skia_limits.cc
new file mode 100644
index 00000000000..82fb1dc9a58
--- /dev/null
+++ b/chromium/gpu/config/skia_limits.cc
@@ -0,0 +1,58 @@
+// Copyright 2020 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "gpu/config/skia_limits.h"
+
+#include <inttypes.h>
+
+#include "base/system/sys_info.h"
+#include "build/build_config.h"
+
+namespace gpu {
+
+void DetermineGrCacheLimitsFromAvailableMemory(
+ size_t* max_resource_cache_bytes,
+ size_t* max_glyph_cache_texture_bytes) {
+ // Default limits.
+ constexpr size_t kMaxGaneshResourceCacheBytes = 96 * 1024 * 1024;
+ constexpr size_t kMaxDefaultGlyphCacheTextureBytes = 2048 * 1024 * 4;
+
+ *max_resource_cache_bytes = kMaxGaneshResourceCacheBytes;
+ *max_glyph_cache_texture_bytes = kMaxDefaultGlyphCacheTextureBytes;
+
+// We can't call AmountOfPhysicalMemory under NACL, so leave the default.
+#if !defined(OS_NACL)
+ // The limit of the bytes allocated toward GPU resources in the GrContext's
+ // GPU cache.
+#if defined(OS_FUCHSIA)
+ // Reduce protected budget on fuchsia due to https://fxb/36620.
+ constexpr size_t kMaxLowEndGaneshResourceCacheBytes = 24 * 1024 * 1024;
+#else
+ constexpr size_t kMaxLowEndGaneshResourceCacheBytes = 48 * 1024 * 1024;
+#endif // defined(OS_FUCHSIA)
+ constexpr size_t kMaxHighEndGaneshResourceCacheBytes = 256 * 1024 * 1024;
+ // Limits for glyph cache textures.
+ constexpr size_t kMaxLowEndGlyphCacheTextureBytes = 1024 * 512 * 4;
+ // High-end / low-end memory cutoffs.
+ constexpr int64_t kHighEndMemoryThreshold = 4096LL * 1024 * 1024;
+
+ if (base::SysInfo::IsLowEndDevice()) {
+ *max_resource_cache_bytes = kMaxLowEndGaneshResourceCacheBytes;
+ *max_glyph_cache_texture_bytes = kMaxLowEndGlyphCacheTextureBytes;
+ } else if (base::SysInfo::AmountOfPhysicalMemory() >=
+ kHighEndMemoryThreshold) {
+ *max_resource_cache_bytes = kMaxHighEndGaneshResourceCacheBytes;
+ }
+#endif
+}
+
+void DefaultGrCacheLimitsForTests(size_t* max_resource_cache_bytes,
+ size_t* max_glyph_cache_texture_bytes) {
+ constexpr size_t kDefaultGlyphCacheTextureBytes = 2048 * 1024 * 4;
+ constexpr size_t kDefaultGaneshResourceCacheBytes = 96 * 1024 * 1024;
+ *max_resource_cache_bytes = kDefaultGaneshResourceCacheBytes;
+ *max_glyph_cache_texture_bytes = kDefaultGlyphCacheTextureBytes;
+}
+
+} // namespace gpu
diff --git a/chromium/gpu/config/skia_limits.h b/chromium/gpu/config/skia_limits.h
new file mode 100644
index 00000000000..1a84a2e6d8d
--- /dev/null
+++ b/chromium/gpu/config/skia_limits.h
@@ -0,0 +1,25 @@
+// Copyright 2020 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef GPU_CONFIG_SKIA_LIMITS_H_
+#define GPU_CONFIG_SKIA_LIMITS_H_
+
+#include <memory>
+
+#include "base/optional.h"
+#include "gpu/gpu_export.h"
+
+namespace gpu {
+
+GPU_EXPORT void DetermineGrCacheLimitsFromAvailableMemory(
+ size_t* max_resource_cache_bytes,
+ size_t* max_glyph_cache_texture_bytes);
+
+GPU_EXPORT void DefaultGrCacheLimitsForTests(
+ size_t* max_resource_cache_bytes,
+ size_t* max_glyph_cache_texture_bytes);
+
+} // namespace gpu
+
+#endif // GPU_CONFIG_SKIA_LIMITS_H_
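A minimal sketch of how a caller might consume the new skia_limits helpers; the logging call site is hypothetical and only the two declared functions come from this patch:

#include "base/logging.h"
#include "gpu/config/skia_limits.h"

void LogGrCacheLimits() {
  size_t max_resource_cache_bytes = 0;
  size_t max_glyph_cache_texture_bytes = 0;
  gpu::DetermineGrCacheLimitsFromAvailableMemory(
      &max_resource_cache_bytes, &max_glyph_cache_texture_bytes);
  // Low-end devices get 24-48 MB for Ganesh resources and a 2 MB glyph atlas;
  // devices with >= 4 GB of physical memory get 256 MB; everything else keeps
  // the 96 MB / 8 MB defaults.
  LOG(INFO) << "Ganesh budget: " << max_resource_cache_bytes << " bytes, "
            << "glyph atlas budget: " << max_glyph_cache_texture_bytes
            << " bytes.";
}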
diff --git a/chromium/gpu/config/software_rendering_list.json b/chromium/gpu/config/software_rendering_list.json
index 545aeb58cb2..68bed9e49dd 100644
--- a/chromium/gpu/config/software_rendering_list.json
+++ b/chromium/gpu/config/software_rendering_list.json
@@ -46,7 +46,7 @@
{
"id": 5,
"description": "ATI/AMD cards with older drivers in Linux are crash-prone",
- "cr_bugs": [71381, 76428, 73910, 101225, 136240, 357314],
+ "cr_bugs": [71381, 76428, 73910, 101225, 136240, 357314, 719213],
"os": {
"type": "linux"
},
@@ -69,6 +69,9 @@
},
{
"driver_vendor": ".*ANGLE.*"
+ },
+ {
+ "driver_vendor": ".*Brahma.*"
}
],
"features": [
@@ -335,10 +338,15 @@
{
"id": 48,
"description": "Accelerated video decode is unavailable on Linux",
- "cr_bugs": [137247],
+ "cr_bugs": [137247, 1032907],
"os": {
"type": "linux"
},
+ "exceptions": [
+ {
+ "machine_model_name": ["Chromecast"]
+ }
+ ],
"features": [
"accelerated_video_decode"
]
@@ -359,14 +367,6 @@
"value": "9.2.1"
},
"gl_renderer": ".*SVGA3D.*"
- },
- {
- "driver_vendor": "Mesa",
- "driver_version": {
- "op": ">=",
- "value": "10.1.3"
- },
- "gl_renderer": ".*llvmpipe.*"
}
],
"features": [
@@ -894,8 +894,8 @@
"value": "3.19.1"
}
},
- "gpu_series": [
- "intel_haswell"
+ "intel_gpu_series": [
+ "haswell"
],
"features": [
"all"
@@ -935,23 +935,6 @@
]
},
{
- "id": 110,
- "description": "Only enable WebGL for the Mesa Gallium llvmpipe driver",
- "cr_bugs": [571899],
- "os": {
- "type": "linux"
- },
- "driver_vendor": "Mesa",
- "gl_vendor": "VMware.*",
- "gl_renderer": ".*llvmpipe.*",
- "features": [
- "all",
- {"exceptions": [
- "accelerated_webgl"
- ]}
- ]
- },
- {
"id": 111,
"description": "Apple Software Renderer used under VMWare experiences synchronization issues with GPU Raster",
"cr_bugs": [607829],
@@ -1122,7 +1105,7 @@
{
"id": 122,
"description": "GPU rasterization should only be enabled on NVIDIA and Intel and AMD RX-R2 GPUs with DX11+ or any GPU using ANGLE's GL backend.",
- "cr_bugs": [643850],
+ "cr_bugs": [1047002],
"os": {
"type": "win"
},
@@ -1312,7 +1295,7 @@
{
"id": 136,
"description": "GPU rasterization is blacklisted on NVidia Fermi architecture for now.",
- "cr_bugs": [643850],
+ "cr_bugs": [1047002],
"os": {
"type": "win"
},
@@ -1364,7 +1347,7 @@
{
"id": 139,
"description": "GPU Rasterization is disabled on pre-GCN AMD cards",
- "cr_bugs": [643850],
+ "cr_bugs": [1047002],
"os": {
"type": "win"
},
@@ -1460,9 +1443,8 @@
},
"vendor_id": "0x8086",
"driver_version": {
- "comment": "INF_version: 8.16.0.0",
"op": "<",
- "value": "8.16.0.0"
+ "value": "0.0.0.3000"
},
"features": [
"accelerated_webgl2"
@@ -1633,32 +1615,16 @@
}
},
"vendor_id": "0x8086",
- "gpu_series": [
- "intel_sandybridge",
- "intel_baytrail",
- "intel_ivybridge",
- "intel_haswell",
- "intel_cherrytrail",
- "intel_broadwell",
- "intel_apollolake",
- "intel_skylake",
- "intel_geminilake"
- ]
- },
- {
- "id": 156,
- "cr_bugs": [870964],
- "description": "Frequent crashes on Adreno (TM) on L and below",
- "os": {
- "type": "android",
- "version": {
- "op": "<",
- "value": "6.0"
- }
- },
- "gl_renderer": "Adreno.*",
- "features": [
- "oop_rasterization"
+ "intel_gpu_series": [
+ "sandybridge",
+ "baytrail",
+ "ivybridge",
+ "haswell",
+ "cherrytrail",
+ "broadwell",
+ "apollolake",
+ "skylake",
+ "geminilake"
]
},
{
diff --git a/chromium/gpu/config/vulkan_info.h b/chromium/gpu/config/vulkan_info.h
index 57b951ef349..be60f8ca105 100644
--- a/chromium/gpu/config/vulkan_info.h
+++ b/chromium/gpu/config/vulkan_info.h
@@ -26,7 +26,7 @@ class GPU_EXPORT VulkanPhysicalDeviceInfo {
VkPhysicalDevice device = VK_NULL_HANDLE;
VkPhysicalDeviceProperties properties = {};
- std::vector<VkLayerProperties> layers;
+ std::vector<VkExtensionProperties> extensions;
VkPhysicalDeviceFeatures features = {};
// Extended physical device features:
diff --git a/chromium/gpu/gles2_conform_support/BUILD.gn b/chromium/gpu/gles2_conform_support/BUILD.gn
index 24a0bd3dabd..c3bd1393c38 100644
--- a/chromium/gpu/gles2_conform_support/BUILD.gn
+++ b/chromium/gpu/gles2_conform_support/BUILD.gn
@@ -82,18 +82,14 @@ if (internal_gles2_conform_tests) {
}
action("generate_gles2_conform_tests") {
script = "generate_gles2_conform_tests.py"
- outputs = [
- "$target_gen_dir/gles2_conform_test_autogen.cc",
- ]
+ outputs = [ "$target_gen_dir/gles2_conform_test_autogen.cc" ]
args = [ rebase_path("$target_gen_dir") ]
}
executable("gles2_conform_test_windowless") {
testonly = true
# Include a dummy c++ file to force linking of libstdc++.
- sources = [
- "dummy.cc",
- ]
+ sources = [ "dummy.cc" ]
gtf_source = "//third_party/gles2_conform/GTF_ES/glsl/GTF/Source"
sources += [
@@ -476,14 +472,10 @@ test("gles2_conform_test") {
"//testing/gtest",
]
- data = [
- "gles2_conform_test_expectations.txt",
- ]
+ data = [ "gles2_conform_test_expectations.txt" ]
if (internal_gles2_conform_tests) {
- data_deps = [
- ":gles2_conform_test_windowless",
- ]
+ data_deps = [ ":gles2_conform_test_windowless" ]
deps += [ ":generate_gles2_conform_tests" ]
sources += [ "$target_gen_dir/gles2_conform_test_autogen.cc" ]
data += [ "//third_party/gles2_conform/GTF_ES/" ]
diff --git a/chromium/gpu/gles2_conform_support/egl/context.cc b/chromium/gpu/gles2_conform_support/egl/context.cc
index e8e4bf6173c..2e5c0300ab4 100644
--- a/chromium/gpu/gles2_conform_support/egl/context.cc
+++ b/chromium/gpu/gles2_conform_support/egl/context.cc
@@ -61,6 +61,8 @@ Context::Context(Display* display, const Config* config)
is_destroyed_(false),
gpu_driver_bug_workarounds_(
platform_gpu_feature_info_.enabled_gpu_driver_bug_workarounds),
+ discardable_manager_(gpu::GpuPreferences()),
+ passthrough_discardable_manager_(gpu::GpuPreferences()),
translator_cache_(gpu::GpuPreferences()) {}
Context::~Context() {
diff --git a/chromium/gpu/ipc/BUILD.gn b/chromium/gpu/ipc/BUILD.gn
index cb84bcc837e..ba08eef5214 100644
--- a/chromium/gpu/ipc/BUILD.gn
+++ b/chromium/gpu/ipc/BUILD.gn
@@ -13,10 +13,14 @@ component("gl_in_process_context") {
"gl_in_process_context_export.h",
"gpu_in_process_thread_service.cc",
"gpu_in_process_thread_service.h",
+ "gpu_task_scheduler_helper.cc",
+ "gpu_task_scheduler_helper.h",
"in_process_command_buffer.cc",
"in_process_command_buffer.h",
"scheduler_sequence.cc",
"scheduler_sequence.h",
+ "shared_image_interface_in_process.cc",
+ "shared_image_interface_in_process.h",
"single_task_sequence.h",
]
@@ -64,9 +68,7 @@ component("gpu_thread_holder") {
defines = [ "IS_GPU_THREAD_HOLDER_IMPL" ]
- public_deps = [
- "//gpu/config",
- ]
+ public_deps = [ "//gpu/config" ]
deps = [
":gl_in_process_context",
diff --git a/chromium/gpu/ipc/client/BUILD.gn b/chromium/gpu/ipc/client/BUILD.gn
index a0ce8932fdc..dfeeb5b95b9 100644
--- a/chromium/gpu/ipc/client/BUILD.gn
+++ b/chromium/gpu/ipc/client/BUILD.gn
@@ -6,18 +6,16 @@ import("//build/config/ui.gni")
group("client") {
if (is_component_build) {
- public_deps = [
- "//gpu",
- ]
+ public_deps = [ "//gpu" ]
} else {
- public_deps = [
- ":ipc_client_sources",
- ]
+ public_deps = [ ":ipc_client_sources" ]
}
}
source_set("ipc_client_sources") {
sources = [
+ "client_shared_image_interface.cc",
+ "client_shared_image_interface.h",
"command_buffer_proxy_impl.cc",
"command_buffer_proxy_impl.h",
"gpu_channel_host.cc",
@@ -43,7 +41,5 @@ source_set("ipc_client_sources") {
"//ui/gfx/geometry",
"//ui/gl",
]
- public_deps = [
- "//ipc",
- ]
+ public_deps = [ "//ipc" ]
}
diff --git a/chromium/gpu/ipc/client/DEPS b/chromium/gpu/ipc/client/DEPS
index 5a3dccc1667..24647bdb0bd 100644
--- a/chromium/gpu/ipc/client/DEPS
+++ b/chromium/gpu/ipc/client/DEPS
@@ -10,7 +10,7 @@ specific_include_rules = {
"gpu_in_process_context_tests.cc": [
"+components/viz/test/test_gpu_memory_buffer_manager.h",
],
- "image_decode_accelerator_proxy.cc": [
+ "image_decode_accelerator_proxy(_unittest)?\.cc": [
"+cc/paint/paint_image.h",
],
"raster_in_process_context_tests.cc": [
diff --git a/chromium/gpu/ipc/client/OWNERS b/chromium/gpu/ipc/client/OWNERS
index 6759040cd4e..d0a06afaeb8 100644
--- a/chromium/gpu/ipc/client/OWNERS
+++ b/chromium/gpu/ipc/client/OWNERS
@@ -1,5 +1,4 @@
# GPU memory buffer interface.
-per-file gpu_memory_buffer*=reveman@chromium.org
per-file gpu_memory_buffer*=dcastagna@chromium.org
# IOSurface usage.
diff --git a/chromium/gpu/ipc/client/client_shared_image_interface.cc b/chromium/gpu/ipc/client/client_shared_image_interface.cc
new file mode 100644
index 00000000000..f9a9acbd066
--- /dev/null
+++ b/chromium/gpu/ipc/client/client_shared_image_interface.cc
@@ -0,0 +1,137 @@
+// Copyright 2020 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "gpu/ipc/client/client_shared_image_interface.h"
+
+#include "gpu/ipc/client/shared_image_interface_proxy.h"
+#include "ui/gfx/gpu_fence.h"
+
+namespace gpu {
+
+ClientSharedImageInterface::ClientSharedImageInterface(
+ SharedImageInterfaceProxy* proxy)
+ : proxy_(proxy) {}
+
+ClientSharedImageInterface::~ClientSharedImageInterface() {
+ gpu::SyncToken sync_token;
+ auto mailboxes_to_delete = mailboxes_;
+ for (const auto& mailbox : mailboxes_to_delete)
+ DestroySharedImage(sync_token, mailbox);
+}
+
+void ClientSharedImageInterface::UpdateSharedImage(const SyncToken& sync_token,
+ const Mailbox& mailbox) {
+ proxy_->UpdateSharedImage(sync_token, mailbox);
+}
+
+void ClientSharedImageInterface::UpdateSharedImage(
+ const SyncToken& sync_token,
+ std::unique_ptr<gfx::GpuFence> acquire_fence,
+ const Mailbox& mailbox) {
+ proxy_->UpdateSharedImage(sync_token, std::move(acquire_fence), mailbox);
+}
+
+void ClientSharedImageInterface::PresentSwapChain(const SyncToken& sync_token,
+ const Mailbox& mailbox) {
+ proxy_->PresentSwapChain(sync_token, mailbox);
+}
+
+#if defined(OS_FUCHSIA)
+void ClientSharedImageInterface::RegisterSysmemBufferCollection(
+ gfx::SysmemBufferCollectionId id,
+ zx::channel token) {
+ proxy_->RegisterSysmemBufferCollection(id, std::move(token));
+}
+
+void ClientSharedImageInterface::ReleaseSysmemBufferCollection(
+ gfx::SysmemBufferCollectionId id) {
+ proxy_->ReleaseSysmemBufferCollection(id);
+}
+#endif // defined(OS_FUCHSIA)
+
+SyncToken ClientSharedImageInterface::GenUnverifiedSyncToken() {
+ return proxy_->GenUnverifiedSyncToken();
+}
+
+SyncToken ClientSharedImageInterface::GenVerifiedSyncToken() {
+ return proxy_->GenVerifiedSyncToken();
+}
+
+void ClientSharedImageInterface::Flush() {
+ proxy_->Flush();
+}
+
+scoped_refptr<gfx::NativePixmap> ClientSharedImageInterface::GetNativePixmap(
+ const gpu::Mailbox& mailbox) {
+ return proxy_->GetNativePixmap(mailbox);
+}
+
+Mailbox ClientSharedImageInterface::CreateSharedImage(
+ viz::ResourceFormat format,
+ const gfx::Size& size,
+ const gfx::ColorSpace& color_space,
+ uint32_t usage,
+ gpu::SurfaceHandle surface_handle) {
+ DCHECK_EQ(surface_handle, kNullSurfaceHandle);
+ return AddMailbox(
+ proxy_->CreateSharedImage(format, size, color_space, usage));
+}
+
+Mailbox ClientSharedImageInterface::CreateSharedImage(
+ viz::ResourceFormat format,
+ const gfx::Size& size,
+ const gfx::ColorSpace& color_space,
+ uint32_t usage,
+ base::span<const uint8_t> pixel_data) {
+ return AddMailbox(
+ proxy_->CreateSharedImage(format, size, color_space, usage, pixel_data));
+}
+
+Mailbox ClientSharedImageInterface::CreateSharedImage(
+ gfx::GpuMemoryBuffer* gpu_memory_buffer,
+ GpuMemoryBufferManager* gpu_memory_buffer_manager,
+ const gfx::ColorSpace& color_space,
+ uint32_t usage) {
+ return AddMailbox(proxy_->CreateSharedImage(
+ gpu_memory_buffer, gpu_memory_buffer_manager, color_space, usage));
+}
+
+ClientSharedImageInterface::SwapChainMailboxes
+ClientSharedImageInterface::CreateSwapChain(viz::ResourceFormat format,
+ const gfx::Size& size,
+ const gfx::ColorSpace& color_space,
+ uint32_t usage) {
+ auto mailboxes = proxy_->CreateSwapChain(format, size, color_space, usage);
+ AddMailbox(mailboxes.front_buffer);
+ AddMailbox(mailboxes.back_buffer);
+ return mailboxes;
+}
+
+void ClientSharedImageInterface::DestroySharedImage(const SyncToken& sync_token,
+ const Mailbox& mailbox) {
+ DCHECK(!mailbox.IsZero());
+
+ {
+ base::AutoLock lock(lock_);
+ DCHECK_NE(mailboxes_.count(mailbox), 0u);
+ mailboxes_.erase(mailbox);
+ }
+ proxy_->DestroySharedImage(sync_token, mailbox);
+}
+
+uint32_t ClientSharedImageInterface::UsageForMailbox(const Mailbox& mailbox) {
+ return proxy_->UsageForMailbox(mailbox);
+}
+
+Mailbox ClientSharedImageInterface::AddMailbox(const gpu::Mailbox& mailbox) {
+ if (mailbox.IsZero())
+ return mailbox;
+
+ base::AutoLock lock(lock_);
+ DCHECK_EQ(mailboxes_.count(mailbox), 0u);
+ mailboxes_.insert(mailbox);
+ return mailbox;
+}
+
+} // namespace gpu
diff --git a/chromium/gpu/ipc/client/client_shared_image_interface.h b/chromium/gpu/ipc/client/client_shared_image_interface.h
new file mode 100644
index 00000000000..78771d64ef4
--- /dev/null
+++ b/chromium/gpu/ipc/client/client_shared_image_interface.h
@@ -0,0 +1,77 @@
+// Copyright 2020 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef GPU_IPC_CLIENT_CLIENT_SHARED_IMAGE_INTERFACE_H_
+#define GPU_IPC_CLIENT_CLIENT_SHARED_IMAGE_INTERFACE_H_
+
+#include "gpu/command_buffer/client/shared_image_interface.h"
+
+#include "base/containers/flat_set.h"
+#include "base/synchronization/lock.h"
+#include "base/thread_annotations.h"
+#include "gpu/ipc/common/surface_handle.h"
+
+namespace gpu {
+class SharedImageInterfaceProxy;
+
+// Tracks shared images created by a single context and ensures they are deleted
+// if the context is lost.
+class GPU_EXPORT ClientSharedImageInterface : public SharedImageInterface {
+ public:
+ ClientSharedImageInterface(SharedImageInterfaceProxy* proxy);
+ ~ClientSharedImageInterface() override;
+
+ // SharedImageInterface implementation.
+ void UpdateSharedImage(const SyncToken& sync_token,
+ const Mailbox& mailbox) override;
+ void UpdateSharedImage(const SyncToken& sync_token,
+ std::unique_ptr<gfx::GpuFence> acquire_fence,
+ const Mailbox& mailbox) override;
+ void PresentSwapChain(const SyncToken& sync_token,
+ const Mailbox& mailbox) override;
+#if defined(OS_FUCHSIA)
+ void RegisterSysmemBufferCollection(gfx::SysmemBufferCollectionId id,
+ zx::channel token) override;
+ void ReleaseSysmemBufferCollection(gfx::SysmemBufferCollectionId id) override;
+#endif // defined(OS_FUCHSIA)
+ SyncToken GenUnverifiedSyncToken() override;
+ SyncToken GenVerifiedSyncToken() override;
+ void Flush() override;
+ scoped_refptr<gfx::NativePixmap> GetNativePixmap(
+ const Mailbox& mailbox) override;
+ Mailbox CreateSharedImage(
+ viz::ResourceFormat format,
+ const gfx::Size& size,
+ const gfx::ColorSpace& color_space,
+ uint32_t usage,
+ gpu::SurfaceHandle surface_handle = gpu::kNullSurfaceHandle) override;
+ Mailbox CreateSharedImage(viz::ResourceFormat format,
+ const gfx::Size& size,
+ const gfx::ColorSpace& color_space,
+ uint32_t usage,
+ base::span<const uint8_t> pixel_data) override;
+ Mailbox CreateSharedImage(gfx::GpuMemoryBuffer* gpu_memory_buffer,
+ GpuMemoryBufferManager* gpu_memory_buffer_manager,
+ const gfx::ColorSpace& color_space,
+ uint32_t usage) override;
+ SwapChainMailboxes CreateSwapChain(viz::ResourceFormat format,
+ const gfx::Size& size,
+ const gfx::ColorSpace& color_space,
+ uint32_t usage) override;
+ void DestroySharedImage(const SyncToken& sync_token,
+ const Mailbox& mailbox) override;
+ uint32_t UsageForMailbox(const Mailbox& mailbox) override;
+
+ private:
+ Mailbox AddMailbox(const Mailbox& mailbox);
+
+ SharedImageInterfaceProxy* const proxy_;
+
+ base::Lock lock_;
+ base::flat_set<Mailbox> mailboxes_ GUARDED_BY(lock_);
+};
+
+} // namespace gpu
+
+#endif // GPU_IPC_CLIENT_CLIENT_SHARED_IMAGE_INTERFACE_H_
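A minimal, hypothetical sketch of the client-side flow this interface enables, obtained via GpuChannelHost::CreateClientSharedImageInterface(); the format, size, and usage values are placeholders and supporting includes are abbreviated:

#include "components/viz/common/resources/resource_format.h"
#include "gpu/ipc/client/client_shared_image_interface.h"
#include "gpu/ipc/client/gpu_channel_host.h"
#include "ui/gfx/color_space.h"
#include "ui/gfx/geometry/size.h"

void CreateAndReleaseSharedImage(gpu::GpuChannelHost* channel) {
  std::unique_ptr<gpu::ClientSharedImageInterface> sii =
      channel->CreateClientSharedImageInterface();
  gpu::Mailbox mailbox = sii->CreateSharedImage(
      viz::ResourceFormat::RGBA_8888, gfx::Size(256, 256),
      gfx::ColorSpace::CreateSRGB(), /*usage=*/0);
  // ... hand |mailbox| to a consumer and order the work with a sync token ...
  gpu::SyncToken sync_token = sii->GenUnverifiedSyncToken();
  sii->DestroySharedImage(sync_token, mailbox);
  // Anything not destroyed explicitly is released when |sii| is destroyed,
  // which is what ties shared image lifetime to the owning context.
}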
diff --git a/chromium/gpu/ipc/client/command_buffer_proxy_impl.cc b/chromium/gpu/ipc/client/command_buffer_proxy_impl.cc
index 2ba4395ef04..43f0626a147 100644
--- a/chromium/gpu/ipc/client/command_buffer_proxy_impl.cc
+++ b/chromium/gpu/ipc/client/command_buffer_proxy_impl.cc
@@ -5,8 +5,6 @@
#include "gpu/ipc/client/command_buffer_proxy_impl.h"
#include <memory>
-#include <utility>
-#include <vector>
#include "base/bind.h"
#include "base/callback.h"
@@ -31,7 +29,6 @@
#include "gpu/ipc/common/command_buffer_id.h"
#include "gpu/ipc/common/gpu_messages.h"
#include "gpu/ipc/common/gpu_param_traits.h"
-#include "mojo/public/cpp/base/shared_memory_utils.h"
#include "mojo/public/cpp/system/buffer.h"
#include "mojo/public/cpp/system/platform_handle.h"
#include "ui/gfx/buffer_format_util.h"
@@ -658,7 +655,6 @@ void CommandBufferProxyImpl::ReturnFrontBuffer(const gpu::Mailbox& mailbox,
bool CommandBufferProxyImpl::Send(IPC::Message* msg) {
DCHECK(channel_);
- last_state_lock_.AssertAcquired();
DCHECK_EQ(gpu::error::kNoError, last_state_.error);
last_state_lock_.Release();
@@ -696,7 +692,7 @@ bool CommandBufferProxyImpl::Send(IPC::Message* msg) {
std::pair<base::UnsafeSharedMemoryRegion, base::WritableSharedMemoryMapping>
CommandBufferProxyImpl::AllocateAndMapSharedMemory(size_t size) {
base::UnsafeSharedMemoryRegion region =
- mojo::CreateUnsafeSharedMemoryRegion(size);
+ base::UnsafeSharedMemoryRegion::Create(size);
if (!region.IsValid()) {
DLOG(ERROR) << "AllocateAndMapSharedMemory: Allocation failed";
return {};
@@ -714,7 +710,6 @@ CommandBufferProxyImpl::AllocateAndMapSharedMemory(size_t size) {
void CommandBufferProxyImpl::SetStateFromMessageReply(
const gpu::CommandBuffer::State& state) {
CheckLock();
- last_state_lock_.AssertAcquired();
if (last_state_.error != gpu::error::kNoError)
return;
// Handle wraparound. It works as long as we don't have more than 2B state
@@ -727,7 +722,6 @@ void CommandBufferProxyImpl::SetStateFromMessageReply(
void CommandBufferProxyImpl::TryUpdateState() {
CheckLock();
- last_state_lock_.AssertAcquired();
if (last_state_.error == gpu::error::kNoError) {
shared_state()->Read(&last_state_);
if (last_state_.error != gpu::error::kNoError)
@@ -736,7 +730,6 @@ void CommandBufferProxyImpl::TryUpdateState() {
}
void CommandBufferProxyImpl::TryUpdateStateThreadSafe() {
- last_state_lock_.AssertAcquired();
if (last_state_.error == gpu::error::kNoError) {
shared_state()->Read(&last_state_);
if (last_state_.error != gpu::error::kNoError) {
@@ -749,7 +742,6 @@ void CommandBufferProxyImpl::TryUpdateStateThreadSafe() {
}
void CommandBufferProxyImpl::TryUpdateStateDontReportError() {
- last_state_lock_.AssertAcquired();
if (last_state_.error == gpu::error::kNoError)
shared_state()->Read(&last_state_);
}
@@ -779,7 +771,6 @@ void CommandBufferProxyImpl::OnBufferPresented(
void CommandBufferProxyImpl::OnGpuSyncReplyError() {
CheckLock();
- last_state_lock_.AssertAcquired();
last_state_.error = gpu::error::kLostContext;
last_state_.context_lost_reason = gpu::error::kInvalidGpuMessage;
// This method may be inside a callstack from the GpuControlClient (we got a
@@ -792,7 +783,6 @@ void CommandBufferProxyImpl::OnGpuAsyncMessageError(
gpu::error::ContextLostReason reason,
gpu::error::Error error) {
CheckLock();
- last_state_lock_.AssertAcquired();
last_state_.error = error;
last_state_.context_lost_reason = reason;
// This method only occurs when receiving IPC messages, so we know it's not in
@@ -804,7 +794,6 @@ void CommandBufferProxyImpl::OnGpuAsyncMessageError(
void CommandBufferProxyImpl::OnGpuStateError() {
CheckLock();
- last_state_lock_.AssertAcquired();
DCHECK_NE(gpu::error::kNoError, last_state_.error);
// This method may be inside a callstack from the GpuControlClient (we
// encountered an error while trying to perform some action). So avoid
@@ -814,7 +803,6 @@ void CommandBufferProxyImpl::OnGpuStateError() {
void CommandBufferProxyImpl::OnClientError(gpu::error::Error error) {
CheckLock();
- last_state_lock_.AssertAcquired();
last_state_.error = error;
last_state_.context_lost_reason = gpu::error::kUnknown;
// This method may be inside a callstack from the GpuControlClient (we
@@ -825,7 +813,6 @@ void CommandBufferProxyImpl::OnClientError(gpu::error::Error error) {
void CommandBufferProxyImpl::DisconnectChannelInFreshCallStack() {
CheckLock();
- last_state_lock_.AssertAcquired();
// Inform the GpuControlClient of the lost state immediately, though this may
// be a re-entrant call to the client so we use the MaybeReentrant variant.
if (gpu_control_client_)
diff --git a/chromium/gpu/ipc/client/command_buffer_proxy_impl.h b/chromium/gpu/ipc/client/command_buffer_proxy_impl.h
index b2be647ee62..42deacd1a48 100644
--- a/chromium/gpu/ipc/client/command_buffer_proxy_impl.h
+++ b/chromium/gpu/ipc/client/command_buffer_proxy_impl.h
@@ -13,6 +13,8 @@
#include <queue>
#include <string>
#include <unordered_map>
+#include <utility>
+#include <vector>
#include "base/callback.h"
#include "base/compiler_specific.h"
@@ -169,7 +171,7 @@ class GPU_EXPORT CommandBufferProxyImpl : public gpu::CommandBuffer,
// Send an IPC message over the GPU channel. This is private to fully
// encapsulate the channel; all callers of this function must explicitly
// verify that the context has not been lost.
- bool Send(IPC::Message* msg);
+ bool Send(IPC::Message* msg) EXCLUSIVE_LOCKS_REQUIRED(last_state_lock_);
std::pair<base::UnsafeSharedMemoryRegion, base::WritableSharedMemoryMapping>
AllocateAndMapSharedMemory(size_t size);
@@ -189,33 +191,38 @@ class GPU_EXPORT CommandBufferProxyImpl : public gpu::CommandBuffer,
// Try to read an updated copy of the state from shared memory, and calls
// OnGpuStateError() if the new state has an error.
- void TryUpdateState();
+ void TryUpdateState() EXCLUSIVE_LOCKS_REQUIRED(last_state_lock_);
// Like above but calls the error handler and disconnects channel by posting
// a task.
- void TryUpdateStateThreadSafe();
+ void TryUpdateStateThreadSafe() EXCLUSIVE_LOCKS_REQUIRED(last_state_lock_);
// Like the above but does not call the error event handler if the new state
// has an error.
- void TryUpdateStateDontReportError();
+ void TryUpdateStateDontReportError()
+ EXCLUSIVE_LOCKS_REQUIRED(last_state_lock_);
// Sets the state, and calls OnGpuStateError() if the new state has an error.
- void SetStateFromMessageReply(const CommandBuffer::State& state);
+ void SetStateFromMessageReply(const CommandBuffer::State& state)
+ EXCLUSIVE_LOCKS_REQUIRED(last_state_lock_);
// Loses the context after we received an invalid reply from the GPU
// process.
- void OnGpuSyncReplyError();
+ void OnGpuSyncReplyError() EXCLUSIVE_LOCKS_REQUIRED(last_state_lock_);
// Loses the context when receiving a message from the GPU process.
void OnGpuAsyncMessageError(gpu::error::ContextLostReason reason,
- gpu::error::Error error);
+ gpu::error::Error error)
+ EXCLUSIVE_LOCKS_REQUIRED(last_state_lock_);
// Loses the context after we receive an error state from the GPU process.
- void OnGpuStateError();
+ void OnGpuStateError() EXCLUSIVE_LOCKS_REQUIRED(last_state_lock_);
// Sets an error on the last_state_ and loses the context due to client-side
// errors.
- void OnClientError(gpu::error::Error error);
+ void OnClientError(gpu::error::Error error)
+ EXCLUSIVE_LOCKS_REQUIRED(last_state_lock_);
// Helper methods, don't call these directly.
- void DisconnectChannelInFreshCallStack();
+ void DisconnectChannelInFreshCallStack()
+ EXCLUSIVE_LOCKS_REQUIRED(last_state_lock_);
void LockAndDisconnectChannel();
void DisconnectChannel();
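The EXCLUSIVE_LOCKS_REQUIRED annotations added above make the existing locking contract checkable by Clang's thread-safety analysis instead of relying on runtime AssertAcquired() calls. A minimal sketch of the pattern, assuming the usual base/ annotation macros:

#include "base/synchronization/lock.h"
#include "base/thread_annotations.h"

class Counter {
 public:
  void Increment() {
    base::AutoLock auto_lock(lock_);
    Poke();  // OK: the analysis sees |lock_| held here.
  }

 private:
  // Compile-time requirement; calling Poke() without |lock_| held warns.
  void Poke() EXCLUSIVE_LOCKS_REQUIRED(lock_) { ++value_; }

  base::Lock lock_;
  int value_ GUARDED_BY(lock_) = 0;
};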
diff --git a/chromium/gpu/ipc/client/gpu_channel_host.cc b/chromium/gpu/ipc/client/gpu_channel_host.cc
index 505affa44fa..6288951f676 100644
--- a/chromium/gpu/ipc/client/gpu_channel_host.cc
+++ b/chromium/gpu/ipc/client/gpu_channel_host.cc
@@ -9,18 +9,21 @@
#include "base/atomic_sequence_num.h"
#include "base/bind.h"
+#include "base/memory/ptr_util.h"
#include "base/metrics/histogram_macros.h"
#include "base/single_thread_task_runner.h"
#include "base/threading/thread_restrictions.h"
#include "base/threading/thread_task_runner_handle.h"
-#include "base/trace_event/trace_event.h"
#include "build/build_config.h"
+#include "gpu/command_buffer/common/gpu_memory_buffer_support.h"
+#include "gpu/ipc/client/client_shared_image_interface.h"
#include "gpu/ipc/common/command_buffer_id.h"
#include "gpu/ipc/common/gpu_messages.h"
#include "gpu/ipc/common/gpu_param_traits_macros.h"
#include "gpu/ipc/common/gpu_watchdog_timeout.h"
#include "ipc/ipc_channel_mojo.h"
#include "ipc/ipc_sync_message.h"
+#include "ipc/trace_ipc_message.h"
#include "mojo/public/cpp/bindings/lib/message_quota_checker.h"
#include "url/gurl.h"
@@ -50,12 +53,14 @@ GpuChannelHost::GpuChannelHost(int channel_id,
for (int32_t i = 0;
i <= static_cast<int32_t>(GpuChannelReservedRoutes::kMaxValue); ++i)
next_route_id_.GetNext();
+
+#if defined(OS_MACOSX)
+ gpu::SetMacOSSpecificTextureTarget(gpu_info.macos_specific_texture_target);
+#endif // defined(OS_MACOSX)
}
bool GpuChannelHost::Send(IPC::Message* msg) {
- TRACE_EVENT2("ipc", "GpuChannelHost::Send", "class",
- IPC_MESSAGE_ID_CLASS(msg->type()), "line",
- IPC_MESSAGE_ID_LINE(msg->type()));
+ TRACE_IPC_MESSAGE_SEND("ipc", "GpuChannelHost::Send", msg);
auto message = base::WrapUnique(msg);
@@ -106,12 +111,6 @@ bool GpuChannelHost::Send(IPC::Message* msg) {
base::TimeDelta::FromSeconds(1),
kGpuChannelHostMaxWaitTime, 50);
- // Histogram to measure how long the browser UI thread spends blocked.
- UMA_HISTOGRAM_CUSTOM_MICROSECONDS_TIMES(
- "GPU.GPUChannelHostWaitTime.MicroSeconds", wait_duration,
- base::TimeDelta::FromMicroseconds(10), base::TimeDelta::FromSeconds(10),
- 50);
-
// Continue waiting for the event if not signaled
if (!signaled)
pending_sync.done_event->Wait();
@@ -247,6 +246,11 @@ void GpuChannelHost::CrashGpuProcessForTesting() {
Send(new GpuChannelMsg_CrashForTesting());
}
+std::unique_ptr<ClientSharedImageInterface>
+GpuChannelHost::CreateClientSharedImageInterface() {
+ return std::make_unique<ClientSharedImageInterface>(&shared_image_interface_);
+}
+
GpuChannelHost::~GpuChannelHost() = default;
GpuChannelHost::Listener::RouteInfo::RouteInfo() = default;
diff --git a/chromium/gpu/ipc/client/gpu_channel_host.h b/chromium/gpu/ipc/client/gpu_channel_host.h
index 1af60880511..6c17de8e0a3 100644
--- a/chromium/gpu/ipc/client/gpu_channel_host.h
+++ b/chromium/gpu/ipc/client/gpu_channel_host.h
@@ -38,6 +38,7 @@ class ChannelMojo;
struct GpuDeferredMessage;
namespace gpu {
+class ClientSharedImageInterface;
struct SyncToken;
class GpuChannelHost;
class GpuMemoryBufferManager;
@@ -135,9 +136,8 @@ class GPU_EXPORT GpuChannelHost
// otherwise ignored.
void CrashGpuProcessForTesting();
- SharedImageInterface* shared_image_interface() {
- return &shared_image_interface_;
- }
+ std::unique_ptr<ClientSharedImageInterface>
+ CreateClientSharedImageInterface();
ImageDecodeAcceleratorProxy* image_decode_accelerator_proxy() {
return &image_decode_accelerator_proxy_;
diff --git a/chromium/gpu/ipc/client/image_decode_accelerator_proxy.cc b/chromium/gpu/ipc/client/image_decode_accelerator_proxy.cc
index 630cd3ddb45..cfd6e613a1c 100644
--- a/chromium/gpu/ipc/client/image_decode_accelerator_proxy.cc
+++ b/chromium/gpu/ipc/client/image_decode_accelerator_proxy.cc
@@ -195,8 +195,7 @@ SyncToken ImageDecodeAcceleratorProxy::ScheduleImageDecode(
ChannelIdFromCommandBufferId(raster_decoder_command_buffer_id));
GpuChannelMsg_ScheduleImageDecode_Params params;
- params.encoded_data =
- std::vector<uint8_t>(encoded_data.cbegin(), encoded_data.cend());
+ params.encoded_data.assign(encoded_data.begin(), encoded_data.end());
params.output_size = output_size;
params.raster_decoder_route_id =
RouteIdFromCommandBufferId(raster_decoder_command_buffer_id);
diff --git a/chromium/gpu/ipc/client/image_decode_accelerator_proxy_unittest.cc b/chromium/gpu/ipc/client/image_decode_accelerator_proxy_unittest.cc
index e6bb414290a..01fe16772b0 100644
--- a/chromium/gpu/ipc/client/image_decode_accelerator_proxy_unittest.cc
+++ b/chromium/gpu/ipc/client/image_decode_accelerator_proxy_unittest.cc
@@ -8,6 +8,7 @@
#include "base/stl_util.h"
#include "base/test/task_environment.h"
+#include "cc/paint/paint_image.h"
#include "gpu/ipc/client/gpu_channel_host.h"
#include "gpu/ipc/client/image_decode_accelerator_proxy.h"
#include "gpu/ipc/common/command_buffer_id.h"
@@ -70,9 +71,11 @@ MATCHER_P(IpcMessageEqualTo, expected, "") {
class MockGpuChannelHost : public GpuChannelHost {
public:
- MockGpuChannelHost()
+ MockGpuChannelHost() : MockGpuChannelHost(GPUInfo()) {}
+
+ MockGpuChannelHost(const GPUInfo& info)
: GpuChannelHost(kChannelId,
- GPUInfo(),
+ info,
GpuFeatureInfo(),
mojo::ScopedMessagePipeHandle(mojo::MessagePipeHandle(
mojo::kInvalidHandleValue))) {}
@@ -89,7 +92,8 @@ class ImageDecodeAcceleratorProxyTest : public ::testing::Test {
: gpu_channel_host_(
base::MakeRefCounted<StrictMock<MockGpuChannelHost>>()),
proxy_(gpu_channel_host_.get(),
- (int32_t)GpuChannelReservedRoutes::kImageDecodeAccelerator) {}
+ static_cast<int32_t>(
+ GpuChannelReservedRoutes::kImageDecodeAccelerator)) {}
~ImageDecodeAcceleratorProxyTest() override = default;
@@ -107,8 +111,7 @@ TEST_F(ImageDecodeAcceleratorProxyTest, ScheduleImageDecodeSendsMessage) {
const gfx::ColorSpace color_space = gfx::ColorSpace::CreateSRGB();
GpuChannelMsg_ScheduleImageDecode_Params expected_params;
- expected_params.encoded_data =
- std::vector<uint8_t>(encoded_data.cbegin(), encoded_data.cend());
+ expected_params.encoded_data.assign(encoded_data.begin(), encoded_data.end());
expected_params.output_size = kOutputSize;
expected_params.raster_decoder_route_id = kRasterCmdBufferRouteId;
expected_params.transfer_cache_entry_id = 1u;
@@ -149,4 +152,115 @@ TEST_F(ImageDecodeAcceleratorProxyTest, ScheduleImageDecodeSendsMessage) {
EXPECT_EQ(token.release_count(), 1u);
}
+class ImageDecodeAcceleratorProxySubsamplingTest
+ : public testing::TestWithParam<cc::YUVSubsampling> {
+ public:
+ ImageDecodeAcceleratorProxySubsamplingTest() = default;
+ ~ImageDecodeAcceleratorProxySubsamplingTest() override = default;
+
+ protected:
+ base::test::SingleThreadTaskEnvironment task_environment_;
+};
+
+TEST_P(ImageDecodeAcceleratorProxySubsamplingTest, JPEGSubsamplingIsSupported) {
+ cc::ImageHeaderMetadata image_metadata;
+ image_metadata.yuv_subsampling = GetParam();
+ image_metadata.image_type = cc::ImageType::kJPEG;
+ image_metadata.all_data_received_prior_to_decode = true;
+ image_metadata.has_embedded_color_profile = false;
+ image_metadata.image_size = gfx::Size(100, 200);
+ image_metadata.jpeg_is_progressive = false;
+
+ ImageDecodeAcceleratorSupportedProfile profile;
+ profile.image_type = ImageDecodeAcceleratorType::kJpeg;
+ profile.min_encoded_dimensions = gfx::Size(0, 0);
+ profile.max_encoded_dimensions = gfx::Size(1920, 1080);
+
+ static_assert(
+ // TODO(andrescj): refactor to instead have a static_assert at the
+ // declaration site of ImageDecodeAcceleratorSubsampling to make sure it
+ // has the same number of entries as cc::YUVSubsampling.
+ static_cast<int>(ImageDecodeAcceleratorSubsampling::kMaxValue) == 2,
+ "ImageDecodeAcceleratorProxySubsamplingTest.JPEGSubsamplingIsSupported "
+ "must be adapted to support all subsampling factors in "
+ "ImageDecodeAcceleratorSubsampling");
+ switch (GetParam()) {
+ case cc::YUVSubsampling::k420:
+ profile.subsamplings.push_back(ImageDecodeAcceleratorSubsampling::k420);
+ break;
+ case cc::YUVSubsampling::k422:
+ profile.subsamplings.push_back(ImageDecodeAcceleratorSubsampling::k422);
+ break;
+ case cc::YUVSubsampling::k444:
+ profile.subsamplings.push_back(ImageDecodeAcceleratorSubsampling::k444);
+ break;
+ default:
+ return;
+ }
+ GPUInfo gpu_info;
+ gpu_info.image_decode_accelerator_supported_profiles.push_back(profile);
+
+ auto gpu_channel_host(
+ base::MakeRefCounted<StrictMock<MockGpuChannelHost>>(gpu_info));
+ ImageDecodeAcceleratorProxy proxy(
+ gpu_channel_host.get(),
+ static_cast<int32_t>(GpuChannelReservedRoutes::kImageDecodeAccelerator));
+
+ EXPECT_TRUE(proxy.IsImageSupported(&image_metadata));
+}
+
+TEST_P(ImageDecodeAcceleratorProxySubsamplingTest,
+ JPEGSubsamplingIsNotSupported) {
+ cc::ImageHeaderMetadata image_metadata;
+ image_metadata.yuv_subsampling = GetParam();
+ image_metadata.image_type = cc::ImageType::kJPEG;
+ image_metadata.all_data_received_prior_to_decode = true;
+ image_metadata.has_embedded_color_profile = false;
+ image_metadata.image_size = gfx::Size(100, 200);
+ image_metadata.jpeg_is_progressive = false;
+
+ ImageDecodeAcceleratorSupportedProfile profile;
+ profile.image_type = ImageDecodeAcceleratorType::kJpeg;
+ profile.min_encoded_dimensions = gfx::Size(0, 0);
+ profile.max_encoded_dimensions = gfx::Size(1920, 1080);
+
+ static_assert(
+ // TODO(andrescj): refactor to instead have a static_assert at the
+ // declaration site of ImageDecodeAcceleratorSubsampling to make sure it
+ // has the same number of entries as cc::YUVSubsampling.
+ static_cast<int>(ImageDecodeAcceleratorSubsampling::kMaxValue) == 2,
+ "ImageDecodeAcceleratorProxySubsamplingTest.JPEGSubsamplingIsNotSupported"
+ "must be adapted to support all subsampling factors in "
+ "ImageDecodeAcceleratorSubsampling");
+
+ // Advertise support for all subsamplings except the GetParam() one.
+ if (GetParam() != cc::YUVSubsampling::k420)
+ profile.subsamplings.push_back(ImageDecodeAcceleratorSubsampling::k420);
+ if (GetParam() != cc::YUVSubsampling::k422)
+ profile.subsamplings.push_back(ImageDecodeAcceleratorSubsampling::k422);
+ if (GetParam() != cc::YUVSubsampling::k444)
+ profile.subsamplings.push_back(ImageDecodeAcceleratorSubsampling::k444);
+
+ GPUInfo gpu_info;
+ gpu_info.image_decode_accelerator_supported_profiles.push_back(profile);
+
+ auto gpu_channel_host(
+ base::MakeRefCounted<StrictMock<MockGpuChannelHost>>(gpu_info));
+ ImageDecodeAcceleratorProxy proxy(
+ gpu_channel_host.get(),
+ static_cast<int32_t>(GpuChannelReservedRoutes::kImageDecodeAccelerator));
+
+ EXPECT_FALSE(proxy.IsImageSupported(&image_metadata));
+}
+
+INSTANTIATE_TEST_SUITE_P(ImageDecodeAcceleratorProxySubsample,
+ ImageDecodeAcceleratorProxySubsamplingTest,
+ testing::Values(cc::YUVSubsampling::k410,
+ cc::YUVSubsampling::k411,
+ cc::YUVSubsampling::k420,
+ cc::YUVSubsampling::k422,
+ cc::YUVSubsampling::k440,
+ cc::YUVSubsampling::k444,
+ cc::YUVSubsampling::kUnknown));
+
} // namespace gpu
diff --git a/chromium/gpu/ipc/client/shared_image_interface_proxy.cc b/chromium/gpu/ipc/client/shared_image_interface_proxy.cc
index 81d8f4f4f36..64b88276295 100644
--- a/chromium/gpu/ipc/client/shared_image_interface_proxy.cc
+++ b/chromium/gpu/ipc/client/shared_image_interface_proxy.cc
@@ -12,7 +12,6 @@
#include "gpu/ipc/common/command_buffer_id.h"
#include "gpu/ipc/common/gpu_messages.h"
#include "gpu/ipc/common/gpu_param_traits_macros.h"
-#include "mojo/public/cpp/base/shared_memory_utils.h"
#include "ui/gfx/gpu_fence.h"
namespace gpu {
@@ -70,12 +69,14 @@ Mailbox SharedImageInterfaceProxy::CreateSharedImage(
params.usage = usage;
{
base::AutoLock lock(lock_);
+ AddMailbox(params.mailbox, usage);
params.release_id = ++next_release_id_;
// Note: we enqueue the IPC under the lock to guarantee monotonicity of the
// release ids as seen by the service.
last_flush_id_ = host_->EnqueueDeferredMessage(
GpuChannelMsg_CreateSharedImage(route_id_, params));
}
+
return params.mailbox;
}
@@ -117,6 +118,8 @@ Mailbox SharedImageInterfaceProxy::CreateSharedImage(
params.release_id = ++next_release_id_;
last_flush_id_ = host_->EnqueueDeferredMessage(
GpuChannelMsg_CreateSharedImageWithData(route_id_, params));
+
+ AddMailbox(params.mailbox, usage);
return params.mailbox;
}
@@ -163,6 +166,9 @@ Mailbox SharedImageInterfaceProxy::CreateSharedImage(
gpu_memory_buffer_manager->SetDestructionSyncToken(gpu_memory_buffer,
sync_token);
}
+
+ base::AutoLock lock(lock_);
+ AddMailbox(params.mailbox, usage);
return mailbox;
}
@@ -228,6 +234,10 @@ void SharedImageInterfaceProxy::DestroySharedImage(const SyncToken& sync_token,
}
{
base::AutoLock lock(lock_);
+
+ DCHECK_NE(mailbox_to_usage_.count(mailbox), 0u);
+ mailbox_to_usage_.erase(mailbox);
+
last_flush_id_ = host_->EnqueueDeferredMessage(
GpuChannelMsg_DestroySharedImage(route_id_, mailbox),
std::move(dependencies));
@@ -268,7 +278,7 @@ bool SharedImageInterfaceProxy::GetSHMForPixelData(
GetRemainingSize(upload_buffer_, upload_buffer_offset_) <
pixel_data.size()) {
size_t size_to_alloc = std::max(kUploadBufferSize, pixel_data.size());
- auto shm = mojo::CreateReadOnlySharedMemoryRegion(size_to_alloc);
+ auto shm = base::ReadOnlySharedMemoryRegion::Create(size_to_alloc);
if (!shm.IsValid())
return false;
@@ -333,6 +343,10 @@ SharedImageInterfaceProxy::CreateSwapChain(viz::ResourceFormat format,
params.usage = usage;
{
base::AutoLock lock(lock_);
+
+ AddMailbox(params.front_buffer_mailbox, usage);
+ AddMailbox(params.back_buffer_mailbox, usage);
+
params.release_id = ++next_release_id_;
last_flush_id_ = host_->EnqueueDeferredMessage(
GpuChannelMsg_CreateSwapChain(route_id_, params));
@@ -387,4 +401,30 @@ void SharedImageInterfaceProxy::ReleaseSysmemBufferCollection(
}
#endif // defined(OS_FUCHSIA)
+scoped_refptr<gfx::NativePixmap> SharedImageInterfaceProxy::GetNativePixmap(
+ const gpu::Mailbox& mailbox) {
+ // Clients outside of the GPU process cannot obtain the backing NativePixmap
+ // for SharedImages.
+ return nullptr;
+}
+
+void SharedImageInterfaceProxy::AddMailbox(const Mailbox& mailbox,
+ uint32_t usage) {
+ lock_.AssertAcquired();
+
+ DCHECK_EQ(mailbox_to_usage_.count(mailbox), 0u);
+ mailbox_to_usage_[mailbox] = usage;
+}
+
+uint32_t SharedImageInterfaceProxy::UsageForMailbox(const Mailbox& mailbox) {
+ base::AutoLock lock(lock_);
+
+ // The mailbox may have been destroyed if the context on which the shared
+ // image was created is deleted.
+ auto it = mailbox_to_usage_.find(mailbox);
+ if (it == mailbox_to_usage_.end())
+ return 0u;
+ return it->second;
+}
+
} // namespace gpu
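Because the IPC-backed proxy always answers GetNativePixmap() with nullptr, callers outside the GPU process have to treat the pixmap as optional. A small, hypothetical call-site sketch:

#include "gpu/command_buffer/client/shared_image_interface.h"
#include "ui/gfx/native_pixmap.h"

void UseNativePixmapIfAvailable(gpu::SharedImageInterface* sii,
                                const gpu::Mailbox& mailbox) {
  scoped_refptr<gfx::NativePixmap> pixmap = sii->GetNativePixmap(mailbox);
  if (!pixmap) {
    // Expected for ClientSharedImageInterface; fall back to mailbox-based use.
    return;
  }
  // Only in-process interfaces may return a real pixmap here.
}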
diff --git a/chromium/gpu/ipc/client/shared_image_interface_proxy.h b/chromium/gpu/ipc/client/shared_image_interface_proxy.h
index 820aecd3be8..b22b61d3237 100644
--- a/chromium/gpu/ipc/client/shared_image_interface_proxy.h
+++ b/chromium/gpu/ipc/client/shared_image_interface_proxy.h
@@ -15,55 +15,59 @@
namespace gpu {
class GpuChannelHost;
-// Implementation of SharedImageInterface that sends commands over GPU channel
-// IPCs.
-class SharedImageInterfaceProxy : public SharedImageInterface {
+// Proxy that sends commands over GPU channel IPCs for managing shared images.
+class SharedImageInterfaceProxy {
public:
explicit SharedImageInterfaceProxy(GpuChannelHost* host, int32_t route_id);
- ~SharedImageInterfaceProxy() override;
+ ~SharedImageInterfaceProxy();
Mailbox CreateSharedImage(viz::ResourceFormat format,
const gfx::Size& size,
const gfx::ColorSpace& color_space,
- uint32_t usage) override;
+ uint32_t usage);
Mailbox CreateSharedImage(viz::ResourceFormat format,
const gfx::Size& size,
const gfx::ColorSpace& color_space,
uint32_t usage,
- base::span<const uint8_t> pixel_data) override;
+ base::span<const uint8_t> pixel_data);
Mailbox CreateSharedImage(gfx::GpuMemoryBuffer* gpu_memory_buffer,
GpuMemoryBufferManager* gpu_memory_buffer_manager,
const gfx::ColorSpace& color_space,
- uint32_t usage) override;
- void UpdateSharedImage(const SyncToken& sync_token,
- const Mailbox& mailbox) override;
+ uint32_t usage);
+ void UpdateSharedImage(const SyncToken& sync_token, const Mailbox& mailbox);
void UpdateSharedImage(const SyncToken& sync_token,
std::unique_ptr<gfx::GpuFence> acquire_fence,
- const Mailbox& mailbox) override;
+ const Mailbox& mailbox);
- void DestroySharedImage(const SyncToken& sync_token,
- const Mailbox& mailbox) override;
- SyncToken GenVerifiedSyncToken() override;
- SyncToken GenUnverifiedSyncToken() override;
- void Flush() override;
+ void DestroySharedImage(const SyncToken& sync_token, const Mailbox& mailbox);
+ SyncToken GenVerifiedSyncToken();
+ SyncToken GenUnverifiedSyncToken();
+ void Flush();
- SwapChainMailboxes CreateSwapChain(viz::ResourceFormat format,
- const gfx::Size& size,
- const gfx::ColorSpace& color_space,
- uint32_t usage) override;
- void PresentSwapChain(const SyncToken& sync_token,
- const Mailbox& mailbox) override;
+ SharedImageInterface::SwapChainMailboxes CreateSwapChain(
+ viz::ResourceFormat format,
+ const gfx::Size& size,
+ const gfx::ColorSpace& color_space,
+ uint32_t usage);
+ void PresentSwapChain(const SyncToken& sync_token, const Mailbox& mailbox);
#if defined(OS_FUCHSIA)
void RegisterSysmemBufferCollection(gfx::SysmemBufferCollectionId id,
- zx::channel token) override;
- void ReleaseSysmemBufferCollection(gfx::SysmemBufferCollectionId id) override;
+ zx::channel token);
+ void ReleaseSysmemBufferCollection(gfx::SysmemBufferCollectionId id);
#endif // defined(OS_FUCHSIA)
+ scoped_refptr<gfx::NativePixmap> GetNativePixmap(const gpu::Mailbox& mailbox);
+
+ uint32_t UsageForMailbox(const Mailbox& mailbox);
+
private:
bool GetSHMForPixelData(base::span<const uint8_t> pixel_data,
size_t* shm_offset,
bool* done_with_shm) EXCLUSIVE_LOCKS_REQUIRED(lock_);
+ void AddMailbox(const Mailbox& mailbox, uint32_t usage)
+ EXCLUSIVE_LOCKS_REQUIRED(lock_);
+
GpuChannelHost* const host_;
const int32_t route_id_;
base::Lock lock_;
@@ -74,6 +78,8 @@ class SharedImageInterfaceProxy : public SharedImageInterface {
base::MappedReadOnlyRegion upload_buffer_ GUARDED_BY(lock_);
// The offset into |upload_buffer_| at which data is no longer used.
size_t upload_buffer_offset_ GUARDED_BY(lock_) = 0;
+
+ base::flat_map<Mailbox, uint32_t> mailbox_to_usage_ GUARDED_BY(lock_);
};
} // namespace gpu
diff --git a/chromium/gpu/ipc/command_buffer_task_executor.cc b/chromium/gpu/ipc/command_buffer_task_executor.cc
index 9bdab6666ad..3d5e9498935 100644
--- a/chromium/gpu/ipc/command_buffer_task_executor.cc
+++ b/chromium/gpu/ipc/command_buffer_task_executor.cc
@@ -18,33 +18,25 @@ CommandBufferTaskExecutor::CommandBufferTaskExecutor(
const GpuFeatureInfo& gpu_feature_info,
SyncPointManager* sync_point_manager,
MailboxManager* mailbox_manager,
- scoped_refptr<gl::GLShareGroup> share_group,
gl::GLSurfaceFormat share_group_surface_format,
SharedImageManager* shared_image_manager,
- gles2::ProgramCache* program_cache,
- scoped_refptr<SharedContextState> shared_context_state)
+ gles2::ProgramCache* program_cache)
: gpu_preferences_(gpu_preferences),
gpu_feature_info_(gpu_feature_info),
sync_point_manager_(sync_point_manager),
mailbox_manager_(mailbox_manager),
- share_group_(share_group),
share_group_surface_format_(share_group_surface_format),
program_cache_(program_cache),
+ discardable_manager_(gpu_preferences_),
+ passthrough_discardable_manager_(gpu_preferences_),
shader_translator_cache_(gpu_preferences_),
- shared_image_manager_(shared_image_manager),
- shared_context_state_(std::move(shared_context_state)) {
+ shared_image_manager_(shared_image_manager) {
DCHECK(mailbox_manager_);
DCHECK(shared_image_manager_);
}
CommandBufferTaskExecutor::~CommandBufferTaskExecutor() = default;
-scoped_refptr<gl::GLShareGroup> CommandBufferTaskExecutor::share_group() {
- if (!share_group_)
- share_group_ = base::MakeRefCounted<gl::GLShareGroup>();
- return share_group_;
-}
-
gles2::Outputter* CommandBufferTaskExecutor::outputter() {
if (!outputter_) {
outputter_ =
diff --git a/chromium/gpu/ipc/command_buffer_task_executor.h b/chromium/gpu/ipc/command_buffer_task_executor.h
index c9056a3e54a..3b34f04d2d1 100644
--- a/chromium/gpu/ipc/command_buffer_task_executor.h
+++ b/chromium/gpu/ipc/command_buffer_task_executor.h
@@ -43,16 +43,13 @@ class ProgramCache;
// the GPU thread used by InProcessCommandBuffer.
class GL_IN_PROCESS_CONTEXT_EXPORT CommandBufferTaskExecutor {
public:
- CommandBufferTaskExecutor(
- const GpuPreferences& gpu_preferences,
- const GpuFeatureInfo& gpu_feature_info,
- SyncPointManager* sync_point_manager,
- MailboxManager* mailbox_manager,
- scoped_refptr<gl::GLShareGroup> share_group,
- gl::GLSurfaceFormat share_group_surface_format,
- SharedImageManager* shared_image_manager,
- gles2::ProgramCache* program_cache,
- scoped_refptr<SharedContextState> shared_context_state);
+ CommandBufferTaskExecutor(const GpuPreferences& gpu_preferences,
+ const GpuFeatureInfo& gpu_feature_info,
+ SyncPointManager* sync_point_manager,
+ MailboxManager* mailbox_manager,
+ gl::GLSurfaceFormat share_group_surface_format,
+ SharedImageManager* shared_image_manager,
+ gles2::ProgramCache* program_cache);
virtual ~CommandBufferTaskExecutor();
// Always use virtualized GL contexts if this returns true.
@@ -74,6 +71,11 @@ class GL_IN_PROCESS_CONTEXT_EXPORT CommandBufferTaskExecutor {
// Called if InProcessCommandBuffer is not passed a client TaskRunner.
virtual void PostNonNestableToClient(base::OnceClosure callback) = 0;
+ // Returns the shared offscreen context state.
+ virtual scoped_refptr<SharedContextState> GetSharedContextState() = 0;
+
+ virtual scoped_refptr<gl::GLShareGroup> GetShareGroup() = 0;
+
const GpuPreferences& gpu_preferences() const { return gpu_preferences_; }
const GpuFeatureInfo& gpu_feature_info() const { return gpu_feature_info_; }
gl::GLSurfaceFormat share_group_surface_format() const {
@@ -98,12 +100,7 @@ class GL_IN_PROCESS_CONTEXT_EXPORT CommandBufferTaskExecutor {
}
SharedImageManager* shared_image_manager() { return shared_image_manager_; }
- scoped_refptr<SharedContextState> shared_context_state() {
- return shared_context_state_;
- }
-
// These methods construct accessed fields if not already initialized.
- scoped_refptr<gl::GLShareGroup> share_group();
gles2::Outputter* outputter();
gles2::ProgramCache* program_cache();
@@ -113,7 +110,6 @@ class GL_IN_PROCESS_CONTEXT_EXPORT CommandBufferTaskExecutor {
SyncPointManager* sync_point_manager_;
MailboxManager* mailbox_manager_;
std::unique_ptr<gles2::Outputter> outputter_;
- scoped_refptr<gl::GLShareGroup> share_group_;
gl::GLSurfaceFormat share_group_surface_format_;
std::unique_ptr<gles2::ProgramCache> owned_program_cache_;
gles2::ProgramCache* program_cache_;
@@ -123,7 +119,6 @@ class GL_IN_PROCESS_CONTEXT_EXPORT CommandBufferTaskExecutor {
gles2::ShaderTranslatorCache shader_translator_cache_;
gles2::FramebufferCompletenessCache framebuffer_completeness_cache_;
SharedImageManager* shared_image_manager_;
- const scoped_refptr<SharedContextState> shared_context_state_;
// No-op default initialization is used in in-process mode.
GpuProcessActivityFlags activity_flags_;
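With this change CommandBufferTaskExecutor no longer owns a GLShareGroup or SharedContextState; both are obtained through the new pure virtuals GetShareGroup() and GetSharedContextState(). GpuInProcessThreadService, further down in this diff, satisfies them by forwarding to a delegate. A minimal sketch of that pattern, using a hypothetical MyTaskExecutor and delegate_ member:

// Sketch only: an executor fulfilling the new accessors by delegating to the
// object that actually owns the shared GL state (|delegate_| is hypothetical).
scoped_refptr<SharedContextState> MyTaskExecutor::GetSharedContextState() {
  return delegate_->GetSharedContextState();
}

scoped_refptr<gl::GLShareGroup> MyTaskExecutor::GetShareGroup() {
  return delegate_->GetShareGroup();
}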
diff --git a/chromium/gpu/ipc/common/BUILD.gn b/chromium/gpu/ipc/common/BUILD.gn
index 4f885a6d4eb..40f74b8d82f 100644
--- a/chromium/gpu/ipc/common/BUILD.gn
+++ b/chromium/gpu/ipc/common/BUILD.gn
@@ -9,25 +9,17 @@ import("//ui/ozone/ozone.gni")
group("common") {
if (is_component_build) {
- public_deps = [
- "//gpu",
- ]
+ public_deps = [ "//gpu" ]
} else {
- public_deps = [
- ":ipc_common_sources",
- ]
+ public_deps = [ ":ipc_common_sources" ]
}
}
group("command_buffer_traits") {
if (is_component_build) {
- public_deps = [
- "//gpu",
- ]
+ public_deps = [ "//gpu" ]
} else {
- public_deps = [
- ":command_buffer_traits_sources",
- ]
+ public_deps = [ ":command_buffer_traits_sources" ]
}
}
@@ -82,6 +74,7 @@ source_set("ipc_common_sources") {
"gpu_param_traits.cc",
"gpu_param_traits.h",
"gpu_param_traits_macros.h",
+ "gpu_peak_memory.h",
"gpu_watchdog_timeout.h",
"memory_stats.cc",
"memory_stats.h",
@@ -180,13 +173,9 @@ if (is_android) {
# Depend on this to use surface_handle.h without pulling in all of gpu ipc.
source_set("surface_handle_type") {
- public = [
- "surface_handle.h",
- ]
+ public = [ "surface_handle.h" ]
- public_deps = [
- "//ui/gfx:native_widget_types",
- ]
+ public_deps = [ "//ui/gfx:native_widget_types" ]
}
component("vulkan_ycbcr_info") {
@@ -194,19 +183,13 @@ component("vulkan_ycbcr_info") {
"vulkan_ycbcr_info.cc",
"vulkan_ycbcr_info.h",
]
- deps = [
- "//base",
- ]
+ deps = [ "//base" ]
configs += [ "//gpu:gpu_implementation" ]
}
source_set("vulkan_types") {
- sources = [
- "vulkan_types.h",
- ]
- public_deps = [
- "//ui/gfx",
- ]
+ sources = [ "vulkan_types.h" ]
+ public_deps = [ "//ui/gfx" ]
all_dependent_configs = [ "//third_party/vulkan:vulkan_config" ]
configs += [ "//gpu:gpu_implementation" ]
}
@@ -216,10 +199,12 @@ mojom("interfaces") {
sources = [
"capabilities.mojom",
"context_result.mojom",
+ "device_perf_info.mojom",
"dx_diag_node.mojom",
"gpu_extra_info.mojom",
"gpu_feature_info.mojom",
"gpu_info.mojom",
+ "gpu_peak_memory.mojom",
"mailbox.mojom",
"mailbox_holder.mojom",
"memory_stats.mojom",
@@ -234,17 +219,23 @@ mojom("interfaces") {
"//ui/gfx/geometry/mojom",
"//ui/gfx/mojom",
]
+ enabled_features = []
if (enable_vulkan) {
public_deps += [ ":vulkan_interface" ]
- enabled_features = [ "supports_vulkan" ]
+ enabled_features += [ "supports_vulkan" ]
}
+ if (use_x11) {
+ enabled_features += [ "use_x11" ]
+ }
+
+ export_class_attribute_blink = "BLINK_PLATFORM_EXPORT"
+ export_define_blink = "BLINK_PLATFORM_IMPLEMENTATION=1"
+ export_header_blink = "third_party/blink/public/platform/web_common.h"
}
mojom("gpu_preferences_interface") {
generate_java = true
- sources = [
- "gpu_preferences.mojom",
- ]
+ sources = [ "gpu_preferences.mojom" ]
public_deps = [
"//mojo/public/mojom/base",
@@ -259,23 +250,27 @@ mojom("gpu_preferences_interface") {
mojom("vulkan_interface") {
generate_java = true
+
+ # TODO(crbug.com/1062364): This interface code is used by some javascript targets even
+ # when vulkan isn't enabled, but the C++ fuzzer code will fail to compile if
+ # the headers aren't available.
+ if (!enable_vulkan) {
+ enable_fuzzing = false
+ }
+
sources = [
"vulkan_info.mojom",
"vulkan_types.mojom",
]
- public_deps = [
- "//mojo/public/mojom/base",
- ]
+ public_deps = [ "//mojo/public/mojom/base" ]
js_generate_struct_deserializers = true
}
mojom("test_interfaces") {
testonly = true
- sources = [
- "traits_test_service.mojom",
- ]
+ sources = [ "traits_test_service.mojom" ]
public_deps = [
":gpu_preferences_interface",
@@ -288,9 +283,7 @@ mojom("test_interfaces") {
}
source_set("vulkan_types_mojom_traits") {
- sources = [
- "vulkan_types_mojom_traits.h",
- ]
+ sources = [ "vulkan_types_mojom_traits.h" ]
deps = [
":vulkan_interface_shared_cpp_sources",
@@ -301,6 +294,7 @@ source_set("vulkan_types_mojom_traits") {
source_set("mojom_traits") {
sources = [
"context_result_mojom_traits.h",
+ "gpu_peak_memory_mojom_traits.h",
"mailbox_holder_mojom_traits.h",
"mailbox_mojom_traits.h",
"memory_stats_mojom_traits.h",
diff --git a/chromium/gpu/ipc/common/device_perf_info.mojom b/chromium/gpu/ipc/common/device_perf_info.mojom
new file mode 100644
index 00000000000..de7b95826f8
--- /dev/null
+++ b/chromium/gpu/ipc/common/device_perf_info.mojom
@@ -0,0 +1,40 @@
+// Copyright 2020 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+module gpu.mojom;
+
+// Corresponds to D3D_FEATURE_LEVEL in <d3dcommon.h>
+enum Direct3DFeatureLevel {
+ k1_0_Core,
+ k9_1,
+ k9_2,
+ k9_3,
+ k10_0,
+ k10_1,
+ k11_0,
+ k11_1,
+ k12_0,
+ k12_1,
+};
+
+// Corresponds to |gpu::HasDiscreteGpu| in "gpu/config/device_perf_info.h"
+enum HasDiscreteGpu {
+ kUnknown,
+ kNo,
+ kYes,
+};
+
+// Corresponds to |gpu::DevicePerfInfo| in "gpu/config/device_perf_info.h"
+struct DevicePerfInfo {
+ uint32 total_physical_memory_mb;
+ uint32 total_disk_space_mb;
+ uint32 hardware_concurrency;
+
+ [EnableIf=is_win]
+ uint32 system_commit_limit_mb;
+ [EnableIf=is_win]
+ Direct3DFeatureLevel d3d11_feature_level;
+ [EnableIf=is_win]
+ HasDiscreteGpu has_discrete_gpu;
+};
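The [EnableIf=is_win] fields above only exist in the generated mojom struct on Windows builds, mirroring #if defined(OS_WIN) members on the C++ side. gpu/config/device_perf_info.h is not part of this diff, so the following is only an assumed shape of gpu::DevicePerfInfo, inferred from the mojom definition and the traits defined below:

// Assumption: approximate shape of gpu::DevicePerfInfo, reconstructed from the
// mojom struct above and the StructTraits accessors further down.
struct DevicePerfInfo {
  uint32_t total_physical_memory_mb = 0;
  uint32_t total_disk_space_mb = 0;
  uint32_t hardware_concurrency = 0;
#if defined(OS_WIN)
  uint32_t system_commit_limit_mb = 0;
  D3D_FEATURE_LEVEL d3d11_feature_level = D3D_FEATURE_LEVEL_1_0_CORE;
  HasDiscreteGpu has_discrete_gpu = HasDiscreteGpu::kUnknown;
#endif
};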
diff --git a/chromium/gpu/ipc/common/device_perf_info.typemap b/chromium/gpu/ipc/common/device_perf_info.typemap
new file mode 100644
index 00000000000..6259f3261f0
--- /dev/null
+++ b/chromium/gpu/ipc/common/device_perf_info.typemap
@@ -0,0 +1,13 @@
+# Copyright 2020 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+mojom = "//gpu/ipc/common/device_perf_info.mojom"
+public_headers = [ "//gpu/config/device_perf_info.h" ]
+traits_headers = [ "//gpu/ipc/common/device_perf_info_mojom_traits.h" ]
+sources = [ "//gpu/ipc/common/device_perf_info_mojom_traits.cc" ]
+type_mappings = [
+ "gpu.mojom.IntelGpuGeneration=::gpu::IntelGpuGeneration",
+ "gpu.mojom.D3D_FEATURE_LEVEL=::D3D_FEATURE_LEVEL",
+ "gpu.mojom.DevicePerfInfo=::gpu::DevicePerfInfo",
+]
diff --git a/chromium/gpu/ipc/common/device_perf_info_mojom_traits.cc b/chromium/gpu/ipc/common/device_perf_info_mojom_traits.cc
new file mode 100644
index 00000000000..e4e29e70f5f
--- /dev/null
+++ b/chromium/gpu/ipc/common/device_perf_info_mojom_traits.cc
@@ -0,0 +1,131 @@
+// Copyright 2020 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "gpu/ipc/common/device_perf_info_mojom_traits.h"
+
+namespace mojo {
+
+#if defined(OS_WIN)
+// static
+gpu::mojom::Direct3DFeatureLevel
+EnumTraits<gpu::mojom::Direct3DFeatureLevel, D3D_FEATURE_LEVEL>::ToMojom(
+ D3D_FEATURE_LEVEL d3d_feature_level) {
+ switch (d3d_feature_level) {
+ case D3D_FEATURE_LEVEL_1_0_CORE:
+ return gpu::mojom::Direct3DFeatureLevel::k1_0_Core;
+ case D3D_FEATURE_LEVEL_9_1:
+ return gpu::mojom::Direct3DFeatureLevel::k9_1;
+ case D3D_FEATURE_LEVEL_9_2:
+ return gpu::mojom::Direct3DFeatureLevel::k9_2;
+ case D3D_FEATURE_LEVEL_9_3:
+ return gpu::mojom::Direct3DFeatureLevel::k9_3;
+ case D3D_FEATURE_LEVEL_10_0:
+ return gpu::mojom::Direct3DFeatureLevel::k10_0;
+ case D3D_FEATURE_LEVEL_10_1:
+ return gpu::mojom::Direct3DFeatureLevel::k10_1;
+ case D3D_FEATURE_LEVEL_11_0:
+ return gpu::mojom::Direct3DFeatureLevel::k11_0;
+ case D3D_FEATURE_LEVEL_11_1:
+ return gpu::mojom::Direct3DFeatureLevel::k11_1;
+ case D3D_FEATURE_LEVEL_12_0:
+ return gpu::mojom::Direct3DFeatureLevel::k12_0;
+ case D3D_FEATURE_LEVEL_12_1:
+ return gpu::mojom::Direct3DFeatureLevel::k12_1;
+ }
+ NOTREACHED() << "Invalid D3D_FEATURE_LEVEL:" << d3d_feature_level;
+ return gpu::mojom::Direct3DFeatureLevel::k1_0_Core;
+}
+
+// static
+bool EnumTraits<gpu::mojom::Direct3DFeatureLevel, D3D_FEATURE_LEVEL>::FromMojom(
+ gpu::mojom::Direct3DFeatureLevel input,
+ D3D_FEATURE_LEVEL* out) {
+ switch (input) {
+ case gpu::mojom::Direct3DFeatureLevel::k1_0_Core:
+ *out = D3D_FEATURE_LEVEL_1_0_CORE;
+ return true;
+ case gpu::mojom::Direct3DFeatureLevel::k9_1:
+ *out = D3D_FEATURE_LEVEL_9_1;
+ return true;
+ case gpu::mojom::Direct3DFeatureLevel::k9_2:
+ *out = D3D_FEATURE_LEVEL_9_2;
+ return true;
+ case gpu::mojom::Direct3DFeatureLevel::k9_3:
+ *out = D3D_FEATURE_LEVEL_9_3;
+ return true;
+ case gpu::mojom::Direct3DFeatureLevel::k10_0:
+ *out = D3D_FEATURE_LEVEL_10_0;
+ return true;
+ case gpu::mojom::Direct3DFeatureLevel::k10_1:
+ *out = D3D_FEATURE_LEVEL_10_1;
+ return true;
+ case gpu::mojom::Direct3DFeatureLevel::k11_0:
+ *out = D3D_FEATURE_LEVEL_11_0;
+ return true;
+ case gpu::mojom::Direct3DFeatureLevel::k11_1:
+ *out = D3D_FEATURE_LEVEL_11_1;
+ return true;
+ case gpu::mojom::Direct3DFeatureLevel::k12_0:
+ *out = D3D_FEATURE_LEVEL_12_0;
+ return true;
+ case gpu::mojom::Direct3DFeatureLevel::k12_1:
+ *out = D3D_FEATURE_LEVEL_12_1;
+ return true;
+ }
+ NOTREACHED() << "Invalid D3D_FEATURE_LEVEL: " << input;
+ return false;
+}
+#endif // OS_WIN
+
+gpu::mojom::HasDiscreteGpu
+EnumTraits<gpu::mojom::HasDiscreteGpu, gpu::HasDiscreteGpu>::ToMojom(
+ gpu::HasDiscreteGpu has_discrete_gpu) {
+ switch (has_discrete_gpu) {
+ case gpu::HasDiscreteGpu::kUnknown:
+ return gpu::mojom::HasDiscreteGpu::kUnknown;
+ case gpu::HasDiscreteGpu::kNo:
+ return gpu::mojom::HasDiscreteGpu::kNo;
+ case gpu::HasDiscreteGpu::kYes:
+ return gpu::mojom::HasDiscreteGpu::kYes;
+ }
+ NOTREACHED() << "Invalid gpu::HasDiscreteGpu: "
+ << static_cast<int>(has_discrete_gpu);
+ return gpu::mojom::HasDiscreteGpu::kUnknown;
+}
+
+// static
+bool EnumTraits<gpu::mojom::HasDiscreteGpu, gpu::HasDiscreteGpu>::FromMojom(
+ gpu::mojom::HasDiscreteGpu input,
+ gpu::HasDiscreteGpu* out) {
+ switch (input) {
+ case gpu::mojom::HasDiscreteGpu::kUnknown:
+ *out = gpu::HasDiscreteGpu::kUnknown;
+ return true;
+ case gpu::mojom::HasDiscreteGpu::kNo:
+ *out = gpu::HasDiscreteGpu::kNo;
+ return true;
+ case gpu::mojom::HasDiscreteGpu::kYes:
+ *out = gpu::HasDiscreteGpu::kYes;
+ return true;
+ }
+ NOTREACHED() << "Invalid gpu::mojom::HasDiscreteGpu: " << input;
+ return false;
+}
+
+// static
+bool StructTraits<gpu::mojom::DevicePerfInfoDataView, gpu::DevicePerfInfo>::
+ Read(gpu::mojom::DevicePerfInfoDataView data, gpu::DevicePerfInfo* out) {
+ out->total_physical_memory_mb = data.total_physical_memory_mb();
+ out->total_disk_space_mb = data.total_disk_space_mb();
+ out->hardware_concurrency = data.hardware_concurrency();
+ bool rt = true;
+#if defined(OS_WIN)
+ out->system_commit_limit_mb = data.system_commit_limit_mb();
+ rt &= data.ReadD3d11FeatureLevel(&out->d3d11_feature_level);
+ rt &= data.ReadHasDiscreteGpu(&out->has_discrete_gpu);
+#endif // OS_WIN
+ return rt;
+}
+
+} // namespace mojo
diff --git a/chromium/gpu/ipc/common/device_perf_info_mojom_traits.h b/chromium/gpu/ipc/common/device_perf_info_mojom_traits.h
new file mode 100644
index 00000000000..cb0da52a86a
--- /dev/null
+++ b/chromium/gpu/ipc/common/device_perf_info_mojom_traits.h
@@ -0,0 +1,67 @@
+// Copyright 2020 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef GPU_IPC_COMMON_DEVICE_PERF_INFO_MOJOM_TRAITS_H_
+#define GPU_IPC_COMMON_DEVICE_PERF_INFO_MOJOM_TRAITS_H_
+
+#include "gpu/ipc/common/device_perf_info.mojom.h"
+
+#include "build/build_config.h"
+
+namespace mojo {
+
+#if defined(OS_WIN)
+template <>
+struct EnumTraits<gpu::mojom::Direct3DFeatureLevel, D3D_FEATURE_LEVEL> {
+ static gpu::mojom::Direct3DFeatureLevel ToMojom(
+ D3D_FEATURE_LEVEL d3d_feature_level);
+ static bool FromMojom(gpu::mojom::Direct3DFeatureLevel input,
+ D3D_FEATURE_LEVEL* out);
+};
+#endif // OS_WIN
+
+template <>
+struct EnumTraits<gpu::mojom::HasDiscreteGpu, gpu::HasDiscreteGpu> {
+ static gpu::mojom::HasDiscreteGpu ToMojom(
+ gpu::HasDiscreteGpu has_discrete_gpu);
+ static bool FromMojom(gpu::mojom::HasDiscreteGpu input,
+ gpu::HasDiscreteGpu* out);
+};
+
+template <>
+struct StructTraits<gpu::mojom::DevicePerfInfoDataView, gpu::DevicePerfInfo> {
+ static bool Read(gpu::mojom::DevicePerfInfoDataView data,
+ gpu::DevicePerfInfo* out);
+
+ static uint32_t total_physical_memory_mb(const gpu::DevicePerfInfo& info) {
+ return info.total_physical_memory_mb;
+ }
+
+ static uint32_t total_disk_space_mb(const gpu::DevicePerfInfo& info) {
+ return info.total_disk_space_mb;
+ }
+
+ static uint32_t hardware_concurrency(const gpu::DevicePerfInfo& info) {
+ return info.hardware_concurrency;
+ }
+
+#if defined(OS_WIN)
+ static uint32_t system_commit_limit_mb(const gpu::DevicePerfInfo& info) {
+ return info.system_commit_limit_mb;
+ }
+
+ static D3D_FEATURE_LEVEL d3d11_feature_level(
+ const gpu::DevicePerfInfo& info) {
+ return info.d3d11_feature_level;
+ }
+
+ static gpu::HasDiscreteGpu has_discrete_gpu(const gpu::DevicePerfInfo& info) {
+ return info.has_discrete_gpu;
+ }
+#endif
+};
+
+} // namespace mojo
+
+#endif // GPU_IPC_COMMON_DEVICE_PERF_INFO_MOJOM_TRAITS_H_
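Mojo invokes the EnumTraits/StructTraits above automatically whenever a gpu.mojom.DevicePerfInfo crosses a message pipe, but they are ordinary static functions and can be exercised directly. A small illustrative round trip of the HasDiscreteGpu enum through the traits defined in this file:

// Illustrative only: a manual round trip through the traits defined above.
void ExampleHasDiscreteGpuRoundTrip() {
  gpu::HasDiscreteGpu in = gpu::HasDiscreteGpu::kYes;
  gpu::mojom::HasDiscreteGpu wire =
      mojo::EnumTraits<gpu::mojom::HasDiscreteGpu, gpu::HasDiscreteGpu>::ToMojom(in);
  gpu::HasDiscreteGpu out = gpu::HasDiscreteGpu::kUnknown;
  bool ok =
      mojo::EnumTraits<gpu::mojom::HasDiscreteGpu, gpu::HasDiscreteGpu>::FromMojom(
          wire, &out);
  // ok is true and out == gpu::HasDiscreteGpu::kYes.
}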
diff --git a/chromium/gpu/ipc/common/generate_vulkan_types.py b/chromium/gpu/ipc/common/generate_vulkan_types.py
index ef27fab9b6e..dc49332d5e4 100755
--- a/chromium/gpu/ipc/common/generate_vulkan_types.py
+++ b/chromium/gpu/ipc/common/generate_vulkan_types.py
@@ -284,6 +284,22 @@ module gpu.mojom;
WriteMojomTypes(_STRUCTS, mojom_file)
+def NormalizedCamelCase(identifier):
+ result = identifier[0].upper()
+ lowercase_next = True
+ for i in range(1, len(identifier)):
+ if identifier[i].isupper():
+ if lowercase_next:
+ result += identifier[i].lower()
+ else:
+ result += identifier[i]
+ lowercase_next = True
+ else:
+ lowercase_next = False
+ result += identifier[i]
+ return result
+
+
def WriteStructTraits(name, traits_header_file, traits_source_file):
traits_header_file.write(
"""
@@ -350,7 +366,7 @@ bool StructTraits<gpu::mojom::%sDataView, %s>::Read(
if field_type == "char":
assert array_len
- read_method = "Read%s%s" % (field_name[0].upper(), field_name[1:])
+ read_method = "Read%s" % (NormalizedCamelCase(field_name))
traits_source_file.write(
"""
base::StringPiece %s;
@@ -359,7 +375,7 @@ bool StructTraits<gpu::mojom::%sDataView, %s>::Read(
%s.copy(out->%s, sizeof(out->%s));
""" % (field_name, read_method, field_name, field_name, field_name, field_name))
elif array_len:
- read_method = "Read%s%s" % (field_name[0].upper(), field_name[1:])
+ read_method = "Read%s" % (NormalizedCamelCase(field_name))
traits_source_file.write(
"""
base::span<%s> %s(out->%s);
@@ -369,9 +385,9 @@ bool StructTraits<gpu::mojom::%sDataView, %s>::Read(
elif field_type in _structs or field_type in _enums:
traits_source_file.write(
"""
- if (!data.Read%s%s(&out->%s))
+ if (!data.Read%s(&out->%s))
return false;
-""" % (field_name[0].upper(), field_name[1:], field_name))
+""" % (NormalizedCamelCase(field_name), field_name))
else:
traits_source_file.write(
"""
diff --git a/chromium/gpu/ipc/common/gpu_command_buffer_traits_multi.h b/chromium/gpu/ipc/common/gpu_command_buffer_traits_multi.h
index bc89051424c..e2138c149a4 100644
--- a/chromium/gpu/ipc/common/gpu_command_buffer_traits_multi.h
+++ b/chromium/gpu/ipc/common/gpu_command_buffer_traits_multi.h
@@ -29,6 +29,7 @@ IPC_ENUM_TRAITS_MIN_MAX_VALUE(
IPC_ENUM_TRAITS_MAX_VALUE(gl::GpuPreference, gl::GpuPreference::kMaxValue)
IPC_ENUM_TRAITS_MAX_VALUE(gpu::ContextType, gpu::CONTEXT_TYPE_LAST)
IPC_ENUM_TRAITS_MAX_VALUE(gpu::ColorSpace, gpu::COLOR_SPACE_LAST)
+IPC_ENUM_TRAITS_MAX_VALUE(gfx::SurfaceOrigin, gfx::SurfaceOrigin::kBottomLeft)
IPC_STRUCT_TRAITS_BEGIN(gpu::Capabilities::ShaderPrecision)
IPC_STRUCT_TRAITS_MEMBER(min_range)
@@ -124,24 +125,22 @@ IPC_STRUCT_TRAITS_BEGIN(gpu::Capabilities)
IPC_STRUCT_TRAITS_MEMBER(image_ycbcr_422)
IPC_STRUCT_TRAITS_MEMBER(image_ycbcr_420v)
IPC_STRUCT_TRAITS_MEMBER(image_ycbcr_420v_disabled_for_video_frames)
- IPC_STRUCT_TRAITS_MEMBER(image_xr30)
- IPC_STRUCT_TRAITS_MEMBER(image_xb30)
+ IPC_STRUCT_TRAITS_MEMBER(image_ar30)
+ IPC_STRUCT_TRAITS_MEMBER(image_ab30)
IPC_STRUCT_TRAITS_MEMBER(render_buffer_format_bgra8888)
IPC_STRUCT_TRAITS_MEMBER(occlusion_query)
IPC_STRUCT_TRAITS_MEMBER(occlusion_query_boolean)
IPC_STRUCT_TRAITS_MEMBER(timer_queries)
IPC_STRUCT_TRAITS_MEMBER(surfaceless)
- IPC_STRUCT_TRAITS_MEMBER(flips_vertically)
+ IPC_STRUCT_TRAITS_MEMBER(surface_origin)
IPC_STRUCT_TRAITS_MEMBER(msaa_is_slow)
IPC_STRUCT_TRAITS_MEMBER(disable_one_component_textures)
IPC_STRUCT_TRAITS_MEMBER(gpu_rasterization)
IPC_STRUCT_TRAITS_MEMBER(chromium_image_rgb_emulation)
IPC_STRUCT_TRAITS_MEMBER(dc_layers)
- IPC_STRUCT_TRAITS_MEMBER(use_dc_overlays_for_video)
IPC_STRUCT_TRAITS_MEMBER(protected_video_swap_chain)
IPC_STRUCT_TRAITS_MEMBER(gpu_vsync)
IPC_STRUCT_TRAITS_MEMBER(shared_image_swap_chain)
- IPC_STRUCT_TRAITS_MEMBER(disable_non_empty_post_sub_buffers)
IPC_STRUCT_TRAITS_MEMBER(avoid_stencil_buffers)
IPC_STRUCT_TRAITS_MEMBER(disable_2d_canvas_copy_on_write)
IPC_STRUCT_TRAITS_MEMBER(texture_npot)
@@ -152,7 +151,6 @@ IPC_STRUCT_TRAITS_BEGIN(gpu::Capabilities)
IPC_STRUCT_TRAITS_MEMBER(separate_stencil_ref_mask_writemask)
IPC_STRUCT_TRAITS_MEMBER(use_gpu_fences_for_overlay_planes)
IPC_STRUCT_TRAITS_MEMBER(context_supports_distance_field_text)
- IPC_STRUCT_TRAITS_MEMBER(glyph_cache_max_texture_bytes)
IPC_STRUCT_TRAITS_MEMBER(chromium_nonblocking_readback)
IPC_STRUCT_TRAITS_MEMBER(mesa_framebuffer_flip_y)
@@ -194,6 +192,7 @@ IPC_STRUCT_TRAITS_BEGIN(gpu::ContextCreationAttribs)
IPC_STRUCT_TRAITS_MEMBER(single_buffer)
IPC_STRUCT_TRAITS_MEMBER(color_space)
IPC_STRUCT_TRAITS_MEMBER(enable_gles2_interface)
+ IPC_STRUCT_TRAITS_MEMBER(enable_grcontext)
IPC_STRUCT_TRAITS_MEMBER(enable_raster_interface)
IPC_STRUCT_TRAITS_MEMBER(enable_oop_rasterization)
IPC_STRUCT_TRAITS_MEMBER(enable_swap_timestamps_if_supported)
diff --git a/chromium/gpu/ipc/common/gpu_extra_info.mojom b/chromium/gpu/ipc/common/gpu_extra_info.mojom
index f3f45ce281f..556fa80e4b6 100644
--- a/chromium/gpu/ipc/common/gpu_extra_info.mojom
+++ b/chromium/gpu/ipc/common/gpu_extra_info.mojom
@@ -5,6 +5,8 @@
// gpu/config/gpu_extra_info.h
module gpu.mojom;
+import "ui/gfx/mojom/buffer_types.mojom";
+
// gpu::ANGLEFeature
struct ANGLEFeature {
string name;
@@ -19,4 +21,11 @@ struct ANGLEFeature {
struct GpuExtraInfo {
// List of features queried from ANGLE
array<ANGLEFeature> angle_features;
+
+ [EnableIf=use_x11]
+ uint64 system_visual;
+ [EnableIf=use_x11]
+ uint64 rgba_visual;
+ [EnableIf=use_x11]
+ array<gfx.mojom.BufferUsageAndFormat> gpu_memory_buffer_support_x11;
};
diff --git a/chromium/gpu/ipc/common/gpu_extra_info_mojom_traits.cc b/chromium/gpu/ipc/common/gpu_extra_info_mojom_traits.cc
index 19d27d8c313..fe765a71caa 100644
--- a/chromium/gpu/ipc/common/gpu_extra_info_mojom_traits.cc
+++ b/chromium/gpu/ipc/common/gpu_extra_info_mojom_traits.cc
@@ -3,7 +3,9 @@
// found in the LICENSE file.
#include "gpu/ipc/common/gpu_extra_info_mojom_traits.h"
+
#include "build/build_config.h"
+#include "ui/gfx/mojom/buffer_types_mojom_traits.h"
namespace mojo {
@@ -20,7 +22,15 @@ bool StructTraits<gpu::mojom::ANGLEFeatureDataView, gpu::ANGLEFeature>::Read(
bool StructTraits<gpu::mojom::GpuExtraInfoDataView, gpu::GpuExtraInfo>::Read(
gpu::mojom::GpuExtraInfoDataView data,
gpu::GpuExtraInfo* out) {
- return data.ReadAngleFeatures(&out->angle_features);
+ if (!data.ReadAngleFeatures(&out->angle_features))
+ return false;
+#if defined(USE_X11)
+ out->system_visual = data.system_visual();
+ out->rgba_visual = data.rgba_visual();
+ if (!data.ReadGpuMemoryBufferSupportX11(&out->gpu_memory_buffer_support_x11))
+ return false;
+#endif
+ return true;
}
} // namespace mojo
diff --git a/chromium/gpu/ipc/common/gpu_extra_info_mojom_traits.h b/chromium/gpu/ipc/common/gpu_extra_info_mojom_traits.h
index 9fd5c226490..cc80a9c35c1 100644
--- a/chromium/gpu/ipc/common/gpu_extra_info_mojom_traits.h
+++ b/chromium/gpu/ipc/common/gpu_extra_info_mojom_traits.h
@@ -50,6 +50,21 @@ struct StructTraits<gpu::mojom::GpuExtraInfoDataView, gpu::GpuExtraInfo> {
const gpu::GpuExtraInfo& input) {
return input.angle_features;
}
+
+#if defined(USE_X11)
+ static uint64_t system_visual(const gpu::GpuExtraInfo& input) {
+ return input.system_visual;
+ }
+
+ static uint64_t rgba_visual(const gpu::GpuExtraInfo& input) {
+ return input.rgba_visual;
+ }
+
+ static const std::vector<gfx::BufferUsageAndFormat>&
+ gpu_memory_buffer_support_x11(const gpu::GpuExtraInfo& input) {
+ return input.gpu_memory_buffer_support_x11;
+ }
+#endif
};
} // namespace mojo
diff --git a/chromium/gpu/ipc/common/gpu_info.mojom b/chromium/gpu/ipc/common/gpu_info.mojom
index 9942b5eb9da..82d120931a8 100644
--- a/chromium/gpu/ipc/common/gpu_info.mojom
+++ b/chromium/gpu/ipc/common/gpu_info.mojom
@@ -124,6 +124,15 @@ struct Dx12VulkanVersionInfo {
uint32 vulkan_version;
};
+// gpu::OverlayInfo
+[EnableIf=is_win]
+struct OverlayInfo {
+ bool direct_composition;
+ bool supports_overlays;
+ OverlaySupport yuy2_overlay_support;
+ OverlaySupport nv12_overlay_support;
+};
+
// Corresponds to |gpu::GPUInfo| in gpu/config/gpu_info.h
struct GpuInfo {
mojo_base.mojom.TimeDelta initialization_time;
@@ -151,18 +160,15 @@ struct GpuInfo {
bool passthrough_cmd_decoder;
bool can_support_threaded_texture_mailbox;
- [EnableIf=is_win]
- bool direct_composition;
- [EnableIf=is_win]
- bool supports_overlays;
- [EnableIf=is_win]
- OverlaySupport yuy2_overlay_support;
- [EnableIf=is_win]
- OverlaySupport nv12_overlay_support;
+ [EnableIf=is_mac]
+ uint32 macos_specific_texture_target;
+
[EnableIf=is_win]
DxDiagNode dx_diagnostics;
[EnableIf=is_win]
Dx12VulkanVersionInfo dx12_vulkan_version_info;
+ [EnableIf=is_win]
+ OverlayInfo overlay_info;
VideoDecodeAcceleratorCapabilities video_decode_accelerator_capabilities;
array<VideoEncodeAcceleratorSupportedProfile>
@@ -172,8 +178,6 @@ struct GpuInfo {
array<ImageDecodeAcceleratorSupportedProfile>
image_decode_accelerator_supported_profiles;
- uint64 system_visual;
- uint64 rgba_visual;
bool oop_rasterization_supported;
bool subpixel_font_rendering;
diff --git a/chromium/gpu/ipc/common/gpu_info.typemap b/chromium/gpu/ipc/common/gpu_info.typemap
index dfc6ea1dd37..4c035e6e388 100644
--- a/chromium/gpu/ipc/common/gpu_info.typemap
+++ b/chromium/gpu/ipc/common/gpu_info.typemap
@@ -5,9 +5,7 @@
mojom = "//gpu/ipc/common/gpu_info.mojom"
public_headers = [ "//gpu/config/gpu_info.h" ]
traits_headers = [ "//gpu/ipc/common/gpu_info_mojom_traits.h" ]
-sources = [
- "//gpu/ipc/common/gpu_info_mojom_traits.cc",
-]
+sources = [ "//gpu/ipc/common/gpu_info_mojom_traits.cc" ]
public_deps = [
"//gpu/config",
"//ui/gfx/geometry/mojom",
@@ -15,6 +13,7 @@ public_deps = [
type_mappings = [
"gpu.mojom.CollectInfoResult=::gpu::CollectInfoResult",
"gpu.mojom.Dx12VulkanVersionInfo=::gpu::Dx12VulkanVersionInfo",
+ "gpu.mojom.OverlayInfo=::gpu::OverlayInfo",
"gpu.mojom.GpuDevice=::gpu::GPUInfo::GPUDevice",
"gpu.mojom.GpuInfo=::gpu::GPUInfo",
"gpu.mojom.VideoCodecProfile=::gpu::VideoCodecProfile",
diff --git a/chromium/gpu/ipc/common/gpu_info_mojom_traits.cc b/chromium/gpu/ipc/common/gpu_info_mojom_traits.cc
index 248daaf61ce..264b9d57a70 100644
--- a/chromium/gpu/ipc/common/gpu_info_mojom_traits.cc
+++ b/chromium/gpu/ipc/common/gpu_info_mojom_traits.cc
@@ -361,6 +361,15 @@ bool StructTraits<gpu::mojom::Dx12VulkanVersionInfoDataView,
out->vulkan_version = data.vulkan_version();
return true;
}
+
+bool StructTraits<gpu::mojom::OverlayInfoDataView, gpu::OverlayInfo>::Read(
+ gpu::mojom::OverlayInfoDataView data,
+ gpu::OverlayInfo* out) {
+ out->direct_composition = data.direct_composition();
+ out->supports_overlays = data.supports_overlays();
+ return data.ReadYuy2OverlaySupport(&out->yuy2_overlay_support) &&
+ data.ReadNv12OverlaySupport(&out->nv12_overlay_support);
+}
#endif
bool StructTraits<gpu::mojom::GpuInfoDataView, gpu::GPUInfo>::Read(
@@ -375,21 +384,19 @@ bool StructTraits<gpu::mojom::GpuInfoDataView, gpu::GPUInfo>::Read(
out->passthrough_cmd_decoder = data.passthrough_cmd_decoder();
out->can_support_threaded_texture_mailbox =
data.can_support_threaded_texture_mailbox();
+#if defined(OS_MACOSX)
+ if (!gpu::ValidateMacOSSpecificTextureTarget(
+ data.macos_specific_texture_target())) {
+ return false;
+ }
+ out->macos_specific_texture_target = data.macos_specific_texture_target();
+#endif // OS_MACOSX
out->jpeg_decode_accelerator_supported =
data.jpeg_decode_accelerator_supported();
-#if defined(USE_X11)
- out->system_visual = data.system_visual();
- out->rgba_visual = data.rgba_visual();
-#endif
out->oop_rasterization_supported = data.oop_rasterization_supported();
out->subpixel_font_rendering = data.subpixel_font_rendering();
-#if defined(OS_WIN)
- out->direct_composition = data.direct_composition();
- out->supports_overlays = data.supports_overlays();
-#endif
-
return data.ReadInitializationTime(&out->initialization_time) &&
data.ReadGpu(&out->gpu) &&
data.ReadSecondaryGpus(&out->secondary_gpus) &&
@@ -407,8 +414,7 @@ bool StructTraits<gpu::mojom::GpuInfoDataView, gpu::GPUInfo>::Read(
data.ReadGlWsExtensions(&out->gl_ws_extensions) &&
data.ReadDirectRenderingVersion(&out->direct_rendering_version) &&
#if defined(OS_WIN)
- data.ReadYuy2OverlaySupport(&out->yuy2_overlay_support) &&
- data.ReadNv12OverlaySupport(&out->nv12_overlay_support) &&
+ data.ReadOverlayInfo(&out->overlay_info) &&
data.ReadDxDiagnostics(&out->dx_diagnostics) &&
data.ReadDx12VulkanVersionInfo(&out->dx12_vulkan_version_info) &&
#endif
diff --git a/chromium/gpu/ipc/common/gpu_info_mojom_traits.h b/chromium/gpu/ipc/common/gpu_info_mojom_traits.h
index b8b22696781..5fc0b439a94 100644
--- a/chromium/gpu/ipc/common/gpu_info_mojom_traits.h
+++ b/chromium/gpu/ipc/common/gpu_info_mojom_traits.h
@@ -10,6 +10,7 @@
#include "gpu/config/gpu_info.h"
#include "gpu/ipc/common/dx_diag_node_mojom_traits.h"
#include "gpu/ipc/common/gpu_info.mojom.h"
+#include "ui/gfx/buffer_types.h"
#include "ui/gfx/geometry/mojom/geometry_mojom_traits.h"
namespace mojo {
@@ -114,7 +115,7 @@ struct StructTraits<gpu::mojom::VideoDecodeAcceleratorCapabilitiesDataView,
}
static std::vector<gpu::VideoDecodeAcceleratorSupportedProfile>
- supported_profiles(const gpu::VideoDecodeAcceleratorCapabilities& input) {
+ supported_profiles(const gpu::VideoDecodeAcceleratorCapabilities& input) {
return input.supported_profiles;
}
};
@@ -228,6 +229,30 @@ struct StructTraits<gpu::mojom::Dx12VulkanVersionInfoDataView,
return input.vulkan_version;
}
};
+
+template <>
+struct StructTraits<gpu::mojom::OverlayInfoDataView, gpu::OverlayInfo> {
+ static bool Read(gpu::mojom::OverlayInfoDataView data, gpu::OverlayInfo* out);
+
+ static bool direct_composition(const gpu::OverlayInfo& input) {
+ return input.direct_composition;
+ }
+
+ static bool supports_overlays(const gpu::OverlayInfo& input) {
+ return input.supports_overlays;
+ }
+
+ static gpu::OverlaySupport yuy2_overlay_support(
+ const gpu::OverlayInfo& input) {
+ return input.yuy2_overlay_support;
+ }
+
+ static gpu::OverlaySupport nv12_overlay_support(
+ const gpu::OverlayInfo& input) {
+ return input.nv12_overlay_support;
+ }
+};
+
#endif
template <>
@@ -328,22 +353,13 @@ struct StructTraits<gpu::mojom::GpuInfoDataView, gpu::GPUInfo> {
return input.can_support_threaded_texture_mailbox;
}
-#if defined(OS_WIN)
- static bool direct_composition(const gpu::GPUInfo& input) {
- return input.direct_composition;
+#if defined(OS_MACOSX)
+ static uint32_t macos_specific_texture_target(const gpu::GPUInfo& input) {
+ return input.macos_specific_texture_target;
}
+#endif // OS_MACOSX
- static bool supports_overlays(const gpu::GPUInfo& input) {
- return input.supports_overlays;
- }
-
- static gpu::OverlaySupport yuy2_overlay_support(const gpu::GPUInfo& input) {
- return input.yuy2_overlay_support;
- }
-
- static gpu::OverlaySupport nv12_overlay_support(const gpu::GPUInfo& input) {
- return input.nv12_overlay_support;
- }
+#if defined(OS_WIN)
static const gpu::DxDiagNode& dx_diagnostics(const gpu::GPUInfo& input) {
return input.dx_diagnostics;
@@ -353,6 +369,10 @@ struct StructTraits<gpu::mojom::GpuInfoDataView, gpu::GPUInfo> {
const gpu::GPUInfo& input) {
return input.dx12_vulkan_version_info;
}
+
+ static const gpu::OverlayInfo& overlay_info(const gpu::GPUInfo& input) {
+ return input.overlay_info;
+ }
#endif
static const gpu::VideoDecodeAcceleratorCapabilities&
@@ -374,20 +394,6 @@ struct StructTraits<gpu::mojom::GpuInfoDataView, gpu::GPUInfo> {
return input.image_decode_accelerator_supported_profiles;
}
- static uint64_t system_visual(const gpu::GPUInfo& input) {
-#if defined(USE_X11)
- return input.system_visual;
-#endif
- return 0;
- }
-
- static uint64_t rgba_visual(const gpu::GPUInfo& input) {
-#if defined(USE_X11)
- return input.rgba_visual;
-#endif
- return 0;
- }
-
static bool oop_rasterization_supported(const gpu::GPUInfo& input) {
return input.oop_rasterization_supported;
}
diff --git a/chromium/gpu/ipc/common/gpu_memory_buffer_impl_dxgi.cc b/chromium/gpu/ipc/common/gpu_memory_buffer_impl_dxgi.cc
index 59c7a15611d..500ff3cb9e4 100644
--- a/chromium/gpu/ipc/common/gpu_memory_buffer_impl_dxgi.cc
+++ b/chromium/gpu/ipc/common/gpu_memory_buffer_impl_dxgi.cc
@@ -19,16 +19,15 @@ namespace gpu {
GpuMemoryBufferImplDXGI::~GpuMemoryBufferImplDXGI() {}
std::unique_ptr<GpuMemoryBufferImplDXGI>
-GpuMemoryBufferImplDXGI::CreateFromHandle(
- const gfx::GpuMemoryBufferHandle& handle,
- const gfx::Size& size,
- gfx::BufferFormat format,
- gfx::BufferUsage usage,
- DestructionCallback callback) {
+GpuMemoryBufferImplDXGI::CreateFromHandle(gfx::GpuMemoryBufferHandle handle,
+ const gfx::Size& size,
+ gfx::BufferFormat format,
+ gfx::BufferUsage usage,
+ DestructionCallback callback) {
DCHECK(handle.dxgi_handle.IsValid());
- return base::WrapUnique(new GpuMemoryBufferImplDXGI(
- handle.id, size, format, std::move(callback),
- base::win::ScopedHandle(handle.dxgi_handle.GetHandle())));
+ return base::WrapUnique(
+ new GpuMemoryBufferImplDXGI(handle.id, size, format, std::move(callback),
+ std::move(handle.dxgi_handle)));
}
base::OnceClosure GpuMemoryBufferImplDXGI::AllocateForTesting(
@@ -78,7 +77,7 @@ base::OnceClosure GpuMemoryBufferImplDXGI::AllocateForTesting(
DCHECK(SUCCEEDED(hr));
gfx::GpuMemoryBufferId kBufferId(1);
- handle->dxgi_handle = IPC::PlatformFileForTransit(texture_handle);
+ handle->dxgi_handle.Set(texture_handle);
handle->type = gfx::DXGI_SHARED_HANDLE;
handle->id = kBufferId;
return base::DoNothing();
@@ -115,7 +114,7 @@ gfx::GpuMemoryBufferHandle GpuMemoryBufferImplDXGI::CloneHandle() const {
&duplicated_handle, 0, FALSE, DUPLICATE_SAME_ACCESS);
if (!result)
DPLOG(ERROR) << "Failed to duplicate DXGI resource handle.";
- handle.dxgi_handle = IPC::PlatformFileForTransit(duplicated_handle);
+ handle.dxgi_handle.Set(duplicated_handle);
return handle;
}
diff --git a/chromium/gpu/ipc/common/gpu_memory_buffer_impl_dxgi.h b/chromium/gpu/ipc/common/gpu_memory_buffer_impl_dxgi.h
index 233e3a5c4a7..21895c4af99 100644
--- a/chromium/gpu/ipc/common/gpu_memory_buffer_impl_dxgi.h
+++ b/chromium/gpu/ipc/common/gpu_memory_buffer_impl_dxgi.h
@@ -26,7 +26,7 @@ class GPU_EXPORT GpuMemoryBufferImplDXGI : public GpuMemoryBufferImpl {
gfx::DXGI_SHARED_HANDLE;
static std::unique_ptr<GpuMemoryBufferImplDXGI> CreateFromHandle(
- const gfx::GpuMemoryBufferHandle& handle,
+ gfx::GpuMemoryBufferHandle handle,
const gfx::Size& size,
gfx::BufferFormat format,
gfx::BufferUsage usage,
diff --git a/chromium/gpu/ipc/common/gpu_memory_buffer_impl_shared_memory.cc b/chromium/gpu/ipc/common/gpu_memory_buffer_impl_shared_memory.cc
index a97204a0242..2d1d14dbf17 100644
--- a/chromium/gpu/ipc/common/gpu_memory_buffer_impl_shared_memory.cc
+++ b/chromium/gpu/ipc/common/gpu_memory_buffer_impl_shared_memory.cc
@@ -191,8 +191,8 @@ bool GpuMemoryBufferImplSharedMemory::IsSizeValidForFormat(
case gfx::BufferFormat::RGBX_8888:
case gfx::BufferFormat::BGRA_8888:
case gfx::BufferFormat::BGRX_8888:
- case gfx::BufferFormat::BGRX_1010102:
- case gfx::BufferFormat::RGBX_1010102:
+ case gfx::BufferFormat::BGRA_1010102:
+ case gfx::BufferFormat::RGBA_1010102:
case gfx::BufferFormat::RGBA_F16:
return true;
case gfx::BufferFormat::YVU_420:
diff --git a/chromium/gpu/ipc/common/gpu_memory_buffer_impl_test_template.h b/chromium/gpu/ipc/common/gpu_memory_buffer_impl_test_template.h
index 907b55c14da..94d4a6c5cbd 100644
--- a/chromium/gpu/ipc/common/gpu_memory_buffer_impl_test_template.h
+++ b/chromium/gpu/ipc/common/gpu_memory_buffer_impl_test_template.h
@@ -21,7 +21,7 @@
#include "mojo/public/cpp/test_support/test_utils.h"
#include "testing/gtest/include/gtest/gtest.h"
#include "ui/gfx/buffer_format_util.h"
-#include "ui/gfx/mojom/buffer_types_mojom_traits.h"
+#include "ui/gfx/mojom/buffer_types.mojom.h"
#if defined(OS_WIN) || defined(USE_OZONE)
#include "ui/gl/init/gl_factory.h"
@@ -102,8 +102,9 @@ TYPED_TEST_P(GpuMemoryBufferImplTest, CreateFromHandle) {
gfx::BufferUsage::SCANOUT_VEA_READ_CAMERA_AND_CPU_READ_WRITE,
};
for (auto usage : usages) {
- if (!TestFixture::gpu_memory_buffer_support()->IsConfigurationSupported(
- TypeParam::kBufferType, format, usage)) {
+ if (!TestFixture::gpu_memory_buffer_support()
+ ->IsConfigurationSupportedForTest(TypeParam::kBufferType, format,
+ usage)) {
continue;
}
@@ -142,8 +143,9 @@ TYPED_TEST_P(GpuMemoryBufferImplTest, CreateFromHandleSmallBuffer) {
gfx::BufferUsage::SCANOUT_VEA_READ_CAMERA_AND_CPU_READ_WRITE,
};
for (auto usage : usages) {
- if (!TestFixture::gpu_memory_buffer_support()->IsConfigurationSupported(
- TypeParam::kBufferType, format, usage)) {
+ if (!TestFixture::gpu_memory_buffer_support()
+ ->IsConfigurationSupportedForTest(TypeParam::kBufferType, format,
+ usage)) {
continue;
}
@@ -177,9 +179,10 @@ TYPED_TEST_P(GpuMemoryBufferImplTest, Map) {
const gfx::Size kBufferSize(4, 4);
for (auto format : gfx::GetBufferFormatsForTesting()) {
- if (!TestFixture::gpu_memory_buffer_support()->IsConfigurationSupported(
- TypeParam::kBufferType, format,
- gfx::BufferUsage::GPU_READ_CPU_READ_WRITE)) {
+ if (!TestFixture::gpu_memory_buffer_support()
+ ->IsConfigurationSupportedForTest(
+ TypeParam::kBufferType, format,
+ gfx::BufferUsage::GPU_READ_CPU_READ_WRITE)) {
continue;
}
@@ -231,9 +234,10 @@ TYPED_TEST_P(GpuMemoryBufferImplTest, PersistentMap) {
const gfx::Size kBufferSize(4, 4);
for (auto format : gfx::GetBufferFormatsForTesting()) {
- if (!TestFixture::gpu_memory_buffer_support()->IsConfigurationSupported(
- TypeParam::kBufferType, format,
- gfx::BufferUsage::GPU_READ_CPU_READ_WRITE)) {
+ if (!TestFixture::gpu_memory_buffer_support()
+ ->IsConfigurationSupportedForTest(
+ TypeParam::kBufferType, format,
+ gfx::BufferUsage::GPU_READ_CPU_READ_WRITE)) {
continue;
}
@@ -316,8 +320,9 @@ TYPED_TEST_P(GpuMemoryBufferImplTest, SerializeAndDeserialize) {
gfx::BufferUsage::SCANOUT_VEA_READ_CAMERA_AND_CPU_READ_WRITE,
};
for (auto usage : usages) {
- if (!TestFixture::gpu_memory_buffer_support()->IsConfigurationSupported(
- TypeParam::kBufferType, format, usage))
+ if (!TestFixture::gpu_memory_buffer_support()
+ ->IsConfigurationSupportedForTest(TypeParam::kBufferType, format,
+ usage))
continue;
bool destroyed = false;
@@ -363,8 +368,9 @@ TYPED_TEST_P(GpuMemoryBufferImplCreateTest, Create) {
gfx::BufferUsage usage = gfx::BufferUsage::GPU_READ;
for (auto format : gfx::GetBufferFormatsForTesting()) {
- if (!TestFixture::gpu_memory_buffer_support()->IsConfigurationSupported(
- TypeParam::kBufferType, format, usage))
+ if (!TestFixture::gpu_memory_buffer_support()
+ ->IsConfigurationSupportedForTest(TypeParam::kBufferType, format,
+ usage))
continue;
bool destroyed = false;
std::unique_ptr<TypeParam> buffer(TypeParam::Create(
diff --git a/chromium/gpu/ipc/common/gpu_memory_buffer_support.cc b/chromium/gpu/ipc/common/gpu_memory_buffer_support.cc
index 02f52ceeaea..c878874d5fa 100644
--- a/chromium/gpu/ipc/common/gpu_memory_buffer_support.cc
+++ b/chromium/gpu/ipc/common/gpu_memory_buffer_support.cc
@@ -7,6 +7,8 @@
#include "base/logging.h"
#include "build/build_config.h"
#include "gpu/ipc/common/gpu_memory_buffer_impl_shared_memory.h"
+#include "ui/gfx/buffer_format_util.h"
+#include "ui/gfx/buffer_usage_util.h"
#if defined(OS_MACOSX)
#include "gpu/ipc/common/gpu_memory_buffer_impl_io_surface.h"
@@ -79,7 +81,7 @@ bool GpuMemoryBufferSupport::IsNativeGpuMemoryBufferConfigurationSupported(
format == gfx::BufferFormat::BGRX_8888 ||
format == gfx::BufferFormat::R_8 ||
format == gfx::BufferFormat::RGBA_F16 ||
- format == gfx::BufferFormat::BGRX_1010102 ||
+ format == gfx::BufferFormat::BGRA_1010102 ||
format == gfx::BufferFormat::YUV_420_BIPLANAR;
case gfx::BufferUsage::SCANOUT_VDA_WRITE:
case gfx::BufferUsage::SCANOUT_CAMERA_READ_WRITE:
@@ -112,8 +114,13 @@ bool GpuMemoryBufferSupport::IsNativeGpuMemoryBufferConfigurationSupported(
#elif defined(USE_OZONE)
return ui::OzonePlatform::GetInstance()->IsNativePixmapConfigSupported(format,
usage);
-#elif defined(OS_LINUX)
- return false; // TODO(julian.isorce): Add linux support.
+#elif defined(USE_X11)
+ // On X11, GPU memory buffer support can only be determined after GPU
+ // initialization.
+ // viz::HostGpuMemoryBufferManager::IsNativeGpuMemoryBufferConfiguration()
+ // should be used instead.
+ NOTREACHED();
+ return false;
#elif defined(OS_WIN)
switch (usage) {
case gfx::BufferUsage::GPU_READ:
@@ -136,12 +143,18 @@ bool GpuMemoryBufferSupport::IsNativeGpuMemoryBufferConfigurationSupported(
#endif
}
-bool GpuMemoryBufferSupport::IsConfigurationSupported(
+bool GpuMemoryBufferSupport::IsConfigurationSupportedForTest(
gfx::GpuMemoryBufferType type,
gfx::BufferFormat format,
gfx::BufferUsage usage) {
- if (type == GetNativeGpuMemoryBufferType())
+ if (type == GetNativeGpuMemoryBufferType()) {
+#if defined(USE_X11)
+ // On X11, we require GPUInfo to determine configuration support.
+ return false;
+#else
return IsNativeGpuMemoryBufferConfigurationSupported(format, usage);
+#endif
+ }
if (type == gfx::SHARED_MEMORY_BUFFER) {
return GpuMemoryBufferImplSharedMemory::IsConfigurationSupported(format,
@@ -186,7 +199,8 @@ GpuMemoryBufferSupport::CreateGpuMemoryBufferImplFromHandle(
#endif
default:
// TODO(dcheng): Remove default case (https://crbug.com/676224).
- NOTREACHED();
+ NOTREACHED() << gfx::BufferFormatToString(format) << ", "
+ << gfx::BufferUsageToString(usage);
return nullptr;
}
}
diff --git a/chromium/gpu/ipc/common/gpu_memory_buffer_support.h b/chromium/gpu/ipc/common/gpu_memory_buffer_support.h
index 4963ff8719e..2999e942d66 100644
--- a/chromium/gpu/ipc/common/gpu_memory_buffer_support.h
+++ b/chromium/gpu/ipc/common/gpu_memory_buffer_support.h
@@ -45,9 +45,9 @@ class GPU_EXPORT GpuMemoryBufferSupport {
#endif
// Returns whether the provided buffer format is supported.
- bool IsConfigurationSupported(gfx::GpuMemoryBufferType type,
- gfx::BufferFormat format,
- gfx::BufferUsage usage);
+ bool IsConfigurationSupportedForTest(gfx::GpuMemoryBufferType type,
+ gfx::BufferFormat format,
+ gfx::BufferUsage usage);
// Creates a GpuMemoryBufferImpl from the given |handle|. |size| and |format|
// should match what was used to allocate the |handle|. |callback|, if
diff --git a/chromium/gpu/ipc/common/gpu_peak_memory.h b/chromium/gpu/ipc/common/gpu_peak_memory.h
new file mode 100644
index 00000000000..2baa46317be
--- /dev/null
+++ b/chromium/gpu/ipc/common/gpu_peak_memory.h
@@ -0,0 +1,21 @@
+// Copyright 2020 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef GPU_IPC_COMMON_GPU_PEAK_MEMORY_H_
+#define GPU_IPC_COMMON_GPU_PEAK_MEMORY_H_
+
+namespace gpu {
+
+enum class GpuPeakMemoryAllocationSource {
+ UNKNOWN,
+ COMMAND_BUFFER,
+ SHARED_CONTEXT_STATE,
+ SHARED_IMAGE_STUB,
+ SKIA,
+ GPU_PEAK_MEMORY_ALLOCATION_SOURCE_MAX = SKIA,
+};
+
+} // namespace gpu
+
+#endif // GPU_IPC_COMMON_GPU_PEAK_MEMORY_H_
diff --git a/chromium/gpu/ipc/common/gpu_peak_memory.mojom b/chromium/gpu/ipc/common/gpu_peak_memory.mojom
new file mode 100644
index 00000000000..e184b5423ae
--- /dev/null
+++ b/chromium/gpu/ipc/common/gpu_peak_memory.mojom
@@ -0,0 +1,16 @@
+// Copyright 2020 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// gpu/ipc/common/gpu_peak_memory.h
+module gpu.mojom;
+
+// gpu::GpuPeakMemoryAllocationSource
+enum GpuPeakMemoryAllocationSource
+{
+ UNKNOWN,
+ COMMAND_BUFFER,
+ SHARED_CONTEXT_STATE,
+ SHARED_IMAGE_STUB,
+ SKIA,
+};
diff --git a/chromium/gpu/ipc/common/gpu_peak_memory.typemap b/chromium/gpu/ipc/common/gpu_peak_memory.typemap
new file mode 100644
index 00000000000..da13728dcfa
--- /dev/null
+++ b/chromium/gpu/ipc/common/gpu_peak_memory.typemap
@@ -0,0 +1,9 @@
+# Copyright 2020 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+mojom = "//gpu/ipc/common/gpu_peak_memory.mojom"
+public_headers = [ "//gpu/ipc/common/gpu_peak_memory.h" ]
+traits_headers = [ "//gpu/ipc/common/gpu_peak_memory_mojom_traits.h" ]
+public_deps = [ "//gpu/ipc/common" ]
+type_mappings = [ "gpu.mojom.GpuPeakMemoryAllocationSource=::gpu::GpuPeakMemoryAllocationSource" ]
diff --git a/chromium/gpu/ipc/common/gpu_peak_memory_mojom_traits.h b/chromium/gpu/ipc/common/gpu_peak_memory_mojom_traits.h
new file mode 100644
index 00000000000..8e3b39cf1fe
--- /dev/null
+++ b/chromium/gpu/ipc/common/gpu_peak_memory_mojom_traits.h
@@ -0,0 +1,61 @@
+// Copyright 2020 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef GPU_IPC_COMMON_GPU_PEAK_MEMORY_MOJOM_TRAITS_H_
+#define GPU_IPC_COMMON_GPU_PEAK_MEMORY_MOJOM_TRAITS_H_
+
+#include "gpu/ipc/common/gpu_peak_memory.h"
+#include "gpu/ipc/common/gpu_peak_memory.mojom-shared.h"
+
+namespace mojo {
+
+template <>
+struct EnumTraits<gpu::mojom::GpuPeakMemoryAllocationSource,
+ gpu::GpuPeakMemoryAllocationSource> {
+ static gpu::mojom::GpuPeakMemoryAllocationSource ToMojom(
+ gpu::GpuPeakMemoryAllocationSource gpu_peak_memory_allocation_source) {
+ switch (gpu_peak_memory_allocation_source) {
+ case gpu::GpuPeakMemoryAllocationSource::UNKNOWN:
+ return gpu::mojom::GpuPeakMemoryAllocationSource::UNKNOWN;
+ case gpu::GpuPeakMemoryAllocationSource::COMMAND_BUFFER:
+ return gpu::mojom::GpuPeakMemoryAllocationSource::COMMAND_BUFFER;
+ case gpu::GpuPeakMemoryAllocationSource::SHARED_CONTEXT_STATE:
+ return gpu::mojom::GpuPeakMemoryAllocationSource::SHARED_CONTEXT_STATE;
+ case gpu::GpuPeakMemoryAllocationSource::SHARED_IMAGE_STUB:
+ return gpu::mojom::GpuPeakMemoryAllocationSource::SHARED_IMAGE_STUB;
+ case gpu::GpuPeakMemoryAllocationSource::SKIA:
+ return gpu::mojom::GpuPeakMemoryAllocationSource::SKIA;
+ }
+ NOTREACHED() << "Invalid GpuPeakMemoryAllocationSource:"
+ << static_cast<int>(gpu_peak_memory_allocation_source);
+ return gpu::mojom::GpuPeakMemoryAllocationSource::UNKNOWN;
+ }
+
+ static bool FromMojom(gpu::mojom::GpuPeakMemoryAllocationSource input,
+ gpu::GpuPeakMemoryAllocationSource* out) {
+ switch (input) {
+ case gpu::mojom::GpuPeakMemoryAllocationSource::UNKNOWN:
+ *out = gpu::GpuPeakMemoryAllocationSource::UNKNOWN;
+ return true;
+ case gpu::mojom::GpuPeakMemoryAllocationSource::COMMAND_BUFFER:
+ *out = gpu::GpuPeakMemoryAllocationSource::COMMAND_BUFFER;
+ return true;
+ case gpu::mojom::GpuPeakMemoryAllocationSource::SHARED_CONTEXT_STATE:
+ *out = gpu::GpuPeakMemoryAllocationSource::SHARED_CONTEXT_STATE;
+ return true;
+ case gpu::mojom::GpuPeakMemoryAllocationSource::SHARED_IMAGE_STUB:
+ *out = gpu::GpuPeakMemoryAllocationSource::SHARED_IMAGE_STUB;
+ return true;
+ case gpu::mojom::GpuPeakMemoryAllocationSource::SKIA:
+ *out = gpu::GpuPeakMemoryAllocationSource::SKIA;
+ return true;
+ }
+ NOTREACHED() << "Invalid GpuPeakMemoryAllocationSource: " << input;
+ return false;
+ }
+};
+
+} // namespace mojo
+
+#endif // GPU_IPC_COMMON_GPU_PEAK_MEMORY_MOJOM_TRAITS_H_
diff --git a/chromium/gpu/ipc/common/gpu_preferences.mojom b/chromium/gpu/ipc/common/gpu_preferences.mojom
index 305d8a294c7..d235e868d90 100644
--- a/chromium/gpu/ipc/common/gpu_preferences.mojom
+++ b/chromium/gpu/ipc/common/gpu_preferences.mojom
@@ -55,7 +55,9 @@ struct GpuPreferences {
bool enable_gpu_driver_debug_logging;
bool disable_gpu_program_cache;
bool enforce_gl_minimums;
- uint32 force_gpu_mem_available;
+ uint32 force_gpu_mem_available_bytes;
+ uint32 force_gpu_mem_discardable_limit_bytes;
+ uint32 force_max_texture_size;
uint32 gpu_program_cache_size;
bool disable_gpu_shader_disk_cache;
bool enable_threaded_texture_mailboxes;
@@ -82,7 +84,12 @@ struct GpuPreferences {
bool enable_gpu_benchmarking_extension;
bool enable_webgpu;
bool enable_gpu_blocked_time_metric;
+ bool enable_perf_data_collection;
[EnableIf=use_ozone]
mojo_base.mojom.MessagePumpType message_pump_type;
+
+ bool enable_native_gpu_memory_buffers;
+
+ bool force_disable_new_accelerated_video_decoder;
};
diff --git a/chromium/gpu/ipc/common/gpu_preferences_mojom_traits.h b/chromium/gpu/ipc/common/gpu_preferences_mojom_traits.h
index 48441160c64..14401dda3ea 100644
--- a/chromium/gpu/ipc/common/gpu_preferences_mojom_traits.h
+++ b/chromium/gpu/ipc/common/gpu_preferences_mojom_traits.h
@@ -124,7 +124,10 @@ struct StructTraits<gpu::mojom::GpuPreferencesDataView, gpu::GpuPreferences> {
prefs.enable_gpu_driver_debug_logging();
out->disable_gpu_program_cache = prefs.disable_gpu_program_cache();
out->enforce_gl_minimums = prefs.enforce_gl_minimums();
- out->force_gpu_mem_available = prefs.force_gpu_mem_available();
+ out->force_gpu_mem_available_bytes = prefs.force_gpu_mem_available_bytes();
+ out->force_gpu_mem_discardable_limit_bytes =
+ prefs.force_gpu_mem_discardable_limit_bytes();
+ out->force_max_texture_size = prefs.force_max_texture_size();
out->gpu_program_cache_size = prefs.gpu_program_cache_size();
out->disable_gpu_shader_disk_cache = prefs.disable_gpu_shader_disk_cache();
out->enable_threaded_texture_mailboxes =
@@ -169,12 +172,19 @@ struct StructTraits<gpu::mojom::GpuPreferencesDataView, gpu::GpuPreferences> {
out->enable_webgpu = prefs.enable_webgpu();
out->enable_gpu_blocked_time_metric =
prefs.enable_gpu_blocked_time_metric();
+ out->enable_perf_data_collection = prefs.enable_perf_data_collection();
#if defined(USE_OZONE)
if (!prefs.ReadMessagePumpType(&out->message_pump_type))
return false;
#endif
+ out->enable_native_gpu_memory_buffers =
+ prefs.enable_native_gpu_memory_buffers();
+
+ out->force_disable_new_accelerated_video_decoder =
+ prefs.force_disable_new_accelerated_video_decoder();
+
return true;
}
@@ -245,8 +255,16 @@ struct StructTraits<gpu::mojom::GpuPreferencesDataView, gpu::GpuPreferences> {
static bool enforce_gl_minimums(const gpu::GpuPreferences& prefs) {
return prefs.enforce_gl_minimums;
}
- static uint32_t force_gpu_mem_available(const gpu::GpuPreferences& prefs) {
- return prefs.force_gpu_mem_available;
+ static uint32_t force_gpu_mem_available_bytes(
+ const gpu::GpuPreferences& prefs) {
+ return prefs.force_gpu_mem_available_bytes;
+ }
+ static uint32_t force_gpu_mem_discardable_limit_bytes(
+ const gpu::GpuPreferences& prefs) {
+ return prefs.force_gpu_mem_discardable_limit_bytes;
+ }
+ static uint32_t force_max_texture_size(const gpu::GpuPreferences& prefs) {
+ return prefs.force_max_texture_size;
}
static uint32_t gpu_program_cache_size(const gpu::GpuPreferences& prefs) {
return prefs.gpu_program_cache_size;
@@ -330,12 +348,23 @@ struct StructTraits<gpu::mojom::GpuPreferencesDataView, gpu::GpuPreferences> {
static bool enable_gpu_blocked_time_metric(const gpu::GpuPreferences& prefs) {
return prefs.enable_gpu_blocked_time_metric;
}
+ static bool enable_perf_data_collection(const gpu::GpuPreferences& prefs) {
+ return prefs.enable_perf_data_collection;
+ }
#if defined(USE_OZONE)
static base::MessagePumpType message_pump_type(
const gpu::GpuPreferences& prefs) {
return prefs.message_pump_type;
}
#endif
+ static bool enable_native_gpu_memory_buffers(
+ const gpu::GpuPreferences& prefs) {
+ return prefs.enable_native_gpu_memory_buffers;
+ }
+ static bool force_disable_new_accelerated_video_decoder(
+ const gpu::GpuPreferences& prefs) {
+ return prefs.force_disable_new_accelerated_video_decoder;
+ }
};
} // namespace mojo
diff --git a/chromium/gpu/ipc/common/gpu_watchdog_timeout.h b/chromium/gpu/ipc/common/gpu_watchdog_timeout.h
index f67352aa7d2..4857f201211 100644
--- a/chromium/gpu/ipc/common/gpu_watchdog_timeout.h
+++ b/chromium/gpu/ipc/common/gpu_watchdog_timeout.h
@@ -17,12 +17,15 @@ namespace gpu {
#if defined(CYGPROFILE_INSTRUMENTATION)
constexpr base::TimeDelta kGpuWatchdogTimeout =
base::TimeDelta::FromSeconds(30);
-#elif defined(OS_WIN) || defined(OS_MACOSX)
+#elif defined(OS_MACOSX)
constexpr base::TimeDelta kGpuWatchdogTimeout =
- base::TimeDelta::FromSeconds(15);
+ base::TimeDelta::FromSeconds(25);
+#elif defined(OS_WIN)
+constexpr base::TimeDelta kGpuWatchdogTimeout =
+ base::TimeDelta::FromSeconds(30);
#else
constexpr base::TimeDelta kGpuWatchdogTimeout =
- base::TimeDelta::FromSeconds(10);
+ base::TimeDelta::FromSeconds(15);
#endif
// It usually takes longer to finish a GPU task when the system just resumes
diff --git a/chromium/gpu/ipc/common/mojom_traits_unittest.cc b/chromium/gpu/ipc/common/mojom_traits_unittest.cc
index 07f4c8bd26a..c22716df683 100644
--- a/chromium/gpu/ipc/common/mojom_traits_unittest.cc
+++ b/chromium/gpu/ipc/common/mojom_traits_unittest.cc
@@ -178,10 +178,6 @@ TEST_F(StructTraitsTest, GpuInfo) {
const std::vector<gpu::VideoEncodeAcceleratorSupportedProfile>
video_encode_accelerator_supported_profiles;
const bool jpeg_decode_accelerator_supported = true;
-#if defined(USE_X11)
- const VisualID system_visual = 0x1234;
- const VisualID rgba_visual = 0x5678;
-#endif
gpu::GPUInfo input;
input.initialization_time = initialization_time;
@@ -210,10 +206,10 @@ TEST_F(StructTraitsTest, GpuInfo) {
input.in_process_gpu = in_process_gpu;
input.passthrough_cmd_decoder = passthrough_cmd_decoder;
#if defined(OS_WIN)
- input.direct_composition = direct_composition;
- input.supports_overlays = supports_overlays;
- input.yuy2_overlay_support = yuy2_overlay_support;
- input.nv12_overlay_support = nv12_overlay_support;
+ input.overlay_info.direct_composition = direct_composition;
+ input.overlay_info.supports_overlays = supports_overlays;
+ input.overlay_info.yuy2_overlay_support = yuy2_overlay_support;
+ input.overlay_info.nv12_overlay_support = nv12_overlay_support;
input.dx_diagnostics = dx_diagnostics;
#endif
input.video_decode_accelerator_capabilities =
@@ -221,10 +217,6 @@ TEST_F(StructTraitsTest, GpuInfo) {
input.video_encode_accelerator_supported_profiles =
video_encode_accelerator_supported_profiles;
input.jpeg_decode_accelerator_supported = jpeg_decode_accelerator_supported;
-#if defined(USE_X11)
- input.system_visual = system_visual;
- input.rgba_visual = rgba_visual;
-#endif
mojo::Remote<mojom::TraitsTestService> remote = GetTraitsTestRemote();
gpu::GPUInfo output;
@@ -277,10 +269,10 @@ TEST_F(StructTraitsTest, GpuInfo) {
EXPECT_EQ(in_process_gpu, output.in_process_gpu);
EXPECT_EQ(passthrough_cmd_decoder, output.passthrough_cmd_decoder);
#if defined(OS_WIN)
- EXPECT_EQ(direct_composition, output.direct_composition);
- EXPECT_EQ(supports_overlays, output.supports_overlays);
- EXPECT_EQ(yuy2_overlay_support, output.yuy2_overlay_support);
- EXPECT_EQ(nv12_overlay_support, output.nv12_overlay_support);
+ EXPECT_EQ(direct_composition, output.overlay_info.direct_composition);
+ EXPECT_EQ(supports_overlays, output.overlay_info.supports_overlays);
+ EXPECT_EQ(yuy2_overlay_support, output.overlay_info.yuy2_overlay_support);
+ EXPECT_EQ(nv12_overlay_support, output.overlay_info.nv12_overlay_support);
EXPECT_EQ(dx_diagnostics.values, output.dx_diagnostics.values);
#endif
EXPECT_EQ(output.video_decode_accelerator_capabilities.flags,
@@ -302,10 +294,6 @@ TEST_F(StructTraitsTest, GpuInfo) {
video_decode_accelerator_capabilities.supported_profiles.size());
EXPECT_EQ(output.video_encode_accelerator_supported_profiles.size(),
video_encode_accelerator_supported_profiles.size());
-#if defined(USE_X11)
- EXPECT_EQ(system_visual, output.system_visual);
- EXPECT_EQ(rgba_visual, output.rgba_visual);
-#endif
}
TEST_F(StructTraitsTest, EmptyGpuInfo) {
diff --git a/chromium/gpu/ipc/common/typemaps.gni b/chromium/gpu/ipc/common/typemaps.gni
index e27bfe23ec2..0cafc3c9278 100644
--- a/chromium/gpu/ipc/common/typemaps.gni
+++ b/chromium/gpu/ipc/common/typemaps.gni
@@ -5,11 +5,13 @@
typemaps = [
"//gpu/ipc/common/capabilities.typemap",
"//gpu/ipc/common/context_result.typemap",
+ "//gpu/ipc/common/device_perf_info.typemap",
+ "//gpu/ipc/common/dx_diag_node.typemap",
"//gpu/ipc/common/gpu_feature_info.typemap",
"//gpu/ipc/common/gpu_info.typemap",
+ "//gpu/ipc/common/gpu_peak_memory.typemap",
"//gpu/ipc/common/gpu_preferences.typemap",
"//gpu/ipc/common/gpu_extra_info.typemap",
- "//gpu/ipc/common/dx_diag_node.typemap",
"//gpu/ipc/common/mailbox.typemap",
"//gpu/ipc/common/mailbox_holder.typemap",
"//gpu/ipc/common/memory_stats.typemap",
diff --git a/chromium/gpu/ipc/common/vulkan_info.mojom b/chromium/gpu/ipc/common/vulkan_info.mojom
index f80cc07f3e4..914139e7008 100644
--- a/chromium/gpu/ipc/common/vulkan_info.mojom
+++ b/chromium/gpu/ipc/common/vulkan_info.mojom
@@ -9,7 +9,7 @@ import "gpu/ipc/common/vulkan_types.mojom";
struct VulkanPhysicalDeviceInfo {
VkPhysicalDeviceProperties properties;
- array<VkLayerProperties> layers;
+ array<VkExtensionProperties> extensions;
VkPhysicalDeviceFeatures features;
bool feature_sampler_ycbcr_conversion;
bool feature_protected_memory;
diff --git a/chromium/gpu/ipc/common/vulkan_info_mojom_traits.h b/chromium/gpu/ipc/common/vulkan_info_mojom_traits.h
index 9b67d962139..62480c4ef38 100644
--- a/chromium/gpu/ipc/common/vulkan_info_mojom_traits.h
+++ b/chromium/gpu/ipc/common/vulkan_info_mojom_traits.h
@@ -21,9 +21,9 @@ struct StructTraits<gpu::mojom::VulkanPhysicalDeviceInfoDataView,
return input.properties;
}
- static const std::vector<VkLayerProperties>& layers(
+ static const std::vector<VkExtensionProperties>& extensions(
const gpu::VulkanPhysicalDeviceInfo& input) {
- return input.layers;
+ return input.extensions;
}
static const VkPhysicalDeviceFeatures& features(
@@ -50,7 +50,7 @@ struct StructTraits<gpu::mojom::VulkanPhysicalDeviceInfoDataView,
gpu::VulkanPhysicalDeviceInfo* out) {
if (!data.ReadProperties(&out->properties))
return false;
- if (!data.ReadLayers(&out->layers))
+ if (!data.ReadExtensions(&out->extensions))
return false;
if (!data.ReadFeatures(&out->features))
return false;
diff --git a/chromium/gpu/ipc/common/vulkan_types_mojom_traits.cc b/chromium/gpu/ipc/common/vulkan_types_mojom_traits.cc
index 9dc3878dcc8..6792fbb6e62 100644
--- a/chromium/gpu/ipc/common/vulkan_types_mojom_traits.cc
+++ b/chromium/gpu/ipc/common/vulkan_types_mojom_traits.cc
@@ -69,7 +69,7 @@ bool StructTraits<gpu::mojom::VkPhysicalDevicePropertiesDataView,
deviceName.copy(out->deviceName, sizeof(out->deviceName));
base::span<uint8_t> pipelineCacheUUID(out->pipelineCacheUUID);
- if (!data.ReadPipelineCacheUUID(&pipelineCacheUUID))
+ if (!data.ReadPipelineCacheUuid(&pipelineCacheUUID))
return false;
if (!data.ReadLimits(&out->limits))
diff --git a/chromium/gpu/ipc/gl_in_process_context.cc b/chromium/gpu/ipc/gl_in_process_context.cc
index cee793b2982..a710c6a808a 100644
--- a/chromium/gpu/ipc/gl_in_process_context.cc
+++ b/chromium/gpu/ipc/gl_in_process_context.cc
@@ -75,11 +75,11 @@ ContextResult GLInProcessContext::Initialize(
command_buffer_ = std::make_unique<InProcessCommandBuffer>(
task_executor, GURL("chrome://gpu/GLInProcessContext::Initialize"));
- auto result =
- command_buffer_->Initialize(surface, is_offscreen, window, attribs,
- gpu_memory_buffer_manager, image_factory,
- /*gpu_channel_manager_delegate=*/nullptr,
- std::move(task_runner), nullptr, nullptr);
+ auto result = command_buffer_->Initialize(
+ surface, is_offscreen, window, attribs, gpu_memory_buffer_manager,
+ image_factory,
+ /*gpu_channel_manager_delegate=*/nullptr, std::move(task_runner),
+ /*task_sequence=*/nullptr, nullptr, nullptr);
if (result != ContextResult::kSuccess) {
    DLOG(ERROR) << "Failed to initialize InProcessCommandBuffer";
return result;
diff --git a/chromium/gpu/ipc/gpu_in_process_thread_service.cc b/chromium/gpu/ipc/gpu_in_process_thread_service.cc
index 8ec827abf16..c6a5ccdfc0b 100644
--- a/chromium/gpu/ipc/gpu_in_process_thread_service.cc
+++ b/chromium/gpu/ipc/gpu_in_process_thread_service.cc
@@ -14,27 +14,30 @@
namespace gpu {
+GpuInProcessThreadServiceDelegate::GpuInProcessThreadServiceDelegate() =
+ default;
+GpuInProcessThreadServiceDelegate::~GpuInProcessThreadServiceDelegate() =
+ default;
+
GpuInProcessThreadService::GpuInProcessThreadService(
+ GpuInProcessThreadServiceDelegate* delegate,
scoped_refptr<base::SingleThreadTaskRunner> task_runner,
Scheduler* scheduler,
SyncPointManager* sync_point_manager,
MailboxManager* mailbox_manager,
- scoped_refptr<gl::GLShareGroup> share_group,
gl::GLSurfaceFormat share_group_surface_format,
const GpuFeatureInfo& gpu_feature_info,
const GpuPreferences& gpu_preferences,
SharedImageManager* shared_image_manager,
- gles2::ProgramCache* program_cache,
- scoped_refptr<SharedContextState> shared_context_state)
+ gles2::ProgramCache* program_cache)
: CommandBufferTaskExecutor(gpu_preferences,
gpu_feature_info,
sync_point_manager,
mailbox_manager,
- share_group,
share_group_surface_format,
shared_image_manager,
- program_cache,
- std::move(shared_context_state)),
+ program_cache),
+ delegate_(delegate),
task_runner_(task_runner),
scheduler_(scheduler) {}
@@ -67,4 +70,13 @@ void GpuInProcessThreadService::PostNonNestableToClient(
NOTREACHED();
}
+scoped_refptr<SharedContextState>
+GpuInProcessThreadService::GetSharedContextState() {
+ return delegate_->GetSharedContextState();
+}
+
+scoped_refptr<gl::GLShareGroup> GpuInProcessThreadService::GetShareGroup() {
+ return delegate_->GetShareGroup();
+}
+
} // namespace gpu
diff --git a/chromium/gpu/ipc/gpu_in_process_thread_service.h b/chromium/gpu/ipc/gpu_in_process_thread_service.h
index f6f51a1ab4f..442861adc7c 100644
--- a/chromium/gpu/ipc/gpu_in_process_thread_service.h
+++ b/chromium/gpu/ipc/gpu_in_process_thread_service.h
@@ -7,6 +7,7 @@
#include <memory>
+#include "base/callback.h"
#include "base/compiler_specific.h"
#include "base/single_thread_task_runner.h"
#include "gpu/command_buffer/service/mailbox_manager.h"
@@ -24,23 +25,37 @@ namespace gles2 {
class ProgramCache;
} // namespace gles2
+class GL_IN_PROCESS_CONTEXT_EXPORT GpuInProcessThreadServiceDelegate {
+ public:
+ GpuInProcessThreadServiceDelegate();
+
+ GpuInProcessThreadServiceDelegate(const GpuInProcessThreadServiceDelegate&) =
+ delete;
+ GpuInProcessThreadServiceDelegate& operator=(
+ const GpuInProcessThreadServiceDelegate&) = delete;
+
+ virtual ~GpuInProcessThreadServiceDelegate();
+
+ virtual scoped_refptr<SharedContextState> GetSharedContextState() = 0;
+ virtual scoped_refptr<gl::GLShareGroup> GetShareGroup() = 0;
+};
+
// Default Service class when no service is specified. GpuInProcessThreadService
// is used by Mus and unit tests.
class GL_IN_PROCESS_CONTEXT_EXPORT GpuInProcessThreadService
: public CommandBufferTaskExecutor {
public:
GpuInProcessThreadService(
+ GpuInProcessThreadServiceDelegate* delegate,
scoped_refptr<base::SingleThreadTaskRunner> task_runner,
Scheduler* scheduler,
SyncPointManager* sync_point_manager,
MailboxManager* mailbox_manager,
- scoped_refptr<gl::GLShareGroup> share_group,
gl::GLSurfaceFormat share_group_surface_format,
const GpuFeatureInfo& gpu_feature_info,
const GpuPreferences& gpu_preferences,
SharedImageManager* shared_image_manager,
- gles2::ProgramCache* program_cache,
- scoped_refptr<SharedContextState> shared_context_state);
+ gles2::ProgramCache* program_cache);
~GpuInProcessThreadService() override;
// CommandBufferTaskExecutor implementation.
@@ -50,8 +65,11 @@ class GL_IN_PROCESS_CONTEXT_EXPORT GpuInProcessThreadService
void ScheduleOutOfOrderTask(base::OnceClosure task) override;
void ScheduleDelayedWork(base::OnceClosure task) override;
void PostNonNestableToClient(base::OnceClosure callback) override;
+ scoped_refptr<SharedContextState> GetSharedContextState() override;
+ scoped_refptr<gl::GLShareGroup> GetShareGroup() override;
private:
+ GpuInProcessThreadServiceDelegate* const delegate_;
scoped_refptr<base::SingleThreadTaskRunner> task_runner_;
Scheduler* scheduler_;
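
A note on the new delegate: GpuInProcessThreadService no longer receives the share group or SharedContextState directly; it asks the delegate for them on demand. A minimal delegate sketch, assuming the embedder already owns both objects (the class and member names below are illustrative, not part of this patch):

// Illustrative sketch only: a delegate that hands out objects owned elsewhere.
class ExampleGpuThreadHolder : public gpu::GpuInProcessThreadServiceDelegate {
 public:
  ExampleGpuThreadHolder(scoped_refptr<gpu::SharedContextState> context_state,
                         scoped_refptr<gl::GLShareGroup> share_group)
      : context_state_(std::move(context_state)),
        share_group_(std::move(share_group)) {}

  // gpu::GpuInProcessThreadServiceDelegate:
  scoped_refptr<gpu::SharedContextState> GetSharedContextState() override {
    return context_state_;
  }
  scoped_refptr<gl::GLShareGroup> GetShareGroup() override {
    return share_group_;
  }

 private:
  scoped_refptr<gpu::SharedContextState> context_state_;
  scoped_refptr<gl::GLShareGroup> share_group_;
};

InProcessGpuThreadHolder, changed later in this patch, follows the same pattern and returns the state and share group it creates on the gpu thread.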
diff --git a/chromium/gpu/ipc/gpu_task_scheduler_helper.cc b/chromium/gpu/ipc/gpu_task_scheduler_helper.cc
new file mode 100644
index 00000000000..b322404ca57
--- /dev/null
+++ b/chromium/gpu/ipc/gpu_task_scheduler_helper.cc
@@ -0,0 +1,70 @@
+// Copyright 2019 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "gpu/ipc/gpu_task_scheduler_helper.h"
+
+#include "gpu/command_buffer/client/cmd_buffer_helper.h"
+#include "gpu/ipc/command_buffer_task_executor.h"
+#include "gpu/ipc/single_task_sequence.h"
+
+namespace gpu {
+
+GpuTaskSchedulerHelper::GpuTaskSchedulerHelper(
+ std::unique_ptr<SingleTaskSequence> task_sequence)
+ : using_command_buffer_(false),
+ task_sequence_(std::move(task_sequence)),
+ initialized_(true) {}
+
+GpuTaskSchedulerHelper::GpuTaskSchedulerHelper(
+ gpu::CommandBufferTaskExecutor* command_buffer_task_executor)
+ : using_command_buffer_(true),
+ task_sequence_(command_buffer_task_executor->CreateSequence()),
+ initialized_(false) {}
+
+GpuTaskSchedulerHelper::~GpuTaskSchedulerHelper() = default;
+
+void GpuTaskSchedulerHelper::Initialize(
+ gpu::CommandBufferHelper* command_buffer_helper) {
+ DCHECK(using_command_buffer_);
+ DCHECK(!initialized_);
+ DCHECK(command_buffer_helper);
+ command_buffer_helper_ = command_buffer_helper;
+ initialized_ = true;
+}
+
+void GpuTaskSchedulerHelper::ScheduleGpuTask(
+ base::OnceClosure callback,
+ std::vector<gpu::SyncToken> sync_tokens) {
+ // There are two places where this function is called: inside
+ // SkiaOutputSurface, where |using_command_buffer_| is false, or by other
+ // users when sharing with the command buffer, where we should have
+ // |command_buffer_helper_| already set up.
+ DCHECK(!using_command_buffer_ || command_buffer_helper_);
+ DCHECK(initialized_);
+ if (command_buffer_helper_)
+ command_buffer_helper_->Flush();
+
+ task_sequence_->ScheduleTask(std::move(callback), std::move(sync_tokens));
+}
+
+void GpuTaskSchedulerHelper::ScheduleOrRetainGpuTask(
+ base::OnceClosure task,
+ std::vector<SyncToken> sync_tokens) {
+ DCHECK(!using_command_buffer_);
+ DCHECK(!command_buffer_helper_);
+ task_sequence_->ScheduleOrRetainTask(std::move(task), sync_tokens);
+}
+
+SequenceId GpuTaskSchedulerHelper::GetSequenceId() {
+ DCHECK(!using_command_buffer_);
+ DCHECK(!command_buffer_helper_);
+ return task_sequence_->GetSequenceId();
+}
+
+gpu::SingleTaskSequence* GpuTaskSchedulerHelper::GetTaskSequence() const {
+ DCHECK(using_command_buffer_);
+ return task_sequence_.get();
+}
+
+} // namespace gpu
diff --git a/chromium/gpu/ipc/gpu_task_scheduler_helper.h b/chromium/gpu/ipc/gpu_task_scheduler_helper.h
new file mode 100644
index 00000000000..ff4ca477ec7
--- /dev/null
+++ b/chromium/gpu/ipc/gpu_task_scheduler_helper.h
@@ -0,0 +1,100 @@
+// Copyright 2019 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef GPU_IPC_GPU_TASK_SCHEDULER_HELPER_H_
+#define GPU_IPC_GPU_TASK_SCHEDULER_HELPER_H_
+
+#include "base/callback.h"
+#include "gpu/command_buffer/common/sync_token.h"
+#include "gpu/command_buffer/service/sequence_id.h"
+#include "gpu/ipc/gl_in_process_context_export.h"
+
+namespace viz {
+class VizProcessContextProvider;
+}
+
+namespace gpu {
+class CommandBufferTaskExecutor;
+class CommandBufferHelper;
+class SingleTaskSequence;
+class InProcessCommandBuffer;
+
+// This class is a wrapper around a |gpu::SingleTaskSequence|. When we have
+// SkiaRenderer enabled, this should behave exactly like a
+// |gpu::SingleTaskSequence|. When we have GLRenderer with CommandBuffer, we
+// need to initialize this class with a |CommandBufferHelper|. This is because
+// when this class is used outside of actual CommandBuffer, we would need to
+// make sure the order of post tasks still corresponds to the order that tasks
+// are posted to the CommandBuffer.
+// This class is per display compositor. When this is used with command buffer,
+// it is created on VizProcessContextProvider. When this is used with
+// SkiaRenderer, it is created on SkiaOutputSurfaceImpl. Each user of this class
+// would hold a reference.
+class GL_IN_PROCESS_CONTEXT_EXPORT GpuTaskSchedulerHelper
+ : public base::RefCounted<GpuTaskSchedulerHelper> {
+ public:
+ // This constructor is only used for SkiaOutputSurface.
+ explicit GpuTaskSchedulerHelper(
+ std::unique_ptr<SingleTaskSequence> task_sequence);
+ // This constructor is used for command buffer GLOutputSurface.
+ explicit GpuTaskSchedulerHelper(
+ CommandBufferTaskExecutor* command_buffer_task_executor);
+
+ // This function sets up the |command_buffer_helper|, which flushes the
+ // command buffer when a user outside of the command buffer shares the same
+ // GpuTaskSchedulerHelper. This is only needed when sharing with the command
+ // buffer, so it does not need to be called when using SkiaRenderer.
+ void Initialize(CommandBufferHelper* command_buffer_helper);
+
+ // This is called outside of CommandBuffer and would need to flush the command
+ // buffer if the CommandBufferHelper is present. CommandBuffer is a friend of
+ // this class and gets a direct pointer to the internal
+ // |gpu::SingleTaskSequence|.
+ void ScheduleGpuTask(base::OnceClosure task,
+ std::vector<SyncToken> sync_tokens);
+
+ // This is only called with SkiaOutputSurface, so there is no need to flush
+ // the command buffer here.
+ void ScheduleOrRetainGpuTask(base::OnceClosure task,
+ std::vector<SyncToken> sync_tokens);
+ // This is only called with SkiaOutputSurface.
+ SequenceId GetSequenceId();
+
+ private:
+ friend class base::RefCounted<GpuTaskSchedulerHelper>;
+ ~GpuTaskSchedulerHelper();
+
+ // If |using_command_buffer_| is true, we are using this class with
+ // GLOutputSurface. Otherwise we are using this class with
+ // SkiaOutputSurface.
+ bool using_command_buffer_;
+
+ friend class gpu::InProcessCommandBuffer;
+ friend class viz::VizProcessContextProvider;
+ // Only used for inside CommandBuffer implementation.
+ SingleTaskSequence* GetTaskSequence() const;
+
+ // This |task_sequence_| handles task scheduling.
+ const std::unique_ptr<SingleTaskSequence> task_sequence_;
+
+ // When this class is used with the command buffer, this bool indicates
+ // whether we have initialized the |command_buffer_helper_|. The command
+ // buffer requires the |task_sequence_| in order to initialize on gpu thread,
+ // but the |command_buffer_helper_| is created after initialization of the
+ // command buffer.
+ bool initialized_;
+
+ // In the case where the TaskSequence is shared between command buffer and
+ // other users, |command_buffer_helper_| is used to flush the command buffer
+ // before posting tasks from a different user. This gives the command buffer a
+ // chance to post any pending tasks and maintains the ordering between command
+ // buffer and other user tasks.
+ CommandBufferHelper* command_buffer_helper_ = nullptr;
+
+ DISALLOW_COPY_AND_ASSIGN(GpuTaskSchedulerHelper);
+};
+
+} // namespace gpu
+
+#endif // GPU_IPC_GPU_TASK_SCHEDULER_HELPER_H_
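
As a rough usage sketch of the command-buffer path (the |task_executor| and |cmd_helper| objects are assumed to exist and are not part of this patch): the helper is built from the CommandBufferTaskExecutor, Initialize() is called once the CommandBufferHelper is available, and GPU tasks are then funneled through ScheduleGpuTask().

// Illustrative sketch only; |task_executor| and |cmd_helper| are assumed.
auto scheduler =
    base::MakeRefCounted<gpu::GpuTaskSchedulerHelper>(task_executor);

// The CommandBufferHelper only exists after the command buffer has
// initialized, so it is wired up in a separate step.
scheduler->Initialize(cmd_helper);

// Flushes the command buffer (when a helper is present) before handing the
// closure to the underlying SingleTaskSequence.
scheduler->ScheduleGpuTask(base::BindOnce([] { /* runs on the gpu thread */ }),
                           /*sync_tokens=*/{});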
diff --git a/chromium/gpu/ipc/host/gpu_memory_buffer_support.cc b/chromium/gpu/ipc/host/gpu_memory_buffer_support.cc
index 9c46b54ad5d..7347699f443 100644
--- a/chromium/gpu/ipc/host/gpu_memory_buffer_support.cc
+++ b/chromium/gpu/ipc/host/gpu_memory_buffer_support.cc
@@ -23,7 +23,7 @@ GpuMemoryBufferConfigurationSet GetNativeGpuMemoryBufferConfigurations(
gfx::BufferFormat::RG_88, gfx::BufferFormat::BGR_565,
gfx::BufferFormat::RGBA_4444, gfx::BufferFormat::RGBX_8888,
gfx::BufferFormat::RGBA_8888, gfx::BufferFormat::BGRX_8888,
- gfx::BufferFormat::BGRX_1010102, gfx::BufferFormat::RGBX_1010102,
+ gfx::BufferFormat::BGRA_1010102, gfx::BufferFormat::RGBA_1010102,
gfx::BufferFormat::BGRA_8888, gfx::BufferFormat::RGBA_F16,
gfx::BufferFormat::YVU_420, gfx::BufferFormat::YUV_420_BIPLANAR,
gfx::BufferFormat::P010};
@@ -42,7 +42,7 @@ GpuMemoryBufferConfigurationSet GetNativeGpuMemoryBufferConfigurations(
for (auto format : kBufferFormats) {
for (auto usage : kUsages) {
if (support->IsNativeGpuMemoryBufferConfigurationSupported(format, usage))
- configurations.insert(std::make_pair(format, usage));
+ configurations.insert(gfx::BufferUsageAndFormat(usage, format));
}
}
#endif // defined(USE_OZONE) || defined(OS_MACOSX) || defined(OS_WIN) ||
@@ -60,9 +60,9 @@ bool GetImageNeedsPlatformSpecificTextureTarget(gfx::BufferFormat format,
GpuMemoryBufferSupport support;
GpuMemoryBufferConfigurationSet native_configurations =
GetNativeGpuMemoryBufferConfigurations(&support);
- return native_configurations.find(std::make_pair(format, usage)) !=
- native_configurations.end();
-#else // defined(USE_OZONE) || defined(OS_MACOSX)
+ return base::Contains(native_configurations,
+ gfx::BufferUsageAndFormat(usage, format));
+#else
return false;
#endif
}
diff --git a/chromium/gpu/ipc/host/gpu_memory_buffer_support.h b/chromium/gpu/ipc/host/gpu_memory_buffer_support.h
index f3deeb62a01..162d10c3f96 100644
--- a/chromium/gpu/ipc/host/gpu_memory_buffer_support.h
+++ b/chromium/gpu/ipc/host/gpu_memory_buffer_support.h
@@ -15,8 +15,7 @@
namespace gpu {
-using GpuMemoryBufferConfigurationKey =
- std::pair<gfx::BufferFormat, gfx::BufferUsage>;
+using GpuMemoryBufferConfigurationKey = gfx::BufferUsageAndFormat;
using GpuMemoryBufferConfigurationSet =
std::unordered_set<GpuMemoryBufferConfigurationKey>;
@@ -27,8 +26,8 @@ namespace std {
template <>
struct hash<gpu::GpuMemoryBufferConfigurationKey> {
size_t operator()(const gpu::GpuMemoryBufferConfigurationKey& key) const {
- return base::HashInts(static_cast<int>(key.first),
- static_cast<int>(key.second));
+ return base::HashInts(static_cast<int>(key.format),
+ static_cast<int>(key.usage));
}
};
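
Since the configuration key is now gfx::BufferUsageAndFormat rather than a std::pair, both insertion and lookup name the usage and format explicitly. A small sketch under that assumption (the specific usage/format values are arbitrary examples):

// Illustrative sketch only: build and query the set with the new key type.
gpu::GpuMemoryBufferConfigurationSet configurations;
configurations.insert(gfx::BufferUsageAndFormat(gfx::BufferUsage::SCANOUT,
                                                gfx::BufferFormat::BGRA_8888));

const bool supported = base::Contains(
    configurations, gfx::BufferUsageAndFormat(gfx::BufferUsage::SCANOUT,
                                              gfx::BufferFormat::BGRA_8888));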
diff --git a/chromium/gpu/ipc/in_process_command_buffer.cc b/chromium/gpu/ipc/in_process_command_buffer.cc
index 08162a8e826..338b2da5b0f 100644
--- a/chromium/gpu/ipc/in_process_command_buffer.cc
+++ b/chromium/gpu/ipc/in_process_command_buffer.cc
@@ -54,7 +54,6 @@
#include "gpu/command_buffer/service/scheduler.h"
#include "gpu/command_buffer/service/service_utils.h"
#include "gpu/command_buffer/service/shared_context_state.h"
-#include "gpu/command_buffer/service/shared_image_factory.h"
#include "gpu/command_buffer/service/sync_point_manager.h"
#include "gpu/command_buffer/service/webgpu_decoder.h"
#include "gpu/config/gpu_crash_keys.h"
@@ -67,10 +66,12 @@
#include "gpu/ipc/host/gpu_memory_buffer_support.h"
#include "gpu/ipc/service/gpu_channel_manager_delegate.h"
#include "gpu/ipc/service/image_transport_surface.h"
+#include "gpu/ipc/shared_image_interface_in_process.h"
#include "gpu/ipc/single_task_sequence.h"
#include "ui/gfx/geometry/size.h"
#include "ui/gfx/gpu_fence.h"
#include "ui/gfx/gpu_fence_handle.h"
+#include "ui/gl/gl_bindings.h"
#include "ui/gl/gl_context.h"
#include "ui/gl/gl_image.h"
#include "ui/gl/gl_image_shared_memory.h"
@@ -138,186 +139,27 @@ void ReleaseGLSurfaceOnClientThread(gl::GLSurface* surface,
} // namespace
-class InProcessCommandBuffer::SharedImageInterface
- : public gpu::SharedImageInterface {
- public:
- explicit SharedImageInterface(InProcessCommandBuffer* parent)
- : parent_(parent),
- gpu_thread_weak_ptr_(
- parent_->gpu_thread_weak_ptr_factory_.GetWeakPtr()),
- command_buffer_id_(NextCommandBufferId()) {}
-
- ~SharedImageInterface() override = default;
-
- Mailbox CreateSharedImage(viz::ResourceFormat format,
- const gfx::Size& size,
- const gfx::ColorSpace& color_space,
- uint32_t usage) override {
- auto mailbox = Mailbox::GenerateForSharedImage();
- {
- base::AutoLock lock(lock_);
- // Note: we enqueue the task under the lock to guarantee monotonicity of
- // the release ids as seen by the service. Unretained is safe because
- // InProcessCommandBuffer synchronizes with the GPU thread at destruction
- // time, cancelling tasks, before |this| is destroyed.
- parent_->ScheduleGpuTask(base::BindOnce(
- &InProcessCommandBuffer::CreateSharedImageOnGpuThread,
- gpu_thread_weak_ptr_, mailbox, format, size, color_space, usage,
- MakeSyncToken(next_fence_sync_release_++)));
- }
- return mailbox;
- }
-
- Mailbox CreateSharedImage(viz::ResourceFormat format,
- const gfx::Size& size,
- const gfx::ColorSpace& color_space,
- uint32_t usage,
- base::span<const uint8_t> pixel_data) override {
- auto mailbox = Mailbox::GenerateForSharedImage();
- std::vector<uint8_t> pixel_data_copy(pixel_data.begin(), pixel_data.end());
- {
- base::AutoLock lock(lock_);
- // Note: we enqueue the task under the lock to guarantee monotonicity of
- // the release ids as seen by the service. Unretained is safe because
- // InProcessCommandBuffer synchronizes with the GPU thread at destruction
- // time, cancelling tasks, before |this| is destroyed.
- parent_->ScheduleGpuTask(base::BindOnce(
- &InProcessCommandBuffer::CreateSharedImageWithDataOnGpuThread,
- gpu_thread_weak_ptr_, mailbox, format, size, color_space, usage,
- MakeSyncToken(next_fence_sync_release_++),
- std::move(pixel_data_copy)));
- }
- return mailbox;
- }
-
- Mailbox CreateSharedImage(gfx::GpuMemoryBuffer* gpu_memory_buffer,
- GpuMemoryBufferManager* gpu_memory_buffer_manager,
- const gfx::ColorSpace& color_space,
- uint32_t usage) override {
- DCHECK(gpu_memory_buffer->GetType() == gfx::NATIVE_PIXMAP ||
- gpu_memory_buffer->GetType() == gfx::ANDROID_HARDWARE_BUFFER ||
- gpu_memory_buffer_manager);
-
- // TODO(piman): DCHECK GMB format support.
- DCHECK(gpu::IsImageSizeValidForGpuMemoryBufferFormat(
- gpu_memory_buffer->GetSize(), gpu_memory_buffer->GetFormat()));
-
- auto mailbox = Mailbox::GenerateForSharedImage();
- gfx::GpuMemoryBufferHandle handle = gpu_memory_buffer->CloneHandle();
- bool requires_sync_token = handle.type == gfx::IO_SURFACE_BUFFER;
- SyncToken sync_token;
- {
- base::AutoLock lock(lock_);
- sync_token = MakeSyncToken(next_fence_sync_release_++);
- // Note: we enqueue the task under the lock to guarantee monotonicity of
- // the release ids as seen by the service. Unretained is safe because
- // InProcessCommandBuffer synchronizes with the GPU thread at destruction
- // time, cancelling tasks, before |this| is destroyed.
- parent_->ScheduleGpuTask(base::BindOnce(
- &InProcessCommandBuffer::CreateGMBSharedImageOnGpuThread,
- gpu_thread_weak_ptr_, mailbox, std::move(handle),
- gpu_memory_buffer->GetFormat(), gpu_memory_buffer->GetSize(),
- color_space, usage, sync_token));
- }
- if (requires_sync_token) {
- sync_token.SetVerifyFlush();
- gpu_memory_buffer_manager->SetDestructionSyncToken(gpu_memory_buffer,
- sync_token);
- }
- return mailbox;
- }
-
- SwapChainMailboxes CreateSwapChain(viz::ResourceFormat format,
- const gfx::Size& size,
- const gfx::ColorSpace& color_space,
- uint32_t usage) override {
- NOTREACHED();
- return {};
- }
-
- void PresentSwapChain(const SyncToken& sync_token,
- const Mailbox& mailbox) override {
- NOTREACHED();
- }
-
-#if defined(OS_FUCHSIA)
- void RegisterSysmemBufferCollection(gfx::SysmemBufferCollectionId id,
- zx::channel token) override {
- NOTREACHED();
- }
- void ReleaseSysmemBufferCollection(
- gfx::SysmemBufferCollectionId id) override {
- NOTREACHED();
- }
-#endif // defined(OS_FUCHSIA)
-
- void UpdateSharedImage(const SyncToken& sync_token,
- const Mailbox& mailbox) override {
- UpdateSharedImage(sync_token, nullptr, mailbox);
- }
-
- void UpdateSharedImage(const SyncToken& sync_token,
- std::unique_ptr<gfx::GpuFence> acquire_fence,
- const Mailbox& mailbox) override {
- DCHECK(!acquire_fence);
- base::AutoLock lock(lock_);
- // Note: we enqueue the task under the lock to guarantee monotonicity of
- // the release ids as seen by the service. Unretained is safe because
- // InProcessCommandBuffer synchronizes with the GPU thread at destruction
- // time, cancelling tasks, before |this| is destroyed.
- parent_->ScheduleGpuTask(
- base::BindOnce(&InProcessCommandBuffer::UpdateSharedImageOnGpuThread,
- gpu_thread_weak_ptr_, mailbox,
- MakeSyncToken(next_fence_sync_release_++)),
- {sync_token});
- }
-
- void DestroySharedImage(const SyncToken& sync_token,
- const Mailbox& mailbox) override {
- // Use sync token dependency to ensure that the destroy task does not run
- // before sync token is released.
- parent_->ScheduleGpuTask(
- base::BindOnce(&InProcessCommandBuffer::DestroySharedImageOnGpuThread,
- gpu_thread_weak_ptr_, mailbox),
- {sync_token});
- }
-
- SyncToken GenUnverifiedSyncToken() override {
- base::AutoLock lock(lock_);
- return MakeSyncToken(next_fence_sync_release_ - 1);
- }
+InProcessCommandBuffer::SharedImageInterfaceHelper::SharedImageInterfaceHelper(
+ InProcessCommandBuffer* command_buffer)
+ : command_buffer_(command_buffer) {}
- SyncToken GenVerifiedSyncToken() override {
- base::AutoLock lock(lock_);
- SyncToken sync_token = MakeSyncToken(next_fence_sync_release_ - 1);
- sync_token.SetVerifyFlush();
- return sync_token;
- }
-
- void Flush() override {
- // No need to flush in this implementation.
- }
-
- CommandBufferId command_buffer_id() const { return command_buffer_id_; }
-
- private:
- SyncToken MakeSyncToken(uint64_t release_id) {
- return SyncToken(CommandBufferNamespace::IN_PROCESS, command_buffer_id_,
- release_id);
- }
-
- InProcessCommandBuffer* const parent_;
- base::WeakPtr<InProcessCommandBuffer> gpu_thread_weak_ptr_;
-
- const CommandBufferId command_buffer_id_;
+void InProcessCommandBuffer::SharedImageInterfaceHelper::SetError() {
+ // Signal errors by losing the command buffer.
+ command_buffer_->command_buffer_->SetParseError(error::kLostContext);
+}
- // Accessed on any thread. release_id_lock_ protects access to
- // next_fence_sync_release_.
- base::Lock lock_;
- uint64_t next_fence_sync_release_ = 1;
+void InProcessCommandBuffer::SharedImageInterfaceHelper::WrapTaskWithGpuCheck(
+ base::OnceClosure task) {
+ command_buffer_->RunTaskOnGpuThread(std::move(task));
+}
- DISALLOW_COPY_AND_ASSIGN(SharedImageInterface);
-};
+bool InProcessCommandBuffer::SharedImageInterfaceHelper::EnableWrappedSkImage()
+ const {
+ // We need WrappedSkImage to support creating a SharedImage with pixel data
+ // when GL is unavailable. This is used in various unit tests.
+ return command_buffer_->context_state_ &&
+ !command_buffer_->context_state_->GrContextIsGL();
+}
InProcessCommandBuffer::InProcessCommandBuffer(
CommandBufferTaskExecutor* task_executor,
@@ -335,7 +177,6 @@ InProcessCommandBuffer::InProcessCommandBuffer(
// and not the current (client) sequence except for webview (see Initialize).
DETACH_FROM_SEQUENCE(gpu_sequence_checker_);
DCHECK(task_executor_);
- shared_image_interface_ = std::make_unique<SharedImageInterface>(this);
}
InProcessCommandBuffer::~InProcessCommandBuffer() {
@@ -412,6 +253,7 @@ gpu::ContextResult InProcessCommandBuffer::Initialize(
ImageFactory* image_factory,
GpuChannelManagerDelegate* gpu_channel_manager_delegate,
scoped_refptr<base::SingleThreadTaskRunner> task_runner,
+ SingleTaskSequence* task_sequence,
gpu::raster::GrShaderCache* gr_shader_cache,
GpuProcessActivityFlags* activity_flags) {
DCHECK_CALLED_ON_VALID_SEQUENCE(client_sequence_checker_);
@@ -447,7 +289,17 @@ gpu::ContextResult InProcessCommandBuffer::Initialize(
base::BindOnce(&InProcessCommandBuffer::InitializeOnGpuThread,
base::Unretained(this), params);
- task_sequence_ = task_executor_->CreateSequence();
+ // If a |task_sequence| is passed in, the command buffer is meant to share it
+ // with other users; take the raw pointer in this case because the
+ // |task_sequence| is kept alive by VizProcessContextProvider. If no
+ // |task_sequence| is passed in, create one here.
+ if (task_sequence) {
+ task_sequence_ = task_sequence;
+ } else {
+ task_scheduler_holder_ =
+ base::MakeRefCounted<gpu::GpuTaskSchedulerHelper>(task_executor_);
+ task_sequence_ = task_scheduler_holder_->GetTaskSequence();
+ }
// Here we block by using a WaitableEvent to make sure InitializeOnGpuThread
// is finished as part of Initialize function. This also makes sure we won't
@@ -460,8 +312,14 @@ gpu::ContextResult InProcessCommandBuffer::Initialize(
WrapTaskWithResult(std::move(init_task), &result, &completion), {});
completion.Wait();
- if (result == gpu::ContextResult::kSuccess)
+ if (result == gpu::ContextResult::kSuccess) {
capabilities_ = capabilities;
+ shared_image_interface_ = std::make_unique<SharedImageInterfaceInProcess>(
+ task_executor_, task_sequence_, NextCommandBufferId(),
+ context_group_->mailbox_manager(), image_factory_,
+ context_group_->memory_tracker(),
+ std::make_unique<SharedImageInterfaceHelper>(this));
+ }
return result;
}
@@ -535,7 +393,7 @@ gpu::ContextResult InProcessCommandBuffer::InitializeOnGpuThread(
command_buffer_ = std::make_unique<CommandBufferService>(
this, context_group_->memory_tracker());
- context_state_ = task_executor_->shared_context_state();
+ context_state_ = task_executor_->GetSharedContextState();
if (!surface_) {
if (is_offscreen_) {
@@ -603,14 +461,6 @@ gpu::ContextResult InProcessCommandBuffer::InitializeOnGpuThread(
task_executor_->sync_point_manager()->CreateSyncPointClientState(
GetNamespaceID(), GetCommandBufferID(),
task_sequence_->GetSequenceId());
- // Make the SharedImageInterface use the same sequence as the command buffer,
- // it's necessary for WebView because of the blocking behavior.
- // TODO(piman): see if it's worth using a different sequence for non-WebView.
- shared_image_client_state_ =
- task_executor_->sync_point_manager()->CreateSyncPointClientState(
- CommandBufferNamespace::IN_PROCESS,
- shared_image_interface_->command_buffer_id(),
- task_sequence_->GetSequenceId());
if (context_group_->use_passthrough_cmd_decoder()) {
// When using the passthrough command decoder, never share with other
@@ -619,7 +469,7 @@ gpu::ContextResult InProcessCommandBuffer::InitializeOnGpuThread(
} else {
// When using the validating command decoder, always use the global share
// group.
- gl_share_group_ = task_executor_->share_group();
+ gl_share_group_ = task_executor_->GetShareGroup();
}
if (params.attribs.context_type == CONTEXT_TYPE_WEBGPU) {
@@ -642,9 +492,8 @@ gpu::ContextResult InProcessCommandBuffer::InitializeOnGpuThread(
// TODO(khushalsagar): A lot of this initialization code is duplicated in
// GpuChannelManager. Pull it into a common util method.
scoped_refptr<gl::GLContext> real_context =
- use_virtualized_gl_context_
- ? gl_share_group_->GetSharedContext(surface_.get())
- : nullptr;
+ use_virtualized_gl_context_ ? gl_share_group_->shared_context()
+ : nullptr;
if (real_context &&
(!real_context->MakeCurrent(surface_.get()) ||
real_context->CheckStickyGraphicsResetStatus() != GL_NO_ERROR)) {
@@ -669,7 +518,7 @@ gpu::ContextResult InProcessCommandBuffer::InitializeOnGpuThread(
task_executor_->gpu_feature_info().ApplyToGLContext(real_context.get());
if (use_virtualized_gl_context_)
- gl_share_group_->SetSharedContext(surface_.get(), real_context.get());
+ gl_share_group_->SetSharedContext(real_context.get());
}
if (!real_context->MakeCurrent(surface_.get())) {
@@ -682,18 +531,9 @@ gpu::ContextResult InProcessCommandBuffer::InitializeOnGpuThread(
if (params.attribs.enable_raster_interface &&
!params.attribs.enable_gles2_interface) {
gr_shader_cache_ = params.gr_shader_cache;
- if (!context_state_) {
- context_state_ = base::MakeRefCounted<SharedContextState>(
- gl_share_group_, surface_, real_context,
- use_virtualized_gl_context_, base::DoNothing(),
- task_executor_->gpu_preferences().gr_context_type);
- context_state_->InitializeGL(task_executor_->gpu_preferences(),
- context_group_->feature_info());
- context_state_->InitializeGrContext(workarounds, params.gr_shader_cache,
- params.activity_flags);
- }
- if (!context_state_->MakeCurrent(nullptr, /*needs_gl=*/true)) {
+ if (!context_state_ ||
+ !context_state_->MakeCurrent(nullptr, /*needs_gl=*/true)) {
DestroyOnGpuThread();
LOG(ERROR) << "Failed to make context current.";
return ContextResult::kTransientFailure;
@@ -715,7 +555,8 @@ gpu::ContextResult InProcessCommandBuffer::InitializeOnGpuThread(
this, command_buffer_.get(), task_executor_->outputter(),
task_executor_->gpu_feature_info(), task_executor_->gpu_preferences(),
context_group_->memory_tracker(),
- task_executor_->shared_image_manager(), context_state_));
+ task_executor_->shared_image_manager(), context_state_,
+ true /*is_privileged*/));
} else {
decoder_.reset(gles2::GLES2Decoder::Create(this, command_buffer_.get(),
task_executor_->outputter(),
@@ -799,8 +640,9 @@ void InProcessCommandBuffer::Destroy() {
client_thread_weak_ptr_factory_.InvalidateWeakPtrs();
gpu_control_client_ = nullptr;
+ shared_image_interface_ = nullptr;
// Here we block by using a WaitableEvent to make sure DestroyOnGpuThread is
- // finshed as part of Destroy.
+ // finished as part of Destroy.
base::WaitableEvent completion(
base::WaitableEvent::ResetPolicy::MANUAL,
base::WaitableEvent::InitialState::NOT_SIGNALED);
@@ -828,8 +670,6 @@ bool InProcessCommandBuffer::DestroyOnGpuThread() {
gpu_thread_weak_ptr_factory_.InvalidateWeakPtrs();
// Clean up GL resources if possible.
bool have_context = context_.get() && context_->MakeCurrent(surface_.get());
- if (shared_image_factory_)
- shared_image_factory_->DestroyAllSharedImages(have_context);
base::Optional<gles2::ProgramCache::ScopedCacheUse> cache_use;
if (have_context)
cache_use = CreateCacheUse();
@@ -852,10 +692,6 @@ bool InProcessCommandBuffer::DestroyOnGpuThread() {
sync_point_client_state_->Destroy();
sync_point_client_state_ = nullptr;
}
- if (shared_image_client_state_) {
- shared_image_client_state_->Destroy();
- shared_image_client_state_ = nullptr;
- }
gl_share_group_ = nullptr;
context_group_ = nullptr;
if (context_state_)
@@ -883,8 +719,9 @@ void InProcessCommandBuffer::OnParseError() {
if (gpu_channel_manager_delegate_) {
// Tell the browser about this context loss so it can determine whether
// client APIs like WebGL need to be blocked from automatically running.
+ // |offscreen| is used to determine whether this is a compositing context.
gpu_channel_manager_delegate_->DidLoseContext(
- is_offscreen_, state.context_lost_reason, active_url_.url());
+ /*offscreen=*/false, state.context_lost_reason, active_url_.url());
// Check the error reason and robustness extension to get a better idea if
// the GL context was lost. We might try restarting the GPU process to
@@ -1480,123 +1317,6 @@ void InProcessCommandBuffer::GetGpuFenceOnGpuThread(
base::BindOnce(std::move(callback), std::move(gpu_fence)));
}
-void InProcessCommandBuffer::LazyCreateSharedImageFactory() {
- if (shared_image_factory_)
- return;
-
- // We need WrappedSkImage to support creating a SharedImage with pixel data
- // when GL is unavailable. This is used in various unit tests.
- const bool enable_wrapped_sk_image =
- context_state_ && !context_state_->GrContextIsGL();
- shared_image_factory_ = std::make_unique<SharedImageFactory>(
- GetGpuPreferences(), context_group_->feature_info()->workarounds(),
- GetGpuFeatureInfo(), context_state_.get(),
- context_group_->mailbox_manager(), task_executor_->shared_image_manager(),
- image_factory_, nullptr, enable_wrapped_sk_image);
-}
-
-void InProcessCommandBuffer::CreateSharedImageOnGpuThread(
- const Mailbox& mailbox,
- viz::ResourceFormat format,
- const gfx::Size& size,
- const gfx::ColorSpace& color_space,
- uint32_t usage,
- const SyncToken& sync_token) {
- DCHECK_CALLED_ON_VALID_SEQUENCE(gpu_sequence_checker_);
- // |shared_image_factory_| never writes to the surface, so skip unnecessary
- // MakeCurrent to improve performance. https://crbug.com/457431
- if (context_ && !context_->IsCurrent(nullptr) && !MakeCurrent())
- return;
- LazyCreateSharedImageFactory();
- if (!shared_image_factory_->CreateSharedImage(mailbox, format, size,
- color_space, usage)) {
- // Signal errors by losing the command buffer.
- command_buffer_->SetParseError(error::kLostContext);
- return;
- }
- context_group_->mailbox_manager()->PushTextureUpdates(sync_token);
- shared_image_client_state_->ReleaseFenceSync(sync_token.release_count());
-}
-
-void InProcessCommandBuffer::CreateSharedImageWithDataOnGpuThread(
- const Mailbox& mailbox,
- viz::ResourceFormat format,
- const gfx::Size& size,
- const gfx::ColorSpace& color_space,
- uint32_t usage,
- const SyncToken& sync_token,
- std::vector<uint8_t> pixel_data) {
- DCHECK_CALLED_ON_VALID_SEQUENCE(gpu_sequence_checker_);
- // |shared_image_factory_| never writes to the surface, so skip unnecessary
- // MakeCurrent to improve performance. https://crbug.com/457431
- if (context_ && !context_->IsCurrent(nullptr) && !MakeCurrent())
- return;
- LazyCreateSharedImageFactory();
- if (!shared_image_factory_->CreateSharedImage(
- mailbox, format, size, color_space, usage, pixel_data)) {
- // Signal errors by losing the command buffer.
- command_buffer_->SetParseError(error::kLostContext);
- return;
- }
- context_group_->mailbox_manager()->PushTextureUpdates(sync_token);
- shared_image_client_state_->ReleaseFenceSync(sync_token.release_count());
-}
-
-void InProcessCommandBuffer::CreateGMBSharedImageOnGpuThread(
- const Mailbox& mailbox,
- gfx::GpuMemoryBufferHandle handle,
- gfx::BufferFormat format,
- const gfx::Size& size,
- const gfx::ColorSpace& color_space,
- uint32_t usage,
- const SyncToken& sync_token) {
- DCHECK_CALLED_ON_VALID_SEQUENCE(gpu_sequence_checker_);
- if (!MakeCurrent())
- return;
- LazyCreateSharedImageFactory();
- // TODO(piman): add support for SurfaceHandle (for backbuffers for ozone/drm).
- SurfaceHandle surface_handle = kNullSurfaceHandle;
- if (!shared_image_factory_->CreateSharedImage(
- mailbox, kInProcessCommandBufferClientId, std::move(handle), format,
- surface_handle, size, color_space, usage)) {
- // Signal errors by losing the command buffer.
- command_buffer_->SetParseError(error::kLostContext);
- return;
- }
- context_group_->mailbox_manager()->PushTextureUpdates(sync_token);
- shared_image_client_state_->ReleaseFenceSync(sync_token.release_count());
-}
-
-void InProcessCommandBuffer::UpdateSharedImageOnGpuThread(
- const Mailbox& mailbox,
- const SyncToken& sync_token) {
- DCHECK_CALLED_ON_VALID_SEQUENCE(gpu_sequence_checker_);
- if (!MakeCurrent())
- return;
- if (!shared_image_factory_ ||
- !shared_image_factory_->UpdateSharedImage(mailbox)) {
- // Signal errors by losing the command buffer.
- command_buffer_->SetParseError(error::kLostContext);
- return;
- }
- context_group_->mailbox_manager()->PushTextureUpdates(sync_token);
- shared_image_client_state_->ReleaseFenceSync(sync_token.release_count());
-}
-
-void InProcessCommandBuffer::DestroySharedImageOnGpuThread(
- const Mailbox& mailbox) {
- DCHECK_CALLED_ON_VALID_SEQUENCE(gpu_sequence_checker_);
- // |shared_image_factory_| never writes to the surface, so skip unnecessary
- // MakeCurrent to improve performance. https://crbug.com/457431
- if (!context_->IsCurrent(nullptr) && !MakeCurrent())
- return;
- if (!shared_image_factory_ ||
- !shared_image_factory_->DestroySharedImage(mailbox)) {
- // Signal errors by losing the command buffer.
- command_buffer_->SetParseError(error::kLostContext);
- }
-}
-
void InProcessCommandBuffer::SetLock(base::Lock*) {
// No support for using on multiple threads.
NOTREACHED();
diff --git a/chromium/gpu/ipc/in_process_command_buffer.h b/chromium/gpu/ipc/in_process_command_buffer.h
index 939e083cf05..38694d05493 100644
--- a/chromium/gpu/ipc/in_process_command_buffer.h
+++ b/chromium/gpu/ipc/in_process_command_buffer.h
@@ -42,6 +42,7 @@
#include "gpu/config/gpu_preferences.h"
#include "gpu/ipc/command_buffer_task_executor.h"
#include "gpu/ipc/gl_in_process_context_export.h"
+#include "gpu/ipc/gpu_task_scheduler_helper.h"
#include "gpu/ipc/service/context_url.h"
#include "gpu/ipc/service/display_context.h"
#include "gpu/ipc/service/image_transport_surface_delegate.h"
@@ -60,14 +61,18 @@ struct GpuFenceHandle;
class Size;
}
+namespace viz {
+class GpuTaskSchedulerHelper;
+}
+
namespace gpu {
class SharedContextState;
class GpuChannelManagerDelegate;
class GpuProcessActivityFlags;
class GpuMemoryBufferManager;
class ImageFactory;
-class SharedImageFactory;
class SharedImageInterface;
+class SharedImageInterfaceInProcess;
class SyncPointClientState;
struct ContextCreationAttribs;
struct SwapBuffersCompleteParams;
@@ -108,6 +113,7 @@ class GL_IN_PROCESS_CONTEXT_EXPORT InProcessCommandBuffer
ImageFactory* image_factory,
GpuChannelManagerDelegate* gpu_channel_manager_delegate,
scoped_refptr<base::SingleThreadTaskRunner> task_runner,
+ SingleTaskSequence* task_sequence,
gpu::raster::GrShaderCache* gr_shader_cache,
GpuProcessActivityFlags* activity_flags);
@@ -201,6 +207,22 @@ class GL_IN_PROCESS_CONTEXT_EXPORT InProcessCommandBuffer
gpu::SharedImageInterface* GetSharedImageInterface() const;
+ // This is a wrapper for the VizSharedImageInterface implementation that is
+ // only used in InProcessCommandBuffer.
+ class SharedImageInterfaceHelper {
+ public:
+ explicit SharedImageInterfaceHelper(InProcessCommandBuffer* command_buffer);
+ ~SharedImageInterfaceHelper() = default;
+
+ void SetError();
+ void WrapTaskWithGpuCheck(base::OnceClosure task);
+
+ bool EnableWrappedSkImage() const;
+
+ private:
+ InProcessCommandBuffer* command_buffer_;
+ };
+
// Provides a callback that can be used to preserve the back buffer for the
// GLSurface associated with the command buffer, even after the command buffer
// has been destroyed. The back buffer is evicted once the callback is
@@ -209,9 +231,17 @@ class GL_IN_PROCESS_CONTEXT_EXPORT InProcessCommandBuffer
// and |surface_handle| provided in Initialize outlive this callback.
base::ScopedClosureRunner GetCacheBackBufferCb();
- private:
- class SharedImageInterface;
+ gpu::SharedImageManager* GetSharedImageManager() {
+ return task_executor_->shared_image_manager();
+ }
+
+ gpu::MemoryTracker* GetMemoryTracker() {
+ // Should only be called after initialization.
+ DCHECK(context_group_);
+ return context_group_->memory_tracker();
+ }
+ private:
struct InitializeOnGpuThreadParams {
SurfaceHandle surface_handle;
const ContextCreationAttribs& attribs;
@@ -287,30 +317,6 @@ class GL_IN_PROCESS_CONTEXT_EXPORT InProcessCommandBuffer
void GetGpuFenceOnGpuThread(
uint32_t gpu_fence_id,
base::OnceCallback<void(std::unique_ptr<gfx::GpuFence>)> callback);
- void LazyCreateSharedImageFactory();
- void CreateSharedImageOnGpuThread(const Mailbox& mailbox,
- viz::ResourceFormat format,
- const gfx::Size& size,
- const gfx::ColorSpace& color_space,
- uint32_t usage,
- const SyncToken& sync_token);
- void CreateSharedImageWithDataOnGpuThread(const Mailbox& mailbox,
- viz::ResourceFormat format,
- const gfx::Size& size,
- const gfx::ColorSpace& color_space,
- uint32_t usage,
- const SyncToken& sync_token,
- std::vector<uint8_t> pixel_data);
- void CreateGMBSharedImageOnGpuThread(const Mailbox& mailbox,
- gfx::GpuMemoryBufferHandle handle,
- gfx::BufferFormat format,
- const gfx::Size& size,
- const gfx::ColorSpace& color_space,
- uint32_t usage,
- const SyncToken& sync_token);
- void UpdateSharedImageOnGpuThread(const Mailbox& mailbox,
- const SyncToken& sync_token);
- void DestroySharedImageOnGpuThread(const Mailbox& mailbox);
void SetDisplayTransformOnGpuThread(gfx::OverlayTransform transform);
// Sets |active_url_| as the active GPU process URL. Should be called on GPU
@@ -347,8 +353,6 @@ class GL_IN_PROCESS_CONTEXT_EXPORT InProcessCommandBuffer
scoped_refptr<gl::GLContext> context_;
scoped_refptr<gl::GLSurface> surface_;
scoped_refptr<SyncPointClientState> sync_point_client_state_;
- scoped_refptr<SyncPointClientState> shared_image_client_state_;
- std::unique_ptr<SharedImageFactory> shared_image_factory_;
// Used to throttle PerformDelayedWorkOnGpuThread.
bool delayed_work_pending_ = false;
@@ -377,8 +381,13 @@ class GL_IN_PROCESS_CONTEXT_EXPORT InProcessCommandBuffer
// Accessed on both threads:
base::WaitableEvent flush_event_;
CommandBufferTaskExecutor* const task_executor_;
- std::unique_ptr<gpu::SingleTaskSequence> task_sequence_;
- std::unique_ptr<SharedImageInterface> shared_image_interface_;
+
+ // If no SingleTaskSequence is passed in, create our own.
+ scoped_refptr<GpuTaskSchedulerHelper> task_scheduler_holder_;
+
+ // Pointer to the SingleTaskSequence that actually does the scheduling.
+ SingleTaskSequence* task_sequence_;
+ std::unique_ptr<SharedImageInterfaceInProcess> shared_image_interface_;
// The group of contexts that share namespaces with this context.
scoped_refptr<gles2::ContextGroup> context_group_;
diff --git a/chromium/gpu/ipc/in_process_gpu_thread_holder.cc b/chromium/gpu/ipc/in_process_gpu_thread_holder.cc
index 21ad0705ac8..5a027baac2c 100644
--- a/chromium/gpu/ipc/in_process_gpu_thread_holder.cc
+++ b/chromium/gpu/ipc/in_process_gpu_thread_holder.cc
@@ -5,9 +5,11 @@
#include "gpu/ipc/in_process_gpu_thread_holder.h"
#include "base/bind.h"
+#include "base/bind_helpers.h"
#include "base/command_line.h"
#include "base/synchronization/waitable_event.h"
#include "base/threading/thread_task_runner_handle.h"
+#include "build/build_config.h"
#include "gpu/command_buffer/service/mailbox_manager_factory.h"
#include "gpu/command_buffer/service/scheduler.h"
#include "gpu/command_buffer/service/service_utils.h"
@@ -15,7 +17,7 @@
#include "gpu/command_buffer/service/sync_point_manager.h"
#include "gpu/config/gpu_info_collector.h"
#include "gpu/config/gpu_util.h"
-#include "gpu/ipc/gpu_in_process_thread_service.h"
+#include "ui/gl/init/gl_factory.h"
namespace gpu {
@@ -70,10 +72,39 @@ void InProcessGpuThreadHolder::InitializeOnGpuThread(
task_runner(), sync_point_manager_.get(), gpu_preferences_);
mailbox_manager_ = gles2::CreateMailboxManager(gpu_preferences_);
shared_image_manager_ = std::make_unique<SharedImageManager>();
+
+ share_group_ = new gl::GLShareGroup();
+ surface_ = gl::init::CreateOffscreenGLSurface(gfx::Size());
+ gl::GLContextAttribs attribs = gles2::GenerateGLContextAttribs(
+ ContextCreationAttribs(), false /* use_passthrough_decoder */);
+ context_ =
+ gl::init::CreateGLContext(share_group_.get(), surface_.get(), attribs);
+ CHECK(context_->MakeCurrent(surface_.get()));
+ GpuDriverBugWorkarounds gpu_driver_bug_workarounds(
+ gpu_feature_info_.enabled_gpu_driver_bug_workarounds);
+
+ bool use_virtualized_gl_context_ = false;
+#if defined(OS_MACOSX)
+ // Virtualize GpuPreference::kLowPower contexts by default on OS X to prevent
+ // performance regressions when enabling FCM. https://crbug.com/180463
+ use_virtualized_gl_context_ = true;
+#endif
+ use_virtualized_gl_context_ |=
+ gpu_driver_bug_workarounds.use_virtualized_gl_contexts;
+
+ context_state_ = base::MakeRefCounted<SharedContextState>(
+ share_group_, surface_, context_, use_virtualized_gl_context_,
+ base::DoNothing(), gpu_preferences_.gr_context_type);
+ auto feature_info = base::MakeRefCounted<gles2::FeatureInfo>(
+ gpu_driver_bug_workarounds, gpu_feature_info_);
+ context_state_->InitializeGL(gpu_preferences_, feature_info);
+ context_state_->InitializeGrContext(gpu_preferences_,
+ gpu_driver_bug_workarounds, nullptr);
+
task_executor_ = std::make_unique<GpuInProcessThreadService>(
- task_runner(), scheduler_.get(), sync_point_manager_.get(),
- mailbox_manager_.get(), nullptr, gl::GLSurfaceFormat(), gpu_feature_info_,
- gpu_preferences_, shared_image_manager_.get(), nullptr, nullptr);
+ this, task_runner(), scheduler_.get(), sync_point_manager_.get(),
+ mailbox_manager_.get(), gl::GLSurfaceFormat(), gpu_feature_info_,
+ gpu_preferences_, shared_image_manager_.get(), nullptr);
completion->Signal();
}
@@ -83,6 +114,23 @@ void InProcessGpuThreadHolder::DeleteOnGpuThread() {
scheduler_.reset();
sync_point_manager_.reset();
shared_image_manager_.reset();
+
+ context_state_.reset();
+ context_.reset();
+ surface_.reset();
+ share_group_.reset();
+}
+
+scoped_refptr<SharedContextState>
+InProcessGpuThreadHolder::GetSharedContextState() {
+ DCHECK(context_state_);
+ return context_state_;
+}
+
+scoped_refptr<gl::GLShareGroup> InProcessGpuThreadHolder::GetShareGroup() {
+ if (!share_group_)
+ share_group_ = base::MakeRefCounted<gl::GLShareGroup>();
+ return share_group_;
}
} // namespace gpu
diff --git a/chromium/gpu/ipc/in_process_gpu_thread_holder.h b/chromium/gpu/ipc/in_process_gpu_thread_holder.h
index e7501ac633f..1762e540470 100644
--- a/chromium/gpu/ipc/in_process_gpu_thread_holder.h
+++ b/chromium/gpu/ipc/in_process_gpu_thread_holder.h
@@ -11,8 +11,10 @@
#include "base/macros.h"
#include "base/memory/scoped_refptr.h"
#include "base/threading/thread.h"
+#include "gpu/command_buffer/service/shared_context_state.h"
#include "gpu/config/gpu_feature_info.h"
#include "gpu/config/gpu_preferences.h"
+#include "gpu/ipc/gpu_in_process_thread_service.h"
namespace gpu {
class CommandBufferTaskExecutor;
@@ -26,7 +28,8 @@ class SyncPointManager;
// default GpuPreferences and GpuFeatureInfo will be constructed from the
// command line when this class is first created.
class COMPONENT_EXPORT(GPU_THREAD_HOLDER) InProcessGpuThreadHolder
- : public base::Thread {
+ : public base::Thread,
+ public GpuInProcessThreadServiceDelegate {
public:
InProcessGpuThreadHolder();
~InProcessGpuThreadHolder() override;
@@ -43,6 +46,10 @@ class COMPONENT_EXPORT(GPU_THREAD_HOLDER) InProcessGpuThreadHolder
// executor will be created the first time this is called.
CommandBufferTaskExecutor* GetTaskExecutor();
+ // gpu::GpuInProcessThreadServiceDelegate implementation:
+ scoped_refptr<gpu::SharedContextState> GetSharedContextState() override;
+ scoped_refptr<gl::GLShareGroup> GetShareGroup() override;
+
private:
void InitializeOnGpuThread(base::WaitableEvent* completion);
void DeleteOnGpuThread();
@@ -50,6 +57,11 @@ class COMPONENT_EXPORT(GPU_THREAD_HOLDER) InProcessGpuThreadHolder
GpuPreferences gpu_preferences_;
GpuFeatureInfo gpu_feature_info_;
+ scoped_refptr<gl::GLShareGroup> share_group_;
+ scoped_refptr<gl::GLSurface> surface_;
+ scoped_refptr<gl::GLContext> context_;
+ scoped_refptr<SharedContextState> context_state_;
+
std::unique_ptr<SyncPointManager> sync_point_manager_;
std::unique_ptr<Scheduler> scheduler_;
std::unique_ptr<MailboxManager> mailbox_manager_;
diff --git a/chromium/gpu/ipc/raster_in_process_context.cc b/chromium/gpu/ipc/raster_in_process_context.cc
index 813a19d8f5c..86e091926c2 100644
--- a/chromium/gpu/ipc/raster_in_process_context.cc
+++ b/chromium/gpu/ipc/raster_in_process_context.cc
@@ -66,8 +66,8 @@ ContextResult RasterInProcessContext::Initialize(
auto result = command_buffer_->Initialize(
nullptr /* surface */, true /* is_offscreen */, kNullSurfaceHandle,
attribs, gpu_memory_buffer_manager, image_factory,
- gpu_channel_manager_delegate, client_task_runner_, gr_shader_cache,
- activity_flags);
+ gpu_channel_manager_delegate, client_task_runner_,
+ nullptr /* task_sequence */, gr_shader_cache, activity_flags);
if (result != ContextResult::kSuccess) {
    DLOG(ERROR) << "Failed to initialize InProcessCommandBuffer";
return result;
diff --git a/chromium/gpu/ipc/scheduler_sequence.h b/chromium/gpu/ipc/scheduler_sequence.h
index a3612b7eef4..a8d85bb129e 100644
--- a/chromium/gpu/ipc/scheduler_sequence.h
+++ b/chromium/gpu/ipc/scheduler_sequence.h
@@ -19,6 +19,7 @@
namespace viz {
class Display;
class OutputSurfaceProviderImpl;
+class OverlayProcessorAndroid;
} // namespace viz
namespace gpu {
@@ -35,6 +36,11 @@ class GL_IN_PROCESS_CONTEXT_EXPORT ScopedAllowScheduleGpuTask {
// guaranteed to be able to support. Talk to boliu@ if in doubt.
friend class viz::Display;
friend class viz::OutputSurfaceProviderImpl;
+ // Overlay is not supported for WebView. However, the initialization and
+ // destruction of OverlayProcessor require posting a task to the gpu thread,
+ // which would trigger the DCHECK, even though the posted task would not run
+ // on WebView.
+ friend class viz::OverlayProcessorAndroid;
ScopedAllowScheduleGpuTask();
#if DCHECK_IS_ON()
diff --git a/chromium/gpu/ipc/service/BUILD.gn b/chromium/gpu/ipc/service/BUILD.gn
index c3ca00c900f..a6cdd1737d6 100644
--- a/chromium/gpu/ipc/service/BUILD.gn
+++ b/chromium/gpu/ipc/service/BUILD.gn
@@ -53,14 +53,12 @@ jumbo_component("service") {
"webgpu_command_buffer_stub.h",
]
defines = [ "GPU_IPC_SERVICE_IMPLEMENTATION" ]
- if (is_chromecast) {
- defines += [ "IS_CHROMECAST" ]
- }
if (subpixel_font_rendering_disabled) {
defines += [ "SUBPIXEL_FONT_RENDERING_DISABLED" ]
}
public_deps = [
"//base",
+ "//build:chromecast_buildflags",
"//components/viz/common",
"//gpu/config",
"//ipc",
@@ -136,6 +134,7 @@ jumbo_component("service") {
if (use_x11) {
sources += [ "x_util.h" ]
libs += [ "X11" ]
+ deps += [ "//ui/gfx/linux:gpu_memory_buffer_support_x11" ]
}
if (use_ozone) {
deps += [ "//ui/ozone" ]
@@ -150,9 +149,7 @@ jumbo_component("service") {
source_set("test_support") {
testonly = true
- sources = [
- "gpu_memory_buffer_factory_test_template.h",
- ]
+ sources = [ "gpu_memory_buffer_factory_test_template.h" ]
public_deps = [
":service",
"//testing/gtest:gtest",
diff --git a/chromium/gpu/ipc/service/DEPS b/chromium/gpu/ipc/service/DEPS
index d335ceef805..1ec17f75e3c 100644
--- a/chromium/gpu/ipc/service/DEPS
+++ b/chromium/gpu/ipc/service/DEPS
@@ -15,3 +15,9 @@ include_rules = [
"+ui/platform_window",
"+media/gpu/android/texture_owner.h",
]
+
+specific_include_rules = {
+ "image_decode_accelerator_stub_unittest\.cc": [
+ "+skia/ext/skia_memory_dump_provider.h",
+ ],
+}
diff --git a/chromium/gpu/ipc/service/gles2_command_buffer_stub.cc b/chromium/gpu/ipc/service/gles2_command_buffer_stub.cc
index 28a7a9dd845..ec99e708f3b 100644
--- a/chromium/gpu/ipc/service/gles2_command_buffer_stub.cc
+++ b/chromium/gpu/ipc/service/gles2_command_buffer_stub.cc
@@ -261,7 +261,7 @@ gpu::ContextResult GLES2CommandBufferStub::Initialize(
scoped_refptr<gl::GLContext> context;
if (use_virtualized_gl_context_ && share_group_) {
- context = share_group_->GetSharedContext(surface_.get());
+ context = share_group_->shared_context();
if (context && (!context->MakeCurrent(surface_.get()) ||
context->CheckStickyGraphicsResetStatus() != GL_NO_ERROR)) {
context = nullptr;
@@ -281,7 +281,7 @@ gpu::ContextResult GLES2CommandBufferStub::Initialize(
// Ensure that context creation did not lose track of the intended share
// group.
DCHECK(context->share_group() == share_group_.get());
- share_group_->SetSharedContext(surface_.get(), context.get());
+ share_group_->SetSharedContext(context.get());
// This needs to be called against the real shared context, not the
// virtual context created below.
diff --git a/chromium/gpu/ipc/service/gpu_channel.cc b/chromium/gpu/ipc/service/gpu_channel.cc
index b7749a86961..f57d754b3ae 100644
--- a/chromium/gpu/ipc/service/gpu_channel.cc
+++ b/chromium/gpu/ipc/service/gpu_channel.cc
@@ -3,6 +3,7 @@
// found in the LICENSE file.
#include "gpu/ipc/service/gpu_channel.h"
+#include "base/memory/ptr_util.h"
#include <utility>
@@ -734,7 +735,8 @@ void GpuChannel::OnCreateCommandBuffer(
stub = std::make_unique<WebGPUCommandBufferStub>(
this, init_params, command_buffer_id, sequence_id, stream_id, route_id);
} else if (init_params.attribs.enable_raster_interface &&
- !init_params.attribs.enable_gles2_interface) {
+ !init_params.attribs.enable_gles2_interface &&
+ !init_params.attribs.enable_grcontext) {
stub = std::make_unique<RasterCommandBufferStub>(
this, init_params, command_buffer_id, sequence_id, stream_id, route_id);
} else {
diff --git a/chromium/gpu/ipc/service/gpu_channel.h b/chromium/gpu/ipc/service/gpu_channel.h
index f836c02dc4e..da02450fb9e 100644
--- a/chromium/gpu/ipc/service/gpu_channel.h
+++ b/chromium/gpu/ipc/service/gpu_channel.h
@@ -106,6 +106,8 @@ class GPU_IPC_SERVICE_EXPORT GpuChannel : public IPC::Listener,
return io_task_runner_;
}
+ bool is_gpu_host() const { return is_gpu_host_; }
+
// IPC::Listener implementation:
bool OnMessageReceived(const IPC::Message& msg) override;
void OnChannelConnected(int32_t peer_pid) override;
diff --git a/chromium/gpu/ipc/service/gpu_channel_manager.cc b/chromium/gpu/ipc/service/gpu_channel_manager.cc
index 39544aca67e..e0776477344 100644
--- a/chromium/gpu/ipc/service/gpu_channel_manager.cc
+++ b/chromium/gpu/ipc/service/gpu_channel_manager.cc
@@ -15,6 +15,7 @@
#include "base/single_thread_task_runner.h"
#include "base/system/sys_info.h"
#include "base/threading/thread_task_runner_handle.h"
+#include "base/trace_event/traced_value.h"
#include "build/build_config.h"
#include "components/viz/common/features.h"
#include "gpu/command_buffer/common/context_creation_attribs.h"
@@ -35,6 +36,9 @@
#include "gpu/ipc/service/gpu_memory_buffer_factory.h"
#include "gpu/ipc/service/gpu_watchdog_thread.h"
#include "third_party/skia/include/core/SkGraphics.h"
+#if defined(OS_WIN)
+#include "ui/gl/gl_angle_util_win.h"
+#endif
#include "ui/gl/gl_bindings.h"
#include "ui/gl/gl_share_group.h"
#include "ui/gl/gl_version_info.h"
@@ -50,36 +54,169 @@ const int kMaxGpuIdleTimeMs = 40;
// draw.
const int kMaxKeepAliveTimeMs = 200;
#endif
+#if defined(OS_WIN)
+void TrimD3DResources() {
+ // Graphics drivers periodically allocate internal memory buffers in
+ // order to speed up subsequent rendering requests. These memory allocations
+ // in general lead to increased memory usage by the overall system.
+ // Calling Trim discards internal memory buffers allocated for the app,
+ // reducing its memory footprint.
+ // Calling the Trim method does not change the rendering state of the
+ // graphics device and has no effect on rendering operations.
+ // There is a brief performance hit when internal buffers are reallocated
+ // during the first rendering operations after the Trim call, so apps
+ // should only call Trim when going idle for a period of time or during
+ // low memory conditions.
+ Microsoft::WRL::ComPtr<ID3D11Device> d3d11_device =
+ gl::QueryD3D11DeviceObjectFromANGLE();
+ if (d3d11_device) {
+ Microsoft::WRL::ComPtr<IDXGIDevice3> dxgi_device;
+ if (SUCCEEDED(d3d11_device.As(&dxgi_device))) {
+ dxgi_device->Trim();
+ }
+ }
}
+#endif
+
+void FormatAllocationSourcesForTracing(
+ base::trace_event::TracedValue* dict,
+ base::flat_map<GpuPeakMemoryAllocationSource, uint64_t>&
+ allocation_sources) {
+ dict->SetInteger("UNKNOWN",
+ allocation_sources[GpuPeakMemoryAllocationSource::UNKNOWN]);
+ dict->SetInteger(
+ "COMMAND_BUFFER",
+ allocation_sources[GpuPeakMemoryAllocationSource::COMMAND_BUFFER]);
+ dict->SetInteger(
+ "SHARED_CONTEXT_STATE",
+ allocation_sources[GpuPeakMemoryAllocationSource::SHARED_CONTEXT_STATE]);
+ dict->SetInteger(
+ "SHARED_IMAGE_STUB",
+ allocation_sources[GpuPeakMemoryAllocationSource::SHARED_IMAGE_STUB]);
+ dict->SetInteger("SKIA",
+ allocation_sources[GpuPeakMemoryAllocationSource::SKIA]);
+}
+
+} // namespace
GpuChannelManager::GpuPeakMemoryMonitor::GpuPeakMemoryMonitor()
: weak_factory_(this) {}
-GpuChannelManager::GpuPeakMemoryMonitor::~GpuPeakMemoryMonitor() {}
+GpuChannelManager::GpuPeakMemoryMonitor::~GpuPeakMemoryMonitor() = default;
-uint64_t GpuChannelManager::GpuPeakMemoryMonitor::GetPeakMemoryUsage(
- uint32_t sequence_num) {
+base::flat_map<GpuPeakMemoryAllocationSource, uint64_t>
+GpuChannelManager::GpuPeakMemoryMonitor::GetPeakMemoryUsage(
+ uint32_t sequence_num,
+ uint64_t* out_peak_memory) {
auto sequence = sequence_trackers_.find(sequence_num);
- if (sequence != sequence_trackers_.end())
- return sequence->second;
- return 0u;
+ base::flat_map<GpuPeakMemoryAllocationSource, uint64_t> allocation_per_source;
+ *out_peak_memory = 0u;
+ if (sequence != sequence_trackers_.end()) {
+ *out_peak_memory = sequence->second.total_memory_;
+ allocation_per_source = sequence->second.peak_memory_per_source_;
+ }
+ return allocation_per_source;
}
void GpuChannelManager::GpuPeakMemoryMonitor::StartGpuMemoryTracking(
uint32_t sequence_num) {
- sequence_trackers_.emplace(sequence_num, current_memory_);
+ sequence_trackers_.emplace(
+ sequence_num,
+ SequenceTracker(current_memory_, current_memory_per_source_));
+ TRACE_EVENT_ASYNC_BEGIN2("gpu", "PeakMemoryTracking", sequence_num, "start",
+ current_memory_, "start_sources",
+ StartTrackingTracedValue());
}
void GpuChannelManager::GpuPeakMemoryMonitor::StopGpuMemoryTracking(
uint32_t sequence_num) {
- sequence_trackers_.erase(sequence_num);
+ auto sequence = sequence_trackers_.find(sequence_num);
+ if (sequence != sequence_trackers_.end()) {
+ TRACE_EVENT_ASYNC_END2("gpu", "PeakMemoryTracking", sequence_num, "peak",
+ sequence->second.total_memory_, "end_sources",
+ StopTrackingTracedValue(sequence->second));
+ sequence_trackers_.erase(sequence);
+ }
+}
+
+base::WeakPtr<MemoryTracker::Observer>
+GpuChannelManager::GpuPeakMemoryMonitor::GetWeakPtr() {
+ return weak_factory_.GetWeakPtr();
+}
+
+void GpuChannelManager::GpuPeakMemoryMonitor::InvalidateWeakPtrs() {
+ weak_factory_.InvalidateWeakPtrs();
+}
+
+GpuChannelManager::GpuPeakMemoryMonitor::SequenceTracker::SequenceTracker(
+ uint64_t current_memory,
+ base::flat_map<GpuPeakMemoryAllocationSource, uint64_t>
+ current_memory_per_source)
+ : initial_memory_(current_memory),
+ total_memory_(current_memory),
+ initial_memory_per_source_(current_memory_per_source),
+ peak_memory_per_source_(std::move(current_memory_per_source)) {}
+
+GpuChannelManager::GpuPeakMemoryMonitor::SequenceTracker::SequenceTracker(
+ const SequenceTracker& other) = default;
+
+GpuChannelManager::GpuPeakMemoryMonitor::SequenceTracker::~SequenceTracker() =
+ default;
+
+std::unique_ptr<base::trace_event::TracedValue>
+GpuChannelManager::GpuPeakMemoryMonitor::StartTrackingTracedValue() {
+ auto dict = std::make_unique<base::trace_event::TracedValue>();
+ FormatAllocationSourcesForTracing(dict.get(), current_memory_per_source_);
+ return dict;
+}
+
+std::unique_ptr<base::trace_event::TracedValue>
+GpuChannelManager::GpuPeakMemoryMonitor::StopTrackingTracedValue(
+ SequenceTracker& sequence) {
+ auto dict = std::make_unique<base::trace_event::TracedValue>();
+ dict->BeginDictionary("source_totals");
+ FormatAllocationSourcesForTracing(dict.get(),
+ sequence.peak_memory_per_source_);
+ dict->EndDictionary();
+ dict->BeginDictionary("difference");
+ int total_diff = sequence.total_memory_ - sequence.initial_memory_;
+ dict->SetInteger("TOTAL", total_diff);
+ dict->EndDictionary();
+ dict->BeginDictionary("source_difference");
+
+ for (auto it : sequence.peak_memory_per_source_) {
+ int diff = (it.second - sequence.initial_memory_per_source_[it.first]);
+ switch (it.first) {
+ case GpuPeakMemoryAllocationSource::UNKNOWN:
+ dict->SetInteger("UNKNOWN", diff);
+ break;
+ case GpuPeakMemoryAllocationSource::COMMAND_BUFFER:
+ dict->SetInteger("COMMAND_BUFFER", diff);
+ break;
+ case GpuPeakMemoryAllocationSource::SHARED_CONTEXT_STATE:
+ dict->SetInteger("SHARED_CONTEXT_STATE", diff);
+ break;
+ case GpuPeakMemoryAllocationSource::SHARED_IMAGE_STUB:
+ dict->SetInteger("SHARED_IMAGE_STUB", diff);
+ break;
+ case GpuPeakMemoryAllocationSource::SKIA:
+ dict->SetInteger("SKIA", diff);
+ break;
+ }
+ }
+
+ dict->EndDictionary();
+ return dict;
}
void GpuChannelManager::GpuPeakMemoryMonitor::OnMemoryAllocatedChange(
CommandBufferId id,
uint64_t old_size,
- uint64_t new_size) {
- current_memory_ += new_size - old_size;
+ uint64_t new_size,
+ GpuPeakMemoryAllocationSource source) {
+ uint64_t diff = new_size - old_size;
+ current_memory_ += diff;
+ current_memory_per_source_[source] += diff;
if (old_size < new_size) {
// When memory has increased, iterate over the sequences to update their
// peak.
@@ -89,8 +226,17 @@ void GpuChannelManager::GpuPeakMemoryMonitor::OnMemoryAllocatedChange(
    // |peak_since_last_sequence_update_| on the memory changes. Then only
    // update the sequences when a new one is added, or the peak is requested.
for (auto& sequence : sequence_trackers_) {
- if (current_memory_ > sequence.second)
- sequence.second = current_memory_;
+ if (current_memory_ > sequence.second.total_memory_) {
+ sequence.second.total_memory_ = current_memory_;
+ for (auto& sequence : sequence_trackers_) {
+ TRACE_EVENT_ASYNC_STEP_INTO1("gpu", "PeakMemoryTracking",
+ sequence.first, "Peak", "peak",
+ current_memory_);
+ }
+ for (auto& source : current_memory_per_source_) {
+ sequence.second.peak_memory_per_source_[source.first] = source.second;
+ }
+ }
}
}
}
@@ -128,6 +274,8 @@ GpuChannelManager::GpuChannelManager(
default_offscreen_surface_(std::move(default_offscreen_surface)),
gpu_memory_buffer_factory_(gpu_memory_buffer_factory),
gpu_feature_info_(gpu_feature_info),
+ discardable_manager_(gpu_preferences_),
+ passthrough_discardable_manager_(gpu_preferences_),
image_decode_accelerator_worker_(image_decode_accelerator_worker),
activity_flags_(std::move(activity_flags)),
memory_pressure_listener_(
@@ -136,6 +284,7 @@ GpuChannelManager::GpuChannelManager(
vulkan_context_provider_(vulkan_context_provider),
metal_context_provider_(metal_context_provider),
dawn_context_provider_(dawn_context_provider) {
+ DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);
DCHECK(task_runner->BelongsToCurrentThread());
DCHECK(io_task_runner);
DCHECK(scheduler);
@@ -151,6 +300,8 @@ GpuChannelManager::GpuChannelManager(
}
GpuChannelManager::~GpuChannelManager() {
+ DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);
+
// Clear |gpu_channels_| first to prevent reentrancy problems from GpuChannel
// destructor.
auto gpu_channels = std::move(gpu_channels_);
@@ -162,6 +313,10 @@ GpuChannelManager::~GpuChannelManager() {
default_offscreen_surface_ = nullptr;
}
+ // Invalidate here as the |shared_context_state_| attempts to call back to
+ // |this| in the middle of the deletion.
+ peak_memory_monitor_.InvalidateWeakPtrs();
+
// Try to make the context current so that GPU resources can be destroyed
// correctly.
if (shared_context_state_)
@@ -169,12 +324,16 @@ GpuChannelManager::~GpuChannelManager() {
}
gles2::Outputter* GpuChannelManager::outputter() {
+ DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);
+
if (!outputter_)
outputter_.reset(new gles2::TraceOutputter("GpuChannelManager Trace"));
return outputter_.get();
}
gles2::ProgramCache* GpuChannelManager::program_cache() {
+ DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);
+
if (!program_cache_.get()) {
const GpuDriverBugWorkarounds& workarounds = gpu_driver_bug_workarounds_;
bool disable_disk_cache =
@@ -197,11 +356,28 @@ gles2::ProgramCache* GpuChannelManager::program_cache() {
}
void GpuChannelManager::RemoveChannel(int client_id) {
+ DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);
+
+ auto it = gpu_channels_.find(client_id);
+ if (it == gpu_channels_.end())
+ return;
+
delegate_->DidDestroyChannel(client_id);
- gpu_channels_.erase(client_id);
+
+ // Erase the |gpu_channels_| entry before destroying the GpuChannel object to
+ // avoid reentrancy problems from the GpuChannel destructor.
+ std::unique_ptr<GpuChannel> channel = std::move(it->second);
+ gpu_channels_.erase(it);
+ channel.reset();
+
+ if (gpu_channels_.empty()) {
+ delegate_->DidDestroyAllChannels();
+ }
}
GpuChannel* GpuChannelManager::LookupChannel(int32_t client_id) const {
+ DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);
+
const auto& it = gpu_channels_.find(client_id);
return it != gpu_channels_.end() ? it->second.get() : nullptr;
}
@@ -210,6 +386,8 @@ GpuChannel* GpuChannelManager::EstablishChannel(int client_id,
uint64_t client_tracing_id,
bool is_gpu_host,
bool cache_shaders_on_disk) {
+ DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);
+
if (gr_shader_cache_ && cache_shaders_on_disk)
gr_shader_cache_->CacheClientIdOnDisk(client_id);
@@ -218,6 +396,9 @@ GpuChannel* GpuChannelManager::EstablishChannel(int client_id,
io_task_runner_, client_id, client_tracing_id, is_gpu_host,
image_decode_accelerator_worker_);
+ if (!gpu_channel)
+ return nullptr;
+
GpuChannel* gpu_channel_ptr = gpu_channel.get();
gpu_channels_[client_id] = std::move(gpu_channel);
return gpu_channel_ptr;
@@ -226,12 +407,16 @@ GpuChannel* GpuChannelManager::EstablishChannel(int client_id,
void GpuChannelManager::InternalDestroyGpuMemoryBuffer(
gfx::GpuMemoryBufferId id,
int client_id) {
+ DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);
+
gpu_memory_buffer_factory_->DestroyGpuMemoryBuffer(id, client_id);
}
void GpuChannelManager::DestroyGpuMemoryBuffer(gfx::GpuMemoryBufferId id,
int client_id,
const SyncToken& sync_token) {
+ DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);
+
if (!sync_point_manager_->WaitOutOfOrder(
sync_token,
base::BindOnce(&GpuChannelManager::InternalDestroyGpuMemoryBuffer,
@@ -244,6 +429,8 @@ void GpuChannelManager::DestroyGpuMemoryBuffer(gfx::GpuMemoryBufferId id,
void GpuChannelManager::PopulateShaderCache(int32_t client_id,
const std::string& key,
const std::string& program) {
+ DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);
+
if (client_id == kGrShaderCacheClientId) {
if (gr_shader_cache_)
gr_shader_cache_->PopulateCache(key, program);
@@ -255,15 +442,25 @@ void GpuChannelManager::PopulateShaderCache(int32_t client_id,
}
void GpuChannelManager::LoseAllContexts() {
+ DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);
+
+ share_group_ = base::MakeRefCounted<gl::GLShareGroup>();
for (auto& kv : gpu_channels_) {
kv.second->MarkAllContextsLost();
}
task_runner_->PostTask(FROM_HERE,
base::BindOnce(&GpuChannelManager::DestroyAllChannels,
weak_factory_.GetWeakPtr()));
+ if (shared_context_state_) {
+ gr_cache_controller_.reset();
+ shared_context_state_->MarkContextLost();
+ shared_context_state_.reset();
+ }
}
void GpuChannelManager::DestroyAllChannels() {
+ DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);
+
// Clear |gpu_channels_| first to prevent reentrancy problems from GpuChannel
// destructor.
auto gpu_channels = std::move(gpu_channels_);
@@ -273,6 +470,8 @@ void GpuChannelManager::DestroyAllChannels() {
void GpuChannelManager::GetVideoMemoryUsageStats(
VideoMemoryUsageStats* video_memory_usage_stats) const {
+ DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);
+
// For each context group, assign its memory usage to its PID
video_memory_usage_stats->process_map.clear();
uint64_t total_size = 0;
@@ -299,26 +498,38 @@ void GpuChannelManager::GetVideoMemoryUsageStats(
}
void GpuChannelManager::StartPeakMemoryMonitor(uint32_t sequence_num) {
+ DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);
+
peak_memory_monitor_.StartGpuMemoryTracking(sequence_num);
}
-uint64_t GpuChannelManager::GetPeakMemoryUsage(uint32_t sequence_num) {
- uint64_t total_memory = peak_memory_monitor_.GetPeakMemoryUsage(sequence_num);
+base::flat_map<GpuPeakMemoryAllocationSource, uint64_t>
+GpuChannelManager::GetPeakMemoryUsage(uint32_t sequence_num,
+ uint64_t* out_peak_memory) {
+ DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);
+ auto allocation_per_source =
+ peak_memory_monitor_.GetPeakMemoryUsage(sequence_num, out_peak_memory);
peak_memory_monitor_.StopGpuMemoryTracking(sequence_num);
- return total_memory;
+ return allocation_per_source;
}
#if defined(OS_ANDROID)
void GpuChannelManager::DidAccessGpu() {
+ DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);
+
last_gpu_access_time_ = base::TimeTicks::Now();
}
void GpuChannelManager::WakeUpGpu() {
+ DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);
+
begin_wake_up_time_ = base::TimeTicks::Now();
ScheduleWakeUpGpu();
}
void GpuChannelManager::ScheduleWakeUpGpu() {
+ DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);
+
base::TimeTicks now = base::TimeTicks::Now();
TRACE_EVENT2("gpu", "GpuChannelManager::ScheduleWakeUp", "idle_time",
(now - last_gpu_access_time_).InMilliseconds(),
@@ -340,13 +551,17 @@ void GpuChannelManager::ScheduleWakeUpGpu() {
}
void GpuChannelManager::DoWakeUpGpu() {
+ DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);
+
const CommandBufferStub* stub = nullptr;
for (const auto& kv : gpu_channels_) {
const GpuChannel* channel = kv.second.get();
stub = channel->GetOneStub();
if (stub) {
DCHECK(stub->decoder_context());
- break;
+ // With Vulkan, Dawn, etc., RasterDecoders don't use GL.
+ if (stub->decoder_context()->GetGLContext())
+ break;
}
}
if (!stub || !stub->decoder_context()->MakeCurrent())
@@ -356,6 +571,8 @@ void GpuChannelManager::DoWakeUpGpu() {
}
void GpuChannelManager::OnBackgroundCleanup() {
+ DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);
+
// Delete all the GL contexts when the channel does not use WebGL and Chrome
// goes to background on low-end devices.
std::vector<int> channels_to_clear;
@@ -384,6 +601,8 @@ void GpuChannelManager::OnBackgroundCleanup() {
#endif
void GpuChannelManager::OnApplicationBackgrounded() {
+ DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);
+
if (shared_context_state_) {
shared_context_state_->PurgeMemory(
base::MemoryPressureListener::MemoryPressureLevel::
@@ -396,6 +615,8 @@ void GpuChannelManager::OnApplicationBackgrounded() {
void GpuChannelManager::HandleMemoryPressure(
base::MemoryPressureListener::MemoryPressureLevel memory_pressure_level) {
+ DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);
+
if (program_cache_)
program_cache_->HandleMemoryPressure(memory_pressure_level);
discardable_manager_.HandleMemoryPressure(memory_pressure_level);
@@ -404,10 +625,15 @@ void GpuChannelManager::HandleMemoryPressure(
shared_context_state_->PurgeMemory(memory_pressure_level);
if (gr_shader_cache_)
gr_shader_cache_->PurgeMemory(memory_pressure_level);
+#if defined(OS_WIN)
+ TrimD3DResources();
+#endif
}
scoped_refptr<SharedContextState> GpuChannelManager::GetSharedContextState(
ContextResult* result) {
+ DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);
+
if (shared_context_state_ && !shared_context_state_->context_lost()) {
*result = ContextResult::kSuccess;
return shared_context_state_;
@@ -441,8 +667,7 @@ scoped_refptr<SharedContextState> GpuChannelManager::GetSharedContextState(
}
scoped_refptr<gl::GLContext> context =
- use_virtualized_gl_contexts ? share_group->GetSharedContext(surface.get())
- : nullptr;
+ use_virtualized_gl_contexts ? share_group->shared_context() : nullptr;
if (context && (!context->MakeCurrent(surface.get()) ||
context->CheckStickyGraphicsResetStatus() != GL_NO_ERROR)) {
context = nullptr;
@@ -450,6 +675,10 @@ scoped_refptr<SharedContextState> GpuChannelManager::GetSharedContextState(
if (!context) {
gl::GLContextAttribs attribs = gles2::GenerateGLContextAttribs(
ContextCreationAttribs(), use_passthrough_decoder);
+
+ // Only skip validation if the GLContext will be used exclusively by the
+ // SharedContextState.
+ attribs.can_skip_validation = !use_virtualized_gl_contexts;
context =
gl::init::CreateGLContext(share_group.get(), surface.get(), attribs);
if (!context) {
@@ -467,7 +696,7 @@ scoped_refptr<SharedContextState> GpuChannelManager::GetSharedContextState(
gpu_feature_info_.ApplyToGLContext(context.get());
if (use_virtualized_gl_contexts)
- share_group->SetSharedContext(surface.get(), context.get());
+ share_group->SetSharedContext(context.get());
}
// This should be either:
@@ -491,7 +720,8 @@ scoped_refptr<SharedContextState> GpuChannelManager::GetSharedContextState(
base::BindOnce(&GpuChannelManager::OnContextLost, base::Unretained(this),
/*synthetic_loss=*/false),
gpu_preferences_.gr_context_type, vulkan_context_provider_,
- metal_context_provider_, dawn_context_provider_, peak_memory_monitor());
+ metal_context_provider_, dawn_context_provider_,
+ peak_memory_monitor_.GetWeakPtr());
// OOP-R needs GrContext for raster tiles.
bool need_gr_context =
@@ -508,12 +738,15 @@ scoped_refptr<SharedContextState> GpuChannelManager::GetSharedContextState(
if (!shared_context_state_->InitializeGL(gpu_preferences_,
feature_info.get())) {
shared_context_state_ = nullptr;
+ LOG(ERROR) << "ContextResult::kFatalFailure: Failed to Initialize GL "
+ "for SharedContextState";
+ *result = ContextResult::kFatalFailure;
return nullptr;
}
}
- shared_context_state_->InitializeGrContext(gpu_driver_bug_workarounds_,
- gr_shader_cache(),
- &activity_flags_, watchdog_);
+ shared_context_state_->InitializeGrContext(
+ gpu_preferences_, gpu_driver_bug_workarounds_, gr_shader_cache(),
+ &activity_flags_, watchdog_);
}
gr_cache_controller_.emplace(shared_context_state_.get(), task_runner_);
@@ -523,6 +756,8 @@ scoped_refptr<SharedContextState> GpuChannelManager::GetSharedContextState(
}
void GpuChannelManager::OnContextLost(bool synthetic_loss) {
+ DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);
+
if (synthetic_loss)
return;
@@ -539,17 +774,23 @@ void GpuChannelManager::OnContextLost(bool synthetic_loss) {
}
void GpuChannelManager::ScheduleGrContextCleanup() {
+ DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);
+
if (gr_cache_controller_)
gr_cache_controller_->ScheduleGrContextCleanup();
}
void GpuChannelManager::StoreShader(const std::string& key,
const std::string& shader) {
+ DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);
+
delegate_->StoreShaderToDisk(kGrShaderCacheClientId, key, shader);
}
void GpuChannelManager::SetImageDecodeAcceleratorWorkerForTesting(
ImageDecodeAcceleratorWorker* worker) {
+ DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);
+
DCHECK(gpu_channels_.empty());
image_decode_accelerator_worker_ = worker;
}
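
For illustration, a minimal sketch (not part of this patch) of how a caller such as viz::GpuServiceImpl might consume the reworked GetPeakMemoryUsage(), which now reports a per-source breakdown alongside the total peak; the |manager| and |sequence_num| names are placeholders:

// Sketch only: |manager| is a gpu::GpuChannelManager* and |sequence_num| was
// previously passed to StartPeakMemoryMonitor().
uint64_t peak_bytes = 0u;
base::flat_map<gpu::GpuPeakMemoryAllocationSource, uint64_t> per_source =
    manager->GetPeakMemoryUsage(sequence_num, &peak_bytes);
// The call also stops tracking for |sequence_num|. |peak_bytes| holds the
// total peak; |per_source| holds each source's contribution at that peak.
uint64_t skia_bytes =
    per_source[gpu::GpuPeakMemoryAllocationSource::SKIA];
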
diff --git a/chromium/gpu/ipc/service/gpu_channel_manager.h b/chromium/gpu/ipc/service/gpu_channel_manager.h
index 217adb652cb..4846800441e 100644
--- a/chromium/gpu/ipc/service/gpu_channel_manager.h
+++ b/chromium/gpu/ipc/service/gpu_channel_manager.h
@@ -9,7 +9,6 @@
#include <memory>
#include <string>
-#include <unordered_map>
#include <vector>
#include "base/containers/flat_map.h"
@@ -19,6 +18,7 @@
#include "base/memory/weak_ptr.h"
#include "base/optional.h"
#include "base/single_thread_task_runner.h"
+#include "base/threading/thread_checker.h"
#include "build/build_config.h"
#include "gpu/command_buffer/common/activity_flags.h"
#include "gpu/command_buffer/common/constants.h"
@@ -32,12 +32,19 @@
#include "gpu/config/gpu_driver_bug_workarounds.h"
#include "gpu/config/gpu_feature_info.h"
#include "gpu/config/gpu_preferences.h"
+#include "gpu/ipc/common/gpu_peak_memory.h"
#include "gpu/ipc/service/gpu_ipc_service_export.h"
#include "ui/gfx/gpu_memory_buffer.h"
#include "ui/gfx/native_widget_types.h"
#include "ui/gl/gl_surface.h"
#include "url/gurl.h"
+namespace base {
+namespace trace_event {
+class TracedValue;
+} // namespace trace_event
+} // namespace base
+
namespace gl {
class GLShareGroup;
}
@@ -168,8 +175,11 @@ class GPU_IPC_SERVICE_EXPORT GpuChannelManager
// |sequence_num|. Repeated calls with the same value are ignored.
void StartPeakMemoryMonitor(uint32_t sequence_num);
- // Ends the tracking for |sequence_num| and returns the peak memory usage.
- uint64_t GetPeakMemoryUsage(uint32_t sequence_num);
+ // Ends the tracking for |sequence_num| and returns the peak memory per
+ // allocation source, along with the total in |out_peak_memory|.
+ base::flat_map<GpuPeakMemoryAllocationSource, uint64_t> GetPeakMemoryUsage(
+ uint32_t sequence_num,
+ uint64_t* out_peak_memory);
scoped_refptr<SharedContextState> GetSharedContextState(
ContextResult* result);
@@ -198,22 +208,51 @@ class GPU_IPC_SERVICE_EXPORT GpuChannelManager
GpuPeakMemoryMonitor();
~GpuPeakMemoryMonitor() override;
- uint64_t GetPeakMemoryUsage(uint32_t sequence_num);
+ base::flat_map<GpuPeakMemoryAllocationSource, uint64_t> GetPeakMemoryUsage(
+ uint32_t sequence_num,
+ uint64_t* out_peak_memory);
void StartGpuMemoryTracking(uint32_t sequence_num);
void StopGpuMemoryTracking(uint32_t sequence_num);
+ base::WeakPtr<MemoryTracker::Observer> GetWeakPtr();
+ void InvalidateWeakPtrs();
+
private:
+ struct SequenceTracker {
+ public:
+ SequenceTracker(uint64_t current_memory,
+ base::flat_map<GpuPeakMemoryAllocationSource, uint64_t>
+ current_memory_per_source);
+ SequenceTracker(const SequenceTracker&);
+ ~SequenceTracker();
+
+ uint64_t initial_memory_ = 0u;
+ uint64_t total_memory_ = 0u;
+ base::flat_map<GpuPeakMemoryAllocationSource, uint64_t>
+ initial_memory_per_source_;
+ base::flat_map<GpuPeakMemoryAllocationSource, uint64_t>
+ peak_memory_per_source_;
+ };
+ std::unique_ptr<base::trace_event::TracedValue> StartTrackingTracedValue();
+ std::unique_ptr<base::trace_event::TracedValue> StopTrackingTracedValue(
+ SequenceTracker& sequence);
// MemoryTracker::Observer:
- void OnMemoryAllocatedChange(CommandBufferId id,
- uint64_t old_size,
- uint64_t new_size) override;
+ void OnMemoryAllocatedChange(
+ CommandBufferId id,
+ uint64_t old_size,
+ uint64_t new_size,
+ GpuPeakMemoryAllocationSource source =
+ GpuPeakMemoryAllocationSource::UNKNOWN) override;
// Tracks all currently requested sequences mapped to the peak memory seen.
- base::flat_map<uint32_t, uint64_t> sequence_trackers_;
+ base::flat_map<uint32_t, SequenceTracker> sequence_trackers_;
// Tracks the total current memory across all MemoryTrackers.
uint64_t current_memory_ = 0u;
+ base::flat_map<GpuPeakMemoryAllocationSource, uint64_t>
+ current_memory_per_source_;
+
base::WeakPtrFactory<GpuPeakMemoryMonitor> weak_factory_;
DISALLOW_COPY_AND_ASSIGN(GpuPeakMemoryMonitor);
};
@@ -231,7 +270,7 @@ class GPU_IPC_SERVICE_EXPORT GpuChannelManager
// These objects manage channels to individual renderer processes. There is
// one channel for each renderer process that has connected to this GPU
// process.
- std::unordered_map<int32_t, std::unique_ptr<GpuChannel>> gpu_channels_;
+ base::flat_map<int32_t, std::unique_ptr<GpuChannel>> gpu_channels_;
scoped_refptr<base::SingleThreadTaskRunner> task_runner_;
scoped_refptr<base::SingleThreadTaskRunner> io_task_runner_;
@@ -290,19 +329,21 @@ class GPU_IPC_SERVICE_EXPORT GpuChannelManager
// With --enable-vulkan, |vulkan_context_provider_| will be set from
// viz::GpuServiceImpl. The raster decoders will use it for rasterization if
- // --gr-context-type is also set to Vulkan.
+ // features::Vulkan is used.
viz::VulkanContextProvider* vulkan_context_provider_ = nullptr;
- // If features::SkiaOnMetad, |metal_context_provider_| will be set from
+ // If features::Metal, |metal_context_provider_| will be set from
// viz::GpuServiceImpl. The raster decoders will use it for rasterization.
viz::MetalContextProvider* metal_context_provider_ = nullptr;
- // With --gr-context-type=dawn, |dawn_context_provider_| will be set from
+ // With features::SkiaDawn, |dawn_context_provider_| will be set from
// viz::GpuServiceImpl. The raster decoders will use it for rasterization.
viz::DawnContextProvider* dawn_context_provider_ = nullptr;
GpuPeakMemoryMonitor peak_memory_monitor_;
+ THREAD_CHECKER(thread_checker_);
+
  // Member variables should appear before the WeakPtrFactory, to ensure
  // that any WeakPtrs to this class are invalidated before its member
  // variables' destructors are executed, rendering them invalid.
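
For illustration, a minimal sketch (not part of this patch) of how a memory tracker might report against the extended MemoryTracker::Observer interface, which now tags each allocation change with a GpuPeakMemoryAllocationSource (defaulting to UNKNOWN); |observer|, |command_buffer_id| and the sizes are placeholders:

// Sketch only: |observer| is the base::WeakPtr<MemoryTracker::Observer>
// returned by GpuPeakMemoryMonitor::GetWeakPtr().
if (observer) {
  observer->OnMemoryAllocatedChange(
      command_buffer_id, /*old_size=*/0u, /*new_size=*/4u * 1024u * 1024u,
      gpu::GpuPeakMemoryAllocationSource::SHARED_IMAGE_STUB);
}
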
diff --git a/chromium/gpu/ipc/service/gpu_channel_manager_delegate.h b/chromium/gpu/ipc/service/gpu_channel_manager_delegate.h
index 9e6809b7779..98897f9ad8a 100644
--- a/chromium/gpu/ipc/service/gpu_channel_manager_delegate.h
+++ b/chromium/gpu/ipc/service/gpu_channel_manager_delegate.h
@@ -5,7 +5,9 @@
#ifndef GPU_IPC_SERVICE_GPU_CHANNEL_MANAGER_DELEGATE_H_
#define GPU_IPC_SERVICE_GPU_CHANNEL_MANAGER_DELEGATE_H_
+#include "build/build_config.h"
#include "gpu/command_buffer/common/constants.h"
+#include "gpu/config/gpu_info.h"
#include "gpu/ipc/common/surface_handle.h"
#include "gpu/ipc/service/display_context.h"
@@ -35,6 +37,11 @@ class GpuChannelManagerDelegate {
// Notification from GPU that the channel is destroyed.
virtual void DidDestroyChannel(int client_id) = 0;
+ // Notification that all GPU channels are shut down properly.
+ // Note this is NOT called in error conditions such as losing a channel due
+ // to context loss, or from debug messages.
+ virtual void DidDestroyAllChannels() = 0;
+
// Tells the delegate that an offscreen context was destroyed for the provided
// |active_url|.
virtual void DidDestroyOffscreenContext(const GURL& active_url) = 0;
@@ -64,6 +71,9 @@ class GpuChannelManagerDelegate {
virtual gpu::Scheduler* GetGpuScheduler() = 0;
#if defined(OS_WIN)
+ // Tells the delegate that overlay info was updated.
+ virtual void DidUpdateOverlayInfo(const gpu::OverlayInfo& overlay_info) = 0;
+
// Tells the delegate that |child_window| was created in the GPU process and
// to send an IPC to make SetParent() syscall. This syscall is blocked by the
// GPU sandbox and must be made in the browser process.
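
For illustration, a minimal sketch (not part of this patch) of a GpuChannelManagerDelegate implementation picking up the two hooks added here; the real implementation lives in viz::GpuServiceImpl, and the bodies below are placeholders:

#include "gpu/ipc/service/gpu_channel_manager_delegate.h"

// Sketch only; the remaining pure-virtual methods of the interface still need
// to be overridden before this class can be instantiated.
class ExampleDelegate : public gpu::GpuChannelManagerDelegate {
 public:
  void DidDestroyAllChannels() override {
    // All channels were torn down cleanly (not via context loss); an embedder
    // could use this to exit an idle GPU process, for example.
  }
#if defined(OS_WIN)
  void DidUpdateOverlayInfo(const gpu::OverlayInfo& overlay_info) override {
    // Forward the refreshed overlay capabilities to the browser process.
  }
#endif
};
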
diff --git a/chromium/gpu/ipc/service/gpu_channel_manager_unittest.cc b/chromium/gpu/ipc/service/gpu_channel_manager_unittest.cc
index 8e1d1a70109..a5d21e45d41 100644
--- a/chromium/gpu/ipc/service/gpu_channel_manager_unittest.cc
+++ b/chromium/gpu/ipc/service/gpu_channel_manager_unittest.cc
@@ -2,9 +2,16 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#include <limits.h>
#include <stddef.h>
#include <stdint.h>
+#include "base/trace_event/category_registry.h"
+#include "base/trace_event/trace_arguments.h"
+#include "base/trace_event/trace_category.h"
+#include "base/trace_event/trace_event_filter.h"
+#include "base/trace_event/trace_event_impl.h"
+#include "base/trace_event/trace_log.h"
#include "gpu/command_buffer/service/memory_tracking.h"
#include "gpu/ipc/common/command_buffer_id.h"
#include "gpu/ipc/common/gpu_messages.h"
@@ -12,10 +19,66 @@
#include "gpu/ipc/service/gpu_channel_manager.h"
#include "gpu/ipc/service/gpu_channel_test_common.h"
+namespace {
+
+// static
+// Cache of the last TraceEvent seen by TestTraceEventFilter, as it cannot be
+// stored directly in the class, due to the filtering methods being const.
+std::unique_ptr<base::trace_event::TraceEvent> g_trace_event;
+
+// Testing filter to observe "gpu" trace events. The latest one seen is copied
+// into |g_trace_event|.
+class TestTraceEventFilter : public base::trace_event::TraceEventFilter {
+ public:
+ TestTraceEventFilter() { g_trace_event.reset(); }
+ ~TestTraceEventFilter() override { g_trace_event.reset(); }
+
+ static std::unique_ptr<base::trace_event::TraceEventFilter> Factory(
+ const std::string& predicate_name) {
+ std::unique_ptr<TestTraceEventFilter> res =
+ std::make_unique<TestTraceEventFilter>();
+ return res;
+ }
+
+ // base::trace_event::TraceEventFilter:
+ bool FilterTraceEvent(
+ const base::trace_event::TraceEvent& trace_event) const override {
+ const auto* category =
+ base::trace_event::CategoryRegistry::GetCategoryByStatePtr(
+ trace_event.category_group_enabled());
+
+ if (!strcmp(category->name(), "gpu")) {
+ CHECK_EQ(2u, trace_event.arg_size()) << trace_event.name();
+ // The first arg is always recorded as a uint64_t, whereas the second is
+ // a TracedValue. Here we force the first to be recorded as_uint, as on
+ // KitKat the union is failing to transpose correctly when using
+ // as_convertable.
+ std::unique_ptr<base::trace_event::TraceArguments> args =
+ std::make_unique<base::trace_event::TraceArguments>(
+ trace_event.arg_name(0), trace_event.arg_value(0).as_uint,
+ trace_event.arg_name(1), trace_event.arg_value(1).as_convertable);
+
+ g_trace_event = std::make_unique<base::trace_event::TraceEvent>(
+ trace_event.thread_id(), trace_event.timestamp(),
+ trace_event.thread_timestamp(),
+ trace_event.thread_instruction_count(), trace_event.phase(),
+ trace_event.category_group_enabled(), trace_event.name(),
+ trace_event.scope(), trace_event.id(), trace_event.bind_id(),
+ args.get(), trace_event.flags());
+ }
+ return true;
+ }
+};
+
+} // namespace
+
namespace gpu {
class GpuChannelManagerTest : public GpuChannelTestCommon {
public:
+ static constexpr uint64_t kUInt64_T_Max =
+ std::numeric_limits<uint64_t>::max();
+
GpuChannelManagerTest()
: GpuChannelTestCommon(true /* use_stub_bindings */) {}
~GpuChannelManagerTest() override = default;
@@ -24,9 +87,25 @@ class GpuChannelManagerTest : public GpuChannelTestCommon {
return &channel_manager()->peak_memory_monitor_;
}
+ // Returns the peak memory usage from the channel_manager(). This will stop
+ // tracking for |sequence_num|.
+ uint64_t GetManagersPeakMemoryUsage(uint32_t sequence_num) {
+ // Set default as max so that invalid cases can properly test 0u returns.
+ uint64_t peak_memory = kUInt64_T_Max;
+ auto allocation =
+ channel_manager()->GetPeakMemoryUsage(sequence_num, &peak_memory);
+ return peak_memory;
+ }
+
+ // Returns the peak memory usage currently stored in the GpuPeakMemoryMonitor.
+ // Does not shut down tracking for |sequence_num|.
uint64_t GetMonitorsPeakMemoryUsage(uint32_t sequence_num) {
- return channel_manager()->peak_memory_monitor_.GetPeakMemoryUsage(
- sequence_num);
+ // Set default as max so that invalid cases can properly test 0u returns.
+ uint64_t peak_memory = kUInt64_T_Max;
+ auto allocation =
+ channel_manager()->peak_memory_monitor_.GetPeakMemoryUsage(
+ sequence_num, &peak_memory);
+ return peak_memory;
}
// Helpers to call MemoryTracker::Observer methods of
@@ -35,7 +114,8 @@ class GpuChannelManagerTest : public GpuChannelTestCommon {
uint64_t old_size,
uint64_t new_size) {
static_cast<MemoryTracker::Observer*>(gpu_peak_memory_monitor())
- ->OnMemoryAllocatedChange(id, old_size, new_size);
+ ->OnMemoryAllocatedChange(id, old_size, new_size,
+ GpuPeakMemoryAllocationSource::UNKNOWN);
}
#if defined(OS_ANDROID)
@@ -115,6 +195,22 @@ TEST_F(GpuChannelManagerTest, OnBackgroundedWithWebGL) {
// Tests that peak memory usage is only reported for valid sequence numbers,
// and that polling shuts down the monitoring.
TEST_F(GpuChannelManagerTest, GpuPeakMemoryOnlyReportedForValidSequence) {
+ // Setup filtering to observe traces emitted.
+ base::trace_event::TraceLog* trace_log =
+ base::trace_event::TraceLog::GetInstance();
+ trace_log->SetFilterFactoryForTesting(TestTraceEventFilter::Factory);
+ const char config_json[] = R"(
+ {
+ "event_filters": [
+ {
+ "filter_predicate": "gpu",
+ "included_categories": ["*"]
+ }
+ ]
+ } )";
+ trace_log->SetEnabled(base::trace_event::TraceConfig(config_json),
+ base::trace_event::TraceLog::FILTERING_MODE);
+
GpuChannelManager* manager = channel_manager();
const CommandBufferId buffer_id =
CommandBufferIdFromChannelAndRoute(42, 1337);
@@ -125,16 +221,41 @@ TEST_F(GpuChannelManagerTest, GpuPeakMemoryOnlyReportedForValidSequence) {
manager->StartPeakMemoryMonitor(sequence_num);
EXPECT_EQ(current_memory, GetMonitorsPeakMemoryUsage(sequence_num));
+ // A trace should have been emitted.
+ EXPECT_NE(nullptr, g_trace_event);
+ EXPECT_STREQ("PeakMemoryTracking", g_trace_event->name());
+ EXPECT_STREQ("start", g_trace_event->arg_name(0));
+ EXPECT_EQ(current_memory, g_trace_event->arg_value(0).as_uint);
+ EXPECT_STREQ("start_sources", g_trace_event->arg_name(1));
+ EXPECT_NE(nullptr, g_trace_event->arg_value(1).as_pointer);
+ g_trace_event.reset();
+
// With no request to listen to memory it should report 0.
const uint32_t invalid_sequence_num = 1337;
EXPECT_EQ(0u, GetMonitorsPeakMemoryUsage(invalid_sequence_num));
- EXPECT_EQ(0u, manager->GetPeakMemoryUsage(invalid_sequence_num));
+ EXPECT_EQ(0u, GetManagersPeakMemoryUsage(invalid_sequence_num));
+ // There should be no trace emitted for invalid sequence.
+ EXPECT_EQ(nullptr, g_trace_event);
// The valid sequence should receive a report.
- EXPECT_EQ(current_memory, manager->GetPeakMemoryUsage(sequence_num));
+ EXPECT_EQ(current_memory, GetManagersPeakMemoryUsage(sequence_num));
// However it should be shut-down and no longer report anything.
EXPECT_EQ(0u, GetMonitorsPeakMemoryUsage(sequence_num));
- EXPECT_EQ(0u, manager->GetPeakMemoryUsage(sequence_num));
+ EXPECT_EQ(0u, GetManagersPeakMemoryUsage(sequence_num));
+
+ // A trace should have been emitted as well.
+ EXPECT_NE(nullptr, g_trace_event);
+ EXPECT_STREQ("PeakMemoryTracking", g_trace_event->name());
+ EXPECT_STREQ("peak", g_trace_event->arg_name(0));
+ EXPECT_EQ(current_memory, g_trace_event->arg_value(0).as_uint);
+ EXPECT_STREQ("end_sources", g_trace_event->arg_name(1));
+ EXPECT_NE(nullptr, g_trace_event->arg_value(1).as_pointer);
+ g_trace_event.reset();
+
+ // Tracing's globals are not reset between tests. Clear out our filter and
+ // disable tracing.
+ trace_log->SetFilterFactoryForTesting(nullptr);
+ trace_log->SetDisabled(base::trace_event::TraceLog::FILTERING_MODE);
}
// Tests that while a channel may exist for longer than a request to monitor,
@@ -158,7 +279,7 @@ TEST_F(GpuChannelManagerTest,
// the peak seen during the observation of |sequence_num|.
const uint64_t localized_peak_memory = 24;
OnMemoryAllocatedChange(buffer_id, reduced_memory, localized_peak_memory);
- EXPECT_EQ(localized_peak_memory, manager->GetPeakMemoryUsage(sequence_num));
+ EXPECT_EQ(localized_peak_memory, GetManagersPeakMemoryUsage(sequence_num));
}
// Checks that when there is more than one sequence, each has a separately
@@ -185,8 +306,8 @@ TEST_F(GpuChannelManagerTest, GetPeakMemoryUsageCalculatedPerSequence) {
const uint64_t localized_peak_memory = 24;
OnMemoryAllocatedChange(buffer_id, reduced_memory, localized_peak_memory);
- EXPECT_EQ(initial_memory, manager->GetPeakMemoryUsage(sequence_num_1));
- EXPECT_EQ(localized_peak_memory, manager->GetPeakMemoryUsage(sequence_num_2));
+ EXPECT_EQ(initial_memory, GetManagersPeakMemoryUsage(sequence_num_1));
+ EXPECT_EQ(localized_peak_memory, GetManagersPeakMemoryUsage(sequence_num_2));
}
} // namespace gpu
diff --git a/chromium/gpu/ipc/service/gpu_channel_test_common.cc b/chromium/gpu/ipc/service/gpu_channel_test_common.cc
index 22966507b37..a52769db1dd 100644
--- a/chromium/gpu/ipc/service/gpu_channel_test_common.cc
+++ b/chromium/gpu/ipc/service/gpu_channel_test_common.cc
@@ -7,6 +7,7 @@
#include "base/memory/unsafe_shared_memory_region.h"
#include "base/test/test_simple_task_runner.h"
#include "base/threading/thread_task_runner_handle.h"
+#include "base/trace_event/memory_dump_manager.h"
#include "gpu/command_buffer/common/activity_flags.h"
#include "gpu/command_buffer/service/scheduler.h"
#include "gpu/command_buffer/service/shared_image_manager.h"
@@ -33,6 +34,7 @@ class TestGpuChannelManagerDelegate : public GpuChannelManagerDelegate {
void DidCreateContextSuccessfully() override {}
void DidCreateOffscreenContext(const GURL& active_url) override {}
void DidDestroyChannel(int client_id) override {}
+ void DidDestroyAllChannels() override {}
void DidDestroyOffscreenContext(const GURL& active_url) override {}
void DidLoseContext(bool offscreen,
error::ContextLostReason reason,
@@ -43,6 +45,7 @@ class TestGpuChannelManagerDelegate : public GpuChannelManagerDelegate {
void MaybeExitOnContextLost() override { is_exiting_ = true; }
bool IsExiting() const override { return is_exiting_; }
#if defined(OS_WIN)
+ void DidUpdateOverlayInfo(const gpu::OverlayInfo& overlay_info) override {}
void SendCreatedChildWindow(SurfaceHandle parent_window,
SurfaceHandle child_window) override {}
#endif
@@ -62,7 +65,9 @@ GpuChannelTestCommon::GpuChannelTestCommon(bool use_stub_bindings)
GpuChannelTestCommon::GpuChannelTestCommon(
std::vector<int32_t> enabled_workarounds,
bool use_stub_bindings)
- : task_runner_(new base::TestSimpleTaskRunner),
+ : memory_dump_manager_(
+ base::trace_event::MemoryDumpManager::CreateInstanceForTesting()),
+ task_runner_(new base::TestSimpleTaskRunner),
io_task_runner_(new base::TestSimpleTaskRunner),
sync_point_manager_(new SyncPointManager()),
shared_image_manager_(new SharedImageManager(false /* thread_safe */)),
diff --git a/chromium/gpu/ipc/service/gpu_channel_test_common.h b/chromium/gpu/ipc/service/gpu_channel_test_common.h
index 9ab01faf7a6..1a15276f605 100644
--- a/chromium/gpu/ipc/service/gpu_channel_test_common.h
+++ b/chromium/gpu/ipc/service/gpu_channel_test_common.h
@@ -15,6 +15,9 @@
namespace base {
class TestSimpleTaskRunner;
+namespace trace_event {
+class MemoryDumpManager;
+} // namespace trace_event
} // namespace base
namespace IPC {
@@ -52,6 +55,7 @@ class GpuChannelTestCommon : public testing::Test {
base::UnsafeSharedMemoryRegion GetSharedMemoryRegion();
private:
+ std::unique_ptr<base::trace_event::MemoryDumpManager> memory_dump_manager_;
IPC::TestSink sink_;
scoped_refptr<base::TestSimpleTaskRunner> task_runner_;
scoped_refptr<base::TestSimpleTaskRunner> io_task_runner_;
diff --git a/chromium/gpu/ipc/service/gpu_init.cc b/chromium/gpu/ipc/service/gpu_init.cc
index 1aeba67edd4..83c90521809 100644
--- a/chromium/gpu/ipc/service/gpu_init.cc
+++ b/chromium/gpu/ipc/service/gpu_init.cc
@@ -12,7 +12,8 @@
#include "base/threading/scoped_blocking_call.h"
#include "base/trace_event/trace_event.h"
#include "build/build_config.h"
-#include "components/viz/common/features.h"
+#include "build/chromecast_buildflags.h"
+#include "gpu/command_buffer/common/gpu_memory_buffer_support.h"
#include "gpu/command_buffer/service/gpu_switches.h"
#include "gpu/command_buffer/service/service_utils.h"
#include "gpu/config/gpu_driver_bug_list.h"
@@ -33,13 +34,16 @@
#include "ui/gl/gl_utils.h"
#include "ui/gl/init/gl_factory.h"
+#if defined(OS_MACOSX)
+#include <GLES2/gl2.h>
+#endif
+
#if defined(USE_OZONE)
#include "ui/ozone/public/ozone_platform.h"
#include "ui/ozone/public/surface_factory_ozone.h"
#endif
#if defined(OS_WIN)
-#include "ui/gl/direct_composition_surface_win.h"
#include "ui/gl/gl_surface_egl.h"
#endif
@@ -73,16 +77,6 @@ bool CollectGraphicsInfo(GPUInfo* gpu_info) {
return success;
}
-#if defined(OS_WIN)
-OverlaySupport FlagsToOverlaySupport(UINT flags) {
- if (flags & DXGI_OVERLAY_SUPPORT_FLAG_SCALING)
- return OverlaySupport::kScaling;
- if (flags & DXGI_OVERLAY_SUPPORT_FLAG_DIRECT)
- return OverlaySupport::kDirect;
- return OverlaySupport::kNone;
-}
-#endif // OS_WIN
-
void InitializePlatformOverlaySettings(GPUInfo* gpu_info) {
#if defined(OS_WIN)
// This has to be called after a context is created, active GPU is identified,
@@ -90,26 +84,15 @@ void InitializePlatformOverlaySettings(GPUInfo* gpu_info) {
// |disable_direct_composition| may not be correctly applied.
  // Also, this has to be called after the SwiftShader fallback decision is
  // finalized, because this function depends on whether GL is ANGLE's GLES or not.
- if (gl::GetGLImplementation() == gl::kGLImplementationEGLANGLE) {
- DCHECK(gpu_info);
- gpu_info->direct_composition =
- gl::DirectCompositionSurfaceWin::IsDirectCompositionSupported();
- gpu_info->supports_overlays =
- gl::DirectCompositionSurfaceWin::AreOverlaysSupported();
- gpu_info->nv12_overlay_support = FlagsToOverlaySupport(
- gl::DirectCompositionSurfaceWin::GetOverlaySupportFlags(
- DXGI_FORMAT_NV12));
- gpu_info->yuy2_overlay_support = FlagsToOverlaySupport(
- gl::DirectCompositionSurfaceWin::GetOverlaySupportFlags(
- DXGI_FORMAT_YUY2));
- }
+ DCHECK(gpu_info);
+ CollectHardwareOverlayInfo(&gpu_info->overlay_info);
#elif defined(OS_ANDROID)
if (gpu_info->gpu.vendor_string == "Qualcomm")
gl::SurfaceControl::EnableQualcommUBWC();
#endif
}
-#if defined(OS_LINUX) && !defined(OS_CHROMEOS) && !defined(IS_CHROMECAST)
+#if defined(OS_LINUX) && !defined(OS_CHROMEOS) && !BUILDFLAG(IS_CHROMECAST)
bool CanAccessNvidiaDeviceFile() {
bool res = true;
base::ScopedBlockingCall scoped_blocking_call(FROM_HERE,
@@ -120,7 +103,7 @@ bool CanAccessNvidiaDeviceFile() {
}
return res;
}
-#endif // OS_LINUX && !OS_CHROMEOS && !IS_CHROMECAST
+#endif // OS_LINUX && !OS_CHROMEOS && !BUILDFLAG(IS_CHROMECAST)
class GpuWatchdogInit {
public:
@@ -146,19 +129,27 @@ GpuInit::~GpuInit() {
bool GpuInit::InitializeAndStartSandbox(base::CommandLine* command_line,
const GpuPreferences& gpu_preferences) {
gpu_preferences_ = gpu_preferences;
+
+ if (gpu_preferences_.enable_perf_data_collection) {
+ // This is only enabled on the info collection GPU process.
+ DevicePerfInfo device_perf_info;
+ CollectDevicePerfInfo(&device_perf_info, /*in_browser_process=*/false);
+ device_perf_info_ = device_perf_info;
+ }
+
// Blacklist decisions based on basic GPUInfo may not be final. It might
// need more context based GPUInfo. In such situations, switching to
// SwiftShader needs to wait until creating a context.
bool needs_more_info = true;
-#if !defined(OS_ANDROID) && !defined(IS_CHROMECAST)
+#if !defined(OS_ANDROID) && !BUILDFLAG(IS_CHROMECAST)
needs_more_info = false;
if (!PopGPUInfoCache(&gpu_info_)) {
CollectBasicGraphicsInfo(command_line, &gpu_info_);
}
#if defined(OS_WIN)
- GpuSeriesType gpu_series_type = GetGpuSeriesType(
+ IntelGpuSeriesType intel_gpu_series_type = GetIntelGpuSeriesType(
gpu_info_.active_gpu().vendor_id, gpu_info_.active_gpu().device_id);
- UMA_HISTOGRAM_ENUMERATION("GPU.GpuGeneration", gpu_series_type);
+ UMA_HISTOGRAM_ENUMERATION("GPU.IntelGpuSeriesType", intel_gpu_series_type);
#endif // OS_WIN
// Set keys for crash logging based on preliminary gpu info, in case we
@@ -181,7 +172,8 @@ bool GpuInit::InitializeAndStartSandbox(base::CommandLine* command_line,
gpu_feature_info_ = gpu::ComputeGpuFeatureInfo(
gpu_info_, gpu_preferences_, command_line, &needs_more_info);
}
-#endif // !OS_ANDROID && !IS_CHROMECAST
+#endif // !OS_ANDROID && !BUILDFLAG(IS_CHROMECAST)
+
gpu_info_.in_process_gpu = false;
bool use_swiftshader = false;
@@ -221,10 +213,22 @@ bool GpuInit::InitializeAndStartSandbox(base::CommandLine* command_line,
delayed_watchdog_enable = true;
#endif
+#if defined(OS_LINUX)
  // PreSandbox is mainly for resource handling and not related to the GPU
  // driver, so it doesn't need the GPU watchdog. loadLibrary may take a long
  // time, and killing and restarting the GPU process will not help.
+ if (gpu_preferences_.gpu_sandbox_start_early) {
+ // The sandbox will be started earlier than usual (i.e. before GL) so
+ // execute the pre-sandbox steps now.
+ sandbox_helper_->PreSandboxStartup();
+ }
+#else
+ // For some reason MacOSX's VideoToolbox might crash when called after
+ // initializing GL, see crbug.com/1047643 and crbug.com/871280. On other
+ // operating systems like Windows and Android the pre-sandbox steps have
+ // always been executed before initializing GL, so keep it this way.
sandbox_helper_->PreSandboxStartup();
+#endif
// Start the GPU watchdog only after anything that is expected to be time
// consuming has completed, otherwise the process is liable to be aborted.
@@ -273,7 +277,6 @@ bool GpuInit::InitializeAndStartSandbox(base::CommandLine* command_line,
ui::OzonePlatform::InitParams params;
params.single_process = false;
params.using_mojo = features::IsOzoneDrmMojo();
- params.viz_display_compositor = features::IsVizDisplayCompositorEnabled();
ui::OzonePlatform::InitializeForGPU(params);
const std::vector<gfx::BufferFormat> supported_buffer_formats_for_texturing =
ui::OzonePlatform::GetInstance()
@@ -321,6 +324,38 @@ bool GpuInit::InitializeAndStartSandbox(base::CommandLine* command_line,
}
}
+#if defined(OS_LINUX)
+ // The ContentSandboxHelper is currently the only implementation of
+ // gpu::GpuSandboxHelper and it has no dependency, except on Linux where
+ // VaapiWrapper checks the GL implementation to determine which display
+ // to use. So call PreSandboxStartup after GL initialization. But make
+ // sure the watchdog is paused as loadLibrary may take a long time and
+ // restarting the GPU process will not help.
+ if (!attempted_startsandbox) {
+ if (watchdog_thread_)
+ watchdog_thread_->PauseWatchdog();
+
+ // The sandbox is not started yet.
+ sandbox_helper_->PreSandboxStartup();
+
+ if (watchdog_thread_)
+ watchdog_thread_->ResumeWatchdog();
+ }
+#endif
+
+ // On MacOS, the default texture target for native GpuMemoryBuffers is
+ // GL_TEXTURE_RECTANGLE_ARB. This is due to CGL's requirements for creating
+ // a GL surface. However, when ANGLE is used on top of SwiftShader, it's
+ // necessary to use GL_TEXTURE_2D instead.
+ // TODO(crbug.com/1056312): The proper behavior is to check the config
+ // parameter set by the EGL_ANGLE_iosurface_client_buffer extension.
+#if defined(OS_MACOSX)
+ if (gl::GetGLImplementation() == gl::kGLImplementationEGLANGLE &&
+ gl::GetANGLEImplementation() == gl::ANGLEImplementation::kSwiftShader) {
+ gpu::SetMacOSSpecificTextureTarget(GL_TEXTURE_2D);
+ }
+#endif // defined(OS_MACOSX)
+
bool gl_disabled = gl::GetGLImplementation() == gl::kGLImplementationDisabled;
// Compute passthrough decoder status before ComputeGpuFeatureInfo below.
@@ -387,7 +422,7 @@ bool GpuInit::InitializeAndStartSandbox(base::CommandLine* command_line,
// Collect GPU process info
if (!gl_disabled) {
- if (!CollectGpuExtraInfo(&gpu_extra_info_))
+ if (!CollectGpuExtraInfo(&gpu_extra_info_, gpu_preferences))
return false;
}
@@ -489,12 +524,6 @@ bool GpuInit::InitializeAndStartSandbox(base::CommandLine* command_line,
ui::OzonePlatform::GetInstance()->AfterSandboxEntry();
#endif
-#if defined(OS_ANDROID)
- // Disable AImageReader if the workaround is enabled.
- if (gpu_feature_info_.IsWorkaroundEnabled(DISABLE_AIMAGEREADER)) {
- base::android::AndroidImageReader::DisableSupport();
- }
-#endif
#if defined(USE_OZONE)
gpu_feature_info_.supported_buffer_formats_for_allocation_and_texturing =
std::move(supported_buffer_formats_for_texturing);
@@ -521,11 +550,6 @@ void GpuInit::InitializeInProcess(base::CommandLine* command_line,
default_offscreen_surface_ = gl::init::CreateOffscreenGLSurface(gfx::Size());
- // Disable AImageReader if the workaround is enabled.
- if (gpu_feature_info_.IsWorkaroundEnabled(DISABLE_AIMAGEREADER)) {
- base::android::AndroidImageReader::DisableSupport();
- }
-
UMA_HISTOGRAM_ENUMERATION("GPU.GLImplementation", gl::GetGLImplementation());
}
#else
@@ -537,7 +561,6 @@ void GpuInit::InitializeInProcess(base::CommandLine* command_line,
ui::OzonePlatform::InitParams params;
params.single_process = true;
params.using_mojo = features::IsOzoneDrmMojo();
- params.viz_display_compositor = features::IsVizDisplayCompositorEnabled();
ui::OzonePlatform::InitializeForGPU(params);
const std::vector<gfx::BufferFormat> supported_buffer_formats_for_texturing =
ui::OzonePlatform::GetInstance()
@@ -545,7 +568,7 @@ void GpuInit::InitializeInProcess(base::CommandLine* command_line,
->GetSupportedFormatsForTexturing();
#endif
bool needs_more_info = true;
-#if !defined(IS_CHROMECAST)
+#if !BUILDFLAG(IS_CHROMECAST)
needs_more_info = false;
if (!PopGPUInfoCache(&gpu_info_)) {
CollectBasicGraphicsInfo(command_line, &gpu_info_);
@@ -563,7 +586,7 @@ void GpuInit::InitializeInProcess(base::CommandLine* command_line,
InitializeSwitchableGPUs(
gpu_feature_info_.enabled_gpu_driver_bug_workarounds);
}
-#endif // !IS_CHROMECAST
+#endif // !BUILDFLAG(IS_CHROMECAST)
bool use_swiftshader = EnableSwiftShaderIfNeeded(
command_line, gpu_feature_info_,
diff --git a/chromium/gpu/ipc/service/gpu_init.h b/chromium/gpu/ipc/service/gpu_init.h
index 5263436f3dc..5f1d6fcdf02 100644
--- a/chromium/gpu/ipc/service/gpu_init.h
+++ b/chromium/gpu/ipc/service/gpu_init.h
@@ -7,6 +7,7 @@
#include "base/macros.h"
#include "base/memory/ref_counted.h"
+#include "gpu/config/device_perf_info.h"
#include "gpu/config/gpu_extra_info.h"
#include "gpu/config/gpu_feature_info.h"
#include "gpu/config/gpu_info.h"
@@ -64,6 +65,9 @@ class GPU_IPC_SERVICE_EXPORT GpuInit {
const {
return gpu_feature_info_for_hardware_gpu_;
}
+ const base::Optional<DevicePerfInfo>& device_perf_info() const {
+ return device_perf_info_;
+ }
const GpuPreferences& gpu_preferences() const { return gpu_preferences_; }
std::unique_ptr<GpuWatchdogThread> TakeWatchdogThread() {
return std::move(watchdog_thread_);
@@ -96,6 +100,9 @@ class GPU_IPC_SERVICE_EXPORT GpuInit {
GpuExtraInfo gpu_extra_info_;
+ // The following data are collected by the info collection GPU process.
+ base::Optional<DevicePerfInfo> device_perf_info_;
+
#if BUILDFLAG(ENABLE_VULKAN)
std::unique_ptr<VulkanImplementation> vulkan_implementation_;
#endif
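
For illustration, a minimal sketch (not part of this patch) of reading the optional perf data that GpuInit now collects when enable_perf_data_collection is set; |gpu_init| is a placeholder:

// Sketch only: |gpu_init| is a gpu::GpuInit* that has already run
// InitializeAndStartSandbox().
const base::Optional<gpu::DevicePerfInfo>& perf_info =
    gpu_init->device_perf_info();
if (perf_info.has_value()) {
  // Only populated in the info-collection GPU process; an embedder might
  // forward it to the browser process or record it in histograms.
}
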
diff --git a/chromium/gpu/ipc/service/gpu_memory_buffer_factory_dxgi.cc b/chromium/gpu/ipc/service/gpu_memory_buffer_factory_dxgi.cc
index 25a5e44d7e1..d54b63479a3 100644
--- a/chromium/gpu/ipc/service/gpu_memory_buffer_factory_dxgi.cc
+++ b/chromium/gpu/ipc/service/gpu_memory_buffer_factory_dxgi.cc
@@ -82,7 +82,7 @@ gfx::GpuMemoryBufferHandle GpuMemoryBufferFactoryDXGI::CreateGpuMemoryBuffer(
if (!BufferSizeForBufferFormatChecked(size, format, &buffer_size))
return handle;
- handle.dxgi_handle = IPC::PlatformFileForTransit(texture_handle);
+ handle.dxgi_handle.Set(texture_handle);
handle.type = gfx::DXGI_SHARED_HANDLE;
handle.id = id;
@@ -107,10 +107,8 @@ GpuMemoryBufferFactoryDXGI::CreateImageForGpuMemoryBuffer(
if (handle.type != gfx::DXGI_SHARED_HANDLE)
return nullptr;
// Transfer ownership of handle to GLImageDXGI.
- base::win::ScopedHandle handle_owner;
- handle_owner.Set(handle.dxgi_handle.GetHandle());
auto image = base::MakeRefCounted<gl::GLImageDXGI>(size, nullptr);
- if (!image->InitializeHandle(std::move(handle_owner), 0, format))
+ if (!image->InitializeHandle(std::move(handle.dxgi_handle), 0, format))
return nullptr;
return image;
}
diff --git a/chromium/gpu/ipc/service/gpu_memory_buffer_factory_io_surface.cc b/chromium/gpu/ipc/service/gpu_memory_buffer_factory_io_surface.cc
index 5e5b56c6c34..168e0c1366d 100644
--- a/chromium/gpu/ipc/service/gpu_memory_buffer_factory_io_surface.cc
+++ b/chromium/gpu/ipc/service/gpu_memory_buffer_factory_io_surface.cc
@@ -136,10 +136,12 @@ GpuMemoryBufferFactoryIOSurface::CreateImageForGpuMemoryBuffer(
}
scoped_refptr<gl::GLImage>
-GpuMemoryBufferFactoryIOSurface::CreateAnonymousImage(const gfx::Size& size,
- gfx::BufferFormat format,
- gfx::BufferUsage usage,
- bool* is_cleared) {
+GpuMemoryBufferFactoryIOSurface::CreateAnonymousImage(
+ const gfx::Size& size,
+ gfx::BufferFormat format,
+ gfx::BufferUsage usage,
+ SurfaceHandle surface_handle,
+ bool* is_cleared) {
bool should_clear = false;
base::ScopedCFTypeRef<IOSurfaceRef> io_surface(
gfx::CreateIOSurface(size, format, should_clear));
diff --git a/chromium/gpu/ipc/service/gpu_memory_buffer_factory_io_surface.h b/chromium/gpu/ipc/service/gpu_memory_buffer_factory_io_surface.h
index f5e73346b39..aeaa49fcada 100644
--- a/chromium/gpu/ipc/service/gpu_memory_buffer_factory_io_surface.h
+++ b/chromium/gpu/ipc/service/gpu_memory_buffer_factory_io_surface.h
@@ -57,6 +57,7 @@ class GPU_IPC_SERVICE_EXPORT GpuMemoryBufferFactoryIOSurface
scoped_refptr<gl::GLImage> CreateAnonymousImage(const gfx::Size& size,
gfx::BufferFormat format,
gfx::BufferUsage usage,
+ SurfaceHandle surface_handle,
bool* is_cleared) override;
unsigned RequiredTextureType() override;
bool SupportsFormatRGB() override;
diff --git a/chromium/gpu/ipc/service/gpu_memory_buffer_factory_native_pixmap.cc b/chromium/gpu/ipc/service/gpu_memory_buffer_factory_native_pixmap.cc
index dd1e489ad49..4e0d9492db0 100644
--- a/chromium/gpu/ipc/service/gpu_memory_buffer_factory_native_pixmap.cc
+++ b/chromium/gpu/ipc/service/gpu_memory_buffer_factory_native_pixmap.cc
@@ -15,14 +15,40 @@
#include "ui/gl/gl_bindings.h"
#include "ui/gl/gl_enums.h"
#include "ui/gl/gl_image_native_pixmap.h"
+#include "ui/gl/gl_implementation.h"
#if defined(USE_OZONE)
#include "ui/ozone/public/ozone_platform.h"
#include "ui/ozone/public/surface_factory_ozone.h"
#endif
+#if defined(USE_X11)
+#include "ui/gfx/linux/gbm_buffer.h" // nogncheck
+#include "ui/gfx/linux/gpu_memory_buffer_support_x11.h" // nogncheck
+#include "ui/gl/gl_image_glx_native_pixmap.h" // nogncheck
+#endif
+
namespace gpu {
+namespace {
+
+// The boilerplate code to initialize each GLImage that we need is the same, but
+// the Initialize() methods are not virtual, so a template is needed.
+template <class Image, class Pixmap>
+scoped_refptr<Image> CreateImageFromPixmap(const gfx::Size& size,
+ gfx::BufferFormat format,
+ scoped_refptr<Pixmap> pixmap) {
+ auto image = base::MakeRefCounted<Image>(size, format);
+ if (!image->Initialize(std::move(pixmap))) {
+ LOG(ERROR) << "Failed to create GLImage " << size.ToString() << ", "
+ << gfx::BufferFormatToString(format);
+ return nullptr;
+ }
+ return image;
+}
+
+} // namespace
+
GpuMemoryBufferFactoryNativePixmap::GpuMemoryBufferFactoryNativePixmap()
: GpuMemoryBufferFactoryNativePixmap(nullptr) {}
@@ -49,6 +75,18 @@ GpuMemoryBufferFactoryNativePixmap::CreateGpuMemoryBuffer(
usage);
return CreateGpuMemoryBufferFromNativePixmap(id, size, format, usage,
client_id, std::move(pixmap));
+#elif defined(USE_X11)
+ std::unique_ptr<ui::GbmBuffer> buffer =
+ ui::GpuMemoryBufferSupportX11::GetInstance()->CreateBuffer(format, size,
+ usage);
+ if (!buffer)
+ return gfx::GpuMemoryBufferHandle();
+ gfx::NativePixmapHandle handle = buffer->ExportHandle();
+ scoped_refptr<gfx::NativePixmapDmaBuf> pixmap =
+ base::MakeRefCounted<gfx::NativePixmapDmaBuf>(size, format,
+ std::move(handle));
+ return CreateGpuMemoryBufferFromNativePixmap(id, size, format, usage,
+ client_id, std::move(pixmap));
#else
NOTIMPLEMENTED();
return gfx::GpuMemoryBufferHandle();
@@ -72,6 +110,9 @@ void GpuMemoryBufferFactoryNativePixmap::CreateGpuMemoryBufferAsync(
&GpuMemoryBufferFactoryNativePixmap::OnNativePixmapCreated, id,
size, format, usage, client_id, std::move(callback),
weak_factory_.GetWeakPtr()));
+#elif defined(USE_X11)
+ std::move(callback).Run(CreateGpuMemoryBuffer(id, size, format, usage,
+ client_id, surface_handle));
#else
NOTIMPLEMENTED();
std::move(callback).Run(gfx::GpuMemoryBufferHandle());
@@ -131,13 +172,22 @@ GpuMemoryBufferFactoryNativePixmap::CreateImageForGpuMemoryBuffer(
}
}
- auto image = base::MakeRefCounted<gl::GLImageNativePixmap>(size, format);
- if (!image->Initialize(std::move(pixmap))) {
- LOG(ERROR) << "Failed to create GLImage " << size.ToString() << ", "
- << gfx::BufferFormatToString(format);
- return nullptr;
+ switch (gl::GetGLImplementation()) {
+ case gl::kGLImplementationEGLGLES2:
+ case gl::kGLImplementationEGLANGLE:
+ // EGL
+ return CreateImageFromPixmap<gl::GLImageNativePixmap>(size, format,
+ pixmap);
+#if defined(USE_X11)
+ case gl::kGLImplementationDesktopGL:
+ // GLX
+ return CreateImageFromPixmap<gl::GLImageGLXNativePixmap>(size, format,
+ pixmap);
+#endif
+ default:
+ NOTREACHED();
+ return nullptr;
}
- return image;
}
bool GpuMemoryBufferFactoryNativePixmap::SupportsCreateAnonymousImage() const {
@@ -153,13 +203,14 @@ GpuMemoryBufferFactoryNativePixmap::CreateAnonymousImage(
const gfx::Size& size,
gfx::BufferFormat format,
gfx::BufferUsage usage,
+ SurfaceHandle surface_handle,
bool* is_cleared) {
scoped_refptr<gfx::NativePixmap> pixmap;
#if defined(USE_OZONE)
pixmap = ui::OzonePlatform::GetInstance()
->GetSurfaceFactoryOzone()
- ->CreateNativePixmap(gpu::kNullSurfaceHandle, GetVulkanDevice(),
- size, format, usage);
+ ->CreateNativePixmap(surface_handle, GetVulkanDevice(), size,
+ format, usage);
#else
NOTIMPLEMENTED();
#endif
diff --git a/chromium/gpu/ipc/service/gpu_memory_buffer_factory_native_pixmap.h b/chromium/gpu/ipc/service/gpu_memory_buffer_factory_native_pixmap.h
index 10aed05404c..58e08eb6f0e 100644
--- a/chromium/gpu/ipc/service/gpu_memory_buffer_factory_native_pixmap.h
+++ b/chromium/gpu/ipc/service/gpu_memory_buffer_factory_native_pixmap.h
@@ -64,6 +64,7 @@ class GPU_IPC_SERVICE_EXPORT GpuMemoryBufferFactoryNativePixmap
scoped_refptr<gl::GLImage> CreateAnonymousImage(const gfx::Size& size,
gfx::BufferFormat format,
gfx::BufferUsage usage,
+ SurfaceHandle surface_handle,
bool* is_cleared) override;
unsigned RequiredTextureType() override;
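
For illustration, a minimal sketch (not part of this patch) of a call site for the updated CreateAnonymousImage(), which now receives the target SurfaceHandle instead of always allocating against kNullSurfaceHandle; |factory| and |surface_handle| are placeholders:

// Sketch only: |factory| is a GpuMemoryBufferFactory implementation such as
// GpuMemoryBufferFactoryNativePixmap; |surface_handle| is a gpu::SurfaceHandle.
bool is_cleared = false;
scoped_refptr<gl::GLImage> image = factory->CreateAnonymousImage(
    gfx::Size(256, 256), gfx::BufferFormat::RGBA_8888,
    gfx::BufferUsage::SCANOUT, surface_handle, &is_cleared);
if (!image) {
  // Allocation failed; the caller could fall back to a shared-memory buffer.
}
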
diff --git a/chromium/gpu/ipc/service/gpu_memory_buffer_factory_test_template.h b/chromium/gpu/ipc/service/gpu_memory_buffer_factory_test_template.h
index 900fd2a6209..9554db39a7d 100644
--- a/chromium/gpu/ipc/service/gpu_memory_buffer_factory_test_template.h
+++ b/chromium/gpu/ipc/service/gpu_memory_buffer_factory_test_template.h
@@ -68,8 +68,13 @@ TYPED_TEST_P(GpuMemoryBufferFactoryTest, CreateGpuMemoryBuffer) {
gfx::BufferUsage::SCANOUT_VEA_READ_CAMERA_AND_CPU_READ_WRITE,
};
for (auto usage : usages) {
+#if defined(USE_X11)
+ // On X11, we require GPUInfo to determine configuration support.
+ continue;
+#else
if (!support.IsNativeGpuMemoryBufferConfigurationSupported(format, usage))
continue;
+#endif
gfx::GpuMemoryBufferHandle handle =
TestFixture::factory_.CreateGpuMemoryBuffer(kBufferId, buffer_size,
diff --git a/chromium/gpu/ipc/service/gpu_watchdog_thread.cc b/chromium/gpu/ipc/service/gpu_watchdog_thread.cc
index 873527c6fcf..cc5d8a9f1c9 100644
--- a/chromium/gpu/ipc/service/gpu_watchdog_thread.cc
+++ b/chromium/gpu/ipc/service/gpu_watchdog_thread.cc
@@ -9,6 +9,7 @@
#include "base/debug/alias.h"
#include "base/files/file_util.h"
#include "base/format_macros.h"
+#include "base/memory/ptr_util.h"
#include "base/message_loop/message_loop_current.h"
#include "base/metrics/histogram_functions.h"
#include "base/power_monitor/power_monitor.h"
@@ -19,6 +20,7 @@
#include "base/threading/thread_task_runner_handle.h"
#include "build/build_config.h"
#include "gpu/config/gpu_crash_keys.h"
+#include "gpu/config/gpu_finch_features.h"
#include "ui/gl/shader_tracking.h"
#if defined(OS_WIN)
@@ -41,6 +43,27 @@ const int kGpuTimeout = 15000;
const int kGpuTimeout = 10000;
#endif
+// The same set of timeouts as in Watchdog V2 so we can compare the results
+// between V1 and V2.
+#if defined(CYGPROFILE_INSTRUMENTATION)
+const int kNewGpuTimeout = 30000;
+#elif defined(OS_MACOSX)
+const int kNewGpuTimeout = 17000;
+#else
+const int kNewGpuTimeout = 15000;
+#endif
+
+// Histogram parameters for GPU.WatchdogThread.V1.ExtraThreadTime and
+// GPU.WatchdogThread.V1.WaitTime
+constexpr base::TimeDelta kMin = base::TimeDelta::FromSeconds(1);
+constexpr base::TimeDelta kMax = base::TimeDelta::FromSeconds(150);
+constexpr int kBuckets = 50;
+
+// Histogram recorded in OnWatchdogTimeout()
+void GpuWatchdogTimeoutHistogram(GpuWatchdogTimeoutEvent timeout_event) {
+ base::UmaHistogramEnumeration("GPU.WatchdogThread.V1.Timeout", timeout_event);
+}
+
#if defined(USE_X11)
const base::FilePath::CharType kTtyFilePath[] =
FILE_PATH_LITERAL("/sys/class/tty/tty0/active");
@@ -50,7 +73,6 @@ const base::FilePath::CharType kTtyFilePath[] =
GpuWatchdogThreadImplV1::GpuWatchdogThreadImplV1()
: watched_task_runner_(base::ThreadTaskRunnerHandle::Get()),
- timeout_(base::TimeDelta::FromMilliseconds(kGpuTimeout)),
armed_(false),
task_observer_(this),
use_thread_cpu_time_(true),
@@ -65,6 +87,11 @@ GpuWatchdogThreadImplV1::GpuWatchdogThreadImplV1()
host_tty_(-1)
#endif
{
+ if (base::FeatureList::IsEnabled(features::kGpuWatchdogV1NewTimeout))
+ timeout_ = base::TimeDelta::FromMilliseconds(kNewGpuTimeout);
+ else
+ timeout_ = base::TimeDelta::FromMilliseconds(kGpuTimeout);
+
base::subtle::NoBarrier_Store(&awaiting_acknowledge_, false);
#if defined(OS_WIN)
@@ -79,7 +106,8 @@ GpuWatchdogThreadImplV1::GpuWatchdogThreadImplV1()
#if defined(USE_X11)
tty_file_ = base::OpenFile(base::FilePath(kTtyFilePath), "r");
- host_tty_ = GetActiveTTY();
+ UpdateActiveTTY();
+ host_tty_ = active_tty_;
#endif
base::MessageLoopCurrent::Get()->AddTaskObserver(&task_observer_);
}
@@ -138,6 +166,7 @@ void GpuWatchdogThreadImplV1::Init() {
void GpuWatchdogThreadImplV1::CleanUp() {
weak_factory_.InvalidateWeakPtrs();
+ more_gpu_thread_time_allowed_ = false;
armed_ = false;
}
@@ -235,6 +264,18 @@ GpuWatchdogThreadImplV1::~GpuWatchdogThreadImplV1() {
void GpuWatchdogThreadImplV1::OnAcknowledge() {
CHECK(base::PlatformThread::CurrentId() == GetThreadId());
+ // For metrics only
+ if (more_gpu_thread_time_allowed_) {
+ base::TimeDelta wait_time =
+ base::TimeTicks::Now() - last_timeout_timeticks_;
+ base::UmaHistogramCustomTimes("GPU.WatchdogThread.V1.ExtraThreadTime",
+ wait_time, kMin, kMax, kBuckets);
+ GpuWatchdogTimeoutHistogram(
+ GpuWatchdogTimeoutEvent::kProgressAfterMoreThreadTime);
+
+ more_gpu_thread_time_allowed_ = false;
+ }
+
// The check has already been acknowledged and another has already been
// scheduled by a previous call to OnAcknowledge. It is normal for a
// watched thread to see armed_ being true multiple times before
@@ -331,6 +372,9 @@ void GpuWatchdogThreadImplV1::DeliberatelyTerminateToRecoverFromHang() {
// Should not get here while the system is suspended.
DCHECK(!suspension_counter_.HasRefs());
+ base::TimeTicks function_start = base::TimeTicks::Now();
+ GpuWatchdogTimeoutHistogram(GpuWatchdogTimeoutEvent::kTimeout);
+
// If this metric is added too early (eg. watchdog creation time), it cannot
// be persistent. The histogram data will be lost after crash or browser exit.
// Delay the recording of kGpuWatchdogStart until the first OnCheckTimeout().
@@ -344,6 +388,9 @@ void GpuWatchdogThreadImplV1::DeliberatelyTerminateToRecoverFromHang() {
// when a machine wakes up from sleep or hibernation, which would otherwise
// appear to be a hang.
if (base::Time::Now() > suspension_timeout_) {
+ // Reset the timeticks after resume for metrics.
+ last_timeout_timeticks_ = function_start;
+
OnAcknowledge();
return;
}
@@ -359,6 +406,13 @@ void GpuWatchdogThreadImplV1::DeliberatelyTerminateToRecoverFromHang() {
base::ThreadTicks current_cpu_time = GetWatchedThreadTime();
base::TimeDelta time_since_arm = current_cpu_time - arm_cpu_time_;
if (use_thread_cpu_time_ && (time_since_arm < timeout_)) {
+ // For metrics
+ if (!more_gpu_thread_time_allowed_) {
+ more_gpu_thread_time_allowed_ = true;
+ last_timeout_timeticks_ = function_start;
+ GpuWatchdogTimeoutHistogram(GpuWatchdogTimeoutEvent::kMoreThreadTime);
+ }
+
task_runner()->PostDelayedTask(
FROM_HERE,
base::BindOnce(&GpuWatchdogThreadImplV1::OnCheckTimeout,
@@ -367,6 +421,7 @@ void GpuWatchdogThreadImplV1::DeliberatelyTerminateToRecoverFromHang() {
return;
}
#endif
+ GpuWatchdogTimeoutHistogram(GpuWatchdogTimeoutEvent::kTimeoutWait);
// For minimal developer annoyance, don't keep terminating. You need to skip
// the call to base::Process::Terminate below in a debugger for this to be
@@ -382,8 +437,13 @@ void GpuWatchdogThreadImplV1::DeliberatelyTerminateToRecoverFromHang() {
#if defined(USE_X11)
// Don't crash if we're not on the TTY of our host X11 server.
- int active_tty = GetActiveTTY();
- if (host_tty_ != -1 && active_tty != -1 && host_tty_ != active_tty) {
+ UpdateActiveTTY();
+ if (host_tty_ != -1 && active_tty_ != -1 && host_tty_ != active_tty_) {
+ // Only record when there is a change of TTY.
+ if (last_active_tty_ != active_tty_) {
+ GpuWatchdogTimeoutHistogram(
+ GpuWatchdogTimeoutEvent::kContinueOnNonHostServerTty);
+ }
OnAcknowledge();
return;
}
@@ -444,10 +504,22 @@ void GpuWatchdogThreadImplV1::DeliberatelyTerminateToRecoverFromHang() {
// Check it one last time before crashing.
if (!base::subtle::NoBarrier_Load(&awaiting_acknowledge_)) {
+ { // For metrics only
+ base::TimeDelta wait_time;
+ if (more_gpu_thread_time_allowed_) {
+ more_gpu_thread_time_allowed_ = false;
+ wait_time = base::TimeTicks::Now() - last_timeout_timeticks_;
+ } else {
+ wait_time = base::TimeTicks::Now() - function_start;
+ }
+ base::UmaHistogramCustomTimes("GPU.WatchdogThread.V1.WaitTime", wait_time,
+ kMin, kMax, kBuckets);
+ GpuWatchdogTimeoutHistogram(GpuWatchdogTimeoutEvent::kProgressAfterWait);
+ }
OnAcknowledge();
return;
}
-
+ GpuWatchdogTimeoutHistogram(GpuWatchdogTimeoutEvent::kKill);
GpuWatchdogHistogram(GpuWatchdogThreadEvent::kGpuWatchdogKill);
// Deliberately crash the process to create a crash dump.
@@ -539,16 +611,18 @@ base::ThreadTicks GpuWatchdogThreadImplV1::GetWatchedThreadTime() {
#endif
#if defined(USE_X11)
-int GpuWatchdogThreadImplV1::GetActiveTTY() const {
+void GpuWatchdogThreadImplV1::UpdateActiveTTY() {
+ last_active_tty_ = active_tty_;
+
+ active_tty_ = -1;
char tty_string[8] = {0};
if (tty_file_ && !fseek(tty_file_, 0, SEEK_SET) &&
fread(tty_string, 1, 7, tty_file_)) {
int tty_number;
- size_t num_res = sscanf(tty_string, "tty%d\n", &tty_number);
- if (num_res == 1)
- return tty_number;
+ if (sscanf(tty_string, "tty%d\n", &tty_number) == 1) {
+ active_tty_ = tty_number;
+ }
}
- return -1;
}
#endif
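
For reference, the UpdateActiveTTY() logic above keeps the same parsing approach as the GetActiveTTY() it replaces: read /sys/class/tty/tty0/active and pull the number out of the "ttyN" string. A standalone sketch of that technique (plain C standard I/O, no Chromium types; ReadActiveTty is a hypothetical helper name, not part of the patch):

  #include <cstdio>

  // Returns the active TTY number parsed from /sys/class/tty/tty0/active,
  // or -1 if the file cannot be opened, read, or parsed.
  int ReadActiveTty() {
    std::FILE* tty_file = std::fopen("/sys/class/tty/tty0/active", "r");
    if (!tty_file)
      return -1;
    int active_tty = -1;
    char tty_string[8] = {0};
    if (!std::fseek(tty_file, 0, SEEK_SET) &&
        std::fread(tty_string, 1, 7, tty_file)) {
      int tty_number;
      if (std::sscanf(tty_string, "tty%d\n", &tty_number) == 1)
        active_tty = tty_number;
    }
    std::fclose(tty_file);
    return active_tty;
  }

The watchdog itself keeps the file open across calls instead of reopening it, and records both the current and the previous value so it can tell whether the active TTY changed between timeouts.
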
diff --git a/chromium/gpu/ipc/service/gpu_watchdog_thread.h b/chromium/gpu/ipc/service/gpu_watchdog_thread.h
index 7d128d5d362..a2c633dc41f 100644
--- a/chromium/gpu/ipc/service/gpu_watchdog_thread.h
+++ b/chromium/gpu/ipc/service/gpu_watchdog_thread.h
@@ -31,6 +31,38 @@ enum class GpuWatchdogThreadEvent {
kMaxValue = kGpuWatchdogEnd,
};
+// These values are persisted to logs. Entries should not be renumbered and
+// numeric values should never be reused.
+enum class GpuWatchdogTimeoutEvent {
+ // Recorded each time OnWatchdogTimeout() is called.
+ kTimeout,
+ // Recorded when a GPU main thread is killed for a detected hang.
+ kKill,
+ // Windows only: Recorded when a hang is detected but we allow the GPU main
+ // thread to continue until it has spent the full thread time doing the
+ // work.
+ kMoreThreadTime,
+ // Windows only: The GPU makes progress after being given more thread time.
+ // The GPU main thread is not killed.
+ kProgressAfterMoreThreadTime,
+ // A GPU hang is detected, but the watchdog waits for 60 seconds before
+ // taking action.
+ kTimeoutWait,
+ // The GPU makes progress within 60 sec in OnWatchdogTimeout(). The GPU main
+ // thread is not killed.
+ kProgressAfterWait,
+ // Just continue if it's not on the TTY of our host X11 server.
+ kContinueOnNonHostServerTty,
+ // Windows only: After detecting a GPU hang and continuing through
+ // OnGpuWatchdogTimeout for the max number of cycles, the GPU main thread
+ // still cannot get the full thread time.
+ kLessThanFullThreadTimeAfterCapped,
+ // Windows only: The GPU main thread went through the
+ // kLessThanFullThreadTimeAfterCapped stage before the process is killed.
+ kKillOnLessThreadTime,
+ kMaxValue = kKillOnLessThreadTime,
+};
+
// A thread that intermittently sends tasks to a group of watched message loops
// and deliberately crashes if one of them does not respond after a timeout.
class GPU_IPC_SERVICE_EXPORT GpuWatchdogThread : public base::Thread,
@@ -182,7 +214,7 @@ class GPU_IPC_SERVICE_EXPORT GpuWatchdogThreadImplV1
#endif
#if defined(USE_X11)
- int GetActiveTTY() const;
+ void UpdateActiveTTY();
#endif
scoped_refptr<base::SingleThreadTaskRunner> watched_task_runner_;
@@ -229,12 +261,21 @@ class GPU_IPC_SERVICE_EXPORT GpuWatchdogThreadImplV1
base::Time check_time_;
base::TimeTicks check_timeticks_;
+ // The time in the last OnCheckTimeout()
+ base::TimeTicks last_timeout_timeticks_;
+
+ // After a GPU hang is detected, whether the GPU thread is allowed to
+ // continue because it has not spent enough thread time.
+ bool more_gpu_thread_time_allowed_ = false;
+
// whether GpuWatchdogThreadEvent::kGpuWatchdogStart has been recorded.
bool is_watchdog_start_histogram_recorded = false;
#if defined(USE_X11)
FILE* tty_file_;
int host_tty_;
+ int active_tty_ = -1;
+ int last_active_tty_ = -1;
#endif
base::WeakPtrFactory<GpuWatchdogThreadImplV1> weak_factory_{this};
diff --git a/chromium/gpu/ipc/service/gpu_watchdog_thread_unittest.cc b/chromium/gpu/ipc/service/gpu_watchdog_thread_unittest.cc
index 925457ef637..420faeb36d2 100644
--- a/chromium/gpu/ipc/service/gpu_watchdog_thread_unittest.cc
+++ b/chromium/gpu/ipc/service/gpu_watchdog_thread_unittest.cc
@@ -20,8 +20,7 @@ namespace {
constexpr auto kGpuWatchdogTimeoutForTesting =
base::TimeDelta::FromMilliseconds(1000);
-constexpr base::TimeDelta kMaxWaitTimeForTesting =
- base::TimeDelta::FromMilliseconds(4000);
+constexpr int kMaxExtraCyclesBeforeKillForTesting = 1;
// This task will run for duration_ms milliseconds.
void SimpleTask(base::TimeDelta duration) {
@@ -74,7 +73,7 @@ void GpuWatchdogTest::SetUp() {
watchdog_thread_ = gpu::GpuWatchdogThreadImplV2::Create(
/*start_backgrounded*/ false,
/*timeout*/ kGpuWatchdogTimeoutForTesting,
- /*max_wait_time*/ kMaxWaitTimeForTesting,
+ /*max_extra_cycles_before_kill*/ kMaxExtraCyclesBeforeKillForTesting,
/*test_mode*/ true);
}
@@ -143,13 +142,16 @@ void GpuWatchdogPowerTest::LongTaskOnResume(
TEST_F(GpuWatchdogTest, GpuInitializationHang) {
// GPU init takes longer than timeout.
#if defined(OS_WIN)
- SimpleTask(kGpuWatchdogTimeoutForTesting * kInitFactor +
- kGpuWatchdogTimeoutForTesting *
- kMaxCountOfMoreGpuThreadTimeAllowed +
- kMaxWaitTimeForTesting + base::TimeDelta::FromMilliseconds(3000));
+ SimpleTask(
+ kGpuWatchdogTimeoutForTesting * kInitFactor +
+ kGpuWatchdogTimeoutForTesting * kMaxCountOfMoreGpuThreadTimeAllowed +
+ kGpuWatchdogTimeoutForTesting * kMaxExtraCyclesBeforeKillForTesting +
+ base::TimeDelta::FromMilliseconds(3000));
#else
SimpleTask(kGpuWatchdogTimeoutForTesting * kInitFactor +
- kMaxWaitTimeForTesting + base::TimeDelta::FromMilliseconds(3000));
+ kGpuWatchdogTimeoutForTesting *
+ kMaxExtraCyclesBeforeKillForTesting +
+ base::TimeDelta::FromMilliseconds(3000));
#endif
// Gpu hangs. OnInitComplete() is not called
@@ -203,13 +205,15 @@ TEST_F(GpuWatchdogTest, GpuRunningATaskHang) {
base::BindOnce(&SimpleTask, kGpuWatchdogTimeoutForTesting * 2 +
kGpuWatchdogTimeoutForTesting *
kMaxCountOfMoreGpuThreadTimeAllowed +
- kMaxWaitTimeForTesting +
+ kGpuWatchdogTimeoutForTesting *
+ kMaxExtraCyclesBeforeKillForTesting +
base::TimeDelta::FromMilliseconds(4000)));
#else
task_environment_.GetMainThreadTaskRunner()->PostTask(
FROM_HERE,
base::BindOnce(&SimpleTask, kGpuWatchdogTimeoutForTesting * 2 +
- kMaxWaitTimeForTesting +
+ kGpuWatchdogTimeoutForTesting *
+ kMaxExtraCyclesBeforeKillForTesting +
base::TimeDelta::FromMilliseconds(4000)));
#endif
@@ -262,7 +266,8 @@ TEST_F(GpuWatchdogTest, GpuSwitchingToForegroundHang) {
/*duration*/ kGpuWatchdogTimeoutForTesting * 2 +
kGpuWatchdogTimeoutForTesting *
kMaxCountOfMoreGpuThreadTimeAllowed +
- kMaxWaitTimeForTesting +
+ kGpuWatchdogTimeoutForTesting *
+ kMaxExtraCyclesBeforeKillForTesting +
base::TimeDelta::FromMilliseconds(4200),
/*time_to_switch_to_foreground*/
base::TimeDelta::FromMilliseconds(200)));
@@ -272,7 +277,8 @@ TEST_F(GpuWatchdogTest, GpuSwitchingToForegroundHang) {
base::BindOnce(&GpuWatchdogTest::LongTaskFromBackgroundToForeground,
base::Unretained(this),
/*duration*/ kGpuWatchdogTimeoutForTesting * 2 +
- kMaxWaitTimeForTesting +
+ kGpuWatchdogTimeoutForTesting *
+ kMaxExtraCyclesBeforeKillForTesting +
base::TimeDelta::FromMilliseconds(4200),
/*time_to_switch_to_foreground*/
base::TimeDelta::FromMilliseconds(200)));
@@ -305,13 +311,16 @@ TEST_F(GpuWatchdogTest, GpuInitializationPause) {
watchdog_thread_->ResumeWatchdog();
// The Gpu init continues for (init timeout + 4000) ms.
#if defined(OS_WIN)
- SimpleTask(kGpuWatchdogTimeoutForTesting * kInitFactor +
- kGpuWatchdogTimeoutForTesting *
- kMaxCountOfMoreGpuThreadTimeAllowed +
- kMaxWaitTimeForTesting + base::TimeDelta::FromMilliseconds(4000));
+ SimpleTask(
+ kGpuWatchdogTimeoutForTesting * kInitFactor +
+ kGpuWatchdogTimeoutForTesting * kMaxCountOfMoreGpuThreadTimeAllowed +
+ kGpuWatchdogTimeoutForTesting * kMaxExtraCyclesBeforeKillForTesting +
+ base::TimeDelta::FromMilliseconds(4000));
#else
SimpleTask(kGpuWatchdogTimeoutForTesting * kInitFactor +
- kMaxWaitTimeForTesting + base::TimeDelta::FromMilliseconds(4000));
+ kGpuWatchdogTimeoutForTesting *
+ kMaxExtraCyclesBeforeKillForTesting +
+ base::TimeDelta::FromMilliseconds(4000));
#endif
// A GPU hang should be detected.
@@ -354,7 +363,9 @@ TEST_F(GpuWatchdogPowerTest, GpuOnResumeHang) {
/*duration*/ kGpuWatchdogTimeoutForTesting * kRestartFactor +
kGpuWatchdogTimeoutForTesting *
kMaxCountOfMoreGpuThreadTimeAllowed +
- kMaxWaitTimeForTesting + base::TimeDelta::FromMilliseconds(4200),
+ kGpuWatchdogTimeoutForTesting *
+ kMaxExtraCyclesBeforeKillForTesting +
+ base::TimeDelta::FromMilliseconds(4200),
/*time_to_power_resume*/
base::TimeDelta::FromMilliseconds(200)));
#else
@@ -363,7 +374,9 @@ TEST_F(GpuWatchdogPowerTest, GpuOnResumeHang) {
base::BindOnce(
&GpuWatchdogPowerTest::LongTaskOnResume, base::Unretained(this),
/*duration*/ kGpuWatchdogTimeoutForTesting * kRestartFactor +
- kMaxWaitTimeForTesting + base::TimeDelta::FromMilliseconds(4200),
+ kGpuWatchdogTimeoutForTesting *
+ kMaxExtraCyclesBeforeKillForTesting +
+ base::TimeDelta::FromMilliseconds(4200),
/*time_to_power_resume*/
base::TimeDelta::FromMilliseconds(200)));
#endif
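
The test updates above swap the flat kMaxWaitTimeForTesting term for kGpuWatchdogTimeoutForTesting * kMaxExtraCyclesBeforeKillForTesting when sizing the simulated hang, so a task only counts as a hang once it outlasts every grace mechanism. A rough sketch of that budget arithmetic with illustrative values (the factors below are stand-ins, not the exact constants from the tests):

  #include <chrono>
  #include <iostream>

  int main() {
    using std::chrono::milliseconds;
    // Illustrative stand-ins for the test constants.
    const milliseconds timeout{1000};    // kGpuWatchdogTimeoutForTesting
    const int init_factor = 2;           // kInitFactor
    const int max_more_thread_time = 4;  // kMaxCountOfMoreGpuThreadTimeAllowed (Windows only)
    const int max_extra_cycles = 1;      // kMaxExtraCyclesBeforeKillForTesting
    const milliseconds slack{3000};      // extra margin the test adds

    // Worst-case duration a simulated task must run before the watchdog is
    // guaranteed to fire: init budget + extra thread time + extra cycles + slack.
    const milliseconds hang_duration = timeout * init_factor +
                                       timeout * max_more_thread_time +
                                       timeout * max_extra_cycles + slack;
    std::cout << "hang task must run >= " << hang_duration.count() << " ms\n";
    return 0;
  }
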
diff --git a/chromium/gpu/ipc/service/gpu_watchdog_thread_v2.cc b/chromium/gpu/ipc/service/gpu_watchdog_thread_v2.cc
index 970e6e56022..9f638367e22 100644
--- a/chromium/gpu/ipc/service/gpu_watchdog_thread_v2.cc
+++ b/chromium/gpu/ipc/service/gpu_watchdog_thread_v2.cc
@@ -10,29 +10,36 @@
#include "base/bit_cast.h"
#include "base/debug/alias.h"
#include "base/files/file_path.h"
+#include "base/files/file_util.h"
+#include "base/memory/ptr_util.h"
#include "base/message_loop/message_loop_current.h"
+#include "base/metrics/field_trial_params.h"
#include "base/metrics/histogram_functions.h"
-#include "base/metrics/persistent_histogram_allocator.h"
#include "base/native_library.h"
#include "base/power_monitor/power_monitor.h"
#include "base/strings/string_number_conversions.h"
+#include "base/system/sys_info.h"
#include "base/threading/platform_thread.h"
#include "base/threading/thread_task_runner_handle.h"
#include "base/time/time.h"
#include "build/build_config.h"
#include "gpu/config/gpu_crash_keys.h"
+#include "gpu/config/gpu_finch_features.h"
namespace gpu {
-GpuWatchdogThreadImplV2::GpuWatchdogThreadImplV2(base::TimeDelta timeout,
- base::TimeDelta max_wait_time,
- bool is_test_mode)
+GpuWatchdogThreadImplV2::GpuWatchdogThreadImplV2(
+ base::TimeDelta timeout,
+ int max_extra_cycles_before_kill,
+ bool is_test_mode)
: watchdog_timeout_(timeout),
in_gpu_initialization_(true),
- max_wait_time_(max_wait_time),
+ max_extra_cycles_before_kill_(max_extra_cycles_before_kill),
is_test_mode_(is_test_mode),
watched_gpu_task_runner_(base::ThreadTaskRunnerHandle::Get()) {
base::MessageLoopCurrent::Get()->AddTaskObserver(this);
+ num_of_processors_ = base::SysInfo::NumberOfProcessors();
+
#if defined(OS_WIN)
// GetCurrentThread returns a pseudo-handle that cannot be used by one thread
// to identify another. DuplicateHandle creates a "real" handle that can be
@@ -44,11 +51,24 @@ GpuWatchdogThreadImplV2::GpuWatchdogThreadImplV2(base::TimeDelta timeout,
}
#endif
+#if defined(USE_X11)
+ tty_file_ = base::OpenFile(
+ base::FilePath(FILE_PATH_LITERAL("/sys/class/tty/tty0/active")), "r");
+ UpdateActiveTTY();
+ host_tty_ = active_tty_;
+#endif
+
Arm();
}
GpuWatchdogThreadImplV2::~GpuWatchdogThreadImplV2() {
DCHECK(watched_gpu_task_runner_->BelongsToCurrentThread());
+ // Stop() might take too long and the watchdog timeout is triggered.
+ // Disarm first before calling Stop() to avoid a crash.
+ if (IsArmed())
+ Disarm();
+ PauseWatchdog();
+
Stop(); // stop the watchdog thread
base::MessageLoopCurrent::Get()->RemoveTaskObserver(this);
@@ -58,16 +78,21 @@ GpuWatchdogThreadImplV2::~GpuWatchdogThreadImplV2() {
if (watched_thread_handle_)
CloseHandle(watched_thread_handle_);
#endif
+
+#if defined(USE_X11)
+ if (tty_file_)
+ fclose(tty_file_);
+#endif
}
// static
std::unique_ptr<GpuWatchdogThreadImplV2> GpuWatchdogThreadImplV2::Create(
bool start_backgrounded,
base::TimeDelta timeout,
- base::TimeDelta max_wait_time,
+ int max_extra_cycles_before_kill,
bool is_test_mode) {
- auto watchdog_thread = base::WrapUnique(
- new GpuWatchdogThreadImplV2(timeout, max_wait_time, is_test_mode));
+ auto watchdog_thread = base::WrapUnique(new GpuWatchdogThreadImplV2(
+ timeout, max_extra_cycles_before_kill, is_test_mode));
base::Thread::Options options;
options.timer_slack = base::TIMER_SLACK_MAXIMUM;
watchdog_thread->StartWithOptions(options);
@@ -79,7 +104,31 @@ std::unique_ptr<GpuWatchdogThreadImplV2> GpuWatchdogThreadImplV2::Create(
// static
std::unique_ptr<GpuWatchdogThreadImplV2> GpuWatchdogThreadImplV2::Create(
bool start_backgrounded) {
- return Create(start_backgrounded, kGpuWatchdogTimeout, kMaxWaitTime, false);
+ base::TimeDelta gpu_watchdog_timeout = kGpuWatchdogTimeout;
+ int max_extra_cycles_before_kill = kMaxExtraCyclesBeforeKill;
+
+ if (base::FeatureList::IsEnabled(features::kGpuWatchdogV2NewTimeout)) {
+ const char kNewTimeOutParam[] = "new_time_out";
+ const char kMaxExtraCyclesBeforeKillParam[] =
+ "max_extra_cycles_before_kill";
+#if defined(OS_WIN) || defined(OS_MACOSX)
+ constexpr int kFinchMaxExtraCyclesBeforeKill = 1;
+#else
+ constexpr int kFinchMaxExtraCyclesBeforeKill = 2;
+#endif
+
+ int timeout = base::GetFieldTrialParamByFeatureAsInt(
+ features::kGpuWatchdogV2NewTimeout, kNewTimeOutParam,
+ kGpuWatchdogTimeout.InSeconds());
+ gpu_watchdog_timeout = base::TimeDelta::FromSeconds(timeout);
+
+ max_extra_cycles_before_kill = base::GetFieldTrialParamByFeatureAsInt(
+ features::kGpuWatchdogV2NewTimeout, kMaxExtraCyclesBeforeKillParam,
+ kFinchMaxExtraCyclesBeforeKill);
+ }
+
+ return Create(start_backgrounded, gpu_watchdog_timeout,
+ max_extra_cycles_before_kill, false);
}
// Do not add power observer during watchdog init, PowerMonitor might not be up
@@ -170,7 +219,7 @@ void GpuWatchdogThreadImplV2::Init() {
base::BindOnce(&GpuWatchdogThreadImplV2::OnWatchdogTimeout, weak_ptr_),
timeout);
- last_arm_disarm_counter_ = base::subtle::NoBarrier_Load(&arm_disarm_counter_);
+ last_arm_disarm_counter_ = ReadArmDisarmCounter();
watchdog_start_timeticks_ = base::TimeTicks::Now();
last_on_watchdog_timeout_timeticks_ = watchdog_start_timeticks_;
@@ -278,8 +327,7 @@ void GpuWatchdogThreadImplV2::RestartWatchdogTimeoutTask(
base::BindOnce(&GpuWatchdogThreadImplV2::OnWatchdogTimeout, weak_ptr_),
timeout);
last_on_watchdog_timeout_timeticks_ = base::TimeTicks::Now();
- last_arm_disarm_counter_ =
- base::subtle::NoBarrier_Load(&arm_disarm_counter_);
+ last_arm_disarm_counter_ = ReadArmDisarmCounter();
#if defined(OS_WIN)
if (watched_thread_handle_) {
last_on_watchdog_timeout_thread_ticks_ = GetWatchedThreadTime();
@@ -357,7 +405,11 @@ void GpuWatchdogThreadImplV2::InProgress() {
bool GpuWatchdogThreadImplV2::IsArmed() {
// It's an odd number.
- return base::subtle::NoBarrier_Load(&arm_disarm_counter_) & 1;
+ return base::subtle::Release_Load(&arm_disarm_counter_) & 1;
+}
+
+base::subtle::Atomic32 GpuWatchdogThreadImplV2::ReadArmDisarmCounter() {
+ return base::subtle::Release_Load(&arm_disarm_counter_);
}
// Running on the watchdog thread.
@@ -377,40 +429,32 @@ void GpuWatchdogThreadImplV2::OnWatchdogTimeout() {
GpuWatchdogHistogram(GpuWatchdogThreadEvent::kGpuWatchdogStart);
}
- base::subtle::Atomic32 arm_disarm_counter =
- base::subtle::NoBarrier_Load(&arm_disarm_counter_);
+ auto arm_disarm_counter = ReadArmDisarmCounter();
GpuWatchdogTimeoutHistogram(GpuWatchdogTimeoutEvent::kTimeout);
if (power_resumed_event_)
num_of_timeout_after_power_resume_++;
if (foregrounded_event_)
num_of_timeout_after_foregrounded_++;
+#if defined(USE_X11)
+ UpdateActiveTTY();
+#endif
+
// Collect all needed info for gpu hang detection.
bool disarmed = arm_disarm_counter % 2 == 0; // even number
bool gpu_makes_progress = arm_disarm_counter != last_arm_disarm_counter_;
bool watched_thread_needs_more_time =
- WatchedThreadNeedsMoreTime(disarmed || gpu_makes_progress);
-
- // No gpu hang is detected. Continue with another OnWatchdogTimeout task
- if (disarmed || gpu_makes_progress || watched_thread_needs_more_time) {
+ WatchedThreadNeedsMoreThreadTime(disarmed || gpu_makes_progress);
+ bool no_gpu_hang = disarmed || gpu_makes_progress ||
+ watched_thread_needs_more_time ||
+ ContinueOnNonHostX11ServerTty();
+ bool allows_extra_timeout = WatchedThreadGetsExtraTimeout(no_gpu_hang);
+ no_gpu_hang = no_gpu_hang || allows_extra_timeout;
+
+ // No gpu hang. Continue with another OnWatchdogTimeout task.
+ if (no_gpu_hang) {
last_on_watchdog_timeout_timeticks_ = base::TimeTicks::Now();
- last_arm_disarm_counter_ =
- base::subtle::NoBarrier_Load(&arm_disarm_counter_);
-
- task_runner()->PostDelayedTask(
- FROM_HERE,
- base::BindOnce(&GpuWatchdogThreadImplV2::OnWatchdogTimeout, weak_ptr_),
- watchdog_timeout_);
- return;
- }
-
- // An experiment for all platforms: Wait for max_wait_time_ and see if GPU
- // will response.
- GpuWatchdogTimeoutHistogram(GpuWatchdogTimeoutEvent::kTimeoutWait);
- if (GpuRespondsAfterWaiting()) {
- last_on_watchdog_timeout_timeticks_ = base::TimeTicks::Now();
- last_arm_disarm_counter_ =
- base::subtle::NoBarrier_Load(&arm_disarm_counter_);
+ last_arm_disarm_counter_ = ReadArmDisarmCounter();
task_runner()->PostDelayedTask(
FROM_HERE,
@@ -421,34 +465,36 @@ void GpuWatchdogThreadImplV2::OnWatchdogTimeout() {
// Still armed without any progress. GPU possibly hangs.
GpuWatchdogTimeoutHistogram(GpuWatchdogTimeoutEvent::kKill);
- DeliberatelyTerminateToRecoverFromHang();
-}
-
-bool GpuWatchdogThreadImplV2::GpuIsAlive() {
- base::subtle::Atomic32 arm_disarm_counter =
- base::subtle::NoBarrier_Load(&arm_disarm_counter_);
- bool gpu_makes_progress = arm_disarm_counter != last_arm_disarm_counter_;
+#if defined(OS_WIN)
+ if (less_than_full_thread_time_after_capped_)
+ GpuWatchdogTimeoutHistogram(GpuWatchdogTimeoutEvent::kKillOnLessThreadTime);
+#endif
- return (gpu_makes_progress);
+ DeliberatelyTerminateToRecoverFromHang();
}
-bool GpuWatchdogThreadImplV2::WatchedThreadNeedsMoreTime(
+bool GpuWatchdogThreadImplV2::WatchedThreadNeedsMoreThreadTime(
bool no_gpu_hang_detected) {
#if defined(OS_WIN)
if (!watched_thread_handle_)
return false;
- // For metrics only - If count_of_more_gpu_thread_time_allowed_ > 0, we know
- // extra time was extended in the previous OnWatchdogTimeout(). Now we find
- // gpu makes progress. Record this case.
- if (no_gpu_hang_detected && count_of_more_gpu_thread_time_allowed_ > 0) {
- GpuWatchdogTimeoutHistogram(
- GpuWatchdogTimeoutEvent::kProgressAfterMoreThreadTime);
- WindowsNumOfExtraTimeoutsHistogram();
+ // We allow extra thread time first; when that runs out, we extend extra
+ // timeout cycles. If we are already extending extra timeout cycles, don't
+ // add extra thread time.
+ if (count_of_extra_cycles_ > 0)
+ return false;
+
+ WatchedThreadNeedsMoreThreadTimeHistogram(
+ no_gpu_hang_detected,
+ /*start_of_more_thread_time*/ false);
+
+ if (!no_gpu_hang_detected && count_of_more_gpu_thread_time_allowed_ >=
+ kMaxCountOfMoreGpuThreadTimeAllowed) {
+ less_than_full_thread_time_after_capped_ = true;
+ } else {
+ less_than_full_thread_time_after_capped_ = false;
}
- // For metrics only - The extra time was give in timeouts.
- time_in_extra_timeouts_ =
- count_of_more_gpu_thread_time_allowed_ * watchdog_timeout_;
// Calculate how many thread ticks the watched thread spent doing the work.
base::ThreadTicks now = GetWatchedThreadTime();
@@ -465,13 +511,15 @@ bool GpuWatchdogThreadImplV2::WatchedThreadNeedsMoreTime(
// Reset the remaining thread ticks.
remaining_watched_thread_ticks_ = watchdog_timeout_;
count_of_more_gpu_thread_time_allowed_ = 0;
+
return false;
} else {
+ // This is the start of allowing more thread time.
+ if (count_of_more_gpu_thread_time_allowed_ == 0) {
+ WatchedThreadNeedsMoreThreadTimeHistogram(
+ no_gpu_hang_detected, /*start_of_more_thread_time*/ true);
+ }
count_of_more_gpu_thread_time_allowed_++;
- // Only record it once for all extenteded timeout on the same detected gpu
- // hang, so we know this is equivlent one crash in our crash reports.
- if (count_of_more_gpu_thread_time_allowed_ == 1)
- GpuWatchdogTimeoutHistogram(GpuWatchdogTimeoutEvent::kMoreThreadTime);
return true;
}
@@ -510,25 +558,24 @@ base::ThreadTicks GpuWatchdogThreadImplV2::GetWatchedThreadTime() {
}
#endif
-// This is an experiment on all platforms to see whether GPU will response
-// after waiting longer.
-bool GpuWatchdogThreadImplV2::GpuRespondsAfterWaiting() {
- base::TimeDelta duration;
- base::TimeTicks start_timeticks = base::TimeTicks::Now();
+bool GpuWatchdogThreadImplV2::WatchedThreadGetsExtraTimeout(bool no_gpu_hang) {
+ if (max_extra_cycles_before_kill_ == 0)
+ return false;
- while (duration < max_wait_time_) {
- // Sleep for 1 seconds each time and check if the GPU makes a progress.
- base::PlatformThread::Sleep(base::TimeDelta::FromSeconds(1));
- duration = base::TimeTicks::Now() - start_timeticks;
+ // We want to record histograms even if there is no gpu hang.
+ bool allows_more_timeouts = false;
+ WatchedThreadGetsExtraTimeoutHistogram(no_gpu_hang);
- if (GpuIsAlive()) {
- GpuWatchdogTimeoutHistogram(GpuWatchdogTimeoutEvent::kProgressAfterWait);
- GpuWatchdogWaitTimeHistogram(duration);
- return true;
+ if (no_gpu_hang) {
+ if (count_of_extra_cycles_ > 0) {
+ count_of_extra_cycles_ = 0;
}
+ } else if (count_of_extra_cycles_ < max_extra_cycles_before_kill_) {
+ count_of_extra_cycles_++;
+ allows_more_timeouts = true;
}
- return false;
+ return allows_more_timeouts;
}
void GpuWatchdogThreadImplV2::DeliberatelyTerminateToRecoverFromHang() {
@@ -567,8 +614,10 @@ void GpuWatchdogThreadImplV2::DeliberatelyTerminateToRecoverFromHang() {
base::TimeDelta timeticks_elapses =
function_begin_timeticks - last_on_watchdog_timeout_timeticks_;
base::debug::Alias(&timeticks_elapses);
+ base::debug::Alias(&max_extra_cycles_before_kill_);
#if defined(OS_WIN)
base::debug::Alias(&remaining_watched_thread_ticks_);
+ base::debug::Alias(&less_than_full_thread_time_after_capped_);
#endif
GpuWatchdogHistogram(GpuWatchdogThreadEvent::kGpuWatchdogKill);
@@ -579,8 +628,14 @@ void GpuWatchdogThreadImplV2::DeliberatelyTerminateToRecoverFromHang() {
crash_keys::gpu_watchdog_kill_after_power_resume.Set(
WithinOneMinFromPowerResumed() ? "1" : "0");
+ crash_keys::num_of_processors.Set(base::NumberToString(num_of_processors_));
+
+ // Check the arm_disarm_counter value one more time.
+ auto last_arm_disarm_counter = ReadArmDisarmCounter();
+ base::debug::Alias(&last_arm_disarm_counter);
+
// Deliberately crash the process to create a crash dump.
- *((volatile int*)0) = 0xdeadface;
+ *static_cast<volatile int*>(nullptr) = 0x1337;
}
void GpuWatchdogThreadImplV2::GpuWatchdogHistogram(
@@ -619,7 +674,7 @@ void GpuWatchdogThreadImplV2::GpuWatchdogTimeoutHistogram(
}
#if defined(OS_WIN)
-void GpuWatchdogThreadImplV2::WindowsNumOfExtraTimeoutsHistogram() {
+void GpuWatchdogThreadImplV2::RecordExtraThreadTimeHistogram() {
// Record the number of timeouts the GPU main thread needs to make progress
// after GPU OnWatchdogTimeout() is triggered. The maximum count is 6 which
// is more than kMaxCountOfMoreGpuThreadTimeAllowed(4);
@@ -657,47 +712,87 @@ void GpuWatchdogThreadImplV2::WindowsNumOfExtraTimeoutsHistogram() {
count, kMin, kMax, kBuckets);
}
}
-#endif
-void GpuWatchdogThreadImplV2::GpuWatchdogWaitTimeHistogram(
- base::TimeDelta wait_time) {
-#if defined(OS_WIN)
- // Add the time the GPU thread was given for full thread time.
- wait_time += time_in_extra_timeouts_;
-#endif
+void GpuWatchdogThreadImplV2::
+ RecordNumOfUsersWaitingWithExtraThreadTimeHistogram(int count) {
+ constexpr int kMax = 4;
- // Record the wait time in OnWatchdogTimeout() for the GPU main thread to
- // make a progress. The maximum recodrding time is 150 seconds because
- // Windows need to add the time spent before reaching here (max 60 sec).
- constexpr base::TimeDelta kMin = base::TimeDelta::FromSeconds(1);
- constexpr base::TimeDelta kMax = base::TimeDelta::FromSeconds(150);
- constexpr int kBuckets = 50;
- bool recorded = false;
-
- base::UmaHistogramCustomTimes("GPU.WatchdogThread.WaitTime", wait_time, kMin,
- kMax, kBuckets);
-
- if (in_gpu_initialization_) {
- base::UmaHistogramCustomTimes("GPU.WatchdogThread.WaitTime.Init", wait_time,
- kMin, kMax, kBuckets);
- recorded = true;
- }
+ base::UmaHistogramExactLinear("GPU.WatchdogThread.ExtraThreadTime.NumOfUsers",
+ count, kMax);
+}
- if (WithinOneMinFromPowerResumed()) {
- base::UmaHistogramCustomTimes("GPU.WatchdogThread.WaitTime.PowerResume",
- wait_time, kMin, kMax, kBuckets);
- recorded = true;
+void GpuWatchdogThreadImplV2::WatchedThreadNeedsMoreThreadTimeHistogram(
+ bool no_gpu_hang_detected,
+ bool start_of_more_thread_time) {
+ if (start_of_more_thread_time) {
+ // This is the start of allowing more thread time. Only record it once for
+ // all following timeouts on the same detected GPU hang, so we know this
+ // is equivalent to one crash in our crash reports.
+ GpuWatchdogTimeoutHistogram(GpuWatchdogTimeoutEvent::kMoreThreadTime);
+ RecordNumOfUsersWaitingWithExtraThreadTimeHistogram(0);
+ } else {
+ if (count_of_more_gpu_thread_time_allowed_ > 0) {
+ if (no_gpu_hang_detected) {
+ // If count_of_more_gpu_thread_time_allowed_ > 0, we know extra time was
+ // extended in the previous OnWatchdogTimeout(). Now we find the GPU makes
+ // progress. Record this case.
+ GpuWatchdogTimeoutHistogram(
+ GpuWatchdogTimeoutEvent::kProgressAfterMoreThreadTime);
+ RecordExtraThreadTimeHistogram();
+ } else {
+ if (count_of_more_gpu_thread_time_allowed_ >=
+ kMaxCountOfMoreGpuThreadTimeAllowed) {
+ GpuWatchdogTimeoutHistogram(
+ GpuWatchdogTimeoutEvent::kLessThanFullThreadTimeAfterCapped);
+ }
+ }
+
+ // Records the number of users who are still waiting. We can use this
+ // number to calculate the number of users who have already quit.
+ RecordNumOfUsersWaitingWithExtraThreadTimeHistogram(
+ count_of_more_gpu_thread_time_allowed_);
+
+ // Used by GPU.WatchdogThread.WaitTime later
+ time_in_wait_for_full_thread_time_ =
+ count_of_more_gpu_thread_time_allowed_ * watchdog_timeout_;
+ }
}
+}
+#endif
- if (WithinOneMinFromForegrounded()) {
- base::UmaHistogramCustomTimes("GPU.WatchdogThread.WaitTime.Foregrounded",
- wait_time, kMin, kMax, kBuckets);
- recorded = true;
- }
+void GpuWatchdogThreadImplV2::WatchedThreadGetsExtraTimeoutHistogram(
+ bool no_gpu_hang) {
+ constexpr int kMax = 60;
+ if (count_of_extra_cycles_ == 0 && !no_gpu_hang) {
+ GpuWatchdogTimeoutHistogram(GpuWatchdogTimeoutEvent::kTimeoutWait);
+ base::UmaHistogramExactLinear("GPU.WatchdogThread.WaitTime.NumOfUsers", 0,
+ kMax);
+ } else if (count_of_extra_cycles_ > 0) {
+ int count = watchdog_timeout_.InSeconds() * count_of_extra_cycles_;
+ base::UmaHistogramExactLinear("GPU.WatchdogThread.WaitTime.NumOfUsers",
+ count, kMax);
+ if (no_gpu_hang) {
+ GpuWatchdogTimeoutHistogram(GpuWatchdogTimeoutEvent::kProgressAfterWait);
+ base::UmaHistogramExactLinear(
+ "GPU.WatchdogThread.WaitTime.ProgressAfterWait", count, kMax);
- if (!recorded) {
- base::UmaHistogramCustomTimes("GPU.WatchdogThread.WaitTime.Normal",
- wait_time, kMin, kMax, kBuckets);
+#if defined(OS_WIN)
+ // Add the time the GPU thread was given for the full thread time up to 60
+ // seconds. GPU.WatchdogThread.WaitTime is essentially equal to
+ // GPU.WatchdogThread.WaitTime.ProgressAfterWait on non-Windows systems.
+ base::TimeDelta wait_time = base::TimeDelta::FromSeconds(count);
+ wait_time += time_in_wait_for_full_thread_time_;
+
+ constexpr base::TimeDelta kMinTime = base::TimeDelta::FromSeconds(1);
+ constexpr base::TimeDelta kMaxTime = base::TimeDelta::FromSeconds(150);
+ constexpr int kBuckets = 50;
+
+ // The time the GPU main thread takes to finish a task after a "hang" is
+ // detected.
+ base::UmaHistogramCustomTimes("GPU.WatchdogThread.WaitTime", wait_time,
+ kMinTime, kMaxTime, kBuckets);
+#endif
+ }
}
}
@@ -711,6 +806,40 @@ bool GpuWatchdogThreadImplV2::WithinOneMinFromForegrounded() {
return foregrounded_event_ && num_of_timeout_after_foregrounded_ <= count;
}
+#if defined(USE_X11)
+void GpuWatchdogThreadImplV2::UpdateActiveTTY() {
+ last_active_tty_ = active_tty_;
+
+ active_tty_ = -1;
+ char tty_string[8] = {0};
+ if (tty_file_ && !fseek(tty_file_, 0, SEEK_SET) &&
+ fread(tty_string, 1, 7, tty_file_)) {
+ int tty_number;
+ if (sscanf(tty_string, "tty%d\n", &tty_number) == 1) {
+ active_tty_ = tty_number;
+ }
+ }
+}
+#endif
+
+bool GpuWatchdogThreadImplV2::ContinueOnNonHostX11ServerTty() {
+#if defined(USE_X11)
+ if (host_tty_ == -1 || active_tty_ == -1)
+ return false;
+
+ // Don't crash if we're not on the TTY of our host X11 server.
+ if (active_tty_ != host_tty_) {
+ // Only record when there is a change of TTY.
+ if (last_active_tty_ == active_tty_) {
+ GpuWatchdogTimeoutHistogram(
+ GpuWatchdogTimeoutEvent::kContinueOnNonHostServerTty);
+ }
+ return true;
+ }
+#endif
+ return false;
+}
+
// For gpu testing only. Return whether a GPU hang was detected or not.
bool GpuWatchdogThreadImplV2::IsGpuHangDetectedForTesting() {
DCHECK(is_test_mode_);
@@ -733,5 +862,4 @@ void GpuWatchdogThreadImplV2::WaitForPowerObserverAddedForTesting() {
base::BindOnce(&base::WaitableEvent::Signal, base::Unretained(&event)));
event.Wait();
}
-
} // namespace gpu
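
The V2 watchdog above detects progress with a single arm/disarm counter: the watched GPU thread increments it on every Arm() and Disarm(), so an odd value means a task is in flight, and any change since the last OnWatchdogTimeout() means the thread made progress. A dependency-free sketch of that idea using std::atomic in place of base::subtle (names are hypothetical and the memory-ordering choice is simplified):

  #include <atomic>
  #include <cstdint>

  class ArmDisarmCounter {
   public:
    // Called on the watched thread; Arm() and Disarm() alternate, so the
    // counter is odd while a task is being processed and even otherwise.
    void Arm() { counter_.fetch_add(1, std::memory_order_release); }
    void Disarm() { counter_.fetch_add(1, std::memory_order_release); }

    // Called on the watchdog thread at each timeout.
    bool IsArmed() const { return Read() & 1; }
    bool MadeProgressSince(int32_t last_value) const {
      return Read() != last_value;
    }
    int32_t Read() const { return counter_.load(std::memory_order_acquire); }

   private:
    std::atomic<int32_t> counter_{0};
  };

On each timeout the watchdog treats "disarmed or counter changed" as no hang, which corresponds to the disarmed / gpu_makes_progress pair checked in OnWatchdogTimeout() above; only when neither holds does it move on to the extra-thread-time and extra-cycle fallbacks.
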
diff --git a/chromium/gpu/ipc/service/gpu_watchdog_thread_v2.h b/chromium/gpu/ipc/service/gpu_watchdog_thread_v2.h
index f9a63c7d953..8fb593943ee 100644
--- a/chromium/gpu/ipc/service/gpu_watchdog_thread_v2.h
+++ b/chromium/gpu/ipc/service/gpu_watchdog_thread_v2.h
@@ -5,6 +5,7 @@
#ifndef GPU_IPC_SERVICE_GPU_WATCHDOG_THREAD_V2_H_
#define GPU_IPC_SERVICE_GPU_WATCHDOG_THREAD_V2_H_
+#include "build/build_config.h"
#include "gpu/ipc/service/gpu_watchdog_thread.h"
namespace gpu {
@@ -14,30 +15,7 @@ namespace gpu {
// OnGPUWatchdogTimeout for at most 4 times before the gpu thread is killed.
constexpr int kMaxCountOfMoreGpuThreadTimeAllowed = 4;
#endif
-constexpr base::TimeDelta kMaxWaitTime = base::TimeDelta::FromSeconds(60);
-
-// These values are persisted to logs. Entries should not be renumbered and
-// numeric values should never be reused.
-enum class GpuWatchdogTimeoutEvent {
- // Recorded each time OnWatchdogTimeout() is called.
- kTimeout,
- // Recorded when a GPU main thread is killed for a detected hang.
- kKill,
- // Window only: Recorded when a hang is detected but we allow the GPU main
- // thread to continue until it spent the full
- // thread time doing the work.
- kMoreThreadTime,
- // Windows only: The GPU makes progress after givenmore thread time. The GPU
- // main thread is not killed.
- kProgressAfterMoreThreadTime,
- // A gpu hang is detected but watchdog waits for 60 seconds before taking
- // action.
- kTimeoutWait,
- // The GPU makes progress within 60 sec in OnWatchdogTimeout(). The GPU main
- // thread is not killed.
- kProgressAfterWait,
- kMaxValue = kProgressAfterWait,
-};
+constexpr int kMaxExtraCyclesBeforeKill = 0;
class GPU_IPC_SERVICE_EXPORT GpuWatchdogThreadImplV2
: public GpuWatchdogThread,
@@ -49,7 +27,7 @@ class GPU_IPC_SERVICE_EXPORT GpuWatchdogThreadImplV2
static std::unique_ptr<GpuWatchdogThreadImplV2> Create(
bool start_backgrounded,
base::TimeDelta timeout,
- base::TimeDelta max_wait_time,
+ int max_extra_cycles_before_kill,
bool test_mode);
~GpuWatchdogThreadImplV2() override;
@@ -62,6 +40,7 @@ class GPU_IPC_SERVICE_EXPORT GpuWatchdogThreadImplV2
void OnGpuProcessTearDown() override;
void ResumeWatchdog() override;
void PauseWatchdog() override;
+ // Records "GPU.WatchdogThread.Event.V2" and "GPU.WatchdogThread.Event".
void GpuWatchdogHistogram(GpuWatchdogThreadEvent thread_event) override;
bool IsGpuHangDetectedForTesting() override;
void WaitForPowerObserverAddedForTesting() override;
@@ -90,7 +69,7 @@ class GPU_IPC_SERVICE_EXPORT GpuWatchdogThreadImplV2
};
GpuWatchdogThreadImplV2(base::TimeDelta timeout,
- base::TimeDelta max_wait_time,
+ int max_extra_cycles_before_kill,
bool test_mode);
void OnAddPowerObserver();
void RestartWatchdogTimeoutTask(PauseResumeSource source_of_request);
@@ -100,35 +79,53 @@ class GPU_IPC_SERVICE_EXPORT GpuWatchdogThreadImplV2
void Disarm();
void InProgress();
bool IsArmed();
+ base::subtle::Atomic32 ReadArmDisarmCounter();
void OnWatchdogTimeout();
- bool GpuIsAlive();
- bool WatchedThreadNeedsMoreTime(bool no_gpu_hang_detected);
+ bool WatchedThreadNeedsMoreThreadTime(bool no_gpu_hang_detected);
#if defined(OS_WIN)
base::ThreadTicks GetWatchedThreadTime();
#endif
- bool GpuRespondsAfterWaiting();
+ bool WatchedThreadGetsExtraTimeout(bool no_gpu_hang);
// Do not change the function name. It is used for [GPU HANG] crash reports.
void DeliberatelyTerminateToRecoverFromHang();
// Histogram recorded in OnWatchdogTimeout()
+ // Records "GPU.WatchdogThread.Timeout"
void GpuWatchdogTimeoutHistogram(GpuWatchdogTimeoutEvent timeout_event);
#if defined(OS_WIN)
- // The extra timeout the GPU main thread needs to make a progress.
- void WindowsNumOfExtraTimeoutsHistogram();
+ // The extra thread time the GPU main thread needs to make a progress.
+ // Records "GPU.WatchdogThread.ExtraThreadTime".
+ void RecordExtraThreadTimeHistogram();
+ // The number of users per timeout who stay in Chrome after being given extra
+ // thread time. Records "GPU.WatchdogThread.ExtraThreadTime.NumOfUsers" and
+ // "GPU.WatchdogThread.Timeout".
+ void RecordNumOfUsersWaitingWithExtraThreadTimeHistogram(int count);
+
+ // Histograms recorded for WatchedThreadNeedsMoreThreadTime() function.
+ void WatchedThreadNeedsMoreThreadTimeHistogram(
+ bool no_gpu_hang_detected,
+ bool start_of_more_thread_time);
#endif
- // The wait time in OnWatchdogTimeout() for the GPU main thread to make a
- // progress.
- void GpuWatchdogWaitTimeHistogram(base::TimeDelta wait_time);
+ // The number of users who stay in Chrome after the extra timeout wait cycles.
+ // Records "GPU.WatchdogThread.WaitTime.ProgressAfterWait",
+ // "GPU.WatchdogThread.WaitTime.NumOfUsers" and "GPU.WatchdogThread.Timeout".
+ void WatchedThreadGetsExtraTimeoutHistogram(bool no_gpu_hang);
// Used for metrics. It's 1 minute after the event.
bool WithinOneMinFromPowerResumed();
bool WithinOneMinFromForegrounded();
+#if defined(USE_X11)
+ void UpdateActiveTTY();
+#endif
+ // The watchdog continues when it's not on the TTY of our host X11 server.
+ bool ContinueOnNonHostX11ServerTty();
+
// This counter is only written on the gpu thread, and read on both threads.
- base::subtle::Atomic32 arm_disarm_counter_ = 0;
+ volatile base::subtle::Atomic32 arm_disarm_counter_ = 0;
// The counter number read in the last OnWatchdogTimeout() on the watchdog
// thread.
int32_t last_arm_disarm_counter_ = 0;
@@ -172,8 +169,21 @@ class GPU_IPC_SERVICE_EXPORT GpuWatchdogThreadImplV2
// continue due to not enough thread time.
int count_of_more_gpu_thread_time_allowed_ = 0;
- // The accumulated timeout time the GPU main thread was given.
- base::TimeDelta time_in_extra_timeouts_;
+ // The total timeout, up to 60 seconds, the watchdog thread waits for the GPU
+ // main thread to get full thread time.
+ base::TimeDelta time_in_wait_for_full_thread_time_;
+
+ // After detecting a GPU hang and continuing through OnGpuWatchdogTimeout for
+ // the max number of cycles, the GPU main thread still cannot get the full
+ // thread time.
+ bool less_than_full_thread_time_after_capped_ = false;
+#endif
+
+#if defined(USE_X11)
+ FILE* tty_file_ = nullptr;
+ int host_tty_ = -1;
+ int active_tty_ = -1;
+ int last_active_tty_ = -1;
#endif
// The system has entered the power suspension mode.
@@ -201,12 +211,21 @@ class GPU_IPC_SERVICE_EXPORT GpuWatchdogThreadImplV2
// constructor.
bool in_gpu_initialization_ = false;
+ // The number of logical processors/cores on the current machine.
+ unsigned num_of_processors_;
+
+ // Don't kill the GPU process immediately after a GPU hang is detected. Wait
+ // for extra timeout cycles, and kill it if the GPU still doesn't respond
+ // after the wait.
+ const int max_extra_cycles_before_kill_;
+ // How many timeout cycles have elapsed since we detected a hang.
+ int count_of_extra_cycles_ = 0;
+
// For the experiment and the debugging purpose
size_t num_of_timeout_after_power_resume_ = 0;
size_t num_of_timeout_after_foregrounded_ = 0;
bool foregrounded_event_ = false;
bool power_resumed_event_ = false;
- base::TimeDelta max_wait_time_;
// For gpu testing only.
const bool is_test_mode_;
diff --git a/chromium/gpu/ipc/service/image_decode_accelerator_stub.cc b/chromium/gpu/ipc/service/image_decode_accelerator_stub.cc
index f61a9798e97..462ed98cc09 100644
--- a/chromium/gpu/ipc/service/image_decode_accelerator_stub.cc
+++ b/chromium/gpu/ipc/service/image_decode_accelerator_stub.cc
@@ -377,13 +377,6 @@ void ImageDecodeAcceleratorStub::ProcessCompletedDecode(
// No need for us to call the resource cleaner. Skia should do that.
resource_cleaner.Release().Reset();
}
-#else
- // Right now, we only support Chrome OS because we need to use the
- // |native_pixmap_handle| member of a GpuMemoryBufferHandle.
- NOTIMPLEMENTED()
- << "Image decode acceleration is unsupported for this platform";
- return;
-#endif
// Insert the cache entry in the transfer cache. Note that this section
// validates several of the IPC parameters: |params.raster_decoder_route_id|,
@@ -441,6 +434,12 @@ void ImageDecodeAcceleratorStub::ProcessCompletedDecode(
}
DCHECK(notify_gl_state_changed);
notify_gl_state_changed->RunAndReset();
+#else
+ // Right now, we only support Chrome OS because we need to use the
+ // |native_pixmap_handle| member of a GpuMemoryBufferHandle.
+ NOTIMPLEMENTED()
+ << "Image decode acceleration is unsupported for this platform";
+#endif
}
void ImageDecodeAcceleratorStub::FinishCompletedDecode(
diff --git a/chromium/gpu/ipc/service/image_decode_accelerator_stub_unittest.cc b/chromium/gpu/ipc/service/image_decode_accelerator_stub_unittest.cc
index b190cfcc37d..af1f64ee5bf 100644
--- a/chromium/gpu/ipc/service/image_decode_accelerator_stub_unittest.cc
+++ b/chromium/gpu/ipc/service/image_decode_accelerator_stub_unittest.cc
@@ -6,19 +6,31 @@
#include <stdint.h>
#include <memory>
+#include <string>
#include <utility>
#include <vector>
#include "base/atomicops.h"
#include "base/bind.h"
+#include "base/callback_forward.h"
#include "base/containers/queue.h"
#include "base/files/scoped_file.h"
#include "base/logging.h"
#include "base/macros.h"
#include "base/memory/scoped_refptr.h"
#include "base/memory/weak_ptr.h"
+#include "base/numerics/checked_math.h"
+#include "base/numerics/safe_conversions.h"
+#include "base/run_loop.h"
+#include "base/strings/string_util.h"
+#include "base/strings/stringprintf.h"
#include "base/test/scoped_feature_list.h"
+#include "base/test/task_environment.h"
#include "base/test/test_simple_task_runner.h"
+#include "base/trace_event/memory_allocator_dump.h"
+#include "base/trace_event/memory_dump_manager.h"
+#include "base/trace_event/memory_dump_request_args.h"
+#include "base/trace_event/process_memory_dump.h"
#include "cc/paint/image_transfer_cache_entry.h"
#include "cc/paint/transfer_cache_entry.h"
#include "gpu/command_buffer/common/buffer.h"
@@ -52,6 +64,7 @@
#include "gpu/ipc/service/image_decode_accelerator_stub.h"
#include "gpu/ipc/service/image_decode_accelerator_worker.h"
#include "ipc/ipc_message.h"
+#include "skia/ext/skia_memory_dump_provider.h"
#include "testing/gmock/include/gmock/gmock.h"
#include "testing/gtest/include/gtest/gtest.h"
#include "third_party/skia/include/core/SkImage.h"
@@ -66,6 +79,8 @@
#include "url/gurl.h"
using testing::InSequence;
+using testing::Mock;
+using testing::NiceMock;
using testing::StrictMock;
namespace gpu {
@@ -73,6 +88,12 @@ class MemoryTracker;
namespace {
+// The size of a decoded buffer to report for a successful decode.
+constexpr size_t kDecodedBufferByteSize = 123u;
+
+// The byte size Skia is expected to report for a buffer object.
+constexpr uint64_t kSkiaBufferObjectSize = 32768;
+
struct ExpectedCacheEntry {
uint32_t id = 0u;
SkISize dimensions;
@@ -80,13 +101,44 @@ struct ExpectedCacheEntry {
std::unique_ptr<MemoryTracker> CreateMockMemoryTracker(
const GPUCreateCommandBufferConfig& init_params) {
- return std::make_unique<gles2::MockMemoryTracker>();
+ return std::make_unique<NiceMock<gles2::MockMemoryTracker>>();
}
scoped_refptr<Buffer> MakeBufferForTesting() {
return MakeMemoryBuffer(sizeof(base::subtle::Atomic32));
}
+uint64_t GetMemoryDumpByteSize(
+ const base::trace_event::MemoryAllocatorDump* dump,
+ const std::string& entry_name) {
+ DCHECK(dump);
+ auto entry_it = std::find_if(
+ dump->entries().cbegin(), dump->entries().cend(),
+ [&entry_name](
+ const base::trace_event::MemoryAllocatorDump::Entry& entry) {
+ return entry.name == entry_name;
+ });
+ if (entry_it != dump->entries().cend()) {
+ EXPECT_EQ(std::string(base::trace_event::MemoryAllocatorDump::kUnitsBytes),
+ entry_it->units);
+ EXPECT_EQ(base::trace_event::MemoryAllocatorDump::Entry::EntryType::kUint64,
+ entry_it->entry_type);
+ return entry_it->value_uint64;
+ }
+ EXPECT_TRUE(false);
+ return 0u;
+}
+
+base::CheckedNumeric<uint64_t> GetExpectedTotalMippedSizeForPlanarImage(
+ const cc::ServiceImageTransferCacheEntry* decode_entry) {
+ base::CheckedNumeric<uint64_t> safe_total_image_size = 0u;
+ for (const auto& plane_image : decode_entry->plane_images()) {
+ safe_total_image_size += base::strict_cast<uint64_t>(
+ GrContext::ComputeImageSize(plane_image, GrMipMapped::kYes));
+ }
+ return safe_total_image_size;
+}
+
// This ImageFactory is defined so that we don't have to generate a real
// GpuMemoryBuffer with decoded data in these tests.
class TestImageFactory : public ImageFactory {
@@ -107,6 +159,7 @@ class TestImageFactory : public ImageFactory {
scoped_refptr<gl::GLImage> CreateAnonymousImage(const gfx::Size& size,
gfx::BufferFormat format,
gfx::BufferUsage usage,
+ SurfaceHandle surface_handle,
bool* is_cleared) override {
NOTREACHED();
return nullptr;
@@ -151,7 +204,7 @@ class MockImageDecodeAcceleratorWorker : public ImageDecodeAcceleratorWorker {
}
decode_result->visible_size = next_decode.output_size;
decode_result->buffer_format = format_for_decodes_;
- decode_result->buffer_byte_size = 0u;
+ decode_result->buffer_byte_size = kDecodedBufferByteSize;
std::move(next_decode.decode_cb).Run(std::move(decode_result));
} else {
std::move(next_decode.decode_cb).Run(nullptr);
@@ -227,14 +280,19 @@ class ImageDecodeAcceleratorStubTest
channel_manager()->SetImageDecodeAcceleratorWorkerForTesting(
&image_decode_accelerator_worker_);
+ // Register Skia's memory dump provider so that we can inspect its reported
+ // memory usage.
+ base::trace_event::MemoryDumpManager::GetInstance()->RegisterDumpProvider(
+ skia::SkiaMemoryDumpProvider::GetInstance(), "Skia", nullptr);
+
// Initialize the GrContext so that texture uploading works.
ContextResult context_result;
scoped_refptr<SharedContextState> shared_context_state =
channel_manager()->GetSharedContextState(&context_result);
ASSERT_EQ(ContextResult::kSuccess, context_result);
ASSERT_TRUE(shared_context_state);
- shared_context_state->InitializeGrContext(GpuDriverBugWorkarounds(),
- nullptr);
+ shared_context_state->InitializeGrContext(
+ GpuPreferences(), GpuDriverBugWorkarounds(), nullptr);
GpuChannel* channel = CreateChannel(kChannelId, false /* is_gpu_host */);
ASSERT_TRUE(channel);
@@ -332,7 +390,8 @@ class ImageDecodeAcceleratorStubTest
SyncToken SendDecodeRequest(const gfx::Size& output_size,
uint64_t decode_release_count,
uint32_t transfer_cache_entry_id,
- uint64_t handle_release_count) {
+ uint64_t handle_release_count,
+ bool needs_mips = false) {
GpuChannel* channel = channel_manager()->LookupChannel(kChannelId);
DCHECK(channel);
@@ -361,7 +420,7 @@ class ImageDecodeAcceleratorStubTest
decode_params.discardable_handle_shm_offset = handle.byte_offset();
decode_params.discardable_handle_release_count = handle_release_count;
decode_params.target_color_space = gfx::ColorSpace();
- decode_params.needs_mips = false;
+ decode_params.needs_mips = needs_mips;
HandleMessage(
channel,
@@ -418,7 +477,191 @@ class ImageDecodeAcceleratorStubTest
}
}
+ cc::ServiceImageTransferCacheEntry* RunSimpleDecode(bool needs_mips) {
+ EXPECT_CALL(image_decode_accelerator_worker_, DoDecode(gfx::Size(100, 100)))
+ .Times(1);
+ const SyncToken decode_sync_token = SendDecodeRequest(
+ gfx::Size(100, 100) /* output_size */, 1u /* decode_release_count */,
+ 1u /* transfer_cache_entry_id */, 1u /* handle_release_count */,
+ needs_mips);
+ if (!decode_sync_token.HasData())
+ return nullptr;
+ image_decode_accelerator_worker_.FinishOneDecode(true);
+ RunTasksUntilIdle();
+ if (!sync_point_manager()->IsSyncTokenReleased(decode_sync_token))
+ return nullptr;
+ ServiceTransferCache* transfer_cache = GetServiceTransferCache();
+ if (!transfer_cache)
+ return nullptr;
+ const int raster_decoder_id = GetRasterDecoderId();
+ if (raster_decoder_id < 0)
+ return nullptr;
+ auto* decode_entry = static_cast<cc::ServiceImageTransferCacheEntry*>(
+ transfer_cache->GetEntry(ServiceTransferCache::EntryKey(
+ raster_decoder_id, cc::TransferCacheEntryType::kImage,
+ 1u /* entry_id */)));
+ if (!Mock::VerifyAndClear(&image_decode_accelerator_worker_))
+ return nullptr;
+ return decode_entry;
+ }
+
+ // Requests a |detail_level| process memory dump and checks:
+ // - The total memory reported by the transfer cache.
+ // - The total GPU resources memory reported by Skia. Skia memory allocator
+ // dumps that share a global allocator dump with a transfer cache entry are
+ // not counted (and we check that the Skia dump importance is less than the
+ // corresponding transfer cache dump in that case).
+ // - The average transfer cache image entry byte size (this is only checked
+ // for background-level memory dumps).
+ void ExpectProcessMemoryDump(
+ base::trace_event::MemoryDumpLevelOfDetail detail_level,
+ uint64_t expected_total_transfer_cache_size,
+ uint64_t expected_total_skia_gpu_resources_size,
+ uint64_t expected_avg_image_size) {
+ // Request a process memory dump.
+ base::trace_event::MemoryDumpRequestArgs dump_args{};
+ dump_args.dump_guid = 1234u;
+ dump_args.dump_type =
+ base::trace_event::MemoryDumpType::EXPLICITLY_TRIGGERED;
+ dump_args.level_of_detail = detail_level;
+ dump_args.determinism = base::trace_event::MemoryDumpDeterminism::FORCE_GC;
+ std::unique_ptr<base::trace_event::ProcessMemoryDump> dump;
+ base::RunLoop run_loop;
+ base::trace_event::MemoryDumpManager::GetInstance()->CreateProcessDump(
+ dump_args,
+ base::BindOnce(
+ [](std::unique_ptr<base::trace_event::ProcessMemoryDump>* out_pmd,
+ base::RepeatingClosure quit_closure, bool success,
+ uint64_t dump_guid,
+ std::unique_ptr<base::trace_event::ProcessMemoryDump> pmd) {
+ if (success)
+ *out_pmd = std::move(pmd);
+ quit_closure.Run();
+ },
+ &dump, run_loop.QuitClosure()));
+ RunTasksUntilIdle();
+ run_loop.Run();
+
+ // Check the transfer cache dumps are as expected.
+ ServiceTransferCache* cache = GetServiceTransferCache();
+ ASSERT_TRUE(cache);
+ // This map will later allow us to answer the following question easily:
+ // which transfer cache entry memory dump points to a given shared global
+ // allocator dump?
+ std::map<
+ base::trace_event::MemoryAllocatorDumpGuid,
+ std::pair<base::trace_event::ProcessMemoryDump::MemoryAllocatorDumpEdge,
+ base::trace_event::MemoryAllocatorDump*>>
+ shared_dump_to_transfer_cache_entry_dump;
+ std::string transfer_cache_dump_name =
+ base::StringPrintf("gpu/transfer_cache/cache_0x%" PRIXPTR,
+ reinterpret_cast<uintptr_t>(cache));
+ if (detail_level ==
+ base::trace_event::MemoryDumpLevelOfDetail::BACKGROUND) {
+ auto transfer_cache_dump_it =
+ dump->allocator_dumps().find(transfer_cache_dump_name);
+ ASSERT_NE(dump->allocator_dumps().end(), transfer_cache_dump_it);
+ EXPECT_EQ(expected_total_transfer_cache_size,
+ GetMemoryDumpByteSize(
+ transfer_cache_dump_it->second.get(),
+ base::trace_event::MemoryAllocatorDump::kNameSize));
+
+ std::string avg_image_size_dump_name =
+ transfer_cache_dump_name + "/avg_image_size";
+ auto avg_image_size_dump_it =
+ dump->allocator_dumps().find(avg_image_size_dump_name);
+ ASSERT_NE(dump->allocator_dumps().end(), avg_image_size_dump_it);
+ EXPECT_EQ(expected_avg_image_size,
+ GetMemoryDumpByteSize(avg_image_size_dump_it->second.get(),
+ "average_size"));
+ } else {
+ DCHECK_EQ(base::trace_event::MemoryDumpLevelOfDetail::DETAILED,
+ detail_level);
+ base::CheckedNumeric<uint64_t> safe_actual_transfer_cache_total_size(0u);
+ std::string entry_dump_prefix =
+ transfer_cache_dump_name + "/gpu/entry_0x";
+ for (const auto& allocator_dump : dump->allocator_dumps()) {
+ if (base::StartsWith(allocator_dump.first, entry_dump_prefix,
+ base::CompareCase::SENSITIVE)) {
+ ASSERT_TRUE(allocator_dump.second);
+ safe_actual_transfer_cache_total_size += GetMemoryDumpByteSize(
+ allocator_dump.second.get(),
+ base::trace_event::MemoryAllocatorDump::kNameSize);
+
+ // If the dump name for this entry does not end in /dma_buf (i.e., we
+ // haven't requested mipmaps from Skia), the allocator dump for this
+ // cache entry should point to a shared global allocator dump (i.e.,
+ // shared with Skia). Let's save this association in
+ // |shared_dump_to_transfer_cache_entry_dump| for later.
+ ASSERT_FALSE(allocator_dump.second->guid().empty());
+ auto edge_it =
+ dump->allocator_dumps_edges().find(allocator_dump.second->guid());
+ ASSERT_EQ(base::EndsWith(allocator_dump.first, "/dma_buf",
+ base::CompareCase::SENSITIVE),
+ dump->allocator_dumps_edges().end() == edge_it);
+ if (edge_it != dump->allocator_dumps_edges().end()) {
+ ASSERT_FALSE(edge_it->second.target.empty());
+ ASSERT_EQ(shared_dump_to_transfer_cache_entry_dump.end(),
+ shared_dump_to_transfer_cache_entry_dump.find(
+ edge_it->second.target));
+ shared_dump_to_transfer_cache_entry_dump[edge_it->second.target] =
+ std::make_pair(edge_it->second, allocator_dump.second.get());
+ }
+ }
+ }
+ ASSERT_TRUE(safe_actual_transfer_cache_total_size.IsValid());
+ EXPECT_EQ(expected_total_transfer_cache_size,
+ safe_actual_transfer_cache_total_size.ValueOrDie());
+ }
+
+ // Check that the Skia dumps are as expected. We won't count Skia dumps that
+ // point to a global allocator dump that's shared with a transfer cache
+ // dump.
+ base::CheckedNumeric<uint64_t> safe_actual_total_skia_gpu_resources_size(
+ 0u);
+ for (const auto& allocator_dump : dump->allocator_dumps()) {
+ if (base::StartsWith(allocator_dump.first, "skia/gpu_resources",
+ base::CompareCase::SENSITIVE)) {
+ ASSERT_TRUE(allocator_dump.second);
+ uint64_t skia_allocator_dump_size = GetMemoryDumpByteSize(
+ allocator_dump.second.get(),
+ base::trace_event::MemoryAllocatorDump::kNameSize);
+
+ // If this dump points to a global allocator dump that's shared with a
+ // transfer cache dump, we won't count it.
+ ASSERT_FALSE(allocator_dump.second->guid().empty());
+ auto edge_it =
+ dump->allocator_dumps_edges().find(allocator_dump.second->guid());
+ if (edge_it != dump->allocator_dumps_edges().end()) {
+ ASSERT_FALSE(edge_it->second.target.empty());
+ auto transfer_cache_dump_it =
+ shared_dump_to_transfer_cache_entry_dump.find(
+ edge_it->second.target);
+ if (transfer_cache_dump_it !=
+ shared_dump_to_transfer_cache_entry_dump.end()) {
+ // Not counting the Skia dump is only valid if its importance is
+ // less than the transfer cache dump and the values of the dumps are
+ // the same.
+ EXPECT_EQ(skia_allocator_dump_size,
+ GetMemoryDumpByteSize(
+ transfer_cache_dump_it->second.second,
+ base::trace_event::MemoryAllocatorDump::kNameSize));
+ EXPECT_LT(edge_it->second.importance,
+ transfer_cache_dump_it->second.first.importance);
+ continue;
+ }
+ }
+
+ safe_actual_total_skia_gpu_resources_size += skia_allocator_dump_size;
+ }
+ }
+ ASSERT_TRUE(safe_actual_total_skia_gpu_resources_size.IsValid());
+ EXPECT_EQ(expected_total_skia_gpu_resources_size,
+ safe_actual_total_skia_gpu_resources_size.ValueOrDie());
+ }
+
protected:
+ base::test::SingleThreadTaskEnvironment task_environment_;
StrictMock<MockImageDecodeAcceleratorWorker> image_decode_accelerator_worker_;
private:
@@ -716,6 +959,102 @@ TEST_P(ImageDecodeAcceleratorStubTest, WaitForDiscardableHandleRegistration) {
CheckTransferCacheEntries({{1u, SkISize::Make(100, 100)}});
}
+TEST_P(ImageDecodeAcceleratorStubTest, MemoryReportDetailedForUnmippedDecode) {
+ cc::ServiceImageTransferCacheEntry* decode_entry =
+ RunSimpleDecode(false /* needs_mips */);
+ ASSERT_TRUE(decode_entry);
+ ExpectProcessMemoryDump(
+ base::trace_event::MemoryDumpLevelOfDetail::DETAILED,
+ base::strict_cast<uint64_t>(
+ kDecodedBufferByteSize) /* expected_total_transfer_cache_size */,
+ 0u /* expected_total_skia_gpu_resources_size */,
+ 0u /* expected_avg_image_size */);
+}
+
+TEST_P(ImageDecodeAcceleratorStubTest,
+ MemoryReportBackgroundForUnmippedDecode) {
+ cc::ServiceImageTransferCacheEntry* decode_entry =
+ RunSimpleDecode(false /* needs_mips */);
+ ASSERT_TRUE(decode_entry);
+ ExpectProcessMemoryDump(
+ base::trace_event::MemoryDumpLevelOfDetail::BACKGROUND,
+ base::strict_cast<uint64_t>(
+ kDecodedBufferByteSize) /* expected_total_transfer_cache_size */,
+ 0u /* expected_total_skia_gpu_resources_size */,
+ base::strict_cast<uint64_t>(
+ kDecodedBufferByteSize) /* expected_avg_image_size */);
+}
+
+TEST_P(ImageDecodeAcceleratorStubTest, MemoryReportDetailedForMippedDecode) {
+ cc::ServiceImageTransferCacheEntry* decode_entry =
+ RunSimpleDecode(true /* needs_mips */);
+ ASSERT_TRUE(decode_entry);
+ ASSERT_EQ(gfx::NumberOfPlanesForLinearBufferFormat(GetParam()),
+ decode_entry->plane_images().size());
+ base::CheckedNumeric<uint64_t> safe_expected_total_transfer_cache_size =
+ GetExpectedTotalMippedSizeForPlanarImage(decode_entry);
+ ASSERT_TRUE(safe_expected_total_transfer_cache_size.IsValid());
+ ExpectProcessMemoryDump(
+ base::trace_event::MemoryDumpLevelOfDetail::DETAILED,
+ safe_expected_total_transfer_cache_size.ValueOrDie(),
+ kSkiaBufferObjectSize /* expected_total_skia_gpu_resources_size */,
+ 0u /* expected_avg_image_size */);
+}
+
+TEST_P(ImageDecodeAcceleratorStubTest, MemoryReportBackgroundForMippedDecode) {
+ cc::ServiceImageTransferCacheEntry* decode_entry =
+ RunSimpleDecode(true /* needs_mips */);
+ ASSERT_TRUE(decode_entry);
+ ASSERT_EQ(gfx::NumberOfPlanesForLinearBufferFormat(GetParam()),
+ decode_entry->plane_images().size());
+ base::CheckedNumeric<uint64_t> safe_expected_total_transfer_cache_size =
+ GetExpectedTotalMippedSizeForPlanarImage(decode_entry);
+ ASSERT_TRUE(safe_expected_total_transfer_cache_size.IsValid());
+ ExpectProcessMemoryDump(
+ base::trace_event::MemoryDumpLevelOfDetail::BACKGROUND,
+ safe_expected_total_transfer_cache_size.ValueOrDie(),
+ kSkiaBufferObjectSize,
+ safe_expected_total_transfer_cache_size
+ .ValueOrDie() /* expected_avg_image_size */);
+}
+
+TEST_P(ImageDecodeAcceleratorStubTest,
+ MemoryReportDetailedForDeferredMippedDecode) {
+ cc::ServiceImageTransferCacheEntry* decode_entry =
+ RunSimpleDecode(false /* needs_mips */);
+ ASSERT_TRUE(decode_entry);
+ decode_entry->EnsureMips();
+ ASSERT_EQ(gfx::NumberOfPlanesForLinearBufferFormat(GetParam()),
+ decode_entry->plane_images().size());
+ base::CheckedNumeric<uint64_t> safe_expected_total_transfer_cache_size =
+ GetExpectedTotalMippedSizeForPlanarImage(decode_entry);
+ ASSERT_TRUE(safe_expected_total_transfer_cache_size.IsValid());
+ ExpectProcessMemoryDump(
+ base::trace_event::MemoryDumpLevelOfDetail::DETAILED,
+ safe_expected_total_transfer_cache_size.ValueOrDie(),
+ kSkiaBufferObjectSize /* expected_total_skia_gpu_resources_size */,
+ 0u /* expected_avg_image_size */);
+}
+
+TEST_P(ImageDecodeAcceleratorStubTest,
+ MemoryReportBackgroundForDeferredMippedDecode) {
+ cc::ServiceImageTransferCacheEntry* decode_entry =
+ RunSimpleDecode(false /* needs_mips */);
+ ASSERT_TRUE(decode_entry);
+ decode_entry->EnsureMips();
+ ASSERT_EQ(gfx::NumberOfPlanesForLinearBufferFormat(GetParam()),
+ decode_entry->plane_images().size());
+ // For a deferred mip request, the transfer cache doesn't update its size
+ // computation, so it reports memory as if no mips had been generated.
+ ExpectProcessMemoryDump(
+ base::trace_event::MemoryDumpLevelOfDetail::BACKGROUND,
+ base::strict_cast<uint64_t>(
+ kDecodedBufferByteSize) /* expected_total_transfer_cache_size */,
+ kSkiaBufferObjectSize,
+ base::strict_cast<uint64_t>(
+ kDecodedBufferByteSize) /* expected_avg_image_size */);
+}
+
// TODO(andrescj): test the deletion of transfer cache entries.
INSTANTIATE_TEST_SUITE_P(
diff --git a/chromium/gpu/ipc/service/image_transport_surface_overlay_mac.h b/chromium/gpu/ipc/service/image_transport_surface_overlay_mac.h
index f65ad035e90..a298747e3b7 100644
--- a/chromium/gpu/ipc/service/image_transport_surface_overlay_mac.h
+++ b/chromium/gpu/ipc/service/image_transport_surface_overlay_mac.h
@@ -53,7 +53,7 @@ class ImageTransportSurfaceOverlayMacBase : public BaseClass,
void PrepareToDestroy(bool have_context) override;
bool Resize(const gfx::Size& size,
float scale_factor,
- gl::GLSurface::ColorSpace color_space,
+ const gfx::ColorSpace& color_space,
bool has_alpha) override;
bool IsOffscreen() override;
gfx::SwapResult SwapBuffers(
diff --git a/chromium/gpu/ipc/service/image_transport_surface_overlay_mac.mm b/chromium/gpu/ipc/service/image_transport_surface_overlay_mac.mm
index c1af03a268d..eb46993b8f4 100644
--- a/chromium/gpu/ipc/service/image_transport_surface_overlay_mac.mm
+++ b/chromium/gpu/ipc/service/image_transport_surface_overlay_mac.mm
@@ -307,7 +307,7 @@ template <typename BaseClass>
bool ImageTransportSurfaceOverlayMacBase<BaseClass>::Resize(
const gfx::Size& pixel_size,
float scale_factor,
- gl::GLSurface::ColorSpace color_space,
+ const gfx::ColorSpace& color_space,
bool has_alpha) {
pixel_size_ = pixel_size;
scale_factor_ = scale_factor;
diff --git a/chromium/gpu/ipc/service/image_transport_surface_win.cc b/chromium/gpu/ipc/service/image_transport_surface_win.cc
index 957ee5068bc..dd52c11ee5e 100644
--- a/chromium/gpu/ipc/service/image_transport_surface_win.cc
+++ b/chromium/gpu/ipc/service/image_transport_surface_win.cc
@@ -27,9 +27,7 @@ scoped_refptr<gl::GLSurface> ImageTransportSurface::CreateNativeSurface(
SurfaceHandle surface_handle,
gl::GLSurfaceFormat format) {
DCHECK_NE(surface_handle, kNullSurfaceHandle);
-
scoped_refptr<gl::GLSurface> surface;
- bool override_vsync_for_multi_window_swap = false;
if (gl::GetGLImplementation() == gl::kGLImplementationEGLANGLE) {
auto vsync_provider =
@@ -58,10 +56,6 @@ scoped_refptr<gl::GLSurface> ImageTransportSurface::CreateNativeSurface(
surface_handle, std::move(vsync_provider)));
if (!surface)
return nullptr;
- // This is unnecessary with DirectComposition because that doesn't block
- // swaps, but instead blocks the first draw into a surface during the next
- // frame.
- override_vsync_for_multi_window_swap = true;
}
} else {
surface = gl::init::CreateViewGLSurface(surface_handle);
@@ -69,8 +63,11 @@ scoped_refptr<gl::GLSurface> ImageTransportSurface::CreateNativeSurface(
return nullptr;
}
+ // |override_vsync_for_multi_window_swap| is needed because Present() blocks
+ // when multiple windows use swap interval 1 all the time. With this flag the
+ // surface forces swap interval 0 when multiple windows are presenting.
return scoped_refptr<gl::GLSurface>(new PassThroughImageTransportSurface(
- delegate, surface.get(), override_vsync_for_multi_window_swap));
+ delegate, surface.get(), /*override_vsync_for_multi_window_swap=*/true));
}
} // namespace gpu
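
The hunk above removes the conditional and always passes override_vsync_for_multi_window_swap as true; per the new comment, Present() would otherwise block whenever several windows swap with interval 1. A minimal sketch of that policy, illustrative only and not part of the patch (the helper name and parameters are hypothetical):

    // Hypothetical helper illustrating the multi-window vsync override: when
    // more than one surface presents with swap interval 1, fall back to
    // interval 0 so blocking Present() calls do not serialize the windows.
    int EffectiveSwapInterval(int requested_interval,
                              int num_presenting_surfaces,
                              bool override_vsync_for_multi_window_swap) {
      if (override_vsync_for_multi_window_swap && requested_interval > 0 &&
          num_presenting_surfaces > 1) {
        return 0;  // Swap without vsync throttling.
      }
      return requested_interval;
    }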
diff --git a/chromium/gpu/ipc/service/raster_command_buffer_stub.cc b/chromium/gpu/ipc/service/raster_command_buffer_stub.cc
index e9c23ac950b..e56d4324689 100644
--- a/chromium/gpu/ipc/service/raster_command_buffer_stub.cc
+++ b/chromium/gpu/ipc/service/raster_command_buffer_stub.cc
@@ -103,7 +103,7 @@ gpu::ContextResult RasterCommandBufferStub::Initialize(
}
if (!shared_context_state->IsGLInitialized()) {
- if (!shared_context_state->MakeCurrent(nullptr) ||
+ if (!shared_context_state->MakeCurrent(nullptr, true /* needs_gl */) ||
!shared_context_state->InitializeGL(
manager->gpu_preferences(),
base::MakeRefCounted<gles2::FeatureInfo>(
@@ -127,7 +127,7 @@ gpu::ContextResult RasterCommandBufferStub::Initialize(
this, command_buffer_.get(), manager->outputter(),
manager->gpu_feature_info(), manager->gpu_preferences(),
memory_tracker_.get(), manager->shared_image_manager(),
- shared_context_state));
+ shared_context_state, channel()->is_gpu_host()));
sync_point_client_state_ =
channel_->sync_point_manager()->CreateSyncPointClientState(
@@ -138,10 +138,7 @@ gpu::ContextResult RasterCommandBufferStub::Initialize(
: "0");
scoped_refptr<gl::GLContext> context = shared_context_state->context();
- // Raster decoder needs gl context for GPUTracing.
- // TODO(penghuang): get rid of the gl dependeny when GL is not used for
- // raster. https://crbug.com/c/1018725
- if (!shared_context_state->MakeCurrent(nullptr, true /* needs_gl */)) {
+ if (!shared_context_state->MakeCurrent(nullptr, false /* needs_gl */)) {
LOG(ERROR) << "ContextResult::kTransientFailure: "
"Failed to make context current.";
return gpu::ContextResult::kTransientFailure;
diff --git a/chromium/gpu/ipc/service/shared_image_stub.cc b/chromium/gpu/ipc/service/shared_image_stub.cc
index 3b9158c8d4d..30789237258 100644
--- a/chromium/gpu/ipc/service/shared_image_stub.cc
+++ b/chromium/gpu/ipc/service/shared_image_stub.cc
@@ -6,6 +6,7 @@
#include <inttypes.h>
+#include "base/memory/ptr_util.h"
#include "base/trace_event/memory_dump_manager.h"
#include "base/trace_event/trace_event.h"
#include "components/viz/common/features.h"
@@ -13,6 +14,7 @@
#include "gpu/command_buffer/service/scheduler.h"
#include "gpu/command_buffer/service/shared_image_factory.h"
#include "gpu/ipc/common/command_buffer_id.h"
+#include "gpu/ipc/common/gpu_peak_memory.h"
#include "gpu/ipc/service/gpu_channel.h"
#include "gpu/ipc/service/gpu_channel_manager.h"
#include "gpu/ipc/service/gpu_memory_buffer_factory.h"
@@ -162,7 +164,8 @@ void SharedImageStub::OnCreateSharedImage(
}
if (!factory_->CreateSharedImage(params.mailbox, params.format, params.size,
- params.color_space, params.usage)) {
+ params.color_space, gpu::kNullSurfaceHandle,
+ params.usage)) {
LOG(ERROR) << "SharedImageStub: Unable to create shared image";
OnError();
return;
@@ -440,12 +443,15 @@ void SharedImageStub::OnError() {
channel_->OnChannelError();
}
-void SharedImageStub::TrackMemoryAllocatedChange(uint64_t delta) {
+void SharedImageStub::TrackMemoryAllocatedChange(int64_t delta) {
+ DCHECK(delta >= 0 || size_ >= static_cast<uint64_t>(-delta));
uint64_t old_size = size_;
size_ += delta;
channel_->gpu_channel_manager()
->peak_memory_monitor()
- ->OnMemoryAllocatedChange(command_buffer_id_, old_size, size_);
+ ->OnMemoryAllocatedChange(
+ command_buffer_id_, old_size, size_,
+ GpuPeakMemoryAllocationSource::SHARED_IMAGE_STUB);
}
uint64_t SharedImageStub::GetSize() const {
diff --git a/chromium/gpu/ipc/service/shared_image_stub.h b/chromium/gpu/ipc/service/shared_image_stub.h
index 607f9cab7a9..1bc71f842cd 100644
--- a/chromium/gpu/ipc/service/shared_image_stub.h
+++ b/chromium/gpu/ipc/service/shared_image_stub.h
@@ -40,7 +40,7 @@ class GPU_IPC_SERVICE_EXPORT SharedImageStub
bool OnMessageReceived(const IPC::Message& msg) override;
// MemoryTracker implementation:
- void TrackMemoryAllocatedChange(uint64_t delta) override;
+ void TrackMemoryAllocatedChange(int64_t delta) override;
uint64_t GetSize() const override;
uint64_t ClientTracingId() const override;
int ClientId() const override;
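
With TrackMemoryAllocatedChange() now taking a signed delta, allocations and frees share one code path, and the DCHECK above guards against size_ underflowing when a negative delta exceeds the tracked total. The sketch below mirrors that accounting pattern; the class and the reporting hook are hypothetical and not part of the patch.

    #include <cassert>
    #include <cstdint>

    // Hypothetical tracker showing the signed-delta accounting used above.
    class SimpleMemoryTracker {
     public:
      void TrackMemoryAllocatedChange(int64_t delta) {
        // A negative delta must never exceed the currently tracked size.
        assert(delta >= 0 || size_ >= static_cast<uint64_t>(-delta));
        const uint64_t old_size = size_;
        size_ += delta;  // Unsigned modular arithmetic applies the signed delta.
        OnMemoryAllocatedChange(old_size, size_);
      }

     private:
      void OnMemoryAllocatedChange(uint64_t /*old_size*/, uint64_t /*new_size*/) {
        // A real implementation would forward this to a peak-memory monitor.
      }
      uint64_t size_ = 0;
    };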
diff --git a/chromium/gpu/ipc/service/stream_texture_android.cc b/chromium/gpu/ipc/service/stream_texture_android.cc
index 0a6237b8bd4..be5e99b5493 100644
--- a/chromium/gpu/ipc/service/stream_texture_android.cc
+++ b/chromium/gpu/ipc/service/stream_texture_android.cc
@@ -51,14 +51,8 @@ TextureOwner::Mode GetTextureOwnerMode() {
const bool a_image_reader_supported =
base::android::AndroidImageReader::GetInstance().IsSupported();
- // TODO(vikassoni) : Currently we have 2 different flags to enable/disable
- // AImageReader - one for MCVD and other for MediaPlayer here. Merge those 2
- // flags into a single flag. Keeping the 2 flags separate for now since finch
- // experiment using this flag is in progress.
- return a_image_reader_supported && base::FeatureList::IsEnabled(
- features::kAImageReaderMediaPlayer)
- ? TextureOwner::Mode::kAImageReaderInsecure
- : TextureOwner::Mode::kSurfaceTextureInsecure;
+ return a_image_reader_supported ? TextureOwner::Mode::kAImageReaderInsecure
+ : TextureOwner::Mode::kSurfaceTextureInsecure;
}
} // namespace
@@ -165,9 +159,8 @@ bool StreamTexture::HasTextureOwner() const {
return !!texture_owner_;
}
-gles2::Texture* StreamTexture::GetTexture() const {
- DCHECK(texture_owner_);
- return gles2::Texture::CheckedCast(texture_owner_->GetTextureBase());
+TextureBase* StreamTexture::GetTextureBase() const {
+ return texture_owner_->GetTextureBase();
}
void StreamTexture::NotifyOverlayPromotion(bool promotion,
diff --git a/chromium/gpu/ipc/service/stream_texture_android.h b/chromium/gpu/ipc/service/stream_texture_android.h
index 4f3f91f329c..9f364fbb115 100644
--- a/chromium/gpu/ipc/service/stream_texture_android.h
+++ b/chromium/gpu/ipc/service/stream_texture_android.h
@@ -93,7 +93,7 @@ class StreamTexture : public StreamTextureSharedImageInterface,
bool IsUsingGpuMemory() const override;
void UpdateAndBindTexImage() override;
bool HasTextureOwner() const override;
- gles2::Texture* GetTexture() const override;
+ TextureBase* GetTextureBase() const override;
void NotifyOverlayPromotion(bool promotion, const gfx::Rect& bounds) override;
bool RenderToOverlay() override;
diff --git a/chromium/gpu/ipc/shared_image_interface_in_process.cc b/chromium/gpu/ipc/shared_image_interface_in_process.cc
new file mode 100644
index 00000000000..715ea24292b
--- /dev/null
+++ b/chromium/gpu/ipc/shared_image_interface_in_process.cc
@@ -0,0 +1,427 @@
+// Copyright 2019 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "gpu/ipc/shared_image_interface_in_process.h"
+
+#include "base/bind.h"
+#include "gpu/command_buffer/client/gpu_memory_buffer_manager.h"
+#include "gpu/command_buffer/common/sync_token.h"
+#include "gpu/command_buffer/service/mailbox_manager.h"
+#include "gpu/command_buffer/service/shared_image_factory.h"
+#include "gpu/command_buffer/service/sync_point_manager.h"
+#include "gpu/ipc/command_buffer_task_executor.h"
+#include "gpu/ipc/common/gpu_client_ids.h"
+#include "gpu/ipc/single_task_sequence.h"
+#include "ui/gl/gl_context.h"
+
+namespace gpu {
+SharedImageInterfaceInProcess::SharedImageInterfaceInProcess(
+ CommandBufferTaskExecutor* task_executor,
+ SingleTaskSequence* single_task_sequence,
+ CommandBufferId command_buffer_id,
+ MailboxManager* mailbox_manager,
+ ImageFactory* image_factory,
+ MemoryTracker* memory_tracker,
+ std::unique_ptr<CommandBufferHelper> command_buffer_helper)
+ : task_sequence_(single_task_sequence),
+ command_buffer_id_(command_buffer_id),
+ command_buffer_helper_(std::move(command_buffer_helper)),
+ shared_image_manager_(task_executor->shared_image_manager()),
+ mailbox_manager_(mailbox_manager),
+ sync_point_manager_(task_executor->sync_point_manager()) {
+ DETACH_FROM_SEQUENCE(gpu_sequence_checker_);
+ task_sequence_->ScheduleTask(
+ base::BindOnce(&SharedImageInterfaceInProcess::SetUpOnGpu,
+ base::Unretained(this), task_executor, image_factory,
+ memory_tracker),
+ {});
+}
+
+SharedImageInterfaceInProcess::~SharedImageInterfaceInProcess() {
+ base::WaitableEvent completion(
+ base::WaitableEvent::ResetPolicy::MANUAL,
+ base::WaitableEvent::InitialState::NOT_SIGNALED);
+
+ task_sequence_->ScheduleTask(
+ base::BindOnce(&SharedImageInterfaceInProcess::DestroyOnGpu,
+ base::Unretained(this), &completion),
+ {});
+ completion.Wait();
+}
+
+void SharedImageInterfaceInProcess::SetUpOnGpu(
+ CommandBufferTaskExecutor* task_executor,
+ ImageFactory* image_factory,
+ MemoryTracker* memory_tracker) {
+ DCHECK_CALLED_ON_VALID_SEQUENCE(gpu_sequence_checker_);
+
+ context_state_ = task_executor->GetSharedContextState().get();
+ create_factory_ = base::BindOnce(
+ [](CommandBufferTaskExecutor* task_executor, ImageFactory* image_factory,
+ MemoryTracker* memory_tracker, MailboxManager* mailbox_manager,
+ bool enable_wrapped_sk_image) {
+ auto shared_image_factory = std::make_unique<SharedImageFactory>(
+ task_executor->gpu_preferences(),
+ GpuDriverBugWorkarounds(task_executor->gpu_feature_info()
+ .enabled_gpu_driver_bug_workarounds),
+ task_executor->gpu_feature_info(),
+ task_executor->GetSharedContextState().get(), mailbox_manager,
+ task_executor->shared_image_manager(), image_factory,
+ memory_tracker, enable_wrapped_sk_image);
+ return shared_image_factory;
+ },
+ task_executor, image_factory, memory_tracker, mailbox_manager_);
+
+  // Make the SharedImageInterface use the same sequence as the command buffer;
+  // this is necessary for WebView because of its blocking behavior.
+ // TODO(piman): see if it's worth using a different sequence for non-WebView.
+ sync_point_client_state_ = sync_point_manager_->CreateSyncPointClientState(
+ CommandBufferNamespace::IN_PROCESS, command_buffer_id_,
+ task_sequence_->GetSequenceId());
+}
+
+void SharedImageInterfaceInProcess::DestroyOnGpu(
+ base::WaitableEvent* completion) {
+ bool have_context = MakeContextCurrent();
+ if (shared_image_factory_)
+ shared_image_factory_->DestroyAllSharedImages(have_context);
+
+ if (sync_point_client_state_) {
+ sync_point_client_state_->Destroy();
+ sync_point_client_state_ = nullptr;
+ }
+ completion->Signal();
+}
+
+bool SharedImageInterfaceInProcess::MakeContextCurrent() {
+ if (!context_state_)
+ return false;
+
+ if (context_state_->context_lost())
+ return false;
+
+ // |shared_image_factory_| never writes to the surface, so skip unnecessary
+ // MakeCurrent to improve performance. https://crbug.com/457431
+ auto* context = context_state_->real_context();
+ if (context->IsCurrent(nullptr) ||
+ context->MakeCurrent(context_state_->surface()))
+ return true;
+
+ context_state_->MarkContextLost();
+ return false;
+}
+
+void SharedImageInterfaceInProcess::LazyCreateSharedImageFactory() {
+ // This function is always called right after we call MakeContextCurrent().
+ if (shared_image_factory_)
+ return;
+
+ // We need WrappedSkImage to support creating a SharedImage with pixel data
+ // when GL is unavailable. This is used in various unit tests.
+ const bool enable_wrapped_sk_image =
+ command_buffer_helper_ && command_buffer_helper_->EnableWrappedSkImage();
+ shared_image_factory_ =
+ std::move(create_factory_).Run(enable_wrapped_sk_image);
+}
+
+Mailbox SharedImageInterfaceInProcess::CreateSharedImage(
+ viz::ResourceFormat format,
+ const gfx::Size& size,
+ const gfx::ColorSpace& color_space,
+ uint32_t usage,
+ gpu::SurfaceHandle surface_handle) {
+ auto mailbox = Mailbox::GenerateForSharedImage();
+ {
+ base::AutoLock lock(lock_);
+ // Note: we enqueue the task under the lock to guarantee monotonicity of
+ // the release ids as seen by the service. Unretained is safe because
+ // SharedImageInterfaceInProcess synchronizes with the GPU thread at
+ // destruction time, cancelling tasks, before |this| is destroyed.
+ ScheduleGpuTask(
+ base::BindOnce(
+ &SharedImageInterfaceInProcess::CreateSharedImageOnGpuThread,
+ base::Unretained(this), mailbox, format, surface_handle, size,
+ color_space, usage, MakeSyncToken(next_fence_sync_release_++)),
+ {});
+ }
+ return mailbox;
+}
+
+void SharedImageInterfaceInProcess::CreateSharedImageOnGpuThread(
+ const Mailbox& mailbox,
+ viz::ResourceFormat format,
+ gpu::SurfaceHandle surface_handle,
+ const gfx::Size& size,
+ const gfx::ColorSpace& color_space,
+ uint32_t usage,
+ const SyncToken& sync_token) {
+ DCHECK_CALLED_ON_VALID_SEQUENCE(gpu_sequence_checker_);
+ if (!MakeContextCurrent())
+ return;
+
+ LazyCreateSharedImageFactory();
+
+ if (!shared_image_factory_->CreateSharedImage(
+ mailbox, format, size, color_space, surface_handle, usage)) {
+ // Signal errors by losing the command buffer.
+ command_buffer_helper_->SetError();
+ return;
+ }
+ mailbox_manager_->PushTextureUpdates(sync_token);
+ sync_point_client_state_->ReleaseFenceSync(sync_token.release_count());
+}
+
+Mailbox SharedImageInterfaceInProcess::CreateSharedImage(
+ viz::ResourceFormat format,
+ const gfx::Size& size,
+ const gfx::ColorSpace& color_space,
+ uint32_t usage,
+ base::span<const uint8_t> pixel_data) {
+ auto mailbox = Mailbox::GenerateForSharedImage();
+ std::vector<uint8_t> pixel_data_copy(pixel_data.begin(), pixel_data.end());
+ {
+ base::AutoLock lock(lock_);
+ // Note: we enqueue the task under the lock to guarantee monotonicity of
+ // the release ids as seen by the service. Unretained is safe because
+ // InProcessCommandBuffer synchronizes with the GPU thread at destruction
+ // time, cancelling tasks, before |this| is destroyed.
+ ScheduleGpuTask(base::BindOnce(&SharedImageInterfaceInProcess::
+ CreateSharedImageWithDataOnGpuThread,
+ base::Unretained(this), mailbox, format,
+ size, color_space, usage,
+ MakeSyncToken(next_fence_sync_release_++),
+ std::move(pixel_data_copy)),
+ {});
+ }
+ return mailbox;
+}
+
+void SharedImageInterfaceInProcess::CreateSharedImageWithDataOnGpuThread(
+ const Mailbox& mailbox,
+ viz::ResourceFormat format,
+ const gfx::Size& size,
+ const gfx::ColorSpace& color_space,
+ uint32_t usage,
+ const SyncToken& sync_token,
+ std::vector<uint8_t> pixel_data) {
+    VkFormat vk_format) {
+ if (!MakeContextCurrent())
+ return;
+
+ LazyCreateSharedImageFactory();
+
+ if (!shared_image_factory_->CreateSharedImage(
+ mailbox, format, size, color_space, usage, pixel_data)) {
+ // Signal errors by losing the command buffer.
+ command_buffer_helper_->SetError();
+ return;
+ }
+ mailbox_manager_->PushTextureUpdates(sync_token);
+ sync_point_client_state_->ReleaseFenceSync(sync_token.release_count());
+}
+
+Mailbox SharedImageInterfaceInProcess::CreateSharedImage(
+ gfx::GpuMemoryBuffer* gpu_memory_buffer,
+ GpuMemoryBufferManager* gpu_memory_buffer_manager,
+ const gfx::ColorSpace& color_space,
+ uint32_t usage) {
+ DCHECK(gpu_memory_buffer->GetType() == gfx::NATIVE_PIXMAP ||
+ gpu_memory_buffer->GetType() == gfx::ANDROID_HARDWARE_BUFFER ||
+ gpu_memory_buffer_manager);
+
+ // TODO(piman): DCHECK GMB format support.
+ DCHECK(IsImageSizeValidForGpuMemoryBufferFormat(
+ gpu_memory_buffer->GetSize(), gpu_memory_buffer->GetFormat()));
+
+ auto mailbox = Mailbox::GenerateForSharedImage();
+ gfx::GpuMemoryBufferHandle handle = gpu_memory_buffer->CloneHandle();
+ bool requires_sync_token = handle.type == gfx::IO_SURFACE_BUFFER;
+ SyncToken sync_token;
+ {
+ base::AutoLock lock(lock_);
+ sync_token = MakeSyncToken(next_fence_sync_release_++);
+ // Note: we enqueue the task under the lock to guarantee monotonicity of
+ // the release ids as seen by the service. Unretained is safe because
+ // InProcessCommandBuffer synchronizes with the GPU thread at destruction
+ // time, cancelling tasks, before |this| is destroyed.
+ ScheduleGpuTask(
+ base::BindOnce(
+ &SharedImageInterfaceInProcess::CreateGMBSharedImageOnGpuThread,
+ base::Unretained(this), mailbox, std::move(handle),
+ gpu_memory_buffer->GetFormat(), gpu_memory_buffer->GetSize(),
+ color_space, usage, sync_token),
+ {});
+ }
+ if (requires_sync_token) {
+ sync_token.SetVerifyFlush();
+ gpu_memory_buffer_manager->SetDestructionSyncToken(gpu_memory_buffer,
+ sync_token);
+ }
+ return mailbox;
+}
+
+void SharedImageInterfaceInProcess::CreateGMBSharedImageOnGpuThread(
+ const Mailbox& mailbox,
+ gfx::GpuMemoryBufferHandle handle,
+ gfx::BufferFormat format,
+ const gfx::Size& size,
+ const gfx::ColorSpace& color_space,
+ uint32_t usage,
+ const SyncToken& sync_token) {
+ DCHECK_CALLED_ON_VALID_SEQUENCE(gpu_sequence_checker_);
+ if (!MakeContextCurrent())
+ return;
+
+ LazyCreateSharedImageFactory();
+
+ // TODO(piman): add support for SurfaceHandle (for backbuffers for ozone/drm).
+ SurfaceHandle surface_handle = kNullSurfaceHandle;
+ if (!shared_image_factory_->CreateSharedImage(
+ mailbox, kInProcessCommandBufferClientId, std::move(handle), format,
+ surface_handle, size, color_space, usage)) {
+ // Signal errors by losing the command buffer.
+ command_buffer_helper_->SetError();
+ return;
+ }
+ mailbox_manager_->PushTextureUpdates(sync_token);
+ sync_point_client_state_->ReleaseFenceSync(sync_token.release_count());
+}
+
+SharedImageInterface::SwapChainMailboxes
+SharedImageInterfaceInProcess::CreateSwapChain(
+ viz::ResourceFormat format,
+ const gfx::Size& size,
+ const gfx::ColorSpace& color_space,
+ uint32_t usage) {
+ NOTREACHED();
+ return {};
+}
+
+void SharedImageInterfaceInProcess::PresentSwapChain(
+ const SyncToken& sync_token,
+ const Mailbox& mailbox) {
+ NOTREACHED();
+}
+
+#if defined(OS_FUCHSIA)
+void SharedImageInterfaceInProcess::RegisterSysmemBufferCollection(
+ gfx::SysmemBufferCollectionId id,
+ zx::channel token) {
+ NOTREACHED();
+}
+void SharedImageInterfaceInProcess::ReleaseSysmemBufferCollection(
+ gfx::SysmemBufferCollectionId id) {
+ NOTREACHED();
+}
+#endif // defined(OS_FUCHSIA)
+
+void SharedImageInterfaceInProcess::UpdateSharedImage(
+ const SyncToken& sync_token,
+ const Mailbox& mailbox) {
+ UpdateSharedImage(sync_token, nullptr, mailbox);
+}
+
+void SharedImageInterfaceInProcess::UpdateSharedImage(
+ const SyncToken& sync_token,
+ std::unique_ptr<gfx::GpuFence> acquire_fence,
+ const Mailbox& mailbox) {
+ DCHECK(!acquire_fence);
+ base::AutoLock lock(lock_);
+ // Note: we enqueue the task under the lock to guarantee monotonicity of
+ // the release ids as seen by the service. Unretained is safe because
+ // InProcessCommandBuffer synchronizes with the GPU thread at destruction
+ // time, cancelling tasks, before |this| is destroyed.
+ ScheduleGpuTask(
+ base::BindOnce(
+ &SharedImageInterfaceInProcess::UpdateSharedImageOnGpuThread,
+ base::Unretained(this), mailbox,
+ MakeSyncToken(next_fence_sync_release_++)),
+ {sync_token});
+}
+
+void SharedImageInterfaceInProcess::UpdateSharedImageOnGpuThread(
+ const Mailbox& mailbox,
+ const SyncToken& sync_token) {
+ DCHECK_CALLED_ON_VALID_SEQUENCE(gpu_sequence_checker_);
+ if (!MakeContextCurrent())
+ return;
+
+ if (!shared_image_factory_ ||
+ !shared_image_factory_->UpdateSharedImage(mailbox)) {
+ // Signal errors by losing the command buffer.
+ command_buffer_helper_->SetError();
+ return;
+ }
+ mailbox_manager_->PushTextureUpdates(sync_token);
+ sync_point_client_state_->ReleaseFenceSync(sync_token.release_count());
+}
+
+void SharedImageInterfaceInProcess::DestroySharedImage(
+ const SyncToken& sync_token,
+ const Mailbox& mailbox) {
+ // Use sync token dependency to ensure that the destroy task does not run
+ // before sync token is released.
+ ScheduleGpuTask(
+ base::BindOnce(
+ &SharedImageInterfaceInProcess::DestroySharedImageOnGpuThread,
+ base::Unretained(this), mailbox),
+ {sync_token});
+}
+
+void SharedImageInterfaceInProcess::DestroySharedImageOnGpuThread(
+ const Mailbox& mailbox) {
+ DCHECK_CALLED_ON_VALID_SEQUENCE(gpu_sequence_checker_);
+ if (!MakeContextCurrent())
+ return;
+
+ if (!shared_image_factory_ ||
+ !shared_image_factory_->DestroySharedImage(mailbox)) {
+ // Signal errors by losing the command buffer.
+ command_buffer_helper_->SetError();
+ }
+}
+
+SyncToken SharedImageInterfaceInProcess::GenUnverifiedSyncToken() {
+ base::AutoLock lock(lock_);
+ return MakeSyncToken(next_fence_sync_release_ - 1);
+}
+
+SyncToken SharedImageInterfaceInProcess::GenVerifiedSyncToken() {
+ base::AutoLock lock(lock_);
+ SyncToken sync_token = MakeSyncToken(next_fence_sync_release_ - 1);
+ sync_token.SetVerifyFlush();
+ return sync_token;
+}
+
+void SharedImageInterfaceInProcess::Flush() {
+ // No need to flush in this implementation.
+}
+
+scoped_refptr<gfx::NativePixmap> SharedImageInterfaceInProcess::GetNativePixmap(
+ const gpu::Mailbox& mailbox) {
+ DCHECK(shared_image_manager_->is_thread_safe());
+ return shared_image_manager_->GetNativePixmap(mailbox);
+}
+
+void SharedImageInterfaceInProcess::WrapTaskWithGpuUrl(base::OnceClosure task) {
+ if (command_buffer_helper_) {
+ command_buffer_helper_->WrapTaskWithGpuCheck(std::move(task));
+ } else {
+ std::move(task).Run();
+ }
+}
+
+void SharedImageInterfaceInProcess::ScheduleGpuTask(
+ base::OnceClosure task,
+ std::vector<SyncToken> sync_token_fences) {
+ base::OnceClosure gpu_task =
+ base::BindOnce(&SharedImageInterfaceInProcess::WrapTaskWithGpuUrl,
+ base::Unretained(this), std::move(task));
+
+ task_sequence_->ScheduleTask(std::move(gpu_task),
+ std::move(sync_token_fences));
+}
+
+} // namespace gpu
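
The recurring comment about enqueuing under the lock captures the key invariant of this file: release ids are reserved in the same order the corresponding GPU tasks are scheduled, so fence syncs are released monotonically on the service side. A standalone sketch of that reserve-then-release pattern (names are illustrative; this is not part of the patch):

    #include <cstdint>
    #include <mutex>

    // Hypothetical allocator illustrating the release-id pattern: an id is
    // reserved under a lock on the calling thread, in the same order the
    // matching GPU task is scheduled, and released when that task runs.
    class ReleaseIdAllocator {
     public:
      // Any thread: reserve the release id to embed in the task's sync token.
      uint64_t ReserveNextRelease() {
        std::lock_guard<std::mutex> lock(lock_);
        return next_release_++;
      }

      // GPU thread: the scheduled task releases its id; ids arrive in order
      // because reservation and scheduling happen under the same lock.
      void ReleaseFenceSync(uint64_t release_id) { last_released_ = release_id; }

     private:
      std::mutex lock_;
      uint64_t next_release_ = 1;
      uint64_t last_released_ = 0;
    };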
diff --git a/chromium/gpu/ipc/shared_image_interface_in_process.h b/chromium/gpu/ipc/shared_image_interface_in_process.h
new file mode 100644
index 00000000000..60b1a3dc318
--- /dev/null
+++ b/chromium/gpu/ipc/shared_image_interface_in_process.h
@@ -0,0 +1,223 @@
+// Copyright 2019 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef GPU_IPC_SHARED_IMAGE_INTERFACE_IN_PROCESS_H_
+#define GPU_IPC_SHARED_IMAGE_INTERFACE_IN_PROCESS_H_
+
+#include "build/build_config.h"
+#include "gpu/command_buffer/client/shared_image_interface.h"
+#include "gpu/command_buffer/common/command_buffer_id.h"
+#include "gpu/ipc/in_process_command_buffer.h"
+
+namespace gpu {
+class CommandBufferTaskExecutor;
+class ImageFactory;
+class MailboxManager;
+class MemoryTracker;
+class SyncPointClientState;
+struct SyncToken;
+class SharedContextState;
+class SharedImageFactory;
+class SharedImageManager;
+class SingleTaskSequence;
+
+// This is an implementation of the SharedImageInterface to be used on the viz
+// compositor thread. This class also implements the corresponding parts that
+// run on the gpu thread.
+// TODO(weiliangc): Currently this is implemented as backed by
+// InProcessCommandBuffer. Add constructor for using with SkiaRenderer.
+class GL_IN_PROCESS_CONTEXT_EXPORT SharedImageInterfaceInProcess
+ : public SharedImageInterface {
+ public:
+ using CommandBufferHelper =
+ InProcessCommandBuffer::SharedImageInterfaceHelper;
+ SharedImageInterfaceInProcess(
+ CommandBufferTaskExecutor* task_executor,
+ SingleTaskSequence* task_sequence,
+ CommandBufferId command_buffer_id,
+ MailboxManager* mailbox_manager,
+ ImageFactory* image_factory,
+ MemoryTracker* memory_tracker,
+ std::unique_ptr<CommandBufferHelper> command_buffer_helper);
+ ~SharedImageInterfaceInProcess() override;
+
+ // The |SharedImageInterface| keeps ownership of the image until
+ // |DestroySharedImage| is called or the interface itself is destroyed (e.g.
+ // the GPU channel is lost).
+ Mailbox CreateSharedImage(
+ viz::ResourceFormat format,
+ const gfx::Size& size,
+ const gfx::ColorSpace& color_space,
+ uint32_t usage,
+ gpu::SurfaceHandle surface_handle = gpu::kNullSurfaceHandle) override;
+
+ // Same behavior as the above, except that this version takes |pixel_data|
+ // which is used to populate the SharedImage. |pixel_data| should have the
+ // same format which would be passed to glTexImage2D to populate a similarly
+ // specified texture.
+ Mailbox CreateSharedImage(viz::ResourceFormat format,
+ const gfx::Size& size,
+ const gfx::ColorSpace& color_space,
+ uint32_t usage,
+ base::span<const uint8_t> pixel_data) override;
+
+ // |usage| is a combination of |SharedImageUsage| bits that describes which
+ // API(s) the image will be used with. Format and size are derived from the
+ // GpuMemoryBuffer. |gpu_memory_buffer_manager| is the manager that created
+ // |gpu_memory_buffer|. If the |gpu_memory_buffer| was created on the client
+ // side (for NATIVE_PIXMAP or ANDROID_HARDWARE_BUFFER types only), without a
+ // GpuMemoryBufferManager, |gpu_memory_buffer_manager| can be nullptr.
+ // If valid, |color_space| will be applied to the shared
+ // image (possibly overwriting the one set on the GpuMemoryBuffer).
+ // The |SharedImageInterface| keeps ownership of the image until
+ // |DestroySharedImage| is called or the interface itself is destroyed (e.g.
+ // the GPU channel is lost).
+ Mailbox CreateSharedImage(gfx::GpuMemoryBuffer* gpu_memory_buffer,
+ GpuMemoryBufferManager* gpu_memory_buffer_manager,
+ const gfx::ColorSpace& color_space,
+ uint32_t usage) override;
+
+ // Updates a shared image after its GpuMemoryBuffer (if any) was modified on
+ // the CPU or through external devices, after |sync_token| has been released.
+ void UpdateSharedImage(const SyncToken& sync_token,
+ const Mailbox& mailbox) override;
+
+ // Updates a shared image after its GpuMemoryBuffer (if any) was modified on
+ // the CPU or through external devices, after |sync_token| has been released.
+ // If |acquire_fence| is not null, the fence is inserted in the GPU command
+ // stream and a server side wait is issued before any GPU command referring
+ // to this shared imaged is executed on the GPU.
+ void UpdateSharedImage(const SyncToken& sync_token,
+ std::unique_ptr<gfx::GpuFence> acquire_fence,
+ const Mailbox& mailbox) override;
+
+ // Destroys the shared image, unregistering its mailbox, after |sync_token|
+ // has been released. After this call, the mailbox can't be used to reference
+ // the image any more, however if the image was imported into other APIs,
+ // those may keep a reference to the underlying data.
+ void DestroySharedImage(const SyncToken& sync_token,
+ const Mailbox& mailbox) override;
+
+ // Creates a swap chain. Not reached in this implementation.
+ SwapChainMailboxes CreateSwapChain(viz::ResourceFormat format,
+ const gfx::Size& size,
+ const gfx::ColorSpace& color_space,
+ uint32_t usage) override;
+
+ // Swaps front and back buffer of a swap chain. Not reached in this
+ // implementation.
+ void PresentSwapChain(const SyncToken& sync_token,
+ const Mailbox& mailbox) override;
+
+#if defined(OS_FUCHSIA)
+ // Registers a sysmem buffer collection. Not reached in this implementation.
+ void RegisterSysmemBufferCollection(gfx::SysmemBufferCollectionId id,
+ zx::channel token) override;
+
+ // Not reached in this implementation.
+ void ReleaseSysmemBufferCollection(gfx::SysmemBufferCollectionId id) override;
+#endif // defined(OS_FUCHSIA)
+
+ // Generates an unverified SyncToken that is released after all previous
+ // commands on this interface have executed on the service side.
+ SyncToken GenUnverifiedSyncToken() override;
+
+ // Generates a verified SyncToken that is released after all previous
+ // commands on this interface have executed on the service side.
+ SyncToken GenVerifiedSyncToken() override;
+
+ // Flush the SharedImageInterface, issuing any deferred IPCs.
+ void Flush() override;
+
+ scoped_refptr<gfx::NativePixmap> GetNativePixmap(
+ const gpu::Mailbox& mailbox) override;
+
+ private:
+ struct SharedImageFactoryInput;
+
+ void SetUpOnGpu(CommandBufferTaskExecutor* task_executor,
+ ImageFactory* image_factory,
+ MemoryTracker* memory_tracker);
+ void DestroyOnGpu(base::WaitableEvent* completion);
+
+ SyncToken MakeSyncToken(uint64_t release_id) {
+ return SyncToken(CommandBufferNamespace::IN_PROCESS, command_buffer_id_,
+ release_id);
+ }
+
+ void ScheduleGpuTask(base::OnceClosure task,
+ std::vector<SyncToken> sync_token_fences);
+
+ // Only called on the gpu thread.
+ bool MakeContextCurrent();
+ void LazyCreateSharedImageFactory();
+ void CreateSharedImageOnGpuThread(const Mailbox& mailbox,
+ viz::ResourceFormat format,
+ gpu::SurfaceHandle surface_handle,
+ const gfx::Size& size,
+ const gfx::ColorSpace& color_space,
+ uint32_t usage,
+ const SyncToken& sync_token);
+ void CreateSharedImageWithDataOnGpuThread(const Mailbox& mailbox,
+ viz::ResourceFormat format,
+ const gfx::Size& size,
+ const gfx::ColorSpace& color_space,
+ uint32_t usage,
+ const SyncToken& sync_token,
+ std::vector<uint8_t> pixel_data);
+
+ void CreateGMBSharedImageOnGpuThread(const Mailbox& mailbox,
+ gfx::GpuMemoryBufferHandle handle,
+ gfx::BufferFormat format,
+ const gfx::Size& size,
+ const gfx::ColorSpace& color_space,
+ uint32_t usage,
+ const SyncToken& sync_token);
+ void UpdateSharedImageOnGpuThread(const Mailbox& mailbox,
+ const SyncToken& sync_token);
+ void DestroySharedImageOnGpuThread(const Mailbox& mailbox);
+ void WrapTaskWithGpuUrl(base::OnceClosure task);
+
+  // Used to schedule work on the gpu thread. This is a raw pointer for now,
+  // since the SingleTaskSequence has the same ownership as the
+  // SharedImageInterfaceInProcess.
+ SingleTaskSequence* task_sequence_;
+ const CommandBufferId command_buffer_id_;
+ std::unique_ptr<CommandBufferHelper> command_buffer_helper_;
+
+ base::OnceCallback<std::unique_ptr<SharedImageFactory>(
+ bool enable_wrapped_sk_image)>
+ create_factory_;
+
+ // Sequence checker for tasks that run on the gpu "thread".
+ SEQUENCE_CHECKER(gpu_sequence_checker_);
+
+  // Accessed on any thread. lock_ protects access to
+  // next_fence_sync_release_.
+ base::Lock lock_;
+ uint64_t next_fence_sync_release_ = 1;
+
+ // Accessed on compositor thread.
+ // This is used to get NativePixmap, and is only used when SharedImageManager
+ // is thread safe.
+ SharedImageManager* shared_image_manager_;
+
+ // Accessed on GPU thread.
+  // TODO(weiliangc): Check whether this can be removed when !UsesSync().
+ MailboxManager* mailbox_manager_;
+ // Used to check if context is lost at destruction time.
+ // TODO(weiliangc): SharedImageInterface should become active observer of
+ // whether context is lost.
+ SharedContextState* context_state_;
+ // Created and only used by this SharedImageInterface.
+ SyncPointManager* sync_point_manager_;
+ scoped_refptr<SyncPointClientState> sync_point_client_state_;
+ std::unique_ptr<SharedImageFactory> shared_image_factory_;
+
+ DISALLOW_COPY_AND_ASSIGN(SharedImageInterfaceInProcess);
+};
+
+} // namespace gpu
+
+#endif // GPU_IPC_SHARED_IMAGE_INTERFACE_IN_PROCESS_H_
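
Taken together, the interface declared above is intended to be used from the viz compositor thread much like the IPC-backed SharedImageInterface. A hedged usage sketch, assuming the standard gpu/viz/gfx headers and a valid interface pointer (illustrative only, not part of the patch):

    // Create a shared image, obtain a sync token for consumers, and destroy
    // the image once that token has been released.
    void CreateUseAndDestroy(gpu::SharedImageInterfaceInProcess* sii) {
      gpu::Mailbox mailbox = sii->CreateSharedImage(
          viz::ResourceFormat::RGBA_8888, gfx::Size(256, 256),
          gfx::ColorSpace::CreateSRGB(), gpu::SHARED_IMAGE_USAGE_GLES2);
      gpu::SyncToken sync_token = sii->GenUnverifiedSyncToken();
      // ... hand |mailbox| and |sync_token| to a consuming context ...
      sii->DestroySharedImage(sync_token, mailbox);
    }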
diff --git a/chromium/gpu/ipc/webgpu_in_process_context.cc b/chromium/gpu/ipc/webgpu_in_process_context.cc
index eb52db7a902..1ee997439c1 100644
--- a/chromium/gpu/ipc/webgpu_in_process_context.cc
+++ b/chromium/gpu/ipc/webgpu_in_process_context.cc
@@ -61,7 +61,7 @@ ContextResult WebGPUInProcessContext::Initialize(
auto result = command_buffer_->Initialize(
surface, is_offscreen, kNullSurfaceHandle, attribs,
gpu_memory_buffer_manager, image_factory, gpu_channel_manager_delegate,
- client_task_runner_, nullptr, nullptr);
+ client_task_runner_, nullptr /* task_sequence */, nullptr, nullptr);
if (result != ContextResult::kSuccess) {
DLOG(ERROR) << "Failed to initialize InProcessCommmandBuffer";
return result;
@@ -95,7 +95,7 @@ const GpuFeatureInfo& WebGPUInProcessContext::GetGpuFeatureInfo() const {
return command_buffer_->GetGpuFeatureInfo();
}
-webgpu::WebGPUInterface* WebGPUInProcessContext::GetImplementation() {
+webgpu::WebGPUImplementation* WebGPUInProcessContext::GetImplementation() {
return webgpu_implementation_.get();
}
diff --git a/chromium/gpu/ipc/webgpu_in_process_context.h b/chromium/gpu/ipc/webgpu_in_process_context.h
index da208b87bc7..b20972003ac 100644
--- a/chromium/gpu/ipc/webgpu_in_process_context.h
+++ b/chromium/gpu/ipc/webgpu_in_process_context.h
@@ -25,7 +25,6 @@ struct GpuFeatureInfo;
struct SharedMemoryLimits;
namespace webgpu {
-class WebGPUInterface;
class WebGPUImplementation;
} // namespace webgpu
@@ -52,7 +51,7 @@ class WebGPUInProcessContext {
// Allows direct access to the WebGPUImplementation so a
// WebGPUInProcessContext can be used without making it current.
- gpu::webgpu::WebGPUInterface* GetImplementation();
+ gpu::webgpu::WebGPUImplementation* GetImplementation();
base::TestSimpleTaskRunner* GetTaskRunner();
// Test only functions.
diff --git a/chromium/gpu/khronos_glcts_support/BUILD.gn b/chromium/gpu/khronos_glcts_support/BUILD.gn
index ace65c5fe95..067641d0eeb 100644
--- a/chromium/gpu/khronos_glcts_support/BUILD.gn
+++ b/chromium/gpu/khronos_glcts_support/BUILD.gn
@@ -13,12 +13,8 @@ if (internal_khronos_glcts_tests) {
[ "//third_party/khronos_glcts/GTF_ES/glsl/GTF/mustpass_es20.run" ]
copy("glcts_resources") {
- sources = [
- "//third_party/khronos_glcts/cts/data",
- ]
- outputs = [
- "$root_out_dir/khronos_glcts_data/gl_cts/{{source_file_part}}",
- ]
+ sources = [ "//third_party/khronos_glcts/cts/data" ]
+ outputs = [ "$root_out_dir/khronos_glcts_data/gl_cts/{{source_file_part}}" ]
}
copy("glcts_gtf_resources") {
@@ -29,17 +25,14 @@ if (internal_khronos_glcts_tests) {
"//third_party/khronos_glcts/GTF_ES/glsl/GTF/GL2Tests",
"//third_party/khronos_glcts/GTF_ES/glsl/GTF/GLCoverage",
] + glcts_gtf_runfiles
- outputs = [
- "$root_out_dir/khronos_glcts_data/gl_cts/GTF/{{source_file_part}}",
- ]
+ outputs =
+ [ "$root_out_dir/khronos_glcts_data/gl_cts/GTF/{{source_file_part}}" ]
}
action("generate_khronos_glcts_tests") {
script = "generate_khronos_glcts_tests.py"
sources = [ "khronos_glcts_test.h" ] + glcts_gtf_runfiles
- outputs = [
- "$target_gen_dir/khronos_glcts_test_autogen.cc",
- ]
+ outputs = [ "$target_gen_dir/khronos_glcts_test_autogen.cc" ]
args = [ "--outdir=" + rebase_path("$target_gen_dir") ] + glcts_gtf_runfiles
}
@@ -118,9 +111,7 @@ if (internal_khronos_glcts_tests) {
"//third_party/khronos_glcts/framework/delibs/depool/dePoolTest.h",
]
- deps = [
- ":debase",
- ]
+ deps = [ ":debase" ]
configs += [ ":defaults_config" ]
configs -= [ "//build/config/compiler:chromium_code" ]
@@ -146,9 +137,7 @@ if (internal_khronos_glcts_tests) {
"//third_party/khronos_glcts/framework/delibs/dethread/deThreadTest.h",
]
- deps = [
- ":debase",
- ]
+ deps = [ ":debase" ]
configs += [ ":defaults_config" ]
configs -= [ "//build/config/compiler:chromium_code" ]
@@ -390,11 +379,10 @@ if (internal_khronos_glcts_tests) {
"//third_party/libpng:libpng",
]
- public_deps = [
- ":qphelper",
- ]
+ public_deps = [ ":qphelper" ]
- include_dirs = [ "//third_party/khronos_glcts/framework/delibs/libpng" ] #png.hpp
+ include_dirs =
+ [ "//third_party/khronos_glcts/framework/delibs/libpng" ] # png.hpp
configs += [ ":defaults_config" ]
configs -= [ "//build/config/compiler:chromium_code" ]
@@ -485,9 +473,7 @@ if (internal_khronos_glcts_tests) {
"//third_party/khronos_glcts/framework/opengl/gluVarTypeUtil.hpp",
]
- public_deps = [
- ":glwrapper",
- ]
+ public_deps = [ ":glwrapper" ]
deps = [
":delibs",
@@ -884,9 +870,7 @@ if (internal_khronos_glcts_tests) {
"//third_party/khronos_glcts/cts/gles2/es2cTestPackage.hpp",
]
- public_deps = [
- ":glcts_common",
- ]
+ public_deps = [ ":glcts_common" ]
deps = [
":delibs",
@@ -907,13 +891,9 @@ if (internal_khronos_glcts_tests) {
}
source_set("tcutil_platform_windowless") {
- sources = [
- "native/egl_native_windowless.cc",
- ]
+ sources = [ "native/egl_native_windowless.cc" ]
- deps = [
- ":khronos_glcts_framework",
- ]
+ deps = [ ":khronos_glcts_framework" ]
configs -= [ "//build/config/compiler:no_rtti" ]
configs += [ "//build/config/compiler:rtti" ]
@@ -956,9 +936,7 @@ if (!is_android) {
"//testing/gtest",
]
- data = [
- "khronos_glcts_test_expectations.txt",
- ]
+ data = [ "khronos_glcts_test_expectations.txt" ]
if (internal_khronos_glcts_tests) {
sources += [ "$target_gen_dir/khronos_glcts_test_autogen.cc" ]
diff --git a/chromium/gpu/skia_bindings/gl_bindings_skia_cmd_buffer.cc b/chromium/gpu/skia_bindings/gl_bindings_skia_cmd_buffer.cc
index a6d8044934d..12bef6d06e4 100644
--- a/chromium/gpu/skia_bindings/gl_bindings_skia_cmd_buffer.cc
+++ b/chromium/gpu/skia_bindings/gl_bindings_skia_cmd_buffer.cc
@@ -376,58 +376,6 @@ sk_sp<GrGLInterface> CreateGLES2InterfaceBindings(
&GLES2Interface::BlitFramebufferCHROMIUM, impl, context_support);
functions->fGenerateMipmap =
gles_bind(&GLES2Interface::GenerateMipmap, impl, context_support);
- functions->fMatrixLoadf =
- gles_bind(&GLES2Interface::MatrixLoadfCHROMIUM, impl, context_support);
- functions->fMatrixLoadIdentity = gles_bind(
- &GLES2Interface::MatrixLoadIdentityCHROMIUM, impl, context_support);
- functions->fPathCommands =
- gles_bind(&GLES2Interface::PathCommandsCHROMIUM, impl, context_support);
- functions->fPathParameteri =
- gles_bind(&GLES2Interface::PathParameteriCHROMIUM, impl, context_support);
- functions->fPathParameterf =
- gles_bind(&GLES2Interface::PathParameterfCHROMIUM, impl, context_support);
- functions->fGenPaths =
- gles_bind(&GLES2Interface::GenPathsCHROMIUM, impl, context_support);
- functions->fIsPath =
- gles_bind(&GLES2Interface::IsPathCHROMIUM, impl, context_support);
- functions->fDeletePaths =
- gles_bind(&GLES2Interface::DeletePathsCHROMIUM, impl, context_support);
- functions->fPathStencilFunc = gles_bind(
- &GLES2Interface::PathStencilFuncCHROMIUM, impl, context_support);
- functions->fStencilFillPath = gles_bind(
- &GLES2Interface::StencilFillPathCHROMIUM, impl, context_support);
- functions->fStencilStrokePath = gles_bind(
- &GLES2Interface::StencilStrokePathCHROMIUM, impl, context_support);
- functions->fCoverFillPath =
- gles_bind(&GLES2Interface::CoverFillPathCHROMIUM, impl, context_support);
- functions->fCoverStrokePath = gles_bind(
- &GLES2Interface::CoverStrokePathCHROMIUM, impl, context_support);
- functions->fStencilThenCoverFillPath = gles_bind(
- &GLES2Interface::StencilThenCoverFillPathCHROMIUM, impl, context_support);
- functions->fStencilThenCoverStrokePath =
- gles_bind(&GLES2Interface::StencilThenCoverStrokePathCHROMIUM, impl,
- context_support);
- functions->fStencilFillPathInstanced = gles_bind(
- &GLES2Interface::StencilFillPathInstancedCHROMIUM, impl, context_support);
- functions->fStencilStrokePathInstanced =
- gles_bind(&GLES2Interface::StencilStrokePathInstancedCHROMIUM, impl,
- context_support);
- functions->fCoverFillPathInstanced = gles_bind(
- &GLES2Interface::CoverFillPathInstancedCHROMIUM, impl, context_support);
- functions->fCoverStrokePathInstanced = gles_bind(
- &GLES2Interface::CoverStrokePathInstancedCHROMIUM, impl, context_support);
- functions->fStencilThenCoverFillPathInstanced =
- gles_bind(&GLES2Interface::StencilThenCoverFillPathInstancedCHROMIUM,
- impl, context_support);
- functions->fStencilThenCoverStrokePathInstanced =
- gles_bind(&GLES2Interface::StencilThenCoverStrokePathInstancedCHROMIUM,
- impl, context_support);
- functions->fProgramPathFragmentInputGen =
- gles_bind(&GLES2Interface::ProgramPathFragmentInputGenCHROMIUM, impl,
- context_support);
- functions->fBindFragmentInputLocation =
- gles_bind(&GLES2Interface::BindFragmentInputLocationCHROMIUM, impl,
- context_support);
functions->fCoverageModulation = gles_bind(
&GLES2Interface::CoverageModulationCHROMIUM, impl, context_support);
functions->fWindowRectangles =
diff --git a/chromium/gpu/skia_bindings/gles2_implementation_with_grcontext_support.cc b/chromium/gpu/skia_bindings/gles2_implementation_with_grcontext_support.cc
index 1d1e0c62aaa..1954b772a87 100644
--- a/chromium/gpu/skia_bindings/gles2_implementation_with_grcontext_support.cc
+++ b/chromium/gpu/skia_bindings/gles2_implementation_with_grcontext_support.cc
@@ -394,21 +394,6 @@ void GLES2ImplementationWithGrContextSupport::ColorMask(GLboolean red,
ResetGrContextIfNeeded(kMisc_GrGLBackendState);
}
-// Calls that invalidate kPathRendering_GrGLBackendState
-void GLES2ImplementationWithGrContextSupport::PathStencilFuncCHROMIUM(
- GLenum func,
- GLint ref,
- GLuint mask) {
- BaseClass::PathStencilFuncCHROMIUM(func, ref, mask);
- ResetGrContextIfNeeded(kPathRendering_GrGLBackendState);
-}
-void GLES2ImplementationWithGrContextSupport::MatrixLoadfCHROMIUM(
- GLenum matrixMode,
- const GLfloat* m) {
- BaseClass::MatrixLoadfCHROMIUM(matrixMode, m);
- ResetGrContextIfNeeded(kPathRendering_GrGLBackendState);
-}
-
// Calls that invalidate many flags
void GLES2ImplementationWithGrContextSupport::BindBuffer(GLenum target,
GLuint buffer) {
diff --git a/chromium/gpu/skia_bindings/gles2_implementation_with_grcontext_support.h b/chromium/gpu/skia_bindings/gles2_implementation_with_grcontext_support.h
index 01caceaea44..2538ac39af5 100644
--- a/chromium/gpu/skia_bindings/gles2_implementation_with_grcontext_support.h
+++ b/chromium/gpu/skia_bindings/gles2_implementation_with_grcontext_support.h
@@ -162,11 +162,6 @@ class GLES2ImplementationWithGrContextSupport
GLboolean blue,
GLboolean alpha) override;
- // Calls that invalidate kPathRendering_GrGLBackendState
- void PathStencilFuncCHROMIUM(GLenum func, GLint ref, GLuint mask) override;
- void MatrixLoadfCHROMIUM(GLenum matrixMode, const GLfloat* m) override;
- // Note: MatrixLoadIdentity omitted on purpose
-
// Calls that invalidate different bits, depending on args
void BindBuffer(GLenum target, GLuint buffer) override;
void BindBufferBase(GLenum target, GLuint index, GLuint buffer) override;
diff --git a/chromium/gpu/vulkan/BUILD.gn b/chromium/gpu/vulkan/BUILD.gn
index 64500bfc7fc..990a586ace8 100644
--- a/chromium/gpu/vulkan/BUILD.gn
+++ b/chromium/gpu/vulkan/BUILD.gn
@@ -22,6 +22,12 @@ if (enable_vulkan) {
if (use_vulkan_xlib) {
defines = [ "USE_VULKAN_XLIB" ]
}
+ if (is_android) {
+ defines = [ "VK_USE_PLATFORM_ANDROID_KHR" ]
+ }
+ if (is_fuchsia) {
+ defines = [ "VK_USE_PLATFORM_FUCHSIA" ]
+ }
}
jumbo_component("vulkan") {
@@ -41,6 +47,8 @@ if (enable_vulkan) {
"vulkan_fence_helper.h",
"vulkan_function_pointers.cc",
"vulkan_function_pointers.h",
+ "vulkan_image.cc",
+ "vulkan_image.h",
"vulkan_implementation.cc",
"vulkan_implementation.h",
"vulkan_instance.cc",
@@ -57,32 +65,41 @@ if (enable_vulkan) {
public_configs = [ ":vulkan_config" ]
defines = [ "VULKAN_IMPLEMENTATION" ]
- if (is_android) {
- defines += [ "VK_USE_PLATFORM_ANDROID_KHR" ]
- }
all_dependent_configs = [ "//third_party/vulkan:vulkan_config" ]
deps = [
"//base",
+ "//base/util/type_safety",
+ "//gpu/ipc/common:vulkan_ycbcr_info",
"//ui/gfx",
]
- public_deps = [
- "//gpu/config:vulkan_info",
- ]
+ public_deps = [ "//gpu/config:vulkan_info" ]
data_deps = []
if (is_posix) {
sources += [
+ "vulkan_image_linux.cc",
"vulkan_posix_util.cc",
"vulkan_posix_util.h",
]
}
+ if (is_android) {
+ sources += [ "vulkan_image_android.cc" ]
+ }
+
+ if (is_win) {
+ sources += [ "vulkan_image_win.cc" ]
+ }
+
if (is_fuchsia) {
- sources += [ "fuchsia/vulkan_fuchsia_ext.h" ]
+ sources += [
+ "fuchsia/vulkan_fuchsia_ext.h",
+ "vulkan_image_fuchsia.cc",
+ ]
- public_deps += [ "//third_party/fuchsia-sdk/sdk:zx" ]
+ public_deps += [ "//third_party/fuchsia-sdk/sdk/pkg/zx" ]
data_deps += [ "//third_party/fuchsia-sdk:vulkan_base" ]
@@ -98,23 +115,45 @@ if (enable_vulkan) {
jumbo_static_library("test_support") {
testonly = true
- sources = [
- "tests/native_window.h",
- ]
+ sources = [ "tests/native_window.h" ]
deps = [
"//ui/gfx",
"//ui/gfx:native_widget_types",
]
- if (use_x11) {
- sources += [ "tests/native_window_x11.cc" ]
- deps += [ "//ui/gfx/x" ]
- configs += [ "//build/config/linux:x11" ]
+
+ if (use_x11 && !use_ozone) {
+ sources += [ "tests/native_window.cc" ]
+ deps += [
+ "//ui/platform_window",
+ "//ui/platform_window/x11",
+ ]
+ }
+
+ if (is_win) {
+ sources += [ "tests/native_window_win.cc" ]
+ }
+
+ if (use_ozone) {
+ sources += [ "tests/native_window.cc" ]
+ deps += [
+ "//ui/ozone",
+ "//ui/platform_window",
+ ]
+ }
+
+ if (is_android) {
+ sources += [ "tests/native_window_android.cc" ]
+ deps += [
+ "//ui/android:ui_java",
+ "//ui/gl",
+ ]
}
}
- # TODO(cblume): These tests should run on each platform -- crbug.com/858614
- if (use_x11) {
+ # TODO(penghuang): support more platforms
+ # https://crbug.com/1065499
+ if (is_android || use_x11 || is_win || use_ozone) {
test("vulkan_tests") {
sources = [
"tests/basic_vulkan_test.cc",
@@ -132,10 +171,19 @@ if (enable_vulkan) {
"//gpu/vulkan/init",
"//testing/gmock",
"//testing/gtest",
+ "//ui/events/platform",
"//ui/gfx",
"//ui/gfx:native_widget_types",
"//ui/gfx/geometry",
]
+
+ if (use_x11) {
+ deps += [ "//ui/events/platform/x11" ]
+ }
+
+ if (use_ozone) {
+ deps += [ "//ui/ozone" ]
+ }
}
}
}
diff --git a/chromium/gpu/vulkan/android/BUILD.gn b/chromium/gpu/vulkan/android/BUILD.gn
index b56b34b1327..1c02fa8f857 100644
--- a/chromium/gpu/vulkan/android/BUILD.gn
+++ b/chromium/gpu/vulkan/android/BUILD.gn
@@ -44,6 +44,7 @@ test("vk_tests") {
"//base/test:test_support",
"//components/viz/common:vulkan_context_provider",
"//testing/gtest",
+ "//ui/gfx",
]
sources = [
"run_all_unittests.cc",
diff --git a/chromium/gpu/vulkan/android/vulkan_android_unittests.cc b/chromium/gpu/vulkan/android/vulkan_android_unittests.cc
index e9bfdbd3857..9bd5983d050 100644
--- a/chromium/gpu/vulkan/android/vulkan_android_unittests.cc
+++ b/chromium/gpu/vulkan/android/vulkan_android_unittests.cc
@@ -9,6 +9,7 @@
#include "components/viz/common/gpu/vulkan_in_process_context_provider.h"
#include "gpu/vulkan/android/vulkan_implementation_android.h"
#include "gpu/vulkan/vulkan_function_pointers.h"
+#include "gpu/vulkan/vulkan_image.h"
#include "gpu/vulkan/vulkan_util.h"
#include "testing/gtest/include/gtest/gtest.h"
@@ -127,18 +128,15 @@ TEST_F(VulkanImplementationAndroidTest, CreateVkImageFromAHB) {
// Create a vkimage and import the AHB into it.
const gfx::Size size(hwb_desc.width, hwb_desc.height);
- VkImage vk_image;
- VkImageCreateInfo vk_image_info;
- VkDeviceMemory vk_device_memory;
- VkDeviceSize mem_allocation_size;
- EXPECT_TRUE(vk_implementation_->CreateVkImageAndImportAHB(
- vk_device_, vk_phy_device_, size,
- base::android::ScopedHardwareBufferHandle::Adopt(buffer), &vk_image,
- &vk_image_info, &vk_device_memory, &mem_allocation_size));
-
- // Free up resources.
- vkDestroyImage(vk_device_, vk_image, nullptr);
- vkFreeMemory(vk_device_, vk_device_memory, nullptr);
+ auto* device_queue = vk_context_provider_->GetDeviceQueue();
+ auto handle = base::android::ScopedHardwareBufferHandle::Adopt(buffer);
+ gfx::GpuMemoryBufferHandle gmb_handle(std::move(handle));
+ auto vulkan_image = VulkanImage::CreateFromGpuMemoryBufferHandle(
+ device_queue, std::move(gmb_handle), size, VK_FORMAT_R8G8B8A8_UNORM,
+ 0 /* usage */);
+
+ EXPECT_TRUE(vulkan_image);
+ vulkan_image->Destroy();
}
} // namespace gpu
diff --git a/chromium/gpu/vulkan/android/vulkan_implementation_android.cc b/chromium/gpu/vulkan/android/vulkan_implementation_android.cc
index ccff9c94128..edbb2a3bc62 100644
--- a/chromium/gpu/vulkan/android/vulkan_implementation_android.cc
+++ b/chromium/gpu/vulkan/android/vulkan_implementation_android.cc
@@ -11,6 +11,7 @@
#include "gpu/ipc/common/vulkan_ycbcr_info.h"
#include "gpu/vulkan/vulkan_device_queue.h"
#include "gpu/vulkan/vulkan_function_pointers.h"
+#include "gpu/vulkan/vulkan_image.h"
#include "gpu/vulkan/vulkan_instance.h"
#include "gpu/vulkan/vulkan_posix_util.h"
#include "gpu/vulkan/vulkan_surface.h"
@@ -20,50 +21,6 @@
namespace gpu {
-namespace {
-bool GetAhbProps(
- const VkDevice& vk_device,
- AHardwareBuffer* hardware_buffer,
- VkAndroidHardwareBufferFormatPropertiesANDROID* ahb_format_props,
- VkAndroidHardwareBufferPropertiesANDROID* ahb_props) {
- DCHECK(ahb_format_props);
- DCHECK(ahb_props);
-
- // To obtain format properties of an Android hardware buffer, include an
- // instance of VkAndroidHardwareBufferFormatPropertiesANDROID in the pNext
- // chain of the VkAndroidHardwareBufferPropertiesANDROID instance passed to
- // vkGetAndroidHardwareBufferPropertiesANDROID.
- ahb_format_props->sType =
- VK_STRUCTURE_TYPE_ANDROID_HARDWARE_BUFFER_FORMAT_PROPERTIES_ANDROID;
- ahb_format_props->pNext = nullptr;
-
- ahb_props->sType =
- VK_STRUCTURE_TYPE_ANDROID_HARDWARE_BUFFER_PROPERTIES_ANDROID;
- ahb_props->pNext = ahb_format_props;
-
- bool result = vkGetAndroidHardwareBufferPropertiesANDROID(
- vk_device, hardware_buffer, ahb_props);
- if (result != VK_SUCCESS) {
- LOG(ERROR)
- << "GetAhbProps: vkGetAndroidHardwareBufferPropertiesANDROID failed : "
- << result;
- return false;
- }
- return true;
-}
-
-VulkanYCbCrInfo GetYcbcrInfoFromBufferProps(
- const VkAndroidHardwareBufferFormatPropertiesANDROID& ahb_format_props) {
- return VulkanYCbCrInfo(VK_FORMAT_UNDEFINED, ahb_format_props.externalFormat,
- ahb_format_props.suggestedYcbcrModel,
- ahb_format_props.suggestedYcbcrRange,
- ahb_format_props.suggestedXChromaOffset,
- ahb_format_props.suggestedYChromaOffset,
- ahb_format_props.formatFeatures);
-}
-
-} // namespace
-
VulkanImplementationAndroid::VulkanImplementationAndroid() = default;
VulkanImplementationAndroid::~VulkanImplementationAndroid() = default;
@@ -79,9 +36,9 @@ bool VulkanImplementationAndroid::InitializeVulkanInstance(bool using_surface) {
gpu::GetVulkanFunctionPointers();
base::NativeLibraryLoadError native_library_load_error;
- vulkan_function_pointers->vulkan_loader_library_ = base::LoadNativeLibrary(
+ vulkan_function_pointers->vulkan_loader_library = base::LoadNativeLibrary(
base::FilePath("libvulkan.so"), &native_library_load_error);
- if (!vulkan_function_pointers->vulkan_loader_library_)
+ if (!vulkan_function_pointers->vulkan_loader_library)
return false;
return vulkan_instance_.Initialize(required_extensions, {});
@@ -121,16 +78,25 @@ bool VulkanImplementationAndroid::GetPhysicalDevicePresentationSupport(
std::vector<const char*>
VulkanImplementationAndroid::GetRequiredDeviceExtensions() {
+ return {};
+}
+
+std::vector<const char*>
+VulkanImplementationAndroid::GetOptionalDeviceExtensions() {
// VK_KHR_DEDICATED_ALLOCATION_EXTENSION_NAME also requires
// VK_KHR_GET_MEMORY_REQUIREMENTS_2_EXTENSION_NAME as per spec.
- return {VK_KHR_SWAPCHAIN_EXTENSION_NAME,
- VK_KHR_EXTERNAL_SEMAPHORE_EXTENSION_NAME,
- VK_KHR_EXTERNAL_SEMAPHORE_FD_EXTENSION_NAME,
- VK_ANDROID_EXTERNAL_MEMORY_ANDROID_HARDWARE_BUFFER_EXTENSION_NAME,
- VK_KHR_EXTERNAL_MEMORY_EXTENSION_NAME,
- VK_KHR_DEDICATED_ALLOCATION_EXTENSION_NAME,
- VK_KHR_GET_MEMORY_REQUIREMENTS_2_EXTENSION_NAME,
- VK_KHR_SAMPLER_YCBCR_CONVERSION_EXTENSION_NAME};
+ return {
+ VK_KHR_DEDICATED_ALLOCATION_EXTENSION_NAME,
+ VK_KHR_EXTERNAL_MEMORY_EXTENSION_NAME,
+ VK_KHR_EXTERNAL_MEMORY_FD_EXTENSION_NAME,
+ VK_KHR_EXTERNAL_SEMAPHORE_EXTENSION_NAME,
+ VK_KHR_EXTERNAL_SEMAPHORE_FD_EXTENSION_NAME,
+ VK_KHR_GET_MEMORY_REQUIREMENTS_2_EXTENSION_NAME,
+ VK_KHR_INCREMENTAL_PRESENT_EXTENSION_NAME,
+ VK_KHR_SAMPLER_YCBCR_CONVERSION_EXTENSION_NAME,
+ VK_KHR_SWAPCHAIN_EXTENSION_NAME,
+ VK_ANDROID_EXTERNAL_MEMORY_ANDROID_HARDWARE_BUFFER_EXTENSION_NAME,
+ };
}
VkFence VulkanImplementationAndroid::CreateVkFenceForGpuFence(
@@ -177,198 +143,16 @@ bool VulkanImplementationAndroid::CanImportGpuMemoryBuffer(
return false;
}
-bool VulkanImplementationAndroid::CreateImageFromGpuMemoryHandle(
- VkDevice vk_device,
+std::unique_ptr<VulkanImage>
+VulkanImplementationAndroid::CreateImageFromGpuMemoryHandle(
+ VulkanDeviceQueue* device_queue,
gfx::GpuMemoryBufferHandle gmb_handle,
gfx::Size size,
- VkImage* vk_image,
- VkImageCreateInfo* vk_image_info,
- VkDeviceMemory* vk_device_memory,
- VkDeviceSize* mem_allocation_size,
- base::Optional<VulkanYCbCrInfo>* ycbcr_info) {
+ VkFormat vk_format) {
// TODO(sergeyu): Move code from CreateVkImageAndImportAHB() here and remove
// CreateVkImageAndImportAHB().
NOTIMPLEMENTED();
- return false;
-}
-
-bool VulkanImplementationAndroid::CreateVkImageAndImportAHB(
- const VkDevice& vk_device,
- const VkPhysicalDevice& vk_physical_device,
- const gfx::Size& size,
- base::android::ScopedHardwareBufferHandle ahb_handle,
- VkImage* vk_image,
- VkImageCreateInfo* vk_image_info,
- VkDeviceMemory* vk_device_memory,
- VkDeviceSize* mem_allocation_size,
- VulkanYCbCrInfo* ycbcr_info) {
- DCHECK(ahb_handle.is_valid());
- DCHECK(vk_image);
- DCHECK(vk_image_info);
- DCHECK(vk_device_memory);
- DCHECK(mem_allocation_size);
-
- // Get the image format properties of an Android hardware buffer.
- VkAndroidHardwareBufferFormatPropertiesANDROID ahb_format_props = {};
- VkAndroidHardwareBufferPropertiesANDROID ahb_props = {};
- if (!GetAhbProps(vk_device, ahb_handle.get(), &ahb_format_props, &ahb_props))
- return false;
-
- // To create an image with an external format, include an instance of
- // VkExternalFormatANDROID in the pNext chain of VkImageCreateInfo.
- VkExternalFormatANDROID external_format;
- external_format.sType = VK_STRUCTURE_TYPE_EXTERNAL_FORMAT_ANDROID;
- external_format.pNext = nullptr;
-
- // If externalFormat is zero, the effect is as if the VkExternalFormatANDROID
- // structure was not present. Otherwise, the image will have the specified
- // external format.
- external_format.externalFormat = 0;
-
- // If image has an external format, format must be VK_FORMAT_UNDEFINED.
- if (ahb_format_props.format == VK_FORMAT_UNDEFINED) {
- // externalFormat must be 0 or a value returned in the externalFormat member
- // of VkAndroidHardwareBufferFormatPropertiesANDROID by an earlier call to
- // vkGetAndroidHardwareBufferPropertiesANDROID.
- external_format.externalFormat = ahb_format_props.externalFormat;
- }
-
- // To define a set of external memory handle types that may be used as backing
- // store for an image, add a VkExternalMemoryImageCreateInfo structure to the
- // pNext chain of the VkImageCreateInfo structure.
- VkExternalMemoryImageCreateInfo external_memory_image_info;
- external_memory_image_info.sType =
- VK_STRUCTURE_TYPE_EXTERNAL_MEMORY_IMAGE_CREATE_INFO;
- external_memory_image_info.pNext = &external_format;
- external_memory_image_info.handleTypes =
- VK_EXTERNAL_MEMORY_HANDLE_TYPE_ANDROID_HARDWARE_BUFFER_BIT_ANDROID;
-
- // Intended usage of the image.
- VkImageUsageFlags usage_flags = 0;
-
- // Get the AHB description.
- AHardwareBuffer_Desc ahb_desc = {};
- base::AndroidHardwareBufferCompat::GetInstance().Describe(ahb_handle.get(),
- &ahb_desc);
-
- // Get Vulkan Image usage flag equivalence of AHB usage.
- if (ahb_desc.usage & AHARDWAREBUFFER_USAGE_GPU_SAMPLED_IMAGE) {
- usage_flags = usage_flags | VK_IMAGE_USAGE_SAMPLED_BIT |
- VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT;
- }
- if (ahb_desc.usage & AHARDWAREBUFFER_USAGE_GPU_COLOR_OUTPUT) {
- usage_flags |= VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT;
- }
- if (ahb_desc.usage & AHARDWAREBUFFER_USAGE_PROTECTED_CONTENT) {
- usage_flags |= VK_IMAGE_CREATE_PROTECTED_BIT;
- }
-
- // TODO(vikassoni) : AHARDWAREBUFFER_USAGE_GPU_CUBE_MAP is supported from API
- // level 28 which is not part of current android_ndk version in chromium. Add
- // equvalent VK usage later.
-
- if (!usage_flags) {
- LOG(ERROR) << "No valid usage flags found";
- return false;
- }
-
- // Find the first set bit to use as memoryTypeIndex.
- uint32_t memory_type_bits = ahb_props.memoryTypeBits;
- int32_t type_index = -1;
- for (uint32_t i = 0; memory_type_bits;
- memory_type_bits = memory_type_bits >> 0x1, ++i) {
- if (memory_type_bits & 0x1) {
- type_index = i;
- break;
- }
- }
- if (type_index == -1) {
- LOG(ERROR) << "No valid memoryTypeIndex found";
- return false;
- }
-
- // Populate VkImageCreateInfo.
- vk_image_info->sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO;
- vk_image_info->pNext = &external_memory_image_info;
- vk_image_info->flags = 0;
- vk_image_info->imageType = VK_IMAGE_TYPE_2D;
- vk_image_info->format = ahb_format_props.format;
- vk_image_info->extent = {static_cast<uint32_t>(size.width()),
- static_cast<uint32_t>(size.height()), 1};
- vk_image_info->mipLevels = 1;
- vk_image_info->arrayLayers = 1;
- vk_image_info->samples = VK_SAMPLE_COUNT_1_BIT;
- vk_image_info->tiling = VK_IMAGE_TILING_OPTIMAL;
- vk_image_info->usage = usage_flags;
- vk_image_info->sharingMode = VK_SHARING_MODE_EXCLUSIVE;
- vk_image_info->queueFamilyIndexCount = 0;
- vk_image_info->pQueueFamilyIndices = 0;
- vk_image_info->initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
-
- // Create Vk Image.
- bool result = vkCreateImage(vk_device, vk_image_info, nullptr, vk_image);
- if (result != VK_SUCCESS) {
- LOG(ERROR) << "vkCreateImage failed : " << result;
- return false;
- }
-
- // To import memory created outside of the current Vulkan instance from an
- // Android hardware buffer, add a VkImportAndroidHardwareBufferInfoANDROID
- // structure to the pNext chain of the VkMemoryAllocateInfo structure.
- VkImportAndroidHardwareBufferInfoANDROID ahb_import_info;
- ahb_import_info.sType =
- VK_STRUCTURE_TYPE_IMPORT_ANDROID_HARDWARE_BUFFER_INFO_ANDROID;
- ahb_import_info.pNext = nullptr;
- ahb_import_info.buffer = ahb_handle.get();
-
- // If the VkMemoryAllocateInfo pNext chain includes a
- // VkMemoryDedicatedAllocateInfo structure, then that structure includes a
- // handle of the sole buffer or image resource that the memory can be bound
- // to.
- VkMemoryDedicatedAllocateInfo dedicated_alloc_info;
- dedicated_alloc_info.sType = VK_STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO;
- dedicated_alloc_info.pNext = &ahb_import_info;
- dedicated_alloc_info.image = *vk_image;
- dedicated_alloc_info.buffer = VK_NULL_HANDLE;
-
- // An instance of the VkMemoryAllocateInfo structure defines a memory import
- // operation.
- VkMemoryAllocateInfo mem_alloc_info;
- mem_alloc_info.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO;
- mem_alloc_info.pNext = &dedicated_alloc_info;
-
- // If the parameters define an import operation and the external handle type
- // is VK_EXTERNAL_MEMORY_HANDLE_TYPE_ANDROID_HARDWARE_BUFFER_BIT_ANDROID,
- // allocationSize must be the size returned by
- // vkGetAndroidHardwareBufferPropertiesANDROID for the Android hardware
- // buffer.
- mem_alloc_info.allocationSize = ahb_props.allocationSize;
- mem_alloc_info.memoryTypeIndex = type_index;
-
- // A Vulkan device operates on data in device memory via memory objects that
- // are represented in the API by a VkDeviceMemory handle.
- // Allocate memory.
- result =
- vkAllocateMemory(vk_device, &mem_alloc_info, nullptr, vk_device_memory);
- if (result != VK_SUCCESS) {
- LOG(ERROR) << "vkAllocateMemory failed : " << result;
- vkDestroyImage(vk_device, *vk_image, nullptr);
- return false;
- }
-
- // Attach memory to the image object.
- result = vkBindImageMemory(vk_device, *vk_image, *vk_device_memory, 0);
- if (result != VK_SUCCESS) {
- LOG(ERROR) << "vkBindImageMemory failed : " << result;
- vkDestroyImage(vk_device, *vk_image, nullptr);
- vkFreeMemory(vk_device, *vk_device_memory, nullptr);
- return false;
- }
-
- *mem_allocation_size = mem_alloc_info.allocationSize;
- if (ycbcr_info)
- *ycbcr_info = GetYcbcrInfoFromBufferProps(ahb_format_props);
- return true;
+ return nullptr;
}
bool VulkanImplementationAndroid::GetSamplerYcbcrConversionInfo(
@@ -377,13 +161,32 @@ bool VulkanImplementationAndroid::GetSamplerYcbcrConversionInfo(
VulkanYCbCrInfo* ycbcr_info) {
DCHECK(ycbcr_info);
- // Get the image format properties of an Android hardware buffer.
- VkAndroidHardwareBufferFormatPropertiesANDROID ahb_format_props = {};
- VkAndroidHardwareBufferPropertiesANDROID ahb_props = {};
- if (!GetAhbProps(vk_device, ahb_handle.get(), &ahb_format_props, &ahb_props))
+ // To obtain format properties of an Android hardware buffer, include an
+ // instance of VkAndroidHardwareBufferFormatPropertiesANDROID in the pNext
+ // chain of the VkAndroidHardwareBufferPropertiesANDROID instance passed to
+ // vkGetAndroidHardwareBufferPropertiesANDROID.
+ VkAndroidHardwareBufferFormatPropertiesANDROID ahb_format_props = {
+ VK_STRUCTURE_TYPE_ANDROID_HARDWARE_BUFFER_FORMAT_PROPERTIES_ANDROID};
+ VkAndroidHardwareBufferPropertiesANDROID ahb_props = {
+ .sType = VK_STRUCTURE_TYPE_ANDROID_HARDWARE_BUFFER_PROPERTIES_ANDROID,
+ .pNext = &ahb_format_props,
+ };
+
+ VkResult result = vkGetAndroidHardwareBufferPropertiesANDROID(
+ vk_device, ahb_handle.get(), &ahb_props);
+ if (result != VK_SUCCESS) {
+ LOG(ERROR)
+ << "GetAhbProps: vkGetAndroidHardwareBufferPropertiesANDROID failed : "
+ << result;
return false;
+ }
- *ycbcr_info = GetYcbcrInfoFromBufferProps(ahb_format_props);
+ *ycbcr_info = VulkanYCbCrInfo(
+ VK_FORMAT_UNDEFINED, ahb_format_props.externalFormat,
+ ahb_format_props.suggestedYcbcrModel,
+ ahb_format_props.suggestedYcbcrRange,
+ ahb_format_props.suggestedXChromaOffset,
+ ahb_format_props.suggestedYChromaOffset, ahb_format_props.formatFeatures);
return true;
}
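
The fields packed into VulkanYCbCrInfo above are exactly the ones Vulkan expects when creating a sampler conversion for an external-format image. A hedged sketch of the downstream use (standard Vulkan 1.1 / VK_KHR_sampler_ycbcr_conversion usage, not code from this patch; vk_device and ahb_format_props refer to the function above):

// External-format images use VK_FORMAT_UNDEFINED plus VkExternalFormatANDROID
// chained into the conversion create info.
VkExternalFormatANDROID external_format = {
    VK_STRUCTURE_TYPE_EXTERNAL_FORMAT_ANDROID};
external_format.externalFormat = ahb_format_props.externalFormat;

VkSamplerYcbcrConversionCreateInfo conversion_info = {
    VK_STRUCTURE_TYPE_SAMPLER_YCBCR_CONVERSION_CREATE_INFO};
conversion_info.pNext = &external_format;
conversion_info.format = VK_FORMAT_UNDEFINED;
conversion_info.ycbcrModel = ahb_format_props.suggestedYcbcrModel;
conversion_info.ycbcrRange = ahb_format_props.suggestedYcbcrRange;
conversion_info.xChromaOffset = ahb_format_props.suggestedXChromaOffset;
conversion_info.yChromaOffset = ahb_format_props.suggestedYChromaOffset;
// LINEAR is only legal if formatFeatures advertises the linear-filter bit.
conversion_info.chromaFilter = VK_FILTER_NEAREST;

VkSamplerYcbcrConversion conversion = VK_NULL_HANDLE;
VkResult create_result = vkCreateSamplerYcbcrConversion(
    vk_device, &conversion_info, nullptr, &conversion);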
diff --git a/chromium/gpu/vulkan/android/vulkan_implementation_android.h b/chromium/gpu/vulkan/android/vulkan_implementation_android.h
index 2f141cf9d28..6c7568b1a64 100644
--- a/chromium/gpu/vulkan/android/vulkan_implementation_android.h
+++ b/chromium/gpu/vulkan/android/vulkan_implementation_android.h
@@ -31,6 +31,7 @@ class COMPONENT_EXPORT(VULKAN_ANDROID) VulkanImplementationAndroid
const std::vector<VkQueueFamilyProperties>& queue_family_properties,
uint32_t queue_family_index) override;
std::vector<const char*> GetRequiredDeviceExtensions() override;
+ std::vector<const char*> GetOptionalDeviceExtensions() override;
VkFence CreateVkFenceForGpuFence(VkDevice vk_device) override;
std::unique_ptr<gfx::GpuFence> ExportVkFenceToGpuFence(
VkDevice vk_device,
@@ -43,25 +44,11 @@ class COMPONENT_EXPORT(VULKAN_ANDROID) VulkanImplementationAndroid
VkExternalMemoryHandleTypeFlagBits GetExternalImageHandleType() override;
bool CanImportGpuMemoryBuffer(
gfx::GpuMemoryBufferType memory_buffer_type) override;
- bool CreateImageFromGpuMemoryHandle(
- VkDevice vk_device,
+ std::unique_ptr<VulkanImage> CreateImageFromGpuMemoryHandle(
+ VulkanDeviceQueue* device_queue,
gfx::GpuMemoryBufferHandle gmb_handle,
gfx::Size size,
- VkImage* vk_image,
- VkImageCreateInfo* vk_image_info,
- VkDeviceMemory* vk_device_memory,
- VkDeviceSize* mem_allocation_size,
- base::Optional<VulkanYCbCrInfo>* ycbcr_info) override;
- bool CreateVkImageAndImportAHB(
- const VkDevice& vk_device,
- const VkPhysicalDevice& vk_physical_device,
- const gfx::Size& size,
- base::android::ScopedHardwareBufferHandle ahb_handle,
- VkImage* vk_image,
- VkImageCreateInfo* vk_image_info,
- VkDeviceMemory* vk_device_memory,
- VkDeviceSize* mem_allocation_size,
- VulkanYCbCrInfo* ycbcr_info) override;
+ VkFormat vk_format) override;
bool GetSamplerYcbcrConversionInfo(
const VkDevice& vk_device,
base::android::ScopedHardwareBufferHandle ahb_handle,
diff --git a/chromium/gpu/vulkan/demo/BUILD.gn b/chromium/gpu/vulkan/demo/BUILD.gn
index 08d814816fe..5224c13104f 100644
--- a/chromium/gpu/vulkan/demo/BUILD.gn
+++ b/chromium/gpu/vulkan/demo/BUILD.gn
@@ -11,9 +11,7 @@ assert(enable_vulkan)
group("demo") {
if (use_x11) {
- deps = [
- ":vulkan_demo",
- ]
+ deps = [ ":vulkan_demo" ]
}
}
diff --git a/chromium/gpu/vulkan/demo/vulkan_demo.cc b/chromium/gpu/vulkan/demo/vulkan_demo.cc
index cbfe195903e..e46d726fb99 100644
--- a/chromium/gpu/vulkan/demo/vulkan_demo.cc
+++ b/chromium/gpu/vulkan/demo/vulkan_demo.cc
@@ -197,6 +197,7 @@ void VulkanDemo::RenderFrame() {
CreateSkSurface();
Draw(sk_surface_->getCanvas(), 0.7);
GrBackendSemaphore semaphore;
+ semaphore.initVulkan(scoped_write_->GetEndSemaphore());
GrFlushInfo flush_info = {
.fFlags = kNone_GrFlushFlags,
.fNumSemaphores = 1,
@@ -209,7 +210,6 @@ void VulkanDemo::RenderFrame() {
if (!backend.getVkImageInfo(&vk_image_info))
NOTREACHED() << "Failed to get image info";
scoped_write_->set_image_layout(vk_image_info.fImageLayout);
- scoped_write_->SetEndSemaphore(semaphore.vkSemaphore());
scoped_write_.reset();
vulkan_surface_->SwapBuffers();
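
The reordering above changes the direction of the semaphore hand-off: the end-of-write semaphore now comes from the scoped write access (GetEndSemaphore) and is given to Skia to signal up front, instead of being read back out of the GrBackendSemaphore and pushed into the accessor afterwards. A compressed sketch of the resulting flush sequence (the SkSurface::flush overload is assumed from the Skia headers of this era, not shown in the patch):

GrBackendSemaphore semaphore;
semaphore.initVulkan(scoped_write_->GetEndSemaphore());  // semaphore owned by the accessor

GrFlushInfo flush_info = {};
flush_info.fNumSemaphores = 1;
flush_info.fSignalSemaphores = &semaphore;  // Skia signals it when the flush completes

// The demo may flush through a different overload; this is the generic
// surface flush with a signal semaphore attached.
sk_surface_->flush(SkSurface::BackendSurfaceAccess::kNoAccess, flush_info);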
diff --git a/chromium/gpu/vulkan/features.gni b/chromium/gpu/vulkan/features.gni
index d55eddeb0b3..045a5484f70 100644
--- a/chromium/gpu/vulkan/features.gni
+++ b/chromium/gpu/vulkan/features.gni
@@ -8,5 +8,10 @@ import("//build/config/ui.gni")
# For details see declare_args() in build/config/BUILDCONFIG.gn.
declare_args() {
# Enable experimental vulkan backend.
- enable_vulkan = is_linux || is_android || is_fuchsia
+ enable_vulkan = is_linux || is_android || is_fuchsia || is_win
+
+ # Enable SwiftShader Vulkan. Disabling it can save build time; however,
+ # --use-vulkan=swiftshader and tests which rely on SwiftShader Vulkan will
+ # not work.
+ enable_swiftshader_vulkan = true
}
diff --git a/chromium/gpu/vulkan/generate_bindings.py b/chromium/gpu/vulkan/generate_bindings.py
index c9681c0e6ae..cd10c516a45 100755
--- a/chromium/gpu/vulkan/generate_bindings.py
+++ b/chromium/gpu/vulkan/generate_bindings.py
@@ -30,6 +30,7 @@ VULKAN_INSTANCE_FUNCTIONS = [
'functions': [
'vkCreateDevice',
'vkDestroyInstance',
+ 'vkEnumerateDeviceExtensionProperties',
'vkEnumerateDeviceLayerProperties',
'vkEnumeratePhysicalDevices',
'vkGetDeviceProcAddr',
@@ -41,6 +42,14 @@ VULKAN_INSTANCE_FUNCTIONS = [
]
},
{
+ 'ifdef': 'DCHECK_IS_ON()',
+ 'extension': 'VK_EXT_DEBUG_REPORT_EXTENSION_NAME',
+ 'functions': [
+ 'vkCreateDebugReportCallbackEXT',
+ 'vkDestroyDebugReportCallbackEXT',
+ ]
+ },
+ {
'extension': 'VK_KHR_SURFACE_EXTENSION_NAME',
'functions': [
'vkDestroySurfaceKHR',
@@ -58,6 +67,14 @@ VULKAN_INSTANCE_FUNCTIONS = [
]
},
{
+ 'ifdef': 'defined(OS_WIN)',
+ 'extension': 'VK_KHR_WIN32_SURFACE_EXTENSION_NAME',
+ 'functions': [
+ 'vkCreateWin32SurfaceKHR',
+ 'vkGetPhysicalDeviceWin32PresentationSupportKHR',
+ ]
+ },
+ {
'ifdef': 'defined(OS_ANDROID)',
'extension': 'VK_KHR_ANDROID_SURFACE_EXTENSION_NAME',
'functions': [
@@ -172,7 +189,7 @@ VULKAN_DEVICE_FUNCTIONS = [
]
},
{
- 'ifdef': 'defined(OS_LINUX)',
+ 'ifdef': 'defined(OS_LINUX) || defined(OS_ANDROID)',
'extension': 'VK_KHR_EXTERNAL_MEMORY_FD_EXTENSION_NAME',
'functions': [
'vkGetMemoryFdKHR',
@@ -189,6 +206,13 @@ VULKAN_DEVICE_FUNCTIONS = [
},
{
'ifdef': 'defined(OS_FUCHSIA)',
+ 'extension': 'VK_FUCHSIA_EXTERNAL_MEMORY_EXTENSION_NAME',
+ 'functions': [
+ 'vkGetMemoryZirconHandleFUCHSIA',
+ ]
+ },
+ {
+ 'ifdef': 'defined(OS_FUCHSIA)',
'extension': 'VK_FUCHSIA_BUFFER_COLLECTION_EXTENSION_NAME',
'functions': [
'vkCreateBufferCollectionFUCHSIA',
@@ -271,7 +295,7 @@ def WriteFunctions(file, functions, template, check_extension=False):
file.write('\n')
def WriteFunctionDeclarations(file, functions):
- template = Template(' PFN_${name} ${name}Fn = nullptr;\n')
+ template = Template(' VulkanFunction<PFN_${name}> ${name}Fn;\n')
WriteFunctions(file, functions, template)
def WriteMacros(file, functions):
@@ -290,6 +314,7 @@ def GenerateHeaderFile(file):
#include <vulkan/vulkan.h>
+#include "base/compiler_specific.h"
#include "base/native_library.h"
#include "build/build_config.h"
#include "gpu/vulkan/vulkan_export.h"
@@ -312,6 +337,10 @@ def GenerateHeaderFile(file):
#include <vulkan/vulkan_xlib.h>
#endif
+#if defined(OS_WIN)
+#include <vulkan/vulkan_win32.h>
+#endif
+
namespace gpu {
struct VulkanFunctionPointers;
@@ -336,11 +365,38 @@ struct VulkanFunctionPointers {
uint32_t api_version,
const gfx::ExtensionSet& enabled_extensions);
- base::NativeLibrary vulkan_loader_library_ = nullptr;
+ base::NativeLibrary vulkan_loader_library = nullptr;
+
+ template<typename T>
+ class VulkanFunction;
+ template <typename R, typename ...Args>
+ class VulkanFunction <R(VKAPI_PTR*)(Args...)> {
+ public:
+ explicit operator bool() {
+ return !!fn_;
+ }
+
+ NO_SANITIZE("cfi-icall")
+ R operator()(Args... args) {
+ return fn_(args...);
+ }
+
+ private:
+ friend VulkanFunctionPointers;
+ using Fn = R(VKAPI_PTR*)(Args...);
+
+ Fn operator=(Fn fn) {
+ fn_ = fn;
+ return fn_;
+ }
+
+ Fn fn_ = nullptr;
+ };
// Unassociated functions
- PFN_vkEnumerateInstanceVersion vkEnumerateInstanceVersionFn = nullptr;
- PFN_vkGetInstanceProcAddr vkGetInstanceProcAddrFn = nullptr;
+ VulkanFunction<PFN_vkEnumerateInstanceVersion> vkEnumerateInstanceVersionFn;
+ VulkanFunction<PFN_vkGetInstanceProcAddr> vkGetInstanceProcAddrFn;
+
""")
WriteFunctionDeclarations(file, VULKAN_UNASSOCIATED_FUNCTIONS)
@@ -367,7 +423,8 @@ struct VulkanFunctionPointers {
// Unassociated functions
""")
- WriteMacros(file, [{'functions': [ 'vkGetInstanceProcAddr' ]}])
+ WriteMacros(file, [{'functions': [ 'vkGetInstanceProcAddr' ,
+ 'vkEnumerateInstanceVersion']}])
WriteMacros(file, VULKAN_UNASSOCIATED_FUNCTIONS)
file.write("""\
@@ -386,8 +443,7 @@ struct VulkanFunctionPointers {
file.write("""\
-#endif // GPU_VULKAN_VULKAN_FUNCTION_POINTERS_H_
-""")
+#endif // GPU_VULKAN_VULKAN_FUNCTION_POINTERS_H_""")
def WriteFunctionPointerInitialization(file, proc_addr_function, parent,
functions):
@@ -410,15 +466,15 @@ def WriteFunctionPointerInitialization(file, proc_addr_function, parent,
WriteFunctions(file, functions, template, check_extension=True)
def WriteUnassociatedFunctionPointerInitialization(file, functions):
- WriteFunctionPointerInitialization(file, 'vkGetInstanceProcAddrFn', 'nullptr',
+ WriteFunctionPointerInitialization(file, 'vkGetInstanceProcAddr', 'nullptr',
functions)
def WriteInstanceFunctionPointerInitialization(file, functions):
- WriteFunctionPointerInitialization(file, 'vkGetInstanceProcAddrFn',
+ WriteFunctionPointerInitialization(file, 'vkGetInstanceProcAddr',
'vk_instance', functions)
def WriteDeviceFunctionPointerInitialization(file, functions):
- WriteFunctionPointerInitialization(file, 'vkGetDeviceProcAddrFn', 'vk_device',
+ WriteFunctionPointerInitialization(file, 'vkGetDeviceProcAddr', 'vk_device',
functions)
def GenerateSourceFile(file):
@@ -441,19 +497,20 @@ VulkanFunctionPointers* GetVulkanFunctionPointers() {
VulkanFunctionPointers::VulkanFunctionPointers() = default;
VulkanFunctionPointers::~VulkanFunctionPointers() = default;
+NO_SANITIZE("cfi-icall")
bool VulkanFunctionPointers::BindUnassociatedFunctionPointers() {
// vkGetInstanceProcAddr must be handled specially since it gets its function
// pointer through base::GetFunctionPointerFromNativeLibrary(). Other Vulkan
// functions don't do this.
vkGetInstanceProcAddrFn = reinterpret_cast<PFN_vkGetInstanceProcAddr>(
- base::GetFunctionPointerFromNativeLibrary(vulkan_loader_library_,
+ base::GetFunctionPointerFromNativeLibrary(vulkan_loader_library,
"vkGetInstanceProcAddr"));
if (!vkGetInstanceProcAddrFn)
return false;
vkEnumerateInstanceVersionFn =
reinterpret_cast<PFN_vkEnumerateInstanceVersion>(
- vkGetInstanceProcAddrFn(nullptr, "vkEnumerateInstanceVersion"));
+ vkGetInstanceProcAddr(nullptr, "vkEnumerateInstanceVersion"));
// vkEnumerateInstanceVersion didn't exist in Vulkan 1.0, so we should
// proceed even if we fail to get vkEnumerateInstanceVersion pointer.
""")
diff --git a/chromium/gpu/vulkan/init/vulkan_factory.cc b/chromium/gpu/vulkan/init/vulkan_factory.cc
index 5bfcb40e34d..74791620dd0 100644
--- a/chromium/gpu/vulkan/init/vulkan_factory.cc
+++ b/chromium/gpu/vulkan/init/vulkan_factory.cc
@@ -30,7 +30,7 @@ std::unique_ptr<VulkanImplementation> CreateVulkanImplementation(
bool use_swiftshader,
bool allow_protected_memory,
bool enforce_protected_memory) {
-#if !defined(USE_X11)
+#if !defined(USE_X11) && !defined(OS_WIN)
// TODO(samans): Support Swiftshader on more platforms.
// https://crbug.com/963988
DCHECK(!use_swiftshader)
@@ -50,7 +50,7 @@ std::unique_ptr<VulkanImplementation> CreateVulkanImplementation(
->CreateVulkanImplementation(allow_protected_memory,
enforce_protected_memory);
#elif defined(OS_WIN)
- return std::make_unique<VulkanImplementationWin32>();
+ return std::make_unique<VulkanImplementationWin32>(use_swiftshader);
#else
#error Unsupported Vulkan Platform.
#endif
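
With the guard relaxed above, SwiftShader can now be requested on Windows as well as X11, and the flag is forwarded into the Win32 implementation. An illustrative call using the signature visible in this hunk (parameter values are made up):

std::unique_ptr<gpu::VulkanImplementation> implementation =
    gpu::CreateVulkanImplementation(/*use_swiftshader=*/true,
                                    /*allow_protected_memory=*/false,
                                    /*enforce_protected_memory=*/false);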
diff --git a/chromium/gpu/vulkan/vulkan_command_buffer.cc b/chromium/gpu/vulkan/vulkan_command_buffer.cc
index 9fdb2ddd97b..26fb2c978d3 100644
--- a/chromium/gpu/vulkan/vulkan_command_buffer.cc
+++ b/chromium/gpu/vulkan/vulkan_command_buffer.cc
@@ -219,17 +219,20 @@ bool VulkanCommandBuffer::SubmissionFinished() {
return device_queue_->GetFenceHelper()->HasPassed(submission_fence_);
}
-void VulkanCommandBuffer::TransitionImageLayout(VkImage image,
- VkImageLayout old_layout,
- VkImageLayout new_layout) {
+void VulkanCommandBuffer::TransitionImageLayout(
+ VkImage image,
+ VkImageLayout old_layout,
+ VkImageLayout new_layout,
+ uint32_t src_queue_family_index,
+ uint32_t dst_queue_family_index) {
VkImageMemoryBarrier barrier = {};
barrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER;
barrier.srcAccessMask = GetAccessMask(old_layout);
barrier.dstAccessMask = GetAccessMask(new_layout);
barrier.oldLayout = old_layout;
barrier.newLayout = new_layout;
- barrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
- barrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
+ barrier.srcQueueFamilyIndex = src_queue_family_index;
+ barrier.dstQueueFamilyIndex = dst_queue_family_index;
barrier.image = image;
barrier.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
barrier.subresourceRange.baseMipLevel = 0;
diff --git a/chromium/gpu/vulkan/vulkan_command_buffer.h b/chromium/gpu/vulkan/vulkan_command_buffer.h
index 1b83cad2543..7ba08472ec7 100644
--- a/chromium/gpu/vulkan/vulkan_command_buffer.h
+++ b/chromium/gpu/vulkan/vulkan_command_buffer.h
@@ -47,9 +47,12 @@ class VULKAN_EXPORT VulkanCommandBuffer {
// is finished.
bool SubmissionFinished();
- void TransitionImageLayout(VkImage image,
- VkImageLayout old_layout,
- VkImageLayout new_layout);
+ void TransitionImageLayout(
+ VkImage image,
+ VkImageLayout old_layout,
+ VkImageLayout new_layout,
+ uint32_t src_queue_family_index = VK_QUEUE_FAMILY_IGNORED,
+ uint32_t dst_queue_family_index = VK_QUEUE_FAMILY_IGNORED);
void CopyBufferToImage(VkBuffer buffer,
VkImage image,
uint32_t buffer_width,
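
The two new parameters let TransitionImageLayout double as a queue-family ownership transfer; omitting them keeps the previous VK_QUEUE_FAMILY_IGNORED behaviour (no transfer). A sketch of acquiring an externally owned image (command_buffer, image, and the destination-family accessor are illustrative assumptions, not taken from the patch):

// Acquire ownership of an imported image from the external queue family.
const uint32_t gpu_queue_family =
    device_queue->GetVulkanQueueIndex();  // assumed accessor
command_buffer->TransitionImageLayout(
    image, VK_IMAGE_LAYOUT_UNDEFINED, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
    /*src_queue_family_index=*/VK_QUEUE_FAMILY_EXTERNAL,
    /*dst_queue_family_index=*/gpu_queue_family);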
diff --git a/chromium/gpu/vulkan/vulkan_device_queue.cc b/chromium/gpu/vulkan/vulkan_device_queue.cc
index 55266dac0ab..4d95330b567 100644
--- a/chromium/gpu/vulkan/vulkan_device_queue.cc
+++ b/chromium/gpu/vulkan/vulkan_device_queue.cc
@@ -30,6 +30,7 @@ bool VulkanDeviceQueue::Initialize(
uint32_t options,
const VulkanInfo& info,
const std::vector<const char*>& required_extensions,
+ const std::vector<const char*>& optional_extensions,
bool allow_protected_memory,
const GetPresentationSupportCallback& get_presentation_support) {
DCHECK_EQ(static_cast<VkPhysicalDevice>(VK_NULL_HANDLE), vk_physical_device_);
@@ -72,8 +73,10 @@ bool VulkanDeviceQueue::Initialize(
}
}
- if (queue_index == -1)
+ if (queue_index == -1) {
+ DLOG(ERROR) << "Cannot find capable device queue.";
return false;
+ }
const auto& physical_device_info = info.physical_devices[device_index];
vk_physical_device_ = physical_device_info.device;
@@ -89,22 +92,41 @@ bool VulkanDeviceQueue::Initialize(
queue_create_info.flags =
allow_protected_memory ? VK_DEVICE_QUEUE_CREATE_PROTECTED_BIT : 0;
- std::vector<const char*> enabled_layer_names;
-#if DCHECK_IS_ON()
- std::unordered_set<std::string> desired_layers({
- "VK_LAYER_KHRONOS_validation",
- });
-
- for (const auto& layer : physical_device_info.layers) {
- if (desired_layers.find(layer.layerName) != desired_layers.end())
- enabled_layer_names.push_back(layer.layerName);
+ std::vector<const char*> enabled_extensions;
+ for (const char* extension : required_extensions) {
+ const auto it =
+ std::find_if(physical_device_info.extensions.begin(),
+ physical_device_info.extensions.end(),
+ [extension](const VkExtensionProperties& p) {
+ return std::strcmp(extension, p.extensionName) == 0;
+ });
+ if (it == physical_device_info.extensions.end()) {
+ // On Fuchsia, some device extensions are provided by layers.
+ // TODO(penghuang): also check extensions against layer device extensions.
+#if !defined(OS_FUCHSIA)
+ DLOG(ERROR) << "Required Vulkan extension " << extension
+ << " is not supported.";
+ return false;
+#endif
+ }
+ enabled_extensions.push_back(extension);
}
-#endif // DCHECK_IS_ON()
- std::vector<const char*> enabled_extensions;
- enabled_extensions.insert(std::end(enabled_extensions),
- std::begin(required_extensions),
- std::end(required_extensions));
+ for (const char* extension : optional_extensions) {
+ const auto it =
+ std::find_if(physical_device_info.extensions.begin(),
+ physical_device_info.extensions.end(),
+ [extension](const VkExtensionProperties& p) {
+ return std::strcmp(extension, p.extensionName) == 0;
+ });
+ if (it == physical_device_info.extensions.end()) {
+ DLOG(ERROR) << "Optional Vulkan extension " << extension
+ << " is not supported.";
+ } else {
+ enabled_extensions.push_back(extension);
+ }
+ }
uint32_t device_api_version = std::min(
info.used_api_version, vk_physical_device_properties_.apiVersion);
@@ -148,16 +170,16 @@ bool VulkanDeviceQueue::Initialize(
device_create_info.pNext = enabled_device_features_2_.pNext;
device_create_info.queueCreateInfoCount = 1;
device_create_info.pQueueCreateInfos = &queue_create_info;
- device_create_info.enabledLayerCount = enabled_layer_names.size();
- device_create_info.ppEnabledLayerNames = enabled_layer_names.data();
device_create_info.enabledExtensionCount = enabled_extensions.size();
device_create_info.ppEnabledExtensionNames = enabled_extensions.data();
device_create_info.pEnabledFeatures = &enabled_device_features_2_.features;
result = vkCreateDevice(vk_physical_device_, &device_create_info, nullptr,
&owned_vk_device_);
- if (VK_SUCCESS != result)
+ if (VK_SUCCESS != result) {
+ DLOG(ERROR) << "vkCreateDevice failed. result:" << result;
return false;
+ }
enabled_extensions_ = gfx::ExtensionSet(std::begin(enabled_extensions),
std::end(enabled_extensions));
@@ -185,7 +207,6 @@ bool VulkanDeviceQueue::Initialize(
cleanup_helper_ = std::make_unique<VulkanFenceHelper>(this);
allow_protected_memory_ = allow_protected_memory;
-
return true;
}
@@ -234,4 +255,4 @@ std::unique_ptr<VulkanCommandPool> VulkanDeviceQueue::CreateCommandPool() {
return command_pool;
}
-} // namespace gpu
+} // namespace gpu
\ No newline at end of file
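
The extra Initialize() parameter splits device extensions into a hard-required set and a best-effort optional set, which is logged and skipped when absent. A sketch of the caller side implied by the new signature (the surrounding names are assumptions, not lifted from the patch):

std::vector<const char*> required =
    vulkan_implementation->GetRequiredDeviceExtensions();
std::vector<const char*> optional =
    vulkan_implementation->GetOptionalDeviceExtensions();
if (!device_queue->Initialize(options, vulkan_info, required, optional,
                              /*allow_protected_memory=*/false,
                              get_presentation_support)) {
  // A required extension was missing or vkCreateDevice failed.
  return false;
}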
diff --git a/chromium/gpu/vulkan/vulkan_device_queue.h b/chromium/gpu/vulkan/vulkan_device_queue.h
index 3fbc53b2ec4..7e765924fce 100644
--- a/chromium/gpu/vulkan/vulkan_device_queue.h
+++ b/chromium/gpu/vulkan/vulkan_device_queue.h
@@ -41,6 +41,7 @@ class VULKAN_EXPORT VulkanDeviceQueue {
uint32_t options,
const VulkanInfo& info,
const std::vector<const char*>& required_extensions,
+ const std::vector<const char*>& optional_extensions,
bool allow_protected_memory,
const GetPresentationSupportCallback& get_presentation_support);
diff --git a/chromium/gpu/vulkan/vulkan_fence_helper.cc b/chromium/gpu/vulkan/vulkan_fence_helper.cc
index 224e4742bca..502436a8400 100644
--- a/chromium/gpu/vulkan/vulkan_fence_helper.cc
+++ b/chromium/gpu/vulkan/vulkan_fence_helper.cc
@@ -241,8 +241,10 @@ void VulkanFenceHelper::PerformImmediateCleanup() {
// recover from this.
CHECK(result == VK_SUCCESS || result == VK_ERROR_DEVICE_LOST);
bool device_lost = result == VK_ERROR_DEVICE_LOST;
- if (!device_lost)
- current_generation_ = next_generation_ - 1;
+
+ // We're going to destroy all fences below, so we should consider them as
+ // passed.
+ current_generation_ = next_generation_ - 1;
// Run all cleanup tasks. Create a temporary vector of tasks to run to avoid
// reentrancy issues.
diff --git a/chromium/gpu/vulkan/vulkan_fence_helper.h b/chromium/gpu/vulkan/vulkan_fence_helper.h
index 52c04552a62..20fdfe117f1 100644
--- a/chromium/gpu/vulkan/vulkan_fence_helper.h
+++ b/chromium/gpu/vulkan/vulkan_fence_helper.h
@@ -28,7 +28,7 @@ class VULKAN_EXPORT VulkanFenceHelper {
// Class representing a fence registered with this system. Should be treated
// as an opaque handle.
- class FenceHandle {
+ class VULKAN_EXPORT FenceHandle {
public:
FenceHandle();
FenceHandle(const FenceHandle& other);
diff --git a/chromium/gpu/vulkan/vulkan_fence_helper_unittest.cc b/chromium/gpu/vulkan/vulkan_fence_helper_unittest.cc
index 8e3d9d1ae79..a92156084f4 100644
--- a/chromium/gpu/vulkan/vulkan_fence_helper_unittest.cc
+++ b/chromium/gpu/vulkan/vulkan_fence_helper_unittest.cc
@@ -168,6 +168,9 @@ TEST_F(VulkanFenceHelperTest, SkiaCallbackAfterFences) {
fence_helper->GenerateCleanupFence();
EXPECT_TRUE(fence_handle.is_valid());
+ // Call vkQueueWaitIdle() to make sure the |fence_handle| is passed.
+ vkQueueWaitIdle(queue());
+
// Enqueue 5 more callbacks.
for (int i = 5; i < 10; i++) {
fence_helper->EnqueueCleanupTaskForSubmittedWork(
diff --git a/chromium/gpu/vulkan/vulkan_function_pointers.cc b/chromium/gpu/vulkan/vulkan_function_pointers.cc
index 9c3cea92b11..77c2a79a5ba 100644
--- a/chromium/gpu/vulkan/vulkan_function_pointers.cc
+++ b/chromium/gpu/vulkan/vulkan_function_pointers.cc
@@ -22,23 +22,24 @@ VulkanFunctionPointers* GetVulkanFunctionPointers() {
VulkanFunctionPointers::VulkanFunctionPointers() = default;
VulkanFunctionPointers::~VulkanFunctionPointers() = default;
+NO_SANITIZE("cfi-icall")
bool VulkanFunctionPointers::BindUnassociatedFunctionPointers() {
// vkGetInstanceProcAddr must be handled specially since it gets its function
// pointer through base::GetFunctionPointerFromNativeLibrary(). Other Vulkan
// functions don't do this.
vkGetInstanceProcAddrFn = reinterpret_cast<PFN_vkGetInstanceProcAddr>(
- base::GetFunctionPointerFromNativeLibrary(vulkan_loader_library_,
+ base::GetFunctionPointerFromNativeLibrary(vulkan_loader_library,
"vkGetInstanceProcAddr"));
if (!vkGetInstanceProcAddrFn)
return false;
vkEnumerateInstanceVersionFn =
reinterpret_cast<PFN_vkEnumerateInstanceVersion>(
- vkGetInstanceProcAddrFn(nullptr, "vkEnumerateInstanceVersion"));
+ vkGetInstanceProcAddr(nullptr, "vkEnumerateInstanceVersion"));
// vkEnumerateInstanceVersion didn't exist in Vulkan 1.0, so we should
// proceed even if we fail to get vkEnumerateInstanceVersion pointer.
vkCreateInstanceFn = reinterpret_cast<PFN_vkCreateInstance>(
- vkGetInstanceProcAddrFn(nullptr, "vkCreateInstance"));
+ vkGetInstanceProcAddr(nullptr, "vkCreateInstance"));
if (!vkCreateInstanceFn) {
DLOG(WARNING) << "Failed to bind vulkan entrypoint: "
<< "vkCreateInstance";
@@ -47,8 +48,8 @@ bool VulkanFunctionPointers::BindUnassociatedFunctionPointers() {
vkEnumerateInstanceExtensionPropertiesFn =
reinterpret_cast<PFN_vkEnumerateInstanceExtensionProperties>(
- vkGetInstanceProcAddrFn(nullptr,
- "vkEnumerateInstanceExtensionProperties"));
+ vkGetInstanceProcAddr(nullptr,
+ "vkEnumerateInstanceExtensionProperties"));
if (!vkEnumerateInstanceExtensionPropertiesFn) {
DLOG(WARNING) << "Failed to bind vulkan entrypoint: "
<< "vkEnumerateInstanceExtensionProperties";
@@ -57,8 +58,7 @@ bool VulkanFunctionPointers::BindUnassociatedFunctionPointers() {
vkEnumerateInstanceLayerPropertiesFn =
reinterpret_cast<PFN_vkEnumerateInstanceLayerProperties>(
- vkGetInstanceProcAddrFn(nullptr,
- "vkEnumerateInstanceLayerProperties"));
+ vkGetInstanceProcAddr(nullptr, "vkEnumerateInstanceLayerProperties"));
if (!vkEnumerateInstanceLayerPropertiesFn) {
DLOG(WARNING) << "Failed to bind vulkan entrypoint: "
<< "vkEnumerateInstanceLayerProperties";
@@ -73,7 +73,7 @@ bool VulkanFunctionPointers::BindInstanceFunctionPointers(
uint32_t api_version,
const gfx::ExtensionSet& enabled_extensions) {
vkCreateDeviceFn = reinterpret_cast<PFN_vkCreateDevice>(
- vkGetInstanceProcAddrFn(vk_instance, "vkCreateDevice"));
+ vkGetInstanceProcAddr(vk_instance, "vkCreateDevice"));
if (!vkCreateDeviceFn) {
DLOG(WARNING) << "Failed to bind vulkan entrypoint: "
<< "vkCreateDevice";
@@ -81,17 +81,27 @@ bool VulkanFunctionPointers::BindInstanceFunctionPointers(
}
vkDestroyInstanceFn = reinterpret_cast<PFN_vkDestroyInstance>(
- vkGetInstanceProcAddrFn(vk_instance, "vkDestroyInstance"));
+ vkGetInstanceProcAddr(vk_instance, "vkDestroyInstance"));
if (!vkDestroyInstanceFn) {
DLOG(WARNING) << "Failed to bind vulkan entrypoint: "
<< "vkDestroyInstance";
return false;
}
+ vkEnumerateDeviceExtensionPropertiesFn =
+ reinterpret_cast<PFN_vkEnumerateDeviceExtensionProperties>(
+ vkGetInstanceProcAddr(vk_instance,
+ "vkEnumerateDeviceExtensionProperties"));
+ if (!vkEnumerateDeviceExtensionPropertiesFn) {
+ DLOG(WARNING) << "Failed to bind vulkan entrypoint: "
+ << "vkEnumerateDeviceExtensionProperties";
+ return false;
+ }
+
vkEnumerateDeviceLayerPropertiesFn =
reinterpret_cast<PFN_vkEnumerateDeviceLayerProperties>(
- vkGetInstanceProcAddrFn(vk_instance,
- "vkEnumerateDeviceLayerProperties"));
+ vkGetInstanceProcAddr(vk_instance,
+ "vkEnumerateDeviceLayerProperties"));
if (!vkEnumerateDeviceLayerPropertiesFn) {
DLOG(WARNING) << "Failed to bind vulkan entrypoint: "
<< "vkEnumerateDeviceLayerProperties";
@@ -100,7 +110,7 @@ bool VulkanFunctionPointers::BindInstanceFunctionPointers(
vkEnumeratePhysicalDevicesFn =
reinterpret_cast<PFN_vkEnumeratePhysicalDevices>(
- vkGetInstanceProcAddrFn(vk_instance, "vkEnumeratePhysicalDevices"));
+ vkGetInstanceProcAddr(vk_instance, "vkEnumeratePhysicalDevices"));
if (!vkEnumeratePhysicalDevicesFn) {
DLOG(WARNING) << "Failed to bind vulkan entrypoint: "
<< "vkEnumeratePhysicalDevices";
@@ -108,7 +118,7 @@ bool VulkanFunctionPointers::BindInstanceFunctionPointers(
}
vkGetDeviceProcAddrFn = reinterpret_cast<PFN_vkGetDeviceProcAddr>(
- vkGetInstanceProcAddrFn(vk_instance, "vkGetDeviceProcAddr"));
+ vkGetInstanceProcAddr(vk_instance, "vkGetDeviceProcAddr"));
if (!vkGetDeviceProcAddrFn) {
DLOG(WARNING) << "Failed to bind vulkan entrypoint: "
<< "vkGetDeviceProcAddr";
@@ -117,7 +127,7 @@ bool VulkanFunctionPointers::BindInstanceFunctionPointers(
vkGetPhysicalDeviceFeaturesFn =
reinterpret_cast<PFN_vkGetPhysicalDeviceFeatures>(
- vkGetInstanceProcAddrFn(vk_instance, "vkGetPhysicalDeviceFeatures"));
+ vkGetInstanceProcAddr(vk_instance, "vkGetPhysicalDeviceFeatures"));
if (!vkGetPhysicalDeviceFeaturesFn) {
DLOG(WARNING) << "Failed to bind vulkan entrypoint: "
<< "vkGetPhysicalDeviceFeatures";
@@ -126,8 +136,8 @@ bool VulkanFunctionPointers::BindInstanceFunctionPointers(
vkGetPhysicalDeviceFormatPropertiesFn =
reinterpret_cast<PFN_vkGetPhysicalDeviceFormatProperties>(
- vkGetInstanceProcAddrFn(vk_instance,
- "vkGetPhysicalDeviceFormatProperties"));
+ vkGetInstanceProcAddr(vk_instance,
+ "vkGetPhysicalDeviceFormatProperties"));
if (!vkGetPhysicalDeviceFormatPropertiesFn) {
DLOG(WARNING) << "Failed to bind vulkan entrypoint: "
<< "vkGetPhysicalDeviceFormatProperties";
@@ -136,8 +146,8 @@ bool VulkanFunctionPointers::BindInstanceFunctionPointers(
vkGetPhysicalDeviceMemoryPropertiesFn =
reinterpret_cast<PFN_vkGetPhysicalDeviceMemoryProperties>(
- vkGetInstanceProcAddrFn(vk_instance,
- "vkGetPhysicalDeviceMemoryProperties"));
+ vkGetInstanceProcAddr(vk_instance,
+ "vkGetPhysicalDeviceMemoryProperties"));
if (!vkGetPhysicalDeviceMemoryPropertiesFn) {
DLOG(WARNING) << "Failed to bind vulkan entrypoint: "
<< "vkGetPhysicalDeviceMemoryProperties";
@@ -146,8 +156,7 @@ bool VulkanFunctionPointers::BindInstanceFunctionPointers(
vkGetPhysicalDevicePropertiesFn =
reinterpret_cast<PFN_vkGetPhysicalDeviceProperties>(
- vkGetInstanceProcAddrFn(vk_instance,
- "vkGetPhysicalDeviceProperties"));
+ vkGetInstanceProcAddr(vk_instance, "vkGetPhysicalDeviceProperties"));
if (!vkGetPhysicalDevicePropertiesFn) {
DLOG(WARNING) << "Failed to bind vulkan entrypoint: "
<< "vkGetPhysicalDeviceProperties";
@@ -156,17 +165,42 @@ bool VulkanFunctionPointers::BindInstanceFunctionPointers(
vkGetPhysicalDeviceQueueFamilyPropertiesFn =
reinterpret_cast<PFN_vkGetPhysicalDeviceQueueFamilyProperties>(
- vkGetInstanceProcAddrFn(vk_instance,
- "vkGetPhysicalDeviceQueueFamilyProperties"));
+ vkGetInstanceProcAddr(vk_instance,
+ "vkGetPhysicalDeviceQueueFamilyProperties"));
if (!vkGetPhysicalDeviceQueueFamilyPropertiesFn) {
DLOG(WARNING) << "Failed to bind vulkan entrypoint: "
<< "vkGetPhysicalDeviceQueueFamilyProperties";
return false;
}
+#if DCHECK_IS_ON()
+ if (gfx::HasExtension(enabled_extensions,
+ VK_EXT_DEBUG_REPORT_EXTENSION_NAME)) {
+ vkCreateDebugReportCallbackEXTFn =
+ reinterpret_cast<PFN_vkCreateDebugReportCallbackEXT>(
+ vkGetInstanceProcAddr(vk_instance,
+ "vkCreateDebugReportCallbackEXT"));
+ if (!vkCreateDebugReportCallbackEXTFn) {
+ DLOG(WARNING) << "Failed to bind vulkan entrypoint: "
+ << "vkCreateDebugReportCallbackEXT";
+ return false;
+ }
+
+ vkDestroyDebugReportCallbackEXTFn =
+ reinterpret_cast<PFN_vkDestroyDebugReportCallbackEXT>(
+ vkGetInstanceProcAddr(vk_instance,
+ "vkDestroyDebugReportCallbackEXT"));
+ if (!vkDestroyDebugReportCallbackEXTFn) {
+ DLOG(WARNING) << "Failed to bind vulkan entrypoint: "
+ << "vkDestroyDebugReportCallbackEXT";
+ return false;
+ }
+ }
+#endif // DCHECK_IS_ON()
+
if (gfx::HasExtension(enabled_extensions, VK_KHR_SURFACE_EXTENSION_NAME)) {
vkDestroySurfaceKHRFn = reinterpret_cast<PFN_vkDestroySurfaceKHR>(
- vkGetInstanceProcAddrFn(vk_instance, "vkDestroySurfaceKHR"));
+ vkGetInstanceProcAddr(vk_instance, "vkDestroySurfaceKHR"));
if (!vkDestroySurfaceKHRFn) {
DLOG(WARNING) << "Failed to bind vulkan entrypoint: "
<< "vkDestroySurfaceKHR";
@@ -175,8 +209,8 @@ bool VulkanFunctionPointers::BindInstanceFunctionPointers(
vkGetPhysicalDeviceSurfaceCapabilitiesKHRFn =
reinterpret_cast<PFN_vkGetPhysicalDeviceSurfaceCapabilitiesKHR>(
- vkGetInstanceProcAddrFn(
- vk_instance, "vkGetPhysicalDeviceSurfaceCapabilitiesKHR"));
+ vkGetInstanceProcAddr(vk_instance,
+ "vkGetPhysicalDeviceSurfaceCapabilitiesKHR"));
if (!vkGetPhysicalDeviceSurfaceCapabilitiesKHRFn) {
DLOG(WARNING) << "Failed to bind vulkan entrypoint: "
<< "vkGetPhysicalDeviceSurfaceCapabilitiesKHR";
@@ -185,8 +219,8 @@ bool VulkanFunctionPointers::BindInstanceFunctionPointers(
vkGetPhysicalDeviceSurfaceFormatsKHRFn =
reinterpret_cast<PFN_vkGetPhysicalDeviceSurfaceFormatsKHR>(
- vkGetInstanceProcAddrFn(vk_instance,
- "vkGetPhysicalDeviceSurfaceFormatsKHR"));
+ vkGetInstanceProcAddr(vk_instance,
+ "vkGetPhysicalDeviceSurfaceFormatsKHR"));
if (!vkGetPhysicalDeviceSurfaceFormatsKHRFn) {
DLOG(WARNING) << "Failed to bind vulkan entrypoint: "
<< "vkGetPhysicalDeviceSurfaceFormatsKHR";
@@ -195,8 +229,8 @@ bool VulkanFunctionPointers::BindInstanceFunctionPointers(
vkGetPhysicalDeviceSurfaceSupportKHRFn =
reinterpret_cast<PFN_vkGetPhysicalDeviceSurfaceSupportKHR>(
- vkGetInstanceProcAddrFn(vk_instance,
- "vkGetPhysicalDeviceSurfaceSupportKHR"));
+ vkGetInstanceProcAddr(vk_instance,
+ "vkGetPhysicalDeviceSurfaceSupportKHR"));
if (!vkGetPhysicalDeviceSurfaceSupportKHRFn) {
DLOG(WARNING) << "Failed to bind vulkan entrypoint: "
<< "vkGetPhysicalDeviceSurfaceSupportKHR";
@@ -208,7 +242,7 @@ bool VulkanFunctionPointers::BindInstanceFunctionPointers(
if (gfx::HasExtension(enabled_extensions,
VK_KHR_XLIB_SURFACE_EXTENSION_NAME)) {
vkCreateXlibSurfaceKHRFn = reinterpret_cast<PFN_vkCreateXlibSurfaceKHR>(
- vkGetInstanceProcAddrFn(vk_instance, "vkCreateXlibSurfaceKHR"));
+ vkGetInstanceProcAddr(vk_instance, "vkCreateXlibSurfaceKHR"));
if (!vkCreateXlibSurfaceKHRFn) {
DLOG(WARNING) << "Failed to bind vulkan entrypoint: "
<< "vkCreateXlibSurfaceKHR";
@@ -217,7 +251,7 @@ bool VulkanFunctionPointers::BindInstanceFunctionPointers(
vkGetPhysicalDeviceXlibPresentationSupportKHRFn =
reinterpret_cast<PFN_vkGetPhysicalDeviceXlibPresentationSupportKHR>(
- vkGetInstanceProcAddrFn(
+ vkGetInstanceProcAddr(
vk_instance, "vkGetPhysicalDeviceXlibPresentationSupportKHR"));
if (!vkGetPhysicalDeviceXlibPresentationSupportKHRFn) {
DLOG(WARNING) << "Failed to bind vulkan entrypoint: "
@@ -227,12 +261,35 @@ bool VulkanFunctionPointers::BindInstanceFunctionPointers(
}
#endif // defined(USE_VULKAN_XLIB)
+#if defined(OS_WIN)
+ if (gfx::HasExtension(enabled_extensions,
+ VK_KHR_WIN32_SURFACE_EXTENSION_NAME)) {
+ vkCreateWin32SurfaceKHRFn = reinterpret_cast<PFN_vkCreateWin32SurfaceKHR>(
+ vkGetInstanceProcAddr(vk_instance, "vkCreateWin32SurfaceKHR"));
+ if (!vkCreateWin32SurfaceKHRFn) {
+ DLOG(WARNING) << "Failed to bind vulkan entrypoint: "
+ << "vkCreateWin32SurfaceKHR";
+ return false;
+ }
+
+ vkGetPhysicalDeviceWin32PresentationSupportKHRFn =
+ reinterpret_cast<PFN_vkGetPhysicalDeviceWin32PresentationSupportKHR>(
+ vkGetInstanceProcAddr(
+ vk_instance, "vkGetPhysicalDeviceWin32PresentationSupportKHR"));
+ if (!vkGetPhysicalDeviceWin32PresentationSupportKHRFn) {
+ DLOG(WARNING) << "Failed to bind vulkan entrypoint: "
+ << "vkGetPhysicalDeviceWin32PresentationSupportKHR";
+ return false;
+ }
+ }
+#endif // defined(OS_WIN)
+
#if defined(OS_ANDROID)
if (gfx::HasExtension(enabled_extensions,
VK_KHR_ANDROID_SURFACE_EXTENSION_NAME)) {
vkCreateAndroidSurfaceKHRFn =
reinterpret_cast<PFN_vkCreateAndroidSurfaceKHR>(
- vkGetInstanceProcAddrFn(vk_instance, "vkCreateAndroidSurfaceKHR"));
+ vkGetInstanceProcAddr(vk_instance, "vkCreateAndroidSurfaceKHR"));
if (!vkCreateAndroidSurfaceKHRFn) {
DLOG(WARNING) << "Failed to bind vulkan entrypoint: "
<< "vkCreateAndroidSurfaceKHR";
@@ -246,8 +303,8 @@ bool VulkanFunctionPointers::BindInstanceFunctionPointers(
VK_FUCHSIA_IMAGEPIPE_SURFACE_EXTENSION_NAME)) {
vkCreateImagePipeSurfaceFUCHSIAFn =
reinterpret_cast<PFN_vkCreateImagePipeSurfaceFUCHSIA>(
- vkGetInstanceProcAddrFn(vk_instance,
- "vkCreateImagePipeSurfaceFUCHSIA"));
+ vkGetInstanceProcAddr(vk_instance,
+ "vkCreateImagePipeSurfaceFUCHSIA"));
if (!vkCreateImagePipeSurfaceFUCHSIAFn) {
DLOG(WARNING) << "Failed to bind vulkan entrypoint: "
<< "vkCreateImagePipeSurfaceFUCHSIA";
@@ -259,8 +316,8 @@ bool VulkanFunctionPointers::BindInstanceFunctionPointers(
if (api_version >= VK_API_VERSION_1_1) {
vkGetPhysicalDeviceImageFormatProperties2Fn =
reinterpret_cast<PFN_vkGetPhysicalDeviceImageFormatProperties2>(
- vkGetInstanceProcAddrFn(
- vk_instance, "vkGetPhysicalDeviceImageFormatProperties2"));
+ vkGetInstanceProcAddr(vk_instance,
+ "vkGetPhysicalDeviceImageFormatProperties2"));
if (!vkGetPhysicalDeviceImageFormatProperties2Fn) {
DLOG(WARNING) << "Failed to bind vulkan entrypoint: "
<< "vkGetPhysicalDeviceImageFormatProperties2";
@@ -271,8 +328,7 @@ bool VulkanFunctionPointers::BindInstanceFunctionPointers(
if (api_version >= VK_API_VERSION_1_1) {
vkGetPhysicalDeviceFeatures2Fn =
reinterpret_cast<PFN_vkGetPhysicalDeviceFeatures2>(
- vkGetInstanceProcAddrFn(vk_instance,
- "vkGetPhysicalDeviceFeatures2"));
+ vkGetInstanceProcAddr(vk_instance, "vkGetPhysicalDeviceFeatures2"));
if (!vkGetPhysicalDeviceFeatures2Fn) {
DLOG(WARNING) << "Failed to bind vulkan entrypoint: "
<< "vkGetPhysicalDeviceFeatures2";
@@ -284,8 +340,8 @@ bool VulkanFunctionPointers::BindInstanceFunctionPointers(
VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME)) {
vkGetPhysicalDeviceFeatures2Fn =
reinterpret_cast<PFN_vkGetPhysicalDeviceFeatures2>(
- vkGetInstanceProcAddrFn(vk_instance,
- "vkGetPhysicalDeviceFeatures2KHR"));
+ vkGetInstanceProcAddr(vk_instance,
+ "vkGetPhysicalDeviceFeatures2KHR"));
if (!vkGetPhysicalDeviceFeatures2Fn) {
DLOG(WARNING) << "Failed to bind vulkan entrypoint: "
<< "vkGetPhysicalDeviceFeatures2KHR";
@@ -302,7 +358,7 @@ bool VulkanFunctionPointers::BindDeviceFunctionPointers(
const gfx::ExtensionSet& enabled_extensions) {
// Device functions
vkAllocateCommandBuffersFn = reinterpret_cast<PFN_vkAllocateCommandBuffers>(
- vkGetDeviceProcAddrFn(vk_device, "vkAllocateCommandBuffers"));
+ vkGetDeviceProcAddr(vk_device, "vkAllocateCommandBuffers"));
if (!vkAllocateCommandBuffersFn) {
DLOG(WARNING) << "Failed to bind vulkan entrypoint: "
<< "vkAllocateCommandBuffers";
@@ -310,7 +366,7 @@ bool VulkanFunctionPointers::BindDeviceFunctionPointers(
}
vkAllocateDescriptorSetsFn = reinterpret_cast<PFN_vkAllocateDescriptorSets>(
- vkGetDeviceProcAddrFn(vk_device, "vkAllocateDescriptorSets"));
+ vkGetDeviceProcAddr(vk_device, "vkAllocateDescriptorSets"));
if (!vkAllocateDescriptorSetsFn) {
DLOG(WARNING) << "Failed to bind vulkan entrypoint: "
<< "vkAllocateDescriptorSets";
@@ -318,7 +374,7 @@ bool VulkanFunctionPointers::BindDeviceFunctionPointers(
}
vkAllocateMemoryFn = reinterpret_cast<PFN_vkAllocateMemory>(
- vkGetDeviceProcAddrFn(vk_device, "vkAllocateMemory"));
+ vkGetDeviceProcAddr(vk_device, "vkAllocateMemory"));
if (!vkAllocateMemoryFn) {
DLOG(WARNING) << "Failed to bind vulkan entrypoint: "
<< "vkAllocateMemory";
@@ -326,7 +382,7 @@ bool VulkanFunctionPointers::BindDeviceFunctionPointers(
}
vkBeginCommandBufferFn = reinterpret_cast<PFN_vkBeginCommandBuffer>(
- vkGetDeviceProcAddrFn(vk_device, "vkBeginCommandBuffer"));
+ vkGetDeviceProcAddr(vk_device, "vkBeginCommandBuffer"));
if (!vkBeginCommandBufferFn) {
DLOG(WARNING) << "Failed to bind vulkan entrypoint: "
<< "vkBeginCommandBuffer";
@@ -334,7 +390,7 @@ bool VulkanFunctionPointers::BindDeviceFunctionPointers(
}
vkBindBufferMemoryFn = reinterpret_cast<PFN_vkBindBufferMemory>(
- vkGetDeviceProcAddrFn(vk_device, "vkBindBufferMemory"));
+ vkGetDeviceProcAddr(vk_device, "vkBindBufferMemory"));
if (!vkBindBufferMemoryFn) {
DLOG(WARNING) << "Failed to bind vulkan entrypoint: "
<< "vkBindBufferMemory";
@@ -342,7 +398,7 @@ bool VulkanFunctionPointers::BindDeviceFunctionPointers(
}
vkBindImageMemoryFn = reinterpret_cast<PFN_vkBindImageMemory>(
- vkGetDeviceProcAddrFn(vk_device, "vkBindImageMemory"));
+ vkGetDeviceProcAddr(vk_device, "vkBindImageMemory"));
if (!vkBindImageMemoryFn) {
DLOG(WARNING) << "Failed to bind vulkan entrypoint: "
<< "vkBindImageMemory";
@@ -350,7 +406,7 @@ bool VulkanFunctionPointers::BindDeviceFunctionPointers(
}
vkCmdBeginRenderPassFn = reinterpret_cast<PFN_vkCmdBeginRenderPass>(
- vkGetDeviceProcAddrFn(vk_device, "vkCmdBeginRenderPass"));
+ vkGetDeviceProcAddr(vk_device, "vkCmdBeginRenderPass"));
if (!vkCmdBeginRenderPassFn) {
DLOG(WARNING) << "Failed to bind vulkan entrypoint: "
<< "vkCmdBeginRenderPass";
@@ -358,7 +414,7 @@ bool VulkanFunctionPointers::BindDeviceFunctionPointers(
}
vkCmdCopyBufferToImageFn = reinterpret_cast<PFN_vkCmdCopyBufferToImage>(
- vkGetDeviceProcAddrFn(vk_device, "vkCmdCopyBufferToImage"));
+ vkGetDeviceProcAddr(vk_device, "vkCmdCopyBufferToImage"));
if (!vkCmdCopyBufferToImageFn) {
DLOG(WARNING) << "Failed to bind vulkan entrypoint: "
<< "vkCmdCopyBufferToImage";
@@ -366,7 +422,7 @@ bool VulkanFunctionPointers::BindDeviceFunctionPointers(
}
vkCmdEndRenderPassFn = reinterpret_cast<PFN_vkCmdEndRenderPass>(
- vkGetDeviceProcAddrFn(vk_device, "vkCmdEndRenderPass"));
+ vkGetDeviceProcAddr(vk_device, "vkCmdEndRenderPass"));
if (!vkCmdEndRenderPassFn) {
DLOG(WARNING) << "Failed to bind vulkan entrypoint: "
<< "vkCmdEndRenderPass";
@@ -374,7 +430,7 @@ bool VulkanFunctionPointers::BindDeviceFunctionPointers(
}
vkCmdExecuteCommandsFn = reinterpret_cast<PFN_vkCmdExecuteCommands>(
- vkGetDeviceProcAddrFn(vk_device, "vkCmdExecuteCommands"));
+ vkGetDeviceProcAddr(vk_device, "vkCmdExecuteCommands"));
if (!vkCmdExecuteCommandsFn) {
DLOG(WARNING) << "Failed to bind vulkan entrypoint: "
<< "vkCmdExecuteCommands";
@@ -382,7 +438,7 @@ bool VulkanFunctionPointers::BindDeviceFunctionPointers(
}
vkCmdNextSubpassFn = reinterpret_cast<PFN_vkCmdNextSubpass>(
- vkGetDeviceProcAddrFn(vk_device, "vkCmdNextSubpass"));
+ vkGetDeviceProcAddr(vk_device, "vkCmdNextSubpass"));
if (!vkCmdNextSubpassFn) {
DLOG(WARNING) << "Failed to bind vulkan entrypoint: "
<< "vkCmdNextSubpass";
@@ -390,7 +446,7 @@ bool VulkanFunctionPointers::BindDeviceFunctionPointers(
}
vkCmdPipelineBarrierFn = reinterpret_cast<PFN_vkCmdPipelineBarrier>(
- vkGetDeviceProcAddrFn(vk_device, "vkCmdPipelineBarrier"));
+ vkGetDeviceProcAddr(vk_device, "vkCmdPipelineBarrier"));
if (!vkCmdPipelineBarrierFn) {
DLOG(WARNING) << "Failed to bind vulkan entrypoint: "
<< "vkCmdPipelineBarrier";
@@ -398,7 +454,7 @@ bool VulkanFunctionPointers::BindDeviceFunctionPointers(
}
vkCreateBufferFn = reinterpret_cast<PFN_vkCreateBuffer>(
- vkGetDeviceProcAddrFn(vk_device, "vkCreateBuffer"));
+ vkGetDeviceProcAddr(vk_device, "vkCreateBuffer"));
if (!vkCreateBufferFn) {
DLOG(WARNING) << "Failed to bind vulkan entrypoint: "
<< "vkCreateBuffer";
@@ -406,7 +462,7 @@ bool VulkanFunctionPointers::BindDeviceFunctionPointers(
}
vkCreateCommandPoolFn = reinterpret_cast<PFN_vkCreateCommandPool>(
- vkGetDeviceProcAddrFn(vk_device, "vkCreateCommandPool"));
+ vkGetDeviceProcAddr(vk_device, "vkCreateCommandPool"));
if (!vkCreateCommandPoolFn) {
DLOG(WARNING) << "Failed to bind vulkan entrypoint: "
<< "vkCreateCommandPool";
@@ -414,7 +470,7 @@ bool VulkanFunctionPointers::BindDeviceFunctionPointers(
}
vkCreateDescriptorPoolFn = reinterpret_cast<PFN_vkCreateDescriptorPool>(
- vkGetDeviceProcAddrFn(vk_device, "vkCreateDescriptorPool"));
+ vkGetDeviceProcAddr(vk_device, "vkCreateDescriptorPool"));
if (!vkCreateDescriptorPoolFn) {
DLOG(WARNING) << "Failed to bind vulkan entrypoint: "
<< "vkCreateDescriptorPool";
@@ -423,7 +479,7 @@ bool VulkanFunctionPointers::BindDeviceFunctionPointers(
vkCreateDescriptorSetLayoutFn =
reinterpret_cast<PFN_vkCreateDescriptorSetLayout>(
- vkGetDeviceProcAddrFn(vk_device, "vkCreateDescriptorSetLayout"));
+ vkGetDeviceProcAddr(vk_device, "vkCreateDescriptorSetLayout"));
if (!vkCreateDescriptorSetLayoutFn) {
DLOG(WARNING) << "Failed to bind vulkan entrypoint: "
<< "vkCreateDescriptorSetLayout";
@@ -431,7 +487,7 @@ bool VulkanFunctionPointers::BindDeviceFunctionPointers(
}
vkCreateFenceFn = reinterpret_cast<PFN_vkCreateFence>(
- vkGetDeviceProcAddrFn(vk_device, "vkCreateFence"));
+ vkGetDeviceProcAddr(vk_device, "vkCreateFence"));
if (!vkCreateFenceFn) {
DLOG(WARNING) << "Failed to bind vulkan entrypoint: "
<< "vkCreateFence";
@@ -439,7 +495,7 @@ bool VulkanFunctionPointers::BindDeviceFunctionPointers(
}
vkCreateFramebufferFn = reinterpret_cast<PFN_vkCreateFramebuffer>(
- vkGetDeviceProcAddrFn(vk_device, "vkCreateFramebuffer"));
+ vkGetDeviceProcAddr(vk_device, "vkCreateFramebuffer"));
if (!vkCreateFramebufferFn) {
DLOG(WARNING) << "Failed to bind vulkan entrypoint: "
<< "vkCreateFramebuffer";
@@ -447,7 +503,7 @@ bool VulkanFunctionPointers::BindDeviceFunctionPointers(
}
vkCreateImageFn = reinterpret_cast<PFN_vkCreateImage>(
- vkGetDeviceProcAddrFn(vk_device, "vkCreateImage"));
+ vkGetDeviceProcAddr(vk_device, "vkCreateImage"));
if (!vkCreateImageFn) {
DLOG(WARNING) << "Failed to bind vulkan entrypoint: "
<< "vkCreateImage";
@@ -455,7 +511,7 @@ bool VulkanFunctionPointers::BindDeviceFunctionPointers(
}
vkCreateImageViewFn = reinterpret_cast<PFN_vkCreateImageView>(
- vkGetDeviceProcAddrFn(vk_device, "vkCreateImageView"));
+ vkGetDeviceProcAddr(vk_device, "vkCreateImageView"));
if (!vkCreateImageViewFn) {
DLOG(WARNING) << "Failed to bind vulkan entrypoint: "
<< "vkCreateImageView";
@@ -463,7 +519,7 @@ bool VulkanFunctionPointers::BindDeviceFunctionPointers(
}
vkCreateRenderPassFn = reinterpret_cast<PFN_vkCreateRenderPass>(
- vkGetDeviceProcAddrFn(vk_device, "vkCreateRenderPass"));
+ vkGetDeviceProcAddr(vk_device, "vkCreateRenderPass"));
if (!vkCreateRenderPassFn) {
DLOG(WARNING) << "Failed to bind vulkan entrypoint: "
<< "vkCreateRenderPass";
@@ -471,7 +527,7 @@ bool VulkanFunctionPointers::BindDeviceFunctionPointers(
}
vkCreateSamplerFn = reinterpret_cast<PFN_vkCreateSampler>(
- vkGetDeviceProcAddrFn(vk_device, "vkCreateSampler"));
+ vkGetDeviceProcAddr(vk_device, "vkCreateSampler"));
if (!vkCreateSamplerFn) {
DLOG(WARNING) << "Failed to bind vulkan entrypoint: "
<< "vkCreateSampler";
@@ -479,7 +535,7 @@ bool VulkanFunctionPointers::BindDeviceFunctionPointers(
}
vkCreateSemaphoreFn = reinterpret_cast<PFN_vkCreateSemaphore>(
- vkGetDeviceProcAddrFn(vk_device, "vkCreateSemaphore"));
+ vkGetDeviceProcAddr(vk_device, "vkCreateSemaphore"));
if (!vkCreateSemaphoreFn) {
DLOG(WARNING) << "Failed to bind vulkan entrypoint: "
<< "vkCreateSemaphore";
@@ -487,7 +543,7 @@ bool VulkanFunctionPointers::BindDeviceFunctionPointers(
}
vkCreateShaderModuleFn = reinterpret_cast<PFN_vkCreateShaderModule>(
- vkGetDeviceProcAddrFn(vk_device, "vkCreateShaderModule"));
+ vkGetDeviceProcAddr(vk_device, "vkCreateShaderModule"));
if (!vkCreateShaderModuleFn) {
DLOG(WARNING) << "Failed to bind vulkan entrypoint: "
<< "vkCreateShaderModule";
@@ -495,7 +551,7 @@ bool VulkanFunctionPointers::BindDeviceFunctionPointers(
}
vkDestroyBufferFn = reinterpret_cast<PFN_vkDestroyBuffer>(
- vkGetDeviceProcAddrFn(vk_device, "vkDestroyBuffer"));
+ vkGetDeviceProcAddr(vk_device, "vkDestroyBuffer"));
if (!vkDestroyBufferFn) {
DLOG(WARNING) << "Failed to bind vulkan entrypoint: "
<< "vkDestroyBuffer";
@@ -503,7 +559,7 @@ bool VulkanFunctionPointers::BindDeviceFunctionPointers(
}
vkDestroyCommandPoolFn = reinterpret_cast<PFN_vkDestroyCommandPool>(
- vkGetDeviceProcAddrFn(vk_device, "vkDestroyCommandPool"));
+ vkGetDeviceProcAddr(vk_device, "vkDestroyCommandPool"));
if (!vkDestroyCommandPoolFn) {
DLOG(WARNING) << "Failed to bind vulkan entrypoint: "
<< "vkDestroyCommandPool";
@@ -511,7 +567,7 @@ bool VulkanFunctionPointers::BindDeviceFunctionPointers(
}
vkDestroyDescriptorPoolFn = reinterpret_cast<PFN_vkDestroyDescriptorPool>(
- vkGetDeviceProcAddrFn(vk_device, "vkDestroyDescriptorPool"));
+ vkGetDeviceProcAddr(vk_device, "vkDestroyDescriptorPool"));
if (!vkDestroyDescriptorPoolFn) {
DLOG(WARNING) << "Failed to bind vulkan entrypoint: "
<< "vkDestroyDescriptorPool";
@@ -520,7 +576,7 @@ bool VulkanFunctionPointers::BindDeviceFunctionPointers(
vkDestroyDescriptorSetLayoutFn =
reinterpret_cast<PFN_vkDestroyDescriptorSetLayout>(
- vkGetDeviceProcAddrFn(vk_device, "vkDestroyDescriptorSetLayout"));
+ vkGetDeviceProcAddr(vk_device, "vkDestroyDescriptorSetLayout"));
if (!vkDestroyDescriptorSetLayoutFn) {
DLOG(WARNING) << "Failed to bind vulkan entrypoint: "
<< "vkDestroyDescriptorSetLayout";
@@ -528,7 +584,7 @@ bool VulkanFunctionPointers::BindDeviceFunctionPointers(
}
vkDestroyDeviceFn = reinterpret_cast<PFN_vkDestroyDevice>(
- vkGetDeviceProcAddrFn(vk_device, "vkDestroyDevice"));
+ vkGetDeviceProcAddr(vk_device, "vkDestroyDevice"));
if (!vkDestroyDeviceFn) {
DLOG(WARNING) << "Failed to bind vulkan entrypoint: "
<< "vkDestroyDevice";
@@ -536,7 +592,7 @@ bool VulkanFunctionPointers::BindDeviceFunctionPointers(
}
vkDestroyFenceFn = reinterpret_cast<PFN_vkDestroyFence>(
- vkGetDeviceProcAddrFn(vk_device, "vkDestroyFence"));
+ vkGetDeviceProcAddr(vk_device, "vkDestroyFence"));
if (!vkDestroyFenceFn) {
DLOG(WARNING) << "Failed to bind vulkan entrypoint: "
<< "vkDestroyFence";
@@ -544,7 +600,7 @@ bool VulkanFunctionPointers::BindDeviceFunctionPointers(
}
vkDestroyFramebufferFn = reinterpret_cast<PFN_vkDestroyFramebuffer>(
- vkGetDeviceProcAddrFn(vk_device, "vkDestroyFramebuffer"));
+ vkGetDeviceProcAddr(vk_device, "vkDestroyFramebuffer"));
if (!vkDestroyFramebufferFn) {
DLOG(WARNING) << "Failed to bind vulkan entrypoint: "
<< "vkDestroyFramebuffer";
@@ -552,7 +608,7 @@ bool VulkanFunctionPointers::BindDeviceFunctionPointers(
}
vkDestroyImageFn = reinterpret_cast<PFN_vkDestroyImage>(
- vkGetDeviceProcAddrFn(vk_device, "vkDestroyImage"));
+ vkGetDeviceProcAddr(vk_device, "vkDestroyImage"));
if (!vkDestroyImageFn) {
DLOG(WARNING) << "Failed to bind vulkan entrypoint: "
<< "vkDestroyImage";
@@ -560,7 +616,7 @@ bool VulkanFunctionPointers::BindDeviceFunctionPointers(
}
vkDestroyImageViewFn = reinterpret_cast<PFN_vkDestroyImageView>(
- vkGetDeviceProcAddrFn(vk_device, "vkDestroyImageView"));
+ vkGetDeviceProcAddr(vk_device, "vkDestroyImageView"));
if (!vkDestroyImageViewFn) {
DLOG(WARNING) << "Failed to bind vulkan entrypoint: "
<< "vkDestroyImageView";
@@ -568,7 +624,7 @@ bool VulkanFunctionPointers::BindDeviceFunctionPointers(
}
vkDestroyRenderPassFn = reinterpret_cast<PFN_vkDestroyRenderPass>(
- vkGetDeviceProcAddrFn(vk_device, "vkDestroyRenderPass"));
+ vkGetDeviceProcAddr(vk_device, "vkDestroyRenderPass"));
if (!vkDestroyRenderPassFn) {
DLOG(WARNING) << "Failed to bind vulkan entrypoint: "
<< "vkDestroyRenderPass";
@@ -576,7 +632,7 @@ bool VulkanFunctionPointers::BindDeviceFunctionPointers(
}
vkDestroySamplerFn = reinterpret_cast<PFN_vkDestroySampler>(
- vkGetDeviceProcAddrFn(vk_device, "vkDestroySampler"));
+ vkGetDeviceProcAddr(vk_device, "vkDestroySampler"));
if (!vkDestroySamplerFn) {
DLOG(WARNING) << "Failed to bind vulkan entrypoint: "
<< "vkDestroySampler";
@@ -584,7 +640,7 @@ bool VulkanFunctionPointers::BindDeviceFunctionPointers(
}
vkDestroySemaphoreFn = reinterpret_cast<PFN_vkDestroySemaphore>(
- vkGetDeviceProcAddrFn(vk_device, "vkDestroySemaphore"));
+ vkGetDeviceProcAddr(vk_device, "vkDestroySemaphore"));
if (!vkDestroySemaphoreFn) {
DLOG(WARNING) << "Failed to bind vulkan entrypoint: "
<< "vkDestroySemaphore";
@@ -592,7 +648,7 @@ bool VulkanFunctionPointers::BindDeviceFunctionPointers(
}
vkDestroyShaderModuleFn = reinterpret_cast<PFN_vkDestroyShaderModule>(
- vkGetDeviceProcAddrFn(vk_device, "vkDestroyShaderModule"));
+ vkGetDeviceProcAddr(vk_device, "vkDestroyShaderModule"));
if (!vkDestroyShaderModuleFn) {
DLOG(WARNING) << "Failed to bind vulkan entrypoint: "
<< "vkDestroyShaderModule";
@@ -600,7 +656,7 @@ bool VulkanFunctionPointers::BindDeviceFunctionPointers(
}
vkDeviceWaitIdleFn = reinterpret_cast<PFN_vkDeviceWaitIdle>(
- vkGetDeviceProcAddrFn(vk_device, "vkDeviceWaitIdle"));
+ vkGetDeviceProcAddr(vk_device, "vkDeviceWaitIdle"));
if (!vkDeviceWaitIdleFn) {
DLOG(WARNING) << "Failed to bind vulkan entrypoint: "
<< "vkDeviceWaitIdle";
@@ -608,7 +664,7 @@ bool VulkanFunctionPointers::BindDeviceFunctionPointers(
}
vkEndCommandBufferFn = reinterpret_cast<PFN_vkEndCommandBuffer>(
- vkGetDeviceProcAddrFn(vk_device, "vkEndCommandBuffer"));
+ vkGetDeviceProcAddr(vk_device, "vkEndCommandBuffer"));
if (!vkEndCommandBufferFn) {
DLOG(WARNING) << "Failed to bind vulkan entrypoint: "
<< "vkEndCommandBuffer";
@@ -616,7 +672,7 @@ bool VulkanFunctionPointers::BindDeviceFunctionPointers(
}
vkFreeCommandBuffersFn = reinterpret_cast<PFN_vkFreeCommandBuffers>(
- vkGetDeviceProcAddrFn(vk_device, "vkFreeCommandBuffers"));
+ vkGetDeviceProcAddr(vk_device, "vkFreeCommandBuffers"));
if (!vkFreeCommandBuffersFn) {
DLOG(WARNING) << "Failed to bind vulkan entrypoint: "
<< "vkFreeCommandBuffers";
@@ -624,7 +680,7 @@ bool VulkanFunctionPointers::BindDeviceFunctionPointers(
}
vkFreeDescriptorSetsFn = reinterpret_cast<PFN_vkFreeDescriptorSets>(
- vkGetDeviceProcAddrFn(vk_device, "vkFreeDescriptorSets"));
+ vkGetDeviceProcAddr(vk_device, "vkFreeDescriptorSets"));
if (!vkFreeDescriptorSetsFn) {
DLOG(WARNING) << "Failed to bind vulkan entrypoint: "
<< "vkFreeDescriptorSets";
@@ -632,7 +688,7 @@ bool VulkanFunctionPointers::BindDeviceFunctionPointers(
}
vkFreeMemoryFn = reinterpret_cast<PFN_vkFreeMemory>(
- vkGetDeviceProcAddrFn(vk_device, "vkFreeMemory"));
+ vkGetDeviceProcAddr(vk_device, "vkFreeMemory"));
if (!vkFreeMemoryFn) {
DLOG(WARNING) << "Failed to bind vulkan entrypoint: "
<< "vkFreeMemory";
@@ -641,7 +697,7 @@ bool VulkanFunctionPointers::BindDeviceFunctionPointers(
vkGetBufferMemoryRequirementsFn =
reinterpret_cast<PFN_vkGetBufferMemoryRequirements>(
- vkGetDeviceProcAddrFn(vk_device, "vkGetBufferMemoryRequirements"));
+ vkGetDeviceProcAddr(vk_device, "vkGetBufferMemoryRequirements"));
if (!vkGetBufferMemoryRequirementsFn) {
DLOG(WARNING) << "Failed to bind vulkan entrypoint: "
<< "vkGetBufferMemoryRequirements";
@@ -649,7 +705,7 @@ bool VulkanFunctionPointers::BindDeviceFunctionPointers(
}
vkGetDeviceQueueFn = reinterpret_cast<PFN_vkGetDeviceQueue>(
- vkGetDeviceProcAddrFn(vk_device, "vkGetDeviceQueue"));
+ vkGetDeviceProcAddr(vk_device, "vkGetDeviceQueue"));
if (!vkGetDeviceQueueFn) {
DLOG(WARNING) << "Failed to bind vulkan entrypoint: "
<< "vkGetDeviceQueue";
@@ -657,7 +713,7 @@ bool VulkanFunctionPointers::BindDeviceFunctionPointers(
}
vkGetFenceStatusFn = reinterpret_cast<PFN_vkGetFenceStatus>(
- vkGetDeviceProcAddrFn(vk_device, "vkGetFenceStatus"));
+ vkGetDeviceProcAddr(vk_device, "vkGetFenceStatus"));
if (!vkGetFenceStatusFn) {
DLOG(WARNING) << "Failed to bind vulkan entrypoint: "
<< "vkGetFenceStatus";
@@ -666,7 +722,7 @@ bool VulkanFunctionPointers::BindDeviceFunctionPointers(
vkGetImageMemoryRequirementsFn =
reinterpret_cast<PFN_vkGetImageMemoryRequirements>(
- vkGetDeviceProcAddrFn(vk_device, "vkGetImageMemoryRequirements"));
+ vkGetDeviceProcAddr(vk_device, "vkGetImageMemoryRequirements"));
if (!vkGetImageMemoryRequirementsFn) {
DLOG(WARNING) << "Failed to bind vulkan entrypoint: "
<< "vkGetImageMemoryRequirements";
@@ -674,7 +730,7 @@ bool VulkanFunctionPointers::BindDeviceFunctionPointers(
}
vkMapMemoryFn = reinterpret_cast<PFN_vkMapMemory>(
- vkGetDeviceProcAddrFn(vk_device, "vkMapMemory"));
+ vkGetDeviceProcAddr(vk_device, "vkMapMemory"));
if (!vkMapMemoryFn) {
DLOG(WARNING) << "Failed to bind vulkan entrypoint: "
<< "vkMapMemory";
@@ -682,7 +738,7 @@ bool VulkanFunctionPointers::BindDeviceFunctionPointers(
}
vkQueueSubmitFn = reinterpret_cast<PFN_vkQueueSubmit>(
- vkGetDeviceProcAddrFn(vk_device, "vkQueueSubmit"));
+ vkGetDeviceProcAddr(vk_device, "vkQueueSubmit"));
if (!vkQueueSubmitFn) {
DLOG(WARNING) << "Failed to bind vulkan entrypoint: "
<< "vkQueueSubmit";
@@ -690,7 +746,7 @@ bool VulkanFunctionPointers::BindDeviceFunctionPointers(
}
vkQueueWaitIdleFn = reinterpret_cast<PFN_vkQueueWaitIdle>(
- vkGetDeviceProcAddrFn(vk_device, "vkQueueWaitIdle"));
+ vkGetDeviceProcAddr(vk_device, "vkQueueWaitIdle"));
if (!vkQueueWaitIdleFn) {
DLOG(WARNING) << "Failed to bind vulkan entrypoint: "
<< "vkQueueWaitIdle";
@@ -698,7 +754,7 @@ bool VulkanFunctionPointers::BindDeviceFunctionPointers(
}
vkResetCommandBufferFn = reinterpret_cast<PFN_vkResetCommandBuffer>(
- vkGetDeviceProcAddrFn(vk_device, "vkResetCommandBuffer"));
+ vkGetDeviceProcAddr(vk_device, "vkResetCommandBuffer"));
if (!vkResetCommandBufferFn) {
DLOG(WARNING) << "Failed to bind vulkan entrypoint: "
<< "vkResetCommandBuffer";
@@ -706,7 +762,7 @@ bool VulkanFunctionPointers::BindDeviceFunctionPointers(
}
vkResetFencesFn = reinterpret_cast<PFN_vkResetFences>(
- vkGetDeviceProcAddrFn(vk_device, "vkResetFences"));
+ vkGetDeviceProcAddr(vk_device, "vkResetFences"));
if (!vkResetFencesFn) {
DLOG(WARNING) << "Failed to bind vulkan entrypoint: "
<< "vkResetFences";
@@ -714,7 +770,7 @@ bool VulkanFunctionPointers::BindDeviceFunctionPointers(
}
vkUnmapMemoryFn = reinterpret_cast<PFN_vkUnmapMemory>(
- vkGetDeviceProcAddrFn(vk_device, "vkUnmapMemory"));
+ vkGetDeviceProcAddr(vk_device, "vkUnmapMemory"));
if (!vkUnmapMemoryFn) {
DLOG(WARNING) << "Failed to bind vulkan entrypoint: "
<< "vkUnmapMemory";
@@ -722,7 +778,7 @@ bool VulkanFunctionPointers::BindDeviceFunctionPointers(
}
vkUpdateDescriptorSetsFn = reinterpret_cast<PFN_vkUpdateDescriptorSets>(
- vkGetDeviceProcAddrFn(vk_device, "vkUpdateDescriptorSets"));
+ vkGetDeviceProcAddr(vk_device, "vkUpdateDescriptorSets"));
if (!vkUpdateDescriptorSetsFn) {
DLOG(WARNING) << "Failed to bind vulkan entrypoint: "
<< "vkUpdateDescriptorSets";
@@ -730,7 +786,7 @@ bool VulkanFunctionPointers::BindDeviceFunctionPointers(
}
vkWaitForFencesFn = reinterpret_cast<PFN_vkWaitForFences>(
- vkGetDeviceProcAddrFn(vk_device, "vkWaitForFences"));
+ vkGetDeviceProcAddr(vk_device, "vkWaitForFences"));
if (!vkWaitForFencesFn) {
DLOG(WARNING) << "Failed to bind vulkan entrypoint: "
<< "vkWaitForFences";
@@ -739,7 +795,7 @@ bool VulkanFunctionPointers::BindDeviceFunctionPointers(
if (api_version >= VK_API_VERSION_1_1) {
vkGetDeviceQueue2Fn = reinterpret_cast<PFN_vkGetDeviceQueue2>(
- vkGetDeviceProcAddrFn(vk_device, "vkGetDeviceQueue2"));
+ vkGetDeviceProcAddr(vk_device, "vkGetDeviceQueue2"));
if (!vkGetDeviceQueue2Fn) {
DLOG(WARNING) << "Failed to bind vulkan entrypoint: "
<< "vkGetDeviceQueue2";
@@ -748,7 +804,7 @@ bool VulkanFunctionPointers::BindDeviceFunctionPointers(
vkGetImageMemoryRequirements2Fn =
reinterpret_cast<PFN_vkGetImageMemoryRequirements2>(
- vkGetDeviceProcAddrFn(vk_device, "vkGetImageMemoryRequirements2"));
+ vkGetDeviceProcAddr(vk_device, "vkGetImageMemoryRequirements2"));
if (!vkGetImageMemoryRequirements2Fn) {
DLOG(WARNING) << "Failed to bind vulkan entrypoint: "
<< "vkGetImageMemoryRequirements2";
@@ -762,8 +818,8 @@ bool VulkanFunctionPointers::BindDeviceFunctionPointers(
VK_ANDROID_EXTERNAL_MEMORY_ANDROID_HARDWARE_BUFFER_EXTENSION_NAME)) {
vkGetAndroidHardwareBufferPropertiesANDROIDFn =
reinterpret_cast<PFN_vkGetAndroidHardwareBufferPropertiesANDROID>(
- vkGetDeviceProcAddrFn(
- vk_device, "vkGetAndroidHardwareBufferPropertiesANDROID"));
+ vkGetDeviceProcAddr(vk_device,
+ "vkGetAndroidHardwareBufferPropertiesANDROID"));
if (!vkGetAndroidHardwareBufferPropertiesANDROIDFn) {
DLOG(WARNING) << "Failed to bind vulkan entrypoint: "
<< "vkGetAndroidHardwareBufferPropertiesANDROID";
@@ -776,7 +832,7 @@ bool VulkanFunctionPointers::BindDeviceFunctionPointers(
if (gfx::HasExtension(enabled_extensions,
VK_KHR_EXTERNAL_SEMAPHORE_FD_EXTENSION_NAME)) {
vkGetSemaphoreFdKHRFn = reinterpret_cast<PFN_vkGetSemaphoreFdKHR>(
- vkGetDeviceProcAddrFn(vk_device, "vkGetSemaphoreFdKHR"));
+ vkGetDeviceProcAddr(vk_device, "vkGetSemaphoreFdKHR"));
if (!vkGetSemaphoreFdKHRFn) {
DLOG(WARNING) << "Failed to bind vulkan entrypoint: "
<< "vkGetSemaphoreFdKHR";
@@ -784,7 +840,7 @@ bool VulkanFunctionPointers::BindDeviceFunctionPointers(
}
vkImportSemaphoreFdKHRFn = reinterpret_cast<PFN_vkImportSemaphoreFdKHR>(
- vkGetDeviceProcAddrFn(vk_device, "vkImportSemaphoreFdKHR"));
+ vkGetDeviceProcAddr(vk_device, "vkImportSemaphoreFdKHR"));
if (!vkImportSemaphoreFdKHRFn) {
DLOG(WARNING) << "Failed to bind vulkan entrypoint: "
<< "vkImportSemaphoreFdKHR";
@@ -793,11 +849,11 @@ bool VulkanFunctionPointers::BindDeviceFunctionPointers(
}
#endif // defined(OS_LINUX) || defined(OS_ANDROID)
-#if defined(OS_LINUX)
+#if defined(OS_LINUX) || defined(OS_ANDROID)
if (gfx::HasExtension(enabled_extensions,
VK_KHR_EXTERNAL_MEMORY_FD_EXTENSION_NAME)) {
vkGetMemoryFdKHRFn = reinterpret_cast<PFN_vkGetMemoryFdKHR>(
- vkGetDeviceProcAddrFn(vk_device, "vkGetMemoryFdKHR"));
+ vkGetDeviceProcAddr(vk_device, "vkGetMemoryFdKHR"));
if (!vkGetMemoryFdKHRFn) {
DLOG(WARNING) << "Failed to bind vulkan entrypoint: "
<< "vkGetMemoryFdKHR";
@@ -806,22 +862,22 @@ bool VulkanFunctionPointers::BindDeviceFunctionPointers(
vkGetMemoryFdPropertiesKHRFn =
reinterpret_cast<PFN_vkGetMemoryFdPropertiesKHR>(
- vkGetDeviceProcAddrFn(vk_device, "vkGetMemoryFdPropertiesKHR"));
+ vkGetDeviceProcAddr(vk_device, "vkGetMemoryFdPropertiesKHR"));
if (!vkGetMemoryFdPropertiesKHRFn) {
DLOG(WARNING) << "Failed to bind vulkan entrypoint: "
<< "vkGetMemoryFdPropertiesKHR";
return false;
}
}
-#endif // defined(OS_LINUX)
+#endif // defined(OS_LINUX) || defined(OS_ANDROID)
#if defined(OS_FUCHSIA)
if (gfx::HasExtension(enabled_extensions,
VK_FUCHSIA_EXTERNAL_SEMAPHORE_EXTENSION_NAME)) {
vkImportSemaphoreZirconHandleFUCHSIAFn =
reinterpret_cast<PFN_vkImportSemaphoreZirconHandleFUCHSIA>(
- vkGetDeviceProcAddrFn(vk_device,
- "vkImportSemaphoreZirconHandleFUCHSIA"));
+ vkGetDeviceProcAddr(vk_device,
+ "vkImportSemaphoreZirconHandleFUCHSIA"));
if (!vkImportSemaphoreZirconHandleFUCHSIAFn) {
DLOG(WARNING) << "Failed to bind vulkan entrypoint: "
<< "vkImportSemaphoreZirconHandleFUCHSIA";
@@ -830,8 +886,8 @@ bool VulkanFunctionPointers::BindDeviceFunctionPointers(
vkGetSemaphoreZirconHandleFUCHSIAFn =
reinterpret_cast<PFN_vkGetSemaphoreZirconHandleFUCHSIA>(
- vkGetDeviceProcAddrFn(vk_device,
- "vkGetSemaphoreZirconHandleFUCHSIA"));
+ vkGetDeviceProcAddr(vk_device,
+ "vkGetSemaphoreZirconHandleFUCHSIA"));
if (!vkGetSemaphoreZirconHandleFUCHSIAFn) {
DLOG(WARNING) << "Failed to bind vulkan entrypoint: "
<< "vkGetSemaphoreZirconHandleFUCHSIA";
@@ -842,11 +898,24 @@ bool VulkanFunctionPointers::BindDeviceFunctionPointers(
#if defined(OS_FUCHSIA)
if (gfx::HasExtension(enabled_extensions,
+ VK_FUCHSIA_EXTERNAL_MEMORY_EXTENSION_NAME)) {
+ vkGetMemoryZirconHandleFUCHSIAFn =
+ reinterpret_cast<PFN_vkGetMemoryZirconHandleFUCHSIA>(
+ vkGetDeviceProcAddr(vk_device, "vkGetMemoryZirconHandleFUCHSIA"));
+ if (!vkGetMemoryZirconHandleFUCHSIAFn) {
+ DLOG(WARNING) << "Failed to bind vulkan entrypoint: "
+ << "vkGetMemoryZirconHandleFUCHSIA";
+ return false;
+ }
+ }
+#endif // defined(OS_FUCHSIA)
+
+#if defined(OS_FUCHSIA)
+ if (gfx::HasExtension(enabled_extensions,
VK_FUCHSIA_BUFFER_COLLECTION_EXTENSION_NAME)) {
vkCreateBufferCollectionFUCHSIAFn =
reinterpret_cast<PFN_vkCreateBufferCollectionFUCHSIA>(
- vkGetDeviceProcAddrFn(vk_device,
- "vkCreateBufferCollectionFUCHSIA"));
+ vkGetDeviceProcAddr(vk_device, "vkCreateBufferCollectionFUCHSIA"));
if (!vkCreateBufferCollectionFUCHSIAFn) {
DLOG(WARNING) << "Failed to bind vulkan entrypoint: "
<< "vkCreateBufferCollectionFUCHSIA";
@@ -855,8 +924,8 @@ bool VulkanFunctionPointers::BindDeviceFunctionPointers(
vkSetBufferCollectionConstraintsFUCHSIAFn =
reinterpret_cast<PFN_vkSetBufferCollectionConstraintsFUCHSIA>(
- vkGetDeviceProcAddrFn(vk_device,
- "vkSetBufferCollectionConstraintsFUCHSIA"));
+ vkGetDeviceProcAddr(vk_device,
+ "vkSetBufferCollectionConstraintsFUCHSIA"));
if (!vkSetBufferCollectionConstraintsFUCHSIAFn) {
DLOG(WARNING) << "Failed to bind vulkan entrypoint: "
<< "vkSetBufferCollectionConstraintsFUCHSIA";
@@ -865,8 +934,8 @@ bool VulkanFunctionPointers::BindDeviceFunctionPointers(
vkGetBufferCollectionPropertiesFUCHSIAFn =
reinterpret_cast<PFN_vkGetBufferCollectionPropertiesFUCHSIA>(
- vkGetDeviceProcAddrFn(vk_device,
- "vkGetBufferCollectionPropertiesFUCHSIA"));
+ vkGetDeviceProcAddr(vk_device,
+ "vkGetBufferCollectionPropertiesFUCHSIA"));
if (!vkGetBufferCollectionPropertiesFUCHSIAFn) {
DLOG(WARNING) << "Failed to bind vulkan entrypoint: "
<< "vkGetBufferCollectionPropertiesFUCHSIA";
@@ -875,8 +944,7 @@ bool VulkanFunctionPointers::BindDeviceFunctionPointers(
vkDestroyBufferCollectionFUCHSIAFn =
reinterpret_cast<PFN_vkDestroyBufferCollectionFUCHSIA>(
- vkGetDeviceProcAddrFn(vk_device,
- "vkDestroyBufferCollectionFUCHSIA"));
+ vkGetDeviceProcAddr(vk_device, "vkDestroyBufferCollectionFUCHSIA"));
if (!vkDestroyBufferCollectionFUCHSIAFn) {
DLOG(WARNING) << "Failed to bind vulkan entrypoint: "
<< "vkDestroyBufferCollectionFUCHSIA";
@@ -887,7 +955,7 @@ bool VulkanFunctionPointers::BindDeviceFunctionPointers(
if (gfx::HasExtension(enabled_extensions, VK_KHR_SWAPCHAIN_EXTENSION_NAME)) {
vkAcquireNextImageKHRFn = reinterpret_cast<PFN_vkAcquireNextImageKHR>(
- vkGetDeviceProcAddrFn(vk_device, "vkAcquireNextImageKHR"));
+ vkGetDeviceProcAddr(vk_device, "vkAcquireNextImageKHR"));
if (!vkAcquireNextImageKHRFn) {
DLOG(WARNING) << "Failed to bind vulkan entrypoint: "
<< "vkAcquireNextImageKHR";
@@ -895,7 +963,7 @@ bool VulkanFunctionPointers::BindDeviceFunctionPointers(
}
vkCreateSwapchainKHRFn = reinterpret_cast<PFN_vkCreateSwapchainKHR>(
- vkGetDeviceProcAddrFn(vk_device, "vkCreateSwapchainKHR"));
+ vkGetDeviceProcAddr(vk_device, "vkCreateSwapchainKHR"));
if (!vkCreateSwapchainKHRFn) {
DLOG(WARNING) << "Failed to bind vulkan entrypoint: "
<< "vkCreateSwapchainKHR";
@@ -903,7 +971,7 @@ bool VulkanFunctionPointers::BindDeviceFunctionPointers(
}
vkDestroySwapchainKHRFn = reinterpret_cast<PFN_vkDestroySwapchainKHR>(
- vkGetDeviceProcAddrFn(vk_device, "vkDestroySwapchainKHR"));
+ vkGetDeviceProcAddr(vk_device, "vkDestroySwapchainKHR"));
if (!vkDestroySwapchainKHRFn) {
DLOG(WARNING) << "Failed to bind vulkan entrypoint: "
<< "vkDestroySwapchainKHR";
@@ -911,7 +979,7 @@ bool VulkanFunctionPointers::BindDeviceFunctionPointers(
}
vkGetSwapchainImagesKHRFn = reinterpret_cast<PFN_vkGetSwapchainImagesKHR>(
- vkGetDeviceProcAddrFn(vk_device, "vkGetSwapchainImagesKHR"));
+ vkGetDeviceProcAddr(vk_device, "vkGetSwapchainImagesKHR"));
if (!vkGetSwapchainImagesKHRFn) {
DLOG(WARNING) << "Failed to bind vulkan entrypoint: "
<< "vkGetSwapchainImagesKHR";
@@ -919,7 +987,7 @@ bool VulkanFunctionPointers::BindDeviceFunctionPointers(
}
vkQueuePresentKHRFn = reinterpret_cast<PFN_vkQueuePresentKHR>(
- vkGetDeviceProcAddrFn(vk_device, "vkQueuePresentKHR"));
+ vkGetDeviceProcAddr(vk_device, "vkQueuePresentKHR"));
if (!vkQueuePresentKHRFn) {
DLOG(WARNING) << "Failed to bind vulkan entrypoint: "
<< "vkQueuePresentKHR";
diff --git a/chromium/gpu/vulkan/vulkan_function_pointers.h b/chromium/gpu/vulkan/vulkan_function_pointers.h
index 78295b6439c..290a7ea2c88 100644
--- a/chromium/gpu/vulkan/vulkan_function_pointers.h
+++ b/chromium/gpu/vulkan/vulkan_function_pointers.h
@@ -13,6 +13,7 @@
#include <vulkan/vulkan.h>
+#include "base/compiler_specific.h"
#include "base/native_library.h"
#include "build/build_config.h"
#include "gpu/vulkan/vulkan_export.h"
@@ -35,6 +36,10 @@
#include <vulkan/vulkan_xlib.h>
#endif
+#if defined(OS_WIN)
+#include <vulkan/vulkan_win32.h>
+#endif
+
namespace gpu {
struct VulkanFunctionPointers;
@@ -59,158 +64,207 @@ struct VulkanFunctionPointers {
uint32_t api_version,
const gfx::ExtensionSet& enabled_extensions);
- base::NativeLibrary vulkan_loader_library_ = nullptr;
+ base::NativeLibrary vulkan_loader_library = nullptr;
+
+ template <typename T>
+ class VulkanFunction;
+ template <typename R, typename... Args>
+ class VulkanFunction<R(VKAPI_PTR*)(Args...)> {
+ public:
+ explicit operator bool() { return !!fn_; }
+
+ NO_SANITIZE("cfi-icall")
+ R operator()(Args... args) { return fn_(args...); }
+
+ private:
+ friend VulkanFunctionPointers;
+ using Fn = R(VKAPI_PTR*)(Args...);
+
+ Fn operator=(Fn fn) {
+ fn_ = fn;
+ return fn_;
+ }
+
+ Fn fn_ = nullptr;
+ };
// Unassociated functions
- PFN_vkEnumerateInstanceVersion vkEnumerateInstanceVersionFn = nullptr;
- PFN_vkGetInstanceProcAddr vkGetInstanceProcAddrFn = nullptr;
- PFN_vkCreateInstance vkCreateInstanceFn = nullptr;
- PFN_vkEnumerateInstanceExtensionProperties
- vkEnumerateInstanceExtensionPropertiesFn = nullptr;
- PFN_vkEnumerateInstanceLayerProperties vkEnumerateInstanceLayerPropertiesFn =
- nullptr;
+ VulkanFunction<PFN_vkEnumerateInstanceVersion> vkEnumerateInstanceVersionFn;
+ VulkanFunction<PFN_vkGetInstanceProcAddr> vkGetInstanceProcAddrFn;
+
+ VulkanFunction<PFN_vkCreateInstance> vkCreateInstanceFn;
+ VulkanFunction<PFN_vkEnumerateInstanceExtensionProperties>
+ vkEnumerateInstanceExtensionPropertiesFn;
+ VulkanFunction<PFN_vkEnumerateInstanceLayerProperties>
+ vkEnumerateInstanceLayerPropertiesFn;
// Instance functions
- PFN_vkCreateDevice vkCreateDeviceFn = nullptr;
- PFN_vkDestroyInstance vkDestroyInstanceFn = nullptr;
- PFN_vkEnumerateDeviceLayerProperties vkEnumerateDeviceLayerPropertiesFn =
- nullptr;
- PFN_vkEnumeratePhysicalDevices vkEnumeratePhysicalDevicesFn = nullptr;
- PFN_vkGetDeviceProcAddr vkGetDeviceProcAddrFn = nullptr;
- PFN_vkGetPhysicalDeviceFeatures vkGetPhysicalDeviceFeaturesFn = nullptr;
- PFN_vkGetPhysicalDeviceFormatProperties
- vkGetPhysicalDeviceFormatPropertiesFn = nullptr;
- PFN_vkGetPhysicalDeviceMemoryProperties
- vkGetPhysicalDeviceMemoryPropertiesFn = nullptr;
- PFN_vkGetPhysicalDeviceProperties vkGetPhysicalDevicePropertiesFn = nullptr;
- PFN_vkGetPhysicalDeviceQueueFamilyProperties
- vkGetPhysicalDeviceQueueFamilyPropertiesFn = nullptr;
-
- PFN_vkDestroySurfaceKHR vkDestroySurfaceKHRFn = nullptr;
- PFN_vkGetPhysicalDeviceSurfaceCapabilitiesKHR
- vkGetPhysicalDeviceSurfaceCapabilitiesKHRFn = nullptr;
- PFN_vkGetPhysicalDeviceSurfaceFormatsKHR
- vkGetPhysicalDeviceSurfaceFormatsKHRFn = nullptr;
- PFN_vkGetPhysicalDeviceSurfaceSupportKHR
- vkGetPhysicalDeviceSurfaceSupportKHRFn = nullptr;
+ VulkanFunction<PFN_vkCreateDevice> vkCreateDeviceFn;
+ VulkanFunction<PFN_vkDestroyInstance> vkDestroyInstanceFn;
+ VulkanFunction<PFN_vkEnumerateDeviceExtensionProperties>
+ vkEnumerateDeviceExtensionPropertiesFn;
+ VulkanFunction<PFN_vkEnumerateDeviceLayerProperties>
+ vkEnumerateDeviceLayerPropertiesFn;
+ VulkanFunction<PFN_vkEnumeratePhysicalDevices> vkEnumeratePhysicalDevicesFn;
+ VulkanFunction<PFN_vkGetDeviceProcAddr> vkGetDeviceProcAddrFn;
+ VulkanFunction<PFN_vkGetPhysicalDeviceFeatures> vkGetPhysicalDeviceFeaturesFn;
+ VulkanFunction<PFN_vkGetPhysicalDeviceFormatProperties>
+ vkGetPhysicalDeviceFormatPropertiesFn;
+ VulkanFunction<PFN_vkGetPhysicalDeviceMemoryProperties>
+ vkGetPhysicalDeviceMemoryPropertiesFn;
+ VulkanFunction<PFN_vkGetPhysicalDeviceProperties>
+ vkGetPhysicalDevicePropertiesFn;
+ VulkanFunction<PFN_vkGetPhysicalDeviceQueueFamilyProperties>
+ vkGetPhysicalDeviceQueueFamilyPropertiesFn;
+
+#if DCHECK_IS_ON()
+ VulkanFunction<PFN_vkCreateDebugReportCallbackEXT>
+ vkCreateDebugReportCallbackEXTFn;
+ VulkanFunction<PFN_vkDestroyDebugReportCallbackEXT>
+ vkDestroyDebugReportCallbackEXTFn;
+#endif // DCHECK_IS_ON()
+
+ VulkanFunction<PFN_vkDestroySurfaceKHR> vkDestroySurfaceKHRFn;
+ VulkanFunction<PFN_vkGetPhysicalDeviceSurfaceCapabilitiesKHR>
+ vkGetPhysicalDeviceSurfaceCapabilitiesKHRFn;
+ VulkanFunction<PFN_vkGetPhysicalDeviceSurfaceFormatsKHR>
+ vkGetPhysicalDeviceSurfaceFormatsKHRFn;
+ VulkanFunction<PFN_vkGetPhysicalDeviceSurfaceSupportKHR>
+ vkGetPhysicalDeviceSurfaceSupportKHRFn;
#if defined(USE_VULKAN_XLIB)
- PFN_vkCreateXlibSurfaceKHR vkCreateXlibSurfaceKHRFn = nullptr;
- PFN_vkGetPhysicalDeviceXlibPresentationSupportKHR
- vkGetPhysicalDeviceXlibPresentationSupportKHRFn = nullptr;
+ VulkanFunction<PFN_vkCreateXlibSurfaceKHR> vkCreateXlibSurfaceKHRFn;
+ VulkanFunction<PFN_vkGetPhysicalDeviceXlibPresentationSupportKHR>
+ vkGetPhysicalDeviceXlibPresentationSupportKHRFn;
#endif // defined(USE_VULKAN_XLIB)
+#if defined(OS_WIN)
+ VulkanFunction<PFN_vkCreateWin32SurfaceKHR> vkCreateWin32SurfaceKHRFn;
+ VulkanFunction<PFN_vkGetPhysicalDeviceWin32PresentationSupportKHR>
+ vkGetPhysicalDeviceWin32PresentationSupportKHRFn;
+#endif // defined(OS_WIN)
+
#if defined(OS_ANDROID)
- PFN_vkCreateAndroidSurfaceKHR vkCreateAndroidSurfaceKHRFn = nullptr;
+ VulkanFunction<PFN_vkCreateAndroidSurfaceKHR> vkCreateAndroidSurfaceKHRFn;
#endif // defined(OS_ANDROID)
#if defined(OS_FUCHSIA)
- PFN_vkCreateImagePipeSurfaceFUCHSIA vkCreateImagePipeSurfaceFUCHSIAFn =
- nullptr;
+ VulkanFunction<PFN_vkCreateImagePipeSurfaceFUCHSIA>
+ vkCreateImagePipeSurfaceFUCHSIAFn;
#endif // defined(OS_FUCHSIA)
- PFN_vkGetPhysicalDeviceImageFormatProperties2
- vkGetPhysicalDeviceImageFormatProperties2Fn = nullptr;
+ VulkanFunction<PFN_vkGetPhysicalDeviceImageFormatProperties2>
+ vkGetPhysicalDeviceImageFormatProperties2Fn;
- PFN_vkGetPhysicalDeviceFeatures2 vkGetPhysicalDeviceFeatures2Fn = nullptr;
+ VulkanFunction<PFN_vkGetPhysicalDeviceFeatures2>
+ vkGetPhysicalDeviceFeatures2Fn;
// Device functions
- PFN_vkAllocateCommandBuffers vkAllocateCommandBuffersFn = nullptr;
- PFN_vkAllocateDescriptorSets vkAllocateDescriptorSetsFn = nullptr;
- PFN_vkAllocateMemory vkAllocateMemoryFn = nullptr;
- PFN_vkBeginCommandBuffer vkBeginCommandBufferFn = nullptr;
- PFN_vkBindBufferMemory vkBindBufferMemoryFn = nullptr;
- PFN_vkBindImageMemory vkBindImageMemoryFn = nullptr;
- PFN_vkCmdBeginRenderPass vkCmdBeginRenderPassFn = nullptr;
- PFN_vkCmdCopyBufferToImage vkCmdCopyBufferToImageFn = nullptr;
- PFN_vkCmdEndRenderPass vkCmdEndRenderPassFn = nullptr;
- PFN_vkCmdExecuteCommands vkCmdExecuteCommandsFn = nullptr;
- PFN_vkCmdNextSubpass vkCmdNextSubpassFn = nullptr;
- PFN_vkCmdPipelineBarrier vkCmdPipelineBarrierFn = nullptr;
- PFN_vkCreateBuffer vkCreateBufferFn = nullptr;
- PFN_vkCreateCommandPool vkCreateCommandPoolFn = nullptr;
- PFN_vkCreateDescriptorPool vkCreateDescriptorPoolFn = nullptr;
- PFN_vkCreateDescriptorSetLayout vkCreateDescriptorSetLayoutFn = nullptr;
- PFN_vkCreateFence vkCreateFenceFn = nullptr;
- PFN_vkCreateFramebuffer vkCreateFramebufferFn = nullptr;
- PFN_vkCreateImage vkCreateImageFn = nullptr;
- PFN_vkCreateImageView vkCreateImageViewFn = nullptr;
- PFN_vkCreateRenderPass vkCreateRenderPassFn = nullptr;
- PFN_vkCreateSampler vkCreateSamplerFn = nullptr;
- PFN_vkCreateSemaphore vkCreateSemaphoreFn = nullptr;
- PFN_vkCreateShaderModule vkCreateShaderModuleFn = nullptr;
- PFN_vkDestroyBuffer vkDestroyBufferFn = nullptr;
- PFN_vkDestroyCommandPool vkDestroyCommandPoolFn = nullptr;
- PFN_vkDestroyDescriptorPool vkDestroyDescriptorPoolFn = nullptr;
- PFN_vkDestroyDescriptorSetLayout vkDestroyDescriptorSetLayoutFn = nullptr;
- PFN_vkDestroyDevice vkDestroyDeviceFn = nullptr;
- PFN_vkDestroyFence vkDestroyFenceFn = nullptr;
- PFN_vkDestroyFramebuffer vkDestroyFramebufferFn = nullptr;
- PFN_vkDestroyImage vkDestroyImageFn = nullptr;
- PFN_vkDestroyImageView vkDestroyImageViewFn = nullptr;
- PFN_vkDestroyRenderPass vkDestroyRenderPassFn = nullptr;
- PFN_vkDestroySampler vkDestroySamplerFn = nullptr;
- PFN_vkDestroySemaphore vkDestroySemaphoreFn = nullptr;
- PFN_vkDestroyShaderModule vkDestroyShaderModuleFn = nullptr;
- PFN_vkDeviceWaitIdle vkDeviceWaitIdleFn = nullptr;
- PFN_vkEndCommandBuffer vkEndCommandBufferFn = nullptr;
- PFN_vkFreeCommandBuffers vkFreeCommandBuffersFn = nullptr;
- PFN_vkFreeDescriptorSets vkFreeDescriptorSetsFn = nullptr;
- PFN_vkFreeMemory vkFreeMemoryFn = nullptr;
- PFN_vkGetBufferMemoryRequirements vkGetBufferMemoryRequirementsFn = nullptr;
- PFN_vkGetDeviceQueue vkGetDeviceQueueFn = nullptr;
- PFN_vkGetFenceStatus vkGetFenceStatusFn = nullptr;
- PFN_vkGetImageMemoryRequirements vkGetImageMemoryRequirementsFn = nullptr;
- PFN_vkMapMemory vkMapMemoryFn = nullptr;
- PFN_vkQueueSubmit vkQueueSubmitFn = nullptr;
- PFN_vkQueueWaitIdle vkQueueWaitIdleFn = nullptr;
- PFN_vkResetCommandBuffer vkResetCommandBufferFn = nullptr;
- PFN_vkResetFences vkResetFencesFn = nullptr;
- PFN_vkUnmapMemory vkUnmapMemoryFn = nullptr;
- PFN_vkUpdateDescriptorSets vkUpdateDescriptorSetsFn = nullptr;
- PFN_vkWaitForFences vkWaitForFencesFn = nullptr;
-
- PFN_vkGetDeviceQueue2 vkGetDeviceQueue2Fn = nullptr;
- PFN_vkGetImageMemoryRequirements2 vkGetImageMemoryRequirements2Fn = nullptr;
+ VulkanFunction<PFN_vkAllocateCommandBuffers> vkAllocateCommandBuffersFn;
+ VulkanFunction<PFN_vkAllocateDescriptorSets> vkAllocateDescriptorSetsFn;
+ VulkanFunction<PFN_vkAllocateMemory> vkAllocateMemoryFn;
+ VulkanFunction<PFN_vkBeginCommandBuffer> vkBeginCommandBufferFn;
+ VulkanFunction<PFN_vkBindBufferMemory> vkBindBufferMemoryFn;
+ VulkanFunction<PFN_vkBindImageMemory> vkBindImageMemoryFn;
+ VulkanFunction<PFN_vkCmdBeginRenderPass> vkCmdBeginRenderPassFn;
+ VulkanFunction<PFN_vkCmdCopyBufferToImage> vkCmdCopyBufferToImageFn;
+ VulkanFunction<PFN_vkCmdEndRenderPass> vkCmdEndRenderPassFn;
+ VulkanFunction<PFN_vkCmdExecuteCommands> vkCmdExecuteCommandsFn;
+ VulkanFunction<PFN_vkCmdNextSubpass> vkCmdNextSubpassFn;
+ VulkanFunction<PFN_vkCmdPipelineBarrier> vkCmdPipelineBarrierFn;
+ VulkanFunction<PFN_vkCreateBuffer> vkCreateBufferFn;
+ VulkanFunction<PFN_vkCreateCommandPool> vkCreateCommandPoolFn;
+ VulkanFunction<PFN_vkCreateDescriptorPool> vkCreateDescriptorPoolFn;
+ VulkanFunction<PFN_vkCreateDescriptorSetLayout> vkCreateDescriptorSetLayoutFn;
+ VulkanFunction<PFN_vkCreateFence> vkCreateFenceFn;
+ VulkanFunction<PFN_vkCreateFramebuffer> vkCreateFramebufferFn;
+ VulkanFunction<PFN_vkCreateImage> vkCreateImageFn;
+ VulkanFunction<PFN_vkCreateImageView> vkCreateImageViewFn;
+ VulkanFunction<PFN_vkCreateRenderPass> vkCreateRenderPassFn;
+ VulkanFunction<PFN_vkCreateSampler> vkCreateSamplerFn;
+ VulkanFunction<PFN_vkCreateSemaphore> vkCreateSemaphoreFn;
+ VulkanFunction<PFN_vkCreateShaderModule> vkCreateShaderModuleFn;
+ VulkanFunction<PFN_vkDestroyBuffer> vkDestroyBufferFn;
+ VulkanFunction<PFN_vkDestroyCommandPool> vkDestroyCommandPoolFn;
+ VulkanFunction<PFN_vkDestroyDescriptorPool> vkDestroyDescriptorPoolFn;
+ VulkanFunction<PFN_vkDestroyDescriptorSetLayout>
+ vkDestroyDescriptorSetLayoutFn;
+ VulkanFunction<PFN_vkDestroyDevice> vkDestroyDeviceFn;
+ VulkanFunction<PFN_vkDestroyFence> vkDestroyFenceFn;
+ VulkanFunction<PFN_vkDestroyFramebuffer> vkDestroyFramebufferFn;
+ VulkanFunction<PFN_vkDestroyImage> vkDestroyImageFn;
+ VulkanFunction<PFN_vkDestroyImageView> vkDestroyImageViewFn;
+ VulkanFunction<PFN_vkDestroyRenderPass> vkDestroyRenderPassFn;
+ VulkanFunction<PFN_vkDestroySampler> vkDestroySamplerFn;
+ VulkanFunction<PFN_vkDestroySemaphore> vkDestroySemaphoreFn;
+ VulkanFunction<PFN_vkDestroyShaderModule> vkDestroyShaderModuleFn;
+ VulkanFunction<PFN_vkDeviceWaitIdle> vkDeviceWaitIdleFn;
+ VulkanFunction<PFN_vkEndCommandBuffer> vkEndCommandBufferFn;
+ VulkanFunction<PFN_vkFreeCommandBuffers> vkFreeCommandBuffersFn;
+ VulkanFunction<PFN_vkFreeDescriptorSets> vkFreeDescriptorSetsFn;
+ VulkanFunction<PFN_vkFreeMemory> vkFreeMemoryFn;
+ VulkanFunction<PFN_vkGetBufferMemoryRequirements>
+ vkGetBufferMemoryRequirementsFn;
+ VulkanFunction<PFN_vkGetDeviceQueue> vkGetDeviceQueueFn;
+ VulkanFunction<PFN_vkGetFenceStatus> vkGetFenceStatusFn;
+ VulkanFunction<PFN_vkGetImageMemoryRequirements>
+ vkGetImageMemoryRequirementsFn;
+ VulkanFunction<PFN_vkMapMemory> vkMapMemoryFn;
+ VulkanFunction<PFN_vkQueueSubmit> vkQueueSubmitFn;
+ VulkanFunction<PFN_vkQueueWaitIdle> vkQueueWaitIdleFn;
+ VulkanFunction<PFN_vkResetCommandBuffer> vkResetCommandBufferFn;
+ VulkanFunction<PFN_vkResetFences> vkResetFencesFn;
+ VulkanFunction<PFN_vkUnmapMemory> vkUnmapMemoryFn;
+ VulkanFunction<PFN_vkUpdateDescriptorSets> vkUpdateDescriptorSetsFn;
+ VulkanFunction<PFN_vkWaitForFences> vkWaitForFencesFn;
+
+ VulkanFunction<PFN_vkGetDeviceQueue2> vkGetDeviceQueue2Fn;
+ VulkanFunction<PFN_vkGetImageMemoryRequirements2>
+ vkGetImageMemoryRequirements2Fn;
#if defined(OS_ANDROID)
- PFN_vkGetAndroidHardwareBufferPropertiesANDROID
- vkGetAndroidHardwareBufferPropertiesANDROIDFn = nullptr;
+ VulkanFunction<PFN_vkGetAndroidHardwareBufferPropertiesANDROID>
+ vkGetAndroidHardwareBufferPropertiesANDROIDFn;
#endif // defined(OS_ANDROID)
#if defined(OS_LINUX) || defined(OS_ANDROID)
- PFN_vkGetSemaphoreFdKHR vkGetSemaphoreFdKHRFn = nullptr;
- PFN_vkImportSemaphoreFdKHR vkImportSemaphoreFdKHRFn = nullptr;
+ VulkanFunction<PFN_vkGetSemaphoreFdKHR> vkGetSemaphoreFdKHRFn;
+ VulkanFunction<PFN_vkImportSemaphoreFdKHR> vkImportSemaphoreFdKHRFn;
#endif // defined(OS_LINUX) || defined(OS_ANDROID)
-#if defined(OS_LINUX)
- PFN_vkGetMemoryFdKHR vkGetMemoryFdKHRFn = nullptr;
- PFN_vkGetMemoryFdPropertiesKHR vkGetMemoryFdPropertiesKHRFn = nullptr;
-#endif // defined(OS_LINUX)
+#if defined(OS_LINUX) || defined(OS_ANDROID)
+ VulkanFunction<PFN_vkGetMemoryFdKHR> vkGetMemoryFdKHRFn;
+ VulkanFunction<PFN_vkGetMemoryFdPropertiesKHR> vkGetMemoryFdPropertiesKHRFn;
+#endif // defined(OS_LINUX) || defined(OS_ANDROID)
#if defined(OS_FUCHSIA)
- PFN_vkImportSemaphoreZirconHandleFUCHSIA
- vkImportSemaphoreZirconHandleFUCHSIAFn = nullptr;
- PFN_vkGetSemaphoreZirconHandleFUCHSIA vkGetSemaphoreZirconHandleFUCHSIAFn =
- nullptr;
+ VulkanFunction<PFN_vkImportSemaphoreZirconHandleFUCHSIA>
+ vkImportSemaphoreZirconHandleFUCHSIAFn;
+ VulkanFunction<PFN_vkGetSemaphoreZirconHandleFUCHSIA>
+ vkGetSemaphoreZirconHandleFUCHSIAFn;
#endif // defined(OS_FUCHSIA)
#if defined(OS_FUCHSIA)
- PFN_vkCreateBufferCollectionFUCHSIA vkCreateBufferCollectionFUCHSIAFn =
- nullptr;
- PFN_vkSetBufferCollectionConstraintsFUCHSIA
- vkSetBufferCollectionConstraintsFUCHSIAFn = nullptr;
- PFN_vkGetBufferCollectionPropertiesFUCHSIA
- vkGetBufferCollectionPropertiesFUCHSIAFn = nullptr;
- PFN_vkDestroyBufferCollectionFUCHSIA vkDestroyBufferCollectionFUCHSIAFn =
- nullptr;
+ VulkanFunction<PFN_vkGetMemoryZirconHandleFUCHSIA>
+ vkGetMemoryZirconHandleFUCHSIAFn;
#endif // defined(OS_FUCHSIA)
- PFN_vkAcquireNextImageKHR vkAcquireNextImageKHRFn = nullptr;
- PFN_vkCreateSwapchainKHR vkCreateSwapchainKHRFn = nullptr;
- PFN_vkDestroySwapchainKHR vkDestroySwapchainKHRFn = nullptr;
- PFN_vkGetSwapchainImagesKHR vkGetSwapchainImagesKHRFn = nullptr;
- PFN_vkQueuePresentKHR vkQueuePresentKHRFn = nullptr;
+#if defined(OS_FUCHSIA)
+ VulkanFunction<PFN_vkCreateBufferCollectionFUCHSIA>
+ vkCreateBufferCollectionFUCHSIAFn;
+ VulkanFunction<PFN_vkSetBufferCollectionConstraintsFUCHSIA>
+ vkSetBufferCollectionConstraintsFUCHSIAFn;
+ VulkanFunction<PFN_vkGetBufferCollectionPropertiesFUCHSIA>
+ vkGetBufferCollectionPropertiesFUCHSIAFn;
+ VulkanFunction<PFN_vkDestroyBufferCollectionFUCHSIA>
+ vkDestroyBufferCollectionFUCHSIAFn;
+#endif // defined(OS_FUCHSIA)
+
+ VulkanFunction<PFN_vkAcquireNextImageKHR> vkAcquireNextImageKHRFn;
+ VulkanFunction<PFN_vkCreateSwapchainKHR> vkCreateSwapchainKHRFn;
+ VulkanFunction<PFN_vkDestroySwapchainKHR> vkDestroySwapchainKHRFn;
+ VulkanFunction<PFN_vkGetSwapchainImagesKHR> vkGetSwapchainImagesKHRFn;
+ VulkanFunction<PFN_vkQueuePresentKHR> vkQueuePresentKHRFn;
};
} // namespace gpu
@@ -218,6 +272,8 @@ struct VulkanFunctionPointers {
// Unassociated functions
#define vkGetInstanceProcAddr \
gpu::GetVulkanFunctionPointers()->vkGetInstanceProcAddrFn
+#define vkEnumerateInstanceVersion \
+ gpu::GetVulkanFunctionPointers()->vkEnumerateInstanceVersionFn
#define vkCreateInstance gpu::GetVulkanFunctionPointers()->vkCreateInstanceFn
#define vkEnumerateInstanceExtensionProperties \
@@ -228,6 +284,8 @@ struct VulkanFunctionPointers {
// Instance functions
#define vkCreateDevice gpu::GetVulkanFunctionPointers()->vkCreateDeviceFn
#define vkDestroyInstance gpu::GetVulkanFunctionPointers()->vkDestroyInstanceFn
+#define vkEnumerateDeviceExtensionProperties \
+ gpu::GetVulkanFunctionPointers()->vkEnumerateDeviceExtensionPropertiesFn
#define vkEnumerateDeviceLayerProperties \
gpu::GetVulkanFunctionPointers()->vkEnumerateDeviceLayerPropertiesFn
#define vkEnumeratePhysicalDevices \
@@ -245,6 +303,13 @@ struct VulkanFunctionPointers {
#define vkGetPhysicalDeviceQueueFamilyProperties \
gpu::GetVulkanFunctionPointers()->vkGetPhysicalDeviceQueueFamilyPropertiesFn
+#if DCHECK_IS_ON()
+#define vkCreateDebugReportCallbackEXT \
+ gpu::GetVulkanFunctionPointers()->vkCreateDebugReportCallbackEXTFn
+#define vkDestroyDebugReportCallbackEXT \
+ gpu::GetVulkanFunctionPointers()->vkDestroyDebugReportCallbackEXTFn
+#endif // DCHECK_IS_ON()
+
#define vkDestroySurfaceKHR \
gpu::GetVulkanFunctionPointers()->vkDestroySurfaceKHRFn
#define vkGetPhysicalDeviceSurfaceCapabilitiesKHR \
@@ -262,6 +327,14 @@ struct VulkanFunctionPointers {
->vkGetPhysicalDeviceXlibPresentationSupportKHRFn
#endif // defined(USE_VULKAN_XLIB)
+#if defined(OS_WIN)
+#define vkCreateWin32SurfaceKHR \
+ gpu::GetVulkanFunctionPointers()->vkCreateWin32SurfaceKHRFn
+#define vkGetPhysicalDeviceWin32PresentationSupportKHR \
+ gpu::GetVulkanFunctionPointers() \
+ ->vkGetPhysicalDeviceWin32PresentationSupportKHRFn
+#endif // defined(OS_WIN)
+
#if defined(OS_ANDROID)
#define vkCreateAndroidSurfaceKHR \
gpu::GetVulkanFunctionPointers()->vkCreateAndroidSurfaceKHRFn
@@ -381,11 +454,11 @@ struct VulkanFunctionPointers {
gpu::GetVulkanFunctionPointers()->vkImportSemaphoreFdKHRFn
#endif // defined(OS_LINUX) || defined(OS_ANDROID)
-#if defined(OS_LINUX)
+#if defined(OS_LINUX) || defined(OS_ANDROID)
#define vkGetMemoryFdKHR gpu::GetVulkanFunctionPointers()->vkGetMemoryFdKHRFn
#define vkGetMemoryFdPropertiesKHR \
gpu::GetVulkanFunctionPointers()->vkGetMemoryFdPropertiesKHRFn
-#endif // defined(OS_LINUX)
+#endif // defined(OS_LINUX) || defined(OS_ANDROID)
#if defined(OS_FUCHSIA)
#define vkImportSemaphoreZirconHandleFUCHSIA \
@@ -395,6 +468,11 @@ struct VulkanFunctionPointers {
#endif // defined(OS_FUCHSIA)
#if defined(OS_FUCHSIA)
+#define vkGetMemoryZirconHandleFUCHSIA \
+ gpu::GetVulkanFunctionPointers()->vkGetMemoryZirconHandleFUCHSIAFn
+#endif // defined(OS_FUCHSIA)
+
+#if defined(OS_FUCHSIA)
#define vkCreateBufferCollectionFUCHSIA \
gpu::GetVulkanFunctionPointers()->vkCreateBufferCollectionFUCHSIAFn
#define vkSetBufferCollectionConstraintsFUCHSIA \
@@ -415,4 +493,4 @@ struct VulkanFunctionPointers {
gpu::GetVulkanFunctionPointers()->vkGetSwapchainImagesKHRFn
#define vkQueuePresentKHR gpu::GetVulkanFunctionPointers()->vkQueuePresentKHRFn
-#endif // GPU_VULKAN_VULKAN_FUNCTION_POINTERS_H_
+#endif // GPU_VULKAN_VULKAN_FUNCTION_POINTERS_H_
\ No newline at end of file
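For context on the header refactor above: the macros at the end of vulkan_function_pointers.h keep existing call sites unchanged, so a plain-looking Vulkan call now routes through the VulkanFunction<> wrapper, whose operator() forwards the arguments under NO_SANITIZE("cfi-icall"). A minimal caller sketch, assuming BindDeviceFunctionPointers() already succeeded, that |vk_device| is the VkDevice it was bound against, and that a vkCreateFence macro is among those defined in the header (the fence code itself is illustrative, not part of this patch):

// Illustrative caller; assumes device function pointers are already bound.
VkFenceCreateInfo fence_info = {VK_STRUCTURE_TYPE_FENCE_CREATE_INFO};
VkFence fence = VK_NULL_HANDLE;
// "vkCreateFence" expands to gpu::GetVulkanFunctionPointers()->vkCreateFenceFn,
// i.e. a VulkanFunction<PFN_vkCreateFence>; its operator() calls the
// entrypoint previously fetched via vkGetDeviceProcAddr.
VkResult result =
    vkCreateFence(vk_device, &fence_info, /*pAllocator=*/nullptr, &fence);
if (result != VK_SUCCESS)
  DLOG(ERROR) << "vkCreateFence failed: " << result;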
diff --git a/chromium/gpu/vulkan/vulkan_image.cc b/chromium/gpu/vulkan/vulkan_image.cc
new file mode 100644
index 00000000000..6541e2b9832
--- /dev/null
+++ b/chromium/gpu/vulkan/vulkan_image.cc
@@ -0,0 +1,367 @@
+// Copyright 2020 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "gpu/vulkan/vulkan_image.h"
+
+#include <vulkan/vulkan.h>
+
+#include <algorithm>
+
+#include "base/macros.h"
+#include "base/optional.h"
+#include "base/stl_util.h"
+#include "build/build_config.h"
+#include "gpu/vulkan/vulkan_device_queue.h"
+#include "gpu/vulkan/vulkan_function_pointers.h"
+
+#if defined(OS_FUCHSIA)
+#include "gpu/vulkan/fuchsia/vulkan_fuchsia_ext.h"
+#endif
+
+namespace gpu {
+
+namespace {
+
+base::Optional<uint32_t> FindMemoryTypeIndex(
+ VkPhysicalDevice physical_device,
+ const VkMemoryRequirements* requirements,
+ VkMemoryPropertyFlags flags) {
+ VkPhysicalDeviceMemoryProperties properties;
+ vkGetPhysicalDeviceMemoryProperties(physical_device, &properties);
+ constexpr uint32_t kMaxIndex = 31;
+ for (uint32_t i = 0; i <= kMaxIndex; i++) {
+ if (((1u << i) & requirements->memoryTypeBits) == 0)
+ continue;
+ if ((properties.memoryTypes[i].propertyFlags & flags) != flags)
+ continue;
+ return i;
+ }
+ NOTREACHED();
+ return base::nullopt;
+}
+
+} // namespace
+
+// static
+std::unique_ptr<VulkanImage> VulkanImage::Create(
+ VulkanDeviceQueue* device_queue,
+ const gfx::Size& size,
+ VkFormat format,
+ VkImageUsageFlags usage,
+ VkImageCreateFlags flags,
+ VkImageTiling image_tiling,
+ void* vk_image_create_info_next,
+ void* vk_memory_allocation_info_next) {
+ auto image = std::make_unique<VulkanImage>(util::PassKey<VulkanImage>());
+ if (!image->Initialize(device_queue, size, format, usage, flags, image_tiling,
+ vk_image_create_info_next,
+ vk_memory_allocation_info_next,
+ nullptr /* requirements */)) {
+ return nullptr;
+ }
+ return image;
+}
+
+// static
+std::unique_ptr<VulkanImage> VulkanImage::CreateWithExternalMemory(
+ VulkanDeviceQueue* device_queue,
+ const gfx::Size& size,
+ VkFormat format,
+ VkImageUsageFlags usage,
+ VkImageCreateFlags flags,
+ VkImageTiling image_tiling) {
+ auto image = std::make_unique<VulkanImage>(util::PassKey<VulkanImage>());
+ if (!image->InitializeWithExternalMemory(device_queue, size, format, usage,
+ flags, image_tiling)) {
+ return nullptr;
+ }
+ return image;
+}
+
+// static
+std::unique_ptr<VulkanImage> VulkanImage::CreateFromGpuMemoryBufferHandle(
+ VulkanDeviceQueue* device_queue,
+ gfx::GpuMemoryBufferHandle gmb_handle,
+ const gfx::Size& size,
+ VkFormat format,
+ VkImageUsageFlags usage,
+ VkImageCreateFlags flags,
+ VkImageTiling image_tiling) {
+ auto image = std::make_unique<VulkanImage>(util::PassKey<VulkanImage>());
+ if (!image->InitializeFromGpuMemoryBufferHandle(
+ device_queue, std::move(gmb_handle), size, format, usage, flags,
+ image_tiling)) {
+ return nullptr;
+ }
+ return image;
+}
+
+// static
+std::unique_ptr<VulkanImage> VulkanImage::Create(
+ VulkanDeviceQueue* device_queue,
+ VkImage vk_image,
+ VkDeviceMemory vk_device_memory,
+ const gfx::Size& size,
+ VkFormat format,
+ VkImageTiling image_tiling,
+ VkDeviceSize device_size,
+ uint32_t memory_type_index,
+ base::Optional<VulkanYCbCrInfo>& ycbcr_info) {
+ auto image = std::make_unique<VulkanImage>(util::PassKey<VulkanImage>());
+ image->device_queue_ = device_queue;
+ image->image_ = vk_image;
+ image->device_memory_ = vk_device_memory;
+ image->size_ = size;
+ image->format_ = format;
+ image->image_tiling_ = image_tiling;
+ image->device_size_ = device_size;
+ image->memory_type_index_ = memory_type_index;
+ image->ycbcr_info_ = ycbcr_info;
+ return image;
+}
+
+VulkanImage::VulkanImage(util::PassKey<VulkanImage> pass_key) {}
+
+VulkanImage::~VulkanImage() {
+ DCHECK(!device_queue_);
+ DCHECK(image_ == VK_NULL_HANDLE);
+ DCHECK(device_memory_ == VK_NULL_HANDLE);
+}
+
+void VulkanImage::Destroy() {
+ if (!device_queue_)
+ return;
+ VkDevice vk_device = device_queue_->GetVulkanDevice();
+ if (image_ != VK_NULL_HANDLE) {
+ vkDestroyImage(vk_device, image_, nullptr /* pAllocator */);
+ image_ = VK_NULL_HANDLE;
+ }
+ if (device_memory_ != VK_NULL_HANDLE) {
+ vkFreeMemory(vk_device, device_memory_, nullptr /* pAllocator */);
+ device_memory_ = VK_NULL_HANDLE;
+ }
+ device_queue_ = nullptr;
+}
+
+#if defined(OS_POSIX)
+base::ScopedFD VulkanImage::GetMemoryFd(
+ VkExternalMemoryHandleTypeFlagBits handle_type) {
+ VkMemoryGetFdInfoKHR get_fd_info = {
+ .sType = VK_STRUCTURE_TYPE_MEMORY_GET_FD_INFO_KHR,
+ .memory = device_memory_,
+ .handleType = handle_type,
+
+ };
+
+ VkDevice device = device_queue_->GetVulkanDevice();
+ int memory_fd = -1;
+ vkGetMemoryFdKHR(device, &get_fd_info, &memory_fd);
+ if (memory_fd < 0) {
+ DLOG(ERROR) << "Unable to extract file descriptor out of external VkImage";
+ return base::ScopedFD();
+ }
+
+ return base::ScopedFD(memory_fd);
+}
+#endif
+
+#if defined(OS_FUCHSIA)
+zx::vmo VulkanImage::GetMemoryZirconHandle() {
+ DCHECK(handle_types_ &
+ VK_EXTERNAL_MEMORY_HANDLE_TYPE_TEMP_ZIRCON_VMO_BIT_FUCHSIA);
+ VkMemoryGetZirconHandleInfoFUCHSIA get_handle_info = {
+ .sType = VK_STRUCTURE_TYPE_TEMP_MEMORY_GET_ZIRCON_HANDLE_INFO_FUCHSIA,
+ .memory = device_memory_,
+ .handleType = VK_EXTERNAL_MEMORY_HANDLE_TYPE_TEMP_ZIRCON_VMO_BIT_FUCHSIA,
+ };
+
+ VkDevice device = device_queue_->GetVulkanDevice();
+ zx::vmo vmo;
+ VkResult result = vkGetMemoryZirconHandleFUCHSIA(device, &get_handle_info,
+ vmo.reset_and_get_address());
+ if (result != VK_SUCCESS) {
+ DLOG(ERROR) << "vkGetMemoryFuchsiaHandleKHR failed: " << result;
+ vmo.reset();
+ }
+
+ return vmo;
+}
+#endif
+
+bool VulkanImage::Initialize(VulkanDeviceQueue* device_queue,
+ const gfx::Size& size,
+ VkFormat format,
+ VkImageUsageFlags usage,
+ VkImageCreateFlags flags,
+ VkImageTiling image_tiling,
+ void* vk_image_create_info_next,
+ void* vk_memory_allocation_info_next,
+ const VkMemoryRequirements* requirements) {
+ DCHECK(!device_queue_);
+ DCHECK(image_ == VK_NULL_HANDLE);
+ DCHECK(device_memory_ == VK_NULL_HANDLE);
+
+ device_queue_ = device_queue;
+ size_ = size;
+ format_ = format;
+ flags_ = flags;
+ image_tiling_ = image_tiling;
+
+ VkImageCreateInfo create_info = {
+ .sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO,
+ .pNext = vk_image_create_info_next,
+ .flags = flags_,
+ .imageType = VK_IMAGE_TYPE_2D,
+ .format = format_,
+ .extent = {size.width(), size.height(), 1},
+ .mipLevels = 1,
+ .arrayLayers = 1,
+ .samples = VK_SAMPLE_COUNT_1_BIT,
+ .tiling = image_tiling_,
+ .usage = usage,
+ .sharingMode = VK_SHARING_MODE_EXCLUSIVE,
+ .queueFamilyIndexCount = 0,
+ .pQueueFamilyIndices = nullptr,
+ .initialLayout = image_layout_,
+ };
+ VkDevice vk_device = device_queue->GetVulkanDevice();
+ VkResult result =
+ vkCreateImage(vk_device, &create_info, nullptr /* pAllocator */, &image_);
+ if (result != VK_SUCCESS) {
+ DLOG(ERROR) << "vkCreateImage failed result:" << result;
+    device_queue_ = nullptr;
+ return false;
+ }
+
+ VkMemoryRequirements tmp_requirements;
+ if (!requirements) {
+ vkGetImageMemoryRequirements(vk_device, image_, &tmp_requirements);
+ if (!tmp_requirements.memoryTypeBits) {
+ DLOG(ERROR) << "vkGetImageMemoryRequirements failed";
+ Destroy();
+ return false;
+ }
+ requirements = &tmp_requirements;
+ }
+
+ device_size_ = requirements->size;
+
+  // Some Vulkan implementations require dedicated memory for sharing a memory
+  // object between Vulkan instances.
+ VkMemoryDedicatedAllocateInfoKHR dedicated_memory_info = {
+ .sType = VK_STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO_KHR,
+ .pNext = vk_memory_allocation_info_next,
+ .image = image_,
+ };
+
+ auto index =
+ FindMemoryTypeIndex(device_queue->GetVulkanPhysicalDevice(), requirements,
+ VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT);
+ if (!index) {
+ DLOG(ERROR) << "Cannot find validate memory type index.";
+ Destroy();
+ return false;
+ }
+
+ memory_type_index_ = index.value();
+ VkMemoryAllocateInfo memory_allocate_info = {
+ .sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO,
+ .pNext = &dedicated_memory_info,
+ .allocationSize = device_size_,
+ .memoryTypeIndex = memory_type_index_,
+ };
+
+ result = vkAllocateMemory(vk_device, &memory_allocate_info,
+ nullptr /* pAllocator */, &device_memory_);
+ if (result != VK_SUCCESS) {
+ DLOG(ERROR) << "vkAllocateMemory failed result:" << result;
+ Destroy();
+ return false;
+ }
+
+ result = vkBindImageMemory(vk_device, image_, device_memory_,
+ 0 /* memoryOffset */);
+ if (result != VK_SUCCESS) {
+ DLOG(ERROR) << "Failed to bind memory to external VkImage: " << result;
+ Destroy();
+ return false;
+ }
+
+ return true;
+}
+
+bool VulkanImage::InitializeWithExternalMemory(VulkanDeviceQueue* device_queue,
+ const gfx::Size& size,
+ VkFormat format,
+ VkImageUsageFlags usage,
+ VkImageCreateFlags flags,
+ VkImageTiling image_tiling) {
+#if defined(OS_FUCHSIA)
+ constexpr auto kHandleType =
+ VK_EXTERNAL_MEMORY_HANDLE_TYPE_TEMP_ZIRCON_VMO_BIT_FUCHSIA;
+#else
+ constexpr auto kHandleType = VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT;
+#endif
+
+ VkPhysicalDeviceImageFormatInfo2 format_info_2 = {
+ .sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGE_FORMAT_INFO_2,
+ .format = format,
+ .type = VK_IMAGE_TYPE_2D,
+ .tiling = image_tiling,
+ .usage = usage,
+ .flags = flags,
+ };
+ VkPhysicalDeviceExternalImageFormatInfo external_info = {
+ .sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_IMAGE_FORMAT_INFO,
+ .handleType = kHandleType,
+ };
+ format_info_2.pNext = &external_info;
+
+ VkImageFormatProperties2 image_format_properties_2 = {
+ .sType = VK_STRUCTURE_TYPE_IMAGE_FORMAT_PROPERTIES_2,
+ };
+ VkExternalImageFormatProperties external_image_format_properties = {
+ .sType = VK_STRUCTURE_TYPE_EXTERNAL_IMAGE_FORMAT_PROPERTIES,
+ };
+ image_format_properties_2.pNext = &external_image_format_properties;
+
+ auto result = vkGetPhysicalDeviceImageFormatProperties2(
+ device_queue->GetVulkanPhysicalDevice(), &format_info_2,
+ &image_format_properties_2);
+ if (result != VK_SUCCESS) {
+ DLOG(ERROR) << "External memory is not supported."
+ << " format:" << format << " image_tiling:" << image_tiling
+ << " usage:" << usage << " flags:" << flags;
+ return false;
+ }
+
+ const auto& external_format_properties =
+ external_image_format_properties.externalMemoryProperties;
+ if (!(external_format_properties.externalMemoryFeatures &
+ VK_EXTERNAL_MEMORY_FEATURE_EXPORTABLE_BIT)) {
+ DLOG(ERROR) << "External memroy cannot be exported."
+ << " format:" << format << " image_tiling:" << image_tiling
+ << " usage:" << usage << " flags:" << flags;
+ return false;
+ }
+
+ handle_types_ = external_format_properties.compatibleHandleTypes;
+ DCHECK(handle_types_ & kHandleType);
+
+ VkExternalMemoryImageCreateInfoKHR external_image_create_info = {
+ .sType = VK_STRUCTURE_TYPE_EXTERNAL_MEMORY_IMAGE_CREATE_INFO_KHR,
+ .handleTypes = handle_types_,
+ };
+
+ VkExportMemoryAllocateInfoKHR external_memory_allocate_info = {
+ .sType = VK_STRUCTURE_TYPE_EXPORT_MEMORY_ALLOCATE_INFO_KHR,
+ .handleTypes = handle_types_,
+ };
+
+ return Initialize(device_queue, size, format, usage, flags, image_tiling,
+ &external_image_create_info, &external_memory_allocate_info,
+ nullptr /* requirements */);
+}
+
+} // namespace gpu
\ No newline at end of file
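A rough usage sketch for the new VulkanImage class; the caller below is assumed, not part of the patch, and |device_queue| stands for a valid gpu::VulkanDeviceQueue* created with the relevant external-memory extensions enabled:

// Illustrative caller: create an image with exportable external memory and,
// on POSIX, pull out the opaque fd for sharing with a foreign API.
std::unique_ptr<gpu::VulkanImage> image =
    gpu::VulkanImage::CreateWithExternalMemory(
        device_queue, gfx::Size(256, 256), VK_FORMAT_R8G8B8A8_UNORM,
        VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | VK_IMAGE_USAGE_SAMPLED_BIT);
if (image) {
#if defined(OS_POSIX)
  // Exports the backing VkDeviceMemory as an opaque fd (see GetMemoryFd()).
  base::ScopedFD fd = image->GetMemoryFd();
#endif
  // The destructor DCHECKs that Destroy() was called first.
  image->Destroy();
}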
diff --git a/chromium/gpu/vulkan/vulkan_image.h b/chromium/gpu/vulkan/vulkan_image.h
new file mode 100644
index 00000000000..2c24fb58bcd
--- /dev/null
+++ b/chromium/gpu/vulkan/vulkan_image.h
@@ -0,0 +1,146 @@
+// Copyright 2020 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef GPU_VULKAN_VULKAN_IMAGE_H_
+#define GPU_VULKAN_VULKAN_IMAGE_H_
+
+#include <vulkan/vulkan.h>
+
+#include "base/files/scoped_file.h"
+#include "base/optional.h"
+#include "base/util/type_safety/pass_key.h"
+#include "build/build_config.h"
+#include "gpu/ipc/common/vulkan_ycbcr_info.h"
+#include "gpu/vulkan/vulkan_export.h"
+#include "ui/gfx/geometry/size.h"
+#include "ui/gfx/gpu_memory_buffer.h"
+
+#if defined(OS_FUCHSIA)
+#include <lib/zx/vmo.h>
+#endif
+
+namespace gpu {
+
+class VulkanDeviceQueue;
+
+class VULKAN_EXPORT VulkanImage {
+ public:
+ explicit VulkanImage(util::PassKey<VulkanImage> pass_key);
+ ~VulkanImage();
+
+ VulkanImage(VulkanImage&) = delete;
+ VulkanImage& operator=(VulkanImage&) = delete;
+
+ static std::unique_ptr<VulkanImage> Create(
+ VulkanDeviceQueue* device_queue,
+ const gfx::Size& size,
+ VkFormat format,
+ VkImageUsageFlags usage,
+ VkImageCreateFlags flags = 0,
+ VkImageTiling image_tiling = VK_IMAGE_TILING_OPTIMAL,
+ void* vk_image_create_info_next = nullptr,
+ void* vk_memory_allocation_info_next = nullptr);
+
+  // Creates a VulkanImage backed by external memory, so it can be exported and
+  // used by a foreign API.
+ static std::unique_ptr<VulkanImage> CreateWithExternalMemory(
+ VulkanDeviceQueue* device_queue,
+ const gfx::Size& size,
+ VkFormat format,
+ VkImageUsageFlags usage,
+ VkImageCreateFlags flags = 0,
+ VkImageTiling image_tiling = VK_IMAGE_TILING_OPTIMAL);
+
+ static std::unique_ptr<VulkanImage> CreateFromGpuMemoryBufferHandle(
+ VulkanDeviceQueue* device_queue,
+ gfx::GpuMemoryBufferHandle gmb_handle,
+ const gfx::Size& size,
+ VkFormat format,
+ VkImageUsageFlags usage,
+ VkImageCreateFlags flags = 0,
+ VkImageTiling image_tiling = VK_IMAGE_TILING_OPTIMAL);
+
+ static std::unique_ptr<VulkanImage> Create(
+ VulkanDeviceQueue* device_queue,
+ VkImage image,
+ VkDeviceMemory device_memory,
+ const gfx::Size& size,
+ VkFormat format,
+ VkImageTiling image_tiling,
+ VkDeviceSize device_size,
+ uint32_t memory_type_index,
+ base::Optional<VulkanYCbCrInfo>& ycbcr_info);
+
+ void Destroy();
+
+#if defined(OS_POSIX)
+ base::ScopedFD GetMemoryFd(VkExternalMemoryHandleTypeFlagBits handle_type =
+ VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT);
+#endif
+
+#if defined(OS_FUCHSIA)
+ zx::vmo GetMemoryZirconHandle();
+#endif
+
+ VulkanDeviceQueue* device_queue() const { return device_queue_; }
+ const gfx::Size& size() const { return size_; }
+ VkFormat format() const { return format_; }
+ VkImageCreateFlags flags() const { return flags_; }
+ VkDeviceSize device_size() const { return device_size_; }
+ uint32_t memory_type_index() const { return memory_type_index_; }
+ VkImageTiling image_tiling() const { return image_tiling_; }
+ VkImageLayout image_layout() const { return image_layout_; }
+ void set_image_layout(VkImageLayout layout) { image_layout_ = layout; }
+ uint32_t queue_family_index() const { return queue_family_index_; }
+ void set_queue_family_index(uint32_t index) { queue_family_index_ = index; }
+ const base::Optional<VulkanYCbCrInfo>& ycbcr_info() const {
+ return ycbcr_info_;
+ }
+ VkImage image() const { return image_; }
+ VkDeviceMemory device_memory() const { return device_memory_; }
+ VkExternalMemoryHandleTypeFlags handle_types() const { return handle_types_; }
+
+ private:
+ bool Initialize(VulkanDeviceQueue* device_queue,
+ const gfx::Size& size,
+ VkFormat format,
+ VkImageUsageFlags usage,
+ VkImageCreateFlags flags,
+ VkImageTiling image_tiling,
+ void* image_create_info_next,
+ void* memory_allocation_info_next,
+ const VkMemoryRequirements* requirements);
+ bool InitializeWithExternalMemory(VulkanDeviceQueue* device_queue,
+ const gfx::Size& size,
+ VkFormat format,
+ VkImageUsageFlags usage,
+ VkImageCreateFlags flags,
+ VkImageTiling image_tiling);
+ bool InitializeFromGpuMemoryBufferHandle(
+ VulkanDeviceQueue* device_queue,
+ gfx::GpuMemoryBufferHandle gmb_handle,
+ const gfx::Size& size,
+ VkFormat format,
+ VkImageUsageFlags usage,
+ VkImageCreateFlags flags,
+ VkImageTiling image_tiling);
+
+ VulkanDeviceQueue* device_queue_ = nullptr;
+ gfx::Size size_;
+ VkFormat format_ = VK_FORMAT_UNDEFINED;
+ VkImageCreateFlags flags_ = 0;
+ VkDeviceSize device_size_ = 0;
+ uint32_t memory_type_index_ = 0;
+ VkImageTiling image_tiling_ = VK_IMAGE_TILING_OPTIMAL;
+ VkImageLayout image_layout_ = VK_IMAGE_LAYOUT_UNDEFINED;
+ uint32_t queue_family_index_ = VK_QUEUE_FAMILY_IGNORED;
+ base::Optional<VulkanYCbCrInfo> ycbcr_info_;
+ VkImage image_ = VK_NULL_HANDLE;
+ VkDeviceMemory device_memory_ = VK_NULL_HANDLE;
+ VkExternalMemoryHandleTypeFlags handle_types_ = 0;
+};
+
+} // namespace gpu
+
+#endif // GPU_VULKAN_VULKAN_IMAGE_H_
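One note on the constructor above: util::PassKey<VulkanImage> (from base/util/type_safety/pass_key.h, included by the header) is Chromium's pass-key idiom. PassKey<T> can only be constructed by T itself, so the constructor can stay public, which lets std::make_unique call it inside the static Create() helpers, while remaining unusable to outside callers. A stripped-down sketch of the idiom, with an illustrative class name rather than VulkanImage:

// Illustrative only; |Widget| stands in for VulkanImage.
#include <memory>
// Assumes util::PassKey<T> (private ctor, friend T) is available.
class Widget {
 public:
  // Public so std::make_unique can reach it, but callers cannot construct
  // the util::PassKey<Widget> argument themselves.
  explicit Widget(util::PassKey<Widget>) {}

  static std::unique_ptr<Widget> Create() {
    return std::make_unique<Widget>(util::PassKey<Widget>());
  }
};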
diff --git a/chromium/gpu/vulkan/vulkan_image_android.cc b/chromium/gpu/vulkan/vulkan_image_android.cc
new file mode 100644
index 00000000000..5da67ae4f68
--- /dev/null
+++ b/chromium/gpu/vulkan/vulkan_image_android.cc
@@ -0,0 +1,141 @@
+// Copyright 2020 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "gpu/vulkan/vulkan_image.h"
+
+#include "base/android/android_hardware_buffer_compat.h"
+#include "gpu/vulkan/vulkan_device_queue.h"
+#include "gpu/vulkan/vulkan_function_pointers.h"
+
+namespace gpu {
+
+bool VulkanImage::InitializeFromGpuMemoryBufferHandle(
+ VulkanDeviceQueue* device_queue,
+ gfx::GpuMemoryBufferHandle gmb_handle,
+ const gfx::Size& size,
+ VkFormat format,
+ VkImageUsageFlags usage,
+ VkImageCreateFlags flags,
+ VkImageTiling image_tiling) {
+ if (gmb_handle.type != gfx::GpuMemoryBufferType::ANDROID_HARDWARE_BUFFER) {
+ DLOG(ERROR) << "gmb_handle.type is not supported. type:" << gmb_handle.type;
+ return false;
+ }
+ DCHECK(gmb_handle.android_hardware_buffer.is_valid());
+ auto& ahb_handle = gmb_handle.android_hardware_buffer;
+
+ // To obtain format properties of an Android hardware buffer, include an
+ // instance of VkAndroidHardwareBufferFormatPropertiesANDROID in the pNext
+ // chain of the VkAndroidHardwareBufferPropertiesANDROID instance passed to
+ // vkGetAndroidHardwareBufferPropertiesANDROID.
+ VkAndroidHardwareBufferFormatPropertiesANDROID ahb_format_props = {
+ VK_STRUCTURE_TYPE_ANDROID_HARDWARE_BUFFER_FORMAT_PROPERTIES_ANDROID,
+ };
+ VkAndroidHardwareBufferPropertiesANDROID ahb_props = {
+ .sType = VK_STRUCTURE_TYPE_ANDROID_HARDWARE_BUFFER_PROPERTIES_ANDROID,
+ .pNext = &ahb_format_props,
+ };
+
+ VkDevice vk_device = device_queue->GetVulkanDevice();
+ VkResult result = vkGetAndroidHardwareBufferPropertiesANDROID(
+ vk_device, ahb_handle.get(), &ahb_props);
+ if (result != VK_SUCCESS) {
+ LOG(ERROR)
+ << "GetAhbProps: vkGetAndroidHardwareBufferPropertiesANDROID failed : "
+ << result;
+ return false;
+ }
+
+ // To create an image with an external format, include an instance of
+ // VkExternalFormatANDROID in the pNext chain of VkImageCreateInfo.
+ VkExternalFormatANDROID external_format = {
+ .sType = VK_STRUCTURE_TYPE_EXTERNAL_FORMAT_ANDROID,
+ // If externalFormat is zero, the effect is as if the
+ // VkExternalFormatANDROID structure was not present. Otherwise, the image
+ // will have the specified external format.
+ .externalFormat = 0,
+ };
+
+ // If image has an external format, format must be VK_FORMAT_UNDEFINED.
+ if (ahb_format_props.format == VK_FORMAT_UNDEFINED) {
+ // externalFormat must be 0 or a value returned in the externalFormat member
+ // of VkAndroidHardwareBufferFormatPropertiesANDROID by an earlier call to
+ // vkGetAndroidHardwareBufferPropertiesANDROID.
+ external_format.externalFormat = ahb_format_props.externalFormat;
+ }
+
+ // To define a set of external memory handle types that may be used as backing
+ // store for an image, add a VkExternalMemoryImageCreateInfo structure to the
+ // pNext chain of the VkImageCreateInfo structure.
+ VkExternalMemoryImageCreateInfo external_memory_image_info = {
+ .sType = VK_STRUCTURE_TYPE_EXTERNAL_MEMORY_IMAGE_CREATE_INFO,
+ .pNext = &external_format,
+ .handleTypes =
+ VK_EXTERNAL_MEMORY_HANDLE_TYPE_ANDROID_HARDWARE_BUFFER_BIT_ANDROID,
+ };
+
+ // Get the AHB description.
+ AHardwareBuffer_Desc ahb_desc = {};
+ base::AndroidHardwareBufferCompat::GetInstance().Describe(ahb_handle.get(),
+ &ahb_desc);
+
+ // Intended usage of the image.
+ VkImageUsageFlags usage_flags = 0;
+ // Map the AHB usage bits to the equivalent Vulkan image usage flags.
+ if (ahb_desc.usage & AHARDWAREBUFFER_USAGE_GPU_SAMPLED_IMAGE) {
+ usage_flags = usage_flags | VK_IMAGE_USAGE_SAMPLED_BIT |
+ VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT;
+ }
+ if (ahb_desc.usage & AHARDWAREBUFFER_USAGE_GPU_COLOR_OUTPUT) {
+ usage_flags |= VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT;
+ }
+
+ // TODO(vikassoni): AHARDWAREBUFFER_USAGE_GPU_CUBE_MAP is supported from API
+ // level 28, which is not part of the current android_ndk version in Chromium.
+ // Add the equivalent VK usage later.
+ if (!usage_flags) {
+ LOG(ERROR) << "No valid usage flags found";
+ return false;
+ }
+
+ VkImageCreateFlags create_flags = 0;
+ if (ahb_desc.usage & AHARDWAREBUFFER_USAGE_PROTECTED_CONTENT) {
+ create_flags = VK_IMAGE_CREATE_PROTECTED_BIT;
+ }
+
+ // To import memory created outside of the current Vulkan instance from an
+ // Android hardware buffer, add a VkImportAndroidHardwareBufferInfoANDROID
+ // structure to the pNext chain of the VkMemoryAllocateInfo structure.
+ VkImportAndroidHardwareBufferInfoANDROID ahb_import_info = {
+ .sType = VK_STRUCTURE_TYPE_IMPORT_ANDROID_HARDWARE_BUFFER_INFO_ANDROID,
+ .buffer = ahb_handle.get(),
+ };
+
+ VkMemoryRequirements requirements = {
+ .size = ahb_props.allocationSize,
+ .memoryTypeBits = ahb_props.memoryTypeBits,
+ };
+ if (!Initialize(device_queue, size, ahb_format_props.format, usage_flags,
+ create_flags, VK_IMAGE_TILING_OPTIMAL,
+ &external_memory_image_info, &ahb_import_info,
+ &requirements)) {
+ return false;
+ }
+
+ // VkImage is imported from external.
+ queue_family_index_ = VK_QUEUE_FAMILY_EXTERNAL;
+
+ if (ahb_format_props.format == VK_FORMAT_UNDEFINED) {
+ ycbcr_info_.emplace(VK_FORMAT_UNDEFINED, ahb_format_props.externalFormat,
+ ahb_format_props.suggestedYcbcrModel,
+ ahb_format_props.suggestedYcbcrRange,
+ ahb_format_props.suggestedXChromaOffset,
+ ahb_format_props.suggestedYChromaOffset,
+ ahb_format_props.formatFeatures);
+ }
+
+ return true;
+}
+
+} // namespace gpu
\ No newline at end of file
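
For context on the import path above: the essential Vulkan sequence is to query the buffer's allocation size and memory type bits, then chain the import info (and a dedicated-allocation structure) into vkAllocateMemory and bind the result to the image. The sketch below is illustrative only; it assumes a valid VkDevice `device`, an AHardwareBuffer* `ahb`, and a VkImage `image` already created with VkExternalMemoryImageCreateInfo, and it is not the VulkanImage::Initialize() helper the patch actually calls.

// Minimal sketch (placeholder names, error handling trimmed).
VkAndroidHardwareBufferPropertiesANDROID props = {
    VK_STRUCTURE_TYPE_ANDROID_HARDWARE_BUFFER_PROPERTIES_ANDROID};
vkGetAndroidHardwareBufferPropertiesANDROID(device, ahb, &props);

VkImportAndroidHardwareBufferInfoANDROID import_info = {
    VK_STRUCTURE_TYPE_IMPORT_ANDROID_HARDWARE_BUFFER_INFO_ANDROID};
import_info.buffer = ahb;

// AHB imports are normally dedicated allocations bound to the image
// (and required to be when the image uses an external format).
VkMemoryDedicatedAllocateInfo dedicated_info = {
    VK_STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO};
dedicated_info.pNext = &import_info;
dedicated_info.image = image;

// Pick the lowest set bit of memoryTypeBits as the memory type index.
uint32_t memory_type_index = 0;
while (!(props.memoryTypeBits & (1u << memory_type_index)))
  ++memory_type_index;

VkMemoryAllocateInfo alloc_info = {VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO};
alloc_info.pNext = &dedicated_info;
alloc_info.allocationSize = props.allocationSize;
alloc_info.memoryTypeIndex = memory_type_index;

VkDeviceMemory memory = VK_NULL_HANDLE;
vkAllocateMemory(device, &alloc_info, nullptr, &memory);
vkBindImageMemory(device, image, memory, 0 /* memoryOffset */);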
diff --git a/chromium/gpu/vulkan/vulkan_image_fuchsia.cc b/chromium/gpu/vulkan/vulkan_image_fuchsia.cc
new file mode 100644
index 00000000000..a66871836b9
--- /dev/null
+++ b/chromium/gpu/vulkan/vulkan_image_fuchsia.cc
@@ -0,0 +1,21 @@
+// Copyright 2020 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "gpu/vulkan/vulkan_image.h"
+
+namespace gpu {
+
+bool VulkanImage::InitializeFromGpuMemoryBufferHandle(
+ VulkanDeviceQueue* device_queue,
+ gfx::GpuMemoryBufferHandle gmb_handle,
+ const gfx::Size& size,
+ VkFormat format,
+ VkImageUsageFlags usage,
+ VkImageCreateFlags flags,
+ VkImageTiling image_tiling) {
+ NOTIMPLEMENTED();
+ return false;
+}
+
+} // namespace gpu
diff --git a/chromium/gpu/vulkan/vulkan_image_linux.cc b/chromium/gpu/vulkan/vulkan_image_linux.cc
new file mode 100644
index 00000000000..789b8e3b0e4
--- /dev/null
+++ b/chromium/gpu/vulkan/vulkan_image_linux.cc
@@ -0,0 +1,73 @@
+// Copyright 2020 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "gpu/vulkan/vulkan_image.h"
+
+#include "gpu/vulkan/vulkan_device_queue.h"
+
+namespace gpu {
+
+bool VulkanImage::InitializeFromGpuMemoryBufferHandle(
+ VulkanDeviceQueue* device_queue,
+ gfx::GpuMemoryBufferHandle gmb_handle,
+ const gfx::Size& size,
+ VkFormat format,
+ VkImageUsageFlags usage,
+ VkImageCreateFlags flags,
+ VkImageTiling image_tiling) {
+ if (gmb_handle.type != gfx::GpuMemoryBufferType::NATIVE_PIXMAP) {
+ DLOG(ERROR) << "GpuMemoryBuffer is not supported. type:" << gmb_handle.type;
+ return false;
+ }
+
+ auto& native_pixmap_handle = gmb_handle.native_pixmap_handle;
+ DCHECK_EQ(native_pixmap_handle.planes.size(), 1u);
+
+ auto& scoped_fd = native_pixmap_handle.planes[0].fd;
+ if (!scoped_fd.is_valid()) {
+ DLOG(ERROR) << "GpuMemoryBufferHandle doesn't have a valid fd.";
+ return false;
+ }
+
+ bool using_modifier =
+ native_pixmap_handle.modifier != gfx::NativePixmapHandle::kNoModifier &&
+ gfx::HasExtension(device_queue->enabled_extensions(),
+ VK_EXT_IMAGE_DRM_FORMAT_MODIFIER_EXTENSION_NAME);
+
+ // If the driver doesn't support modifiers, or the native_pixmap_handle
+ // doesn't carry one, VK_IMAGE_TILING_OPTIMAL will be used.
+ DCHECK_EQ(image_tiling, VK_IMAGE_TILING_OPTIMAL);
+ if (using_modifier)
+ image_tiling = VK_IMAGE_TILING_DRM_FORMAT_MODIFIER_EXT;
+
+ VkExternalMemoryImageCreateInfoKHR external_image_create_info = {
+ .sType = VK_STRUCTURE_TYPE_EXTERNAL_MEMORY_IMAGE_CREATE_INFO_KHR,
+ .handleTypes = VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT,
+ };
+ VkImageDrmFormatModifierListCreateInfoEXT modifier_info = {
+ .sType = VK_STRUCTURE_TYPE_IMAGE_DRM_FORMAT_MODIFIER_LIST_CREATE_INFO_EXT,
+ .drmFormatModifierCount = 1,
+ .pDrmFormatModifiers = &native_pixmap_handle.modifier,
+ };
+ if (using_modifier)
+ external_image_create_info.pNext = &modifier_info;
+
+ VkImportMemoryFdInfoKHR import_memory_fd_info = {
+ .sType = VK_STRUCTURE_TYPE_IMPORT_MEMORY_FD_INFO_KHR,
+ .handleType = VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT,
+ .fd = scoped_fd.get(),
+ };
+
+ VkMemoryRequirements* requirements = nullptr;
+ bool result = Initialize(device_queue, size, format, usage, flags,
+ image_tiling, &external_image_create_info,
+ &import_memory_fd_info, requirements);
+ // If Initialize() succeeds, ownership of the fd in scoped_fd passes to Vulkan.
+ if (result)
+ ignore_result(scoped_fd.release());
+
+ return result;
+}
+
+} // namespace gpu
\ No newline at end of file
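
The Linux path delegates the fd import to Initialize(); the underlying VK_KHR_external_memory_fd pattern, sketched below with placeholder names (`device`, `image`, `fd`), is to chain VkImportMemoryFdInfoKHR into the allocation. As in the patch, the driver takes ownership of the fd only when vkAllocateMemory succeeds.

// Minimal sketch of importing a dma-buf fd (placeholder names).
VkMemoryRequirements reqs;
vkGetImageMemoryRequirements(device, image, &reqs);

VkImportMemoryFdInfoKHR import_fd_info = {
    VK_STRUCTURE_TYPE_IMPORT_MEMORY_FD_INFO_KHR};
import_fd_info.handleType = VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT;
import_fd_info.fd = fd;  // Ownership passes to the driver on success.

// Pick the lowest set bit of memoryTypeBits as the memory type index.
uint32_t memory_type_index = 0;
while (!(reqs.memoryTypeBits & (1u << memory_type_index)))
  ++memory_type_index;

VkMemoryAllocateInfo alloc_info = {VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO};
alloc_info.pNext = &import_fd_info;
alloc_info.allocationSize = reqs.size;
alloc_info.memoryTypeIndex = memory_type_index;

VkDeviceMemory memory = VK_NULL_HANDLE;
if (vkAllocateMemory(device, &alloc_info, nullptr, &memory) == VK_SUCCESS)
  vkBindImageMemory(device, image, memory, 0 /* memoryOffset */);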
diff --git a/chromium/gpu/vulkan/vulkan_image_win.cc b/chromium/gpu/vulkan/vulkan_image_win.cc
new file mode 100644
index 00000000000..a66871836b9
--- /dev/null
+++ b/chromium/gpu/vulkan/vulkan_image_win.cc
@@ -0,0 +1,21 @@
+// Copyright 2020 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "gpu/vulkan/vulkan_image.h"
+
+namespace gpu {
+
+bool VulkanImage::InitializeFromGpuMemoryBufferHandle(
+ VulkanDeviceQueue* device_queue,
+ gfx::GpuMemoryBufferHandle gmb_handle,
+ const gfx::Size& size,
+ VkFormat format,
+ VkImageUsageFlags usage,
+ VkImageCreateFlags flags,
+ VkImageTiling image_tiling) {
+ NOTIMPLEMENTED();
+ return false;
+}
+
+} // namespace gpu
diff --git a/chromium/gpu/vulkan/vulkan_implementation.cc b/chromium/gpu/vulkan/vulkan_implementation.cc
index 2731519fca6..2dde313cb7c 100644
--- a/chromium/gpu/vulkan/vulkan_implementation.cc
+++ b/chromium/gpu/vulkan/vulkan_implementation.cc
@@ -31,9 +31,11 @@ std::unique_ptr<VulkanDeviceQueue> CreateVulkanDeviceQueue(
base::Unretained(vulkan_implementation));
std::vector<const char*> required_extensions =
vulkan_implementation->GetRequiredDeviceExtensions();
+ std::vector<const char*> optional_extensions =
+ vulkan_implementation->GetOptionalDeviceExtensions();
if (!device_queue->Initialize(
option, vulkan_implementation->GetVulkanInstance()->vulkan_info(),
- std::move(required_extensions),
+ std::move(required_extensions), std::move(optional_extensions),
vulkan_implementation->allow_protected_memory(), callback)) {
device_queue->Destroy();
return nullptr;
diff --git a/chromium/gpu/vulkan/vulkan_implementation.h b/chromium/gpu/vulkan/vulkan_implementation.h
index 3bf07c2f4fe..579ff5c465c 100644
--- a/chromium/gpu/vulkan/vulkan_implementation.h
+++ b/chromium/gpu/vulkan/vulkan_implementation.h
@@ -33,6 +33,7 @@ struct GpuMemoryBufferHandle;
namespace gpu {
class VulkanDeviceQueue;
class VulkanSurface;
+class VulkanImage;
class VulkanInstance;
struct VulkanYCbCrInfo;
@@ -70,6 +71,7 @@ class VULKAN_EXPORT VulkanImplementation {
uint32_t queue_family_index) = 0;
virtual std::vector<const char*> GetRequiredDeviceExtensions() = 0;
+ virtual std::vector<const char*> GetOptionalDeviceExtensions() = 0;
// Creates a VkFence that is exportable to a gfx::GpuFence.
virtual VkFence CreateVkFenceForGpuFence(VkDevice vk_device) = 0;
@@ -108,32 +110,13 @@ class VULKAN_EXPORT VulkanImplementation {
// |vk_image|, |vk_image_info|, |vk_device_memory| and |mem_allocation_size|.
// Implementation must verify that the specified |size| fits in the size
// specified when |gmb_handle| was allocated.
- virtual bool CreateImageFromGpuMemoryHandle(
- VkDevice vk_device,
+ virtual std::unique_ptr<VulkanImage> CreateImageFromGpuMemoryHandle(
+ VulkanDeviceQueue* device_queue,
gfx::GpuMemoryBufferHandle gmb_handle,
gfx::Size size,
- VkImage* vk_image,
- VkImageCreateInfo* vk_image_info,
- VkDeviceMemory* vk_device_memory,
- VkDeviceSize* mem_allocation_size,
- base::Optional<VulkanYCbCrInfo>* ycbcr_info) = 0;
+ VkFormat vk_formae) = 0;
#if defined(OS_ANDROID)
- // Create a VkImage, import Android AHardwareBuffer object created outside of
- // the Vulkan device into Vulkan memory object and bind it to the VkImage.
- // TODO(sergeyu): Remove this method and use
- // CreateVkImageFromGpuMemoryHandle() instead.
- virtual bool CreateVkImageAndImportAHB(
- const VkDevice& vk_device,
- const VkPhysicalDevice& vk_physical_device,
- const gfx::Size& size,
- base::android::ScopedHardwareBufferHandle ahb_handle,
- VkImage* vk_image,
- VkImageCreateInfo* vk_image_info,
- VkDeviceMemory* vk_device_memory,
- VkDeviceSize* mem_allocation_size,
- VulkanYCbCrInfo* ycbcr_info = nullptr) = 0;
-
// Get the sampler ycbcr conversion information from the AHB.
virtual bool GetSamplerYcbcrConversionInfo(
const VkDevice& vk_device,
diff --git a/chromium/gpu/vulkan/vulkan_instance.cc b/chromium/gpu/vulkan/vulkan_instance.cc
index d1638b65dc1..b1eddaed861 100644
--- a/chromium/gpu/vulkan/vulkan_instance.cc
+++ b/chromium/gpu/vulkan/vulkan_instance.cc
@@ -57,10 +57,8 @@ bool VulkanInstance::Initialize(
if (!vulkan_function_pointers->BindUnassociatedFunctionPointers())
return false;
- if (vulkan_function_pointers->vkEnumerateInstanceVersionFn) {
- vulkan_function_pointers->vkEnumerateInstanceVersionFn(
- &vulkan_info_.api_version);
- }
+ if (vkEnumerateInstanceVersion)
+ vkEnumerateInstanceVersion(&vulkan_info_.api_version);
#if defined(OS_ANDROID)
// Ensure that android works only with vulkan apiVersion >= 1.1. Vulkan will
@@ -83,23 +81,37 @@ bool VulkanInstance::Initialize(
app_info.pApplicationName = "Chromium";
app_info.apiVersion = vulkan_info_.used_api_version;
- vulkan_info_.enabled_instance_extensions = required_extensions;
- uint32_t num_instance_exts = 0;
- result = vkEnumerateInstanceExtensionProperties(nullptr, &num_instance_exts,
- nullptr);
- if (VK_SUCCESS != result) {
- DLOG(ERROR) << "vkEnumerateInstanceExtensionProperties(NULL) failed: "
- << result;
- return false;
- }
+ // Query the extensions from all layers, including ones that are implicitly
+ // available (identified by passing a null ptr as the layer name).
+ std::vector<const char*> all_required_layers = required_layers;
- vulkan_info_.instance_extensions.resize(num_instance_exts);
- result = vkEnumerateInstanceExtensionProperties(
- nullptr, &num_instance_exts, vulkan_info_.instance_extensions.data());
- if (VK_SUCCESS != result) {
- DLOG(ERROR) << "vkEnumerateInstanceExtensionProperties() failed: "
- << result;
- return false;
+ // Include the extension properties provided by the Vulkan implementation as
+ // part of the enumeration.
+ all_required_layers.push_back(nullptr);
+
+ for (const char* layer_name : all_required_layers) {
+ vulkan_info_.enabled_instance_extensions = required_extensions;
+ uint32_t num_instance_exts = 0;
+ result = vkEnumerateInstanceExtensionProperties(
+ layer_name, &num_instance_exts, nullptr);
+ if (VK_SUCCESS != result) {
+ DLOG(ERROR) << "vkEnumerateInstanceExtensionProperties(" << layer_name
+ << ") failed: " << result;
+ return false;
+ }
+
+ const size_t previous_extension_count =
+ vulkan_info_.instance_extensions.size();
+ vulkan_info_.instance_extensions.resize(previous_extension_count +
+ num_instance_exts);
+ result = vkEnumerateInstanceExtensionProperties(
+ layer_name, &num_instance_exts,
+ &vulkan_info_.instance_extensions.data()[previous_extension_count]);
+ if (VK_SUCCESS != result) {
+ DLOG(ERROR) << "vkEnumerateInstanceExtensionProperties(" << layer_name
+ << ") failed: " << result;
+ return false;
+ }
}
for (const VkExtensionProperties& ext_property :
@@ -191,15 +203,14 @@ bool VulkanInstance::Initialize(
return false;
}
+ if (!vulkan_function_pointers->BindInstanceFunctionPointers(
+ vk_instance_, vulkan_info_.used_api_version, enabled_extensions)) {
+ return false;
+ }
+
#if DCHECK_IS_ON()
// Register our error logging function.
if (debug_report_enabled_) {
- PFN_vkCreateDebugReportCallbackEXT vkCreateDebugReportCallbackEXT =
- reinterpret_cast<PFN_vkCreateDebugReportCallbackEXT>(
- vkGetInstanceProcAddr(vk_instance_,
- "vkCreateDebugReportCallbackEXT"));
- DCHECK(vkCreateDebugReportCallbackEXT);
-
VkDebugReportCallbackCreateInfoEXT cb_create_info = {};
cb_create_info.sType = VK_STRUCTURE_TYPE_DEBUG_REPORT_CREATE_INFO_EXT;
@@ -226,20 +237,22 @@ bool VulkanInstance::Initialize(
}
#endif
- if (!vulkan_function_pointers->BindInstanceFunctionPointers(
- vk_instance_, vulkan_info_.used_api_version, enabled_extensions)) {
+ if (!CollectInfo())
return false;
- }
-
- CollectInfo();
return true;
}
-void VulkanInstance::CollectInfo() {
+bool VulkanInstance::CollectInfo() {
uint32_t count = 0;
VkResult result = vkEnumeratePhysicalDevices(vk_instance_, &count, nullptr);
if (result != VK_SUCCESS) {
DLOG(ERROR) << "vkEnumeratePhysicalDevices failed: " << result;
+ return false;
+ }
+
+ if (!count) {
+ DLOG(ERROR) << "vkEnumeratePhysicalDevices returns zero device.";
+ return false;
}
std::vector<VkPhysicalDevice> physical_devices(count);
@@ -247,7 +260,7 @@ void VulkanInstance::CollectInfo() {
vkEnumeratePhysicalDevices(vk_instance_, &count, physical_devices.data());
if (VK_SUCCESS != result) {
DLOG(ERROR) << "vkEnumeratePhysicalDevices() failed: " << result;
- return;
+ return false;
}
vulkan_info_.physical_devices.reserve(count);
@@ -259,15 +272,16 @@ void VulkanInstance::CollectInfo() {
vkGetPhysicalDeviceProperties(device, &info.properties);
count = 0;
- result = vkEnumerateDeviceLayerProperties(device, &count, nullptr);
+ result = vkEnumerateDeviceExtensionProperties(
+ device, nullptr /* pLayerName */, &count, nullptr);
DLOG_IF(ERROR, result != VK_SUCCESS)
- << "vkEnumerateDeviceLayerProperties failed: " << result;
+ << "vkEnumerateDeviceExtensionProperties failed: " << result;
- info.layers.resize(count);
- result =
- vkEnumerateDeviceLayerProperties(device, &count, info.layers.data());
+ info.extensions.resize(count);
+ result = vkEnumerateDeviceExtensionProperties(
+ device, nullptr /* pLayerName */, &count, info.extensions.data());
DLOG_IF(ERROR, result != VK_SUCCESS)
- << "vkEnumerateDeviceLayerProperties failed: " << result;
+ << "vkEnumerateDeviceExtensionProperties failed: " << result;
// The API version of the VkInstance might be different than the supported
// API version of the VkPhysicalDevice, so we need to check the GPU's
@@ -300,17 +314,13 @@ void VulkanInstance::CollectInfo() {
info.queue_families.data());
}
}
+ return true;
}
void VulkanInstance::Destroy() {
#if DCHECK_IS_ON()
if (debug_report_enabled_ && (error_callback_ != VK_NULL_HANDLE ||
warning_callback_ != VK_NULL_HANDLE)) {
- PFN_vkDestroyDebugReportCallbackEXT vkDestroyDebugReportCallbackEXT =
- reinterpret_cast<PFN_vkDestroyDebugReportCallbackEXT>(
- vkGetInstanceProcAddr(vk_instance_,
- "vkDestroyDebugReportCallbackEXT"));
- DCHECK(vkDestroyDebugReportCallbackEXT);
if (error_callback_ != VK_NULL_HANDLE) {
vkDestroyDebugReportCallbackEXT(vk_instance_, error_callback_, nullptr);
error_callback_ = VK_NULL_HANDLE;
@@ -327,9 +337,10 @@ void VulkanInstance::Destroy() {
}
VulkanFunctionPointers* vulkan_function_pointers =
gpu::GetVulkanFunctionPointers();
- if (vulkan_function_pointers->vulkan_loader_library_)
- base::UnloadNativeLibrary(vulkan_function_pointers->vulkan_loader_library_);
- vulkan_function_pointers->vulkan_loader_library_ = nullptr;
+ if (vulkan_function_pointers->vulkan_loader_library) {
+ base::UnloadNativeLibrary(vulkan_function_pointers->vulkan_loader_library);
+ vulkan_function_pointers->vulkan_loader_library = nullptr;
+ }
}
} // namespace gpu
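
The reworked VulkanInstance::Initialize() now enumerates instance extensions once per required layer and once for the implementation itself (null layer name). A condensed, standalone sketch of that pattern, using a hypothetical helper name, looks like this:

// Hypothetical helper illustrating per-layer extension enumeration.
#include <vulkan/vulkan.h>
#include <vector>

std::vector<VkExtensionProperties> EnumerateAllInstanceExtensions(
    std::vector<const char*> layers) {
  // nullptr enumerates the extensions exposed by the implementation itself.
  layers.push_back(nullptr);
  std::vector<VkExtensionProperties> extensions;
  for (const char* layer : layers) {
    uint32_t count = 0;
    if (vkEnumerateInstanceExtensionProperties(layer, &count, nullptr) !=
        VK_SUCCESS) {
      continue;
    }
    size_t offset = extensions.size();
    extensions.resize(offset + count);
    vkEnumerateInstanceExtensionProperties(layer, &count,
                                           extensions.data() + offset);
  }
  return extensions;
}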
diff --git a/chromium/gpu/vulkan/vulkan_instance.h b/chromium/gpu/vulkan/vulkan_instance.h
index bd0092e11bf..ad4497d3bdd 100644
--- a/chromium/gpu/vulkan/vulkan_instance.h
+++ b/chromium/gpu/vulkan/vulkan_instance.h
@@ -35,7 +35,7 @@ class VULKAN_EXPORT VulkanInstance {
VkInstance vk_instance() { return vk_instance_; }
private:
- void CollectInfo();
+ bool CollectInfo();
void Destroy();
VulkanInfo vulkan_info_;
diff --git a/chromium/gpu/vulkan/vulkan_surface.cc b/chromium/gpu/vulkan/vulkan_surface.cc
index e219654e6b2..00c20207da3 100644
--- a/chromium/gpu/vulkan/vulkan_surface.cc
+++ b/chromium/gpu/vulkan/vulkan_surface.cc
@@ -90,13 +90,12 @@ bool VulkanSurface::Initialize(VulkanDeviceQueue* device_queue,
device_queue_ = device_queue;
- VkResult result = VK_SUCCESS;
VkBool32 present_support;
- if (vkGetPhysicalDeviceSurfaceSupportKHR(
- device_queue_->GetVulkanPhysicalDevice(),
- device_queue_->GetVulkanQueueIndex(), surface_,
- &present_support) != VK_SUCCESS) {
+ VkResult result = vkGetPhysicalDeviceSurfaceSupportKHR(
+ device_queue_->GetVulkanPhysicalDevice(),
+ device_queue_->GetVulkanQueueIndex(), surface_, &present_support);
+ if (result != VK_SUCCESS) {
DLOG(ERROR) << "vkGetPhysicalDeviceSurfaceSupportKHR() failed: " << result;
return false;
}
@@ -154,18 +153,36 @@ bool VulkanSurface::Initialize(VulkanDeviceQueue* device_queue,
return false;
}
}
- return CreateSwapChain(gfx::Size(), gfx::OVERLAY_TRANSFORM_INVALID);
+
+ VkSurfaceCapabilitiesKHR surface_caps;
+ result = vkGetPhysicalDeviceSurfaceCapabilitiesKHR(
+ device_queue_->GetVulkanPhysicalDevice(), surface_, &surface_caps);
+ if (VK_SUCCESS != result) {
+ DLOG(ERROR) << "vkGetPhysicalDeviceSurfaceCapabilitiesKHR() failed: "
+ << result;
+ return false;
+ }
+
+ image_count_ = std::max(surface_caps.minImageCount, 3u);
+
+ return true;
}
void VulkanSurface::Destroy() {
- swap_chain_->Destroy();
- swap_chain_ = nullptr;
+ if (swap_chain_) {
+ swap_chain_->Destroy();
+ swap_chain_ = nullptr;
+ }
vkDestroySurfaceKHR(vk_instance_, surface_, nullptr);
surface_ = VK_NULL_HANDLE;
}
gfx::SwapResult VulkanSurface::SwapBuffers() {
- return swap_chain_->PresentBuffer();
+ return PostSubBuffer(gfx::Rect(image_size_));
+}
+
+gfx::SwapResult VulkanSurface::PostSubBuffer(const gfx::Rect& rect) {
+ return swap_chain_->PresentBuffer(rect);
}
void VulkanSurface::Finish() {
@@ -240,11 +257,10 @@ bool VulkanSurface::CreateSwapChain(const gfx::Size& size,
auto swap_chain = std::make_unique<VulkanSwapChain>();
// Create swap chain.
- uint32_t min_image_count = std::max(surface_caps.minImageCount, 3u);
- if (!swap_chain->Initialize(device_queue_, surface_, surface_format_,
- image_size_, min_image_count, vk_transform,
- enforce_protected_memory_,
- std::move(swap_chain_))) {
+ DCHECK_EQ(image_count_, std::max(surface_caps.minImageCount, 3u));
+ if (!swap_chain->Initialize(
+ device_queue_, surface_, surface_format_, image_size_, image_count_,
+ vk_transform, enforce_protected_memory_, std::move(swap_chain_))) {
return false;
}
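
VulkanSurface::Initialize() now computes the swap-chain image count from the surface capabilities up front, and CreateSwapChain() asserts that it still matches. The usual clamping logic is only a few lines; the sketch below (placeholder `physical_device`/`surface`, <algorithm> assumed) also clamps against maxImageCount, where 0 means the driver imposes no upper limit.

// Sketch: derive the swap-chain image count from surface capabilities.
VkSurfaceCapabilitiesKHR caps;
vkGetPhysicalDeviceSurfaceCapabilitiesKHR(physical_device, surface, &caps);
uint32_t image_count = std::max(caps.minImageCount, 3u);  // Prefer triple buffering.
if (caps.maxImageCount != 0)
  image_count = std::min(image_count, caps.maxImageCount);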
diff --git a/chromium/gpu/vulkan/vulkan_surface.h b/chromium/gpu/vulkan/vulkan_surface.h
index 85df173c66d..66a2f754932 100644
--- a/chromium/gpu/vulkan/vulkan_surface.h
+++ b/chromium/gpu/vulkan/vulkan_surface.h
@@ -43,6 +43,7 @@ class VULKAN_EXPORT VulkanSurface {
void Destroy();
gfx::SwapResult SwapBuffers();
+ gfx::SwapResult PostSubBuffer(const gfx::Rect& rect);
void Finish();
@@ -56,6 +57,7 @@ class VULKAN_EXPORT VulkanSurface {
uint32_t swap_chain_generation() const { return swap_chain_generation_; }
const gfx::Size& image_size() const { return image_size_; }
gfx::OverlayTransform transform() const { return transform_; }
+ uint32_t image_count() const { return image_count_; }
VkSurfaceFormatKHR surface_format() const { return surface_format_; }
private:
@@ -79,6 +81,9 @@ class VULKAN_EXPORT VulkanSurface {
// Swap chain pre-transform.
gfx::OverlayTransform transform_ = gfx::OVERLAY_TRANSFORM_INVALID;
+ // Swap chain image count.
+ uint32_t image_count_ = 0u;
+
std::unique_ptr<VulkanSwapChain> swap_chain_;
DISALLOW_COPY_AND_ASSIGN(VulkanSurface);
diff --git a/chromium/gpu/vulkan/vulkan_swap_chain.cc b/chromium/gpu/vulkan/vulkan_swap_chain.cc
index e49279963cd..b80989b8a4a 100644
--- a/chromium/gpu/vulkan/vulkan_swap_chain.cc
+++ b/chromium/gpu/vulkan/vulkan_swap_chain.cc
@@ -49,6 +49,9 @@ bool VulkanSwapChain::Initialize(
DCHECK(!use_protected_memory || device_queue->allow_protected_memory());
use_protected_memory_ = use_protected_memory;
device_queue_ = device_queue;
+ is_incremental_present_supported_ =
+ gfx::HasExtension(device_queue_->enabled_extensions(),
+ VK_KHR_INCREMENTAL_PRESENT_EXTENSION_NAME);
device_queue_->GetFenceHelper()->ProcessCleanupTasks();
return InitializeSwapChain(surface, surface_format, image_size,
min_image_count, pre_transform,
@@ -62,7 +65,7 @@ void VulkanSwapChain::Destroy() {
DestroySwapChain();
}
-gfx::SwapResult VulkanSwapChain::PresentBuffer() {
+gfx::SwapResult VulkanSwapChain::PresentBuffer(const gfx::Rect& rect) {
DCHECK(acquired_image_);
DCHECK(end_write_semaphore_ != VK_NULL_HANDLE);
@@ -96,15 +99,30 @@ gfx::SwapResult VulkanSwapChain::PresentBuffer() {
end_write_semaphore_ = vk_semaphore;
}
- // Queue the present.
- VkPresentInfoKHR present_info = {};
- present_info.sType = VK_STRUCTURE_TYPE_PRESENT_INFO_KHR;
+ VkPresentInfoKHR present_info = {VK_STRUCTURE_TYPE_PRESENT_INFO_KHR};
present_info.waitSemaphoreCount = 1;
present_info.pWaitSemaphores = &end_write_semaphore_;
present_info.swapchainCount = 1;
present_info.pSwapchains = &swap_chain_;
present_info.pImageIndices = &acquired_image_.value();
+ VkRectLayerKHR rect_layer;
+ VkPresentRegionKHR present_region;
+ VkPresentRegionsKHR present_regions = {VK_STRUCTURE_TYPE_PRESENT_REGIONS_KHR};
+ if (is_incremental_present_supported_) {
+ rect_layer.offset = {rect.x(), rect.y()};
+ rect_layer.extent = {rect.width(), rect.height()};
+ rect_layer.layer = 0;
+
+ present_region.rectangleCount = 1;
+ present_region.pRectangles = &rect_layer;
+
+ present_regions.swapchainCount = 1;
+ present_regions.pRegions = &present_region;
+
+ present_info.pNext = &present_regions;
+ }
+
result = vkQueuePresentKHR(queue, &present_info);
if (result != VK_SUCCESS && result != VK_SUBOPTIMAL_KHR) {
DLOG(ERROR) << "vkQueuePresentKHR() failed: " << result;
@@ -112,10 +130,23 @@ gfx::SwapResult VulkanSwapChain::PresentBuffer() {
}
DLOG_IF(ERROR, result == VK_SUBOPTIMAL_KHR) << "Swapchain is suboptimal.";
- acquired_image_.reset();
- fence_helper->EnqueueSemaphoreCleanupForSubmittedWork(end_write_semaphore_);
+ if (current_image_data.present_begin_semaphore != VK_NULL_HANDLE) {
+ // The |present_begin_semaphore| from the previous present of this image can
+ // be safely destroyed once the semaphore obtained from
+ // vkAcquireNextImageKHR() has been passed along; that acquired semaphore
+ // should already have been waited on by submitted GPU work. So we can safely
+ // enqueue |present_begin_semaphore| for cleanup here (the enqueued semaphore
+ // will be destroyed when all submitted GPU work is finished).
+ fence_helper->EnqueueSemaphoreCleanupForSubmittedWork(
+ current_image_data.present_begin_semaphore);
+ }
+ // We don't know when the present engine stops using the semaphore, so don't
+ // destroy it until the image is returned by the present engine.
+ current_image_data.present_begin_semaphore = end_write_semaphore_;
end_write_semaphore_ = VK_NULL_HANDLE;
+ in_present_images_.emplace_back(*acquired_image_);
+ acquired_image_.reset();
return gfx::SwapResult::SWAP_ACK;
}
@@ -221,10 +252,20 @@ void VulkanSwapChain::DestroySwapImages() {
end_write_semaphore_ = VK_NULL_HANDLE;
for (auto& image_data : images_) {
- if (!image_data.command_buffer)
- continue;
- image_data.command_buffer->Destroy();
- image_data.command_buffer = nullptr;
+ if (image_data.command_buffer) {
+ image_data.command_buffer->Destroy();
+ image_data.command_buffer = nullptr;
+ }
+ if (image_data.present_begin_semaphore != VK_NULL_HANDLE) {
+ vkDestroySemaphore(device_queue_->GetVulkanDevice(),
+ image_data.present_begin_semaphore,
+ nullptr /* pAllocator */);
+ }
+ if (image_data.present_end_semaphore != VK_NULL_HANDLE) {
+ vkDestroySemaphore(device_queue_->GetVulkanDevice(),
+ image_data.present_end_semaphore,
+ nullptr /* pAllocator */);
+ }
}
images_.clear();
@@ -246,33 +287,20 @@ bool VulkanSwapChain::BeginWriteCurrentImage(VkImage* image,
if (!acquired_image_) {
DCHECK(end_write_semaphore_ == VK_NULL_HANDLE);
-
- VkDevice device = device_queue_->GetVulkanDevice();
- vk_semaphore = CreateSemaphore(device);
- DCHECK(vk_semaphore != VK_NULL_HANDLE);
-
- uint32_t next_image = 0;
- // Acquire then next image.
- auto result =
- vkAcquireNextImageKHR(device, swap_chain_, UINT64_MAX, vk_semaphore,
- VK_NULL_HANDLE, &next_image);
- if (result != VK_SUCCESS && result != VK_SUBOPTIMAL_KHR) {
- vkDestroySemaphore(device, vk_semaphore, nullptr /* pAllocator */);
- DLOG(ERROR) << "vkAcquireNextImageKHR() failed: " << result;
+ if (!AcquireNextImage())
return false;
- }
- acquired_image_.emplace(next_image);
+ DCHECK(acquired_image_);
+ std::swap(vk_semaphore, images_[*acquired_image_].present_end_semaphore);
} else {
// In this case, PresentBuffer() is not called after
// {Begin,End}WriteCurrentImage pairs, |end_write_semaphore_| should be
// waited on before writing the image again.
- vk_semaphore = end_write_semaphore_;
- end_write_semaphore_ = VK_NULL_HANDLE;
+ std::swap(vk_semaphore, end_write_semaphore_);
}
auto& current_image_data = images_[*acquired_image_];
*image = current_image_data.image;
- *image_index = *acquired_image_;
+ *image_index = acquired_image_.value();
*image_layout = current_image_data.layout;
*semaphore = vk_semaphore;
is_writing_ = true;
@@ -292,6 +320,60 @@ void VulkanSwapChain::EndWriteCurrentImage(VkImageLayout image_layout,
is_writing_ = false;
}
+bool VulkanSwapChain::AcquireNextImage() {
+ DCHECK(!acquired_image_);
+ VkDevice device = device_queue_->GetVulkanDevice();
+ // The Vulkan spec doesn't require vkAcquireNextImageKHR() to return images
+ // in presentation order for a swap chain. However, for the best performance
+ // the driver should return images in order. To guard against buggy drivers,
+ // keep calling vkAcquireNextImageKHR() until the expected image is returned.
+ do {
+ bool all_images_are_tracked = in_present_images_.size() == images_.size();
+ if (all_images_are_tracked) {
+ // Only check expected_next_image when all images are tracked.
+ uint32_t expected_next_image = in_present_images_.front();
+ // If the expected next image has been acquired, use it and return true.
+ if (images_[expected_next_image].present_end_semaphore !=
+ VK_NULL_HANDLE) {
+ in_present_images_.pop_front();
+ acquired_image_.emplace(expected_next_image);
+ break;
+ }
+ }
+
+ VkSemaphore vk_semaphore = CreateSemaphore(device);
+ DCHECK(vk_semaphore != VK_NULL_HANDLE);
+
+ // Acquire the next image.
+ uint32_t next_image;
+ auto result =
+ vkAcquireNextImageKHR(device, swap_chain_, UINT64_MAX, vk_semaphore,
+ VK_NULL_HANDLE, &next_image);
+ if (result != VK_SUCCESS && result != VK_SUBOPTIMAL_KHR) {
+ vkDestroySemaphore(device, vk_semaphore, nullptr /* pAllocator */);
+ DLOG(ERROR) << "vkAcquireNextImageKHR() failed: " << result;
+ return false;
+ }
+
+ DCHECK(images_[next_image].present_end_semaphore == VK_NULL_HANDLE);
+ images_[next_image].present_end_semaphore = vk_semaphore;
+
+ auto it = std::find(in_present_images_.begin(), in_present_images_.end(),
+ next_image);
+ if (it == in_present_images_.end()) {
+ DCHECK(!all_images_are_tracked);
+ // Got an image which is not in the present queue because the swap chain was
+ // newly created. In this case, just use this image.
+ acquired_image_.emplace(next_image);
+ break;
+ }
+ DLOG_IF(ERROR, it != in_present_images_.begin())
+ << "vkAcquireNextImageKHR() returned an unexpected image.";
+ } while (true);
+ return true;
+}
+
VulkanSwapChain::ScopedWrite::ScopedWrite(VulkanSwapChain* swap_chain)
: swap_chain_(swap_chain) {
success_ = swap_chain_->BeginWriteCurrentImage(
@@ -310,10 +392,11 @@ VkSemaphore VulkanSwapChain::ScopedWrite::TakeBeginSemaphore() {
return semaphore;
}
-void VulkanSwapChain::ScopedWrite::SetEndSemaphore(VkSemaphore semaphore) {
+VkSemaphore VulkanSwapChain::ScopedWrite::GetEndSemaphore() {
DCHECK(end_semaphore_ == VK_NULL_HANDLE);
- DCHECK(semaphore != VK_NULL_HANDLE);
- end_semaphore_ = semaphore;
+ end_semaphore_ =
+ CreateSemaphore(swap_chain_->device_queue_->GetVulkanDevice());
+ return end_semaphore_;
}
VulkanSwapChain::ImageData::ImageData() = default;
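
When VK_KHR_incremental_present is enabled, PresentBuffer() attaches the damage rectangle through the pNext chain of VkPresentInfoKHR. A stripped-down sketch of that chaining, with placeholder values for the swapchain, wait semaphore, image index, and damage size, is:

// Sketch: present one image with a single damage rectangle
// (VK_KHR_incremental_present must be enabled on the device).
VkRectLayerKHR rect_layer = {};
rect_layer.offset = {0, 0};            // Damage origin (placeholder).
rect_layer.extent = {width, height};   // Damage size (placeholders).
rect_layer.layer = 0;

VkPresentRegionKHR region = {};
region.rectangleCount = 1;
region.pRectangles = &rect_layer;

VkPresentRegionsKHR regions = {VK_STRUCTURE_TYPE_PRESENT_REGIONS_KHR};
regions.swapchainCount = 1;
regions.pRegions = &region;

VkPresentInfoKHR present_info = {VK_STRUCTURE_TYPE_PRESENT_INFO_KHR};
present_info.pNext = &regions;
present_info.waitSemaphoreCount = 1;
present_info.pWaitSemaphores = &wait_semaphore;
present_info.swapchainCount = 1;
present_info.pSwapchains = &swapchain;
present_info.pImageIndices = &image_index;
vkQueuePresentKHR(queue, &present_info);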
diff --git a/chromium/gpu/vulkan/vulkan_swap_chain.h b/chromium/gpu/vulkan/vulkan_swap_chain.h
index 099e165cdd3..cb79c2fcd6c 100644
--- a/chromium/gpu/vulkan/vulkan_swap_chain.h
+++ b/chromium/gpu/vulkan/vulkan_swap_chain.h
@@ -5,13 +5,16 @@
#ifndef GPU_VULKAN_VULKAN_SWAP_CHAIN_H_
#define GPU_VULKAN_VULKAN_SWAP_CHAIN_H_
+#include <vulkan/vulkan.h>
+
#include <memory>
#include <vector>
-#include <vulkan/vulkan.h>
+#include "base/containers/circular_deque.h"
#include "base/logging.h"
#include "base/optional.h"
#include "gpu/vulkan/vulkan_export.h"
+#include "ui/gfx/geometry/rect.h"
#include "ui/gfx/geometry/size.h"
#include "ui/gfx/swap_result.h"
@@ -38,9 +41,8 @@ class VULKAN_EXPORT VulkanSwapChain {
// transferred to the caller.
VkSemaphore TakeBeginSemaphore();
- // Set the end write semaphore. The ownership of the semaphore will be
- // transferred to ScopedWrite.
- void SetEndSemaphore(VkSemaphore);
+ // Get the end write semaphore.
+ VkSemaphore GetEndSemaphore();
private:
VulkanSwapChain* const swap_chain_;
@@ -71,7 +73,7 @@ class VULKAN_EXPORT VulkanSwapChain {
void Destroy();
// Present the current buffer.
- gfx::SwapResult PresentBuffer();
+ gfx::SwapResult PresentBuffer(const gfx::Rect& rect);
uint32_t num_images() const { return static_cast<uint32_t>(images_.size()); }
const gfx::Size& size() const { return size_; }
@@ -95,9 +97,11 @@ class VULKAN_EXPORT VulkanSwapChain {
VkImageLayout* layout,
VkSemaphore* semaphore);
void EndWriteCurrentImage(VkImageLayout layout, VkSemaphore semaphore);
+ bool AcquireNextImage();
bool use_protected_memory_ = false;
- VulkanDeviceQueue* device_queue_;
+ VulkanDeviceQueue* device_queue_ = nullptr;
+ bool is_incremental_present_supported_ = false;
VkSwapchainKHR swap_chain_ = VK_NULL_HANDLE;
std::unique_ptr<VulkanCommandPool> command_pool_;
@@ -114,10 +118,15 @@ class VULKAN_EXPORT VulkanSwapChain {
VkImage image = VK_NULL_HANDLE;
VkImageLayout layout = VK_IMAGE_LAYOUT_UNDEFINED;
std::unique_ptr<VulkanCommandBuffer> command_buffer;
+ // Semaphore passed to vkQueuePresentKHR to wait on.
+ VkSemaphore present_begin_semaphore = VK_NULL_HANDLE;
+ // Semaphore signaled when present engine is done with the image.
+ VkSemaphore present_end_semaphore = VK_NULL_HANDLE;
};
std::vector<ImageData> images_;
// Acquired image index.
+ base::circular_deque<uint32_t> in_present_images_;
base::Optional<uint32_t> acquired_image_;
bool is_writing_ = false;
VkSemaphore end_write_semaphore_ = VK_NULL_HANDLE;
diff --git a/chromium/gpu/vulkan/vulkan_util.cc b/chromium/gpu/vulkan/vulkan_util.cc
index 797d78e557c..14fb435d043 100644
--- a/chromium/gpu/vulkan/vulkan_util.cc
+++ b/chromium/gpu/vulkan/vulkan_util.cc
@@ -31,10 +31,14 @@ bool SubmitWaitVkSemaphores(VkQueue vk_queue,
const base::span<VkSemaphore>& vk_semaphores,
VkFence vk_fence) {
DCHECK(!vk_semaphores.empty());
+ std::vector<VkPipelineStageFlags> semaphore_stages(vk_semaphores.size());
+ std::fill(semaphore_stages.begin(), semaphore_stages.end(),
+ VK_PIPELINE_STAGE_ALL_COMMANDS_BIT);
// Structure specifying a queue submit operation.
VkSubmitInfo submit_info = {VK_STRUCTURE_TYPE_SUBMIT_INFO};
submit_info.waitSemaphoreCount = vk_semaphores.size();
submit_info.pWaitSemaphores = vk_semaphores.data();
+ submit_info.pWaitDstStageMask = semaphore_stages.data();
const unsigned int submit_count = 1;
return vkQueueSubmit(vk_queue, submit_count, &submit_info, vk_fence) ==
VK_SUCCESS;
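
The reason for the added pWaitDstStageMask is that VkSubmitInfo requires one pipeline-stage mask per wait semaphore; omitting it is invalid usage even when the submit carries no command buffers. A minimal illustration with placeholder semaphores and queue:

// Sketch: a queue submission that waits on two semaphores; each wait
// needs a corresponding destination stage mask.
VkSemaphore waits[] = {semaphore_a, semaphore_b};
VkPipelineStageFlags wait_stages[] = {VK_PIPELINE_STAGE_ALL_COMMANDS_BIT,
                                      VK_PIPELINE_STAGE_ALL_COMMANDS_BIT};
VkSubmitInfo submit_info = {VK_STRUCTURE_TYPE_SUBMIT_INFO};
submit_info.waitSemaphoreCount = 2;
submit_info.pWaitSemaphores = waits;
submit_info.pWaitDstStageMask = wait_stages;
vkQueueSubmit(queue, 1 /* submitCount */, &submit_info, VK_NULL_HANDLE);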
diff --git a/chromium/gpu/vulkan/win32/BUILD.gn b/chromium/gpu/vulkan/win32/BUILD.gn
index d1834ff8e03..680717fa08c 100644
--- a/chromium/gpu/vulkan/win32/BUILD.gn
+++ b/chromium/gpu/vulkan/win32/BUILD.gn
@@ -24,9 +24,7 @@ component("win32") {
public_configs = [ ":vulkan_win32" ]
- deps = [
- "//ui/gfx",
- ]
+ deps = [ "//ui/gfx" ]
public_deps = [
"//base",
diff --git a/chromium/gpu/vulkan/win32/vulkan_implementation_win32.cc b/chromium/gpu/vulkan/win32/vulkan_implementation_win32.cc
index 77b8f99cb3b..19613073a51 100644
--- a/chromium/gpu/vulkan/win32/vulkan_implementation_win32.cc
+++ b/chromium/gpu/vulkan/win32/vulkan_implementation_win32.cc
@@ -9,6 +9,7 @@
#include "base/files/file_path.h"
#include "base/logging.h"
#include "gpu/vulkan/vulkan_function_pointers.h"
+#include "gpu/vulkan/vulkan_image.h"
#include "gpu/vulkan/vulkan_instance.h"
#include "gpu/vulkan/vulkan_surface.h"
#include "ui/gfx/gpu_fence.h"
@@ -16,44 +17,32 @@
namespace gpu {
+VulkanImplementationWin32::VulkanImplementationWin32(bool use_swiftshader)
+ : VulkanImplementation(use_swiftshader) {}
+
VulkanImplementationWin32::~VulkanImplementationWin32() = default;
bool VulkanImplementationWin32::InitializeVulkanInstance(bool using_surface) {
DCHECK(using_surface);
std::vector<const char*> required_extensions = {
- VK_KHR_SURFACE_EXTENSION_NAME, VK_KHR_WIN32_SURFACE_EXTENSION_NAME};
+ VK_KHR_SURFACE_EXTENSION_NAME,
+ VK_KHR_WIN32_SURFACE_EXTENSION_NAME,
+ };
VulkanFunctionPointers* vulkan_function_pointers =
gpu::GetVulkanFunctionPointers();
+ base::FilePath path(use_swiftshader() ? L"vk_swiftshader.dll"
+ : L"vulkan-1.dll");
+
base::NativeLibraryLoadError native_library_load_error;
- vulkan_function_pointers->vulkan_loader_library_ = base::LoadNativeLibrary(
- base::FilePath(L"vulkan-1.dll"), &native_library_load_error);
- if (!vulkan_function_pointers->vulkan_loader_library_)
+ vulkan_function_pointers->vulkan_loader_library =
+ base::LoadNativeLibrary(path, &native_library_load_error);
+ if (!vulkan_function_pointers->vulkan_loader_library)
return false;
if (!vulkan_instance_.Initialize(required_extensions, {}))
return false;
-
- // Initialize platform function pointers
- vkGetPhysicalDeviceWin32PresentationSupportKHR_ =
- reinterpret_cast<PFN_vkGetPhysicalDeviceWin32PresentationSupportKHR>(
- vkGetInstanceProcAddr(
- vulkan_instance_.vk_instance(),
- "vkGetPhysicalDeviceWin32PresentationSupportKHR"));
- if (!vkGetPhysicalDeviceWin32PresentationSupportKHR_) {
- LOG(ERROR) << "vkGetPhysicalDeviceWin32PresentationSupportKHR not found";
- return false;
- }
-
- vkCreateWin32SurfaceKHR_ =
- reinterpret_cast<PFN_vkCreateWin32SurfaceKHR>(vkGetInstanceProcAddr(
- vulkan_instance_.vk_instance(), "vkCreateWin32SurfaceKHR"));
- if (!vkCreateWin32SurfaceKHR_) {
- LOG(ERROR) << "vkCreateWin32SurfaceKHR not found";
- return false;
- }
-
return true;
}
@@ -69,7 +58,7 @@ std::unique_ptr<VulkanSurface> VulkanImplementationWin32::CreateViewSurface(
surface_create_info.hinstance =
reinterpret_cast<HINSTANCE>(GetWindowLongPtr(window, GWLP_HINSTANCE));
surface_create_info.hwnd = window;
- VkResult result = vkCreateWin32SurfaceKHR_(
+ VkResult result = vkCreateWin32SurfaceKHR(
vulkan_instance_.vk_instance(), &surface_create_info, nullptr, &surface);
if (VK_SUCCESS != result) {
DLOG(ERROR) << "vkCreatWin32SurfaceKHR() failed: " << result;
@@ -85,13 +74,20 @@ bool VulkanImplementationWin32::GetPhysicalDevicePresentationSupport(
VkPhysicalDevice device,
const std::vector<VkQueueFamilyProperties>& queue_family_properties,
uint32_t queue_family_index) {
- return vkGetPhysicalDeviceWin32PresentationSupportKHR_(device,
- queue_family_index);
+ return vkGetPhysicalDeviceWin32PresentationSupportKHR(device,
+ queue_family_index);
}
std::vector<const char*>
VulkanImplementationWin32::GetRequiredDeviceExtensions() {
- return {VK_KHR_SWAPCHAIN_EXTENSION_NAME};
+ return {
+ VK_KHR_SWAPCHAIN_EXTENSION_NAME,
+ };
+}
+
+std::vector<const char*>
+VulkanImplementationWin32::GetOptionalDeviceExtensions() {
+ return {};
}
VkFence VulkanImplementationWin32::CreateVkFenceForGpuFence(
@@ -136,17 +132,14 @@ bool VulkanImplementationWin32::CanImportGpuMemoryBuffer(
return false;
}
-bool VulkanImplementationWin32::CreateImageFromGpuMemoryHandle(
- VkDevice vk_device,
+std::unique_ptr<VulkanImage>
+VulkanImplementationWin32::CreateImageFromGpuMemoryHandle(
+ VulkanDeviceQueue* device_queue,
gfx::GpuMemoryBufferHandle gmb_handle,
gfx::Size size,
- VkImage* vk_image,
- VkImageCreateInfo* vk_image_info,
- VkDeviceMemory* vk_device_memory,
- VkDeviceSize* mem_allocation_size,
- base::Optional<VulkanYCbCrInfo>* ycbcr_info) {
+ VkFormat vk_formae) {
NOTIMPLEMENTED();
- return false;
+ return nullptr;
}
} // namespace gpu
diff --git a/chromium/gpu/vulkan/win32/vulkan_implementation_win32.h b/chromium/gpu/vulkan/win32/vulkan_implementation_win32.h
index 4d18e4f60c3..8e6b64945f0 100644
--- a/chromium/gpu/vulkan/win32/vulkan_implementation_win32.h
+++ b/chromium/gpu/vulkan/win32/vulkan_implementation_win32.h
@@ -5,8 +5,6 @@
#ifndef GPU_VULKAN_WIN32_VULKAN_IMPLEMENTATION_WIN32_H_
#define GPU_VULKAN_WIN32_VULKAN_IMPLEMENTATION_WIN32_H_
-#include <memory>
-
#include "base/component_export.h"
#include "gpu/vulkan/vulkan_implementation.h"
#include "gpu/vulkan/vulkan_instance.h"
@@ -16,7 +14,7 @@ namespace gpu {
class COMPONENT_EXPORT(VULKAN_WIN32) VulkanImplementationWin32
: public VulkanImplementation {
public:
- VulkanImplementationWin32() = default;
+ explicit VulkanImplementationWin32(bool use_swiftshader);
~VulkanImplementationWin32() override;
// VulkanImplementation:
@@ -29,6 +27,7 @@ class COMPONENT_EXPORT(VULKAN_WIN32) VulkanImplementationWin32
const std::vector<VkQueueFamilyProperties>& queue_family_properties,
uint32_t queue_family_index) override;
std::vector<const char*> GetRequiredDeviceExtensions() override;
+ std::vector<const char*> GetOptionalDeviceExtensions() override;
VkFence CreateVkFenceForGpuFence(VkDevice vk_device) override;
std::unique_ptr<gfx::GpuFence> ExportVkFenceToGpuFence(
VkDevice vk_device,
@@ -41,23 +40,15 @@ class COMPONENT_EXPORT(VULKAN_WIN32) VulkanImplementationWin32
VkExternalMemoryHandleTypeFlagBits GetExternalImageHandleType() override;
bool CanImportGpuMemoryBuffer(
gfx::GpuMemoryBufferType memory_buffer_type) override;
- bool CreateImageFromGpuMemoryHandle(
- VkDevice vk_device,
+ std::unique_ptr<VulkanImage> CreateImageFromGpuMemoryHandle(
+ VulkanDeviceQueue* device_queue,
gfx::GpuMemoryBufferHandle gmb_handle,
gfx::Size size,
- VkImage* vk_image,
- VkImageCreateInfo* vk_image_info,
- VkDeviceMemory* vk_device_memory,
- VkDeviceSize* mem_allocation_size,
- base::Optional<VulkanYCbCrInfo>* ycbcr_info) override;
+ VkFormat vk_formae) override;
private:
VulkanInstance vulkan_instance_;
- PFN_vkGetPhysicalDeviceWin32PresentationSupportKHR
- vkGetPhysicalDeviceWin32PresentationSupportKHR_ = nullptr;
- PFN_vkCreateWin32SurfaceKHR vkCreateWin32SurfaceKHR_ = nullptr;
-
DISALLOW_COPY_AND_ASSIGN(VulkanImplementationWin32);
};
diff --git a/chromium/gpu/vulkan/x/BUILD.gn b/chromium/gpu/vulkan/x/BUILD.gn
index 4a1158c403f..48a5a5125b6 100644
--- a/chromium/gpu/vulkan/x/BUILD.gn
+++ b/chromium/gpu/vulkan/x/BUILD.gn
@@ -38,9 +38,9 @@ component("x") {
"//gpu/vulkan",
]
- # Vulkan Swiftshader can only be built on Linux.
- # TODO(samans): Support more platforms. https://crbug.com/963988
- data_deps = [
- "//third_party/swiftshader/src/Vulkan:swiftshader_libvulkan",
- ]
+ if (enable_swiftshader_vulkan) {
+ # Vulkan Swiftshader can only be built on Linux.
+ # TODO(samans): Support more platforms. https://crbug.com/963988
+ data_deps = [ "//third_party/swiftshader/src/Vulkan:swiftshader_libvulkan" ]
+ }
}
diff --git a/chromium/gpu/vulkan/x/vulkan_implementation_x11.cc b/chromium/gpu/vulkan/x/vulkan_implementation_x11.cc
index 4544bcaf5ef..906543767b8 100644
--- a/chromium/gpu/vulkan/x/vulkan_implementation_x11.cc
+++ b/chromium/gpu/vulkan/x/vulkan_implementation_x11.cc
@@ -11,6 +11,7 @@
#include "base/optional.h"
#include "base/path_service.h"
#include "gpu/vulkan/vulkan_function_pointers.h"
+#include "gpu/vulkan/vulkan_image.h"
#include "gpu/vulkan/vulkan_instance.h"
#include "gpu/vulkan/vulkan_posix_util.h"
#include "gpu/vulkan/vulkan_surface.h"
@@ -18,18 +19,46 @@
#include "gpu/vulkan/x/vulkan_surface_x11.h"
#include "ui/gfx/gpu_fence.h"
#include "ui/gfx/gpu_memory_buffer.h"
+#include "ui/gfx/x/x11_types.h"
namespace gpu {
namespace {
+bool IsVulkanSurfaceSupported() {
+ static const char* extensions[] = {
+ "DRI3", // open source driver.
+ "ATIFGLRXDRI", // AMD proprietary driver.
+ "NV-CONTROL", // NVidia proprietary driver.
+ };
+ auto* display = gfx::GetXDisplay();
+ int ext_code, first_event, first_error;
+ for (const auto* extension : extensions) {
+ if (XQueryExtension(display, extension, &ext_code, &first_event,
+ &first_error)) {
+ return true;
+ }
+ }
+ return false;
+}
+
class ScopedUnsetDisplay {
public:
- ScopedUnsetDisplay() : display_(getenv("DISPLAY")) { unsetenv("DISPLAY"); }
- ~ScopedUnsetDisplay() { setenv("DISPLAY", display_.c_str(), 1); }
+ ScopedUnsetDisplay() {
+ const char* display = getenv("DISPLAY");
+ if (display) {
+ display_.emplace(display);
+ unsetenv("DISPLAY");
+ }
+ }
+ ~ScopedUnsetDisplay() {
+ if (display_) {
+ setenv("DISPLAY", display_->c_str(), 1);
+ }
+ }
private:
- std::string display_;
+ base::Optional<std::string> display_;
DISALLOW_COPY_AND_ASSIGN(ScopedUnsetDisplay);
};
@@ -37,9 +66,9 @@ bool InitializeVulkanFunctionPointers(
const base::FilePath& path,
VulkanFunctionPointers* vulkan_function_pointers) {
base::NativeLibraryLoadError native_library_load_error;
- vulkan_function_pointers->vulkan_loader_library_ =
+ vulkan_function_pointers->vulkan_loader_library =
base::LoadNativeLibrary(path, &native_library_load_error);
- return vulkan_function_pointers->vulkan_loader_library_;
+ return !!vulkan_function_pointers->vulkan_loader_library;
}
} // namespace
@@ -52,6 +81,8 @@ VulkanImplementationX11::VulkanImplementationX11(bool use_swiftshader)
VulkanImplementationX11::~VulkanImplementationX11() {}
bool VulkanImplementationX11::InitializeVulkanInstance(bool using_surface) {
+ if (using_surface && !use_swiftshader() && !IsVulkanSurfaceSupported())
+ using_surface = false;
using_surface_ = using_surface;
// Unset the DISPLAY env var so Vulkan can be initialized successfully even if
// the X server doesn't support Vulkan surfaces.
@@ -94,8 +125,8 @@ VulkanInstance* VulkanImplementationX11::GetVulkanInstance() {
std::unique_ptr<VulkanSurface> VulkanImplementationX11::CreateViewSurface(
gfx::AcceleratedWidget window) {
- DLOG_IF(FATAL, !using_surface_)
- << "Flag --disable-vulkan-surface is provided.";
+ if (!using_surface_)
+ return nullptr;
return VulkanSurfaceX11::Create(vulkan_instance_.vk_instance(), window);
}
@@ -129,6 +160,11 @@ VulkanImplementationX11::GetRequiredDeviceExtensions() {
return extensions;
}
+std::vector<const char*>
+VulkanImplementationX11::GetOptionalDeviceExtensions() {
+ return {VK_KHR_INCREMENTAL_PRESENT_EXTENSION_NAME};
+}
+
VkFence VulkanImplementationX11::CreateVkFenceForGpuFence(VkDevice vk_device) {
NOTREACHED();
return VK_NULL_HANDLE;
@@ -170,17 +206,14 @@ bool VulkanImplementationX11::CanImportGpuMemoryBuffer(
return false;
}
-bool VulkanImplementationX11::CreateImageFromGpuMemoryHandle(
- VkDevice vk_device,
+std::unique_ptr<VulkanImage>
+VulkanImplementationX11::CreateImageFromGpuMemoryHandle(
+ VulkanDeviceQueue* device_queue,
gfx::GpuMemoryBufferHandle gmb_handle,
gfx::Size size,
- VkImage* vk_image,
- VkImageCreateInfo* vk_image_info,
- VkDeviceMemory* vk_device_memory,
- VkDeviceSize* mem_allocation_size,
- base::Optional<VulkanYCbCrInfo>* ycbcr_info) {
+ VkFormat vk_formae) {
NOTIMPLEMENTED();
- return false;
+ return nullptr;
}
} // namespace gpu
diff --git a/chromium/gpu/vulkan/x/vulkan_implementation_x11.h b/chromium/gpu/vulkan/x/vulkan_implementation_x11.h
index 7a4b0c53596..6819fdc36ca 100644
--- a/chromium/gpu/vulkan/x/vulkan_implementation_x11.h
+++ b/chromium/gpu/vulkan/x/vulkan_implementation_x11.h
@@ -30,6 +30,7 @@ class COMPONENT_EXPORT(VULKAN_X11) VulkanImplementationX11
const std::vector<VkQueueFamilyProperties>& queue_family_properties,
uint32_t queue_family_index) override;
std::vector<const char*> GetRequiredDeviceExtensions() override;
+ std::vector<const char*> GetOptionalDeviceExtensions() override;
VkFence CreateVkFenceForGpuFence(VkDevice vk_device) override;
std::unique_ptr<gfx::GpuFence> ExportVkFenceToGpuFence(
VkDevice vk_device,
@@ -42,15 +43,11 @@ class COMPONENT_EXPORT(VULKAN_X11) VulkanImplementationX11
VkExternalMemoryHandleTypeFlagBits GetExternalImageHandleType() override;
bool CanImportGpuMemoryBuffer(
gfx::GpuMemoryBufferType memory_buffer_type) override;
- bool CreateImageFromGpuMemoryHandle(
- VkDevice vk_device,
+ std::unique_ptr<VulkanImage> CreateImageFromGpuMemoryHandle(
+ VulkanDeviceQueue* device_queue,
gfx::GpuMemoryBufferHandle gmb_handle,
gfx::Size size,
- VkImage* vk_image,
- VkImageCreateInfo* vk_image_info,
- VkDeviceMemory* vk_device_memory,
- VkDeviceSize* mem_allocation_size,
- base::Optional<VulkanYCbCrInfo>* ycbcr_info) override;
+ VkFormat vk_formae) override;
private:
bool using_surface_ = true;
diff --git a/chromium/gpu/vulkan/x/vulkan_surface_x11.cc b/chromium/gpu/vulkan/x/vulkan_surface_x11.cc
index 054186aba6d..27e7f706e54 100644
--- a/chromium/gpu/vulkan/x/vulkan_surface_x11.cc
+++ b/chromium/gpu/vulkan/x/vulkan_surface_x11.cc
@@ -6,46 +6,10 @@
#include "base/logging.h"
#include "gpu/vulkan/vulkan_function_pointers.h"
-#include "ui/events/platform/platform_event_dispatcher.h"
-#include "ui/events/platform/platform_event_source.h"
#include "ui/events/platform/x11/x11_event_source.h"
namespace gpu {
-#if !defined(USE_OZONE)
-
-class VulkanSurfaceX11::ExposeEventForwarder
- : public ui::PlatformEventDispatcher {
- public:
- explicit ExposeEventForwarder(VulkanSurfaceX11* surface) : surface_(surface) {
- if (auto* event_source = ui::PlatformEventSource::GetInstance()) {
- XSelectInput(gfx::GetXDisplay(), surface_->window_, ExposureMask);
- event_source->AddPlatformEventDispatcher(this);
- }
- }
-
- ~ExposeEventForwarder() override {
- if (auto* event_source = ui::PlatformEventSource::GetInstance())
- event_source->RemovePlatformEventDispatcher(this);
- }
-
- // ui::PlatformEventDispatcher:
- bool CanDispatchEvent(const ui::PlatformEvent& event) override {
- return surface_->CanDispatchXEvent(event);
- }
-
- uint32_t DispatchEvent(const ui::PlatformEvent& event) override {
- surface_->ForwardXExposeEvent(event);
- return ui::POST_DISPATCH_STOP_PROPAGATION;
- }
-
- private:
- VulkanSurfaceX11* const surface_;
- DISALLOW_COPY_AND_ASSIGN(ExposeEventForwarder);
-};
-
-#else // defined(USE_OZONE)
-
class VulkanSurfaceX11::ExposeEventForwarder : public ui::XEventDispatcher {
public:
explicit ExposeEventForwarder(VulkanSurfaceX11* surface) : surface_(surface) {
@@ -61,12 +25,6 @@ class VulkanSurfaceX11::ExposeEventForwarder : public ui::XEventDispatcher {
}
// ui::XEventDispatcher:
- void CheckCanDispatchNextPlatformEvent(XEvent* xev) override {}
- void PlatformEventDispatchFinished() override {}
- ui::PlatformEventDispatcher* GetPlatformEventDispatcher() override {
- return nullptr;
- }
-
bool DispatchXEvent(XEvent* xevent) override {
if (!surface_->CanDispatchXEvent(xevent))
return false;
@@ -79,8 +37,6 @@ class VulkanSurfaceX11::ExposeEventForwarder : public ui::XEventDispatcher {
DISALLOW_COPY_AND_ASSIGN(ExposeEventForwarder);
};
-#endif
-
// static
std::unique_ptr<VulkanSurfaceX11> VulkanSurfaceX11::Create(
VkInstance vk_instance,