path: root/chromium/gpu/command_buffer
author    Allan Sandfeld Jensen <allan.jensen@qt.io>  2021-10-26 13:57:00 +0200
committer Allan Sandfeld Jensen <allan.jensen@qt.io>  2021-11-02 11:31:01 +0000
commit    1943b3c2a1dcee36c233724fc4ee7613d71b9cf6 (patch)
tree      8c1b5f12357025c197da5427ae02cfdc2f3570d6 /chromium/gpu/command_buffer
parent    21ba0c5d4bf8fba15dddd97cd693bad2358b77fd (diff)
download  qtwebengine-chromium-1943b3c2a1dcee36c233724fc4ee7613d71b9cf6.tar.gz

BASELINE: Update Chromium to 94.0.4606.111

Change-Id: I924781584def20fc800bedf6ff41fdb96c438193
Reviewed-by: Allan Sandfeld Jensen <allan.jensen@qt.io>
Diffstat (limited to 'chromium/gpu/command_buffer')
-rw-r--r--  chromium/gpu/command_buffer/PRESUBMIT.py | 2
-rwxr-xr-x  chromium/gpu/command_buffer/build_raster_cmd_buffer.py | 11
-rwxr-xr-x  chromium/gpu/command_buffer/build_webgpu_cmd_buffer.py | 4
-rw-r--r--  chromium/gpu/command_buffer/client/BUILD.gn | 2
-rw-r--r--  chromium/gpu/command_buffer/client/client_discardable_manager.cc | 12
-rw-r--r--  chromium/gpu/command_buffer/client/client_discardable_manager.h | 4
-rw-r--r--  chromium/gpu/command_buffer/client/client_font_manager.cc | 2
-rw-r--r--  chromium/gpu/command_buffer/client/context_support.h | 4
-rw-r--r--  chromium/gpu/command_buffer/client/dawn_client_memory_transfer_service.cc | 42
-rw-r--r--  chromium/gpu/command_buffer/client/dawn_client_memory_transfer_service.h | 7
-rw-r--r--  chromium/gpu/command_buffer/client/dawn_client_serializer.cc | 44
-rw-r--r--  chromium/gpu/command_buffer/client/dawn_client_serializer.h | 20
-rw-r--r--  chromium/gpu/command_buffer/client/gles2_implementation.cc | 11
-rw-r--r--  chromium/gpu/command_buffer/client/gles2_implementation.h | 3
-rw-r--r--  chromium/gpu/command_buffer/client/gles2_implementation_unittest.cc | 2
-rw-r--r--  chromium/gpu/command_buffer/client/gpu_control_client.h | 4
-rw-r--r--  chromium/gpu/command_buffer/client/mock_transfer_buffer.cc | 2
-rw-r--r--  chromium/gpu/command_buffer/client/program_info_manager_unittest.cc | 2
-rw-r--r--  chromium/gpu/command_buffer/client/query_tracker.cc | 1
-rw-r--r--  chromium/gpu/command_buffer/client/query_tracker.h | 1
-rw-r--r--  chromium/gpu/command_buffer/client/query_tracker_unittest.cc | 2
-rw-r--r--  chromium/gpu/command_buffer/client/raster_cmd_helper_autogen.h | 5
-rw-r--r--  chromium/gpu/command_buffer/client/raster_implementation.cc | 32
-rw-r--r--  chromium/gpu/command_buffer/client/raster_implementation.h | 4
-rw-r--r--  chromium/gpu/command_buffer/client/raster_implementation_gles.cc | 1
-rw-r--r--  chromium/gpu/command_buffer/client/raster_implementation_gles.h | 3
-rw-r--r--  chromium/gpu/command_buffer/client/raster_implementation_unittest.cc | 2
-rw-r--r--  chromium/gpu/command_buffer/client/raster_interface.h | 4
-rw-r--r--  chromium/gpu/command_buffer/client/readback_buffer_shadow_tracker.h | 1
-rw-r--r--  chromium/gpu/command_buffer/client/transfer_buffer.cc | 8
-rw-r--r--  chromium/gpu/command_buffer/client/vertex_array_object_manager_unittest.cc | 2
-rw-r--r--  chromium/gpu/command_buffer/client/webgpu_implementation.cc | 236
-rw-r--r--  chromium/gpu/command_buffer/client/webgpu_implementation.h | 23
-rw-r--r--  chromium/gpu/command_buffer/client/webgpu_interface.h | 29
-rw-r--r--  chromium/gpu/command_buffer/client/webgpu_interface_stub.cc | 31
-rw-r--r--  chromium/gpu/command_buffer/client/webgpu_interface_stub.h | 8
-rw-r--r--  chromium/gpu/command_buffer/common/BUILD.gn | 5
-rw-r--r--  chromium/gpu/command_buffer/common/capabilities.h | 3
-rw-r--r--  chromium/gpu/command_buffer/common/command_buffer_id.h | 4
-rw-r--r--  chromium/gpu/command_buffer/common/discardable_handle.h | 4
-rw-r--r--  chromium/gpu/command_buffer/common/gles2_cmd_format.cc | 2
-rw-r--r--  chromium/gpu/command_buffer/common/gles2_cmd_utils.cc | 2
-rw-r--r--  chromium/gpu/command_buffer/common/gpu_memory_buffer_support.cc | 15
-rw-r--r--  chromium/gpu/command_buffer/common/gpu_memory_buffer_support.h | 5
-rw-r--r--  chromium/gpu/command_buffer/common/mailbox.cc | 2
-rw-r--r--  chromium/gpu/command_buffer/common/raster_cmd_enums.h | 22
-rw-r--r--  chromium/gpu/command_buffer/common/raster_cmd_format.cc | 2
-rw-r--r--  chromium/gpu/command_buffer/common/raster_cmd_format.h | 1
-rw-r--r--  chromium/gpu/command_buffer/common/raster_cmd_format_autogen.h | 18
-rw-r--r--  chromium/gpu/command_buffer/common/raster_cmd_format_test.cc | 2
-rw-r--r--  chromium/gpu/command_buffer/common/raster_cmd_format_test_autogen.h | 6
-rw-r--r--  chromium/gpu/command_buffer/common/shared_image_usage.h | 2
-rw-r--r--  chromium/gpu/command_buffer/common/sync_token.cc | 1
-rw-r--r--  chromium/gpu/command_buffer/common/sync_token.h | 1
-rw-r--r--  chromium/gpu/command_buffer/common/webgpu_cmd_enums.h | 2
-rw-r--r--  chromium/gpu/command_buffer/common/webgpu_cmd_format.cc | 2
-rw-r--r--  chromium/gpu/command_buffer/raster_cmd_buffer_functions.txt | 2
-rw-r--r--  chromium/gpu/command_buffer/service/BUILD.gn | 26
-rw-r--r--  chromium/gpu/command_buffer/service/ahardwarebuffer_utils.cc | 6
-rw-r--r--  chromium/gpu/command_buffer/service/ahardwarebuffer_utils.h | 3
-rw-r--r--  chromium/gpu/command_buffer/service/buffer_manager_unittest.cc | 2
-rw-r--r--  chromium/gpu/command_buffer/service/common_decoder.cc | 2
-rw-r--r--  chromium/gpu/command_buffer/service/context_group.cc | 1
-rw-r--r--  chromium/gpu/command_buffer/service/context_state.cc | 5
-rw-r--r--  chromium/gpu/command_buffer/service/dawn_platform.cc | 73
-rw-r--r--  chromium/gpu/command_buffer/service/dawn_platform.h | 3
-rw-r--r--  chromium/gpu/command_buffer/service/dawn_service_memory_transfer_service.cc | 30
-rw-r--r--  chromium/gpu/command_buffer/service/external_semaphore.cc | 2
-rw-r--r--  chromium/gpu/command_buffer/service/external_vk_image_backing.cc | 238
-rw-r--r--  chromium/gpu/command_buffer/service/external_vk_image_backing.h | 12
-rw-r--r--  chromium/gpu/command_buffer/service/external_vk_image_dawn_representation.cc | 3
-rw-r--r--  chromium/gpu/command_buffer/service/external_vk_image_factory.cc | 58
-rw-r--r--  chromium/gpu/command_buffer/service/external_vk_image_factory.h | 11
-rw-r--r--  chromium/gpu/command_buffer/service/external_vk_image_factory_unittest.cc | 20
-rw-r--r--  chromium/gpu/command_buffer/service/external_vk_image_overlay_representation.cc | 5
-rw-r--r--  chromium/gpu/command_buffer/service/external_vk_image_skia_representation.cc | 3
-rw-r--r--  chromium/gpu/command_buffer/service/feature_info.cc | 8
-rw-r--r--  chromium/gpu/command_buffer/service/framebuffer_manager_unittest.cc | 2
-rw-r--r--  chromium/gpu/command_buffer/service/gl_utils.h | 7
-rw-r--r--  chromium/gpu/command_buffer/service/gles2_cmd_decoder.cc | 102
-rw-r--r--  chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough.cc | 35
-rw-r--r--  chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough.h | 9
-rw-r--r--  chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough_doers.cc | 126
-rw-r--r--  chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest.cc | 2
-rw-r--r--  chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_attribs.cc | 2
-rw-r--r--  chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_base.cc | 19
-rw-r--r--  chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_extensions.cc | 1
-rw-r--r--  chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_framebuffers.cc | 7
-rw-r--r--  chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_programs.cc | 2
-rw-r--r--  chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_textures.cc | 2
-rw-r--r--  chromium/gpu/command_buffer/service/gpu_switches.cc | 4
-rw-r--r--  chromium/gpu/command_buffer/service/gpu_switches.h | 1
-rw-r--r--  chromium/gpu/command_buffer/service/gpu_tracer.cc | 2
-rw-r--r--  chromium/gpu/command_buffer/service/gr_cache_controller.cc | 5
-rw-r--r--  chromium/gpu/command_buffer/service/gr_cache_controller.h | 9
-rw-r--r--  chromium/gpu/command_buffer/service/gr_cache_controller_unittest.cc | 4
-rw-r--r--  chromium/gpu/command_buffer/service/gr_shader_cache.cc | 69
-rw-r--r--  chromium/gpu/command_buffer/service/gr_shader_cache.h | 39
-rw-r--r--  chromium/gpu/command_buffer/service/gr_shader_cache_unittest.cc | 37
-rw-r--r--  chromium/gpu/command_buffer/service/image_reader_gl_owner.cc | 118
-rw-r--r--  chromium/gpu/command_buffer/service/image_reader_gl_owner.h | 32
-rw-r--r--  chromium/gpu/command_buffer/service/mock_texture_owner.cc | 6
-rw-r--r--  chromium/gpu/command_buffer/service/mock_texture_owner.h | 1
-rw-r--r--  chromium/gpu/command_buffer/service/program_cache.cc | 2
-rw-r--r--  chromium/gpu/command_buffer/service/program_manager.cc | 2
-rw-r--r--  chromium/gpu/command_buffer/service/program_manager_unittest.cc | 2
-rw-r--r--  chromium/gpu/command_buffer/service/raster_cmd_validation.cc | 2
-rw-r--r--  chromium/gpu/command_buffer/service/raster_cmd_validation_autogen.h | 6
-rw-r--r--  chromium/gpu/command_buffer/service/raster_cmd_validation_implementation_autogen.h | 11
-rw-r--r--  chromium/gpu/command_buffer/service/raster_decoder.cc | 257
-rw-r--r--  chromium/gpu/command_buffer/service/raster_decoder_autogen.h | 4
-rw-r--r--  chromium/gpu/command_buffer/service/raster_decoder_unittest_1_autogen.h | 18
-rw-r--r--  chromium/gpu/command_buffer/service/ref_counted_lock.cc | 22
-rw-r--r--  chromium/gpu/command_buffer/service/ref_counted_lock.h | 61
-rw-r--r--  chromium/gpu/command_buffer/service/scheduler.cc | 106
-rw-r--r--  chromium/gpu/command_buffer/service/scheduler.h | 24
-rw-r--r--  chromium/gpu/command_buffer/service/scheduler_task_runner.cc | 94
-rw-r--r--  chromium/gpu/command_buffer/service/scheduler_task_runner.h | 55
-rw-r--r--  chromium/gpu/command_buffer/service/scheduler_unittest.cc | 62
-rw-r--r--  chromium/gpu/command_buffer/service/sequence_id.h | 4
-rw-r--r--  chromium/gpu/command_buffer/service/service_font_manager.cc | 2
-rw-r--r--  chromium/gpu/command_buffer/service/service_utils.cc | 8
-rw-r--r--  chromium/gpu/command_buffer/service/shared_context_state.cc | 14
-rw-r--r--  chromium/gpu/command_buffer/service/shared_context_state.h | 6
-rw-r--r--  chromium/gpu/command_buffer/service/shared_image_backing.cc | 3
-rw-r--r--  chromium/gpu/command_buffer/service/shared_image_backing.h | 4
-rw-r--r--  chromium/gpu/command_buffer/service/shared_image_backing_d3d.cc | 115
-rw-r--r--  chromium/gpu/command_buffer/service/shared_image_backing_d3d.h | 5
-rw-r--r--  chromium/gpu/command_buffer/service/shared_image_backing_egl_image.cc | 90
-rw-r--r--  chromium/gpu/command_buffer/service/shared_image_backing_egl_image.h | 18
-rw-r--r--  chromium/gpu/command_buffer/service/shared_image_backing_factory.h | 15
-rw-r--r--  chromium/gpu/command_buffer/service/shared_image_backing_factory_ahardwarebuffer.cc | 32
-rw-r--r--  chromium/gpu/command_buffer/service/shared_image_backing_factory_ahardwarebuffer.h | 11
-rw-r--r--  chromium/gpu/command_buffer/service/shared_image_backing_factory_d3d.cc | 27
-rw-r--r--  chromium/gpu/command_buffer/service/shared_image_backing_factory_d3d.h | 11
-rw-r--r--  chromium/gpu/command_buffer/service/shared_image_backing_factory_d3d_unittest.cc | 87
-rw-r--r--  chromium/gpu/command_buffer/service/shared_image_backing_factory_egl.cc | 152
-rw-r--r--  chromium/gpu/command_buffer/service/shared_image_backing_factory_egl.h | 99
-rw-r--r--  chromium/gpu/command_buffer/service/shared_image_backing_factory_egl_unittest.cc | 410
-rw-r--r--  chromium/gpu/command_buffer/service/shared_image_backing_factory_gl_common.cc | 193
-rw-r--r--  chromium/gpu/command_buffer/service/shared_image_backing_factory_gl_common.h | 87
-rw-r--r--  chromium/gpu/command_buffer/service/shared_image_backing_factory_gl_image.cc | 379
-rw-r--r--  chromium/gpu/command_buffer/service/shared_image_backing_factory_gl_image.h | 123
-rw-r--r--  chromium/gpu/command_buffer/service/shared_image_backing_factory_gl_image_unittest.cc | 872
-rw-r--r--  chromium/gpu/command_buffer/service/shared_image_backing_factory_gl_texture.cc | 563
-rw-r--r--  chromium/gpu/command_buffer/service/shared_image_backing_factory_gl_texture.h | 112
-rw-r--r--  chromium/gpu/command_buffer/service/shared_image_backing_factory_gl_texture_unittest.cc | 728
-rw-r--r--  chromium/gpu/command_buffer/service/shared_image_backing_factory_iosurface.mm | 3
-rw-r--r--  chromium/gpu/command_buffer/service/shared_image_backing_factory_iosurface_unittest.cc | 19
-rw-r--r--  chromium/gpu/command_buffer/service/shared_image_backing_factory_ozone.cc | 124
-rw-r--r--  chromium/gpu/command_buffer/service/shared_image_backing_factory_ozone.h | 23
-rw-r--r--  chromium/gpu/command_buffer/service/shared_image_backing_gl_common.cc | 3
-rw-r--r--  chromium/gpu/command_buffer/service/shared_image_backing_gl_common.h | 1
-rw-r--r--  chromium/gpu/command_buffer/service/shared_image_backing_gl_image.cc | 38
-rw-r--r--  chromium/gpu/command_buffer/service/shared_image_backing_gl_image.h | 3
-rw-r--r--  chromium/gpu/command_buffer/service/shared_image_backing_gl_texture.cc | 10
-rw-r--r--  chromium/gpu/command_buffer/service/shared_image_backing_gl_texture.h | 3
-rw-r--r--  chromium/gpu/command_buffer/service/shared_image_backing_ozone.cc | 192
-rw-r--r--  chromium/gpu/command_buffer/service/shared_image_backing_ozone.h | 41
-rw-r--r--  chromium/gpu/command_buffer/service/shared_image_backing_scoped_hardware_buffer_fence_sync.cc | 3
-rw-r--r--  chromium/gpu/command_buffer/service/shared_image_factory.cc | 249
-rw-r--r--  chromium/gpu/command_buffer/service/shared_image_factory.h | 41
-rw-r--r--  chromium/gpu/command_buffer/service/shared_image_factory_unittest.cc | 5
-rw-r--r--  chromium/gpu/command_buffer/service/shared_image_manager.cc | 6
-rw-r--r--  chromium/gpu/command_buffer/service/shared_image_manager.h | 3
-rw-r--r--  chromium/gpu/command_buffer/service/shared_image_representation.cc | 11
-rw-r--r--  chromium/gpu/command_buffer/service/shared_image_representation_dawn_egl_image.cc | 79
-rw-r--r--  chromium/gpu/command_buffer/service/shared_image_representation_dawn_egl_image.h | 40
-rw-r--r--  chromium/gpu/command_buffer/service/shared_image_representation_dawn_ozone.cc | 23
-rw-r--r--  chromium/gpu/command_buffer/service/shared_image_representation_gl_ozone.cc | 27
-rw-r--r--  chromium/gpu/command_buffer/service/shared_image_representation_gl_ozone.h | 4
-rw-r--r--  chromium/gpu/command_buffer/service/shared_image_representation_skia_gl.cc | 11
-rw-r--r--  chromium/gpu/command_buffer/service/shared_image_representation_skia_vk_android.cc | 47
-rw-r--r--  chromium/gpu/command_buffer/service/shared_image_representation_skia_vk_android.h | 2
-rw-r--r--  chromium/gpu/command_buffer/service/shared_image_representation_unittest.cc | 4
-rw-r--r--  chromium/gpu/command_buffer/service/shared_image_video.cc | 289
-rw-r--r--  chromium/gpu/command_buffer/service/shared_image_video.h | 27
-rw-r--r--  chromium/gpu/command_buffer/service/skia_utils.cc | 39
-rw-r--r--  chromium/gpu/command_buffer/service/skia_utils.h | 8
-rw-r--r--  chromium/gpu/command_buffer/service/stream_texture_shared_image_interface.h | 29
-rw-r--r--  chromium/gpu/command_buffer/service/surface_texture_gl_owner.cc | 39
-rw-r--r--  chromium/gpu/command_buffer/service/sync_point_manager.cc | 2
-rw-r--r--  chromium/gpu/command_buffer/service/test_helper.cc | 5
-rw-r--r--  chromium/gpu/command_buffer/service/test_shared_image_backing.cc | 3
-rw-r--r--  chromium/gpu/command_buffer/service/test_shared_image_backing.h | 3
-rw-r--r--  chromium/gpu/command_buffer/service/texture_manager.cc | 2
-rw-r--r--  chromium/gpu/command_buffer/service/texture_manager.h | 9
-rw-r--r--  chromium/gpu/command_buffer/service/texture_owner.h | 14
-rw-r--r--  chromium/gpu/command_buffer/service/webgpu_cmd_validation.cc | 2
-rw-r--r--  chromium/gpu/command_buffer/service/webgpu_cmd_validation.h | 1
-rw-r--r--  chromium/gpu/command_buffer/service/webgpu_decoder_impl.cc | 138
-rw-r--r--  chromium/gpu/command_buffer/service/wrapped_sk_image.cc | 67
-rw-r--r--  chromium/gpu/command_buffer/service/wrapped_sk_image.h | 13
193 files changed, 5846 insertions, 2585 deletions
diff --git a/chromium/gpu/command_buffer/PRESUBMIT.py b/chromium/gpu/command_buffer/PRESUBMIT.py
index c3ddb2ccae7..9c8df9fc678 100644
--- a/chromium/gpu/command_buffer/PRESUBMIT.py
+++ b/chromium/gpu/command_buffer/PRESUBMIT.py
@@ -9,6 +9,8 @@ for more details on the presubmit API built into depot_tools.
import os.path
+USE_PYTHON3 = True
+
def _IsGLES2CmdBufferFile(affected_file):
filename = os.path.basename(affected_file.LocalPath())
diff --git a/chromium/gpu/command_buffer/build_raster_cmd_buffer.py b/chromium/gpu/command_buffer/build_raster_cmd_buffer.py
index 27240ce2bcc..f0832ff8d46 100755
--- a/chromium/gpu/command_buffer/build_raster_cmd_buffer.py
+++ b/chromium/gpu/command_buffer/build_raster_cmd_buffer.py
@@ -141,6 +141,15 @@ _NAMED_TYPE_INFO = {
'viz::ResourceFormat::ETC1',
],
},
+ 'gpu::raster::MsaaMode': {
+ 'type': 'gpu::raster::MsaaMode',
+ 'is_complete': True,
+ 'valid': [
+ 'gpu::raster::MsaaMode::kNoMSAA',
+ 'gpu::raster::MsaaMode::kMSAA',
+ 'gpu::raster::MsaaMode::kDMSAA',
+ ],
+ },
}
# A function info object specifies the type and other special data for the
@@ -227,11 +236,13 @@ _FUNCTION_INFO = {
'impl_func': False,
'client_test': False,
'decoder_func': 'DoFinish',
+ 'unit_test': False,
'trace_level': 1,
},
'Flush': {
'impl_func': False,
'decoder_func': 'DoFlush',
+ 'unit_test': False,
'trace_level': 1,
},
'GetError': {
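
The new 'gpu::raster::MsaaMode' validator entries above correspond to a three-state enum on the C++ side, defined in common/raster_cmd_enums.h (also touched by this commit). A minimal sketch of what that enum plausibly looks like, assuming the enumerator set matches the 'valid' list; the underlying values are not shown in this diff:

    // Hypothetical sketch of gpu::raster::MsaaMode. Enumerators are taken from
    // the validator list above; everything else is assumed.
    namespace gpu {
    namespace raster {
    enum MsaaMode {
      kNoMSAA,  // rasterize single-sampled; msaa_sample_count is ignored
      kMSAA,    // regular multisampling; honors msaa_sample_count
      kDMSAA,   // dynamic MSAA
    };
    }  // namespace raster
    }  // namespace gpu
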
diff --git a/chromium/gpu/command_buffer/build_webgpu_cmd_buffer.py b/chromium/gpu/command_buffer/build_webgpu_cmd_buffer.py
index 07b6c8ee2a9..9ff591340ad 100755
--- a/chromium/gpu/command_buffer/build_webgpu_cmd_buffer.py
+++ b/chromium/gpu/command_buffer/build_webgpu_cmd_buffer.py
@@ -84,6 +84,10 @@ _FUNCTION_INFO = {
'request_device_properties_size * sizeof(char)',
},
},
+ 'DestroyServer': {
+ 'impl_func': False,
+ 'internal': True,
+ },
}
def main(argv):
diff --git a/chromium/gpu/command_buffer/client/BUILD.gn b/chromium/gpu/command_buffer/client/BUILD.gn
index eec19c86b28..c63ee0c7df0 100644
--- a/chromium/gpu/command_buffer/client/BUILD.gn
+++ b/chromium/gpu/command_buffer/client/BUILD.gn
@@ -116,7 +116,6 @@ source_set("gles2_cmd_helper_sources") {
]
configs += [
- "//build/config/compiler:no_shorten_64_warnings",
"//gpu:gpu_gles2_implementation",
]
@@ -204,6 +203,7 @@ source_set("raster_interface") {
"//base",
"//components/viz/common:resource_format",
"//gpu/command_buffer/common",
+ "//gpu/command_buffer/common:raster",
"//skia:skia",
"//ui/gfx:buffer_types",
]
diff --git a/chromium/gpu/command_buffer/client/client_discardable_manager.cc b/chromium/gpu/command_buffer/client/client_discardable_manager.cc
index ec946ca3cd1..da193fb269f 100644
--- a/chromium/gpu/command_buffer/client/client_discardable_manager.cc
+++ b/chromium/gpu/command_buffer/client/client_discardable_manager.cc
@@ -6,7 +6,7 @@
#include "base/atomic_sequence_num.h"
#include "base/containers/flat_set.h"
-#include "base/numerics/checked_math.h"
+#include "base/numerics/safe_conversions.h"
#include "base/system/sys_info.h"
namespace gpu {
@@ -109,17 +109,16 @@ void FreeOffsetSet::ReturnFreeOffset(uint32_t offset) {
// Returns the size of the allocation which ClientDiscardableManager will
// sub-allocate from. This should be at least as big as the minimum shared
// memory allocation size.
-uint32_t AllocationSize() {
+size_t AllocationSize() {
#if defined(OS_NACL)
// base::SysInfo isn't available under NaCl.
size_t system_allocation_size = getpagesize();
#else
size_t system_allocation_size = base::SysInfo::VMAllocationGranularity();
#endif
- DCHECK(base::CheckedNumeric<uint32_t>(system_allocation_size).IsValid());
// If the allocation is small (less than 2K), round it up to at least 2K.
- return std::max(2048u, static_cast<uint32_t>(system_allocation_size));
+ return std::max(size_t{2048}, system_allocation_size);
}
ClientDiscardableHandle::Id GetNextHandleId() {
@@ -155,8 +154,7 @@ ClientDiscardableHandle::Id ClientDiscardableManager::CreateHandle(
return ClientDiscardableHandle::Id();
}
- DCHECK_LT(offset * element_size_, std::numeric_limits<uint32_t>::max());
- uint32_t byte_offset = static_cast<uint32_t>(offset * element_size_);
+ uint32_t byte_offset = base::checked_cast<uint32_t>(offset * element_size_);
ClientDiscardableHandle handle(std::move(buffer), byte_offset, shm_id);
ClientDiscardableHandle::Id handle_id = GetNextHandleId();
handles_.emplace(handle_id, handle);
@@ -238,7 +236,7 @@ bool ClientDiscardableManager::FindAllocation(CommandBuffer* command_buffer,
// Allocate more space.
auto allocation = std::make_unique<Allocation>(elements_per_allocation_);
allocation->buffer = command_buffer->CreateTransferBuffer(
- allocation_size_, &allocation->shm_id);
+ base::checked_cast<uint32_t>(allocation_size_), &allocation->shm_id);
if (!allocation->buffer)
return false;
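
The hunks above replace hand-rolled DCHECK-plus-static_cast narrowing with base::checked_cast and widen the allocation bookkeeping to size_t. A self-contained sketch of the checked-narrowing behavior this relies on, using a local stand-in for base::checked_cast (which CHECK-fails on out-of-range values):

    #include <cstdint>
    #include <cstdlib>
    #include <limits>

    // Local stand-in for base::checked_cast<Dst>(src) for unsigned types:
    // returns the value if it fits in Dst, otherwise terminates instead of
    // silently truncating.
    template <typename Dst, typename Src>
    Dst CheckedCast(Src value) {
      if (value > static_cast<Src>(std::numeric_limits<Dst>::max()))
        std::abort();
      return static_cast<Dst>(value);
    }

    int main() {
      size_t byte_offset_src = size_t{4096} * 4;  // e.g. offset * element_size_
      uint32_t byte_offset = CheckedCast<uint32_t>(byte_offset_src);  // fits
      (void)byte_offset;
      // CheckedCast<uint32_t>(size_t{1} << 40) would abort rather than wrap.
      return 0;
    }
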
diff --git a/chromium/gpu/command_buffer/client/client_discardable_manager.h b/chromium/gpu/command_buffer/client/client_discardable_manager.h
index 21888fc06f5..656c47393a0 100644
--- a/chromium/gpu/command_buffer/client/client_discardable_manager.h
+++ b/chromium/gpu/command_buffer/client/client_discardable_manager.h
@@ -62,9 +62,9 @@ class GPU_EXPORT ClientDiscardableManager {
bool CreateNewAllocation(CommandBuffer* command_buffer);
private:
- uint32_t allocation_size_;
+ size_t allocation_size_;
size_t element_size_ = sizeof(base::subtle::Atomic32);
- uint32_t elements_per_allocation_ = allocation_size_ / element_size_;
+ size_t elements_per_allocation_ = allocation_size_ / element_size_;
struct Allocation;
std::vector<std::unique_ptr<Allocation>> allocations_;
diff --git a/chromium/gpu/command_buffer/client/client_font_manager.cc b/chromium/gpu/command_buffer/client/client_font_manager.cc
index 40aeb331000..6df6769b659 100644
--- a/chromium/gpu/command_buffer/client/client_font_manager.cc
+++ b/chromium/gpu/command_buffer/client/client_font_manager.cc
@@ -40,7 +40,7 @@ class Serializer {
DCHECK(base::bits::IsPowerOfTwo(alignment));
size_t memory = reinterpret_cast<size_t>(memory_);
- size_t padding = base::bits::Align(memory, alignment) - memory;
+ size_t padding = base::bits::AlignUp(memory, alignment) - memory;
DCHECK_LE(bytes_written_ + size + padding, memory_size_);
memory_ += padding;
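
base::bits::Align was renamed to base::bits::AlignUp across this commit; the padding computation above rounds the current write pointer up to the next multiple of a power-of-two alignment. A self-contained illustration, with a local AlignUp assumed to match base::bits::AlignUp for power-of-two alignments:

    #include <cassert>
    #include <cstddef>

    // Round value up to the next multiple of a power-of-two alignment
    // (same contract as base::bits::AlignUp for power-of-two alignments).
    size_t AlignUp(size_t value, size_t alignment) {
      return (value + alignment - 1) & ~(alignment - 1);
    }

    int main() {
      size_t memory = 0x1003;                        // current write position
      size_t padding = AlignUp(memory, 8) - memory;  // 0x1008 - 0x1003
      assert(padding == 5);
      return 0;
    }
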
diff --git a/chromium/gpu/command_buffer/client/context_support.h b/chromium/gpu/command_buffer/client/context_support.h
index 80838db15ef..61c1bc4b761 100644
--- a/chromium/gpu/command_buffer/client/context_support.h
+++ b/chromium/gpu/command_buffer/client/context_support.h
@@ -9,6 +9,7 @@
#include <vector>
#include "base/callback.h"
+#include "ui/gfx/gpu_fence_handle.h"
#include "ui/gfx/overlay_transform.h"
#include "ui/gfx/presentation_feedback.h"
@@ -60,7 +61,8 @@ class ContextSupport {
bool aggressively_free_resources) = 0;
using SwapCompletedCallback =
- base::OnceCallback<void(const SwapBuffersCompleteParams&)>;
+ base::OnceCallback<void(const SwapBuffersCompleteParams&,
+ gfx::GpuFenceHandle)>;
using PresentationCallback =
base::OnceCallback<void(const gfx::PresentationFeedback&)>;
virtual void Swap(uint32_t flags,
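
With the added gfx::GpuFenceHandle parameter, swap-completion consumers now receive a release fence alongside the completion params. A minimal sketch of a callback matching the new shape; the stub types below stand in for the real ones (is_null() mirrors gfx::GpuFenceHandle's accessor):

    // Stub stand-ins for illustration only.
    struct SwapBuffersCompleteParams {};
    namespace gfx {
    struct GpuFenceHandle {
      bool is_null() const { return true; }
    };
    }  // namespace gfx

    // Matches the updated SwapCompletedCallback signature: completion params
    // plus a release fence the client can wait on before reusing resources.
    void OnSwapCompleted(const SwapBuffersCompleteParams& /*params*/,
                         gfx::GpuFenceHandle release_fence) {
      if (!release_fence.is_null()) {
        // Wait on or forward the fence here.
      }
    }

    int main() {
      OnSwapCompleted(SwapBuffersCompleteParams{}, gfx::GpuFenceHandle{});
      return 0;
    }
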
diff --git a/chromium/gpu/command_buffer/client/dawn_client_memory_transfer_service.cc b/chromium/gpu/command_buffer/client/dawn_client_memory_transfer_service.cc
index ee9275cdf84..bdbacb80522 100644
--- a/chromium/gpu/command_buffer/client/dawn_client_memory_transfer_service.cc
+++ b/chromium/gpu/command_buffer/client/dawn_client_memory_transfer_service.cc
@@ -34,26 +34,14 @@ class DawnClientMemoryTransferService::ReadHandleImpl
*reinterpret_cast<MemoryTransferHandle*>(serialize_pointer) = handle_;
}
- // Load initial data and open the handle for reading.
- // This function takes in the serialized result of
- // ReadHandle::SerializeInitialData.
- // It writes to |data| and |data_length| the pointer and size
- // of the mapped data for reading.
- // The allocation must live at least until the ReadHandle is destructed.
- bool DeserializeInitialData(const void* deserialize_pointer,
- size_t deserialize_size,
- const void** data,
- size_t* data_length) override {
+ const void* GetData() override { return ptr_; }
+
+ bool DeserializeDataUpdate(const void* deserialize_pointer,
+ size_t deserialize_size,
+ size_t offset,
+ size_t size) override {
// No data is deserialized because we're using shared memory.
DCHECK_EQ(deserialize_size, 0u);
- DCHECK(data);
- DCHECK(data_length);
-
- // Write the pointer and size of the shared memory allocation.
- // |data| and |data_length| are provided by the dawn_wire client.
- *data = ptr_;
- *data_length = handle_.size;
-
return true;
}
@@ -86,18 +74,16 @@ class DawnClientMemoryTransferService::WriteHandleImpl
*reinterpret_cast<MemoryTransferHandle*>(serialize_pointer) = handle_;
}
- // Open the handle for writing.
- // The data returned must live at least until the WriteHandle is destructed.
- std::pair<void*, size_t> Open() override {
- return std::make_pair(ptr_, handle_.size);
- }
+ void* GetData() override { return ptr_; }
- size_t SerializeFlushSize() override {
+ size_t SizeOfSerializeDataUpdate(size_t offset, size_t size) override {
// No data is serialized because we're using shared memory.
return 0;
}
- void SerializeFlush(void* serialize_pointer) override {
+ void SerializeDataUpdate(void* serialize_pointer,
+ size_t offset,
+ size_t size) override {
// No data is serialized because we're using shared memory.
}
@@ -139,7 +125,7 @@ DawnClientMemoryTransferService::CreateWriteHandle(size_t size) {
void* DawnClientMemoryTransferService::AllocateHandle(
size_t size,
MemoryTransferHandle* handle) {
- if (size > std::numeric_limits<uint32_t>::max()) {
+ if (size > std::numeric_limits<uint32_t>::max() || disconnected_) {
return nullptr;
}
@@ -169,5 +155,9 @@ void DawnClientMemoryTransferService::FreeHandles(CommandBufferHelper* helper) {
}
}
+void DawnClientMemoryTransferService::Disconnect() {
+ disconnected_ = true;
+}
+
} // namespace webgpu
} // namespace gpu
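
Because the client writes directly into memory shared with the GPU process, the renamed DataUpdate hooks above carry no payload; only the MemoryTransferHandle crosses the wire when the handle is created. A toy analog of that fast path (not dawn_wire's actual interface):

    #include <cassert>
    #include <cstddef>
    #include <cstring>

    // Toy write handle over a shared mapping: a "data update" needs zero
    // serialized bytes because both processes already see the same memory.
    struct ToySharedMemoryWriteHandle {
      void* ptr;  // shared-memory mapping written directly by the client
      size_t SizeOfSerializeDataUpdate(size_t /*offset*/, size_t /*size*/) {
        return 0;  // no payload, mirroring WriteHandleImpl above
      }
      void SerializeDataUpdate(void* /*out*/, size_t /*offset*/, size_t /*size*/) {
        // Nothing to copy; the service reads the mapping directly.
      }
    };

    int main() {
      char shared_mapping[64] = {};
      ToySharedMemoryWriteHandle handle{shared_mapping};
      std::memcpy(handle.ptr, "hello", 5);
      assert(handle.SizeOfSerializeDataUpdate(0, 5) == 0);
      return 0;
    }
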
diff --git a/chromium/gpu/command_buffer/client/dawn_client_memory_transfer_service.h b/chromium/gpu/command_buffer/client/dawn_client_memory_transfer_service.h
index 61efd120ea5..3c8780cdc45 100644
--- a/chromium/gpu/command_buffer/client/dawn_client_memory_transfer_service.h
+++ b/chromium/gpu/command_buffer/client/dawn_client_memory_transfer_service.h
@@ -17,7 +17,7 @@ namespace webgpu {
struct MemoryTransferHandle;
-class DawnClientMemoryTransferService final
+class DawnClientMemoryTransferService
: public dawn_wire::client::MemoryTransferService {
public:
DawnClientMemoryTransferService(MappedMemoryManager* mapped_memory);
@@ -35,6 +35,8 @@ class DawnClientMemoryTransferService final
// process.
void FreeHandles(CommandBufferHelper* helper);
+ void Disconnect();
+
private:
class ReadHandleImpl;
class WriteHandleImpl;
@@ -50,6 +52,9 @@ class DawnClientMemoryTransferService final
// Pointers to memory allocated by the MappedMemoryManager to free after
// the next Flush.
std::vector<void*> free_blocks_;
+
+ // If disconnected, new handle creation always returns null.
+ bool disconnected_ = false;
};
} // namespace webgpu
diff --git a/chromium/gpu/command_buffer/client/dawn_client_serializer.cc b/chromium/gpu/command_buffer/client/dawn_client_serializer.cc
index f69ca379983..31154e29571 100644
--- a/chromium/gpu/command_buffer/client/dawn_client_serializer.cc
+++ b/chromium/gpu/command_buffer/client/dawn_client_serializer.cc
@@ -14,37 +14,16 @@
namespace gpu {
namespace webgpu {
-// static
-std::unique_ptr<DawnClientSerializer> DawnClientSerializer::Create(
- WebGPUImplementation* client,
- WebGPUCmdHelper* helper,
- DawnClientMemoryTransferService* memory_transfer_service,
- const SharedMemoryLimits& limits) {
- std::unique_ptr<TransferBuffer> transfer_buffer =
- std::make_unique<TransferBuffer>(helper);
- if (!transfer_buffer->Initialize(limits.start_transfer_buffer_size,
- /* start offset */ 0,
- limits.min_transfer_buffer_size,
- limits.max_transfer_buffer_size,
- /* alignment */ 8)) {
- return nullptr;
- }
- return std::make_unique<DawnClientSerializer>(
- client, helper, memory_transfer_service, std::move(transfer_buffer),
- limits.start_transfer_buffer_size);
-}
-
DawnClientSerializer::DawnClientSerializer(
WebGPUImplementation* client,
WebGPUCmdHelper* helper,
DawnClientMemoryTransferService* memory_transfer_service_,
- std::unique_ptr<TransferBuffer> transfer_buffer,
- uint32_t buffer_initial_size)
+ std::unique_ptr<TransferBuffer> transfer_buffer)
: client_(client),
helper_(helper),
memory_transfer_service_(memory_transfer_service_),
transfer_buffer_(std::move(transfer_buffer)),
- buffer_initial_size_(buffer_initial_size),
+ buffer_initial_size_(transfer_buffer_->GetSize()),
buffer_(helper_, transfer_buffer_.get()) {
DCHECK_GT(buffer_initial_size_, 0u);
}
@@ -102,7 +81,7 @@ void* DawnClientSerializer::GetCmdSpace(size_t size) {
return buffer_.address();
}
-bool DawnClientSerializer::Flush() {
+void DawnClientSerializer::Commit() {
if (buffer_.valid()) {
TRACE_EVENT1(TRACE_DISABLED_BY_DEFAULT("gpu.dawn"),
"DawnClientSerializer::Flush", "bytes", put_offset_);
@@ -120,7 +99,6 @@ bool DawnClientSerializer::Flush() {
memory_transfer_service_->FreeHandles(helper_);
}
- return true;
}
void DawnClientSerializer::SetAwaitingFlush(bool awaiting_flush) {
@@ -132,7 +110,21 @@ void DawnClientSerializer::SetAwaitingFlush(bool awaiting_flush) {
void DawnClientSerializer::Disconnect() {
buffer_.Discard();
- transfer_buffer_ = nullptr;
+ if (transfer_buffer_) {
+ auto transfer_buffer = std::move(transfer_buffer_);
+ // Wait for commands to finish before we free shared memory that
+ // the GPU process is using.
+ // TODO(crbug.com/1231599): This Finish may not be necessary if the
+ // shared memory is not immediately freed. Investigate this and
+ // consider optimization.
+ helper_->Finish();
+ transfer_buffer = nullptr;
+ }
+}
+
+bool DawnClientSerializer::Flush() {
+ Commit();
+ return true;
}
} // namespace webgpu
diff --git a/chromium/gpu/command_buffer/client/dawn_client_serializer.h b/chromium/gpu/command_buffer/client/dawn_client_serializer.h
index a377171ec90..980440b21a6 100644
--- a/chromium/gpu/command_buffer/client/dawn_client_serializer.h
+++ b/chromium/gpu/command_buffer/client/dawn_client_serializer.h
@@ -13,7 +13,6 @@
namespace gpu {
-struct SharedMemoryLimits;
class TransferBuffer;
namespace webgpu {
@@ -22,25 +21,17 @@ class DawnClientMemoryTransferService;
class WebGPUCmdHelper;
class WebGPUImplementation;
-class DawnClientSerializer final : public dawn_wire::CommandSerializer {
+class DawnClientSerializer : public dawn_wire::CommandSerializer {
public:
- static std::unique_ptr<DawnClientSerializer> Create(
- WebGPUImplementation* client,
- WebGPUCmdHelper* helper,
- DawnClientMemoryTransferService* memory_transfer_service,
- const SharedMemoryLimits& limits);
-
DawnClientSerializer(WebGPUImplementation* client,
WebGPUCmdHelper* helper,
DawnClientMemoryTransferService* memory_transfer_service,
- std::unique_ptr<TransferBuffer> transfer_buffer,
- uint32_t buffer_initial_size);
+ std::unique_ptr<TransferBuffer> transfer_buffer);
~DawnClientSerializer() override;
// dawn_wire::CommandSerializer implementation
size_t GetMaximumAllocationSize() const final;
void* GetCmdSpace(size_t size) final;
- bool Flush() final;
// Signal that it's important that the previously encoded commands are
// flushed. Calling |AwaitingFlush| will return whether or not a flush still
@@ -55,7 +46,14 @@ class DawnClientSerializer final : public dawn_wire::CommandSerializer {
// |GetCmdSpace| will do nothing.
void Disconnect();
+ // Marks the commands' place in the GPU command buffer without flushing for
+ // GPU execution.
+ void Commit();
+
private:
+ // dawn_wire::CommandSerializer implementation
+ bool Flush() final;
+
WebGPUImplementation* client_;
WebGPUCmdHelper* helper_;
DawnClientMemoryTransferService* memory_transfer_service_;
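
The static Create() factory was folded away: the caller now constructs and initializes the TransferBuffer itself and hands ownership to the constructor, which reads the initial size back via GetSize(). A sketch of the updated construction flow, mirroring the Initialize() change in webgpu_implementation.cc below (surrounding declarations and error handling elided):

    // Sketch only; client, helper, memory_transfer_service and limits are
    // assumed to be in scope.
    std::unique_ptr<TransferBuffer> transfer_buffer =
        std::make_unique<TransferBuffer>(helper);
    if (!transfer_buffer->Initialize(limits.start_transfer_buffer_size,
                                     /* start offset */ 0,
                                     limits.min_transfer_buffer_size,
                                     limits.max_transfer_buffer_size,
                                     /* alignment */ 8)) {
      // Initialization failed; the WebGPU path below returns
      // gpu::ContextResult::kFatalFailure here.
    }
    DawnClientSerializer serializer(client, helper, &memory_transfer_service,
                                    std::move(transfer_buffer));
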
diff --git a/chromium/gpu/command_buffer/client/gles2_implementation.cc b/chromium/gpu/command_buffer/client/gles2_implementation.cc
index a3044fd3704..be8c49feb86 100644
--- a/chromium/gpu/command_buffer/client/gles2_implementation.cc
+++ b/chromium/gpu/command_buffer/client/gles2_implementation.cc
@@ -26,8 +26,8 @@
#include "base/bits.h"
#include "base/compiler_specific.h"
#include "base/containers/span.h"
+#include "base/cxx17_backports.h"
#include "base/numerics/safe_math.h"
-#include "base/stl_util.h"
#include "base/strings/string_split.h"
#include "base/system/sys_info.h"
#include "base/threading/thread_task_runner_handle.h"
@@ -387,7 +387,8 @@ void GLES2Implementation::OnGpuControlErrorMessage(const char* message,
}
void GLES2Implementation::OnGpuControlSwapBuffersCompleted(
- const SwapBuffersCompleteParams& params) {
+ const SwapBuffersCompleteParams& params,
+ gfx::GpuFenceHandle release_fence) {
auto found = pending_swap_callbacks_.find(params.swap_response.swap_id);
if (found == pending_swap_callbacks_.end())
return;
@@ -397,7 +398,7 @@ void GLES2Implementation::OnGpuControlSwapBuffersCompleted(
auto callback = std::move(found->second);
pending_swap_callbacks_.erase(found);
- std::move(callback).Run(params);
+ std::move(callback).Run(params, std::move(release_fence));
}
void GLES2Implementation::OnGpuSwitched(
@@ -5133,16 +5134,20 @@ void GLES2Implementation::BindFramebufferHelper(GLenum target,
}
break;
case GL_READ_FRAMEBUFFER:
+#if EXPENSIVE_DCHECKS_ARE_ON()
DCHECK(capabilities_.major_version >= 3 ||
IsChromiumFramebufferMultisampleAvailable());
+#endif
if (bound_read_framebuffer_ != framebuffer) {
bound_read_framebuffer_ = framebuffer;
changed = true;
}
break;
case GL_DRAW_FRAMEBUFFER:
+#if EXPENSIVE_DCHECKS_ARE_ON()
DCHECK(capabilities_.major_version >= 3 ||
IsChromiumFramebufferMultisampleAvailable());
+#endif
if (bound_framebuffer_ != framebuffer) {
bound_framebuffer_ = framebuffer;
changed = true;
diff --git a/chromium/gpu/command_buffer/client/gles2_implementation.h b/chromium/gpu/command_buffer/client/gles2_implementation.h
index 3635e0fa609..207a2e93f68 100644
--- a/chromium/gpu/command_buffer/client/gles2_implementation.h
+++ b/chromium/gpu/command_buffer/client/gles2_implementation.h
@@ -400,7 +400,8 @@ class GLES2_IMPL_EXPORT GLES2Implementation : public GLES2Interface,
void OnGpuControlLostContextMaybeReentrant() final;
void OnGpuControlErrorMessage(const char* message, int32_t id) final;
void OnGpuControlSwapBuffersCompleted(
- const SwapBuffersCompleteParams& params) final;
+ const SwapBuffersCompleteParams& params,
+ gfx::GpuFenceHandle release_fence) final;
void OnGpuSwitched(gl::GpuPreference active_gpu_heuristic) final;
void OnSwapBufferPresented(uint64_t swap_id,
const gfx::PresentationFeedback& feedback) final;
diff --git a/chromium/gpu/command_buffer/client/gles2_implementation_unittest.cc b/chromium/gpu/command_buffer/client/gles2_implementation_unittest.cc
index 1f4f495faf1..71f837999c8 100644
--- a/chromium/gpu/command_buffer/client/gles2_implementation_unittest.cc
+++ b/chromium/gpu/command_buffer/client/gles2_implementation_unittest.cc
@@ -18,7 +18,7 @@
#include "base/bind.h"
#include "base/compiler_specific.h"
-#include "base/stl_util.h"
+#include "base/cxx17_backports.h"
#include "gpu/command_buffer/client/client_test_helper.h"
#include "gpu/command_buffer/client/gles2_cmd_helper.h"
#include "gpu/command_buffer/client/mock_transfer_buffer.h"
diff --git a/chromium/gpu/command_buffer/client/gpu_control_client.h b/chromium/gpu/command_buffer/client/gpu_control_client.h
index c88be16c02c..81008472d61 100644
--- a/chromium/gpu/command_buffer/client/gpu_control_client.h
+++ b/chromium/gpu/command_buffer/client/gpu_control_client.h
@@ -8,6 +8,7 @@
#include <cstdint>
#include "base/containers/span.h"
+#include "ui/gfx/gpu_fence_handle.h"
#include "ui/gfx/presentation_feedback.h"
#include "ui/gl/gpu_preference.h"
@@ -28,7 +29,8 @@ class GpuControlClient {
virtual void OnGpuControlLostContextMaybeReentrant() = 0;
virtual void OnGpuControlErrorMessage(const char* message, int32_t id) = 0;
virtual void OnGpuControlSwapBuffersCompleted(
- const SwapBuffersCompleteParams& params) = 0;
+ const SwapBuffersCompleteParams& params,
+ gfx::GpuFenceHandle release_fence) = 0;
virtual void OnGpuSwitched(gl::GpuPreference active_gpu_heuristic) {}
virtual void OnSwapBufferPresented(
uint64_t swap_id,
diff --git a/chromium/gpu/command_buffer/client/mock_transfer_buffer.cc b/chromium/gpu/command_buffer/client/mock_transfer_buffer.cc
index c70462c85fa..9ab9b15be90 100644
--- a/chromium/gpu/command_buffer/client/mock_transfer_buffer.cc
+++ b/chromium/gpu/command_buffer/client/mock_transfer_buffer.cc
@@ -146,7 +146,7 @@ uint32_t MockTransferBuffer::MaxTransferBufferSize() {
}
unsigned int MockTransferBuffer::RoundToAlignment(unsigned int size) {
- return base::bits::Align(size, alignment_);
+ return base::bits::AlignUp(size, alignment_);
}
bool MockTransferBuffer::InSync() {
diff --git a/chromium/gpu/command_buffer/client/program_info_manager_unittest.cc b/chromium/gpu/command_buffer/client/program_info_manager_unittest.cc
index c9e652d6a97..9ddaa9e4328 100644
--- a/chromium/gpu/command_buffer/client/program_info_manager_unittest.cc
+++ b/chromium/gpu/command_buffer/client/program_info_manager_unittest.cc
@@ -7,7 +7,7 @@
#include <memory>
-#include "base/stl_util.h"
+#include "base/cxx17_backports.h"
#include "gpu/command_buffer/client/program_info_manager.h"
#include "testing/gtest/include/gtest/gtest.h"
diff --git a/chromium/gpu/command_buffer/client/query_tracker.cc b/chromium/gpu/command_buffer/client/query_tracker.cc
index 760c29628f2..64f7c4cf99c 100644
--- a/chromium/gpu/command_buffer/client/query_tracker.cc
+++ b/chromium/gpu/command_buffer/client/query_tracker.cc
@@ -14,6 +14,7 @@
#include "base/atomicops.h"
#include "base/containers/circular_deque.h"
+#include "base/containers/cxx20_erase.h"
#include "base/numerics/safe_conversions.h"
#include "gpu/command_buffer/client/gles2_cmd_helper.h"
#include "gpu/command_buffer/client/gles2_implementation.h"
diff --git a/chromium/gpu/command_buffer/client/query_tracker.h b/chromium/gpu/command_buffer/client/query_tracker.h
index 7e5c01b954e..09074575415 100644
--- a/chromium/gpu/command_buffer/client/query_tracker.h
+++ b/chromium/gpu/command_buffer/client/query_tracker.h
@@ -22,6 +22,7 @@
#include "base/macros.h"
#include "gles2_impl_export.h"
#include "gpu/command_buffer/common/gles2_cmd_format.h"
+#include "third_party/abseil-cpp/absl/types/optional.h"
namespace gpu {
diff --git a/chromium/gpu/command_buffer/client/query_tracker_unittest.cc b/chromium/gpu/command_buffer/client/query_tracker_unittest.cc
index 0b3982d0a15..2015578e610 100644
--- a/chromium/gpu/command_buffer/client/query_tracker_unittest.cc
+++ b/chromium/gpu/command_buffer/client/query_tracker_unittest.cc
@@ -13,7 +13,7 @@
#include <memory>
#include <vector>
-#include "base/stl_util.h"
+#include "base/cxx17_backports.h"
#include "gpu/command_buffer/client/client_test_helper.h"
#include "gpu/command_buffer/client/gles2_cmd_helper.h"
#include "gpu/command_buffer/client/mapped_memory.h"
diff --git a/chromium/gpu/command_buffer/client/raster_cmd_helper_autogen.h b/chromium/gpu/command_buffer/client/raster_cmd_helper_autogen.h
index ea433815f25..f04db3dbd7b 100644
--- a/chromium/gpu/command_buffer/client/raster_cmd_helper_autogen.h
+++ b/chromium/gpu/command_buffer/client/raster_cmd_helper_autogen.h
@@ -91,6 +91,7 @@ void LoseContextCHROMIUM(GLenum current, GLenum other) {
void BeginRasterCHROMIUMImmediate(GLuint sk_color,
GLboolean needs_clear,
GLuint msaa_sample_count,
+ gpu::raster::MsaaMode msaa_mode,
GLboolean can_use_lcd_text,
const GLbyte* mailbox) {
const uint32_t size =
@@ -99,8 +100,8 @@ void BeginRasterCHROMIUMImmediate(GLuint sk_color,
GetImmediateCmdSpaceTotalSize<raster::cmds::BeginRasterCHROMIUMImmediate>(
size);
if (c) {
- c->Init(sk_color, needs_clear, msaa_sample_count, can_use_lcd_text,
- mailbox);
+ c->Init(sk_color, needs_clear, msaa_sample_count, msaa_mode,
+ can_use_lcd_text, mailbox);
}
}
diff --git a/chromium/gpu/command_buffer/client/raster_implementation.cc b/chromium/gpu/command_buffer/client/raster_implementation.cc
index a346c8e0b89..64d0b4bb216 100644
--- a/chromium/gpu/command_buffer/client/raster_implementation.cc
+++ b/chromium/gpu/command_buffer/client/raster_implementation.cc
@@ -570,7 +570,8 @@ void RasterImplementation::OnGpuControlErrorMessage(const char* message,
}
void RasterImplementation::OnGpuControlSwapBuffersCompleted(
- const SwapBuffersCompleteParams& params) {
+ const SwapBuffersCompleteParams& params,
+ gfx::GpuFenceHandle release_fence) {
NOTREACHED();
}
@@ -1210,6 +1211,23 @@ void RasterImplementation::CopySubTexture(const gpu::Mailbox& source_mailbox,
<< dest_mailbox.ToDebugString() << ", " << xoffset << ", "
<< yoffset << ", " << x << ", " << y << ", " << width
<< ", " << height << ")");
+ if (!source_mailbox.IsSharedImage()) {
+ SetGLError(GL_INVALID_VALUE, "glCopySubTexture",
+ "source_mailbox is not a shared image.");
+ // TODO(crbug.com/1229479): This call to NOTREACHED is temporary while we
+ // investigate crbug.com/1229479. The failure with test
+ // WebRtcVideoCaptureServiceBrowserTest.
+ // FramesSentThroughTextureVirtualDeviceGetDisplayedOnPage when OOP-R
+ // Canvas is enabled does not repro on trybots, only on CI bots.
+ // Crashing here will allow us to get a client-side stack trace.
+ NOTREACHED();
+ return;
+ }
+ if (!dest_mailbox.IsSharedImage()) {
+ SetGLError(GL_INVALID_VALUE, "glCopySubTexture",
+ "dest_mailbox is not a shared image.");
+ return;
+ }
if (width < 0) {
SetGLError(GL_INVALID_VALUE, "glCopySubTexture", "width < 0");
return;
@@ -1239,13 +1257,13 @@ void RasterImplementation::WritePixels(const gpu::Mailbox& dest_mailbox,
// Get the size of the SkColorSpace while maintaining 8-byte alignment.
GLuint pixels_offset = 0;
if (src_info.colorSpace()) {
- pixels_offset = base::bits::Align(
+ pixels_offset = base::bits::AlignUp(
src_info.colorSpace()->writeToMemory(nullptr), sizeof(uint64_t));
}
GLuint src_size = src_info.computeByteSize(row_bytes);
GLuint total_size =
- pixels_offset + base::bits::Align(src_size, sizeof(uint64_t));
+ pixels_offset + base::bits::AlignUp(src_size, sizeof(uint64_t));
std::unique_ptr<ScopedSharedMemoryPtr> scoped_shared_memory =
std::make_unique<ScopedSharedMemoryPtr>(total_size, transfer_buffer_,
@@ -1290,13 +1308,15 @@ void RasterImplementation::BeginRasterCHROMIUM(
GLuint sk_color,
GLboolean needs_clear,
GLuint msaa_sample_count,
+ MsaaMode msaa_mode,
GLboolean can_use_lcd_text,
const gfx::ColorSpace& color_space,
const GLbyte* mailbox) {
DCHECK(!raster_properties_);
- helper_->BeginRasterCHROMIUMImmediate(
- sk_color, needs_clear, msaa_sample_count, can_use_lcd_text, mailbox);
+ helper_->BeginRasterCHROMIUMImmediate(sk_color, needs_clear,
+ msaa_sample_count, msaa_mode,
+ can_use_lcd_text, mailbox);
raster_properties_.emplace(sk_color, can_use_lcd_text,
color_space.ToSkColorSpace());
@@ -1427,7 +1447,7 @@ void RasterImplementation::ReadbackImagePixelsINTERNAL(
GLuint dst_size = dst_info.computeByteSize(dst_row_bytes);
GLuint total_size =
- pixels_offset + base::bits::Align(dst_size, sizeof(uint64_t));
+ pixels_offset + base::bits::AlignUp(dst_size, sizeof(uint64_t));
std::unique_ptr<ScopedMappedMemoryPtr> scoped_shared_memory =
std::make_unique<ScopedMappedMemoryPtr>(total_size, helper(),
diff --git a/chromium/gpu/command_buffer/client/raster_implementation.h b/chromium/gpu/command_buffer/client/raster_implementation.h
index ecd89a30720..bbc240621d7 100644
--- a/chromium/gpu/command_buffer/client/raster_implementation.h
+++ b/chromium/gpu/command_buffer/client/raster_implementation.h
@@ -140,6 +140,7 @@ class RASTER_EXPORT RasterImplementation : public RasterInterface,
void BeginRasterCHROMIUM(GLuint sk_color,
GLboolean needs_clear,
GLuint msaa_sample_count,
+ MsaaMode msaa_mode,
GLboolean can_use_lcd_text,
const gfx::ColorSpace& color_space,
const GLbyte* mailbox) override;
@@ -295,7 +296,8 @@ class RASTER_EXPORT RasterImplementation : public RasterInterface,
void OnGpuControlLostContextMaybeReentrant() final;
void OnGpuControlErrorMessage(const char* message, int32_t id) final;
void OnGpuControlSwapBuffersCompleted(
- const SwapBuffersCompleteParams& params) final;
+ const SwapBuffersCompleteParams& params,
+ gfx::GpuFenceHandle release_fence) final;
void OnSwapBufferPresented(uint64_t swap_id,
const gfx::PresentationFeedback& feedback) final;
void OnGpuControlReturnData(base::span<const uint8_t> data) final;
diff --git a/chromium/gpu/command_buffer/client/raster_implementation_gles.cc b/chromium/gpu/command_buffer/client/raster_implementation_gles.cc
index 1db20329ed6..472130708fd 100644
--- a/chromium/gpu/command_buffer/client/raster_implementation_gles.cc
+++ b/chromium/gpu/command_buffer/client/raster_implementation_gles.cc
@@ -211,6 +211,7 @@ void RasterImplementationGLES::BeginRasterCHROMIUM(
GLuint sk_color,
GLboolean needs_clear,
GLuint msaa_sample_count,
+ MsaaMode msaa_mode,
GLboolean can_use_lcd_text,
const gfx::ColorSpace& color_space,
const GLbyte* mailbox) {
diff --git a/chromium/gpu/command_buffer/client/raster_implementation_gles.h b/chromium/gpu/command_buffer/client/raster_implementation_gles.h
index 0fed7afec9d..2cc30bba95e 100644
--- a/chromium/gpu/command_buffer/client/raster_implementation_gles.h
+++ b/chromium/gpu/command_buffer/client/raster_implementation_gles.h
@@ -5,14 +5,12 @@
#ifndef GPU_COMMAND_BUFFER_CLIENT_RASTER_IMPLEMENTATION_GLES_H_
#define GPU_COMMAND_BUFFER_CLIENT_RASTER_IMPLEMENTATION_GLES_H_
-#include "base/containers/flat_map.h"
#include "base/macros.h"
#include "gpu/command_buffer/client/client_font_manager.h"
#include "gpu/command_buffer/client/gles2_interface.h"
#include "gpu/command_buffer/client/raster_interface.h"
#include "gpu/command_buffer/common/capabilities.h"
#include "gpu/raster_export.h"
-#include "third_party/abseil-cpp/absl/types/optional.h"
#include "third_party/skia/include/core/SkColorSpace.h"
namespace gpu {
@@ -85,6 +83,7 @@ class RASTER_EXPORT RasterImplementationGLES : public RasterInterface {
void BeginRasterCHROMIUM(GLuint sk_color,
GLboolean needs_clear,
GLuint msaa_sample_count,
+ MsaaMode msaa_mode,
GLboolean can_use_lcd_text,
const gfx::ColorSpace& color_space,
const GLbyte* mailbox) override;
diff --git a/chromium/gpu/command_buffer/client/raster_implementation_unittest.cc b/chromium/gpu/command_buffer/client/raster_implementation_unittest.cc
index 53af6b4a922..629ad614a37 100644
--- a/chromium/gpu/command_buffer/client/raster_implementation_unittest.cc
+++ b/chromium/gpu/command_buffer/client/raster_implementation_unittest.cc
@@ -16,8 +16,8 @@
#include "base/bind.h"
#include "base/compiler_specific.h"
+#include "base/cxx17_backports.h"
#include "base/memory/ptr_util.h"
-#include "base/stl_util.h"
#include "cc/paint/raw_memory_transfer_cache_entry.h"
#include "cc/paint/transfer_cache_serialize_helper.h"
#include "gpu/command_buffer/client/client_test_helper.h"
diff --git a/chromium/gpu/command_buffer/client/raster_interface.h b/chromium/gpu/command_buffer/client/raster_interface.h
index 4b569dcc076..1bdd1771c2b 100644
--- a/chromium/gpu/command_buffer/client/raster_interface.h
+++ b/chromium/gpu/command_buffer/client/raster_interface.h
@@ -11,6 +11,7 @@
#include "base/containers/span.h"
#include "components/viz/common/resources/resource_format.h"
#include "gpu/command_buffer/client/interface_base.h"
+#include "gpu/command_buffer/common/raster_cmd_enums.h"
#include "gpu/command_buffer/common/sync_token.h"
#include "third_party/skia/include/core/SkImageInfo.h"
#include "third_party/skia/include/core/SkYUVAInfo.h"
@@ -74,9 +75,12 @@ class RasterInterface : public InterfaceBase {
const gpu::Mailbox yuva_plane_mailboxes[]) = 0;
// OOP-Raster
+
+ // msaa_sample_count has no effect unless msaa_mode is set to kMSAA
virtual void BeginRasterCHROMIUM(GLuint sk_color,
GLboolean needs_clear,
GLuint msaa_sample_count,
+ MsaaMode msaa_mode,
GLboolean can_use_lcd_text,
const gfx::ColorSpace& color_space,
const GLbyte* mailbox) = 0;
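
A sketch of an OOP-raster call with the new parameter; as the comment above notes, msaa_sample_count only takes effect under kMSAA. The values below are illustrative, and 'ri' is assumed to be a RasterInterface pointer:

    // Illustrative call shape only.
    ri->BeginRasterCHROMIUM(/* sk_color */ SK_ColorTRANSPARENT,
                            /* needs_clear */ GL_TRUE,
                            /* msaa_sample_count */ 4,  // used only with kMSAA
                            /* msaa_mode */ gpu::raster::MsaaMode::kMSAA,
                            /* can_use_lcd_text */ GL_FALSE,
                            color_space, mailbox.name);
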
diff --git a/chromium/gpu/command_buffer/client/readback_buffer_shadow_tracker.h b/chromium/gpu/command_buffer/client/readback_buffer_shadow_tracker.h
index af58e512f46..88f53dd3725 100644
--- a/chromium/gpu/command_buffer/client/readback_buffer_shadow_tracker.h
+++ b/chromium/gpu/command_buffer/client/readback_buffer_shadow_tracker.h
@@ -7,7 +7,6 @@
#include <GLES2/gl2.h>
#include "base/containers/flat_map.h"
-#include "base/containers/flat_set.h"
#include "base/memory/weak_ptr.h"
namespace gpu {
diff --git a/chromium/gpu/command_buffer/client/transfer_buffer.cc b/chromium/gpu/command_buffer/client/transfer_buffer.cc
index 4b55c05872e..5d1e0b510d3 100644
--- a/chromium/gpu/command_buffer/client/transfer_buffer.cc
+++ b/chromium/gpu/command_buffer/client/transfer_buffer.cc
@@ -48,9 +48,9 @@ bool TransferBuffer::Initialize(unsigned int default_buffer_size,
unsigned int alignment) {
result_size_ = result_size;
alignment_ = alignment;
- default_buffer_size_ = base::bits::Align(default_buffer_size, alignment);
- min_buffer_size_ = base::bits::Align(min_buffer_size, alignment);
- max_buffer_size_ = base::bits::Align(max_buffer_size, alignment);
+ default_buffer_size_ = base::bits::AlignUp(default_buffer_size, alignment);
+ min_buffer_size_ = base::bits::AlignUp(min_buffer_size, alignment);
+ max_buffer_size_ = base::bits::AlignUp(max_buffer_size, alignment);
ReallocateRingBuffer(default_buffer_size_ - result_size);
return HaveBuffer();
}
@@ -134,7 +134,7 @@ void TransferBuffer::AllocateRingBuffer(unsigned int size) {
return;
}
// we failed so don't try larger than this.
- max_buffer_size_ = base::bits::Align(size / 2, alignment_);
+ max_buffer_size_ = base::bits::AlignUp(size / 2, alignment_);
}
usable_ = false;
}
diff --git a/chromium/gpu/command_buffer/client/vertex_array_object_manager_unittest.cc b/chromium/gpu/command_buffer/client/vertex_array_object_manager_unittest.cc
index f11e84de21b..b536128506e 100644
--- a/chromium/gpu/command_buffer/client/vertex_array_object_manager_unittest.cc
+++ b/chromium/gpu/command_buffer/client/vertex_array_object_manager_unittest.cc
@@ -12,7 +12,7 @@
#include <memory>
-#include "base/stl_util.h"
+#include "base/cxx17_backports.h"
#include "testing/gtest/include/gtest/gtest.h"
namespace gpu {
diff --git a/chromium/gpu/command_buffer/client/webgpu_implementation.cc b/chromium/gpu/command_buffer/client/webgpu_implementation.cc
index 5a29b256cfe..ce8a12a999e 100644
--- a/chromium/gpu/command_buffer/client/webgpu_implementation.cc
+++ b/chromium/gpu/command_buffer/client/webgpu_implementation.cc
@@ -22,6 +22,58 @@
namespace gpu {
namespace webgpu {
+#if BUILDFLAG(USE_DAWN)
+class DawnWireServices : public APIChannel {
+ private:
+ friend class base::RefCounted<DawnWireServices>;
+ ~DawnWireServices() override = default;
+
+ public:
+ DawnWireServices(WebGPUImplementation* webgpu_implementation,
+ WebGPUCmdHelper* helper,
+ MappedMemoryManager* mapped_memory,
+ std::unique_ptr<TransferBuffer> transfer_buffer)
+ : memory_transfer_service_(mapped_memory),
+ serializer_(webgpu_implementation,
+ helper,
+ &memory_transfer_service_,
+ std::move(transfer_buffer)),
+ wire_client_(dawn_wire::WireClientDescriptor{
+ &serializer_,
+ &memory_transfer_service_,
+ }) {}
+
+ const DawnProcTable& GetProcs() const override {
+ return dawn_wire::client::GetProcs();
+ }
+
+ dawn_wire::WireClient* wire_client() { return &wire_client_; }
+ DawnClientSerializer* serializer() { return &serializer_; }
+ DawnClientMemoryTransferService* memory_transfer_service() {
+ return &memory_transfer_service_;
+ }
+
+ void Disconnect() override {
+ disconnected_ = true;
+ wire_client_.Disconnect();
+ serializer_.Disconnect();
+ memory_transfer_service_.Disconnect();
+ }
+
+ bool IsDisconnected() const { return disconnected_; }
+
+ void FreeMappedResources(WebGPUCmdHelper* helper) {
+ memory_transfer_service_.FreeHandles(helper);
+ }
+
+ private:
+ bool disconnected_ = false;
+ DawnClientMemoryTransferService memory_transfer_service_;
+ DawnClientSerializer serializer_;
+ dawn_wire::WireClient wire_client_;
+};
+#endif
+
// Include the auto-generated part of this file. We split this because it means
// we can easily edit the non-auto generated parts right here in this file
// instead of having to edit some template or the code generator.
@@ -35,26 +87,42 @@ WebGPUImplementation::WebGPUImplementation(
helper_(helper) {}
WebGPUImplementation::~WebGPUImplementation() {
+ LoseContext();
+
+ // Before destroying WebGPUImplementation, all mappable buffers
+ // must be destroyed first. This means that all shared memory mappings are
+ // detached. If they are not destroyed, MappedMemoryManager (member of
+ // base class ImplementationBase) will assert on destruction that some
+ // memory blocks are in use. Calling |FreeMappedResources| marks all
+ // blocks that are no longer in use as free.
#if BUILDFLAG(USE_DAWN)
- if (!wire_client_) {
- // Initialization failed.
- return;
- }
+ dawn_wire_->FreeMappedResources(helper_);
+#endif
- // Flush all commands and synchronously wait for them to finish.
- // TODO(enga): Investigate if we can just Disconnect() instead.
- wire_serializer_->Flush();
+ // Wait for commands to finish before we continue destruction.
+ // WebGPUImplementation no longer owns the WebGPU transfer buffer, but still
+ // owns the GPU command buffer. We should not free shared memory that the
+ // GPU process is using.
helper_->Finish();
+}
- // Now that commands are finished, free the wire and serializers.
- wire_client_.reset();
- wire_serializer_.reset();
+void WebGPUImplementation::LoseContext() {
+ lost_ = true;
+#if BUILDFLAG(USE_DAWN)
+ dawn_wire_->Disconnect();
- // All client-side Dawn objects are now destroyed.
- // Shared memory allocations for buffers that were still mapped at the time
- // of destruction can now be safely freed.
- memory_transfer_service_->FreeHandles(helper_);
- helper_->Finish();
+ auto request_adapter_callback_map = std::move(request_adapter_callback_map_);
+ auto request_device_callback_map = std::move(request_device_callback_map_);
+ for (auto& it : request_adapter_callback_map) {
+ std::move(it.second).Run(-1, {}, "Context Lost");
+ }
+ for (auto& it : request_device_callback_map) {
+ std::move(it.second).Run(false);
+ }
+
+ // After |lost_| is set to true, callbacks should not be enqueued anymore.
+ DCHECK(request_adapter_callback_map_.empty());
+ DCHECK(request_device_callback_map_.empty());
#endif
}
@@ -66,17 +134,19 @@ gpu::ContextResult WebGPUImplementation::Initialize(
return result;
}
-#if BUILDFLAG(USE_DAWN)
- memory_transfer_service_ =
- std::make_unique<DawnClientMemoryTransferService>(mapped_memory_.get());
-
- wire_serializer_ = DawnClientSerializer::Create(
- this, helper_, memory_transfer_service_.get(), limits);
+ std::unique_ptr<TransferBuffer> transfer_buffer =
+ std::make_unique<TransferBuffer>(helper_);
+ if (!transfer_buffer->Initialize(limits.start_transfer_buffer_size,
+ /* start offset */ 0,
+ limits.min_transfer_buffer_size,
+ limits.max_transfer_buffer_size,
+ /* alignment */ 8)) {
+ return gpu::ContextResult::kFatalFailure;
+ }
- dawn_wire::WireClientDescriptor descriptor = {};
- descriptor.serializer = wire_serializer_.get();
- descriptor.memoryTransferService = memory_transfer_service_.get();
- wire_client_ = std::make_unique<dawn_wire::WireClient>(descriptor);
+#if BUILDFLAG(USE_DAWN)
+ dawn_wire_ = base::MakeRefCounted<DawnWireServices>(
+ this, helper_, mapped_memory_.get(), std::move(transfer_buffer));
// TODO(senorblanco): Do this only once per process. Doing it once per
// WebGPUImplementation is non-optimal but valid, since the returned
@@ -190,9 +260,19 @@ bool WebGPUImplementation::CanDecodeWithHardwareAcceleration(
// InterfaceBase implementation.
void WebGPUImplementation::GenSyncTokenCHROMIUM(GLbyte* sync_token) {
+ // Need to commit the commands to the GPU command buffer first for SyncToken
+ // to work.
+#if BUILDFLAG(USE_DAWN)
+ dawn_wire_->serializer()->Commit();
+#endif
ImplementationBase::GenSyncToken(sync_token);
}
void WebGPUImplementation::GenUnverifiedSyncTokenCHROMIUM(GLbyte* sync_token) {
+ // Need to commit the commands to the GPU command buffer first for SyncToken
+ // to work.
+#if BUILDFLAG(USE_DAWN)
+ dawn_wire_->serializer()->Commit();
+#endif
ImplementationBase::GenUnverifiedSyncToken(sync_token);
}
void WebGPUImplementation::VerifySyncTokensCHROMIUM(GLbyte** sync_tokens,
@@ -200,8 +280,11 @@ void WebGPUImplementation::VerifySyncTokensCHROMIUM(GLbyte** sync_tokens,
ImplementationBase::VerifySyncTokens(sync_tokens, count);
}
void WebGPUImplementation::WaitSyncTokenCHROMIUM(const GLbyte* sync_token) {
- // Flush any commands before this, so we don't block more than necessary.
- FlushCommands();
+ // Need to commit the commands to the GPU command buffer first for SyncToken
+ // to work.
+#if BUILDFLAG(USE_DAWN)
+ dawn_wire_->serializer()->Commit();
+#endif
ImplementationBase::WaitSyncToken(sync_token);
}
void WebGPUImplementation::ShallowFlushCHROMIUM() {
@@ -227,9 +310,8 @@ void WebGPUImplementation::SetGLError(GLenum error,
}
// GpuControlClient implementation.
-// TODO(jiawei.shao@intel.com): do other clean-ups when the context is lost.
void WebGPUImplementation::OnGpuControlLostContext() {
- OnGpuControlLostContextMaybeReentrant();
+ LoseContext();
// This should never occur more than once.
DCHECK(!lost_context_callback_run_);
@@ -239,18 +321,20 @@ void WebGPUImplementation::OnGpuControlLostContext() {
}
}
void WebGPUImplementation::OnGpuControlLostContextMaybeReentrant() {
+ // If this function is called, we are guaranteed to also get a call
+ // to |OnGpuControlLostContext| when the callstack unwinds. Thus, this
+ // function only handles immediately setting state so that other operations
+ // which occur while the callstack is unwinding are aware that the context
+ // is lost.
lost_ = true;
-#if BUILDFLAG(USE_DAWN)
- wire_client_->Disconnect();
- wire_serializer_->Disconnect();
-#endif
}
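This two-phase split (reentrant-safe flag now, full teardown once the stack has unwound) is a common pattern; a minimal illustrative sketch, not the Chromium class:

class Context {
 public:
  // May be called while other Context methods are still on the stack,
  // so it must not destroy any state those methods might be using.
  void OnLostMaybeReentrant() { lost_ = true; }

  // Called exactly once after the stack unwinds; safe to tear down here.
  void OnLost() {
    OnLostMaybeReentrant();
    // ... run callbacks, free resources ...
  }

  bool lost() const { return lost_; }

 private:
  bool lost_ = false;
};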
void WebGPUImplementation::OnGpuControlErrorMessage(const char* message,
int32_t id) {
NOTIMPLEMENTED();
}
void WebGPUImplementation::OnGpuControlSwapBuffersCompleted(
- const SwapBuffersCompleteParams& params) {
+ const SwapBuffersCompleteParams& params,
+ gfx::GpuFenceHandle release_fence) {
NOTIMPLEMENTED();
}
void WebGPUImplementation::OnSwapBufferPresented(
@@ -285,10 +369,13 @@ void WebGPUImplementation::OnGpuControlReturnData(
const cmds::DawnReturnCommandsInfo* dawn_return_commands_info =
reinterpret_cast<const cmds::DawnReturnCommandsInfo*>(data.data());
+ if (dawn_wire_->IsDisconnected()) {
+ break;
+ }
// TODO(enga): Instead of a CHECK, this could generate a device lost
// event on just that device. It doesn't seem worth doing right now
// since a failure here is likely not recoverable.
- CHECK(wire_client_->HandleCommands(
+ CHECK(dawn_wire_->wire_client()->HandleCommands(
reinterpret_cast<const char*>(
dawn_return_commands_info->deserialized_buffer),
data.size() -
@@ -305,8 +392,12 @@ void WebGPUImplementation::OnGpuControlReturnData(
auto request_callback_iter =
request_adapter_callback_map_.find(request_adapter_serial);
CHECK(request_callback_iter != request_adapter_callback_map_.end());
+ RequestAdapterCallback callback =
+ std::move(request_callback_iter->second);
+ // Remove the callback from the map immediately since the callback could
+ // perform reentrant calls that modify the map.
+ request_adapter_callback_map_.erase(request_callback_iter);
- auto& request_callback = request_callback_iter->second;
GLuint adapter_service_id =
returned_adapter_info->header.adapter_service_id;
WGPUDeviceProperties adapter_properties = {};
@@ -329,9 +420,8 @@ void WebGPUImplementation::OnGpuControlReturnData(
error_message = "Request adapter failed";
}
}
- std::move(request_callback)
- .Run(adapter_service_id, adapter_properties, error_message);
- request_adapter_callback_map_.erase(request_callback_iter);
+ std::move(callback).Run(adapter_service_id, adapter_properties,
+ error_message);
} break;
case DawnReturnDataType::kRequestedDeviceReturnInfo: {
CHECK_GE(data.size(), sizeof(cmds::DawnReturnRequestDeviceInfo));
@@ -345,12 +435,14 @@ void WebGPUImplementation::OnGpuControlReturnData(
auto request_callback_iter =
request_device_callback_map_.find(request_device_serial);
CHECK(request_callback_iter != request_device_callback_map_.end());
+ RequestDeviceCallback callback = std::move(request_callback_iter->second);
+ // Remove the callback from the map immediately since the callback could
+ // perform reentrant calls that modify the map.
+ request_device_callback_map_.erase(request_callback_iter);
- auto& request_callback = request_callback_iter->second;
bool is_request_device_success =
returned_request_device_info->is_request_device_success;
- std::move(request_callback).Run(is_request_device_success);
- request_device_callback_map_.erase(request_callback_iter);
+ std::move(callback).Run(is_request_device_success);
} break;
default:
NOTREACHED();
@@ -358,19 +450,9 @@ void WebGPUImplementation::OnGpuControlReturnData(
#endif
}
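Both return-data paths above now follow the same erase-before-run discipline. A self-contained sketch of the pattern, with std:: stand-ins for the Chromium callback and map types:

#include <cstdint>
#include <functional>
#include <map>

std::map<uint64_t, std::function<void(bool)>> g_callbacks;

void Dispatch(uint64_t serial, bool success) {
  auto it = g_callbacks.find(serial);
  if (it == g_callbacks.end())
    return;
  auto callback = std::move(it->second);
  g_callbacks.erase(it);  // erase first; |callback| may mutate the map
  callback(success);
}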
-const DawnProcTable& WebGPUImplementation::GetProcs() const {
-#if !BUILDFLAG(USE_DAWN)
- NOTREACHED();
- static DawnProcTable null_procs = {};
- return null_procs;
-#else
- return dawn_wire::client::GetProcs();
-#endif
-}
-
void WebGPUImplementation::FlushCommands() {
#if BUILDFLAG(USE_DAWN)
- wire_serializer_->Flush();
+ dawn_wire_->serializer()->Commit();
helper_->Flush();
#endif
}
@@ -380,15 +462,15 @@ void WebGPUImplementation::EnsureAwaitingFlush(bool* needs_flush) {
// If there is already a flush waiting, we don't need to flush.
// We only want to set |needs_flush| on state transition from
// false -> true.
- if (wire_serializer_->AwaitingFlush()) {
+ if (dawn_wire_->serializer()->AwaitingFlush()) {
*needs_flush = false;
return;
}
// Set the state to waiting for flush, and then write |needs_flush|.
// Could still be false if there's no data to flush.
- wire_serializer_->SetAwaitingFlush(true);
- *needs_flush = wire_serializer_->AwaitingFlush();
+ dawn_wire_->serializer()->SetAwaitingFlush(true);
+ *needs_flush = dawn_wire_->serializer()->AwaitingFlush();
#else
*needs_flush = false;
#endif
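A minimal model of the edge-triggered AwaitingFlush handshake above (an illustrative class, not DawnClientSerializer): |needs_flush| is reported only on the false -> true transition, and only when there is actually pending data, so callers schedule at most one deferred flush per batch:

class Serializer {
 public:
  void EnsureAwaitingFlush(bool* needs_flush) {
    if (awaiting_flush_) {
      *needs_flush = false;  // a flush is already scheduled
      return;
    }
    awaiting_flush_ = has_pending_data_;
    *needs_flush = awaiting_flush_;
  }
  void Commit() { has_pending_data_ = false; awaiting_flush_ = false; }
  void Write() { has_pending_data_ = true; }

 private:
  bool has_pending_data_ = false;
  bool awaiting_flush_ = false;
};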
@@ -396,27 +478,28 @@ void WebGPUImplementation::EnsureAwaitingFlush(bool* needs_flush) {
void WebGPUImplementation::FlushAwaitingCommands() {
#if BUILDFLAG(USE_DAWN)
- if (wire_serializer_->AwaitingFlush()) {
- wire_serializer_->Flush();
+ if (dawn_wire_->serializer()->AwaitingFlush()) {
+ dawn_wire_->serializer()->Commit();
helper_->Flush();
}
#endif
}
-void WebGPUImplementation::DisconnectContextAndDestroyServer() {
- // Treat this like a context lost since the context is no longer usable.
- // TODO(crbug.com/1160459): Also send a message to eagerly free server-side
- // resources.
- OnGpuControlLostContextMaybeReentrant();
+scoped_refptr<APIChannel> WebGPUImplementation::GetAPIChannel() const {
+#if BUILDFLAG(USE_DAWN)
+ return dawn_wire_.get();
+#else
+ return nullptr;
+#endif
}
ReservedTexture WebGPUImplementation::ReserveTexture(WGPUDevice device) {
#if BUILDFLAG(USE_DAWN)
- // Flush because we need to make sure messages that free a previously used
+ // Commit because we need to make sure messages that free a previously used
// texture are seen first. ReserveTexture may reuse an existing ID.
- wire_serializer_->Flush();
+ dawn_wire_->serializer()->Commit();
- auto reservation = wire_client_->ReserveTexture(device);
+ auto reservation = dawn_wire_->wire_client()->ReserveTexture(device);
ReservedTexture result;
result.texture = reservation.texture;
result.id = reservation.id;
@@ -492,24 +575,25 @@ void WebGPUImplementation::RequestDeviceAsync(
DCHECK(request_device_callback_map_.find(request_device_serial) ==
request_device_callback_map_.end());
- // Flush because we need to make sure messages that free a previously used
+ // Commit because we need to make sure messages that free a previously used
// device are seen first. ReserveDevice may reuse an existing ID.
- wire_serializer_->Flush();
+ dawn_wire_->serializer()->Commit();
- dawn_wire::ReservedDevice reservation = wire_client_->ReserveDevice();
+ dawn_wire::ReservedDevice reservation =
+ dawn_wire_->wire_client()->ReserveDevice();
request_device_callback_map_[request_device_serial] = base::BindOnce(
- [](dawn_wire::WireClient* wire_client,
+ [](scoped_refptr<DawnWireServices> dawn_wire,
dawn_wire::ReservedDevice reservation,
base::OnceCallback<void(WGPUDevice)> callback, bool success) {
WGPUDevice device = reservation.device;
if (!success) {
- wire_client->ReclaimDeviceReservation(reservation);
+ dawn_wire->wire_client()->ReclaimDeviceReservation(reservation);
device = nullptr;
}
std::move(callback).Run(device);
},
- wire_client_.get(), reservation, std::move(request_device_callback));
+ dawn_wire_, reservation, std::move(request_device_callback));
dawn_wire::SerializeWGPUDeviceProperties(
&requested_device_properties, reinterpret_cast<char*>(buffer.address()));
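The callback above captures the reservation so the ID can be handed back if the service-side request fails. A simplified, self-contained sketch of that reserve/reclaim handshake, with hypothetical types:

#include <functional>

struct Reservation { int id = 0; };

struct FakeWireClient {
  int next_id = 1;
  Reservation ReserveDevice() { return Reservation{next_id++}; }
  void ReclaimDeviceReservation(Reservation) { /* ID becomes reusable */ }
};

// The service reply arrives later; on failure the optimistically reserved
// ID must be reclaimed so it is not leaked.
void OnRequestDeviceReply(FakeWireClient& client, Reservation reservation,
                          std::function<void(int)> callback, bool success) {
  if (!success) {
    client.ReclaimDeviceReservation(reservation);
    callback(0);  // stand-in for a null WGPUDevice
    return;
  }
  callback(reservation.id);
}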
@@ -564,11 +648,11 @@ void WebGPUImplementation::AssociateMailbox(GLuint device_id,
GLuint usage,
const GLbyte* mailbox) {
#if BUILDFLAG(USE_DAWN)
- // Flush previous Dawn commands as they may manipulate texture object IDs
+ // Commit previous Dawn commands as they may manipulate texture object IDs
// and need to be resolved prior to the AssociateMailbox command. Otherwise
// the service side might not know, for example that the previous texture
// using that ID has been released.
- wire_serializer_->Flush();
+ dawn_wire_->serializer()->Commit();
helper_->AssociateMailboxImmediate(device_id, device_generation, texture_id,
texture_generation, usage, mailbox);
#endif
@@ -577,9 +661,9 @@ void WebGPUImplementation::AssociateMailbox(GLuint device_id,
void WebGPUImplementation::DissociateMailbox(GLuint texture_id,
GLuint texture_generation) {
#if BUILDFLAG(USE_DAWN)
- // Flush previous Dawn commands that might be rendering to the texture, prior
+ // Commit previous Dawn commands that might be rendering to the texture, prior
// to Dissociating the shared image from that texture.
- wire_serializer_->Flush();
+ dawn_wire_->serializer()->Commit();
helper_->DissociateMailbox(texture_id, texture_generation);
#endif
}
diff --git a/chromium/gpu/command_buffer/client/webgpu_implementation.h b/chromium/gpu/command_buffer/client/webgpu_implementation.h
index cda07ba60b6..c7de5a139c1 100644
--- a/chromium/gpu/command_buffer/client/webgpu_implementation.h
+++ b/chromium/gpu/command_buffer/client/webgpu_implementation.h
@@ -28,8 +28,7 @@ class WireClient;
namespace gpu {
namespace webgpu {
-class DawnClientMemoryTransferService;
-class DawnClientSerializer;
+class DawnWireServices;
class WEBGPU_EXPORT WebGPUImplementation final : public WebGPUInterface,
public ImplementationBase {
@@ -108,17 +107,17 @@ class WEBGPU_EXPORT WebGPUImplementation final : public WebGPUInterface,
void OnGpuControlLostContextMaybeReentrant() final;
void OnGpuControlErrorMessage(const char* message, int32_t id) final;
void OnGpuControlSwapBuffersCompleted(
- const SwapBuffersCompleteParams& params) final;
+ const SwapBuffersCompleteParams& params,
+ gfx::GpuFenceHandle release_fence) final;
void OnSwapBufferPresented(uint64_t swap_id,
const gfx::PresentationFeedback& feedback) final;
void OnGpuControlReturnData(base::span<const uint8_t> data) final;
// WebGPUInterface implementation
- const DawnProcTable& GetProcs() const override;
void FlushCommands() override;
void EnsureAwaitingFlush(bool* needs_flush) override;
void FlushAwaitingCommands() override;
- void DisconnectContextAndDestroyServer() override;
+ scoped_refptr<APIChannel> GetAPIChannel() const override;
ReservedTexture ReserveTexture(WGPUDevice device) override;
void RequestAdapterAsync(
PowerPreference power_preference,
@@ -137,24 +136,24 @@ class WEBGPU_EXPORT WebGPUImplementation final : public WebGPUInterface,
void CheckGLError() {}
DawnRequestAdapterSerial NextRequestAdapterSerial();
DawnRequestDeviceSerial NextRequestDeviceSerial();
+ void LoseContext();
WebGPUCmdHelper* helper_;
#if BUILDFLAG(USE_DAWN)
- std::unique_ptr<DawnClientMemoryTransferService> memory_transfer_service_;
- std::unique_ptr<DawnClientSerializer> wire_serializer_;
- std::unique_ptr<dawn_wire::WireClient> wire_client_;
+ scoped_refptr<DawnWireServices> dawn_wire_;
#endif
WGPUDevice deprecated_default_device_ = nullptr;
LogSettings log_settings_;
- base::flat_map<DawnRequestAdapterSerial,
- base::OnceCallback<
- void(int32_t, const WGPUDeviceProperties&, const char*)>>
+ using RequestAdapterCallback = base::OnceCallback<
+ void(int32_t, const WGPUDeviceProperties&, const char*)>;
+ base::flat_map<DawnRequestAdapterSerial, RequestAdapterCallback>
request_adapter_callback_map_;
DawnRequestAdapterSerial request_adapter_serial_ = 0;
- base::flat_map<DawnRequestDeviceSerial, base::OnceCallback<void(bool)>>
+ using RequestDeviceCallback = base::OnceCallback<void(bool)>;
+ base::flat_map<DawnRequestDeviceSerial, RequestDeviceCallback>
request_device_callback_map_;
DawnRequestDeviceSerial request_device_serial_ = 0;
diff --git a/chromium/gpu/command_buffer/client/webgpu_interface.h b/chromium/gpu/command_buffer/client/webgpu_interface.h
index df6094f3242..8aee6656a5b 100644
--- a/chromium/gpu/command_buffer/client/webgpu_interface.h
+++ b/chromium/gpu/command_buffer/client/webgpu_interface.h
@@ -24,13 +24,29 @@ struct ReservedTexture {
uint32_t deviceGeneration;
};
-class WebGPUInterface : public InterfaceBase {
+// APIChannel is a RefCounted class which holds the Dawn wire client.
+class APIChannel : public base::RefCounted<APIChannel> {
public:
- WebGPUInterface() {}
- virtual ~WebGPUInterface() {}
-
+ // Get the proc table.
+ // As long as a reference to this APIChannel is alive, it is valid to
+ // call these procs.
virtual const DawnProcTable& GetProcs() const = 0;
+ // Disconnect. All commands using the WebGPU API should become no-ops
+ // and server-side resources can be freed.
+ virtual void Disconnect() = 0;
+
+ protected:
+ friend class base::RefCounted<APIChannel>;
+ APIChannel() = default;
+ virtual ~APIChannel() = default;
+};
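A hypothetical usage sketch of the contract stated above: the caller holds the scoped_refptr for as long as it calls through the proc table. GetAPIChannel() and GetProcs() are the APIs from this patch; the surrounding function is illustrative only:

void UseWebGPU(gpu::webgpu::WebGPUInterface* webgpu) {
  scoped_refptr<gpu::webgpu::APIChannel> channel = webgpu->GetAPIChannel();
  const DawnProcTable& procs = channel->GetProcs();
  // ... issue Dawn calls through |procs|; the strong reference keeps the
  // wire client, and therefore the procs, valid for this scope ...
}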
+
+class WebGPUInterface : public InterfaceBase {
+ public:
+ WebGPUInterface() = default;
+ virtual ~WebGPUInterface() = default;
+
// Flush all commands.
virtual void FlushCommands() = 0;
@@ -44,9 +60,8 @@ class WebGPUInterface : public InterfaceBase {
// nothing.
virtual void FlushAwaitingCommands() = 0;
- // Disconnect. All commands should become a no-op and server-side resources
- // can be freed.
- virtual void DisconnectContextAndDestroyServer() = 0;
+ // Get a strong reference to the APIChannel backing the implementation.
+ virtual scoped_refptr<APIChannel> GetAPIChannel() const = 0;
virtual ReservedTexture ReserveTexture(WGPUDevice device) = 0;
virtual void RequestAdapterAsync(
diff --git a/chromium/gpu/command_buffer/client/webgpu_interface_stub.cc b/chromium/gpu/command_buffer/client/webgpu_interface_stub.cc
index b7248970e52..9aca0fe0daa 100644
--- a/chromium/gpu/command_buffer/client/webgpu_interface_stub.cc
+++ b/chromium/gpu/command_buffer/client/webgpu_interface_stub.cc
@@ -7,10 +7,34 @@
namespace gpu {
namespace webgpu {
-WebGPUInterfaceStub::WebGPUInterfaceStub() = default;
+namespace {
+
+class APIChannelStub : public APIChannel {
+ public:
+ APIChannelStub() = default;
+
+ const DawnProcTable& GetProcs() const override { return procs_; }
+ void Disconnect() override {}
+
+ DawnProcTable* procs() { return &procs_; }
+
+ private:
+ ~APIChannelStub() override = default;
+
+ DawnProcTable procs_ = {};
+};
+
+} // anonymous namespace
+
+WebGPUInterfaceStub::WebGPUInterfaceStub()
+ : api_channel_(base::MakeRefCounted<APIChannelStub>()) {}
WebGPUInterfaceStub::~WebGPUInterfaceStub() = default;
+DawnProcTable* WebGPUInterfaceStub::procs() {
+ return static_cast<APIChannelStub*>(api_channel_.get())->procs();
+}
+
// InterfaceBase implementation.
void WebGPUInterfaceStub::GenSyncTokenCHROMIUM(GLbyte* sync_token) {}
void WebGPUInterfaceStub::GenUnverifiedSyncTokenCHROMIUM(GLbyte* sync_token) {}
@@ -20,13 +44,12 @@ void WebGPUInterfaceStub::WaitSyncTokenCHROMIUM(const GLbyte* sync_token) {}
void WebGPUInterfaceStub::ShallowFlushCHROMIUM() {}
// WebGPUInterface implementation
-const DawnProcTable& WebGPUInterfaceStub::GetProcs() const {
- return null_procs_;
+scoped_refptr<APIChannel> WebGPUInterfaceStub::GetAPIChannel() const {
+ return api_channel_;
}
void WebGPUInterfaceStub::FlushCommands() {}
void WebGPUInterfaceStub::EnsureAwaitingFlush(bool* needs_flush) {}
void WebGPUInterfaceStub::FlushAwaitingCommands() {}
-void WebGPUInterfaceStub::DisconnectContextAndDestroyServer() {}
ReservedTexture WebGPUInterfaceStub::ReserveTexture(WGPUDevice) {
return {nullptr, 0, 0, 0, 0};
}
diff --git a/chromium/gpu/command_buffer/client/webgpu_interface_stub.h b/chromium/gpu/command_buffer/client/webgpu_interface_stub.h
index 2c7d9d5f8f8..e8b5da5a575 100644
--- a/chromium/gpu/command_buffer/client/webgpu_interface_stub.h
+++ b/chromium/gpu/command_buffer/client/webgpu_interface_stub.h
@@ -24,11 +24,10 @@ class WebGPUInterfaceStub : public WebGPUInterface {
void ShallowFlushCHROMIUM() override;
// WebGPUInterface implementation
- const DawnProcTable& GetProcs() const override;
+ scoped_refptr<APIChannel> GetAPIChannel() const override;
void FlushCommands() override;
void EnsureAwaitingFlush(bool* needs_flush) override;
void FlushAwaitingCommands() override;
- void DisconnectContextAndDestroyServer() override;
ReservedTexture ReserveTexture(WGPUDevice device) override;
void RequestAdapterAsync(
PowerPreference power_preference,
@@ -47,8 +46,11 @@ class WebGPUInterfaceStub : public WebGPUInterface {
// this file instead of having to edit some template or the code generator.
#include "gpu/command_buffer/client/webgpu_interface_stub_autogen.h"
+ protected:
+ DawnProcTable* procs();
+
private:
- DawnProcTable null_procs_;
+ scoped_refptr<APIChannel> api_channel_;
};
} // namespace webgpu
diff --git a/chromium/gpu/command_buffer/common/BUILD.gn b/chromium/gpu/command_buffer/common/BUILD.gn
index 16fca625f01..bce989bc112 100644
--- a/chromium/gpu/command_buffer/common/BUILD.gn
+++ b/chromium/gpu/command_buffer/common/BUILD.gn
@@ -62,7 +62,6 @@ source_set("common_base_sources") {
public_deps = [
":mailbox",
"//base",
- "//base/util/type_safety",
]
configs += [ "//gpu:gpu_implementation" ]
}
@@ -112,7 +111,7 @@ source_set("common_sources") {
public_deps = [
":common_base_sources",
":mailbox",
- "//base/util/type_safety",
+ "//base",
"//mojo/public/cpp/system",
"//ui/gfx:memory_buffer",
"//ui/gfx/geometry",
@@ -148,7 +147,6 @@ source_set("gles2_sources") {
configs += [
"//gpu:gpu_gles2_implementation",
- "//build/config/compiler:no_shorten_64_warnings",
]
deps = [
@@ -162,6 +160,7 @@ source_set("raster_sources") {
visibility = [ "//gpu/*" ]
sources = [
+ "raster_cmd_enums.h",
"raster_cmd_format.cc",
"raster_cmd_format.h",
"raster_cmd_format_autogen.h",
diff --git a/chromium/gpu/command_buffer/common/capabilities.h b/chromium/gpu/command_buffer/common/capabilities.h
index 7fe983e2272..6462836f5c0 100644
--- a/chromium/gpu/command_buffer/common/capabilities.h
+++ b/chromium/gpu/command_buffer/common/capabilities.h
@@ -203,6 +203,9 @@ struct GPU_EXPORT Capabilities {
bool mesa_framebuffer_flip_y = false;
+ // Clients should use SharedImageInterface instead.
+ bool disable_legacy_mailbox = false;
+
int major_version = 2;
int minor_version = 0;
diff --git a/chromium/gpu/command_buffer/common/command_buffer_id.h b/chromium/gpu/command_buffer/common/command_buffer_id.h
index d3cacb2d18e..3d6dc07a17c 100644
--- a/chromium/gpu/command_buffer/common/command_buffer_id.h
+++ b/chromium/gpu/command_buffer/common/command_buffer_id.h
@@ -5,12 +5,12 @@
#ifndef GPU_COMMAND_BUFFER_COMMON_COMMAND_BUFFER_ID_H_
#define GPU_COMMAND_BUFFER_COMMON_COMMAND_BUFFER_ID_H_
-#include "base/util/type_safety/id_type.h"
+#include "base/types/id_type.h"
namespace gpu {
class CommandBuffer;
-using CommandBufferId = util::IdTypeU64<CommandBuffer>;
+using CommandBufferId = base::IdTypeU64<CommandBuffer>;
} // namespace gpu
diff --git a/chromium/gpu/command_buffer/common/discardable_handle.h b/chromium/gpu/command_buffer/common/discardable_handle.h
index dbc1149df8b..b9bf3926b08 100644
--- a/chromium/gpu/command_buffer/common/discardable_handle.h
+++ b/chromium/gpu/command_buffer/common/discardable_handle.h
@@ -6,7 +6,7 @@
#define GPU_COMMAND_BUFFER_COMMON_DISCARDABLE_HANDLE_H_
#include "base/memory/ref_counted.h"
-#include "base/util/type_safety/id_type.h"
+#include "base/types/id_type.h"
#include "gpu/gpu_export.h"
namespace gpu {
@@ -83,7 +83,7 @@ class GPU_EXPORT DiscardableHandleBase {
// handle (via the constructor), and can Lock an existing handle.
class GPU_EXPORT ClientDiscardableHandle : public DiscardableHandleBase {
public:
- using Id = util::IdType32<ClientDiscardableHandle>;
+ using Id = base::IdType32<ClientDiscardableHandle>;
ClientDiscardableHandle(); // Constructs an invalid handle.
ClientDiscardableHandle(scoped_refptr<Buffer> buffer,
diff --git a/chromium/gpu/command_buffer/common/gles2_cmd_format.cc b/chromium/gpu/command_buffer/common/gles2_cmd_format.cc
index 63715537825..b66cf964e87 100644
--- a/chromium/gpu/command_buffer/common/gles2_cmd_format.cc
+++ b/chromium/gpu/command_buffer/common/gles2_cmd_format.cc
@@ -12,7 +12,7 @@
#include <stddef.h>
-#include "base/stl_util.h"
+#include "base/cxx17_backports.h"
namespace gpu {
namespace gles2 {
diff --git a/chromium/gpu/command_buffer/common/gles2_cmd_utils.cc b/chromium/gpu/command_buffer/common/gles2_cmd_utils.cc
index 0b6e26ad36e..0de243f0540 100644
--- a/chromium/gpu/command_buffer/common/gles2_cmd_utils.cc
+++ b/chromium/gpu/command_buffer/common/gles2_cmd_utils.cc
@@ -16,9 +16,9 @@
#include <sstream>
#include "base/check_op.h"
+#include "base/cxx17_backports.h"
#include "base/notreached.h"
#include "base/numerics/safe_math.h"
-#include "base/stl_util.h"
namespace gpu {
namespace gles2 {
diff --git a/chromium/gpu/command_buffer/common/gpu_memory_buffer_support.cc b/chromium/gpu/command_buffer/common/gpu_memory_buffer_support.cc
index 99bce524a42..01b22f237e5 100644
--- a/chromium/gpu/command_buffer/common/gpu_memory_buffer_support.cc
+++ b/chromium/gpu/command_buffer/common/gpu_memory_buffer_support.cc
@@ -27,8 +27,9 @@ bool IsImageFromGpuMemoryBufferFormatSupported(
}
bool IsImageSizeValidForGpuMemoryBufferFormat(const gfx::Size& size,
- gfx::BufferFormat format) {
- switch (format) {
+ gfx::BufferFormat format,
+ gfx::BufferPlane plane) {
+ switch (GetPlaneBufferFormat(plane, format)) {
case gfx::BufferFormat::R_8:
case gfx::BufferFormat::R_16:
case gfx::BufferFormat::RG_88:
@@ -45,12 +46,19 @@ bool IsImageSizeValidForGpuMemoryBufferFormat(const gfx::Size& size,
case gfx::BufferFormat::YVU_420:
case gfx::BufferFormat::YUV_420_BIPLANAR:
case gfx::BufferFormat::P010:
+#if defined(OS_CHROMEOS)
+ // Allow odd size for CrOS.
+ // TODO(https://crbug.com/1208788, https://crbug.com/1224781): Merge this
+ // with the path that uses gfx::AllowOddHeightMultiPlanarBuffers.
+ return true;
+#else
// U and V planes are subsampled by a factor of 2.
if (size.width() % 2)
return false;
if (size.height() % 2 && !gfx::AllowOddHeightMultiPlanarBuffers())
return false;
return true;
+#endif // defined(OS_CHROMEOS)
}
NOTREACHED();
@@ -64,14 +72,11 @@ GPU_EXPORT bool IsPlaneValidForGpuMemoryBufferFormat(gfx::BufferPlane plane,
return plane == gfx::BufferPlane::DEFAULT ||
plane == gfx::BufferPlane::Y || plane == gfx::BufferPlane::U ||
plane == gfx::BufferPlane::V;
- break;
case gfx::BufferFormat::YUV_420_BIPLANAR:
return plane == gfx::BufferPlane::DEFAULT ||
plane == gfx::BufferPlane::Y || plane == gfx::BufferPlane::UV;
- break;
default:
return plane == gfx::BufferPlane::DEFAULT;
- break;
}
NOTREACHED();
return false;
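The subsampling rule above can be checked in isolation: for 4:2:0 formats the U and V planes are half resolution in each dimension, so the width must be even, and the height must be even unless odd heights are explicitly allowed. A standalone worked example:

#include <cstdio>

bool IsValid420Size(int width, int height, bool allow_odd_height) {
  if (width % 2)
    return false;
  if (height % 2 && !allow_odd_height)
    return false;
  return true;
}

int main() {
  std::printf("%d %d\n", IsValid420Size(640, 480, false),  // 1: valid
              IsValid420Size(641, 480, false));            // 0: odd width
}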
diff --git a/chromium/gpu/command_buffer/common/gpu_memory_buffer_support.h b/chromium/gpu/command_buffer/common/gpu_memory_buffer_support.h
index c59be9ab958..64510af8709 100644
--- a/chromium/gpu/command_buffer/common/gpu_memory_buffer_support.h
+++ b/chromium/gpu/command_buffer/common/gpu_memory_buffer_support.h
@@ -46,10 +46,11 @@ GPU_EXPORT bool IsImageFromGpuMemoryBufferFormatSupported(
gfx::BufferFormat format,
const Capabilities& capabilities);
-// Returns true if |size| is valid for |format|.
+// Returns true if |size| is valid for plane |plane| of |format|.
GPU_EXPORT bool IsImageSizeValidForGpuMemoryBufferFormat(
const gfx::Size& size,
- gfx::BufferFormat format);
+ gfx::BufferFormat format,
+ gfx::BufferPlane plane);
// Returns true if |plane| is a valid plane index for |format|.
GPU_EXPORT bool IsPlaneValidForGpuMemoryBufferFormat(gfx::BufferPlane plane,
diff --git a/chromium/gpu/command_buffer/common/mailbox.cc b/chromium/gpu/command_buffer/common/mailbox.cc
index f31a517102b..da8fda385c9 100644
--- a/chromium/gpu/command_buffer/common/mailbox.cc
+++ b/chromium/gpu/command_buffer/common/mailbox.cc
@@ -9,8 +9,8 @@
#include <string.h>
#include "base/check.h"
+#include "base/cxx17_backports.h"
#include "base/rand_util.h"
-#include "base/stl_util.h"
#include "base/strings/stringprintf.h"
namespace gpu {
diff --git a/chromium/gpu/command_buffer/common/raster_cmd_enums.h b/chromium/gpu/command_buffer/common/raster_cmd_enums.h
new file mode 100644
index 00000000000..fa510e50cef
--- /dev/null
+++ b/chromium/gpu/command_buffer/common/raster_cmd_enums.h
@@ -0,0 +1,22 @@
+// Copyright (c) 2021 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef GPU_COMMAND_BUFFER_COMMON_RASTER_CMD_ENUMS_H_
+#define GPU_COMMAND_BUFFER_COMMON_RASTER_CMD_ENUMS_H_
+
+#include <stdint.h>
+
+namespace gpu {
+namespace raster {
+
+enum MsaaMode : uint32_t {
+ kNoMSAA,
+ kMSAA, // legacy
+ kDMSAA, // new and improved
+};
+
+} // namespace raster
+} // namespace gpu
+
+#endif  // GPU_COMMAND_BUFFER_COMMON_RASTER_CMD_ENUMS_H_
diff --git a/chromium/gpu/command_buffer/common/raster_cmd_format.cc b/chromium/gpu/command_buffer/common/raster_cmd_format.cc
index 6729d13cbe0..dab0c9e7716 100644
--- a/chromium/gpu/command_buffer/common/raster_cmd_format.cc
+++ b/chromium/gpu/command_buffer/common/raster_cmd_format.cc
@@ -11,7 +11,7 @@
#include <stddef.h>
-#include "base/stl_util.h"
+#include "base/cxx17_backports.h"
namespace gpu {
namespace raster {
diff --git a/chromium/gpu/command_buffer/common/raster_cmd_format.h b/chromium/gpu/command_buffer/common/raster_cmd_format.h
index e43a51f9973..5af4902ad6d 100644
--- a/chromium/gpu/command_buffer/common/raster_cmd_format.h
+++ b/chromium/gpu/command_buffer/common/raster_cmd_format.h
@@ -19,6 +19,7 @@
#include "gpu/command_buffer/common/constants.h"
#include "gpu/command_buffer/common/gl2_types.h"
#include "gpu/command_buffer/common/gles2_cmd_utils.h"
+#include "gpu/command_buffer/common/raster_cmd_enums.h"
#include "gpu/command_buffer/common/raster_cmd_ids.h"
#include "ui/gfx/buffer_types.h"
diff --git a/chromium/gpu/command_buffer/common/raster_cmd_format_autogen.h b/chromium/gpu/command_buffer/common/raster_cmd_format_autogen.h
index 7d02570b7aa..ef98b2b46fd 100644
--- a/chromium/gpu/command_buffer/common/raster_cmd_format_autogen.h
+++ b/chromium/gpu/command_buffer/common/raster_cmd_format_autogen.h
@@ -391,12 +391,14 @@ struct BeginRasterCHROMIUMImmediate {
void Init(GLuint _sk_color,
GLboolean _needs_clear,
GLuint _msaa_sample_count,
+ gpu::raster::MsaaMode _msaa_mode,
GLboolean _can_use_lcd_text,
const GLbyte* _mailbox) {
SetHeader();
sk_color = _sk_color;
needs_clear = _needs_clear;
msaa_sample_count = _msaa_sample_count;
+ msaa_mode = _msaa_mode;
can_use_lcd_text = _can_use_lcd_text;
memcpy(ImmediateDataAddress(this), _mailbox, ComputeDataSize());
}
@@ -405,11 +407,12 @@ struct BeginRasterCHROMIUMImmediate {
GLuint _sk_color,
GLboolean _needs_clear,
GLuint _msaa_sample_count,
+ gpu::raster::MsaaMode _msaa_mode,
GLboolean _can_use_lcd_text,
const GLbyte* _mailbox) {
static_cast<ValueType*>(cmd)->Init(_sk_color, _needs_clear,
- _msaa_sample_count, _can_use_lcd_text,
- _mailbox);
+ _msaa_sample_count, _msaa_mode,
+ _can_use_lcd_text, _mailbox);
const uint32_t size = ComputeSize();
return NextImmediateCmdAddressTotalSize<ValueType>(cmd, size);
}
@@ -418,11 +421,12 @@ struct BeginRasterCHROMIUMImmediate {
uint32_t sk_color;
uint32_t needs_clear;
uint32_t msaa_sample_count;
+ uint32_t msaa_mode;
uint32_t can_use_lcd_text;
};
-static_assert(sizeof(BeginRasterCHROMIUMImmediate) == 20,
- "size of BeginRasterCHROMIUMImmediate should be 20");
+static_assert(sizeof(BeginRasterCHROMIUMImmediate) == 24,
+ "size of BeginRasterCHROMIUMImmediate should be 24");
static_assert(offsetof(BeginRasterCHROMIUMImmediate, header) == 0,
"offset of BeginRasterCHROMIUMImmediate header should be 0");
static_assert(offsetof(BeginRasterCHROMIUMImmediate, sk_color) == 4,
@@ -432,9 +436,11 @@ static_assert(offsetof(BeginRasterCHROMIUMImmediate, needs_clear) == 8,
static_assert(
offsetof(BeginRasterCHROMIUMImmediate, msaa_sample_count) == 12,
"offset of BeginRasterCHROMIUMImmediate msaa_sample_count should be 12");
+static_assert(offsetof(BeginRasterCHROMIUMImmediate, msaa_mode) == 16,
+ "offset of BeginRasterCHROMIUMImmediate msaa_mode should be 16");
static_assert(
- offsetof(BeginRasterCHROMIUMImmediate, can_use_lcd_text) == 16,
- "offset of BeginRasterCHROMIUMImmediate can_use_lcd_text should be 16");
+ offsetof(BeginRasterCHROMIUMImmediate, can_use_lcd_text) == 20,
+ "offset of BeginRasterCHROMIUMImmediate can_use_lcd_text should be 20");
struct RasterCHROMIUM {
typedef RasterCHROMIUM ValueType;
diff --git a/chromium/gpu/command_buffer/common/raster_cmd_format_test.cc b/chromium/gpu/command_buffer/common/raster_cmd_format_test.cc
index dd4ca4c7848..0126355f9ab 100644
--- a/chromium/gpu/command_buffer/common/raster_cmd_format_test.cc
+++ b/chromium/gpu/command_buffer/common/raster_cmd_format_test.cc
@@ -10,9 +10,9 @@
#include <limits>
#include "base/bind.h"
+#include "base/cxx17_backports.h"
#include "base/location.h"
#include "base/single_thread_task_runner.h"
-#include "base/stl_util.h"
#include "base/synchronization/waitable_event.h"
#include "base/threading/thread.h"
#include "gpu/command_buffer/common/raster_cmd_format.h"
diff --git a/chromium/gpu/command_buffer/common/raster_cmd_format_test_autogen.h b/chromium/gpu/command_buffer/common/raster_cmd_format_test_autogen.h
index 628d09e3f75..414cd093a4a 100644
--- a/chromium/gpu/command_buffer/common/raster_cmd_format_test_autogen.h
+++ b/chromium/gpu/command_buffer/common/raster_cmd_format_test_autogen.h
@@ -160,7 +160,8 @@ TEST_F(RasterFormatTest, BeginRasterCHROMIUMImmediate) {
*GetBufferAs<cmds::BeginRasterCHROMIUMImmediate>();
void* next_cmd =
cmd.Set(&cmd, static_cast<GLuint>(11), static_cast<GLboolean>(12),
- static_cast<GLuint>(13), static_cast<GLboolean>(14), data);
+ static_cast<GLuint>(13), static_cast<gpu::raster::MsaaMode>(14),
+ static_cast<GLboolean>(15), data);
EXPECT_EQ(static_cast<uint32_t>(cmds::BeginRasterCHROMIUMImmediate::kCmdId),
cmd.header.command);
EXPECT_EQ(sizeof(cmd) + RoundSizeToMultipleOfEntries(sizeof(data)),
@@ -168,7 +169,8 @@ TEST_F(RasterFormatTest, BeginRasterCHROMIUMImmediate) {
EXPECT_EQ(static_cast<GLuint>(11), cmd.sk_color);
EXPECT_EQ(static_cast<GLboolean>(12), cmd.needs_clear);
EXPECT_EQ(static_cast<GLuint>(13), cmd.msaa_sample_count);
- EXPECT_EQ(static_cast<GLboolean>(14), cmd.can_use_lcd_text);
+ EXPECT_EQ(static_cast<gpu::raster::MsaaMode>(14), cmd.msaa_mode);
+ EXPECT_EQ(static_cast<GLboolean>(15), cmd.can_use_lcd_text);
CheckBytesWrittenMatchesExpectedSize(
next_cmd, sizeof(cmd) + RoundSizeToMultipleOfEntries(sizeof(data)));
}
diff --git a/chromium/gpu/command_buffer/common/shared_image_usage.h b/chromium/gpu/command_buffer/common/shared_image_usage.h
index ff788ebc33a..413b629bc73 100644
--- a/chromium/gpu/command_buffer/common/shared_image_usage.h
+++ b/chromium/gpu/command_buffer/common/shared_image_usage.h
@@ -43,6 +43,8 @@ enum SharedImageUsage : uint32_t {
// GLImage::DisableInUseByWindowServer should be called on any GLImages that
// use this SharedImage.
SHARED_IMAGE_USAGE_MACOS_VIDEO_TOOLBOX = 1 << 12,
+ // Image will be used with mipmaps enabled.
+ SHARED_IMAGE_USAGE_MIPMAP = 1 << 13,
};
} // namespace gpu
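SharedImageUsage values are single-bit masks, so usages are combined with bitwise OR and tested with bitwise AND. A minimal standalone sketch (the constants mirror the two flags above):

#include <cstdint>

constexpr uint32_t kMacosVideoToolbox = 1 << 12;
constexpr uint32_t kMipmap = 1 << 13;

bool NeedsMipmaps(uint32_t usage) {
  return (usage & kMipmap) != 0;
}

// e.g. NeedsMipmaps(kMacosVideoToolbox | kMipmap) == true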
diff --git a/chromium/gpu/command_buffer/common/sync_token.cc b/chromium/gpu/command_buffer/common/sync_token.cc
index 0d6e9e6a166..69dfc8f2f34 100644
--- a/chromium/gpu/command_buffer/common/sync_token.cc
+++ b/chromium/gpu/command_buffer/common/sync_token.cc
@@ -22,6 +22,7 @@ SyncToken::SyncToken(CommandBufferNamespace namespace_id,
release_count_(release_count) {}
SyncToken::SyncToken(const SyncToken& other) = default;
+SyncToken& SyncToken::operator=(const SyncToken& other) = default;
std::string SyncToken::ToDebugString() const {
// At the level of the generic command buffer code, the command buffer ID is
diff --git a/chromium/gpu/command_buffer/common/sync_token.h b/chromium/gpu/command_buffer/common/sync_token.h
index 52d696caa22..eae3faf2c9f 100644
--- a/chromium/gpu/command_buffer/common/sync_token.h
+++ b/chromium/gpu/command_buffer/common/sync_token.h
@@ -33,6 +33,7 @@ struct GPU_EXPORT SyncToken {
uint64_t release_count);
SyncToken(const SyncToken& other);
+ SyncToken& operator=(const SyncToken& other);
void Set(CommandBufferNamespace namespace_id,
CommandBufferId command_buffer_id,
diff --git a/chromium/gpu/command_buffer/common/webgpu_cmd_enums.h b/chromium/gpu/command_buffer/common/webgpu_cmd_enums.h
index 83615b30c8a..f565583699d 100644
--- a/chromium/gpu/command_buffer/common/webgpu_cmd_enums.h
+++ b/chromium/gpu/command_buffer/common/webgpu_cmd_enums.h
@@ -5,6 +5,8 @@
#ifndef GPU_COMMAND_BUFFER_COMMON_WEBGPU_CMD_ENUMS_H_
#define GPU_COMMAND_BUFFER_COMMON_WEBGPU_CMD_ENUMS_H_
+#include <stdint.h>
+
namespace gpu {
namespace webgpu {
diff --git a/chromium/gpu/command_buffer/common/webgpu_cmd_format.cc b/chromium/gpu/command_buffer/common/webgpu_cmd_format.cc
index 73e21dcf358..468d828e659 100644
--- a/chromium/gpu/command_buffer/common/webgpu_cmd_format.cc
+++ b/chromium/gpu/command_buffer/common/webgpu_cmd_format.cc
@@ -7,7 +7,7 @@
// We explicitly do NOT include webgpu_cmd_format.h here because client side
// and service side have different requirements.
-#include "base/stl_util.h"
+#include "base/cxx17_backports.h"
#include "gpu/command_buffer/common/cmd_buffer_common.h"
namespace gpu {
diff --git a/chromium/gpu/command_buffer/raster_cmd_buffer_functions.txt b/chromium/gpu/command_buffer/raster_cmd_buffer_functions.txt
index 4281ce2b650..2051ebbdfc2 100644
--- a/chromium/gpu/command_buffer/raster_cmd_buffer_functions.txt
+++ b/chromium/gpu/command_buffer/raster_cmd_buffer_functions.txt
@@ -23,7 +23,7 @@ GL_APICALL void GL_APIENTRY glLoseContextCHROMIUM (GLenumResetStatus cur
GL_APICALL GLenum GL_APIENTRY glGetGraphicsResetStatusKHR (void);
// Extension CHROMIUM_raster_transport
-GL_APICALL void GL_APIENTRY glBeginRasterCHROMIUM (GLuint sk_color, GLboolean needs_clear, GLuint msaa_sample_count, GLboolean can_use_lcd_text, const GLbyte* mailbox);
+GL_APICALL void GL_APIENTRY glBeginRasterCHROMIUM (GLuint sk_color, GLboolean needs_clear, GLuint msaa_sample_count, gpu::raster::MsaaMode msaa_mode, GLboolean can_use_lcd_text, const GLbyte* mailbox);
GL_APICALL void GL_APIENTRY glRasterCHROMIUM (GLuint raster_shm_id, GLuint raster_shm_offset, GLuint raster_shm_size, GLuint font_shm_id, GLuint font_shm_offset, GLuint font_shm_size);
GL_APICALL void GL_APIENTRY glEndRasterCHROMIUM (void);
GL_APICALL void GL_APIENTRY glCreateTransferCacheEntryINTERNAL (GLuint entry_type, GLuint entry_id, GLuint handle_shm_id, GLuint handle_shm_offset, GLuint data_shm_id, GLuint data_shm_offset, GLuint data_size);
diff --git a/chromium/gpu/command_buffer/service/BUILD.gn b/chromium/gpu/command_buffer/service/BUILD.gn
index 65b09454024..2fea37a8427 100644
--- a/chromium/gpu/command_buffer/service/BUILD.gn
+++ b/chromium/gpu/command_buffer/service/BUILD.gn
@@ -5,6 +5,7 @@
import("//build/config/ui.gni")
import("//gpu/vulkan/features.gni")
import("//skia/features.gni")
+import("//third_party/dawn/scripts/dawn_features.gni")
import("//third_party/protobuf/proto_library.gni")
import("//ui/gl/features.gni")
@@ -54,6 +55,8 @@ target(link_target_type, "service_sources") {
"memory_tracking.h",
"scheduler.cc",
"scheduler.h",
+ "scheduler_task_runner.cc",
+ "scheduler_task_runner.h",
"sequence_id.h",
"sync_point_manager.cc",
"sync_point_manager.h",
@@ -64,15 +67,13 @@ target(link_target_type, "service_sources") {
]
configs += [
- # TODO(jschuh): crbug.com/167187 fix size_t to int truncations.
- "//build/config/compiler:no_size_t_to_int_warning",
"//build/config:precompiled_headers",
"//gpu:gpu_implementation",
"//third_party/khronos:khronos_headers",
]
public_deps = [
- "//base/util/type_safety",
+ "//base",
"//gpu/command_buffer/common:common_sources",
"//url:url",
]
@@ -226,6 +227,10 @@ target(link_target_type, "gles2_sources") {
"shared_image_backing.h",
"shared_image_backing_factory.cc",
"shared_image_backing_factory.h",
+ "shared_image_backing_factory_gl_common.cc",
+ "shared_image_backing_factory_gl_common.h",
+ "shared_image_backing_factory_gl_image.cc",
+ "shared_image_backing_factory_gl_image.h",
"shared_image_backing_factory_gl_texture.cc",
"shared_image_backing_factory_gl_texture.h",
"shared_image_backing_gl_common.cc",
@@ -290,7 +295,7 @@ target(link_target_type, "gles2_sources") {
include_dirs = [ "//third_party/mesa_headers" ]
public_deps = [
- "//base/util/type_safety",
+ "//base",
"//cc/paint",
"//gpu/command_buffer/common",
"//gpu/command_buffer/common:gles2_sources",
@@ -314,8 +319,8 @@ target(link_target_type, "gles2_sources") {
"//gpu/ipc/common",
"//gpu/vulkan:buildflags",
"//skia:buildflags",
+ "//third_party/angle:angle_commit_id",
"//third_party/angle:angle_image_util",
- "//third_party/angle:commit_id",
"//third_party/libyuv",
"//third_party/protobuf:protobuf_lite",
"//third_party/re2",
@@ -397,9 +402,16 @@ target(link_target_type, "gles2_sources") {
if (use_dawn) {
deps += [
+ "//third_party/dawn/src/dawn:dawn_proc",
"//third_party/dawn/src/dawn_native",
"//third_party/dawn/src/dawn_platform",
]
+ if (dawn_enable_opengles) {
+ sources += [
+ "shared_image_representation_dawn_egl_image.cc",
+ "shared_image_representation_dawn_egl_image.h",
+ ]
+ }
}
if (is_mac) {
@@ -437,12 +449,16 @@ target(link_target_type, "gles2_sources") {
"ahardwarebuffer_utils.h",
"image_reader_gl_owner.cc",
"image_reader_gl_owner.h",
+ "ref_counted_lock.cc",
+ "ref_counted_lock.h",
"shared_image_backing_android.cc",
"shared_image_backing_android.h",
"shared_image_backing_egl_image.cc",
"shared_image_backing_egl_image.h",
"shared_image_backing_factory_ahardwarebuffer.cc",
"shared_image_backing_factory_ahardwarebuffer.h",
+ "shared_image_backing_factory_egl.cc",
+ "shared_image_backing_factory_egl.h",
"shared_image_backing_scoped_hardware_buffer_fence_sync.cc",
"shared_image_backing_scoped_hardware_buffer_fence_sync.h",
"shared_image_batch_access_manager.cc",
diff --git a/chromium/gpu/command_buffer/service/ahardwarebuffer_utils.cc b/chromium/gpu/command_buffer/service/ahardwarebuffer_utils.cc
index a53dae3bdcd..bec07037de1 100644
--- a/chromium/gpu/command_buffer/service/ahardwarebuffer_utils.cc
+++ b/chromium/gpu/command_buffer/service/ahardwarebuffer_utils.cc
@@ -169,7 +169,8 @@ std::unique_ptr<VulkanImage> CreateVkImageFromAhbHandle(
base::android::ScopedHardwareBufferHandle ahb_handle,
SharedContextState* context_state,
const gfx::Size& size,
- const viz::ResourceFormat& format) {
+ const viz::ResourceFormat& format,
+ uint32_t queue_family_index) {
DCHECK(context_state);
DCHECK(context_state->GrContextIsVulkan());
@@ -177,7 +178,8 @@ std::unique_ptr<VulkanImage> CreateVkImageFromAhbHandle(
gfx::GpuMemoryBufferHandle gmb_handle(std::move(ahb_handle));
return VulkanImage::CreateFromGpuMemoryBufferHandle(
device_queue, std::move(gmb_handle), size, ToVkFormat(format),
- 0 /* usage */);
+ /*usage=*/0, /*flags=*/0, /*image_tiling=*/VK_IMAGE_TILING_OPTIMAL,
+ /*queue_family_index=*/queue_family_index);
}
} // namespace gpu
diff --git a/chromium/gpu/command_buffer/service/ahardwarebuffer_utils.h b/chromium/gpu/command_buffer/service/ahardwarebuffer_utils.h
index a3106ae53a5..07df8cc59d5 100644
--- a/chromium/gpu/command_buffer/service/ahardwarebuffer_utils.h
+++ b/chromium/gpu/command_buffer/service/ahardwarebuffer_utils.h
@@ -76,7 +76,8 @@ std::unique_ptr<VulkanImage> CreateVkImageFromAhbHandle(
base::android::ScopedHardwareBufferHandle ahb_handle,
SharedContextState* context_state,
const gfx::Size& size,
- const viz::ResourceFormat& format);
+ const viz::ResourceFormat& format,
+ uint32_t queue_family_index);
} // namespace gpu
diff --git a/chromium/gpu/command_buffer/service/buffer_manager_unittest.cc b/chromium/gpu/command_buffer/service/buffer_manager_unittest.cc
index 6968432500c..cffb86682bd 100644
--- a/chromium/gpu/command_buffer/service/buffer_manager_unittest.cc
+++ b/chromium/gpu/command_buffer/service/buffer_manager_unittest.cc
@@ -7,7 +7,7 @@
#include <memory>
-#include "base/stl_util.h"
+#include "base/cxx17_backports.h"
#include "gpu/command_buffer/service/buffer_manager.h"
#include "gpu/command_buffer/service/error_state_mock.h"
#include "gpu/command_buffer/service/feature_info.h"
diff --git a/chromium/gpu/command_buffer/service/common_decoder.cc b/chromium/gpu/command_buffer/service/common_decoder.cc
index e47dbf5d516..82dea1cffc8 100644
--- a/chromium/gpu/command_buffer/service/common_decoder.cc
+++ b/chromium/gpu/command_buffer/service/common_decoder.cc
@@ -9,8 +9,8 @@
#include <algorithm>
+#include "base/cxx17_backports.h"
#include "base/numerics/safe_math.h"
-#include "base/stl_util.h"
#include "gpu/command_buffer/service/command_buffer_service.h"
#include "gpu/command_buffer/service/decoder_client.h"
#include "ui/gfx/ipc/color/gfx_param_traits.h"
diff --git a/chromium/gpu/command_buffer/service/context_group.cc b/chromium/gpu/command_buffer/service/context_group.cc
index 898c5a82e02..ce44491be91 100644
--- a/chromium/gpu/command_buffer/service/context_group.cc
+++ b/chromium/gpu/command_buffer/service/context_group.cc
@@ -12,6 +12,7 @@
#include <string>
#include "base/command_line.h"
+#include "base/containers/cxx20_erase.h"
#include "gpu/command_buffer/service/buffer_manager.h"
#include "gpu/command_buffer/service/decoder_context.h"
#include "gpu/command_buffer/service/framebuffer_manager.h"
diff --git a/chromium/gpu/command_buffer/service/context_state.cc b/chromium/gpu/command_buffer/service/context_state.cc
index 645a7ba2a91..51dd49ffb4c 100644
--- a/chromium/gpu/command_buffer/service/context_state.cc
+++ b/chromium/gpu/command_buffer/service/context_state.cc
@@ -8,7 +8,7 @@
#include <cmath>
-#include "base/numerics/ranges.h"
+#include "base/cxx17_backports.h"
#include "gpu/command_buffer/common/gles2_cmd_utils.h"
#include "gpu/command_buffer/service/buffer_manager.h"
#include "gpu/command_buffer/service/framebuffer_manager.h"
@@ -373,8 +373,7 @@ void ContextState::RestoreUnpackState() const {
}
void ContextState::DoLineWidth(GLfloat width) const {
- api()->glLineWidthFn(
- base::ClampToRange(width, line_width_min_, line_width_max_));
+ api()->glLineWidthFn(base::clamp(width, line_width_min_, line_width_max_));
}
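base::clamp is the cxx17_backports polyfill for std::clamp and keeps the (value, low, high) argument order of the base::ClampToRange call it replaces, so the substitution above is behavior-preserving. The equivalent standard C++ for reference:

#include <algorithm>

float ClampLineWidth(float width, float min_width, float max_width) {
  return std::clamp(width, min_width, max_width);
}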
void ContextState::RestoreBufferBindings() const {
diff --git a/chromium/gpu/command_buffer/service/dawn_platform.cc b/chromium/gpu/command_buffer/service/dawn_platform.cc
index c89bb9da89a..30143aab846 100644
--- a/chromium/gpu/command_buffer/service/dawn_platform.cc
+++ b/chromium/gpu/command_buffer/service/dawn_platform.cc
@@ -4,12 +4,80 @@
#include "gpu/command_buffer/service/dawn_platform.h"
+#include "base/synchronization/waitable_event.h"
+#include "base/task/thread_pool.h"
#include "base/trace_event/trace_arguments.h"
#include "base/trace_event/trace_event.h"
namespace gpu {
namespace webgpu {
+namespace {
+
+class AsyncWaitableEventImpl
+ : public base::RefCountedThreadSafe<AsyncWaitableEventImpl> {
+ public:
+ AsyncWaitableEventImpl()
+ : waitable_event_(base::WaitableEvent::ResetPolicy::MANUAL,
+ base::WaitableEvent::InitialState::NOT_SIGNALED) {}
+
+ void Wait() { waitable_event_.Wait(); }
+
+ bool IsComplete() { return waitable_event_.IsSignaled(); }
+
+ void MarkAsComplete() { waitable_event_.Signal(); }
+
+ private:
+ friend class base::RefCountedThreadSafe<AsyncWaitableEventImpl>;
+ ~AsyncWaitableEventImpl() = default;
+
+ base::WaitableEvent waitable_event_;
+};
+
+class AsyncWaitableEvent : public dawn_platform::WaitableEvent {
+ public:
+ AsyncWaitableEvent()
+ : waitable_event_impl_(base::MakeRefCounted<AsyncWaitableEventImpl>()) {}
+
+ void Wait() override { waitable_event_impl_->Wait(); }
+
+ bool IsComplete() override { return waitable_event_impl_->IsComplete(); }
+
+ scoped_refptr<AsyncWaitableEventImpl> GetWaitableEventImpl() const {
+ return waitable_event_impl_;
+ }
+
+ private:
+ scoped_refptr<AsyncWaitableEventImpl> waitable_event_impl_;
+};
+
+class AsyncWorkerTaskPool : public dawn_platform::WorkerTaskPool {
+ public:
+ std::unique_ptr<dawn_platform::WaitableEvent> PostWorkerTask(
+ dawn_platform::PostWorkerTaskCallback callback,
+ void* user_data) override {
+ std::unique_ptr<AsyncWaitableEvent> waitable_event =
+ std::make_unique<AsyncWaitableEvent>();
+ base::ThreadPool::PostTask(
+ FROM_HERE, {base::MayBlock(), base::TaskPriority::USER_VISIBLE},
+ base::BindOnce(&RunWorkerTask, callback, user_data,
+ waitable_event->GetWaitableEventImpl()));
+ return waitable_event;
+ }
+
+ private:
+ static void RunWorkerTask(
+ dawn_platform::PostWorkerTaskCallback callback,
+ void* user_data,
+ scoped_refptr<AsyncWaitableEventImpl> waitable_event_impl) {
+ TRACE_EVENT0("toplevel", "DawnPlatformImpl::RunWorkerTask");
+ callback(user_data);
+ waitable_event_impl->MarkAsComplete();
+ }
+};
+
+} // anonymous namespace
+
DawnPlatform::DawnPlatform() = default;
DawnPlatform::~DawnPlatform() = default;
@@ -58,5 +126,10 @@ uint64_t DawnPlatform::AddTraceEvent(
return result;
}
+std::unique_ptr<dawn_platform::WorkerTaskPool>
+DawnPlatform::CreateWorkerTaskPool() {
+ return std::make_unique<AsyncWorkerTaskPool>();
+}
+
} // namespace webgpu
} // namespace gpu
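The shape of AsyncWaitableEvent above, where a refcounted core is shared between the returned event and the posted task, can be modeled with std:: primitives. This sketch is a stand-in for the Chromium types: neither side can signal or wait on freed memory, because both hold a strong reference to the core.

#include <condition_variable>
#include <memory>
#include <mutex>
#include <thread>

class WaitableCore {
 public:
  void Signal() {
    { std::lock_guard<std::mutex> lock(m_); done_ = true; }
    cv_.notify_all();
  }
  void Wait() {
    std::unique_lock<std::mutex> lock(m_);
    cv_.wait(lock, [this] { return done_; });
  }

 private:
  std::mutex m_;
  std::condition_variable cv_;
  bool done_ = false;
};

std::shared_ptr<WaitableCore> PostWorkerTask(void (*task)(void*), void* ud) {
  auto core = std::make_shared<WaitableCore>();
  std::thread([core, task, ud] {
    task(ud);        // run the work off-thread...
    core->Signal();  // ...then mark the event complete
  }).detach();
  return core;  // the caller can Wait() or poll without racing the worker
}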
diff --git a/chromium/gpu/command_buffer/service/dawn_platform.h b/chromium/gpu/command_buffer/service/dawn_platform.h
index ffa4e766526..dffb37ff9c8 100644
--- a/chromium/gpu/command_buffer/service/dawn_platform.h
+++ b/chromium/gpu/command_buffer/service/dawn_platform.h
@@ -30,6 +30,9 @@ class DawnPlatform : public dawn_platform::Platform {
const unsigned char* arg_types,
const uint64_t* arg_values,
unsigned char flags) override;
+
+ std::unique_ptr<dawn_platform::WorkerTaskPool> CreateWorkerTaskPool()
+ override;
};
} // namespace webgpu
diff --git a/chromium/gpu/command_buffer/service/dawn_service_memory_transfer_service.cc b/chromium/gpu/command_buffer/service/dawn_service_memory_transfer_service.cc
index 3f546c45673..2df536f5bdb 100644
--- a/chromium/gpu/command_buffer/service/dawn_service_memory_transfer_service.cc
+++ b/chromium/gpu/command_buffer/service/dawn_service_memory_transfer_service.cc
@@ -20,20 +20,20 @@ class ReadHandleImpl
~ReadHandleImpl() override = default;
- size_t SerializeInitialDataSize(const void* data,
- size_t data_length) override {
+ size_t SizeOfSerializeDataUpdate(size_t offset, size_t size) override {
// Nothing is serialized because we're using shared memory.
return 0;
}
- void SerializeInitialData(const void* data,
- size_t data_length,
- void* serialize_pointer) override {
- DCHECK_EQ(data_length, size_);
- // Copy the initial data into the shared memory allocation.
+ void SerializeDataUpdate(const void* data,
+ size_t offset,
+ size_t size,
+ void* serializePointer) override {
+ DCHECK_LE(size + offset, size_);
+ // Copy the data into the shared memory allocation.
// In the case of buffer mapping, this is the mapped GPU memory which we
// copy into client-visible shared memory.
- memcpy(ptr_, data, data_length);
+ memcpy(static_cast<uint8_t*>(ptr_) + offset, data, size);
}
private:
@@ -49,16 +49,22 @@ class WriteHandleImpl
~WriteHandleImpl() override = default;
- bool DeserializeFlush(const void* deserialize_pointer,
- size_t deserialize_size) override {
+ // The offset is always the absolute offset from the start of the buffer.
+ bool DeserializeDataUpdate(const void* deserialize_pointer,
+ size_t deserialize_size,
+ size_t offset,
+ size_t size) override {
// Nothing is serialized because we're using shared memory.
DCHECK_EQ(deserialize_size, 0u);
- DCHECK_EQ(mDataLength, size_);
+ DCHECK_LE(size + offset, size_);
DCHECK(mTargetData);
DCHECK(ptr_);
// Copy from shared memory into the target buffer.
- memcpy(mTargetData, ptr_, size_);
+ // mTargetData is always the starting address of the backing buffer
+ // after the Dawn-side change.
+ memcpy(static_cast<uint8_t*>(mTargetData) + offset,
+ static_cast<const uint8_t*>(ptr_) + offset, size);
return true;
}
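With shared memory as the transport, an update message carries no payload; it is just "apply |size| bytes at absolute |offset|". A simplified standalone sketch of the deserialize path above:

#include <cassert>
#include <cstdint>
#include <cstring>

void ApplyDataUpdate(const uint8_t* shared_region, uint8_t* target_buffer,
                     size_t buffer_size, size_t offset, size_t size) {
  assert(offset + size <= buffer_size);  // mirrors the DCHECK_LE above
  std::memcpy(target_buffer + offset, shared_region + offset, size);
}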
diff --git a/chromium/gpu/command_buffer/service/external_semaphore.cc b/chromium/gpu/command_buffer/service/external_semaphore.cc
index 73f84c2a2c5..7475cbbf477 100644
--- a/chromium/gpu/command_buffer/service/external_semaphore.cc
+++ b/chromium/gpu/command_buffer/service/external_semaphore.cc
@@ -59,7 +59,7 @@ GLuint ImportSemaphoreHandleToGLSemaphore(SemaphoreHandle handle) {
return gl_semaphore;
#elif defined(OS_FUCHSIA)
if (handle.vk_handle_type() !=
- VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_TEMP_ZIRCON_EVENT_BIT_FUCHSIA) {
+ VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_ZIRCON_EVENT_BIT_FUCHSIA) {
DLOG(ERROR) << "Importing semaphore handle of unexpected type:"
<< handle.vk_handle_type();
return 0;
diff --git a/chromium/gpu/command_buffer/service/external_vk_image_backing.cc b/chromium/gpu/command_buffer/service/external_vk_image_backing.cc
index 35fbaab5d25..c4aaf62fe49 100644
--- a/chromium/gpu/command_buffer/service/external_vk_image_backing.cc
+++ b/chromium/gpu/command_buffer/service/external_vk_image_backing.cc
@@ -7,7 +7,7 @@
#include <utility>
#include <vector>
-#include "base/stl_util.h"
+#include "base/cxx17_backports.h"
#include "build/build_config.h"
#include "components/viz/common/resources/resource_sizes.h"
#include "gpu/command_buffer/service/external_vk_image_gl_representation.h"
@@ -27,6 +27,7 @@
#include "ui/gfx/buffer_format_util.h"
#include "ui/gl/buildflags.h"
#include "ui/gl/gl_context.h"
+#include "ui/gl/gl_utils.h"
#include "ui/gl/gl_version_info.h"
#include "ui/gl/scoped_binders.h"
@@ -82,28 +83,6 @@ static const struct {
static_assert(base::size(kFormatTable) == (viz::RESOURCE_FORMAT_MAX + 1),
"kFormatTable does not handle all cases.");
-class ScopedPixelStore {
- public:
- ScopedPixelStore(gl::GLApi* api, GLenum name, GLint value)
- : api_(api), name_(name), value_(value) {
- api_->glGetIntegervFn(name_, &old_value_);
- if (value_ != old_value_)
- api->glPixelStoreiFn(name_, value_);
- }
- ~ScopedPixelStore() {
- if (value_ != old_value_)
- api_->glPixelStoreiFn(name_, old_value_);
- }
-
- private:
- gl::GLApi* const api_;
- const GLenum name_;
- const GLint value_;
- GLint old_value_;
-
- DISALLOW_COPY_AND_ASSIGN(ScopedPixelStore);
-};
-
class ScopedDedicatedMemoryObject {
public:
explicit ScopedDedicatedMemoryObject(gl::GLApi* api) : api_(api) {
@@ -191,7 +170,6 @@ std::unique_ptr<ExternalVkImageBacking> ExternalVkImageBacking::Create(
base::span<const uint8_t> pixel_data,
bool using_gmb) {
bool is_external = context_state->support_vulkan_external_object();
- bool is_transfer_dst = using_gmb || !pixel_data.empty() || !is_external;
auto* device_queue = context_state->vk_context_provider()->GetDeviceQueue();
VkFormat vk_format = ToVkFormat(format);
@@ -199,7 +177,9 @@ std::unique_ptr<ExternalVkImageBacking> ExternalVkImageBacking::Create(
SHARED_IMAGE_USAGE_GLES2 | SHARED_IMAGE_USAGE_GLES2_FRAMEBUFFER_HINT |
SHARED_IMAGE_USAGE_RASTER | SHARED_IMAGE_USAGE_OOP_RASTERIZATION |
SHARED_IMAGE_USAGE_WEBGPU;
- VkImageUsageFlags vk_usage = VK_IMAGE_USAGE_SAMPLED_BIT;
+ VkImageUsageFlags vk_usage = VK_IMAGE_USAGE_SAMPLED_BIT |
+ VK_IMAGE_USAGE_TRANSFER_SRC_BIT |
+ VK_IMAGE_USAGE_TRANSFER_DST_BIT;
if (usage & kUsageNeedsColorAttachment) {
vk_usage |= VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT;
if (format == viz::ETC1) {
@@ -208,42 +188,16 @@ std::unique_ptr<ExternalVkImageBacking> ExternalVkImageBacking::Create(
}
}
- if (is_transfer_dst)
- vk_usage |= VK_IMAGE_USAGE_TRANSFER_DST_BIT;
-
// Requested usage flags must be supported.
DCHECK_EQ(vk_usage & image_usage_cache->optimal_tiling_usage[format],
vk_usage);
- if (is_external && (usage & SHARED_IMAGE_USAGE_GLES2)) {
- // Must request all available image usage flags if aliasing GL texture. This
- // is a spec requirement per EXT_memory_object. However, if
- // ANGLE_memory_object_flags is supported, usage flags can be arbitrary.
- if (UseMinimalUsageFlags(context_state.get())) {
- // The following additional usage flags are provided for ANGLE:
- //
- // - TRANSFER_SRC: Used for copies from this image.
- // - TRANSFER_DST: Used for copies to this image or clears.
- vk_usage |=
- VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT;
- } else {
- vk_usage |= image_usage_cache->optimal_tiling_usage[format];
- }
- }
-
- if (is_external && (usage & SHARED_IMAGE_USAGE_WEBGPU)) {
- // The following additional usage flags are provided for Dawn:
- //
- // - TRANSFER_SRC: Used for copies from this image.
- // - TRANSFER_DST: Used for copies to this image or clears.
- vk_usage |=
- VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT;
- }
-
- if (usage & SHARED_IMAGE_USAGE_DISPLAY) {
- // Skia currently requires all VkImages it uses to support transfers
- vk_usage |=
- VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT;
+ // Must request all available image usage flags if aliasing GL texture. This
+ // is a spec requirement per EXT_memory_object. However, if
+ // ANGLE_memory_object_flags is supported, usage flags can be arbitrary.
+ if (is_external && (usage & SHARED_IMAGE_USAGE_GLES2) &&
+ !UseMinimalUsageFlags(context_state.get())) {
+ vk_usage |= image_usage_cache->optimal_tiling_usage[format];
}
VkImageCreateFlags vk_flags = 0;
@@ -288,7 +242,8 @@ std::unique_ptr<ExternalVkImageBacking> ExternalVkImageBacking::CreateFromGMB(
SkAlphaType alpha_type,
uint32_t usage,
const VulkanImageUsageCache* image_usage_cache) {
- if (!gpu::IsImageSizeValidForGpuMemoryBufferFormat(size, buffer_format)) {
+ if (!gpu::IsImageSizeValidForGpuMemoryBufferFormat(
+ size, buffer_format, gfx::BufferPlane::DEFAULT)) {
DLOG(ERROR) << "Invalid image size for format.";
return nullptr;
}
@@ -600,7 +555,8 @@ void ExternalVkImageBacking::ReturnPendingSemaphoresWithFenceHelper(
std::unique_ptr<SharedImageRepresentationDawn>
ExternalVkImageBacking::ProduceDawn(SharedImageManager* manager,
MemoryTypeTracker* tracker,
- WGPUDevice wgpuDevice) {
+ WGPUDevice wgpuDevice,
+ WGPUBackendType backend_type) {
#if (defined(OS_LINUX) || defined(OS_CHROMEOS)) && BUILDFLAG(USE_DAWN)
auto wgpu_format = viz::ToWGPUFormat(format());
@@ -830,7 +786,7 @@ void ExternalVkImageBacking::UpdateContent(uint32_t content_flags) {
if (latest_content_ & kInSharedMemory) {
CopyPixelsFromShmToGLTexture();
} else if (latest_content_ & kInVkImage) {
- NOTIMPLEMENTED_LOG_ONCE();
+ CopyPixelsFromVkImageToGLTexture();
}
} else if (content_flags == kInSharedMemory) {
// TODO(penghuang): read pixels back from VkImage to shared memory GMB, if
@@ -842,7 +798,7 @@ void ExternalVkImageBacking::UpdateContent(uint32_t content_flags) {
bool ExternalVkImageBacking::WritePixelsWithCallback(
size_t data_size,
size_t stride,
- FillBufferCallback callback) {
+ WriteBufferCallback callback) {
DCHECK(stride == 0 || size().height() * stride <= data_size);
VkBufferCreateInfo buffer_create_info = {
@@ -941,6 +897,83 @@ bool ExternalVkImageBacking::WritePixelsWithCallback(
return true;
}
+bool ExternalVkImageBacking::ReadPixelsWithCallback(
+ size_t data_size,
+ size_t stride,
+ ReadBufferCallback callback) {
+ DCHECK(stride == 0 || size().height() * stride <= data_size);
+
+ VkBufferCreateInfo buffer_create_info = {
+ .sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO,
+ .size = data_size,
+ .usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT,
+ .sharingMode = VK_SHARING_MODE_EXCLUSIVE,
+ };
+
+ VmaAllocator allocator =
+ context_state()->vk_context_provider()->GetDeviceQueue()->vma_allocator();
+ VkBuffer stage_buffer = VK_NULL_HANDLE;
+ VmaAllocation stage_allocation = VK_NULL_HANDLE;
+ VkResult result = vma::CreateBuffer(allocator, &buffer_create_info,
+ VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT |
+ VK_MEMORY_PROPERTY_HOST_COHERENT_BIT,
+ 0, &stage_buffer, &stage_allocation);
+ if (result != VK_SUCCESS) {
+ DLOG(ERROR) << "vkCreateBuffer() failed. " << result;
+ return false;
+ }
+
+ // ReadPixelsWithCallback() is only called for the separate-texture path.
+ DCHECK(!need_synchronization());
+
+ std::vector<ExternalSemaphore> external_semaphores;
+ if (!BeginAccessInternal(true /* readonly */, &external_semaphores)) {
+ DLOG(ERROR) << "BeginAccess() failed.";
+ vma::DestroyBuffer(allocator, stage_buffer, stage_allocation);
+ return false;
+ }
+ DCHECK(external_semaphores.empty());
+
+ auto command_buffer = command_pool_->CreatePrimaryCommandBuffer();
+ CHECK(command_buffer);
+ {
+ ScopedSingleUseCommandBufferRecorder recorder(*command_buffer);
+ GrVkImageInfo image_info;
+ bool success = backend_texture_.getVkImageInfo(&image_info);
+ DCHECK(success);
+ if (image_info.fImageLayout != VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL) {
+ command_buffer->TransitionImageLayout(
+ image_info.fImage, image_info.fImageLayout,
+ VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL);
+ backend_texture_.setVkImageLayout(VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL);
+ }
+ uint32_t buffer_width =
+ stride ? stride * 8 / BitsPerPixel(format()) : size().width();
+ command_buffer->CopyImageToBuffer(stage_buffer, image_info.fImage,
+ buffer_width, size().height(),
+ size().width(), size().height());
+ }
+
+ command_buffer->Submit(0, nullptr, 0, nullptr);
+ command_buffer->Wait(UINT64_MAX);
+ command_buffer->Destroy();
+ EndAccessInternal(true /* readonly */, ExternalSemaphore());
+
+ void* buffer = nullptr;
+ result = vma::MapMemory(allocator, stage_allocation, &buffer);
+ if (result != VK_SUCCESS) {
+ DLOG(ERROR) << "vma::MapMemory() failed. " << result;
+ vma::DestroyBuffer(allocator, stage_buffer, stage_allocation);
+ return false;
+ }
+
+ std::move(callback).Run(buffer);
+ vma::UnmapMemory(allocator, stage_allocation);
+ vma::DestroyBuffer(allocator, stage_buffer, stage_allocation);
+
+ return true;
+}
+
bool ExternalVkImageBacking::WritePixelsWithData(
base::span<const uint8_t> pixel_data,
size_t stride) {
@@ -1027,14 +1060,12 @@ void ExternalVkImageBacking::CopyPixelsFromGLTextureToVkImage() {
gl::GLApi* api = gl::g_current_gl_context;
GLuint framebuffer;
- GLint old_framebuffer;
- api->glGetIntegervFn(GL_READ_FRAMEBUFFER_BINDING, &old_framebuffer);
api->glGenFramebuffersEXTFn(1, &framebuffer);
- api->glBindFramebufferEXTFn(GL_READ_FRAMEBUFFER, framebuffer);
+ gl::ScopedFramebufferBinder scoped_framebuffer_binder(framebuffer);
api->glFramebufferTexture2DEXTFn(GL_READ_FRAMEBUFFER, GL_COLOR_ATTACHMENT0,
GL_TEXTURE_2D, texture_service_id, 0);
- GLenum status = api->glCheckFramebufferStatusEXTFn(GL_READ_FRAMEBUFFER);
- DCHECK_EQ(status, static_cast<GLenum>(GL_FRAMEBUFFER_COMPLETE))
+ DCHECK_EQ(api->glCheckFramebufferStatusEXTFn(GL_READ_FRAMEBUFFER),
+ static_cast<GLenum>(GL_FRAMEBUFFER_COMPLETE))
<< "CheckFramebufferStatusEXT() failed.";
base::CheckedNumeric<size_t> checked_size = bytes_per_pixel;
@@ -1042,10 +1073,11 @@ void ExternalVkImageBacking::CopyPixelsFromGLTextureToVkImage() {
checked_size *= size().height();
DCHECK(checked_size.IsValid());
- ScopedPixelStore pack_row_length(api, GL_PACK_ROW_LENGTH, 0);
- ScopedPixelStore pack_skip_pixels(api, GL_PACK_SKIP_PIXELS, 0);
- ScopedPixelStore pack_skip_rows(api, GL_PACK_SKIP_ROWS, 0);
- ScopedPixelStore pack_alignment(api, GL_PACK_ALIGNMENT, 1);
+ gl::ScopedPixelStore pack_row_length(GL_PACK_ROW_LENGTH, 0);
+ gl::ScopedPixelStore pack_skip_pixels(GL_PACK_SKIP_PIXELS, 0);
+ gl::ScopedPixelStore pack_skip_rows(GL_PACK_SKIP_ROWS, 0);
+ // Use 1-byte alignment to match the tightly packed Vulkan buffer copy.
+ gl::ScopedPixelStore pack_alignment(GL_PACK_ALIGNMENT, 1);
WritePixelsWithCallback(
checked_size.ValueOrDie(), 0,
@@ -1057,10 +1089,62 @@ void ExternalVkImageBacking::CopyPixelsFromGLTextureToVkImage() {
DCHECK_EQ(api->glGetErrorFn(), static_cast<GLenum>(GL_NO_ERROR));
},
api, size(), gl_format, gl_type));
- api->glBindFramebufferEXTFn(GL_READ_FRAMEBUFFER, old_framebuffer);
api->glDeleteFramebuffersEXTFn(1, &framebuffer);
}
+void ExternalVkImageBacking::CopyPixelsFromVkImageToGLTexture() {
+ DCHECK(use_separate_gl_texture());
+ DCHECK_NE(!!texture_, !!texture_passthrough_);
+ const GLuint texture_service_id =
+ texture_ ? texture_->service_id() : texture_passthrough_->service_id();
+
+ DCHECK_GE(format(), 0);
+ DCHECK_LE(format(), viz::RESOURCE_FORMAT_MAX);
+ auto gl_format = kFormatTable[format()].gl_format;
+ auto gl_type = kFormatTable[format()].gl_type;
+ auto bytes_per_pixel = kFormatTable[format()].bytes_per_pixel;
+
+ if (gl_format == GL_ZERO) {
+ NOTREACHED() << "Unsupported resource format=" << format();
+ return;
+ }
+
+ // Make sure the GrContext is not using GL, so we don't need to reset it.
+ DCHECK(!context_state_->GrContextIsGL());
+
+ // Make sure a GL context is current. Since textures are shared between all
+ // GL contexts, it doesn't matter which one is current.
+ if (!gl::GLContext::GetCurrent() &&
+ !context_state_->MakeCurrent(nullptr, true /* needs_gl */))
+ return;
+
+ gl::GLApi* api = gl::g_current_gl_context;
+ base::CheckedNumeric<size_t> checked_size = bytes_per_pixel;
+ checked_size *= size().width();
+ checked_size *= size().height();
+ DCHECK(checked_size.IsValid());
+
+ gl::ScopedTextureBinder scoped_texture_binder(GL_TEXTURE_2D,
+ texture_service_id);
+
+ gl::ScopedPixelStore unpack_row_length(GL_UNPACK_ROW_LENGTH, 0);
+ gl::ScopedPixelStore unpack_skip_pixels(GL_UNPACK_SKIP_PIXELS, 0);
+ gl::ScopedPixelStore unpack_skip_rows(GL_UNPACK_SKIP_ROWS, 0);
+ // Use 1-byte alignment to match the tightly packed Vulkan buffer copy.
+ gl::ScopedPixelStore unpack_alignment(GL_UNPACK_ALIGNMENT, 1);
+
+ ReadPixelsWithCallback(
+ checked_size.ValueOrDie(), 0,
+ base::BindOnce(
+ [](gl::GLApi* api, const gfx::Size& size, GLenum format, GLenum type,
+ const void* buffer) {
+ api->glTexSubImage2DFn(GL_TEXTURE_2D, 0, 0, 0, size.width(),
+ size.height(), format, type, buffer);
+ DCHECK_EQ(api->glGetErrorFn(), static_cast<GLenum>(GL_NO_ERROR));
+ },
+ api, size(), gl_format, gl_type));
+}
+
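Editor's note: both copy helpers rely on gl::ScopedPixelStore and gl::ScopedTextureBinder to save and restore GL state, replacing the hand-rolled glGetIntegerv/rebind pairs this patch deletes. A minimal sketch of the RAII idiom those helpers implement — illustrative only, not the real ui/gl classes:

    class ScopedPixelStoreSketch {
     public:
      ScopedPixelStoreSketch(GLenum pname, GLint value) : pname_(pname) {
        glGetIntegerv(pname_, &old_value_);  // remember the previous value
        glPixelStorei(pname_, value);        // apply the temporary value
      }
      ~ScopedPixelStoreSketch() { glPixelStorei(pname_, old_value_); }

     private:
      const GLenum pname_;
      GLint old_value_ = 0;
    };

Constructing one of these on the stack scopes the state change to the enclosing block, which is why the explicit restore calls in the old code could simply be removed.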
void ExternalVkImageBacking::CopyPixelsFromShmToGLTexture() {
DCHECK(use_separate_gl_texture());
DCHECK_NE(!!texture_, !!texture_passthrough_);
@@ -1088,9 +1172,14 @@ void ExternalVkImageBacking::CopyPixelsFromShmToGLTexture() {
return;
gl::GLApi* api = gl::g_current_gl_context;
- GLint old_texture;
- api->glGetIntegervFn(GL_TEXTURE_BINDING_2D, &old_texture);
- api->glBindTextureFn(GL_TEXTURE_2D, texture_service_id);
+ gl::ScopedTextureBinder scoped_texture_binder(GL_TEXTURE_2D,
+ texture_service_id);
+
+ gl::ScopedPixelStore unpack_row_length(GL_UNPACK_ROW_LENGTH, 0);
+ gl::ScopedPixelStore unpack_skip_pixels(GL_UNPACK_SKIP_PIXELS, 0);
+ gl::ScopedPixelStore unpack_skip_rows(GL_UNPACK_SKIP_ROWS, 0);
+ // Chrome uses 4-byte alignment for shared memory GMBs.
+ gl::ScopedPixelStore unpack_alignment(GL_UNPACK_ALIGNMENT, 4);
base::CheckedNumeric<size_t> checked_size = bytes_per_pixel;
checked_size *= size().width();
@@ -1102,7 +1191,6 @@ void ExternalVkImageBacking::CopyPixelsFromShmToGLTexture() {
size().height(), gl_format, gl_type,
pixel_data.data());
DCHECK_EQ(api->glGetErrorFn(), static_cast<GLenum>(GL_NO_ERROR));
- api->glBindTextureFn(GL_TEXTURE_2D, old_texture);
}
bool ExternalVkImageBacking::BeginAccessInternal(
diff --git a/chromium/gpu/command_buffer/service/external_vk_image_backing.h b/chromium/gpu/command_buffer/service/external_vk_image_backing.h
index 1bcb5362b49..d61957a6189 100644
--- a/chromium/gpu/command_buffer/service/external_vk_image_backing.h
+++ b/chromium/gpu/command_buffer/service/external_vk_image_backing.h
@@ -151,7 +151,8 @@ class ExternalVkImageBacking final : public ClearTrackingSharedImageBacking {
std::unique_ptr<SharedImageRepresentationDawn> ProduceDawn(
SharedImageManager* manager,
MemoryTypeTracker* tracker,
- WGPUDevice dawnDevice) override;
+ WGPUDevice dawnDevice,
+ WGPUBackendType backend_type) override;
std::unique_ptr<SharedImageRepresentationGLTexture> ProduceGLTexture(
SharedImageManager* manager,
MemoryTypeTracker* tracker) override;
@@ -172,16 +173,21 @@ class ExternalVkImageBacking final : public ClearTrackingSharedImageBacking {
// Returns texture_service_id for ProduceGLTexture and GLTexturePassthrough.
GLuint ProduceGLTextureInternal();
- using FillBufferCallback = base::OnceCallback<void(void* buffer)>;
+ using WriteBufferCallback = base::OnceCallback<void(void* buffer)>;
// TODO(penghuang): Remove it when GrContext::updateBackendTexture() supports
// compressed textures and callbacks.
bool WritePixelsWithCallback(size_t data_size,
size_t stride,
- FillBufferCallback callback);
+ WriteBufferCallback callback);
+ using ReadBufferCallback = base::OnceCallback<void(const void* buffer)>;
+ bool ReadPixelsWithCallback(size_t data_size,
+ size_t stride,
+ ReadBufferCallback callback);
bool WritePixelsWithData(base::span<const uint8_t> pixel_data, size_t stride);
bool WritePixels();
void CopyPixelsFromGLTextureToVkImage();
void CopyPixelsFromShmToGLTexture();
+ void CopyPixelsFromVkImageToGLTexture();
scoped_refptr<SharedContextState> context_state_;
std::unique_ptr<VulkanImage> image_;
diff --git a/chromium/gpu/command_buffer/service/external_vk_image_dawn_representation.cc b/chromium/gpu/command_buffer/service/external_vk_image_dawn_representation.cc
index 3a3109297b4..867c51c0040 100644
--- a/chromium/gpu/command_buffer/service/external_vk_image_dawn_representation.cc
+++ b/chromium/gpu/command_buffer/service/external_vk_image_dawn_representation.cc
@@ -51,7 +51,8 @@ WGPUTexture ExternalVkImageDawnRepresentation::BeginAccess(
texture_descriptor.format = wgpu_format_;
texture_descriptor.usage = usage;
texture_descriptor.dimension = WGPUTextureDimension_2D;
- texture_descriptor.size = {size().width(), size().height(), 1};
+ texture_descriptor.size = {static_cast<uint32_t>(size().width()),
+ static_cast<uint32_t>(size().height()), 1};
texture_descriptor.mipLevelCount = 1;
texture_descriptor.sampleCount = 1;
diff --git a/chromium/gpu/command_buffer/service/external_vk_image_factory.cc b/chromium/gpu/command_buffer/service/external_vk_image_factory.cc
index 115071b8b44..3e3ac17a526 100644
--- a/chromium/gpu/command_buffer/service/external_vk_image_factory.cc
+++ b/chromium/gpu/command_buffer/service/external_vk_image_factory.cc
@@ -4,6 +4,7 @@
#include "gpu/command_buffer/service/external_vk_image_factory.h"
+#include "build/build_config.h"
#include "components/viz/common/gpu/vulkan_context_provider.h"
#include "components/viz/common/resources/resource_format_utils.h"
#include "gpu/command_buffer/service/external_vk_image_backing.h"
@@ -23,19 +24,31 @@ namespace {
VkImageUsageFlags GetMaximalImageUsageFlags(
VkFormatFeatureFlags feature_flags) {
VkImageUsageFlags usage_flags = 0;
+ // The TRANSFER_SRC/DST format features were added in Vulkan 1.1 and their
+ // support is required when SAMPLED_IMAGE is supported. In Vulkan 1.0 all
+ // formats support these features implicitly. See discussion in
+ // https://github.com/KhronosGroup/Vulkan-Docs/issues/1223
if (feature_flags & VK_FORMAT_FEATURE_SAMPLED_IMAGE_BIT)
- usage_flags |= VK_IMAGE_USAGE_SAMPLED_BIT;
- if (feature_flags & VK_FORMAT_FEATURE_STORAGE_IMAGE_BIT)
- usage_flags |= VK_IMAGE_USAGE_STORAGE_BIT;
+ usage_flags |= VK_IMAGE_USAGE_SAMPLED_BIT |
+ VK_IMAGE_USAGE_TRANSFER_SRC_BIT |
+ VK_IMAGE_USAGE_TRANSFER_DST_BIT;
+
+ // VUID-VkImageViewCreateInfo-usage-02652: support for INPUT_ATTACHMENT is
+ // implied by both COLOR_ATTACHMENT and DEPTH_STENCIL_ATTACHMENT.
if (feature_flags & VK_FORMAT_FEATURE_COLOR_ATTACHMENT_BIT)
- usage_flags |= VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT;
+ usage_flags |= VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT |
+ VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT;
if (feature_flags & VK_FORMAT_FEATURE_DEPTH_STENCIL_ATTACHMENT_BIT)
- usage_flags |= VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT;
+ usage_flags |= VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT |
+ VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT;
+
+ if (feature_flags & VK_FORMAT_FEATURE_STORAGE_IMAGE_BIT)
+ usage_flags |= VK_IMAGE_USAGE_STORAGE_BIT;
if (feature_flags & VK_FORMAT_FEATURE_TRANSFER_SRC_BIT)
usage_flags |= VK_IMAGE_USAGE_TRANSFER_SRC_BIT;
if (feature_flags & VK_FORMAT_FEATURE_TRANSFER_DST_BIT)
usage_flags |= VK_IMAGE_USAGE_TRANSFER_DST_BIT;
- usage_flags |= VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT;
+
return usage_flags;
}
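Editor's note: a typical call site for GetMaximalImageUsageFlags() feeds it the format features the driver reports for optimal tiling. A hedged sketch, where `physical_device` and `format` are assumptions:

    VkFormatProperties props;
    vkGetPhysicalDeviceFormatProperties(physical_device, format, &props);
    VkImageUsageFlags usage =
        GetMaximalImageUsageFlags(props.optimalTilingFeatures);
    // |usage| can then be passed as VkImageCreateInfo::usage.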
@@ -143,4 +156,37 @@ bool ExternalVkImageFactory::CanImportGpuMemoryBuffer(
memory_buffer_type == gfx::SHARED_MEMORY_BUFFER;
}
+bool ExternalVkImageFactory::IsSupported(uint32_t usage,
+ viz::ResourceFormat format,
+ bool thread_safe,
+ gfx::GpuMemoryBufferType gmb_type,
+ GrContextType gr_context_type,
+ bool* allow_legacy_mailbox,
+ bool is_pixel_used) {
+ if (gmb_type != gfx::EMPTY_BUFFER && !CanImportGpuMemoryBuffer(gmb_type)) {
+ return false;
+ }
+ // TODO(crbug.com/969114): Not all shared image factory implementations
+ // support concurrent read/write usage.
+ if (usage & SHARED_IMAGE_USAGE_CONCURRENT_READ_WRITE) {
+ return false;
+ }
+ if (thread_safe) {
+ LOG(ERROR) << "ExternalVkImageFactory currently do not support "
+ "cross-thread usage.";
+ return false;
+ }
+
+#if defined(OS_ANDROID)
+ // Scanout on Android requires explicit fence synchronization which is only
+ // supported by the interop factory.
+ if (usage & SHARED_IMAGE_USAGE_SCANOUT) {
+ return false;
+ }
+#endif
+
+ *allow_legacy_mailbox = false;
+ return true;
+}
+
} // namespace gpu
diff --git a/chromium/gpu/command_buffer/service/external_vk_image_factory.h b/chromium/gpu/command_buffer/service/external_vk_image_factory.h
index 96417b3d5da..055aedc6971 100644
--- a/chromium/gpu/command_buffer/service/external_vk_image_factory.h
+++ b/chromium/gpu/command_buffer/service/external_vk_image_factory.h
@@ -60,8 +60,13 @@ class GPU_GLES2_EXPORT ExternalVkImageFactory
GrSurfaceOrigin surface_origin,
SkAlphaType alpha_type,
uint32_t usage) override;
- bool CanImportGpuMemoryBuffer(
- gfx::GpuMemoryBufferType memory_buffer_type) override;
+ bool IsSupported(uint32_t usage,
+ viz::ResourceFormat format,
+ bool thread_safe,
+ gfx::GpuMemoryBufferType gmb_type,
+ GrContextType gr_context_type,
+ bool* allow_legacy_mailbox,
+ bool is_pixel_used) override;
private:
VkResult CreateExternalVkImage(VkFormat format,
@@ -70,6 +75,8 @@ class GPU_GLES2_EXPORT ExternalVkImageFactory
void TransitionToColorAttachment(VkImage image);
+ bool CanImportGpuMemoryBuffer(gfx::GpuMemoryBufferType memory_buffer_type);
+
scoped_refptr<SharedContextState> context_state_;
std::unique_ptr<VulkanCommandPool> command_pool_;
diff --git a/chromium/gpu/command_buffer/service/external_vk_image_factory_unittest.cc b/chromium/gpu/command_buffer/service/external_vk_image_factory_unittest.cc
index 6b757d63d09..422eee829ce 100644
--- a/chromium/gpu/command_buffer/service/external_vk_image_factory_unittest.cc
+++ b/chromium/gpu/command_buffer/service/external_vk_image_factory_unittest.cc
@@ -161,8 +161,8 @@ TEST_F(ExternalVkImageFactoryTest, DawnWrite_SkiaVulkanRead) {
{
// Create a Dawn representation to clear the texture contents to a green.
auto dawn_representation =
- shared_image_representation_factory_->ProduceDawn(mailbox,
- dawn_device_.Get());
+ shared_image_representation_factory_->ProduceDawn(
+ mailbox, dawn_device_.Get(), WGPUBackendType_Vulkan);
ASSERT_TRUE(dawn_representation);
auto dawn_scoped_access = dawn_representation->BeginScopedAccess(
@@ -297,10 +297,12 @@ TEST_F(ExternalVkImageFactoryTest, SkiaVulkanWrite_DawnRead) {
SkCanvas* dest_canvas = dest_surface->getCanvas();
// Color the top half blue, and the bottom half green
- dest_canvas->drawRect(SkRect{0, 0, size.width(), size.height() / 2},
- SkPaint(SkColors::kBlue));
dest_canvas->drawRect(
- SkRect{0, size.height() / 2, size.width(), size.height()},
+ SkRect{0, 0, static_cast<SkScalar>(size.width()), size.height() / 2.0f},
+ SkPaint(SkColors::kBlue));
+ dest_canvas->drawRect(
+ SkRect{0, size.height() / 2.0f, static_cast<SkScalar>(size.width()),
+ static_cast<SkScalar>(size.height())},
SkPaint(SkColors::kGreen));
skia_representation->SetCleared();
@@ -322,8 +324,8 @@ TEST_F(ExternalVkImageFactoryTest, SkiaVulkanWrite_DawnRead) {
{
// Create a Dawn representation
auto dawn_representation =
- shared_image_representation_factory_->ProduceDawn(mailbox,
- dawn_device_.Get());
+ shared_image_representation_factory_->ProduceDawn(
+ mailbox, dawn_device_.Get(), WGPUBackendType_Vulkan);
ASSERT_TRUE(dawn_representation);
// Begin access to copy the data out. Skia should have initialized the
@@ -353,9 +355,9 @@ TEST_F(ExternalVkImageFactoryTest, SkiaVulkanWrite_DawnRead) {
dst_copy_view.buffer = dst_buffer;
dst_copy_view.layout.bytesPerRow = 256;
dst_copy_view.layout.offset = 0;
- dst_copy_view.layout.rowsPerImage = 0;
- wgpu::Extent3D copy_extent = {size.width(), size.height(), 1};
+ wgpu::Extent3D copy_extent = {static_cast<uint32_t>(size.width()),
+ static_cast<uint32_t>(size.height()), 1};
encoder.CopyTextureToBuffer(&src_copy_view, &dst_copy_view, &copy_extent);
}
diff --git a/chromium/gpu/command_buffer/service/external_vk_image_overlay_representation.cc b/chromium/gpu/command_buffer/service/external_vk_image_overlay_representation.cc
index f478d513e06..1a61a9aa794 100644
--- a/chromium/gpu/command_buffer/service/external_vk_image_overlay_representation.cc
+++ b/chromium/gpu/command_buffer/service/external_vk_image_overlay_representation.cc
@@ -38,7 +38,7 @@ SemaphoreHandle GpuFenceHandleToSemaphoreHandle(
// VkSemaphore, which can then be used to submit present work, see
// https://fuchsia.dev/reference/fidl/fuchsia.ui.scenic.
return SemaphoreHandle(
- VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_TEMP_ZIRCON_EVENT_BIT_FUCHSIA,
+ VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_ZIRCON_EVENT_BIT_FUCHSIA,
std::move(fence_handle.owned_event));
#elif defined(OS_POSIX)
return SemaphoreHandle(VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT_KHR,
@@ -46,8 +46,9 @@ SemaphoreHandle GpuFenceHandleToSemaphoreHandle(
#elif defined(OS_WIN)
return SemaphoreHandle(VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_WIN32_BIT,
std::move(fence_handle.owned_handle));
-#endif // defined(OS_FUCHSIA)
+#else
return SemaphoreHandle();
+#endif // defined(OS_FUCHSIA)
}
} // namespace
diff --git a/chromium/gpu/command_buffer/service/external_vk_image_skia_representation.cc b/chromium/gpu/command_buffer/service/external_vk_image_skia_representation.cc
index 60e8f44e868..2eab6fe557f 100644
--- a/chromium/gpu/command_buffer/service/external_vk_image_skia_representation.cc
+++ b/chromium/gpu/command_buffer/service/external_vk_image_skia_representation.cc
@@ -201,7 +201,8 @@ sk_sp<SkPromiseImageTexture> ExternalVkImageSkiaRepresentation::BeginAccess(
// Create an |end_access_semaphore_| which will be signalled by the caller.
end_access_semaphore_ =
backing_impl()->external_semaphore_pool()->GetOrCreateSemaphore();
- DCHECK(end_access_semaphore_);
+ if (!end_access_semaphore_)
+ return nullptr;
end_semaphores->emplace_back();
end_semaphores->back().initVulkan(end_access_semaphore_.GetVkSemaphore());
}
diff --git a/chromium/gpu/command_buffer/service/feature_info.cc b/chromium/gpu/command_buffer/service/feature_info.cc
index d8137c45da8..82fa77dba07 100644
--- a/chromium/gpu/command_buffer/service/feature_info.cc
+++ b/chromium/gpu/command_buffer/service/feature_info.cc
@@ -12,9 +12,9 @@
#include "base/command_line.h"
#include "base/containers/contains.h"
+#include "base/cxx17_backports.h"
#include "base/metrics/histogram_functions.h"
#include "base/metrics/histogram_macros.h"
-#include "base/stl_util.h"
#include "base/strings/string_number_conversions.h"
#include "base/strings/string_split.h"
#include "build/build_config.h"
@@ -278,6 +278,11 @@ void FeatureInfo::InitializeForTesting(ContextType context_type) {
}
bool IsGL_REDSupportedOnFBOs() {
+#if defined(OS_MAC)
+ // The glTexImage2D call below can hang on Mac, so skip this probe there;
+ // it's only needed to work around a Mesa issue. See https://crbug.com/1158744.
+ return true;
+#else
DCHECK(glGetError() == GL_NO_ERROR);
// Skia uses GL_RED with framebuffers. Unfortunately, Mesa claims to support
// GL_EXT_texture_rg, but it doesn't support it on framebuffers. To fix
@@ -309,6 +314,7 @@ bool IsGL_REDSupportedOnFBOs() {
DCHECK(glGetError() == GL_NO_ERROR);
return result;
+#endif // defined(OS_MAC)
}
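Editor's note: the probe performed by the non-Mac path (largely elided by the hunk above) works roughly like the sketch below — allocate a small GL_RED texture, attach it to a framebuffer, and test completeness. This is an illustration of the technique, not the exact elided code; binding save/restore is trimmed.

    GLuint tex = 0, fbo = 0;
    glGenTextures(1, &tex);
    glBindTexture(GL_TEXTURE_2D, tex);
    glTexImage2D(GL_TEXTURE_2D, 0, GL_RED, 1, 1, 0, GL_RED, GL_UNSIGNED_BYTE,
                 nullptr);
    glGenFramebuffers(1, &fbo);
    glBindFramebuffer(GL_FRAMEBUFFER, fbo);
    glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D,
                           tex, 0);
    bool gl_red_works =
        glCheckFramebufferStatus(GL_FRAMEBUFFER) == GL_FRAMEBUFFER_COMPLETE;
    glDeleteFramebuffers(1, &fbo);
    glDeleteTextures(1, &tex);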
void FeatureInfo::EnableCHROMIUMTextureStorageImage() {
diff --git a/chromium/gpu/command_buffer/service/framebuffer_manager_unittest.cc b/chromium/gpu/command_buffer/service/framebuffer_manager_unittest.cc
index c783a2e6673..5dfcffbdb55 100644
--- a/chromium/gpu/command_buffer/service/framebuffer_manager_unittest.cc
+++ b/chromium/gpu/command_buffer/service/framebuffer_manager_unittest.cc
@@ -7,7 +7,7 @@
#include <memory>
-#include "base/stl_util.h"
+#include "base/cxx17_backports.h"
#include "gpu/command_buffer/client/client_test_helper.h"
#include "gpu/command_buffer/service/error_state_mock.h"
#include "gpu/command_buffer/service/feature_info.h"
diff --git a/chromium/gpu/command_buffer/service/gl_utils.h b/chromium/gpu/command_buffer/service/gl_utils.h
index b96b5a4a66b..2e6fd7151fb 100644
--- a/chromium/gpu/command_buffer/service/gl_utils.h
+++ b/chromium/gpu/command_buffer/service/gl_utils.h
@@ -12,6 +12,7 @@
#include "build/build_config.h"
#include "gpu/command_buffer/common/constants.h"
+#include "gpu/gpu_gles2_export.h"
#include "ui/gfx/buffer_types.h"
#include "ui/gfx/geometry/rect.h"
#include "ui/gfx/overlay_transform.h"
@@ -91,9 +92,9 @@ void LogGLDebugMessage(GLenum source,
GLsizei length,
const GLchar* message,
Logger* error_logger);
-void InitializeGLDebugLogging(bool log_non_errors,
- GLDEBUGPROC callback,
- const void* user_param);
+GPU_GLES2_EXPORT void InitializeGLDebugLogging(bool log_non_errors,
+ GLDEBUGPROC callback,
+ const void* user_param);
bool ValidContextLostReason(GLenum reason);
error::ContextLostReason GetContextLostReasonFromResetStatus(
diff --git a/chromium/gpu/command_buffer/service/gles2_cmd_decoder.cc b/chromium/gpu/command_buffer/service/gles2_cmd_decoder.cc
index 33892f4e94f..0f272f31e02 100644
--- a/chromium/gpu/command_buffer/service/gles2_cmd_decoder.cc
+++ b/chromium/gpu/command_buffer/service/gles2_cmd_decoder.cc
@@ -22,17 +22,17 @@
#include "base/bind.h"
#include "base/callback.h"
#include "base/callback_helpers.h"
+#include "base/containers/cxx20_erase.h"
#include "base/containers/flat_set.h"
#include "base/containers/queue.h"
#include "base/containers/span.h"
+#include "base/cxx17_backports.h"
#include "base/debug/alias.h"
#include "base/debug/dump_without_crashing.h"
#include "base/hash/legacy_hash.h"
#include "base/logging.h"
#include "base/metrics/histogram_macros.h"
-#include "base/numerics/ranges.h"
#include "base/numerics/safe_math.h"
-#include "base/stl_util.h"
#include "base/strings/string_number_conversions.h"
#include "base/strings/stringprintf.h"
#include "base/trace_event/trace_event.h"
@@ -90,6 +90,7 @@
#include "ui/gfx/geometry/point.h"
#include "ui/gfx/geometry/rect.h"
#include "ui/gfx/geometry/rect_conversions.h"
+#include "ui/gfx/geometry/rect_f.h"
#include "ui/gfx/geometry/size.h"
#include "ui/gfx/gpu_fence.h"
#include "ui/gfx/gpu_memory_buffer.h"
@@ -4364,6 +4365,9 @@ Capabilities GLES2DecoderImpl::GetCapabilities() {
caps.num_surface_buffers = surface_->GetBufferCount();
caps.mesa_framebuffer_flip_y =
feature_info_->feature_flags().mesa_framebuffer_flip_y;
+ caps.disable_legacy_mailbox =
+ group_->shared_image_manager() &&
+ group_->shared_image_manager()->display_context_on_another_thread();
caps.gpu_memory_buffer_formats =
feature_info_->feature_flags().gpu_memory_buffer_formats;
@@ -4593,9 +4597,6 @@ bool GLES2DecoderImpl::InitializeShaderTranslator() {
if (!workarounds().dont_initialize_uninitialized_locals)
driver_bug_workarounds |= SH_INITIALIZE_UNINITIALIZED_LOCALS;
- resources.WEBGL_debug_shader_precision =
- group_->gpu_preferences().emulate_shader_precision;
-
ShShaderOutput shader_output_language =
ShaderTranslator::GetShaderOutputLanguageForContext(gl_version_info());
@@ -5887,8 +5888,8 @@ error::Error GLES2DecoderImpl::HandleResizeCHROMIUM(
static_assert(sizeof(GLuint) >= sizeof(int), "Unexpected GLuint size.");
static const GLuint kMaxDimension =
static_cast<GLuint>(std::numeric_limits<int>::max());
- width = base::ClampToRange(width, 1U, kMaxDimension);
- height = base::ClampToRange(height, 1U, kMaxDimension);
+ width = base::clamp(width, 1U, kMaxDimension);
+ height = base::clamp(height, 1U, kMaxDimension);
bool is_offscreen = !!offscreen_target_frame_buffer_.get();
if (is_offscreen) {
@@ -8443,13 +8444,13 @@ void GLES2DecoderImpl::DoEnableiOES(GLenum target, GLuint index) {
}
void GLES2DecoderImpl::DoDepthRangef(GLclampf znear, GLclampf zfar) {
- state_.z_near = base::ClampToRange(znear, 0.0f, 1.0f);
- state_.z_far = base::ClampToRange(zfar, 0.0f, 1.0f);
+ state_.z_near = base::clamp(znear, 0.0f, 1.0f);
+ state_.z_far = base::clamp(zfar, 0.0f, 1.0f);
api()->glDepthRangeFn(znear, zfar);
}
void GLES2DecoderImpl::DoSampleCoverage(GLclampf value, GLboolean invert) {
- state_.sample_coverage_value = base::ClampToRange(value, 0.0f, 1.0f);
+ state_.sample_coverage_value = base::clamp(value, 0.0f, 1.0f);
state_.sample_coverage_invert = (invert != 0);
api()->glSampleCoverageFn(state_.sample_coverage_value, invert);
}
@@ -9809,7 +9810,7 @@ void GLES2DecoderImpl::DoRenderbufferStorage(
void GLES2DecoderImpl::DoLineWidth(GLfloat width) {
api()->glLineWidthFn(
- base::ClampToRange(width, line_width_range_[0], line_width_range_[1]));
+ base::clamp(width, line_width_range_[0], line_width_range_[1]));
}
void GLES2DecoderImpl::DoLinkProgram(GLuint program_id) {
@@ -10850,8 +10851,8 @@ bool GLES2DecoderImpl::ValidateStencilStateForDraw(const char* function_name) {
GLuint max_stencil_value = (1 << stencil_bits) - 1;
GLint max_stencil_ref = static_cast<GLint>(max_stencil_value);
bool different_refs =
- base::ClampToRange(state_.stencil_front_ref, 0, max_stencil_ref) !=
- base::ClampToRange(state_.stencil_back_ref, 0, max_stencil_ref);
+ base::clamp(state_.stencil_front_ref, 0, max_stencil_ref) !=
+ base::clamp(state_.stencil_back_ref, 0, max_stencil_ref);
bool different_writemasks =
(state_.stencil_front_writemask & max_stencil_value) !=
(state_.stencil_back_writemask & max_stencil_value);
@@ -13671,7 +13672,9 @@ error::Error GLES2DecoderImpl::HandlePostSubBufferCHROMIUM(
ClearScheduleCALayerState();
if (supports_async_swap_) {
- TRACE_EVENT_ASYNC_BEGIN0("gpu", "AsyncSwapBuffers", c.swap_id());
+ TRACE_EVENT_NESTABLE_ASYNC_BEGIN0(
+ "gpu", "AsyncSwapBuffers",
+ TRACE_ID_WITH_SCOPE("AsyncSwapBuffers", c.swap_id()));
client()->OnSwapBuffers(c.swap_id(), c.flags);
surface_->PostSubBufferAsync(
@@ -13731,7 +13734,7 @@ error::Error GLES2DecoderImpl::HandleScheduleOverlayPlaneCHROMIUM(
c.plane_z_order, transform, image,
gfx::Rect(c.bounds_x, c.bounds_y, c.bounds_width, c.bounds_height),
gfx::RectF(c.uv_x, c.uv_y, c.uv_width, c.uv_height), c.enable_blend,
- std::move(gpu_fence))) {
+ /*damage_rect=*/gfx::Rect(), std::move(gpu_fence))) {
LOCAL_SET_GL_ERROR(GL_INVALID_OPERATION,
"glScheduleOverlayPlaneCHROMIUM",
"failed to schedule overlay");
@@ -15163,7 +15166,7 @@ error::Error GLES2DecoderImpl::HandleTexImage2D(uint32_t immediate_data_size,
texture_state_.tex_image_failed = true;
GLenum target = static_cast<GLenum>(c.target);
GLint level = static_cast<GLint>(c.level);
- GLint internal_format = static_cast<GLint>(c.internalformat);
+ GLenum internal_format = static_cast<GLenum>(c.internalformat);
GLsizei width = static_cast<GLsizei>(c.width);
GLsizei height = static_cast<GLsizei>(c.height);
GLint border = static_cast<GLint>(c.border);
@@ -15270,7 +15273,7 @@ error::Error GLES2DecoderImpl::HandleTexImage3D(uint32_t immediate_data_size,
texture_state_.tex_image_failed = true;
GLenum target = static_cast<GLenum>(c.target);
GLint level = static_cast<GLint>(c.level);
- GLint internal_format = static_cast<GLint>(c.internalformat);
+ GLenum internal_format = static_cast<GLenum>(c.internalformat);
GLsizei width = static_cast<GLsizei>(c.width);
GLsizei height = static_cast<GLsizei>(c.height);
GLsizei depth = static_cast<GLsizei>(c.depth);
@@ -15625,13 +15628,39 @@ void GLES2DecoderImpl::DoCopyTexImage2D(
// ExtractTypeFromStorageFormat will always return UNSIGNED_BYTE for
// unsized formats.
DCHECK(type == GL_UNSIGNED_BYTE);
+ // Changing the internal format here is probably not completely
+ // correct. This is the "effective" internal format, and the spec
+ // says "effective internal format is used by the GL for purposes
+ // such as texture completeness or type checks for CopyTex*
+ // commands". But we don't have a separate concept of "effective"
+ // vs. "actual" internal format, so this will have to do for now. See
+ // Table 3.12 and associated explanatory text in the OpenGL ES 3.0.6
+ // spec for more information.
+ //
+ // Unfortunately, changing the internal format here conflicts with a
+ // macOS workaround flag, so don't do it if the workaround applies.
+ bool attempt_sized_upgrade =
+ (!workarounds().use_intermediary_for_copy_texture_image ||
+ target == GL_TEXTURE_2D) &&
+ (read_format == GL_RGB || read_format == GL_RGB8 ||
+ read_format == GL_RGBA || read_format == GL_RGBA8);
switch (internal_format) {
case GL_RGB:
+ if (attempt_sized_upgrade) {
+ internal_format = GL_RGB8;
+ }
+ break;
case GL_RGBA:
+ if (attempt_sized_upgrade) {
+ internal_format = GL_RGBA8;
+ }
+ break;
case GL_LUMINANCE_ALPHA:
case GL_LUMINANCE:
case GL_ALPHA:
case GL_BGRA_EXT:
+ // There are no GL constants for sized versions of these internal
+ // formats. We'll just go ahead with the unsized ones.
break;
default:
// Other unsized internal_formats are invalid in ES3.
@@ -16999,7 +17028,9 @@ void GLES2DecoderImpl::DoSwapBuffers(uint64_t swap_id, GLbitfield flags) {
api()->glFlushFn();
}
} else if (supports_async_swap_) {
- TRACE_EVENT_ASYNC_BEGIN0("gpu", "AsyncSwapBuffers", swap_id);
+ TRACE_EVENT_NESTABLE_ASYNC_BEGIN0(
+ "gpu", "AsyncSwapBuffers",
+ TRACE_ID_WITH_SCOPE("AsyncSwapBuffers", swap_id));
client()->OnSwapBuffers(swap_id, flags);
surface_->SwapBuffersAsync(
@@ -17019,11 +17050,9 @@ void GLES2DecoderImpl::DoSwapBuffers(uint64_t swap_id, GLbitfield flags) {
void GLES2DecoderImpl::FinishAsyncSwapBuffers(
uint64_t swap_id,
gfx::SwapCompletionResult result) {
- TRACE_EVENT_ASYNC_END0("gpu", "AsyncSwapBuffers", swap_id);
- // Handling of the out-fence should have already happened before reaching
- // this function, so we don't expect to get a valid fence here.
- DCHECK(result.release_fence.is_null());
-
+ TRACE_EVENT_NESTABLE_ASYNC_END0(
+ "gpu", "AsyncSwapBuffers",
+ TRACE_ID_WITH_SCOPE("AsyncSwapBuffers", swap_id));
FinishSwapBuffers(result.swap_result);
}
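Editor's note: this patch migrates the swap instrumentation from the legacy TRACE_EVENT_ASYNC_* macros to the nestable variants, which take an explicit ID object: TRACE_ID_WITH_SCOPE keys the slice by a scope string plus a value, and TRACE_ID_LOCAL marks a process-local pointer ID. A minimal sketch of the pairing rule — both ends must use the same category, name, and ID:

    void StartSwap(uint64_t swap_id) {
      TRACE_EVENT_NESTABLE_ASYNC_BEGIN0(
          "gpu", "AsyncSwapBuffers",
          TRACE_ID_WITH_SCOPE("AsyncSwapBuffers", swap_id));
    }

    void FinishSwap(uint64_t swap_id) {
      TRACE_EVENT_NESTABLE_ASYNC_END0(
          "gpu", "AsyncSwapBuffers",
          TRACE_ID_WITH_SCOPE("AsyncSwapBuffers", swap_id));
    }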
@@ -17486,8 +17515,8 @@ error::Error GLES2DecoderImpl::HandleDescheduleUntilFinishedCHROMIUM(
return error::kNoError;
}
- TRACE_EVENT_ASYNC_BEGIN0("cc", "GLES2DecoderImpl::DescheduleUntilFinished",
- this);
+ TRACE_EVENT_NESTABLE_ASYNC_BEGIN0(
+ "cc", "GLES2DecoderImpl::DescheduleUntilFinished", TRACE_ID_LOCAL(this));
client()->OnDescheduleUntilFinished();
return error::kDeferLaterCommands;
}
@@ -17581,8 +17610,8 @@ void GLES2DecoderImpl::ProcessDescheduleUntilFinished() {
if (!deschedule_until_finished_fences_[0]->HasCompleted())
return;
- TRACE_EVENT_ASYNC_END0("cc", "GLES2DecoderImpl::DescheduleUntilFinished",
- this);
+ TRACE_EVENT_NESTABLE_ASYNC_END0(
+ "cc", "GLES2DecoderImpl::DescheduleUntilFinished", TRACE_ID_LOCAL(this));
deschedule_until_finished_fences_.erase(
deschedule_until_finished_fences_.begin());
client()->OnRescheduleAfterFinished();
@@ -19797,7 +19826,8 @@ void GLES2DecoderImpl::DoScheduleDCLayerCHROMIUM(GLuint texture_0,
return;
}
- ui::DCRendererLayerParams params;
+ std::unique_ptr<ui::DCRendererLayerParams> params =
+ std::make_unique<ui::DCRendererLayerParams>();
GLuint texture_ids[] = {texture_0, texture_1};
size_t i = 0;
for (GLuint texture_id : texture_ids) {
@@ -19816,22 +19846,22 @@ void GLES2DecoderImpl::DoScheduleDCLayerCHROMIUM(GLuint texture_0,
"unsupported texture format");
return;
}
- params.images[i++] = scoped_refptr<gl::GLImage>(image);
+ params->images[i++] = scoped_refptr<gl::GLImage>(image);
}
- params.z_order = z_order;
- params.content_rect =
+ params->z_order = z_order;
+ params->content_rect =
gfx::Rect(content_x, content_y, content_width, content_height);
- params.quad_rect = gfx::Rect(quad_x, quad_y, quad_width, quad_height);
- params.transform =
+ params->quad_rect = gfx::Rect(quad_x, quad_y, quad_width, quad_height);
+ params->transform =
gfx::Transform(transform_c1r1, transform_c2r1, transform_c1r2,
transform_c2r2, transform_tx, transform_ty);
if (is_clipped) {
- params.clip_rect = gfx::Rect(clip_x, clip_y, clip_width, clip_height);
+ params->clip_rect = gfx::Rect(clip_x, clip_y, clip_width, clip_height);
}
- params.protected_video_type =
+ params->protected_video_type =
static_cast<gfx::ProtectedVideoType>(protected_video_type);
- if (!surface_->ScheduleDCLayer(params)) {
+ if (!surface_->ScheduleDCLayer(std::move(params))) {
LOCAL_SET_GL_ERROR(GL_INVALID_OPERATION, "glScheduleDCLayerCHROMIUM",
"failed to schedule DCLayer");
}
diff --git a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough.cc b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough.cc
index 8d6eb4036aa..85bdd3dddc0 100644
--- a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough.cc
+++ b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough.cc
@@ -10,7 +10,7 @@
#include "base/bind.h"
#include "base/callback.h"
-#include "base/stl_util.h"
+#include "base/cxx17_backports.h"
#include "base/strings/string_split.h"
#include "build/build_config.h"
#include "gpu/command_buffer/service/command_buffer_service.h"
@@ -951,8 +951,14 @@ gpu::ContextResult GLES2DecoderPassthroughImpl::Initialize(
static constexpr const char* kRequiredFunctionalityExtensions[] = {
"GL_ANGLE_framebuffer_blit",
+#if defined(OS_FUCHSIA)
+ "GL_ANGLE_memory_object_fuchsia",
+#endif
"GL_ANGLE_memory_size",
"GL_ANGLE_native_id",
+#if defined(OS_FUCHSIA)
+ "GL_ANGLE_semaphore_fuchsia",
+#endif
"GL_ANGLE_texture_storage_external",
"GL_ANGLE_texture_usage",
"GL_CHROMIUM_bind_uniform_location",
@@ -1116,6 +1122,7 @@ gpu::ContextResult GLES2DecoderPassthroughImpl::Initialize(
bound_buffers_[GL_DRAW_INDIRECT_BUFFER] = 0;
bound_buffers_[GL_DISPATCH_INDIRECT_BUFFER] = 0;
}
+ bound_element_array_buffer_dirty_ = false;
if (feature_info_->feature_flags().chromium_texture_filtering_hint) {
api()->glHintFn(GL_TEXTURE_FILTERING_HINT_CHROMIUM, GL_NICEST);
@@ -1679,6 +1686,9 @@ gpu::Capabilities GLES2DecoderPassthroughImpl::GetCapabilities() {
feature_info_->feature_flags().gpu_memory_buffer_formats;
caps.texture_target_exception_list =
group_->gpu_preferences().texture_target_exception_list;
+ caps.disable_legacy_mailbox =
+ group_->shared_image_manager() &&
+ group_->shared_image_manager()->display_context_on_another_thread();
return caps;
}
@@ -2239,6 +2249,9 @@ error::Error GLES2DecoderPassthroughImpl::PatchGetBufferResults(GLenum target,
// If there was no error, the buffer target should exist
DCHECK(bound_buffers_.find(target) != bound_buffers_.end());
+ if (target == GL_ELEMENT_ARRAY_BUFFER) {
+ LazilyUpdateCurrentlyBoundElementArrayBuffer();
+ }
GLuint current_client_buffer = bound_buffers_[target];
auto mapped_buffer_info_iter =
@@ -2760,8 +2773,9 @@ void GLES2DecoderPassthroughImpl::ProcessDescheduleUntilFinished() {
return;
}
- TRACE_EVENT_ASYNC_END0(
- "cc", "GLES2DecoderPassthroughImpl::DescheduleUntilFinished", this);
+ TRACE_EVENT_NESTABLE_ASYNC_END0(
+ "cc", "GLES2DecoderPassthroughImpl::DescheduleUntilFinished",
+ TRACE_ID_LOCAL(this));
deschedule_until_finished_fences_.erase(
deschedule_until_finished_fences_.begin());
client()->OnRescheduleAfterFinished();
@@ -2885,7 +2899,11 @@ void GLES2DecoderPassthroughImpl::UpdateTextureSizeFromClientID(
}
}
-void GLES2DecoderPassthroughImpl::UpdateCurrentlyBoundElementArrayBuffer() {
+void GLES2DecoderPassthroughImpl::
+ LazilyUpdateCurrentlyBoundElementArrayBuffer() {
+ if (!bound_element_array_buffer_dirty_)
+ return;
+
GLint service_element_array_buffer = 0;
api_->glGetIntegervFn(GL_ELEMENT_ARRAY_BUFFER_BINDING,
&service_element_array_buffer);
@@ -2898,6 +2916,7 @@ void GLES2DecoderPassthroughImpl::UpdateCurrentlyBoundElementArrayBuffer() {
}
bound_buffers_[GL_ELEMENT_ARRAY_BUFFER] = client_element_array_buffer;
+ bound_element_array_buffer_dirty_ = false;
}
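Editor's note: the dirty-flag scheme above trades an eager glGetIntegerv() on every VAO bind for a deferred query that runs only when the cached binding is actually consulted. A hedged sketch of the pattern, with illustrative names:

    struct ElementArrayCache {
      bool dirty = false;
      GLuint cached_binding = 0;

      // Binding a VAO implicitly changes GL_ELEMENT_ARRAY_BUFFER_BINDING,
      // so just mark the cache stale instead of querying the driver now.
      void OnBindVertexArray() { dirty = true; }

      GLuint Get() {
        if (dirty) {
          GLint id = 0;
          glGetIntegerv(GL_ELEMENT_ARRAY_BUFFER_BINDING, &id);  // expensive
          cached_binding = static_cast<GLuint>(id);
          dirty = false;
        }
        return cached_binding;
      }
    };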
error::Error GLES2DecoderPassthroughImpl::HandleSetActiveURLCHROMIUM(
@@ -2997,11 +3016,9 @@ void GLES2DecoderPassthroughImpl::CheckSwapBuffersAsyncResult(
const char* function_name,
uint64_t swap_id,
gfx::SwapCompletionResult result) {
- TRACE_EVENT_ASYNC_END0("gpu", "AsyncSwapBuffers", swap_id);
- // Handling of the out-fence should have already happened before reaching
- // this function, so we don't expect to get a valid fence here.
- DCHECK(result.release_fence.is_null());
-
+ TRACE_EVENT_NESTABLE_ASYNC_END0(
+ "gpu", "AsyncSwapBuffers",
+ TRACE_ID_WITH_SCOPE("AsyncSwapBuffers", swap_id));
CheckSwapBuffersResult(result.swap_result, function_name);
}
diff --git a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough.h b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough.h
index fbd80a8ebb2..227995e6973 100644
--- a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough.h
+++ b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough.h
@@ -456,8 +456,11 @@ class GPU_GLES2_EXPORT GLES2DecoderPassthroughImpl
void UpdateTextureSizeFromClientID(GLuint client_id);
// Some operations like binding a VAO will update the element array buffer
- // binding without an explicit glBindBuffer.
- void UpdateCurrentlyBoundElementArrayBuffer();
+ // binding without an explicit glBindBuffer. This function is extremely
+ // expensive, and it is crucial that it be called only when the command
+ // decoder's notion of the element array buffer absolutely has to be
+ // up-to-date.
+ void LazilyUpdateCurrentlyBoundElementArrayBuffer();
error::Error BindTexImage2DCHROMIUMImpl(GLenum target,
GLenum internalformat,
@@ -654,6 +657,8 @@ class GPU_GLES2_EXPORT GLES2DecoderPassthroughImpl
// State tracking of currently bound buffers
std::unordered_map<GLenum, GLuint> bound_buffers_;
+ // Lazy tracking of the bound element array buffer when changing VAOs.
+ bool bound_element_array_buffer_dirty_;
// Track the service-id to type of all queries for validation
struct QueryInfo {
diff --git a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough_doers.cc b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough_doers.cc
index 414773e6097..88b068c0fc2 100644
--- a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough_doers.cc
+++ b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough_doers.cc
@@ -7,8 +7,9 @@
#include <memory>
#include "base/callback_helpers.h"
+#include "base/containers/cxx20_erase.h"
+#include "base/cxx17_backports.h"
#include "base/metrics/histogram_macros.h"
-#include "base/numerics/ranges.h"
#include "base/strings/string_number_conversions.h"
#include "build/build_config.h"
#include "gpu/command_buffer/common/discardable_handle.h"
@@ -23,47 +24,12 @@
#include "ui/gfx/geometry/rect_conversions.h"
#include "ui/gl/ca_renderer_layer_params.h"
#include "ui/gl/dc_renderer_layer_params.h"
+#include "ui/gl/gl_utils.h"
#include "ui/gl/gl_version_info.h"
namespace gpu {
namespace gles2 {
-// Temporarily allows compilation of shaders that use the
-// ARB_texture_rectangle/ANGLE_texture_rectangle extension. We don't want to
-// expose the extension to WebGL user shaders but we still need to use it for
-// parts of the implementation on macOS. Note that the extension is always
-// enabled on macOS and this only controls shader compilation.
-class GLES2DecoderPassthroughImpl::
- ScopedEnableTextureRectangleInShaderCompiler {
- public:
- ScopedEnableTextureRectangleInShaderCompiler(
- const ScopedEnableTextureRectangleInShaderCompiler&) = delete;
- ScopedEnableTextureRectangleInShaderCompiler& operator=(
- const ScopedEnableTextureRectangleInShaderCompiler&) = delete;
-
- // This class is a no-op except on macOS.
-#if !defined(OS_MAC)
- explicit ScopedEnableTextureRectangleInShaderCompiler(
- GLES2DecoderPassthroughImpl* decoder) {}
-
- private:
-#else
- explicit ScopedEnableTextureRectangleInShaderCompiler(
- GLES2DecoderPassthroughImpl* decoder)
- : decoder_(decoder) {
- if (decoder_->feature_info_->IsWebGLContext())
- decoder_->api_->glEnableFn(GL_TEXTURE_RECTANGLE_ANGLE);
- }
- ~ScopedEnableTextureRectangleInShaderCompiler() {
- if (decoder_->feature_info_->IsWebGLContext())
- decoder_->api_->glDisableFn(GL_TEXTURE_RECTANGLE_ANGLE);
- }
-
- private:
- GLES2DecoderPassthroughImpl* decoder_;
-#endif
-};
-
namespace {
template <typename ClientType, typename ServiceType, typename GenFunction>
@@ -426,6 +392,9 @@ error::Error GLES2DecoderPassthroughImpl::DoBindBuffer(GLenum target,
DCHECK(bound_buffers_.find(target) != bound_buffers_.end());
bound_buffers_[target] = buffer;
+ if (target == GL_ELEMENT_ARRAY_BUFFER) {
+ bound_element_array_buffer_dirty_ = false;
+ }
return error::kNoError;
}
@@ -443,6 +412,9 @@ error::Error GLES2DecoderPassthroughImpl::DoBindBufferBase(GLenum target,
DCHECK(bound_buffers_.find(target) != bound_buffers_.end());
bound_buffers_[target] = buffer;
+ if (target == GL_ELEMENT_ARRAY_BUFFER) {
+ bound_element_array_buffer_dirty_ = false;
+ }
return error::kNoError;
}
@@ -463,6 +435,9 @@ error::Error GLES2DecoderPassthroughImpl::DoBindBufferRange(GLenum target,
DCHECK(bound_buffers_.find(target) != bound_buffers_.end());
bound_buffers_[target] = buffer;
+ if (target == GL_ELEMENT_ARRAY_BUFFER) {
+ bound_element_array_buffer_dirty_ = false;
+ }
return error::kNoError;
}
@@ -678,6 +653,10 @@ error::Error GLES2DecoderPassthroughImpl::DoBufferData(GLenum target,
return error::kNoError;
}
+ if (target == GL_ELEMENT_ARRAY_BUFFER) {
+ LazilyUpdateCurrentlyBoundElementArrayBuffer();
+ }
+
// Calling buffer data on a mapped buffer will implicitly unmap it
resources_->mapped_buffer_map.erase(bound_buffers_[target]);
@@ -997,6 +976,8 @@ error::Error GLES2DecoderPassthroughImpl::DoDeleteBuffers(
return error::kNoError;
}
+ LazilyUpdateCurrentlyBoundElementArrayBuffer();
+
std::vector<GLuint> service_ids(n, 0);
for (GLsizei ii = 0; ii < n; ++ii) {
GLuint client_id = buffers[ii];
@@ -1320,6 +1301,10 @@ error::Error GLES2DecoderPassthroughImpl::DoFlushMappedBufferRange(
GLenum target,
GLintptr offset,
GLsizeiptr size) {
+ if (target == GL_ELEMENT_ARRAY_BUFFER) {
+ LazilyUpdateCurrentlyBoundElementArrayBuffer();
+ }
+
auto bound_buffers_iter = bound_buffers_.find(target);
if (bound_buffers_iter == bound_buffers_.end() ||
bound_buffers_iter->second == 0) {
@@ -3815,7 +3800,7 @@ error::Error GLES2DecoderPassthroughImpl::DoIsVertexArrayOES(GLuint array,
error::Error GLES2DecoderPassthroughImpl::DoBindVertexArrayOES(GLuint array) {
api()->glBindVertexArrayOESFn(
GetVertexArrayServiceID(array, &vertex_array_id_map_));
- UpdateCurrentlyBoundElementArrayBuffer();
+ bound_element_array_buffer_dirty_ = true;
return error::kNoError;
}
@@ -3874,7 +3859,9 @@ error::Error GLES2DecoderPassthroughImpl::DoSwapBuffers(uint64_t swap_id,
client()->OnSwapBuffers(swap_id, flags);
if (surface_->SupportsAsyncSwap()) {
- TRACE_EVENT_ASYNC_BEGIN0("gpu", "AsyncSwapBuffers", swap_id);
+ TRACE_EVENT_NESTABLE_ASYNC_BEGIN0(
+ "gpu", "AsyncSwapBuffers",
+ TRACE_ID_WITH_SCOPE("AsyncSwapBuffers", swap_id));
surface_->SwapBuffersAsync(
base::BindOnce(
&GLES2DecoderPassthroughImpl::CheckSwapBuffersAsyncResult,
@@ -3948,6 +3935,9 @@ error::Error GLES2DecoderPassthroughImpl::DoMapBufferRange(
// Track the mapping of this buffer so that data can be synchronized when it
// is unmapped
DCHECK(bound_buffers_.find(target) != bound_buffers_.end());
+ if (target == GL_ELEMENT_ARRAY_BUFFER) {
+ LazilyUpdateCurrentlyBoundElementArrayBuffer();
+ }
GLuint client_buffer = bound_buffers_.at(target);
MappedBuffer mapped_buffer_info;
@@ -3968,6 +3958,9 @@ error::Error GLES2DecoderPassthroughImpl::DoMapBufferRange(
}
error::Error GLES2DecoderPassthroughImpl::DoUnmapBuffer(GLenum target) {
+ if (target == GL_ELEMENT_ARRAY_BUFFER) {
+ LazilyUpdateCurrentlyBoundElementArrayBuffer();
+ }
auto bound_buffers_iter = bound_buffers_.find(target);
if (bound_buffers_iter == bound_buffers_.end()) {
InsertError(GL_INVALID_ENUM, "Invalid buffer target.");
@@ -4016,8 +4009,8 @@ error::Error GLES2DecoderPassthroughImpl::DoResizeCHROMIUM(
static_assert(sizeof(GLuint) >= sizeof(int), "Unexpected GLuint size.");
static const GLuint kMaxDimension =
static_cast<GLuint>(std::numeric_limits<int>::max());
- gfx::Size safe_size(base::ClampToRange(width, 1U, kMaxDimension),
- base::ClampToRange(height, 1U, kMaxDimension));
+ gfx::Size safe_size(base::clamp(width, 1U, kMaxDimension),
+ base::clamp(height, 1U, kMaxDimension));
if (offscreen_) {
if (!ResizeOffscreenFramebuffer(safe_size)) {
LOG(ERROR) << "GLES2DecoderPassthroughImpl: Context lost because "
@@ -4479,7 +4472,9 @@ error::Error GLES2DecoderPassthroughImpl::DoPostSubBufferCHROMIUM(
client()->OnSwapBuffers(swap_id, flags);
if (surface_->SupportsAsyncSwap()) {
- TRACE_EVENT_ASYNC_BEGIN0("gpu", "AsyncSwapBuffers", swap_id);
+ TRACE_EVENT_NESTABLE_ASYNC_BEGIN0(
+ "gpu", "AsyncSwapBuffers",
+ TRACE_ID_WITH_SCOPE("AsyncSwapBuffers", swap_id));
surface_->PostSubBufferAsync(
x, y, width, height,
base::BindOnce(
@@ -4505,7 +4500,8 @@ error::Error GLES2DecoderPassthroughImpl::DoCopyTextureCHROMIUM(
GLboolean unpack_flip_y,
GLboolean unpack_premultiply_alpha,
GLboolean unpack_unmultiply_alpha) {
- ScopedEnableTextureRectangleInShaderCompiler enable(this);
+ gl::ScopedEnableTextureRectangleInShaderCompiler enable(
+ feature_info_->IsWebGLContext() ? api() : nullptr);
BindPendingImageForClientIDIfNeeded(source_id);
api()->glCopyTextureCHROMIUMFn(
GetTextureServiceID(api(), source_id, resources_, false), source_level,
@@ -4533,7 +4529,8 @@ error::Error GLES2DecoderPassthroughImpl::DoCopySubTextureCHROMIUM(
GLboolean unpack_flip_y,
GLboolean unpack_premultiply_alpha,
GLboolean unpack_unmultiply_alpha) {
- ScopedEnableTextureRectangleInShaderCompiler enable(this);
+ gl::ScopedEnableTextureRectangleInShaderCompiler enable(
+ feature_info_->IsWebGLContext() ? api() : nullptr);
BindPendingImageForClientIDIfNeeded(source_id);
api()->glCopySubTextureCHROMIUMFn(
GetTextureServiceID(api(), source_id, resources_, false), source_level,
@@ -4761,7 +4758,16 @@ error::Error GLES2DecoderPassthroughImpl::DoDescheduleUntilFinishedCHROMIUM() {
if (!gl::GLFence::IsSupported()) {
return error::kNoError;
}
- deschedule_until_finished_fences_.push_back(gl::GLFence::Create());
+
+ auto fence = gl::GLFence::Create();
+ if (!fence) {
+ InsertError(GL_INVALID_OPERATION, "gl::GLFence::Create() failed.");
+ MarkContextLost(error::kUnknown);
+ group_->LoseContexts(error::kUnknown);
+ return error::kLostContext;
+ }
+
+ deschedule_until_finished_fences_.push_back(std::move(fence));
if (deschedule_until_finished_fences_.size() == 1) {
return error::kNoError;
@@ -4774,8 +4780,9 @@ error::Error GLES2DecoderPassthroughImpl::DoDescheduleUntilFinishedCHROMIUM() {
return error::kNoError;
}
- TRACE_EVENT_ASYNC_BEGIN0(
- "cc", "GLES2DecoderPassthroughImpl::DescheduleUntilFinished", this);
+ TRACE_EVENT_NESTABLE_ASYNC_BEGIN0(
+ "cc", "GLES2DecoderPassthroughImpl::DescheduleUntilFinished",
+ TRACE_ID_LOCAL(this));
client()->OnDescheduleUntilFinished();
return error::kDeferLaterCommands;
}
@@ -4851,7 +4858,7 @@ error::Error GLES2DecoderPassthroughImpl::DoScheduleOverlayPlaneCHROMIUM(
plane_z_order, transform, image,
gfx::Rect(bounds_x, bounds_y, bounds_width, bounds_height),
gfx::RectF(uv_x, uv_y, uv_width, uv_height), enable_blend,
- std::move(gpu_fence))) {
+ /*damage_rect=*/gfx::Rect(), std::move(gpu_fence))) {
InsertError(GL_INVALID_OPERATION, "failed to schedule overlay");
return error::kNoError;
}
@@ -5010,7 +5017,8 @@ error::Error GLES2DecoderPassthroughImpl::DoScheduleDCLayerCHROMIUM(
return error::kNoError;
}
- ui::DCRendererLayerParams params;
+ std::unique_ptr<ui::DCRendererLayerParams> params =
+ std::make_unique<ui::DCRendererLayerParams>();
GLuint texture_ids[] = {texture_0, texture_1};
size_t i = 0;
for (GLuint texture_id : texture_ids) {
@@ -5030,22 +5038,22 @@ error::Error GLES2DecoderPassthroughImpl::DoScheduleDCLayerCHROMIUM(
InsertError(GL_INVALID_VALUE, "unsupported texture format");
return error::kNoError;
}
- params.images[i++] = scoped_refptr<gl::GLImage>(image);
+ params->images[i++] = scoped_refptr<gl::GLImage>(image);
}
- params.z_order = z_order;
- params.content_rect =
+ params->z_order = z_order;
+ params->content_rect =
gfx::Rect(content_x, content_y, content_width, content_height);
- params.quad_rect = gfx::Rect(quad_x, quad_y, quad_width, quad_height);
- params.transform =
+ params->quad_rect = gfx::Rect(quad_x, quad_y, quad_width, quad_height);
+ params->transform =
gfx::Transform(transform_c1r1, transform_c2r1, transform_c1r2,
transform_c2r2, transform_tx, transform_ty);
if (is_clipped) {
- params.clip_rect = gfx::Rect(clip_x, clip_y, clip_width, clip_height);
+ params->clip_rect = gfx::Rect(clip_x, clip_y, clip_width, clip_height);
}
- params.protected_video_type =
+ params->protected_video_type =
static_cast<gfx::ProtectedVideoType>(protected_video_type);
- if (!surface_->ScheduleDCLayer(params))
+ if (!surface_->ScheduleDCLayer(std::move(params)))
InsertError(GL_INVALID_OPERATION, "failed to schedule DCLayer");
return error::kNoError;
@@ -5064,7 +5072,9 @@ error::Error GLES2DecoderPassthroughImpl::DoCommitOverlayPlanesCHROMIUM(
client()->OnSwapBuffers(swap_id, flags);
if (surface_->SupportsAsyncSwap()) {
- TRACE_EVENT_ASYNC_BEGIN0("gpu", "AsyncSwapBuffers", swap_id);
+ TRACE_EVENT_NESTABLE_ASYNC_BEGIN0(
+ "gpu", "AsyncSwapBuffers",
+ TRACE_ID_WITH_SCOPE("AsyncSwapBuffers", swap_id));
surface_->CommitOverlayPlanesAsync(
base::BindOnce(
&GLES2DecoderPassthroughImpl::CheckSwapBuffersAsyncResult,
diff --git a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest.cc b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest.cc
index 4f8d6237900..baa0e9cb420 100644
--- a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest.cc
+++ b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest.cc
@@ -9,7 +9,7 @@
#include "base/bind.h"
#include "base/command_line.h"
-#include "base/stl_util.h"
+#include "base/cxx17_backports.h"
#include "base/strings/string_number_conversions.h"
#include "gpu/command_buffer/common/gles2_cmd_format.h"
#include "gpu/command_buffer/common/gles2_cmd_utils.h"
diff --git a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_attribs.cc b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_attribs.cc
index de2a399524d..2c9b7702ed0 100644
--- a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_attribs.cc
+++ b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_attribs.cc
@@ -7,7 +7,7 @@
#include <stddef.h>
#include "base/command_line.h"
-#include "base/stl_util.h"
+#include "base/cxx17_backports.h"
#include "base/strings/string_number_conversions.h"
#include "gpu/command_buffer/common/gles2_cmd_format.h"
#include "gpu/command_buffer/common/gles2_cmd_utils.h"
diff --git a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_base.cc b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_base.cc
index 906f0624703..e7f6c72cab2 100644
--- a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_base.cc
+++ b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_base.cc
@@ -13,7 +13,7 @@
#include <vector>
#include "base/command_line.h"
-#include "base/stl_util.h"
+#include "base/cxx17_backports.h"
#include "base/strings/string_number_conversions.h"
#include "base/strings/string_split.h"
#include "gpu/command_buffer/common/gles2_cmd_format.h"
@@ -1556,6 +1556,14 @@ void GLES2DecoderTestBase::DoCopyTexImage2D(
GLsizei width,
GLsizei height,
GLint border) {
+ GLenum translated_internal_format = internal_format;
+ if (group_->feature_info()->IsWebGL2OrES3Context()) {
+ if (internal_format == GL_RGB) {
+ translated_internal_format = GL_RGB8;
+ } else if (internal_format == GL_RGBA) {
+ translated_internal_format = GL_RGBA8;
+ }
+ }
// For GL_BGRA_EXT, we have to fall back to TexImage2D and
// CopyTexSubImage2D, since GL_BGRA_EXT is not accepted by CopyTexImage2D.
// In some cases this fallback further triggers set and restore of
@@ -1588,14 +1596,15 @@ void GLES2DecoderTestBase::DoCopyTexImage2D(
EXPECT_CALL(*gl_, TexParameteri(target, GL_TEXTURE_SWIZZLE_A, _))
.Times(testing::AtLeast(1));
} else {
- EXPECT_CALL(*gl_, CopyTexImage2D(target, level, internal_format, 0, 0,
- width, height, border))
+ EXPECT_CALL(
+ *gl_, CopyTexImage2D(target, level, translated_internal_format, 0, 0,
+ width, height, border))
.Times(1)
.RetiresOnSaturation();
}
} else {
- EXPECT_CALL(*gl_, CopyTexImage2D(target, level, internal_format, 0, 0,
- width, height, border))
+ EXPECT_CALL(*gl_, CopyTexImage2D(target, level, translated_internal_format,
+ 0, 0, width, height, border))
.Times(1)
.RetiresOnSaturation();
}
diff --git a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_extensions.cc b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_extensions.cc
index 2599c4f8d1f..6d3f2f24b7a 100644
--- a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_extensions.cc
+++ b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_extensions.cc
@@ -8,7 +8,6 @@
#include <stdint.h>
#include "base/command_line.h"
-#include "base/stl_util.h"
#include "gpu/command_buffer/common/gles2_cmd_format.h"
#include "gpu/command_buffer/common/gles2_cmd_utils.h"
#include "gpu/command_buffer/service/gles2_cmd_decoder_unittest.h"
diff --git a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_framebuffers.cc b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_framebuffers.cc
index 04ecdd606c9..b2c6a075691 100644
--- a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_framebuffers.cc
+++ b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_framebuffers.cc
@@ -11,8 +11,7 @@
#include <memory>
#include "base/command_line.h"
-#include "base/numerics/ranges.h"
-#include "base/stl_util.h"
+#include "base/cxx17_backports.h"
#include "base/strings/string_number_conversions.h"
#include "gpu/command_buffer/common/gles2_cmd_format.h"
#include "gpu/command_buffer/common/gles2_cmd_utils.h"
@@ -624,8 +623,8 @@ void GLES2DecoderTest::CheckReadPixelsOutOfRange(GLint in_read_x,
// is requesting a larger size.
GLint read_x = std::max(0, in_read_x);
GLint read_y = std::max(0, in_read_y);
- GLint read_end_x = base::ClampToRange(in_read_x + in_read_width, 0, kWidth);
- GLint read_end_y = base::ClampToRange(in_read_y + in_read_height, 0, kHeight);
+ GLint read_end_x = base::clamp(in_read_x + in_read_width, 0, kWidth);
+ GLint read_end_y = base::clamp(in_read_y + in_read_height, 0, kHeight);
GLint read_width = read_end_x - read_x;
GLint read_height = read_end_y - read_y;
if (read_width > 0 && read_height > 0) {
diff --git a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_programs.cc b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_programs.cc
index 6ec6b2facfa..f0e63754668 100644
--- a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_programs.cc
+++ b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_programs.cc
@@ -8,7 +8,7 @@
#include <stdint.h>
#include "base/command_line.h"
-#include "base/stl_util.h"
+#include "base/cxx17_backports.h"
#include "base/strings/string_number_conversions.h"
#include "gpu/command_buffer/common/gles2_cmd_format.h"
#include "gpu/command_buffer/common/gles2_cmd_utils.h"
diff --git a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_textures.cc b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_textures.cc
index 1e3c9b64ccb..197ce21b7ab 100644
--- a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_textures.cc
+++ b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_textures.cc
@@ -8,7 +8,7 @@
#include <stdint.h>
#include "base/command_line.h"
-#include "base/stl_util.h"
+#include "base/cxx17_backports.h"
#include "base/strings/string_number_conversions.h"
#include "components/viz/common/resources/resource_format_utils.h"
#include "gpu/command_buffer/common/gles2_cmd_format.h"
diff --git a/chromium/gpu/command_buffer/service/gpu_switches.cc b/chromium/gpu/command_buffer/service/gpu_switches.cc
index a12e9533485..4ae6ba69df5 100644
--- a/chromium/gpu/command_buffer/service/gpu_switches.cc
+++ b/chromium/gpu/command_buffer/service/gpu_switches.cc
@@ -65,10 +65,6 @@ const char kEnableThreadedTextureMailboxes[] =
// compilation info logs.
const char kGLShaderIntermOutput[] = "gl-shader-interm-output";
-// Emulate ESSL lowp and mediump float precisions by mutating the shaders to
-// round intermediate values in ANGLE.
-const char kEmulateShaderPrecision[] = "emulate-shader-precision";
-
// Enable Vulkan support and select Vulkan implementation, must also have
// ENABLE_VULKAN defined. This only initializes Vulkan, the flag
// --enable-features=Vulkan must also be used to select Vulkan for compositing
diff --git a/chromium/gpu/command_buffer/service/gpu_switches.h b/chromium/gpu/command_buffer/service/gpu_switches.h
index b3b7d999b73..e87ef8020bb 100644
--- a/chromium/gpu/command_buffer/service/gpu_switches.h
+++ b/chromium/gpu/command_buffer/service/gpu_switches.h
@@ -29,7 +29,6 @@ GPU_EXPORT extern const char kGpuProgramCacheSizeKb[];
GPU_EXPORT extern const char kDisableGpuShaderDiskCache[];
GPU_EXPORT extern const char kEnableThreadedTextureMailboxes[];
GPU_EXPORT extern const char kGLShaderIntermOutput[];
-GPU_EXPORT extern const char kEmulateShaderPrecision[];
GPU_EXPORT extern const char kUseVulkan[];
GPU_EXPORT extern const char kVulkanImplementationNameNative[];
GPU_EXPORT extern const char kVulkanImplementationNameSwiftshader[];
diff --git a/chromium/gpu/command_buffer/service/gpu_tracer.cc b/chromium/gpu/command_buffer/service/gpu_tracer.cc
index f6c29308767..7ee96541765 100644
--- a/chromium/gpu/command_buffer/service/gpu_tracer.cc
+++ b/chromium/gpu/command_buffer/service/gpu_tracer.cc
@@ -8,9 +8,9 @@
#include <stdint.h>
#include "base/bind.h"
+#include "base/cxx17_backports.h"
#include "base/location.h"
#include "base/single_thread_task_runner.h"
-#include "base/stl_util.h"
#include "base/strings/string_util.h"
#include "base/strings/stringprintf.h"
#include "base/threading/thread_task_runner_handle.h"
diff --git a/chromium/gpu/command_buffer/service/gr_cache_controller.cc b/chromium/gpu/command_buffer/service/gr_cache_controller.cc
index ee21020a3ef..a5db158a6d0 100644
--- a/chromium/gpu/command_buffer/service/gr_cache_controller.cc
+++ b/chromium/gpu/command_buffer/service/gr_cache_controller.cc
@@ -7,6 +7,7 @@
#include <chrono>
#include "base/bind.h"
+#include "base/threading/thread_task_runner_handle.h"
#include "gpu/command_buffer/service/shared_context_state.h"
#include "ui/gl/gl_bindings.h"
#include "ui/gl/gl_context.h"
@@ -14,6 +15,10 @@
namespace gpu {
namespace raster {
+GrCacheController::GrCacheController(SharedContextState* context_state)
+ : context_state_(context_state),
+ task_runner_(base::ThreadTaskRunnerHandle::Get()) {}
+
GrCacheController::GrCacheController(
SharedContextState* context_state,
scoped_refptr<base::SingleThreadTaskRunner> task_runner)
diff --git a/chromium/gpu/command_buffer/service/gr_cache_controller.h b/chromium/gpu/command_buffer/service/gr_cache_controller.h
index d207a923ec8..f2188676d6b 100644
--- a/chromium/gpu/command_buffer/service/gr_cache_controller.h
+++ b/chromium/gpu/command_buffer/service/gr_cache_controller.h
@@ -14,6 +14,7 @@ namespace gpu {
class SharedContextState;
namespace raster {
+class GrCacheControllerTest;
// Manages clearing the GrContext cache after a period of inactivity.
// TODO(khushalsagar): This class replicates the ContextCacheController used in
@@ -21,14 +22,18 @@ namespace raster {
// gpu::Scheduler, since it can better identify when we are in an idle state.
class GPU_GLES2_EXPORT GrCacheController {
public:
- GrCacheController(SharedContextState* context_state,
- scoped_refptr<base::SingleThreadTaskRunner> task_runner);
+ explicit GrCacheController(SharedContextState* context_state);
~GrCacheController();
// Called to schedule purging the GrCache after a period of inactivity.
void ScheduleGrContextCleanup();
private:
+ friend class GrCacheControllerTest;
+
+ // Meant to be used by the GrCacheControllerTest.
+ GrCacheController(SharedContextState* context_state,
+ scoped_refptr<base::SingleThreadTaskRunner> task_runner);
void PurgeGrCache(uint64_t idle_id);
// The |current_idle_id_| is used to avoid continuously posting tasks to clear
diff --git a/chromium/gpu/command_buffer/service/gr_cache_controller_unittest.cc b/chromium/gpu/command_buffer/service/gr_cache_controller_unittest.cc
index b098f44a4bb..b583392f289 100644
--- a/chromium/gpu/command_buffer/service/gr_cache_controller_unittest.cc
+++ b/chromium/gpu/command_buffer/service/gr_cache_controller_unittest.cc
@@ -45,8 +45,8 @@ class GrCacheControllerTest : public testing::Test {
base::MakeRefCounted<gles2::FeatureInfo>(workarounds, GpuFeatureInfo());
context_state_->InitializeGL(GpuPreferences(), std::move(feature_info));
- controller_ =
- std::make_unique<GrCacheController>(context_state_.get(), task_runner_);
+ controller_ = base::WrapUnique(
+ new GrCacheController(context_state_.get(), task_runner_));
}
void TearDown() override {
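
The hunks above introduce a common C++ idiom: a public production constructor
plus a private, test-only overload reachable through a friend declaration.
Because std::make_unique cannot call a private constructor, the test wraps a
raw `new` with base::WrapUnique. A minimal standalone sketch of the idiom,
with hypothetical names (Widget, WidgetTest):

#include <memory>

#include "base/memory/ptr_util.h"  // for base::WrapUnique

class Widget {
 public:
  Widget() : Widget(/*for_testing=*/false) {}  // Production path.

 private:
  friend class WidgetTest;

  // Test-only; reachable only from the friend class.
  explicit Widget(bool for_testing) : for_testing_(for_testing) {}

  const bool for_testing_;
};

class WidgetTest {
 public:
  static std::unique_ptr<Widget> Make() {
    // std::make_unique<Widget>(true) fails to compile even here, because the
    // construction happens inside std::make_unique, which is not a friend.
    // base::WrapUnique adopts a `new` issued from friend context instead.
    return base::WrapUnique(new Widget(/*for_testing=*/true));
  }
};
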
diff --git a/chromium/gpu/command_buffer/service/gr_shader_cache.cc b/chromium/gpu/command_buffer/service/gr_shader_cache.cc
index d305bfce53a..5a9912ef3a7 100644
--- a/chromium/gpu/command_buffer/service/gr_shader_cache.cc
+++ b/chromium/gpu/command_buffer/service/gr_shader_cache.cc
@@ -67,7 +67,8 @@ GrShaderCache::~GrShaderCache() {
sk_sp<SkData> GrShaderCache::load(const SkData& key) {
TRACE_EVENT0("gpu", "GrShaderCache::load");
- DCHECK_NE(current_client_id_, kInvalidClientId);
+ base::AutoLock auto_lock(lock_);
+ DCHECK_NE(current_client_id(), kInvalidClientId);
CacheKey cache_key(SkData::MakeWithoutCopy(key.data(), key.size()));
auto it = store_.Get(cache_key);
@@ -103,7 +104,8 @@ sk_sp<SkData> GrShaderCache::load(const SkData& key) {
void GrShaderCache::store(const SkData& key, const SkData& data) {
TRACE_EVENT0("gpu", "GrShaderCache::store");
- DCHECK_NE(current_client_id_, kInvalidClientId);
+ base::AutoLock auto_lock(lock_);
+ DCHECK_NE(current_client_id(), kInvalidClientId);
CacheKey cache_key(SkData::MakeWithCopy(key.data(), key.size()));
if (IsVkPipelineCacheEntry(cache_key)) {
@@ -137,6 +139,7 @@ void GrShaderCache::store(const SkData& key, const SkData& data) {
void GrShaderCache::PopulateCache(const std::string& key,
const std::string& data) {
TRACE_EVENT0("gpu", "GrShaderCache::PopulateCache");
+ base::AutoLock auto_lock(lock_);
std::string decoded_key;
base::Base64Decode(key, &decoded_key);
@@ -174,6 +177,7 @@ void GrShaderCache::PopulateCache(const std::string& key,
GrShaderCache::Store::iterator GrShaderCache::AddToCache(CacheKey key,
CacheData data) {
+ lock_.AssertAcquired();
auto it = store_.Put(key, std::move(data));
curr_size_bytes_ += it->second.data->size();
return it;
@@ -181,6 +185,7 @@ GrShaderCache::Store::iterator GrShaderCache::AddToCache(CacheKey key,
template <typename Iterator>
void GrShaderCache::EraseFromCache(Iterator it, bool overwriting) {
+ lock_.AssertAcquired();
DCHECK_GE(curr_size_bytes_, it->second.data->size());
if (it->second.prefetched_but_not_read && IsVkPipelineCacheEntry(it->first)) {
@@ -195,11 +200,13 @@ void GrShaderCache::EraseFromCache(Iterator it, bool overwriting) {
}
void GrShaderCache::CacheClientIdOnDisk(int32_t client_id) {
+ base::AutoLock auto_lock(lock_);
client_ids_to_cache_on_disk_.insert(client_id);
}
void GrShaderCache::PurgeMemory(
base::MemoryPressureListener::MemoryPressureLevel memory_pressure_level) {
+ base::AutoLock auto_lock(lock_);
size_t original_limit = cache_size_limit_;
switch (memory_pressure_level) {
@@ -219,6 +226,7 @@ void GrShaderCache::PurgeMemory(
bool GrShaderCache::OnMemoryDump(const base::trace_event::MemoryDumpArgs& args,
base::trace_event::ProcessMemoryDump* pmd) {
+ base::AutoLock auto_lock(lock_);
using base::trace_event::MemoryAllocatorDump;
std::string dump_name =
base::StringPrintf("gpu/gr_shader_cache/cache_0x%" PRIXPTR,
@@ -230,14 +238,25 @@ bool GrShaderCache::OnMemoryDump(const base::trace_event::MemoryDumpArgs& args,
return true;
}
+size_t GrShaderCache::num_cache_entries() const {
+ base::AutoLock auto_lock(lock_);
+ return store_.size();
+}
+
+size_t GrShaderCache::curr_size_bytes_for_testing() const {
+ base::AutoLock auto_lock(lock_);
+ return curr_size_bytes_;
+}
+
void GrShaderCache::WriteToDisk(const CacheKey& key, CacheData* data) {
- DCHECK_NE(current_client_id_, kInvalidClientId);
+ lock_.AssertAcquired();
+ DCHECK_NE(current_client_id(), kInvalidClientId);
if (!data->pending_disk_write)
return;
// Only cache the shader on disk if this client id is permitted.
- if (client_ids_to_cache_on_disk_.count(current_client_id_) == 0)
+ if (client_ids_to_cache_on_disk_.count(current_client_id()) == 0)
return;
data->pending_disk_write = false;
@@ -248,6 +267,7 @@ void GrShaderCache::WriteToDisk(const CacheKey& key, CacheData* data) {
}
void GrShaderCache::EnforceLimits(size_t size_needed) {
+ lock_.AssertAcquired();
DCHECK_LE(size_needed, cache_size_limit_);
while (size_needed + curr_size_bytes_ > cache_size_limit_)
@@ -255,7 +275,22 @@ void GrShaderCache::EnforceLimits(size_t size_needed) {
}
void GrShaderCache::StoreVkPipelineCacheIfNeeded(GrDirectContext* gr_context) {
- if (enable_vk_pipeline_cache_ && need_store_pipeline_cache_) {
+ // This method must be called from only one gpu thread, the gpu main
+ // thread. Calling it from multiple gpu threads (and hence multiple
+ // contexts) is redundant and expensive, since each GrContext will have the
+ // same key. Hence the DCHECK here.
+ // TODO(vikassoni): https://crbug.com/1211085. Ensure that we call this
+ // method from only one gpu thread once multiple gpu threads (aka dr-dc)
+ // are implemented.
+ DCHECK_CALLED_ON_VALID_THREAD(gpu_main_thread_checker_);
+
+ bool need_store_pipeline_cache = false;
+ {
+ base::AutoLock auto_lock(lock_);
+ need_store_pipeline_cache = need_store_pipeline_cache_;
+ }
+
+ if (enable_vk_pipeline_cache_ && need_store_pipeline_cache) {
{
base::ScopedClosureRunner uma_runner(base::BindOnce(
[](base::Time time) {
@@ -265,10 +300,13 @@ void GrShaderCache::StoreVkPipelineCacheIfNeeded(GrDirectContext* gr_context) {
base::TimeDelta::FromMicroseconds(5000), 50);
},
base::Time::Now()));
+
gr_context->storeVkPipelineCacheData();
+ {
+ base::AutoLock auto_lock(lock_);
+ need_store_pipeline_cache_ = false;
+ }
}
-
- need_store_pipeline_cache_ = false;
}
}
@@ -279,17 +317,26 @@ bool GrShaderCache::IsVkPipelineCacheEntry(const CacheKey& key) {
return key.data->size() == 4;
}
+int32_t GrShaderCache::current_client_id() const {
+ lock_.AssertAcquired();
+ auto it = current_client_id_.find(base::PlatformThread::CurrentId());
+ if (it != current_client_id_.end())
+ return it->second;
+ return kInvalidClientId;
+}
+
GrShaderCache::ScopedCacheUse::ScopedCacheUse(GrShaderCache* cache,
int32_t client_id)
: cache_(cache) {
- DCHECK_EQ(cache_->current_client_id_, kInvalidClientId);
+ base::AutoLock auto_lock(cache_->lock_);
+ DCHECK_EQ(cache_->current_client_id(), kInvalidClientId);
DCHECK_NE(client_id, kInvalidClientId);
-
- cache_->current_client_id_ = client_id;
+ cache_->current_client_id_[base::PlatformThread::CurrentId()] = client_id;
}
GrShaderCache::ScopedCacheUse::~ScopedCacheUse() {
- cache_->current_client_id_ = kInvalidClientId;
+ base::AutoLock auto_lock(cache_->lock_);
+ cache_->current_client_id_.erase(base::PlatformThread::CurrentId());
}
GrShaderCache::CacheKey::CacheKey(sk_sp<SkData> data) : data(std::move(data)) {
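
The cache now keys the active client id by thread instead of keeping a single
field, so concurrent ScopedCacheUse instances on different threads no longer
collide. A minimal sketch of the scheme in isolation (simplified: this getter
takes the lock itself, whereas GrShaderCache::current_client_id() asserts that
the caller already holds it, and kInvalidClientId here is an arbitrary
placeholder value):

#include <cstdint>

#include "base/containers/flat_map.h"
#include "base/synchronization/lock.h"
#include "base/threading/platform_thread.h"

class PerThreadClientId {
 public:
  static constexpr int32_t kInvalidClientId = -1;

  void Set(int32_t client_id) {
    base::AutoLock auto_lock(lock_);
    ids_[base::PlatformThread::CurrentId()] = client_id;
  }

  void Clear() {
    base::AutoLock auto_lock(lock_);
    ids_.erase(base::PlatformThread::CurrentId());
  }

  int32_t Get() {
    base::AutoLock auto_lock(lock_);
    auto it = ids_.find(base::PlatformThread::CurrentId());
    return it == ids_.end() ? kInvalidClientId : it->second;
  }

 private:
  base::Lock lock_;
  base::flat_map<base::PlatformThreadId, int32_t> ids_;
};
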
diff --git a/chromium/gpu/command_buffer/service/gr_shader_cache.h b/chromium/gpu/command_buffer/service/gr_shader_cache.h
index b2b38e6d944..70b73d78675 100644
--- a/chromium/gpu/command_buffer/service/gr_shader_cache.h
+++ b/chromium/gpu/command_buffer/service/gr_shader_cache.h
@@ -5,12 +5,15 @@
#ifndef GPU_COMMAND_BUFFER_SERVICE_GR_SHADER_CACHE_H_
#define GPU_COMMAND_BUFFER_SERVICE_GR_SHADER_CACHE_H_
+#include "base/containers/flat_map.h"
#include "base/containers/flat_set.h"
#include "base/containers/mru_cache.h"
#include "base/hash/hash.h"
#include "base/memory/memory_pressure_listener.h"
+#include "base/synchronization/lock.h"
+#include "base/threading/thread_checker.h"
#include "base/trace_event/memory_dump_provider.h"
-#include "gpu/gpu_gles2_export.h"
+#include "gpu/raster_export.h"
#include "third_party/skia/include/gpu/GrContextOptions.h"
class GrDirectContext;
@@ -18,11 +21,11 @@ class GrDirectContext;
namespace gpu {
namespace raster {
-class GPU_GLES2_EXPORT GrShaderCache
+class RASTER_EXPORT GrShaderCache
: public GrContextOptions::PersistentCache,
public base::trace_event::MemoryDumpProvider {
public:
- class GPU_GLES2_EXPORT Client {
+ class RASTER_EXPORT Client {
public:
virtual ~Client() {}
@@ -30,7 +33,7 @@ class GPU_GLES2_EXPORT GrShaderCache
const std::string& shader) = 0;
};
- class GPU_GLES2_EXPORT ScopedCacheUse {
+ class RASTER_EXPORT ScopedCacheUse {
public:
ScopedCacheUse(GrShaderCache* cache, int32_t client_id);
~ScopedCacheUse();
@@ -55,8 +58,8 @@ class GPU_GLES2_EXPORT GrShaderCache
bool OnMemoryDump(const base::trace_event::MemoryDumpArgs& args,
base::trace_event::ProcessMemoryDump* pmd) override;
- size_t num_cache_entries() const { return store_.size(); }
- size_t curr_size_bytes_for_testing() const { return curr_size_bytes_; }
+ size_t num_cache_entries() const;
+ size_t curr_size_bytes_for_testing() const;
void StoreVkPipelineCacheIfNeeded(GrDirectContext* gr_context);
@@ -112,18 +115,26 @@ class GPU_GLES2_EXPORT GrShaderCache
bool IsVkPipelineCacheEntry(const CacheKey& key);
- size_t cache_size_limit_;
- size_t curr_size_bytes_ = 0u;
- Store store_;
+ int32_t current_client_id() const;
- Client* const client_;
- base::flat_set<int32_t> client_ids_to_cache_on_disk_;
+ mutable base::Lock lock_;
+ size_t cache_size_limit_ GUARDED_BY(lock_) = 0u;
+ size_t curr_size_bytes_ GUARDED_BY(lock_) = 0u;
+ Store store_ GUARDED_BY(lock_);
+ Client* const client_ GUARDED_BY(lock_);
+ base::flat_set<int32_t> client_ids_to_cache_on_disk_ GUARDED_BY(lock_);
- int32_t current_client_id_ = kInvalidClientId;
-
- bool need_store_pipeline_cache_ = false;
+ // Multiple threads, and hence multiple clients, can access the shader
+ // cache at the same time. Hence a per-thread |current_client_id_| is used.
+ base::flat_map<base::PlatformThreadId, int32_t> current_client_id_
+ GUARDED_BY(lock_);
+ bool need_store_pipeline_cache_ GUARDED_BY(lock_) = false;
const bool enable_vk_pipeline_cache_;
+ // Bound to the thread on which GrShaderCache is created. Some methods can
+ // only be called on this thread. GrShaderCache is created on the gpu main
+ // thread.
+ THREAD_CHECKER(gpu_main_thread_checker_);
+
DISALLOW_COPY_AND_ASSIGN(GrShaderCache);
};
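
The GUARDED_BY annotations added here feed Clang's -Wthread-safety analysis:
accessing an annotated member without |lock_| held is flagged at compile time,
and lock_.AssertAcquired() both DCHECKs at runtime and satisfies the static
analysis inside private helpers. A minimal illustration of the combination
(assuming Chromium's base/thread_annotations.h):

#include "base/synchronization/lock.h"
#include "base/thread_annotations.h"

class Counter {
 public:
  void Increment() {
    base::AutoLock auto_lock(lock_);
    ++value_;  // OK: the analysis sees |lock_| held via base::AutoLock.
  }

  int GetAlreadyLocked() {
    // For helpers called with the lock already held: DCHECKs at runtime and
    // tells the static analysis that |lock_| is held.
    lock_.AssertAcquired();
    return value_;
  }

 private:
  base::Lock lock_;
  int value_ GUARDED_BY(lock_) = 0;  // Unlocked access is flagged.
};
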
diff --git a/chromium/gpu/command_buffer/service/gr_shader_cache_unittest.cc b/chromium/gpu/command_buffer/service/gr_shader_cache_unittest.cc
index 68b15106256..cc9a8fabc75 100644
--- a/chromium/gpu/command_buffer/service/gr_shader_cache_unittest.cc
+++ b/chromium/gpu/command_buffer/service/gr_shader_cache_unittest.cc
@@ -4,6 +4,8 @@
#include "gpu/command_buffer/service/gr_shader_cache.h"
+#include <thread>
+
#include "base/base64.h"
#include "testing/gtest/include/gtest/gtest.h"
@@ -183,5 +185,40 @@ TEST_F(GrShaderCacheTest, PopulateFromDiskAfterStoring) {
EXPECT_EQ(disk_cache_.size(), 1u);
}
+// This test creates GrShaderCache::ScopedCacheUse objects on 2 different
+// threads whose lifetimes overlap. In a non-thread-safe GrShaderCache this
+// would hit DCHECKs in ScopedCacheUse::ScopedCacheUse(), since the
+// current_client_id would already exist from the 1st object. In a
+// thread-safe model, it does not hit DCHECKs.
+TEST_F(GrShaderCacheTest, MultipleThreadsUsingSameCache) {
+ int32_t regular_client_id = 3;
+ int32_t new_client_id = 4;
+ cache_.CacheClientIdOnDisk(regular_client_id);
+
+ auto key = SkData::MakeWithCopy(kShaderKey, strlen(kShaderKey));
+ auto shader = SkData::MakeWithCString(kShader);
+
+ GrShaderCache::ScopedCacheUse cache_use1(&cache_, regular_client_id);
+ EXPECT_EQ(cache_.load(*key), nullptr);
+ cache_.store(*key, *shader);
+
+ EXPECT_EQ(cache_.num_cache_entries(), 1u);
+ EXPECT_EQ(cache_.curr_size_bytes_for_testing(), shader->size());
+
+ // Different client id to use cache on a different thread.
+ std::thread second_thread([&]() {
+ auto key2 = SkData::MakeWithCString("key2");
+ GrShaderCache::ScopedCacheUse cache_use2(&cache_, new_client_id);
+ EXPECT_EQ(cache_.load(*key2), nullptr);
+
+ // Store same shader on a different key.
+ cache_.store(*key2, *shader);
+ });
+ second_thread.join();
+
+ EXPECT_EQ(cache_.num_cache_entries(), 2u);
+ EXPECT_EQ(cache_.curr_size_bytes_for_testing(), 2 * shader->size());
+}
+
} // namespace raster
} // namespace gpu
diff --git a/chromium/gpu/command_buffer/service/image_reader_gl_owner.cc b/chromium/gpu/command_buffer/service/image_reader_gl_owner.cc
index 72b944d626f..efc14dd3649 100644
--- a/chromium/gpu/command_buffer/service/image_reader_gl_owner.cc
+++ b/chromium/gpu/command_buffer/service/image_reader_gl_owner.cc
@@ -68,6 +68,17 @@ uint32_t NumRequiredMaxImages(TextureOwner::Mode mode) {
DCHECK(!features::LimitAImageReaderMaxSizeToOne());
if (features::IncreaseBufferCountForHighFrameRate())
return 5;
+
+ // WebView overlays rely on WebView zero copy at the moment, which
+ // requires at least 3 buffers (one the renderer prepares, one locked by
+ // the display compositor in the latest compositor frame, and one pending
+ // deletion). These are in addition to the normal 3 that we need for
+ // surface control.
+ // TODO(vasilyt): This needs to be resolved before feature launch, but
+ // should work for dogfood.
+ if (features::IncreaseBufferCountForWebViewOverlays())
+ return 6;
+
return 3;
}
return features::LimitAImageReaderMaxSizeToOne() ? 1 : 2;
@@ -93,7 +104,7 @@ class ImageReaderGLOwner::ScopedHardwareBufferImpl
image_(image),
task_runner_(base::ThreadTaskRunnerHandle::Get()) {
DCHECK(image_);
- texture_owner_->RegisterRefOnImage(image_);
+ texture_owner_->RegisterRefOnImageLocked(image_);
}
~ScopedHardwareBufferImpl() override {
@@ -190,7 +201,7 @@ ImageReaderGLOwner::ImageReaderGLOwner(
}
ImageReaderGLOwner::~ImageReaderGLOwner() {
- DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);
+ DCHECK_CALLED_ON_VALID_THREAD(gpu_main_thread_checker_);
// Call ReleaseResources() if it hasn't been called already. This does nothing
// if the texture and other resources have already been destroyed on context loss.
@@ -200,6 +211,8 @@ ImageReaderGLOwner::~ImageReaderGLOwner() {
}
void ImageReaderGLOwner::ReleaseResources() {
+ DCHECK_CALLED_ON_VALID_THREAD(gpu_main_thread_checker_);
+ base::AutoLock auto_lock(lock_);
// Either TextureOwner is being destroyed or the TextureOwner's shared context
// is lost. Clean up if it hasn't happened already.
if (image_reader_) {
@@ -228,6 +241,8 @@ void ImageReaderGLOwner::SetFrameAvailableCallback(
}
gl::ScopedJavaSurface ImageReaderGLOwner::CreateJavaSurface() const {
+ base::AutoLock auto_lock(lock_);
+
// If we've already lost the texture, then do nothing.
if (!image_reader_) {
DLOG(ERROR) << "Already lost texture / image reader";
@@ -252,7 +267,7 @@ gl::ScopedJavaSurface ImageReaderGLOwner::CreateJavaSurface() const {
}
void ImageReaderGLOwner::UpdateTexImage() {
- DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);
+ base::AutoLock auto_lock(lock_);
// If we've lost the texture, then do nothing.
if (!texture())
@@ -318,17 +333,22 @@ void ImageReaderGLOwner::UpdateTexImage() {
return;
}
+ UMA_HISTOGRAM_BOOLEAN("Media.AImageReaderGLOwner.HasFence",
+ scoped_acquire_fence_fd.is_valid());
+
// Make the newly acquired image the current image.
current_image_ref_.emplace(this, image, std::move(scoped_acquire_fence_fd));
}
void ImageReaderGLOwner::EnsureTexImageBound(GLuint service_id) {
+ base::AutoLock auto_lock(lock_);
if (current_image_ref_)
current_image_ref_->EnsureBound(service_id);
}
std::unique_ptr<base::android::ScopedHardwareBufferFenceSync>
ImageReaderGLOwner::GetAHardwareBuffer() {
+ base::AutoLock auto_lock(lock_);
if (!current_image_ref_)
return nullptr;
@@ -348,7 +368,9 @@ ImageReaderGLOwner::GetAHardwareBuffer() {
current_image_ref_->GetReadyFence());
}
-gfx::Rect ImageReaderGLOwner::GetCropRect() {
+gfx::Rect ImageReaderGLOwner::GetCropRectLocked() {
+ DCHECK_CALLED_ON_VALID_THREAD(gpu_main_thread_checker_);
+ lock_.AssertAcquired();
if (!current_image_ref_)
return gfx::Rect();
@@ -369,7 +391,8 @@ gfx::Rect ImageReaderGLOwner::GetCropRect() {
crop_rect.bottom - crop_rect.top);
}
-void ImageReaderGLOwner::RegisterRefOnImage(AImage* image) {
+void ImageReaderGLOwner::RegisterRefOnImageLocked(AImage* image) {
+ lock_.AssertAcquired();
DCHECK(image_reader_);
// Add a ref that the caller will release.
@@ -378,6 +401,13 @@ void ImageReaderGLOwner::RegisterRefOnImage(AImage* image) {
void ImageReaderGLOwner::ReleaseRefOnImage(AImage* image,
base::ScopedFD fence_fd) {
+ base::AutoLock auto_lock(lock_);
+ ReleaseRefOnImageLocked(image, std::move(fence_fd));
+}
+
+void ImageReaderGLOwner::ReleaseRefOnImageLocked(AImage* image,
+ base::ScopedFD fence_fd) {
+ lock_.AssertAcquired();
// During cleanup on losing the texture, all images are synchronously released
// and the |image_reader_| is destroyed.
if (!image_reader_)
@@ -404,22 +434,38 @@ void ImageReaderGLOwner::ReleaseRefOnImage(AImage* image,
image_refs_.erase(it);
DCHECK_GT(max_images_, static_cast<int32_t>(image_refs_.size()));
- if (buffer_available_cb_)
- std::move(buffer_available_cb_).Run();
+ auto buffer_available_cb = std::move(buffer_available_cb_);
+
+ {
+ // |buffer_available_cb| will try to acquire the lock again via
+ // UpdateTexImage(), hence we need to unlock here. Note that when
+ // |max_images_| is 1, this callback will always be empty here, since it
+ // will have been run immediately in RunWhenBufferIsAvailable(). Hence
+ // resetting |current_image_ref_| in UpdateTexImage() cannot trigger this
+ // callback. Otherwise, triggering this callback from UpdateTexImage() on a
+ // |current_image_ref_| reset would cause the callback, and hence
+ // FrameInfoHelper, to run and eventually call UpdateTexImage() again from
+ // there, which would have been flimsy.
+ base::AutoUnlock auto_unlock(lock_);
+ if (buffer_available_cb) {
+ DCHECK_GT(max_images_, 1);
+ std::move(buffer_available_cb).Run();
+ }
+ }
}
void ImageReaderGLOwner::ReleaseBackBuffers() {
- DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);
+ DCHECK_CALLED_ON_VALID_THREAD(gpu_main_thread_checker_);
// ReleaseBackBuffers() call is not required with image reader.
}
gl::GLContext* ImageReaderGLOwner::GetContext() const {
- DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);
+ DCHECK_CALLED_ON_VALID_THREAD(gpu_main_thread_checker_);
return context_.get();
}
gl::GLSurface* ImageReaderGLOwner::GetSurface() const {
- DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);
+ DCHECK_CALLED_ON_VALID_THREAD(gpu_main_thread_checker_);
return surface_.get();
}
@@ -434,23 +480,43 @@ void ImageReaderGLOwner::OnFrameAvailable(void* context, AImageReader* reader) {
}
void ImageReaderGLOwner::RunWhenBufferIsAvailable(base::OnceClosure callback) {
- // Note that we handle only one simultaneous request, this is not issue
- // because FrameInfoHelper maintain request queue and has only single
- // outstanding request on GPU thread.
- DCHECK(!buffer_available_cb_);
- // If `max_images` == 1 we will drop it before acquiring new buffer. Note that
- // this must never happen with SurfaceControl and the ImageReaderGLOwner is
- // the sole owner of the images.
- if (max_images_ == 1 || static_cast<int>(image_refs_.size()) < max_images_)
+ DCHECK_CALLED_ON_VALID_THREAD(gpu_main_thread_checker_);
+ int image_refs_size = 0;
+ {
+ base::AutoLock auto_lock(lock_);
+ // Note that we handle only one simultaneous request. This is not an issue
+ // because FrameInfoHelper maintains a request queue and has only a single
+ // outstanding request on the GPU thread.
+ DCHECK(!buffer_available_cb_);
+ image_refs_size = static_cast<int>(image_refs_.size());
+ }
+ // If `max_images` == 1 we will drop it before acquiring new buffer. Note
+ // that this must never happen with SurfaceControl and the
+ // ImageReaderGLOwner is the sole owner of the images.
+ if (max_images_ == 1 || image_refs_size < max_images_) {
+ // This callback is run from here as well as from ReleaseRefOnImage(),
+ // where we remove one image from the image reader queue before the
+ // callback is run. Once |lock_| is dropped in this method, another thread
+ // can call UpdateTexImage() before the callback runs and cause the image
+ // reader queue to become full. In that case the callback will not be able
+ // to render and acquire an updated image, and hence will use the FrameInfo
+ // of the previous image, which results in a wrong coded size for all
+ // future frames. To avoid this, no other thread should try to
+ // UpdateTexImage() while this callback is run; the lock held by the caller
+ // of this method (GetFrameInfo()) ensures that this never happens.
std::move(callback).Run();
- else
+ } else {
+ base::AutoLock auto_lock(lock_);
buffer_available_cb_ = std::move(callback);
+ }
}
bool ImageReaderGLOwner::GetCodedSizeAndVisibleRect(
gfx::Size rotated_visible_size,
gfx::Size* coded_size,
gfx::Rect* visible_rect) {
+ DCHECK_CALLED_ON_VALID_THREAD(gpu_main_thread_checker_);
+ base::AutoLock auto_lock(lock_);
DCHECK(visible_rect);
DCHECK(coded_size);
@@ -471,7 +537,7 @@ bool ImageReaderGLOwner::GetCodedSizeAndVisibleRect(
AHardwareBuffer_Desc desc;
base::AndroidHardwareBufferCompat::GetInstance().Describe(buffer, &desc);
- *visible_rect = GetCropRect();
+ *visible_rect = GetCropRectLocked();
*coded_size = gfx::Size(desc.width, desc.height);
return true;
@@ -490,11 +556,14 @@ ImageReaderGLOwner::ScopedCurrentImageRef::ScopedCurrentImageRef(
: texture_owner_(texture_owner),
image_(image),
ready_fence_(std::move(ready_fence)) {
+ DCHECK(texture_owner_);
+ texture_owner_->lock_.AssertAcquired();
DCHECK(image_);
- texture_owner_->RegisterRefOnImage(image_);
+ texture_owner_->RegisterRefOnImageLocked(image_);
}
ImageReaderGLOwner::ScopedCurrentImageRef::~ScopedCurrentImageRef() {
+ texture_owner_->lock_.AssertAcquired();
base::ScopedFD release_fence;
// If there is no |image_reader_|, we are in tear down so no fence is
// required.
@@ -502,7 +571,7 @@ ImageReaderGLOwner::ScopedCurrentImageRef::~ScopedCurrentImageRef() {
release_fence = CreateEglFenceAndExportFd();
else
release_fence = std::move(ready_fence_);
- texture_owner_->ReleaseRefOnImage(image_, std::move(release_fence));
+ texture_owner_->ReleaseRefOnImageLocked(image_, std::move(release_fence));
}
base::ScopedFD ImageReaderGLOwner::ScopedCurrentImageRef::GetReadyFence()
@@ -521,6 +590,11 @@ void ImageReaderGLOwner::ScopedCurrentImageRef::EnsureBound(GLuint service_id) {
if (!InsertEglFenceAndWait(GetReadyFence()))
return;
+ // CreateAndBindEglImage will bind the texture with service_id to the
+ // current unit. We should never alter a gl binding without updating state
+ // tracking, which we can't do here, so restore the previous binding after
+ // we are done.
+ ScopedRestoreTextureBinding scoped_restore_texture;
+
// Create EGL image from the AImage and bind it to the texture.
if (!CreateAndBindEglImage(image_, service_id, &texture_owner_->loader_))
return;
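
ReleaseRefOnImage() above shows a recurring pattern for lock-protected classes
that must run a re-entrant callback: move the callback out of the guarded
member while locked, then drop the lock with base::AutoUnlock before running
it. A reduced sketch of just that pattern (hypothetical class name):

#include <utility>

#include "base/callback.h"
#include "base/synchronization/lock.h"
#include "base/thread_annotations.h"

class BufferNotifier {
 public:
  void SetCallback(base::OnceClosure cb) {
    base::AutoLock auto_lock(lock_);
    callback_ = std::move(cb);
  }

  void OnBufferReleased() {
    base::AutoLock auto_lock(lock_);
    // Take the callback while the lock is held...
    base::OnceClosure cb = std::move(callback_);
    if (cb) {
      // ...but run it unlocked, since it may re-enter and take |lock_|.
      base::AutoUnlock auto_unlock(lock_);
      std::move(cb).Run();
    }
  }

 private:
  base::Lock lock_;
  base::OnceClosure callback_ GUARDED_BY(lock_);
};
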
diff --git a/chromium/gpu/command_buffer/service/image_reader_gl_owner.h b/chromium/gpu/command_buffer/service/image_reader_gl_owner.h
index 93104b8c6e4..f86d5d125fa 100644
--- a/chromium/gpu/command_buffer/service/image_reader_gl_owner.h
+++ b/chromium/gpu/command_buffer/service/image_reader_gl_owner.h
@@ -44,9 +44,15 @@ class GPU_GLES2_EXPORT ImageReaderGLOwner : public TextureOwner {
bool GetCodedSizeAndVisibleRect(gfx::Size rotated_visible_size,
gfx::Size* coded_size,
gfx::Rect* visible_rect) override;
+
+ // This method is never called in the MediaPlayer path. Hence thread
+ // safety analysis is omitted until thread safety is implemented in the
+ // MCVD path.
void RunWhenBufferIsAvailable(base::OnceClosure callback) override;
- const AImageReader* image_reader_for_testing() const { return image_reader_; }
+ const AImageReader* image_reader_for_testing() const
+ NO_THREAD_SAFETY_ANALYSIS {
+ return image_reader_;
+ }
int32_t max_images_for_testing() const { return max_images_; }
protected:
@@ -87,19 +93,26 @@ class GPU_GLES2_EXPORT ImageReaderGLOwner : public TextureOwner {
// Registers and releases a ref on the image. Once the ref-count for an image
// goes to 0, it is released back to the AImageReader with an optional release
// fence if needed.
- void RegisterRefOnImage(AImage* image);
+ void RegisterRefOnImageLocked(AImage* image);
+ void ReleaseRefOnImageLocked(AImage* image, base::ScopedFD fence_fd);
+
+ // This method acquires |lock_| and calls ReleaseRefOnImageLocked().
void ReleaseRefOnImage(AImage* image, base::ScopedFD fence_fd);
- gfx::Rect GetCropRect();
+ gfx::Rect GetCropRectLocked();
static void OnFrameAvailable(void* context, AImageReader* reader);
- // AImageReader instance
- AImageReader* image_reader_;
+ // All members that can be accessed concurrently from multiple threads are
+ // guarded by |lock_|.
+ mutable base::Lock lock_;
+
+ // AImageReader instance.
+ AImageReader* image_reader_ GUARDED_BY(lock_);
// Most recently acquired image using image reader. This works like a cached
// image until next new image is acquired which overwrites this.
- absl::optional<ScopedCurrentImageRef> current_image_ref_;
+ absl::optional<ScopedCurrentImageRef> current_image_ref_ GUARDED_BY(lock_);
std::unique_ptr<AImageReader_ImageListener> listener_;
// A map consisting of pending refs on an AImage. If an image has any refs, it
@@ -117,7 +130,7 @@ class GPU_GLES2_EXPORT ImageReaderGLOwner : public TextureOwner {
DISALLOW_COPY_AND_ASSIGN(ImageRef);
};
using AImageRefMap = base::flat_map<AImage*, ImageRef>;
- AImageRefMap image_refs_;
+ AImageRefMap image_refs_ GUARDED_BY(lock_);
// reference to the class instance which is used to dynamically
// load the functions in android libraries at runtime.
@@ -134,9 +147,10 @@ class GPU_GLES2_EXPORT ImageReaderGLOwner : public TextureOwner {
base::RepeatingClosure frame_available_cb_;
// Runs when free buffer is available.
- base::OnceClosure buffer_available_cb_;
+ base::OnceClosure buffer_available_cb_ GUARDED_BY(lock_);
- THREAD_CHECKER(thread_checker_);
+ // This class is created on gpu main thread.
+ THREAD_CHECKER(gpu_main_thread_checker_);
base::WeakPtrFactory<ImageReaderGLOwner> weak_factory_{this};
diff --git a/chromium/gpu/command_buffer/service/mock_texture_owner.cc b/chromium/gpu/command_buffer/service/mock_texture_owner.cc
index 88c85532a22..1a1a89b9f3f 100644
--- a/chromium/gpu/command_buffer/service/mock_texture_owner.cc
+++ b/chromium/gpu/command_buffer/service/mock_texture_owner.cc
@@ -19,14 +19,10 @@ MockTextureOwner::MockTextureOwner(GLuint fake_texture_id,
: TextureOwner(binds_texture_on_update,
std::make_unique<MockAbstractTexture>(fake_texture_id)),
fake_context(fake_context),
- fake_surface(fake_surface),
- expect_update_tex_image(!binds_texture_on_update) {
+ fake_surface(fake_surface) {
ON_CALL(*this, GetTextureId()).WillByDefault(Return(fake_texture_id));
ON_CALL(*this, GetContext()).WillByDefault(Return(fake_context));
ON_CALL(*this, GetSurface()).WillByDefault(Return(fake_surface));
- ON_CALL(*this, EnsureTexImageBound(_)).WillByDefault(Invoke([this] {
- CHECK(expect_update_tex_image);
- }));
ON_CALL(*this, RunWhenBufferIsAvailable(_))
.WillByDefault(Invoke([](base::OnceClosure cb) { std::move(cb).Run(); }));
}
diff --git a/chromium/gpu/command_buffer/service/mock_texture_owner.h b/chromium/gpu/command_buffer/service/mock_texture_owner.h
index 49c0023451f..838d0119427 100644
--- a/chromium/gpu/command_buffer/service/mock_texture_owner.h
+++ b/chromium/gpu/command_buffer/service/mock_texture_owner.h
@@ -51,7 +51,6 @@ class MockTextureOwner : public TextureOwner {
gl::GLContext* fake_context;
gl::GLSurface* fake_surface;
int get_a_hardware_buffer_count = 0;
- bool expect_update_tex_image;
protected:
~MockTextureOwner();
diff --git a/chromium/gpu/command_buffer/service/program_cache.cc b/chromium/gpu/command_buffer/service/program_cache.cc
index 50858c6b082..f113d475b22 100644
--- a/chromium/gpu/command_buffer/service/program_cache.cc
+++ b/chromium/gpu/command_buffer/service/program_cache.cc
@@ -11,7 +11,7 @@
#include "base/metrics/histogram_macros.h"
#include "gpu/command_buffer/service/shader_manager.h"
-#include "third_party/angle/src/common/version.h"
+#include "third_party/angle/src/common/angle_version.h"
namespace gpu {
namespace gles2 {
diff --git a/chromium/gpu/command_buffer/service/program_manager.cc b/chromium/gpu/command_buffer/service/program_manager.cc
index e3985f92c20..ae6d27d563e 100644
--- a/chromium/gpu/command_buffer/service/program_manager.cc
+++ b/chromium/gpu/command_buffer/service/program_manager.cc
@@ -15,10 +15,10 @@
#include <vector>
#include "base/command_line.h"
+#include "base/cxx17_backports.h"
#include "base/logging.h"
#include "base/metrics/histogram_macros.h"
#include "base/numerics/safe_math.h"
-#include "base/stl_util.h"
#include "base/strings/string_number_conversions.h"
#include "base/strings/string_util.h"
#include "base/time/time.h"
diff --git a/chromium/gpu/command_buffer/service/program_manager_unittest.cc b/chromium/gpu/command_buffer/service/program_manager_unittest.cc
index f627ad85ba0..696f66a2db0 100644
--- a/chromium/gpu/command_buffer/service/program_manager_unittest.cc
+++ b/chromium/gpu/command_buffer/service/program_manager_unittest.cc
@@ -11,7 +11,7 @@
#include <memory>
#include "base/command_line.h"
-#include "base/stl_util.h"
+#include "base/cxx17_backports.h"
#include "base/strings/string_number_conversions.h"
#include "base/strings/string_util.h"
#include "gpu/command_buffer/common/gles2_cmd_format.h"
diff --git a/chromium/gpu/command_buffer/service/raster_cmd_validation.cc b/chromium/gpu/command_buffer/service/raster_cmd_validation.cc
index ec66e6ba740..676ddcf0dce 100644
--- a/chromium/gpu/command_buffer/service/raster_cmd_validation.cc
+++ b/chromium/gpu/command_buffer/service/raster_cmd_validation.cc
@@ -6,7 +6,7 @@
#include "gpu/command_buffer/service/raster_cmd_validation.h"
-#include "base/stl_util.h"
+#include "base/cxx17_backports.h"
#include "gpu/command_buffer/service/gl_utils.h"
namespace gpu {
diff --git a/chromium/gpu/command_buffer/service/raster_cmd_validation_autogen.h b/chromium/gpu/command_buffer/service/raster_cmd_validation_autogen.h
index 0ee7672f99f..0de31567300 100644
--- a/chromium/gpu/command_buffer/service/raster_cmd_validation_autogen.h
+++ b/chromium/gpu/command_buffer/service/raster_cmd_validation_autogen.h
@@ -35,6 +35,12 @@ ValueValidator<GLenum> texture_min_filter_mode;
ValueValidator<GLenum> texture_parameter;
ValueValidator<GLenum> texture_wrap_mode;
ValueValidator<gfx::BufferUsage> gfx_buffer_usage;
+class GpuRasterMsaaModeValidator {
+ public:
+ bool IsValid(const gpu::raster::MsaaMode value) const;
+};
+GpuRasterMsaaModeValidator gpu_raster_msaa_mode;
+
ValueValidator<viz::ResourceFormat> viz_resource_format;
#endif // GPU_COMMAND_BUFFER_SERVICE_RASTER_CMD_VALIDATION_AUTOGEN_H_
diff --git a/chromium/gpu/command_buffer/service/raster_cmd_validation_implementation_autogen.h b/chromium/gpu/command_buffer/service/raster_cmd_validation_implementation_autogen.h
index db3a13f869c..4c34cae7a30 100644
--- a/chromium/gpu/command_buffer/service/raster_cmd_validation_implementation_autogen.h
+++ b/chromium/gpu/command_buffer/service/raster_cmd_validation_implementation_autogen.h
@@ -71,6 +71,17 @@ static const gfx::BufferUsage valid_gfx_buffer_usage_table[] = {
gfx::BufferUsage::GPU_READ_CPU_READ_WRITE,
};
+bool Validators::GpuRasterMsaaModeValidator::IsValid(
+ const gpu::raster::MsaaMode value) const {
+ switch (value) {
+ case gpu::raster::MsaaMode::kNoMSAA:
+ case gpu::raster::MsaaMode::kMSAA:
+ case gpu::raster::MsaaMode::kDMSAA:
+ return true;
+ }
+ return false;
+}
+
static const viz::ResourceFormat valid_viz_resource_format_table[] = {
viz::ResourceFormat::RGBA_8888, viz::ResourceFormat::RGBA_4444,
viz::ResourceFormat::BGRA_8888, viz::ResourceFormat::ALPHA_8,
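
The generated validator follows the usual pattern for enums that arrive over
the command buffer: an exhaustive switch with no default, so the compiler's
-Wswitch warning flags any newly added enumerator, while values cast from
untrusted wire data fall through to `return false`. A standalone sketch
(using a scoped enum purely for illustration):

enum class MsaaMode { kNoMSAA, kMSAA, kDMSAA };

bool IsValidMsaaMode(MsaaMode value) {
  switch (value) {
    case MsaaMode::kNoMSAA:
    case MsaaMode::kMSAA:
    case MsaaMode::kDMSAA:
      return true;
  }
  // Reached only for out-of-range values cast from raw command data. Keeping
  // the switch default-free lets the compiler warn when a new enumerator is
  // added without updating the validator.
  return false;
}
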
diff --git a/chromium/gpu/command_buffer/service/raster_decoder.cc b/chromium/gpu/command_buffer/service/raster_decoder.cc
index 71c42600f8b..ba87b342e82 100644
--- a/chromium/gpu/command_buffer/service/raster_decoder.cc
+++ b/chromium/gpu/command_buffer/service/raster_decoder.cc
@@ -16,12 +16,13 @@
#include "base/bind.h"
#include "base/bits.h"
#include "base/containers/flat_map.h"
+#include "base/cxx17_backports.h"
#include "base/debug/crash_logging.h"
#include "base/logging.h"
#include "base/memory/ref_counted.h"
#include "base/memory/weak_ptr.h"
#include "base/numerics/checked_math.h"
-#include "base/stl_util.h"
+#include "base/time/time.h"
#include "base/trace_event/trace_event.h"
#include "build/build_config.h"
#include "cc/paint/paint_cache.h"
@@ -304,6 +305,107 @@ class SharedImageProviderImpl final : public cc::SharedImageProvider {
base::flat_map<gpu::Mailbox, SharedImageReadAccess> read_accessors_;
};
+class RasterCommandsCompletedQuery : public QueryManager::Query {
+ public:
+ RasterCommandsCompletedQuery(
+ scoped_refptr<SharedContextState> shared_context_state,
+ QueryManager* manager,
+ GLenum target,
+ scoped_refptr<gpu::Buffer> buffer,
+ QuerySync* sync)
+ : Query(manager, target, std::move(buffer), sync),
+ shared_context_state_(std::move(shared_context_state)) {}
+
+ // Overridden from QueryManager::Query:
+ void Begin() override {
+ DCHECK(!begin_time_);
+ MarkAsActive();
+ begin_time_.emplace(base::TimeTicks::Now());
+ }
+
+ void End(base::subtle::Atomic32 submit_count) override {
+ DCHECK(begin_time_);
+
+ AddToPendingQueue(submit_count);
+ finished_ = false;
+
+ auto* gr_context = shared_context_state_->gr_context();
+ GrFlushInfo info;
+ info.fFinishedProc = RasterCommandsCompletedQuery::FinishedProc;
+ auto weak_ptr = weak_ptr_factory_.GetWeakPtr();
+ info.fFinishedContext =
+ new base::WeakPtr<RasterCommandsCompletedQuery>(weak_ptr);
+ gr_context->flush(info);
+ }
+
+ void QueryCounter(base::subtle::Atomic32 submit_count) override {
+ NOTREACHED();
+ }
+
+ void Pause() override { MarkAsPaused(); }
+
+ void Resume() override { MarkAsActive(); }
+
+ void Process(bool did_finish) override {
+ DCHECK(begin_time_);
+ if (did_finish || finished_) {
+ const base::TimeDelta elapsed = base::TimeTicks::Now() - *begin_time_;
+ MarkAsCompleted(elapsed.InMicroseconds());
+ begin_time_.reset();
+ }
+ }
+
+ void Destroy(bool have_context) override {
+ if (!IsDeleted())
+ MarkAsDeleted();
+ }
+
+ protected:
+ ~RasterCommandsCompletedQuery() override = default;
+
+ private:
+ static void FinishedProc(void* context) {
+ auto* weak_ptr =
+ reinterpret_cast<base::WeakPtr<RasterCommandsCompletedQuery>*>(context);
+ if (*weak_ptr)
+ (*weak_ptr)->finished_ = true;
+ delete weak_ptr;
+ }
+
+ const scoped_refptr<SharedContextState> shared_context_state_;
+ absl::optional<base::TimeTicks> begin_time_;
+ bool finished_ = false;
+ base::WeakPtrFactory<RasterCommandsCompletedQuery> weak_ptr_factory_{this};
+};
+
+class RasterQueryManager : public QueryManager {
+ public:
+ explicit RasterQueryManager(
+ scoped_refptr<SharedContextState> shared_context_state)
+ : shared_context_state_(std::move(shared_context_state)) {}
+ ~RasterQueryManager() override = default;
+
+ Query* CreateQuery(GLenum target,
+ GLuint client_id,
+ scoped_refptr<gpu::Buffer> buffer,
+ QuerySync* sync) override {
+ if (target == GL_COMMANDS_COMPLETED_CHROMIUM &&
+ shared_context_state_->gr_context()) {
+ auto query = base::MakeRefCounted<RasterCommandsCompletedQuery>(
+ shared_context_state_, this, target, std::move(buffer), sync);
+ std::pair<QueryMap::iterator, bool> result =
+ queries_.insert(std::make_pair(client_id, query));
+ DCHECK(result.second);
+ return query.get();
+ }
+ return QueryManager::CreateQuery(target, client_id, std::move(buffer),
+ sync);
+ }
+
+ private:
+ const scoped_refptr<SharedContextState> shared_context_state_;
+};
+
} // namespace
// RasterDecoderImpl uses two separate state trackers (gpu::gles2::ContextState
@@ -641,6 +743,7 @@ class RasterDecoderImpl final : public RasterDecoder,
void DoBeginRasterCHROMIUM(GLuint sk_color,
GLboolean needs_clear,
GLuint msaa_sample_count,
+ MsaaMode msaa_mode,
GLboolean can_use_lcd_text,
const volatile GLbyte* key);
void DoRasterCHROMIUM(GLuint raster_shm_id,
@@ -761,6 +864,7 @@ class RasterDecoderImpl final : public RasterDecoder,
static const CommandInfo command_info[kNumCommands - kFirstRasterCommand];
const int raster_decoder_id_;
+ const bool disable_legacy_mailbox_;
// Number of commands remaining to be processed in DoCommands().
int commands_to_process_ = 0;
@@ -769,6 +873,7 @@ class RasterDecoderImpl final : public RasterDecoder,
bool supports_oop_raster_ = false;
bool use_passthrough_ = false;
bool use_ddl_ = false;
+ bool use_ddl_in_current_raster_session_ = false;
// The current decoder error communicates the decoder error through command
// processing functions that do not return the error value. Should be set
@@ -786,7 +891,7 @@ class RasterDecoderImpl final : public RasterDecoder,
std::unique_ptr<Validators> validators_;
SharedImageRepresentationFactory shared_image_representation_factory_;
- std::unique_ptr<QueryManager> query_manager_;
+ std::unique_ptr<RasterQueryManager> query_manager_;
gles2::GLES2Util util_;
@@ -921,6 +1026,9 @@ RasterDecoderImpl::RasterDecoderImpl(
bool is_privileged)
: RasterDecoder(client, command_buffer_service, outputter),
raster_decoder_id_(g_raster_decoder_id.GetNext() + 1),
+ disable_legacy_mailbox_(
+ shared_image_manager &&
+ shared_image_manager->display_context_on_another_thread()),
supports_gpu_raster_(
gpu_feature_info.status_values[GPU_FEATURE_TYPE_GPU_RASTERIZATION] ==
kGpuFeatureStatusEnabled),
@@ -991,7 +1099,7 @@ ContextResult RasterDecoderImpl::Initialize(
CHECK_GL_ERROR();
- query_manager_ = std::make_unique<QueryManager>();
+ query_manager_ = std::make_unique<RasterQueryManager>(shared_context_state_);
if (attrib_helper.enable_oop_rasterization) {
if (!features().chromium_raster_transport) {
@@ -1172,6 +1280,7 @@ Capabilities RasterDecoderImpl::GetCapabilities() {
caps.shared_image_swap_chain =
SharedImageBackingFactoryD3D::IsSwapChainSupported();
#endif // OS_WIN
+ caps.disable_legacy_mailbox = disable_legacy_mailbox_;
return caps;
}
@@ -1821,15 +1930,17 @@ error::Error RasterDecoderImpl::HandleQueryCounterEXT(
}
void RasterDecoderImpl::DoFinish() {
- if (shared_context_state_->GrContextIsGL())
- api()->glFinishFn();
- ProcessPendingQueries(true);
+ if (auto* gr_context = shared_context_state_->gr_context()) {
+ gr_context->flushAndSubmit(/*syncCpu=*/true);
+ }
+ ProcessPendingQueries(/*did_finish=*/true);
}
void RasterDecoderImpl::DoFlush() {
- if (shared_context_state_->GrContextIsGL())
- api()->glFlushFn();
- ProcessPendingQueries(false);
+ if (auto* gr_context = shared_context_state_->gr_context()) {
+ gr_context->flushAndSubmit(/*syncCpu=*/false);
+ }
+ ProcessPendingQueries(/*did_finish=*/false);
}
bool RasterDecoderImpl::GenQueriesEXTHelper(GLsizei n,
@@ -2715,33 +2826,6 @@ void RasterDecoderImpl::DoReadbackARGBImagePixelsINTERNAL(
return;
}
- std::vector<GrBackendSemaphore> begin_semaphores;
-
- std::unique_ptr<SharedImageRepresentationSkia::ScopedReadAccess>
- source_scoped_access = source_shared_image->BeginScopedReadAccess(
- &begin_semaphores, nullptr);
-
- if (!begin_semaphores.empty()) {
- bool result = shared_context_state_->gr_context()->wait(
- begin_semaphores.size(), begin_semaphores.data(),
- /*deleteSemaphoresAfterWait=*/false);
- DCHECK(result);
- }
-
- if (!source_scoped_access) {
- LOCAL_SET_GL_ERROR(GL_INVALID_VALUE, "glReadbackImagePixels",
- "Source shared image is not accessible");
- return;
- }
-
- auto sk_image =
- source_scoped_access->CreateSkImage(shared_context_state_->gr_context());
- if (!sk_image) {
- LOCAL_SET_GL_ERROR(GL_INVALID_OPERATION, "glReadbackImagePixels",
- "Couldn't create SkImage for reading.");
- return;
- }
-
size_t byte_size = dst_info.computeByteSize(row_bytes);
if (byte_size > UINT32_MAX) {
LOCAL_SET_GL_ERROR(
@@ -2767,6 +2851,47 @@ void RasterDecoderImpl::DoReadbackARGBImagePixelsINTERNAL(
return;
}
+ std::vector<GrBackendSemaphore> begin_semaphores;
+ std::vector<GrBackendSemaphore> end_semaphores;
+
+ std::unique_ptr<SharedImageRepresentationSkia::ScopedReadAccess>
+ source_scoped_access = source_shared_image->BeginScopedReadAccess(
+ &begin_semaphores, &end_semaphores);
+
+ if (!source_scoped_access) {
+ LOCAL_SET_GL_ERROR(GL_INVALID_VALUE, "glReadbackImagePixels",
+ "Source shared image is not accessible");
+ return;
+ }
+
+ if (!begin_semaphores.empty()) {
+ bool wait_result = shared_context_state_->gr_context()->wait(
+ begin_semaphores.size(), begin_semaphores.data(),
+ /*deleteSemaphoresAfterWait=*/false);
+ DCHECK(wait_result);
+ }
+
+ if (!end_semaphores.empty()) {
+ // Ask skia to signal |end_semaphores| here, since we will synchronously
+ // read pixels from the shared image.
+ GrFlushInfo flush_info = {
+ .fNumSemaphores = end_semaphores.size(),
+ .fSignalSemaphores = end_semaphores.data(),
+ };
+ AddVulkanCleanupTaskForSkiaFlush(
+ shared_context_state_->vk_context_provider(), &flush_info);
+ auto flush_result = shared_context_state_->gr_context()->flush(flush_info);
+ DCHECK(flush_result == GrSemaphoresSubmitted::kYes);
+ }
+
+ auto sk_image =
+ source_scoped_access->CreateSkImage(shared_context_state_->gr_context());
+ if (!sk_image) {
+ LOCAL_SET_GL_ERROR(GL_INVALID_OPERATION, "glReadbackImagePixels",
+ "Couldn't create SkImage for reading.");
+ return;
+ }
+
bool success =
sk_image->readPixels(dst_info, pixel_address, row_bytes, src_x, src_y);
if (!success) {
@@ -3110,7 +3235,10 @@ void RasterDecoderImpl::DoConvertYUVAMailboxesToRGBINTERNAL(
GL_INVALID_OPERATION, "glConvertYUVAMailboxesToRGB",
"Couldn't create destination images from provided sources");
} else {
- dest_surface->getCanvas()->drawImage(result_image, 0, 0);
+ SkPaint paint;
+ paint.setBlendMode(SkBlendMode::kSrc);
+ dest_surface->getCanvas()->drawImage(result_image, 0, 0,
+ SkSamplingOptions(), &paint);
drew_image = true;
}
}
@@ -3204,6 +3332,7 @@ void RasterDecoderImpl::DoClearPaintCacheINTERNAL() {
void RasterDecoderImpl::DoBeginRasterCHROMIUM(GLuint sk_color,
GLboolean needs_clear,
GLuint msaa_sample_count,
+ MsaaMode msaa_mode,
GLboolean can_use_lcd_text,
const volatile GLbyte* key) {
// Workaround for https://crbug.com/906453: Flush before BeginRaster (the
@@ -3236,25 +3365,55 @@ void RasterDecoderImpl::DoBeginRasterCHROMIUM(GLuint sk_color,
return;
}
+ // This check only fails on the validating decoder, since clear tracking
+ // for passthrough textures is done by ANGLE. Nonetheless the check is
+ // important so that clients cannot use uninitialized textures with the
+ // validating decoder.
+ if (!needs_clear && !shared_image_->IsCleared()) {
+ LOCAL_SET_GL_ERROR(GL_INVALID_OPERATION, "glBeginRasterCHROMIUM",
+ "SharedImage not cleared before use.");
+ shared_image_.reset();
+ return;
+ }
+
DCHECK(locked_handles_.empty());
DCHECK(!raster_canvas_);
shared_context_state_->set_need_context_state_reset(true);
+ SkColorType sk_color_type = viz::ResourceFormatToClosestSkColorType(
+ /*gpu_compositing=*/true, shared_image_->format());
+
+ int final_msaa_count;
+ uint32_t flags;
+ switch (msaa_mode) {
+ default:
+ case kNoMSAA:
+ final_msaa_count = 0;
+ flags = 0;
+ use_ddl_in_current_raster_session_ = use_ddl_;
+ break;
+ case kMSAA:
+ // If we can't match requested MSAA samples, don't use MSAA.
+ final_msaa_count = std::max(static_cast<int>(msaa_sample_count), 0);
+ if (final_msaa_count >
+ gr_context()->maxSurfaceSampleCountForColorType(sk_color_type))
+ final_msaa_count = 0;
+ flags = 0;
+ use_ddl_in_current_raster_session_ = use_ddl_;
+ break;
+ case kDMSAA:
+ final_msaa_count = 1;
+ flags = SkSurfaceProps::kDynamicMSAA_Flag;
+ // DMSAA is not compatible with DDL
+ use_ddl_in_current_raster_session_ = false;
+ break;
+ }
+
// Use unknown pixel geometry to disable LCD text.
- uint32_t flags = 0;
SkSurfaceProps surface_props(flags, kUnknown_SkPixelGeometry);
if (can_use_lcd_text) {
surface_props = skia::LegacyDisplayGlobals::GetSkSurfaceProps(flags);
}
- SkColorType sk_color_type = viz::ResourceFormatToClosestSkColorType(
- /*gpu_compositing=*/true, shared_image_->format());
- // If we can't match requested MSAA samples, don't use MSAA.
- int final_msaa_count = std::max(static_cast<int>(msaa_sample_count), 0);
- if (final_msaa_count >
- gr_context()->maxSurfaceSampleCountForColorType(sk_color_type))
- final_msaa_count = 0;
-
std::vector<GrBackendSemaphore> begin_semaphores;
DCHECK(end_semaphores_.empty());
DCHECK(!scoped_shared_image_write_);
@@ -3279,7 +3438,7 @@ void RasterDecoderImpl::DoBeginRasterCHROMIUM(GLuint sk_color,
DCHECK(result);
}
- if (use_ddl_) {
+ if (use_ddl_in_current_raster_session_) {
SkSurfaceCharacterization characterization;
bool result = sk_surface_->characterize(&characterization);
DCHECK(result) << "Failed to characterize raster SkSurface.";
@@ -3399,7 +3558,7 @@ void RasterDecoderImpl::DoRasterCHROMIUM(GLuint raster_shm_id,
}
bool RasterDecoderImpl::EnsureDDLReadyForRaster() {
- DCHECK(use_ddl_);
+ DCHECK(use_ddl_in_current_raster_session_);
DCHECK_EQ(current_decoder_error_, error::kNoError);
if (!ddl_) {
@@ -3436,7 +3595,7 @@ void RasterDecoderImpl::DoEndRasterCHROMIUM() {
shared_context_state_->set_need_context_state_reset(true);
raster_canvas_ = nullptr;
- if (use_ddl_) {
+ if (use_ddl_in_current_raster_session_) {
if (!EnsureDDLReadyForRaster()) {
// This decoder error indicates that this command has not finished
// executing. The decoder will yield and re-execute this command when it
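
RasterCommandsCompletedQuery above hands Skia a heap-allocated base::WeakPtr
as the fFinishedContext: the proc deletes it after use, and the WeakPtr guards
against the query being destroyed before the GPU work completes. A reduced
sketch of that ownership pattern (hypothetical GpuWorkTracker; assumes, as
the decoder does, that the finished proc runs on the thread that owns the
WeakPtr):

#include "base/memory/weak_ptr.h"
#include "third_party/skia/include/gpu/GrDirectContext.h"

class GpuWorkTracker {
 public:
  void FlushAndWatch(GrDirectContext* gr_context) {
    GrFlushInfo info;
    info.fFinishedProc = &GpuWorkTracker::OnFinished;
    // Skia owns this WeakPtr copy until the proc runs; the proc deletes it.
    info.fFinishedContext =
        new base::WeakPtr<GpuWorkTracker>(weak_factory_.GetWeakPtr());
    gr_context->flush(info);
  }

  bool finished() const { return finished_; }

 private:
  static void OnFinished(void* context) {
    auto* weak = static_cast<base::WeakPtr<GpuWorkTracker>*>(context);
    if (*weak)
      (*weak)->finished_ = true;  // Safely skipped if the tracker is gone.
    delete weak;
  }

  bool finished_ = false;
  base::WeakPtrFactory<GpuWorkTracker> weak_factory_{this};
};
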
diff --git a/chromium/gpu/command_buffer/service/raster_decoder_autogen.h b/chromium/gpu/command_buffer/service/raster_decoder_autogen.h
index 55b562b89a3..610a1dccff3 100644
--- a/chromium/gpu/command_buffer/service/raster_decoder_autogen.h
+++ b/chromium/gpu/command_buffer/service/raster_decoder_autogen.h
@@ -114,6 +114,8 @@ error::Error RasterDecoderImpl::HandleBeginRasterCHROMIUMImmediate(
GLuint sk_color = static_cast<GLuint>(c.sk_color);
GLboolean needs_clear = static_cast<GLboolean>(c.needs_clear);
GLuint msaa_sample_count = static_cast<GLuint>(c.msaa_sample_count);
+ gpu::raster::MsaaMode msaa_mode =
+ static_cast<gpu::raster::MsaaMode>(c.msaa_mode);
GLboolean can_use_lcd_text = static_cast<GLboolean>(c.can_use_lcd_text);
uint32_t mailbox_size;
if (!gles2::GLES2Util::ComputeDataSize<GLbyte, 16>(1, &mailbox_size)) {
@@ -128,7 +130,7 @@ error::Error RasterDecoderImpl::HandleBeginRasterCHROMIUMImmediate(
if (mailbox == nullptr) {
return error::kOutOfBounds;
}
- DoBeginRasterCHROMIUM(sk_color, needs_clear, msaa_sample_count,
+ DoBeginRasterCHROMIUM(sk_color, needs_clear, msaa_sample_count, msaa_mode,
can_use_lcd_text, mailbox);
return error::kNoError;
}
diff --git a/chromium/gpu/command_buffer/service/raster_decoder_unittest_1_autogen.h b/chromium/gpu/command_buffer/service/raster_decoder_unittest_1_autogen.h
index f6c322fe056..99524a831de 100644
--- a/chromium/gpu/command_buffer/service/raster_decoder_unittest_1_autogen.h
+++ b/chromium/gpu/command_buffer/service/raster_decoder_unittest_1_autogen.h
@@ -12,24 +12,6 @@
#ifndef GPU_COMMAND_BUFFER_SERVICE_RASTER_DECODER_UNITTEST_1_AUTOGEN_H_
#define GPU_COMMAND_BUFFER_SERVICE_RASTER_DECODER_UNITTEST_1_AUTOGEN_H_
-TEST_P(RasterDecoderTest1, FinishValidArgs) {
- EXPECT_CALL(*gl_, Finish());
- SpecializedSetup<cmds::Finish, 0>(true);
- cmds::Finish cmd;
- cmd.Init();
- EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
- EXPECT_EQ(GL_NO_ERROR, GetGLError());
-}
-
-TEST_P(RasterDecoderTest1, FlushValidArgs) {
- EXPECT_CALL(*gl_, Flush());
- SpecializedSetup<cmds::Flush, 0>(true);
- cmds::Flush cmd;
- cmd.Init();
- EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
- EXPECT_EQ(GL_NO_ERROR, GetGLError());
-}
-
TEST_P(RasterDecoderTest1, GetErrorValidArgs) {
EXPECT_CALL(*gl_, GetError());
SpecializedSetup<cmds::GetError, 0>(true);
diff --git a/chromium/gpu/command_buffer/service/ref_counted_lock.cc b/chromium/gpu/command_buffer/service/ref_counted_lock.cc
new file mode 100644
index 00000000000..dfd4535e93a
--- /dev/null
+++ b/chromium/gpu/command_buffer/service/ref_counted_lock.cc
@@ -0,0 +1,22 @@
+// Copyright 2021 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "gpu/command_buffer/service/ref_counted_lock.h"
+
+#include "gpu/config/gpu_finch_features.h"
+
+namespace gpu {
+
+RefCountedLockHelperDrDc::RefCountedLockHelperDrDc(
+ scoped_refptr<RefCountedLock> lock)
+ : lock_(std::move(lock)) {
+ // |lock_| should be present if the DrDc feature is enabled, and it should
+ // not be present if the feature is disabled.
+ DCHECK((features::IsDrDcEnabled() && lock_) ||
+ (!features::IsDrDcEnabled() && !lock_));
+}
+
+RefCountedLockHelperDrDc::~RefCountedLockHelperDrDc() = default;
+
+} // namespace gpu
diff --git a/chromium/gpu/command_buffer/service/ref_counted_lock.h b/chromium/gpu/command_buffer/service/ref_counted_lock.h
new file mode 100644
index 00000000000..b9452a3438b
--- /dev/null
+++ b/chromium/gpu/command_buffer/service/ref_counted_lock.h
@@ -0,0 +1,61 @@
+// Copyright 2021 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef GPU_COMMAND_BUFFER_SERVICE_REF_COUNTED_LOCK_H_
+#define GPU_COMMAND_BUFFER_SERVICE_REF_COUNTED_LOCK_H_
+
+#include "base/memory/ref_counted.h"
+#include "base/synchronization/lock.h"
+#include "gpu/gpu_gles2_export.h"
+
+namespace gpu {
+
+// Ref counted wrapper for base::Lock.
+class GPU_GLES2_EXPORT RefCountedLock
+ : public base::RefCountedThreadSafe<RefCountedLock> {
+ public:
+ RefCountedLock() = default;
+
+ // Disallow copy and assign.
+ RefCountedLock(const RefCountedLock&) = delete;
+ RefCountedLock& operator=(const RefCountedLock&) = delete;
+
+ base::Lock* GetDrDcLockPtr() { return &lock_; }
+ void AssertAcquired() { lock_.AssertAcquired(); }
+
+ private:
+ friend class base::RefCountedThreadSafe<RefCountedLock>;
+ ~RefCountedLock() = default;
+
+ base::Lock lock_;
+};
+
+// Helper class for handling RefCountedLock for drdc usage.
+class GPU_GLES2_EXPORT RefCountedLockHelperDrDc {
+ public:
+ explicit RefCountedLockHelperDrDc(scoped_refptr<RefCountedLock> lock);
+ ~RefCountedLockHelperDrDc();
+
+ base::Lock* GetDrDcLockPtr() const {
+ return lock_ ? lock_->GetDrDcLockPtr() : nullptr;
+ }
+
+ const scoped_refptr<RefCountedLock>& GetDrDcLock() { return lock_; }
+
+ void AssertAcquiredDrDcLock() const {
+ if (lock_)
+ lock_->AssertAcquired();
+ }
+
+ std::unique_ptr<base::AutoLockMaybe> GetScopedDrDcLock() const {
+ return std::make_unique<base::AutoLockMaybe>(GetDrDcLockPtr());
+ }
+
+ private:
+ mutable scoped_refptr<RefCountedLock> lock_;
+};
+
+} // namespace gpu
+
+#endif // GPU_COMMAND_BUFFER_SERVICE_REF_COUNTED_LOCK_H_
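
For a sense of how the new helper is meant to be consumed, a hypothetical
class that may be touched from both gpu threads when DrDc is enabled
(CodecImageHolder is illustrative only, not part of this patch):

#include <utility>

#include "gpu/command_buffer/service/ref_counted_lock.h"

class CodecImageHolder : public gpu::RefCountedLockHelperDrDc {
 public:
  explicit CodecImageHolder(scoped_refptr<gpu::RefCountedLock> drdc_lock)
      : gpu::RefCountedLockHelperDrDc(std::move(drdc_lock)) {}

  void Update() {
    // base::AutoLockMaybe accepts a null Lock*, so this locks when DrDc is
    // enabled (lock present) and is a no-op otherwise.
    auto scoped_lock = GetScopedDrDcLock();
    // ... mutate state that both gpu threads may share ...
  }
};
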
diff --git a/chromium/gpu/command_buffer/service/scheduler.cc b/chromium/gpu/command_buffer/service/scheduler.cc
index 60c2794842e..2d8cae18cc1 100644
--- a/chromium/gpu/command_buffer/service/scheduler.cc
+++ b/chromium/gpu/command_buffer/service/scheduler.cc
@@ -14,7 +14,6 @@
#include "base/logging.h"
#include "base/metrics/histogram_macros.h"
#include "base/single_thread_task_runner.h"
-#include "base/stl_util.h"
#include "base/threading/thread_task_runner_handle.h"
#include "base/time/time.h"
#include "base/timer/elapsed_timer.h"
@@ -95,14 +94,15 @@ Scheduler::PerThreadState::~PerThreadState() = default;
Scheduler::PerThreadState& Scheduler::PerThreadState::operator=(
PerThreadState&& other) = default;
-Scheduler::Sequence::Sequence(Scheduler* scheduler,
- SequenceId sequence_id,
- base::PlatformThreadId thread_id,
- SchedulingPriority priority,
- scoped_refptr<SyncPointOrderData> order_data)
+Scheduler::Sequence::Sequence(
+ Scheduler* scheduler,
+ SequenceId sequence_id,
+ scoped_refptr<base::SingleThreadTaskRunner> task_runner,
+ SchedulingPriority priority,
+ scoped_refptr<SyncPointOrderData> order_data)
: scheduler_(scheduler),
sequence_id_(sequence_id),
- thread_id_(thread_id),
+ task_runner_(std::move(task_runner)),
default_priority_(priority),
current_priority_(priority),
order_data_(std::move(order_data)) {}
@@ -154,7 +154,7 @@ bool Scheduler::Sequence::IsRunnable() const {
}
bool Scheduler::Sequence::ShouldYieldTo(const Sequence* other) const {
- if (thread_id() != other->thread_id())
+ if (task_runner() != other->task_runner())
return false;
if (!running() || !other->scheduled())
return false;
@@ -166,11 +166,13 @@ void Scheduler::Sequence::SetEnabled(bool enabled) {
return;
enabled_ = enabled;
if (enabled) {
- TRACE_EVENT_ASYNC_BEGIN1("gpu", "SequenceEnabled", this, "sequence_id",
- sequence_id_.GetUnsafeValue());
+ TRACE_EVENT_NESTABLE_ASYNC_BEGIN1("gpu", "SequenceEnabled",
+ TRACE_ID_LOCAL(this), "sequence_id",
+ sequence_id_.GetUnsafeValue());
} else {
- TRACE_EVENT_ASYNC_END1("gpu", "SequenceEnabled", this, "sequence_id",
- sequence_id_.GetUnsafeValue());
+ TRACE_EVENT_NESTABLE_ASYNC_END1("gpu", "SequenceEnabled",
+ TRACE_ID_LOCAL(this), "sequence_id",
+ sequence_id_.GetUnsafeValue());
}
scheduler_->TryScheduleSequence(this);
}
@@ -393,20 +395,25 @@ Scheduler::~Scheduler() {
DCHECK(!per_thread_state.second.running);
}
-SequenceId Scheduler::CreateSequence(SchedulingPriority priority) {
+SequenceId Scheduler::CreateSequence(
+ SchedulingPriority priority,
+ scoped_refptr<base::SingleThreadTaskRunner> task_runner) {
base::AutoLock auto_lock(lock_);
scoped_refptr<SyncPointOrderData> order_data =
sync_point_manager_->CreateSyncPointOrderData();
SequenceId sequence_id = order_data->sequence_id();
- auto task_runner = base::ThreadTaskRunnerHandle::Get();
- auto thread_id = base::PlatformThread::CurrentId();
- auto sequence = std::make_unique<Sequence>(this, sequence_id, thread_id,
- priority, std::move(order_data));
+ auto sequence =
+ std::make_unique<Sequence>(this, sequence_id, std::move(task_runner),
+ priority, std::move(order_data));
sequence_map_.emplace(sequence_id, std::move(sequence));
- per_thread_state_map_[thread_id].task_runner = task_runner;
return sequence_id;
}
+SequenceId Scheduler::CreateSequenceForTesting(SchedulingPriority priority) {
+ // This will create the sequence on the thread on which this method is called.
+ return CreateSequence(priority, base::ThreadTaskRunnerHandle::Get());
+}
+
void Scheduler::DestroySequence(SequenceId sequence_id) {
base::circular_deque<Sequence::Task> tasks_to_be_destroyed;
{
@@ -415,7 +422,7 @@ void Scheduler::DestroySequence(SequenceId sequence_id) {
Sequence* sequence = GetSequence(sequence_id);
DCHECK(sequence);
if (sequence->scheduled()) {
- per_thread_state_map_[sequence->thread_id()].rebuild_scheduling_queue =
+ per_thread_state_map_[sequence->task_runner()].rebuild_scheduling_queue =
true;
}
@@ -479,7 +486,7 @@ void Scheduler::ScheduleTaskHelper(Task task) {
Sequence* sequence = GetSequence(sequence_id);
DCHECK(sequence);
- auto task_runner = per_thread_state_map_[sequence->thread_id()].task_runner;
+ auto* task_runner = sequence->task_runner();
uint32_t order_num = sequence->ScheduleTask(std::move(task.closure),
std::move(task.report_callback));
@@ -507,7 +514,7 @@ void Scheduler::ContinueTask(SequenceId sequence_id,
base::AutoLock auto_lock(lock_);
Sequence* sequence = GetSequence(sequence_id);
DCHECK(sequence);
- DCHECK_EQ(base::PlatformThread::CurrentId(), sequence->thread_id());
+ DCHECK(sequence->task_runner()->BelongsToCurrentThread());
sequence->ContinueTask(std::move(closure));
}
@@ -517,10 +524,10 @@ bool Scheduler::ShouldYield(SequenceId sequence_id) {
Sequence* running_sequence = GetSequence(sequence_id);
DCHECK(running_sequence);
DCHECK(running_sequence->running());
- DCHECK_EQ(base::PlatformThread::CurrentId(), running_sequence->thread_id());
+ DCHECK(running_sequence->task_runner()->BelongsToCurrentThread());
const auto& scheduling_queue =
- RebuildSchedulingQueueIfNeeded(running_sequence->thread_id());
+ RebuildSchedulingQueueIfNeeded(running_sequence->task_runner());
if (scheduling_queue.empty())
return false;
@@ -546,8 +553,8 @@ void Scheduler::SyncTokenFenceReleased(const SyncToken& sync_token,
void Scheduler::TryScheduleSequence(Sequence* sequence) {
lock_.AssertAcquired();
- auto thread_id = sequence->thread_id();
- auto& thread_state = per_thread_state_map_[thread_id];
+ auto* task_runner = sequence->task_runner();
+ auto& thread_state = per_thread_state_map_[task_runner];
if (sequence->running()) {
// Update priority of running sequence because of sync token releases.
@@ -557,30 +564,32 @@ void Scheduler::TryScheduleSequence(Sequence* sequence) {
// Rebuild scheduling queue if priority changed for a scheduled sequence.
DCHECK(thread_state.running);
DCHECK(sequence->IsRunnable());
- per_thread_state_map_[thread_id].rebuild_scheduling_queue = true;
+ per_thread_state_map_[task_runner].rebuild_scheduling_queue = true;
} else if (!sequence->scheduled() && sequence->IsRunnable()) {
// Insert into scheduling queue if sequence isn't already scheduled.
SchedulingState scheduling_state = sequence->SetScheduled();
- auto& scheduling_queue = per_thread_state_map_[thread_id].scheduling_queue;
+ auto& scheduling_queue =
+ per_thread_state_map_[task_runner].scheduling_queue;
scheduling_queue.push_back(scheduling_state);
std::push_heap(scheduling_queue.begin(), scheduling_queue.end(),
&SchedulingState::Comparator);
if (!thread_state.running) {
- TRACE_EVENT_NESTABLE_ASYNC_BEGIN0("gpu", "Scheduler::Running", this);
+ TRACE_EVENT_NESTABLE_ASYNC_BEGIN0("gpu", "Scheduler::Running",
+ TRACE_ID_LOCAL(this));
thread_state.running = true;
run_next_task_scheduled_ = base::TimeTicks::Now();
- thread_state.task_runner->PostTask(
- FROM_HERE,
- base::BindOnce(&Scheduler::RunNextTask, base::Unretained(this)));
+ task_runner->PostTask(FROM_HERE, base::BindOnce(&Scheduler::RunNextTask,
+ base::Unretained(this)));
}
}
}
std::vector<Scheduler::SchedulingState>&
-Scheduler::RebuildSchedulingQueueIfNeeded(base::PlatformThreadId thread_id) {
+Scheduler::RebuildSchedulingQueueIfNeeded(
+ base::SingleThreadTaskRunner* task_runner) {
lock_.AssertAcquired();
- auto& thread_state = per_thread_state_map_[thread_id];
+ auto& thread_state = per_thread_state_map_[task_runner];
auto& scheduling_queue = thread_state.scheduling_queue;
if (!thread_state.rebuild_scheduling_queue)
@@ -591,7 +600,7 @@ Scheduler::RebuildSchedulingQueueIfNeeded(base::PlatformThreadId thread_id) {
for (const auto& kv : sequence_map_) {
Sequence* sequence = kv.second.get();
if (!sequence->IsRunnable() || sequence->running() ||
- sequence->thread_id() != thread_id) {
+ sequence->task_runner() != task_runner) {
continue;
}
SchedulingState scheduling_state = sequence->SetScheduled();
@@ -610,14 +619,15 @@ void Scheduler::RunNextTask() {
base::TimeTicks::Now() - run_next_task_scheduled_,
base::TimeDelta::FromMicroseconds(10), base::TimeDelta::FromSeconds(30),
100);
- auto thread_id = base::PlatformThread::CurrentId();
+ auto* task_runner = base::ThreadTaskRunnerHandle::Get().get();
SchedulingState state;
{
- auto& scheduling_queue = RebuildSchedulingQueueIfNeeded(thread_id);
+ auto& scheduling_queue = RebuildSchedulingQueueIfNeeded(task_runner);
if (scheduling_queue.empty()) {
- TRACE_EVENT_NESTABLE_ASYNC_END0("gpu", "Scheduler::Running", this);
- per_thread_state_map_[thread_id].running = false;
+ TRACE_EVENT_NESTABLE_ASYNC_END0("gpu", "Scheduler::Running",
+ TRACE_ID_LOCAL(this));
+ per_thread_state_map_[task_runner].running = false;
return;
}
@@ -631,7 +641,7 @@ void Scheduler::RunNextTask() {
Sequence* sequence = GetSequence(state.sequence_id);
DCHECK(sequence);
- DCHECK_EQ(sequence->thread_id(), thread_id);
+ DCHECK_EQ(sequence->task_runner(), task_runner);
UMA_HISTOGRAM_CUSTOM_MICROSECONDS_TIMES(
"GPU.Scheduler.TaskDependencyTime",
@@ -688,7 +698,7 @@ void Scheduler::RunNextTask() {
sequence->FinishTask();
if (sequence->IsRunnable()) {
auto& scheduling_queue =
- per_thread_state_map_[thread_id].scheduling_queue;
+ per_thread_state_map_[task_runner].scheduling_queue;
SchedulingState scheduling_state = sequence->SetScheduled();
scheduling_queue.push_back(scheduling_state);
@@ -703,17 +713,17 @@ void Scheduler::RunNextTask() {
100);
// Avoid scheduling another RunNextTask if we're done with all tasks.
- auto& scheduling_queue = RebuildSchedulingQueueIfNeeded(thread_id);
+ auto& scheduling_queue = RebuildSchedulingQueueIfNeeded(task_runner);
if (scheduling_queue.empty()) {
- TRACE_EVENT_NESTABLE_ASYNC_END0("gpu", "Scheduler::Running", this);
- per_thread_state_map_[thread_id].running = false;
+ TRACE_EVENT_NESTABLE_ASYNC_END0("gpu", "Scheduler::Running",
+ TRACE_ID_LOCAL(this));
+ per_thread_state_map_[task_runner].running = false;
return;
}
run_next_task_scheduled_ = base::TimeTicks::Now();
- per_thread_state_map_[thread_id].task_runner->PostTask(
- FROM_HERE,
- base::BindOnce(&Scheduler::RunNextTask, base::Unretained(this)));
+ task_runner->PostTask(FROM_HERE, base::BindOnce(&Scheduler::RunNextTask,
+ base::Unretained(this)));
}
base::TimeDelta Scheduler::TakeTotalBlockingTime() {
@@ -727,9 +737,7 @@ base::TimeDelta Scheduler::TakeTotalBlockingTime() {
base::SingleThreadTaskRunner* Scheduler::GetTaskRunnerForTesting(
SequenceId sequence_id) {
base::AutoLock auto_lock(lock_);
- return (per_thread_state_map_[GetSequence(sequence_id)->thread_id()]
- .task_runner)
- .get();
+ return GetSequence(sequence_id)->task_runner();
}
} // namespace gpu
diff --git a/chromium/gpu/command_buffer/service/scheduler.h b/chromium/gpu/command_buffer/service/scheduler.h
index 92cdcbb7e8b..de327231771 100644
--- a/chromium/gpu/command_buffer/service/scheduler.h
+++ b/chromium/gpu/command_buffer/service/scheduler.h
@@ -64,8 +64,13 @@ class GPU_EXPORT Scheduler {
// Create a sequence with given priority. Returns an identifier for the
// sequence that can be used with SyncPointManager for creating sync point
// release clients. Sequences start off as enabled (see |EnableSequence|).
- // Sequence could be created outside of GPU thread.
- SequenceId CreateSequence(SchedulingPriority priority);
+ // Sequence is bound to the provided |task_runner|.
+ SequenceId CreateSequence(
+ SchedulingPriority priority,
+ scoped_refptr<base::SingleThreadTaskRunner> task_runner);
+
+ // Should only be used in tests.
+ SequenceId CreateSequenceForTesting(SchedulingPriority priority);
// Destroy the sequence and run any scheduled tasks immediately. Sequence
// could be destroyed outside of GPU thread.
@@ -135,7 +140,7 @@ class GPU_EXPORT Scheduler {
public:
Sequence(Scheduler* scheduler,
SequenceId sequence_id,
- base::PlatformThreadId thread_id,
+ scoped_refptr<base::SingleThreadTaskRunner> task_runner,
SchedulingPriority priority,
scoped_refptr<SyncPointOrderData> order_data);
@@ -147,7 +152,9 @@ class GPU_EXPORT Scheduler {
return order_data_;
}
- base::PlatformThreadId thread_id() const { return thread_id_; }
+ base::SingleThreadTaskRunner* task_runner() const {
+ return task_runner_.get();
+ }
bool enabled() const { return enabled_; }
@@ -312,7 +319,7 @@ class GPU_EXPORT Scheduler {
Scheduler* const scheduler_;
const SequenceId sequence_id_;
- const base::PlatformThreadId thread_id_;
+ scoped_refptr<base::SingleThreadTaskRunner> task_runner_;
const SchedulingPriority default_priority_;
SchedulingPriority current_priority_;
@@ -351,7 +358,7 @@ class GPU_EXPORT Scheduler {
// If the scheduling queue needs to be rebuilt because a sequence changed
// priority.
std::vector<SchedulingState>& RebuildSchedulingQueueIfNeeded(
- base::PlatformThreadId thread_id);
+ base::SingleThreadTaskRunner* task_runner);
Sequence* GetSequence(SequenceId sequence_id);
@@ -373,11 +380,10 @@ class GPU_EXPORT Scheduler {
// SchedulingState with highest priority (lowest order) in front.
std::vector<SchedulingState> scheduling_queue;
bool rebuild_scheduling_queue = false;
-
- scoped_refptr<base::SingleThreadTaskRunner> task_runner;
bool running = false;
};
- base::flat_map<base::PlatformThreadId, PerThreadState> per_thread_state_map_;
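+ // Per-task-runner scheduling state, keyed by the raw task runner pointer
+ // that each sequence is bound to.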
+ base::flat_map<base::SingleThreadTaskRunner*, PerThreadState>
+ per_thread_state_map_;
// Accumulated time the thread was blocked while running tasks.
base::TimeDelta total_blocked_time_;
diff --git a/chromium/gpu/command_buffer/service/scheduler_task_runner.cc b/chromium/gpu/command_buffer/service/scheduler_task_runner.cc
new file mode 100644
index 00000000000..8ab39e7f20d
--- /dev/null
+++ b/chromium/gpu/command_buffer/service/scheduler_task_runner.cc
@@ -0,0 +1,94 @@
+// Copyright 2021 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "gpu/command_buffer/service/scheduler_task_runner.h"
+
+#include <utility>
+#include <vector>
+
+#include "base/bind.h"
+#include "base/check.h"
+#include "base/no_destructor.h"
+#include "base/threading/thread_local.h"
+#include "gpu/command_buffer/common/sync_token.h"
+#include "gpu/command_buffer/service/scheduler.h"
+
+namespace gpu {
+
+namespace {
+
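+// Thread-local record of the SchedulerTaskRunner whose task is currently
+// running on this thread; used by RunsTasksInCurrentSequence().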
+base::ThreadLocalPointer<const SchedulerTaskRunner>&
+GetCurrentTaskRunnerStorage() {
+ static base::NoDestructor<base::ThreadLocalPointer<const SchedulerTaskRunner>>
+ runner;
+ return *runner;
+}
+
+void SetCurrentTaskRunner(const SchedulerTaskRunner* runner) {
+ GetCurrentTaskRunnerStorage().Set(runner);
+}
+
+const SchedulerTaskRunner* GetCurrentTaskRunner() {
+ return GetCurrentTaskRunnerStorage().Get();
+}
+
+} // namespace
+
+SchedulerTaskRunner::SchedulerTaskRunner(Scheduler& scheduler,
+ SequenceId sequence_id)
+ : scheduler_(scheduler), sequence_id_(sequence_id) {}
+
+SchedulerTaskRunner::~SchedulerTaskRunner() = default;
+
+void SchedulerTaskRunner::ShutDown() {
+ base::AutoLock lock(lock_);
+ is_running_ = false;
+}
+
+bool SchedulerTaskRunner::PostDelayedTask(const base::Location& from_here,
+ base::OnceClosure task,
+ base::TimeDelta delay) {
+ return PostNonNestableDelayedTask(from_here, std::move(task), delay);
+}
+
+bool SchedulerTaskRunner::PostNonNestableDelayedTask(
+ const base::Location& from_here,
+ base::OnceClosure task,
+ base::TimeDelta delay) {
+ base::AutoLock lock(lock_);
+ if (!is_running_)
+ return false;
+
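+ // The underlying Scheduler has no support for delayed tasks, so only
+ // zero-delay posts can be honored.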
+ CHECK(delay.is_zero());
+ scheduler_.ScheduleTask(Scheduler::Task(
+ sequence_id_,
+ base::BindOnce(&SchedulerTaskRunner::RunTask, this, std::move(task)),
+ std::vector<SyncToken>()));
+ return true;
+}
+
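+// Returns true only while a task posted to the same sequence is being run by
+// RunTask() on the current thread.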
+bool SchedulerTaskRunner::RunsTasksInCurrentSequence() const {
+ const SchedulerTaskRunner* current = GetCurrentTaskRunner();
+ return current != nullptr && current->sequence_id_ == sequence_id_;
+}
+
+void SchedulerTaskRunner::RunTask(base::OnceClosure task) {
+ {
+ // Handle the case where the sequence was shut down after this task was
+ // posted but before it had a chance to run. Note that we don't hold the
+ // lock while invoking the task below, since a task may reenter this object
+ // to e.g. call ShutDown() or post a new task.
+ base::AutoLock lock(lock_);
+ if (!is_running_)
+ return;
+ }
+
+ // Scheduler doesn't nest tasks, so we don't support nesting.
+ DCHECK(!GetCurrentTaskRunner());
+ SetCurrentTaskRunner(this);
+ std::move(task).Run();
+ SetCurrentTaskRunner(nullptr);
+}
+
+} // namespace gpu
diff --git a/chromium/gpu/command_buffer/service/scheduler_task_runner.h b/chromium/gpu/command_buffer/service/scheduler_task_runner.h
new file mode 100644
index 00000000000..3b189a00c13
--- /dev/null
+++ b/chromium/gpu/command_buffer/service/scheduler_task_runner.h
@@ -0,0 +1,55 @@
+// Copyright 2021 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef GPU_COMMAND_BUFFER_SERVICE_SCHEDULER_TASK_RUNNER_H_
+#define GPU_COMMAND_BUFFER_SERVICE_SCHEDULER_TASK_RUNNER_H_
+
+#include "base/sequenced_task_runner.h"
+#include "base/synchronization/lock.h"
+#include "gpu/command_buffer/service/sequence_id.h"
+#include "gpu/gpu_export.h"
+
+namespace gpu {
+
+class Scheduler;
+
+// A SequencedTaskRunner implementation to abstract task execution for a
+// specific SequenceId on the GPU Scheduler. This object does not support
+// delayed tasks, because the underlying Scheduler implementation does not
+// support scheduling delayed tasks. Also note that tasks run by this object do
+// not support running nested RunLoops.
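+//
+// A minimal usage sketch (illustrative only; |scheduler| and
+// |gpu_task_runner| are assumed to exist, and DoWork is a placeholder):
+//
+//   SequenceId id = scheduler->CreateSequence(SchedulingPriority::kNormal,
+//                                             gpu_task_runner);
+//   auto runner = base::MakeRefCounted<SchedulerTaskRunner>(*scheduler, id);
+//   runner->PostTask(FROM_HERE, base::BindOnce(&DoWork));
+//   runner->ShutDown();  // Later posts are rejected.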
+class GPU_EXPORT SchedulerTaskRunner : public base::SequencedTaskRunner {
+ public:
+ // Constructs a SchedulerTaskRunner that runs tasks on `scheduler`, on the
+ // sequence identified by `sequence_id`. This instance must not outlive
+ // `scheduler`.
+ SchedulerTaskRunner(Scheduler& scheduler, SequenceId sequence_id);
+
+ // Once this is called, all subsequent tasks will be rejected.
+ void ShutDown();
+
+ // base::SequencedTaskRunner:
+ bool PostDelayedTask(const base::Location& from_here,
+ base::OnceClosure task,
+ base::TimeDelta delay) override;
+ bool PostNonNestableDelayedTask(const base::Location& from_here,
+ base::OnceClosure task,
+ base::TimeDelta delay) override;
+ bool RunsTasksInCurrentSequence() const override;
+
+ private:
+ ~SchedulerTaskRunner() override;
+
+ void RunTask(base::OnceClosure task);
+
+ Scheduler& scheduler_;
+ const SequenceId sequence_id_;
+
+ base::Lock lock_;
+ bool is_running_ GUARDED_BY(lock_) = true;
+};
+
+} // namespace gpu
+
+#endif // GPU_COMMAND_BUFFER_SERVICE_SCHEDULER_TASK_RUNNER_H_
diff --git a/chromium/gpu/command_buffer/service/scheduler_unittest.cc b/chromium/gpu/command_buffer/service/scheduler_unittest.cc
index 3babe97a9cb..e921d556bbd 100644
--- a/chromium/gpu/command_buffer/service/scheduler_unittest.cc
+++ b/chromium/gpu/command_buffer/service/scheduler_unittest.cc
@@ -45,7 +45,7 @@ class SchedulerTest : public testing::Test {
void RunAllPendingTasks() {
SequenceId sequence_id =
- scheduler()->CreateSequence(SchedulingPriority::kLow);
+ scheduler()->CreateSequenceForTesting(SchedulingPriority::kLow);
scheduler()->ScheduleTask(Scheduler::Task(
sequence_id, run_loop_.QuitClosure(), std::vector<SyncToken>()));
run_loop_.Run();
@@ -62,7 +62,7 @@ class SchedulerTest : public testing::Test {
TEST_F(SchedulerTest, ScheduledTasksRunInOrder) {
SequenceId sequence_id =
- scheduler()->CreateSequence(SchedulingPriority::kNormal);
+ scheduler()->CreateSequenceForTesting(SchedulingPriority::kNormal);
static int count = 0;
int ran1 = 0;
@@ -87,7 +87,7 @@ TEST_F(SchedulerTest, ScheduledTasksRunInOrder) {
TEST_F(SchedulerTest, ScheduledTasksRunAfterReporting) {
SequenceId sequence_id =
- scheduler()->CreateSequence(SchedulingPriority::kNormal);
+ scheduler()->CreateSequenceForTesting(SchedulingPriority::kNormal);
bool ran = false;
bool reported = false;
@@ -113,7 +113,7 @@ TEST_F(SchedulerTest, ScheduledTasksRunAfterReporting) {
TEST_F(SchedulerTest, ContinuedTasksRunFirst) {
SequenceId sequence_id =
- scheduler()->CreateSequence(SchedulingPriority::kNormal);
+ scheduler()->CreateSequenceForTesting(SchedulingPriority::kNormal);
static int count = 0;
int ran1 = 0;
@@ -153,7 +153,7 @@ class SchedulerTaskRunOrderTest : public SchedulerTest {
protected:
void CreateSequence(int sequence_key, SchedulingPriority priority) {
- SequenceId sequence_id = scheduler()->CreateSequence(priority);
+ SequenceId sequence_id = scheduler()->CreateSequenceForTesting(priority);
CommandBufferId command_buffer_id =
CommandBufferId::FromUnsafeValue(sequence_key);
scoped_refptr<SyncPointClientState> release_state =
@@ -482,7 +482,7 @@ TEST_F(SchedulerTaskRunOrderTest, WaitOnSelfShouldNotBlockSequence) {
TEST_F(SchedulerTest, ReleaseSequenceShouldYield) {
SequenceId sequence_id1 =
- scheduler()->CreateSequence(SchedulingPriority::kLow);
+ scheduler()->CreateSequenceForTesting(SchedulingPriority::kLow);
CommandBufferNamespace namespace_id = CommandBufferNamespace::GPU_IO;
CommandBufferId command_buffer_id = CommandBufferId::FromUnsafeValue(1);
scoped_refptr<SyncPointClientState> release_state =
@@ -504,7 +504,7 @@ TEST_F(SchedulerTest, ReleaseSequenceShouldYield) {
int ran2 = 0;
SyncToken sync_token(namespace_id, command_buffer_id, release);
SequenceId sequence_id2 =
- scheduler()->CreateSequence(SchedulingPriority::kHigh);
+ scheduler()->CreateSequenceForTesting(SchedulingPriority::kHigh);
scheduler()->ScheduleTask(Scheduler::Task(
sequence_id2, GetClosure([&] { ran2 = ++count; }), {sync_token}));
@@ -521,7 +521,7 @@ TEST_F(SchedulerTest, ReleaseSequenceShouldYield) {
TEST_F(SchedulerTest, ReentrantEnableSequenceShouldNotDeadlock) {
SequenceId sequence_id1 =
- scheduler()->CreateSequence(SchedulingPriority::kHigh);
+ scheduler()->CreateSequenceForTesting(SchedulingPriority::kHigh);
CommandBufferNamespace namespace_id = CommandBufferNamespace::GPU_IO;
CommandBufferId command_buffer_id1 = CommandBufferId::FromUnsafeValue(1);
scoped_refptr<SyncPointClientState> release_state1 =
@@ -529,7 +529,7 @@ TEST_F(SchedulerTest, ReentrantEnableSequenceShouldNotDeadlock) {
namespace_id, command_buffer_id1, sequence_id1);
SequenceId sequence_id2 =
- scheduler()->CreateSequence(SchedulingPriority::kNormal);
+ scheduler()->CreateSequenceForTesting(SchedulingPriority::kNormal);
CommandBufferId command_buffer_id2 = CommandBufferId::FromUnsafeValue(2);
scoped_refptr<SyncPointClientState> release_state2 =
sync_point_manager()->CreateSyncPointClientState(
@@ -576,11 +576,11 @@ TEST_F(SchedulerTest, ReentrantEnableSequenceShouldNotDeadlock) {
TEST_F(SchedulerTest, ClientWaitIsPrioritized) {
SequenceId sequence_id1 =
- scheduler()->CreateSequence(SchedulingPriority::kNormal);
+ scheduler()->CreateSequenceForTesting(SchedulingPriority::kNormal);
SequenceId sequence_id2 =
- scheduler()->CreateSequence(SchedulingPriority::kLow);
+ scheduler()->CreateSequenceForTesting(SchedulingPriority::kLow);
SequenceId sequence_id3 =
- scheduler()->CreateSequence(SchedulingPriority::kHigh);
+ scheduler()->CreateSequenceForTesting(SchedulingPriority::kHigh);
CommandBufferId command_buffer_id = CommandBufferId::FromUnsafeValue(1);
@@ -625,7 +625,7 @@ TEST_F(SchedulerTest, ClientWaitIsPrioritized) {
// schedule the task.
base::RunLoop run_loop_temp;
SequenceId sequence_id_run_loop =
- scheduler()->CreateSequence(SchedulingPriority::kLow);
+ scheduler()->CreateSequenceForTesting(SchedulingPriority::kLow);
scheduler()->ScheduleTask(Scheduler::Task(sequence_id_run_loop,
run_loop_temp.QuitClosure(),
std::vector<SyncToken>()));
@@ -642,9 +642,12 @@ TEST_F(SchedulerTest, ClientWaitIsPrioritized) {
}
TEST_F(SchedulerTest, StreamPriorities) {
- SequenceId seq_id1 = scheduler()->CreateSequence(SchedulingPriority::kLow);
- SequenceId seq_id2 = scheduler()->CreateSequence(SchedulingPriority::kNormal);
- SequenceId seq_id3 = scheduler()->CreateSequence(SchedulingPriority::kHigh);
+ SequenceId seq_id1 =
+ scheduler()->CreateSequenceForTesting(SchedulingPriority::kLow);
+ SequenceId seq_id2 =
+ scheduler()->CreateSequenceForTesting(SchedulingPriority::kNormal);
+ SequenceId seq_id3 =
+ scheduler()->CreateSequenceForTesting(SchedulingPriority::kHigh);
CommandBufferNamespace namespace_id = CommandBufferNamespace::GPU_IO;
CommandBufferId command_buffer_id1 = CommandBufferId::FromUnsafeValue(1);
@@ -705,9 +708,12 @@ TEST_F(SchedulerTest, StreamPriorities) {
}
TEST_F(SchedulerTest, StreamDestroyRemovesPriorities) {
- SequenceId seq_id1 = scheduler()->CreateSequence(SchedulingPriority::kLow);
- SequenceId seq_id2 = scheduler()->CreateSequence(SchedulingPriority::kNormal);
- SequenceId seq_id3 = scheduler()->CreateSequence(SchedulingPriority::kHigh);
+ SequenceId seq_id1 =
+ scheduler()->CreateSequenceForTesting(SchedulingPriority::kLow);
+ SequenceId seq_id2 =
+ scheduler()->CreateSequenceForTesting(SchedulingPriority::kNormal);
+ SequenceId seq_id3 =
+ scheduler()->CreateSequenceForTesting(SchedulingPriority::kHigh);
CommandBufferNamespace namespace_id = CommandBufferNamespace::GPU_IO;
CommandBufferId command_buffer_id1 = CommandBufferId::FromUnsafeValue(1);
@@ -755,9 +761,12 @@ TEST_F(SchedulerTest, StreamDestroyRemovesPriorities) {
// crbug.com/781585#5: Test RemoveWait/AddWait/RemoveWait sequence.
TEST_F(SchedulerTest, StreamPriorityChangeWhileReleasing) {
- SequenceId seq_id1 = scheduler()->CreateSequence(SchedulingPriority::kLow);
- SequenceId seq_id2 = scheduler()->CreateSequence(SchedulingPriority::kNormal);
- SequenceId seq_id3 = scheduler()->CreateSequence(SchedulingPriority::kHigh);
+ SequenceId seq_id1 =
+ scheduler()->CreateSequenceForTesting(SchedulingPriority::kLow);
+ SequenceId seq_id2 =
+ scheduler()->CreateSequenceForTesting(SchedulingPriority::kNormal);
+ SequenceId seq_id3 =
+ scheduler()->CreateSequenceForTesting(SchedulingPriority::kHigh);
CommandBufferNamespace namespace_id = CommandBufferNamespace::GPU_IO;
CommandBufferId command_buffer_id1 = CommandBufferId::FromUnsafeValue(1);
@@ -812,9 +821,12 @@ TEST_F(SchedulerTest, StreamPriorityChangeWhileReleasing) {
}
TEST_F(SchedulerTest, CircularPriorities) {
- SequenceId seq_id1 = scheduler()->CreateSequence(SchedulingPriority::kHigh);
- SequenceId seq_id2 = scheduler()->CreateSequence(SchedulingPriority::kLow);
- SequenceId seq_id3 = scheduler()->CreateSequence(SchedulingPriority::kNormal);
+ SequenceId seq_id1 =
+ scheduler()->CreateSequenceForTesting(SchedulingPriority::kHigh);
+ SequenceId seq_id2 =
+ scheduler()->CreateSequenceForTesting(SchedulingPriority::kLow);
+ SequenceId seq_id3 =
+ scheduler()->CreateSequenceForTesting(SchedulingPriority::kNormal);
CommandBufferNamespace namespace_id = CommandBufferNamespace::GPU_IO;
CommandBufferId command_buffer_id2 = CommandBufferId::FromUnsafeValue(2);
diff --git a/chromium/gpu/command_buffer/service/sequence_id.h b/chromium/gpu/command_buffer/service/sequence_id.h
index e79ffc08ea0..e59931ab12a 100644
--- a/chromium/gpu/command_buffer/service/sequence_id.h
+++ b/chromium/gpu/command_buffer/service/sequence_id.h
@@ -5,12 +5,12 @@
#ifndef GPU_COMMAND_BUFFER_SERVICE_SEQUENCE_ID_H_
#define GPU_COMMAND_BUFFER_SERVICE_SEQUENCE_ID_H_
-#include "base/util/type_safety/id_type.h"
+#include "base/types/id_type.h"
namespace gpu {
class SyncPointOrderData;
-using SequenceId = util::IdTypeU32<SyncPointOrderData>;
+using SequenceId = base::IdTypeU32<SyncPointOrderData>;
} // namespace gpu
diff --git a/chromium/gpu/command_buffer/service/service_font_manager.cc b/chromium/gpu/command_buffer/service/service_font_manager.cc
index feb11d2986a..bc68171b79a 100644
--- a/chromium/gpu/command_buffer/service/service_font_manager.cc
+++ b/chromium/gpu/command_buffer/service/service_font_manager.cc
@@ -63,7 +63,7 @@ class Deserializer {
DCHECK(base::bits::IsPowerOfTwo(alignment));
size_t memory = reinterpret_cast<size_t>(memory_);
- size_t padding = base::bits::Align(memory, alignment) - memory;
+ size_t padding = base::bits::AlignUp(memory, alignment) - memory;
base::CheckedNumeric<uint32_t> checked_padded_size = bytes_read_;
checked_padded_size += padding;
diff --git a/chromium/gpu/command_buffer/service/service_utils.cc b/chromium/gpu/command_buffer/service/service_utils.cc
index 209f0a4035b..9bd4406c254 100644
--- a/chromium/gpu/command_buffer/service/service_utils.cc
+++ b/chromium/gpu/command_buffer/service/service_utils.cc
@@ -147,8 +147,6 @@ GpuPreferences ParseGpuPreferences(const base::CommandLine* command_line) {
command_line->HasSwitch(switches::kEnableThreadedTextureMailboxes);
gpu_preferences.gl_shader_interm_output =
command_line->HasSwitch(switches::kGLShaderIntermOutput);
- gpu_preferences.emulate_shader_precision =
- command_line->HasSwitch(switches::kEmulateShaderPrecision);
gpu_preferences.enable_gpu_service_logging =
command_line->HasSwitch(switches::kEnableGPUServiceLogging);
gpu_preferences.enable_gpu_service_tracing =
@@ -159,7 +157,11 @@ GpuPreferences ParseGpuPreferences(const base::CommandLine* command_line) {
command_line->HasSwitch(switches::kIgnoreGpuBlocklist);
gpu_preferences.enable_webgpu =
command_line->HasSwitch(switches::kEnableUnsafeWebGPU) ||
- command_line->HasSwitch(switches::kEnableUnsafeWebGPUService);
+ base::FeatureList::IsEnabled(features::kWebGPUService);
+ gpu_preferences.enable_webgpu_spirv =
+ command_line->HasSwitch(switches::kEnableUnsafeWebGPU);
+ gpu_preferences.force_webgpu_compat =
+ command_line->HasSwitch(switches::kForceWebGPUCompat);
if (command_line->HasSwitch(switches::kEnableDawnBackendValidation)) {
auto value = command_line->GetSwitchValueASCII(
switches::kEnableDawnBackendValidation);
diff --git a/chromium/gpu/command_buffer/service/shared_context_state.cc b/chromium/gpu/command_buffer/service/shared_context_state.cc
index 29a43c3845f..f9152ee4ff2 100644
--- a/chromium/gpu/command_buffer/service/shared_context_state.cc
+++ b/chromium/gpu/command_buffer/service/shared_context_state.cc
@@ -206,6 +206,9 @@ SharedContextState::SharedContextState(
if (base::ThreadTaskRunnerHandle::IsSet()) {
base::trace_event::MemoryDumpManager::GetInstance()->RegisterDumpProvider(
this, "SharedContextState", base::ThreadTaskRunnerHandle::Get());
+
+ // Create |gr_cache_controller_| only if we have a task runner.
+ gr_cache_controller_.emplace(this);
}
// Initialize the scratch buffer to some small initial size.
scratch_deserialization_buffer_.resize(
@@ -621,10 +624,8 @@ void SharedContextState::RemoveContextLostObserver(ContextLostObserver* obs) {
void SharedContextState::PurgeMemory(
base::MemoryPressureListener::MemoryPressureLevel memory_pressure_level) {
- if (!gr_context_) {
- DCHECK(!transfer_cache_);
+ if (!gr_context_)
return;
- }
// Ensure the context is current before doing any GPU cleanup.
if (!MakeCurrent(nullptr))
@@ -770,7 +771,7 @@ void SharedContextState::RestoreTextureUnitBindings(unsigned unit) const {
}
void SharedContextState::RestoreVertexAttribArray(unsigned index) {
- NOTIMPLEMENTED();
+ NOTIMPLEMENTED_LOG_ONCE();
}
void SharedContextState::RestoreAllExternalTextureBindingsIfNeeded() {
@@ -860,4 +861,9 @@ bool SharedContextState::CheckResetStatus(bool need_gl) {
return false;
}
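+// Forwards to |gr_cache_controller_| if it was created, i.e. if a task runner
+// was available at construction time; otherwise this is a no-op.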
+void SharedContextState::ScheduleGrContextCleanup() {
+ if (gr_cache_controller_)
+ gr_cache_controller_->ScheduleGrContextCleanup();
+}
+
} // namespace gpu
diff --git a/chromium/gpu/command_buffer/service/shared_context_state.h b/chromium/gpu/command_buffer/service/shared_context_state.h
index e7e8558a12c..bf57380b03b 100644
--- a/chromium/gpu/command_buffer/service/shared_context_state.h
+++ b/chromium/gpu/command_buffer/service/shared_context_state.h
@@ -13,6 +13,7 @@
#include "base/memory/ref_counted.h"
#include "base/memory/weak_ptr.h"
#include "base/observer_list.h"
+#include "base/single_thread_task_runner.h"
#include "base/time/time.h"
#include "base/trace_event/memory_dump_provider.h"
#include "build/build_config.h"
@@ -20,6 +21,7 @@
#include "gpu/command_buffer/common/gl2_types.h"
#include "gpu/command_buffer/common/skia_utils.h"
#include "gpu/command_buffer/service/gl_context_virtual_delegate.h"
+#include "gpu/command_buffer/service/gr_cache_controller.h"
#include "gpu/command_buffer/service/memory_tracking.h"
#include "gpu/config/gpu_preferences.h"
#include "gpu/gpu_gles2_export.h"
@@ -220,6 +222,8 @@ class GPU_GLES2_EXPORT SharedContextState
bool CheckResetStatus(bool needs_gl);
bool device_needs_reset() { return device_needs_reset_; }
+ void ScheduleGrContextCleanup();
+
private:
friend class base::RefCounted<SharedContextState>;
friend class raster::RasterDecoderTestBase;
@@ -350,6 +354,8 @@ class GPU_GLES2_EXPORT SharedContextState
std::unique_ptr<ExternalSemaphorePool> external_semaphore_pool_;
#endif
+ absl::optional<raster::GrCacheController> gr_cache_controller_;
+
base::WeakPtrFactory<SharedContextState> weak_ptr_factory_{this};
DISALLOW_COPY_AND_ASSIGN(SharedContextState);
diff --git a/chromium/gpu/command_buffer/service/shared_image_backing.cc b/chromium/gpu/command_buffer/service/shared_image_backing.cc
index 9bc23bc4120..f98ba6aca94 100644
--- a/chromium/gpu/command_buffer/service/shared_image_backing.cc
+++ b/chromium/gpu/command_buffer/service/shared_image_backing.cc
@@ -79,7 +79,8 @@ std::unique_ptr<SharedImageRepresentationSkia> SharedImageBacking::ProduceSkia(
std::unique_ptr<SharedImageRepresentationDawn> SharedImageBacking::ProduceDawn(
SharedImageManager* manager,
MemoryTypeTracker* tracker,
- WGPUDevice device) {
+ WGPUDevice device,
+ WGPUBackendType backend_type) {
return nullptr;
}
diff --git a/chromium/gpu/command_buffer/service/shared_image_backing.h b/chromium/gpu/command_buffer/service/shared_image_backing.h
index ec9f63ba9e9..baa35988de1 100644
--- a/chromium/gpu/command_buffer/service/shared_image_backing.h
+++ b/chromium/gpu/command_buffer/service/shared_image_backing.h
@@ -9,7 +9,6 @@
#include <memory>
-#include "base/containers/flat_map.h"
#include "base/memory/scoped_refptr.h"
#include "base/metrics/histogram_macros.h"
#include "base/synchronization/lock.h"
@@ -166,7 +165,8 @@ class GPU_GLES2_EXPORT SharedImageBacking {
virtual std::unique_ptr<SharedImageRepresentationDawn> ProduceDawn(
SharedImageManager* manager,
MemoryTypeTracker* tracker,
- WGPUDevice device);
+ WGPUDevice device,
+ WGPUBackendType backend_type);
virtual std::unique_ptr<SharedImageRepresentationOverlay> ProduceOverlay(
SharedImageManager* manager,
MemoryTypeTracker* tracker);
diff --git a/chromium/gpu/command_buffer/service/shared_image_backing_d3d.cc b/chromium/gpu/command_buffer/service/shared_image_backing_d3d.cc
index 328afa61855..91ce315ad6d 100644
--- a/chromium/gpu/command_buffer/service/shared_image_backing_d3d.cc
+++ b/chromium/gpu/command_buffer/service/shared_image_backing_d3d.cc
@@ -12,7 +12,11 @@
#include "gpu/command_buffer/common/shared_image_trace_utils.h"
#include "gpu/command_buffer/common/shared_image_usage.h"
#include "gpu/command_buffer/service/shared_image_representation_d3d.h"
+#if BUILDFLAG(USE_DAWN) && BUILDFLAG(DAWN_ENABLE_BACKEND_OPENGLES)
+#include "gpu/command_buffer/service/shared_image_representation_dawn_egl_image.h"
+#endif
#include "gpu/command_buffer/service/shared_image_representation_skia_gl.h"
+#include "ui/gfx/buffer_format_util.h"
#include "ui/gl/trace_util.h"
namespace gpu {
@@ -267,23 +271,29 @@ SharedImageBackingD3D::CreateFromSharedHandle(
Microsoft::WRL::ComPtr<ID3D11Texture2D> d3d11_texture,
base::win::ScopedHandle shared_handle) {
DCHECK(shared_handle.IsValid());
+
+ const bool has_webgpu_usage = !!(usage & SHARED_IMAGE_USAGE_WEBGPU);
// Keyed mutexes are required for Dawn interop but are not used for XR
// composition where fences are used instead.
Microsoft::WRL::ComPtr<IDXGIKeyedMutex> dxgi_keyed_mutex;
d3d11_texture.As(&dxgi_keyed_mutex);
- DCHECK(!(usage & SHARED_IMAGE_USAGE_WEBGPU) || dxgi_keyed_mutex);
+ DCHECK(!has_webgpu_usage || dxgi_keyed_mutex);
auto shared_state = base::MakeRefCounted<SharedState>(
std::move(shared_handle), std::move(dxgi_keyed_mutex));
- // Creating the GL texture doesn't require exclusive access to the underlying
- // D3D11 texture.
- auto gl_texture = CreateGLTexture(format, size, color_space, d3d11_texture);
- if (!gl_texture) {
- DLOG(ERROR) << "Failed to create GL texture";
- return nullptr;
+ // Do not cache a GL texture in the backing if it could be owned by WebGPU
+ // since there's no GL context to MakeCurrent in the destructor.
+ scoped_refptr<gles2::TexturePassthrough> gl_texture;
+ if (!has_webgpu_usage) {
+ // Creating the GL texture doesn't require exclusive access to the
+ // underlying D3D11 texture.
+ gl_texture = CreateGLTexture(format, size, color_space, d3d11_texture);
+ if (!gl_texture) {
+ DLOG(ERROR) << "Failed to create GL texture";
+ return nullptr;
+ }
}
-
return base::WrapUnique(new SharedImageBackingD3D(
mailbox, format, size, color_space, surface_origin, alpha_type, usage,
std::move(d3d11_texture), std::move(gl_texture), /*swap_chain=*/nullptr,
@@ -378,21 +388,25 @@ SharedImageBackingD3D::SharedImageBackingD3D(
Microsoft::WRL::ComPtr<IDXGISwapChain1> swap_chain,
size_t buffer_index,
scoped_refptr<SharedState> shared_state)
- : ClearTrackingSharedImageBacking(mailbox,
- format,
- size,
- color_space,
- surface_origin,
- alpha_type,
- usage,
- gl_texture->estimated_size(),
- false /* is_thread_safe */),
+ : ClearTrackingSharedImageBacking(
+ mailbox,
+ format,
+ size,
+ color_space,
+ surface_origin,
+ alpha_type,
+ usage,
+ gl_texture
+ ? gl_texture->estimated_size()
+ : gfx::BufferSizeForBufferFormat(size, viz::BufferFormat(format)),
+ false /* is_thread_safe */),
d3d11_texture_(std::move(d3d11_texture)),
gl_texture_(std::move(gl_texture)),
swap_chain_(std::move(swap_chain)),
buffer_index_(buffer_index),
shared_state_(std::move(shared_state)) {
- DCHECK(gl_texture_);
+ const bool has_webgpu_usage = !!(usage & SHARED_IMAGE_USAGE_WEBGPU);
+ DCHECK(has_webgpu_usage || gl_texture_);
}
SharedImageBackingD3D::~SharedImageBackingD3D() {
@@ -424,35 +438,50 @@ uint32_t SharedImageBackingD3D::GetAllowedDawnUsages() const {
DCHECK(usage() & gpu::SHARED_IMAGE_USAGE_WEBGPU);
return static_cast<uint32_t>(
WGPUTextureUsage_CopySrc | WGPUTextureUsage_CopyDst |
- WGPUTextureUsage_Sampled | WGPUTextureUsage_RenderAttachment);
+ WGPUTextureUsage_TextureBinding | WGPUTextureUsage_RenderAttachment);
}
std::unique_ptr<SharedImageRepresentationDawn>
SharedImageBackingD3D::ProduceDawn(SharedImageManager* manager,
MemoryTypeTracker* tracker,
- WGPUDevice device) {
+ WGPUDevice device,
+ WGPUBackendType backend_type) {
#if BUILDFLAG(USE_DAWN)
+ const viz::ResourceFormat viz_resource_format = format();
+ const WGPUTextureFormat wgpu_format = viz::ToWGPUFormat(viz_resource_format);
+ if (wgpu_format == WGPUTextureFormat_Undefined) {
+ DLOG(ERROR) << "Unsupported viz format found: " << viz_resource_format;
+ return nullptr;
+ }
- // Persistently open the shared handle by caching it on this backing.
- if (!external_image_) {
- DCHECK(base::win::HandleTraits::IsHandleValid(GetSharedHandle()));
-
- const viz::ResourceFormat viz_resource_format = format();
- const WGPUTextureFormat wgpu_format =
- viz::ToWGPUFormat(viz_resource_format);
- if (wgpu_format == WGPUTextureFormat_Undefined) {
- DLOG(ERROR) << "Unsupported viz format found: " << viz_resource_format;
+ WGPUTextureDescriptor texture_descriptor = {};
+ texture_descriptor.nextInChain = nullptr;
+ texture_descriptor.format = wgpu_format;
+ texture_descriptor.usage = GetAllowedDawnUsages();
+ texture_descriptor.dimension = WGPUTextureDimension_2D;
+ texture_descriptor.size = {static_cast<uint32_t>(size().width()),
+ static_cast<uint32_t>(size().height()), 1};
+ texture_descriptor.mipLevelCount = 1;
+ texture_descriptor.sampleCount = 1;
+
+#if BUILDFLAG(DAWN_ENABLE_BACKEND_OPENGLES)
+ if (backend_type == WGPUBackendType_OpenGLES) {
+ // EGLImage textures do not support sampling at the moment.
+ texture_descriptor.usage &= ~WGPUTextureUsage_TextureBinding;
+ EGLImage egl_image =
+ static_cast<gl::GLImageD3D*>(GetGLImage())->egl_image();
+ if (!egl_image) {
+ DLOG(ERROR) << "Failed to create EGLImage";
return nullptr;
}
+ return std::make_unique<SharedImageRepresentationDawnEGLImage>(
+ manager, this, tracker, device, egl_image, texture_descriptor);
+ }
+#endif
- WGPUTextureDescriptor texture_descriptor = {};
- texture_descriptor.nextInChain = nullptr;
- texture_descriptor.format = wgpu_format;
- texture_descriptor.usage = GetAllowedDawnUsages();
- texture_descriptor.dimension = WGPUTextureDimension_2D;
- texture_descriptor.size = {size().width(), size().height(), 1};
- texture_descriptor.mipLevelCount = 1;
- texture_descriptor.sampleCount = 1;
+ // Persistently open the shared handle by caching it on this backing.
+ if (!external_image_) {
+ DCHECK(base::win::HandleTraits::IsHandleValid(GetSharedHandle()));
dawn_native::d3d12::ExternalImageDescriptorDXGISharedHandle
externalImageDesc;
@@ -558,8 +587,18 @@ std::unique_ptr<SharedImageRepresentationGLTexturePassthrough>
SharedImageBackingD3D::ProduceGLTexturePassthrough(SharedImageManager* manager,
MemoryTypeTracker* tracker) {
TRACE_EVENT0("gpu", "SharedImageBackingD3D::ProduceGLTexturePassthrough");
+ // Lazily create a GL texture if it wasn't provided on initialization.
+ auto gl_texture = gl_texture_;
+ if (!gl_texture) {
+ gl_texture =
+ CreateGLTexture(format(), size(), color_space(), d3d11_texture_);
+ if (!gl_texture) {
+ DLOG(ERROR) << "Failed to create GL texture";
+ return nullptr;
+ }
+ }
return std::make_unique<SharedImageRepresentationGLTexturePassthroughD3D>(
- manager, this, tracker, gl_texture_);
+ manager, this, tracker, std::move(gl_texture));
}
std::unique_ptr<SharedImageRepresentationSkia>
diff --git a/chromium/gpu/command_buffer/service/shared_image_backing_d3d.h b/chromium/gpu/command_buffer/service/shared_image_backing_d3d.h
index 28163ef967b..c5b37194f2e 100644
--- a/chromium/gpu/command_buffer/service/shared_image_backing_d3d.h
+++ b/chromium/gpu/command_buffer/service/shared_image_backing_d3d.h
@@ -96,7 +96,8 @@ class GPU_GLES2_EXPORT SharedImageBackingD3D
std::unique_ptr<SharedImageRepresentationDawn> ProduceDawn(
SharedImageManager* manager,
MemoryTypeTracker* tracker,
- WGPUDevice device) override;
+ WGPUDevice device,
+ WGPUBackendType backend_type) override;
void OnMemoryDump(const std::string& dump_name,
base::trace_event::MemoryAllocatorDump* dump,
@@ -178,6 +179,8 @@ class GPU_GLES2_EXPORT SharedImageBackingD3D
// Texture could be nullptr if an empty backing is needed for testing.
Microsoft::WRL::ComPtr<ID3D11Texture2D> d3d11_texture_;
+
+ // Can be null for backings owned by non-GL producers, e.g. WebGPU.
scoped_refptr<gles2::TexturePassthrough> gl_texture_;
Microsoft::WRL::ComPtr<IDXGISwapChain1> swap_chain_;
diff --git a/chromium/gpu/command_buffer/service/shared_image_backing_egl_image.cc b/chromium/gpu/command_buffer/service/shared_image_backing_egl_image.cc
index 9efed566905..884f3a81523 100644
--- a/chromium/gpu/command_buffer/service/shared_image_backing_egl_image.cc
+++ b/chromium/gpu/command_buffer/service/shared_image_backing_egl_image.cc
@@ -176,11 +176,12 @@ SharedImageBackingEglImage::SharedImageBackingEglImage(
SkAlphaType alpha_type,
uint32_t usage,
size_t estimated_size,
- GLuint gl_format,
- GLuint gl_type,
+ const SharedImageBackingFactoryGLCommon::FormatInfo format_info,
SharedImageBatchAccessManager* batch_access_manager,
const GpuDriverBugWorkarounds& workarounds,
- bool use_passthrough)
+ const SharedImageBackingGLCommon::UnpackStateAttribs& attribs,
+ bool use_passthrough,
+ base::span<const uint8_t> pixel_data)
: ClearTrackingSharedImageBacking(mailbox,
format,
size,
@@ -190,17 +191,22 @@ SharedImageBackingEglImage::SharedImageBackingEglImage(
usage,
estimated_size,
true /*is_thread_safe*/),
- gl_format_(gl_format),
- gl_type_(gl_type),
+ format_info_(format_info),
batch_access_manager_(batch_access_manager),
+ gl_unpack_attribs_(attribs),
use_passthrough_(use_passthrough) {
DCHECK(batch_access_manager_);
created_on_context_ = gl::g_current_gl_context;
// On some GPUs (NVidia) keeping reference to egl image itself is not enough,
- // we must keep reference to at least one sibling.
- if (workarounds.dont_delete_source_texture_for_egl_image) {
- source_texture_holder_ = GenEGLImageSibling();
- }
+ // we must keep a reference to at least one sibling. Note that this
+ // workaround is currently enabled for all Android devices.
+ // When we have pixel data, we want to initialize the texture with that data
+ // before creating the EGLImage from it, hence the GenEGLImageSibling() call
+ // below.
+ if (workarounds.dont_delete_source_texture_for_egl_image)
+ source_texture_holder_ = GenEGLImageSibling(pixel_data);
+ else if (!pixel_data.empty())
+ auto texture_holder = GenEGLImageSibling(pixel_data);
}
SharedImageBackingEglImage::~SharedImageBackingEglImage() {
@@ -235,7 +241,8 @@ std::unique_ptr<T> SharedImageBackingEglImage::ProduceGLTextureInternal(
return std::make_unique<T>(manager, this, tracker, source_texture_holder_);
}
- auto texture_holder = GenEGLImageSibling();
+ auto texture_holder =
+ GenEGLImageSibling(/*pixel_data=*/base::span<const uint8_t>());
if (!texture_holder)
return nullptr;
return std::make_unique<T>(manager, this, tracker, std::move(texture_holder));
@@ -366,7 +373,8 @@ void SharedImageBackingEglImage::EndRead(const RepresentationGLShared* reader) {
}
scoped_refptr<SharedImageBackingEglImage::TextureHolder>
-SharedImageBackingEglImage::GenEGLImageSibling() {
+SharedImageBackingEglImage::GenEGLImageSibling(
+ base::span<const uint8_t> pixel_data) {
// Create a gles2::texture.
GLenum target = GL_TEXTURE_2D;
gl::GLApi* api = gl::g_current_gl_context;
@@ -388,12 +396,44 @@ SharedImageBackingEglImage::GenEGLImageSibling() {
scoped_refptr<gles2::NativeImageBuffer> buffer;
{
AutoLock auto_lock(this);
+
+ // |pixel_data|, if present, should only be used to initialize the texture
+ // when we create |egl_image_buffer_| from it, not after the buffer has
+ // already been created.
+ DCHECK(pixel_data.empty() || !egl_image_buffer_);
if (!egl_image_buffer_) {
- // Allocate memory for texture object if this is the first EGLImage
- // target/sibling. Memory for EGLImage will not be created if we don't
- // allocate memory for the texture object.
- api->glTexImage2DFn(target, 0, gl_format_, size().width(),
- size().height(), 0, gl_format_, gl_type_, nullptr);
+ // Note that we only want to upload pixel data to the texture at
+ // initialization, before we create |egl_image_buffer_| from it. If the
+ // pixel data is empty, we only allocate memory for the texture object,
+ // which is required to create the EGLImage.
+ if (format_info_.supports_storage) {
+ api->glTexStorage2DEXTFn(target, 1,
+ format_info_.storage_internal_format,
+ size().width(), size().height());
+
+ if (!pixel_data.empty()) {
+ SharedImageBackingGLCommon::ScopedResetAndRestoreUnpackState
+ scoped_unpack_state(api, gl_unpack_attribs_,
+ true /* uploading_data */);
+ api->glTexSubImage2DFn(target, 0, 0, 0, size().width(),
+ size().height(), format_info_.adjusted_format,
+ format_info_.gl_type, pixel_data.data());
+ }
+ } else if (format_info_.is_compressed) {
+ SharedImageBackingGLCommon::ScopedResetAndRestoreUnpackState
+ scoped_unpack_state(api, gl_unpack_attribs_, !pixel_data.empty());
+ api->glCompressedTexImage2DFn(
+ target, 0, format_info_.image_internal_format, size().width(),
+ size().height(), 0, pixel_data.size(), pixel_data.data());
+ } else {
+ SharedImageBackingGLCommon::ScopedResetAndRestoreUnpackState
+ scoped_unpack_state(api, gl_unpack_attribs_, !pixel_data.empty());
+
+ api->glTexImage2DFn(target, 0, format_info_.image_internal_format,
+ size().width(), size().height(), 0,
+ format_info_.adjusted_format, format_info_.gl_type,
+ pixel_data.data());
+ }
// Use service id of the texture as a source to create the native buffer.
egl_image_buffer_ = gles2::NativeImageBuffer::Create(service_id);
@@ -405,6 +445,13 @@ SharedImageBackingEglImage::GenEGLImageSibling() {
}
buffer = egl_image_buffer_;
}
+
+ // Mark the backing as cleared if pixel data has been uploaded. Note that
+ // SetCleared() acquires the lock, so it must stay outside the locked scope
+ // above.
+ if (!pixel_data.empty())
+ SetCleared();
+
if (bind_egl_image) {
// If we already have the |egl_image_buffer_|, just bind it to the new
// texture to make it an EGLImage sibling.
@@ -414,9 +461,10 @@ SharedImageBackingEglImage::GenEGLImageSibling() {
if (use_passthrough_) {
auto texture_passthrough =
base::MakeRefCounted<gpu::gles2::TexturePassthrough>(
- service_id, GL_TEXTURE_2D, gl_format_, size().width(),
+ service_id, GL_TEXTURE_2D, format_info_.gl_format, size().width(),
size().height(),
- /*depth=*/1, /*border=*/0, gl_format_, gl_type_);
+ /*depth=*/1, /*border=*/0, format_info_.gl_format,
+ format_info_.gl_type);
return base::MakeRefCounted<TextureHolder>(std::move(texture_passthrough));
}
@@ -434,9 +482,9 @@ SharedImageBackingEglImage::GenEGLImageSibling() {
cleared_rect = gfx::Rect(size());
// Set the level info.
- texture->SetLevelInfo(GL_TEXTURE_2D, 0, gl_format_, size().width(),
- size().height(), 1, 0, gl_format_, gl_type_,
- cleared_rect);
+ texture->SetLevelInfo(
+ GL_TEXTURE_2D, 0, format_info_.gl_format, size().width(), size().height(),
+ 1, 0, format_info_.gl_format, format_info_.gl_type, cleared_rect);
texture->SetImmutable(true /*immutable*/, false /*immutable_storage*/);
return base::MakeRefCounted<TextureHolder>(std::move(texture));
diff --git a/chromium/gpu/command_buffer/service/shared_image_backing_egl_image.h b/chromium/gpu/command_buffer/service/shared_image_backing_egl_image.h
index c3460e3e467..d75088ebccf 100644
--- a/chromium/gpu/command_buffer/service/shared_image_backing_egl_image.h
+++ b/chromium/gpu/command_buffer/service/shared_image_backing_egl_image.h
@@ -8,6 +8,8 @@
#include "base/memory/scoped_refptr.h"
#include "components/viz/common/resources/resource_format.h"
#include "gpu/command_buffer/service/shared_image_backing.h"
+#include "gpu/command_buffer/service/shared_image_backing_factory_gl_common.h"
+#include "gpu/command_buffer/service/shared_image_backing_gl_common.h"
#include "ui/gfx/buffer_types.h"
#include "ui/gl/gl_bindings.h"
@@ -43,11 +45,12 @@ class SharedImageBackingEglImage : public ClearTrackingSharedImageBacking {
SkAlphaType alpha_type,
uint32_t usage,
size_t estimated_size,
- GLuint gl_format,
- GLuint gl_type,
+ const SharedImageBackingFactoryGLCommon::FormatInfo format_info,
SharedImageBatchAccessManager* batch_access_manager,
const GpuDriverBugWorkarounds& workarounds,
- bool use_passthrough);
+ const SharedImageBackingGLCommon::UnpackStateAttribs& attribs,
+ bool use_passthrough,
+ base::span<const uint8_t> pixel_data);
~SharedImageBackingEglImage() override;
@@ -86,12 +89,14 @@ class SharedImageBackingEglImage : public ClearTrackingSharedImageBacking {
void EndRead(const RepresentationGLShared* reader);
// Used to create an EGLImage texture target from the same EGLImage object.
- scoped_refptr<TextureHolder> GenEGLImageSibling();
+ // Optional |pixel_data| to initialize the texture with before the EGLImage
+ // object is created from it.
+ scoped_refptr<TextureHolder> GenEGLImageSibling(
+ base::span<const uint8_t> pixel_data);
void SetEndReadFence(scoped_refptr<gl::SharedGLFenceEGL> shared_egl_fence);
- const GLuint gl_format_;
- const GLuint gl_type_;
+ const SharedImageBackingFactoryGLCommon::FormatInfo format_info_;
scoped_refptr<TextureHolder> source_texture_holder_;
gl::GLApi* created_on_context_;
@@ -115,6 +120,7 @@ class SharedImageBackingEglImage : public ClearTrackingSharedImageBacking {
GUARDED_BY(lock_);
SharedImageBatchAccessManager* batch_access_manager_ = nullptr;
+ const SharedImageBackingGLCommon::UnpackStateAttribs gl_unpack_attribs_;
const bool use_passthrough_;
DISALLOW_COPY_AND_ASSIGN(SharedImageBackingEglImage);
diff --git a/chromium/gpu/command_buffer/service/shared_image_backing_factory.h b/chromium/gpu/command_buffer/service/shared_image_backing_factory.h
index 22767061f0d..c6a536542d5 100644
--- a/chromium/gpu/command_buffer/service/shared_image_backing_factory.h
+++ b/chromium/gpu/command_buffer/service/shared_image_backing_factory.h
@@ -8,6 +8,7 @@
#include <memory>
#include "components/viz/common/resources/resource_format.h"
+#include "gpu/config/gpu_preferences.h"
#include "gpu/gpu_gles2_export.h"
#include "gpu/ipc/common/surface_handle.h"
#include "third_party/skia/include/core/SkImageInfo.h"
@@ -58,6 +59,7 @@ class GPU_GLES2_EXPORT SharedImageBackingFactory {
GrSurfaceOrigin surface_origin,
SkAlphaType alpha_type,
uint32_t usage) = 0;
+
// Only implemented in the D3D backing factory.
virtual std::vector<std::unique_ptr<SharedImageBacking>>
CreateSharedImageVideoPlanes(base::span<const Mailbox> mailboxes,
@@ -65,10 +67,15 @@ class GPU_GLES2_EXPORT SharedImageBackingFactory {
gfx::BufferFormat format,
const gfx::Size& size,
uint32_t usage);
- // Returns true if the specified GpuMemoryBufferType can be imported using
- // this factory.
- virtual bool CanImportGpuMemoryBuffer(
- gfx::GpuMemoryBufferType memory_buffer_type) = 0;
+
+ // Returns true if this factory can create a shared image with the given
+ // parameters.
+ virtual bool IsSupported(uint32_t usage,
+ viz::ResourceFormat format,
+ bool thread_safe,
+ gfx::GpuMemoryBufferType gmb_type,
+ GrContextType gr_context_type,
+ bool* allow_legacy_mailbox,
+ bool is_pixel_used) = 0;
};
} // namespace gpu
diff --git a/chromium/gpu/command_buffer/service/shared_image_backing_factory_ahardwarebuffer.cc b/chromium/gpu/command_buffer/service/shared_image_backing_factory_ahardwarebuffer.cc
index 3587e7009f9..4d6991f7e58 100644
--- a/chromium/gpu/command_buffer/service/shared_image_backing_factory_ahardwarebuffer.cc
+++ b/chromium/gpu/command_buffer/service/shared_image_backing_factory_ahardwarebuffer.cc
@@ -399,8 +399,14 @@ SharedImageBackingAHB::ProduceSkia(
// Check whether we are in Vulkan mode OR GL mode and accordingly create
// Skia representation.
if (context_state->GrContextIsVulkan()) {
+ uint32_t queue_family = VK_QUEUE_FAMILY_EXTERNAL;
+ if (usage() & SHARED_IMAGE_USAGE_SCANOUT) {
+ // Any Android API that consumes or produces buffers (e.g. SurfaceControl)
+ // requires a foreign queue.
+ queue_family = VK_QUEUE_FAMILY_FOREIGN_EXT;
+ }
auto vulkan_image = CreateVkImageFromAhbHandle(
- GetAhbHandle(), context_state.get(), size(), format());
+ GetAhbHandle(), context_state.get(), size(), format(), queue_family);
if (!vulkan_image)
return nullptr;
@@ -733,6 +739,30 @@ bool SharedImageBackingFactoryAHB::CanImportGpuMemoryBuffer(
return memory_buffer_type == gfx::ANDROID_HARDWARE_BUFFER;
}
+bool SharedImageBackingFactoryAHB::IsSupported(
+ uint32_t usage,
+ viz::ResourceFormat format,
+ bool thread_safe,
+ gfx::GpuMemoryBufferType gmb_type,
+ GrContextType gr_context_type,
+ bool* allow_legacy_mailbox,
+ bool is_pixel_used) {
+ if (gmb_type != gfx::EMPTY_BUFFER && !CanImportGpuMemoryBuffer(gmb_type)) {
+ return false;
+ }
+ // TODO(crbug.com/969114): Not all shared image factory implementations
+ // support concurrent read/write usage.
+ if (usage & SHARED_IMAGE_USAGE_CONCURRENT_READ_WRITE) {
+ return false;
+ }
+ if (!IsFormatSupported(format)) {
+ return false;
+ }
+
+ *allow_legacy_mailbox = false;
+ return true;
+}
+
bool SharedImageBackingFactoryAHB::IsFormatSupported(
viz::ResourceFormat format) {
DCHECK_GE(format, 0);
diff --git a/chromium/gpu/command_buffer/service/shared_image_backing_factory_ahardwarebuffer.h b/chromium/gpu/command_buffer/service/shared_image_backing_factory_ahardwarebuffer.h
index b70c4db2987..d90fa27da24 100644
--- a/chromium/gpu/command_buffer/service/shared_image_backing_factory_ahardwarebuffer.h
+++ b/chromium/gpu/command_buffer/service/shared_image_backing_factory_ahardwarebuffer.h
@@ -63,8 +63,13 @@ class GPU_GLES2_EXPORT SharedImageBackingFactoryAHB
GrSurfaceOrigin surface_origin,
SkAlphaType alpha_type,
uint32_t usage) override;
- bool CanImportGpuMemoryBuffer(
- gfx::GpuMemoryBufferType memory_buffer_type) override;
+ bool IsSupported(uint32_t usage,
+ viz::ResourceFormat format,
+ bool thread_safe,
+ gfx::GpuMemoryBufferType gmb_type,
+ GrContextType gr_context_type,
+ bool* allow_legacy_mailbox,
+ bool is_pixel_used) override;
bool IsFormatSupported(viz::ResourceFormat format);
private:
@@ -72,6 +77,8 @@ class GPU_GLES2_EXPORT SharedImageBackingFactoryAHB
const gfx::Size& size,
viz::ResourceFormat format) const;
+ bool CanImportGpuMemoryBuffer(gfx::GpuMemoryBufferType memory_buffer_type);
+
std::unique_ptr<SharedImageBacking> MakeBacking(
const Mailbox& mailbox,
viz::ResourceFormat format,
diff --git a/chromium/gpu/command_buffer/service/shared_image_backing_factory_d3d.cc b/chromium/gpu/command_buffer/service/shared_image_backing_factory_d3d.cc
index fdb21b6b968..85447d8084b 100644
--- a/chromium/gpu/command_buffer/service/shared_image_backing_factory_d3d.cc
+++ b/chromium/gpu/command_buffer/service/shared_image_backing_factory_d3d.cc
@@ -104,7 +104,8 @@ Microsoft::WRL::ComPtr<ID3D11Texture2D> ValidateAndOpenSharedHandle(
return nullptr;
}
- if (!gpu::IsImageSizeValidForGpuMemoryBufferFormat(size, format)) {
+ if (!gpu::IsImageSizeValidForGpuMemoryBufferFormat(
+ size, format, gfx::BufferPlane::DEFAULT)) {
DLOG(ERROR) << "Invalid image size " << size.ToString() << " for "
<< gfx::BufferFormatToString(format);
return nullptr;
@@ -439,4 +440,28 @@ bool SharedImageBackingFactoryD3D::CanImportGpuMemoryBuffer(
return (memory_buffer_type == gfx::DXGI_SHARED_HANDLE);
}
+bool SharedImageBackingFactoryD3D::IsSupported(
+ uint32_t usage,
+ viz::ResourceFormat format,
+ bool thread_safe,
+ gfx::GpuMemoryBufferType gmb_type,
+ GrContextType gr_context_type,
+ bool* allow_legacy_mailbox,
+ bool is_pixel_used) {
+ if (is_pixel_used) {
+ return false;
+ }
+ if (gmb_type != gfx::EMPTY_BUFFER && !CanImportGpuMemoryBuffer(gmb_type)) {
+ return false;
+ }
+ // TODO(crbug.com/969114): Not all shared image factory implementations
+ // support concurrent read/write usage.
+ if (usage & SHARED_IMAGE_USAGE_CONCURRENT_READ_WRITE) {
+ return false;
+ }
+
+ *allow_legacy_mailbox = false;
+ return true;
+}
+
} // namespace gpu
diff --git a/chromium/gpu/command_buffer/service/shared_image_backing_factory_d3d.h b/chromium/gpu/command_buffer/service/shared_image_backing_factory_d3d.h
index c18995d1ad2..abc82e4e704 100644
--- a/chromium/gpu/command_buffer/service/shared_image_backing_factory_d3d.h
+++ b/chromium/gpu/command_buffer/service/shared_image_backing_factory_d3d.h
@@ -98,10 +98,17 @@ class GPU_GLES2_EXPORT SharedImageBackingFactoryD3D
const gfx::Size& size,
uint32_t usage) override;
+ bool IsSupported(uint32_t usage,
+ viz::ResourceFormat format,
+ bool thread_safe,
+ gfx::GpuMemoryBufferType gmb_type,
+ GrContextType gr_context_type,
+ bool* allow_legacy_mailbox,
+ bool is_pixel_used) override;
+
// Returns true if the specified GpuMemoryBufferType can be imported using
// this factory.
- bool CanImportGpuMemoryBuffer(
- gfx::GpuMemoryBufferType memory_buffer_type) override;
+ bool CanImportGpuMemoryBuffer(gfx::GpuMemoryBufferType memory_buffer_type);
Microsoft::WRL::ComPtr<ID3D11Device> GetDeviceForTesting() const {
return d3d11_device_;
diff --git a/chromium/gpu/command_buffer/service/shared_image_backing_factory_d3d_unittest.cc b/chromium/gpu/command_buffer/service/shared_image_backing_factory_d3d_unittest.cc
index 94674667ef3..14764ec4938 100644
--- a/chromium/gpu/command_buffer/service/shared_image_backing_factory_d3d_unittest.cc
+++ b/chromium/gpu/command_buffer/service/shared_image_backing_factory_d3d_unittest.cc
@@ -8,7 +8,6 @@
#include <utility>
#include "base/callback_helpers.h"
-#include "base/logging.h"
#include "gpu/command_buffer/common/shared_image_usage.h"
#include "gpu/command_buffer/service/service_utils.h"
#include "gpu/command_buffer/service/shared_context_state.h"
@@ -581,8 +580,8 @@ TEST_F(SharedImageBackingFactoryD3DTest, Dawn_SkiaGL) {
{
// Create a SharedImageRepresentationDawn.
auto dawn_representation =
- shared_image_representation_factory_->ProduceDawn(mailbox,
- device.Get());
+ shared_image_representation_factory_->ProduceDawn(
+ mailbox, device.Get(), WGPUBackendType_D3D12);
ASSERT_TRUE(dawn_representation);
auto scoped_access = dawn_representation->BeginScopedAccess(
@@ -698,8 +697,8 @@ TEST_F(SharedImageBackingFactoryD3DTest, GL_Dawn_Skia_UnclearTexture) {
dawnProcSetProcs(&procs);
{
auto dawn_representation =
- shared_image_representation_factory_->ProduceDawn(mailbox,
- device.Get());
+ shared_image_representation_factory_->ProduceDawn(
+ mailbox, device.Get(), WGPUBackendType_D3D12);
ASSERT_TRUE(dawn_representation);
auto dawn_scoped_access = dawn_representation->BeginScopedAccess(
@@ -782,8 +781,8 @@ TEST_F(SharedImageBackingFactoryD3DTest, UnclearDawn_SkiaFails) {
dawnProcSetProcs(&procs);
{
auto dawn_representation =
- shared_image_representation_factory_->ProduceDawn(mailbox,
- device.Get());
+ shared_image_representation_factory_->ProduceDawn(
+ mailbox, device.Get(), WGPUBackendType_D3D12);
ASSERT_TRUE(dawn_representation);
auto dawn_scoped_access = dawn_representation->BeginScopedAccess(
@@ -990,8 +989,8 @@ TEST_F(SharedImageBackingFactoryD3DTest, Dawn_ReuseExternalImage) {
// Create the first Dawn texture then clear it to green.
{
auto dawn_representation =
- shared_image_representation_factory_->ProduceDawn(mailbox,
- device.Get());
+ shared_image_representation_factory_->ProduceDawn(
+ mailbox, device.Get(), WGPUBackendType_D3D12);
ASSERT_TRUE(dawn_representation);
auto scoped_access = dawn_representation->BeginScopedAccess(
@@ -1026,8 +1025,8 @@ TEST_F(SharedImageBackingFactoryD3DTest, Dawn_ReuseExternalImage) {
// Create another Dawn texture then clear it with another color.
{
auto dawn_representation =
- shared_image_representation_factory_->ProduceDawn(mailbox,
- device.Get());
+ shared_image_representation_factory_->ProduceDawn(
+ mailbox, device.Get(), WGPUBackendType_D3D12);
ASSERT_TRUE(dawn_representation);
// Check again that the texture is still green
@@ -1068,6 +1067,72 @@ TEST_F(SharedImageBackingFactoryD3DTest, Dawn_ReuseExternalImage) {
factory_ref.reset();
}
+
+// Check that Dawn holding the last ref works without a current GL context.
+TEST_F(SharedImageBackingFactoryD3DTest, Dawn_HasLastRef) {
+ if (!IsD3DSharedImageSupported())
+ return;
+
+ // Create a backing using a mailbox.
+ auto mailbox = Mailbox::GenerateForSharedImage();
+ const auto format = viz::ResourceFormat::RGBA_8888;
+ const gfx::Size size(1, 1);
+ const auto color_space = gfx::ColorSpace::CreateSRGB();
+ const uint32_t usage = SHARED_IMAGE_USAGE_GLES2 | SHARED_IMAGE_USAGE_DISPLAY |
+ SHARED_IMAGE_USAGE_WEBGPU;
+ const gpu::SurfaceHandle surface_handle = gpu::kNullSurfaceHandle;
+ auto backing = shared_image_factory_->CreateSharedImage(
+ mailbox, format, surface_handle, size, color_space,
+ kTopLeft_GrSurfaceOrigin, kPremul_SkAlphaType, usage,
+ false /* is_thread_safe */);
+ ASSERT_NE(backing, nullptr);
+
+ std::unique_ptr<SharedImageRepresentationFactoryRef> factory_ref =
+ shared_image_manager_.Register(std::move(backing),
+ memory_type_tracker_.get());
+
+ // Create a Dawn D3D12 device.
+ dawn_native::Instance instance;
+ instance.DiscoverDefaultAdapters();
+
+ std::vector<dawn_native::Adapter> adapters = instance.GetAdapters();
+ auto adapter_it = std::find_if(
+ adapters.begin(), adapters.end(), [](dawn_native::Adapter adapter) {
+ return adapter.GetBackendType() == dawn_native::BackendType::D3D12;
+ });
+ ASSERT_NE(adapter_it, adapters.end());
+
+ wgpu::Device device = wgpu::Device::Acquire(adapter_it->CreateDevice());
+ DawnProcTable procs = dawn_native::GetProcs();
+ dawnProcSetProcs(&procs);
+
+ auto dawn_representation = shared_image_representation_factory_->ProduceDawn(
+ mailbox, device.Get(), WGPUBackendType_D3D12);
+ ASSERT_NE(dawn_representation, nullptr);
+
+ // Creating the Skia representation will also create a temporary GL texture.
+ auto skia_representation = shared_image_representation_factory_->ProduceSkia(
+ mailbox, context_state_);
+ ASSERT_NE(skia_representation, nullptr);
+
+ // Drop Skia representation and factory ref so that the Dawn representation
+ // has the last ref.
+ skia_representation.reset();
+ factory_ref.reset();
+
+ // Ensure no GL context is current.
+ context_->ReleaseCurrent(surface_.get());
+
+ // This should not crash even though no GL context is current.
+ dawn_representation.reset();
+
+ // Shut down Dawn.
+ device = wgpu::Device();
+ dawnProcSetProcs(nullptr);
+
+ // Make the context current so that it can be destroyed.
+ context_->MakeCurrent(surface_.get());
+}
#endif // BUILDFLAG(USE_DAWN)
std::vector<std::unique_ptr<SharedImageRepresentationFactoryRef>>
diff --git a/chromium/gpu/command_buffer/service/shared_image_backing_factory_egl.cc b/chromium/gpu/command_buffer/service/shared_image_backing_factory_egl.cc
new file mode 100644
index 00000000000..a93e48011b1
--- /dev/null
+++ b/chromium/gpu/command_buffer/service/shared_image_backing_factory_egl.cc
@@ -0,0 +1,152 @@
+// Copyright 2021 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "gpu/command_buffer/service/shared_image_backing_factory_egl.h"
+
+#include <algorithm>
+
+#include "components/viz/common/resources/resource_format_utils.h"
+#include "components/viz/common/resources/resource_sizes.h"
+#include "gpu/command_buffer/common/mailbox.h"
+#include "gpu/command_buffer/common/shared_image_usage.h"
+#include "gpu/command_buffer/service/service_utils.h"
+#include "gpu/command_buffer/service/shared_image_backing_egl_image.h"
+#include "gpu/command_buffer/service/shared_image_batch_access_manager.h"
+#include "gpu/command_buffer/service/shared_image_factory.h"
+#include "gpu/config/gpu_preferences.h"
+#include "ui/gfx/geometry/size.h"
+#include "ui/gl/gl_bindings.h"
+#include "ui/gl/gl_gl_api_implementation.h"
+#include "ui/gl/gl_implementation.h"
+#include "ui/gl/shared_gl_fence_egl.h"
+
+namespace gpu {
+
+///////////////////////////////////////////////////////////////////////////////
+// SharedImageBackingFactoryEGL
+
+SharedImageBackingFactoryEGL::SharedImageBackingFactoryEGL(
+ const GpuPreferences& gpu_preferences,
+ const GpuDriverBugWorkarounds& workarounds,
+ const GpuFeatureInfo& gpu_feature_info,
+ SharedImageBatchAccessManager* batch_access_manager)
+ : SharedImageBackingFactoryGLCommon(gpu_preferences,
+ workarounds,
+ gpu_feature_info,
+ /*progress_reporter=*/nullptr),
+ batch_access_manager_(batch_access_manager) {}
+
+SharedImageBackingFactoryEGL::~SharedImageBackingFactoryEGL() = default;
+
+std::unique_ptr<SharedImageBacking>
+SharedImageBackingFactoryEGL::CreateSharedImage(
+ const Mailbox& mailbox,
+ viz::ResourceFormat format,
+ SurfaceHandle surface_handle,
+ const gfx::Size& size,
+ const gfx::ColorSpace& color_space,
+ GrSurfaceOrigin surface_origin,
+ SkAlphaType alpha_type,
+ uint32_t usage,
+ bool is_thread_safe) {
+ return MakeEglImageBacking(mailbox, format, size, color_space, surface_origin,
+ alpha_type, usage, base::span<const uint8_t>());
+}
+
+std::unique_ptr<SharedImageBacking>
+SharedImageBackingFactoryEGL::CreateSharedImage(
+ const Mailbox& mailbox,
+ viz::ResourceFormat format,
+ const gfx::Size& size,
+ const gfx::ColorSpace& color_space,
+ GrSurfaceOrigin surface_origin,
+ SkAlphaType alpha_type,
+ uint32_t usage,
+ base::span<const uint8_t> pixel_data) {
+ return MakeEglImageBacking(mailbox, format, size, color_space, surface_origin,
+ alpha_type, usage, pixel_data);
+}
+
+std::unique_ptr<SharedImageBacking>
+SharedImageBackingFactoryEGL::CreateSharedImage(
+ const Mailbox& mailbox,
+ int client_id,
+ gfx::GpuMemoryBufferHandle handle,
+ gfx::BufferFormat buffer_format,
+ gfx::BufferPlane plane,
+ SurfaceHandle surface_handle,
+ const gfx::Size& size,
+ const gfx::ColorSpace& color_space,
+ GrSurfaceOrigin surface_origin,
+ SkAlphaType alpha_type,
+ uint32_t usage) {
+ NOTIMPLEMENTED_LOG_ONCE();
+ return nullptr;
+}
+
+bool SharedImageBackingFactoryEGL::IsSupported(
+ uint32_t usage,
+ viz::ResourceFormat format,
+ bool thread_safe,
+ gfx::GpuMemoryBufferType gmb_type,
+ GrContextType gr_context_type,
+ bool* allow_legacy_mailbox,
+ bool is_pixel_used) {
+ if (is_pixel_used && gr_context_type != GrContextType::kGL) {
+ return false;
+ }
+
+ // GpuMemoryBuffers are not supported for now.
+ if (gmb_type != gfx::EMPTY_BUFFER) {
+ return false;
+ }
+
+ // Contexts other than GL are not supported for OOPR canvas.
+ if (gr_context_type != GrContextType::kGL &&
+ ((usage & SHARED_IMAGE_USAGE_DISPLAY) ||
+ (usage & SHARED_IMAGE_USAGE_RASTER))) {
+ return false;
+ }
+ if ((usage & SHARED_IMAGE_USAGE_WEBGPU) ||
+ (usage & SHARED_IMAGE_USAGE_VIDEO_DECODE) ||
+ (usage & SHARED_IMAGE_USAGE_SCANOUT)) {
+ // These usages require the interop factory.
+ return false;
+ }
+ *allow_legacy_mailbox = false;
+ return true;
+}
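To make the rejection rules above concrete, two hedged example queries against this factory (illustrative values, not code from the patch):

bool allow_legacy_mailbox = false;
// Accepted: GLES2 + RASTER usage, GL context, no GpuMemoryBuffer involved.
factory->IsSupported(SHARED_IMAGE_USAGE_GLES2 | SHARED_IMAGE_USAGE_RASTER,
                     viz::ResourceFormat::RGBA_8888, /*thread_safe=*/true,
                     gfx::EMPTY_BUFFER, GrContextType::kGL,
                     &allow_legacy_mailbox, /*is_pixel_used=*/false);  // true
// Rejected: WEBGPU usage must go to an interop-capable factory instead.
factory->IsSupported(SHARED_IMAGE_USAGE_WEBGPU,
                     viz::ResourceFormat::RGBA_8888, /*thread_safe=*/false,
                     gfx::EMPTY_BUFFER, GrContextType::kGL,
                     &allow_legacy_mailbox, /*is_pixel_used=*/false);  // false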
+
+std::unique_ptr<SharedImageBacking>
+SharedImageBackingFactoryEGL::MakeEglImageBacking(
+ const Mailbox& mailbox,
+ viz::ResourceFormat format,
+ const gfx::Size& size,
+ const gfx::ColorSpace& color_space,
+ GrSurfaceOrigin surface_origin,
+ SkAlphaType alpha_type,
+ uint32_t usage,
+ base::span<const uint8_t> pixel_data) {
+ DCHECK(!(usage & SHARED_IMAGE_USAGE_SCANOUT));
+
+ const FormatInfo& format_info = format_info_[format];
+ GLenum target = GL_TEXTURE_2D;
+ if (!CanCreateSharedImage(size, pixel_data, format_info, target)) {
+ return nullptr;
+ }
+
+ // Calculate SharedImage size in bytes.
+ size_t estimated_size;
+ if (!viz::ResourceSizes::MaybeSizeInBytes(size, format, &estimated_size)) {
+ DLOG(ERROR) << "MakeEglImageBacking: Failed to calculate SharedImage size";
+ return nullptr;
+ }
+
+ return std::make_unique<SharedImageBackingEglImage>(
+ mailbox, format, size, color_space, surface_origin, alpha_type, usage,
+ estimated_size, format_info, batch_access_manager_, workarounds_,
+ attribs_, use_passthrough_, pixel_data);
+}
+
+} // namespace gpu
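As a worked example of the size estimate in MakeEglImageBacking(): for the 256x256 RGBA_8888 backings created by the unit tests below, viz::ResourceSizes::MaybeSizeInBytes() computes 256 * 256 * 4 = 262,144 bytes, which is recorded as the backing's estimated GPU memory charge.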
diff --git a/chromium/gpu/command_buffer/service/shared_image_backing_factory_egl.h b/chromium/gpu/command_buffer/service/shared_image_backing_factory_egl.h
new file mode 100644
index 00000000000..97789441bb9
--- /dev/null
+++ b/chromium/gpu/command_buffer/service/shared_image_backing_factory_egl.h
@@ -0,0 +1,99 @@
+// Copyright 2021 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef GPU_COMMAND_BUFFER_SERVICE_SHARED_IMAGE_BACKING_FACTORY_EGL_H_
+#define GPU_COMMAND_BUFFER_SERVICE_SHARED_IMAGE_BACKING_FACTORY_EGL_H_
+
+#include <memory>
+
+#include "components/viz/common/resources/resource_format.h"
+#include "gpu/command_buffer/service/shared_image_backing_factory.h"
+#include "gpu/command_buffer/service/shared_image_backing_factory_gl_common.h"
+#include "gpu/config/gpu_driver_bug_workarounds.h"
+#include "gpu/gpu_gles2_export.h"
+#include "ui/gfx/buffer_types.h"
+#include "ui/gl/gl_bindings.h"
+
+namespace gfx {
+class Size;
+class ColorSpace;
+} // namespace gfx
+
+namespace gpu {
+class SharedImageBacking;
+class SharedImageBatchAccessManager;
+class GpuDriverBugWorkarounds;
+struct GpuFeatureInfo;
+struct GpuPreferences;
+struct Mailbox;
+
+// Implementation of SharedImageBackingFactory that produces EGL-backed
+// SharedImages.
+class GPU_GLES2_EXPORT SharedImageBackingFactoryEGL
+ : public SharedImageBackingFactoryGLCommon {
+ public:
+ SharedImageBackingFactoryEGL(
+ const GpuPreferences& gpu_preferences,
+ const GpuDriverBugWorkarounds& workarounds,
+ const GpuFeatureInfo& gpu_feature_info,
+ SharedImageBatchAccessManager* batch_access_manager);
+ ~SharedImageBackingFactoryEGL() override;
+
+ // SharedImageBackingFactory implementation.
+ std::unique_ptr<SharedImageBacking> CreateSharedImage(
+ const Mailbox& mailbox,
+ viz::ResourceFormat format,
+ SurfaceHandle surface_handle,
+ const gfx::Size& size,
+ const gfx::ColorSpace& color_space,
+ GrSurfaceOrigin surface_origin,
+ SkAlphaType alpha_type,
+ uint32_t usage,
+ bool is_thread_safe) override;
+ std::unique_ptr<SharedImageBacking> CreateSharedImage(
+ const Mailbox& mailbox,
+ viz::ResourceFormat format,
+ const gfx::Size& size,
+ const gfx::ColorSpace& color_space,
+ GrSurfaceOrigin surface_origin,
+ SkAlphaType alpha_type,
+ uint32_t usage,
+ base::span<const uint8_t> pixel_data) override;
+ std::unique_ptr<SharedImageBacking> CreateSharedImage(
+ const Mailbox& mailbox,
+ int client_id,
+ gfx::GpuMemoryBufferHandle handle,
+ gfx::BufferFormat format,
+ gfx::BufferPlane plane,
+ SurfaceHandle surface_handle,
+ const gfx::Size& size,
+ const gfx::ColorSpace& color_space,
+ GrSurfaceOrigin surface_origin,
+ SkAlphaType alpha_type,
+ uint32_t usage) override;
+ bool IsSupported(uint32_t usage,
+ viz::ResourceFormat format,
+ bool thread_safe,
+ gfx::GpuMemoryBufferType gmb_type,
+ GrContextType gr_context_type,
+ bool* allow_legacy_mailbox,
+ bool is_pixel_used) override;
+
+ private:
+ std::unique_ptr<SharedImageBacking> MakeEglImageBacking(
+ const Mailbox& mailbox,
+ viz::ResourceFormat format,
+ const gfx::Size& size,
+ const gfx::ColorSpace& color_space,
+ GrSurfaceOrigin surface_origin,
+ SkAlphaType alpha_type,
+ uint32_t usage,
+ base::span<const uint8_t> pixel_data);
+
+ SharedImageBatchAccessManager* batch_access_manager_ = nullptr;
+};
+
+} // namespace gpu
+
+#endif // GPU_COMMAND_BUFFER_SERVICE_SHARED_IMAGE_BACKING_FACTORY_EGL_H_
diff --git a/chromium/gpu/command_buffer/service/shared_image_backing_factory_egl_unittest.cc b/chromium/gpu/command_buffer/service/shared_image_backing_factory_egl_unittest.cc
new file mode 100644
index 00000000000..67c0d48e2e5
--- /dev/null
+++ b/chromium/gpu/command_buffer/service/shared_image_backing_factory_egl_unittest.cc
@@ -0,0 +1,410 @@
+// Copyright 2021 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "gpu/command_buffer/service/shared_image_backing_factory_egl.h"
+
+#include <thread>
+
+#include "base/callback_helpers.h"
+#include "base/strings/stringprintf.h"
+#include "components/viz/common/resources/resource_format_utils.h"
+#include "components/viz/common/resources/resource_sizes.h"
+#include "gpu/command_buffer/common/gpu_memory_buffer_support.h"
+#include "gpu/command_buffer/common/mailbox.h"
+#include "gpu/command_buffer/common/shared_image_usage.h"
+#include "gpu/command_buffer/service/mailbox_manager_impl.h"
+#include "gpu/command_buffer/service/service_utils.h"
+#include "gpu/command_buffer/service/shared_context_state.h"
+#include "gpu/command_buffer/service/shared_image_backing.h"
+#include "gpu/command_buffer/service/shared_image_factory.h"
+#include "gpu/command_buffer/service/shared_image_manager.h"
+#include "gpu/command_buffer/service/shared_image_representation.h"
+#include "gpu/command_buffer/service/shared_image_test_utils.h"
+#include "gpu/command_buffer/tests/texture_image_factory.h"
+#include "gpu/config/gpu_driver_bug_workarounds.h"
+#include "gpu/config/gpu_feature_info.h"
+#include "gpu/config/gpu_preferences.h"
+#include "gpu/config/gpu_test_config.h"
+#include "testing/gmock/include/gmock/gmock.h"
+#include "testing/gtest/include/gtest/gtest.h"
+#include "third_party/abseil-cpp/absl/types/optional.h"
+#include "third_party/skia/include/core/SkPromiseImageTexture.h"
+#include "third_party/skia/include/gpu/GrBackendSemaphore.h"
+#include "third_party/skia/include/gpu/GrBackendSurface.h"
+#include "ui/gfx/buffer_format_util.h"
+#include "ui/gfx/color_space.h"
+#include "ui/gl/buffer_format_utils.h"
+#include "ui/gl/gl_bindings.h"
+#include "ui/gl/gl_context.h"
+#include "ui/gl/gl_image_shared_memory.h"
+#include "ui/gl/gl_image_stub.h"
+#include "ui/gl/gl_surface.h"
+#include "ui/gl/init/gl_factory.h"
+
+using testing::AtLeast;
+
+namespace gpu {
+namespace {
+
+void CreateSharedContext(const GpuDriverBugWorkarounds& workarounds,
+ scoped_refptr<gl::GLSurface>& surface,
+ scoped_refptr<gl::GLContext>& context,
+ scoped_refptr<SharedContextState>& context_state,
+ scoped_refptr<gles2::FeatureInfo>& feature_info) {
+ surface = gl::init::CreateOffscreenGLSurface(gfx::Size());
+ ASSERT_TRUE(surface);
+ context =
+ gl::init::CreateGLContext(nullptr, surface.get(), gl::GLContextAttribs());
+ ASSERT_TRUE(context);
+ bool result = context->MakeCurrent(surface.get());
+ ASSERT_TRUE(result);
+
+ scoped_refptr<gl::GLShareGroup> share_group = new gl::GLShareGroup();
+ feature_info =
+ base::MakeRefCounted<gles2::FeatureInfo>(workarounds, GpuFeatureInfo());
+ context_state = base::MakeRefCounted<SharedContextState>(
+ std::move(share_group), surface, context,
+ false /* use_virtualized_gl_contexts */, base::DoNothing());
+ context_state->InitializeGrContext(GpuPreferences(), workarounds, nullptr);
+ context_state->InitializeGL(GpuPreferences(), feature_info);
+}
+
+class SharedImageBackingFactoryEGLThreadSafeTest
+ : public testing::TestWithParam<std::tuple<bool, viz::ResourceFormat>> {
+ public:
+ SharedImageBackingFactoryEGLThreadSafeTest()
+ : shared_image_manager_(std::make_unique<SharedImageManager>(true)) {}
+ ~SharedImageBackingFactoryEGLThreadSafeTest() {
+ // |context_state_| and |context_state2_| must each be destroyed on their
+ // own context.
+ context_state2_->MakeCurrent(surface2_.get(), true /* needs_gl */);
+ context_state2_.reset();
+ context_state_->MakeCurrent(surface_.get(), true /* needs_gl */);
+ context_state_.reset();
+ }
+
+ void SetUp() override {
+ GpuDriverBugWorkarounds workarounds;
+ workarounds.max_texture_size = INT_MAX - 1;
+
+ scoped_refptr<gles2::FeatureInfo> feature_info;
+ CreateSharedContext(workarounds, surface_, context_, context_state_,
+ feature_info);
+
+ GpuPreferences preferences;
+ preferences.use_passthrough_cmd_decoder = use_passthrough();
+ backing_factory_ = std::make_unique<SharedImageBackingFactoryEGL>(
+ preferences, workarounds, GpuFeatureInfo(),
+ shared_image_manager_->batch_access_manager());
+
+ memory_type_tracker_ = std::make_unique<MemoryTypeTracker>(nullptr);
+ shared_image_representation_factory_ =
+ std::make_unique<SharedImageRepresentationFactory>(
+ shared_image_manager_.get(), nullptr);
+
+ // Create a 2nd context/context_state that is not part of the same share
+ // group.
+ scoped_refptr<gles2::FeatureInfo> feature_info2;
+ CreateSharedContext(workarounds, surface2_, context2_, context_state2_,
+ feature_info2);
+ feature_info2.reset();
+ }
+
+ bool use_passthrough() {
+ return std::get<0>(GetParam()) &&
+ gles2::PassthroughCommandDecoderSupported();
+ }
+
+ viz::ResourceFormat get_format() { return std::get<1>(GetParam()); }
+
+ protected:
+ scoped_refptr<gl::GLSurface> surface_;
+ scoped_refptr<gl::GLContext> context_;
+ scoped_refptr<SharedContextState> context_state_;
+ std::unique_ptr<SharedImageBackingFactoryEGL> backing_factory_;
+ gles2::MailboxManagerImpl mailbox_manager_;
+ std::unique_ptr<SharedImageManager> shared_image_manager_;
+ std::unique_ptr<MemoryTypeTracker> memory_type_tracker_;
+ std::unique_ptr<SharedImageRepresentationFactory>
+ shared_image_representation_factory_;
+
+ scoped_refptr<gl::GLSurface> surface2_;
+ scoped_refptr<gl::GLContext> context2_;
+ scoped_refptr<SharedContextState> context_state2_;
+ TextureImageFactory image_factory_;
+};
+
+class CreateAndValidateSharedImageRepresentations {
+ public:
+ CreateAndValidateSharedImageRepresentations(
+ SharedImageBackingFactoryEGL* backing_factory,
+ viz::ResourceFormat format,
+ bool is_thread_safe,
+ gles2::MailboxManagerImpl* mailbox_manager,
+ SharedImageManager* shared_image_manager,
+ MemoryTypeTracker* memory_type_tracker,
+ SharedImageRepresentationFactory* shared_image_representation_factory,
+ SharedContextState* context_state,
+ bool upload_initial_data);
+ ~CreateAndValidateSharedImageRepresentations();
+
+ gfx::Size size() { return size_; }
+ Mailbox mailbox() { return mailbox_; }
+
+ private:
+ gles2::MailboxManagerImpl* mailbox_manager_;
+ gfx::Size size_;
+ Mailbox mailbox_;
+ std::unique_ptr<SharedImageBacking> backing_;
+ std::unique_ptr<SharedImageRepresentationFactoryRef> shared_image_;
+};
+
+// The intent of this test is to create a thread-safe backing and verify that
+// all representations work.
+TEST_P(SharedImageBackingFactoryEGLThreadSafeTest, BasicThreadSafe) {
+ CreateAndValidateSharedImageRepresentations shared_image(
+ backing_factory_.get(), get_format(), true /* is_thread_safe */,
+ &mailbox_manager_, shared_image_manager_.get(),
+ memory_type_tracker_.get(), shared_image_representation_factory_.get(),
+ context_state_.get(), /*upload_initial_data=*/false);
+}
+
+// The intent of this test is to create a thread-safe backing with initial
+// pixel data and verify that all representations work.
+TEST_P(SharedImageBackingFactoryEGLThreadSafeTest, BasicInitialData) {
+ CreateAndValidateSharedImageRepresentations shared_image(
+ backing_factory_.get(), get_format(), true /* is_thread_safe */,
+ &mailbox_manager_, shared_image_manager_.get(),
+ memory_type_tracker_.get(), shared_image_representation_factory_.get(),
+ context_state_.get(), /*upload_initial_data=*/true);
+}
+
+// The intent of this test is to exercise the shared image mailbox system from
+// 2 different threads, each running its own GL context that is not part of the
+// same share group. One thread writes to the backing and the other reads from
+// it.
+TEST_P(SharedImageBackingFactoryEGLThreadSafeTest, OneWriterOneReader) {
+ // Create it on 1st SharedContextState |context_state_|.
+ CreateAndValidateSharedImageRepresentations shared_image(
+ backing_factory_.get(), get_format(), true /* is_thread_safe */,
+ &mailbox_manager_, shared_image_manager_.get(),
+ memory_type_tracker_.get(), shared_image_representation_factory_.get(),
+ context_state_.get(), /*upload_initial_data=*/false);
+
+ auto mailbox = shared_image.mailbox();
+ auto size = shared_image.size();
+
+ // The writer writes to the backing: create a GLTexture representation and
+ // clear it to green.
+ auto gl_representation =
+ shared_image_representation_factory_->ProduceGLTexture(mailbox);
+ EXPECT_TRUE(gl_representation);
+
+ // Begin writing to the underlying texture of the backing via ScopedAccess.
+ std::unique_ptr<SharedImageRepresentationGLTexture::ScopedAccess>
+ writer_scoped_access = gl_representation->BeginScopedAccess(
+ GL_SHARED_IMAGE_ACCESS_MODE_READWRITE_CHROMIUM,
+ SharedImageRepresentation::AllowUnclearedAccess::kNo);
+
+ DCHECK(writer_scoped_access);
+
+ // Create an FBO.
+ GLuint fbo = 0;
+ gl::GLApi* api = gl::g_current_gl_context;
+ api->glGenFramebuffersEXTFn(1, &fbo);
+ api->glBindFramebufferEXTFn(GL_FRAMEBUFFER, fbo);
+
+ // Attach the texture to FBO.
+ api->glFramebufferTexture2DEXTFn(
+ GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0,
+ gl_representation->GetTexture()->target(),
+ gl_representation->GetTexture()->service_id(), 0);
+
+ // Set the clear color to green.
+ api->glClearColorFn(0.0f, 1.0f, 0.0f, 1.0f);
+ api->glClearFn(GL_COLOR_BUFFER_BIT);
+ gl_representation->GetTexture()->SetLevelCleared(
+ gl_representation->GetTexture()->target(), 0, true);
+
+ // End writing.
+ writer_scoped_access.reset();
+ gl_representation.reset();
+
+ // Read from the backing in a separate thread. The read is done via a
+ // SkiaGLRepresentation. ReadPixels() produces a SkiaGLRepresentation which
+ // in turn wraps a GLTextureRepresentation in GL mode, so testing reads via
+ // SkiaGLRepresentation is equivalent to testing them via
+ // GLTextureRepresentation.
+ std::vector<uint8_t> dst_pixels;
+
+ // Launch 2nd thread.
+ std::thread second_thread([&]() {
+ // Do ReadPixels() on 2nd SharedContextState |context_state2_|.
+ dst_pixels = ReadPixels(mailbox, size, context_state2_.get(),
+ shared_image_representation_factory_.get());
+ });
+
+ // Wait for the second thread to finish.
+ second_thread.join();
+
+ // Compare the pixel values.
+ EXPECT_EQ(dst_pixels[0], 0);
+ EXPECT_EQ(dst_pixels[1], 255);
+ EXPECT_EQ(dst_pixels[2], 0);
+ EXPECT_EQ(dst_pixels[3], 255);
+}
+
+CreateAndValidateSharedImageRepresentations::
+ CreateAndValidateSharedImageRepresentations(
+ SharedImageBackingFactoryEGL* backing_factory,
+ viz::ResourceFormat format,
+ bool is_thread_safe,
+ gles2::MailboxManagerImpl* mailbox_manager,
+ SharedImageManager* shared_image_manager,
+ MemoryTypeTracker* memory_type_tracker,
+ SharedImageRepresentationFactory* shared_image_representation_factory,
+ SharedContextState* context_state,
+ bool upload_initial_data)
+ : mailbox_manager_(mailbox_manager), size_(256, 256) {
+ // Make the context current.
+ DCHECK(context_state);
+ EXPECT_TRUE(
+ context_state->MakeCurrent(context_state->surface(), true /* needs_gl*/));
+ mailbox_ = Mailbox::GenerateForSharedImage();
+ auto color_space = gfx::ColorSpace::CreateSRGB();
+ GrSurfaceOrigin surface_origin = kTopLeft_GrSurfaceOrigin;
+ SkAlphaType alpha_type = kPremul_SkAlphaType;
+ gpu::SurfaceHandle surface_handle = gpu::kNullSurfaceHandle;
+
+ // SHARED_IMAGE_USAGE_DISPLAY for Skia read and SHARED_IMAGE_USAGE_RASTER for
+ // Skia write.
+ uint32_t usage = SHARED_IMAGE_USAGE_GLES2 | SHARED_IMAGE_USAGE_RASTER;
+ if (!is_thread_safe)
+ usage |= SHARED_IMAGE_USAGE_DISPLAY;
+ if (upload_initial_data) {
+ std::vector<uint8_t> initial_data(
+ viz::ResourceSizes::CheckedSizeInBytes<unsigned int>(size_, format));
+ backing_ = backing_factory->CreateSharedImage(
+ mailbox_, format, size_, color_space, surface_origin, alpha_type, usage,
+ initial_data);
+ } else {
+ backing_ = backing_factory->CreateSharedImage(
+ mailbox_, format, surface_handle, size_, color_space, surface_origin,
+ alpha_type, usage, is_thread_safe);
+ }
+
+ // As long as either |chromium_image_ar30| or |chromium_image_ab30| is
+ // enabled, we can create a non-scanout SharedImage with format
+ // viz::ResourceFormat::{BGRA,RGBA}_1010102.
+ const bool supports_ar30 =
+ context_state->feature_info()->feature_flags().chromium_image_ar30;
+ const bool supports_ab30 =
+ context_state->feature_info()->feature_flags().chromium_image_ab30;
+ if ((format == viz::ResourceFormat::BGRA_1010102 ||
+ format == viz::ResourceFormat::RGBA_1010102) &&
+ !supports_ar30 && !supports_ab30) {
+ EXPECT_FALSE(backing_);
+ return;
+ }
+ EXPECT_TRUE(backing_);
+ if (!backing_)
+ return;
+
+ // Check clearing.
+ if (!backing_->IsCleared()) {
+ backing_->SetCleared();
+ EXPECT_TRUE(backing_->IsCleared());
+ }
+
+ GLenum expected_target = GL_TEXTURE_2D;
+ shared_image_ =
+ shared_image_manager->Register(std::move(backing_), memory_type_tracker);
+
+ // Create and validate GLTexture representation.
+ auto gl_representation =
+ shared_image_representation_factory->ProduceGLTexture(mailbox_);
+
+ EXPECT_TRUE(gl_representation);
+ EXPECT_TRUE(gl_representation->GetTexture()->service_id());
+ EXPECT_EQ(expected_target, gl_representation->GetTexture()->target());
+ EXPECT_EQ(size_, gl_representation->size());
+ EXPECT_EQ(format, gl_representation->format());
+ EXPECT_EQ(color_space, gl_representation->color_space());
+ EXPECT_EQ(usage, gl_representation->usage());
+ gl_representation.reset();
+
+ // Create and Validate Skia Representations.
+ auto skia_representation =
+ shared_image_representation_factory->ProduceSkia(mailbox_, context_state);
+ EXPECT_TRUE(skia_representation);
+ std::vector<GrBackendSemaphore> begin_semaphores;
+ std::vector<GrBackendSemaphore> end_semaphores;
+
+ std::unique_ptr<SharedImageRepresentationSkia::ScopedWriteAccess>
+ scoped_write_access;
+ scoped_write_access = skia_representation->BeginScopedWriteAccess(
+ &begin_semaphores, &end_semaphores,
+ SharedImageRepresentation::AllowUnclearedAccess::kNo);
+ // We use |supports_ar30| and |supports_ab30| to detect RGB10A2/BGR10A2
+ // support. It's possible Skia might support these formats even if the Chrome
+ // feature flags are false. We just check here that the feature flags don't
+ // allow Chrome to do something that Skia doesn't support.
+ if ((format != viz::ResourceFormat::BGRA_1010102 || supports_ar30) &&
+ (format != viz::ResourceFormat::RGBA_1010102 || supports_ab30)) {
+ EXPECT_TRUE(scoped_write_access);
+ if (!scoped_write_access)
+ return;
+ auto* surface = scoped_write_access->surface();
+ EXPECT_TRUE(surface);
+ if (!surface)
+ return;
+ EXPECT_EQ(size_.width(), surface->width());
+ EXPECT_EQ(size_.height(), surface->height());
+ }
+ EXPECT_TRUE(begin_semaphores.empty());
+ EXPECT_TRUE(end_semaphores.empty());
+ scoped_write_access.reset();
+
+ std::unique_ptr<SharedImageRepresentationSkia::ScopedReadAccess>
+ scoped_read_access;
+ scoped_read_access = skia_representation->BeginScopedReadAccess(
+ &begin_semaphores, &end_semaphores);
+ auto* promise_texture = scoped_read_access->promise_image_texture();
+ EXPECT_TRUE(promise_texture);
+ EXPECT_TRUE(begin_semaphores.empty());
+ EXPECT_TRUE(end_semaphores.empty());
+ GrBackendTexture backend_texture = promise_texture->backendTexture();
+ EXPECT_TRUE(backend_texture.isValid());
+ EXPECT_EQ(size_.width(), backend_texture.width());
+ EXPECT_EQ(size_.height(), backend_texture.height());
+ scoped_read_access.reset();
+ skia_representation.reset();
+}
+
+CreateAndValidateSharedImageRepresentations::
+ ~CreateAndValidateSharedImageRepresentations() {
+ shared_image_.reset();
+ EXPECT_FALSE(mailbox_manager_->ConsumeTexture(mailbox_));
+}
+
+// High bit depth rendering is not supported on Android.
+const auto kResourceFormats = ::testing::Values(viz::ResourceFormat::RGBA_8888);
+
+std::string TestParamToString(
+ const testing::TestParamInfo<std::tuple<bool, viz::ResourceFormat>>&
+ param_info) {
+ const bool allow_passthrough = std::get<0>(param_info.param);
+ const viz::ResourceFormat format = std::get<1>(param_info.param);
+ return base::StringPrintf(
+ "%s_%s", (allow_passthrough ? "AllowPassthrough" : "DisallowPassthrough"),
+ gfx::BufferFormatToString(viz::BufferFormat(format)));
+}
+
+INSTANTIATE_TEST_SUITE_P(Service,
+ SharedImageBackingFactoryEGLThreadSafeTest,
+ ::testing::Combine(::testing::Bool(),
+ kResourceFormats),
+ TestParamToString);
+
+} // anonymous namespace
+} // namespace gpu
diff --git a/chromium/gpu/command_buffer/service/shared_image_backing_factory_gl_common.cc b/chromium/gpu/command_buffer/service/shared_image_backing_factory_gl_common.cc
new file mode 100644
index 00000000000..2b8260336ff
--- /dev/null
+++ b/chromium/gpu/command_buffer/service/shared_image_backing_factory_gl_common.cc
@@ -0,0 +1,193 @@
+// Copyright 2021 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "gpu/command_buffer/service/shared_image_backing_factory_gl_common.h"
+
+#include <algorithm>
+#include <list>
+
+#include "base/containers/contains.h"
+#include "components/viz/common/resources/resource_sizes.h"
+#include "gpu/command_buffer/common/mailbox.h"
+#include "gpu/command_buffer/service/service_utils.h"
+#include "gpu/config/gpu_preferences.h"
+#include "ui/gfx/color_space.h"
+#include "ui/gfx/geometry/size.h"
+#include "ui/gl/gl_version_info.h"
+#include "ui/gl/progress_reporter.h"
+
+namespace gpu {
+
+///////////////////////////////////////////////////////////////////////////////
+// SharedImageBackingFactoryGLCommon
+
+SharedImageBackingFactoryGLCommon::SharedImageBackingFactoryGLCommon(
+ const GpuPreferences& gpu_preferences,
+ const GpuDriverBugWorkarounds& workarounds,
+ const GpuFeatureInfo& gpu_feature_info,
+ gl::ProgressReporter* progress_reporter)
+ : use_passthrough_(gpu_preferences.use_passthrough_cmd_decoder &&
+ gles2::PassthroughCommandDecoderSupported()),
+ workarounds_(workarounds),
+ progress_reporter_(progress_reporter) {
+ gl::GLApi* api = gl::g_current_gl_context;
+ api->glGetIntegervFn(GL_MAX_TEXTURE_SIZE, &max_texture_size_);
+ // When the passthrough command decoder is used, the max_texture_size
+ // workaround is implemented by ANGLE. Trying to adjust the max size here
+ // would cause a discrepancy between what we think the max size is and what
+ // ANGLE tells the clients.
+ if (!use_passthrough_ && workarounds.max_texture_size) {
+ max_texture_size_ =
+ std::min(max_texture_size_, workarounds.max_texture_size);
+ }
+ // Ensure max_texture_size_ is less than INT_MAX so that gfx::Rect and
+ // friends can accurately represent all valid sub-rects; overflow cases,
+ // clamped to INT_MAX, are always invalid.
+ max_texture_size_ = std::min(max_texture_size_, INT_MAX - 1);
+
+ // TODO(piman): Can we extract the logic out of FeatureInfo?
+ scoped_refptr<gles2::FeatureInfo> feature_info =
+ new gles2::FeatureInfo(workarounds, gpu_feature_info);
+ feature_info->Initialize(ContextType::CONTEXT_TYPE_OPENGLES2,
+ use_passthrough_, gles2::DisallowedFeatures());
+ texture_usage_angle_ = feature_info->feature_flags().angle_texture_usage;
+ attribs_.es3_capable = feature_info->IsES3Capable();
+ attribs_.desktop_gl = !feature_info->gl_version_info().is_es;
+ // Can't use the value from feature_info, as we unconditionally enable this
+ // extension, and assume it can't be used if PBOs are not used (which isn't
+ // true for Skia used directly against GL).
+ attribs_.supports_unpack_subimage =
+ gl::g_current_gl_driver->ext.b_GL_EXT_unpack_subimage;
+ bool enable_texture_storage =
+ feature_info->feature_flags().ext_texture_storage;
+ const gles2::Validators* validators = feature_info->validators();
+ for (int i = 0; i <= viz::RESOURCE_FORMAT_MAX; ++i) {
+ auto format = static_cast<viz::ResourceFormat>(i);
+ FormatInfo& info = format_info_[i];
+ if (!viz::GLSupportsFormat(format))
+ continue;
+ const GLuint image_internal_format = viz::GLInternalFormat(format);
+ const GLenum gl_format = viz::GLDataFormat(format);
+ const GLenum gl_type = viz::GLDataType(format);
+ const bool uncompressed_format_valid =
+ validators->texture_internal_format.IsValid(image_internal_format) &&
+ validators->texture_format.IsValid(gl_format);
+ const bool compressed_format_valid =
+ validators->compressed_texture_format.IsValid(image_internal_format);
+ if ((uncompressed_format_valid || compressed_format_valid) &&
+ validators->pixel_type.IsValid(gl_type)) {
+ info.enabled = true;
+ info.is_compressed = compressed_format_valid;
+ info.gl_format = gl_format;
+ info.gl_type = gl_type;
+ info.swizzle = gles2::TextureManager::GetCompatibilitySwizzle(
+ feature_info.get(), gl_format);
+ info.image_internal_format =
+ gles2::TextureManager::AdjustTexInternalFormat(
+ feature_info.get(), image_internal_format, gl_type);
+ info.adjusted_format =
+ gles2::TextureManager::AdjustTexFormat(feature_info.get(), gl_format);
+ }
+ if (!info.enabled)
+ continue;
+ if (enable_texture_storage && !info.is_compressed) {
+ GLuint storage_internal_format = viz::TextureStorageFormat(format);
+ if (validators->texture_internal_format_storage.IsValid(
+ storage_internal_format)) {
+ info.supports_storage = true;
+ info.storage_internal_format =
+ gles2::TextureManager::AdjustTexStorageFormat(
+ feature_info.get(), storage_internal_format);
+ }
+ }
+ }
+}
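A hedged numeric illustration of the clamping above: if the driver reports GL_MAX_TEXTURE_SIZE = 16384 and workarounds.max_texture_size = 8192, then on the validating decoder

  max_texture_size_ = min(min(16384, 8192), INT_MAX - 1) = 8192

while under the passthrough decoder the workaround is left to ANGLE and the limit stays min(16384, INT_MAX - 1) = 16384.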
+
+SharedImageBackingFactoryGLCommon::~SharedImageBackingFactoryGLCommon() =
+ default;
+
+bool SharedImageBackingFactoryGLCommon::CanCreateSharedImage(
+ const gfx::Size& size,
+ base::span<const uint8_t> pixel_data,
+ const FormatInfo& format_info,
+ GLenum target) {
+ if (!format_info.enabled) {
+ LOG(ERROR) << "CreateSharedImage: invalid format";
+ return false;
+ }
+
+ if (size.width() < 1 || size.height() < 1 ||
+ size.width() > max_texture_size_ || size.height() > max_texture_size_) {
+ LOG(ERROR) << "CreateSharedImage: invalid size";
+ return false;
+ }
+
+ // If we have initial data to upload, ensure it is sized appropriately.
+ if (!pixel_data.empty()) {
+ if (format_info.is_compressed) {
+ const char* error_message = "unspecified";
+ if (!gles2::ValidateCompressedTexDimensions(
+ target, 0 /* level */, size.width(), size.height(), 1 /* depth */,
+ format_info.image_internal_format, &error_message)) {
+ LOG(ERROR) << "CreateSharedImage: "
+ "ValidateCompressedTexDimensionsFailed with error: "
+ << error_message;
+ return false;
+ }
+
+ GLsizei bytes_required = 0;
+ if (!gles2::GetCompressedTexSizeInBytes(
+ nullptr /* function_name */, size.width(), size.height(),
+ 1 /* depth */, format_info.image_internal_format, &bytes_required,
+ nullptr /* error_state */)) {
+ LOG(ERROR) << "CreateSharedImage: Unable to compute required size for "
+ "initial texture upload.";
+ return false;
+ }
+
+ if (bytes_required < 0 ||
+ pixel_data.size() != static_cast<size_t>(bytes_required)) {
+ LOG(ERROR) << "CreateSharedImage: Initial data does not have expected "
+ "size.";
+ return false;
+ }
+ } else {
+ uint32_t bytes_required;
+ uint32_t unpadded_row_size = 0u;
+ uint32_t padded_row_size = 0u;
+ if (!gles2::GLES2Util::ComputeImageDataSizes(
+ size.width(), size.height(), 1 /* depth */, format_info.gl_format,
+ format_info.gl_type, 4 /* alignment */, &bytes_required,
+ &unpadded_row_size, &padded_row_size)) {
+ LOG(ERROR) << "CreateSharedImage: Unable to compute required size for "
+ "initial texture upload.";
+ return false;
+ }
+
+ // The GL spec, used in the computation for required bytes in the function
+ // above, assumes no padding is required for the last row in the image.
+ // But the client data does include this padding, so we add it for the
+ // data validation check here.
+ uint32_t padding = padded_row_size - unpadded_row_size;
+ bytes_required += padding;
+ if (pixel_data.size() != bytes_required) {
+ LOG(ERROR) << "CreateSharedImage: Initial data does not have expected "
+ "size.";
+ return false;
+ }
+ }
+ }
+
+ return true;
+}
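A worked example of the uncompressed-path arithmetic above, with hypothetical values: a 5x3 GL_RGB/GL_UNSIGNED_BYTE upload under the 4-byte row alignment used here gives

  unpadded_row_size = 5 * 3 = 15 bytes
  padded_row_size   = 16 bytes
  bytes_required    = 16 + 16 + 15 = 47  (last row unpadded, per the GL spec)
  padding           = 16 - 15 = 1

so pixel_data.size() must equal 47 + 1 = 48 bytes for validation to pass.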
+
+///////////////////////////////////////////////////////////////////////////////
+// SharedImageBackingFactoryGLCommon::FormatInfo
+
+SharedImageBackingFactoryGLCommon::FormatInfo::FormatInfo() = default;
+SharedImageBackingFactoryGLCommon::FormatInfo::FormatInfo(
+ const FormatInfo& other) = default;
+SharedImageBackingFactoryGLCommon::FormatInfo::~FormatInfo() = default;
+
+} // namespace gpu
diff --git a/chromium/gpu/command_buffer/service/shared_image_backing_factory_gl_common.h b/chromium/gpu/command_buffer/service/shared_image_backing_factory_gl_common.h
new file mode 100644
index 00000000000..b2b4a1ab580
--- /dev/null
+++ b/chromium/gpu/command_buffer/service/shared_image_backing_factory_gl_common.h
@@ -0,0 +1,87 @@
+// Copyright 2021 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef GPU_COMMAND_BUFFER_SERVICE_SHARED_IMAGE_BACKING_FACTORY_GL_COMMON_H_
+#define GPU_COMMAND_BUFFER_SERVICE_SHARED_IMAGE_BACKING_FACTORY_GL_COMMON_H_
+
+#include "components/viz/common/resources/resource_format.h"
+#include "gpu/command_buffer/service/shared_image_backing_factory.h"
+#include "gpu/command_buffer/service/shared_image_backing_gl_common.h"
+
+namespace gfx {
+class Size;
+} // namespace gfx
+
+namespace gl {
+class ProgressReporter;
+} // namespace gl
+
+namespace gpu {
+class GpuDriverBugWorkarounds;
+struct GpuFeatureInfo;
+struct GpuPreferences;
+
+// Common constructor and helper functions shared by the GL-based backing
+// factories (SharedImageBackingFactoryGLTexture,
+// SharedImageBackingFactoryGLImage, and SharedImageBackingFactoryEGL).
+class GPU_GLES2_EXPORT SharedImageBackingFactoryGLCommon
+ : public SharedImageBackingFactory {
+ public:
+ struct FormatInfo {
+ FormatInfo();
+ FormatInfo(const FormatInfo& other);
+ ~FormatInfo();
+
+ // Whether this format is supported.
+ bool enabled = false;
+
+ // Whether this format supports TexStorage2D.
+ bool supports_storage = false;
+
+ // Whether the texture is a compressed type.
+ bool is_compressed = false;
+
+ GLenum gl_format = 0;
+ GLenum gl_type = 0;
+ const gles2::Texture::CompatibilitySwizzle* swizzle = nullptr;
+ GLenum adjusted_format = 0;
+
+ // The internalformat portion of the format/type/internalformat triplet
+ // used when calling TexImage2D.
+ GLuint image_internal_format = 0;
+
+ // The internalformat portion of the format/type/internalformat triplet
+ // used when calling TexStorage2D.
+ GLuint storage_internal_format = 0;
+ };
+
+ protected:
+ SharedImageBackingFactoryGLCommon(const GpuPreferences& gpu_preferences,
+ const GpuDriverBugWorkarounds& workarounds,
+ const GpuFeatureInfo& gpu_feature_info,
+ gl::ProgressReporter* progress_reporter);
+ ~SharedImageBackingFactoryGLCommon() override;
+
+ bool CanCreateSharedImage(const gfx::Size& size,
+ base::span<const uint8_t> pixel_data,
+ const FormatInfo& format_info,
+ GLenum target);
+
+ // Whether we're using the passthrough command decoder and should generate
+ // passthrough textures.
+ bool use_passthrough_ = false;
+
+ FormatInfo format_info_[viz::RESOURCE_FORMAT_MAX + 1];
+ int32_t max_texture_size_ = 0;
+ bool texture_usage_angle_ = false;
+ SharedImageBackingGLCommon::UnpackStateAttribs attribs_;
+ GpuDriverBugWorkarounds workarounds_;
+
+ // Used to notify the watchdog before a buffer allocation in case the
+ // allocation takes a long time.
+ gl::ProgressReporter* const progress_reporter_ = nullptr;
+};
+
+} // namespace gpu
+
+#endif // GPU_COMMAND_BUFFER_SERVICE_SHARED_IMAGE_BACKING_FACTORY_GL_COMMON_H_
diff --git a/chromium/gpu/command_buffer/service/shared_image_backing_factory_gl_image.cc b/chromium/gpu/command_buffer/service/shared_image_backing_factory_gl_image.cc
new file mode 100644
index 00000000000..1fcb65e7e92
--- /dev/null
+++ b/chromium/gpu/command_buffer/service/shared_image_backing_factory_gl_image.cc
@@ -0,0 +1,379 @@
+// Copyright 2021 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "gpu/command_buffer/service/shared_image_backing_factory_gl_image.h"
+
+#include <list>
+#include <utility>
+
+#include "base/containers/contains.h"
+#include "build/build_config.h"
+#include "components/viz/common/resources/resource_sizes.h"
+#include "gpu/command_buffer/common/mailbox.h"
+#include "gpu/command_buffer/common/shared_image_usage.h"
+#include "gpu/command_buffer/service/image_factory.h"
+#include "gpu/command_buffer/service/shared_image_backing_gl_image.h"
+#include "gpu/command_buffer/service/shared_image_factory.h"
+#include "gpu/config/gpu_preferences.h"
+#include "ui/gfx/buffer_format_util.h"
+#include "ui/gfx/color_space.h"
+#include "ui/gfx/geometry/size.h"
+#include "ui/gl/buffer_format_utils.h"
+#include "ui/gl/gl_image_shared_memory.h"
+#include "ui/gl/progress_reporter.h"
+
+namespace gpu {
+
+namespace {
+
+using InitializeGLTextureParams =
+ SharedImageBackingGLCommon::InitializeGLTextureParams;
+
+} // anonymous namespace
+
+///////////////////////////////////////////////////////////////////////////////
+// SharedImageBackingFactoryGLImage
+
+SharedImageBackingFactoryGLImage::SharedImageBackingFactoryGLImage(
+ const GpuPreferences& gpu_preferences,
+ const GpuDriverBugWorkarounds& workarounds,
+ const GpuFeatureInfo& gpu_feature_info,
+ ImageFactory* image_factory,
+ gl::ProgressReporter* progress_reporter)
+ : SharedImageBackingFactoryGLCommon(gpu_preferences,
+ workarounds,
+ gpu_feature_info,
+ progress_reporter),
+ image_factory_(image_factory) {
+ scoped_refptr<gles2::FeatureInfo> feature_info =
+ new gles2::FeatureInfo(workarounds, gpu_feature_info);
+ feature_info->Initialize(ContextType::CONTEXT_TYPE_OPENGLES2,
+ use_passthrough_, gles2::DisallowedFeatures());
+ gpu_memory_buffer_formats_ =
+ feature_info->feature_flags().gpu_memory_buffer_formats;
+ // Return early if scanout images are not supported.
+ if (!(image_factory_ && image_factory_->SupportsCreateAnonymousImage())) {
+ return;
+ }
+ for (int i = 0; i <= viz::RESOURCE_FORMAT_MAX; ++i) {
+ auto format = static_cast<viz::ResourceFormat>(i);
+ FormatInfo& info = format_info_[i];
+ BufferFormatInfo& buffer_format_info = buffer_format_info_[i];
+ if (!info.enabled || !IsGpuMemoryBufferFormatSupported(format)) {
+ continue;
+ }
+ const gfx::BufferFormat buffer_format = viz::BufferFormat(format);
+ switch (buffer_format) {
+ case gfx::BufferFormat::RGBA_8888:
+ case gfx::BufferFormat::BGRA_8888:
+ case gfx::BufferFormat::RGBA_F16:
+ case gfx::BufferFormat::R_8:
+ case gfx::BufferFormat::BGRA_1010102:
+ case gfx::BufferFormat::RGBA_1010102:
+ break;
+ default:
+ continue;
+ }
+ if (!gpu_memory_buffer_formats_.Has(buffer_format))
+ continue;
+ buffer_format_info.allow_scanout = true;
+ buffer_format_info.buffer_format = buffer_format;
+ DCHECK_EQ(info.image_internal_format,
+ gl::BufferFormatToGLInternalFormat(buffer_format));
+ if (base::Contains(gpu_preferences.texture_target_exception_list,
+ gfx::BufferUsageAndFormat(gfx::BufferUsage::SCANOUT,
+ buffer_format))) {
+ buffer_format_info.target_for_scanout =
+ gpu::GetPlatformSpecificTextureTarget();
+ }
+ }
+}
+
+SharedImageBackingFactoryGLImage::~SharedImageBackingFactoryGLImage() = default;
+
+std::unique_ptr<SharedImageBacking>
+SharedImageBackingFactoryGLImage::CreateSharedImage(
+ const Mailbox& mailbox,
+ viz::ResourceFormat format,
+ SurfaceHandle surface_handle,
+ const gfx::Size& size,
+ const gfx::ColorSpace& color_space,
+ GrSurfaceOrigin surface_origin,
+ SkAlphaType alpha_type,
+ uint32_t usage,
+ bool is_thread_safe) {
+ DCHECK(!is_thread_safe);
+ return CreateSharedImageInternal(mailbox, format, surface_handle, size,
+ color_space, surface_origin, alpha_type,
+ usage, base::span<const uint8_t>());
+}
+
+std::unique_ptr<SharedImageBacking>
+SharedImageBackingFactoryGLImage::CreateSharedImage(
+ const Mailbox& mailbox,
+ viz::ResourceFormat format,
+ const gfx::Size& size,
+ const gfx::ColorSpace& color_space,
+ GrSurfaceOrigin surface_origin,
+ SkAlphaType alpha_type,
+ uint32_t usage,
+ base::span<const uint8_t> pixel_data) {
+ return CreateSharedImageInternal(mailbox, format, kNullSurfaceHandle, size,
+ color_space, surface_origin, alpha_type,
+ usage, pixel_data);
+}
+
+std::unique_ptr<SharedImageBacking>
+SharedImageBackingFactoryGLImage::CreateSharedImage(
+ const Mailbox& mailbox,
+ int client_id,
+ gfx::GpuMemoryBufferHandle handle,
+ gfx::BufferFormat buffer_format,
+ gfx::BufferPlane plane,
+ SurfaceHandle surface_handle,
+ const gfx::Size& size,
+ const gfx::ColorSpace& color_space,
+ GrSurfaceOrigin surface_origin,
+ SkAlphaType alpha_type,
+ uint32_t usage) {
+ if (!gpu_memory_buffer_formats_.Has(buffer_format)) {
+ LOG(ERROR) << "CreateSharedImage: unsupported buffer format "
+ << gfx::BufferFormatToString(buffer_format);
+ return nullptr;
+ }
+
+ if (!gpu::IsPlaneValidForGpuMemoryBufferFormat(plane, buffer_format)) {
+ LOG(ERROR) << "Invalid plane " << gfx::BufferPlaneToString(plane) << " for "
+ << gfx::BufferFormatToString(buffer_format);
+ return nullptr;
+ }
+
+ if (!gpu::IsImageSizeValidForGpuMemoryBufferFormat(size, buffer_format,
+ plane)) {
+ LOG(ERROR) << "Invalid image size " << size.ToString() << " for "
+ << gfx::BufferFormatToString(buffer_format);
+ return nullptr;
+ }
+
+ const gfx::GpuMemoryBufferType handle_type = handle.type;
+ GLenum target =
+ (handle_type == gfx::SHARED_MEMORY_BUFFER ||
+ !NativeBufferNeedsPlatformSpecificTextureTarget(buffer_format))
+ ? GL_TEXTURE_2D
+ : gpu::GetPlatformSpecificTextureTarget();
+ scoped_refptr<gl::GLImage> image = MakeGLImage(
+ client_id, std::move(handle), buffer_format, plane, surface_handle, size);
+ if (!image) {
+ LOG(ERROR) << "Failed to create image.";
+ return nullptr;
+ }
+ // If we decide to use GL_TEXTURE_2D as the target for a native buffer, we
+ // would like to verify that it will actually work. If the image expects to be
+ // copied, there is no way to do this verification here, because copying is
+ // done lazily after the SharedImage is created, so require that the image is
+ // bindable. Currently NativeBufferNeedsPlatformSpecificTextureTarget can
+ // only return false on Chrome OS where GLImageNativePixmap is used which is
+ // always bindable.
+#if DCHECK_IS_ON()
+ bool texture_2d_support = false;
+#if defined(OS_MAC)
+ // If the PlatformSpecificTextureTarget on Mac is GL_TEXTURE_2D, this is
+ // supported.
+ texture_2d_support =
+ (gpu::GetPlatformSpecificTextureTarget() == GL_TEXTURE_2D);
+#endif // defined(OS_MAC)
+ DCHECK(handle_type == gfx::SHARED_MEMORY_BUFFER || target != GL_TEXTURE_2D ||
+ texture_2d_support || image->ShouldBindOrCopy() == gl::GLImage::BIND);
+#endif // DCHECK_IS_ON()
+ if (color_space.IsValid())
+ image->SetColorSpace(color_space);
+ if (usage & SHARED_IMAGE_USAGE_MACOS_VIDEO_TOOLBOX)
+ image->DisableInUseByWindowServer();
+
+ gfx::BufferFormat plane_buffer_format =
+ GetPlaneBufferFormat(plane, buffer_format);
+ viz::ResourceFormat format = viz::GetResourceFormat(plane_buffer_format);
+ const bool for_framebuffer_attachment =
+ (usage & (SHARED_IMAGE_USAGE_RASTER |
+ SHARED_IMAGE_USAGE_GLES2_FRAMEBUFFER_HINT)) != 0;
+ const bool is_rgb_emulation = (usage & SHARED_IMAGE_USAGE_RGB_EMULATION) != 0;
+
+ InitializeGLTextureParams params;
+ params.target = target;
+ params.internal_format =
+ is_rgb_emulation ? GL_RGB : image->GetInternalFormat();
+ params.format = is_rgb_emulation ? GL_RGB : image->GetDataFormat();
+ params.type = image->GetDataType();
+ params.is_cleared = true;
+ params.is_rgb_emulation = is_rgb_emulation;
+ params.framebuffer_attachment_angle =
+ for_framebuffer_attachment && texture_usage_angle_;
+ return std::make_unique<SharedImageBackingGLImage>(
+ image, mailbox, format, size, color_space, surface_origin, alpha_type,
+ usage, params, attribs_, use_passthrough_);
+}
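The target selection earlier in this function reduces to a small decision table; a hedged summary:

  SHARED_MEMORY_BUFFER handle                    -> GL_TEXTURE_2D
  native handle, format needs a platform target  -> GetPlatformSpecificTextureTarget()
  native handle, GL_TEXTURE_2D suffices          -> GL_TEXTURE_2D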
+
+scoped_refptr<gl::GLImage> SharedImageBackingFactoryGLImage::MakeGLImage(
+ int client_id,
+ gfx::GpuMemoryBufferHandle handle,
+ gfx::BufferFormat format,
+ gfx::BufferPlane plane,
+ SurfaceHandle surface_handle,
+ const gfx::Size& size) {
+ if (handle.type == gfx::SHARED_MEMORY_BUFFER) {
+ if (plane != gfx::BufferPlane::DEFAULT)
+ return nullptr;
+ if (!base::IsValueInRangeForNumericType<size_t>(handle.stride))
+ return nullptr;
+ auto image = base::MakeRefCounted<gl::GLImageSharedMemory>(size);
+ if (!image->Initialize(handle.region, handle.id, format, handle.offset,
+ handle.stride)) {
+ return nullptr;
+ }
+
+ return image;
+ }
+
+ if (!image_factory_)
+ return nullptr;
+
+ return image_factory_->CreateImageForGpuMemoryBuffer(
+ std::move(handle), size, format, plane, client_id, surface_handle);
+}
+
+bool SharedImageBackingFactoryGLImage::IsSupported(
+ uint32_t usage,
+ viz::ResourceFormat format,
+ bool thread_safe,
+ gfx::GpuMemoryBufferType gmb_type,
+ GrContextType gr_context_type,
+ bool* allow_legacy_mailbox,
+ bool is_pixel_used) {
+ if (is_pixel_used && gr_context_type != GrContextType::kGL) {
+ return false;
+ }
+ if (thread_safe) {
+ return false;
+ }
+#if defined(OS_MAC)
+ // On macOS, there is no separate interop factory. Any GpuMemoryBuffer-backed
+ // image can be used with both OpenGL and Metal.
+ *allow_legacy_mailbox = gr_context_type == GrContextType::kGL;
+ return true;
+#else
+ // Contexts other than GL are not supported for OOPR canvas.
+ if (gr_context_type != GrContextType::kGL &&
+ ((usage & SHARED_IMAGE_USAGE_DISPLAY) ||
+ (usage & SHARED_IMAGE_USAGE_RASTER))) {
+ return false;
+ }
+ bool needs_interop_factory = (usage & SHARED_IMAGE_USAGE_WEBGPU) ||
+ (usage & SHARED_IMAGE_USAGE_VIDEO_DECODE);
+#if defined(OS_ANDROID)
+ // Scanout on Android requires explicit fence synchronization which is only
+ // supported by the interop factory.
+ needs_interop_factory |= usage & SHARED_IMAGE_USAGE_SCANOUT;
+#endif
+
+ if (needs_interop_factory) {
+ // These usages require the interop factory.
+ return false;
+ }
+ *allow_legacy_mailbox = gr_context_type == GrContextType::kGL;
+ return true;
+#endif
+}
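A few hedged examples of how the non-Mac branch above routes requests:

  thread_safe request                              -> rejected
  usage = GLES2 | RASTER, GrContextType::kGL       -> supported; legacy mailbox allowed
  usage = GLES2 | DISPLAY, GrContextType::kVulkan  -> rejected (OOPR canvas requires GL)
  usage includes WEBGPU or VIDEO_DECODE            -> rejected; routed to the interop factory
  usage includes SCANOUT (Android only)            -> rejected; scanout needs explicit fences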
+
+std::unique_ptr<SharedImageBacking>
+SharedImageBackingFactoryGLImage::CreateSharedImageInternal(
+ const Mailbox& mailbox,
+ viz::ResourceFormat format,
+ SurfaceHandle surface_handle,
+ const gfx::Size& size,
+ const gfx::ColorSpace& color_space,
+ GrSurfaceOrigin surface_origin,
+ SkAlphaType alpha_type,
+ uint32_t usage,
+ base::span<const uint8_t> pixel_data) {
+ const FormatInfo& format_info = format_info_[format];
+ const BufferFormatInfo& buffer_format_info = buffer_format_info_[format];
+ GLenum target = buffer_format_info.target_for_scanout;
+
+ if (!buffer_format_info.allow_scanout) {
+ LOG(ERROR) << "CreateSharedImage: SCANOUT shared images unavailable. "
+ "Buffer format= "
+ << gfx::BufferFormatToString(buffer_format_info.buffer_format);
+ return nullptr;
+ }
+
+ if (!CanCreateSharedImage(size, pixel_data, format_info, target)) {
+ return nullptr;
+ }
+
+ const bool for_framebuffer_attachment =
+ (usage & (SHARED_IMAGE_USAGE_RASTER |
+ SHARED_IMAGE_USAGE_GLES2_FRAMEBUFFER_HINT)) != 0;
+
+ scoped_refptr<gl::GLImage> image;
+
+ // TODO(piman): We pretend the texture was created in an ES2 context, so that
+ // it can be used in other ES2 contexts, and so we have to pass gl_format as
+ // the internal format in the LevelInfo. https://crbug.com/628064
+ GLuint level_info_internal_format = format_info.gl_format;
+ bool is_cleared = false;
+
+ // |scoped_progress_reporter| will notify |progress_reporter_| upon
+ // construction and destruction. We limit the scope so that progress is
+ // reported immediately after allocation/upload and before other GL
+ // operations.
+ {
+ gl::ScopedProgressReporter scoped_progress_reporter(progress_reporter_);
+ image = image_factory_->CreateAnonymousImage(
+ size, buffer_format_info.buffer_format, gfx::BufferUsage::SCANOUT,
+ surface_handle, &is_cleared);
+ }
+ // Scanout images have different constraints than GL images and might fail
+ // to allocate even if GL images can be created.
+ if (!image) {
+ gl::ScopedProgressReporter scoped_progress_reporter(progress_reporter_);
+ // TODO(dcastagna): Use BufferUsage::GPU_READ_WRITE instead of
+ // BufferUsage::GPU_READ once we add it.
+ image = image_factory_->CreateAnonymousImage(
+ size, buffer_format_info.buffer_format, gfx::BufferUsage::GPU_READ,
+ surface_handle, &is_cleared);
+ }
+ // The allocated image should not require a copy.
+ if (!image || image->ShouldBindOrCopy() != gl::GLImage::BIND) {
+ LOG(ERROR) << "CreateSharedImage: Failed to create bindable image";
+ return nullptr;
+ }
+ level_info_internal_format = image->GetInternalFormat();
+ if (color_space.IsValid())
+ image->SetColorSpace(color_space);
+ if (usage & SHARED_IMAGE_USAGE_MACOS_VIDEO_TOOLBOX)
+ image->DisableInUseByWindowServer();
+
+ InitializeGLTextureParams params;
+ params.target = target;
+ params.internal_format = level_info_internal_format;
+ params.format = format_info.gl_format;
+ params.type = format_info.gl_type;
+ params.is_cleared = pixel_data.empty() ? is_cleared : true;
+ params.has_immutable_storage = !image && format_info.supports_storage;
+ params.framebuffer_attachment_angle =
+ for_framebuffer_attachment && texture_usage_angle_;
+
+ DCHECK(!format_info.swizzle);
+ auto result = std::make_unique<SharedImageBackingGLImage>(
+ image, mailbox, format, size, color_space, surface_origin, alpha_type,
+ usage, params, attribs_, use_passthrough_);
+ if (!pixel_data.empty()) {
+ gl::ScopedProgressReporter scoped_progress_reporter(progress_reporter_);
+ result->InitializePixels(format_info.adjusted_format, format_info.gl_type,
+ pixel_data.data());
+ }
+ return std::move(result);
+}
+
+} // namespace gpu
diff --git a/chromium/gpu/command_buffer/service/shared_image_backing_factory_gl_image.h b/chromium/gpu/command_buffer/service/shared_image_backing_factory_gl_image.h
new file mode 100644
index 00000000000..276c45c148b
--- /dev/null
+++ b/chromium/gpu/command_buffer/service/shared_image_backing_factory_gl_image.h
@@ -0,0 +1,123 @@
+// Copyright 2021 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef GPU_COMMAND_BUFFER_SERVICE_SHARED_IMAGE_BACKING_FACTORY_GL_IMAGE_H_
+#define GPU_COMMAND_BUFFER_SERVICE_SHARED_IMAGE_BACKING_FACTORY_GL_IMAGE_H_
+
+#include <memory>
+
+#include "base/memory/scoped_refptr.h"
+#include "components/viz/common/resources/resource_format.h"
+#include "gpu/command_buffer/service/shared_image_backing_factory_gl_common.h"
+#include "gpu/command_buffer/service/shared_image_backing_gl_common.h"
+
+namespace gfx {
+class Size;
+class ColorSpace;
+} // namespace gfx
+
+namespace gl {
+class ProgressReporter;
+} // namespace gl
+
+namespace gpu {
+class SharedImageBacking;
+class GpuDriverBugWorkarounds;
+struct GpuFeatureInfo;
+struct GpuPreferences;
+struct Mailbox;
+class ImageFactory;
+
+// Implementation of SharedImageBackingFactory that produces GL-image backed
+// SharedImages.
+class GPU_GLES2_EXPORT SharedImageBackingFactoryGLImage
+ : public SharedImageBackingFactoryGLCommon {
+ public:
+ SharedImageBackingFactoryGLImage(const GpuPreferences& gpu_preferences,
+ const GpuDriverBugWorkarounds& workarounds,
+ const GpuFeatureInfo& gpu_feature_info,
+ ImageFactory* image_factory,
+ gl::ProgressReporter* progress_reporter);
+ ~SharedImageBackingFactoryGLImage() override;
+
+ // SharedImageBackingFactory implementation.
+ std::unique_ptr<SharedImageBacking> CreateSharedImage(
+ const Mailbox& mailbox,
+ viz::ResourceFormat format,
+ SurfaceHandle surface_handle,
+ const gfx::Size& size,
+ const gfx::ColorSpace& color_space,
+ GrSurfaceOrigin surface_origin,
+ SkAlphaType alpha_type,
+ uint32_t usage,
+ bool is_thread_safe) override;
+ std::unique_ptr<SharedImageBacking> CreateSharedImage(
+ const Mailbox& mailbox,
+ viz::ResourceFormat format,
+ const gfx::Size& size,
+ const gfx::ColorSpace& color_space,
+ GrSurfaceOrigin surface_origin,
+ SkAlphaType alpha_type,
+ uint32_t usage,
+ base::span<const uint8_t> pixel_data) override;
+ std::unique_ptr<SharedImageBacking> CreateSharedImage(
+ const Mailbox& mailbox,
+ int client_id,
+ gfx::GpuMemoryBufferHandle handle,
+ gfx::BufferFormat format,
+ gfx::BufferPlane plane,
+ SurfaceHandle surface_handle,
+ const gfx::Size& size,
+ const gfx::ColorSpace& color_space,
+ GrSurfaceOrigin surface_origin,
+ SkAlphaType alpha_type,
+ uint32_t usage) override;
+ bool IsSupported(uint32_t usage,
+ viz::ResourceFormat format,
+ bool thread_safe,
+ gfx::GpuMemoryBufferType gmb_type,
+ GrContextType gr_context_type,
+ bool* allow_legacy_mailbox,
+ bool is_pixel_used) override;
+
+ private:
+ scoped_refptr<gl::GLImage> MakeGLImage(int client_id,
+ gfx::GpuMemoryBufferHandle handle,
+ gfx::BufferFormat format,
+ gfx::BufferPlane plane,
+ SurfaceHandle surface_handle,
+ const gfx::Size& size);
+
+ std::unique_ptr<SharedImageBacking> CreateSharedImageInternal(
+ const Mailbox& mailbox,
+ viz::ResourceFormat format,
+ SurfaceHandle surface_handle,
+ const gfx::Size& size,
+ const gfx::ColorSpace& color_space,
+ GrSurfaceOrigin surface_origin,
+ SkAlphaType alpha_type,
+ uint32_t usage,
+ base::span<const uint8_t> pixel_data);
+
+ struct BufferFormatInfo {
+ // Whether to allow SHARED_IMAGE_USAGE_SCANOUT.
+ bool allow_scanout = false;
+
+ // GL target to use for scanout images.
+ GLenum target_for_scanout = GL_TEXTURE_2D;
+
+ // BufferFormat for scanout images.
+ gfx::BufferFormat buffer_format = gfx::BufferFormat::RGBA_8888;
+ };
+
+ // Factory used to generate GLImages for SCANOUT backings.
+ ImageFactory* const image_factory_ = nullptr;
+
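+ // Per-format scanout capabilities, indexed by viz::ResourceFormat.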
+ BufferFormatInfo buffer_format_info_[viz::RESOURCE_FORMAT_MAX + 1];
+ GpuMemoryBufferFormatSet gpu_memory_buffer_formats_;
+};
+
+} // namespace gpu
+
+#endif // GPU_COMMAND_BUFFER_SERVICE_SHARED_IMAGE_BACKING_FACTORY_GL_IMAGE_H_
diff --git a/chromium/gpu/command_buffer/service/shared_image_backing_factory_gl_image_unittest.cc b/chromium/gpu/command_buffer/service/shared_image_backing_factory_gl_image_unittest.cc
new file mode 100644
index 00000000000..feb5e493ef8
--- /dev/null
+++ b/chromium/gpu/command_buffer/service/shared_image_backing_factory_gl_image_unittest.cc
@@ -0,0 +1,872 @@
+// Copyright 2021 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "gpu/command_buffer/service/shared_image_backing_factory_gl_image.h"
+
+#include <thread>
+
+#include "base/callback_helpers.h"
+#include "base/strings/stringprintf.h"
+#include "build/build_config.h"
+#include "components/viz/common/resources/resource_format_utils.h"
+#include "components/viz/common/resources/resource_sizes.h"
+#include "gpu/command_buffer/common/gpu_memory_buffer_support.h"
+#include "gpu/command_buffer/common/mailbox.h"
+#include "gpu/command_buffer/common/shared_image_usage.h"
+#include "gpu/command_buffer/service/mailbox_manager_impl.h"
+#include "gpu/command_buffer/service/service_utils.h"
+#include "gpu/command_buffer/service/shared_context_state.h"
+#include "gpu/command_buffer/service/shared_image_backing.h"
+#include "gpu/command_buffer/service/shared_image_factory.h"
+#include "gpu/command_buffer/service/shared_image_manager.h"
+#include "gpu/command_buffer/service/shared_image_representation.h"
+#include "gpu/command_buffer/service/shared_image_test_utils.h"
+#include "gpu/command_buffer/service/texture_manager.h"
+#include "gpu/command_buffer/tests/texture_image_factory.h"
+#include "gpu/config/gpu_driver_bug_workarounds.h"
+#include "gpu/config/gpu_feature_info.h"
+#include "gpu/config/gpu_preferences.h"
+#include "gpu/config/gpu_test_config.h"
+#include "testing/gmock/include/gmock/gmock.h"
+#include "testing/gtest/include/gtest/gtest.h"
+#include "third_party/abseil-cpp/absl/types/optional.h"
+#include "third_party/skia/include/core/SkPromiseImageTexture.h"
+#include "third_party/skia/include/gpu/GrBackendSemaphore.h"
+#include "third_party/skia/include/gpu/GrBackendSurface.h"
+#include "third_party/skia/include/gpu/GrDirectContext.h"
+#include "ui/gfx/buffer_format_util.h"
+#include "ui/gfx/color_space.h"
+#include "ui/gl/buffer_format_utils.h"
+#include "ui/gl/gl_image_shared_memory.h"
+#include "ui/gl/gl_image_stub.h"
+#include "ui/gl/gl_surface.h"
+#include "ui/gl/init/gl_factory.h"
+#include "ui/gl/progress_reporter.h"
+
+using testing::AtLeast;
+
+namespace gpu {
+namespace {
+
+void CreateSharedContext(const GpuDriverBugWorkarounds& workarounds,
+ scoped_refptr<gl::GLSurface>& surface,
+ scoped_refptr<gl::GLContext>& context,
+ scoped_refptr<SharedContextState>& context_state,
+ scoped_refptr<gles2::FeatureInfo>& feature_info) {
+ surface = gl::init::CreateOffscreenGLSurface(gfx::Size());
+ ASSERT_TRUE(surface);
+ context =
+ gl::init::CreateGLContext(nullptr, surface.get(), gl::GLContextAttribs());
+ ASSERT_TRUE(context);
+ bool result = context->MakeCurrent(surface.get());
+ ASSERT_TRUE(result);
+
+ scoped_refptr<gl::GLShareGroup> share_group = new gl::GLShareGroup();
+ feature_info =
+ base::MakeRefCounted<gles2::FeatureInfo>(workarounds, GpuFeatureInfo());
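+ // Bundle the share group, surface, and context into a SharedContextState,
+ // against which both the GrContext and GL state below are initialized.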
+ context_state = base::MakeRefCounted<SharedContextState>(
+ std::move(share_group), surface, context,
+ false /* use_virtualized_gl_contexts */, base::DoNothing());
+ context_state->InitializeGrContext(GpuPreferences(), workarounds, nullptr);
+ context_state->InitializeGL(GpuPreferences(), feature_info);
+}
+
+class MockProgressReporter : public gl::ProgressReporter {
+ public:
+ MockProgressReporter() = default;
+ ~MockProgressReporter() override = default;
+
+ // gl::ProgressReporter implementation.
+ MOCK_METHOD0(ReportProgress, void());
+};
+
+class SharedImageBackingFactoryGLImageTestBase
+ : public testing::TestWithParam<std::tuple<bool, viz::ResourceFormat>> {
+ public:
+ explicit SharedImageBackingFactoryGLImageTestBase(bool is_thread_safe)
+ : shared_image_manager_(
+ std::make_unique<SharedImageManager>(is_thread_safe)) {}
+ ~SharedImageBackingFactoryGLImageTestBase() override {
+ // |context_state_| must be destroyed on its own context.
+ context_state_->MakeCurrent(surface_.get(), true /* needs_gl */);
+ }
+
+ void SetUpBase(const GpuDriverBugWorkarounds& workarounds,
+ ImageFactory* factory) {
+ scoped_refptr<gles2::FeatureInfo> feature_info;
+ CreateSharedContext(workarounds, surface_, context_, context_state_,
+ feature_info);
+ supports_etc1_ =
+ feature_info->validators()->compressed_texture_format.IsValid(
+ GL_ETC1_RGB8_OES);
+ supports_ar30_ = feature_info->feature_flags().chromium_image_ar30;
+ supports_ab30_ = feature_info->feature_flags().chromium_image_ab30;
+
+ GpuPreferences preferences;
+ preferences.use_passthrough_cmd_decoder = use_passthrough();
+ backing_factory_ = std::make_unique<SharedImageBackingFactoryGLImage>(
+ preferences, workarounds, GpuFeatureInfo(), factory,
+ &progress_reporter_);
+
+ memory_type_tracker_ = std::make_unique<MemoryTypeTracker>(nullptr);
+ shared_image_representation_factory_ =
+ std::make_unique<SharedImageRepresentationFactory>(
+ shared_image_manager_.get(), nullptr);
+ }
+
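+ // Passthrough is used only when the test parameter requests it and the
+ // platform actually supports the passthrough command decoder.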
+ bool use_passthrough() {
+ return std::get<0>(GetParam()) &&
+ gles2::PassthroughCommandDecoderSupported();
+ }
+
+ bool can_create_scanout_or_gmb_shared_image(
+ viz::ResourceFormat format) const {
+ if (format == viz::ResourceFormat::BGRA_1010102)
+ return supports_ar30_;
+ else if (format == viz::ResourceFormat::RGBA_1010102)
+ return supports_ab30_;
+ return true;
+ }
+
+ viz::ResourceFormat get_format() { return std::get<1>(GetParam()); }
+
+ protected:
+ ::testing::NiceMock<MockProgressReporter> progress_reporter_;
+ scoped_refptr<gl::GLSurface> surface_;
+ scoped_refptr<gl::GLContext> context_;
+ scoped_refptr<SharedContextState> context_state_;
+ std::unique_ptr<SharedImageBackingFactoryGLImage> backing_factory_;
+ gles2::MailboxManagerImpl mailbox_manager_;
+ std::unique_ptr<SharedImageManager> shared_image_manager_;
+ std::unique_ptr<MemoryTypeTracker> memory_type_tracker_;
+ std::unique_ptr<SharedImageRepresentationFactory>
+ shared_image_representation_factory_;
+ bool supports_etc1_ = false;
+ bool supports_ar30_ = false;
+ bool supports_ab30_ = false;
+};
+
+class SharedImageBackingFactoryGLImageTest
+ : public SharedImageBackingFactoryGLImageTestBase {
+ public:
+ SharedImageBackingFactoryGLImageTest()
+ : SharedImageBackingFactoryGLImageTestBase(false) {}
+ void SetUp() override {
+ GpuDriverBugWorkarounds workarounds;
+ workarounds.max_texture_size = INT_MAX - 1;
+ SetUpBase(workarounds, &image_factory_);
+ }
+
+ protected:
+ TextureImageFactory image_factory_;
+};
+
+TEST_P(SharedImageBackingFactoryGLImageTest, Basic) {
+ // TODO(jonahr): Test crashes on Mac with ANGLE/passthrough
+ // (crbug.com/1100975)
+ gpu::GPUTestBotConfig bot_config;
+ if (bot_config.LoadCurrentConfig(nullptr) &&
+ bot_config.Matches("mac passthrough")) {
+ return;
+ }
+
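+ // Creating the backing should report progress whenever this format is
+ // expected to be supported.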
+ const bool should_succeed =
+ can_create_scanout_or_gmb_shared_image(get_format());
+ if (should_succeed)
+ EXPECT_CALL(progress_reporter_, ReportProgress).Times(AtLeast(1));
+ auto mailbox = Mailbox::GenerateForSharedImage();
+ auto format = get_format();
+ gfx::Size size(256, 256);
+ auto color_space = gfx::ColorSpace::CreateSRGB();
+ GrSurfaceOrigin surface_origin = kTopLeft_GrSurfaceOrigin;
+ SkAlphaType alpha_type = kPremul_SkAlphaType;
+ uint32_t usage = SHARED_IMAGE_USAGE_SCANOUT;
+ gpu::SurfaceHandle surface_handle = gpu::kNullSurfaceHandle;
+ auto backing = backing_factory_->CreateSharedImage(
+ mailbox, format, surface_handle, size, color_space, surface_origin,
+ alpha_type, usage, false /* is_thread_safe */);
+
+ if (!should_succeed) {
+ EXPECT_FALSE(backing);
+ return;
+ }
+ ASSERT_TRUE(backing);
+ ::testing::Mock::VerifyAndClearExpectations(&progress_reporter_);
+
+ // Check clearing.
+ if (!backing->IsCleared()) {
+ backing->SetCleared();
+ EXPECT_TRUE(backing->IsCleared());
+ }
+
+ // First, validate via a legacy mailbox.
+ EXPECT_TRUE(backing->ProduceLegacyMailbox(&mailbox_manager_));
+ TextureBase* texture_base = mailbox_manager_.ConsumeTexture(mailbox);
+ ASSERT_TRUE(texture_base);
+ GLenum target = texture_base->target();
+ scoped_refptr<gl::GLImage> image;
+ if (use_passthrough()) {
+ auto* texture = static_cast<gles2::TexturePassthrough*>(texture_base);
+ image = texture->GetLevelImage(target, 0);
+ } else {
+ auto* texture = static_cast<gles2::Texture*>(texture_base);
+ image = texture->GetLevelImage(target, 0);
+ }
+ ASSERT_TRUE(image);
+ EXPECT_EQ(size, image->GetSize());
+
+ // Next, validate via a SharedImageRepresentationGLTexture.
+ std::unique_ptr<SharedImageRepresentationFactoryRef> shared_image =
+ shared_image_manager_->Register(std::move(backing),
+ memory_type_tracker_.get());
+ EXPECT_TRUE(shared_image);
+ if (!use_passthrough()) {
+ auto gl_representation =
+ shared_image_representation_factory_->ProduceGLTexture(mailbox);
+ EXPECT_TRUE(gl_representation);
+ EXPECT_TRUE(gl_representation->GetTexture()->service_id());
+ EXPECT_EQ(size, gl_representation->size());
+ EXPECT_EQ(format, gl_representation->format());
+ EXPECT_EQ(color_space, gl_representation->color_space());
+ EXPECT_EQ(usage, gl_representation->usage());
+ gl_representation.reset();
+
+ auto gl_representation_rgb =
+ shared_image_representation_factory_->ProduceRGBEmulationGLTexture(
+ mailbox);
+ EXPECT_TRUE(gl_representation_rgb);
+ EXPECT_TRUE(gl_representation_rgb->GetTexture()->service_id());
+ EXPECT_EQ(size, gl_representation_rgb->size());
+ EXPECT_EQ(format, gl_representation_rgb->format());
+ EXPECT_EQ(color_space, gl_representation_rgb->color_space());
+ EXPECT_EQ(usage, gl_representation_rgb->usage());
+ gl_representation_rgb.reset();
+ }
+
+ // Next, validate a SharedImageRepresentationGLTexturePassthrough.
+ if (use_passthrough()) {
+ auto gl_representation =
+ shared_image_representation_factory_->ProduceGLTexturePassthrough(
+ mailbox);
+ EXPECT_TRUE(gl_representation);
+ EXPECT_TRUE(gl_representation->GetTexturePassthrough()->service_id());
+ EXPECT_EQ(size, gl_representation->size());
+ EXPECT_EQ(format, gl_representation->format());
+ EXPECT_EQ(color_space, gl_representation->color_space());
+ EXPECT_EQ(usage, gl_representation->usage());
+ gl_representation.reset();
+ }
+
+ // Finally, validate a SharedImageRepresentationSkia.
+ auto skia_representation = shared_image_representation_factory_->ProduceSkia(
+ mailbox, context_state_.get());
+ EXPECT_TRUE(skia_representation);
+ std::vector<GrBackendSemaphore> begin_semaphores;
+ std::vector<GrBackendSemaphore> end_semaphores;
+ std::unique_ptr<SharedImageRepresentationSkia::ScopedWriteAccess>
+ scoped_write_access;
+ scoped_write_access = skia_representation->BeginScopedWriteAccess(
+ &begin_semaphores, &end_semaphores,
+ SharedImageRepresentation::AllowUnclearedAccess::kYes);
+ auto* surface = scoped_write_access->surface();
+ EXPECT_TRUE(surface);
+ EXPECT_EQ(size.width(), surface->width());
+ EXPECT_EQ(size.height(), surface->height());
+ scoped_write_access.reset();
+
+ std::unique_ptr<SharedImageRepresentationSkia::ScopedReadAccess>
+ scoped_read_access;
+ scoped_read_access = skia_representation->BeginScopedReadAccess(
+ &begin_semaphores, &end_semaphores);
+ auto* promise_texture = scoped_read_access->promise_image_texture();
+ EXPECT_TRUE(promise_texture);
+ EXPECT_TRUE(begin_semaphores.empty());
+ EXPECT_TRUE(end_semaphores.empty());
+ if (promise_texture) {
+ GrBackendTexture backend_texture = promise_texture->backendTexture();
+ EXPECT_TRUE(backend_texture.isValid());
+ EXPECT_EQ(size.width(), backend_texture.width());
+ EXPECT_EQ(size.height(), backend_texture.height());
+ }
+ scoped_read_access.reset();
+ skia_representation.reset();
+
+ shared_image.reset();
+ EXPECT_FALSE(mailbox_manager_.ConsumeTexture(mailbox));
+
+ if (!use_passthrough() &&
+ context_state_->feature_info()->feature_flags().ext_texture_rg) {
+ EXPECT_CALL(progress_reporter_, ReportProgress).Times(AtLeast(1));
+ // Create a R-8 image texture, and check that the internal_format is that
+ // of the image (GL_RGBA for TextureImageFactory). This only matters for
+ // the validating decoder.
+ auto format = viz::ResourceFormat::RED_8;
+ gpu::SurfaceHandle surface_handle = gpu::kNullSurfaceHandle;
+ backing = backing_factory_->CreateSharedImage(
+ mailbox, format, surface_handle, size, color_space, surface_origin,
+ alpha_type, usage, false /* is_thread_safe */);
+ EXPECT_TRUE(backing);
+ shared_image = shared_image_manager_->Register(std::move(backing),
+ memory_type_tracker_.get());
+ auto gl_representation =
+ shared_image_representation_factory_->ProduceGLTexture(mailbox);
+ ASSERT_TRUE(gl_representation);
+ gles2::Texture* texture = gl_representation->GetTexture();
+ ASSERT_TRUE(texture);
+ GLenum type = 0;
+ GLenum internal_format = 0;
+ EXPECT_TRUE(texture->GetLevelType(target, 0, &type, &internal_format));
+ EXPECT_EQ(internal_format, static_cast<GLenum>(GL_RGBA));
+ gl_representation.reset();
+ shared_image.reset();
+ }
+}
+
+TEST_P(SharedImageBackingFactoryGLImageTest, InitialData) {
+ // TODO(andrescj): this loop over the formats can be replaced by test
+ // parameters.
+ for (auto format :
+ {viz::ResourceFormat::RGBA_8888, viz::ResourceFormat::BGRA_1010102,
+ viz::ResourceFormat::RGBA_1010102}) {
+ const bool should_succeed = can_create_scanout_or_gmb_shared_image(format);
+ if (should_succeed)
+ EXPECT_CALL(progress_reporter_, ReportProgress).Times(AtLeast(1));
+
+ auto mailbox = Mailbox::GenerateForSharedImage();
+ gfx::Size size(256, 256);
+ auto color_space = gfx::ColorSpace::CreateSRGB();
+ GrSurfaceOrigin surface_origin = kTopLeft_GrSurfaceOrigin;
+ SkAlphaType alpha_type = kPremul_SkAlphaType;
+ uint32_t usage = SHARED_IMAGE_USAGE_SCANOUT;
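+ // Size the initial data to exactly the bytes this format requires so the
+ // upload path accepts it.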
+ std::vector<uint8_t> initial_data(
+ viz::ResourceSizes::CheckedSizeInBytes<unsigned int>(size, format));
+
+ auto backing = backing_factory_->CreateSharedImage(
+ mailbox, format, size, color_space, surface_origin, alpha_type, usage,
+ initial_data);
+ ::testing::Mock::VerifyAndClearExpectations(&progress_reporter_);
+ if (!should_succeed) {
+ EXPECT_FALSE(backing);
+ return;
+ }
+ ASSERT_TRUE(backing);
+ EXPECT_TRUE(backing->IsCleared());
+
+ EXPECT_TRUE(backing->ProduceLegacyMailbox(&mailbox_manager_));
+ TextureBase* texture_base = mailbox_manager_.ConsumeTexture(mailbox);
+ ASSERT_TRUE(texture_base);
+ GLenum expected_target = texture_base->target();
+
+ // Validate via a SharedImageRepresentationGLTexture(Passthrough).
+ std::unique_ptr<SharedImageRepresentationFactoryRef> shared_image =
+ shared_image_manager_->Register(std::move(backing),
+ memory_type_tracker_.get());
+ EXPECT_TRUE(shared_image);
+
+ if (!use_passthrough()) {
+ auto gl_representation =
+ shared_image_representation_factory_->ProduceGLTexture(mailbox);
+ EXPECT_TRUE(gl_representation);
+ EXPECT_TRUE(gl_representation->GetTexture()->service_id());
+ EXPECT_EQ(expected_target, gl_representation->GetTexture()->target());
+ EXPECT_EQ(size, gl_representation->size());
+ EXPECT_EQ(format, gl_representation->format());
+ EXPECT_EQ(color_space, gl_representation->color_space());
+ EXPECT_EQ(usage, gl_representation->usage());
+ gl_representation.reset();
+ } else {
+ auto gl_representation =
+ shared_image_representation_factory_->ProduceGLTexturePassthrough(
+ mailbox);
+ EXPECT_TRUE(gl_representation);
+ EXPECT_TRUE(gl_representation->GetTexturePassthrough()->service_id());
+ EXPECT_EQ(expected_target,
+ gl_representation->GetTexturePassthrough()->target());
+ EXPECT_EQ(size, gl_representation->size());
+ EXPECT_EQ(format, gl_representation->format());
+ EXPECT_EQ(color_space, gl_representation->color_space());
+ EXPECT_EQ(usage, gl_representation->usage());
+ gl_representation.reset();
+ }
+
+ shared_image.reset();
+ EXPECT_FALSE(mailbox_manager_.ConsumeTexture(mailbox));
+ }
+}
+
+TEST_P(SharedImageBackingFactoryGLImageTest, InitialDataImage) {
+ const bool should_succeed =
+ can_create_scanout_or_gmb_shared_image(get_format());
+ if (should_succeed)
+ EXPECT_CALL(progress_reporter_, ReportProgress).Times(AtLeast(1));
+ auto mailbox = Mailbox::GenerateForSharedImage();
+ auto format = get_format();
+ gfx::Size size(256, 256);
+ auto color_space = gfx::ColorSpace::CreateSRGB();
+ GrSurfaceOrigin surface_origin = kTopLeft_GrSurfaceOrigin;
+ SkAlphaType alpha_type = kPremul_SkAlphaType;
+ uint32_t usage = SHARED_IMAGE_USAGE_SCANOUT;
+ std::vector<uint8_t> initial_data(256 * 256 * 4);
+ auto backing = backing_factory_->CreateSharedImage(
+ mailbox, format, size, color_space, surface_origin, alpha_type, usage,
+ initial_data);
+ if (!should_succeed) {
+ EXPECT_FALSE(backing);
+ return;
+ }
+ ASSERT_TRUE(backing);
+
+ // Validate via a SharedImageRepresentationGLTexture(Passthrough).
+ std::unique_ptr<SharedImageRepresentationFactoryRef> shared_image =
+ shared_image_manager_->Register(std::move(backing),
+ memory_type_tracker_.get());
+ EXPECT_TRUE(shared_image);
+ if (!use_passthrough()) {
+ auto gl_representation =
+ shared_image_representation_factory_->ProduceGLTexture(mailbox);
+ EXPECT_TRUE(gl_representation);
+ EXPECT_TRUE(gl_representation->GetTexture()->service_id());
+ EXPECT_EQ(size, gl_representation->size());
+ EXPECT_EQ(format, gl_representation->format());
+ EXPECT_EQ(color_space, gl_representation->color_space());
+ EXPECT_EQ(usage, gl_representation->usage());
+ gl_representation.reset();
+ } else {
+ auto gl_representation =
+ shared_image_representation_factory_->ProduceGLTexturePassthrough(
+ mailbox);
+ EXPECT_TRUE(gl_representation);
+ EXPECT_TRUE(gl_representation->GetTexturePassthrough()->service_id());
+ EXPECT_EQ(size, gl_representation->size());
+ EXPECT_EQ(format, gl_representation->format());
+ EXPECT_EQ(color_space, gl_representation->color_space());
+ EXPECT_EQ(usage, gl_representation->usage());
+ gl_representation.reset();
+ }
+}
+
+TEST_P(SharedImageBackingFactoryGLImageTest, InitialDataWrongSize) {
+ auto mailbox = Mailbox::GenerateForSharedImage();
+ auto format = get_format();
+ gfx::Size size(256, 256);
+ auto color_space = gfx::ColorSpace::CreateSRGB();
+ GrSurfaceOrigin surface_origin = kTopLeft_GrSurfaceOrigin;
+ SkAlphaType alpha_type = kPremul_SkAlphaType;
+ uint32_t usage = SHARED_IMAGE_USAGE_SCANOUT;
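+ // Both an undersized and an oversized initial-data buffer must be rejected.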
+ std::vector<uint8_t> initial_data_small(256 * 128 * 4);
+ std::vector<uint8_t> initial_data_large(256 * 512 * 4);
+ auto backing = backing_factory_->CreateSharedImage(
+ mailbox, format, size, color_space, surface_origin, alpha_type, usage,
+ initial_data_small);
+ EXPECT_FALSE(backing);
+ backing = backing_factory_->CreateSharedImage(
+ mailbox, format, size, color_space, surface_origin, alpha_type, usage,
+ initial_data_large);
+ EXPECT_FALSE(backing);
+}
+
+TEST_P(SharedImageBackingFactoryGLImageTest, InvalidFormat) {
+ auto mailbox = Mailbox::GenerateForSharedImage();
+ auto format = viz::ResourceFormat::YUV_420_BIPLANAR;
+ gfx::Size size(256, 256);
+ auto color_space = gfx::ColorSpace::CreateSRGB();
+ GrSurfaceOrigin surface_origin = kTopLeft_GrSurfaceOrigin;
+ SkAlphaType alpha_type = kPremul_SkAlphaType;
+ gpu::SurfaceHandle surface_handle = gpu::kNullSurfaceHandle;
+ uint32_t usage = SHARED_IMAGE_USAGE_SCANOUT;
+ auto backing = backing_factory_->CreateSharedImage(
+ mailbox, format, surface_handle, size, color_space, surface_origin,
+ alpha_type, usage, false /* is_thread_safe */);
+ EXPECT_FALSE(backing);
+}
+
+TEST_P(SharedImageBackingFactoryGLImageTest, InvalidSize) {
+ auto mailbox = Mailbox::GenerateForSharedImage();
+ auto format = get_format();
+ gfx::Size size(0, 0);
+ auto color_space = gfx::ColorSpace::CreateSRGB();
+ GrSurfaceOrigin surface_origin = kTopLeft_GrSurfaceOrigin;
+ SkAlphaType alpha_type = kPremul_SkAlphaType;
+ gpu::SurfaceHandle surface_handle = gpu::kNullSurfaceHandle;
+ uint32_t usage = SHARED_IMAGE_USAGE_SCANOUT;
+ auto backing = backing_factory_->CreateSharedImage(
+ mailbox, format, surface_handle, size, color_space, surface_origin,
+ alpha_type, usage, false /* is_thread_safe */);
+ EXPECT_FALSE(backing);
+
+ size = gfx::Size(INT_MAX, INT_MAX);
+ backing = backing_factory_->CreateSharedImage(
+ mailbox, format, surface_handle, size, color_space, surface_origin,
+ alpha_type, usage, false /* is_thread_safe */);
+ EXPECT_FALSE(backing);
+}
+
+TEST_P(SharedImageBackingFactoryGLImageTest, EstimatedSize) {
+ const bool should_succeed =
+ can_create_scanout_or_gmb_shared_image(get_format());
+ if (should_succeed)
+ EXPECT_CALL(progress_reporter_, ReportProgress).Times(AtLeast(1));
+ auto mailbox = Mailbox::GenerateForSharedImage();
+ auto format = get_format();
+ gfx::Size size(256, 256);
+ auto color_space = gfx::ColorSpace::CreateSRGB();
+ GrSurfaceOrigin surface_origin = kTopLeft_GrSurfaceOrigin;
+ SkAlphaType alpha_type = kPremul_SkAlphaType;
+ gpu::SurfaceHandle surface_handle = gpu::kNullSurfaceHandle;
+ uint32_t usage = SHARED_IMAGE_USAGE_SCANOUT;
+ auto backing = backing_factory_->CreateSharedImage(
+ mailbox, format, surface_handle, size, color_space, surface_origin,
+ alpha_type, usage, false /* is_thread_safe */);
+
+ if (!should_succeed) {
+ EXPECT_FALSE(backing);
+ return;
+ }
+ ASSERT_TRUE(backing);
+
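+ // The backing must report a non-zero estimated size, and registering it
+ // must attribute exactly that amount to the memory tracker.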
+ size_t backing_estimated_size = backing->estimated_size();
+ EXPECT_GT(backing_estimated_size, 0u);
+
+ std::unique_ptr<SharedImageRepresentationFactoryRef> shared_image =
+ shared_image_manager_->Register(std::move(backing),
+ memory_type_tracker_.get());
+ EXPECT_EQ(backing_estimated_size, memory_type_tracker_->GetMemRepresented());
+
+ shared_image.reset();
+}
+
+// Ensures that the various conversion functions used with TexStorage2D match
+// their TexImage2D equivalents, allowing us to minimize the amount of parallel
+// data tracked in the SharedImageBackingFactoryGLImage.
+TEST_P(SharedImageBackingFactoryGLImageTest, TexImageTexStorageEquivalence) {
+ scoped_refptr<gles2::FeatureInfo> feature_info =
+ new gles2::FeatureInfo(GpuDriverBugWorkarounds(), GpuFeatureInfo());
+ feature_info->Initialize(ContextType::CONTEXT_TYPE_OPENGLES2,
+ use_passthrough(), gles2::DisallowedFeatures());
+ const gles2::Validators* validators = feature_info->validators();
+
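+ // Walk every ResourceFormat and check that the TexImage-style format/type
+ // match what the TexStorage format decomposes to.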
+ for (int i = 0; i <= viz::RESOURCE_FORMAT_MAX; ++i) {
+ auto format = static_cast<viz::ResourceFormat>(i);
+ if (!viz::GLSupportsFormat(format) ||
+ viz::IsResourceFormatCompressed(format))
+ continue;
+ int storage_format = viz::TextureStorageFormat(format);
+
+ int image_gl_format = viz::GLDataFormat(format);
+ int storage_gl_format =
+ gles2::TextureManager::ExtractFormatFromStorageFormat(storage_format);
+ EXPECT_EQ(image_gl_format, storage_gl_format);
+
+ int image_gl_type = viz::GLDataType(format);
+ int storage_gl_type =
+ gles2::TextureManager::ExtractTypeFromStorageFormat(storage_format);
+
+ // Ignore the HALF_FLOAT / HALF_FLOAT_OES discrepancy for now.
+ // TODO(ericrk): Figure out if we need additional action to support
+ // HALF_FLOAT.
+ if (!(image_gl_type == GL_HALF_FLOAT_OES &&
+ storage_gl_type == GL_HALF_FLOAT)) {
+ EXPECT_EQ(image_gl_type, storage_gl_type);
+ }
+
+ // Confirm that we support TexStorage2D only if we support TexImage2D:
+ int image_internal_format = viz::GLInternalFormat(format);
+ bool supports_tex_image =
+ validators->texture_internal_format.IsValid(image_internal_format) &&
+ validators->texture_format.IsValid(image_gl_format) &&
+ validators->pixel_type.IsValid(image_gl_type);
+ bool supports_tex_storage =
+ validators->texture_internal_format_storage.IsValid(storage_format);
+ if (supports_tex_storage)
+ EXPECT_TRUE(supports_tex_image);
+ }
+}
+
+class StubImage : public gl::GLImageStub {
+ public:
+ StubImage(const gfx::Size& size, gfx::BufferFormat format)
+ : size_(size), format_(format) {}
+
+ gfx::Size GetSize() override { return size_; }
+ unsigned GetInternalFormat() override {
+ return gl::BufferFormatToGLInternalFormat(format_);
+ }
+ unsigned GetDataType() override {
+ return gl::BufferFormatToGLDataType(format_);
+ }
+
+ BindOrCopy ShouldBindOrCopy() override { return BIND; }
+
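+ // Each unbound-to-bound transition counts as a single update.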
+ bool BindTexImage(unsigned target) override {
+ if (!bound_) {
+ bound_ = true;
+ ++update_counter_;
+ }
+ return true;
+ }
+
+ bool BindTexImageWithInternalformat(unsigned target,
+ unsigned internal_format) override {
+ internal_format_ = internal_format;
+ if (!bound_) {
+ bound_ = true;
+ ++update_counter_;
+ }
+ return true;
+ }
+
+ void ReleaseTexImage(unsigned target) override { bound_ = false; }
+
+ bool bound() const { return bound_; }
+ int update_counter() const { return update_counter_; }
+ unsigned internal_format() const { return internal_format_; }
+
+ private:
+ ~StubImage() override = default;
+
+ gfx::Size size_;
+ gfx::BufferFormat format_;
+ bool bound_ = false;
+ int update_counter_ = 0;
+ unsigned internal_format_ = GL_RGBA;
+};
+
+class SharedImageBackingFactoryGLImageWithGMBTest
+ : public SharedImageBackingFactoryGLImageTestBase,
+ public gpu::ImageFactory {
+ public:
+ SharedImageBackingFactoryGLImageWithGMBTest()
+ : SharedImageBackingFactoryGLImageTestBase(false) {}
+ void SetUp() override { SetUpBase(GpuDriverBugWorkarounds(), this); }
+
+ scoped_refptr<gl::GLImage> GetImageFromMailbox(Mailbox mailbox) {
+ if (!use_passthrough()) {
+ auto representation =
+ shared_image_representation_factory_->ProduceGLTexture(mailbox);
+ DCHECK(representation);
+ return representation->GetTexture()->GetLevelImage(GL_TEXTURE_2D, 0);
+ } else {
+ auto representation =
+ shared_image_representation_factory_->ProduceGLTexturePassthrough(
+ mailbox);
+ DCHECK(representation);
+ return representation->GetTexturePassthrough()->GetLevelImage(
+ GL_TEXTURE_2D, 0);
+ }
+ }
+
+ protected:
+ // gpu::ImageFactory implementation.
+ scoped_refptr<gl::GLImage> CreateImageForGpuMemoryBuffer(
+ gfx::GpuMemoryBufferHandle handle,
+ const gfx::Size& size,
+ gfx::BufferFormat format,
+ gfx::BufferPlane plane,
+ int client_id,
+ gpu::SurfaceHandle surface_handle) override {
+ // Pretend to handle NATIVE_PIXMAP handles, but only for the expected client.
+ if (handle.type != gfx::NATIVE_PIXMAP)
+ return nullptr;
+ if (client_id != kClientId)
+ return nullptr;
+ return base::MakeRefCounted<StubImage>(size, format);
+ }
+
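+ // Arbitrary id; CreateImageForGpuMemoryBuffer() above only accepts it.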
+ static constexpr int kClientId = 3;
+};
+
+TEST_P(SharedImageBackingFactoryGLImageWithGMBTest,
+ GpuMemoryBufferImportEmpty) {
+ auto mailbox = Mailbox::GenerateForSharedImage();
+ gfx::Size size(256, 256);
+ gfx::BufferFormat format = viz::BufferFormat(get_format());
+ auto color_space = gfx::ColorSpace::CreateSRGB();
+ GrSurfaceOrigin surface_origin = kTopLeft_GrSurfaceOrigin;
+ SkAlphaType alpha_type = kPremul_SkAlphaType;
+ uint32_t usage = SHARED_IMAGE_USAGE_GLES2;
+
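+ // A default-constructed handle has type EMPTY_BUFFER, so import must fail.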
+ gfx::GpuMemoryBufferHandle handle;
+ auto backing = backing_factory_->CreateSharedImage(
+ mailbox, kClientId, std::move(handle), format, gfx::BufferPlane::DEFAULT,
+ kNullSurfaceHandle, size, color_space, surface_origin, alpha_type, usage);
+ EXPECT_FALSE(backing);
+}
+
+TEST_P(SharedImageBackingFactoryGLImageWithGMBTest,
+ GpuMemoryBufferImportNative) {
+ // TODO(jonahr): Test crashes on Mac with ANGLE/passthrough
+ // (crbug.com/1100975)
+ gpu::GPUTestBotConfig bot_config;
+ if (bot_config.LoadCurrentConfig(nullptr) &&
+ bot_config.Matches("mac passthrough")) {
+ return;
+ }
+ auto mailbox = Mailbox::GenerateForSharedImage();
+ gfx::Size size(256, 256);
+ gfx::BufferFormat format = viz::BufferFormat(get_format());
+ auto color_space = gfx::ColorSpace::CreateSRGB();
+ GrSurfaceOrigin surface_origin = kTopLeft_GrSurfaceOrigin;
+ SkAlphaType alpha_type = kPremul_SkAlphaType;
+ uint32_t usage = SHARED_IMAGE_USAGE_GLES2;
+
+ gfx::GpuMemoryBufferHandle handle;
+ handle.type = gfx::NATIVE_PIXMAP;
+ auto backing = backing_factory_->CreateSharedImage(
+ mailbox, kClientId, std::move(handle), format, gfx::BufferPlane::DEFAULT,
+ kNullSurfaceHandle, size, color_space, surface_origin, alpha_type, usage);
+ if (!can_create_scanout_or_gmb_shared_image(get_format())) {
+ EXPECT_FALSE(backing);
+ return;
+ }
+ ASSERT_TRUE(backing);
+
+ std::unique_ptr<SharedImageRepresentationFactoryRef> ref =
+ shared_image_manager_->Register(std::move(backing),
+ memory_type_tracker_.get());
+ scoped_refptr<gl::GLImage> image = GetImageFromMailbox(mailbox);
+ ASSERT_EQ(image->GetType(), gl::GLImage::Type::NONE);
+ auto* stub_image = static_cast<StubImage*>(image.get());
+ EXPECT_FALSE(stub_image->bound());
+ int update_counter = stub_image->update_counter();
+ ref->Update(nullptr);
+ EXPECT_EQ(stub_image->update_counter(), update_counter);
+ EXPECT_FALSE(stub_image->bound());
+
+ {
+ auto skia_representation =
+ shared_image_representation_factory_->ProduceSkia(mailbox,
+ context_state_);
+ std::vector<GrBackendSemaphore> begin_semaphores;
+ std::vector<GrBackendSemaphore> end_semaphores;
+ std::unique_ptr<SharedImageRepresentationSkia::ScopedReadAccess>
+ scoped_read_access;
+ skia_representation->BeginScopedReadAccess(&begin_semaphores,
+ &end_semaphores);
+ EXPECT_TRUE(stub_image->bound());
+ }
+ if (use_passthrough())
+ EXPECT_FALSE(stub_image->bound());
+ else
+ EXPECT_TRUE(stub_image->bound());
+ EXPECT_GT(stub_image->update_counter(), update_counter);
+}
+
+TEST_P(SharedImageBackingFactoryGLImageWithGMBTest,
+ GpuMemoryBufferImportSharedMemory) {
+ auto mailbox = Mailbox::GenerateForSharedImage();
+ gfx::Size size(256, 256);
+ gfx::BufferFormat format = viz::BufferFormat(get_format());
+ auto color_space = gfx::ColorSpace::CreateSRGB();
+ GrSurfaceOrigin surface_origin = kTopLeft_GrSurfaceOrigin;
+ SkAlphaType alpha_type = kPremul_SkAlphaType;
+ uint32_t usage = SHARED_IMAGE_USAGE_GLES2;
+
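+ // Build a shared-memory handle sized for the format, with zero offset and a
+ // tightly packed stride.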
+ size_t shm_size = 0u;
+ ASSERT_TRUE(gfx::BufferSizeForBufferFormatChecked(size, format, &shm_size));
+ gfx::GpuMemoryBufferHandle handle;
+ handle.type = gfx::SHARED_MEMORY_BUFFER;
+ handle.region = base::UnsafeSharedMemoryRegion::Create(shm_size);
+ ASSERT_TRUE(handle.region.IsValid());
+ handle.offset = 0;
+ handle.stride = static_cast<int32_t>(
+ gfx::RowSizeForBufferFormat(size.width(), format, 0));
+
+ auto backing = backing_factory_->CreateSharedImage(
+ mailbox, kClientId, std::move(handle), format, gfx::BufferPlane::DEFAULT,
+ kNullSurfaceHandle, size, color_space, surface_origin, alpha_type, usage);
+ if (!can_create_scanout_or_gmb_shared_image(get_format())) {
+ EXPECT_FALSE(backing);
+ return;
+ }
+ ASSERT_TRUE(backing);
+
+ std::unique_ptr<SharedImageRepresentationFactoryRef> ref =
+ shared_image_manager_->Register(std::move(backing),
+ memory_type_tracker_.get());
+ scoped_refptr<gl::GLImage> image = GetImageFromMailbox(mailbox);
+ ASSERT_EQ(image->GetType(), gl::GLImage::Type::MEMORY);
+ auto* shm_image = static_cast<gl::GLImageSharedMemory*>(image.get());
+ EXPECT_EQ(size, shm_image->GetSize());
+ EXPECT_EQ(format, shm_image->format());
+}
+
+TEST_P(SharedImageBackingFactoryGLImageWithGMBTest,
+ GpuMemoryBufferImportNative_WithRGBEmulation) {
+ if (use_passthrough())
+ return;
+ auto mailbox = Mailbox::GenerateForSharedImage();
+ gfx::Size size(256, 256);
+ gfx::BufferFormat format = viz::BufferFormat(get_format());
+ auto color_space = gfx::ColorSpace::CreateSRGB();
+ GrSurfaceOrigin surface_origin = kTopLeft_GrSurfaceOrigin;
+ SkAlphaType alpha_type = kPremul_SkAlphaType;
+ uint32_t usage = SHARED_IMAGE_USAGE_GLES2;
+
+ gfx::GpuMemoryBufferHandle handle;
+ handle.type = gfx::NATIVE_PIXMAP;
+ auto backing = backing_factory_->CreateSharedImage(
+ mailbox, kClientId, std::move(handle), format, gfx::BufferPlane::DEFAULT,
+ kNullSurfaceHandle, size, color_space, surface_origin, alpha_type, usage);
+ if (!can_create_scanout_or_gmb_shared_image(get_format())) {
+ EXPECT_FALSE(backing);
+ return;
+ }
+ ASSERT_TRUE(backing);
+
+ std::unique_ptr<SharedImageRepresentationFactoryRef> ref =
+ shared_image_manager_->Register(std::move(backing),
+ memory_type_tracker_.get());
+
+ auto representation =
+ shared_image_representation_factory_->ProduceRGBEmulationGLTexture(
+ mailbox);
+ EXPECT_TRUE(representation);
+ EXPECT_TRUE(representation->GetTexture()->service_id());
+ EXPECT_EQ(size, representation->size());
+ EXPECT_EQ(get_format(), representation->format());
+ EXPECT_EQ(color_space, representation->color_space());
+ EXPECT_EQ(usage, representation->usage());
+
+ scoped_refptr<gl::GLImage> image =
+ representation->GetTexture()->GetLevelImage(GL_TEXTURE_2D, 0);
+ ASSERT_EQ(image->GetType(), gl::GLImage::Type::NONE);
+ auto* stub_image = static_cast<StubImage*>(image.get());
+ EXPECT_EQ(stub_image->internal_format(), (unsigned)GL_RGB);
+ EXPECT_TRUE(stub_image->bound());
+ EXPECT_EQ(stub_image->update_counter(), 1);
+}
+
+#if !defined(OS_ANDROID)
+const auto kResourceFormats =
+ ::testing::Values(viz::ResourceFormat::RGBA_8888,
+ viz::ResourceFormat::BGRA_1010102,
+ viz::ResourceFormat::RGBA_1010102);
+#else
+// High bit depth rendering is not supported on Android.
+const auto kResourceFormats = ::testing::Values(viz::ResourceFormat::RGBA_8888);
+#endif
+
+std::string TestParamToString(
+ const testing::TestParamInfo<std::tuple<bool, viz::ResourceFormat>>&
+ param_info) {
+ const bool allow_passthrough = std::get<0>(param_info.param);
+ const viz::ResourceFormat format = std::get<1>(param_info.param);
+ return base::StringPrintf(
+ "%s_%s", (allow_passthrough ? "AllowPassthrough" : "DisallowPassthrough"),
+ gfx::BufferFormatToString(viz::BufferFormat(format)));
+}
+
+INSTANTIATE_TEST_SUITE_P(Service,
+ SharedImageBackingFactoryGLImageTest,
+ ::testing::Combine(::testing::Bool(),
+ kResourceFormats),
+ TestParamToString);
+INSTANTIATE_TEST_SUITE_P(Service,
+ SharedImageBackingFactoryGLImageWithGMBTest,
+ ::testing::Combine(::testing::Bool(),
+ kResourceFormats),
+ TestParamToString);
+
+} // anonymous namespace
+} // namespace gpu
diff --git a/chromium/gpu/command_buffer/service/shared_image_backing_factory_gl_texture.cc b/chromium/gpu/command_buffer/service/shared_image_backing_factory_gl_texture.cc
index c27e621272e..044c7bd0acb 100644
--- a/chromium/gpu/command_buffer/service/shared_image_backing_factory_gl_texture.cc
+++ b/chromium/gpu/command_buffer/service/shared_image_backing_factory_gl_texture.cc
@@ -4,56 +4,17 @@
#include "gpu/command_buffer/service/shared_image_backing_factory_gl_texture.h"
-#include <algorithm>
#include <list>
-#include <string>
#include <utility>
-#include "base/containers/contains.h"
-#include "base/feature_list.h"
-#include "base/trace_event/memory_dump_manager.h"
-#include "base/trace_event/trace_event.h"
-#include "build/build_config.h"
-#include "components/viz/common/resources/resource_format_utils.h"
#include "components/viz/common/resources/resource_sizes.h"
-#include "gpu/command_buffer/common/gles2_cmd_utils.h"
-#include "gpu/command_buffer/common/shared_image_trace_utils.h"
+#include "gpu/command_buffer/common/mailbox.h"
#include "gpu/command_buffer/common/shared_image_usage.h"
-#include "gpu/command_buffer/service/context_state.h"
-#include "gpu/command_buffer/service/gles2_cmd_decoder.h"
-#include "gpu/command_buffer/service/image_factory.h"
-#include "gpu/command_buffer/service/mailbox_manager.h"
#include "gpu/command_buffer/service/service_utils.h"
-#include "gpu/command_buffer/service/shared_context_state.h"
-#include "gpu/command_buffer/service/shared_image_backing.h"
-#include "gpu/command_buffer/service/shared_image_backing_gl_image.h"
#include "gpu/command_buffer/service/shared_image_backing_gl_texture.h"
-#include "gpu/command_buffer/service/shared_image_factory.h"
-#include "gpu/command_buffer/service/shared_image_representation.h"
-#include "gpu/command_buffer/service/skia_utils.h"
-#include "gpu/config/gpu_finch_features.h"
#include "gpu/config/gpu_preferences.h"
-#include "third_party/skia/include/core/SkPromiseImageTexture.h"
-#include "ui/gfx/buffer_format_util.h"
-#include "ui/gfx/color_space.h"
-#include "ui/gfx/geometry/size.h"
-#include "ui/gl/buffer_format_utils.h"
-#include "ui/gl/gl_bindings.h"
-#include "ui/gl/gl_fence.h"
#include "ui/gl/gl_gl_api_implementation.h"
-#include "ui/gl/gl_image_native_pixmap.h"
-#include "ui/gl/gl_image_shared_memory.h"
-#include "ui/gl/gl_implementation.h"
-#include "ui/gl/gl_version_info.h"
#include "ui/gl/progress_reporter.h"
-#include "ui/gl/scoped_binders.h"
-#include "ui/gl/shared_gl_fence_egl.h"
-#include "ui/gl/trace_util.h"
-
-#if defined(OS_ANDROID)
-#include "gpu/command_buffer/service/shared_image_backing_egl_image.h"
-#include "gpu/command_buffer/service/shared_image_batch_access_manager.h"
-#endif
namespace gpu {
@@ -76,120 +37,11 @@ SharedImageBackingFactoryGLTexture::SharedImageBackingFactoryGLTexture(
const GpuPreferences& gpu_preferences,
const GpuDriverBugWorkarounds& workarounds,
const GpuFeatureInfo& gpu_feature_info,
- ImageFactory* image_factory,
- SharedImageBatchAccessManager* batch_access_manager,
gl::ProgressReporter* progress_reporter)
- : use_passthrough_(gpu_preferences.use_passthrough_cmd_decoder &&
- gles2::PassthroughCommandDecoderSupported()),
- image_factory_(image_factory),
- workarounds_(workarounds),
- progress_reporter_(progress_reporter) {
-#if defined(OS_ANDROID)
- batch_access_manager_ = batch_access_manager;
-#endif
- gl::GLApi* api = gl::g_current_gl_context;
- api->glGetIntegervFn(GL_MAX_TEXTURE_SIZE, &max_texture_size_);
- // When the passthrough command decoder is used, the max_texture_size
- // workaround is implemented by ANGLE. Trying to adjust the max size here
- // would cause discrepancy between what we think the max size is and what
- // ANGLE tells the clients.
- if (!use_passthrough_ && workarounds.max_texture_size) {
- max_texture_size_ =
- std::min(max_texture_size_, workarounds.max_texture_size);
- }
- // Ensure max_texture_size_ is less than INT_MAX so that gfx::Rect and friends
- // can be used to accurately represent all valid sub-rects, with overflow
- // cases, clamped to INT_MAX, always invalid.
- max_texture_size_ = std::min(max_texture_size_, INT_MAX - 1);
-
- // TODO(piman): Can we extract the logic out of FeatureInfo?
- scoped_refptr<gles2::FeatureInfo> feature_info =
- new gles2::FeatureInfo(workarounds, gpu_feature_info);
- feature_info->Initialize(ContextType::CONTEXT_TYPE_OPENGLES2,
- use_passthrough_, gles2::DisallowedFeatures());
- gpu_memory_buffer_formats_ =
- feature_info->feature_flags().gpu_memory_buffer_formats;
- texture_usage_angle_ = feature_info->feature_flags().angle_texture_usage;
- attribs.es3_capable = feature_info->IsES3Capable();
- attribs.desktop_gl = !feature_info->gl_version_info().is_es;
- // Can't use the value from feature_info, as we unconditionally enable this
- // extension, and assume it can't be used if PBOs are not used (which isn't
- // true for Skia used directly against GL).
- attribs.supports_unpack_subimage =
- gl::g_current_gl_driver->ext.b_GL_EXT_unpack_subimage;
- bool enable_texture_storage =
- feature_info->feature_flags().ext_texture_storage;
- bool enable_scanout_images =
- (image_factory_ && image_factory_->SupportsCreateAnonymousImage());
- const gles2::Validators* validators = feature_info->validators();
- for (int i = 0; i <= viz::RESOURCE_FORMAT_MAX; ++i) {
- auto format = static_cast<viz::ResourceFormat>(i);
- FormatInfo& info = format_info_[i];
- if (!viz::GLSupportsFormat(format))
- continue;
- const GLuint image_internal_format = viz::GLInternalFormat(format);
- const GLenum gl_format = viz::GLDataFormat(format);
- const GLenum gl_type = viz::GLDataType(format);
- const bool uncompressed_format_valid =
- validators->texture_internal_format.IsValid(image_internal_format) &&
- validators->texture_format.IsValid(gl_format);
- const bool compressed_format_valid =
- validators->compressed_texture_format.IsValid(image_internal_format);
- if ((uncompressed_format_valid || compressed_format_valid) &&
- validators->pixel_type.IsValid(gl_type)) {
- info.enabled = true;
- info.is_compressed = compressed_format_valid;
- info.gl_format = gl_format;
- info.gl_type = gl_type;
- info.swizzle = gles2::TextureManager::GetCompatibilitySwizzle(
- feature_info.get(), gl_format);
- info.image_internal_format =
- gles2::TextureManager::AdjustTexInternalFormat(
- feature_info.get(), image_internal_format, gl_type);
- info.adjusted_format =
- gles2::TextureManager::AdjustTexFormat(feature_info.get(), gl_format);
- }
- if (!info.enabled)
- continue;
- if (enable_texture_storage && !info.is_compressed) {
- GLuint storage_internal_format = viz::TextureStorageFormat(format);
- if (validators->texture_internal_format_storage.IsValid(
- storage_internal_format)) {
- info.supports_storage = true;
- info.storage_internal_format =
- gles2::TextureManager::AdjustTexStorageFormat(
- feature_info.get(), storage_internal_format);
- }
- }
- if (!info.enabled || !enable_scanout_images ||
- !IsGpuMemoryBufferFormatSupported(format)) {
- continue;
- }
- const gfx::BufferFormat buffer_format = viz::BufferFormat(format);
- switch (buffer_format) {
- case gfx::BufferFormat::RGBA_8888:
- case gfx::BufferFormat::BGRA_8888:
- case gfx::BufferFormat::RGBA_F16:
- case gfx::BufferFormat::R_8:
- case gfx::BufferFormat::BGRA_1010102:
- case gfx::BufferFormat::RGBA_1010102:
- break;
- default:
- continue;
- }
- if (!gpu_memory_buffer_formats_.Has(buffer_format))
- continue;
- info.allow_scanout = true;
- info.buffer_format = buffer_format;
- DCHECK_EQ(info.image_internal_format,
- gl::BufferFormatToGLInternalFormat(buffer_format));
- if (base::Contains(gpu_preferences.texture_target_exception_list,
- gfx::BufferUsageAndFormat(gfx::BufferUsage::SCANOUT,
- buffer_format))) {
- info.target_for_scanout = gpu::GetPlatformSpecificTextureTarget();
- }
- }
-}
+ : SharedImageBackingFactoryGLCommon(gpu_preferences,
+ workarounds,
+ gpu_feature_info,
+ progress_reporter) {}
SharedImageBackingFactoryGLTexture::~SharedImageBackingFactoryGLTexture() =
default;
@@ -205,14 +57,10 @@ SharedImageBackingFactoryGLTexture::CreateSharedImage(
SkAlphaType alpha_type,
uint32_t usage,
bool is_thread_safe) {
- if (is_thread_safe) {
- return MakeEglImageBacking(mailbox, format, size, color_space,
- surface_origin, alpha_type, usage);
- } else {
- return CreateSharedImageInternal(mailbox, format, surface_handle, size,
- color_space, surface_origin, alpha_type,
- usage, base::span<const uint8_t>());
- }
+ DCHECK(!is_thread_safe);
+ return CreateSharedImageInternal(mailbox, format, surface_handle, size,
+ color_space, surface_origin, alpha_type,
+ usage, base::span<const uint8_t>());
}
std::unique_ptr<SharedImageBacking>
@@ -243,80 +91,8 @@ SharedImageBackingFactoryGLTexture::CreateSharedImage(
GrSurfaceOrigin surface_origin,
SkAlphaType alpha_type,
uint32_t usage) {
- if (!gpu_memory_buffer_formats_.Has(buffer_format)) {
- LOG(ERROR) << "CreateSharedImage: unsupported buffer format "
- << gfx::BufferFormatToString(buffer_format);
- return nullptr;
- }
-
- if (!gpu::IsImageSizeValidForGpuMemoryBufferFormat(size, buffer_format)) {
- LOG(ERROR) << "Invalid image size " << size.ToString() << " for "
- << gfx::BufferFormatToString(buffer_format);
- return nullptr;
- }
-
- if (!gpu::IsPlaneValidForGpuMemoryBufferFormat(plane, buffer_format)) {
- LOG(ERROR) << "Invalid plane " << gfx::BufferPlaneToString(plane) << " for "
- << gfx::BufferFormatToString(buffer_format);
- return nullptr;
- }
-
- const gfx::GpuMemoryBufferType handle_type = handle.type;
- GLenum target =
- (handle_type == gfx::SHARED_MEMORY_BUFFER ||
- !NativeBufferNeedsPlatformSpecificTextureTarget(buffer_format))
- ? GL_TEXTURE_2D
- : gpu::GetPlatformSpecificTextureTarget();
- scoped_refptr<gl::GLImage> image = MakeGLImage(
- client_id, std::move(handle), buffer_format, plane, surface_handle, size);
- if (!image) {
- LOG(ERROR) << "Failed to create image.";
- return nullptr;
- }
- // If we decide to use GL_TEXTURE_2D at the target for a native buffer, we
- // would like to verify that it will actually work. If the image expects to be
- // copied, there is no way to do this verification here, because copying is
- // done lazily after the SharedImage is created, so require that the image is
- // bindable. Currently NativeBufferNeedsPlatformSpecificTextureTarget can
- // only return false on Chrome OS where GLImageNativePixmap is used which is
- // always bindable.
-#if DCHECK_IS_ON()
- bool texture_2d_support = false;
-#if defined(OS_MAC)
- // If the PlatformSpecificTextureTarget on Mac is GL_TEXTURE_2D, this is
- // supported.
- texture_2d_support =
- (gpu::GetPlatformSpecificTextureTarget() == GL_TEXTURE_2D);
-#endif // defined(OS_MAC)
- DCHECK(handle_type == gfx::SHARED_MEMORY_BUFFER || target != GL_TEXTURE_2D ||
- texture_2d_support || image->ShouldBindOrCopy() == gl::GLImage::BIND);
-#endif // DCHECK_IS_ON()
- if (color_space.IsValid())
- image->SetColorSpace(color_space);
- if (usage & SHARED_IMAGE_USAGE_MACOS_VIDEO_TOOLBOX)
- image->DisableInUseByWindowServer();
-
- gfx::BufferFormat plane_buffer_format =
- GetPlaneBufferFormat(plane, buffer_format);
- viz::ResourceFormat format = viz::GetResourceFormat(plane_buffer_format);
- const bool for_framebuffer_attachment =
- (usage & (SHARED_IMAGE_USAGE_RASTER |
- SHARED_IMAGE_USAGE_GLES2_FRAMEBUFFER_HINT)) != 0;
- const bool is_rgb_emulation = (usage & SHARED_IMAGE_USAGE_RGB_EMULATION) != 0;
-
- InitializeGLTextureParams params;
- params.target = target;
- params.internal_format =
- is_rgb_emulation ? GL_RGB : image->GetInternalFormat();
- params.format = is_rgb_emulation ? GL_RGB : image->GetDataFormat();
- params.type = image->GetDataType();
- params.is_cleared = true;
- params.is_rgb_emulation = is_rgb_emulation;
- params.framebuffer_attachment_angle =
- for_framebuffer_attachment && texture_usage_angle_;
- return std::make_unique<SharedImageBackingGLImage>(
- image, mailbox, format, size, color_space, surface_origin, alpha_type,
- usage, params, attribs, use_passthrough_);
+ NOTIMPLEMENTED_LOG_ONCE();
+ return nullptr;
}
std::unique_ptr<SharedImageBacking>
@@ -341,80 +117,39 @@ SharedImageBackingFactoryGLTexture::CreateSharedImageForTest(
return std::move(result);
}
-scoped_refptr<gl::GLImage> SharedImageBackingFactoryGLTexture::MakeGLImage(
- int client_id,
- gfx::GpuMemoryBufferHandle handle,
- gfx::BufferFormat format,
- gfx::BufferPlane plane,
- SurfaceHandle surface_handle,
- const gfx::Size& size) {
- if (handle.type == gfx::SHARED_MEMORY_BUFFER) {
- if (plane != gfx::BufferPlane::DEFAULT)
- return nullptr;
- if (!base::IsValueInRangeForNumericType<size_t>(handle.stride))
- return nullptr;
- auto image = base::MakeRefCounted<gl::GLImageSharedMemory>(size);
- if (!image->Initialize(handle.region, handle.id, format, handle.offset,
- handle.stride)) {
- return nullptr;
- }
-
- return image;
- }
-
- if (!image_factory_)
- return nullptr;
-
- return image_factory_->CreateImageForGpuMemoryBuffer(
- std::move(handle), size, format, plane, client_id, surface_handle);
-}
-
-bool SharedImageBackingFactoryGLTexture::CanImportGpuMemoryBuffer(
- gfx::GpuMemoryBufferType memory_buffer_type) {
- // SharedImageFactory may call CanImportGpuMemoryBuffer() in all other
- // SharedImageBackingFactory implementations except this one.
- NOTREACHED();
- return true;
-}
-
-std::unique_ptr<SharedImageBacking>
-SharedImageBackingFactoryGLTexture::MakeEglImageBacking(
- const Mailbox& mailbox,
+bool SharedImageBackingFactoryGLTexture::IsSupported(
+ uint32_t usage,
viz::ResourceFormat format,
- const gfx::Size& size,
- const gfx::ColorSpace& color_space,
- GrSurfaceOrigin surface_origin,
- SkAlphaType alpha_type,
- uint32_t usage) {
-#if defined(OS_ANDROID)
- const FormatInfo& format_info = format_info_[format];
- if (!format_info.enabled) {
- DLOG(ERROR) << "MakeEglImageBacking: invalid format";
- return nullptr;
+ bool thread_safe,
+ gfx::GpuMemoryBufferType gmb_type,
+ GrContextType gr_context_type,
+ bool* allow_legacy_mailbox,
+ bool is_pixel_used) {
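+ // Initial pixel data is only supported when Ganesh runs on GL.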
+ if (is_pixel_used && gr_context_type != GrContextType::kGL) {
+ return false;
}
-
- DCHECK(!(usage & SHARED_IMAGE_USAGE_SCANOUT));
-
- if (size.width() < 1 || size.height() < 1 ||
- size.width() > max_texture_size_ || size.height() > max_texture_size_) {
- DLOG(ERROR) << "MakeEglImageBacking: Invalid size";
- return nullptr;
+ if (thread_safe) {
+ return false;
+ }
+ if (gmb_type != gfx::EMPTY_BUFFER) {
+ return false;
}
- // Calculate SharedImage size in bytes.
- size_t estimated_size;
- if (!viz::ResourceSizes::MaybeSizeInBytes(size, format, &estimated_size)) {
- DLOG(ERROR) << "MakeEglImageBacking: Failed to calculate SharedImage size";
- return nullptr;
+ // DISPLAY and RASTER usage (OOPR canvas) are only supported on GL contexts.
+ if (gr_context_type != GrContextType::kGL &&
+ ((usage & SHARED_IMAGE_USAGE_DISPLAY) ||
+ (usage & SHARED_IMAGE_USAGE_RASTER))) {
+ return false;
+ }
+ // WEBGPU, VIDEO_DECODE, and SCANOUT usages need an interop-capable factory.
+ if ((usage & SHARED_IMAGE_USAGE_WEBGPU) ||
+ (usage & SHARED_IMAGE_USAGE_VIDEO_DECODE) ||
+ (usage & SHARED_IMAGE_USAGE_SCANOUT)) {
+ return false;
}
- return std::make_unique<SharedImageBackingEglImage>(
- mailbox, format, size, color_space, surface_origin, alpha_type, usage,
- estimated_size, format_info.gl_format, format_info.gl_type,
- batch_access_manager_, workarounds_, use_passthrough_);
-#else
- return nullptr;
-#endif
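+ // Legacy mailboxes can only be produced when Ganesh runs on GL.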
+ *allow_legacy_mailbox = gr_context_type == GrContextType::kGL;
+ return true;
}
std::unique_ptr<SharedImageBacking>
@@ -429,204 +164,70 @@ SharedImageBackingFactoryGLTexture::CreateSharedImageInternal(
uint32_t usage,
base::span<const uint8_t> pixel_data) {
const FormatInfo& format_info = format_info_[format];
- if (!format_info.enabled) {
- LOG(ERROR) << "CreateSharedImage: invalid format";
- return nullptr;
- }
-
-#if defined(OS_MAC)
- const bool use_buffer =
- usage & (SHARED_IMAGE_USAGE_SCANOUT | SHARED_IMAGE_USAGE_WEBGPU);
-#else
- const bool use_buffer = usage & SHARED_IMAGE_USAGE_SCANOUT;
-#endif
- if (use_buffer && !format_info.allow_scanout) {
- LOG(ERROR) << "CreateSharedImage: SCANOUT shared images unavailable. "
- "Buffer format= "
- << gfx::BufferFormatToString(format_info.buffer_format);
+ GLenum target = GL_TEXTURE_2D;
+ if (!CanCreateSharedImage(size, pixel_data, format_info, target)) {
return nullptr;
}
- if (size.width() < 1 || size.height() < 1 ||
- size.width() > max_texture_size_ || size.height() > max_texture_size_) {
- LOG(ERROR) << "CreateSharedImage: invalid size";
- return nullptr;
- }
-
- GLenum target = use_buffer ? format_info.target_for_scanout : GL_TEXTURE_2D;
-
- // If we have initial data to upload, ensure it is sized appropriately.
- if (!pixel_data.empty()) {
- if (format_info.is_compressed) {
- const char* error_message = "unspecified";
- if (!gles2::ValidateCompressedTexDimensions(
- target, 0 /* level */, size.width(), size.height(), 1 /* depth */,
- format_info.image_internal_format, &error_message)) {
- LOG(ERROR) << "CreateSharedImage: "
- "ValidateCompressedTexDimensionsFailed with error: "
- << error_message;
- return nullptr;
- }
-
- GLsizei bytes_required = 0;
- if (!gles2::GetCompressedTexSizeInBytes(
- nullptr /* function_name */, size.width(), size.height(),
- 1 /* depth */, format_info.image_internal_format, &bytes_required,
- nullptr /* error_state */)) {
- LOG(ERROR) << "CreateSharedImage: Unable to compute required size for "
- "initial texture upload.";
- return nullptr;
- }
-
- if (bytes_required < 0 ||
- pixel_data.size() != static_cast<size_t>(bytes_required)) {
- LOG(ERROR) << "CreateSharedImage: Initial data does not have expected "
- "size.";
- return nullptr;
- }
- } else {
- uint32_t bytes_required;
- uint32_t unpadded_row_size = 0u;
- uint32_t padded_row_size = 0u;
- if (!gles2::GLES2Util::ComputeImageDataSizes(
- size.width(), size.height(), 1 /* depth */, format_info.gl_format,
- format_info.gl_type, 4 /* alignment */, &bytes_required,
- &unpadded_row_size, &padded_row_size)) {
- LOG(ERROR) << "CreateSharedImage: Unable to compute required size for "
- "initial texture upload.";
- return nullptr;
- }
-
- // The GL spec, used in the computation for required bytes in the function
- // above, assumes no padding is required for the last row in the image.
- // But the client data does include this padding, so we add it for the
- // data validation check here.
- uint32_t padding = padded_row_size - unpadded_row_size;
- bytes_required += padding;
- if (pixel_data.size() != bytes_required) {
- LOG(ERROR) << "CreateSharedImage: Initial data does not have expected "
- "size.";
- return nullptr;
- }
- }
- }
-
const bool for_framebuffer_attachment =
(usage & (SHARED_IMAGE_USAGE_RASTER |
SHARED_IMAGE_USAGE_GLES2_FRAMEBUFFER_HINT)) != 0;
- scoped_refptr<gl::GLImage> image;
-
+ InitializeGLTextureParams params;
+ params.target = target;
// TODO(piman): We pretend the texture was created in an ES2 context, so that
// it can be used in other ES2 contexts, and so we have to pass gl_format as
// the internal format in the LevelInfo. https://crbug.com/628064
- GLuint level_info_internal_format = format_info.gl_format;
- bool is_cleared = false;
-
- // |scoped_progress_reporter| will notify |progress_reporter_| upon
- // construction and destruction. We limit the scope so that progress is
- // reported immediately after allocation/upload and before other GL
- // operations.
- if (use_buffer) {
- {
- gl::ScopedProgressReporter scoped_progress_reporter(progress_reporter_);
- image = image_factory_->CreateAnonymousImage(
- size, format_info.buffer_format, gfx::BufferUsage::SCANOUT,
- surface_handle, &is_cleared);
- }
- // Scanout images have different constraints than GL images and might fail
- // to allocate even if GL images can be created.
- if (!image) {
- gl::ScopedProgressReporter scoped_progress_reporter(progress_reporter_);
- // TODO(dcastagna): Use BufferUsage::GPU_READ_WRITE instead
- // BufferUsage::GPU_READ once we add it.
- image = image_factory_->CreateAnonymousImage(
- size, format_info.buffer_format, gfx::BufferUsage::GPU_READ,
- surface_handle, &is_cleared);
- }
- // The allocated image should not require copy.
- if (!image || image->ShouldBindOrCopy() != gl::GLImage::BIND) {
- LOG(ERROR) << "CreateSharedImage: Failed to create bindable image";
- return nullptr;
- }
- level_info_internal_format = image->GetInternalFormat();
- if (color_space.IsValid())
- image->SetColorSpace(color_space);
- if (usage & SHARED_IMAGE_USAGE_MACOS_VIDEO_TOOLBOX)
- image->DisableInUseByWindowServer();
- }
-
- InitializeGLTextureParams params;
- params.target = target;
- params.internal_format = level_info_internal_format;
+ params.internal_format = format_info.gl_format;
params.format = format_info.gl_format;
params.type = format_info.gl_type;
- params.is_cleared = pixel_data.empty() ? is_cleared : true;
- params.has_immutable_storage = !image && format_info.supports_storage;
+ params.is_cleared = !pixel_data.empty();
+ params.has_immutable_storage = format_info.supports_storage;
params.framebuffer_attachment_angle =
for_framebuffer_attachment && texture_usage_angle_;
- if (image) {
- DCHECK(!format_info.swizzle);
- auto result = std::make_unique<SharedImageBackingGLImage>(
- image, mailbox, format, size, color_space, surface_origin, alpha_type,
- usage, params, attribs, use_passthrough_);
- if (!pixel_data.empty()) {
- gl::ScopedProgressReporter scoped_progress_reporter(progress_reporter_);
- result->InitializePixels(format_info.adjusted_format, format_info.gl_type,
- pixel_data.data());
- }
- return std::move(result);
- } else {
- auto result = std::make_unique<SharedImageBackingGLTexture>(
- mailbox, format, size, color_space, surface_origin, alpha_type, usage,
- use_passthrough_);
- result->InitializeGLTexture(0, params);
-
- gl::GLApi* api = gl::g_current_gl_context;
- ScopedRestoreTexture scoped_restore(api, target);
- api->glBindTextureFn(target, result->GetGLServiceId());
+ auto result = std::make_unique<SharedImageBackingGLTexture>(
+ mailbox, format, size, color_space, surface_origin, alpha_type, usage,
+ use_passthrough_);
+ result->InitializeGLTexture(0, params);
- if (format_info.supports_storage) {
- {
- gl::ScopedProgressReporter scoped_progress_reporter(progress_reporter_);
- api->glTexStorage2DEXTFn(target, 1, format_info.storage_internal_format,
- size.width(), size.height());
- }
+ gl::GLApi* api = gl::g_current_gl_context;
+ ScopedRestoreTexture scoped_restore(api, target);
+ api->glBindTextureFn(target, result->GetGLServiceId());
- if (!pixel_data.empty()) {
- ScopedResetAndRestoreUnpackState scoped_unpack_state(
- api, attribs, true /* uploading_data */);
- gl::ScopedProgressReporter scoped_progress_reporter(progress_reporter_);
- api->glTexSubImage2DFn(target, 0, 0, 0, size.width(), size.height(),
- format_info.adjusted_format, format_info.gl_type,
- pixel_data.data());
- }
- } else if (format_info.is_compressed) {
- ScopedResetAndRestoreUnpackState scoped_unpack_state(api, attribs,
- !pixel_data.empty());
+ if (format_info.supports_storage) {
+ {
gl::ScopedProgressReporter scoped_progress_reporter(progress_reporter_);
- api->glCompressedTexImage2DFn(
- target, 0, format_info.image_internal_format, size.width(),
- size.height(), 0, pixel_data.size(), pixel_data.data());
- } else {
- ScopedResetAndRestoreUnpackState scoped_unpack_state(api, attribs,
- !pixel_data.empty());
+ api->glTexStorage2DEXTFn(target, 1, format_info.storage_internal_format,
+ size.width(), size.height());
+ }
+
+ if (!pixel_data.empty()) {
+ ScopedResetAndRestoreUnpackState scoped_unpack_state(
+ api, attribs_, true /* uploading_data */);
gl::ScopedProgressReporter scoped_progress_reporter(progress_reporter_);
- api->glTexImage2DFn(target, 0, format_info.image_internal_format,
- size.width(), size.height(), 0,
- format_info.adjusted_format, format_info.gl_type,
- pixel_data.data());
+ api->glTexSubImage2DFn(target, 0, 0, 0, size.width(), size.height(),
+ format_info.adjusted_format, format_info.gl_type,
+ pixel_data.data());
}
- result->SetCompatibilitySwizzle(format_info.swizzle);
- return std::move(result);
- }
+ } else if (format_info.is_compressed) {
+ ScopedResetAndRestoreUnpackState scoped_unpack_state(api, attribs_,
+ !pixel_data.empty());
+ gl::ScopedProgressReporter scoped_progress_reporter(progress_reporter_);
+ api->glCompressedTexImage2DFn(target, 0, format_info.image_internal_format,
+ size.width(), size.height(), 0,
+ pixel_data.size(), pixel_data.data());
+ } else {
+ ScopedResetAndRestoreUnpackState scoped_unpack_state(api, attribs_,
+ !pixel_data.empty());
+ gl::ScopedProgressReporter scoped_progress_reporter(progress_reporter_);
+ api->glTexImage2DFn(target, 0, format_info.image_internal_format,
+ size.width(), size.height(), 0,
+ format_info.adjusted_format, format_info.gl_type,
+ pixel_data.data());
+ }
+ result->SetCompatibilitySwizzle(format_info.swizzle);
+ return std::move(result);
}
-///////////////////////////////////////////////////////////////////////////////
-// SharedImageBackingFactoryGLTexture::FormatInfo
-
-SharedImageBackingFactoryGLTexture::FormatInfo::FormatInfo() = default;
-SharedImageBackingFactoryGLTexture::FormatInfo::~FormatInfo() = default;
-
} // namespace gpu
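
With the GLImage/scanout path gone, CreateSharedImageInternal above reduces to three plain GL allocation paths keyed off FormatInfo. A minimal sketch of that selection, assuming raw GL entry points and a trimmed-down FormatInfo; AllocateTexture and FormatInfoSketch are illustrative helpers, not part of the patch:

// Sketch only: the texture is assumed already created and bound to |target|.
struct FormatInfoSketch {
  bool supports_storage = false;   // TexStorage2D path available.
  bool is_compressed = false;      // CompressedTexImage2D path.
  GLenum storage_internal_format = 0;
  GLenum image_internal_format = 0;
  GLenum adjusted_format = 0;
  GLenum gl_type = 0;
};

void AllocateTexture(const FormatInfoSketch& info, GLenum target, int width,
                     int height, const void* pixels, size_t pixels_size) {
  if (info.supports_storage) {
    // Immutable storage: allocate once, then upload (if any) via
    // TexSubImage2D, since TexImage2D is not allowed afterwards.
    glTexStorage2DEXT(target, 1, info.storage_internal_format, width, height);
    if (pixels) {
      glTexSubImage2D(target, 0, 0, 0, width, height, info.adjusted_format,
                      info.gl_type, pixels);
    }
  } else if (info.is_compressed) {
    glCompressedTexImage2D(target, 0, info.image_internal_format, width,
                           height, 0, static_cast<GLsizei>(pixels_size),
                           pixels);
  } else {
    glTexImage2D(target, 0, info.image_internal_format, width, height, 0,
                 info.adjusted_format, info.gl_type, pixels);
  }
}

TexStorage2D gives immutable storage, which is why initial data goes through TexSubImage2D on that path rather than TexImage2D.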
diff --git a/chromium/gpu/command_buffer/service/shared_image_backing_factory_gl_texture.h b/chromium/gpu/command_buffer/service/shared_image_backing_factory_gl_texture.h
index df747b14f12..ed5a80645b0 100644
--- a/chromium/gpu/command_buffer/service/shared_image_backing_factory_gl_texture.h
+++ b/chromium/gpu/command_buffer/service/shared_image_backing_factory_gl_texture.h
@@ -7,15 +7,9 @@
#include <memory>
-#include "base/memory/scoped_refptr.h"
-#include "build/build_config.h"
#include "components/viz/common/resources/resource_format.h"
-#include "gpu/command_buffer/service/shared_image_backing_factory.h"
+#include "gpu/command_buffer/service/shared_image_backing_factory_gl_common.h"
#include "gpu/command_buffer/service/shared_image_backing_gl_common.h"
-#include "gpu/command_buffer/service/texture_manager.h"
-#include "gpu/gpu_gles2_export.h"
-#include "ui/gfx/buffer_types.h"
-#include "ui/gl/gl_bindings.h"
namespace gfx {
class Size;
@@ -28,27 +22,20 @@ class ProgressReporter;
namespace gpu {
class SharedImageBacking;
-class SharedImageBatchAccessManager;
class GpuDriverBugWorkarounds;
struct GpuFeatureInfo;
struct GpuPreferences;
struct Mailbox;
-class ImageFactory;
// Implementation of SharedImageBackingFactory that produces GL-texture backed
// SharedImages.
-// TODO(ericrk): Remove support for buffer / GLImage based backings and move
-// to its own type of backing.
class GPU_GLES2_EXPORT SharedImageBackingFactoryGLTexture
- : public SharedImageBackingFactory {
+ : public SharedImageBackingFactoryGLCommon {
public:
- SharedImageBackingFactoryGLTexture(
- const GpuPreferences& gpu_preferences,
- const GpuDriverBugWorkarounds& workarounds,
- const GpuFeatureInfo& gpu_feature_info,
- ImageFactory* image_factory,
- SharedImageBatchAccessManager* batch_access_manager,
- gl::ProgressReporter* progress_reporter);
+ SharedImageBackingFactoryGLTexture(const GpuPreferences& gpu_preferences,
+ const GpuDriverBugWorkarounds& workarounds,
+ const GpuFeatureInfo& gpu_feature_info,
+ gl::ProgressReporter* progress_reporter);
~SharedImageBackingFactoryGLTexture() override;
// SharedImageBackingFactory implementation.
@@ -83,8 +70,13 @@ class GPU_GLES2_EXPORT SharedImageBackingFactoryGLTexture
GrSurfaceOrigin surface_origin,
SkAlphaType alpha_type,
uint32_t usage) override;
- bool CanImportGpuMemoryBuffer(
- gfx::GpuMemoryBufferType memory_buffer_type) override;
+ bool IsSupported(uint32_t usage,
+ viz::ResourceFormat format,
+ bool thread_safe,
+ gfx::GpuMemoryBufferType gmb_type,
+ GrContextType gr_context_type,
+ bool* allow_legacy_mailbox,
+ bool is_pixel_used) override;
static std::unique_ptr<SharedImageBacking> CreateSharedImageForTest(
const Mailbox& mailbox,
@@ -96,24 +88,6 @@ class GPU_GLES2_EXPORT SharedImageBackingFactoryGLTexture
uint32_t usage);
private:
- scoped_refptr<gl::GLImage> MakeGLImage(int client_id,
- gfx::GpuMemoryBufferHandle handle,
- gfx::BufferFormat format,
- gfx::BufferPlane plane,
- SurfaceHandle surface_handle,
- const gfx::Size& size);
-
-  // This is meant to be used only on Android. Returns nullptr on other
-  // platforms.
- std::unique_ptr<SharedImageBacking> MakeEglImageBacking(
- const Mailbox& mailbox,
- viz::ResourceFormat format,
- const gfx::Size& size,
- const gfx::ColorSpace& color_space,
- GrSurfaceOrigin surface_origin,
- SkAlphaType alpha_type,
- uint32_t usage);
-
std::unique_ptr<SharedImageBacking> CreateSharedImageInternal(
const Mailbox& mailbox,
viz::ResourceFormat format,
@@ -124,66 +98,6 @@ class GPU_GLES2_EXPORT SharedImageBackingFactoryGLTexture
SkAlphaType alpha_type,
uint32_t usage,
base::span<const uint8_t> pixel_data);
-
- struct FormatInfo {
- FormatInfo();
- ~FormatInfo();
-
- // Whether this format is supported.
- bool enabled = false;
-
- // Whether this format supports TexStorage2D.
- bool supports_storage = false;
-
- // Whether to allow SHARED_IMAGE_USAGE_SCANOUT.
- bool allow_scanout = false;
-
- // Whether the texture is a compressed type.
- bool is_compressed = false;
-
- GLenum gl_format = 0;
- GLenum gl_type = 0;
- const gles2::Texture::CompatibilitySwizzle* swizzle = nullptr;
- GLenum adjusted_format = 0;
-
- // The internalformat portion of the format/type/internalformat triplet
- // used when calling TexImage2D
- GLuint image_internal_format = 0;
-
- // The internalformat portion of the format/type/internalformat triplet
- // used when calling TexStorage2D
- GLuint storage_internal_format = 0;
-
- // GL target to use for scanout images.
- GLenum target_for_scanout = GL_TEXTURE_2D;
-
- // BufferFormat for scanout images.
- gfx::BufferFormat buffer_format = gfx::BufferFormat::RGBA_8888;
-
- DISALLOW_COPY_AND_ASSIGN(FormatInfo);
- };
-
- // Whether we're using the passthrough command decoder and should generate
- // passthrough textures.
- bool use_passthrough_ = false;
-
- // Factory used to generate GLImages for SCANOUT backings.
- ImageFactory* image_factory_ = nullptr;
-
- FormatInfo format_info_[viz::RESOURCE_FORMAT_MAX + 1];
- GpuMemoryBufferFormatSet gpu_memory_buffer_formats_;
- int32_t max_texture_size_ = 0;
- bool texture_usage_angle_ = false;
- SharedImageBackingGLCommon::UnpackStateAttribs attribs;
- GpuDriverBugWorkarounds workarounds_;
-
- // Used to notify the watchdog before a buffer allocation in case it takes
- // long.
- gl::ProgressReporter* const progress_reporter_ = nullptr;
-
-#if defined(OS_ANDROID)
- SharedImageBatchAccessManager* batch_access_manager_ = nullptr;
-#endif
};
} // namespace gpu
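
CanImportGpuMemoryBuffer() is replaced by the richer IsSupported() query, which lets a factory veto a request up front and report whether a legacy mailbox may be produced for it. A hedged sketch of how a caller could select a factory with it; PickFactory and the factories vector are illustrative, not Chromium API:

// First factory that accepts the request wins; nullptr if none does.
SharedImageBackingFactory* PickFactory(
    const std::vector<SharedImageBackingFactory*>& factories,
    uint32_t usage,
    viz::ResourceFormat format,
    bool thread_safe,
    gfx::GpuMemoryBufferType gmb_type,
    GrContextType gr_context_type,
    bool is_pixel_used,
    bool* allow_legacy_mailbox) {
  for (SharedImageBackingFactory* factory : factories) {
    if (factory->IsSupported(usage, format, thread_safe, gmb_type,
                             gr_context_type, allow_legacy_mailbox,
                             is_pixel_used)) {
      return factory;
    }
  }
  return nullptr;
}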
diff --git a/chromium/gpu/command_buffer/service/shared_image_backing_factory_gl_texture_unittest.cc b/chromium/gpu/command_buffer/service/shared_image_backing_factory_gl_texture_unittest.cc
index be739ac9179..722d2356401 100644
--- a/chromium/gpu/command_buffer/service/shared_image_backing_factory_gl_texture_unittest.cc
+++ b/chromium/gpu/command_buffer/service/shared_image_backing_factory_gl_texture_unittest.cc
@@ -11,7 +11,6 @@
#include "build/build_config.h"
#include "components/viz/common/resources/resource_format_utils.h"
#include "components/viz/common/resources/resource_sizes.h"
-#include "gpu/command_buffer/common/gpu_memory_buffer_support.h"
#include "gpu/command_buffer/common/mailbox.h"
#include "gpu/command_buffer/common/shared_image_usage.h"
#include "gpu/command_buffer/service/mailbox_manager_impl.h"
@@ -32,17 +31,11 @@
#include "testing/gtest/include/gtest/gtest.h"
#include "third_party/abseil-cpp/absl/types/optional.h"
#include "third_party/skia/include/core/SkPromiseImageTexture.h"
-#include "third_party/skia/include/core/SkSurface.h"
#include "third_party/skia/include/gpu/GrBackendSemaphore.h"
#include "third_party/skia/include/gpu/GrBackendSurface.h"
#include "third_party/skia/include/gpu/GrDirectContext.h"
#include "ui/gfx/buffer_format_util.h"
#include "ui/gfx/color_space.h"
-#include "ui/gl/buffer_format_utils.h"
-#include "ui/gl/gl_bindings.h"
-#include "ui/gl/gl_context.h"
-#include "ui/gl/gl_image_shared_memory.h"
-#include "ui/gl/gl_image_stub.h"
#include "ui/gl/gl_surface.h"
#include "ui/gl/init/gl_factory.h"
#include "ui/gl/progress_reporter.h"
@@ -75,14 +68,6 @@ void CreateSharedContext(const GpuDriverBugWorkarounds& workarounds,
context_state->InitializeGL(GpuPreferences(), feature_info);
}
-bool IsAndroid() {
-#if defined(OS_ANDROID)
- return true;
-#else
- return false;
-#endif
-}
-
class MockProgressReporter : public gl::ProgressReporter {
public:
MockProgressReporter() = default;
@@ -95,10 +80,10 @@ class MockProgressReporter : public gl::ProgressReporter {
class SharedImageBackingFactoryGLTextureTestBase
: public testing::TestWithParam<std::tuple<bool, viz::ResourceFormat>> {
public:
- SharedImageBackingFactoryGLTextureTestBase(bool is_thread_safe)
+ explicit SharedImageBackingFactoryGLTextureTestBase(bool is_thread_safe)
: shared_image_manager_(
std::make_unique<SharedImageManager>(is_thread_safe)) {}
- ~SharedImageBackingFactoryGLTextureTestBase() {
+ ~SharedImageBackingFactoryGLTextureTestBase() override {
// |context_state_| must be destroyed on its own context.
context_state_->MakeCurrent(surface_.get(), true /* needs_gl */);
}
@@ -117,8 +102,7 @@ class SharedImageBackingFactoryGLTextureTestBase
GpuPreferences preferences;
preferences.use_passthrough_cmd_decoder = use_passthrough();
backing_factory_ = std::make_unique<SharedImageBackingFactoryGLTexture>(
- preferences, workarounds, GpuFeatureInfo(), factory,
- shared_image_manager_->batch_access_manager(), &progress_reporter_);
+ preferences, workarounds, GpuFeatureInfo(), &progress_reporter_);
memory_type_tracker_ = std::make_unique<MemoryTypeTracker>(nullptr);
shared_image_representation_factory_ =
@@ -141,19 +125,8 @@ class SharedImageBackingFactoryGLTextureTestBase
return true;
}
- bool can_create_scanout_or_gmb_shared_image(
- viz::ResourceFormat format) const {
- if (format == viz::ResourceFormat::BGRA_1010102)
- return supports_ar30_;
- else if (format == viz::ResourceFormat::RGBA_1010102)
- return supports_ab30_;
- return true;
- }
-
viz::ResourceFormat get_format() { return std::get<1>(GetParam()); }
- GrDirectContext* gr_context() { return context_state_->gr_context(); }
-
protected:
::testing::NiceMock<MockProgressReporter> progress_reporter_;
scoped_refptr<gl::GLSurface> surface_;
@@ -185,58 +158,6 @@ class SharedImageBackingFactoryGLTextureTest
TextureImageFactory image_factory_;
};
-class SharedImageBackingFactoryGLTextureThreadSafeTest
- : public SharedImageBackingFactoryGLTextureTestBase {
- public:
- SharedImageBackingFactoryGLTextureThreadSafeTest()
- : SharedImageBackingFactoryGLTextureTestBase(true) {}
- ~SharedImageBackingFactoryGLTextureThreadSafeTest() {
- // |context_state2_| must be destroyed on its own context.
- context_state2_->MakeCurrent(surface2_.get(), true /* needs_gl */);
- }
- void SetUp() override {
- GpuDriverBugWorkarounds workarounds;
- workarounds.max_texture_size = INT_MAX - 1;
- SetUpBase(workarounds, &image_factory_);
-
- // Create 2nd context/context_state which are not part of same shared group.
- scoped_refptr<gles2::FeatureInfo> feature_info;
- CreateSharedContext(workarounds, surface2_, context2_, context_state2_,
- feature_info);
- feature_info.reset();
- }
-
- protected:
- scoped_refptr<gl::GLSurface> surface2_;
- scoped_refptr<gl::GLContext> context2_;
- scoped_refptr<SharedContextState> context_state2_;
- TextureImageFactory image_factory_;
-};
-
-class CreateAndValidateSharedImageRepresentations {
- public:
- CreateAndValidateSharedImageRepresentations(
- SharedImageBackingFactoryGLTexture* backing_factory,
- viz::ResourceFormat format,
- bool is_thread_safe,
- gles2::MailboxManagerImpl* mailbox_manager,
- SharedImageManager* shared_image_manager,
- MemoryTypeTracker* memory_type_tracker,
- SharedImageRepresentationFactory* shared_image_representation_factory,
- SharedContextState* context_state);
- ~CreateAndValidateSharedImageRepresentations();
-
- gfx::Size size() { return size_; }
- Mailbox mailbox() { return mailbox_; }
-
- private:
- gles2::MailboxManagerImpl* mailbox_manager_;
- gfx::Size size_;
- Mailbox mailbox_;
- std::unique_ptr<SharedImageBacking> backing_;
- std::unique_ptr<SharedImageRepresentationFactoryRef> shared_image_;
-};
-
TEST_P(SharedImageBackingFactoryGLTextureTest, Basic) {
// TODO(jonahr): Test fails on Mac with ANGLE/passthrough
// (crbug.com/1100975)
@@ -370,167 +291,6 @@ TEST_P(SharedImageBackingFactoryGLTextureTest, Basic) {
EXPECT_FALSE(mailbox_manager_.ConsumeTexture(mailbox));
}
-TEST_P(SharedImageBackingFactoryGLTextureTest, Image) {
- // TODO(jonahr): Test crashes on Mac with ANGLE/passthrough
- // (crbug.com/1100975)
- gpu::GPUTestBotConfig bot_config;
- if (bot_config.LoadCurrentConfig(nullptr) &&
- bot_config.Matches("mac passthrough")) {
- return;
- }
-
- const bool should_succeed =
- can_create_scanout_or_gmb_shared_image(get_format());
- if (should_succeed)
- EXPECT_CALL(progress_reporter_, ReportProgress).Times(AtLeast(1));
- auto mailbox = Mailbox::GenerateForSharedImage();
- auto format = get_format();
- gfx::Size size(256, 256);
- auto color_space = gfx::ColorSpace::CreateSRGB();
- GrSurfaceOrigin surface_origin = kTopLeft_GrSurfaceOrigin;
- SkAlphaType alpha_type = kPremul_SkAlphaType;
- uint32_t usage = SHARED_IMAGE_USAGE_SCANOUT;
- gpu::SurfaceHandle surface_handle = gpu::kNullSurfaceHandle;
- auto backing = backing_factory_->CreateSharedImage(
- mailbox, format, surface_handle, size, color_space, surface_origin,
- alpha_type, usage, false /* is_thread_safe */);
-
- if (!should_succeed) {
- EXPECT_FALSE(backing);
- return;
- }
- ASSERT_TRUE(backing);
- ::testing::Mock::VerifyAndClearExpectations(&progress_reporter_);
-
- // Check clearing.
- if (!backing->IsCleared()) {
- backing->SetCleared();
- EXPECT_TRUE(backing->IsCleared());
- }
-
- // First, validate via a legacy mailbox.
- EXPECT_TRUE(backing->ProduceLegacyMailbox(&mailbox_manager_));
- TextureBase* texture_base = mailbox_manager_.ConsumeTexture(mailbox);
- ASSERT_TRUE(texture_base);
- GLenum target = texture_base->target();
- scoped_refptr<gl::GLImage> image;
- if (use_passthrough()) {
- auto* texture = static_cast<gles2::TexturePassthrough*>(texture_base);
- image = texture->GetLevelImage(target, 0);
- } else {
- auto* texture = static_cast<gles2::Texture*>(texture_base);
- image = texture->GetLevelImage(target, 0);
- }
- ASSERT_TRUE(image);
- EXPECT_EQ(size, image->GetSize());
-
- // Next, validate via a SharedImageRepresentationGLTexture.
- std::unique_ptr<SharedImageRepresentationFactoryRef> shared_image =
- shared_image_manager_->Register(std::move(backing),
- memory_type_tracker_.get());
- EXPECT_TRUE(shared_image);
- if (!use_passthrough()) {
- auto gl_representation =
- shared_image_representation_factory_->ProduceGLTexture(mailbox);
- EXPECT_TRUE(gl_representation);
- EXPECT_TRUE(gl_representation->GetTexture()->service_id());
- EXPECT_EQ(size, gl_representation->size());
- EXPECT_EQ(format, gl_representation->format());
- EXPECT_EQ(color_space, gl_representation->color_space());
- EXPECT_EQ(usage, gl_representation->usage());
- gl_representation.reset();
-
- auto gl_representation_rgb =
- shared_image_representation_factory_->ProduceRGBEmulationGLTexture(
- mailbox);
- EXPECT_TRUE(gl_representation_rgb);
- EXPECT_TRUE(gl_representation_rgb->GetTexture()->service_id());
- EXPECT_EQ(size, gl_representation_rgb->size());
- EXPECT_EQ(format, gl_representation_rgb->format());
- EXPECT_EQ(color_space, gl_representation_rgb->color_space());
- EXPECT_EQ(usage, gl_representation_rgb->usage());
- gl_representation_rgb.reset();
- }
-
- // Next, validate a SharedImageRepresentationGLTexturePassthrough.
- if (use_passthrough()) {
- auto gl_representation =
- shared_image_representation_factory_->ProduceGLTexturePassthrough(
- mailbox);
- EXPECT_TRUE(gl_representation);
- EXPECT_TRUE(gl_representation->GetTexturePassthrough()->service_id());
- EXPECT_EQ(size, gl_representation->size());
- EXPECT_EQ(format, gl_representation->format());
- EXPECT_EQ(color_space, gl_representation->color_space());
- EXPECT_EQ(usage, gl_representation->usage());
- gl_representation.reset();
- }
-
- // Finally, validate a SharedImageRepresentationSkia.
- auto skia_representation = shared_image_representation_factory_->ProduceSkia(
- mailbox, context_state_.get());
- EXPECT_TRUE(skia_representation);
- std::vector<GrBackendSemaphore> begin_semaphores;
- std::vector<GrBackendSemaphore> end_semaphores;
- std::unique_ptr<SharedImageRepresentationSkia::ScopedWriteAccess>
- scoped_write_access;
- scoped_write_access = skia_representation->BeginScopedWriteAccess(
- &begin_semaphores, &end_semaphores,
- SharedImageRepresentation::AllowUnclearedAccess::kYes);
- auto* surface = scoped_write_access->surface();
- EXPECT_TRUE(surface);
- EXPECT_EQ(size.width(), surface->width());
- EXPECT_EQ(size.height(), surface->height());
- scoped_write_access.reset();
-
- std::unique_ptr<SharedImageRepresentationSkia::ScopedReadAccess>
- scoped_read_access;
- scoped_read_access = skia_representation->BeginScopedReadAccess(
- &begin_semaphores, &end_semaphores);
- auto* promise_texture = scoped_read_access->promise_image_texture();
- EXPECT_TRUE(promise_texture);
- EXPECT_TRUE(begin_semaphores.empty());
- EXPECT_TRUE(end_semaphores.empty());
- if (promise_texture) {
- GrBackendTexture backend_texture = promise_texture->backendTexture();
- EXPECT_TRUE(backend_texture.isValid());
- EXPECT_EQ(size.width(), backend_texture.width());
- EXPECT_EQ(size.height(), backend_texture.height());
- }
- scoped_read_access.reset();
- skia_representation.reset();
-
- shared_image.reset();
- EXPECT_FALSE(mailbox_manager_.ConsumeTexture(mailbox));
-
- if (!use_passthrough() &&
- context_state_->feature_info()->feature_flags().ext_texture_rg) {
- EXPECT_CALL(progress_reporter_, ReportProgress).Times(AtLeast(1));
-    // Create an R-8 image texture, and check that the internal_format is that
- // of the image (GL_RGBA for TextureImageFactory). This only matters for
- // the validating decoder.
- auto format = viz::ResourceFormat::RED_8;
- gpu::SurfaceHandle surface_handle = gpu::kNullSurfaceHandle;
- backing = backing_factory_->CreateSharedImage(
- mailbox, format, surface_handle, size, color_space, surface_origin,
- alpha_type, usage, false /* is_thread_safe */);
- EXPECT_TRUE(backing);
- shared_image = shared_image_manager_->Register(std::move(backing),
- memory_type_tracker_.get());
- auto gl_representation =
- shared_image_representation_factory_->ProduceGLTexture(mailbox);
- ASSERT_TRUE(gl_representation);
- gles2::Texture* texture = gl_representation->GetTexture();
- ASSERT_TRUE(texture);
- GLenum type = 0;
- GLenum internal_format = 0;
- EXPECT_TRUE(texture->GetLevelType(target, 0, &type, &internal_format));
- EXPECT_EQ(internal_format, static_cast<GLenum>(GL_RGBA));
- gl_representation.reset();
- shared_image.reset();
- }
-}
-
TEST_P(SharedImageBackingFactoryGLTextureTest, InitialData) {
  // TODO(andrescj): these loops over the formats can be replaced by test
// parameters.
@@ -597,8 +357,7 @@ TEST_P(SharedImageBackingFactoryGLTextureTest, InitialData) {
}
TEST_P(SharedImageBackingFactoryGLTextureTest, InitialDataImage) {
- const bool should_succeed =
- can_create_scanout_or_gmb_shared_image(get_format());
+ const bool should_succeed = can_create_non_scanout_shared_image(get_format());
if (should_succeed)
EXPECT_CALL(progress_reporter_, ReportProgress).Times(AtLeast(1));
auto mailbox = Mailbox::GenerateForSharedImage();
@@ -607,7 +366,7 @@ TEST_P(SharedImageBackingFactoryGLTextureTest, InitialDataImage) {
auto color_space = gfx::ColorSpace::CreateSRGB();
GrSurfaceOrigin surface_origin = kTopLeft_GrSurfaceOrigin;
SkAlphaType alpha_type = kPremul_SkAlphaType;
- uint32_t usage = SHARED_IMAGE_USAGE_GLES2 | SHARED_IMAGE_USAGE_SCANOUT;
+ uint32_t usage = SHARED_IMAGE_USAGE_GLES2;
std::vector<uint8_t> initial_data(256 * 256 * 4);
auto backing = backing_factory_->CreateSharedImage(
mailbox, format, size, color_space, surface_origin, alpha_type, usage,
@@ -783,473 +542,6 @@ TEST_P(SharedImageBackingFactoryGLTextureTest, TexImageTexStorageEquivalence) {
}
}
-class StubImage : public gl::GLImageStub {
- public:
- StubImage(const gfx::Size& size, gfx::BufferFormat format)
- : size_(size), format_(format) {}
-
- gfx::Size GetSize() override { return size_; }
- unsigned GetInternalFormat() override {
- return gl::BufferFormatToGLInternalFormat(format_);
- }
- unsigned GetDataType() override {
- return gl::BufferFormatToGLDataType(format_);
- }
-
- BindOrCopy ShouldBindOrCopy() override { return BIND; }
-
- bool BindTexImage(unsigned target) override {
- if (!bound_) {
- bound_ = true;
- ++update_counter_;
- }
- return true;
- }
-
- bool BindTexImageWithInternalformat(unsigned target,
- unsigned internal_format) override {
- internal_format_ = internal_format;
- if (!bound_) {
- bound_ = true;
- ++update_counter_;
- }
- return true;
- }
-
- void ReleaseTexImage(unsigned target) override { bound_ = false; }
-
- bool bound() const { return bound_; }
- int update_counter() const { return update_counter_; }
- unsigned internal_format() const { return internal_format_; }
-
- private:
- ~StubImage() override = default;
-
- gfx::Size size_;
- gfx::BufferFormat format_;
- bool bound_ = false;
- int update_counter_ = 0;
- unsigned internal_format_ = GL_RGBA;
-};
-
-class SharedImageBackingFactoryGLTextureWithGMBTest
- : public SharedImageBackingFactoryGLTextureTestBase,
- public gpu::ImageFactory {
- public:
- SharedImageBackingFactoryGLTextureWithGMBTest()
- : SharedImageBackingFactoryGLTextureTestBase(false) {}
- void SetUp() override { SetUpBase(GpuDriverBugWorkarounds(), this); }
-
- scoped_refptr<gl::GLImage> GetImageFromMailbox(Mailbox mailbox) {
- if (!use_passthrough()) {
- auto representation =
- shared_image_representation_factory_->ProduceGLTexture(mailbox);
- DCHECK(representation);
- return representation->GetTexture()->GetLevelImage(GL_TEXTURE_2D, 0);
- } else {
- auto representation =
- shared_image_representation_factory_->ProduceGLTexturePassthrough(
- mailbox);
- DCHECK(representation);
- return representation->GetTexturePassthrough()->GetLevelImage(
- GL_TEXTURE_2D, 0);
- }
- }
-
- protected:
- // gpu::ImageFactory implementation.
- scoped_refptr<gl::GLImage> CreateImageForGpuMemoryBuffer(
- gfx::GpuMemoryBufferHandle handle,
- const gfx::Size& size,
- gfx::BufferFormat format,
- gfx::BufferPlane plane,
- int client_id,
- gpu::SurfaceHandle surface_handle) override {
-    // Pretend to handle NATIVE_PIXMAP types.
- if (handle.type != gfx::NATIVE_PIXMAP)
- return nullptr;
- if (client_id != kClientId)
- return nullptr;
- return base::MakeRefCounted<StubImage>(size, format);
- }
-
- static constexpr int kClientId = 3;
-};
-
-TEST_P(SharedImageBackingFactoryGLTextureWithGMBTest,
- GpuMemoryBufferImportEmpty) {
- auto mailbox = Mailbox::GenerateForSharedImage();
- gfx::Size size(256, 256);
- gfx::BufferFormat format = viz::BufferFormat(get_format());
- auto color_space = gfx::ColorSpace::CreateSRGB();
- GrSurfaceOrigin surface_origin = kTopLeft_GrSurfaceOrigin;
- SkAlphaType alpha_type = kPremul_SkAlphaType;
- uint32_t usage = SHARED_IMAGE_USAGE_GLES2;
-
- gfx::GpuMemoryBufferHandle handle;
- auto backing = backing_factory_->CreateSharedImage(
- mailbox, kClientId, std::move(handle), format, gfx::BufferPlane::DEFAULT,
- kNullSurfaceHandle, size, color_space, surface_origin, alpha_type, usage);
- EXPECT_FALSE(backing);
-}
-
-TEST_P(SharedImageBackingFactoryGLTextureWithGMBTest,
- GpuMemoryBufferImportNative) {
- // TODO(jonahr): Test crashes on Mac with ANGLE/passthrough
- // (crbug.com/1100975)
- gpu::GPUTestBotConfig bot_config;
- if (bot_config.LoadCurrentConfig(nullptr) &&
- bot_config.Matches("mac passthrough")) {
- return;
- }
- auto mailbox = Mailbox::GenerateForSharedImage();
- gfx::Size size(256, 256);
- gfx::BufferFormat format = viz::BufferFormat(get_format());
- auto color_space = gfx::ColorSpace::CreateSRGB();
- GrSurfaceOrigin surface_origin = kTopLeft_GrSurfaceOrigin;
- SkAlphaType alpha_type = kPremul_SkAlphaType;
- uint32_t usage = SHARED_IMAGE_USAGE_GLES2;
-
- gfx::GpuMemoryBufferHandle handle;
- handle.type = gfx::NATIVE_PIXMAP;
- auto backing = backing_factory_->CreateSharedImage(
- mailbox, kClientId, std::move(handle), format, gfx::BufferPlane::DEFAULT,
- kNullSurfaceHandle, size, color_space, surface_origin, alpha_type, usage);
- if (!can_create_scanout_or_gmb_shared_image(get_format())) {
- EXPECT_FALSE(backing);
- return;
- }
- ASSERT_TRUE(backing);
-
- std::unique_ptr<SharedImageRepresentationFactoryRef> ref =
- shared_image_manager_->Register(std::move(backing),
- memory_type_tracker_.get());
- scoped_refptr<gl::GLImage> image = GetImageFromMailbox(mailbox);
- ASSERT_EQ(image->GetType(), gl::GLImage::Type::NONE);
- auto* stub_image = static_cast<StubImage*>(image.get());
- EXPECT_FALSE(stub_image->bound());
- int update_counter = stub_image->update_counter();
- ref->Update(nullptr);
- EXPECT_EQ(stub_image->update_counter(), update_counter);
- EXPECT_FALSE(stub_image->bound());
-
- {
- auto skia_representation =
- shared_image_representation_factory_->ProduceSkia(mailbox,
- context_state_);
- std::vector<GrBackendSemaphore> begin_semaphores;
- std::vector<GrBackendSemaphore> end_semaphores;
- std::unique_ptr<SharedImageRepresentationSkia::ScopedReadAccess>
- scoped_read_access;
- skia_representation->BeginScopedReadAccess(&begin_semaphores,
- &end_semaphores);
- }
- EXPECT_TRUE(stub_image->bound());
- EXPECT_GT(stub_image->update_counter(), update_counter);
-}
-
-TEST_P(SharedImageBackingFactoryGLTextureWithGMBTest,
- GpuMemoryBufferImportSharedMemory) {
- auto mailbox = Mailbox::GenerateForSharedImage();
- gfx::Size size(256, 256);
- gfx::BufferFormat format = viz::BufferFormat(get_format());
- auto color_space = gfx::ColorSpace::CreateSRGB();
- GrSurfaceOrigin surface_origin = kTopLeft_GrSurfaceOrigin;
- SkAlphaType alpha_type = kPremul_SkAlphaType;
- uint32_t usage = SHARED_IMAGE_USAGE_GLES2;
-
- size_t shm_size = 0u;
- ASSERT_TRUE(gfx::BufferSizeForBufferFormatChecked(size, format, &shm_size));
- gfx::GpuMemoryBufferHandle handle;
- handle.type = gfx::SHARED_MEMORY_BUFFER;
- handle.region = base::UnsafeSharedMemoryRegion::Create(shm_size);
- ASSERT_TRUE(handle.region.IsValid());
- handle.offset = 0;
- handle.stride = static_cast<int32_t>(
- gfx::RowSizeForBufferFormat(size.width(), format, 0));
-
- auto backing = backing_factory_->CreateSharedImage(
- mailbox, kClientId, std::move(handle), format, gfx::BufferPlane::DEFAULT,
- kNullSurfaceHandle, size, color_space, surface_origin, alpha_type, usage);
- if (!can_create_scanout_or_gmb_shared_image(get_format())) {
- EXPECT_FALSE(backing);
- return;
- }
- ASSERT_TRUE(backing);
-
- std::unique_ptr<SharedImageRepresentationFactoryRef> ref =
- shared_image_manager_->Register(std::move(backing),
- memory_type_tracker_.get());
- scoped_refptr<gl::GLImage> image = GetImageFromMailbox(mailbox);
- ASSERT_EQ(image->GetType(), gl::GLImage::Type::MEMORY);
- auto* shm_image = static_cast<gl::GLImageSharedMemory*>(image.get());
- EXPECT_EQ(size, shm_image->GetSize());
- EXPECT_EQ(format, shm_image->format());
-}
-
-TEST_P(SharedImageBackingFactoryGLTextureWithGMBTest,
- GpuMemoryBufferImportNative_WithRGBEmulation) {
- if (use_passthrough())
- return;
- auto mailbox = Mailbox::GenerateForSharedImage();
- gfx::Size size(256, 256);
- gfx::BufferFormat format = viz::BufferFormat(get_format());
- auto color_space = gfx::ColorSpace::CreateSRGB();
- GrSurfaceOrigin surface_origin = kTopLeft_GrSurfaceOrigin;
- SkAlphaType alpha_type = kPremul_SkAlphaType;
- uint32_t usage = SHARED_IMAGE_USAGE_GLES2;
-
- gfx::GpuMemoryBufferHandle handle;
- handle.type = gfx::NATIVE_PIXMAP;
- auto backing = backing_factory_->CreateSharedImage(
- mailbox, kClientId, std::move(handle), format, gfx::BufferPlane::DEFAULT,
- kNullSurfaceHandle, size, color_space, surface_origin, alpha_type, usage);
- if (!can_create_scanout_or_gmb_shared_image(get_format())) {
- EXPECT_FALSE(backing);
- return;
- }
- ASSERT_TRUE(backing);
-
- std::unique_ptr<SharedImageRepresentationFactoryRef> ref =
- shared_image_manager_->Register(std::move(backing),
- memory_type_tracker_.get());
-
- auto representation =
- shared_image_representation_factory_->ProduceRGBEmulationGLTexture(
- mailbox);
- EXPECT_TRUE(representation);
- EXPECT_TRUE(representation->GetTexture()->service_id());
- EXPECT_EQ(size, representation->size());
- EXPECT_EQ(get_format(), representation->format());
- EXPECT_EQ(color_space, representation->color_space());
- EXPECT_EQ(usage, representation->usage());
-
- scoped_refptr<gl::GLImage> image =
- representation->GetTexture()->GetLevelImage(GL_TEXTURE_2D, 0);
- ASSERT_EQ(image->GetType(), gl::GLImage::Type::NONE);
- auto* stub_image = static_cast<StubImage*>(image.get());
- EXPECT_EQ(stub_image->internal_format(), (unsigned)GL_RGB);
- EXPECT_TRUE(stub_image->bound());
- EXPECT_EQ(stub_image->update_counter(), 1);
-}
-
-// The intent of this test is to create a thread-safe backing and verify that
-// all representations work.
-TEST_P(SharedImageBackingFactoryGLTextureThreadSafeTest, BasicThreadSafe) {
- // SharedImageBackingFactoryGLTextureThreadSafeTest tests are only meant for
- // android platform.
- if (!IsAndroid())
- return;
-
- CreateAndValidateSharedImageRepresentations shared_image(
- backing_factory_.get(), get_format(), true /* is_thread_safe */,
- &mailbox_manager_, shared_image_manager_.get(),
- memory_type_tracker_.get(), shared_image_representation_factory_.get(),
- context_state_.get());
-}
-
-// The intent of this test is to exercise the shared image mailbox system from
-// two threads, each running its own GL context, where the contexts are not
-// part of the same share group. One thread writes to the backing and the
-// other thread reads from it.
-TEST_P(SharedImageBackingFactoryGLTextureThreadSafeTest, OneWriterOneReader) {
- if (!IsAndroid())
- return;
-
- // Create it on 1st SharedContextState |context_state_|.
- CreateAndValidateSharedImageRepresentations shared_image(
- backing_factory_.get(), get_format(), true /* is_thread_safe */,
- &mailbox_manager_, shared_image_manager_.get(),
- memory_type_tracker_.get(), shared_image_representation_factory_.get(),
- context_state_.get());
-
- auto mailbox = shared_image.mailbox();
- auto size = shared_image.size();
-
-  // Writer: create a GLTexture representation of the backing and clear it to
-  // green.
- auto gl_representation =
- shared_image_representation_factory_->ProduceGLTexture(mailbox);
- EXPECT_TRUE(gl_representation);
-
- // Begin writing to the underlying texture of the backing via ScopedAccess.
- std::unique_ptr<SharedImageRepresentationGLTexture::ScopedAccess>
- writer_scoped_access = gl_representation->BeginScopedAccess(
- GL_SHARED_IMAGE_ACCESS_MODE_READ_CHROMIUM,
- SharedImageRepresentation::AllowUnclearedAccess::kNo);
-
- DCHECK(writer_scoped_access);
-
- // Create an FBO.
- GLuint fbo = 0;
- gl::GLApi* api = gl::g_current_gl_context;
- api->glGenFramebuffersEXTFn(1, &fbo);
- api->glBindFramebufferEXTFn(GL_FRAMEBUFFER, fbo);
-
- // Attach the texture to FBO.
- api->glFramebufferTexture2DEXTFn(
- GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0,
- gl_representation->GetTexture()->target(),
- gl_representation->GetTexture()->service_id(), 0);
-
- // Set the clear color to green.
- api->glClearColorFn(0.0f, 1.0f, 0.0f, 1.0f);
- api->glClearFn(GL_COLOR_BUFFER_BIT);
- gl_representation->GetTexture()->SetLevelCleared(
- gl_representation->GetTexture()->target(), 0, true);
-
- // End writing.
- writer_scoped_access.reset();
- gl_representation.reset();
-
-  // Read from the backing on a separate thread. The read goes through a
-  // SkiaGLRepresentation: ReadPixels() produces a SkiaGLRepresentation, which
-  // in GL mode wraps a GLTextureRepresentation. Hence testing reads via
-  // SkiaGLRepresentation is equivalent to testing them via
-  // GLTextureRepresentation.
- std::vector<uint8_t> dst_pixels;
-
- // Launch 2nd thread.
- std::thread second_thread([&]() {
- // Do ReadPixels() on 2nd SharedContextState |context_state2_|.
- dst_pixels = ReadPixels(mailbox, size, context_state2_.get(),
- shared_image_representation_factory_.get());
- });
-
-  // Wait for the second thread to finish.
- second_thread.join();
-
- // Compare the pixel values.
- EXPECT_EQ(dst_pixels[0], 0);
- EXPECT_EQ(dst_pixels[1], 255);
- EXPECT_EQ(dst_pixels[2], 0);
- EXPECT_EQ(dst_pixels[3], 255);
-}
-
-CreateAndValidateSharedImageRepresentations::
- CreateAndValidateSharedImageRepresentations(
- SharedImageBackingFactoryGLTexture* backing_factory,
- viz::ResourceFormat format,
- bool is_thread_safe,
- gles2::MailboxManagerImpl* mailbox_manager,
- SharedImageManager* shared_image_manager,
- MemoryTypeTracker* memory_type_tracker,
- SharedImageRepresentationFactory* shared_image_representation_factory,
- SharedContextState* context_state)
- : mailbox_manager_(mailbox_manager), size_(256, 256) {
- // Make the context current.
- DCHECK(context_state);
- EXPECT_TRUE(
- context_state->MakeCurrent(context_state->surface(), true /* needs_gl*/));
- mailbox_ = Mailbox::GenerateForSharedImage();
- auto color_space = gfx::ColorSpace::CreateSRGB();
- GrSurfaceOrigin surface_origin = kTopLeft_GrSurfaceOrigin;
- SkAlphaType alpha_type = kPremul_SkAlphaType;
- gpu::SurfaceHandle surface_handle = gpu::kNullSurfaceHandle;
-
-  // SHARED_IMAGE_USAGE_DISPLAY for Skia reads and SHARED_IMAGE_USAGE_RASTER
-  // for Skia writes.
- uint32_t usage = SHARED_IMAGE_USAGE_GLES2 | SHARED_IMAGE_USAGE_RASTER;
- if (!is_thread_safe)
- usage |= SHARED_IMAGE_USAGE_DISPLAY;
- backing_ = backing_factory->CreateSharedImage(
- mailbox_, format, surface_handle, size_, color_space, surface_origin,
- alpha_type, usage, is_thread_safe);
-
- // As long as either |chromium_image_ar30| or |chromium_image_ab30| is
- // enabled, we can create a non-scanout SharedImage with format
- // viz::ResourceFormat::{BGRA,RGBA}_1010102.
- const bool supports_ar30 =
- context_state->feature_info()->feature_flags().chromium_image_ar30;
- const bool supports_ab30 =
- context_state->feature_info()->feature_flags().chromium_image_ab30;
- if ((format == viz::ResourceFormat::BGRA_1010102 ||
- format == viz::ResourceFormat::RGBA_1010102) &&
- !supports_ar30 && !supports_ab30) {
- EXPECT_FALSE(backing_);
- return;
- }
- EXPECT_TRUE(backing_);
- if (!backing_)
- return;
-
- // Check clearing.
- if (!backing_->IsCleared()) {
- backing_->SetCleared();
- EXPECT_TRUE(backing_->IsCleared());
- }
-
- GLenum expected_target = GL_TEXTURE_2D;
- shared_image_ =
- shared_image_manager->Register(std::move(backing_), memory_type_tracker);
-
- // Create and validate GLTexture representation.
- auto gl_representation =
- shared_image_representation_factory->ProduceGLTexture(mailbox_);
-
- EXPECT_TRUE(gl_representation);
- EXPECT_TRUE(gl_representation->GetTexture()->service_id());
- EXPECT_EQ(expected_target, gl_representation->GetTexture()->target());
- EXPECT_EQ(size_, gl_representation->size());
- EXPECT_EQ(format, gl_representation->format());
- EXPECT_EQ(color_space, gl_representation->color_space());
- EXPECT_EQ(usage, gl_representation->usage());
- gl_representation.reset();
-
- // Create and Validate Skia Representations.
- auto skia_representation =
- shared_image_representation_factory->ProduceSkia(mailbox_, context_state);
- EXPECT_TRUE(skia_representation);
- std::vector<GrBackendSemaphore> begin_semaphores;
- std::vector<GrBackendSemaphore> end_semaphores;
-
- std::unique_ptr<SharedImageRepresentationSkia::ScopedWriteAccess>
- scoped_write_access;
- scoped_write_access = skia_representation->BeginScopedWriteAccess(
- &begin_semaphores, &end_semaphores,
- SharedImageRepresentation::AllowUnclearedAccess::kNo);
- // We use |supports_ar30| and |supports_ab30| to detect RGB10A2/BGR10A2
- // support. It's possible Skia might support these formats even if the Chrome
- // feature flags are false. We just check here that the feature flags don't
- // allow Chrome to do something that Skia doesn't support.
- if ((format != viz::ResourceFormat::BGRA_1010102 || supports_ar30) &&
- (format != viz::ResourceFormat::RGBA_1010102 || supports_ab30)) {
- EXPECT_TRUE(scoped_write_access);
- if (!scoped_write_access)
- return;
- auto* surface = scoped_write_access->surface();
- EXPECT_TRUE(surface);
- if (!surface)
- return;
- EXPECT_EQ(size_.width(), surface->width());
- EXPECT_EQ(size_.height(), surface->height());
- }
- EXPECT_TRUE(begin_semaphores.empty());
- EXPECT_TRUE(end_semaphores.empty());
- scoped_write_access.reset();
-
- std::unique_ptr<SharedImageRepresentationSkia::ScopedReadAccess>
- scoped_read_access;
- scoped_read_access = skia_representation->BeginScopedReadAccess(
- &begin_semaphores, &end_semaphores);
- auto* promise_texture = scoped_read_access->promise_image_texture();
- EXPECT_TRUE(promise_texture);
- EXPECT_TRUE(begin_semaphores.empty());
- EXPECT_TRUE(end_semaphores.empty());
- GrBackendTexture backend_texture = promise_texture->backendTexture();
- EXPECT_TRUE(backend_texture.isValid());
- EXPECT_EQ(size_.width(), backend_texture.width());
- EXPECT_EQ(size_.height(), backend_texture.height());
- scoped_read_access.reset();
- skia_representation.reset();
-}
-
-CreateAndValidateSharedImageRepresentations::
- ~CreateAndValidateSharedImageRepresentations() {
- shared_image_.reset();
- EXPECT_FALSE(mailbox_manager_->ConsumeTexture(mailbox_));
-}
-
#if !defined(OS_ANDROID)
const auto kResourceFormats =
::testing::Values(viz::ResourceFormat::RGBA_8888,
@@ -1275,16 +567,6 @@ INSTANTIATE_TEST_SUITE_P(Service,
::testing::Combine(::testing::Bool(),
kResourceFormats),
TestParamToString);
-INSTANTIATE_TEST_SUITE_P(Service,
- SharedImageBackingFactoryGLTextureThreadSafeTest,
- ::testing::Combine(::testing::Bool(),
- kResourceFormats),
- TestParamToString);
-INSTANTIATE_TEST_SUITE_P(Service,
- SharedImageBackingFactoryGLTextureWithGMBTest,
- ::testing::Combine(::testing::Bool(),
- kResourceFormats),
- TestParamToString);
} // anonymous namespace
} // namespace gpu
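
For reference, the deleted OneWriterOneReader test exercised the cross-thread mailbox flow roughly as below. This condensed sketch mirrors the removed code (ReadPixels is the helper the test relied on) and is illustrative only:

// Writer: clear the backing to green through a GLTexture representation.
auto writer = shared_image_representation_factory_->ProduceGLTexture(mailbox);
auto access = writer->BeginScopedAccess(
    GL_SHARED_IMAGE_ACCESS_MODE_READ_CHROMIUM,
    SharedImageRepresentation::AllowUnclearedAccess::kNo);
gl::GLApi* api = gl::g_current_gl_context;
GLuint fbo = 0;
api->glGenFramebuffersEXTFn(1, &fbo);
api->glBindFramebufferEXTFn(GL_FRAMEBUFFER, fbo);
api->glFramebufferTexture2DEXTFn(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0,
                                 writer->GetTexture()->target(),
                                 writer->GetTexture()->service_id(), 0);
api->glClearColorFn(0.0f, 1.0f, 0.0f, 1.0f);
api->glClearFn(GL_COLOR_BUFFER_BIT);
access.reset();
writer.reset();

// Reader: a second thread reads back on a context outside the share group.
std::vector<uint8_t> dst_pixels;
std::thread second_thread([&]() {
  dst_pixels = ReadPixels(mailbox, size, context_state2_.get(),
                          shared_image_representation_factory_.get());
});
second_thread.join();
// Expect green: dst_pixels[0..3] == {0, 255, 0, 255}.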
diff --git a/chromium/gpu/command_buffer/service/shared_image_backing_factory_iosurface.mm b/chromium/gpu/command_buffer/service/shared_image_backing_factory_iosurface.mm
index 593f039a959..33c146c86e3 100644
--- a/chromium/gpu/command_buffer/service/shared_image_backing_factory_iosurface.mm
+++ b/chromium/gpu/command_buffer/service/shared_image_backing_factory_iosurface.mm
@@ -116,7 +116,8 @@ class SharedImageRepresentationDawnIOSurface
texture_descriptor.format = wgpu_format_;
texture_descriptor.usage = usage;
texture_descriptor.dimension = WGPUTextureDimension_2D;
- texture_descriptor.size = {size().width(), size().height(), 1};
+ texture_descriptor.size = {static_cast<uint32_t>(size().width()),
+ static_cast<uint32_t>(size().height()), 1};
texture_descriptor.mipLevelCount = 1;
texture_descriptor.sampleCount = 1;
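
The casts above are needed because gfx::Size::width()/height() return int while the WebGPU extent fields are uint32_t, and brace-initialization rejects implicit narrowing conversions. A standalone illustration; Extent is a stand-in for the WebGPU struct:

#include <cstdint>

struct Extent {  // Stand-in: three uint32_t fields, like the WebGPU extent.
  uint32_t width;
  uint32_t height;
  uint32_t depth;
};

int main() {
  int w = 256, h = 256;    // Like gfx::Size::width()/height().
  // Extent bad{w, h, 1};  // Error: narrowing int -> uint32_t in list-init.
  Extent ok{static_cast<uint32_t>(w), static_cast<uint32_t>(h), 1u};
  return ok.width == 256u ? 0 : 1;
}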
diff --git a/chromium/gpu/command_buffer/service/shared_image_backing_factory_iosurface_unittest.cc b/chromium/gpu/command_buffer/service/shared_image_backing_factory_iosurface_unittest.cc
index ee1bcbf2580..f37ec78de1c 100644
--- a/chromium/gpu/command_buffer/service/shared_image_backing_factory_iosurface_unittest.cc
+++ b/chromium/gpu/command_buffer/service/shared_image_backing_factory_iosurface_unittest.cc
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "gpu/command_buffer/service/shared_image_backing_factory_gl_texture.h"
+#include "gpu/command_buffer/service/shared_image_backing_factory_gl_image.h"
#include <memory>
#include <utility>
@@ -65,9 +65,8 @@ class SharedImageBackingFactoryIOSurfaceTest : public testing::Test {
base::MakeRefCounted<gles2::FeatureInfo>(workarounds, GpuFeatureInfo());
context_state_->InitializeGL(preferences, std::move(feature_info));
- backing_factory_ = std::make_unique<SharedImageBackingFactoryGLTexture>(
+ backing_factory_ = std::make_unique<SharedImageBackingFactoryGLImage>(
preferences, workarounds, GpuFeatureInfo(), &image_factory_,
- shared_image_manager_.batch_access_manager(),
/*progress_reporter=*/nullptr);
memory_type_tracker_ = std::make_unique<MemoryTypeTracker>(nullptr);
@@ -82,7 +81,7 @@ class SharedImageBackingFactoryIOSurfaceTest : public testing::Test {
scoped_refptr<gl::GLSurface> surface_;
scoped_refptr<gl::GLContext> context_;
scoped_refptr<SharedContextState> context_state_;
- std::unique_ptr<SharedImageBackingFactoryGLTexture> backing_factory_;
+ std::unique_ptr<SharedImageBackingFactoryGLImage> backing_factory_;
gles2::MailboxManagerImpl mailbox_manager_;
SharedImageManager shared_image_manager_;
std::unique_ptr<MemoryTypeTracker> memory_type_tracker_;
@@ -406,8 +405,8 @@ TEST_F(SharedImageBackingFactoryIOSurfaceTest, Dawn_SkiaGL) {
memory_type_tracker_.get());
// Create a SharedImageRepresentationDawn.
- auto dawn_representation =
- shared_image_representation_factory_->ProduceDawn(mailbox, device.Get());
+ auto dawn_representation = shared_image_representation_factory_->ProduceDawn(
+ mailbox, device.Get(), WGPUBackendType_Metal);
EXPECT_TRUE(dawn_representation);
// Clear the shared image to green using Dawn.
@@ -531,8 +530,8 @@ TEST_F(SharedImageBackingFactoryIOSurfaceTest, GL_Dawn_Skia_UnclearTexture) {
dawnProcSetProcs(&procs);
{
auto dawn_representation =
- shared_image_representation_factory_->ProduceDawn(mailbox,
- device.Get());
+ shared_image_representation_factory_->ProduceDawn(
+ mailbox, device.Get(), WGPUBackendType_Metal);
ASSERT_TRUE(dawn_representation);
auto dawn_scoped_access = dawn_representation->BeginScopedAccess(
@@ -613,8 +612,8 @@ TEST_F(SharedImageBackingFactoryIOSurfaceTest, UnclearDawn_SkiaFails) {
dawnProcSetProcs(&procs);
{
auto dawn_representation =
- shared_image_representation_factory_->ProduceDawn(mailbox,
- device.Get());
+ shared_image_representation_factory_->ProduceDawn(
+ mailbox, device.Get(), WGPUBackendType_Metal);
ASSERT_TRUE(dawn_representation);
auto dawn_scoped_access = dawn_representation->BeginScopedAccess(
diff --git a/chromium/gpu/command_buffer/service/shared_image_backing_factory_ozone.cc b/chromium/gpu/command_buffer/service/shared_image_backing_factory_ozone.cc
index 5c49cfc82f1..05a99ba869e 100644
--- a/chromium/gpu/command_buffer/service/shared_image_backing_factory_ozone.cc
+++ b/chromium/gpu/command_buffer/service/shared_image_backing_factory_ozone.cc
@@ -8,10 +8,34 @@
#include <dawn_native/DawnNative.h>
#include "base/logging.h"
+#include "base/memory/scoped_refptr.h"
+#include "components/viz/common/gpu/vulkan_context_provider.h"
+#include "components/viz/common/resources/resource_format_utils.h"
+#include "gpu/command_buffer/common/shared_image_usage.h"
#include "gpu/command_buffer/service/shared_image_backing_ozone.h"
+#include "gpu/vulkan/vulkan_device_queue.h"
+#include "ui/gfx/gpu_memory_buffer.h"
+#include "ui/gfx/native_pixmap.h"
#include "ui/gl/buildflags.h"
+#include "ui/ozone/public/ozone_platform.h"
+#include "ui/ozone/public/surface_factory_ozone.h"
namespace gpu {
+namespace {
+
+gfx::BufferUsage GetBufferUsage(uint32_t usage) {
+ if (usage & SHARED_IMAGE_USAGE_WEBGPU) {
+ // Just use SCANOUT for WebGPU since the memory doesn't need to be linear.
+ return gfx::BufferUsage::SCANOUT;
+ } else if (usage & SHARED_IMAGE_USAGE_SCANOUT) {
+ return gfx::BufferUsage::SCANOUT;
+ } else {
+ NOTREACHED() << "Unsupported usage flags.";
+ return gfx::BufferUsage::SCANOUT;
+ }
+}
+
+} // namespace
SharedImageBackingFactoryOzone::SharedImageBackingFactoryOzone(
SharedContextState* shared_context_state)
@@ -24,6 +48,36 @@ SharedImageBackingFactoryOzone::SharedImageBackingFactoryOzone(
SharedImageBackingFactoryOzone::~SharedImageBackingFactoryOzone() = default;
+std::unique_ptr<SharedImageBackingOzone>
+SharedImageBackingFactoryOzone::CreateSharedImageInternal(
+ const Mailbox& mailbox,
+ viz::ResourceFormat format,
+ SurfaceHandle surface_handle,
+ const gfx::Size& size,
+ const gfx::ColorSpace& color_space,
+ GrSurfaceOrigin surface_origin,
+ SkAlphaType alpha_type,
+ uint32_t usage) {
+ gfx::BufferFormat buffer_format = viz::BufferFormat(format);
+ VkDevice vk_device = VK_NULL_HANDLE;
+ DCHECK(shared_context_state_);
+ if (shared_context_state_->vk_context_provider()) {
+ vk_device = shared_context_state_->vk_context_provider()
+ ->GetDeviceQueue()
+ ->GetVulkanDevice();
+ }
+ ui::SurfaceFactoryOzone* surface_factory =
+ ui::OzonePlatform::GetInstance()->GetSurfaceFactoryOzone();
+ scoped_refptr<gfx::NativePixmap> pixmap = surface_factory->CreateNativePixmap(
+ surface_handle, vk_device, size, buffer_format, GetBufferUsage(usage));
+ if (!pixmap) {
+ return nullptr;
+ }
+ return std::make_unique<SharedImageBackingOzone>(
+ mailbox, format, size, color_space, surface_origin, alpha_type, usage,
+ shared_context_state_, std::move(pixmap), dawn_procs_);
+}
+
std::unique_ptr<SharedImageBacking>
SharedImageBackingFactoryOzone::CreateSharedImage(
const Mailbox& mailbox,
@@ -36,9 +90,9 @@ SharedImageBackingFactoryOzone::CreateSharedImage(
uint32_t usage,
bool is_thread_safe) {
DCHECK(!is_thread_safe);
- return SharedImageBackingOzone::Create(
- dawn_procs_, shared_context_state_, mailbox, format, size, color_space,
- surface_origin, alpha_type, usage, surface_handle);
+ return CreateSharedImageInternal(mailbox, format, surface_handle, size,
+ color_space, surface_origin, alpha_type,
+ usage);
}
std::unique_ptr<SharedImageBacking>
@@ -51,8 +105,18 @@ SharedImageBackingFactoryOzone::CreateSharedImage(
SkAlphaType alpha_type,
uint32_t usage,
base::span<const uint8_t> pixel_data) {
- NOTIMPLEMENTED_LOG_ONCE();
- return nullptr;
+ SurfaceHandle surface_handle = SurfaceHandle();
+ auto backing =
+ CreateSharedImageInternal(mailbox, format, surface_handle, size,
+ color_space, surface_origin, alpha_type, usage);
+
+ if (!pixel_data.empty() &&
+ !backing->WritePixels(pixel_data, shared_context_state_, format, size,
+ alpha_type)) {
+ return nullptr;
+ }
+
+ return backing;
}
std::unique_ptr<SharedImageBacking>
@@ -68,14 +132,52 @@ SharedImageBackingFactoryOzone::CreateSharedImage(
GrSurfaceOrigin surface_origin,
SkAlphaType alpha_type,
uint32_t usage) {
- NOTIMPLEMENTED_LOG_ONCE();
- return nullptr;
+ ui::SurfaceFactoryOzone* surface_factory =
+ ui::OzonePlatform::GetInstance()->GetSurfaceFactoryOzone();
+ scoped_refptr<gfx::NativePixmap> pixmap =
+ surface_factory->CreateNativePixmapFromHandle(
+ surface_handle, size, buffer_format,
+ std::move(handle.native_pixmap_handle));
+ if (!pixmap) {
+ return nullptr;
+ }
+ auto backing = std::make_unique<SharedImageBackingOzone>(
+ mailbox, viz::GetResourceFormat(buffer_format), size, color_space,
+ surface_origin, alpha_type, usage, shared_context_state_,
+ std::move(pixmap), dawn_procs_);
+ backing->SetCleared();
+ return backing;
}
-bool SharedImageBackingFactoryOzone::CanImportGpuMemoryBuffer(
- gfx::GpuMemoryBufferType memory_buffer_type) {
- NOTIMPLEMENTED_LOG_ONCE();
- return false;
+bool SharedImageBackingFactoryOzone::IsSupported(
+ uint32_t usage,
+ viz::ResourceFormat format,
+ bool thread_safe,
+ gfx::GpuMemoryBufferType gmb_type,
+ GrContextType gr_context_type,
+ bool* allow_legacy_mailbox,
+ bool is_pixel_used) {
+ if (gmb_type != gfx::EMPTY_BUFFER && gmb_type != gfx::NATIVE_PIXMAP) {
+ return false;
+ }
+ // TODO(crbug.com/969114): Not all shared image factory implementations
+ // support concurrent read/write usage.
+ if (usage & SHARED_IMAGE_USAGE_CONCURRENT_READ_WRITE) {
+ return false;
+ }
+
+  // TODO(hitawala): Until SharedImageBackingOzone supports all use cases,
+  // prefer using SharedImageBackingGLImage instead.
+ bool needs_interop_factory = (gr_context_type == GrContextType::kVulkan &&
+ (usage & SHARED_IMAGE_USAGE_DISPLAY)) ||
+ (usage & SHARED_IMAGE_USAGE_WEBGPU) ||
+ (usage & SHARED_IMAGE_USAGE_VIDEO_DECODE);
+ if (!needs_interop_factory) {
+ return false;
+ }
+
+ *allow_legacy_mailbox = false;
+ return true;
}
} // namespace gpu
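
The new pixel-data overload above allocates a NativePixmap-backed backing via CreateSharedImageInternal() and then uploads through WritePixels(). The same flow restated as a sketch, with an explicit null check added here because pixmap allocation can fail; the check is this note's addition, not part of the patch:

auto backing =
    CreateSharedImageInternal(mailbox, format, SurfaceHandle(), size,
                              color_space, surface_origin, alpha_type, usage);
if (!backing)  // CreateNativePixmap() can return nullptr.
  return nullptr;
if (!pixel_data.empty() &&
    !backing->WritePixels(pixel_data, shared_context_state_, format, size,
                          alpha_type)) {
  return nullptr;
}
return backing;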
diff --git a/chromium/gpu/command_buffer/service/shared_image_backing_factory_ozone.h b/chromium/gpu/command_buffer/service/shared_image_backing_factory_ozone.h
index 97ace679a37..8fee442e053 100644
--- a/chromium/gpu/command_buffer/service/shared_image_backing_factory_ozone.h
+++ b/chromium/gpu/command_buffer/service/shared_image_backing_factory_ozone.h
@@ -8,6 +8,7 @@
#include "base/memory/ref_counted.h"
#include "base/memory/scoped_refptr.h"
#include "gpu/command_buffer/service/shared_image_backing_factory.h"
+#include "gpu/command_buffer/service/shared_image_backing_ozone.h"
#include "gpu/gpu_gles2_export.h"
struct DawnProcTable;
@@ -21,7 +22,8 @@ class SharedContextState;
class GPU_GLES2_EXPORT SharedImageBackingFactoryOzone
: public SharedImageBackingFactory {
public:
- SharedImageBackingFactoryOzone(SharedContextState* shared_context_state);
+ explicit SharedImageBackingFactoryOzone(
+ SharedContextState* shared_context_state);
~SharedImageBackingFactoryOzone() override;
@@ -60,12 +62,27 @@ class GPU_GLES2_EXPORT SharedImageBackingFactoryOzone
SkAlphaType alpha_type,
uint32_t usage) override;
- bool CanImportGpuMemoryBuffer(
- gfx::GpuMemoryBufferType memory_buffer_type) override;
+ bool IsSupported(uint32_t usage,
+ viz::ResourceFormat format,
+ bool thread_safe,
+ gfx::GpuMemoryBufferType gmb_type,
+ GrContextType gr_context_type,
+ bool* allow_legacy_mailbox,
+ bool is_pixel_used) override;
private:
SharedContextState* const shared_context_state_;
scoped_refptr<base::RefCountedData<DawnProcTable>> dawn_procs_;
+
+ std::unique_ptr<SharedImageBackingOzone> CreateSharedImageInternal(
+ const Mailbox& mailbox,
+ viz::ResourceFormat format,
+ SurfaceHandle surface_handle,
+ const gfx::Size& size,
+ const gfx::ColorSpace& color_space,
+ GrSurfaceOrigin surface_origin,
+ SkAlphaType alpha_type,
+ uint32_t usage);
};
} // namespace gpu
diff --git a/chromium/gpu/command_buffer/service/shared_image_backing_gl_common.cc b/chromium/gpu/command_buffer/service/shared_image_backing_gl_common.cc
index 3da23ab97e3..f86263bcece 100644
--- a/chromium/gpu/command_buffer/service/shared_image_backing_gl_common.cc
+++ b/chromium/gpu/command_buffer/service/shared_image_backing_gl_common.cc
@@ -118,6 +118,7 @@ SharedImageBackingGLCommon::ProduceDawnCommon(SharedImageFactory* factory,
SharedImageManager* manager,
MemoryTypeTracker* tracker,
WGPUDevice device,
+ WGPUBackendType backend_type,
SharedImageBacking* backing,
bool use_passthrough) {
DCHECK(factory);
@@ -206,7 +207,7 @@ SharedImageBackingGLCommon::ProduceDawnCommon(SharedImageFactory* factory,
// representation ref.
factory->DestroySharedImage(dst_mailbox);
- return manager->ProduceDawn(dst_mailbox, tracker, device);
+ return manager->ProduceDawn(dst_mailbox, tracker, device, backend_type);
}
// static
diff --git a/chromium/gpu/command_buffer/service/shared_image_backing_gl_common.h b/chromium/gpu/command_buffer/service/shared_image_backing_gl_common.h
index 6af656cf8b7..535ae78953e 100644
--- a/chromium/gpu/command_buffer/service/shared_image_backing_gl_common.h
+++ b/chromium/gpu/command_buffer/service/shared_image_backing_gl_common.h
@@ -97,6 +97,7 @@ class GPU_GLES2_EXPORT SharedImageBackingGLCommon {
SharedImageManager* manager,
MemoryTypeTracker* tracker,
WGPUDevice device,
+ WGPUBackendType backend_type,
SharedImageBacking* backing,
bool use_passthrough);
};
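
ProduceDawnCommon() now threads a WGPUBackendType through to SharedImageManager::ProduceDawn(), so callers must state which Dawn backend the device belongs to. An illustrative override forwarding the new argument; MyGLBacking stands in for either GL backing class changed below:

std::unique_ptr<SharedImageRepresentationDawn> MyGLBacking::ProduceDawn(
    SharedImageManager* manager,
    MemoryTypeTracker* tracker,
    WGPUDevice device,
    WGPUBackendType backend_type) {
  // The common helper forwards backend_type to the manager after copying the
  // contents into a Dawn-compatible intermediate mailbox.
  return SharedImageBackingGLCommon::ProduceDawnCommon(
      factory(), manager, tracker, device, backend_type, this,
      IsPassthrough());
}

Test code selects the backend explicitly the same way, as in the IOSurface tests above, which now pass WGPUBackendType_Metal.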
diff --git a/chromium/gpu/command_buffer/service/shared_image_backing_gl_image.cc b/chromium/gpu/command_buffer/service/shared_image_backing_gl_image.cc
index ab6da2efaf2..fb3c720c911 100644
--- a/chromium/gpu/command_buffer/service/shared_image_backing_gl_image.cc
+++ b/chromium/gpu/command_buffer/service/shared_image_backing_gl_image.cc
@@ -14,6 +14,7 @@
#include "gpu/command_buffer/service/shared_context_state.h"
#include "gpu/command_buffer/service/skia_utils.h"
#include "third_party/skia/include/core/SkPromiseImageTexture.h"
+#include "third_party/skia/include/gpu/GrContextThreadSafeProxy.h"
#include "ui/gl/gl_context.h"
#include "ui/gl/gl_fence.h"
#include "ui/gl/gl_gl_api_implementation.h"
@@ -374,8 +375,16 @@ void SharedImageBackingGLImage::ReleaseGLTexture(bool have_context) {
}
if (IsPassthrough()) {
if (passthrough_texture_) {
- if (!have_context)
+ if (have_context) {
+ if (!passthrough_texture_->is_bind_pending()) {
+ const GLenum target = GetGLTarget();
+ gl::ScopedTextureBinder binder(target,
+ passthrough_texture_->service_id());
+ image_->ReleaseTexImage(target);
+ }
+ } else {
passthrough_texture_->MarkContextLost();
+ }
passthrough_texture_.reset();
}
} else {
@@ -483,18 +492,19 @@ SharedImageBackingGLImage::ProduceGLTexturePassthrough(
std::unique_ptr<SharedImageRepresentationOverlay>
SharedImageBackingGLImage::ProduceOverlay(SharedImageManager* manager,
MemoryTypeTracker* tracker) {
-#if defined(OS_MAC) || defined(USE_OZONE)
+#if defined(OS_MAC) || defined(USE_OZONE) || defined(OS_WIN)
return std::make_unique<SharedImageRepresentationOverlayImpl>(
manager, this, tracker, image_);
-#else // !(defined(OS_MAC) || defined(USE_OZONE))
+#else // !(defined(OS_MAC) || defined(USE_OZONE) || defined(OS_WIN))
return SharedImageBacking::ProduceOverlay(manager, tracker);
-#endif // defined(OS_MAC) || defined(USE_OZONE)
+#endif // defined(OS_MAC) || defined(USE_OZONE) || defined(OS_WIN)
}
std::unique_ptr<SharedImageRepresentationDawn>
SharedImageBackingGLImage::ProduceDawn(SharedImageManager* manager,
MemoryTypeTracker* tracker,
- WGPUDevice device) {
+ WGPUDevice device,
+ WGPUBackendType backend_type) {
#if defined(OS_MAC)
auto result = SharedImageBackingFactoryIOSurface::ProduceDawn(
manager, this, tracker, device, image_);
@@ -507,7 +517,7 @@ SharedImageBackingGLImage::ProduceDawn(SharedImageManager* manager,
}
return SharedImageBackingGLCommon::ProduceDawnCommon(
- factory(), manager, tracker, device, this, IsPassthrough());
+ factory(), manager, tracker, device, backend_type, this, IsPassthrough());
}
std::unique_ptr<SharedImageRepresentationSkia>
@@ -534,7 +544,9 @@ SharedImageBackingGLImage::ProduceSkia(
} else {
GrBackendTexture backend_texture;
GetGrBackendTexture(context_state->feature_info(), GetGLTarget(), size(),
- GetGLServiceId(), format(), &backend_texture);
+ GetGLServiceId(), format(),
+ context_state->gr_context()->threadSafeProxy(),
+ &backend_texture);
cached_promise_texture_ = SkPromiseImageTexture::Make(backend_texture);
}
}
@@ -641,9 +653,14 @@ void SharedImageBackingGLImage::Update(
bool SharedImageBackingGLImage::
SharedImageRepresentationGLTextureBeginAccess() {
- if (!release_fence_.is_null())
- gl::GLFence::CreateFromGpuFence(gfx::GpuFence(std::move(release_fence_)))
- ->ServerWait();
+ if (!release_fence_.is_null()) {
+ auto fence = gfx::GpuFence(std::move(release_fence_));
+ if (gl::GLFence::IsGpuFenceSupported()) {
+ gl::GLFence::CreateFromGpuFence(std::move(fence))->ServerWait();
+ } else {
+ fence.Wait();
+ }
+ }
return BindOrCopyImageIfNeeded();
}
@@ -670,6 +687,7 @@ void SharedImageBackingGLImage::SharedImageRepresentationGLTextureEndAccess(
if (!passthrough_texture_->is_bind_pending()) {
image_->ReleaseTexImage(target);
image_bind_or_copy_needed_ = true;
+ passthrough_texture_->set_is_bind_pending(true);
}
}
#else
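The release-fence handling above now degrades gracefully: when server-side GL fences are unsupported, the fence is waited on the CPU instead. A minimal standalone sketch of that fallback, using only the gl::GLFence and gfx::GpuFence calls visible in the hunk (illustrative, not part of the commit):

#include <utility>
#include "ui/gfx/gpu_fence.h"
#include "ui/gfx/gpu_fence_handle.h"
#include "ui/gl/gl_fence.h"

// Wait for |release_fence| before reusing a texture. Prefer a server-side
// wait, which queues the wait on the GPU and lets the CPU continue; fall
// back to blocking the CPU when GL fences are unavailable.
void WaitForReleaseFence(gfx::GpuFenceHandle release_fence) {
  if (release_fence.is_null())
    return;
  gfx::GpuFence fence(std::move(release_fence));
  if (gl::GLFence::IsGpuFenceSupported()) {
    gl::GLFence::CreateFromGpuFence(std::move(fence))->ServerWait();
  } else {
    fence.Wait();
  }
}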
diff --git a/chromium/gpu/command_buffer/service/shared_image_backing_gl_image.h b/chromium/gpu/command_buffer/service/shared_image_backing_gl_image.h
index 5ad96212c61..e3727be6fcc 100644
--- a/chromium/gpu/command_buffer/service/shared_image_backing_gl_image.h
+++ b/chromium/gpu/command_buffer/service/shared_image_backing_gl_image.h
@@ -213,7 +213,8 @@ class GPU_GLES2_EXPORT SharedImageBackingGLImage
std::unique_ptr<SharedImageRepresentationDawn> ProduceDawn(
SharedImageManager* manager,
MemoryTypeTracker* tracker,
- WGPUDevice device) final;
+ WGPUDevice device,
+ WGPUBackendType backend_type) final;
std::unique_ptr<SharedImageRepresentationSkia> ProduceSkia(
SharedImageManager* manager,
MemoryTypeTracker* tracker,
diff --git a/chromium/gpu/command_buffer/service/shared_image_backing_gl_texture.cc b/chromium/gpu/command_buffer/service/shared_image_backing_gl_texture.cc
index c5bf15bef21..7f97f27a865 100644
--- a/chromium/gpu/command_buffer/service/shared_image_backing_gl_texture.cc
+++ b/chromium/gpu/command_buffer/service/shared_image_backing_gl_texture.cc
@@ -33,6 +33,7 @@
#include "gpu/config/gpu_finch_features.h"
#include "gpu/config/gpu_preferences.h"
#include "third_party/skia/include/core/SkPromiseImageTexture.h"
+#include "third_party/skia/include/gpu/GrContextThreadSafeProxy.h"
#include "ui/gfx/buffer_format_util.h"
#include "ui/gfx/color_space.h"
#include "ui/gfx/geometry/size.h"
@@ -178,14 +179,15 @@ SharedImageBackingGLTexture::ProduceGLTexturePassthrough(
std::unique_ptr<SharedImageRepresentationDawn>
SharedImageBackingGLTexture::ProduceDawn(SharedImageManager* manager,
MemoryTypeTracker* tracker,
- WGPUDevice device) {
+ WGPUDevice device,
+ WGPUBackendType backend_type) {
if (!factory()) {
DLOG(ERROR) << "No SharedImageFactory to create a dawn representation.";
return nullptr;
}
return SharedImageBackingGLCommon::ProduceDawnCommon(
- factory(), manager, tracker, device, this, IsPassthrough());
+ factory(), manager, tracker, device, backend_type, this, IsPassthrough());
}
std::unique_ptr<SharedImageRepresentationSkia>
@@ -196,7 +198,9 @@ SharedImageBackingGLTexture::ProduceSkia(
if (!cached_promise_texture_) {
GrBackendTexture backend_texture;
GetGrBackendTexture(context_state->feature_info(), GetGLTarget(), size(),
- GetGLServiceId(), format(), &backend_texture);
+ GetGLServiceId(), format(),
+ context_state->gr_context()->threadSafeProxy(),
+ &backend_texture);
cached_promise_texture_ = SkPromiseImageTexture::Make(backend_texture);
}
return std::make_unique<SharedImageRepresentationSkiaImpl>(
diff --git a/chromium/gpu/command_buffer/service/shared_image_backing_gl_texture.h b/chromium/gpu/command_buffer/service/shared_image_backing_gl_texture.h
index b406cd4529e..a551da45dbf 100644
--- a/chromium/gpu/command_buffer/service/shared_image_backing_gl_texture.h
+++ b/chromium/gpu/command_buffer/service/shared_image_backing_gl_texture.h
@@ -53,7 +53,8 @@ class SharedImageBackingGLTexture : public SharedImageBacking {
std::unique_ptr<SharedImageRepresentationDawn> ProduceDawn(
SharedImageManager* manager,
MemoryTypeTracker* tracker,
- WGPUDevice device) final;
+ WGPUDevice device,
+ WGPUBackendType backend_type) final;
std::unique_ptr<SharedImageRepresentationSkia> ProduceSkia(
SharedImageManager* manager,
MemoryTypeTracker* tracker,
diff --git a/chromium/gpu/command_buffer/service/shared_image_backing_ozone.cc b/chromium/gpu/command_buffer/service/shared_image_backing_ozone.cc
index b195fce05a6..87802d302ba 100644
--- a/chromium/gpu/command_buffer/service/shared_image_backing_ozone.cc
+++ b/chromium/gpu/command_buffer/service/shared_image_backing_ozone.cc
@@ -5,16 +5,12 @@
#include "gpu/command_buffer/service/shared_image_backing_ozone.h"
#include <dawn/webgpu.h>
-#include <vulkan/vulkan.h>
#include <memory>
#include <utility>
#include "base/logging.h"
-#include "base/memory/ptr_util.h"
-#include "base/memory/ref_counted.h"
#include "base/memory/scoped_refptr.h"
-#include "components/viz/common/gpu/vulkan_context_provider.h"
#include "components/viz/common/resources/resource_format.h"
#include "components/viz/common/resources/resource_format_utils.h"
#include "gpu/command_buffer/common/mailbox.h"
@@ -26,18 +22,20 @@
#include "gpu/command_buffer/service/shared_image_representation.h"
#include "gpu/command_buffer/service/shared_image_representation_gl_ozone.h"
#include "gpu/command_buffer/service/shared_image_representation_skia_gl.h"
-#include "gpu/vulkan/vulkan_device_queue.h"
+#include "gpu/command_buffer/service/skia_utils.h"
+#include "third_party/skia/include/core/SkPromiseImageTexture.h"
+#include "third_party/skia/include/gpu/GrBackendSemaphore.h"
#include "ui/gfx/buffer_format_util.h"
#include "ui/gfx/buffer_types.h"
#include "ui/gfx/color_space.h"
#include "ui/gfx/geometry/rect.h"
#include "ui/gfx/geometry/size.h"
#include "ui/gfx/gpu_fence.h"
+#include "ui/gfx/gpu_fence_handle.h"
#include "ui/gfx/native_pixmap.h"
#include "ui/gfx/native_widget_types.h"
#include "ui/gl/buildflags.h"
-#include "ui/ozone/public/ozone_platform.h"
-#include "ui/ozone/public/surface_factory_ozone.h"
+#include "ui/gl/gl_image_native_pixmap.h"
#if BUILDFLAG(USE_DAWN)
#include "gpu/command_buffer/service/shared_image_representation_dawn_ozone.h"
@@ -51,18 +49,6 @@ size_t GetPixmapSizeInBytes(const gfx::NativePixmap& pixmap) {
pixmap.GetBufferFormat());
}
-gfx::BufferUsage GetBufferUsage(uint32_t usage) {
- if (usage & SHARED_IMAGE_USAGE_WEBGPU) {
- // Just use SCANOUT for WebGPU since the memory doesn't need to be linear.
- return gfx::BufferUsage::SCANOUT;
- } else if (usage & SHARED_IMAGE_USAGE_SCANOUT) {
- return gfx::BufferUsage::SCANOUT;
- } else {
- NOTREACHED() << "Unsupported usage flags.";
- return gfx::BufferUsage::SCANOUT;
- }
-}
-
} // namespace
class SharedImageBackingOzone::SharedImageRepresentationVaapiOzone
@@ -89,37 +75,36 @@ class SharedImageBackingOzone::SharedImageRepresentationVaapiOzone
}
};
-std::unique_ptr<SharedImageBackingOzone> SharedImageBackingOzone::Create(
- scoped_refptr<base::RefCountedData<DawnProcTable>> dawn_procs,
- SharedContextState* context_state,
- const Mailbox& mailbox,
- viz::ResourceFormat format,
- const gfx::Size& size,
- const gfx::ColorSpace& color_space,
- GrSurfaceOrigin surface_origin,
- SkAlphaType alpha_type,
- uint32_t usage,
- SurfaceHandle surface_handle) {
- gfx::BufferFormat buffer_format = viz::BufferFormat(format);
- gfx::BufferUsage buffer_usage = GetBufferUsage(usage);
- VkDevice vk_device = VK_NULL_HANDLE;
- DCHECK(context_state);
- if (context_state->vk_context_provider()) {
- vk_device = context_state->vk_context_provider()
- ->GetDeviceQueue()
- ->GetVulkanDevice();
+class SharedImageBackingOzone::SharedImageRepresentationOverlayOzone
+ : public SharedImageRepresentationOverlay {
+ public:
+ SharedImageRepresentationOverlayOzone(
+ SharedImageManager* manager,
+ SharedImageBacking* backing,
+ MemoryTypeTracker* tracker,
+ scoped_refptr<gl::GLImageNativePixmap> image)
+ : SharedImageRepresentationOverlay(manager, backing, tracker),
+ gl_image_(image) {}
+ ~SharedImageRepresentationOverlayOzone() override = default;
+
+ private:
+ bool BeginReadAccess(std::vector<gfx::GpuFence>* acquire_fences) override {
+ auto* ozone_backing = static_cast<SharedImageBackingOzone*>(backing());
+ std::vector<gfx::GpuFenceHandle> fences;
+ ozone_backing->BeginAccess(&fences);
+ for (auto& fence : fences) {
+ acquire_fences->emplace_back(std::move(fence));
+ }
+ return true;
}
- ui::SurfaceFactoryOzone* surface_factory =
- ui::OzonePlatform::GetInstance()->GetSurfaceFactoryOzone();
- scoped_refptr<gfx::NativePixmap> pixmap = surface_factory->CreateNativePixmap(
- surface_handle, vk_device, size, buffer_format, buffer_usage);
- if (!pixmap) {
- return nullptr;
+ void EndReadAccess(gfx::GpuFenceHandle release_fence) override {
+ auto* ozone_backing = static_cast<SharedImageBackingOzone*>(backing());
+ ozone_backing->EndAccess(true, std::move(release_fence));
}
- return base::WrapUnique(new SharedImageBackingOzone(
- mailbox, format, size, color_space, surface_origin, alpha_type, usage,
- context_state, std::move(pixmap), std::move(dawn_procs)));
-}
+ gl::GLImage* GetGLImage() override { return gl_image_.get(); }
+
+ scoped_refptr<gl::GLImageNativePixmap> gl_image_;
+};
SharedImageBackingOzone::~SharedImageBackingOzone() = default;
@@ -137,7 +122,8 @@ bool SharedImageBackingOzone::ProduceLegacyMailbox(
std::unique_ptr<SharedImageRepresentationDawn>
SharedImageBackingOzone::ProduceDawn(SharedImageManager* manager,
MemoryTypeTracker* tracker,
- WGPUDevice device) {
+ WGPUDevice device,
+ WGPUBackendType backend_type) {
#if BUILDFLAG(USE_DAWN)
DCHECK(dawn_procs_);
WGPUTextureFormat webgpu_format = viz::ToWGPUFormat(format());
@@ -195,8 +181,11 @@ SharedImageBackingOzone::ProduceSkia(
std::unique_ptr<SharedImageRepresentationOverlay>
SharedImageBackingOzone::ProduceOverlay(SharedImageManager* manager,
MemoryTypeTracker* tracker) {
- NOTIMPLEMENTED_LOG_ONCE();
- return nullptr;
+ gfx::BufferFormat buffer_format = viz::BufferFormat(format());
+ auto image = base::MakeRefCounted<gl::GLImageNativePixmap>(
+ pixmap_->GetBufferSize(), buffer_format);
+ return std::make_unique<SharedImageRepresentationOverlayOzone>(
+ manager, this, tracker, image);
}
SharedImageBackingOzone::SharedImageBackingOzone(
@@ -246,4 +235,105 @@ bool SharedImageBackingOzone::VaSync() {
has_pending_va_writes_ = !vaapi_deps_->SyncSurface();
return !has_pending_va_writes_;
}
+
+bool SharedImageBackingOzone::WritePixels(
+ base::span<const uint8_t> pixel_data,
+ SharedContextState* const shared_context_state,
+ viz::ResourceFormat format,
+ const gfx::Size& size,
+ SkAlphaType alpha_type) {
+ auto representation =
+ ProduceSkia(nullptr, shared_context_state->memory_type_tracker(),
+ shared_context_state);
+
+ SkImageInfo info = SkImageInfo::Make(size.width(), size.height(),
+ ResourceFormatToClosestSkColorType(
+ /*gpu_compositing=*/true, format),
+ alpha_type);
+ SkPixmap sk_pixmap(info, pixel_data.data(), info.minRowBytes());
+
+ std::vector<GrBackendSemaphore> begin_semaphores;
+ std::vector<GrBackendSemaphore> end_semaphores;
+ // Allow uncleared access, as we manually handle clear tracking.
+ auto dest_scoped_access = representation->BeginScopedWriteAccess(
+ &begin_semaphores, &end_semaphores,
+ SharedImageRepresentation::AllowUnclearedAccess::kYes,
+ /*use_sk_surface=*/false);
+ if (!dest_scoped_access) {
+ return false;
+ }
+ if (!begin_semaphores.empty()) {
+ bool result = shared_context_state->gr_context()->wait(
+ begin_semaphores.size(), begin_semaphores.data(),
+ /*deleteSemaphoresAfterWait=*/false);
+ DCHECK(result);
+ }
+
+ bool written = shared_context_state->gr_context()->updateBackendTexture(
+ dest_scoped_access->promise_image_texture()->backendTexture(), &sk_pixmap,
+ /*numLevels=*/1, representation->surface_origin(), nullptr, nullptr);
+
+ FlushAndSubmitIfNecessary(std::move(end_semaphores), shared_context_state);
+ if (written && !representation->IsCleared()) {
+ representation->SetClearedRect(gfx::Rect(info.width(), info.height()));
+ }
+ return written;
+}
+
+void SharedImageBackingOzone::FlushAndSubmitIfNecessary(
+ std::vector<GrBackendSemaphore> signal_semaphores,
+ SharedContextState* const shared_context_state) {
+ bool sync_cpu = gpu::ShouldVulkanSyncCpuForSkiaSubmit(
+ shared_context_state->vk_context_provider());
+ GrFlushInfo flush_info = {};
+ if (!signal_semaphores.empty()) {
+ flush_info = {
+ .fNumSemaphores = signal_semaphores.size(),
+ .fSignalSemaphores = signal_semaphores.data(),
+ };
+ gpu::AddVulkanCleanupTaskForSkiaFlush(
+ shared_context_state->vk_context_provider(), &flush_info);
+ }
+
+ shared_context_state->gr_context()->flush(flush_info);
+ if (sync_cpu || !signal_semaphores.empty()) {
+ shared_context_state->gr_context()->submit();
+ }
+}
+
+bool SharedImageBackingOzone::NeedsSynchronization() const {
+ return (usage() & SHARED_IMAGE_USAGE_WEBGPU) ||
+ (usage() & SHARED_IMAGE_USAGE_SCANOUT);
+}
+
+void SharedImageBackingOzone::BeginAccess(
+ std::vector<gfx::GpuFenceHandle>* fences) {
+ if (NeedsSynchronization()) {
+ // Technically, we don't need to wait on other read fences when performing
+ // a read access, but like in the case of |ExternalVkImageBacking|, reading
+ // repeatedly without a write access will cause us to run out of FDs.
+ // TODO(penghuang): Avoid waiting on read semaphores.
+ *fences = std::move(read_fences_);
+ read_fences_.clear();
+ if (!write_fence_.is_null()) {
+ fences->push_back(std::move(write_fence_));
+ write_fence_ = gfx::GpuFenceHandle();
+ }
+ }
+}
+
+void SharedImageBackingOzone::EndAccess(bool readonly,
+ gfx::GpuFenceHandle fence) {
+ if (NeedsSynchronization()) {
+ DCHECK(!fence.is_null());
+ if (readonly) {
+ read_fences_.push_back(std::move(fence));
+ } else {
+ DCHECK(write_fence_.is_null());
+ DCHECK(read_fences_.empty());
+ write_fence_ = std::move(fence);
+ }
+ }
+}
+
} // namespace gpu
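The BeginAccess()/EndAccess() pair added above is a small reader/writer fence protocol: read fences accumulate, a single write fence replaces them, and every new access drains whatever is outstanding first (waiting even on read fences, per the comment in the hunk, so FDs do not pile up). Condensed to just the bookkeeping, with the gfx type from the diff (a sketch, not the commit's code):

#include <utility>
#include <vector>
#include "ui/gfx/gpu_fence_handle.h"

class AccessFenceTracker {
 public:
  // Hands out every outstanding fence; the caller must wait on all of
  // them before touching the buffer.
  void BeginAccess(std::vector<gfx::GpuFenceHandle>* fences) {
    *fences = std::move(read_fences_);
    read_fences_.clear();
    if (!write_fence_.is_null()) {
      fences->push_back(std::move(write_fence_));
      write_fence_ = gfx::GpuFenceHandle();
    }
  }

  // Read fences accumulate; at most one write fence is held at a time.
  void EndAccess(bool readonly, gfx::GpuFenceHandle fence) {
    if (readonly)
      read_fences_.push_back(std::move(fence));
    else
      write_fence_ = std::move(fence);
  }

 private:
  gfx::GpuFenceHandle write_fence_;
  std::vector<gfx::GpuFenceHandle> read_fences_;
};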
diff --git a/chromium/gpu/command_buffer/service/shared_image_backing_ozone.h b/chromium/gpu/command_buffer/service/shared_image_backing_ozone.h
index 85393fd6829..f1757851552 100644
--- a/chromium/gpu/command_buffer/service/shared_image_backing_ozone.h
+++ b/chromium/gpu/command_buffer/service/shared_image_backing_ozone.h
@@ -34,9 +34,7 @@ class VaapiDependencies;
// aliased by both GL and Vulkan for use in rendering or compositing.
class SharedImageBackingOzone final : public ClearTrackingSharedImageBacking {
public:
- static std::unique_ptr<SharedImageBackingOzone> Create(
- scoped_refptr<base::RefCountedData<DawnProcTable>> dawn_procs,
- SharedContextState* context_state,
+ SharedImageBackingOzone(
const Mailbox& mailbox,
viz::ResourceFormat format,
const gfx::Size& size,
@@ -44,18 +42,27 @@ class SharedImageBackingOzone final : public ClearTrackingSharedImageBacking {
GrSurfaceOrigin surface_origin,
SkAlphaType alpha_type,
uint32_t usage,
- SurfaceHandle surface_handle);
+ SharedContextState* context_state,
+ scoped_refptr<gfx::NativePixmap> pixmap,
+ scoped_refptr<base::RefCountedData<DawnProcTable>> dawn_procs);
+
~SharedImageBackingOzone() override;
// gpu::SharedImageBacking:
void Update(std::unique_ptr<gfx::GpuFence> in_fence) override;
bool ProduceLegacyMailbox(MailboxManager* mailbox_manager) override;
+ bool WritePixels(base::span<const uint8_t> pixel_data,
+ SharedContextState* const shared_context_state,
+ viz::ResourceFormat format,
+ const gfx::Size& size,
+ SkAlphaType alpha_type);
protected:
std::unique_ptr<SharedImageRepresentationDawn> ProduceDawn(
SharedImageManager* manager,
MemoryTypeTracker* tracker,
- WGPUDevice device) override;
+ WGPUDevice device,
+ WGPUBackendType backend_type) override;
std::unique_ptr<SharedImageRepresentationGLTexture> ProduceGLTexture(
SharedImageManager* manager,
MemoryTypeTracker* tracker) override;
@@ -78,26 +85,26 @@ class SharedImageBackingOzone final : public ClearTrackingSharedImageBacking {
friend class SharedImageRepresentationGLOzone;
friend class SharedImageRepresentationDawnOzone;
class SharedImageRepresentationVaapiOzone;
-
- SharedImageBackingOzone(
- const Mailbox& mailbox,
- viz::ResourceFormat format,
- const gfx::Size& size,
- const gfx::ColorSpace& color_space,
- GrSurfaceOrigin surface_origin,
- SkAlphaType alpha_type,
- uint32_t usage,
- SharedContextState* context_state,
- scoped_refptr<gfx::NativePixmap> pixmap,
- scoped_refptr<base::RefCountedData<DawnProcTable>> dawn_procs);
+ class SharedImageRepresentationOverlayOzone;
bool VaSync();
+ void FlushAndSubmitIfNecessary(
+ std::vector<GrBackendSemaphore> signal_semaphores,
+ SharedContextState* const shared_context_state);
+
+ bool NeedsSynchronization() const;
+
+ void BeginAccess(std::vector<gfx::GpuFenceHandle>* fences);
+ void EndAccess(bool readonly, gfx::GpuFenceHandle fence);
+
// Indicates if this backing produced a VASurface that may have pending work.
bool has_pending_va_writes_ = false;
std::unique_ptr<VaapiDependencies> vaapi_deps_;
scoped_refptr<gfx::NativePixmap> pixmap_;
scoped_refptr<base::RefCountedData<DawnProcTable>> dawn_procs_;
+ gfx::GpuFenceHandle write_fence_;
+ std::vector<gfx::GpuFenceHandle> read_fences_;
DISALLOW_COPY_AND_ASSIGN(SharedImageBackingOzone);
};
diff --git a/chromium/gpu/command_buffer/service/shared_image_backing_scoped_hardware_buffer_fence_sync.cc b/chromium/gpu/command_buffer/service/shared_image_backing_scoped_hardware_buffer_fence_sync.cc
index 8384ebad1e3..ba3e6e3fc02 100644
--- a/chromium/gpu/command_buffer/service/shared_image_backing_scoped_hardware_buffer_fence_sync.cc
+++ b/chromium/gpu/command_buffer/service/shared_image_backing_scoped_hardware_buffer_fence_sync.cc
@@ -348,7 +348,8 @@ SharedImageBackingScopedHardwareBufferFenceSync::ProduceSkia(
scoped_hardware_buffer_->buffer());
auto vulkan_image = CreateVkImageFromAhbHandle(
- std::move(ahb_handle), context_state.get(), size(), format());
+ std::move(ahb_handle), context_state.get(), size(), format(),
+ VK_QUEUE_FAMILY_FOREIGN_EXT);
if (!vulkan_image)
return nullptr;
diff --git a/chromium/gpu/command_buffer/service/shared_image_factory.cc b/chromium/gpu/command_buffer/service/shared_image_factory.cc
index a5828bd7577..0cab34a3602 100644
--- a/chromium/gpu/command_buffer/service/shared_image_factory.cc
+++ b/chromium/gpu/command_buffer/service/shared_image_factory.cc
@@ -20,6 +20,7 @@
#include "gpu/command_buffer/service/service_utils.h"
#include "gpu/command_buffer/service/shared_context_state.h"
#include "gpu/command_buffer/service/shared_image_backing.h"
+#include "gpu/command_buffer/service/shared_image_backing_factory_gl_image.h"
#include "gpu/command_buffer/service/shared_image_backing_factory_gl_texture.h"
#include "gpu/command_buffer/service/shared_image_manager.h"
#include "gpu/command_buffer/service/shared_image_representation.h"
@@ -61,6 +62,7 @@
#if defined(OS_ANDROID)
#include "base/android/android_hardware_buffer_compat.h"
#include "base/android/scoped_hardware_buffer_fence_sync.h"
+#include "gpu/command_buffer/service/shared_image_backing_factory_egl.h"
#include "gpu/command_buffer/service/shared_image_backing_scoped_hardware_buffer_fence_sync.h"
#endif
@@ -82,8 +84,9 @@ bool ShouldUseExternalVulkanImageFactory() {
#endif
#if defined(USE_X11)
return true;
-#endif
+#else
return false;
+#endif
}
} // namespace
@@ -117,11 +120,13 @@ SharedImageFactory::SharedImageFactory(
SharedImageManager* shared_image_manager,
ImageFactory* image_factory,
MemoryTracker* memory_tracker,
- bool enable_wrapped_sk_image)
+ bool enable_wrapped_sk_image,
+ bool is_for_display_compositor)
: mailbox_manager_(mailbox_manager),
shared_image_manager_(shared_image_manager),
shared_context_state_(context_state),
memory_tracker_(std::make_unique<MemoryTypeTracker>(memory_tracker)),
+ is_for_display_compositor_(is_for_display_compositor),
gr_context_type_(context_state ? context_state->gr_context_type()
: GrContextType::kGL) {
#if defined(OS_MAC)
@@ -130,13 +135,27 @@ SharedImageFactory::SharedImageFactory(
gr_context_type_ == GrContextType::kMetal);
#endif
+ if (enable_wrapped_sk_image && context_state) {
+ auto wrapped_sk_image_factory =
+ std::make_unique<raster::WrappedSkImageFactory>(context_state);
+ factories_.push_back(std::move(wrapped_sk_image_factory));
+ }
+
bool use_gl = gl::GetGLImplementation() != gl::kGLImplementationNone;
if (use_gl) {
- gl_backing_factory_ = std::make_unique<SharedImageBackingFactoryGLTexture>(
- gpu_preferences, workarounds, gpu_feature_info, image_factory,
- shared_image_manager->batch_access_manager(),
- shared_context_state_ ? shared_context_state_->progress_reporter()
- : nullptr);
+ auto gl_texture_backing_factory =
+ std::make_unique<SharedImageBackingFactoryGLTexture>(
+ gpu_preferences, workarounds, gpu_feature_info,
+ shared_context_state_ ? shared_context_state_->progress_reporter()
+ : nullptr);
+ factories_.push_back(std::move(gl_texture_backing_factory));
+
+#if defined(OS_ANDROID)
+ auto egl_backing_factory = std::make_unique<SharedImageBackingFactoryEGL>(
+ gpu_preferences, workarounds, gpu_feature_info,
+ shared_image_manager->batch_access_manager());
+ factories_.push_back(std::move(egl_backing_factory));
+#endif
}
// TODO(ccameron): This block of code should be changed to a switch on
@@ -147,50 +166,48 @@ SharedImageFactory::SharedImageFactory(
!BUILDFLAG(IS_CHROMEOS_LACROS) && !BUILDFLAG(IS_CHROMECAST)
// Desktop Linux, not ChromeOS.
if (ShouldUseExternalVulkanImageFactory()) {
- interop_backing_factory_ =
+ auto external_vk_image_factory =
std::make_unique<ExternalVkImageFactory>(context_state);
+ factories_.push_back(std::move(external_vk_image_factory));
} else {
LOG(ERROR) << "ERROR: gr_context_type_ is GrContextType::kVulkan and "
"interop_backing_factory_ is not set";
}
#elif defined(OS_FUCHSIA) || defined(OS_WIN)
- interop_backing_factory_ =
+ auto external_vk_image_factory =
std::make_unique<ExternalVkImageFactory>(context_state);
+ factories_.push_back(std::move(external_vk_image_factory));
#elif defined(OS_ANDROID)
- // For Android
- external_vk_image_factory_ =
- std::make_unique<ExternalVkImageFactory>(context_state);
const auto& enabled_extensions = context_state->vk_context_provider()
->GetDeviceQueue()
->enabled_extensions();
if (gfx::HasExtension(
enabled_extensions,
VK_ANDROID_EXTERNAL_MEMORY_ANDROID_HARDWARE_BUFFER_EXTENSION_NAME)) {
- interop_backing_factory_ = std::make_unique<SharedImageBackingFactoryAHB>(
+ auto ahb_factory = std::make_unique<SharedImageBackingFactoryAHB>(
workarounds, gpu_feature_info);
+ factories_.push_back(std::move(ahb_factory));
}
-#endif
-#elif BUILDFLAG(IS_CHROMEOS_ASH)
- interop_backing_factory_ =
- std::make_unique<SharedImageBackingFactoryOzone>(context_state);
-#else
- // Others
+ // For Android
+ auto external_vk_image_factory =
+ std::make_unique<ExternalVkImageFactory>(context_state);
+ factories_.push_back(std::move(external_vk_image_factory));
+#endif // defined(OS_ANDROID)
+#else // BUILDFLAG(ENABLE_VULKAN)
+ // Others (ChromeOS is handled below for compat with WebGPU)
LOG(ERROR) << "ERROR: gr_context_type_ is GrContextType::kVulkan and "
"interop_backing_factory_ is not set";
-#endif
+#endif // BUILDFLAG(ENABLE_VULKAN)
} else {
// gr_context_type_ != GrContextType::kVulkan
#if defined(OS_ANDROID) && BUILDFLAG(ENABLE_VULKAN)
if (base::AndroidHardwareBufferCompat::IsSupportAvailable()) {
- interop_backing_factory_ = std::make_unique<SharedImageBackingFactoryAHB>(
+ auto ahb_factory = std::make_unique<SharedImageBackingFactoryAHB>(
workarounds, gpu_feature_info);
+ factories_.push_back(std::move(ahb_factory));
}
#endif
}
- if (enable_wrapped_sk_image && context_state) {
- wrapped_sk_image_factory_ =
- std::make_unique<raster::WrappedSkImageFactory>(context_state);
- }
#if defined(OS_WIN)
// For Windows
@@ -198,10 +215,30 @@ SharedImageFactory::SharedImageFactory(
gles2::PassthroughCommandDecoderSupported();
if (use_passthrough && gr_context_type_ == GrContextType::kGL) {
// Only supported for passthrough command decoder.
- interop_backing_factory_ = std::make_unique<SharedImageBackingFactoryD3D>();
+ auto d3d_factory = std::make_unique<SharedImageBackingFactoryD3D>();
+ d3d_backing_factory_ = d3d_factory.get();
+ factories_.push_back(std::move(d3d_factory));
}
#endif // OS_WIN
+ if (use_gl) {
+ auto gl_image_backing_factory =
+ std::make_unique<SharedImageBackingFactoryGLImage>(
+ gpu_preferences, workarounds, gpu_feature_info, image_factory,
+ shared_context_state_ ? shared_context_state_->progress_reporter()
+ : nullptr);
+ factories_.push_back(std::move(gl_image_backing_factory));
+ }
+
+#if BUILDFLAG(IS_CHROMEOS_ASH)
+ if (gpu_preferences.enable_webgpu ||
+ gr_context_type_ == GrContextType::kVulkan) {
+ auto ozone_factory =
+ std::make_unique<SharedImageBackingFactoryOzone>(context_state);
+ factories_.push_back(std::move(ozone_factory));
+ }
+#endif // IS_CHROMEOS_ASH
+
#if defined(OS_FUCHSIA)
vulkan_context_provider_ = context_state->vk_context_provider();
#endif // OS_FUCHSIA
@@ -220,7 +257,8 @@ bool SharedImageFactory::CreateSharedImage(const Mailbox& mailbox,
gpu::SurfaceHandle surface_handle,
uint32_t usage) {
bool allow_legacy_mailbox = false;
- auto* factory = GetFactoryByUsage(usage, format, &allow_legacy_mailbox);
+ auto* factory = GetFactoryByUsage(usage, format, &allow_legacy_mailbox,
+ /*is_pixel_used=*/false);
if (!factory)
return false;
auto backing = factory->CreateSharedImage(
@@ -248,19 +286,13 @@ bool SharedImageFactory::CreateSharedImage(const Mailbox& mailbox,
return false;
}
- // Currently we only perform data uploads via two paths,
- // |gl_backing_factory_| for GL and |wrapped_sk_image_factory_| for Vulkan and
- // Dawn.
- // TODO(ericrk): Make this generic in the future.
bool allow_legacy_mailbox = false;
SharedImageBackingFactory* factory = nullptr;
if (backing_factory_for_testing_) {
factory = backing_factory_for_testing_;
- } else if (gr_context_type_ == GrContextType::kGL) {
- allow_legacy_mailbox = true;
- factory = gl_backing_factory_.get();
} else {
- factory = wrapped_sk_image_factory_.get();
+ factory = GetFactoryByUsage(usage, format, &allow_legacy_mailbox,
+ /*is_pixel_used=*/true, gfx::EMPTY_BUFFER);
}
if (!factory)
return false;
@@ -287,8 +319,9 @@ bool SharedImageFactory::CreateSharedImage(const Mailbox& mailbox,
// factory, e.g. SharedImageBackingFactoryAHB.
bool allow_legacy_mailbox = false;
auto resource_format = viz::GetResourceFormat(format);
- auto* factory = GetFactoryByUsage(usage, resource_format,
- &allow_legacy_mailbox, handle.type);
+ auto* factory =
+ GetFactoryByUsage(usage, resource_format, &allow_legacy_mailbox,
+ /*is_pixel_used=*/false, handle.type);
if (!factory)
return false;
auto backing = factory->CreateSharedImage(
@@ -345,11 +378,8 @@ bool SharedImageFactory::CreateSwapChain(const Mailbox& front_buffer_mailbox,
if (!SharedImageBackingFactoryD3D::IsSwapChainSupported())
return false;
- SharedImageBackingFactoryD3D* d3d_backing_factory =
- static_cast<SharedImageBackingFactoryD3D*>(
- interop_backing_factory_.get());
bool allow_legacy_mailbox = true;
- auto backings = d3d_backing_factory->CreateSwapChain(
+ auto backings = d3d_backing_factory_->CreateSwapChain(
front_buffer_mailbox, back_buffer_mailbox, format, size, color_space,
surface_origin, alpha_type, usage);
return RegisterBacking(std::move(backings.front_buffer),
@@ -433,10 +463,10 @@ bool SharedImageFactory::CreateSharedImageVideoPlanes(
gfx::BufferFormat format,
const gfx::Size& size,
uint32_t usage) {
- if (!interop_backing_factory_)
+ if (!d3d_backing_factory_)
return false;
- auto backings = interop_backing_factory_->CreateSharedImageVideoPlanes(
+ auto backings = d3d_backing_factory_->CreateSharedImageVideoPlanes(
mailboxes, std::move(handle), format, size, usage);
if (backings.size() != gfx::NumberOfPlanesForLinearBufferFormat(format))
@@ -480,124 +510,44 @@ void SharedImageFactory::RegisterSharedImageBackingFactoryForTesting(
}
bool SharedImageFactory::IsSharedBetweenThreads(uint32_t usage) {
- // If |shared_image_manager_| is thread safe, it means the display is running
- // on a separate thread (which uses a separate GL context or VkDeviceQueue).
- return shared_image_manager_->display_context_on_another_thread() &&
- (usage & SHARED_IMAGE_USAGE_DISPLAY);
-}
-
-bool SharedImageFactory::CanUseWrappedSkImage(uint32_t usage) const {
- if (!wrapped_sk_image_factory_)
- return false;
-
- constexpr auto kWrappedSkImageUsage = SHARED_IMAGE_USAGE_RASTER |
- SHARED_IMAGE_USAGE_OOP_RASTERIZATION |
- SHARED_IMAGE_USAGE_DISPLAY;
-
- if (gr_context_type_ != GrContextType::kGL) {
- // For SkiaRenderer/Vulkan+Dawn use WrappedSkImage if the usage is only
- // raster and/or display.
- return (usage & kWrappedSkImageUsage) && !(usage & ~kWrappedSkImageUsage);
- } else {
-    // For SkiaRenderer/GL only use WrappedSkImages for OOP-R because
- // CopySubTexture() doesn't use Skia. https://crbug.com/984045
- return usage == kWrappedSkImageUsage;
- }
+  // Ignore the mipmap usage bit for this check.
+ usage &= ~SHARED_IMAGE_USAGE_MIPMAP;
+ // If |shared_image_manager_| is thread safe, it means the display is
+ // running on a separate thread (which uses a separate GL context or
+ // VkDeviceQueue).
+ const bool used_by_display_compositor_gpu_thread =
+ (usage & SHARED_IMAGE_USAGE_DISPLAY || is_for_display_compositor_) &&
+ shared_image_manager_->display_context_on_another_thread();
+  // If it has any usage other than DISPLAY, or if it is not used solely by
+  // the display compositor, then it is used by the gpu main thread.
+ const bool used_by_main_gpu_thread =
+ usage & ~SHARED_IMAGE_USAGE_DISPLAY || !is_for_display_compositor_;
+ return used_by_display_compositor_gpu_thread && used_by_main_gpu_thread;
}
SharedImageBackingFactory* SharedImageFactory::GetFactoryByUsage(
uint32_t usage,
viz::ResourceFormat format,
bool* allow_legacy_mailbox,
+ bool is_pixel_used,
gfx::GpuMemoryBufferType gmb_type) {
if (backing_factory_for_testing_)
return backing_factory_for_testing_;
- bool using_dawn = usage & SHARED_IMAGE_USAGE_WEBGPU;
- bool vulkan_usage = gr_context_type_ == GrContextType::kVulkan &&
- (usage & SHARED_IMAGE_USAGE_DISPLAY);
- bool gl_usage = usage & SHARED_IMAGE_USAGE_GLES2;
bool share_between_threads = IsSharedBetweenThreads(usage);
- bool share_between_gl_vulkan = gl_usage && vulkan_usage;
- bool using_interop_factory = share_between_gl_vulkan || using_dawn ||
- (usage & SHARED_IMAGE_USAGE_VIDEO_DECODE) ||
- (share_between_threads && vulkan_usage);
-
-#if defined(OS_ANDROID)
- // Scanout on Android requires explicit fence synchronization which is only
- // supported by the interop factory.
- using_interop_factory |= usage & SHARED_IMAGE_USAGE_SCANOUT;
-#elif defined(OS_MAC)
- // On macOS, there is no separate interop factory. Any GpuMemoryBuffer-backed
- // image can be used with both OpenGL and Metal.
- using_interop_factory = false;
-#endif
-
- bool using_wrapped_sk_image = !using_interop_factory &&
- !share_between_threads &&
- CanUseWrappedSkImage(usage);
- if (using_wrapped_sk_image) {
- if (gmb_type == gfx::EMPTY_BUFFER ||
- wrapped_sk_image_factory_->CanImportGpuMemoryBuffer(gmb_type)) {
- *allow_legacy_mailbox = false;
- return wrapped_sk_image_factory_.get();
- }
- }
-
- using_interop_factory |= vulkan_usage;
-
- if (gmb_type != gfx::EMPTY_BUFFER) {
- bool interop_factory_supports_gmb =
- interop_backing_factory_ &&
- interop_backing_factory_->CanImportGpuMemoryBuffer(gmb_type);
-
- if (using_interop_factory && !interop_backing_factory_) {
-      LOG(ERROR) << "Unable to create SharedImage backing: no support for the
- "requested GpuMemoryBufferType.";
- return nullptr;
- }
-
- // If |interop_backing_factory_| supports supplied GMB type then use it
- // instead of |gl_backing_factory_|.
- using_interop_factory |= interop_factory_supports_gmb;
- }
-
- *allow_legacy_mailbox = !using_interop_factory &&
- gr_context_type_ == GrContextType::kGL &&
- !share_between_threads;
-
- if (using_interop_factory) {
- // TODO(crbug.com/969114): Not all shared image factory implementations
- // support concurrent read/write usage.
- if (usage & SHARED_IMAGE_USAGE_CONCURRENT_READ_WRITE) {
- LOG(ERROR) << "Unable to create SharedImage backing: Interoperability is "
- "not supported for concurrent read/write usage";
- return nullptr;
- }
-
-#if defined(OS_ANDROID)
-  // On Android, we sometimes choose the VkImage-based backing factory as an
- // interop if the format is not supported by the AHB backing factory.
- auto* ahb_backing_factory = static_cast<SharedImageBackingFactoryAHB*>(
- interop_backing_factory_.get());
- if (ahb_backing_factory && ahb_backing_factory->IsFormatSupported(format))
- return ahb_backing_factory;
- if (share_between_threads) {
-      LOG(FATAL) << "ExternalVkImageFactory currently does not support "
- "cross-thread usage.";
+ for (auto& factory : factories_) {
+ if (factory->IsSupported(usage, format, share_between_threads, gmb_type,
+ gr_context_type_, allow_legacy_mailbox,
+ is_pixel_used)) {
+ return factory.get();
}
- *allow_legacy_mailbox = false;
- return external_vk_image_factory_.get();
-#else // defined(OS_ANDROID)
- LOG_IF(ERROR, !interop_backing_factory_)
- << "Unable to create SharedImage backing: GL / Vulkan interoperability "
- "is not supported on this platform";
-
- return interop_backing_factory_.get();
-#endif // !defined(OS_ANDROID)
}
- return gl_backing_factory_.get();
+ LOG(ERROR) << "Could not find SharedImageBackingFactory with params: usage: "
+ << usage << ", format: " << format
+ << ", share_between_threads: " << share_between_threads
+ << ", gmb_type: " << gmb_type;
+ return nullptr;
}
bool SharedImageFactory::RegisterBacking(
@@ -667,8 +617,9 @@ SharedImageRepresentationFactory::ProduceSkia(
std::unique_ptr<SharedImageRepresentationDawn>
SharedImageRepresentationFactory::ProduceDawn(const Mailbox& mailbox,
- WGPUDevice device) {
- return manager_->ProduceDawn(mailbox, tracker_.get(), device);
+ WGPUDevice device,
+ WGPUBackendType backend_type) {
+ return manager_->ProduceDawn(mailbox, tracker_.get(), device, backend_type);
}
std::unique_ptr<SharedImageRepresentationOverlay>
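The refactor above collapses the hard-coded factory members into a single priority-ordered |factories_| vector: the constructor registers factories in preference order and GetFactoryByUsage() picks the first one whose IsSupported() accepts the request. A generic sketch of that selection shape (the signature is simplified; the real IsSupported() takes the usage/format/thread/GMB parameters shown in the hunk):

#include <memory>
#include <vector>

template <typename Factory, typename Request>
Factory* PickFirstSupported(
    const std::vector<std::unique_ptr<Factory>>& factories,
    const Request& request) {
  // Registration order encodes preference; a miss falls through to the
  // next candidate, and a total miss is reported by the caller.
  for (const auto& factory : factories) {
    if (factory->IsSupported(request))
      return factory.get();
  }
  return nullptr;
}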
diff --git a/chromium/gpu/command_buffer/service/shared_image_factory.h b/chromium/gpu/command_buffer/service/shared_image_factory.h
index 8830fbe84d4..1bb4f844042 100644
--- a/chromium/gpu/command_buffer/service/shared_image_factory.h
+++ b/chromium/gpu/command_buffer/service/shared_image_factory.h
@@ -6,6 +6,7 @@
#define GPU_COMMAND_BUFFER_SERVICE_SHARED_IMAGE_FACTORY_H_
#include <memory>
+#include <vector>
#include "base/containers/flat_set.h"
#include "base/memory/scoped_refptr.h"
@@ -26,14 +27,13 @@ class VulkanContextProvider;
} // namespace viz
namespace gpu {
-class ExternalVkImageFactory;
class GpuDriverBugWorkarounds;
class ImageFactory;
class MailboxManager;
class MemoryTracker;
class SharedContextState;
class SharedImageBackingFactory;
-class SharedImageBackingFactoryGLTexture;
+class SharedImageBackingFactoryD3D;
struct GpuFeatureInfo;
struct GpuPreferences;
@@ -41,10 +41,6 @@ struct GpuPreferences;
class SysmemBufferCollection;
#endif // OS_FUCHSIA
-namespace raster {
-class WrappedSkImageFactory;
-} // namespace raster
-
// TODO(ericrk): Make this a very thin wrapper around SharedImageManager like
// SharedImageRepresentationFactory.
class GPU_GLES2_EXPORT SharedImageFactory {
@@ -58,7 +54,8 @@ class GPU_GLES2_EXPORT SharedImageFactory {
SharedImageManager* manager,
ImageFactory* image_factory,
MemoryTracker* tracker,
- bool enable_wrapped_sk_image);
+ bool enable_wrapped_sk_image,
+ bool is_for_display_compositor);
~SharedImageFactory();
bool CreateSharedImage(const Mailbox& mailbox,
@@ -149,11 +146,11 @@ class GPU_GLES2_EXPORT SharedImageFactory {
private:
bool IsSharedBetweenThreads(uint32_t usage);
- bool CanUseWrappedSkImage(uint32_t usage) const;
SharedImageBackingFactory* GetFactoryByUsage(
uint32_t usage,
viz::ResourceFormat format,
bool* allow_legacy_mailbox,
+ bool is_pixel_used,
gfx::GpuMemoryBufferType gmb_type = gfx::EMPTY_BUFFER);
MailboxManager* mailbox_manager_;
@@ -161,6 +158,10 @@ class GPU_GLES2_EXPORT SharedImageFactory {
SharedContextState* shared_context_state_;
std::unique_ptr<MemoryTypeTracker> memory_tracker_;
+  // Set if this factory was created for the display compositor; used when
+  // checking for sharing between threads.
+ const bool is_for_display_compositor_;
+
// This is |shared_context_state_|'s context type. Some tests leave
// |shared_context_state_| as nullptr, in which case this is set to a default
// of kGL.
@@ -171,24 +172,15 @@ class GPU_GLES2_EXPORT SharedImageFactory {
base::flat_set<std::unique_ptr<SharedImageRepresentationFactoryRef>>
shared_images_;
- // TODO(ericrk): This should be some sort of map from usage to factory
- // eventually.
- std::unique_ptr<SharedImageBackingFactoryGLTexture> gl_backing_factory_;
-
- // Used for creating shared image which can be shared between GL, Vulkan and
- // D3D12.
- std::unique_ptr<SharedImageBackingFactory> interop_backing_factory_;
+ // Array of all the backing factories to choose from for creating shared
+ // images.
+ std::vector<std::unique_ptr<SharedImageBackingFactory>> factories_;
-#if defined(OS_ANDROID)
- // On android we have two interop factory which is |interop_backing_factory_|
- // and |external_vk_image_factory_| and we choose one of those
- // based on the format it supports.
- std::unique_ptr<ExternalVkImageFactory> external_vk_image_factory_;
+#if defined(OS_WIN)
+ // Used for creating swap chains
+ SharedImageBackingFactoryD3D* d3d_backing_factory_ = nullptr;
#endif
- // Non-null if compositing with SkiaRenderer.
- std::unique_ptr<raster::WrappedSkImageFactory> wrapped_sk_image_factory_;
-
#if defined(OS_FUCHSIA)
viz::VulkanContextProvider* vulkan_context_provider_;
base::flat_map<gfx::SysmemBufferCollectionId,
@@ -218,7 +210,8 @@ class GPU_GLES2_EXPORT SharedImageRepresentationFactory {
scoped_refptr<SharedContextState> context_State);
std::unique_ptr<SharedImageRepresentationDawn> ProduceDawn(
const Mailbox& mailbox,
- WGPUDevice device);
+ WGPUDevice device,
+ WGPUBackendType backend_type);
std::unique_ptr<SharedImageRepresentationOverlay> ProduceOverlay(
const Mailbox& mailbox);
std::unique_ptr<SharedImageRepresentationMemory> ProduceMemory(
diff --git a/chromium/gpu/command_buffer/service/shared_image_factory_unittest.cc b/chromium/gpu/command_buffer/service/shared_image_factory_unittest.cc
index 1ff4e1175e5..4cea65728e0 100644
--- a/chromium/gpu/command_buffer/service/shared_image_factory_unittest.cc
+++ b/chromium/gpu/command_buffer/service/shared_image_factory_unittest.cc
@@ -41,7 +41,8 @@ class SharedImageFactoryTest : public testing::Test {
factory_ = std::make_unique<SharedImageFactory>(
preferences, workarounds, GpuFeatureInfo(), nullptr, &mailbox_manager_,
&shared_image_manager_, &image_factory_, nullptr,
- /*enable_wrapped_sk_image=*/false);
+ /*enable_wrapped_sk_image=*/false,
+ /*is_for_display_compositor=*/false);
}
void TearDown() override {
@@ -96,7 +97,7 @@ TEST_F(SharedImageFactoryTest, DuplicateMailbox) {
auto other_factory = std::make_unique<SharedImageFactory>(
preferences, workarounds, GpuFeatureInfo(), nullptr, &mailbox_manager_,
&shared_image_manager_, &image_factory_, nullptr,
- /*enable_wrapped_sk_image=*/false);
+ /*enable_wrapped_sk_image=*/false, /*is_for_display_compositor=*/false);
EXPECT_FALSE(other_factory->CreateSharedImage(
mailbox, format, size, color_space, kTopLeft_GrSurfaceOrigin,
kPremul_SkAlphaType, surface_handle, usage));
diff --git a/chromium/gpu/command_buffer/service/shared_image_manager.cc b/chromium/gpu/command_buffer/service/shared_image_manager.cc
index f056db507f2..8e56114bcb7 100644
--- a/chromium/gpu/command_buffer/service/shared_image_manager.cc
+++ b/chromium/gpu/command_buffer/service/shared_image_manager.cc
@@ -229,7 +229,8 @@ std::unique_ptr<SharedImageRepresentationSkia> SharedImageManager::ProduceSkia(
std::unique_ptr<SharedImageRepresentationDawn> SharedImageManager::ProduceDawn(
const Mailbox& mailbox,
MemoryTypeTracker* tracker,
- WGPUDevice device) {
+ WGPUDevice device,
+ WGPUBackendType backend_type) {
CALLED_ON_VALID_THREAD();
AutoLock autolock(this);
@@ -240,7 +241,8 @@ std::unique_ptr<SharedImageRepresentationDawn> SharedImageManager::ProduceDawn(
return nullptr;
}
- auto representation = (*found)->ProduceDawn(this, tracker, device);
+ auto representation =
+ (*found)->ProduceDawn(this, tracker, device, backend_type);
if (!representation) {
LOG(ERROR) << "SharedImageManager::ProduceDawn: Trying to produce a "
"Dawn representation from an incompatible mailbox.";
diff --git a/chromium/gpu/command_buffer/service/shared_image_manager.h b/chromium/gpu/command_buffer/service/shared_image_manager.h
index e8593b5bae5..2c4f2d7ab6d 100644
--- a/chromium/gpu/command_buffer/service/shared_image_manager.h
+++ b/chromium/gpu/command_buffer/service/shared_image_manager.h
@@ -67,7 +67,8 @@ class GPU_GLES2_EXPORT SharedImageManager {
std::unique_ptr<SharedImageRepresentationDawn> ProduceDawn(
const Mailbox& mailbox,
MemoryTypeTracker* ref,
- WGPUDevice device);
+ WGPUDevice device,
+ WGPUBackendType backend_type);
std::unique_ptr<SharedImageRepresentationOverlay> ProduceOverlay(
const Mailbox& mailbox,
MemoryTypeTracker* ref);
diff --git a/chromium/gpu/command_buffer/service/shared_image_representation.cc b/chromium/gpu/command_buffer/service/shared_image_representation.cc
index e6bff7d59f6..e8aa1b35c6a 100644
--- a/chromium/gpu/command_buffer/service/shared_image_representation.cc
+++ b/chromium/gpu/command_buffer/service/shared_image_representation.cc
@@ -22,7 +22,12 @@ SharedImageRepresentation::SharedImageRepresentation(
MemoryTypeTracker* owning_tracker)
: manager_(manager), backing_(backing), tracker_(owning_tracker) {
DCHECK(tracker_);
- backing_->AddRef(this);
+  // TODO(hitawala): Rewrite the reference counting so that
+  // SharedImageRepresentation does not need the manager and the manager
+  // attaches to the backing in Register().
+ if (manager_) {
+ backing_->AddRef(this);
+ }
}
SharedImageRepresentation::~SharedImageRepresentation() {
@@ -30,7 +35,9 @@ SharedImageRepresentation::~SharedImageRepresentation() {
// error is.
CHECK(!has_scoped_access_) << "Destroying a SharedImageRepresentation with "
"outstanding Scoped*Access objects.";
- manager_->OnRepresentationDestroyed(backing_->mailbox(), this);
+ if (manager_) {
+ manager_->OnRepresentationDestroyed(backing_->mailbox(), this);
+ }
}
std::unique_ptr<SharedImageRepresentationGLTexture::ScopedAccess>
diff --git a/chromium/gpu/command_buffer/service/shared_image_representation_dawn_egl_image.cc b/chromium/gpu/command_buffer/service/shared_image_representation_dawn_egl_image.cc
new file mode 100644
index 00000000000..f5e664a1bba
--- /dev/null
+++ b/chromium/gpu/command_buffer/service/shared_image_representation_dawn_egl_image.cc
@@ -0,0 +1,79 @@
+// Copyright 2021 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "gpu/command_buffer/service/shared_image_representation_dawn_egl_image.h"
+
+#include "build/build_config.h"
+#if defined(OS_WIN)
+#include "gpu/command_buffer/service/shared_image_backing_d3d.h"
+#endif
+
+#include <dawn_native/OpenGLBackend.h>
+
+namespace gpu {
+
+SharedImageRepresentationDawnEGLImage::SharedImageRepresentationDawnEGLImage(
+ SharedImageManager* manager,
+ SharedImageBacking* backing,
+ MemoryTypeTracker* tracker,
+ WGPUDevice device,
+ EGLImage image,
+ const WGPUTextureDescriptor& texture_descriptor)
+ : SharedImageRepresentationDawn(manager, backing, tracker),
+ device_(device),
+ image_(image),
+ texture_descriptor_(texture_descriptor),
+ dawn_procs_(dawn_native::GetProcs()) {
+ DCHECK(device_);
+ DCHECK(image_);
+
+ // Keep a reference to the device so that it stays valid.
+ dawn_procs_.deviceReference(device_);
+}
+
+SharedImageRepresentationDawnEGLImage::
+ ~SharedImageRepresentationDawnEGLImage() {
+ EndAccess();
+
+ dawn_procs_.deviceRelease(device_);
+}
+
+WGPUTexture SharedImageRepresentationDawnEGLImage::BeginAccess(
+ WGPUTextureUsage usage) {
+#if defined(OS_WIN)
+ // On D3D11 backings, we must acquire the keyed mutex to do interop. If we
+ // ever switch to non-D3D backings on Windows, this code will break horribly.
+ // TODO(senorblanco): This should probably be a virtual on SharedImageBacking
+ // to avoid this cast.
+ static_cast<SharedImageBackingD3D*>(backing())->BeginAccessD3D11();
+#endif
+ dawn_native::opengl::ExternalImageDescriptorEGLImage externalImageDesc;
+ externalImageDesc.cTextureDescriptor = &texture_descriptor_;
+ externalImageDesc.image = image_;
+ externalImageDesc.isInitialized = true;
+ texture_ =
+ dawn_native::opengl::WrapExternalEGLImage(device_, &externalImageDesc);
+ return texture_;
+}
+
+void SharedImageRepresentationDawnEGLImage::EndAccess() {
+ if (!texture_) {
+ return;
+ }
+ if (dawn_native::IsTextureSubresourceInitialized(texture_, 0, 1, 0, 1)) {
+ SetCleared();
+ }
+#if defined(OS_WIN)
+ // TODO(senorblanco): This should probably be a virtual on SharedImageBacking
+ // to avoid this cast.
+ static_cast<SharedImageBackingD3D*>(backing())->EndAccessD3D11();
+#endif
+ // All further operations on the textures are errors (they would be racy
+ // with other backings).
+ dawn_procs_.textureDestroy(texture_);
+ dawn_procs_.textureRelease(texture_);
+ texture_ = nullptr;
+}
+
+} // namespace gpu
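For orientation, the core of the new representation is the wrap call in BeginAccess(): Dawn's OpenGL backend turns an existing EGLImage into a WGPUTexture, and EndAccess() later destroys that wrap while the EGLImage itself stays alive. A trimmed sketch of just the wrap step, following the dawn_native calls used above (|device|, |egl_image| and |desc| are assumed valid; error handling elided):

#include <dawn/webgpu.h>
#include <dawn_native/OpenGLBackend.h>

typedef void* EGLImage;  // Matches the typedef in the new header below.

WGPUTexture WrapEGLImageForDawn(WGPUDevice device,
                                EGLImage egl_image,
                                const WGPUTextureDescriptor* desc) {
  dawn_native::opengl::ExternalImageDescriptorEGLImage external_desc;
  external_desc.cTextureDescriptor = desc;
  external_desc.image = egl_image;
  external_desc.isInitialized = true;
  // The returned texture is only valid for this access; the
  // representation destroys and releases it again in EndAccess().
  return dawn_native::opengl::WrapExternalEGLImage(device, &external_desc);
}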
diff --git a/chromium/gpu/command_buffer/service/shared_image_representation_dawn_egl_image.h b/chromium/gpu/command_buffer/service/shared_image_representation_dawn_egl_image.h
new file mode 100644
index 00000000000..baa82154884
--- /dev/null
+++ b/chromium/gpu/command_buffer/service/shared_image_representation_dawn_egl_image.h
@@ -0,0 +1,40 @@
+// Copyright 2021 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef GPU_COMMAND_BUFFER_SERVICE_SHARED_IMAGE_REPRESENTATION_DAWN_EGL_IMAGE_H_
+#define GPU_COMMAND_BUFFER_SERVICE_SHARED_IMAGE_REPRESENTATION_DAWN_EGL_IMAGE_H_
+
+#include "gpu/command_buffer/service/shared_image_representation.h"
+
+typedef void* EGLImage;
+
+namespace gpu {
+
+class GPU_GLES2_EXPORT SharedImageRepresentationDawnEGLImage
+ : public SharedImageRepresentationDawn {
+ public:
+ SharedImageRepresentationDawnEGLImage(
+ SharedImageManager* manager,
+ SharedImageBacking* backing,
+ MemoryTypeTracker* tracker,
+ WGPUDevice device,
+ EGLImage image,
+ const WGPUTextureDescriptor& texture_descriptor);
+ ~SharedImageRepresentationDawnEGLImage() override;
+
+ private:
+ WGPUTexture BeginAccess(WGPUTextureUsage usage) override;
+ void EndAccess() override;
+
+ private:
+ WGPUDevice device_;
+ EGLImage image_;
+ WGPUTextureDescriptor texture_descriptor_;
+ DawnProcTable dawn_procs_;
+ WGPUTexture texture_ = nullptr;
+};
+
+} // namespace gpu
+
+#endif // GPU_COMMAND_BUFFER_SERVICE_SHARED_IMAGE_REPRESENTATION_DAWN_EGL_IMAGE_H_
diff --git a/chromium/gpu/command_buffer/service/shared_image_representation_dawn_ozone.cc b/chromium/gpu/command_buffer/service/shared_image_representation_dawn_ozone.cc
index 9199032ed80..421034ca302 100644
--- a/chromium/gpu/command_buffer/service/shared_image_representation_dawn_ozone.cc
+++ b/chromium/gpu/command_buffer/service/shared_image_representation_dawn_ozone.cc
@@ -56,21 +56,25 @@ WGPUTexture SharedImageRepresentationDawnOzone::BeginAccess(
}
DCHECK(pixmap_->GetNumberOfPlanes() == 1)
<< "Multi-plane formats are not supported.";
- // TODO(hob): Synchronize access to the dma-buf by waiting on all semaphores
- // tracked by SharedImageBackingOzone.
+
+ std::vector<gfx::GpuFenceHandle> fences;
+ ozone_backing()->BeginAccess(&fences);
+
gfx::Size pixmap_size = pixmap_->GetBufferSize();
WGPUTextureDescriptor texture_descriptor = {};
texture_descriptor.nextInChain = nullptr;
texture_descriptor.format = format_;
texture_descriptor.usage = usage;
texture_descriptor.dimension = WGPUTextureDimension_2D;
- texture_descriptor.size = {pixmap_size.width(), pixmap_size.height(), 1};
+ texture_descriptor.size = {static_cast<uint32_t>(pixmap_size.width()),
+ static_cast<uint32_t>(pixmap_size.height()), 1};
texture_descriptor.mipLevelCount = 1;
texture_descriptor.sampleCount = 1;
dawn_native::vulkan::ExternalImageDescriptorDmaBuf descriptor = {};
descriptor.cTextureDescriptor = &texture_descriptor;
descriptor.isInitialized = IsCleared();
+
// Import the dma-buf into Dawn via the Vulkan backend. As per the Vulkan
// documentation, importing memory from a file descriptor transfers
// ownership of the fd from the application to the Vulkan implementation.
@@ -82,6 +86,12 @@ WGPUTexture SharedImageRepresentationDawnOzone::BeginAccess(
descriptor.drmModifier = pixmap_->GetBufferFormatModifier();
descriptor.waitFDs = {};
+ if (ozone_backing()->NeedsSynchronization()) {
+ for (auto& fence : fences) {
+ descriptor.waitFDs.push_back(fence.owned_fd.release());
+ }
+ }
+
texture_ = dawn_native::vulkan::WrapVulkanImage(device_, &descriptor);
if (!texture_) {
close(fd);
@@ -105,8 +115,11 @@ void SharedImageRepresentationDawnOzone::EndAccess() {
SetCleared();
}
- // TODO(hob): Synchronize access to the dma-buf by waiting on
- // |export_info.semaphoreHandles|
+  // TODO(hob): Handle waiting on multiple semaphores from Dawn.
+ DCHECK(export_info.semaphoreHandles.size() == 1);
+ gfx::GpuFenceHandle fence;
+ fence.owned_fd = base::ScopedFD(export_info.semaphoreHandles[0]);
+ ozone_backing()->EndAccess(false /* readonly */, std::move(fence));
}
dawn_procs_->data.textureDestroy(texture_);
dawn_procs_->data.textureRelease(texture_);
diff --git a/chromium/gpu/command_buffer/service/shared_image_representation_gl_ozone.cc b/chromium/gpu/command_buffer/service/shared_image_representation_gl_ozone.cc
index 5645db88629..cebf7b56e88 100644
--- a/chromium/gpu/command_buffer/service/shared_image_representation_gl_ozone.cc
+++ b/chromium/gpu/command_buffer/service/shared_image_representation_gl_ozone.cc
@@ -98,8 +98,19 @@ gles2::Texture* SharedImageRepresentationGLOzone::GetTexture() {
}
bool SharedImageRepresentationGLOzone::BeginAccess(GLenum mode) {
- // TODO(hob): Synchronize access to the dma-buf by waiting on all semaphores
- // tracked by SharedImageBackingOzone.
+ DCHECK(!current_access_mode_);
+ current_access_mode_ = mode;
+ std::vector<gfx::GpuFenceHandle> fences;
+ ozone_backing()->BeginAccess(&fences);
+
+ if (ozone_backing()->NeedsSynchronization()) {
+ for (auto& fence : fences) {
+ gfx::GpuFence gpu_fence = gfx::GpuFence(std::move(fence));
+ std::unique_ptr<gl::GLFence> gl_fence =
+ gl::GLFence::CreateFromGpuFence(gpu_fence);
+ gl_fence->ServerWait();
+ }
+ }
// We must call VaapiWrapper::SyncSurface() to ensure all VA-API work is done
// prior to using the buffer in a graphics API.
@@ -107,8 +118,16 @@ bool SharedImageRepresentationGLOzone::BeginAccess(GLenum mode) {
}
void SharedImageRepresentationGLOzone::EndAccess() {
- // TODO(hob): Synchronize access to the dma-buf by signaling completion via
- // glSignalSemaphoreEXT.
+ gfx::GpuFenceHandle fence;
+ if (ozone_backing()->NeedsSynchronization()) {
+ auto gl_fence = gl::GLFence::CreateForGpuFence();
+ DCHECK(gl_fence);
+ fence = gl_fence->GetGpuFence()->GetGpuFenceHandle().Clone();
+ }
+ bool readonly =
+ current_access_mode_ != GL_SHARED_IMAGE_ACCESS_MODE_READWRITE_CHROMIUM;
+ ozone_backing()->EndAccess(readonly, std::move(fence));
+ current_access_mode_ = 0;
}
} // namespace gpu
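The GL side of the Ozone synchronization added above converts each incoming GpuFenceHandle into a server-side wait on entry and mints a fresh fence on exit, whose handle the backing stores for the next accessor. A standalone sketch with the gl::GLFence calls from the hunk (assumes a current GL context; illustrative only):

#include <memory>
#include <utility>
#include <vector>
#include "ui/gfx/gpu_fence.h"
#include "ui/gl/gl_fence.h"

// Queue GPU-side waits for every fence handed out by BeginAccess().
void ServerWaitOnFences(std::vector<gfx::GpuFenceHandle> fences) {
  for (auto& handle : fences) {
    gfx::GpuFence gpu_fence(std::move(handle));
    gl::GLFence::CreateFromGpuFence(gpu_fence)->ServerWait();
  }
}

// Insert a fence marking the end of this access and export its handle.
gfx::GpuFenceHandle InsertReleaseFence() {
  std::unique_ptr<gl::GLFence> gl_fence = gl::GLFence::CreateForGpuFence();
  return gl_fence->GetGpuFence()->GetGpuFenceHandle().Clone();
}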
diff --git a/chromium/gpu/command_buffer/service/shared_image_representation_gl_ozone.h b/chromium/gpu/command_buffer/service/shared_image_representation_gl_ozone.h
index 40ba782255a..78cc7f888bc 100644
--- a/chromium/gpu/command_buffer/service/shared_image_representation_gl_ozone.h
+++ b/chromium/gpu/command_buffer/service/shared_image_representation_gl_ozone.h
@@ -45,11 +45,13 @@ class SharedImageRepresentationGLOzone
MemoryTypeTracker* tracker,
gles2::Texture* texture);
- gles2::Texture* texture_;
SharedImageBackingOzone* ozone_backing() {
return static_cast<SharedImageBackingOzone*>(backing());
}
+ gles2::Texture* texture_;
+ GLenum current_access_mode_ = 0;
+
DISALLOW_COPY_AND_ASSIGN(SharedImageRepresentationGLOzone);
};
diff --git a/chromium/gpu/command_buffer/service/shared_image_representation_skia_gl.cc b/chromium/gpu/command_buffer/service/shared_image_representation_skia_gl.cc
index 53b43c09278..cdc45b270b1 100644
--- a/chromium/gpu/command_buffer/service/shared_image_representation_skia_gl.cc
+++ b/chromium/gpu/command_buffer/service/shared_image_representation_skia_gl.cc
@@ -11,6 +11,7 @@
#include "gpu/command_buffer/service/texture_manager.h"
#include "third_party/skia/include/core/SkPromiseImageTexture.h"
#include "third_party/skia/include/gpu/GrBackendSurface.h"
+#include "third_party/skia/include/gpu/GrContextThreadSafeProxy.h"
#include "ui/gl/gl_bindings.h"
namespace gpu {
@@ -39,11 +40,11 @@ SharedImageRepresentationSkiaGL::Create(
SharedImageBacking* backing,
MemoryTypeTracker* tracker) {
GrBackendTexture backend_texture;
- if (!GetGrBackendTexture(context_state->feature_info(),
- gl_representation->GetTextureBase()->target(),
- backing->size(),
- gl_representation->GetTextureBase()->service_id(),
- backing->format(), &backend_texture)) {
+ if (!GetGrBackendTexture(
+ context_state->feature_info(),
+ gl_representation->GetTextureBase()->target(), backing->size(),
+ gl_representation->GetTextureBase()->service_id(), backing->format(),
+ context_state->gr_context()->threadSafeProxy(), &backend_texture)) {
return nullptr;
}
auto promise_texture = SkPromiseImageTexture::Make(backend_texture);
diff --git a/chromium/gpu/command_buffer/service/shared_image_representation_skia_vk_android.cc b/chromium/gpu/command_buffer/service/shared_image_representation_skia_vk_android.cc
index 1c32d4afa93..4c00d0e2d27 100644
--- a/chromium/gpu/command_buffer/service/shared_image_representation_skia_vk_android.cc
+++ b/chromium/gpu/command_buffer/service/shared_image_representation_skia_vk_android.cc
@@ -89,13 +89,7 @@ sk_sp<SkSurface> SharedImageRepresentationSkiaVkAndroid::BeginWriteAccess(
surface_msaa_count_ = final_msaa_count;
}
- // If the backing could be used for scanout, we always set the layout to
- // VK_IMAGE_LAYOUT_PRESENT_SRC_KHR after each accessing.
- if (android_backing()->usage() & SHARED_IMAGE_USAGE_SCANOUT) {
- *end_state = std::make_unique<GrBackendSurfaceMutableState>(
- VK_IMAGE_LAYOUT_UNDEFINED, VK_QUEUE_FAMILY_FOREIGN_EXT);
- }
-
+ *end_state = GetEndAccessState();
return surface_;
}
@@ -111,13 +105,7 @@ SharedImageRepresentationSkiaVkAndroid::BeginWriteAccess(
base::ScopedFD()))
return nullptr;
- // If the backing could be used for scanout, we always set the layout to
- // VK_IMAGE_LAYOUT_PRESENT_SRC_KHR after each accessing.
- if (android_backing()->usage() & SHARED_IMAGE_USAGE_SCANOUT) {
- *end_state = std::make_unique<GrBackendSurfaceMutableState>(
- VK_IMAGE_LAYOUT_UNDEFINED, VK_QUEUE_FAMILY_FOREIGN_EXT);
- }
-
+ *end_state = GetEndAccessState();
return promise_texture_;
}
@@ -152,13 +140,7 @@ SharedImageRepresentationSkiaVkAndroid::BeginReadAccess(
std::move(init_read_fence_)))
return nullptr;
- // If the backing could be used for scanout, we always set the layout to
- // VK_IMAGE_LAYOUT_PRESENT_SRC_KHR after each accessing.
- if (android_backing()->usage() & SHARED_IMAGE_USAGE_SCANOUT) {
- *end_state = std::make_unique<GrBackendSurfaceMutableState>(
- VK_IMAGE_LAYOUT_PRESENT_SRC_KHR, VK_QUEUE_FAMILY_IGNORED);
- }
-
+ *end_state = GetEndAccessState();
return promise_texture_;
}
@@ -288,4 +270,27 @@ void SharedImageRepresentationSkiaVkAndroid::EndAccess(bool readonly) {
mode_ = RepresentationAccessMode::kNone;
}
+std::unique_ptr<GrBackendSurfaceMutableState>
+SharedImageRepresentationSkiaVkAndroid::GetEndAccessState() {
+ // There is no layout to change if there is no image.
+ if (!vulkan_image_)
+ return nullptr;
+
+ const uint32_t kSingleDeviceUsage = SHARED_IMAGE_USAGE_DISPLAY |
+ SHARED_IMAGE_USAGE_RASTER |
+ SHARED_IMAGE_USAGE_OOP_RASTERIZATION;
+
+  // If the SharedImage is used outside of the current VkDeviceQueue, we need
+  // to transfer the image back to its original queue. Note that for
+  // multithreading we use the same VkDevice, so technically we could transfer
+  // between queues instead of jumping to the external queue. But currently
+  // that is not possible because we create a new VkImage each time.
+ if ((android_backing()->usage() & ~kSingleDeviceUsage) ||
+ android_backing()->is_thread_safe()) {
+ return std::make_unique<GrBackendSurfaceMutableState>(
+ VK_IMAGE_LAYOUT_UNDEFINED, vulkan_image_->queue_family_index());
+ }
+ return nullptr;
+}
+
} // namespace gpu
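The new GetEndAccessState() boils down to a usage-mask test: DISPLAY, RASTER and OOP_RASTERIZATION can all be served from the current VkDeviceQueue, so only usage bits outside that set (or a thread-safe backing) force a release to the image's original queue family. A sketch of the predicate with stand-in flag values (the real SHARED_IMAGE_USAGE_* constants are defined elsewhere in gpu/command_buffer; the values here are illustrative only):

#include <cstdint>

bool NeedsQueueFamilyTransfer(uint32_t usage, bool is_thread_safe) {
  // Stand-ins for the SHARED_IMAGE_USAGE_* bits named in the hunk.
  constexpr uint32_t kDisplay = 1 << 0;
  constexpr uint32_t kRaster = 1 << 1;
  constexpr uint32_t kOopRasterization = 1 << 2;
  constexpr uint32_t kSingleDeviceUsage =
      kDisplay | kRaster | kOopRasterization;
  // e.g. usage = kDisplay | kRaster       -> false (stays on this queue)
  //      usage includes SCANOUT or WEBGPU -> true  (external queue is next)
  return (usage & ~kSingleDeviceUsage) != 0 || is_thread_safe;
}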
diff --git a/chromium/gpu/command_buffer/service/shared_image_representation_skia_vk_android.h b/chromium/gpu/command_buffer/service/shared_image_representation_skia_vk_android.h
index d4f8134f04e..a29d1c49aa7 100644
--- a/chromium/gpu/command_buffer/service/shared_image_representation_skia_vk_android.h
+++ b/chromium/gpu/command_buffer/service/shared_image_representation_skia_vk_android.h
@@ -67,6 +67,8 @@ class SharedImageRepresentationSkiaVkAndroid
std::vector<GrBackendSemaphore>* end_semaphores,
base::ScopedFD init_read_fence);
void EndAccess(bool readonly);
+ std::unique_ptr<GrBackendSurfaceMutableState> GetEndAccessState();
+
VkDevice vk_device();
VulkanImplementation* vk_implementation();
VkPhysicalDevice vk_phy_device();
diff --git a/chromium/gpu/command_buffer/service/shared_image_representation_unittest.cc b/chromium/gpu/command_buffer/service/shared_image_representation_unittest.cc
index ce578e18aad..4fe0132cac1 100644
--- a/chromium/gpu/command_buffer/service/shared_image_representation_unittest.cc
+++ b/chromium/gpu/command_buffer/service/shared_image_representation_unittest.cc
@@ -187,8 +187,8 @@ TEST_F(SharedImageRepresentationTest, SkiaClearing) {
}
TEST_F(SharedImageRepresentationTest, DawnClearing) {
- auto representation =
- manager_.ProduceDawn(mailbox_, tracker_.get(), nullptr /* device */);
+ auto representation = manager_.ProduceDawn(
+ mailbox_, tracker_.get(), nullptr /* device */, WGPUBackendType_Null);
EXPECT_FALSE(representation->IsCleared());
// We should not be able to begin access with |allow_uncleared| == false.
diff --git a/chromium/gpu/command_buffer/service/shared_image_video.cc b/chromium/gpu/command_buffer/service/shared_image_video.cc
index 2730774e89d..50b4e32cbf0 100644
--- a/chromium/gpu/command_buffer/service/shared_image_video.cc
+++ b/chromium/gpu/command_buffer/service/shared_image_video.cc
@@ -8,6 +8,7 @@
#include "base/android/scoped_hardware_buffer_fence_sync.h"
#include "base/android/scoped_hardware_buffer_handle.h"
+#include "base/threading/thread_task_runner_handle.h"
#include "components/viz/common/gpu/vulkan_context_provider.h"
#include "components/viz/common/resources/resource_format_utils.h"
#include "components/viz/common/resources/resource_sizes.h"
@@ -33,9 +34,71 @@
#include "third_party/skia/include/core/SkPromiseImageTexture.h"
#include "third_party/skia/include/gpu/GrBackendSemaphore.h"
#include "third_party/skia/include/gpu/GrBackendSurface.h"
+#include "ui/gl/gl_utils.h"
namespace gpu {
+namespace {
+class VideoImage : public gl::GLImage {
+ public:
+ VideoImage() = default;
+
+ VideoImage(AHardwareBuffer* buffer, base::ScopedFD begin_read_fence)
+ : handle_(base::android::ScopedHardwareBufferHandle::Create(buffer)),
+ begin_read_fence_(std::move(begin_read_fence)) {}
+
+ // gl::GLImage:
+ std::unique_ptr<base::android::ScopedHardwareBufferFenceSync>
+ GetAHardwareBuffer() override {
+ if (!handle_.is_valid())
+ return nullptr;
+
+ return std::make_unique<ScopedHardwareBufferFenceSyncImpl>(
+ this, base::android::ScopedHardwareBufferHandle::Create(handle_.get()),
+ std::move(begin_read_fence_));
+ }
+
+ base::ScopedFD TakeEndReadFence() { return std::move(end_read_fence_); }
+
+ protected:
+ ~VideoImage() override = default;
+
+ private:
+ class ScopedHardwareBufferFenceSyncImpl
+ : public base::android::ScopedHardwareBufferFenceSync {
+ public:
+ ScopedHardwareBufferFenceSyncImpl(
+ scoped_refptr<VideoImage> image,
+ base::android::ScopedHardwareBufferHandle handle,
+ base::ScopedFD fence_fd)
+ : ScopedHardwareBufferFenceSync(std::move(handle),
+ std::move(fence_fd),
+ base::ScopedFD(),
+ /*is_video=*/true),
+ image_(std::move(image)) {}
+ ~ScopedHardwareBufferFenceSyncImpl() override = default;
+
+ void SetReadFence(base::ScopedFD fence_fd, bool has_context) override {
+ image_->end_read_fence_ =
+ gl::MergeFDs(std::move(image_->end_read_fence_), std::move(fence_fd));
+ }
+
+ private:
+ scoped_refptr<VideoImage> image_;
+ };
+
+ base::android::ScopedHardwareBufferHandle handle_;
+
+ // This fence should be waited upon before reading from the buffer.
+ base::ScopedFD begin_read_fence_;
+
+ // This fence should be waited upon to ensure that the reader is finished
+ // reading from the buffer.
+ base::ScopedFD end_read_fence_;
+};
+
+} // namespace
+
SharedImageVideo::SharedImageVideo(
const Mailbox& mailbox,
const gfx::Size& size,
@@ -44,7 +107,8 @@ SharedImageVideo::SharedImageVideo(
SkAlphaType alpha_type,
scoped_refptr<StreamTextureSharedImageInterface> stream_texture_sii,
scoped_refptr<SharedContextState> context_state,
- bool is_thread_safe)
+ bool is_thread_safe,
+ scoped_refptr<RefCountedLock> drdc_lock)
: SharedImageBackingAndroid(
mailbox,
viz::RGBA_8888,
@@ -57,20 +121,51 @@ SharedImageVideo::SharedImageVideo(
viz::RGBA_8888),
is_thread_safe,
base::ScopedFD()),
+ RefCountedLockHelperDrDc(std::move(drdc_lock)),
stream_texture_sii_(std::move(stream_texture_sii)),
- context_state_(std::move(context_state)) {
+ context_state_(std::move(context_state)),
+ task_runner_(base::ThreadTaskRunnerHandle::Get()) {
DCHECK(stream_texture_sii_);
DCHECK(context_state_);
- // Currently this backing is not thread safe.
- DCHECK(!is_thread_safe);
context_state_->AddContextLostObserver(this);
}
SharedImageVideo::~SharedImageVideo() {
- stream_texture_sii_->ReleaseResources();
- if (context_state_)
- context_state_->RemoveContextLostObserver(this);
+ if (task_runner_->RunsTasksInCurrentSequence()) {
+ CleanupOnCorrectThread(std::move(stream_texture_sii_),
+ std::move(context_state_), this, /*event=*/nullptr,
+ GetDrDcLock());
+ } else {
+ base::WaitableEvent event;
+ task_runner_->PostTask(
+ FROM_HERE,
+ base::BindOnce(&SharedImageVideo::CleanupOnCorrectThread,
+ std::move(stream_texture_sii_),
+ std::move(context_state_), base::Unretained(this),
+ &event, GetDrDcLock()));
+ event.Wait();
+ }
+}
+
+void SharedImageVideo::CleanupOnCorrectThread(
+ scoped_refptr<StreamTextureSharedImageInterface> stream_texture_sii,
+ scoped_refptr<SharedContextState> context_state,
+ SharedImageVideo* backing,
+ base::WaitableEvent* event,
+ scoped_refptr<RefCountedLock> drdc_lock) {
+ if (context_state)
+ context_state->RemoveContextLostObserver(backing);
+ context_state.reset();
+
+ {
+ base::AutoLockMaybe auto_lock(drdc_lock ? drdc_lock->GetDrDcLockPtr()
+ : nullptr);
+ stream_texture_sii->ReleaseResources();
+ stream_texture_sii.reset();
+ }
+ if (event)
+ event->Signal();
}
gfx::Rect SharedImageVideo::ClearedRect() const {
@@ -88,22 +183,26 @@ void SharedImageVideo::Update(std::unique_ptr<gfx::GpuFence> in_fence) {
bool SharedImageVideo::ProduceLegacyMailbox(MailboxManager* mailbox_manager) {
// Android does not use legacy mailbox anymore. Hence marking this as
- // NOTREACHED() now. Once all platform stops using legacy mailbox, this method
- // can be removed.
+ // NOTREACHED() now. Once all platforms stop using legacy mailboxes, this
+ // method can be removed.
NOTREACHED();
return false;
}
size_t SharedImageVideo::EstimatedSizeForMemTracking() const {
- // This backing contributes to gpu memory only if its bound to the texture and
- // not when the backing is created.
+ base::AutoLockMaybe auto_lock(GetDrDcLockPtr());
+
+ // This backing contributes to gpu memory only if it's bound to the texture,
+ // not when the backing is created.
return stream_texture_sii_->IsUsingGpuMemory() ? estimated_size() : 0;
}
void SharedImageVideo::OnContextLost() {
- // We release codec buffers when shared image context is lost. This is because
- // texture owner's texture was created on shared context. Once shared context
- // is lost, no one should try to use that texture.
+ base::AutoLockMaybe auto_lock(GetDrDcLockPtr());
+
+ // We release codec buffers when the shared image context is lost. This is
+ // because the texture owner's texture was created on the shared context.
+ // Once the shared context is lost, no one should try to use that texture.
stream_texture_sii_->ReleaseResources();
context_state_->RemoveContextLostObserver(this);
context_state_ = nullptr;
@@ -116,8 +215,7 @@ absl::optional<VulkanYCbCrInfo> SharedImageVideo::GetYcbcrInfo(
if (!context_state->GrContextIsVulkan())
return absl::nullopt;
- // GetAHardwareBuffer() renders the latest image and gets AHardwareBuffer
- // from it.
+ // Get AHardwareBuffer from the latest frame.
auto scoped_hardware_buffer = texture_owner->GetAHardwareBuffer();
if (!scoped_hardware_buffer) {
return absl::nullopt;
@@ -140,20 +238,25 @@ absl::optional<VulkanYCbCrInfo> SharedImageVideo::GetYcbcrInfo(
std::unique_ptr<base::android::ScopedHardwareBufferFenceSync>
SharedImageVideo::GetAHardwareBuffer() {
+ base::AutoLockMaybe auto_lock(GetDrDcLockPtr());
+
DCHECK(stream_texture_sii_);
return stream_texture_sii_->GetAHardwareBuffer();
}
// Representation of SharedImageVideo as a GL Texture.
class SharedImageRepresentationGLTextureVideo
- : public SharedImageRepresentationGLTexture {
+ : public SharedImageRepresentationGLTexture,
+ public RefCountedLockHelperDrDc {
public:
SharedImageRepresentationGLTextureVideo(
SharedImageManager* manager,
SharedImageVideo* backing,
MemoryTypeTracker* tracker,
- std::unique_ptr<gles2::AbstractTexture> texture)
+ std::unique_ptr<gles2::AbstractTexture> texture,
+ scoped_refptr<RefCountedLock> drdc_lock)
: SharedImageRepresentationGLTexture(manager, backing, tracker),
+ RefCountedLockHelperDrDc(std::move(drdc_lock)),
texture_(std::move(texture)) {}
gles2::Texture* GetTexture() override {
@@ -164,6 +267,8 @@ class SharedImageRepresentationGLTextureVideo
}
bool BeginAccess(GLenum mode) override {
+ base::AutoLockMaybe auto_lock(GetDrDcLockPtr());
+
// This representation should only be called for read or overlay.
DCHECK(mode == GL_SHARED_IMAGE_ACCESS_MODE_READ_CHROMIUM ||
mode == GL_SHARED_IMAGE_ACCESS_MODE_OVERLAY_CHROMIUM);
@@ -183,16 +288,19 @@ class SharedImageRepresentationGLTextureVideo
// Representation of SharedImageVideo as a GL Texture.
class SharedImageRepresentationGLTexturePassthroughVideo
- : public SharedImageRepresentationGLTexturePassthrough {
+ : public SharedImageRepresentationGLTexturePassthrough,
+ public RefCountedLockHelperDrDc {
public:
SharedImageRepresentationGLTexturePassthroughVideo(
SharedImageManager* manager,
SharedImageVideo* backing,
MemoryTypeTracker* tracker,
- std::unique_ptr<gles2::AbstractTexture> abstract_texture)
+ std::unique_ptr<gles2::AbstractTexture> abstract_texture,
+ scoped_refptr<RefCountedLock> drdc_lock)
: SharedImageRepresentationGLTexturePassthrough(manager,
backing,
tracker),
+ RefCountedLockHelperDrDc(std::move(drdc_lock)),
abstract_texture_(std::move(abstract_texture)),
passthrough_texture_(gles2::TexturePassthrough::CheckedCast(
abstract_texture_->GetTextureBase())) {
@@ -206,6 +314,8 @@ class SharedImageRepresentationGLTexturePassthroughVideo
}
bool BeginAccess(GLenum mode) override {
+ base::AutoLockMaybe auto_lock(GetDrDcLockPtr());
+
// This representation should only be called for read or overlay.
DCHECK(mode == GL_SHARED_IMAGE_ACCESS_MODE_READ_CHROMIUM ||
mode == GL_SHARED_IMAGE_ACCESS_MODE_OVERLAY_CHROMIUM);
@@ -225,17 +335,20 @@ class SharedImageRepresentationGLTexturePassthroughVideo
};
class SharedImageRepresentationVideoSkiaVk
- : public SharedImageRepresentationSkiaVkAndroid {
+ : public SharedImageRepresentationSkiaVkAndroid,
+ public RefCountedLockHelperDrDc {
public:
SharedImageRepresentationVideoSkiaVk(
SharedImageManager* manager,
SharedImageBackingAndroid* backing,
scoped_refptr<SharedContextState> context_state,
- MemoryTypeTracker* tracker)
+ MemoryTypeTracker* tracker,
+ scoped_refptr<RefCountedLock> drdc_lock)
: SharedImageRepresentationSkiaVkAndroid(manager,
backing,
std::move(context_state),
- tracker) {}
+ tracker),
+ RefCountedLockHelperDrDc(std::move(drdc_lock)) {}
sk_sp<SkSurface> BeginWriteAccess(
int final_msaa_count,
@@ -254,6 +367,8 @@ class SharedImageRepresentationVideoSkiaVk
std::vector<GrBackendSemaphore>* begin_semaphores,
std::vector<GrBackendSemaphore>* end_semaphores,
std::unique_ptr<GrBackendSurfaceMutableState>* end_state) override {
+ base::AutoLockMaybe auto_lock(GetDrDcLockPtr());
+
DCHECK(!scoped_hardware_buffer_);
auto* video_backing = static_cast<SharedImageVideo*>(backing());
DCHECK(video_backing);
@@ -276,18 +391,18 @@ class SharedImageRepresentationVideoSkiaVk
if (!vulkan_image_) {
DCHECK(!promise_texture_);
- vulkan_image_ =
- CreateVkImageFromAhbHandle(scoped_hardware_buffer_->TakeBuffer(),
- context_state(), size(), format());
+ vulkan_image_ = CreateVkImageFromAhbHandle(
+ scoped_hardware_buffer_->TakeBuffer(), context_state(), size(),
+ format(), VK_QUEUE_FAMILY_FOREIGN_EXT);
if (!vulkan_image_)
return nullptr;
// We always use VK_IMAGE_TILING_OPTIMAL while creating the vk image in
- // VulkanImplementationAndroid::CreateVkImageAndImportAHB. Hence pass the
- // tiling parameter as VK_IMAGE_TILING_OPTIMAL to below call rather than
- // passing |vk_image_info.tiling|. This is also to ensure that the promise
- // image created here at [1] as well the fullfil image created via the
- // current function call are consistent and both are using
+ // VulkanImplementationAndroid::CreateVkImageAndImportAHB. Hence pass
+ // the tiling parameter as VK_IMAGE_TILING_OPTIMAL to the call below
+ // rather than passing |vk_image_info.tiling|. This also ensures that
+ // the promise image created here at [1] as well as the fulfill image
+ // created via the current function call are consistent and both use
// VK_IMAGE_TILING_OPTIMAL. [1] -
// https://cs.chromium.org/chromium/src/components/viz/service/display_embedder/skia_output_surface_impl.cc?rcl=db5ffd448ba5d66d9d3c5c099754e5067c752465&l=789.
DCHECK_EQ(static_cast<int32_t>(vulkan_image_->image_tiling()),
@@ -306,13 +421,14 @@ class SharedImageRepresentationVideoSkiaVk
}
void EndReadAccess() override {
+ base::AutoLockMaybe auto_lock(GetDrDcLockPtr());
DCHECK(scoped_hardware_buffer_);
SharedImageRepresentationSkiaVkAndroid::EndReadAccess();
- // Pass the end read access sync fd to the scoped hardware buffer. This will
- // make sure that the AImage associated with the hardware buffer will be
- // deleted only when the read access is ending.
+ // Pass the end read access sync fd to the scoped hardware buffer. This
+ // ensures that the AImage associated with the hardware buffer is deleted
+ // only when the read access ends.
scoped_hardware_buffer_->SetReadFence(android_backing()->TakeReadFence(),
true);
scoped_hardware_buffer_ = nullptr;
@@ -329,6 +445,8 @@ class SharedImageRepresentationVideoSkiaVk
std::unique_ptr<SharedImageRepresentationGLTexture>
SharedImageVideo::ProduceGLTexture(SharedImageManager* manager,
MemoryTypeTracker* tracker) {
+ base::AutoLockMaybe auto_lock(GetDrDcLockPtr());
+
// For (old) overlays, we don't have a texture owner, but overlay promotion
// might not happen for some reason. In that case, it will try to draw,
// which should result in no image.
@@ -341,7 +459,7 @@ SharedImageVideo::ProduceGLTexture(SharedImageManager* manager,
return nullptr;
return std::make_unique<SharedImageRepresentationGLTextureVideo>(
- manager, this, tracker, std::move(texture));
+ manager, this, tracker, std::move(texture), GetDrDcLock());
}
// TODO(vikassoni): Currently GLRenderer doesn't support overlays with shared
@@ -350,6 +468,8 @@ SharedImageVideo::ProduceGLTexture(SharedImageManager* manager,
std::unique_ptr<SharedImageRepresentationGLTexturePassthrough>
SharedImageVideo::ProduceGLTexturePassthrough(SharedImageManager* manager,
MemoryTypeTracker* tracker) {
+ base::AutoLockMaybe auto_lock(GetDrDcLockPtr());
+
// For (old) overlays, we don't have a texture owner, but overlay promotion
// might not happen for some reason. In that case, it will try to draw,
// which should result in no image.
@@ -362,7 +482,7 @@ SharedImageVideo::ProduceGLTexturePassthrough(SharedImageManager* manager,
return nullptr;
return std::make_unique<SharedImageRepresentationGLTexturePassthroughVideo>(
- manager, this, tracker, std::move(texture));
+ manager, this, tracker, std::move(texture), GetDrDcLock());
}
// Currently SkiaRenderer doesn't support overlays.
@@ -370,6 +490,8 @@ std::unique_ptr<SharedImageRepresentationSkia> SharedImageVideo::ProduceSkia(
SharedImageManager* manager,
MemoryTypeTracker* tracker,
scoped_refptr<SharedContextState> context_state) {
+ base::AutoLockMaybe auto_lock(GetDrDcLockPtr());
+
DCHECK(context_state);
// For (old) overlays, we don't have a texture owner, but overlay promotion
@@ -380,11 +502,15 @@ std::unique_ptr<SharedImageRepresentationSkia> SharedImageVideo::ProduceSkia(
if (context_state->GrContextIsVulkan()) {
return std::make_unique<SharedImageRepresentationVideoSkiaVk>(
- manager, this, std::move(context_state), tracker);
+ manager, this, std::move(context_state), tracker, GetDrDcLock());
}
DCHECK(context_state->GrContextIsGL());
- const bool passthrough = Passthrough();
+ auto* texture_base = stream_texture_sii_->GetTextureBase();
+ DCHECK(texture_base);
+ const bool passthrough =
+ (texture_base->GetType() == gpu::TextureBase::Type::kPassthrough);
+
auto texture = GenAbstractTexture(context_state, passthrough);
if (!texture)
return nullptr;
@@ -394,27 +520,22 @@ std::unique_ptr<SharedImageRepresentationSkia> SharedImageVideo::ProduceSkia(
if (passthrough) {
gl_representation =
std::make_unique<SharedImageRepresentationGLTexturePassthroughVideo>(
- manager, this, tracker, std::move(texture));
+ manager, this, tracker, std::move(texture), GetDrDcLock());
} else {
gl_representation =
std::make_unique<SharedImageRepresentationGLTextureVideo>(
- manager, this, tracker, std::move(texture));
+ manager, this, tracker, std::move(texture), GetDrDcLock());
}
return SharedImageRepresentationSkiaGL::Create(std::move(gl_representation),
std::move(context_state),
manager, this, tracker);
}
-bool SharedImageVideo::Passthrough() {
- auto* texture_base = stream_texture_sii_->GetTextureBase();
- DCHECK(texture_base);
-
- return (texture_base->GetType() == gpu::TextureBase::Type::kPassthrough);
-}
-
std::unique_ptr<gles2::AbstractTexture> SharedImageVideo::GenAbstractTexture(
scoped_refptr<SharedContextState> context_state,
const bool passthrough) {
+ AssertAcquiredDrDcLock();
+
std::unique_ptr<gles2::AbstractTexture> texture;
if (passthrough) {
texture = std::make_unique<gles2::AbstractTextureImplPassthrough>(
@@ -426,8 +547,8 @@ std::unique_ptr<gles2::AbstractTexture> SharedImageVideo::GenAbstractTexture(
GL_RGBA, GL_UNSIGNED_BYTE);
}
- // If TextureOwner binds texture implicitly on update, that means it will use
- // TextureOwner texture_id to update and bind. Hence use TextureOwner
+ // If TextureOwner binds the texture implicitly on update, it will use the
+ // TextureOwner texture_id to update and bind. Hence use the TextureOwner
// texture_id in abstract texture via BindStreamTextureImage().
if (stream_texture_sii_->TextureOwnerBindsTextureOnUpdate()) {
texture->BindStreamTextureImage(
@@ -438,48 +559,96 @@ std::unique_ptr<gles2::AbstractTexture> SharedImageVideo::GenAbstractTexture(
}
void SharedImageVideo::BeginGLReadAccess(const GLuint service_id) {
+ AssertAcquiredDrDcLock();
stream_texture_sii_->UpdateAndBindTexImage(service_id);
}
// Representation of SharedImageVideo as an overlay plane.
class SharedImageRepresentationOverlayVideo
- : public gpu::SharedImageRepresentationOverlay {
+ : public gpu::SharedImageRepresentationOverlay,
+ public RefCountedLockHelperDrDc {
public:
SharedImageRepresentationOverlayVideo(gpu::SharedImageManager* manager,
SharedImageVideo* backing,
- gpu::MemoryTypeTracker* tracker)
+ gpu::MemoryTypeTracker* tracker,
+ scoped_refptr<RefCountedLock> drdc_lock)
: gpu::SharedImageRepresentationOverlay(manager, backing, tracker),
- stream_image_(backing->stream_texture_sii_) {}
+ RefCountedLockHelperDrDc(std::move(drdc_lock)) {}
protected:
bool BeginReadAccess(std::vector<gfx::GpuFence>* acquire_fences) override {
+ base::AutoLockMaybe auto_lock(GetDrDcLockPtr());
// A |CodecImage| is already in a SurfaceView, render content to the
// overlay.
- if (!stream_image_->HasTextureOwner()) {
+ if (!stream_image()->HasTextureOwner()) {
TRACE_EVENT0("media",
"SharedImageRepresentationOverlayVideo::BeginReadAccess");
- stream_image_->RenderToOverlay();
+ stream_image()->RenderToOverlay();
}
return true;
}
void EndReadAccess(gfx::GpuFenceHandle release_fence) override {
+ base::AutoLockMaybe auto_lock(GetDrDcLockPtr());
DCHECK(release_fence.is_null());
+ if (gl_image_) {
+ if (scoped_hardware_buffer_) {
+ scoped_hardware_buffer_->SetReadFence(gl_image_->TakeEndReadFence(),
+ true);
+ }
+ gl_image_.reset();
+ scoped_hardware_buffer_.reset();
+ }
}
gl::GLImage* GetGLImage() override {
- DCHECK(stream_image_->HasTextureOwner())
+ base::AutoLockMaybe auto_lock(GetDrDcLockPtr());
+ DCHECK(stream_image()->HasTextureOwner())
<< "The backing is already in a SurfaceView!";
- return stream_image_.get();
+
+ // Note that we have SurfaceView overlays as well as SurfaceControl.
+ // SurfaceView may or may not have a TextureOwner, whereas SurfaceControl
+ // always has one. It is not possible to know whether we are in
+ // SurfaceView or SurfaceControl mode in Begin/EndReadAccess. Hence
+ // |scoped_hardware_buffer_| and |gl_image_| need to be created here, since
+ // GetGLImage will only be called for SurfaceControl.
+ if (!gl_image_) {
+ scoped_hardware_buffer_ = stream_image()->GetAHardwareBuffer();
+
+ // |scoped_hardware_buffer_| could be null for cases when a buffer is
+ // not acquired in ImageReader for some reason and there is no previously
+ // acquired image left.
+ if (scoped_hardware_buffer_) {
+ gl_image_ = base::MakeRefCounted<VideoImage>(
+ scoped_hardware_buffer_->buffer(),
+ scoped_hardware_buffer_->TakeFence());
+ } else {
+ // Callers of GetGLImage currently do not expect a null |gl_image_|.
+ // Hence we create a valid object with a null buffer, which results in a
+ // blank video frame; this is expected. TODO(vikassoni): Explore the
+ // option of returning a null GLImage here.
+ gl_image_ = base::MakeRefCounted<VideoImage>();
+ }
+ }
+ return gl_image_.get();
}
void NotifyOverlayPromotion(bool promotion,
const gfx::Rect& bounds) override {
- stream_image_->NotifyOverlayPromotion(promotion, bounds);
+ base::AutoLockMaybe auto_lock(GetDrDcLockPtr());
+ stream_image()->NotifyOverlayPromotion(promotion, bounds);
}
private:
- scoped_refptr<StreamTextureSharedImageInterface> stream_image_;
+ std::unique_ptr<base::android::ScopedHardwareBufferFenceSync>
+ scoped_hardware_buffer_;
+ scoped_refptr<VideoImage> gl_image_;
+
+ StreamTextureSharedImageInterface* stream_image() {
+ auto* video_backing = static_cast<SharedImageVideo*>(backing());
+ DCHECK(video_backing);
+ return video_backing->stream_texture_sii_.get();
+ }
DISALLOW_COPY_AND_ASSIGN(SharedImageRepresentationOverlayVideo);
};
@@ -487,8 +656,8 @@ class SharedImageRepresentationOverlayVideo
std::unique_ptr<gpu::SharedImageRepresentationOverlay>
SharedImageVideo::ProduceOverlay(gpu::SharedImageManager* manager,
gpu::MemoryTypeTracker* tracker) {
- return std::make_unique<SharedImageRepresentationOverlayVideo>(manager, this,
- tracker);
+ return std::make_unique<SharedImageRepresentationOverlayVideo>(
+ manager, this, tracker, GetDrDcLock());
}
} // namespace gpu
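A note on the recurring base::AutoLockMaybe guards above: the DrDc lock is only non-null when DrDc (the dedicated decoder thread) is enabled, so single-threaded configurations pay no locking cost. A minimal standalone sketch of the pattern, with hypothetical names:

#include "base/synchronization/lock.h"

// Hedged sketch: |lock| is null when DrDc is disabled, making the guard a
// no-op; when non-null, it serializes gpu-main and DrDc thread access.
void DoGuardedWork(base::Lock* lock) {
  base::AutoLockMaybe auto_lock(lock);
  // ... touch state shared between the two threads ...
}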
diff --git a/chromium/gpu/command_buffer/service/shared_image_video.h b/chromium/gpu/command_buffer/service/shared_image_video.h
index ec227116c26..c2dd94ec902 100644
--- a/chromium/gpu/command_buffer/service/shared_image_video.h
+++ b/chromium/gpu/command_buffer/service/shared_image_video.h
@@ -8,6 +8,9 @@
#include <memory>
#include "base/memory/scoped_refptr.h"
+#include "base/single_thread_task_runner.h"
+#include "base/synchronization/waitable_event.h"
+#include "gpu/command_buffer/service/ref_counted_lock.h"
#include "gpu/command_buffer/service/shared_context_state.h"
#include "gpu/command_buffer/service/shared_image_backing_android.h"
#include "gpu/command_buffer/service/stream_texture_shared_image_interface.h"
@@ -28,7 +31,8 @@ class AbstractTexture;
// TextureOwner or overlay as needed in order to draw them.
class GPU_GLES2_EXPORT SharedImageVideo
: public SharedImageBackingAndroid,
- public SharedContextState::ContextLostObserver {
+ public SharedContextState::ContextLostObserver,
+ public RefCountedLockHelperDrDc {
public:
SharedImageVideo(
const Mailbox& mailbox,
@@ -38,7 +42,8 @@ class GPU_GLES2_EXPORT SharedImageVideo
SkAlphaType alpha_type,
scoped_refptr<StreamTextureSharedImageInterface> stream_texture_sii,
scoped_refptr<SharedContextState> shared_context_state,
- bool is_thread_safe);
+ bool is_thread_safe,
+ scoped_refptr<RefCountedLock> drdc_lock);
~SharedImageVideo() override;
@@ -88,10 +93,6 @@ class GPU_GLES2_EXPORT SharedImageVideo
friend class SharedImageRepresentationVideoSkiaVk;
friend class SharedImageRepresentationOverlayVideo;
- // Whether we're using the passthrough command decoder and should generate
- // passthrough textures.
- bool Passthrough();
-
// Helper method to generate an abstract texture.
std::unique_ptr<gles2::AbstractTexture> GenAbstractTexture(
scoped_refptr<SharedContextState> context_state,
@@ -99,8 +100,22 @@ class GPU_GLES2_EXPORT SharedImageVideo
void BeginGLReadAccess(const GLuint service_id);
+ // Creating representations on SharedImageVideo is already thread safe,
+ // but the SharedImageVideo backing can be destroyed on any thread when it
+ // is used by multiple threads (e.g., with DrDc). Hence the backing is not
+ // guaranteed to be destroyed on the thread on which it was created. This
+ // method ensures that all member variables of this class are destroyed on
+ // the thread on which they were created.
+ static void CleanupOnCorrectThread(
+ scoped_refptr<StreamTextureSharedImageInterface> stream_texture_sii,
+ scoped_refptr<SharedContextState> context_state,
+ SharedImageVideo* backing,
+ base::WaitableEvent* event,
+ scoped_refptr<RefCountedLock> lock);
+
scoped_refptr<StreamTextureSharedImageInterface> stream_texture_sii_;
scoped_refptr<SharedContextState> context_state_;
+ scoped_refptr<base::SingleThreadTaskRunner> task_runner_;
DISALLOW_COPY_AND_ASSIGN(SharedImageVideo);
};
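The CleanupOnCorrectThread machinery declared above generalizes to a simple idiom: if the destructor runs off the creating thread, post the thread-affine teardown back to that thread and block on a WaitableEvent. A minimal sketch, with hypothetical class and member names:

#include "base/bind.h"
#include "base/location.h"
#include "base/memory/scoped_refptr.h"
#include "base/single_thread_task_runner.h"
#include "base/synchronization/waitable_event.h"
#include "base/threading/thread_task_runner_handle.h"

// Hedged sketch of the destroy-on-creating-thread idiom.
class ThreadAffineThing {
 public:
  ThreadAffineThing() : task_runner_(base::ThreadTaskRunnerHandle::Get()) {}

  ~ThreadAffineThing() {
    if (task_runner_->RunsTasksInCurrentSequence()) {
      Cleanup(/*event=*/nullptr);
      return;
    }
    // Destruction is happening on another thread; bounce the teardown back
    // to the creating thread and block until it completes. Unretained is
    // safe here because we wait before returning.
    base::WaitableEvent event;
    task_runner_->PostTask(
        FROM_HERE, base::BindOnce(&ThreadAffineThing::Cleanup,
                                  base::Unretained(this), &event));
    event.Wait();
  }

 private:
  void Cleanup(base::WaitableEvent* event) {
    // ... release thread-affine resources here ...
    if (event)
      event->Signal();
  }

  scoped_refptr<base::SingleThreadTaskRunner> task_runner_;
};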
diff --git a/chromium/gpu/command_buffer/service/skia_utils.cc b/chromium/gpu/command_buffer/service/skia_utils.cc
index 015f407dfbb..969d4655b98 100644
--- a/chromium/gpu/command_buffer/service/skia_utils.cc
+++ b/chromium/gpu/command_buffer/service/skia_utils.cc
@@ -7,12 +7,14 @@
#include "base/command_line.h"
#include "base/logging.h"
#include "build/build_config.h"
+#include "components/viz/common/resources/resource_format.h"
#include "components/viz/common/resources/resource_format_utils.h"
#include "gpu/command_buffer/service/feature_info.h"
#include "gpu/command_buffer/service/shared_context_state.h"
#include "gpu/config/gpu_switches.h"
#include "gpu/config/skia_limits.h"
#include "third_party/skia/include/gpu/GrBackendSurface.h"
+#include "third_party/skia/include/gpu/GrContextThreadSafeProxy.h"
#include "third_party/skia/include/gpu/gl/GrGLTypes.h"
#include "ui/gfx/geometry/size.h"
#include "ui/gl/gl_bindings.h"
@@ -83,9 +85,10 @@ GrContextOptions GetDefaultGrContextOptions(GrContextType type) {
&glyph_cache_max_texture_bytes);
options.fDisableCoverageCountingPaths = true;
options.fGlyphCacheTextureMaximumBytes = glyph_cache_max_texture_bytes;
- // TODO(csmartdalton): enable internal multisampling after the related Skia
- // rolls are in.
- options.fInternalMultisampleCount = 0;
+ // TODO(junov, csmartdalton): Find a way to control fInternalMultisampleCount
+ // in a more granular way. For OOPR-Canvas we want 8, but for other purposes,
+ // a texture atlas with a sample count of 4 would be sufficient.
+ options.fInternalMultisampleCount = 8;
if (type == GrContextType::kMetal)
options.fRuntimeProgramCacheSize = 1024;
@@ -96,8 +99,10 @@ GrContextOptions GetDefaultGrContextOptions(GrContextType type) {
return options;
}
-GLuint GetGrGLBackendTextureFormat(const gles2::FeatureInfo* feature_info,
- viz::ResourceFormat resource_format) {
+GLuint GetGrGLBackendTextureFormat(
+ const gles2::FeatureInfo* feature_info,
+ viz::ResourceFormat resource_format,
+ sk_sp<GrContextThreadSafeProxy> gr_context_thread_safe) {
const gl::GLVersionInfo* version_info = &feature_info->gl_version_info();
GLuint internal_format = gl::GetInternalFormat(
version_info, viz::TextureStorageFormat(resource_format));
@@ -107,19 +112,34 @@ GLuint GetGrGLBackendTextureFormat(const gles2::FeatureInfo* feature_info,
use_version_es2 = base::FeatureList::IsEnabled(features::kUseGles2ForOopR);
#endif
- // Use R8 and R16F when using later GLs where LUMINANCE8 and LUMINANCE18F are
- // deprecated
+ // Use R8 and R16F when using later GLs where ALPHA8, LUMINANCE8, ALPHA16F,
+ // and LUMINANCE16F are deprecated.
if (feature_info->gl_version_info().NeedsLuminanceAlphaEmulation()) {
switch (internal_format) {
+ case GL_ALPHA8_EXT:
case GL_LUMINANCE8:
internal_format = GL_R8_EXT;
break;
+ case GL_ALPHA16F_EXT:
case GL_LUMINANCE16F_EXT:
internal_format = GL_R16F_EXT;
break;
}
}
+ // Map ETC1 to an ETC2 type depending on the conversion performed by Skia.
+ if (resource_format == viz::ResourceFormat::ETC1) {
+ GrGLFormat gr_gl_format =
+ gr_context_thread_safe
+ ->compressedBackendFormat(SkImage::kETC1_CompressionType)
+ .asGLFormat();
+ if (gr_gl_format == GrGLFormat::kCOMPRESSED_ETC1_RGB8) {
+ internal_format = GL_ETC1_RGB8_OES;
+ } else if (gr_gl_format == GrGLFormat::kCOMPRESSED_RGB8_ETC2) {
+ internal_format = GL_COMPRESSED_RGB8_ETC2;
+ }
+ }
+
// We tell Skia to use es2 which does not have GL_R8_EXT
if (feature_info->gl_version_info().is_es3 && use_version_es2) {
if (internal_format == GL_R8_EXT)
@@ -134,6 +154,7 @@ bool GetGrBackendTexture(const gles2::FeatureInfo* feature_info,
const gfx::Size& size,
GLuint service_id,
viz::ResourceFormat resource_format,
+ sk_sp<GrContextThreadSafeProxy> gr_context_thread_safe,
GrBackendTexture* gr_texture) {
if (target != GL_TEXTURE_2D && target != GL_TEXTURE_RECTANGLE_ARB &&
target != GL_TEXTURE_EXTERNAL_OES) {
@@ -144,8 +165,8 @@ bool GetGrBackendTexture(const gles2::FeatureInfo* feature_info,
GrGLTextureInfo texture_info;
texture_info.fID = service_id;
texture_info.fTarget = target;
- texture_info.fFormat =
- GetGrGLBackendTextureFormat(feature_info, resource_format);
+ texture_info.fFormat = GetGrGLBackendTextureFormat(
+ feature_info, resource_format, gr_context_thread_safe);
*gr_texture = GrBackendTexture(size.width(), size.height(), GrMipMapped::kNo,
texture_info);
return true;
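Call sites of GetGrBackendTexture now must supply a GrContextThreadSafeProxy so ETC1 can be mapped to the compressed format Skia actually uses. A hedged sketch of an updated caller; the wrapper name is illustrative, not from this patch:

// Hedged sketch: the proxy comes from the GrDirectContext that will consume
// the texture; GetGrBackendTexture() then queries it for the ETC1 mapping.
bool MakeBackendTexture(GrDirectContext* gr_context,
                        const gpu::gles2::FeatureInfo* feature_info,
                        const gfx::Size& size,
                        GLuint service_id,
                        GrBackendTexture* out_texture) {
  return gpu::GetGrBackendTexture(feature_info, GL_TEXTURE_2D, size,
                                  service_id, viz::ResourceFormat::ETC1,
                                  gr_context->threadSafeProxy(), out_texture);
}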
diff --git a/chromium/gpu/command_buffer/service/skia_utils.h b/chromium/gpu/command_buffer/service/skia_utils.h
index 1d9e307483c..575745cc08b 100644
--- a/chromium/gpu/command_buffer/service/skia_utils.h
+++ b/chromium/gpu/command_buffer/service/skia_utils.h
@@ -49,9 +49,10 @@ GPU_GLES2_EXPORT GrContextOptions
GetDefaultGrContextOptions(GrContextType type);
// Returns internal gl format of texture for Skia
-GPU_GLES2_EXPORT GLuint
-GetGrGLBackendTextureFormat(const gles2::FeatureInfo* feature_info,
- viz::ResourceFormat resource_format);
+GPU_GLES2_EXPORT GLuint GetGrGLBackendTextureFormat(
+ const gles2::FeatureInfo* feature_info,
+ viz::ResourceFormat resource_format,
+ sk_sp<GrContextThreadSafeProxy> gr_context_thread_safe);
// Creates a GrBackendTexture from a service ID. Skia does not take ownership.
// Returns true on success.
@@ -61,6 +62,7 @@ GPU_GLES2_EXPORT bool GetGrBackendTexture(
const gfx::Size& size,
GLuint service_id,
viz::ResourceFormat resource_format,
+ sk_sp<GrContextThreadSafeProxy> gr_context_thread_safe,
GrBackendTexture* gr_texture);
// Adds a task to be executed when the flush in |flush_info| is complete.
diff --git a/chromium/gpu/command_buffer/service/stream_texture_shared_image_interface.h b/chromium/gpu/command_buffer/service/stream_texture_shared_image_interface.h
index 668843a6ae9..9093657156c 100644
--- a/chromium/gpu/command_buffer/service/stream_texture_shared_image_interface.h
+++ b/chromium/gpu/command_buffer/service/stream_texture_shared_image_interface.h
@@ -18,17 +18,12 @@ class TextureBase;
class GPU_GLES2_EXPORT StreamTextureSharedImageInterface : public gl::GLImage {
public:
enum class BindingsMode {
- // Ensures that the texture is bound to the latest image, if
- // it requires explicit binding.
- kEnsureTexImageBound,
+ // Binds the image to the texture with the given service id. Doesn't alter
+ // current GL bindings.
+ kBindImage,
- // Updates the current image but does not bind it. If updating the image
- // implicitly binds the texture, the current bindings will be restored.
- kRestoreIfBound,
-
- // Updates the current image but does not bind it. If updating the image
- // implicitly binds the texture, the current bindings will not be restored.
- kDontRestoreIfBound
+ // Updates the current image but does not bind it.
+ kDontBindImage
};
// Release the underlying resources. This should be called when the image is
@@ -66,20 +61,6 @@ class GPU_GLES2_EXPORT StreamTextureSharedImageInterface : public gl::GLImage {
~StreamTextureSharedImageInterface() override = default;
};
-// Used to restore texture binding to GL_TEXTURE_EXTERNAL_OES target.
-class ScopedRestoreTextureBinding {
- public:
- ScopedRestoreTextureBinding() {
- glGetIntegerv(GL_TEXTURE_BINDING_EXTERNAL_OES, &bound_service_id_);
- }
- ~ScopedRestoreTextureBinding() {
- glBindTexture(GL_TEXTURE_EXTERNAL_OES, bound_service_id_);
- }
-
- private:
- GLint bound_service_id_;
-};
-
} // namespace gpu
#endif // GPU_COMMAND_BUFFER_SERVICE_STREAM_TEXTURE_SHARED_IMAGE_INTERFACE_H_
diff --git a/chromium/gpu/command_buffer/service/surface_texture_gl_owner.cc b/chromium/gpu/command_buffer/service/surface_texture_gl_owner.cc
index 633cba00c6e..6a50884a3d1 100644
--- a/chromium/gpu/command_buffer/service/surface_texture_gl_owner.cc
+++ b/chromium/gpu/command_buffer/service/surface_texture_gl_owner.cc
@@ -17,10 +17,35 @@
#include "base/synchronization/waitable_event.h"
#include "base/threading/thread_task_runner_handle.h"
#include "gpu/command_buffer/service/abstract_texture.h"
+#include "gpu/config/gpu_finch_features.h"
#include "ui/gl/scoped_binders.h"
#include "ui/gl/scoped_make_current.h"
namespace gpu {
+namespace {
+
+// Makes |texture_owner|'s context current if it isn't already.
+std::unique_ptr<ui::ScopedMakeCurrent> MakeCurrentIfNeeded(
+ gpu::TextureOwner* texture_owner) {
+ gl::GLContext* context = texture_owner->GetContext();
+ // Note: this works for virtual contexts too, because IsCurrent() returns true
+ // if their shared platform context is current, regardless of which virtual
+ // context is current.
+ if (context->IsCurrent(nullptr))
+ return nullptr;
+
+ auto scoped_current = std::make_unique<ui::ScopedMakeCurrent>(
+ context, texture_owner->GetSurface());
+ // Log an error if ScopedMakeCurrent failed for debugging
+ // https://crbug.com/878042.
+ // TODO(ericrk): Remove this once debugging is completed.
+ if (!context->IsCurrent(nullptr)) {
+ LOG(ERROR) << "Failed to make context current in CodecImage. Subsequent "
+ "UpdateTexImage may fail.";
+ }
+ return scoped_current;
+}
+} // namespace
SurfaceTextureGLOwner::SurfaceTextureGLOwner(
std::unique_ptr<gles2::AbstractTexture> texture,
@@ -33,6 +58,7 @@ SurfaceTextureGLOwner::SurfaceTextureGLOwner(
surface_(gl::GLSurface::GetCurrent()) {
DCHECK(context_);
DCHECK(surface_);
+ DCHECK(!features::IsDrDcEnabled());
}
SurfaceTextureGLOwner::~SurfaceTextureGLOwner() {
@@ -73,12 +99,21 @@ gl::ScopedJavaSurface SurfaceTextureGLOwner::CreateJavaSurface() const {
void SurfaceTextureGLOwner::UpdateTexImage() {
DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);
- if (surface_texture_)
+ if (surface_texture_) {
+ // UpdateTexImage binds the texture to the SurfaceTexture's context, so
+ // make that context current.
+ auto scoped_make_current = MakeCurrentIfNeeded(this);
+ // UpdateTexImage might change the GL binding, and we should never alter
+ // GL bindings without updating state tracking, which we can't do here, so
+ // restore the previous binding once we are done.
+ ScopedRestoreTextureBinding scoped_restore_texture;
surface_texture_->UpdateTexImage();
+ }
}
void SurfaceTextureGLOwner::EnsureTexImageBound(GLuint service_id) {
- NOTREACHED();
+ // We can't bind SurfaceTexture to different ids.
+ DCHECK_EQ(service_id, GetTextureId());
}
void SurfaceTextureGLOwner::ReleaseBackBuffers() {
diff --git a/chromium/gpu/command_buffer/service/sync_point_manager.cc b/chromium/gpu/command_buffer/service/sync_point_manager.cc
index cede40713ae..6ae530be410 100644
--- a/chromium/gpu/command_buffer/service/sync_point_manager.cc
+++ b/chromium/gpu/command_buffer/service/sync_point_manager.cc
@@ -9,11 +9,11 @@
#include <stdint.h>
#include "base/bind.h"
+#include "base/cxx17_backports.h"
#include "base/location.h"
#include "base/logging.h"
#include "base/memory/ref_counted.h"
#include "base/single_thread_task_runner.h"
-#include "base/stl_util.h"
namespace gpu {
diff --git a/chromium/gpu/command_buffer/service/test_helper.cc b/chromium/gpu/command_buffer/service/test_helper.cc
index da9d7c4cd68..eeba4aa9b98 100644
--- a/chromium/gpu/command_buffer/service/test_helper.cc
+++ b/chromium/gpu/command_buffer/service/test_helper.cc
@@ -10,8 +10,9 @@
#include <algorithm>
#include <string>
-#include "base/stl_util.h"
+#include "base/cxx17_backports.h"
#include "base/strings/string_number_conversions.h"
+#include "build/build_config.h"
#include "gpu/command_buffer/service/buffer_manager.h"
#include "gpu/command_buffer/service/error_state_mock.h"
#include "gpu/command_buffer/service/feature_info.h"
@@ -676,6 +677,7 @@ void TestHelper::SetupFeatureInfoInitExpectationsWithGLVersion(
.RetiresOnSaturation();
}
+#if !defined(OS_MAC)
if (gl_info.is_es3 || gl_info.is_desktop_core_profile ||
gfx::HasExtension(extension_set, "GL_EXT_texture_rg") ||
(gfx::HasExtension(extension_set, "GL_ARB_texture_rg"))) {
@@ -734,6 +736,7 @@ void TestHelper::SetupFeatureInfoInitExpectationsWithGLVersion(
.RetiresOnSaturation();
#endif
}
+#endif // !defined(OS_MAC)
}
void TestHelper::SetupExpectationsForClearingUniforms(::gl::MockGLInterface* gl,
diff --git a/chromium/gpu/command_buffer/service/test_shared_image_backing.cc b/chromium/gpu/command_buffer/service/test_shared_image_backing.cc
index 6faccb84685..85d1e8cdc9c 100644
--- a/chromium/gpu/command_buffer/service/test_shared_image_backing.cc
+++ b/chromium/gpu/command_buffer/service/test_shared_image_backing.cc
@@ -252,7 +252,8 @@ TestSharedImageBacking::ProduceSkia(
std::unique_ptr<SharedImageRepresentationDawn>
TestSharedImageBacking::ProduceDawn(SharedImageManager* manager,
MemoryTypeTracker* tracker,
- WGPUDevice device) {
+ WGPUDevice device,
+ WGPUBackendType backend_type) {
return std::make_unique<TestSharedImageRepresentationDawn>(manager, this,
tracker);
}
diff --git a/chromium/gpu/command_buffer/service/test_shared_image_backing.h b/chromium/gpu/command_buffer/service/test_shared_image_backing.h
index b97d29de824..8dc2644a38c 100644
--- a/chromium/gpu/command_buffer/service/test_shared_image_backing.h
+++ b/chromium/gpu/command_buffer/service/test_shared_image_backing.h
@@ -65,7 +65,8 @@ class TestSharedImageBacking : public SharedImageBacking {
std::unique_ptr<SharedImageRepresentationDawn> ProduceDawn(
SharedImageManager* manager,
MemoryTypeTracker* tracker,
- WGPUDevice device) override;
+ WGPUDevice device,
+ WGPUBackendType backend_type) override;
std::unique_ptr<SharedImageRepresentationOverlay> ProduceOverlay(
SharedImageManager* manager,
MemoryTypeTracker* tracker) override;
diff --git a/chromium/gpu/command_buffer/service/texture_manager.cc b/chromium/gpu/command_buffer/service/texture_manager.cc
index 528a13bd7c4..3e0b11b49a3 100644
--- a/chromium/gpu/command_buffer/service/texture_manager.cc
+++ b/chromium/gpu/command_buffer/service/texture_manager.cc
@@ -13,9 +13,9 @@
#include <utility>
#include "base/bits.h"
+#include "base/cxx17_backports.h"
#include "base/format_macros.h"
#include "base/lazy_instance.h"
-#include "base/stl_util.h"
#include "base/strings/stringprintf.h"
#include "base/threading/thread_task_runner_handle.h"
#include "base/trace_event/memory_dump_manager.h"
diff --git a/chromium/gpu/command_buffer/service/texture_manager.h b/chromium/gpu/command_buffer/service/texture_manager.h
index 85907fb319f..0e38f79f0dd 100644
--- a/chromium/gpu/command_buffer/service/texture_manager.h
+++ b/chromium/gpu/command_buffer/service/texture_manager.h
@@ -106,7 +106,7 @@ class GPU_GLES2_EXPORT TexturePassthrough final
friend class base::RefCounted<TexturePassthrough>;
- GLuint owned_service_id_ = 0;
+ const GLuint owned_service_id_ = 0;
bool have_context_;
bool is_bind_pending_ = false;
@@ -281,13 +281,6 @@ class GPU_GLES2_EXPORT Texture final : public TextureBase {
bool CanRenderTo(const FeatureInfo* feature_info, GLint level) const;
- void SetServiceId(GLuint service_id) {
- DCHECK(service_id);
- DCHECK_EQ(owned_service_id_, service_id_);
- service_id_ = service_id;
- owned_service_id_ = service_id;
- }
-
bool SafeToRenderFrom() const {
return cleared_;
}
diff --git a/chromium/gpu/command_buffer/service/texture_owner.h b/chromium/gpu/command_buffer/service/texture_owner.h
index 7f5b20f8fc4..5770d0f6287 100644
--- a/chromium/gpu/command_buffer/service/texture_owner.h
+++ b/chromium/gpu/command_buffer/service/texture_owner.h
@@ -132,6 +132,20 @@ class GPU_GLES2_EXPORT TextureOwner
friend class base::RefCountedDeleteOnSequence<TextureOwner>;
friend class base::DeleteHelper<TextureOwner>;
+ // Used to restore texture binding to GL_TEXTURE_EXTERNAL_OES target.
+ class ScopedRestoreTextureBinding {
+ public:
+ ScopedRestoreTextureBinding() {
+ glGetIntegerv(GL_TEXTURE_BINDING_EXTERNAL_OES, &bound_service_id_);
+ }
+ ~ScopedRestoreTextureBinding() {
+ glBindTexture(GL_TEXTURE_EXTERNAL_OES, bound_service_id_);
+ }
+
+ private:
+ GLint bound_service_id_;
+ };
+
// |texture| is the texture that we'll own.
TextureOwner(bool binds_texture_on_update,
std::unique_ptr<gles2::AbstractTexture> texture,
diff --git a/chromium/gpu/command_buffer/service/webgpu_cmd_validation.cc b/chromium/gpu/command_buffer/service/webgpu_cmd_validation.cc
index d87568d3f45..7bd602e7df5 100644
--- a/chromium/gpu/command_buffer/service/webgpu_cmd_validation.cc
+++ b/chromium/gpu/command_buffer/service/webgpu_cmd_validation.cc
@@ -5,6 +5,8 @@
// Contains various validation functions for the Webgpu service.
#include "gpu/command_buffer/service/webgpu_cmd_validation.h"
+
+#include "base/cxx17_backports.h"
#include "gpu/command_buffer/service/gl_utils.h"
namespace gpu {
diff --git a/chromium/gpu/command_buffer/service/webgpu_cmd_validation.h b/chromium/gpu/command_buffer/service/webgpu_cmd_validation.h
index 5b432763581..12cd952662b 100644
--- a/chromium/gpu/command_buffer/service/webgpu_cmd_validation.h
+++ b/chromium/gpu/command_buffer/service/webgpu_cmd_validation.h
@@ -7,7 +7,6 @@
#ifndef GPU_COMMAND_BUFFER_SERVICE_WEBGPU_CMD_VALIDATION_H_
#define GPU_COMMAND_BUFFER_SERVICE_WEBGPU_CMD_VALIDATION_H_
-#include "base/stl_util.h"
#include "gpu/command_buffer/common/webgpu_cmd_enums.h"
#include "gpu/command_buffer/common/webgpu_cmd_format.h"
#include "gpu/command_buffer/service/value_validator.h"
diff --git a/chromium/gpu/command_buffer/service/webgpu_decoder_impl.cc b/chromium/gpu/command_buffer/service/webgpu_decoder_impl.cc
index 0615b074a79..703dd4c1aee 100644
--- a/chromium/gpu/command_buffer/service/webgpu_decoder_impl.cc
+++ b/chromium/gpu/command_buffer/service/webgpu_decoder_impl.cc
@@ -5,6 +5,7 @@
#include "gpu/command_buffer/service/webgpu_decoder_impl.h"
#include <dawn_native/DawnNative.h>
+#include <dawn_native/OpenGLBackend.h>
#include <dawn_platform/DawnPlatform.h>
#include <dawn_wire/WireServer.h>
@@ -29,6 +30,8 @@
#include "gpu/command_buffer/service/webgpu_decoder.h"
#include "gpu/config/gpu_preferences.h"
#include "ipc/ipc_channel.h"
+#include "ui/gl/gl_context_egl.h"
+#include "ui/gl/gl_surface_egl.h"
#if defined(OS_WIN)
#include <dawn_native/D3D12Backend.h>
@@ -137,6 +140,25 @@ dawn_native::DeviceType PowerPreferenceToDawnDeviceType(
}
}
+WGPUBackendType ToWGPUBackendType(dawn_native::BackendType type) {
+ switch (type) {
+ case dawn_native::BackendType::D3D12:
+ return WGPUBackendType_D3D12;
+ case dawn_native::BackendType::Metal:
+ return WGPUBackendType_Metal;
+ case dawn_native::BackendType::Null:
+ return WGPUBackendType_Null;
+ case dawn_native::BackendType::OpenGL:
+ return WGPUBackendType_OpenGL;
+ case dawn_native::BackendType::OpenGLES:
+ return WGPUBackendType_OpenGLES;
+ case dawn_native::BackendType::Vulkan:
+ return WGPUBackendType_Vulkan;
+ }
+ DCHECK(false);
+ return WGPUBackendType_Null;
+}
+
} // namespace
class WebGPUDecoderImpl final : public WebGPUDecoder {
@@ -162,7 +184,12 @@ class WebGPUDecoderImpl final : public WebGPUDecoder {
return nullptr;
}
void Destroy(bool have_context) override;
- bool MakeCurrent() override { return true; }
+ bool MakeCurrent() override {
+ if (gl_context_.get()) {
+ gl_context_->MakeCurrent(gl_surface_.get());
+ }
+ return true;
+ }
gl::GLContext* GetGLContext() override { return nullptr; }
gl::GLSurface* GetGLSurface() override {
NOTREACHED();
@@ -398,7 +425,8 @@ class WebGPUDecoderImpl final : public WebGPUDecoder {
int32_t requested_adapter_index,
uint32_t device_id,
uint32_t device_generation,
- const WGPUDeviceProperties& requested_device_properties);
+ const WGPUDeviceProperties& requested_device_properties,
+ bool* creation_succeeded);
void SendAdapterProperties(DawnRequestAdapterSerial request_adapter_serial,
int32_t adapter_service_id,
@@ -417,6 +445,8 @@ class WebGPUDecoderImpl final : public WebGPUDecoder {
std::unique_ptr<dawn_native::Instance> dawn_instance_;
std::vector<dawn_native::Adapter> dawn_adapters_;
+ bool allow_spirv_ = false;
+ bool force_webgpu_compat_ = false;
std::vector<std::string> force_enabled_toggles_;
std::vector<std::string> force_disabled_toggles_;
@@ -441,8 +471,13 @@ class WebGPUDecoderImpl final : public WebGPUDecoder {
// in PerformPollingWork. Dawn will never reuse a previously allocated
// <ID, generation> pair.
std::vector<std::pair<uint32_t, uint32_t>> known_devices_;
+ std::unordered_map<uint32_t, WGPUBackendType> device_backend_types_;
bool has_polling_work_ = false;
+ bool destroyed_ = false;
+
+ scoped_refptr<gl::GLContext> gl_context_;
+ scoped_refptr<gl::GLSurface> gl_surface_;
DISALLOW_COPY_AND_ASSIGN(WebGPUDecoderImpl);
};
@@ -502,6 +537,8 @@ WebGPUDecoderImpl::WebGPUDecoderImpl(
break;
}
+ allow_spirv_ = gpu_preferences.enable_webgpu_spirv;
+ force_webgpu_compat_ = gpu_preferences.force_webgpu_compat;
force_enabled_toggles_ = gpu_preferences.enabled_dawn_features_list;
force_disabled_toggles_ = gpu_preferences.disabled_dawn_features_list;
@@ -518,10 +555,23 @@ WebGPUDecoderImpl::~WebGPUDecoderImpl() {
void WebGPUDecoderImpl::Destroy(bool have_context) {
associated_shared_image_map_.clear();
+ known_devices_.clear();
+ device_backend_types_.clear();
wire_server_ = nullptr;
+
+ destroyed_ = true;
}
ContextResult WebGPUDecoderImpl::Initialize() {
+ if (force_webgpu_compat_) {
+ gl_surface_ = new gl::SurfacelessEGL(gfx::Size(1, 1));
+ gl::GLContextAttribs attribs;
+ attribs.client_major_es_version = 3;
+ attribs.client_minor_es_version = 1;
+ gl_context_ = new gl::GLContextEGL(nullptr);
+ gl_context_->Initialize(gl_surface_.get(), attribs);
+ gl_context_->MakeCurrent(gl_surface_.get());
+ }
DiscoverAdapters();
return ContextResult::kSuccess;
}
@@ -530,12 +580,15 @@ error::Error WebGPUDecoderImpl::InitDawnDevice(
int32_t requested_adapter_index,
uint32_t device_id,
uint32_t device_generation,
- const WGPUDeviceProperties& request_device_properties) {
+ const WGPUDeviceProperties& request_device_properties,
+ bool* creation_succeeded) {
DCHECK_LE(0, requested_adapter_index);
DCHECK_LT(static_cast<size_t>(requested_adapter_index),
dawn_adapters_.size());
+ *creation_succeeded = false;
+
dawn_native::DeviceDescriptor device_descriptor;
if (request_device_properties.textureCompressionBC) {
device_descriptor.requiredExtensions.push_back("texture_compression_bc");
@@ -552,6 +605,18 @@ error::Error WebGPUDecoderImpl::InitDawnDevice(
if (request_device_properties.depthClamping) {
device_descriptor.requiredExtensions.push_back("depth_clamping");
}
+ if (request_device_properties.invalidExtension) {
+ device_descriptor.requiredExtensions.push_back("invalid_extension");
+ }
+
+ // If a new toggle is added here, ForceDawnTogglesForWebGPU(), which
+ // collects info for about:gpu, should be updated as well.
+
+ // Disallow usage of SPIR-V by default for security (we only ensure that
+ // WGSL is secure), unless --enable-unsafe-webgpu is used.
+ if (!allow_spirv_) {
+ device_descriptor.forceEnabledToggles.push_back("disallow_spirv");
+ }
for (const std::string& toggles : force_enabled_toggles_) {
device_descriptor.forceEnabledToggles.push_back(toggles.c_str());
@@ -563,7 +628,9 @@ error::Error WebGPUDecoderImpl::InitDawnDevice(
WGPUDevice wgpu_device =
dawn_adapters_[requested_adapter_index].CreateDevice(&device_descriptor);
if (wgpu_device == nullptr) {
- return error::kInvalidArguments;
+ // Device creation failed, but this is not a fatal error that needs to
+ // treat the GPU process as lost.
+ return error::kNoError;
}
if (!wire_server_->InjectDevice(wgpu_device, device_id, device_generation)) {
@@ -578,11 +645,25 @@ error::Error WebGPUDecoderImpl::InitDawnDevice(
// checked in PerformPollingWork to tick all the live devices and remove all
// the dead ones.
known_devices_.emplace_back(device_id, device_generation);
+ dawn_native::BackendType type =
+ dawn_adapters_[requested_adapter_index].GetBackendType();
+ device_backend_types_[device_id] = ToWGPUBackendType(type);
+ *creation_succeeded = true;
return error::kNoError;
}
void WebGPUDecoderImpl::DiscoverAdapters() {
+#if BUILDFLAG(DAWN_ENABLE_BACKEND_OPENGLES)
+ if (force_webgpu_compat_) {
+ auto getProc = [](const char* pname) {
+ return reinterpret_cast<void*>(eglGetProcAddress(pname));
+ };
+ dawn_native::opengl::AdapterDiscoveryOptionsES optionsES;
+ optionsES.getProc = getProc;
+ dawn_instance_->DiscoverAdapters(&optionsES);
+ }
+#endif
#if defined(OS_WIN)
Microsoft::WRL::ComPtr<ID3D11Device> d3d11_device =
gl::QueryD3D11DeviceObjectFromANGLE();
@@ -603,8 +684,15 @@ void WebGPUDecoderImpl::DiscoverAdapters() {
std::vector<dawn_native::Adapter> adapters = dawn_instance_->GetAdapters();
for (const dawn_native::Adapter& adapter : adapters) {
- if (adapter.GetBackendType() != dawn_native::BackendType::Null &&
- adapter.GetBackendType() != dawn_native::BackendType::OpenGL) {
+ if (!adapter.SupportsExternalImages()) {
+ continue;
+ }
+ if (force_webgpu_compat_) {
+ if (adapter.GetBackendType() == dawn_native::BackendType::OpenGLES) {
+ dawn_adapters_.push_back(adapter);
+ }
+ } else if (adapter.GetBackendType() != dawn_native::BackendType::Null &&
+ adapter.GetBackendType() != dawn_native::BackendType::OpenGL) {
dawn_adapters_.push_back(adapter);
}
}
@@ -697,6 +785,12 @@ error::Error WebGPUDecoderImpl::DoCommands(unsigned int num_commands,
const unsigned int arg_count = size - 1;
unsigned int command_index = command - kFirstWebGPUCommand;
if (command_index < base::size(command_info)) {
+ // Prevent all further WebGPU commands from being processed if the server
+ // is destroyed.
+ if (destroyed_) {
+ result = error::kLostContext;
+ break;
+ }
const CommandInfo& info = command_info[command_index];
unsigned int info_arg_count = static_cast<unsigned int>(info.arg_count);
if ((info.arg_flags == cmd::kFixed && arg_count == info_arg_count) ||
@@ -742,7 +836,21 @@ void WebGPUDecoderImpl::SendAdapterProperties(
size_t serialized_adapter_properties_size = 0;
if (adapter) {
+ // Only allow unsafe APIs if the disallow_unsafe_apis toggle is explicitly
+ // disabled.
+ const bool allow_unsafe_apis =
+ std::find(force_disabled_toggles_.begin(),
+ force_disabled_toggles_.end(),
+ "disallow_unsafe_apis") != force_disabled_toggles_.end();
+
adapter_properties = adapter.GetAdapterProperties();
+
+ // Don't surface extensions that are unsafe. A malicious client could still
+ // request them, so Dawn must also validate that they cannot be used if
+ // DisallowUnsafeAPIs is enabled.
+ adapter_properties.timestampQuery &= allow_unsafe_apis;
+ adapter_properties.pipelineStatisticsQuery &= allow_unsafe_apis;
+
serialized_adapter_properties_size =
dawn_wire::SerializedWGPUDevicePropertiesSize(&adapter_properties);
} else {
@@ -823,12 +931,12 @@ error::Error WebGPUDecoderImpl::HandleRequestAdapter(
static_cast<DawnRequestAdapterSerial>(c.request_adapter_serial);
if (gr_context_type_ != GrContextType::kVulkan) {
-#if defined(OS_LINUX) || defined(OS_CHROMEOS)
+#if defined(OS_LINUX)
SendAdapterProperties(request_adapter_serial, -1, nullptr,
"WebGPU on Linux requires command-line flag "
"--enable-features=Vulkan,UseSkiaRenderer");
return error::kNoError;
-#endif // defined(OS_LINUX) || defined(OS_CHROMEOS)
+#endif // defined(OS_LINUX)
}
int32_t requested_adapter_index = GetPreferredAdapterIndex(power_preference);
@@ -885,10 +993,11 @@ error::Error WebGPUDecoderImpl::HandleRequestDevice(
}
}
- error::Error init_device_error = InitDawnDevice(
- adapter_service_id, device_id, device_generation, device_properties);
- SendRequestedDeviceInfo(request_device_serial,
- !error::IsError(init_device_error));
+ bool creation_succeeded;
+ error::Error init_device_error =
+ InitDawnDevice(adapter_service_id, device_id, device_generation,
+ device_properties, &creation_succeeded);
+ SendRequestedDeviceInfo(request_device_serial, creation_succeeded);
return init_device_error;
}
@@ -957,7 +1066,7 @@ error::Error WebGPUDecoderImpl::HandleAssociateMailboxImmediate(
static constexpr uint32_t kAllowedTextureUsages = static_cast<uint32_t>(
WGPUTextureUsage_CopySrc | WGPUTextureUsage_CopyDst |
- WGPUTextureUsage_Sampled | WGPUTextureUsage_RenderAttachment);
+ WGPUTextureUsage_TextureBinding | WGPUTextureUsage_RenderAttachment);
if (usage & ~kAllowedTextureUsages) {
DLOG(ERROR) << "AssociateMailbox: Invalid usage";
return error::kInvalidArguments;
@@ -970,7 +1079,8 @@ error::Error WebGPUDecoderImpl::HandleAssociateMailboxImmediate(
// Create a WGPUTexture from the mailbox.
std::unique_ptr<SharedImageRepresentationDawn> shared_image =
- shared_image_representation_factory_->ProduceDawn(mailbox, device);
+ shared_image_representation_factory_->ProduceDawn(
+ mailbox, device, device_backend_types_[device_id]);
if (!shared_image) {
DLOG(ERROR) << "AssociateMailbox: Couldn't produce shared image";
return error::kInvalidArguments;
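The unsafe-API gating in SendAdapterProperties above reduces to a membership test on the force-disabled toggle list. A standalone sketch of that check; the function name is illustrative:

#include <algorithm>
#include <string>
#include <vector>

// Hedged sketch: timestamp and pipeline-statistics queries are surfaced
// only when "disallow_unsafe_apis" was explicitly force-disabled.
bool AllowUnsafeApis(const std::vector<std::string>& force_disabled_toggles) {
  return std::find(force_disabled_toggles.begin(),
                   force_disabled_toggles.end(),
                   "disallow_unsafe_apis") != force_disabled_toggles.end();
}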
diff --git a/chromium/gpu/command_buffer/service/wrapped_sk_image.cc b/chromium/gpu/command_buffer/service/wrapped_sk_image.cc
index 809c631770b..a2be2318442 100644
--- a/chromium/gpu/command_buffer/service/wrapped_sk_image.cc
+++ b/chromium/gpu/command_buffer/service/wrapped_sk_image.cc
@@ -13,6 +13,7 @@
#include "build/build_config.h"
#include "components/viz/common/resources/resource_format_utils.h"
#include "gpu/command_buffer/common/shared_image_trace_utils.h"
+#include "gpu/command_buffer/common/shared_image_usage.h"
#include "gpu/command_buffer/service/feature_info.h"
#include "gpu/command_buffer/service/mailbox_manager.h"
#include "gpu/command_buffer/service/shared_context_state.h"
@@ -81,7 +82,7 @@ class WrappedSkImage : public ClearTrackingSharedImageBacking {
SkPixmap pixmap(info, shared_memory_wrapper_.GetMemory(),
shared_memory_wrapper_.GetStride());
if (!context_state_->gr_context()->updateBackendTexture(
- backend_texture_, &pixmap, /*levels=*/1, nullptr, nullptr)) {
+ backend_texture_, &pixmap, /*numLevels=*/1, nullptr, nullptr)) {
DLOG(ERROR) << "Failed to update WrappedSkImage texture";
}
}
@@ -177,9 +178,10 @@ class WrappedSkImage : public ClearTrackingSharedImageBacking {
}
bool InitializeGMB(const SkImageInfo& info,
- SharedMemoryRegionWrapper shm_wrapper) {
- if (Initialize(info, shm_wrapper.GetMemoryAsSpan(),
- shm_wrapper.GetStride())) {
+ SharedMemoryRegionWrapper shm_wrapper,
+ GrMipMapped mipmap) {
+ if (Initialize(info, shm_wrapper.GetMemoryAsSpan(), shm_wrapper.GetStride(),
+ mipmap)) {
shared_memory_wrapper_ = std::move(shm_wrapper);
return true;
}
@@ -193,7 +195,8 @@ class WrappedSkImage : public ClearTrackingSharedImageBacking {
// data must be provided since updating compressed textures is not supported.
bool Initialize(const SkImageInfo& info,
base::span<const uint8_t> pixels,
- size_t stride) {
+ size_t stride,
+ GrMipMapped mipmap) {
if (context_state_->context_lost())
return false;
@@ -229,10 +232,10 @@ class WrappedSkImage : public ClearTrackingSharedImageBacking {
// We don't do this on release builds because there is a slight overhead.
backend_texture_ = context_state_->gr_context()->createBackendTexture(
size().width(), size().height(), GetSkColorType(), SkColors::kBlue,
- GrMipMapped::kNo, GrRenderable::kYes, GrProtected::kNo);
+ mipmap, GrRenderable::kYes, GrProtected::kNo);
#else
backend_texture_ = context_state_->gr_context()->createBackendTexture(
- size().width(), size().height(), GetSkColorType(), GrMipMapped::kNo,
+ size().width(), size().height(), GetSkColorType(), mipmap,
GrRenderable::kYes, GrProtected::kNo);
#endif
@@ -421,7 +424,9 @@ std::unique_ptr<SharedImageBacking> WrappedSkImageFactory::CreateSharedImage(
std::unique_ptr<WrappedSkImage> texture(
new WrappedSkImage(mailbox, format, size, color_space, surface_origin,
alpha_type, usage, estimated_size, context_state_));
- if (!texture->Initialize(info, data, /*stride=*/0))
+ GrMipMapped mipmap =
+ usage & SHARED_IMAGE_USAGE_MIPMAP ? GrMipMapped::kYes : GrMipMapped::kNo;
+ if (!texture->Initialize(info, data, /*stride=*/0, mipmap))
return nullptr;
return texture;
}
@@ -440,7 +445,8 @@ std::unique_ptr<SharedImageBacking> WrappedSkImageFactory::CreateSharedImage(
uint32_t usage) {
DCHECK_EQ(handle.type, gfx::SHARED_MEMORY_BUFFER);
- if (!gpu::IsImageSizeValidForGpuMemoryBufferFormat(size, buffer_format)) {
+ if (!gpu::IsImageSizeValidForGpuMemoryBufferFormat(size, buffer_format,
+ plane)) {
DLOG(ERROR) << "Invalid image size for format.";
return nullptr;
}
@@ -468,7 +474,9 @@ std::unique_ptr<SharedImageBacking> WrappedSkImageFactory::CreateSharedImage(
std::unique_ptr<WrappedSkImage> texture(new WrappedSkImage(
mailbox, format, size, color_space, surface_origin, alpha_type, usage,
info.computeMinByteSize(), context_state_));
- if (!texture->InitializeGMB(info, std::move(shm_wrapper)))
+ GrMipMapped mipmap = (usage & SHARED_IMAGE_USAGE_MIPMAP) ? GrMipMapped::kYes
+ : GrMipMapped::kNo;
+ if (!texture->InitializeGMB(info, std::move(shm_wrapper), mipmap))
return nullptr;
return texture;
@@ -479,6 +487,45 @@ bool WrappedSkImageFactory::CanImportGpuMemoryBuffer(
return memory_buffer_type == gfx::SHARED_MEMORY_BUFFER;
}
+bool WrappedSkImageFactory::CanUseWrappedSkImage(
+ uint32_t usage,
+ GrContextType gr_context_type) const {
+ // Ignore the mipmap usage bit for this check.
+ usage &= ~SHARED_IMAGE_USAGE_MIPMAP;
+ auto kWrappedSkImageUsage = SHARED_IMAGE_USAGE_DISPLAY |
+ SHARED_IMAGE_USAGE_RASTER |
+ SHARED_IMAGE_USAGE_OOP_RASTERIZATION;
+
+ if (gr_context_type != GrContextType::kGL) {
+ // For SkiaRenderer/Vulkan+Dawn use WrappedSkImage if the usage is only
+ // raster and/or display.
+ return (usage & kWrappedSkImageUsage) && !(usage & ~kWrappedSkImageUsage);
+ } else {
+ // For SkiaRenderer/GL only use WrappedSkImages for OOP-R because
+ // CopySubTexture() doesn't use Skia. https://crbug.com/984045
+ return (usage == kWrappedSkImageUsage) ||
+ (usage == SHARED_IMAGE_USAGE_DISPLAY);
+ }
+}
+
+bool WrappedSkImageFactory::IsSupported(uint32_t usage,
+ viz::ResourceFormat format,
+ bool thread_safe,
+ gfx::GpuMemoryBufferType gmb_type,
+ GrContextType gr_context_type,
+ bool* allow_legacy_mailbox,
+ bool is_pixel_used) {
+ if (!CanUseWrappedSkImage(usage, gr_context_type) || thread_safe) {
+ return false;
+ }
+ if (gmb_type != gfx::EMPTY_BUFFER && !CanImportGpuMemoryBuffer(gmb_type)) {
+ return false;
+ }
+
+ *allow_legacy_mailbox = false;
+ return true;
+}
+
std::unique_ptr<SharedImageRepresentationSkia> WrappedSkImage::ProduceSkia(
SharedImageManager* manager,
MemoryTypeTracker* tracker,
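Both CreateSharedImage() overloads above derive the GrMipMapped flag from the same usage bit; a one-line sketch of that mapping, with a hypothetical helper name:

#include <cstdint>

// Hedged sketch: SHARED_IMAGE_USAGE_MIPMAP is the real usage flag; the
// helper itself is hypothetical.
GrMipMapped MipMappedFromUsage(uint32_t usage) {
  return (usage & gpu::SHARED_IMAGE_USAGE_MIPMAP) ? GrMipMapped::kYes
                                                  : GrMipMapped::kNo;
}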
diff --git a/chromium/gpu/command_buffer/service/wrapped_sk_image.h b/chromium/gpu/command_buffer/service/wrapped_sk_image.h
index 21f2fe32b80..55a66c0bb14 100644
--- a/chromium/gpu/command_buffer/service/wrapped_sk_image.h
+++ b/chromium/gpu/command_buffer/service/wrapped_sk_image.h
@@ -60,10 +60,19 @@ class GPU_GLES2_EXPORT WrappedSkImageFactory
GrSurfaceOrigin surface_origin,
SkAlphaType alpha_type,
uint32_t usage) override;
- bool CanImportGpuMemoryBuffer(
- gfx::GpuMemoryBufferType memory_buffer_type) override;
+ bool IsSupported(uint32_t usage,
+ viz::ResourceFormat format,
+ bool thread_safe,
+ gfx::GpuMemoryBufferType gmb_type,
+ GrContextType gr_context_type,
+ bool* allow_legacy_mailbox,
+ bool is_pixel_used) override;
private:
+ bool CanImportGpuMemoryBuffer(gfx::GpuMemoryBufferType memory_buffer_type);
+ bool CanUseWrappedSkImage(uint32_t usage,
+ GrContextType gr_context_type) const;
+
scoped_refptr<SharedContextState> context_state_;
DISALLOW_COPY_AND_ASSIGN(WrappedSkImageFactory);