summaryrefslogtreecommitdiff
path: root/chromium/gpu/command_buffer/service
diff options
context:
space:
mode:
Diffstat (limited to 'chromium/gpu/command_buffer/service')
-rw-r--r--chromium/gpu/command_buffer/service/BUILD.gn3
-rw-r--r--chromium/gpu/command_buffer/service/abstract_texture_impl_shared_context_state.cc8
-rw-r--r--chromium/gpu/command_buffer/service/ahardwarebuffer_utils.cc134
-rw-r--r--chromium/gpu/command_buffer/service/ahardwarebuffer_utils.h53
-rw-r--r--chromium/gpu/command_buffer/service/buffer_manager.h2
-rw-r--r--chromium/gpu/command_buffer/service/client_service_map.h2
-rw-r--r--chromium/gpu/command_buffer/service/context_group.cc1
-rw-r--r--chromium/gpu/command_buffer/service/context_state.h3
-rw-r--r--chromium/gpu/command_buffer/service/copy_texture_chromium_mock.h45
-rw-r--r--chromium/gpu/command_buffer/service/external_vk_image_backing.cc279
-rw-r--r--chromium/gpu/command_buffer/service/external_vk_image_backing.h24
-rw-r--r--chromium/gpu/command_buffer/service/external_vk_image_gl_representation.cc35
-rw-r--r--chromium/gpu/command_buffer/service/external_vk_image_skia_representation.cc4
-rw-r--r--chromium/gpu/command_buffer/service/feature_info.cc57
-rw-r--r--chromium/gpu/command_buffer/service/feature_info.h3
-rw-r--r--chromium/gpu/command_buffer/service/gl_context_virtual.cc8
-rw-r--r--chromium/gpu/command_buffer/service/gl_stream_texture_image.h19
-rw-r--r--chromium/gpu/command_buffer/service/gl_stream_texture_image_stub.h1
-rw-r--r--chromium/gpu/command_buffer/service/gles2_cmd_copy_texture_chromium.cc138
-rw-r--r--chromium/gpu/command_buffer/service/gles2_cmd_copy_texture_chromium.h51
-rw-r--r--chromium/gpu/command_buffer/service/gles2_cmd_decoder.cc138
-rw-r--r--chromium/gpu/command_buffer/service/gles2_cmd_decoder.h2
-rw-r--r--chromium/gpu/command_buffer/service/gles2_cmd_decoder_autogen.h172
-rw-r--r--chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough.cc27
-rw-r--r--chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough.h11
-rw-r--r--chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough_doer_prototypes.h23
-rw-r--r--chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough_doers.cc93
-rw-r--r--chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough_handlers_autogen.cc192
-rw-r--r--chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_1.cc15
-rw-r--r--chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_1_autogen.h53
-rw-r--r--chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_2.cc20
-rw-r--r--chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_2_autogen.h62
-rw-r--r--chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_3.cc6
-rw-r--r--chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_3_autogen.h9
-rw-r--r--chromium/gpu/command_buffer/service/gles2_cmd_validation_implementation_autogen.h15
-rw-r--r--chromium/gpu/command_buffer/service/gpu_command_buffer_memory_tracker.cc8
-rw-r--r--chromium/gpu/command_buffer/service/image_reader_gl_owner.cc125
-rw-r--r--chromium/gpu/command_buffer/service/image_reader_gl_owner.h9
-rw-r--r--chromium/gpu/command_buffer/service/memory_tracking.h2
-rw-r--r--chromium/gpu/command_buffer/service/mock_texture_owner.h9
-rw-r--r--chromium/gpu/command_buffer/service/mocks.h1
-rw-r--r--chromium/gpu/command_buffer/service/program_manager.h2
-rw-r--r--chromium/gpu/command_buffer/service/raster_decoder.cc151
-rw-r--r--chromium/gpu/command_buffer/service/raster_decoder_unittest_base.cc6
-rw-r--r--chromium/gpu/command_buffer/service/raster_decoder_unittest_context_lost.cc31
-rw-r--r--chromium/gpu/command_buffer/service/sampler_manager.h1
-rw-r--r--chromium/gpu/command_buffer/service/scheduler.cc1
-rw-r--r--chromium/gpu/command_buffer/service/service_font_manager.cc1
-rw-r--r--chromium/gpu/command_buffer/service/service_utils.cc22
-rw-r--r--chromium/gpu/command_buffer/service/service_utils.h6
-rw-r--r--chromium/gpu/command_buffer/service/shader_manager.h2
-rw-r--r--chromium/gpu/command_buffer/service/shared_context_state.cc131
-rw-r--r--chromium/gpu/command_buffer/service/shared_context_state.h29
-rw-r--r--chromium/gpu/command_buffer/service/shared_image_backing_egl_image.cc11
-rw-r--r--chromium/gpu/command_buffer/service/shared_image_backing_factory_ahardwarebuffer.cc124
-rw-r--r--chromium/gpu/command_buffer/service/shared_image_backing_factory_gl_texture.cc1313
-rw-r--r--chromium/gpu/command_buffer/service/shared_image_backing_factory_gl_texture.h18
-rw-r--r--chromium/gpu/command_buffer/service/shared_image_backing_factory_gl_texture_internal.h296
-rw-r--r--chromium/gpu/command_buffer/service/shared_image_backing_factory_gl_texture_unittest.cc16
-rw-r--r--chromium/gpu/command_buffer/service/shared_image_backing_factory_iosurface.h22
-rw-r--r--chromium/gpu/command_buffer/service/shared_image_backing_factory_iosurface.mm257
-rw-r--r--chromium/gpu/command_buffer/service/shared_image_factory.cc72
-rw-r--r--chromium/gpu/command_buffer/service/shared_image_factory.h11
-rw-r--r--chromium/gpu/command_buffer/service/shared_image_manager.cc47
-rw-r--r--chromium/gpu/command_buffer/service/shared_image_representation.cc16
-rw-r--r--chromium/gpu/command_buffer/service/shared_image_representation_gl_ozone.cc8
-rw-r--r--chromium/gpu/command_buffer/service/shared_image_representation_skia_gl.cc3
-rw-r--r--chromium/gpu/command_buffer/service/shared_image_video.cc24
-rw-r--r--chromium/gpu/command_buffer/service/shared_memory_region_wrapper.cc127
-rw-r--r--chromium/gpu/command_buffer/service/shared_memory_region_wrapper.h48
-rw-r--r--chromium/gpu/command_buffer/service/surface_texture_gl_owner.cc21
-rw-r--r--chromium/gpu/command_buffer/service/surface_texture_gl_owner.h4
-rw-r--r--chromium/gpu/command_buffer/service/sync_point_manager.h2
-rw-r--r--chromium/gpu/command_buffer/service/test_shared_image_backing.cc8
-rw-r--r--chromium/gpu/command_buffer/service/texture_manager.h156
-rw-r--r--chromium/gpu/command_buffer/service/texture_owner.h8
-rw-r--r--chromium/gpu/command_buffer/service/vertex_array_manager.h1
-rw-r--r--chromium/gpu/command_buffer/service/vertex_attrib_manager.h2
-rw-r--r--chromium/gpu/command_buffer/service/webgpu_decoder_impl.cc5
-rw-r--r--chromium/gpu/command_buffer/service/wrapped_sk_image.cc122
80 files changed, 3130 insertions, 1857 deletions
diff --git a/chromium/gpu/command_buffer/service/BUILD.gn b/chromium/gpu/command_buffer/service/BUILD.gn
index b6ad2bd114b..16cb6065726 100644
--- a/chromium/gpu/command_buffer/service/BUILD.gn
+++ b/chromium/gpu/command_buffer/service/BUILD.gn
@@ -227,6 +227,7 @@ target(link_target_type, "gles2_sources") {
"shared_image_backing_factory.h",
"shared_image_backing_factory_gl_texture.cc",
"shared_image_backing_factory_gl_texture.h",
+ "shared_image_backing_factory_gl_texture_internal.h",
"shared_image_factory.cc",
"shared_image_factory.h",
"shared_image_manager.cc",
@@ -235,6 +236,8 @@ target(link_target_type, "gles2_sources") {
"shared_image_representation.h",
"shared_image_representation_skia_gl.cc",
"shared_image_representation_skia_gl.h",
+ "shared_memory_region_wrapper.cc",
+ "shared_memory_region_wrapper.h",
"skia_utils.cc",
"skia_utils.h",
"texture_definition.cc",
diff --git a/chromium/gpu/command_buffer/service/abstract_texture_impl_shared_context_state.cc b/chromium/gpu/command_buffer/service/abstract_texture_impl_shared_context_state.cc
index 9d0d7f74222..43337b5c2ba 100644
--- a/chromium/gpu/command_buffer/service/abstract_texture_impl_shared_context_state.cc
+++ b/chromium/gpu/command_buffer/service/abstract_texture_impl_shared_context_state.cc
@@ -46,10 +46,10 @@ AbstractTextureImplOnSharedContext::AbstractTextureImplOnSharedContext(
texture_ = new gpu::gles2::Texture(service_id);
texture_->SetLightweightRef();
texture_->SetTarget(target, 1);
- texture_->sampler_state_.min_filter = GL_LINEAR;
- texture_->sampler_state_.mag_filter = GL_LINEAR;
- texture_->sampler_state_.wrap_t = GL_CLAMP_TO_EDGE;
- texture_->sampler_state_.wrap_s = GL_CLAMP_TO_EDGE;
+ texture_->set_min_filter(GL_LINEAR);
+ texture_->set_mag_filter(GL_LINEAR);
+ texture_->set_wrap_t(GL_CLAMP_TO_EDGE);
+ texture_->set_wrap_s(GL_CLAMP_TO_EDGE);
gfx::Rect cleared_rect;
texture_->SetLevelInfo(target, 0, internal_format, width, height, depth,
border, format, type, cleared_rect);
diff --git a/chromium/gpu/command_buffer/service/ahardwarebuffer_utils.cc b/chromium/gpu/command_buffer/service/ahardwarebuffer_utils.cc
index 5224766c14d..a53dae3bdcd 100644
--- a/chromium/gpu/command_buffer/service/ahardwarebuffer_utils.cc
+++ b/chromium/gpu/command_buffer/service/ahardwarebuffer_utils.cc
@@ -6,10 +6,102 @@
#include <android/hardware_buffer.h>
+#include "base/android/scoped_hardware_buffer_handle.h"
#include "base/check.h"
#include "base/notreached.h"
+#include "components/viz/common/gpu/vulkan_context_provider.h"
+#include "components/viz/common/resources/resource_format_utils.h"
+#include "gpu/command_buffer/service/shared_context_state.h"
+#include "gpu/command_buffer/service/texture_manager.h"
+#include "gpu/vulkan/vulkan_image.h"
+#include "ui/gfx/color_space.h"
+#include "ui/gfx/geometry/size.h"
+#include "ui/gl/gl_gl_api_implementation.h"
+#include "ui/gl/gl_image_ahardwarebuffer.h"
+#include "ui/gl/scoped_binders.h"
namespace gpu {
+namespace {
+
+gles2::Texture* MakeGLTexture(
+ GLenum target,
+ GLuint service_id,
+ scoped_refptr<gl::GLImageAHardwareBuffer> egl_image,
+ const gfx::Size& size,
+ const gfx::Rect& cleared_rect) {
+ auto* texture = new gles2::Texture(service_id);
+ texture->SetLightweightRef();
+ texture->SetTarget(target, 1);
+ texture->set_min_filter(GL_LINEAR);
+ texture->set_mag_filter(GL_LINEAR);
+ texture->set_wrap_t(GL_CLAMP_TO_EDGE);
+ texture->set_wrap_s(GL_CLAMP_TO_EDGE);
+
+ texture->SetLevelInfo(target, 0, egl_image->GetInternalFormat(), size.width(),
+ size.height(), 1, 0, egl_image->GetDataFormat(),
+ egl_image->GetDataType(), cleared_rect);
+ texture->SetLevelImage(target, 0, egl_image.get(), gles2::Texture::BOUND);
+ texture->SetImmutable(true, false);
+ return texture;
+}
+
+scoped_refptr<gles2::TexturePassthrough> MakeGLTexturePassthrough(
+ GLenum target,
+ GLuint service_id,
+ scoped_refptr<gl::GLImageAHardwareBuffer> egl_image,
+ const size_t estimated_size) {
+ auto passthrough_texture =
+ base::MakeRefCounted<gles2::TexturePassthrough>(service_id, target);
+ passthrough_texture->SetEstimatedSize(estimated_size);
+ passthrough_texture->SetLevelImage(target, 0, egl_image.get());
+ passthrough_texture->set_is_bind_pending(false);
+ return passthrough_texture;
+}
+
+void GenGLTextureInternal(
+ AHardwareBuffer* buffer,
+ GLenum target,
+ const gfx::ColorSpace& color_space,
+ const gfx::Size& size,
+ const size_t estimated_size,
+ const gfx::Rect& cleared_rect,
+ scoped_refptr<gles2::TexturePassthrough>* passthrough_texture,
+ gles2::Texture** texture) {
+ gl::GLApi* api = gl::g_current_gl_context;
+ GLuint service_id = 0;
+ api->glGenTexturesFn(1, &service_id);
+ gl::ScopedTextureBinder texture_binder(target, service_id);
+
+ api->glTexParameteriFn(target, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
+ api->glTexParameteriFn(target, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
+ api->glTexParameteriFn(target, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
+ api->glTexParameteriFn(target, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
+
+ // Create an egl image using AHardwareBuffer.
+ auto egl_image = base::MakeRefCounted<gl::GLImageAHardwareBuffer>(size);
+ if (!egl_image->Initialize(buffer, false)) {
+ LOG(ERROR) << "Failed to create EGL image";
+ api->glDeleteTexturesFn(1, &service_id);
+ return;
+ }
+
+ if (!egl_image->BindTexImage(target)) {
+ LOG(ERROR) << "Failed to bind egl image";
+ api->glDeleteTexturesFn(1, &service_id);
+ return;
+ }
+ egl_image->SetColorSpace(color_space);
+
+ if (passthrough_texture) {
+ *passthrough_texture = MakeGLTexturePassthrough(
+ target, service_id, std::move(egl_image), estimated_size);
+ } else {
+ *texture = MakeGLTexture(target, service_id, std::move(egl_image), size,
+ cleared_rect);
+ }
+}
+
+} // namespace
bool AHardwareBufferSupportedFormat(viz::ResourceFormat format) {
switch (format) {
@@ -46,4 +138,46 @@ unsigned int AHardwareBufferFormat(viz::ResourceFormat format) {
}
}
+gles2::Texture* GenGLTexture(AHardwareBuffer* buffer,
+ GLenum target,
+ const gfx::ColorSpace& color_space,
+ const gfx::Size& size,
+ const size_t estimated_size,
+ const gfx::Rect& cleared_rect) {
+ gles2::Texture* texture = nullptr;
+ GenGLTextureInternal(buffer, target, color_space, size, estimated_size,
+ cleared_rect, nullptr /* passthrough_texture */,
+ &texture);
+ return texture;
+}
+
+scoped_refptr<gles2::TexturePassthrough> GenGLTexturePassthrough(
+ AHardwareBuffer* buffer,
+ GLenum target,
+ const gfx::ColorSpace& color_space,
+ const gfx::Size& size,
+ const size_t estimated_size,
+ const gfx::Rect& cleared_rect) {
+ scoped_refptr<gles2::TexturePassthrough> passthrough_texture;
+ GenGLTextureInternal(buffer, target, color_space, size, estimated_size,
+ cleared_rect, &passthrough_texture,
+ nullptr /* texture */);
+ return passthrough_texture;
+}
+
+std::unique_ptr<VulkanImage> CreateVkImageFromAhbHandle(
+ base::android::ScopedHardwareBufferHandle ahb_handle,
+ SharedContextState* context_state,
+ const gfx::Size& size,
+ const viz::ResourceFormat& format) {
+ DCHECK(context_state);
+ DCHECK(context_state->GrContextIsVulkan());
+
+ auto* device_queue = context_state->vk_context_provider()->GetDeviceQueue();
+ gfx::GpuMemoryBufferHandle gmb_handle(std::move(ahb_handle));
+ return VulkanImage::CreateFromGpuMemoryBufferHandle(
+ device_queue, std::move(gmb_handle), size, ToVkFormat(format),
+ 0 /* usage */);
+}
+
} // namespace gpu
diff --git a/chromium/gpu/command_buffer/service/ahardwarebuffer_utils.h b/chromium/gpu/command_buffer/service/ahardwarebuffer_utils.h
index 77a32393676..a3106ae53a5 100644
--- a/chromium/gpu/command_buffer/service/ahardwarebuffer_utils.h
+++ b/chromium/gpu/command_buffer/service/ahardwarebuffer_utils.h
@@ -5,10 +5,36 @@
#ifndef GPU_COMMAND_BUFFER_SERVICE_AHARDWAREBUFFER_UTILS_H_
#define GPU_COMMAND_BUFFER_SERVICE_AHARDWAREBUFFER_UTILS_H_
+#include <memory>
+
+#include "base/memory/scoped_refptr.h"
#include "components/viz/common/resources/resource_format.h"
#include "gpu/gpu_gles2_export.h"
+extern "C" typedef struct AHardwareBuffer AHardwareBuffer;
+
+typedef unsigned int GLenum;
+
+namespace base {
+namespace android {
+class ScopedHardwareBufferHandle;
+} // namespace android
+} // namespace base
+
+namespace gfx {
+class ColorSpace;
+class Rect;
+class Size;
+} // namespace gfx
+
namespace gpu {
+class SharedContextState;
+class VulkanImage;
+
+namespace gles2 {
+class Texture;
+class TexturePassthrough;
+} // namespace gles2
// TODO(vikassoni): In future we will need to expose the set of formats and
// constraints (e.g. max size) to the clients somehow that are available for
@@ -25,6 +51,33 @@ AHardwareBufferSupportedFormat(viz::ResourceFormat format);
// Returns the corresponding AHardwareBuffer format.
unsigned int GPU_GLES2_EXPORT AHardwareBufferFormat(viz::ResourceFormat format);
+// Generates a gles2 texture from AHB. This method must be called with a current
+// GLContext which will be used to create the Texture. This method adds a
+// lightweight ref on the Texture which the caller is responsible for releasing.
+gles2::Texture* GenGLTexture(AHardwareBuffer* buffer,
+ GLenum target,
+ const gfx::ColorSpace& color_space,
+ const gfx::Size& size,
+ const size_t estimated_size,
+ const gfx::Rect& cleared_rect);
+
+// Generates a passthrough texture from AHB. This method must be called with a
+// current GLContext which will be used to create the Texture.
+scoped_refptr<gles2::TexturePassthrough> GenGLTexturePassthrough(
+ AHardwareBuffer* buffer,
+ GLenum target,
+ const gfx::ColorSpace& color_space,
+ const gfx::Size& size,
+ const size_t estimated_size,
+ const gfx::Rect& cleared_rect);
+
+// Create a vulkan image from the AHB handle.
+std::unique_ptr<VulkanImage> CreateVkImageFromAhbHandle(
+ base::android::ScopedHardwareBufferHandle ahb_handle,
+ SharedContextState* context_state,
+ const gfx::Size& size,
+ const viz::ResourceFormat& format);
+
} // namespace gpu
#endif // GPU_COMMAND_BUFFER_SERVICE_AHARDWAREBUFFER_UTILS_H_
diff --git a/chromium/gpu/command_buffer/service/buffer_manager.h b/chromium/gpu/command_buffer/service/buffer_manager.h
index 86c3561104a..67b042d99af 100644
--- a/chromium/gpu/command_buffer/service/buffer_manager.h
+++ b/chromium/gpu/command_buffer/service/buffer_manager.h
@@ -14,7 +14,7 @@
#include <unordered_map>
#include <vector>
-#include "base/logging.h"
+#include "base/check_op.h"
#include "base/macros.h"
#include "base/memory/ref_counted.h"
#include "gpu/command_buffer/common/buffer.h"
diff --git a/chromium/gpu/command_buffer/service/client_service_map.h b/chromium/gpu/command_buffer/service/client_service_map.h
index d19484ee1a7..56caf0d3b2c 100644
--- a/chromium/gpu/command_buffer/service/client_service_map.h
+++ b/chromium/gpu/command_buffer/service/client_service_map.h
@@ -9,7 +9,7 @@
#include <unordered_map>
#include <vector>
-#include "base/logging.h"
+#include "base/check.h"
namespace gpu {
diff --git a/chromium/gpu/command_buffer/service/context_group.cc b/chromium/gpu/command_buffer/service/context_group.cc
index 51452b9ce43..f05b4688919 100644
--- a/chromium/gpu/command_buffer/service/context_group.cc
+++ b/chromium/gpu/command_buffer/service/context_group.cc
@@ -58,6 +58,7 @@ DisallowedFeatures AdjustDisallowedFeatures(
adjusted_disallowed_features.ext_texture_filter_anisotropic = true;
adjusted_disallowed_features.ext_float_blend = true;
adjusted_disallowed_features.oes_fbo_render_mipmap = true;
+ adjusted_disallowed_features.oes_draw_buffers_indexed = true;
}
return adjusted_disallowed_features;
}
diff --git a/chromium/gpu/command_buffer/service/context_state.h b/chromium/gpu/command_buffer/service/context_state.h
index 531313953a4..3f04a3dd6aa 100644
--- a/chromium/gpu/command_buffer/service/context_state.h
+++ b/chromium/gpu/command_buffer/service/context_state.h
@@ -10,7 +10,8 @@
#include <memory>
#include <vector>
-#include "base/logging.h"
+#include "base/check_op.h"
+#include "base/notreached.h"
#include "gpu/command_buffer/service/gl_utils.h"
#include "gpu/command_buffer/service/sampler_manager.h"
#include "gpu/command_buffer/service/shader_manager.h"
diff --git a/chromium/gpu/command_buffer/service/copy_texture_chromium_mock.h b/chromium/gpu/command_buffer/service/copy_texture_chromium_mock.h
index ec299201fb8..eb55bbd0845 100644
--- a/chromium/gpu/command_buffer/service/copy_texture_chromium_mock.h
+++ b/chromium/gpu/command_buffer/service/copy_texture_chromium_mock.h
@@ -116,51 +116,6 @@ class MockCopyTextureResourceManager
bool dither,
CopyTextureMethod method,
CopyTexImageResourceManager* luma_emulation_blitter) override {}
- void DoCopySubTextureWithTransform(
- DecoderContext* decoder,
- GLenum source_target,
- GLuint source_id,
- GLint source_level,
- GLenum source_internal_format,
- GLenum dest_target,
- GLuint dest_id,
- GLint dest_level,
- GLenum dest_internal_format,
- GLint xoffset,
- GLint yoffset,
- GLint x,
- GLint y,
- GLsizei width,
- GLsizei height,
- GLsizei dest_width,
- GLsizei dest_height,
- GLsizei source_width,
- GLsizei source_height,
- bool flip_y,
- bool premultiply_alpha,
- bool unpremultiply_alpha,
- bool dither,
- const GLfloat transform_matrix[16],
- CopyTexImageResourceManager* luma_emulation_blitter) override {}
- void DoCopyTextureWithTransform(
- DecoderContext* decoder,
- GLenum source_target,
- GLuint source_id,
- GLint source_level,
- GLenum source_format,
- GLenum dest_target,
- GLuint dest_id,
- GLint dest_level,
- GLenum dest_format,
- GLsizei width,
- GLsizei height,
- bool flip_y,
- bool premultiply_alpha,
- bool unpremultiply_alpha,
- bool dither,
- const GLfloat transform_matrix[16],
- CopyTextureMethod method,
- CopyTexImageResourceManager* luma_emulation_blitter) override {}
private:
DISALLOW_COPY_AND_ASSIGN(MockCopyTextureResourceManager);
diff --git a/chromium/gpu/command_buffer/service/external_vk_image_backing.cc b/chromium/gpu/command_buffer/service/external_vk_image_backing.cc
index 05778fdf5b7..5e38edb1270 100644
--- a/chromium/gpu/command_buffer/service/external_vk_image_backing.cc
+++ b/chromium/gpu/command_buffer/service/external_vk_image_backing.cc
@@ -8,9 +8,7 @@
#include <vector>
#include "base/stl_util.h"
-#include "base/system/sys_info.h"
#include "build/build_config.h"
-#include "components/viz/common/resources/resource_format_utils.h"
#include "components/viz/common/resources/resource_sizes.h"
#include "gpu/command_buffer/service/external_vk_image_gl_representation.h"
#include "gpu/command_buffer/service/external_vk_image_skia_representation.h"
@@ -23,6 +21,7 @@
#include "gpu/vulkan/vulkan_function_pointers.h"
#include "gpu/vulkan/vulkan_image.h"
#include "gpu/vulkan/vulkan_util.h"
+#include "third_party/skia/include/gpu/GrBackendSemaphore.h"
#include "ui/gfx/buffer_format_util.h"
#include "ui/gl/buildflags.h"
#include "ui/gl/gl_context.h"
@@ -138,8 +137,19 @@ std::unique_ptr<ExternalVkImageBacking> ExternalVkImageBacking::Create(
auto* device_queue = context_state->vk_context_provider()->GetDeviceQueue();
VkFormat vk_format = ToVkFormat(format);
- VkImageUsageFlags vk_usage =
- VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | VK_IMAGE_USAGE_SAMPLED_BIT;
+
+ constexpr auto kUsageNeedsColorAttachment =
+ SHARED_IMAGE_USAGE_GLES2 | SHARED_IMAGE_USAGE_RASTER |
+ SHARED_IMAGE_USAGE_OOP_RASTERIZATION | SHARED_IMAGE_USAGE_WEBGPU;
+ VkImageUsageFlags vk_usage = VK_IMAGE_USAGE_SAMPLED_BIT;
+ if (usage & kUsageNeedsColorAttachment) {
+ vk_usage |= VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT;
+ if (format == viz::ETC1) {
+ DLOG(ERROR) << "ETC1 format cannot be used as color attachment.";
+ return nullptr;
+ }
+ }
+
if (is_transfer_dst)
vk_usage |= VK_IMAGE_USAGE_TRANSFER_DST_BIT;
@@ -155,10 +165,20 @@ std::unique_ptr<ExternalVkImageBacking> ExternalVkImageBacking::Create(
auto* vulkan_implementation =
context_state->vk_context_provider()->GetVulkanImplementation();
- VkImageCreateFlags vk_flags =
- vulkan_implementation->enforce_protected_memory()
- ? VK_IMAGE_CREATE_PROTECTED_BIT
- : 0;
+ VkImageCreateFlags vk_flags = 0;
+
+ // In protected mode mark the image as protected, except when the image needs
+ // GLES2, but not Raster usage. ANGLE currenctly doesn't support protected
+ // images. Some clients request GLES2 and Raster usage (e.g. see
+ // GpuMemoryBufferVideoFramePool). In that case still allocate protected
+ // image, which ensures that image can still usable, but it may not work in
+ // some scenarios (e.g. when the video frame is used in WebGL).
+ if (vulkan_implementation->enforce_protected_memory() &&
+ (!(usage & SHARED_IMAGE_USAGE_GLES2) ||
+ (usage & SHARED_IMAGE_USAGE_RASTER))) {
+ vk_flags |= VK_IMAGE_CREATE_PROTECTED_BIT;
+ }
+
std::unique_ptr<VulkanImage> image;
if (is_external) {
image = VulkanImage::CreateWithExternalMemory(device_queue, size, vk_format,
@@ -176,7 +196,7 @@ std::unique_ptr<ExternalVkImageBacking> ExternalVkImageBacking::Create(
color_space, usage, context_state, std::move(image), command_pool);
if (!pixel_data.empty()) {
- backing->WritePixels(
+ backing->WritePixelsWithCallback(
pixel_data.size(), 0,
base::BindOnce([](const void* data, size_t size,
void* buffer) { memcpy(buffer, data, size); },
@@ -228,73 +248,10 @@ std::unique_ptr<ExternalVkImageBacking> ExternalVkImageBacking::CreateFromGMB(
}
DCHECK_EQ(handle.type, gfx::SHARED_MEMORY_BUFFER);
- if (!base::IsValueInRangeForNumericType<size_t>(handle.stride))
- return nullptr;
- int32_t width_in_bytes = 0;
- if (!viz::ResourceSizes::MaybeWidthInBytes(size.width(), resource_format,
- &width_in_bytes)) {
- DLOG(ERROR) << "ResourceSizes::MaybeWidthInBytes() failed.";
+ SharedMemoryRegionWrapper shared_memory_wrapper;
+ if (!shared_memory_wrapper.Initialize(handle, size, resource_format))
return nullptr;
- }
-
- if (handle.stride < width_in_bytes) {
- DLOG(ERROR) << "Invalid GMB stride.";
- return nullptr;
- }
-
- auto bits_per_pixel = viz::BitsPerPixel(resource_format);
- switch (bits_per_pixel) {
- case 64:
- case 32:
- case 16:
- if (handle.stride % (bits_per_pixel / 8) != 0) {
- DLOG(ERROR) << "Invalid GMB stride.";
- return nullptr;
- }
- break;
- case 8:
- case 4:
- break;
- case 12:
- // We are not supporting YVU420 and YUV_420_BIPLANAR format.
- default:
- NOTREACHED();
- return nullptr;
- }
-
- if (!handle.region.IsValid()) {
- DLOG(ERROR) << "Invalid GMB shared memory region.";
- return nullptr;
- }
-
- base::CheckedNumeric<size_t> checked_size = handle.stride;
- checked_size *= size.height();
- if (!checked_size.IsValid()) {
- DLOG(ERROR) << "Invalid GMB size.";
- return nullptr;
- }
-
- // Minimize the amount of address space we use but make sure offset is a
- // multiple of page size as required by MapAt().
- size_t memory_offset =
- handle.offset % base::SysInfo::VMAllocationGranularity();
- size_t map_offset =
- base::SysInfo::VMAllocationGranularity() *
- (handle.offset / base::SysInfo::VMAllocationGranularity());
- checked_size += memory_offset;
- if (!checked_size.IsValid()) {
- DLOG(ERROR) << "Invalid GMB size.";
- return nullptr;
- }
-
- auto shared_memory_mapping = handle.region.MapAt(
- static_cast<off_t>(map_offset), checked_size.ValueOrDie());
-
- if (!shared_memory_mapping.IsValid()) {
- DLOG(ERROR) << "Failed to map shared memory.";
- return nullptr;
- }
auto backing = Create(context_state, command_pool, mailbox, resource_format,
size, color_space, usage, image_usage_cache,
@@ -302,8 +259,7 @@ std::unique_ptr<ExternalVkImageBacking> ExternalVkImageBacking::CreateFromGMB(
if (!backing)
return nullptr;
- backing->InstallSharedMemory(std::move(shared_memory_mapping), handle.stride,
- memory_offset);
+ backing->InstallSharedMemory(std::move(shared_memory_wrapper));
return backing;
}
@@ -366,21 +322,36 @@ bool ExternalVkImageBacking::BeginAccess(
bool readonly,
std::vector<SemaphoreHandle>* semaphore_handles,
bool is_gl) {
+ DLOG_IF(ERROR, gl_reads_in_progress_ != 0 && !is_gl)
+ << "Backing is being accessed by both GL and Vulkan.";
+ // Do not need do anything for the second and following GL read access.
+ if (is_gl && readonly && gl_reads_in_progress_) {
+ ++gl_reads_in_progress_;
+ return true;
+ }
+
if (readonly && !reads_in_progress_) {
UpdateContent(kInVkImage);
if (texture_)
UpdateContent(kInGLTexture);
}
+
if (!BeginAccessInternal(readonly, semaphore_handles))
return false;
if (!is_gl)
return true;
+ if (readonly) {
+ DCHECK(!gl_reads_in_progress_);
+ gl_reads_in_progress_ = 1;
+ }
+
if (use_separate_gl_texture())
return true;
DCHECK(need_synchronization());
+ DCHECK(is_gl);
auto command_buffer = command_pool_->CreatePrimaryCommandBuffer();
{
@@ -401,7 +372,7 @@ bool ExternalVkImageBacking::BeginAccess(
uint32_t vulkan_queue_index = context_state_->vk_context_provider()
->GetDeviceQueue()
->GetVulkanQueueIndex();
- // Transfer image queue faimily ownership to external, so the image can be
+ // Transfer image queue family ownership to external, so the image can be
// used by GL.
command_buffer->TransitionImageLayout(image_info.fImage, image_layout,
image_layout, vulkan_queue_index,
@@ -422,9 +393,9 @@ bool ExternalVkImageBacking::BeginAccess(
// TODO(penghuang): ask skia to do it for us to avoid this queue submission.
command_buffer->Submit(wait_semaphores.size(), wait_semaphores.data(), 1,
&signal_semaphore);
- auto end_access_semphore_handle =
+ auto end_access_semaphore_handle =
vulkan_implementation()->GetSemaphoreHandle(device(), signal_semaphore);
- semaphore_handles->push_back(std::move(end_access_semphore_handle));
+ semaphore_handles->push_back(std::move(end_access_semaphore_handle));
auto* fence_helper =
context_state_->vk_context_provider()->GetDeviceQueue()->GetFenceHelper();
@@ -440,7 +411,17 @@ bool ExternalVkImageBacking::BeginAccess(
void ExternalVkImageBacking::EndAccess(bool readonly,
SemaphoreHandle semaphore_handle,
bool is_gl) {
+ if (is_gl && readonly) {
+ DCHECK(gl_reads_in_progress_);
+ if (--gl_reads_in_progress_ > 0) {
+ DCHECK(!semaphore_handle.is_valid());
+ return;
+ }
+ }
+
+ // Only transite image layout and queue back when it is the last gl access.
if (is_gl && !use_separate_gl_texture()) {
+ DCHECK(semaphore_handle.is_valid());
auto command_buffer = command_pool_->CreatePrimaryCommandBuffer();
{
ScopedSingleUseCommandBufferRecorder recorder(*command_buffer);
@@ -629,10 +610,10 @@ ExternalVkImageBacking::ProduceGLTexture(SharedImageManager* manager,
texture_ = new gles2::Texture(texture_service_id);
texture_->SetLightweightRef();
texture_->SetTarget(GL_TEXTURE_2D, 1);
- texture_->sampler_state_.min_filter = GL_LINEAR;
- texture_->sampler_state_.mag_filter = GL_LINEAR;
- texture_->sampler_state_.wrap_t = GL_CLAMP_TO_EDGE;
- texture_->sampler_state_.wrap_s = GL_CLAMP_TO_EDGE;
+ texture_->set_min_filter(GL_LINEAR);
+ texture_->set_mag_filter(GL_LINEAR);
+ texture_->set_wrap_t(GL_CLAMP_TO_EDGE);
+ texture_->set_wrap_s(GL_CLAMP_TO_EDGE);
// If the backing is already cleared, no need to clear it again.
gfx::Rect cleared_rect;
if (IsCleared())
@@ -689,14 +670,10 @@ ExternalVkImageBacking::ProduceSkia(
}
void ExternalVkImageBacking::InstallSharedMemory(
- base::WritableSharedMemoryMapping shared_memory_mapping,
- size_t stride,
- size_t memory_offset) {
- DCHECK(!shared_memory_mapping_.IsValid());
- DCHECK(shared_memory_mapping.IsValid());
- shared_memory_mapping_ = std::move(shared_memory_mapping);
- stride_ = stride;
- memory_offset_ = memory_offset;
+ SharedMemoryRegionWrapper shared_memory_wrapper) {
+ DCHECK(!shared_memory_wrapper_.IsValid());
+ DCHECK(shared_memory_wrapper.IsValid());
+ shared_memory_wrapper_ = std::move(shared_memory_wrapper);
Update(nullptr);
}
@@ -713,18 +690,10 @@ void ExternalVkImageBacking::UpdateContent(uint32_t content_flags) {
if (content_flags == kInVkImage) {
if (latest_content_ & kInSharedMemory) {
- if (!shared_memory_mapping_.IsValid())
+ if (!shared_memory_wrapper_.IsValid())
return;
- auto pixel_data =
- shared_memory_mapping_.GetMemoryAsSpan<const uint8_t>().subspan(
- memory_offset_);
- if (!WritePixels(
- pixel_data.size(), stride_,
- base::BindOnce([](const void* data, size_t size,
- void* buffer) { memcpy(buffer, data, size); },
- pixel_data.data(), pixel_data.size()))) {
+ if (!WritePixels())
return;
- }
latest_content_ |=
use_separate_gl_texture() ? kInVkImage : kInVkImage | kInGLTexture;
return;
@@ -748,9 +717,10 @@ void ExternalVkImageBacking::UpdateContent(uint32_t content_flags) {
}
}
-bool ExternalVkImageBacking::WritePixels(size_t data_size,
- size_t stride,
- FillBufferCallback callback) {
+bool ExternalVkImageBacking::WritePixelsWithCallback(
+ size_t data_size,
+ size_t stride,
+ FillBufferCallback callback) {
DCHECK(stride == 0 || size().height() * stride <= data_size);
VkBufferCreateInfo buffer_create_info = {
@@ -811,6 +781,8 @@ bool ExternalVkImageBacking::WritePixels(size_t data_size,
size().width(), size().height());
}
+ SetCleared();
+
if (!need_synchronization()) {
DCHECK(handles.empty());
command_buffer->Submit(0, nullptr, 0, nullptr);
@@ -823,7 +795,6 @@ bool ExternalVkImageBacking::WritePixels(size_t data_size,
std::move(command_buffer));
fence_helper->EnqueueBufferCleanupForSubmittedWork(stage_buffer,
stage_allocation);
-
return true;
}
@@ -841,10 +812,11 @@ bool ExternalVkImageBacking::WritePixels(size_t data_size,
begin_access_semaphores.data(), 1,
&end_access_semaphore);
- auto end_access_semphore_handle = vulkan_implementation()->GetSemaphoreHandle(
- device(), end_access_semaphore);
+ auto end_access_semaphore_handle =
+ vulkan_implementation()->GetSemaphoreHandle(device(),
+ end_access_semaphore);
EndAccessInternal(false /* readonly */,
- std::move(end_access_semphore_handle));
+ std::move(end_access_semaphore_handle));
auto* fence_helper =
context_state_->vk_context_provider()->GetDeviceQueue()->GetFenceHelper();
@@ -855,10 +827,69 @@ bool ExternalVkImageBacking::WritePixels(size_t data_size,
begin_access_semaphores);
fence_helper->EnqueueBufferCleanupForSubmittedWork(stage_buffer,
stage_allocation);
-
return true;
}
+bool ExternalVkImageBacking::WritePixels() {
+ std::vector<gpu::SemaphoreHandle> handles;
+ if (!BeginAccessInternal(false /* readonly */, &handles)) {
+ DLOG(ERROR) << "BeginAccess() failed.";
+ return false;
+ }
+
+ std::vector<GrBackendSemaphore> begin_access_semaphores;
+ begin_access_semaphores.reserve(handles.size() + 1);
+ for (auto& handle : handles) {
+ VkSemaphore semaphore = vulkan_implementation()->ImportSemaphoreHandle(
+ device(), std::move(handle));
+ begin_access_semaphores.emplace_back();
+ begin_access_semaphores.back().initVulkan(semaphore);
+ }
+
+ auto* gr_context = context_state_->gr_context();
+ gr_context->wait(begin_access_semaphores.size(),
+ begin_access_semaphores.data());
+
+ auto info = SkImageInfo::Make(size().width(), size().height(),
+ ResourceFormatToClosestSkColorType(
+ /*gpu_compositing=*/true, format()),
+ kOpaque_SkAlphaType);
+ SkPixmap pixmap(info, shared_memory_wrapper_.GetMemory(),
+ shared_memory_wrapper_.GetStride());
+
+ if (!gr_context->updateBackendTexture(backend_texture_, &pixmap,
+ /*levels=*/1, nullptr, nullptr)) {
+ DLOG(ERROR) << "updateBackendTexture() failed.";
+ }
+
+ if (!need_synchronization()) {
+ DCHECK(handles.empty());
+ EndAccessInternal(false /* readonly */, SemaphoreHandle());
+ return true;
+ }
+
+ VkSemaphore end_access_semaphore =
+ vulkan_implementation()->CreateExternalSemaphore(device());
+ GrBackendSemaphore end_access_backend_semaphore;
+ end_access_backend_semaphore.initVulkan(end_access_semaphore);
+
+ GrFlushInfo flush_info = {
+ .fNumSemaphores = 1,
+ .fSignalSemaphores = &end_access_backend_semaphore,
+ };
+
+ gr_context->flush(flush_info);
+ // Submit so the |end_access_semaphore| is ready for waiting.
+ gr_context->submit();
+
+ auto end_access_semaphore_handle =
+ vulkan_implementation()->GetSemaphoreHandle(device(),
+ end_access_semaphore);
+ EndAccessInternal(false /* readonly */,
+ std::move(end_access_semaphore_handle));
+ return true;
+}
+
void ExternalVkImageBacking::CopyPixelsFromGLTextureToVkImage() {
DCHECK(use_separate_gl_texture());
DCHECK_NE(!!texture_, !!texture_passthrough_);
@@ -907,16 +938,16 @@ void ExternalVkImageBacking::CopyPixelsFromGLTextureToVkImage() {
ScopedPixelStore pack_skip_rows(api, GL_PACK_SKIP_ROWS, 0);
ScopedPixelStore pack_aligment(api, GL_PACK_ALIGNMENT, 1);
- WritePixels(checked_size.ValueOrDie(), 0,
- base::BindOnce(
- [](gl::GLApi* api, const gfx::Size& size, GLenum format,
- GLenum type, void* buffer) {
- api->glReadPixelsFn(0, 0, size.width(), size.height(),
- format, type, buffer);
- DCHECK_EQ(api->glGetErrorFn(),
- static_cast<GLenum>(GL_NO_ERROR));
- },
- api, size(), gl_format, gl_type));
+ WritePixelsWithCallback(
+ checked_size.ValueOrDie(), 0,
+ base::BindOnce(
+ [](gl::GLApi* api, const gfx::Size& size, GLenum format, GLenum type,
+ void* buffer) {
+ api->glReadPixelsFn(0, 0, size.width(), size.height(), format, type,
+ buffer);
+ DCHECK_EQ(api->glGetErrorFn(), static_cast<GLenum>(GL_NO_ERROR));
+ },
+ api, size(), gl_format, gl_type));
api->glBindFramebufferEXTFn(GL_READ_FRAMEBUFFER, old_framebuffer);
api->glDeleteFramebuffersEXTFn(1, &framebuffer);
}
@@ -957,9 +988,7 @@ void ExternalVkImageBacking::CopyPixelsFromShmToGLTexture() {
checked_size *= size().height();
DCHECK(checked_size.IsValid());
- auto pixel_data =
- shared_memory_mapping_.GetMemoryAsSpan<const uint8_t>().subspan(
- memory_offset_);
+ auto pixel_data = shared_memory_wrapper_.GetMemoryAsSpan();
api->glTexSubImage2DFn(GL_TEXTURE_2D, 0, 0, 0, size().width(),
size().height(), gl_format, gl_type,
pixel_data.data());
@@ -1023,7 +1052,9 @@ void ExternalVkImageBacking::EndAccessInternal(
is_write_in_progress_ = false;
}
- if (need_synchronization()) {
+ // synchronization is not needed if it is not the last gl access.
+ if (need_synchronization() && reads_in_progress_ == 0) {
+ DCHECK(!is_write_in_progress_);
DCHECK(semaphore_handle.is_valid());
if (readonly) {
read_semaphore_handles_.push_back(std::move(semaphore_handle));
@@ -1032,8 +1063,6 @@ void ExternalVkImageBacking::EndAccessInternal(
DCHECK(read_semaphore_handles_.empty());
write_semaphore_handle_ = std::move(semaphore_handle);
}
- } else {
- DCHECK(!semaphore_handle.is_valid());
}
}
diff --git a/chromium/gpu/command_buffer/service/external_vk_image_backing.h b/chromium/gpu/command_buffer/service/external_vk_image_backing.h
index 7e7dc67b627..e3d1103d649 100644
--- a/chromium/gpu/command_buffer/service/external_vk_image_backing.h
+++ b/chromium/gpu/command_buffer/service/external_vk_image_backing.h
@@ -9,7 +9,6 @@
#include <vector>
#include "base/memory/scoped_refptr.h"
-#include "base/memory/shared_memory_mapping.h"
#include "base/optional.h"
#include "base/util/type_safety/pass_key.h"
#include "build/build_config.h"
@@ -17,6 +16,7 @@
#include "gpu/command_buffer/common/shared_image_usage.h"
#include "gpu/command_buffer/service/shared_context_state.h"
#include "gpu/command_buffer/service/shared_image_backing.h"
+#include "gpu/command_buffer/service/shared_memory_region_wrapper.h"
#include "gpu/command_buffer/service/texture_manager.h"
#include "gpu/vulkan/semaphore_handle.h"
#include "gpu/vulkan/vulkan_device_queue.h"
@@ -99,6 +99,9 @@ class ExternalVkImageBacking final : public ClearTrackingSharedImageBacking {
return !context_state()->support_vulkan_external_object();
}
+ uint32_t reads_in_progress() const { return reads_in_progress_; }
+ uint32_t gl_reads_in_progress() const { return gl_reads_in_progress_; }
+
// Notifies the backing that an access will start. Return false if there is
// currently any other conflict access in progress. Otherwise, returns true
// and semaphore handles which will be waited on before accessing.
@@ -157,17 +160,17 @@ class ExternalVkImageBacking final : public ClearTrackingSharedImageBacking {
#endif
// Install a shared memory GMB to the backing.
- void InstallSharedMemory(
- base::WritableSharedMemoryMapping shared_memory_mapping,
- size_t stride,
- size_t memory_offset);
+ void InstallSharedMemory(SharedMemoryRegionWrapper shared_memory_wrapper);
// Returns texture_service_id for ProduceGLTexture and GLTexturePassthrough.
GLuint ProduceGLTextureInternal();
using FillBufferCallback = base::OnceCallback<void(void* buffer)>;
- bool WritePixels(size_t data_size,
- size_t stride,
- FillBufferCallback callback);
+ // TODO(penghuang): Remove it when GrContext::updateBackendTexture() supports
+ // compressed texture and callback.
+ bool WritePixelsWithCallback(size_t data_size,
+ size_t stride,
+ FillBufferCallback callback);
+ bool WritePixels();
void CopyPixelsFromGLTextureToVkImage();
void CopyPixelsFromShmToGLTexture();
@@ -181,13 +184,12 @@ class ExternalVkImageBacking final : public ClearTrackingSharedImageBacking {
bool is_write_in_progress_ = false;
uint32_t reads_in_progress_ = 0;
+ uint32_t gl_reads_in_progress_ = 0;
gles2::Texture* texture_ = nullptr;
scoped_refptr<gles2::TexturePassthrough> texture_passthrough_;
// GMB related stuff.
- base::WritableSharedMemoryMapping shared_memory_mapping_;
- size_t stride_ = 0;
- size_t memory_offset_ = 0;
+ SharedMemoryRegionWrapper shared_memory_wrapper_;
enum LatestContent {
kInVkImage = 1 << 0,
diff --git a/chromium/gpu/command_buffer/service/external_vk_image_gl_representation.cc b/chromium/gpu/command_buffer/service/external_vk_image_gl_representation.cc
index 34fdcde0c1d..bbad54274a2 100644
--- a/chromium/gpu/command_buffer/service/external_vk_image_gl_representation.cc
+++ b/chromium/gpu/command_buffer/service/external_vk_image_gl_representation.cc
@@ -77,8 +77,10 @@ bool ExternalVkImageGLRepresentationShared::BeginAccess(GLenum mode) {
}
DCHECK(mode == GL_SHARED_IMAGE_ACCESS_MODE_READ_CHROMIUM ||
- mode == GL_SHARED_IMAGE_ACCESS_MODE_READWRITE_CHROMIUM);
- const bool readonly = (mode == GL_SHARED_IMAGE_ACCESS_MODE_READ_CHROMIUM);
+ mode == GL_SHARED_IMAGE_ACCESS_MODE_READWRITE_CHROMIUM ||
+ mode == GL_SHARED_IMAGE_ACCESS_MODE_OVERLAY_CHROMIUM);
+ const bool readonly =
+ (mode != GL_SHARED_IMAGE_ACCESS_MODE_READWRITE_CHROMIUM);
std::vector<SemaphoreHandle> handles;
if (!backing_impl()->BeginAccess(readonly, &handles, true /* is_gl */))
@@ -111,16 +113,17 @@ void ExternalVkImageGLRepresentationShared::EndAccess() {
DCHECK(current_access_mode_ == GL_SHARED_IMAGE_ACCESS_MODE_READ_CHROMIUM ||
current_access_mode_ ==
- GL_SHARED_IMAGE_ACCESS_MODE_READWRITE_CHROMIUM);
+ GL_SHARED_IMAGE_ACCESS_MODE_READWRITE_CHROMIUM ||
+ current_access_mode_ == GL_SHARED_IMAGE_ACCESS_MODE_OVERLAY_CHROMIUM);
const bool readonly =
- (current_access_mode_ == GL_SHARED_IMAGE_ACCESS_MODE_READ_CHROMIUM);
+ (current_access_mode_ != GL_SHARED_IMAGE_ACCESS_MODE_READWRITE_CHROMIUM);
current_access_mode_ = 0;
- VkSemaphore semaphore = VK_NULL_HANDLE;
SemaphoreHandle semaphore_handle;
- GLuint gl_semaphore = 0;
- if (backing_impl()->need_synchronization()) {
- semaphore =
+ if (backing_impl()->need_synchronization() &&
+ backing_impl()->gl_reads_in_progress() <= 1) {
+ DCHECK(readonly == !!backing_impl()->gl_reads_in_progress());
+ VkSemaphore semaphore =
vk_implementation()->CreateExternalSemaphore(backing_impl()->device());
if (semaphore == VK_NULL_HANDLE) {
// TODO(crbug.com/933452): We should be able to handle this failure more
@@ -142,7 +145,8 @@ void ExternalVkImageGLRepresentationShared::EndAccess() {
}
SemaphoreHandle dup_semaphore_handle = semaphore_handle.Duplicate();
- gl_semaphore = ImportVkSemaphoreIntoGL(std::move(dup_semaphore_handle));
+ GLuint gl_semaphore =
+ ImportVkSemaphoreIntoGL(std::move(dup_semaphore_handle));
if (!gl_semaphore) {
// TODO(crbug.com/933452): We should be able to semaphore_handle this
@@ -152,24 +156,21 @@ void ExternalVkImageGLRepresentationShared::EndAccess() {
<< "Vulkan";
return;
}
- }
- GrVkImageInfo info;
- auto result = backing_impl()->backend_texture().getVkImageInfo(&info);
- DCHECK(result);
- GLenum dst_layout = ToGLImageLayout(info.fImageLayout);
- if (backing_impl()->need_synchronization()) {
+ GrVkImageInfo info;
+ auto result = backing_impl()->backend_texture().getVkImageInfo(&info);
+ DCHECK(result);
+ GLenum dst_layout = ToGLImageLayout(info.fImageLayout);
api()->glSignalSemaphoreEXTFn(gl_semaphore, 0, nullptr, 1,
&texture_service_id_, &dst_layout);
api()->glDeleteSemaphoresEXTFn(1, &gl_semaphore);
// Base on the spec, the glSignalSemaphoreEXT() call just inserts signal
// semaphore command in the gl context. It may or may not flush the context
- // which depends on the impelemntation. So to make it safe, we always call
+ // which depends on the implementation. So to make it safe, we always call
// glFlush() here. If the implementation does flush in the
// glSignalSemaphoreEXT() call, the glFlush() call should be a noop.
api()->glFlushFn();
}
-
backing_impl()->EndAccess(readonly, std::move(semaphore_handle),
true /* is_gl */);
}
diff --git a/chromium/gpu/command_buffer/service/external_vk_image_skia_representation.cc b/chromium/gpu/command_buffer/service/external_vk_image_skia_representation.cc
index 692eb8feadd..3211b8b59c8 100644
--- a/chromium/gpu/command_buffer/service/external_vk_image_skia_representation.cc
+++ b/chromium/gpu/command_buffer/service/external_vk_image_skia_representation.cc
@@ -61,12 +61,12 @@ sk_sp<SkSurface> ExternalVkImageSkiaRepresentation::BeginWriteAccess(
final_msaa_count != surface_msaa_count_) {
SkColorType sk_color_type = viz::ResourceFormatToClosestSkColorType(
true /* gpu_compositing */, format());
- surface = SkSurface::MakeFromBackendTextureAsRenderTarget(
+ surface = SkSurface::MakeFromBackendTexture(
gr_context, promise_texture->backendTexture(), kTopLeft_GrSurfaceOrigin,
final_msaa_count, sk_color_type,
backing_impl()->color_space().ToSkColorSpace(), &surface_props);
if (!surface) {
- LOG(ERROR) << "MakeFromBackendTextureAsRenderTarget() failed.";
+ LOG(ERROR) << "MakeFromBackendTexture() failed.";
backing_impl()->context_state()->EraseCachedSkSurface(this);
return nullptr;
}
diff --git a/chromium/gpu/command_buffer/service/feature_info.cc b/chromium/gpu/command_buffer/service/feature_info.cc
index 20e5298d9f6..ca410861576 100644
--- a/chromium/gpu/command_buffer/service/feature_info.cc
+++ b/chromium/gpu/command_buffer/service/feature_info.cc
@@ -170,35 +170,6 @@ bool IsWebGLDrawBuffersSupported(bool webglCompatibilityContext,
} // anonymous namespace.
-namespace {
-
-enum GpuTextureResultR16_L16 {
- // Values synced with 'GpuTextureResultR16_L16' in
- // src/tools/metrics/histograms/histograms.xml
- kHaveNone = 0,
- kHaveR16 = 1,
- kHaveL16 = 2,
- kHaveR16AndL16 = 3,
- kMax = kHaveR16AndL16
-};
-
-// TODO(riju): For UMA, remove after crbug.com/759456 is resolved.
-bool g_r16_is_present;
-bool g_l16_is_present;
-
-GpuTextureResultR16_L16 GpuTextureUMAHelper() {
- if (g_r16_is_present && g_l16_is_present) {
- return GpuTextureResultR16_L16::kHaveR16AndL16;
- } else if (g_r16_is_present) {
- return GpuTextureResultR16_L16::kHaveR16;
- } else if (g_l16_is_present) {
- return GpuTextureResultR16_L16::kHaveL16;
- }
- return GpuTextureResultR16_L16::kHaveNone;
-}
-
-} // anonymous namespace.
-
FeatureInfo::FeatureFlags::FeatureFlags() = default;
FeatureInfo::FeatureInfo() {
@@ -250,11 +221,6 @@ void FeatureInfo::InitializeBasicState(const base::CommandLine* command_line) {
feature_flags_.is_swiftshader_for_webgl =
(useGL == gl::kGLImplementationSwiftShaderForWebGLName);
- feature_flags_.is_swiftshader =
- (useGL == gl::kGLImplementationSwiftShaderName) ||
- ((useGL == gl::kGLImplementationANGLEName) &&
- (useANGLE == gl::kANGLEImplementationSwiftShaderName));
-
// The shader translator is needed to translate from WebGL-conformant GLES SL
// to normal GLES SL, enforce WebGL conformance, translate from GLES SL 1.0 to
// target context GLSL, implement emulation of OpenGL ES features on OpenGL,
@@ -420,6 +386,13 @@ void FeatureInfo::EnableCHROMIUMColorBufferFloatRGB() {
AddExtensionString("GL_CHROMIUM_color_buffer_float_rgb");
}
+void FeatureInfo::EnableOESDrawBuffersIndexed() {
+ if (!feature_flags_.oes_draw_buffers_indexed) {
+ AddExtensionString("GL_OES_draw_buffers_indexed");
+ feature_flags_.oes_draw_buffers_indexed = true;
+ }
+}
+
void FeatureInfo::EnableOESFboRenderMipmap() {
if (!feature_flags_.oes_fbo_render_mipmap) {
AddExtensionString("GL_OES_fbo_render_mipmap");
@@ -778,6 +751,14 @@ void FeatureInfo::InitializeFeatures() {
validators_.index_type.AddValue(GL_UNSIGNED_INT);
}
+ // Note (crbug.com/1058744): not implemented for validating command decoder
+ if (is_passthrough_cmd_decoder_ &&
+ gfx::HasExtension(extensions, "GL_OES_draw_buffers_indexed")) {
+ if (!disallowed_features_.oes_draw_buffers_indexed) {
+ EnableOESDrawBuffersIndexed();
+ }
+ }
+
if (gl_version_info_->IsAtLeastGL(3, 0) || gl_version_info_->is_es3 ||
gfx::HasExtension(extensions, "GL_OES_fbo_render_mipmap") ||
gfx::HasExtension(extensions, "GL_EXT_framebuffer_object")) {
@@ -1461,7 +1442,6 @@ void FeatureInfo::InitializeFeatures() {
gfx::HasExtension(extensions, "GL_EXT_texture_norm16"))) {
AddExtensionString("GL_EXT_texture_norm16");
feature_flags_.ext_texture_norm16 = true;
- g_r16_is_present = true;
validators_.pixel_type.AddValue(GL_UNSIGNED_SHORT);
validators_.pixel_type.AddValue(GL_SHORT);
@@ -1511,10 +1491,6 @@ void FeatureInfo::InitializeFeatures() {
feature_flags_.gpu_memory_buffer_formats.Add(gfx::BufferFormat::R_16);
}
- UMA_HISTOGRAM_ENUMERATION(
- "GPU.TextureR16Ext_LuminanceF16", GpuTextureUMAHelper(),
- static_cast<int>(GpuTextureResultR16_L16::kMax) + 1);
-
if (enable_es3 && gfx::HasExtension(extensions, "GL_EXT_window_rectangles")) {
AddExtensionString("GL_EXT_window_rectangles");
feature_flags_.ext_window_rectangles = true;
@@ -1945,9 +1921,6 @@ void FeatureInfo::InitializeFloatAndHalfFloatFeatures(
validators_.texture_internal_format_storage.AddValue(
GL_LUMINANCE_ALPHA16F_EXT);
}
-
- g_l16_is_present =
- enable_texture_half_float && feature_flags_.ext_texture_storage;
}
bool FeatureInfo::IsES3Capable() const {
diff --git a/chromium/gpu/command_buffer/service/feature_info.h b/chromium/gpu/command_buffer/service/feature_info.h
index 4a3255b6b6e..ac6c4d8e393 100644
--- a/chromium/gpu/command_buffer/service/feature_info.h
+++ b/chromium/gpu/command_buffer/service/feature_info.h
@@ -90,7 +90,6 @@ class GPU_GLES2_EXPORT FeatureInfo : public base::RefCounted<FeatureInfo> {
bool ext_discard_framebuffer = false;
bool angle_depth_texture = false;
bool is_swiftshader_for_webgl = false;
- bool is_swiftshader = false;
bool chromium_texture_filtering_hint = false;
bool angle_texture_usage = false;
bool ext_texture_storage = false;
@@ -151,6 +150,7 @@ class GPU_GLES2_EXPORT FeatureInfo : public base::RefCounted<FeatureInfo> {
bool webgl_multi_draw_instanced_base_vertex_base_instance = false;
bool ext_texture_compression_bptc = false;
bool ext_texture_compression_rgtc = false;
+ bool oes_draw_buffers_indexed = false;
};
FeatureInfo();
@@ -213,6 +213,7 @@ class GPU_GLES2_EXPORT FeatureInfo : public base::RefCounted<FeatureInfo> {
void EnableEXTColorBufferFloat();
void EnableEXTColorBufferHalfFloat();
void EnableEXTTextureFilterAnisotropic();
+ void EnableOESDrawBuffersIndexed();
void EnableOESFboRenderMipmap();
void EnableOESTextureFloatLinear();
void EnableOESTextureHalfFloatLinear();
diff --git a/chromium/gpu/command_buffer/service/gl_context_virtual.cc b/chromium/gpu/command_buffer/service/gl_context_virtual.cc
index c6e000cd9a6..c8662354eb6 100644
--- a/chromium/gpu/command_buffer/service/gl_context_virtual.cc
+++ b/chromium/gpu/command_buffer/service/gl_context_virtual.cc
@@ -87,10 +87,12 @@ void GLContextVirtual::SetSafeToForceGpuSwitch() {
}
unsigned int GLContextVirtual::CheckStickyGraphicsResetStatus() {
- // Don't pretend we know which one of the virtual contexts was responsible.
unsigned int reset_status = shared_context_->CheckStickyGraphicsResetStatus();
- return reset_status == GL_NO_ERROR ? GL_NO_ERROR
- : GL_UNKNOWN_CONTEXT_RESET_ARB;
+ if (reset_status == GL_NO_ERROR)
+ return GL_NO_ERROR;
+ shared_context_->MarkVirtualContextLost();
+ // Don't pretend we know which one of the virtual contexts was responsible.
+ return GL_UNKNOWN_CONTEXT_RESET_ARB;
}
void GLContextVirtual::SetUnbindFboOnMakeCurrent() {
diff --git a/chromium/gpu/command_buffer/service/gl_stream_texture_image.h b/chromium/gpu/command_buffer/service/gl_stream_texture_image.h
index 501e5cc35d4..27084971eff 100644
--- a/chromium/gpu/command_buffer/service/gl_stream_texture_image.h
+++ b/chromium/gpu/command_buffer/service/gl_stream_texture_image.h
@@ -16,12 +16,6 @@ namespace gles2 {
// that supply a texture matrix.
class GPU_GLES2_EXPORT GLStreamTextureImage : public gl::GLImage {
public:
- // Get the matrix.
- // Copy the texture matrix for this image into |matrix|.
- // Subclasses must return a matrix appropriate for a coordinate system where
- // UV=(0,0) corresponds to the top left corner of the image.
- virtual void GetTextureMatrix(float matrix[16]) = 0;
-
// TODO(weiliangc): When Overlay is moved off command buffer and we use
// SharedImage in all cases, this API should be deleted.
virtual void NotifyPromotionHint(bool promotion_hint,
@@ -32,19 +26,6 @@ class GPU_GLES2_EXPORT GLStreamTextureImage : public gl::GLImage {
protected:
~GLStreamTextureImage() override = default;
-
- // Convenience function for subclasses that deal with SurfaceTextures, whose
- // coordinate system has (0,0) at the bottom left of the image.
- // [ a e i m ] [ 1 0 0 0 ] [ a -e i m+e ]
- // [ b f j n ] [ 0 -1 0 1 ] = [ b -f j n+f ]
- // [ c g k o ] [ 0 0 1 0 ] [ c -g k o+g ]
- // [ d h l p ] [ 0 0 0 1 ] [ d -h l p+h ]
- static void YInvertMatrix(float matrix[16]) {
- for (int i = 0; i < 4; ++i) {
- matrix[i + 12] += matrix[i + 4];
- matrix[i + 4] = -matrix[i + 4];
- }
- }
};
} // namespace gles2
diff --git a/chromium/gpu/command_buffer/service/gl_stream_texture_image_stub.h b/chromium/gpu/command_buffer/service/gl_stream_texture_image_stub.h
index 9c35dae2934..1c7271618ca 100644
--- a/chromium/gpu/command_buffer/service/gl_stream_texture_image_stub.h
+++ b/chromium/gpu/command_buffer/service/gl_stream_texture_image_stub.h
@@ -42,7 +42,6 @@ class GLStreamTextureImageStub : public GLStreamTextureImage {
bool EmulatingRGB() const override;
// Overridden from GLStreamTextureImage:
- void GetTextureMatrix(float matrix[16]) override {}
void NotifyPromotionHint(bool promotion_hint,
int display_x,
int display_y,
diff --git a/chromium/gpu/command_buffer/service/gles2_cmd_copy_texture_chromium.cc b/chromium/gpu/command_buffer/service/gles2_cmd_copy_texture_chromium.cc
index 2eb24507574..c84b347b150 100644
--- a/chromium/gpu/command_buffer/service/gles2_cmd_copy_texture_chromium.cc
+++ b/chromium/gpu/command_buffer/service/gles2_cmd_copy_texture_chromium.cc
@@ -464,12 +464,9 @@ std::string GetFragmentShaderSource(unsigned glslVersion,
// Main shader source.
source +=
"uniform SamplerType u_sampler;\n"
- "uniform mat4 u_tex_coord_transform;\n"
"VARYING TexCoordPrecision vec2 v_uv;\n"
"void main(void) {\n"
- " TexCoordPrecision vec4 uv =\n"
- " u_tex_coord_transform * vec4(v_uv, 0, 1);\n"
- " vec4 color = TextureLookup(u_sampler, uv.st);\n";
+ " vec4 color = TextureLookup(u_sampler, v_uv);\n";
// Premultiply or un-premultiply alpha. Must always do this, even
// if the destination format doesn't have an alpha channel.
@@ -927,59 +924,12 @@ class CopyTextureResourceManagerImpl
bool dither,
CopyTextureMethod method,
CopyTexImageResourceManager* luma_emulation_blitter) override;
- void DoCopySubTextureWithTransform(
- DecoderContext* decoder,
- GLenum source_target,
- GLuint source_id,
- GLint source_level,
- GLenum source_internal_format,
- GLenum dest_target,
- GLuint dest_id,
- GLint dest_level,
- GLenum dest_internal_format,
- GLint xoffset,
- GLint yoffset,
- GLint x,
- GLint y,
- GLsizei width,
- GLsizei height,
- GLsizei dest_width,
- GLsizei dest_height,
- GLsizei source_width,
- GLsizei source_height,
- bool flip_y,
- bool premultiply_alpha,
- bool unpremultiply_alpha,
- bool dither,
- const GLfloat transform_matrix[16],
- CopyTexImageResourceManager* luma_emulation_blitter) override;
- void DoCopyTextureWithTransform(
- DecoderContext* decoder,
- GLenum source_target,
- GLuint source_id,
- GLint source_level,
- GLenum source_format,
- GLenum dest_target,
- GLuint dest_id,
- GLint dest_level,
- GLenum dest_format,
- GLsizei width,
- GLsizei height,
- bool flip_y,
- bool premultiply_alpha,
- bool unpremultiply_alpha,
- bool dither,
- const GLfloat transform_matrix[16],
- CopyTextureMethod method,
- CopyTexImageResourceManager* luma_emulation_blitter) override;
-
private:
struct ProgramInfo {
ProgramInfo()
: program(0u),
vertex_source_mult_handle(0u),
vertex_source_add_handle(0u),
- tex_coord_transform_handle(0u),
sampler_handle(0u) {}
GLuint program;
@@ -989,7 +939,6 @@ class CopyTextureResourceManagerImpl
GLuint vertex_source_mult_handle;
GLuint vertex_source_add_handle;
- GLuint tex_coord_transform_handle;
GLuint sampler_handle;
};
@@ -1017,7 +966,6 @@ class CopyTextureResourceManagerImpl
bool premultiply_alpha,
bool unpremultiply_alpha,
bool dither,
- const GLfloat transform_matrix[16],
CopyTexImageResourceManager* luma_emulation_blitter);
bool initialized_;
@@ -1117,32 +1065,6 @@ void CopyTextureResourceManagerImpl::Destroy() {
buffer_id_ = 0;
}
-void CopyTextureResourceManagerImpl::DoCopyTexture(
- DecoderContext* decoder,
- GLenum source_target,
- GLuint source_id,
- GLint source_level,
- GLenum source_internal_format,
- GLenum dest_target,
- GLuint dest_id,
- GLint dest_level,
- GLenum dest_internal_format,
- GLsizei width,
- GLsizei height,
- bool flip_y,
- bool premultiply_alpha,
- bool unpremultiply_alpha,
- bool dither,
- CopyTextureMethod method,
- gpu::gles2::CopyTexImageResourceManager* luma_emulation_blitter) {
- // Use kIdentityMatrix if no transform passed in.
- DoCopyTextureWithTransform(
- decoder, source_target, source_id, source_level, source_internal_format,
- dest_target, dest_id, dest_level, dest_internal_format, width, height,
- flip_y, premultiply_alpha, unpremultiply_alpha, dither, kIdentityMatrix,
- method, luma_emulation_blitter);
-}
-
void CopyTextureResourceManagerImpl::DoCopySubTexture(
DecoderContext* decoder,
GLenum source_target,
@@ -1210,12 +1132,12 @@ void CopyTextureResourceManagerImpl::DoCopySubTexture(
dest_height = height;
}
- DoCopySubTextureWithTransform(
+ DoCopyTextureInternal(
decoder, source_target, source_id, source_level, source_internal_format,
dest_target, dest_texture, dest_level, dest_internal_format, dest_xoffset,
dest_yoffset, x, y, width, height, dest_width, dest_height, source_width,
source_height, flip_y, premultiply_alpha, unpremultiply_alpha, dither,
- kIdentityMatrix, luma_emulation_blitter);
+ luma_emulation_blitter);
if (method == CopyTextureMethod::DRAW_AND_COPY ||
method == CopyTextureMethod::DRAW_AND_READBACK) {
@@ -1237,41 +1159,7 @@ void CopyTextureResourceManagerImpl::DoCopySubTexture(
}
}
-void CopyTextureResourceManagerImpl::DoCopySubTextureWithTransform(
- DecoderContext* decoder,
- GLenum source_target,
- GLuint source_id,
- GLint source_level,
- GLenum source_internal_format,
- GLenum dest_target,
- GLuint dest_id,
- GLint dest_level,
- GLenum dest_internal_format,
- GLint xoffset,
- GLint yoffset,
- GLint x,
- GLint y,
- GLsizei width,
- GLsizei height,
- GLsizei dest_width,
- GLsizei dest_height,
- GLsizei source_width,
- GLsizei source_height,
- bool flip_y,
- bool premultiply_alpha,
- bool unpremultiply_alpha,
- bool dither,
- const GLfloat transform_matrix[16],
- gpu::gles2::CopyTexImageResourceManager* luma_emulation_blitter) {
- DoCopyTextureInternal(
- decoder, source_target, source_id, source_level, source_internal_format,
- dest_target, dest_id, dest_level, dest_internal_format, xoffset, yoffset,
- x, y, width, height, dest_width, dest_height, source_width, source_height,
- flip_y, premultiply_alpha, unpremultiply_alpha, dither, transform_matrix,
- luma_emulation_blitter);
-}
-
-void CopyTextureResourceManagerImpl::DoCopyTextureWithTransform(
+void CopyTextureResourceManagerImpl::DoCopyTexture(
DecoderContext* decoder,
GLenum source_target,
GLuint source_id,
@@ -1287,7 +1175,6 @@ void CopyTextureResourceManagerImpl::DoCopyTextureWithTransform(
bool premultiply_alpha,
bool unpremultiply_alpha,
bool dither,
- const GLfloat transform_matrix[16],
CopyTextureMethod method,
gpu::gles2::CopyTexImageResourceManager* luma_emulation_blitter) {
GLsizei dest_width = width;
@@ -1326,12 +1213,11 @@ void CopyTextureResourceManagerImpl::DoCopyTextureWithTransform(
dest_internal_format = adjusted_internal_format;
}
- DoCopyTextureInternal(decoder, source_target, source_id, source_level,
- source_internal_format, dest_target, dest_texture,
- dest_level, dest_internal_format, 0, 0, 0, 0, width,
- height, dest_width, dest_height, width, height, flip_y,
- premultiply_alpha, unpremultiply_alpha, dither,
- transform_matrix, luma_emulation_blitter);
+ DoCopyTextureInternal(
+ decoder, source_target, source_id, source_level, source_internal_format,
+ dest_target, dest_texture, dest_level, dest_internal_format, 0, 0, 0, 0,
+ width, height, dest_width, dest_height, width, height, flip_y,
+ premultiply_alpha, unpremultiply_alpha, dither, luma_emulation_blitter);
if (method == CopyTextureMethod::DRAW_AND_COPY ||
method == CopyTextureMethod::DRAW_AND_READBACK) {
@@ -1375,7 +1261,6 @@ void CopyTextureResourceManagerImpl::DoCopyTextureInternal(
bool premultiply_alpha,
bool unpremultiply_alpha,
bool dither,
- const GLfloat transform_matrix[16],
gpu::gles2::CopyTexImageResourceManager* luma_emulation_blitter) {
DCHECK(source_target == GL_TEXTURE_2D ||
source_target == GL_TEXTURE_RECTANGLE_ARB ||
@@ -1465,15 +1350,10 @@ void CopyTextureResourceManagerImpl::DoCopyTextureInternal(
info->vertex_source_add_handle =
glGetUniformLocation(info->program, "u_vertex_source_add");
- info->tex_coord_transform_handle =
- glGetUniformLocation(info->program, "u_tex_coord_transform");
info->sampler_handle = glGetUniformLocation(info->program, "u_sampler");
}
glUseProgram(info->program);
- glUniformMatrix4fv(info->tex_coord_transform_handle, 1, GL_FALSE,
- transform_matrix);
-
// Note: For simplicity, the calculations in this comment block use a single
// dimension. All calculations trivially extend to the x-y plane.
// The target subrange in the source texture has coordinates [x, x + width].
diff --git a/chromium/gpu/command_buffer/service/gles2_cmd_copy_texture_chromium.h b/chromium/gpu/command_buffer/service/gles2_cmd_copy_texture_chromium.h
index 351e181a635..33207ea04a0 100644
--- a/chromium/gpu/command_buffer/service/gles2_cmd_copy_texture_chromium.h
+++ b/chromium/gpu/command_buffer/service/gles2_cmd_copy_texture_chromium.h
@@ -103,57 +103,6 @@ class GPU_GLES2_EXPORT CopyTextureCHROMIUMResourceManager {
CopyTextureMethod method,
CopyTexImageResourceManager* luma_emulation_blitter) = 0;
- virtual void DoCopySubTextureWithTransform(
- DecoderContext* decoder,
- GLenum source_target,
- GLuint source_id,
- GLint source_level,
- GLenum source_internal_format,
- GLenum dest_target,
- GLuint dest_id,
- GLint dest_level,
- GLenum dest_internal_format,
- GLint xoffset,
- GLint yoffset,
- GLint x,
- GLint y,
- GLsizei width,
- GLsizei height,
- GLsizei dest_width,
- GLsizei dest_height,
- GLsizei source_width,
- GLsizei source_height,
- bool flip_y,
- bool premultiply_alpha,
- bool unpremultiply_alpha,
- bool dither,
- const GLfloat transform_matrix[16],
- CopyTexImageResourceManager* luma_emulation_blitter) = 0;
-
- // This will apply a transform on the texture coordinates before sampling
- // the source texture and copying to the destination texture. The transform
- // matrix should be given in column-major form, so it can be passed
- // directly to GL.
- virtual void DoCopyTextureWithTransform(
- DecoderContext* decoder,
- GLenum source_target,
- GLuint source_id,
- GLint source_level,
- GLenum source_format,
- GLenum dest_target,
- GLuint dest_id,
- GLint dest_level,
- GLenum dest_format,
- GLsizei width,
- GLsizei height,
- bool flip_y,
- bool premultiply_alpha,
- bool unpremultiply_alpha,
- bool dither,
- const GLfloat transform_matrix[16],
- CopyTextureMethod method,
- CopyTexImageResourceManager* luma_emulation_blitter) = 0;
-
// The attributes used during invocation of the extension.
static const GLuint kVertexPositionAttrib = 0;
diff --git a/chromium/gpu/command_buffer/service/gles2_cmd_decoder.cc b/chromium/gpu/command_buffer/service/gles2_cmd_decoder.cc
index d729023a0b9..250c811b727 100644
--- a/chromium/gpu/command_buffer/service/gles2_cmd_decoder.cc
+++ b/chromium/gpu/command_buffer/service/gles2_cmd_decoder.cc
@@ -1092,8 +1092,7 @@ class GLES2DecoderImpl : public GLES2Decoder,
// Callback for async SwapBuffers.
void FinishAsyncSwapBuffers(uint64_t swap_id,
- gfx::SwapResult result,
- std::unique_ptr<gfx::GpuFence>);
+ gfx::SwapCompletionResult result);
void FinishSwapBuffers(gfx::SwapResult result);
void DoCommitOverlayPlanes(uint64_t swap_id, GLbitfield flags);
@@ -1703,6 +1702,9 @@ class GLES2DecoderImpl : public GLES2Decoder,
// Wrapper for glDisable
void DoDisable(GLenum cap);
+ // Wrapper for glDisableiOES
+ void DoDisableiOES(GLenum target, GLuint index);
+
// Wrapper for glDisableVertexAttribArray.
void DoDisableVertexAttribArray(GLuint index);
@@ -1737,6 +1739,9 @@ class GLES2DecoderImpl : public GLES2Decoder,
// Wrapper for glEnable
void DoEnable(GLenum cap);
+ // Wrapper for glEnableiOES
+ void DoEnableiOES(GLenum target, GLuint index);
+
// Wrapper for glEnableVertexAttribArray.
void DoEnableVertexAttribArray(GLuint index);
@@ -1809,11 +1814,17 @@ class GLES2DecoderImpl : public GLES2Decoder,
// Wrapper for glGetIntegerv.
void DoGetIntegerv(GLenum pname, GLint* params, GLsizei params_size);
- // Helper for DoGetIntegeri_v and DoGetInteger64i_v.
+ // Helper for DoGetBooleani_v, DoGetIntegeri_v and DoGetInteger64i_v.
template <typename TYPE>
void GetIndexedIntegerImpl(
const char* function_name, GLenum target, GLuint index, TYPE* data);
+ // Wrapper for glGetBooleani_v.
+ void DoGetBooleani_v(GLenum target,
+ GLuint index,
+ GLboolean* params,
+ GLsizei params_size);
+
// Wrapper for glGetIntegeri_v.
void DoGetIntegeri_v(GLenum target,
GLuint index,
@@ -1925,6 +1936,8 @@ class GLES2DecoderImpl : public GLES2Decoder,
bool DoIsVertexArrayOES(GLuint client_id);
bool DoIsSync(GLuint client_id);
+ bool DoIsEnablediOES(GLenum target, GLuint index);
+
void DoLineWidth(GLfloat width);
// Wrapper for glLinkProgram
@@ -4116,8 +4129,7 @@ gpu::ContextResult GLES2DecoderImpl::Initialize(
InitializeGLDebugLogging(true, GLDebugMessageCallback, &logger_);
}
- if (feature_info_->feature_flags().chromium_texture_filtering_hint &&
- feature_info_->feature_flags().is_swiftshader) {
+ if (feature_info_->feature_flags().chromium_texture_filtering_hint) {
api()->glHintFn(GL_TEXTURE_FILTERING_HINT_CHROMIUM, GL_NICEST);
}
@@ -7778,6 +7790,7 @@ void GLES2DecoderImpl::GetIndexedIntegerImpl(
state_.GetWindowRectangle(index, data);
return;
}
+
scoped_refptr<IndexedBufferBindingHost> bindings;
switch (target) {
case GL_TRANSFORM_FEEDBACK_BUFFER_BINDING:
@@ -7798,6 +7811,16 @@ void GLES2DecoderImpl::GetIndexedIntegerImpl(
}
bindings = state_.indexed_uniform_buffer_bindings.get();
break;
+ case GL_BLEND_SRC_RGB:
+ case GL_BLEND_SRC_ALPHA:
+ case GL_BLEND_DST_RGB:
+ case GL_BLEND_DST_ALPHA:
+ case GL_BLEND_EQUATION_RGB:
+ case GL_BLEND_EQUATION_ALPHA:
+ case GL_COLOR_WRITEMASK:
+ // Note (crbug.com/1058744): not implemented for validating command
+ // decoder
+ break;
default:
NOTREACHED();
break;
@@ -7819,12 +7842,29 @@ void GLES2DecoderImpl::GetIndexedIntegerImpl(
case GL_UNIFORM_BUFFER_START:
*data = static_cast<TYPE>(bindings->GetBufferStart(index));
break;
+ case GL_BLEND_SRC_RGB:
+ case GL_BLEND_SRC_ALPHA:
+ case GL_BLEND_DST_RGB:
+ case GL_BLEND_DST_ALPHA:
+ case GL_BLEND_EQUATION_RGB:
+ case GL_BLEND_EQUATION_ALPHA:
+ case GL_COLOR_WRITEMASK:
+ // Note (crbug.com/1058744): not implemented for validating command
+ // decoder
+ break;
default:
NOTREACHED();
break;
}
}
+void GLES2DecoderImpl::DoGetBooleani_v(GLenum target,
+ GLuint index,
+ GLboolean* params,
+ GLsizei params_size) {
+ GetIndexedIntegerImpl<GLboolean>("glGetBooleani_v", target, index, params);
+}
+
void GLES2DecoderImpl::DoGetIntegeri_v(GLenum target,
GLuint index,
GLint* params,
@@ -8358,6 +8398,10 @@ void GLES2DecoderImpl::DoDisable(GLenum cap) {
}
}
+void GLES2DecoderImpl::DoDisableiOES(GLenum target, GLuint index) {
+ api()->glDisableiOESFn(target, index);
+}
+
void GLES2DecoderImpl::DoEnable(GLenum cap) {
if (SetCapabilityState(cap, true)) {
if (cap == GL_PRIMITIVE_RESTART_FIXED_INDEX &&
@@ -8375,6 +8419,10 @@ void GLES2DecoderImpl::DoEnable(GLenum cap) {
}
}
+void GLES2DecoderImpl::DoEnableiOES(GLenum target, GLuint index) {
+ api()->glEnableiOESFn(target, index);
+}
+
void GLES2DecoderImpl::DoDepthRangef(GLclampf znear, GLclampf zfar) {
state_.z_near = base::ClampToRange(znear, 0.0f, 1.0f);
state_.z_far = base::ClampToRange(zfar, 0.0f, 1.0f);
@@ -10418,32 +10466,9 @@ void GLES2DecoderImpl::DoUniformMatrix4fvStreamTextureMatrixCHROMIUM(
GLint fake_location,
GLboolean transpose,
const volatile GLfloat* transform) {
- float gl_matrix[16];
-
// This refers to the bound external texture on the active unit.
TextureUnit& unit = state_.texture_units[state_.active_texture_unit];
- if (TextureRef* texture_ref = unit.bound_texture_external_oes.get()) {
- if (GLStreamTextureImage* image =
- texture_ref->texture()->GetLevelStreamTextureImage(
- GL_TEXTURE_EXTERNAL_OES, 0)) {
- gfx::Transform st_transform(gfx::Transform::kSkipInitialization);
- gfx::Transform pre_transform(gfx::Transform::kSkipInitialization);
- image->GetTextureMatrix(gl_matrix);
- st_transform.matrix().setColMajorf(gl_matrix);
- // const_cast is safe, because setColMajorf only does a memcpy.
- // TODO(piman): can we remove this assumption without having to introduce
- // an extra copy?
- pre_transform.matrix().setColMajorf(
- const_cast<const GLfloat*>(transform));
- gfx::Transform(pre_transform, st_transform)
- .matrix()
- .asColMajorf(gl_matrix);
- } else {
- // Missing stream texture. Treat matrix as identity.
- memcpy(gl_matrix, const_cast<const GLfloat*>(transform),
- sizeof(gl_matrix));
- }
- } else {
+ if (!unit.bound_texture_external_oes.get()) {
LOCAL_SET_GL_ERROR(GL_INVALID_OPERATION,
"DoUniformMatrix4vStreamTextureMatrix",
"no texture bound");
@@ -10459,7 +10484,8 @@ void GLES2DecoderImpl::DoUniformMatrix4fvStreamTextureMatrixCHROMIUM(
return;
}
- api()->glUniformMatrix4fvFn(real_location, count, transpose, gl_matrix);
+ api()->glUniformMatrix4fvFn(real_location, count, transpose,
+ const_cast<const GLfloat*>(transform));
}
void GLES2DecoderImpl::DoUniformMatrix2x3fv(GLint fake_location,
@@ -12382,6 +12408,11 @@ bool GLES2DecoderImpl::DoIsEnabled(GLenum cap) {
return state_.GetEnabled(cap);
}
+bool GLES2DecoderImpl::DoIsEnablediOES(GLenum target, GLuint index) {
+ // Note (crbug.com/1058744): not implemented for validating command decoder
+ return false;
+}
+
bool GLES2DecoderImpl::DoIsBuffer(GLuint client_id) {
const Buffer* buffer = GetBuffer(client_id);
return buffer && buffer->IsValid() && !buffer->IsDeleted();
@@ -16997,14 +17028,13 @@ void GLES2DecoderImpl::DoSwapBuffers(uint64_t swap_id, GLbitfield flags) {
void GLES2DecoderImpl::FinishAsyncSwapBuffers(
uint64_t swap_id,
- gfx::SwapResult result,
- std::unique_ptr<gfx::GpuFence> gpu_fence) {
+ gfx::SwapCompletionResult result) {
TRACE_EVENT_ASYNC_END0("gpu", "AsyncSwapBuffers", swap_id);
// Handling of the out-fence should have already happened before reaching
// this function, so we don't expect to get a valid fence here.
- DCHECK(!gpu_fence);
+ DCHECK(!result.gpu_fence);
- FinishSwapBuffers(result);
+ FinishSwapBuffers(result.swap_result);
}
void GLES2DecoderImpl::FinishSwapBuffers(gfx::SwapResult result) {
@@ -17438,7 +17468,7 @@ error::Error GLES2DecoderImpl::HandleDescheduleUntilFinishedCHROMIUM(
if (fence)
deschedule_until_finished_fences_.push_back(std::move(fence));
- if (deschedule_until_finished_fences_.size() == 1)
+ if (deschedule_until_finished_fences_.size() <= 1)
return error::kNoError;
DCHECK_EQ(2u, deschedule_until_finished_fences_.size());
@@ -18210,24 +18240,6 @@ void GLES2DecoderImpl::DoCopyTextureCHROMIUM(
unpack_flip_y == GL_TRUE, unpack_premultiply_alpha == GL_TRUE,
unpack_unmultiply_alpha == GL_TRUE, false /* dither */);
- // GL_TEXTURE_EXTERNAL_OES texture requires that we apply a transform matrix
- // before presenting.
- if (source_target == GL_TEXTURE_EXTERNAL_OES) {
- if (GLStreamTextureImage* texture_image =
- source_texture->GetLevelStreamTextureImage(GL_TEXTURE_EXTERNAL_OES,
- source_level)) {
- GLfloat transform_matrix[16];
- texture_image->GetTextureMatrix(transform_matrix);
- copy_texture_chromium_->DoCopyTextureWithTransform(
- this, source_target, source_texture->service_id(), source_level,
- source_internal_format, dest_target, dest_texture->service_id(),
- dest_level, internal_format, source_width, source_height,
- unpack_flip_y == GL_TRUE, unpack_premultiply_alpha == GL_TRUE,
- unpack_unmultiply_alpha == GL_TRUE, false /* dither */,
- transform_matrix, method, copy_tex_image_blit_.get());
- return;
- }
- }
copy_texture_chromium_->DoCopyTexture(
this, source_target, source_texture->service_id(), source_level,
source_internal_format, dest_target, dest_texture->service_id(),
@@ -18431,26 +18443,6 @@ void GLES2DecoderImpl::CopySubTextureHelper(const char* function_name,
DoBindOrCopyTexImageIfNeeded(source_texture, source_target, 0);
- // GL_TEXTURE_EXTERNAL_OES texture requires apply a transform matrix
- // before presenting.
- if (source_target == GL_TEXTURE_EXTERNAL_OES) {
- if (GLStreamTextureImage* texture_image =
- source_texture->GetLevelStreamTextureImage(GL_TEXTURE_EXTERNAL_OES,
- source_level)) {
- GLfloat transform_matrix[16];
- texture_image->GetTextureMatrix(transform_matrix);
- copy_texture_chromium_->DoCopySubTextureWithTransform(
- this, source_target, source_texture->service_id(), source_level,
- source_internal_format, dest_target, dest_texture->service_id(),
- dest_level, dest_internal_format, xoffset, yoffset, x, y, width,
- height, dest_width, dest_height, source_width, source_height,
- unpack_flip_y == GL_TRUE, unpack_premultiply_alpha == GL_TRUE,
- unpack_unmultiply_alpha == GL_TRUE, dither == GL_TRUE,
- transform_matrix, copy_tex_image_blit_.get());
- return;
- }
- }
-
CopyTextureMethod method = GetCopyTextureCHROMIUMMethod(
GetFeatureInfo(), source_target, source_level, source_internal_format,
source_type, dest_binding_target, dest_level, dest_internal_format,
diff --git a/chromium/gpu/command_buffer/service/gles2_cmd_decoder.h b/chromium/gpu/command_buffer/service/gles2_cmd_decoder.h
index 7491797e7fd..147e831f5cf 100644
--- a/chromium/gpu/command_buffer/service/gles2_cmd_decoder.h
+++ b/chromium/gpu/command_buffer/service/gles2_cmd_decoder.h
@@ -69,6 +69,7 @@ struct GPU_GLES2_EXPORT DisallowedFeatures {
oes_texture_half_float_linear = false;
ext_float_blend = false;
oes_fbo_render_mipmap = false;
+ oes_draw_buffers_indexed = false;
}
bool operator==(const DisallowedFeatures& other) const {
@@ -85,6 +86,7 @@ struct GPU_GLES2_EXPORT DisallowedFeatures {
bool oes_texture_half_float_linear = false;
bool ext_float_blend = false;
bool oes_fbo_render_mipmap = false;
+ bool oes_draw_buffers_indexed = false;
};
// This class implements the DecoderContext interface, decoding GLES2
diff --git a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_autogen.h b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_autogen.h
index f8be401bf99..615acf88e5a 100644
--- a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_autogen.h
+++ b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_autogen.h
@@ -1314,6 +1314,43 @@ error::Error GLES2DecoderImpl::HandleGetBooleanv(
return error::kNoError;
}
+error::Error GLES2DecoderImpl::HandleGetBooleani_v(
+ uint32_t immediate_data_size,
+ const volatile void* cmd_data) {
+ if (!feature_info_->IsWebGL2OrES3OrHigherContext())
+ return error::kUnknownCommand;
+ const volatile gles2::cmds::GetBooleani_v& c =
+ *static_cast<const volatile gles2::cmds::GetBooleani_v*>(cmd_data);
+ GLenum pname = static_cast<GLenum>(c.pname);
+ GLuint index = static_cast<GLuint>(c.index);
+ typedef cmds::GetBooleani_v::Result Result;
+ GLsizei num_values = 0;
+ if (!GetNumValuesReturnedForGLGet(pname, &num_values)) {
+ LOCAL_SET_GL_ERROR_INVALID_ENUM(":GetBooleani_v", pname, "pname");
+ return error::kNoError;
+ }
+ uint32_t checked_size = 0;
+ if (!Result::ComputeSize(num_values).AssignIfValid(&checked_size)) {
+ return error::kOutOfBounds;
+ }
+ Result* result = GetSharedMemoryAs<Result*>(c.data_shm_id, c.data_shm_offset,
+ checked_size);
+ GLboolean* data = result ? result->GetData() : nullptr;
+ if (!validators_->indexed_g_l_state.IsValid(pname)) {
+ LOCAL_SET_GL_ERROR_INVALID_ENUM("glGetBooleani_v", pname, "pname");
+ return error::kNoError;
+ }
+ if (data == nullptr) {
+ return error::kOutOfBounds;
+ }
+ // Check that the client initialized the result.
+ if (result->size != 0) {
+ return error::kInvalidArguments;
+ }
+ DoGetBooleani_v(pname, index, data, num_values);
+ result->SetNumResults(num_values);
+ return error::kNoError;
+}
error::Error GLES2DecoderImpl::HandleGetBufferParameteri64v(
uint32_t immediate_data_size,
const volatile void* cmd_data) {
@@ -5617,6 +5654,141 @@ error::Error GLES2DecoderImpl::HandleEndBatchReadAccessSharedImageCHROMIUM(
return error::kNoError;
}
+error::Error GLES2DecoderImpl::HandleEnableiOES(uint32_t immediate_data_size,
+ const volatile void* cmd_data) {
+ const volatile gles2::cmds::EnableiOES& c =
+ *static_cast<const volatile gles2::cmds::EnableiOES*>(cmd_data);
+ if (!features().oes_draw_buffers_indexed) {
+ return error::kUnknownCommand;
+ }
+
+ GLenum target = static_cast<GLenum>(c.target);
+ GLuint index = static_cast<GLuint>(c.index);
+ DoEnableiOES(target, index);
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderImpl::HandleDisableiOES(
+ uint32_t immediate_data_size,
+ const volatile void* cmd_data) {
+ const volatile gles2::cmds::DisableiOES& c =
+ *static_cast<const volatile gles2::cmds::DisableiOES*>(cmd_data);
+ if (!features().oes_draw_buffers_indexed) {
+ return error::kUnknownCommand;
+ }
+
+ GLenum target = static_cast<GLenum>(c.target);
+ GLuint index = static_cast<GLuint>(c.index);
+ DoDisableiOES(target, index);
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderImpl::HandleBlendEquationiOES(
+ uint32_t immediate_data_size,
+ const volatile void* cmd_data) {
+ const volatile gles2::cmds::BlendEquationiOES& c =
+ *static_cast<const volatile gles2::cmds::BlendEquationiOES*>(cmd_data);
+ if (!features().oes_draw_buffers_indexed) {
+ return error::kUnknownCommand;
+ }
+
+ GLuint buf = static_cast<GLuint>(c.buf);
+ GLenum mode = static_cast<GLenum>(c.mode);
+ api()->glBlendEquationiOESFn(buf, mode);
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderImpl::HandleBlendEquationSeparateiOES(
+ uint32_t immediate_data_size,
+ const volatile void* cmd_data) {
+ const volatile gles2::cmds::BlendEquationSeparateiOES& c =
+ *static_cast<const volatile gles2::cmds::BlendEquationSeparateiOES*>(
+ cmd_data);
+ if (!features().oes_draw_buffers_indexed) {
+ return error::kUnknownCommand;
+ }
+
+ GLuint buf = static_cast<GLuint>(c.buf);
+ GLenum modeRGB = static_cast<GLenum>(c.modeRGB);
+ GLenum modeAlpha = static_cast<GLenum>(c.modeAlpha);
+ api()->glBlendEquationSeparateiOESFn(buf, modeRGB, modeAlpha);
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderImpl::HandleBlendFunciOES(
+ uint32_t immediate_data_size,
+ const volatile void* cmd_data) {
+ const volatile gles2::cmds::BlendFunciOES& c =
+ *static_cast<const volatile gles2::cmds::BlendFunciOES*>(cmd_data);
+ if (!features().oes_draw_buffers_indexed) {
+ return error::kUnknownCommand;
+ }
+
+ GLuint buf = static_cast<GLuint>(c.buf);
+ GLenum src = static_cast<GLenum>(c.src);
+ GLenum dst = static_cast<GLenum>(c.dst);
+ api()->glBlendFunciOESFn(buf, src, dst);
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderImpl::HandleBlendFuncSeparateiOES(
+ uint32_t immediate_data_size,
+ const volatile void* cmd_data) {
+ const volatile gles2::cmds::BlendFuncSeparateiOES& c =
+ *static_cast<const volatile gles2::cmds::BlendFuncSeparateiOES*>(
+ cmd_data);
+ if (!features().oes_draw_buffers_indexed) {
+ return error::kUnknownCommand;
+ }
+
+ GLuint buf = static_cast<GLuint>(c.buf);
+ GLenum srcRGB = static_cast<GLenum>(c.srcRGB);
+ GLenum dstRGB = static_cast<GLenum>(c.dstRGB);
+ GLenum srcAlpha = static_cast<GLenum>(c.srcAlpha);
+ GLenum dstAlpha = static_cast<GLenum>(c.dstAlpha);
+ api()->glBlendFuncSeparateiOESFn(buf, srcRGB, dstRGB, srcAlpha, dstAlpha);
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderImpl::HandleColorMaskiOES(
+ uint32_t immediate_data_size,
+ const volatile void* cmd_data) {
+ const volatile gles2::cmds::ColorMaskiOES& c =
+ *static_cast<const volatile gles2::cmds::ColorMaskiOES*>(cmd_data);
+ if (!features().oes_draw_buffers_indexed) {
+ return error::kUnknownCommand;
+ }
+
+ GLuint buf = static_cast<GLuint>(c.buf);
+ GLboolean r = static_cast<GLboolean>(c.r);
+ GLboolean g = static_cast<GLboolean>(c.g);
+ GLboolean b = static_cast<GLboolean>(c.b);
+ GLboolean a = static_cast<GLboolean>(c.a);
+ api()->glColorMaskiOESFn(buf, r, g, b, a);
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderImpl::HandleIsEnablediOES(
+ uint32_t immediate_data_size,
+ const volatile void* cmd_data) {
+ const volatile gles2::cmds::IsEnablediOES& c =
+ *static_cast<const volatile gles2::cmds::IsEnablediOES*>(cmd_data);
+ if (!features().oes_draw_buffers_indexed) {
+ return error::kUnknownCommand;
+ }
+
+ GLenum target = static_cast<GLenum>(c.target);
+ GLuint index = static_cast<GLuint>(c.index);
+ typedef cmds::IsEnablediOES::Result Result;
+ Result* result_dst = GetSharedMemoryAs<Result*>(
+ c.result_shm_id, c.result_shm_offset, sizeof(*result_dst));
+ if (!result_dst) {
+ return error::kOutOfBounds;
+ }
+ *result_dst = DoIsEnablediOES(target, index);
+ return error::kNoError;
+}
+
bool GLES2DecoderImpl::SetCapabilityState(GLenum cap, bool enabled) {
switch (cap) {
case GL_BLEND:
diff --git a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough.cc b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough.cc
index 6736ed33120..3fa8cacc086 100644
--- a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough.cc
+++ b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough.cc
@@ -24,6 +24,7 @@
#include "gpu/command_buffer/service/program_cache.h"
#include "gpu/command_buffer/service/shared_image_representation.h"
#include "ui/gl/gl_version_info.h"
+#include "ui/gl/gpu_switching_manager.h"
#include "ui/gl/progress_reporter.h"
#if defined(OS_WIN)
@@ -1092,8 +1093,7 @@ gpu::ContextResult GLES2DecoderPassthroughImpl::Initialize(
bound_buffers_[GL_DISPATCH_INDIRECT_BUFFER] = 0;
}
- if (feature_info_->feature_flags().chromium_texture_filtering_hint &&
- feature_info_->feature_flags().is_swiftshader) {
+ if (feature_info_->feature_flags().chromium_texture_filtering_hint) {
api()->glHintFn(GL_TEXTURE_FILTERING_HINT_CHROMIUM, GL_NICEST);
}
@@ -1210,6 +1210,11 @@ gpu::ContextResult GLES2DecoderPassthroughImpl::Initialize(
api()->glDisableFn(GL_TEXTURE_RECTANGLE_ANGLE);
#endif
+ // Register this object as a GPU switching observer.
+ if (feature_info_->IsWebGLContext()) {
+ ui::GpuSwitchingManager::GetInstance()->AddObserver(this);
+ }
+
set_initialized();
return gpu::ContextResult::kSuccess;
}
@@ -1315,6 +1320,11 @@ void GLES2DecoderPassthroughImpl::Destroy(bool have_context) {
}
deschedule_until_finished_fences_.clear();
+ // Unregister this object as a GPU switching observer.
+ if (feature_info_->IsWebGLContext()) {
+ ui::GpuSwitchingManager::GetInstance()->RemoveObserver(this);
+ }
+
// Destroy the surface before the context, some surface destructors make GL
// calls.
surface_ = nullptr;
@@ -1872,6 +1882,12 @@ gpu::gles2::Logger* GLES2DecoderPassthroughImpl::GetLogger() {
return &logger_;
}
+void GLES2DecoderPassthroughImpl::OnGpuSwitched(
+ gl::GpuPreference active_gpu_heuristic) {
+ // Send OnGpuSwitched notification to renderer process via decoder client.
+ client()->OnGpuSwitched(active_gpu_heuristic);
+}
+
void GLES2DecoderPassthroughImpl::BeginDecoding() {
gpu_tracer_->BeginDecoding();
gpu_trace_commands_ = gpu_tracer_->IsTracing() && *gpu_decoder_category_;
@@ -2865,14 +2881,13 @@ bool GLES2DecoderPassthroughImpl::IsEmulatedFramebufferBound(
void GLES2DecoderPassthroughImpl::CheckSwapBuffersAsyncResult(
const char* function_name,
uint64_t swap_id,
- gfx::SwapResult result,
- std::unique_ptr<gfx::GpuFence> gpu_fence) {
+ gfx::SwapCompletionResult result) {
TRACE_EVENT_ASYNC_END0("gpu", "AsyncSwapBuffers", swap_id);
// Handling of the out-fence should have already happened before reaching
// this function, so we don't expect to get a valid fence here.
- DCHECK(!gpu_fence);
+ DCHECK(!result.gpu_fence);
- CheckSwapBuffersResult(result, function_name);
+ CheckSwapBuffersResult(result.swap_result, function_name);
}
error::Error GLES2DecoderPassthroughImpl::CheckSwapBuffersResult(
diff --git a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough.h b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough.h
index ade661ee2af..7ff062e8b00 100644
--- a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough.h
+++ b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough.h
@@ -35,6 +35,7 @@
#include "ui/gl/gl_fence.h"
#include "ui/gl/gl_image.h"
#include "ui/gl/gl_surface.h"
+#include "ui/gl/gpu_switching_observer.h"
namespace gl {
class GLFence;
@@ -139,7 +140,9 @@ struct PassthroughResources {
std::unordered_map<GLuint, MappedBuffer> mapped_buffer_map;
};
-class GPU_GLES2_EXPORT GLES2DecoderPassthroughImpl : public GLES2Decoder {
+class GPU_GLES2_EXPORT GLES2DecoderPassthroughImpl
+ : public GLES2Decoder,
+ public ui::GpuSwitchingObserver {
public:
GLES2DecoderPassthroughImpl(DecoderClient* client,
CommandBufferServiceBase* command_buffer_service,
@@ -347,6 +350,9 @@ class GPU_GLES2_EXPORT GLES2DecoderPassthroughImpl : public GLES2Decoder {
// directly, and needing to know if they failed due to loss.
bool CheckResetStatus() override;
+ // Implement GpuSwitchingObserver.
+ void OnGpuSwitched(gl::GpuPreference active_gpu_heuristic) override;
+
Logger* GetLogger() override;
void BeginDecoding() override;
@@ -465,8 +471,7 @@ class GPU_GLES2_EXPORT GLES2DecoderPassthroughImpl : public GLES2Decoder {
void CheckSwapBuffersAsyncResult(const char* function_name,
uint64_t swap_id,
- gfx::SwapResult result,
- std::unique_ptr<gfx::GpuFence> gpu_fence);
+ gfx::SwapCompletionResult result);
error::Error CheckSwapBuffersResult(gfx::SwapResult result,
const char* function_name);
diff --git a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough_doer_prototypes.h b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough_doer_prototypes.h
index be04a014d68..069eb85a96b 100644
--- a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough_doer_prototypes.h
+++ b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough_doer_prototypes.h
@@ -257,6 +257,11 @@ error::Error DoGetBooleanv(GLenum pname,
GLsizei bufsize,
GLsizei* length,
GLboolean* params);
+error::Error DoGetBooleani_v(GLenum pname,
+ GLuint index,
+ GLsizei bufsize,
+ GLsizei* length,
+ GLboolean* data);
error::Error DoGetBufferParameteri64v(GLenum target,
GLenum pname,
GLsizei bufsize,
@@ -1143,4 +1148,22 @@ error::Error DoBeginSharedImageAccessDirectCHROMIUM(GLuint client_id,
error::Error DoEndSharedImageAccessDirectCHROMIUM(GLuint client_id);
error::Error DoBeginBatchReadAccessSharedImageCHROMIUM(void);
error::Error DoEndBatchReadAccessSharedImageCHROMIUM(void);
+error::Error DoEnableiOES(GLenum target, GLuint index);
+error::Error DoDisableiOES(GLenum target, GLuint index);
+error::Error DoBlendEquationiOES(GLuint buf, GLenum mode);
+error::Error DoBlendEquationSeparateiOES(GLuint buf,
+ GLenum modeRGB,
+ GLenum modeAlpha);
+error::Error DoBlendFunciOES(GLuint buf, GLenum sfactor, GLenum dfactor);
+error::Error DoBlendFuncSeparateiOES(GLuint buf,
+ GLenum srcRGB,
+ GLenum dstRGB,
+ GLenum srcAlpha,
+ GLenum dstAlpha);
+error::Error DoColorMaskiOES(GLuint buf,
+ GLboolean red,
+ GLboolean green,
+ GLboolean blue,
+ GLboolean alpha);
+error::Error DoIsEnablediOES(GLenum target, GLuint index, uint32_t* result);
#endif // GPU_COMMAND_BUFFER_SERVICE_GLES2_CMD_DECODER_PASSTHROUGH_DOER_PROTOTYPES_H_
diff --git a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough_doers.cc b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough_doers.cc
index ecad6a45c14..19086f610a0 100644
--- a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough_doers.cc
+++ b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough_doers.cc
@@ -615,6 +615,12 @@ error::Error GLES2DecoderPassthroughImpl::DoBlendEquation(GLenum mode) {
return error::kNoError;
}
+error::Error GLES2DecoderPassthroughImpl::DoBlendEquationiOES(GLuint buf,
+ GLenum mode) {
+ api()->glBlendEquationiOESFn(buf, mode);
+ return error::kNoError;
+}
+
error::Error GLES2DecoderPassthroughImpl::DoBlendEquationSeparate(
GLenum modeRGB,
GLenum modeAlpha) {
@@ -622,12 +628,27 @@ error::Error GLES2DecoderPassthroughImpl::DoBlendEquationSeparate(
return error::kNoError;
}
+error::Error GLES2DecoderPassthroughImpl::DoBlendEquationSeparateiOES(
+ GLuint buf,
+ GLenum modeRGB,
+ GLenum modeAlpha) {
+ api()->glBlendEquationSeparateiOESFn(buf, modeRGB, modeAlpha);
+ return error::kNoError;
+}
+
error::Error GLES2DecoderPassthroughImpl::DoBlendFunc(GLenum sfactor,
GLenum dfactor) {
api()->glBlendFuncFn(sfactor, dfactor);
return error::kNoError;
}
+error::Error GLES2DecoderPassthroughImpl::DoBlendFunciOES(GLuint buf,
+ GLenum sfactor,
+ GLenum dfactor) {
+ api()->glBlendFunciOESFn(buf, sfactor, dfactor);
+ return error::kNoError;
+}
+
error::Error GLES2DecoderPassthroughImpl::DoBlendFuncSeparate(GLenum srcRGB,
GLenum dstRGB,
GLenum srcAlpha,
@@ -636,6 +657,16 @@ error::Error GLES2DecoderPassthroughImpl::DoBlendFuncSeparate(GLenum srcRGB,
return error::kNoError;
}
+error::Error GLES2DecoderPassthroughImpl::DoBlendFuncSeparateiOES(
+ GLuint buf,
+ GLenum srcRGB,
+ GLenum dstRGB,
+ GLenum srcAlpha,
+ GLenum dstAlpha) {
+ api()->glBlendFuncSeparateiOESFn(buf, srcRGB, dstRGB, srcAlpha, dstAlpha);
+ return error::kNoError;
+}
+
error::Error GLES2DecoderPassthroughImpl::DoBufferData(GLenum target,
GLsizeiptr size,
const void* data,
@@ -744,6 +775,15 @@ error::Error GLES2DecoderPassthroughImpl::DoColorMask(GLboolean red,
return error::kNoError;
}
+error::Error GLES2DecoderPassthroughImpl::DoColorMaskiOES(GLuint buf,
+ GLboolean red,
+ GLboolean green,
+ GLboolean blue,
+ GLboolean alpha) {
+ api()->glColorMaskiOESFn(buf, red, green, blue, alpha);
+ return error::kNoError;
+}
+
error::Error GLES2DecoderPassthroughImpl::DoCompileShader(GLuint shader) {
api()->glCompileShaderFn(GetShaderServiceID(shader, resources_));
return error::kNoError;
@@ -1591,6 +1631,15 @@ error::Error GLES2DecoderPassthroughImpl::DoGetBooleanv(GLenum pname,
});
}
+error::Error GLES2DecoderPassthroughImpl::DoGetBooleani_v(GLenum pname,
+ GLuint index,
+ GLsizei bufsize,
+ GLsizei* length,
+ GLboolean* data) {
+ glGetBooleani_vRobustANGLE(pname, index, bufsize, length, data);
+ return error::kNoError;
+}
+
error::Error GLES2DecoderPassthroughImpl::DoGetBufferParameteri64v(
GLenum target,
GLenum pname,
@@ -2238,6 +2287,13 @@ error::Error GLES2DecoderPassthroughImpl::DoIsEnabled(GLenum cap,
return error::kNoError;
}
+error::Error GLES2DecoderPassthroughImpl::DoIsEnablediOES(GLenum target,
+ GLuint index,
+ uint32_t* result) {
+ *result = api()->glIsEnablediOESFn(target, index);
+ return error::kNoError;
+}
+
error::Error GLES2DecoderPassthroughImpl::DoIsFramebuffer(GLuint framebuffer,
uint32_t* result) {
*result = api()->glIsFramebufferEXTFn(
@@ -5062,26 +5118,8 @@ GLES2DecoderPassthroughImpl::DoUniformMatrix4fvStreamTextureMatrixCHROMIUM(
return error::kNoError;
}
- float gl_matrix[16] = {};
-
- GLStreamTextureImage* image =
- bound_texture->GetStreamLevelImage(kTextureTarget, 0);
- if (image) {
- gfx::Transform st_transform(gfx::Transform::kSkipInitialization);
- gfx::Transform pre_transform(gfx::Transform::kSkipInitialization);
- image->GetTextureMatrix(gl_matrix);
- st_transform.matrix().setColMajorf(gl_matrix);
- // const_cast is safe, because setColMajorf only does a memcpy.
- // TODO(piman): can we remove this assumption without having to introduce
- // an extra copy?
- pre_transform.matrix().setColMajorf(const_cast<const GLfloat*>(transform));
- gfx::Transform(pre_transform, st_transform).matrix().asColMajorf(gl_matrix);
- } else {
- // Missing stream texture. Treat matrix as identity.
- memcpy(gl_matrix, const_cast<const GLfloat*>(transform), sizeof(gl_matrix));
- }
-
- api()->glUniformMatrix4fvFn(location, 1, transpose, gl_matrix);
+ api()->glUniformMatrix4fvFn(location, 1, transpose,
+ const_cast<const GLfloat*>(transform));
return error::kNoError;
}
@@ -5406,7 +5444,8 @@ error::Error
GLES2DecoderPassthroughImpl::DoBeginSharedImageAccessDirectCHROMIUM(
GLuint client_id,
GLenum mode) {
- if (mode != GL_SHARED_IMAGE_ACCESS_MODE_READWRITE_CHROMIUM &&
+ if (mode != GL_SHARED_IMAGE_ACCESS_MODE_OVERLAY_CHROMIUM &&
+ mode != GL_SHARED_IMAGE_ACCESS_MODE_READWRITE_CHROMIUM &&
mode != GL_SHARED_IMAGE_ACCESS_MODE_READ_CHROMIUM) {
InsertError(GL_INVALID_ENUM, "unrecognized access mode");
return error::kNoError;
@@ -5460,5 +5499,17 @@ GLES2DecoderPassthroughImpl::DoEndBatchReadAccessSharedImageCHROMIUM() {
return error::kNoError;
}
+error::Error GLES2DecoderPassthroughImpl::DoEnableiOES(GLenum target,
+ GLuint index) {
+ api()->glEnableiOESFn(target, index);
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderPassthroughImpl::DoDisableiOES(GLenum target,
+ GLuint index) {
+ api()->glDisableiOESFn(target, index);
+ return error::kNoError;
+}
+
} // namespace gles2
} // namespace gpu
diff --git a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough_handlers_autogen.cc b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough_handlers_autogen.cc
index 7e7ca53b9c9..cddfeff0b97 100644
--- a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough_handlers_autogen.cc
+++ b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough_handlers_autogen.cc
@@ -1130,6 +1130,37 @@ error::Error GLES2DecoderPassthroughImpl::HandleGetBooleanv(
return error::kNoError;
}
+error::Error GLES2DecoderPassthroughImpl::HandleGetBooleani_v(
+ uint32_t immediate_data_size,
+ const volatile void* cmd_data) {
+ if (!feature_info_->IsWebGL2OrES3OrHigherContext())
+ return error::kUnknownCommand;
+ const volatile gles2::cmds::GetBooleani_v& c =
+ *static_cast<const volatile gles2::cmds::GetBooleani_v*>(cmd_data);
+ GLenum pname = static_cast<GLenum>(c.pname);
+ GLuint index = static_cast<GLuint>(c.index);
+ unsigned int buffer_size = 0;
+ typedef cmds::GetBooleani_v::Result Result;
+ Result* result = GetSharedMemoryAndSizeAs<Result*>(
+ c.data_shm_id, c.data_shm_offset, sizeof(Result), &buffer_size);
+ GLboolean* data = result ? result->GetData() : nullptr;
+ if (data == nullptr) {
+ return error::kOutOfBounds;
+ }
+ GLsizei bufsize = Result::ComputeMaxResults(buffer_size);
+ GLsizei written_values = 0;
+ GLsizei* length = &written_values;
+ error::Error error = DoGetBooleani_v(pname, index, bufsize, length, data);
+ if (error != error::kNoError) {
+ return error;
+ }
+ if (written_values > bufsize) {
+ return error::kOutOfBounds;
+ }
+ result->SetNumResults(written_values);
+ return error::kNoError;
+}
+
error::Error GLES2DecoderPassthroughImpl::HandleGetBufferParameteri64v(
uint32_t immediate_data_size,
const volatile void* cmd_data) {
@@ -4888,5 +4919,166 @@ GLES2DecoderPassthroughImpl::HandleEndBatchReadAccessSharedImageCHROMIUM(
return error::kNoError;
}
+error::Error GLES2DecoderPassthroughImpl::HandleEnableiOES(
+ uint32_t immediate_data_size,
+ const volatile void* cmd_data) {
+ const volatile gles2::cmds::EnableiOES& c =
+ *static_cast<const volatile gles2::cmds::EnableiOES*>(cmd_data);
+ if (!features().oes_draw_buffers_indexed) {
+ return error::kUnknownCommand;
+ }
+
+ GLenum target = static_cast<GLenum>(c.target);
+ GLuint index = static_cast<GLuint>(c.index);
+ error::Error error = DoEnableiOES(target, index);
+ if (error != error::kNoError) {
+ return error;
+ }
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderPassthroughImpl::HandleDisableiOES(
+ uint32_t immediate_data_size,
+ const volatile void* cmd_data) {
+ const volatile gles2::cmds::DisableiOES& c =
+ *static_cast<const volatile gles2::cmds::DisableiOES*>(cmd_data);
+ if (!features().oes_draw_buffers_indexed) {
+ return error::kUnknownCommand;
+ }
+
+ GLenum target = static_cast<GLenum>(c.target);
+ GLuint index = static_cast<GLuint>(c.index);
+ error::Error error = DoDisableiOES(target, index);
+ if (error != error::kNoError) {
+ return error;
+ }
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderPassthroughImpl::HandleBlendEquationiOES(
+ uint32_t immediate_data_size,
+ const volatile void* cmd_data) {
+ const volatile gles2::cmds::BlendEquationiOES& c =
+ *static_cast<const volatile gles2::cmds::BlendEquationiOES*>(cmd_data);
+ if (!features().oes_draw_buffers_indexed) {
+ return error::kUnknownCommand;
+ }
+
+ GLuint buf = static_cast<GLuint>(c.buf);
+ GLenum mode = static_cast<GLenum>(c.mode);
+ error::Error error = DoBlendEquationiOES(buf, mode);
+ if (error != error::kNoError) {
+ return error;
+ }
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderPassthroughImpl::HandleBlendEquationSeparateiOES(
+ uint32_t immediate_data_size,
+ const volatile void* cmd_data) {
+ const volatile gles2::cmds::BlendEquationSeparateiOES& c =
+ *static_cast<const volatile gles2::cmds::BlendEquationSeparateiOES*>(
+ cmd_data);
+ if (!features().oes_draw_buffers_indexed) {
+ return error::kUnknownCommand;
+ }
+
+ GLuint buf = static_cast<GLuint>(c.buf);
+ GLenum modeRGB = static_cast<GLenum>(c.modeRGB);
+ GLenum modeAlpha = static_cast<GLenum>(c.modeAlpha);
+ error::Error error = DoBlendEquationSeparateiOES(buf, modeRGB, modeAlpha);
+ if (error != error::kNoError) {
+ return error;
+ }
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderPassthroughImpl::HandleBlendFunciOES(
+ uint32_t immediate_data_size,
+ const volatile void* cmd_data) {
+ const volatile gles2::cmds::BlendFunciOES& c =
+ *static_cast<const volatile gles2::cmds::BlendFunciOES*>(cmd_data);
+ if (!features().oes_draw_buffers_indexed) {
+ return error::kUnknownCommand;
+ }
+
+ GLuint buf = static_cast<GLuint>(c.buf);
+ GLenum src = static_cast<GLenum>(c.src);
+ GLenum dst = static_cast<GLenum>(c.dst);
+ error::Error error = DoBlendFunciOES(buf, src, dst);
+ if (error != error::kNoError) {
+ return error;
+ }
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderPassthroughImpl::HandleBlendFuncSeparateiOES(
+ uint32_t immediate_data_size,
+ const volatile void* cmd_data) {
+ const volatile gles2::cmds::BlendFuncSeparateiOES& c =
+ *static_cast<const volatile gles2::cmds::BlendFuncSeparateiOES*>(
+ cmd_data);
+ if (!features().oes_draw_buffers_indexed) {
+ return error::kUnknownCommand;
+ }
+
+ GLuint buf = static_cast<GLuint>(c.buf);
+ GLenum srcRGB = static_cast<GLenum>(c.srcRGB);
+ GLenum dstRGB = static_cast<GLenum>(c.dstRGB);
+ GLenum srcAlpha = static_cast<GLenum>(c.srcAlpha);
+ GLenum dstAlpha = static_cast<GLenum>(c.dstAlpha);
+ error::Error error =
+ DoBlendFuncSeparateiOES(buf, srcRGB, dstRGB, srcAlpha, dstAlpha);
+ if (error != error::kNoError) {
+ return error;
+ }
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderPassthroughImpl::HandleColorMaskiOES(
+ uint32_t immediate_data_size,
+ const volatile void* cmd_data) {
+ const volatile gles2::cmds::ColorMaskiOES& c =
+ *static_cast<const volatile gles2::cmds::ColorMaskiOES*>(cmd_data);
+ if (!features().oes_draw_buffers_indexed) {
+ return error::kUnknownCommand;
+ }
+
+ GLuint buf = static_cast<GLuint>(c.buf);
+ GLboolean r = static_cast<GLboolean>(c.r);
+ GLboolean g = static_cast<GLboolean>(c.g);
+ GLboolean b = static_cast<GLboolean>(c.b);
+ GLboolean a = static_cast<GLboolean>(c.a);
+ error::Error error = DoColorMaskiOES(buf, r, g, b, a);
+ if (error != error::kNoError) {
+ return error;
+ }
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderPassthroughImpl::HandleIsEnablediOES(
+ uint32_t immediate_data_size,
+ const volatile void* cmd_data) {
+ const volatile gles2::cmds::IsEnablediOES& c =
+ *static_cast<const volatile gles2::cmds::IsEnablediOES*>(cmd_data);
+ if (!features().oes_draw_buffers_indexed) {
+ return error::kUnknownCommand;
+ }
+
+ GLenum target = static_cast<GLenum>(c.target);
+ GLuint index = static_cast<GLuint>(c.index);
+ typedef cmds::IsEnablediOES::Result Result;
+ Result* result = GetSharedMemoryAs<Result*>(
+ c.result_shm_id, c.result_shm_offset, sizeof(*result));
+ if (!result) {
+ return error::kOutOfBounds;
+ }
+ error::Error error = DoIsEnablediOES(target, index, result);
+ if (error != error::kNoError) {
+ return error;
+ }
+ return error::kNoError;
+}
+
} // namespace gles2
} // namespace gpu
diff --git a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_1.cc b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_1.cc
index bd8874bf3b2..97996f33dd9 100644
--- a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_1.cc
+++ b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_1.cc
@@ -176,21 +176,6 @@ void GLES2DecoderTestBase::SpecializedSetup<
}
template <>
-void GLES2DecoderTestBase::SpecializedSetup<cmds::GetProgramiv, 0>(
- bool valid) {
- if (valid) {
- // GetProgramiv calls ClearGLError then GetError to make sure
- // it actually got a value so it can report correctly to the client.
- EXPECT_CALL(*gl_, GetError())
- .WillOnce(Return(GL_NO_ERROR))
- .RetiresOnSaturation();
- EXPECT_CALL(*gl_, GetError())
- .WillOnce(Return(GL_NO_ERROR))
- .RetiresOnSaturation();
- }
-}
-
-template <>
void GLES2DecoderTestBase::
SpecializedSetup<cmds::GenTransformFeedbacksImmediate, 0>(bool valid) {
if (valid) {
diff --git a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_1_autogen.h b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_1_autogen.h
index 3d5553178ca..3bc69db4302 100644
--- a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_1_autogen.h
+++ b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_1_autogen.h
@@ -1233,57 +1233,4 @@ TEST_P(GLES2DecoderTest1, GetIntegervInvalidArgs1_1) {
EXPECT_EQ(error::kOutOfBounds, ExecuteCmd(cmd));
EXPECT_EQ(0u, result->size);
}
-
-TEST_P(GLES2DecoderTest1, GetProgramivValidArgs) {
- SpecializedSetup<cmds::GetProgramiv, 0>(true);
- typedef cmds::GetProgramiv::Result Result;
- Result* result = static_cast<Result*>(shared_memory_address_);
- result->size = 0;
- cmds::GetProgramiv cmd;
- cmd.Init(client_program_id_, GL_DELETE_STATUS, shared_memory_id_,
- shared_memory_offset_);
- EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
- EXPECT_EQ(decoder_->GetGLES2Util()->GLGetNumValuesReturned(GL_DELETE_STATUS),
- result->GetNumResults());
- EXPECT_EQ(GL_NO_ERROR, GetGLError());
-}
-
-TEST_P(GLES2DecoderTest1, GetProgramivInvalidArgs1_0) {
- EXPECT_CALL(*gl_, GetProgramiv(_, _, _)).Times(0);
- SpecializedSetup<cmds::GetProgramiv, 0>(false);
- cmds::GetProgramiv::Result* result =
- static_cast<cmds::GetProgramiv::Result*>(shared_memory_address_);
- result->size = 0;
- cmds::GetProgramiv cmd;
- cmd.Init(client_program_id_, GL_PROGRAM_BINARY_RETRIEVABLE_HINT,
- shared_memory_id_, shared_memory_offset_);
- EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
- EXPECT_EQ(0u, result->size);
- EXPECT_EQ(GL_INVALID_ENUM, GetGLError());
-}
-
-TEST_P(GLES2DecoderTest1, GetProgramivInvalidArgs2_0) {
- EXPECT_CALL(*gl_, GetProgramiv(_, _, _)).Times(0);
- SpecializedSetup<cmds::GetProgramiv, 0>(false);
- cmds::GetProgramiv::Result* result =
- static_cast<cmds::GetProgramiv::Result*>(shared_memory_address_);
- result->size = 0;
- cmds::GetProgramiv cmd;
- cmd.Init(client_program_id_, GL_DELETE_STATUS, kInvalidSharedMemoryId, 0);
- EXPECT_EQ(error::kOutOfBounds, ExecuteCmd(cmd));
- EXPECT_EQ(0u, result->size);
-}
-
-TEST_P(GLES2DecoderTest1, GetProgramivInvalidArgs2_1) {
- EXPECT_CALL(*gl_, GetProgramiv(_, _, _)).Times(0);
- SpecializedSetup<cmds::GetProgramiv, 0>(false);
- cmds::GetProgramiv::Result* result =
- static_cast<cmds::GetProgramiv::Result*>(shared_memory_address_);
- result->size = 0;
- cmds::GetProgramiv cmd;
- cmd.Init(client_program_id_, GL_DELETE_STATUS, shared_memory_id_,
- kInvalidSharedMemoryOffset);
- EXPECT_EQ(error::kOutOfBounds, ExecuteCmd(cmd));
- EXPECT_EQ(0u, result->size);
-}
#endif // GPU_COMMAND_BUFFER_SERVICE_GLES2_CMD_DECODER_UNITTEST_1_AUTOGEN_H_
diff --git a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_2.cc b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_2.cc
index 8288b6d3b18..75b71b0ed28 100644
--- a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_2.cc
+++ b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_2.cc
@@ -691,6 +691,20 @@ void GLES2DecoderTestBase::SpecializedSetup<
}
template <>
+void GLES2DecoderTestBase::SpecializedSetup<cmds::GetProgramiv, 0>(bool valid) {
+ if (valid) {
+ // GetProgramiv calls ClearGLError then GetError to make sure
+ // it actually got a value so it can report correctly to the client.
+ EXPECT_CALL(*gl_, GetError())
+ .WillOnce(Return(GL_NO_ERROR))
+ .RetiresOnSaturation();
+ EXPECT_CALL(*gl_, GetError())
+ .WillOnce(Return(GL_NO_ERROR))
+ .RetiresOnSaturation();
+ }
+}
+
+template <>
void GLES2DecoderTestBase::SpecializedSetup<cmds::RenderbufferStorage, 0>(
bool valid) {
DoBindRenderbuffer(GL_RENDERBUFFER, client_renderbuffer_id_,
@@ -851,12 +865,6 @@ void GLES2DecoderTestBase::SpecializedSetup<cmds::Uniform3ivImmediate, 0>(
}
template <>
-void GLES2DecoderTestBase::SpecializedSetup<cmds::Uniform4f, 0>(
- bool /* valid */) {
- SetupShaderForUniform(GL_FLOAT_VEC4);
-}
-
-template <>
void GLES2DecoderTestBase::SpecializedSetup<cmds::UniformMatrix2fvImmediate, 0>(
bool /* valid */) {
SetupShaderForUniform(GL_FLOAT_MAT2);
diff --git a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_2_autogen.h b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_2_autogen.h
index 47fd77225a4..fa45593b8f5 100644
--- a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_2_autogen.h
+++ b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_2_autogen.h
@@ -12,6 +12,59 @@
#ifndef GPU_COMMAND_BUFFER_SERVICE_GLES2_CMD_DECODER_UNITTEST_2_AUTOGEN_H_
#define GPU_COMMAND_BUFFER_SERVICE_GLES2_CMD_DECODER_UNITTEST_2_AUTOGEN_H_
+TEST_P(GLES2DecoderTest2, GetProgramivValidArgs) {
+ SpecializedSetup<cmds::GetProgramiv, 0>(true);
+ typedef cmds::GetProgramiv::Result Result;
+ Result* result = static_cast<Result*>(shared_memory_address_);
+ result->size = 0;
+ cmds::GetProgramiv cmd;
+ cmd.Init(client_program_id_, GL_DELETE_STATUS, shared_memory_id_,
+ shared_memory_offset_);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(decoder_->GetGLES2Util()->GLGetNumValuesReturned(GL_DELETE_STATUS),
+ result->GetNumResults());
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+
+TEST_P(GLES2DecoderTest2, GetProgramivInvalidArgs1_0) {
+ EXPECT_CALL(*gl_, GetProgramiv(_, _, _)).Times(0);
+ SpecializedSetup<cmds::GetProgramiv, 0>(false);
+ cmds::GetProgramiv::Result* result =
+ static_cast<cmds::GetProgramiv::Result*>(shared_memory_address_);
+ result->size = 0;
+ cmds::GetProgramiv cmd;
+ cmd.Init(client_program_id_, GL_PROGRAM_BINARY_RETRIEVABLE_HINT,
+ shared_memory_id_, shared_memory_offset_);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(0u, result->size);
+ EXPECT_EQ(GL_INVALID_ENUM, GetGLError());
+}
+
+TEST_P(GLES2DecoderTest2, GetProgramivInvalidArgs2_0) {
+ EXPECT_CALL(*gl_, GetProgramiv(_, _, _)).Times(0);
+ SpecializedSetup<cmds::GetProgramiv, 0>(false);
+ cmds::GetProgramiv::Result* result =
+ static_cast<cmds::GetProgramiv::Result*>(shared_memory_address_);
+ result->size = 0;
+ cmds::GetProgramiv cmd;
+ cmd.Init(client_program_id_, GL_DELETE_STATUS, kInvalidSharedMemoryId, 0);
+ EXPECT_EQ(error::kOutOfBounds, ExecuteCmd(cmd));
+ EXPECT_EQ(0u, result->size);
+}
+
+TEST_P(GLES2DecoderTest2, GetProgramivInvalidArgs2_1) {
+ EXPECT_CALL(*gl_, GetProgramiv(_, _, _)).Times(0);
+ SpecializedSetup<cmds::GetProgramiv, 0>(false);
+ cmds::GetProgramiv::Result* result =
+ static_cast<cmds::GetProgramiv::Result*>(shared_memory_address_);
+ result->size = 0;
+ cmds::GetProgramiv cmd;
+ cmd.Init(client_program_id_, GL_DELETE_STATUS, shared_memory_id_,
+ kInvalidSharedMemoryOffset);
+ EXPECT_EQ(error::kOutOfBounds, ExecuteCmd(cmd));
+ EXPECT_EQ(0u, result->size);
+}
+
TEST_P(GLES2DecoderTest2, GetProgramInfoLogValidArgs) {
const char* kInfo = "hello";
const uint32_t kBucketId = 123;
@@ -1298,13 +1351,4 @@ TEST_P(GLES2DecoderTest2, Uniform3ivImmediateValidArgs) {
EXPECT_EQ(error::kNoError, ExecuteImmediateCmd(cmd, sizeof(temp)));
EXPECT_EQ(GL_NO_ERROR, GetGLError());
}
-
-TEST_P(GLES2DecoderTest2, Uniform4fValidArgs) {
- EXPECT_CALL(*gl_, Uniform4fv(1, 1, _));
- SpecializedSetup<cmds::Uniform4f, 0>(true);
- cmds::Uniform4f cmd;
- cmd.Init(1, 2, 3, 4, 5);
- EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
- EXPECT_EQ(GL_NO_ERROR, GetGLError());
-}
#endif // GPU_COMMAND_BUFFER_SERVICE_GLES2_CMD_DECODER_UNITTEST_2_AUTOGEN_H_
diff --git a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_3.cc b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_3.cc
index 580131f038c..b37cb1943d8 100644
--- a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_3.cc
+++ b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_3.cc
@@ -51,6 +51,12 @@ INSTANTIATE_TEST_SUITE_P(Service, GLES2DecoderTest3, ::testing::Bool());
INSTANTIATE_TEST_SUITE_P(Service, GLES3DecoderTest3, ::testing::Bool());
template <>
+void GLES2DecoderTestBase::SpecializedSetup<cmds::Uniform4f, 0>(
+ bool /* valid */) {
+ SetupShaderForUniform(GL_FLOAT_VEC4);
+}
+
+template <>
void GLES2DecoderTestBase::SpecializedSetup<cmds::Uniform4fvImmediate, 0>(
bool /* valid */) {
SetupShaderForUniform(GL_FLOAT_VEC4);
diff --git a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_3_autogen.h b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_3_autogen.h
index 00161c02032..10ec529b465 100644
--- a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_3_autogen.h
+++ b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_3_autogen.h
@@ -12,6 +12,15 @@
#ifndef GPU_COMMAND_BUFFER_SERVICE_GLES2_CMD_DECODER_UNITTEST_3_AUTOGEN_H_
#define GPU_COMMAND_BUFFER_SERVICE_GLES2_CMD_DECODER_UNITTEST_3_AUTOGEN_H_
+TEST_P(GLES2DecoderTest3, Uniform4fValidArgs) {
+ EXPECT_CALL(*gl_, Uniform4fv(1, 1, _));
+ SpecializedSetup<cmds::Uniform4f, 0>(true);
+ cmds::Uniform4f cmd;
+ cmd.Init(1, 2, 3, 4, 5);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+
TEST_P(GLES2DecoderTest3, Uniform4fvImmediateValidArgs) {
cmds::Uniform4fvImmediate& cmd = *GetImmediateAs<cmds::Uniform4fvImmediate>();
SpecializedSetup<cmds::Uniform4fvImmediate, 0>(true);
diff --git a/chromium/gpu/command_buffer/service/gles2_cmd_validation_implementation_autogen.h b/chromium/gpu/command_buffer/service/gles2_cmd_validation_implementation_autogen.h
index 5284cc802b9..38d67d62bb3 100644
--- a/chromium/gpu/command_buffer/service/gles2_cmd_validation_implementation_autogen.h
+++ b/chromium/gpu/command_buffer/service/gles2_cmd_validation_implementation_autogen.h
@@ -437,6 +437,13 @@ static const GLenum valid_g_l_state_table_es3[] = {
GL_UNPACK_SKIP_IMAGES,
GL_UNPACK_SKIP_PIXELS,
GL_UNPACK_SKIP_ROWS,
+ GL_BLEND_EQUATION_RGB,
+ GL_BLEND_EQUATION_ALPHA,
+ GL_BLEND_SRC_RGB,
+ GL_BLEND_SRC_ALPHA,
+ GL_BLEND_DST_RGB,
+ GL_BLEND_DST_ALPHA,
+ GL_COLOR_WRITEMASK,
};
bool Validators::GetMaxIndexTypeValidator::IsValid(const GLenum value) const {
@@ -512,6 +519,13 @@ static const GLenum valid_indexed_g_l_state_table[] = {
GL_UNIFORM_BUFFER_BINDING,
GL_UNIFORM_BUFFER_SIZE,
GL_UNIFORM_BUFFER_START,
+ GL_BLEND_EQUATION_RGB,
+ GL_BLEND_EQUATION_ALPHA,
+ GL_BLEND_SRC_RGB,
+ GL_BLEND_SRC_ALPHA,
+ GL_BLEND_DST_RGB,
+ GL_BLEND_DST_ALPHA,
+ GL_COLOR_WRITEMASK,
};
bool Validators::InternalFormatParameterValidator::IsValid(
@@ -773,6 +787,7 @@ bool Validators::ShaderTypeValidator::IsValid(const GLenum value) const {
bool Validators::SharedImageAccessModeValidator::IsValid(
const GLenum value) const {
switch (value) {
+ case GL_SHARED_IMAGE_ACCESS_MODE_OVERLAY_CHROMIUM:
case GL_SHARED_IMAGE_ACCESS_MODE_READWRITE_CHROMIUM:
case GL_SHARED_IMAGE_ACCESS_MODE_READ_CHROMIUM:
return true;
diff --git a/chromium/gpu/command_buffer/service/gpu_command_buffer_memory_tracker.cc b/chromium/gpu/command_buffer/service/gpu_command_buffer_memory_tracker.cc
index fa686c24432..97b6d43e986 100644
--- a/chromium/gpu/command_buffer/service/gpu_command_buffer_memory_tracker.cc
+++ b/chromium/gpu/command_buffer/service/gpu_command_buffer_memory_tracker.cc
@@ -46,9 +46,11 @@ GpuCommandBufferMemoryTracker::GpuCommandBufferMemoryTracker(
: command_buffer_id_(command_buffer_id),
client_tracing_id_(client_tracing_id),
context_type_(context_type),
- memory_pressure_listener_(base::BindRepeating(
- &GpuCommandBufferMemoryTracker::LogMemoryStatsPressure,
- base::Unretained(this))),
+ memory_pressure_listener_(
+ FROM_HERE,
+ base::BindRepeating(
+ &GpuCommandBufferMemoryTracker::LogMemoryStatsPressure,
+ base::Unretained(this))),
observer_(observer) {
// Set up |memory_stats_timer_| to call LogMemoryPeriodic periodically
// via the provided |task_runner|.
diff --git a/chromium/gpu/command_buffer/service/image_reader_gl_owner.cc b/chromium/gpu/command_buffer/service/image_reader_gl_owner.cc
index c743800f0a5..c1ebdde777a 100644
--- a/chromium/gpu/command_buffer/service/image_reader_gl_owner.cc
+++ b/chromium/gpu/command_buffer/service/image_reader_gl_owner.cc
@@ -45,22 +45,38 @@ bool IsSurfaceControl(TextureOwner::Mode mode) {
}
} // namespace
+// This class is safe to be created/destroyed on different threads. This is
+// ensured by destruction happening on the correct thread. This class is not
+// thread safe to be used concurrently on multiple threads.
class ImageReaderGLOwner::ScopedHardwareBufferImpl
: public base::android::ScopedHardwareBufferFenceSync {
public:
- ScopedHardwareBufferImpl(scoped_refptr<ImageReaderGLOwner> texture_owner,
+ ScopedHardwareBufferImpl(base::WeakPtr<ImageReaderGLOwner> texture_owner,
AImage* image,
base::android::ScopedHardwareBufferHandle handle,
base::ScopedFD fence_fd)
: base::android::ScopedHardwareBufferFenceSync(std::move(handle),
- std::move(fence_fd)),
+ std::move(fence_fd),
+ base::ScopedFD(),
+ true /* is_video */),
texture_owner_(std::move(texture_owner)),
- image_(image) {
+ image_(image),
+ task_runner_(base::ThreadTaskRunnerHandle::Get()) {
DCHECK(image_);
texture_owner_->RegisterRefOnImage(image_);
}
+
~ScopedHardwareBufferImpl() override {
- texture_owner_->ReleaseRefOnImage(image_, std::move(read_fence_));
+ if (task_runner_->RunsTasksInCurrentSequence()) {
+ if (texture_owner_) {
+ texture_owner_->ReleaseRefOnImage(image_, std::move(read_fence_));
+ }
+ } else {
+ task_runner_->PostTask(
+ FROM_HERE,
+ base::BindOnce(&gpu::ImageReaderGLOwner::ReleaseRefOnImage,
+ texture_owner_, image_, std::move(read_fence_)));
+ }
}
void SetReadFence(base::ScopedFD fence_fd, bool has_context) final {
@@ -72,8 +88,9 @@ class ImageReaderGLOwner::ScopedHardwareBufferImpl
private:
base::ScopedFD read_fence_;
- scoped_refptr<ImageReaderGLOwner> texture_owner_;
+ base::WeakPtr<ImageReaderGLOwner> texture_owner_;
AImage* image_;
+ scoped_refptr<base::SingleThreadTaskRunner> task_runner_;
};
ImageReaderGLOwner::ImageReaderGLOwner(
@@ -305,7 +322,7 @@ ImageReaderGLOwner::GetAHardwareBuffer() {
return nullptr;
return std::make_unique<ScopedHardwareBufferImpl>(
- this, current_image_ref_->image(),
+ weak_factory_.GetWeakPtr(), current_image_ref_->image(),
base::android::ScopedHardwareBufferHandle::Create(buffer),
current_image_ref_->GetReadyFence());
}
@@ -367,96 +384,6 @@ void ImageReaderGLOwner::ReleaseRefOnImage(AImage* image,
image_refs_.erase(it);
}
-void ImageReaderGLOwner::GetTransformMatrix(float mtx[]) {
- DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);
-
- // Assign a Y inverted Identity matrix. Both MCVD and AVDA path performs a Y
- // inversion of this matrix later. Hence if we assign a Y inverted matrix
- // here, it simply becomes an identity matrix later and will have no effect
- // on the image data.
- static constexpr float kYInvertedIdentity[16]{1, 0, 0, 0, 0, -1, 0, 0,
- 0, 0, 1, 0, 0, 1, 0, 1};
- memcpy(mtx, kYInvertedIdentity, sizeof(kYInvertedIdentity));
-
-
- // Get the crop rectangle associated with this image. The crop rectangle
- // specifies the region of valid pixels in the image.
- gfx::Rect crop_rect = GetCropRect();
- if (crop_rect.IsEmpty())
- return;
-
- // Get the AHardwareBuffer to query its dimensions.
- AHardwareBuffer* buffer = nullptr;
- loader_.AImage_getHardwareBuffer(current_image_ref_->image(), &buffer);
- if (!buffer) {
- DLOG(ERROR) << "Unable to get an AHardwareBuffer from the image";
- return;
- }
-
- // Get the buffer descriptor. Note that for querying the buffer descriptor, we
- // do not need to wait on the AHB to be ready.
- AHardwareBuffer_Desc desc;
- base::AndroidHardwareBufferCompat::GetInstance().Describe(buffer, &desc);
-
- // Note: Below calculation of shrink_amount and the transform matrix params
- // tx,ty,sx,sy is copied from the android
- // SurfaceTexture::computeCurrentTransformMatrix() -
- // https://android.googlesource.com/platform/frameworks/native/+/5c1139f/libs/gui/SurfaceTexture.cpp#516.
- // We are assuming here that bilinear filtering is always enabled for
- // sampling the texture.
- float shrink_amount = 0.0f;
- float tx = 0.0f, ty = 0.0f, sx = 1.0f, sy = 1.0f;
-
- // In order to prevent bilinear sampling beyond the edge of the
- // crop rectangle we may need to shrink it by 2 texels in each
- // dimension. Normally this would just need to take 1/2 a texel
- // off each end, but because the chroma channels of YUV420 images
- // are subsampled we may need to shrink the crop region by a whole
- // texel on each side.
- switch (desc.format) {
- case AHARDWAREBUFFER_FORMAT_R8G8B8A8_UNORM:
- case AHARDWAREBUFFER_FORMAT_R8G8B8X8_UNORM:
- case AHARDWAREBUFFER_FORMAT_R8G8B8_UNORM:
- case AHARDWAREBUFFER_FORMAT_R5G6B5_UNORM:
- // We know there's no subsampling of any channels, so we
- // only need to shrink by a half a pixel.
- shrink_amount = 0.5;
- break;
- default:
- // If we don't recognize the format, we must assume the
- // worst case (that we care about), which is YUV420.
- shrink_amount = 1.0;
- }
-
- int32_t crop_rect_width = crop_rect.width();
- int32_t crop_rect_height = crop_rect.height();
- int32_t crop_rect_left = crop_rect.x();
- int32_t crop_rect_bottom = crop_rect.y() + crop_rect_height;
- int32_t buffer_width = desc.width;
- int32_t buffer_height = desc.height;
- DCHECK_GT(buffer_width, 0);
- DCHECK_GT(buffer_height, 0);
-
- // Only shrink the dimensions that are not the size of the buffer.
- if (crop_rect_width < buffer_width) {
- tx = (float(crop_rect_left) + shrink_amount) / buffer_width;
- sx = (float(crop_rect_width) - (2.0f * shrink_amount)) / buffer_width;
- }
-
- if (crop_rect_height < buffer_height) {
- ty = (float(buffer_height - crop_rect_bottom) + shrink_amount) /
- buffer_height;
- sy = (float(crop_rect_height) - (2.0f * shrink_amount)) / buffer_height;
- }
-
- // Update the transform matrix with above parameters by also taking into
- // account Y inversion/ vertical flip.
- mtx[0] = sx;
- mtx[5] = 0 - sy;
- mtx[12] = tx;
- mtx[13] = 1 - ty;
-}
-
void ImageReaderGLOwner::ReleaseBackBuffers() {
DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);
// ReleaseBackBuffers() call is not required with image reader.
@@ -482,7 +409,7 @@ void ImageReaderGLOwner::OnFrameAvailable(void* context, AImageReader* reader) {
image_reader_ptr->frame_available_cb_.Run();
}
-void ImageReaderGLOwner::GetCodedSizeAndVisibleRect(
+bool ImageReaderGLOwner::GetCodedSizeAndVisibleRect(
gfx::Size rotated_visible_size,
gfx::Size* coded_size,
gfx::Rect* visible_rect) {
@@ -499,7 +426,7 @@ void ImageReaderGLOwner::GetCodedSizeAndVisibleRect(
if (!buffer) {
*coded_size = gfx::Size();
*visible_rect = gfx::Rect();
- return;
+ return false;
}
// Get the buffer descriptor. Note that for querying the buffer descriptor, we
// do not need to wait on the AHB to be ready.
@@ -508,6 +435,8 @@ void ImageReaderGLOwner::GetCodedSizeAndVisibleRect(
*visible_rect = GetCropRect();
*coded_size = gfx::Size(desc.width, desc.height);
+
+ return true;
}
ImageReaderGLOwner::ImageRef::ImageRef() = default;
diff --git a/chromium/gpu/command_buffer/service/image_reader_gl_owner.h b/chromium/gpu/command_buffer/service/image_reader_gl_owner.h
index 0d9f93f0475..b6c2d2c0d3b 100644
--- a/chromium/gpu/command_buffer/service/image_reader_gl_owner.h
+++ b/chromium/gpu/command_buffer/service/image_reader_gl_owner.h
@@ -9,6 +9,7 @@
#include "base/android/android_image_reader_compat.h"
#include "base/containers/flat_map.h"
+#include "base/memory/weak_ptr.h"
#include "gpu/command_buffer/service/texture_owner.h"
#include "gpu/gpu_gles2_export.h"
#include "ui/gl/gl_fence_egl.h"
@@ -37,12 +38,10 @@ class GPU_GLES2_EXPORT ImageReaderGLOwner : public TextureOwner {
gl::ScopedJavaSurface CreateJavaSurface() const override;
void UpdateTexImage() override;
void EnsureTexImageBound() override;
- void GetTransformMatrix(float mtx[16]) override;
void ReleaseBackBuffers() override;
std::unique_ptr<base::android::ScopedHardwareBufferFenceSync>
GetAHardwareBuffer() override;
- gfx::Rect GetCropRect() override;
- void GetCodedSizeAndVisibleRect(gfx::Size rotated_visible_size,
+ bool GetCodedSizeAndVisibleRect(gfx::Size rotated_visible_size,
gfx::Size* coded_size,
gfx::Rect* visible_rect) override;
@@ -89,6 +88,8 @@ class GPU_GLES2_EXPORT ImageReaderGLOwner : public TextureOwner {
void RegisterRefOnImage(AImage* image);
void ReleaseRefOnImage(AImage* image, base::ScopedFD fence_fd);
+ gfx::Rect GetCropRect();
+
static void OnFrameAvailable(void* context, AImageReader* reader);
// AImageReader instance
@@ -132,6 +133,8 @@ class GPU_GLES2_EXPORT ImageReaderGLOwner : public TextureOwner {
THREAD_CHECKER(thread_checker_);
+ base::WeakPtrFactory<ImageReaderGLOwner> weak_factory_{this};
+
DISALLOW_COPY_AND_ASSIGN(ImageReaderGLOwner);
};
diff --git a/chromium/gpu/command_buffer/service/memory_tracking.h b/chromium/gpu/command_buffer/service/memory_tracking.h
index ea211deddf6..d55a130e2ec 100644
--- a/chromium/gpu/command_buffer/service/memory_tracking.h
+++ b/chromium/gpu/command_buffer/service/memory_tracking.h
@@ -9,7 +9,7 @@
#include <stdint.h>
#include <string>
-#include "base/logging.h"
+#include "base/check.h"
#include "base/macros.h"
#include "base/memory/ref_counted.h"
#include "base/trace_event/trace_event.h"
diff --git a/chromium/gpu/command_buffer/service/mock_texture_owner.h b/chromium/gpu/command_buffer/service/mock_texture_owner.h
index e98cdc4a729..2cf23affb93 100644
--- a/chromium/gpu/command_buffer/service/mock_texture_owner.h
+++ b/chromium/gpu/command_buffer/service/mock_texture_owner.h
@@ -33,12 +33,11 @@ class MockTextureOwner : public TextureOwner {
MOCK_CONST_METHOD0(CreateJavaSurface, gl::ScopedJavaSurface());
MOCK_METHOD0(UpdateTexImage, void());
MOCK_METHOD0(EnsureTexImageBound, void());
- MOCK_METHOD1(GetTransformMatrix, void(float mtx[16]));
MOCK_METHOD0(ReleaseBackBuffers, void());
MOCK_METHOD1(OnTextureDestroyed, void(gpu::gles2::AbstractTexture*));
MOCK_METHOD1(SetFrameAvailableCallback, void(const base::RepeatingClosure&));
MOCK_METHOD3(GetCodedSizeAndVisibleRect,
- void(gfx::Size rotated_visible_size,
+ bool(gfx::Size rotated_visible_size,
gfx::Size* coded_size,
gfx::Rect* visible_rect));
@@ -48,15 +47,9 @@ class MockTextureOwner : public TextureOwner {
return nullptr;
}
- gfx::Rect GetCropRect() override {
- ++get_crop_rect_count;
- return gfx::Rect();
- }
-
gl::GLContext* fake_context;
gl::GLSurface* fake_surface;
int get_a_hardware_buffer_count = 0;
- int get_crop_rect_count = 0;
bool expect_update_tex_image;
protected:
diff --git a/chromium/gpu/command_buffer/service/mocks.h b/chromium/gpu/command_buffer/service/mocks.h
index d01e9b6c4bd..ac9a5efb25c 100644
--- a/chromium/gpu/command_buffer/service/mocks.h
+++ b/chromium/gpu/command_buffer/service/mocks.h
@@ -16,7 +16,6 @@
#include <string>
#include <vector>
-#include "base/logging.h"
#include "gpu/command_buffer/common/cmd_buffer_common.h"
#include "gpu/command_buffer/service/async_api_interface.h"
#include "gpu/command_buffer/service/memory_tracking.h"
diff --git a/chromium/gpu/command_buffer/service/program_manager.h b/chromium/gpu/command_buffer/service/program_manager.h
index 800de937f5d..f99a099b0c8 100644
--- a/chromium/gpu/command_buffer/service/program_manager.h
+++ b/chromium/gpu/command_buffer/service/program_manager.h
@@ -13,7 +13,7 @@
#include <string>
#include <vector>
-#include "base/logging.h"
+#include "base/check_op.h"
#include "base/macros.h"
#include "base/memory/ref_counted.h"
#include "gpu/command_buffer/service/common_decoder.h"
diff --git a/chromium/gpu/command_buffer/service/raster_decoder.cc b/chromium/gpu/command_buffer/service/raster_decoder.cc
index 12302131817..aec7821dc48 100644
--- a/chromium/gpu/command_buffer/service/raster_decoder.cc
+++ b/chromium/gpu/command_buffer/service/raster_decoder.cc
@@ -208,7 +208,8 @@ bool AllowedBetweenBeginEndRaster(CommandId command) {
// avoid it as much as possible.
class RasterDecoderImpl final : public RasterDecoder,
public gles2::ErrorStateClient,
- public ServiceFontManager::Client {
+ public ServiceFontManager::Client,
+ public SharedContextState::ContextLostObserver {
public:
RasterDecoderImpl(DecoderClient* client,
CommandBufferServiceBase* command_buffer_service,
@@ -365,6 +366,9 @@ class RasterDecoderImpl final : public RasterDecoder,
scoped_refptr<Buffer> GetShmBuffer(uint32_t shm_id) override;
void ReportProgress() override;
+ // SharedContextState::ContextLostObserver implementation.
+ void OnContextLost() override;
+
private:
gles2::ContextState* state() const {
if (use_passthrough_) {
@@ -401,7 +405,7 @@ class RasterDecoderImpl final : public RasterDecoder,
if (!flush_workaround_disabled_for_test_) {
TRACE_EVENT0("gpu", "RasterDecoderImpl::FlushToWorkAroundMacCrashes");
if (gr_context())
- gr_context()->flush();
+ gr_context()->flushAndSubmit();
api()->glFlushFn();
// Flushes can be expensive, yield to allow interruption after each flush.
@@ -583,8 +587,6 @@ class RasterDecoderImpl final : public RasterDecoder,
bool use_passthrough_ = false;
bool use_ddl_ = false;
- bool reset_by_robustness_extension_ = false;
-
// The current decoder error communicates the decoder error through command
// processing functions that do not return the error value. Should be set
// only if not returning an error.
@@ -756,9 +758,12 @@ RasterDecoderImpl::RasterDecoderImpl(
font_manager_(base::MakeRefCounted<ServiceFontManager>(this)),
is_privileged_(is_privileged) {
DCHECK(shared_context_state_);
+ shared_context_state_->AddContextLostObserver(this);
}
-RasterDecoderImpl::~RasterDecoderImpl() = default;
+RasterDecoderImpl::~RasterDecoderImpl() {
+ shared_context_state_->RemoveContextLostObserver(this);
+}
base::WeakPtr<DecoderContext> RasterDecoderImpl::AsWeakPtr() {
return weak_ptr_factory_.GetWeakPtr();
@@ -854,16 +859,12 @@ void RasterDecoderImpl::Destroy(bool have_context) {
DCHECK(result == GrSemaphoresSubmitted::kYes || end_semaphores_.empty());
end_semaphores_.clear();
sk_surface_ = nullptr;
- if (shared_image_) {
- scoped_shared_image_write_.reset();
- shared_image_.reset();
- } else {
- sk_surface_for_testing_.reset();
- }
- }
- if (gr_context()) {
- gr_context()->flush();
}
+ if (gr_context())
+ gr_context()->flushAndSubmit();
+ scoped_shared_image_write_.reset();
+ shared_image_.reset();
+ sk_surface_for_testing_.reset();
}
copy_tex_image_blit_.reset();
@@ -891,18 +892,11 @@ bool RasterDecoderImpl::MakeCurrent() {
if (shared_context_state_->context_lost() ||
!shared_context_state_->MakeCurrent(nullptr)) {
LOG(ERROR) << " RasterDecoderImpl: Context lost during MakeCurrent.";
- MarkContextLost(error::kMakeCurrentFailed);
return false;
}
DCHECK_EQ(api(), gl::g_current_gl_context);
- if (CheckResetStatus()) {
- LOG(ERROR)
- << " RasterDecoderImpl: Context reset detected after MakeCurrent.";
- return false;
- }
-
// Rebind textures if the service ids may have changed.
RestoreAllExternalTextureBindingsIfNeeded();
@@ -948,6 +942,10 @@ Capabilities RasterDecoderImpl::GetCapabilities() {
#else
NOTREACHED();
#endif
+ } else if (shared_context_state_->GrContextIsDawn()) {
+ // TODO(crbug.com/1090476): Query Dawn for this value once an API exists for
+ // capabilities.
+ caps.max_texture_size = 8192;
} else {
NOTIMPLEMENTED();
}
@@ -1113,55 +1111,27 @@ void RasterDecoderImpl::SetLevelInfo(uint32_t client_id,
}
bool RasterDecoderImpl::WasContextLost() const {
- return context_lost_;
+ return shared_context_state_->context_lost();
}
bool RasterDecoderImpl::WasContextLostByRobustnessExtension() const {
- return WasContextLost() && reset_by_robustness_extension_;
+ return shared_context_state_->device_needs_reset();
}
void RasterDecoderImpl::MarkContextLost(error::ContextLostReason reason) {
- // Only lose the context once.
- if (WasContextLost())
- return;
+ shared_context_state_->MarkContextLost(reason);
+}
- // Don't make GL calls in here, the context might not be current.
- context_lost_ = true;
- command_buffer_service()->SetContextLostReason(reason);
+void RasterDecoderImpl::OnContextLost() {
+ DCHECK(shared_context_state_->context_lost());
+ command_buffer_service()->SetContextLostReason(
+ *shared_context_state_->context_lost_reason());
current_decoder_error_ = error::kLostContext;
}
bool RasterDecoderImpl::CheckResetStatus() {
DCHECK(!WasContextLost());
- DCHECK(shared_context_state_->context()->IsCurrent(nullptr));
-
- // If the reason for the call was a GL error, we can try to determine the
- // reset status more accurately.
- GLenum driver_status =
- shared_context_state_->context()->CheckStickyGraphicsResetStatus();
- if (driver_status == GL_NO_ERROR)
- return false;
-
- LOG(ERROR) << "RasterDecoder context lost via ARB/EXT_robustness. Reset "
- "status = "
- << gles2::GLES2Util::GetStringEnum(driver_status);
-
- switch (driver_status) {
- case GL_GUILTY_CONTEXT_RESET_ARB:
- MarkContextLost(error::kGuilty);
- break;
- case GL_INNOCENT_CONTEXT_RESET_ARB:
- MarkContextLost(error::kInnocent);
- break;
- case GL_UNKNOWN_CONTEXT_RESET_ARB:
- MarkContextLost(error::kUnknown);
- break;
- default:
- NOTREACHED();
- return false;
- }
- reset_by_robustness_extension_ = true;
- return true;
+ return shared_context_state_->CheckResetStatus(/*needs_gl=*/false);
}
gles2::Logger* RasterDecoderImpl::GetLogger() {
@@ -1500,14 +1470,13 @@ void RasterDecoderImpl::DisableFlushWorkaroundForTest() {
void RasterDecoderImpl::OnContextLostError() {
if (!WasContextLost()) {
// Need to lose current context before broadcasting!
- CheckResetStatus();
- reset_by_robustness_extension_ = true;
+ shared_context_state_->CheckResetStatus(/*needs_gl=*/false);
}
}
void RasterDecoderImpl::OnOutOfMemoryError() {
if (lose_context_when_out_of_memory_ && !WasContextLost()) {
- if (!CheckResetStatus()) {
+ if (!shared_context_state_->CheckResetStatus(/*needs_gl=*/false)) {
MarkContextLost(error::kOutOfMemory);
}
}
@@ -2071,17 +2040,14 @@ void RasterDecoderImpl::DoCopySubTextureINTERNALGL(
if (gles2::GLStreamTextureImage* image =
source_texture->GetLevelStreamTextureImage(GL_TEXTURE_EXTERNAL_OES,
source_level)) {
- GLfloat transform_matrix[16];
- image->GetTextureMatrix(transform_matrix);
-
- copy_texture_chromium_->DoCopySubTextureWithTransform(
+ copy_texture_chromium_->DoCopySubTexture(
this, source_target, source_texture->service_id(), source_level,
source_internal_format, dest_target, dest_texture->service_id(),
dest_level, dest_internal_format, xoffset, yoffset, x, y, width,
height, dest_size.width(), dest_size.height(), source_size.width(),
source_size.height(), unpack_flip_y, unpack_premultiply_alpha,
- false /* unpack_unmultiply_alpha */, false /* dither */,
- transform_matrix, copy_tex_image_blit_.get());
+ /*unpack_unmultiply_alpha=*/false, /*dither=*/false,
+ gles2::CopyTextureMethod::DIRECT_DRAW, copy_tex_image_blit_.get());
dest_texture->SetLevelClearedRect(dest_target, dest_level,
new_cleared_rect);
return;
@@ -2255,8 +2221,13 @@ void RasterDecoderImpl::DoCopySubTextureINTERNALSkia(
};
gpu::AddVulkanCleanupTaskForSkiaFlush(
shared_context_state_->vk_context_provider(), &flush_info);
- dest_scoped_access->surface()->flush(
+ auto result = dest_scoped_access->surface()->flush(
SkSurface::BackendSurfaceAccess::kNoAccess, flush_info);
+  // If |end_semaphores| is empty, we can defer the queue submission.
+ if (!end_semaphores.empty()) {
+ DCHECK_EQ(result, GrSemaphoresSubmitted::kYes);
+ gr_context()->submit();
+ }
if (!dest_shared_image->IsCleared()) {
dest_shared_image->SetClearedRect(new_cleared_rect);
@@ -2297,6 +2268,15 @@ void RasterDecoderImpl::DoWritePixelsINTERNAL(GLint x_offset,
return;
}
+ if (SkColorTypeBytesPerPixel(viz::ResourceFormatToClosestSkColorType(
+ true, dest_shared_image->format())) !=
+ SkColorTypeBytesPerPixel(static_cast<SkColorType>(src_sk_color_type))) {
+ LOCAL_SET_GL_ERROR(GL_INVALID_OPERATION, "glWritePixels",
+ "Bytes per pixel for src SkColorType and dst "
+ "SkColorType must be the same.");
+ return;
+ }
+
// If present, the color space is serialized into shared memory before the
// pixel data.
sk_sp<SkColorSpace> color_space;
@@ -2375,8 +2355,12 @@ void RasterDecoderImpl::DoWritePixelsINTERNAL(GLint x_offset,
};
gpu::AddVulkanCleanupTaskForSkiaFlush(
shared_context_state_->vk_context_provider(), &flush_info);
- dest_scoped_access->surface()->flush(
+ auto result = dest_scoped_access->surface()->flush(
SkSurface::BackendSurfaceAccess::kNoAccess, flush_info);
+ if (!end_semaphores.empty()) {
+ DCHECK_EQ(result, GrSemaphoresSubmitted::kYes);
+ gr_context()->submit();
+ }
if (!dest_shared_image->IsCleared()) {
dest_shared_image->SetClearedRect(
@@ -2565,8 +2549,12 @@ void RasterDecoderImpl::DoConvertYUVMailboxesToRGBINTERNAL(
};
gpu::AddVulkanCleanupTaskForSkiaFlush(
shared_context_state_->vk_context_provider(), &flush_info);
- dest_scoped_access->surface()->flush(
+ auto result = dest_scoped_access->surface()->flush(
SkSurface::BackendSurfaceAccess::kNoAccess, flush_info);
+ if (!end_semaphores.empty()) {
+ DCHECK_EQ(result, GrSemaphoresSubmitted::kYes);
+ gr_context()->submit();
+ }
if (!images[YUVConversionMailboxIndex::kDestIndex]->IsCleared() &&
drew_image) {
@@ -2899,13 +2887,15 @@ void RasterDecoderImpl::DoEndRasterCHROMIUM() {
.fNumSemaphores = end_semaphores_.size(),
.fSignalSemaphores = end_semaphores_.data(),
};
- AddVulkanCleanupTaskForSkiaFlush(
- shared_context_state_->vk_context_provider(), &flush_info);
auto result = sk_surface_->flush(SkSurface::BackendSurfaceAccess::kPresent,
flush_info);
- DCHECK(result == GrSemaphoresSubmitted::kYes || end_semaphores_.empty());
- end_semaphores_.clear();
-
+ // If |end_semaphores_| is not empty, we will submit work to the queue.
+  // Otherwise the queue submission can be deferred.
+ if (!end_semaphores_.empty()) {
+ DCHECK(result == GrSemaphoresSubmitted::kYes);
+ gr_context()->submit();
+ end_semaphores_.clear();
+ }
// The DDL pins memory for the recorded ops so it must be kept alive until
// its flushed.
ddl_.reset();
@@ -2913,13 +2903,10 @@ void RasterDecoderImpl::DoEndRasterCHROMIUM() {
shared_context_state_->UpdateSkiaOwnedMemorySize();
sk_surface_ = nullptr;
- if (!shared_image_) {
- // Test only path for SetUpForRasterCHROMIUMForTest.
- sk_surface_for_testing_.reset();
- } else {
- scoped_shared_image_write_.reset();
- shared_image_.reset();
- }
+ scoped_shared_image_write_.reset();
+ shared_image_.reset();
+ // Test only path for SetUpForRasterCHROMIUMForTest.
+ sk_surface_for_testing_.reset();
// Unlock all font handles. This needs to be deferred until
// SkSurface::flush since that flushes batched Gr operations
diff --git a/chromium/gpu/command_buffer/service/raster_decoder_unittest_base.cc b/chromium/gpu/command_buffer/service/raster_decoder_unittest_base.cc
index 9375b1e17ae..ff476d4f3cf 100644
--- a/chromium/gpu/command_buffer/service/raster_decoder_unittest_base.cc
+++ b/chromium/gpu/command_buffer/service/raster_decoder_unittest_base.cc
@@ -187,7 +187,7 @@ void RasterDecoderTestBase::InitDecoder(const InitState& init) {
new gl::GLShareGroup(), surface_, context_,
feature_info()->workarounds().use_virtualized_gl_contexts,
base::DoNothing(), GpuPreferences().gr_context_type);
-
+ shared_context_state_->disable_check_reset_status_throttling_for_test_ = true;
shared_context_state_->InitializeGL(GpuPreferences(), feature_info_);
command_buffer_service_.reset(new FakeCommandBufferServiceBase());
@@ -213,10 +213,14 @@ void RasterDecoderTestBase::InitDecoder(const InitState& init) {
gpu::ContextResult::kSuccess);
EXPECT_CALL(*context_, MakeCurrent(surface_.get())).WillOnce(Return(true));
+ EXPECT_CALL(*gl_, GetError())
+ .WillOnce(Return(GL_NO_ERROR))
+ .RetiresOnSaturation();
if (context_->HasRobustness()) {
EXPECT_CALL(*gl_, GetGraphicsResetStatusARB())
.WillOnce(Return(GL_NO_ERROR));
}
+
decoder_->MakeCurrent();
decoder_->BeginDecoding();
diff --git a/chromium/gpu/command_buffer/service/raster_decoder_unittest_context_lost.cc b/chromium/gpu/command_buffer/service/raster_decoder_unittest_context_lost.cc
index 2f778122fa8..d82e5e8b852 100644
--- a/chromium/gpu/command_buffer/service/raster_decoder_unittest_context_lost.cc
+++ b/chromium/gpu/command_buffer/service/raster_decoder_unittest_context_lost.cc
@@ -34,13 +34,13 @@ class RasterDecoderOOMTest : public RasterDecoderManualInitTest {
if (context_->HasRobustness()) {
EXPECT_CALL(*gl_, GetGraphicsResetStatusARB())
.WillOnce(Return(reset_status));
+ EXPECT_CALL(*gl_, GetError()).WillRepeatedly(Return(GL_CONTEXT_LOST_KHR));
+ } else {
+ EXPECT_CALL(*gl_, GetError()).WillRepeatedly(Return(GL_NO_ERROR));
}
- // glGetError merges driver error state with decoder error state. Return
- // GL_NO_ERROR from mock driver and GL_OUT_OF_MEMORY from decoder.
- EXPECT_CALL(*gl_, GetError())
- .WillOnce(Return(GL_NO_ERROR))
- .RetiresOnSaturation();
+ // RasterDecoder::HandleGetError merges driver error state with decoder
+ // error state. Return GL_OUT_OF_MEMORY from decoder.
GetDecoder()->SetOOMErrorForTest();
cmds::GetError cmd;
@@ -112,9 +112,9 @@ class RasterDecoderLostContextTest : public RasterDecoderManualInitTest {
void DoGetErrorWithContextLost(GLenum reset_status) {
DCHECK(context_->HasExtension("GL_KHR_robustness"));
- EXPECT_CALL(*gl_, GetError())
- .WillOnce(Return(GL_CONTEXT_LOST_KHR))
- .RetiresOnSaturation();
+ // Once context loss has occurred, driver will always return
+ // GL_CONTEXT_LOST_KHR.
+ EXPECT_CALL(*gl_, GetError()).WillRepeatedly(Return(GL_CONTEXT_LOST_KHR));
EXPECT_CALL(*gl_, GetGraphicsResetStatusARB())
.WillOnce(Return(reset_status));
cmds::GetError cmd;
@@ -147,6 +147,20 @@ TEST_P(RasterDecoderLostContextTest, LostFromMakeCurrent) {
ClearCurrentDecoderError();
}
+TEST_P(RasterDecoderLostContextTest, LostFromDriverOOM) {
+ Init(/*has_robustness=*/false);
+ EXPECT_CALL(*context_, MakeCurrent(surface_.get())).WillOnce(Return(true));
+ EXPECT_CALL(*gl_, GetError()).WillOnce(Return(GL_OUT_OF_MEMORY));
+ EXPECT_FALSE(decoder_->WasContextLost());
+ decoder_->MakeCurrent();
+ EXPECT_TRUE(decoder_->WasContextLost());
+ EXPECT_EQ(error::kOutOfMemory, GetContextLostReason());
+
+ // We didn't process commands, so we need to clear the decoder error,
+ // so that we can shut down cleanly.
+ ClearCurrentDecoderError();
+}
+
TEST_P(RasterDecoderLostContextTest, LostFromMakeCurrentWithRobustness) {
Init(/*has_robustness=*/true); // with robustness
// If we can't make the context current, we cannot query the robustness
@@ -215,6 +229,7 @@ TEST_P(RasterDecoderLostContextTest, LostFromResetAfterMakeCurrent) {
Init(/*has_robustness=*/true);
InSequence seq;
EXPECT_CALL(*context_, MakeCurrent(surface_.get())).WillOnce(Return(true));
+ EXPECT_CALL(*gl_, GetError()).WillOnce(Return(GL_CONTEXT_LOST_KHR));
EXPECT_CALL(*gl_, GetGraphicsResetStatusARB())
.WillOnce(Return(GL_GUILTY_CONTEXT_RESET_KHR));
decoder_->MakeCurrent();
diff --git a/chromium/gpu/command_buffer/service/sampler_manager.h b/chromium/gpu/command_buffer/service/sampler_manager.h
index 2b46c8dd099..9828eb0354e 100644
--- a/chromium/gpu/command_buffer/service/sampler_manager.h
+++ b/chromium/gpu/command_buffer/service/sampler_manager.h
@@ -8,7 +8,6 @@
#include <unordered_map>
#include <vector>
-#include "base/logging.h"
#include "base/macros.h"
#include "base/memory/ref_counted.h"
#include "gpu/command_buffer/service/feature_info.h"
diff --git a/chromium/gpu/command_buffer/service/scheduler.cc b/chromium/gpu/command_buffer/service/scheduler.cc
index 95bc584365f..b9d88280b52 100644
--- a/chromium/gpu/command_buffer/service/scheduler.cc
+++ b/chromium/gpu/command_buffer/service/scheduler.cc
@@ -8,6 +8,7 @@
#include "base/bind.h"
#include "base/callback.h"
+#include "base/logging.h"
#include "base/metrics/histogram_macros.h"
#include "base/single_thread_task_runner.h"
#include "base/stl_util.h"
diff --git a/chromium/gpu/command_buffer/service/service_font_manager.cc b/chromium/gpu/command_buffer/service/service_font_manager.cc
index 50a1b4a74e2..dc41e910e7f 100644
--- a/chromium/gpu/command_buffer/service/service_font_manager.cc
+++ b/chromium/gpu/command_buffer/service/service_font_manager.cc
@@ -7,6 +7,7 @@
#include <inttypes.h>
#include "base/debug/dump_without_crashing.h"
+#include "base/logging.h"
#include "base/metrics/histogram_macros.h"
#include "base/rand_util.h"
#include "base/strings/stringprintf.h"
diff --git a/chromium/gpu/command_buffer/service/service_utils.cc b/chromium/gpu/command_buffer/service/service_utils.cc
index 4432b9222dc..bd7c709e241 100644
--- a/chromium/gpu/command_buffer/service/service_utils.cc
+++ b/chromium/gpu/command_buffer/service/service_utils.cc
@@ -165,8 +165,7 @@ GpuPreferences ParseGpuPreferences(const base::CommandLine* command_line) {
gpu_preferences.enable_dawn_backend_validation =
command_line->HasSwitch(switches::kEnableDawnBackendValidation);
gpu_preferences.gr_context_type = ParseGrContextType();
- gpu_preferences.use_vulkan = ParseVulkanImplementationName(
- command_line, gpu_preferences.gr_context_type);
+ gpu_preferences.use_vulkan = ParseVulkanImplementationName(command_line);
gpu_preferences.disable_vulkan_surface =
command_line->HasSwitch(switches::kDisableVulkanSurface);
@@ -192,8 +191,7 @@ GrContextType ParseGrContextType() {
}
VulkanImplementationName ParseVulkanImplementationName(
- const base::CommandLine* command_line,
- GrContextType gr_context_type) {
+ const base::CommandLine* command_line) {
if (command_line->HasSwitch(switches::kUseVulkan)) {
auto value = command_line->GetSwitchValueASCII(switches::kUseVulkan);
if (value.empty() || value == switches::kVulkanImplementationNameNative) {
@@ -202,11 +200,17 @@ VulkanImplementationName ParseVulkanImplementationName(
return VulkanImplementationName::kSwiftshader;
}
}
- // If the vulkan implementation is not set from --use-vulkan, the native
- // vulkan implementation will be used by default.
- return gr_context_type == GrContextType::kVulkan
- ? VulkanImplementationName::kNative
- : VulkanImplementationName::kNone;
+
+ // GrContext is not going to use Vulkan.
+ if (!base::FeatureList::IsEnabled(features::kVulkan))
+ return VulkanImplementationName::kNone;
+
+  // If the Vulkan feature is enabled from the command line, we force the use
+  // of Vulkan even if it is blacklisted.
+ return base::FeatureList::GetInstance()->IsFeatureOverriddenFromCommandLine(
+ features::kVulkan.name, base::FeatureList::OVERRIDE_ENABLE_FEATURE)
+ ? VulkanImplementationName::kForcedNative
+ : VulkanImplementationName::kNative;
}
} // namespace gles2
diff --git a/chromium/gpu/command_buffer/service/service_utils.h b/chromium/gpu/command_buffer/service/service_utils.h
index 76a802e433c..e10ff73dcfb 100644
--- a/chromium/gpu/command_buffer/service/service_utils.h
+++ b/chromium/gpu/command_buffer/service/service_utils.h
@@ -40,10 +40,10 @@ ParseGpuPreferences(const base::CommandLine* command_line);
GPU_GLES2_EXPORT GrContextType ParseGrContextType();
// Parse the value of --use-vulkan from the command line. If unspecified and
-// a Vulkan GrContext is going to be used, default to the native implementation.
+// features::kVulkan is enabled (GrContext is going to use vulkan), default to
+// the native implementation.
GPU_GLES2_EXPORT VulkanImplementationName
-ParseVulkanImplementationName(const base::CommandLine* command_line,
- GrContextType gr_context_type);
+ParseVulkanImplementationName(const base::CommandLine* command_line);
} // namespace gles2
} // namespace gpu
diff --git a/chromium/gpu/command_buffer/service/shader_manager.h b/chromium/gpu/command_buffer/service/shader_manager.h
index deb4a491486..873f92ddcdf 100644
--- a/chromium/gpu/command_buffer/service/shader_manager.h
+++ b/chromium/gpu/command_buffer/service/shader_manager.h
@@ -8,7 +8,7 @@
#include <string>
#include <unordered_map>
-#include "base/logging.h"
+#include "base/check_op.h"
#include "base/macros.h"
#include "base/memory/ref_counted.h"
#include "gpu/command_buffer/service/gl_utils.h"
diff --git a/chromium/gpu/command_buffer/service/shared_context_state.cc b/chromium/gpu/command_buffer/service/shared_context_state.cc
index 765ad3a0d62..900a182c781 100644
--- a/chromium/gpu/command_buffer/service/shared_context_state.cc
+++ b/chromium/gpu/command_buffer/service/shared_context_state.cc
@@ -69,7 +69,7 @@ size_t MaxNumSkSurface() {
namespace gpu {
void SharedContextState::compileError(const char* shader, const char* errors) {
- if (!context_lost_) {
+ if (!context_lost()) {
LOG(ERROR) << "Skia shader compilation error\n"
<< "------------------------\n"
<< shader << "\nErrors:\n"
@@ -163,7 +163,7 @@ SharedContextState::~SharedContextState() {
// The context should be current so that texture deletes that result from
// destroying the cache happen in the right context (unless the context is
// lost in which case we don't delete the textures).
- DCHECK(IsCurrent(nullptr) || context_lost_);
+ DCHECK(IsCurrent(nullptr) || context_lost());
transfer_cache_.reset();
// We should have the last ref on this GrContext to ensure we're not holding
@@ -191,7 +191,7 @@ SharedContextState::~SharedContextState() {
this);
}
-void SharedContextState::InitializeGrContext(
+bool SharedContextState::InitializeGrContext(
const GpuPreferences& gpu_preferences,
const GpuDriverBugWorkarounds& workarounds,
GrContextOptions::PersistentCache* cache,
@@ -220,7 +220,7 @@ void SharedContextState::InitializeGrContext(
if (!interface) {
LOG(ERROR) << "OOP raster support disabled: GrGLInterface creation "
"failed.";
- return;
+ return false;
}
if (activity_flags && cache) {
@@ -255,12 +255,13 @@ void SharedContextState::InitializeGrContext(
}
if (!gr_context_) {
- LOG(ERROR) << "OOP raster support disabled: GrContext creation "
- "failed.";
- } else {
- gr_context_->setResourceCacheLimit(max_resource_cache_bytes);
+ LOG(ERROR) << "OOP raster support disabled: GrContext creation failed.";
+ return false;
}
+
+ gr_context_->setResourceCacheLimit(max_resource_cache_bytes);
transfer_cache_ = std::make_unique<ServiceTransferCache>(gpu_preferences);
+ return true;
}
bool SharedContextState::InitializeGL(
@@ -424,28 +425,23 @@ bool SharedContextState::InitializeGL(
}
bool SharedContextState::MakeCurrent(gl::GLSurface* surface, bool needs_gl) {
- if (context_lost_)
+ if (context_lost())
return false;
- if (gr_context_ && gr_context_->abandoned()) {
- MarkContextLost();
- return false;
- }
-
- if (!GrContextIsGL() && !needs_gl)
- return true;
-
- gl::GLSurface* dont_care_surface =
- last_current_surface_ ? last_current_surface_ : surface_.get();
- surface = surface ? surface : dont_care_surface;
+ const bool using_gl = GrContextIsGL() || needs_gl;
+ if (using_gl) {
+ gl::GLSurface* dont_care_surface =
+ last_current_surface_ ? last_current_surface_ : surface_.get();
+ surface = surface ? surface : dont_care_surface;
- if (!context_->MakeCurrent(surface)) {
- MarkContextLost();
- return false;
+ if (!context_->MakeCurrent(surface)) {
+ MarkContextLost(error::kMakeCurrentFailed);
+ return false;
+ }
+ last_current_surface_ = surface;
}
- last_current_surface_ = surface;
- return true;
+ return !CheckResetStatus(needs_gl);
}
void SharedContextState::ReleaseCurrent(gl::GLSurface* surface) {
@@ -456,14 +452,14 @@ void SharedContextState::ReleaseCurrent(gl::GLSurface* surface) {
return;
last_current_surface_ = nullptr;
- if (!context_lost_)
+ if (!context_lost())
context_->ReleaseCurrent(surface);
}
-void SharedContextState::MarkContextLost() {
- if (!context_lost_) {
+void SharedContextState::MarkContextLost(error::ContextLostReason reason) {
+ if (!context_lost()) {
scoped_refptr<SharedContextState> prevent_last_ref_drop = this;
- context_lost_ = true;
+ context_lost_reason_ = reason;
// context_state_ could be nullptr for some unittests.
if (context_state_)
context_state_->MarkContextLost();
@@ -486,7 +482,7 @@ void SharedContextState::MarkContextLost() {
bool SharedContextState::IsCurrent(gl::GLSurface* surface) {
if (!GrContextIsGL())
return true;
- if (context_lost_)
+ if (context_lost())
return false;
return context_->IsCurrent(surface);
}
@@ -669,4 +665,79 @@ QueryManager* SharedContextState::GetQueryManager() {
return nullptr;
}
+bool SharedContextState::CheckResetStatus(bool needs_gl) {
+ DCHECK(!context_lost());
+
+ if (device_needs_reset_)
+ return true;
+
+ if (gr_context_) {
+ // Maybe Skia detected VK_ERROR_DEVICE_LOST.
+ if (gr_context_->abandoned()) {
+ LOG(ERROR) << "SharedContextState context lost via Skia.";
+ device_needs_reset_ = true;
+ MarkContextLost(error::kUnknown);
+ return true;
+ }
+
+ if (gr_context_->oomed()) {
+ LOG(ERROR) << "SharedContextState context lost via Skia OOM.";
+ device_needs_reset_ = true;
+ MarkContextLost(error::kOutOfMemory);
+ return true;
+ }
+ }
+
+ // Not using GL.
+ if (!GrContextIsGL() && !needs_gl)
+ return false;
+
+ // GL is not initialized.
+ if (!context_state_)
+ return false;
+
+ GLenum error = context_state_->api()->glGetErrorFn();
+ if (error == GL_OUT_OF_MEMORY) {
+ LOG(ERROR) << "SharedContextState lost due to GL_OUT_OF_MEMORY";
+ MarkContextLost(error::kOutOfMemory);
+ device_needs_reset_ = true;
+ return true;
+ }
+
+ // Checking the reset status is expensive on some OS/drivers
+ // (https://crbug.com/1090232). Rate limit it.
+ constexpr base::TimeDelta kMinCheckDelay =
+ base::TimeDelta::FromMilliseconds(5);
+ base::Time now = base::Time::Now();
+ if (!disable_check_reset_status_throttling_for_test_ &&
+ now < last_gl_check_graphics_reset_status_ + kMinCheckDelay) {
+ return false;
+ }
+ last_gl_check_graphics_reset_status_ = now;
+
+ GLenum driver_status = context()->CheckStickyGraphicsResetStatus();
+ if (driver_status == GL_NO_ERROR)
+ return false;
+ LOG(ERROR) << "SharedContextState context lost via ARB/EXT_robustness. Reset "
+ "status = "
+ << gles2::GLES2Util::GetStringEnum(driver_status);
+
+ switch (driver_status) {
+ case GL_GUILTY_CONTEXT_RESET_ARB:
+ MarkContextLost(error::kGuilty);
+ break;
+ case GL_INNOCENT_CONTEXT_RESET_ARB:
+ MarkContextLost(error::kInnocent);
+ break;
+ case GL_UNKNOWN_CONTEXT_RESET_ARB:
+ MarkContextLost(error::kUnknown);
+ break;
+ default:
+ NOTREACHED();
+ break;
+ }
+ device_needs_reset_ = true;
+ return true;
+}
+
} // namespace gpu
diff --git a/chromium/gpu/command_buffer/service/shared_context_state.h b/chromium/gpu/command_buffer/service/shared_context_state.h
index 21e9687ffa5..6a7fcf0fc12 100644
--- a/chromium/gpu/command_buffer/service/shared_context_state.h
+++ b/chromium/gpu/command_buffer/service/shared_context_state.h
@@ -13,8 +13,12 @@
#include "base/memory/ref_counted.h"
#include "base/memory/weak_ptr.h"
#include "base/observer_list.h"
+#include "base/optional.h"
+#include "base/time/time.h"
#include "base/trace_event/memory_dump_provider.h"
#include "build/build_config.h"
+#include "gpu/command_buffer/common/constants.h"
+#include "gpu/command_buffer/common/gl2_types.h"
#include "gpu/command_buffer/common/skia_utils.h"
#include "gpu/command_buffer/service/gl_context_virtual_delegate.h"
#include "gpu/command_buffer/service/memory_tracking.h"
@@ -47,6 +51,10 @@ class FeatureInfo;
struct ContextState;
} // namespace gles2
+namespace raster {
+class RasterDecoderTestBase;
+} // namespace raster
+
class GPU_GLES2_EXPORT SharedContextState
: public base::trace_event::MemoryDumpProvider,
public gpu::GLContextVirtualDelegate,
@@ -68,7 +76,7 @@ class GPU_GLES2_EXPORT SharedContextState
base::WeakPtr<gpu::MemoryTracker::Observer> peak_memory_monitor =
nullptr);
- void InitializeGrContext(const GpuPreferences& gpu_preferences,
+ bool InitializeGrContext(const GpuPreferences& gpu_preferences,
const GpuDriverBugWorkarounds& workarounds,
GrContextOptions::PersistentCache* cache,
GpuProcessActivityFlags* activity_flags = nullptr,
@@ -92,7 +100,7 @@ class GPU_GLES2_EXPORT SharedContextState
bool MakeCurrent(gl::GLSurface* surface, bool needs_gl = false);
void ReleaseCurrent(gl::GLSurface* surface);
- void MarkContextLost();
+ void MarkContextLost(error::ContextLostReason reason = error::kUnknown);
bool IsCurrent(gl::GLSurface* surface);
void PurgeMemory(
@@ -122,7 +130,10 @@ class GPU_GLES2_EXPORT SharedContextState
void compileError(const char* shader, const char* errors) override;
gles2::FeatureInfo* feature_info() { return feature_info_.get(); }
gles2::ContextState* context_state() const { return context_state_.get(); }
- bool context_lost() const { return context_lost_; }
+ bool context_lost() const { return !!context_lost_reason_; }
+ base::Optional<error::ContextLostReason> context_lost_reason() {
+ return context_lost_reason_;
+ }
bool need_context_state_reset() const { return need_context_state_reset_; }
void set_need_context_state_reset(bool reset) {
need_context_state_reset_ = reset;
@@ -179,8 +190,14 @@ class GPU_GLES2_EXPORT SharedContextState
return found->second->unique();
}
+ // Updates |context_lost_reason| and returns true if lost
+ // (e.g. VK_ERROR_DEVICE_LOST or GL_UNKNOWN_CONTEXT_RESET_ARB).
+ bool CheckResetStatus(bool needs_gl);
+ bool device_needs_reset() { return device_needs_reset_; }
+
private:
friend class base::RefCounted<SharedContextState>;
+ friend class raster::RasterDecoderTestBase;
// Observer which is notified when SkiaOutputSurfaceImpl takes ownership of a
// shared image, and forward information to both histograms and task manager.
@@ -265,11 +282,15 @@ class GPU_GLES2_EXPORT SharedContextState
// driver's GL state.
bool need_context_state_reset_ = false;
- bool context_lost_ = false;
+ base::Optional<error::ContextLostReason> context_lost_reason_;
base::ObserverList<ContextLostObserver>::Unchecked context_lost_observers_;
base::MRUCache<void*, sk_sp<SkSurface>> sk_surface_cache_;
+ bool device_needs_reset_ = false;
+ base::Time last_gl_check_graphics_reset_status_;
+ bool disable_check_reset_status_throttling_for_test_ = false;
+
base::WeakPtrFactory<SharedContextState> weak_ptr_factory_{this};
DISALLOW_COPY_AND_ASSIGN(SharedContextState);
diff --git a/chromium/gpu/command_buffer/service/shared_image_backing_egl_image.cc b/chromium/gpu/command_buffer/service/shared_image_backing_egl_image.cc
index 3117ba53f49..6cb0ebeb6b6 100644
--- a/chromium/gpu/command_buffer/service/shared_image_backing_egl_image.cc
+++ b/chromium/gpu/command_buffer/service/shared_image_backing_egl_image.cc
@@ -37,7 +37,8 @@ class SharedImageRepresentationEglImageGLTexture
}
bool BeginAccess(GLenum mode) override {
- if (mode == GL_SHARED_IMAGE_ACCESS_MODE_READ_CHROMIUM) {
+ if (mode == GL_SHARED_IMAGE_ACCESS_MODE_READ_CHROMIUM ||
+ mode == GL_SHARED_IMAGE_ACCESS_MODE_OVERLAY_CHROMIUM) {
if (!egl_backing()->BeginRead(this))
return false;
mode_ = RepresentationAccessMode::kRead;
@@ -262,10 +263,10 @@ gles2::Texture* SharedImageBackingEglImage::GenEGLImageSibling() {
auto* texture = new gles2::Texture(service_id);
texture->SetLightweightRef();
texture->SetTarget(target, 1 /*max_levels*/);
- texture->sampler_state_.min_filter = GL_LINEAR;
- texture->sampler_state_.mag_filter = GL_LINEAR;
- texture->sampler_state_.wrap_t = GL_CLAMP_TO_EDGE;
- texture->sampler_state_.wrap_s = GL_CLAMP_TO_EDGE;
+ texture->set_min_filter(GL_LINEAR);
+ texture->set_mag_filter(GL_LINEAR);
+ texture->set_wrap_t(GL_CLAMP_TO_EDGE);
+ texture->set_wrap_s(GL_CLAMP_TO_EDGE);
// If the backing is already cleared, no need to clear it again.
gfx::Rect cleared_rect;
diff --git a/chromium/gpu/command_buffer/service/shared_image_backing_factory_ahardwarebuffer.cc b/chromium/gpu/command_buffer/service/shared_image_backing_factory_ahardwarebuffer.cc
index cc4b235d582..044f201a9e9 100644
--- a/chromium/gpu/command_buffer/service/shared_image_backing_factory_ahardwarebuffer.cc
+++ b/chromium/gpu/command_buffer/service/shared_image_backing_factory_ahardwarebuffer.cc
@@ -70,6 +70,9 @@ class OverlayImage final : public gl::GLImage {
base::ScopedFD TakeEndFence() {
DCHECK(!begin_read_fence_.is_valid());
+
+ previous_end_read_fence_ =
+ base::ScopedFD(HANDLE_EINTR(dup(end_read_fence_.get())));
return std::move(end_read_fence_);
}
@@ -78,7 +81,7 @@ class OverlayImage final : public gl::GLImage {
GetAHardwareBuffer() override {
return std::make_unique<ScopedHardwareBufferFenceSyncImpl>(
this, base::android::ScopedHardwareBufferHandle::Create(handle_.get()),
- std::move(begin_read_fence_));
+ std::move(begin_read_fence_), std::move(previous_end_read_fence_));
}
protected:
@@ -91,14 +94,20 @@ class OverlayImage final : public gl::GLImage {
ScopedHardwareBufferFenceSyncImpl(
scoped_refptr<OverlayImage> image,
base::android::ScopedHardwareBufferHandle handle,
- base::ScopedFD fence_fd)
- : ScopedHardwareBufferFenceSync(std::move(handle), std::move(fence_fd)),
+ base::ScopedFD fence_fd,
+ base::ScopedFD available_fence_fd)
+ : ScopedHardwareBufferFenceSync(std::move(handle),
+ std::move(fence_fd),
+ std::move(available_fence_fd),
+ false /* is_video */),
image_(std::move(image)) {}
~ScopedHardwareBufferFenceSyncImpl() override = default;
void SetReadFence(base::ScopedFD fence_fd, bool has_context) override {
DCHECK(!image_->begin_read_fence_.is_valid());
DCHECK(!image_->end_read_fence_.is_valid());
+ DCHECK(!image_->previous_end_read_fence_.is_valid());
+
image_->end_read_fence_ = std::move(fence_fd);
}
@@ -115,6 +124,10 @@ class OverlayImage final : public gl::GLImage {
// completion. The image content should not be modified before passing this
// fence.
base::ScopedFD end_read_fence_;
+
+ // The fence for overlay controller from the last frame where this buffer was
+ // presented.
+ base::ScopedFD previous_end_read_fence_;
};
} // namespace
@@ -170,7 +183,6 @@ class SharedImageBackingAHB : public ClearTrackingSharedImageBacking {
MemoryTypeTracker* tracker) override;
private:
- gles2::Texture* GenGLTexture();
const base::android::ScopedHardwareBufferHandle hardware_buffer_handle_;
// Not guarded by |lock_| as we do not use legacy_texture_ in threadsafe
@@ -213,7 +225,8 @@ class SharedImageRepresentationGLTextureAHB
gles2::Texture* GetTexture() override { return texture_; }
bool BeginAccess(GLenum mode) override {
- if (mode == GL_SHARED_IMAGE_ACCESS_MODE_READ_CHROMIUM) {
+ if (mode == GL_SHARED_IMAGE_ACCESS_MODE_READ_CHROMIUM ||
+ mode == GL_SHARED_IMAGE_ACCESS_MODE_OVERLAY_CHROMIUM) {
base::ScopedFD write_sync_fd;
if (!ahb_backing()->BeginRead(this, &write_sync_fd))
return false;
@@ -228,7 +241,8 @@ class SharedImageRepresentationGLTextureAHB
return false;
}
- if (mode == GL_SHARED_IMAGE_ACCESS_MODE_READ_CHROMIUM) {
+ if (mode == GL_SHARED_IMAGE_ACCESS_MODE_READ_CHROMIUM ||
+ mode == GL_SHARED_IMAGE_ACCESS_MODE_OVERLAY_CHROMIUM) {
mode_ = RepresentationAccessMode::kRead;
} else if (mode == GL_SHARED_IMAGE_ACCESS_MODE_READWRITE_CHROMIUM) {
mode_ = RepresentationAccessMode::kWrite;
@@ -316,12 +330,12 @@ class SharedImageRepresentationSkiaVkAHB
surface_props != surface_->props()) {
SkColorType sk_color_type = viz::ResourceFormatToClosestSkColorType(
/*gpu_compositing=*/true, format());
- surface_ = SkSurface::MakeFromBackendTextureAsRenderTarget(
+ surface_ = SkSurface::MakeFromBackendTexture(
gr_context, promise_texture_->backendTexture(),
kTopLeft_GrSurfaceOrigin, final_msaa_count, sk_color_type,
color_space().ToSkColorSpace(), &surface_props);
if (!surface_) {
- LOG(ERROR) << "MakeFromBackendTextureAsRenderTarget() failed.";
+ LOG(ERROR) << "MakeFromBackendTexture() failed.";
return nullptr;
}
surface_msaa_count_ = final_msaa_count;
@@ -582,7 +596,9 @@ bool SharedImageBackingAHB::ProduceLegacyMailbox(
DCHECK(!is_writing_);
DCHECK_EQ(size_t{0}, active_readers_.size());
DCHECK(hardware_buffer_handle_.is_valid());
- legacy_texture_ = GenGLTexture();
+ legacy_texture_ =
+ GenGLTexture(hardware_buffer_handle_.get(), GL_TEXTURE_2D, color_space(),
+ size(), estimated_size(), ClearedRect());
if (!legacy_texture_)
return false;
// Make sure our |legacy_texture_| has the right initial cleared rect.
@@ -602,7 +618,16 @@ SharedImageBackingAHB::ProduceGLTexture(SharedImageManager* manager,
MemoryTypeTracker* tracker) {
// Use same texture for all the texture representations generated from same
// backing.
- auto* texture = GenGLTexture();
+ DCHECK(hardware_buffer_handle_.is_valid());
+
+ // Note that we are not using GL_TEXTURE_EXTERNAL_OES target(here and all
+ // other places in this file) since sksurface
+ // doesn't supports it. As per the egl documentation -
+ // https://www.khronos.org/registry/OpenGL/extensions/OES/OES_EGL_image_external.txt
+ // if GL_OES_EGL_image is supported then <target> may also be TEXTURE_2D.
+ auto* texture =
+ GenGLTexture(hardware_buffer_handle_.get(), GL_TEXTURE_2D, color_space(),
+ size(), estimated_size(), ClearedRect());
if (!texture)
return nullptr;
@@ -620,11 +645,9 @@ SharedImageBackingAHB::ProduceSkia(
// Check whether we are in Vulkan mode OR GL mode and accordingly create
// Skia representation.
if (context_state->GrContextIsVulkan()) {
- auto* device_queue = context_state->vk_context_provider()->GetDeviceQueue();
- gfx::GpuMemoryBufferHandle gmb_handle(GetAhbHandle());
- auto vulkan_image = VulkanImage::CreateFromGpuMemoryBufferHandle(
- device_queue, std::move(gmb_handle), size(), ToVkFormat(format()),
- 0 /* usage */);
+ auto vulkan_image = CreateVkImageFromAhbHandle(
+ GetAhbHandle(), context_state.get(), size(), format());
+
if (!vulkan_image)
return nullptr;
@@ -633,8 +656,10 @@ SharedImageBackingAHB::ProduceSkia(
tracker);
}
DCHECK(context_state->GrContextIsGL());
-
- auto* texture = GenGLTexture();
+ DCHECK(hardware_buffer_handle_.is_valid());
+ auto* texture =
+ GenGLTexture(hardware_buffer_handle_.get(), GL_TEXTURE_2D, color_space(),
+ size(), estimated_size(), ClearedRect());
if (!texture)
return nullptr;
auto gl_representation =
@@ -759,64 +784,6 @@ void SharedImageBackingAHB::EndOverlayAccess() {
read_sync_fd_ = gl::MergeFDs(std::move(read_sync_fd_), std::move(fence_fd));
}
-gles2::Texture* SharedImageBackingAHB::GenGLTexture() {
- DCHECK(hardware_buffer_handle_.is_valid());
-
- // Target for AHB backed egl images.
- // Note that we are not using GL_TEXTURE_EXTERNAL_OES target since sksurface
- // doesn't supports it. As per the egl documentation -
- // https://www.khronos.org/registry/OpenGL/extensions/OES/OES_EGL_image_external.txt
- // if GL_OES_EGL_image is supported then <target> may also be TEXTURE_2D.
- GLenum target = GL_TEXTURE_2D;
- GLenum get_target = GL_TEXTURE_BINDING_2D;
-
- // Create a gles2 texture using the AhardwareBuffer.
- gl::GLApi* api = gl::g_current_gl_context;
- GLuint service_id = 0;
- api->glGenTexturesFn(1, &service_id);
- GLint old_texture_binding = 0;
- api->glGetIntegervFn(get_target, &old_texture_binding);
- api->glBindTextureFn(target, service_id);
- api->glTexParameteriFn(target, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
- api->glTexParameteriFn(target, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
- api->glTexParameteriFn(target, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
- api->glTexParameteriFn(target, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
-
- // Create an egl image using AHardwareBuffer.
- auto egl_image = base::MakeRefCounted<gl::GLImageAHardwareBuffer>(size());
- if (!egl_image->Initialize(hardware_buffer_handle_.get(), false)) {
- LOG(ERROR) << "Failed to create EGL image";
- api->glBindTextureFn(target, old_texture_binding);
- api->glDeleteTexturesFn(1, &service_id);
- return nullptr;
- }
- if (!egl_image->BindTexImage(target)) {
- LOG(ERROR) << "Failed to bind egl image";
- api->glBindTextureFn(target, old_texture_binding);
- api->glDeleteTexturesFn(1, &service_id);
- return nullptr;
- }
- egl_image->SetColorSpace(color_space());
-
- // Create a gles2 Texture.
- auto* texture = new gles2::Texture(service_id);
- texture->SetLightweightRef();
- texture->SetTarget(target, 1);
- texture->sampler_state_.min_filter = GL_LINEAR;
- texture->sampler_state_.mag_filter = GL_LINEAR;
- texture->sampler_state_.wrap_t = GL_CLAMP_TO_EDGE;
- texture->sampler_state_.wrap_s = GL_CLAMP_TO_EDGE;
-
- texture->SetLevelInfo(target, 0, egl_image->GetInternalFormat(),
- size().width(), size().height(), 1, 0,
- egl_image->GetDataFormat(), egl_image->GetDataType(),
- ClearedRect());
- texture->SetLevelImage(target, 0, egl_image.get(), gles2::Texture::BOUND);
- texture->SetImmutable(true, false);
- api->glBindTextureFn(target, old_texture_binding);
- return texture;
-}
-
SharedImageBackingFactoryAHB::SharedImageBackingFactoryAHB(
const GpuDriverBugWorkarounds& workarounds,
const GpuFeatureInfo& gpu_feature_info) {
@@ -1065,11 +1032,8 @@ SharedImageBackingFactoryAHB::CreateSharedImage(
const gfx::ColorSpace& color_space,
uint32_t usage,
base::span<const uint8_t> pixel_data) {
- auto backing =
- MakeBacking(mailbox, format, size, color_space, usage, false, pixel_data);
- if (backing)
- backing->OnWriteSucceeded();
- return backing;
+ return MakeBacking(mailbox, format, size, color_space, usage, false,
+ pixel_data);
}
bool SharedImageBackingFactoryAHB::CanImportGpuMemoryBuffer(
diff --git a/chromium/gpu/command_buffer/service/shared_image_backing_factory_gl_texture.cc b/chromium/gpu/command_buffer/service/shared_image_backing_factory_gl_texture.cc
index 91798bb9b36..bd4e77afcbd 100644
--- a/chromium/gpu/command_buffer/service/shared_image_backing_factory_gl_texture.cc
+++ b/chromium/gpu/command_buffer/service/shared_image_backing_factory_gl_texture.cc
@@ -25,6 +25,7 @@
#include "gpu/command_buffer/service/service_utils.h"
#include "gpu/command_buffer/service/shared_context_state.h"
#include "gpu/command_buffer/service/shared_image_backing.h"
+#include "gpu/command_buffer/service/shared_image_backing_factory_gl_texture_internal.h"
#include "gpu/command_buffer/service/shared_image_factory.h"
#include "gpu/command_buffer/service/shared_image_representation.h"
#include "gpu/command_buffer/service/skia_utils.h"
@@ -51,6 +52,10 @@
#include "gpu/command_buffer/service/shared_image_batch_access_manager.h"
#endif
+#if defined(OS_MACOSX)
+#include "gpu/command_buffer/service/shared_image_backing_factory_iosurface.h"
+#endif
+
namespace gpu {
namespace {
@@ -189,23 +194,6 @@ class ScopedRestoreTexture {
DISALLOW_COPY_AND_ASSIGN(ScopedRestoreTexture);
};
-GLuint MakeTextureAndSetParameters(gl::GLApi* api,
- GLenum target,
- bool framebuffer_attachment_angle) {
- GLuint service_id = 0;
- api->glGenTexturesFn(1, &service_id);
- api->glBindTextureFn(target, service_id);
- api->glTexParameteriFn(target, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
- api->glTexParameteriFn(target, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
- api->glTexParameteriFn(target, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
- api->glTexParameteriFn(target, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
- if (framebuffer_attachment_angle) {
- api->glTexParameteriFn(target, GL_TEXTURE_USAGE_ANGLE,
- GL_FRAMEBUFFER_ATTACHMENT_ANGLE);
- }
- return service_id;
-}
-
std::unique_ptr<SharedImageRepresentationDawn> ProduceDawnCommon(
SharedImageFactory* factory,
SharedImageManager* manager,
@@ -301,518 +289,718 @@ std::unique_ptr<SharedImageRepresentationDawn> ProduceDawnCommon(
return manager->ProduceDawn(dst_mailbox, tracker, device);
}
+size_t EstimatedSize(viz::ResourceFormat format, const gfx::Size& size) {
+ size_t estimated_size = 0;
+ viz::ResourceSizes::MaybeSizeInBytes(size, format, &estimated_size);
+ return estimated_size;
+}
+
} // anonymous namespace
+///////////////////////////////////////////////////////////////////////////////
+// SharedImageRepresentationGLTextureImpl
+
// Representation of a SharedImageBackingGLTexture as a GL Texture.
-class SharedImageRepresentationGLTextureImpl
- : public SharedImageRepresentationGLTexture {
- public:
- SharedImageRepresentationGLTextureImpl(SharedImageManager* manager,
- SharedImageBacking* backing,
- MemoryTypeTracker* tracker,
- gles2::Texture* texture)
- : SharedImageRepresentationGLTexture(manager, backing, tracker),
- texture_(texture) {}
+SharedImageRepresentationGLTextureImpl::SharedImageRepresentationGLTextureImpl(
+ SharedImageManager* manager,
+ SharedImageBacking* backing,
+ Client* client,
+ MemoryTypeTracker* tracker,
+ gles2::Texture* texture)
+ : SharedImageRepresentationGLTexture(manager, backing, tracker),
+ client_(client),
+ texture_(texture) {}
- gles2::Texture* GetTexture() override { return texture_; }
+gles2::Texture* SharedImageRepresentationGLTextureImpl::GetTexture() {
+ return texture_;
+}
- private:
- gles2::Texture* texture_;
-};
+bool SharedImageRepresentationGLTextureImpl::BeginAccess(GLenum mode) {
+ if (client_)
+ return client_->OnGLTextureBeginAccess(mode);
+ return true;
+}
-// Representation of a SharedImageBackingGLTexturePassthrough as a GL
-// TexturePassthrough.
-class SharedImageRepresentationGLTexturePassthroughImpl
- : public SharedImageRepresentationGLTexturePassthrough {
- public:
- SharedImageRepresentationGLTexturePassthroughImpl(
- SharedImageManager* manager,
- SharedImageBacking* backing,
- MemoryTypeTracker* tracker,
- scoped_refptr<gles2::TexturePassthrough> texture_passthrough)
- : SharedImageRepresentationGLTexturePassthrough(manager,
- backing,
- tracker),
- texture_passthrough_(std::move(texture_passthrough)) {}
-
- const scoped_refptr<gles2::TexturePassthrough>& GetTexturePassthrough()
- override {
- return texture_passthrough_;
- }
+///////////////////////////////////////////////////////////////////////////////
+// SharedImageRepresentationGLTexturePassthroughImpl
+
+SharedImageRepresentationGLTexturePassthroughImpl::
+ SharedImageRepresentationGLTexturePassthroughImpl(
+ SharedImageManager* manager,
+ SharedImageBacking* backing,
+ Client* client,
+ MemoryTypeTracker* tracker,
+ scoped_refptr<gles2::TexturePassthrough> texture_passthrough)
+ : SharedImageRepresentationGLTexturePassthrough(manager, backing, tracker),
+ client_(client),
+ texture_passthrough_(std::move(texture_passthrough)) {}
+
+SharedImageRepresentationGLTexturePassthroughImpl::
+ ~SharedImageRepresentationGLTexturePassthroughImpl() = default;
+
+const scoped_refptr<gles2::TexturePassthrough>&
+SharedImageRepresentationGLTexturePassthroughImpl::GetTexturePassthrough() {
+ return texture_passthrough_;
+}
- void EndAccess() override {
- GLenum target = texture_passthrough_->target();
- gl::GLImage* image = texture_passthrough_->GetLevelImage(target, 0);
- if (!image)
- return;
- if (image->ShouldBindOrCopy() == gl::GLImage::BIND) {
- gl::ScopedTextureBinder binder(target,
- texture_passthrough_->service_id());
- image->ReleaseTexImage(target);
- image->BindTexImage(target);
- }
- }
+bool SharedImageRepresentationGLTexturePassthroughImpl::BeginAccess(
+ GLenum mode) {
+ if (client_)
+ return client_->OnGLTexturePassthroughBeginAccess(mode);
+ return true;
+}
- private:
- scoped_refptr<gles2::TexturePassthrough> texture_passthrough_;
-};
+///////////////////////////////////////////////////////////////////////////////
+// SharedImageBackingGLCommon
-class SharedImageBackingWithReadAccess : public SharedImageBacking {
- public:
- SharedImageBackingWithReadAccess(const Mailbox& mailbox,
- viz::ResourceFormat format,
- const gfx::Size& size,
- const gfx::ColorSpace& color_space,
- uint32_t usage,
- size_t estimated_size,
- bool is_thread_safe)
- : SharedImageBacking(mailbox,
- format,
- size,
- color_space,
- usage,
- estimated_size,
- is_thread_safe) {}
- ~SharedImageBackingWithReadAccess() override = default;
-
- virtual void BeginReadAccess() = 0;
-};
+// static
+void SharedImageBackingGLCommon::MakeTextureAndSetParameters(
+ GLenum target,
+ GLuint service_id,
+ bool framebuffer_attachment_angle,
+ scoped_refptr<gles2::TexturePassthrough>* passthrough_texture,
+ gles2::Texture** texture) {
+ if (!service_id) {
+ gl::GLApi* api = gl::g_current_gl_context;
+ ScopedRestoreTexture scoped_restore(api, target);
-class SharedImageRepresentationSkiaImpl : public SharedImageRepresentationSkia {
- public:
- SharedImageRepresentationSkiaImpl(
- SharedImageManager* manager,
- SharedImageBackingWithReadAccess* backing,
- scoped_refptr<SharedContextState> context_state,
- sk_sp<SkPromiseImageTexture> cached_promise_texture,
- MemoryTypeTracker* tracker,
- GLenum target,
- GLuint service_id)
- : SharedImageRepresentationSkia(manager, backing, tracker),
- context_state_(std::move(context_state)),
- promise_texture_(cached_promise_texture) {
- if (!promise_texture_) {
- GrBackendTexture backend_texture;
- GetGrBackendTexture(context_state_->feature_info(), target, size(),
- service_id, format(), &backend_texture);
- promise_texture_ = SkPromiseImageTexture::Make(backend_texture);
+ api->glGenTexturesFn(1, &service_id);
+ api->glBindTextureFn(target, service_id);
+ api->glTexParameteriFn(target, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
+ api->glTexParameteriFn(target, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
+ api->glTexParameteriFn(target, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
+ api->glTexParameteriFn(target, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
+ if (framebuffer_attachment_angle) {
+ api->glTexParameteriFn(target, GL_TEXTURE_USAGE_ANGLE,
+ GL_FRAMEBUFFER_ATTACHMENT_ANGLE);
}
+ }
+ if (passthrough_texture) {
+ *passthrough_texture =
+ base::MakeRefCounted<gles2::TexturePassthrough>(service_id, target);
+ }
+ if (texture) {
+ *texture = new gles2::Texture(service_id);
+ (*texture)->SetLightweightRef();
+ (*texture)->SetTarget(target, 1);
+ (*texture)->set_min_filter(GL_LINEAR);
+ (*texture)->set_mag_filter(GL_LINEAR);
+ (*texture)->set_wrap_s(GL_CLAMP_TO_EDGE);
+ (*texture)->set_wrap_t(GL_CLAMP_TO_EDGE);
+ }
+}
+
+///////////////////////////////////////////////////////////////////////////////
+// SharedImageRepresentationSkiaImpl
+
+SharedImageRepresentationSkiaImpl::SharedImageRepresentationSkiaImpl(
+ SharedImageManager* manager,
+ SharedImageBacking* backing,
+ Client* client,
+ scoped_refptr<SharedContextState> context_state,
+ sk_sp<SkPromiseImageTexture> promise_texture,
+ MemoryTypeTracker* tracker)
+ : SharedImageRepresentationSkia(manager, backing, tracker),
+ client_(client),
+ context_state_(std::move(context_state)),
+ promise_texture_(promise_texture) {
+ DCHECK(promise_texture_);
#if DCHECK_IS_ON()
+ if (context_state_->GrContextIsGL())
context_ = gl::GLContext::GetCurrent();
#endif
- }
+}
- ~SharedImageRepresentationSkiaImpl() override {
- if (write_surface_) {
- DLOG(ERROR) << "SharedImageRepresentationSkia was destroyed while still "
- << "open for write access.";
- }
+SharedImageRepresentationSkiaImpl::~SharedImageRepresentationSkiaImpl() {
+ if (write_surface_) {
+ DLOG(ERROR) << "SharedImageRepresentationSkia was destroyed while still "
+ << "open for write access.";
}
+}
- sk_sp<SkSurface> BeginWriteAccess(
- int final_msaa_count,
- const SkSurfaceProps& surface_props,
- std::vector<GrBackendSemaphore>* begin_semaphores,
- std::vector<GrBackendSemaphore>* end_semaphores) override {
- CheckContext();
- if (write_surface_)
- return nullptr;
-
- if (!promise_texture_) {
- return nullptr;
- }
- SkColorType sk_color_type = viz::ResourceFormatToClosestSkColorType(
- /*gpu_compositing=*/true, format());
- auto surface = SkSurface::MakeFromBackendTextureAsRenderTarget(
- context_state_->gr_context(), promise_texture_->backendTexture(),
- kTopLeft_GrSurfaceOrigin, final_msaa_count, sk_color_type,
- backing()->color_space().ToSkColorSpace(), &surface_props);
- write_surface_ = surface.get();
- return surface;
- }
+sk_sp<SkSurface> SharedImageRepresentationSkiaImpl::BeginWriteAccess(
+ int final_msaa_count,
+ const SkSurfaceProps& surface_props,
+ std::vector<GrBackendSemaphore>* begin_semaphores,
+ std::vector<GrBackendSemaphore>* end_semaphores) {
+ CheckContext();
+ if (client_ && !client_->OnSkiaBeginWriteAccess())
+ return nullptr;
+ if (write_surface_)
+ return nullptr;
- void EndWriteAccess(sk_sp<SkSurface> surface) override {
- DCHECK_EQ(surface.get(), write_surface_);
- DCHECK(surface->unique());
- CheckContext();
- // TODO(ericrk): Keep the surface around for re-use.
- write_surface_ = nullptr;
+ if (!promise_texture_) {
+ return nullptr;
}
+ SkColorType sk_color_type = viz::ResourceFormatToClosestSkColorType(
+ /*gpu_compositing=*/true, format());
+ auto surface = SkSurface::MakeFromBackendTexture(
+ context_state_->gr_context(), promise_texture_->backendTexture(),
+ kTopLeft_GrSurfaceOrigin, final_msaa_count, sk_color_type,
+ backing()->color_space().ToSkColorSpace(), &surface_props);
+ write_surface_ = surface.get();
+ return surface;
+}
- sk_sp<SkPromiseImageTexture> BeginReadAccess(
- std::vector<GrBackendSemaphore>* begin_semaphores,
- std::vector<GrBackendSemaphore>* end_semaphores) override {
- CheckContext();
- static_cast<SharedImageBackingWithReadAccess*>(backing())
- ->BeginReadAccess();
- return promise_texture_;
- }
+void SharedImageRepresentationSkiaImpl::EndWriteAccess(
+ sk_sp<SkSurface> surface) {
+ DCHECK_EQ(surface.get(), write_surface_);
+ DCHECK(surface->unique());
+ CheckContext();
+ // TODO(ericrk): Keep the surface around for re-use.
+ write_surface_ = nullptr;
+}
- void EndReadAccess() override {
- // TODO(ericrk): Handle begin/end correctness checks.
- }
+sk_sp<SkPromiseImageTexture> SharedImageRepresentationSkiaImpl::BeginReadAccess(
+ std::vector<GrBackendSemaphore>* begin_semaphores,
+ std::vector<GrBackendSemaphore>* end_semaphores) {
+ CheckContext();
+ if (client_ && !client_->OnSkiaBeginReadAccess())
+ return nullptr;
+ return promise_texture_;
+}
- bool SupportsMultipleConcurrentReadAccess() override { return true; }
+void SharedImageRepresentationSkiaImpl::EndReadAccess() {
+ // TODO(ericrk): Handle begin/end correctness checks.
+}
- sk_sp<SkPromiseImageTexture> promise_texture() { return promise_texture_; }
+bool SharedImageRepresentationSkiaImpl::SupportsMultipleConcurrentReadAccess() {
+ return true;
+}
- private:
- void CheckContext() {
+void SharedImageRepresentationSkiaImpl::CheckContext() {
#if DCHECK_IS_ON()
+ if (context_)
DCHECK(gl::GLContext::GetCurrent() == context_);
#endif
- }
+}
- scoped_refptr<SharedContextState> context_state_;
- sk_sp<SkPromiseImageTexture> promise_texture_;
+///////////////////////////////////////////////////////////////////////////////
+// SharedImageBackingGLTexture
- SkSurface* write_surface_ = nullptr;
-#if DCHECK_IS_ON()
- gl::GLContext* context_;
-#endif
-};
-
-// Implementation of SharedImageBacking that creates a GL Texture and stores it
-// as a gles2::Texture. Can be used with the legacy mailbox implementation.
-class SharedImageBackingGLTexture : public SharedImageBackingWithReadAccess {
- public:
- SharedImageBackingGLTexture(const Mailbox& mailbox,
- viz::ResourceFormat format,
- const gfx::Size& size,
- const gfx::ColorSpace& color_space,
- uint32_t usage,
- gles2::Texture* texture,
- const UnpackStateAttribs& attribs)
- : SharedImageBackingWithReadAccess(mailbox,
- format,
- size,
- color_space,
- usage,
- texture->estimated_size(),
- false /* is_thread_safe */),
- texture_(texture),
- attribs_(attribs) {
- DCHECK(texture_);
- gl::GLImage* image =
- texture_->GetLevelImage(texture_->target(), 0, nullptr);
- if (image)
- native_pixmap_ = image->GetNativePixmap();
+SharedImageBackingGLTexture::SharedImageBackingGLTexture(
+ const Mailbox& mailbox,
+ viz::ResourceFormat format,
+ const gfx::Size& size,
+ const gfx::ColorSpace& color_space,
+ uint32_t usage,
+ bool is_passthrough)
+ : SharedImageBacking(mailbox,
+ format,
+ size,
+ color_space,
+ usage,
+ EstimatedSize(format, size),
+ false /* is_thread_safe */),
+ is_passthrough_(is_passthrough) {}
+
+SharedImageBackingGLTexture::~SharedImageBackingGLTexture() {
+ if (IsPassthrough()) {
+ if (passthrough_texture_) {
+ if (!have_context())
+ passthrough_texture_->MarkContextLost();
+ passthrough_texture_.reset();
+ }
+ } else {
+ if (texture_) {
+ texture_->RemoveLightweightRef(have_context());
+ texture_ = nullptr;
+ }
}
+}
- ~SharedImageBackingGLTexture() override {
- DCHECK(texture_);
- texture_->RemoveLightweightRef(have_context());
- texture_ = nullptr;
+GLenum SharedImageBackingGLTexture::GetGLTarget() const {
+ return texture_ ? texture_->target() : passthrough_texture_->target();
+}
- if (rgb_emulation_texture_) {
- rgb_emulation_texture_->RemoveLightweightRef(have_context());
- rgb_emulation_texture_ = nullptr;
- }
+GLuint SharedImageBackingGLTexture::GetGLServiceId() const {
+ return texture_ ? texture_->service_id() : passthrough_texture_->service_id();
+}
+
+void SharedImageBackingGLTexture::OnMemoryDump(
+ const std::string& dump_name,
+ base::trace_event::MemoryAllocatorDump* dump,
+ base::trace_event::ProcessMemoryDump* pmd,
+ uint64_t client_tracing_id) {
+ const auto client_guid = GetSharedImageGUIDForTracing(mailbox());
+ if (!IsPassthrough()) {
+ const auto service_guid =
+ gl::GetGLTextureServiceGUIDForTracing(texture_->service_id());
+ pmd->CreateSharedGlobalAllocatorDump(service_guid);
+ pmd->AddOwnershipEdge(client_guid, service_guid, /* importance */ 2);
+ texture_->DumpLevelMemory(pmd, client_tracing_id, dump_name);
}
+}
- gfx::Rect ClearedRect() const override {
+gfx::Rect SharedImageBackingGLTexture::ClearedRect() const {
+ if (IsPassthrough()) {
+ // This backing is used exclusively with ANGLE which handles clear tracking
+ // internally. Act as though the texture is always cleared.
+ return gfx::Rect(size());
+ } else {
return texture_->GetLevelClearedRect(texture_->target(), 0);
}
+}
- void SetClearedRect(const gfx::Rect& cleared_rect) override {
+void SharedImageBackingGLTexture::SetClearedRect(
+ const gfx::Rect& cleared_rect) {
+ if (!IsPassthrough())
texture_->SetLevelClearedRect(texture_->target(), 0, cleared_rect);
- }
-
- void Update(std::unique_ptr<gfx::GpuFence> in_fence) override {
- GLenum target = texture_->target();
- gl::GLApi* api = gl::g_current_gl_context;
- ScopedRestoreTexture scoped_restore(api, target);
- api->glBindTextureFn(target, texture_->service_id());
-
- gles2::Texture::ImageState old_state = gles2::Texture::UNBOUND;
- gl::GLImage* image = texture_->GetLevelImage(target, 0, &old_state);
- if (!image)
- return;
- if (old_state == gles2::Texture::BOUND)
- image->ReleaseTexImage(target);
-
- if (in_fence) {
- // TODO(dcastagna): Don't wait for the fence if the SharedImage is going
- // to be scanned out as an HW overlay. Currently we don't know that at
- // this point and we always bind the image, therefore we need to wait for
- // the fence.
- std::unique_ptr<gl::GLFence> egl_fence =
- gl::GLFence::CreateFromGpuFence(*in_fence.get());
- egl_fence->ServerWait();
- }
- gles2::Texture::ImageState new_state = gles2::Texture::UNBOUND;
- if (image->ShouldBindOrCopy() == gl::GLImage::BIND &&
- image->BindTexImage(target)) {
- new_state = gles2::Texture::BOUND;
- }
- if (old_state != new_state)
- texture_->SetLevelImage(target, 0, image, new_state);
- }
+}
- bool ProduceLegacyMailbox(MailboxManager* mailbox_manager) override {
- DCHECK(texture_);
+bool SharedImageBackingGLTexture::ProduceLegacyMailbox(
+ MailboxManager* mailbox_manager) {
+ if (IsPassthrough())
+ mailbox_manager->ProduceTexture(mailbox(), passthrough_texture_.get());
+ else
mailbox_manager->ProduceTexture(mailbox(), texture_);
- return true;
- }
+ return true;
+}
- void OnMemoryDump(const std::string& dump_name,
- base::trace_event::MemoryAllocatorDump* dump,
- base::trace_event::ProcessMemoryDump* pmd,
- uint64_t client_tracing_id) override {
- // Add a |service_guid| which expresses shared ownership between the
- // various GPU dumps.
- auto client_guid = GetSharedImageGUIDForTracing(mailbox());
- auto service_guid =
- gl::GetGLTextureServiceGUIDForTracing(texture_->service_id());
- pmd->CreateSharedGlobalAllocatorDump(service_guid);
- // TODO(piman): coalesce constant with TextureManager::DumpTextureRef.
- int importance = 2; // This client always owns the ref.
+std::unique_ptr<SharedImageRepresentationGLTexture>
+SharedImageBackingGLTexture::ProduceGLTexture(SharedImageManager* manager,
+ MemoryTypeTracker* tracker) {
+ DCHECK(texture_);
+ return std::make_unique<SharedImageRepresentationGLTextureImpl>(
+ manager, this, nullptr, tracker, texture_);
+}
- pmd->AddOwnershipEdge(client_guid, service_guid, importance);
+std::unique_ptr<SharedImageRepresentationGLTexturePassthrough>
+SharedImageBackingGLTexture::ProduceGLTexturePassthrough(
+ SharedImageManager* manager,
+ MemoryTypeTracker* tracker) {
+ DCHECK(passthrough_texture_);
+ return std::make_unique<SharedImageRepresentationGLTexturePassthroughImpl>(
+ manager, this, nullptr, tracker, passthrough_texture_);
+}
- // Dump all sub-levels held by the texture. They will appear below the
- // main gl/textures/client_X/mailbox_Y dump.
- texture_->DumpLevelMemory(pmd, client_tracing_id, dump_name);
+std::unique_ptr<SharedImageRepresentationDawn>
+SharedImageBackingGLTexture::ProduceDawn(SharedImageManager* manager,
+ MemoryTypeTracker* tracker,
+ WGPUDevice device) {
+ if (!factory()) {
+ DLOG(ERROR) << "No SharedImageFactory to create a dawn representation.";
+ return nullptr;
}
- void BeginReadAccess() override {
- GLenum target = texture_->target();
- gles2::Texture::ImageState old_state = gles2::Texture::UNBOUND;
- gl::GLImage* image = texture_->GetLevelImage(target, 0, &old_state);
- if (image && old_state == gpu::gles2::Texture::UNBOUND) {
- gl::GLApi* api = gl::g_current_gl_context;
- ScopedRestoreTexture scoped_restore(api, target);
- api->glBindTextureFn(target, texture_->service_id());
- gles2::Texture::ImageState new_state = gles2::Texture::UNBOUND;
- if (image->ShouldBindOrCopy() == gl::GLImage::BIND) {
- if (image->BindTexImage(target))
- new_state = gles2::Texture::BOUND;
- } else {
- ScopedResetAndRestoreUnpackState scoped_unpack_state(api, attribs_,
- /*upload=*/true);
- if (image->CopyTexImage(target))
- new_state = gles2::Texture::COPIED;
- }
- if (old_state != new_state)
- texture_->SetLevelImage(target, 0, image, new_state);
- }
- }
+ return ProduceDawnCommon(factory(), manager, tracker, device, this,
+ IsPassthrough());
+}
- scoped_refptr<gfx::NativePixmap> GetNativePixmap() override {
- return native_pixmap_;
+std::unique_ptr<SharedImageRepresentationSkia>
+SharedImageBackingGLTexture::ProduceSkia(
+ SharedImageManager* manager,
+ MemoryTypeTracker* tracker,
+ scoped_refptr<SharedContextState> context_state) {
+ if (!cached_promise_texture_) {
+ GrBackendTexture backend_texture;
+ GetGrBackendTexture(context_state->feature_info(), GetGLTarget(), size(),
+ GetGLServiceId(), format(), &backend_texture);
+ cached_promise_texture_ = SkPromiseImageTexture::Make(backend_texture);
}
+ return std::make_unique<SharedImageRepresentationSkiaImpl>(
+ manager, this, nullptr, std::move(context_state), cached_promise_texture_,
+ tracker);
+}
- protected:
- std::unique_ptr<SharedImageRepresentationGLTexture> ProduceGLTexture(
- SharedImageManager* manager,
- MemoryTypeTracker* tracker) override {
- return std::make_unique<SharedImageRepresentationGLTextureImpl>(
- manager, this, tracker, texture_);
+void SharedImageBackingGLTexture::Update(
+ std::unique_ptr<gfx::GpuFence> in_fence) {}
+
+void SharedImageBackingGLTexture::InitializeGLTexture(
+ GLuint service_id,
+ const SharedImageBackingGLCommon::InitializeGLTextureParams& params) {
+ SharedImageBackingGLCommon::MakeTextureAndSetParameters(
+ params.target, service_id, params.framebuffer_attachment_angle,
+ IsPassthrough() ? &passthrough_texture_ : nullptr,
+ IsPassthrough() ? nullptr : &texture_);
+
+ if (IsPassthrough()) {
+ passthrough_texture_->SetEstimatedSize(EstimatedSize(format(), size()));
+ } else {
+ texture_->SetLevelInfo(params.target, 0, params.internal_format,
+ size().width(), size().height(), 1, 0, params.format,
+ params.type,
+ params.is_cleared ? gfx::Rect(size()) : gfx::Rect());
+ texture_->SetImmutable(true, params.has_immutable_storage);
}
+}
- std::unique_ptr<SharedImageRepresentationGLTexture>
- ProduceRGBEmulationGLTexture(SharedImageManager* manager,
- MemoryTypeTracker* tracker) override {
- if (!rgb_emulation_texture_) {
- GLenum target = texture_->target();
- gl::GLApi* api = gl::g_current_gl_context;
- ScopedRestoreTexture scoped_restore(api, target);
-
- // Set to false as this code path is only used on Mac.
- bool framebuffer_attachment_angle = false;
- GLuint service_id = MakeTextureAndSetParameters(
- api, target, framebuffer_attachment_angle);
-
- gles2::Texture::ImageState image_state = gles2::Texture::BOUND;
- gl::GLImage* image = texture_->GetLevelImage(target, 0, &image_state);
- if (!image) {
- LOG(ERROR) << "Texture is not bound to an image.";
- return nullptr;
- }
+void SharedImageBackingGLTexture::SetCompatibilitySwizzle(
+ const gles2::Texture::CompatibilitySwizzle* swizzle) {
+ if (!IsPassthrough())
+ texture_->SetCompatibilitySwizzle(swizzle);
+}
- DCHECK(image->ShouldBindOrCopy() == gl::GLImage::BIND);
- const GLenum internal_format = GL_RGB;
- if (!image->BindTexImageWithInternalformat(target, internal_format)) {
- LOG(ERROR) << "Failed to bind image to rgb texture.";
- api->glDeleteTexturesFn(1, &service_id);
- return nullptr;
- }
+///////////////////////////////////////////////////////////////////////////////
+// SharedImageBackingGLImage
- rgb_emulation_texture_ = new gles2::Texture(service_id);
- rgb_emulation_texture_->SetLightweightRef();
- rgb_emulation_texture_->SetTarget(target, 1);
- rgb_emulation_texture_->sampler_state_.min_filter = GL_LINEAR;
- rgb_emulation_texture_->sampler_state_.mag_filter = GL_LINEAR;
- rgb_emulation_texture_->sampler_state_.wrap_s = GL_CLAMP_TO_EDGE;
- rgb_emulation_texture_->sampler_state_.wrap_t = GL_CLAMP_TO_EDGE;
-
- GLenum format = gles2::TextureManager::ExtractFormatFromStorageFormat(
- internal_format);
- GLenum type =
- gles2::TextureManager::ExtractTypeFromStorageFormat(internal_format);
-
- const gles2::Texture::LevelInfo* info = texture_->GetLevelInfo(target, 0);
- rgb_emulation_texture_->SetLevelInfo(target, 0, internal_format,
- info->width, info->height, 1, 0,
- format, type, info->cleared_rect);
-
- rgb_emulation_texture_->SetLevelImage(target, 0, image, image_state);
- rgb_emulation_texture_->SetImmutable(true, false);
- }
+SharedImageBackingGLImage::SharedImageBackingGLImage(
+ scoped_refptr<gl::GLImage> image,
+ const Mailbox& mailbox,
+ viz::ResourceFormat format,
+ const gfx::Size& size,
+ const gfx::ColorSpace& color_space,
+ uint32_t usage,
+ const SharedImageBackingGLCommon::InitializeGLTextureParams& params,
+ const UnpackStateAttribs& attribs,
+ bool is_passthrough)
+ : SharedImageBacking(mailbox,
+ format,
+ size,
+ color_space,
+ usage,
+ EstimatedSize(format, size),
+ false /* is_thread_safe */),
+ image_(image),
+ gl_params_(params),
+ gl_unpack_attribs_(attribs),
+ is_passthrough_(is_passthrough),
+ weak_factory_(this) {
+ DCHECK(image_);
+}
- return std::make_unique<SharedImageRepresentationGLTextureImpl>(
- manager, this, tracker, rgb_emulation_texture_);
+SharedImageBackingGLImage::~SharedImageBackingGLImage() {
+ if (rgb_emulation_texture_) {
+ rgb_emulation_texture_->RemoveLightweightRef(have_context());
+ rgb_emulation_texture_ = nullptr;
}
-
- std::unique_ptr<SharedImageRepresentationSkia> ProduceSkia(
- SharedImageManager* manager,
- MemoryTypeTracker* tracker,
- scoped_refptr<SharedContextState> context_state) override {
- auto result = std::make_unique<SharedImageRepresentationSkiaImpl>(
- manager, this, std::move(context_state), cached_promise_texture_,
- tracker, texture_->target(), texture_->service_id());
- cached_promise_texture_ = result->promise_texture();
- return result;
+ if (IsPassthrough()) {
+ if (passthrough_texture_) {
+ if (!have_context())
+ passthrough_texture_->MarkContextLost();
+ passthrough_texture_.reset();
+ }
+ } else {
+ if (texture_) {
+ texture_->RemoveLightweightRef(have_context());
+ texture_ = nullptr;
+ }
}
+}
- std::unique_ptr<SharedImageRepresentationDawn> ProduceDawn(
- SharedImageManager* manager,
- MemoryTypeTracker* tracker,
- WGPUDevice device) override {
- if (!factory()) {
- DLOG(ERROR) << "No SharedImageFactory to create a dawn representation.";
- return nullptr;
- }
+GLenum SharedImageBackingGLImage::GetGLTarget() const {
+ return gl_params_.target;
+}
- return ProduceDawnCommon(factory(), manager, tracker, device, this, false);
- }
+GLuint SharedImageBackingGLImage::GetGLServiceId() const {
+ return texture_ ? texture_->service_id() : passthrough_texture_->service_id();
+}
- private:
- gles2::Texture* texture_ = nullptr;
- gles2::Texture* rgb_emulation_texture_ = nullptr;
- sk_sp<SkPromiseImageTexture> cached_promise_texture_;
- const UnpackStateAttribs attribs_;
- scoped_refptr<gfx::NativePixmap> native_pixmap_;
-};
+scoped_refptr<gfx::NativePixmap> SharedImageBackingGLImage::GetNativePixmap() {
+ if (IsPassthrough())
+ return nullptr;
-// Implementation of SharedImageBacking that creates a GL Texture and stores it
-// as a gles2::TexturePassthrough. Can be used with the legacy mailbox
-// implementation.
-class SharedImageBackingPassthroughGLTexture
- : public SharedImageBackingWithReadAccess {
- public:
- SharedImageBackingPassthroughGLTexture(
- const Mailbox& mailbox,
- viz::ResourceFormat format,
- const gfx::Size& size,
- const gfx::ColorSpace& color_space,
- uint32_t usage,
- scoped_refptr<gles2::TexturePassthrough> passthrough_texture)
- : SharedImageBackingWithReadAccess(mailbox,
- format,
- size,
- color_space,
- usage,
- passthrough_texture->estimated_size(),
- false /* is_thread_safe */),
- texture_passthrough_(std::move(passthrough_texture)) {
- DCHECK(texture_passthrough_);
- }
+ return image_->GetNativePixmap();
+}
- ~SharedImageBackingPassthroughGLTexture() override {
- DCHECK(texture_passthrough_);
- if (!have_context())
- texture_passthrough_->MarkContextLost();
- texture_passthrough_.reset();
+void SharedImageBackingGLImage::OnMemoryDump(
+ const std::string& dump_name,
+ base::trace_event::MemoryAllocatorDump* dump,
+ base::trace_event::ProcessMemoryDump* pmd,
+ uint64_t client_tracing_id) {
+ // Add a |service_guid| which expresses shared ownership between the
+ // various GPU dumps.
+ auto client_guid = GetSharedImageGUIDForTracing(mailbox());
+ auto service_guid = gl::GetGLTextureServiceGUIDForTracing(GetGLServiceId());
+ pmd->CreateSharedGlobalAllocatorDump(service_guid);
+ // TODO(piman): coalesce constant with TextureManager::DumpTextureRef.
+ int importance = 2; // This client always owns the ref.
+
+ pmd->AddOwnershipEdge(client_guid, service_guid, importance);
+
+ if (IsPassthrough()) {
+ auto* gl_image = passthrough_texture_->GetLevelImage(GetGLTarget(), 0);
+ if (gl_image)
+ gl_image->OnMemoryDump(pmd, client_tracing_id, dump_name);
+ } else {
+ // Dump all sub-levels held by the texture. They will appear below the
+ // main gl/textures/client_X/mailbox_Y dump.
+ texture_->DumpLevelMemory(pmd, client_tracing_id, dump_name);
}
+}
- gfx::Rect ClearedRect() const override {
+gfx::Rect SharedImageBackingGLImage::ClearedRect() const {
+ if (IsPassthrough()) {
// This backing is used exclusively with ANGLE which handles clear tracking
// internally. Act as though the texture is always cleared.
return gfx::Rect(size());
+ } else {
+ return texture_->GetLevelClearedRect(texture_->target(), 0);
}
+}
+void SharedImageBackingGLImage::SetClearedRect(const gfx::Rect& cleared_rect) {
+ if (!IsPassthrough())
+ texture_->SetLevelClearedRect(texture_->target(), 0, cleared_rect);
+}
+bool SharedImageBackingGLImage::ProduceLegacyMailbox(
+ MailboxManager* mailbox_manager) {
+ if (IsPassthrough())
+ mailbox_manager->ProduceTexture(mailbox(), passthrough_texture_.get());
+ else
+ mailbox_manager->ProduceTexture(mailbox(), texture_);
+ return true;
+}
- void SetClearedRect(const gfx::Rect& cleared_rect) override {}
+std::unique_ptr<SharedImageRepresentationGLTexture>
+SharedImageBackingGLImage::ProduceGLTexture(SharedImageManager* manager,
+ MemoryTypeTracker* tracker) {
+ DCHECK(texture_);
+ return std::make_unique<SharedImageRepresentationGLTextureImpl>(
+ manager, this, this, tracker, texture_);
+}
+std::unique_ptr<SharedImageRepresentationGLTexturePassthrough>
+SharedImageBackingGLImage::ProduceGLTexturePassthrough(
+ SharedImageManager* manager,
+ MemoryTypeTracker* tracker) {
+ DCHECK(passthrough_texture_);
+ return std::make_unique<SharedImageRepresentationGLTexturePassthroughImpl>(
+ manager, this, this, tracker, passthrough_texture_);
+}
- void Update(std::unique_ptr<gfx::GpuFence> in_fence) override {
- GLenum target = texture_passthrough_->target();
- gl::GLApi* api = gl::g_current_gl_context;
- ScopedRestoreTexture scoped_restore(api, target);
- api->glBindTextureFn(target, texture_passthrough_->service_id());
-
- gl::GLImage* image = texture_passthrough_->GetLevelImage(target, 0);
- if (!image)
- return;
- image->ReleaseTexImage(target);
- if (image->ShouldBindOrCopy() == gl::GLImage::BIND)
- image->BindTexImage(target);
- else
- image->CopyTexImage(target);
+std::unique_ptr<SharedImageRepresentationOverlay>
+SharedImageBackingGLImage::ProduceOverlay(SharedImageManager* manager,
+ MemoryTypeTracker* tracker) {
+#if defined(OS_MACOSX)
+ return SharedImageBackingFactoryIOSurface::ProduceOverlay(manager, this,
+ tracker, image_);
+#else // defined(OS_MACOSX)
+ return SharedImageBacking::ProduceOverlay(manager, tracker);
+#endif // !defined(OS_MACOSX)
+}
+
+std::unique_ptr<SharedImageRepresentationDawn>
+SharedImageBackingGLImage::ProduceDawn(SharedImageManager* manager,
+ MemoryTypeTracker* tracker,
+ WGPUDevice device) {
+#if defined(OS_MACOSX)
+ auto result = SharedImageBackingFactoryIOSurface::ProduceDawn(
+ manager, this, tracker, device, image_);
+ if (result)
+ return result;
+#endif // defined(OS_MACOSX)
+ if (!factory()) {
+ DLOG(ERROR) << "No SharedImageFactory to create a dawn representation.";
+ return nullptr;
}
- bool ProduceLegacyMailbox(MailboxManager* mailbox_manager) override {
- DCHECK(texture_passthrough_);
- mailbox_manager->ProduceTexture(mailbox(), texture_passthrough_.get());
- return true;
+ return ProduceDawnCommon(factory(), manager, tracker, device, this,
+ IsPassthrough());
+}
+
+std::unique_ptr<SharedImageRepresentationSkia>
+SharedImageBackingGLImage::ProduceSkia(
+ SharedImageManager* manager,
+ MemoryTypeTracker* tracker,
+ scoped_refptr<SharedContextState> context_state) {
+ if (!cached_promise_texture_) {
+ if (context_state->GrContextIsMetal()) {
+#if defined(OS_MACOSX)
+ cached_promise_texture_ =
+ SharedImageBackingFactoryIOSurface::ProduceSkiaPromiseTextureMetal(
+ this, context_state, image_);
+ DCHECK(cached_promise_texture_);
+#endif
+ } else {
+ GrBackendTexture backend_texture;
+ GetGrBackendTexture(context_state->feature_info(), GetGLTarget(), size(),
+ GetGLServiceId(), format(), &backend_texture);
+ cached_promise_texture_ = SkPromiseImageTexture::Make(backend_texture);
+ }
}
+ return std::make_unique<SharedImageRepresentationSkiaImpl>(
+ manager, this, this, std::move(context_state), cached_promise_texture_,
+ tracker);
+}
- void OnMemoryDump(const std::string& dump_name,
- base::trace_event::MemoryAllocatorDump* dump,
- base::trace_event::ProcessMemoryDump* pmd,
- uint64_t client_tracing_id) override {
- // Add a |service_guid| which expresses shared ownership between the
- // various GPU dumps.
- auto client_guid = GetSharedImageGUIDForTracing(mailbox());
- auto service_guid = gl::GetGLTextureServiceGUIDForTracing(
- texture_passthrough_->service_id());
- pmd->CreateSharedGlobalAllocatorDump(service_guid);
+std::unique_ptr<SharedImageRepresentationGLTexture>
+SharedImageBackingGLImage::ProduceRGBEmulationGLTexture(
+ SharedImageManager* manager,
+ MemoryTypeTracker* tracker) {
+ if (IsPassthrough())
+ return nullptr;
- int importance = 2; // This client always owns the ref.
- pmd->AddOwnershipEdge(client_guid, service_guid, importance);
+ if (!rgb_emulation_texture_) {
+ const GLenum target = GetGLTarget();
+ gl::GLApi* api = gl::g_current_gl_context;
+ ScopedRestoreTexture scoped_restore(api, target);
- auto* gl_image = texture_passthrough_->GetLevelImage(
- texture_passthrough_->target(), /*level=*/0);
- if (gl_image)
- gl_image->OnMemoryDump(pmd, client_tracing_id, dump_name);
+ // Set to false as this code path is only used on Mac.
+ const bool framebuffer_attachment_angle = false;
+ SharedImageBackingGLCommon::MakeTextureAndSetParameters(
+ target, 0 /* service_id */, framebuffer_attachment_angle, nullptr,
+ &rgb_emulation_texture_);
+ api->glBindTextureFn(target, rgb_emulation_texture_->service_id());
+
+ gles2::Texture::ImageState image_state = gles2::Texture::BOUND;
+ gl::GLImage* image = texture_->GetLevelImage(target, 0, &image_state);
+ DCHECK_EQ(image, image_.get());
+
+ DCHECK(image->ShouldBindOrCopy() == gl::GLImage::BIND);
+ const GLenum internal_format = GL_RGB;
+ if (!image->BindTexImageWithInternalformat(target, internal_format)) {
+ LOG(ERROR) << "Failed to bind image to rgb texture.";
+ rgb_emulation_texture_->RemoveLightweightRef(true /* have_context */);
+ rgb_emulation_texture_ = nullptr;
+ return nullptr;
+ }
+ GLenum format =
+ gles2::TextureManager::ExtractFormatFromStorageFormat(internal_format);
+ GLenum type =
+ gles2::TextureManager::ExtractTypeFromStorageFormat(internal_format);
+
+ const gles2::Texture::LevelInfo* info = texture_->GetLevelInfo(target, 0);
+ rgb_emulation_texture_->SetLevelInfo(target, 0, internal_format,
+ info->width, info->height, 1, 0,
+ format, type, info->cleared_rect);
+
+ rgb_emulation_texture_->SetLevelImage(target, 0, image, image_state);
+ rgb_emulation_texture_->SetImmutable(true, false);
}
- void BeginReadAccess() override {}
+ return std::make_unique<SharedImageRepresentationGLTextureImpl>(
+ manager, this, this, tracker, rgb_emulation_texture_);
+}
- protected:
- std::unique_ptr<SharedImageRepresentationGLTexturePassthrough>
- ProduceGLTexturePassthrough(SharedImageManager* manager,
- MemoryTypeTracker* tracker) override {
- return std::make_unique<SharedImageRepresentationGLTexturePassthroughImpl>(
- manager, this, tracker, texture_passthrough_);
+void SharedImageBackingGLImage::Update(
+ std::unique_ptr<gfx::GpuFence> in_fence) {
+ if (in_fence) {
+ // TODO(dcastagna): Don't wait for the fence if the SharedImage is going
+ // to be scanned out as an HW overlay. Currently we don't know that at
+ // this point and we always bind the image, therefore we need to wait for
+ // the fence.
+ std::unique_ptr<gl::GLFence> egl_fence =
+ gl::GLFence::CreateFromGpuFence(*in_fence.get());
+ egl_fence->ServerWait();
}
- std::unique_ptr<SharedImageRepresentationSkia> ProduceSkia(
- SharedImageManager* manager,
- MemoryTypeTracker* tracker,
- scoped_refptr<SharedContextState> context_state) override {
- auto result = std::make_unique<SharedImageRepresentationSkiaImpl>(
- manager, this, std::move(context_state), cached_promise_texture_,
- tracker, texture_passthrough_->target(),
- texture_passthrough_->service_id());
- cached_promise_texture_ = result->promise_texture();
- return result;
+ image_bind_or_copy_needed_ = true;
+}
+
+bool SharedImageBackingGLImage::OnGLTextureBeginAccess(GLenum mode) {
+ if (mode == GL_SHARED_IMAGE_ACCESS_MODE_OVERLAY_CHROMIUM)
+ return true;
+ return BindOrCopyImageIfNeeded();
+}
+
+bool SharedImageBackingGLImage::OnGLTexturePassthroughBeginAccess(GLenum mode) {
+ if (mode == GL_SHARED_IMAGE_ACCESS_MODE_OVERLAY_CHROMIUM)
+ return true;
+ return BindOrCopyImageIfNeeded();
+}
+
+bool SharedImageBackingGLImage::OnSkiaBeginReadAccess() {
+ return BindOrCopyImageIfNeeded();
+}
+
+bool SharedImageBackingGLImage::OnSkiaBeginWriteAccess() {
+ return BindOrCopyImageIfNeeded();
+}
+
+bool SharedImageBackingGLImage::InitializeGLTexture() {
+ SharedImageBackingGLCommon::MakeTextureAndSetParameters(
+ gl_params_.target, 0 /* service_id */,
+ gl_params_.framebuffer_attachment_angle,
+ IsPassthrough() ? &passthrough_texture_ : nullptr,
+ IsPassthrough() ? nullptr : &texture_);
+
+ // Set the GLImage to be unbound from the texture.
+ if (IsPassthrough()) {
+ passthrough_texture_->SetEstimatedSize(EstimatedSize(format(), size()));
+ passthrough_texture_->SetLevelImage(gl_params_.target, 0, image_.get());
+ passthrough_texture_->set_is_bind_pending(true);
+ } else {
+ texture_->SetLevelInfo(
+ gl_params_.target, 0, gl_params_.internal_format, size().width(),
+ size().height(), 1, 0, gl_params_.format, gl_params_.type,
+ gl_params_.is_cleared ? gfx::Rect(size()) : gfx::Rect());
+ texture_->SetLevelImage(gl_params_.target, 0, image_.get(),
+ gles2::Texture::UNBOUND);
+ texture_->SetImmutable(true, false /* has_immutable_storage */);
}
- std::unique_ptr<SharedImageRepresentationDawn> ProduceDawn(
- SharedImageManager* manager,
- MemoryTypeTracker* tracker,
- WGPUDevice device) override {
- if (!factory()) {
- DLOG(ERROR) << "No SharedImageFactory to create a dawn representation.";
- return nullptr;
+ // Historically we have bound GLImages at initialization, rather than waiting
+ // until the bound representation is actually needed.
+ if (image_->ShouldBindOrCopy() == gl::GLImage::BIND)
+ return BindOrCopyImageIfNeeded();
+ return true;
+}
+
+bool SharedImageBackingGLImage::BindOrCopyImageIfNeeded() {
+ if (!image_bind_or_copy_needed_)
+ return true;
+
+ const GLenum target = GetGLTarget();
+ gl::GLApi* api = gl::g_current_gl_context;
+ ScopedRestoreTexture scoped_restore(api, target);
+ api->glBindTextureFn(target, GetGLServiceId());
+
+ // Un-bind the GLImage from the texture if it is currently bound.
+ if (image_->ShouldBindOrCopy() == gl::GLImage::BIND) {
+ bool is_bound = false;
+ if (IsPassthrough()) {
+ is_bound = !passthrough_texture_->is_bind_pending();
+ } else {
+ gles2::Texture::ImageState old_state = gles2::Texture::UNBOUND;
+ texture_->GetLevelImage(target, 0, &old_state);
+ is_bound = old_state == gles2::Texture::BOUND;
}
+ if (is_bound)
+ image_->ReleaseTexImage(target);
+ }
- return ProduceDawnCommon(factory(), manager, tracker, device, this, true);
+ // Bind or copy the GLImage to the texture.
+ gles2::Texture::ImageState new_state = gles2::Texture::UNBOUND;
+ if (image_->ShouldBindOrCopy() == gl::GLImage::BIND) {
+ if (gl_params_.is_rgb_emulation) {
+ if (!image_->BindTexImageWithInternalformat(target, GL_RGB)) {
+ LOG(ERROR) << "Failed to bind GLImage to RGB target";
+ return false;
+ }
+ } else {
+ if (!image_->BindTexImage(target)) {
+ LOG(ERROR) << "Failed to bind GLImage to target";
+ return false;
+ }
+ }
+ new_state = gles2::Texture::BOUND;
+ } else {
+ ScopedResetAndRestoreUnpackState scoped_unpack_state(api,
+ gl_unpack_attribs_,
+ /*upload=*/true);
+ if (!image_->CopyTexImage(target)) {
+ LOG(ERROR) << "Failed to copy GLImage to target";
+ return false;
+ }
+ new_state = gles2::Texture::COPIED;
+ }
+ if (IsPassthrough()) {
+ passthrough_texture_->set_is_bind_pending(new_state ==
+ gles2::Texture::UNBOUND);
+ } else {
+ texture_->SetLevelImage(target, 0, image_.get(), new_state);
}
- private:
- scoped_refptr<gles2::TexturePassthrough> texture_passthrough_;
- sk_sp<SkPromiseImageTexture> cached_promise_texture_;
-};
+ image_bind_or_copy_needed_ = false;
+ return true;
+}
+
+void SharedImageBackingGLImage::InitializePixels(GLenum format,
+ GLenum type,
+ const uint8_t* data) {
+ DCHECK_EQ(image_->ShouldBindOrCopy(), gl::GLImage::BIND);
+ BindOrCopyImageIfNeeded();
+
+ const GLenum target = GetGLTarget();
+ gl::GLApi* api = gl::g_current_gl_context;
+ ScopedRestoreTexture scoped_restore(api, target);
+ api->glBindTextureFn(target, GetGLServiceId());
+ ScopedResetAndRestoreUnpackState scoped_unpack_state(
+ api, gl_unpack_attribs_, true /* uploading_data */);
+ api->glTexSubImage2DFn(target, 0, 0, 0, size().width(), size().height(),
+ format, type, data);
+}
+
+///////////////////////////////////////////////////////////////////////////////
+// SharedImageBackingFactoryGLTexture
SharedImageBackingFactoryGLTexture::SharedImageBackingFactoryGLTexture(
const GpuPreferences& gpu_preferences,
@@ -1019,44 +1207,27 @@ SharedImageBackingFactoryGLTexture::CreateSharedImage(
image->SetColorSpace(color_space);
viz::ResourceFormat format = viz::GetResourceFormat(buffer_format);
-
- gl::GLApi* api = gl::g_current_gl_context;
- ScopedRestoreTexture scoped_restore(api, target);
-
const bool for_framebuffer_attachment =
(usage & (SHARED_IMAGE_USAGE_RASTER |
SHARED_IMAGE_USAGE_GLES2_FRAMEBUFFER_HINT)) != 0;
- GLuint service_id = MakeTextureAndSetParameters(
- api, target, for_framebuffer_attachment && texture_usage_angle_);
- bool is_rgb_emulation = usage & SHARED_IMAGE_USAGE_RGB_EMULATION;
-
- gles2::Texture::ImageState image_state = gles2::Texture::UNBOUND;
- if (image->ShouldBindOrCopy() == gl::GLImage::BIND) {
- bool is_bound = false;
- if (is_rgb_emulation)
- is_bound = image->BindTexImageWithInternalformat(target, GL_RGB);
- else
- is_bound = image->BindTexImage(target);
- if (is_bound) {
- image_state = gles2::Texture::BOUND;
- } else {
- LOG(ERROR) << "Failed to bind image to target.";
- api->glDeleteTexturesFn(1, &service_id);
- return nullptr;
- }
- } else if (use_passthrough_) {
- image->CopyTexImage(target);
- image_state = gles2::Texture::COPIED;
- }
+ const bool is_rgb_emulation = (usage & SHARED_IMAGE_USAGE_RGB_EMULATION) != 0;
- GLuint internal_format =
+ SharedImageBackingGLCommon::InitializeGLTextureParams params;
+ params.target = target;
+ params.internal_format =
is_rgb_emulation ? GL_RGB : image->GetInternalFormat();
- GLenum gl_format = is_rgb_emulation ? GL_RGB : image->GetDataFormat();
- GLenum gl_type = image->GetDataType();
-
- return MakeBacking(use_passthrough_, mailbox, target, service_id, image,
- image_state, internal_format, gl_format, gl_type, nullptr,
- true, false, format, size, color_space, usage, attribs);
+ params.format = is_rgb_emulation ? GL_RGB : image->GetDataFormat();
+ params.type = image->GetDataType();
+ params.is_cleared = true;
+ params.is_rgb_emulation = is_rgb_emulation;
+ params.framebuffer_attachment_angle =
+ for_framebuffer_attachment && texture_usage_angle_;
+ auto result = std::make_unique<SharedImageBackingGLImage>(
+ image, mailbox, format, size, color_space, usage, params, attribs,
+ use_passthrough_);
+ if (!result->InitializeGLTexture())
+ return nullptr;
+ return std::move(result);
}
std::unique_ptr<SharedImageBacking>
@@ -1068,11 +1239,17 @@ SharedImageBackingFactoryGLTexture::CreateSharedImageForTest(
viz::ResourceFormat format,
const gfx::Size& size,
uint32_t usage) {
- return MakeBacking(false, mailbox, target, service_id, nullptr,
- gles2::Texture::UNBOUND, viz::GLInternalFormat(format),
- viz::GLDataFormat(format), viz::GLDataType(format),
- nullptr, is_cleared, false, format, size,
- gfx::ColorSpace(), usage, UnpackStateAttribs());
+ auto result = std::make_unique<SharedImageBackingGLTexture>(
+ mailbox, format, size, gfx::ColorSpace(), usage,
+ false /* is_passthrough */);
+ SharedImageBackingGLCommon::InitializeGLTextureParams params;
+ params.target = target;
+ params.internal_format = viz::GLInternalFormat(format);
+ params.format = viz::GLDataFormat(format);
+ params.type = viz::GLDataType(format);
+ params.is_cleared = is_cleared;
+ result->InitializeGLTexture(service_id, params);
+ return std::move(result);
}
scoped_refptr<gl::GLImage> SharedImageBackingFactoryGLTexture::MakeGLImage(
@@ -1109,66 +1286,6 @@ bool SharedImageBackingFactoryGLTexture::CanImportGpuMemoryBuffer(
}
std::unique_ptr<SharedImageBacking>
-SharedImageBackingFactoryGLTexture::MakeBacking(
- bool passthrough,
- const Mailbox& mailbox,
- GLenum target,
- GLuint service_id,
- scoped_refptr<gl::GLImage> image,
- gles2::Texture::ImageState image_state,
- GLuint level_info_internal_format,
- GLuint gl_format,
- GLuint gl_type,
- const gles2::Texture::CompatibilitySwizzle* swizzle,
- bool is_cleared,
- bool has_immutable_storage,
- viz::ResourceFormat format,
- const gfx::Size& size,
- const gfx::ColorSpace& color_space,
- uint32_t usage,
- const UnpackStateAttribs& attribs) {
- if (passthrough) {
- scoped_refptr<gles2::TexturePassthrough> passthrough_texture =
- base::MakeRefCounted<gles2::TexturePassthrough>(service_id, target);
- if (image) {
- passthrough_texture->SetLevelImage(target, 0, image.get());
- passthrough_texture->set_is_bind_pending(image_state ==
- gles2::Texture::UNBOUND);
- }
-
- // Get the texture size from ANGLE and set it on the passthrough texture.
- GLint texture_memory_size = 0;
- gl::GLApi* api = gl::g_current_gl_context;
- api->glGetTexParameterivFn(target, GL_MEMORY_SIZE_ANGLE,
- &texture_memory_size);
- passthrough_texture->SetEstimatedSize(texture_memory_size);
-
- return std::make_unique<SharedImageBackingPassthroughGLTexture>(
- mailbox, format, size, color_space, usage,
- std::move(passthrough_texture));
- } else {
- gles2::Texture* texture = new gles2::Texture(service_id);
- texture->SetLightweightRef();
- texture->SetTarget(target, 1);
- texture->sampler_state_.min_filter = GL_LINEAR;
- texture->sampler_state_.mag_filter = GL_LINEAR;
- texture->sampler_state_.wrap_s = GL_CLAMP_TO_EDGE;
- texture->sampler_state_.wrap_t = GL_CLAMP_TO_EDGE;
- texture->SetLevelInfo(target, 0, level_info_internal_format, size.width(),
- size.height(), 1, 0, gl_format, gl_type,
- is_cleared ? gfx::Rect(size) : gfx::Rect());
- if (swizzle)
- texture->SetCompatibilitySwizzle(swizzle);
- if (image)
- texture->SetLevelImage(target, 0, image.get(), image_state);
- texture->SetImmutable(true, has_immutable_storage);
-
- return std::make_unique<SharedImageBackingGLTexture>(
- mailbox, format, size, color_space, usage, texture, attribs);
- }
-}
-
-std::unique_ptr<SharedImageBacking>
SharedImageBackingFactoryGLTexture::MakeEglImageBacking(
const Mailbox& mailbox,
viz::ResourceFormat format,
@@ -1291,23 +1408,17 @@ SharedImageBackingFactoryGLTexture::CreateSharedImageInternal(
}
}
- gl::GLApi* api = gl::g_current_gl_context;
- ScopedRestoreTexture scoped_restore(api, target);
-
const bool for_framebuffer_attachment =
(usage & (SHARED_IMAGE_USAGE_RASTER |
SHARED_IMAGE_USAGE_GLES2_FRAMEBUFFER_HINT)) != 0;
- GLuint service_id = MakeTextureAndSetParameters(
- api, target, for_framebuffer_attachment && texture_usage_angle_);
scoped_refptr<gl::GLImage> image;
+
// TODO(piman): We pretend the texture was created in an ES2 context, so that
// it can be used in other ES2 contexts, and so we have to pass gl_format as
// the internal format in the LevelInfo. https://crbug.com/628064
GLuint level_info_internal_format = format_info.gl_format;
bool is_cleared = false;
- bool needs_subimage_upload = false;
- bool has_immutable_storage = false;
if (use_buffer) {
image = image_factory_->CreateAnonymousImage(
size, format_info.buffer_format, gfx::BufferUsage::SCANOUT,
@@ -1322,55 +1433,79 @@ SharedImageBackingFactoryGLTexture::CreateSharedImageInternal(
surface_handle, &is_cleared);
}
// The allocated image should not require copy.
- if (!image || image->ShouldBindOrCopy() != gl::GLImage::BIND ||
- !image->BindTexImage(target)) {
- LOG(ERROR) << "CreateSharedImage: Failed to "
- << (image ? "bind" : "create") << " image";
- api->glDeleteTexturesFn(1, &service_id);
+ if (!image || image->ShouldBindOrCopy() != gl::GLImage::BIND) {
+ LOG(ERROR) << "CreateSharedImage: Failed to create bindable image";
return nullptr;
}
level_info_internal_format = image->GetInternalFormat();
if (color_space.IsValid())
image->SetColorSpace(color_space);
- needs_subimage_upload = !pixel_data.empty();
- } else if (format_info.supports_storage) {
- api->glTexStorage2DEXTFn(target, 1, format_info.storage_internal_format,
- size.width(), size.height());
- has_immutable_storage = true;
- needs_subimage_upload = !pixel_data.empty();
- } else if (format_info.is_compressed) {
- ScopedResetAndRestoreUnpackState scoped_unpack_state(api, attribs,
- !pixel_data.empty());
- api->glCompressedTexImage2DFn(target, 0, format_info.image_internal_format,
- size.width(), size.height(), 0,
- pixel_data.size(), pixel_data.data());
- } else {
- ScopedResetAndRestoreUnpackState scoped_unpack_state(api, attribs,
- !pixel_data.empty());
- api->glTexImage2DFn(target, 0, format_info.image_internal_format,
- size.width(), size.height(), 0,
- format_info.adjusted_format, format_info.gl_type,
- pixel_data.data());
}
- // If we are using a buffer or TexStorage API but have data to upload, do so
- // now via TexSubImage2D.
- if (needs_subimage_upload) {
- ScopedResetAndRestoreUnpackState scoped_unpack_state(api, attribs,
- !pixel_data.empty());
- api->glTexSubImage2DFn(target, 0, 0, 0, size.width(), size.height(),
- format_info.adjusted_format, format_info.gl_type,
- pixel_data.data());
- }
+ SharedImageBackingGLCommon::InitializeGLTextureParams params;
+ params.target = target;
+ params.internal_format = level_info_internal_format;
+ params.format = format_info.gl_format;
+ params.type = format_info.gl_type;
+ params.is_cleared = pixel_data.empty() ? is_cleared : true;
+ params.has_immutable_storage = !image && format_info.supports_storage;
+ params.framebuffer_attachment_angle =
+ for_framebuffer_attachment && texture_usage_angle_;
+
+ if (image) {
+ DCHECK(!format_info.swizzle);
+ auto result = std::make_unique<SharedImageBackingGLImage>(
+ image, mailbox, format, size, color_space, usage, params, attribs,
+ use_passthrough_);
+ if (!result->InitializeGLTexture())
+ return nullptr;
+ if (!pixel_data.empty()) {
+ result->InitializePixels(format_info.adjusted_format, format_info.gl_type,
+ pixel_data.data());
+ }
+ return std::move(result);
+ } else {
+ auto result = std::make_unique<SharedImageBackingGLTexture>(
+ mailbox, format, size, color_space, usage, use_passthrough_);
+ result->InitializeGLTexture(0, params);
- return MakeBacking(
- use_passthrough_, mailbox, target, service_id, image,
- gles2::Texture::BOUND, level_info_internal_format, format_info.gl_format,
- format_info.gl_type, format_info.swizzle,
- pixel_data.empty() ? is_cleared : true, has_immutable_storage, format,
- size, color_space, usage, attribs);
+ gl::GLApi* api = gl::g_current_gl_context;
+ ScopedRestoreTexture scoped_restore(api, target);
+ api->glBindTextureFn(target, result->GetGLServiceId());
+
+ if (format_info.supports_storage) {
+ api->glTexStorage2DEXTFn(target, 1, format_info.storage_internal_format,
+ size.width(), size.height());
+
+ if (!pixel_data.empty()) {
+ ScopedResetAndRestoreUnpackState scoped_unpack_state(
+ api, attribs, true /* uploading_data */);
+ api->glTexSubImage2DFn(target, 0, 0, 0, size.width(), size.height(),
+ format_info.adjusted_format, format_info.gl_type,
+ pixel_data.data());
+ }
+ } else if (format_info.is_compressed) {
+ ScopedResetAndRestoreUnpackState scoped_unpack_state(api, attribs,
+ !pixel_data.empty());
+ api->glCompressedTexImage2DFn(
+ target, 0, format_info.image_internal_format, size.width(),
+ size.height(), 0, pixel_data.size(), pixel_data.data());
+ } else {
+ ScopedResetAndRestoreUnpackState scoped_unpack_state(api, attribs,
+ !pixel_data.empty());
+ api->glTexImage2DFn(target, 0, format_info.image_internal_format,
+ size.width(), size.height(), 0,
+ format_info.adjusted_format, format_info.gl_type,
+ pixel_data.data());
+ }
+ result->SetCompatibilitySwizzle(format_info.swizzle);
+ return std::move(result);
+ }
}
+///////////////////////////////////////////////////////////////////////////////
+// SharedImageBackingFactoryGLTexture::FormatInfo
+
SharedImageBackingFactoryGLTexture::FormatInfo::FormatInfo() = default;
SharedImageBackingFactoryGLTexture::FormatInfo::~FormatInfo() = default;
diff --git a/chromium/gpu/command_buffer/service/shared_image_backing_factory_gl_texture.h b/chromium/gpu/command_buffer/service/shared_image_backing_factory_gl_texture.h
index 257cca42041..b73c65631d9 100644
--- a/chromium/gpu/command_buffer/service/shared_image_backing_factory_gl_texture.h
+++ b/chromium/gpu/command_buffer/service/shared_image_backing_factory_gl_texture.h
@@ -94,24 +94,6 @@ class GPU_GLES2_EXPORT SharedImageBackingFactoryGLTexture
gfx::BufferFormat format,
SurfaceHandle surface_handle,
const gfx::Size& size);
- static std::unique_ptr<SharedImageBacking> MakeBacking(
- bool passthrough,
- const Mailbox& mailbox,
- GLenum target,
- GLuint service_id,
- scoped_refptr<gl::GLImage> image,
- gles2::Texture::ImageState image_state,
- GLuint internal_format,
- GLuint gl_format,
- GLuint gl_type,
- const gles2::Texture::CompatibilitySwizzle* swizzle,
- bool is_cleared,
- bool has_immutable_storage,
- viz::ResourceFormat format,
- const gfx::Size& size,
- const gfx::ColorSpace& color_space,
- uint32_t usage,
- const UnpackStateAttribs& attribs);
// This is meant to be used only on Android. Return nullptr for other
// platforms.
diff --git a/chromium/gpu/command_buffer/service/shared_image_backing_factory_gl_texture_internal.h b/chromium/gpu/command_buffer/service/shared_image_backing_factory_gl_texture_internal.h
new file mode 100644
index 00000000000..dafdfd4a359
--- /dev/null
+++ b/chromium/gpu/command_buffer/service/shared_image_backing_factory_gl_texture_internal.h
@@ -0,0 +1,296 @@
+// Copyright 2020 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef GPU_COMMAND_BUFFER_SERVICE_SHARED_IMAGE_BACKING_FACTORY_GL_TEXTURE_INTERNAL_H_
+#define GPU_COMMAND_BUFFER_SERVICE_SHARED_IMAGE_BACKING_FACTORY_GL_TEXTURE_INTERNAL_H_
+
+#include "gpu/command_buffer/service/shared_image_backing.h"
+#include "gpu/command_buffer/service/shared_image_backing_factory_gl_texture.h"
+#include "gpu/command_buffer/service/shared_image_representation.h"
+
+namespace gpu {
+
+// Representation of a SharedImageBackingGLTexture or SharedImageBackingGLImage
+// as a GL Texture.
+class SharedImageRepresentationGLTextureImpl
+ : public SharedImageRepresentationGLTexture {
+ public:
+ class Client {
+ public:
+ virtual bool OnGLTextureBeginAccess(GLenum mode) = 0;
+ };
+ SharedImageRepresentationGLTextureImpl(SharedImageManager* manager,
+ SharedImageBacking* backing,
+ Client* client,
+ MemoryTypeTracker* tracker,
+ gles2::Texture* texture);
+
+ private:
+ // SharedImageRepresentationGLTexture:
+ gles2::Texture* GetTexture() override;
+ bool BeginAccess(GLenum mode) override;
+
+ Client* const client_ = nullptr;
+ gles2::Texture* texture_;
+};
+
+// Representation of a SharedImageBackingGLTexture or
+// SharedImageBackingGLTexturePassthrough as a GL TexturePassthrough.
+class SharedImageRepresentationGLTexturePassthroughImpl
+ : public SharedImageRepresentationGLTexturePassthrough {
+ public:
+ class Client {
+ public:
+ virtual bool OnGLTexturePassthroughBeginAccess(GLenum mode) = 0;
+ };
+ SharedImageRepresentationGLTexturePassthroughImpl(
+ SharedImageManager* manager,
+ SharedImageBacking* backing,
+ Client* client,
+ MemoryTypeTracker* tracker,
+ scoped_refptr<gles2::TexturePassthrough> texture_passthrough);
+ ~SharedImageRepresentationGLTexturePassthroughImpl() override;
+
+ private:
+ // SharedImageRepresentationGLTexturePassthrough:
+ const scoped_refptr<gles2::TexturePassthrough>& GetTexturePassthrough()
+ override;
+ bool BeginAccess(GLenum mode) override;
+
+ Client* const client_ = nullptr;
+ scoped_refptr<gles2::TexturePassthrough> texture_passthrough_;
+};
+
+// Common helper functions for SharedImageBackingGLTexture and
+// SharedImageBackingPassthroughGLImage.
+class SharedImageBackingGLCommon : public SharedImageBacking {
+ public:
+ // These parameters are used to explicitly initialize a GL texture.
+ // TODO(https://crbug.com/1092155): The goal here is to cache these parameters
+ // (which are specified at initialization), so that the GL texture can be
+ // allocated and bound lazily. In that world, |service_id| will not be a
+ // parameter, but will be allocated lazily, and |image| will be handled by the
+ // relevant sub-class.
+ struct InitializeGLTextureParams {
+ GLenum target = 0;
+ GLenum internal_format = 0;
+ GLenum format = 0;
+ GLenum type = 0;
+ bool is_cleared = false;
+ bool is_rgb_emulation = false;
+ bool framebuffer_attachment_angle = false;
+ bool has_immutable_storage = false;
+ };
+
+ // Helper function to create a GL texture.
+ static void MakeTextureAndSetParameters(
+ GLenum target,
+ GLuint service_id,
+ bool framebuffer_attachment_angle,
+ scoped_refptr<gles2::TexturePassthrough>* passthrough_texture,
+ gles2::Texture** texture);
+};
+
+// Skia representation for both SharedImageBackingGLTexture and
+class SharedImageRepresentationSkiaImpl : public SharedImageRepresentationSkia {
+ public:
+ class Client {
+ public:
+ virtual bool OnSkiaBeginReadAccess() = 0;
+ virtual bool OnSkiaBeginWriteAccess() = 0;
+ };
+ SharedImageRepresentationSkiaImpl(
+ SharedImageManager* manager,
+ SharedImageBacking* backing,
+ Client* client,
+ scoped_refptr<SharedContextState> context_state,
+ sk_sp<SkPromiseImageTexture> promise_texture,
+ MemoryTypeTracker* tracker);
+ ~SharedImageRepresentationSkiaImpl() override;
+
+ void SetBeginReadAccessCallback(
+ base::RepeatingClosure begin_read_access_callback);
+
+ private:
+ // SharedImageRepresentationSkia:
+ sk_sp<SkSurface> BeginWriteAccess(
+ int final_msaa_count,
+ const SkSurfaceProps& surface_props,
+ std::vector<GrBackendSemaphore>* begin_semaphores,
+ std::vector<GrBackendSemaphore>* end_semaphores) override;
+ void EndWriteAccess(sk_sp<SkSurface> surface) override;
+ sk_sp<SkPromiseImageTexture> BeginReadAccess(
+ std::vector<GrBackendSemaphore>* begin_semaphores,
+ std::vector<GrBackendSemaphore>* end_semaphores) override;
+ void EndReadAccess() override;
+ bool SupportsMultipleConcurrentReadAccess() override;
+
+ void CheckContext();
+
+ Client* const client_ = nullptr;
+ scoped_refptr<SharedContextState> context_state_;
+ sk_sp<SkPromiseImageTexture> promise_texture_;
+
+ SkSurface* write_surface_ = nullptr;
+#if DCHECK_IS_ON()
+ gl::GLContext* context_ = nullptr;
+#endif
+};
+
+// Implementation of SharedImageBacking that creates a GL Texture that is not
+// backed by a GLImage.
+class SharedImageBackingGLTexture : public SharedImageBacking {
+ public:
+ SharedImageBackingGLTexture(const Mailbox& mailbox,
+ viz::ResourceFormat format,
+ const gfx::Size& size,
+ const gfx::ColorSpace& color_space,
+ uint32_t usage,
+ bool is_passthrough);
+ SharedImageBackingGLTexture(const SharedImageBackingGLTexture&) = delete;
+ SharedImageBackingGLTexture& operator=(const SharedImageBackingGLTexture&) =
+ delete;
+ ~SharedImageBackingGLTexture() override;
+
+ void InitializeGLTexture(
+ GLuint service_id,
+ const SharedImageBackingGLCommon::InitializeGLTextureParams& params);
+ void SetCompatibilitySwizzle(
+ const gles2::Texture::CompatibilitySwizzle* swizzle);
+
+ GLenum GetGLTarget() const;
+ GLuint GetGLServiceId() const;
+
+ private:
+ // SharedImageBacking:
+ void OnMemoryDump(const std::string& dump_name,
+ base::trace_event::MemoryAllocatorDump* dump,
+ base::trace_event::ProcessMemoryDump* pmd,
+ uint64_t client_tracing_id) override;
+ gfx::Rect ClearedRect() const final;
+ void SetClearedRect(const gfx::Rect& cleared_rect) final;
+ bool ProduceLegacyMailbox(MailboxManager* mailbox_manager) final;
+ std::unique_ptr<SharedImageRepresentationGLTexture> ProduceGLTexture(
+ SharedImageManager* manager,
+ MemoryTypeTracker* tracker) final;
+ std::unique_ptr<SharedImageRepresentationGLTexturePassthrough>
+ ProduceGLTexturePassthrough(SharedImageManager* manager,
+ MemoryTypeTracker* tracker) final;
+ std::unique_ptr<SharedImageRepresentationDawn> ProduceDawn(
+ SharedImageManager* manager,
+ MemoryTypeTracker* tracker,
+ WGPUDevice device) final;
+ std::unique_ptr<SharedImageRepresentationSkia> ProduceSkia(
+ SharedImageManager* manager,
+ MemoryTypeTracker* tracker,
+ scoped_refptr<SharedContextState> context_state) override;
+ void Update(std::unique_ptr<gfx::GpuFence> in_fence) override;
+
+ bool IsPassthrough() const { return is_passthrough_; }
+
+ const bool is_passthrough_;
+ gles2::Texture* texture_ = nullptr;
+ scoped_refptr<gles2::TexturePassthrough> passthrough_texture_;
+
+ sk_sp<SkPromiseImageTexture> cached_promise_texture_;
+};
+
+// Implementation of SharedImageBacking that creates a GL Texture that is backed
+// by a GLImage and stores it as a gles2::Texture. Can be used with the legacy
+// mailbox implementation.
+class SharedImageBackingGLImage
+ : public SharedImageBacking,
+ public SharedImageRepresentationGLTextureImpl::Client,
+ public SharedImageRepresentationGLTexturePassthroughImpl::Client,
+ public SharedImageRepresentationSkiaImpl::Client {
+ public:
+ SharedImageBackingGLImage(
+ scoped_refptr<gl::GLImage> image,
+ const Mailbox& mailbox,
+ viz::ResourceFormat format,
+ const gfx::Size& size,
+ const gfx::ColorSpace& color_space,
+ uint32_t usage,
+ const SharedImageBackingGLCommon::InitializeGLTextureParams& params,
+ const SharedImageBackingFactoryGLTexture::UnpackStateAttribs& attribs,
+ bool is_passthrough);
+ SharedImageBackingGLImage(const SharedImageBackingGLImage& other) = delete;
+ SharedImageBackingGLImage& operator=(const SharedImageBackingGLImage& other) =
+ delete;
+ ~SharedImageBackingGLImage() override;
+
+ bool InitializeGLTexture();
+ void InitializePixels(GLenum format, GLenum type, const uint8_t* data);
+
+ GLenum GetGLTarget() const;
+ GLuint GetGLServiceId() const;
+
+ private:
+ // SharedImageBacking:
+ scoped_refptr<gfx::NativePixmap> GetNativePixmap() override;
+ void OnMemoryDump(const std::string& dump_name,
+ base::trace_event::MemoryAllocatorDump* dump,
+ base::trace_event::ProcessMemoryDump* pmd,
+ uint64_t client_tracing_id) override;
+ gfx::Rect ClearedRect() const final;
+ void SetClearedRect(const gfx::Rect& cleared_rect) final;
+ bool ProduceLegacyMailbox(MailboxManager* mailbox_manager) final;
+ std::unique_ptr<SharedImageRepresentationGLTexture> ProduceGLTexture(
+ SharedImageManager* manager,
+ MemoryTypeTracker* tracker) final;
+ std::unique_ptr<SharedImageRepresentationGLTexturePassthrough>
+ ProduceGLTexturePassthrough(SharedImageManager* manager,
+ MemoryTypeTracker* tracker) final;
+ std::unique_ptr<SharedImageRepresentationOverlay> ProduceOverlay(
+ SharedImageManager* manager,
+ MemoryTypeTracker* tracker) final;
+ std::unique_ptr<SharedImageRepresentationDawn> ProduceDawn(
+ SharedImageManager* manager,
+ MemoryTypeTracker* tracker,
+ WGPUDevice device) final;
+ std::unique_ptr<SharedImageRepresentationSkia> ProduceSkia(
+ SharedImageManager* manager,
+ MemoryTypeTracker* tracker,
+ scoped_refptr<SharedContextState> context_state) override;
+ std::unique_ptr<SharedImageRepresentationGLTexture>
+ ProduceRGBEmulationGLTexture(SharedImageManager* manager,
+ MemoryTypeTracker* tracker) override;
+ void Update(std::unique_ptr<gfx::GpuFence> in_fence) override;
+
+ // SharedImageRepresentationGLTextureImpl::Client:
+ bool OnGLTextureBeginAccess(GLenum mode) override;
+
+ // SharedImageRepresentationGLTexturePassthroughImpl::Client:
+ bool OnGLTexturePassthroughBeginAccess(GLenum mode) override;
+
+  // SharedImageRepresentationSkiaImpl::Client:
+ bool OnSkiaBeginReadAccess() override;
+ bool OnSkiaBeginWriteAccess() override;
+
+ bool IsPassthrough() const { return is_passthrough_; }
+
+ scoped_refptr<gl::GLImage> image_;
+
+ // If |image_bind_or_copy_needed_| is true, then either bind or copy |image_|
+ // to the GL texture, and un-set |image_bind_or_copy_needed_|.
+ bool BindOrCopyImageIfNeeded();
+ bool image_bind_or_copy_needed_ = true;
+
+ const SharedImageBackingGLCommon::InitializeGLTextureParams gl_params_;
+ const SharedImageBackingFactoryGLTexture::UnpackStateAttribs
+ gl_unpack_attribs_;
+ const bool is_passthrough_;
+
+ gles2::Texture* rgb_emulation_texture_ = nullptr;
+ gles2::Texture* texture_ = nullptr;
+ scoped_refptr<gles2::TexturePassthrough> passthrough_texture_;
+
+ sk_sp<SkPromiseImageTexture> cached_promise_texture_;
+
+ base::WeakPtrFactory<SharedImageBackingGLImage> weak_factory_;
+};
+
+} // namespace gpu
+
+#endif // GPU_COMMAND_BUFFER_SERVICE_SHARED_IMAGE_BACKING_FACTORY_GL_TEXTURE_INTERNAL_H_
diff --git a/chromium/gpu/command_buffer/service/shared_image_backing_factory_gl_texture_unittest.cc b/chromium/gpu/command_buffer/service/shared_image_backing_factory_gl_texture_unittest.cc
index fb37ea94ee4..ccbe66b99c9 100644
--- a/chromium/gpu/command_buffer/service/shared_image_backing_factory_gl_texture_unittest.cc
+++ b/chromium/gpu/command_buffer/service/shared_image_backing_factory_gl_texture_unittest.cc
@@ -863,6 +863,22 @@ TEST_P(SharedImageBackingFactoryGLTextureWithGMBTest,
EXPECT_TRUE(stub_image->bound());
int update_counter = stub_image->update_counter();
ref->Update(nullptr);
+ EXPECT_EQ(stub_image->update_counter(), update_counter);
+ EXPECT_TRUE(stub_image->bound());
+
+ // TODO(https://crbug.com/1092155): When we lazily bind the GLImage, this
+ // will be needed to trigger binding the GLImage.
+ {
+ auto skia_representation =
+ shared_image_representation_factory_->ProduceSkia(mailbox,
+ context_state_);
+ std::vector<GrBackendSemaphore> begin_semaphores;
+ std::vector<GrBackendSemaphore> end_semaphores;
+ std::unique_ptr<SharedImageRepresentationSkia::ScopedReadAccess>
+ scoped_read_access;
+ skia_representation->BeginScopedReadAccess(&begin_semaphores,
+ &end_semaphores);
+ }
EXPECT_TRUE(stub_image->bound());
EXPECT_GT(stub_image->update_counter(), update_counter);
}
diff --git a/chromium/gpu/command_buffer/service/shared_image_backing_factory_iosurface.h b/chromium/gpu/command_buffer/service/shared_image_backing_factory_iosurface.h
index 4d7006bc582..d0335b8a227 100644
--- a/chromium/gpu/command_buffer/service/shared_image_backing_factory_iosurface.h
+++ b/chromium/gpu/command_buffer/service/shared_image_backing_factory_iosurface.h
@@ -9,9 +9,12 @@
#include "base/macros.h"
#include "components/viz/common/resources/resource_format.h"
+#include "gpu/command_buffer/service/shared_image_backing.h"
#include "gpu/command_buffer/service/shared_image_backing_factory.h"
+#include "gpu/command_buffer/service/shared_image_representation.h"
#include "gpu/gpu_gles2_export.h"
#include "ui/gl/gl_bindings.h"
+#include "ui/gl/gl_image.h"
namespace gfx {
class Size;
@@ -22,7 +25,6 @@ namespace gpu {
class GpuDriverBugWorkarounds;
struct GpuFeatureInfo;
struct Mailbox;
-class SharedImageBacking;
// Implementation of SharedImageBackingFactory that produce IOSurface backed
// SharedImages. This is meant to be used on macOS only.
@@ -34,6 +36,24 @@ class GPU_GLES2_EXPORT SharedImageBackingFactoryIOSurface
bool use_gl);
~SharedImageBackingFactoryIOSurface() override;
+  // Helper functions used by SharedImageRepresentationGLImage to do
+ // IOSurface-specific sharing.
+ static sk_sp<SkPromiseImageTexture> ProduceSkiaPromiseTextureMetal(
+ SharedImageBacking* backing,
+ scoped_refptr<SharedContextState> context_state,
+ scoped_refptr<gl::GLImage> image);
+ static std::unique_ptr<SharedImageRepresentationOverlay> ProduceOverlay(
+ SharedImageManager* manager,
+ SharedImageBacking* backing,
+ MemoryTypeTracker* tracker,
+ scoped_refptr<gl::GLImage> image);
+ static std::unique_ptr<SharedImageRepresentationDawn> ProduceDawn(
+ SharedImageManager* manager,
+ SharedImageBacking* backing,
+ MemoryTypeTracker* tracker,
+ WGPUDevice device,
+ scoped_refptr<gl::GLImage> image);
+
// SharedImageBackingFactory implementation.
std::unique_ptr<SharedImageBacking> CreateSharedImage(
const Mailbox& mailbox,
diff --git a/chromium/gpu/command_buffer/service/shared_image_backing_factory_iosurface.mm b/chromium/gpu/command_buffer/service/shared_image_backing_factory_iosurface.mm
index f2e9f952c4b..7e01171b0c1 100644
--- a/chromium/gpu/command_buffer/service/shared_image_backing_factory_iosurface.mm
+++ b/chromium/gpu/command_buffer/service/shared_image_backing_factory_iosurface.mm
@@ -110,28 +110,11 @@ base::scoped_nsprotocol<id<MTLTexture>> API_AVAILABLE(macos(10.11))
viz::ResourceFormat format) {
TRACE_EVENT0("gpu", "SharedImageBackingFactoryIOSurface::CreateMetalTexture");
base::scoped_nsprotocol<id<MTLTexture>> mtl_texture;
- MTLPixelFormat mtl_pixel_format;
- switch (format) {
- case viz::RED_8:
- case viz::ALPHA_8:
- case viz::LUMINANCE_8:
- mtl_pixel_format = MTLPixelFormatR8Unorm;
- break;
- case viz::RG_88:
- mtl_pixel_format = MTLPixelFormatRG8Unorm;
- break;
- case viz::RGBA_8888:
- mtl_pixel_format = MTLPixelFormatRGBA8Unorm;
- break;
- case viz::BGRA_8888:
- mtl_pixel_format = MTLPixelFormatBGRA8Unorm;
- break;
- default:
- // TODO(https://crbug.com/952063): Add support for all formats supported
- // by GLImageIOSurface.
- DLOG(ERROR) << "Resource format not yet supported in Metal.";
- return mtl_texture;
- }
+ MTLPixelFormat mtl_pixel_format =
+ static_cast<MTLPixelFormat>(viz::ToMTLPixelFormat(format));
+ if (mtl_pixel_format == MTLPixelFormatInvalid)
+ return mtl_texture;
+
base::scoped_nsobject<MTLTextureDescriptor> mtl_tex_desc(
[MTLTextureDescriptor new]);
[mtl_tex_desc setTextureType:MTLTextureType2D];
@@ -186,6 +169,32 @@ class SharedImageRepresentationGLTextureIOSurface
DISALLOW_COPY_AND_ASSIGN(SharedImageRepresentationGLTextureIOSurface);
};
+class SharedImageRepresentationGLTexturePassthroughIOSurface
+ : public SharedImageRepresentationGLTexturePassthrough {
+ public:
+ SharedImageRepresentationGLTexturePassthroughIOSurface(
+ SharedImageManager* manager,
+ SharedImageBacking* backing,
+ MemoryTypeTracker* tracker,
+ scoped_refptr<gles2::TexturePassthrough> texture_passthrough)
+ : SharedImageRepresentationGLTexturePassthrough(manager,
+ backing,
+ tracker),
+ texture_passthrough_(texture_passthrough) {}
+
+ const scoped_refptr<gles2::TexturePassthrough>& GetTexturePassthrough()
+ override {
+ return texture_passthrough_;
+ }
+ bool BeginAccess(GLenum mode) override { return true; }
+ void EndAccess() override { FlushIOSurfaceGLOperations(); }
+
+ private:
+ scoped_refptr<gles2::TexturePassthrough> texture_passthrough_;
+ DISALLOW_COPY_AND_ASSIGN(
+ SharedImageRepresentationGLTexturePassthroughIOSurface);
+};
+
// Representation of a SharedImageBackingIOSurface as a Skia Texture.
class SharedImageRepresentationSkiaIOSurface
: public SharedImageRepresentationSkia {
@@ -217,7 +226,7 @@ class SharedImageRepresentationSkiaIOSurface
SkColorType sk_color_type = viz::ResourceFormatToClosestSkColorType(
/*gpu_compositing=*/true, format());
- return SkSurface::MakeFromBackendTextureAsRenderTarget(
+ return SkSurface::MakeFromBackendTexture(
context_state_->gr_context(), promise_texture_->backendTexture(),
kTopLeft_GrSurfaceOrigin, final_msaa_count, sk_color_type,
backing()->color_space().ToSkColorSpace(), &surface_props);
@@ -250,6 +259,26 @@ class SharedImageRepresentationSkiaIOSurface
gles2::Texture* const gles2_texture_;
};
+class SharedImageRepresentationOverlayIOSurface
+ : public SharedImageRepresentationOverlay {
+ public:
+ SharedImageRepresentationOverlayIOSurface(SharedImageManager* manager,
+ SharedImageBacking* backing,
+ MemoryTypeTracker* tracker,
+ scoped_refptr<gl::GLImage> gl_image)
+ : SharedImageRepresentationOverlay(manager, backing, tracker),
+ gl_image_(gl_image) {}
+
+ ~SharedImageRepresentationOverlayIOSurface() override { EndReadAccess(); }
+
+ private:
+ bool BeginReadAccess() override { return true; }
+ void EndReadAccess() override {}
+ gl::GLImage* GetGLImage() override { return gl_image_.get(); }
+
+ scoped_refptr<gl::GLImage> gl_image_;
+};
+
// Representation of a SharedImageBackingIOSurface as a Dawn Texture.
#if BUILDFLAG(USE_DAWN)
class SharedImageRepresentationDawnIOSurface
@@ -414,7 +443,7 @@ class SharedImageBackingIOSurface : public ClearTrackingSharedImageBacking {
bool ProduceLegacyMailbox(MailboxManager* mailbox_manager) final {
DCHECK(io_surface_);
- legacy_texture_ = GenGLTexture();
+ GenGLTexture(&legacy_texture_, nullptr);
if (!legacy_texture_) {
return false;
}
@@ -432,15 +461,28 @@ class SharedImageBackingIOSurface : public ClearTrackingSharedImageBacking {
std::unique_ptr<SharedImageRepresentationGLTexture> ProduceGLTexture(
SharedImageManager* manager,
MemoryTypeTracker* tracker) final {
- gles2::Texture* texture = GenGLTexture();
- if (!texture) {
+ gles2::Texture* texture = nullptr;
+ GenGLTexture(&texture, nullptr);
+ if (!texture)
return nullptr;
- }
-
return std::make_unique<SharedImageRepresentationGLTextureIOSurface>(
manager, this, tracker, texture);
}
+ std::unique_ptr<SharedImageRepresentationGLTexturePassthrough>
+ ProduceGLTexturePassthrough(SharedImageManager* manager,
+ MemoryTypeTracker* tracker) override {
+ TRACE_EVENT0("gpu",
+ "SharedImageBackingFactoryIOSurface::GenGLTexturePassthrough");
+ scoped_refptr<gles2::TexturePassthrough> texture_passthrough;
+ GenGLTexture(nullptr, &texture_passthrough);
+ if (!texture_passthrough)
+ return nullptr;
+ return std::make_unique<
+ SharedImageRepresentationGLTexturePassthroughIOSurface>(
+ manager, this, tracker, texture_passthrough);
+ }
+
std::unique_ptr<SharedImageRepresentationSkia> ProduceSkia(
SharedImageManager* manager,
MemoryTypeTracker* tracker,
@@ -448,7 +490,7 @@ class SharedImageBackingIOSurface : public ClearTrackingSharedImageBacking {
gles2::Texture* gles2_texture = nullptr;
GrBackendTexture gr_backend_texture;
if (context_state->GrContextIsGL()) {
- gles2_texture = GenGLTexture();
+ GenGLTexture(&gles2_texture, nullptr);
if (!gles2_texture)
return nullptr;
GetGrBackendTexture(
@@ -475,6 +517,15 @@ class SharedImageBackingIOSurface : public ClearTrackingSharedImageBacking {
gles2_texture);
}
+ std::unique_ptr<SharedImageRepresentationOverlay> ProduceOverlay(
+ SharedImageManager* manager,
+ MemoryTypeTracker* tracker) override {
+ if (!EnsureGLImage())
+ return nullptr;
+ return SharedImageBackingFactoryIOSurface::ProduceOverlay(
+ manager, this, tracker, gl_image_);
+ }
+
std::unique_ptr<SharedImageRepresentationDawn> ProduceDawn(
SharedImageManager* manager,
MemoryTypeTracker* tracker,
@@ -493,19 +544,35 @@ class SharedImageBackingIOSurface : public ClearTrackingSharedImageBacking {
}
private:
- gles2::Texture* GenGLTexture() {
+ bool EnsureGLImage() {
+ if (!gl_image_) {
+ GLFormatInfo gl_info = GetGLFormatInfo(format());
+ scoped_refptr<gl::GLImageIOSurface> gl_image(
+ gl::GLImageIOSurface::Create(size(), gl_info.internal_format));
+ if (!gl_image->Initialize(io_surface_, gfx::GenericSharedMemoryId(),
+ viz::BufferFormat(format()))) {
+ LOG(ERROR) << "Failed to create GLImageIOSurface";
+ } else {
+ gl_image_ = gl_image;
+ }
+ }
+ return !!gl_image_;
+ }
+
+ void GenGLTexture(
+ gles2::Texture** texture,
+ scoped_refptr<gles2::TexturePassthrough>* texture_passthrough) {
TRACE_EVENT0("gpu", "SharedImageBackingFactoryIOSurface::GenGLTexture");
GLFormatInfo gl_info = GetGLFormatInfo(format());
DCHECK(gl_info.supported);
+ if (texture)
+ *texture = nullptr;
+ if (texture_passthrough)
+ *texture_passthrough = nullptr;
// Wrap the IOSurface in a GLImageIOSurface
- scoped_refptr<gl::GLImageIOSurface> image(
- gl::GLImageIOSurface::Create(size(), gl_info.internal_format));
- if (!image->Initialize(io_surface_, gfx::GenericSharedMemoryId(),
- viz::BufferFormat(format()))) {
- LOG(ERROR) << "Failed to create GLImageIOSurface";
- return nullptr;
- }
+ if (!EnsureGLImage())
+ return;
gl::GLApi* api = gl::g_current_gl_context;
@@ -527,37 +594,48 @@ class SharedImageBackingIOSurface : public ClearTrackingSharedImageBacking {
GL_CLAMP_TO_EDGE);
// Bind the GLImageIOSurface to our texture
- if (!image->BindTexImage(GL_TEXTURE_RECTANGLE)) {
+ if (!gl_image_->BindTexImage(GL_TEXTURE_RECTANGLE)) {
LOG(ERROR) << "Failed to bind GLImageIOSurface";
api->glBindTextureFn(GL_TEXTURE_RECTANGLE, old_texture_binding);
api->glDeleteTexturesFn(1, &service_id);
- return nullptr;
+ return;
}
// If the backing is already cleared, no need to clear it again.
gfx::Rect cleared_rect = ClearedRect();
// Manually create a gles2::Texture wrapping our driver texture.
- gles2::Texture* texture = new gles2::Texture(service_id);
- texture->SetLightweightRef();
- texture->SetTarget(GL_TEXTURE_RECTANGLE, 1);
- texture->sampler_state_.min_filter = GL_LINEAR;
- texture->sampler_state_.mag_filter = GL_LINEAR;
- texture->sampler_state_.wrap_t = GL_CLAMP_TO_EDGE;
- texture->sampler_state_.wrap_s = GL_CLAMP_TO_EDGE;
- texture->SetLevelInfo(GL_TEXTURE_RECTANGLE, 0, gl_info.internal_format,
- size().width(), size().height(), 1, 0, gl_info.format,
- gl_info.type, cleared_rect);
- texture->SetLevelImage(GL_TEXTURE_RECTANGLE, 0, image.get(),
- gles2::Texture::BOUND);
- texture->SetImmutable(true, false);
-
- DCHECK_EQ(image->GetInternalFormat(), gl_info.internal_format);
+ if (texture) {
+ *texture = new gles2::Texture(service_id);
+ (*texture)->SetLightweightRef();
+ (*texture)->SetTarget(GL_TEXTURE_RECTANGLE, 1);
+ (*texture)->set_min_filter(GL_LINEAR);
+ (*texture)->set_mag_filter(GL_LINEAR);
+ (*texture)->set_wrap_t(GL_CLAMP_TO_EDGE);
+ (*texture)->set_wrap_s(GL_CLAMP_TO_EDGE);
+ (*texture)->SetLevelInfo(GL_TEXTURE_RECTANGLE, 0, gl_info.internal_format,
+ size().width(), size().height(), 1, 0,
+ gl_info.format, gl_info.type, cleared_rect);
+ (*texture)->SetLevelImage(GL_TEXTURE_RECTANGLE, 0, gl_image_.get(),
+ gles2::Texture::BOUND);
+ (*texture)->SetImmutable(true, false);
+ }
+ if (texture_passthrough) {
+ *texture_passthrough = scoped_refptr<gles2::TexturePassthrough>(
+ new gles2::TexturePassthrough(service_id, GL_TEXTURE_RECTANGLE,
+ gl_info.internal_format, size().width(),
+ size().height(), 1, 0, gl_info.format,
+ gl_info.type));
+ (*texture_passthrough)
+ ->SetLevelImage(GL_TEXTURE_RECTANGLE, 0, gl_image_.get());
+ }
+
+ DCHECK_EQ(gl_image_->GetInternalFormat(), gl_info.internal_format);
api->glBindTextureFn(GL_TEXTURE_RECTANGLE, old_texture_binding);
- return texture;
}
+ scoped_refptr<gl::GLImageIOSurface> gl_image_;
base::ScopedCFTypeRef<IOSurfaceRef> io_surface_;
base::Optional<WGPUTextureFormat> dawn_format_;
base::scoped_nsprotocol<id<MTLTexture>> mtl_texture_;
@@ -715,4 +793,73 @@ bool SharedImageBackingFactoryIOSurface::CanImportGpuMemoryBuffer(
return false;
}
+// static
+sk_sp<SkPromiseImageTexture>
+SharedImageBackingFactoryIOSurface::ProduceSkiaPromiseTextureMetal(
+ SharedImageBacking* backing,
+ scoped_refptr<SharedContextState> context_state,
+ scoped_refptr<gl::GLImage> image) {
+ if (@available(macOS 10.11, *)) {
+ DCHECK(context_state->GrContextIsMetal());
+
+ base::ScopedCFTypeRef<IOSurfaceRef> io_surface =
+ static_cast<gl::GLImageIOSurface*>(image.get())->io_surface();
+
+ id<MTLDevice> mtl_device =
+ context_state->metal_context_provider()->GetMTLDevice();
+ auto mtl_texture = CreateMetalTexture(mtl_device, io_surface.get(),
+ backing->size(), backing->format());
+ DCHECK(mtl_texture);
+
+ GrMtlTextureInfo info;
+ info.fTexture.retain(mtl_texture.get());
+ auto gr_backend_texture =
+ GrBackendTexture(backing->size().width(), backing->size().height(),
+ GrMipMapped::kNo, info);
+ return SkPromiseImageTexture::Make(gr_backend_texture);
+ }
+ return nullptr;
+}
+
+// static
+std::unique_ptr<SharedImageRepresentationOverlay>
+SharedImageBackingFactoryIOSurface::ProduceOverlay(
+ SharedImageManager* manager,
+ SharedImageBacking* backing,
+ MemoryTypeTracker* tracker,
+ scoped_refptr<gl::GLImage> image) {
+ return std::make_unique<SharedImageRepresentationOverlayIOSurface>(
+ manager, backing, tracker, image);
+}
+
+// static
+std::unique_ptr<SharedImageRepresentationDawn>
+SharedImageBackingFactoryIOSurface::ProduceDawn(
+ SharedImageManager* manager,
+ SharedImageBacking* backing,
+ MemoryTypeTracker* tracker,
+ WGPUDevice device,
+ scoped_refptr<gl::GLImage> image) {
+#if BUILDFLAG(USE_DAWN)
+ // See comments in SharedImageBackingFactoryIOSurface::CreateSharedImage
+ // regarding RGBA versus BGRA.
+ viz::ResourceFormat actual_format = backing->format();
+ if (actual_format == viz::RGBA_8888)
+ actual_format = viz::BGRA_8888;
+
+ base::ScopedCFTypeRef<IOSurfaceRef> io_surface =
+ static_cast<gl::GLImageIOSurface*>(image.get())->io_surface();
+
+ base::Optional<WGPUTextureFormat> wgpu_format =
+ viz::ToWGPUFormat(actual_format);
+ if (wgpu_format.value() == WGPUTextureFormat_Undefined)
+ return nullptr;
+
+ return std::make_unique<SharedImageRepresentationDawnIOSurface>(
+ manager, backing, tracker, device, io_surface, wgpu_format.value());
+#else // BUILDFLAG(USE_DAWN)
+ return nullptr;
+#endif // BUILDFLAG(USE_DAWN)
+}
+
} // namespace gpu
diff --git a/chromium/gpu/command_buffer/service/shared_image_factory.cc b/chromium/gpu/command_buffer/service/shared_image_factory.cc
index 2eb65e9ba98..36d96fb7896 100644
--- a/chromium/gpu/command_buffer/service/shared_image_factory.cc
+++ b/chromium/gpu/command_buffer/service/shared_image_factory.cc
@@ -86,8 +86,7 @@ SharedImageFactory::SharedImageFactory(
shared_context_state_(context_state),
memory_tracker_(std::make_unique<MemoryTypeTracker>(memory_tracker)),
using_vulkan_(context_state && context_state->GrContextIsVulkan()),
- using_metal_(context_state && context_state->GrContextIsMetal()),
- using_dawn_(context_state && context_state->GrContextIsDawn()) {
+ using_skia_dawn_(context_state && context_state->GrContextIsDawn()) {
bool use_gl = gl::GetGLImplementation() != gl::kGLImplementationNone;
if (use_gl) {
gl_backing_factory_ = std::make_unique<SharedImageBackingFactoryGLTexture>(
@@ -203,7 +202,7 @@ bool SharedImageFactory::CreateSharedImage(const Mailbox& mailbox,
SharedImageBackingFactory* factory = nullptr;
if (backing_factory_for_testing_) {
factory = backing_factory_for_testing_;
- } else if (!using_vulkan_ && !using_dawn_) {
+ } else if (!using_vulkan_ && !using_skia_dawn_) {
allow_legacy_mailbox = true;
factory = gl_backing_factory_.get();
} else {
@@ -213,6 +212,8 @@ bool SharedImageFactory::CreateSharedImage(const Mailbox& mailbox,
return false;
auto backing = factory->CreateSharedImage(mailbox, format, size, color_space,
usage, data);
+ if (backing)
+ backing->OnWriteSucceeded();
return RegisterBacking(std::move(backing), allow_legacy_mailbox);
}
@@ -235,6 +236,8 @@ bool SharedImageFactory::CreateSharedImage(const Mailbox& mailbox,
auto backing =
factory->CreateSharedImage(mailbox, client_id, std::move(handle), format,
surface_handle, size, color_space, usage);
+ if (backing)
+ backing->OnWriteSucceeded();
return RegisterBacking(std::move(backing), allow_legacy_mailbox);
}
@@ -310,7 +313,9 @@ bool SharedImageFactory::PresentSwapChain(const Mailbox& mailbox) {
#if defined(OS_FUCHSIA)
bool SharedImageFactory::RegisterSysmemBufferCollection(
gfx::SysmemBufferCollectionId id,
- zx::channel token) {
+ zx::channel token,
+ gfx::BufferFormat format,
+ gfx::BufferUsage usage) {
decltype(buffer_collections_)::iterator it;
bool inserted;
std::tie(it, inserted) =
@@ -331,9 +336,9 @@ bool SharedImageFactory::RegisterSysmemBufferCollection(
VkDevice device =
vulkan_context_provider_->GetDeviceQueue()->GetVulkanDevice();
DCHECK(device != VK_NULL_HANDLE);
- it->second =
- vulkan_context_provider_->GetVulkanImplementation()
- ->RegisterSysmemBufferCollection(device, id, std::move(token));
+ it->second = vulkan_context_provider_->GetVulkanImplementation()
+ ->RegisterSysmemBufferCollection(
+ device, id, std::move(token), format, usage);
return true;
}
@@ -371,6 +376,25 @@ bool SharedImageFactory::IsSharedBetweenThreads(uint32_t usage) {
(usage & SHARED_IMAGE_USAGE_DISPLAY);
}
+bool SharedImageFactory::CanUseWrappedSkImage(uint32_t usage) const {
+ if (!wrapped_sk_image_factory_)
+ return false;
+
+ constexpr auto kWrappedSkImageUsage = SHARED_IMAGE_USAGE_RASTER |
+ SHARED_IMAGE_USAGE_OOP_RASTERIZATION |
+ SHARED_IMAGE_USAGE_DISPLAY;
+
+ if (using_vulkan_ || using_skia_dawn_) {
+ // For SkiaRenderer/Vulkan+Dawn use WrappedSkImage if the usage is only
+ // raster and/or display.
+ return (usage & kWrappedSkImageUsage) && !(usage & ~kWrappedSkImageUsage);
+ } else {
+    // For SkiaRenderer/GL only use WrappedSkImages for OOP-R because
+ // CopySubTexture() doesn't use Skia. https://crbug.com/984045
+ return usage == kWrappedSkImageUsage;
+ }
+}
+
SharedImageBackingFactory* SharedImageFactory::GetFactoryByUsage(
uint32_t usage,
viz::ResourceFormat format,
@@ -382,12 +406,9 @@ SharedImageBackingFactory* SharedImageFactory::GetFactoryByUsage(
bool using_dawn = usage & SHARED_IMAGE_USAGE_WEBGPU;
bool vulkan_usage = using_vulkan_ && (usage & SHARED_IMAGE_USAGE_DISPLAY);
bool gl_usage = usage & SHARED_IMAGE_USAGE_GLES2;
- bool share_between_gl_metal =
- using_metal_ && (usage & SHARED_IMAGE_USAGE_OOP_RASTERIZATION);
bool share_between_threads = IsSharedBetweenThreads(usage);
bool share_between_gl_vulkan = gl_usage && vulkan_usage;
bool using_interop_factory = share_between_gl_vulkan || using_dawn ||
- share_between_gl_metal ||
(usage & SHARED_IMAGE_USAGE_VIDEO_DECODE) ||
(share_between_threads && vulkan_usage);
@@ -397,23 +418,25 @@ SharedImageBackingFactory* SharedImageFactory::GetFactoryByUsage(
using_interop_factory |= usage & SHARED_IMAGE_USAGE_SCANOUT;
#endif
- // wrapped_sk_image_factory_ is only used for OOPR and supports
- // a limited number of flags (e.g. no SHARED_IMAGE_USAGE_SCANOUT).
- constexpr auto kWrappedSkImageUsage = SHARED_IMAGE_USAGE_RASTER |
- SHARED_IMAGE_USAGE_OOP_RASTERIZATION |
- SHARED_IMAGE_USAGE_DISPLAY;
- bool using_wrapped_sk_image =
- wrapped_sk_image_factory_ && (usage == kWrappedSkImageUsage) &&
- !using_interop_factory && !share_between_threads;
- using_interop_factory |= vulkan_usage && !using_wrapped_sk_image;
+ bool using_wrapped_sk_image = !using_interop_factory &&
+ !share_between_threads &&
+ CanUseWrappedSkImage(usage);
+ if (using_wrapped_sk_image) {
+ if (gmb_type == gfx::EMPTY_BUFFER ||
+ wrapped_sk_image_factory_->CanImportGpuMemoryBuffer(gmb_type)) {
+ *allow_legacy_mailbox = false;
+ return wrapped_sk_image_factory_.get();
+ }
+ }
+
+ using_interop_factory |= vulkan_usage;
if (gmb_type != gfx::EMPTY_BUFFER) {
bool interop_factory_supports_gmb =
interop_backing_factory_ &&
interop_backing_factory_->CanImportGpuMemoryBuffer(gmb_type);
- if (using_wrapped_sk_image ||
- (using_interop_factory && !interop_backing_factory_)) {
+ if (using_interop_factory && !interop_backing_factory_) {
LOG(ERROR) << "Unable to create SharedImage backing: no support for the "
"requested GpuMemoryBufferType.";
return nullptr;
@@ -424,11 +447,8 @@ SharedImageBackingFactory* SharedImageFactory::GetFactoryByUsage(
using_interop_factory |= interop_factory_supports_gmb;
}
- *allow_legacy_mailbox = !using_wrapped_sk_image && !using_interop_factory &&
- !using_vulkan_ && !share_between_threads;
-
- if (using_wrapped_sk_image)
- return wrapped_sk_image_factory_.get();
+ *allow_legacy_mailbox =
+ !using_interop_factory && !using_vulkan_ && !share_between_threads;
if (using_interop_factory) {
// TODO(crbug.com/969114): Not all shared image factory implementations
diff --git a/chromium/gpu/command_buffer/service/shared_image_factory.h b/chromium/gpu/command_buffer/service/shared_image_factory.h
index 60cb6aa9346..9753cf95a2a 100644
--- a/chromium/gpu/command_buffer/service/shared_image_factory.h
+++ b/chromium/gpu/command_buffer/service/shared_image_factory.h
@@ -99,7 +99,9 @@ class GPU_GLES2_EXPORT SharedImageFactory {
#if defined(OS_FUCHSIA)
bool RegisterSysmemBufferCollection(gfx::SysmemBufferCollectionId id,
- zx::channel token);
+ zx::channel token,
+ gfx::BufferFormat format,
+ gfx::BufferUsage usage);
bool ReleaseSysmemBufferCollection(gfx::SysmemBufferCollectionId id);
#endif // defined(OS_FUCHSIA)
@@ -117,20 +119,23 @@ class GPU_GLES2_EXPORT SharedImageFactory {
void RegisterSharedImageBackingFactoryForTesting(
SharedImageBackingFactory* factory);
+ MailboxManager* mailbox_manager() { return mailbox_manager_; }
+
private:
bool IsSharedBetweenThreads(uint32_t usage);
+ bool CanUseWrappedSkImage(uint32_t usage) const;
SharedImageBackingFactory* GetFactoryByUsage(
uint32_t usage,
viz::ResourceFormat format,
bool* allow_legacy_mailbox,
gfx::GpuMemoryBufferType gmb_type = gfx::EMPTY_BUFFER);
+
MailboxManager* mailbox_manager_;
SharedImageManager* shared_image_manager_;
SharedContextState* shared_context_state_;
std::unique_ptr<MemoryTypeTracker> memory_tracker_;
const bool using_vulkan_;
- const bool using_metal_;
- const bool using_dawn_;
+ const bool using_skia_dawn_;
// The set of SharedImages which have been created (and are being kept alive)
// by this factory.
diff --git a/chromium/gpu/command_buffer/service/shared_image_manager.cc b/chromium/gpu/command_buffer/service/shared_image_manager.cc
index ec4004578a8..578b38c7b84 100644
--- a/chromium/gpu/command_buffer/service/shared_image_manager.cc
+++ b/chromium/gpu/command_buffer/service/shared_image_manager.cc
@@ -103,17 +103,19 @@ SharedImageManager::Register(std::unique_ptr<SharedImageBacking> backing,
DCHECK(backing->mailbox().IsSharedImage());
AutoLock autolock(this);
- const auto lower_bound = images_.lower_bound(backing->mailbox());
- if (lower_bound != images_.end() &&
- (*lower_bound)->mailbox() == backing->mailbox()) {
+ if (images_.find(backing->mailbox()) != images_.end()) {
LOG(ERROR) << "SharedImageManager::Register: Trying to register an "
"already registered mailbox.";
return nullptr;
}
+ // TODO(jonross): Determine how the direct destruction of a
+ // SharedImageRepresentationFactoryRef leads to ref-counting issues as
+ // well as thread-checking failures in tests.
auto factory_ref = std::make_unique<SharedImageRepresentationFactoryRef>(
this, backing.get(), tracker);
- images_.emplace_hint(lower_bound, std::move(backing));
+ images_.emplace(std::move(backing));
+
return factory_ref;
}
@@ -301,21 +303,32 @@ void SharedImageManager::OnRepresentationDestroyed(
CALLED_ON_VALID_THREAD();
AutoLock autolock(this);
- auto found = images_.find(mailbox);
- if (found == images_.end()) {
- LOG(ERROR) << "SharedImageManager::OnRepresentationDestroyed: Trying to "
- "destroy a non existent mailbox.";
- return;
+
+ {
+ auto found = images_.find(mailbox);
+ if (found == images_.end()) {
+ LOG(ERROR) << "SharedImageManager::OnRepresentationDestroyed: Trying to "
+ "destroy a non existent mailbox.";
+ return;
+ }
+
+ // TODO(piman): When the original (factory) representation is destroyed, we
+ // should treat the backing as pending destruction and prevent additional
+ // representations from being created. This will help avoid races due to a
+ // consumer getting lucky with timing due to a representation inadvertently
+ // extending a backing's lifetime.
+ (*found)->ReleaseRef(representation);
}
- // TODO(piman): When the original (factory) representation is destroyed, we
- // should treat the backing as pending destruction and prevent additional
- // representations from being created. This will help avoid races due to a
- // consumer getting lucky with timing due to a representation inadvertently
- // extending a backing's lifetime.
- (*found)->ReleaseRef(representation);
- if (!(*found)->HasAnyRefs())
- images_.erase(found);
+ {
+ // TODO(jonross): Once the pending destruction TODO above is addressed then
+ // this block can be removed, and the deletion can occur directly. Currently
+ // SharedImageManager::OnRepresentationDestroyed can be nested, so we need
+ // to get the iterator again.
+ auto found = images_.find(mailbox);
+ if (found != images_.end() && (!(*found)->HasAnyRefs()))
+ images_.erase(found);
+ }
}
void SharedImageManager::OnMemoryDump(const Mailbox& mailbox,
diff --git a/chromium/gpu/command_buffer/service/shared_image_representation.cc b/chromium/gpu/command_buffer/service/shared_image_representation.cc
index 20196375765..fd2d31b5b2e 100644
--- a/chromium/gpu/command_buffer/service/shared_image_representation.cc
+++ b/chromium/gpu/command_buffer/service/shared_image_representation.cc
@@ -117,6 +117,8 @@ SharedImageRepresentationSkia::BeginScopedWriteAccess(
if (!surface)
return nullptr;
+ backing()->OnWriteSucceeded();
+
return std::make_unique<ScopedWriteAccess>(
util::PassKey<SharedImageRepresentationSkia>(), this, std::move(surface));
}
@@ -157,6 +159,8 @@ SharedImageRepresentationSkia::BeginScopedReadAccess(
if (!promise_image_texture)
return nullptr;
+ backing()->OnReadSucceeded();
+
return std::make_unique<ScopedReadAccess>(
util::PassKey<SharedImageRepresentationSkia>(), this,
std::move(promise_image_texture));
@@ -178,6 +182,8 @@ SharedImageRepresentationOverlay::BeginScopedReadAccess(bool needs_gl_image) {
if (!BeginReadAccess())
return nullptr;
+ backing()->OnReadSucceeded();
+
return std::make_unique<ScopedReadAccess>(
util::PassKey<SharedImageRepresentationOverlay>(), this,
needs_gl_image ? GetGLImage() : nullptr);
@@ -205,6 +211,16 @@ SharedImageRepresentationDawn::BeginScopedAccess(
WGPUTexture texture = BeginAccess(usage);
if (!texture)
return nullptr;
+
+ constexpr auto kWriteUsage =
+ WGPUTextureUsage_CopyDst | WGPUTextureUsage_OutputAttachment;
+
+ if (usage & kWriteUsage) {
+ backing()->OnWriteSucceeded();
+ } else {
+ backing()->OnReadSucceeded();
+ }
+
return std::make_unique<ScopedAccess>(
util::PassKey<SharedImageRepresentationDawn>(), this, texture);
}
diff --git a/chromium/gpu/command_buffer/service/shared_image_representation_gl_ozone.cc b/chromium/gpu/command_buffer/service/shared_image_representation_gl_ozone.cc
index c931778902f..5645db88629 100644
--- a/chromium/gpu/command_buffer/service/shared_image_representation_gl_ozone.cc
+++ b/chromium/gpu/command_buffer/service/shared_image_representation_gl_ozone.cc
@@ -63,10 +63,10 @@ SharedImageRepresentationGLOzone::Create(
gles2::Texture* texture = new gles2::Texture(gl_texture_service_id);
texture->SetLightweightRef();
texture->SetTarget(GL_TEXTURE_2D, 1 /*max_levels=*/);
- texture->sampler_state_.min_filter = GL_LINEAR;
- texture->sampler_state_.mag_filter = GL_LINEAR;
- texture->sampler_state_.wrap_t = GL_CLAMP_TO_EDGE;
- texture->sampler_state_.wrap_s = GL_CLAMP_TO_EDGE;
+ texture->set_min_filter(GL_LINEAR);
+ texture->set_mag_filter(GL_LINEAR);
+ texture->set_wrap_t(GL_CLAMP_TO_EDGE);
+ texture->set_wrap_s(GL_CLAMP_TO_EDGE);
GLenum gl_format = viz::GLDataFormat(format);
GLenum gl_type = viz::GLDataType(format);
diff --git a/chromium/gpu/command_buffer/service/shared_image_representation_skia_gl.cc b/chromium/gpu/command_buffer/service/shared_image_representation_skia_gl.cc
index d23d5358e2f..a5d75a204f0 100644
--- a/chromium/gpu/command_buffer/service/shared_image_representation_skia_gl.cc
+++ b/chromium/gpu/command_buffer/service/shared_image_representation_skia_gl.cc
@@ -96,9 +96,6 @@ sk_sp<SkSurface> SharedImageRepresentationSkiaGL::BeginWriteAccess(
SkColorType sk_color_type = viz::ResourceFormatToClosestSkColorType(
/*gpu_compositing=*/true, format());
- // TODO(https://crbug.com/1054033): Switch back to
- // MakeFromBackendTextureAsRenderTarget once we no longer use GLRendererCopier
- // with surfaceless surfaces.
auto surface = SkSurface::MakeFromBackendTexture(
context_state_->gr_context(), promise_texture_->backendTexture(),
kTopLeft_GrSurfaceOrigin, final_msaa_count, sk_color_type,
diff --git a/chromium/gpu/command_buffer/service/shared_image_video.cc b/chromium/gpu/command_buffer/service/shared_image_video.cc
index 50838310832..db9b2524073 100644
--- a/chromium/gpu/command_buffer/service/shared_image_video.cc
+++ b/chromium/gpu/command_buffer/service/shared_image_video.cc
@@ -13,6 +13,7 @@
#include "components/viz/common/resources/resource_sizes.h"
#include "gpu/command_buffer/common/shared_image_usage.h"
#include "gpu/command_buffer/service/abstract_texture.h"
+#include "gpu/command_buffer/service/ahardwarebuffer_utils.h"
#include "gpu/command_buffer/service/mailbox_manager.h"
#include "gpu/command_buffer/service/memory_tracking.h"
#include "gpu/command_buffer/service/shared_context_state.h"
@@ -145,9 +146,9 @@ class SharedImageRepresentationGLTextureVideo
gles2::Texture* GetTexture() override { return texture_; }
bool BeginAccess(GLenum mode) override {
- // This representation should only be called for read.
- DCHECK_EQ(mode,
- static_cast<GLenum>(GL_SHARED_IMAGE_ACCESS_MODE_READ_CHROMIUM));
+ // This representation should only be called for read or overlay.
+ DCHECK(mode == GL_SHARED_IMAGE_ACCESS_MODE_READ_CHROMIUM ||
+ mode == GL_SHARED_IMAGE_ACCESS_MODE_OVERLAY_CHROMIUM);
auto* video_backing = static_cast<SharedImageVideo*>(backing());
video_backing->BeginGLReadAccess();
@@ -182,9 +183,9 @@ class SharedImageRepresentationGLTexturePassthroughVideo
}
bool BeginAccess(GLenum mode) override {
- // This representation should only be called for read.
- DCHECK_EQ(mode,
- static_cast<GLenum>(GL_SHARED_IMAGE_ACCESS_MODE_READ_CHROMIUM));
+ // This representation should only be called for read or overlay.
+ DCHECK(mode == GL_SHARED_IMAGE_ACCESS_MODE_READ_CHROMIUM ||
+ mode == GL_SHARED_IMAGE_ACCESS_MODE_OVERLAY_CHROMIUM);
auto* video_backing = static_cast<SharedImageVideo*>(backing());
video_backing->BeginGLReadAccess();
@@ -266,13 +267,10 @@ class SharedImageRepresentationVideoSkiaVk
if (!vulkan_image_) {
DCHECK(!promise_texture_);
- gfx::GpuMemoryBufferHandle gmb_handle(
- scoped_hardware_buffer_->TakeBuffer());
- auto* device_queue =
- context_state_->vk_context_provider()->GetDeviceQueue();
- vulkan_image_ = VulkanImage::CreateFromGpuMemoryBufferHandle(
- device_queue, std::move(gmb_handle), size(), ToVkFormat(format()),
- 0 /* usage */);
+
+ vulkan_image_ =
+ CreateVkImageFromAhbHandle(scoped_hardware_buffer_->TakeBuffer(),
+ context_state_.get(), size(), format());
if (!vulkan_image_)
return nullptr;
diff --git a/chromium/gpu/command_buffer/service/shared_memory_region_wrapper.cc b/chromium/gpu/command_buffer/service/shared_memory_region_wrapper.cc
new file mode 100644
index 00000000000..213099665fd
--- /dev/null
+++ b/chromium/gpu/command_buffer/service/shared_memory_region_wrapper.cc
@@ -0,0 +1,127 @@
+// Copyright 2020 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "gpu/command_buffer/service/shared_memory_region_wrapper.h"
+
+#include "base/logging.h"
+#include "base/numerics/checked_math.h"
+#include "base/system/sys_info.h"
+#include "components/viz/common/resources/resource_format_utils.h"
+#include "components/viz/common/resources/resource_sizes.h"
+#include "ui/gfx/gpu_memory_buffer.h"
+
+namespace gpu {
+namespace {
+
+// Validate that |stride| will work for pixels with |size| and |format|.
+bool ValidateStride(const gfx::Size size,
+ viz::ResourceFormat format,
+ int32_t stride) {
+ if (!base::IsValueInRangeForNumericType<size_t>(stride))
+ return false;
+
+ int32_t min_width_in_bytes = 0;
+ if (!viz::ResourceSizes::MaybeWidthInBytes(size.width(), format,
+ &min_width_in_bytes)) {
+ return false;
+ }
+
+ if (stride < min_width_in_bytes)
+ return false;
+
+ // Check that stride is a multiple of pixel byte size.
+ int bits_per_pixel = viz::BitsPerPixel(format);
+ switch (bits_per_pixel) {
+ case 64:
+ case 32:
+ case 16:
+ if (stride % (bits_per_pixel / 8) != 0)
+ return false;
+ break;
+ case 8:
+ case 4:
+ break;
+ default:
+ // YVU420 and YUV_420_BIPLANAR format aren't supported.
+ NOTREACHED();
+ return false;
+ }
+
+ return true;
+}
+
+} // namespace
+
+SharedMemoryRegionWrapper::SharedMemoryRegionWrapper() = default;
+SharedMemoryRegionWrapper::SharedMemoryRegionWrapper(
+ SharedMemoryRegionWrapper&& other) = default;
+SharedMemoryRegionWrapper& SharedMemoryRegionWrapper::operator=(
+ SharedMemoryRegionWrapper&& other) = default;
+SharedMemoryRegionWrapper::~SharedMemoryRegionWrapper() = default;
+
+bool SharedMemoryRegionWrapper::Initialize(
+ const gfx::GpuMemoryBufferHandle& handle,
+ const gfx::Size& size,
+ viz::ResourceFormat format) {
+ DCHECK(!mapping_.IsValid());
+
+ if (!handle.region.IsValid()) {
+ DLOG(ERROR) << "Invalid GMB shared memory region.";
+ return false;
+ }
+
+ if (!ValidateStride(size, format, handle.stride)) {
+ DLOG(ERROR) << "Invalid GMB stride.";
+ return false;
+ }
+
+ // Minimize the amount of address space we use but make sure offset is a
+ // multiple of page size as required by MapAt().
+ size_t allocation_granularity = base::SysInfo::VMAllocationGranularity();
+ size_t memory_offset = handle.offset % allocation_granularity;
+ size_t map_offset =
+ allocation_granularity * (handle.offset / allocation_granularity);
+
+ base::CheckedNumeric<size_t> checked_size = handle.stride;
+ checked_size *= size.height();
+ checked_size += memory_offset;
+ if (!checked_size.IsValid()) {
+ DLOG(ERROR) << "Invalid GMB size.";
+ return false;
+ }
+
+ mapping_ = handle.region.MapAt(static_cast<off_t>(map_offset),
+ checked_size.ValueOrDie());
+
+ if (!mapping_.IsValid()) {
+ DLOG(ERROR) << "Failed to map shared memory.";
+ return false;
+ }
+
+ offset_ = memory_offset;
+ stride_ = handle.stride;
+
+ return true;
+}
+
+bool SharedMemoryRegionWrapper::IsValid() const {
+ return mapping_.IsValid();
+}
+
+uint8_t* SharedMemoryRegionWrapper::GetMemory() const {
+ DCHECK(IsValid());
+ return mapping_.GetMemoryAs<uint8_t>() + offset_;
+}
+
+base::span<const uint8_t> SharedMemoryRegionWrapper::GetMemoryAsSpan() const {
+ DCHECK(IsValid());
+ return mapping_.GetMemoryAsSpan<const uint8_t>().subspan(offset_);
+}
+
+size_t SharedMemoryRegionWrapper::GetStride() const {
+ DCHECK(IsValid());
+ return stride_;
+}
+
+} // namespace gpu
diff --git a/chromium/gpu/command_buffer/service/shared_memory_region_wrapper.h b/chromium/gpu/command_buffer/service/shared_memory_region_wrapper.h
new file mode 100644
index 00000000000..280a09b840c
--- /dev/null
+++ b/chromium/gpu/command_buffer/service/shared_memory_region_wrapper.h
@@ -0,0 +1,48 @@
+// Copyright 2020 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef GPU_COMMAND_BUFFER_SERVICE_SHARED_MEMORY_REGION_WRAPPER_H_
+#define GPU_COMMAND_BUFFER_SERVICE_SHARED_MEMORY_REGION_WRAPPER_H_
+
+#include "base/containers/span.h"
+#include "base/memory/shared_memory_mapping.h"
+#include "components/viz/common/resources/resource_format.h"
+#include "ui/gfx/geometry/size.h"
+
+namespace gfx {
+struct GpuMemoryBufferHandle;
+}
+
+namespace gpu {
+
+// Wrapper for shared memory region from a GpuMemoryBuffer with type
+// SHARED_MEMORY_BUFFER.
+class SharedMemoryRegionWrapper {
+ public:
+ SharedMemoryRegionWrapper();
+ SharedMemoryRegionWrapper(SharedMemoryRegionWrapper&& other);
+ SharedMemoryRegionWrapper& operator=(SharedMemoryRegionWrapper&& other);
+ ~SharedMemoryRegionWrapper();
+
+ // Validates that size, stride and format parameters make sense and maps
+ // memory for shared memory owned by |handle|. Shared memory stays mapped
+ // until destruction.
+ bool Initialize(const gfx::GpuMemoryBufferHandle& handle,
+ const gfx::Size& size,
+ viz::ResourceFormat format);
+
+ bool IsValid() const;
+ uint8_t* GetMemory() const;
+ base::span<const uint8_t> GetMemoryAsSpan() const;
+ size_t GetStride() const;
+
+ private:
+ base::WritableSharedMemoryMapping mapping_;
+ size_t offset_ = 0;
+ size_t stride_ = 0;
+};
+
+} // namespace gpu
+
+#endif // GPU_COMMAND_BUFFER_SERVICE_SHARED_MEMORY_REGION_WRAPPER_H_
diff --git a/chromium/gpu/command_buffer/service/surface_texture_gl_owner.cc b/chromium/gpu/command_buffer/service/surface_texture_gl_owner.cc
index 7ee33fbf629..a99a5c4279f 100644
--- a/chromium/gpu/command_buffer/service/surface_texture_gl_owner.cc
+++ b/chromium/gpu/command_buffer/service/surface_texture_gl_owner.cc
@@ -72,16 +72,6 @@ void SurfaceTextureGLOwner::EnsureTexImageBound() {
NOTREACHED();
}
-void SurfaceTextureGLOwner::GetTransformMatrix(float mtx[]) {
- DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);
- // If we don't have a SurfaceTexture, then the matrix doesn't matter. We
- // still initialize it for good measure.
- if (surface_texture_)
- surface_texture_->GetTransformMatrix(mtx);
- else
- memset(mtx, 0, sizeof(mtx[0]) * 16);
-}
-
void SurfaceTextureGLOwner::ReleaseBackBuffers() {
DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);
if (surface_texture_)
@@ -104,12 +94,7 @@ SurfaceTextureGLOwner::GetAHardwareBuffer() {
return nullptr;
}
-gfx::Rect SurfaceTextureGLOwner::GetCropRect() {
- NOTREACHED() << "Don't use GetCropRect with SurfaceTextureGLOwner";
- return gfx::Rect();
-}
-
-void SurfaceTextureGLOwner::GetCodedSizeAndVisibleRect(
+bool SurfaceTextureGLOwner::GetCodedSizeAndVisibleRect(
gfx::Size rotated_visible_size,
gfx::Size* coded_size,
gfx::Rect* visible_rect) {
@@ -119,7 +104,7 @@ void SurfaceTextureGLOwner::GetCodedSizeAndVisibleRect(
if (!surface_texture_) {
*visible_rect = gfx::Rect();
*coded_size = gfx::Size();
- return;
+ return false;
}
float mtx[16];
@@ -154,6 +139,8 @@ void SurfaceTextureGLOwner::GetCodedSizeAndVisibleRect(
base::debug::DumpWithoutCrashing();
}
+
+ return true;
}
// static
diff --git a/chromium/gpu/command_buffer/service/surface_texture_gl_owner.h b/chromium/gpu/command_buffer/service/surface_texture_gl_owner.h
index 80d23de9035..d1ecf45dab2 100644
--- a/chromium/gpu/command_buffer/service/surface_texture_gl_owner.h
+++ b/chromium/gpu/command_buffer/service/surface_texture_gl_owner.h
@@ -33,12 +33,10 @@ class GPU_GLES2_EXPORT SurfaceTextureGLOwner : public TextureOwner {
gl::ScopedJavaSurface CreateJavaSurface() const override;
void UpdateTexImage() override;
void EnsureTexImageBound() override;
- void GetTransformMatrix(float mtx[16]) override;
void ReleaseBackBuffers() override;
std::unique_ptr<base::android::ScopedHardwareBufferFenceSync>
GetAHardwareBuffer() override;
- gfx::Rect GetCropRect() override;
- void GetCodedSizeAndVisibleRect(gfx::Size rotated_visible_size,
+ bool GetCodedSizeAndVisibleRect(gfx::Size rotated_visible_size,
gfx::Size* coded_size,
gfx::Rect* visible_rect) override;
diff --git a/chromium/gpu/command_buffer/service/sync_point_manager.h b/chromium/gpu/command_buffer/service/sync_point_manager.h
index c6fe88dc0be..496840b4448 100644
--- a/chromium/gpu/command_buffer/service/sync_point_manager.h
+++ b/chromium/gpu/command_buffer/service/sync_point_manager.h
@@ -15,7 +15,7 @@
#include "base/atomic_sequence_num.h"
#include "base/callback.h"
-#include "base/logging.h"
+#include "base/check.h"
#include "base/macros.h"
#include "base/memory/ref_counted.h"
#include "base/synchronization/condition_variable.h"
diff --git a/chromium/gpu/command_buffer/service/test_shared_image_backing.cc b/chromium/gpu/command_buffer/service/test_shared_image_backing.cc
index d07cfd627d2..290bf973958 100644
--- a/chromium/gpu/command_buffer/service/test_shared_image_backing.cc
+++ b/chromium/gpu/command_buffer/service/test_shared_image_backing.cc
@@ -146,10 +146,10 @@ TestSharedImageBacking::TestSharedImageBacking(
texture_ = new gles2::Texture(service_id_);
texture_->SetLightweightRef();
texture_->SetTarget(GL_TEXTURE_2D, 1);
- texture_->sampler_state_.min_filter = GL_LINEAR;
- texture_->sampler_state_.mag_filter = GL_LINEAR;
- texture_->sampler_state_.wrap_s = GL_CLAMP_TO_EDGE;
- texture_->sampler_state_.wrap_t = GL_CLAMP_TO_EDGE;
+ texture_->set_min_filter(GL_LINEAR);
+ texture_->set_mag_filter(GL_LINEAR);
+ texture_->set_wrap_t(GL_CLAMP_TO_EDGE);
+ texture_->set_wrap_s(GL_CLAMP_TO_EDGE);
texture_->SetLevelInfo(GL_TEXTURE_2D, 0, GLInternalFormat(format),
size.width(), size.height(), 1, 0,
GLDataFormat(format), GLDataType(format), gfx::Rect());
diff --git a/chromium/gpu/command_buffer/service/texture_manager.h b/chromium/gpu/command_buffer/service/texture_manager.h
index 0fece4ee111..56ffa63b427 100644
--- a/chromium/gpu/command_buffer/service/texture_manager.h
+++ b/chromium/gpu/command_buffer/service/texture_manager.h
@@ -35,24 +35,7 @@ class ProgressReporter;
namespace gpu {
class DecoderContext;
-class ExternalVkImageBacking;
-class ExternalVkImageGlRepresentation;
class ServiceDiscardableManager;
-class SharedImageBackingGLTexture;
-class SharedImageBackingFactoryGLTexture;
-class SharedImageBackingAHB;
-class SharedImageBackingEglImage;
-class SharedImageRepresentationGLTexture;
-class SharedImageRepresentationEglImageGLTexture;
-class SharedImageRepresentationGLTextureAHB;
-class SharedImageRepresentationSkiaGLAHB;
-class SharedImageBackingIOSurface;
-class SharedImageRepresentationGLTextureIOSurface;
-class SharedImageRepresentationSkiaIOSurface;
-class SharedImageRepresentationGLOzone;
-class SharedImageVideo;
-class StreamTexture;
-class TestSharedImageBacking;
namespace gles2 {
class GLStreamTextureImage;
@@ -187,6 +170,28 @@ class GPU_GLES2_EXPORT Texture final : public TextureBase {
GLenum alpha;
};
+ struct LevelInfo {
+ LevelInfo();
+ LevelInfo(const LevelInfo& rhs);
+ ~LevelInfo();
+
+ gfx::Rect cleared_rect;
+ GLenum target = 0;
+ GLint level = -1;
+ GLenum internal_format = 0;
+ GLsizei width = 0;
+ GLsizei height = 0;
+ GLsizei depth = 0;
+ GLint border = 0;
+ GLenum format = 0;
+ GLenum type = 0;
+ scoped_refptr<gl::GLImage> image;
+ scoped_refptr<GLStreamTextureImage> stream_texture_image;
+ ImageState image_state = UNBOUND;
+ uint32_t estimated_size = 0;
+ bool internal_workaround = false;
+ };
+
explicit Texture(GLuint service_id);
// TextureBase implementation:
@@ -198,22 +203,36 @@ class GPU_GLES2_EXPORT Texture final : public TextureBase {
return sampler_state_;
}
+ void set_min_filter(GLenum min_filter) {
+ sampler_state_.min_filter = min_filter;
+ }
+
GLenum min_filter() const {
return sampler_state_.min_filter;
}
+ void set_mag_filter(GLenum mag_filter) {
+ sampler_state_.mag_filter = mag_filter;
+ }
+
GLenum mag_filter() const {
return sampler_state_.mag_filter;
}
+ void set_wrap_r(GLenum wrap_r) { sampler_state_.wrap_r = wrap_r; }
+
GLenum wrap_r() const {
return sampler_state_.wrap_r;
}
+ void set_wrap_s(GLenum wrap_s) { sampler_state_.wrap_s = wrap_s; }
+
GLenum wrap_s() const {
return sampler_state_.wrap_s;
}
+ void set_wrap_t(GLenum wrap_t) { sampler_state_.wrap_t = wrap_t; }
+
GLenum wrap_t() const {
return sampler_state_.wrap_t;
}
@@ -429,26 +448,43 @@ class GPU_GLES2_EXPORT Texture final : public TextureBase {
// Returns GL_NONE on error.
GLenum GetInternalFormatOfBaseLevel() const;
+ void SetLightweightRef();
+
+ void RemoveLightweightRef(bool have_context);
+
+ // Set the info for a particular level.
+ void SetLevelInfo(GLenum target,
+ GLint level,
+ GLenum internal_format,
+ GLsizei width,
+ GLsizei height,
+ GLsizei depth,
+ GLint border,
+ GLenum format,
+ GLenum type,
+ const gfx::Rect& cleared_rect);
+
+ // Returns the LevelInfo for |target| and |level| if it's set, else nullptr.
+ const LevelInfo* GetLevelInfo(GLint target, GLint level) const;
+
+ // Sets the Texture's target
+ // Parameters:
+ // target: GL_TEXTURE_2D or GL_TEXTURE_CUBE_MAP or
+ // GL_TEXTURE_EXTERNAL_OES or GL_TEXTURE_RECTANGLE_ARB
+ // GL_TEXTURE_2D_ARRAY or GL_TEXTURE_3D (for GLES3)
+ // max_levels: The maximum levels this type of target can have.
+ void SetTarget(GLenum target, GLint max_levels);
+
+ void SetCompatibilitySwizzle(const CompatibilitySwizzle* swizzle);
+
+ bool NeedsMips() const {
+ return sampler_state_.min_filter != GL_NEAREST &&
+ sampler_state_.min_filter != GL_LINEAR;
+ }
+
private:
friend class MailboxManagerSync;
friend class MailboxManagerTest;
- friend class gpu::ExternalVkImageBacking;
- friend class gpu::ExternalVkImageGlRepresentation;
- friend class gpu::SharedImageVideo;
- friend class gpu::SharedImageBackingGLTexture;
- friend class gpu::SharedImageBackingFactoryGLTexture;
- friend class gpu::SharedImageBackingAHB;
- friend class gpu::SharedImageBackingEglImage;
- friend class gpu::SharedImageRepresentationGLTextureAHB;
- friend class gpu::SharedImageRepresentationEglImageGLTexture;
- friend class gpu::SharedImageRepresentationSkiaGLAHB;
- friend class gpu::SharedImageBackingIOSurface;
- friend class gpu::SharedImageRepresentationGLTextureIOSurface;
- friend class gpu::SharedImageRepresentationSkiaIOSurface;
- friend class gpu::SharedImageRepresentationGLOzone;
- friend class gpu::StreamTexture;
- friend class gpu::TestSharedImageBacking;
- friend class AbstractTextureImplOnSharedContext;
friend class TextureDefinition;
friend class TextureManager;
friend class TextureRef;
@@ -458,8 +494,6 @@ class GPU_GLES2_EXPORT Texture final : public TextureBase {
~Texture() override;
void AddTextureRef(TextureRef* ref);
void RemoveTextureRef(TextureRef* ref, bool have_context);
- void SetLightweightRef();
- void RemoveLightweightRef(bool have_context);
void MaybeDeleteThis(bool have_context);
// Condition on which this texture is renderable. Can be ONLY_IF_NPOT if it
@@ -474,28 +508,6 @@ class GPU_GLES2_EXPORT Texture final : public TextureBase {
CAN_RENDER_NEEDS_VALIDATION,
};
- struct LevelInfo {
- LevelInfo();
- LevelInfo(const LevelInfo& rhs);
- ~LevelInfo();
-
- gfx::Rect cleared_rect;
- GLenum target;
- GLint level;
- GLenum internal_format;
- GLsizei width;
- GLsizei height;
- GLsizei depth;
- GLint border;
- GLenum format;
- GLenum type;
- scoped_refptr<gl::GLImage> image;
- scoped_refptr<GLStreamTextureImage> stream_texture_image;
- ImageState image_state;
- uint32_t estimated_size;
- bool internal_workaround;
- };
-
struct FaceInfo {
FaceInfo();
FaceInfo(const FaceInfo& other);
@@ -514,23 +526,9 @@ class GPU_GLES2_EXPORT Texture final : public TextureBase {
GLStreamTextureImage* stream_texture_image,
ImageState state);
- // Returns the LevelInfo for |target| and |level| if it's set, else NULL.
- const LevelInfo* GetLevelInfo(GLint target, GLint level) const;
// Returns NULL if the base level is not defined.
const LevelInfo* GetBaseLevelInfo() const;
- // Set the info for a particular level.
- void SetLevelInfo(GLenum target,
- GLint level,
- GLenum internal_format,
- GLsizei width,
- GLsizei height,
- GLsizei depth,
- GLint border,
- GLenum format,
- GLenum type,
- const gfx::Rect& cleared_rect);
-
// Causes us to report |service_id| as our service id, but does not delete
// it when we are destroyed. Will rebind any OES_EXTERNAL texture units to
// our new service id in all contexts. If |service_id| is zero, then we
@@ -573,11 +571,6 @@ class GPU_GLES2_EXPORT Texture final : public TextureBase {
// Makes each of the mip levels as though they were generated.
void MarkMipmapsGenerated();
- bool NeedsMips() const {
- return sampler_state_.min_filter != GL_NEAREST &&
- sampler_state_.min_filter != GL_LINEAR;
- }
-
// True if this texture meets all the GLES2 criteria for rendering.
// See section 3.8.2 of the GLES2 spec.
bool CanRender(const FeatureInfo* feature_info) const;
@@ -618,14 +611,6 @@ class GPU_GLES2_EXPORT Texture final : public TextureBase {
GLenum type,
bool immutable);
- // Sets the Texture's target
- // Parameters:
- // target: GL_TEXTURE_2D or GL_TEXTURE_CUBE_MAP or
- // GL_TEXTURE_EXTERNAL_OES or GL_TEXTURE_RECTANGLE_ARB
- // GL_TEXTURE_2D_ARRAY or GL_TEXTURE_3D (for GLES3)
- // max_levels: The maximum levels this type of target can have.
- void SetTarget(GLenum target, GLint max_levels);
-
// Update info about this texture.
void Update();
@@ -682,7 +667,6 @@ class GPU_GLES2_EXPORT Texture final : public TextureBase {
GLuint owned_service_id() const { return owned_service_id_; }
GLenum GetCompatibilitySwizzleForChannel(GLenum channel);
- void SetCompatibilitySwizzle(const CompatibilitySwizzle* swizzle);
// Info about each face and level of texture.
std::vector<FaceInfo> face_infos_;
diff --git a/chromium/gpu/command_buffer/service/texture_owner.h b/chromium/gpu/command_buffer/service/texture_owner.h
index 8f0fced6b6a..81bd88c1bda 100644
--- a/chromium/gpu/command_buffer/service/texture_owner.h
+++ b/chromium/gpu/command_buffer/service/texture_owner.h
@@ -83,7 +83,6 @@ class GPU_GLES2_EXPORT TextureOwner
virtual void EnsureTexImageBound() = 0;
// Transformation matrix if any associated with the texture image.
- virtual void GetTransformMatrix(float mtx[16]) = 0;
virtual void ReleaseBackBuffers() = 0;
// Retrieves the AHardwareBuffer from the latest available image data.
@@ -92,10 +91,6 @@ class GPU_GLES2_EXPORT TextureOwner
virtual std::unique_ptr<base::android::ScopedHardwareBufferFenceSync>
GetAHardwareBuffer() = 0;
- // Provides the crop rectangle associated with the most recent image. The
- // crop rectangle specifies the region of valid pixels in the image.
- virtual gfx::Rect GetCropRect() = 0;
-
// Retrieves backing size and visible rect associated with the most recent
// image. |rotated_visible_size| is the size of the visible region
// post-transform in pixels and is used for SurfaceTexture case. Transform
@@ -103,7 +98,8 @@ class GPU_GLES2_EXPORT TextureOwner
// expect to have rotation and MediaPlayer reports rotated size. For
// MediaCodec we don't expect rotation in ST so visible_size (i.e crop rect
// from codec) can be used.
- virtual void GetCodedSizeAndVisibleRect(gfx::Size rotated_visible_size,
+ // Returns whether call was successful or not.
+ virtual bool GetCodedSizeAndVisibleRect(gfx::Size rotated_visible_size,
gfx::Size* coded_size,
gfx::Rect* visible_rect) = 0;
diff --git a/chromium/gpu/command_buffer/service/vertex_array_manager.h b/chromium/gpu/command_buffer/service/vertex_array_manager.h
index 2053fb2342b..cc1f84a8c79 100644
--- a/chromium/gpu/command_buffer/service/vertex_array_manager.h
+++ b/chromium/gpu/command_buffer/service/vertex_array_manager.h
@@ -9,7 +9,6 @@
#include <unordered_map>
-#include "base/logging.h"
#include "base/macros.h"
#include "base/memory/ref_counted.h"
#include "gpu/command_buffer/service/gl_utils.h"
diff --git a/chromium/gpu/command_buffer/service/vertex_attrib_manager.h b/chromium/gpu/command_buffer/service/vertex_attrib_manager.h
index 044d0255992..0bdb52371b7 100644
--- a/chromium/gpu/command_buffer/service/vertex_attrib_manager.h
+++ b/chromium/gpu/command_buffer/service/vertex_attrib_manager.h
@@ -10,7 +10,7 @@
#include <list>
#include <vector>
-#include "base/logging.h"
+#include "base/check_op.h"
#include "base/memory/ref_counted.h"
#include "build/build_config.h"
#include "gpu/command_buffer/service/buffer_manager.h"
diff --git a/chromium/gpu/command_buffer/service/webgpu_decoder_impl.cc b/chromium/gpu/command_buffer/service/webgpu_decoder_impl.cc
index a4c8530dfd8..514e52a4d78 100644
--- a/chromium/gpu/command_buffer/service/webgpu_decoder_impl.cc
+++ b/chromium/gpu/command_buffer/service/webgpu_decoder_impl.cc
@@ -435,10 +435,7 @@ class WebGPUDecoderImpl final : public WebGPUDecoder {
const volatile void* buffer,
int num_entries,
int* entries_processed) override;
- base::StringPiece GetLogPrefix() override {
- NOTIMPLEMENTED();
- return "";
- }
+ base::StringPiece GetLogPrefix() override { return "WebGPUDecoderImpl"; }
void BindImage(uint32_t client_texture_id,
uint32_t texture_target,
gl::GLImage* image,
diff --git a/chromium/gpu/command_buffer/service/wrapped_sk_image.cc b/chromium/gpu/command_buffer/service/wrapped_sk_image.cc
index 591d0923180..7cd628f1325 100644
--- a/chromium/gpu/command_buffer/service/wrapped_sk_image.cc
+++ b/chromium/gpu/command_buffer/service/wrapped_sk_image.cc
@@ -17,6 +17,7 @@
#include "gpu/command_buffer/service/shared_context_state.h"
#include "gpu/command_buffer/service/shared_image_backing.h"
#include "gpu/command_buffer/service/shared_image_representation.h"
+#include "gpu/command_buffer/service/shared_memory_region_wrapper.h"
#include "gpu/command_buffer/service/skia_utils.h"
#include "skia/buildflags.h"
#include "third_party/skia/include/core/SkCanvas.h"
@@ -25,6 +26,7 @@
#include "third_party/skia/include/core/SkSurfaceProps.h"
#include "third_party/skia/include/gpu/GrBackendSurface.h"
#include "third_party/skia/include/gpu/GrTypes.h"
+#include "ui/gfx/buffer_format_util.h"
#include "ui/gl/gl_context.h"
#include "ui/gl/gl_gl_api_implementation.h"
#include "ui/gl/trace_util.h"
@@ -39,6 +41,13 @@ namespace raster {
namespace {
+SkImageInfo MakeSkImageInfo(const gfx::Size& size, viz::ResourceFormat format) {
+ return SkImageInfo::Make(size.width(), size.height(),
+ ResourceFormatToClosestSkColorType(
+ /*gpu_compositing=*/true, format),
+ kOpaque_SkAlphaType);
+}
+
class WrappedSkImage : public ClearTrackingSharedImageBacking {
public:
~WrappedSkImage() override {
@@ -59,7 +68,24 @@ class WrappedSkImage : public ClearTrackingSharedImageBacking {
return false;
}
- void Update(std::unique_ptr<gfx::GpuFence> in_fence) override {}
+ void Update(std::unique_ptr<gfx::GpuFence> in_fence) override {
+ if (shared_memory_wrapper_.IsValid()) {
+ DCHECK(!in_fence);
+
+ if (context_state_->context_lost())
+ return;
+
+ DCHECK(context_state_->IsCurrent(nullptr));
+
+ SkImageInfo info = MakeSkImageInfo(size(), format());
+ SkPixmap pixmap(info, shared_memory_wrapper_.GetMemory(),
+ shared_memory_wrapper_.GetStride());
+ if (!context_state_->gr_context()->updateBackendTexture(
+ backend_texture_, &pixmap, /*levels=*/1, nullptr, nullptr)) {
+ DLOG(ERROR) << "Failed to update WrappedSkImage texture";
+ }
+ }
+ }
void OnMemoryDump(const std::string& dump_name,
base::trace_event::MemoryAllocatorDump* dump,
@@ -138,11 +164,28 @@ class WrappedSkImage : public ClearTrackingSharedImageBacking {
DCHECK(!!context_state_);
}
- bool Initialize(const SkImageInfo& info, base::span<const uint8_t> data) {
+ bool InitializeGMB(const SkImageInfo& info,
+ SharedMemoryRegionWrapper shm_wrapper) {
+ if (Initialize(info, shm_wrapper.GetMemoryAsSpan(),
+ shm_wrapper.GetStride())) {
+ shared_memory_wrapper_ = std::move(shm_wrapper);
+ return true;
+ }
+ return false;
+ }
+
+ // |pixels| optionally contains pixel data to upload to the texture. If pixel
+ // data is provided and the image format is not ETC1 then |stride| is used. If
+ // |stride| is non-zero then it's used as the stride, otherwise
+ // SkImageInfo::minRowBytes() is used for the stride. For ETC1 textures pixel
+ // data must be provided since updating compressed textures is not supported.
+ bool Initialize(const SkImageInfo& info,
+ base::span<const uint8_t> pixels,
+ size_t stride) {
if (context_state_->context_lost())
return false;
- DCHECK(context_state_->IsCurrent(nullptr));
+ DCHECK(context_state_->IsCurrent(nullptr));
context_state_->set_need_context_state_reset(true);
#if BUILDFLAG(ENABLE_VULKAN)
@@ -156,33 +199,30 @@ class WrappedSkImage : public ClearTrackingSharedImageBacking {
auto is_protected = GrProtected::kNo;
#endif
- if (!data.empty()) {
+ if (pixels.data()) {
if (format() == viz::ResourceFormat::ETC1) {
backend_texture_ =
context_state_->gr_context()->createCompressedBackendTexture(
size().width(), size().height(), SkImage::kETC1_CompressionType,
- data.data(), data.size(), GrMipMapped::kNo, is_protected);
+ pixels.data(), pixels.size(), GrMipMapped::kNo, is_protected);
} else {
- SkBitmap bitmap;
- if (!bitmap.installPixels(info, const_cast<uint8_t*>(data.data()),
- info.minRowBytes())) {
- return false;
- }
+ if (!stride)
+ stride = info.minRowBytes();
+ SkPixmap pixmap(info, pixels.data(), stride);
backend_texture_ = context_state_->gr_context()->createBackendTexture(
- bitmap.pixmap(), GrRenderable::kNo, is_protected);
+ pixmap, GrRenderable::kNo, is_protected);
}
if (!backend_texture_.isValid())
return false;
SetCleared();
- OnWriteSucceeded();
} else {
+ DCHECK_NE(format(), viz::ResourceFormat::ETC1);
+#if DCHECK_IS_ON()
// Initializing to bright green makes it obvious if the pixels are not
// properly set before they are displayed (e.g. https://crbug.com/956555).
// We don't do this on release builds because there is a slight overhead.
-
-#if DCHECK_IS_ON()
backend_texture_ = context_state_->gr_context()->createBackendTexture(
size().width(), size().height(), GetSkColorType(), SkColors::kBlue,
GrMipMapped::kNo, GrRenderable::kYes, is_protected);
@@ -191,12 +231,12 @@ class WrappedSkImage : public ClearTrackingSharedImageBacking {
size().width(), size().height(), GetSkColorType(), GrMipMapped::kNo,
GrRenderable::kYes, is_protected);
#endif
- }
- if (!backend_texture_.isValid()) {
- DLOG(ERROR) << "createBackendTexture() failed with SkColorType:"
- << GetSkColorType();
- return false;
+ if (!backend_texture_.isValid()) {
+ DLOG(ERROR) << "createBackendTexture() failed with SkColorType:"
+ << GetSkColorType();
+ return false;
+ }
}
promise_texture_ = SkPromiseImageTexture::Make(backend_texture_);
@@ -236,6 +276,9 @@ class WrappedSkImage : public ClearTrackingSharedImageBacking {
sk_sp<SkPromiseImageTexture> promise_texture_;
int surface_msaa_count_ = 0;
+ // Set for shared memory GMB.
+ SharedMemoryRegionWrapper shared_memory_wrapper_;
+
uint64_t tracing_id_ = 0;
DISALLOW_COPY_AND_ASSIGN(WrappedSkImage);
@@ -324,15 +367,12 @@ std::unique_ptr<SharedImageBacking> WrappedSkImageFactory::CreateSharedImage(
const gfx::ColorSpace& color_space,
uint32_t usage,
base::span<const uint8_t> data) {
- auto info = SkImageInfo::Make(size.width(), size.height(),
- ResourceFormatToClosestSkColorType(
- /*gpu_compositing=*/true, format),
- kOpaque_SkAlphaType);
+ auto info = MakeSkImageInfo(size, format);
size_t estimated_size = info.computeMinByteSize();
std::unique_ptr<WrappedSkImage> texture(
new WrappedSkImage(mailbox, format, size, color_space, usage,
estimated_size, context_state_));
- if (!texture->Initialize(info, data))
+ if (!texture->Initialize(info, data, /*stride=*/0))
return nullptr;
return texture;
}
@@ -346,13 +386,41 @@ std::unique_ptr<SharedImageBacking> WrappedSkImageFactory::CreateSharedImage(
const gfx::Size& size,
const gfx::ColorSpace& color_space,
uint32_t usage) {
- NOTREACHED();
- return nullptr;
+ DCHECK_EQ(handle.type, gfx::SHARED_MEMORY_BUFFER);
+
+ if (!gpu::IsImageSizeValidForGpuMemoryBufferFormat(size, buffer_format)) {
+ DLOG(ERROR) << "Invalid image size for format.";
+ return nullptr;
+ }
+
+ if (gfx::NumberOfPlanesForLinearBufferFormat(buffer_format) != 1) {
+ DLOG(ERROR) << "Invalid image format.";
+ return nullptr;
+ }
+
+ viz::ResourceFormat format = viz::GetResourceFormat(buffer_format);
+
+ // The Skia API to handle compressed texture is limited and not compatible
+ // with updating the texture or custom strides.
+ DCHECK_NE(format, viz::ResourceFormat::ETC1);
+
+ SharedMemoryRegionWrapper shm_wrapper;
+ if (!shm_wrapper.Initialize(handle, size, format))
+ return nullptr;
+
+ auto info = MakeSkImageInfo(size, format);
+ std::unique_ptr<WrappedSkImage> texture(
+ new WrappedSkImage(mailbox, format, size, color_space, usage,
+ info.computeMinByteSize(), context_state_));
+ if (!texture->InitializeGMB(info, std::move(shm_wrapper)))
+ return nullptr;
+
+ return texture;
}
bool WrappedSkImageFactory::CanImportGpuMemoryBuffer(
gfx::GpuMemoryBufferType memory_buffer_type) {
- return false;
+ return memory_buffer_type == gfx::SHARED_MEMORY_BUFFER;
}
std::unique_ptr<SharedImageRepresentationSkia> WrappedSkImage::ProduceSkia(