diff options
Diffstat (limited to 'chromium/gpu/command_buffer')
119 files changed, 4695 insertions, 2158 deletions
diff --git a/chromium/gpu/command_buffer/PRESUBMIT.py b/chromium/gpu/command_buffer/PRESUBMIT.py index c1f484c8c6b..c3ddb2ccae7 100644 --- a/chromium/gpu/command_buffer/PRESUBMIT.py +++ b/chromium/gpu/command_buffer/PRESUBMIT.py @@ -10,8 +10,8 @@ for more details on the presubmit API built into depot_tools. import os.path -def _IsGLES2CmdBufferFile(file): - filename = os.path.basename(file.LocalPath()) +def _IsGLES2CmdBufferFile(affected_file): + filename = os.path.basename(affected_file.LocalPath()) if filename in [ 'build_cmd_buffer_lib.py', 'build_gles2_cmd_buffer.py', 'gles2_cmd_buffer_functions.txt', 'gl2.h', 'gl2ext.h', 'gl3.h', 'gl31.h', @@ -24,8 +24,8 @@ def _IsGLES2CmdBufferFile(file): filename.endswith('_autogen.h')) -def _IsRasterCmdBufferFile(file): - filename = os.path.basename(file.LocalPath()) +def _IsRasterCmdBufferFile(affected_file): + filename = os.path.basename(affected_file.LocalPath()) if filename in [ 'build_cmd_buffer_lib.py', 'build_raster_cmd_buffer.py', 'raster_cmd_buffer_functions.txt' @@ -35,8 +35,8 @@ def _IsRasterCmdBufferFile(file): return filename.startswith('raster') and filename.endswith('_autogen.h') -def _IsWebGPUCmdBufferFile(file): - filename = os.path.basename(file.LocalPath()) +def _IsWebGPUCmdBufferFile(affected_file): + filename = os.path.basename(affected_file.LocalPath()) if filename in [ 'build_cmd_buffer_lib.py', 'build_webgpu_cmd_buffer.py', 'webgpu_cmd_buffer_functions.txt' diff --git a/chromium/gpu/command_buffer/build_gles2_cmd_buffer.py b/chromium/gpu/command_buffer/build_gles2_cmd_buffer.py index 653016c3ae2..75380476f35 100755 --- a/chromium/gpu/command_buffer/build_gles2_cmd_buffer.py +++ b/chromium/gpu/command_buffer/build_gles2_cmd_buffer.py @@ -308,6 +308,13 @@ _NAMED_TYPE_INFO = { 'GL_UNPACK_SKIP_IMAGES', 'GL_UNPACK_SKIP_PIXELS', 'GL_UNPACK_SKIP_ROWS', + 'GL_BLEND_EQUATION_RGB', + 'GL_BLEND_EQUATION_ALPHA', + 'GL_BLEND_SRC_RGB', + 'GL_BLEND_SRC_ALPHA', + 'GL_BLEND_DST_RGB', + 'GL_BLEND_DST_ALPHA', + 
'GL_COLOR_WRITEMASK', # GL_VERTEX_ARRAY_BINDING is the same as GL_VERTEX_ARRAY_BINDING_OES # 'GL_VERTEX_ARRAY_BINDING', ], @@ -324,6 +331,13 @@ _NAMED_TYPE_INFO = { 'GL_UNIFORM_BUFFER_BINDING', 'GL_UNIFORM_BUFFER_SIZE', 'GL_UNIFORM_BUFFER_START', + 'GL_BLEND_EQUATION_RGB', + 'GL_BLEND_EQUATION_ALPHA', + 'GL_BLEND_SRC_RGB', + 'GL_BLEND_SRC_ALPHA', + 'GL_BLEND_DST_RGB', + 'GL_BLEND_DST_ALPHA', + 'GL_COLOR_WRITEMASK', ], 'invalid': [ 'GL_FOG_HINT', @@ -1578,6 +1592,7 @@ _NAMED_TYPE_INFO = { 'type': 'GLenum', 'is_complete': True, 'valid': [ + 'GL_SHARED_IMAGE_ACCESS_MODE_OVERLAY_CHROMIUM', 'GL_SHARED_IMAGE_ACCESS_MODE_READWRITE_CHROMIUM', 'GL_SHARED_IMAGE_ACCESS_MODE_READ_CHROMIUM', ], @@ -1828,6 +1843,11 @@ _FUNCTION_INFO = { 'no_gl': True, 'expectation': False, }, + 'ColorMaskiOES': { + 'extension_flag': 'oes_draw_buffers_indexed', + 'unit_test': False, + 'extension': 'OES_draw_buffers_indexed', + }, 'ContextVisibilityHintCHROMIUM': { 'decoder_func': 'DoContextVisibilityHintCHROMIUM', 'extension': 'CHROMIUM_context_visibility_hint', @@ -1954,6 +1974,15 @@ _FUNCTION_INFO = { '0': 'GL_FUNC_SUBTRACT' }, }, + 'BlendEquationiOES': { + 'extension_flag': 'oes_draw_buffers_indexed', + 'unit_test': False, + 'extension': 'OES_draw_buffers_indexed', + 'valid_args': { + '1': 'GL_FUNC_SUBTRACT', + '2': 'GL_FUNC_SUBTRACT' + }, + }, 'BlendEquationSeparate': { 'type': 'StateSet', 'state': 'BlendEquation', @@ -1961,14 +1990,33 @@ _FUNCTION_INFO = { '0': 'GL_FUNC_SUBTRACT' }, }, + 'BlendEquationSeparateiOES': { + 'extension_flag': 'oes_draw_buffers_indexed', + 'unit_test': False, + 'extension': 'OES_draw_buffers_indexed', + 'valid_args': { + '1': 'GL_FUNC_SUBTRACT', + '2': 'GL_FUNC_SUBTRACT' + }, + }, 'BlendFunc': { 'type': 'StateSetRGBAlpha', 'state': 'BlendFunc', }, + 'BlendFunciOES': { + 'extension_flag': 'oes_draw_buffers_indexed', + 'unit_test': False, + 'extension': 'OES_draw_buffers_indexed', + }, 'BlendFuncSeparate': { 'type': 'StateSet', 'state': 'BlendFunc', }, + 
'BlendFuncSeparateiOES': { + 'extension_flag': 'oes_draw_buffers_indexed', + 'unit_test': False, + 'extension': 'OES_draw_buffers_indexed', + }, 'BlendBarrierKHR': { 'gl_test_func': 'glBlendBarrierKHR', 'extension': 'KHR_blend_equation_advanced', @@ -2079,6 +2127,13 @@ _FUNCTION_INFO = { 'impl_func': False, 'client_test': False, }, + 'DisableiOES': { + 'extension_flag': 'oes_draw_buffers_indexed', + 'extension': 'OES_draw_buffers_indexed', + 'decoder_func': 'DoDisableiOES', + 'impl_func': False, + 'unit_test': False, + }, 'DisableVertexAttribArray': { 'decoder_func': 'DoDisableVertexAttribArray', 'impl_func': False, @@ -2139,6 +2194,13 @@ _FUNCTION_INFO = { 'impl_func': False, 'client_test': False, }, + 'EnableiOES': { + 'extension_flag': 'oes_draw_buffers_indexed', + 'extension': 'OES_draw_buffers_indexed', + 'decoder_func': 'DoEnableiOES', + 'impl_func': False, + 'unit_test': False, + }, 'EnableVertexAttribArray': { 'decoder_func': 'DoEnableVertexAttribArray', 'impl_func': False, @@ -2325,6 +2387,15 @@ _FUNCTION_INFO = { 'decoder_func': 'DoGetBooleanv', 'gl_test_func': 'glGetIntegerv', }, + 'GetBooleani_v': { + 'type': 'GETn', + 'result': ['SizedResult<GLboolean>'], + 'decoder_func': 'DoGetBooleani_v', + 'shadowed': True, + 'client_test': False, + 'unit_test': False, + 'es3': True + }, 'GetBufferParameteri64v': { 'type': 'GETn', 'result': ['SizedResult<GLint64>'], @@ -2721,6 +2792,16 @@ _FUNCTION_INFO = { 'impl_func': False, 'expectation': False, }, + 'IsEnablediOES': { + 'extension_flag': 'oes_draw_buffers_indexed', + 'unit_test': False, + 'extension': 'OES_draw_buffers_indexed', + 'type': 'Is', + 'decoder_func': 'DoIsEnablediOES', + 'client_test': False, + 'impl_func': False, + 'expectation': False, + }, 'IsFramebuffer': { 'type': 'Is', 'decoder_func': 'DoIsFramebuffer', diff --git a/chromium/gpu/command_buffer/client/client_font_manager.cc b/chromium/gpu/command_buffer/client/client_font_manager.cc index 6dc6f269cdb..8a11104ee88 100644 --- 
a/chromium/gpu/command_buffer/client/client_font_manager.cc +++ b/chromium/gpu/command_buffer/client/client_font_manager.cc @@ -4,6 +4,8 @@ #include "gpu/command_buffer/client/client_font_manager.h" +#include "base/logging.h" + namespace gpu { namespace raster { diff --git a/chromium/gpu/command_buffer/client/cmd_buffer_helper.h b/chromium/gpu/command_buffer/client/cmd_buffer_helper.h index eca26c60e79..7111a460147 100644 --- a/chromium/gpu/command_buffer/client/cmd_buffer_helper.h +++ b/chromium/gpu/command_buffer/client/cmd_buffer_helper.h @@ -11,7 +11,7 @@ #include <stdint.h> #include <string.h> -#include "base/logging.h" +#include "base/check_op.h" #include "base/macros.h" #include "base/memory/ref_counted.h" #include "base/time/time.h" diff --git a/chromium/gpu/command_buffer/client/fenced_allocator.h b/chromium/gpu/command_buffer/client/fenced_allocator.h index 7b238dd8249..d3299c11768 100644 --- a/chromium/gpu/command_buffer/client/fenced_allocator.h +++ b/chromium/gpu/command_buffer/client/fenced_allocator.h @@ -13,7 +13,7 @@ #include <vector> #include "base/bind.h" -#include "base/logging.h" +#include "base/check.h" #include "base/macros.h" #include "gpu/gpu_export.h" diff --git a/chromium/gpu/command_buffer/client/fenced_allocator_test.cc b/chromium/gpu/command_buffer/client/fenced_allocator_test.cc index ed09dd197f6..f68bf95f8b3 100644 --- a/chromium/gpu/command_buffer/client/fenced_allocator_test.cc +++ b/chromium/gpu/command_buffer/client/fenced_allocator_test.cc @@ -119,7 +119,7 @@ TEST_F(FencedAllocatorTest, TestOutOfMemory) { const unsigned int kSize = 16; const unsigned int kAllocCount = kBufferSize / kSize; - CHECK(kAllocCount * kSize == kBufferSize); + CHECK_EQ(kAllocCount * kSize, kBufferSize); // Allocate several buffers to fill in the memory. 
FencedAllocator::Offset offsets[kAllocCount]; @@ -161,7 +161,7 @@ TEST_F(FencedAllocatorTest, TestFreePendingToken) { const unsigned int kSize = 16; const unsigned int kAllocCount = kBufferSize / kSize; - CHECK(kAllocCount * kSize == kBufferSize); + CHECK_EQ(kAllocCount * kSize, kBufferSize); // Allocate several buffers to fill in the memory. FencedAllocator::Offset offsets[kAllocCount]; @@ -209,7 +209,7 @@ TEST_F(FencedAllocatorTest, FreeUnused) { const unsigned int kSize = 16; const unsigned int kAllocCount = kBufferSize / kSize; - CHECK(kAllocCount * kSize == kBufferSize); + CHECK_EQ(kAllocCount * kSize, kBufferSize); // Allocate several buffers to fill in the memory. FencedAllocator::Offset offsets[kAllocCount]; @@ -406,7 +406,7 @@ TEST_F(FencedAllocatorWrapperTest, TestBasic) { allocator_->CheckConsistency(); const unsigned int kSize = 16; - void *pointer = allocator_->Alloc(kSize); + void* pointer = allocator_->Alloc(kSize); ASSERT_TRUE(pointer); EXPECT_LE(buffer_.get(), static_cast<char *>(pointer)); EXPECT_GE(kBufferSize, static_cast<char *>(pointer) - buffer_.get() + kSize); @@ -415,14 +415,14 @@ TEST_F(FencedAllocatorWrapperTest, TestBasic) { allocator_->Free(pointer); EXPECT_TRUE(allocator_->CheckConsistency()); - char *pointer_char = allocator_->AllocTyped<char>(kSize); + char* pointer_char = allocator_->AllocTyped<char>(kSize); ASSERT_TRUE(pointer_char); EXPECT_LE(buffer_.get(), pointer_char); EXPECT_GE(buffer_.get() + kBufferSize, pointer_char + kSize); allocator_->Free(pointer_char); EXPECT_TRUE(allocator_->CheckConsistency()); - unsigned int *pointer_uint = allocator_->AllocTyped<unsigned int>(kSize); + unsigned int* pointer_uint = allocator_->AllocTyped<unsigned int>(kSize); ASSERT_TRUE(pointer_uint); EXPECT_LE(buffer_.get(), reinterpret_cast<char *>(pointer_uint)); EXPECT_GE(buffer_.get() + kBufferSize, @@ -439,7 +439,7 @@ TEST_F(FencedAllocatorWrapperTest, TestBasic) { TEST_F(FencedAllocatorWrapperTest, TestAllocZero) { 
allocator_->CheckConsistency(); - void *pointer = allocator_->Alloc(0); + void* pointer = allocator_->Alloc(0); ASSERT_FALSE(pointer); EXPECT_TRUE(allocator_->CheckConsistency()); } @@ -449,15 +449,15 @@ TEST_F(FencedAllocatorWrapperTest, TestAlignment) { allocator_->CheckConsistency(); const unsigned int kSize1 = 75; - void *pointer1 = allocator_->Alloc(kSize1); + void* pointer1 = allocator_->Alloc(kSize1); ASSERT_TRUE(pointer1); - EXPECT_EQ(reinterpret_cast<intptr_t>(pointer1) & (kAllocAlignment - 1), 0); + EXPECT_TRUE(base::IsAligned(pointer1, kAllocAlignment)); EXPECT_TRUE(allocator_->CheckConsistency()); const unsigned int kSize2 = 43; - void *pointer2 = allocator_->Alloc(kSize2); + void* pointer2 = allocator_->Alloc(kSize2); ASSERT_TRUE(pointer2); - EXPECT_EQ(reinterpret_cast<intptr_t>(pointer2) & (kAllocAlignment - 1), 0); + EXPECT_TRUE(base::IsAligned(pointer2, kAllocAlignment)); EXPECT_TRUE(allocator_->CheckConsistency()); allocator_->Free(pointer2); @@ -473,10 +473,10 @@ TEST_F(FencedAllocatorWrapperTest, TestOutOfMemory) { const unsigned int kSize = 16; const unsigned int kAllocCount = kBufferSize / kSize; - CHECK(kAllocCount * kSize == kBufferSize); + CHECK_EQ(kAllocCount * kSize, kBufferSize); // Allocate several buffers to fill in the memory. - void *pointers[kAllocCount]; + void* pointers[kAllocCount]; for (unsigned int i = 0; i < kAllocCount; ++i) { pointers[i] = allocator_->Alloc(kSize); EXPECT_TRUE(pointers[i]); @@ -484,7 +484,7 @@ TEST_F(FencedAllocatorWrapperTest, TestOutOfMemory) { } // This allocation should fail. 
- void *pointer_failed = allocator_->Alloc(kSize); + void* pointer_failed = allocator_->Alloc(kSize); EXPECT_FALSE(pointer_failed); EXPECT_TRUE(allocator_->CheckConsistency()); @@ -513,10 +513,10 @@ TEST_F(FencedAllocatorWrapperTest, TestFreePendingToken) { const unsigned int kSize = 16; const unsigned int kAllocCount = kBufferSize / kSize; - CHECK(kAllocCount * kSize == kBufferSize); + CHECK_EQ(kAllocCount * kSize, kBufferSize); // Allocate several buffers to fill in the memory. - void *pointers[kAllocCount]; + void* pointers[kAllocCount]; for (unsigned int i = 0; i < kAllocCount; ++i) { pointers[i] = allocator_->Alloc(kSize); EXPECT_TRUE(pointers[i]); @@ -524,7 +524,7 @@ TEST_F(FencedAllocatorWrapperTest, TestFreePendingToken) { } // This allocation should fail. - void *pointer_failed = allocator_->Alloc(kSize); + void* pointer_failed = allocator_->Alloc(kSize); EXPECT_FALSE(pointer_failed); EXPECT_TRUE(allocator_->CheckConsistency()); diff --git a/chromium/gpu/command_buffer/client/gles2_c_lib_autogen.h b/chromium/gpu/command_buffer/client/gles2_c_lib_autogen.h index 76546b8dd53..dfe2d8512a2 100644 --- a/chromium/gpu/command_buffer/client/gles2_c_lib_autogen.h +++ b/chromium/gpu/command_buffer/client/gles2_c_lib_autogen.h @@ -423,6 +423,11 @@ GLint GL_APIENTRY GLES2GetAttribLocation(GLuint program, const char* name) { void GL_APIENTRY GLES2GetBooleanv(GLenum pname, GLboolean* params) { gles2::GetGLContext()->GetBooleanv(pname, params); } +void GL_APIENTRY GLES2GetBooleani_v(GLenum pname, + GLuint index, + GLboolean* data) { + gles2::GetGLContext()->GetBooleani_v(pname, index, data); +} void GL_APIENTRY GLES2GetBufferParameteri64v(GLenum target, GLenum pname, GLint64* params) { @@ -1832,6 +1837,41 @@ void GL_APIENTRY GLES2BeginBatchReadAccessSharedImageCHROMIUM() { void GL_APIENTRY GLES2EndBatchReadAccessSharedImageCHROMIUM() { gles2::GetGLContext()->EndBatchReadAccessSharedImageCHROMIUM(); } +void GL_APIENTRY GLES2EnableiOES(GLenum target, GLuint index) { + 
gles2::GetGLContext()->EnableiOES(target, index); +} +void GL_APIENTRY GLES2DisableiOES(GLenum target, GLuint index) { + gles2::GetGLContext()->DisableiOES(target, index); +} +void GL_APIENTRY GLES2BlendEquationiOES(GLuint buf, GLenum mode) { + gles2::GetGLContext()->BlendEquationiOES(buf, mode); +} +void GL_APIENTRY GLES2BlendEquationSeparateiOES(GLuint buf, + GLenum modeRGB, + GLenum modeAlpha) { + gles2::GetGLContext()->BlendEquationSeparateiOES(buf, modeRGB, modeAlpha); +} +void GL_APIENTRY GLES2BlendFunciOES(GLuint buf, GLenum src, GLenum dst) { + gles2::GetGLContext()->BlendFunciOES(buf, src, dst); +} +void GL_APIENTRY GLES2BlendFuncSeparateiOES(GLuint buf, + GLenum srcRGB, + GLenum dstRGB, + GLenum srcAlpha, + GLenum dstAlpha) { + gles2::GetGLContext()->BlendFuncSeparateiOES(buf, srcRGB, dstRGB, srcAlpha, + dstAlpha); +} +void GL_APIENTRY GLES2ColorMaskiOES(GLuint buf, + GLboolean r, + GLboolean g, + GLboolean b, + GLboolean a) { + gles2::GetGLContext()->ColorMaskiOES(buf, r, g, b, a); +} +GLboolean GL_APIENTRY GLES2IsEnablediOES(GLenum target, GLuint index) { + return gles2::GetGLContext()->IsEnablediOES(target, index); +} namespace gles2 { @@ -2169,6 +2209,10 @@ extern const NameToFunc g_gles2_function_table[] = { reinterpret_cast<GLES2FunctionPointer>(glGetBooleanv), }, { + "glGetBooleani_v", + reinterpret_cast<GLES2FunctionPointer>(glGetBooleani_v), + }, + { "glGetBufferParameteri64v", reinterpret_cast<GLES2FunctionPointer>(glGetBufferParameteri64v), }, @@ -3277,6 +3321,38 @@ extern const NameToFunc g_gles2_function_table[] = { glEndBatchReadAccessSharedImageCHROMIUM), }, { + "glEnableiOES", + reinterpret_cast<GLES2FunctionPointer>(glEnableiOES), + }, + { + "glDisableiOES", + reinterpret_cast<GLES2FunctionPointer>(glDisableiOES), + }, + { + "glBlendEquationiOES", + reinterpret_cast<GLES2FunctionPointer>(glBlendEquationiOES), + }, + { + "glBlendEquationSeparateiOES", + reinterpret_cast<GLES2FunctionPointer>(glBlendEquationSeparateiOES), + }, + { + 
"glBlendFunciOES", + reinterpret_cast<GLES2FunctionPointer>(glBlendFunciOES), + }, + { + "glBlendFuncSeparateiOES", + reinterpret_cast<GLES2FunctionPointer>(glBlendFuncSeparateiOES), + }, + { + "glColorMaskiOES", + reinterpret_cast<GLES2FunctionPointer>(glColorMaskiOES), + }, + { + "glIsEnablediOES", + reinterpret_cast<GLES2FunctionPointer>(glIsEnablediOES), + }, + { nullptr, nullptr, }, diff --git a/chromium/gpu/command_buffer/client/gles2_cmd_helper_autogen.h b/chromium/gpu/command_buffer/client/gles2_cmd_helper_autogen.h index 7aa80690359..efe2fe42a7b 100644 --- a/chromium/gpu/command_buffer/client/gles2_cmd_helper_autogen.h +++ b/chromium/gpu/command_buffer/client/gles2_cmd_helper_autogen.h @@ -851,6 +851,16 @@ void GetBooleanv(GLenum pname, } } +void GetBooleani_v(GLenum pname, + GLuint index, + uint32_t data_shm_id, + uint32_t data_shm_offset) { + gles2::cmds::GetBooleani_v* c = GetCmdSpace<gles2::cmds::GetBooleani_v>(); + if (c) { + c->Init(pname, index, data_shm_id, data_shm_offset); + } +} + void GetBufferParameteri64v(GLenum target, GLenum pname, uint32_t params_shm_id, @@ -3415,4 +3425,74 @@ void EndBatchReadAccessSharedImageCHROMIUM() { } } +void EnableiOES(GLenum target, GLuint index) { + gles2::cmds::EnableiOES* c = GetCmdSpace<gles2::cmds::EnableiOES>(); + if (c) { + c->Init(target, index); + } +} + +void DisableiOES(GLenum target, GLuint index) { + gles2::cmds::DisableiOES* c = GetCmdSpace<gles2::cmds::DisableiOES>(); + if (c) { + c->Init(target, index); + } +} + +void BlendEquationiOES(GLuint buf, GLenum mode) { + gles2::cmds::BlendEquationiOES* c = + GetCmdSpace<gles2::cmds::BlendEquationiOES>(); + if (c) { + c->Init(buf, mode); + } +} + +void BlendEquationSeparateiOES(GLuint buf, GLenum modeRGB, GLenum modeAlpha) { + gles2::cmds::BlendEquationSeparateiOES* c = + GetCmdSpace<gles2::cmds::BlendEquationSeparateiOES>(); + if (c) { + c->Init(buf, modeRGB, modeAlpha); + } +} + +void BlendFunciOES(GLuint buf, GLenum src, GLenum dst) { + 
gles2::cmds::BlendFunciOES* c = GetCmdSpace<gles2::cmds::BlendFunciOES>(); + if (c) { + c->Init(buf, src, dst); + } +} + +void BlendFuncSeparateiOES(GLuint buf, + GLenum srcRGB, + GLenum dstRGB, + GLenum srcAlpha, + GLenum dstAlpha) { + gles2::cmds::BlendFuncSeparateiOES* c = + GetCmdSpace<gles2::cmds::BlendFuncSeparateiOES>(); + if (c) { + c->Init(buf, srcRGB, dstRGB, srcAlpha, dstAlpha); + } +} + +void ColorMaskiOES(GLuint buf, + GLboolean r, + GLboolean g, + GLboolean b, + GLboolean a) { + gles2::cmds::ColorMaskiOES* c = GetCmdSpace<gles2::cmds::ColorMaskiOES>(); + if (c) { + c->Init(buf, r, g, b, a); + } +} + +void IsEnablediOES(GLenum target, + GLuint index, + uint32_t result_shm_id, + uint32_t result_shm_offset) { + gles2::cmds::IsEnablediOES* c = GetCmdSpace<gles2::cmds::IsEnablediOES>(); + if (c) { + c->Init(target, index, result_shm_id, result_shm_offset); + } +} + #endif // GPU_COMMAND_BUFFER_CLIENT_GLES2_CMD_HELPER_AUTOGEN_H_ diff --git a/chromium/gpu/command_buffer/client/gles2_implementation.cc b/chromium/gpu/command_buffer/client/gles2_implementation.cc index c5d65e65595..49d050e7e0d 100644 --- a/chromium/gpu/command_buffer/client/gles2_implementation.cc +++ b/chromium/gpu/command_buffer/client/gles2_implementation.cc @@ -647,6 +647,24 @@ void GLES2Implementation::Disable(GLenum cap) { CheckGLError(); } +void GLES2Implementation::DisableiOES(GLenum target, GLuint index) { + GPU_CLIENT_SINGLE_THREAD_CHECK(); + GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glDisableiOES(" + << GLES2Util::GetStringEnum(target) << ", " << index + << ")"); + if (index == 0u && target == GL_BLEND) { + bool changed = false; + DCHECK(target == GL_BLEND); + if (!state_.SetCapabilityState(target, false, &changed) || changed) { + helper_->DisableiOES(target, index); + } + } else { + helper_->DisableiOES(target, index); + } + + CheckGLError(); +} + void GLES2Implementation::Enable(GLenum cap) { GPU_CLIENT_SINGLE_THREAD_CHECK(); GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glEnable(" 
@@ -658,6 +676,24 @@ void GLES2Implementation::Enable(GLenum cap) { CheckGLError(); } +void GLES2Implementation::EnableiOES(GLenum target, GLuint index) { + GPU_CLIENT_SINGLE_THREAD_CHECK(); + GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glEnableiOES(" + << GLES2Util::GetStringEnum(target) << ", " << index + << ")"); + if (index == 0u && target == GL_BLEND) { + bool changed = false; + DCHECK(target == GL_BLEND); + if (!state_.SetCapabilityState(target, true, &changed) || changed) { + helper_->EnableiOES(target, index); + } + } else { + helper_->EnableiOES(target, index); + } + + CheckGLError(); +} + GLboolean GLES2Implementation::IsEnabled(GLenum cap) { GPU_CLIENT_SINGLE_THREAD_CHECK(); GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glIsEnabled(" @@ -680,6 +716,24 @@ GLboolean GLES2Implementation::IsEnabled(GLenum cap) { return state; } +GLboolean GLES2Implementation::IsEnablediOES(GLenum target, GLuint index) { + GPU_CLIENT_SINGLE_THREAD_CHECK(); + GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glIsEnablediOES(" + << GLES2Util::GetStringCapability(target) << ", " << index + << ")"); + bool state = false; + typedef cmds::IsEnabled::Result Result; + auto result = GetResultAs<Result>(); + *result = 0; + helper_->IsEnablediOES(target, index, GetResultShmId(), result.offset()); + WaitForCmd(); + state = (*result) != 0; + + GPU_CLIENT_LOG("returned " << state); + CheckGLError(); + return state; +} + bool GLES2Implementation::GetHelper(GLenum pname, GLint* params) { // TODO(zmo): For all the BINDING points, there is a possibility where // resources are shared among multiple contexts, that the cached points @@ -1105,6 +1159,13 @@ bool GLES2Implementation::GetBooleanvHelper(GLenum pname, GLboolean* params) { return true; } +bool GLES2Implementation::GetBooleani_vHelper(GLenum pname, + GLuint index, + GLboolean* data) { + // TODO(zmo): Implement client side caching. 
+ return false; +} + bool GLES2Implementation::GetFloatvHelper(GLenum pname, GLfloat* params) { // TODO(gman): Make this handle pnames that return more than 1 value. switch (pname) { diff --git a/chromium/gpu/command_buffer/client/gles2_implementation.h b/chromium/gpu/command_buffer/client/gles2_implementation.h index e0db2688e6b..5b828ea7208 100644 --- a/chromium/gpu/command_buffer/client/gles2_implementation.h +++ b/chromium/gpu/command_buffer/client/gles2_implementation.h @@ -599,6 +599,7 @@ class GLES2_IMPL_EXPORT GLES2Implementation : public GLES2Interface, bool GetHelper(GLenum pname, GLint* params); GLuint GetBoundBufferHelper(GLenum target); bool GetBooleanvHelper(GLenum pname, GLboolean* params); + bool GetBooleani_vHelper(GLenum pname, GLuint index, GLboolean* data); bool GetBufferParameteri64vHelper( GLenum target, GLenum pname, GLint64* params); bool GetBufferParameterivHelper(GLenum target, GLenum pname, GLint* params); diff --git a/chromium/gpu/command_buffer/client/gles2_implementation_autogen.h b/chromium/gpu/command_buffer/client/gles2_implementation_autogen.h index 6fb5046d5de..2ce5be504ca 100644 --- a/chromium/gpu/command_buffer/client/gles2_implementation_autogen.h +++ b/chromium/gpu/command_buffer/client/gles2_implementation_autogen.h @@ -316,6 +316,8 @@ GLint GetAttribLocation(GLuint program, const char* name) override; void GetBooleanv(GLenum pname, GLboolean* params) override; +void GetBooleani_v(GLenum pname, GLuint index, GLboolean* data) override; + void GetBufferParameteri64v(GLenum target, GLenum pname, GLint64* params) override; @@ -1291,4 +1293,30 @@ void BeginBatchReadAccessSharedImageCHROMIUM() override; void EndBatchReadAccessSharedImageCHROMIUM() override; +void EnableiOES(GLenum target, GLuint index) override; + +void DisableiOES(GLenum target, GLuint index) override; + +void BlendEquationiOES(GLuint buf, GLenum mode) override; + +void BlendEquationSeparateiOES(GLuint buf, + GLenum modeRGB, + GLenum modeAlpha) override; + +void 
BlendFunciOES(GLuint buf, GLenum src, GLenum dst) override; + +void BlendFuncSeparateiOES(GLuint buf, + GLenum srcRGB, + GLenum dstRGB, + GLenum srcAlpha, + GLenum dstAlpha) override; + +void ColorMaskiOES(GLuint buf, + GLboolean r, + GLboolean g, + GLboolean b, + GLboolean a) override; + +GLboolean IsEnablediOES(GLenum target, GLuint index) override; + #endif // GPU_COMMAND_BUFFER_CLIENT_GLES2_IMPLEMENTATION_AUTOGEN_H_ diff --git a/chromium/gpu/command_buffer/client/gles2_implementation_impl_autogen.h b/chromium/gpu/command_buffer/client/gles2_implementation_impl_autogen.h index 69cd2bd09f6..6fa311c7a15 100644 --- a/chromium/gpu/command_buffer/client/gles2_implementation_impl_autogen.h +++ b/chromium/gpu/command_buffer/client/gles2_implementation_impl_autogen.h @@ -889,6 +889,34 @@ void GLES2Implementation::GetBooleanv(GLenum pname, GLboolean* params) { }); CheckGLError(); } +void GLES2Implementation::GetBooleani_v(GLenum pname, + GLuint index, + GLboolean* data) { + GPU_CLIENT_SINGLE_THREAD_CHECK(); + GPU_CLIENT_VALIDATE_DESTINATION_INITALIZATION(GLboolean, data); + GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glGetBooleani_v(" + << GLES2Util::GetStringIndexedGLState(pname) << ", " + << index << ", " << static_cast<const void*>(data) << ")"); + TRACE_EVENT0("gpu", "GLES2Implementation::GetBooleani_v"); + if (GetBooleani_vHelper(pname, index, data)) { + return; + } + typedef cmds::GetBooleani_v::Result Result; + ScopedResultPtr<Result> result = GetResultAs<Result>(); + if (!result) { + return; + } + result->SetNumResults(0); + helper_->GetBooleani_v(pname, index, GetResultShmId(), result.offset()); + WaitForCmd(); + result->CopyResult(data); + GPU_CLIENT_LOG_CODE_BLOCK({ + for (int32_t i = 0; i < result->GetNumResults(); ++i) { + GPU_CLIENT_LOG(" " << i << ": " << result->GetData()[i]); + } + }); + CheckGLError(); +} void GLES2Implementation::GetBufferParameteri64v(GLenum target, GLenum pname, GLint64* params) { @@ -3731,4 +3759,62 @@ void 
GLES2Implementation::EndBatchReadAccessSharedImageCHROMIUM() { CheckGLError(); } +void GLES2Implementation::BlendEquationiOES(GLuint buf, GLenum mode) { + GPU_CLIENT_SINGLE_THREAD_CHECK(); + GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glBlendEquationiOES(" << buf + << ", " << GLES2Util::GetStringEnum(mode) << ")"); + helper_->BlendEquationiOES(buf, mode); + CheckGLError(); +} + +void GLES2Implementation::BlendEquationSeparateiOES(GLuint buf, + GLenum modeRGB, + GLenum modeAlpha) { + GPU_CLIENT_SINGLE_THREAD_CHECK(); + GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glBlendEquationSeparateiOES(" + << buf << ", " << GLES2Util::GetStringEnum(modeRGB) << ", " + << GLES2Util::GetStringEnum(modeAlpha) << ")"); + helper_->BlendEquationSeparateiOES(buf, modeRGB, modeAlpha); + CheckGLError(); +} + +void GLES2Implementation::BlendFunciOES(GLuint buf, GLenum src, GLenum dst) { + GPU_CLIENT_SINGLE_THREAD_CHECK(); + GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glBlendFunciOES(" << buf << ", " + << GLES2Util::GetStringEnum(src) << ", " + << GLES2Util::GetStringEnum(dst) << ")"); + helper_->BlendFunciOES(buf, src, dst); + CheckGLError(); +} + +void GLES2Implementation::BlendFuncSeparateiOES(GLuint buf, + GLenum srcRGB, + GLenum dstRGB, + GLenum srcAlpha, + GLenum dstAlpha) { + GPU_CLIENT_SINGLE_THREAD_CHECK(); + GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glBlendFuncSeparateiOES(" << buf + << ", " << GLES2Util::GetStringEnum(srcRGB) << ", " + << GLES2Util::GetStringEnum(dstRGB) << ", " + << GLES2Util::GetStringEnum(srcAlpha) << ", " + << GLES2Util::GetStringEnum(dstAlpha) << ")"); + helper_->BlendFuncSeparateiOES(buf, srcRGB, dstRGB, srcAlpha, dstAlpha); + CheckGLError(); +} + +void GLES2Implementation::ColorMaskiOES(GLuint buf, + GLboolean r, + GLboolean g, + GLboolean b, + GLboolean a) { + GPU_CLIENT_SINGLE_THREAD_CHECK(); + GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glColorMaskiOES(" << buf << ", " + << GLES2Util::GetStringBool(r) << ", " + << GLES2Util::GetStringBool(g) << ", " + << 
GLES2Util::GetStringBool(b) << ", " + << GLES2Util::GetStringBool(a) << ")"); + helper_->ColorMaskiOES(buf, r, g, b, a); + CheckGLError(); +} + #endif // GPU_COMMAND_BUFFER_CLIENT_GLES2_IMPLEMENTATION_IMPL_AUTOGEN_H_ diff --git a/chromium/gpu/command_buffer/client/gles2_implementation_unittest_autogen.h b/chromium/gpu/command_buffer/client/gles2_implementation_unittest_autogen.h index 5205f3e98f7..c7c3b16a382 100644 --- a/chromium/gpu/command_buffer/client/gles2_implementation_unittest_autogen.h +++ b/chromium/gpu/command_buffer/client/gles2_implementation_unittest_autogen.h @@ -758,6 +758,24 @@ TEST_F(GLES2ImplementationTest, GetBooleanv) { EXPECT_EQ(static_cast<ResultType>(1), result); } +TEST_F(GLES2ImplementationTest, GetBooleani_v) { + struct Cmds { + cmds::GetBooleani_v cmd; + }; + typedef cmds::GetBooleani_v::Result::Type ResultType; + ResultType result = 0; + Cmds expected; + ExpectedMemoryInfo result1 = + GetExpectedResultMemory(sizeof(uint32_t) + sizeof(ResultType)); + expected.cmd.Init(123, 2, result1.id, result1.offset); + EXPECT_CALL(*command_buffer(), OnFlush()) + .WillOnce(SetMemory(result1.ptr, SizedResultHelper<ResultType>(1))) + .RetiresOnSaturation(); + gl_->GetBooleani_v(123, 2, &result); + EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected))); + EXPECT_EQ(static_cast<ResultType>(1), result); +} + TEST_F(GLES2ImplementationTest, GetBufferParameteri64v) { struct Cmds { cmds::GetBufferParameteri64v cmd; @@ -3124,4 +3142,81 @@ TEST_F(GLES2ImplementationTest, EndBatchReadAccessSharedImageCHROMIUM) { gl_->EndBatchReadAccessSharedImageCHROMIUM(); EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected))); } + +TEST_F(GLES2ImplementationTest, EnableiOES) { + struct Cmds { + cmds::EnableiOES cmd; + }; + Cmds expected; + expected.cmd.Init(1, 2); + + gl_->EnableiOES(1, 2); + EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected))); +} + +TEST_F(GLES2ImplementationTest, DisableiOES) { + struct Cmds { + cmds::DisableiOES cmd; + }; + Cmds 
expected; + expected.cmd.Init(1, 2); + + gl_->DisableiOES(1, 2); + EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected))); +} + +TEST_F(GLES2ImplementationTest, BlendEquationiOES) { + struct Cmds { + cmds::BlendEquationiOES cmd; + }; + Cmds expected; + expected.cmd.Init(1, GL_FUNC_SUBTRACT); + + gl_->BlendEquationiOES(1, GL_FUNC_SUBTRACT); + EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected))); +} + +TEST_F(GLES2ImplementationTest, BlendEquationSeparateiOES) { + struct Cmds { + cmds::BlendEquationSeparateiOES cmd; + }; + Cmds expected; + expected.cmd.Init(1, GL_FUNC_SUBTRACT, GL_FUNC_SUBTRACT); + + gl_->BlendEquationSeparateiOES(1, GL_FUNC_SUBTRACT, GL_FUNC_SUBTRACT); + EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected))); +} + +TEST_F(GLES2ImplementationTest, BlendFunciOES) { + struct Cmds { + cmds::BlendFunciOES cmd; + }; + Cmds expected; + expected.cmd.Init(1, 2, 3); + + gl_->BlendFunciOES(1, 2, 3); + EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected))); +} + +TEST_F(GLES2ImplementationTest, BlendFuncSeparateiOES) { + struct Cmds { + cmds::BlendFuncSeparateiOES cmd; + }; + Cmds expected; + expected.cmd.Init(1, 2, 3, 4, 5); + + gl_->BlendFuncSeparateiOES(1, 2, 3, 4, 5); + EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected))); +} + +TEST_F(GLES2ImplementationTest, ColorMaskiOES) { + struct Cmds { + cmds::ColorMaskiOES cmd; + }; + Cmds expected; + expected.cmd.Init(1, true, true, true, true); + + gl_->ColorMaskiOES(1, true, true, true, true); + EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected))); +} #endif // GPU_COMMAND_BUFFER_CLIENT_GLES2_IMPLEMENTATION_UNITTEST_AUTOGEN_H_ diff --git a/chromium/gpu/command_buffer/client/gles2_interface_autogen.h b/chromium/gpu/command_buffer/client/gles2_interface_autogen.h index 11954f6a4c4..a6cfcf3b536 100644 --- a/chromium/gpu/command_buffer/client/gles2_interface_autogen.h +++ b/chromium/gpu/command_buffer/client/gles2_interface_autogen.h @@ -235,6 +235,7 @@ virtual void 
GetAttachedShaders(GLuint program, GLuint* shaders) = 0; virtual GLint GetAttribLocation(GLuint program, const char* name) = 0; virtual void GetBooleanv(GLenum pname, GLboolean* params) = 0; +virtual void GetBooleani_v(GLenum pname, GLuint index, GLboolean* data) = 0; virtual void GetBufferParameteri64v(GLenum target, GLenum pname, GLint64* params) = 0; @@ -967,4 +968,22 @@ virtual void BeginSharedImageAccessDirectCHROMIUM(GLuint texture, virtual void EndSharedImageAccessDirectCHROMIUM(GLuint texture) = 0; virtual void BeginBatchReadAccessSharedImageCHROMIUM() = 0; virtual void EndBatchReadAccessSharedImageCHROMIUM() = 0; +virtual void EnableiOES(GLenum target, GLuint index) = 0; +virtual void DisableiOES(GLenum target, GLuint index) = 0; +virtual void BlendEquationiOES(GLuint buf, GLenum mode) = 0; +virtual void BlendEquationSeparateiOES(GLuint buf, + GLenum modeRGB, + GLenum modeAlpha) = 0; +virtual void BlendFunciOES(GLuint buf, GLenum src, GLenum dst) = 0; +virtual void BlendFuncSeparateiOES(GLuint buf, + GLenum srcRGB, + GLenum dstRGB, + GLenum srcAlpha, + GLenum dstAlpha) = 0; +virtual void ColorMaskiOES(GLuint buf, + GLboolean r, + GLboolean g, + GLboolean b, + GLboolean a) = 0; +virtual GLboolean IsEnablediOES(GLenum target, GLuint index) = 0; #endif // GPU_COMMAND_BUFFER_CLIENT_GLES2_INTERFACE_AUTOGEN_H_ diff --git a/chromium/gpu/command_buffer/client/gles2_interface_stub_autogen.h b/chromium/gpu/command_buffer/client/gles2_interface_stub_autogen.h index af3f5723eba..0af83a4f52b 100644 --- a/chromium/gpu/command_buffer/client/gles2_interface_stub_autogen.h +++ b/chromium/gpu/command_buffer/client/gles2_interface_stub_autogen.h @@ -232,6 +232,7 @@ void GetAttachedShaders(GLuint program, GLuint* shaders) override; GLint GetAttribLocation(GLuint program, const char* name) override; void GetBooleanv(GLenum pname, GLboolean* params) override; +void GetBooleani_v(GLenum pname, GLuint index, GLboolean* data) override; void GetBufferParameteri64v(GLenum target, 
GLenum pname, GLint64* params) override; @@ -937,4 +938,22 @@ void BeginSharedImageAccessDirectCHROMIUM(GLuint texture, GLenum mode) override; void EndSharedImageAccessDirectCHROMIUM(GLuint texture) override; void BeginBatchReadAccessSharedImageCHROMIUM() override; void EndBatchReadAccessSharedImageCHROMIUM() override; +void EnableiOES(GLenum target, GLuint index) override; +void DisableiOES(GLenum target, GLuint index) override; +void BlendEquationiOES(GLuint buf, GLenum mode) override; +void BlendEquationSeparateiOES(GLuint buf, + GLenum modeRGB, + GLenum modeAlpha) override; +void BlendFunciOES(GLuint buf, GLenum src, GLenum dst) override; +void BlendFuncSeparateiOES(GLuint buf, + GLenum srcRGB, + GLenum dstRGB, + GLenum srcAlpha, + GLenum dstAlpha) override; +void ColorMaskiOES(GLuint buf, + GLboolean r, + GLboolean g, + GLboolean b, + GLboolean a) override; +GLboolean IsEnablediOES(GLenum target, GLuint index) override; #endif // GPU_COMMAND_BUFFER_CLIENT_GLES2_INTERFACE_STUB_AUTOGEN_H_ diff --git a/chromium/gpu/command_buffer/client/gles2_interface_stub_impl_autogen.h b/chromium/gpu/command_buffer/client/gles2_interface_stub_impl_autogen.h index 1ba3ccf0850..2c8542ba0f0 100644 --- a/chromium/gpu/command_buffer/client/gles2_interface_stub_impl_autogen.h +++ b/chromium/gpu/command_buffer/client/gles2_interface_stub_impl_autogen.h @@ -274,6 +274,9 @@ GLint GLES2InterfaceStub::GetAttribLocation(GLuint /* program */, } void GLES2InterfaceStub::GetBooleanv(GLenum /* pname */, GLboolean* /* params */) {} +void GLES2InterfaceStub::GetBooleani_v(GLenum /* pname */, + GLuint /* index */, + GLboolean* /* data */) {} void GLES2InterfaceStub::GetBufferParameteri64v(GLenum /* target */, GLenum /* pname */, GLint64* /* params */) {} @@ -1251,4 +1254,28 @@ void GLES2InterfaceStub::EndSharedImageAccessDirectCHROMIUM( GLuint /* texture */) {} void GLES2InterfaceStub::BeginBatchReadAccessSharedImageCHROMIUM() {} void GLES2InterfaceStub::EndBatchReadAccessSharedImageCHROMIUM() 
{} +void GLES2InterfaceStub::EnableiOES(GLenum /* target */, GLuint /* index */) {} +void GLES2InterfaceStub::DisableiOES(GLenum /* target */, GLuint /* index */) {} +void GLES2InterfaceStub::BlendEquationiOES(GLuint /* buf */, + GLenum /* mode */) {} +void GLES2InterfaceStub::BlendEquationSeparateiOES(GLuint /* buf */, + GLenum /* modeRGB */, + GLenum /* modeAlpha */) {} +void GLES2InterfaceStub::BlendFunciOES(GLuint /* buf */, + GLenum /* src */, + GLenum /* dst */) {} +void GLES2InterfaceStub::BlendFuncSeparateiOES(GLuint /* buf */, + GLenum /* srcRGB */, + GLenum /* dstRGB */, + GLenum /* srcAlpha */, + GLenum /* dstAlpha */) {} +void GLES2InterfaceStub::ColorMaskiOES(GLuint /* buf */, + GLboolean /* r */, + GLboolean /* g */, + GLboolean /* b */, + GLboolean /* a */) {} +GLboolean GLES2InterfaceStub::IsEnablediOES(GLenum /* target */, + GLuint /* index */) { + return 0; +} #endif // GPU_COMMAND_BUFFER_CLIENT_GLES2_INTERFACE_STUB_IMPL_AUTOGEN_H_ diff --git a/chromium/gpu/command_buffer/client/gles2_trace_implementation_autogen.h b/chromium/gpu/command_buffer/client/gles2_trace_implementation_autogen.h index 5091689bd0e..d3d3e45c124 100644 --- a/chromium/gpu/command_buffer/client/gles2_trace_implementation_autogen.h +++ b/chromium/gpu/command_buffer/client/gles2_trace_implementation_autogen.h @@ -232,6 +232,7 @@ void GetAttachedShaders(GLuint program, GLuint* shaders) override; GLint GetAttribLocation(GLuint program, const char* name) override; void GetBooleanv(GLenum pname, GLboolean* params) override; +void GetBooleani_v(GLenum pname, GLuint index, GLboolean* data) override; void GetBufferParameteri64v(GLenum target, GLenum pname, GLint64* params) override; @@ -937,4 +938,22 @@ void BeginSharedImageAccessDirectCHROMIUM(GLuint texture, GLenum mode) override; void EndSharedImageAccessDirectCHROMIUM(GLuint texture) override; void BeginBatchReadAccessSharedImageCHROMIUM() override; void EndBatchReadAccessSharedImageCHROMIUM() override; +void EnableiOES(GLenum 
target, GLuint index) override; +void DisableiOES(GLenum target, GLuint index) override; +void BlendEquationiOES(GLuint buf, GLenum mode) override; +void BlendEquationSeparateiOES(GLuint buf, + GLenum modeRGB, + GLenum modeAlpha) override; +void BlendFunciOES(GLuint buf, GLenum src, GLenum dst) override; +void BlendFuncSeparateiOES(GLuint buf, + GLenum srcRGB, + GLenum dstRGB, + GLenum srcAlpha, + GLenum dstAlpha) override; +void ColorMaskiOES(GLuint buf, + GLboolean r, + GLboolean g, + GLboolean b, + GLboolean a) override; +GLboolean IsEnablediOES(GLenum target, GLuint index) override; #endif // GPU_COMMAND_BUFFER_CLIENT_GLES2_TRACE_IMPLEMENTATION_AUTOGEN_H_ diff --git a/chromium/gpu/command_buffer/client/gles2_trace_implementation_impl_autogen.h b/chromium/gpu/command_buffer/client/gles2_trace_implementation_impl_autogen.h index ce0e76e739b..bc4518d66f7 100644 --- a/chromium/gpu/command_buffer/client/gles2_trace_implementation_impl_autogen.h +++ b/chromium/gpu/command_buffer/client/gles2_trace_implementation_impl_autogen.h @@ -592,6 +592,13 @@ void GLES2TraceImplementation::GetBooleanv(GLenum pname, GLboolean* params) { gl_->GetBooleanv(pname, params); } +void GLES2TraceImplementation::GetBooleani_v(GLenum pname, + GLuint index, + GLboolean* data) { + TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::GetBooleani_v"); + gl_->GetBooleani_v(pname, index, data); +} + void GLES2TraceImplementation::GetBufferParameteri64v(GLenum target, GLenum pname, GLint64* params) { @@ -2651,4 +2658,56 @@ void GLES2TraceImplementation::EndBatchReadAccessSharedImageCHROMIUM() { gl_->EndBatchReadAccessSharedImageCHROMIUM(); } +void GLES2TraceImplementation::EnableiOES(GLenum target, GLuint index) { + TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::EnableiOES"); + gl_->EnableiOES(target, index); +} + +void GLES2TraceImplementation::DisableiOES(GLenum target, GLuint index) { + TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::DisableiOES"); + gl_->DisableiOES(target, index); +} + 
+void GLES2TraceImplementation::BlendEquationiOES(GLuint buf, GLenum mode) { + TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::BlendEquationiOES"); + gl_->BlendEquationiOES(buf, mode); +} + +void GLES2TraceImplementation::BlendEquationSeparateiOES(GLuint buf, + GLenum modeRGB, + GLenum modeAlpha) { + TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::BlendEquationSeparateiOES"); + gl_->BlendEquationSeparateiOES(buf, modeRGB, modeAlpha); +} + +void GLES2TraceImplementation::BlendFunciOES(GLuint buf, + GLenum src, + GLenum dst) { + TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::BlendFunciOES"); + gl_->BlendFunciOES(buf, src, dst); +} + +void GLES2TraceImplementation::BlendFuncSeparateiOES(GLuint buf, + GLenum srcRGB, + GLenum dstRGB, + GLenum srcAlpha, + GLenum dstAlpha) { + TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::BlendFuncSeparateiOES"); + gl_->BlendFuncSeparateiOES(buf, srcRGB, dstRGB, srcAlpha, dstAlpha); +} + +void GLES2TraceImplementation::ColorMaskiOES(GLuint buf, + GLboolean r, + GLboolean g, + GLboolean b, + GLboolean a) { + TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::ColorMaskiOES"); + gl_->ColorMaskiOES(buf, r, g, b, a); +} + +GLboolean GLES2TraceImplementation::IsEnablediOES(GLenum target, GLuint index) { + TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::IsEnablediOES"); + return gl_->IsEnablediOES(target, index); +} + #endif // GPU_COMMAND_BUFFER_CLIENT_GLES2_TRACE_IMPLEMENTATION_IMPL_AUTOGEN_H_ diff --git a/chromium/gpu/command_buffer/client/implementation_base.cc b/chromium/gpu/command_buffer/client/implementation_base.cc index 15ae10c9ad1..a2404fd65a7 100644 --- a/chromium/gpu/command_buffer/client/implementation_base.cc +++ b/chromium/gpu/command_buffer/client/implementation_base.cc @@ -7,6 +7,7 @@ #include <algorithm> #include "base/bind.h" +#include "base/logging.h" #include "base/strings/stringprintf.h" #include "base/trace_event/memory_dump_manager.h" #include "base/trace_event/trace_event.h" diff --git 
a/chromium/gpu/command_buffer/client/raster_implementation.cc b/chromium/gpu/command_buffer/client/raster_implementation.cc index 92057b8752f..f1b93c14f5f 100644 --- a/chromium/gpu/command_buffer/client/raster_implementation.cc +++ b/chromium/gpu/command_buffer/client/raster_implementation.cc @@ -991,10 +991,6 @@ void RasterImplementation::GetQueryObjectui64vEXT(GLuint id, void* RasterImplementation::MapRasterCHROMIUM(uint32_t size, uint32_t* size_allocated) { *size_allocated = 0u; - if (size < 0) { - SetGLError(GL_INVALID_VALUE, "glMapRasterCHROMIUM", "negative size"); - return nullptr; - } if (raster_mapped_buffer_) { SetGLError(GL_INVALID_OPERATION, "glMapRasterCHROMIUM", "already mapped"); return nullptr; @@ -1010,10 +1006,6 @@ void* RasterImplementation::MapRasterCHROMIUM(uint32_t size, } void* RasterImplementation::MapFontBuffer(uint32_t size) { - if (size < 0) { - SetGLError(GL_INVALID_VALUE, "glMapFontBufferCHROMIUM", "negative size"); - return nullptr; - } if (font_mapped_buffer_) { SetGLError(GL_INVALID_OPERATION, "glMapFontBufferCHROMIUM", "already mapped"); @@ -1036,11 +1028,6 @@ void* RasterImplementation::MapFontBuffer(uint32_t size) { void RasterImplementation::UnmapRasterCHROMIUM(uint32_t raster_written_size, uint32_t total_written_size) { - if (total_written_size < 0) { - SetGLError(GL_INVALID_VALUE, "glUnmapRasterCHROMIUM", - "negative written_size"); - return; - } if (!raster_mapped_buffer_) { SetGLError(GL_INVALID_OPERATION, "glUnmapRasterCHROMIUM", "not mapped"); return; diff --git a/chromium/gpu/command_buffer/client/raster_implementation_gles.cc b/chromium/gpu/command_buffer/client/raster_implementation_gles.cc index 163e7c0a5e9..299dd4f9874 100644 --- a/chromium/gpu/command_buffer/client/raster_implementation_gles.cc +++ b/chromium/gpu/command_buffer/client/raster_implementation_gles.cc @@ -39,6 +39,8 @@ GLenum SkColorTypeToGLDataFormat(SkColorType color_type) { return GL_RGBA; case kBGRA_8888_SkColorType: return GL_BGRA_EXT; + case 
kGray_8_SkColorType: + return GL_LUMINANCE; default: DLOG(ERROR) << "Unknown SkColorType " << color_type; } @@ -50,6 +52,7 @@ GLenum SkColorTypeToGLDataType(SkColorType color_type) { switch (color_type) { case kRGBA_8888_SkColorType: case kBGRA_8888_SkColorType: + case kGray_8_SkColorType: return GL_UNSIGNED_BYTE; default: DLOG(ERROR) << "Unknown SkColorType " << color_type; @@ -170,17 +173,19 @@ void RasterImplementationGLES::WritePixels(const gpu::Mailbox& dest_mailbox, GLuint row_bytes, const SkImageInfo& src_info, const void* src_pixels) { - DCHECK_EQ(row_bytes, src_info.minRowBytes()); + DCHECK_GE(row_bytes, src_info.minRowBytes()); GLuint texture_id = CreateAndConsumeForGpuRaster(dest_mailbox); BeginSharedImageAccessDirectCHROMIUM( texture_id, GL_SHARED_IMAGE_ACCESS_MODE_READWRITE_CHROMIUM); + gl_->PixelStorei(GL_UNPACK_ROW_LENGTH, row_bytes / src_info.bytesPerPixel()); gl_->BindTexture(texture_target, texture_id); gl_->TexSubImage2D(texture_target, 0, dst_x_offset, dst_y_offset, src_info.width(), src_info.height(), SkColorTypeToGLDataFormat(src_info.colorType()), SkColorTypeToGLDataType(src_info.colorType()), src_pixels); gl_->BindTexture(texture_target, 0); + gl_->PixelStorei(GL_UNPACK_ROW_LENGTH, 0); EndSharedImageAccessDirectCHROMIUM(texture_id); DeleteGpuRasterTexture(texture_id); diff --git a/chromium/gpu/command_buffer/client/ring_buffer.h b/chromium/gpu/command_buffer/client/ring_buffer.h index f0260979f33..faaef510a9e 100644 --- a/chromium/gpu/command_buffer/client/ring_buffer.h +++ b/chromium/gpu/command_buffer/client/ring_buffer.h @@ -10,7 +10,6 @@ #include <stdint.h> #include "base/containers/circular_deque.h" -#include "base/logging.h" #include "base/macros.h" #include "gpu/gpu_export.h" diff --git a/chromium/gpu/command_buffer/client/shared_image_interface.cc b/chromium/gpu/command_buffer/client/shared_image_interface.cc index 8b340c00ca3..1830ef08730 100644 --- a/chromium/gpu/command_buffer/client/shared_image_interface.cc +++ 
b/chromium/gpu/command_buffer/client/shared_image_interface.cc @@ -10,4 +10,7 @@ uint32_t SharedImageInterface::UsageForMailbox(const Mailbox& mailbox) { return 0u; } +void SharedImageInterface::NotifyMailboxAdded(const Mailbox& /*mailbox*/, + uint32_t /*usage*/) {} + } // namespace gpu diff --git a/chromium/gpu/command_buffer/client/shared_image_interface.h b/chromium/gpu/command_buffer/client/shared_image_interface.h index 18369b51acd..ade24e373b9 100644 --- a/chromium/gpu/command_buffer/client/shared_image_interface.h +++ b/chromium/gpu/command_buffer/client/shared_image_interface.h @@ -149,7 +149,9 @@ class GPU_EXPORT SharedImageInterface { // wrapping it in GpuMemoryBufferHandle and then creating GpuMemoryBuffer from // that handle. virtual void RegisterSysmemBufferCollection(gfx::SysmemBufferCollectionId id, - zx::channel token) = 0; + zx::channel token, + gfx::BufferFormat format, + gfx::BufferUsage usage) = 0; virtual void ReleaseSysmemBufferCollection( gfx::SysmemBufferCollectionId id) = 0; @@ -163,6 +165,11 @@ class GPU_EXPORT SharedImageInterface { // commands on this interface have executed on the service side. virtual SyncToken GenVerifiedSyncToken() = 0; + // Wait on this SyncToken to be released before executing new commands on + // this interface on the service side. This is an async wait for all the + // previous commands which will be sent to server on the next flush(). + virtual void WaitSyncToken(const gpu::SyncToken& sync_token) = 0; + // Flush the SharedImageInterface, issuing any deferred IPCs. virtual void Flush() = 0; @@ -181,6 +188,10 @@ class GPU_EXPORT SharedImageInterface { // Provides the usage flags supported by the given |mailbox|. This must have // been created using a SharedImageInterface on the same channel. virtual uint32_t UsageForMailbox(const Mailbox& mailbox); + + // Informs that existing |mailbox| with |usage| can be passed to + // DestroySharedImage(). 
+ virtual void NotifyMailboxAdded(const Mailbox& mailbox, uint32_t usage); }; } // namespace gpu diff --git a/chromium/gpu/command_buffer/client/transfer_buffer_unittest.cc b/chromium/gpu/command_buffer/client/transfer_buffer_unittest.cc index 8ca8609db9e..6bd739d0cc6 100644 --- a/chromium/gpu/command_buffer/client/transfer_buffer_unittest.cc +++ b/chromium/gpu/command_buffer/client/transfer_buffer_unittest.cc @@ -12,6 +12,7 @@ #include <memory> #include "base/compiler_specific.h" +#include "base/memory/aligned_memory.h" #include "gpu/command_buffer/client/client_test_helper.h" #include "gpu/command_buffer/client/cmd_buffer_helper.h" #include "gpu/command_buffer/common/command_buffer.h" @@ -218,11 +219,11 @@ TEST_F(TransferBufferTest, TooLargeAllocation) { TEST_F(TransferBufferTest, MemoryAlignmentAfterZeroAllocation) { Initialize(); void* ptr = transfer_buffer_->Alloc(0); - EXPECT_EQ((reinterpret_cast<uintptr_t>(ptr) & (kAlignment - 1)), 0u); + EXPECT_TRUE(base::IsAligned(ptr, kAlignment)); transfer_buffer_->FreePendingToken(ptr, helper_->InsertToken()); // Check that the pointer is aligned on the following allocation. 
ptr = transfer_buffer_->Alloc(4); - EXPECT_EQ((reinterpret_cast<uintptr_t>(ptr) & (kAlignment - 1)), 0u); + EXPECT_TRUE(base::IsAligned(ptr, kAlignment)); transfer_buffer_->FreePendingToken(ptr, helper_->InsertToken()); } diff --git a/chromium/gpu/command_buffer/common/BUILD.gn b/chromium/gpu/command_buffer/common/BUILD.gn index bcf81ea98b9..9696af1c3a3 100644 --- a/chromium/gpu/command_buffer/common/BUILD.gn +++ b/chromium/gpu/command_buffer/common/BUILD.gn @@ -34,6 +34,14 @@ group("raster") { } } +group("mailbox") { + if (is_component_build) { + public_deps = [ "//gpu:mailbox" ] + } else { + public_deps = [ ":mailbox_sources" ] + } +} + group("webgpu") { public_deps = [ ":webgpu_sources" ] } @@ -68,8 +76,6 @@ jumbo_source_set("common_sources") { "gpu_memory_buffer_support.h", "id_allocator.cc", "id_allocator.h", - "mailbox.cc", - "mailbox.h", "mailbox_holder.cc", "mailbox_holder.h", "presentation_feedback_utils.cc", @@ -92,6 +98,7 @@ jumbo_source_set("common_sources") { configs += [ "//gpu:gpu_implementation" ] public_deps = [ + ":mailbox", "//base/util/type_safety", "//mojo/public/cpp/system", "//ui/gfx:memory_buffer", @@ -105,6 +112,16 @@ jumbo_source_set("common_sources") { configs += [ "//third_party/khronos:khronos_headers" ] } +source_set("mailbox_sources") { + visibility = [ "//gpu/*" ] + defines = [ "IS_GPU_MAILBOX_IMPL" ] + sources = [ + "mailbox.cc", + "mailbox.h", + ] + deps = [ "//base" ] +} + source_set("gles2_sources") { visibility = [ "//gpu/*" ] diff --git a/chromium/gpu/command_buffer/common/cmd_buffer_common.h b/chromium/gpu/command_buffer/common/cmd_buffer_common.h index ae83e190a06..a4a01c35c3b 100644 --- a/chromium/gpu/command_buffer/common/cmd_buffer_common.h +++ b/chromium/gpu/command_buffer/common/cmd_buffer_common.h @@ -10,7 +10,7 @@ #include <stddef.h> #include <stdint.h> -#include "base/logging.h" +#include "base/check_op.h" #include "base/macros.h" #include "gpu/command_buffer/common/bitfield_helpers.h" #include "gpu/gpu_export.h" diff 
--git a/chromium/gpu/command_buffer/common/discardable_handle.cc b/chromium/gpu/command_buffer/common/discardable_handle.cc index c32bd8c20ba..8540d91ac28 100644 --- a/chromium/gpu/command_buffer/common/discardable_handle.cc +++ b/chromium/gpu/command_buffer/common/discardable_handle.cc @@ -5,6 +5,7 @@ #include "gpu/command_buffer/common/discardable_handle.h" #include "base/atomicops.h" +#include "base/logging.h" #include "gpu/command_buffer/common/buffer.h" namespace gpu { diff --git a/chromium/gpu/command_buffer/common/gles2_cmd_format.h b/chromium/gpu/command_buffer/common/gles2_cmd_format.h index 6bcbe51c0f3..ecb2dc4a03d 100644 --- a/chromium/gpu/command_buffer/common/gles2_cmd_format.h +++ b/chromium/gpu/command_buffer/common/gles2_cmd_format.h @@ -12,7 +12,7 @@ #include <string.h> #include "base/atomicops.h" -#include "base/logging.h" +#include "base/check_op.h" #include "base/macros.h" #include "base/rand_util.h" #include "base/trace_event/trace_event.h" diff --git a/chromium/gpu/command_buffer/common/gles2_cmd_format_autogen.h b/chromium/gpu/command_buffer/common/gles2_cmd_format_autogen.h index 05a48970424..2fc6747211d 100644 --- a/chromium/gpu/command_buffer/common/gles2_cmd_format_autogen.h +++ b/chromium/gpu/command_buffer/common/gles2_cmd_format_autogen.h @@ -4155,6 +4155,61 @@ static_assert(offsetof(GetBooleanv, params_shm_id) == 8, static_assert(offsetof(GetBooleanv, params_shm_offset) == 12, "offset of GetBooleanv params_shm_offset should be 12"); +struct GetBooleani_v { + typedef GetBooleani_v ValueType; + static const CommandId kCmdId = kGetBooleani_v; + static const cmd::ArgFlags kArgFlags = cmd::kFixed; + static const uint8_t cmd_flags = CMD_FLAG_SET_TRACE_LEVEL(3); + + typedef SizedResult<GLboolean> Result; + + static uint32_t ComputeSize() { + return static_cast<uint32_t>(sizeof(ValueType)); // NOLINT + } + + void SetHeader() { header.SetCmd<ValueType>(); } + + void Init(GLenum _pname, + GLuint _index, + uint32_t _data_shm_id, + uint32_t 
_data_shm_offset) { + SetHeader(); + pname = _pname; + index = _index; + data_shm_id = _data_shm_id; + data_shm_offset = _data_shm_offset; + } + + void* Set(void* cmd, + GLenum _pname, + GLuint _index, + uint32_t _data_shm_id, + uint32_t _data_shm_offset) { + static_cast<ValueType*>(cmd)->Init(_pname, _index, _data_shm_id, + _data_shm_offset); + return NextCmdAddress<ValueType>(cmd); + } + + gpu::CommandHeader header; + uint32_t pname; + uint32_t index; + uint32_t data_shm_id; + uint32_t data_shm_offset; +}; + +static_assert(sizeof(GetBooleani_v) == 20, + "size of GetBooleani_v should be 20"); +static_assert(offsetof(GetBooleani_v, header) == 0, + "offset of GetBooleani_v header should be 0"); +static_assert(offsetof(GetBooleani_v, pname) == 4, + "offset of GetBooleani_v pname should be 4"); +static_assert(offsetof(GetBooleani_v, index) == 8, + "offset of GetBooleani_v index should be 8"); +static_assert(offsetof(GetBooleani_v, data_shm_id) == 12, + "offset of GetBooleani_v data_shm_id should be 12"); +static_assert(offsetof(GetBooleani_v, data_shm_offset) == 16, + "offset of GetBooleani_v data_shm_offset should be 16"); + struct GetBufferParameteri64v { typedef GetBufferParameteri64v ValueType; static const CommandId kCmdId = kGetBufferParameteri64v; @@ -16777,4 +16832,367 @@ static_assert( offsetof(EndBatchReadAccessSharedImageCHROMIUM, header) == 0, "offset of EndBatchReadAccessSharedImageCHROMIUM header should be 0"); +struct EnableiOES { + typedef EnableiOES ValueType; + static const CommandId kCmdId = kEnableiOES; + static const cmd::ArgFlags kArgFlags = cmd::kFixed; + static const uint8_t cmd_flags = CMD_FLAG_SET_TRACE_LEVEL(3); + + static uint32_t ComputeSize() { + return static_cast<uint32_t>(sizeof(ValueType)); // NOLINT + } + + void SetHeader() { header.SetCmd<ValueType>(); } + + void Init(GLenum _target, GLuint _index) { + SetHeader(); + target = _target; + index = _index; + } + + void* Set(void* cmd, GLenum _target, GLuint _index) { + 
static_cast<ValueType*>(cmd)->Init(_target, _index); + return NextCmdAddress<ValueType>(cmd); + } + + gpu::CommandHeader header; + uint32_t target; + uint32_t index; +}; + +static_assert(sizeof(EnableiOES) == 12, "size of EnableiOES should be 12"); +static_assert(offsetof(EnableiOES, header) == 0, + "offset of EnableiOES header should be 0"); +static_assert(offsetof(EnableiOES, target) == 4, + "offset of EnableiOES target should be 4"); +static_assert(offsetof(EnableiOES, index) == 8, + "offset of EnableiOES index should be 8"); + +struct DisableiOES { + typedef DisableiOES ValueType; + static const CommandId kCmdId = kDisableiOES; + static const cmd::ArgFlags kArgFlags = cmd::kFixed; + static const uint8_t cmd_flags = CMD_FLAG_SET_TRACE_LEVEL(3); + + static uint32_t ComputeSize() { + return static_cast<uint32_t>(sizeof(ValueType)); // NOLINT + } + + void SetHeader() { header.SetCmd<ValueType>(); } + + void Init(GLenum _target, GLuint _index) { + SetHeader(); + target = _target; + index = _index; + } + + void* Set(void* cmd, GLenum _target, GLuint _index) { + static_cast<ValueType*>(cmd)->Init(_target, _index); + return NextCmdAddress<ValueType>(cmd); + } + + gpu::CommandHeader header; + uint32_t target; + uint32_t index; +}; + +static_assert(sizeof(DisableiOES) == 12, "size of DisableiOES should be 12"); +static_assert(offsetof(DisableiOES, header) == 0, + "offset of DisableiOES header should be 0"); +static_assert(offsetof(DisableiOES, target) == 4, + "offset of DisableiOES target should be 4"); +static_assert(offsetof(DisableiOES, index) == 8, + "offset of DisableiOES index should be 8"); + +struct BlendEquationiOES { + typedef BlendEquationiOES ValueType; + static const CommandId kCmdId = kBlendEquationiOES; + static const cmd::ArgFlags kArgFlags = cmd::kFixed; + static const uint8_t cmd_flags = CMD_FLAG_SET_TRACE_LEVEL(3); + + static uint32_t ComputeSize() { + return static_cast<uint32_t>(sizeof(ValueType)); // NOLINT + } + + void SetHeader() { 
header.SetCmd<ValueType>(); } + + void Init(GLuint _buf, GLenum _mode) { + SetHeader(); + buf = _buf; + mode = _mode; + } + + void* Set(void* cmd, GLuint _buf, GLenum _mode) { + static_cast<ValueType*>(cmd)->Init(_buf, _mode); + return NextCmdAddress<ValueType>(cmd); + } + + gpu::CommandHeader header; + uint32_t buf; + uint32_t mode; +}; + +static_assert(sizeof(BlendEquationiOES) == 12, + "size of BlendEquationiOES should be 12"); +static_assert(offsetof(BlendEquationiOES, header) == 0, + "offset of BlendEquationiOES header should be 0"); +static_assert(offsetof(BlendEquationiOES, buf) == 4, + "offset of BlendEquationiOES buf should be 4"); +static_assert(offsetof(BlendEquationiOES, mode) == 8, + "offset of BlendEquationiOES mode should be 8"); + +struct BlendEquationSeparateiOES { + typedef BlendEquationSeparateiOES ValueType; + static const CommandId kCmdId = kBlendEquationSeparateiOES; + static const cmd::ArgFlags kArgFlags = cmd::kFixed; + static const uint8_t cmd_flags = CMD_FLAG_SET_TRACE_LEVEL(3); + + static uint32_t ComputeSize() { + return static_cast<uint32_t>(sizeof(ValueType)); // NOLINT + } + + void SetHeader() { header.SetCmd<ValueType>(); } + + void Init(GLuint _buf, GLenum _modeRGB, GLenum _modeAlpha) { + SetHeader(); + buf = _buf; + modeRGB = _modeRGB; + modeAlpha = _modeAlpha; + } + + void* Set(void* cmd, GLuint _buf, GLenum _modeRGB, GLenum _modeAlpha) { + static_cast<ValueType*>(cmd)->Init(_buf, _modeRGB, _modeAlpha); + return NextCmdAddress<ValueType>(cmd); + } + + gpu::CommandHeader header; + uint32_t buf; + uint32_t modeRGB; + uint32_t modeAlpha; +}; + +static_assert(sizeof(BlendEquationSeparateiOES) == 16, + "size of BlendEquationSeparateiOES should be 16"); +static_assert(offsetof(BlendEquationSeparateiOES, header) == 0, + "offset of BlendEquationSeparateiOES header should be 0"); +static_assert(offsetof(BlendEquationSeparateiOES, buf) == 4, + "offset of BlendEquationSeparateiOES buf should be 4"); 
+static_assert(offsetof(BlendEquationSeparateiOES, modeRGB) == 8, + "offset of BlendEquationSeparateiOES modeRGB should be 8"); +static_assert(offsetof(BlendEquationSeparateiOES, modeAlpha) == 12, + "offset of BlendEquationSeparateiOES modeAlpha should be 12"); + +struct BlendFunciOES { + typedef BlendFunciOES ValueType; + static const CommandId kCmdId = kBlendFunciOES; + static const cmd::ArgFlags kArgFlags = cmd::kFixed; + static const uint8_t cmd_flags = CMD_FLAG_SET_TRACE_LEVEL(3); + + static uint32_t ComputeSize() { + return static_cast<uint32_t>(sizeof(ValueType)); // NOLINT + } + + void SetHeader() { header.SetCmd<ValueType>(); } + + void Init(GLuint _buf, GLenum _src, GLenum _dst) { + SetHeader(); + buf = _buf; + src = _src; + dst = _dst; + } + + void* Set(void* cmd, GLuint _buf, GLenum _src, GLenum _dst) { + static_cast<ValueType*>(cmd)->Init(_buf, _src, _dst); + return NextCmdAddress<ValueType>(cmd); + } + + gpu::CommandHeader header; + uint32_t buf; + uint32_t src; + uint32_t dst; +}; + +static_assert(sizeof(BlendFunciOES) == 16, + "size of BlendFunciOES should be 16"); +static_assert(offsetof(BlendFunciOES, header) == 0, + "offset of BlendFunciOES header should be 0"); +static_assert(offsetof(BlendFunciOES, buf) == 4, + "offset of BlendFunciOES buf should be 4"); +static_assert(offsetof(BlendFunciOES, src) == 8, + "offset of BlendFunciOES src should be 8"); +static_assert(offsetof(BlendFunciOES, dst) == 12, + "offset of BlendFunciOES dst should be 12"); + +struct BlendFuncSeparateiOES { + typedef BlendFuncSeparateiOES ValueType; + static const CommandId kCmdId = kBlendFuncSeparateiOES; + static const cmd::ArgFlags kArgFlags = cmd::kFixed; + static const uint8_t cmd_flags = CMD_FLAG_SET_TRACE_LEVEL(3); + + static uint32_t ComputeSize() { + return static_cast<uint32_t>(sizeof(ValueType)); // NOLINT + } + + void SetHeader() { header.SetCmd<ValueType>(); } + + void Init(GLuint _buf, + GLenum _srcRGB, + GLenum _dstRGB, + GLenum _srcAlpha, + GLenum _dstAlpha) 
{ + SetHeader(); + buf = _buf; + srcRGB = _srcRGB; + dstRGB = _dstRGB; + srcAlpha = _srcAlpha; + dstAlpha = _dstAlpha; + } + + void* Set(void* cmd, + GLuint _buf, + GLenum _srcRGB, + GLenum _dstRGB, + GLenum _srcAlpha, + GLenum _dstAlpha) { + static_cast<ValueType*>(cmd)->Init(_buf, _srcRGB, _dstRGB, _srcAlpha, + _dstAlpha); + return NextCmdAddress<ValueType>(cmd); + } + + gpu::CommandHeader header; + uint32_t buf; + uint32_t srcRGB; + uint32_t dstRGB; + uint32_t srcAlpha; + uint32_t dstAlpha; +}; + +static_assert(sizeof(BlendFuncSeparateiOES) == 24, + "size of BlendFuncSeparateiOES should be 24"); +static_assert(offsetof(BlendFuncSeparateiOES, header) == 0, + "offset of BlendFuncSeparateiOES header should be 0"); +static_assert(offsetof(BlendFuncSeparateiOES, buf) == 4, + "offset of BlendFuncSeparateiOES buf should be 4"); +static_assert(offsetof(BlendFuncSeparateiOES, srcRGB) == 8, + "offset of BlendFuncSeparateiOES srcRGB should be 8"); +static_assert(offsetof(BlendFuncSeparateiOES, dstRGB) == 12, + "offset of BlendFuncSeparateiOES dstRGB should be 12"); +static_assert(offsetof(BlendFuncSeparateiOES, srcAlpha) == 16, + "offset of BlendFuncSeparateiOES srcAlpha should be 16"); +static_assert(offsetof(BlendFuncSeparateiOES, dstAlpha) == 20, + "offset of BlendFuncSeparateiOES dstAlpha should be 20"); + +struct ColorMaskiOES { + typedef ColorMaskiOES ValueType; + static const CommandId kCmdId = kColorMaskiOES; + static const cmd::ArgFlags kArgFlags = cmd::kFixed; + static const uint8_t cmd_flags = CMD_FLAG_SET_TRACE_LEVEL(3); + + static uint32_t ComputeSize() { + return static_cast<uint32_t>(sizeof(ValueType)); // NOLINT + } + + void SetHeader() { header.SetCmd<ValueType>(); } + + void Init(GLuint _buf, + GLboolean _r, + GLboolean _g, + GLboolean _b, + GLboolean _a) { + SetHeader(); + buf = _buf; + r = _r; + g = _g; + b = _b; + a = _a; + } + + void* Set(void* cmd, + GLuint _buf, + GLboolean _r, + GLboolean _g, + GLboolean _b, + GLboolean _a) { + 
static_cast<ValueType*>(cmd)->Init(_buf, _r, _g, _b, _a); + return NextCmdAddress<ValueType>(cmd); + } + + gpu::CommandHeader header; + uint32_t buf; + uint32_t r; + uint32_t g; + uint32_t b; + uint32_t a; +}; + +static_assert(sizeof(ColorMaskiOES) == 24, + "size of ColorMaskiOES should be 24"); +static_assert(offsetof(ColorMaskiOES, header) == 0, + "offset of ColorMaskiOES header should be 0"); +static_assert(offsetof(ColorMaskiOES, buf) == 4, + "offset of ColorMaskiOES buf should be 4"); +static_assert(offsetof(ColorMaskiOES, r) == 8, + "offset of ColorMaskiOES r should be 8"); +static_assert(offsetof(ColorMaskiOES, g) == 12, + "offset of ColorMaskiOES g should be 12"); +static_assert(offsetof(ColorMaskiOES, b) == 16, + "offset of ColorMaskiOES b should be 16"); +static_assert(offsetof(ColorMaskiOES, a) == 20, + "offset of ColorMaskiOES a should be 20"); + +struct IsEnablediOES { + typedef IsEnablediOES ValueType; + static const CommandId kCmdId = kIsEnablediOES; + static const cmd::ArgFlags kArgFlags = cmd::kFixed; + static const uint8_t cmd_flags = CMD_FLAG_SET_TRACE_LEVEL(3); + + typedef uint32_t Result; + + static uint32_t ComputeSize() { + return static_cast<uint32_t>(sizeof(ValueType)); // NOLINT + } + + void SetHeader() { header.SetCmd<ValueType>(); } + + void Init(GLenum _target, + GLuint _index, + uint32_t _result_shm_id, + uint32_t _result_shm_offset) { + SetHeader(); + target = _target; + index = _index; + result_shm_id = _result_shm_id; + result_shm_offset = _result_shm_offset; + } + + void* Set(void* cmd, + GLenum _target, + GLuint _index, + uint32_t _result_shm_id, + uint32_t _result_shm_offset) { + static_cast<ValueType*>(cmd)->Init(_target, _index, _result_shm_id, + _result_shm_offset); + return NextCmdAddress<ValueType>(cmd); + } + + gpu::CommandHeader header; + uint32_t target; + uint32_t index; + uint32_t result_shm_id; + uint32_t result_shm_offset; +}; + +static_assert(sizeof(IsEnablediOES) == 20, + "size of IsEnablediOES should be 20"); 
+static_assert(offsetof(IsEnablediOES, header) == 0, + "offset of IsEnablediOES header should be 0"); +static_assert(offsetof(IsEnablediOES, target) == 4, + "offset of IsEnablediOES target should be 4"); +static_assert(offsetof(IsEnablediOES, index) == 8, + "offset of IsEnablediOES index should be 8"); +static_assert(offsetof(IsEnablediOES, result_shm_id) == 12, + "offset of IsEnablediOES result_shm_id should be 12"); +static_assert(offsetof(IsEnablediOES, result_shm_offset) == 16, + "offset of IsEnablediOES result_shm_offset should be 16"); + #endif // GPU_COMMAND_BUFFER_COMMON_GLES2_CMD_FORMAT_AUTOGEN_H_ diff --git a/chromium/gpu/command_buffer/common/gles2_cmd_format_test_autogen.h b/chromium/gpu/command_buffer/common/gles2_cmd_format_test_autogen.h index 791783b0664..a07cf3607ae 100644 --- a/chromium/gpu/command_buffer/common/gles2_cmd_format_test_autogen.h +++ b/chromium/gpu/command_buffer/common/gles2_cmd_format_test_autogen.h @@ -1327,6 +1327,21 @@ TEST_F(GLES2FormatTest, GetBooleanv) { CheckBytesWrittenMatchesExpectedSize(next_cmd, sizeof(cmd)); } +TEST_F(GLES2FormatTest, GetBooleani_v) { + cmds::GetBooleani_v& cmd = *GetBufferAs<cmds::GetBooleani_v>(); + void* next_cmd = + cmd.Set(&cmd, static_cast<GLenum>(11), static_cast<GLuint>(12), + static_cast<uint32_t>(13), static_cast<uint32_t>(14)); + EXPECT_EQ(static_cast<uint32_t>(cmds::GetBooleani_v::kCmdId), + cmd.header.command); + EXPECT_EQ(sizeof(cmd), cmd.header.size * 4u); + EXPECT_EQ(static_cast<GLenum>(11), cmd.pname); + EXPECT_EQ(static_cast<GLuint>(12), cmd.index); + EXPECT_EQ(static_cast<uint32_t>(13), cmd.data_shm_id); + EXPECT_EQ(static_cast<uint32_t>(14), cmd.data_shm_offset); + CheckBytesWrittenMatchesExpectedSize(next_cmd, sizeof(cmd)); +} + TEST_F(GLES2FormatTest, GetBufferParameteri64v) { cmds::GetBufferParameteri64v& cmd = *GetBufferAs<cmds::GetBufferParameteri64v>(); @@ -5558,4 +5573,116 @@ TEST_F(GLES2FormatTest, EndBatchReadAccessSharedImageCHROMIUM) { 
CheckBytesWrittenMatchesExpectedSize(next_cmd, sizeof(cmd)); } +TEST_F(GLES2FormatTest, EnableiOES) { + cmds::EnableiOES& cmd = *GetBufferAs<cmds::EnableiOES>(); + void* next_cmd = + cmd.Set(&cmd, static_cast<GLenum>(11), static_cast<GLuint>(12)); + EXPECT_EQ(static_cast<uint32_t>(cmds::EnableiOES::kCmdId), + cmd.header.command); + EXPECT_EQ(sizeof(cmd), cmd.header.size * 4u); + EXPECT_EQ(static_cast<GLenum>(11), cmd.target); + EXPECT_EQ(static_cast<GLuint>(12), cmd.index); + CheckBytesWrittenMatchesExpectedSize(next_cmd, sizeof(cmd)); +} + +TEST_F(GLES2FormatTest, DisableiOES) { + cmds::DisableiOES& cmd = *GetBufferAs<cmds::DisableiOES>(); + void* next_cmd = + cmd.Set(&cmd, static_cast<GLenum>(11), static_cast<GLuint>(12)); + EXPECT_EQ(static_cast<uint32_t>(cmds::DisableiOES::kCmdId), + cmd.header.command); + EXPECT_EQ(sizeof(cmd), cmd.header.size * 4u); + EXPECT_EQ(static_cast<GLenum>(11), cmd.target); + EXPECT_EQ(static_cast<GLuint>(12), cmd.index); + CheckBytesWrittenMatchesExpectedSize(next_cmd, sizeof(cmd)); +} + +TEST_F(GLES2FormatTest, BlendEquationiOES) { + cmds::BlendEquationiOES& cmd = *GetBufferAs<cmds::BlendEquationiOES>(); + void* next_cmd = + cmd.Set(&cmd, static_cast<GLuint>(11), static_cast<GLenum>(12)); + EXPECT_EQ(static_cast<uint32_t>(cmds::BlendEquationiOES::kCmdId), + cmd.header.command); + EXPECT_EQ(sizeof(cmd), cmd.header.size * 4u); + EXPECT_EQ(static_cast<GLuint>(11), cmd.buf); + EXPECT_EQ(static_cast<GLenum>(12), cmd.mode); + CheckBytesWrittenMatchesExpectedSize(next_cmd, sizeof(cmd)); +} + +TEST_F(GLES2FormatTest, BlendEquationSeparateiOES) { + cmds::BlendEquationSeparateiOES& cmd = + *GetBufferAs<cmds::BlendEquationSeparateiOES>(); + void* next_cmd = cmd.Set(&cmd, static_cast<GLuint>(11), + static_cast<GLenum>(12), static_cast<GLenum>(13)); + EXPECT_EQ(static_cast<uint32_t>(cmds::BlendEquationSeparateiOES::kCmdId), + cmd.header.command); + EXPECT_EQ(sizeof(cmd), cmd.header.size * 4u); + EXPECT_EQ(static_cast<GLuint>(11), cmd.buf); + 
EXPECT_EQ(static_cast<GLenum>(12), cmd.modeRGB); + EXPECT_EQ(static_cast<GLenum>(13), cmd.modeAlpha); + CheckBytesWrittenMatchesExpectedSize(next_cmd, sizeof(cmd)); +} + +TEST_F(GLES2FormatTest, BlendFunciOES) { + cmds::BlendFunciOES& cmd = *GetBufferAs<cmds::BlendFunciOES>(); + void* next_cmd = cmd.Set(&cmd, static_cast<GLuint>(11), + static_cast<GLenum>(12), static_cast<GLenum>(13)); + EXPECT_EQ(static_cast<uint32_t>(cmds::BlendFunciOES::kCmdId), + cmd.header.command); + EXPECT_EQ(sizeof(cmd), cmd.header.size * 4u); + EXPECT_EQ(static_cast<GLuint>(11), cmd.buf); + EXPECT_EQ(static_cast<GLenum>(12), cmd.src); + EXPECT_EQ(static_cast<GLenum>(13), cmd.dst); + CheckBytesWrittenMatchesExpectedSize(next_cmd, sizeof(cmd)); +} + +TEST_F(GLES2FormatTest, BlendFuncSeparateiOES) { + cmds::BlendFuncSeparateiOES& cmd = + *GetBufferAs<cmds::BlendFuncSeparateiOES>(); + void* next_cmd = cmd.Set(&cmd, static_cast<GLuint>(11), + static_cast<GLenum>(12), static_cast<GLenum>(13), + static_cast<GLenum>(14), static_cast<GLenum>(15)); + EXPECT_EQ(static_cast<uint32_t>(cmds::BlendFuncSeparateiOES::kCmdId), + cmd.header.command); + EXPECT_EQ(sizeof(cmd), cmd.header.size * 4u); + EXPECT_EQ(static_cast<GLuint>(11), cmd.buf); + EXPECT_EQ(static_cast<GLenum>(12), cmd.srcRGB); + EXPECT_EQ(static_cast<GLenum>(13), cmd.dstRGB); + EXPECT_EQ(static_cast<GLenum>(14), cmd.srcAlpha); + EXPECT_EQ(static_cast<GLenum>(15), cmd.dstAlpha); + CheckBytesWrittenMatchesExpectedSize(next_cmd, sizeof(cmd)); +} + +TEST_F(GLES2FormatTest, ColorMaskiOES) { + cmds::ColorMaskiOES& cmd = *GetBufferAs<cmds::ColorMaskiOES>(); + void* next_cmd = + cmd.Set(&cmd, static_cast<GLuint>(11), static_cast<GLboolean>(12), + static_cast<GLboolean>(13), static_cast<GLboolean>(14), + static_cast<GLboolean>(15)); + EXPECT_EQ(static_cast<uint32_t>(cmds::ColorMaskiOES::kCmdId), + cmd.header.command); + EXPECT_EQ(sizeof(cmd), cmd.header.size * 4u); + EXPECT_EQ(static_cast<GLuint>(11), cmd.buf); + EXPECT_EQ(static_cast<GLboolean>(12), 
cmd.r); + EXPECT_EQ(static_cast<GLboolean>(13), cmd.g); + EXPECT_EQ(static_cast<GLboolean>(14), cmd.b); + EXPECT_EQ(static_cast<GLboolean>(15), cmd.a); + CheckBytesWrittenMatchesExpectedSize(next_cmd, sizeof(cmd)); +} + +TEST_F(GLES2FormatTest, IsEnablediOES) { + cmds::IsEnablediOES& cmd = *GetBufferAs<cmds::IsEnablediOES>(); + void* next_cmd = + cmd.Set(&cmd, static_cast<GLenum>(11), static_cast<GLuint>(12), + static_cast<uint32_t>(13), static_cast<uint32_t>(14)); + EXPECT_EQ(static_cast<uint32_t>(cmds::IsEnablediOES::kCmdId), + cmd.header.command); + EXPECT_EQ(sizeof(cmd), cmd.header.size * 4u); + EXPECT_EQ(static_cast<GLenum>(11), cmd.target); + EXPECT_EQ(static_cast<GLuint>(12), cmd.index); + EXPECT_EQ(static_cast<uint32_t>(13), cmd.result_shm_id); + EXPECT_EQ(static_cast<uint32_t>(14), cmd.result_shm_offset); + CheckBytesWrittenMatchesExpectedSize(next_cmd, sizeof(cmd)); +} + #endif // GPU_COMMAND_BUFFER_COMMON_GLES2_CMD_FORMAT_TEST_AUTOGEN_H_ diff --git a/chromium/gpu/command_buffer/common/gles2_cmd_ids_autogen.h b/chromium/gpu/command_buffer/common/gles2_cmd_ids_autogen.h index 5ed67e6e384..5d2242b00d5 100644 --- a/chromium/gpu/command_buffer/common/gles2_cmd_ids_autogen.h +++ b/chromium/gpu/command_buffer/common/gles2_cmd_ids_autogen.h @@ -98,255 +98,264 @@ OP(GetAttachedShaders) /* 339 */ \ OP(GetAttribLocation) /* 340 */ \ OP(GetBooleanv) /* 341 */ \ - OP(GetBufferParameteri64v) /* 342 */ \ - OP(GetBufferParameteriv) /* 343 */ \ - OP(GetError) /* 344 */ \ - OP(GetFloatv) /* 345 */ \ - OP(GetFragDataLocation) /* 346 */ \ - OP(GetFramebufferAttachmentParameteriv) /* 347 */ \ - OP(GetInteger64v) /* 348 */ \ - OP(GetIntegeri_v) /* 349 */ \ - OP(GetInteger64i_v) /* 350 */ \ - OP(GetIntegerv) /* 351 */ \ - OP(GetInternalformativ) /* 352 */ \ - OP(GetProgramiv) /* 353 */ \ - OP(GetProgramInfoLog) /* 354 */ \ - OP(GetRenderbufferParameteriv) /* 355 */ \ - OP(GetSamplerParameterfv) /* 356 */ \ - OP(GetSamplerParameteriv) /* 357 */ \ - OP(GetShaderiv) /* 358 */ \ - 
OP(GetShaderInfoLog) /* 359 */ \ - OP(GetShaderPrecisionFormat) /* 360 */ \ - OP(GetShaderSource) /* 361 */ \ - OP(GetString) /* 362 */ \ - OP(GetSynciv) /* 363 */ \ - OP(GetTexParameterfv) /* 364 */ \ - OP(GetTexParameteriv) /* 365 */ \ - OP(GetTransformFeedbackVarying) /* 366 */ \ - OP(GetUniformBlockIndex) /* 367 */ \ - OP(GetUniformfv) /* 368 */ \ - OP(GetUniformiv) /* 369 */ \ - OP(GetUniformuiv) /* 370 */ \ - OP(GetUniformIndices) /* 371 */ \ - OP(GetUniformLocation) /* 372 */ \ - OP(GetVertexAttribfv) /* 373 */ \ - OP(GetVertexAttribiv) /* 374 */ \ - OP(GetVertexAttribIiv) /* 375 */ \ - OP(GetVertexAttribIuiv) /* 376 */ \ - OP(GetVertexAttribPointerv) /* 377 */ \ - OP(Hint) /* 378 */ \ - OP(InvalidateFramebufferImmediate) /* 379 */ \ - OP(InvalidateSubFramebufferImmediate) /* 380 */ \ - OP(IsBuffer) /* 381 */ \ - OP(IsEnabled) /* 382 */ \ - OP(IsFramebuffer) /* 383 */ \ - OP(IsProgram) /* 384 */ \ - OP(IsRenderbuffer) /* 385 */ \ - OP(IsSampler) /* 386 */ \ - OP(IsShader) /* 387 */ \ - OP(IsSync) /* 388 */ \ - OP(IsTexture) /* 389 */ \ - OP(IsTransformFeedback) /* 390 */ \ - OP(LineWidth) /* 391 */ \ - OP(LinkProgram) /* 392 */ \ - OP(PauseTransformFeedback) /* 393 */ \ - OP(PixelStorei) /* 394 */ \ - OP(PolygonOffset) /* 395 */ \ - OP(ReadBuffer) /* 396 */ \ - OP(ReadPixels) /* 397 */ \ - OP(ReleaseShaderCompiler) /* 398 */ \ - OP(RenderbufferStorage) /* 399 */ \ - OP(ResumeTransformFeedback) /* 400 */ \ - OP(SampleCoverage) /* 401 */ \ - OP(SamplerParameterf) /* 402 */ \ - OP(SamplerParameterfvImmediate) /* 403 */ \ - OP(SamplerParameteri) /* 404 */ \ - OP(SamplerParameterivImmediate) /* 405 */ \ - OP(Scissor) /* 406 */ \ - OP(ShaderBinary) /* 407 */ \ - OP(ShaderSourceBucket) /* 408 */ \ - OP(MultiDrawBeginCHROMIUM) /* 409 */ \ - OP(MultiDrawEndCHROMIUM) /* 410 */ \ - OP(MultiDrawArraysCHROMIUM) /* 411 */ \ - OP(MultiDrawArraysInstancedCHROMIUM) /* 412 */ \ - OP(MultiDrawArraysInstancedBaseInstanceCHROMIUM) /* 413 */ \ - OP(MultiDrawElementsCHROMIUM) /* 
414 */ \ - OP(MultiDrawElementsInstancedCHROMIUM) /* 415 */ \ - OP(MultiDrawElementsInstancedBaseVertexBaseInstanceCHROMIUM) /* 416 */ \ - OP(StencilFunc) /* 417 */ \ - OP(StencilFuncSeparate) /* 418 */ \ - OP(StencilMask) /* 419 */ \ - OP(StencilMaskSeparate) /* 420 */ \ - OP(StencilOp) /* 421 */ \ - OP(StencilOpSeparate) /* 422 */ \ - OP(TexImage2D) /* 423 */ \ - OP(TexImage3D) /* 424 */ \ - OP(TexParameterf) /* 425 */ \ - OP(TexParameterfvImmediate) /* 426 */ \ - OP(TexParameteri) /* 427 */ \ - OP(TexParameterivImmediate) /* 428 */ \ - OP(TexStorage3D) /* 429 */ \ - OP(TexSubImage2D) /* 430 */ \ - OP(TexSubImage3D) /* 431 */ \ - OP(TransformFeedbackVaryingsBucket) /* 432 */ \ - OP(Uniform1f) /* 433 */ \ - OP(Uniform1fvImmediate) /* 434 */ \ - OP(Uniform1i) /* 435 */ \ - OP(Uniform1ivImmediate) /* 436 */ \ - OP(Uniform1ui) /* 437 */ \ - OP(Uniform1uivImmediate) /* 438 */ \ - OP(Uniform2f) /* 439 */ \ - OP(Uniform2fvImmediate) /* 440 */ \ - OP(Uniform2i) /* 441 */ \ - OP(Uniform2ivImmediate) /* 442 */ \ - OP(Uniform2ui) /* 443 */ \ - OP(Uniform2uivImmediate) /* 444 */ \ - OP(Uniform3f) /* 445 */ \ - OP(Uniform3fvImmediate) /* 446 */ \ - OP(Uniform3i) /* 447 */ \ - OP(Uniform3ivImmediate) /* 448 */ \ - OP(Uniform3ui) /* 449 */ \ - OP(Uniform3uivImmediate) /* 450 */ \ - OP(Uniform4f) /* 451 */ \ - OP(Uniform4fvImmediate) /* 452 */ \ - OP(Uniform4i) /* 453 */ \ - OP(Uniform4ivImmediate) /* 454 */ \ - OP(Uniform4ui) /* 455 */ \ - OP(Uniform4uivImmediate) /* 456 */ \ - OP(UniformBlockBinding) /* 457 */ \ - OP(UniformMatrix2fvImmediate) /* 458 */ \ - OP(UniformMatrix2x3fvImmediate) /* 459 */ \ - OP(UniformMatrix2x4fvImmediate) /* 460 */ \ - OP(UniformMatrix3fvImmediate) /* 461 */ \ - OP(UniformMatrix3x2fvImmediate) /* 462 */ \ - OP(UniformMatrix3x4fvImmediate) /* 463 */ \ - OP(UniformMatrix4fvImmediate) /* 464 */ \ - OP(UniformMatrix4x2fvImmediate) /* 465 */ \ - OP(UniformMatrix4x3fvImmediate) /* 466 */ \ - OP(UseProgram) /* 467 */ \ - OP(ValidateProgram) /* 468 */ \ - 
OP(VertexAttrib1f) /* 469 */ \ - OP(VertexAttrib1fvImmediate) /* 470 */ \ - OP(VertexAttrib2f) /* 471 */ \ - OP(VertexAttrib2fvImmediate) /* 472 */ \ - OP(VertexAttrib3f) /* 473 */ \ - OP(VertexAttrib3fvImmediate) /* 474 */ \ - OP(VertexAttrib4f) /* 475 */ \ - OP(VertexAttrib4fvImmediate) /* 476 */ \ - OP(VertexAttribI4i) /* 477 */ \ - OP(VertexAttribI4ivImmediate) /* 478 */ \ - OP(VertexAttribI4ui) /* 479 */ \ - OP(VertexAttribI4uivImmediate) /* 480 */ \ - OP(VertexAttribIPointer) /* 481 */ \ - OP(VertexAttribPointer) /* 482 */ \ - OP(Viewport) /* 483 */ \ - OP(WaitSync) /* 484 */ \ - OP(BlitFramebufferCHROMIUM) /* 485 */ \ - OP(RenderbufferStorageMultisampleCHROMIUM) /* 486 */ \ - OP(RenderbufferStorageMultisampleAdvancedAMD) /* 487 */ \ - OP(RenderbufferStorageMultisampleEXT) /* 488 */ \ - OP(FramebufferTexture2DMultisampleEXT) /* 489 */ \ - OP(TexStorage2DEXT) /* 490 */ \ - OP(GenQueriesEXTImmediate) /* 491 */ \ - OP(DeleteQueriesEXTImmediate) /* 492 */ \ - OP(QueryCounterEXT) /* 493 */ \ - OP(BeginQueryEXT) /* 494 */ \ - OP(BeginTransformFeedback) /* 495 */ \ - OP(EndQueryEXT) /* 496 */ \ - OP(EndTransformFeedback) /* 497 */ \ - OP(SetDisjointValueSyncCHROMIUM) /* 498 */ \ - OP(InsertEventMarkerEXT) /* 499 */ \ - OP(PushGroupMarkerEXT) /* 500 */ \ - OP(PopGroupMarkerEXT) /* 501 */ \ - OP(GenVertexArraysOESImmediate) /* 502 */ \ - OP(DeleteVertexArraysOESImmediate) /* 503 */ \ - OP(IsVertexArrayOES) /* 504 */ \ - OP(BindVertexArrayOES) /* 505 */ \ - OP(FramebufferParameteri) /* 506 */ \ - OP(BindImageTexture) /* 507 */ \ - OP(DispatchCompute) /* 508 */ \ - OP(DispatchComputeIndirect) /* 509 */ \ - OP(DrawArraysIndirect) /* 510 */ \ - OP(DrawElementsIndirect) /* 511 */ \ - OP(GetProgramInterfaceiv) /* 512 */ \ - OP(GetProgramResourceIndex) /* 513 */ \ - OP(GetProgramResourceName) /* 514 */ \ - OP(GetProgramResourceiv) /* 515 */ \ - OP(GetProgramResourceLocation) /* 516 */ \ - OP(MemoryBarrierEXT) /* 517 */ \ - OP(MemoryBarrierByRegion) /* 518 */ \ - 
OP(SwapBuffers) /* 519 */ \ - OP(GetMaxValueInBufferCHROMIUM) /* 520 */ \ - OP(EnableFeatureCHROMIUM) /* 521 */ \ - OP(MapBufferRange) /* 522 */ \ - OP(UnmapBuffer) /* 523 */ \ - OP(FlushMappedBufferRange) /* 524 */ \ - OP(ResizeCHROMIUM) /* 525 */ \ - OP(GetRequestableExtensionsCHROMIUM) /* 526 */ \ - OP(RequestExtensionCHROMIUM) /* 527 */ \ - OP(GetProgramInfoCHROMIUM) /* 528 */ \ - OP(GetUniformBlocksCHROMIUM) /* 529 */ \ - OP(GetTransformFeedbackVaryingsCHROMIUM) /* 530 */ \ - OP(GetUniformsES3CHROMIUM) /* 531 */ \ - OP(DescheduleUntilFinishedCHROMIUM) /* 532 */ \ - OP(GetTranslatedShaderSourceANGLE) /* 533 */ \ - OP(PostSubBufferCHROMIUM) /* 534 */ \ - OP(CopyTextureCHROMIUM) /* 535 */ \ - OP(CopySubTextureCHROMIUM) /* 536 */ \ - OP(DrawArraysInstancedANGLE) /* 537 */ \ - OP(DrawArraysInstancedBaseInstanceANGLE) /* 538 */ \ - OP(DrawElementsInstancedANGLE) /* 539 */ \ - OP(DrawElementsInstancedBaseVertexBaseInstanceANGLE) /* 540 */ \ - OP(VertexAttribDivisorANGLE) /* 541 */ \ - OP(ProduceTextureDirectCHROMIUMImmediate) /* 542 */ \ - OP(CreateAndConsumeTextureINTERNALImmediate) /* 543 */ \ - OP(BindUniformLocationCHROMIUMBucket) /* 544 */ \ - OP(BindTexImage2DCHROMIUM) /* 545 */ \ - OP(BindTexImage2DWithInternalformatCHROMIUM) /* 546 */ \ - OP(ReleaseTexImage2DCHROMIUM) /* 547 */ \ - OP(TraceBeginCHROMIUM) /* 548 */ \ - OP(TraceEndCHROMIUM) /* 549 */ \ - OP(DiscardFramebufferEXTImmediate) /* 550 */ \ - OP(LoseContextCHROMIUM) /* 551 */ \ - OP(UnpremultiplyAndDitherCopyCHROMIUM) /* 552 */ \ - OP(DrawBuffersEXTImmediate) /* 553 */ \ - OP(DiscardBackbufferCHROMIUM) /* 554 */ \ - OP(ScheduleOverlayPlaneCHROMIUM) /* 555 */ \ - OP(ScheduleCALayerSharedStateCHROMIUM) /* 556 */ \ - OP(ScheduleCALayerCHROMIUM) /* 557 */ \ - OP(ScheduleCALayerInUseQueryCHROMIUMImmediate) /* 558 */ \ - OP(CommitOverlayPlanesCHROMIUM) /* 559 */ \ - OP(FlushDriverCachesCHROMIUM) /* 560 */ \ - OP(ScheduleDCLayerCHROMIUM) /* 561 */ \ - OP(SetActiveURLCHROMIUM) /* 562 */ \ - 
OP(ContextVisibilityHintCHROMIUM) /* 563 */ \ - OP(CoverageModulationCHROMIUM) /* 564 */ \ - OP(BlendBarrierKHR) /* 565 */ \ - OP(BindFragDataLocationIndexedEXTBucket) /* 566 */ \ - OP(BindFragDataLocationEXTBucket) /* 567 */ \ - OP(GetFragDataIndexEXT) /* 568 */ \ - OP(UniformMatrix4fvStreamTextureMatrixCHROMIUMImmediate) /* 569 */ \ - OP(OverlayPromotionHintCHROMIUM) /* 570 */ \ - OP(SwapBuffersWithBoundsCHROMIUMImmediate) /* 571 */ \ - OP(SetDrawRectangleCHROMIUM) /* 572 */ \ - OP(SetEnableDCLayersCHROMIUM) /* 573 */ \ - OP(InitializeDiscardableTextureCHROMIUM) /* 574 */ \ - OP(UnlockDiscardableTextureCHROMIUM) /* 575 */ \ - OP(LockDiscardableTextureCHROMIUM) /* 576 */ \ - OP(TexStorage2DImageCHROMIUM) /* 577 */ \ - OP(SetColorSpaceMetadataCHROMIUM) /* 578 */ \ - OP(WindowRectanglesEXTImmediate) /* 579 */ \ - OP(CreateGpuFenceINTERNAL) /* 580 */ \ - OP(WaitGpuFenceCHROMIUM) /* 581 */ \ - OP(DestroyGpuFenceCHROMIUM) /* 582 */ \ - OP(SetReadbackBufferShadowAllocationINTERNAL) /* 583 */ \ - OP(FramebufferTextureMultiviewOVR) /* 584 */ \ - OP(MaxShaderCompilerThreadsKHR) /* 585 */ \ - OP(CreateAndTexStorage2DSharedImageINTERNALImmediate) /* 586 */ \ - OP(BeginSharedImageAccessDirectCHROMIUM) /* 587 */ \ - OP(EndSharedImageAccessDirectCHROMIUM) /* 588 */ \ - OP(BeginBatchReadAccessSharedImageCHROMIUM) /* 589 */ \ - OP(EndBatchReadAccessSharedImageCHROMIUM) /* 590 */ + OP(GetBooleani_v) /* 342 */ \ + OP(GetBufferParameteri64v) /* 343 */ \ + OP(GetBufferParameteriv) /* 344 */ \ + OP(GetError) /* 345 */ \ + OP(GetFloatv) /* 346 */ \ + OP(GetFragDataLocation) /* 347 */ \ + OP(GetFramebufferAttachmentParameteriv) /* 348 */ \ + OP(GetInteger64v) /* 349 */ \ + OP(GetIntegeri_v) /* 350 */ \ + OP(GetInteger64i_v) /* 351 */ \ + OP(GetIntegerv) /* 352 */ \ + OP(GetInternalformativ) /* 353 */ \ + OP(GetProgramiv) /* 354 */ \ + OP(GetProgramInfoLog) /* 355 */ \ + OP(GetRenderbufferParameteriv) /* 356 */ \ + OP(GetSamplerParameterfv) /* 357 */ \ + OP(GetSamplerParameteriv) /* 358 
*/ \ + OP(GetShaderiv) /* 359 */ \ + OP(GetShaderInfoLog) /* 360 */ \ + OP(GetShaderPrecisionFormat) /* 361 */ \ + OP(GetShaderSource) /* 362 */ \ + OP(GetString) /* 363 */ \ + OP(GetSynciv) /* 364 */ \ + OP(GetTexParameterfv) /* 365 */ \ + OP(GetTexParameteriv) /* 366 */ \ + OP(GetTransformFeedbackVarying) /* 367 */ \ + OP(GetUniformBlockIndex) /* 368 */ \ + OP(GetUniformfv) /* 369 */ \ + OP(GetUniformiv) /* 370 */ \ + OP(GetUniformuiv) /* 371 */ \ + OP(GetUniformIndices) /* 372 */ \ + OP(GetUniformLocation) /* 373 */ \ + OP(GetVertexAttribfv) /* 374 */ \ + OP(GetVertexAttribiv) /* 375 */ \ + OP(GetVertexAttribIiv) /* 376 */ \ + OP(GetVertexAttribIuiv) /* 377 */ \ + OP(GetVertexAttribPointerv) /* 378 */ \ + OP(Hint) /* 379 */ \ + OP(InvalidateFramebufferImmediate) /* 380 */ \ + OP(InvalidateSubFramebufferImmediate) /* 381 */ \ + OP(IsBuffer) /* 382 */ \ + OP(IsEnabled) /* 383 */ \ + OP(IsFramebuffer) /* 384 */ \ + OP(IsProgram) /* 385 */ \ + OP(IsRenderbuffer) /* 386 */ \ + OP(IsSampler) /* 387 */ \ + OP(IsShader) /* 388 */ \ + OP(IsSync) /* 389 */ \ + OP(IsTexture) /* 390 */ \ + OP(IsTransformFeedback) /* 391 */ \ + OP(LineWidth) /* 392 */ \ + OP(LinkProgram) /* 393 */ \ + OP(PauseTransformFeedback) /* 394 */ \ + OP(PixelStorei) /* 395 */ \ + OP(PolygonOffset) /* 396 */ \ + OP(ReadBuffer) /* 397 */ \ + OP(ReadPixels) /* 398 */ \ + OP(ReleaseShaderCompiler) /* 399 */ \ + OP(RenderbufferStorage) /* 400 */ \ + OP(ResumeTransformFeedback) /* 401 */ \ + OP(SampleCoverage) /* 402 */ \ + OP(SamplerParameterf) /* 403 */ \ + OP(SamplerParameterfvImmediate) /* 404 */ \ + OP(SamplerParameteri) /* 405 */ \ + OP(SamplerParameterivImmediate) /* 406 */ \ + OP(Scissor) /* 407 */ \ + OP(ShaderBinary) /* 408 */ \ + OP(ShaderSourceBucket) /* 409 */ \ + OP(MultiDrawBeginCHROMIUM) /* 410 */ \ + OP(MultiDrawEndCHROMIUM) /* 411 */ \ + OP(MultiDrawArraysCHROMIUM) /* 412 */ \ + OP(MultiDrawArraysInstancedCHROMIUM) /* 413 */ \ + OP(MultiDrawArraysInstancedBaseInstanceCHROMIUM) /* 414 */ \ 
+ OP(MultiDrawElementsCHROMIUM) /* 415 */ \ + OP(MultiDrawElementsInstancedCHROMIUM) /* 416 */ \ + OP(MultiDrawElementsInstancedBaseVertexBaseInstanceCHROMIUM) /* 417 */ \ + OP(StencilFunc) /* 418 */ \ + OP(StencilFuncSeparate) /* 419 */ \ + OP(StencilMask) /* 420 */ \ + OP(StencilMaskSeparate) /* 421 */ \ + OP(StencilOp) /* 422 */ \ + OP(StencilOpSeparate) /* 423 */ \ + OP(TexImage2D) /* 424 */ \ + OP(TexImage3D) /* 425 */ \ + OP(TexParameterf) /* 426 */ \ + OP(TexParameterfvImmediate) /* 427 */ \ + OP(TexParameteri) /* 428 */ \ + OP(TexParameterivImmediate) /* 429 */ \ + OP(TexStorage3D) /* 430 */ \ + OP(TexSubImage2D) /* 431 */ \ + OP(TexSubImage3D) /* 432 */ \ + OP(TransformFeedbackVaryingsBucket) /* 433 */ \ + OP(Uniform1f) /* 434 */ \ + OP(Uniform1fvImmediate) /* 435 */ \ + OP(Uniform1i) /* 436 */ \ + OP(Uniform1ivImmediate) /* 437 */ \ + OP(Uniform1ui) /* 438 */ \ + OP(Uniform1uivImmediate) /* 439 */ \ + OP(Uniform2f) /* 440 */ \ + OP(Uniform2fvImmediate) /* 441 */ \ + OP(Uniform2i) /* 442 */ \ + OP(Uniform2ivImmediate) /* 443 */ \ + OP(Uniform2ui) /* 444 */ \ + OP(Uniform2uivImmediate) /* 445 */ \ + OP(Uniform3f) /* 446 */ \ + OP(Uniform3fvImmediate) /* 447 */ \ + OP(Uniform3i) /* 448 */ \ + OP(Uniform3ivImmediate) /* 449 */ \ + OP(Uniform3ui) /* 450 */ \ + OP(Uniform3uivImmediate) /* 451 */ \ + OP(Uniform4f) /* 452 */ \ + OP(Uniform4fvImmediate) /* 453 */ \ + OP(Uniform4i) /* 454 */ \ + OP(Uniform4ivImmediate) /* 455 */ \ + OP(Uniform4ui) /* 456 */ \ + OP(Uniform4uivImmediate) /* 457 */ \ + OP(UniformBlockBinding) /* 458 */ \ + OP(UniformMatrix2fvImmediate) /* 459 */ \ + OP(UniformMatrix2x3fvImmediate) /* 460 */ \ + OP(UniformMatrix2x4fvImmediate) /* 461 */ \ + OP(UniformMatrix3fvImmediate) /* 462 */ \ + OP(UniformMatrix3x2fvImmediate) /* 463 */ \ + OP(UniformMatrix3x4fvImmediate) /* 464 */ \ + OP(UniformMatrix4fvImmediate) /* 465 */ \ + OP(UniformMatrix4x2fvImmediate) /* 466 */ \ + OP(UniformMatrix4x3fvImmediate) /* 467 */ \ + OP(UseProgram) /* 468 */ \ + 
OP(ValidateProgram) /* 469 */ \ + OP(VertexAttrib1f) /* 470 */ \ + OP(VertexAttrib1fvImmediate) /* 471 */ \ + OP(VertexAttrib2f) /* 472 */ \ + OP(VertexAttrib2fvImmediate) /* 473 */ \ + OP(VertexAttrib3f) /* 474 */ \ + OP(VertexAttrib3fvImmediate) /* 475 */ \ + OP(VertexAttrib4f) /* 476 */ \ + OP(VertexAttrib4fvImmediate) /* 477 */ \ + OP(VertexAttribI4i) /* 478 */ \ + OP(VertexAttribI4ivImmediate) /* 479 */ \ + OP(VertexAttribI4ui) /* 480 */ \ + OP(VertexAttribI4uivImmediate) /* 481 */ \ + OP(VertexAttribIPointer) /* 482 */ \ + OP(VertexAttribPointer) /* 483 */ \ + OP(Viewport) /* 484 */ \ + OP(WaitSync) /* 485 */ \ + OP(BlitFramebufferCHROMIUM) /* 486 */ \ + OP(RenderbufferStorageMultisampleCHROMIUM) /* 487 */ \ + OP(RenderbufferStorageMultisampleAdvancedAMD) /* 488 */ \ + OP(RenderbufferStorageMultisampleEXT) /* 489 */ \ + OP(FramebufferTexture2DMultisampleEXT) /* 490 */ \ + OP(TexStorage2DEXT) /* 491 */ \ + OP(GenQueriesEXTImmediate) /* 492 */ \ + OP(DeleteQueriesEXTImmediate) /* 493 */ \ + OP(QueryCounterEXT) /* 494 */ \ + OP(BeginQueryEXT) /* 495 */ \ + OP(BeginTransformFeedback) /* 496 */ \ + OP(EndQueryEXT) /* 497 */ \ + OP(EndTransformFeedback) /* 498 */ \ + OP(SetDisjointValueSyncCHROMIUM) /* 499 */ \ + OP(InsertEventMarkerEXT) /* 500 */ \ + OP(PushGroupMarkerEXT) /* 501 */ \ + OP(PopGroupMarkerEXT) /* 502 */ \ + OP(GenVertexArraysOESImmediate) /* 503 */ \ + OP(DeleteVertexArraysOESImmediate) /* 504 */ \ + OP(IsVertexArrayOES) /* 505 */ \ + OP(BindVertexArrayOES) /* 506 */ \ + OP(FramebufferParameteri) /* 507 */ \ + OP(BindImageTexture) /* 508 */ \ + OP(DispatchCompute) /* 509 */ \ + OP(DispatchComputeIndirect) /* 510 */ \ + OP(DrawArraysIndirect) /* 511 */ \ + OP(DrawElementsIndirect) /* 512 */ \ + OP(GetProgramInterfaceiv) /* 513 */ \ + OP(GetProgramResourceIndex) /* 514 */ \ + OP(GetProgramResourceName) /* 515 */ \ + OP(GetProgramResourceiv) /* 516 */ \ + OP(GetProgramResourceLocation) /* 517 */ \ + OP(MemoryBarrierEXT) /* 518 */ \ + 
OP(MemoryBarrierByRegion) /* 519 */ \ + OP(SwapBuffers) /* 520 */ \ + OP(GetMaxValueInBufferCHROMIUM) /* 521 */ \ + OP(EnableFeatureCHROMIUM) /* 522 */ \ + OP(MapBufferRange) /* 523 */ \ + OP(UnmapBuffer) /* 524 */ \ + OP(FlushMappedBufferRange) /* 525 */ \ + OP(ResizeCHROMIUM) /* 526 */ \ + OP(GetRequestableExtensionsCHROMIUM) /* 527 */ \ + OP(RequestExtensionCHROMIUM) /* 528 */ \ + OP(GetProgramInfoCHROMIUM) /* 529 */ \ + OP(GetUniformBlocksCHROMIUM) /* 530 */ \ + OP(GetTransformFeedbackVaryingsCHROMIUM) /* 531 */ \ + OP(GetUniformsES3CHROMIUM) /* 532 */ \ + OP(DescheduleUntilFinishedCHROMIUM) /* 533 */ \ + OP(GetTranslatedShaderSourceANGLE) /* 534 */ \ + OP(PostSubBufferCHROMIUM) /* 535 */ \ + OP(CopyTextureCHROMIUM) /* 536 */ \ + OP(CopySubTextureCHROMIUM) /* 537 */ \ + OP(DrawArraysInstancedANGLE) /* 538 */ \ + OP(DrawArraysInstancedBaseInstanceANGLE) /* 539 */ \ + OP(DrawElementsInstancedANGLE) /* 540 */ \ + OP(DrawElementsInstancedBaseVertexBaseInstanceANGLE) /* 541 */ \ + OP(VertexAttribDivisorANGLE) /* 542 */ \ + OP(ProduceTextureDirectCHROMIUMImmediate) /* 543 */ \ + OP(CreateAndConsumeTextureINTERNALImmediate) /* 544 */ \ + OP(BindUniformLocationCHROMIUMBucket) /* 545 */ \ + OP(BindTexImage2DCHROMIUM) /* 546 */ \ + OP(BindTexImage2DWithInternalformatCHROMIUM) /* 547 */ \ + OP(ReleaseTexImage2DCHROMIUM) /* 548 */ \ + OP(TraceBeginCHROMIUM) /* 549 */ \ + OP(TraceEndCHROMIUM) /* 550 */ \ + OP(DiscardFramebufferEXTImmediate) /* 551 */ \ + OP(LoseContextCHROMIUM) /* 552 */ \ + OP(UnpremultiplyAndDitherCopyCHROMIUM) /* 553 */ \ + OP(DrawBuffersEXTImmediate) /* 554 */ \ + OP(DiscardBackbufferCHROMIUM) /* 555 */ \ + OP(ScheduleOverlayPlaneCHROMIUM) /* 556 */ \ + OP(ScheduleCALayerSharedStateCHROMIUM) /* 557 */ \ + OP(ScheduleCALayerCHROMIUM) /* 558 */ \ + OP(ScheduleCALayerInUseQueryCHROMIUMImmediate) /* 559 */ \ + OP(CommitOverlayPlanesCHROMIUM) /* 560 */ \ + OP(FlushDriverCachesCHROMIUM) /* 561 */ \ + OP(ScheduleDCLayerCHROMIUM) /* 562 */ \ + 
OP(SetActiveURLCHROMIUM) /* 563 */ \ + OP(ContextVisibilityHintCHROMIUM) /* 564 */ \ + OP(CoverageModulationCHROMIUM) /* 565 */ \ + OP(BlendBarrierKHR) /* 566 */ \ + OP(BindFragDataLocationIndexedEXTBucket) /* 567 */ \ + OP(BindFragDataLocationEXTBucket) /* 568 */ \ + OP(GetFragDataIndexEXT) /* 569 */ \ + OP(UniformMatrix4fvStreamTextureMatrixCHROMIUMImmediate) /* 570 */ \ + OP(OverlayPromotionHintCHROMIUM) /* 571 */ \ + OP(SwapBuffersWithBoundsCHROMIUMImmediate) /* 572 */ \ + OP(SetDrawRectangleCHROMIUM) /* 573 */ \ + OP(SetEnableDCLayersCHROMIUM) /* 574 */ \ + OP(InitializeDiscardableTextureCHROMIUM) /* 575 */ \ + OP(UnlockDiscardableTextureCHROMIUM) /* 576 */ \ + OP(LockDiscardableTextureCHROMIUM) /* 577 */ \ + OP(TexStorage2DImageCHROMIUM) /* 578 */ \ + OP(SetColorSpaceMetadataCHROMIUM) /* 579 */ \ + OP(WindowRectanglesEXTImmediate) /* 580 */ \ + OP(CreateGpuFenceINTERNAL) /* 581 */ \ + OP(WaitGpuFenceCHROMIUM) /* 582 */ \ + OP(DestroyGpuFenceCHROMIUM) /* 583 */ \ + OP(SetReadbackBufferShadowAllocationINTERNAL) /* 584 */ \ + OP(FramebufferTextureMultiviewOVR) /* 585 */ \ + OP(MaxShaderCompilerThreadsKHR) /* 586 */ \ + OP(CreateAndTexStorage2DSharedImageINTERNALImmediate) /* 587 */ \ + OP(BeginSharedImageAccessDirectCHROMIUM) /* 588 */ \ + OP(EndSharedImageAccessDirectCHROMIUM) /* 589 */ \ + OP(BeginBatchReadAccessSharedImageCHROMIUM) /* 590 */ \ + OP(EndBatchReadAccessSharedImageCHROMIUM) /* 591 */ \ + OP(EnableiOES) /* 592 */ \ + OP(DisableiOES) /* 593 */ \ + OP(BlendEquationiOES) /* 594 */ \ + OP(BlendEquationSeparateiOES) /* 595 */ \ + OP(BlendFunciOES) /* 596 */ \ + OP(BlendFuncSeparateiOES) /* 597 */ \ + OP(ColorMaskiOES) /* 598 */ \ + OP(IsEnablediOES) /* 599 */ enum CommandId { kOneBeforeStartPoint = diff --git a/chromium/gpu/command_buffer/common/gles2_cmd_utils.cc b/chromium/gpu/command_buffer/common/gles2_cmd_utils.cc index 5a062329521..90a60f27ac2 100644 --- a/chromium/gpu/command_buffer/common/gles2_cmd_utils.cc +++ 
b/chromium/gpu/command_buffer/common/gles2_cmd_utils.cc @@ -15,7 +15,8 @@ #include <sstream> -#include "base/logging.h" +#include "base/check_op.h" +#include "base/notreached.h" #include "base/numerics/safe_math.h" #include "base/stl_util.h" diff --git a/chromium/gpu/command_buffer/common/gles2_cmd_utils.h b/chromium/gpu/command_buffer/common/gles2_cmd_utils.h index 238fce0975b..4d6be9b2fb4 100644 --- a/chromium/gpu/command_buffer/common/gles2_cmd_utils.h +++ b/chromium/gpu/command_buffer/common/gles2_cmd_utils.h @@ -14,7 +14,7 @@ #include <limits> #include <string> -#include "base/logging.h" +#include "base/check.h" #include "base/macros.h" #include "base/numerics/safe_math.h" #include "gpu/command_buffer/common/gles2_utils_export.h" diff --git a/chromium/gpu/command_buffer/common/gles2_cmd_utils_implementation_autogen.h b/chromium/gpu/command_buffer/common/gles2_cmd_utils_implementation_autogen.h index f7510c53ddd..3ceac4dfc5b 100644 --- a/chromium/gpu/command_buffer/common/gles2_cmd_utils_implementation_autogen.h +++ b/chromium/gpu/command_buffer/common/gles2_cmd_utils_implementation_autogen.h @@ -2685,6 +2685,10 @@ static const GLES2Util::EnumToString enum_to_string_table[] = { "GL_SHARED_IMAGE_ACCESS_MODE_READWRITE_CHROMIUM", }, { + 0x8AF8, + "GL_SHARED_IMAGE_ACCESS_MODE_OVERLAY_CHROMIUM", + }, + { 0x8B30, "GL_FRAGMENT_SHADER", }, @@ -7447,6 +7451,13 @@ std::string GLES2Util::GetStringIndexedGLState(uint32_t value) { {GL_UNIFORM_BUFFER_BINDING, "GL_UNIFORM_BUFFER_BINDING"}, {GL_UNIFORM_BUFFER_SIZE, "GL_UNIFORM_BUFFER_SIZE"}, {GL_UNIFORM_BUFFER_START, "GL_UNIFORM_BUFFER_START"}, + {GL_BLEND_EQUATION_RGB, "GL_BLEND_EQUATION_RGB"}, + {GL_BLEND_EQUATION_ALPHA, "GL_BLEND_EQUATION_ALPHA"}, + {GL_BLEND_SRC_RGB, "GL_BLEND_SRC_RGB"}, + {GL_BLEND_SRC_ALPHA, "GL_BLEND_SRC_ALPHA"}, + {GL_BLEND_DST_RGB, "GL_BLEND_DST_RGB"}, + {GL_BLEND_DST_ALPHA, "GL_BLEND_DST_ALPHA"}, + {GL_COLOR_WRITEMASK, "GL_COLOR_WRITEMASK"}, }; return GLES2Util::GetQualifiedEnumString(string_table, 
base::size(string_table), value); @@ -7777,6 +7788,8 @@ std::string GLES2Util::GetStringShaderType(uint32_t value) { std::string GLES2Util::GetStringSharedImageAccessMode(uint32_t value) { static const EnumToString string_table[] = { + {GL_SHARED_IMAGE_ACCESS_MODE_OVERLAY_CHROMIUM, + "GL_SHARED_IMAGE_ACCESS_MODE_OVERLAY_CHROMIUM"}, {GL_SHARED_IMAGE_ACCESS_MODE_READWRITE_CHROMIUM, "GL_SHARED_IMAGE_ACCESS_MODE_READWRITE_CHROMIUM"}, {GL_SHARED_IMAGE_ACCESS_MODE_READ_CHROMIUM, diff --git a/chromium/gpu/command_buffer/common/mailbox.h b/chromium/gpu/command_buffer/common/mailbox.h index 5cd20835b43..9c68b87b316 100644 --- a/chromium/gpu/command_buffer/common/mailbox.h +++ b/chromium/gpu/command_buffer/common/mailbox.h @@ -10,7 +10,7 @@ #include <string> -#include "gpu/gpu_export.h" +#include "base/component_export.h" // From gl2/gl2ext.h. #ifndef GL_MAILBOX_SIZE_CHROMIUM @@ -26,7 +26,7 @@ namespace gpu { // name is valid. // See src/gpu/GLES2/extensions/CHROMIUM/CHROMIUM_texture_mailbox.txt for more // details. 
-struct GPU_EXPORT Mailbox { +struct COMPONENT_EXPORT(GPU_MAILBOX) Mailbox { using Name = int8_t[GL_MAILBOX_SIZE_CHROMIUM]; Mailbox(); diff --git a/chromium/gpu/command_buffer/common/raster_cmd_format.h b/chromium/gpu/command_buffer/common/raster_cmd_format.h index 3e7a7e1b7f1..c8b8b072c68 100644 --- a/chromium/gpu/command_buffer/common/raster_cmd_format.h +++ b/chromium/gpu/command_buffer/common/raster_cmd_format.h @@ -12,7 +12,6 @@ #include <string.h> #include "base/atomicops.h" -#include "base/logging.h" #include "base/macros.h" #include "components/viz/common/resources/resource_format.h" #include "gpu/command_buffer/common/bitfield_helpers.h" diff --git a/chromium/gpu/command_buffer/common/swap_buffers_complete_params.h b/chromium/gpu/command_buffer/common/swap_buffers_complete_params.h index c2971e249c4..766c0bcb20b 100644 --- a/chromium/gpu/command_buffer/common/swap_buffers_complete_params.h +++ b/chromium/gpu/command_buffer/common/swap_buffers_complete_params.h @@ -5,8 +5,10 @@ #ifndef GPU_COMMAND_BUFFER_COMMON_SWAP_BUFFERS_COMPLETE_PARAMS_H_ #define GPU_COMMAND_BUFFER_COMMON_SWAP_BUFFERS_COMPLETE_PARAMS_H_ +#include "base/optional.h" #include "gpu/command_buffer/common/texture_in_use_response.h" #include "ui/gfx/ca_layer_params.h" +#include "ui/gfx/geometry/rect.h" #include "ui/gfx/swap_result.h" namespace gpu { @@ -20,6 +22,12 @@ struct GPU_EXPORT SwapBuffersCompleteParams { ~SwapBuffersCompleteParams(); gfx::SwapResponse swap_response; + + // Damage area of the current backing buffer compare to the previous swapped + // buffer. The renderer can use it as hint for minimizing drawing area for the + // next frame. + base::Optional<gfx::Rect> frame_buffer_damage_area; + // Used only on macOS, for coordinating IOSurface reuse with the system // WindowServer. 
gpu::TextureInUseResponses texture_in_use_responses; diff --git a/chromium/gpu/command_buffer/gles2_cmd_buffer_functions.txt b/chromium/gpu/command_buffer/gles2_cmd_buffer_functions.txt index 6115cb03744..bdc472c3878 100644 --- a/chromium/gpu/command_buffer/gles2_cmd_buffer_functions.txt +++ b/chromium/gpu/command_buffer/gles2_cmd_buffer_functions.txt @@ -87,6 +87,7 @@ GL_APICALL void GL_APIENTRY glGetActiveUniformsiv (GLidProgram program, GL_APICALL void GL_APIENTRY glGetAttachedShaders (GLidProgram program, GLsizeiNotNegative maxcount, GLsizeiOptional* count, GLuint* shaders); GL_APICALL GLint GL_APIENTRY glGetAttribLocation (GLidProgram program, const char* name); GL_APICALL void GL_APIENTRY glGetBooleanv (GLenumGLState pname, GLboolean* params); +GL_APICALL void GL_APIENTRY glGetBooleani_v (GLenumIndexedGLState pname, GLuint index, GLboolean* data); GL_APICALL void GL_APIENTRY glGetBufferParameteri64v (GLenumBufferTarget target, GLenumBufferParameter64 pname, GLint64* params); GL_APICALL void GL_APIENTRY glGetBufferParameteriv (GLenumBufferTarget target, GLenumBufferParameter pname, GLint* params); GL_APICALL GLenum GL_APIENTRY glGetError (void); @@ -415,3 +416,13 @@ GL_APICALL void GL_APIENTRY glBeginSharedImageAccessDirectCHROMIUM (GLui GL_APICALL void GL_APIENTRY glEndSharedImageAccessDirectCHROMIUM (GLuint texture); GL_APICALL void GL_APIENTRY glBeginBatchReadAccessSharedImageCHROMIUM (void); GL_APICALL void GL_APIENTRY glEndBatchReadAccessSharedImageCHROMIUM (void); + +// Extension OES_draw_buffers_indexed +GL_APICALL void GL_APIENTRY glEnableiOES (GLenum target, GLuint index); +GL_APICALL void GL_APIENTRY glDisableiOES (GLenum target, GLuint index); +GL_APICALL void GL_APIENTRY glBlendEquationiOES (GLuint buf, GLenum mode); +GL_APICALL void GL_APIENTRY glBlendEquationSeparateiOES (GLuint buf, GLenum modeRGB, GLenum modeAlpha); +GL_APICALL void GL_APIENTRY glBlendFunciOES (GLuint buf, GLenum src, GLenum dst); +GL_APICALL void GL_APIENTRY 
glBlendFuncSeparateiOES (GLuint buf, GLenum srcRGB, GLenum dstRGB, GLenum srcAlpha, GLenum dstAlpha); +GL_APICALL void GL_APIENTRY glColorMaskiOES (GLuint buf, GLboolean r, GLboolean g, GLboolean b, GLboolean a); +GL_APICALL GLboolean GL_APIENTRY glIsEnablediOES (GLenum target, GLuint index); diff --git a/chromium/gpu/command_buffer/service/BUILD.gn b/chromium/gpu/command_buffer/service/BUILD.gn index b6ad2bd114b..16cb6065726 100644 --- a/chromium/gpu/command_buffer/service/BUILD.gn +++ b/chromium/gpu/command_buffer/service/BUILD.gn @@ -227,6 +227,7 @@ target(link_target_type, "gles2_sources") { "shared_image_backing_factory.h", "shared_image_backing_factory_gl_texture.cc", "shared_image_backing_factory_gl_texture.h", + "shared_image_backing_factory_gl_texture_internal.h", "shared_image_factory.cc", "shared_image_factory.h", "shared_image_manager.cc", @@ -235,6 +236,8 @@ target(link_target_type, "gles2_sources") { "shared_image_representation.h", "shared_image_representation_skia_gl.cc", "shared_image_representation_skia_gl.h", + "shared_memory_region_wrapper.cc", + "shared_memory_region_wrapper.h", "skia_utils.cc", "skia_utils.h", "texture_definition.cc", diff --git a/chromium/gpu/command_buffer/service/abstract_texture_impl_shared_context_state.cc b/chromium/gpu/command_buffer/service/abstract_texture_impl_shared_context_state.cc index 9d0d7f74222..43337b5c2ba 100644 --- a/chromium/gpu/command_buffer/service/abstract_texture_impl_shared_context_state.cc +++ b/chromium/gpu/command_buffer/service/abstract_texture_impl_shared_context_state.cc @@ -46,10 +46,10 @@ AbstractTextureImplOnSharedContext::AbstractTextureImplOnSharedContext( texture_ = new gpu::gles2::Texture(service_id); texture_->SetLightweightRef(); texture_->SetTarget(target, 1); - texture_->sampler_state_.min_filter = GL_LINEAR; - texture_->sampler_state_.mag_filter = GL_LINEAR; - texture_->sampler_state_.wrap_t = GL_CLAMP_TO_EDGE; - texture_->sampler_state_.wrap_s = GL_CLAMP_TO_EDGE; + 
texture_->set_min_filter(GL_LINEAR); + texture_->set_mag_filter(GL_LINEAR); + texture_->set_wrap_t(GL_CLAMP_TO_EDGE); + texture_->set_wrap_s(GL_CLAMP_TO_EDGE); gfx::Rect cleared_rect; texture_->SetLevelInfo(target, 0, internal_format, width, height, depth, border, format, type, cleared_rect); diff --git a/chromium/gpu/command_buffer/service/ahardwarebuffer_utils.cc b/chromium/gpu/command_buffer/service/ahardwarebuffer_utils.cc index 5224766c14d..a53dae3bdcd 100644 --- a/chromium/gpu/command_buffer/service/ahardwarebuffer_utils.cc +++ b/chromium/gpu/command_buffer/service/ahardwarebuffer_utils.cc @@ -6,10 +6,102 @@ #include <android/hardware_buffer.h> +#include "base/android/scoped_hardware_buffer_handle.h" #include "base/check.h" #include "base/notreached.h" +#include "components/viz/common/gpu/vulkan_context_provider.h" +#include "components/viz/common/resources/resource_format_utils.h" +#include "gpu/command_buffer/service/shared_context_state.h" +#include "gpu/command_buffer/service/texture_manager.h" +#include "gpu/vulkan/vulkan_image.h" +#include "ui/gfx/color_space.h" +#include "ui/gfx/geometry/size.h" +#include "ui/gl/gl_gl_api_implementation.h" +#include "ui/gl/gl_image_ahardwarebuffer.h" +#include "ui/gl/scoped_binders.h" namespace gpu { +namespace { + +gles2::Texture* MakeGLTexture( + GLenum target, + GLuint service_id, + scoped_refptr<gl::GLImageAHardwareBuffer> egl_image, + const gfx::Size& size, + const gfx::Rect& cleared_rect) { + auto* texture = new gles2::Texture(service_id); + texture->SetLightweightRef(); + texture->SetTarget(target, 1); + texture->set_min_filter(GL_LINEAR); + texture->set_mag_filter(GL_LINEAR); + texture->set_wrap_t(GL_CLAMP_TO_EDGE); + texture->set_wrap_s(GL_CLAMP_TO_EDGE); + + texture->SetLevelInfo(target, 0, egl_image->GetInternalFormat(), size.width(), + size.height(), 1, 0, egl_image->GetDataFormat(), + egl_image->GetDataType(), cleared_rect); + texture->SetLevelImage(target, 0, egl_image.get(), gles2::Texture::BOUND); + 
texture->SetImmutable(true, false); + return texture; +} + +scoped_refptr<gles2::TexturePassthrough> MakeGLTexturePassthrough( + GLenum target, + GLuint service_id, + scoped_refptr<gl::GLImageAHardwareBuffer> egl_image, + const size_t estimated_size) { + auto passthrough_texture = + base::MakeRefCounted<gles2::TexturePassthrough>(service_id, target); + passthrough_texture->SetEstimatedSize(estimated_size); + passthrough_texture->SetLevelImage(target, 0, egl_image.get()); + passthrough_texture->set_is_bind_pending(false); + return passthrough_texture; +} + +void GenGLTextureInternal( + AHardwareBuffer* buffer, + GLenum target, + const gfx::ColorSpace& color_space, + const gfx::Size& size, + const size_t estimated_size, + const gfx::Rect& cleared_rect, + scoped_refptr<gles2::TexturePassthrough>* passthrough_texture, + gles2::Texture** texture) { + gl::GLApi* api = gl::g_current_gl_context; + GLuint service_id = 0; + api->glGenTexturesFn(1, &service_id); + gl::ScopedTextureBinder texture_binder(target, service_id); + + api->glTexParameteriFn(target, GL_TEXTURE_MIN_FILTER, GL_LINEAR); + api->glTexParameteriFn(target, GL_TEXTURE_MAG_FILTER, GL_LINEAR); + api->glTexParameteriFn(target, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE); + api->glTexParameteriFn(target, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE); + + // Create an egl image using AHardwareBuffer. 
+ auto egl_image = base::MakeRefCounted<gl::GLImageAHardwareBuffer>(size); + if (!egl_image->Initialize(buffer, false)) { + LOG(ERROR) << "Failed to create EGL image"; + api->glDeleteTexturesFn(1, &service_id); + return; + } + + if (!egl_image->BindTexImage(target)) { + LOG(ERROR) << "Failed to bind egl image"; + api->glDeleteTexturesFn(1, &service_id); + return; + } + egl_image->SetColorSpace(color_space); + + if (passthrough_texture) { + *passthrough_texture = MakeGLTexturePassthrough( + target, service_id, std::move(egl_image), estimated_size); + } else { + *texture = MakeGLTexture(target, service_id, std::move(egl_image), size, + cleared_rect); + } +} + +} // namespace bool AHardwareBufferSupportedFormat(viz::ResourceFormat format) { switch (format) { @@ -46,4 +138,46 @@ unsigned int AHardwareBufferFormat(viz::ResourceFormat format) { } } +gles2::Texture* GenGLTexture(AHardwareBuffer* buffer, + GLenum target, + const gfx::ColorSpace& color_space, + const gfx::Size& size, + const size_t estimated_size, + const gfx::Rect& cleared_rect) { + gles2::Texture* texture = nullptr; + GenGLTextureInternal(buffer, target, color_space, size, estimated_size, + cleared_rect, nullptr /* passthrough_texture */, + &texture); + return texture; +} + +scoped_refptr<gles2::TexturePassthrough> GenGLTexturePassthrough( + AHardwareBuffer* buffer, + GLenum target, + const gfx::ColorSpace& color_space, + const gfx::Size& size, + const size_t estimated_size, + const gfx::Rect& cleared_rect) { + scoped_refptr<gles2::TexturePassthrough> passthrough_texture; + GenGLTextureInternal(buffer, target, color_space, size, estimated_size, + cleared_rect, &passthrough_texture, + nullptr /* texture */); + return passthrough_texture; +} + +std::unique_ptr<VulkanImage> CreateVkImageFromAhbHandle( + base::android::ScopedHardwareBufferHandle ahb_handle, + SharedContextState* context_state, + const gfx::Size& size, + const viz::ResourceFormat& format) { + DCHECK(context_state); + 
DCHECK(context_state->GrContextIsVulkan()); + + auto* device_queue = context_state->vk_context_provider()->GetDeviceQueue(); + gfx::GpuMemoryBufferHandle gmb_handle(std::move(ahb_handle)); + return VulkanImage::CreateFromGpuMemoryBufferHandle( + device_queue, std::move(gmb_handle), size, ToVkFormat(format), + 0 /* usage */); +} + } // namespace gpu diff --git a/chromium/gpu/command_buffer/service/ahardwarebuffer_utils.h b/chromium/gpu/command_buffer/service/ahardwarebuffer_utils.h index 77a32393676..a3106ae53a5 100644 --- a/chromium/gpu/command_buffer/service/ahardwarebuffer_utils.h +++ b/chromium/gpu/command_buffer/service/ahardwarebuffer_utils.h @@ -5,10 +5,36 @@ #ifndef GPU_COMMAND_BUFFER_SERVICE_AHARDWAREBUFFER_UTILS_H_ #define GPU_COMMAND_BUFFER_SERVICE_AHARDWAREBUFFER_UTILS_H_ +#include <memory> + +#include "base/memory/scoped_refptr.h" #include "components/viz/common/resources/resource_format.h" #include "gpu/gpu_gles2_export.h" +extern "C" typedef struct AHardwareBuffer AHardwareBuffer; + +typedef unsigned int GLenum; + +namespace base { +namespace android { +class ScopedHardwareBufferHandle; +} // namespace android +} // namespace base + +namespace gfx { +class ColorSpace; +class Rect; +class Size; +} // namespace gfx + namespace gpu { +class SharedContextState; +class VulkanImage; + +namespace gles2 { +class Texture; +class TexturePassthrough; +} // namespace gles2 // TODO(vikassoni): In future we will need to expose the set of formats and // constraints (e.g. max size) to the clients somehow that are available for @@ -25,6 +51,33 @@ AHardwareBufferSupportedFormat(viz::ResourceFormat format); // Returns the corresponding AHardwareBuffer format. unsigned int GPU_GLES2_EXPORT AHardwareBufferFormat(viz::ResourceFormat format); +// Generates a gles2 texture from AHB. This method must be called with a current +// GLContext which will be used to create the Texture. 
This method adds a +// lightweight ref on the Texture which the caller is responsible for releasing. +gles2::Texture* GenGLTexture(AHardwareBuffer* buffer, + GLenum target, + const gfx::ColorSpace& color_space, + const gfx::Size& size, + const size_t estimated_size, + const gfx::Rect& cleared_rect); + +// Generates a passthrough texture from AHB. This method must be called with a +// current GLContext which will be used to create the Texture. +scoped_refptr<gles2::TexturePassthrough> GenGLTexturePassthrough( + AHardwareBuffer* buffer, + GLenum target, + const gfx::ColorSpace& color_space, + const gfx::Size& size, + const size_t estimated_size, + const gfx::Rect& cleared_rect); + +// Create a vulkan image from the AHB handle. +std::unique_ptr<VulkanImage> CreateVkImageFromAhbHandle( + base::android::ScopedHardwareBufferHandle ahb_handle, + SharedContextState* context_state, + const gfx::Size& size, + const viz::ResourceFormat& format); + } // namespace gpu #endif // GPU_COMMAND_BUFFER_SERVICE_AHARDWAREBUFFER_UTILS_H_ diff --git a/chromium/gpu/command_buffer/service/buffer_manager.h b/chromium/gpu/command_buffer/service/buffer_manager.h index 86c3561104a..67b042d99af 100644 --- a/chromium/gpu/command_buffer/service/buffer_manager.h +++ b/chromium/gpu/command_buffer/service/buffer_manager.h @@ -14,7 +14,7 @@ #include <unordered_map> #include <vector> -#include "base/logging.h" +#include "base/check_op.h" #include "base/macros.h" #include "base/memory/ref_counted.h" #include "gpu/command_buffer/common/buffer.h" diff --git a/chromium/gpu/command_buffer/service/client_service_map.h b/chromium/gpu/command_buffer/service/client_service_map.h index d19484ee1a7..56caf0d3b2c 100644 --- a/chromium/gpu/command_buffer/service/client_service_map.h +++ b/chromium/gpu/command_buffer/service/client_service_map.h @@ -9,7 +9,7 @@ #include <unordered_map> #include <vector> -#include "base/logging.h" +#include "base/check.h" namespace gpu { diff --git 
a/chromium/gpu/command_buffer/service/context_group.cc b/chromium/gpu/command_buffer/service/context_group.cc index 51452b9ce43..f05b4688919 100644 --- a/chromium/gpu/command_buffer/service/context_group.cc +++ b/chromium/gpu/command_buffer/service/context_group.cc @@ -58,6 +58,7 @@ DisallowedFeatures AdjustDisallowedFeatures( adjusted_disallowed_features.ext_texture_filter_anisotropic = true; adjusted_disallowed_features.ext_float_blend = true; adjusted_disallowed_features.oes_fbo_render_mipmap = true; + adjusted_disallowed_features.oes_draw_buffers_indexed = true; } return adjusted_disallowed_features; } diff --git a/chromium/gpu/command_buffer/service/context_state.h b/chromium/gpu/command_buffer/service/context_state.h index 531313953a4..3f04a3dd6aa 100644 --- a/chromium/gpu/command_buffer/service/context_state.h +++ b/chromium/gpu/command_buffer/service/context_state.h @@ -10,7 +10,8 @@ #include <memory> #include <vector> -#include "base/logging.h" +#include "base/check_op.h" +#include "base/notreached.h" #include "gpu/command_buffer/service/gl_utils.h" #include "gpu/command_buffer/service/sampler_manager.h" #include "gpu/command_buffer/service/shader_manager.h" diff --git a/chromium/gpu/command_buffer/service/copy_texture_chromium_mock.h b/chromium/gpu/command_buffer/service/copy_texture_chromium_mock.h index ec299201fb8..eb55bbd0845 100644 --- a/chromium/gpu/command_buffer/service/copy_texture_chromium_mock.h +++ b/chromium/gpu/command_buffer/service/copy_texture_chromium_mock.h @@ -116,51 +116,6 @@ class MockCopyTextureResourceManager bool dither, CopyTextureMethod method, CopyTexImageResourceManager* luma_emulation_blitter) override {} - void DoCopySubTextureWithTransform( - DecoderContext* decoder, - GLenum source_target, - GLuint source_id, - GLint source_level, - GLenum source_internal_format, - GLenum dest_target, - GLuint dest_id, - GLint dest_level, - GLenum dest_internal_format, - GLint xoffset, - GLint yoffset, - GLint x, - GLint y, - GLsizei 
width, - GLsizei height, - GLsizei dest_width, - GLsizei dest_height, - GLsizei source_width, - GLsizei source_height, - bool flip_y, - bool premultiply_alpha, - bool unpremultiply_alpha, - bool dither, - const GLfloat transform_matrix[16], - CopyTexImageResourceManager* luma_emulation_blitter) override {} - void DoCopyTextureWithTransform( - DecoderContext* decoder, - GLenum source_target, - GLuint source_id, - GLint source_level, - GLenum source_format, - GLenum dest_target, - GLuint dest_id, - GLint dest_level, - GLenum dest_format, - GLsizei width, - GLsizei height, - bool flip_y, - bool premultiply_alpha, - bool unpremultiply_alpha, - bool dither, - const GLfloat transform_matrix[16], - CopyTextureMethod method, - CopyTexImageResourceManager* luma_emulation_blitter) override {} private: DISALLOW_COPY_AND_ASSIGN(MockCopyTextureResourceManager); diff --git a/chromium/gpu/command_buffer/service/external_vk_image_backing.cc b/chromium/gpu/command_buffer/service/external_vk_image_backing.cc index 05778fdf5b7..5e38edb1270 100644 --- a/chromium/gpu/command_buffer/service/external_vk_image_backing.cc +++ b/chromium/gpu/command_buffer/service/external_vk_image_backing.cc @@ -8,9 +8,7 @@ #include <vector> #include "base/stl_util.h" -#include "base/system/sys_info.h" #include "build/build_config.h" -#include "components/viz/common/resources/resource_format_utils.h" #include "components/viz/common/resources/resource_sizes.h" #include "gpu/command_buffer/service/external_vk_image_gl_representation.h" #include "gpu/command_buffer/service/external_vk_image_skia_representation.h" @@ -23,6 +21,7 @@ #include "gpu/vulkan/vulkan_function_pointers.h" #include "gpu/vulkan/vulkan_image.h" #include "gpu/vulkan/vulkan_util.h" +#include "third_party/skia/include/gpu/GrBackendSemaphore.h" #include "ui/gfx/buffer_format_util.h" #include "ui/gl/buildflags.h" #include "ui/gl/gl_context.h" @@ -138,8 +137,19 @@ std::unique_ptr<ExternalVkImageBacking> ExternalVkImageBacking::Create( auto* 
device_queue = context_state->vk_context_provider()->GetDeviceQueue(); VkFormat vk_format = ToVkFormat(format); - VkImageUsageFlags vk_usage = - VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | VK_IMAGE_USAGE_SAMPLED_BIT; + + constexpr auto kUsageNeedsColorAttachment = + SHARED_IMAGE_USAGE_GLES2 | SHARED_IMAGE_USAGE_RASTER | + SHARED_IMAGE_USAGE_OOP_RASTERIZATION | SHARED_IMAGE_USAGE_WEBGPU; + VkImageUsageFlags vk_usage = VK_IMAGE_USAGE_SAMPLED_BIT; + if (usage & kUsageNeedsColorAttachment) { + vk_usage |= VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT; + if (format == viz::ETC1) { + DLOG(ERROR) << "ETC1 format cannot be used as color attachment."; + return nullptr; + } + } + if (is_transfer_dst) vk_usage |= VK_IMAGE_USAGE_TRANSFER_DST_BIT; @@ -155,10 +165,20 @@ std::unique_ptr<ExternalVkImageBacking> ExternalVkImageBacking::Create( auto* vulkan_implementation = context_state->vk_context_provider()->GetVulkanImplementation(); - VkImageCreateFlags vk_flags = - vulkan_implementation->enforce_protected_memory() - ? VK_IMAGE_CREATE_PROTECTED_BIT - : 0; + VkImageCreateFlags vk_flags = 0; + + // In protected mode mark the image as protected, except when the image needs + // GLES2, but not Raster usage. ANGLE currently doesn't support protected + // images. Some clients request GLES2 and Raster usage (e.g. see + // GpuMemoryBufferVideoFramePool). In that case still allocate protected + // image, which ensures that the image is still usable, but it may not work in + // some scenarios (e.g. when the video frame is used in WebGL).
+ if (vulkan_implementation->enforce_protected_memory() && + (!(usage & SHARED_IMAGE_USAGE_GLES2) || + (usage & SHARED_IMAGE_USAGE_RASTER))) { + vk_flags |= VK_IMAGE_CREATE_PROTECTED_BIT; + } + std::unique_ptr<VulkanImage> image; if (is_external) { image = VulkanImage::CreateWithExternalMemory(device_queue, size, vk_format, @@ -176,7 +196,7 @@ std::unique_ptr<ExternalVkImageBacking> ExternalVkImageBacking::Create( color_space, usage, context_state, std::move(image), command_pool); if (!pixel_data.empty()) { - backing->WritePixels( + backing->WritePixelsWithCallback( pixel_data.size(), 0, base::BindOnce([](const void* data, size_t size, void* buffer) { memcpy(buffer, data, size); }, @@ -228,73 +248,10 @@ std::unique_ptr<ExternalVkImageBacking> ExternalVkImageBacking::CreateFromGMB( } DCHECK_EQ(handle.type, gfx::SHARED_MEMORY_BUFFER); - if (!base::IsValueInRangeForNumericType<size_t>(handle.stride)) - return nullptr; - int32_t width_in_bytes = 0; - if (!viz::ResourceSizes::MaybeWidthInBytes(size.width(), resource_format, - &width_in_bytes)) { - DLOG(ERROR) << "ResourceSizes::MaybeWidthInBytes() failed."; + SharedMemoryRegionWrapper shared_memory_wrapper; + if (!shared_memory_wrapper.Initialize(handle, size, resource_format)) return nullptr; - } - - if (handle.stride < width_in_bytes) { - DLOG(ERROR) << "Invalid GMB stride."; - return nullptr; - } - - auto bits_per_pixel = viz::BitsPerPixel(resource_format); - switch (bits_per_pixel) { - case 64: - case 32: - case 16: - if (handle.stride % (bits_per_pixel / 8) != 0) { - DLOG(ERROR) << "Invalid GMB stride."; - return nullptr; - } - break; - case 8: - case 4: - break; - case 12: - // We are not supporting YVU420 and YUV_420_BIPLANAR format. 
- default: - NOTREACHED(); - return nullptr; - } - - if (!handle.region.IsValid()) { - DLOG(ERROR) << "Invalid GMB shared memory region."; - return nullptr; - } - - base::CheckedNumeric<size_t> checked_size = handle.stride; - checked_size *= size.height(); - if (!checked_size.IsValid()) { - DLOG(ERROR) << "Invalid GMB size."; - return nullptr; - } - - // Minimize the amount of address space we use but make sure offset is a - // multiple of page size as required by MapAt(). - size_t memory_offset = - handle.offset % base::SysInfo::VMAllocationGranularity(); - size_t map_offset = - base::SysInfo::VMAllocationGranularity() * - (handle.offset / base::SysInfo::VMAllocationGranularity()); - checked_size += memory_offset; - if (!checked_size.IsValid()) { - DLOG(ERROR) << "Invalid GMB size."; - return nullptr; - } - - auto shared_memory_mapping = handle.region.MapAt( - static_cast<off_t>(map_offset), checked_size.ValueOrDie()); - - if (!shared_memory_mapping.IsValid()) { - DLOG(ERROR) << "Failed to map shared memory."; - return nullptr; - } auto backing = Create(context_state, command_pool, mailbox, resource_format, size, color_space, usage, image_usage_cache, @@ -302,8 +259,7 @@ std::unique_ptr<ExternalVkImageBacking> ExternalVkImageBacking::CreateFromGMB( if (!backing) return nullptr; - backing->InstallSharedMemory(std::move(shared_memory_mapping), handle.stride, - memory_offset); + backing->InstallSharedMemory(std::move(shared_memory_wrapper)); return backing; } @@ -366,21 +322,36 @@ bool ExternalVkImageBacking::BeginAccess( bool readonly, std::vector<SemaphoreHandle>* semaphore_handles, bool is_gl) { + DLOG_IF(ERROR, gl_reads_in_progress_ != 0 && !is_gl) + << "Backing is being accessed by both GL and Vulkan."; + // No need to do anything for the second and subsequent GL read accesses.
+ if (is_gl && readonly && gl_reads_in_progress_) { + ++gl_reads_in_progress_; + return true; + } + if (readonly && !reads_in_progress_) { UpdateContent(kInVkImage); if (texture_) UpdateContent(kInGLTexture); } + if (!BeginAccessInternal(readonly, semaphore_handles)) return false; if (!is_gl) return true; + if (readonly) { + DCHECK(!gl_reads_in_progress_); + gl_reads_in_progress_ = 1; + } + if (use_separate_gl_texture()) return true; DCHECK(need_synchronization()); + DCHECK(is_gl); auto command_buffer = command_pool_->CreatePrimaryCommandBuffer(); { @@ -401,7 +372,7 @@ bool ExternalVkImageBacking::BeginAccess( uint32_t vulkan_queue_index = context_state_->vk_context_provider() ->GetDeviceQueue() ->GetVulkanQueueIndex(); - // Transfer image queue faimily ownership to external, so the image can be + // Transfer image queue family ownership to external, so the image can be // used by GL. command_buffer->TransitionImageLayout(image_info.fImage, image_layout, image_layout, vulkan_queue_index, @@ -422,9 +393,9 @@ bool ExternalVkImageBacking::BeginAccess( // TODO(penghuang): ask skia to do it for us to avoid this queue submission. 
command_buffer->Submit(wait_semaphores.size(), wait_semaphores.data(), 1, &signal_semaphore); - auto end_access_semphore_handle = + auto end_access_semaphore_handle = vulkan_implementation()->GetSemaphoreHandle(device(), signal_semaphore); - semaphore_handles->push_back(std::move(end_access_semphore_handle)); + semaphore_handles->push_back(std::move(end_access_semaphore_handle)); auto* fence_helper = context_state_->vk_context_provider()->GetDeviceQueue()->GetFenceHelper(); @@ -440,7 +411,17 @@ bool ExternalVkImageBacking::BeginAccess( void ExternalVkImageBacking::EndAccess(bool readonly, SemaphoreHandle semaphore_handle, bool is_gl) { + if (is_gl && readonly) { + DCHECK(gl_reads_in_progress_); + if (--gl_reads_in_progress_ > 0) { + DCHECK(!semaphore_handle.is_valid()); + return; + } + } + + // Only transition the image layout and queue back when it is the last GL access. if (is_gl && !use_separate_gl_texture()) { + DCHECK(semaphore_handle.is_valid()); auto command_buffer = command_pool_->CreatePrimaryCommandBuffer(); { ScopedSingleUseCommandBufferRecorder recorder(*command_buffer); @@ -629,10 +610,10 @@ ExternalVkImageBacking::ProduceGLTexture(SharedImageManager* manager, texture_ = new gles2::Texture(texture_service_id); texture_->SetLightweightRef(); texture_->SetTarget(GL_TEXTURE_2D, 1); - texture_->sampler_state_.min_filter = GL_LINEAR; - texture_->sampler_state_.mag_filter = GL_LINEAR; - texture_->sampler_state_.wrap_t = GL_CLAMP_TO_EDGE; - texture_->sampler_state_.wrap_s = GL_CLAMP_TO_EDGE; + texture_->set_min_filter(GL_LINEAR); + texture_->set_mag_filter(GL_LINEAR); + texture_->set_wrap_t(GL_CLAMP_TO_EDGE); + texture_->set_wrap_s(GL_CLAMP_TO_EDGE); // If the backing is already cleared, no need to clear it again.
gfx::Rect cleared_rect; if (IsCleared()) @@ -689,14 +670,10 @@ ExternalVkImageBacking::ProduceSkia( } void ExternalVkImageBacking::InstallSharedMemory( - base::WritableSharedMemoryMapping shared_memory_mapping, - size_t stride, - size_t memory_offset) { - DCHECK(!shared_memory_mapping_.IsValid()); - DCHECK(shared_memory_mapping.IsValid()); - shared_memory_mapping_ = std::move(shared_memory_mapping); - stride_ = stride; - memory_offset_ = memory_offset; + SharedMemoryRegionWrapper shared_memory_wrapper) { + DCHECK(!shared_memory_wrapper_.IsValid()); + DCHECK(shared_memory_wrapper.IsValid()); + shared_memory_wrapper_ = std::move(shared_memory_wrapper); Update(nullptr); } @@ -713,18 +690,10 @@ void ExternalVkImageBacking::UpdateContent(uint32_t content_flags) { if (content_flags == kInVkImage) { if (latest_content_ & kInSharedMemory) { - if (!shared_memory_mapping_.IsValid()) + if (!shared_memory_wrapper_.IsValid()) return; - auto pixel_data = - shared_memory_mapping_.GetMemoryAsSpan<const uint8_t>().subspan( - memory_offset_); - if (!WritePixels( - pixel_data.size(), stride_, - base::BindOnce([](const void* data, size_t size, - void* buffer) { memcpy(buffer, data, size); }, - pixel_data.data(), pixel_data.size()))) { + if (!WritePixels()) return; - } latest_content_ |= use_separate_gl_texture() ? 
kInVkImage : kInVkImage | kInGLTexture; return; @@ -748,9 +717,10 @@ void ExternalVkImageBacking::UpdateContent(uint32_t content_flags) { } } -bool ExternalVkImageBacking::WritePixels(size_t data_size, - size_t stride, - FillBufferCallback callback) { +bool ExternalVkImageBacking::WritePixelsWithCallback( + size_t data_size, + size_t stride, + FillBufferCallback callback) { DCHECK(stride == 0 || size().height() * stride <= data_size); VkBufferCreateInfo buffer_create_info = { @@ -811,6 +781,8 @@ bool ExternalVkImageBacking::WritePixels(size_t data_size, size().width(), size().height()); } + SetCleared(); + if (!need_synchronization()) { DCHECK(handles.empty()); command_buffer->Submit(0, nullptr, 0, nullptr); @@ -823,7 +795,6 @@ bool ExternalVkImageBacking::WritePixels(size_t data_size, std::move(command_buffer)); fence_helper->EnqueueBufferCleanupForSubmittedWork(stage_buffer, stage_allocation); - return true; } @@ -841,10 +812,11 @@ bool ExternalVkImageBacking::WritePixels(size_t data_size, begin_access_semaphores.data(), 1, &end_access_semaphore); - auto end_access_semphore_handle = vulkan_implementation()->GetSemaphoreHandle( - device(), end_access_semaphore); + auto end_access_semaphore_handle = + vulkan_implementation()->GetSemaphoreHandle(device(), + end_access_semaphore); EndAccessInternal(false /* readonly */, - std::move(end_access_semphore_handle)); + std::move(end_access_semaphore_handle)); auto* fence_helper = context_state_->vk_context_provider()->GetDeviceQueue()->GetFenceHelper(); @@ -855,10 +827,69 @@ bool ExternalVkImageBacking::WritePixels(size_t data_size, begin_access_semaphores); fence_helper->EnqueueBufferCleanupForSubmittedWork(stage_buffer, stage_allocation); - return true; } +bool ExternalVkImageBacking::WritePixels() { + std::vector<gpu::SemaphoreHandle> handles; + if (!BeginAccessInternal(false /* readonly */, &handles)) { + DLOG(ERROR) << "BeginAccess() failed."; + return false; + } + + std::vector<GrBackendSemaphore> 
begin_access_semaphores; + begin_access_semaphores.reserve(handles.size() + 1); + for (auto& handle : handles) { + VkSemaphore semaphore = vulkan_implementation()->ImportSemaphoreHandle( + device(), std::move(handle)); + begin_access_semaphores.emplace_back(); + begin_access_semaphores.back().initVulkan(semaphore); + } + + auto* gr_context = context_state_->gr_context(); + gr_context->wait(begin_access_semaphores.size(), + begin_access_semaphores.data()); + + auto info = SkImageInfo::Make(size().width(), size().height(), + ResourceFormatToClosestSkColorType( + /*gpu_compositing=*/true, format()), + kOpaque_SkAlphaType); + SkPixmap pixmap(info, shared_memory_wrapper_.GetMemory(), + shared_memory_wrapper_.GetStride()); + + if (!gr_context->updateBackendTexture(backend_texture_, &pixmap, + /*levels=*/1, nullptr, nullptr)) { + DLOG(ERROR) << "updateBackendTexture() failed."; + } + + if (!need_synchronization()) { + DCHECK(handles.empty()); + EndAccessInternal(false /* readonly */, SemaphoreHandle()); + return true; + } + + VkSemaphore end_access_semaphore = + vulkan_implementation()->CreateExternalSemaphore(device()); + GrBackendSemaphore end_access_backend_semaphore; + end_access_backend_semaphore.initVulkan(end_access_semaphore); + + GrFlushInfo flush_info = { + .fNumSemaphores = 1, + .fSignalSemaphores = &end_access_backend_semaphore, + }; + + gr_context->flush(flush_info); + // Submit so the |end_access_semaphore| is ready for waiting. 
+ gr_context->submit(); + + auto end_access_semaphore_handle = + vulkan_implementation()->GetSemaphoreHandle(device(), + end_access_semaphore); + EndAccessInternal(false /* readonly */, + std::move(end_access_semaphore_handle)); + return true; +} + void ExternalVkImageBacking::CopyPixelsFromGLTextureToVkImage() { DCHECK(use_separate_gl_texture()); DCHECK_NE(!!texture_, !!texture_passthrough_); @@ -907,16 +938,16 @@ void ExternalVkImageBacking::CopyPixelsFromGLTextureToVkImage() { ScopedPixelStore pack_skip_rows(api, GL_PACK_SKIP_ROWS, 0); ScopedPixelStore pack_aligment(api, GL_PACK_ALIGNMENT, 1); - WritePixels(checked_size.ValueOrDie(), 0, - base::BindOnce( - [](gl::GLApi* api, const gfx::Size& size, GLenum format, - GLenum type, void* buffer) { - api->glReadPixelsFn(0, 0, size.width(), size.height(), - format, type, buffer); - DCHECK_EQ(api->glGetErrorFn(), - static_cast<GLenum>(GL_NO_ERROR)); - }, - api, size(), gl_format, gl_type)); + WritePixelsWithCallback( + checked_size.ValueOrDie(), 0, + base::BindOnce( + [](gl::GLApi* api, const gfx::Size& size, GLenum format, GLenum type, + void* buffer) { + api->glReadPixelsFn(0, 0, size.width(), size.height(), format, type, + buffer); + DCHECK_EQ(api->glGetErrorFn(), static_cast<GLenum>(GL_NO_ERROR)); + }, + api, size(), gl_format, gl_type)); api->glBindFramebufferEXTFn(GL_READ_FRAMEBUFFER, old_framebuffer); api->glDeleteFramebuffersEXTFn(1, &framebuffer); } @@ -957,9 +988,7 @@ void ExternalVkImageBacking::CopyPixelsFromShmToGLTexture() { checked_size *= size().height(); DCHECK(checked_size.IsValid()); - auto pixel_data = - shared_memory_mapping_.GetMemoryAsSpan<const uint8_t>().subspan( - memory_offset_); + auto pixel_data = shared_memory_wrapper_.GetMemoryAsSpan(); api->glTexSubImage2DFn(GL_TEXTURE_2D, 0, 0, 0, size().width(), size().height(), gl_format, gl_type, pixel_data.data()); @@ -1023,7 +1052,9 @@ void ExternalVkImageBacking::EndAccessInternal( is_write_in_progress_ = false; } - if
(need_synchronization()) { + // synchronization is not needed if it is not the last gl access. + if (need_synchronization() && reads_in_progress_ == 0) { + DCHECK(!is_write_in_progress_); DCHECK(semaphore_handle.is_valid()); if (readonly) { read_semaphore_handles_.push_back(std::move(semaphore_handle)); @@ -1032,8 +1063,6 @@ void ExternalVkImageBacking::EndAccessInternal( DCHECK(read_semaphore_handles_.empty()); write_semaphore_handle_ = std::move(semaphore_handle); } - } else { - DCHECK(!semaphore_handle.is_valid()); } } diff --git a/chromium/gpu/command_buffer/service/external_vk_image_backing.h b/chromium/gpu/command_buffer/service/external_vk_image_backing.h index 7e7dc67b627..e3d1103d649 100644 --- a/chromium/gpu/command_buffer/service/external_vk_image_backing.h +++ b/chromium/gpu/command_buffer/service/external_vk_image_backing.h @@ -9,7 +9,6 @@ #include <vector> #include "base/memory/scoped_refptr.h" -#include "base/memory/shared_memory_mapping.h" #include "base/optional.h" #include "base/util/type_safety/pass_key.h" #include "build/build_config.h" @@ -17,6 +16,7 @@ #include "gpu/command_buffer/common/shared_image_usage.h" #include "gpu/command_buffer/service/shared_context_state.h" #include "gpu/command_buffer/service/shared_image_backing.h" +#include "gpu/command_buffer/service/shared_memory_region_wrapper.h" #include "gpu/command_buffer/service/texture_manager.h" #include "gpu/vulkan/semaphore_handle.h" #include "gpu/vulkan/vulkan_device_queue.h" @@ -99,6 +99,9 @@ class ExternalVkImageBacking final : public ClearTrackingSharedImageBacking { return !context_state()->support_vulkan_external_object(); } + uint32_t reads_in_progress() const { return reads_in_progress_; } + uint32_t gl_reads_in_progress() const { return gl_reads_in_progress_; } + // Notifies the backing that an access will start. Return false if there is // currently any other conflict access in progress. 
Otherwise, returns true // and semaphore handles which will be waited on before accessing. @@ -157,17 +160,17 @@ class ExternalVkImageBacking final : public ClearTrackingSharedImageBacking { #endif // Install a shared memory GMB to the backing. - void InstallSharedMemory( - base::WritableSharedMemoryMapping shared_memory_mapping, - size_t stride, - size_t memory_offset); + void InstallSharedMemory(SharedMemoryRegionWrapper shared_memory_wrapper); // Returns texture_service_id for ProduceGLTexture and GLTexturePassthrough. GLuint ProduceGLTextureInternal(); using FillBufferCallback = base::OnceCallback<void(void* buffer)>; - bool WritePixels(size_t data_size, - size_t stride, - FillBufferCallback callback); + // TODO(penghuang): Remove it when GrContext::updateBackendTexture() supports + // compressed texture and callback. + bool WritePixelsWithCallback(size_t data_size, + size_t stride, + FillBufferCallback callback); + bool WritePixels(); void CopyPixelsFromGLTextureToVkImage(); void CopyPixelsFromShmToGLTexture(); @@ -181,13 +184,12 @@ class ExternalVkImageBacking final : public ClearTrackingSharedImageBacking { bool is_write_in_progress_ = false; uint32_t reads_in_progress_ = 0; + uint32_t gl_reads_in_progress_ = 0; gles2::Texture* texture_ = nullptr; scoped_refptr<gles2::TexturePassthrough> texture_passthrough_; // GMB related stuff. 
- base::WritableSharedMemoryMapping shared_memory_mapping_; - size_t stride_ = 0; - size_t memory_offset_ = 0; + SharedMemoryRegionWrapper shared_memory_wrapper_; enum LatestContent { kInVkImage = 1 << 0, diff --git a/chromium/gpu/command_buffer/service/external_vk_image_gl_representation.cc b/chromium/gpu/command_buffer/service/external_vk_image_gl_representation.cc index 34fdcde0c1d..bbad54274a2 100644 --- a/chromium/gpu/command_buffer/service/external_vk_image_gl_representation.cc +++ b/chromium/gpu/command_buffer/service/external_vk_image_gl_representation.cc @@ -77,8 +77,10 @@ bool ExternalVkImageGLRepresentationShared::BeginAccess(GLenum mode) { } DCHECK(mode == GL_SHARED_IMAGE_ACCESS_MODE_READ_CHROMIUM || - mode == GL_SHARED_IMAGE_ACCESS_MODE_READWRITE_CHROMIUM); - const bool readonly = (mode == GL_SHARED_IMAGE_ACCESS_MODE_READ_CHROMIUM); + mode == GL_SHARED_IMAGE_ACCESS_MODE_READWRITE_CHROMIUM || + mode == GL_SHARED_IMAGE_ACCESS_MODE_OVERLAY_CHROMIUM); + const bool readonly = + (mode != GL_SHARED_IMAGE_ACCESS_MODE_READWRITE_CHROMIUM); std::vector<SemaphoreHandle> handles; if (!backing_impl()->BeginAccess(readonly, &handles, true /* is_gl */)) @@ -111,16 +113,17 @@ void ExternalVkImageGLRepresentationShared::EndAccess() { DCHECK(current_access_mode_ == GL_SHARED_IMAGE_ACCESS_MODE_READ_CHROMIUM || current_access_mode_ == - GL_SHARED_IMAGE_ACCESS_MODE_READWRITE_CHROMIUM); + GL_SHARED_IMAGE_ACCESS_MODE_READWRITE_CHROMIUM || + current_access_mode_ == GL_SHARED_IMAGE_ACCESS_MODE_OVERLAY_CHROMIUM); const bool readonly = - (current_access_mode_ == GL_SHARED_IMAGE_ACCESS_MODE_READ_CHROMIUM); + (current_access_mode_ != GL_SHARED_IMAGE_ACCESS_MODE_READWRITE_CHROMIUM); current_access_mode_ = 0; - VkSemaphore semaphore = VK_NULL_HANDLE; SemaphoreHandle semaphore_handle; - GLuint gl_semaphore = 0; - if (backing_impl()->need_synchronization()) { - semaphore = + if (backing_impl()->need_synchronization() && + backing_impl()->gl_reads_in_progress() <= 1) { + DCHECK(readonly 
== !!backing_impl()->gl_reads_in_progress()); + VkSemaphore semaphore = vk_implementation()->CreateExternalSemaphore(backing_impl()->device()); if (semaphore == VK_NULL_HANDLE) { // TODO(crbug.com/933452): We should be able to handle this failure more @@ -142,7 +145,8 @@ void ExternalVkImageGLRepresentationShared::EndAccess() { } SemaphoreHandle dup_semaphore_handle = semaphore_handle.Duplicate(); - gl_semaphore = ImportVkSemaphoreIntoGL(std::move(dup_semaphore_handle)); + GLuint gl_semaphore = + ImportVkSemaphoreIntoGL(std::move(dup_semaphore_handle)); if (!gl_semaphore) { // TODO(crbug.com/933452): We should be able to semaphore_handle this @@ -152,24 +156,21 @@ void ExternalVkImageGLRepresentationShared::EndAccess() { << "Vulkan"; return; } - } - GrVkImageInfo info; - auto result = backing_impl()->backend_texture().getVkImageInfo(&info); - DCHECK(result); - GLenum dst_layout = ToGLImageLayout(info.fImageLayout); - if (backing_impl()->need_synchronization()) { + GrVkImageInfo info; + auto result = backing_impl()->backend_texture().getVkImageInfo(&info); + DCHECK(result); + GLenum dst_layout = ToGLImageLayout(info.fImageLayout); api()->glSignalSemaphoreEXTFn(gl_semaphore, 0, nullptr, 1, &texture_service_id_, &dst_layout); api()->glDeleteSemaphoresEXTFn(1, &gl_semaphore); // Base on the spec, the glSignalSemaphoreEXT() call just inserts signal // semaphore command in the gl context. It may or may not flush the context - // which depends on the impelemntation. So to make it safe, we always call + // which depends on the implementation. So to make it safe, we always call // glFlush() here. If the implementation does flush in the // glSignalSemaphoreEXT() call, the glFlush() call should be a noop. 
api()->glFlushFn(); } - backing_impl()->EndAccess(readonly, std::move(semaphore_handle), true /* is_gl */); } diff --git a/chromium/gpu/command_buffer/service/external_vk_image_skia_representation.cc b/chromium/gpu/command_buffer/service/external_vk_image_skia_representation.cc index 692eb8feadd..3211b8b59c8 100644 --- a/chromium/gpu/command_buffer/service/external_vk_image_skia_representation.cc +++ b/chromium/gpu/command_buffer/service/external_vk_image_skia_representation.cc @@ -61,12 +61,12 @@ sk_sp<SkSurface> ExternalVkImageSkiaRepresentation::BeginWriteAccess( final_msaa_count != surface_msaa_count_) { SkColorType sk_color_type = viz::ResourceFormatToClosestSkColorType( true /* gpu_compositing */, format()); - surface = SkSurface::MakeFromBackendTextureAsRenderTarget( + surface = SkSurface::MakeFromBackendTexture( gr_context, promise_texture->backendTexture(), kTopLeft_GrSurfaceOrigin, final_msaa_count, sk_color_type, backing_impl()->color_space().ToSkColorSpace(), &surface_props); if (!surface) { - LOG(ERROR) << "MakeFromBackendTextureAsRenderTarget() failed."; + LOG(ERROR) << "MakeFromBackendTexture() failed."; backing_impl()->context_state()->EraseCachedSkSurface(this); return nullptr; } diff --git a/chromium/gpu/command_buffer/service/feature_info.cc b/chromium/gpu/command_buffer/service/feature_info.cc index 20e5298d9f6..ca410861576 100644 --- a/chromium/gpu/command_buffer/service/feature_info.cc +++ b/chromium/gpu/command_buffer/service/feature_info.cc @@ -170,35 +170,6 @@ bool IsWebGLDrawBuffersSupported(bool webglCompatibilityContext, } // anonymous namespace. -namespace { - -enum GpuTextureResultR16_L16 { - // Values synced with 'GpuTextureResultR16_L16' in - // src/tools/metrics/histograms/histograms.xml - kHaveNone = 0, - kHaveR16 = 1, - kHaveL16 = 2, - kHaveR16AndL16 = 3, - kMax = kHaveR16AndL16 -}; - -// TODO(riju): For UMA, remove after crbug.com/759456 is resolved. 
-bool g_r16_is_present; -bool g_l16_is_present; - -GpuTextureResultR16_L16 GpuTextureUMAHelper() { - if (g_r16_is_present && g_l16_is_present) { - return GpuTextureResultR16_L16::kHaveR16AndL16; - } else if (g_r16_is_present) { - return GpuTextureResultR16_L16::kHaveR16; - } else if (g_l16_is_present) { - return GpuTextureResultR16_L16::kHaveL16; - } - return GpuTextureResultR16_L16::kHaveNone; -} - -} // anonymous namespace. - FeatureInfo::FeatureFlags::FeatureFlags() = default; FeatureInfo::FeatureInfo() { @@ -250,11 +221,6 @@ void FeatureInfo::InitializeBasicState(const base::CommandLine* command_line) { feature_flags_.is_swiftshader_for_webgl = (useGL == gl::kGLImplementationSwiftShaderForWebGLName); - feature_flags_.is_swiftshader = - (useGL == gl::kGLImplementationSwiftShaderName) || - ((useGL == gl::kGLImplementationANGLEName) && - (useANGLE == gl::kANGLEImplementationSwiftShaderName)); - // The shader translator is needed to translate from WebGL-conformant GLES SL // to normal GLES SL, enforce WebGL conformance, translate from GLES SL 1.0 to // target context GLSL, implement emulation of OpenGL ES features on OpenGL, @@ -420,6 +386,13 @@ void FeatureInfo::EnableCHROMIUMColorBufferFloatRGB() { AddExtensionString("GL_CHROMIUM_color_buffer_float_rgb"); } +void FeatureInfo::EnableOESDrawBuffersIndexed() { + if (!feature_flags_.oes_draw_buffers_indexed) { + AddExtensionString("GL_OES_draw_buffers_indexed"); + feature_flags_.oes_draw_buffers_indexed = true; + } +} + void FeatureInfo::EnableOESFboRenderMipmap() { if (!feature_flags_.oes_fbo_render_mipmap) { AddExtensionString("GL_OES_fbo_render_mipmap"); @@ -778,6 +751,14 @@ void FeatureInfo::InitializeFeatures() { validators_.index_type.AddValue(GL_UNSIGNED_INT); } + // Note (crbug.com/1058744): not implemented for validating command decoder + if (is_passthrough_cmd_decoder_ && + gfx::HasExtension(extensions, "GL_OES_draw_buffers_indexed")) { + if (!disallowed_features_.oes_draw_buffers_indexed) { + 
EnableOESDrawBuffersIndexed(); + } + } + if (gl_version_info_->IsAtLeastGL(3, 0) || gl_version_info_->is_es3 || gfx::HasExtension(extensions, "GL_OES_fbo_render_mipmap") || gfx::HasExtension(extensions, "GL_EXT_framebuffer_object")) { @@ -1461,7 +1442,6 @@ void FeatureInfo::InitializeFeatures() { gfx::HasExtension(extensions, "GL_EXT_texture_norm16"))) { AddExtensionString("GL_EXT_texture_norm16"); feature_flags_.ext_texture_norm16 = true; - g_r16_is_present = true; validators_.pixel_type.AddValue(GL_UNSIGNED_SHORT); validators_.pixel_type.AddValue(GL_SHORT); @@ -1511,10 +1491,6 @@ void FeatureInfo::InitializeFeatures() { feature_flags_.gpu_memory_buffer_formats.Add(gfx::BufferFormat::R_16); } - UMA_HISTOGRAM_ENUMERATION( - "GPU.TextureR16Ext_LuminanceF16", GpuTextureUMAHelper(), - static_cast<int>(GpuTextureResultR16_L16::kMax) + 1); - if (enable_es3 && gfx::HasExtension(extensions, "GL_EXT_window_rectangles")) { AddExtensionString("GL_EXT_window_rectangles"); feature_flags_.ext_window_rectangles = true; @@ -1945,9 +1921,6 @@ void FeatureInfo::InitializeFloatAndHalfFloatFeatures( validators_.texture_internal_format_storage.AddValue( GL_LUMINANCE_ALPHA16F_EXT); } - - g_l16_is_present = - enable_texture_half_float && feature_flags_.ext_texture_storage; } bool FeatureInfo::IsES3Capable() const { diff --git a/chromium/gpu/command_buffer/service/feature_info.h b/chromium/gpu/command_buffer/service/feature_info.h index 4a3255b6b6e..ac6c4d8e393 100644 --- a/chromium/gpu/command_buffer/service/feature_info.h +++ b/chromium/gpu/command_buffer/service/feature_info.h @@ -90,7 +90,6 @@ class GPU_GLES2_EXPORT FeatureInfo : public base::RefCounted<FeatureInfo> { bool ext_discard_framebuffer = false; bool angle_depth_texture = false; bool is_swiftshader_for_webgl = false; - bool is_swiftshader = false; bool chromium_texture_filtering_hint = false; bool angle_texture_usage = false; bool ext_texture_storage = false; @@ -151,6 +150,7 @@ class GPU_GLES2_EXPORT FeatureInfo : public 
base::RefCounted<FeatureInfo> { bool webgl_multi_draw_instanced_base_vertex_base_instance = false; bool ext_texture_compression_bptc = false; bool ext_texture_compression_rgtc = false; + bool oes_draw_buffers_indexed = false; }; FeatureInfo(); @@ -213,6 +213,7 @@ class GPU_GLES2_EXPORT FeatureInfo : public base::RefCounted<FeatureInfo> { void EnableEXTColorBufferFloat(); void EnableEXTColorBufferHalfFloat(); void EnableEXTTextureFilterAnisotropic(); + void EnableOESDrawBuffersIndexed(); void EnableOESFboRenderMipmap(); void EnableOESTextureFloatLinear(); void EnableOESTextureHalfFloatLinear(); diff --git a/chromium/gpu/command_buffer/service/gl_context_virtual.cc b/chromium/gpu/command_buffer/service/gl_context_virtual.cc index c6e000cd9a6..c8662354eb6 100644 --- a/chromium/gpu/command_buffer/service/gl_context_virtual.cc +++ b/chromium/gpu/command_buffer/service/gl_context_virtual.cc @@ -87,10 +87,12 @@ void GLContextVirtual::SetSafeToForceGpuSwitch() { } unsigned int GLContextVirtual::CheckStickyGraphicsResetStatus() { - // Don't pretend we know which one of the virtual contexts was responsible. unsigned int reset_status = shared_context_->CheckStickyGraphicsResetStatus(); - return reset_status == GL_NO_ERROR ? GL_NO_ERROR - : GL_UNKNOWN_CONTEXT_RESET_ARB; + if (reset_status == GL_NO_ERROR) + return GL_NO_ERROR; + shared_context_->MarkVirtualContextLost(); + // Don't pretend we know which one of the virtual contexts was responsible. + return GL_UNKNOWN_CONTEXT_RESET_ARB; } void GLContextVirtual::SetUnbindFboOnMakeCurrent() { diff --git a/chromium/gpu/command_buffer/service/gl_stream_texture_image.h b/chromium/gpu/command_buffer/service/gl_stream_texture_image.h index 501e5cc35d4..27084971eff 100644 --- a/chromium/gpu/command_buffer/service/gl_stream_texture_image.h +++ b/chromium/gpu/command_buffer/service/gl_stream_texture_image.h @@ -16,12 +16,6 @@ namespace gles2 { // that supply a texture matrix. 
class GPU_GLES2_EXPORT GLStreamTextureImage : public gl::GLImage { public: - // Get the matrix. - // Copy the texture matrix for this image into |matrix|. - // Subclasses must return a matrix appropriate for a coordinate system where - // UV=(0,0) corresponds to the top left corner of the image. - virtual void GetTextureMatrix(float matrix[16]) = 0; - // TODO(weiliangc): When Overlay is moved off command buffer and we use // SharedImage in all cases, this API should be deleted. virtual void NotifyPromotionHint(bool promotion_hint, @@ -32,19 +26,6 @@ class GPU_GLES2_EXPORT GLStreamTextureImage : public gl::GLImage { protected: ~GLStreamTextureImage() override = default; - - // Convenience function for subclasses that deal with SurfaceTextures, whose - // coordinate system has (0,0) at the bottom left of the image. - // [ a e i m ] [ 1 0 0 0 ] [ a -e i m+e ] - // [ b f j n ] [ 0 -1 0 1 ] = [ b -f j n+f ] - // [ c g k o ] [ 0 0 1 0 ] [ c -g k o+g ] - // [ d h l p ] [ 0 0 0 1 ] [ d -h l p+h ] - static void YInvertMatrix(float matrix[16]) { - for (int i = 0; i < 4; ++i) { - matrix[i + 12] += matrix[i + 4]; - matrix[i + 4] = -matrix[i + 4]; - } - } }; } // namespace gles2 diff --git a/chromium/gpu/command_buffer/service/gl_stream_texture_image_stub.h b/chromium/gpu/command_buffer/service/gl_stream_texture_image_stub.h index 9c35dae2934..1c7271618ca 100644 --- a/chromium/gpu/command_buffer/service/gl_stream_texture_image_stub.h +++ b/chromium/gpu/command_buffer/service/gl_stream_texture_image_stub.h @@ -42,7 +42,6 @@ class GLStreamTextureImageStub : public GLStreamTextureImage { bool EmulatingRGB() const override; // Overridden from GLStreamTextureImage: - void GetTextureMatrix(float matrix[16]) override {} void NotifyPromotionHint(bool promotion_hint, int display_x, int display_y, diff --git a/chromium/gpu/command_buffer/service/gles2_cmd_copy_texture_chromium.cc b/chromium/gpu/command_buffer/service/gles2_cmd_copy_texture_chromium.cc index 2eb24507574..c84b347b150 
100644 --- a/chromium/gpu/command_buffer/service/gles2_cmd_copy_texture_chromium.cc +++ b/chromium/gpu/command_buffer/service/gles2_cmd_copy_texture_chromium.cc @@ -464,12 +464,9 @@ std::string GetFragmentShaderSource(unsigned glslVersion, // Main shader source. source += "uniform SamplerType u_sampler;\n" - "uniform mat4 u_tex_coord_transform;\n" "VARYING TexCoordPrecision vec2 v_uv;\n" "void main(void) {\n" - " TexCoordPrecision vec4 uv =\n" - " u_tex_coord_transform * vec4(v_uv, 0, 1);\n" - " vec4 color = TextureLookup(u_sampler, uv.st);\n"; + " vec4 color = TextureLookup(u_sampler, v_uv);\n"; // Premultiply or un-premultiply alpha. Must always do this, even // if the destination format doesn't have an alpha channel. @@ -927,59 +924,12 @@ class CopyTextureResourceManagerImpl bool dither, CopyTextureMethod method, CopyTexImageResourceManager* luma_emulation_blitter) override; - void DoCopySubTextureWithTransform( - DecoderContext* decoder, - GLenum source_target, - GLuint source_id, - GLint source_level, - GLenum source_internal_format, - GLenum dest_target, - GLuint dest_id, - GLint dest_level, - GLenum dest_internal_format, - GLint xoffset, - GLint yoffset, - GLint x, - GLint y, - GLsizei width, - GLsizei height, - GLsizei dest_width, - GLsizei dest_height, - GLsizei source_width, - GLsizei source_height, - bool flip_y, - bool premultiply_alpha, - bool unpremultiply_alpha, - bool dither, - const GLfloat transform_matrix[16], - CopyTexImageResourceManager* luma_emulation_blitter) override; - void DoCopyTextureWithTransform( - DecoderContext* decoder, - GLenum source_target, - GLuint source_id, - GLint source_level, - GLenum source_format, - GLenum dest_target, - GLuint dest_id, - GLint dest_level, - GLenum dest_format, - GLsizei width, - GLsizei height, - bool flip_y, - bool premultiply_alpha, - bool unpremultiply_alpha, - bool dither, - const GLfloat transform_matrix[16], - CopyTextureMethod method, - CopyTexImageResourceManager* luma_emulation_blitter) 
override; - private: struct ProgramInfo { ProgramInfo() : program(0u), vertex_source_mult_handle(0u), vertex_source_add_handle(0u), - tex_coord_transform_handle(0u), sampler_handle(0u) {} GLuint program; @@ -989,7 +939,6 @@ class CopyTextureResourceManagerImpl GLuint vertex_source_mult_handle; GLuint vertex_source_add_handle; - GLuint tex_coord_transform_handle; GLuint sampler_handle; }; @@ -1017,7 +966,6 @@ class CopyTextureResourceManagerImpl bool premultiply_alpha, bool unpremultiply_alpha, bool dither, - const GLfloat transform_matrix[16], CopyTexImageResourceManager* luma_emulation_blitter); bool initialized_; @@ -1117,32 +1065,6 @@ void CopyTextureResourceManagerImpl::Destroy() { buffer_id_ = 0; } -void CopyTextureResourceManagerImpl::DoCopyTexture( - DecoderContext* decoder, - GLenum source_target, - GLuint source_id, - GLint source_level, - GLenum source_internal_format, - GLenum dest_target, - GLuint dest_id, - GLint dest_level, - GLenum dest_internal_format, - GLsizei width, - GLsizei height, - bool flip_y, - bool premultiply_alpha, - bool unpremultiply_alpha, - bool dither, - CopyTextureMethod method, - gpu::gles2::CopyTexImageResourceManager* luma_emulation_blitter) { - // Use kIdentityMatrix if no transform passed in. 
- DoCopyTextureWithTransform( - decoder, source_target, source_id, source_level, source_internal_format, - dest_target, dest_id, dest_level, dest_internal_format, width, height, - flip_y, premultiply_alpha, unpremultiply_alpha, dither, kIdentityMatrix, - method, luma_emulation_blitter); -} - void CopyTextureResourceManagerImpl::DoCopySubTexture( DecoderContext* decoder, GLenum source_target, @@ -1210,12 +1132,12 @@ void CopyTextureResourceManagerImpl::DoCopySubTexture( dest_height = height; } - DoCopySubTextureWithTransform( + DoCopyTextureInternal( decoder, source_target, source_id, source_level, source_internal_format, dest_target, dest_texture, dest_level, dest_internal_format, dest_xoffset, dest_yoffset, x, y, width, height, dest_width, dest_height, source_width, source_height, flip_y, premultiply_alpha, unpremultiply_alpha, dither, - kIdentityMatrix, luma_emulation_blitter); + luma_emulation_blitter); if (method == CopyTextureMethod::DRAW_AND_COPY || method == CopyTextureMethod::DRAW_AND_READBACK) { @@ -1237,41 +1159,7 @@ void CopyTextureResourceManagerImpl::DoCopySubTexture( } } -void CopyTextureResourceManagerImpl::DoCopySubTextureWithTransform( - DecoderContext* decoder, - GLenum source_target, - GLuint source_id, - GLint source_level, - GLenum source_internal_format, - GLenum dest_target, - GLuint dest_id, - GLint dest_level, - GLenum dest_internal_format, - GLint xoffset, - GLint yoffset, - GLint x, - GLint y, - GLsizei width, - GLsizei height, - GLsizei dest_width, - GLsizei dest_height, - GLsizei source_width, - GLsizei source_height, - bool flip_y, - bool premultiply_alpha, - bool unpremultiply_alpha, - bool dither, - const GLfloat transform_matrix[16], - gpu::gles2::CopyTexImageResourceManager* luma_emulation_blitter) { - DoCopyTextureInternal( - decoder, source_target, source_id, source_level, source_internal_format, - dest_target, dest_id, dest_level, dest_internal_format, xoffset, yoffset, - x, y, width, height, dest_width, dest_height, 
source_width, source_height, - flip_y, premultiply_alpha, unpremultiply_alpha, dither, transform_matrix, - luma_emulation_blitter); -} - -void CopyTextureResourceManagerImpl::DoCopyTextureWithTransform( +void CopyTextureResourceManagerImpl::DoCopyTexture( DecoderContext* decoder, GLenum source_target, GLuint source_id, @@ -1287,7 +1175,6 @@ void CopyTextureResourceManagerImpl::DoCopyTextureWithTransform( bool premultiply_alpha, bool unpremultiply_alpha, bool dither, - const GLfloat transform_matrix[16], CopyTextureMethod method, gpu::gles2::CopyTexImageResourceManager* luma_emulation_blitter) { GLsizei dest_width = width; @@ -1326,12 +1213,11 @@ void CopyTextureResourceManagerImpl::DoCopyTextureWithTransform( dest_internal_format = adjusted_internal_format; } - DoCopyTextureInternal(decoder, source_target, source_id, source_level, - source_internal_format, dest_target, dest_texture, - dest_level, dest_internal_format, 0, 0, 0, 0, width, - height, dest_width, dest_height, width, height, flip_y, - premultiply_alpha, unpremultiply_alpha, dither, - transform_matrix, luma_emulation_blitter); + DoCopyTextureInternal( + decoder, source_target, source_id, source_level, source_internal_format, + dest_target, dest_texture, dest_level, dest_internal_format, 0, 0, 0, 0, + width, height, dest_width, dest_height, width, height, flip_y, + premultiply_alpha, unpremultiply_alpha, dither, luma_emulation_blitter); if (method == CopyTextureMethod::DRAW_AND_COPY || method == CopyTextureMethod::DRAW_AND_READBACK) { @@ -1375,7 +1261,6 @@ void CopyTextureResourceManagerImpl::DoCopyTextureInternal( bool premultiply_alpha, bool unpremultiply_alpha, bool dither, - const GLfloat transform_matrix[16], gpu::gles2::CopyTexImageResourceManager* luma_emulation_blitter) { DCHECK(source_target == GL_TEXTURE_2D || source_target == GL_TEXTURE_RECTANGLE_ARB || @@ -1465,15 +1350,10 @@ void CopyTextureResourceManagerImpl::DoCopyTextureInternal( info->vertex_source_add_handle = 
glGetUniformLocation(info->program, "u_vertex_source_add"); - info->tex_coord_transform_handle = - glGetUniformLocation(info->program, "u_tex_coord_transform"); info->sampler_handle = glGetUniformLocation(info->program, "u_sampler"); } glUseProgram(info->program); - glUniformMatrix4fv(info->tex_coord_transform_handle, 1, GL_FALSE, - transform_matrix); - // Note: For simplicity, the calculations in this comment block use a single // dimension. All calculations trivially extend to the x-y plane. // The target subrange in the source texture has coordinates [x, x + width]. diff --git a/chromium/gpu/command_buffer/service/gles2_cmd_copy_texture_chromium.h b/chromium/gpu/command_buffer/service/gles2_cmd_copy_texture_chromium.h index 351e181a635..33207ea04a0 100644 --- a/chromium/gpu/command_buffer/service/gles2_cmd_copy_texture_chromium.h +++ b/chromium/gpu/command_buffer/service/gles2_cmd_copy_texture_chromium.h @@ -103,57 +103,6 @@ class GPU_GLES2_EXPORT CopyTextureCHROMIUMResourceManager { CopyTextureMethod method, CopyTexImageResourceManager* luma_emulation_blitter) = 0; - virtual void DoCopySubTextureWithTransform( - DecoderContext* decoder, - GLenum source_target, - GLuint source_id, - GLint source_level, - GLenum source_internal_format, - GLenum dest_target, - GLuint dest_id, - GLint dest_level, - GLenum dest_internal_format, - GLint xoffset, - GLint yoffset, - GLint x, - GLint y, - GLsizei width, - GLsizei height, - GLsizei dest_width, - GLsizei dest_height, - GLsizei source_width, - GLsizei source_height, - bool flip_y, - bool premultiply_alpha, - bool unpremultiply_alpha, - bool dither, - const GLfloat transform_matrix[16], - CopyTexImageResourceManager* luma_emulation_blitter) = 0; - - // This will apply a transform on the texture coordinates before sampling - // the source texture and copying to the destination texture. The transform - // matrix should be given in column-major form, so it can be passed - // directly to GL. 
- virtual void DoCopyTextureWithTransform( - DecoderContext* decoder, - GLenum source_target, - GLuint source_id, - GLint source_level, - GLenum source_format, - GLenum dest_target, - GLuint dest_id, - GLint dest_level, - GLenum dest_format, - GLsizei width, - GLsizei height, - bool flip_y, - bool premultiply_alpha, - bool unpremultiply_alpha, - bool dither, - const GLfloat transform_matrix[16], - CopyTextureMethod method, - CopyTexImageResourceManager* luma_emulation_blitter) = 0; - // The attributes used during invocation of the extension. static const GLuint kVertexPositionAttrib = 0; diff --git a/chromium/gpu/command_buffer/service/gles2_cmd_decoder.cc b/chromium/gpu/command_buffer/service/gles2_cmd_decoder.cc index d729023a0b9..250c811b727 100644 --- a/chromium/gpu/command_buffer/service/gles2_cmd_decoder.cc +++ b/chromium/gpu/command_buffer/service/gles2_cmd_decoder.cc @@ -1092,8 +1092,7 @@ class GLES2DecoderImpl : public GLES2Decoder, // Callback for async SwapBuffers. void FinishAsyncSwapBuffers(uint64_t swap_id, - gfx::SwapResult result, - std::unique_ptr<gfx::GpuFence>); + gfx::SwapCompletionResult result); void FinishSwapBuffers(gfx::SwapResult result); void DoCommitOverlayPlanes(uint64_t swap_id, GLbitfield flags); @@ -1703,6 +1702,9 @@ class GLES2DecoderImpl : public GLES2Decoder, // Wrapper for glDisable void DoDisable(GLenum cap); + // Wrapper for glDisableiOES + void DoDisableiOES(GLenum target, GLuint index); + // Wrapper for glDisableVertexAttribArray. void DoDisableVertexAttribArray(GLuint index); @@ -1737,6 +1739,9 @@ class GLES2DecoderImpl : public GLES2Decoder, // Wrapper for glEnable void DoEnable(GLenum cap); + // Wrapper for glEnableiOES + void DoEnableiOES(GLenum target, GLuint index); + // Wrapper for glEnableVertexAttribArray. void DoEnableVertexAttribArray(GLuint index); @@ -1809,11 +1814,17 @@ class GLES2DecoderImpl : public GLES2Decoder, // Wrapper for glGetIntegerv. 
void DoGetIntegerv(GLenum pname, GLint* params, GLsizei params_size); - // Helper for DoGetIntegeri_v and DoGetInteger64i_v. + // Helper for DoGetBooleani_v, DoGetIntegeri_v and DoGetInteger64i_v. template <typename TYPE> void GetIndexedIntegerImpl( const char* function_name, GLenum target, GLuint index, TYPE* data); + // Wrapper for glGetBooleani_v. + void DoGetBooleani_v(GLenum target, + GLuint index, + GLboolean* params, + GLsizei params_size); + // Wrapper for glGetIntegeri_v. void DoGetIntegeri_v(GLenum target, GLuint index, @@ -1925,6 +1936,8 @@ class GLES2DecoderImpl : public GLES2Decoder, bool DoIsVertexArrayOES(GLuint client_id); bool DoIsSync(GLuint client_id); + bool DoIsEnablediOES(GLenum target, GLuint index); + void DoLineWidth(GLfloat width); // Wrapper for glLinkProgram @@ -4116,8 +4129,7 @@ gpu::ContextResult GLES2DecoderImpl::Initialize( InitializeGLDebugLogging(true, GLDebugMessageCallback, &logger_); } - if (feature_info_->feature_flags().chromium_texture_filtering_hint && - feature_info_->feature_flags().is_swiftshader) { + if (feature_info_->feature_flags().chromium_texture_filtering_hint) { api()->glHintFn(GL_TEXTURE_FILTERING_HINT_CHROMIUM, GL_NICEST); } @@ -7778,6 +7790,7 @@ void GLES2DecoderImpl::GetIndexedIntegerImpl( state_.GetWindowRectangle(index, data); return; } + scoped_refptr<IndexedBufferBindingHost> bindings; switch (target) { case GL_TRANSFORM_FEEDBACK_BUFFER_BINDING: @@ -7798,6 +7811,16 @@ void GLES2DecoderImpl::GetIndexedIntegerImpl( } bindings = state_.indexed_uniform_buffer_bindings.get(); break; + case GL_BLEND_SRC_RGB: + case GL_BLEND_SRC_ALPHA: + case GL_BLEND_DST_RGB: + case GL_BLEND_DST_ALPHA: + case GL_BLEND_EQUATION_RGB: + case GL_BLEND_EQUATION_ALPHA: + case GL_COLOR_WRITEMASK: + // Note (crbug.com/1058744): not implemented for validating command + // decoder + break; default: NOTREACHED(); break; @@ -7819,12 +7842,29 @@ void GLES2DecoderImpl::GetIndexedIntegerImpl( case GL_UNIFORM_BUFFER_START: *data = 
static_cast<TYPE>(bindings->GetBufferStart(index)); break; + case GL_BLEND_SRC_RGB: + case GL_BLEND_SRC_ALPHA: + case GL_BLEND_DST_RGB: + case GL_BLEND_DST_ALPHA: + case GL_BLEND_EQUATION_RGB: + case GL_BLEND_EQUATION_ALPHA: + case GL_COLOR_WRITEMASK: + // Note (crbug.com/1058744): not implemented for validating command + // decoder + break; default: NOTREACHED(); break; } } +void GLES2DecoderImpl::DoGetBooleani_v(GLenum target, + GLuint index, + GLboolean* params, + GLsizei params_size) { + GetIndexedIntegerImpl<GLboolean>("glGetBooleani_v", target, index, params); +} + void GLES2DecoderImpl::DoGetIntegeri_v(GLenum target, GLuint index, GLint* params, @@ -8358,6 +8398,10 @@ void GLES2DecoderImpl::DoDisable(GLenum cap) { } } +void GLES2DecoderImpl::DoDisableiOES(GLenum target, GLuint index) { + api()->glDisableiOESFn(target, index); +} + void GLES2DecoderImpl::DoEnable(GLenum cap) { if (SetCapabilityState(cap, true)) { if (cap == GL_PRIMITIVE_RESTART_FIXED_INDEX && @@ -8375,6 +8419,10 @@ void GLES2DecoderImpl::DoEnable(GLenum cap) { } } +void GLES2DecoderImpl::DoEnableiOES(GLenum target, GLuint index) { + api()->glEnableiOESFn(target, index); +} + void GLES2DecoderImpl::DoDepthRangef(GLclampf znear, GLclampf zfar) { state_.z_near = base::ClampToRange(znear, 0.0f, 1.0f); state_.z_far = base::ClampToRange(zfar, 0.0f, 1.0f); @@ -10418,32 +10466,9 @@ void GLES2DecoderImpl::DoUniformMatrix4fvStreamTextureMatrixCHROMIUM( GLint fake_location, GLboolean transpose, const volatile GLfloat* transform) { - float gl_matrix[16]; - // This refers to the bound external texture on the active unit. 
TextureUnit& unit = state_.texture_units[state_.active_texture_unit]; - if (TextureRef* texture_ref = unit.bound_texture_external_oes.get()) { - if (GLStreamTextureImage* image = - texture_ref->texture()->GetLevelStreamTextureImage( - GL_TEXTURE_EXTERNAL_OES, 0)) { - gfx::Transform st_transform(gfx::Transform::kSkipInitialization); - gfx::Transform pre_transform(gfx::Transform::kSkipInitialization); - image->GetTextureMatrix(gl_matrix); - st_transform.matrix().setColMajorf(gl_matrix); - // const_cast is safe, because setColMajorf only does a memcpy. - // TODO(piman): can we remove this assumption without having to introduce - // an extra copy? - pre_transform.matrix().setColMajorf( - const_cast<const GLfloat*>(transform)); - gfx::Transform(pre_transform, st_transform) - .matrix() - .asColMajorf(gl_matrix); - } else { - // Missing stream texture. Treat matrix as identity. - memcpy(gl_matrix, const_cast<const GLfloat*>(transform), - sizeof(gl_matrix)); - } - } else { + if (!unit.bound_texture_external_oes.get()) { LOCAL_SET_GL_ERROR(GL_INVALID_OPERATION, "DoUniformMatrix4vStreamTextureMatrix", "no texture bound"); @@ -10459,7 +10484,8 @@ void GLES2DecoderImpl::DoUniformMatrix4fvStreamTextureMatrixCHROMIUM( return; } - api()->glUniformMatrix4fvFn(real_location, count, transpose, gl_matrix); + api()->glUniformMatrix4fvFn(real_location, count, transpose, + const_cast<const GLfloat*>(transform)); } void GLES2DecoderImpl::DoUniformMatrix2x3fv(GLint fake_location, @@ -12382,6 +12408,11 @@ bool GLES2DecoderImpl::DoIsEnabled(GLenum cap) { return state_.GetEnabled(cap); } +bool GLES2DecoderImpl::DoIsEnablediOES(GLenum target, GLuint index) { + // Note (crbug.com/1058744): not implemented for validating command decoder + return false; +} + bool GLES2DecoderImpl::DoIsBuffer(GLuint client_id) { const Buffer* buffer = GetBuffer(client_id); return buffer && buffer->IsValid() && !buffer->IsDeleted(); @@ -16997,14 +17028,13 @@ void GLES2DecoderImpl::DoSwapBuffers(uint64_t swap_id, 
GLbitfield flags) { void GLES2DecoderImpl::FinishAsyncSwapBuffers( uint64_t swap_id, - gfx::SwapResult result, - std::unique_ptr<gfx::GpuFence> gpu_fence) { + gfx::SwapCompletionResult result) { TRACE_EVENT_ASYNC_END0("gpu", "AsyncSwapBuffers", swap_id); // Handling of the out-fence should have already happened before reaching // this function, so we don't expect to get a valid fence here. - DCHECK(!gpu_fence); + DCHECK(!result.gpu_fence); - FinishSwapBuffers(result); + FinishSwapBuffers(result.swap_result); } void GLES2DecoderImpl::FinishSwapBuffers(gfx::SwapResult result) { @@ -17438,7 +17468,7 @@ error::Error GLES2DecoderImpl::HandleDescheduleUntilFinishedCHROMIUM( if (fence) deschedule_until_finished_fences_.push_back(std::move(fence)); - if (deschedule_until_finished_fences_.size() == 1) + if (deschedule_until_finished_fences_.size() <= 1) return error::kNoError; DCHECK_EQ(2u, deschedule_until_finished_fences_.size()); @@ -18210,24 +18240,6 @@ void GLES2DecoderImpl::DoCopyTextureCHROMIUM( unpack_flip_y == GL_TRUE, unpack_premultiply_alpha == GL_TRUE, unpack_unmultiply_alpha == GL_TRUE, false /* dither */); - // GL_TEXTURE_EXTERNAL_OES texture requires that we apply a transform matrix - // before presenting. 
- if (source_target == GL_TEXTURE_EXTERNAL_OES) { - if (GLStreamTextureImage* texture_image = - source_texture->GetLevelStreamTextureImage(GL_TEXTURE_EXTERNAL_OES, - source_level)) { - GLfloat transform_matrix[16]; - texture_image->GetTextureMatrix(transform_matrix); - copy_texture_chromium_->DoCopyTextureWithTransform( - this, source_target, source_texture->service_id(), source_level, - source_internal_format, dest_target, dest_texture->service_id(), - dest_level, internal_format, source_width, source_height, - unpack_flip_y == GL_TRUE, unpack_premultiply_alpha == GL_TRUE, - unpack_unmultiply_alpha == GL_TRUE, false /* dither */, - transform_matrix, method, copy_tex_image_blit_.get()); - return; - } - } copy_texture_chromium_->DoCopyTexture( this, source_target, source_texture->service_id(), source_level, source_internal_format, dest_target, dest_texture->service_id(), @@ -18431,26 +18443,6 @@ void GLES2DecoderImpl::CopySubTextureHelper(const char* function_name, DoBindOrCopyTexImageIfNeeded(source_texture, source_target, 0); - // GL_TEXTURE_EXTERNAL_OES texture requires apply a transform matrix - // before presenting. 
- if (source_target == GL_TEXTURE_EXTERNAL_OES) { - if (GLStreamTextureImage* texture_image = - source_texture->GetLevelStreamTextureImage(GL_TEXTURE_EXTERNAL_OES, - source_level)) { - GLfloat transform_matrix[16]; - texture_image->GetTextureMatrix(transform_matrix); - copy_texture_chromium_->DoCopySubTextureWithTransform( - this, source_target, source_texture->service_id(), source_level, - source_internal_format, dest_target, dest_texture->service_id(), - dest_level, dest_internal_format, xoffset, yoffset, x, y, width, - height, dest_width, dest_height, source_width, source_height, - unpack_flip_y == GL_TRUE, unpack_premultiply_alpha == GL_TRUE, - unpack_unmultiply_alpha == GL_TRUE, dither == GL_TRUE, - transform_matrix, copy_tex_image_blit_.get()); - return; - } - } - CopyTextureMethod method = GetCopyTextureCHROMIUMMethod( GetFeatureInfo(), source_target, source_level, source_internal_format, source_type, dest_binding_target, dest_level, dest_internal_format, diff --git a/chromium/gpu/command_buffer/service/gles2_cmd_decoder.h b/chromium/gpu/command_buffer/service/gles2_cmd_decoder.h index 7491797e7fd..147e831f5cf 100644 --- a/chromium/gpu/command_buffer/service/gles2_cmd_decoder.h +++ b/chromium/gpu/command_buffer/service/gles2_cmd_decoder.h @@ -69,6 +69,7 @@ struct GPU_GLES2_EXPORT DisallowedFeatures { oes_texture_half_float_linear = false; ext_float_blend = false; oes_fbo_render_mipmap = false; + oes_draw_buffers_indexed = false; } bool operator==(const DisallowedFeatures& other) const { @@ -85,6 +86,7 @@ struct GPU_GLES2_EXPORT DisallowedFeatures { bool oes_texture_half_float_linear = false; bool ext_float_blend = false; bool oes_fbo_render_mipmap = false; + bool oes_draw_buffers_indexed = false; }; // This class implements the DecoderContext interface, decoding GLES2 diff --git a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_autogen.h b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_autogen.h index f8be401bf99..615acf88e5a 100644 --- 
a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_autogen.h +++ b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_autogen.h @@ -1314,6 +1314,43 @@ error::Error GLES2DecoderImpl::HandleGetBooleanv( return error::kNoError; } +error::Error GLES2DecoderImpl::HandleGetBooleani_v( + uint32_t immediate_data_size, + const volatile void* cmd_data) { + if (!feature_info_->IsWebGL2OrES3OrHigherContext()) + return error::kUnknownCommand; + const volatile gles2::cmds::GetBooleani_v& c = + *static_cast<const volatile gles2::cmds::GetBooleani_v*>(cmd_data); + GLenum pname = static_cast<GLenum>(c.pname); + GLuint index = static_cast<GLuint>(c.index); + typedef cmds::GetBooleani_v::Result Result; + GLsizei num_values = 0; + if (!GetNumValuesReturnedForGLGet(pname, &num_values)) { + LOCAL_SET_GL_ERROR_INVALID_ENUM(":GetBooleani_v", pname, "pname"); + return error::kNoError; + } + uint32_t checked_size = 0; + if (!Result::ComputeSize(num_values).AssignIfValid(&checked_size)) { + return error::kOutOfBounds; + } + Result* result = GetSharedMemoryAs<Result*>(c.data_shm_id, c.data_shm_offset, + checked_size); + GLboolean* data = result ? result->GetData() : nullptr; + if (!validators_->indexed_g_l_state.IsValid(pname)) { + LOCAL_SET_GL_ERROR_INVALID_ENUM("glGetBooleani_v", pname, "pname"); + return error::kNoError; + } + if (data == nullptr) { + return error::kOutOfBounds; + } + // Check that the client initialized the result. 
+ if (result->size != 0) { + return error::kInvalidArguments; + } + DoGetBooleani_v(pname, index, data, num_values); + result->SetNumResults(num_values); + return error::kNoError; +} error::Error GLES2DecoderImpl::HandleGetBufferParameteri64v( uint32_t immediate_data_size, const volatile void* cmd_data) { @@ -5617,6 +5654,141 @@ error::Error GLES2DecoderImpl::HandleEndBatchReadAccessSharedImageCHROMIUM( return error::kNoError; } +error::Error GLES2DecoderImpl::HandleEnableiOES(uint32_t immediate_data_size, + const volatile void* cmd_data) { + const volatile gles2::cmds::EnableiOES& c = + *static_cast<const volatile gles2::cmds::EnableiOES*>(cmd_data); + if (!features().oes_draw_buffers_indexed) { + return error::kUnknownCommand; + } + + GLenum target = static_cast<GLenum>(c.target); + GLuint index = static_cast<GLuint>(c.index); + DoEnableiOES(target, index); + return error::kNoError; +} + +error::Error GLES2DecoderImpl::HandleDisableiOES( + uint32_t immediate_data_size, + const volatile void* cmd_data) { + const volatile gles2::cmds::DisableiOES& c = + *static_cast<const volatile gles2::cmds::DisableiOES*>(cmd_data); + if (!features().oes_draw_buffers_indexed) { + return error::kUnknownCommand; + } + + GLenum target = static_cast<GLenum>(c.target); + GLuint index = static_cast<GLuint>(c.index); + DoDisableiOES(target, index); + return error::kNoError; +} + +error::Error GLES2DecoderImpl::HandleBlendEquationiOES( + uint32_t immediate_data_size, + const volatile void* cmd_data) { + const volatile gles2::cmds::BlendEquationiOES& c = + *static_cast<const volatile gles2::cmds::BlendEquationiOES*>(cmd_data); + if (!features().oes_draw_buffers_indexed) { + return error::kUnknownCommand; + } + + GLuint buf = static_cast<GLuint>(c.buf); + GLenum mode = static_cast<GLenum>(c.mode); + api()->glBlendEquationiOESFn(buf, mode); + return error::kNoError; +} + +error::Error GLES2DecoderImpl::HandleBlendEquationSeparateiOES( + uint32_t immediate_data_size, + const volatile void* 
cmd_data) { + const volatile gles2::cmds::BlendEquationSeparateiOES& c = + *static_cast<const volatile gles2::cmds::BlendEquationSeparateiOES*>( + cmd_data); + if (!features().oes_draw_buffers_indexed) { + return error::kUnknownCommand; + } + + GLuint buf = static_cast<GLuint>(c.buf); + GLenum modeRGB = static_cast<GLenum>(c.modeRGB); + GLenum modeAlpha = static_cast<GLenum>(c.modeAlpha); + api()->glBlendEquationSeparateiOESFn(buf, modeRGB, modeAlpha); + return error::kNoError; +} + +error::Error GLES2DecoderImpl::HandleBlendFunciOES( + uint32_t immediate_data_size, + const volatile void* cmd_data) { + const volatile gles2::cmds::BlendFunciOES& c = + *static_cast<const volatile gles2::cmds::BlendFunciOES*>(cmd_data); + if (!features().oes_draw_buffers_indexed) { + return error::kUnknownCommand; + } + + GLuint buf = static_cast<GLuint>(c.buf); + GLenum src = static_cast<GLenum>(c.src); + GLenum dst = static_cast<GLenum>(c.dst); + api()->glBlendFunciOESFn(buf, src, dst); + return error::kNoError; +} + +error::Error GLES2DecoderImpl::HandleBlendFuncSeparateiOES( + uint32_t immediate_data_size, + const volatile void* cmd_data) { + const volatile gles2::cmds::BlendFuncSeparateiOES& c = + *static_cast<const volatile gles2::cmds::BlendFuncSeparateiOES*>( + cmd_data); + if (!features().oes_draw_buffers_indexed) { + return error::kUnknownCommand; + } + + GLuint buf = static_cast<GLuint>(c.buf); + GLenum srcRGB = static_cast<GLenum>(c.srcRGB); + GLenum dstRGB = static_cast<GLenum>(c.dstRGB); + GLenum srcAlpha = static_cast<GLenum>(c.srcAlpha); + GLenum dstAlpha = static_cast<GLenum>(c.dstAlpha); + api()->glBlendFuncSeparateiOESFn(buf, srcRGB, dstRGB, srcAlpha, dstAlpha); + return error::kNoError; +} + +error::Error GLES2DecoderImpl::HandleColorMaskiOES( + uint32_t immediate_data_size, + const volatile void* cmd_data) { + const volatile gles2::cmds::ColorMaskiOES& c = + *static_cast<const volatile gles2::cmds::ColorMaskiOES*>(cmd_data); + if 
(!features().oes_draw_buffers_indexed) { + return error::kUnknownCommand; + } + + GLuint buf = static_cast<GLuint>(c.buf); + GLboolean r = static_cast<GLboolean>(c.r); + GLboolean g = static_cast<GLboolean>(c.g); + GLboolean b = static_cast<GLboolean>(c.b); + GLboolean a = static_cast<GLboolean>(c.a); + api()->glColorMaskiOESFn(buf, r, g, b, a); + return error::kNoError; +} + +error::Error GLES2DecoderImpl::HandleIsEnablediOES( + uint32_t immediate_data_size, + const volatile void* cmd_data) { + const volatile gles2::cmds::IsEnablediOES& c = + *static_cast<const volatile gles2::cmds::IsEnablediOES*>(cmd_data); + if (!features().oes_draw_buffers_indexed) { + return error::kUnknownCommand; + } + + GLenum target = static_cast<GLenum>(c.target); + GLuint index = static_cast<GLuint>(c.index); + typedef cmds::IsEnablediOES::Result Result; + Result* result_dst = GetSharedMemoryAs<Result*>( + c.result_shm_id, c.result_shm_offset, sizeof(*result_dst)); + if (!result_dst) { + return error::kOutOfBounds; + } + *result_dst = DoIsEnablediOES(target, index); + return error::kNoError; +} + bool GLES2DecoderImpl::SetCapabilityState(GLenum cap, bool enabled) { switch (cap) { case GL_BLEND: diff --git a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough.cc b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough.cc index 6736ed33120..3fa8cacc086 100644 --- a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough.cc +++ b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough.cc @@ -24,6 +24,7 @@ #include "gpu/command_buffer/service/program_cache.h" #include "gpu/command_buffer/service/shared_image_representation.h" #include "ui/gl/gl_version_info.h" +#include "ui/gl/gpu_switching_manager.h" #include "ui/gl/progress_reporter.h" #if defined(OS_WIN) @@ -1092,8 +1093,7 @@ gpu::ContextResult GLES2DecoderPassthroughImpl::Initialize( bound_buffers_[GL_DISPATCH_INDIRECT_BUFFER] = 0; } - if 
(feature_info_->feature_flags().chromium_texture_filtering_hint && - feature_info_->feature_flags().is_swiftshader) { + if (feature_info_->feature_flags().chromium_texture_filtering_hint) { api()->glHintFn(GL_TEXTURE_FILTERING_HINT_CHROMIUM, GL_NICEST); } @@ -1210,6 +1210,11 @@ gpu::ContextResult GLES2DecoderPassthroughImpl::Initialize( api()->glDisableFn(GL_TEXTURE_RECTANGLE_ANGLE); #endif + // Register this object as a GPU switching observer. + if (feature_info_->IsWebGLContext()) { + ui::GpuSwitchingManager::GetInstance()->AddObserver(this); + } + set_initialized(); return gpu::ContextResult::kSuccess; } @@ -1315,6 +1320,11 @@ void GLES2DecoderPassthroughImpl::Destroy(bool have_context) { } deschedule_until_finished_fences_.clear(); + // Unregister this object as a GPU switching observer. + if (feature_info_->IsWebGLContext()) { + ui::GpuSwitchingManager::GetInstance()->RemoveObserver(this); + } + // Destroy the surface before the context, some surface destructors make GL // calls. surface_ = nullptr; @@ -1872,6 +1882,12 @@ gpu::gles2::Logger* GLES2DecoderPassthroughImpl::GetLogger() { return &logger_; } +void GLES2DecoderPassthroughImpl::OnGpuSwitched( + gl::GpuPreference active_gpu_heuristic) { + // Send OnGpuSwitched notification to renderer process via decoder client. 
+ client()->OnGpuSwitched(active_gpu_heuristic); +} + void GLES2DecoderPassthroughImpl::BeginDecoding() { gpu_tracer_->BeginDecoding(); gpu_trace_commands_ = gpu_tracer_->IsTracing() && *gpu_decoder_category_; @@ -2865,14 +2881,13 @@ bool GLES2DecoderPassthroughImpl::IsEmulatedFramebufferBound( void GLES2DecoderPassthroughImpl::CheckSwapBuffersAsyncResult( const char* function_name, uint64_t swap_id, - gfx::SwapResult result, - std::unique_ptr<gfx::GpuFence> gpu_fence) { + gfx::SwapCompletionResult result) { TRACE_EVENT_ASYNC_END0("gpu", "AsyncSwapBuffers", swap_id); // Handling of the out-fence should have already happened before reaching // this function, so we don't expect to get a valid fence here. - DCHECK(!gpu_fence); + DCHECK(!result.gpu_fence); - CheckSwapBuffersResult(result, function_name); + CheckSwapBuffersResult(result.swap_result, function_name); } error::Error GLES2DecoderPassthroughImpl::CheckSwapBuffersResult( diff --git a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough.h b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough.h index ade661ee2af..7ff062e8b00 100644 --- a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough.h +++ b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough.h @@ -35,6 +35,7 @@ #include "ui/gl/gl_fence.h" #include "ui/gl/gl_image.h" #include "ui/gl/gl_surface.h" +#include "ui/gl/gpu_switching_observer.h" namespace gl { class GLFence; @@ -139,7 +140,9 @@ struct PassthroughResources { std::unordered_map<GLuint, MappedBuffer> mapped_buffer_map; }; -class GPU_GLES2_EXPORT GLES2DecoderPassthroughImpl : public GLES2Decoder { +class GPU_GLES2_EXPORT GLES2DecoderPassthroughImpl + : public GLES2Decoder, + public ui::GpuSwitchingObserver { public: GLES2DecoderPassthroughImpl(DecoderClient* client, CommandBufferServiceBase* command_buffer_service, @@ -347,6 +350,9 @@ class GPU_GLES2_EXPORT GLES2DecoderPassthroughImpl : public GLES2Decoder { // directly, and needing to know if 
they failed due to loss. bool CheckResetStatus() override; + // Implement GpuSwitchingObserver. + void OnGpuSwitched(gl::GpuPreference active_gpu_heuristic) override; + Logger* GetLogger() override; void BeginDecoding() override; @@ -465,8 +471,7 @@ class GPU_GLES2_EXPORT GLES2DecoderPassthroughImpl : public GLES2Decoder { void CheckSwapBuffersAsyncResult(const char* function_name, uint64_t swap_id, - gfx::SwapResult result, - std::unique_ptr<gfx::GpuFence> gpu_fence); + gfx::SwapCompletionResult result); error::Error CheckSwapBuffersResult(gfx::SwapResult result, const char* function_name); diff --git a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough_doer_prototypes.h b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough_doer_prototypes.h index be04a014d68..069eb85a96b 100644 --- a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough_doer_prototypes.h +++ b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough_doer_prototypes.h @@ -257,6 +257,11 @@ error::Error DoGetBooleanv(GLenum pname, GLsizei bufsize, GLsizei* length, GLboolean* params); +error::Error DoGetBooleani_v(GLenum pname, + GLuint index, + GLsizei bufsize, + GLsizei* length, + GLboolean* data); error::Error DoGetBufferParameteri64v(GLenum target, GLenum pname, GLsizei bufsize, @@ -1143,4 +1148,22 @@ error::Error DoBeginSharedImageAccessDirectCHROMIUM(GLuint client_id, error::Error DoEndSharedImageAccessDirectCHROMIUM(GLuint client_id); error::Error DoBeginBatchReadAccessSharedImageCHROMIUM(void); error::Error DoEndBatchReadAccessSharedImageCHROMIUM(void); +error::Error DoEnableiOES(GLenum target, GLuint index); +error::Error DoDisableiOES(GLenum target, GLuint index); +error::Error DoBlendEquationiOES(GLuint buf, GLenum mode); +error::Error DoBlendEquationSeparateiOES(GLuint buf, + GLenum modeRGB, + GLenum modeAlpha); +error::Error DoBlendFunciOES(GLuint buf, GLenum sfactor, GLenum dfactor); +error::Error DoBlendFuncSeparateiOES(GLuint buf, + 
GLenum srcRGB, + GLenum dstRGB, + GLenum srcAlpha, + GLenum dstAlpha); +error::Error DoColorMaskiOES(GLuint buf, + GLboolean red, + GLboolean green, + GLboolean blue, + GLboolean alpha); +error::Error DoIsEnablediOES(GLenum target, GLuint index, uint32_t* result); #endif // GPU_COMMAND_BUFFER_SERVICE_GLES2_CMD_DECODER_PASSTHROUGH_DOER_PROTOTYPES_H_ diff --git a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough_doers.cc b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough_doers.cc index ecad6a45c14..19086f610a0 100644 --- a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough_doers.cc +++ b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough_doers.cc @@ -615,6 +615,12 @@ error::Error GLES2DecoderPassthroughImpl::DoBlendEquation(GLenum mode) { return error::kNoError; } +error::Error GLES2DecoderPassthroughImpl::DoBlendEquationiOES(GLuint buf, + GLenum mode) { + api()->glBlendEquationiOESFn(buf, mode); + return error::kNoError; +} + error::Error GLES2DecoderPassthroughImpl::DoBlendEquationSeparate( GLenum modeRGB, GLenum modeAlpha) { @@ -622,12 +628,27 @@ error::Error GLES2DecoderPassthroughImpl::DoBlendEquationSeparate( return error::kNoError; } +error::Error GLES2DecoderPassthroughImpl::DoBlendEquationSeparateiOES( + GLuint buf, + GLenum modeRGB, + GLenum modeAlpha) { + api()->glBlendEquationSeparateiOESFn(buf, modeRGB, modeAlpha); + return error::kNoError; +} + error::Error GLES2DecoderPassthroughImpl::DoBlendFunc(GLenum sfactor, GLenum dfactor) { api()->glBlendFuncFn(sfactor, dfactor); return error::kNoError; } +error::Error GLES2DecoderPassthroughImpl::DoBlendFunciOES(GLuint buf, + GLenum sfactor, + GLenum dfactor) { + api()->glBlendFunciOESFn(buf, sfactor, dfactor); + return error::kNoError; +} + error::Error GLES2DecoderPassthroughImpl::DoBlendFuncSeparate(GLenum srcRGB, GLenum dstRGB, GLenum srcAlpha, @@ -636,6 +657,16 @@ error::Error GLES2DecoderPassthroughImpl::DoBlendFuncSeparate(GLenum srcRGB, 
return error::kNoError; } +error::Error GLES2DecoderPassthroughImpl::DoBlendFuncSeparateiOES( + GLuint buf, + GLenum srcRGB, + GLenum dstRGB, + GLenum srcAlpha, + GLenum dstAlpha) { + api()->glBlendFuncSeparateiOESFn(buf, srcRGB, dstRGB, srcAlpha, dstAlpha); + return error::kNoError; +} + error::Error GLES2DecoderPassthroughImpl::DoBufferData(GLenum target, GLsizeiptr size, const void* data, @@ -744,6 +775,15 @@ error::Error GLES2DecoderPassthroughImpl::DoColorMask(GLboolean red, return error::kNoError; } +error::Error GLES2DecoderPassthroughImpl::DoColorMaskiOES(GLuint buf, + GLboolean red, + GLboolean green, + GLboolean blue, + GLboolean alpha) { + api()->glColorMaskiOESFn(buf, red, green, blue, alpha); + return error::kNoError; +} + error::Error GLES2DecoderPassthroughImpl::DoCompileShader(GLuint shader) { api()->glCompileShaderFn(GetShaderServiceID(shader, resources_)); return error::kNoError; @@ -1591,6 +1631,15 @@ error::Error GLES2DecoderPassthroughImpl::DoGetBooleanv(GLenum pname, }); } +error::Error GLES2DecoderPassthroughImpl::DoGetBooleani_v(GLenum pname, + GLuint index, + GLsizei bufsize, + GLsizei* length, + GLboolean* data) { + glGetBooleani_vRobustANGLE(pname, index, bufsize, length, data); + return error::kNoError; +} + error::Error GLES2DecoderPassthroughImpl::DoGetBufferParameteri64v( GLenum target, GLenum pname, @@ -2238,6 +2287,13 @@ error::Error GLES2DecoderPassthroughImpl::DoIsEnabled(GLenum cap, return error::kNoError; } +error::Error GLES2DecoderPassthroughImpl::DoIsEnablediOES(GLenum target, + GLuint index, + uint32_t* result) { + *result = api()->glIsEnablediOESFn(target, index); + return error::kNoError; +} + error::Error GLES2DecoderPassthroughImpl::DoIsFramebuffer(GLuint framebuffer, uint32_t* result) { *result = api()->glIsFramebufferEXTFn( @@ -5062,26 +5118,8 @@ GLES2DecoderPassthroughImpl::DoUniformMatrix4fvStreamTextureMatrixCHROMIUM( return error::kNoError; } - float gl_matrix[16] = {}; - - GLStreamTextureImage* image = - 
bound_texture->GetStreamLevelImage(kTextureTarget, 0); - if (image) { - gfx::Transform st_transform(gfx::Transform::kSkipInitialization); - gfx::Transform pre_transform(gfx::Transform::kSkipInitialization); - image->GetTextureMatrix(gl_matrix); - st_transform.matrix().setColMajorf(gl_matrix); - // const_cast is safe, because setColMajorf only does a memcpy. - // TODO(piman): can we remove this assumption without having to introduce - // an extra copy? - pre_transform.matrix().setColMajorf(const_cast<const GLfloat*>(transform)); - gfx::Transform(pre_transform, st_transform).matrix().asColMajorf(gl_matrix); - } else { - // Missing stream texture. Treat matrix as identity. - memcpy(gl_matrix, const_cast<const GLfloat*>(transform), sizeof(gl_matrix)); - } - - api()->glUniformMatrix4fvFn(location, 1, transpose, gl_matrix); + api()->glUniformMatrix4fvFn(location, 1, transpose, + const_cast<const GLfloat*>(transform)); return error::kNoError; } @@ -5406,7 +5444,8 @@ error::Error GLES2DecoderPassthroughImpl::DoBeginSharedImageAccessDirectCHROMIUM( GLuint client_id, GLenum mode) { - if (mode != GL_SHARED_IMAGE_ACCESS_MODE_READWRITE_CHROMIUM && + if (mode != GL_SHARED_IMAGE_ACCESS_MODE_OVERLAY_CHROMIUM && + mode != GL_SHARED_IMAGE_ACCESS_MODE_READWRITE_CHROMIUM && mode != GL_SHARED_IMAGE_ACCESS_MODE_READ_CHROMIUM) { InsertError(GL_INVALID_ENUM, "unrecognized access mode"); return error::kNoError; @@ -5460,5 +5499,17 @@ GLES2DecoderPassthroughImpl::DoEndBatchReadAccessSharedImageCHROMIUM() { return error::kNoError; } +error::Error GLES2DecoderPassthroughImpl::DoEnableiOES(GLenum target, + GLuint index) { + api()->glEnableiOESFn(target, index); + return error::kNoError; +} + +error::Error GLES2DecoderPassthroughImpl::DoDisableiOES(GLenum target, + GLuint index) { + api()->glDisableiOESFn(target, index); + return error::kNoError; +} + } // namespace gles2 } // namespace gpu diff --git a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough_handlers_autogen.cc 
b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough_handlers_autogen.cc index 7e7ca53b9c9..cddfeff0b97 100644 --- a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough_handlers_autogen.cc +++ b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough_handlers_autogen.cc @@ -1130,6 +1130,37 @@ error::Error GLES2DecoderPassthroughImpl::HandleGetBooleanv( return error::kNoError; } +error::Error GLES2DecoderPassthroughImpl::HandleGetBooleani_v( + uint32_t immediate_data_size, + const volatile void* cmd_data) { + if (!feature_info_->IsWebGL2OrES3OrHigherContext()) + return error::kUnknownCommand; + const volatile gles2::cmds::GetBooleani_v& c = + *static_cast<const volatile gles2::cmds::GetBooleani_v*>(cmd_data); + GLenum pname = static_cast<GLenum>(c.pname); + GLuint index = static_cast<GLuint>(c.index); + unsigned int buffer_size = 0; + typedef cmds::GetBooleani_v::Result Result; + Result* result = GetSharedMemoryAndSizeAs<Result*>( + c.data_shm_id, c.data_shm_offset, sizeof(Result), &buffer_size); + GLboolean* data = result ? 
result->GetData() : nullptr; + if (data == nullptr) { + return error::kOutOfBounds; + } + GLsizei bufsize = Result::ComputeMaxResults(buffer_size); + GLsizei written_values = 0; + GLsizei* length = &written_values; + error::Error error = DoGetBooleani_v(pname, index, bufsize, length, data); + if (error != error::kNoError) { + return error; + } + if (written_values > bufsize) { + return error::kOutOfBounds; + } + result->SetNumResults(written_values); + return error::kNoError; +} + error::Error GLES2DecoderPassthroughImpl::HandleGetBufferParameteri64v( uint32_t immediate_data_size, const volatile void* cmd_data) { @@ -4888,5 +4919,166 @@ GLES2DecoderPassthroughImpl::HandleEndBatchReadAccessSharedImageCHROMIUM( return error::kNoError; } +error::Error GLES2DecoderPassthroughImpl::HandleEnableiOES( + uint32_t immediate_data_size, + const volatile void* cmd_data) { + const volatile gles2::cmds::EnableiOES& c = + *static_cast<const volatile gles2::cmds::EnableiOES*>(cmd_data); + if (!features().oes_draw_buffers_indexed) { + return error::kUnknownCommand; + } + + GLenum target = static_cast<GLenum>(c.target); + GLuint index = static_cast<GLuint>(c.index); + error::Error error = DoEnableiOES(target, index); + if (error != error::kNoError) { + return error; + } + return error::kNoError; +} + +error::Error GLES2DecoderPassthroughImpl::HandleDisableiOES( + uint32_t immediate_data_size, + const volatile void* cmd_data) { + const volatile gles2::cmds::DisableiOES& c = + *static_cast<const volatile gles2::cmds::DisableiOES*>(cmd_data); + if (!features().oes_draw_buffers_indexed) { + return error::kUnknownCommand; + } + + GLenum target = static_cast<GLenum>(c.target); + GLuint index = static_cast<GLuint>(c.index); + error::Error error = DoDisableiOES(target, index); + if (error != error::kNoError) { + return error; + } + return error::kNoError; +} + +error::Error GLES2DecoderPassthroughImpl::HandleBlendEquationiOES( + uint32_t immediate_data_size, + const volatile void* cmd_data) 
{ + const volatile gles2::cmds::BlendEquationiOES& c = + *static_cast<const volatile gles2::cmds::BlendEquationiOES*>(cmd_data); + if (!features().oes_draw_buffers_indexed) { + return error::kUnknownCommand; + } + + GLuint buf = static_cast<GLuint>(c.buf); + GLenum mode = static_cast<GLenum>(c.mode); + error::Error error = DoBlendEquationiOES(buf, mode); + if (error != error::kNoError) { + return error; + } + return error::kNoError; +} + +error::Error GLES2DecoderPassthroughImpl::HandleBlendEquationSeparateiOES( + uint32_t immediate_data_size, + const volatile void* cmd_data) { + const volatile gles2::cmds::BlendEquationSeparateiOES& c = + *static_cast<const volatile gles2::cmds::BlendEquationSeparateiOES*>( + cmd_data); + if (!features().oes_draw_buffers_indexed) { + return error::kUnknownCommand; + } + + GLuint buf = static_cast<GLuint>(c.buf); + GLenum modeRGB = static_cast<GLenum>(c.modeRGB); + GLenum modeAlpha = static_cast<GLenum>(c.modeAlpha); + error::Error error = DoBlendEquationSeparateiOES(buf, modeRGB, modeAlpha); + if (error != error::kNoError) { + return error; + } + return error::kNoError; +} + +error::Error GLES2DecoderPassthroughImpl::HandleBlendFunciOES( + uint32_t immediate_data_size, + const volatile void* cmd_data) { + const volatile gles2::cmds::BlendFunciOES& c = + *static_cast<const volatile gles2::cmds::BlendFunciOES*>(cmd_data); + if (!features().oes_draw_buffers_indexed) { + return error::kUnknownCommand; + } + + GLuint buf = static_cast<GLuint>(c.buf); + GLenum src = static_cast<GLenum>(c.src); + GLenum dst = static_cast<GLenum>(c.dst); + error::Error error = DoBlendFunciOES(buf, src, dst); + if (error != error::kNoError) { + return error; + } + return error::kNoError; +} + +error::Error GLES2DecoderPassthroughImpl::HandleBlendFuncSeparateiOES( + uint32_t immediate_data_size, + const volatile void* cmd_data) { + const volatile gles2::cmds::BlendFuncSeparateiOES& c = + *static_cast<const volatile gles2::cmds::BlendFuncSeparateiOES*>( + 
cmd_data); + if (!features().oes_draw_buffers_indexed) { + return error::kUnknownCommand; + } + + GLuint buf = static_cast<GLuint>(c.buf); + GLenum srcRGB = static_cast<GLenum>(c.srcRGB); + GLenum dstRGB = static_cast<GLenum>(c.dstRGB); + GLenum srcAlpha = static_cast<GLenum>(c.srcAlpha); + GLenum dstAlpha = static_cast<GLenum>(c.dstAlpha); + error::Error error = + DoBlendFuncSeparateiOES(buf, srcRGB, dstRGB, srcAlpha, dstAlpha); + if (error != error::kNoError) { + return error; + } + return error::kNoError; +} + +error::Error GLES2DecoderPassthroughImpl::HandleColorMaskiOES( + uint32_t immediate_data_size, + const volatile void* cmd_data) { + const volatile gles2::cmds::ColorMaskiOES& c = + *static_cast<const volatile gles2::cmds::ColorMaskiOES*>(cmd_data); + if (!features().oes_draw_buffers_indexed) { + return error::kUnknownCommand; + } + + GLuint buf = static_cast<GLuint>(c.buf); + GLboolean r = static_cast<GLboolean>(c.r); + GLboolean g = static_cast<GLboolean>(c.g); + GLboolean b = static_cast<GLboolean>(c.b); + GLboolean a = static_cast<GLboolean>(c.a); + error::Error error = DoColorMaskiOES(buf, r, g, b, a); + if (error != error::kNoError) { + return error; + } + return error::kNoError; +} + +error::Error GLES2DecoderPassthroughImpl::HandleIsEnablediOES( + uint32_t immediate_data_size, + const volatile void* cmd_data) { + const volatile gles2::cmds::IsEnablediOES& c = + *static_cast<const volatile gles2::cmds::IsEnablediOES*>(cmd_data); + if (!features().oes_draw_buffers_indexed) { + return error::kUnknownCommand; + } + + GLenum target = static_cast<GLenum>(c.target); + GLuint index = static_cast<GLuint>(c.index); + typedef cmds::IsEnablediOES::Result Result; + Result* result = GetSharedMemoryAs<Result*>( + c.result_shm_id, c.result_shm_offset, sizeof(*result)); + if (!result) { + return error::kOutOfBounds; + } + error::Error error = DoIsEnablediOES(target, index, result); + if (error != error::kNoError) { + return error; + } + return error::kNoError; +} + 
} // namespace gles2 } // namespace gpu diff --git a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_1.cc b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_1.cc index bd8874bf3b2..97996f33dd9 100644 --- a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_1.cc +++ b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_1.cc @@ -176,21 +176,6 @@ void GLES2DecoderTestBase::SpecializedSetup< } template <> -void GLES2DecoderTestBase::SpecializedSetup<cmds::GetProgramiv, 0>( - bool valid) { - if (valid) { - // GetProgramiv calls ClearGLError then GetError to make sure - // it actually got a value so it can report correctly to the client. - EXPECT_CALL(*gl_, GetError()) - .WillOnce(Return(GL_NO_ERROR)) - .RetiresOnSaturation(); - EXPECT_CALL(*gl_, GetError()) - .WillOnce(Return(GL_NO_ERROR)) - .RetiresOnSaturation(); - } -} - -template <> void GLES2DecoderTestBase:: SpecializedSetup<cmds::GenTransformFeedbacksImmediate, 0>(bool valid) { if (valid) { diff --git a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_1_autogen.h b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_1_autogen.h index 3d5553178ca..3bc69db4302 100644 --- a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_1_autogen.h +++ b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_1_autogen.h @@ -1233,57 +1233,4 @@ TEST_P(GLES2DecoderTest1, GetIntegervInvalidArgs1_1) { EXPECT_EQ(error::kOutOfBounds, ExecuteCmd(cmd)); EXPECT_EQ(0u, result->size); } - -TEST_P(GLES2DecoderTest1, GetProgramivValidArgs) { - SpecializedSetup<cmds::GetProgramiv, 0>(true); - typedef cmds::GetProgramiv::Result Result; - Result* result = static_cast<Result*>(shared_memory_address_); - result->size = 0; - cmds::GetProgramiv cmd; - cmd.Init(client_program_id_, GL_DELETE_STATUS, shared_memory_id_, - shared_memory_offset_); - EXPECT_EQ(error::kNoError, ExecuteCmd(cmd)); - 
EXPECT_EQ(decoder_->GetGLES2Util()->GLGetNumValuesReturned(GL_DELETE_STATUS), - result->GetNumResults()); - EXPECT_EQ(GL_NO_ERROR, GetGLError()); -} - -TEST_P(GLES2DecoderTest1, GetProgramivInvalidArgs1_0) { - EXPECT_CALL(*gl_, GetProgramiv(_, _, _)).Times(0); - SpecializedSetup<cmds::GetProgramiv, 0>(false); - cmds::GetProgramiv::Result* result = - static_cast<cmds::GetProgramiv::Result*>(shared_memory_address_); - result->size = 0; - cmds::GetProgramiv cmd; - cmd.Init(client_program_id_, GL_PROGRAM_BINARY_RETRIEVABLE_HINT, - shared_memory_id_, shared_memory_offset_); - EXPECT_EQ(error::kNoError, ExecuteCmd(cmd)); - EXPECT_EQ(0u, result->size); - EXPECT_EQ(GL_INVALID_ENUM, GetGLError()); -} - -TEST_P(GLES2DecoderTest1, GetProgramivInvalidArgs2_0) { - EXPECT_CALL(*gl_, GetProgramiv(_, _, _)).Times(0); - SpecializedSetup<cmds::GetProgramiv, 0>(false); - cmds::GetProgramiv::Result* result = - static_cast<cmds::GetProgramiv::Result*>(shared_memory_address_); - result->size = 0; - cmds::GetProgramiv cmd; - cmd.Init(client_program_id_, GL_DELETE_STATUS, kInvalidSharedMemoryId, 0); - EXPECT_EQ(error::kOutOfBounds, ExecuteCmd(cmd)); - EXPECT_EQ(0u, result->size); -} - -TEST_P(GLES2DecoderTest1, GetProgramivInvalidArgs2_1) { - EXPECT_CALL(*gl_, GetProgramiv(_, _, _)).Times(0); - SpecializedSetup<cmds::GetProgramiv, 0>(false); - cmds::GetProgramiv::Result* result = - static_cast<cmds::GetProgramiv::Result*>(shared_memory_address_); - result->size = 0; - cmds::GetProgramiv cmd; - cmd.Init(client_program_id_, GL_DELETE_STATUS, shared_memory_id_, - kInvalidSharedMemoryOffset); - EXPECT_EQ(error::kOutOfBounds, ExecuteCmd(cmd)); - EXPECT_EQ(0u, result->size); -} #endif // GPU_COMMAND_BUFFER_SERVICE_GLES2_CMD_DECODER_UNITTEST_1_AUTOGEN_H_ diff --git a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_2.cc b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_2.cc index 8288b6d3b18..75b71b0ed28 100644 --- 
a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_2.cc +++ b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_2.cc @@ -691,6 +691,20 @@ void GLES2DecoderTestBase::SpecializedSetup< } template <> +void GLES2DecoderTestBase::SpecializedSetup<cmds::GetProgramiv, 0>(bool valid) { + if (valid) { + // GetProgramiv calls ClearGLError then GetError to make sure + // it actually got a value so it can report correctly to the client. + EXPECT_CALL(*gl_, GetError()) + .WillOnce(Return(GL_NO_ERROR)) + .RetiresOnSaturation(); + EXPECT_CALL(*gl_, GetError()) + .WillOnce(Return(GL_NO_ERROR)) + .RetiresOnSaturation(); + } +} + +template <> void GLES2DecoderTestBase::SpecializedSetup<cmds::RenderbufferStorage, 0>( bool valid) { DoBindRenderbuffer(GL_RENDERBUFFER, client_renderbuffer_id_, @@ -851,12 +865,6 @@ void GLES2DecoderTestBase::SpecializedSetup<cmds::Uniform3ivImmediate, 0>( } template <> -void GLES2DecoderTestBase::SpecializedSetup<cmds::Uniform4f, 0>( - bool /* valid */) { - SetupShaderForUniform(GL_FLOAT_VEC4); -} - -template <> void GLES2DecoderTestBase::SpecializedSetup<cmds::UniformMatrix2fvImmediate, 0>( bool /* valid */) { SetupShaderForUniform(GL_FLOAT_MAT2); diff --git a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_2_autogen.h b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_2_autogen.h index 47fd77225a4..fa45593b8f5 100644 --- a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_2_autogen.h +++ b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_2_autogen.h @@ -12,6 +12,59 @@ #ifndef GPU_COMMAND_BUFFER_SERVICE_GLES2_CMD_DECODER_UNITTEST_2_AUTOGEN_H_ #define GPU_COMMAND_BUFFER_SERVICE_GLES2_CMD_DECODER_UNITTEST_2_AUTOGEN_H_ +TEST_P(GLES2DecoderTest2, GetProgramivValidArgs) { + SpecializedSetup<cmds::GetProgramiv, 0>(true); + typedef cmds::GetProgramiv::Result Result; + Result* result = static_cast<Result*>(shared_memory_address_); + result->size = 0; + cmds::GetProgramiv cmd; + 
cmd.Init(client_program_id_, GL_DELETE_STATUS, shared_memory_id_, + shared_memory_offset_); + EXPECT_EQ(error::kNoError, ExecuteCmd(cmd)); + EXPECT_EQ(decoder_->GetGLES2Util()->GLGetNumValuesReturned(GL_DELETE_STATUS), + result->GetNumResults()); + EXPECT_EQ(GL_NO_ERROR, GetGLError()); +} + +TEST_P(GLES2DecoderTest2, GetProgramivInvalidArgs1_0) { + EXPECT_CALL(*gl_, GetProgramiv(_, _, _)).Times(0); + SpecializedSetup<cmds::GetProgramiv, 0>(false); + cmds::GetProgramiv::Result* result = + static_cast<cmds::GetProgramiv::Result*>(shared_memory_address_); + result->size = 0; + cmds::GetProgramiv cmd; + cmd.Init(client_program_id_, GL_PROGRAM_BINARY_RETRIEVABLE_HINT, + shared_memory_id_, shared_memory_offset_); + EXPECT_EQ(error::kNoError, ExecuteCmd(cmd)); + EXPECT_EQ(0u, result->size); + EXPECT_EQ(GL_INVALID_ENUM, GetGLError()); +} + +TEST_P(GLES2DecoderTest2, GetProgramivInvalidArgs2_0) { + EXPECT_CALL(*gl_, GetProgramiv(_, _, _)).Times(0); + SpecializedSetup<cmds::GetProgramiv, 0>(false); + cmds::GetProgramiv::Result* result = + static_cast<cmds::GetProgramiv::Result*>(shared_memory_address_); + result->size = 0; + cmds::GetProgramiv cmd; + cmd.Init(client_program_id_, GL_DELETE_STATUS, kInvalidSharedMemoryId, 0); + EXPECT_EQ(error::kOutOfBounds, ExecuteCmd(cmd)); + EXPECT_EQ(0u, result->size); +} + +TEST_P(GLES2DecoderTest2, GetProgramivInvalidArgs2_1) { + EXPECT_CALL(*gl_, GetProgramiv(_, _, _)).Times(0); + SpecializedSetup<cmds::GetProgramiv, 0>(false); + cmds::GetProgramiv::Result* result = + static_cast<cmds::GetProgramiv::Result*>(shared_memory_address_); + result->size = 0; + cmds::GetProgramiv cmd; + cmd.Init(client_program_id_, GL_DELETE_STATUS, shared_memory_id_, + kInvalidSharedMemoryOffset); + EXPECT_EQ(error::kOutOfBounds, ExecuteCmd(cmd)); + EXPECT_EQ(0u, result->size); +} + TEST_P(GLES2DecoderTest2, GetProgramInfoLogValidArgs) { const char* kInfo = "hello"; const uint32_t kBucketId = 123; @@ -1298,13 +1351,4 @@ TEST_P(GLES2DecoderTest2, 
Uniform3ivImmediateValidArgs) { EXPECT_EQ(error::kNoError, ExecuteImmediateCmd(cmd, sizeof(temp))); EXPECT_EQ(GL_NO_ERROR, GetGLError()); } - -TEST_P(GLES2DecoderTest2, Uniform4fValidArgs) { - EXPECT_CALL(*gl_, Uniform4fv(1, 1, _)); - SpecializedSetup<cmds::Uniform4f, 0>(true); - cmds::Uniform4f cmd; - cmd.Init(1, 2, 3, 4, 5); - EXPECT_EQ(error::kNoError, ExecuteCmd(cmd)); - EXPECT_EQ(GL_NO_ERROR, GetGLError()); -} #endif // GPU_COMMAND_BUFFER_SERVICE_GLES2_CMD_DECODER_UNITTEST_2_AUTOGEN_H_ diff --git a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_3.cc b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_3.cc index 580131f038c..b37cb1943d8 100644 --- a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_3.cc +++ b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_3.cc @@ -51,6 +51,12 @@ INSTANTIATE_TEST_SUITE_P(Service, GLES2DecoderTest3, ::testing::Bool()); INSTANTIATE_TEST_SUITE_P(Service, GLES3DecoderTest3, ::testing::Bool()); template <> +void GLES2DecoderTestBase::SpecializedSetup<cmds::Uniform4f, 0>( + bool /* valid */) { + SetupShaderForUniform(GL_FLOAT_VEC4); +} + +template <> void GLES2DecoderTestBase::SpecializedSetup<cmds::Uniform4fvImmediate, 0>( bool /* valid */) { SetupShaderForUniform(GL_FLOAT_VEC4); diff --git a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_3_autogen.h b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_3_autogen.h index 00161c02032..10ec529b465 100644 --- a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_3_autogen.h +++ b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_3_autogen.h @@ -12,6 +12,15 @@ #ifndef GPU_COMMAND_BUFFER_SERVICE_GLES2_CMD_DECODER_UNITTEST_3_AUTOGEN_H_ #define GPU_COMMAND_BUFFER_SERVICE_GLES2_CMD_DECODER_UNITTEST_3_AUTOGEN_H_ +TEST_P(GLES2DecoderTest3, Uniform4fValidArgs) { + EXPECT_CALL(*gl_, Uniform4fv(1, 1, _)); + SpecializedSetup<cmds::Uniform4f, 0>(true); + cmds::Uniform4f cmd; + cmd.Init(1, 2, 
3, 4, 5); + EXPECT_EQ(error::kNoError, ExecuteCmd(cmd)); + EXPECT_EQ(GL_NO_ERROR, GetGLError()); +} + TEST_P(GLES2DecoderTest3, Uniform4fvImmediateValidArgs) { cmds::Uniform4fvImmediate& cmd = *GetImmediateAs<cmds::Uniform4fvImmediate>(); SpecializedSetup<cmds::Uniform4fvImmediate, 0>(true); diff --git a/chromium/gpu/command_buffer/service/gles2_cmd_validation_implementation_autogen.h b/chromium/gpu/command_buffer/service/gles2_cmd_validation_implementation_autogen.h index 5284cc802b9..38d67d62bb3 100644 --- a/chromium/gpu/command_buffer/service/gles2_cmd_validation_implementation_autogen.h +++ b/chromium/gpu/command_buffer/service/gles2_cmd_validation_implementation_autogen.h @@ -437,6 +437,13 @@ static const GLenum valid_g_l_state_table_es3[] = { GL_UNPACK_SKIP_IMAGES, GL_UNPACK_SKIP_PIXELS, GL_UNPACK_SKIP_ROWS, + GL_BLEND_EQUATION_RGB, + GL_BLEND_EQUATION_ALPHA, + GL_BLEND_SRC_RGB, + GL_BLEND_SRC_ALPHA, + GL_BLEND_DST_RGB, + GL_BLEND_DST_ALPHA, + GL_COLOR_WRITEMASK, }; bool Validators::GetMaxIndexTypeValidator::IsValid(const GLenum value) const { @@ -512,6 +519,13 @@ static const GLenum valid_indexed_g_l_state_table[] = { GL_UNIFORM_BUFFER_BINDING, GL_UNIFORM_BUFFER_SIZE, GL_UNIFORM_BUFFER_START, + GL_BLEND_EQUATION_RGB, + GL_BLEND_EQUATION_ALPHA, + GL_BLEND_SRC_RGB, + GL_BLEND_SRC_ALPHA, + GL_BLEND_DST_RGB, + GL_BLEND_DST_ALPHA, + GL_COLOR_WRITEMASK, }; bool Validators::InternalFormatParameterValidator::IsValid( @@ -773,6 +787,7 @@ bool Validators::ShaderTypeValidator::IsValid(const GLenum value) const { bool Validators::SharedImageAccessModeValidator::IsValid( const GLenum value) const { switch (value) { + case GL_SHARED_IMAGE_ACCESS_MODE_OVERLAY_CHROMIUM: case GL_SHARED_IMAGE_ACCESS_MODE_READWRITE_CHROMIUM: case GL_SHARED_IMAGE_ACCESS_MODE_READ_CHROMIUM: return true; diff --git a/chromium/gpu/command_buffer/service/gpu_command_buffer_memory_tracker.cc b/chromium/gpu/command_buffer/service/gpu_command_buffer_memory_tracker.cc index fa686c24432..97b6d43e986 
100644 --- a/chromium/gpu/command_buffer/service/gpu_command_buffer_memory_tracker.cc +++ b/chromium/gpu/command_buffer/service/gpu_command_buffer_memory_tracker.cc @@ -46,9 +46,11 @@ GpuCommandBufferMemoryTracker::GpuCommandBufferMemoryTracker( : command_buffer_id_(command_buffer_id), client_tracing_id_(client_tracing_id), context_type_(context_type), - memory_pressure_listener_(base::BindRepeating( - &GpuCommandBufferMemoryTracker::LogMemoryStatsPressure, - base::Unretained(this))), + memory_pressure_listener_( + FROM_HERE, + base::BindRepeating( + &GpuCommandBufferMemoryTracker::LogMemoryStatsPressure, + base::Unretained(this))), observer_(observer) { // Set up |memory_stats_timer_| to call LogMemoryPeriodic periodically // via the provided |task_runner|. diff --git a/chromium/gpu/command_buffer/service/image_reader_gl_owner.cc b/chromium/gpu/command_buffer/service/image_reader_gl_owner.cc index c743800f0a5..c1ebdde777a 100644 --- a/chromium/gpu/command_buffer/service/image_reader_gl_owner.cc +++ b/chromium/gpu/command_buffer/service/image_reader_gl_owner.cc @@ -45,22 +45,38 @@ bool IsSurfaceControl(TextureOwner::Mode mode) { } } // namespace +// This class is safe to be created/destroyed on different threads. This is made +// sure by destruction happening on correct thread. This class is not thread +// safe to be used concurrently on multiple thraeads. 
class ImageReaderGLOwner::ScopedHardwareBufferImpl : public base::android::ScopedHardwareBufferFenceSync { public: - ScopedHardwareBufferImpl(scoped_refptr<ImageReaderGLOwner> texture_owner, + ScopedHardwareBufferImpl(base::WeakPtr<ImageReaderGLOwner> texture_owner, AImage* image, base::android::ScopedHardwareBufferHandle handle, base::ScopedFD fence_fd) : base::android::ScopedHardwareBufferFenceSync(std::move(handle), - std::move(fence_fd)), + std::move(fence_fd), + base::ScopedFD(), + true /* is_video */), texture_owner_(std::move(texture_owner)), - image_(image) { + image_(image), + task_runner_(base::ThreadTaskRunnerHandle::Get()) { DCHECK(image_); texture_owner_->RegisterRefOnImage(image_); } + ~ScopedHardwareBufferImpl() override { - texture_owner_->ReleaseRefOnImage(image_, std::move(read_fence_)); + if (task_runner_->RunsTasksInCurrentSequence()) { + if (texture_owner_) { + texture_owner_->ReleaseRefOnImage(image_, std::move(read_fence_)); + } + } else { + task_runner_->PostTask( + FROM_HERE, + base::BindOnce(&gpu::ImageReaderGLOwner::ReleaseRefOnImage, + texture_owner_, image_, std::move(read_fence_))); + } } void SetReadFence(base::ScopedFD fence_fd, bool has_context) final { @@ -72,8 +88,9 @@ class ImageReaderGLOwner::ScopedHardwareBufferImpl private: base::ScopedFD read_fence_; - scoped_refptr<ImageReaderGLOwner> texture_owner_; + base::WeakPtr<ImageReaderGLOwner> texture_owner_; AImage* image_; + scoped_refptr<base::SingleThreadTaskRunner> task_runner_; }; ImageReaderGLOwner::ImageReaderGLOwner( @@ -305,7 +322,7 @@ ImageReaderGLOwner::GetAHardwareBuffer() { return nullptr; return std::make_unique<ScopedHardwareBufferImpl>( - this, current_image_ref_->image(), + weak_factory_.GetWeakPtr(), current_image_ref_->image(), base::android::ScopedHardwareBufferHandle::Create(buffer), current_image_ref_->GetReadyFence()); } @@ -367,96 +384,6 @@ void ImageReaderGLOwner::ReleaseRefOnImage(AImage* image, image_refs_.erase(it); } -void 
ImageReaderGLOwner::GetTransformMatrix(float mtx[]) { - DCHECK_CALLED_ON_VALID_THREAD(thread_checker_); - - // Assign a Y inverted Identity matrix. Both MCVD and AVDA path performs a Y - // inversion of this matrix later. Hence if we assign a Y inverted matrix - // here, it simply becomes an identity matrix later and will have no effect - // on the image data. - static constexpr float kYInvertedIdentity[16]{1, 0, 0, 0, 0, -1, 0, 0, - 0, 0, 1, 0, 0, 1, 0, 1}; - memcpy(mtx, kYInvertedIdentity, sizeof(kYInvertedIdentity)); - - - // Get the crop rectangle associated with this image. The crop rectangle - // specifies the region of valid pixels in the image. - gfx::Rect crop_rect = GetCropRect(); - if (crop_rect.IsEmpty()) - return; - - // Get the AHardwareBuffer to query its dimensions. - AHardwareBuffer* buffer = nullptr; - loader_.AImage_getHardwareBuffer(current_image_ref_->image(), &buffer); - if (!buffer) { - DLOG(ERROR) << "Unable to get an AHardwareBuffer from the image"; - return; - } - - // Get the buffer descriptor. Note that for querying the buffer descriptor, we - // do not need to wait on the AHB to be ready. - AHardwareBuffer_Desc desc; - base::AndroidHardwareBufferCompat::GetInstance().Describe(buffer, &desc); - - // Note: Below calculation of shrink_amount and the transform matrix params - // tx,ty,sx,sy is copied from the android - // SurfaceTexture::computeCurrentTransformMatrix() - - // https://android.googlesource.com/platform/frameworks/native/+/5c1139f/libs/gui/SurfaceTexture.cpp#516. - // We are assuming here that bilinear filtering is always enabled for - // sampling the texture. - float shrink_amount = 0.0f; - float tx = 0.0f, ty = 0.0f, sx = 1.0f, sy = 1.0f; - - // In order to prevent bilinear sampling beyond the edge of the - // crop rectangle we may need to shrink it by 2 texels in each - // dimension. 
Normally this would just need to take 1/2 a texel - // off each end, but because the chroma channels of YUV420 images - // are subsampled we may need to shrink the crop region by a whole - // texel on each side. - switch (desc.format) { - case AHARDWAREBUFFER_FORMAT_R8G8B8A8_UNORM: - case AHARDWAREBUFFER_FORMAT_R8G8B8X8_UNORM: - case AHARDWAREBUFFER_FORMAT_R8G8B8_UNORM: - case AHARDWAREBUFFER_FORMAT_R5G6B5_UNORM: - // We know there's no subsampling of any channels, so we - // only need to shrink by a half a pixel. - shrink_amount = 0.5; - break; - default: - // If we don't recognize the format, we must assume the - // worst case (that we care about), which is YUV420. - shrink_amount = 1.0; - } - - int32_t crop_rect_width = crop_rect.width(); - int32_t crop_rect_height = crop_rect.height(); - int32_t crop_rect_left = crop_rect.x(); - int32_t crop_rect_bottom = crop_rect.y() + crop_rect_height; - int32_t buffer_width = desc.width; - int32_t buffer_height = desc.height; - DCHECK_GT(buffer_width, 0); - DCHECK_GT(buffer_height, 0); - - // Only shrink the dimensions that are not the size of the buffer. - if (crop_rect_width < buffer_width) { - tx = (float(crop_rect_left) + shrink_amount) / buffer_width; - sx = (float(crop_rect_width) - (2.0f * shrink_amount)) / buffer_width; - } - - if (crop_rect_height < buffer_height) { - ty = (float(buffer_height - crop_rect_bottom) + shrink_amount) / - buffer_height; - sy = (float(crop_rect_height) - (2.0f * shrink_amount)) / buffer_height; - } - - // Update the transform matrix with above parameters by also taking into - // account Y inversion/ vertical flip. - mtx[0] = sx; - mtx[5] = 0 - sy; - mtx[12] = tx; - mtx[13] = 1 - ty; -} - void ImageReaderGLOwner::ReleaseBackBuffers() { DCHECK_CALLED_ON_VALID_THREAD(thread_checker_); // ReleaseBackBuffers() call is not required with image reader. 
@@ -482,7 +409,7 @@ void ImageReaderGLOwner::OnFrameAvailable(void* context, AImageReader* reader) { image_reader_ptr->frame_available_cb_.Run(); } -void ImageReaderGLOwner::GetCodedSizeAndVisibleRect( +bool ImageReaderGLOwner::GetCodedSizeAndVisibleRect( gfx::Size rotated_visible_size, gfx::Size* coded_size, gfx::Rect* visible_rect) { @@ -499,7 +426,7 @@ void ImageReaderGLOwner::GetCodedSizeAndVisibleRect( if (!buffer) { *coded_size = gfx::Size(); *visible_rect = gfx::Rect(); - return; + return false; } // Get the buffer descriptor. Note that for querying the buffer descriptor, we // do not need to wait on the AHB to be ready. @@ -508,6 +435,8 @@ void ImageReaderGLOwner::GetCodedSizeAndVisibleRect( *visible_rect = GetCropRect(); *coded_size = gfx::Size(desc.width, desc.height); + + return true; } ImageReaderGLOwner::ImageRef::ImageRef() = default; diff --git a/chromium/gpu/command_buffer/service/image_reader_gl_owner.h b/chromium/gpu/command_buffer/service/image_reader_gl_owner.h index 0d9f93f0475..b6c2d2c0d3b 100644 --- a/chromium/gpu/command_buffer/service/image_reader_gl_owner.h +++ b/chromium/gpu/command_buffer/service/image_reader_gl_owner.h @@ -9,6 +9,7 @@ #include "base/android/android_image_reader_compat.h" #include "base/containers/flat_map.h" +#include "base/memory/weak_ptr.h" #include "gpu/command_buffer/service/texture_owner.h" #include "gpu/gpu_gles2_export.h" #include "ui/gl/gl_fence_egl.h" @@ -37,12 +38,10 @@ class GPU_GLES2_EXPORT ImageReaderGLOwner : public TextureOwner { gl::ScopedJavaSurface CreateJavaSurface() const override; void UpdateTexImage() override; void EnsureTexImageBound() override; - void GetTransformMatrix(float mtx[16]) override; void ReleaseBackBuffers() override; std::unique_ptr<base::android::ScopedHardwareBufferFenceSync> GetAHardwareBuffer() override; - gfx::Rect GetCropRect() override; - void GetCodedSizeAndVisibleRect(gfx::Size rotated_visible_size, + bool GetCodedSizeAndVisibleRect(gfx::Size rotated_visible_size, 
gfx::Size* coded_size, gfx::Rect* visible_rect) override; @@ -89,6 +88,8 @@ class GPU_GLES2_EXPORT ImageReaderGLOwner : public TextureOwner { void RegisterRefOnImage(AImage* image); void ReleaseRefOnImage(AImage* image, base::ScopedFD fence_fd); + gfx::Rect GetCropRect(); + static void OnFrameAvailable(void* context, AImageReader* reader); // AImageReader instance @@ -132,6 +133,8 @@ class GPU_GLES2_EXPORT ImageReaderGLOwner : public TextureOwner { THREAD_CHECKER(thread_checker_); + base::WeakPtrFactory<ImageReaderGLOwner> weak_factory_{this}; + DISALLOW_COPY_AND_ASSIGN(ImageReaderGLOwner); }; diff --git a/chromium/gpu/command_buffer/service/memory_tracking.h b/chromium/gpu/command_buffer/service/memory_tracking.h index ea211deddf6..d55a130e2ec 100644 --- a/chromium/gpu/command_buffer/service/memory_tracking.h +++ b/chromium/gpu/command_buffer/service/memory_tracking.h @@ -9,7 +9,7 @@ #include <stdint.h> #include <string> -#include "base/logging.h" +#include "base/check.h" #include "base/macros.h" #include "base/memory/ref_counted.h" #include "base/trace_event/trace_event.h" diff --git a/chromium/gpu/command_buffer/service/mock_texture_owner.h b/chromium/gpu/command_buffer/service/mock_texture_owner.h index e98cdc4a729..2cf23affb93 100644 --- a/chromium/gpu/command_buffer/service/mock_texture_owner.h +++ b/chromium/gpu/command_buffer/service/mock_texture_owner.h @@ -33,12 +33,11 @@ class MockTextureOwner : public TextureOwner { MOCK_CONST_METHOD0(CreateJavaSurface, gl::ScopedJavaSurface()); MOCK_METHOD0(UpdateTexImage, void()); MOCK_METHOD0(EnsureTexImageBound, void()); - MOCK_METHOD1(GetTransformMatrix, void(float mtx[16])); MOCK_METHOD0(ReleaseBackBuffers, void()); MOCK_METHOD1(OnTextureDestroyed, void(gpu::gles2::AbstractTexture*)); MOCK_METHOD1(SetFrameAvailableCallback, void(const base::RepeatingClosure&)); MOCK_METHOD3(GetCodedSizeAndVisibleRect, - void(gfx::Size rotated_visible_size, + bool(gfx::Size rotated_visible_size, gfx::Size* coded_size, gfx::Rect* 
visible_rect)); @@ -48,15 +47,9 @@ class MockTextureOwner : public TextureOwner { return nullptr; } - gfx::Rect GetCropRect() override { - ++get_crop_rect_count; - return gfx::Rect(); - } - gl::GLContext* fake_context; gl::GLSurface* fake_surface; int get_a_hardware_buffer_count = 0; - int get_crop_rect_count = 0; bool expect_update_tex_image; protected: diff --git a/chromium/gpu/command_buffer/service/mocks.h b/chromium/gpu/command_buffer/service/mocks.h index d01e9b6c4bd..ac9a5efb25c 100644 --- a/chromium/gpu/command_buffer/service/mocks.h +++ b/chromium/gpu/command_buffer/service/mocks.h @@ -16,7 +16,6 @@ #include <string> #include <vector> -#include "base/logging.h" #include "gpu/command_buffer/common/cmd_buffer_common.h" #include "gpu/command_buffer/service/async_api_interface.h" #include "gpu/command_buffer/service/memory_tracking.h" diff --git a/chromium/gpu/command_buffer/service/program_manager.h b/chromium/gpu/command_buffer/service/program_manager.h index 800de937f5d..f99a099b0c8 100644 --- a/chromium/gpu/command_buffer/service/program_manager.h +++ b/chromium/gpu/command_buffer/service/program_manager.h @@ -13,7 +13,7 @@ #include <string> #include <vector> -#include "base/logging.h" +#include "base/check_op.h" #include "base/macros.h" #include "base/memory/ref_counted.h" #include "gpu/command_buffer/service/common_decoder.h" diff --git a/chromium/gpu/command_buffer/service/raster_decoder.cc b/chromium/gpu/command_buffer/service/raster_decoder.cc index 12302131817..aec7821dc48 100644 --- a/chromium/gpu/command_buffer/service/raster_decoder.cc +++ b/chromium/gpu/command_buffer/service/raster_decoder.cc @@ -208,7 +208,8 @@ bool AllowedBetweenBeginEndRaster(CommandId command) { // avoid it as much as possible. 
class RasterDecoderImpl final : public RasterDecoder, public gles2::ErrorStateClient, - public ServiceFontManager::Client { + public ServiceFontManager::Client, + public SharedContextState::ContextLostObserver { public: RasterDecoderImpl(DecoderClient* client, CommandBufferServiceBase* command_buffer_service, @@ -365,6 +366,9 @@ class RasterDecoderImpl final : public RasterDecoder, scoped_refptr<Buffer> GetShmBuffer(uint32_t shm_id) override; void ReportProgress() override; + // SharedContextState::ContextLostObserver implementation. + void OnContextLost() override; + private: gles2::ContextState* state() const { if (use_passthrough_) { @@ -401,7 +405,7 @@ class RasterDecoderImpl final : public RasterDecoder, if (!flush_workaround_disabled_for_test_) { TRACE_EVENT0("gpu", "RasterDecoderImpl::FlushToWorkAroundMacCrashes"); if (gr_context()) - gr_context()->flush(); + gr_context()->flushAndSubmit(); api()->glFlushFn(); // Flushes can be expensive, yield to allow interruption after each flush. @@ -583,8 +587,6 @@ class RasterDecoderImpl final : public RasterDecoder, bool use_passthrough_ = false; bool use_ddl_ = false; - bool reset_by_robustness_extension_ = false; - // The current decoder error communicates the decoder error through command // processing functions that do not return the error value. Should be set // only if not returning an error. 
@@ -756,9 +758,12 @@ RasterDecoderImpl::RasterDecoderImpl( font_manager_(base::MakeRefCounted<ServiceFontManager>(this)), is_privileged_(is_privileged) { DCHECK(shared_context_state_); + shared_context_state_->AddContextLostObserver(this); } -RasterDecoderImpl::~RasterDecoderImpl() = default; +RasterDecoderImpl::~RasterDecoderImpl() { + shared_context_state_->RemoveContextLostObserver(this); +} base::WeakPtr<DecoderContext> RasterDecoderImpl::AsWeakPtr() { return weak_ptr_factory_.GetWeakPtr(); @@ -854,16 +859,12 @@ void RasterDecoderImpl::Destroy(bool have_context) { DCHECK(result == GrSemaphoresSubmitted::kYes || end_semaphores_.empty()); end_semaphores_.clear(); sk_surface_ = nullptr; - if (shared_image_) { - scoped_shared_image_write_.reset(); - shared_image_.reset(); - } else { - sk_surface_for_testing_.reset(); - } - } - if (gr_context()) { - gr_context()->flush(); } + if (gr_context()) + gr_context()->flushAndSubmit(); + scoped_shared_image_write_.reset(); + shared_image_.reset(); + sk_surface_for_testing_.reset(); } copy_tex_image_blit_.reset(); @@ -891,18 +892,11 @@ bool RasterDecoderImpl::MakeCurrent() { if (shared_context_state_->context_lost() || !shared_context_state_->MakeCurrent(nullptr)) { LOG(ERROR) << " RasterDecoderImpl: Context lost during MakeCurrent."; - MarkContextLost(error::kMakeCurrentFailed); return false; } DCHECK_EQ(api(), gl::g_current_gl_context); - if (CheckResetStatus()) { - LOG(ERROR) - << " RasterDecoderImpl: Context reset detected after MakeCurrent."; - return false; - } - // Rebind textures if the service ids may have changed. RestoreAllExternalTextureBindingsIfNeeded(); @@ -948,6 +942,10 @@ Capabilities RasterDecoderImpl::GetCapabilities() { #else NOTREACHED(); #endif + } else if (shared_context_state_->GrContextIsDawn()) { + // TODO(crbug.com/1090476): Query Dawn for this value once an API exists for + // capabilities. 
+ caps.max_texture_size = 8192; } else { NOTIMPLEMENTED(); } @@ -1113,55 +1111,27 @@ void RasterDecoderImpl::SetLevelInfo(uint32_t client_id, } bool RasterDecoderImpl::WasContextLost() const { - return context_lost_; + return shared_context_state_->context_lost(); } bool RasterDecoderImpl::WasContextLostByRobustnessExtension() const { - return WasContextLost() && reset_by_robustness_extension_; + return shared_context_state_->device_needs_reset(); } void RasterDecoderImpl::MarkContextLost(error::ContextLostReason reason) { - // Only lose the context once. - if (WasContextLost()) - return; + shared_context_state_->MarkContextLost(reason); +} - // Don't make GL calls in here, the context might not be current. - context_lost_ = true; - command_buffer_service()->SetContextLostReason(reason); +void RasterDecoderImpl::OnContextLost() { + DCHECK(shared_context_state_->context_lost()); + command_buffer_service()->SetContextLostReason( + *shared_context_state_->context_lost_reason()); current_decoder_error_ = error::kLostContext; } bool RasterDecoderImpl::CheckResetStatus() { DCHECK(!WasContextLost()); - DCHECK(shared_context_state_->context()->IsCurrent(nullptr)); - - // If the reason for the call was a GL error, we can try to determine the - // reset status more accurately. - GLenum driver_status = - shared_context_state_->context()->CheckStickyGraphicsResetStatus(); - if (driver_status == GL_NO_ERROR) - return false; - - LOG(ERROR) << "RasterDecoder context lost via ARB/EXT_robustness. 
Reset " - "status = " - << gles2::GLES2Util::GetStringEnum(driver_status); - - switch (driver_status) { - case GL_GUILTY_CONTEXT_RESET_ARB: - MarkContextLost(error::kGuilty); - break; - case GL_INNOCENT_CONTEXT_RESET_ARB: - MarkContextLost(error::kInnocent); - break; - case GL_UNKNOWN_CONTEXT_RESET_ARB: - MarkContextLost(error::kUnknown); - break; - default: - NOTREACHED(); - return false; - } - reset_by_robustness_extension_ = true; - return true; + return shared_context_state_->CheckResetStatus(/*needs_gl=*/false); } gles2::Logger* RasterDecoderImpl::GetLogger() { @@ -1500,14 +1470,13 @@ void RasterDecoderImpl::DisableFlushWorkaroundForTest() { void RasterDecoderImpl::OnContextLostError() { if (!WasContextLost()) { // Need to lose current context before broadcasting! - CheckResetStatus(); - reset_by_robustness_extension_ = true; + shared_context_state_->CheckResetStatus(/*needs_gl=*/false); } } void RasterDecoderImpl::OnOutOfMemoryError() { if (lose_context_when_out_of_memory_ && !WasContextLost()) { - if (!CheckResetStatus()) { + if (!shared_context_state_->CheckResetStatus(/*needs_gl=*/false)) { MarkContextLost(error::kOutOfMemory); } } @@ -2071,17 +2040,14 @@ void RasterDecoderImpl::DoCopySubTextureINTERNALGL( if (gles2::GLStreamTextureImage* image = source_texture->GetLevelStreamTextureImage(GL_TEXTURE_EXTERNAL_OES, source_level)) { - GLfloat transform_matrix[16]; - image->GetTextureMatrix(transform_matrix); - - copy_texture_chromium_->DoCopySubTextureWithTransform( + copy_texture_chromium_->DoCopySubTexture( this, source_target, source_texture->service_id(), source_level, source_internal_format, dest_target, dest_texture->service_id(), dest_level, dest_internal_format, xoffset, yoffset, x, y, width, height, dest_size.width(), dest_size.height(), source_size.width(), source_size.height(), unpack_flip_y, unpack_premultiply_alpha, - false /* unpack_unmultiply_alpha */, false /* dither */, - transform_matrix, copy_tex_image_blit_.get()); + 
/*unpack_unmultiply_alpha=*/false, /*dither=*/false, + gles2::CopyTextureMethod::DIRECT_DRAW, copy_tex_image_blit_.get()); dest_texture->SetLevelClearedRect(dest_target, dest_level, new_cleared_rect); return; @@ -2255,8 +2221,13 @@ void RasterDecoderImpl::DoCopySubTextureINTERNALSkia( }; gpu::AddVulkanCleanupTaskForSkiaFlush( shared_context_state_->vk_context_provider(), &flush_info); - dest_scoped_access->surface()->flush( + auto result = dest_scoped_access->surface()->flush( SkSurface::BackendSurfaceAccess::kNoAccess, flush_info); + // If the |end_semaphores| is empty, we can deferred the queue submission. + if (!end_semaphores.empty()) { + DCHECK_EQ(result, GrSemaphoresSubmitted::kYes); + gr_context()->submit(); + } if (!dest_shared_image->IsCleared()) { dest_shared_image->SetClearedRect(new_cleared_rect); @@ -2297,6 +2268,15 @@ void RasterDecoderImpl::DoWritePixelsINTERNAL(GLint x_offset, return; } + if (SkColorTypeBytesPerPixel(viz::ResourceFormatToClosestSkColorType( + true, dest_shared_image->format())) != + SkColorTypeBytesPerPixel(static_cast<SkColorType>(src_sk_color_type))) { + LOCAL_SET_GL_ERROR(GL_INVALID_OPERATION, "glWritePixels", + "Bytes per pixel for src SkColorType and dst " + "SkColorType must be the same."); + return; + } + // If present, the color space is serialized into shared memory before the // pixel data. 
sk_sp<SkColorSpace> color_space; @@ -2375,8 +2355,12 @@ void RasterDecoderImpl::DoWritePixelsINTERNAL(GLint x_offset, }; gpu::AddVulkanCleanupTaskForSkiaFlush( shared_context_state_->vk_context_provider(), &flush_info); - dest_scoped_access->surface()->flush( + auto result = dest_scoped_access->surface()->flush( SkSurface::BackendSurfaceAccess::kNoAccess, flush_info); + if (!end_semaphores.empty()) { + DCHECK_EQ(result, GrSemaphoresSubmitted::kYes); + gr_context()->submit(); + } if (!dest_shared_image->IsCleared()) { dest_shared_image->SetClearedRect( @@ -2565,8 +2549,12 @@ void RasterDecoderImpl::DoConvertYUVMailboxesToRGBINTERNAL( }; gpu::AddVulkanCleanupTaskForSkiaFlush( shared_context_state_->vk_context_provider(), &flush_info); - dest_scoped_access->surface()->flush( + auto result = dest_scoped_access->surface()->flush( SkSurface::BackendSurfaceAccess::kNoAccess, flush_info); + if (!end_semaphores.empty()) { + DCHECK_EQ(result, GrSemaphoresSubmitted::kYes); + gr_context()->submit(); + } if (!images[YUVConversionMailboxIndex::kDestIndex]->IsCleared() && drew_image) { @@ -2899,13 +2887,15 @@ void RasterDecoderImpl::DoEndRasterCHROMIUM() { .fNumSemaphores = end_semaphores_.size(), .fSignalSemaphores = end_semaphores_.data(), }; - AddVulkanCleanupTaskForSkiaFlush( - shared_context_state_->vk_context_provider(), &flush_info); auto result = sk_surface_->flush(SkSurface::BackendSurfaceAccess::kPresent, flush_info); - DCHECK(result == GrSemaphoresSubmitted::kYes || end_semaphores_.empty()); - end_semaphores_.clear(); - + // If |end_semaphores_| is not empty, we will submit work to the queue. + // Otherwise the queue submission can be deferred.. + if (!end_semaphores_.empty()) { + DCHECK(result == GrSemaphoresSubmitted::kYes); + gr_context()->submit(); + end_semaphores_.clear(); + } // The DDL pins memory for the recorded ops so it must be kept alive until // its flushed. 
ddl_.reset(); @@ -2913,13 +2903,10 @@ void RasterDecoderImpl::DoEndRasterCHROMIUM() { shared_context_state_->UpdateSkiaOwnedMemorySize(); sk_surface_ = nullptr; - if (!shared_image_) { - // Test only path for SetUpForRasterCHROMIUMForTest. - sk_surface_for_testing_.reset(); - } else { - scoped_shared_image_write_.reset(); - shared_image_.reset(); - } + scoped_shared_image_write_.reset(); + shared_image_.reset(); + // Test only path for SetUpForRasterCHROMIUMForTest. + sk_surface_for_testing_.reset(); // Unlock all font handles. This needs to be deferred until // SkSurface::flush since that flushes batched Gr operations diff --git a/chromium/gpu/command_buffer/service/raster_decoder_unittest_base.cc b/chromium/gpu/command_buffer/service/raster_decoder_unittest_base.cc index 9375b1e17ae..ff476d4f3cf 100644 --- a/chromium/gpu/command_buffer/service/raster_decoder_unittest_base.cc +++ b/chromium/gpu/command_buffer/service/raster_decoder_unittest_base.cc @@ -187,7 +187,7 @@ void RasterDecoderTestBase::InitDecoder(const InitState& init) { new gl::GLShareGroup(), surface_, context_, feature_info()->workarounds().use_virtualized_gl_contexts, base::DoNothing(), GpuPreferences().gr_context_type); - + shared_context_state_->disable_check_reset_status_throttling_for_test_ = true; shared_context_state_->InitializeGL(GpuPreferences(), feature_info_); command_buffer_service_.reset(new FakeCommandBufferServiceBase()); @@ -213,10 +213,14 @@ void RasterDecoderTestBase::InitDecoder(const InitState& init) { gpu::ContextResult::kSuccess); EXPECT_CALL(*context_, MakeCurrent(surface_.get())).WillOnce(Return(true)); + EXPECT_CALL(*gl_, GetError()) + .WillOnce(Return(GL_NO_ERROR)) + .RetiresOnSaturation(); if (context_->HasRobustness()) { EXPECT_CALL(*gl_, GetGraphicsResetStatusARB()) .WillOnce(Return(GL_NO_ERROR)); } + decoder_->MakeCurrent(); decoder_->BeginDecoding(); diff --git a/chromium/gpu/command_buffer/service/raster_decoder_unittest_context_lost.cc 
b/chromium/gpu/command_buffer/service/raster_decoder_unittest_context_lost.cc index 2f778122fa8..d82e5e8b852 100644 --- a/chromium/gpu/command_buffer/service/raster_decoder_unittest_context_lost.cc +++ b/chromium/gpu/command_buffer/service/raster_decoder_unittest_context_lost.cc @@ -34,13 +34,13 @@ class RasterDecoderOOMTest : public RasterDecoderManualInitTest { if (context_->HasRobustness()) { EXPECT_CALL(*gl_, GetGraphicsResetStatusARB()) .WillOnce(Return(reset_status)); + EXPECT_CALL(*gl_, GetError()).WillRepeatedly(Return(GL_CONTEXT_LOST_KHR)); + } else { + EXPECT_CALL(*gl_, GetError()).WillRepeatedly(Return(GL_NO_ERROR)); } - // glGetError merges driver error state with decoder error state. Return - // GL_NO_ERROR from mock driver and GL_OUT_OF_MEMORY from decoder. - EXPECT_CALL(*gl_, GetError()) - .WillOnce(Return(GL_NO_ERROR)) - .RetiresOnSaturation(); + // RasterDecoder::HandleGetError merges driver error state with decoder + // error state. Return GL_OUT_OF_MEMORY from decoder. GetDecoder()->SetOOMErrorForTest(); cmds::GetError cmd; @@ -112,9 +112,9 @@ class RasterDecoderLostContextTest : public RasterDecoderManualInitTest { void DoGetErrorWithContextLost(GLenum reset_status) { DCHECK(context_->HasExtension("GL_KHR_robustness")); - EXPECT_CALL(*gl_, GetError()) - .WillOnce(Return(GL_CONTEXT_LOST_KHR)) - .RetiresOnSaturation(); + // Once context loss has occurred, driver will always return + // GL_CONTEXT_LOST_KHR. 
+ EXPECT_CALL(*gl_, GetError()).WillRepeatedly(Return(GL_CONTEXT_LOST_KHR)); EXPECT_CALL(*gl_, GetGraphicsResetStatusARB()) .WillOnce(Return(reset_status)); cmds::GetError cmd; @@ -147,6 +147,20 @@ TEST_P(RasterDecoderLostContextTest, LostFromMakeCurrent) { ClearCurrentDecoderError(); } +TEST_P(RasterDecoderLostContextTest, LostFromDriverOOM) { + Init(/*has_robustness=*/false); + EXPECT_CALL(*context_, MakeCurrent(surface_.get())).WillOnce(Return(true)); + EXPECT_CALL(*gl_, GetError()).WillOnce(Return(GL_OUT_OF_MEMORY)); + EXPECT_FALSE(decoder_->WasContextLost()); + decoder_->MakeCurrent(); + EXPECT_TRUE(decoder_->WasContextLost()); + EXPECT_EQ(error::kOutOfMemory, GetContextLostReason()); + + // We didn't process commands, so we need to clear the decoder error, + // so that we can shut down cleanly. + ClearCurrentDecoderError(); +} + TEST_P(RasterDecoderLostContextTest, LostFromMakeCurrentWithRobustness) { Init(/*has_robustness=*/true); // with robustness // If we can't make the context current, we cannot query the robustness @@ -215,6 +229,7 @@ TEST_P(RasterDecoderLostContextTest, LostFromResetAfterMakeCurrent) { Init(/*has_robustness=*/true); InSequence seq; EXPECT_CALL(*context_, MakeCurrent(surface_.get())).WillOnce(Return(true)); + EXPECT_CALL(*gl_, GetError()).WillOnce(Return(GL_CONTEXT_LOST_KHR)); EXPECT_CALL(*gl_, GetGraphicsResetStatusARB()) .WillOnce(Return(GL_GUILTY_CONTEXT_RESET_KHR)); decoder_->MakeCurrent(); diff --git a/chromium/gpu/command_buffer/service/sampler_manager.h b/chromium/gpu/command_buffer/service/sampler_manager.h index 2b46c8dd099..9828eb0354e 100644 --- a/chromium/gpu/command_buffer/service/sampler_manager.h +++ b/chromium/gpu/command_buffer/service/sampler_manager.h @@ -8,7 +8,6 @@ #include <unordered_map> #include <vector> -#include "base/logging.h" #include "base/macros.h" #include "base/memory/ref_counted.h" #include "gpu/command_buffer/service/feature_info.h" diff --git a/chromium/gpu/command_buffer/service/scheduler.cc 
b/chromium/gpu/command_buffer/service/scheduler.cc index 95bc584365f..b9d88280b52 100644 --- a/chromium/gpu/command_buffer/service/scheduler.cc +++ b/chromium/gpu/command_buffer/service/scheduler.cc @@ -8,6 +8,7 @@ #include "base/bind.h" #include "base/callback.h" +#include "base/logging.h" #include "base/metrics/histogram_macros.h" #include "base/single_thread_task_runner.h" #include "base/stl_util.h" diff --git a/chromium/gpu/command_buffer/service/service_font_manager.cc b/chromium/gpu/command_buffer/service/service_font_manager.cc index 50a1b4a74e2..dc41e910e7f 100644 --- a/chromium/gpu/command_buffer/service/service_font_manager.cc +++ b/chromium/gpu/command_buffer/service/service_font_manager.cc @@ -7,6 +7,7 @@ #include <inttypes.h> #include "base/debug/dump_without_crashing.h" +#include "base/logging.h" #include "base/metrics/histogram_macros.h" #include "base/rand_util.h" #include "base/strings/stringprintf.h" diff --git a/chromium/gpu/command_buffer/service/service_utils.cc b/chromium/gpu/command_buffer/service/service_utils.cc index 4432b9222dc..bd7c709e241 100644 --- a/chromium/gpu/command_buffer/service/service_utils.cc +++ b/chromium/gpu/command_buffer/service/service_utils.cc @@ -165,8 +165,7 @@ GpuPreferences ParseGpuPreferences(const base::CommandLine* command_line) { gpu_preferences.enable_dawn_backend_validation = command_line->HasSwitch(switches::kEnableDawnBackendValidation); gpu_preferences.gr_context_type = ParseGrContextType(); - gpu_preferences.use_vulkan = ParseVulkanImplementationName( - command_line, gpu_preferences.gr_context_type); + gpu_preferences.use_vulkan = ParseVulkanImplementationName(command_line); gpu_preferences.disable_vulkan_surface = command_line->HasSwitch(switches::kDisableVulkanSurface); @@ -192,8 +191,7 @@ GrContextType ParseGrContextType() { } VulkanImplementationName ParseVulkanImplementationName( - const base::CommandLine* command_line, - GrContextType gr_context_type) { + const base::CommandLine* command_line) { if 
(command_line->HasSwitch(switches::kUseVulkan)) { auto value = command_line->GetSwitchValueASCII(switches::kUseVulkan); if (value.empty() || value == switches::kVulkanImplementationNameNative) { @@ -202,11 +200,17 @@ VulkanImplementationName ParseVulkanImplementationName( return VulkanImplementationName::kSwiftshader; } } - // If the vulkan implementation is not set from --use-vulkan, the native - // vulkan implementation will be used by default. - return gr_context_type == GrContextType::kVulkan - ? VulkanImplementationName::kNative - : VulkanImplementationName::kNone; + + // GrContext is not going to use Vulkan. + if (!base::FeatureList::IsEnabled(features::kVulkan)) + return VulkanImplementationName::kNone; + + // If the vulkan feature is enabled from command line, we will force to use + // vulkan even if it is blacklisted. + return base::FeatureList::GetInstance()->IsFeatureOverriddenFromCommandLine( + features::kVulkan.name, base::FeatureList::OVERRIDE_ENABLE_FEATURE) + ? VulkanImplementationName::kForcedNative + : VulkanImplementationName::kNative; } } // namespace gles2 diff --git a/chromium/gpu/command_buffer/service/service_utils.h b/chromium/gpu/command_buffer/service/service_utils.h index 76a802e433c..e10ff73dcfb 100644 --- a/chromium/gpu/command_buffer/service/service_utils.h +++ b/chromium/gpu/command_buffer/service/service_utils.h @@ -40,10 +40,10 @@ ParseGpuPreferences(const base::CommandLine* command_line); GPU_GLES2_EXPORT GrContextType ParseGrContextType(); // Parse the value of --use-vulkan from the command line. If unspecified and -// a Vulkan GrContext is going to be used, default to the native implementation. +// features::kVulkan is enabled (GrContext is going to use vulkan), default to +// the native implementation. 
GPU_GLES2_EXPORT VulkanImplementationName -ParseVulkanImplementationName(const base::CommandLine* command_line, - GrContextType gr_context_type); +ParseVulkanImplementationName(const base::CommandLine* command_line); } // namespace gles2 } // namespace gpu diff --git a/chromium/gpu/command_buffer/service/shader_manager.h b/chromium/gpu/command_buffer/service/shader_manager.h index deb4a491486..873f92ddcdf 100644 --- a/chromium/gpu/command_buffer/service/shader_manager.h +++ b/chromium/gpu/command_buffer/service/shader_manager.h @@ -8,7 +8,7 @@ #include <string> #include <unordered_map> -#include "base/logging.h" +#include "base/check_op.h" #include "base/macros.h" #include "base/memory/ref_counted.h" #include "gpu/command_buffer/service/gl_utils.h" diff --git a/chromium/gpu/command_buffer/service/shared_context_state.cc b/chromium/gpu/command_buffer/service/shared_context_state.cc index 765ad3a0d62..900a182c781 100644 --- a/chromium/gpu/command_buffer/service/shared_context_state.cc +++ b/chromium/gpu/command_buffer/service/shared_context_state.cc @@ -69,7 +69,7 @@ size_t MaxNumSkSurface() { namespace gpu { void SharedContextState::compileError(const char* shader, const char* errors) { - if (!context_lost_) { + if (!context_lost()) { LOG(ERROR) << "Skia shader compilation error\n" << "------------------------\n" << shader << "\nErrors:\n" @@ -163,7 +163,7 @@ SharedContextState::~SharedContextState() { // The context should be current so that texture deletes that result from // destroying the cache happen in the right context (unless the context is // lost in which case we don't delete the textures). 
- DCHECK(IsCurrent(nullptr) || context_lost_); + DCHECK(IsCurrent(nullptr) || context_lost()); transfer_cache_.reset(); // We should have the last ref on this GrContext to ensure we're not holding @@ -191,7 +191,7 @@ SharedContextState::~SharedContextState() { this); } -void SharedContextState::InitializeGrContext( +bool SharedContextState::InitializeGrContext( const GpuPreferences& gpu_preferences, const GpuDriverBugWorkarounds& workarounds, GrContextOptions::PersistentCache* cache, @@ -220,7 +220,7 @@ void SharedContextState::InitializeGrContext( if (!interface) { LOG(ERROR) << "OOP raster support disabled: GrGLInterface creation " "failed."; - return; + return false; } if (activity_flags && cache) { @@ -255,12 +255,13 @@ void SharedContextState::InitializeGrContext( } if (!gr_context_) { - LOG(ERROR) << "OOP raster support disabled: GrContext creation " - "failed."; - } else { - gr_context_->setResourceCacheLimit(max_resource_cache_bytes); + LOG(ERROR) << "OOP raster support disabled: GrContext creation failed."; + return false; } + + gr_context_->setResourceCacheLimit(max_resource_cache_bytes); transfer_cache_ = std::make_unique<ServiceTransferCache>(gpu_preferences); + return true; } bool SharedContextState::InitializeGL( @@ -424,28 +425,23 @@ bool SharedContextState::InitializeGL( } bool SharedContextState::MakeCurrent(gl::GLSurface* surface, bool needs_gl) { - if (context_lost_) + if (context_lost()) return false; - if (gr_context_ && gr_context_->abandoned()) { - MarkContextLost(); - return false; - } - - if (!GrContextIsGL() && !needs_gl) - return true; - - gl::GLSurface* dont_care_surface = - last_current_surface_ ? last_current_surface_ : surface_.get(); - surface = surface ? surface : dont_care_surface; + const bool using_gl = GrContextIsGL() || needs_gl; + if (using_gl) { + gl::GLSurface* dont_care_surface = + last_current_surface_ ? last_current_surface_ : surface_.get(); + surface = surface ? 
surface : dont_care_surface; - if (!context_->MakeCurrent(surface)) { - MarkContextLost(); - return false; + if (!context_->MakeCurrent(surface)) { + MarkContextLost(error::kMakeCurrentFailed); + return false; + } + last_current_surface_ = surface; } - last_current_surface_ = surface; - return true; + return !CheckResetStatus(needs_gl); } void SharedContextState::ReleaseCurrent(gl::GLSurface* surface) { @@ -456,14 +452,14 @@ void SharedContextState::ReleaseCurrent(gl::GLSurface* surface) { return; last_current_surface_ = nullptr; - if (!context_lost_) + if (!context_lost()) context_->ReleaseCurrent(surface); } -void SharedContextState::MarkContextLost() { - if (!context_lost_) { +void SharedContextState::MarkContextLost(error::ContextLostReason reason) { + if (!context_lost()) { scoped_refptr<SharedContextState> prevent_last_ref_drop = this; - context_lost_ = true; + context_lost_reason_ = reason; // context_state_ could be nullptr for some unittests. if (context_state_) context_state_->MarkContextLost(); @@ -486,7 +482,7 @@ void SharedContextState::MarkContextLost() { bool SharedContextState::IsCurrent(gl::GLSurface* surface) { if (!GrContextIsGL()) return true; - if (context_lost_) + if (context_lost()) return false; return context_->IsCurrent(surface); } @@ -669,4 +665,79 @@ QueryManager* SharedContextState::GetQueryManager() { return nullptr; } +bool SharedContextState::CheckResetStatus(bool needs_gl) { + DCHECK(!context_lost()); + + if (device_needs_reset_) + return true; + + if (gr_context_) { + // Maybe Skia detected VK_ERROR_DEVICE_LOST. + if (gr_context_->abandoned()) { + LOG(ERROR) << "SharedContextState context lost via Skia."; + device_needs_reset_ = true; + MarkContextLost(error::kUnknown); + return true; + } + + if (gr_context_->oomed()) { + LOG(ERROR) << "SharedContextState context lost via Skia OOM."; + device_needs_reset_ = true; + MarkContextLost(error::kOutOfMemory); + return true; + } + } + + // Not using GL. 
+ if (!GrContextIsGL() && !needs_gl) + return false; + + // GL is not initialized. + if (!context_state_) + return false; + + GLenum error = context_state_->api()->glGetErrorFn(); + if (error == GL_OUT_OF_MEMORY) { + LOG(ERROR) << "SharedContextState lost due to GL_OUT_OF_MEMORY"; + MarkContextLost(error::kOutOfMemory); + device_needs_reset_ = true; + return true; + } + + // Checking the reset status is expensive on some OS/drivers + // (https://crbug.com/1090232). Rate limit it. + constexpr base::TimeDelta kMinCheckDelay = + base::TimeDelta::FromMilliseconds(5); + base::Time now = base::Time::Now(); + if (!disable_check_reset_status_throttling_for_test_ && + now < last_gl_check_graphics_reset_status_ + kMinCheckDelay) { + return false; + } + last_gl_check_graphics_reset_status_ = now; + + GLenum driver_status = context()->CheckStickyGraphicsResetStatus(); + if (driver_status == GL_NO_ERROR) + return false; + LOG(ERROR) << "SharedContextState context lost via ARB/EXT_robustness. Reset " + "status = " + << gles2::GLES2Util::GetStringEnum(driver_status); + + switch (driver_status) { + case GL_GUILTY_CONTEXT_RESET_ARB: + MarkContextLost(error::kGuilty); + break; + case GL_INNOCENT_CONTEXT_RESET_ARB: + MarkContextLost(error::kInnocent); + break; + case GL_UNKNOWN_CONTEXT_RESET_ARB: + MarkContextLost(error::kUnknown); + break; + default: + NOTREACHED(); + break; + } + device_needs_reset_ = true; + return true; +} + } // namespace gpu diff --git a/chromium/gpu/command_buffer/service/shared_context_state.h b/chromium/gpu/command_buffer/service/shared_context_state.h index 21e9687ffa5..6a7fcf0fc12 100644 --- a/chromium/gpu/command_buffer/service/shared_context_state.h +++ b/chromium/gpu/command_buffer/service/shared_context_state.h @@ -13,8 +13,12 @@ #include "base/memory/ref_counted.h" #include "base/memory/weak_ptr.h" #include "base/observer_list.h" +#include "base/optional.h" +#include "base/time/time.h" #include "base/trace_event/memory_dump_provider.h" #include 
"build/build_config.h" +#include "gpu/command_buffer/common/constants.h" +#include "gpu/command_buffer/common/gl2_types.h" #include "gpu/command_buffer/common/skia_utils.h" #include "gpu/command_buffer/service/gl_context_virtual_delegate.h" #include "gpu/command_buffer/service/memory_tracking.h" @@ -47,6 +51,10 @@ class FeatureInfo; struct ContextState; } // namespace gles2 +namespace raster { +class RasterDecoderTestBase; +} // namespace raster + class GPU_GLES2_EXPORT SharedContextState : public base::trace_event::MemoryDumpProvider, public gpu::GLContextVirtualDelegate, @@ -68,7 +76,7 @@ class GPU_GLES2_EXPORT SharedContextState base::WeakPtr<gpu::MemoryTracker::Observer> peak_memory_monitor = nullptr); - void InitializeGrContext(const GpuPreferences& gpu_preferences, + bool InitializeGrContext(const GpuPreferences& gpu_preferences, const GpuDriverBugWorkarounds& workarounds, GrContextOptions::PersistentCache* cache, GpuProcessActivityFlags* activity_flags = nullptr, @@ -92,7 +100,7 @@ class GPU_GLES2_EXPORT SharedContextState bool MakeCurrent(gl::GLSurface* surface, bool needs_gl = false); void ReleaseCurrent(gl::GLSurface* surface); - void MarkContextLost(); + void MarkContextLost(error::ContextLostReason reason = error::kUnknown); bool IsCurrent(gl::GLSurface* surface); void PurgeMemory( @@ -122,7 +130,10 @@ class GPU_GLES2_EXPORT SharedContextState void compileError(const char* shader, const char* errors) override; gles2::FeatureInfo* feature_info() { return feature_info_.get(); } gles2::ContextState* context_state() const { return context_state_.get(); } - bool context_lost() const { return context_lost_; } + bool context_lost() const { return !!context_lost_reason_; } + base::Optional<error::ContextLostReason> context_lost_reason() { + return context_lost_reason_; + } bool need_context_state_reset() const { return need_context_state_reset_; } void set_need_context_state_reset(bool reset) { need_context_state_reset_ = reset; @@ -179,8 +190,14 @@ class 
GPU_GLES2_EXPORT SharedContextState return found->second->unique(); } + // Updates |context_lost_reason| and returns true if lost + // (e.g. VK_ERROR_DEVICE_LOST or GL_UNKNOWN_CONTEXT_RESET_ARB). + bool CheckResetStatus(bool needs_gl); + bool device_needs_reset() { return device_needs_reset_; } + private: friend class base::RefCounted<SharedContextState>; + friend class raster::RasterDecoderTestBase; // Observer which is notified when SkiaOutputSurfaceImpl takes ownership of a // shared image, and forward information to both histograms and task manager. @@ -265,11 +282,15 @@ class GPU_GLES2_EXPORT SharedContextState // driver's GL state. bool need_context_state_reset_ = false; - bool context_lost_ = false; + base::Optional<error::ContextLostReason> context_lost_reason_; base::ObserverList<ContextLostObserver>::Unchecked context_lost_observers_; base::MRUCache<void*, sk_sp<SkSurface>> sk_surface_cache_; + bool device_needs_reset_ = false; + base::Time last_gl_check_graphics_reset_status_; + bool disable_check_reset_status_throttling_for_test_ = false; + base::WeakPtrFactory<SharedContextState> weak_ptr_factory_{this}; DISALLOW_COPY_AND_ASSIGN(SharedContextState); diff --git a/chromium/gpu/command_buffer/service/shared_image_backing_egl_image.cc b/chromium/gpu/command_buffer/service/shared_image_backing_egl_image.cc index 3117ba53f49..6cb0ebeb6b6 100644 --- a/chromium/gpu/command_buffer/service/shared_image_backing_egl_image.cc +++ b/chromium/gpu/command_buffer/service/shared_image_backing_egl_image.cc @@ -37,7 +37,8 @@ class SharedImageRepresentationEglImageGLTexture } bool BeginAccess(GLenum mode) override { - if (mode == GL_SHARED_IMAGE_ACCESS_MODE_READ_CHROMIUM) { + if (mode == GL_SHARED_IMAGE_ACCESS_MODE_READ_CHROMIUM || + mode == GL_SHARED_IMAGE_ACCESS_MODE_OVERLAY_CHROMIUM) { if (!egl_backing()->BeginRead(this)) return false; mode_ = RepresentationAccessMode::kRead; @@ -262,10 +263,10 @@ gles2::Texture* SharedImageBackingEglImage::GenEGLImageSibling() { auto* 
texture = new gles2::Texture(service_id); texture->SetLightweightRef(); texture->SetTarget(target, 1 /*max_levels*/); - texture->sampler_state_.min_filter = GL_LINEAR; - texture->sampler_state_.mag_filter = GL_LINEAR; - texture->sampler_state_.wrap_t = GL_CLAMP_TO_EDGE; - texture->sampler_state_.wrap_s = GL_CLAMP_TO_EDGE; + texture->set_min_filter(GL_LINEAR); + texture->set_mag_filter(GL_LINEAR); + texture->set_wrap_t(GL_CLAMP_TO_EDGE); + texture->set_wrap_s(GL_CLAMP_TO_EDGE); // If the backing is already cleared, no need to clear it again. gfx::Rect cleared_rect; diff --git a/chromium/gpu/command_buffer/service/shared_image_backing_factory_ahardwarebuffer.cc b/chromium/gpu/command_buffer/service/shared_image_backing_factory_ahardwarebuffer.cc index cc4b235d582..044f201a9e9 100644 --- a/chromium/gpu/command_buffer/service/shared_image_backing_factory_ahardwarebuffer.cc +++ b/chromium/gpu/command_buffer/service/shared_image_backing_factory_ahardwarebuffer.cc @@ -70,6 +70,9 @@ class OverlayImage final : public gl::GLImage { base::ScopedFD TakeEndFence() { DCHECK(!begin_read_fence_.is_valid()); + + previous_end_read_fence_ = + base::ScopedFD(HANDLE_EINTR(dup(end_read_fence_.get()))); return std::move(end_read_fence_); } @@ -78,7 +81,7 @@ class OverlayImage final : public gl::GLImage { GetAHardwareBuffer() override { return std::make_unique<ScopedHardwareBufferFenceSyncImpl>( this, base::android::ScopedHardwareBufferHandle::Create(handle_.get()), - std::move(begin_read_fence_)); + std::move(begin_read_fence_), std::move(previous_end_read_fence_)); } protected: @@ -91,14 +94,20 @@ class OverlayImage final : public gl::GLImage { ScopedHardwareBufferFenceSyncImpl( scoped_refptr<OverlayImage> image, base::android::ScopedHardwareBufferHandle handle, - base::ScopedFD fence_fd) - : ScopedHardwareBufferFenceSync(std::move(handle), std::move(fence_fd)), + base::ScopedFD fence_fd, + base::ScopedFD available_fence_fd) + : ScopedHardwareBufferFenceSync(std::move(handle), + 
std::move(fence_fd), + std::move(available_fence_fd), + false /* is_video */), image_(std::move(image)) {} ~ScopedHardwareBufferFenceSyncImpl() override = default; void SetReadFence(base::ScopedFD fence_fd, bool has_context) override { DCHECK(!image_->begin_read_fence_.is_valid()); DCHECK(!image_->end_read_fence_.is_valid()); + DCHECK(!image_->previous_end_read_fence_.is_valid()); + image_->end_read_fence_ = std::move(fence_fd); } @@ -115,6 +124,10 @@ class OverlayImage final : public gl::GLImage { // completion. The image content should not be modified before passing this // fence. base::ScopedFD end_read_fence_; + + // The fence for overlay controller from the last frame where this buffer was + // presented. + base::ScopedFD previous_end_read_fence_; }; } // namespace @@ -170,7 +183,6 @@ class SharedImageBackingAHB : public ClearTrackingSharedImageBacking { MemoryTypeTracker* tracker) override; private: - gles2::Texture* GenGLTexture(); const base::android::ScopedHardwareBufferHandle hardware_buffer_handle_; // Not guarded by |lock_| as we do not use legacy_texture_ in threadsafe @@ -213,7 +225,8 @@ class SharedImageRepresentationGLTextureAHB gles2::Texture* GetTexture() override { return texture_; } bool BeginAccess(GLenum mode) override { - if (mode == GL_SHARED_IMAGE_ACCESS_MODE_READ_CHROMIUM) { + if (mode == GL_SHARED_IMAGE_ACCESS_MODE_READ_CHROMIUM || + mode == GL_SHARED_IMAGE_ACCESS_MODE_OVERLAY_CHROMIUM) { base::ScopedFD write_sync_fd; if (!ahb_backing()->BeginRead(this, &write_sync_fd)) return false; @@ -228,7 +241,8 @@ class SharedImageRepresentationGLTextureAHB return false; } - if (mode == GL_SHARED_IMAGE_ACCESS_MODE_READ_CHROMIUM) { + if (mode == GL_SHARED_IMAGE_ACCESS_MODE_READ_CHROMIUM || + mode == GL_SHARED_IMAGE_ACCESS_MODE_OVERLAY_CHROMIUM) { mode_ = RepresentationAccessMode::kRead; } else if (mode == GL_SHARED_IMAGE_ACCESS_MODE_READWRITE_CHROMIUM) { mode_ = RepresentationAccessMode::kWrite; @@ -316,12 +330,12 @@ class 
SharedImageRepresentationSkiaVkAHB surface_props != surface_->props()) { SkColorType sk_color_type = viz::ResourceFormatToClosestSkColorType( /*gpu_compositing=*/true, format()); - surface_ = SkSurface::MakeFromBackendTextureAsRenderTarget( + surface_ = SkSurface::MakeFromBackendTexture( gr_context, promise_texture_->backendTexture(), kTopLeft_GrSurfaceOrigin, final_msaa_count, sk_color_type, color_space().ToSkColorSpace(), &surface_props); if (!surface_) { - LOG(ERROR) << "MakeFromBackendTextureAsRenderTarget() failed."; + LOG(ERROR) << "MakeFromBackendTexture() failed."; return nullptr; } surface_msaa_count_ = final_msaa_count; @@ -582,7 +596,9 @@ bool SharedImageBackingAHB::ProduceLegacyMailbox( DCHECK(!is_writing_); DCHECK_EQ(size_t{0}, active_readers_.size()); DCHECK(hardware_buffer_handle_.is_valid()); - legacy_texture_ = GenGLTexture(); + legacy_texture_ = + GenGLTexture(hardware_buffer_handle_.get(), GL_TEXTURE_2D, color_space(), + size(), estimated_size(), ClearedRect()); if (!legacy_texture_) return false; // Make sure our |legacy_texture_| has the right initial cleared rect. @@ -602,7 +618,16 @@ SharedImageBackingAHB::ProduceGLTexture(SharedImageManager* manager, MemoryTypeTracker* tracker) { // Use same texture for all the texture representations generated from same // backing. - auto* texture = GenGLTexture(); + DCHECK(hardware_buffer_handle_.is_valid()); + + // Note that we are not using GL_TEXTURE_EXTERNAL_OES target(here and all + // other places in this file) since sksurface + // doesn't supports it. As per the egl documentation - + // https://www.khronos.org/registry/OpenGL/extensions/OES/OES_EGL_image_external.txt + // if GL_OES_EGL_image is supported then <target> may also be TEXTURE_2D. 
+ auto* texture = + GenGLTexture(hardware_buffer_handle_.get(), GL_TEXTURE_2D, color_space(), + size(), estimated_size(), ClearedRect()); if (!texture) return nullptr; @@ -620,11 +645,9 @@ SharedImageBackingAHB::ProduceSkia( // Check whether we are in Vulkan mode OR GL mode and accordingly create // Skia representation. if (context_state->GrContextIsVulkan()) { - auto* device_queue = context_state->vk_context_provider()->GetDeviceQueue(); - gfx::GpuMemoryBufferHandle gmb_handle(GetAhbHandle()); - auto vulkan_image = VulkanImage::CreateFromGpuMemoryBufferHandle( - device_queue, std::move(gmb_handle), size(), ToVkFormat(format()), - 0 /* usage */); + auto vulkan_image = CreateVkImageFromAhbHandle( + GetAhbHandle(), context_state.get(), size(), format()); + if (!vulkan_image) return nullptr; @@ -633,8 +656,10 @@ SharedImageBackingAHB::ProduceSkia( tracker); } DCHECK(context_state->GrContextIsGL()); - - auto* texture = GenGLTexture(); + DCHECK(hardware_buffer_handle_.is_valid()); + auto* texture = + GenGLTexture(hardware_buffer_handle_.get(), GL_TEXTURE_2D, color_space(), + size(), estimated_size(), ClearedRect()); if (!texture) return nullptr; auto gl_representation = @@ -759,64 +784,6 @@ void SharedImageBackingAHB::EndOverlayAccess() { read_sync_fd_ = gl::MergeFDs(std::move(read_sync_fd_), std::move(fence_fd)); } -gles2::Texture* SharedImageBackingAHB::GenGLTexture() { - DCHECK(hardware_buffer_handle_.is_valid()); - - // Target for AHB backed egl images. - // Note that we are not using GL_TEXTURE_EXTERNAL_OES target since sksurface - // doesn't supports it. As per the egl documentation - - // https://www.khronos.org/registry/OpenGL/extensions/OES/OES_EGL_image_external.txt - // if GL_OES_EGL_image is supported then <target> may also be TEXTURE_2D. - GLenum target = GL_TEXTURE_2D; - GLenum get_target = GL_TEXTURE_BINDING_2D; - - // Create a gles2 texture using the AhardwareBuffer. 
- gl::GLApi* api = gl::g_current_gl_context; - GLuint service_id = 0; - api->glGenTexturesFn(1, &service_id); - GLint old_texture_binding = 0; - api->glGetIntegervFn(get_target, &old_texture_binding); - api->glBindTextureFn(target, service_id); - api->glTexParameteriFn(target, GL_TEXTURE_MIN_FILTER, GL_LINEAR); - api->glTexParameteriFn(target, GL_TEXTURE_MAG_FILTER, GL_LINEAR); - api->glTexParameteriFn(target, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE); - api->glTexParameteriFn(target, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE); - - // Create an egl image using AHardwareBuffer. - auto egl_image = base::MakeRefCounted<gl::GLImageAHardwareBuffer>(size()); - if (!egl_image->Initialize(hardware_buffer_handle_.get(), false)) { - LOG(ERROR) << "Failed to create EGL image"; - api->glBindTextureFn(target, old_texture_binding); - api->glDeleteTexturesFn(1, &service_id); - return nullptr; - } - if (!egl_image->BindTexImage(target)) { - LOG(ERROR) << "Failed to bind egl image"; - api->glBindTextureFn(target, old_texture_binding); - api->glDeleteTexturesFn(1, &service_id); - return nullptr; - } - egl_image->SetColorSpace(color_space()); - - // Create a gles2 Texture. 
- auto* texture = new gles2::Texture(service_id); - texture->SetLightweightRef(); - texture->SetTarget(target, 1); - texture->sampler_state_.min_filter = GL_LINEAR; - texture->sampler_state_.mag_filter = GL_LINEAR; - texture->sampler_state_.wrap_t = GL_CLAMP_TO_EDGE; - texture->sampler_state_.wrap_s = GL_CLAMP_TO_EDGE; - - texture->SetLevelInfo(target, 0, egl_image->GetInternalFormat(), - size().width(), size().height(), 1, 0, - egl_image->GetDataFormat(), egl_image->GetDataType(), - ClearedRect()); - texture->SetLevelImage(target, 0, egl_image.get(), gles2::Texture::BOUND); - texture->SetImmutable(true, false); - api->glBindTextureFn(target, old_texture_binding); - return texture; -} - SharedImageBackingFactoryAHB::SharedImageBackingFactoryAHB( const GpuDriverBugWorkarounds& workarounds, const GpuFeatureInfo& gpu_feature_info) { @@ -1065,11 +1032,8 @@ SharedImageBackingFactoryAHB::CreateSharedImage( const gfx::ColorSpace& color_space, uint32_t usage, base::span<const uint8_t> pixel_data) { - auto backing = - MakeBacking(mailbox, format, size, color_space, usage, false, pixel_data); - if (backing) - backing->OnWriteSucceeded(); - return backing; + return MakeBacking(mailbox, format, size, color_space, usage, false, + pixel_data); } bool SharedImageBackingFactoryAHB::CanImportGpuMemoryBuffer( diff --git a/chromium/gpu/command_buffer/service/shared_image_backing_factory_gl_texture.cc b/chromium/gpu/command_buffer/service/shared_image_backing_factory_gl_texture.cc index 91798bb9b36..bd4e77afcbd 100644 --- a/chromium/gpu/command_buffer/service/shared_image_backing_factory_gl_texture.cc +++ b/chromium/gpu/command_buffer/service/shared_image_backing_factory_gl_texture.cc @@ -25,6 +25,7 @@ #include "gpu/command_buffer/service/service_utils.h" #include "gpu/command_buffer/service/shared_context_state.h" #include "gpu/command_buffer/service/shared_image_backing.h" +#include "gpu/command_buffer/service/shared_image_backing_factory_gl_texture_internal.h" #include 
"gpu/command_buffer/service/shared_image_factory.h" #include "gpu/command_buffer/service/shared_image_representation.h" #include "gpu/command_buffer/service/skia_utils.h" @@ -51,6 +52,10 @@ #include "gpu/command_buffer/service/shared_image_batch_access_manager.h" #endif +#if defined(OS_MACOSX) +#include "gpu/command_buffer/service/shared_image_backing_factory_iosurface.h" +#endif + namespace gpu { namespace { @@ -189,23 +194,6 @@ class ScopedRestoreTexture { DISALLOW_COPY_AND_ASSIGN(ScopedRestoreTexture); }; -GLuint MakeTextureAndSetParameters(gl::GLApi* api, - GLenum target, - bool framebuffer_attachment_angle) { - GLuint service_id = 0; - api->glGenTexturesFn(1, &service_id); - api->glBindTextureFn(target, service_id); - api->glTexParameteriFn(target, GL_TEXTURE_MIN_FILTER, GL_LINEAR); - api->glTexParameteriFn(target, GL_TEXTURE_MAG_FILTER, GL_LINEAR); - api->glTexParameteriFn(target, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE); - api->glTexParameteriFn(target, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE); - if (framebuffer_attachment_angle) { - api->glTexParameteriFn(target, GL_TEXTURE_USAGE_ANGLE, - GL_FRAMEBUFFER_ATTACHMENT_ANGLE); - } - return service_id; -} - std::unique_ptr<SharedImageRepresentationDawn> ProduceDawnCommon( SharedImageFactory* factory, SharedImageManager* manager, @@ -301,518 +289,718 @@ std::unique_ptr<SharedImageRepresentationDawn> ProduceDawnCommon( return manager->ProduceDawn(dst_mailbox, tracker, device); } +size_t EstimatedSize(viz::ResourceFormat format, const gfx::Size& size) { + size_t estimated_size = 0; + viz::ResourceSizes::MaybeSizeInBytes(size, format, &estimated_size); + return estimated_size; +} + } // anonymous namespace +/////////////////////////////////////////////////////////////////////////////// +// SharedImageRepresentationGLTextureImpl + // Representation of a SharedImageBackingGLTexture as a GL Texture. 
-class SharedImageRepresentationGLTextureImpl - : public SharedImageRepresentationGLTexture { - public: - SharedImageRepresentationGLTextureImpl(SharedImageManager* manager, - SharedImageBacking* backing, - MemoryTypeTracker* tracker, - gles2::Texture* texture) - : SharedImageRepresentationGLTexture(manager, backing, tracker), - texture_(texture) {} +SharedImageRepresentationGLTextureImpl::SharedImageRepresentationGLTextureImpl( + SharedImageManager* manager, + SharedImageBacking* backing, + Client* client, + MemoryTypeTracker* tracker, + gles2::Texture* texture) + : SharedImageRepresentationGLTexture(manager, backing, tracker), + client_(client), + texture_(texture) {} - gles2::Texture* GetTexture() override { return texture_; } +gles2::Texture* SharedImageRepresentationGLTextureImpl::GetTexture() { + return texture_; +} - private: - gles2::Texture* texture_; -}; +bool SharedImageRepresentationGLTextureImpl::BeginAccess(GLenum mode) { + if (client_) + return client_->OnGLTextureBeginAccess(mode); + return true; +} -// Representation of a SharedImageBackingGLTexturePassthrough as a GL -// TexturePassthrough. 
-class SharedImageRepresentationGLTexturePassthroughImpl - : public SharedImageRepresentationGLTexturePassthrough { - public: - SharedImageRepresentationGLTexturePassthroughImpl( - SharedImageManager* manager, - SharedImageBacking* backing, - MemoryTypeTracker* tracker, - scoped_refptr<gles2::TexturePassthrough> texture_passthrough) - : SharedImageRepresentationGLTexturePassthrough(manager, - backing, - tracker), - texture_passthrough_(std::move(texture_passthrough)) {} - - const scoped_refptr<gles2::TexturePassthrough>& GetTexturePassthrough() - override { - return texture_passthrough_; - } +/////////////////////////////////////////////////////////////////////////////// +// SharedImageRepresentationGLTexturePassthroughImpl + +SharedImageRepresentationGLTexturePassthroughImpl:: + SharedImageRepresentationGLTexturePassthroughImpl( + SharedImageManager* manager, + SharedImageBacking* backing, + Client* client, + MemoryTypeTracker* tracker, + scoped_refptr<gles2::TexturePassthrough> texture_passthrough) + : SharedImageRepresentationGLTexturePassthrough(manager, backing, tracker), + client_(client), + texture_passthrough_(std::move(texture_passthrough)) {} + +SharedImageRepresentationGLTexturePassthroughImpl:: + ~SharedImageRepresentationGLTexturePassthroughImpl() = default; + +const scoped_refptr<gles2::TexturePassthrough>& +SharedImageRepresentationGLTexturePassthroughImpl::GetTexturePassthrough() { + return texture_passthrough_; +} - void EndAccess() override { - GLenum target = texture_passthrough_->target(); - gl::GLImage* image = texture_passthrough_->GetLevelImage(target, 0); - if (!image) - return; - if (image->ShouldBindOrCopy() == gl::GLImage::BIND) { - gl::ScopedTextureBinder binder(target, - texture_passthrough_->service_id()); - image->ReleaseTexImage(target); - image->BindTexImage(target); - } - } +bool SharedImageRepresentationGLTexturePassthroughImpl::BeginAccess( + GLenum mode) { + if (client_) + return client_->OnGLTexturePassthroughBeginAccess(mode); 
+ return true; +} - private: - scoped_refptr<gles2::TexturePassthrough> texture_passthrough_; -}; +/////////////////////////////////////////////////////////////////////////////// +// SharedImageBackingGLCommon -class SharedImageBackingWithReadAccess : public SharedImageBacking { - public: - SharedImageBackingWithReadAccess(const Mailbox& mailbox, - viz::ResourceFormat format, - const gfx::Size& size, - const gfx::ColorSpace& color_space, - uint32_t usage, - size_t estimated_size, - bool is_thread_safe) - : SharedImageBacking(mailbox, - format, - size, - color_space, - usage, - estimated_size, - is_thread_safe) {} - ~SharedImageBackingWithReadAccess() override = default; - - virtual void BeginReadAccess() = 0; -}; +// static +void SharedImageBackingGLCommon::MakeTextureAndSetParameters( + GLenum target, + GLuint service_id, + bool framebuffer_attachment_angle, + scoped_refptr<gles2::TexturePassthrough>* passthrough_texture, + gles2::Texture** texture) { + if (!service_id) { + gl::GLApi* api = gl::g_current_gl_context; + ScopedRestoreTexture scoped_restore(api, target); -class SharedImageRepresentationSkiaImpl : public SharedImageRepresentationSkia { - public: - SharedImageRepresentationSkiaImpl( - SharedImageManager* manager, - SharedImageBackingWithReadAccess* backing, - scoped_refptr<SharedContextState> context_state, - sk_sp<SkPromiseImageTexture> cached_promise_texture, - MemoryTypeTracker* tracker, - GLenum target, - GLuint service_id) - : SharedImageRepresentationSkia(manager, backing, tracker), - context_state_(std::move(context_state)), - promise_texture_(cached_promise_texture) { - if (!promise_texture_) { - GrBackendTexture backend_texture; - GetGrBackendTexture(context_state_->feature_info(), target, size(), - service_id, format(), &backend_texture); - promise_texture_ = SkPromiseImageTexture::Make(backend_texture); + api->glGenTexturesFn(1, &service_id); + api->glBindTextureFn(target, service_id); + api->glTexParameteriFn(target, GL_TEXTURE_MIN_FILTER, 
GL_LINEAR); + api->glTexParameteriFn(target, GL_TEXTURE_MAG_FILTER, GL_LINEAR); + api->glTexParameteriFn(target, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE); + api->glTexParameteriFn(target, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE); + if (framebuffer_attachment_angle) { + api->glTexParameteriFn(target, GL_TEXTURE_USAGE_ANGLE, + GL_FRAMEBUFFER_ATTACHMENT_ANGLE); } + } + if (passthrough_texture) { + *passthrough_texture = + base::MakeRefCounted<gles2::TexturePassthrough>(service_id, target); + } + if (texture) { + *texture = new gles2::Texture(service_id); + (*texture)->SetLightweightRef(); + (*texture)->SetTarget(target, 1); + (*texture)->set_min_filter(GL_LINEAR); + (*texture)->set_mag_filter(GL_LINEAR); + (*texture)->set_wrap_s(GL_CLAMP_TO_EDGE); + (*texture)->set_wrap_t(GL_CLAMP_TO_EDGE); + } +} + +/////////////////////////////////////////////////////////////////////////////// +// SharedImageRepresentationSkiaImpl + +SharedImageRepresentationSkiaImpl::SharedImageRepresentationSkiaImpl( + SharedImageManager* manager, + SharedImageBacking* backing, + Client* client, + scoped_refptr<SharedContextState> context_state, + sk_sp<SkPromiseImageTexture> promise_texture, + MemoryTypeTracker* tracker) + : SharedImageRepresentationSkia(manager, backing, tracker), + client_(client), + context_state_(std::move(context_state)), + promise_texture_(promise_texture) { + DCHECK(promise_texture_); #if DCHECK_IS_ON() + if (context_state_->GrContextIsGL()) context_ = gl::GLContext::GetCurrent(); #endif - } +} - ~SharedImageRepresentationSkiaImpl() override { - if (write_surface_) { - DLOG(ERROR) << "SharedImageRepresentationSkia was destroyed while still " - << "open for write access."; - } +SharedImageRepresentationSkiaImpl::~SharedImageRepresentationSkiaImpl() { + if (write_surface_) { + DLOG(ERROR) << "SharedImageRepresentationSkia was destroyed while still " + << "open for write access."; } +} - sk_sp<SkSurface> BeginWriteAccess( - int final_msaa_count, - const SkSurfaceProps& surface_props, 
- std::vector<GrBackendSemaphore>* begin_semaphores, - std::vector<GrBackendSemaphore>* end_semaphores) override { - CheckContext(); - if (write_surface_) - return nullptr; - - if (!promise_texture_) { - return nullptr; - } - SkColorType sk_color_type = viz::ResourceFormatToClosestSkColorType( - /*gpu_compositing=*/true, format()); - auto surface = SkSurface::MakeFromBackendTextureAsRenderTarget( - context_state_->gr_context(), promise_texture_->backendTexture(), - kTopLeft_GrSurfaceOrigin, final_msaa_count, sk_color_type, - backing()->color_space().ToSkColorSpace(), &surface_props); - write_surface_ = surface.get(); - return surface; - } +sk_sp<SkSurface> SharedImageRepresentationSkiaImpl::BeginWriteAccess( + int final_msaa_count, + const SkSurfaceProps& surface_props, + std::vector<GrBackendSemaphore>* begin_semaphores, + std::vector<GrBackendSemaphore>* end_semaphores) { + CheckContext(); + if (client_ && !client_->OnSkiaBeginWriteAccess()) + return nullptr; + if (write_surface_) + return nullptr; - void EndWriteAccess(sk_sp<SkSurface> surface) override { - DCHECK_EQ(surface.get(), write_surface_); - DCHECK(surface->unique()); - CheckContext(); - // TODO(ericrk): Keep the surface around for re-use. 
- write_surface_ = nullptr; + if (!promise_texture_) { + return nullptr; } + SkColorType sk_color_type = viz::ResourceFormatToClosestSkColorType( + /*gpu_compositing=*/true, format()); + auto surface = SkSurface::MakeFromBackendTexture( + context_state_->gr_context(), promise_texture_->backendTexture(), + kTopLeft_GrSurfaceOrigin, final_msaa_count, sk_color_type, + backing()->color_space().ToSkColorSpace(), &surface_props); + write_surface_ = surface.get(); + return surface; +} - sk_sp<SkPromiseImageTexture> BeginReadAccess( - std::vector<GrBackendSemaphore>* begin_semaphores, - std::vector<GrBackendSemaphore>* end_semaphores) override { - CheckContext(); - static_cast<SharedImageBackingWithReadAccess*>(backing()) - ->BeginReadAccess(); - return promise_texture_; - } +void SharedImageRepresentationSkiaImpl::EndWriteAccess( + sk_sp<SkSurface> surface) { + DCHECK_EQ(surface.get(), write_surface_); + DCHECK(surface->unique()); + CheckContext(); + // TODO(ericrk): Keep the surface around for re-use. + write_surface_ = nullptr; +} - void EndReadAccess() override { - // TODO(ericrk): Handle begin/end correctness checks. - } +sk_sp<SkPromiseImageTexture> SharedImageRepresentationSkiaImpl::BeginReadAccess( + std::vector<GrBackendSemaphore>* begin_semaphores, + std::vector<GrBackendSemaphore>* end_semaphores) { + CheckContext(); + if (client_ && !client_->OnSkiaBeginReadAccess()) + return nullptr; + return promise_texture_; +} - bool SupportsMultipleConcurrentReadAccess() override { return true; } +void SharedImageRepresentationSkiaImpl::EndReadAccess() { + // TODO(ericrk): Handle begin/end correctness checks. 
+} - sk_sp<SkPromiseImageTexture> promise_texture() { return promise_texture_; } +bool SharedImageRepresentationSkiaImpl::SupportsMultipleConcurrentReadAccess() { + return true; +} - private: - void CheckContext() { +void SharedImageRepresentationSkiaImpl::CheckContext() { #if DCHECK_IS_ON() + if (context_) DCHECK(gl::GLContext::GetCurrent() == context_); #endif - } +} - scoped_refptr<SharedContextState> context_state_; - sk_sp<SkPromiseImageTexture> promise_texture_; +/////////////////////////////////////////////////////////////////////////////// +// SharedImageBackingGLTexture - SkSurface* write_surface_ = nullptr; -#if DCHECK_IS_ON() - gl::GLContext* context_; -#endif -}; - -// Implementation of SharedImageBacking that creates a GL Texture and stores it -// as a gles2::Texture. Can be used with the legacy mailbox implementation. -class SharedImageBackingGLTexture : public SharedImageBackingWithReadAccess { - public: - SharedImageBackingGLTexture(const Mailbox& mailbox, - viz::ResourceFormat format, - const gfx::Size& size, - const gfx::ColorSpace& color_space, - uint32_t usage, - gles2::Texture* texture, - const UnpackStateAttribs& attribs) - : SharedImageBackingWithReadAccess(mailbox, - format, - size, - color_space, - usage, - texture->estimated_size(), - false /* is_thread_safe */), - texture_(texture), - attribs_(attribs) { - DCHECK(texture_); - gl::GLImage* image = - texture_->GetLevelImage(texture_->target(), 0, nullptr); - if (image) - native_pixmap_ = image->GetNativePixmap(); +SharedImageBackingGLTexture::SharedImageBackingGLTexture( + const Mailbox& mailbox, + viz::ResourceFormat format, + const gfx::Size& size, + const gfx::ColorSpace& color_space, + uint32_t usage, + bool is_passthrough) + : SharedImageBacking(mailbox, + format, + size, + color_space, + usage, + EstimatedSize(format, size), + false /* is_thread_safe */), + is_passthrough_(is_passthrough) {} + +SharedImageBackingGLTexture::~SharedImageBackingGLTexture() { + if (IsPassthrough()) { + if 
(passthrough_texture_) { + if (!have_context()) + passthrough_texture_->MarkContextLost(); + passthrough_texture_.reset(); + } + } else { + if (texture_) { + texture_->RemoveLightweightRef(have_context()); + texture_ = nullptr; + } } +} - ~SharedImageBackingGLTexture() override { - DCHECK(texture_); - texture_->RemoveLightweightRef(have_context()); - texture_ = nullptr; +GLenum SharedImageBackingGLTexture::GetGLTarget() const { + return texture_ ? texture_->target() : passthrough_texture_->target(); +} - if (rgb_emulation_texture_) { - rgb_emulation_texture_->RemoveLightweightRef(have_context()); - rgb_emulation_texture_ = nullptr; - } +GLuint SharedImageBackingGLTexture::GetGLServiceId() const { + return texture_ ? texture_->service_id() : passthrough_texture_->service_id(); +} + +void SharedImageBackingGLTexture::OnMemoryDump( + const std::string& dump_name, + base::trace_event::MemoryAllocatorDump* dump, + base::trace_event::ProcessMemoryDump* pmd, + uint64_t client_tracing_id) { + const auto client_guid = GetSharedImageGUIDForTracing(mailbox()); + if (!IsPassthrough()) { + const auto service_guid = + gl::GetGLTextureServiceGUIDForTracing(texture_->service_id()); + pmd->CreateSharedGlobalAllocatorDump(service_guid); + pmd->AddOwnershipEdge(client_guid, service_guid, /* importance */ 2); + texture_->DumpLevelMemory(pmd, client_tracing_id, dump_name); } +} - gfx::Rect ClearedRect() const override { +gfx::Rect SharedImageBackingGLTexture::ClearedRect() const { + if (IsPassthrough()) { + // This backing is used exclusively with ANGLE which handles clear tracking + // internally. Act as though the texture is always cleared. 
+ return gfx::Rect(size()); + } else { return texture_->GetLevelClearedRect(texture_->target(), 0); } +} - void SetClearedRect(const gfx::Rect& cleared_rect) override { +void SharedImageBackingGLTexture::SetClearedRect( + const gfx::Rect& cleared_rect) { + if (!IsPassthrough()) texture_->SetLevelClearedRect(texture_->target(), 0, cleared_rect); - } - - void Update(std::unique_ptr<gfx::GpuFence> in_fence) override { - GLenum target = texture_->target(); - gl::GLApi* api = gl::g_current_gl_context; - ScopedRestoreTexture scoped_restore(api, target); - api->glBindTextureFn(target, texture_->service_id()); - - gles2::Texture::ImageState old_state = gles2::Texture::UNBOUND; - gl::GLImage* image = texture_->GetLevelImage(target, 0, &old_state); - if (!image) - return; - if (old_state == gles2::Texture::BOUND) - image->ReleaseTexImage(target); - - if (in_fence) { - // TODO(dcastagna): Don't wait for the fence if the SharedImage is going - // to be scanned out as an HW overlay. Currently we don't know that at - // this point and we always bind the image, therefore we need to wait for - // the fence. 
- std::unique_ptr<gl::GLFence> egl_fence = - gl::GLFence::CreateFromGpuFence(*in_fence.get()); - egl_fence->ServerWait(); - } - gles2::Texture::ImageState new_state = gles2::Texture::UNBOUND; - if (image->ShouldBindOrCopy() == gl::GLImage::BIND && - image->BindTexImage(target)) { - new_state = gles2::Texture::BOUND; - } - if (old_state != new_state) - texture_->SetLevelImage(target, 0, image, new_state); - } +} - bool ProduceLegacyMailbox(MailboxManager* mailbox_manager) override { - DCHECK(texture_); +bool SharedImageBackingGLTexture::ProduceLegacyMailbox( + MailboxManager* mailbox_manager) { + if (IsPassthrough()) + mailbox_manager->ProduceTexture(mailbox(), passthrough_texture_.get()); + else mailbox_manager->ProduceTexture(mailbox(), texture_); - return true; - } + return true; +} - void OnMemoryDump(const std::string& dump_name, - base::trace_event::MemoryAllocatorDump* dump, - base::trace_event::ProcessMemoryDump* pmd, - uint64_t client_tracing_id) override { - // Add a |service_guid| which expresses shared ownership between the - // various GPU dumps. - auto client_guid = GetSharedImageGUIDForTracing(mailbox()); - auto service_guid = - gl::GetGLTextureServiceGUIDForTracing(texture_->service_id()); - pmd->CreateSharedGlobalAllocatorDump(service_guid); - // TODO(piman): coalesce constant with TextureManager::DumpTextureRef. - int importance = 2; // This client always owns the ref. 
+std::unique_ptr<SharedImageRepresentationGLTexture> +SharedImageBackingGLTexture::ProduceGLTexture(SharedImageManager* manager, + MemoryTypeTracker* tracker) { + DCHECK(texture_); + return std::make_unique<SharedImageRepresentationGLTextureImpl>( + manager, this, nullptr, tracker, texture_); +} - pmd->AddOwnershipEdge(client_guid, service_guid, importance); +std::unique_ptr<SharedImageRepresentationGLTexturePassthrough> +SharedImageBackingGLTexture::ProduceGLTexturePassthrough( + SharedImageManager* manager, + MemoryTypeTracker* tracker) { + DCHECK(passthrough_texture_); + return std::make_unique<SharedImageRepresentationGLTexturePassthroughImpl>( + manager, this, nullptr, tracker, passthrough_texture_); +} - // Dump all sub-levels held by the texture. They will appear below the - // main gl/textures/client_X/mailbox_Y dump. - texture_->DumpLevelMemory(pmd, client_tracing_id, dump_name); +std::unique_ptr<SharedImageRepresentationDawn> +SharedImageBackingGLTexture::ProduceDawn(SharedImageManager* manager, + MemoryTypeTracker* tracker, + WGPUDevice device) { + if (!factory()) { + DLOG(ERROR) << "No SharedImageFactory to create a dawn representation."; + return nullptr; } - void BeginReadAccess() override { - GLenum target = texture_->target(); - gles2::Texture::ImageState old_state = gles2::Texture::UNBOUND; - gl::GLImage* image = texture_->GetLevelImage(target, 0, &old_state); - if (image && old_state == gpu::gles2::Texture::UNBOUND) { - gl::GLApi* api = gl::g_current_gl_context; - ScopedRestoreTexture scoped_restore(api, target); - api->glBindTextureFn(target, texture_->service_id()); - gles2::Texture::ImageState new_state = gles2::Texture::UNBOUND; - if (image->ShouldBindOrCopy() == gl::GLImage::BIND) { - if (image->BindTexImage(target)) - new_state = gles2::Texture::BOUND; - } else { - ScopedResetAndRestoreUnpackState scoped_unpack_state(api, attribs_, - /*upload=*/true); - if (image->CopyTexImage(target)) - new_state = gles2::Texture::COPIED; - } - if 
(old_state != new_state) - texture_->SetLevelImage(target, 0, image, new_state); - } - } + return ProduceDawnCommon(factory(), manager, tracker, device, this, + IsPassthrough()); +} - scoped_refptr<gfx::NativePixmap> GetNativePixmap() override { - return native_pixmap_; +std::unique_ptr<SharedImageRepresentationSkia> +SharedImageBackingGLTexture::ProduceSkia( + SharedImageManager* manager, + MemoryTypeTracker* tracker, + scoped_refptr<SharedContextState> context_state) { + if (!cached_promise_texture_) { + GrBackendTexture backend_texture; + GetGrBackendTexture(context_state->feature_info(), GetGLTarget(), size(), + GetGLServiceId(), format(), &backend_texture); + cached_promise_texture_ = SkPromiseImageTexture::Make(backend_texture); } + return std::make_unique<SharedImageRepresentationSkiaImpl>( + manager, this, nullptr, std::move(context_state), cached_promise_texture_, + tracker); +} - protected: - std::unique_ptr<SharedImageRepresentationGLTexture> ProduceGLTexture( - SharedImageManager* manager, - MemoryTypeTracker* tracker) override { - return std::make_unique<SharedImageRepresentationGLTextureImpl>( - manager, this, tracker, texture_); +void SharedImageBackingGLTexture::Update( + std::unique_ptr<gfx::GpuFence> in_fence) {} + +void SharedImageBackingGLTexture::InitializeGLTexture( + GLuint service_id, + const SharedImageBackingGLCommon::InitializeGLTextureParams& params) { + SharedImageBackingGLCommon::MakeTextureAndSetParameters( + params.target, service_id, params.framebuffer_attachment_angle, + IsPassthrough() ? &passthrough_texture_ : nullptr, + IsPassthrough() ? nullptr : &texture_); + + if (IsPassthrough()) { + passthrough_texture_->SetEstimatedSize(EstimatedSize(format(), size())); + } else { + texture_->SetLevelInfo(params.target, 0, params.internal_format, + size().width(), size().height(), 1, 0, params.format, + params.type, + params.is_cleared ? 
gfx::Rect(size()) : gfx::Rect()); + texture_->SetImmutable(true, params.has_immutable_storage); } +} - std::unique_ptr<SharedImageRepresentationGLTexture> - ProduceRGBEmulationGLTexture(SharedImageManager* manager, - MemoryTypeTracker* tracker) override { - if (!rgb_emulation_texture_) { - GLenum target = texture_->target(); - gl::GLApi* api = gl::g_current_gl_context; - ScopedRestoreTexture scoped_restore(api, target); - - // Set to false as this code path is only used on Mac. - bool framebuffer_attachment_angle = false; - GLuint service_id = MakeTextureAndSetParameters( - api, target, framebuffer_attachment_angle); - - gles2::Texture::ImageState image_state = gles2::Texture::BOUND; - gl::GLImage* image = texture_->GetLevelImage(target, 0, &image_state); - if (!image) { - LOG(ERROR) << "Texture is not bound to an image."; - return nullptr; - } +void SharedImageBackingGLTexture::SetCompatibilitySwizzle( + const gles2::Texture::CompatibilitySwizzle* swizzle) { + if (!IsPassthrough()) + texture_->SetCompatibilitySwizzle(swizzle); +} - DCHECK(image->ShouldBindOrCopy() == gl::GLImage::BIND); - const GLenum internal_format = GL_RGB; - if (!image->BindTexImageWithInternalformat(target, internal_format)) { - LOG(ERROR) << "Failed to bind image to rgb texture."; - api->glDeleteTexturesFn(1, &service_id); - return nullptr; - } +/////////////////////////////////////////////////////////////////////////////// +// SharedImageBackingGLImage - rgb_emulation_texture_ = new gles2::Texture(service_id); - rgb_emulation_texture_->SetLightweightRef(); - rgb_emulation_texture_->SetTarget(target, 1); - rgb_emulation_texture_->sampler_state_.min_filter = GL_LINEAR; - rgb_emulation_texture_->sampler_state_.mag_filter = GL_LINEAR; - rgb_emulation_texture_->sampler_state_.wrap_s = GL_CLAMP_TO_EDGE; - rgb_emulation_texture_->sampler_state_.wrap_t = GL_CLAMP_TO_EDGE; - - GLenum format = gles2::TextureManager::ExtractFormatFromStorageFormat( - internal_format); - GLenum type = - 
gles2::TextureManager::ExtractTypeFromStorageFormat(internal_format); - - const gles2::Texture::LevelInfo* info = texture_->GetLevelInfo(target, 0); - rgb_emulation_texture_->SetLevelInfo(target, 0, internal_format, - info->width, info->height, 1, 0, - format, type, info->cleared_rect); - - rgb_emulation_texture_->SetLevelImage(target, 0, image, image_state); - rgb_emulation_texture_->SetImmutable(true, false); - } +SharedImageBackingGLImage::SharedImageBackingGLImage( + scoped_refptr<gl::GLImage> image, + const Mailbox& mailbox, + viz::ResourceFormat format, + const gfx::Size& size, + const gfx::ColorSpace& color_space, + uint32_t usage, + const SharedImageBackingGLCommon::InitializeGLTextureParams& params, + const UnpackStateAttribs& attribs, + bool is_passthrough) + : SharedImageBacking(mailbox, + format, + size, + color_space, + usage, + EstimatedSize(format, size), + false /* is_thread_safe */), + image_(image), + gl_params_(params), + gl_unpack_attribs_(attribs), + is_passthrough_(is_passthrough), + weak_factory_(this) { + DCHECK(image_); +} - return std::make_unique<SharedImageRepresentationGLTextureImpl>( - manager, this, tracker, rgb_emulation_texture_); +SharedImageBackingGLImage::~SharedImageBackingGLImage() { + if (rgb_emulation_texture_) { + rgb_emulation_texture_->RemoveLightweightRef(have_context()); + rgb_emulation_texture_ = nullptr; } - - std::unique_ptr<SharedImageRepresentationSkia> ProduceSkia( - SharedImageManager* manager, - MemoryTypeTracker* tracker, - scoped_refptr<SharedContextState> context_state) override { - auto result = std::make_unique<SharedImageRepresentationSkiaImpl>( - manager, this, std::move(context_state), cached_promise_texture_, - tracker, texture_->target(), texture_->service_id()); - cached_promise_texture_ = result->promise_texture(); - return result; + if (IsPassthrough()) { + if (passthrough_texture_) { + if (!have_context()) + passthrough_texture_->MarkContextLost(); + passthrough_texture_.reset(); + } + } else { + if 
(texture_) { + texture_->RemoveLightweightRef(have_context()); + texture_ = nullptr; + } } +} - std::unique_ptr<SharedImageRepresentationDawn> ProduceDawn( - SharedImageManager* manager, - MemoryTypeTracker* tracker, - WGPUDevice device) override { - if (!factory()) { - DLOG(ERROR) << "No SharedImageFactory to create a dawn representation."; - return nullptr; - } +GLenum SharedImageBackingGLImage::GetGLTarget() const { + return gl_params_.target; +} - return ProduceDawnCommon(factory(), manager, tracker, device, this, false); - } +GLuint SharedImageBackingGLImage::GetGLServiceId() const { + return texture_ ? texture_->service_id() : passthrough_texture_->service_id(); +} - private: - gles2::Texture* texture_ = nullptr; - gles2::Texture* rgb_emulation_texture_ = nullptr; - sk_sp<SkPromiseImageTexture> cached_promise_texture_; - const UnpackStateAttribs attribs_; - scoped_refptr<gfx::NativePixmap> native_pixmap_; -}; +scoped_refptr<gfx::NativePixmap> SharedImageBackingGLImage::GetNativePixmap() { + if (IsPassthrough()) + return nullptr; -// Implementation of SharedImageBacking that creates a GL Texture and stores it -// as a gles2::TexturePassthrough. Can be used with the legacy mailbox -// implementation. 
-class SharedImageBackingPassthroughGLTexture - : public SharedImageBackingWithReadAccess { - public: - SharedImageBackingPassthroughGLTexture( - const Mailbox& mailbox, - viz::ResourceFormat format, - const gfx::Size& size, - const gfx::ColorSpace& color_space, - uint32_t usage, - scoped_refptr<gles2::TexturePassthrough> passthrough_texture) - : SharedImageBackingWithReadAccess(mailbox, - format, - size, - color_space, - usage, - passthrough_texture->estimated_size(), - false /* is_thread_safe */), - texture_passthrough_(std::move(passthrough_texture)) { - DCHECK(texture_passthrough_); - } + return image_->GetNativePixmap(); +} - ~SharedImageBackingPassthroughGLTexture() override { - DCHECK(texture_passthrough_); - if (!have_context()) - texture_passthrough_->MarkContextLost(); - texture_passthrough_.reset(); +void SharedImageBackingGLImage::OnMemoryDump( + const std::string& dump_name, + base::trace_event::MemoryAllocatorDump* dump, + base::trace_event::ProcessMemoryDump* pmd, + uint64_t client_tracing_id) { + // Add a |service_guid| which expresses shared ownership between the + // various GPU dumps. + auto client_guid = GetSharedImageGUIDForTracing(mailbox()); + auto service_guid = gl::GetGLTextureServiceGUIDForTracing(GetGLServiceId()); + pmd->CreateSharedGlobalAllocatorDump(service_guid); + // TODO(piman): coalesce constant with TextureManager::DumpTextureRef. + int importance = 2; // This client always owns the ref. + + pmd->AddOwnershipEdge(client_guid, service_guid, importance); + + if (IsPassthrough()) { + auto* gl_image = passthrough_texture_->GetLevelImage(GetGLTarget(), 0); + if (gl_image) + gl_image->OnMemoryDump(pmd, client_tracing_id, dump_name); + } else { + // Dump all sub-levels held by the texture. They will appear below the + // main gl/textures/client_X/mailbox_Y dump. 
+ texture_->DumpLevelMemory(pmd, client_tracing_id, dump_name); } +} - gfx::Rect ClearedRect() const override { +gfx::Rect SharedImageBackingGLImage::ClearedRect() const { + if (IsPassthrough()) { // This backing is used exclusively with ANGLE which handles clear tracking // internally. Act as though the texture is always cleared. return gfx::Rect(size()); + } else { + return texture_->GetLevelClearedRect(texture_->target(), 0); } +} +void SharedImageBackingGLImage::SetClearedRect(const gfx::Rect& cleared_rect) { + if (!IsPassthrough()) + texture_->SetLevelClearedRect(texture_->target(), 0, cleared_rect); +} +bool SharedImageBackingGLImage::ProduceLegacyMailbox( + MailboxManager* mailbox_manager) { + if (IsPassthrough()) + mailbox_manager->ProduceTexture(mailbox(), passthrough_texture_.get()); + else + mailbox_manager->ProduceTexture(mailbox(), texture_); + return true; +} - void SetClearedRect(const gfx::Rect& cleared_rect) override {} +std::unique_ptr<SharedImageRepresentationGLTexture> +SharedImageBackingGLImage::ProduceGLTexture(SharedImageManager* manager, + MemoryTypeTracker* tracker) { + DCHECK(texture_); + return std::make_unique<SharedImageRepresentationGLTextureImpl>( + manager, this, this, tracker, texture_); +} +std::unique_ptr<SharedImageRepresentationGLTexturePassthrough> +SharedImageBackingGLImage::ProduceGLTexturePassthrough( + SharedImageManager* manager, + MemoryTypeTracker* tracker) { + DCHECK(passthrough_texture_); + return std::make_unique<SharedImageRepresentationGLTexturePassthroughImpl>( + manager, this, this, tracker, passthrough_texture_); +} - void Update(std::unique_ptr<gfx::GpuFence> in_fence) override { - GLenum target = texture_passthrough_->target(); - gl::GLApi* api = gl::g_current_gl_context; - ScopedRestoreTexture scoped_restore(api, target); - api->glBindTextureFn(target, texture_passthrough_->service_id()); - - gl::GLImage* image = texture_passthrough_->GetLevelImage(target, 0); - if (!image) - return; - 
image->ReleaseTexImage(target); - if (image->ShouldBindOrCopy() == gl::GLImage::BIND) - image->BindTexImage(target); - else - image->CopyTexImage(target); +std::unique_ptr<SharedImageRepresentationOverlay> +SharedImageBackingGLImage::ProduceOverlay(SharedImageManager* manager, + MemoryTypeTracker* tracker) { +#if defined(OS_MACOSX) + return SharedImageBackingFactoryIOSurface::ProduceOverlay(manager, this, + tracker, image_); +#else // defined(OS_MACOSX) + return SharedImageBacking::ProduceOverlay(manager, tracker); +#endif // !defined(OS_MACOSX) +} + +std::unique_ptr<SharedImageRepresentationDawn> +SharedImageBackingGLImage::ProduceDawn(SharedImageManager* manager, + MemoryTypeTracker* tracker, + WGPUDevice device) { +#if defined(OS_MACOSX) + auto result = SharedImageBackingFactoryIOSurface::ProduceDawn( + manager, this, tracker, device, image_); + if (result) + return result; +#endif // defined(OS_MACOSX) + if (!factory()) { + DLOG(ERROR) << "No SharedImageFactory to create a dawn representation."; + return nullptr; } - bool ProduceLegacyMailbox(MailboxManager* mailbox_manager) override { - DCHECK(texture_passthrough_); - mailbox_manager->ProduceTexture(mailbox(), texture_passthrough_.get()); - return true; + return ProduceDawnCommon(factory(), manager, tracker, device, this, + IsPassthrough()); +} + +std::unique_ptr<SharedImageRepresentationSkia> +SharedImageBackingGLImage::ProduceSkia( + SharedImageManager* manager, + MemoryTypeTracker* tracker, + scoped_refptr<SharedContextState> context_state) { + if (!cached_promise_texture_) { + if (context_state->GrContextIsMetal()) { +#if defined(OS_MACOSX) + cached_promise_texture_ = + SharedImageBackingFactoryIOSurface::ProduceSkiaPromiseTextureMetal( + this, context_state, image_); + DCHECK(cached_promise_texture_); +#endif + } else { + GrBackendTexture backend_texture; + GetGrBackendTexture(context_state->feature_info(), GetGLTarget(), size(), + GetGLServiceId(), format(), &backend_texture); + cached_promise_texture_ = 
SkPromiseImageTexture::Make(backend_texture); + } } + return std::make_unique<SharedImageRepresentationSkiaImpl>( + manager, this, this, std::move(context_state), cached_promise_texture_, + tracker); +} - void OnMemoryDump(const std::string& dump_name, - base::trace_event::MemoryAllocatorDump* dump, - base::trace_event::ProcessMemoryDump* pmd, - uint64_t client_tracing_id) override { - // Add a |service_guid| which expresses shared ownership between the - // various GPU dumps. - auto client_guid = GetSharedImageGUIDForTracing(mailbox()); - auto service_guid = gl::GetGLTextureServiceGUIDForTracing( - texture_passthrough_->service_id()); - pmd->CreateSharedGlobalAllocatorDump(service_guid); +std::unique_ptr<SharedImageRepresentationGLTexture> +SharedImageBackingGLImage::ProduceRGBEmulationGLTexture( + SharedImageManager* manager, + MemoryTypeTracker* tracker) { + if (IsPassthrough()) + return nullptr; - int importance = 2; // This client always owns the ref. - pmd->AddOwnershipEdge(client_guid, service_guid, importance); + if (!rgb_emulation_texture_) { + const GLenum target = GetGLTarget(); + gl::GLApi* api = gl::g_current_gl_context; + ScopedRestoreTexture scoped_restore(api, target); - auto* gl_image = texture_passthrough_->GetLevelImage( - texture_passthrough_->target(), /*level=*/0); - if (gl_image) - gl_image->OnMemoryDump(pmd, client_tracing_id, dump_name); + // Set to false as this code path is only used on Mac. 
+ const bool framebuffer_attachment_angle = false; + SharedImageBackingGLCommon::MakeTextureAndSetParameters( + target, 0 /* service_id */, framebuffer_attachment_angle, nullptr, + &rgb_emulation_texture_); + api->glBindTextureFn(target, rgb_emulation_texture_->service_id()); + + gles2::Texture::ImageState image_state = gles2::Texture::BOUND; + gl::GLImage* image = texture_->GetLevelImage(target, 0, &image_state); + DCHECK_EQ(image, image_.get()); + + DCHECK(image->ShouldBindOrCopy() == gl::GLImage::BIND); + const GLenum internal_format = GL_RGB; + if (!image->BindTexImageWithInternalformat(target, internal_format)) { + LOG(ERROR) << "Failed to bind image to rgb texture."; + rgb_emulation_texture_->RemoveLightweightRef(true /* have_context */); + rgb_emulation_texture_ = nullptr; + return nullptr; + } + GLenum format = + gles2::TextureManager::ExtractFormatFromStorageFormat(internal_format); + GLenum type = + gles2::TextureManager::ExtractTypeFromStorageFormat(internal_format); + + const gles2::Texture::LevelInfo* info = texture_->GetLevelInfo(target, 0); + rgb_emulation_texture_->SetLevelInfo(target, 0, internal_format, + info->width, info->height, 1, 0, + format, type, info->cleared_rect); + + rgb_emulation_texture_->SetLevelImage(target, 0, image, image_state); + rgb_emulation_texture_->SetImmutable(true, false); } - void BeginReadAccess() override {} + return std::make_unique<SharedImageRepresentationGLTextureImpl>( + manager, this, this, tracker, rgb_emulation_texture_); +} - protected: - std::unique_ptr<SharedImageRepresentationGLTexturePassthrough> - ProduceGLTexturePassthrough(SharedImageManager* manager, - MemoryTypeTracker* tracker) override { - return std::make_unique<SharedImageRepresentationGLTexturePassthroughImpl>( - manager, this, tracker, texture_passthrough_); +void SharedImageBackingGLImage::Update( + std::unique_ptr<gfx::GpuFence> in_fence) { + if (in_fence) { + // TODO(dcastagna): Don't wait for the fence if the SharedImage is going + // to be 
scanned out as an HW overlay. Currently we don't know that at + // this point and we always bind the image, therefore we need to wait for + // the fence. + std::unique_ptr<gl::GLFence> egl_fence = + gl::GLFence::CreateFromGpuFence(*in_fence.get()); + egl_fence->ServerWait(); } - std::unique_ptr<SharedImageRepresentationSkia> ProduceSkia( - SharedImageManager* manager, - MemoryTypeTracker* tracker, - scoped_refptr<SharedContextState> context_state) override { - auto result = std::make_unique<SharedImageRepresentationSkiaImpl>( - manager, this, std::move(context_state), cached_promise_texture_, - tracker, texture_passthrough_->target(), - texture_passthrough_->service_id()); - cached_promise_texture_ = result->promise_texture(); - return result; + image_bind_or_copy_needed_ = true; +} + +bool SharedImageBackingGLImage::OnGLTextureBeginAccess(GLenum mode) { + if (mode == GL_SHARED_IMAGE_ACCESS_MODE_OVERLAY_CHROMIUM) + return true; + return BindOrCopyImageIfNeeded(); +} + +bool SharedImageBackingGLImage::OnGLTexturePassthroughBeginAccess(GLenum mode) { + if (mode == GL_SHARED_IMAGE_ACCESS_MODE_OVERLAY_CHROMIUM) + return true; + return BindOrCopyImageIfNeeded(); +} + +bool SharedImageBackingGLImage::OnSkiaBeginReadAccess() { + return BindOrCopyImageIfNeeded(); +} + +bool SharedImageBackingGLImage::OnSkiaBeginWriteAccess() { + return BindOrCopyImageIfNeeded(); +} + +bool SharedImageBackingGLImage::InitializeGLTexture() { + SharedImageBackingGLCommon::MakeTextureAndSetParameters( + gl_params_.target, 0 /* service_id */, + gl_params_.framebuffer_attachment_angle, + IsPassthrough() ? &passthrough_texture_ : nullptr, + IsPassthrough() ? nullptr : &texture_); + + // Set the GLImage to be unbound from the texture. 
+ if (IsPassthrough()) { + passthrough_texture_->SetEstimatedSize(EstimatedSize(format(), size())); + passthrough_texture_->SetLevelImage(gl_params_.target, 0, image_.get()); + passthrough_texture_->set_is_bind_pending(true); + } else { + texture_->SetLevelInfo( + gl_params_.target, 0, gl_params_.internal_format, size().width(), + size().height(), 1, 0, gl_params_.format, gl_params_.type, + gl_params_.is_cleared ? gfx::Rect(size()) : gfx::Rect()); + texture_->SetLevelImage(gl_params_.target, 0, image_.get(), + gles2::Texture::UNBOUND); + texture_->SetImmutable(true, false /* has_immutable_storage */); } - std::unique_ptr<SharedImageRepresentationDawn> ProduceDawn( - SharedImageManager* manager, - MemoryTypeTracker* tracker, - WGPUDevice device) override { - if (!factory()) { - DLOG(ERROR) << "No SharedImageFactory to create a dawn representation."; - return nullptr; + // Historically we have bound GLImages at initialization, rather than waiting + // until the bound representation is actually needed. + if (image_->ShouldBindOrCopy() == gl::GLImage::BIND) + return BindOrCopyImageIfNeeded(); + return true; +} + +bool SharedImageBackingGLImage::BindOrCopyImageIfNeeded() { + if (!image_bind_or_copy_needed_) + return true; + + const GLenum target = GetGLTarget(); + gl::GLApi* api = gl::g_current_gl_context; + ScopedRestoreTexture scoped_restore(api, target); + api->glBindTextureFn(target, GetGLServiceId()); + + // Un-bind the GLImage from the texture if it is currently bound. 
+ if (image_->ShouldBindOrCopy() == gl::GLImage::BIND) { + bool is_bound = false; + if (IsPassthrough()) { + is_bound = !passthrough_texture_->is_bind_pending(); + } else { + gles2::Texture::ImageState old_state = gles2::Texture::UNBOUND; + texture_->GetLevelImage(target, 0, &old_state); + is_bound = old_state == gles2::Texture::BOUND; } + if (is_bound) + image_->ReleaseTexImage(target); + } - return ProduceDawnCommon(factory(), manager, tracker, device, this, true); + // Bind or copy the GLImage to the texture. + gles2::Texture::ImageState new_state = gles2::Texture::UNBOUND; + if (image_->ShouldBindOrCopy() == gl::GLImage::BIND) { + if (gl_params_.is_rgb_emulation) { + if (!image_->BindTexImageWithInternalformat(target, GL_RGB)) { + LOG(ERROR) << "Failed to bind GLImage to RGB target"; + return false; + } + } else { + if (!image_->BindTexImage(target)) { + LOG(ERROR) << "Failed to bind GLImage to target"; + return false; + } + } + new_state = gles2::Texture::BOUND; + } else { + ScopedResetAndRestoreUnpackState scoped_unpack_state(api, + gl_unpack_attribs_, + /*upload=*/true); + if (!image_->CopyTexImage(target)) { + LOG(ERROR) << "Failed to copy GLImage to target"; + return false; + } + new_state = gles2::Texture::COPIED; + } + if (IsPassthrough()) { + passthrough_texture_->set_is_bind_pending(new_state == + gles2::Texture::UNBOUND); + } else { + texture_->SetLevelImage(target, 0, image_.get(), new_state); } - private: - scoped_refptr<gles2::TexturePassthrough> texture_passthrough_; - sk_sp<SkPromiseImageTexture> cached_promise_texture_; -}; + image_bind_or_copy_needed_ = false; + return true; +} + +void SharedImageBackingGLImage::InitializePixels(GLenum format, + GLenum type, + const uint8_t* data) { + DCHECK_EQ(image_->ShouldBindOrCopy(), gl::GLImage::BIND); + BindOrCopyImageIfNeeded(); + + const GLenum target = GetGLTarget(); + gl::GLApi* api = gl::g_current_gl_context; + ScopedRestoreTexture scoped_restore(api, target); + api->glBindTextureFn(target, 
GetGLServiceId()); + ScopedResetAndRestoreUnpackState scoped_unpack_state( + api, gl_unpack_attribs_, true /* uploading_data */); + api->glTexSubImage2DFn(target, 0, 0, 0, size().width(), size().height(), + format, type, data); +} + +/////////////////////////////////////////////////////////////////////////////// +// SharedImageBackingFactoryGLTexture SharedImageBackingFactoryGLTexture::SharedImageBackingFactoryGLTexture( const GpuPreferences& gpu_preferences, @@ -1019,44 +1207,27 @@ SharedImageBackingFactoryGLTexture::CreateSharedImage( image->SetColorSpace(color_space); viz::ResourceFormat format = viz::GetResourceFormat(buffer_format); - - gl::GLApi* api = gl::g_current_gl_context; - ScopedRestoreTexture scoped_restore(api, target); - const bool for_framebuffer_attachment = (usage & (SHARED_IMAGE_USAGE_RASTER | SHARED_IMAGE_USAGE_GLES2_FRAMEBUFFER_HINT)) != 0; - GLuint service_id = MakeTextureAndSetParameters( - api, target, for_framebuffer_attachment && texture_usage_angle_); - bool is_rgb_emulation = usage & SHARED_IMAGE_USAGE_RGB_EMULATION; - - gles2::Texture::ImageState image_state = gles2::Texture::UNBOUND; - if (image->ShouldBindOrCopy() == gl::GLImage::BIND) { - bool is_bound = false; - if (is_rgb_emulation) - is_bound = image->BindTexImageWithInternalformat(target, GL_RGB); - else - is_bound = image->BindTexImage(target); - if (is_bound) { - image_state = gles2::Texture::BOUND; - } else { - LOG(ERROR) << "Failed to bind image to target."; - api->glDeleteTexturesFn(1, &service_id); - return nullptr; - } - } else if (use_passthrough_) { - image->CopyTexImage(target); - image_state = gles2::Texture::COPIED; - } + const bool is_rgb_emulation = (usage & SHARED_IMAGE_USAGE_RGB_EMULATION) != 0; - GLuint internal_format = + SharedImageBackingGLCommon::InitializeGLTextureParams params; + params.target = target; + params.internal_format = is_rgb_emulation ? GL_RGB : image->GetInternalFormat(); - GLenum gl_format = is_rgb_emulation ? 
GL_RGB : image->GetDataFormat(); - GLenum gl_type = image->GetDataType(); - - return MakeBacking(use_passthrough_, mailbox, target, service_id, image, - image_state, internal_format, gl_format, gl_type, nullptr, - true, false, format, size, color_space, usage, attribs); + params.format = is_rgb_emulation ? GL_RGB : image->GetDataFormat(); + params.type = image->GetDataType(); + params.is_cleared = true; + params.is_rgb_emulation = is_rgb_emulation; + params.framebuffer_attachment_angle = + for_framebuffer_attachment && texture_usage_angle_; + auto result = std::make_unique<SharedImageBackingGLImage>( + image, mailbox, format, size, color_space, usage, params, attribs, + use_passthrough_); + if (!result->InitializeGLTexture()) + return nullptr; + return std::move(result); } std::unique_ptr<SharedImageBacking> @@ -1068,11 +1239,17 @@ SharedImageBackingFactoryGLTexture::CreateSharedImageForTest( viz::ResourceFormat format, const gfx::Size& size, uint32_t usage) { - return MakeBacking(false, mailbox, target, service_id, nullptr, - gles2::Texture::UNBOUND, viz::GLInternalFormat(format), - viz::GLDataFormat(format), viz::GLDataType(format), - nullptr, is_cleared, false, format, size, - gfx::ColorSpace(), usage, UnpackStateAttribs()); + auto result = std::make_unique<SharedImageBackingGLTexture>( + mailbox, format, size, gfx::ColorSpace(), usage, + false /* is_passthrough */); + SharedImageBackingGLCommon::InitializeGLTextureParams params; + params.target = target; + params.internal_format = viz::GLInternalFormat(format); + params.format = viz::GLDataFormat(format); + params.type = viz::GLDataType(format); + params.is_cleared = is_cleared; + result->InitializeGLTexture(service_id, params); + return std::move(result); } scoped_refptr<gl::GLImage> SharedImageBackingFactoryGLTexture::MakeGLImage( @@ -1109,66 +1286,6 @@ bool SharedImageBackingFactoryGLTexture::CanImportGpuMemoryBuffer( } std::unique_ptr<SharedImageBacking> -SharedImageBackingFactoryGLTexture::MakeBacking( - 
bool passthrough, - const Mailbox& mailbox, - GLenum target, - GLuint service_id, - scoped_refptr<gl::GLImage> image, - gles2::Texture::ImageState image_state, - GLuint level_info_internal_format, - GLuint gl_format, - GLuint gl_type, - const gles2::Texture::CompatibilitySwizzle* swizzle, - bool is_cleared, - bool has_immutable_storage, - viz::ResourceFormat format, - const gfx::Size& size, - const gfx::ColorSpace& color_space, - uint32_t usage, - const UnpackStateAttribs& attribs) { - if (passthrough) { - scoped_refptr<gles2::TexturePassthrough> passthrough_texture = - base::MakeRefCounted<gles2::TexturePassthrough>(service_id, target); - if (image) { - passthrough_texture->SetLevelImage(target, 0, image.get()); - passthrough_texture->set_is_bind_pending(image_state == - gles2::Texture::UNBOUND); - } - - // Get the texture size from ANGLE and set it on the passthrough texture. - GLint texture_memory_size = 0; - gl::GLApi* api = gl::g_current_gl_context; - api->glGetTexParameterivFn(target, GL_MEMORY_SIZE_ANGLE, - &texture_memory_size); - passthrough_texture->SetEstimatedSize(texture_memory_size); - - return std::make_unique<SharedImageBackingPassthroughGLTexture>( - mailbox, format, size, color_space, usage, - std::move(passthrough_texture)); - } else { - gles2::Texture* texture = new gles2::Texture(service_id); - texture->SetLightweightRef(); - texture->SetTarget(target, 1); - texture->sampler_state_.min_filter = GL_LINEAR; - texture->sampler_state_.mag_filter = GL_LINEAR; - texture->sampler_state_.wrap_s = GL_CLAMP_TO_EDGE; - texture->sampler_state_.wrap_t = GL_CLAMP_TO_EDGE; - texture->SetLevelInfo(target, 0, level_info_internal_format, size.width(), - size.height(), 1, 0, gl_format, gl_type, - is_cleared ? 
gfx::Rect(size) : gfx::Rect()); - if (swizzle) - texture->SetCompatibilitySwizzle(swizzle); - if (image) - texture->SetLevelImage(target, 0, image.get(), image_state); - texture->SetImmutable(true, has_immutable_storage); - - return std::make_unique<SharedImageBackingGLTexture>( - mailbox, format, size, color_space, usage, texture, attribs); - } -} - -std::unique_ptr<SharedImageBacking> SharedImageBackingFactoryGLTexture::MakeEglImageBacking( const Mailbox& mailbox, viz::ResourceFormat format, @@ -1291,23 +1408,17 @@ SharedImageBackingFactoryGLTexture::CreateSharedImageInternal( } } - gl::GLApi* api = gl::g_current_gl_context; - ScopedRestoreTexture scoped_restore(api, target); - const bool for_framebuffer_attachment = (usage & (SHARED_IMAGE_USAGE_RASTER | SHARED_IMAGE_USAGE_GLES2_FRAMEBUFFER_HINT)) != 0; - GLuint service_id = MakeTextureAndSetParameters( - api, target, for_framebuffer_attachment && texture_usage_angle_); scoped_refptr<gl::GLImage> image; + // TODO(piman): We pretend the texture was created in an ES2 context, so that // it can be used in other ES2 contexts, and so we have to pass gl_format as // the internal format in the LevelInfo. https://crbug.com/628064 GLuint level_info_internal_format = format_info.gl_format; bool is_cleared = false; - bool needs_subimage_upload = false; - bool has_immutable_storage = false; if (use_buffer) { image = image_factory_->CreateAnonymousImage( size, format_info.buffer_format, gfx::BufferUsage::SCANOUT, @@ -1322,55 +1433,79 @@ SharedImageBackingFactoryGLTexture::CreateSharedImageInternal( surface_handle, &is_cleared); } // The allocated image should not require copy. - if (!image || image->ShouldBindOrCopy() != gl::GLImage::BIND || - !image->BindTexImage(target)) { - LOG(ERROR) << "CreateSharedImage: Failed to " - << (image ? 
"bind" : "create") << " image"; - api->glDeleteTexturesFn(1, &service_id); + if (!image || image->ShouldBindOrCopy() != gl::GLImage::BIND) { + LOG(ERROR) << "CreateSharedImage: Failed to create bindable image"; return nullptr; } level_info_internal_format = image->GetInternalFormat(); if (color_space.IsValid()) image->SetColorSpace(color_space); - needs_subimage_upload = !pixel_data.empty(); - } else if (format_info.supports_storage) { - api->glTexStorage2DEXTFn(target, 1, format_info.storage_internal_format, - size.width(), size.height()); - has_immutable_storage = true; - needs_subimage_upload = !pixel_data.empty(); - } else if (format_info.is_compressed) { - ScopedResetAndRestoreUnpackState scoped_unpack_state(api, attribs, - !pixel_data.empty()); - api->glCompressedTexImage2DFn(target, 0, format_info.image_internal_format, - size.width(), size.height(), 0, - pixel_data.size(), pixel_data.data()); - } else { - ScopedResetAndRestoreUnpackState scoped_unpack_state(api, attribs, - !pixel_data.empty()); - api->glTexImage2DFn(target, 0, format_info.image_internal_format, - size.width(), size.height(), 0, - format_info.adjusted_format, format_info.gl_type, - pixel_data.data()); } - // If we are using a buffer or TexStorage API but have data to upload, do so - // now via TexSubImage2D. - if (needs_subimage_upload) { - ScopedResetAndRestoreUnpackState scoped_unpack_state(api, attribs, - !pixel_data.empty()); - api->glTexSubImage2DFn(target, 0, 0, 0, size.width(), size.height(), - format_info.adjusted_format, format_info.gl_type, - pixel_data.data()); - } + SharedImageBackingGLCommon::InitializeGLTextureParams params; + params.target = target; + params.internal_format = level_info_internal_format; + params.format = format_info.gl_format; + params.type = format_info.gl_type; + params.is_cleared = pixel_data.empty() ? 
is_cleared : true; + params.has_immutable_storage = !image && format_info.supports_storage; + params.framebuffer_attachment_angle = + for_framebuffer_attachment && texture_usage_angle_; + + if (image) { + DCHECK(!format_info.swizzle); + auto result = std::make_unique<SharedImageBackingGLImage>( + image, mailbox, format, size, color_space, usage, params, attribs, + use_passthrough_); + if (!result->InitializeGLTexture()) + return nullptr; + if (!pixel_data.empty()) { + result->InitializePixels(format_info.adjusted_format, format_info.gl_type, + pixel_data.data()); + } + return std::move(result); + } else { + auto result = std::make_unique<SharedImageBackingGLTexture>( + mailbox, format, size, color_space, usage, use_passthrough_); + result->InitializeGLTexture(0, params); - return MakeBacking( - use_passthrough_, mailbox, target, service_id, image, - gles2::Texture::BOUND, level_info_internal_format, format_info.gl_format, - format_info.gl_type, format_info.swizzle, - pixel_data.empty() ? 
is_cleared : true, has_immutable_storage, format, - size, color_space, usage, attribs); + gl::GLApi* api = gl::g_current_gl_context; + ScopedRestoreTexture scoped_restore(api, target); + api->glBindTextureFn(target, result->GetGLServiceId()); + + if (format_info.supports_storage) { + api->glTexStorage2DEXTFn(target, 1, format_info.storage_internal_format, + size.width(), size.height()); + + if (!pixel_data.empty()) { + ScopedResetAndRestoreUnpackState scoped_unpack_state( + api, attribs, true /* uploading_data */); + api->glTexSubImage2DFn(target, 0, 0, 0, size.width(), size.height(), + format_info.adjusted_format, format_info.gl_type, + pixel_data.data()); + } + } else if (format_info.is_compressed) { + ScopedResetAndRestoreUnpackState scoped_unpack_state(api, attribs, + !pixel_data.empty()); + api->glCompressedTexImage2DFn( + target, 0, format_info.image_internal_format, size.width(), + size.height(), 0, pixel_data.size(), pixel_data.data()); + } else { + ScopedResetAndRestoreUnpackState scoped_unpack_state(api, attribs, + !pixel_data.empty()); + api->glTexImage2DFn(target, 0, format_info.image_internal_format, + size.width(), size.height(), 0, + format_info.adjusted_format, format_info.gl_type, + pixel_data.data()); + } + result->SetCompatibilitySwizzle(format_info.swizzle); + return std::move(result); + } } +/////////////////////////////////////////////////////////////////////////////// +// SharedImageBackingFactoryGLTexture::FormatInfo + SharedImageBackingFactoryGLTexture::FormatInfo::FormatInfo() = default; SharedImageBackingFactoryGLTexture::FormatInfo::~FormatInfo() = default; diff --git a/chromium/gpu/command_buffer/service/shared_image_backing_factory_gl_texture.h b/chromium/gpu/command_buffer/service/shared_image_backing_factory_gl_texture.h index 257cca42041..b73c65631d9 100644 --- a/chromium/gpu/command_buffer/service/shared_image_backing_factory_gl_texture.h +++ b/chromium/gpu/command_buffer/service/shared_image_backing_factory_gl_texture.h @@ -94,24 
+94,6 @@ class GPU_GLES2_EXPORT SharedImageBackingFactoryGLTexture gfx::BufferFormat format, SurfaceHandle surface_handle, const gfx::Size& size); - static std::unique_ptr<SharedImageBacking> MakeBacking( - bool passthrough, - const Mailbox& mailbox, - GLenum target, - GLuint service_id, - scoped_refptr<gl::GLImage> image, - gles2::Texture::ImageState image_state, - GLuint internal_format, - GLuint gl_format, - GLuint gl_type, - const gles2::Texture::CompatibilitySwizzle* swizzle, - bool is_cleared, - bool has_immutable_storage, - viz::ResourceFormat format, - const gfx::Size& size, - const gfx::ColorSpace& color_space, - uint32_t usage, - const UnpackStateAttribs& attribs); // This is meant to be used only on Android. Return nullptr for other // platforms. diff --git a/chromium/gpu/command_buffer/service/shared_image_backing_factory_gl_texture_internal.h b/chromium/gpu/command_buffer/service/shared_image_backing_factory_gl_texture_internal.h new file mode 100644 index 00000000000..dafdfd4a359 --- /dev/null +++ b/chromium/gpu/command_buffer/service/shared_image_backing_factory_gl_texture_internal.h @@ -0,0 +1,296 @@ +// Copyright 2020 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#ifndef GPU_COMMAND_BUFFER_SERVICE_SHARED_IMAGE_BACKING_FACTORY_GL_TEXTURE_INTERNAL_H_ +#define GPU_COMMAND_BUFFER_SERVICE_SHARED_IMAGE_BACKING_FACTORY_GL_TEXTURE_INTERNAL_H_ + +#include "gpu/command_buffer/service/shared_image_backing.h" +#include "gpu/command_buffer/service/shared_image_backing_factory_gl_texture.h" +#include "gpu/command_buffer/service/shared_image_representation.h" + +namespace gpu { + +// Representation of a SharedImageBackingGLTexture or SharedImageBackingGLImage +// as a GL Texture. 
+class SharedImageRepresentationGLTextureImpl + : public SharedImageRepresentationGLTexture { + public: + class Client { + public: + virtual bool OnGLTextureBeginAccess(GLenum mode) = 0; + }; + SharedImageRepresentationGLTextureImpl(SharedImageManager* manager, + SharedImageBacking* backing, + Client* client, + MemoryTypeTracker* tracker, + gles2::Texture* texture); + + private: + // SharedImageRepresentationGLTexture: + gles2::Texture* GetTexture() override; + bool BeginAccess(GLenum mode) override; + + Client* const client_ = nullptr; + gles2::Texture* texture_; +}; + +// Representation of a SharedImageBackingGLTexture or +// SharedImageBackingGLTexturePassthrough as a GL TexturePassthrough. +class SharedImageRepresentationGLTexturePassthroughImpl + : public SharedImageRepresentationGLTexturePassthrough { + public: + class Client { + public: + virtual bool OnGLTexturePassthroughBeginAccess(GLenum mode) = 0; + }; + SharedImageRepresentationGLTexturePassthroughImpl( + SharedImageManager* manager, + SharedImageBacking* backing, + Client* client, + MemoryTypeTracker* tracker, + scoped_refptr<gles2::TexturePassthrough> texture_passthrough); + ~SharedImageRepresentationGLTexturePassthroughImpl() override; + + private: + // SharedImageRepresentationGLTexturePassthrough: + const scoped_refptr<gles2::TexturePassthrough>& GetTexturePassthrough() + override; + bool BeginAccess(GLenum mode) override; + + Client* const client_ = nullptr; + scoped_refptr<gles2::TexturePassthrough> texture_passthrough_; +}; + +// Common helper functions for SharedImageBackingGLTexture and +// SharedImageBackingPassthroughGLImage. +class SharedImageBackingGLCommon : public SharedImageBacking { + public: + // These parameters are used to explicitly initialize a GL texture. + // TODO(https://crbug.com/1092155): The goal here is to cache these parameters + // (which are specified at initialization), so that the GL texture can be + // allocated and bound lazily. 
In that world, |service_id| will not be a + // parameter, but will be allocated lazily, and |image| will be handled by the + // relevant sub-class. + struct InitializeGLTextureParams { + GLenum target = 0; + GLenum internal_format = 0; + GLenum format = 0; + GLenum type = 0; + bool is_cleared = false; + bool is_rgb_emulation = false; + bool framebuffer_attachment_angle = false; + bool has_immutable_storage = false; + }; + + // Helper function to create a GL texture. + static void MakeTextureAndSetParameters( + GLenum target, + GLuint service_id, + bool framebuffer_attachment_angle, + scoped_refptr<gles2::TexturePassthrough>* passthrough_texture, + gles2::Texture** texture); +}; + +// Skia representation for both SharedImageBackingGLCommon. +class SharedImageRepresentationSkiaImpl : public SharedImageRepresentationSkia { + public: + class Client { + public: + virtual bool OnSkiaBeginReadAccess() = 0; + virtual bool OnSkiaBeginWriteAccess() = 0; + }; + SharedImageRepresentationSkiaImpl( + SharedImageManager* manager, + SharedImageBacking* backing, + Client* client, + scoped_refptr<SharedContextState> context_state, + sk_sp<SkPromiseImageTexture> promise_texture, + MemoryTypeTracker* tracker); + ~SharedImageRepresentationSkiaImpl() override; + + void SetBeginReadAccessCallback( + base::RepeatingClosure begin_read_access_callback); + + private: + // SharedImageRepresentationSkia: + sk_sp<SkSurface> BeginWriteAccess( + int final_msaa_count, + const SkSurfaceProps& surface_props, + std::vector<GrBackendSemaphore>* begin_semaphores, + std::vector<GrBackendSemaphore>* end_semaphores) override; + void EndWriteAccess(sk_sp<SkSurface> surface) override; + sk_sp<SkPromiseImageTexture> BeginReadAccess( + std::vector<GrBackendSemaphore>* begin_semaphores, + std::vector<GrBackendSemaphore>* end_semaphores) override; + void EndReadAccess() override; + bool SupportsMultipleConcurrentReadAccess() override; + + void CheckContext(); + + Client* const client_ = nullptr; + 
scoped_refptr<SharedContextState> context_state_; + sk_sp<SkPromiseImageTexture> promise_texture_; + + SkSurface* write_surface_ = nullptr; +#if DCHECK_IS_ON() + gl::GLContext* context_ = nullptr; +#endif +}; + +// Implementation of SharedImageBacking that creates a GL Texture that is not +// backed by a GLImage. +class SharedImageBackingGLTexture : public SharedImageBacking { + public: + SharedImageBackingGLTexture(const Mailbox& mailbox, + viz::ResourceFormat format, + const gfx::Size& size, + const gfx::ColorSpace& color_space, + uint32_t usage, + bool is_passthrough); + SharedImageBackingGLTexture(const SharedImageBackingGLTexture&) = delete; + SharedImageBackingGLTexture& operator=(const SharedImageBackingGLTexture&) = + delete; + ~SharedImageBackingGLTexture() override; + + void InitializeGLTexture( + GLuint service_id, + const SharedImageBackingGLCommon::InitializeGLTextureParams& params); + void SetCompatibilitySwizzle( + const gles2::Texture::CompatibilitySwizzle* swizzle); + + GLenum GetGLTarget() const; + GLuint GetGLServiceId() const; + + private: + // SharedImageBacking: + void OnMemoryDump(const std::string& dump_name, + base::trace_event::MemoryAllocatorDump* dump, + base::trace_event::ProcessMemoryDump* pmd, + uint64_t client_tracing_id) override; + gfx::Rect ClearedRect() const final; + void SetClearedRect(const gfx::Rect& cleared_rect) final; + bool ProduceLegacyMailbox(MailboxManager* mailbox_manager) final; + std::unique_ptr<SharedImageRepresentationGLTexture> ProduceGLTexture( + SharedImageManager* manager, + MemoryTypeTracker* tracker) final; + std::unique_ptr<SharedImageRepresentationGLTexturePassthrough> + ProduceGLTexturePassthrough(SharedImageManager* manager, + MemoryTypeTracker* tracker) final; + std::unique_ptr<SharedImageRepresentationDawn> ProduceDawn( + SharedImageManager* manager, + MemoryTypeTracker* tracker, + WGPUDevice device) final; + std::unique_ptr<SharedImageRepresentationSkia> ProduceSkia( + SharedImageManager* manager, + 
MemoryTypeTracker* tracker, + scoped_refptr<SharedContextState> context_state) override; + void Update(std::unique_ptr<gfx::GpuFence> in_fence) override; + + bool IsPassthrough() const { return is_passthrough_; } + + const bool is_passthrough_; + gles2::Texture* texture_ = nullptr; + scoped_refptr<gles2::TexturePassthrough> passthrough_texture_; + + sk_sp<SkPromiseImageTexture> cached_promise_texture_; +}; + +// Implementation of SharedImageBacking that creates a GL Texture that is backed +// by a GLImage and stores it as a gles2::Texture. Can be used with the legacy +// mailbox implementation. +class SharedImageBackingGLImage + : public SharedImageBacking, + public SharedImageRepresentationGLTextureImpl::Client, + public SharedImageRepresentationGLTexturePassthroughImpl::Client, + public SharedImageRepresentationSkiaImpl::Client { + public: + SharedImageBackingGLImage( + scoped_refptr<gl::GLImage> image, + const Mailbox& mailbox, + viz::ResourceFormat format, + const gfx::Size& size, + const gfx::ColorSpace& color_space, + uint32_t usage, + const SharedImageBackingGLCommon::InitializeGLTextureParams& params, + const SharedImageBackingFactoryGLTexture::UnpackStateAttribs& attribs, + bool is_passthrough); + SharedImageBackingGLImage(const SharedImageBackingGLImage& other) = delete; + SharedImageBackingGLImage& operator=(const SharedImageBackingGLImage& other) = + delete; + ~SharedImageBackingGLImage() override; + + bool InitializeGLTexture(); + void InitializePixels(GLenum format, GLenum type, const uint8_t* data); + + GLenum GetGLTarget() const; + GLuint GetGLServiceId() const; + + private: + // SharedImageBacking: + scoped_refptr<gfx::NativePixmap> GetNativePixmap() override; + void OnMemoryDump(const std::string& dump_name, + base::trace_event::MemoryAllocatorDump* dump, + base::trace_event::ProcessMemoryDump* pmd, + uint64_t client_tracing_id) override; + gfx::Rect ClearedRect() const final; + void SetClearedRect(const gfx::Rect& cleared_rect) final; + bool 
ProduceLegacyMailbox(MailboxManager* mailbox_manager) final; + std::unique_ptr<SharedImageRepresentationGLTexture> ProduceGLTexture( + SharedImageManager* manager, + MemoryTypeTracker* tracker) final; + std::unique_ptr<SharedImageRepresentationGLTexturePassthrough> + ProduceGLTexturePassthrough(SharedImageManager* manager, + MemoryTypeTracker* tracker) final; + std::unique_ptr<SharedImageRepresentationOverlay> ProduceOverlay( + SharedImageManager* manager, + MemoryTypeTracker* tracker) final; + std::unique_ptr<SharedImageRepresentationDawn> ProduceDawn( + SharedImageManager* manager, + MemoryTypeTracker* tracker, + WGPUDevice device) final; + std::unique_ptr<SharedImageRepresentationSkia> ProduceSkia( + SharedImageManager* manager, + MemoryTypeTracker* tracker, + scoped_refptr<SharedContextState> context_state) override; + std::unique_ptr<SharedImageRepresentationGLTexture> + ProduceRGBEmulationGLTexture(SharedImageManager* manager, + MemoryTypeTracker* tracker) override; + void Update(std::unique_ptr<gfx::GpuFence> in_fence) override; + + // SharedImageRepresentationGLTextureImpl::Client: + bool OnGLTextureBeginAccess(GLenum mode) override; + + // SharedImageRepresentationGLTexturePassthroughImpl::Client: + bool OnGLTexturePassthroughBeginAccess(GLenum mode) override; + + // SharedImageRepresentationGLTextureImpl::Client: + bool OnSkiaBeginReadAccess() override; + bool OnSkiaBeginWriteAccess() override; + + bool IsPassthrough() const { return is_passthrough_; } + + scoped_refptr<gl::GLImage> image_; + + // If |image_bind_or_copy_needed_| is true, then either bind or copy |image_| + // to the GL texture, and un-set |image_bind_or_copy_needed_|. 
+ bool BindOrCopyImageIfNeeded(); + bool image_bind_or_copy_needed_ = true; + + const SharedImageBackingGLCommon::InitializeGLTextureParams gl_params_; + const SharedImageBackingFactoryGLTexture::UnpackStateAttribs + gl_unpack_attribs_; + const bool is_passthrough_; + + gles2::Texture* rgb_emulation_texture_ = nullptr; + gles2::Texture* texture_ = nullptr; + scoped_refptr<gles2::TexturePassthrough> passthrough_texture_; + + sk_sp<SkPromiseImageTexture> cached_promise_texture_; + + base::WeakPtrFactory<SharedImageBackingGLImage> weak_factory_; +}; + +} // namespace gpu + +#endif // GPU_COMMAND_BUFFER_SERVICE_SHARED_IMAGE_BACKING_FACTORY_GL_TEXTURE_INTERNAL_H_ diff --git a/chromium/gpu/command_buffer/service/shared_image_backing_factory_gl_texture_unittest.cc b/chromium/gpu/command_buffer/service/shared_image_backing_factory_gl_texture_unittest.cc index fb37ea94ee4..ccbe66b99c9 100644 --- a/chromium/gpu/command_buffer/service/shared_image_backing_factory_gl_texture_unittest.cc +++ b/chromium/gpu/command_buffer/service/shared_image_backing_factory_gl_texture_unittest.cc @@ -863,6 +863,22 @@ TEST_P(SharedImageBackingFactoryGLTextureWithGMBTest, EXPECT_TRUE(stub_image->bound()); int update_counter = stub_image->update_counter(); ref->Update(nullptr); + EXPECT_EQ(stub_image->update_counter(), update_counter); + EXPECT_TRUE(stub_image->bound()); + + // TODO(https://crbug.com/1092155): When we lazily bind the GLImage, this + // will be needed to trigger binding the GLImage. 
+ { + auto skia_representation = + shared_image_representation_factory_->ProduceSkia(mailbox, + context_state_); + std::vector<GrBackendSemaphore> begin_semaphores; + std::vector<GrBackendSemaphore> end_semaphores; + std::unique_ptr<SharedImageRepresentationSkia::ScopedReadAccess> + scoped_read_access; + skia_representation->BeginScopedReadAccess(&begin_semaphores, + &end_semaphores); + } EXPECT_TRUE(stub_image->bound()); EXPECT_GT(stub_image->update_counter(), update_counter); } diff --git a/chromium/gpu/command_buffer/service/shared_image_backing_factory_iosurface.h b/chromium/gpu/command_buffer/service/shared_image_backing_factory_iosurface.h index 4d7006bc582..d0335b8a227 100644 --- a/chromium/gpu/command_buffer/service/shared_image_backing_factory_iosurface.h +++ b/chromium/gpu/command_buffer/service/shared_image_backing_factory_iosurface.h @@ -9,9 +9,12 @@ #include "base/macros.h" #include "components/viz/common/resources/resource_format.h" +#include "gpu/command_buffer/service/shared_image_backing.h" #include "gpu/command_buffer/service/shared_image_backing_factory.h" +#include "gpu/command_buffer/service/shared_image_representation.h" #include "gpu/gpu_gles2_export.h" #include "ui/gl/gl_bindings.h" +#include "ui/gl/gl_image.h" namespace gfx { class Size; @@ -22,7 +25,6 @@ namespace gpu { class GpuDriverBugWorkarounds; struct GpuFeatureInfo; struct Mailbox; -class SharedImageBacking; // Implementation of SharedImageBackingFactory that produce IOSurface backed // SharedImages. This is meant to be used on macOS only. @@ -34,6 +36,24 @@ class GPU_GLES2_EXPORT SharedImageBackingFactoryIOSurface bool use_gl); ~SharedImageBackingFactoryIOSurface() override; + // Helper functions used used by SharedImageRepresentationGLImage to do + // IOSurface-specific sharing. 
+ static sk_sp<SkPromiseImageTexture> ProduceSkiaPromiseTextureMetal( + SharedImageBacking* backing, + scoped_refptr<SharedContextState> context_state, + scoped_refptr<gl::GLImage> image); + static std::unique_ptr<SharedImageRepresentationOverlay> ProduceOverlay( + SharedImageManager* manager, + SharedImageBacking* backing, + MemoryTypeTracker* tracker, + scoped_refptr<gl::GLImage> image); + static std::unique_ptr<SharedImageRepresentationDawn> ProduceDawn( + SharedImageManager* manager, + SharedImageBacking* backing, + MemoryTypeTracker* tracker, + WGPUDevice device, + scoped_refptr<gl::GLImage> image); + // SharedImageBackingFactory implementation. std::unique_ptr<SharedImageBacking> CreateSharedImage( const Mailbox& mailbox, diff --git a/chromium/gpu/command_buffer/service/shared_image_backing_factory_iosurface.mm b/chromium/gpu/command_buffer/service/shared_image_backing_factory_iosurface.mm index f2e9f952c4b..7e01171b0c1 100644 --- a/chromium/gpu/command_buffer/service/shared_image_backing_factory_iosurface.mm +++ b/chromium/gpu/command_buffer/service/shared_image_backing_factory_iosurface.mm @@ -110,28 +110,11 @@ base::scoped_nsprotocol<id<MTLTexture>> API_AVAILABLE(macos(10.11)) viz::ResourceFormat format) { TRACE_EVENT0("gpu", "SharedImageBackingFactoryIOSurface::CreateMetalTexture"); base::scoped_nsprotocol<id<MTLTexture>> mtl_texture; - MTLPixelFormat mtl_pixel_format; - switch (format) { - case viz::RED_8: - case viz::ALPHA_8: - case viz::LUMINANCE_8: - mtl_pixel_format = MTLPixelFormatR8Unorm; - break; - case viz::RG_88: - mtl_pixel_format = MTLPixelFormatRG8Unorm; - break; - case viz::RGBA_8888: - mtl_pixel_format = MTLPixelFormatRGBA8Unorm; - break; - case viz::BGRA_8888: - mtl_pixel_format = MTLPixelFormatBGRA8Unorm; - break; - default: - // TODO(https://crbug.com/952063): Add support for all formats supported - // by GLImageIOSurface. 
- DLOG(ERROR) << "Resource format not yet supported in Metal."; - return mtl_texture; - } + MTLPixelFormat mtl_pixel_format = + static_cast<MTLPixelFormat>(viz::ToMTLPixelFormat(format)); + if (mtl_pixel_format == MTLPixelFormatInvalid) + return mtl_texture; + base::scoped_nsobject<MTLTextureDescriptor> mtl_tex_desc( [MTLTextureDescriptor new]); [mtl_tex_desc setTextureType:MTLTextureType2D]; @@ -186,6 +169,32 @@ class SharedImageRepresentationGLTextureIOSurface DISALLOW_COPY_AND_ASSIGN(SharedImageRepresentationGLTextureIOSurface); }; +class SharedImageRepresentationGLTexturePassthroughIOSurface + : public SharedImageRepresentationGLTexturePassthrough { + public: + SharedImageRepresentationGLTexturePassthroughIOSurface( + SharedImageManager* manager, + SharedImageBacking* backing, + MemoryTypeTracker* tracker, + scoped_refptr<gles2::TexturePassthrough> texture_passthrough) + : SharedImageRepresentationGLTexturePassthrough(manager, + backing, + tracker), + texture_passthrough_(texture_passthrough) {} + + const scoped_refptr<gles2::TexturePassthrough>& GetTexturePassthrough() + override { + return texture_passthrough_; + } + bool BeginAccess(GLenum mode) override { return true; } + void EndAccess() override { FlushIOSurfaceGLOperations(); } + + private: + scoped_refptr<gles2::TexturePassthrough> texture_passthrough_; + DISALLOW_COPY_AND_ASSIGN( + SharedImageRepresentationGLTexturePassthroughIOSurface); +}; + // Representation of a SharedImageBackingIOSurface as a Skia Texture. 
class SharedImageRepresentationSkiaIOSurface : public SharedImageRepresentationSkia { @@ -217,7 +226,7 @@ class SharedImageRepresentationSkiaIOSurface SkColorType sk_color_type = viz::ResourceFormatToClosestSkColorType( /*gpu_compositing=*/true, format()); - return SkSurface::MakeFromBackendTextureAsRenderTarget( + return SkSurface::MakeFromBackendTexture( context_state_->gr_context(), promise_texture_->backendTexture(), kTopLeft_GrSurfaceOrigin, final_msaa_count, sk_color_type, backing()->color_space().ToSkColorSpace(), &surface_props); @@ -250,6 +259,26 @@ class SharedImageRepresentationSkiaIOSurface gles2::Texture* const gles2_texture_; }; +class SharedImageRepresentationOverlayIOSurface + : public SharedImageRepresentationOverlay { + public: + SharedImageRepresentationOverlayIOSurface(SharedImageManager* manager, + SharedImageBacking* backing, + MemoryTypeTracker* tracker, + scoped_refptr<gl::GLImage> gl_image) + : SharedImageRepresentationOverlay(manager, backing, tracker), + gl_image_(gl_image) {} + + ~SharedImageRepresentationOverlayIOSurface() override { EndReadAccess(); } + + private: + bool BeginReadAccess() override { return true; } + void EndReadAccess() override {} + gl::GLImage* GetGLImage() override { return gl_image_.get(); } + + scoped_refptr<gl::GLImage> gl_image_; +}; + // Representation of a SharedImageBackingIOSurface as a Dawn Texture. 
#if BUILDFLAG(USE_DAWN) class SharedImageRepresentationDawnIOSurface @@ -414,7 +443,7 @@ class SharedImageBackingIOSurface : public ClearTrackingSharedImageBacking { bool ProduceLegacyMailbox(MailboxManager* mailbox_manager) final { DCHECK(io_surface_); - legacy_texture_ = GenGLTexture(); + GenGLTexture(&legacy_texture_, nullptr); if (!legacy_texture_) { return false; } @@ -432,15 +461,28 @@ class SharedImageBackingIOSurface : public ClearTrackingSharedImageBacking { std::unique_ptr<SharedImageRepresentationGLTexture> ProduceGLTexture( SharedImageManager* manager, MemoryTypeTracker* tracker) final { - gles2::Texture* texture = GenGLTexture(); - if (!texture) { + gles2::Texture* texture = nullptr; + GenGLTexture(&texture, nullptr); + if (!texture) return nullptr; - } - return std::make_unique<SharedImageRepresentationGLTextureIOSurface>( manager, this, tracker, texture); } + std::unique_ptr<SharedImageRepresentationGLTexturePassthrough> + ProduceGLTexturePassthrough(SharedImageManager* manager, + MemoryTypeTracker* tracker) override { + TRACE_EVENT0("gpu", + "SharedImageBackingFactoryIOSurface::GenGLTexturePassthrough"); + scoped_refptr<gles2::TexturePassthrough> texture_passthrough; + GenGLTexture(nullptr, &texture_passthrough); + if (!texture_passthrough) + return nullptr; + return std::make_unique< + SharedImageRepresentationGLTexturePassthroughIOSurface>( + manager, this, tracker, texture_passthrough); + } + std::unique_ptr<SharedImageRepresentationSkia> ProduceSkia( SharedImageManager* manager, MemoryTypeTracker* tracker, @@ -448,7 +490,7 @@ class SharedImageBackingIOSurface : public ClearTrackingSharedImageBacking { gles2::Texture* gles2_texture = nullptr; GrBackendTexture gr_backend_texture; if (context_state->GrContextIsGL()) { - gles2_texture = GenGLTexture(); + GenGLTexture(&gles2_texture, nullptr); if (!gles2_texture) return nullptr; GetGrBackendTexture( @@ -475,6 +517,15 @@ class SharedImageBackingIOSurface : public ClearTrackingSharedImageBacking { 
gles2_texture); } + std::unique_ptr<SharedImageRepresentationOverlay> ProduceOverlay( + SharedImageManager* manager, + MemoryTypeTracker* tracker) override { + if (!EnsureGLImage()) + return nullptr; + return SharedImageBackingFactoryIOSurface::ProduceOverlay( + manager, this, tracker, gl_image_); + } + std::unique_ptr<SharedImageRepresentationDawn> ProduceDawn( SharedImageManager* manager, MemoryTypeTracker* tracker, @@ -493,19 +544,35 @@ class SharedImageBackingIOSurface : public ClearTrackingSharedImageBacking { } private: - gles2::Texture* GenGLTexture() { + bool EnsureGLImage() { + if (!gl_image_) { + GLFormatInfo gl_info = GetGLFormatInfo(format()); + scoped_refptr<gl::GLImageIOSurface> gl_image( + gl::GLImageIOSurface::Create(size(), gl_info.internal_format)); + if (!gl_image->Initialize(io_surface_, gfx::GenericSharedMemoryId(), + viz::BufferFormat(format()))) { + LOG(ERROR) << "Failed to create GLImageIOSurface"; + } else { + gl_image_ = gl_image; + } + } + return !!gl_image_; + } + + void GenGLTexture( + gles2::Texture** texture, + scoped_refptr<gles2::TexturePassthrough>* texture_passthrough) { TRACE_EVENT0("gpu", "SharedImageBackingFactoryIOSurface::GenGLTexture"); GLFormatInfo gl_info = GetGLFormatInfo(format()); DCHECK(gl_info.supported); + if (texture) + *texture = nullptr; + if (texture_passthrough) + *texture_passthrough = nullptr; // Wrap the IOSurface in a GLImageIOSurface - scoped_refptr<gl::GLImageIOSurface> image( - gl::GLImageIOSurface::Create(size(), gl_info.internal_format)); - if (!image->Initialize(io_surface_, gfx::GenericSharedMemoryId(), - viz::BufferFormat(format()))) { - LOG(ERROR) << "Failed to create GLImageIOSurface"; - return nullptr; - } + if (!EnsureGLImage()) + return; gl::GLApi* api = gl::g_current_gl_context; @@ -527,37 +594,48 @@ class SharedImageBackingIOSurface : public ClearTrackingSharedImageBacking { GL_CLAMP_TO_EDGE); // Bind the GLImageIOSurface to our texture - if (!image->BindTexImage(GL_TEXTURE_RECTANGLE)) { + if 
(!gl_image_->BindTexImage(GL_TEXTURE_RECTANGLE)) { LOG(ERROR) << "Failed to bind GLImageIOSurface"; api->glBindTextureFn(GL_TEXTURE_RECTANGLE, old_texture_binding); api->glDeleteTexturesFn(1, &service_id); - return nullptr; + return; } // If the backing is already cleared, no need to clear it again. gfx::Rect cleared_rect = ClearedRect(); // Manually create a gles2::Texture wrapping our driver texture. - gles2::Texture* texture = new gles2::Texture(service_id); - texture->SetLightweightRef(); - texture->SetTarget(GL_TEXTURE_RECTANGLE, 1); - texture->sampler_state_.min_filter = GL_LINEAR; - texture->sampler_state_.mag_filter = GL_LINEAR; - texture->sampler_state_.wrap_t = GL_CLAMP_TO_EDGE; - texture->sampler_state_.wrap_s = GL_CLAMP_TO_EDGE; - texture->SetLevelInfo(GL_TEXTURE_RECTANGLE, 0, gl_info.internal_format, - size().width(), size().height(), 1, 0, gl_info.format, - gl_info.type, cleared_rect); - texture->SetLevelImage(GL_TEXTURE_RECTANGLE, 0, image.get(), - gles2::Texture::BOUND); - texture->SetImmutable(true, false); - - DCHECK_EQ(image->GetInternalFormat(), gl_info.internal_format); + if (texture) { + *texture = new gles2::Texture(service_id); + (*texture)->SetLightweightRef(); + (*texture)->SetTarget(GL_TEXTURE_RECTANGLE, 1); + (*texture)->set_min_filter(GL_LINEAR); + (*texture)->set_mag_filter(GL_LINEAR); + (*texture)->set_wrap_t(GL_CLAMP_TO_EDGE); + (*texture)->set_wrap_s(GL_CLAMP_TO_EDGE); + (*texture)->SetLevelInfo(GL_TEXTURE_RECTANGLE, 0, gl_info.internal_format, + size().width(), size().height(), 1, 0, + gl_info.format, gl_info.type, cleared_rect); + (*texture)->SetLevelImage(GL_TEXTURE_RECTANGLE, 0, gl_image_.get(), + gles2::Texture::BOUND); + (*texture)->SetImmutable(true, false); + } + if (texture_passthrough) { + *texture_passthrough = scoped_refptr<gles2::TexturePassthrough>( + new gles2::TexturePassthrough(service_id, GL_TEXTURE_RECTANGLE, + gl_info.internal_format, size().width(), + size().height(), 1, 0, gl_info.format, + gl_info.type)); + 
(*texture_passthrough) + ->SetLevelImage(GL_TEXTURE_RECTANGLE, 0, gl_image_.get()); + } + + DCHECK_EQ(gl_image_->GetInternalFormat(), gl_info.internal_format); api->glBindTextureFn(GL_TEXTURE_RECTANGLE, old_texture_binding); - return texture; } + scoped_refptr<gl::GLImageIOSurface> gl_image_; base::ScopedCFTypeRef<IOSurfaceRef> io_surface_; base::Optional<WGPUTextureFormat> dawn_format_; base::scoped_nsprotocol<id<MTLTexture>> mtl_texture_; @@ -715,4 +793,73 @@ bool SharedImageBackingFactoryIOSurface::CanImportGpuMemoryBuffer( return false; } +// static +sk_sp<SkPromiseImageTexture> +SharedImageBackingFactoryIOSurface::ProduceSkiaPromiseTextureMetal( + SharedImageBacking* backing, + scoped_refptr<SharedContextState> context_state, + scoped_refptr<gl::GLImage> image) { + if (@available(macOS 10.11, *)) { + DCHECK(context_state->GrContextIsMetal()); + + base::ScopedCFTypeRef<IOSurfaceRef> io_surface = + static_cast<gl::GLImageIOSurface*>(image.get())->io_surface(); + + id<MTLDevice> mtl_device = + context_state->metal_context_provider()->GetMTLDevice(); + auto mtl_texture = CreateMetalTexture(mtl_device, io_surface.get(), + backing->size(), backing->format()); + DCHECK(mtl_texture); + + GrMtlTextureInfo info; + info.fTexture.retain(mtl_texture.get()); + auto gr_backend_texture = + GrBackendTexture(backing->size().width(), backing->size().height(), + GrMipMapped::kNo, info); + return SkPromiseImageTexture::Make(gr_backend_texture); + } + return nullptr; +} + +// static +std::unique_ptr<SharedImageRepresentationOverlay> +SharedImageBackingFactoryIOSurface::ProduceOverlay( + SharedImageManager* manager, + SharedImageBacking* backing, + MemoryTypeTracker* tracker, + scoped_refptr<gl::GLImage> image) { + return std::make_unique<SharedImageRepresentationOverlayIOSurface>( + manager, backing, tracker, image); +} + +// static +std::unique_ptr<SharedImageRepresentationDawn> +SharedImageBackingFactoryIOSurface::ProduceDawn( + SharedImageManager* manager, + SharedImageBacking* 
backing, + MemoryTypeTracker* tracker, + WGPUDevice device, + scoped_refptr<gl::GLImage> image) { +#if BUILDFLAG(USE_DAWN) + // See comments in SharedImageBackingFactoryIOSurface::CreateSharedImage + // regarding RGBA versus BGRA. + viz::ResourceFormat actual_format = backing->format(); + if (actual_format == viz::RGBA_8888) + actual_format = viz::BGRA_8888; + + base::ScopedCFTypeRef<IOSurfaceRef> io_surface = + static_cast<gl::GLImageIOSurface*>(image.get())->io_surface(); + + base::Optional<WGPUTextureFormat> wgpu_format = + viz::ToWGPUFormat(actual_format); + if (wgpu_format.value() == WGPUTextureFormat_Undefined) + return nullptr; + + return std::make_unique<SharedImageRepresentationDawnIOSurface>( + manager, backing, tracker, device, io_surface, wgpu_format.value()); +#else // BUILDFLAG(USE_DAWN) + return nullptr; +#endif // BUILDFLAG(USE_DAWN) +} + } // namespace gpu diff --git a/chromium/gpu/command_buffer/service/shared_image_factory.cc b/chromium/gpu/command_buffer/service/shared_image_factory.cc index 2eb65e9ba98..36d96fb7896 100644 --- a/chromium/gpu/command_buffer/service/shared_image_factory.cc +++ b/chromium/gpu/command_buffer/service/shared_image_factory.cc @@ -86,8 +86,7 @@ SharedImageFactory::SharedImageFactory( shared_context_state_(context_state), memory_tracker_(std::make_unique<MemoryTypeTracker>(memory_tracker)), using_vulkan_(context_state && context_state->GrContextIsVulkan()), - using_metal_(context_state && context_state->GrContextIsMetal()), - using_dawn_(context_state && context_state->GrContextIsDawn()) { + using_skia_dawn_(context_state && context_state->GrContextIsDawn()) { bool use_gl = gl::GetGLImplementation() != gl::kGLImplementationNone; if (use_gl) { gl_backing_factory_ = std::make_unique<SharedImageBackingFactoryGLTexture>( @@ -203,7 +202,7 @@ bool SharedImageFactory::CreateSharedImage(const Mailbox& mailbox, SharedImageBackingFactory* factory = nullptr; if (backing_factory_for_testing_) { factory = 
backing_factory_for_testing_; - } else if (!using_vulkan_ && !using_dawn_) { + } else if (!using_vulkan_ && !using_skia_dawn_) { allow_legacy_mailbox = true; factory = gl_backing_factory_.get(); } else { @@ -213,6 +212,8 @@ bool SharedImageFactory::CreateSharedImage(const Mailbox& mailbox, return false; auto backing = factory->CreateSharedImage(mailbox, format, size, color_space, usage, data); + if (backing) + backing->OnWriteSucceeded(); return RegisterBacking(std::move(backing), allow_legacy_mailbox); } @@ -235,6 +236,8 @@ bool SharedImageFactory::CreateSharedImage(const Mailbox& mailbox, auto backing = factory->CreateSharedImage(mailbox, client_id, std::move(handle), format, surface_handle, size, color_space, usage); + if (backing) + backing->OnWriteSucceeded(); return RegisterBacking(std::move(backing), allow_legacy_mailbox); } @@ -310,7 +313,9 @@ bool SharedImageFactory::PresentSwapChain(const Mailbox& mailbox) { #if defined(OS_FUCHSIA) bool SharedImageFactory::RegisterSysmemBufferCollection( gfx::SysmemBufferCollectionId id, - zx::channel token) { + zx::channel token, + gfx::BufferFormat format, + gfx::BufferUsage usage) { decltype(buffer_collections_)::iterator it; bool inserted; std::tie(it, inserted) = @@ -331,9 +336,9 @@ bool SharedImageFactory::RegisterSysmemBufferCollection( VkDevice device = vulkan_context_provider_->GetDeviceQueue()->GetVulkanDevice(); DCHECK(device != VK_NULL_HANDLE); - it->second = - vulkan_context_provider_->GetVulkanImplementation() - ->RegisterSysmemBufferCollection(device, id, std::move(token)); + it->second = vulkan_context_provider_->GetVulkanImplementation() + ->RegisterSysmemBufferCollection( + device, id, std::move(token), format, usage); return true; } @@ -371,6 +376,25 @@ bool SharedImageFactory::IsSharedBetweenThreads(uint32_t usage) { (usage & SHARED_IMAGE_USAGE_DISPLAY); } +bool SharedImageFactory::CanUseWrappedSkImage(uint32_t usage) const { + if (!wrapped_sk_image_factory_) + return false; + + constexpr auto 
kWrappedSkImageUsage = SHARED_IMAGE_USAGE_RASTER | + SHARED_IMAGE_USAGE_OOP_RASTERIZATION | + SHARED_IMAGE_USAGE_DISPLAY; + + if (using_vulkan_ || using_skia_dawn_) { + // For SkiaRenderer/Vulkan+Dawn use WrappedSkImage if the usage is only + // raster and/or display. + return (usage & kWrappedSkImageUsage) && !(usage & ~kWrappedSkImageUsage); + } else { + // For SkiaRenderer/GL only use WrappedSkImages for OOP-R because + // CopySubTexture() doesn't use Skia. https://crbug.com/984045 + return usage == kWrappedSkImageUsage; + } +} + SharedImageBackingFactory* SharedImageFactory::GetFactoryByUsage( uint32_t usage, viz::ResourceFormat format, @@ -382,12 +406,9 @@ SharedImageBackingFactory* SharedImageFactory::GetFactoryByUsage( bool using_dawn = usage & SHARED_IMAGE_USAGE_WEBGPU; bool vulkan_usage = using_vulkan_ && (usage & SHARED_IMAGE_USAGE_DISPLAY); bool gl_usage = usage & SHARED_IMAGE_USAGE_GLES2; - bool share_between_gl_metal = - using_metal_ && (usage & SHARED_IMAGE_USAGE_OOP_RASTERIZATION); bool share_between_threads = IsSharedBetweenThreads(usage); bool share_between_gl_vulkan = gl_usage && vulkan_usage; bool using_interop_factory = share_between_gl_vulkan || using_dawn || - share_between_gl_metal || (usage & SHARED_IMAGE_USAGE_VIDEO_DECODE) || (share_between_threads && vulkan_usage); @@ -397,23 +418,25 @@ SharedImageBackingFactory* SharedImageFactory::GetFactoryByUsage( using_interop_factory |= usage & SHARED_IMAGE_USAGE_SCANOUT; #endif - // wrapped_sk_image_factory_ is only used for OOPR and supports - // a limited number of flags (e.g. no SHARED_IMAGE_USAGE_SCANOUT). 
- constexpr auto kWrappedSkImageUsage = SHARED_IMAGE_USAGE_RASTER | - SHARED_IMAGE_USAGE_OOP_RASTERIZATION | - SHARED_IMAGE_USAGE_DISPLAY; - bool using_wrapped_sk_image = - wrapped_sk_image_factory_ && (usage == kWrappedSkImageUsage) && - !using_interop_factory && !share_between_threads; - using_interop_factory |= vulkan_usage && !using_wrapped_sk_image; + bool using_wrapped_sk_image = !using_interop_factory && + !share_between_threads && + CanUseWrappedSkImage(usage); + if (using_wrapped_sk_image) { + if (gmb_type == gfx::EMPTY_BUFFER || + wrapped_sk_image_factory_->CanImportGpuMemoryBuffer(gmb_type)) { + *allow_legacy_mailbox = false; + return wrapped_sk_image_factory_.get(); + } + } + + using_interop_factory |= vulkan_usage; if (gmb_type != gfx::EMPTY_BUFFER) { bool interop_factory_supports_gmb = interop_backing_factory_ && interop_backing_factory_->CanImportGpuMemoryBuffer(gmb_type); - if (using_wrapped_sk_image || - (using_interop_factory && !interop_backing_factory_)) { + if (using_interop_factory && !interop_backing_factory_) { LOG(ERROR) << "Unable to screate SharedImage backing: no support for the " "requested GpuMemoryBufferType."; return nullptr; @@ -424,11 +447,8 @@ SharedImageBackingFactory* SharedImageFactory::GetFactoryByUsage( using_interop_factory |= interop_factory_supports_gmb; } - *allow_legacy_mailbox = !using_wrapped_sk_image && !using_interop_factory && - !using_vulkan_ && !share_between_threads; - - if (using_wrapped_sk_image) - return wrapped_sk_image_factory_.get(); + *allow_legacy_mailbox = + !using_interop_factory && !using_vulkan_ && !share_between_threads; if (using_interop_factory) { // TODO(crbug.com/969114): Not all shared image factory implementations diff --git a/chromium/gpu/command_buffer/service/shared_image_factory.h b/chromium/gpu/command_buffer/service/shared_image_factory.h index 60cb6aa9346..9753cf95a2a 100644 --- a/chromium/gpu/command_buffer/service/shared_image_factory.h +++ 
b/chromium/gpu/command_buffer/service/shared_image_factory.h @@ -99,7 +99,9 @@ class GPU_GLES2_EXPORT SharedImageFactory { #if defined(OS_FUCHSIA) bool RegisterSysmemBufferCollection(gfx::SysmemBufferCollectionId id, - zx::channel token); + zx::channel token, + gfx::BufferFormat format, + gfx::BufferUsage usage); bool ReleaseSysmemBufferCollection(gfx::SysmemBufferCollectionId id); #endif // defined(OS_FUCHSIA) @@ -117,20 +119,23 @@ class GPU_GLES2_EXPORT SharedImageFactory { void RegisterSharedImageBackingFactoryForTesting( SharedImageBackingFactory* factory); + MailboxManager* mailbox_manager() { return mailbox_manager_; } + private: bool IsSharedBetweenThreads(uint32_t usage); + bool CanUseWrappedSkImage(uint32_t usage) const; SharedImageBackingFactory* GetFactoryByUsage( uint32_t usage, viz::ResourceFormat format, bool* allow_legacy_mailbox, gfx::GpuMemoryBufferType gmb_type = gfx::EMPTY_BUFFER); + MailboxManager* mailbox_manager_; SharedImageManager* shared_image_manager_; SharedContextState* shared_context_state_; std::unique_ptr<MemoryTypeTracker> memory_tracker_; const bool using_vulkan_; - const bool using_metal_; - const bool using_dawn_; + const bool using_skia_dawn_; // The set of SharedImages which have been created (and are being kept alive) // by this factory. 
diff --git a/chromium/gpu/command_buffer/service/shared_image_manager.cc b/chromium/gpu/command_buffer/service/shared_image_manager.cc index ec4004578a8..578b38c7b84 100644 --- a/chromium/gpu/command_buffer/service/shared_image_manager.cc +++ b/chromium/gpu/command_buffer/service/shared_image_manager.cc @@ -103,17 +103,19 @@ SharedImageManager::Register(std::unique_ptr<SharedImageBacking> backing, DCHECK(backing->mailbox().IsSharedImage()); AutoLock autolock(this); - const auto lower_bound = images_.lower_bound(backing->mailbox()); - if (lower_bound != images_.end() && - (*lower_bound)->mailbox() == backing->mailbox()) { + if (images_.find(backing->mailbox()) != images_.end()) { LOG(ERROR) << "SharedImageManager::Register: Trying to register an " "already registered mailbox."; return nullptr; } + // TODO(jonross): Determine how the direct destruction of a + // SharedImageRepresentationFactoryRef leads to ref-counting issues as + // well as thread-checking failures in tests. auto factory_ref = std::make_unique<SharedImageRepresentationFactoryRef>( this, backing.get(), tracker); - images_.emplace_hint(lower_bound, std::move(backing)); + images_.emplace(std::move(backing)); + return factory_ref; } @@ -301,21 +303,32 @@ void SharedImageManager::OnRepresentationDestroyed( CALLED_ON_VALID_THREAD(); AutoLock autolock(this); - auto found = images_.find(mailbox); - if (found == images_.end()) { - LOG(ERROR) << "SharedImageManager::OnRepresentationDestroyed: Trying to " - "destroy a non existent mailbox."; - return; + + { + auto found = images_.find(mailbox); + if (found == images_.end()) { + LOG(ERROR) << "SharedImageManager::OnRepresentationDestroyed: Trying to " + "destroy a non existent mailbox."; + return; + } + + // TODO(piman): When the original (factory) representation is destroyed, we + // should treat the backing as pending destruction and prevent additional + // representations from being created. 
This will help avoid races due to a + // consumer getting lucky with timing due to a representation inadvertently + // extending a backing's lifetime. + (*found)->ReleaseRef(representation); } - // TODO(piman): When the original (factory) representation is destroyed, we - // should treat the backing as pending destruction and prevent additional - // representations from being created. This will help avoid races due to a - // consumer getting lucky with timing due to a representation inadvertently - // extending a backing's lifetime. - (*found)->ReleaseRef(representation); - if (!(*found)->HasAnyRefs()) - images_.erase(found); + { + // TODO(jonross): Once the pending destruction TODO above is addressed then + // this block can be removed, and the deletion can occur directly. Currently + // SharedImageManager::OnRepresentationDestroyed can be nested, so we need + // to get the iterator again. + auto found = images_.find(mailbox); + if (found != images_.end() && (!(*found)->HasAnyRefs())) + images_.erase(found); + } } void SharedImageManager::OnMemoryDump(const Mailbox& mailbox, diff --git a/chromium/gpu/command_buffer/service/shared_image_representation.cc b/chromium/gpu/command_buffer/service/shared_image_representation.cc index 20196375765..fd2d31b5b2e 100644 --- a/chromium/gpu/command_buffer/service/shared_image_representation.cc +++ b/chromium/gpu/command_buffer/service/shared_image_representation.cc @@ -117,6 +117,8 @@ SharedImageRepresentationSkia::BeginScopedWriteAccess( if (!surface) return nullptr; + backing()->OnWriteSucceeded(); + return std::make_unique<ScopedWriteAccess>( util::PassKey<SharedImageRepresentationSkia>(), this, std::move(surface)); } @@ -157,6 +159,8 @@ SharedImageRepresentationSkia::BeginScopedReadAccess( if (!promise_image_texture) return nullptr; + backing()->OnReadSucceeded(); + return std::make_unique<ScopedReadAccess>( util::PassKey<SharedImageRepresentationSkia>(), this, std::move(promise_image_texture)); @@ -178,6 +182,8 @@ 
SharedImageRepresentationOverlay::BeginScopedReadAccess(bool needs_gl_image) { if (!BeginReadAccess()) return nullptr; + backing()->OnReadSucceeded(); + return std::make_unique<ScopedReadAccess>( util::PassKey<SharedImageRepresentationOverlay>(), this, needs_gl_image ? GetGLImage() : nullptr); @@ -205,6 +211,16 @@ SharedImageRepresentationDawn::BeginScopedAccess( WGPUTexture texture = BeginAccess(usage); if (!texture) return nullptr; + + constexpr auto kWriteUsage = + WGPUTextureUsage_CopyDst | WGPUTextureUsage_OutputAttachment; + + if (usage & kWriteUsage) { + backing()->OnWriteSucceeded(); + } else { + backing()->OnReadSucceeded(); + } + return std::make_unique<ScopedAccess>( util::PassKey<SharedImageRepresentationDawn>(), this, texture); } diff --git a/chromium/gpu/command_buffer/service/shared_image_representation_gl_ozone.cc b/chromium/gpu/command_buffer/service/shared_image_representation_gl_ozone.cc index c931778902f..5645db88629 100644 --- a/chromium/gpu/command_buffer/service/shared_image_representation_gl_ozone.cc +++ b/chromium/gpu/command_buffer/service/shared_image_representation_gl_ozone.cc @@ -63,10 +63,10 @@ SharedImageRepresentationGLOzone::Create( gles2::Texture* texture = new gles2::Texture(gl_texture_service_id); texture->SetLightweightRef(); texture->SetTarget(GL_TEXTURE_2D, 1 /*max_levels=*/); - texture->sampler_state_.min_filter = GL_LINEAR; - texture->sampler_state_.mag_filter = GL_LINEAR; - texture->sampler_state_.wrap_t = GL_CLAMP_TO_EDGE; - texture->sampler_state_.wrap_s = GL_CLAMP_TO_EDGE; + texture->set_min_filter(GL_LINEAR); + texture->set_mag_filter(GL_LINEAR); + texture->set_wrap_t(GL_CLAMP_TO_EDGE); + texture->set_wrap_s(GL_CLAMP_TO_EDGE); GLenum gl_format = viz::GLDataFormat(format); GLenum gl_type = viz::GLDataType(format); diff --git a/chromium/gpu/command_buffer/service/shared_image_representation_skia_gl.cc b/chromium/gpu/command_buffer/service/shared_image_representation_skia_gl.cc index d23d5358e2f..a5d75a204f0 100644 --- 
a/chromium/gpu/command_buffer/service/shared_image_representation_skia_gl.cc +++ b/chromium/gpu/command_buffer/service/shared_image_representation_skia_gl.cc @@ -96,9 +96,6 @@ sk_sp<SkSurface> SharedImageRepresentationSkiaGL::BeginWriteAccess( SkColorType sk_color_type = viz::ResourceFormatToClosestSkColorType( /*gpu_compositing=*/true, format()); - // TODO(https://crbug.com/1054033): Switch back to - // MakeFromBackendTextureAsRenderTarget once we no longer use GLRendererCopier - // with surfaceless surfaces. auto surface = SkSurface::MakeFromBackendTexture( context_state_->gr_context(), promise_texture_->backendTexture(), kTopLeft_GrSurfaceOrigin, final_msaa_count, sk_color_type, diff --git a/chromium/gpu/command_buffer/service/shared_image_video.cc b/chromium/gpu/command_buffer/service/shared_image_video.cc index 50838310832..db9b2524073 100644 --- a/chromium/gpu/command_buffer/service/shared_image_video.cc +++ b/chromium/gpu/command_buffer/service/shared_image_video.cc @@ -13,6 +13,7 @@ #include "components/viz/common/resources/resource_sizes.h" #include "gpu/command_buffer/common/shared_image_usage.h" #include "gpu/command_buffer/service/abstract_texture.h" +#include "gpu/command_buffer/service/ahardwarebuffer_utils.h" #include "gpu/command_buffer/service/mailbox_manager.h" #include "gpu/command_buffer/service/memory_tracking.h" #include "gpu/command_buffer/service/shared_context_state.h" @@ -145,9 +146,9 @@ class SharedImageRepresentationGLTextureVideo gles2::Texture* GetTexture() override { return texture_; } bool BeginAccess(GLenum mode) override { - // This representation should only be called for read. - DCHECK_EQ(mode, - static_cast<GLenum>(GL_SHARED_IMAGE_ACCESS_MODE_READ_CHROMIUM)); + // This representation should only be called for read or overlay. 
+ DCHECK(mode == GL_SHARED_IMAGE_ACCESS_MODE_READ_CHROMIUM || + mode == GL_SHARED_IMAGE_ACCESS_MODE_OVERLAY_CHROMIUM); auto* video_backing = static_cast<SharedImageVideo*>(backing()); video_backing->BeginGLReadAccess(); @@ -182,9 +183,9 @@ class SharedImageRepresentationGLTexturePassthroughVideo } bool BeginAccess(GLenum mode) override { - // This representation should only be called for read. - DCHECK_EQ(mode, - static_cast<GLenum>(GL_SHARED_IMAGE_ACCESS_MODE_READ_CHROMIUM)); + // This representation should only be called for read or overlay. + DCHECK(mode == GL_SHARED_IMAGE_ACCESS_MODE_READ_CHROMIUM || + mode == GL_SHARED_IMAGE_ACCESS_MODE_OVERLAY_CHROMIUM); auto* video_backing = static_cast<SharedImageVideo*>(backing()); video_backing->BeginGLReadAccess(); @@ -266,13 +267,10 @@ class SharedImageRepresentationVideoSkiaVk if (!vulkan_image_) { DCHECK(!promise_texture_); - gfx::GpuMemoryBufferHandle gmb_handle( - scoped_hardware_buffer_->TakeBuffer()); - auto* device_queue = - context_state_->vk_context_provider()->GetDeviceQueue(); - vulkan_image_ = VulkanImage::CreateFromGpuMemoryBufferHandle( - device_queue, std::move(gmb_handle), size(), ToVkFormat(format()), - 0 /* usage */); + + vulkan_image_ = + CreateVkImageFromAhbHandle(scoped_hardware_buffer_->TakeBuffer(), + context_state_.get(), size(), format()); if (!vulkan_image_) return nullptr; diff --git a/chromium/gpu/command_buffer/service/shared_memory_region_wrapper.cc b/chromium/gpu/command_buffer/service/shared_memory_region_wrapper.cc new file mode 100644 index 00000000000..213099665fd --- /dev/null +++ b/chromium/gpu/command_buffer/service/shared_memory_region_wrapper.cc @@ -0,0 +1,127 @@ +// Copyright 2020 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
+ +#include "gpu/command_buffer/service/shared_memory_region_wrapper.h" + +#include "base/logging.h" +#include "base/numerics/checked_math.h" +#include "base/system/sys_info.h" +#include "components/viz/common/resources/resource_format_utils.h" +#include "components/viz/common/resources/resource_sizes.h" +#include "ui/gfx/gpu_memory_buffer.h" + +namespace gpu { +namespace { + +// Validate that |stride| will work for pixels with |size| and |format|. +bool ValidateStride(const gfx::Size size, + viz::ResourceFormat format, + int32_t stride) { + if (!base::IsValueInRangeForNumericType<size_t>(stride)) + return false; + + int32_t min_width_in_bytes = 0; + if (!viz::ResourceSizes::MaybeWidthInBytes(size.width(), format, + &min_width_in_bytes)) { + return false; + } + + if (stride < min_width_in_bytes) + return false; + + // Check that stride is a multiple of pixel byte size. + int bits_per_pixel = viz::BitsPerPixel(format); + switch (bits_per_pixel) { + case 64: + case 32: + case 16: + if (stride % (bits_per_pixel / 8) != 0) + return false; + break; + case 8: + case 4: + break; + default: + // YVU420 and YUV_420_BIPLANAR format aren't supported. 
+ NOTREACHED(); + return false; + } + + return true; +} + +} // namespace + +SharedMemoryRegionWrapper::SharedMemoryRegionWrapper() = default; +SharedMemoryRegionWrapper::SharedMemoryRegionWrapper( + SharedMemoryRegionWrapper&& other) = default; +SharedMemoryRegionWrapper& SharedMemoryRegionWrapper::operator=( + SharedMemoryRegionWrapper&& other) = default; +SharedMemoryRegionWrapper::~SharedMemoryRegionWrapper() = default; + +bool SharedMemoryRegionWrapper::Initialize( + const gfx::GpuMemoryBufferHandle& handle, + const gfx::Size& size, + viz::ResourceFormat format) { + DCHECK(!mapping_.IsValid()); + + if (!handle.region.IsValid()) { + DLOG(ERROR) << "Invalid GMB shared memory region."; + return false; + } + + if (!ValidateStride(size, format, handle.stride)) { + DLOG(ERROR) << "Invalid GMB stride."; + return false; + } + + // Minimize the amount of address space we use but make sure offset is a + // multiple of page size as required by MapAt(). + size_t allocation_granularity = base::SysInfo::VMAllocationGranularity(); + size_t memory_offset = handle.offset % allocation_granularity; + size_t map_offset = + allocation_granularity * (handle.offset / allocation_granularity); + + base::CheckedNumeric<size_t> checked_size = handle.stride; + checked_size *= size.height(); + checked_size += memory_offset; + if (!checked_size.IsValid()) { + DLOG(ERROR) << "Invalid GMB size."; + return false; + } + + mapping_ = handle.region.MapAt(static_cast<off_t>(map_offset), + checked_size.ValueOrDie()); + + if (!mapping_.IsValid()) { + DLOG(ERROR) << "Failed to map shared memory."; + return false; + } + + offset_ = memory_offset; + stride_ = handle.stride; + + return true; +} + +bool SharedMemoryRegionWrapper::IsValid() const { + return mapping_.IsValid(); +} + +uint8_t* SharedMemoryRegionWrapper::GetMemory() const { + DCHECK(IsValid()); + return mapping_.GetMemoryAs<uint8_t>() + offset_; +} + +base::span<const uint8_t> SharedMemoryRegionWrapper::GetMemoryAsSpan() const { + 
DCHECK(IsValid()); + return mapping_.GetMemoryAsSpan<const uint8_t>().subspan(offset_); +} + +size_t SharedMemoryRegionWrapper::GetStride() const { + DCHECK(IsValid()); + return stride_; +} + +} // namespace gpu diff --git a/chromium/gpu/command_buffer/service/shared_memory_region_wrapper.h b/chromium/gpu/command_buffer/service/shared_memory_region_wrapper.h new file mode 100644 index 00000000000..280a09b840c --- /dev/null +++ b/chromium/gpu/command_buffer/service/shared_memory_region_wrapper.h @@ -0,0 +1,48 @@ +// Copyright 2020 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#ifndef GPU_COMMAND_BUFFER_SERVICE_SHARED_MEMORY_REGION_WRAPPER_H_ +#define GPU_COMMAND_BUFFER_SERVICE_SHARED_MEMORY_REGION_WRAPPER_H_ + +#include "base/containers/span.h" +#include "base/memory/shared_memory_mapping.h" +#include "components/viz/common/resources/resource_format.h" +#include "ui/gfx/geometry/size.h" + +namespace gfx { +struct GpuMemoryBufferHandle; +} + +namespace gpu { + +// Wrapper for shared memory region from a GpuMemoryBuffer with type +// SHARED_MEMORY_BUFFER. +class SharedMemoryRegionWrapper { + public: + SharedMemoryRegionWrapper(); + SharedMemoryRegionWrapper(SharedMemoryRegionWrapper&& other); + SharedMemoryRegionWrapper& operator=(SharedMemoryRegionWrapper&& other); + ~SharedMemoryRegionWrapper(); + + // Validates that size, stride and format parameters make sense and maps + // memory for shared memory owned by |handle|. Shared memory stays mapped + // until destruction. 
+ bool Initialize(const gfx::GpuMemoryBufferHandle& handle, + const gfx::Size& size, + viz::ResourceFormat format); + + bool IsValid() const; + uint8_t* GetMemory() const; + base::span<const uint8_t> GetMemoryAsSpan() const; + size_t GetStride() const; + + private: + base::WritableSharedMemoryMapping mapping_; + size_t offset_ = 0; + size_t stride_ = 0; +}; + +} // namespace gpu + +#endif // GPU_COMMAND_BUFFER_SERVICE_SHARED_MEMORY_REGION_WRAPPER_H_ diff --git a/chromium/gpu/command_buffer/service/surface_texture_gl_owner.cc b/chromium/gpu/command_buffer/service/surface_texture_gl_owner.cc index 7ee33fbf629..a99a5c4279f 100644 --- a/chromium/gpu/command_buffer/service/surface_texture_gl_owner.cc +++ b/chromium/gpu/command_buffer/service/surface_texture_gl_owner.cc @@ -72,16 +72,6 @@ void SurfaceTextureGLOwner::EnsureTexImageBound() { NOTREACHED(); } -void SurfaceTextureGLOwner::GetTransformMatrix(float mtx[]) { - DCHECK_CALLED_ON_VALID_THREAD(thread_checker_); - // If we don't have a SurfaceTexture, then the matrix doesn't matter. We - // still initialize it for good measure. 
- if (surface_texture_) - surface_texture_->GetTransformMatrix(mtx); - else - memset(mtx, 0, sizeof(mtx[0]) * 16); -} - void SurfaceTextureGLOwner::ReleaseBackBuffers() { DCHECK_CALLED_ON_VALID_THREAD(thread_checker_); if (surface_texture_) @@ -104,12 +94,7 @@ SurfaceTextureGLOwner::GetAHardwareBuffer() { return nullptr; } -gfx::Rect SurfaceTextureGLOwner::GetCropRect() { - NOTREACHED() << "Don't use GetCropRect with SurfaceTextureGLOwner"; - return gfx::Rect(); -} - -void SurfaceTextureGLOwner::GetCodedSizeAndVisibleRect( +bool SurfaceTextureGLOwner::GetCodedSizeAndVisibleRect( gfx::Size rotated_visible_size, gfx::Size* coded_size, gfx::Rect* visible_rect) { @@ -119,7 +104,7 @@ void SurfaceTextureGLOwner::GetCodedSizeAndVisibleRect( if (!surface_texture_) { *visible_rect = gfx::Rect(); *coded_size = gfx::Size(); - return; + return false; } float mtx[16]; @@ -154,6 +139,8 @@ void SurfaceTextureGLOwner::GetCodedSizeAndVisibleRect( base::debug::DumpWithoutCrashing(); } + + return true; } // static diff --git a/chromium/gpu/command_buffer/service/surface_texture_gl_owner.h b/chromium/gpu/command_buffer/service/surface_texture_gl_owner.h index 80d23de9035..d1ecf45dab2 100644 --- a/chromium/gpu/command_buffer/service/surface_texture_gl_owner.h +++ b/chromium/gpu/command_buffer/service/surface_texture_gl_owner.h @@ -33,12 +33,10 @@ class GPU_GLES2_EXPORT SurfaceTextureGLOwner : public TextureOwner { gl::ScopedJavaSurface CreateJavaSurface() const override; void UpdateTexImage() override; void EnsureTexImageBound() override; - void GetTransformMatrix(float mtx[16]) override; void ReleaseBackBuffers() override; std::unique_ptr<base::android::ScopedHardwareBufferFenceSync> GetAHardwareBuffer() override; - gfx::Rect GetCropRect() override; - void GetCodedSizeAndVisibleRect(gfx::Size rotated_visible_size, + bool GetCodedSizeAndVisibleRect(gfx::Size rotated_visible_size, gfx::Size* coded_size, gfx::Rect* visible_rect) override; diff --git 
a/chromium/gpu/command_buffer/service/sync_point_manager.h b/chromium/gpu/command_buffer/service/sync_point_manager.h index c6fe88dc0be..496840b4448 100644 --- a/chromium/gpu/command_buffer/service/sync_point_manager.h +++ b/chromium/gpu/command_buffer/service/sync_point_manager.h @@ -15,7 +15,7 @@ #include "base/atomic_sequence_num.h" #include "base/callback.h" -#include "base/logging.h" +#include "base/check.h" #include "base/macros.h" #include "base/memory/ref_counted.h" #include "base/synchronization/condition_variable.h" diff --git a/chromium/gpu/command_buffer/service/test_shared_image_backing.cc b/chromium/gpu/command_buffer/service/test_shared_image_backing.cc index d07cfd627d2..290bf973958 100644 --- a/chromium/gpu/command_buffer/service/test_shared_image_backing.cc +++ b/chromium/gpu/command_buffer/service/test_shared_image_backing.cc @@ -146,10 +146,10 @@ TestSharedImageBacking::TestSharedImageBacking( texture_ = new gles2::Texture(service_id_); texture_->SetLightweightRef(); texture_->SetTarget(GL_TEXTURE_2D, 1); - texture_->sampler_state_.min_filter = GL_LINEAR; - texture_->sampler_state_.mag_filter = GL_LINEAR; - texture_->sampler_state_.wrap_s = GL_CLAMP_TO_EDGE; - texture_->sampler_state_.wrap_t = GL_CLAMP_TO_EDGE; + texture_->set_min_filter(GL_LINEAR); + texture_->set_mag_filter(GL_LINEAR); + texture_->set_wrap_t(GL_CLAMP_TO_EDGE); + texture_->set_wrap_s(GL_CLAMP_TO_EDGE); texture_->SetLevelInfo(GL_TEXTURE_2D, 0, GLInternalFormat(format), size.width(), size.height(), 1, 0, GLDataFormat(format), GLDataType(format), gfx::Rect()); diff --git a/chromium/gpu/command_buffer/service/texture_manager.h b/chromium/gpu/command_buffer/service/texture_manager.h index 0fece4ee111..56ffa63b427 100644 --- a/chromium/gpu/command_buffer/service/texture_manager.h +++ b/chromium/gpu/command_buffer/service/texture_manager.h @@ -35,24 +35,7 @@ class ProgressReporter; namespace gpu { class DecoderContext; -class ExternalVkImageBacking; -class 
ExternalVkImageGlRepresentation; class ServiceDiscardableManager; -class SharedImageBackingGLTexture; -class SharedImageBackingFactoryGLTexture; -class SharedImageBackingAHB; -class SharedImageBackingEglImage; -class SharedImageRepresentationGLTexture; -class SharedImageRepresentationEglImageGLTexture; -class SharedImageRepresentationGLTextureAHB; -class SharedImageRepresentationSkiaGLAHB; -class SharedImageBackingIOSurface; -class SharedImageRepresentationGLTextureIOSurface; -class SharedImageRepresentationSkiaIOSurface; -class SharedImageRepresentationGLOzone; -class SharedImageVideo; -class StreamTexture; -class TestSharedImageBacking; namespace gles2 { class GLStreamTextureImage; @@ -187,6 +170,28 @@ class GPU_GLES2_EXPORT Texture final : public TextureBase { GLenum alpha; }; + struct LevelInfo { + LevelInfo(); + LevelInfo(const LevelInfo& rhs); + ~LevelInfo(); + + gfx::Rect cleared_rect; + GLenum target = 0; + GLint level = -1; + GLenum internal_format = 0; + GLsizei width = 0; + GLsizei height = 0; + GLsizei depth = 0; + GLint border = 0; + GLenum format = 0; + GLenum type = 0; + scoped_refptr<gl::GLImage> image; + scoped_refptr<GLStreamTextureImage> stream_texture_image; + ImageState image_state = UNBOUND; + uint32_t estimated_size = 0; + bool internal_workaround = false; + }; + explicit Texture(GLuint service_id); // TextureBase implementation: @@ -198,22 +203,36 @@ class GPU_GLES2_EXPORT Texture final : public TextureBase { return sampler_state_; } + void set_min_filter(GLenum min_filter) { + sampler_state_.min_filter = min_filter; + } + GLenum min_filter() const { return sampler_state_.min_filter; } + void set_mag_filter(GLenum mag_filter) { + sampler_state_.mag_filter = mag_filter; + } + GLenum mag_filter() const { return sampler_state_.mag_filter; } + void set_wrap_r(GLenum wrap_r) { sampler_state_.wrap_r = wrap_r; } + GLenum wrap_r() const { return sampler_state_.wrap_r; } + void set_wrap_s(GLenum wrap_s) { sampler_state_.wrap_s = wrap_s; } + GLenum 
wrap_s() const { return sampler_state_.wrap_s; } + void set_wrap_t(GLenum wrap_t) { sampler_state_.wrap_t = wrap_t; } + GLenum wrap_t() const { return sampler_state_.wrap_t; } @@ -429,26 +448,43 @@ class GPU_GLES2_EXPORT Texture final : public TextureBase { // Returns GL_NONE on error. GLenum GetInternalFormatOfBaseLevel() const; + void SetLightweightRef(); + + void RemoveLightweightRef(bool have_context); + + // Set the info for a particular level. + void SetLevelInfo(GLenum target, + GLint level, + GLenum internal_format, + GLsizei width, + GLsizei height, + GLsizei depth, + GLint border, + GLenum format, + GLenum type, + const gfx::Rect& cleared_rect); + + // Returns the LevelInfo for |target| and |level| if it's set, else nullptr. + const LevelInfo* GetLevelInfo(GLint target, GLint level) const; + + // Sets the Texture's target + // Parameters: + // target: GL_TEXTURE_2D or GL_TEXTURE_CUBE_MAP or + // GL_TEXTURE_EXTERNAL_OES or GL_TEXTURE_RECTANGLE_ARB + // GL_TEXTURE_2D_ARRAY or GL_TEXTURE_3D (for GLES3) + // max_levels: The maximum levels this type of target can have. 
+ void SetTarget(GLenum target, GLint max_levels); + + void SetCompatibilitySwizzle(const CompatibilitySwizzle* swizzle); + + bool NeedsMips() const { + return sampler_state_.min_filter != GL_NEAREST && + sampler_state_.min_filter != GL_LINEAR; + } + private: friend class MailboxManagerSync; friend class MailboxManagerTest; - friend class gpu::ExternalVkImageBacking; - friend class gpu::ExternalVkImageGlRepresentation; - friend class gpu::SharedImageVideo; - friend class gpu::SharedImageBackingGLTexture; - friend class gpu::SharedImageBackingFactoryGLTexture; - friend class gpu::SharedImageBackingAHB; - friend class gpu::SharedImageBackingEglImage; - friend class gpu::SharedImageRepresentationGLTextureAHB; - friend class gpu::SharedImageRepresentationEglImageGLTexture; - friend class gpu::SharedImageRepresentationSkiaGLAHB; - friend class gpu::SharedImageBackingIOSurface; - friend class gpu::SharedImageRepresentationGLTextureIOSurface; - friend class gpu::SharedImageRepresentationSkiaIOSurface; - friend class gpu::SharedImageRepresentationGLOzone; - friend class gpu::StreamTexture; - friend class gpu::TestSharedImageBacking; - friend class AbstractTextureImplOnSharedContext; friend class TextureDefinition; friend class TextureManager; friend class TextureRef; @@ -458,8 +494,6 @@ class GPU_GLES2_EXPORT Texture final : public TextureBase { ~Texture() override; void AddTextureRef(TextureRef* ref); void RemoveTextureRef(TextureRef* ref, bool have_context); - void SetLightweightRef(); - void RemoveLightweightRef(bool have_context); void MaybeDeleteThis(bool have_context); // Condition on which this texture is renderable. 
Can be ONLY_IF_NPOT if it @@ -474,28 +508,6 @@ class GPU_GLES2_EXPORT Texture final : public TextureBase { CAN_RENDER_NEEDS_VALIDATION, }; - struct LevelInfo { - LevelInfo(); - LevelInfo(const LevelInfo& rhs); - ~LevelInfo(); - - gfx::Rect cleared_rect; - GLenum target; - GLint level; - GLenum internal_format; - GLsizei width; - GLsizei height; - GLsizei depth; - GLint border; - GLenum format; - GLenum type; - scoped_refptr<gl::GLImage> image; - scoped_refptr<GLStreamTextureImage> stream_texture_image; - ImageState image_state; - uint32_t estimated_size; - bool internal_workaround; - }; - struct FaceInfo { FaceInfo(); FaceInfo(const FaceInfo& other); @@ -514,23 +526,9 @@ class GPU_GLES2_EXPORT Texture final : public TextureBase { GLStreamTextureImage* stream_texture_image, ImageState state); - // Returns the LevelInfo for |target| and |level| if it's set, else NULL. - const LevelInfo* GetLevelInfo(GLint target, GLint level) const; // Returns NULL if the base level is not defined. const LevelInfo* GetBaseLevelInfo() const; - // Set the info for a particular level. - void SetLevelInfo(GLenum target, - GLint level, - GLenum internal_format, - GLsizei width, - GLsizei height, - GLsizei depth, - GLint border, - GLenum format, - GLenum type, - const gfx::Rect& cleared_rect); - // Causes us to report |service_id| as our service id, but does not delete // it when we are destroyed. Will rebind any OES_EXTERNAL texture units to // our new service id in all contexts. If |service_id| is zero, then we @@ -573,11 +571,6 @@ class GPU_GLES2_EXPORT Texture final : public TextureBase { // Makes each of the mip levels as though they were generated. void MarkMipmapsGenerated(); - bool NeedsMips() const { - return sampler_state_.min_filter != GL_NEAREST && - sampler_state_.min_filter != GL_LINEAR; - } - // True if this texture meets all the GLES2 criteria for rendering. // See section 3.8.2 of the GLES2 spec. 
bool CanRender(const FeatureInfo* feature_info) const; @@ -618,14 +611,6 @@ class GPU_GLES2_EXPORT Texture final : public TextureBase { GLenum type, bool immutable); - // Sets the Texture's target - // Parameters: - // target: GL_TEXTURE_2D or GL_TEXTURE_CUBE_MAP or - // GL_TEXTURE_EXTERNAL_OES or GL_TEXTURE_RECTANGLE_ARB - // GL_TEXTURE_2D_ARRAY or GL_TEXTURE_3D (for GLES3) - // max_levels: The maximum levels this type of target can have. - void SetTarget(GLenum target, GLint max_levels); - // Update info about this texture. void Update(); @@ -682,7 +667,6 @@ class GPU_GLES2_EXPORT Texture final : public TextureBase { GLuint owned_service_id() const { return owned_service_id_; } GLenum GetCompatibilitySwizzleForChannel(GLenum channel); - void SetCompatibilitySwizzle(const CompatibilitySwizzle* swizzle); // Info about each face and level of texture. std::vector<FaceInfo> face_infos_; diff --git a/chromium/gpu/command_buffer/service/texture_owner.h b/chromium/gpu/command_buffer/service/texture_owner.h index 8f0fced6b6a..81bd88c1bda 100644 --- a/chromium/gpu/command_buffer/service/texture_owner.h +++ b/chromium/gpu/command_buffer/service/texture_owner.h @@ -83,7 +83,6 @@ class GPU_GLES2_EXPORT TextureOwner virtual void EnsureTexImageBound() = 0; // Transformation matrix if any associated with the texture image. - virtual void GetTransformMatrix(float mtx[16]) = 0; virtual void ReleaseBackBuffers() = 0; // Retrieves the AHardwareBuffer from the latest available image data. @@ -92,10 +91,6 @@ class GPU_GLES2_EXPORT TextureOwner virtual std::unique_ptr<base::android::ScopedHardwareBufferFenceSync> GetAHardwareBuffer() = 0; - // Provides the crop rectangle associated with the most recent image. The - // crop rectangle specifies the region of valid pixels in the image. - virtual gfx::Rect GetCropRect() = 0; - // Retrieves backing size and visible rect associated with the most recent // image. 
|rotated_visible_size| is the size of the visible region // post-transform in pixels and is used for SurfaceTexture case. Transform @@ -103,7 +98,8 @@ class GPU_GLES2_EXPORT TextureOwner // expect to have rotation and MediaPlayer reports rotated size. For // MediaCodec we don't expect rotation in ST so visible_size (i.e crop rect // from codec) can be used. - virtual void GetCodedSizeAndVisibleRect(gfx::Size rotated_visible_size, + // Returns whether call was successful or not. + virtual bool GetCodedSizeAndVisibleRect(gfx::Size rotated_visible_size, gfx::Size* coded_size, gfx::Rect* visible_rect) = 0; diff --git a/chromium/gpu/command_buffer/service/vertex_array_manager.h b/chromium/gpu/command_buffer/service/vertex_array_manager.h index 2053fb2342b..cc1f84a8c79 100644 --- a/chromium/gpu/command_buffer/service/vertex_array_manager.h +++ b/chromium/gpu/command_buffer/service/vertex_array_manager.h @@ -9,7 +9,6 @@ #include <unordered_map> -#include "base/logging.h" #include "base/macros.h" #include "base/memory/ref_counted.h" #include "gpu/command_buffer/service/gl_utils.h" diff --git a/chromium/gpu/command_buffer/service/vertex_attrib_manager.h b/chromium/gpu/command_buffer/service/vertex_attrib_manager.h index 044d0255992..0bdb52371b7 100644 --- a/chromium/gpu/command_buffer/service/vertex_attrib_manager.h +++ b/chromium/gpu/command_buffer/service/vertex_attrib_manager.h @@ -10,7 +10,7 @@ #include <list> #include <vector> -#include "base/logging.h" +#include "base/check_op.h" #include "base/memory/ref_counted.h" #include "build/build_config.h" #include "gpu/command_buffer/service/buffer_manager.h" diff --git a/chromium/gpu/command_buffer/service/webgpu_decoder_impl.cc b/chromium/gpu/command_buffer/service/webgpu_decoder_impl.cc index a4c8530dfd8..514e52a4d78 100644 --- a/chromium/gpu/command_buffer/service/webgpu_decoder_impl.cc +++ b/chromium/gpu/command_buffer/service/webgpu_decoder_impl.cc @@ -435,10 +435,7 @@ class WebGPUDecoderImpl final : public 
WebGPUDecoder { const volatile void* buffer, int num_entries, int* entries_processed) override; - base::StringPiece GetLogPrefix() override { - NOTIMPLEMENTED(); - return ""; - } + base::StringPiece GetLogPrefix() override { return "WebGPUDecoderImpl"; } void BindImage(uint32_t client_texture_id, uint32_t texture_target, gl::GLImage* image, diff --git a/chromium/gpu/command_buffer/service/wrapped_sk_image.cc b/chromium/gpu/command_buffer/service/wrapped_sk_image.cc index 591d0923180..7cd628f1325 100644 --- a/chromium/gpu/command_buffer/service/wrapped_sk_image.cc +++ b/chromium/gpu/command_buffer/service/wrapped_sk_image.cc @@ -17,6 +17,7 @@ #include "gpu/command_buffer/service/shared_context_state.h" #include "gpu/command_buffer/service/shared_image_backing.h" #include "gpu/command_buffer/service/shared_image_representation.h" +#include "gpu/command_buffer/service/shared_memory_region_wrapper.h" #include "gpu/command_buffer/service/skia_utils.h" #include "skia/buildflags.h" #include "third_party/skia/include/core/SkCanvas.h" @@ -25,6 +26,7 @@ #include "third_party/skia/include/core/SkSurfaceProps.h" #include "third_party/skia/include/gpu/GrBackendSurface.h" #include "third_party/skia/include/gpu/GrTypes.h" +#include "ui/gfx/buffer_format_util.h" #include "ui/gl/gl_context.h" #include "ui/gl/gl_gl_api_implementation.h" #include "ui/gl/trace_util.h" @@ -39,6 +41,13 @@ namespace raster { namespace { +SkImageInfo MakeSkImageInfo(const gfx::Size& size, viz::ResourceFormat format) { + return SkImageInfo::Make(size.width(), size.height(), + ResourceFormatToClosestSkColorType( + /*gpu_compositing=*/true, format), + kOpaque_SkAlphaType); +} + class WrappedSkImage : public ClearTrackingSharedImageBacking { public: ~WrappedSkImage() override { @@ -59,7 +68,24 @@ class WrappedSkImage : public ClearTrackingSharedImageBacking { return false; } - void Update(std::unique_ptr<gfx::GpuFence> in_fence) override {} + void Update(std::unique_ptr<gfx::GpuFence> in_fence) override { + 
if (shared_memory_wrapper_.IsValid()) { + DCHECK(!in_fence); + + if (context_state_->context_lost()) + return; + + DCHECK(context_state_->IsCurrent(nullptr)); + + SkImageInfo info = MakeSkImageInfo(size(), format()); + SkPixmap pixmap(info, shared_memory_wrapper_.GetMemory(), + shared_memory_wrapper_.GetStride()); + if (!context_state_->gr_context()->updateBackendTexture( + backend_texture_, &pixmap, /*levels=*/1, nullptr, nullptr)) { + DLOG(ERROR) << "Failed to update WrappedSkImage texture"; + } + } + } void OnMemoryDump(const std::string& dump_name, base::trace_event::MemoryAllocatorDump* dump, @@ -138,11 +164,28 @@ class WrappedSkImage : public ClearTrackingSharedImageBacking { DCHECK(!!context_state_); } - bool Initialize(const SkImageInfo& info, base::span<const uint8_t> data) { + bool InitializeGMB(const SkImageInfo& info, + SharedMemoryRegionWrapper shm_wrapper) { + if (Initialize(info, shm_wrapper.GetMemoryAsSpan(), + shm_wrapper.GetStride())) { + shared_memory_wrapper_ = std::move(shm_wrapper); + return true; + } + return false; + } + + // |pixels| optionally contains pixel data to upload to the texture. If pixel + // data is provided and the image format is not ETC1 then |stride| is used. If + // |stride| is non-zero then it's used as the stride, otherwise + // SkImageInfo::minRowBytes() is used for the stride. For ETC1 textures pixel + // data must be provided since updating compressed textures is not supported. 
+ bool Initialize(const SkImageInfo& info, + base::span<const uint8_t> pixels, + size_t stride) { if (context_state_->context_lost()) return false; - DCHECK(context_state_->IsCurrent(nullptr)); + DCHECK(context_state_->IsCurrent(nullptr)); context_state_->set_need_context_state_reset(true); #if BUILDFLAG(ENABLE_VULKAN) @@ -156,33 +199,30 @@ class WrappedSkImage : public ClearTrackingSharedImageBacking { auto is_protected = GrProtected::kNo; #endif - if (!data.empty()) { + if (pixels.data()) { if (format() == viz::ResourceFormat::ETC1) { backend_texture_ = context_state_->gr_context()->createCompressedBackendTexture( size().width(), size().height(), SkImage::kETC1_CompressionType, - data.data(), data.size(), GrMipMapped::kNo, is_protected); + pixels.data(), pixels.size(), GrMipMapped::kNo, is_protected); } else { - SkBitmap bitmap; - if (!bitmap.installPixels(info, const_cast<uint8_t*>(data.data()), - info.minRowBytes())) { - return false; - } + if (!stride) + stride = info.minRowBytes(); + SkPixmap pixmap(info, pixels.data(), stride); backend_texture_ = context_state_->gr_context()->createBackendTexture( - bitmap.pixmap(), GrRenderable::kNo, is_protected); + pixmap, GrRenderable::kNo, is_protected); } if (!backend_texture_.isValid()) return false; SetCleared(); - OnWriteSucceeded(); } else { + DCHECK_NE(format(), viz::ResourceFormat::ETC1); +#if DCHECK_IS_ON() // Initializing to bright green makes it obvious if the pixels are not // properly set before they are displayed (e.g. https://crbug.com/956555). // We don't do this on release builds because there is a slight overhead. 
- -#if DCHECK_IS_ON() backend_texture_ = context_state_->gr_context()->createBackendTexture( size().width(), size().height(), GetSkColorType(), SkColors::kBlue, GrMipMapped::kNo, GrRenderable::kYes, is_protected); @@ -191,12 +231,12 @@ class WrappedSkImage : public ClearTrackingSharedImageBacking { size().width(), size().height(), GetSkColorType(), GrMipMapped::kNo, GrRenderable::kYes, is_protected); #endif - } - if (!backend_texture_.isValid()) { - DLOG(ERROR) << "createBackendTexture() failed with SkColorType:" - << GetSkColorType(); - return false; + if (!backend_texture_.isValid()) { + DLOG(ERROR) << "createBackendTexture() failed with SkColorType:" + << GetSkColorType(); + return false; + } } promise_texture_ = SkPromiseImageTexture::Make(backend_texture_); @@ -236,6 +276,9 @@ class WrappedSkImage : public ClearTrackingSharedImageBacking { sk_sp<SkPromiseImageTexture> promise_texture_; int surface_msaa_count_ = 0; + // Set for shared memory GMB. + SharedMemoryRegionWrapper shared_memory_wrapper_; + uint64_t tracing_id_ = 0; DISALLOW_COPY_AND_ASSIGN(WrappedSkImage); @@ -324,15 +367,12 @@ std::unique_ptr<SharedImageBacking> WrappedSkImageFactory::CreateSharedImage( const gfx::ColorSpace& color_space, uint32_t usage, base::span<const uint8_t> data) { - auto info = SkImageInfo::Make(size.width(), size.height(), - ResourceFormatToClosestSkColorType( - /*gpu_compositing=*/true, format), - kOpaque_SkAlphaType); + auto info = MakeSkImageInfo(size, format); size_t estimated_size = info.computeMinByteSize(); std::unique_ptr<WrappedSkImage> texture( new WrappedSkImage(mailbox, format, size, color_space, usage, estimated_size, context_state_)); - if (!texture->Initialize(info, data)) + if (!texture->Initialize(info, data, /*stride=*/0)) return nullptr; return texture; } @@ -346,13 +386,41 @@ std::unique_ptr<SharedImageBacking> WrappedSkImageFactory::CreateSharedImage( const gfx::Size& size, const gfx::ColorSpace& color_space, uint32_t usage) { - NOTREACHED(); - return 
nullptr; + DCHECK_EQ(handle.type, gfx::SHARED_MEMORY_BUFFER); + + if (!gpu::IsImageSizeValidForGpuMemoryBufferFormat(size, buffer_format)) { + DLOG(ERROR) << "Invalid image size for format."; + return nullptr; + } + + if (gfx::NumberOfPlanesForLinearBufferFormat(buffer_format) != 1) { + DLOG(ERROR) << "Invalid image format."; + return nullptr; + } + + viz::ResourceFormat format = viz::GetResourceFormat(buffer_format); + + // The Skia API to handle compressed texture is limited and not compatible + // with updating the texture or custom strides. + DCHECK_NE(format, viz::ResourceFormat::ETC1); + + SharedMemoryRegionWrapper shm_wrapper; + if (!shm_wrapper.Initialize(handle, size, format)) + return nullptr; + + auto info = MakeSkImageInfo(size, format); + std::unique_ptr<WrappedSkImage> texture( + new WrappedSkImage(mailbox, format, size, color_space, usage, + info.computeMinByteSize(), context_state_)); + if (!texture->InitializeGMB(info, std::move(shm_wrapper))) + return nullptr; + + return texture; } bool WrappedSkImageFactory::CanImportGpuMemoryBuffer( gfx::GpuMemoryBufferType memory_buffer_type) { - return false; + return memory_buffer_type == gfx::SHARED_MEMORY_BUFFER; } std::unique_ptr<SharedImageRepresentationSkia> WrappedSkImage::ProduceSkia( |