author | Allan Sandfeld Jensen <allan.jensen@qt.io> | 2020-03-11 11:32:04 +0100
committer | Allan Sandfeld Jensen <allan.jensen@qt.io> | 2020-03-18 13:40:17 +0000
commit | 31ccca0778db85c159634478b4ec7997f6704860
tree | 3d33fc3afd9d5ec95541e1bbe074a9cf8da12a0e /chromium/gpu
parent | 248b70b82a40964d5594eb04feca0fa36716185d
download | qtwebengine-chromium-31ccca0778db85c159634478b4ec7997f6704860.tar.gz
BASELINE: Update Chromium to 80.0.3987.136
Change-Id: I98e1649aafae85ba3a83e67af00bb27ef301db7b
Reviewed-by: Jüri Valdmann <juri.valdmann@qt.io>
Diffstat (limited to 'chromium/gpu')
244 files changed, 9904 insertions, 3834 deletions
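Before the per-file diff, a quick illustration of the headline client-API addition in this drop: the WEBGL_multi_draw_instanced_base_vertex_base_instance entry points. This is a minimal hedged sketch, not code from the commit; it assumes a current command-buffer GLES2 context with the extension's feature flag enabled and real vertex/index buffers bound (the implementation rejects client-side arrays), and the include paths are illustrative.

#include <GLES2/gl2.h>          // core GLES2 types (illustrative path)
#include <GLES2/gl2chromium.h>  // maps gl* names via GLES2_GET_FUN (illustrative path)

// Issues two instanced ranges in one call, each starting at its own base
// instance, matching the signature added to gl2chromium_autogen.h below.
void DrawRangesWithBaseInstances() {
  const GLint firsts[] = {0, 36};
  const GLsizei counts[] = {36, 36};
  const GLsizei instance_counts[] = {4, 8};
  const GLuint baseinstances[] = {0, 4};
  glMultiDrawArraysInstancedBaseInstanceWEBGL(
      GL_TRIANGLES, firsts, counts, instance_counts, baseinstances,
      /*drawcount=*/2);
}

// Indexed variant: offsets are byte offsets into the bound element array
// buffer, and each range may also rebase its vertices.
void DrawIndexedRangesWithBases() {
  const GLsizei counts[] = {36, 36};
  const GLsizei offsets[] = {0, 36 * static_cast<GLsizei>(sizeof(GLushort))};
  const GLsizei instance_counts[] = {4, 8};
  const GLint basevertices[] = {0, 24};
  const GLuint baseinstances[] = {0, 4};
  glMultiDrawElementsInstancedBaseVertexBaseInstanceWEBGL(
      GL_TRIANGLES, counts, GL_UNSIGNED_SHORT, offsets, instance_counts,
      basevertices, baseinstances, /*drawcount=*/2);
}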
diff --git a/chromium/gpu/BUILD.gn b/chromium/gpu/BUILD.gn index 8bebff79ab1..8d9d5eb6be5 100644 --- a/chromium/gpu/BUILD.gn +++ b/chromium/gpu/BUILD.gn @@ -181,6 +181,10 @@ jumbo_static_library("test_support") { "command_buffer/service/error_state_mock.h", "command_buffer/service/gles2_cmd_decoder_mock.cc", "command_buffer/service/gles2_cmd_decoder_mock.h", + "command_buffer/service/mocks.cc", + "command_buffer/service/mocks.h", + "command_buffer/service/test_helper.cc", + "command_buffer/service/test_helper.h", "ipc/raster_in_process_context.cc", "ipc/raster_in_process_context.h", "ipc/service/gpu_memory_buffer_factory_test_template.h", @@ -206,6 +210,7 @@ jumbo_static_library("test_support") { "//gpu/skia_bindings:skia_bindings", "//testing/gmock", "//testing/gtest", + "//third_party/angle:translator", "//ui/gfx:test_support", "//ui/gl:gl_unittest_utils", "//ui/gl:test_support", @@ -290,7 +295,6 @@ test("gl_tests") { "command_buffer/service/shared_image_manager_unittest.cc", "command_buffer/tests/compressed_texture_test.cc", "command_buffer/tests/es3_misc_functions_unittest.cc", - "command_buffer/tests/gl_apply_screen_space_antialiasing_CHROMIUM_unittest.cc", "command_buffer/tests/gl_bgra_mipmap_unittest.cc", "command_buffer/tests/gl_bind_uniform_location_unittest.cc", "command_buffer/tests/gl_chromium_framebuffer_mixed_samples_unittest.cc", @@ -348,6 +352,7 @@ test("gl_tests") { if (use_dawn) { sources += [ + "command_buffer/service/webgpu_decoder_unittest.cc", "command_buffer/tests/webgpu_fence_unittest.cc", "command_buffer/tests/webgpu_mailbox_unittest.cc", "command_buffer/tests/webgpu_test.cc", @@ -393,10 +398,6 @@ test("gl_tests") { deps += [ "//ui/ozone" ] } - if (use_dawn) { - deps += [ "//third_party/dawn/src/dawn:libdawn" ] - } - libs = [] if (is_android) { @@ -419,7 +420,8 @@ test("gl_tests") { if (use_dawn) { deps += [ "//third_party/dawn:libdawn_native", - "//third_party/dawn/src/dawn:libdawn", + "//third_party/dawn/src/dawn:dawncpp", + "//third_party/dawn/src/dawn:libdawn_proc", ] } } @@ -511,8 +513,6 @@ test("gpu_unittests") { "command_buffer/service/indexed_buffer_binding_host_unittest.cc", "command_buffer/service/mailbox_manager_unittest.cc", "command_buffer/service/memory_program_cache_unittest.cc", - "command_buffer/service/mocks.cc", - "command_buffer/service/mocks.h", "command_buffer/service/multi_draw_manager_unittest.cc", "command_buffer/service/passthrough_program_cache_unittest.cc", "command_buffer/service/path_manager_unittest.cc", @@ -533,8 +533,6 @@ test("gpu_unittests") { "command_buffer/service/shader_translator_unittest.cc", "command_buffer/service/shared_context_state_unittest.cc", "command_buffer/service/sync_point_manager_unittest.cc", - "command_buffer/service/test_helper.cc", - "command_buffer/service/test_helper.h", "command_buffer/service/texture_manager_unittest.cc", "command_buffer/service/transfer_buffer_manager_unittest.cc", "command_buffer/service/transform_feedback_manager_unittest.cc", @@ -558,6 +556,7 @@ test("gpu_unittests") { "config/gpu_test_expectations_parser_unittest.cc", "config/gpu_util_unittest.cc", "ipc/client/command_buffer_proxy_impl_unittest.cc", + "ipc/client/image_decode_accelerator_proxy_unittest.cc", "ipc/common/gpu_memory_buffer_impl_shared_memory_unittest.cc", "ipc/common/gpu_memory_buffer_impl_test_template.h", "ipc/common/mojom_traits_unittest.cc", @@ -575,10 +574,6 @@ test("gpu_unittests") { sources += [ "ipc/service/image_decode_accelerator_stub_unittest.cc" ] } - if (use_dawn) { - sources += [ 
"command_buffer/service/webgpu_decoder_unittest.cc" ] - } - if (is_mac) { sources += [ "ipc/common/gpu_memory_buffer_impl_io_surface_unittest.cc", diff --git a/chromium/gpu/GLES2/gl2chromium_autogen.h b/chromium/gpu/GLES2/gl2chromium_autogen.h index f5c24e009ed..d3bcadd5c69 100644 --- a/chromium/gpu/GLES2/gl2chromium_autogen.h +++ b/chromium/gpu/GLES2/gl2chromium_autogen.h @@ -171,9 +171,13 @@ #define glMultiDrawArraysWEBGL GLES2_GET_FUN(MultiDrawArraysWEBGL) #define glMultiDrawArraysInstancedWEBGL \ GLES2_GET_FUN(MultiDrawArraysInstancedWEBGL) +#define glMultiDrawArraysInstancedBaseInstanceWEBGL \ + GLES2_GET_FUN(MultiDrawArraysInstancedBaseInstanceWEBGL) #define glMultiDrawElementsWEBGL GLES2_GET_FUN(MultiDrawElementsWEBGL) #define glMultiDrawElementsInstancedWEBGL \ GLES2_GET_FUN(MultiDrawElementsInstancedWEBGL) +#define glMultiDrawElementsInstancedBaseVertexBaseInstanceWEBGL \ + GLES2_GET_FUN(MultiDrawElementsInstancedBaseVertexBaseInstanceWEBGL) #define glStencilFunc GLES2_GET_FUN(StencilFunc) #define glStencilFuncSeparate GLES2_GET_FUN(StencilFuncSeparate) #define glStencilMask GLES2_GET_FUN(StencilMask) @@ -318,7 +322,11 @@ #define glCopyTextureCHROMIUM GLES2_GET_FUN(CopyTextureCHROMIUM) #define glCopySubTextureCHROMIUM GLES2_GET_FUN(CopySubTextureCHROMIUM) #define glDrawArraysInstancedANGLE GLES2_GET_FUN(DrawArraysInstancedANGLE) +#define glDrawArraysInstancedBaseInstanceANGLE \ + GLES2_GET_FUN(DrawArraysInstancedBaseInstanceANGLE) #define glDrawElementsInstancedANGLE GLES2_GET_FUN(DrawElementsInstancedANGLE) +#define glDrawElementsInstancedBaseVertexBaseInstanceANGLE \ + GLES2_GET_FUN(DrawElementsInstancedBaseVertexBaseInstanceANGLE) #define glVertexAttribDivisorANGLE GLES2_GET_FUN(VertexAttribDivisorANGLE) #define glProduceTextureDirectCHROMIUM \ GLES2_GET_FUN(ProduceTextureDirectCHROMIUM) @@ -387,8 +395,6 @@ #define glCoverageModulationCHROMIUM GLES2_GET_FUN(CoverageModulationCHROMIUM) #define glGetGraphicsResetStatusKHR GLES2_GET_FUN(GetGraphicsResetStatusKHR) #define glBlendBarrierKHR GLES2_GET_FUN(BlendBarrierKHR) -#define glApplyScreenSpaceAntialiasingCHROMIUM \ - GLES2_GET_FUN(ApplyScreenSpaceAntialiasingCHROMIUM) #define glBindFragDataLocationIndexedEXT \ GLES2_GET_FUN(BindFragDataLocationIndexedEXT) #define glBindFragDataLocationEXT GLES2_GET_FUN(BindFragDataLocationEXT) diff --git a/chromium/gpu/angle_deqp_tests_main.cc b/chromium/gpu/angle_deqp_tests_main.cc index 7567b6a7eb2..2c1351a6821 100644 --- a/chromium/gpu/angle_deqp_tests_main.cc +++ b/chromium/gpu/angle_deqp_tests_main.cc @@ -35,8 +35,9 @@ int main(int argc, char** argv) { angle::InitTestHarness(&argc, argv); base::TestSuite test_suite(argc, argv); - // The process priority is lowered by the constructor of tcu::ANGLEPlatform(). - test_suite.DisableCheckForProcessPriority(); + // The process and thread priorities are modified by + // StabilizeCPUForBenchmarking()/SetLowPriorityProcess(). + test_suite.DisableCheckForThreadAndProcessPriority(); int rt = base::LaunchUnitTestsSerially( argc, argv, base::BindOnce(&RunHelper, base::Unretained(&test_suite))); diff --git a/chromium/gpu/angle_perftests_main.cc b/chromium/gpu/angle_perftests_main.cc index 341410b0385..09a97d4e8da 100644 --- a/chromium/gpu/angle_perftests_main.cc +++ b/chromium/gpu/angle_perftests_main.cc @@ -27,6 +27,10 @@ int main(int argc, char** argv) { ANGLEProcessPerfTestArgs(&argc, argv); base::TestSuite test_suite(argc, argv); + + // The thread priority is modified by StabilizeCPUForBenchmarking(). 
+ test_suite.DisableCheckForThreadAndProcessPriority(); + int rt = base::LaunchUnitTestsSerially( argc, argv, base::BindOnce(&RunHelper, base::Unretained(&test_suite))); return rt; diff --git a/chromium/gpu/angle_unittest_main.cc b/chromium/gpu/angle_unittest_main.cc index f41877188f5..1c2199058ba 100644 --- a/chromium/gpu/angle_unittest_main.cc +++ b/chromium/gpu/angle_unittest_main.cc @@ -24,7 +24,10 @@ int main(int argc, char** argv) { base::CommandLine::Init(argc, argv); testing::InitGoogleMock(&argc, argv); sh::Initialize(); + base::TestSuite test_suite(argc, argv); + test_suite.DisableCheckForThreadAndProcessPriority(); + int rt = base::LaunchUnitTestsSerially( argc, argv, base::BindOnce(&RunHelper, base::Unretained(&test_suite))); sh::Finalize(); diff --git a/chromium/gpu/command_buffer/build_gles2_cmd_buffer.py b/chromium/gpu/command_buffer/build_gles2_cmd_buffer.py index 14c5df3f80d..7705e56fc3f 100755 --- a/chromium/gpu/command_buffer/build_gles2_cmd_buffer.py +++ b/chromium/gpu/command_buffer/build_gles2_cmd_buffer.py @@ -1765,13 +1765,6 @@ _FUNCTION_INFO = { 'impl_func': False, 'client_test': False, }, - 'ApplyScreenSpaceAntialiasingCHROMIUM': { - 'decoder_func': 'DoApplyScreenSpaceAntialiasingCHROMIUM', - 'extension': 'CHROMIUM_screen_space_antialiasing', - 'extension_flag': 'chromium_screen_space_antialiasing', - 'unit_test': False, - 'client_test': False, - }, 'AttachShader': {'decoder_func': 'DoAttachShader'}, 'BindAttribLocation': { 'type': 'GLchar', @@ -2979,8 +2972,8 @@ _FUNCTION_INFO = { 'uint32_t counts_shm_id, uint32_t counts_shm_offset, ' 'uint32_t instance_counts_shm_id, ' 'uint32_t instance_counts_shm_offset, GLsizei drawcount', - 'extension': 'WEBGL_multi_draw_instanced', - 'extension_flag': 'webgl_multi_draw_instanced', + 'extension': 'WEBGL_multi_draw', + 'extension_flag': 'webgl_multi_draw', 'data_transfer_methods': ['shm'], 'size_args': { 'firsts': 'drawcount * sizeof(GLint)', @@ -2992,6 +2985,31 @@ _FUNCTION_INFO = { 'internal': True, 'trace_level': 2, }, + 'MultiDrawArraysInstancedBaseInstanceCHROMIUM': { + 'type': 'Custom', + 'cmd_args': 'GLenumDrawMode mode, ' + 'uint32_t firsts_shm_id, uint32_t firsts_shm_offset, ' + 'uint32_t counts_shm_id, uint32_t counts_shm_offset, ' + 'uint32_t instance_counts_shm_id, ' + 'uint32_t instance_counts_shm_offset, ' + 'uint32_t baseinstances_shm_id, ' + 'uint32_t baseinstances_shm_offset, ' + 'GLsizei drawcount', + 'extension': 'WEBGL_multi_draw_instanced_base_vertex_base_instance', + 'extension_flag': 'webgl_multi_draw_instanced_base_vertex_base_instance', + 'data_transfer_methods': ['shm'], + 'size_args': { + 'firsts': 'drawcount * sizeof(GLint)', + 'counts': 'drawcount * sizeof(GLsizei)', + 'instance_counts': 'drawcount * sizeof(GLsizei)', + 'baseinstances': 'drawcount * sizeof(GLuint)', + }, + 'defer_draws': True, + 'impl_func': False, + 'client_test': False, + 'internal': True, + 'trace_level': 2, + }, 'MultiDrawElementsCHROMIUM': { 'type': 'Custom', 'cmd_args': 'GLenumDrawMode mode, ' @@ -3019,8 +3037,8 @@ _FUNCTION_INFO = { 'uint32_t offsets_shm_id, uint32_t offsets_shm_offset, ' 'uint32_t instance_counts_shm_id, ' 'uint32_t instance_counts_shm_offset, GLsizei drawcount', - 'extension': 'WEBGL_multi_draw_instanced', - 'extension_flag': 'webgl_multi_draw_instanced', + 'extension': 'WEBGL_multi_draw', + 'extension_flag': 'webgl_multi_draw', 'data_transfer_methods': ['shm'], 'size_args': { 'counts': 'drawcount * sizeof(GLsizei)', @@ -3032,6 +3050,35 @@ _FUNCTION_INFO = { 'internal': True, 'trace_level': 2, }, + 
'MultiDrawElementsInstancedBaseVertexBaseInstanceCHROMIUM': { + 'type': 'Custom', + 'cmd_args': 'GLenumDrawMode mode, ' + 'uint32_t counts_shm_id, uint32_t counts_shm_offset, ' + 'GLenumIndexType type, ' + 'uint32_t offsets_shm_id, uint32_t offsets_shm_offset, ' + 'uint32_t instance_counts_shm_id, ' + 'uint32_t instance_counts_shm_offset, ' + 'uint32_t basevertices_shm_id, ' + 'uint32_t basevertices_shm_offset, ' + 'uint32_t baseinstances_shm_id, ' + 'uint32_t baseinstances_shm_offset, ' + 'GLsizei drawcount', + 'extension': 'WEBGL_multi_draw_instanced_base_vertex_base_instance', + 'extension_flag': 'webgl_multi_draw_instanced_base_vertex_base_instance', + 'data_transfer_methods': ['shm'], + 'size_args': { + 'counts': 'drawcount * sizeof(GLsizei)', + 'offsets': 'drawcount * sizeof(GLsizei)', + 'instance_counts': 'drawcount * sizeof(GLsizei)', + 'basevertices': 'drawcount * sizeof(GLint)', + 'baseinstances': 'drawcount * sizeof(GLuint)', + }, + 'defer_draws': True, + 'impl_func': False, + 'client_test': False, + 'internal': True, + 'trace_level': 2, + }, 'MultiDrawArraysWEBGL': { 'type': 'NoCommand', 'extension': 'WEBGL_multi_draw', @@ -3039,8 +3086,13 @@ _FUNCTION_INFO = { }, 'MultiDrawArraysInstancedWEBGL': { 'type': 'NoCommand', - 'extension': 'WEBGL_multi_draw_instanced', - 'extension_flag': 'webgl_multi_draw_instanced', + 'extension': 'WEBGL_multi_draw', + 'extension_flag': 'webgl_multi_draw', + }, + 'MultiDrawArraysInstancedBaseInstanceWEBGL': { + 'type': 'NoCommand', + 'extension': 'WEBGL_multi_draw_instanced_base_vertex_base_instance', + 'extension_flag': 'webgl_multi_draw_instanced_base_vertex_base_instance', }, 'MultiDrawElementsWEBGL': { 'type': 'NoCommand', @@ -3049,8 +3101,13 @@ _FUNCTION_INFO = { }, 'MultiDrawElementsInstancedWEBGL': { 'type': 'NoCommand', - 'extension': 'WEBGL_multi_draw_instanced', - 'extension_flag': 'webgl_multi_draw_instanced', + 'extension': 'WEBGL_multi_draw', + 'extension_flag': 'webgl_multi_draw', + }, + 'MultiDrawElementsInstancedBaseVertexBaseInstanceWEBGL': { + 'type': 'NoCommand', + 'extension': 'WEBGL_multi_draw_instanced_base_vertex_base_instance', + 'extension_flag': 'webgl_multi_draw_instanced_base_vertex_base_instance', }, 'OverlayPromotionHintCHROMIUM': { 'decoder_func': 'DoOverlayPromotionHintCHROMIUM', @@ -3640,6 +3697,15 @@ _FUNCTION_INFO = { 'defer_draws': True, 'trace_level': 2, }, + 'DrawArraysInstancedBaseInstanceANGLE': { + 'type': 'Custom', + 'impl_func': False, + 'cmd_args': 'GLenumDrawMode mode, GLint first, GLsizei count, ' + 'GLsizei primcount, GLuint baseinstance', + 'extension': 'ANGLE_base_vertex_base_instance', + 'defer_draws': True, + 'trace_level': 2, + }, 'DrawBuffersEXT': { 'type': 'PUTn', 'decoder_func': 'DoDrawBuffersEXT', @@ -3662,6 +3728,17 @@ _FUNCTION_INFO = { 'defer_draws': True, 'trace_level': 2, }, + 'DrawElementsInstancedBaseVertexBaseInstanceANGLE': { + 'type': 'Custom', + 'impl_func': False, + 'cmd_args': 'GLenumDrawMode mode, GLsizei count, ' + 'GLenumIndexType type, GLuint index_offset, GLsizei primcount, ' + 'GLint basevertex, GLuint baseinstance', + 'extension': 'ANGLE_base_vertex_base_instance', + 'client_test': False, + 'defer_draws': True, + 'trace_level': 2, + }, 'VertexAttribDivisorANGLE': { 'type': 'Custom', 'impl_func': False, diff --git a/chromium/gpu/command_buffer/build_webgpu_cmd_buffer.py b/chromium/gpu/command_buffer/build_webgpu_cmd_buffer.py index e90815dc7f7..bafd94dc683 100755 --- a/chromium/gpu/command_buffer/build_webgpu_cmd_buffer.py +++ 
b/chromium/gpu/command_buffer/build_webgpu_cmd_buffer.py @@ -20,6 +20,7 @@ _NAMED_TYPE_INFO = { 'PowerPreference': { 'type': 'PowerPreference', 'valid': [ + 'PowerPreference::kDefault', 'PowerPreference::kHighPerformance', 'PowerPreference::kLowPower', ], @@ -62,7 +63,21 @@ _FUNCTION_INFO = { }, 'RequestAdapter': { 'impl_func': False, - 'cmd_args': 'uint32_t power_preference' + 'internal': True, + 'cmd_args': 'uint32_t request_adapter_serial, uint32_t power_preference' + }, + 'RequestDevice': { + 'impl_func': False, + 'internal': True, + 'data_transfer_methods': ['shm'], + 'cmd_args': 'uint32_t adapter_service_id, ' + 'uint32_t request_device_properties_shm_id, ' + 'uint32_t request_device_properties_shm_offset, ' + 'uint32_t request_device_properties_size', + 'size_args': { + 'request_device_properties': + 'request_device_properties_size * sizeof(char)', + }, }, } diff --git a/chromium/gpu/command_buffer/client/gles2_c_lib_autogen.h b/chromium/gpu/command_buffer/client/gles2_c_lib_autogen.h index 52e11e51071..d6e0485819b 100644 --- a/chromium/gpu/command_buffer/client/gles2_c_lib_autogen.h +++ b/chromium/gpu/command_buffer/client/gles2_c_lib_autogen.h @@ -755,6 +755,16 @@ GLES2MultiDrawArraysInstancedWEBGL(GLenum mode, gles2::GetGLContext()->MultiDrawArraysInstancedWEBGL( mode, firsts, counts, instance_counts, drawcount); } +void GL_APIENTRY +GLES2MultiDrawArraysInstancedBaseInstanceWEBGL(GLenum mode, + const GLint* firsts, + const GLsizei* counts, + const GLsizei* instance_counts, + const GLuint* baseinstances, + GLsizei drawcount) { + gles2::GetGLContext()->MultiDrawArraysInstancedBaseInstanceWEBGL( + mode, firsts, counts, instance_counts, baseinstances, drawcount); +} void GL_APIENTRY GLES2MultiDrawElementsWEBGL(GLenum mode, const GLsizei* counts, GLenum type, @@ -773,6 +783,19 @@ GLES2MultiDrawElementsInstancedWEBGL(GLenum mode, gles2::GetGLContext()->MultiDrawElementsInstancedWEBGL( mode, counts, type, offsets, instance_counts, drawcount); } +void GL_APIENTRY GLES2MultiDrawElementsInstancedBaseVertexBaseInstanceWEBGL( + GLenum mode, + const GLsizei* counts, + GLenum type, + const GLsizei* offsets, + const GLsizei* instance_counts, + const GLint* basevertices, + const GLuint* baseinstances, + GLsizei drawcount) { + gles2::GetGLContext()->MultiDrawElementsInstancedBaseVertexBaseInstanceWEBGL( + mode, counts, type, offsets, instance_counts, basevertices, baseinstances, + drawcount); +} void GL_APIENTRY GLES2StencilFunc(GLenum func, GLint ref, GLuint mask) { gles2::GetGLContext()->StencilFunc(func, ref, mask); } @@ -1497,6 +1520,15 @@ void GL_APIENTRY GLES2DrawArraysInstancedANGLE(GLenum mode, gles2::GetGLContext()->DrawArraysInstancedANGLE(mode, first, count, primcount); } +void GL_APIENTRY +GLES2DrawArraysInstancedBaseInstanceANGLE(GLenum mode, + GLint first, + GLsizei count, + GLsizei primcount, + GLuint baseinstance) { + gles2::GetGLContext()->DrawArraysInstancedBaseInstanceANGLE( + mode, first, count, primcount, baseinstance); +} void GL_APIENTRY GLES2DrawElementsInstancedANGLE(GLenum mode, GLsizei count, GLenum type, @@ -1505,6 +1537,17 @@ void GL_APIENTRY GLES2DrawElementsInstancedANGLE(GLenum mode, gles2::GetGLContext()->DrawElementsInstancedANGLE(mode, count, type, indices, primcount); } +void GL_APIENTRY +GLES2DrawElementsInstancedBaseVertexBaseInstanceANGLE(GLenum mode, + GLsizei count, + GLenum type, + const void* indices, + GLsizei primcount, + GLint basevertex, + GLuint baseinstance) { + gles2::GetGLContext()->DrawElementsInstancedBaseVertexBaseInstanceANGLE( + mode, count, 
type, indices, primcount, basevertex, baseinstance); +} void GL_APIENTRY GLES2VertexAttribDivisorANGLE(GLuint index, GLuint divisor) { gles2::GetGLContext()->VertexAttribDivisorANGLE(index, divisor); } @@ -1823,9 +1866,6 @@ GLenum GL_APIENTRY GLES2GetGraphicsResetStatusKHR() { void GL_APIENTRY GLES2BlendBarrierKHR() { gles2::GetGLContext()->BlendBarrierKHR(); } -void GL_APIENTRY GLES2ApplyScreenSpaceAntialiasingCHROMIUM() { - gles2::GetGLContext()->ApplyScreenSpaceAntialiasingCHROMIUM(); -} void GL_APIENTRY GLES2BindFragDataLocationIndexedEXT(GLuint program, GLuint colorNumber, GLuint index, @@ -2578,6 +2618,11 @@ extern const NameToFunc g_gles2_function_table[] = { reinterpret_cast<GLES2FunctionPointer>(glMultiDrawArraysInstancedWEBGL), }, { + "glMultiDrawArraysInstancedBaseInstanceWEBGL", + reinterpret_cast<GLES2FunctionPointer>( + glMultiDrawArraysInstancedBaseInstanceWEBGL), + }, + { "glMultiDrawElementsWEBGL", reinterpret_cast<GLES2FunctionPointer>(glMultiDrawElementsWEBGL), }, @@ -2587,6 +2632,11 @@ extern const NameToFunc g_gles2_function_table[] = { glMultiDrawElementsInstancedWEBGL), }, { + "glMultiDrawElementsInstancedBaseVertexBaseInstanceWEBGL", + reinterpret_cast<GLES2FunctionPointer>( + glMultiDrawElementsInstancedBaseVertexBaseInstanceWEBGL), + }, + { "glStencilFunc", reinterpret_cast<GLES2FunctionPointer>(glStencilFunc), }, @@ -3135,10 +3185,20 @@ extern const NameToFunc g_gles2_function_table[] = { reinterpret_cast<GLES2FunctionPointer>(glDrawArraysInstancedANGLE), }, { + "glDrawArraysInstancedBaseInstanceANGLE", + reinterpret_cast<GLES2FunctionPointer>( + glDrawArraysInstancedBaseInstanceANGLE), + }, + { "glDrawElementsInstancedANGLE", reinterpret_cast<GLES2FunctionPointer>(glDrawElementsInstancedANGLE), }, { + "glDrawElementsInstancedBaseVertexBaseInstanceANGLE", + reinterpret_cast<GLES2FunctionPointer>( + glDrawElementsInstancedBaseVertexBaseInstanceANGLE), + }, + { "glVertexAttribDivisorANGLE", reinterpret_cast<GLES2FunctionPointer>(glVertexAttribDivisorANGLE), }, @@ -3354,11 +3414,6 @@ extern const NameToFunc g_gles2_function_table[] = { reinterpret_cast<GLES2FunctionPointer>(glBlendBarrierKHR), }, { - "glApplyScreenSpaceAntialiasingCHROMIUM", - reinterpret_cast<GLES2FunctionPointer>( - glApplyScreenSpaceAntialiasingCHROMIUM), - }, - { "glBindFragDataLocationIndexedEXT", reinterpret_cast<GLES2FunctionPointer>( glBindFragDataLocationIndexedEXT), diff --git a/chromium/gpu/command_buffer/client/gles2_cmd_helper_autogen.h b/chromium/gpu/command_buffer/client/gles2_cmd_helper_autogen.h index a00f5bf1584..4c50bbfd008 100644 --- a/chromium/gpu/command_buffer/client/gles2_cmd_helper_autogen.h +++ b/chromium/gpu/command_buffer/client/gles2_cmd_helper_autogen.h @@ -1553,6 +1553,27 @@ void MultiDrawArraysInstancedCHROMIUM(GLenum mode, } } +void MultiDrawArraysInstancedBaseInstanceCHROMIUM( + GLenum mode, + uint32_t firsts_shm_id, + uint32_t firsts_shm_offset, + uint32_t counts_shm_id, + uint32_t counts_shm_offset, + uint32_t instance_counts_shm_id, + uint32_t instance_counts_shm_offset, + uint32_t baseinstances_shm_id, + uint32_t baseinstances_shm_offset, + GLsizei drawcount) { + gles2::cmds::MultiDrawArraysInstancedBaseInstanceCHROMIUM* c = + GetCmdSpace<gles2::cmds::MultiDrawArraysInstancedBaseInstanceCHROMIUM>(); + if (c) { + c->Init(mode, firsts_shm_id, firsts_shm_offset, counts_shm_id, + counts_shm_offset, instance_counts_shm_id, + instance_counts_shm_offset, baseinstances_shm_id, + baseinstances_shm_offset, drawcount); + } +} + void MultiDrawElementsCHROMIUM(GLenum mode, 
uint32_t counts_shm_id, uint32_t counts_shm_offset, @@ -1586,6 +1607,33 @@ void MultiDrawElementsInstancedCHROMIUM(GLenum mode, } } +void MultiDrawElementsInstancedBaseVertexBaseInstanceCHROMIUM( + GLenum mode, + uint32_t counts_shm_id, + uint32_t counts_shm_offset, + GLenum type, + uint32_t offsets_shm_id, + uint32_t offsets_shm_offset, + uint32_t instance_counts_shm_id, + uint32_t instance_counts_shm_offset, + uint32_t basevertices_shm_id, + uint32_t basevertices_shm_offset, + uint32_t baseinstances_shm_id, + uint32_t baseinstances_shm_offset, + GLsizei drawcount) { + gles2::cmds::MultiDrawElementsInstancedBaseVertexBaseInstanceCHROMIUM* c = + GetCmdSpace< + gles2::cmds:: + MultiDrawElementsInstancedBaseVertexBaseInstanceCHROMIUM>(); + if (c) { + c->Init(mode, counts_shm_id, counts_shm_offset, type, offsets_shm_id, + offsets_shm_offset, instance_counts_shm_id, + instance_counts_shm_offset, basevertices_shm_id, + basevertices_shm_offset, baseinstances_shm_id, + baseinstances_shm_offset, drawcount); + } +} + void StencilFunc(GLenum func, GLint ref, GLuint mask) { gles2::cmds::StencilFunc* c = GetCmdSpace<gles2::cmds::StencilFunc>(); if (c) { @@ -2794,6 +2842,18 @@ void DrawArraysInstancedANGLE(GLenum mode, } } +void DrawArraysInstancedBaseInstanceANGLE(GLenum mode, + GLint first, + GLsizei count, + GLsizei primcount, + GLuint baseinstance) { + gles2::cmds::DrawArraysInstancedBaseInstanceANGLE* c = + GetCmdSpace<gles2::cmds::DrawArraysInstancedBaseInstanceANGLE>(); + if (c) { + c->Init(mode, first, count, primcount, baseinstance); + } +} + void DrawElementsInstancedANGLE(GLenum mode, GLsizei count, GLenum type, @@ -2806,6 +2866,22 @@ void DrawElementsInstancedANGLE(GLenum mode, } } +void DrawElementsInstancedBaseVertexBaseInstanceANGLE(GLenum mode, + GLsizei count, + GLenum type, + GLuint index_offset, + GLsizei primcount, + GLint basevertex, + GLuint baseinstance) { + gles2::cmds::DrawElementsInstancedBaseVertexBaseInstanceANGLE* c = + GetCmdSpace< + gles2::cmds::DrawElementsInstancedBaseVertexBaseInstanceANGLE>(); + if (c) { + c->Init(mode, count, type, index_offset, primcount, basevertex, + baseinstance); + } +} + void VertexAttribDivisorANGLE(GLuint index, GLuint divisor) { gles2::cmds::VertexAttribDivisorANGLE* c = GetCmdSpace<gles2::cmds::VertexAttribDivisorANGLE>(); @@ -3361,14 +3437,6 @@ void BlendBarrierKHR() { } } -void ApplyScreenSpaceAntialiasingCHROMIUM() { - gles2::cmds::ApplyScreenSpaceAntialiasingCHROMIUM* c = - GetCmdSpace<gles2::cmds::ApplyScreenSpaceAntialiasingCHROMIUM>(); - if (c) { - c->Init(); - } -} - void BindFragDataLocationIndexedEXTBucket(GLuint program, GLuint colorNumber, GLuint index, diff --git a/chromium/gpu/command_buffer/client/gles2_implementation.cc b/chromium/gpu/command_buffer/client/gles2_implementation.cc index 914d6f120ec..18298a136e5 100644 --- a/chromium/gpu/command_buffer/client/gles2_implementation.cc +++ b/chromium/gpu/command_buffer/client/gles2_implementation.cc @@ -53,6 +53,7 @@ #include "gpu/command_buffer/common/sync_token.h" #include "ui/gfx/geometry/rect.h" #include "ui/gfx/geometry/rect_f.h" +#include "ui/gl/gpu_preference.h" #if !defined(__native_client__) #include "ui/gfx/color_space.h" @@ -386,17 +387,19 @@ void GLES2Implementation::OnGpuControlSwapBuffersCompleted( std::move(callback).Run(params); } -void GLES2Implementation::OnGpuSwitched() { - share_group_->SetGpuSwitched(true); +void GLES2Implementation::OnGpuSwitched( + gl::GpuPreference active_gpu_heuristic) { + gpu_switched_ = true; + active_gpu_heuristic_ = 
active_gpu_heuristic; } -GLboolean GLES2Implementation::DidGpuSwitch() { - // TODO(zmo): Redesign this code; it works for now because the share group - // only contains one context but in the future only the first OpenGL context - // in the share group will receive GL_TRUE as the return value. - bool gpu_changed = share_group_->GetGpuSwitched(); - share_group_->SetGpuSwitched(false); - return gpu_changed ? GL_TRUE : GL_FALSE; +GLboolean GLES2Implementation::DidGpuSwitch(gl::GpuPreference* active_gpu) { + if (gpu_switched_) { + *active_gpu = active_gpu_heuristic_; + } + GLboolean result = gpu_switched_ ? GL_TRUE : GL_FALSE; + gpu_switched_ = false; + return result; } void GLES2Implementation::SendErrorMessage(std::string message, int32_t id) { @@ -2521,6 +2524,36 @@ void GLES2Implementation::MultiDrawArraysInstancedWEBGLHelper( helper_->MultiDrawEndCHROMIUM(); } +void GLES2Implementation::MultiDrawArraysInstancedBaseInstanceWEBGLHelper( + GLenum mode, + const GLint* firsts, + const GLsizei* counts, + const GLsizei* instance_counts, + const GLuint* baseinstances, + GLsizei drawcount) { + DCHECK_GT(drawcount, 0); + + uint32_t buffer_size = ComputeCombinedCopySize( + drawcount, firsts, counts, instance_counts, baseinstances); + ScopedTransferBufferPtr buffer(buffer_size, helper_, transfer_buffer_); + + helper_->MultiDrawBeginCHROMIUM(drawcount); + auto DoMultiDraw = [&](const std::array<uint32_t, 4>& offsets, uint32_t, + uint32_t copy_count) { + helper_->MultiDrawArraysInstancedBaseInstanceCHROMIUM( + mode, buffer.shm_id(), buffer.offset() + offsets[0], buffer.shm_id(), + buffer.offset() + offsets[1], buffer.shm_id(), + buffer.offset() + offsets[2], buffer.shm_id(), + buffer.offset() + offsets[3], copy_count); + }; + if (!TransferArraysAndExecute(drawcount, &buffer, DoMultiDraw, firsts, counts, + instance_counts, baseinstances)) { + SetGLError(GL_OUT_OF_MEMORY, "glMultiDrawArraysInstancedBaseInstanceWEBGL", + "out of memory"); + } + helper_->MultiDrawEndCHROMIUM(); +} + void GLES2Implementation::MultiDrawElementsWEBGLHelper(GLenum mode, const GLsizei* counts, GLenum type, @@ -2574,6 +2607,42 @@ void GLES2Implementation::MultiDrawElementsInstancedWEBGLHelper( helper_->MultiDrawEndCHROMIUM(); } +void GLES2Implementation:: + MultiDrawElementsInstancedBaseVertexBaseInstanceWEBGLHelper( + GLenum mode, + const GLsizei* counts, + GLenum type, + const GLsizei* offsets, + const GLsizei* instance_counts, + const GLint* basevertices, + const GLuint* baseinstances, + GLsizei drawcount) { + DCHECK_GT(drawcount, 0); + + uint32_t buffer_size = ComputeCombinedCopySize( + drawcount, counts, offsets, instance_counts, basevertices, baseinstances); + ScopedTransferBufferPtr buffer(buffer_size, helper_, transfer_buffer_); + + helper_->MultiDrawBeginCHROMIUM(drawcount); + auto DoMultiDraw = [&](const std::array<uint32_t, 5>& offsets, uint32_t, + uint32_t copy_count) { + helper_->MultiDrawElementsInstancedBaseVertexBaseInstanceCHROMIUM( + mode, buffer.shm_id(), buffer.offset() + offsets[0], type, + buffer.shm_id(), buffer.offset() + offsets[1], buffer.shm_id(), + buffer.offset() + offsets[2], buffer.shm_id(), + buffer.offset() + offsets[3], buffer.shm_id(), + buffer.offset() + offsets[4], copy_count); + }; + if (!TransferArraysAndExecute(drawcount, &buffer, DoMultiDraw, counts, + offsets, instance_counts, basevertices, + baseinstances)) { + SetGLError(GL_OUT_OF_MEMORY, + "glMultiDrawElementsInstancedBaseVertexBaseInstanceWEBGL", + "out of memory"); + } + helper_->MultiDrawEndCHROMIUM(); +} + void 
GLES2Implementation::MultiDrawArraysWEBGL(GLenum mode, const GLint* firsts, const GLsizei* counts, @@ -2629,6 +2698,39 @@ void GLES2Implementation::MultiDrawArraysInstancedWEBGL( CheckGLError(); } +void GLES2Implementation::MultiDrawArraysInstancedBaseInstanceWEBGL( + GLenum mode, + const GLint* firsts, + const GLsizei* counts, + const GLsizei* instance_counts, + const GLuint* baseinstances, + GLsizei drawcount) { + GPU_CLIENT_SINGLE_THREAD_CHECK(); + GPU_CLIENT_LOG("[" << GetLogPrefix() + << "] glMultiDrawArraysInstancedBaseInstanceWEBGL(" + << GLES2Util::GetStringDrawMode(mode) << ", " << firsts + << ", " << counts << ", " << instance_counts << ", " + << baseinstances << ", " << drawcount << ")"); + if (drawcount < 0) { + SetGLError(GL_INVALID_VALUE, "glMultiDrawArraysInstancedBaseInstanceWEBGL", + "drawcount < 0"); + return; + } + if (drawcount == 0) { + return; + } + // This is for an extension for WebGL which doesn't support client side arrays + if (vertex_array_object_manager_->SupportsClientSideBuffers()) { + SetGLError(GL_INVALID_OPERATION, + "glMultiDrawArraysInstancedBaseInstanceWEBGL", + "Missing array buffer for vertex attribute"); + return; + } + MultiDrawArraysInstancedBaseInstanceWEBGLHelper( + mode, firsts, counts, instance_counts, baseinstances, drawcount); + CheckGLError(); +} + void GLES2Implementation::MultiDrawElementsWEBGL(GLenum mode, const GLsizei* counts, GLenum type, @@ -2698,6 +2800,50 @@ void GLES2Implementation::MultiDrawElementsInstancedWEBGL( CheckGLError(); } +void GLES2Implementation::MultiDrawElementsInstancedBaseVertexBaseInstanceWEBGL( + GLenum mode, + const GLsizei* counts, + GLenum type, + const GLsizei* offsets, + const GLsizei* instance_counts, + const GLint* basevertices, + const GLuint* baseinstances, + GLsizei drawcount) { + GPU_CLIENT_SINGLE_THREAD_CHECK(); + GPU_CLIENT_LOG( + "[" << GetLogPrefix() + << "] glMultiDrawElementsInstancedBaseVertexBaseInstanceWEBGL(" + << GLES2Util::GetStringDrawMode(mode) << ", " << counts << ", " + << GLES2Util::GetStringIndexType(type) << ", " << offsets << ", " + << instance_counts << ", " << basevertices << ", " << baseinstances + << drawcount << ")"); + if (drawcount < 0) { + SetGLError(GL_INVALID_VALUE, "glMultiDrawElementsInstancedWEBGL", + "drawcount < 0"); + return; + } + if (drawcount == 0) { + return; + } + // This is for an extension for WebGL which doesn't support client side arrays + if (vertex_array_object_manager_->bound_element_array_buffer() == 0) { + SetGLError(GL_INVALID_OPERATION, + "glMultiDrawElementsInstancedBaseVertexBaseInstanceWEBGL", + "No element array buffer"); + return; + } + if (vertex_array_object_manager_->SupportsClientSideBuffers()) { + SetGLError(GL_INVALID_OPERATION, + "glMultiDrawElementsInstancedBaseVertexBaseInstanceWEBGL", + "Missing array buffer for vertex attribute"); + return; + } + MultiDrawElementsInstancedBaseVertexBaseInstanceWEBGLHelper( + mode, counts, type, offsets, instance_counts, basevertices, baseinstances, + drawcount); + CheckGLError(); +} + void GLES2Implementation::RemoveTransferBuffer(BufferTracker::Buffer* buffer) { int32_t token = buffer->last_usage_token(); @@ -6468,6 +6614,51 @@ void GLES2Implementation::DrawArraysInstancedANGLE(GLenum mode, CheckGLError(); } +void GLES2Implementation::DrawArraysInstancedBaseInstanceANGLE( + GLenum mode, + GLint first, + GLsizei count, + GLsizei primcount, + GLuint baseinstance) { + GPU_CLIENT_SINGLE_THREAD_CHECK(); + GPU_CLIENT_LOG( + "[" << GetLogPrefix() << "] glDrawArraysInstancedBaseInstanceANGLE(" + << 
GLES2Util::GetStringDrawMode(mode) << ", " << first << ", " + << count << ", " << primcount << ", " << baseinstance << ")"); + if (count < 0) { + SetGLError(GL_INVALID_VALUE, "glDrawArraysInstancedBaseInstanceANGLE", + "count < 0"); + return; + } + if (primcount < 0) { + SetGLError(GL_INVALID_VALUE, "glDrawArraysInstancedBaseInstanceANGLE", + "primcount < 0"); + return; + } + if (primcount == 0) { + return; + } + bool simulated = false; + if (vertex_array_object_manager_->SupportsClientSideBuffers()) { + GLsizei num_elements; + if (!base::CheckAdd(first, count).AssignIfValid(&num_elements)) { + SetGLError(GL_INVALID_VALUE, "glDrawArraysInstancedBaseInstanceANGLE", + "first+count overflow"); + return; + } + // Client side buffer is not used by WebGL so leave it as is. + if (!vertex_array_object_manager_->SetupSimulatedClientSideBuffers( + "glDrawArraysInstancedBaseInstanceANGLE", this, helper_, + num_elements, primcount, &simulated)) { + return; + } + } + helper_->DrawArraysInstancedBaseInstanceANGLE(mode, first, count, primcount, + baseinstance); + RestoreArrayBuffer(simulated); + CheckGLError(); +} + void GLES2Implementation::DrawElementsInstancedANGLE(GLenum mode, GLsizei count, GLenum type, @@ -6508,6 +6699,54 @@ void GLES2Implementation::DrawElementsInstancedANGLE(GLenum mode, CheckGLError(); } +void GLES2Implementation::DrawElementsInstancedBaseVertexBaseInstanceANGLE( + GLenum mode, + GLsizei count, + GLenum type, + const void* indices, + GLsizei primcount, + GLint basevertex, + GLuint baseinstance) { + GPU_CLIENT_SINGLE_THREAD_CHECK(); + GPU_CLIENT_LOG("[" << GetLogPrefix() + << "] glDrawElementsInstancedBaseVertexBaseInstanceANGLE(" + << GLES2Util::GetStringDrawMode(mode) << ", " << count + << ", " << GLES2Util::GetStringIndexType(type) << ", " + << static_cast<const void*>(indices) << ", " << primcount + << ", " << basevertex << ", " << baseinstance << ")"); + if (count < 0) { + SetGLError(GL_INVALID_VALUE, + "glDrawElementsInstancedBaseVertexBaseInstanceANGLE", + "count less than 0."); + return; + } + if (primcount < 0) { + SetGLError(GL_INVALID_VALUE, + "glDrawElementsInstancedBaseVertexBaseInstanceANGLE", + "primcount < 0"); + return; + } + GLuint offset = 0; + bool simulated = false; + if (count > 0 && primcount > 0) { + if (vertex_array_object_manager_->bound_element_array_buffer() != 0 && + !ValidateOffset("glDrawElementsInstancedBaseVertexBaseInstanceANGLE", + reinterpret_cast<GLintptr>(indices))) { + return; + } + // Client side buffer is not used by WebGL so leave it as is. 
+ if (!vertex_array_object_manager_->SetupSimulatedIndexAndClientSideBuffers( + "glDrawElementsInstancedBaseVertexBaseInstanceANGLE", this, helper_, + count, type, primcount, indices, &offset, &simulated)) { + return; + } + } + helper_->DrawElementsInstancedBaseVertexBaseInstanceANGLE( + mode, count, type, offset, primcount, basevertex, baseinstance); + RestoreElementAndArrayBuffers(simulated); + CheckGLError(); +} + void GLES2Implementation::ProduceTextureDirectCHROMIUM(GLuint texture, GLbyte* data) { GPU_CLIENT_SINGLE_THREAD_CHECK(); diff --git a/chromium/gpu/command_buffer/client/gles2_implementation.h b/chromium/gpu/command_buffer/client/gles2_implementation.h index 9f3d8b2b334..ea262ee7c7c 100644 --- a/chromium/gpu/command_buffer/client/gles2_implementation.h +++ b/chromium/gpu/command_buffer/client/gles2_implementation.h @@ -99,7 +99,7 @@ class GLES2_IMPL_EXPORT GLES2Implementation : public GLES2Interface, // GLES2Interface implementation void FreeSharedMemory(void*) override; - GLboolean DidGpuSwitch() final; + GLboolean DidGpuSwitch(gl::GpuPreference* active_gpu) final; // Include the auto-generated part of this class. We split this because // it means we can easily edit the non-auto generated parts right here in @@ -400,7 +400,7 @@ class GLES2_IMPL_EXPORT GLES2Implementation : public GLES2Interface, void OnGpuControlErrorMessage(const char* message, int32_t id) final; void OnGpuControlSwapBuffersCompleted( const SwapBuffersCompleteParams& params) final; - void OnGpuSwitched() final; + void OnGpuSwitched(gl::GpuPreference active_gpu_heuristic) final; void OnSwapBufferPresented(uint64_t swap_id, const gfx::PresentationFeedback& feedback) final; void OnGpuControlReturnData(base::span<const uint8_t> data) final; @@ -515,6 +515,14 @@ class GLES2_IMPL_EXPORT GLES2Implementation : public GLES2Interface, const GLsizei* instanceCounts, GLsizei drawcount); + void MultiDrawArraysInstancedBaseInstanceWEBGLHelper( + GLenum mode, + const GLint* firsts, + const GLsizei* counts, + const GLsizei* instanceCounts, + const GLuint* baseInstances, + GLsizei drawcount); + void MultiDrawElementsWEBGLHelper(GLenum mode, const GLsizei* counts, GLenum type, @@ -528,6 +536,16 @@ class GLES2_IMPL_EXPORT GLES2Implementation : public GLES2Interface, const GLsizei* instanceCounts, GLsizei drawcount); + void MultiDrawElementsInstancedBaseVertexBaseInstanceWEBGLHelper( + GLenum mode, + const GLsizei* counts, + GLenum type, + const GLsizei* offsets, + const GLsizei* instanceCounts, + const GLint* baseVertices, + const GLuint* baseInstances, + GLsizei drawcount); + GLuint CreateImageCHROMIUMHelper(ClientBuffer buffer, GLsizei width, GLsizei height, @@ -860,6 +878,9 @@ class GLES2_IMPL_EXPORT GLES2Implementation : public GLES2Interface, std::string last_active_url_; + bool gpu_switched_ = false; + gl::GpuPreference active_gpu_heuristic_ = gl::GpuPreference::kDefault; + base::WeakPtrFactory<GLES2Implementation> weak_ptr_factory_{this}; DISALLOW_COPY_AND_ASSIGN(GLES2Implementation); diff --git a/chromium/gpu/command_buffer/client/gles2_implementation_autogen.h b/chromium/gpu/command_buffer/client/gles2_implementation_autogen.h index 9639c2d2cf6..109e29e43c0 100644 --- a/chromium/gpu/command_buffer/client/gles2_implementation_autogen.h +++ b/chromium/gpu/command_buffer/client/gles2_implementation_autogen.h @@ -539,6 +539,13 @@ void MultiDrawArraysInstancedWEBGL(GLenum mode, const GLsizei* instance_counts, GLsizei drawcount) override; +void MultiDrawArraysInstancedBaseInstanceWEBGL(GLenum mode, + const GLint* firsts, + 
const GLsizei* counts, + const GLsizei* instance_counts, + const GLuint* baseinstances, + GLsizei drawcount) override; + void MultiDrawElementsWEBGL(GLenum mode, const GLsizei* counts, GLenum type, @@ -552,6 +559,16 @@ void MultiDrawElementsInstancedWEBGL(GLenum mode, const GLsizei* instance_counts, GLsizei drawcount) override; +void MultiDrawElementsInstancedBaseVertexBaseInstanceWEBGL( + GLenum mode, + const GLsizei* counts, + GLenum type, + const GLsizei* offsets, + const GLsizei* instance_counts, + const GLint* basevertices, + const GLuint* baseinstances, + GLsizei drawcount) override; + void StencilFunc(GLenum func, GLint ref, GLuint mask) override; void StencilFuncSeparate(GLenum face, @@ -1056,12 +1073,27 @@ void DrawArraysInstancedANGLE(GLenum mode, GLsizei count, GLsizei primcount) override; +void DrawArraysInstancedBaseInstanceANGLE(GLenum mode, + GLint first, + GLsizei count, + GLsizei primcount, + GLuint baseinstance) override; + void DrawElementsInstancedANGLE(GLenum mode, GLsizei count, GLenum type, const void* indices, GLsizei primcount) override; +void DrawElementsInstancedBaseVertexBaseInstanceANGLE( + GLenum mode, + GLsizei count, + GLenum type, + const void* indices, + GLsizei primcount, + GLint basevertex, + GLuint baseinstance) override; + void VertexAttribDivisorANGLE(GLuint index, GLuint divisor) override; void ProduceTextureDirectCHROMIUM(GLuint texture, GLbyte* mailbox) override; @@ -1286,8 +1318,6 @@ GLenum GetGraphicsResetStatusKHR() override; void BlendBarrierKHR() override; -void ApplyScreenSpaceAntialiasingCHROMIUM() override; - void BindFragDataLocationIndexedEXT(GLuint program, GLuint colorNumber, GLuint index, diff --git a/chromium/gpu/command_buffer/client/gles2_implementation_impl_autogen.h b/chromium/gpu/command_buffer/client/gles2_implementation_impl_autogen.h index 11918e119ae..ea02c67be96 100644 --- a/chromium/gpu/command_buffer/client/gles2_implementation_impl_autogen.h +++ b/chromium/gpu/command_buffer/client/gles2_implementation_impl_autogen.h @@ -3680,15 +3680,6 @@ void GLES2Implementation::BlendBarrierKHR() { CheckGLError(); } -void GLES2Implementation::ApplyScreenSpaceAntialiasingCHROMIUM() { - GPU_CLIENT_SINGLE_THREAD_CHECK(); - GPU_CLIENT_LOG("[" << GetLogPrefix() - << "] glApplyScreenSpaceAntialiasingCHROMIUM(" - << ")"); - helper_->ApplyScreenSpaceAntialiasingCHROMIUM(); - CheckGLError(); -} - void GLES2Implementation::UniformMatrix4fvStreamTextureMatrixCHROMIUM( GLint location, GLboolean transpose, diff --git a/chromium/gpu/command_buffer/client/gles2_implementation_unittest_autogen.h b/chromium/gpu/command_buffer/client/gles2_implementation_unittest_autogen.h index 0127b476ba8..b83aaedf4cf 100644 --- a/chromium/gpu/command_buffer/client/gles2_implementation_unittest_autogen.h +++ b/chromium/gpu/command_buffer/client/gles2_implementation_unittest_autogen.h @@ -2845,6 +2845,17 @@ TEST_F(GLES2ImplementationTest, DrawArraysInstancedANGLE) { EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected))); } +TEST_F(GLES2ImplementationTest, DrawArraysInstancedBaseInstanceANGLE) { + struct Cmds { + cmds::DrawArraysInstancedBaseInstanceANGLE cmd; + }; + Cmds expected; + expected.cmd.Init(GL_POINTS, 2, 3, 4, 5); + + gl_->DrawArraysInstancedBaseInstanceANGLE(GL_POINTS, 2, 3, 4, 5); + EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected))); +} + TEST_F(GLES2ImplementationTest, VertexAttribDivisorANGLE) { struct Cmds { cmds::VertexAttribDivisorANGLE cmd; diff --git a/chromium/gpu/command_buffer/client/gles2_interface.cc 
b/chromium/gpu/command_buffer/client/gles2_interface.cc index 722a39b84c8..6b3457668be 100644 --- a/chromium/gpu/command_buffer/client/gles2_interface.cc +++ b/chromium/gpu/command_buffer/client/gles2_interface.cc @@ -9,7 +9,7 @@ namespace gpu { namespace gles2 { -GLboolean GLES2Interface::DidGpuSwitch() { +GLboolean GLES2Interface::DidGpuSwitch(gl::GpuPreference* active_gpu) { return GL_FALSE; } diff --git a/chromium/gpu/command_buffer/client/gles2_interface.h b/chromium/gpu/command_buffer/client/gles2_interface.h index 1a6e382d93f..ec6728c279d 100644 --- a/chromium/gpu/command_buffer/client/gles2_interface.h +++ b/chromium/gpu/command_buffer/client/gles2_interface.h @@ -22,6 +22,10 @@ class Vector2d; class Vector2dF; } // namespace gfx +namespace gl { +enum class GpuPreference; +} + extern "C" typedef struct _ClientBuffer* ClientBuffer; extern "C" typedef struct _GLColorSpace* GLColorSpace; extern "C" typedef struct _ClientGpuFence* ClientGpuFence; @@ -37,7 +41,11 @@ class GLES2Interface : public InterfaceBase { virtual void FreeSharedMemory(void*) {} - virtual GLboolean DidGpuSwitch(); + // Returns true if the active GPU switched since the last time this + // method was called. If so, |active_gpu| will be written with the + // results of the heuristic indicating which GPU is active; + // kDefault if "unknown", or kLowPower or kHighPerformance if known. + virtual GLboolean DidGpuSwitch(gl::GpuPreference* active_gpu); // Include the auto-generated part of this class. We split this because // it means we can easily edit the non-auto generated parts right here in diff --git a/chromium/gpu/command_buffer/client/gles2_interface_autogen.h b/chromium/gpu/command_buffer/client/gles2_interface_autogen.h index 138c7a99800..7d055f33c22 100644 --- a/chromium/gpu/command_buffer/client/gles2_interface_autogen.h +++ b/chromium/gpu/command_buffer/client/gles2_interface_autogen.h @@ -391,6 +391,13 @@ virtual void MultiDrawArraysInstancedWEBGL(GLenum mode, const GLsizei* counts, const GLsizei* instance_counts, GLsizei drawcount) = 0; +virtual void MultiDrawArraysInstancedBaseInstanceWEBGL( + GLenum mode, + const GLint* firsts, + const GLsizei* counts, + const GLsizei* instance_counts, + const GLuint* baseinstances, + GLsizei drawcount) = 0; virtual void MultiDrawElementsWEBGL(GLenum mode, const GLsizei* counts, GLenum type, @@ -402,6 +409,15 @@ virtual void MultiDrawElementsInstancedWEBGL(GLenum mode, const GLsizei* offsets, const GLsizei* instance_counts, GLsizei drawcount) = 0; +virtual void MultiDrawElementsInstancedBaseVertexBaseInstanceWEBGL( + GLenum mode, + const GLsizei* counts, + GLenum type, + const GLsizei* offsets, + const GLsizei* instance_counts, + const GLint* basevertices, + const GLuint* baseinstances, + GLsizei drawcount) = 0; virtual void StencilFunc(GLenum func, GLint ref, GLuint mask) = 0; virtual void StencilFuncSeparate(GLenum face, GLenum func, @@ -785,11 +801,24 @@ virtual void DrawArraysInstancedANGLE(GLenum mode, GLint first, GLsizei count, GLsizei primcount) = 0; +virtual void DrawArraysInstancedBaseInstanceANGLE(GLenum mode, + GLint first, + GLsizei count, + GLsizei primcount, + GLuint baseinstance) = 0; virtual void DrawElementsInstancedANGLE(GLenum mode, GLsizei count, GLenum type, const void* indices, GLsizei primcount) = 0; +virtual void DrawElementsInstancedBaseVertexBaseInstanceANGLE( + GLenum mode, + GLsizei count, + GLenum type, + const void* indices, + GLsizei primcount, + GLint basevertex, + GLuint baseinstance) = 0; virtual void VertexAttribDivisorANGLE(GLuint 
index, GLuint divisor) = 0; virtual void ProduceTextureDirectCHROMIUM(GLuint texture, GLbyte* mailbox) = 0; virtual GLuint CreateAndConsumeTextureCHROMIUM(const GLbyte* mailbox) = 0; @@ -969,7 +998,6 @@ virtual void ContextVisibilityHintCHROMIUM(GLboolean visibility) = 0; virtual void CoverageModulationCHROMIUM(GLenum components) = 0; virtual GLenum GetGraphicsResetStatusKHR() = 0; virtual void BlendBarrierKHR() = 0; -virtual void ApplyScreenSpaceAntialiasingCHROMIUM() = 0; virtual void BindFragDataLocationIndexedEXT(GLuint program, GLuint colorNumber, GLuint index, diff --git a/chromium/gpu/command_buffer/client/gles2_interface_stub_autogen.h b/chromium/gpu/command_buffer/client/gles2_interface_stub_autogen.h index a35b78ffc5e..637c3afdb81 100644 --- a/chromium/gpu/command_buffer/client/gles2_interface_stub_autogen.h +++ b/chromium/gpu/command_buffer/client/gles2_interface_stub_autogen.h @@ -382,6 +382,12 @@ void MultiDrawArraysInstancedWEBGL(GLenum mode, const GLsizei* counts, const GLsizei* instance_counts, GLsizei drawcount) override; +void MultiDrawArraysInstancedBaseInstanceWEBGL(GLenum mode, + const GLint* firsts, + const GLsizei* counts, + const GLsizei* instance_counts, + const GLuint* baseinstances, + GLsizei drawcount) override; void MultiDrawElementsWEBGL(GLenum mode, const GLsizei* counts, GLenum type, @@ -393,6 +399,15 @@ void MultiDrawElementsInstancedWEBGL(GLenum mode, const GLsizei* offsets, const GLsizei* instance_counts, GLsizei drawcount) override; +void MultiDrawElementsInstancedBaseVertexBaseInstanceWEBGL( + GLenum mode, + const GLsizei* counts, + GLenum type, + const GLsizei* offsets, + const GLsizei* instance_counts, + const GLint* basevertices, + const GLuint* baseinstances, + GLsizei drawcount) override; void StencilFunc(GLenum func, GLint ref, GLuint mask) override; void StencilFuncSeparate(GLenum face, GLenum func, @@ -762,11 +777,24 @@ void DrawArraysInstancedANGLE(GLenum mode, GLint first, GLsizei count, GLsizei primcount) override; +void DrawArraysInstancedBaseInstanceANGLE(GLenum mode, + GLint first, + GLsizei count, + GLsizei primcount, + GLuint baseinstance) override; void DrawElementsInstancedANGLE(GLenum mode, GLsizei count, GLenum type, const void* indices, GLsizei primcount) override; +void DrawElementsInstancedBaseVertexBaseInstanceANGLE( + GLenum mode, + GLsizei count, + GLenum type, + const void* indices, + GLsizei primcount, + GLint basevertex, + GLuint baseinstance) override; void VertexAttribDivisorANGLE(GLuint index, GLuint divisor) override; void ProduceTextureDirectCHROMIUM(GLuint texture, GLbyte* mailbox) override; GLuint CreateAndConsumeTextureCHROMIUM(const GLbyte* mailbox) override; @@ -940,7 +968,6 @@ void ContextVisibilityHintCHROMIUM(GLboolean visibility) override; void CoverageModulationCHROMIUM(GLenum components) override; GLenum GetGraphicsResetStatusKHR() override; void BlendBarrierKHR() override; -void ApplyScreenSpaceAntialiasingCHROMIUM() override; void BindFragDataLocationIndexedEXT(GLuint program, GLuint colorNumber, GLuint index, diff --git a/chromium/gpu/command_buffer/client/gles2_interface_stub_impl_autogen.h b/chromium/gpu/command_buffer/client/gles2_interface_stub_impl_autogen.h index afb9efd30e4..20857b8c343 100644 --- a/chromium/gpu/command_buffer/client/gles2_interface_stub_impl_autogen.h +++ b/chromium/gpu/command_buffer/client/gles2_interface_stub_impl_autogen.h @@ -503,6 +503,13 @@ void GLES2InterfaceStub::MultiDrawArraysInstancedWEBGL( const GLsizei* /* counts */, const GLsizei* /* instance_counts */, GLsizei /* 
drawcount */) {} +void GLES2InterfaceStub::MultiDrawArraysInstancedBaseInstanceWEBGL( + GLenum /* mode */, + const GLint* /* firsts */, + const GLsizei* /* counts */, + const GLsizei* /* instance_counts */, + const GLuint* /* baseinstances */, + GLsizei /* drawcount */) {} void GLES2InterfaceStub::MultiDrawElementsWEBGL(GLenum /* mode */, const GLsizei* /* counts */, GLenum /* type */, @@ -515,6 +522,15 @@ void GLES2InterfaceStub::MultiDrawElementsInstancedWEBGL( const GLsizei* /* offsets */, const GLsizei* /* instance_counts */, GLsizei /* drawcount */) {} +void GLES2InterfaceStub::MultiDrawElementsInstancedBaseVertexBaseInstanceWEBGL( + GLenum /* mode */, + const GLsizei* /* counts */, + GLenum /* type */, + const GLsizei* /* offsets */, + const GLsizei* /* instance_counts */, + const GLint* /* basevertices */, + const GLuint* /* baseinstances */, + GLsizei /* drawcount */) {} void GLES2InterfaceStub::StencilFunc(GLenum /* func */, GLint /* ref */, GLuint /* mask */) {} @@ -1021,11 +1037,25 @@ void GLES2InterfaceStub::DrawArraysInstancedANGLE(GLenum /* mode */, GLint /* first */, GLsizei /* count */, GLsizei /* primcount */) {} +void GLES2InterfaceStub::DrawArraysInstancedBaseInstanceANGLE( + GLenum /* mode */, + GLint /* first */, + GLsizei /* count */, + GLsizei /* primcount */, + GLuint /* baseinstance */) {} void GLES2InterfaceStub::DrawElementsInstancedANGLE(GLenum /* mode */, GLsizei /* count */, GLenum /* type */, const void* /* indices */, GLsizei /* primcount */) {} +void GLES2InterfaceStub::DrawElementsInstancedBaseVertexBaseInstanceANGLE( + GLenum /* mode */, + GLsizei /* count */, + GLenum /* type */, + const void* /* indices */, + GLsizei /* primcount */, + GLint /* basevertex */, + GLuint /* baseinstance */) {} void GLES2InterfaceStub::VertexAttribDivisorANGLE(GLuint /* index */, GLuint /* divisor */) {} void GLES2InterfaceStub::ProduceTextureDirectCHROMIUM(GLuint /* texture */, @@ -1243,7 +1273,6 @@ GLenum GLES2InterfaceStub::GetGraphicsResetStatusKHR() { return 0; } void GLES2InterfaceStub::BlendBarrierKHR() {} -void GLES2InterfaceStub::ApplyScreenSpaceAntialiasingCHROMIUM() {} void GLES2InterfaceStub::BindFragDataLocationIndexedEXT( GLuint /* program */, GLuint /* colorNumber */, diff --git a/chromium/gpu/command_buffer/client/gles2_trace_implementation_autogen.h b/chromium/gpu/command_buffer/client/gles2_trace_implementation_autogen.h index 81b31c3c7be..75637ef77b0 100644 --- a/chromium/gpu/command_buffer/client/gles2_trace_implementation_autogen.h +++ b/chromium/gpu/command_buffer/client/gles2_trace_implementation_autogen.h @@ -382,6 +382,12 @@ void MultiDrawArraysInstancedWEBGL(GLenum mode, const GLsizei* counts, const GLsizei* instance_counts, GLsizei drawcount) override; +void MultiDrawArraysInstancedBaseInstanceWEBGL(GLenum mode, + const GLint* firsts, + const GLsizei* counts, + const GLsizei* instance_counts, + const GLuint* baseinstances, + GLsizei drawcount) override; void MultiDrawElementsWEBGL(GLenum mode, const GLsizei* counts, GLenum type, @@ -393,6 +399,15 @@ void MultiDrawElementsInstancedWEBGL(GLenum mode, const GLsizei* offsets, const GLsizei* instance_counts, GLsizei drawcount) override; +void MultiDrawElementsInstancedBaseVertexBaseInstanceWEBGL( + GLenum mode, + const GLsizei* counts, + GLenum type, + const GLsizei* offsets, + const GLsizei* instance_counts, + const GLint* basevertices, + const GLuint* baseinstances, + GLsizei drawcount) override; void StencilFunc(GLenum func, GLint ref, GLuint mask) override; void StencilFuncSeparate(GLenum face, 
GLenum func, @@ -762,11 +777,24 @@ void DrawArraysInstancedANGLE(GLenum mode, GLint first, GLsizei count, GLsizei primcount) override; +void DrawArraysInstancedBaseInstanceANGLE(GLenum mode, + GLint first, + GLsizei count, + GLsizei primcount, + GLuint baseinstance) override; void DrawElementsInstancedANGLE(GLenum mode, GLsizei count, GLenum type, const void* indices, GLsizei primcount) override; +void DrawElementsInstancedBaseVertexBaseInstanceANGLE( + GLenum mode, + GLsizei count, + GLenum type, + const void* indices, + GLsizei primcount, + GLint basevertex, + GLuint baseinstance) override; void VertexAttribDivisorANGLE(GLuint index, GLuint divisor) override; void ProduceTextureDirectCHROMIUM(GLuint texture, GLbyte* mailbox) override; GLuint CreateAndConsumeTextureCHROMIUM(const GLbyte* mailbox) override; @@ -940,7 +968,6 @@ void ContextVisibilityHintCHROMIUM(GLboolean visibility) override; void CoverageModulationCHROMIUM(GLenum components) override; GLenum GetGraphicsResetStatusKHR() override; void BlendBarrierKHR() override; -void ApplyScreenSpaceAntialiasingCHROMIUM() override; void BindFragDataLocationIndexedEXT(GLuint program, GLuint colorNumber, GLuint index, diff --git a/chromium/gpu/command_buffer/client/gles2_trace_implementation_impl_autogen.h b/chromium/gpu/command_buffer/client/gles2_trace_implementation_impl_autogen.h index 56cd1905fb0..7f13e0254e4 100644 --- a/chromium/gpu/command_buffer/client/gles2_trace_implementation_impl_autogen.h +++ b/chromium/gpu/command_buffer/client/gles2_trace_implementation_impl_autogen.h @@ -1085,6 +1085,19 @@ void GLES2TraceImplementation::MultiDrawArraysInstancedWEBGL( drawcount); } +void GLES2TraceImplementation::MultiDrawArraysInstancedBaseInstanceWEBGL( + GLenum mode, + const GLint* firsts, + const GLsizei* counts, + const GLsizei* instance_counts, + const GLuint* baseinstances, + GLsizei drawcount) { + TRACE_EVENT_BINARY_EFFICIENT0( + "gpu", "GLES2Trace::MultiDrawArraysInstancedBaseInstanceWEBGL"); + gl_->MultiDrawArraysInstancedBaseInstanceWEBGL( + mode, firsts, counts, instance_counts, baseinstances, drawcount); +} + void GLES2TraceImplementation::MultiDrawElementsWEBGL(GLenum mode, const GLsizei* counts, GLenum type, @@ -1107,6 +1120,24 @@ void GLES2TraceImplementation::MultiDrawElementsInstancedWEBGL( instance_counts, drawcount); } +void GLES2TraceImplementation:: + MultiDrawElementsInstancedBaseVertexBaseInstanceWEBGL( + GLenum mode, + const GLsizei* counts, + GLenum type, + const GLsizei* offsets, + const GLsizei* instance_counts, + const GLint* basevertices, + const GLuint* baseinstances, + GLsizei drawcount) { + TRACE_EVENT_BINARY_EFFICIENT0( + "gpu", + "GLES2Trace::MultiDrawElementsInstancedBaseVertexBaseInstanceWEBGL"); + gl_->MultiDrawElementsInstancedBaseVertexBaseInstanceWEBGL( + mode, counts, type, offsets, instance_counts, basevertices, baseinstances, + drawcount); +} + void GLES2TraceImplementation::StencilFunc(GLenum func, GLint ref, GLuint mask) { @@ -2143,6 +2174,18 @@ void GLES2TraceImplementation::DrawArraysInstancedANGLE(GLenum mode, gl_->DrawArraysInstancedANGLE(mode, first, count, primcount); } +void GLES2TraceImplementation::DrawArraysInstancedBaseInstanceANGLE( + GLenum mode, + GLint first, + GLsizei count, + GLsizei primcount, + GLuint baseinstance) { + TRACE_EVENT_BINARY_EFFICIENT0( + "gpu", "GLES2Trace::DrawArraysInstancedBaseInstanceANGLE"); + gl_->DrawArraysInstancedBaseInstanceANGLE(mode, first, count, primcount, + baseinstance); +} + void GLES2TraceImplementation::DrawElementsInstancedANGLE(GLenum mode, 
GLsizei count, GLenum type, @@ -2153,6 +2196,20 @@ void GLES2TraceImplementation::DrawElementsInstancedANGLE(GLenum mode, gl_->DrawElementsInstancedANGLE(mode, count, type, indices, primcount); } +void GLES2TraceImplementation::DrawElementsInstancedBaseVertexBaseInstanceANGLE( + GLenum mode, + GLsizei count, + GLenum type, + const void* indices, + GLsizei primcount, + GLint basevertex, + GLuint baseinstance) { + TRACE_EVENT_BINARY_EFFICIENT0( + "gpu", "GLES2Trace::DrawElementsInstancedBaseVertexBaseInstanceANGLE"); + gl_->DrawElementsInstancedBaseVertexBaseInstanceANGLE( + mode, count, type, indices, primcount, basevertex, baseinstance); +} + void GLES2TraceImplementation::VertexAttribDivisorANGLE(GLuint index, GLuint divisor) { TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::VertexAttribDivisorANGLE"); @@ -2609,12 +2666,6 @@ void GLES2TraceImplementation::BlendBarrierKHR() { gl_->BlendBarrierKHR(); } -void GLES2TraceImplementation::ApplyScreenSpaceAntialiasingCHROMIUM() { - TRACE_EVENT_BINARY_EFFICIENT0( - "gpu", "GLES2Trace::ApplyScreenSpaceAntialiasingCHROMIUM"); - gl_->ApplyScreenSpaceAntialiasingCHROMIUM(); -} - void GLES2TraceImplementation::BindFragDataLocationIndexedEXT( GLuint program, GLuint colorNumber, diff --git a/chromium/gpu/command_buffer/client/gpu_control_client.h b/chromium/gpu/command_buffer/client/gpu_control_client.h index a2bbec285ca..c88be16c02c 100644 --- a/chromium/gpu/command_buffer/client/gpu_control_client.h +++ b/chromium/gpu/command_buffer/client/gpu_control_client.h @@ -9,6 +9,7 @@ #include "base/containers/span.h" #include "ui/gfx/presentation_feedback.h" +#include "ui/gl/gpu_preference.h" namespace gpu { struct SwapBuffersCompleteParams; @@ -28,7 +29,7 @@ class GpuControlClient { virtual void OnGpuControlErrorMessage(const char* message, int32_t id) = 0; virtual void OnGpuControlSwapBuffersCompleted( const SwapBuffersCompleteParams& params) = 0; - virtual void OnGpuSwitched() {} + virtual void OnGpuSwitched(gl::GpuPreference active_gpu_heuristic) {} virtual void OnSwapBufferPresented( uint64_t swap_id, const gfx::PresentationFeedback& feedback) = 0; diff --git a/chromium/gpu/command_buffer/client/gpu_memory_buffer_manager.cc b/chromium/gpu/command_buffer/client/gpu_memory_buffer_manager.cc index ecd730625a7..f8277537bca 100644 --- a/chromium/gpu/command_buffer/client/gpu_memory_buffer_manager.cc +++ b/chromium/gpu/command_buffer/client/gpu_memory_buffer_manager.cc @@ -4,10 +4,59 @@ #include "gpu/command_buffer/client/gpu_memory_buffer_manager.h" +#include <inttypes.h> + +#include "base/strings/stringprintf.h" +#include "base/trace_event/process_memory_dump.h" +#include "ui/gfx/buffer_format_util.h" + namespace gpu { GpuMemoryBufferManager::GpuMemoryBufferManager() = default; GpuMemoryBufferManager::~GpuMemoryBufferManager() = default; +GpuMemoryBufferManager::AllocatedBufferInfo::AllocatedBufferInfo( + const gfx::GpuMemoryBufferHandle& handle, + const gfx::Size& size, + gfx::BufferFormat format) + : buffer_id_(handle.id), + type_(handle.type), + size_in_bytes_(gfx::BufferSizeForBufferFormat(size, format)) { + DCHECK_NE(gfx::EMPTY_BUFFER, type_); + + if (type_ == gfx::SHARED_MEMORY_BUFFER) + shared_memory_guid_ = handle.region.GetGUID(); +} + +GpuMemoryBufferManager::AllocatedBufferInfo::~AllocatedBufferInfo() = default; + +bool GpuMemoryBufferManager::AllocatedBufferInfo::OnMemoryDump( + base::trace_event::ProcessMemoryDump* pmd, + int client_id, + uint64_t client_tracing_process_id) const { + base::trace_event::MemoryAllocatorDump* dump = 
pmd->CreateAllocatorDump( + base::StringPrintf("gpumemorybuffer/client_0x%" PRIX32 "/buffer_%d", + client_id, buffer_id_.id)); + if (!dump) + return false; + + dump->AddScalar(base::trace_event::MemoryAllocatorDump::kNameSize, + base::trace_event::MemoryAllocatorDump::kUnitsBytes, + size_in_bytes_); + + // Create the shared ownership edge to avoid double counting memory. + if (type_ == gfx::SHARED_MEMORY_BUFFER) { + pmd->CreateSharedMemoryOwnershipEdge(dump->guid(), shared_memory_guid_, + /*importance=*/0); + } else { + auto shared_buffer_guid = gfx::GetGenericSharedGpuMemoryGUIDForTracing( + client_tracing_process_id, buffer_id_); + pmd->CreateSharedGlobalAllocatorDump(shared_buffer_guid); + pmd->AddOwnershipEdge(dump->guid(), shared_buffer_guid); + } + + return true; +} + } // namespace gpu diff --git a/chromium/gpu/command_buffer/client/gpu_memory_buffer_manager.h b/chromium/gpu/command_buffer/client/gpu_memory_buffer_manager.h index de0f4e71734..9d0a221b6ce 100644 --- a/chromium/gpu/command_buffer/client/gpu_memory_buffer_manager.h +++ b/chromium/gpu/command_buffer/client/gpu_memory_buffer_manager.h @@ -33,6 +33,29 @@ class GPU_EXPORT GpuMemoryBufferManager { // thread. virtual void SetDestructionSyncToken(gfx::GpuMemoryBuffer* buffer, const gpu::SyncToken& sync_token) = 0; + + protected: + class GPU_EXPORT AllocatedBufferInfo { + public: + AllocatedBufferInfo(const gfx::GpuMemoryBufferHandle& handle, + const gfx::Size& size, + gfx::BufferFormat format); + ~AllocatedBufferInfo(); + + gfx::GpuMemoryBufferType type() const { return type_; } + + // Add a memory dump for this buffer to |pmd|. Returns false if adding the + // dump failed. + bool OnMemoryDump(base::trace_event::ProcessMemoryDump* pmd, + int client_id, + uint64_t client_tracing_process_id) const; + + private: + gfx::GpuMemoryBufferId buffer_id_; + gfx::GpuMemoryBufferType type_; + size_t size_in_bytes_; + base::UnguessableToken shared_memory_guid_; + }; }; } // namespace gpu diff --git a/chromium/gpu/command_buffer/client/raster_implementation.cc b/chromium/gpu/command_buffer/client/raster_implementation.cc index 93da3f458e2..87e03d9e090 100644 --- a/chromium/gpu/command_buffer/client/raster_implementation.cc +++ b/chromium/gpu/command_buffer/client/raster_implementation.cc @@ -1205,7 +1205,7 @@ void RasterImplementation::IssueImageDecodeCacheEntryCreation( } GLuint RasterImplementation::CreateAndConsumeForGpuRaster( - const GLbyte* mailbox) { + const gpu::Mailbox& mailbox) { NOTREACHED(); return 0; } @@ -1221,6 +1221,15 @@ void RasterImplementation::EndGpuRaster() { NOTREACHED(); } +void RasterImplementation::BeginSharedImageAccessDirectCHROMIUM(GLuint texture, + GLenum mode) { + NOTREACHED(); +} + +void RasterImplementation::EndSharedImageAccessDirectCHROMIUM(GLuint texture) { + NOTREACHED(); +} + void RasterImplementation::TraceBeginCHROMIUM(const char* category_name, const char* trace_name) { GPU_CLIENT_SINGLE_THREAD_CHECK(); diff --git a/chromium/gpu/command_buffer/client/raster_implementation.h b/chromium/gpu/command_buffer/client/raster_implementation.h index f8beb586e84..4661b207f66 100644 --- a/chromium/gpu/command_buffer/client/raster_implementation.h +++ b/chromium/gpu/command_buffer/client/raster_implementation.h @@ -140,10 +140,13 @@ class RASTER_EXPORT RasterImplementation : public RasterInterface, uint32_t transfer_cache_entry_id, const gfx::ColorSpace& target_color_space, bool needs_mips) override; - GLuint CreateAndConsumeForGpuRaster(const GLbyte* mailbox) override; + GLuint 
CreateAndConsumeForGpuRaster(const gpu::Mailbox& mailbox) override; void DeleteGpuRasterTexture(GLuint texture) override; void BeginGpuRaster() override; void EndGpuRaster() override; + void BeginSharedImageAccessDirectCHROMIUM(GLuint texture, + GLenum mode) override; + void EndSharedImageAccessDirectCHROMIUM(GLuint texture) override; // ContextSupport implementation. void SetAggressivelyFreeResources(bool aggressively_free_resources) override; diff --git a/chromium/gpu/command_buffer/client/raster_implementation_gles.cc b/chromium/gpu/command_buffer/client/raster_implementation_gles.cc index f1ed53f1c6d..bf10d7a7db3 100644 --- a/chromium/gpu/command_buffer/client/raster_implementation_gles.cc +++ b/chromium/gpu/command_buffer/client/raster_implementation_gles.cc @@ -160,12 +160,13 @@ SyncToken RasterImplementationGLES::ScheduleImageDecode( } GLuint RasterImplementationGLES::CreateAndConsumeForGpuRaster( - const GLbyte* mailbox) { - return gl_->CreateAndConsumeTextureCHROMIUM(mailbox); + const gpu::Mailbox& mailbox) { + DCHECK(mailbox.IsSharedImage()); + return gl_->CreateAndTexStorage2DSharedImageCHROMIUM(mailbox.name); } void RasterImplementationGLES::DeleteGpuRasterTexture(GLuint texture) { - gl_->DeleteTextures(1, &texture); + gl_->DeleteTextures(1u, &texture); } void RasterImplementationGLES::BeginGpuRaster() { @@ -186,6 +187,17 @@ void RasterImplementationGLES::EndGpuRaster() { gl_->ActiveTexture(GL_TEXTURE0); } +void RasterImplementationGLES::BeginSharedImageAccessDirectCHROMIUM( + GLuint texture, + GLenum mode) { + gl_->BeginSharedImageAccessDirectCHROMIUM(texture, mode); +} + +void RasterImplementationGLES::EndSharedImageAccessDirectCHROMIUM( + GLuint texture) { + gl_->EndSharedImageAccessDirectCHROMIUM(texture); +} + void RasterImplementationGLES::TraceBeginCHROMIUM(const char* category_name, const char* trace_name) { gl_->TraceBeginCHROMIUM(category_name, trace_name); diff --git a/chromium/gpu/command_buffer/client/raster_implementation_gles.h b/chromium/gpu/command_buffer/client/raster_implementation_gles.h index 3b74173c528..5d93204409b 100644 --- a/chromium/gpu/command_buffer/client/raster_implementation_gles.h +++ b/chromium/gpu/command_buffer/client/raster_implementation_gles.h @@ -88,10 +88,13 @@ class RASTER_EXPORT RasterImplementationGLES : public RasterInterface { bool needs_mips) override; // Raster via GrContext. 
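
The raster-side changes above replace the legacy ProduceTextureDirectCHROMIUM / CreateAndConsumeTextureCHROMIUM mailbox pair with shared images: CreateAndConsumeForGpuRaster() now takes a gpu::Mailbox that must name a shared image (the GLES implementation DCHECKs this), binds it through CreateAndTexStorage2DSharedImageCHROMIUM(), and access to the resulting texture has to be bracketed by the new BeginSharedImageAccessDirectCHROMIUM() / EndSharedImageAccessDirectCHROMIUM() calls. A minimal caller-side sketch of that sequence follows; the helper name RasterIntoSharedImage is hypothetical, and it assumes an initialized RasterInterface plus the access-mode enum exercised by the unit tests below:

#include "gpu/GLES2/gl2extchromium.h"
#include "gpu/command_buffer/client/raster_interface.h"
#include "gpu/command_buffer/common/mailbox.h"

// Sketch only: |ri| and |mailbox| are assumed valid, and |mailbox| must
// have been created as a shared image.
void RasterIntoSharedImage(gpu::raster::RasterInterface* ri,
                           const gpu::Mailbox& mailbox) {
  GLuint texture = ri->CreateAndConsumeForGpuRaster(mailbox);
  ri->BeginSharedImageAccessDirectCHROMIUM(
      texture, GL_SHARED_IMAGE_ACCESS_MODE_READWRITE_CHROMIUM);
  ri->BeginGpuRaster();
  // ... issue GrContext draws targeting |texture| ...
  ri->EndGpuRaster();  // Restores GL state; resets the active texture unit.
  ri->EndSharedImageAccessDirectCHROMIUM(texture);
  ri->DeleteGpuRasterTexture(texture);
}
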
- GLuint CreateAndConsumeForGpuRaster(const GLbyte* mailbox) override; + GLuint CreateAndConsumeForGpuRaster(const gpu::Mailbox& mailbox) override; void DeleteGpuRasterTexture(GLuint texture) override; void BeginGpuRaster() override; void EndGpuRaster() override; + void BeginSharedImageAccessDirectCHROMIUM(GLuint texture, + GLenum mode) override; + void EndSharedImageAccessDirectCHROMIUM(GLuint texture) override; void TraceBeginCHROMIUM(const char* category_name, const char* trace_name) override; diff --git a/chromium/gpu/command_buffer/client/raster_implementation_gles_unittest.cc b/chromium/gpu/command_buffer/client/raster_implementation_gles_unittest.cc index d50e428f95b..93285232dce 100644 --- a/chromium/gpu/command_buffer/client/raster_implementation_gles_unittest.cc +++ b/chromium/gpu/command_buffer/client/raster_implementation_gles_unittest.cc @@ -29,8 +29,10 @@ #include "ui/gfx/color_space.h" using testing::_; +using testing::Eq; using testing::Gt; using testing::Le; +using testing::Pointee; using testing::Return; using testing::SetArgPointee; using testing::StrEq; @@ -84,9 +86,11 @@ class RasterMockGLES2Interface : public gles2::GLES2InterfaceStub { MOCK_METHOD3(TexParameteri, void(GLenum target, GLenum pname, GLint param)); // Mailboxes. - MOCK_METHOD2(ProduceTextureDirectCHROMIUM, - void(GLuint texture, GLbyte* mailbox)); - MOCK_METHOD1(CreateAndConsumeTextureCHROMIUM, GLuint(const GLbyte* mailbox)); + MOCK_METHOD1(CreateAndTexStorage2DSharedImageCHROMIUM, + GLuint(const GLbyte* mailbox)); + MOCK_METHOD2(BeginSharedImageAccessDirectCHROMIUM, + void(GLuint texture, GLenum mode)); + MOCK_METHOD1(EndSharedImageAccessDirectCHROMIUM, void(GLuint texture)); // Image objects. MOCK_METHOD4(CreateImageCHROMIUM, @@ -403,29 +407,35 @@ TEST_F(RasterImplementationGLESTest, GetQueryObjectui64vEXT) { ri_->GetQueryObjectui64vEXT(kQueryId, kQueryParam, &result); } -TEST_F(RasterImplementationGLESTest, DeleteGpuRasterTexture) { - GLuint texture_id = 3; - gpu::Mailbox mailbox; - - EXPECT_CALL(*gl_, CreateAndConsumeTextureCHROMIUM(mailbox.name)) - .WillOnce(Return(texture_id)) - .RetiresOnSaturation(); - - EXPECT_EQ(texture_id, ri_->CreateAndConsumeForGpuRaster(mailbox.name)); +TEST_F(RasterImplementationGLESTest, CreateAndConsumeForGpuRaster) { + const GLuint kTextureId = 23; + const auto mailbox = gpu::Mailbox::GenerateForSharedImage(); + EXPECT_CALL(*gl_, CreateAndTexStorage2DSharedImageCHROMIUM(mailbox.name)) + .WillOnce(Return(kTextureId)); + GLuint texture_id = ri_->CreateAndConsumeForGpuRaster(mailbox); + EXPECT_EQ(kTextureId, texture_id); +} - EXPECT_CALL(*gl_, DeleteTextures(1, _)).Times(1); - ri_->DeleteGpuRasterTexture(texture_id); +TEST_F(RasterImplementationGLESTest, DeleteGpuRasterTexture) { + const GLuint kTextureId = 23; + EXPECT_CALL(*gl_, DeleteTextures(1, Pointee(Eq(kTextureId)))).Times(1); + ri_->DeleteGpuRasterTexture(kTextureId); } -TEST_F(RasterImplementationGLESTest, CreateAndConsumeForGpuRaster) { +TEST_F(RasterImplementationGLESTest, BeginSharedImageAccess) { const GLuint kTextureId = 23; - GLuint texture_id = 0; - gpu::Mailbox mailbox; + EXPECT_CALL(*gl_, + BeginSharedImageAccessDirectCHROMIUM( + kTextureId, GL_SHARED_IMAGE_ACCESS_MODE_READWRITE_CHROMIUM)) + .Times(1); + ri_->BeginSharedImageAccessDirectCHROMIUM( + kTextureId, GL_SHARED_IMAGE_ACCESS_MODE_READWRITE_CHROMIUM); +} - EXPECT_CALL(*gl_, CreateAndConsumeTextureCHROMIUM(mailbox.name)) - .WillOnce(Return(kTextureId)); - texture_id = ri_->CreateAndConsumeForGpuRaster(mailbox.name); - EXPECT_EQ(kTextureId, 
texture_id); +TEST_F(RasterImplementationGLESTest, EndSharedImageAccess) { + const GLuint kTextureId = 23; + EXPECT_CALL(*gl_, EndSharedImageAccessDirectCHROMIUM(kTextureId)).Times(1); + ri_->EndSharedImageAccessDirectCHROMIUM(kTextureId); } TEST_F(RasterImplementationGLESTest, BeginGpuRaster) { diff --git a/chromium/gpu/command_buffer/client/raster_interface.h b/chromium/gpu/command_buffer/client/raster_interface.h index ccd2db389f3..250272d867e 100644 --- a/chromium/gpu/command_buffer/client/raster_interface.h +++ b/chromium/gpu/command_buffer/client/raster_interface.h @@ -82,10 +82,13 @@ class RasterInterface : public InterfaceBase { bool needs_mips) = 0; // Raster via GrContext. - virtual GLuint CreateAndConsumeForGpuRaster(const GLbyte* mailbox) = 0; + virtual GLuint CreateAndConsumeForGpuRaster(const gpu::Mailbox& mailbox) = 0; virtual void DeleteGpuRasterTexture(GLuint texture) = 0; virtual void BeginGpuRaster() = 0; virtual void EndGpuRaster() = 0; + virtual void BeginSharedImageAccessDirectCHROMIUM(GLuint texture, + GLenum mode) = 0; + virtual void EndSharedImageAccessDirectCHROMIUM(GLuint texture) = 0; // Include the auto-generated part of this class. We split this because // it means we can easily edit the non-auto generated parts right here in diff --git a/chromium/gpu/command_buffer/client/share_group.cc b/chromium/gpu/command_buffer/client/share_group.cc index ceb61d8bd39..a9233874578 100644 --- a/chromium/gpu/command_buffer/client/share_group.cc +++ b/chromium/gpu/command_buffer/client/share_group.cc @@ -401,16 +401,6 @@ bool ShareGroup::IsLost() const { return lost_; } -void ShareGroup::SetGpuSwitched(bool gpu_switched) { - base::AutoLock hold(gpu_switched_lock_); - gpu_switched_ = gpu_switched; -} - -bool ShareGroup::GetGpuSwitched() const { - base::AutoLock hold(gpu_switched_lock_); - return gpu_switched_; -} - void ShareGroup::SetProgramInfoManagerForTesting(ProgramInfoManager* manager) { program_info_manager_.reset(manager); } diff --git a/chromium/gpu/command_buffer/client/share_group.h b/chromium/gpu/command_buffer/client/share_group.h index a1b6f74f68c..7d074d1a542 100644 --- a/chromium/gpu/command_buffer/client/share_group.h +++ b/chromium/gpu/command_buffer/client/share_group.h @@ -165,9 +165,6 @@ class GLES2_IMPL_EXPORT ShareGroup // thread safe as contexts may be on different threads. 
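
In the rewritten unit tests above, the DeleteGpuRasterTexture expectation uses Pointee(Eq(kTextureId)) rather than a wildcard, so gmock verifies the value behind the pointer that DeleteTextures receives instead of merely counting the call. A self-contained illustration of that matcher composition; the MockGL class and the GL type aliases are hypothetical stand-ins, not part of the patch:

#include "testing/gmock/include/gmock/gmock.h"
#include "testing/gtest/include/gtest/gtest.h"

using testing::Eq;
using testing::Pointee;

using GLsizei = int;          // Stand-in for the GL typedef.
using GLuint = unsigned int;  // Stand-in for the GL typedef.

class MockGL {
 public:
  MOCK_METHOD2(DeleteTextures, void(GLsizei n, const GLuint* textures));
};

TEST(MatcherSketch, PointeeMatchesTheValueBehindThePointer) {
  MockGL gl;
  const GLuint kTextureId = 23;
  // Pointee dereferences the pointer argument and Eq compares the
  // pointed-to value, so a call passing a pointer to any other id
  // fails the expectation.
  EXPECT_CALL(gl, DeleteTextures(1, Pointee(Eq(kTextureId))));
  gl.DeleteTextures(1, &kTextureId);
}
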
bool IsLost() const; - void SetGpuSwitched(bool gpu_switched); - bool GetGpuSwitched() const; - private: friend class gpu::RefCountedThreadSafe<ShareGroup>; friend class gpu::gles2::GLES2ImplementationTest; @@ -189,9 +186,6 @@ class GLES2_IMPL_EXPORT ShareGroup mutable base::Lock lost_lock_; bool lost_ = false; - mutable base::Lock gpu_switched_lock_; - bool gpu_switched_ = false; - DISALLOW_COPY_AND_ASSIGN(ShareGroup); }; diff --git a/chromium/gpu/command_buffer/client/webgpu_cmd_helper_autogen.h b/chromium/gpu/command_buffer/client/webgpu_cmd_helper_autogen.h index a91ea1a6ef2..9d0bec45260 100644 --- a/chromium/gpu/command_buffer/client/webgpu_cmd_helper_autogen.h +++ b/chromium/gpu/command_buffer/client/webgpu_cmd_helper_autogen.h @@ -43,10 +43,23 @@ void DissociateMailbox(GLuint texture_id, GLuint texture_generation) { } } -void RequestAdapter(uint32_t power_preference) { +void RequestAdapter(uint32_t request_adapter_serial, + uint32_t power_preference) { webgpu::cmds::RequestAdapter* c = GetCmdSpace<webgpu::cmds::RequestAdapter>(); if (c) { - c->Init(power_preference); + c->Init(request_adapter_serial, power_preference); + } +} + +void RequestDevice(uint32_t adapter_service_id, + uint32_t request_device_properties_shm_id, + uint32_t request_device_properties_shm_offset, + uint32_t request_device_properties_size) { + webgpu::cmds::RequestDevice* c = GetCmdSpace<webgpu::cmds::RequestDevice>(); + if (c) { + c->Init(adapter_service_id, request_device_properties_shm_id, + request_device_properties_shm_offset, + request_device_properties_size); } } diff --git a/chromium/gpu/command_buffer/client/webgpu_implementation.cc b/chromium/gpu/command_buffer/client/webgpu_implementation.cc index 4093874d30e..185b3bd9935 100644 --- a/chromium/gpu/command_buffer/client/webgpu_implementation.cc +++ b/chromium/gpu/command_buffer/client/webgpu_implementation.cc @@ -238,11 +238,58 @@ void WebGPUImplementation::OnGpuControlReturnData( TRACE_EVENT1(TRACE_DISABLED_BY_DEFAULT("gpu.dawn"), "WebGPUImplementation::OnGpuControlReturnData", "bytes", data.size()); - if (!wire_client_->HandleCommands( - reinterpret_cast<const char*>(data.data()), data.size())) { - // TODO(enga): Lose the context. + + if (data.size() <= sizeof(cmds::DawnReturnDataHeader)) { + // TODO(jiawei.shao@intel.com): Lose the context. NOTREACHED(); } + const cmds::DawnReturnDataHeader& dawnReturnDataHeader = + *reinterpret_cast<const cmds::DawnReturnDataHeader*>(data.data()); + + const uint8_t* dawnReturnDataBody = + data.data() + sizeof(cmds::DawnReturnDataHeader); + size_t dawnReturnDataSize = data.size() - sizeof(cmds::DawnReturnDataHeader); + + switch (dawnReturnDataHeader.return_data_type) { + case DawnReturnDataType::kDawnCommands: + if (!wire_client_->HandleCommands( + reinterpret_cast<const char*>(dawnReturnDataBody), + dawnReturnDataSize)) { + // TODO(enga): Lose the context. + NOTREACHED(); + } + break; + case DawnReturnDataType::kRequestedDawnAdapterProperties: { + const cmds::DawnReturnAdapterInfo* returned_adapter_info = + reinterpret_cast<const cmds::DawnReturnAdapterInfo*>( + dawnReturnDataBody); + + GLuint request_adapter_serial = + returned_adapter_info->adapter_ids.request_adapter_serial; + auto request_callback_iter = + request_adapter_callback_map_.find(request_adapter_serial); + if (request_callback_iter == request_adapter_callback_map_.end()) { + // TODO(jiawei.shao@intel.com): Lose the context. 
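+        // A missing map entry means the service returned adapter info for a
+        // serial this client never issued, or one whose callback already ran
+        // and was erased below; either way the reply cannot be dispatched.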
+ NOTREACHED(); + break; + } + auto& request_callback = request_callback_iter->second; + GLuint adapter_service_id = + returned_adapter_info->adapter_ids.adapter_service_id; + WGPUDeviceProperties adapter_properties = {}; + const volatile char* deserialized_buffer = + reinterpret_cast<const volatile char*>( + returned_adapter_info->deserialized_buffer); + dawn_wire::DeserializeWGPUDeviceProperties(&adapter_properties, + deserialized_buffer); + std::move(request_callback).Run(adapter_service_id, adapter_properties); + request_adapter_callback_map_.erase(request_callback_iter); + } break; + default: + // TODO(jiawei.shao@intel.com): Lose the context. + NOTREACHED(); + break; + } #endif } @@ -325,7 +372,7 @@ void WebGPUImplementation::FlushCommands() { helper_->Flush(); } -DawnDevice WebGPUImplementation::GetDefaultDevice() { +WGPUDevice WebGPUImplementation::GetDefaultDevice() { #if BUILDFLAG(USE_DAWN) return wire_client_->GetDevice(); #else @@ -334,7 +381,7 @@ DawnDevice WebGPUImplementation::GetDefaultDevice() { #endif } -ReservedTexture WebGPUImplementation::ReserveTexture(DawnDevice device) { +ReservedTexture WebGPUImplementation::ReserveTexture(WGPUDevice device) { #if BUILDFLAG(USE_DAWN) dawn_wire::ReservedTexture reservation = wire_client_->ReserveTexture(device); return {reservation.texture, reservation.id, reservation.generation}; @@ -344,11 +391,64 @@ ReservedTexture WebGPUImplementation::ReserveTexture(DawnDevice device) { #endif } -void WebGPUImplementation::RequestAdapter(PowerPreference power_preference) { - GPU_CLIENT_SINGLE_THREAD_CHECK(); - GPU_CLIENT_LOG("[" << GetLogPrefix() << "] wgRequestAdapter(" - << static_cast<uint32_t>(power_preference) << ")"); - helper_->RequestAdapter(static_cast<uint32_t>(power_preference)); +uint32_t WebGPUImplementation::NextRequestAdapterSerial() { + return ++request_adapter_serial_; +} + +bool WebGPUImplementation::RequestAdapterAsync( + PowerPreference power_preference, + base::OnceCallback<void(uint32_t, const WGPUDeviceProperties&)> + request_adapter_callback) { + uint32_t request_adapter_serial = NextRequestAdapterSerial(); + + // Avoid the overflow of request_adapter_serial and old slot being reused. + if (request_adapter_callback_map_.find(request_adapter_serial) != + request_adapter_callback_map_.end()) { + return false; + } + + helper_->RequestAdapter(request_adapter_serial, + static_cast<uint32_t>(power_preference)); + helper_->Flush(); + + request_adapter_callback_map_[request_adapter_serial] = + std::move(request_adapter_callback); + + return true; +} + +bool WebGPUImplementation::RequestDevice( + uint32_t requested_adapter_id, + const WGPUDeviceProperties* requested_device_properties) { +#if BUILDFLAG(USE_DAWN) + if (!requested_device_properties) { + helper_->RequestDevice(requested_adapter_id, 0, 0, 0); + return true; + } + + size_t serialized_device_properties_size = + dawn_wire::SerializedWGPUDevicePropertiesSize( + requested_device_properties); + DCHECK_NE(0u, serialized_device_properties_size); + + // Both transfer_buffer and c2s_buffer_ are created with transfer_buffer_, + // so we need to make c2s_buffer_ clean before transferring + // requested_device_properties with transfer_buffer. 
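+  // Flush() below empties c2s_buffer_ (likely the Dawn wire client-to-server
+  // buffer) so that the ScopedTransferBufferPtr allocated afterwards can
+  // reuse transfer_buffer_ without clobbering not-yet-sent commands.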
+ Flush(); + ScopedTransferBufferPtr transfer_buffer(serialized_device_properties_size, + helper_, transfer_buffer_); + dawn_wire::SerializeWGPUDeviceProperties( + requested_device_properties, + reinterpret_cast<char*>(transfer_buffer.address())); + helper_->RequestDevice(requested_adapter_id, transfer_buffer.shm_id(), + transfer_buffer.offset(), + serialized_device_properties_size); + transfer_buffer.Release(); + return true; +#else + NOTREACHED(); + return false; +#endif } } // namespace webgpu diff --git a/chromium/gpu/command_buffer/client/webgpu_implementation.h b/chromium/gpu/command_buffer/client/webgpu_implementation.h index 982a1c3b122..3d703010cf6 100644 --- a/chromium/gpu/command_buffer/client/webgpu_implementation.h +++ b/chromium/gpu/command_buffer/client/webgpu_implementation.h @@ -5,7 +5,7 @@ #ifndef GPU_COMMAND_BUFFER_CLIENT_WEBGPU_IMPLEMENTATION_H_ #define GPU_COMMAND_BUFFER_CLIENT_WEBGPU_IMPLEMENTATION_H_ -#include <dawn/dawn.h> +#include <dawn/webgpu.h> #include <dawn_wire/WireClient.h> #include <memory> @@ -117,12 +117,20 @@ class WEBGPU_EXPORT WebGPUImplementation final // WebGPUInterface implementation const DawnProcTable& GetProcs() const override; void FlushCommands() override; - DawnDevice GetDefaultDevice() override; - ReservedTexture ReserveTexture(DawnDevice device) override; + WGPUDevice GetDefaultDevice() override; + ReservedTexture ReserveTexture(WGPUDevice device) override; + bool RequestAdapterAsync( + PowerPreference power_preference, + base::OnceCallback<void(uint32_t, const WGPUDeviceProperties&)> + request_adapter_callback) override; + bool RequestDevice( + uint32_t requested_adapter_id, + const WGPUDeviceProperties* requested_device_properties) override; private: const char* GetLogPrefix() const { return "webgpu"; } void CheckGLError() {} + uint32_t NextRequestAdapterSerial(); WebGPUCmdHelper* helper_; #if BUILDFLAG(USE_DAWN) @@ -137,6 +145,12 @@ class WEBGPU_EXPORT WebGPUImplementation final LogSettings log_settings_; + base::flat_map< + uint32_t, + base::OnceCallback<void(uint32_t, const WGPUDeviceProperties&)>> + request_adapter_callback_map_; + uint32_t request_adapter_serial_ = 0; + DISALLOW_COPY_AND_ASSIGN(WebGPUImplementation); }; diff --git a/chromium/gpu/command_buffer/client/webgpu_implementation_autogen.h b/chromium/gpu/command_buffer/client/webgpu_implementation_autogen.h index 7578de19291..1e299de566d 100644 --- a/chromium/gpu/command_buffer/client/webgpu_implementation_autogen.h +++ b/chromium/gpu/command_buffer/client/webgpu_implementation_autogen.h @@ -22,7 +22,4 @@ void AssociateMailbox(GLuint device_id, void DissociateMailbox(GLuint texture_id, GLuint texture_generation) override; -void RequestAdapter(PowerPreference power_preference = - PowerPreference::kHighPerformance) override; - #endif // GPU_COMMAND_BUFFER_CLIENT_WEBGPU_IMPLEMENTATION_AUTOGEN_H_ diff --git a/chromium/gpu/command_buffer/client/webgpu_implementation_unittest_autogen.h b/chromium/gpu/command_buffer/client/webgpu_implementation_unittest_autogen.h index 40c2c409b8b..66a91a31401 100644 --- a/chromium/gpu/command_buffer/client/webgpu_implementation_unittest_autogen.h +++ b/chromium/gpu/command_buffer/client/webgpu_implementation_unittest_autogen.h @@ -39,15 +39,4 @@ TEST_F(WebGPUImplementationTest, DissociateMailbox) { gl_->DissociateMailbox(1, 2); EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected))); } - -TEST_F(WebGPUImplementationTest, RequestAdapter) { - struct Cmds { - cmds::RequestAdapter cmd; - }; - Cmds expected; - expected.cmd.Init(1); - - 
gl_->RequestAdapter(PowerPreference::kHighPerformance); - EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected))); -} #endif // GPU_COMMAND_BUFFER_CLIENT_WEBGPU_IMPLEMENTATION_UNITTEST_AUTOGEN_H_ diff --git a/chromium/gpu/command_buffer/client/webgpu_interface.h b/chromium/gpu/command_buffer/client/webgpu_interface.h index 6255fa7db2a..28f519527da 100644 --- a/chromium/gpu/command_buffer/client/webgpu_interface.h +++ b/chromium/gpu/command_buffer/client/webgpu_interface.h @@ -5,9 +5,10 @@ #ifndef GPU_COMMAND_BUFFER_CLIENT_WEBGPU_INTERFACE_H_ #define GPU_COMMAND_BUFFER_CLIENT_WEBGPU_INTERFACE_H_ -#include <dawn/dawn.h> #include <dawn/dawn_proc_table.h> +#include <dawn/webgpu.h> +#include "base/callback.h" #include "gpu/command_buffer/client/interface_base.h" #include "gpu/command_buffer/common/webgpu_cmd_enums.h" @@ -15,7 +16,7 @@ namespace gpu { namespace webgpu { struct ReservedTexture { - DawnTexture texture; + WGPUTexture texture; uint32_t id; uint32_t generation; }; @@ -27,8 +28,15 @@ class WebGPUInterface : public InterfaceBase { virtual const DawnProcTable& GetProcs() const = 0; virtual void FlushCommands() = 0; - virtual DawnDevice GetDefaultDevice() = 0; - virtual ReservedTexture ReserveTexture(DawnDevice device) = 0; + virtual WGPUDevice GetDefaultDevice() = 0; + virtual ReservedTexture ReserveTexture(WGPUDevice device) = 0; + virtual bool RequestAdapterAsync( + PowerPreference power_preference, + base::OnceCallback<void(uint32_t, const WGPUDeviceProperties&)> + request_adapter_callback) = 0; + virtual bool RequestDevice( + uint32_t adapter_service_id, + const WGPUDeviceProperties* requested_device_properties) = 0; // Include the auto-generated part of this class. We split this because // it means we can easily edit the non-auto generated parts right here in diff --git a/chromium/gpu/command_buffer/client/webgpu_interface_autogen.h b/chromium/gpu/command_buffer/client/webgpu_interface_autogen.h index 2c3618263b7..92784474d30 100644 --- a/chromium/gpu/command_buffer/client/webgpu_interface_autogen.h +++ b/chromium/gpu/command_buffer/client/webgpu_interface_autogen.h @@ -21,6 +21,4 @@ virtual void AssociateMailbox(GLuint device_id, const GLbyte* mailbox) = 0; virtual void DissociateMailbox(GLuint texture_id, GLuint texture_generation) = 0; -virtual void RequestAdapter( - PowerPreference power_preference = PowerPreference::kHighPerformance) = 0; #endif // GPU_COMMAND_BUFFER_CLIENT_WEBGPU_INTERFACE_AUTOGEN_H_ diff --git a/chromium/gpu/command_buffer/client/webgpu_interface_stub.cc b/chromium/gpu/command_buffer/client/webgpu_interface_stub.cc index c0163e33dfc..f2ce5c9da9f 100644 --- a/chromium/gpu/command_buffer/client/webgpu_interface_stub.cc +++ b/chromium/gpu/command_buffer/client/webgpu_interface_stub.cc @@ -23,12 +23,23 @@ const DawnProcTable& WebGPUInterfaceStub::GetProcs() const { return null_procs_; } void WebGPUInterfaceStub::FlushCommands() {} -DawnDevice WebGPUInterfaceStub::GetDefaultDevice() { +WGPUDevice WebGPUInterfaceStub::GetDefaultDevice() { return nullptr; } -ReservedTexture WebGPUInterfaceStub::ReserveTexture(DawnDevice device) { +ReservedTexture WebGPUInterfaceStub::ReserveTexture(WGPUDevice device) { return {nullptr, 0, 0}; } +bool WebGPUInterfaceStub::RequestAdapterAsync( + PowerPreference power_preference, + base::OnceCallback<void(uint32_t, const WGPUDeviceProperties&)> + request_adapter_callback) { + return false; +} +bool WebGPUInterfaceStub::RequestDevice( + uint32_t adapter_service_id, + const WGPUDeviceProperties* requested_device_properties) { + 
return false; +} // Include the auto-generated part of this class. We split this because // it means we can easily edit the non-auto generated parts right here in diff --git a/chromium/gpu/command_buffer/client/webgpu_interface_stub.h b/chromium/gpu/command_buffer/client/webgpu_interface_stub.h index e16a57ec132..efeae734f76 100644 --- a/chromium/gpu/command_buffer/client/webgpu_interface_stub.h +++ b/chromium/gpu/command_buffer/client/webgpu_interface_stub.h @@ -25,8 +25,15 @@ class WebGPUInterfaceStub : public WebGPUInterface { // WebGPUInterface implementation const DawnProcTable& GetProcs() const override; void FlushCommands() override; - DawnDevice GetDefaultDevice() override; - ReservedTexture ReserveTexture(DawnDevice device) override; + WGPUDevice GetDefaultDevice() override; + ReservedTexture ReserveTexture(WGPUDevice device) override; + bool RequestAdapterAsync( + PowerPreference power_preference, + base::OnceCallback<void(uint32_t, const WGPUDeviceProperties&)> + request_adapter_callback) override; + bool RequestDevice( + uint32_t adapter_service_id, + const WGPUDeviceProperties* requested_device_properties) override; // Include the auto-generated part of this class. We split this because // it means we can easily edit the non-auto generated parts right here in diff --git a/chromium/gpu/command_buffer/client/webgpu_interface_stub_autogen.h b/chromium/gpu/command_buffer/client/webgpu_interface_stub_autogen.h index d3f713a2679..6d23a6873e3 100644 --- a/chromium/gpu/command_buffer/client/webgpu_interface_stub_autogen.h +++ b/chromium/gpu/command_buffer/client/webgpu_interface_stub_autogen.h @@ -19,5 +19,4 @@ void AssociateMailbox(GLuint device_id, GLuint usage, const GLbyte* mailbox) override; void DissociateMailbox(GLuint texture_id, GLuint texture_generation) override; -void RequestAdapter(PowerPreference power_preference) override; #endif // GPU_COMMAND_BUFFER_CLIENT_WEBGPU_INTERFACE_STUB_AUTOGEN_H_ diff --git a/chromium/gpu/command_buffer/client/webgpu_interface_stub_impl_autogen.h b/chromium/gpu/command_buffer/client/webgpu_interface_stub_impl_autogen.h index 31bafe7a9b7..0d89b6896a4 100644 --- a/chromium/gpu/command_buffer/client/webgpu_interface_stub_impl_autogen.h +++ b/chromium/gpu/command_buffer/client/webgpu_interface_stub_impl_autogen.h @@ -20,6 +20,4 @@ void WebGPUInterfaceStub::AssociateMailbox(GLuint /* device_id */, const GLbyte* /* mailbox */) {} void WebGPUInterfaceStub::DissociateMailbox(GLuint /* texture_id */, GLuint /* texture_generation */) {} -void WebGPUInterfaceStub::RequestAdapter( - PowerPreference /* power_preference */) {} #endif // GPU_COMMAND_BUFFER_CLIENT_WEBGPU_INTERFACE_STUB_IMPL_AUTOGEN_H_ diff --git a/chromium/gpu/command_buffer/common/gles2_cmd_format_autogen.h b/chromium/gpu/command_buffer/common/gles2_cmd_format_autogen.h index d180e54b6db..3635c8ae565 100644 --- a/chromium/gpu/command_buffer/common/gles2_cmd_format_autogen.h +++ b/chromium/gpu/command_buffer/common/gles2_cmd_format_autogen.h @@ -7711,6 +7711,120 @@ static_assert( offsetof(MultiDrawArraysInstancedCHROMIUM, drawcount) == 32, "offset of MultiDrawArraysInstancedCHROMIUM drawcount should be 32"); +struct MultiDrawArraysInstancedBaseInstanceCHROMIUM { + typedef MultiDrawArraysInstancedBaseInstanceCHROMIUM ValueType; + static const CommandId kCmdId = kMultiDrawArraysInstancedBaseInstanceCHROMIUM; + static const cmd::ArgFlags kArgFlags = cmd::kFixed; + static const uint8_t cmd_flags = CMD_FLAG_SET_TRACE_LEVEL(2); + + static uint32_t ComputeSize() { + return 
static_cast<uint32_t>(sizeof(ValueType)); // NOLINT + } + + void SetHeader() { header.SetCmd<ValueType>(); } + + void Init(GLenum _mode, + uint32_t _firsts_shm_id, + uint32_t _firsts_shm_offset, + uint32_t _counts_shm_id, + uint32_t _counts_shm_offset, + uint32_t _instance_counts_shm_id, + uint32_t _instance_counts_shm_offset, + uint32_t _baseinstances_shm_id, + uint32_t _baseinstances_shm_offset, + GLsizei _drawcount) { + SetHeader(); + mode = _mode; + firsts_shm_id = _firsts_shm_id; + firsts_shm_offset = _firsts_shm_offset; + counts_shm_id = _counts_shm_id; + counts_shm_offset = _counts_shm_offset; + instance_counts_shm_id = _instance_counts_shm_id; + instance_counts_shm_offset = _instance_counts_shm_offset; + baseinstances_shm_id = _baseinstances_shm_id; + baseinstances_shm_offset = _baseinstances_shm_offset; + drawcount = _drawcount; + } + + void* Set(void* cmd, + GLenum _mode, + uint32_t _firsts_shm_id, + uint32_t _firsts_shm_offset, + uint32_t _counts_shm_id, + uint32_t _counts_shm_offset, + uint32_t _instance_counts_shm_id, + uint32_t _instance_counts_shm_offset, + uint32_t _baseinstances_shm_id, + uint32_t _baseinstances_shm_offset, + GLsizei _drawcount) { + static_cast<ValueType*>(cmd)->Init( + _mode, _firsts_shm_id, _firsts_shm_offset, _counts_shm_id, + _counts_shm_offset, _instance_counts_shm_id, + _instance_counts_shm_offset, _baseinstances_shm_id, + _baseinstances_shm_offset, _drawcount); + return NextCmdAddress<ValueType>(cmd); + } + + gpu::CommandHeader header; + uint32_t mode; + uint32_t firsts_shm_id; + uint32_t firsts_shm_offset; + uint32_t counts_shm_id; + uint32_t counts_shm_offset; + uint32_t instance_counts_shm_id; + uint32_t instance_counts_shm_offset; + uint32_t baseinstances_shm_id; + uint32_t baseinstances_shm_offset; + int32_t drawcount; +}; + +static_assert( + sizeof(MultiDrawArraysInstancedBaseInstanceCHROMIUM) == 44, + "size of MultiDrawArraysInstancedBaseInstanceCHROMIUM should be 44"); +static_assert(offsetof(MultiDrawArraysInstancedBaseInstanceCHROMIUM, header) == + 0, + "offset of MultiDrawArraysInstancedBaseInstanceCHROMIUM header " + "should be 0"); +static_assert( + offsetof(MultiDrawArraysInstancedBaseInstanceCHROMIUM, mode) == 4, + "offset of MultiDrawArraysInstancedBaseInstanceCHROMIUM mode should be 4"); +static_assert(offsetof(MultiDrawArraysInstancedBaseInstanceCHROMIUM, + firsts_shm_id) == 8, + "offset of MultiDrawArraysInstancedBaseInstanceCHROMIUM " + "firsts_shm_id should be 8"); +static_assert(offsetof(MultiDrawArraysInstancedBaseInstanceCHROMIUM, + firsts_shm_offset) == 12, + "offset of MultiDrawArraysInstancedBaseInstanceCHROMIUM " + "firsts_shm_offset should be 12"); +static_assert(offsetof(MultiDrawArraysInstancedBaseInstanceCHROMIUM, + counts_shm_id) == 16, + "offset of MultiDrawArraysInstancedBaseInstanceCHROMIUM " + "counts_shm_id should be 16"); +static_assert(offsetof(MultiDrawArraysInstancedBaseInstanceCHROMIUM, + counts_shm_offset) == 20, + "offset of MultiDrawArraysInstancedBaseInstanceCHROMIUM " + "counts_shm_offset should be 20"); +static_assert(offsetof(MultiDrawArraysInstancedBaseInstanceCHROMIUM, + instance_counts_shm_id) == 24, + "offset of MultiDrawArraysInstancedBaseInstanceCHROMIUM " + "instance_counts_shm_id should be 24"); +static_assert(offsetof(MultiDrawArraysInstancedBaseInstanceCHROMIUM, + instance_counts_shm_offset) == 28, + "offset of MultiDrawArraysInstancedBaseInstanceCHROMIUM " + "instance_counts_shm_offset should be 28"); +static_assert(offsetof(MultiDrawArraysInstancedBaseInstanceCHROMIUM, + 
baseinstances_shm_id) == 32, + "offset of MultiDrawArraysInstancedBaseInstanceCHROMIUM " + "baseinstances_shm_id should be 32"); +static_assert(offsetof(MultiDrawArraysInstancedBaseInstanceCHROMIUM, + baseinstances_shm_offset) == 36, + "offset of MultiDrawArraysInstancedBaseInstanceCHROMIUM " + "baseinstances_shm_offset should be 36"); +static_assert(offsetof(MultiDrawArraysInstancedBaseInstanceCHROMIUM, + drawcount) == 40, + "offset of MultiDrawArraysInstancedBaseInstanceCHROMIUM " + "drawcount should be 40"); + struct MultiDrawElementsCHROMIUM { typedef MultiDrawElementsCHROMIUM ValueType; static const CommandId kCmdId = kMultiDrawElementsCHROMIUM; @@ -7883,6 +7997,162 @@ static_assert( offsetof(MultiDrawElementsInstancedCHROMIUM, drawcount) == 36, "offset of MultiDrawElementsInstancedCHROMIUM drawcount should be 36"); +struct MultiDrawElementsInstancedBaseVertexBaseInstanceCHROMIUM { + typedef MultiDrawElementsInstancedBaseVertexBaseInstanceCHROMIUM ValueType; + static const CommandId kCmdId = + kMultiDrawElementsInstancedBaseVertexBaseInstanceCHROMIUM; + static const cmd::ArgFlags kArgFlags = cmd::kFixed; + static const uint8_t cmd_flags = CMD_FLAG_SET_TRACE_LEVEL(2); + + static uint32_t ComputeSize() { + return static_cast<uint32_t>(sizeof(ValueType)); // NOLINT + } + + void SetHeader() { header.SetCmd<ValueType>(); } + + void Init(GLenum _mode, + uint32_t _counts_shm_id, + uint32_t _counts_shm_offset, + GLenum _type, + uint32_t _offsets_shm_id, + uint32_t _offsets_shm_offset, + uint32_t _instance_counts_shm_id, + uint32_t _instance_counts_shm_offset, + uint32_t _basevertices_shm_id, + uint32_t _basevertices_shm_offset, + uint32_t _baseinstances_shm_id, + uint32_t _baseinstances_shm_offset, + GLsizei _drawcount) { + SetHeader(); + mode = _mode; + counts_shm_id = _counts_shm_id; + counts_shm_offset = _counts_shm_offset; + type = _type; + offsets_shm_id = _offsets_shm_id; + offsets_shm_offset = _offsets_shm_offset; + instance_counts_shm_id = _instance_counts_shm_id; + instance_counts_shm_offset = _instance_counts_shm_offset; + basevertices_shm_id = _basevertices_shm_id; + basevertices_shm_offset = _basevertices_shm_offset; + baseinstances_shm_id = _baseinstances_shm_id; + baseinstances_shm_offset = _baseinstances_shm_offset; + drawcount = _drawcount; + } + + void* Set(void* cmd, + GLenum _mode, + uint32_t _counts_shm_id, + uint32_t _counts_shm_offset, + GLenum _type, + uint32_t _offsets_shm_id, + uint32_t _offsets_shm_offset, + uint32_t _instance_counts_shm_id, + uint32_t _instance_counts_shm_offset, + uint32_t _basevertices_shm_id, + uint32_t _basevertices_shm_offset, + uint32_t _baseinstances_shm_id, + uint32_t _baseinstances_shm_offset, + GLsizei _drawcount) { + static_cast<ValueType*>(cmd)->Init( + _mode, _counts_shm_id, _counts_shm_offset, _type, _offsets_shm_id, + _offsets_shm_offset, _instance_counts_shm_id, + _instance_counts_shm_offset, _basevertices_shm_id, + _basevertices_shm_offset, _baseinstances_shm_id, + _baseinstances_shm_offset, _drawcount); + return NextCmdAddress<ValueType>(cmd); + } + + gpu::CommandHeader header; + uint32_t mode; + uint32_t counts_shm_id; + uint32_t counts_shm_offset; + uint32_t type; + uint32_t offsets_shm_id; + uint32_t offsets_shm_offset; + uint32_t instance_counts_shm_id; + uint32_t instance_counts_shm_offset; + uint32_t basevertices_shm_id; + uint32_t basevertices_shm_offset; + uint32_t baseinstances_shm_id; + uint32_t baseinstances_shm_offset; + int32_t drawcount; +}; + +static_assert( + 
sizeof(MultiDrawElementsInstancedBaseVertexBaseInstanceCHROMIUM) == 56, + "size of MultiDrawElementsInstancedBaseVertexBaseInstanceCHROMIUM should " + "be 56"); +static_assert( + offsetof(MultiDrawElementsInstancedBaseVertexBaseInstanceCHROMIUM, + header) == 0, + "offset of MultiDrawElementsInstancedBaseVertexBaseInstanceCHROMIUM header " + "should be 0"); +static_assert( + offsetof(MultiDrawElementsInstancedBaseVertexBaseInstanceCHROMIUM, mode) == + 4, + "offset of MultiDrawElementsInstancedBaseVertexBaseInstanceCHROMIUM mode " + "should be 4"); +static_assert( + offsetof(MultiDrawElementsInstancedBaseVertexBaseInstanceCHROMIUM, + counts_shm_id) == 8, + "offset of MultiDrawElementsInstancedBaseVertexBaseInstanceCHROMIUM " + "counts_shm_id should be 8"); +static_assert( + offsetof(MultiDrawElementsInstancedBaseVertexBaseInstanceCHROMIUM, + counts_shm_offset) == 12, + "offset of MultiDrawElementsInstancedBaseVertexBaseInstanceCHROMIUM " + "counts_shm_offset should be 12"); +static_assert( + offsetof(MultiDrawElementsInstancedBaseVertexBaseInstanceCHROMIUM, type) == + 16, + "offset of MultiDrawElementsInstancedBaseVertexBaseInstanceCHROMIUM type " + "should be 16"); +static_assert( + offsetof(MultiDrawElementsInstancedBaseVertexBaseInstanceCHROMIUM, + offsets_shm_id) == 20, + "offset of MultiDrawElementsInstancedBaseVertexBaseInstanceCHROMIUM " + "offsets_shm_id should be 20"); +static_assert( + offsetof(MultiDrawElementsInstancedBaseVertexBaseInstanceCHROMIUM, + offsets_shm_offset) == 24, + "offset of MultiDrawElementsInstancedBaseVertexBaseInstanceCHROMIUM " + "offsets_shm_offset should be 24"); +static_assert( + offsetof(MultiDrawElementsInstancedBaseVertexBaseInstanceCHROMIUM, + instance_counts_shm_id) == 28, + "offset of MultiDrawElementsInstancedBaseVertexBaseInstanceCHROMIUM " + "instance_counts_shm_id should be 28"); +static_assert( + offsetof(MultiDrawElementsInstancedBaseVertexBaseInstanceCHROMIUM, + instance_counts_shm_offset) == 32, + "offset of MultiDrawElementsInstancedBaseVertexBaseInstanceCHROMIUM " + "instance_counts_shm_offset should be 32"); +static_assert( + offsetof(MultiDrawElementsInstancedBaseVertexBaseInstanceCHROMIUM, + basevertices_shm_id) == 36, + "offset of MultiDrawElementsInstancedBaseVertexBaseInstanceCHROMIUM " + "basevertices_shm_id should be 36"); +static_assert( + offsetof(MultiDrawElementsInstancedBaseVertexBaseInstanceCHROMIUM, + basevertices_shm_offset) == 40, + "offset of MultiDrawElementsInstancedBaseVertexBaseInstanceCHROMIUM " + "basevertices_shm_offset should be 40"); +static_assert( + offsetof(MultiDrawElementsInstancedBaseVertexBaseInstanceCHROMIUM, + baseinstances_shm_id) == 44, + "offset of MultiDrawElementsInstancedBaseVertexBaseInstanceCHROMIUM " + "baseinstances_shm_id should be 44"); +static_assert( + offsetof(MultiDrawElementsInstancedBaseVertexBaseInstanceCHROMIUM, + baseinstances_shm_offset) == 48, + "offset of MultiDrawElementsInstancedBaseVertexBaseInstanceCHROMIUM " + "baseinstances_shm_offset should be 48"); +static_assert( + offsetof(MultiDrawElementsInstancedBaseVertexBaseInstanceCHROMIUM, + drawcount) == 52, + "offset of MultiDrawElementsInstancedBaseVertexBaseInstanceCHROMIUM " + "drawcount should be 52"); + struct StencilFunc { typedef StencilFunc ValueType; static const CommandId kCmdId = kStencilFunc; @@ -13896,6 +14166,71 @@ static_assert(offsetof(DrawArraysInstancedANGLE, count) == 12, static_assert(offsetof(DrawArraysInstancedANGLE, primcount) == 16, "offset of DrawArraysInstancedANGLE primcount should be 16"); +struct 
DrawArraysInstancedBaseInstanceANGLE { + typedef DrawArraysInstancedBaseInstanceANGLE ValueType; + static const CommandId kCmdId = kDrawArraysInstancedBaseInstanceANGLE; + static const cmd::ArgFlags kArgFlags = cmd::kFixed; + static const uint8_t cmd_flags = CMD_FLAG_SET_TRACE_LEVEL(2); + + static uint32_t ComputeSize() { + return static_cast<uint32_t>(sizeof(ValueType)); // NOLINT + } + + void SetHeader() { header.SetCmd<ValueType>(); } + + void Init(GLenum _mode, + GLint _first, + GLsizei _count, + GLsizei _primcount, + GLuint _baseinstance) { + SetHeader(); + mode = _mode; + first = _first; + count = _count; + primcount = _primcount; + baseinstance = _baseinstance; + } + + void* Set(void* cmd, + GLenum _mode, + GLint _first, + GLsizei _count, + GLsizei _primcount, + GLuint _baseinstance) { + static_cast<ValueType*>(cmd)->Init(_mode, _first, _count, _primcount, + _baseinstance); + return NextCmdAddress<ValueType>(cmd); + } + + gpu::CommandHeader header; + uint32_t mode; + int32_t first; + int32_t count; + int32_t primcount; + uint32_t baseinstance; +}; + +static_assert(sizeof(DrawArraysInstancedBaseInstanceANGLE) == 24, + "size of DrawArraysInstancedBaseInstanceANGLE should be 24"); +static_assert( + offsetof(DrawArraysInstancedBaseInstanceANGLE, header) == 0, + "offset of DrawArraysInstancedBaseInstanceANGLE header should be 0"); +static_assert( + offsetof(DrawArraysInstancedBaseInstanceANGLE, mode) == 4, + "offset of DrawArraysInstancedBaseInstanceANGLE mode should be 4"); +static_assert( + offsetof(DrawArraysInstancedBaseInstanceANGLE, first) == 8, + "offset of DrawArraysInstancedBaseInstanceANGLE first should be 8"); +static_assert( + offsetof(DrawArraysInstancedBaseInstanceANGLE, count) == 12, + "offset of DrawArraysInstancedBaseInstanceANGLE count should be 12"); +static_assert( + offsetof(DrawArraysInstancedBaseInstanceANGLE, primcount) == 16, + "offset of DrawArraysInstancedBaseInstanceANGLE primcount should be 16"); +static_assert( + offsetof(DrawArraysInstancedBaseInstanceANGLE, baseinstance) == 20, + "offset of DrawArraysInstancedBaseInstanceANGLE baseinstance should be 20"); + struct DrawElementsInstancedANGLE { typedef DrawElementsInstancedANGLE ValueType; static const CommandId kCmdId = kDrawElementsInstancedANGLE; @@ -13955,6 +14290,95 @@ static_assert(offsetof(DrawElementsInstancedANGLE, index_offset) == 16, static_assert(offsetof(DrawElementsInstancedANGLE, primcount) == 20, "offset of DrawElementsInstancedANGLE primcount should be 20"); +struct DrawElementsInstancedBaseVertexBaseInstanceANGLE { + typedef DrawElementsInstancedBaseVertexBaseInstanceANGLE ValueType; + static const CommandId kCmdId = + kDrawElementsInstancedBaseVertexBaseInstanceANGLE; + static const cmd::ArgFlags kArgFlags = cmd::kFixed; + static const uint8_t cmd_flags = CMD_FLAG_SET_TRACE_LEVEL(2); + + static uint32_t ComputeSize() { + return static_cast<uint32_t>(sizeof(ValueType)); // NOLINT + } + + void SetHeader() { header.SetCmd<ValueType>(); } + + void Init(GLenum _mode, + GLsizei _count, + GLenum _type, + GLuint _index_offset, + GLsizei _primcount, + GLint _basevertex, + GLuint _baseinstance) { + SetHeader(); + mode = _mode; + count = _count; + type = _type; + index_offset = _index_offset; + primcount = _primcount; + basevertex = _basevertex; + baseinstance = _baseinstance; + } + + void* Set(void* cmd, + GLenum _mode, + GLsizei _count, + GLenum _type, + GLuint _index_offset, + GLsizei _primcount, + GLint _basevertex, + GLuint _baseinstance) { + static_cast<ValueType*>(cmd)->Init(_mode, _count, 
_type, _index_offset, + _primcount, _basevertex, _baseinstance); + return NextCmdAddress<ValueType>(cmd); + } + + gpu::CommandHeader header; + uint32_t mode; + int32_t count; + uint32_t type; + uint32_t index_offset; + int32_t primcount; + int32_t basevertex; + uint32_t baseinstance; +}; + +static_assert( + sizeof(DrawElementsInstancedBaseVertexBaseInstanceANGLE) == 32, + "size of DrawElementsInstancedBaseVertexBaseInstanceANGLE should be 32"); +static_assert(offsetof(DrawElementsInstancedBaseVertexBaseInstanceANGLE, + header) == 0, + "offset of DrawElementsInstancedBaseVertexBaseInstanceANGLE " + "header should be 0"); +static_assert(offsetof(DrawElementsInstancedBaseVertexBaseInstanceANGLE, + mode) == 4, + "offset of DrawElementsInstancedBaseVertexBaseInstanceANGLE mode " + "should be 4"); +static_assert(offsetof(DrawElementsInstancedBaseVertexBaseInstanceANGLE, + count) == 8, + "offset of DrawElementsInstancedBaseVertexBaseInstanceANGLE " + "count should be 8"); +static_assert(offsetof(DrawElementsInstancedBaseVertexBaseInstanceANGLE, + type) == 12, + "offset of DrawElementsInstancedBaseVertexBaseInstanceANGLE type " + "should be 12"); +static_assert(offsetof(DrawElementsInstancedBaseVertexBaseInstanceANGLE, + index_offset) == 16, + "offset of DrawElementsInstancedBaseVertexBaseInstanceANGLE " + "index_offset should be 16"); +static_assert(offsetof(DrawElementsInstancedBaseVertexBaseInstanceANGLE, + primcount) == 20, + "offset of DrawElementsInstancedBaseVertexBaseInstanceANGLE " + "primcount should be 20"); +static_assert(offsetof(DrawElementsInstancedBaseVertexBaseInstanceANGLE, + basevertex) == 24, + "offset of DrawElementsInstancedBaseVertexBaseInstanceANGLE " + "basevertex should be 24"); +static_assert(offsetof(DrawElementsInstancedBaseVertexBaseInstanceANGLE, + baseinstance) == 28, + "offset of DrawElementsInstancedBaseVertexBaseInstanceANGLE " + "baseinstance should be 28"); + struct VertexAttribDivisorANGLE { typedef VertexAttribDivisorANGLE ValueType; static const CommandId kCmdId = kVertexAttribDivisorANGLE; @@ -16607,34 +17031,6 @@ static_assert(sizeof(BlendBarrierKHR) == 4, static_assert(offsetof(BlendBarrierKHR, header) == 0, "offset of BlendBarrierKHR header should be 0"); -struct ApplyScreenSpaceAntialiasingCHROMIUM { - typedef ApplyScreenSpaceAntialiasingCHROMIUM ValueType; - static const CommandId kCmdId = kApplyScreenSpaceAntialiasingCHROMIUM; - static const cmd::ArgFlags kArgFlags = cmd::kFixed; - static const uint8_t cmd_flags = CMD_FLAG_SET_TRACE_LEVEL(3); - - static uint32_t ComputeSize() { - return static_cast<uint32_t>(sizeof(ValueType)); // NOLINT - } - - void SetHeader() { header.SetCmd<ValueType>(); } - - void Init() { SetHeader(); } - - void* Set(void* cmd) { - static_cast<ValueType*>(cmd)->Init(); - return NextCmdAddress<ValueType>(cmd); - } - - gpu::CommandHeader header; -}; - -static_assert(sizeof(ApplyScreenSpaceAntialiasingCHROMIUM) == 4, - "size of ApplyScreenSpaceAntialiasingCHROMIUM should be 4"); -static_assert( - offsetof(ApplyScreenSpaceAntialiasingCHROMIUM, header) == 0, - "offset of ApplyScreenSpaceAntialiasingCHROMIUM header should be 0"); - struct BindFragDataLocationIndexedEXTBucket { typedef BindFragDataLocationIndexedEXTBucket ValueType; static const CommandId kCmdId = kBindFragDataLocationIndexedEXTBucket; diff --git a/chromium/gpu/command_buffer/common/gles2_cmd_format_test_autogen.h b/chromium/gpu/command_buffer/common/gles2_cmd_format_test_autogen.h index 314e2a7bfbb..43c019660e3 100644 --- 
a/chromium/gpu/command_buffer/common/gles2_cmd_format_test_autogen.h +++ b/chromium/gpu/command_buffer/common/gles2_cmd_format_test_autogen.h @@ -2360,6 +2360,32 @@ TEST_F(GLES2FormatTest, MultiDrawArraysInstancedCHROMIUM) { CheckBytesWrittenMatchesExpectedSize(next_cmd, sizeof(cmd)); } +TEST_F(GLES2FormatTest, MultiDrawArraysInstancedBaseInstanceCHROMIUM) { + cmds::MultiDrawArraysInstancedBaseInstanceCHROMIUM& cmd = + *GetBufferAs<cmds::MultiDrawArraysInstancedBaseInstanceCHROMIUM>(); + void* next_cmd = + cmd.Set(&cmd, static_cast<GLenum>(11), static_cast<uint32_t>(12), + static_cast<uint32_t>(13), static_cast<uint32_t>(14), + static_cast<uint32_t>(15), static_cast<uint32_t>(16), + static_cast<uint32_t>(17), static_cast<uint32_t>(18), + static_cast<uint32_t>(19), static_cast<GLsizei>(20)); + EXPECT_EQ(static_cast<uint32_t>( + cmds::MultiDrawArraysInstancedBaseInstanceCHROMIUM::kCmdId), + cmd.header.command); + EXPECT_EQ(sizeof(cmd), cmd.header.size * 4u); + EXPECT_EQ(static_cast<GLenum>(11), cmd.mode); + EXPECT_EQ(static_cast<uint32_t>(12), cmd.firsts_shm_id); + EXPECT_EQ(static_cast<uint32_t>(13), cmd.firsts_shm_offset); + EXPECT_EQ(static_cast<uint32_t>(14), cmd.counts_shm_id); + EXPECT_EQ(static_cast<uint32_t>(15), cmd.counts_shm_offset); + EXPECT_EQ(static_cast<uint32_t>(16), cmd.instance_counts_shm_id); + EXPECT_EQ(static_cast<uint32_t>(17), cmd.instance_counts_shm_offset); + EXPECT_EQ(static_cast<uint32_t>(18), cmd.baseinstances_shm_id); + EXPECT_EQ(static_cast<uint32_t>(19), cmd.baseinstances_shm_offset); + EXPECT_EQ(static_cast<GLsizei>(20), cmd.drawcount); + CheckBytesWrittenMatchesExpectedSize(next_cmd, sizeof(cmd)); +} + TEST_F(GLES2FormatTest, MultiDrawElementsCHROMIUM) { cmds::MultiDrawElementsCHROMIUM& cmd = *GetBufferAs<cmds::MultiDrawElementsCHROMIUM>(); @@ -2404,6 +2430,39 @@ TEST_F(GLES2FormatTest, MultiDrawElementsInstancedCHROMIUM) { CheckBytesWrittenMatchesExpectedSize(next_cmd, sizeof(cmd)); } +TEST_F(GLES2FormatTest, + MultiDrawElementsInstancedBaseVertexBaseInstanceCHROMIUM) { + cmds::MultiDrawElementsInstancedBaseVertexBaseInstanceCHROMIUM& cmd = + *GetBufferAs< + cmds::MultiDrawElementsInstancedBaseVertexBaseInstanceCHROMIUM>(); + void* next_cmd = cmd.Set(&cmd, static_cast<GLenum>(11), + static_cast<uint32_t>(12), static_cast<uint32_t>(13), + static_cast<GLenum>(14), static_cast<uint32_t>(15), + static_cast<uint32_t>(16), static_cast<uint32_t>(17), + static_cast<uint32_t>(18), static_cast<uint32_t>(19), + static_cast<uint32_t>(20), static_cast<uint32_t>(21), + static_cast<uint32_t>(22), static_cast<GLsizei>(23)); + EXPECT_EQ(static_cast<uint32_t>( + cmds::MultiDrawElementsInstancedBaseVertexBaseInstanceCHROMIUM:: + kCmdId), + cmd.header.command); + EXPECT_EQ(sizeof(cmd), cmd.header.size * 4u); + EXPECT_EQ(static_cast<GLenum>(11), cmd.mode); + EXPECT_EQ(static_cast<uint32_t>(12), cmd.counts_shm_id); + EXPECT_EQ(static_cast<uint32_t>(13), cmd.counts_shm_offset); + EXPECT_EQ(static_cast<GLenum>(14), cmd.type); + EXPECT_EQ(static_cast<uint32_t>(15), cmd.offsets_shm_id); + EXPECT_EQ(static_cast<uint32_t>(16), cmd.offsets_shm_offset); + EXPECT_EQ(static_cast<uint32_t>(17), cmd.instance_counts_shm_id); + EXPECT_EQ(static_cast<uint32_t>(18), cmd.instance_counts_shm_offset); + EXPECT_EQ(static_cast<uint32_t>(19), cmd.basevertices_shm_id); + EXPECT_EQ(static_cast<uint32_t>(20), cmd.basevertices_shm_offset); + EXPECT_EQ(static_cast<uint32_t>(21), cmd.baseinstances_shm_id); + EXPECT_EQ(static_cast<uint32_t>(22), cmd.baseinstances_shm_offset); + 
EXPECT_EQ(static_cast<GLsizei>(23), cmd.drawcount); + CheckBytesWrittenMatchesExpectedSize(next_cmd, sizeof(cmd)); +} + TEST_F(GLES2FormatTest, StencilFunc) { cmds::StencilFunc& cmd = *GetBufferAs<cmds::StencilFunc>(); void* next_cmd = cmd.Set(&cmd, static_cast<GLenum>(11), @@ -4587,6 +4646,24 @@ TEST_F(GLES2FormatTest, DrawArraysInstancedANGLE) { CheckBytesWrittenMatchesExpectedSize(next_cmd, sizeof(cmd)); } +TEST_F(GLES2FormatTest, DrawArraysInstancedBaseInstanceANGLE) { + cmds::DrawArraysInstancedBaseInstanceANGLE& cmd = + *GetBufferAs<cmds::DrawArraysInstancedBaseInstanceANGLE>(); + void* next_cmd = cmd.Set(&cmd, static_cast<GLenum>(11), + static_cast<GLint>(12), static_cast<GLsizei>(13), + static_cast<GLsizei>(14), static_cast<GLuint>(15)); + EXPECT_EQ( + static_cast<uint32_t>(cmds::DrawArraysInstancedBaseInstanceANGLE::kCmdId), + cmd.header.command); + EXPECT_EQ(sizeof(cmd), cmd.header.size * 4u); + EXPECT_EQ(static_cast<GLenum>(11), cmd.mode); + EXPECT_EQ(static_cast<GLint>(12), cmd.first); + EXPECT_EQ(static_cast<GLsizei>(13), cmd.count); + EXPECT_EQ(static_cast<GLsizei>(14), cmd.primcount); + EXPECT_EQ(static_cast<GLuint>(15), cmd.baseinstance); + CheckBytesWrittenMatchesExpectedSize(next_cmd, sizeof(cmd)); +} + TEST_F(GLES2FormatTest, DrawElementsInstancedANGLE) { cmds::DrawElementsInstancedANGLE& cmd = *GetBufferAs<cmds::DrawElementsInstancedANGLE>(); @@ -4604,6 +4681,27 @@ TEST_F(GLES2FormatTest, DrawElementsInstancedANGLE) { CheckBytesWrittenMatchesExpectedSize(next_cmd, sizeof(cmd)); } +TEST_F(GLES2FormatTest, DrawElementsInstancedBaseVertexBaseInstanceANGLE) { + cmds::DrawElementsInstancedBaseVertexBaseInstanceANGLE& cmd = + *GetBufferAs<cmds::DrawElementsInstancedBaseVertexBaseInstanceANGLE>(); + void* next_cmd = cmd.Set(&cmd, static_cast<GLenum>(11), + static_cast<GLsizei>(12), static_cast<GLenum>(13), + static_cast<GLuint>(14), static_cast<GLsizei>(15), + static_cast<GLint>(16), static_cast<GLuint>(17)); + EXPECT_EQ(static_cast<uint32_t>( + cmds::DrawElementsInstancedBaseVertexBaseInstanceANGLE::kCmdId), + cmd.header.command); + EXPECT_EQ(sizeof(cmd), cmd.header.size * 4u); + EXPECT_EQ(static_cast<GLenum>(11), cmd.mode); + EXPECT_EQ(static_cast<GLsizei>(12), cmd.count); + EXPECT_EQ(static_cast<GLenum>(13), cmd.type); + EXPECT_EQ(static_cast<GLuint>(14), cmd.index_offset); + EXPECT_EQ(static_cast<GLsizei>(15), cmd.primcount); + EXPECT_EQ(static_cast<GLint>(16), cmd.basevertex); + EXPECT_EQ(static_cast<GLuint>(17), cmd.baseinstance); + CheckBytesWrittenMatchesExpectedSize(next_cmd, sizeof(cmd)); +} + TEST_F(GLES2FormatTest, VertexAttribDivisorANGLE) { cmds::VertexAttribDivisorANGLE& cmd = *GetBufferAs<cmds::VertexAttribDivisorANGLE>(); @@ -5456,17 +5554,6 @@ TEST_F(GLES2FormatTest, BlendBarrierKHR) { CheckBytesWrittenMatchesExpectedSize(next_cmd, sizeof(cmd)); } -TEST_F(GLES2FormatTest, ApplyScreenSpaceAntialiasingCHROMIUM) { - cmds::ApplyScreenSpaceAntialiasingCHROMIUM& cmd = - *GetBufferAs<cmds::ApplyScreenSpaceAntialiasingCHROMIUM>(); - void* next_cmd = cmd.Set(&cmd); - EXPECT_EQ( - static_cast<uint32_t>(cmds::ApplyScreenSpaceAntialiasingCHROMIUM::kCmdId), - cmd.header.command); - EXPECT_EQ(sizeof(cmd), cmd.header.size * 4u); - CheckBytesWrittenMatchesExpectedSize(next_cmd, sizeof(cmd)); -} - TEST_F(GLES2FormatTest, BindFragDataLocationIndexedEXTBucket) { cmds::BindFragDataLocationIndexedEXTBucket& cmd = *GetBufferAs<cmds::BindFragDataLocationIndexedEXTBucket>(); diff --git a/chromium/gpu/command_buffer/common/gles2_cmd_ids_autogen.h 
b/chromium/gpu/command_buffer/common/gles2_cmd_ids_autogen.h index 456a345b568..e2b7aa68549 100644 --- a/chromium/gpu/command_buffer/common/gles2_cmd_ids_autogen.h +++ b/chromium/gpu/command_buffer/common/gles2_cmd_ids_autogen.h @@ -11,360 +11,363 @@ #ifndef GPU_COMMAND_BUFFER_COMMON_GLES2_CMD_IDS_AUTOGEN_H_ #define GPU_COMMAND_BUFFER_COMMON_GLES2_CMD_IDS_AUTOGEN_H_ -#define GLES2_COMMAND_LIST(OP) \ - OP(ActiveTexture) /* 256 */ \ - OP(AttachShader) /* 257 */ \ - OP(BindAttribLocationBucket) /* 258 */ \ - OP(BindBuffer) /* 259 */ \ - OP(BindBufferBase) /* 260 */ \ - OP(BindBufferRange) /* 261 */ \ - OP(BindFramebuffer) /* 262 */ \ - OP(BindRenderbuffer) /* 263 */ \ - OP(BindSampler) /* 264 */ \ - OP(BindTexture) /* 265 */ \ - OP(BindTransformFeedback) /* 266 */ \ - OP(BlendColor) /* 267 */ \ - OP(BlendEquation) /* 268 */ \ - OP(BlendEquationSeparate) /* 269 */ \ - OP(BlendFunc) /* 270 */ \ - OP(BlendFuncSeparate) /* 271 */ \ - OP(BufferData) /* 272 */ \ - OP(BufferSubData) /* 273 */ \ - OP(CheckFramebufferStatus) /* 274 */ \ - OP(Clear) /* 275 */ \ - OP(ClearBufferfi) /* 276 */ \ - OP(ClearBufferfvImmediate) /* 277 */ \ - OP(ClearBufferivImmediate) /* 278 */ \ - OP(ClearBufferuivImmediate) /* 279 */ \ - OP(ClearColor) /* 280 */ \ - OP(ClearDepthf) /* 281 */ \ - OP(ClearStencil) /* 282 */ \ - OP(ClientWaitSync) /* 283 */ \ - OP(ColorMask) /* 284 */ \ - OP(CompileShader) /* 285 */ \ - OP(CompressedTexImage2DBucket) /* 286 */ \ - OP(CompressedTexImage2D) /* 287 */ \ - OP(CompressedTexSubImage2DBucket) /* 288 */ \ - OP(CompressedTexSubImage2D) /* 289 */ \ - OP(CompressedTexImage3DBucket) /* 290 */ \ - OP(CompressedTexImage3D) /* 291 */ \ - OP(CompressedTexSubImage3DBucket) /* 292 */ \ - OP(CompressedTexSubImage3D) /* 293 */ \ - OP(CopyBufferSubData) /* 294 */ \ - OP(CopyTexImage2D) /* 295 */ \ - OP(CopyTexSubImage2D) /* 296 */ \ - OP(CopyTexSubImage3D) /* 297 */ \ - OP(CreateProgram) /* 298 */ \ - OP(CreateShader) /* 299 */ \ - OP(CullFace) /* 300 */ \ - OP(DeleteBuffersImmediate) /* 301 */ \ - OP(DeleteFramebuffersImmediate) /* 302 */ \ - OP(DeleteProgram) /* 303 */ \ - OP(DeleteRenderbuffersImmediate) /* 304 */ \ - OP(DeleteSamplersImmediate) /* 305 */ \ - OP(DeleteSync) /* 306 */ \ - OP(DeleteShader) /* 307 */ \ - OP(DeleteTexturesImmediate) /* 308 */ \ - OP(DeleteTransformFeedbacksImmediate) /* 309 */ \ - OP(DepthFunc) /* 310 */ \ - OP(DepthMask) /* 311 */ \ - OP(DepthRangef) /* 312 */ \ - OP(DetachShader) /* 313 */ \ - OP(Disable) /* 314 */ \ - OP(DisableVertexAttribArray) /* 315 */ \ - OP(DrawArrays) /* 316 */ \ - OP(DrawElements) /* 317 */ \ - OP(Enable) /* 318 */ \ - OP(EnableVertexAttribArray) /* 319 */ \ - OP(FenceSync) /* 320 */ \ - OP(Finish) /* 321 */ \ - OP(Flush) /* 322 */ \ - OP(FramebufferRenderbuffer) /* 323 */ \ - OP(FramebufferTexture2D) /* 324 */ \ - OP(FramebufferTextureLayer) /* 325 */ \ - OP(FrontFace) /* 326 */ \ - OP(GenBuffersImmediate) /* 327 */ \ - OP(GenerateMipmap) /* 328 */ \ - OP(GenFramebuffersImmediate) /* 329 */ \ - OP(GenRenderbuffersImmediate) /* 330 */ \ - OP(GenSamplersImmediate) /* 331 */ \ - OP(GenTexturesImmediate) /* 332 */ \ - OP(GenTransformFeedbacksImmediate) /* 333 */ \ - OP(GetActiveAttrib) /* 334 */ \ - OP(GetActiveUniform) /* 335 */ \ - OP(GetActiveUniformBlockiv) /* 336 */ \ - OP(GetActiveUniformBlockName) /* 337 */ \ - OP(GetActiveUniformsiv) /* 338 */ \ - OP(GetAttachedShaders) /* 339 */ \ - OP(GetAttribLocation) /* 340 */ \ - OP(GetBooleanv) /* 341 */ \ - OP(GetBufferParameteri64v) /* 342 */ \ - OP(GetBufferParameteriv) /* 343 */ \ - 
OP(GetError) /* 344 */ \ - OP(GetFloatv) /* 345 */ \ - OP(GetFragDataLocation) /* 346 */ \ - OP(GetFramebufferAttachmentParameteriv) /* 347 */ \ - OP(GetInteger64v) /* 348 */ \ - OP(GetIntegeri_v) /* 349 */ \ - OP(GetInteger64i_v) /* 350 */ \ - OP(GetIntegerv) /* 351 */ \ - OP(GetInternalformativ) /* 352 */ \ - OP(GetProgramiv) /* 353 */ \ - OP(GetProgramInfoLog) /* 354 */ \ - OP(GetRenderbufferParameteriv) /* 355 */ \ - OP(GetSamplerParameterfv) /* 356 */ \ - OP(GetSamplerParameteriv) /* 357 */ \ - OP(GetShaderiv) /* 358 */ \ - OP(GetShaderInfoLog) /* 359 */ \ - OP(GetShaderPrecisionFormat) /* 360 */ \ - OP(GetShaderSource) /* 361 */ \ - OP(GetString) /* 362 */ \ - OP(GetSynciv) /* 363 */ \ - OP(GetTexParameterfv) /* 364 */ \ - OP(GetTexParameteriv) /* 365 */ \ - OP(GetTransformFeedbackVarying) /* 366 */ \ - OP(GetUniformBlockIndex) /* 367 */ \ - OP(GetUniformfv) /* 368 */ \ - OP(GetUniformiv) /* 369 */ \ - OP(GetUniformuiv) /* 370 */ \ - OP(GetUniformIndices) /* 371 */ \ - OP(GetUniformLocation) /* 372 */ \ - OP(GetVertexAttribfv) /* 373 */ \ - OP(GetVertexAttribiv) /* 374 */ \ - OP(GetVertexAttribIiv) /* 375 */ \ - OP(GetVertexAttribIuiv) /* 376 */ \ - OP(GetVertexAttribPointerv) /* 377 */ \ - OP(Hint) /* 378 */ \ - OP(InvalidateFramebufferImmediate) /* 379 */ \ - OP(InvalidateSubFramebufferImmediate) /* 380 */ \ - OP(IsBuffer) /* 381 */ \ - OP(IsEnabled) /* 382 */ \ - OP(IsFramebuffer) /* 383 */ \ - OP(IsProgram) /* 384 */ \ - OP(IsRenderbuffer) /* 385 */ \ - OP(IsSampler) /* 386 */ \ - OP(IsShader) /* 387 */ \ - OP(IsSync) /* 388 */ \ - OP(IsTexture) /* 389 */ \ - OP(IsTransformFeedback) /* 390 */ \ - OP(LineWidth) /* 391 */ \ - OP(LinkProgram) /* 392 */ \ - OP(PauseTransformFeedback) /* 393 */ \ - OP(PixelStorei) /* 394 */ \ - OP(PolygonOffset) /* 395 */ \ - OP(ReadBuffer) /* 396 */ \ - OP(ReadPixels) /* 397 */ \ - OP(ReleaseShaderCompiler) /* 398 */ \ - OP(RenderbufferStorage) /* 399 */ \ - OP(ResumeTransformFeedback) /* 400 */ \ - OP(SampleCoverage) /* 401 */ \ - OP(SamplerParameterf) /* 402 */ \ - OP(SamplerParameterfvImmediate) /* 403 */ \ - OP(SamplerParameteri) /* 404 */ \ - OP(SamplerParameterivImmediate) /* 405 */ \ - OP(Scissor) /* 406 */ \ - OP(ShaderBinary) /* 407 */ \ - OP(ShaderSourceBucket) /* 408 */ \ - OP(MultiDrawBeginCHROMIUM) /* 409 */ \ - OP(MultiDrawEndCHROMIUM) /* 410 */ \ - OP(MultiDrawArraysCHROMIUM) /* 411 */ \ - OP(MultiDrawArraysInstancedCHROMIUM) /* 412 */ \ - OP(MultiDrawElementsCHROMIUM) /* 413 */ \ - OP(MultiDrawElementsInstancedCHROMIUM) /* 414 */ \ - OP(StencilFunc) /* 415 */ \ - OP(StencilFuncSeparate) /* 416 */ \ - OP(StencilMask) /* 417 */ \ - OP(StencilMaskSeparate) /* 418 */ \ - OP(StencilOp) /* 419 */ \ - OP(StencilOpSeparate) /* 420 */ \ - OP(TexImage2D) /* 421 */ \ - OP(TexImage3D) /* 422 */ \ - OP(TexParameterf) /* 423 */ \ - OP(TexParameterfvImmediate) /* 424 */ \ - OP(TexParameteri) /* 425 */ \ - OP(TexParameterivImmediate) /* 426 */ \ - OP(TexStorage3D) /* 427 */ \ - OP(TexSubImage2D) /* 428 */ \ - OP(TexSubImage3D) /* 429 */ \ - OP(TransformFeedbackVaryingsBucket) /* 430 */ \ - OP(Uniform1f) /* 431 */ \ - OP(Uniform1fvImmediate) /* 432 */ \ - OP(Uniform1i) /* 433 */ \ - OP(Uniform1ivImmediate) /* 434 */ \ - OP(Uniform1ui) /* 435 */ \ - OP(Uniform1uivImmediate) /* 436 */ \ - OP(Uniform2f) /* 437 */ \ - OP(Uniform2fvImmediate) /* 438 */ \ - OP(Uniform2i) /* 439 */ \ - OP(Uniform2ivImmediate) /* 440 */ \ - OP(Uniform2ui) /* 441 */ \ - OP(Uniform2uivImmediate) /* 442 */ \ - OP(Uniform3f) /* 443 */ \ - OP(Uniform3fvImmediate) /* 444 */ \ - 
OP(Uniform3i) /* 445 */ \ - OP(Uniform3ivImmediate) /* 446 */ \ - OP(Uniform3ui) /* 447 */ \ - OP(Uniform3uivImmediate) /* 448 */ \ - OP(Uniform4f) /* 449 */ \ - OP(Uniform4fvImmediate) /* 450 */ \ - OP(Uniform4i) /* 451 */ \ - OP(Uniform4ivImmediate) /* 452 */ \ - OP(Uniform4ui) /* 453 */ \ - OP(Uniform4uivImmediate) /* 454 */ \ - OP(UniformBlockBinding) /* 455 */ \ - OP(UniformMatrix2fvImmediate) /* 456 */ \ - OP(UniformMatrix2x3fvImmediate) /* 457 */ \ - OP(UniformMatrix2x4fvImmediate) /* 458 */ \ - OP(UniformMatrix3fvImmediate) /* 459 */ \ - OP(UniformMatrix3x2fvImmediate) /* 460 */ \ - OP(UniformMatrix3x4fvImmediate) /* 461 */ \ - OP(UniformMatrix4fvImmediate) /* 462 */ \ - OP(UniformMatrix4x2fvImmediate) /* 463 */ \ - OP(UniformMatrix4x3fvImmediate) /* 464 */ \ - OP(UseProgram) /* 465 */ \ - OP(ValidateProgram) /* 466 */ \ - OP(VertexAttrib1f) /* 467 */ \ - OP(VertexAttrib1fvImmediate) /* 468 */ \ - OP(VertexAttrib2f) /* 469 */ \ - OP(VertexAttrib2fvImmediate) /* 470 */ \ - OP(VertexAttrib3f) /* 471 */ \ - OP(VertexAttrib3fvImmediate) /* 472 */ \ - OP(VertexAttrib4f) /* 473 */ \ - OP(VertexAttrib4fvImmediate) /* 474 */ \ - OP(VertexAttribI4i) /* 475 */ \ - OP(VertexAttribI4ivImmediate) /* 476 */ \ - OP(VertexAttribI4ui) /* 477 */ \ - OP(VertexAttribI4uivImmediate) /* 478 */ \ - OP(VertexAttribIPointer) /* 479 */ \ - OP(VertexAttribPointer) /* 480 */ \ - OP(Viewport) /* 481 */ \ - OP(WaitSync) /* 482 */ \ - OP(BlitFramebufferCHROMIUM) /* 483 */ \ - OP(RenderbufferStorageMultisampleCHROMIUM) /* 484 */ \ - OP(RenderbufferStorageMultisampleAdvancedAMD) /* 485 */ \ - OP(RenderbufferStorageMultisampleEXT) /* 486 */ \ - OP(FramebufferTexture2DMultisampleEXT) /* 487 */ \ - OP(TexStorage2DEXT) /* 488 */ \ - OP(GenQueriesEXTImmediate) /* 489 */ \ - OP(DeleteQueriesEXTImmediate) /* 490 */ \ - OP(QueryCounterEXT) /* 491 */ \ - OP(BeginQueryEXT) /* 492 */ \ - OP(BeginTransformFeedback) /* 493 */ \ - OP(EndQueryEXT) /* 494 */ \ - OP(EndTransformFeedback) /* 495 */ \ - OP(SetDisjointValueSyncCHROMIUM) /* 496 */ \ - OP(InsertEventMarkerEXT) /* 497 */ \ - OP(PushGroupMarkerEXT) /* 498 */ \ - OP(PopGroupMarkerEXT) /* 499 */ \ - OP(GenVertexArraysOESImmediate) /* 500 */ \ - OP(DeleteVertexArraysOESImmediate) /* 501 */ \ - OP(IsVertexArrayOES) /* 502 */ \ - OP(BindVertexArrayOES) /* 503 */ \ - OP(FramebufferParameteri) /* 504 */ \ - OP(BindImageTexture) /* 505 */ \ - OP(DispatchCompute) /* 506 */ \ - OP(DispatchComputeIndirect) /* 507 */ \ - OP(DrawArraysIndirect) /* 508 */ \ - OP(DrawElementsIndirect) /* 509 */ \ - OP(GetProgramInterfaceiv) /* 510 */ \ - OP(GetProgramResourceIndex) /* 511 */ \ - OP(GetProgramResourceName) /* 512 */ \ - OP(GetProgramResourceiv) /* 513 */ \ - OP(GetProgramResourceLocation) /* 514 */ \ - OP(MemoryBarrierEXT) /* 515 */ \ - OP(MemoryBarrierByRegion) /* 516 */ \ - OP(SwapBuffers) /* 517 */ \ - OP(GetMaxValueInBufferCHROMIUM) /* 518 */ \ - OP(EnableFeatureCHROMIUM) /* 519 */ \ - OP(MapBufferRange) /* 520 */ \ - OP(UnmapBuffer) /* 521 */ \ - OP(FlushMappedBufferRange) /* 522 */ \ - OP(ResizeCHROMIUM) /* 523 */ \ - OP(GetRequestableExtensionsCHROMIUM) /* 524 */ \ - OP(RequestExtensionCHROMIUM) /* 525 */ \ - OP(GetProgramInfoCHROMIUM) /* 526 */ \ - OP(GetUniformBlocksCHROMIUM) /* 527 */ \ - OP(GetTransformFeedbackVaryingsCHROMIUM) /* 528 */ \ - OP(GetUniformsES3CHROMIUM) /* 529 */ \ - OP(DescheduleUntilFinishedCHROMIUM) /* 530 */ \ - OP(GetTranslatedShaderSourceANGLE) /* 531 */ \ - OP(PostSubBufferCHROMIUM) /* 532 */ \ - OP(CopyTextureCHROMIUM) /* 533 */ \ - 
OP(CopySubTextureCHROMIUM) /* 534 */ \ - OP(DrawArraysInstancedANGLE) /* 535 */ \ - OP(DrawElementsInstancedANGLE) /* 536 */ \ - OP(VertexAttribDivisorANGLE) /* 537 */ \ - OP(ProduceTextureDirectCHROMIUMImmediate) /* 538 */ \ - OP(CreateAndConsumeTextureINTERNALImmediate) /* 539 */ \ - OP(BindUniformLocationCHROMIUMBucket) /* 540 */ \ - OP(BindTexImage2DCHROMIUM) /* 541 */ \ - OP(BindTexImage2DWithInternalformatCHROMIUM) /* 542 */ \ - OP(ReleaseTexImage2DCHROMIUM) /* 543 */ \ - OP(TraceBeginCHROMIUM) /* 544 */ \ - OP(TraceEndCHROMIUM) /* 545 */ \ - OP(DiscardFramebufferEXTImmediate) /* 546 */ \ - OP(LoseContextCHROMIUM) /* 547 */ \ - OP(UnpremultiplyAndDitherCopyCHROMIUM) /* 548 */ \ - OP(DrawBuffersEXTImmediate) /* 549 */ \ - OP(DiscardBackbufferCHROMIUM) /* 550 */ \ - OP(ScheduleOverlayPlaneCHROMIUM) /* 551 */ \ - OP(ScheduleCALayerSharedStateCHROMIUM) /* 552 */ \ - OP(ScheduleCALayerCHROMIUM) /* 553 */ \ - OP(ScheduleCALayerInUseQueryCHROMIUMImmediate) /* 554 */ \ - OP(CommitOverlayPlanesCHROMIUM) /* 555 */ \ - OP(FlushDriverCachesCHROMIUM) /* 556 */ \ - OP(ScheduleDCLayerCHROMIUM) /* 557 */ \ - OP(SetActiveURLCHROMIUM) /* 558 */ \ - OP(MatrixLoadfCHROMIUMImmediate) /* 559 */ \ - OP(MatrixLoadIdentityCHROMIUM) /* 560 */ \ - OP(GenPathsCHROMIUM) /* 561 */ \ - OP(DeletePathsCHROMIUM) /* 562 */ \ - OP(IsPathCHROMIUM) /* 563 */ \ - OP(PathCommandsCHROMIUM) /* 564 */ \ - OP(PathParameterfCHROMIUM) /* 565 */ \ - OP(PathParameteriCHROMIUM) /* 566 */ \ - OP(PathStencilFuncCHROMIUM) /* 567 */ \ - OP(StencilFillPathCHROMIUM) /* 568 */ \ - OP(StencilStrokePathCHROMIUM) /* 569 */ \ - OP(CoverFillPathCHROMIUM) /* 570 */ \ - OP(CoverStrokePathCHROMIUM) /* 571 */ \ - OP(StencilThenCoverFillPathCHROMIUM) /* 572 */ \ - OP(StencilThenCoverStrokePathCHROMIUM) /* 573 */ \ - OP(StencilFillPathInstancedCHROMIUM) /* 574 */ \ - OP(StencilStrokePathInstancedCHROMIUM) /* 575 */ \ - OP(CoverFillPathInstancedCHROMIUM) /* 576 */ \ - OP(CoverStrokePathInstancedCHROMIUM) /* 577 */ \ - OP(StencilThenCoverFillPathInstancedCHROMIUM) /* 578 */ \ - OP(StencilThenCoverStrokePathInstancedCHROMIUM) /* 579 */ \ - OP(BindFragmentInputLocationCHROMIUMBucket) /* 580 */ \ - OP(ProgramPathFragmentInputGenCHROMIUM) /* 581 */ \ - OP(ContextVisibilityHintCHROMIUM) /* 582 */ \ - OP(CoverageModulationCHROMIUM) /* 583 */ \ - OP(BlendBarrierKHR) /* 584 */ \ - OP(ApplyScreenSpaceAntialiasingCHROMIUM) /* 585 */ \ - OP(BindFragDataLocationIndexedEXTBucket) /* 586 */ \ - OP(BindFragDataLocationEXTBucket) /* 587 */ \ - OP(GetFragDataIndexEXT) /* 588 */ \ - OP(UniformMatrix4fvStreamTextureMatrixCHROMIUMImmediate) /* 589 */ \ - OP(OverlayPromotionHintCHROMIUM) /* 590 */ \ - OP(SwapBuffersWithBoundsCHROMIUMImmediate) /* 591 */ \ - OP(SetDrawRectangleCHROMIUM) /* 592 */ \ - OP(SetEnableDCLayersCHROMIUM) /* 593 */ \ - OP(InitializeDiscardableTextureCHROMIUM) /* 594 */ \ - OP(UnlockDiscardableTextureCHROMIUM) /* 595 */ \ - OP(LockDiscardableTextureCHROMIUM) /* 596 */ \ - OP(TexStorage2DImageCHROMIUM) /* 597 */ \ - OP(SetColorSpaceMetadataCHROMIUM) /* 598 */ \ - OP(WindowRectanglesEXTImmediate) /* 599 */ \ - OP(CreateGpuFenceINTERNAL) /* 600 */ \ - OP(WaitGpuFenceCHROMIUM) /* 601 */ \ - OP(DestroyGpuFenceCHROMIUM) /* 602 */ \ - OP(SetReadbackBufferShadowAllocationINTERNAL) /* 603 */ \ - OP(FramebufferTextureMultiviewOVR) /* 604 */ \ - OP(MaxShaderCompilerThreadsKHR) /* 605 */ \ - OP(CreateAndTexStorage2DSharedImageINTERNALImmediate) /* 606 */ \ - OP(BeginSharedImageAccessDirectCHROMIUM) /* 607 */ \ - OP(EndSharedImageAccessDirectCHROMIUM) /* 608 */ 
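The renumbered replacement list follows. It inserts MultiDrawArraysInstancedBaseInstanceCHROMIUM (413) and MultiDrawElementsInstancedBaseVertexBaseInstanceCHROMIUM (416), adds DrawArraysInstancedBaseInstanceANGLE (538) and DrawElementsInstancedBaseVertexBaseInstanceANGLE (540), and drops ApplyScreenSpaceAntialiasingCHROMIUM, so every trailing opcode shifts by a net +3 (EndSharedImageAccessDirectCHROMIUM moves from 608 to 611). The /* ... */ numbers only mirror how the X-macro expands into the CommandId enum further down; a minimal sketch of that expansion, assuming the conventional k-prefixed helper macro (255 stands in for cmd::kLastCommonId here):

#define GLES2_CMD_OP(name) k##name,
enum CommandId {
  kOneBeforeStartPoint = 255,        // assumed value of cmd::kLastCommonId
  GLES2_COMMAND_LIST(GLES2_CMD_OP)   // expands to kActiveTexture (256), ...
  kNumCommands,
  kFirstGLES2Command = kOneBeforeStartPoint + 1
};
#undef GLES2_CMD_OP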
+#define GLES2_COMMAND_LIST(OP) \ + OP(ActiveTexture) /* 256 */ \ + OP(AttachShader) /* 257 */ \ + OP(BindAttribLocationBucket) /* 258 */ \ + OP(BindBuffer) /* 259 */ \ + OP(BindBufferBase) /* 260 */ \ + OP(BindBufferRange) /* 261 */ \ + OP(BindFramebuffer) /* 262 */ \ + OP(BindRenderbuffer) /* 263 */ \ + OP(BindSampler) /* 264 */ \ + OP(BindTexture) /* 265 */ \ + OP(BindTransformFeedback) /* 266 */ \ + OP(BlendColor) /* 267 */ \ + OP(BlendEquation) /* 268 */ \ + OP(BlendEquationSeparate) /* 269 */ \ + OP(BlendFunc) /* 270 */ \ + OP(BlendFuncSeparate) /* 271 */ \ + OP(BufferData) /* 272 */ \ + OP(BufferSubData) /* 273 */ \ + OP(CheckFramebufferStatus) /* 274 */ \ + OP(Clear) /* 275 */ \ + OP(ClearBufferfi) /* 276 */ \ + OP(ClearBufferfvImmediate) /* 277 */ \ + OP(ClearBufferivImmediate) /* 278 */ \ + OP(ClearBufferuivImmediate) /* 279 */ \ + OP(ClearColor) /* 280 */ \ + OP(ClearDepthf) /* 281 */ \ + OP(ClearStencil) /* 282 */ \ + OP(ClientWaitSync) /* 283 */ \ + OP(ColorMask) /* 284 */ \ + OP(CompileShader) /* 285 */ \ + OP(CompressedTexImage2DBucket) /* 286 */ \ + OP(CompressedTexImage2D) /* 287 */ \ + OP(CompressedTexSubImage2DBucket) /* 288 */ \ + OP(CompressedTexSubImage2D) /* 289 */ \ + OP(CompressedTexImage3DBucket) /* 290 */ \ + OP(CompressedTexImage3D) /* 291 */ \ + OP(CompressedTexSubImage3DBucket) /* 292 */ \ + OP(CompressedTexSubImage3D) /* 293 */ \ + OP(CopyBufferSubData) /* 294 */ \ + OP(CopyTexImage2D) /* 295 */ \ + OP(CopyTexSubImage2D) /* 296 */ \ + OP(CopyTexSubImage3D) /* 297 */ \ + OP(CreateProgram) /* 298 */ \ + OP(CreateShader) /* 299 */ \ + OP(CullFace) /* 300 */ \ + OP(DeleteBuffersImmediate) /* 301 */ \ + OP(DeleteFramebuffersImmediate) /* 302 */ \ + OP(DeleteProgram) /* 303 */ \ + OP(DeleteRenderbuffersImmediate) /* 304 */ \ + OP(DeleteSamplersImmediate) /* 305 */ \ + OP(DeleteSync) /* 306 */ \ + OP(DeleteShader) /* 307 */ \ + OP(DeleteTexturesImmediate) /* 308 */ \ + OP(DeleteTransformFeedbacksImmediate) /* 309 */ \ + OP(DepthFunc) /* 310 */ \ + OP(DepthMask) /* 311 */ \ + OP(DepthRangef) /* 312 */ \ + OP(DetachShader) /* 313 */ \ + OP(Disable) /* 314 */ \ + OP(DisableVertexAttribArray) /* 315 */ \ + OP(DrawArrays) /* 316 */ \ + OP(DrawElements) /* 317 */ \ + OP(Enable) /* 318 */ \ + OP(EnableVertexAttribArray) /* 319 */ \ + OP(FenceSync) /* 320 */ \ + OP(Finish) /* 321 */ \ + OP(Flush) /* 322 */ \ + OP(FramebufferRenderbuffer) /* 323 */ \ + OP(FramebufferTexture2D) /* 324 */ \ + OP(FramebufferTextureLayer) /* 325 */ \ + OP(FrontFace) /* 326 */ \ + OP(GenBuffersImmediate) /* 327 */ \ + OP(GenerateMipmap) /* 328 */ \ + OP(GenFramebuffersImmediate) /* 329 */ \ + OP(GenRenderbuffersImmediate) /* 330 */ \ + OP(GenSamplersImmediate) /* 331 */ \ + OP(GenTexturesImmediate) /* 332 */ \ + OP(GenTransformFeedbacksImmediate) /* 333 */ \ + OP(GetActiveAttrib) /* 334 */ \ + OP(GetActiveUniform) /* 335 */ \ + OP(GetActiveUniformBlockiv) /* 336 */ \ + OP(GetActiveUniformBlockName) /* 337 */ \ + OP(GetActiveUniformsiv) /* 338 */ \ + OP(GetAttachedShaders) /* 339 */ \ + OP(GetAttribLocation) /* 340 */ \ + OP(GetBooleanv) /* 341 */ \ + OP(GetBufferParameteri64v) /* 342 */ \ + OP(GetBufferParameteriv) /* 343 */ \ + OP(GetError) /* 344 */ \ + OP(GetFloatv) /* 345 */ \ + OP(GetFragDataLocation) /* 346 */ \ + OP(GetFramebufferAttachmentParameteriv) /* 347 */ \ + OP(GetInteger64v) /* 348 */ \ + OP(GetIntegeri_v) /* 349 */ \ + OP(GetInteger64i_v) /* 350 */ \ + OP(GetIntegerv) /* 351 */ \ + OP(GetInternalformativ) /* 352 */ \ + OP(GetProgramiv) /* 353 */ \ + OP(GetProgramInfoLog) /* 354 
*/ \ + OP(GetRenderbufferParameteriv) /* 355 */ \ + OP(GetSamplerParameterfv) /* 356 */ \ + OP(GetSamplerParameteriv) /* 357 */ \ + OP(GetShaderiv) /* 358 */ \ + OP(GetShaderInfoLog) /* 359 */ \ + OP(GetShaderPrecisionFormat) /* 360 */ \ + OP(GetShaderSource) /* 361 */ \ + OP(GetString) /* 362 */ \ + OP(GetSynciv) /* 363 */ \ + OP(GetTexParameterfv) /* 364 */ \ + OP(GetTexParameteriv) /* 365 */ \ + OP(GetTransformFeedbackVarying) /* 366 */ \ + OP(GetUniformBlockIndex) /* 367 */ \ + OP(GetUniformfv) /* 368 */ \ + OP(GetUniformiv) /* 369 */ \ + OP(GetUniformuiv) /* 370 */ \ + OP(GetUniformIndices) /* 371 */ \ + OP(GetUniformLocation) /* 372 */ \ + OP(GetVertexAttribfv) /* 373 */ \ + OP(GetVertexAttribiv) /* 374 */ \ + OP(GetVertexAttribIiv) /* 375 */ \ + OP(GetVertexAttribIuiv) /* 376 */ \ + OP(GetVertexAttribPointerv) /* 377 */ \ + OP(Hint) /* 378 */ \ + OP(InvalidateFramebufferImmediate) /* 379 */ \ + OP(InvalidateSubFramebufferImmediate) /* 380 */ \ + OP(IsBuffer) /* 381 */ \ + OP(IsEnabled) /* 382 */ \ + OP(IsFramebuffer) /* 383 */ \ + OP(IsProgram) /* 384 */ \ + OP(IsRenderbuffer) /* 385 */ \ + OP(IsSampler) /* 386 */ \ + OP(IsShader) /* 387 */ \ + OP(IsSync) /* 388 */ \ + OP(IsTexture) /* 389 */ \ + OP(IsTransformFeedback) /* 390 */ \ + OP(LineWidth) /* 391 */ \ + OP(LinkProgram) /* 392 */ \ + OP(PauseTransformFeedback) /* 393 */ \ + OP(PixelStorei) /* 394 */ \ + OP(PolygonOffset) /* 395 */ \ + OP(ReadBuffer) /* 396 */ \ + OP(ReadPixels) /* 397 */ \ + OP(ReleaseShaderCompiler) /* 398 */ \ + OP(RenderbufferStorage) /* 399 */ \ + OP(ResumeTransformFeedback) /* 400 */ \ + OP(SampleCoverage) /* 401 */ \ + OP(SamplerParameterf) /* 402 */ \ + OP(SamplerParameterfvImmediate) /* 403 */ \ + OP(SamplerParameteri) /* 404 */ \ + OP(SamplerParameterivImmediate) /* 405 */ \ + OP(Scissor) /* 406 */ \ + OP(ShaderBinary) /* 407 */ \ + OP(ShaderSourceBucket) /* 408 */ \ + OP(MultiDrawBeginCHROMIUM) /* 409 */ \ + OP(MultiDrawEndCHROMIUM) /* 410 */ \ + OP(MultiDrawArraysCHROMIUM) /* 411 */ \ + OP(MultiDrawArraysInstancedCHROMIUM) /* 412 */ \ + OP(MultiDrawArraysInstancedBaseInstanceCHROMIUM) /* 413 */ \ + OP(MultiDrawElementsCHROMIUM) /* 414 */ \ + OP(MultiDrawElementsInstancedCHROMIUM) /* 415 */ \ + OP(MultiDrawElementsInstancedBaseVertexBaseInstanceCHROMIUM) /* 416 */ \ + OP(StencilFunc) /* 417 */ \ + OP(StencilFuncSeparate) /* 418 */ \ + OP(StencilMask) /* 419 */ \ + OP(StencilMaskSeparate) /* 420 */ \ + OP(StencilOp) /* 421 */ \ + OP(StencilOpSeparate) /* 422 */ \ + OP(TexImage2D) /* 423 */ \ + OP(TexImage3D) /* 424 */ \ + OP(TexParameterf) /* 425 */ \ + OP(TexParameterfvImmediate) /* 426 */ \ + OP(TexParameteri) /* 427 */ \ + OP(TexParameterivImmediate) /* 428 */ \ + OP(TexStorage3D) /* 429 */ \ + OP(TexSubImage2D) /* 430 */ \ + OP(TexSubImage3D) /* 431 */ \ + OP(TransformFeedbackVaryingsBucket) /* 432 */ \ + OP(Uniform1f) /* 433 */ \ + OP(Uniform1fvImmediate) /* 434 */ \ + OP(Uniform1i) /* 435 */ \ + OP(Uniform1ivImmediate) /* 436 */ \ + OP(Uniform1ui) /* 437 */ \ + OP(Uniform1uivImmediate) /* 438 */ \ + OP(Uniform2f) /* 439 */ \ + OP(Uniform2fvImmediate) /* 440 */ \ + OP(Uniform2i) /* 441 */ \ + OP(Uniform2ivImmediate) /* 442 */ \ + OP(Uniform2ui) /* 443 */ \ + OP(Uniform2uivImmediate) /* 444 */ \ + OP(Uniform3f) /* 445 */ \ + OP(Uniform3fvImmediate) /* 446 */ \ + OP(Uniform3i) /* 447 */ \ + OP(Uniform3ivImmediate) /* 448 */ \ + OP(Uniform3ui) /* 449 */ \ + OP(Uniform3uivImmediate) /* 450 */ \ + OP(Uniform4f) /* 451 */ \ + OP(Uniform4fvImmediate) /* 452 */ \ + OP(Uniform4i) /* 453 */ \ + 
OP(Uniform4ivImmediate) /* 454 */ \ + OP(Uniform4ui) /* 455 */ \ + OP(Uniform4uivImmediate) /* 456 */ \ + OP(UniformBlockBinding) /* 457 */ \ + OP(UniformMatrix2fvImmediate) /* 458 */ \ + OP(UniformMatrix2x3fvImmediate) /* 459 */ \ + OP(UniformMatrix2x4fvImmediate) /* 460 */ \ + OP(UniformMatrix3fvImmediate) /* 461 */ \ + OP(UniformMatrix3x2fvImmediate) /* 462 */ \ + OP(UniformMatrix3x4fvImmediate) /* 463 */ \ + OP(UniformMatrix4fvImmediate) /* 464 */ \ + OP(UniformMatrix4x2fvImmediate) /* 465 */ \ + OP(UniformMatrix4x3fvImmediate) /* 466 */ \ + OP(UseProgram) /* 467 */ \ + OP(ValidateProgram) /* 468 */ \ + OP(VertexAttrib1f) /* 469 */ \ + OP(VertexAttrib1fvImmediate) /* 470 */ \ + OP(VertexAttrib2f) /* 471 */ \ + OP(VertexAttrib2fvImmediate) /* 472 */ \ + OP(VertexAttrib3f) /* 473 */ \ + OP(VertexAttrib3fvImmediate) /* 474 */ \ + OP(VertexAttrib4f) /* 475 */ \ + OP(VertexAttrib4fvImmediate) /* 476 */ \ + OP(VertexAttribI4i) /* 477 */ \ + OP(VertexAttribI4ivImmediate) /* 478 */ \ + OP(VertexAttribI4ui) /* 479 */ \ + OP(VertexAttribI4uivImmediate) /* 480 */ \ + OP(VertexAttribIPointer) /* 481 */ \ + OP(VertexAttribPointer) /* 482 */ \ + OP(Viewport) /* 483 */ \ + OP(WaitSync) /* 484 */ \ + OP(BlitFramebufferCHROMIUM) /* 485 */ \ + OP(RenderbufferStorageMultisampleCHROMIUM) /* 486 */ \ + OP(RenderbufferStorageMultisampleAdvancedAMD) /* 487 */ \ + OP(RenderbufferStorageMultisampleEXT) /* 488 */ \ + OP(FramebufferTexture2DMultisampleEXT) /* 489 */ \ + OP(TexStorage2DEXT) /* 490 */ \ + OP(GenQueriesEXTImmediate) /* 491 */ \ + OP(DeleteQueriesEXTImmediate) /* 492 */ \ + OP(QueryCounterEXT) /* 493 */ \ + OP(BeginQueryEXT) /* 494 */ \ + OP(BeginTransformFeedback) /* 495 */ \ + OP(EndQueryEXT) /* 496 */ \ + OP(EndTransformFeedback) /* 497 */ \ + OP(SetDisjointValueSyncCHROMIUM) /* 498 */ \ + OP(InsertEventMarkerEXT) /* 499 */ \ + OP(PushGroupMarkerEXT) /* 500 */ \ + OP(PopGroupMarkerEXT) /* 501 */ \ + OP(GenVertexArraysOESImmediate) /* 502 */ \ + OP(DeleteVertexArraysOESImmediate) /* 503 */ \ + OP(IsVertexArrayOES) /* 504 */ \ + OP(BindVertexArrayOES) /* 505 */ \ + OP(FramebufferParameteri) /* 506 */ \ + OP(BindImageTexture) /* 507 */ \ + OP(DispatchCompute) /* 508 */ \ + OP(DispatchComputeIndirect) /* 509 */ \ + OP(DrawArraysIndirect) /* 510 */ \ + OP(DrawElementsIndirect) /* 511 */ \ + OP(GetProgramInterfaceiv) /* 512 */ \ + OP(GetProgramResourceIndex) /* 513 */ \ + OP(GetProgramResourceName) /* 514 */ \ + OP(GetProgramResourceiv) /* 515 */ \ + OP(GetProgramResourceLocation) /* 516 */ \ + OP(MemoryBarrierEXT) /* 517 */ \ + OP(MemoryBarrierByRegion) /* 518 */ \ + OP(SwapBuffers) /* 519 */ \ + OP(GetMaxValueInBufferCHROMIUM) /* 520 */ \ + OP(EnableFeatureCHROMIUM) /* 521 */ \ + OP(MapBufferRange) /* 522 */ \ + OP(UnmapBuffer) /* 523 */ \ + OP(FlushMappedBufferRange) /* 524 */ \ + OP(ResizeCHROMIUM) /* 525 */ \ + OP(GetRequestableExtensionsCHROMIUM) /* 526 */ \ + OP(RequestExtensionCHROMIUM) /* 527 */ \ + OP(GetProgramInfoCHROMIUM) /* 528 */ \ + OP(GetUniformBlocksCHROMIUM) /* 529 */ \ + OP(GetTransformFeedbackVaryingsCHROMIUM) /* 530 */ \ + OP(GetUniformsES3CHROMIUM) /* 531 */ \ + OP(DescheduleUntilFinishedCHROMIUM) /* 532 */ \ + OP(GetTranslatedShaderSourceANGLE) /* 533 */ \ + OP(PostSubBufferCHROMIUM) /* 534 */ \ + OP(CopyTextureCHROMIUM) /* 535 */ \ + OP(CopySubTextureCHROMIUM) /* 536 */ \ + OP(DrawArraysInstancedANGLE) /* 537 */ \ + OP(DrawArraysInstancedBaseInstanceANGLE) /* 538 */ \ + OP(DrawElementsInstancedANGLE) /* 539 */ \ + OP(DrawElementsInstancedBaseVertexBaseInstanceANGLE) /* 540 */ \ 
+ OP(VertexAttribDivisorANGLE) /* 541 */ \ + OP(ProduceTextureDirectCHROMIUMImmediate) /* 542 */ \ + OP(CreateAndConsumeTextureINTERNALImmediate) /* 543 */ \ + OP(BindUniformLocationCHROMIUMBucket) /* 544 */ \ + OP(BindTexImage2DCHROMIUM) /* 545 */ \ + OP(BindTexImage2DWithInternalformatCHROMIUM) /* 546 */ \ + OP(ReleaseTexImage2DCHROMIUM) /* 547 */ \ + OP(TraceBeginCHROMIUM) /* 548 */ \ + OP(TraceEndCHROMIUM) /* 549 */ \ + OP(DiscardFramebufferEXTImmediate) /* 550 */ \ + OP(LoseContextCHROMIUM) /* 551 */ \ + OP(UnpremultiplyAndDitherCopyCHROMIUM) /* 552 */ \ + OP(DrawBuffersEXTImmediate) /* 553 */ \ + OP(DiscardBackbufferCHROMIUM) /* 554 */ \ + OP(ScheduleOverlayPlaneCHROMIUM) /* 555 */ \ + OP(ScheduleCALayerSharedStateCHROMIUM) /* 556 */ \ + OP(ScheduleCALayerCHROMIUM) /* 557 */ \ + OP(ScheduleCALayerInUseQueryCHROMIUMImmediate) /* 558 */ \ + OP(CommitOverlayPlanesCHROMIUM) /* 559 */ \ + OP(FlushDriverCachesCHROMIUM) /* 560 */ \ + OP(ScheduleDCLayerCHROMIUM) /* 561 */ \ + OP(SetActiveURLCHROMIUM) /* 562 */ \ + OP(MatrixLoadfCHROMIUMImmediate) /* 563 */ \ + OP(MatrixLoadIdentityCHROMIUM) /* 564 */ \ + OP(GenPathsCHROMIUM) /* 565 */ \ + OP(DeletePathsCHROMIUM) /* 566 */ \ + OP(IsPathCHROMIUM) /* 567 */ \ + OP(PathCommandsCHROMIUM) /* 568 */ \ + OP(PathParameterfCHROMIUM) /* 569 */ \ + OP(PathParameteriCHROMIUM) /* 570 */ \ + OP(PathStencilFuncCHROMIUM) /* 571 */ \ + OP(StencilFillPathCHROMIUM) /* 572 */ \ + OP(StencilStrokePathCHROMIUM) /* 573 */ \ + OP(CoverFillPathCHROMIUM) /* 574 */ \ + OP(CoverStrokePathCHROMIUM) /* 575 */ \ + OP(StencilThenCoverFillPathCHROMIUM) /* 576 */ \ + OP(StencilThenCoverStrokePathCHROMIUM) /* 577 */ \ + OP(StencilFillPathInstancedCHROMIUM) /* 578 */ \ + OP(StencilStrokePathInstancedCHROMIUM) /* 579 */ \ + OP(CoverFillPathInstancedCHROMIUM) /* 580 */ \ + OP(CoverStrokePathInstancedCHROMIUM) /* 581 */ \ + OP(StencilThenCoverFillPathInstancedCHROMIUM) /* 582 */ \ + OP(StencilThenCoverStrokePathInstancedCHROMIUM) /* 583 */ \ + OP(BindFragmentInputLocationCHROMIUMBucket) /* 584 */ \ + OP(ProgramPathFragmentInputGenCHROMIUM) /* 585 */ \ + OP(ContextVisibilityHintCHROMIUM) /* 586 */ \ + OP(CoverageModulationCHROMIUM) /* 587 */ \ + OP(BlendBarrierKHR) /* 588 */ \ + OP(BindFragDataLocationIndexedEXTBucket) /* 589 */ \ + OP(BindFragDataLocationEXTBucket) /* 590 */ \ + OP(GetFragDataIndexEXT) /* 591 */ \ + OP(UniformMatrix4fvStreamTextureMatrixCHROMIUMImmediate) /* 592 */ \ + OP(OverlayPromotionHintCHROMIUM) /* 593 */ \ + OP(SwapBuffersWithBoundsCHROMIUMImmediate) /* 594 */ \ + OP(SetDrawRectangleCHROMIUM) /* 595 */ \ + OP(SetEnableDCLayersCHROMIUM) /* 596 */ \ + OP(InitializeDiscardableTextureCHROMIUM) /* 597 */ \ + OP(UnlockDiscardableTextureCHROMIUM) /* 598 */ \ + OP(LockDiscardableTextureCHROMIUM) /* 599 */ \ + OP(TexStorage2DImageCHROMIUM) /* 600 */ \ + OP(SetColorSpaceMetadataCHROMIUM) /* 601 */ \ + OP(WindowRectanglesEXTImmediate) /* 602 */ \ + OP(CreateGpuFenceINTERNAL) /* 603 */ \ + OP(WaitGpuFenceCHROMIUM) /* 604 */ \ + OP(DestroyGpuFenceCHROMIUM) /* 605 */ \ + OP(SetReadbackBufferShadowAllocationINTERNAL) /* 606 */ \ + OP(FramebufferTextureMultiviewOVR) /* 607 */ \ + OP(MaxShaderCompilerThreadsKHR) /* 608 */ \ + OP(CreateAndTexStorage2DSharedImageINTERNALImmediate) /* 609 */ \ + OP(BeginSharedImageAccessDirectCHROMIUM) /* 610 */ \ + OP(EndSharedImageAccessDirectCHROMIUM) /* 611 */ enum CommandId { kOneBeforeStartPoint = diff --git a/chromium/gpu/command_buffer/common/webgpu_cmd_enums.h b/chromium/gpu/command_buffer/common/webgpu_cmd_enums.h index 
2f5c970ba2e..b630fbbeab8 100644 --- a/chromium/gpu/command_buffer/common/webgpu_cmd_enums.h +++ b/chromium/gpu/command_buffer/common/webgpu_cmd_enums.h @@ -11,14 +11,15 @@ namespace webgpu { enum class PowerPreference : uint32_t { kLowPower, kHighPerformance, + kDefault, kNumPowerPreferences }; -// These numbers must not change -static_assert(static_cast<int>(PowerPreference::kLowPower) == 0, - "kLowPower should equal 0"); -static_assert(static_cast<int>(PowerPreference::kHighPerformance) == 1, - "kHighPerformance should equal 1"); +enum class DawnReturnDataType : uint32_t { + kDawnCommands, + kRequestedDawnAdapterProperties, + kNumDawnReturnDataType +}; } // namespace webgpu } // namespace gpu diff --git a/chromium/gpu/command_buffer/common/webgpu_cmd_format.h b/chromium/gpu/command_buffer/common/webgpu_cmd_format.h index 09d6a09fe78..781a76abb46 100644 --- a/chromium/gpu/command_buffer/common/webgpu_cmd_format.h +++ b/chromium/gpu/command_buffer/common/webgpu_cmd_format.h @@ -14,6 +14,7 @@ #include "base/macros.h" #include "gpu/command_buffer/common/common_cmd_format.h" #include "gpu/command_buffer/common/gl2_types.h" +#include "gpu/command_buffer/common/webgpu_cmd_enums.h" #include "gpu/command_buffer/common/webgpu_cmd_ids.h" #include "ui/gfx/buffer_types.h" @@ -21,6 +22,35 @@ namespace gpu { namespace webgpu { namespace cmds { +#define GPU_DAWN_RETURN_DATA_ALIGNMENT (8) +struct alignas(GPU_DAWN_RETURN_DATA_ALIGNMENT) DawnReturnDataHeader { + DawnReturnDataType return_data_type; +}; + +static_assert( + sizeof(DawnReturnDataHeader) % GPU_DAWN_RETURN_DATA_ALIGNMENT == 0, + "DawnReturnDataHeader must align to GPU_DAWN_RETURN_DATA_ALIGNMENT"); + +struct alignas(GPU_DAWN_RETURN_DATA_ALIGNMENT) DawnReturnAdapterIDs { + uint32_t request_adapter_serial; + uint32_t adapter_service_id; +}; + +static_assert( + sizeof(DawnReturnAdapterIDs) % GPU_DAWN_RETURN_DATA_ALIGNMENT == 0, + "DawnReturnAdapterIDs must align to GPU_DAWN_RETURN_DATA_ALIGNMENT"); + +struct alignas(GPU_DAWN_RETURN_DATA_ALIGNMENT) DawnReturnAdapterInfo { + DawnReturnAdapterIDs adapter_ids; + char deserialized_buffer[]; +}; + +static_assert(offsetof(DawnReturnAdapterInfo, deserialized_buffer) % + GPU_DAWN_RETURN_DATA_ALIGNMENT == + 0, + "The offset of deserialized_buffer must align to " + "GPU_DAWN_RETURN_DATA_ALIGNMENT"); + // Command buffer is GPU_COMMAND_BUFFER_ENTRY_ALIGNMENT byte aligned. 
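The three structs above define the service-to-client return-data framing for WebGPU: an 8-byte-aligned DawnReturnDataHeader tags each payload with a DawnReturnDataType, and for kRequestedDawnAdapterProperties the payload is a DawnReturnAdapterInfo whose flexible deserialized_buffer member carries the serialized adapter properties directly after the two IDs. A minimal sketch of packing such a message, assuming a contiguous buffer with the declarations above in scope; the helper name, its parameters, and the transport around it are hypothetical, not part of this change:

#include <cstdint>
#include <cstring>
#include <vector>

// Hypothetical packing helper; only the structs and enum come from this
// patch (gpu::webgpu / gpu::webgpu::cmds declarations assumed in scope).
std::vector<char> PackAdapterInfo(uint32_t request_adapter_serial,
                                  uint32_t adapter_service_id,
                                  const char* props, size_t props_size) {
  std::vector<char> msg(sizeof(DawnReturnDataHeader) +
                        sizeof(DawnReturnAdapterInfo) + props_size);
  auto* header = reinterpret_cast<DawnReturnDataHeader*>(msg.data());
  header->return_data_type =
      DawnReturnDataType::kRequestedDawnAdapterProperties;
  auto* info = reinterpret_cast<DawnReturnAdapterInfo*>(
      msg.data() + sizeof(DawnReturnDataHeader));
  info->adapter_ids.request_adapter_serial = request_adapter_serial;
  info->adapter_ids.adapter_service_id = adapter_service_id;
  std::memcpy(info->deserialized_buffer, props, props_size);
  return msg;
}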
#pragma pack(push, 4) static_assert(GPU_COMMAND_BUFFER_ENTRY_ALIGNMENT == 4, diff --git a/chromium/gpu/command_buffer/common/webgpu_cmd_format_autogen.h b/chromium/gpu/command_buffer/common/webgpu_cmd_format_autogen.h index e05c017bf77..006325bfefd 100644 --- a/chromium/gpu/command_buffer/common/webgpu_cmd_format_autogen.h +++ b/chromium/gpu/command_buffer/common/webgpu_cmd_format_autogen.h @@ -176,25 +176,91 @@ struct RequestAdapter { void SetHeader() { header.SetCmd<ValueType>(); } - void Init(uint32_t _power_preference) { + void Init(uint32_t _request_adapter_serial, uint32_t _power_preference) { SetHeader(); + request_adapter_serial = _request_adapter_serial; power_preference = _power_preference; } - void* Set(void* cmd, uint32_t _power_preference) { - static_cast<ValueType*>(cmd)->Init(_power_preference); + void* Set(void* cmd, + uint32_t _request_adapter_serial, + uint32_t _power_preference) { + static_cast<ValueType*>(cmd)->Init(_request_adapter_serial, + _power_preference); return NextCmdAddress<ValueType>(cmd); } gpu::CommandHeader header; + uint32_t request_adapter_serial; uint32_t power_preference; }; -static_assert(sizeof(RequestAdapter) == 8, - "size of RequestAdapter should be 8"); +static_assert(sizeof(RequestAdapter) == 12, + "size of RequestAdapter should be 12"); static_assert(offsetof(RequestAdapter, header) == 0, "offset of RequestAdapter header should be 0"); -static_assert(offsetof(RequestAdapter, power_preference) == 4, - "offset of RequestAdapter power_preference should be 4"); +static_assert(offsetof(RequestAdapter, request_adapter_serial) == 4, + "offset of RequestAdapter request_adapter_serial should be 4"); +static_assert(offsetof(RequestAdapter, power_preference) == 8, + "offset of RequestAdapter power_preference should be 8"); + +struct RequestDevice { + typedef RequestDevice ValueType; + static const CommandId kCmdId = kRequestDevice; + static const cmd::ArgFlags kArgFlags = cmd::kFixed; + static const uint8_t cmd_flags = CMD_FLAG_SET_TRACE_LEVEL(3); + + static uint32_t ComputeSize() { + return static_cast<uint32_t>(sizeof(ValueType)); // NOLINT + } + + void SetHeader() { header.SetCmd<ValueType>(); } + + void Init(uint32_t _adapter_service_id, + uint32_t _request_device_properties_shm_id, + uint32_t _request_device_properties_shm_offset, + uint32_t _request_device_properties_size) { + SetHeader(); + adapter_service_id = _adapter_service_id; + request_device_properties_shm_id = _request_device_properties_shm_id; + request_device_properties_shm_offset = + _request_device_properties_shm_offset; + request_device_properties_size = _request_device_properties_size; + } + + void* Set(void* cmd, + uint32_t _adapter_service_id, + uint32_t _request_device_properties_shm_id, + uint32_t _request_device_properties_shm_offset, + uint32_t _request_device_properties_size) { + static_cast<ValueType*>(cmd)->Init( + _adapter_service_id, _request_device_properties_shm_id, + _request_device_properties_shm_offset, _request_device_properties_size); + return NextCmdAddress<ValueType>(cmd); + } + + gpu::CommandHeader header; + uint32_t adapter_service_id; + uint32_t request_device_properties_shm_id; + uint32_t request_device_properties_shm_offset; + uint32_t request_device_properties_size; +}; + +static_assert(sizeof(RequestDevice) == 20, + "size of RequestDevice should be 20"); +static_assert(offsetof(RequestDevice, header) == 0, + "offset of RequestDevice header should be 0"); +static_assert(offsetof(RequestDevice, adapter_service_id) == 4, + "offset of RequestDevice 
adapter_service_id should be 4"); +static_assert( + offsetof(RequestDevice, request_device_properties_shm_id) == 8, + "offset of RequestDevice request_device_properties_shm_id should be 8"); +static_assert(offsetof(RequestDevice, request_device_properties_shm_offset) == + 12, + "offset of RequestDevice request_device_properties_shm_offset " + "should be 12"); +static_assert( + offsetof(RequestDevice, request_device_properties_size) == 16, + "offset of RequestDevice request_device_properties_size should be 16"); #endif // GPU_COMMAND_BUFFER_COMMON_WEBGPU_CMD_FORMAT_AUTOGEN_H_ diff --git a/chromium/gpu/command_buffer/common/webgpu_cmd_format_test_autogen.h b/chromium/gpu/command_buffer/common/webgpu_cmd_format_test_autogen.h index 39f18fe8f17..d2285ae44ac 100644 --- a/chromium/gpu/command_buffer/common/webgpu_cmd_format_test_autogen.h +++ b/chromium/gpu/command_buffer/common/webgpu_cmd_format_test_autogen.h @@ -81,11 +81,29 @@ TEST_F(WebGPUFormatTest, DissociateMailbox) { TEST_F(WebGPUFormatTest, RequestAdapter) { cmds::RequestAdapter& cmd = *GetBufferAs<cmds::RequestAdapter>(); - void* next_cmd = cmd.Set(&cmd, static_cast<uint32_t>(11)); + void* next_cmd = + cmd.Set(&cmd, static_cast<uint32_t>(11), static_cast<uint32_t>(12)); EXPECT_EQ(static_cast<uint32_t>(cmds::RequestAdapter::kCmdId), cmd.header.command); EXPECT_EQ(sizeof(cmd), cmd.header.size * 4u); - EXPECT_EQ(static_cast<uint32_t>(11), cmd.power_preference); + EXPECT_EQ(static_cast<uint32_t>(11), cmd.request_adapter_serial); + EXPECT_EQ(static_cast<uint32_t>(12), cmd.power_preference); + CheckBytesWrittenMatchesExpectedSize(next_cmd, sizeof(cmd)); +} + +TEST_F(WebGPUFormatTest, RequestDevice) { + cmds::RequestDevice& cmd = *GetBufferAs<cmds::RequestDevice>(); + void* next_cmd = + cmd.Set(&cmd, static_cast<uint32_t>(11), static_cast<uint32_t>(12), + static_cast<uint32_t>(13), static_cast<uint32_t>(14)); + EXPECT_EQ(static_cast<uint32_t>(cmds::RequestDevice::kCmdId), + cmd.header.command); + EXPECT_EQ(sizeof(cmd), cmd.header.size * 4u); + EXPECT_EQ(static_cast<uint32_t>(11), cmd.adapter_service_id); + EXPECT_EQ(static_cast<uint32_t>(12), cmd.request_device_properties_shm_id); + EXPECT_EQ(static_cast<uint32_t>(13), + cmd.request_device_properties_shm_offset); + EXPECT_EQ(static_cast<uint32_t>(14), cmd.request_device_properties_size); CheckBytesWrittenMatchesExpectedSize(next_cmd, sizeof(cmd)); } diff --git a/chromium/gpu/command_buffer/common/webgpu_cmd_ids_autogen.h b/chromium/gpu/command_buffer/common/webgpu_cmd_ids_autogen.h index a46d5e78bcb..3e706909d44 100644 --- a/chromium/gpu/command_buffer/common/webgpu_cmd_ids_autogen.h +++ b/chromium/gpu/command_buffer/common/webgpu_cmd_ids_autogen.h @@ -15,7 +15,8 @@ OP(DawnCommands) /* 256 */ \ OP(AssociateMailboxImmediate) /* 257 */ \ OP(DissociateMailbox) /* 258 */ \ - OP(RequestAdapter) /* 259 */ + OP(RequestAdapter) /* 259 */ \ + OP(RequestDevice) /* 260 */ enum CommandId { kOneBeforeStartPoint = diff --git a/chromium/gpu/command_buffer/gles2_cmd_buffer_functions.txt b/chromium/gpu/command_buffer/gles2_cmd_buffer_functions.txt index d5dcb209d7f..94aee8014c2 100644 --- a/chromium/gpu/command_buffer/gles2_cmd_buffer_functions.txt +++ b/chromium/gpu/command_buffer/gles2_cmd_buffer_functions.txt @@ -159,18 +159,22 @@ GL_APICALL void GL_APIENTRY glShallowFinishCHROMIUM (void); GL_APICALL void GL_APIENTRY glShallowFlushCHROMIUM (void); GL_APICALL void GL_APIENTRY glOrderingBarrierCHROMIUM (void); -// Extensions WEBGL_multi_draw and WEBGL_multi_draw_instanced +// Extensions WEBGL_multi_draw // 
WEBGL entrypoints are public, CHROMIUM entrypoints are internal to the command buffer GL_APICALL void GL_APIENTRY glMultiDrawBeginCHROMIUM (GLsizei drawcount); GL_APICALL void GL_APIENTRY glMultiDrawEndCHROMIUM (void); GL_APICALL void GL_APIENTRY glMultiDrawArraysCHROMIUM (GLenumDrawMode mode, const GLint* firsts, const GLsizei* counts, GLsizei drawcount); GL_APICALL void GL_APIENTRY glMultiDrawArraysInstancedCHROMIUM (GLenumDrawMode mode, const GLint* firsts, const GLsizei* counts, const GLsizei* instance_counts, GLsizei drawcount); +GL_APICALL void GL_APIENTRY glMultiDrawArraysInstancedBaseInstanceCHROMIUM (GLenumDrawMode mode, const GLint* firsts, const GLsizei* counts, const GLsizei* instance_counts, const GLuint* baseinstances, GLsizei drawcount); GL_APICALL void GL_APIENTRY glMultiDrawElementsCHROMIUM (GLenumDrawMode mode, const GLsizei* counts, GLenumIndexType type, const GLsizei* offsets, GLsizei drawcount); GL_APICALL void GL_APIENTRY glMultiDrawElementsInstancedCHROMIUM (GLenumDrawMode mode, const GLsizei* counts, GLenumIndexType type, const GLsizei* offsets, const GLsizei* instance_counts, GLsizei drawcount); +GL_APICALL void GL_APIENTRY glMultiDrawElementsInstancedBaseVertexBaseInstanceCHROMIUM (GLenumDrawMode mode, const GLsizei* counts, GLenumIndexType type, const GLsizei* offsets, const GLsizei* instance_counts, GLint* basevertices, GLuint* baseinstances, GLsizei drawcount); GL_APICALL void GL_APIENTRY glMultiDrawArraysWEBGL (GLenumDrawMode mode, const GLint* firsts, const GLsizei* counts, GLsizei drawcount); GL_APICALL void GL_APIENTRY glMultiDrawArraysInstancedWEBGL (GLenumDrawMode mode, const GLint* firsts, const GLsizei* counts, const GLsizei* instance_counts, GLsizei drawcount); +GL_APICALL void GL_APIENTRY glMultiDrawArraysInstancedBaseInstanceWEBGL (GLenumDrawMode mode, const GLint* firsts, const GLsizei* counts, const GLsizei* instance_counts, const GLuint* baseinstances, GLsizei drawcount); GL_APICALL void GL_APIENTRY glMultiDrawElementsWEBGL (GLenumDrawMode mode, const GLsizei* counts, GLenumIndexType type, const GLsizei* offsets, GLsizei drawcount); GL_APICALL void GL_APIENTRY glMultiDrawElementsInstancedWEBGL (GLenumDrawMode mode, const GLsizei* counts, GLenumIndexType type, const GLsizei* offsets, const GLsizei* instance_counts, GLsizei drawcount); +GL_APICALL void GL_APIENTRY glMultiDrawElementsInstancedBaseVertexBaseInstanceWEBGL (GLenumDrawMode mode, const GLsizei* counts, GLenumIndexType type, const GLsizei* offsets, const GLsizei* instance_counts, const GLint* basevertices, const GLuint* baseinstances, GLsizei drawcount); GL_APICALL void GL_APIENTRY glStencilFunc (GLenumCmpFunction func, GLint ref, GLuint mask); GL_APICALL void GL_APIENTRY glStencilFuncSeparate (GLenumFaceType face, GLenumCmpFunction func, GLint ref, GLuint mask); @@ -314,7 +318,9 @@ GL_APICALL void GL_APIENTRY glPostSubBufferCHROMIUM (GLuint64 swap_id, G GL_APICALL void GL_APIENTRY glCopyTextureCHROMIUM (GLuint source_id, GLint source_level, GLenumTextureTarget dest_target, GLuint dest_id, GLint dest_level, GLintTextureInternalFormat internalformat, GLenumPixelType dest_type, GLboolean unpack_flip_y, GLboolean unpack_premultiply_alpha, GLboolean unpack_unmultiply_alpha); GL_APICALL void GL_APIENTRY glCopySubTextureCHROMIUM (GLuint source_id, GLint source_level, GLenumTextureTarget dest_target, GLuint dest_id, GLint dest_level, GLint xoffset, GLint yoffset, GLint x, GLint y, GLsizei width, GLsizei height, GLboolean unpack_flip_y, GLboolean unpack_premultiply_alpha, GLboolean 
unpack_unmultiply_alpha); GL_APICALL void GL_APIENTRY glDrawArraysInstancedANGLE (GLenumDrawMode mode, GLint first, GLsizei count, GLsizei primcount); +GL_APICALL void GL_APIENTRY glDrawArraysInstancedBaseInstanceANGLE (GLenumDrawMode mode, GLint first, GLsizei count, GLsizei primcount, GLuint baseinstance); GL_APICALL void GL_APIENTRY glDrawElementsInstancedANGLE (GLenumDrawMode mode, GLsizei count, GLenumIndexType type, const void* indices, GLsizei primcount); +GL_APICALL void GL_APIENTRY glDrawElementsInstancedBaseVertexBaseInstanceANGLE (GLenumDrawMode mode, GLsizei count, GLenumIndexType type, const void* indices, GLsizei primcount, GLint basevertex, GLuint baseinstance); GL_APICALL void GL_APIENTRY glVertexAttribDivisorANGLE (GLuint index, GLuint divisor); GL_APICALL void GL_APIENTRY glProduceTextureDirectCHROMIUM (GLidBindTexture texture, GLbyte* mailbox); GL_APICALL GLuint GL_APIENTRY glCreateAndConsumeTextureCHROMIUM (const GLbyte* mailbox); @@ -377,9 +383,6 @@ GL_APICALL GLenum GL_APIENTRY glGetGraphicsResetStatusKHR (void); // Extension KHR_blend_equation_advanced GL_APICALL void GL_APIENTRY glBlendBarrierKHR (void); -// Extension GL_CHROMIUM_screen_space_antialiasing -GL_APICALL void GL_APIENTRY glApplyScreenSpaceAntialiasingCHROMIUM (void); - // Extension EXT_blend_func_extended GL_APICALL void GL_APIENTRY glBindFragDataLocationIndexedEXT (GLidProgram program, GLuint colorNumber, GLuint index, const char* name); GL_APICALL void GL_APIENTRY glBindFragDataLocationEXT (GLidProgram program, GLuint colorNumber, const char* name); diff --git a/chromium/gpu/command_buffer/service/BUILD.gn b/chromium/gpu/command_buffer/service/BUILD.gn index c62afb23a62..2709ff8f555 100644 --- a/chromium/gpu/command_buffer/service/BUILD.gn +++ b/chromium/gpu/command_buffer/service/BUILD.gn @@ -5,6 +5,7 @@ import("//build/config/jumbo.gni") import("//build/config/ui.gni") import("//gpu/vulkan/features.gni") +import("//skia/features.gni") import("//third_party/protobuf/proto_library.gni") import("//ui/gl/features.gni") @@ -136,8 +137,6 @@ target(link_target_type, "gles2_sources") { "gl_state_restorer_impl.h", "gl_utils.cc", "gl_utils.h", - "gles2_cmd_apply_framebuffer_attachment_cmaa_intel.cc", - "gles2_cmd_apply_framebuffer_attachment_cmaa_intel.h", "gles2_cmd_clear_framebuffer.cc", "gles2_cmd_clear_framebuffer.h", "gles2_cmd_copy_tex_image.cc", @@ -311,6 +310,7 @@ target(link_target_type, "gles2_sources") { "//gpu/config", "//gpu/ipc/common", "//gpu/vulkan:buildflags", + "//skia:buildflags", "//third_party/angle:angle_image_util", "//third_party/angle:commit_id", "//third_party/angle:translator", @@ -326,6 +326,12 @@ target(link_target_type, "gles2_sources") { "//ui/gl/init", ] + if (use_ozone) { + deps += [ + "//ui/ozone", + ] + } + if (enable_vulkan) { deps += [ "//components/viz/common:vulkan_context_provider", @@ -345,6 +351,13 @@ target(link_target_type, "gles2_sources") { ] } + if (use_ozone) { + sources += [ + "shared_image_backing_ozone.cc", + "shared_image_backing_ozone.h", + ] + } + if (is_linux && use_dawn) { sources += [ "external_vk_image_dawn_representation.cc", @@ -373,6 +386,13 @@ target(link_target_type, "gles2_sources") { ] } + if (skia_use_dawn) { + deps += [ + "//components/viz/common:dawn_context_provider", + "//third_party/dawn/src/dawn:libdawn", + ] + } + if (is_android) { if (!is_debug) { # On Android optimize more since this component can be a bottleneck. 
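To make the new draw entry points in gles2_cmd_buffer_functions.txt above concrete, here is a hypothetical client-side call sequence (GLES2 C interface; program, buffer, and vertex-array setup assumed, error handling omitted). basevertex offsets the fetched index values, while baseinstance offsets where instanced attribute streams start; per usual GL semantics, gl_InstanceID is not offset by baseinstance:

// Single draw: 36 indices, 4 instances, index values offset by 64,
// instanced attributes fetched starting at instance 8.
glDrawElementsInstancedBaseVertexBaseInstanceANGLE(
    GL_TRIANGLES, 36, GL_UNSIGNED_SHORT, /*indices=*/nullptr,
    /*primcount=*/4, /*basevertex=*/64, /*baseinstance=*/8);

// Multi-draw: two ranges submitted in one call through the public
// WEBGL entry point; array contents are illustrative.
const GLint firsts[] = {0, 120};
const GLsizei counts[] = {60, 90};
const GLsizei instance_counts[] = {2, 3};
const GLuint baseinstances[] = {0, 4};
glMultiDrawArraysInstancedBaseInstanceWEBGL(GL_TRIANGLES, firsts, counts,
                                            instance_counts, baseinstances,
                                            /*drawcount=*/2);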
diff --git a/chromium/gpu/command_buffer/service/DEPS b/chromium/gpu/command_buffer/service/DEPS index 1eed74db592..ee97fc38e53 100644 --- a/chromium/gpu/command_buffer/service/DEPS +++ b/chromium/gpu/command_buffer/service/DEPS @@ -1,7 +1,9 @@ include_rules = [ "+cc/paint", + "+skia", "+third_party/skia", "+components/crash/core/common/crash_key.h", + "+components/viz/common/gpu/dawn_context_provider.h", "+components/viz/common/gpu/metal_context_provider.h", "+components/viz/common/gpu/vulkan_context_provider.h", "+components/viz/common/resources/resource_format.h", diff --git a/chromium/gpu/command_buffer/service/client_service_map.h b/chromium/gpu/command_buffer/service/client_service_map.h index 42974ae5f77..d19484ee1a7 100644 --- a/chromium/gpu/command_buffer/service/client_service_map.h +++ b/chromium/gpu/command_buffer/service/client_service_map.h @@ -76,7 +76,7 @@ class ClientServiceMap { return true; } if (client_id == 0) { - *service_id = 0; + *service_id = ServiceType{}; return true; } return false; @@ -168,4 +168,4 @@ class ClientServiceMap { } // namespace gles2 } // namespace gpu -#endif // GPU_COMMAND_BUFFER_SERVICE_CLIENT_SERVICE_MAP_H_
\ No newline at end of file +#endif // GPU_COMMAND_BUFFER_SERVICE_CLIENT_SERVICE_MAP_H_ diff --git a/chromium/gpu/command_buffer/service/context_group.cc b/chromium/gpu/command_buffer/service/context_group.cc index 6ad8895beae..57035f6dac3 100644 --- a/chromium/gpu/command_buffer/service/context_group.cc +++ b/chromium/gpu/command_buffer/service/context_group.cc @@ -58,6 +58,7 @@ DisallowedFeatures AdjustDisallowedFeatures( adjusted_disallowed_features.oes_texture_half_float_linear = true; adjusted_disallowed_features.ext_texture_filter_anisotropic = true; adjusted_disallowed_features.ext_float_blend = true; + adjusted_disallowed_features.oes_fbo_render_mipmap = true; } return adjusted_disallowed_features; } @@ -125,7 +126,7 @@ ContextGroup::ContextGroup( shared_image_representation_factory_( std::make_unique<SharedImageRepresentationFactory>( shared_image_manager, - memory_tracker.get())) { + memory_tracker_.get())) { DCHECK(discardable_manager); DCHECK(feature_info_); DCHECK(mailbox_manager_); @@ -620,18 +621,17 @@ void ContextGroup::Destroy(DecoderContext* decoder, bool have_context) { ReportProgress(); } - memory_tracker_ = nullptr; - if (passthrough_discardable_manager_) { passthrough_discardable_manager_->DeleteContextGroup(this); } if (passthrough_resources_) { gl::GLApi* api = have_context ? gl::g_current_gl_context : nullptr; - passthrough_resources_->Destroy(api); + passthrough_resources_->Destroy(api, progress_reporter_); passthrough_resources_.reset(); ReportProgress(); } + memory_tracker_ = nullptr; } uint32_t ContextGroup::GetMemRepresented() const { diff --git a/chromium/gpu/command_buffer/service/dawn_platform.cc b/chromium/gpu/command_buffer/service/dawn_platform.cc index 27694eb1739..c89bb9da89a 100644 --- a/chromium/gpu/command_buffer/service/dawn_platform.cc +++ b/chromium/gpu/command_buffer/service/dawn_platform.cc @@ -15,8 +15,10 @@ DawnPlatform::DawnPlatform() = default; DawnPlatform::~DawnPlatform() = default; const unsigned char* DawnPlatform::GetTraceCategoryEnabledFlag( - const char* category_group) { - return TRACE_EVENT_API_GET_CATEGORY_GROUP_ENABLED(category_group); + dawn_platform::TraceCategory category) { + // For now, all Dawn trace categories are put under "gpu.dawn" + return TRACE_EVENT_API_GET_CATEGORY_GROUP_ENABLED( + TRACE_DISABLED_BY_DEFAULT("gpu.dawn")); } double DawnPlatform::MonotonicallyIncreasingTime() { diff --git a/chromium/gpu/command_buffer/service/dawn_platform.h b/chromium/gpu/command_buffer/service/dawn_platform.h index 30849b18f0e..ffa4e766526 100644 --- a/chromium/gpu/command_buffer/service/dawn_platform.h +++ b/chromium/gpu/command_buffer/service/dawn_platform.h @@ -16,7 +16,7 @@ class DawnPlatform : public dawn_platform::Platform { ~DawnPlatform() override; const unsigned char* GetTraceCategoryEnabledFlag( - const char* category_group) override; + dawn_platform::TraceCategory category) override; double MonotonicallyIncreasingTime() override; diff --git a/chromium/gpu/command_buffer/service/decoder_client.h b/chromium/gpu/command_buffer/service/decoder_client.h index 158341998b6..85ebf14cc8b 100644 --- a/chromium/gpu/command_buffer/service/decoder_client.h +++ b/chromium/gpu/command_buffer/service/decoder_client.h @@ -11,6 +11,7 @@ #include "base/containers/span.h" #include "gpu/gpu_export.h" +#include "ui/gl/gpu_preference.h" #include "url/gurl.h" namespace gpu { @@ -23,7 +24,7 @@ class GPU_EXPORT DecoderClient { virtual void OnConsoleMessage(int32_t id, const std::string& message) = 0; // Notifies the renderer process that the active 
GPU changed. - virtual void OnGpuSwitched() {} + virtual void OnGpuSwitched(gl::GpuPreference active_gpu_heuristic) {} // Cache a newly linked shader. virtual void CacheShader(const std::string& key, diff --git a/chromium/gpu/command_buffer/service/external_vk_image_backing.cc b/chromium/gpu/command_buffer/service/external_vk_image_backing.cc index cc0af9deea6..b99410e49d8 100644 --- a/chromium/gpu/command_buffer/service/external_vk_image_backing.cc +++ b/chromium/gpu/command_buffer/service/external_vk_image_backing.cc @@ -169,18 +169,18 @@ class ScopedPixelStore { DISALLOW_COPY_AND_ASSIGN(ScopedPixelStore); }; -base::Optional<DawnTextureFormat> GetDawnFormat(viz::ResourceFormat format) { +base::Optional<WGPUTextureFormat> GetWGPUFormat(viz::ResourceFormat format) { switch (format) { case viz::RED_8: case viz::ALPHA_8: case viz::LUMINANCE_8: - return DAWN_TEXTURE_FORMAT_R8_UNORM; + return WGPUTextureFormat_R8Unorm; case viz::RG_88: - return DAWN_TEXTURE_FORMAT_RG8_UNORM; + return WGPUTextureFormat_RG8Unorm; case viz::RGBA_8888: - return DAWN_TEXTURE_FORMAT_RGBA8_UNORM; + return WGPUTextureFormat_RGBA8Unorm; case viz::BGRA_8888: - return DAWN_TEXTURE_FORMAT_BGRA8_UNORM; + return WGPUTextureFormat_BGRA8Unorm; default: return {}; } @@ -265,7 +265,7 @@ std::unique_ptr<ExternalVkImageBacking> ExternalVkImageBacking::Create( auto backing = base::WrapUnique(new ExternalVkImageBacking( mailbox, format, size, color_space, usage, context_state, image, memory, requirements.size, vk_format, command_pool, GrVkYcbcrConversionInfo(), - GetDawnFormat(format), mem_alloc_info.memoryTypeIndex)); + GetWGPUFormat(format), mem_alloc_info.memoryTypeIndex)); if (!pixel_data.empty()) { backing->WritePixels( @@ -330,7 +330,7 @@ std::unique_ptr<ExternalVkImageBacking> ExternalVkImageBacking::CreateFromGMB( return base::WrapUnique(new ExternalVkImageBacking( mailbox, resource_format, size, color_space, usage, context_state, vk_image, vk_device_memory, memory_size, vk_image_info.format, - command_pool, gr_ycbcr_info, GetDawnFormat(resource_format), {})); + command_pool, gr_ycbcr_info, GetWGPUFormat(resource_format), {})); } if (gfx::NumberOfPlanesForLinearBufferFormat(buffer_format) != 1) { @@ -431,7 +431,7 @@ ExternalVkImageBacking::ExternalVkImageBacking( VkFormat vk_format, VulkanCommandPool* command_pool, const GrVkYcbcrConversionInfo& ycbcr_info, - base::Optional<DawnTextureFormat> dawn_format, + base::Optional<WGPUTextureFormat> wgpu_format, base::Optional<uint32_t> memory_type_index) : SharedImageBacking(mailbox, format, @@ -450,7 +450,7 @@ ExternalVkImageBacking::ExternalVkImageBacking( usage & SHARED_IMAGE_USAGE_PROTECTED, ycbcr_info)), command_pool_(command_pool), - dawn_format_(dawn_format), + wgpu_format_(wgpu_format), memory_type_index_(memory_type_index) {} ExternalVkImageBacking::~ExternalVkImageBacking() { @@ -461,8 +461,11 @@ bool ExternalVkImageBacking::BeginAccess( bool readonly, std::vector<SemaphoreHandle>* semaphore_handles, bool is_gl) { - if (readonly && !reads_in_progress_) - UpdateContent(is_gl ? 
kInGLTexture : kInVkImage); + if (readonly && !reads_in_progress_) { + UpdateContent(kInVkImage); + if (texture_) + UpdateContent(kInGLTexture); + } return BeginAccessInternal(readonly, semaphore_handles); } @@ -513,6 +516,11 @@ void ExternalVkImageBacking::Destroy() { context_state()->MakeCurrent(nullptr, true /* need_gl */); texture_->RemoveLightweightRef(have_context()); } + if (texture_passthrough_) { + if (!have_context()) + texture_passthrough_->MarkContextLost(); + texture_passthrough_ = nullptr; + } } bool ExternalVkImageBacking::ProduceLegacyMailbox( @@ -526,9 +534,9 @@ bool ExternalVkImageBacking::ProduceLegacyMailbox( std::unique_ptr<SharedImageRepresentationDawn> ExternalVkImageBacking::ProduceDawn(SharedImageManager* manager, MemoryTypeTracker* tracker, - DawnDevice dawnDevice) { + WGPUDevice wgpuDevice) { #if defined(OS_LINUX) && BUILDFLAG(USE_DAWN) - if (!dawn_format_) { + if (!wgpu_format_) { DLOG(ERROR) << "Format not supported for Dawn"; return nullptr; } @@ -548,7 +556,7 @@ ExternalVkImageBacking::ProduceDawn(SharedImageManager* manager, } return std::make_unique<ExternalVkImageDawnRepresentation>( - manager, this, tracker, dawnDevice, dawn_format_.value(), memory_fd, + manager, this, tracker, wgpuDevice, wgpu_format_.value(), memory_fd, image_info.fAlloc.fSize, memory_type_index_.value()); #else // !defined(OS_LINUX) || !BUILDFLAG(USE_DAWN) NOTIMPLEMENTED_LOG_ONCE(); @@ -556,62 +564,80 @@ ExternalVkImageBacking::ProduceDawn(SharedImageManager* manager, #endif } +GLuint ExternalVkImageBacking::ProduceGLTextureInternal() { +#if defined(OS_LINUX) + GrVkImageInfo image_info; + bool result = backend_texture_.getVkImageInfo(&image_info); + DCHECK(result); + + gl::GLApi* api = gl::g_current_gl_context; + GLuint memory_object = 0; + if (!use_separate_gl_texture()) { + int memory_fd = GetMemoryFd(image_info); + if (memory_fd < 0) { + return 0; + } + + api->glCreateMemoryObjectsEXTFn(1, &memory_object); + api->glImportMemoryFdEXTFn(memory_object, image_info.fAlloc.fSize, + GL_HANDLE_TYPE_OPAQUE_FD_EXT, memory_fd); + } + + GLuint internal_format = viz::TextureStorageFormat(format()); + GLint old_texture_binding = 0; + api->glGetIntegervFn(GL_TEXTURE_BINDING_2D, &old_texture_binding); + GLuint texture_service_id = 0; + api->glGenTexturesFn(1, &texture_service_id); + api->glBindTextureFn(GL_TEXTURE_2D, texture_service_id); + api->glTexParameteriFn(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR); + api->glTexParameteriFn(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR); + api->glTexParameteriFn(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE); + api->glTexParameteriFn(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE); + if (use_separate_gl_texture()) { + api->glTexStorage2DEXTFn(GL_TEXTURE_2D, 1, internal_format, size().width(), + size().height()); + } else { + DCHECK(memory_object); + if (internal_format == GL_BGRA8_EXT) { + // BGRA8 internal format is not well supported, so use RGBA8 instead. 
+ api->glTexStorageMem2DEXTFn(GL_TEXTURE_2D, 1, GL_RGBA8, size().width(), + size().height(), memory_object, 0); + api->glTexParameteriFn(GL_TEXTURE_2D, GL_TEXTURE_SWIZZLE_B, GL_RED); + api->glTexParameteriFn(GL_TEXTURE_2D, GL_TEXTURE_SWIZZLE_R, GL_BLUE); + } else { + api->glTexStorageMem2DEXTFn(GL_TEXTURE_2D, 1, internal_format, + size().width(), size().height(), + memory_object, 0); + } + } + api->glBindTextureFn(GL_TEXTURE_2D, old_texture_binding); + return texture_service_id; +#elif defined(OS_FUCHSIA) + NOTIMPLEMENTED_LOG_ONCE(); + return 0; +#else // !defined(OS_LINUX) && !defined(OS_FUCHSIA) +#error Unsupported OS +#endif +} + std::unique_ptr<SharedImageRepresentationGLTexture> ExternalVkImageBacking::ProduceGLTexture(SharedImageManager* manager, MemoryTypeTracker* tracker) { + DCHECK(!texture_passthrough_); if (!(usage() & SHARED_IMAGE_USAGE_GLES2)) { DLOG(ERROR) << "The backing is not created with GLES2 usage."; return nullptr; } -#if defined(OS_FUCHSIA) - NOTIMPLEMENTED_LOG_ONCE(); - return nullptr; -#elif defined(OS_LINUX) - GrVkImageInfo image_info; - bool result = backend_texture_.getVkImageInfo(&image_info); - DCHECK(result); +#if defined(OS_LINUX) if (!texture_) { - gl::GLApi* api = gl::g_current_gl_context; - GLuint memory_object = 0; - if (!use_separate_gl_texture()) { - int memory_fd = GetMemoryFd(image_info); - if (memory_fd < 0) { - return nullptr; - } - - api->glCreateMemoryObjectsEXTFn(1, &memory_object); - api->glImportMemoryFdEXTFn(memory_object, image_info.fAlloc.fSize, - GL_HANDLE_TYPE_OPAQUE_FD_EXT, memory_fd); - } - + GLuint texture_service_id = ProduceGLTextureInternal(); + if (!texture_service_id) + return nullptr; GLuint internal_format = viz::TextureStorageFormat(format()); - GLint old_texture_binding = 0; - api->glGetIntegervFn(GL_TEXTURE_BINDING_2D, &old_texture_binding); - GLuint texture_service_id; - api->glGenTexturesFn(1, &texture_service_id); - api->glBindTextureFn(GL_TEXTURE_2D, texture_service_id); - api->glTexParameteriFn(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR); - api->glTexParameteriFn(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR); - api->glTexParameteriFn(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE); - api->glTexParameteriFn(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE); - if (use_separate_gl_texture()) { - api->glTexStorage2DEXTFn(GL_TEXTURE_2D, 1, internal_format, - size().width(), size().height()); - } else { - DCHECK(memory_object); - if (internal_format == GL_BGRA8_EXT) { - // BGRA8 internal format is not well supported, so use RGBA8 instead. 
- api->glTexStorageMem2DEXTFn(GL_TEXTURE_2D, 1, GL_RGBA8, size().width(), - size().height(), memory_object, 0); - api->glTexParameteriFn(GL_TEXTURE_2D, GL_TEXTURE_SWIZZLE_B, GL_RED); - api->glTexParameteriFn(GL_TEXTURE_2D, GL_TEXTURE_SWIZZLE_R, GL_BLUE); - } else { - api->glTexStorageMem2DEXTFn(GL_TEXTURE_2D, 1, internal_format, - size().width(), size().height(), - memory_object, 0); - } - } + GLenum gl_format = viz::GLDataFormat(format()); + GLenum gl_type = viz::GLDataType(format()); + texture_ = new gles2::Texture(texture_service_id); texture_->SetLightweightRef(); texture_->SetTarget(GL_TEXTURE_2D, 1); @@ -624,17 +650,16 @@ ExternalVkImageBacking::ProduceGLTexture(SharedImageManager* manager, if (is_cleared_) cleared_rect = gfx::Rect(size()); - GLenum gl_format = viz::GLDataFormat(format()); - GLenum gl_type = viz::GLDataType(format()); texture_->SetLevelInfo(GL_TEXTURE_2D, 0, internal_format, size().width(), size().height(), 1, 0, gl_format, gl_type, cleared_rect); texture_->SetImmutable(true, true); - - api->glBindTextureFn(GL_TEXTURE_2D, old_texture_binding); } - return std::make_unique<ExternalVkImageGlRepresentation>( + return std::make_unique<ExternalVkImageGLRepresentation>( manager, this, tracker, texture_, texture_->service_id()); +#elif defined(OS_FUCHSIA) + NOTIMPLEMENTED_LOG_ONCE(); + return nullptr; #else // !defined(OS_LINUX) && !defined(OS_FUCHSIA) #error Unsupported OS #endif @@ -644,8 +669,35 @@ std::unique_ptr<SharedImageRepresentationGLTexturePassthrough> ExternalVkImageBacking::ProduceGLTexturePassthrough( SharedImageManager* manager, MemoryTypeTracker* tracker) { - // Passthrough command decoder is not currently used on Linux. + DCHECK(!texture_); + if (!(usage() & SHARED_IMAGE_USAGE_GLES2)) { + DLOG(ERROR) << "The backing is not created with GLES2 usage."; + return nullptr; + } + +#if defined(OS_LINUX) + if (!texture_passthrough_) { + GLuint texture_service_id = ProduceGLTextureInternal(); + if (!texture_service_id) + return nullptr; + GLuint internal_format = viz::TextureStorageFormat(format()); + GLenum gl_format = viz::GLDataFormat(format()); + GLenum gl_type = viz::GLDataType(format()); + + texture_passthrough_ = base::MakeRefCounted<gpu::gles2::TexturePassthrough>( + texture_service_id, GL_TEXTURE_2D, internal_format, size().width(), + size().height(), + /*depth=*/1, /*border=*/0, gl_format, gl_type); + } + + return std::make_unique<ExternalVkImageGLPassthroughRepresentation>( + manager, this, tracker, texture_passthrough_->service_id()); +#elif defined(OS_FUCHSIA) + NOTIMPLEMENTED_LOG_ONCE(); return nullptr; +#else // !defined(OS_LINUX) && !defined(OS_FUCHSIA) +#error Unsupported OS +#endif } std::unique_ptr<SharedImageRepresentationSkia> @@ -879,7 +931,9 @@ bool ExternalVkImageBacking::WritePixels(size_t data_size, void ExternalVkImageBacking::CopyPixelsFromGLTextureToVkImage() { DCHECK(use_separate_gl_texture()); - DCHECK(texture_); + DCHECK_NE(!!texture_, !!texture_passthrough_); + const GLuint texture_service_id = + texture_ ? 
texture_->service_id() : texture_passthrough_->service_id(); DCHECK_GE(format(), 0); DCHECK_LE(format(), viz::RESOURCE_FORMAT_MAX); @@ -908,7 +962,7 @@ void ExternalVkImageBacking::CopyPixelsFromGLTextureToVkImage() { api->glGenFramebuffersEXTFn(1, &framebuffer); api->glBindFramebufferEXTFn(GL_READ_FRAMEBUFFER, framebuffer); api->glFramebufferTexture2DEXTFn(GL_READ_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, - GL_TEXTURE_2D, texture_->service_id(), 0); + GL_TEXTURE_2D, texture_service_id, 0); GLenum status = api->glCheckFramebufferStatusEXTFn(GL_READ_FRAMEBUFFER); DCHECK_EQ(status, static_cast<GLenum>(GL_FRAMEBUFFER_COMPLETE)) << "CheckFramebufferStatusEXT() failed."; @@ -939,7 +993,9 @@ void ExternalVkImageBacking::CopyPixelsFromGLTextureToVkImage() { void ExternalVkImageBacking::CopyPixelsFromShmToGLTexture() { DCHECK(use_separate_gl_texture()); - DCHECK(texture_); + DCHECK_NE(!!texture_, !!texture_passthrough_); + const GLuint texture_service_id = + texture_ ? texture_->service_id() : texture_passthrough_->service_id(); DCHECK_GE(format(), 0); DCHECK_LE(format(), viz::RESOURCE_FORMAT_MAX); @@ -964,7 +1020,7 @@ void ExternalVkImageBacking::CopyPixelsFromShmToGLTexture() { gl::GLApi* api = gl::g_current_gl_context; GLint old_texture; api->glGetIntegervFn(GL_TEXTURE_BINDING_2D, &old_texture); - api->glBindTextureFn(GL_TEXTURE_2D, texture_->service_id()); + api->glBindTextureFn(GL_TEXTURE_2D, texture_service_id); base::CheckedNumeric<size_t> checked_size = bytes_per_pixel; checked_size *= size().width(); diff --git a/chromium/gpu/command_buffer/service/external_vk_image_backing.h b/chromium/gpu/command_buffer/service/external_vk_image_backing.h index fb5d85903b8..6bab8393dd2 100644 --- a/chromium/gpu/command_buffer/service/external_vk_image_backing.h +++ b/chromium/gpu/command_buffer/service/external_vk_image_backing.h @@ -51,6 +51,10 @@ class ExternalVkImageBacking final : public SharedImageBacking { SharedContextState* context_state() const { return context_state_; } const GrBackendTexture& backend_texture() const { return backend_texture_; } + const scoped_refptr<gles2::TexturePassthrough>& GetTexturePassthrough() + const { + return texture_passthrough_; + } VulkanImplementation* vulkan_implementation() const { return context_state()->vk_context_provider()->GetVulkanImplementation(); } @@ -103,7 +107,7 @@ class ExternalVkImageBacking final : public SharedImageBacking { std::unique_ptr<SharedImageRepresentationDawn> ProduceDawn( SharedImageManager* manager, MemoryTypeTracker* tracker, - DawnDevice dawnDevice) override; + WGPUDevice dawnDevice) override; std::unique_ptr<SharedImageRepresentationGLTexture> ProduceGLTexture( SharedImageManager* manager, MemoryTypeTracker* tracker) override; @@ -128,7 +132,7 @@ class ExternalVkImageBacking final : public SharedImageBacking { VkFormat vk_format, VulkanCommandPool* command_pool, const GrVkYcbcrConversionInfo& ycbcr_info, - base::Optional<DawnTextureFormat> dawn_format, + base::Optional<WGPUTextureFormat> wgpu_format, base::Optional<uint32_t> memory_type_index); #ifdef OS_LINUX @@ -141,6 +145,8 @@ class ExternalVkImageBacking final : public SharedImageBacking { base::WritableSharedMemoryMapping shared_memory_mapping, size_t stride, size_t memory_offset); + // Returns texture_service_id for ProduceGLTexture and GLTexturePassthrough. 
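The copy helpers above now work with either decoder flavor: exactly one of texture_ (validating decoder) or texture_passthrough_ (passthrough decoder) may be set, which DCHECK_NE(!!texture_, !!texture_passthrough_) encodes, since boolean inequality fails both when neither and when both are set. The same pattern in isolation, with stand-in types:

  #include <cassert>

  struct ValidatingTexture  { unsigned service_id; };
  struct PassthroughTexture { unsigned service_id; };

  // Stand-in for the service-id selection above: exactly one backing
  // texture object must exist at any time.
  unsigned ServiceId(const ValidatingTexture* texture,
                     const PassthroughTexture* passthrough) {
    assert(!!texture != !!passthrough);  // one and only one is non-null
    return texture ? texture->service_id : passthrough->service_id;
  }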
+ GLuint ProduceGLTextureInternal(); using FillBufferCallback = base::OnceCallback<void(void* buffer)>; bool WritePixels(size_t data_size, @@ -160,6 +166,7 @@ class ExternalVkImageBacking final : public SharedImageBacking { bool is_write_in_progress_ = false; uint32_t reads_in_progress_ = 0; gles2::Texture* texture_ = nullptr; + scoped_refptr<gles2::TexturePassthrough> texture_passthrough_; // GMB related stuff. base::WritableSharedMemoryMapping shared_memory_mapping_; @@ -173,7 +180,7 @@ class ExternalVkImageBacking final : public SharedImageBacking { }; uint32_t latest_content_ = 0; - base::Optional<DawnTextureFormat> dawn_format_; + base::Optional<WGPUTextureFormat> wgpu_format_; base::Optional<uint32_t> memory_type_index_; DISALLOW_COPY_AND_ASSIGN(ExternalVkImageBacking); diff --git a/chromium/gpu/command_buffer/service/external_vk_image_dawn_representation.cc b/chromium/gpu/command_buffer/service/external_vk_image_dawn_representation.cc index 9e08c403f21..f7f0427f6ec 100644 --- a/chromium/gpu/command_buffer/service/external_vk_image_dawn_representation.cc +++ b/chromium/gpu/command_buffer/service/external_vk_image_dawn_representation.cc @@ -23,14 +23,14 @@ ExternalVkImageDawnRepresentation::ExternalVkImageDawnRepresentation( SharedImageManager* manager, SharedImageBacking* backing, MemoryTypeTracker* tracker, - DawnDevice device, - DawnTextureFormat dawn_format, + WGPUDevice device, + WGPUTextureFormat wgpu_format, int memory_fd, VkDeviceSize allocation_size, uint32_t memory_type_index) : SharedImageRepresentationDawn(manager, backing, tracker), device_(device), - dawn_format_(dawn_format), + wgpu_format_(wgpu_format), memory_fd_(memory_fd), allocation_size_(allocation_size), memory_type_index_(memory_type_index), @@ -47,19 +47,19 @@ ExternalVkImageDawnRepresentation::~ExternalVkImageDawnRepresentation() { dawn_procs_.deviceRelease(device_); } -DawnTexture ExternalVkImageDawnRepresentation::BeginAccess( - DawnTextureUsage usage) { +WGPUTexture ExternalVkImageDawnRepresentation::BeginAccess( + WGPUTextureUsage usage) { std::vector<SemaphoreHandle> handles; if (!backing_impl()->BeginAccess(false, &handles, false /* is_gl */)) { return nullptr; } - DawnTextureDescriptor texture_descriptor = {}; + WGPUTextureDescriptor texture_descriptor = {}; texture_descriptor.nextInChain = nullptr; - texture_descriptor.format = dawn_format_; + texture_descriptor.format = wgpu_format_; texture_descriptor.usage = usage; - texture_descriptor.dimension = DAWN_TEXTURE_DIMENSION_2D; + texture_descriptor.dimension = WGPUTextureDimension_2D; texture_descriptor.size = {size().width(), size().height(), 1}; texture_descriptor.arrayLayerCount = 1; texture_descriptor.mipLevelCount = 1; @@ -81,7 +81,7 @@ DawnTexture ExternalVkImageDawnRepresentation::BeginAccess( descriptor.waitFDs.push_back(handle.TakeHandle().release()); } - texture_ = dawn_native::vulkan::WrapVulkanImageOpaqueFD(device_, &descriptor); + texture_ = dawn_native::vulkan::WrapVulkanImage(device_, &descriptor); if (texture_) { // Keep a reference to the texture so that it stays valid (its content diff --git a/chromium/gpu/command_buffer/service/external_vk_image_dawn_representation.h b/chromium/gpu/command_buffer/service/external_vk_image_dawn_representation.h index e605db8e46b..f26e0d12f06 100644 --- a/chromium/gpu/command_buffer/service/external_vk_image_dawn_representation.h +++ b/chromium/gpu/command_buffer/service/external_vk_image_dawn_representation.h @@ -15,24 +15,24 @@ class ExternalVkImageDawnRepresentation : public 
SharedImageRepresentationDawn { ExternalVkImageDawnRepresentation(SharedImageManager* manager, SharedImageBacking* backing, MemoryTypeTracker* tracker, - DawnDevice device, - DawnTextureFormat dawn_format, + WGPUDevice device, + WGPUTextureFormat dawn_format, int memory_fd, VkDeviceSize allocation_size, uint32_t memory_type_index); ~ExternalVkImageDawnRepresentation() override; - DawnTexture BeginAccess(DawnTextureUsage usage) override; + WGPUTexture BeginAccess(WGPUTextureUsage usage) override; void EndAccess() override; private: - const DawnDevice device_; - const DawnTextureFormat dawn_format_; + const WGPUDevice device_; + const WGPUTextureFormat wgpu_format_; const int memory_fd_; const VkDeviceSize allocation_size_; const uint32_t memory_type_index_; - DawnTexture texture_ = nullptr; + WGPUTexture texture_ = nullptr; // TODO(cwallez@chromium.org): Load procs only once when the factory is // created and pass a pointer to them around? diff --git a/chromium/gpu/command_buffer/service/external_vk_image_gl_representation.cc b/chromium/gpu/command_buffer/service/external_vk_image_gl_representation.cc index 96035d8913a..58f60deabe2 100644 --- a/chromium/gpu/command_buffer/service/external_vk_image_gl_representation.cc +++ b/chromium/gpu/command_buffer/service/external_vk_image_gl_representation.cc @@ -58,27 +58,17 @@ GLenum ToGLImageLayout(VkImageLayout layout) { } // namespace -ExternalVkImageGlRepresentation::ExternalVkImageGlRepresentation( - SharedImageManager* manager, +ExternalVkImageGLRepresentationShared::ExternalVkImageGLRepresentationShared( SharedImageBacking* backing, - MemoryTypeTracker* tracker, - gles2::Texture* texture, GLuint texture_service_id) - : SharedImageRepresentationGLTexture(manager, backing, tracker), - texture_(texture), + : backing_(static_cast<ExternalVkImageBacking*>(backing)), texture_service_id_(texture_service_id) {} -ExternalVkImageGlRepresentation::~ExternalVkImageGlRepresentation() {} - -gles2::Texture* ExternalVkImageGlRepresentation::GetTexture() { - return texture_; -} - -bool ExternalVkImageGlRepresentation::BeginAccess(GLenum mode) { +bool ExternalVkImageGLRepresentationShared::BeginAccess(GLenum mode) { // There should not be multiple accesses in progress on the same // representation. if (current_access_mode_) { - LOG(ERROR) << "BeginAccess called on ExternalVkImageGlRepresentation before" + LOG(ERROR) << "BeginAccess called on ExternalVkImageGLRepresentation before" << " the previous access ended."; return false; } @@ -87,7 +77,7 @@ bool ExternalVkImageGlRepresentation::BeginAccess(GLenum mode) { mode == GL_SHARED_IMAGE_ACCESS_MODE_READWRITE_CHROMIUM); const bool readonly = (mode == GL_SHARED_IMAGE_ACCESS_MODE_READ_CHROMIUM); - if (!readonly && backing()->format() == viz::ResourceFormat::BGRA_8888) { + if (!readonly && backing_impl()->format() == viz::ResourceFormat::BGRA_8888) { NOTIMPLEMENTED() << "BeginAccess write on a BGRA_8888 backing is not supported."; return false; @@ -114,11 +104,11 @@ bool ExternalVkImageGlRepresentation::BeginAccess(GLenum mode) { return true; } -void ExternalVkImageGlRepresentation::EndAccess() { +void ExternalVkImageGLRepresentationShared::EndAccess() { if (!current_access_mode_) { // TODO(crbug.com/933452): We should be able to handle this failure more // gracefully rather than shutting down the whole process. 
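The Dawn-representation changes above track Dawn's switch from its own C types (DawnDevice, DawnTextureDescriptor, DAWN_TEXTURE_DIMENSION_2D) to the shared webgpu.h naming (WGPUDevice, WGPUTextureDescriptor, WGPUTextureDimension_2D). A reduced sketch of texture creation in the renamed API, using the same descriptor fields as BeginAccess above; the format and usage values are illustrative and era-specific, as this header has since evolved:

  #include <dawn/webgpu.h>

  // Create a 2D colour texture through the renamed webgpu.h C API.
  WGPUTexture CreateColorTexture(WGPUDevice device,
                                 uint32_t width, uint32_t height) {
    WGPUTextureDescriptor desc = {};
    desc.nextInChain = nullptr;
    desc.dimension = WGPUTextureDimension_2D;
    desc.format = WGPUTextureFormat_RGBA8Unorm;  // illustrative choice
    desc.usage = WGPUTextureUsage_Sampled | WGPUTextureUsage_CopyDst;
    desc.size = {width, height, 1};
    desc.arrayLayerCount = 1;  // still present in this revision of the API
    desc.mipLevelCount = 1;
    desc.sampleCount = 1;
    return wgpuDeviceCreateTexture(device, &desc);
  }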
- LOG(ERROR) << "EndAccess called on ExternalVkImageGlRepresentation before " + LOG(ERROR) << "EndAccess called on ExternalVkImageGLRepresentation before " << "BeginAccess"; return; } @@ -140,7 +130,7 @@ void ExternalVkImageGlRepresentation::EndAccess() { // TODO(crbug.com/933452): We should be able to handle this failure more // gracefully rather than shutting down the whole process. LOG(FATAL) << "Unable to create a VkSemaphore in " - << "ExternalVkImageGlRepresentation for synchronization with " + << "ExternalVkImageGLRepresentation for synchronization with " << "Vulkan"; return; } @@ -150,7 +140,7 @@ void ExternalVkImageGlRepresentation::EndAccess() { vkDestroySemaphore(backing_impl()->device(), semaphore, nullptr); if (!semaphore_handle.is_valid()) { LOG(FATAL) << "Unable to export VkSemaphore into GL in " - << "ExternalVkImageGlRepresentation for synchronization with " + << "ExternalVkImageGLRepresentation for synchronization with " << "Vulkan"; return; } @@ -162,7 +152,7 @@ void ExternalVkImageGlRepresentation::EndAccess() { // TODO(crbug.com/933452): We should be able to semaphore_handle this // failure more gracefully rather than shutting down the whole process. LOG(FATAL) << "Unable to export VkSemaphore into GL in " - << "ExternalVkImageGlRepresentation for synchronization with " + << "ExternalVkImageGLRepresentation for synchronization with " << "Vulkan"; return; } @@ -182,7 +172,7 @@ void ExternalVkImageGlRepresentation::EndAccess() { true /* is_gl */); } -GLuint ExternalVkImageGlRepresentation::ImportVkSemaphoreIntoGL( +GLuint ExternalVkImageGLRepresentationShared::ImportVkSemaphoreIntoGL( SemaphoreHandle handle) { if (!handle.is_valid()) return 0; @@ -209,4 +199,50 @@ GLuint ExternalVkImageGlRepresentation::ImportVkSemaphoreIntoGL( #endif } +ExternalVkImageGLRepresentation::ExternalVkImageGLRepresentation( + SharedImageManager* manager, + SharedImageBacking* backing, + MemoryTypeTracker* tracker, + gles2::Texture* texture, + GLuint texture_service_id) + : SharedImageRepresentationGLTexture(manager, backing, tracker), + texture_(texture), + representation_shared_(backing, texture_service_id) {} + +ExternalVkImageGLRepresentation::~ExternalVkImageGLRepresentation() {} + +gles2::Texture* ExternalVkImageGLRepresentation::GetTexture() { + return texture_; +} + +bool ExternalVkImageGLRepresentation::BeginAccess(GLenum mode) { + return representation_shared_.BeginAccess(mode); +} +void ExternalVkImageGLRepresentation::EndAccess() { + representation_shared_.EndAccess(); +} + +ExternalVkImageGLPassthroughRepresentation:: + ExternalVkImageGLPassthroughRepresentation(SharedImageManager* manager, + SharedImageBacking* backing, + MemoryTypeTracker* tracker, + GLuint texture_service_id) + : SharedImageRepresentationGLTexturePassthrough(manager, backing, tracker), + representation_shared_(backing, texture_service_id) {} + +ExternalVkImageGLPassthroughRepresentation:: + ~ExternalVkImageGLPassthroughRepresentation() {} + +const scoped_refptr<gles2::TexturePassthrough>& +ExternalVkImageGLPassthroughRepresentation::GetTexturePassthrough() { + return representation_shared_.backing_impl()->GetTexturePassthrough(); +} + +bool ExternalVkImageGLPassthroughRepresentation::BeginAccess(GLenum mode) { + return representation_shared_.BeginAccess(mode); +} +void ExternalVkImageGLPassthroughRepresentation::EndAccess() { + representation_shared_.EndAccess(); +} + } // namespace gpu diff --git a/chromium/gpu/command_buffer/service/external_vk_image_gl_representation.h 
b/chromium/gpu/command_buffer/service/external_vk_image_gl_representation.h index 13a5e325664..8fa45fffc34 100644 --- a/chromium/gpu/command_buffer/service/external_vk_image_gl_representation.h +++ b/chromium/gpu/command_buffer/service/external_vk_image_gl_representation.h @@ -5,31 +5,28 @@ #ifndef GPU_COMMAND_BUFFER_SERVICE_EXTERNAL_VK_IMAGE_GL_REPRESENTATION_H_ #define GPU_COMMAND_BUFFER_SERVICE_EXTERNAL_VK_IMAGE_GL_REPRESENTATION_H_ +#include <memory> + #include "gpu/command_buffer/service/external_vk_image_backing.h" #include "gpu/command_buffer/service/shared_image_representation.h" namespace gpu { -class ExternalVkImageGlRepresentation - : public SharedImageRepresentationGLTexture { +// ExternalVkImageGLRepresentationShared implements BeginAccess and EndAccess +// methods for ExternalVkImageGLRepresentation and +// ExternalVkImageGLPassthroughRepresentation. +class ExternalVkImageGLRepresentationShared { public: - ExternalVkImageGlRepresentation(SharedImageManager* manager, - SharedImageBacking* backing, - MemoryTypeTracker* tracker, - gles2::Texture* texture, - GLuint texture_service_id); - ~ExternalVkImageGlRepresentation() override; + ExternalVkImageGLRepresentationShared(SharedImageBacking* backing, + GLuint texture_service_id); + ~ExternalVkImageGLRepresentationShared() = default; - // SharedImageRepresentationGLTexture implementation. - gles2::Texture* GetTexture() override; - bool BeginAccess(GLenum mode) override; - void EndAccess() override; + bool BeginAccess(GLenum mode); + void EndAccess(); - private: - ExternalVkImageBacking* backing_impl() { - return static_cast<ExternalVkImageBacking*>(backing()); - } + ExternalVkImageBacking* backing_impl() { return backing_; } + private: gpu::VulkanImplementation* vk_implementation() { return backing_impl() ->context_state() @@ -58,11 +55,54 @@ class ExternalVkImageGlRepresentation GLuint ImportVkSemaphoreIntoGL(SemaphoreHandle handle); void DestroyEndAccessSemaphore(); - gles2::Texture* texture_ = nullptr; + ExternalVkImageBacking* backing_; GLuint texture_service_id_ = 0; GLenum current_access_mode_ = 0; - DISALLOW_COPY_AND_ASSIGN(ExternalVkImageGlRepresentation); + DISALLOW_COPY_AND_ASSIGN(ExternalVkImageGLRepresentationShared); +}; + +class ExternalVkImageGLRepresentation + : public SharedImageRepresentationGLTexture { + public: + ExternalVkImageGLRepresentation(SharedImageManager* manager, + SharedImageBacking* backing, + MemoryTypeTracker* tracker, + gles2::Texture* texture, + GLuint texture_service_id); + ~ExternalVkImageGLRepresentation() override; + + // SharedImageRepresentationGLTexture implementation. + gles2::Texture* GetTexture() override; + bool BeginAccess(GLenum mode) override; + void EndAccess() override; + + private: + gles2::Texture* texture_ = nullptr; + ExternalVkImageGLRepresentationShared representation_shared_; + + DISALLOW_COPY_AND_ASSIGN(ExternalVkImageGLRepresentation); +}; + +class ExternalVkImageGLPassthroughRepresentation + : public SharedImageRepresentationGLTexturePassthrough { + public: + ExternalVkImageGLPassthroughRepresentation(SharedImageManager* manager, + SharedImageBacking* backing, + MemoryTypeTracker* tracker, + GLuint texture_service_id); + ~ExternalVkImageGLPassthroughRepresentation() override; + + // SharedImageRepresentationGLTexturePassthrough implementation. 
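The header restructuring above replaces one concrete representation class with a small non-virtual helper plus two thin facades, so the semaphore and access-mode bookkeeping is written once and both the validating and passthrough representations forward to it. The shape of the pattern, stripped to stand-in types:

  // Shared, non-virtual bookkeeping, written once.
  class AccessHelper {
   public:
    bool BeginAccess(unsigned mode) { /* wait on semaphores, etc. */ return true; }
    void EndAccess() { /* signal semaphores, etc. */ }
  };

  // Each public interface is a thin facade over the helper.
  class ValidatingFacade {
   public:
    bool BeginAccess(unsigned mode) { return helper_.BeginAccess(mode); }
    void EndAccess() { helper_.EndAccess(); }
   private:
    AccessHelper helper_;
  };

Composition fits here because the two representation types already derive from different SharedImageRepresentation bases; hoisting the shared logic into a common parent would force multiple inheritance across both hierarchies, while a member helper sidesteps that entirely.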
+ const scoped_refptr<gles2::TexturePassthrough>& GetTexturePassthrough() + override; + bool BeginAccess(GLenum mode) override; + void EndAccess() override; + + private: + ExternalVkImageGLRepresentationShared representation_shared_; + + DISALLOW_COPY_AND_ASSIGN(ExternalVkImageGLPassthroughRepresentation); }; } // namespace gpu diff --git a/chromium/gpu/command_buffer/service/feature_info.cc b/chromium/gpu/command_buffer/service/feature_info.cc index a737820a6d9..4381c016645 100644 --- a/chromium/gpu/command_buffer/service/feature_info.cc +++ b/chromium/gpu/command_buffer/service/feature_info.cc @@ -417,6 +417,13 @@ void FeatureInfo::EnableCHROMIUMColorBufferFloatRGB() { AddExtensionString("GL_CHROMIUM_color_buffer_float_rgb"); } +void FeatureInfo::EnableOESFboRenderMipmap() { + if (!feature_flags_.oes_fbo_render_mipmap) { + AddExtensionString("GL_OES_fbo_render_mipmap"); + feature_flags_.oes_fbo_render_mipmap = true; + } +} + void FeatureInfo::EnableOESTextureFloatLinear() { if (!oes_texture_float_linear_available_) return; @@ -712,6 +719,14 @@ void FeatureInfo::InitializeFeatures() { validators_.index_type.AddValue(GL_UNSIGNED_INT); } + if (gl_version_info_->IsAtLeastGL(3, 0) || gl_version_info_->is_es3 || + gfx::HasExtension(extensions, "GL_OES_fbo_render_mipmap") || + gfx::HasExtension(extensions, "GL_EXT_framebuffer_object")) { + if (!disallowed_features_.oes_fbo_render_mipmap) { + EnableOESFboRenderMipmap(); + } + } + bool has_srgb_framebuffer_support = false; if (gl_version_info_->IsAtLeastGL(3, 2) || (gl_version_info_->IsAtLeastGL(2, 0) && @@ -991,26 +1006,6 @@ void FeatureInfo::InitializeFeatures() { validators_.capability.AddValue(GL_SAMPLE_ALPHA_TO_ONE_EXT); } - if (gfx::HasExtension(extensions, "GL_INTEL_framebuffer_CMAA")) { - feature_flags_.chromium_screen_space_antialiasing = true; - AddExtensionString("GL_CHROMIUM_screen_space_antialiasing"); - } else if (gl_version_info_->IsAtLeastGLES(3, 1) || - (gl_version_info_->IsAtLeastGL(3, 0) && - gfx::HasExtension(extensions, - "GL_ARB_shading_language_420pack") && - gfx::HasExtension(extensions, "GL_ARB_texture_storage") && - gfx::HasExtension(extensions, "GL_ARB_texture_gather") && - gfx::HasExtension(extensions, - "GL_ARB_explicit_uniform_location") && - gfx::HasExtension(extensions, - "GL_ARB_explicit_attrib_location") && - gfx::HasExtension(extensions, - "GL_ARB_shader_image_load_store"))) { - feature_flags_.chromium_screen_space_antialiasing = true; - feature_flags_.use_chromium_screen_space_antialiasing_via_shaders = true; - AddExtensionString("GL_CHROMIUM_screen_space_antialiasing"); - } - if (gfx::HasExtension(extensions, "GL_OES_depth24") || gl::HasDesktopGLFeatures() || gl_version_info_->is_es3) { AddExtensionString("GL_OES_depth24"); @@ -1570,14 +1565,47 @@ void FeatureInfo::InitializeFeatures() { if (!is_passthrough_cmd_decoder_ || gfx::HasExtension(extensions, "GL_ANGLE_multi_draw")) { - feature_flags_.webgl_multi_draw = true; - AddExtensionString("GL_WEBGL_multi_draw"); if (gfx::HasExtension(extensions, "GL_ANGLE_instanced_arrays") || feature_flags_.angle_instanced_arrays || gl_version_info_->is_es3 || gl_version_info_->is_desktop_core_profile) { - feature_flags_.webgl_multi_draw_instanced = true; - AddExtensionString("GL_WEBGL_multi_draw_instanced"); + feature_flags_.webgl_multi_draw = true; + AddExtensionString("GL_WEBGL_multi_draw"); + } + } + +#if defined(OS_MACOSX) + if (is_passthrough_cmd_decoder_ && + gfx::HasExtension(extensions, "GL_ANGLE_base_vertex_base_instance")) { +#else + if 
((!is_passthrough_cmd_decoder_ && + ((gl_version_info_->IsAtLeastGLES(3, 2) && + gfx::HasExtension(extensions, "GL_EXT_base_instance")) || + (gl_version_info_->is_desktop_core_profile && + gl_version_info_->IsAtLeastGL(4, 2)))) || + gfx::HasExtension(extensions, "GL_ANGLE_base_vertex_base_instance")) { +#endif + // TODO(shrekshao): change condition to the following after workaround for + // Mac AMD and non-native base instance support are implemented, or when + // angle is universally used. + // + // if ((!is_passthrough_cmd_decoder_ && + // ((gl_version_info_->IsAtLeastGLES(3, 2) || + // gfx::HasExtension(extensions, + // "GL_OES_draw_elements_base_vertex_base_instance") || + // gfx::HasExtension(extensions, + // "GL_EXT_draw_elements_base_vertex_base_instance")) || + // (gl_version_info_->is_desktop_core_profile && + // gl_version_info_->IsAtLeastGL(3, 2)))) || + // gfx::HasExtension(extensions, "GL_ANGLE_base_vertex_base_instance")) + // { + feature_flags_.webgl_draw_instanced_base_vertex_base_instance = true; + AddExtensionString("GL_WEBGL_draw_instanced_base_vertex_base_instance"); + if (feature_flags_.webgl_multi_draw) { + feature_flags_.webgl_multi_draw_instanced_base_vertex_base_instance = + true; + AddExtensionString( + "GL_WEBGL_multi_draw_instanced_base_vertex_base_instance"); } } diff --git a/chromium/gpu/command_buffer/service/feature_info.h b/chromium/gpu/command_buffer/service/feature_info.h index 3402be518ac..2ca859e9cd8 100644 --- a/chromium/gpu/command_buffer/service/feature_info.h +++ b/chromium/gpu/command_buffer/service/feature_info.h @@ -141,11 +141,13 @@ class GPU_GLES2_EXPORT FeatureInfo : public base::RefCounted<FeatureInfo> { bool android_surface_control = false; bool khr_robust_buffer_access_behavior = false; bool webgl_multi_draw = false; - bool webgl_multi_draw_instanced = false; bool nv_internalformat_sample_query = false; bool amd_framebuffer_multisample_advanced = false; bool ext_float_blend = false; bool chromium_completion_query = false; + bool oes_fbo_render_mipmap = false; + bool webgl_draw_instanced_base_vertex_base_instance = false; + bool webgl_multi_draw_instanced_base_vertex_base_instance = false; }; FeatureInfo(); @@ -208,6 +210,7 @@ class GPU_GLES2_EXPORT FeatureInfo : public base::RefCounted<FeatureInfo> { void EnableEXTColorBufferFloat(); void EnableEXTColorBufferHalfFloat(); void EnableEXTTextureFilterAnisotropic(); + void EnableOESFboRenderMipmap(); void EnableOESTextureFloatLinear(); void EnableOESTextureHalfFloatLinear(); diff --git a/chromium/gpu/command_buffer/service/gl_context_virtual.cc b/chromium/gpu/command_buffer/service/gl_context_virtual.cc index 81703702c64..c6e000cd9a6 100644 --- a/chromium/gpu/command_buffer/service/gl_context_virtual.cc +++ b/chromium/gpu/command_buffer/service/gl_context_virtual.cc @@ -13,12 +13,6 @@ #include "ui/gl/gpu_preference.h" #include "ui/gl/gpu_timing.h" -// TODO(crbug.com/892490): remove this once the cause of this bug is -// known. -#if defined(OS_ANDROID) -#include "base/debug/dump_without_crashing.h" -#endif - namespace gpu { GLContextVirtual::GLContextVirtual( @@ -45,11 +39,6 @@ bool GLContextVirtual::MakeCurrent(gl::GLSurface* surface) { return shared_context_->MakeVirtuallyCurrent(this, surface); LOG(ERROR) << "Trying to make virtual context current without decoder."; -// TODO(crbug.com/892490): remove this once the cause of this bug is -// known. 
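The preprocessor-spliced condition in the feature_info.cc hunk above is easier to audit written out flat. A condensed restatement, where the bool names are shorthand for the expressions in the hunk, not real variables:

  bool angle_ext = HasExtension("GL_ANGLE_base_vertex_base_instance");
  #if defined(OS_MACOSX)
  // Mac: only the passthrough decoder on top of ANGLE qualifies.
  bool enable = is_passthrough_cmd_decoder && angle_ext;
  #else
  // Elsewhere: the validating decoder qualifies with ES 3.2 plus
  // GL_EXT_base_instance, or with a desktop core profile at GL 4.2+;
  // the ANGLE extension qualifies either decoder.
  bool enable =
      (!is_passthrough_cmd_decoder &&
       ((is_at_least_gles_3_2 && HasExtension("GL_EXT_base_instance")) ||
        (is_desktop_core_profile && is_at_least_gl_4_2))) ||
      angle_ext;
  #endif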
-#if defined(OS_ANDROID) - base::debug::DumpWithoutCrashing(); -#endif return false; } diff --git a/chromium/gpu/command_buffer/service/gles2_cmd_apply_framebuffer_attachment_cmaa_intel.cc b/chromium/gpu/command_buffer/service/gles2_cmd_apply_framebuffer_attachment_cmaa_intel.cc deleted file mode 100644 index 433277b4f12..00000000000 --- a/chromium/gpu/command_buffer/service/gles2_cmd_apply_framebuffer_attachment_cmaa_intel.cc +++ /dev/null @@ -1,1851 +0,0 @@ -// Copyright 2016 The Chromium Authors. All rights reserved. -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -#include "gpu/command_buffer/service/gles2_cmd_apply_framebuffer_attachment_cmaa_intel.h" - -#include "base/logging.h" -#include "gpu/command_buffer/service/framebuffer_manager.h" -#include "gpu/command_buffer/service/gles2_cmd_copy_texture_chromium.h" -#include "gpu/command_buffer/service/gles2_cmd_decoder.h" -#include "gpu/command_buffer/service/texture_manager.h" -#include "ui/gl/gl_context.h" -#include "ui/gl/gl_gl_api_implementation.h" -#include "ui/gl/gl_version_info.h" - -#define SHADER(Src) #Src - -namespace gpu { -namespace gles2 { - -ApplyFramebufferAttachmentCMAAINTELResourceManager:: - ApplyFramebufferAttachmentCMAAINTELResourceManager() - : initialized_(false), - textures_initialized_(false), - is_in_gamma_correct_mode_(false), - supports_usampler_(true), - supports_r8_image_(true), - is_gles31_compatible_(false), - frame_id_(0), - width_(0), - height_(0), - edges0_shader_(0), - edges1_shader_(0), - edges_combine_shader_(0), - process_and_apply_shader_(0), - debug_display_edges_shader_(0), - cmaa_framebuffer_(0), - rgba8_texture_(0), - working_color_texture_(0), - edges0_texture_(0), - edges1_texture_(0), - mini4_edge_texture_(0), - mini4_edge_depth_texture_(0), - edges0_shader_result_rgba_texture_slot1_(0), - edges0_shader_target_texture_slot2_(0), - edges1_shader_result_edge_texture_(0), - process_and_apply_shader_result_rgba_texture_slot1_(0), - edges_combine_shader_result_edge_texture_(0) {} - -ApplyFramebufferAttachmentCMAAINTELResourceManager:: - ~ApplyFramebufferAttachmentCMAAINTELResourceManager() { - Destroy(); -} - -void ApplyFramebufferAttachmentCMAAINTELResourceManager::Initialize( - gles2::GLES2Decoder* decoder) { - DCHECK(decoder); - is_gles31_compatible_ = - decoder->GetGLContext()->GetVersionInfo()->IsAtLeastGLES(3, 1); - - if (is_gles31_compatible_) { - supports_r8_image_ = - decoder->GetGLContext()->HasExtension("GL_NV_image_formats"); - - // ES 3.0 requires GL_RGBA8UI is color renderable. - supports_usampler_ = true; - } else { - // CMAA requires GL_ARB_shader_image_load_store for GL, and it requires r8 - // image texture. - DCHECK(decoder->GetGLContext()->HasExtension( - "GL_ARB_shader_image_load_store")); - supports_r8_image_ = true; - - // Check if RGBA8UI is supported as an FBO colour target with depth. - // If not supported, GLSL needs to convert the data to/from float so there - // is a small extra cost. 
- { - glActiveTexture(GL_TEXTURE0); - - GLuint rgba8ui_texture = 0, depth_texture = 0; - glGenTextures(1, &rgba8ui_texture); - glBindTexture(GL_TEXTURE_2D, rgba8ui_texture); - glTexStorage2DEXT(GL_TEXTURE_2D, 1, GL_RGBA8UI, 4, 4); - - glGenTextures(1, &depth_texture); - glBindTexture(GL_TEXTURE_2D, depth_texture); - glTexStorage2DEXT(GL_TEXTURE_2D, 1, GL_DEPTH_COMPONENT16, 4, 4); - - // Create the FBO - GLuint rgba8ui_framebuffer = 0; - glGenFramebuffersEXT(1, &rgba8ui_framebuffer); - glBindFramebufferEXT(GL_FRAMEBUFFER, rgba8ui_framebuffer); - - // Bind to the FBO to test support - glFramebufferTexture2DEXT(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, - GL_TEXTURE_2D, rgba8ui_texture, 0); - glFramebufferTexture2DEXT(GL_FRAMEBUFFER, GL_DEPTH_ATTACHMENT, - GL_TEXTURE_2D, depth_texture, 0); - GLenum status = glCheckFramebufferStatusEXT(GL_FRAMEBUFFER); - - supports_usampler_ = (status == GL_FRAMEBUFFER_COMPLETE); - - glDeleteFramebuffersEXT(1, &rgba8ui_framebuffer); - glDeleteTextures(1, &rgba8ui_texture); - glDeleteTextures(1, &depth_texture); - - decoder->RestoreTextureUnitBindings(0); - decoder->RestoreActiveTexture(); - decoder->RestoreFramebufferBindings(); - } - } - - VLOG(1) << "ApplyFramebufferAttachmentCMAAINTEL: " - << "Supports USampler is " << (supports_usampler_ ? "true" : "false"); - VLOG(1) << "ApplyFramebufferAttachmentCMAAINTEL: " - << "Supports R8 Images is " - << (supports_r8_image_ ? "true" : "false"); - - // Create the shaders - std::ostringstream defines, edge1, edge2, combineEdges, blur, displayEdges, - cmaa_frag; - - cmaa_frag << cmaa_frag_s1_ << cmaa_frag_s2_; - std::string cmaa_frag_string = cmaa_frag.str(); - const char* cmaa_frag_c_str = cmaa_frag_string.c_str(); - - if (supports_usampler_) { - defines << "#define SUPPORTS_USAMPLER2D\n"; - } - - if (is_in_gamma_correct_mode_) { - defines << "#define IN_GAMMA_CORRECT_MODE\n"; - } - - if (supports_r8_image_) { - defines << "#define EDGE_READ_FORMAT r8\n"; - } else { - defines << "#define EDGE_READ_FORMAT r32f\n"; - } - - displayEdges << defines.str() << "#define DISPLAY_EDGES\n"; - debug_display_edges_shader_ = - CreateProgram(displayEdges.str().c_str(), vert_str_, cmaa_frag_c_str); - - edge1 << defines.str() << "#define DETECT_EDGES1\n"; - edges0_shader_ = - CreateProgram(edge1.str().c_str(), vert_str_, cmaa_frag_c_str); - - edge2 << defines.str() << "#define DETECT_EDGES2\n"; - edges1_shader_ = - CreateProgram(edge2.str().c_str(), vert_str_, cmaa_frag_c_str); - - combineEdges << defines.str() << "#define COMBINE_EDGES\n"; - edges_combine_shader_ = - CreateProgram(combineEdges.str().c_str(), vert_str_, cmaa_frag_c_str); - - blur << defines.str() << "#define BLUR_EDGES\n"; - process_and_apply_shader_ = - CreateProgram(blur.str().c_str(), vert_str_, cmaa_frag_c_str); - - edges0_shader_result_rgba_texture_slot1_ = - glGetUniformLocation(edges0_shader_, "g_resultRGBATextureSlot1"); - edges0_shader_target_texture_slot2_ = - glGetUniformLocation(edges0_shader_, "g_targetTextureSlot2"); - edges1_shader_result_edge_texture_ = - glGetUniformLocation(edges1_shader_, "g_resultEdgeTexture"); - edges_combine_shader_result_edge_texture_ = - glGetUniformLocation(edges_combine_shader_, "g_resultEdgeTexture"); - process_and_apply_shader_result_rgba_texture_slot1_ = glGetUniformLocation( - process_and_apply_shader_, "g_resultRGBATextureSlot1"); - - initialized_ = true; -} - -void ApplyFramebufferAttachmentCMAAINTELResourceManager::Destroy() { - if (!initialized_) - return; - - ReleaseTextures(); - - 
glDeleteProgram(process_and_apply_shader_); - glDeleteProgram(edges_combine_shader_); - glDeleteProgram(edges1_shader_); - glDeleteProgram(edges0_shader_); - glDeleteProgram(debug_display_edges_shader_); - - initialized_ = false; -} - -// Apply CMAA(Conservative Morphological Anti-Aliasing) algorithm to the -// color attachments of currently bound draw framebuffer. -// Reference GL_INTEL_framebuffer_CMAA for details. -void ApplyFramebufferAttachmentCMAAINTELResourceManager:: - ApplyFramebufferAttachmentCMAAINTEL( - GLES2Decoder* decoder, - Framebuffer* framebuffer, - CopyTextureCHROMIUMResourceManager* copier, - TextureManager* texture_manager) { - DCHECK(decoder); - DCHECK(initialized_); - if (!framebuffer) - return; - - glDisable(GL_SCISSOR_TEST); - glDisable(GL_STENCIL_TEST); - glDisable(GL_CULL_FACE); - glDisable(GL_BLEND); - if (decoder->GetFeatureInfo()->feature_flags().ext_window_rectangles) { - glWindowRectanglesEXT(GL_EXCLUSIVE_EXT, 0, nullptr); - } - - // Process each color attachment of the current draw framebuffer. - uint32_t max_draw_buffers = decoder->GetContextGroup()->max_draw_buffers(); - for (uint32_t i = 0; i < max_draw_buffers; i++) { - const gles2::Framebuffer::Attachment* attachment = - framebuffer->GetAttachment(GL_COLOR_ATTACHMENT0 + i); - if (attachment && attachment->IsTextureAttachment()) { - // Get the texture info. - GLuint source_texture_client_id = attachment->object_name(); - GLuint source_texture = 0; - if (!decoder->GetServiceTextureId(source_texture_client_id, - &source_texture)) - continue; - GLsizei width = attachment->width(); - GLsizei height = attachment->height(); - GLenum internal_format = attachment->internal_format(); - - // Resize internal structures - only if needed. - OnSize(width, height); - - // CMAA internally expects GL_RGBA8 textures. - // Process using a GL_RGBA8 copy if this is not the case. - DCHECK(attachment->object_name()); - TextureRef* texture = - texture_manager->GetTexture(attachment->object_name()); - const bool rgba_immutable = - texture->texture()->HasImmutableStorage() && - TextureManager::ExtractFormatFromStorageFormat(internal_format) == - GL_RGBA; - const bool do_copy = !rgba_immutable; - - // CMAA Effect - if (do_copy) { - ApplyCMAAEffectTexture(source_texture, rgba8_texture_, do_copy); - - // Source format for DoCopySubTexture is always GL_RGBA8. - CopyTextureMethod method = CopyTextureMethod::DIRECT_COPY; - bool copy_tex_image_format_valid = - !GLES2Util::IsIntegerFormat(internal_format) && - GLES2Util::GetColorEncodingFromInternalFormat(internal_format) != - GL_SRGB && - internal_format != GL_BGRA_EXT && internal_format != GL_BGRA8_EXT; - if (GLES2Util::IsSizedColorFormat(internal_format)) { - int dr, dg, db, da; - GLES2Util::GetColorFormatComponentSizes(internal_format, 0, &dr, &dg, - &db, &da); - if ((dr > 0 && dr != 8) || (dg > 0 && dg != 8) || - (db > 0 && db != 8) || (da > 0 && da != 8)) { - copy_tex_image_format_valid = false; - } - } - if (!copy_tex_image_format_valid) - method = CopyTextureMethod::DIRECT_DRAW; - bool color_renderable = - Texture::ColorRenderable(decoder->GetFeatureInfo(), internal_format, - texture->texture()->IsImmutable()); -#if defined(OS_CHROMEOS) && defined(ARCH_CPU_X86_FAMILY) - // glDrawArrays is faster than glCopyTexSubImage2D on IA Mesa driver, - // although opposite in Android. - // TODO(dshwang): After Mesa fixes this issue, remove this hack. - // https://bugs.freedesktop.org/show_bug.cgi?id=98478, crbug.com/535198. 
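The copy-path selection in the removed code above degrades gracefully through three strategies. Reduced to a standalone decision function, where the enum mirrors gles2::CopyTextureMethod and the two bools summarize the format and renderability checks computed above:

  enum class CopyTextureMethod { DIRECT_COPY, DIRECT_DRAW, DRAW_AND_COPY };

  CopyTextureMethod ChooseCopyMethod(bool copy_tex_image_format_valid,
                                     bool color_renderable) {
    // glCopyTexSubImage2D needs a copy-compatible 8-bit-per-channel format...
    if (copy_tex_image_format_valid)
      return CopyTextureMethod::DIRECT_COPY;
    // ...drawing needs a colour-renderable destination...
    if (color_renderable)
      return CopyTextureMethod::DIRECT_DRAW;
    // ...and failing both, draw into a temporary, then copy from it.
    return CopyTextureMethod::DRAW_AND_COPY;
  }

(The CrOS/x86 Mesa special case in the hunk additionally overrides valid-format cases toward DIRECT_DRAW for performance.)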
- if (color_renderable) - method = CopyTextureMethod::DIRECT_DRAW; -#endif - if (method == CopyTextureMethod::DIRECT_DRAW && !color_renderable) - method = CopyTextureMethod::DRAW_AND_COPY; - - // LUMINANCE, LUMINANCE_ALPHA and ALPHA textures aren't - // renderable, so we don't need to pass in the luma emulation - // blitter to this point. - copier->DoCopySubTexture(decoder, GL_TEXTURE_2D, rgba8_texture_, 0, - GL_RGBA8, GL_TEXTURE_2D, source_texture, 0, - internal_format, 0, 0, 0, 0, width_, height_, - width_, height_, width_, height_, false, false, - false, false, method, nullptr); - } else { - ApplyCMAAEffectTexture(source_texture, source_texture, do_copy); - } - - decoder->RestoreTextureState(source_texture); - } - } - - // Restore state - decoder->RestoreAllAttributes(); - decoder->RestoreTextureUnitBindings(0); - decoder->RestoreTextureUnitBindings(1); - decoder->RestoreActiveTexture(); - decoder->RestoreProgramBindings(); - decoder->RestoreBufferBindings(); - decoder->RestoreFramebufferBindings(); - decoder->RestoreGlobalState(); -} - -void ApplyFramebufferAttachmentCMAAINTELResourceManager::ApplyCMAAEffectTexture( - GLuint source_texture, - GLuint dest_texture, - bool do_copy) { - frame_id_++; - - GLuint edge_texture_a; - GLuint edge_texture_b; - - // Flip flop - One pass clears the texture that needs clearing for the other - // one (actually it's only important that it clears the highest bit) - if ((frame_id_ % 2) == 0) { - edge_texture_a = edges0_texture_; - edge_texture_b = edges1_texture_; - } else { - edge_texture_a = edges1_texture_; - edge_texture_b = edges0_texture_; - } - - // Setup the main fbo - glBindFramebufferEXT(GL_FRAMEBUFFER, cmaa_framebuffer_); - glFramebufferTexture2DEXT(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, - mini4_edge_texture_, 0); - glFramebufferTexture2DEXT(GL_FRAMEBUFFER, GL_DEPTH_ATTACHMENT, GL_TEXTURE_2D, - mini4_edge_depth_texture_, 0); -#if DCHECK_IS_ON() - GLenum status = glCheckFramebufferStatusEXT(GL_FRAMEBUFFER); - if (status != GL_FRAMEBUFFER_COMPLETE) { - DLOG(ERROR) << "ApplyFramebufferAttachmentCMAAINTEL: " - << "Incomplete framebuffer."; - Destroy(); - return; - } -#endif - - // Setup the viewport to match the fbo - glViewport(0, 0, (width_ + 1) / 2, (height_ + 1) / 2); - glEnable(GL_DEPTH_TEST); - - // Detect edges Pass 0 - // - For every pixel detect edges to the right and down and output depth - // mask where edges detected (1 - far, for detected, 0-near for empty - // pixels) - - // Inputs - // g_screenTexture source_texture tex0 - // Outputs - // gl_FragDepth mini4_edge_depth_texture_ fbo.depth - // out uvec4 outEdges mini4_edge_texture_ fbo.col - // image2D g_resultRGBATextureSlot1 working_color_texture_ image1 - GLenum edge_format = supports_r8_image_ ? 
GL_R8 : GL_R32F; - - { - glUseProgram(edges0_shader_); - glUniform2f(0, 1.0f / width_, 1.0f / height_); - glDepthMask(GL_TRUE); - glDepthFunc(GL_ALWAYS); - glColorMask(GL_TRUE, GL_TRUE, GL_TRUE, GL_TRUE); - - if (!is_gles31_compatible_) { - glUniform1i(edges0_shader_result_rgba_texture_slot1_, 1); - glUniform1i(edges0_shader_target_texture_slot2_, 2); - } - glBindImageTextureEXT(1, working_color_texture_, 0, GL_FALSE, 0, - GL_WRITE_ONLY, GL_RGBA8); - if (do_copy) { - glUniform1i(2, GL_TRUE); - glBindImageTextureEXT(2, dest_texture, 0, GL_FALSE, 0, GL_WRITE_ONLY, - GL_RGBA8); - } else { - glUniform1i(2, GL_FALSE); - } - - glActiveTexture(GL_TEXTURE0); - glBindTexture(GL_TEXTURE_2D, source_texture); - glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR); - glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR); - - glDrawArrays(GL_TRIANGLES, 0, 3); - } - - // Detect edges Pass 1 (finish the previous pass edge processing). - // Do the culling of non-dominant local edges (leave mainly locally dominant - // edges) and merge Right and Bottom edges into TopRightBottomLeft - - // Inputs - // g_src0Texture4Uint mini4_edge_texture_ tex1 - // Outputs - // image2D g_resultEdgeTexture edge_texture_b image0 - { - glUseProgram(edges1_shader_); - glUniform2f(0, 1.0f / width_, 1.0f / height_); - glDepthMask(GL_FALSE); - glDepthFunc(GL_LESS); - glColorMask(GL_FALSE, GL_FALSE, GL_FALSE, GL_FALSE); - - if (!is_gles31_compatible_) { - glUniform1i(edges1_shader_result_edge_texture_, 0); - } - glBindImageTextureEXT(0, edge_texture_b, 0, GL_FALSE, 0, GL_WRITE_ONLY, - edge_format); - - glActiveTexture(GL_TEXTURE1); - glBindTexture(GL_TEXTURE_2D, mini4_edge_texture_); - glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST); - glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST); - - glDrawArrays(GL_TRIANGLES, 0, 3); - } - - // - Combine RightBottom (.xy) edges from previous pass into - // RightBottomLeftTop (.xyzw) edges and output it into the mask. - // - On all pixels with any edge, input buffer into a temporary color buffer - // needed for correct blending in the next pass (other pixels not needed - // so not copied to avoid bandwidth use). - // - On all pixels with 2 or more edges output positive depth mask for the - // next pass. 
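Every pass in the removed CMAA implementation draws a single attribute-less fullscreen triangle: the vertex shader (vert_str_ further down) derives positions from gl_VertexID, so vertices 0, 1, 2 land at (-1,-1), (+3,-1), (-1,+3), one oversized triangle covering all of clip space with no vertex buffer. The driving C++ side is then just the following, where empty_vao is an assumed pre-created, attribute-free VAO (core profiles require one bound even when no attributes are read):

  // Issue one CMAA pass: program and image bindings are already set up;
  // positions come from gl_VertexID, so no vertex buffers are bound.
  glUseProgram(pass_program);
  glBindVertexArray(empty_vao);
  glDrawArrays(GL_TRIANGLES, 0, 3);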
- - // Inputs - // g_src0TextureFlt edge_texture_b tex1 //ps - // Outputs - // image2D g_resultEdgeTexture edge_texture_a image2 - // gl_FragDepth mini4_edge_texture_ fbo.depth - { - // Combine edges: each pixel will now contain info on all (top, right, - // bottom, left) edges; also mark depth 1 value on all pixels with any edge - // and also copy source color data but only on edge pixels - glUseProgram(edges_combine_shader_); - glUniform2f(0, 1.0f / width_, 1.0f / height_); - glDepthMask(GL_TRUE); - glDepthFunc(GL_LESS); - glColorMask(GL_FALSE, GL_FALSE, GL_FALSE, GL_FALSE); - - if (!is_gles31_compatible_) { - glUniform1i(edges_combine_shader_result_edge_texture_, 0); - } - glBindImageTextureEXT(0, edge_texture_a, 0, GL_FALSE, 0, GL_WRITE_ONLY, - edge_format); - - glActiveTexture(GL_TEXTURE1); - glBindTexture(GL_TEXTURE_2D, edge_texture_b); - glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST); - glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST); - - glDrawArrays(GL_TRIANGLES, 0, 3); - } - - // Using depth mask and [earlydepthstencil] to work on pixels with 2, 3, 4 - // edges: - // - First blend simple blur map for 2,3,4 edge pixels - // - Then do the lines (line length counter -should- guarantee no overlap - // with other pixels - pixels with 1 edge are excluded in the previous - // pass and the pixels with 2 parallel edges are excluded in the simple - // blur) - - // Inputs - // g_screenTexture working_color_texture_ tex0 - // g_src0TextureFlt edge_texture_a tex1 //ps - // sampled - // Outputs - // g_resultRGBATextureSlot1 dest_texture image1 - // gl_FragDepth mini4_edge_texture_ fbo.depth - { - glUseProgram(process_and_apply_shader_); - glUniform2f(0, 1.0f / width_, 1.0f / height_); - glDepthMask(GL_FALSE); - glDepthFunc(GL_LESS); - glColorMask(GL_FALSE, GL_FALSE, GL_FALSE, GL_FALSE); - - if (!is_gles31_compatible_) { - glUniform1i(process_and_apply_shader_result_rgba_texture_slot1_, 1); - } - glBindImageTextureEXT(1, dest_texture, 0, GL_FALSE, 0, GL_WRITE_ONLY, - GL_RGBA8); - - glActiveTexture(GL_TEXTURE0); - glBindTexture(GL_TEXTURE_2D, working_color_texture_); - glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR); - glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR); - - glActiveTexture(GL_TEXTURE1); - glBindTexture(GL_TEXTURE_2D, edge_texture_a); - glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST); - glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST); - - glDrawArrays(GL_TRIANGLES, 0, 3); - } - - glColorMask(GL_TRUE, GL_TRUE, GL_TRUE, GL_TRUE); - glDisable(GL_DEPTH_TEST); - glDepthMask(GL_FALSE); - glActiveTexture(GL_TEXTURE0); -} - -void ApplyFramebufferAttachmentCMAAINTELResourceManager::OnSize(GLint width, - GLint height) { - if (height_ == height && width_ == width) - return; - - ReleaseTextures(); - - height_ = height; - width_ = width; - - glGenTextures(1, &rgba8_texture_); - glBindTexture(GL_TEXTURE_2D, rgba8_texture_); - glTexStorage2DEXT(GL_TEXTURE_2D, 1, GL_RGBA8, width, height); - - // Edges texture - R8 - // OpenGLES has no single component 8/16-bit image support, so needs to be R32 - // Although CHT does support R8. - GLenum edge_format = supports_r8_image_ ? 
GL_R8 : GL_R32F; - glGenTextures(1, &edges0_texture_); - glBindTexture(GL_TEXTURE_2D, edges0_texture_); - glTexStorage2DEXT(GL_TEXTURE_2D, 1, edge_format, width, height); - - glGenTextures(1, &edges1_texture_); - glBindTexture(GL_TEXTURE_2D, edges1_texture_); - glTexStorage2DEXT(GL_TEXTURE_2D, 1, edge_format, width, height); - - // Color working texture - RGBA8 - glGenTextures(1, &working_color_texture_); - glBindTexture(GL_TEXTURE_2D, working_color_texture_); - glTexStorage2DEXT(GL_TEXTURE_2D, 1, GL_RGBA8, width, height); - - // Half*half compressed 4-edge-per-pixel texture - RGBA8 - glGenTextures(1, &mini4_edge_texture_); - glBindTexture(GL_TEXTURE_2D, mini4_edge_texture_); - GLenum format = GL_RGBA8UI; - if (!supports_usampler_) { - format = GL_RGBA8; - } - glTexStorage2DEXT(GL_TEXTURE_2D, 1, format, (width + 1) / 2, - (height + 1) / 2); - - // Depth - glGenTextures(1, &mini4_edge_depth_texture_); - glBindTexture(GL_TEXTURE_2D, mini4_edge_depth_texture_); - glTexStorage2DEXT(GL_TEXTURE_2D, 1, GL_DEPTH_COMPONENT16, (width + 1) / 2, - (height + 1) / 2); - - // Create the FBO - glGenFramebuffersEXT(1, &cmaa_framebuffer_); - glBindFramebufferEXT(GL_FRAMEBUFFER, cmaa_framebuffer_); - - // We need to clear the textures before they are first used. - // The algorithm self-clears them later. - glViewport(0, 0, width_, height_); - glClearColor(0.0f, 0.0f, 0.0f, 0.0f); - - glBindFramebufferEXT(GL_FRAMEBUFFER, cmaa_framebuffer_); - glFramebufferTexture2DEXT(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, - edges0_texture_, 0); - glClear(GL_COLOR_BUFFER_BIT); - - glFramebufferTexture2DEXT(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, - edges1_texture_, 0); - glClear(GL_COLOR_BUFFER_BIT); - - textures_initialized_ = true; -} - -void ApplyFramebufferAttachmentCMAAINTELResourceManager::ReleaseTextures() { - if (textures_initialized_) { - glDeleteFramebuffersEXT(1, &cmaa_framebuffer_); - glDeleteTextures(1, &rgba8_texture_); - glDeleteTextures(1, &edges0_texture_); - glDeleteTextures(1, &edges1_texture_); - glDeleteTextures(1, &mini4_edge_texture_); - glDeleteTextures(1, &mini4_edge_depth_texture_); - glDeleteTextures(1, &working_color_texture_); - } - textures_initialized_ = false; -} - -GLuint ApplyFramebufferAttachmentCMAAINTELResourceManager::CreateProgram( - const char* defines, - const char* vs_source, - const char* fs_source) { - GLuint program = glCreateProgram(); - - GLuint vs = CreateShader(GL_VERTEX_SHADER, defines, vs_source); - GLuint fs = CreateShader(GL_FRAGMENT_SHADER, defines, fs_source); - - glAttachShader(program, vs); - glDeleteShader(vs); - glAttachShader(program, fs); - glDeleteShader(fs); - - glLinkProgram(program); - GLint link_status; - glGetProgramiv(program, GL_LINK_STATUS, &link_status); - - if (link_status == 0) { -#if DCHECK_IS_ON() - GLint info_log_length; - glGetProgramiv(program, GL_INFO_LOG_LENGTH, &info_log_length); - std::vector<GLchar> info_log(info_log_length); - glGetProgramInfoLog(program, static_cast<GLsizei>(info_log.size()), nullptr, - &info_log[0]); - DLOG(ERROR) << "ApplyFramebufferAttachmentCMAAINTEL: " - << "program link failed: " << &info_log[0]; -#endif - glDeleteProgram(program); - program = 0; - } - - return program; -} - -GLuint ApplyFramebufferAttachmentCMAAINTELResourceManager::CreateShader( - GLenum type, - const char* defines, - const char* source) { - GLuint shader = glCreateShader(type); - - const char header_es31[] = - "#version 310 es \n"; - const char header_gl130[] = - "#version 130 \n" - "#extension 
GL_ARB_shading_language_420pack : require \n" - "#extension GL_ARB_texture_gather : require \n" - "#extension GL_ARB_explicit_uniform_location : require \n" - "#extension GL_ARB_explicit_attrib_location : require \n" - "#extension GL_ARB_shader_image_load_store : require \n"; - - std::ostringstream header; - if (is_gles31_compatible_) { - header << header_es31; - if (supports_r8_image_) - header << "#extension GL_NV_image_formats : require\n"; - } else { - header << header_gl130; - } - - std::string header_str = header.str(); - const char* source_array[4] = {header_str.c_str(), defines, "\n", source}; - glShaderSource(shader, 4, source_array, nullptr); - - glCompileShader(shader); - - GLint compile_result; - glGetShaderiv(shader, GL_COMPILE_STATUS, &compile_result); - if (compile_result == 0) { -#if DCHECK_IS_ON() - GLint info_log_length; - glGetShaderiv(shader, GL_INFO_LOG_LENGTH, &info_log_length); - std::vector<GLchar> info_log(info_log_length); - glGetShaderInfoLog(shader, static_cast<GLsizei>(info_log.size()), nullptr, - &info_log[0]); - DLOG(ERROR) << "ApplyFramebufferAttachmentCMAAINTEL: " - << "shader compilation failed: " - << (type == GL_VERTEX_SHADER - ? "GL_VERTEX_SHADER" - : (type == GL_FRAGMENT_SHADER ? "GL_FRAGMENT_SHADER" - : "UNKNOWN_SHADER")) - << " shader compilation failed: " << &info_log[0]; -#endif - glDeleteShader(shader); - shader = 0; - } - - return shader; -} - -/* clang-format off */ -// Shaders used in the CMAA algorithm. -const char ApplyFramebufferAttachmentCMAAINTELResourceManager::vert_str_[] = - SHADER( - precision highp float; - // No input data. - // Verts are autogenerated. - // - // vertexID 0,1,2 should generate - // POS: (-1,-1), (+3,-1), (-1,+3) - // - // This generates a triangle that completely covers the -1->1 viewport - // - void main() - { - float x = -1.0 + float((gl_VertexID & 1) << 2); - float y = -1.0 + float((gl_VertexID & 2) << 1); - gl_Position = vec4(x, y, 0.0, 1.0); - } - ); - -const char ApplyFramebufferAttachmentCMAAINTELResourceManager::cmaa_frag_s1_[] = - SHADER( - precision highp float; - precision highp int; - - \n#define SETTINGS_ALLOW_SHORT_Zs 1\n - \n#define EDGE_DETECT_THRESHOLD 13.0f\n - \n#define saturate(x) clamp((x), 0.0, 1.0)\n - - // bind to a uniform buffer bind point 0 - layout(location = 0) uniform vec2 g_OneOverScreenSize; - \n#ifndef EDGE_DETECT_THRESHOLD\n - layout(location = 1) uniform float g_ColorThreshold; - \n#endif\n - \n#ifdef DETECT_EDGES1\n - layout(location = 2) uniform int g_DoCopy; - \n#endif\n - - \n#ifdef SUPPORTS_USAMPLER2D\n - \n#define USAMPLER usampler2D\n - \n#define UVEC4 uvec4\n - \n#define LOAD_UINT(arg) arg\n - \n#define STORE_UVEC4(arg) arg\n - \n#else\n - \n#define USAMPLER sampler2D\n - \n#define UVEC4 vec4\n - \n#define LOAD_UINT(arg) uint(arg * 255.0f)\n - \n#define STORE_UVEC4(arg) vec4(float(arg.x) / 255.0f, - float(arg.y) / 255.0f, - float(arg.z) / 255.0f, - float(arg.w) / 255.0f)\n - \n#endif\n - - // bind to texture stage 0/1 - layout(binding = 0) uniform highp sampler2D g_screenTexture; - layout(binding = 1) uniform highp sampler2D g_src0TextureFlt; - layout(binding = 1) uniform highp USAMPLER g_src0Texture4Uint; - - // bind to image stage 0/1/2 - \n#ifdef GL_ES\n - layout(binding = 0, EDGE_READ_FORMAT) restrict writeonly uniform highp - image2D g_resultEdgeTexture; - layout(binding = 1, rgba8) restrict writeonly uniform highp - image2D g_resultRGBATextureSlot1; - layout(binding = 2, rgba8) restrict writeonly uniform highp - image2D g_targetTextureSlot2; - \n#else\n - 
layout(EDGE_READ_FORMAT) restrict writeonly uniform highp - image2D g_resultEdgeTexture; - layout(rgba8) restrict writeonly uniform highp - image2D g_resultRGBATextureSlot1; - layout(rgba8) restrict writeonly uniform highp - image2D g_targetTextureSlot2; - \n#endif\n - - // Constants - const vec4 c_lumWeights = vec4(0.2126f, 0.7152f, 0.0722f, 0.0000f); - - \n#ifdef EDGE_DETECT_THRESHOLD\n - const float c_ColorThreshold = 1.0f / EDGE_DETECT_THRESHOLD; - \n#endif\n - - // Must be even number; Will work with ~16 pretty good too for - // additional performance, or with ~64 for highest quality. - const int c_maxLineLength = 64; - - const vec4 c_edgeDebugColours[5] = vec4[5](vec4(0.5, 0.5, 0.5, 0.4), - vec4(1.0, 0.1, 1.0, 0.8), - vec4(0.9, 0.0, 0.0, 0.8), - vec4(0.0, 0.9, 0.0, 0.8), - vec4(0.0, 0.0, 0.9, 0.8)); - - // this isn't needed if colour UAV is _SRGB but that doesn't work - // everywhere - \n#ifdef IN_GAMMA_CORRECT_MODE\n - /////////////////////////////////////////////////////////////////////// - // - // SRGB Helper Functions taken from D3DX_DXGIFormatConvert.inl - float D3DX_FLOAT_to_SRGB(float val) { - if (val < 0.0031308f) - val *= 12.92f; - else { - val = 1.055f * pow(val, 1.0f / 2.4f) - 0.055f; - } - return val; - } - // - vec3 D3DX_FLOAT3_to_SRGB(vec3 val) { - vec3 outVal; - outVal.x = D3DX_FLOAT_to_SRGB(val.x); - outVal.y = D3DX_FLOAT_to_SRGB(val.y); - outVal.z = D3DX_FLOAT_to_SRGB(val.z); - return outVal; - } - /////////////////////////////////////////////////////////////////////// - \n#endif\n // IN_GAMMA_CORRECT_MODE - - // how .rgba channels from the edge texture maps to pixel edges: - // - // A - 0x02 - // |¯¯¯¯¯¯¯¯¯| - // | | - // 0x04 - B | pixel | R - 0x01 - // | | - // |_________| - // G - 0x08 - // - // (A - there's an edge between us and a pixel at the bottom) - // (R - there's an edge between us and a pixel to the right) - // (G - there's an edge between us and a pixel above us) - // (B - there's an edge between us and a pixel to the left) - - // Expecting values of 1 and 0 only! - uint PackEdge(uvec4 edges) { - return (edges.x << 0u) | (edges.y << 1u) | (edges.z << 2u) | - (edges.w << 3u); - } - - uvec4 UnpackEdge(uint value) { - uvec4 ret; - ret.x = (value & 0x01u) != 0u ? 1u : 0u; - ret.y = (value & 0x02u) != 0u ? 1u : 0u; - ret.z = (value & 0x04u) != 0u ? 1u : 0u; - ret.w = (value & 0x08u) != 0u ? 
1u : 0u; - return ret; - } - - vec4 PackBlurAAInfo(ivec2 pixelPos, uint shapeType) { - uint packedEdges = uint( - texelFetch(g_src0TextureFlt, pixelPos, 0).r * 255.5); - - float retval = float(packedEdges + (shapeType << 4u)); - - return vec4(retval / 255.0); - } - - void UnpackBlurAAInfo(float packedValue, out uint edges, - out uint shapeType) { - uint packedValueInt = uint(packedValue * 255.5); - edges = packedValueInt & 0xFu; - shapeType = packedValueInt >> 4u; - } - - float EdgeDetectColorCalcDiff(vec3 colorA, vec3 colorB) { - \n#ifdef IN_BGR_MODE\n - vec3 LumWeights = c_lumWeights.bgr; - \n#else\n - vec3 LumWeights = c_lumWeights.rgb; - \n#endif\n - - return dot(abs(colorA.rgb - colorB.rgb), LumWeights); - } - - bool EdgeDetectColor(vec3 colorA, vec3 colorB) { - \n#ifdef EDGE_DETECT_THRESHOLD\n - return EdgeDetectColorCalcDiff(colorA, colorB) > c_ColorThreshold; - \n#else\n - return EdgeDetectColorCalcDiff(colorA, colorB) > g_ColorThreshold; - \n#endif\n - } - - void FindLineLength(out int lineLengthLeft, - out int lineLengthRight, - ivec2 screenPos, - const bool horizontal, - const bool invertedZShape, - const ivec2 stepRight) { - // TODO: there must be a cleaner and faster way to get to these - - // a precalculated array indexing maybe? - uint maskLeft = uint(0); - uint bitsContinueLeft = uint(0); - uint maskRight = uint(0); - uint bitsContinueRight = uint(0); - { - // Horizontal (vertical is the same, just rotated 90º counter-clockwise) - // Inverted Z case: // Normal Z case: - // __ // __ - // X| // X| - // -- // -- - // - // Vertical - // Inverted Z case: // Normal Z case: - // | // | - // -- // -- - // X| // |X - uint maskTraceLeft = uint(0); - uint maskTraceRight = uint(0); - uint maskStopLeft = uint(0); - uint maskStopRight = uint(0); - if (horizontal) { - if (invertedZShape) { - maskTraceLeft = 0x08u; // tracing bottom edge - maskTraceRight = 0x02u; // tracing top edge - } else { - maskTraceLeft = 0x02u; // tracing top edge - maskTraceRight = 0x08u; // tracing bottom edge - } - maskStopLeft = 0x01u; // stop on right edge - maskStopRight = 0x04u; // stop on left edge - } else { - if (invertedZShape) { - maskTraceLeft = 0x01u; // tracing right edge - maskTraceRight = 0x04u; // tracing left edge - } else { - maskTraceLeft = 0x04u; // tracing left edge - maskTraceRight = 0x01u; // tracing right edge - } - maskStopLeft = 0x02u; // stop on top edge - maskStopRight = 0x08u; // stop on bottom edge - } - - maskLeft = maskTraceLeft | maskStopLeft; - bitsContinueLeft = maskTraceLeft; - maskRight = maskTraceRight | maskStopRight; - bitsContinueRight = maskTraceRight; - } - /////////////////////////////////////////////////////////////////////// - - \n#ifdef SETTINGS_ALLOW_SHORT_Zs\n - int i = 1; - \n#else\n - int i = 2; // starting from 2 because we already know it's at least 2 - \n#endif\n - for (; i < c_maxLineLength; i++) { - uint edgeLeft = uint( - texelFetch(g_src0TextureFlt, - ivec2(screenPos.xy - stepRight * i), 0).r * 255.5); - uint edgeRight = uint( - texelFetch(g_src0TextureFlt, - ivec2(screenPos.xy + stepRight * (i + 1)), - 0).r * 255.5); - - // stop on encountering 'stopping' edge (as defined by masks) - int stopLeft = (edgeLeft & maskLeft) != bitsContinueLeft ? 1 : 0; - int stopRight = - (edgeRight & maskRight) != bitsContinueRight ? 
1 : 0; - - if (bool(stopLeft) || bool(stopRight)) { - lineLengthLeft = 1 + i - stopLeft; - lineLengthRight = 1 + i - stopRight; - return; - } - } - lineLengthLeft = lineLengthRight = i; - return; - } - - void ProcessDetectedZ(ivec2 screenPos, bool horizontal, - bool invertedZShape) { - int lineLengthLeft = 0; - int lineLengthRight = 0; - - ivec2 stepRight = (horizontal) ? (ivec2(1, 0)) : (ivec2(0, 1)); - vec2 blendDir = (horizontal) ? (vec2(0, -1)) : (vec2(1, 0)); - - FindLineLength(lineLengthLeft, lineLengthRight, screenPos, - horizontal, invertedZShape, stepRight); - - vec2 pixelSize = g_OneOverScreenSize; - - float leftOdd = 0.15 * float(lineLengthLeft % 2); - float rightOdd = 0.15 * float(lineLengthRight % 2); - - int loopFrom = -int((lineLengthLeft + 1) / 2) + 1; - int loopTo = int((lineLengthRight + 1) / 2); - - float totalLength = float(loopTo - loopFrom) + 1.0 - leftOdd - - rightOdd; - - for (int i = loopFrom; i <= loopTo; i++) { - highp ivec2 pixelPos = screenPos + stepRight * i; - vec2 pixelPosFlt = vec2(float(pixelPos.x) + 0.5, - float(pixelPos.y) + 0.5); - - \n#ifdef DEBUG_OUTPUT_AAINFO\n - imageStore(g_resultEdgeTexture, pixelPos, - PackBlurAAInfo(pixelPos, 1u)); - \n#endif\n - - float m = (float(i) + 0.5 - leftOdd - float(loopFrom)) / - totalLength; - m = saturate(m); - float k = m - ((i > 0) ? 1.0 : 0.0); - k = (invertedZShape) ? (k) : (-k); - - vec4 color = textureLod(g_screenTexture, - (pixelPosFlt + blendDir * k) * pixelSize, - 0.0); - - \n#ifdef IN_GAMMA_CORRECT_MODE\n - color.rgb = D3DX_FLOAT3_to_SRGB(color.rgb); - \n#endif\n - imageStore(g_resultRGBATextureSlot1, pixelPos, color); - } - } - - vec4 CalcDbgDisplayColor(const vec4 blurMap) { - vec3 pixelC = vec3(0.0, 0.0, 0.0); - vec3 pixelL = vec3(0.0, 0.0, 1.0); - vec3 pixelT = vec3(1.0, 0.0, 0.0); - vec3 pixelR = vec3(0.0, 1.0, 0.0); - vec3 pixelB = vec3(0.8, 0.8, 0.0); - - const float centerWeight = 1.0; - float fromBelowWeight = (1.0 / (1.0 - blurMap.x)) - 1.0; - float fromAboveWeight = (1.0 / (1.0 - blurMap.y)) - 1.0; - float fromRightWeight = (1.0 / (1.0 - blurMap.z)) - 1.0; - float fromLeftWeight = (1.0 / (1.0 - blurMap.w)) - 1.0; - - float weightSum = centerWeight + dot(vec4(fromBelowWeight, - fromAboveWeight, - fromRightWeight, - fromLeftWeight), - vec4(1, 1, 1, 1)); - - vec4 pixel; - - pixel.rgb = pixelC.rgb + fromAboveWeight * pixelT + - fromBelowWeight * pixelB + - fromLeftWeight * pixelL + - fromRightWeight * pixelR; - pixel.rgb /= weightSum; - - pixel.a = dot(pixel.rgb, vec3(1, 1, 1)) * 100.0; - - return saturate(pixel); - } - - \n#ifdef DETECT_EDGES1\n - layout(location = 0) out UVEC4 outEdges; - void DetectEdges1() { - uvec4 outputEdges; - ivec2 screenPosI = ivec2(gl_FragCoord.xy) * ivec2(2, 2); - - // .rgb contains colour, .a contains flag whether to output it to - // working colour texture - vec4 pixel00 = texelFetch(g_screenTexture, screenPosI.xy, 0); - vec4 pixel10 = - texelFetchOffset(g_screenTexture, screenPosI.xy, 0, ivec2(1, 0)); - vec4 pixel20 = - texelFetchOffset(g_screenTexture, screenPosI.xy, 0, ivec2(2, 0)); - vec4 pixel01 = - texelFetchOffset(g_screenTexture, screenPosI.xy, 0, ivec2(0, 1)); - vec4 pixel11 = - texelFetchOffset(g_screenTexture, screenPosI.xy, 0, ivec2(1, 1)); - vec4 pixel21 = - texelFetchOffset(g_screenTexture, screenPosI.xy, 0, ivec2(2, 1)); - vec4 pixel02 = - texelFetchOffset(g_screenTexture, screenPosI.xy, 0, ivec2(0, 2)); - vec4 pixel12 = - texelFetchOffset(g_screenTexture, screenPosI.xy, 0, ivec2(1, 2)); - - if (g_DoCopy == 1) { - imageStore(g_targetTextureSlot2, 
screenPosI.xy + ivec2(0, 0), pixel00); - imageStore(g_targetTextureSlot2, screenPosI.xy + ivec2(1, 0), pixel10); - imageStore(g_targetTextureSlot2, screenPosI.xy + ivec2(0, 1), pixel01); - imageStore(g_targetTextureSlot2, screenPosI.xy + ivec2(1, 1), pixel11); - } - - float storeFlagPixel00 = 0.0; - float storeFlagPixel10 = 0.0; - float storeFlagPixel20 = 0.0; - float storeFlagPixel01 = 0.0; - float storeFlagPixel11 = 0.0; - float storeFlagPixel21 = 0.0; - float storeFlagPixel02 = 0.0; - float storeFlagPixel12 = 0.0; - - vec2 et; - - \n#ifdef EDGE_DETECT_THRESHOLD\n - float threshold = c_ColorThreshold; - \n#else\n - float threshold = g_ColorThreshold; - \n#endif\n - - { - et.x = EdgeDetectColorCalcDiff(pixel00.rgb, pixel10.rgb); - et.y = EdgeDetectColorCalcDiff(pixel00.rgb, pixel01.rgb); - et = saturate(et - threshold); - ivec2 eti = ivec2(et * 15.0 + 0.99); - outputEdges.x = uint(eti.x | (eti.y << 4)); - - storeFlagPixel00 += et.x; - storeFlagPixel00 += et.y; - storeFlagPixel10 += et.x; - storeFlagPixel01 += et.y; - } - - { - et.x = EdgeDetectColorCalcDiff(pixel10.rgb, pixel20.rgb); - et.y = EdgeDetectColorCalcDiff(pixel10.rgb, pixel11.rgb); - et = saturate(et - threshold); - ivec2 eti = ivec2(et * 15.0 + 0.99); - outputEdges.y = uint(eti.x | (eti.y << 4)); - - storeFlagPixel10 += et.x; - storeFlagPixel10 += et.y; - storeFlagPixel20 += et.x; - storeFlagPixel11 += et.y; - } - - { - et.x = EdgeDetectColorCalcDiff(pixel01.rgb, pixel11.rgb); - et.y = EdgeDetectColorCalcDiff(pixel01.rgb, pixel02.rgb); - et = saturate(et - threshold); - ivec2 eti = ivec2(et * 15.0 + 0.99); - outputEdges.z = uint(eti.x | (eti.y << 4)); - - storeFlagPixel01 += et.x; - storeFlagPixel01 += et.y; - storeFlagPixel11 += et.x; - storeFlagPixel02 += et.y; - } - - { - et.x = EdgeDetectColorCalcDiff(pixel11.rgb, pixel21.rgb); - et.y = EdgeDetectColorCalcDiff(pixel11.rgb, pixel12.rgb); - et = saturate(et - threshold); - ivec2 eti = ivec2(et * 15.0 + 0.99); - outputEdges.w = uint(eti.x | (eti.y << 4)); - - storeFlagPixel11 += et.x; - storeFlagPixel11 += et.y; - storeFlagPixel21 += et.x; - storeFlagPixel12 += et.y; - } - - gl_FragDepth = any(bvec4(outputEdges)) ? 
1.0 : 0.0; - - if (gl_FragDepth != 0.0) { - if (storeFlagPixel00 != 0.0) - imageStore(g_resultRGBATextureSlot1, screenPosI.xy + ivec2(0, 0), - pixel00); - if (storeFlagPixel10 != 0.0) - imageStore(g_resultRGBATextureSlot1, screenPosI.xy + ivec2(1, 0), - pixel10); - if (storeFlagPixel20 != 0.0) - imageStore(g_resultRGBATextureSlot1, screenPosI.xy + ivec2(2, 0), - pixel20); - if (storeFlagPixel01 != 0.0) - imageStore(g_resultRGBATextureSlot1, screenPosI.xy + ivec2(0, 1), - pixel01); - if (storeFlagPixel02 != 0.0) - imageStore(g_resultRGBATextureSlot1, screenPosI.xy + ivec2(0, 2), - pixel02); - if (storeFlagPixel11 != 0.0) - imageStore(g_resultRGBATextureSlot1, screenPosI.xy + ivec2(1, 1), - pixel11); - if (storeFlagPixel21 != 0.0) - imageStore(g_resultRGBATextureSlot1, screenPosI.xy + ivec2(2, 1), - pixel21); - if (storeFlagPixel12 != 0.0) - imageStore(g_resultRGBATextureSlot1, screenPosI.xy + ivec2(1, 2), - pixel12); - } - outEdges = STORE_UVEC4(outputEdges); - } - \n#endif\n // DETECT_EDGES1 - - vec2 UnpackThresholds(uint val) { - return vec2(val & 0x0Fu, val >> 4u) / 15.0f; - } - - uvec4 PruneNonDominantEdges(vec4 edges[3]) { - vec4 maxE4 = vec4(0.0, 0.0, 0.0, 0.0); - - float avg = 0.0; - - for (int i = 0; i < 3; i++) { - maxE4 = max(maxE4, edges[i]); - - avg = dot(edges[i], vec4(1, 1, 1, 1) / (3.0 * 4.0)); - } - - vec2 maxE2 = max(maxE4.xy, maxE4.zw); - float maxE = max(maxE2.x, maxE2.y); - - float threshold = avg * 0.65 + maxE * 0.35; - - // threshold = 0.0001; // this disables non-dominant edge pruning! - - uint cx = edges[0].x >= threshold ? 1u : 0u; - uint cy = edges[0].y >= threshold ? 1u : 0u; - return uvec4(cx, cy, 0, 0); - } - - void CollectEdges(int offX, - int offY, - out vec4 edges[3], - const uint packedVals[6 * 6]) { - vec2 pixelP0P0 = UnpackThresholds(packedVals[(offX)*6+(offY)]); - vec2 pixelP1P0 = UnpackThresholds(packedVals[(offX+1)*6+(offY)]); - vec2 pixelP0P1 = UnpackThresholds(packedVals[(offX)*6+(offY+1)]); - vec2 pixelM1P0 = UnpackThresholds(packedVals[(offX-1)*6 +(offY)]); - vec2 pixelP0M1 = UnpackThresholds(packedVals[(offX)*6+(offY-1)]); - vec2 pixelP1M1 = UnpackThresholds(packedVals[(offX+1)*6 +(offY-1)]); - vec2 pixelM1P1 = UnpackThresholds(packedVals[(offX-1)*6+(offY+1)]); - - edges[0].x = pixelP0P0.x; - edges[0].y = pixelP0P0.y; - edges[0].z = pixelP1P0.x; - edges[0].w = pixelP1P0.y; - edges[1].x = pixelP0P1.x; - edges[1].y = pixelP0P1.y; - edges[1].z = pixelM1P0.x; - edges[1].w = pixelM1P0.y; - edges[2].x = pixelP0M1.x; - edges[2].y = pixelP0M1.y; - edges[2].z = pixelP1M1.y; - edges[2].w = pixelM1P1.x; - } - ); - -const char ApplyFramebufferAttachmentCMAAINTELResourceManager::cmaa_frag_s2_[] = - SHADER( - \n#ifdef DETECT_EDGES2\n - layout(early_fragment_tests) in; - void DetectEdges2() { - ivec2 screenPosI = ivec2(gl_FragCoord.xy); - uvec2 notTopRight = - uvec2(notEqual((screenPosI + 1), textureSize(g_src0Texture4Uint, 0))); - - // source : edge differences from previous pass - uint packedVals[6 * 6]; - - // center pixel (our output) - UVEC4 packedQ4 = texelFetch(g_src0Texture4Uint, screenPosI.xy, 0); - packedVals[(2) * 6 + (2)] = LOAD_UINT(packedQ4.x); - packedVals[(3) * 6 + (2)] = LOAD_UINT(packedQ4.y); - packedVals[(2) * 6 + (3)] = LOAD_UINT(packedQ4.z); - packedVals[(3) * 6 + (3)] = LOAD_UINT(packedQ4.w); - - vec4 edges[3]; - if (bool(packedVals[(2) * 6 + (2)]) || - bool(packedVals[(3) * 6 + (2)])) { - UVEC4 packedQ1 = texelFetchOffset(g_src0Texture4Uint, - screenPosI.xy, 0, ivec2(0, -1)); - packedVals[(2) * 6 + (0)] = LOAD_UINT(packedQ1.x); - 
packedVals[(3) * 6 + (0)] = LOAD_UINT(packedQ1.y); - packedVals[(2) * 6 + (1)] = LOAD_UINT(packedQ1.z); - packedVals[(3) * 6 + (1)] = LOAD_UINT(packedQ1.w); - } - - if (bool(packedVals[(2) * 6 + (2)]) || - bool(packedVals[(2) * 6 + (3)])) { - UVEC4 packedQ3 = texelFetchOffset(g_src0Texture4Uint, - screenPosI.xy, 0, ivec2(-1, 0)); - packedVals[(0) * 6 + (2)] = LOAD_UINT(packedQ3.x); - packedVals[(1) * 6 + (2)] = LOAD_UINT(packedQ3.y); - packedVals[(0) * 6 + (3)] = LOAD_UINT(packedQ3.z); - packedVals[(1) * 6 + (3)] = LOAD_UINT(packedQ3.w); - } - - if (bool(packedVals[(2) * 6 + (2)])) { - CollectEdges(2, 2, edges, packedVals); - uint pe = PackEdge(PruneNonDominantEdges(edges)); - if (pe != 0u) { - imageStore(g_resultEdgeTexture, 2 * screenPosI.xy + ivec2(0, 0), - vec4(float(0x80u | pe) / 255.0, 0, 0, 0)); - } - } - - if (bool(packedVals[(3) * 6 + (2)]) || - bool(packedVals[(3) * 6 + (3)])) { - UVEC4 packedQ5 = texelFetchOffset(g_src0Texture4Uint, - screenPosI.xy, 0, ivec2(1, 0)); - packedVals[(4) * 6 + (2)] = LOAD_UINT(packedQ5.x); - packedVals[(5) * 6 + (2)] = LOAD_UINT(packedQ5.y); - packedVals[(4) * 6 + (3)] = LOAD_UINT(packedQ5.z); - packedVals[(5) * 6 + (3)] = LOAD_UINT(packedQ5.w); - } - - if (bool(packedVals[(3) * 6 + (2)])) { - UVEC4 packedQ2 = texelFetchOffset(g_src0Texture4Uint, - screenPosI.xy, 0, ivec2(1, -1)); - packedVals[(4) * 6 + (0)] = LOAD_UINT(packedQ2.x); - packedVals[(5) * 6 + (0)] = LOAD_UINT(packedQ2.y); - packedVals[(4) * 6 + (1)] = LOAD_UINT(packedQ2.z); - packedVals[(5) * 6 + (1)] = LOAD_UINT(packedQ2.w); - - CollectEdges(3, 2, edges, packedVals); - uvec4 dominant_edges = PruneNonDominantEdges(edges); - // The rightmost edge of the texture is not edge. - // Note: texelFetch() on out of range gives an undefined value. - uint pe = PackEdge(dominant_edges * uvec4(notTopRight.x, 1, 1, 1)); - if (pe != 0u) { - imageStore(g_resultEdgeTexture, 2 * screenPosI.xy + ivec2(1, 0), - vec4(float(0x80u | pe) / 255.0, 0, 0, 0)); - } - } - - if (bool(packedVals[(2) * 6 + (3)]) || - bool(packedVals[(3) * 6 + (3)])) { - UVEC4 packedQ7 = texelFetchOffset(g_src0Texture4Uint, - screenPosI.xy, 0, ivec2(0, 1)); - packedVals[(2) * 6 + (4)] = LOAD_UINT(packedQ7.x); - packedVals[(3) * 6 + (4)] = LOAD_UINT(packedQ7.y); - packedVals[(2) * 6 + (5)] = LOAD_UINT(packedQ7.z); - packedVals[(3) * 6 + (5)] = LOAD_UINT(packedQ7.w); - } - - if (bool(packedVals[(2) * 6 + (3)])) { - UVEC4 packedQ6 = texelFetchOffset(g_src0Texture4Uint, - screenPosI.xy, 0, ivec2(-1, -1)); - packedVals[(0) * 6 + (4)] = LOAD_UINT(packedQ6.x); - packedVals[(1) * 6 + (4)] = LOAD_UINT(packedQ6.y); - packedVals[(0) * 6 + (5)] = LOAD_UINT(packedQ6.z); - packedVals[(1) * 6 + (5)] = LOAD_UINT(packedQ6.w); - - CollectEdges(2, 3, edges, packedVals); - uvec4 dominant_edges = PruneNonDominantEdges(edges); - uint pe = PackEdge(dominant_edges * uvec4(1, notTopRight.y, 1, 1)); - if (pe != 0u) { - imageStore(g_resultEdgeTexture, 2 * screenPosI.xy + ivec2(0, 1), - vec4(float(0x80u | pe) / 255.0, 0, 0, 0)); - } - } - - if (bool(packedVals[(3) * 6 + (3)])) { - CollectEdges(3, 3, edges, packedVals); - uvec4 dominant_edges = PruneNonDominantEdges(edges); - uint pe = PackEdge(dominant_edges * uvec4(notTopRight, 1, 1)); - if (pe != 0u) { - imageStore(g_resultEdgeTexture, 2 * screenPosI.xy + ivec2(1, 1), - vec4(float(0x80u | pe) / 255.0, 0, 0, 0)); - } - } - } - \n#endif\n // DETECT_EDGES2 - - \n#ifdef COMBINE_EDGES\n - void CombineEdges() { - ivec3 screenPosIBase = ivec3(ivec2(gl_FragCoord.xy) * 2, 0); - vec3 screenPosBase = vec3(screenPosIBase); 
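The DetectEdges2 pass above stores each pruned edge set as (0x80u | pe) / 255.0 in an R8 render target: four direction bits plus a high "written this pass" flag, round-tripped through a normalized float. A minimal C++ sketch of that packing contract (the helper names here are illustrative, not from the Chromium tree):

    #include <cstdint>

    // Direction bits used throughout these shaders:
    // 0x01 = right, 0x02 = top, 0x04 = left, 0x08 = bottom.
    constexpr uint32_t kCandidateFlag = 0x80u;  // "stored by this pass"

    inline float PackEdgeTexel(uint32_t edge_bits) {
      // Written as a normalized R8 texel; the value survives the float
      // round trip because readers rescale with *255.5 (or *255.0 - 127.5)
      // before truncating back to an integer.
      return static_cast<float>(kCandidateFlag | (edge_bits & 0x0Fu)) / 255.0f;
    }

    inline uint32_t UnpackEdgeTexel(float texel) {
      return static_cast<uint32_t>(texel * 255.5f) & 0x0Fu;
    }

CombineEdges, continuing below, depends on that flag: sample * 255.0 - 127.5 stays positive only for texels carrying 0x80, which is how stale data from the previous frame is rejected.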
- uvec2 notBottomLeft = uvec2(notEqual(screenPosIBase.xy, ivec2(0, 0))); - uint packedEdgesArray[3 * 3]; - - // use only if it has the 'prev frame' flag:[sample * 255.0 - 127.5] - //-> if it has the last bit flag (128), it's going to stay above 0 - uvec4 sampA = uvec4( - textureGatherOffset(g_src0TextureFlt, - screenPosBase.xy * g_OneOverScreenSize, - ivec2(1, 0)) * 255.0 - 127.5); - uvec4 sampB = uvec4( - textureGatherOffset(g_src0TextureFlt, - screenPosBase.xy * g_OneOverScreenSize, - ivec2(0, 1)) * 255.0 - 127.5); - uint sampC = uint( - texelFetchOffset(g_src0TextureFlt, screenPosIBase.xy, 0, - ivec2(1, 1)).r * 255.0 - 127.5); - - packedEdgesArray[(0) * 3 + (0)] = 0u; - // The bottom-most edge of the texture is not edge. - // Note: texelFetch() on out of range gives an undefined value. - packedEdgesArray[(1) * 3 + (0)] = sampA.w * notBottomLeft.y; - packedEdgesArray[(2) * 3 + (0)] = sampA.z * notBottomLeft.y; - packedEdgesArray[(1) * 3 + (1)] = sampA.x; - packedEdgesArray[(2) * 3 + (1)] = sampA.y; - // The left-most edge of the texture is not edge. - packedEdgesArray[(0) * 3 + (1)] = sampB.w * notBottomLeft.x; - packedEdgesArray[(0) * 3 + (2)] = sampB.x * notBottomLeft.x; - packedEdgesArray[(1) * 3 + (2)] = sampB.y; - packedEdgesArray[(2) * 3 + (2)] = sampC; - - uvec4 pixelsC = uvec4(packedEdgesArray[(1 + 0) * 3 + (1 + 0)], - packedEdgesArray[(1 + 1) * 3 + (1 + 0)], - packedEdgesArray[(1 + 0) * 3 + (1 + 1)], - packedEdgesArray[(1 + 1) * 3 + (1 + 1)]); - uvec4 pixelsL = uvec4(packedEdgesArray[(0 + 0) * 3 + (1 + 0)], - packedEdgesArray[(0 + 1) * 3 + (1 + 0)], - packedEdgesArray[(0 + 0) * 3 + (1 + 1)], - packedEdgesArray[(0 + 1) * 3 + (1 + 1)]); - uvec4 pixelsU = uvec4(packedEdgesArray[(1 + 0) * 3 + (0 + 0)], - packedEdgesArray[(1 + 1) * 3 + (0 + 0)], - packedEdgesArray[(1 + 0) * 3 + (0 + 1)], - packedEdgesArray[(1 + 1) * 3 + (0 + 1)]); - - uvec4 outEdge4 = - pixelsC | ((pixelsL & 0x01u) << 2u) | ((pixelsU & 0x02u) << 2u); - vec4 outEdge4Flt = vec4(outEdge4) / 255.0; - - imageStore(g_resultEdgeTexture, screenPosIBase.xy + ivec2(0, 0), - outEdge4Flt.xxxx); - imageStore(g_resultEdgeTexture, screenPosIBase.xy + ivec2(1, 0), - outEdge4Flt.yyyy); - imageStore(g_resultEdgeTexture, screenPosIBase.xy + ivec2(0, 1), - outEdge4Flt.zzzz); - imageStore(g_resultEdgeTexture, screenPosIBase.xy + ivec2(1, 1), - outEdge4Flt.wwww); - - // uvec4 numberOfEdges4 = uvec4(bitCount(outEdge4)); - // gl_FragDepth = - // any(greaterThan(numberOfEdges4, uvec4(1))) ? 1.0 : 0.0; - - gl_FragDepth = - any(greaterThan(outEdge4, uvec4(1))) ? 1.0 : 0.0; - } - \n#endif\n // COMBINE_EDGES - - \n#ifdef BLUR_EDGES\n - layout(early_fragment_tests) in; - void BlurEdges() { - // Each |gl_FragCoord| updates 4 texels of the original texture, which are - // 2x|gl_FragCoord| + (-1 or 0, -1 or 0) in the unnormalized texture - // coordinate, which is the coordinate used by texelFetch(). - // e.g. when gl_FragCoord == (3.5, 3.5), this fragment shader covers - // (6,6) (6,7) (7,6) (7,7) texels. - // Note: gl_FragCoord == (0.5, 0.5) (i.e. left-bottom-most fragment) - // covers (0,0) (0,1) (1,0) (1,1) texels - // gl_FragCoord == ((w/2)-0.5, (h/2)-0.5) (i.e. right-top-most fragment) - // covers (w-2,h-2) (w-2,h-1) (w-1,h-2) (w-1,h-1) - ivec3 screenPosIBase = ivec3(ivec2(gl_FragCoord.xy) * 2, 0); - vec3 screenPosBase = vec3(screenPosIBase); - - // When gl_FragCoord == (0.5, 0.5) (i.e. left-bottom-most fragment), - // |sampA| textureGatherOffset() looks up (-1,-1), (-1,0), (0,-1), (0,0). - // (-1,-1), (-1,0), (0,-1) must be handled. 
- // Note: textureGatherOffset() on out of range gives an undefined value. - uvec2 notBottomLeft = uvec2(notEqual(screenPosIBase.xy, ivec2(0, 0))); - // When gl_FragCoord == ((w/2)-0.5, (h/2)-0.5) (i.e. right-top-most - // fragment), |sampD| looks up (w-1, h-1), (w-1, h), (w, h-1), (w, h). - // (w-1, h), (w, h-1), (w, h) must be handled. - uvec2 notTopRight = uvec2( - notEqual((screenPosIBase.xy + 2), textureSize(g_src0TextureFlt, 0))); - - uint forFollowUpCount = 0u; - ivec4 forFollowUpCoords[4]; - - uint packedEdgesArray[4 * 4]; - - uvec4 sampA = uvec4( - textureGatherOffset(g_src0TextureFlt, - screenPosBase.xy * g_OneOverScreenSize, - ivec2(0, 0)) *255.5); - uvec4 sampB = uvec4( - textureGatherOffset(g_src0TextureFlt, - screenPosBase.xy * g_OneOverScreenSize, - ivec2(2, 0)) *255.5); - uvec4 sampC = uvec4( - textureGatherOffset(g_src0TextureFlt, - screenPosBase.xy * g_OneOverScreenSize, - ivec2(0, 2)) *255.5); - uvec4 sampD = uvec4( - textureGatherOffset(g_src0TextureFlt, - screenPosBase.xy * g_OneOverScreenSize, - ivec2(2, 2)) *255.5); - - packedEdgesArray[(0) * 4 + (0)] = - sampA.w * notBottomLeft.x * notBottomLeft.y; - packedEdgesArray[(1) * 4 + (0)] = sampA.z * notBottomLeft.y; - packedEdgesArray[(0) * 4 + (1)] = sampA.x * notBottomLeft.x; - packedEdgesArray[(1) * 4 + (1)] = sampA.y; - packedEdgesArray[(2) * 4 + (0)] = sampB.w * notBottomLeft.y; - packedEdgesArray[(3) * 4 + (0)] = - sampB.z * notBottomLeft.y * notTopRight.x; - packedEdgesArray[(2) * 4 + (1)] = sampB.x; - packedEdgesArray[(3) * 4 + (1)] = sampB.y * notTopRight.x; - packedEdgesArray[(0) * 4 + (2)] = sampC.w * notBottomLeft.x; - packedEdgesArray[(1) * 4 + (2)] = sampC.z; - packedEdgesArray[(0) * 4 + (3)] = - sampC.x * notBottomLeft.x * notTopRight.y; - packedEdgesArray[(1) * 4 + (3)] = sampC.y * notTopRight.y; - packedEdgesArray[(2) * 4 + (2)] = sampD.w; - packedEdgesArray[(3) * 4 + (2)] = sampD.z * notTopRight.x; - packedEdgesArray[(2) * 4 + (3)] = sampD.x * notTopRight.y; - packedEdgesArray[(3) * 4 + (3)] = sampD.y * notTopRight.x * notTopRight.y; - - for (int _i = 0; _i < 4; _i++) { - int _x = _i % 2; - int _y = _i / 2; - - ivec3 screenPosI = screenPosIBase + ivec3(_x, _y, 0); - - uint packedEdgesC = packedEdgesArray[(1 + _x) * 4 + (1 + _y)]; - - uvec4 edges = UnpackEdge(packedEdgesC); - uint numberOfEdges = edges.x + edges.y + edges.z + edges.w; - if (numberOfEdges <= 1u) - continue; - - vec4 edgesFlt = vec4(edges); - float fromRight = edgesFlt.r; - float fromAbove = edgesFlt.g; - float fromLeft = edgesFlt.b; - float fromBelow = edgesFlt.a; - - vec4 xFroms = vec4(fromBelow, fromAbove, fromRight, fromLeft); - - float blurCoeff = 0.0; - - // These are additional blurs that complement the main line-based - // blurring; Unlike line-based, these do not necessarily preserve - // the total amount of screen colour as they will take - // neighbouring pixel colours and apply them to the one currently - // processed. - - // 1.) L-like shape. - // For this shape, the total amount of screen colour will be - // preserved when this is a part of a (zigzag) diagonal line as the - // corners from the other side will do the same and take some of - // the current pixel's colour in return. - // However, in the case when this is an actual corner, the pixel's - // colour will be partially overwritten by it's 2 neighbours. 
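Each BlurEdges invocation shades four output pixels: the four textureGatherOffset calls above fill a 4x4 window of packed edge bytes, with the fragment's own 2x2 block at indices 1..2 and a one-texel ring around it, and out-of-range contributions are zeroed through the notBottomLeft/notTopRight masks instead of branches. A compact C++ model of the layout (a sketch, not the shader's actual storage):

    #include <array>
    #include <cstdint>

    // 4x4 window of packed edge bytes around one fragment's 2x2 block;
    // (x, y) in [0, 3], the fragment's own pixels at (1..2, 1..2).
    using EdgeWindow = std::array<uint32_t, 4 * 4>;

    inline uint32_t& At(EdgeWindow& w, int x, int y) {
      return w[x * 4 + y];  // same layout as packedEdgesArray[(x)*4+(y)]
    }

    // Multiplying by 0/1 masks keeps undefined out-of-range fetches from
    // contributing edge bits, without divergent branches.
    inline uint32_t Masked(uint32_t sampled, bool inside_x, bool inside_y) {
      return sampled * static_cast<uint32_t>(inside_x) *
             static_cast<uint32_t>(inside_y);
    }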
- if (numberOfEdges == 2u)
- {
- // with a value of 0.15, the pixel will retain approx 77% of its
- // colour and the remaining 23% will come from its 2 neighbours
- // (which are likely to be blurred too in the opposite direction)
- blurCoeff = 0.15;
-
- // Only do blending if it's an L shape - if we're between two
- // parallel edges, don't do anything
- blurCoeff *= (1.0 - fromBelow * fromAbove) *
- (1.0 - fromRight * fromLeft);
-
- if (blurCoeff == 0.0)
- continue;
-
- uint packedEdgesL = packedEdgesArray[(0 + _x) * 4 + (1 + _y)];
- uint packedEdgesB = packedEdgesArray[(1 + _x) * 4 + (0 + _y)];
- uint packedEdgesR = packedEdgesArray[(2 + _x) * 4 + (1 + _y)];
- uint packedEdgesT = packedEdgesArray[(1 + _x) * 4 + (2 + _y)];
-
- // Don't blend a large L shape because it is most likely the
- // intended shape, e.g. a rectangle:
- // large_l1 large_l2 large_l3 large_l4
- // _ _ | | _ _
- // X| X| |X |X
- // | ¯¯¯¯ ¯¯¯¯ |
- bool large_l1 = (packedEdgesC == (0x01u | 0x02u)) &&
- bool(packedEdgesL & 0x02u) &&
- bool(packedEdgesB & 0x01u);
- bool large_l2 = (packedEdgesC == (0x01u | 0x08u)) &&
- bool(packedEdgesL & 0x08u) &&
- bool(packedEdgesT & 0x01u);
- bool large_l3 = (packedEdgesC == (0x04u | 0x08u)) &&
- bool(packedEdgesR & 0x08u) &&
- bool(packedEdgesT & 0x04u);
- bool large_l4 = (packedEdgesC == (0x02u | 0x04u)) &&
- bool(packedEdgesR & 0x02u) &&
- bool(packedEdgesB & 0x04u);
- if (large_l1 || large_l2 || large_l3 || large_l4)
- continue;
-
- // Don't blend an isolated L shape because it's not a closed geometry.
- // isolated_l1 isolated_l2 isolated_l3 isolated_l4
- // _ _
- // X| X| |X |X
- // ¯¯ ¯¯
- bool isolated_l1 = (packedEdgesC == (0x01u | 0x02u)) &&
- bool((packedEdgesL & 0x02u) == 0x00u) &&
- bool((packedEdgesT & 0x04u) == 0x00u) &&
- bool((packedEdgesR & 0x08u) == 0x00u) &&
- bool((packedEdgesB & 0x01u) == 0x00u);
- bool isolated_l2 = (packedEdgesC == (0x01u | 0x08u)) &&
- bool((packedEdgesL & 0x08u) == 0x00u) &&
- bool((packedEdgesT & 0x01u) == 0x00u) &&
- bool((packedEdgesR & 0x02u) == 0x00u) &&
- bool((packedEdgesB & 0x04u) == 0x00u);
- bool isolated_l3 = (packedEdgesC == (0x04u | 0x08u)) &&
- bool((packedEdgesL & 0x02u) == 0x00u) &&
- bool((packedEdgesT & 0x04u) == 0x00u) &&
- bool((packedEdgesR & 0x08u) == 0x00u) &&
- bool((packedEdgesB & 0x01u) == 0x00u);
- bool isolated_l4 = (packedEdgesC == (0x02u | 0x04u)) &&
- bool((packedEdgesL & 0x08u) == 0x00u) &&
- bool((packedEdgesT & 0x01u) == 0x00u) &&
- bool((packedEdgesR & 0x02u) == 0x00u) &&
- bool((packedEdgesB & 0x04u) == 0x00u);
- if (isolated_l1 || isolated_l2 || isolated_l3 || isolated_l4)
- continue;
- }
-
- // 2.) U-like shape (surrounded with edges from 3 sides)
- if (numberOfEdges == 3u) {
- // with a value of 0.13, the pixel will retain approx 72% of its
- // colour and the remaining 28% will be picked from its 3
- // neighbours (which are unlikely to be blurred too but could be)
- blurCoeff = 0.13;
- }
-
- // 3.) Completely surrounded with edges from all 4 sides
- if (numberOfEdges == 4u) {
- // with a value of 0.07, the pixel will retain 78% of its colour
- // and the remaining 22% will come from its 4 neighbours (which
- // are unlikely to be blurred)
- blurCoeff = 0.07;
- }
-
- // |blurCoeff| must be non-zero at this point.
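The retention percentages quoted in these comments follow directly from the weight normalization a few lines below (centre weight 1.0, each open edge contributing blurCoeff): retained = 1 / (1 + n * blurCoeff). A standalone check of the three cases:

    #include <cstdio>

    int main() {
      const struct { int edges; float coeff; } kShapes[] = {
          {2, 0.15f},  // L shape:  1 / 1.30, ~77%
          {3, 0.13f},  // U shape:  1 / 1.39, ~72%
          {4, 0.07f},  // enclosed: 1 / 1.28, ~78%
      };
      for (const auto& s : kShapes) {
        float retained = 1.0f / (1.0f + s.edges * s.coeff);
        std::printf("%d edges, coeff %.2f -> centre retains %.0f%%\n",
                    s.edges, s.coeff, retained * 100.0f);
      }
      return 0;
    }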
- vec4 blurMap = xFroms * blurCoeff; - - vec4 pixelC = texelFetch(g_screenTexture, screenPosI.xy, 0); - - const float centerWeight = 1.0; - float fromBelowWeight = blurMap.x; - float fromAboveWeight = blurMap.y; - float fromRightWeight = blurMap.z; - float fromLeftWeight = blurMap.w; - - // this would be the proper math for blending if we were handling - // lines (Zs) and mini kernel smoothing here, but since we're doing - // lines separately, no need to complicate, just tweak the settings - // float fromBelowWeight = (1.0 / (1.0 - blurMap.x)) - 1.0; - // float fromAboveWeight = (1.0 / (1.0 - blurMap.y)) - 1.0; - // float fromRightWeight = (1.0 / (1.0 - blurMap.z)) - 1.0; - // float fromLeftWeight = (1.0 / (1.0 - blurMap.w)) - 1.0; - - float fourWeightSum = dot(blurMap, vec4(1, 1, 1, 1)); - float allWeightSum = centerWeight + fourWeightSum; - - vec4 color = vec4(0, 0, 0, 0); - if (fromLeftWeight > 0.0) { - vec4 pixelL = texelFetchOffset(g_screenTexture, screenPosI.xy, 0, - ivec2(-1, 0)); - color += fromLeftWeight * pixelL; - } - if (fromAboveWeight > 0.0) { - vec4 pixelT = texelFetchOffset(g_screenTexture, screenPosI.xy, 0, - ivec2(0, 1)); - color += fromAboveWeight * pixelT; - } - if (fromRightWeight > 0.0) { - vec4 pixelR = texelFetchOffset(g_screenTexture, screenPosI.xy, 0, - ivec2(1, 0)); - color += fromRightWeight * pixelR; - } - if (fromBelowWeight > 0.0) { - vec4 pixelB = texelFetchOffset(g_screenTexture, screenPosI.xy, 0, - ivec2(0, -1)); - color += fromBelowWeight * pixelB; - } - - color /= fourWeightSum + 0.0001; - - color = mix(color, pixelC, centerWeight / allWeightSum); - \n#ifdef IN_GAMMA_CORRECT_MODE\n - color.rgb = D3DX_FLOAT3_to_SRGB(color.rgb); - \n#endif\n - - \n#ifdef DEBUG_OUTPUT_AAINFO\n - imageStore(g_resultEdgeTexture, screenPosI.xy, - PackBlurAAInfo(screenPosI.xy, numberOfEdges)); - \n#endif\n - imageStore(g_resultRGBATextureSlot1, screenPosI.xy, color); - - if (numberOfEdges == 2u) { - uint packedEdgesL = packedEdgesArray[(0 + _x) * 4 + (1 + _y)]; - uint packedEdgesB = packedEdgesArray[(1 + _x) * 4 + (0 + _y)]; - uint packedEdgesR = packedEdgesArray[(2 + _x) * 4 + (1 + _y)]; - uint packedEdgesT = packedEdgesArray[(1 + _x) * 4 + (2 + _y)]; - - bool isHorizontalA = ((packedEdgesC) == (0x01u | 0x02u)) && - ((packedEdgesR & 0x08u) == 0x08u); - bool isHorizontalB = ((packedEdgesC) == (0x01u | 0x08u)) && - ((packedEdgesR & 0x02u) == 0x02u); - - bool isHCandidate = isHorizontalA || isHorizontalB; - - bool isVerticalA = ((packedEdgesC) == (0x02u | 0x04u)) && - ((packedEdgesT & 0x01u) == 0x01u); - bool isVerticalB = ((packedEdgesC) == (0x01u | 0x02u)) && - ((packedEdgesT & 0x04u) == 0x04u); - bool isVCandidate = isVerticalA || isVerticalB; - - bool isCandidate = isHCandidate || isVCandidate; - - if (!isCandidate) - continue; - - bool horizontal = isHCandidate; - - // what if both are candidates? 
do additional pruning (still not - // 100% but gets rid of worst case errors) - if (isHCandidate && isVCandidate) - horizontal = - (isHorizontalA && ((packedEdgesL & 0x02u) == 0x02u)) || - (isHorizontalB && ((packedEdgesL & 0x08u) == 0x08u)); - - ivec2 offsetC; - uint packedEdgesM1P0; - uint packedEdgesP1P0; - if (horizontal) { - packedEdgesM1P0 = packedEdgesL; - packedEdgesP1P0 = packedEdgesR; - offsetC = ivec2(2, 0); - } else { - packedEdgesM1P0 = packedEdgesB; - packedEdgesP1P0 = packedEdgesT; - offsetC = ivec2(0, 2); - } - - uvec4 edgesM1P0 = UnpackEdge(packedEdgesM1P0); - uvec4 edgesP1P0 = UnpackEdge(packedEdgesP1P0); - uvec4 edgesP2P0 = UnpackEdge(uint(texelFetch( - g_src0TextureFlt, screenPosI.xy + offsetC, 0).r * 255.5)); - - uvec4 arg0; - uvec4 arg1; - uvec4 arg2; - uvec4 arg3; - bool arg4; - - if (horizontal) { - arg0 = uvec4(edges); - arg1 = edgesM1P0; - arg2 = edgesP1P0; - arg3 = edgesP2P0; - arg4 = true; - } else { - // Reuse the same code for vertical (used for horizontal above) - // but rotate input data 90º counter-clockwise. See FindLineLength() - // e.g. arg0.r (new top) must be mapped to edges.g (old top) - arg0 = uvec4(edges.gbar); - arg1 = edgesM1P0.gbar; - arg2 = edgesP1P0.gbar; - arg3 = edgesP2P0.gbar; - arg4 = false; - } - - { - ivec2 screenPos = screenPosI.xy; - uvec4 _edges = arg0; - uvec4 _edgesM1P0 = arg1; - uvec4 _edgesP1P0 = arg2; - uvec4 _edgesP2P0 = arg3; - bool horizontal = arg4; - - // Normal Z case: - // __ - // X| - // ¯¯ - bool isInvertedZ = false; - bool isNormalZ = false; - { - \n#ifndef SETTINGS_ALLOW_SHORT_Zs\n - // (1u-_edges.a) constraint can be removed; it was added for - // some rare cases - uint isZShape = _edges.r * _edges.g * _edgesM1P0.g * - _edgesP1P0.a *_edgesP2P0.a * (1u - _edges.b) * - (1u - _edgesP1P0.r) * (1u - _edges.a) * - (1u - _edgesP1P0.g); - \n#else\n - uint isZShape = _edges.r * _edges.g * _edgesP1P0.a * - (1u - _edges.b) * (1u - _edgesP1P0.r) * (1u - _edges.a) * - (1u - _edgesP1P0.g); - isZShape *= (_edgesM1P0.g + _edgesP2P0.a); - // and at least one of these need to be there - \n#endif\n - if (isZShape > 0u) { - isNormalZ = true; - } - } - - // Inverted Z case: - // __ - // X| - // ¯¯ - { - \n#ifndef SETTINGS_ALLOW_SHORT_Zs\n - uint isZShape = _edges.r * _edges.a * _edgesM1P0.a * - _edgesP1P0.g * _edgesP2P0.g * (1u - _edges.b) * - (1u - _edgesP1P0.r) * (1u - _edges.g) * - (1u - _edgesP1P0.a); - \n#else\n - uint isZShape = _edges.r * _edges.a * _edgesP1P0.g * - (1u - _edges.b) * (1u - _edgesP1P0.r) * (1u - _edges.g) * - (1u - _edgesP1P0.a); - isZShape *= - (_edgesM1P0.a + _edgesP2P0.g); - // and at least one of these need to be there - \n#endif\n - - if (isZShape > 0u) { - isInvertedZ = true; - } - } - - bool isZ = isInvertedZ || isNormalZ; - if (isZ) { - forFollowUpCoords[forFollowUpCount++] = - ivec4(screenPosI.xy, horizontal, isInvertedZ); - } - } - } - } - - // This code below is the only potential bug with this algorithm : - // it HAS to be executed after the simple shapes above. It used to be - // executed as separate compute shader (by storing the packed - // 'forFollowUpCoords' in an append buffer and consuming it later) - // but the whole thing (append/consume buffers, using CS) appears to - // be too inefficient on most hardware. 
- // However, it seems to execute fairly efficiently here and without
- // any issues, although there is no 100% guarantee that this code
- // below will execute across all pixels (it has a c_maxLineLength
- // wide kernel) after other shader invocations processing the same
- // pixels have finished solving simple shapes. It appears to work
- // regardless, across all hardware; pixels with one edge or two
- // opposing edges are ignored by simple shapes anyway, and other
- // shapes stop the long line algorithm from executing. The only
- // danger appears to be simple shape L's colliding with Z shapes
- // from neighbouring pixels, but I couldn't reproduce any problems
- // on any hardware.
- for (uint _i = 0u; _i < forFollowUpCount; _i++) {
- ivec4 data = forFollowUpCoords[_i];
- ProcessDetectedZ(data.xy, bool(data.z), bool(data.w));
- }
- }
- \n#endif\n // BLUR_EDGES
-
- \n#ifdef DISPLAY_EDGES\n
- layout(location = 0) out vec4 color;
- layout(location = 1) out vec4 hasEdges;
- void DisplayEdges() {
- ivec2 screenPosI = ivec2(gl_FragCoord.xy);
-
- uint packedEdges = uint(0);
- uint shapeType = uint(0);
- UnpackBlurAAInfo(texelFetch(g_src0TextureFlt, screenPosI, 0).r,
- packedEdges, shapeType);
-
- vec4 edges = vec4(UnpackEdge(packedEdges));
- if (any(greaterThan(edges.xyzw, vec4(0)))) {
- \n#ifdef IN_BGR_MODE\n
- color = c_edgeDebugColours[shapeType].bgra;
- \n#else\n
- color = c_edgeDebugColours[shapeType];
- \n#endif\n
- hasEdges = vec4(1.0);
- } else {
- color = vec4(0);
- hasEdges = vec4(0.0);
- }
- }
- \n#endif\n // DISPLAY_EDGES
-
- void main() {
- \n#ifdef DETECT_EDGES1\n
- DetectEdges1();
- \n#endif\n
- \n#if defined DETECT_EDGES2\n
- DetectEdges2();
- \n#endif\n
- \n#if defined COMBINE_EDGES\n
- CombineEdges();
- \n#endif\n
- \n#if defined BLUR_EDGES\n
- BlurEdges();
- \n#endif\n
- \n#if defined DISPLAY_EDGES\n
- DisplayEdges();
- \n#endif\n
- }
- );
-/* clang-format on */
-
-} // namespace gles2
-} // namespace gpu
-
-#undef SHADER
diff --git a/chromium/gpu/command_buffer/service/gles2_cmd_apply_framebuffer_attachment_cmaa_intel.h b/chromium/gpu/command_buffer/service/gles2_cmd_apply_framebuffer_attachment_cmaa_intel.h
deleted file mode 100644
index 33bade31d60..00000000000
--- a/chromium/gpu/command_buffer/service/gles2_cmd_apply_framebuffer_attachment_cmaa_intel.h
+++ /dev/null
@@ -1,98 +0,0 @@
-// Copyright 2016 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef GPU_COMMAND_BUFFER_SERVICE_GLES2_CMD_APPLY_FRAMEBUFFER_ATTACHMENT_CMAA_INTEL_H_
-#define GPU_COMMAND_BUFFER_SERVICE_GLES2_CMD_APPLY_FRAMEBUFFER_ATTACHMENT_CMAA_INTEL_H_
-
-#include "gpu/command_buffer/service/gl_utils.h"
-#include "gpu/gpu_gles2_export.h"
-
-namespace gpu {
-namespace gles2 {
-class CopyTextureCHROMIUMResourceManager;
-class GLES2Decoder;
-class Framebuffer;
-class TextureManager;
-
-// This class encapsulates the resources required to implement the
-// GL_INTEL_framebuffer_CMAA extension via shaders.
-//
-// The CMAA (Conservative Morphological Anti-Aliasing) algorithm is applied to
-// all color attachments of the currently bound draw framebuffer.
-//
-// Reference GL_INTEL_framebuffer_CMAA for details.
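For context on what this commit removes: the deleted header below declared the single entry point the GLES2 decoder used. A hypothetical call site reconstructed from that declaration (the wrapper and its caching policy are illustrative, and it would only compile against the pre-removal tree):

    #include <memory>

    // Sketch: the real decoder owned the manager in a std::unique_ptr member
    // (apply_framebuffer_attachment_cmaa_intel_) and tore it down in
    // Destroy(); lazy construction is shown schematically here.
    void ApplyCmaa(
        gpu::gles2::GLES2Decoder* decoder,
        gpu::gles2::Framebuffer* framebuffer,
        gpu::gles2::CopyTextureCHROMIUMResourceManager* copier,
        gpu::gles2::TextureManager* texture_manager,
        std::unique_ptr<
            gpu::gles2::ApplyFramebufferAttachmentCMAAINTELResourceManager>&
            cmaa) {
      if (!cmaa) {
        cmaa = std::make_unique<
            gpu::gles2::ApplyFramebufferAttachmentCMAAINTELResourceManager>();
        cmaa->Initialize(decoder);
      }
      cmaa->ApplyFramebufferAttachmentCMAAINTEL(decoder, framebuffer, copier,
                                                texture_manager);
    }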
-class GPU_GLES2_EXPORT ApplyFramebufferAttachmentCMAAINTELResourceManager { - public: - ApplyFramebufferAttachmentCMAAINTELResourceManager(); - ~ApplyFramebufferAttachmentCMAAINTELResourceManager(); - - void Initialize(gles2::GLES2Decoder* decoder); - void Destroy(); - - // Applies the algorithm to the color attachments of the currently bound draw - // framebuffer. - void ApplyFramebufferAttachmentCMAAINTEL( - GLES2Decoder* decoder, - Framebuffer* framebuffer, - CopyTextureCHROMIUMResourceManager* copier, - TextureManager* texture_manager); - - private: - // Applies the CMAA algorithm to a texture. - void ApplyCMAAEffectTexture(GLuint source_texture, - GLuint dest_texture, - bool do_copy); - - void OnSize(GLint width, GLint height); - void ReleaseTextures(); - - GLuint CreateProgram(const char* defines, - const char* vs_source, - const char* fs_source); - GLuint CreateShader(GLenum type, const char* defines, const char* source); - - bool initialized_; - bool textures_initialized_; - bool is_in_gamma_correct_mode_; - bool supports_usampler_; - bool supports_r8_image_; - bool is_gles31_compatible_; - - int frame_id_; - - GLint width_; - GLint height_; - - GLuint edges0_shader_; - GLuint edges1_shader_; - GLuint edges_combine_shader_; - GLuint process_and_apply_shader_; - GLuint debug_display_edges_shader_; - - GLuint cmaa_framebuffer_; - - GLuint rgba8_texture_; - GLuint working_color_texture_; - GLuint edges0_texture_; - GLuint edges1_texture_; - GLuint mini4_edge_texture_; - GLuint mini4_edge_depth_texture_; - - GLuint edges0_shader_result_rgba_texture_slot1_; - GLuint edges0_shader_target_texture_slot2_; - GLuint edges1_shader_result_edge_texture_; - GLuint process_and_apply_shader_result_rgba_texture_slot1_; - GLuint edges_combine_shader_result_edge_texture_; - - static const char vert_str_[]; - static const char cmaa_frag_s1_[]; - static const char cmaa_frag_s2_[]; - - DISALLOW_COPY_AND_ASSIGN(ApplyFramebufferAttachmentCMAAINTELResourceManager); -}; - -} // namespace gles2 -} // namespace gpu - -#endif // GPU_COMMAND_BUFFER_SERVICE_GLES2_CMD_APPLY_FRAMEBUFFER_ATTACHMENT_CMAA_INTEL_H_ diff --git a/chromium/gpu/command_buffer/service/gles2_cmd_clear_framebuffer.cc b/chromium/gpu/command_buffer/service/gles2_cmd_clear_framebuffer.cc index cbd7fb6679d..371dfbb2ce0 100644 --- a/chromium/gpu/command_buffer/service/gles2_cmd_clear_framebuffer.cc +++ b/chromium/gpu/command_buffer/service/gles2_cmd_clear_framebuffer.cc @@ -47,10 +47,7 @@ ClearFramebufferResourceManager::ClearFramebufferResourceManager( Initialize(decoder); } -ClearFramebufferResourceManager::~ClearFramebufferResourceManager() { - Destroy(); - DCHECK(!buffer_id_); -} +ClearFramebufferResourceManager::~ClearFramebufferResourceManager() = default; void ClearFramebufferResourceManager::Initialize( const gles2::GLES2Decoder* decoder) { @@ -77,7 +74,6 @@ void ClearFramebufferResourceManager::Destroy() { glDeleteProgram(program_); glDeleteBuffersARB(1, &buffer_id_); - buffer_id_ = 0; } void ClearFramebufferResourceManager::ClearFramebuffer( diff --git a/chromium/gpu/command_buffer/service/gles2_cmd_clear_framebuffer.h b/chromium/gpu/command_buffer/service/gles2_cmd_clear_framebuffer.h index 7896f1e5fb6..81533a0b702 100644 --- a/chromium/gpu/command_buffer/service/gles2_cmd_clear_framebuffer.h +++ b/chromium/gpu/command_buffer/service/gles2_cmd_clear_framebuffer.h @@ -22,6 +22,7 @@ class GPU_GLES2_EXPORT ClearFramebufferResourceManager { ClearFramebufferResourceManager(const gles2::GLES2Decoder* decoder); 
~ClearFramebufferResourceManager(); + void Destroy(); void ClearFramebuffer(const gles2::GLES2Decoder* decoder, const gfx::Size& max_viewport_size, GLbitfield mask, @@ -34,7 +35,6 @@ class GPU_GLES2_EXPORT ClearFramebufferResourceManager { private: void Initialize(const gles2::GLES2Decoder* decoder); - void Destroy(); // The attributes used during invocation of the extension. static const GLuint kVertexPositionAttrib = 0; diff --git a/chromium/gpu/command_buffer/service/gles2_cmd_decoder.cc b/chromium/gpu/command_buffer/service/gles2_cmd_decoder.cc index 415e2101cbc..8de442a6b01 100644 --- a/chromium/gpu/command_buffer/service/gles2_cmd_decoder.cc +++ b/chromium/gpu/command_buffer/service/gles2_cmd_decoder.cc @@ -54,7 +54,6 @@ #include "gpu/command_buffer/service/framebuffer_manager.h" #include "gpu/command_buffer/service/gl_stream_texture_image.h" #include "gpu/command_buffer/service/gl_utils.h" -#include "gpu/command_buffer/service/gles2_cmd_apply_framebuffer_attachment_cmaa_intel.h" #include "gpu/command_buffer/service/gles2_cmd_clear_framebuffer.h" #include "gpu/command_buffer/service/gles2_cmd_copy_tex_image.h" #include "gpu/command_buffer/service/gles2_cmd_copy_texture_chromium.h" @@ -108,6 +107,7 @@ #include "ui/gl/gl_implementation.h" #include "ui/gl/gl_surface.h" #include "ui/gl/gl_version_info.h" +#include "ui/gl/gpu_preference.h" #include "ui/gl/gpu_switching_manager.h" #include "ui/gl/gpu_switching_observer.h" #include "ui/gl/gpu_timing.h" @@ -133,12 +133,15 @@ namespace gles2 { namespace { const char kOESDerivativeExtension[] = "GL_OES_standard_derivatives"; +const char kOESFboRenderMipmapExtension[] = "GL_OES_fbo_render_mipmap"; const char kEXTFragDepthExtension[] = "GL_EXT_frag_depth"; const char kEXTDrawBuffersExtension[] = "GL_EXT_draw_buffers"; const char kEXTShaderTextureLodExtension[] = "GL_EXT_shader_texture_lod"; const char kWEBGLMultiDrawExtension[] = "GL_WEBGL_multi_draw"; -const char kWEBGLMultiDrawInstancedExtension[] = - "GL_WEBGL_multi_draw_instanced"; +const char kWEBGLDrawInstancedBaseVertexBaseInstanceExtension[] = + "GL_WEBGL_draw_instanced_base_vertex_base_instance"; +const char kWEBGLMultiDrawInstancedBaseVertexBaseInstanceExtension[] = + "GL_WEBGL_multi_draw_instanced_base_vertex_base_instance"; template <typename MANAGER_TYPE, typename OBJECT_TYPE> GLuint GetClientId(const MANAGER_TYPE* manager, const OBJECT_TYPE* object) { @@ -277,6 +280,10 @@ static bool StringIsValidForGLES(const std::string& str) { str.end(); } +DisallowedFeatures::DisallowedFeatures() = default; +DisallowedFeatures::~DisallowedFeatures() = default; +DisallowedFeatures::DisallowedFeatures(const DisallowedFeatures&) = default; + // This class prevents any GL errors that occur when it is in scope from // being reported to the client. class ScopedGLErrorSuppressor { @@ -751,7 +758,7 @@ class GLES2DecoderImpl : public GLES2Decoder, const gfx::Rect& cleared_rect) override; // Implements GpuSwitchingObserver. - void OnGpuSwitched() override; + void OnGpuSwitched(gl::GpuPreference active_gpu_heuristic) override; // Restores the current state to the user's settings. 
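The ClearFramebufferResourceManager hunks above invert its cleanup contract: the destructor is now trivial ('= default') and the newly public Destroy() must be called by the owner while a GL context is still current, which is exactly what GLES2DecoderImpl::Destroy() does in a later hunk. A schematic of the new teardown order (the wrapper name is illustrative):

    #include <memory>

    void TearDownBlitter(
        bool have_context,
        std::unique_ptr<gpu::gles2::ClearFramebufferResourceManager>& blit) {
      if (!blit)
        return;
      if (have_context)
        blit->Destroy();  // frees the GL program and vertex buffer
      blit.reset();       // the destructor itself touches no GL state
    }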
void RestoreCurrentFramebufferBindings(); @@ -1179,7 +1186,6 @@ class GLES2DecoderImpl : public GLES2Decoder, const volatile GLbyte* mailbox); void DoBeginSharedImageAccessDirectCHROMIUM(GLuint client_id, GLenum mode); void DoEndSharedImageAccessDirectCHROMIUM(GLuint client_id); - void DoApplyScreenSpaceAntialiasingCHROMIUM(); void BindImage(uint32_t client_texture_id, uint32_t texture_target, @@ -2137,9 +2143,12 @@ class GLES2DecoderImpl : public GLES2Decoder, bool ValidateStencilStateForDraw(const char* function_name); // Checks if the current program and vertex attributes are valid for drawing. - bool IsDrawValid( - const char* function_name, GLuint max_vertex_accessed, bool instanced, - GLsizei primcount); + bool IsDrawValid(const char* function_name, + GLuint max_vertex_accessed, + bool instanced, + GLsizei primcount, + GLint basevertex, + GLuint baseinstance); // Returns true if successful, simulated will be true if attrib0 was // simulated. @@ -2182,25 +2191,37 @@ class GLES2DecoderImpl : public GLES2Decoder, void RestoreStateForTextures(); // Returns true if GL_FIXED attribs were simulated. - bool SimulateFixedAttribs( - const char* function_name, - GLuint max_vertex_accessed, bool* simulated, GLsizei primcount); + bool SimulateFixedAttribs(const char* function_name, + GLuint max_vertex_accessed, + bool* simulated, + GLsizei primcount); void RestoreStateForSimulatedFixedAttribs(); + // Having extra base vertex and base instance parameters and run-time if else + // for heavily called DoMultiDrawArrays/DoMultiDrawElements caused + // performance regression, thus use non-type template draw functions + enum class DrawArraysOption { Default = 0, UseBaseInstance }; + enum class DrawElementsOption { Default = 0, UseBaseVertexBaseInstance }; + + template <DrawArraysOption option> bool CheckMultiDrawArraysVertices(const char* function_name, bool instanced, const GLint* firsts, const GLsizei* counts, const GLsizei* primcounts, + const GLuint* baseinstances, GLsizei drawcount, GLuint* total_max_vertex_accessed, GLsizei* total_max_primcount); + template <DrawElementsOption option> bool CheckMultiDrawElementsVertices(const char* function_name, bool instanced, const GLsizei* counts, GLenum type, const int32_t* offsets, const GLsizei* primcounts, + const GLint* basevertices, + const GLuint* baseinstances, GLsizei drawcount, Buffer* element_array_buffer, GLuint* total_max_vertex_accessed, @@ -2215,13 +2236,18 @@ class GLES2DecoderImpl : public GLES2Decoder, // Handle MultiDrawArrays and MultiDrawElements for both instanced and // non-instanced cases (primcount is always 1 for non-instanced). + // (basevertex and baseinstance are always 0 for non-basevertex-baseinstance + // draws) + template <DrawArraysOption option> error::Error DoMultiDrawArrays(const char* function_name, bool instanced, GLenum mode, const GLint* firsts, const GLsizei* counts, const GLsizei* primcounts, + const GLuint* baseinstances, GLsizei drawcount); + template <DrawElementsOption option> error::Error DoMultiDrawElements(const char* function_name, bool instanced, GLenum mode, @@ -2229,6 +2255,8 @@ class GLES2DecoderImpl : public GLES2Decoder, GLenum type, const int32_t* offsets, const GLsizei* primcounts, + const GLint* basevertices, + const GLuint* baseinstances, GLsizei drawcount); GLenum GetBindTargetForSamplerType(GLenum type) { @@ -2689,11 +2717,13 @@ class GLES2DecoderImpl : public GLES2Decoder, // contexts may be broken. These flags override the shared state to preserve // WebGL semantics. 
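The comment introducing DrawArraysOption/DrawElementsOption above spells out the motivation: passing basevertex/baseinstance as run-time parameters through these hot draw loops measurably regressed performance, so the option is a non-type template parameter and every `if (option == ...)` folds to a constant within each instantiation. A reduced sketch of the pattern (stub functions stand in for the GL calls):

    enum class DrawArraysOption { Default = 0, UseBaseInstance };

    void IssueDraw(int first, int count);            // stub
    void SetBaseInstanceUniform(unsigned instance);  // stub

    template <DrawArraysOption option>
    void MultiDrawLoop(const int* firsts, const int* counts,
                       const unsigned* baseinstances, int drawcount) {
      for (int i = 0; i < drawcount; ++i) {
        // 'option' is a compile-time constant, so this branch disappears
        // entirely from the Default instantiation.
        if (option == DrawArraysOption::UseBaseInstance)
          SetBaseInstanceUniform(baseinstances[i]);
        IssueDraw(firsts[i], counts[i]);
      }
    }

A plain `if` on a template constant folds the same way `if constexpr` would; call sites pick the instantiation, e.g. MultiDrawLoop<DrawArraysOption::Default>(firsts, counts, nullptr, n).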
bool derivatives_explicitly_enabled_; + bool fbo_render_mipmap_explicitly_enabled_; bool frag_depth_explicitly_enabled_; bool draw_buffers_explicitly_enabled_; bool shader_texture_lod_explicitly_enabled_; bool multi_draw_explicitly_enabled_; - bool multi_draw_instanced_explicitly_enabled_; + bool draw_instanced_base_vertex_base_instance_explicitly_enabled_; + bool multi_draw_instanced_base_vertex_base_instance_explicitly_enabled_; bool compile_shader_always_succeeds_; @@ -2706,8 +2736,6 @@ class GLES2DecoderImpl : public GLES2Decoder, // Log extra info. bool service_logging_; - std::unique_ptr<ApplyFramebufferAttachmentCMAAINTELResourceManager> - apply_framebuffer_attachment_cmaa_intel_; std::unique_ptr<CopyTexImageResourceManager> copy_tex_image_blit_; std::unique_ptr<CopyTextureCHROMIUMResourceManager> copy_texture_chromium_; std::unique_ptr<SRGBConverter> srgb_converter_; @@ -3458,11 +3486,13 @@ GLES2DecoderImpl::GLES2DecoderImpl( supports_commit_overlay_planes_(false), supports_async_swap_(false), derivatives_explicitly_enabled_(false), + fbo_render_mipmap_explicitly_enabled_(false), frag_depth_explicitly_enabled_(false), draw_buffers_explicitly_enabled_(false), shader_texture_lod_explicitly_enabled_(false), multi_draw_explicitly_enabled_(false), - multi_draw_instanced_explicitly_enabled_(false), + draw_instanced_base_vertex_base_instance_explicitly_enabled_(false), + multi_draw_instanced_base_vertex_base_instance_explicitly_enabled_(false), compile_shader_always_succeeds_(false), lose_context_when_out_of_memory_(false), should_use_native_gmb_for_backbuffer_(false), @@ -4450,9 +4480,15 @@ bool GLES2DecoderImpl::InitializeShaderTranslator() { if (shader_spec == SH_WEBGL_SPEC || shader_spec == SH_WEBGL2_SPEC) { resources.ANGLE_multi_draw = - (multi_draw_explicitly_enabled_ && features().webgl_multi_draw) || - (multi_draw_instanced_explicitly_enabled_ && - features().webgl_multi_draw_instanced); + multi_draw_explicitly_enabled_ && features().webgl_multi_draw; + } + + if (shader_spec == SH_WEBGL2_SPEC) { + resources.ANGLE_base_vertex_base_instance = + (draw_instanced_base_vertex_base_instance_explicitly_enabled_ && + features().webgl_draw_instanced_base_vertex_base_instance) || + (multi_draw_instanced_base_vertex_base_instance_explicitly_enabled_ && + features().webgl_multi_draw_instanced_base_vertex_base_instance); } if (((shader_spec == SH_WEBGL_SPEC || shader_spec == SH_WEBGL2_SPEC) && @@ -4493,6 +4529,8 @@ bool GLES2DecoderImpl::InitializeShaderTranslator() { driver_bug_workarounds |= SH_REWRITE_FLOAT_UNARY_MINUS_OPERATOR; if (workarounds().dont_use_loops_to_initialize_variables) driver_bug_workarounds |= SH_DONT_USE_LOOPS_TO_INITIALIZE_VARIABLES; + if (workarounds().remove_dynamic_indexing_of_swizzled_vector) + driver_bug_workarounds |= SH_REMOVE_DYNAMIC_INDEXING_OF_SWIZZLED_VECTOR; // Initialize uninitialized locals by default if (!workarounds().dont_initialize_uninitialized_locals) @@ -5286,9 +5324,9 @@ void GLES2DecoderImpl::SetLevelInfo(uint32_t client_id, 0 /* border */, format, type, cleared_rect); } -void GLES2DecoderImpl::OnGpuSwitched() { +void GLES2DecoderImpl::OnGpuSwitched(gl::GpuPreference active_gpu_heuristic) { // Send OnGpuSwitched notification to renderer process via decoder client. 
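These *_explicitly_enabled_ flags exist because a WebGL context must not expose an extension's shader built-ins until the page actually calls getExtension(), even when the GPU could support them. The shader-translator hunk above encodes that as enabled-AND-supported per extension; condensed into a sketch:

    // Either extension turns on the same ANGLE translator resource, but
    // only when the client opted in AND the feature is available.
    bool ExposeBaseVertexBaseInstanceBuiltins(
        bool draw_explicitly_enabled, bool draw_feature_available,
        bool multi_explicitly_enabled, bool multi_feature_available) {
      return (draw_explicitly_enabled && draw_feature_available) ||
             (multi_explicitly_enabled && multi_feature_available);
    }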
- client()->OnGpuSwitched(); + client()->OnGpuSwitched(active_gpu_heuristic); } void GLES2DecoderImpl::Destroy(bool have_context) { @@ -5316,11 +5354,6 @@ void GLES2DecoderImpl::Destroy(bool have_context) { ReleaseAllBackTextures(have_context); if (have_context) { - if (apply_framebuffer_attachment_cmaa_intel_.get()) { - apply_framebuffer_attachment_cmaa_intel_->Destroy(); - apply_framebuffer_attachment_cmaa_intel_.reset(); - } - if (copy_tex_image_blit_.get()) { copy_tex_image_blit_->Destroy(); copy_tex_image_blit_.reset(); @@ -5336,7 +5369,10 @@ void GLES2DecoderImpl::Destroy(bool have_context) { srgb_converter_.reset(); } - clear_framebuffer_blit_.reset(); + if (clear_framebuffer_blit_.get()) { + clear_framebuffer_blit_->Destroy(); + clear_framebuffer_blit_.reset(); + } if (state_.current_program.get()) { program_manager()->UnuseProgram(shader_manager(), @@ -5437,7 +5473,6 @@ void GLES2DecoderImpl::Destroy(bool have_context) { // state_.current_program object. state_.current_program = nullptr; - apply_framebuffer_attachment_cmaa_intel_.reset(); copy_tex_image_blit_.reset(); copy_texture_chromium_.reset(); srgb_converter_.reset(); @@ -8559,7 +8594,9 @@ void GLES2DecoderImpl::DoFramebufferTexture2DCommon( service_id = texture_ref->service_id(); } - if ((level > 0 && !feature_info_->IsWebGL2OrES3Context()) || + if ((level > 0 && !feature_info_->IsWebGL2OrES3Context() && + !(fbo_render_mipmap_explicitly_enabled_ && + feature_info_->feature_flags().oes_fbo_render_mipmap)) || !texture_manager()->ValidForTarget(textarget, level, 0, 0, 1)) { LOCAL_SET_GL_ERROR( GL_INVALID_VALUE, @@ -9746,8 +9783,13 @@ void GLES2DecoderImpl::DoLinkProgram(GLuint program_id) { if (workarounds().clear_uniforms_before_first_program_use) program_manager()->ClearUniforms(program); } - if (features().webgl_multi_draw || features().webgl_multi_draw_instanced) + if (features().webgl_multi_draw) program_manager()->UpdateDrawIDUniformLocation(program); + if (features().webgl_draw_instanced_base_vertex_base_instance || + features().webgl_multi_draw_instanced_base_vertex_base_instance) { + program_manager()->UpdateBaseVertexUniformLocation(program); + program_manager()->UpdateBaseInstanceUniformLocation(program); + } } // LinkProgram can be very slow. 
Exit command processing to allow for @@ -10855,9 +10897,12 @@ bool GLES2DecoderImpl::ValidateStencilStateForDraw(const char* function_name) { return true; } -bool GLES2DecoderImpl::IsDrawValid( - const char* function_name, GLuint max_vertex_accessed, bool instanced, - GLsizei primcount) { +bool GLES2DecoderImpl::IsDrawValid(const char* function_name, + GLuint max_vertex_accessed, + bool instanced, + GLsizei primcount, + GLint basevertex, + GLuint baseinstance) { DCHECK(instanced || primcount == 1); // NOTE: We specifically do not check current_program->IsValid() because @@ -10881,7 +10926,7 @@ bool GLES2DecoderImpl::IsDrawValid( if (!state_.vertex_attrib_manager->ValidateBindings( function_name, this, feature_info_.get(), buffer_manager(), state_.current_program.get(), max_vertex_accessed, instanced, - primcount)) { + primcount, basevertex, baseinstance)) { return false; } @@ -11019,9 +11064,10 @@ void GLES2DecoderImpl::RestoreStateForAttrib( } } -bool GLES2DecoderImpl::SimulateFixedAttribs( - const char* function_name, - GLuint max_vertex_accessed, bool* simulated, GLsizei primcount) { +bool GLES2DecoderImpl::SimulateFixedAttribs(const char* function_name, + GLuint max_vertex_accessed, + bool* simulated, + GLsizei primcount) { DCHECK(simulated); *simulated = false; if (gl_version_info().SupportsFixedType()) @@ -11048,8 +11094,8 @@ bool GLES2DecoderImpl::SimulateFixedAttribs( const VertexAttrib* attrib = *it; const Program::VertexAttrib* attrib_info = state_.current_program->GetAttribInfoByLocation(attrib->index()); - GLuint max_accessed = attrib->MaxVertexAccessed(primcount, - max_vertex_accessed); + GLuint max_accessed = + attrib->MaxVertexAccessed(primcount, max_vertex_accessed); GLuint num_vertices = max_accessed + 1; if (num_vertices == 0) { LOCAL_SET_GL_ERROR( @@ -11094,8 +11140,8 @@ bool GLES2DecoderImpl::SimulateFixedAttribs( const VertexAttrib* attrib = *it; const Program::VertexAttrib* attrib_info = state_.current_program->GetAttribInfoByLocation(attrib->index()); - GLuint max_accessed = attrib->MaxVertexAccessed(primcount, - max_vertex_accessed); + GLuint max_accessed = + attrib->MaxVertexAccessed(primcount, max_vertex_accessed); GLuint num_vertices = max_accessed + 1; if (num_vertices == 0) { LOCAL_SET_GL_ERROR( @@ -11171,20 +11217,28 @@ bool GLES2DecoderImpl::AttribsTypeMatch() { return true; } +template <GLES2DecoderImpl::DrawArraysOption option> ALWAYS_INLINE bool GLES2DecoderImpl::CheckMultiDrawArraysVertices( const char* function_name, bool instanced, const GLint* firsts, const GLsizei* counts, const GLsizei* primcounts, + const GLuint* baseinstances, GLsizei drawcount, GLuint* total_max_vertex_accessed, GLsizei* total_max_primcount) { + if (option == DrawArraysOption::Default) { + DCHECK_EQ(baseinstances, nullptr); + } DCHECK_GE(drawcount, 0); for (GLsizei draw_id = 0; draw_id < drawcount; ++draw_id) { GLint first = firsts[draw_id]; GLsizei count = counts[draw_id]; GLsizei primcount = instanced ? primcounts[draw_id] : 1; + GLuint baseinstance = (option == DrawArraysOption::UseBaseInstance) + ? baseinstances[draw_id] + : 0; // We have to check this here because the prototype for glDrawArrays // is GLint not GLsizei. 
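The sign check that follows, and the "first + count overflow" rejection in the next hunk, are ordinary checked arithmetic; base::CheckedNumeric, which the new multi-draw handlers already use for shared-memory sizes, expresses the same thing compactly. A sketch, assuming the usual GL typedefs and that zero-count draws were already filtered out, as the decoder does:

    #include "base/numerics/checked_math.h"

    using GLint = int;  // assumed typedefs for a standalone sketch
    using GLsizei = int;
    using GLuint = unsigned int;

    // Highest vertex index glDrawArrays(first, count) can touch, or false
    // on negative inputs or overflow.
    bool MaxVertexAccessed(GLint first, GLsizei count, GLuint* out) {
      if (first < 0 || count <= 0)
        return false;
      base::CheckedNumeric<GLuint> max_vertex(first);
      max_vertex += count;
      max_vertex -= 1;
      return max_vertex.AssignIfValid(out);
    }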
if (first < 0) { @@ -11214,8 +11268,8 @@ ALWAYS_INLINE bool GLES2DecoderImpl::CheckMultiDrawArraysVertices( "first + count overflow"); return false; } - if (!IsDrawValid(function_name, max_vertex_accessed, instanced, - primcount)) { + if (!IsDrawValid(function_name, max_vertex_accessed, instanced, primcount, + 0, baseinstance)) { return false; } *total_max_vertex_accessed = @@ -11266,6 +11320,7 @@ ALWAYS_INLINE bool GLES2DecoderImpl::CheckTransformFeedback( return true; } +template <GLES2DecoderImpl::DrawArraysOption option> ALWAYS_INLINE error::Error GLES2DecoderImpl::DoMultiDrawArrays( const char* function_name, bool instanced, @@ -11273,7 +11328,11 @@ ALWAYS_INLINE error::Error GLES2DecoderImpl::DoMultiDrawArrays( const GLint* firsts, const GLsizei* counts, const GLsizei* primcounts, + const GLuint* baseinstances, GLsizei drawcount) { + if (option == DrawArraysOption::Default) { + DCHECK_EQ(baseinstances, nullptr); + } error::Error error = WillAccessBoundFramebufferForDraw(); if (error != error::kNoError) return error; @@ -11294,9 +11353,9 @@ ALWAYS_INLINE error::Error GLES2DecoderImpl::DoMultiDrawArrays( GLuint total_max_vertex_accessed = 0; GLsizei total_max_primcount = 0; - if (!CheckMultiDrawArraysVertices( - function_name, instanced, firsts, counts, primcounts, drawcount, - &total_max_vertex_accessed, &total_max_primcount)) { + if (!CheckMultiDrawArraysVertices<option>( + function_name, instanced, firsts, counts, primcounts, baseinstances, + drawcount, &total_max_vertex_accessed, &total_max_primcount)) { return error::kNoError; } @@ -11335,6 +11394,9 @@ ALWAYS_INLINE error::Error GLES2DecoderImpl::DoMultiDrawArrays( return error::kNoError; } bool simulated_fixed_attribs = false; + // The branch with fixed attrib is not meant to be used + // normally but just to pass OpenGL ES 2 conformance where there's no + // basevertex and baseinstance support. 
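Note how DoMultiDrawArrays validates every sub-draw up front and keeps only two aggregates, total_max_vertex_accessed and total_max_primcount, so the attrib-0 and GL_FIXED workarounds that follow run once per multi-draw rather than once per sub-draw. Schematically (the validator callback stands in for the per-draw checks above):

    #include <algorithm>

    struct MultiDrawBounds {
      unsigned max_vertex_accessed = 0;
      int max_primcount = 0;
    };

    template <typename ValidateOne>
    bool CheckAllDraws(int drawcount, ValidateOne validate,
                       MultiDrawBounds* bounds) {
      for (int i = 0; i < drawcount; ++i) {
        unsigned max_vertex = 0;
        int primcount = 0;
        // A failed sub-draw has already raised a GL error; the whole
        // multi-draw then becomes a no-op.
        if (!validate(i, &max_vertex, &primcount))
          return false;
        bounds->max_vertex_accessed =
            std::max(bounds->max_vertex_accessed, max_vertex);
        bounds->max_primcount = std::max(bounds->max_primcount, primcount);
      }
      return true;
    }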
if (SimulateFixedAttribs(function_name, total_max_vertex_accessed, &simulated_fixed_attribs, total_max_primcount)) { bool textures_set; @@ -11347,6 +11409,8 @@ ALWAYS_INLINE error::Error GLES2DecoderImpl::DoMultiDrawArrays( } GLint draw_id_location = state_.current_program->draw_id_uniform_location(); + GLint base_instance_location = + state_.current_program->base_instance_uniform_location(); for (GLsizei draw_id = 0; draw_id < drawcount; ++draw_id) { GLint first = firsts[draw_id]; GLsizei count = counts[draw_id]; @@ -11360,7 +11424,16 @@ ALWAYS_INLINE error::Error GLES2DecoderImpl::DoMultiDrawArrays( if (!instanced) { api()->glDrawArraysFn(mode, first, count); } else { - api()->glDrawArraysInstancedANGLEFn(mode, first, count, primcount); + if (option != DrawArraysOption::UseBaseInstance) { + api()->glDrawArraysInstancedANGLEFn(mode, first, count, primcount); + } else { + GLuint baseinstance = baseinstances[draw_id]; + if (base_instance_location >= 0) { + api()->glUniform1iFn(base_instance_location, baseinstance); + } + api()->glDrawArraysInstancedBaseInstanceANGLEFn( + mode, first, count, primcount, baseinstance); + } } } if (state_.bound_transform_feedback.get()) { @@ -11374,6 +11447,13 @@ ALWAYS_INLINE error::Error GLES2DecoderImpl::DoMultiDrawArrays( if (simulated_fixed_attribs) { RestoreStateForSimulatedFixedAttribs(); } + // only reset base vertex and base instance shader variable when it's + // possibly non-zero + if (option == DrawArraysOption::UseBaseInstance) { + if (base_instance_location >= 0) { + api()->glUniform1iFn(base_instance_location, 0); + } + } } if (simulated_attrib_0) { // We don't have to restore attrib 0 generic data at the end of this @@ -11391,8 +11471,9 @@ error::Error GLES2DecoderImpl::HandleDrawArrays(uint32_t immediate_data_size, *static_cast<const volatile cmds::DrawArrays*>(cmd_data); GLint first = static_cast<GLint>(c.first); GLsizei count = static_cast<GLsizei>(c.count); - return DoMultiDrawArrays("glDrawArrays", false, static_cast<GLenum>(c.mode), - &first, &count, nullptr, 1); + return DoMultiDrawArrays<DrawArraysOption::Default>( + "glDrawArrays", false, static_cast<GLenum>(c.mode), &first, &count, + nullptr, nullptr, 1); } error::Error GLES2DecoderImpl::HandleDrawArraysInstancedANGLE( @@ -11407,11 +11488,35 @@ error::Error GLES2DecoderImpl::HandleDrawArraysInstancedANGLE( GLint first = static_cast<GLint>(c.first); GLsizei count = static_cast<GLsizei>(c.count); GLsizei primcount = static_cast<GLsizei>(c.primcount); - return DoMultiDrawArrays("glDrawArraysInstancedANGLE", true, - static_cast<GLenum>(c.mode), &first, &count, - &primcount, 1); + return DoMultiDrawArrays<DrawArraysOption::Default>( + "glDrawArraysInstancedANGLE", true, static_cast<GLenum>(c.mode), &first, + &count, &primcount, nullptr, 1); +} + +error::Error GLES2DecoderImpl::HandleDrawArraysInstancedBaseInstanceANGLE( + uint32_t immediate_data_size, + const volatile void* cmd_data) { + const volatile gles2::cmds::DrawArraysInstancedBaseInstanceANGLE& c = + *static_cast< + const volatile gles2::cmds::DrawArraysInstancedBaseInstanceANGLE*>( + cmd_data); + if (!features().angle_instanced_arrays) + return error::kUnknownCommand; + if (!features().webgl_draw_instanced_base_vertex_base_instance && + !features().webgl_multi_draw_instanced_base_vertex_base_instance) + return error::kUnknownCommand; + + GLint first = static_cast<GLint>(c.first); + GLsizei count = static_cast<GLsizei>(c.count); + GLsizei primcount = static_cast<GLsizei>(c.primcount); + GLuint baseInstances = 
static_cast<GLuint>(c.baseinstance); + return DoMultiDrawArrays<DrawArraysOption::UseBaseInstance>( + "glDrawArraysInstancedBaseInstanceANGLE", true, + static_cast<GLenum>(c.mode), &first, &count, &primcount, &baseInstances, + 1); } +template <GLES2DecoderImpl::DrawElementsOption option> ALWAYS_INLINE bool GLES2DecoderImpl::CheckMultiDrawElementsVertices( const char* function_name, bool instanced, @@ -11419,15 +11524,28 @@ ALWAYS_INLINE bool GLES2DecoderImpl::CheckMultiDrawElementsVertices( GLenum type, const int32_t* offsets, const GLsizei* primcounts, + const GLint* basevertices, + const GLuint* baseinstances, GLsizei drawcount, Buffer* element_array_buffer, GLuint* total_max_vertex_accessed, GLsizei* total_max_primcount) { + if (option == DrawElementsOption::Default) { + DCHECK_EQ(basevertices, nullptr); + DCHECK_EQ(baseinstances, nullptr); + } DCHECK_GE(drawcount, 0); for (GLsizei draw_id = 0; draw_id < drawcount; ++draw_id) { GLsizei count = counts[draw_id]; GLsizei offset = offsets[draw_id]; GLsizei primcount = instanced ? primcounts[draw_id] : 1; + GLint basevertex = (option == DrawElementsOption::UseBaseVertexBaseInstance) + ? basevertices[draw_id] + : 0; + GLint baseinstance = + (option == DrawElementsOption::UseBaseVertexBaseInstance) + ? baseinstances[draw_id] + : 0; if (count < 0) { LOCAL_SET_GL_ERROR(GL_INVALID_VALUE, function_name, "count < 0"); @@ -11455,18 +11573,19 @@ ALWAYS_INLINE bool GLES2DecoderImpl::CheckMultiDrawElementsVertices( return false; } - if (!IsDrawValid(function_name, max_vertex_accessed, instanced, - primcount)) { + if (!IsDrawValid(function_name, max_vertex_accessed, instanced, primcount, + basevertex, baseinstance)) { return false; } *total_max_vertex_accessed = - std::max(*total_max_vertex_accessed, max_vertex_accessed); + std::max(*total_max_vertex_accessed, max_vertex_accessed + basevertex); *total_max_primcount = std::max(*total_max_primcount, primcount); } return true; } +template <GLES2DecoderImpl::DrawElementsOption option> ALWAYS_INLINE error::Error GLES2DecoderImpl::DoMultiDrawElements( const char* function_name, bool instanced, @@ -11475,7 +11594,14 @@ ALWAYS_INLINE error::Error GLES2DecoderImpl::DoMultiDrawElements( GLenum type, const int32_t* offsets, const GLsizei* primcounts, + const GLint* basevertices, + const GLuint* baseinstances, GLsizei drawcount) { + if (option == DrawElementsOption::Default) { + DCHECK_EQ(basevertices, nullptr); + DCHECK_EQ(baseinstances, nullptr); + } + error::Error error = WillAccessBoundFramebufferForDraw(); if (error != error::kNoError) return error; @@ -11515,10 +11641,10 @@ ALWAYS_INLINE error::Error GLES2DecoderImpl::DoMultiDrawElements( GLuint total_max_vertex_accessed = 0; GLsizei total_max_primcount = 0; - if (!CheckMultiDrawElementsVertices( + if (!CheckMultiDrawElementsVertices<option>( function_name, instanced, counts, type, offsets, primcounts, - drawcount, element_array_buffer, &total_max_vertex_accessed, - &total_max_primcount)) { + basevertices, baseinstances, drawcount, element_array_buffer, + &total_max_vertex_accessed, &total_max_primcount)) { return error::kNoError; } @@ -11548,6 +11674,9 @@ ALWAYS_INLINE error::Error GLES2DecoderImpl::DoMultiDrawElements( return error::kNoError; } bool simulated_fixed_attribs = false; + // The branch with fixed attrib is not meant to be used + // normally But just to pass OpenGL ES 2 conformance where there's no + // basevertex and baseinstance support. 
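Two details in the elements path deserve a note before the draw loop that follows. First, with a base vertex the largest index in the element buffer no longer bounds vertex fetches by itself, which is why the validation above folds max_vertex_accessed + basevertex into the running total. Second, when ANGLE lowers gl_BaseVertex/gl_BaseInstance to ordinary uniforms, the decoder mirrors each sub-draw's values into them around the real draw and resets them to zero afterwards, since a stale non-zero value would bleed into later default-path draws. A sketch of that bracketing (the GL entry point is declared as a stub; a location of -1 means the translator injected no uniform):

    void glUniform1i(int location, int value);  // provided by the GL bindings

    void SetEmulatedBases(int base_vertex_location, int base_instance_location,
                          int basevertex, unsigned baseinstance) {
      if (base_vertex_location >= 0)
        glUniform1i(base_vertex_location, basevertex);
      if (base_instance_location >= 0)
        glUniform1i(base_instance_location, static_cast<int>(baseinstance));
      // ... then glDrawElementsInstancedBaseVertexBaseInstanceANGLE(...);
      // after the whole multi-draw, both locations are reset to 0 on the
      // UseBaseVertexBaseInstance path, the only path where they can be
      // non-zero.
    }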
if (SimulateFixedAttribs(function_name, total_max_vertex_accessed, &simulated_fixed_attribs, total_max_primcount)) { bool textures_set; @@ -11572,6 +11701,10 @@ ALWAYS_INLINE error::Error GLES2DecoderImpl::DoMultiDrawElements( } GLint draw_id_location = state_.current_program->draw_id_uniform_location(); + GLint base_vertex_location = + state_.current_program->base_vertex_uniform_location(); + GLint base_instance_location = + state_.current_program->base_instance_uniform_location(); for (GLsizei draw_id = 0; draw_id < drawcount; ++draw_id) { GLsizei count = counts[draw_id]; GLsizei offset = offsets[draw_id]; @@ -11589,8 +11722,21 @@ ALWAYS_INLINE error::Error GLES2DecoderImpl::DoMultiDrawElements( if (!instanced) { api()->glDrawElementsFn(mode, count, type, indices); } else { - api()->glDrawElementsInstancedANGLEFn(mode, count, type, indices, - primcount); + if (option == DrawElementsOption::Default) { + api()->glDrawElementsInstancedANGLEFn(mode, count, type, indices, + primcount); + } else { + GLint basevertex = basevertices[draw_id]; + GLuint baseinstance = baseinstances[draw_id]; + if (base_vertex_location >= 0) { + api()->glUniform1iFn(base_vertex_location, basevertex); + } + if (base_instance_location >= 0) { + api()->glUniform1iFn(base_instance_location, baseinstance); + } + api()->glDrawElementsInstancedBaseVertexBaseInstanceANGLEFn( + mode, count, type, indices, primcount, basevertex, baseinstance); + } } } if (state_.enable_flags.primitive_restart_fixed_index && @@ -11607,6 +11753,16 @@ ALWAYS_INLINE error::Error GLES2DecoderImpl::DoMultiDrawElements( if (simulated_fixed_attribs) { RestoreStateForSimulatedFixedAttribs(); } + // only reset base vertex and base instance shader variable when it's + // possibly non-zero + if (option == DrawElementsOption::UseBaseVertexBaseInstance) { + if (base_vertex_location >= 0) { + api()->glUniform1iFn(base_vertex_location, 0); + } + if (base_instance_location >= 0) { + api()->glUniform1iFn(base_instance_location, 0); + } + } } if (simulated_attrib_0) { // We don't have to restore attrib 0 generic data at the end of this @@ -11625,9 +11781,9 @@ error::Error GLES2DecoderImpl::HandleDrawElements( *static_cast<const volatile gles2::cmds::DrawElements*>(cmd_data); GLsizei count = static_cast<GLsizei>(c.count); int32_t offset = static_cast<int32_t>(c.index_offset); - return DoMultiDrawElements("glDrawElements", false, - static_cast<GLenum>(c.mode), &count, - static_cast<GLenum>(c.type), &offset, nullptr, 1); + return DoMultiDrawElements<DrawElementsOption::Default>( + "glDrawElements", false, static_cast<GLenum>(c.mode), &count, + static_cast<GLenum>(c.type), &offset, nullptr, nullptr, nullptr, 1); } error::Error GLES2DecoderImpl::HandleDrawElementsInstancedANGLE( @@ -11642,9 +11798,32 @@ error::Error GLES2DecoderImpl::HandleDrawElementsInstancedANGLE( GLsizei count = static_cast<GLsizei>(c.count); int32_t offset = static_cast<int32_t>(c.index_offset); GLsizei primcount = static_cast<GLsizei>(c.primcount); - return DoMultiDrawElements( + + return DoMultiDrawElements<DrawElementsOption::Default>( "glDrawElementsInstancedANGLE", true, static_cast<GLenum>(c.mode), &count, - static_cast<GLenum>(c.type), &offset, &primcount, 1); + static_cast<GLenum>(c.type), &offset, &primcount, nullptr, nullptr, 1); +} + +error::Error +GLES2DecoderImpl::HandleDrawElementsInstancedBaseVertexBaseInstanceANGLE( + uint32_t immediate_data_size, + const volatile void* cmd_data) { + const volatile gles2::cmds::DrawElementsInstancedBaseVertexBaseInstanceANGLE& + c = 
*static_cast<const volatile gles2::cmds:: + DrawElementsInstancedBaseVertexBaseInstanceANGLE*>( + cmd_data); + if (!features().angle_instanced_arrays) + return error::kUnknownCommand; + + GLsizei count = static_cast<GLsizei>(c.count); + int32_t offset = static_cast<int32_t>(c.index_offset); + GLsizei primcount = static_cast<GLsizei>(c.primcount); + GLint basevertex = static_cast<GLint>(c.basevertex); + GLuint baseinstance = static_cast<GLuint>(c.baseinstance); + return DoMultiDrawElements<DrawElementsOption::UseBaseVertexBaseInstance>( + "glDrawElementsInstancedBaseVertexBaseInstanceANGLE", true, + static_cast<GLenum>(c.mode), &count, static_cast<GLenum>(c.type), &offset, + &primcount, &basevertex, &baseinstance, 1); } void GLES2DecoderImpl::DoMultiDrawBeginCHROMIUM(GLsizei drawcount) { @@ -11663,25 +11842,42 @@ void GLES2DecoderImpl::DoMultiDrawEndCHROMIUM() { } switch (result.draw_function) { case MultiDrawManager::DrawFunction::DrawArrays: - DoMultiDrawArrays("glMultiDrawArraysWEBGL", false, result.mode, - result.firsts.data(), result.counts.data(), nullptr, - result.drawcount); + DoMultiDrawArrays<DrawArraysOption::Default>( + "glMultiDrawArraysWEBGL", false, result.mode, result.firsts.data(), + result.counts.data(), nullptr, nullptr, result.drawcount); break; case MultiDrawManager::DrawFunction::DrawArraysInstanced: - DoMultiDrawArrays("glMultiDrawArraysInstancedWEBGL", true, result.mode, - result.firsts.data(), result.counts.data(), - result.instance_counts.data(), result.drawcount); + DoMultiDrawArrays<DrawArraysOption::Default>( + "glMultiDrawArraysInstancedWEBGL", true, result.mode, + result.firsts.data(), result.counts.data(), + result.instance_counts.data(), nullptr, result.drawcount); + break; + case MultiDrawManager::DrawFunction::DrawArraysInstancedBaseInstance: + DoMultiDrawArrays<DrawArraysOption::UseBaseInstance>( + "glMultiDrawArraysInstancedBaseInstanceWEBGL", true, result.mode, + result.firsts.data(), result.counts.data(), + result.instance_counts.data(), result.baseinstances.data(), + result.drawcount); break; case MultiDrawManager::DrawFunction::DrawElements: - DoMultiDrawElements("glMultiDrawElementsWEBGL", false, result.mode, - result.counts.data(), result.type, - result.offsets.data(), nullptr, result.drawcount); + DoMultiDrawElements<DrawElementsOption::Default>( + "glMultiDrawElementsWEBGL", false, result.mode, result.counts.data(), + result.type, result.offsets.data(), nullptr, nullptr, nullptr, + result.drawcount); break; case MultiDrawManager::DrawFunction::DrawElementsInstanced: - DoMultiDrawElements("glMultiDrawElementsInstancedWEBGL", true, - result.mode, result.counts.data(), result.type, - result.offsets.data(), result.instance_counts.data(), - result.drawcount); + DoMultiDrawElements<DrawElementsOption::Default>( + "glMultiDrawElementsInstancedWEBGL", true, result.mode, + result.counts.data(), result.type, result.offsets.data(), + result.instance_counts.data(), nullptr, nullptr, result.drawcount); + break; + case MultiDrawManager::DrawFunction:: + DrawElementsInstancedBaseVertexBaseInstance: + DoMultiDrawElements<DrawElementsOption::UseBaseVertexBaseInstance>( + "glMultiDrawElementsInstancedBaseVertexBaseInstanceWEBGL", true, + result.mode, result.counts.data(), result.type, result.offsets.data(), + result.instance_counts.data(), result.basevertices.data(), + result.baseinstances.data(), result.drawcount); break; default: NOTREACHED(); @@ -11734,7 +11930,7 @@ error::Error GLES2DecoderImpl::HandleMultiDrawArraysInstancedCHROMIUM( *static_cast< const
volatile gles2::cmds::MultiDrawArraysInstancedCHROMIUM*>( cmd_data); - if (!features().webgl_multi_draw_instanced) { + if (!features().webgl_multi_draw) { return error::kUnknownCommand; } @@ -11775,6 +11971,63 @@ error::Error GLES2DecoderImpl::HandleMultiDrawArraysInstancedCHROMIUM( return error::kNoError; } +error::Error +GLES2DecoderImpl::HandleMultiDrawArraysInstancedBaseInstanceCHROMIUM( + uint32_t immediate_data_size, + const volatile void* cmd_data) { + const volatile gles2::cmds::MultiDrawArraysInstancedBaseInstanceCHROMIUM& c = + *static_cast<const volatile gles2::cmds:: + MultiDrawArraysInstancedBaseInstanceCHROMIUM*>(cmd_data); + if (!features().webgl_multi_draw_instanced_base_vertex_base_instance) { + return error::kUnknownCommand; + } + + GLenum mode = static_cast<GLenum>(c.mode); + GLsizei drawcount = static_cast<GLsizei>(c.drawcount); + + uint32_t firsts_size, counts_size, instance_counts_size, baseinstances_size; + base::CheckedNumeric<uint32_t> checked_size(drawcount); + if (!(checked_size * sizeof(GLint)).AssignIfValid(&firsts_size)) { + return error::kOutOfBounds; + } + if (!(checked_size * sizeof(GLsizei)).AssignIfValid(&counts_size)) { + return error::kOutOfBounds; + } + if (!(checked_size * sizeof(GLsizei)).AssignIfValid(&instance_counts_size)) { + return error::kOutOfBounds; + } + if (!(checked_size * sizeof(GLuint)).AssignIfValid(&baseinstances_size)) { + return error::kOutOfBounds; + } + const GLint* firsts = GetSharedMemoryAs<const GLint*>( + c.firsts_shm_id, c.firsts_shm_offset, firsts_size); + const GLsizei* counts = GetSharedMemoryAs<const GLsizei*>( + c.counts_shm_id, c.counts_shm_offset, counts_size); + const GLsizei* instance_counts = GetSharedMemoryAs<const GLsizei*>( + c.instance_counts_shm_id, c.instance_counts_shm_offset, + instance_counts_size); + const GLuint* baseinstances_counts = GetSharedMemoryAs<const GLuint*>( + c.baseinstances_shm_id, c.baseinstances_shm_offset, baseinstances_size); + if (firsts == nullptr) { + return error::kOutOfBounds; + } + if (counts == nullptr) { + return error::kOutOfBounds; + } + if (instance_counts == nullptr) { + return error::kOutOfBounds; + } + if (baseinstances_counts == nullptr) { + return error::kOutOfBounds; + } + if (!multi_draw_manager_->MultiDrawArraysInstancedBaseInstance( + mode, firsts, counts, instance_counts, baseinstances_counts, + drawcount)) { + return error::kInvalidArguments; + } + return error::kNoError; +} + error::Error GLES2DecoderImpl::HandleMultiDrawElementsCHROMIUM( uint32_t immediate_data_size, const volatile void* cmd_data) { @@ -11821,7 +12074,7 @@ error::Error GLES2DecoderImpl::HandleMultiDrawElementsInstancedCHROMIUM( *static_cast< const volatile gles2::cmds::MultiDrawElementsInstancedCHROMIUM*>( cmd_data); - if (!features().webgl_multi_draw_instanced) { + if (!features().webgl_multi_draw) { return error::kUnknownCommand; } @@ -11863,6 +12116,76 @@ error::Error GLES2DecoderImpl::HandleMultiDrawElementsInstancedCHROMIUM( return error::kNoError; } +error::Error GLES2DecoderImpl:: + HandleMultiDrawElementsInstancedBaseVertexBaseInstanceCHROMIUM( + uint32_t immediate_data_size, + const volatile void* cmd_data) { + const volatile gles2::cmds:: + MultiDrawElementsInstancedBaseVertexBaseInstanceCHROMIUM& c = + *static_cast< + const volatile gles2::cmds:: + MultiDrawElementsInstancedBaseVertexBaseInstanceCHROMIUM*>( + cmd_data); + if (!features().webgl_multi_draw_instanced_base_vertex_base_instance) { + return error::kUnknownCommand; + } + + GLenum mode = static_cast<GLenum>(c.mode); + GLenum type 
= static_cast<GLenum>(c.type); + GLsizei drawcount = static_cast<GLsizei>(c.drawcount); + + uint32_t counts_size, offsets_size, instance_counts_size, basevertices_size, + baseinstances_size; + base::CheckedNumeric<uint32_t> checked_size(drawcount); + if (!(checked_size * sizeof(GLsizei)).AssignIfValid(&counts_size)) { + return error::kOutOfBounds; + } + if (!(checked_size * sizeof(GLsizei)).AssignIfValid(&offsets_size)) { + return error::kOutOfBounds; + } + if (!(checked_size * sizeof(GLsizei)).AssignIfValid(&instance_counts_size)) { + return error::kOutOfBounds; + } + if (!(checked_size * sizeof(GLint)).AssignIfValid(&basevertices_size)) { + return error::kOutOfBounds; + } + if (!(checked_size * sizeof(GLuint)).AssignIfValid(&baseinstances_size)) { + return error::kOutOfBounds; + } + const GLsizei* counts = GetSharedMemoryAs<const GLsizei*>( + c.counts_shm_id, c.counts_shm_offset, counts_size); + const GLsizei* offsets = GetSharedMemoryAs<const GLsizei*>( + c.offsets_shm_id, c.offsets_shm_offset, offsets_size); + const GLsizei* instance_counts = GetSharedMemoryAs<const GLsizei*>( + c.instance_counts_shm_id, c.instance_counts_shm_offset, + instance_counts_size); + const GLint* basevertices = GetSharedMemoryAs<const GLint*>( + c.basevertices_shm_id, c.basevertices_shm_offset, basevertices_size); + const GLuint* baseinstances = GetSharedMemoryAs<const GLuint*>( + c.baseinstances_shm_id, c.baseinstances_shm_offset, baseinstances_size); + if (counts == nullptr) { + return error::kOutOfBounds; + } + if (offsets == nullptr) { + return error::kOutOfBounds; + } + if (instance_counts == nullptr) { + return error::kOutOfBounds; + } + if (basevertices == nullptr) { + return error::kOutOfBounds; + } + if (baseinstances == nullptr) { + return error::kOutOfBounds; + } + if (!multi_draw_manager_->MultiDrawElementsInstancedBaseVertexBaseInstance( + mode, counts, type, offsets, instance_counts, basevertices, + baseinstances, drawcount)) { + return error::kInvalidArguments; + } + return error::kNoError; +} + GLuint GLES2DecoderImpl::DoGetMaxValueInBufferCHROMIUM( GLuint buffer_id, GLsizei count, GLenum type, GLuint offset) { GLuint max_vertex_accessed = 0; @@ -13898,6 +14221,8 @@ error::Error GLES2DecoderImpl::HandleGetString(uint32_t immediate_data_size, if (feature_info_->IsWebGLContext()) { if (!derivatives_explicitly_enabled_) extension_set.erase(kOESDerivativeExtension); + if (!fbo_render_mipmap_explicitly_enabled_) + extension_set.erase(kOESFboRenderMipmapExtension); if (!frag_depth_explicitly_enabled_) extension_set.erase(kEXTFragDepthExtension); if (!draw_buffers_explicitly_enabled_) @@ -13906,8 +14231,12 @@ error::Error GLES2DecoderImpl::HandleGetString(uint32_t immediate_data_size, extension_set.erase(kEXTShaderTextureLodExtension); if (!multi_draw_explicitly_enabled_) extension_set.erase(kWEBGLMultiDrawExtension); - if (!multi_draw_instanced_explicitly_enabled_) - extension_set.erase(kWEBGLMultiDrawInstancedExtension); + if (!draw_instanced_base_vertex_base_instance_explicitly_enabled_) + extension_set.erase( + kWEBGLDrawInstancedBaseVertexBaseInstanceExtension); + if (!multi_draw_instanced_base_vertex_base_instance_explicitly_enabled_) + extension_set.erase( + kWEBGLMultiDrawInstancedBaseVertexBaseInstanceExtension); } if (supports_post_sub_buffer_) extension_set.insert("GL_CHROMIUM_post_sub_buffer"); @@ -16793,39 +17122,60 @@ error::Error GLES2DecoderImpl::HandleRequestExtensionCHROMIUM( feature_str = feature_str + " "; bool desire_standard_derivatives = false; + bool desire_fbo_render_mipmap = 
false; bool desire_frag_depth = false; bool desire_draw_buffers = false; bool desire_shader_texture_lod = false; bool desire_multi_draw = false; - bool desire_multi_draw_instanced = false; + bool desire_draw_instanced_base_vertex_base_instance = false; + bool desire_multi_draw_instanced_base_vertex_base_instance = false; if (feature_info_->context_type() == CONTEXT_TYPE_WEBGL1) { desire_standard_derivatives = feature_str.find("GL_OES_standard_derivatives ") != std::string::npos; + desire_fbo_render_mipmap = + feature_str.find("GL_OES_fbo_render_mipmap ") != std::string::npos; desire_frag_depth = feature_str.find("GL_EXT_frag_depth ") != std::string::npos; desire_draw_buffers = feature_str.find("GL_EXT_draw_buffers ") != std::string::npos; desire_shader_texture_lod = feature_str.find("GL_EXT_shader_texture_lod ") != std::string::npos; + } else if (feature_info_->context_type() == CONTEXT_TYPE_WEBGL2) { + desire_draw_instanced_base_vertex_base_instance = + feature_str.find( + "GL_WEBGL_draw_instanced_base_vertex_base_instance ") != + std::string::npos; + desire_multi_draw_instanced_base_vertex_base_instance = + feature_str.find( + "GL_WEBGL_multi_draw_instanced_base_vertex_base_instance ") != + std::string::npos; } if (feature_info_->IsWebGLContext()) { desire_multi_draw = feature_str.find("GL_WEBGL_multi_draw ") != std::string::npos; - desire_multi_draw_instanced = - feature_str.find("GL_WEBGL_multi_draw_instanced ") != std::string::npos; } if (desire_standard_derivatives != derivatives_explicitly_enabled_ || + desire_fbo_render_mipmap != fbo_render_mipmap_explicitly_enabled_ || desire_frag_depth != frag_depth_explicitly_enabled_ || desire_draw_buffers != draw_buffers_explicitly_enabled_ || desire_shader_texture_lod != shader_texture_lod_explicitly_enabled_ || desire_multi_draw != multi_draw_explicitly_enabled_ || - desire_multi_draw_instanced != multi_draw_instanced_explicitly_enabled_) { + desire_draw_instanced_base_vertex_base_instance != + draw_instanced_base_vertex_base_instance_explicitly_enabled_ || + desire_multi_draw_instanced_base_vertex_base_instance != + multi_draw_instanced_base_vertex_base_instance_explicitly_enabled_) { derivatives_explicitly_enabled_ |= desire_standard_derivatives; + fbo_render_mipmap_explicitly_enabled_ |= desire_fbo_render_mipmap; frag_depth_explicitly_enabled_ |= desire_frag_depth; draw_buffers_explicitly_enabled_ |= desire_draw_buffers; shader_texture_lod_explicitly_enabled_ |= desire_shader_texture_lod; multi_draw_explicitly_enabled_ |= desire_multi_draw; - multi_draw_instanced_explicitly_enabled_ |= desire_multi_draw_instanced; + draw_instanced_base_vertex_base_instance_explicitly_enabled_ |= + desire_draw_instanced_base_vertex_base_instance; + multi_draw_instanced_base_vertex_base_instance_explicitly_enabled_ |= + desire_multi_draw_instanced_base_vertex_base_instance; DestroyShaderTranslator(); } @@ -16858,6 +17208,9 @@ error::Error GLES2DecoderImpl::HandleRequestExtensionCHROMIUM( if (feature_str.find("GL_EXT_float_blend ") != std::string::npos) { feature_info_->EnableEXTFloatBlend(); } + if (feature_str.find("GL_OES_fbo_render_mipmap ") != std::string::npos) { + feature_info_->EnableOESFboRenderMipmap(); + } UpdateCapabilities(); @@ -18610,63 +18963,6 @@ void GLES2DecoderImpl::DoEndSharedImageAccessDirectCHROMIUM(GLuint client_id) { texture_ref->EndAccessSharedImage(); } -void GLES2DecoderImpl::DoApplyScreenSpaceAntialiasingCHROMIUM() { - Framebuffer* bound_framebuffer = - GetFramebufferInfoForTarget(GL_DRAW_FRAMEBUFFER); - // TODO(dshwang):
support it even after glBindFrameBuffer(GL_FRAMEBUFFER, 0). - // skia will need to render to the window. crbug.com/656618 - if (!bound_framebuffer) { - LOCAL_SET_GL_ERROR(GL_INVALID_OPERATION, - "glApplyScreenSpaceAntialiasingCHROMIUM", - "no bound framebuffer object"); - return; - } - - // Apply CMAA(Conservative Morphological Anti-Aliasing) algorithm to the - // color attachments of currently bound draw framebuffer. - // Reference GL_INTEL_framebuffer_CMAA for details. - // Use platform version if available. - if (!feature_info_->feature_flags() - .use_chromium_screen_space_antialiasing_via_shaders) { - api()->glApplyFramebufferAttachmentCMAAINTELFn(); - } else { - // Defer initializing the CopyTextureCHROMIUMResourceManager until it is - // needed because it takes ??s of milliseconds to initialize. - if (!apply_framebuffer_attachment_cmaa_intel_.get()) { - LOCAL_COPY_REAL_GL_ERRORS_TO_WRAPPER( - "glApplyFramebufferAttachmentCMAAINTEL"); - apply_framebuffer_attachment_cmaa_intel_.reset( - new ApplyFramebufferAttachmentCMAAINTELResourceManager()); - apply_framebuffer_attachment_cmaa_intel_->Initialize(this); - if (LOCAL_PEEK_GL_ERROR("glApplyFramebufferAttachmentCMAAINTEL") != - GL_NO_ERROR) - return; - } - static const char kFunctionName[] = - "glApplyScreenSpaceAntialiasingCHROMIUM"; - if (!InitializeCopyTextureCHROMIUM(kFunctionName)) - return; - for (uint32_t i = 0; i < group_->max_draw_buffers(); ++i) { - const Framebuffer::Attachment* attachment = - bound_framebuffer->GetAttachment(GL_COLOR_ATTACHMENT0 + i); - if (attachment && attachment->IsTextureAttachment()) { - GLenum internal_format = attachment->internal_format(); - if (!CanUseCopyTextureCHROMIUMInternalFormat(internal_format)) { - LOCAL_SET_GL_ERROR(GL_INVALID_OPERATION, kFunctionName, - "Apply CMAA on framebuffer with attachment in " - "invalid internalformat."); - return; - } - } - } - - apply_framebuffer_attachment_cmaa_intel_ - ->ApplyFramebufferAttachmentCMAAINTEL(this, bound_framebuffer, - copy_texture_chromium_.get(), - texture_manager()); - } -} - void GLES2DecoderImpl::DoInsertEventMarkerEXT( GLsizei length, const GLchar* marker) { if (!marker) { diff --git a/chromium/gpu/command_buffer/service/gles2_cmd_decoder.h b/chromium/gpu/command_buffer/service/gles2_cmd_decoder.h index c2e0e055615..f4c3014d1a5 100644 --- a/chromium/gpu/command_buffer/service/gles2_cmd_decoder.h +++ b/chromium/gpu/command_buffer/service/gles2_cmd_decoder.h @@ -53,8 +53,10 @@ class ShaderTranslatorInterface; class TransformFeedbackManager; class VertexArrayManager; -struct DisallowedFeatures { - DisallowedFeatures() = default; +struct GPU_GLES2_EXPORT DisallowedFeatures { + DisallowedFeatures(); + ~DisallowedFeatures(); + DisallowedFeatures(const DisallowedFeatures&); void AllowExtensions() { chromium_color_buffer_float_rgba = false; @@ -65,6 +67,7 @@ struct DisallowedFeatures { oes_texture_float_linear = false; oes_texture_half_float_linear = false; ext_float_blend = false; + oes_fbo_render_mipmap = false; } bool operator==(const DisallowedFeatures& other) const { @@ -80,6 +83,7 @@ struct DisallowedFeatures { bool oes_texture_float_linear = false; bool oes_texture_half_float_linear = false; bool ext_float_blend = false; + bool oes_fbo_render_mipmap = false; }; // This class implements the DecoderContext interface, decoding GLES2 diff --git a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_autogen.h b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_autogen.h index 47e756ea961..7cccbf52378 100644 --- 
a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_autogen.h +++ b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_autogen.h @@ -5407,17 +5407,6 @@ error::Error GLES2DecoderImpl::HandleBlendBarrierKHR( return error::kNoError; } -error::Error GLES2DecoderImpl::HandleApplyScreenSpaceAntialiasingCHROMIUM( - uint32_t immediate_data_size, - const volatile void* cmd_data) { - if (!features().chromium_screen_space_antialiasing) { - return error::kUnknownCommand; - } - - DoApplyScreenSpaceAntialiasingCHROMIUM(); - return error::kNoError; -} - error::Error GLES2DecoderImpl::HandleUniformMatrix4fvStreamTextureMatrixCHROMIUMImmediate( uint32_t immediate_data_size, diff --git a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough.cc b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough.cc index ad87416a91c..e3afefb6d2f 100644 --- a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough.cc +++ b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough.cc @@ -24,6 +24,7 @@ #include "gpu/command_buffer/service/program_cache.h" #include "gpu/command_buffer/service/shared_image_representation.h" #include "ui/gl/gl_version_info.h" +#include "ui/gl/progress_reporter.h" #if defined(OS_WIN) #include "gpu/command_buffer/service/shared_image_backing_factory_d3d.h" @@ -176,32 +177,49 @@ bool PassthroughResources::HasTexturesPendingDestruction() const { return !textures_pending_destruction.empty(); } -void PassthroughResources::Destroy(gl::GLApi* api) { +void PassthroughResources::Destroy(gl::GLApi* api, + gl::ProgressReporter* progress_reporter) { bool have_context = !!api; // Only delete textures that are not referenced by a TexturePassthrough // object, they handle their own deletion once all references are lost - DeleteServiceObjects(&texture_id_map, have_context, - [this, api](GLuint client_id, GLuint texture) { - if (!texture_object_map.HasClientID(client_id)) { - api->glDeleteTexturesFn(1, &texture); - } - }); - DeleteServiceObjects(&buffer_id_map, have_context, - [api](GLuint client_id, GLuint buffer) { - api->glDeleteBuffersARBFn(1, &buffer); - }); - DeleteServiceObjects(&renderbuffer_id_map, have_context, - [api](GLuint client_id, GLuint renderbuffer) { - api->glDeleteRenderbuffersEXTFn(1, &renderbuffer); - }); + DeleteServiceObjects( + &texture_id_map, have_context, + [this, api, progress_reporter](GLuint client_id, GLuint texture) { + if (!texture_object_map.HasClientID(client_id)) { + api->glDeleteTexturesFn(1, &texture); + if (progress_reporter) { + progress_reporter->ReportProgress(); + } + } + }); + DeleteServiceObjects( + &buffer_id_map, have_context, + [api, progress_reporter](GLuint client_id, GLuint buffer) { + api->glDeleteBuffersARBFn(1, &buffer); + if (progress_reporter) { + progress_reporter->ReportProgress(); + } + }); + DeleteServiceObjects( + &renderbuffer_id_map, have_context, + [api, progress_reporter](GLuint client_id, GLuint renderbuffer) { + api->glDeleteRenderbuffersEXTFn(1, &renderbuffer); + if (progress_reporter) { + progress_reporter->ReportProgress(); + } + }); DeleteServiceObjects(&sampler_id_map, have_context, [api](GLuint client_id, GLuint sampler) { api->glDeleteSamplersFn(1, &sampler); }); - DeleteServiceObjects(&program_id_map, have_context, - [api](GLuint client_id, GLuint program) { - api->glDeleteProgramFn(program); - }); + DeleteServiceObjects( + &program_id_map, have_context, + [api, progress_reporter](GLuint client_id, GLuint program) { + api->glDeleteProgramFn(program); + if (progress_reporter) { + 
progress_reporter->ReportProgress(); + } + }); DeleteServiceObjects(&shader_id_map, have_context, [api](GLuint client_id, GLuint shader) { api->glDeleteShaderFn(shader); @@ -772,6 +790,10 @@ gpu::ContextResult GLES2DecoderPassthroughImpl::Initialize( "GL_CHROMIUM_bind_uniform_location", "GL_CHROMIUM_sync_query", "GL_EXT_debug_marker", + "GL_EXT_memory_object", + "GL_EXT_memory_object_fd", + "GL_EXT_semaphore", + "GL_EXT_semaphore_fd", "GL_KHR_debug", "GL_NV_fence", "GL_OES_EGL_image", diff --git a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough.h b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough.h index 319833e0b8e..e782addd70e 100644 --- a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough.h +++ b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough.h @@ -38,6 +38,7 @@ namespace gl { class GLFence; +class ProgressReporter; } namespace gpu { @@ -64,7 +65,7 @@ struct PassthroughResources { ~PassthroughResources(); // api is null if we don't have a context (e.g. lost). - void Destroy(gl::GLApi* api); + void Destroy(gl::GLApi* api, gl::ProgressReporter* progress_reporter); // Resources stores a shared list of textures pending deletion. // If we don't have a context when this function is called, we can mark diff --git a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough_doer_prototypes.h b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough_doer_prototypes.h index 3e6179a51a8..2f790cbf1fd 100644 --- a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough_doer_prototypes.h +++ b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough_doer_prototypes.h @@ -857,11 +857,24 @@ error::Error DoDrawArraysInstancedANGLE(GLenum mode, GLint first, GLsizei count, GLsizei primcount); +error::Error DoDrawArraysInstancedBaseInstanceANGLE(GLenum mode, + GLint first, + GLsizei count, + GLsizei primcount, + GLuint baseinstance); error::Error DoDrawElementsInstancedANGLE(GLenum mode, GLsizei count, GLenum type, const void* indices, GLsizei primcount); +error::Error DoDrawElementsInstancedBaseVertexBaseInstanceANGLE( + GLenum mode, + GLsizei count, + GLenum type, + const void* indices, + GLsizei primcount, + GLint basevertex, + GLuint baseinstance); error::Error DoVertexAttribDivisorANGLE(GLuint index, GLuint divisor); error::Error DoProduceTextureDirectCHROMIUM(GLuint texture_client_id, const volatile GLbyte* mailbox); @@ -1050,7 +1063,6 @@ error::Error DoProgramPathFragmentInputGenCHROMIUM(GLuint program, GLsizei coeffsBufsize); error::Error DoCoverageModulationCHROMIUM(GLenum components); error::Error DoBlendBarrierKHR(); -error::Error DoApplyScreenSpaceAntialiasingCHROMIUM(); error::Error DoBindFragDataLocationIndexedEXT(GLuint program, GLuint colorNumber, GLuint index, diff --git
result.firsts.data(), result.counts.data(), + result.instance_counts.data(), result.baseinstances.data(), + result.drawcount); + return error::kNoError; case MultiDrawManager::DrawFunction::DrawElements: api()->glMultiDrawElementsANGLEFn(result.mode, result.counts.data(), result.type, result.indices.data(), @@ -2342,6 +2348,13 @@ error::Error GLES2DecoderPassthroughImpl::DoMultiDrawEndCHROMIUM() { result.mode, result.counts.data(), result.type, result.indices.data(), result.instance_counts.data(), result.drawcount); return error::kNoError; + case MultiDrawManager::DrawFunction:: + DrawElementsInstancedBaseVertexBaseInstance: + api()->glMultiDrawElementsInstancedBaseVertexBaseInstanceANGLEFn( + result.mode, result.counts.data(), result.type, result.indices.data(), + result.instance_counts.data(), result.basevertices.data(), + result.baseinstances.data(), result.drawcount); + return error::kNoError; default: NOTREACHED(); return error::kLostContext; @@ -3367,10 +3380,19 @@ error::Error GLES2DecoderPassthroughImpl::DoTexStorage2DImageCHROMIUM( error::Error GLES2DecoderPassthroughImpl::DoGenQueriesEXT( GLsizei n, volatile GLuint* queries) { - return GenHelper(n, queries, &query_id_map_, - [this](GLsizei n, GLuint* queries) { - api()->glGenQueriesFn(n, queries); - }); + return GenHelper( + n, queries, &query_id_map_, [this](GLsizei n, GLuint* queries) { + if (feature_info_->feature_flags().occlusion_query_boolean) { + // glGenQueries is not loaded unless GL_EXT_occlusion_query_boolean is + // present. All queries must be emulated so they don't need to be + // generated. + api()->glGenQueriesFn(n, queries); + } else { + for (GLsizei i = 0; i < n; i++) { + queries[i] = 0; + } + } + }); } error::Error GLES2DecoderPassthroughImpl::DoDeleteQueriesEXT( @@ -3406,10 +3428,16 @@ error::Error GLES2DecoderPassthroughImpl::DoDeleteQueriesEXT( RemovePendingQuery(query_service_id); } - return DeleteHelper(queries_copy.size(), queries_copy.data(), &query_id_map_, - [this](GLsizei n, GLuint* queries) { - api()->glDeleteQueriesFn(n, queries); - }); + return DeleteHelper( + queries_copy.size(), queries_copy.data(), &query_id_map_, + [this](GLsizei n, GLuint* queries) { + if (feature_info_->feature_flags().occlusion_query_boolean) { + // glDeleteQueries is not loaded unless GL_EXT_occlusion_query_boolean + // is present. All queries must be emulated so they don't need to be + // deleted. 
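The Gen/Delete query hooks in this hunk share one guard: the GL query entry points are only loaded when GL_EXT_occlusion_query_boolean is present, so in its absence the decoder hands out id 0 and services queries entirely from its emulation bookkeeping. Condensed, the guard looks like this (the standalone helper is hypothetical; the feature flag and GL calls are the ones used above):

    // Sketch: generate query ids only when the extension that loads
    // glGenQueries is available; emulated queries need no GL object.
    void GenQueryIds(gl::GLApi* api, bool occlusion_query_boolean_available,
                     GLsizei n, GLuint* queries) {
      if (occlusion_query_boolean_available) {
        api->glGenQueriesFn(n, queries);
      } else {
        for (GLsizei i = 0; i < n; ++i)
          queries[i] = 0;
      }
    }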
+ api()->glDeleteQueriesFn(n, queries); + } + }); } error::Error GLES2DecoderPassthroughImpl::DoQueryCounterEXT( @@ -3428,14 +3456,25 @@ error::Error GLES2DecoderPassthroughImpl::DoQueryCounterEXT( GLuint service_id = GetQueryServiceID(id, &query_id_map_); - // Flush all previous errors - CheckErrorCallbackState(); + if (IsEmulatedQueryTarget(target)) { + DCHECK_EQ(target, + static_cast<GLenum>(GL_COMMANDS_ISSUED_TIMESTAMP_CHROMIUM)); + } else { + // glQueryCounter is not loaded unless GL_EXT_disjoint_timer_query is present + if (!feature_info_->feature_flags().ext_disjoint_timer_query) { + InsertError(GL_INVALID_ENUM, "Invalid query target."); + return error::kNoError; + } - api()->glQueryCounterFn(service_id, target); + // Flush all previous errors + CheckErrorCallbackState(); - // Check if a new error was generated - if (CheckErrorCallbackState()) { - return error::kNoError; + api()->glQueryCounterFn(service_id, target); + + // Check if a new error was generated + if (CheckErrorCallbackState()) { + return error::kNoError; + } } QueryInfo* query_info = &query_info_map_[service_id]; @@ -3494,6 +3533,13 @@ error::Error GLES2DecoderPassthroughImpl::DoBeginQueryEXT( return error::kNoError; } } else { + // glBeginQuery is not loaded unless GL_EXT_occlusion_query_boolean is + // present + if (!feature_info_->feature_flags().occlusion_query_boolean) { + InsertError(GL_INVALID_ENUM, "Invalid query target."); + return error::kNoError; + } + // Flush all previous errors CheckErrorCallbackState(); @@ -3543,6 +3589,12 @@ error::Error GLES2DecoderPassthroughImpl::DoEndQueryEXT(GLenum target, query_service_id); } } else { + // glEndQuery is not loaded unless GL_EXT_occlusion_query_boolean is present + if (!feature_info_->feature_flags().occlusion_query_boolean) { + InsertError(GL_INVALID_ENUM, "Invalid query target."); + return error::kNoError; + } + // Flush all previous errors CheckErrorCallbackState(); @@ -4404,6 +4456,19 @@ error::Error GLES2DecoderPassthroughImpl::DoDrawArraysInstancedANGLE( return error::kNoError; } +error::Error +GLES2DecoderPassthroughImpl::DoDrawArraysInstancedBaseInstanceANGLE( + GLenum mode, + GLint first, + GLsizei count, + GLsizei primcount, + GLuint baseinstance) { + BindPendingImagesForSamplersIfNeeded(); + api()->glDrawArraysInstancedBaseInstanceANGLEFn(mode, first, count, primcount, + baseinstance); + return error::kNoError; +} + error::Error GLES2DecoderPassthroughImpl::DoDrawElementsInstancedANGLE( GLenum mode, GLsizei count, @@ -4415,6 +4480,21 @@ error::Error GLES2DecoderPassthroughImpl::DoDrawElementsInstancedANGLE( return error::kNoError; } +error::Error +GLES2DecoderPassthroughImpl::DoDrawElementsInstancedBaseVertexBaseInstanceANGLE( + GLenum mode, + GLsizei count, + GLenum type, + const void* indices, + GLsizei primcount, + GLint basevertex, + GLuint baseinstance) { + BindPendingImagesForSamplersIfNeeded(); + api()->glDrawElementsInstancedBaseVertexBaseInstanceANGLEFn( + mode, count, type, indices, primcount, basevertex, baseinstance); + return error::kNoError; +} + error::Error GLES2DecoderPassthroughImpl::DoVertexAttribDivisorANGLE( GLuint index, GLuint divisor) { @@ -5161,12 +5241,6 @@ error::Error GLES2DecoderPassthroughImpl::DoBlendBarrierKHR() { return error::kNoError; } -error::Error -GLES2DecoderPassthroughImpl::DoApplyScreenSpaceAntialiasingCHROMIUM() { - NOTIMPLEMENTED(); - return error::kNoError; -} - error::Error GLES2DecoderPassthroughImpl::DoBindFragDataLocationIndexedEXT( GLuint program, GLuint colorNumber, diff --git 
a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough_handlers.cc b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough_handlers.cc index d1600cdaf68..f9761ec8bc5 100644 --- a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough_handlers.cc +++ b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough_handlers.cc @@ -1778,6 +1778,27 @@ error::Error GLES2DecoderPassthroughImpl::HandleDrawArraysInstancedANGLE( return DoDrawArraysInstancedANGLE(mode, first, count, primcount); } +error::Error +GLES2DecoderPassthroughImpl::HandleDrawArraysInstancedBaseInstanceANGLE( + uint32_t immediate_data_size, + const volatile void* cmd_data) { + if (!features().webgl_draw_instanced_base_vertex_base_instance) { + return error::kUnknownCommand; + } + const volatile gles2::cmds::DrawArraysInstancedBaseInstanceANGLE& c = + *static_cast< + const volatile gles2::cmds::DrawArraysInstancedBaseInstanceANGLE*>( + cmd_data); + GLenum mode = static_cast<GLenum>(c.mode); + GLint first = static_cast<GLint>(c.first); + GLsizei count = static_cast<GLsizei>(c.count); + GLsizei primcount = static_cast<GLsizei>(c.primcount); + GLuint baseinstance = static_cast<GLuint>(c.baseinstance); + + return DoDrawArraysInstancedBaseInstanceANGLE(mode, first, count, primcount, + baseinstance); +} + error::Error GLES2DecoderPassthroughImpl::HandleDrawElementsInstancedANGLE( uint32_t immediate_data_size, const volatile void* cmd_data) { @@ -1797,6 +1818,30 @@ error::Error GLES2DecoderPassthroughImpl::HandleDrawElementsInstancedANGLE( return DoDrawElementsInstancedANGLE(mode, count, type, indices, primcount); } +error::Error GLES2DecoderPassthroughImpl:: + HandleDrawElementsInstancedBaseVertexBaseInstanceANGLE( + uint32_t immediate_data_size, + const volatile void* cmd_data) { + if (!features().webgl_draw_instanced_base_vertex_base_instance) { + return error::kUnknownCommand; + } + const volatile gles2::cmds::DrawElementsInstancedBaseVertexBaseInstanceANGLE& + c = *static_cast<const volatile gles2::cmds:: + DrawElementsInstancedBaseVertexBaseInstanceANGLE*>( + cmd_data); + GLenum mode = static_cast<GLenum>(c.mode); + GLsizei count = static_cast<GLsizei>(c.count); + GLenum type = static_cast<GLenum>(c.type); + const GLvoid* indices = + reinterpret_cast<const GLvoid*>(static_cast<uintptr_t>(c.index_offset)); + GLsizei primcount = static_cast<GLsizei>(c.primcount); + GLint basevertex = static_cast<GLint>(c.basevertex); + GLuint baseinstance = static_cast<GLuint>(c.baseinstance); + + return DoDrawElementsInstancedBaseVertexBaseInstanceANGLE( + mode, count, type, indices, primcount, basevertex, baseinstance); +} + error::Error GLES2DecoderPassthroughImpl::HandleMultiDrawArraysCHROMIUM( uint32_t immediate_data_size, const volatile void* cmd_data) { @@ -1842,7 +1887,7 @@ GLES2DecoderPassthroughImpl::HandleMultiDrawArraysInstancedCHROMIUM( *static_cast< const volatile gles2::cmds::MultiDrawArraysInstancedCHROMIUM*>( cmd_data); - if (!features().webgl_multi_draw_instanced) { + if (!features().webgl_multi_draw) { return error::kUnknownCommand; } @@ -1883,6 +1928,62 @@ GLES2DecoderPassthroughImpl::HandleMultiDrawArraysInstancedCHROMIUM( return error::kNoError; } +error::Error +GLES2DecoderPassthroughImpl::HandleMultiDrawArraysInstancedBaseInstanceCHROMIUM( + uint32_t immediate_data_size, + const volatile void* cmd_data) { + const volatile gles2::cmds::MultiDrawArraysInstancedBaseInstanceCHROMIUM& c = + *static_cast<const volatile gles2::cmds:: +
MultiDrawArraysInstancedBaseInstanceCHROMIUM*>(cmd_data); + if (!features().webgl_multi_draw_instanced_base_vertex_base_instance) { + return error::kUnknownCommand; + } + + GLenum mode = static_cast<GLenum>(c.mode); + GLsizei drawcount = static_cast<GLsizei>(c.drawcount); + + uint32_t firsts_size, counts_size, instance_counts_size, baseinstances_size; + base::CheckedNumeric<uint32_t> checked_size(drawcount); + if (!(checked_size * sizeof(GLint)).AssignIfValid(&firsts_size)) { + return error::kOutOfBounds; + } + if (!(checked_size * sizeof(GLsizei)).AssignIfValid(&counts_size)) { + return error::kOutOfBounds; + } + if (!(checked_size * sizeof(GLsizei)).AssignIfValid(&instance_counts_size)) { + return error::kOutOfBounds; + } + if (!(checked_size * sizeof(GLuint)).AssignIfValid(&baseinstances_size)) { + return error::kOutOfBounds; + } + const GLint* firsts = GetSharedMemoryAs<const GLint*>( + c.firsts_shm_id, c.firsts_shm_offset, firsts_size); + const GLsizei* counts = GetSharedMemoryAs<const GLsizei*>( + c.counts_shm_id, c.counts_shm_offset, counts_size); + const GLsizei* instance_counts = GetSharedMemoryAs<const GLsizei*>( + c.instance_counts_shm_id, c.instance_counts_shm_offset, + instance_counts_size); + const GLuint* baseinstances = GetSharedMemoryAs<const GLuint*>( + c.baseinstances_shm_id, c.baseinstances_shm_offset, baseinstances_size); + if (firsts == nullptr) { + return error::kOutOfBounds; + } + if (counts == nullptr) { + return error::kOutOfBounds; + } + if (instance_counts == nullptr) { + return error::kOutOfBounds; + } + if (baseinstances == nullptr) { + return error::kOutOfBounds; + } + if (!multi_draw_manager_->MultiDrawArraysInstancedBaseInstance( + mode, firsts, counts, instance_counts, baseinstances, drawcount)) { + return error::kInvalidArguments; + } + return error::kNoError; +} + error::Error GLES2DecoderPassthroughImpl::HandleMultiDrawElementsCHROMIUM( uint32_t immediate_data_size, const volatile void* cmd_data) { @@ -1930,7 +2031,7 @@ GLES2DecoderPassthroughImpl::HandleMultiDrawElementsInstancedCHROMIUM( *static_cast< const volatile gles2::cmds::MultiDrawElementsInstancedCHROMIUM*>( cmd_data); - if (!features().webgl_multi_draw_instanced) { + if (!features().webgl_multi_draw) { return error::kUnknownCommand; } @@ -1972,6 +2073,70 @@ GLES2DecoderPassthroughImpl::HandleMultiDrawElementsInstancedCHROMIUM( return error::kNoError; } +error::Error GLES2DecoderPassthroughImpl:: + HandleMultiDrawElementsInstancedBaseVertexBaseInstanceCHROMIUM( + uint32_t immediate_data_size, + const volatile void* cmd_data) { + const volatile gles2::cmds:: + MultiDrawElementsInstancedBaseVertexBaseInstanceCHROMIUM& c = + *static_cast< + const volatile gles2::cmds:: + MultiDrawElementsInstancedBaseVertexBaseInstanceCHROMIUM*>( + cmd_data); + if (!features().webgl_multi_draw_instanced_base_vertex_base_instance) { + return error::kUnknownCommand; + } + + GLenum mode = static_cast<GLenum>(c.mode); + GLenum type = static_cast<GLenum>(c.type); + GLsizei drawcount = static_cast<GLsizei>(c.drawcount); + + uint32_t counts_size, offsets_size, instance_counts_size, basevertices_size, + baseinstances_size; + base::CheckedNumeric<uint32_t> checked_size(drawcount); + if (!(checked_size * sizeof(GLsizei)).AssignIfValid(&counts_size)) { + return error::kOutOfBounds; + } + if (!(checked_size * sizeof(GLsizei)).AssignIfValid(&offsets_size)) { + return error::kOutOfBounds; + } + if (!(checked_size * sizeof(GLsizei)).AssignIfValid(&instance_counts_size)) { + return error::kOutOfBounds; + } + if (!(checked_size * 
sizeof(GLint)).AssignIfValid(&basevertices_size)) { + return error::kOutOfBounds; + } + if (!(checked_size * sizeof(GLuint)).AssignIfValid(&baseinstances_size)) { + return error::kOutOfBounds; + } + const GLsizei* counts = GetSharedMemoryAs<const GLsizei*>( + c.counts_shm_id, c.counts_shm_offset, counts_size); + const GLsizei* offsets = GetSharedMemoryAs<const GLsizei*>( + c.offsets_shm_id, c.offsets_shm_offset, offsets_size); + const GLsizei* instance_counts = GetSharedMemoryAs<const GLsizei*>( + c.instance_counts_shm_id, c.instance_counts_shm_offset, + instance_counts_size); + const GLint* basevertices = GetSharedMemoryAs<const GLint*>( + c.basevertices_shm_id, c.basevertices_shm_offset, basevertices_size); + const GLuint* baseinstances = GetSharedMemoryAs<const GLuint*>( + c.baseinstances_shm_id, c.baseinstances_shm_offset, baseinstances_size); + if (counts == nullptr) { + return error::kOutOfBounds; + } + if (offsets == nullptr) { + return error::kOutOfBounds; + } + if (instance_counts == nullptr) { + return error::kOutOfBounds; + } + if (basevertices == nullptr) { + return error::kOutOfBounds; + } + if (baseinstances == nullptr) { + return error::kOutOfBounds; + } + if (!multi_draw_manager_->MultiDrawElementsInstancedBaseVertexBaseInstance( + mode, counts, type, offsets, instance_counts, basevertices, + baseinstances, drawcount)) { + return error::kInvalidArguments; + } + return error::kNoError; +} + error::Error GLES2DecoderPassthroughImpl::HandleVertexAttribDivisorANGLE( uint32_t immediate_data_size, const volatile void* cmd_data) { diff --git a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough_handlers_autogen.cc b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough_handlers_autogen.cc index 019dfec502a..caac9cfa69f 100644 --- a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough_handlers_autogen.cc +++ b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough_handlers_autogen.cc @@ -4659,21 +4659,6 @@ error::Error GLES2DecoderPassthroughImpl::HandleBlendBarrierKHR( return error::kNoError; } -error::Error -GLES2DecoderPassthroughImpl::HandleApplyScreenSpaceAntialiasingCHROMIUM( - uint32_t immediate_data_size, - const volatile void* cmd_data) { - if (!features().chromium_screen_space_antialiasing) { - return error::kUnknownCommand; - } - - error::Error error = DoApplyScreenSpaceAntialiasingCHROMIUM(); - if (error != error::kNoError) { - return error; - } - return error::kNoError; -} - error::Error GLES2DecoderPassthroughImpl:: HandleUniformMatrix4fvStreamTextureMatrixCHROMIUMImmediate( uint32_t immediate_data_size, diff --git a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_2.cc b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_2.cc index 248497a4e09..8288b6d3b18 100644 --- a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_2.cc +++ b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_2.cc @@ -857,18 +857,6 @@ void GLES2DecoderTestBase::SpecializedSetup<cmds::Uniform4f, 0>( } template <> -void GLES2DecoderTestBase::SpecializedSetup<cmds::Uniform4i, 0>( - bool /* valid */) { - SetupShaderForUniform(GL_INT_VEC4); -} - -template <> -void GLES2DecoderTestBase::SpecializedSetup<cmds::Uniform4fvImmediate, 0>( - bool /* valid */) { - SetupShaderForUniform(GL_FLOAT_VEC4); -} - -template <> void GLES2DecoderTestBase::SpecializedSetup<cmds::UniformMatrix2fvImmediate, 0>( bool /* valid */) { SetupShaderForUniform(GL_FLOAT_MAT2); diff --git a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_2_autogen.h b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_2_autogen.h index
cbc26cce1e3..47fd77225a4 100644 --- a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_2_autogen.h +++ b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_2_autogen.h @@ -1307,25 +1307,4 @@ TEST_P(GLES2DecoderTest2, Uniform4fValidArgs) { EXPECT_EQ(error::kNoError, ExecuteCmd(cmd)); EXPECT_EQ(GL_NO_ERROR, GetGLError()); } - -TEST_P(GLES2DecoderTest2, Uniform4fvImmediateValidArgs) { - cmds::Uniform4fvImmediate& cmd = *GetImmediateAs<cmds::Uniform4fvImmediate>(); - SpecializedSetup<cmds::Uniform4fvImmediate, 0>(true); - GLfloat temp[4 * 2] = { - 0, - }; - EXPECT_CALL(*gl_, Uniform4fv(1, 2, PointsToArray(temp, 4))); - cmd.Init(1, 2, &temp[0]); - EXPECT_EQ(error::kNoError, ExecuteImmediateCmd(cmd, sizeof(temp))); - EXPECT_EQ(GL_NO_ERROR, GetGLError()); -} - -TEST_P(GLES2DecoderTest2, Uniform4iValidArgs) { - EXPECT_CALL(*gl_, Uniform4iv(1, 1, _)); - SpecializedSetup<cmds::Uniform4i, 0>(true); - cmds::Uniform4i cmd; - cmd.Init(1, 2, 3, 4, 5); - EXPECT_EQ(error::kNoError, ExecuteCmd(cmd)); - EXPECT_EQ(GL_NO_ERROR, GetGLError()); -} #endif // GPU_COMMAND_BUFFER_SERVICE_GLES2_CMD_DECODER_UNITTEST_2_AUTOGEN_H_ diff --git a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_3.cc b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_3.cc index 54be1035df4..580131f038c 100644 --- a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_3.cc +++ b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_3.cc @@ -51,6 +51,18 @@ INSTANTIATE_TEST_SUITE_P(Service, GLES2DecoderTest3, ::testing::Bool()); INSTANTIATE_TEST_SUITE_P(Service, GLES3DecoderTest3, ::testing::Bool()); template <> +void GLES2DecoderTestBase::SpecializedSetup<cmds::Uniform4fvImmediate, 0>( + bool /* valid */) { + SetupShaderForUniform(GL_FLOAT_VEC4); +} + +template <> +void GLES2DecoderTestBase::SpecializedSetup<cmds::Uniform4i, 0>( + bool /* valid */) { + SetupShaderForUniform(GL_INT_VEC4); +} + +template <> void GLES2DecoderTestBase::SpecializedSetup<cmds::Uniform4ivImmediate, 0>( bool /* valid */) { SetupShaderForUniform(GL_INT_VEC4); diff --git a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_3_autogen.h b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_3_autogen.h index 5f729cd71e9..00161c02032 100644 --- a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_3_autogen.h +++ b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_3_autogen.h @@ -12,6 +12,27 @@ #ifndef GPU_COMMAND_BUFFER_SERVICE_GLES2_CMD_DECODER_UNITTEST_3_AUTOGEN_H_ #define GPU_COMMAND_BUFFER_SERVICE_GLES2_CMD_DECODER_UNITTEST_3_AUTOGEN_H_ +TEST_P(GLES2DecoderTest3, Uniform4fvImmediateValidArgs) { + cmds::Uniform4fvImmediate& cmd = *GetImmediateAs<cmds::Uniform4fvImmediate>(); + SpecializedSetup<cmds::Uniform4fvImmediate, 0>(true); + GLfloat temp[4 * 2] = { + 0, + }; + EXPECT_CALL(*gl_, Uniform4fv(1, 2, PointsToArray(temp, 4))); + cmd.Init(1, 2, &temp[0]); + EXPECT_EQ(error::kNoError, ExecuteImmediateCmd(cmd, sizeof(temp))); + EXPECT_EQ(GL_NO_ERROR, GetGLError()); +} + +TEST_P(GLES2DecoderTest3, Uniform4iValidArgs) { + EXPECT_CALL(*gl_, Uniform4iv(1, 1, _)); + SpecializedSetup<cmds::Uniform4i, 0>(true); + cmds::Uniform4i cmd; + cmd.Init(1, 2, 3, 4, 5); + EXPECT_EQ(error::kNoError, ExecuteCmd(cmd)); + EXPECT_EQ(GL_NO_ERROR, GetGLError()); +} + TEST_P(GLES2DecoderTest3, Uniform4ivImmediateValidArgs) { cmds::Uniform4ivImmediate& cmd = *GetImmediateAs<cmds::Uniform4ivImmediate>(); SpecializedSetup<cmds::Uniform4ivImmediate, 0>(true); diff --git 
a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_base.cc b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_base.cc index 1a303316be9..45fae069c90 100644 --- a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_base.cc +++ b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_base.cc @@ -2428,8 +2428,9 @@ void GLES2DecoderPassthroughTestBase::SetUp() { context_creation_attribs_.stencil_size = 8; context_creation_attribs_.bind_generates_resource = true; - gl::init::InitializeGLOneOffImplementation(gl::kGLImplementationEGLANGLE, - false, false, false, true); + gl::init::InitializeStaticGLBindingsImplementation( + gl::kGLImplementationEGLANGLE, false); + gl::init::InitializeGLOneOffPlatformImplementation(false, false, false, true); scoped_refptr<gles2::FeatureInfo> feature_info = new gles2::FeatureInfo(); group_ = new gles2::ContextGroup( diff --git a/chromium/gpu/command_buffer/service/gles2_query_manager.cc b/chromium/gpu/command_buffer/service/gles2_query_manager.cc index fd5c7e84a40..6664023df7b 100644 --- a/chromium/gpu/command_buffer/service/gles2_query_manager.cc +++ b/chromium/gpu/command_buffer/service/gles2_query_manager.cc @@ -10,7 +10,6 @@ #include "base/atomicops.h" #include "base/bind.h" #include "base/logging.h" -#include "base/memory/shared_memory.h" #include "base/time/time.h" #include "gpu/command_buffer/service/error_state.h" #include "gpu/command_buffer/service/feature_info.h" diff --git a/chromium/gpu/command_buffer/service/gpu_switches.cc b/chromium/gpu/command_buffer/service/gpu_switches.cc index 484a13d81d1..f9aadc74c57 100644 --- a/chromium/gpu/command_buffer/service/gpu_switches.cc +++ b/chromium/gpu/command_buffer/service/gpu_switches.cc @@ -62,6 +62,12 @@ const char kGLShaderIntermOutput[] = "gl-shader-interm-output"; // round intermediate values in ANGLE. const char kEmulateShaderPrecision[] = "emulate-shader-precision"; +// Selects the type of the GrContext. +const char kGrContextType[] = "gr-context-type"; +const char kGrContextTypeGL[] = "gl"; +const char kGrContextTypeVulkan[] = "vulkan"; +const char kGrContextTypeMetal[] = "metal"; +const char kGrContextTypeDawn[] = "dawn"; // Enable Vulkan support and select Vulkan implementation, must also have // ENABLE_VULKAN defined. 
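The kGrContextType switch introduced above selects which backend the GPU process uses for its Skia GrContext. A hypothetical consumer of these constants could look like the following (the enum and mapping function are illustrative; only the switch names come from gpu_switches.cc):

    #include <string>
    #include "base/command_line.h"
    #include "gpu/command_buffer/service/gpu_switches.h"

    enum class GrContextType { kGL, kVulkan, kMetal, kDawn };

    // Map --gr-context-type=gl|vulkan|metal|dawn onto a backend,
    // defaulting to GL when the switch is absent or unrecognized.
    GrContextType GetGrContextType(const base::CommandLine& cmd) {
      std::string value = cmd.GetSwitchValueASCII(switches::kGrContextType);
      if (value == switches::kGrContextTypeVulkan)
        return GrContextType::kVulkan;
      if (value == switches::kGrContextTypeMetal)
        return GrContextType::kMetal;
      if (value == switches::kGrContextTypeDawn)
        return GrContextType::kDawn;
      return GrContextType::kGL;
    }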
const char kUseVulkan[] = "use-vulkan"; diff --git a/chromium/gpu/command_buffer/service/gpu_switches.h b/chromium/gpu/command_buffer/service/gpu_switches.h index e9b9fdfb7de..ee702988014 100644 --- a/chromium/gpu/command_buffer/service/gpu_switches.h +++ b/chromium/gpu/command_buffer/service/gpu_switches.h @@ -28,6 +28,12 @@ GPU_EXPORT extern const char kDisableGpuShaderDiskCache[]; GPU_EXPORT extern const char kEnableThreadedTextureMailboxes[]; GPU_EXPORT extern const char kGLShaderIntermOutput[]; GPU_EXPORT extern const char kEmulateShaderPrecision[]; +GPU_EXPORT extern const char kGrContextType[]; +GPU_EXPORT extern const char kGrContextTypeGL[]; +GPU_EXPORT extern const char kGrContextTypeVulkan[]; +GPU_EXPORT extern const char kGrContextTypeMetal[]; +GPU_EXPORT extern const char kGrContextTypeDawn[]; +GPU_EXPORT extern const char kVulkanImplementationNameNative[]; GPU_EXPORT extern const char kUseVulkan[]; GPU_EXPORT extern const char kVulkanImplementationNameNative[]; GPU_EXPORT extern const char kVulkanImplementationNameSwiftshader[]; diff --git a/chromium/gpu/command_buffer/service/gpu_tracer.cc b/chromium/gpu/command_buffer/service/gpu_tracer.cc index 667c025a17d..17a670920ec 100644 --- a/chromium/gpu/command_buffer/service/gpu_tracer.cc +++ b/chromium/gpu/command_buffer/service/gpu_tracer.cc @@ -230,7 +230,7 @@ bool GPUTracer::EndDecoding() { marker.trace_->End(); finished_traces_.push_back(marker.trace_); - marker.trace_ = 0; + marker.trace_.reset(); } } } @@ -382,7 +382,7 @@ void GPUTracer::ClearOngoingTraces(bool have_context) { TraceMarker& marker = markers_[n][i]; if (marker.trace_.get()) { marker.trace_->Destroy(have_context); - marker.trace_ = 0; + marker.trace_.reset(); } } } diff --git a/chromium/gpu/command_buffer/service/image_reader_gl_owner.cc b/chromium/gpu/command_buffer/service/image_reader_gl_owner.cc index f233dcf6421..0e7617f23d1 100644 --- a/chromium/gpu/command_buffer/service/image_reader_gl_owner.cc +++ b/chromium/gpu/command_buffer/service/image_reader_gl_owner.cc @@ -110,7 +110,8 @@ ImageReaderGLOwner::ImageReaderGLOwner( uint64_t usage = mode == Mode::kAImageReaderSecureSurfaceControl ? AHARDWAREBUFFER_USAGE_PROTECTED_CONTENT : AHARDWAREBUFFER_USAGE_GPU_SAMPLED_IMAGE; - usage |= gl::SurfaceControl::RequiredUsage(); + if (IsSurfaceControl(mode)) + usage |= AHARDWAREBUFFER_USAGE_COMPOSER_OVERLAY; // Create a new reader for images of the desired size and format. 
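The image_reader_gl_owner.cc hunk above narrows the AHardwareBuffer usage bits: composer-overlay usage is now requested only when the owner actually targets SurfaceControl. In isolation the selection reduces to roughly this (Mode values other than kAImageReaderSecureSurfaceControl, and the IsSurfaceControl predicate, are assumptions inferred from the surrounding code):

    #include <cstdint>
    #include <android/hardware_buffer.h>

    enum class Mode {
      kAImageReader,
      kAImageReaderSurfaceControl,
      kAImageReaderSecureSurfaceControl,
    };

    bool IsSurfaceControl(Mode mode) {
      return mode == Mode::kAImageReaderSurfaceControl ||
             mode == Mode::kAImageReaderSecureSurfaceControl;
    }

    // Secure playback needs protected buffers; everything else is GPU
    // sampled. Overlay usage is added only for SurfaceControl scanout.
    uint64_t ImageReaderBufferUsage(Mode mode) {
      uint64_t usage = mode == Mode::kAImageReaderSecureSurfaceControl
                           ? AHARDWAREBUFFER_USAGE_PROTECTED_CONTENT
                           : AHARDWAREBUFFER_USAGE_GPU_SAMPLED_IMAGE;
      if (IsSurfaceControl(mode))
        usage |= AHARDWAREBUFFER_USAGE_COMPOSER_OVERLAY;
      return usage;
    }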
media_status_t return_code = loader_.AImageReader_newWithUsage( diff --git a/chromium/gpu/command_buffer/service/image_reader_gl_owner_unittest.cc b/chromium/gpu/command_buffer/service/image_reader_gl_owner_unittest.cc index 963be9cce2f..58d8096c0d2 100644 --- a/chromium/gpu/command_buffer/service/image_reader_gl_owner_unittest.cc +++ b/chromium/gpu/command_buffer/service/image_reader_gl_owner_unittest.cc @@ -33,8 +33,12 @@ class ImageReaderGLOwnerTest : public testing::Test { return; scoped_feature_list_.InitAndEnableFeature(media::kAImageReaderVideoOutput); - gl::init::InitializeGLOneOffImplementation(gl::kGLImplementationEGLGLES2, - false, false, false, true); + + gl::init::InitializeStaticGLBindingsImplementation( + gl::kGLImplementationEGLGLES2, false); + gl::init::InitializeGLOneOffPlatformImplementation(false, false, false, + true); + surface_ = new gl::PbufferGLSurfaceEGL(gfx::Size(320, 240)); surface_->Initialize(); diff --git a/chromium/gpu/command_buffer/service/multi_draw_manager.cc b/chromium/gpu/command_buffer/service/multi_draw_manager.cc index 6000fa98708..27b72bba085 100644 --- a/chromium/gpu/command_buffer/service/multi_draw_manager.cc +++ b/chromium/gpu/command_buffer/service/multi_draw_manager.cc @@ -24,8 +24,9 @@ MultiDrawManager::ResultData::ResultData(ResultData&& rhs) counts(std::move(rhs.counts)), offsets(std::move(rhs.offsets)), indices(std::move(rhs.indices)), - instance_counts(std::move(rhs.instance_counts)) { -} + instance_counts(std::move(rhs.instance_counts)), + basevertices(std::move(rhs.basevertices)), + baseinstances(std::move(rhs.baseinstances)) {} MultiDrawManager::ResultData& MultiDrawManager::ResultData::operator=( ResultData&& rhs) { @@ -41,6 +42,8 @@ MultiDrawManager::ResultData& MultiDrawManager::ResultData::operator=( std::swap(offsets, rhs.offsets); std::swap(indices, rhs.indices); std::swap(instance_counts, rhs.instance_counts); + std::swap(basevertices, rhs.basevertices); + std::swap(baseinstances, rhs.baseinstances); return *this; } @@ -78,9 +81,8 @@ bool MultiDrawManager::MultiDrawArrays(GLenum mode, if (!EnsureDrawArraysFunction(DrawFunction::DrawArrays, mode, drawcount)) { return false; } - std::copy(firsts, firsts + drawcount, &result_.firsts[current_draw_offset_]); - std::copy(counts, counts + drawcount, &result_.counts[current_draw_offset_]); - current_draw_offset_ += drawcount; + CopyArraysHelper(drawcount, firsts, counts, nullptr, nullptr, nullptr, + nullptr); return true; } @@ -93,11 +95,24 @@ bool MultiDrawManager::MultiDrawArraysInstanced(GLenum mode, drawcount)) { return false; } - std::copy(firsts, firsts + drawcount, &result_.firsts[current_draw_offset_]); - std::copy(counts, counts + drawcount, &result_.counts[current_draw_offset_]); - std::copy(instance_counts, instance_counts + drawcount, - &result_.instance_counts[current_draw_offset_]); - current_draw_offset_ += drawcount; + CopyArraysHelper(drawcount, firsts, counts, nullptr, instance_counts, nullptr, + nullptr); + return true; +} + +bool MultiDrawManager::MultiDrawArraysInstancedBaseInstance( + GLenum mode, + const GLint* firsts, + const GLsizei* counts, + const GLsizei* instance_counts, + const GLuint* baseinstances, + GLsizei drawcount) { + if (!EnsureDrawArraysFunction(DrawFunction::DrawArraysInstancedBaseInstance, + mode, drawcount)) { + return false; + } + CopyArraysHelper(drawcount, firsts, counts, nullptr, instance_counts, nullptr, + baseinstances); return true; } @@ -110,21 +125,8 @@ bool MultiDrawManager::MultiDrawElements(GLenum mode, drawcount)) { return false; } 
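The removals below collapse the per-variant copy loops into the single CopyArraysHelper added at the end of the file. The one subtle behavior it preserves is the element-index storage: byte offsets are kept as offsets when an element array buffer is bound (IndexStorageType::Offset) and are otherwise rebased into client-side pointers. Standalone, that transform is just (illustrative helper; the cast idiom is the one used in CopyArraysHelper):

    #include <cstdint>
    #include <vector>

    // Sketch: turn shared-memory byte offsets into the pointer-typed
    // form that the glMultiDrawElements* entry points expect.
    std::vector<const void*> OffsetsToIndexPointers(const int32_t* offsets,
                                                    int drawcount) {
      std::vector<const void*> indices(drawcount);
      for (int i = 0; i < drawcount; ++i) {
        indices[i] =
            reinterpret_cast<const void*>(static_cast<intptr_t>(offsets[i]));
      }
      return indices;
    }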
- std::copy(counts, counts + drawcount, &result_.counts[current_draw_offset_]); - switch (index_type_) { - case IndexStorageType::Offset: - std::copy(offsets, offsets + drawcount, - &result_.offsets[current_draw_offset_]); - break; - case IndexStorageType::Pointer: - std::transform( - offsets, offsets + drawcount, &result_.indices[current_draw_offset_], - [](uint32_t offset) { - return reinterpret_cast<void*>(static_cast<intptr_t>(offset)); - }); - break; - } - current_draw_offset_ += drawcount; + CopyArraysHelper(drawcount, nullptr, counts, offsets, nullptr, nullptr, + nullptr); return true; } @@ -139,28 +141,35 @@ bool MultiDrawManager::MultiDrawElementsInstanced( type, drawcount)) { return false; } - std::copy(counts, counts + drawcount, &result_.counts[current_draw_offset_]); - std::copy(instance_counts, instance_counts + drawcount, - &result_.instance_counts[current_draw_offset_]); - switch (index_type_) { - case IndexStorageType::Offset: - std::copy(offsets, offsets + drawcount, - &result_.offsets[current_draw_offset_]); - break; - case IndexStorageType::Pointer: - std::transform( - offsets, offsets + drawcount, &result_.indices[current_draw_offset_], - [](uint32_t offset) { - return reinterpret_cast<void*>(static_cast<intptr_t>(offset)); - }); - break; + CopyArraysHelper(drawcount, nullptr, counts, offsets, instance_counts, + nullptr, nullptr); + return true; +} + +bool MultiDrawManager::MultiDrawElementsInstancedBaseVertexBaseInstance( + GLenum mode, + const GLsizei* counts, + GLenum type, + const GLsizei* offsets, + const GLsizei* instance_counts, + const GLint* basevertices, + const GLuint* baseinstances, + GLsizei drawcount) { + if (!EnsureDrawElementsFunction( + DrawFunction::DrawElementsInstancedBaseVertexBaseInstance, mode, type, + drawcount)) { + return false; } - current_draw_offset_ += drawcount; + CopyArraysHelper(drawcount, nullptr, counts, offsets, instance_counts, + basevertices, baseinstances); return true; } void MultiDrawManager::ResizeArrays() { switch (result_.draw_function) { + case DrawFunction::DrawArraysInstancedBaseInstance: + result_.baseinstances.resize(result_.drawcount); + FALLTHROUGH; case DrawFunction::DrawArraysInstanced: result_.instance_counts.resize(result_.drawcount); FALLTHROUGH; @@ -168,6 +177,10 @@ void MultiDrawManager::ResizeArrays() { result_.firsts.resize(result_.drawcount); result_.counts.resize(result_.drawcount); break; + case DrawFunction::DrawElementsInstancedBaseVertexBaseInstance: + result_.basevertices.resize(result_.drawcount); + result_.baseinstances.resize(result_.drawcount); + FALLTHROUGH; case DrawFunction::DrawElementsInstanced: result_.instance_counts.resize(result_.drawcount); FALLTHROUGH; @@ -248,5 +261,56 @@ bool MultiDrawManager::EnsureDrawElementsFunction(DrawFunction draw_function, return true; } +void MultiDrawManager::CopyArraysHelper(GLsizei drawcount, + const GLint* firsts, + const GLsizei* counts, + const GLsizei* offsets, + const GLsizei* instance_counts, + const GLint* basevertices, + const GLuint* baseinstances) { + if (firsts) { + std::copy(firsts, firsts + drawcount, + &result_.firsts[current_draw_offset_]); + } + + if (counts) { + std::copy(counts, counts + drawcount, + &result_.counts[current_draw_offset_]); + } + + if (instance_counts) { + std::copy(instance_counts, instance_counts + drawcount, + &result_.instance_counts[current_draw_offset_]); + } + + if (basevertices) { + std::copy(basevertices, basevertices + drawcount, + &result_.basevertices[current_draw_offset_]); + } + + if (baseinstances) { + 
std::copy(baseinstances, baseinstances + drawcount, + &result_.baseinstances[current_draw_offset_]); + } + + if (offsets) { + switch (index_type_) { + case IndexStorageType::Offset: + std::copy(offsets, offsets + drawcount, + &result_.offsets[current_draw_offset_]); + break; + case IndexStorageType::Pointer: + std::transform( + offsets, offsets + drawcount, + &result_.indices[current_draw_offset_], [](uint32_t offset) { + return reinterpret_cast<void*>(static_cast<intptr_t>(offset)); + }); + break; + } + } + + current_draw_offset_ += drawcount; +} + } // namespace gles2 } // namespace gpu diff --git a/chromium/gpu/command_buffer/service/multi_draw_manager.h b/chromium/gpu/command_buffer/service/multi_draw_manager.h index 2baef1653bb..7a08f8b95b2 100644 --- a/chromium/gpu/command_buffer/service/multi_draw_manager.h +++ b/chromium/gpu/command_buffer/service/multi_draw_manager.h @@ -13,6 +13,7 @@ typedef unsigned GLenum; typedef int GLsizei; typedef int GLint; +typedef unsigned int GLuint; namespace gpu { namespace gles2 { @@ -22,8 +23,10 @@ class GPU_GLES2_EXPORT MultiDrawManager { enum class DrawFunction { DrawArrays, DrawArraysInstanced, + DrawArraysInstancedBaseInstance, DrawElements, DrawElementsInstanced, + DrawElementsInstancedBaseVertexBaseInstance, }; struct GPU_GLES2_EXPORT ResultData { @@ -36,6 +39,8 @@ class GPU_GLES2_EXPORT MultiDrawManager { std::vector<GLsizei> offsets; std::vector<const void*> indices; std::vector<GLsizei> instance_counts; + std::vector<GLint> basevertices; + std::vector<GLuint> baseinstances; ResultData(); ~ResultData(); @@ -61,6 +66,12 @@ class GPU_GLES2_EXPORT MultiDrawManager { const GLsizei* counts, const GLsizei* instance_counts, GLsizei drawcount); + bool MultiDrawArraysInstancedBaseInstance(GLenum mode, + const GLint* firsts, + const GLsizei* counts, + const GLsizei* instance_counts, + const GLuint* baseinstances, + GLsizei drawcount); bool MultiDrawElements(GLenum mode, const GLsizei* counts, GLenum type, @@ -72,6 +83,15 @@ class GPU_GLES2_EXPORT MultiDrawManager { const GLsizei* offsets, const GLsizei* instance_counts, GLsizei drawcount); + bool MultiDrawElementsInstancedBaseVertexBaseInstance( + GLenum mode, + const GLsizei* counts, + GLenum type, + const GLsizei* offsets, + const GLsizei* instance_counts, + const GLint* basevertices, + const GLuint* baseinstances, + GLsizei drawcount); private: void ResizeArrays(); @@ -83,6 +103,13 @@ class GPU_GLES2_EXPORT MultiDrawManager { GLenum mode, GLenum type, GLsizei drawcount); + void CopyArraysHelper(GLsizei drawcount, + const GLint* firsts, + const GLsizei* counts, + const GLsizei* offsets, + const GLsizei* instance_counts, + const GLint* basevertices, + const GLuint* baseinstances); enum class DrawState { Begin, diff --git a/chromium/gpu/command_buffer/service/multi_draw_manager_unittest.cc b/chromium/gpu/command_buffer/service/multi_draw_manager_unittest.cc index 5145588a644..3b75f9a5d02 100644 --- a/chromium/gpu/command_buffer/service/multi_draw_manager_unittest.cc +++ b/chromium/gpu/command_buffer/service/multi_draw_manager_unittest.cc @@ -30,6 +30,8 @@ class MultiDrawManagerTest : public testing::TestWithParam<Param> { GLenum mode = GL_TRIANGLES, GLenum type = GL_UNSIGNED_INT) { std::vector<GLsizei> data(count); + std::vector<GLint> basevertices(count); + std::vector<GLuint> baseinstances(count); switch (std::get<1>(GetParam())) { case MultiDrawManager::DrawFunction::DrawArrays: return multi_draw_manager_->MultiDrawArrays(mode, data.data(), @@ -39,6 +41,11 @@ class MultiDrawManagerTest : public 
testing::TestWithParam<Param> { return multi_draw_manager_->MultiDrawArraysInstanced( mode, data.data(), data.data(), data.data(), count); + case MultiDrawManager::DrawFunction::DrawArraysInstancedBaseInstance: + return multi_draw_manager_->MultiDrawArraysInstancedBaseInstance( + mode, data.data(), data.data(), data.data(), baseinstances.data(), + count); + case MultiDrawManager::DrawFunction::DrawElements: return multi_draw_manager_->MultiDrawElements(mode, data.data(), type, data.data(), count); @@ -46,6 +53,13 @@ class MultiDrawManagerTest : public testing::TestWithParam<Param> { case MultiDrawManager::DrawFunction::DrawElementsInstanced: return multi_draw_manager_->MultiDrawElementsInstanced( mode, data.data(), type, data.data(), data.data(), count); + + case MultiDrawManager::DrawFunction:: + DrawElementsInstancedBaseVertexBaseInstance: + return multi_draw_manager_ + ->MultiDrawElementsInstancedBaseVertexBaseInstance( + mode, data.data(), type, data.data(), data.data(), + basevertices.data(), baseinstances.data(), count); } } @@ -55,6 +69,9 @@ class MultiDrawManagerTest : public testing::TestWithParam<Param> { EXPECT_TRUE(draw_function == result.draw_function); switch (draw_function) { + case MultiDrawManager::DrawFunction::DrawArraysInstancedBaseInstance: + EXPECT_TRUE(result.baseinstances.size() == count); + FALLTHROUGH; case MultiDrawManager::DrawFunction::DrawArraysInstanced: EXPECT_TRUE(result.instance_counts.size() == count); FALLTHROUGH; @@ -62,6 +79,11 @@ class MultiDrawManagerTest : public testing::TestWithParam<Param> { EXPECT_TRUE(result.firsts.size() == count); EXPECT_TRUE(result.counts.size() == count); break; + case MultiDrawManager::DrawFunction:: + DrawElementsInstancedBaseVertexBaseInstance: + EXPECT_TRUE(result.basevertices.size() == count); + EXPECT_TRUE(result.baseinstances.size() == count); + FALLTHROUGH; case MultiDrawManager::DrawFunction::DrawElementsInstanced: EXPECT_TRUE(result.instance_counts.size() == count); FALLTHROUGH; @@ -217,7 +239,7 @@ TEST_P(MultiDrawManagerTest, ElementTypeMismatch) { } INSTANTIATE_TEST_SUITE_P( - , + All, MultiDrawManagerTest, testing::Combine( testing::Values(MultiDrawManager::IndexStorageType::Offset, @@ -225,8 +247,11 @@ INSTANTIATE_TEST_SUITE_P( testing::Values( MultiDrawManager::DrawFunction::DrawArrays, MultiDrawManager::DrawFunction::DrawArraysInstanced, + MultiDrawManager::DrawFunction::DrawArraysInstancedBaseInstance, MultiDrawManager::DrawFunction::DrawElements, - MultiDrawManager::DrawFunction::DrawElementsInstanced))); + MultiDrawManager::DrawFunction::DrawElementsInstanced, + MultiDrawManager::DrawFunction:: + DrawElementsInstancedBaseVertexBaseInstance))); } // namespace gles2 } // namespace gpu diff --git a/chromium/gpu/command_buffer/service/program_manager.cc b/chromium/gpu/command_buffer/service/program_manager.cc index 28d0dad5f52..5f82dcb65b0 100644 --- a/chromium/gpu/command_buffer/service/program_manager.cc +++ b/chromium/gpu/command_buffer/service/program_manager.cc @@ -408,6 +408,8 @@ Program::Program(ProgramManager* manager, GLuint service_id) link_status_(false), uniforms_cleared_(false), draw_id_uniform_location_(-1), + base_vertex_uniform_location_(-1), + base_instance_uniform_location_(-1), transform_feedback_buffer_mode_(GL_NONE), effective_transform_feedback_buffer_mode_(GL_NONE), fragment_output_type_mask_(0u), @@ -436,6 +438,8 @@ void Program::Reset() { fragment_output_type_mask_ = 0u; fragment_output_written_mask_ = 0u; draw_id_uniform_location_ = -1; + base_vertex_uniform_location_ = -1; + 
base_instance_uniform_location_ = -1; ClearVertexInputMasks(); } @@ -581,6 +585,24 @@ void Program::UpdateDrawIDUniformLocation() { &array_index); } +void Program::UpdateBaseVertexUniformLocation() { + DCHECK(IsValid()); + GLint fake_location = GetUniformFakeLocation("gl_BaseVertex"); + base_vertex_uniform_location_ = -1; + GLint array_index; + GetUniformInfoByFakeLocation(fake_location, &base_vertex_uniform_location_, + &array_index); +} + +void Program::UpdateBaseInstanceUniformLocation() { + DCHECK(IsValid()); + GLint fake_location = GetUniformFakeLocation("gl_BaseInstance"); + base_instance_uniform_location_ = -1; + GLint array_index; + GetUniformInfoByFakeLocation(fake_location, &base_instance_uniform_location_, + &array_index); +} + std::string Program::ProcessLogInfo(const std::string& log) { std::string output; re2::StringPiece input(log); @@ -2772,6 +2794,16 @@ void ProgramManager::UpdateDrawIDUniformLocation(Program* program) { program->UpdateDrawIDUniformLocation(); } +void ProgramManager::UpdateBaseVertexUniformLocation(Program* program) { + DCHECK(program); + program->UpdateBaseVertexUniformLocation(); +} + +void ProgramManager::UpdateBaseInstanceUniformLocation(Program* program) { + DCHECK(program); + program->UpdateBaseInstanceUniformLocation(); +} + int32_t ProgramManager::MakeFakeLocation(int32_t index, int32_t element) { return index + element * 0x10000; } diff --git a/chromium/gpu/command_buffer/service/program_manager.h b/chromium/gpu/command_buffer/service/program_manager.h index b9847dfa228..6c7d57bcfb0 100644 --- a/chromium/gpu/command_buffer/service/program_manager.h +++ b/chromium/gpu/command_buffer/service/program_manager.h @@ -437,6 +437,14 @@ class GPU_GLES2_EXPORT Program : public base::RefCounted<Program> { GLint draw_id_uniform_location() const { return draw_id_uniform_location_; } + GLint base_vertex_uniform_location() const { + return base_vertex_uniform_location_; + } + + GLint base_instance_uniform_location() const { + return base_instance_uniform_location_; + } + // See member declaration for details. // The data are only valid after a successful link. uint32_t fragment_output_type_mask() const { @@ -521,6 +529,12 @@ class GPU_GLES2_EXPORT Program : public base::RefCounted<Program> { // Updates the draw id uniform location used by ANGLE_multi_draw void UpdateDrawIDUniformLocation(); + // Updates the base vertex and base instance uniform location used by + // ANGLE_base_vertex_base_instance + void UpdateBaseVertexUniformLocation(); + + void UpdateBaseInstanceUniformLocation(); + // If long attribate names are mapped during shader translation, call // glBindAttribLocation() again with the mapped names. 
// This is called right before the glLink() call, but after shaders are @@ -602,6 +616,10 @@ class GPU_GLES2_EXPORT Program : public base::RefCounted<Program> { // ANGLE_multi_draw GLint draw_id_uniform_location_; + // ANGLE_base_vertex_base_instance + GLint base_vertex_uniform_location_; + GLint base_instance_uniform_location_; + // Log info std::unique_ptr<std::string> log_info_; @@ -699,6 +717,14 @@ class GPU_GLES2_EXPORT ProgramManager { // Updates the draw id location for this program for ANGLE_multi_draw void UpdateDrawIDUniformLocation(Program* program); + // Updates the base vertex location for this program for + // ANGLE_base_vertex_base_instance + void UpdateBaseVertexUniformLocation(Program* program); + + // Updates the base instance location for this program for + // ANGLE_base_vertex_base_instance + void UpdateBaseInstanceUniformLocation(Program* program); + // Returns true if |name| has a prefix that is intended for GL built-in shader // variables. static bool HasBuiltInPrefix(const std::string& name); diff --git a/chromium/gpu/command_buffer/service/query_manager.cc b/chromium/gpu/command_buffer/service/query_manager.cc index 759191f2311..dd146b1a6f9 100644 --- a/chromium/gpu/command_buffer/service/query_manager.cc +++ b/chromium/gpu/command_buffer/service/query_manager.cc @@ -10,7 +10,6 @@ #include "base/atomicops.h" #include "base/bind.h" #include "base/logging.h" -#include "base/memory/shared_memory.h" #include "base/time/time.h" #include "ui/gl/gl_bindings.h" #include "ui/gl/gl_fence.h" diff --git a/chromium/gpu/command_buffer/service/raster_decoder_unittest.cc b/chromium/gpu/command_buffer/service/raster_decoder_unittest.cc index 4d50d172abd..52844f907d5 100644 --- a/chromium/gpu/command_buffer/service/raster_decoder_unittest.cc +++ b/chromium/gpu/command_buffer/service/raster_decoder_unittest.cc @@ -296,7 +296,8 @@ class RasterDecoderOOPTest : public testing::Test, DecoderClient { context_state_ = base::MakeRefCounted<SharedContextState>( std::move(share_group), std::move(surface), std::move(context), - false /* use_virtualized_gl_contexts */, base::DoNothing()); + false /* use_virtualized_gl_contexts */, base::DoNothing(), + GpuPreferences().gr_context_type); context_state_->InitializeGrContext(workarounds, nullptr); context_state_->InitializeGL(GpuPreferences(), feature_info); } diff --git a/chromium/gpu/command_buffer/service/raster_decoder_unittest_base.cc b/chromium/gpu/command_buffer/service/raster_decoder_unittest_base.cc index 86f850909b1..135dc082464 100644 --- a/chromium/gpu/command_buffer/service/raster_decoder_unittest_base.cc +++ b/chromium/gpu/command_buffer/service/raster_decoder_unittest_base.cc @@ -183,7 +183,7 @@ void RasterDecoderTestBase::InitDecoder(const InitState& init) { shared_context_state_ = base::MakeRefCounted<SharedContextState>( new gl::GLShareGroup(), surface_, context_, feature_info()->workarounds().use_virtualized_gl_contexts, - base::DoNothing()); + base::DoNothing(), GpuPreferences().gr_context_type); shared_context_state_->InitializeGL(GpuPreferences(), feature_info_); diff --git a/chromium/gpu/command_buffer/service/scheduler.cc b/chromium/gpu/command_buffer/service/scheduler.cc index 2f9b109a657..c2e71120ec8 100644 --- a/chromium/gpu/command_buffer/service/scheduler.cc +++ b/chromium/gpu/command_buffer/service/scheduler.cc @@ -13,6 +13,7 @@ #include "base/trace_event/trace_event.h" #include "base/trace_event/traced_value.h" #include "gpu/command_buffer/service/sync_point_manager.h" +#include "gpu/config/gpu_preferences.h" 
namespace gpu { @@ -289,12 +290,19 @@ void Scheduler::Sequence::RemoveClientWait(CommandBufferId command_buffer_id) { } Scheduler::Scheduler(scoped_refptr<base::SingleThreadTaskRunner> task_runner, - SyncPointManager* sync_point_manager) + SyncPointManager* sync_point_manager, + const GpuPreferences& gpu_preferences) : task_runner_(std::move(task_runner)), - sync_point_manager_(sync_point_manager) { + sync_point_manager_(sync_point_manager), + blocked_time_collection_enabled_( + gpu_preferences.enable_gpu_blocked_time_metric) { DCHECK(thread_checker_.CalledOnValidThread()); // Store weak ptr separately because calling GetWeakPtr() is not thread safe. weak_ptr_ = weak_factory_.GetWeakPtr(); + + if (blocked_time_collection_enabled_ && !base::ThreadTicks::IsSupported()) { + DLOG(ERROR) << "GPU Blocked time collection is enabled but not supported."; + } } Scheduler::~Scheduler() { @@ -525,7 +533,25 @@ void Scheduler::RunNextTask() { { base::AutoUnlock auto_unlock(lock_); order_data->BeginProcessingOrderNumber(order_num); - std::move(closure).Run(); + + if (blocked_time_collection_enabled_ && base::ThreadTicks::IsSupported()) { + // We can't call base::ThreadTicks::Now() if it's not supported + base::ThreadTicks thread_time_start = base::ThreadTicks::Now(); + base::TimeTicks wall_time_start = base::TimeTicks::Now(); + + std::move(closure).Run(); + + base::TimeDelta thread_time_elapsed = + base::ThreadTicks::Now() - thread_time_start; + base::TimeDelta wall_time_elapsed = + base::TimeTicks::Now() - wall_time_start; + base::TimeDelta blocked_time = wall_time_elapsed - thread_time_elapsed; + + total_blocked_time_ += blocked_time; + } else { + std::move(closure).Run(); + } + if (order_data->IsProcessingOrderNumber()) order_data->FinishProcessingOrderNumber(order_num); } @@ -546,4 +572,12 @@ void Scheduler::RunNextTask() { base::BindOnce(&Scheduler::RunNextTask, weak_ptr_)); } +base::TimeDelta Scheduler::TakeTotalBlockingTime() { + if (!blocked_time_collection_enabled_ || !base::ThreadTicks::IsSupported()) + return base::TimeDelta::Min(); + base::TimeDelta result; + std::swap(result, total_blocked_time_); + return result; +} + } // namespace gpu diff --git a/chromium/gpu/command_buffer/service/scheduler.h b/chromium/gpu/command_buffer/service/scheduler.h index a2a6c5d2e3f..0838d704311 100644 --- a/chromium/gpu/command_buffer/service/scheduler.h +++ b/chromium/gpu/command_buffer/service/scheduler.h @@ -32,6 +32,7 @@ class ConvertableToTraceFormat; namespace gpu { class SyncPointManager; +struct GpuPreferences; class GPU_EXPORT Scheduler { public: @@ -49,7 +50,8 @@ class GPU_EXPORT Scheduler { }; Scheduler(scoped_refptr<base::SingleThreadTaskRunner> task_runner, - SyncPointManager* sync_point_manager); + SyncPointManager* sync_point_manager, + const GpuPreferences& gpu_preferences); virtual ~Scheduler(); @@ -94,6 +96,11 @@ class GPU_EXPORT Scheduler { base::WeakPtr<Scheduler> AsWeakPtr(); + // Takes and resets current accumulated blocking time. Not available on all + // platforms. Must be enabled with --enable-gpu-blocked-time. + // Returns TimeDelta::Min() when not available. + base::TimeDelta TakeTotalBlockingTime(); + private: struct SchedulingState { @@ -335,6 +342,10 @@ class GPU_EXPORT Scheduler { // priority. bool rebuild_scheduling_queue_ = false; + // Accumulated time the thread was blocked during running task + base::TimeDelta total_blocked_time_; + const bool blocked_time_collection_enabled_; + base::ThreadChecker thread_checker_; // Invalidated on main thread. 
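
The blocked-time metric added above hinges on the difference between wall-clock time and per-thread CPU time: while the closure runs, any gap between the two is time the GPU main thread spent descheduled or waiting rather than executing. Below is a minimal standalone sketch of the same bookkeeping, using POSIX clock_gettime() in place of base::TimeTicks/base::ThreadTicks; the names NowSeconds and RunAndMeasureBlockedSeconds are invented for illustration and are not Chromium APIs.

    // Illustrative sketch only (POSIX), not Chromium code.
    #include <time.h>

    static double NowSeconds(clockid_t id) {
      timespec ts;
      clock_gettime(id, &ts);
      return ts.tv_sec + ts.tv_nsec / 1e9;
    }

    // Runs |task| and returns wall time minus thread CPU time, i.e. the
    // time the task spent blocked instead of running. This mirrors the
    // total_blocked_time_ accumulation in Scheduler::RunNextTask().
    template <typename Task>
    double RunAndMeasureBlockedSeconds(Task&& task) {
      const double wall_start = NowSeconds(CLOCK_MONOTONIC);
      const double cpu_start = NowSeconds(CLOCK_THREAD_CPUTIME_ID);
      task();
      const double wall = NowSeconds(CLOCK_MONOTONIC) - wall_start;
      const double cpu = NowSeconds(CLOCK_THREAD_CPUTIME_ID) - cpu_start;
      return wall - cpu;
    }

The subtraction only makes sense where a per-thread clock exists, which is why the patch guards on base::ThreadTicks::IsSupported() and has TakeTotalBlockingTime() return TimeDelta::Min() when the metric is unavailable.
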
diff --git a/chromium/gpu/command_buffer/service/scheduler_unittest.cc b/chromium/gpu/command_buffer/service/scheduler_unittest.cc index e2b8a9555ed..ff5962d7d12 100644 --- a/chromium/gpu/command_buffer/service/scheduler_unittest.cc +++ b/chromium/gpu/command_buffer/service/scheduler_unittest.cc @@ -9,6 +9,7 @@ #include "base/bind.h" #include "base/test/test_simple_task_runner.h" #include "gpu/command_buffer/service/sync_point_manager.h" +#include "gpu/config/gpu_preferences.h" #include "testing/gmock/include/gmock/gmock.h" #include "testing/gtest/include/gtest/gtest.h" @@ -29,7 +30,9 @@ class SchedulerTest : public testing::Test { SchedulerTest() : task_runner_(new base::TestSimpleTaskRunner()), sync_point_manager_(new SyncPointManager), - scheduler_(new Scheduler(task_runner_, sync_point_manager_.get())) {} + scheduler_(new Scheduler(task_runner_, + sync_point_manager_.get(), + GpuPreferences())) {} protected: base::TestSimpleTaskRunner* task_runner() const { return task_runner_.get(); } diff --git a/chromium/gpu/command_buffer/service/service_discardable_manager.cc b/chromium/gpu/command_buffer/service/service_discardable_manager.cc index 65baae1b8e6..52fe92be8ba 100644 --- a/chromium/gpu/command_buffer/service/service_discardable_manager.cc +++ b/chromium/gpu/command_buffer/service/service_discardable_manager.cc @@ -4,8 +4,13 @@ #include "gpu/command_buffer/service/service_discardable_manager.h" +#include <inttypes.h> + #include "base/memory/singleton.h" +#include "base/strings/stringprintf.h" #include "base/system/sys_info.h" +#include "base/threading/thread_task_runner_handle.h" +#include "base/trace_event/memory_dump_manager.h" #include "build/build_config.h" #include "gpu/command_buffer/service/texture_manager.h" @@ -77,7 +82,15 @@ ServiceDiscardableManager::GpuDiscardableEntry::~GpuDiscardableEntry() = ServiceDiscardableManager::ServiceDiscardableManager() : entries_(EntryCache::NO_AUTO_EVICT), - cache_size_limit_(DiscardableCacheSizeLimit()) {} + cache_size_limit_(DiscardableCacheSizeLimit()) { + // In certain cases, ThreadTaskRunnerHandle isn't set (Android Webview). + // Don't register a dump provider in these cases. 
+ if (base::ThreadTaskRunnerHandle::IsSet()) { + base::trace_event::MemoryDumpManager::GetInstance()->RegisterDumpProvider( + this, "gpu::ServiceDiscardableManager", + base::ThreadTaskRunnerHandle::Get()); + } +} ServiceDiscardableManager::~ServiceDiscardableManager() { #if DCHECK_IS_ON() @@ -85,6 +98,46 @@ ServiceDiscardableManager::~ServiceDiscardableManager() { DCHECK(nullptr == entry.second.unlocked_texture_ref); } #endif + base::trace_event::MemoryDumpManager::GetInstance()->UnregisterDumpProvider( + this); +} + +bool ServiceDiscardableManager::OnMemoryDump( + const base::trace_event::MemoryDumpArgs& args, + base::trace_event::ProcessMemoryDump* pmd) { + using base::trace_event::MemoryAllocatorDump; + using base::trace_event::MemoryDumpLevelOfDetail; + + if (args.level_of_detail == MemoryDumpLevelOfDetail::BACKGROUND) { + std::string dump_name = + base::StringPrintf("gpu/discardable_cache/cache_0x%" PRIXPTR, + reinterpret_cast<uintptr_t>(this)); + MemoryAllocatorDump* dump = pmd->CreateAllocatorDump(dump_name); + dump->AddScalar(MemoryAllocatorDump::kNameSize, + MemoryAllocatorDump::kUnitsBytes, total_size_); + + if (!entries_.empty()) { + MemoryAllocatorDump* dump_avg_size = + pmd->CreateAllocatorDump(dump_name + "/avg_image_size"); + dump_avg_size->AddScalar("average_size", MemoryAllocatorDump::kUnitsBytes, + total_size_ / entries_.size()); + } + + // Early out, no need for more detail in a BACKGROUND dump. + return true; + } + + for (const auto& entry : entries_) { + std::string dump_name = base::StringPrintf( + "gpu/discardable_cache/cache_0x%" PRIXPTR "/entry_0x%" PRIXPTR, + reinterpret_cast<uintptr_t>(this), + reinterpret_cast<uintptr_t>(entry.second.unlocked_texture_ref.get())); + MemoryAllocatorDump* dump = pmd->CreateAllocatorDump(dump_name); + dump->AddScalar(MemoryAllocatorDump::kNameSize, + MemoryAllocatorDump::kUnitsBytes, entry.second.size); + } + + return true; } void ServiceDiscardableManager::InsertLockedTexture( diff --git a/chromium/gpu/command_buffer/service/service_discardable_manager.h b/chromium/gpu/command_buffer/service/service_discardable_manager.h index 50b23b9c6a5..4a54490b40b 100644 --- a/chromium/gpu/command_buffer/service/service_discardable_manager.h +++ b/chromium/gpu/command_buffer/service/service_discardable_manager.h @@ -24,10 +24,15 @@ GPU_GLES2_EXPORT size_t DiscardableCacheSizeLimitForPressure( size_t base_cache_limit, base::MemoryPressureListener::MemoryPressureLevel memory_pressure_level); -class GPU_GLES2_EXPORT ServiceDiscardableManager { +class GPU_GLES2_EXPORT ServiceDiscardableManager + : public base::trace_event::MemoryDumpProvider { public: ServiceDiscardableManager(); - ~ServiceDiscardableManager(); + ~ServiceDiscardableManager() override; + + // base::trace_event::MemoryDumpProvider implementation. + bool OnMemoryDump(const base::trace_event::MemoryDumpArgs& args, + base::trace_event::ProcessMemoryDump* pmd) override; void InsertLockedTexture(uint32_t texture_id, size_t texture_size, diff --git a/chromium/gpu/command_buffer/service/service_transfer_cache.cc b/chromium/gpu/command_buffer/service/service_transfer_cache.cc index d8fc0082819..c15e9b632a4 100644 --- a/chromium/gpu/command_buffer/service/service_transfer_cache.cc +++ b/chromium/gpu/command_buffer/service/service_transfer_cache.cc @@ -131,7 +131,7 @@ ServiceTransferCache::ServiceTransferCache() // Don't register a dump provider in these cases. 
if (base::ThreadTaskRunnerHandle::IsSet()) { base::trace_event::MemoryDumpManager::GetInstance()->RegisterDumpProvider( - this, "cc::GpuImageDecodeCache", base::ThreadTaskRunnerHandle::Get()); + this, "gpu::ServiceTransferCache", base::ThreadTaskRunnerHandle::Get()); } } @@ -157,6 +157,10 @@ bool ServiceTransferCache::CreateLockedEntry(const EntryKey& key, return false; total_size_ += entry->CachedSize(); + if (key.entry_type == cc::TransferCacheEntryType::kImage) { + total_image_count_++; + total_image_size_ += entry->CachedSize(); + } entries_.Put(key, CacheEntryInternal(handle, std::move(entry))); EnforceLimits(); return true; @@ -172,6 +176,10 @@ void ServiceTransferCache::CreateLocalEntry( DeleteEntry(key); total_size_ += entry->CachedSize(); + if (key.entry_type == cc::TransferCacheEntryType::kImage) { + total_image_count_++; + total_image_size_ += entry->CachedSize(); + } entries_.Put(key, CacheEntryInternal(base::nullopt, std::move(entry))); EnforceLimits(); @@ -195,6 +203,10 @@ Iterator ServiceTransferCache::ForceDeleteEntry(Iterator it) { DCHECK_GE(total_size_, it->second.entry->CachedSize()); total_size_ -= it->second.entry->CachedSize(); + if (it->first.entry_type == cc::TransferCacheEntryType::kImage) { + total_image_count_--; + total_image_size_ -= it->second.entry->CachedSize(); + } return entries_.Erase(it); } @@ -227,6 +239,10 @@ void ServiceTransferCache::EnforceLimits() { } total_size_ -= it->second.entry->CachedSize(); + if (it->first.entry_type == cc::TransferCacheEntryType::kImage) { + total_image_count_--; + total_image_size_ -= it->second.entry->CachedSize(); + } it = entries_.Erase(it); } } @@ -285,6 +301,10 @@ bool ServiceTransferCache::CreateLockedHardwareDecodedImageEntry( // Insert it in the transfer cache. total_size_ += entry->CachedSize(); + if (key.entry_type == cc::TransferCacheEntryType::kImage) { + total_image_count_++; + total_image_size_ += entry->CachedSize(); + } entries_.Put(key, CacheEntryInternal(handle, std::move(entry))); EnforceLimits(); return true; @@ -302,7 +322,16 @@ bool ServiceTransferCache::OnMemoryDump( reinterpret_cast<uintptr_t>(this)); MemoryAllocatorDump* dump = pmd->CreateAllocatorDump(dump_name); dump->AddScalar(MemoryAllocatorDump::kNameSize, - MemoryAllocatorDump::kUnitsBytes, total_size_); + MemoryAllocatorDump::kUnitsBytes, total_image_size_); + + if (total_image_count_ > 0) { + MemoryAllocatorDump* dump_avg_size = + pmd->CreateAllocatorDump(dump_name + "/avg_image_size"); + const size_t avg_image_size = + total_image_size_ / (total_image_count_ * 1.0); + dump_avg_size->AddScalar("average_size", MemoryAllocatorDump::kUnitsBytes, + avg_image_size); + } // Early out, no need for more detail in a BACKGROUND dump. return true; diff --git a/chromium/gpu/command_buffer/service/service_transfer_cache.h b/chromium/gpu/command_buffer/service/service_transfer_cache.h index 61ac93b0f83..990152b3072 100644 --- a/chromium/gpu/command_buffer/service/service_transfer_cache.h +++ b/chromium/gpu/command_buffer/service/service_transfer_cache.h @@ -131,6 +131,10 @@ class GPU_GLES2_EXPORT ServiceTransferCache // Total size of all |entries_|. The same as summing // GpuDiscardableEntry::size for each entry. size_t total_size_ = 0; + // Total size of all |entries_| of TransferCacheEntryType::kImage. + size_t total_image_size_ = 0; + // Number of |entries_| of TransferCacheEntryType::kImage. + int total_image_count_ = 0; // The limit above which the cache will start evicting resources. 
size_t cache_size_limit_ = 0; diff --git a/chromium/gpu/command_buffer/service/service_utils.cc b/chromium/gpu/command_buffer/service/service_utils.cc index b7e5ae0fefe..0a7df7e9292 100644 --- a/chromium/gpu/command_buffer/service/service_utils.cc +++ b/chromium/gpu/command_buffer/service/service_utils.cc @@ -12,6 +12,7 @@ #include "gpu/command_buffer/service/context_group.h" #include "gpu/command_buffer/service/gpu_switches.h" #include "gpu/config/gpu_finch_features.h" +#include "skia/buildflags.h" #include "ui/gl/gl_switches.h" #include "ui/gl/gl_utils.h" @@ -149,15 +150,11 @@ GpuPreferences ParseGpuPreferences(const base::CommandLine* command_line) { command_line->HasSwitch(switches::kEnableGPUServiceTracing); gpu_preferences.use_passthrough_cmd_decoder = gpu::gles2::UsePassthroughCommandDecoder(command_line); - gpu_preferences.disable_gpu_driver_bug_workarounds = - command_line->HasSwitch(switches::kDisableGpuDriverBugWorkarounds); gpu_preferences.ignore_gpu_blacklist = command_line->HasSwitch(switches::kIgnoreGpuBlacklist); gpu_preferences.enable_webgpu = command_line->HasSwitch(switches::kEnableUnsafeWebGPU); if (command_line->HasSwitch(switches::kUseVulkan)) { - DLOG_IF(ERROR, base::FeatureList::IsEnabled(features::kVulkan)) - << "--enabled-features=Vulkan is overrided by --use-vulkan."; auto value = command_line->GetSwitchValueASCII(switches::kUseVulkan); if (value.empty() || value == switches::kVulkanImplementationNameNative) { gpu_preferences.use_vulkan = VulkanImplementationName::kForcedNative; @@ -166,13 +163,53 @@ GpuPreferences ParseGpuPreferences(const base::CommandLine* command_line) { } else { gpu_preferences.use_vulkan = VulkanImplementationName::kNone; } - } else { - gpu_preferences.use_vulkan = base::FeatureList::IsEnabled(features::kVulkan) - ? gpu::VulkanImplementationName::kNative - : gpu::VulkanImplementationName::kNone; } gpu_preferences.disable_vulkan_surface = command_line->HasSwitch(switches::kDisableVulkanSurface); + if (command_line->HasSwitch(switches::kGrContextType)) { + auto value = command_line->GetSwitchValueASCII(switches::kGrContextType); + if (value == switches::kGrContextTypeGL) { + gpu_preferences.gr_context_type = GrContextType::kGL; + } else if (value == switches::kGrContextTypeVulkan) { + gpu_preferences.gr_context_type = GrContextType::kVulkan; + } else if (value == switches::kGrContextTypeMetal) { +#if defined(OS_MACOSX) + DCHECK(base::FeatureList::IsEnabled(features::kMetal)) + << "GrContextType is Metal, but Metal is not enabled."; + gpu_preferences.gr_context_type = GrContextType::kMetal; +#endif +#if BUILDFLAG(SKIA_USE_DAWN) + } else if (value == switches::kGrContextTypeDawn) { + gpu_preferences.gr_context_type = GrContextType::kDawn; +#endif + } else { + NOTREACHED() << "Invalid GrContextType."; + gpu_preferences.gr_context_type = GrContextType::kGL; + } + } else { +#if defined(OS_MACOSX) + gpu_preferences.gr_context_type = + base::FeatureList::IsEnabled(features::kMetal) ? + GrContextType::kMetal : + GrContextType::kGL; +#else + if (base::FeatureList::IsEnabled(features::kVulkan)) { + gpu_preferences.gr_context_type = GrContextType::kVulkan; + } else { + gpu_preferences.gr_context_type = GrContextType::kGL; + } +#endif + } + if (gpu_preferences.gr_context_type == GrContextType::kVulkan && + gpu_preferences.use_vulkan == gpu::VulkanImplementationName::kNone) { + // If gpu_preferences.use_vulkan is not set from --use-vulkan, the native + // vulkan implementation will be used by default. 
+ gpu_preferences.use_vulkan = gpu::VulkanImplementationName::kNative; + } + + gpu_preferences.enable_gpu_blocked_time_metric = + command_line->HasSwitch(switches::kEnableGpuBlockedTime); + return gpu_preferences; } diff --git a/chromium/gpu/command_buffer/service/shader_translator.cc b/chromium/gpu/command_buffer/service/shader_translator.cc index 6182606b0c3..0586ba27e79 100644 --- a/chromium/gpu/command_buffer/service/shader_translator.cc +++ b/chromium/gpu/command_buffer/service/shader_translator.cc @@ -168,7 +168,8 @@ bool ShaderTranslator::Init(GLenum shader_type, compile_options_ = SH_OBJECT_CODE | SH_VARIABLES | SH_ENFORCE_PACKING_RESTRICTIONS | SH_LIMIT_EXPRESSION_COMPLEXITY | SH_LIMIT_CALL_STACK_DEPTH | - SH_CLAMP_INDIRECT_ARRAY_BOUNDS | SH_EMULATE_GL_DRAW_ID; + SH_CLAMP_INDIRECT_ARRAY_BOUNDS | SH_EMULATE_GL_DRAW_ID | + SH_EMULATE_GL_BASE_VERTEX_BASE_INSTANCE; if (gl_shader_interm_output) compile_options_ |= SH_INTERMEDIATE_TREE; compile_options_ |= driver_bug_workarounds; diff --git a/chromium/gpu/command_buffer/service/shared_context_state.cc b/chromium/gpu/command_buffer/service/shared_context_state.cc index 37897dcf917..295242470fd 100644 --- a/chromium/gpu/command_buffer/service/shared_context_state.cc +++ b/chromium/gpu/command_buffer/service/shared_context_state.cc @@ -13,6 +13,7 @@ #include "gpu/command_buffer/service/service_utils.h" #include "gpu/config/gpu_driver_bug_workarounds.h" #include "gpu/vulkan/buildflags.h" +#include "skia/buildflags.h" #include "ui/gl/gl_bindings.h" #include "ui/gl/gl_context.h" #include "ui/gl/gl_share_group.h" @@ -28,6 +29,10 @@ #include "components/viz/common/gpu/metal_context_provider.h" #endif +#if BUILDFLAG(SKIA_USE_DAWN) +#include "components/viz/common/gpu/dawn_context_provider.h" +#endif + namespace { static constexpr size_t kInitialScratchDeserializationBufferSize = 1024; } @@ -43,18 +48,47 @@ void SharedContextState::compileError(const char* shader, const char* errors) { } } +SharedContextState::MemoryTracker::MemoryTracker( + gpu::MemoryTracker::Observer* peak_memory_monitor) + : peak_memory_monitor_(peak_memory_monitor) {} + +SharedContextState::MemoryTracker::~MemoryTracker() { + DCHECK(!size_); +} + +void SharedContextState::MemoryTracker::OnMemoryAllocatedChange( + CommandBufferId id, + uint64_t old_size, + uint64_t new_size) { + uint64_t delta = new_size - old_size; + old_size = size_; + size_ += delta; + if (peak_memory_monitor_) + peak_memory_monitor_->OnMemoryAllocatedChange(id, old_size, size_); +} + +uint64_t SharedContextState::MemoryTracker::GetMemoryUsage() const { + return size_; +} + SharedContextState::SharedContextState( scoped_refptr<gl::GLShareGroup> share_group, scoped_refptr<gl::GLSurface> surface, scoped_refptr<gl::GLContext> context, bool use_virtualized_gl_contexts, base::OnceClosure context_lost_callback, + GrContextType gr_context_type, viz::VulkanContextProvider* vulkan_context_provider, - viz::MetalContextProvider* metal_context_provider) + viz::MetalContextProvider* metal_context_provider, + viz::DawnContextProvider* dawn_context_provider, + gpu::MemoryTracker::Observer* peak_memory_monitor) : use_virtualized_gl_contexts_(use_virtualized_gl_contexts), context_lost_callback_(std::move(context_lost_callback)), + gr_context_type_(gr_context_type), + memory_tracker_(peak_memory_monitor), vk_context_provider_(vulkan_context_provider), metal_context_provider_(metal_context_provider), + dawn_context_provider_(dawn_context_provider), share_group_(std::move(share_group)), context_(context), 
real_context_(std::move(context)), @@ -75,6 +109,13 @@ SharedContextState::SharedContextState( use_virtualized_gl_contexts_ = false; DCHECK(gr_context_); } + if (GrContextIsDawn()) { +#if BUILDFLAG(SKIA_USE_DAWN) + gr_context_ = dawn_context_provider_->GetGrContext(); +#endif + use_virtualized_gl_contexts_ = false; + DCHECK(gr_context_); + } if (base::ThreadTaskRunnerHandle::IsSet()) { base::trace_event::MemoryDumpManager::GetInstance()->RegisterDumpProvider( @@ -252,11 +293,15 @@ bool SharedContextState::InitializeGL( MakeCurrent(nullptr); } + bool is_native_vulkan = + gpu_preferences.use_vulkan == gpu::VulkanImplementationName::kNative || + gpu_preferences.use_vulkan == + gpu::VulkanImplementationName::kForcedNative; + // Swiftshader GL and Vulkan report supporting external objects extensions, // but they don't. support_vulkan_external_object_ = - !gl::g_current_gl_version->is_swiftshader && - gpu_preferences.use_vulkan == gpu::VulkanImplementationName::kNative && + !gl::g_current_gl_version->is_swiftshader && is_native_vulkan && gl::g_current_gl_driver->ext.b_GL_EXT_memory_object_fd && gl::g_current_gl_driver->ext.b_GL_EXT_semaphore_fd; diff --git a/chromium/gpu/command_buffer/service/shared_context_state.h b/chromium/gpu/command_buffer/service/shared_context_state.h index 8eaa278216b..65adbeaf5e4 100644 --- a/chromium/gpu/command_buffer/service/shared_context_state.h +++ b/chromium/gpu/command_buffer/service/shared_context_state.h @@ -16,6 +16,8 @@ #include "build/build_config.h" #include "gpu/command_buffer/common/skia_utils.h" #include "gpu/command_buffer/service/gl_context_virtual_delegate.h" +#include "gpu/command_buffer/service/memory_tracking.h" +#include "gpu/config/gpu_preferences.h" #include "gpu/gpu_gles2_export.h" #include "third_party/skia/include/gpu/GrContext.h" #include "ui/gl/progress_reporter.h" @@ -27,6 +29,7 @@ class GLSurface; } // namespace gl namespace viz { +class DawnContextProvider; class MetalContextProvider; class VulkanContextProvider; } // namespace viz @@ -35,7 +38,6 @@ namespace gpu { class GpuDriverBugWorkarounds; class GpuProcessActivityFlags; class ServiceTransferCache; -struct GpuPreferences; namespace gles2 { class FeatureInfo; @@ -56,18 +58,28 @@ class GPU_GLES2_EXPORT SharedContextState scoped_refptr<gl::GLContext> context, bool use_virtualized_gl_contexts, base::OnceClosure context_lost_callback, + GrContextType gr_context_type = GrContextType::kGL, viz::VulkanContextProvider* vulkan_context_provider = nullptr, - viz::MetalContextProvider* metal_context_provider = nullptr); + viz::MetalContextProvider* metal_context_provider = nullptr, + viz::DawnContextProvider* dawn_context_provider = nullptr, + gpu::MemoryTracker::Observer* peak_memory_monitor = nullptr); void InitializeGrContext(const GpuDriverBugWorkarounds& workarounds, GrContextOptions::PersistentCache* cache, GpuProcessActivityFlags* activity_flags = nullptr, gl::ProgressReporter* progress_reporter = nullptr); bool GrContextIsGL() const { - return !vk_context_provider_ && !metal_context_provider_; + return gr_context_type_ == GrContextType::kGL; + } + bool GrContextIsVulkan() const { + return vk_context_provider_ && gr_context_type_ == GrContextType::kVulkan; + } + bool GrContextIsMetal() const { + return metal_context_provider_ && gr_context_type_ == GrContextType::kMetal; + } + bool GrContextIsDawn() const { + return dawn_context_provider_ && gr_context_type_ == GrContextType::kDawn; } - bool GrContextIsVulkan() const { return vk_context_provider_; } - bool GrContextIsMetal() const { 
return metal_context_provider_; } bool InitializeGL(const GpuPreferences& gpu_preferences, scoped_refptr<gles2::FeatureInfo> feature_info); @@ -80,6 +92,8 @@ class GPU_GLES2_EXPORT SharedContextState void PurgeMemory( base::MemoryPressureListener::MemoryPressureLevel memory_pressure_level); + uint64_t GetMemoryUsage() const { return memory_tracker_.GetMemoryUsage(); } + void PessimisticallyResetGrContext() const; gl::GLShareGroup* share_group() { return share_group_.get(); } @@ -92,6 +106,9 @@ class GPU_GLES2_EXPORT SharedContextState viz::MetalContextProvider* metal_context_provider() { return metal_context_provider_; } + viz::DawnContextProvider* dawn_context_provider() { + return dawn_context_provider_; + } gl::ProgressReporter* progress_reporter() const { return progress_reporter_; } GrContext* gr_context() { return gr_context_; } // Handles Skia-reported shader compilation errors. @@ -117,6 +134,7 @@ class GPU_GLES2_EXPORT SharedContextState bool support_vulkan_external_object() const { return support_vulkan_external_object_; } + gpu::MemoryTracker::Observer* memory_tracker() { return &memory_tracker_; } // base::trace_event::MemoryDumpProvider implementation. bool OnMemoryDump(const base::trace_event::MemoryDumpArgs& args, @@ -136,6 +154,28 @@ class GPU_GLES2_EXPORT SharedContextState private: friend class base::RefCounted<SharedContextState>; + // Observer which is notified when SkiaOutputSurfaceImpl takes ownership of a + // shared image, and forward information to both histograms and task manager. + class GPU_GLES2_EXPORT MemoryTracker : public gpu::MemoryTracker::Observer { + public: + MemoryTracker(gpu::MemoryTracker::Observer* peak_memory_monitor); + MemoryTracker(MemoryTracker&) = delete; + MemoryTracker& operator=(MemoryTracker&) = delete; + ~MemoryTracker() override; + + // gpu::MemoryTracker::Observer implementation: + void OnMemoryAllocatedChange(CommandBufferId id, + uint64_t old_size, + uint64_t new_size) override; + + // Reports to GpuServiceImpl::GetVideoMemoryUsageStats() + uint64_t GetMemoryUsage() const; + + private: + uint64_t size_ = 0; + gpu::MemoryTracker::Observer* const peak_memory_monitor_; + }; + ~SharedContextState() override; // gpu::GLContextVirtualDelegate implementation. 
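
The nested MemoryTracker introduced above is a plain forwarding observer: it folds each allocation delta into a running total and passes the before/after totals on to an optional peak-memory monitor. A self-contained sketch of that pattern follows; the Observer and ForwardingMemoryTracker names are invented for illustration, and the CommandBufferId argument is dropped for brevity.

    // Illustrative sketch only, not Chromium code.
    #include <cstdint>

    class Observer {
     public:
      virtual ~Observer() = default;
      virtual void OnMemoryAllocatedChange(uint64_t old_size,
                                           uint64_t new_size) = 0;
    };

    class ForwardingMemoryTracker : public Observer {
     public:
      explicit ForwardingMemoryTracker(Observer* peak_monitor)
          : peak_monitor_(peak_monitor) {}

      void OnMemoryAllocatedChange(uint64_t old_size,
                                   uint64_t new_size) override {
        // Unsigned wraparound makes new_size - old_size behave as a
        // signed delta, as in SharedContextState::MemoryTracker.
        const uint64_t before = size_;
        size_ += new_size - old_size;
        if (peak_monitor_)
          peak_monitor_->OnMemoryAllocatedChange(before, size_);
      }

      uint64_t GetMemoryUsage() const { return size_; }

     private:
      uint64_t size_ = 0;
      Observer* const peak_monitor_;
    };
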
@@ -161,8 +201,11 @@ class GPU_GLES2_EXPORT SharedContextState bool use_virtualized_gl_contexts_ = false; bool support_vulkan_external_object_ = false; base::OnceClosure context_lost_callback_; + GrContextType gr_context_type_ = GrContextType::kGL; + MemoryTracker memory_tracker_; viz::VulkanContextProvider* const vk_context_provider_; viz::MetalContextProvider* const metal_context_provider_; + viz::DawnContextProvider* const dawn_context_provider_; GrContext* gr_context_ = nullptr; scoped_refptr<gl::GLShareGroup> share_group_; diff --git a/chromium/gpu/command_buffer/service/shared_image_backing.cc b/chromium/gpu/command_buffer/service/shared_image_backing.cc index f4c22a418dc..bddce86bb50 100644 --- a/chromium/gpu/command_buffer/service/shared_image_backing.cc +++ b/chromium/gpu/command_buffer/service/shared_image_backing.cc @@ -67,7 +67,7 @@ std::unique_ptr<SharedImageRepresentationSkia> SharedImageBacking::ProduceSkia( std::unique_ptr<SharedImageRepresentationDawn> SharedImageBacking::ProduceDawn( SharedImageManager* manager, MemoryTypeTracker* tracker, - DawnDevice device) { + WGPUDevice device) { return nullptr; } diff --git a/chromium/gpu/command_buffer/service/shared_image_backing.h b/chromium/gpu/command_buffer/service/shared_image_backing.h index 11315ca566e..9e6f95cc53f 100644 --- a/chromium/gpu/command_buffer/service/shared_image_backing.h +++ b/chromium/gpu/command_buffer/service/shared_image_backing.h @@ -5,7 +5,7 @@ #ifndef GPU_COMMAND_BUFFER_SERVICE_SHARED_IMAGE_BACKING_H_ #define GPU_COMMAND_BUFFER_SERVICE_SHARED_IMAGE_BACKING_H_ -#include <dawn/dawn.h> +#include <dawn/webgpu.h> #include <memory> @@ -125,7 +125,7 @@ class GPU_GLES2_EXPORT SharedImageBacking { virtual std::unique_ptr<SharedImageRepresentationDawn> ProduceDawn( SharedImageManager* manager, MemoryTypeTracker* tracker, - DawnDevice device); + WGPUDevice device); virtual std::unique_ptr<SharedImageRepresentationOverlay> ProduceOverlay( SharedImageManager* manager, MemoryTypeTracker* tracker); diff --git a/chromium/gpu/command_buffer/service/shared_image_backing_factory_ahardwarebuffer.cc b/chromium/gpu/command_buffer/service/shared_image_backing_factory_ahardwarebuffer.cc index 44e0e1752dd..b3aa8b6ae72 100644 --- a/chromium/gpu/command_buffer/service/shared_image_backing_factory_ahardwarebuffer.cc +++ b/chromium/gpu/command_buffer/service/shared_image_backing_factory_ahardwarebuffer.cc @@ -17,6 +17,7 @@ #include "base/containers/flat_set.h" #include "base/logging.h" #include "base/memory/scoped_refptr.h" +#include "base/posix/eintr_wrapper.h" #include "components/viz/common/gpu/vulkan_context_provider.h" #include "components/viz/common/resources/resource_format_utils.h" #include "components/viz/common/resources/resource_sizes.h" @@ -290,8 +291,8 @@ class SharedImageRepresentationSkiaVkAHB /*gpu_compositing=*/true, format()); auto surface = SkSurface::MakeFromBackendTextureAsRenderTarget( context_state_->gr_context(), promise_texture_->backendTexture(), - kTopLeft_GrSurfaceOrigin, final_msaa_count, sk_color_type, nullptr, - &surface_props); + kTopLeft_GrSurfaceOrigin, final_msaa_count, sk_color_type, + color_space().ToSkColorSpace(), &surface_props); DCHECK(surface); surface_ = surface.get(); return surface; diff --git a/chromium/gpu/command_buffer/service/shared_image_backing_factory_d3d.cc b/chromium/gpu/command_buffer/service/shared_image_backing_factory_d3d.cc index 100c60b3fd7..2888f5fc5d6 100644 --- a/chromium/gpu/command_buffer/service/shared_image_backing_factory_d3d.cc +++ 
b/chromium/gpu/command_buffer/service/shared_image_backing_factory_d3d.cc @@ -8,9 +8,11 @@ #include "components/viz/common/resources/resource_format_utils.h" #include "gpu/command_buffer/common/shared_image_trace_utils.h" #include "gpu/command_buffer/service/mailbox_manager.h" +#include "gpu/command_buffer/service/shared_context_state.h" #include "gpu/command_buffer/service/shared_image_backing.h" #include "gpu/command_buffer/service/shared_image_manager.h" #include "gpu/command_buffer/service/shared_image_representation.h" +#include "gpu/command_buffer/service/shared_image_representation_skia_gl.h" #include "gpu/command_buffer/service/texture_manager.h" #include "ui/gfx/buffer_format_util.h" #include "ui/gl/buildflags.h" @@ -93,15 +95,15 @@ base::Optional<DXGI_FORMAT> VizFormatToDXGIFormat( } #if BUILDFLAG(USE_DAWN) -base::Optional<DawnTextureFormat> VizResourceFormatToDawnTextureFormat( +base::Optional<WGPUTextureFormat> VizResourceFormatToWGPUTextureFormat( viz::ResourceFormat viz_resource_format) { switch (viz_resource_format) { case viz::RGBA_F16: - return DAWN_TEXTURE_FORMAT_RGBA16_FLOAT; + return WGPUTextureFormat_RGBA16Float; case viz::BGRA_8888: - return DAWN_TEXTURE_FORMAT_BGRA8_UNORM; + return WGPUTextureFormat_BGRA8Unorm; case viz::RGBA_8888: - return DAWN_TEXTURE_FORMAT_RGBA8_UNORM; + return WGPUTextureFormat_RGBA8Unorm; default: NOTREACHED(); return {}; @@ -149,6 +151,9 @@ class SharedImageRepresentationGLTexturePassthroughD3D } private: + bool BeginAccess(GLenum mode) override; + void EndAccess() override; + scoped_refptr<gles2::TexturePassthrough> texture_passthrough_; }; @@ -159,7 +164,7 @@ class SharedImageRepresentationDawnD3D : public SharedImageRepresentationDawn { SharedImageRepresentationDawnD3D(SharedImageManager* manager, SharedImageBacking* backing, MemoryTypeTracker* tracker, - DawnDevice device) + WGPUDevice device) : SharedImageRepresentationDawn(manager, backing, tracker), device_(device), dawn_procs_(dawn_native::GetProcs()) { @@ -175,12 +180,12 @@ class SharedImageRepresentationDawnD3D : public SharedImageRepresentationDawn { dawn_procs_.deviceRelease(device_); } - DawnTexture BeginAccess(DawnTextureUsage usage) override; + WGPUTexture BeginAccess(WGPUTextureUsage usage) override; void EndAccess() override; private: - DawnDevice device_; - DawnTexture texture_ = nullptr; + WGPUDevice device_; + WGPUTexture texture_ = nullptr; // TODO(cwallez@chromium.org): Load procs only once when the factory is // created and pass a pointer to them around? 
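
This file tracks Dawn's rename of its C API from dawn.h (DawnTexture, DAWN_TEXTURE_FORMAT_*) to webgpu.h (WGPUTexture, WGPUTextureFormat_*); only the spellings change, not the behavior. The format-mapping helper also follows a pattern worth noting: return an empty optional for formats the backend cannot import rather than asserting in release builds. A standalone sketch of that pattern, with std::optional standing in for base::Optional and invented enum names:

    // Illustrative sketch only, not Chromium code.
    #include <optional>

    enum class VizFormat { kRGBA_8888, kBGRA_8888, kRGBA_F16, kETC1 };
    enum class WgpuFormat { kRGBA8Unorm, kBGRA8Unorm, kRGBA16Float };

    // Maps a compositor format to a WebGPU texture format, or nullopt if
    // the format cannot be shared (e.g. compressed formats).
    std::optional<WgpuFormat> ToWgpuFormat(VizFormat format) {
      switch (format) {
        case VizFormat::kRGBA_8888: return WgpuFormat::kRGBA8Unorm;
        case VizFormat::kBGRA_8888: return WgpuFormat::kBGRA8Unorm;
        case VizFormat::kRGBA_F16:  return WgpuFormat::kRGBA16Float;
        default:                    return std::nullopt;
      }
    }
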
@@ -205,7 +210,8 @@ class SharedImageBackingD3D : public SharedImageBacking { scoped_refptr<gl::GLImageD3D> image, size_t buffer_index, Microsoft::WRL::ComPtr<ID3D11Texture2D> d3d11_texture, - base::win::ScopedHandle shared_handle) + base::win::ScopedHandle shared_handle, + Microsoft::WRL::ComPtr<IDXGIKeyedMutex> dxgi_keyed_mutex) : SharedImageBacking(mailbox, format, size, @@ -220,7 +226,8 @@ class SharedImageBackingD3D : public SharedImageBacking { image_(std::move(image)), buffer_index_(buffer_index), d3d11_texture_(std::move(d3d11_texture)), - shared_handle_(std::move(shared_handle)) { + shared_handle_(std::move(shared_handle)), + dxgi_keyed_mutex_(std::move(dxgi_keyed_mutex)) { DCHECK(d3d11_texture_); DCHECK((texture_ && !texture_passthrough_) || (!texture_ && texture_passthrough_)); @@ -253,7 +260,7 @@ class SharedImageBackingD3D : public SharedImageBacking { std::unique_ptr<SharedImageRepresentationDawn> ProduceDawn( SharedImageManager* manager, MemoryTypeTracker* tracker, - DawnDevice device) override { + WGPUDevice device) override { #if BUILDFLAG(USE_DAWN) return std::make_unique<SharedImageRepresentationDawnD3D>(manager, this, tracker, device); @@ -273,6 +280,9 @@ class SharedImageBackingD3D : public SharedImageBacking { } swap_chain_ = nullptr; d3d11_texture_.Reset(); + dxgi_keyed_mutex_.Reset(); + keyed_mutex_acquire_key_ = 0; + keyed_mutex_acquired_ = false; shared_handle_.Close(); } @@ -296,6 +306,48 @@ class SharedImageBackingD3D : public SharedImageBacking { image_->OnMemoryDump(pmd, client_tracing_id, dump_name); } + bool BeginAccessD3D12(uint64_t* acquire_key) { + if (keyed_mutex_acquired_) { + DLOG(ERROR) << "Recursive BeginAccess not supported"; + return false; + } + *acquire_key = keyed_mutex_acquire_key_; + keyed_mutex_acquire_key_++; + keyed_mutex_acquired_ = true; + return true; + } + + void EndAccessD3D12() { keyed_mutex_acquired_ = false; } + + bool BeginAccessD3D11() { + if (dxgi_keyed_mutex_) { + if (keyed_mutex_acquired_) { + DLOG(ERROR) << "Recursive BeginAccess not supported"; + return false; + } + const HRESULT hr = + dxgi_keyed_mutex_->AcquireSync(keyed_mutex_acquire_key_, INFINITE); + if (FAILED(hr)) { + DLOG(ERROR) << "Unable to acquire the keyed mutex " << std::hex << hr; + return false; + } + keyed_mutex_acquire_key_++; + keyed_mutex_acquired_ = true; + } + return true; + } + void EndAccessD3D11() { + if (dxgi_keyed_mutex_) { + const HRESULT hr = + dxgi_keyed_mutex_->ReleaseSync(keyed_mutex_acquire_key_); + if (FAILED(hr)) { + DLOG(ERROR) << "Unable to release the keyed mutex " << std::hex << hr; + return; + } + keyed_mutex_acquired_ = false; + } + } + HANDLE GetSharedHandle() const { return shared_handle_.Get(); } bool PresentSwapChain() override { @@ -355,6 +407,15 @@ class SharedImageBackingD3D : public SharedImageBacking { manager, this, tracker, texture_passthrough_); } + std::unique_ptr<SharedImageRepresentationSkia> ProduceSkia( + SharedImageManager* manager, + MemoryTypeTracker* tracker, + scoped_refptr<SharedContextState> context_state) override { + return SharedImageRepresentationSkiaGL::CreateForPassthrough( + ProduceGLTexturePassthrough(manager, tracker), std::move(context_state), + manager, this, tracker); + } + private: Microsoft::WRL::ComPtr<IDXGISwapChain1> swap_chain_; gles2::Texture* texture_ = nullptr; @@ -362,38 +423,56 @@ class SharedImageBackingD3D : public SharedImageBacking { scoped_refptr<gl::GLImageD3D> image_; const size_t buffer_index_; Microsoft::WRL::ComPtr<ID3D11Texture2D> d3d11_texture_; + + // If d3d11_texture_ has a 
keyed mutex, it will be stored in + // dxgi_keyed_mutex. The keyed mutex is used to synchronize + // D3D11 and D3D12 Chromium components. + // dxgi_keyed_mutex_ is the D3D11 side of the keyed mutex. + // To create the corresponding D3D12 interface, pass the handle + // stored in shared_handle_ to ID3D12Device::OpenSharedHandle. + // Only one component is allowed to read/write to the texture + // at a time. keyed_mutex_acquire_key_ is incremented on every + // Acquire/Release usage. base::win::ScopedHandle shared_handle_; + Microsoft::WRL::ComPtr<IDXGIKeyedMutex> dxgi_keyed_mutex_; + uint64_t keyed_mutex_acquire_key_ = 0; + bool keyed_mutex_acquired_ = false; DISALLOW_COPY_AND_ASSIGN(SharedImageBackingD3D); }; #if BUILDFLAG(USE_DAWN) -DawnTexture SharedImageRepresentationDawnD3D::BeginAccess( - DawnTextureUsage usage) { +WGPUTexture SharedImageRepresentationDawnD3D::BeginAccess( + WGPUTextureUsage usage) { SharedImageBackingD3D* d3d_image_backing = static_cast<SharedImageBackingD3D*>(backing()); const HANDLE shared_handle = d3d_image_backing->GetSharedHandle(); const viz::ResourceFormat viz_resource_format = d3d_image_backing->format(); - const base::Optional<DawnTextureFormat> dawn_texture_format = - VizResourceFormatToDawnTextureFormat(viz_resource_format); - if (!dawn_texture_format.has_value()) { + const base::Optional<WGPUTextureFormat> wgpu_texture_format = + VizResourceFormatToWGPUTextureFormat(viz_resource_format); + if (!wgpu_texture_format.has_value()) { DLOG(ERROR) << "Unsupported viz format found: " << viz_resource_format; return nullptr; } - DawnTextureDescriptor desc; + uint64_t shared_mutex_acquire_key; + if (!d3d_image_backing->BeginAccessD3D12(&shared_mutex_acquire_key)) { + return nullptr; + } + + WGPUTextureDescriptor desc; desc.nextInChain = nullptr; - desc.format = dawn_texture_format.value(); + desc.format = wgpu_texture_format.value(); desc.usage = usage; - desc.dimension = DAWN_TEXTURE_DIMENSION_2D; + desc.dimension = WGPUTextureDimension_2D; desc.size = {size().width(), size().height(), 1}; desc.arrayLayerCount = 1; desc.mipLevelCount = 1; desc.sampleCount = 1; - texture_ = - dawn_native::d3d12::WrapSharedHandle(device_, &desc, shared_handle); + texture_ = dawn_native::d3d12::WrapSharedHandle(device_, &desc, shared_handle, + shared_mutex_acquire_key); if (texture_) { // Keep a reference to the texture so that it stays valid (its content // might be destroyed). @@ -406,6 +485,8 @@ DawnTexture SharedImageRepresentationDawnD3D::BeginAccess( // uninitialized data. When !IsCleared we should tell dawn_native to // consider the texture lazy-cleared. SetCleared(); + } else { + d3d_image_backing->EndAccessD3D12(); } return texture_; @@ -416,6 +497,9 @@ void SharedImageRepresentationDawnD3D::EndAccess() { return; } + SharedImageBackingD3D* d3d_image_backing = + static_cast<SharedImageBackingD3D*>(backing()); + // TODO(cwallez@chromium.org): query dawn_native to know if the texture was // cleared and set IsCleared appropriately. 
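
The acquire-key protocol used above is easy to model: every successful begin/end pair advances a monotonically increasing key, and each device must present the current key when it acquires, so the D3D11 and D3D12 sides take strictly alternating turns and never touch the texture concurrently. A toy model of that state machine follows; it is a simplification for illustration, not the real IDXGIKeyedMutex API, and KeyedMutexModel is an invented name.

    // Illustrative sketch only, not Chromium code.
    #include <cstdint>

    struct KeyedMutexModel {
      uint64_t next_key = 0;
      bool acquired = false;

      // Mirrors SharedImageBackingD3D::BeginAccessD3D12(): hand out the
      // current key, advance it, and reject recursive access.
      bool BeginAccess(uint64_t* acquire_key) {
        if (acquired)
          return false;
        *acquire_key = next_key++;
        acquired = true;
        return true;
      }

      void EndAccess() { acquired = false; }
    };
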
@@ -425,9 +509,24 @@ void SharedImageRepresentationDawnD3D::EndAccess() { dawn_procs_.textureRelease(texture_); texture_ = nullptr; + + d3d_image_backing->EndAccessD3D12(); } #endif // BUILDFLAG(USE_DAWN) +bool SharedImageRepresentationGLTexturePassthroughD3D::BeginAccess( + GLenum mode) { + SharedImageBackingD3D* d3d_image_backing = + static_cast<SharedImageBackingD3D*>(backing()); + return d3d_image_backing->BeginAccessD3D11(); +} + +void SharedImageRepresentationGLTexturePassthroughD3D::EndAccess() { + SharedImageBackingD3D* d3d_image_backing = + static_cast<SharedImageBackingD3D*>(backing()); + d3d_image_backing->EndAccessD3D11(); +} + SharedImageBackingFactoryD3D::SharedImageBackingFactoryD3D(bool use_passthrough) : use_passthrough_(use_passthrough), d3d11_device_(gl::QueryD3D11DeviceObjectFromANGLE()) { @@ -478,6 +577,8 @@ std::unique_ptr<SharedImageBacking> SharedImageBackingFactoryD3D::MakeBacking( api->glTexParameteriFn(target, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE); api->glTexParameteriFn(target, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE); + Microsoft::WRL::ComPtr<IDXGIKeyedMutex> dxgi_keyed_mutex; + if (swap_chain) { DCHECK(!d3d11_texture); DCHECK(!shared_handle.IsValid()); @@ -487,9 +588,16 @@ std::unique_ptr<SharedImageBacking> SharedImageBackingFactoryD3D::MakeBacking( DLOG(ERROR) << "GetBuffer failed with error " << std::hex; return nullptr; } - } else { - DCHECK(d3d11_texture); + } else if (shared_handle.IsValid()) { + const HRESULT hr = d3d11_texture.As(&dxgi_keyed_mutex); + if (FAILED(hr)) { + DLOG(ERROR) << "Unable to QueryInterface for IDXGIKeyedMutex on texture " + "with shared handle " + << std::hex; + return nullptr; + } } + DCHECK(d3d11_texture); // The GL internal format can differ from the underlying swap chain format // e.g. RGBA8 or RGB8 instead of BGRA8. 
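
MakeBacking() only obtains a keyed mutex when the texture was created with D3D11_RESOURCE_MISC_SHARED_KEYEDMUTEX; ComPtr::As() wraps QueryInterface(), so the query fails cleanly for plain textures. A minimal sketch of that query, assuming the standard WRL and DXGI headers; QueryKeyedMutex is an invented helper name.

    // Illustrative sketch only (Windows), not Chromium code.
    #include <d3d11.h>
    #include <dxgi.h>
    #include <wrl/client.h>

    using Microsoft::WRL::ComPtr;

    // Returns the keyed mutex for |texture|, or null if the texture was
    // not created with D3D11_RESOURCE_MISC_SHARED_KEYEDMUTEX.
    ComPtr<IDXGIKeyedMutex> QueryKeyedMutex(
        const ComPtr<ID3D11Texture2D>& texture) {
      ComPtr<IDXGIKeyedMutex> keyed_mutex;
      if (FAILED(texture.As(&keyed_mutex)))
        keyed_mutex.Reset();
      return keyed_mutex;
    }
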
@@ -538,7 +646,8 @@ std::unique_ptr<SharedImageBacking> SharedImageBackingFactoryD3D::MakeBacking( return std::make_unique<SharedImageBackingD3D>( mailbox, format, size, color_space, usage, std::move(swap_chain), texture, std::move(texture_passthrough), std::move(image), buffer_index, - std::move(d3d11_texture), std::move(shared_handle)); + std::move(d3d11_texture), std::move(shared_handle), + std::move(dxgi_keyed_mutex)); } SharedImageBackingFactoryD3D::SwapChainBackings @@ -671,8 +780,8 @@ SharedImageBackingFactoryD3D::CreateSharedImage( desc.Usage = D3D11_USAGE_DEFAULT; desc.BindFlags = D3D11_BIND_SHADER_RESOURCE | D3D11_BIND_RENDER_TARGET; desc.CPUAccessFlags = 0; - desc.MiscFlags = - D3D11_RESOURCE_MISC_SHARED_NTHANDLE | D3D11_RESOURCE_MISC_SHARED; + desc.MiscFlags = D3D11_RESOURCE_MISC_SHARED_NTHANDLE | + D3D11_RESOURCE_MISC_SHARED_KEYEDMUTEX; Microsoft::WRL::ComPtr<ID3D11Texture2D> d3d11_texture; HRESULT hr = d3d11_device_->CreateTexture2D(&desc, nullptr, &d3d11_texture); if (FAILED(hr)) { diff --git a/chromium/gpu/command_buffer/service/shared_image_backing_factory_d3d_unittest.cc b/chromium/gpu/command_buffer/service/shared_image_backing_factory_d3d_unittest.cc index 6d02a0a71f8..f4b9a1cbb6c 100644 --- a/chromium/gpu/command_buffer/service/shared_image_backing_factory_d3d_unittest.cc +++ b/chromium/gpu/command_buffer/service/shared_image_backing_factory_d3d_unittest.cc @@ -14,13 +14,24 @@ #include "gpu/command_buffer/service/shared_image_factory.h" #include "gpu/command_buffer/service/shared_image_manager.h" #include "gpu/command_buffer/service/shared_image_representation.h" +#include "gpu/config/gpu_test_config.h" #include "testing/gtest/include/gtest/gtest.h" +#include "third_party/skia/include/core/SkImage.h" +#include "third_party/skia/include/core/SkPromiseImageTexture.h" +#include "third_party/skia/include/core/SkSurface.h" +#include "ui/gl/buildflags.h" #include "ui/gl/gl_angle_util_win.h" #include "ui/gl/gl_context.h" #include "ui/gl/gl_image_d3d.h" #include "ui/gl/gl_surface.h" #include "ui/gl/init/gl_factory.h" +#if BUILDFLAG(USE_DAWN) +#include <dawn/dawn_proc.h> +#include <dawn/webgpu_cpp.h> +#include <dawn_native/DawnNative.h> +#endif // BUILDFLAG(USE_DAWN) + namespace gpu { namespace { @@ -55,12 +66,19 @@ GLuint MakeTextureAndSetParameters(gl::GLApi* api, GLenum target, bool fbo) { return texture_id; } -class SharedImageBackingFactoryD3DTest : public testing::TestWithParam<bool> { +bool IsD3DSharedImageSupported() { + // D3D shared images with the current group of flags only works on Win8+ + // OSes. If we need shared images on Win7, we can create them but a more + // insecure group of flags is required. 
+ if (GPUTestBotConfig::CurrentConfigMatches("Win7")) + return false; + return true; +} + +class SharedImageBackingFactoryD3DTestBase + : public testing::TestWithParam<bool> { public: void SetUp() override { - if (!SharedImageBackingFactoryD3D::IsSwapChainSupported()) - return; - use_passthrough_texture_ = GetParam(); surface_ = gl::init::CreateOffscreenGLSurface(gfx::Size()); @@ -90,7 +108,17 @@ class SharedImageBackingFactoryD3DTest : public testing::TestWithParam<bool> { std::unique_ptr<SharedImageBackingFactoryD3D> shared_image_factory_; }; -TEST_P(SharedImageBackingFactoryD3DTest, InvalidFormat) { +class SharedImageBackingFactoryD3DTestSwapChain + : public SharedImageBackingFactoryD3DTestBase { + public: + void SetUp() override { + if (!SharedImageBackingFactoryD3D::IsSwapChainSupported()) + return; + SharedImageBackingFactoryD3DTestBase::SetUp(); + } +}; + +TEST_P(SharedImageBackingFactoryD3DTestSwapChain, InvalidFormat) { if (!SharedImageBackingFactoryD3D::IsSwapChainSupported()) return; @@ -139,7 +167,7 @@ TEST_P(SharedImageBackingFactoryD3DTest, InvalidFormat) { } } -TEST_P(SharedImageBackingFactoryD3DTest, CreateAndPresentSwapChain) { +TEST_P(SharedImageBackingFactoryD3DTestSwapChain, CreateAndPresentSwapChain) { if (!SharedImageBackingFactoryD3D::IsSwapChainSupported()) return; @@ -391,9 +419,213 @@ TEST_P(SharedImageBackingFactoryD3DTest, CreateAndPresentSwapChain) { api->glDeleteFramebuffersEXTFn(1, &fbo); } +class SharedImageBackingFactoryD3DTest + : public SharedImageBackingFactoryD3DTestBase { + public: + void SetUp() override { + if (!IsD3DSharedImageSupported()) + return; + + SharedImageBackingFactoryD3DTestBase::SetUp(); + ASSERT_TRUE(use_passthrough_texture_); + GpuDriverBugWorkarounds workarounds; + scoped_refptr<gl::GLShareGroup> share_group = new gl::GLShareGroup(); + context_state_ = base::MakeRefCounted<SharedContextState>( + std::move(share_group), surface_, context_, + /*use_virtualized_gl_contexts=*/false, base::DoNothing()); + context_state_->InitializeGrContext(workarounds, nullptr); + auto feature_info = + base::MakeRefCounted<gles2::FeatureInfo>(workarounds, GpuFeatureInfo()); + context_state_->InitializeGL(GpuPreferences(), std::move(feature_info)); + } + + protected: + GrContext* gr_context() const { return context_state_->gr_context(); } + + void CheckSkiaPixels(const Mailbox& mailbox, const gfx::Size& size) const { + auto skia_representation = + shared_image_representation_factory_->ProduceSkia(mailbox, + context_state_); + ASSERT_NE(skia_representation, nullptr); + + SharedImageRepresentationSkia::ScopedReadAccess scoped_read_access( + skia_representation.get(), nullptr, nullptr); + + auto* promise_texture = scoped_read_access.promise_image_texture(); + GrBackendTexture backend_texture = promise_texture->backendTexture(); + + EXPECT_TRUE(backend_texture.isValid()); + EXPECT_EQ(size.width(), backend_texture.width()); + EXPECT_EQ(size.height(), backend_texture.height()); + + // Create an Sk Image from GrBackendTexture. + auto sk_image = SkImage::MakeFromTexture( + gr_context(), backend_texture, kTopLeft_GrSurfaceOrigin, + kRGBA_8888_SkColorType, kOpaque_SkAlphaType, nullptr); + + const SkImageInfo dst_info = + SkImageInfo::Make(size.width(), size.height(), kRGBA_8888_SkColorType, + kOpaque_SkAlphaType, nullptr); + + const int num_pixels = size.width() * size.height(); + std::vector<uint8_t> dst_pixels(num_pixels * 4); + + // Read back pixels from Sk Image. 
+ EXPECT_TRUE(sk_image->readPixels(dst_info, dst_pixels.data(), + dst_info.minRowBytes(), 0, 0)); + + for (int i = 0; i < num_pixels; i++) { + // Compare the pixel values. + const uint8_t* pixel = dst_pixels.data() + (i * 4); + EXPECT_EQ(pixel[0], 0); + EXPECT_EQ(pixel[1], 255); + EXPECT_EQ(pixel[2], 0); + EXPECT_EQ(pixel[3], 255); + } + } + + scoped_refptr<SharedContextState> context_state_; +}; + +// Test to check interaction between Gl and skia GL representations. +// We write to a GL texture using gl representation and then read from skia +// representation. +TEST_P(SharedImageBackingFactoryD3DTest, GL_SkiaGL) { + if (!IsD3DSharedImageSupported()) + return; + + // Create a backing using mailbox. + auto mailbox = Mailbox::GenerateForSharedImage(); + const auto format = viz::ResourceFormat::RGBA_8888; + const gfx::Size size(1, 1); + const auto color_space = gfx::ColorSpace::CreateSRGB(); + const uint32_t usage = SHARED_IMAGE_USAGE_GLES2 | SHARED_IMAGE_USAGE_DISPLAY; + auto backing = shared_image_factory_->CreateSharedImage( + mailbox, format, size, color_space, usage, false /* is_thread_safe */); + ASSERT_NE(backing, nullptr); + + GLenum expected_target = GL_TEXTURE_2D; + std::unique_ptr<SharedImageRepresentationFactoryRef> factory_ref = + shared_image_manager_.Register(std::move(backing), + memory_type_tracker_.get()); + + // Create a SharedImageRepresentationGLTexture. + auto gl_representation = + shared_image_representation_factory_->ProduceGLTexturePassthrough( + mailbox); + EXPECT_EQ(expected_target, + gl_representation->GetTexturePassthrough()->target()); + + SharedImageRepresentationGLTexturePassthrough::ScopedAccess scoped_access( + gl_representation.get(), GL_SHARED_IMAGE_ACCESS_MODE_READWRITE_CHROMIUM); + EXPECT_TRUE(scoped_access.success()); + + // Create an FBO. + GLuint fbo = 0; + gl::GLApi* api = gl::g_current_gl_context; + api->glGenFramebuffersEXTFn(1, &fbo); + api->glBindFramebufferEXTFn(GL_FRAMEBUFFER, fbo); + + // Attach the texture to FBO. + api->glFramebufferTexture2DEXTFn( + GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, + gl_representation->GetTexturePassthrough()->target(), + gl_representation->GetTexturePassthrough()->service_id(), 0); + + // Set the clear color to green. + api->glClearColorFn(0.0f, 1.0f, 0.0f, 1.0f); + api->glClearFn(GL_COLOR_BUFFER_BIT); + scoped_access.reset(); + gl_representation.reset(); + + CheckSkiaPixels(mailbox, size); + + factory_ref.reset(); +} + +#if BUILDFLAG(USE_DAWN) +// Test to check interaction between Dawn and skia GL representations. +TEST_P(SharedImageBackingFactoryD3DTest, Dawn_SkiaGL) { + if (!IsD3DSharedImageSupported()) + return; + + // Create a Dawn D3D12 device + dawn_native::Instance instance; + instance.DiscoverDefaultAdapters(); + + std::vector<dawn_native::Adapter> adapters = instance.GetAdapters(); + auto adapter_it = std::find_if( + adapters.begin(), adapters.end(), [](dawn_native::Adapter adapter) { + return adapter.GetBackendType() == dawn_native::BackendType::D3D12; + }); + ASSERT_NE(adapter_it, adapters.end()); + + wgpu::Device device = wgpu::Device::Acquire(adapter_it->CreateDevice()); + DawnProcTable procs = dawn_native::GetProcs(); + dawnProcSetProcs(&procs); + + // Create a backing using mailbox. 
+ const auto mailbox = Mailbox::GenerateForSharedImage(); + const auto format = viz::ResourceFormat::RGBA_8888; + const gfx::Size size(1, 1); + const auto color_space = gfx::ColorSpace::CreateSRGB(); + const uint32_t usage = SHARED_IMAGE_USAGE_WEBGPU | SHARED_IMAGE_USAGE_DISPLAY; + auto backing = shared_image_factory_->CreateSharedImage( + mailbox, format, size, color_space, usage, false /* is_thread_safe */); + ASSERT_NE(backing, nullptr); + + std::unique_ptr<SharedImageRepresentationFactoryRef> factory_ref = + shared_image_manager_.Register(std::move(backing), + memory_type_tracker_.get()); + + // Clear the shared image to green using Dawn. + { + // Create a SharedImageRepresentationDawn. + auto dawn_representation = + shared_image_representation_factory_->ProduceDawn(mailbox, + device.Get()); + + wgpu::Texture texture = wgpu::Texture::Acquire( + dawn_representation->BeginAccess(WGPUTextureUsage_OutputAttachment)); + + wgpu::RenderPassColorAttachmentDescriptor color_desc; + color_desc.attachment = texture.CreateView(); + color_desc.resolveTarget = nullptr; + color_desc.loadOp = wgpu::LoadOp::Clear; + color_desc.storeOp = wgpu::StoreOp::Store; + color_desc.clearColor = {0, 255, 0, 255}; + + wgpu::RenderPassDescriptor renderPassDesc; + renderPassDesc.colorAttachmentCount = 1; + renderPassDesc.colorAttachments = &color_desc; + renderPassDesc.depthStencilAttachment = nullptr; + + wgpu::CommandEncoder encoder = device.CreateCommandEncoder(); + wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPassDesc); + pass.EndPass(); + wgpu::CommandBuffer commands = encoder.Finish(); + + wgpu::Queue queue = device.CreateQueue(); + queue.Submit(1, &commands); + + dawn_representation->EndAccess(); + } + + CheckSkiaPixels(mailbox, size); + + // Shut down Dawn + device = wgpu::Device(); + dawnProcSetProcs(nullptr); + + factory_ref.reset(); +} +#endif // BUILDFLAG(USE_DAWN) + INSTANTIATE_TEST_SUITE_P(/* no prefix */, - SharedImageBackingFactoryD3DTest, + SharedImageBackingFactoryD3DTestSwapChain, testing::Bool()); - +INSTANTIATE_TEST_SUITE_P(/* no prefix */, + SharedImageBackingFactoryD3DTest, + testing::Values(true)); } // anonymous namespace } // namespace gpu diff --git a/chromium/gpu/command_buffer/service/shared_image_backing_factory_gl_texture.cc b/chromium/gpu/command_buffer/service/shared_image_backing_factory_gl_texture.cc index 86a1c5869be..07981081e05 100644 --- a/chromium/gpu/command_buffer/service/shared_image_backing_factory_gl_texture.cc +++ b/chromium/gpu/command_buffer/service/shared_image_backing_factory_gl_texture.cc @@ -883,10 +883,20 @@ SharedImageBackingFactoryGLTexture::CreateSharedImage( image = image_factory_->CreateAnonymousImage( size, format_info.buffer_format, gfx::BufferUsage::SCANOUT, &is_cleared); - // A SCANOUT image should not require copy. - DCHECK(!image || image->ShouldBindOrCopy() == gl::GLImage::BIND); - if (!image || !image->BindTexImage(target)) { - LOG(ERROR) << "CreateSharedImage: Failed to create image"; + // Scanout images have different constraints than GL images and might fail + // to allocate even if GL images can be created. + if (!image) { + // TODO(dcastagna): Use BufferUsage::GPU_READ_WRITE instead + // BufferUsage::GPU_READ once we add it. + image = image_factory_->CreateAnonymousImage( + size, format_info.buffer_format, gfx::BufferUsage::GPU_READ, + &is_cleared); + } + // The allocated image should not require copy. 
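// Background for the check below: a gl::GLImage either binds directly as
// the texture's storage (BIND) or must be copied in (COPY), and this
// factory only accepts directly bindable images. The new fallback logic,
// restated as a hypothetical helper for clarity (signatures taken from the
// calls in this hunk):
scoped_refptr<gl::GLImage> TryCreateBindableImage(gpu::ImageFactory* factory,
                                                  const gfx::Size& size,
                                                  gfx::BufferFormat format,
                                                  bool* is_cleared) {
  // Prefer SCANOUT; fall back to GPU_READ if scanout constraints fail.
  auto image = factory->CreateAnonymousImage(
      size, format, gfx::BufferUsage::SCANOUT, is_cleared);
  if (!image) {
    image = factory->CreateAnonymousImage(
        size, format, gfx::BufferUsage::GPU_READ, is_cleared);
  }
  if (image && image->ShouldBindOrCopy() != gl::GLImage::BIND)
    image = nullptr;  // only directly bindable images are usable here
  return image;
}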
+ if (!image || image->ShouldBindOrCopy() != gl::GLImage::BIND || + !image->BindTexImage(target)) { + LOG(ERROR) << "CreateSharedImage: Failed to " + << (image ? "bind" : "create") << " image"; api->glDeleteTexturesFn(1, &service_id); return nullptr; } diff --git a/chromium/gpu/command_buffer/service/shared_image_backing_factory_iosurface.mm b/chromium/gpu/command_buffer/service/shared_image_backing_factory_iosurface.mm index 2ca66f3146b..0f51e0b817d 100644 --- a/chromium/gpu/command_buffer/service/shared_image_backing_factory_iosurface.mm +++ b/chromium/gpu/command_buffer/service/shared_image_backing_factory_iosurface.mm @@ -88,32 +88,32 @@ void FlushIOSurfaceGLOperations() { api->glFlushFn(); } -base::Optional<DawnTextureFormat> GetDawnFormat(viz::ResourceFormat format) { +base::Optional<WGPUTextureFormat> GetWGPUFormat(viz::ResourceFormat format) { switch (format) { case viz::RED_8: case viz::ALPHA_8: case viz::LUMINANCE_8: - return DAWN_TEXTURE_FORMAT_R8_UNORM; + return WGPUTextureFormat_R8Unorm; case viz::RG_88: - return DAWN_TEXTURE_FORMAT_RG8_UNORM; + return WGPUTextureFormat_RG8Unorm; case viz::RGBA_8888: case viz::BGRA_8888: - return DAWN_TEXTURE_FORMAT_BGRA8_UNORM; + return WGPUTextureFormat_BGRA8Unorm; default: return {}; } } -base::Optional<DawnTextureFormat> GetDawnFormat(gfx::BufferFormat format) { +base::Optional<WGPUTextureFormat> GetWGPUFormat(gfx::BufferFormat format) { switch (format) { case gfx::BufferFormat::R_8: - return DAWN_TEXTURE_FORMAT_R8_UNORM; + return WGPUTextureFormat_R8Unorm; case gfx::BufferFormat::RG_88: - return DAWN_TEXTURE_FORMAT_RG8_UNORM; + return WGPUTextureFormat_RG8Unorm; case gfx::BufferFormat::RGBX_8888: case gfx::BufferFormat::RGBA_8888: case gfx::BufferFormat::BGRX_8888: - return DAWN_TEXTURE_FORMAT_BGRA8_UNORM; + return WGPUTextureFormat_BGRA8Unorm; default: return {}; } @@ -275,13 +275,13 @@ class SharedImageRepresentationDawnIOSurface SharedImageManager* manager, SharedImageBacking* backing, MemoryTypeTracker* tracker, - DawnDevice device, + WGPUDevice device, base::ScopedCFTypeRef<IOSurfaceRef> io_surface, - DawnTextureFormat dawn_format) + WGPUTextureFormat wgpu_format) : SharedImageRepresentationDawn(manager, backing, tracker), io_surface_(std::move(io_surface)), device_(device), - dawn_format_(dawn_format), + wgpu_format_(wgpu_format), dawn_procs_(dawn_native::GetProcs()) { DCHECK(device_); DCHECK(io_surface_); @@ -296,12 +296,12 @@ class SharedImageRepresentationDawnIOSurface dawn_procs_.deviceRelease(device_); } - DawnTexture BeginAccess(DawnTextureUsage usage) final { - DawnTextureDescriptor desc; + WGPUTexture BeginAccess(WGPUTextureUsage usage) final { + WGPUTextureDescriptor desc; desc.nextInChain = nullptr; - desc.format = dawn_format_; + desc.format = wgpu_format_; desc.usage = usage; - desc.dimension = DAWN_TEXTURE_DIMENSION_2D; + desc.dimension = WGPUTextureDimension_2D; desc.size = {size().width(), size().height(), 1}; desc.arrayLayerCount = 1; desc.mipLevelCount = 1; @@ -339,7 +339,7 @@ class SharedImageRepresentationDawnIOSurface dawn_procs_.textureDestroy(texture_); // macOS has a global GPU command queue so synchronization between APIs and - // devices is automatic. However on Metal, dawnQueueSubmit "commits" the + // devices is automatic. However on Metal, wgpuQueueSubmit "commits" the // Metal command buffers but they aren't "scheduled" in the global queue // immediately. (that work seems offloaded to a different thread?) 
// Wait for all the previous submitted commands to be scheduled to have @@ -354,9 +354,9 @@ class SharedImageRepresentationDawnIOSurface private: base::ScopedCFTypeRef<IOSurfaceRef> io_surface_; - DawnDevice device_; - DawnTexture texture_ = nullptr; - DawnTextureFormat dawn_format_; + WGPUDevice device_; + WGPUTexture texture_ = nullptr; + WGPUTextureFormat wgpu_format_; // TODO(cwallez@chromium.org): Load procs only once when the factory is // created and pass a pointer to them around? @@ -378,7 +378,7 @@ class SharedImageBackingIOSurface : public SharedImageBacking { const gfx::ColorSpace& color_space, uint32_t usage, base::ScopedCFTypeRef<IOSurfaceRef> io_surface, - base::Optional<DawnTextureFormat> dawn_format, + base::Optional<WGPUTextureFormat> dawn_format, size_t estimated_size) : SharedImageBacking(mailbox, format, @@ -477,7 +477,7 @@ class SharedImageBackingIOSurface : public SharedImageBacking { std::unique_ptr<SharedImageRepresentationDawn> ProduceDawn( SharedImageManager* manager, MemoryTypeTracker* tracker, - DawnDevice device) override { + WGPUDevice device) override { #if BUILDFLAG(USE_DAWN) if (!dawn_format_) { LOG(ERROR) << "Format not supported for Dawn"; @@ -561,7 +561,7 @@ class SharedImageBackingIOSurface : public SharedImageBacking { } base::ScopedCFTypeRef<IOSurfaceRef> io_surface_; - base::Optional<DawnTextureFormat> dawn_format_; + base::Optional<WGPUTextureFormat> dawn_format_; base::scoped_nsprotocol<id<MTLTexture>> mtl_texture_; bool is_cleared_ = false; @@ -645,7 +645,7 @@ SharedImageBackingFactoryIOSurface::CreateSharedImage( return std::make_unique<SharedImageBackingIOSurface>( mailbox, format, size, color_space, usage, std::move(io_surface), - GetDawnFormat(format), estimated_size); + GetWGPUFormat(format), estimated_size); } std::unique_ptr<SharedImageBacking> @@ -692,7 +692,7 @@ SharedImageBackingFactoryIOSurface::CreateSharedImage( return std::make_unique<SharedImageBackingIOSurface>( mailbox, resource_format, size, color_space, usage, std::move(io_surface), - GetDawnFormat(format), estimated_size); + GetWGPUFormat(format), estimated_size); } bool SharedImageBackingFactoryIOSurface::CanImportGpuMemoryBuffer( diff --git a/chromium/gpu/command_buffer/service/shared_image_backing_factory_iosurface_unittest.cc b/chromium/gpu/command_buffer/service/shared_image_backing_factory_iosurface_unittest.cc index 0d03b524d98..6a8ccda5171 100644 --- a/chromium/gpu/command_buffer/service/shared_image_backing_factory_iosurface_unittest.cc +++ b/chromium/gpu/command_buffer/service/shared_image_backing_factory_iosurface_unittest.cc @@ -30,7 +30,7 @@ #if BUILDFLAG(USE_DAWN) #include <dawn/dawn_proc.h> -#include <dawn/dawncpp.h> +#include <dawn/webgpu_cpp.h> #include <dawn_native/DawnNative.h> #endif // BUILDFLAG(USE_DAWN) @@ -266,7 +266,7 @@ TEST_F(SharedImageBackingFactoryIOSurfaceTest, Dawn_SkiaGL) { }); ASSERT_NE(adapter_it, adapters.end()); - dawn::Device device = dawn::Device::Acquire(adapter_it->CreateDevice()); + wgpu::Device device = wgpu::Device::Acquire(adapter_it->CreateDevice()); DawnProcTable procs = dawn_native::GetProcs(); dawnProcSetProcs(&procs); @@ -291,27 +291,27 @@ TEST_F(SharedImageBackingFactoryIOSurfaceTest, Dawn_SkiaGL) { // Clear the shared image to green using Dawn. 
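// The hunk below is a mechanical rename from Dawn's old names to the
// upstream webgpu.h names: dawn::Foo -> wgpu::Foo, DAWN_TEXTURE_USAGE_* ->
// WGPUTextureUsage_*, DAWN_TEXTURE_FORMAT_* -> WGPUTextureFormat_*. Both
// spellings of the same usage bit, as a sketch:
WGPUTextureUsage c_usage = WGPUTextureUsage_OutputAttachment;         // C header
wgpu::TextureUsage cpp_usage = wgpu::TextureUsage::OutputAttachment;  // C++ wrapper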
{ - dawn::Texture texture = dawn::Texture::Acquire( - dawn_representation->BeginAccess(DAWN_TEXTURE_USAGE_OUTPUT_ATTACHMENT)); + wgpu::Texture texture = wgpu::Texture::Acquire( + dawn_representation->BeginAccess(WGPUTextureUsage_OutputAttachment)); - dawn::RenderPassColorAttachmentDescriptor color_desc; + wgpu::RenderPassColorAttachmentDescriptor color_desc; color_desc.attachment = texture.CreateView(); color_desc.resolveTarget = nullptr; - color_desc.loadOp = dawn::LoadOp::Clear; - color_desc.storeOp = dawn::StoreOp::Store; + color_desc.loadOp = wgpu::LoadOp::Clear; + color_desc.storeOp = wgpu::StoreOp::Store; color_desc.clearColor = {0, 255, 0, 255}; - dawn::RenderPassDescriptor renderPassDesc; + wgpu::RenderPassDescriptor renderPassDesc; renderPassDesc.colorAttachmentCount = 1; renderPassDesc.colorAttachments = &color_desc; renderPassDesc.depthStencilAttachment = nullptr; - dawn::CommandEncoder encoder = device.CreateCommandEncoder(); - dawn::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPassDesc); + wgpu::CommandEncoder encoder = device.CreateCommandEncoder(); + wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPassDesc); pass.EndPass(); - dawn::CommandBuffer commands = encoder.Finish(); + wgpu::CommandBuffer commands = encoder.Finish(); - dawn::Queue queue = device.CreateQueue(); + wgpu::Queue queue = device.CreateQueue(); queue.Submit(1, &commands); } @@ -355,7 +355,7 @@ TEST_F(SharedImageBackingFactoryIOSurfaceTest, Dawn_SkiaGL) { EXPECT_EQ(dst_pixels[3], 255); // Shut down Dawn - device = dawn::Device(); + device = wgpu::Device(); dawnProcSetProcs(nullptr); skia_representation.reset(); diff --git a/chromium/gpu/command_buffer/service/shared_image_backing_ozone.cc b/chromium/gpu/command_buffer/service/shared_image_backing_ozone.cc new file mode 100644 index 00000000000..84bf3dbee62 --- /dev/null +++ b/chromium/gpu/command_buffer/service/shared_image_backing_ozone.cc @@ -0,0 +1,169 @@ +// Copyright 2019 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
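// This new file adds a NativePixmap-backed SharedImageBacking for Ozone
// platforms. A hedged usage sketch; the call site is hypothetical, with
// names taken from the factory function defined in this file:
auto backing = SharedImageBackingOzone::Create(
    context_state, mailbox, viz::ResourceFormat::RGBA_8888,
    gfx::Size(256, 256), gfx::ColorSpace::CreateSRGB(),
    SHARED_IMAGE_USAGE_SCANOUT);
if (backing) {
  auto factory_ref =
      shared_image_manager->Register(std::move(backing), tracker);
}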
+ +#include "gpu/command_buffer/service/shared_image_backing_ozone.h" + +#include <dawn/webgpu.h> +#include <vulkan/vulkan.h> + +#include <memory> + +#include "base/logging.h" +#include "base/memory/ptr_util.h" +#include "base/memory/scoped_refptr.h" +#include "components/viz/common/gpu/vulkan_context_provider.h" +#include "components/viz/common/resources/resource_format.h" +#include "components/viz/common/resources/resource_format_utils.h" +#include "gpu/command_buffer/common/mailbox.h" +#include "gpu/command_buffer/common/shared_image_usage.h" +#include "gpu/command_buffer/service/mailbox_manager.h" +#include "gpu/command_buffer/service/memory_tracking.h" +#include "gpu/command_buffer/service/shared_context_state.h" +#include "gpu/command_buffer/service/shared_image_manager.h" +#include "gpu/command_buffer/service/shared_image_representation.h" +#include "gpu/vulkan/vulkan_device_queue.h" +#include "ui/gfx/buffer_format_util.h" +#include "ui/gfx/buffer_types.h" +#include "ui/gfx/color_space.h" +#include "ui/gfx/geometry/rect.h" +#include "ui/gfx/geometry/size.h" +#include "ui/gfx/gpu_fence.h" +#include "ui/gfx/native_pixmap.h" +#include "ui/gfx/native_widget_types.h" +#include "ui/ozone/public/ozone_platform.h" +#include "ui/ozone/public/surface_factory_ozone.h" + +namespace gpu { +namespace { + +size_t GetPixmapSizeInBytes(const gfx::NativePixmap& pixmap) { + return gfx::BufferSizeForBufferFormat(pixmap.GetBufferSize(), + pixmap.GetBufferFormat()); +} + +gfx::BufferUsage GetBufferUsage(uint32_t usage) { + if (usage & SHARED_IMAGE_USAGE_WEBGPU) { + // Just use SCANOUT for WebGPU since the memory doesn't need to be linear. + return gfx::BufferUsage::SCANOUT; + } else if (usage & SHARED_IMAGE_USAGE_SCANOUT) { + return gfx::BufferUsage::SCANOUT; + } else { + NOTREACHED() << "Unsupported usage flags."; + return gfx::BufferUsage::SCANOUT; + } +} + +} // namespace + +std::unique_ptr<SharedImageBackingOzone> SharedImageBackingOzone::Create( + SharedContextState* context_state, + const Mailbox& mailbox, + viz::ResourceFormat format, + const gfx::Size& size, + const gfx::ColorSpace& color_space, + uint32_t usage) { + gfx::BufferFormat buffer_format = viz::BufferFormat(format); + gfx::BufferUsage buffer_usage = GetBufferUsage(usage); + VkDevice vk_device = VK_NULL_HANDLE; + DCHECK(context_state); + if (context_state->vk_context_provider()) { + vk_device = context_state->vk_context_provider() + ->GetDeviceQueue() + ->GetVulkanDevice(); + } + ui::SurfaceFactoryOzone* surface_factory = + ui::OzonePlatform::GetInstance()->GetSurfaceFactoryOzone(); + scoped_refptr<gfx::NativePixmap> pixmap = surface_factory->CreateNativePixmap( + gfx::kNullAcceleratedWidget, vk_device, size, buffer_format, + buffer_usage); + if (!pixmap) { + return nullptr; + } + + return base::WrapUnique( + new SharedImageBackingOzone(mailbox, format, size, color_space, usage, + context_state, std::move(pixmap))); +} + +SharedImageBackingOzone::~SharedImageBackingOzone() = default; + +bool SharedImageBackingOzone::IsCleared() const { + NOTIMPLEMENTED_LOG_ONCE(); + return false; +} + +void SharedImageBackingOzone::SetCleared() { + NOTIMPLEMENTED_LOG_ONCE(); +} + +void SharedImageBackingOzone::Update(std::unique_ptr<gfx::GpuFence> in_fence) { + NOTIMPLEMENTED_LOG_ONCE(); + return; +} + +void SharedImageBackingOzone::Destroy() {} + +bool SharedImageBackingOzone::ProduceLegacyMailbox( + MailboxManager* mailbox_manager) { + NOTREACHED(); + return false; +} + +std::unique_ptr<SharedImageRepresentationDawn> 
+SharedImageBackingOzone::ProduceDawn(SharedImageManager* manager, + MemoryTypeTracker* tracker, + WGPUDevice device) { + NOTIMPLEMENTED_LOG_ONCE(); + return nullptr; +} + +std::unique_ptr<SharedImageRepresentationGLTexture> +SharedImageBackingOzone::ProduceGLTexture(SharedImageManager* manager, + MemoryTypeTracker* tracker) { + NOTIMPLEMENTED_LOG_ONCE(); + return nullptr; +} + +std::unique_ptr<SharedImageRepresentationGLTexturePassthrough> +SharedImageBackingOzone::ProduceGLTexturePassthrough( + SharedImageManager* manager, + MemoryTypeTracker* tracker) { + NOTIMPLEMENTED_LOG_ONCE(); + return nullptr; +} + +std::unique_ptr<SharedImageRepresentationSkia> +SharedImageBackingOzone::ProduceSkia( + SharedImageManager* manager, + MemoryTypeTracker* tracker, + scoped_refptr<SharedContextState> context_state) { + NOTIMPLEMENTED_LOG_ONCE(); + return nullptr; +} + +std::unique_ptr<SharedImageRepresentationOverlay> +SharedImageBackingOzone::ProduceOverlay(SharedImageManager* manager, + MemoryTypeTracker* tracker) { + NOTIMPLEMENTED_LOG_ONCE(); + return nullptr; +} + +SharedImageBackingOzone::SharedImageBackingOzone( + const Mailbox& mailbox, + viz::ResourceFormat format, + const gfx::Size& size, + const gfx::ColorSpace& color_space, + uint32_t usage, + SharedContextState* context_state, + scoped_refptr<gfx::NativePixmap> pixmap) + : SharedImageBacking(mailbox, + format, + size, + color_space, + usage, + GetPixmapSizeInBytes(*pixmap), + false), + pixmap_(std::move(pixmap)) {} + +} // namespace gpu diff --git a/chromium/gpu/command_buffer/service/shared_image_backing_ozone.h b/chromium/gpu/command_buffer/service/shared_image_backing_ozone.h new file mode 100644 index 00000000000..08d93456272 --- /dev/null +++ b/chromium/gpu/command_buffer/service/shared_image_backing_ozone.h @@ -0,0 +1,85 @@ +// Copyright 2019 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#ifndef GPU_COMMAND_BUFFER_SERVICE_SHARED_IMAGE_BACKING_OZONE_H_ +#define GPU_COMMAND_BUFFER_SERVICE_SHARED_IMAGE_BACKING_OZONE_H_ + +#include <dawn/webgpu.h> + +#include <memory> + +#include "base/macros.h" +#include "base/memory/scoped_refptr.h" +#include "components/viz/common/resources/resource_format.h" +#include "gpu/command_buffer/common/mailbox.h" +#include "gpu/command_buffer/service/mailbox_manager.h" +#include "gpu/command_buffer/service/memory_tracking.h" +#include "gpu/command_buffer/service/shared_context_state.h" +#include "gpu/command_buffer/service/shared_image_backing.h" +#include "gpu/command_buffer/service/shared_image_manager.h" +#include "gpu/command_buffer/service/shared_image_representation.h" +#include "ui/gfx/color_space.h" +#include "ui/gfx/geometry/size.h" +#include "ui/gfx/gpu_fence.h" +#include "ui/gfx/native_pixmap.h" + +namespace gpu { + +// Implementation of SharedImageBacking that uses a NativePixmap created via +// an Ozone surface factory. The memory associated with the pixmap can be +// aliased by both GL and Vulkan for use in rendering or compositing. 
+class SharedImageBackingOzone final : public SharedImageBacking { + public: + static std::unique_ptr<SharedImageBackingOzone> Create( + SharedContextState* context_state, + const Mailbox& mailbox, + viz::ResourceFormat format, + const gfx::Size& size, + const gfx::ColorSpace& color_space, + uint32_t usage); + ~SharedImageBackingOzone() override; + + // gpu::SharedImageBacking: + bool IsCleared() const override; + void SetCleared() override; + void Update(std::unique_ptr<gfx::GpuFence> in_fence) override; + void Destroy() override; + bool ProduceLegacyMailbox(MailboxManager* mailbox_manager) override; + + protected: + std::unique_ptr<SharedImageRepresentationDawn> ProduceDawn( + SharedImageManager* manager, + MemoryTypeTracker* tracker, + WGPUDevice device) override; + std::unique_ptr<SharedImageRepresentationGLTexture> ProduceGLTexture( + SharedImageManager* manager, + MemoryTypeTracker* tracker) override; + std::unique_ptr<SharedImageRepresentationGLTexturePassthrough> + ProduceGLTexturePassthrough(SharedImageManager* manager, + MemoryTypeTracker* tracker) override; + std::unique_ptr<SharedImageRepresentationSkia> ProduceSkia( + SharedImageManager* manager, + MemoryTypeTracker* tracker, + scoped_refptr<SharedContextState> context_state) override; + std::unique_ptr<SharedImageRepresentationOverlay> ProduceOverlay( + SharedImageManager* manager, + MemoryTypeTracker* tracker) override; + + private: + SharedImageBackingOzone(const Mailbox& mailbox, + viz::ResourceFormat format, + const gfx::Size& size, + const gfx::ColorSpace& color_space, + uint32_t usage, + SharedContextState* context_state, + scoped_refptr<gfx::NativePixmap> pixmap); + + scoped_refptr<gfx::NativePixmap> pixmap_; + + DISALLOW_COPY_AND_ASSIGN(SharedImageBackingOzone); +}; + +} // namespace gpu + +#endif // GPU_COMMAND_BUFFER_SERVICE_SHARED_IMAGE_BACKING_OZONE_H_ diff --git a/chromium/gpu/command_buffer/service/shared_image_factory.cc b/chromium/gpu/command_buffer/service/shared_image_factory.cc index 4e5ec471f5d..8d2c5e131f9 100644 --- a/chromium/gpu/command_buffer/service/shared_image_factory.cc +++ b/chromium/gpu/command_buffer/service/shared_image_factory.cc @@ -80,7 +80,8 @@ SharedImageFactory::SharedImageFactory( shared_image_manager_(shared_image_manager), memory_tracker_(std::make_unique<MemoryTypeTracker>(memory_tracker)), using_vulkan_(context_state && context_state->GrContextIsVulkan()), - using_metal_(context_state && context_state->GrContextIsMetal()) { + using_metal_(context_state && context_state->GrContextIsMetal()), + using_dawn_(context_state && context_state->GrContextIsDawn()) { bool use_gl = gl::GetGLImplementation() != gl::kGLImplementationNone; if (use_gl) { gl_backing_factory_ = std::make_unique<SharedImageBackingFactoryGLTexture>( @@ -164,13 +165,14 @@ bool SharedImageFactory::CreateSharedImage(const Mailbox& mailbox, } // Currently we only perform data uploads via two paths, - // |gl_backing_factory_| for GL and |wrapped_sk_image_factory_| for Vulkan. + // |gl_backing_factory_| for GL and |wrapped_sk_image_factory_| for Vulkan and + // Dawn. // TODO(ericrk): Make this generic in the future. 
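// With the new |using_dawn_| bit, Vulkan and Dawn share the same upload
// path and legacy mailboxes stay GL-only. The decision logic below,
// condensed into a sketch:
SharedImageBackingFactory* factory = nullptr;
bool allow_legacy_mailbox = false;
if (!using_vulkan_ && !using_dawn_) {
  factory = gl_backing_factory_.get();        // GL: legacy mailboxes allowed
  allow_legacy_mailbox = true;
} else {
  factory = wrapped_sk_image_factory_.get();  // Vulkan or Dawn
}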
bool allow_legacy_mailbox = false; SharedImageBackingFactory* factory = nullptr; if (backing_factory_for_testing_) { factory = backing_factory_for_testing_; - } else if (!using_vulkan_) { + } else if (!using_vulkan_ && !using_dawn_) { allow_legacy_mailbox = true; factory = gl_backing_factory_.get(); } else { @@ -477,7 +479,7 @@ SharedImageRepresentationFactory::ProduceSkia( std::unique_ptr<SharedImageRepresentationDawn> SharedImageRepresentationFactory::ProduceDawn(const Mailbox& mailbox, - DawnDevice device) { + WGPUDevice device) { return manager_->ProduceDawn(mailbox, tracker_.get(), device); } diff --git a/chromium/gpu/command_buffer/service/shared_image_factory.h b/chromium/gpu/command_buffer/service/shared_image_factory.h index eea81d438bb..5dc0255ec89 100644 --- a/chromium/gpu/command_buffer/service/shared_image_factory.h +++ b/chromium/gpu/command_buffer/service/shared_image_factory.h @@ -121,6 +121,7 @@ class GPU_GLES2_EXPORT SharedImageFactory { std::unique_ptr<MemoryTypeTracker> memory_tracker_; const bool using_vulkan_; const bool using_metal_; + const bool using_dawn_; // The set of SharedImages which have been created (and are being kept alive) // by this factory. @@ -167,7 +168,7 @@ class GPU_GLES2_EXPORT SharedImageRepresentationFactory { scoped_refptr<SharedContextState> context_State); std::unique_ptr<SharedImageRepresentationDawn> ProduceDawn( const Mailbox& mailbox, - DawnDevice device); + WGPUDevice device); std::unique_ptr<SharedImageRepresentationOverlay> ProduceOverlay( const Mailbox& mailbox); diff --git a/chromium/gpu/command_buffer/service/shared_image_manager.cc b/chromium/gpu/command_buffer/service/shared_image_manager.cc index 37810b3317e..0ce71513416 100644 --- a/chromium/gpu/command_buffer/service/shared_image_manager.cc +++ b/chromium/gpu/command_buffer/service/shared_image_manager.cc @@ -206,7 +206,7 @@ std::unique_ptr<SharedImageRepresentationSkia> SharedImageManager::ProduceSkia( std::unique_ptr<SharedImageRepresentationDawn> SharedImageManager::ProduceDawn( const Mailbox& mailbox, MemoryTypeTracker* tracker, - DawnDevice device) { + WGPUDevice device) { CALLED_ON_VALID_THREAD(); AutoLock autolock(this); @@ -294,7 +294,7 @@ void SharedImageManager::OnMemoryDump(const Mailbox& mailbox, // Unique name in the process. 
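// The dump name switches from "gpu/shared-images/..." to
// "gpu/shared_images/...". Resulting name for a hypothetical client_id of
// 7, as a sketch:
std::string name = base::StringPrintf(
    "gpu/shared_images/client_0x%" PRIX32 "/mailbox_%s", 7u,
    mailbox.ToDebugString().c_str());
// -> "gpu/shared_images/client_0x7/mailbox_<hex digest>"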
std::string dump_name = - base::StringPrintf("gpu/shared-images/client_0x%" PRIX32 "/mailbox_%s", + base::StringPrintf("gpu/shared_images/client_0x%" PRIX32 "/mailbox_%s", client_id, mailbox.ToDebugString().c_str()); base::trace_event::MemoryAllocatorDump* dump = diff --git a/chromium/gpu/command_buffer/service/shared_image_manager.h b/chromium/gpu/command_buffer/service/shared_image_manager.h index 9309757600d..5b34808b75e 100644 --- a/chromium/gpu/command_buffer/service/shared_image_manager.h +++ b/chromium/gpu/command_buffer/service/shared_image_manager.h @@ -49,7 +49,7 @@ class GPU_GLES2_EXPORT SharedImageManager { std::unique_ptr<SharedImageRepresentationDawn> ProduceDawn( const Mailbox& mailbox, MemoryTypeTracker* ref, - DawnDevice device); + WGPUDevice device); std::unique_ptr<SharedImageRepresentationOverlay> ProduceOverlay( const Mailbox& mailbox, MemoryTypeTracker* ref); diff --git a/chromium/gpu/command_buffer/service/shared_image_representation.h b/chromium/gpu/command_buffer/service/shared_image_representation.h index ab27e2335f5..53dce99f869 100644 --- a/chromium/gpu/command_buffer/service/shared_image_representation.h +++ b/chromium/gpu/command_buffer/service/shared_image_representation.h @@ -5,8 +5,8 @@ #ifndef GPU_COMMAND_BUFFER_SERVICE_SHARED_IMAGE_REPRESENTATION_H_ #define GPU_COMMAND_BUFFER_SERVICE_SHARED_IMAGE_REPRESENTATION_H_ -#include <dawn/dawn.h> #include <dawn/dawn_proc_table.h> +#include <dawn/webgpu.h> #include "base/callback_helpers.h" #include "build/build_config.h" @@ -23,6 +23,10 @@ typedef unsigned int GLenum; class SkPromiseImageTexture; +namespace gl { +class GLImage; +} + namespace gpu { namespace gles2 { class Texture; @@ -212,6 +216,8 @@ class GPU_GLES2_EXPORT SharedImageRepresentationGLTexturePassthrough GetTexturePassthrough() = 0; protected: + friend class SharedImageRepresentationSkiaGL; + // TODO(ericrk): Make these pure virtual and ensure real implementations // exist. virtual bool BeginAccess(GLenum mode); @@ -305,7 +311,7 @@ class SharedImageRepresentationDawn : public SharedImageRepresentation { // TODO(penghuang): Add ScopedAccess helper class. // This can return null in case of a Dawn validation error, for example if // usage is invalid. - virtual DawnTexture BeginAccess(DawnTextureUsage usage) = 0; + virtual WGPUTexture BeginAccess(WGPUTextureUsage usage) = 0; virtual void EndAccess() = 0; }; @@ -316,6 +322,44 @@ class SharedImageRepresentationOverlay : public SharedImageRepresentation { MemoryTypeTracker* tracker) : SharedImageRepresentation(manager, backing, tracker) {} + class ScopedReadAccess { + public: + ScopedReadAccess(SharedImageRepresentationOverlay* representation, + bool needs_gl_image) + : representation_(representation) { + representation_->BeginReadAccess(); + gl_image_ = needs_gl_image ? 
representation_->GetGLImage() : nullptr; + } + ScopedReadAccess(ScopedReadAccess&& other) { *this = std::move(other); } + ~ScopedReadAccess() { + if (representation_) + representation_->EndReadAccess(); + } + + ScopedReadAccess& operator=(ScopedReadAccess&& other) { + representation_ = other.representation_; + other.representation_ = nullptr; + gl_image_ = other.gl_image_; + other.gl_image_ = nullptr; + return *this; + } + + gl::GLImage* gl_image() const { + DCHECK(representation_); + return gl_image_; + } + + private: + SharedImageRepresentationOverlay* representation_; + gl::GLImage* gl_image_; + }; + +#if defined(OS_ANDROID) + virtual void NotifyOverlayPromotion(bool promotion, + const gfx::Rect& bounds) = 0; +#endif + + protected: // TODO(weiliangc): Currently this only handles Android pre-SurfaceControl // case. Add appropriate fence later. virtual void BeginReadAccess() = 0; @@ -323,10 +367,9 @@ class SharedImageRepresentationOverlay : public SharedImageRepresentation { // TODO(weiliangc): Add API to backing AHardwareBuffer. -#if defined(OS_ANDROID) - virtual void NotifyOverlayPromotion(bool promotion, - const gfx::Rect& bounds) = 0; -#endif + // TODO(penghuang): Refactor it to not depend on GL. + // Get the backing as GLImage for GLSurface::ScheduleOverlayPlane. + virtual gl::GLImage* GetGLImage() = 0; }; } // namespace gpu diff --git a/chromium/gpu/command_buffer/service/shared_image_representation_skia_gl.cc b/chromium/gpu/command_buffer/service/shared_image_representation_skia_gl.cc index 83613f6bfbf..4577518adc6 100644 --- a/chromium/gpu/command_buffer/service/shared_image_representation_skia_gl.cc +++ b/chromium/gpu/command_buffer/service/shared_image_representation_skia_gl.cc @@ -53,6 +53,31 @@ SharedImageRepresentationSkiaGL::Create( std::move(context_state), manager, backing, tracker)); } +std::unique_ptr<SharedImageRepresentationSkiaGL> +SharedImageRepresentationSkiaGL::CreateForPassthrough( + std::unique_ptr<SharedImageRepresentationGLTexturePassthrough> + passthrough_representation, + scoped_refptr<SharedContextState> context_state, + SharedImageManager* manager, + SharedImageBacking* backing, + MemoryTypeTracker* tracker) { + GrBackendTexture backend_texture; + if (!GetGrBackendTexture( + context_state->feature_info(), + passthrough_representation->GetTexturePassthrough()->target(), + backing->size(), + passthrough_representation->GetTexturePassthrough()->service_id(), + backing->format(), &backend_texture)) { + return nullptr; + } + auto promise_texture = SkPromiseImageTexture::Make(backend_texture); + if (!promise_texture) + return nullptr; + return base::WrapUnique(new SharedImageRepresentationSkiaGL( + std::move(passthrough_representation), std::move(promise_texture), + std::move(context_state), manager, backing, tracker)); +} + SharedImageRepresentationSkiaGL::SharedImageRepresentationSkiaGL( std::unique_ptr<SharedImageRepresentationGLTexture> gl_representation, sk_sp<SkPromiseImageTexture> promise_texture, @@ -64,6 +89,25 @@ SharedImageRepresentationSkiaGL::SharedImageRepresentationSkiaGL( gl_representation_(std::move(gl_representation)), promise_texture_(std::move(promise_texture)), context_state_(std::move(context_state)) { + DCHECK(gl_representation_); +#if DCHECK_IS_ON() + context_ = gl::GLContext::GetCurrent(); +#endif +} + +SharedImageRepresentationSkiaGL::SharedImageRepresentationSkiaGL( + std::unique_ptr<SharedImageRepresentationGLTexturePassthrough> + passthrough_representation, + sk_sp<SkPromiseImageTexture> promise_texture, + 
scoped_refptr<SharedContextState> context_state, + SharedImageManager* manager, + SharedImageBacking* backing, + MemoryTypeTracker* tracker) + : SharedImageRepresentationSkia(manager, backing, tracker), + passthrough_representation_(std::move(passthrough_representation)), + promise_texture_(std::move(promise_texture)), + context_state_(std::move(context_state)) { + DCHECK(passthrough_representation_); #if DCHECK_IS_ON() context_ = gl::GLContext::GetCurrent(); #endif @@ -83,9 +127,14 @@ sk_sp<SkSurface> SharedImageRepresentationSkiaGL::BeginWriteAccess( DCHECK(!surface_); CheckContext(); - if (!gl_representation_->BeginAccess( + if (gl_representation_ && + !gl_representation_->BeginAccess( GL_SHARED_IMAGE_ACCESS_MODE_READWRITE_CHROMIUM)) { return nullptr; + } else if (passthrough_representation_ && + !passthrough_representation_->BeginAccess( + GL_SHARED_IMAGE_ACCESS_MODE_READWRITE_CHROMIUM)) { + return nullptr; } SkColorType sk_color_type = viz::ResourceFormatToClosestSkColorType( @@ -105,7 +154,11 @@ void SharedImageRepresentationSkiaGL::EndWriteAccess(sk_sp<SkSurface> surface) { DCHECK_EQ(surface.get(), surface_); DCHECK(surface->unique()); - gl_representation_->EndAccess(); + if (gl_representation_) { + gl_representation_->EndAccess(); + } else { + passthrough_representation_->EndAccess(); + } mode_ = RepresentationAccessMode::kNone; surface_ = nullptr; } @@ -116,9 +169,14 @@ sk_sp<SkPromiseImageTexture> SharedImageRepresentationSkiaGL::BeginReadAccess( DCHECK_EQ(mode_, RepresentationAccessMode::kNone); CheckContext(); - if (!gl_representation_->BeginAccess( - GL_SHARED_IMAGE_ACCESS_MODE_READ_CHROMIUM)) + if (gl_representation_ && !gl_representation_->BeginAccess( + GL_SHARED_IMAGE_ACCESS_MODE_READ_CHROMIUM)) { + return nullptr; + } else if (passthrough_representation_ && + !passthrough_representation_->BeginAccess( + GL_SHARED_IMAGE_ACCESS_MODE_READ_CHROMIUM)) { return nullptr; + } mode_ = RepresentationAccessMode::kRead; return promise_texture_; } @@ -127,7 +185,11 @@ void SharedImageRepresentationSkiaGL::EndReadAccess() { DCHECK_EQ(mode_, RepresentationAccessMode::kRead); CheckContext(); - gl_representation_->EndAccess(); + if (gl_representation_) { + gl_representation_->EndAccess(); + } else { + passthrough_representation_->EndAccess(); + } mode_ = RepresentationAccessMode::kNone; surface_ = nullptr; } @@ -138,4 +200,4 @@ void SharedImageRepresentationSkiaGL::CheckContext() { #endif } -} // namespace gpu +} // namespace gpu
\ No newline at end of file diff --git a/chromium/gpu/command_buffer/service/shared_image_representation_skia_gl.h b/chromium/gpu/command_buffer/service/shared_image_representation_skia_gl.h index 5ad4ab8b11d..5e11a7e0ca6 100644 --- a/chromium/gpu/command_buffer/service/shared_image_representation_skia_gl.h +++ b/chromium/gpu/command_buffer/service/shared_image_representation_skia_gl.h @@ -24,6 +24,13 @@ class GPU_GLES2_EXPORT SharedImageRepresentationSkiaGL SharedImageManager* manager, SharedImageBacking* backing, MemoryTypeTracker* tracker); + static std::unique_ptr<SharedImageRepresentationSkiaGL> CreateForPassthrough( + std::unique_ptr<SharedImageRepresentationGLTexturePassthrough> + passthrough_representation, + scoped_refptr<SharedContextState> context_state, + SharedImageManager* manager, + SharedImageBacking* backing, + MemoryTypeTracker* tracker); ~SharedImageRepresentationSkiaGL() override; @@ -46,9 +53,20 @@ class GPU_GLES2_EXPORT SharedImageRepresentationSkiaGL SharedImageManager* manager, SharedImageBacking* backing, MemoryTypeTracker* tracker); + SharedImageRepresentationSkiaGL( + std::unique_ptr<SharedImageRepresentationGLTexturePassthrough> + passthrough_representation, + sk_sp<SkPromiseImageTexture> promise_texture, + scoped_refptr<SharedContextState> context_state, + SharedImageManager* manager, + SharedImageBacking* backing, + MemoryTypeTracker* tracker); + void CheckContext(); std::unique_ptr<SharedImageRepresentationGLTexture> gl_representation_; + std::unique_ptr<SharedImageRepresentationGLTexturePassthrough> + passthrough_representation_; sk_sp<SkPromiseImageTexture> promise_texture_; scoped_refptr<SharedContextState> context_state_; SkSurface* surface_ = nullptr; diff --git a/chromium/gpu/command_buffer/service/shared_image_video.cc b/chromium/gpu/command_buffer/service/shared_image_video.cc index c6a25df8246..990245d6b70 100644 --- a/chromium/gpu/command_buffer/service/shared_image_video.cc +++ b/chromium/gpu/command_buffer/service/shared_image_video.cc @@ -522,17 +522,25 @@ class SharedImageRepresentationOverlayVideo : gpu::SharedImageRepresentationOverlay(manager, backing, tracker), stream_image_(backing->stream_texture_sii_) {} + protected: void BeginReadAccess() override { - TRACE_EVENT0("media", - "SharedImageRepresentationOverlayVideo::BeginReadAccess"); - // A |CodecImage| could only be overlaied if it is already in a SurfaceView. - DCHECK(!stream_image_->HasTextureOwner()); - - stream_image_->RenderToOverlay(); + // A |CodecImage| is already in a SurfaceView, render content to the + // overlay. 
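// The ScopedReadAccess helper added to SharedImageRepresentationOverlay
// earlier in this patch brackets BeginReadAccess/EndReadAccess and
// optionally fetches the GLImage used for
// GLSurface::ScheduleOverlayPlane. Hedged usage sketch, assuming a valid
// |overlay_representation|:
{
  SharedImageRepresentationOverlay::ScopedReadAccess access(
      overlay_representation.get(), /*needs_gl_image=*/true);
  gl::GLImage* image = access.gl_image();  // null if needs_gl_image is false
  // ... schedule |image| as an overlay plane ...
}  // EndReadAccess() runs when |access| goes out of scope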
+ if (!stream_image_->HasTextureOwner()) { + TRACE_EVENT0("media", + "SharedImageRepresentationOverlayVideo::BeginReadAccess"); + stream_image_->RenderToOverlay(); + } } void EndReadAccess() override {} + gl::GLImage* GetGLImage() override { + DCHECK(stream_image_->HasTextureOwner()) + << "The backing is already in a SurfaceView!"; + return stream_image_.get(); + } + void NotifyOverlayPromotion(bool promotion, const gfx::Rect& bounds) override { stream_image_->NotifyOverlayPromotion(promotion, bounds); diff --git a/chromium/gpu/command_buffer/service/skia_utils.cc b/chromium/gpu/command_buffer/service/skia_utils.cc index b4f9d29a89d..f95190590e1 100644 --- a/chromium/gpu/command_buffer/service/skia_utils.cc +++ b/chromium/gpu/command_buffer/service/skia_utils.cc @@ -39,6 +39,22 @@ void CleanupAfterSkiaFlush(void* context) { delete flush_context; } +template <class T> +void DeleteSkObject(SharedContextState* context_state, sk_sp<T> sk_object) { + DCHECK(sk_object && sk_object->unique()); + if (!context_state->GrContextIsVulkan()) + return; + +#if BUILDFLAG(ENABLE_VULKAN) + auto* fence_helper = + context_state->vk_context_provider()->GetDeviceQueue()->GetFenceHelper(); + fence_helper->EnqueueCleanupTaskForSubmittedWork(base::BindOnce( + [](const sk_sp<GrContext>& gr_context, sk_sp<T> sk_object, + gpu::VulkanDeviceQueue* device_queue, bool is_lost) {}, + sk_ref_sp(context_state->gr_context()), std::move(sk_object))); +#endif +} + } // namespace GLuint GetGrGLBackendTextureFormat(const gles2::FeatureInfo* feature_info, @@ -132,6 +148,15 @@ void DeleteGrBackendTexture(SharedContextState* context_state, #endif } +void DeleteSkImage(SharedContextState* context_state, sk_sp<SkImage> sk_image) { + DeleteSkObject(context_state, std::move(sk_image)); +} + +void DeleteSkSurface(SharedContextState* context_state, + sk_sp<SkSurface> sk_surface) { + DeleteSkObject(context_state, std::move(sk_surface)); +} + #if BUILDFLAG(ENABLE_VULKAN) GrVkYcbcrConversionInfo CreateGrVkYcbcrConversionInfo( diff --git a/chromium/gpu/command_buffer/service/skia_utils.h b/chromium/gpu/command_buffer/service/skia_utils.h index 7968e96cbb5..a383163b37b 100644 --- a/chromium/gpu/command_buffer/service/skia_utils.h +++ b/chromium/gpu/command_buffer/service/skia_utils.h @@ -67,6 +67,12 @@ GPU_GLES2_EXPORT void DeleteGrBackendTexture( SharedContextState* context_state, GrBackendTexture* backend_textures); +GPU_GLES2_EXPORT void DeleteSkImage(SharedContextState* context_state, + sk_sp<SkImage> sk_image); + +GPU_GLES2_EXPORT void DeleteSkSurface(SharedContextState* context_state, + sk_sp<SkSurface> sk_surface); + #if BUILDFLAG(ENABLE_VULKAN) GPU_GLES2_EXPORT GrVkYcbcrConversionInfo CreateGrVkYcbcrConversionInfo( VkPhysicalDevice physical_device, diff --git a/chromium/gpu/command_buffer/service/surface_texture_gl_owner_unittest.cc b/chromium/gpu/command_buffer/service/surface_texture_gl_owner_unittest.cc index 01d3ece4f48..2e1509fd461 100644 --- a/chromium/gpu/command_buffer/service/surface_texture_gl_owner_unittest.cc +++ b/chromium/gpu/command_buffer/service/surface_texture_gl_owner_unittest.cc @@ -34,8 +34,11 @@ class SurfaceTextureGLOwnerTest : public testing::Test { protected: void SetUp() override { - gl::init::InitializeGLOneOffImplementation(gl::kGLImplementationEGLGLES2, - false, false, false, true); + gl::init::InitializeStaticGLBindingsImplementation( + gl::kGLImplementationEGLGLES2, false); + gl::init::InitializeGLOneOffPlatformImplementation(false, false, false, + true); + surface_ = new 
gl::PbufferGLSurfaceEGL(gfx::Size(320, 240)); surface_->Initialize(); diff --git a/chromium/gpu/command_buffer/service/sync_point_manager.cc b/chromium/gpu/command_buffer/service/sync_point_manager.cc index 9df666db47f..69856b51347 100644 --- a/chromium/gpu/command_buffer/service/sync_point_manager.cc +++ b/chromium/gpu/command_buffer/service/sync_point_manager.cc @@ -266,8 +266,12 @@ void SyncPointClientState::ReleaseFenceSyncHelper(uint64_t release) { { base::AutoLock auto_lock(fence_sync_lock_); - DLOG_IF(ERROR, release <= fence_sync_release_) - << "Client submitted fence releases out of order."; + if (release <= fence_sync_release_) { + DLOG(ERROR) << "Client submitted fence releases out of order."; + DCHECK(release_callback_queue_.empty() || + release_callback_queue_.top().release_count > release); + return; + } fence_sync_release_ = release; while (!release_callback_queue_.empty() && diff --git a/chromium/gpu/command_buffer/service/sync_point_manager_unittest.cc b/chromium/gpu/command_buffer/service/sync_point_manager_unittest.cc index dc141e86937..fb16b4dd764 100644 --- a/chromium/gpu/command_buffer/service/sync_point_manager_unittest.cc +++ b/chromium/gpu/command_buffer/service/sync_point_manager_unittest.cc @@ -121,6 +121,38 @@ TEST_F(SyncPointManagerTest, BasicFenceSyncRelease) { EXPECT_TRUE(sync_point_manager_->IsSyncTokenReleased(sync_token)); } +TEST_F(SyncPointManagerTest, OutOfOrderSyncTokenRelease) { + CommandBufferNamespace kNamespaceId = gpu::CommandBufferNamespace::GPU_IO; + CommandBufferId kBufferId = CommandBufferId::FromUnsafeValue(0x123); + + uint64_t release_count_1 = 2; + SyncToken sync_token_1(kNamespaceId, kBufferId, release_count_1); + uint64_t release_count_2 = 1; + SyncToken sync_token_2(kNamespaceId, kBufferId, release_count_2); + + SyncPointStream stream(sync_point_manager_.get(), kNamespaceId, kBufferId); + stream.AllocateOrderNum(); + stream.AllocateOrderNum(); + + EXPECT_FALSE(sync_point_manager_->IsSyncTokenReleased(sync_token_1)); + EXPECT_FALSE(sync_point_manager_->IsSyncTokenReleased(sync_token_2)); + + // Releasing the first sync token also releases the second because the first + // token's release count is larger. + stream.order_data->BeginProcessingOrderNumber(1); + stream.client_state->ReleaseFenceSync(release_count_1); + stream.order_data->FinishProcessingOrderNumber(1); + EXPECT_TRUE(sync_point_manager_->IsSyncTokenReleased(sync_token_1)); + EXPECT_TRUE(sync_point_manager_->IsSyncTokenReleased(sync_token_2)); + + // Releasing the second token should be a no-op. 
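// The behavior under test comes from the new early-return in
// SyncPointClientState::ReleaseFenceSyncHelper above: release counts are a
// monotonic high-water mark, so an out-of-order release is dropped rather
// than only logged. The guard in isolation, as a sketch:
void ReleaseFenceSync(uint64_t release) {
  if (release <= fence_sync_release_) {
    DLOG(ERROR) << "Client submitted fence releases out of order.";
    return;  // drop the out-of-order release; do not regress the mark
  }
  fence_sync_release_ = release;  // then run callbacks up to |release|
}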
+ stream.order_data->BeginProcessingOrderNumber(2); + stream.client_state->ReleaseFenceSync(release_count_2); + stream.order_data->FinishProcessingOrderNumber(2); + EXPECT_TRUE(sync_point_manager_->IsSyncTokenReleased(sync_token_1)); + EXPECT_TRUE(sync_point_manager_->IsSyncTokenReleased(sync_token_2)); +} + TEST_F(SyncPointManagerTest, MultipleClientsPerOrderData) { CommandBufferNamespace kNamespaceId = gpu::CommandBufferNamespace::GPU_IO; CommandBufferId kCmdBufferId1 = CommandBufferId::FromUnsafeValue(0x123); @@ -180,6 +212,82 @@ TEST_F(SyncPointManagerTest, BasicFenceSyncWaitRelease) { EXPECT_TRUE(sync_point_manager_->IsSyncTokenReleased(sync_token)); } +TEST_F(SyncPointManagerTest, WaitWithOutOfOrderSyncTokenRelease) { + CommandBufferNamespace kNamespaceId = gpu::CommandBufferNamespace::GPU_IO; + CommandBufferId kReleaseCmdBufferId = CommandBufferId::FromUnsafeValue(0x123); + CommandBufferId kWaitCmdBufferId = CommandBufferId::FromUnsafeValue(0x234); + + int test_num_1 = 10; + int test_num_2 = 10; + int test_num_3 = 10; + SyncPointStream release_stream(sync_point_manager_.get(), kNamespaceId, + kReleaseCmdBufferId); + SyncPointStream wait_stream(sync_point_manager_.get(), kNamespaceId, + kWaitCmdBufferId); + + release_stream.AllocateOrderNum(); + uint64_t release_count_1 = 2; + SyncToken sync_token_1(kNamespaceId, kReleaseCmdBufferId, release_count_1); + release_stream.AllocateOrderNum(); + uint64_t release_count_2 = 1; + SyncToken sync_token_2(kNamespaceId, kReleaseCmdBufferId, release_count_2); + release_stream.AllocateOrderNum(); + uint64_t release_count_3 = 3; + SyncToken sync_token_3(kNamespaceId, kReleaseCmdBufferId, release_count_3); + + wait_stream.AllocateOrderNum(); + wait_stream.BeginProcessing(); + bool valid_wait = wait_stream.client_state->Wait( + sync_token_1, base::BindOnce(&SyncPointManagerTest::SetIntegerFunction, + &test_num_1, 123)); + EXPECT_TRUE(valid_wait); + EXPECT_EQ(10, test_num_1); + EXPECT_FALSE(sync_point_manager_->IsSyncTokenReleased(sync_token_1)); + wait_stream.EndProcessing(); + + wait_stream.AllocateOrderNum(); + wait_stream.BeginProcessing(); + valid_wait = wait_stream.client_state->Wait( + sync_token_2, base::BindOnce(&SyncPointManagerTest::SetIntegerFunction, + &test_num_2, 123)); + EXPECT_TRUE(valid_wait); + EXPECT_EQ(10, test_num_2); + EXPECT_FALSE(sync_point_manager_->IsSyncTokenReleased(sync_token_2)); + wait_stream.EndProcessing(); + + wait_stream.AllocateOrderNum(); + wait_stream.BeginProcessing(); + valid_wait = wait_stream.client_state->Wait( + sync_token_3, base::BindOnce(&SyncPointManagerTest::SetIntegerFunction, + &test_num_3, 123)); + EXPECT_TRUE(valid_wait); + EXPECT_EQ(10, test_num_3); + EXPECT_FALSE(sync_point_manager_->IsSyncTokenReleased(sync_token_3)); + wait_stream.EndProcessing(); + + // Releasing the first sync token should release the second one. Then, + // releasing the second one should be a no-op. 
+ release_stream.BeginProcessing(); + release_stream.client_state->ReleaseFenceSync(release_count_1); + EXPECT_EQ(123, test_num_1); + EXPECT_EQ(123, test_num_2); + EXPECT_EQ(10, test_num_3); + EXPECT_TRUE(sync_point_manager_->IsSyncTokenReleased(sync_token_1)); + EXPECT_TRUE(sync_point_manager_->IsSyncTokenReleased(sync_token_2)); + EXPECT_FALSE(sync_point_manager_->IsSyncTokenReleased(sync_token_3)); + release_stream.EndProcessing(); + + release_stream.BeginProcessing(); + release_stream.client_state->ReleaseFenceSync(release_count_2); + EXPECT_EQ(123, test_num_1); + EXPECT_EQ(123, test_num_2); + EXPECT_EQ(10, test_num_3); + EXPECT_TRUE(sync_point_manager_->IsSyncTokenReleased(sync_token_1)); + EXPECT_TRUE(sync_point_manager_->IsSyncTokenReleased(sync_token_2)); + EXPECT_FALSE(sync_point_manager_->IsSyncTokenReleased(sync_token_3)); + release_stream.EndProcessing(); +} + TEST_F(SyncPointManagerTest, WaitOnSelfFails) { CommandBufferNamespace kNamespaceId = gpu::CommandBufferNamespace::GPU_IO; CommandBufferId kReleaseCmdBufferId = CommandBufferId::FromUnsafeValue(0x123); @@ -206,7 +314,7 @@ TEST_F(SyncPointManagerTest, WaitOnSelfFails) { EXPECT_FALSE(sync_point_manager_->IsSyncTokenReleased(sync_token)); } -TEST_F(SyncPointManagerTest, OutOfOrderRelease) { +TEST_F(SyncPointManagerTest, ReleaseAfterWaitOrderNumber) { CommandBufferNamespace kNamespaceId = gpu::CommandBufferNamespace::GPU_IO; CommandBufferId kReleaseCmdBufferId = CommandBufferId::FromUnsafeValue(0x123); CommandBufferId kWaitCmdBufferId = CommandBufferId::FromUnsafeValue(0x234); diff --git a/chromium/gpu/command_buffer/service/texture_manager.cc b/chromium/gpu/command_buffer/service/texture_manager.cc index 67dee8e9ed4..473f3511c18 100644 --- a/chromium/gpu/command_buffer/service/texture_manager.cc +++ b/chromium/gpu/command_buffer/service/texture_manager.cc @@ -1353,8 +1353,8 @@ void Texture::SetLevelInfo(GLenum target, info.border = border; info.format = format; info.type = type; - info.image = 0; - info.stream_texture_image = 0; + info.image.reset(); + info.stream_texture_image.reset(); info.image_state = UNBOUND; info.internal_workaround = false; diff --git a/chromium/gpu/command_buffer/service/vertex_attrib_manager.cc b/chromium/gpu/command_buffer/service/vertex_attrib_manager.cc index 1112c5b3bb1..c9d89d40b08 100644 --- a/chromium/gpu/command_buffer/service/vertex_attrib_manager.cc +++ b/chromium/gpu/command_buffer/service/vertex_attrib_manager.cc @@ -234,15 +234,16 @@ void VertexAttribManager::SetIsBound(bool is_bound) { } } -bool VertexAttribManager::ValidateBindings( - const char* function_name, - GLES2Decoder* decoder, - FeatureInfo* feature_info, - BufferManager* buffer_manager, - Program* current_program, - GLuint max_vertex_accessed, - bool instanced, - GLsizei primcount) { +bool VertexAttribManager::ValidateBindings(const char* function_name, + GLES2Decoder* decoder, + FeatureInfo* feature_info, + BufferManager* buffer_manager, + Program* current_program, + GLuint max_vertex_accessed, + bool instanced, + GLsizei primcount, + GLint basevertex, + GLuint baseinstance) { DCHECK(primcount); ErrorState* error_state = decoder->GetErrorState(); // true if any enabled, used divisor is zero @@ -280,7 +281,8 @@ bool VertexAttribManager::ValidateBindings( if (attrib_info) { divisor0 |= (attrib->divisor() == 0); have_enabled_active_attribs = true; - GLuint count = attrib->MaxVertexAccessed(primcount, max_vertex_accessed); + GLuint count = attrib->MaxVertexAccessed(primcount, max_vertex_accessed, + basevertex, baseinstance); // 
This attrib is used in the current program. if (!attrib->CanAccess(count)) { ERRORSTATE_SET_GL_ERROR( diff --git a/chromium/gpu/command_buffer/service/vertex_attrib_manager.h b/chromium/gpu/command_buffer/service/vertex_attrib_manager.h index fe296d84876..044d0255992 100644 --- a/chromium/gpu/command_buffer/service/vertex_attrib_manager.h +++ b/chromium/gpu/command_buffer/service/vertex_attrib_manager.h @@ -86,6 +86,16 @@ class GPU_GLES2_EXPORT VertexAttrib { return divisor_ ? ((primcount - 1) / divisor_) : max_vertex_accessed; } + // For performance issue we are having separate overloading functions + // which takes in basevertex and baseinstance + GLuint MaxVertexAccessed(GLsizei primcount, + GLuint max_vertex_accessed, + GLint basevertex, + GLuint baseinstance) const { + return divisor_ ? ((primcount - 1) / divisor_) + baseinstance + : max_vertex_accessed + basevertex; + } + bool is_client_side_array() const { return is_client_side_array_; } @@ -292,15 +302,16 @@ class GPU_GLES2_EXPORT VertexAttribManager return vertex_attribs_.size(); } - bool ValidateBindings( - const char* function_name, - GLES2Decoder* decoder, - FeatureInfo* feature_info, - BufferManager* buffer_manager, - Program* current_program, - GLuint max_vertex_accessed, - bool instanced, - GLsizei primcount); + bool ValidateBindings(const char* function_name, + GLES2Decoder* decoder, + FeatureInfo* feature_info, + BufferManager* buffer_manager, + Program* current_program, + GLuint max_vertex_accessed, + bool instanced, + GLsizei primcount, + GLint basevertex, + GLuint baseinstance); void SetIsBound(bool is_bound); diff --git a/chromium/gpu/command_buffer/service/webgpu_cmd_validation_implementation_autogen.h b/chromium/gpu/command_buffer/service/webgpu_cmd_validation_implementation_autogen.h index dad69a28277..3fff4f9e27f 100644 --- a/chromium/gpu/command_buffer/service/webgpu_cmd_validation_implementation_autogen.h +++ b/chromium/gpu/command_buffer/service/webgpu_cmd_validation_implementation_autogen.h @@ -12,6 +12,7 @@ #define GPU_COMMAND_BUFFER_SERVICE_WEBGPU_CMD_VALIDATION_IMPLEMENTATION_AUTOGEN_H_ static const PowerPreference valid_power_preference_table[] = { + PowerPreference::kDefault, PowerPreference::kHighPerformance, PowerPreference::kLowPower, }; diff --git a/chromium/gpu/command_buffer/service/webgpu_decoder_impl.cc b/chromium/gpu/command_buffer/service/webgpu_decoder_impl.cc index 28dde0fc57f..80bfe5eaff4 100644 --- a/chromium/gpu/command_buffer/service/webgpu_decoder_impl.cc +++ b/chromium/gpu/command_buffer/service/webgpu_decoder_impl.cc @@ -45,6 +45,9 @@ class WireServerCommandSerializer : public dawn_wire::CommandSerializer { ~WireServerCommandSerializer() override = default; void* GetCmdSpace(size_t size) final; bool Flush() final; + void SendAdapterProperties(uint32_t request_adapter_serial, + uint32_t adapter_server_id, + const dawn_native::Adapter& adapter); private: DecoderClient* client_; @@ -53,11 +56,18 @@ class WireServerCommandSerializer : public dawn_wire::CommandSerializer { }; WireServerCommandSerializer::WireServerCommandSerializer(DecoderClient* client) - : client_(client), buffer_(kMaxWireBufferSize), put_offset_(0) {} + : client_(client), + buffer_(kMaxWireBufferSize), + put_offset_(sizeof(cmds::DawnReturnDataHeader)) { + cmds::DawnReturnDataHeader* return_data_header = + reinterpret_cast<cmds::DawnReturnDataHeader*>(&buffer_[0]); + return_data_header->return_data_type = DawnReturnDataType::kDawnCommands; +} void* WireServerCommandSerializer::GetCmdSpace(size_t size) { - // TODO(enga): 
Handle chunking commands if size > kMaxWireBufferSize. - if (size > kMaxWireBufferSize) { + // TODO(enga): Handle chunking commands if size + + // sizeof(cmds::DawnReturnDataHeader)> kMaxWireBufferSize. + if (size + sizeof(cmds::DawnReturnDataHeader) > kMaxWireBufferSize) { NOTREACHED(); return nullptr; } @@ -76,8 +86,8 @@ void* WireServerCommandSerializer::GetCmdSpace(size_t size) { // TODO(enga): Keep track of how much command space the application is using // and adjust the buffer size accordingly. - DCHECK_EQ(put_offset_, 0u); - next_offset = size; + DCHECK_EQ(put_offset_, sizeof(cmds::DawnReturnDataHeader)); + next_offset = put_offset_ + size; } uint8_t* ptr = &buffer_[put_offset_]; @@ -86,7 +96,7 @@ void* WireServerCommandSerializer::GetCmdSpace(size_t size) { } bool WireServerCommandSerializer::Flush() { - if (put_offset_ > 0) { + if (put_offset_ > sizeof(cmds::DawnReturnDataHeader)) { TRACE_EVENT1(TRACE_DISABLED_BY_DEFAULT("gpu.dawn"), "WireServerCommandSerializer::Flush", "bytes", put_offset_); @@ -95,17 +105,53 @@ bool WireServerCommandSerializer::Flush() { "DawnReturnCommands", return_trace_id++); client_->HandleReturnData(base::make_span(buffer_.data(), put_offset_)); - put_offset_ = 0; + put_offset_ = sizeof(cmds::DawnReturnDataHeader); } return true; } +void WireServerCommandSerializer::SendAdapterProperties( + uint32_t request_adapter_serial, + uint32_t adapter_service_id, + const dawn_native::Adapter& adapter) { + WGPUDeviceProperties adapter_properties = adapter.GetAdapterProperties(); + + size_t serialized_adapter_properties_size = + dawn_wire::SerializedWGPUDevicePropertiesSize(&adapter_properties); + std::vector<char> serialized_buffer(sizeof(cmds::DawnReturnDataHeader) + + sizeof(cmds::DawnReturnAdapterIDs) + + serialized_adapter_properties_size); + + // Set Dawn return data header + reinterpret_cast<cmds::DawnReturnDataHeader*>(serialized_buffer.data()) + ->return_data_type = DawnReturnDataType::kRequestedDawnAdapterProperties; + + // Set adapter ids + cmds::DawnReturnAdapterInfo* return_adapter_info = + reinterpret_cast<cmds::DawnReturnAdapterInfo*>( + serialized_buffer.data() + sizeof(cmds::DawnReturnDataHeader)); + return_adapter_info->adapter_ids.request_adapter_serial = + request_adapter_serial; + return_adapter_info->adapter_ids.adapter_service_id = adapter_service_id; + + // Set serialized adapter properties + dawn_wire::SerializeWGPUDeviceProperties( + &adapter_properties, return_adapter_info->deserialized_buffer); + + client_->HandleReturnData(base::make_span( + reinterpret_cast<const uint8_t*>(serialized_buffer.data()), + serialized_buffer.size())); +} + dawn_native::DeviceType PowerPreferenceToDawnDeviceType( PowerPreference power_preference) { switch (power_preference) { case PowerPreference::kLowPower: return dawn_native::DeviceType::IntegratedGPU; case PowerPreference::kHighPerformance: + // Currently for simplicity we always choose discrete GPU as the device + // related to default power preference. + case PowerPreference::kDefault: return dawn_native::DeviceType::DiscreteGPU; default: NOTREACHED(); @@ -195,11 +241,13 @@ class WebGPUDecoderImpl final : public WebGPUDecoder { bool HasPollingWork() const override { return true; } void PerformPollingWork() override { - DCHECK(dawn_device_); DCHECK(wire_serializer_); TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("gpu.dawn"), "WebGPUDecoderImpl::PerformPollingWork"); - dawn_procs_.deviceTick(dawn_device_); + // TODO(jiawei.shao@intel.com): support multiple Dawn devices. 
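// On the serializer changes above: every buffer handed to HandleReturnData
// is now framed with a cmds::DawnReturnDataHeader that tags the payload as
// wire commands or adapter properties, so |put_offset_| starts at
// sizeof(cmds::DawnReturnDataHeader) and an "empty" buffer is header-only.
// Layout, as a sketch:
//   [DawnReturnDataHeader | payload bytes ...]
// A header-aware emptiness check under those assumptions:
bool HasPendingCommands(size_t put_offset) {
  return put_offset > sizeof(cmds::DawnReturnDataHeader);
}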
+ if (wgpu_device_) { + dawn_procs_.deviceTick(wgpu_device_); + } wire_serializer_->Flush(); } @@ -344,10 +392,11 @@ class WebGPUDecoderImpl final : public WebGPUDecoder { void DiscoverAdapters(); - dawn_native::Adapter GetPreferredAdapter( - PowerPreference power_preference) const; + int32_t GetPreferredAdapterIndex(PowerPreference power_preference) const; - error::Error InitDawnDeviceAndSetWireServer(dawn_native::Adapter* adapter); + error::Error InitDawnDeviceAndSetWireServer( + int32_t requested_adapter_index, + const WGPUDeviceProperties& requested_device_properties); std::unique_ptr<SharedImageRepresentationFactory> shared_image_representation_factory_; @@ -363,7 +412,7 @@ class WebGPUDecoderImpl final : public WebGPUDecoder { std::unique_ptr<dawn_native::Instance> dawn_instance_; std::vector<dawn_native::Adapter> dawn_adapters_; DawnProcTable dawn_procs_; - DawnDevice dawn_device_ = nullptr; + WGPUDevice wgpu_device_ = nullptr; std::unique_ptr<dawn_wire::WireServer> wire_server_; DISALLOW_COPY_AND_ASSIGN(WebGPUDecoderImpl); @@ -416,8 +465,8 @@ WebGPUDecoderImpl::~WebGPUDecoderImpl() { // Reset the wire server first so all objects are destroyed before the device. // TODO(enga): Handle Device/Context lost. wire_server_ = nullptr; - if (dawn_device_ != nullptr) { - dawn_procs_.deviceRelease(dawn_device_); + if (wgpu_device_ != nullptr) { + dawn_procs_.deviceRelease(wgpu_device_); } } @@ -427,22 +476,31 @@ ContextResult WebGPUDecoderImpl::Initialize() { } error::Error WebGPUDecoderImpl::InitDawnDeviceAndSetWireServer( - dawn_native::Adapter* adapter) { - DCHECK(adapter != nullptr && (*adapter)); + int32_t requested_adapter_index, + const WGPUDeviceProperties& request_device_properties) { + DCHECK_LE(0, requested_adapter_index); // TODO(jiawei.shao@intel.com): support multiple Dawn devices. - if (dawn_device_ != nullptr) { + if (wgpu_device_ != nullptr) { DCHECK(wire_server_); return error::kNoError; } - dawn_device_ = adapter->CreateDevice(); - if (dawn_device_ == nullptr) { + DCHECK_LT(static_cast<size_t>(requested_adapter_index), + dawn_adapters_.size()); + + dawn_native::DeviceDescriptor device_descriptor; + if (request_device_properties.textureCompressionBC) { + device_descriptor.requiredExtensions.push_back("texture_compression_bc"); + } + + wgpu_device_ = dawn_adapters_[requested_adapter_index].CreateDevice(); + if (wgpu_device_ == nullptr) { return error::kLostContext; } dawn_wire::WireServerDescriptor descriptor = {}; - descriptor.device = dawn_device_; + descriptor.device = wgpu_device_; descriptor.procs = &dawn_procs_; descriptor.serializer = wire_serializer_.get(); descriptor.memoryTransferService = memory_transfer_service_.get(); @@ -464,42 +522,46 @@ void WebGPUDecoderImpl::DiscoverAdapters() { // decide to handle multiple adapters, code on the Chromium side will need to // change to do appropriate cross adapter copying to make this happen, either // manually or by using DirectComposition. 
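// On Windows the discovery loop now keeps only the first D3D12 adapter
// (note the added break), since cross-adapter copies are not implemented.
// The selection in isolation, as a sketch:
for (const dawn_native::Adapter& adapter : instance.GetAdapters()) {
  if (adapter.GetBackendType() == dawn_native::BackendType::D3D12) {
    dawn_adapters_.push_back(adapter);
    break;  // single-adapter policy on Windows, for now
  }
}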
- if (adapter.GetBackendType() == dawn_native::BackendType::D3D12) { + if (adapter.GetBackendType() == dawn_native::BackendType::D3D12) { #else if (adapter.GetBackendType() != dawn_native::BackendType::Null && adapter.GetBackendType() != dawn_native::BackendType::OpenGL) { #endif dawn_adapters_.push_back(adapter); - } +#if defined(OS_WIN) + break; +#endif + } } } -dawn_native::Adapter WebGPUDecoderImpl::GetPreferredAdapter( +int32_t WebGPUDecoderImpl::GetPreferredAdapterIndex( PowerPreference power_preference) const { dawn_native::DeviceType preferred_device_type = PowerPreferenceToDawnDeviceType(power_preference); - dawn_native::Adapter discrete_gpu_adapter = {}; - dawn_native::Adapter integrated_gpu_adapter = {}; - dawn_native::Adapter cpu_adapter = {}; - dawn_native::Adapter unknown_adapter = {}; + int32_t discrete_gpu_adapter_index = -1; + int32_t integrated_gpu_adapter_index = -1; + int32_t cpu_adapter_index = -1; + int32_t unknown_adapter_index = -1; - for (const dawn_native::Adapter& adapter : dawn_adapters_) { + for (int32_t i = 0; i < static_cast<int32_t>(dawn_adapters_.size()); ++i) { + const dawn_native::Adapter& adapter = dawn_adapters_[i]; if (adapter.GetDeviceType() == preferred_device_type) { - return adapter; + return i; } switch (adapter.GetDeviceType()) { case dawn_native::DeviceType::DiscreteGPU: - discrete_gpu_adapter = adapter; + discrete_gpu_adapter_index = i; break; case dawn_native::DeviceType::IntegratedGPU: - integrated_gpu_adapter = adapter; + integrated_gpu_adapter_index = i; break; case dawn_native::DeviceType::CPU: - cpu_adapter = adapter; + cpu_adapter_index = i; break; case dawn_native::DeviceType::Unknown: - unknown_adapter = adapter; + unknown_adapter_index = i; break; default: NOTREACHED(); @@ -508,19 +570,19 @@ dawn_native::Adapter WebGPUDecoderImpl::GetPreferredAdapter( } // For now, we always prefer the discrete GPU - if (discrete_gpu_adapter) { - return discrete_gpu_adapter; + if (discrete_gpu_adapter_index >= 0) { + return discrete_gpu_adapter_index; } - if (integrated_gpu_adapter) { - return integrated_gpu_adapter; + if (integrated_gpu_adapter_index >= 0) { + return integrated_gpu_adapter_index; } - if (cpu_adapter) { - return cpu_adapter; + if (cpu_adapter_index >= 0) { + return cpu_adapter_index; } - if (unknown_adapter) { - return unknown_adapter; + if (unknown_adapter_index >= 0) { + return unknown_adapter_index; } - return dawn_native::Adapter(); + return -1; } const char* WebGPUDecoderImpl::GetCommandName(unsigned int command_id) const { @@ -604,14 +666,54 @@ error::Error WebGPUDecoderImpl::HandleRequestAdapter( PowerPreference power_preference = static_cast<PowerPreference>(c.power_preference); - dawn_native::Adapter requested_adapter = - GetPreferredAdapter(power_preference); - if (!requested_adapter) { + int32_t requested_adapter_index = GetPreferredAdapterIndex(power_preference); + if (requested_adapter_index < 0) { return error::kLostContext; } - // TODO(jiawei.shao@intel.com): support creating device with device descriptor - return InitDawnDeviceAndSetWireServer(&requested_adapter); + // Currently we treat the index of the adapter in dawn_adapters_ as the id of + // the adapter in the server side. 
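GetPreferredAdapterIndex above returns an index into dawn_adapters_ so the same integer can double as the adapter's service id. A condensed, self-contained sketch of its fallback policy; DeviceType here is a stand-in for dawn_native::DeviceType.

#include <cstdint>
#include <vector>

enum class DeviceType { DiscreteGPU, IntegratedGPU, CPU, Unknown };

// Return the first adapter of the preferred type; otherwise fall back in the
// fixed order discrete > integrated > CPU > unknown, or -1 when nothing is
// usable.
int32_t PreferredAdapterIndex(const std::vector<DeviceType>& adapters,
                              DeviceType preferred) {
  int32_t fallback[4] = {-1, -1, -1, -1};  // indexed by DeviceType order
  for (int32_t i = 0; i < static_cast<int32_t>(adapters.size()); ++i) {
    if (adapters[i] == preferred)
      return i;
    fallback[static_cast<int>(adapters[i])] = i;
  }
  for (int32_t index : fallback) {
    if (index >= 0)
      return index;
  }
  return -1;
}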
+ DCHECK_LT(static_cast<size_t>(requested_adapter_index), + dawn_adapters_.size()); + const dawn_native::Adapter& adapter = dawn_adapters_[requested_adapter_index]; + wire_serializer_->SendAdapterProperties( + static_cast<uint32_t>(c.request_adapter_serial), + static_cast<uint32_t>(requested_adapter_index), adapter); + + return error::kNoError; +} + +error::Error WebGPUDecoderImpl::HandleRequestDevice( + uint32_t immediate_data_size, + const volatile void* cmd_data) { + const volatile webgpu::cmds::RequestDevice& c = + *static_cast<const volatile webgpu::cmds::RequestDevice*>(cmd_data); + + uint32_t adapter_service_id = static_cast<uint32_t>(c.adapter_service_id); + uint32_t request_device_properties_shm_id = + static_cast<uint32_t>(c.request_device_properties_shm_id); + uint32_t request_device_properties_shm_offset = + static_cast<uint32_t>(c.request_device_properties_shm_offset); + uint32_t request_device_properties_size = + static_cast<uint32_t>(c.request_device_properties_size); + + WGPUDeviceProperties device_properties = {}; + if (!request_device_properties_size) { + return InitDawnDeviceAndSetWireServer(adapter_service_id, + device_properties); + } + + const volatile char* shm_device_properties = + GetSharedMemoryAs<const volatile char*>( + request_device_properties_shm_id, + request_device_properties_shm_offset, request_device_properties_size); + if (!shm_device_properties) { + return error::kOutOfBounds; + } + + dawn_wire::DeserializeWGPUDeviceProperties(&device_properties, + shm_device_properties); + return InitDawnDeviceAndSetWireServer(adapter_service_id, device_properties); } error::Error WebGPUDecoderImpl::HandleDawnCommands( @@ -655,7 +757,7 @@ error::Error WebGPUDecoderImpl::HandleAssociateMailboxImmediate( uint32_t device_generation = static_cast<uint32_t>(c.device_generation); uint32_t id = static_cast<uint32_t>(c.id); uint32_t generation = static_cast<uint32_t>(c.generation); - uint32_t usage = static_cast<DawnTextureUsage>(c.usage); + uint32_t usage = static_cast<WGPUTextureUsage>(c.usage); // Unpack the mailbox if (sizeof(Mailbox) > immediate_data_size) { @@ -680,23 +782,23 @@ error::Error WebGPUDecoderImpl::HandleAssociateMailboxImmediate( } static constexpr uint32_t kAllowedTextureUsages = static_cast<uint32_t>( - DAWN_TEXTURE_USAGE_COPY_SRC | DAWN_TEXTURE_USAGE_COPY_DST | - DAWN_TEXTURE_USAGE_SAMPLED | DAWN_TEXTURE_USAGE_OUTPUT_ATTACHMENT); + WGPUTextureUsage_CopySrc | WGPUTextureUsage_CopyDst | + WGPUTextureUsage_Sampled | WGPUTextureUsage_OutputAttachment); if (usage & ~kAllowedTextureUsages) { DLOG(ERROR) << "AssociateMailbox: Invalid usage"; return error::kInvalidArguments; } - DawnTextureUsage dawn_usage = static_cast<DawnTextureUsage>(usage); + WGPUTextureUsage wgpu_usage = static_cast<WGPUTextureUsage>(usage); - // Create a DawnTexture from the mailbox. + // Create a WGPUTexture from the mailbox. 
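HandleRequestDevice above only trusts the serialized device properties after GetSharedMemoryAs validates the region. A self-contained sketch of the containment check that validation implies; the name and signature are assumptions, not the decoder helper itself.

#include <cstddef>
#include <cstdint>

// Reject any (offset, size) pair that is not fully contained in the shared
// memory region; the subtraction form avoids overflow in offset + size.
const char* GetSpanChecked(const char* shm_base, size_t shm_size,
                           uint32_t offset, uint32_t size) {
  if (offset > shm_size || size > shm_size - offset)
    return nullptr;
  return shm_base + offset;
}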
std::unique_ptr<SharedImageRepresentationDawn> shared_image = - shared_image_representation_factory_->ProduceDawn(mailbox, dawn_device_); + shared_image_representation_factory_->ProduceDawn(mailbox, wgpu_device_); if (!shared_image) { DLOG(ERROR) << "AssociateMailbox: Couldn't produce shared image"; return error::kInvalidArguments; } - DawnTexture texture = shared_image->BeginAccess(dawn_usage); + WGPUTexture texture = shared_image->BeginAccess(wgpu_usage); if (!texture) { DLOG(ERROR) << "AssociateMailbox: Couldn't begin shared image access"; return error::kInvalidArguments; diff --git a/chromium/gpu/command_buffer/service/webgpu_decoder_unittest.cc b/chromium/gpu/command_buffer/service/webgpu_decoder_unittest.cc index b7cda9ce398..788d0923d4f 100644 --- a/chromium/gpu/command_buffer/service/webgpu_decoder_unittest.cc +++ b/chromium/gpu/command_buffer/service/webgpu_decoder_unittest.cc @@ -14,6 +14,7 @@ #include "gpu/command_buffer/service/shared_image_factory.h" #include "gpu/command_buffer/service/shared_image_manager.h" #include "gpu/command_buffer/service/test_helper.h" +#include "gpu/config/gpu_test_config.h" #include "testing/gtest/include/gtest/gtest.h" #include "ui/gl/gl_context.h" #include "ui/gl/gl_surface.h" @@ -31,32 +32,41 @@ class WebGPUDecoderTest : public ::testing::Test { WebGPUDecoderTest() {} void SetUp() override { + if (!WebGPUSupported()) { + return; + } + // Shared image factories for some backends take a dependency on GL. // Failure to create a test context with a surface and making it current // will result in a "NoContext" context being current that asserts on all // GL calls. - gl::init::InitializeGLNoExtensionsOneOff(); gl_surface_ = gl::init::CreateOffscreenGLSurface(gfx::Size(1, 1)); + ASSERT_NE(gl_surface_, nullptr); + gl_context_ = gl::init::CreateGLContext(nullptr, gl_surface_.get(), gl::GLContextAttribs()); + ASSERT_NE(gl_context_, nullptr); + gl_context_->MakeCurrent(gl_surface_.get()); + decoder_client_.reset(new FakeDecoderClient()); command_buffer_service_.reset(new FakeCommandBufferServiceBase()); - decoder_.reset(WebGPUDecoder::Create(nullptr, command_buffer_service_.get(), - &shared_image_manager_, nullptr, - &outputter_)); - if (decoder_->Initialize() != ContextResult::kSuccess) { - decoder_ = nullptr; - } else { - cmds::RequestAdapter requestAdapterCmd; - requestAdapterCmd.Init( - static_cast<uint32_t>(webgpu::PowerPreference::kHighPerformance)); - if (ExecuteCmd(requestAdapterCmd) == error::kLostContext) { - decoder_ = nullptr; - } else { - ASSERT_EQ(error::kNoError, ExecuteCmd(requestAdapterCmd)); - } - } + decoder_.reset(WebGPUDecoder::Create( + decoder_client_.get(), command_buffer_service_.get(), + &shared_image_manager_, nullptr, &outputter_)); + ASSERT_EQ(decoder_->Initialize(), ContextResult::kSuccess); + + constexpr uint32_t kAdapterClientID = 0; + cmds::RequestAdapter requestAdapterCmd; + requestAdapterCmd.Init( + kAdapterClientID, + static_cast<uint32_t>(webgpu::PowerPreference::kHighPerformance)); + ASSERT_EQ(error::kNoError, ExecuteCmd(requestAdapterCmd)); + + constexpr uint32_t kAdapterServiceID = 0; + cmds::RequestDevice requestDeviceCmd; + requestDeviceCmd.Init(kAdapterServiceID, 0, 0, 0); + ASSERT_EQ(error::kNoError, ExecuteCmd(requestDeviceCmd)); factory_ = std::make_unique<SharedImageFactory>( GpuPreferences(), GpuDriverBugWorkarounds(), GpuFeatureInfo(), @@ -66,15 +76,22 @@ class WebGPUDecoderTest : public ::testing::Test { } void TearDown() override { - factory_->DestroyAllSharedImages(true); - factory_.reset(); + if 
(factory_) { + factory_->DestroyAllSharedImages(true); + factory_.reset(); + } gl_surface_.reset(); gl_context_.reset(); - gl::init::ShutdownGL(false); } - bool WebGPUSupported() const { return decoder_ != nullptr; } + bool WebGPUSupported() const { + // WebGPU does not work on Win7 because there is no D3D12 on Win7 + // Linux bots running Vulkan are not properly initializing the shared + // image extensions. + return !GPUTestBotConfig::CurrentConfigMatches("Win7") && + !GPUTestBotConfig::CurrentConfigMatches("Linux"); + } template <typename T> error::Error ExecuteCmd(const T& cmd) { @@ -99,6 +116,7 @@ class WebGPUDecoderTest : public ::testing::Test { protected: std::unique_ptr<FakeCommandBufferServiceBase> command_buffer_service_; std::unique_ptr<WebGPUDecoder> decoder_; + std::unique_ptr<FakeDecoderClient> decoder_client_; gles2::TraceOutputter outputter_; SharedImageManager shared_image_manager_; std::unique_ptr<SharedImageFactory> factory_; @@ -137,7 +155,7 @@ TEST_F(WebGPUDecoderTest, AssociateMailbox) { { gpu::Mailbox bad_mailbox; AssociateMailboxCmdStorage cmd; - cmd.cmd.Init(0, 0, 1, 0, DAWN_TEXTURE_USAGE_SAMPLED, bad_mailbox.name); + cmd.cmd.Init(0, 0, 1, 0, WGPUTextureUsage_Sampled, bad_mailbox.name); EXPECT_EQ(error::kInvalidArguments, ExecuteImmediateCmd(cmd.cmd, sizeof(bad_mailbox.name))); } @@ -145,7 +163,7 @@ TEST_F(WebGPUDecoderTest, AssociateMailbox) { // Error case: device doesn't exist. { AssociateMailboxCmdStorage cmd; - cmd.cmd.Init(42, 42, 1, 0, DAWN_TEXTURE_USAGE_SAMPLED, mailbox.name); + cmd.cmd.Init(42, 42, 1, 0, WGPUTextureUsage_Sampled, mailbox.name); EXPECT_EQ(error::kInvalidArguments, ExecuteImmediateCmd(cmd.cmd, sizeof(mailbox.name))); } @@ -153,7 +171,7 @@ TEST_F(WebGPUDecoderTest, AssociateMailbox) { // Error case: texture ID invalid for the wire server. { AssociateMailboxCmdStorage cmd; - cmd.cmd.Init(0, 0, 42, 42, DAWN_TEXTURE_USAGE_SAMPLED, mailbox.name); + cmd.cmd.Init(0, 0, 42, 42, WGPUTextureUsage_Sampled, mailbox.name); EXPECT_EQ(error::kInvalidArguments, ExecuteImmediateCmd(cmd.cmd, sizeof(mailbox.name))); } @@ -161,7 +179,7 @@ TEST_F(WebGPUDecoderTest, AssociateMailbox) { // Error case: invalid usage. { AssociateMailboxCmdStorage cmd; - cmd.cmd.Init(0, 0, 42, 42, DAWN_TEXTURE_USAGE_SAMPLED, mailbox.name); + cmd.cmd.Init(0, 0, 42, 42, WGPUTextureUsage_Sampled, mailbox.name); EXPECT_EQ(error::kInvalidArguments, ExecuteImmediateCmd(cmd.cmd, sizeof(mailbox.name))); } @@ -169,7 +187,7 @@ TEST_F(WebGPUDecoderTest, AssociateMailbox) { // Error case: invalid texture usage. { AssociateMailboxCmdStorage cmd; - cmd.cmd.Init(0, 0, 1, 0, DAWN_TEXTURE_USAGE_FORCE32, mailbox.name); + cmd.cmd.Init(0, 0, 1, 0, WGPUTextureUsage_Force32, mailbox.name); EXPECT_EQ(error::kInvalidArguments, ExecuteImmediateCmd(cmd.cmd, sizeof(mailbox.name))); } @@ -181,7 +199,7 @@ TEST_F(WebGPUDecoderTest, AssociateMailbox) { // and generation invalid. { AssociateMailboxCmdStorage cmd; - cmd.cmd.Init(0, 0, 1, 0, DAWN_TEXTURE_USAGE_SAMPLED, mailbox.name); + cmd.cmd.Init(0, 0, 1, 0, WGPUTextureUsage_Sampled, mailbox.name); EXPECT_EQ(error::kNoError, ExecuteImmediateCmd(cmd.cmd, sizeof(mailbox.name))); } @@ -189,7 +207,7 @@ TEST_F(WebGPUDecoderTest, AssociateMailbox) { // Error case: associated to an already associated texture. 
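The AssociateMailbox handler earlier in this patch rejects any usage bit outside a fixed allow-list, which is what the invalid-usage tests around here exercise. A self-contained sketch of that gate, with flag values as in webgpu.h of this era (treat the exact constants as assumptions).

#include <cstdint>

constexpr uint32_t kWGPUTextureUsage_CopySrc = 0x01;
constexpr uint32_t kWGPUTextureUsage_CopyDst = 0x02;
constexpr uint32_t kWGPUTextureUsage_Sampled = 0x04;
constexpr uint32_t kWGPUTextureUsage_OutputAttachment = 0x10;

// A usage is valid only if every set bit is on the allow-list; this is the
// check that makes the Force32 case fail with kInvalidArguments.
bool IsAllowedMailboxUsage(uint32_t usage) {
  constexpr uint32_t kAllowed =
      kWGPUTextureUsage_CopySrc | kWGPUTextureUsage_CopyDst |
      kWGPUTextureUsage_Sampled | kWGPUTextureUsage_OutputAttachment;
  return (usage & ~kAllowed) == 0;
}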
{ AssociateMailboxCmdStorage cmd; - cmd.cmd.Init(0, 0, 1, 0, DAWN_TEXTURE_USAGE_SAMPLED, mailbox.name); + cmd.cmd.Init(0, 0, 1, 0, WGPUTextureUsage_Sampled, mailbox.name); EXPECT_EQ(error::kInvalidArguments, ExecuteImmediateCmd(cmd.cmd, sizeof(mailbox.name))); } @@ -216,7 +234,7 @@ TEST_F(WebGPUDecoderTest, DissociateMailbox) { // Associate a mailbox so we can later dissociate it. { AssociateMailboxCmdStorage cmd; - cmd.cmd.Init(0, 0, 1, 0, DAWN_TEXTURE_USAGE_SAMPLED, mailbox.name); + cmd.cmd.Init(0, 0, 1, 0, WGPUTextureUsage_Sampled, mailbox.name); EXPECT_EQ(error::kNoError, ExecuteImmediateCmd(cmd.cmd, sizeof(mailbox.name))); } diff --git a/chromium/gpu/command_buffer/service/wrapped_sk_image.cc b/chromium/gpu/command_buffer/service/wrapped_sk_image.cc index f14240546e9..a6d5768392e 100644 --- a/chromium/gpu/command_buffer/service/wrapped_sk_image.cc +++ b/chromium/gpu/command_buffer/service/wrapped_sk_image.cc @@ -18,6 +18,7 @@ #include "gpu/command_buffer/service/shared_image_backing.h" #include "gpu/command_buffer/service/shared_image_representation.h" #include "gpu/command_buffer/service/skia_utils.h" +#include "skia/buildflags.h" #include "third_party/skia/include/core/SkPromiseImageTexture.h" #include "third_party/skia/include/core/SkSurface.h" #include "third_party/skia/include/core/SkSurfaceProps.h" @@ -53,7 +54,7 @@ class WrappedSkImage : public SharedImageBacking { void Destroy() override { promise_texture_.reset(); - image_.reset(); + gpu::DeleteSkImage(context_state_, std::move(image_)); } bool IsCleared() const override { return cleared_; } @@ -201,6 +202,14 @@ class WrappedSkImage : public SharedImageBacking { tracing_id_ = reinterpret_cast<uint64_t>(image_info.fImage); break; } +#if BUILDFLAG(SKIA_USE_DAWN) + case GrBackendApi::kDawn: { + GrDawnTextureInfo tex_info; + if (backend_texture.getDawnTextureInfo(&tex_info)) + tracing_id_ = reinterpret_cast<uint64_t>(tex_info.fTexture.Get()); + break; + } +#endif default: NOTREACHED(); return false; @@ -212,6 +221,7 @@ class WrappedSkImage : public SharedImageBacking { SharedContextState* const context_state_; sk_sp<SkPromiseImageTexture> promise_texture_; + // TODO(penghuang): manage texture directly with GrBackendTexture, sk_sp<SkImage> image_; bool cleared_ = false; diff --git a/chromium/gpu/command_buffer/webgpu_cmd_buffer_functions.txt b/chromium/gpu/command_buffer/webgpu_cmd_buffer_functions.txt index 9c0589804dd..43dc08f0801 100644 --- a/chromium/gpu/command_buffer/webgpu_cmd_buffer_functions.txt +++ b/chromium/gpu/command_buffer/webgpu_cmd_buffer_functions.txt @@ -9,4 +9,5 @@ GL_APICALL void GL_APIENTRY wgDawnCommands (const char* commands, size_t size); GL_APICALL void GL_APIENTRY wgAssociateMailbox (GLuint device_id, GLuint device_generation, GLuint id, GLuint generation, GLuint usage, const GLbyte* mailbox); GL_APICALL void GL_APIENTRY wgDissociateMailbox (GLuint texture_id, GLuint texture_generation); -GL_APICALL void GL_APIENTRY wgRequestAdapter (EnumClassPowerPreference power_preference = PowerPreference::kHighPerformance); +GL_APICALL void GL_APIENTRY wgRequestAdapter (GLuint request_adapter_serial, EnumClassPowerPreference power_preference = PowerPreference::kDefault); +GL_APICALL void GL_APIENTRY wgRequestDevice (GLuint adapter_service_id, const char* dawn_request_device_properties, size_t request_device_properties_size); diff --git a/chromium/gpu/config/BUILD.gn b/chromium/gpu/config/BUILD.gn index d595e1b10f6..d954e1ac669 100644 --- a/chromium/gpu/config/BUILD.gn +++ b/chromium/gpu/config/BUILD.gn @@ -6,6 +6,7 
@@ import("//build/config/chrome_build.gni") import("//build/config/chromecast_build.gni") import("//build/config/jumbo.gni") import("//build/config/ui.gni") +import("//gpu/vulkan/features.gni") group("config") { if (is_component_build) { @@ -89,6 +90,31 @@ action("workaround_list") { } } +if (enable_vulkan) { + component("vulkan_info") { + sources = [ + "vulkan_info.cc", + "vulkan_info.h", + ] + + configs += [ "//gpu:gpu_implementation" ] + + deps = [ + "//gpu/ipc/common:vulkan_interface", + ] + + all_dependent_configs = [ "//third_party/vulkan:vulkan_config" ] + + # surpass linke error 4217. It is because vulkan_info depends on + # //gpu/ipc/common:vulkan_interface and //gpu/ipc/common:vulkan_interface" + # depends on this target. + # TODO(penghuang): Fix this link error + if (is_win && is_component_build) { + ldflags = [ "/IGNORE:4217" ] + } + } +} + jumbo_source_set("config_sources") { # External code should depend on this via //gpu/config above rather than # depending on this directly or the component build will break. @@ -152,6 +178,10 @@ jumbo_source_set("config_sources") { "//components/crash/core/common:crash_key", ] + if (enable_vulkan) { + public_deps += [ ":vulkan_info" ] + } + deps = [ ":process_json", "//base", diff --git a/chromium/gpu/config/gpu_blocklist.cc b/chromium/gpu/config/gpu_blocklist.cc index 7fb9cf5042f..694aa68bf62 100644 --- a/chromium/gpu/config/gpu_blocklist.cc +++ b/chromium/gpu/config/gpu_blocklist.cc @@ -27,8 +27,6 @@ std::unique_ptr<GpuBlocklist> GpuBlocklist::Create( std::unique_ptr<GpuBlocklist> list(new GpuBlocklist(data)); list->AddSupportedFeature("accelerated_2d_canvas", GPU_FEATURE_TYPE_ACCELERATED_2D_CANVAS); - list->AddSupportedFeature("gpu_compositing", - GPU_FEATURE_TYPE_GPU_COMPOSITING); list->AddSupportedFeature("accelerated_webgl", GPU_FEATURE_TYPE_ACCELERATED_WEBGL); list->AddSupportedFeature("flash3d", GPU_FEATURE_TYPE_FLASH3D); @@ -47,6 +45,7 @@ std::unique_ptr<GpuBlocklist> GpuBlocklist::Create( GPU_FEATURE_TYPE_OOP_RASTERIZATION); list->AddSupportedFeature("android_surface_control", GPU_FEATURE_TYPE_ANDROID_SURFACE_CONTROL); + list->AddSupportedFeature("accelerated_gl", GPU_FEATURE_TYPE_ACCELERATED_GL); list->AddSupportedFeature("metal", GPU_FEATURE_TYPE_METAL); list->AddSupportedFeature("vulkan", GPU_FEATURE_TYPE_VULKAN); return list; diff --git a/chromium/gpu/config/gpu_blocklist_unittest.cc b/chromium/gpu/config/gpu_blocklist_unittest.cc index ff2264eb48b..a6fac77f312 100644 --- a/chromium/gpu/config/gpu_blocklist_unittest.cc +++ b/chromium/gpu/config/gpu_blocklist_unittest.cc @@ -83,8 +83,6 @@ class GpuBlocklistTest : public testing::Test { GPU_BLOCKLIST_FEATURE_TEST(Accelerated2DCanvas, GPU_FEATURE_TYPE_ACCELERATED_2D_CANVAS) -GPU_BLOCKLIST_FEATURE_TEST(GpuCompositing, GPU_FEATURE_TYPE_GPU_COMPOSITING) - GPU_BLOCKLIST_FEATURE_TEST(AcceleratedWebGL, GPU_FEATURE_TYPE_ACCELERATED_WEBGL) GPU_BLOCKLIST_FEATURE_TEST(Flash3D, GPU_FEATURE_TYPE_FLASH3D) @@ -106,6 +104,10 @@ GPU_BLOCKLIST_FEATURE_TEST(WebGL2, GPU_FEATURE_TYPE_ACCELERATED_WEBGL2) GPU_BLOCKLIST_FEATURE_TEST(ProtectedVideoDecode, GPU_FEATURE_TYPE_PROTECTED_VIDEO_DECODE) +GPU_BLOCKLIST_FEATURE_TEST(GL, GPU_FEATURE_TYPE_ACCELERATED_GL) + +GPU_BLOCKLIST_FEATURE_TEST(Vulkan, GPU_FEATURE_TYPE_VULKAN) + // Test for invariant "Assume the newly last added entry has the largest ID". // See GpuControlList::GpuControlList. 
// It checks software_rendering_list.json diff --git a/chromium/gpu/config/gpu_control_list.cc b/chromium/gpu/config/gpu_control_list.cc index 38b497afa1d..dac27c419fb 100644 --- a/chromium/gpu/config/gpu_control_list.cc +++ b/chromium/gpu/config/gpu_control_list.cc @@ -323,6 +323,12 @@ bool GpuControlList::More::Contains(const GPUInfo& gpu_info) const { #endif // OS_WIN break; } + if ((subpixel_font_rendering == kUnsupported && + gpu_info.subpixel_font_rendering) || + (subpixel_font_rendering == kSupported && + !gpu_info.subpixel_font_rendering)) { + return false; + } return true; } diff --git a/chromium/gpu/config/gpu_control_list.h b/chromium/gpu/config/gpu_control_list.h index f836439a118..4b60cc8476d 100644 --- a/chromium/gpu/config/gpu_control_list.h +++ b/chromium/gpu/config/gpu_control_list.h @@ -158,6 +158,8 @@ class GPU_EXPORT GpuControlList { uint32_t test_group; + SupportedOrNot subpixel_font_rendering; + // Return true if GL_VERSION string does not fit the entry info // on GL type and GL version. bool GLVersionInfoMismatch(const std::string& gl_version_string) const; diff --git a/chromium/gpu/config/gpu_control_list_entry_unittest.cc b/chromium/gpu/config/gpu_control_list_entry_unittest.cc index d02c66458ab..eeb183b6c09 100644 --- a/chromium/gpu/config/gpu_control_list_entry_unittest.cc +++ b/chromium/gpu/config/gpu_control_list_entry_unittest.cc @@ -1060,4 +1060,55 @@ TEST_F(GpuControlListEntryTest, HardwareOverlay) { } #endif // OS_WIN +TEST_F(GpuControlListEntryTest, TestSubpixelFontRendering) { + const Entry& entry = GetEntry(kGpuControlListEntryTest_SubpixelFontRendering); + + GPUInfo gpu_info; + gpu_info.subpixel_font_rendering = true; + gpu_info.gl_renderer = "Mali0xx"; + + EXPECT_TRUE(entry.Contains(kOsChromeOS, "10.0", gpu_info)); + + gpu_info.subpixel_font_rendering = false; + gpu_info.gl_renderer = "Mali1xx"; + EXPECT_FALSE(entry.Contains(kOsChromeOS, "10.0", gpu_info)); + + gpu_info.subpixel_font_rendering = false; + gpu_info.gl_renderer = "DontCare"; + EXPECT_FALSE(entry.Contains(kOsChromeOS, "10.0", gpu_info)); + + gpu_info.subpixel_font_rendering = true; + gpu_info.gl_renderer = "DontCare"; + EXPECT_FALSE(entry.Contains(kOsChromeOS, "10.0", gpu_info)); + + gpu_info.subpixel_font_rendering = false; + gpu_info.gl_renderer = "Supported"; + EXPECT_TRUE(entry.Contains(kOsChromeOS, "10.0", gpu_info)); + + gpu_info.subpixel_font_rendering = true; + gpu_info.gl_renderer = "Supported"; + EXPECT_FALSE(entry.Contains(kOsChromeOS, "10.0", gpu_info)); + + gpu_info.subpixel_font_rendering = true; + gpu_info.gl_renderer = "Others"; + EXPECT_TRUE(entry.Contains(kOsChromeOS, "10.0", gpu_info)); + + // Not ChromeOS + EXPECT_FALSE(entry.Contains(kOsLinux, "10.0", gpu_info)); +} + +TEST_F(GpuControlListEntryTest, TestSubpixelFontRenderingDontCare) { + const Entry& entry = + GetEntry(kGpuControlListEntryTest_SubpixelFontRenderingDontCare); + + GPUInfo gpu_info; + gpu_info.subpixel_font_rendering = true; + gpu_info.gl_renderer = "Mali0xx"; + + EXPECT_TRUE(entry.Contains(kOsChromeOS, "10.0", gpu_info)); + + gpu_info.subpixel_font_rendering = false; + EXPECT_TRUE(entry.Contains(kOsChromeOS, "10.0", gpu_info)); +} + } // namespace gpu diff --git a/chromium/gpu/config/gpu_control_list_format.txt b/chromium/gpu/config/gpu_control_list_format.txt index b61c158f0e2..338910aef16 100644 --- a/chromium/gpu/config/gpu_control_list_format.txt +++ b/chromium/gpu/config/gpu_control_list_format.txt @@ -80,6 +80,10 @@ // specify that. // 30. "intel_gpu_generation" is a VERSION structure. 
Each Intel GPU has a // specific integer (meaning generation) associated. +// 31. "subpixel_font_rendering" is either "supported" or "unsupported". Currently it +// only applies on ChromeOS where subpixel font rendering causes a glitch +// on Mali GPUs. By default it's "dont_care" and there is no need to +// specify that. // // VERSION includes "op", "style", "value", and "value2". "op" can be any of // the following values: "=", "<", "<=", ">", ">=", "any", "between". "style" diff --git a/chromium/gpu/config/gpu_control_list_testing.json b/chromium/gpu/config/gpu_control_list_testing.json index 55f5852de01..d32c8ab200d 100644 --- a/chromium/gpu/config/gpu_control_list_testing.json +++ b/chromium/gpu/config/gpu_control_list_testing.json @@ -268,7 +268,7 @@ "features": [ "all", { - "exceptions" : [ + "exceptions" : [ "test_feature_0" ] } @@ -871,6 +871,34 @@ "features": [ "test_feature_0" ] + }, + { + "id": 73, + "description": "GpuControlListEntryTest.SubpixelFontRendering", + "os": { + "type": "chromeos" + }, + "features": [ + "test_feature_0" + ], + "exceptions": [ + { "gl_renderer": "Mali.*", + "subpixel_font_rendering": "unsupported"}, + { "gl_renderer": "DontCare" }, + { "gl_renderer": "Supported", + "subpixel_font_rendering": "supported"} + ] + }, + { + "id": 74, + "description": "GpuControlListEntryTest.SubpixelFontRenderingDontCare", + "os": { + "type": "chromeos" + }, + "gl_renderer": "Mali.*", + "features": [ + "test_feature_0" + ] } ] } diff --git a/chromium/gpu/config/gpu_control_list_testing_arrays_and_structs_autogen.h b/chromium/gpu/config/gpu_control_list_testing_arrays_and_structs_autogen.h index cd45ad095e5..6564a31921b 100644 --- a/chromium/gpu/config/gpu_control_list_testing_arrays_and_structs_autogen.h +++ b/chromium/gpu/config/gpu_control_list_testing_arrays_and_structs_autogen.h @@ -52,6 +52,7 @@ const GpuControlList::More kMoreForEntry1_1440601243 = { nullptr}, // gpu_count GpuControlList::kDontCare, // hardware_overlay 0, // test_group + GpuControlList::kDontCare, // subpixel_font_rendering }; const int kFeatureListForGpuControlTestingEntry2[1] = { @@ -72,6 +73,7 @@ const GpuControlList::More kMoreForEntry2_1440601243 = { nullptr}, // gpu_count GpuControlList::kDontCare, // hardware_overlay 0, // test_group + GpuControlList::kDontCare, // subpixel_font_rendering }; const int kFeatureListForGpuControlTestingEntry3[1] = { @@ -92,6 +94,7 @@ const GpuControlList::More kMoreForEntry3_1440601243 = { nullptr}, // gpu_count GpuControlList::kDontCare, // hardware_overlay 0, // test_group + GpuControlList::kDontCare, // subpixel_font_rendering }; const int kFeatureListForGpuControlTestingEntry4[1] = { @@ -112,6 +115,7 @@ const GpuControlList::More kMoreForEntry4_1440601243 = { nullptr}, // gpu_count GpuControlList::kDontCare, // hardware_overlay 0, // test_group + GpuControlList::kDontCare, // subpixel_font_rendering }; const GpuControlList::More kMoreForEntry4_1440601243Exception0 = { @@ -128,6 +132,7 @@ const GpuControlList::More kMoreForEntry4_1440601243Exception0 = { nullptr}, // gpu_count GpuControlList::kDontCare, // hardware_overlay 0, // test_group + GpuControlList::kDontCare, // subpixel_font_rendering }; const int kFeatureListForGpuControlTestingEntry5[1] = { @@ -148,6 +153,7 @@ const GpuControlList::More kMoreForEntry5_1440601243 = { nullptr}, // gpu_count GpuControlList::kDontCare, // hardware_overlay 0, // test_group + GpuControlList::kDontCare, // subpixel_font_rendering }; const GpuControlList::More kMoreForEntry5_1440601243Exception0 = { @@ -164,6 +170,7 @@ 
const GpuControlList::More kMoreForEntry5_1440601243Exception0 = { nullptr}, // gpu_count GpuControlList::kDontCare, // hardware_overlay 0, // test_group + GpuControlList::kDontCare, // subpixel_font_rendering }; const int kFeatureListForGpuControlTestingEntry6[1] = { @@ -189,6 +196,7 @@ const GpuControlList::More kMoreForEntry6_1440601243 = { nullptr}, // gpu_count GpuControlList::kDontCare, // hardware_overlay 0, // test_group + GpuControlList::kDontCare, // subpixel_font_rendering }; const int kFeatureListForGpuControlTestingEntry7[1] = { @@ -209,6 +217,7 @@ const GpuControlList::More kMoreForEntry7_1440601243 = { nullptr}, // gpu_count GpuControlList::kDontCare, // hardware_overlay 0, // test_group + GpuControlList::kDontCare, // subpixel_font_rendering }; const int kFeatureListForGpuControlTestingEntry8[1] = { @@ -229,6 +238,7 @@ const GpuControlList::More kMoreForEntry8_1440601243 = { nullptr}, // gpu_count GpuControlList::kDontCare, // hardware_overlay 0, // test_group + GpuControlList::kDontCare, // subpixel_font_rendering }; const int kFeatureListForGpuControlTestingEntry9[1] = { @@ -249,6 +259,7 @@ const GpuControlList::More kMoreForEntry9_1440601243 = { nullptr}, // gpu_count GpuControlList::kDontCare, // hardware_overlay 0, // test_group + GpuControlList::kDontCare, // subpixel_font_rendering }; const int kFeatureListForGpuControlTestingEntry10[1] = { @@ -269,6 +280,7 @@ const GpuControlList::More kMoreForEntry10_1440601243 = { nullptr}, // gpu_count GpuControlList::kDontCare, // hardware_overlay 0, // test_group + GpuControlList::kDontCare, // subpixel_font_rendering }; const int kFeatureListForGpuControlTestingEntry11[1] = { @@ -296,6 +308,7 @@ const GpuControlList::More kMoreForEntry11_1440601243 = { nullptr}, // gpu_count GpuControlList::kDontCare, // hardware_overlay 0, // test_group + GpuControlList::kDontCare, // subpixel_font_rendering }; const int kFeatureListForGpuControlTestingEntry12[1] = { @@ -323,6 +336,7 @@ const GpuControlList::More kMoreForEntry12_1440601243 = { nullptr}, // gpu_count GpuControlList::kDontCare, // hardware_overlay 0, // test_group + GpuControlList::kDontCare, // subpixel_font_rendering }; const int kFeatureListForGpuControlTestingEntry13[1] = { @@ -350,6 +364,7 @@ const GpuControlList::More kMoreForEntry13_1440601243 = { nullptr}, // gpu_count GpuControlList::kDontCare, // hardware_overlay 0, // test_group + GpuControlList::kDontCare, // subpixel_font_rendering }; const int kFeatureListForGpuControlTestingEntry14[1] = { @@ -377,6 +392,7 @@ const GpuControlList::More kMoreForEntry14_1440601243 = { nullptr}, // gpu_count GpuControlList::kDontCare, // hardware_overlay 0, // test_group + GpuControlList::kDontCare, // subpixel_font_rendering }; const int kFeatureListForGpuControlTestingEntry15[1] = { @@ -404,6 +420,7 @@ const GpuControlList::More kMoreForEntry15_1440601243 = { nullptr}, // gpu_count GpuControlList::kDontCare, // hardware_overlay 0, // test_group + GpuControlList::kDontCare, // subpixel_font_rendering }; const int kFeatureListForGpuControlTestingEntry16[1] = { @@ -424,6 +441,7 @@ const GpuControlList::More kMoreForEntry16_1440601243 = { nullptr}, // gpu_count GpuControlList::kDontCare, // hardware_overlay 0, // test_group + GpuControlList::kDontCare, // subpixel_font_rendering }; const int kFeatureListForGpuControlTestingEntry17[1] = { @@ -444,6 +462,7 @@ const GpuControlList::More kMoreForEntry17_1440601243 = { nullptr}, // gpu_count GpuControlList::kDontCare, // hardware_overlay 0, // test_group + GpuControlList::kDontCare, // 
subpixel_font_rendering }; const int kFeatureListForGpuControlTestingEntry18[1] = { @@ -470,6 +489,7 @@ const GpuControlList::More kMoreForEntry18_1440601243 = { nullptr}, // gpu_count GpuControlList::kDontCare, // hardware_overlay 0, // test_group + GpuControlList::kDontCare, // subpixel_font_rendering }; const int kFeatureListForGpuControlTestingEntry19[1] = { @@ -496,6 +516,7 @@ const GpuControlList::More kMoreForEntry19_1440601243 = { nullptr}, // gpu_count GpuControlList::kDontCare, // hardware_overlay 0, // test_group + GpuControlList::kDontCare, // subpixel_font_rendering }; const int kFeatureListForGpuControlTestingEntry20[1] = { @@ -522,6 +543,7 @@ const GpuControlList::More kMoreForEntry20_1440601243 = { nullptr}, // gpu_count GpuControlList::kDontCare, // hardware_overlay 0, // test_group + GpuControlList::kDontCare, // subpixel_font_rendering }; const int kFeatureListForGpuControlTestingEntry21[1] = { @@ -542,6 +564,7 @@ const GpuControlList::More kMoreForEntry21_1440601243 = { nullptr}, // gpu_count GpuControlList::kDontCare, // hardware_overlay 0, // test_group + GpuControlList::kDontCare, // subpixel_font_rendering }; const GpuControlList::GLStrings @@ -566,6 +589,7 @@ const GpuControlList::More kMoreForEntry21_1440601243Exception0 = { nullptr}, // gpu_count GpuControlList::kDontCare, // hardware_overlay 0, // test_group + GpuControlList::kDontCare, // subpixel_font_rendering }; const int kFeatureListForGpuControlTestingEntry22[1] = { @@ -586,6 +610,7 @@ const GpuControlList::More kMoreForEntry22_1440601243 = { nullptr}, // gpu_count GpuControlList::kDontCare, // hardware_overlay 0, // test_group + GpuControlList::kDontCare, // subpixel_font_rendering }; const int kFeatureListForGpuControlTestingEntry23[3] = { @@ -608,6 +633,7 @@ const GpuControlList::More kMoreForEntry23_1440601243 = { nullptr}, // gpu_count GpuControlList::kDontCare, // hardware_overlay 0, // test_group + GpuControlList::kDontCare, // subpixel_font_rendering }; const int kFeatureListForGpuControlTestingEntry24[2] = { @@ -629,6 +655,7 @@ const GpuControlList::More kMoreForEntry24_1440601243 = { nullptr}, // gpu_count GpuControlList::kDontCare, // hardware_overlay 0, // test_group + GpuControlList::kDontCare, // subpixel_font_rendering }; const int kFeatureListForGpuControlTestingEntry25[1] = { @@ -653,6 +680,7 @@ const GpuControlList::More kMoreForEntry25_1440601243 = { nullptr}, // gpu_count GpuControlList::kDontCare, // hardware_overlay 0, // test_group + GpuControlList::kDontCare, // subpixel_font_rendering }; const int kFeatureListForGpuControlTestingEntry26[1] = { @@ -687,6 +715,7 @@ const GpuControlList::More kMoreForEntry26_1440601243 = { nullptr}, // gpu_count GpuControlList::kDontCare, // hardware_overlay 0, // test_group + GpuControlList::kDontCare, // subpixel_font_rendering }; const int kFeatureListForGpuControlTestingEntry27[1] = { @@ -707,6 +736,7 @@ const GpuControlList::More kMoreForEntry27_1440601243 = { nullptr}, // gpu_count GpuControlList::kDontCare, // hardware_overlay 0, // test_group + GpuControlList::kDontCare, // subpixel_font_rendering }; const char* const kMachineModelNameForEntry27Exception0[1] = { @@ -735,6 +765,7 @@ const GpuControlList::More kMoreForEntry27_1440601243Exception0 = { nullptr}, // gpu_count GpuControlList::kDontCare, // hardware_overlay 0, // test_group + GpuControlList::kDontCare, // subpixel_font_rendering }; const int kFeatureListForGpuControlTestingEntry28[1] = { @@ -766,6 +797,7 @@ const GpuControlList::More kMoreForEntry28_1440601243 = { nullptr}, // 
gpu_count GpuControlList::kDontCare, // hardware_overlay 0, // test_group + GpuControlList::kDontCare, // subpixel_font_rendering }; const int kFeatureListForGpuControlTestingEntry29[1] = { @@ -797,6 +829,7 @@ const GpuControlList::More kMoreForEntry29_1440601243 = { nullptr}, // gpu_count GpuControlList::kDontCare, // hardware_overlay 0, // test_group + GpuControlList::kDontCare, // subpixel_font_rendering }; const GpuControlList::MachineModelInfo kMachineModelInfoForEntry29Exception0 = { @@ -820,6 +853,7 @@ const GpuControlList::More kMoreForEntry29_1440601243Exception0 = { nullptr}, // gpu_count GpuControlList::kDontCare, // hardware_overlay 0, // test_group + GpuControlList::kDontCare, // subpixel_font_rendering }; const int kFeatureListForGpuControlTestingEntry30[1] = { @@ -844,6 +878,7 @@ const GpuControlList::More kMoreForEntry30_1440601243 = { nullptr}, // gpu_count GpuControlList::kDontCare, // hardware_overlay 0, // test_group + GpuControlList::kDontCare, // subpixel_font_rendering }; const int kFeatureListForGpuControlTestingEntry31[1] = { @@ -868,6 +903,7 @@ const GpuControlList::More kMoreForEntry31_1440601243 = { nullptr}, // gpu_count GpuControlList::kDontCare, // hardware_overlay 0, // test_group + GpuControlList::kDontCare, // subpixel_font_rendering }; const int kFeatureListForGpuControlTestingEntry32[1] = { @@ -892,6 +928,7 @@ const GpuControlList::More kMoreForEntry32_1440601243 = { nullptr}, // gpu_count GpuControlList::kDontCare, // hardware_overlay 0, // test_group + GpuControlList::kDontCare, // subpixel_font_rendering }; const int kFeatureListForGpuControlTestingEntry33[1] = { @@ -916,6 +953,7 @@ const GpuControlList::More kMoreForEntry33_1440601243 = { nullptr}, // gpu_count GpuControlList::kDontCare, // hardware_overlay 0, // test_group + GpuControlList::kDontCare, // subpixel_font_rendering }; const int kFeatureListForGpuControlTestingEntry34[1] = { @@ -940,6 +978,7 @@ const GpuControlList::More kMoreForEntry34_1440601243 = { nullptr}, // gpu_count GpuControlList::kDontCare, // hardware_overlay 0, // test_group + GpuControlList::kDontCare, // subpixel_font_rendering }; const int kFeatureListForGpuControlTestingEntry35[1] = { @@ -965,6 +1004,7 @@ const GpuControlList::More kMoreForEntry35_1440601243 = { nullptr}, // gpu_count GpuControlList::kDontCare, // hardware_overlay 0, // test_group + GpuControlList::kDontCare, // subpixel_font_rendering }; const int kFeatureListForGpuControlTestingEntry36[1] = { @@ -985,6 +1025,7 @@ const GpuControlList::More kMoreForEntry36_1440601243 = { nullptr}, // gpu_count GpuControlList::kDontCare, // hardware_overlay 0, // test_group + GpuControlList::kDontCare, // subpixel_font_rendering }; const int kFeatureListForGpuControlTestingEntry37[1] = { @@ -1009,6 +1050,7 @@ const GpuControlList::More kMoreForEntry37_1440601243 = { nullptr}, // gpu_count GpuControlList::kDontCare, // hardware_overlay 0, // test_group + GpuControlList::kDontCare, // subpixel_font_rendering }; const int kFeatureListForGpuControlTestingEntry38[1] = { @@ -1029,6 +1071,7 @@ const GpuControlList::More kMoreForEntry38_1440601243 = { nullptr}, // gpu_count GpuControlList::kDontCare, // hardware_overlay 0, // test_group + GpuControlList::kDontCare, // subpixel_font_rendering }; const int kFeatureListForGpuControlTestingEntry39[1] = { @@ -1049,6 +1092,7 @@ const GpuControlList::More kMoreForEntry39_1440601243 = { nullptr}, // gpu_count GpuControlList::kDontCare, // hardware_overlay 0, // test_group + GpuControlList::kDontCare, // subpixel_font_rendering }; const 
int kFeatureListForGpuControlTestingEntry40[1] = { @@ -1069,6 +1113,7 @@ const GpuControlList::More kMoreForEntry40_1440601243 = { nullptr}, // gpu_count GpuControlList::kDontCare, // hardware_overlay 0, // test_group + GpuControlList::kDontCare, // subpixel_font_rendering }; const int kFeatureListForGpuControlTestingEntry41[1] = { @@ -1089,6 +1134,7 @@ const GpuControlList::More kMoreForEntry41_1440601243 = { nullptr}, // gpu_count GpuControlList::kDontCare, // hardware_overlay 0, // test_group + GpuControlList::kDontCare, // subpixel_font_rendering }; const int kFeatureListForGpuControlTestingEntry42[1] = { @@ -1109,6 +1155,7 @@ const GpuControlList::More kMoreForEntry42_1440601243 = { nullptr}, // gpu_count GpuControlList::kDontCare, // hardware_overlay 0, // test_group + GpuControlList::kDontCare, // subpixel_font_rendering }; const int kFeatureListForGpuControlTestingEntry43[1] = { @@ -1129,6 +1176,7 @@ const GpuControlList::More kMoreForEntry43_1440601243 = { nullptr}, // gpu_count GpuControlList::kDontCare, // hardware_overlay 0, // test_group + GpuControlList::kDontCare, // subpixel_font_rendering }; const int kFeatureListForGpuControlTestingEntry44[1] = { @@ -1149,6 +1197,7 @@ const GpuControlList::More kMoreForEntry44_1440601243 = { nullptr}, // gpu_count GpuControlList::kDontCare, // hardware_overlay 0, // test_group + GpuControlList::kDontCare, // subpixel_font_rendering }; const uint32_t kDeviceIDsForGpuControlTestingEntry44Exception0[1] = { @@ -1176,6 +1225,7 @@ const GpuControlList::More kMoreForEntry44_1440601243Exception0 = { nullptr}, // gpu_count GpuControlList::kDontCare, // hardware_overlay 0, // test_group + GpuControlList::kDontCare, // subpixel_font_rendering }; const uint32_t kDeviceIDsForGpuControlTestingEntry44Exception1[1] = { @@ -1203,6 +1253,7 @@ const GpuControlList::More kMoreForEntry44_1440601243Exception1 = { nullptr}, // gpu_count GpuControlList::kDontCare, // hardware_overlay 0, // test_group + GpuControlList::kDontCare, // subpixel_font_rendering }; const int kFeatureListForGpuControlTestingEntry45[1] = { @@ -1223,6 +1274,7 @@ const GpuControlList::More kMoreForEntry45_1440601243 = { nullptr}, // gpu_count GpuControlList::kDontCare, // hardware_overlay 0, // test_group + GpuControlList::kDontCare, // subpixel_font_rendering }; const int kFeatureListForGpuControlTestingEntry46[1] = { @@ -1243,6 +1295,7 @@ const GpuControlList::More kMoreForEntry46_1440601243 = { nullptr}, // gpu_count GpuControlList::kDontCare, // hardware_overlay 0, // test_group + GpuControlList::kDontCare, // subpixel_font_rendering }; const int kFeatureListForGpuControlTestingEntry47[1] = { @@ -1263,6 +1316,7 @@ const GpuControlList::More kMoreForEntry47_1440601243 = { nullptr}, // gpu_count GpuControlList::kDontCare, // hardware_overlay 0, // test_group + GpuControlList::kDontCare, // subpixel_font_rendering }; const int kFeatureListForGpuControlTestingEntry48[1] = { @@ -1283,6 +1337,7 @@ const GpuControlList::More kMoreForEntry48_1440601243 = { nullptr}, // gpu_count GpuControlList::kDontCare, // hardware_overlay 0, // test_group + GpuControlList::kDontCare, // subpixel_font_rendering }; const int kFeatureListForGpuControlTestingEntry49[1] = { @@ -1309,6 +1364,7 @@ const GpuControlList::More kMoreForEntry49_1440601243 = { nullptr}, // gpu_count GpuControlList::kDontCare, // hardware_overlay 0, // test_group + GpuControlList::kDontCare, // subpixel_font_rendering }; const int kFeatureListForGpuControlTestingEntry50[1] = { @@ -1335,6 +1391,7 @@ const GpuControlList::More 
kMoreForEntry50_1440601243 = { nullptr}, // gpu_count GpuControlList::kDontCare, // hardware_overlay 0, // test_group + GpuControlList::kDontCare, // subpixel_font_rendering }; const int kFeatureListForGpuControlTestingEntry51[1] = { @@ -1355,6 +1412,7 @@ const GpuControlList::More kMoreForEntry51_1440601243 = { nullptr}, // gpu_count GpuControlList::kDontCare, // hardware_overlay 0, // test_group + GpuControlList::kDontCare, // subpixel_font_rendering }; const GpuControlList::GLStrings @@ -1379,6 +1437,7 @@ const GpuControlList::More kMoreForEntry51_1440601243Exception0 = { nullptr}, // gpu_count GpuControlList::kDontCare, // hardware_overlay 0, // test_group + GpuControlList::kDontCare, // subpixel_font_rendering }; const int kFeatureListForGpuControlTestingEntry52[1] = { @@ -1399,6 +1458,7 @@ const GpuControlList::More kMoreForEntry52_1440601243 = { nullptr}, // gpu_count GpuControlList::kDontCare, // hardware_overlay 0, // test_group + GpuControlList::kDontCare, // subpixel_font_rendering }; const int kFeatureListForGpuControlTestingEntry53[1] = { @@ -1425,6 +1485,7 @@ const GpuControlList::More kMoreForEntry53_1440601243 = { nullptr}, // gpu_count GpuControlList::kDontCare, // hardware_overlay 0, // test_group + GpuControlList::kDontCare, // subpixel_font_rendering }; const char* const kDisabledExtensionsForEntry54[2] = { @@ -1446,6 +1507,7 @@ const GpuControlList::More kMoreForEntry54_1440601243 = { nullptr}, // gpu_count GpuControlList::kDontCare, // hardware_overlay 0, // test_group + GpuControlList::kDontCare, // subpixel_font_rendering }; const char* const kDisabledExtensionsForEntry55[2] = { @@ -1467,6 +1529,7 @@ const GpuControlList::More kMoreForEntry55_1440601243 = { nullptr}, // gpu_count GpuControlList::kDontCare, // hardware_overlay 0, // test_group + GpuControlList::kDontCare, // subpixel_font_rendering }; const int kFeatureListForGpuControlTestingEntry56[1] = { @@ -1487,6 +1550,7 @@ const GpuControlList::More kMoreForEntry56_1440601243 = { nullptr}, // gpu_count GpuControlList::kDontCare, // hardware_overlay 0, // test_group + GpuControlList::kDontCare, // subpixel_font_rendering }; const int kFeatureListForGpuControlTestingEntry57[1] = { @@ -1507,6 +1571,7 @@ const GpuControlList::More kMoreForEntry57_1440601243 = { nullptr}, // gpu_count GpuControlList::kDontCare, // hardware_overlay 0, // test_group + GpuControlList::kDontCare, // subpixel_font_rendering }; const int kFeatureListForGpuControlTestingEntry58[1] = { @@ -1527,6 +1592,7 @@ const GpuControlList::More kMoreForEntry58_1440601243 = { nullptr}, // gpu_count GpuControlList::kDontCare, // hardware_overlay 1, // test_group + GpuControlList::kDontCare, // subpixel_font_rendering }; const int kFeatureListForGpuControlTestingEntry59[1] = { @@ -1547,6 +1613,7 @@ const GpuControlList::More kMoreForEntry59_1440601243 = { nullptr}, // gpu_count GpuControlList::kDontCare, // hardware_overlay 2, // test_group + GpuControlList::kDontCare, // subpixel_font_rendering }; const int kFeatureListForGpuControlTestingEntry60[1] = { @@ -1572,6 +1639,7 @@ const GpuControlList::More kMoreForEntry60_1440601243 = { nullptr}, // gpu_count GpuControlList::kDontCare, // hardware_overlay 0, // test_group + GpuControlList::kDontCare, // subpixel_font_rendering }; const int kFeatureListForGpuControlTestingEntry61[1] = { @@ -1596,6 +1664,7 @@ const GpuControlList::More kMoreForEntry61_1440601243 = { nullptr}, // gpu_count GpuControlList::kDontCare, // hardware_overlay 0, // test_group + GpuControlList::kDontCare, // subpixel_font_rendering }; 
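Every autogenerated More struct above gains a trailing subpixel_font_rendering slot, defaulting to kDontCare. A condensed, self-contained sketch of how that tri-state is evaluated, mirroring the More::Contains change earlier in this patch (names simplified).

enum class SupportedOrNot { kSupported, kUnsupported, kDontCare };

// An entry only fails to match when the field takes a definite stance that
// contradicts the device; kDontCare matches everything.
bool SubpixelRuleMatches(SupportedOrNot rule, bool device_has_subpixel) {
  if (rule == SupportedOrNot::kUnsupported && device_has_subpixel)
    return false;
  if (rule == SupportedOrNot::kSupported && !device_has_subpixel)
    return false;
  return true;
}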
const int kFeatureListForGpuControlTestingEntry62[1] = { @@ -1620,6 +1689,7 @@ const GpuControlList::More kMoreForEntry62_1440601243 = { nullptr}, // gpu_count GpuControlList::kDontCare, // hardware_overlay 0, // test_group + GpuControlList::kDontCare, // subpixel_font_rendering }; const int kFeatureListForGpuControlTestingEntry63[1] = { @@ -1644,6 +1714,7 @@ const GpuControlList::More kMoreForEntry63_1440601243 = { nullptr}, // gpu_count GpuControlList::kDontCare, // hardware_overlay 0, // test_group + GpuControlList::kDontCare, // subpixel_font_rendering }; const int kFeatureListForGpuControlTestingEntry64[1] = { @@ -1668,6 +1739,7 @@ const GpuControlList::More kMoreForEntry64_1440601243 = { nullptr}, // gpu_count GpuControlList::kDontCare, // hardware_overlay 0, // test_group + GpuControlList::kDontCare, // subpixel_font_rendering }; const int kFeatureListForGpuControlTestingEntry65[1] = { @@ -1688,6 +1760,7 @@ const GpuControlList::More kMoreForEntry65_1440601243 = { nullptr}, // gpu_count GpuControlList::kDontCare, // hardware_overlay 0, // test_group + GpuControlList::kDontCare, // subpixel_font_rendering }; const GpuSeriesType kGpuSeriesForEntry65Exception0[1] = { @@ -1708,6 +1781,7 @@ const GpuControlList::More kMoreForEntry65_1440601243Exception0 = { nullptr}, // gpu_count GpuControlList::kDontCare, // hardware_overlay 0, // test_group + GpuControlList::kDontCare, // subpixel_font_rendering }; const int kFeatureListForGpuControlTestingEntry66[1] = { @@ -1734,6 +1808,7 @@ const GpuControlList::More kMoreForEntry66_1440601243 = { nullptr}, // gpu_count GpuControlList::kDontCare, // hardware_overlay 0, // test_group + GpuControlList::kDontCare, // subpixel_font_rendering }; const int kFeatureListForGpuControlTestingEntry67[1] = { @@ -1754,6 +1829,7 @@ const GpuControlList::More kMoreForEntry67_1440601243 = { nullptr}, // gpu_count GpuControlList::kUnsupported, // hardware_overlay 0, // test_group + GpuControlList::kDontCare, // subpixel_font_rendering }; const int kFeatureListForGpuControlTestingEntry68[1] = { @@ -1774,6 +1850,7 @@ const GpuControlList::More kMoreForEntry68_1440601243 = { nullptr}, // gpu_count GpuControlList::kDontCare, // hardware_overlay 0, // test_group + GpuControlList::kDontCare, // subpixel_font_rendering }; const int kFeatureListForGpuControlTestingEntry69[1] = { @@ -1794,6 +1871,7 @@ const GpuControlList::More kMoreForEntry69_1440601243 = { nullptr}, // gpu_count GpuControlList::kDontCare, // hardware_overlay 0, // test_group + GpuControlList::kDontCare, // subpixel_font_rendering }; const int kFeatureListForGpuControlTestingEntry70[1] = { @@ -1814,6 +1892,7 @@ const GpuControlList::More kMoreForEntry70_1440601243 = { nullptr}, // gpu_count GpuControlList::kDontCare, // hardware_overlay 0, // test_group + GpuControlList::kDontCare, // subpixel_font_rendering }; const int kFeatureListForGpuControlTestingEntry71[1] = { @@ -1834,6 +1913,7 @@ const GpuControlList::More kMoreForEntry71_1440601243 = { nullptr}, // gpu_count GpuControlList::kDontCare, // hardware_overlay 0, // test_group + GpuControlList::kDontCare, // subpixel_font_rendering }; const int kFeatureListForGpuControlTestingEntry72[1] = { @@ -1854,6 +1934,131 @@ const GpuControlList::More kMoreForEntry72_1440601243 = { nullptr}, // gpu_count GpuControlList::kDontCare, // hardware_overlay 0, // test_group + GpuControlList::kDontCare, // subpixel_font_rendering +}; + +const int kFeatureListForGpuControlTestingEntry73[1] = { + TEST_FEATURE_0, +}; + +const GpuControlList::More kMoreForEntry73_1440601243 = 
{ + GpuControlList::kGLTypeNone, // gl_type + {GpuControlList::kUnknown, GpuControlList::kVersionStyleNumerical, nullptr, + nullptr}, // gl_version + {GpuControlList::kUnknown, GpuControlList::kVersionStyleNumerical, nullptr, + nullptr}, // pixel_shader_version + false, // in_process_gpu + 0, // gl_reset_notification_strategy + {GpuControlList::kUnknown, GpuControlList::kVersionStyleNumerical, nullptr, + nullptr}, // direct_rendering_version + {GpuControlList::kUnknown, GpuControlList::kVersionStyleNumerical, nullptr, + nullptr}, // gpu_count + GpuControlList::kDontCare, // hardware_overlay + 0, // test_group + GpuControlList::kDontCare, // subpixel_font_rendering +}; + +const GpuControlList::GLStrings + kGLStringsForGpuControlTestingEntry73Exception0 = { + nullptr, + "Mali.*", + nullptr, + nullptr, +}; + +const GpuControlList::More kMoreForEntry73_1440601243Exception0 = { + GpuControlList::kGLTypeNone, // gl_type + {GpuControlList::kUnknown, GpuControlList::kVersionStyleNumerical, nullptr, + nullptr}, // gl_version + {GpuControlList::kUnknown, GpuControlList::kVersionStyleNumerical, nullptr, + nullptr}, // pixel_shader_version + false, // in_process_gpu + 0, // gl_reset_notification_strategy + {GpuControlList::kUnknown, GpuControlList::kVersionStyleNumerical, nullptr, + nullptr}, // direct_rendering_version + {GpuControlList::kUnknown, GpuControlList::kVersionStyleNumerical, nullptr, + nullptr}, // gpu_count + GpuControlList::kDontCare, // hardware_overlay + 0, // test_group + GpuControlList::kUnsupported, // subpixel_font_rendering +}; + +const GpuControlList::GLStrings + kGLStringsForGpuControlTestingEntry73Exception1 = { + nullptr, + "DontCare", + nullptr, + nullptr, +}; + +const GpuControlList::More kMoreForEntry73_1440601243Exception1 = { + GpuControlList::kGLTypeNone, // gl_type + {GpuControlList::kUnknown, GpuControlList::kVersionStyleNumerical, nullptr, + nullptr}, // gl_version + {GpuControlList::kUnknown, GpuControlList::kVersionStyleNumerical, nullptr, + nullptr}, // pixel_shader_version + false, // in_process_gpu + 0, // gl_reset_notification_strategy + {GpuControlList::kUnknown, GpuControlList::kVersionStyleNumerical, nullptr, + nullptr}, // direct_rendering_version + {GpuControlList::kUnknown, GpuControlList::kVersionStyleNumerical, nullptr, + nullptr}, // gpu_count + GpuControlList::kDontCare, // hardware_overlay + 0, // test_group + GpuControlList::kDontCare, // subpixel_font_rendering +}; + +const GpuControlList::GLStrings + kGLStringsForGpuControlTestingEntry73Exception2 = { + nullptr, + "Supported", + nullptr, + nullptr, +}; + +const GpuControlList::More kMoreForEntry73_1440601243Exception2 = { + GpuControlList::kGLTypeNone, // gl_type + {GpuControlList::kUnknown, GpuControlList::kVersionStyleNumerical, nullptr, + nullptr}, // gl_version + {GpuControlList::kUnknown, GpuControlList::kVersionStyleNumerical, nullptr, + nullptr}, // pixel_shader_version + false, // in_process_gpu + 0, // gl_reset_notification_strategy + {GpuControlList::kUnknown, GpuControlList::kVersionStyleNumerical, nullptr, + nullptr}, // direct_rendering_version + {GpuControlList::kUnknown, GpuControlList::kVersionStyleNumerical, nullptr, + nullptr}, // gpu_count + GpuControlList::kDontCare, // hardware_overlay + 0, // test_group + GpuControlList::kSupported, // subpixel_font_rendering +}; + +const int kFeatureListForGpuControlTestingEntry74[1] = { + TEST_FEATURE_0, +}; + +const GpuControlList::GLStrings kGLStringsForGpuControlTestingEntry74 = { + nullptr, + "Mali.*", + nullptr, + nullptr, +}; + +const 
GpuControlList::More kMoreForEntry74_1440601243 = { + GpuControlList::kGLTypeNone, // gl_type + {GpuControlList::kUnknown, GpuControlList::kVersionStyleNumerical, nullptr, + nullptr}, // gl_version + {GpuControlList::kUnknown, GpuControlList::kVersionStyleNumerical, nullptr, + nullptr}, // pixel_shader_version + false, // in_process_gpu + 0, // gl_reset_notification_strategy + {GpuControlList::kUnknown, GpuControlList::kVersionStyleNumerical, nullptr, + nullptr}, // direct_rendering_version + {GpuControlList::kUnknown, GpuControlList::kVersionStyleNumerical, nullptr, + nullptr}, // gpu_count + GpuControlList::kDontCare, // hardware_overlay + 0, // test_group + GpuControlList::kDontCare, // subpixel_font_rendering }; } // namespace gpu diff --git a/chromium/gpu/config/gpu_control_list_testing_autogen.cc b/chromium/gpu/config/gpu_control_list_testing_autogen.cc index d8d7563d10a..dcb6daf8b92 100644 --- a/chromium/gpu/config/gpu_control_list_testing_autogen.cc +++ b/chromium/gpu/config/gpu_control_list_testing_autogen.cc @@ -2330,6 +2330,70 @@ const GpuControlList::Entry kGpuControlListTestingEntries[] = { 0, // exceptions count nullptr, // exceptions }, + { + 73, // id + "GpuControlListEntryTest.SubpixelFontRendering", + base::size(kFeatureListForGpuControlTestingEntry73), // features size + kFeatureListForGpuControlTestingEntry73, // features + 0, // DisabledExtensions size + nullptr, // DisabledExtensions + 0, // DisabledWebGLExtensions size + nullptr, // DisabledWebGLExtensions + 0, // CrBugs size + nullptr, // CrBugs + { + GpuControlList::kOsChromeOS, // os_type + {GpuControlList::kUnknown, GpuControlList::kVersionStyleNumerical, + nullptr, nullptr}, // os_version + 0x00, // vendor_id + 0, // DeviceIDs size + nullptr, // DeviceIDs + GpuControlList::kMultiGpuCategoryNone, // multi_gpu_category + GpuControlList::kMultiGpuStyleNone, // multi_gpu_style + nullptr, // driver info + nullptr, // GL strings + nullptr, // machine model info + 0, // gpu_series size + nullptr, // gpu_series + {GpuControlList::kUnknown, GpuControlList::kVersionStyleNumerical, + nullptr, nullptr}, // intel_gpu_generation + &kMoreForEntry73_1440601243, // more data + }, + base::size(kExceptionsForEntry73), // exceptions count + kExceptionsForEntry73, // exceptions + }, + { + 74, // id + "GpuControlListEntryTest.SubpixelFontRenderingDontCare", + base::size(kFeatureListForGpuControlTestingEntry74), // features size + kFeatureListForGpuControlTestingEntry74, // features + 0, // DisabledExtensions size + nullptr, // DisabledExtensions + 0, // DisabledWebGLExtensions size + nullptr, // DisabledWebGLExtensions + 0, // CrBugs size + nullptr, // CrBugs + { + GpuControlList::kOsChromeOS, // os_type + {GpuControlList::kUnknown, GpuControlList::kVersionStyleNumerical, + nullptr, nullptr}, // os_version + 0x00, // vendor_id + 0, // DeviceIDs size + nullptr, // DeviceIDs + GpuControlList::kMultiGpuCategoryNone, // multi_gpu_category + GpuControlList::kMultiGpuStyleNone, // multi_gpu_style + nullptr, // driver info + &kGLStringsForGpuControlTestingEntry74, // GL strings + nullptr, // machine model info + 0, // gpu_series size + nullptr, // gpu_series + {GpuControlList::kUnknown, GpuControlList::kVersionStyleNumerical, + nullptr, nullptr}, // intel_gpu_generation + &kMoreForEntry74_1440601243, // more data + }, + 0, // exceptions count + nullptr, // exceptions + }, }; -const size_t kGpuControlListTestingEntryCount = 72; +const size_t kGpuControlListTestingEntryCount = 74; } // namespace gpu diff --git 
a/chromium/gpu/config/gpu_control_list_testing_entry_enums_autogen.h b/chromium/gpu/config/gpu_control_list_testing_entry_enums_autogen.h index c7c7da5e2e9..f70d8b3b2d4 100644 --- a/chromium/gpu/config/gpu_control_list_testing_entry_enums_autogen.h +++ b/chromium/gpu/config/gpu_control_list_testing_entry_enums_autogen.h @@ -85,6 +85,8 @@ enum GpuControlListTestingEntryEnum { kGpuControlListEntryTest_GpuGenerationAny = 69, kGpuControlListEntryTest_GpuGenerationPrimary = 70, kGpuControlListEntryTest_GpuGenerationSecondary = 71, + kGpuControlListEntryTest_SubpixelFontRendering = 72, + kGpuControlListEntryTest_SubpixelFontRenderingDontCare = 73, }; } // namespace gpu diff --git a/chromium/gpu/config/gpu_control_list_testing_exceptions_autogen.h b/chromium/gpu/config/gpu_control_list_testing_exceptions_autogen.h index e04e2dfccd2..0db38fff526 100644 --- a/chromium/gpu/config/gpu_control_list_testing_exceptions_autogen.h +++ b/chromium/gpu/config/gpu_control_list_testing_exceptions_autogen.h @@ -200,6 +200,63 @@ const GpuControlList::Conditions kExceptionsForEntry65[1] = { }, }; +const GpuControlList::Conditions kExceptionsForEntry73[3] = { + { + GpuControlList::kOsAny, // os_type + {GpuControlList::kUnknown, GpuControlList::kVersionStyleNumerical, + nullptr, nullptr}, // os_version + 0x00, // vendor_id + 0, // DeviceIDs size + nullptr, // DeviceIDs + GpuControlList::kMultiGpuCategoryNone, // multi_gpu_category + GpuControlList::kMultiGpuStyleNone, // multi_gpu_style + nullptr, // driver info + &kGLStringsForGpuControlTestingEntry73Exception0, // GL strings + nullptr, // machine model info + 0, // gpu_series size + nullptr, // gpu_series + {GpuControlList::kUnknown, GpuControlList::kVersionStyleNumerical, + nullptr, nullptr}, // intel_gpu_generation + &kMoreForEntry73_1440601243Exception0, // more data + }, + { + GpuControlList::kOsAny, // os_type + {GpuControlList::kUnknown, GpuControlList::kVersionStyleNumerical, + nullptr, nullptr}, // os_version + 0x00, // vendor_id + 0, // DeviceIDs size + nullptr, // DeviceIDs + GpuControlList::kMultiGpuCategoryNone, // multi_gpu_category + GpuControlList::kMultiGpuStyleNone, // multi_gpu_style + nullptr, // driver info + &kGLStringsForGpuControlTestingEntry73Exception1, // GL strings + nullptr, // machine model info + 0, // gpu_series size + nullptr, // gpu_series + {GpuControlList::kUnknown, GpuControlList::kVersionStyleNumerical, + nullptr, nullptr}, // intel_gpu_generation + &kMoreForEntry73_1440601243Exception1, // more data + }, + { + GpuControlList::kOsAny, // os_type + {GpuControlList::kUnknown, GpuControlList::kVersionStyleNumerical, + nullptr, nullptr}, // os_version + 0x00, // vendor_id + 0, // DeviceIDs size + nullptr, // DeviceIDs + GpuControlList::kMultiGpuCategoryNone, // multi_gpu_category + GpuControlList::kMultiGpuStyleNone, // multi_gpu_style + nullptr, // driver info + &kGLStringsForGpuControlTestingEntry73Exception2, // GL strings + nullptr, // machine model info + 0, // gpu_series size + nullptr, // gpu_series + {GpuControlList::kUnknown, GpuControlList::kVersionStyleNumerical, + nullptr, nullptr}, // intel_gpu_generation + &kMoreForEntry73_1440601243Exception2, // more data + }, +}; + } // namespace gpu #endif // GPU_CONFIG_GPU_CONTROL_LIST_TESTING_EXCEPTIONS_AUTOGEN_H_ diff --git a/chromium/gpu/config/gpu_crash_keys.cc b/chromium/gpu/config/gpu_crash_keys.cc index 4641a8d45ac..00b84c58d38 100644 --- a/chromium/gpu/config/gpu_crash_keys.cc +++ b/chromium/gpu/config/gpu_crash_keys.cc @@ -32,6 +32,8 @@ 
crash_reporter::CrashKeyString<1024> current_shader_0("current-shader-0"); crash_reporter::CrashKeyString<1024> current_shader_1("current-shader-1"); crash_reporter::CrashKeyString<4> gpu_watchdog_kill_after_power_resume( "gpu-watchdog-kill-after-power-resume"); +crash_reporter::CrashKeyString<4> gpu_watchdog_crashed_in_gpu_init( + "gpu-watchdog-crashed-in-gpu-init"); } // namespace crash_keys } // namespace gpu diff --git a/chromium/gpu/config/gpu_crash_keys.h b/chromium/gpu/config/gpu_crash_keys.h index 0de896df550..0de342e49ab 100644 --- a/chromium/gpu/config/gpu_crash_keys.h +++ b/chromium/gpu/config/gpu_crash_keys.h @@ -37,7 +37,8 @@ extern GPU_EXPORT crash_reporter::CrashKeyString<1024> current_shader_0; extern GPU_EXPORT crash_reporter::CrashKeyString<1024> current_shader_1; extern GPU_EXPORT crash_reporter::CrashKeyString<4> gpu_watchdog_kill_after_power_resume; - +extern GPU_EXPORT crash_reporter::CrashKeyString<4> + gpu_watchdog_crashed_in_gpu_init; } // namespace crash_keys } // namespace gpu diff --git a/chromium/gpu/config/gpu_driver_bug_list.json b/chromium/gpu/config/gpu_driver_bug_list.json index 75bf27f8502..a352cd89238 100644 --- a/chromium/gpu/config/gpu_driver_bug_list.json +++ b/chromium/gpu/config/gpu_driver_bug_list.json @@ -904,7 +904,10 @@ { "id": 108, "cr_bugs": [449150], - "description": "Mali-4xx does not support GL_RGB format", + "description": "Mali-4xx on Linux does not support GL_RGB format", + "os": { + "type": "linux" + }, "gl_vendor": "ARM.*", "gl_renderer": ".*Mali-4.*", "features": [ @@ -1596,25 +1599,6 @@ ] }, { - "id": 172, - "description": "Use GL_INTEL_framebuffer_CMAA on ChromeOS", - "cr_bugs": [535198], - "os": { - "type" : "chromeos" - }, - "vendor_id": "0x8086", - "driver_vendor": "Mesa", - "gl_vendor": "Intel.*", - "gl_type": "gles", - "gl_version": { - "op": ">=", - "value": "3.1" - }, - "features": [ - "use_framebuffer_cmaa" - ] - }, - { "id": 174, "description": "Adreno 4xx support for EXT_multisampled_render_to_texture is buggy on Android 7.0", "cr_bugs": [612474], @@ -2211,12 +2195,16 @@ }, { "id": 219, - "description": "Zero-copy DXGI video hangs or displays incorrect colors on AMD drivers", - "cr_bugs": [623029], + "description": "Zero-copy DXGI video hangs or displays incorrect colors on older AMD drivers", + "cr_bugs": [623029, 1025427], "os": { "type": "win" }, "vendor_id": "0x1002", + "driver_version": { + "op": "<", + "value": "23.20.826.5120" + }, "features": [ "disable_dxgi_zero_copy_video" ] @@ -2904,18 +2892,6 @@ ] }, { - "id":275, - "cr_bugs": [838725], - "description": "Disable AImageReader on ARM GPUs as its buggy.", - "os": { - "type": "android" - }, - "gl_vendor": "ARM.*", - "features": [ - "disable_aimagereader" - ] - }, - { "id": 277, "description": "Direct composition path is buggy on certain AMD devices/drivers", "cr_bugs": [800950], @@ -3124,14 +3100,17 @@ }, { "id": 294, - "cr_bugs": [932879], - "description": "Hardware overlays result in black videos on non-Intel GPUs", + "cr_bugs": [932879, 1025427], + "description": "Hardware overlays are only supported on certain Intel/AMD gpus", "os": { "type": "win" }, "exceptions": [ { "vendor_id": "0x8086" + }, + { + "vendor_id": "0x1002" } ], "features": [ @@ -3232,7 +3211,7 @@ "value": "24.0.0.0" }, "features": [ - "disable_nv12_dynamic_textures" + "disable_nv12_dynamic_textures" ] }, { @@ -3374,6 +3353,153 @@ "disabled_extensions": [ "GL_MESA_framebuffer_flip_y" ] + }, + { + "id": 316, + "cr_bugs": [1003860], + "description": "Limit MSAA to 4x on ChromeOS for Intel", + 
"os": { + "type": "chromeos" + }, + "intel_gpu_generation": { + "op": ">=", + "value": "9" + }, + "driver_vendor": "Mesa", + "gl_vendor": "Intel.*", + "features": [ + "max_msaa_sample_count_4" + ] + }, + { + "id": 317, + "cr_bugs": [1003860], + "description": "Limit MSAA to 2x on older Intel GPU generations on ChromeOS", + "os": { + "type": "chromeos" + }, + "intel_gpu_generation": { + "op": "<", + "value": "9" + }, + "driver_vendor": "Mesa", + "gl_vendor": "Intel.*", + "features": [ + "max_msaa_sample_count_2" + ] + }, + { + "id": 318, + "cr_bugs": [995396], + "description": "Direct composition caused performance issues on AMD GPUs", + "os": { + "type": "win" + }, + "vendor_id": "0x1002", + "device_id": ["0x694c"], + "features": [ + "disable_direct_composition" + ] + }, + { + "id": 319, + "cr_bugs": [709351], + "description": "Remove dynamic indexing for swizzled vectors on Mac", + "os": { + "type": "macosx" + }, + "features": [ + "remove_dynamic_indexing_of_swizzled_vector" + ] + }, + { + "id": 320, + "cr_bugs": [709351], + "description": "Remove dynamic indexing for swizzled vectors on Android", + "os": { + "type": "android" + }, + "features": [ + "remove_dynamic_indexing_of_swizzled_vector" + ] + }, + { + "id": 321, + "description": "Zero-copy DXGI video hangs or displays incorrect colors on older AMD drivers", + "cr_bugs": [623029, 1025427], + "os": { + "type": "win" + }, + "vendor_id": "0x1002", + "device_id": ["0x9870", "0x9874", "0x98E4"], + "driver_version": { + "op": "<", + "value": "26.20.15000.37" + }, + "features": [ + "disable_dxgi_zero_copy_video" + ] + }, + { + "id": 322, + "cr_bugs": [932879, 1025427], + "description": "Hardware overlays fail to work on older AMD drivers", + "os": { + "type": "win" + }, + "vendor_id": "0x1002", + "driver_version": { + "op": "<", + "value": "23.20.826.5120" + }, + "features": [ + "disable_direct_composition_video_overlays" + ] + }, + { + "id": 323, + "cr_bugs": [932879, 1025427], + "description": "Hardware overlays fail to work on older AMD drivers", + "os": { + "type": "win" + }, + "vendor_id": "0x1002", + "device_id": ["0x9870", "0x9874", "0x98E4"], + "driver_version": { + "op": "<", + "value": "26.20.15000.37" + }, + "features": [ + "disable_direct_composition_video_overlays" + ] + }, + { + "id": 324, + "cr_bugs": [1029855, 1050666], + "description": "dynamic textures fail to work on AMD GPUs", + "os": { + "type": "win" + }, + "vendor_id": "0x1002", + "features": [ + "disable_nv12_dynamic_textures" + ] + }, + { + "id": 327, + "cr_bugs": [1027981], + "description": "Disable dual source blending support", + "os": { + "type": "macosx", + "version": { + "op": "<", + "value": "10.14" + } + }, + "gl_vendor": "Intel.*", + "features": [ + "disable_dual_source_blending_support" + ] } ] } diff --git a/chromium/gpu/config/gpu_feature_info.cc b/chromium/gpu/config/gpu_feature_info.cc index 52d65772dcc..74106efc0f2 100644 --- a/chromium/gpu/config/gpu_feature_info.cc +++ b/chromium/gpu/config/gpu_feature_info.cc @@ -50,7 +50,7 @@ bool GpuFeatureInfo::IsWorkaroundEnabled(int32_t workaround) const { bool GpuFeatureInfo::IsInitialized() const { // Check if any feature status is undefined. 
- return status_values[GPU_FEATURE_TYPE_GPU_COMPOSITING] != + return status_values[GPU_FEATURE_TYPE_ACCELERATED_GL] != kGpuFeatureStatusUndefined; } diff --git a/chromium/gpu/config/gpu_feature_type.h b/chromium/gpu/config/gpu_feature_type.h index deaa886ce4d..aab8903dda1 100644 --- a/chromium/gpu/config/gpu_feature_type.h +++ b/chromium/gpu/config/gpu_feature_type.h @@ -12,7 +12,6 @@ namespace gpu { // If a bit is set to 1, corresponding feature is blacklisted. enum GpuFeatureType { GPU_FEATURE_TYPE_ACCELERATED_2D_CANVAS = 0, - GPU_FEATURE_TYPE_GPU_COMPOSITING, GPU_FEATURE_TYPE_ACCELERATED_WEBGL, GPU_FEATURE_TYPE_FLASH3D, GPU_FEATURE_TYPE_FLASH_STAGE3D, @@ -23,6 +22,7 @@ enum GpuFeatureType { GPU_FEATURE_TYPE_PROTECTED_VIDEO_DECODE, GPU_FEATURE_TYPE_OOP_RASTERIZATION, GPU_FEATURE_TYPE_ANDROID_SURFACE_CONTROL, + GPU_FEATURE_TYPE_ACCELERATED_GL, GPU_FEATURE_TYPE_METAL, GPU_FEATURE_TYPE_VULKAN, NUMBER_OF_GPU_FEATURE_TYPES diff --git a/chromium/gpu/config/gpu_finch_features.cc b/chromium/gpu/config/gpu_finch_features.cc index ddb2ed78da4..4fe158b82fb 100644 --- a/chromium/gpu/config/gpu_finch_features.cc +++ b/chromium/gpu/config/gpu_finch_features.cc @@ -55,8 +55,13 @@ const base::Feature kDefaultEnableGpuRasterization{ // Enable out of process rasterization by default. This can still be overridden // by --enable-oop-rasterization or --disable-oop-rasterization. +#if defined(OS_ANDROID) +const base::Feature kDefaultEnableOopRasterization{ + "DefaultEnableOopRasterization", base::FEATURE_ENABLED_BY_DEFAULT}; +#else const base::Feature kDefaultEnableOopRasterization{ "DefaultEnableOopRasterization", base::FEATURE_DISABLED_BY_DEFAULT}; +#endif // Allow putting a video swapchain underneath the main swapchain, so overlays // can be used even if there are controls on top of the video. It can be @@ -67,11 +72,11 @@ const base::Feature kDirectCompositionUnderlays{ #if defined(OS_WIN) // Use a high priority for GPU process on Windows. const base::Feature kGpuProcessHighPriorityWin{ - "GpuProcessHighPriorityWin", base::FEATURE_DISABLED_BY_DEFAULT}; + "GpuProcessHighPriorityWin", base::FEATURE_ENABLED_BY_DEFAULT}; #endif // Use ThreadPriority::DISPLAY for GPU main, viz compositor and IO threads. -#if defined(OS_ANDROID) || defined(OS_CHROMEOS) +#if defined(OS_ANDROID) || defined(OS_CHROMEOS) || defined(OS_WIN) const base::Feature kGpuUseDisplayThreadPriority{ "GpuUseDisplayThreadPriority", base::FEATURE_ENABLED_BY_DEFAULT}; #else diff --git a/chromium/gpu/config/gpu_info.cc b/chromium/gpu/config/gpu_info.cc index 91cf6c1090e..da699bb0632 100644 --- a/chromium/gpu/config/gpu_info.cc +++ b/chromium/gpu/config/gpu_info.cc @@ -188,7 +188,8 @@ GPUInfo::GPUInfo() system_visual(0), rgba_visual(0), #endif - oop_rasterization_supported(false) { + oop_rasterization_supported(false), + subpixel_font_rendering(true) { } GPUInfo::GPUInfo(const GPUInfo& other) = default; @@ -264,6 +265,11 @@ void GPUInfo::EnumerateFields(Enumerator* enumerator) const { #endif bool oop_rasterization_supported; + bool subpixel_font_rendering; + +#if BUILDFLAG(ENABLE_VULKAN) + base::Optional<VulkanInfo> vulkan_info; +#endif }; // If this assert fails then most likely something below needs to be updated. 
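The gpu_info.cc hunks around this point keep GPUInfo and GPUInfo::EnumerateFields in sync by redeclaring every member inside a local mirror struct and comparing sizes, which is why adding subpixel_font_rendering and the optional vulkan_info has to touch both places; the "If this assert fails" comment above is the tell. A minimal sketch of that pattern follows; ExampleInfo, Enumerator, and the field names are illustrative stand-ins, not the real gpu::GPUInfo definition:

struct Enumerator {
  virtual void AddBool(const char* name, bool value) = 0;
  virtual ~Enumerator() = default;
};

struct ExampleInfo {
  bool oop_rasterization_supported = false;
  bool subpixel_font_rendering = true;  // newly added member

  void EnumerateFields(Enumerator* enumerator) const {
    // Mirror of every ExampleInfo member; adding a member above without
    // repeating it here changes the size and trips the assert below.
    struct KnownFields {
      bool oop_rasterization_supported;
      bool subpixel_font_rendering;
    };
    static_assert(sizeof(KnownFields) == sizeof(ExampleInfo),
                  "update EnumerateFields after adding a member");
    enumerator->AddBool("oopRasterizationSupported",
                        oop_rasterization_supported);
    enumerator->AddBool("subpixelFontRendering", subpixel_font_rendering);
  }
};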
@@ -333,6 +339,13 @@ void GPUInfo::EnumerateFields(Enumerator* enumerator) const { enumerator->AddInt64("rgbaVisual", rgba_visual); #endif enumerator->AddBool("oopRasterizationSupported", oop_rasterization_supported); + enumerator->AddBool("subpixelFontRendering", subpixel_font_rendering); +#if BUILDFLAG(ENABLE_VULKAN) + if (vulkan_info) { + auto blob = vulkan_info->Serialize(); + enumerator->AddBinary("vulkanInfo", base::span<const uint8_t>(blob)); + } +#endif enumerator->EndAuxAttributes(); } diff --git a/chromium/gpu/config/gpu_info.h b/chromium/gpu/config/gpu_info.h index 720fdc8d866..cce6a59730c 100644 --- a/chromium/gpu/config/gpu_info.h +++ b/chromium/gpu/config/gpu_info.h @@ -13,17 +13,24 @@ #include <string> #include <vector> +#include "base/containers/span.h" +#include "base/optional.h" #include "base/time/time.h" #include "base/version.h" #include "build/build_config.h" #include "gpu/config/dx_diag_node.h" #include "gpu/gpu_export.h" +#include "gpu/vulkan/buildflags.h" #include "ui/gfx/geometry/size.h" #if defined(USE_X11) typedef unsigned long VisualID; #endif +#if BUILDFLAG(ENABLE_VULKAN) +#include "gpu/config/vulkan_info.h" +#endif + namespace gpu { // These values are persisted to logs. Entries should not be renumbered and @@ -355,6 +362,12 @@ struct GPU_EXPORT GPUInfo { bool oop_rasterization_supported; + bool subpixel_font_rendering; + +#if BUILDFLAG(ENABLE_VULKAN) + base::Optional<VulkanInfo> vulkan_info; +#endif + // Note: when adding new members, please remember to update EnumerateFields // in gpu_info.cc. @@ -374,6 +387,8 @@ struct GPU_EXPORT GPUInfo { virtual void AddBool(const char* name, bool value) = 0; virtual void AddTimeDeltaInSecondsF(const char* name, const base::TimeDelta& value) = 0; + virtual void AddBinary(const char* name, + const base::span<const uint8_t>& blob) = 0; // Markers indicating that a GPUDevice is being described. virtual void BeginGPUDevice() = 0; diff --git a/chromium/gpu/config/gpu_info_collector_win.cc b/chromium/gpu/config/gpu_info_collector_win.cc index b7ff063de6a..34ef68895bb 100644 --- a/chromium/gpu/config/gpu_info_collector_win.cc +++ b/chromium/gpu/config/gpu_info_collector_win.cc @@ -31,6 +31,7 @@ #include "base/win/scoped_com_initializer.h" #include "base/win/windows_version.h" #include "build/branding_buildflags.h" +#include "gpu/config/gpu_util.h" #include "third_party/vulkan/include/vulkan/vulkan.h" namespace gpu { @@ -62,30 +63,6 @@ inline D3D12FeatureLevel ConvertToHistogramFeatureLevel( } } -// These values are persisted to logs. Entries should not be renumbered and -// numeric values should never be reused. 
-// This should match enum VulkanVersion in \tools\metrics\histograms\enums.xml -enum class VulkanVersion { - kVulkanVersionUnknown = 0, - kVulkanVersion_1_0_0 = 1, - kVulkanVersion_1_1_0 = 2, - kMaxValue = kVulkanVersion_1_1_0, -}; - -inline VulkanVersion ConvertToHistogramVulkanVersion(uint32_t vulkan_version) { - switch (vulkan_version) { - case 0: - return VulkanVersion::kVulkanVersionUnknown; - case VK_MAKE_VERSION(1, 0, 0): - return VulkanVersion::kVulkanVersion_1_0_0; - case VK_MAKE_VERSION(1, 1, 0): - return VulkanVersion::kVulkanVersion_1_1_0; - default: - NOTREACHED(); - return VulkanVersion::kVulkanVersionUnknown; - } -} - } // namespace #if BUILDFLAG(GOOGLE_CHROME_BRANDING) && defined(OFFICIAL_BUILD) diff --git a/chromium/gpu/config/gpu_info_unittest.cc b/chromium/gpu/config/gpu_info_unittest.cc index e30e5b29719..f849760247a 100644 --- a/chromium/gpu/config/gpu_info_unittest.cc +++ b/chromium/gpu/config/gpu_info_unittest.cc @@ -27,6 +27,9 @@ class TestGPUInfoEnumerator : public gpu::GPUInfo::Enumerator { void AddBool(const char* name, bool value) override {} + void AddBinary(const char* name, + const base::span<const uint8_t>& blob) override {} + void AddTimeDeltaInSecondsF(const char* name, const base::TimeDelta& value) override {} diff --git a/chromium/gpu/config/gpu_lists_version.h b/chromium/gpu/config/gpu_lists_version.h index 9dff8f2503f..8ee721df75e 100644 --- a/chromium/gpu/config/gpu_lists_version.h +++ b/chromium/gpu/config/gpu_lists_version.h @@ -3,6 +3,6 @@ #ifndef GPU_CONFIG_GPU_LISTS_VERSION_H_ #define GPU_CONFIG_GPU_LISTS_VERSION_H_ -#define GPU_LISTS_VERSION "a92c93df20fcf33ca6a37962134389f0b85ec9ab" +#define GPU_LISTS_VERSION "d5190fedde334f4c1c3e3851e62966304d236bae" #endif // GPU_CONFIG_GPU_LISTS_VERSION_H_ diff --git a/chromium/gpu/config/gpu_preferences.h b/chromium/gpu/config/gpu_preferences.h index 656b72c0c13..b3bfe5256b7 100644 --- a/chromium/gpu/config/gpu_preferences.h +++ b/chromium/gpu/config/gpu_preferences.h @@ -37,6 +37,14 @@ enum class VulkanImplementationName : uint32_t { kLast = kSwiftshader, }; +enum class GrContextType : uint32_t { + kGL = 0, + kVulkan = 1, + kMetal = 2, + kDawn = 3, + kLast = kDawn, +}; + // NOTE: if you modify this structure then you must also modify the // following two files to keep them in sync: // src/gpu/ipc/common/gpu_preferences.mojom @@ -178,9 +186,6 @@ struct GPU_EXPORT GpuPreferences { // =================================== // Settings from //gpu/config/gpu_switches.h - // Disables workarounds for various GPU driver bugs. - bool disable_gpu_driver_bug_workarounds = false; - // Ignores GPU blacklist. bool ignore_gpu_blacklist = false; @@ -197,6 +202,9 @@ struct GPU_EXPORT GpuPreferences { // =================================== // Settings from //gpu/command_buffer/service/gpu_switches.h + // The type of the GrContext. + GrContextType gr_context_type = GrContextType::kGL; + // Use Vulkan for rasterization and display compositing. VulkanImplementationName use_vulkan = VulkanImplementationName::kNone; @@ -222,6 +230,9 @@ struct GPU_EXPORT GpuPreferences { // Enable the WebGPU command buffer. bool enable_webgpu = false; + // Enable measuring blocked time on GPU Main thread + bool enable_gpu_blocked_time_metric = false; + #if defined(USE_OZONE) // Determines message pump type for the GPU thread. 
base::MessagePumpType message_pump_type = base::MessagePumpType::DEFAULT; diff --git a/chromium/gpu/config/gpu_preferences_unittest.cc b/chromium/gpu/config/gpu_preferences_unittest.cc index 35fc0b90f8e..c24a7205d5b 100644 --- a/chromium/gpu/config/gpu_preferences_unittest.cc +++ b/chromium/gpu/config/gpu_preferences_unittest.cc @@ -62,17 +62,18 @@ void CheckGpuPreferencesEqual(GpuPreferences left, GpuPreferences right) { right.disable_biplanar_gpu_memory_buffers_for_video_frames); EXPECT_EQ(left.texture_target_exception_list, right.texture_target_exception_list); - EXPECT_EQ(left.disable_gpu_driver_bug_workarounds, - right.disable_gpu_driver_bug_workarounds); EXPECT_EQ(left.ignore_gpu_blacklist, right.ignore_gpu_blacklist); EXPECT_EQ(left.enable_oop_rasterization, right.enable_oop_rasterization); EXPECT_EQ(left.disable_oop_rasterization, right.disable_oop_rasterization); EXPECT_EQ(left.watchdog_starts_backgrounded, right.watchdog_starts_backgrounded); + EXPECT_EQ(left.gr_context_type, right.gr_context_type); EXPECT_EQ(left.use_vulkan, right.use_vulkan); EXPECT_EQ(left.enable_gpu_benchmarking_extension, right.enable_gpu_benchmarking_extension); EXPECT_EQ(left.enable_webgpu, right.enable_webgpu); + EXPECT_EQ(left.enable_gpu_blocked_time_metric, + right.enable_gpu_blocked_time_metric); #if defined(USE_OZONE) EXPECT_EQ(left.message_pump_type, right.message_pump_type); #endif @@ -149,15 +150,18 @@ TEST(GpuPreferencesTest, EncodeDecode) { GPU_PREFERENCES_FIELD(use_passthrough_cmd_decoder, true) GPU_PREFERENCES_FIELD(disable_biplanar_gpu_memory_buffers_for_video_frames, true) - GPU_PREFERENCES_FIELD(disable_gpu_driver_bug_workarounds, true) GPU_PREFERENCES_FIELD(ignore_gpu_blacklist, true) GPU_PREFERENCES_FIELD(enable_oop_rasterization, true) GPU_PREFERENCES_FIELD(disable_oop_rasterization, true) GPU_PREFERENCES_FIELD(watchdog_starts_backgrounded, true) + GPU_PREFERENCES_FIELD_ENUM(gr_context_type, + GrContextType::kVulkan, + mojom::GrContextType::kVulkan) GPU_PREFERENCES_FIELD_ENUM(use_vulkan, VulkanImplementationName::kNative, mojom::VulkanImplementationName::kNative) GPU_PREFERENCES_FIELD(enable_gpu_benchmarking_extension, true) GPU_PREFERENCES_FIELD(enable_webgpu, true) + GPU_PREFERENCES_FIELD(enable_gpu_blocked_time_metric, true) #if defined(USE_OZONE) GPU_PREFERENCES_FIELD_ENUM(message_pump_type, base::MessagePumpType::UI, base::MessagePumpType::UI) diff --git a/chromium/gpu/config/gpu_switches.cc b/chromium/gpu/config/gpu_switches.cc index 7109b59058e..a66259c4727 100644 --- a/chromium/gpu/config/gpu_switches.cc +++ b/chromium/gpu/config/gpu_switches.cc @@ -6,10 +6,6 @@ namespace switches { -// Disable workarounds for various GPU driver bugs. -const char kDisableGpuDriverBugWorkarounds[] = - "disable-gpu-driver-bug-workarounds"; - // Disable GPU rasterization, i.e. rasterize on the CPU only. // Overrides the kEnableGpuRasterization and kForceGpuRasterization flags. 
const char kDisableGpuRasterization[] = "disable-gpu-rasterization"; @@ -56,4 +52,7 @@ const char kUseHighGPUThreadPriorityForPerfTests[] = const char kNoDelayForDX12VulkanInfoCollection[] = "no-delay-for-dx12-vulkan-info-collection"; +// Enables measuring how long the GPU main thread was blocked between SwapBuffers calls. +const char kEnableGpuBlockedTime[] = "enable-gpu-blocked-time"; + } // namespace switches diff --git a/chromium/gpu/config/gpu_switches.h b/chromium/gpu/config/gpu_switches.h index dcaeef62a29..3a0d3ae36a3 100644 --- a/chromium/gpu/config/gpu_switches.h +++ b/chromium/gpu/config/gpu_switches.h @@ -9,7 +9,6 @@ namespace switches { -GPU_EXPORT extern const char kDisableGpuDriverBugWorkarounds[]; GPU_EXPORT extern const char kDisableGpuRasterization[]; GPU_EXPORT extern const char kEnableGpuRasterization[]; GPU_EXPORT extern const char kGpuBlacklistTestGroup[]; @@ -21,6 +20,7 @@ GPU_EXPORT extern const char kDisableGpuProcessForDX12VulkanInfoCollection[]; GPU_EXPORT extern const char kEnableUnsafeWebGPU[]; GPU_EXPORT extern const char kUseHighGPUThreadPriorityForPerfTests[]; GPU_EXPORT extern const char kNoDelayForDX12VulkanInfoCollection[]; +GPU_EXPORT extern const char kEnableGpuBlockedTime[]; } // namespace switches diff --git a/chromium/gpu/config/gpu_util.cc b/chromium/gpu/config/gpu_util.cc index 235874e7f3b..05f01258f29 100644 --- a/chromium/gpu/config/gpu_util.cc +++ b/chromium/gpu/config/gpu_util.cc @@ -25,6 +25,7 @@ #include "gpu/config/gpu_preferences.h" #include "gpu/config/gpu_switches.h" #include "gpu/vulkan/buildflags.h" +#include "third_party/vulkan/include/vulkan/vulkan.h" #include "ui/gfx/extension_set.h" #include "ui/gl/buildflags.h" #include "ui/gl/gl_switches.h" @@ -236,15 +237,14 @@ GpuFeatureStatus GetAcceleratedVideoDecodeFeatureStatus( return kGpuFeatureStatusEnabled; } -GpuFeatureStatus GetGpuCompositingFeatureStatus( - const std::set<int>& blacklisted_features, - bool use_swift_shader) { +GpuFeatureStatus GetGLFeatureStatus(const std::set<int>& blacklisted_features, + bool use_swift_shader) { if (use_swift_shader) { // This is for testing only. Chrome should exercise the GPU accelerated // path on top of SwiftShader driver. 
return kGpuFeatureStatusEnabled; } - if (blacklisted_features.count(GPU_FEATURE_TYPE_GPU_COMPOSITING)) + if (blacklisted_features.count(GPU_FEATURE_TYPE_ACCELERATED_GL)) return kGpuFeatureStatusBlacklisted; return kGpuFeatureStatusEnabled; } @@ -307,8 +307,6 @@ GpuFeatureInfo ComputeGpuFeatureInfoWithHardwareAccelerationDisabled() { GpuFeatureInfo gpu_feature_info; gpu_feature_info.status_values[GPU_FEATURE_TYPE_ACCELERATED_2D_CANVAS] = kGpuFeatureStatusSoftware; - gpu_feature_info.status_values[GPU_FEATURE_TYPE_GPU_COMPOSITING] = - kGpuFeatureStatusDisabled; gpu_feature_info.status_values[GPU_FEATURE_TYPE_ACCELERATED_WEBGL] = kGpuFeatureStatusSoftware; gpu_feature_info.status_values[GPU_FEATURE_TYPE_FLASH3D] = @@ -329,6 +327,8 @@ GpuFeatureInfo ComputeGpuFeatureInfoWithHardwareAccelerationDisabled() { kGpuFeatureStatusDisabled; gpu_feature_info.status_values[GPU_FEATURE_TYPE_ANDROID_SURFACE_CONTROL] = kGpuFeatureStatusDisabled; + gpu_feature_info.status_values[GPU_FEATURE_TYPE_ACCELERATED_GL] = + kGpuFeatureStatusDisabled; gpu_feature_info.status_values[GPU_FEATURE_TYPE_METAL] = kGpuFeatureStatusDisabled; gpu_feature_info.status_values[GPU_FEATURE_TYPE_VULKAN] = @@ -345,8 +345,6 @@ GpuFeatureInfo ComputeGpuFeatureInfoWithNoGpu() { GpuFeatureInfo gpu_feature_info; gpu_feature_info.status_values[GPU_FEATURE_TYPE_ACCELERATED_2D_CANVAS] = kGpuFeatureStatusSoftware; - gpu_feature_info.status_values[GPU_FEATURE_TYPE_GPU_COMPOSITING] = - kGpuFeatureStatusDisabled; gpu_feature_info.status_values[GPU_FEATURE_TYPE_ACCELERATED_WEBGL] = kGpuFeatureStatusDisabled; gpu_feature_info.status_values[GPU_FEATURE_TYPE_FLASH3D] = @@ -367,6 +365,8 @@ GpuFeatureInfo ComputeGpuFeatureInfoWithNoGpu() { kGpuFeatureStatusDisabled; gpu_feature_info.status_values[GPU_FEATURE_TYPE_ANDROID_SURFACE_CONTROL] = kGpuFeatureStatusDisabled; + gpu_feature_info.status_values[GPU_FEATURE_TYPE_ACCELERATED_GL] = + kGpuFeatureStatusDisabled; gpu_feature_info.status_values[GPU_FEATURE_TYPE_METAL] = kGpuFeatureStatusDisabled; gpu_feature_info.status_values[GPU_FEATURE_TYPE_VULKAN] = @@ -383,8 +383,6 @@ GpuFeatureInfo ComputeGpuFeatureInfoForSwiftShader() { GpuFeatureInfo gpu_feature_info; gpu_feature_info.status_values[GPU_FEATURE_TYPE_ACCELERATED_2D_CANVAS] = kGpuFeatureStatusSoftware; - gpu_feature_info.status_values[GPU_FEATURE_TYPE_GPU_COMPOSITING] = - kGpuFeatureStatusDisabled; gpu_feature_info.status_values[GPU_FEATURE_TYPE_ACCELERATED_WEBGL] = kGpuFeatureStatusSoftware; gpu_feature_info.status_values[GPU_FEATURE_TYPE_FLASH3D] = @@ -405,6 +403,8 @@ GpuFeatureInfo ComputeGpuFeatureInfoForSwiftShader() { kGpuFeatureStatusDisabled; gpu_feature_info.status_values[GPU_FEATURE_TYPE_ANDROID_SURFACE_CONTROL] = kGpuFeatureStatusDisabled; + gpu_feature_info.status_values[GPU_FEATURE_TYPE_ACCELERATED_GL] = + kGpuFeatureStatusDisabled; gpu_feature_info.status_values[GPU_FEATURE_TYPE_METAL] = kGpuFeatureStatusDisabled; gpu_feature_info.status_values[GPU_FEATURE_TYPE_VULKAN] = @@ -477,8 +477,6 @@ GpuFeatureInfo ComputeGpuFeatureInfo(const GPUInfo& gpu_info, gpu_feature_info.status_values[GPU_FEATURE_TYPE_ACCELERATED_VIDEO_DECODE] = GetAcceleratedVideoDecodeFeatureStatus(blacklisted_features, use_swift_shader); - gpu_feature_info.status_values[GPU_FEATURE_TYPE_GPU_COMPOSITING] = - GetGpuCompositingFeatureStatus(blacklisted_features, use_swift_shader); gpu_feature_info.status_values[GPU_FEATURE_TYPE_PROTECTED_VIDEO_DECODE] = GetProtectedVideoDecodeFeatureStatus(blacklisted_features, gpu_info, use_swift_shader); @@ -488,6 +486,8 @@ 
GpuFeatureInfo ComputeGpuFeatureInfo(const GPUInfo& gpu_info, gpu_feature_info.status_values[GPU_FEATURE_TYPE_ANDROID_SURFACE_CONTROL] = GetAndroidSurfaceControlFeatureStatus(blacklisted_features, gpu_preferences); + gpu_feature_info.status_values[GPU_FEATURE_TYPE_ACCELERATED_GL] = + GetGLFeatureStatus(blacklisted_features, use_swift_shader); gpu_feature_info.status_values[GPU_FEATURE_TYPE_METAL] = GetMetalFeatureStatus(blacklisted_features, gpu_preferences); gpu_feature_info.status_values[GPU_FEATURE_TYPE_VULKAN] = @@ -512,7 +512,7 @@ GpuFeatureInfo ComputeGpuFeatureInfo(const GPUInfo& gpu_info, std::set<int> enabled_driver_bug_workarounds; std::vector<std::string> driver_bug_disabled_extensions; - if (!gpu_preferences.disable_gpu_driver_bug_workarounds) { + if (!command_line->HasSwitch(switches::kDisableGpuDriverBugWorkarounds)) { std::unique_ptr<gpu::GpuDriverBugList> list(GpuDriverBugList::Create()); unsigned target_test_group = 0u; if (command_line->HasSwitch(switches::kGpuDriverBugListTestGroup)) { @@ -635,7 +635,7 @@ bool InitializeGLThreadSafe(base::CommandLine* command_line, } if (gl::GetGLImplementation() == gl::kGLImplementationNone) { // Some tests initialize bindings by themselves. - if (!gl::init::InitializeGLNoExtensionsOneOff()) { + if (!gl::init::InitializeGLNoExtensionsOneOff(/*init_bindings*/ true)) { VLOG(1) << "gl::init::InitializeGLNoExtensionsOneOff failed"; return false; } @@ -662,13 +662,14 @@ bool EnableSwiftShaderIfNeeded(base::CommandLine* command_line, bool disable_software_rasterizer, bool blacklist_needs_more_info) { #if BUILDFLAG(ENABLE_SWIFTSHADER) - if (disable_software_rasterizer) + if (disable_software_rasterizer || blacklist_needs_more_info) return false; // Don't overwrite user preference. if (command_line->HasSwitch(switches::kUseGL)) return false; - if (!blacklist_needs_more_info && - gpu_feature_info.status_values[GPU_FEATURE_TYPE_ACCELERATED_WEBGL] != + if (gpu_feature_info.status_values[GPU_FEATURE_TYPE_ACCELERATED_WEBGL] != + kGpuFeatureStatusEnabled || + gpu_feature_info.status_values[GPU_FEATURE_TYPE_ACCELERATED_GL] != kGpuFeatureStatusEnabled) { command_line->AppendSwitchASCII( switches::kUseGL, gl::kGLImplementationSwiftShaderForWebGLName); @@ -810,4 +811,18 @@ std::string VulkanVersionToString(uint32_t vulkan_version) { } #endif // OS_WIN +VulkanVersion ConvertToHistogramVulkanVersion(uint32_t vulkan_version) { + switch (vulkan_version) { + case 0: + return VulkanVersion::kVulkanVersionUnknown; + case VK_MAKE_VERSION(1, 0, 0): + return VulkanVersion::kVulkanVersion_1_0_0; + case VK_MAKE_VERSION(1, 1, 0): + return VulkanVersion::kVulkanVersion_1_1_0; + default: + NOTREACHED(); + return VulkanVersion::kVulkanVersionUnknown; + } +} + } // namespace gpu diff --git a/chromium/gpu/config/gpu_util.h b/chromium/gpu/config/gpu_util.h index d1cfed531c9..2e3c85026ec 100644 --- a/chromium/gpu/config/gpu_util.h +++ b/chromium/gpu/config/gpu_util.h @@ -85,6 +85,19 @@ GPU_EXPORT std::string D3DFeatureLevelToString(uint32_t d3d_feature_level); GPU_EXPORT std::string VulkanVersionToString(uint32_t vulkan_version); #endif // OS_WIN +// These values are persisted to logs. Entries should not be renumbered and +// numeric values should never be reused. 
+// This should match enum VulkanVersion in \tools\metrics\histograms\enums.xml +enum class VulkanVersion { + kVulkanVersionUnknown = 0, + kVulkanVersion_1_0_0 = 1, + kVulkanVersion_1_1_0 = 2, + kMaxValue = kVulkanVersion_1_1_0, +}; + +GPU_EXPORT VulkanVersion +ConvertToHistogramVulkanVersion(uint32_t vulkan_version); + } // namespace gpu #endif // GPU_CONFIG_GPU_UTIL_H_ diff --git a/chromium/gpu/config/gpu_workaround_list.txt b/chromium/gpu/config/gpu_workaround_list.txt index 5d5beda9b0e..70751969b07 100644 --- a/chromium/gpu/config/gpu_workaround_list.txt +++ b/chromium/gpu/config/gpu_workaround_list.txt @@ -73,6 +73,7 @@ init_two_cube_map_levels_before_copyteximage init_vertex_attributes max_copy_texture_chromium_size_1048576 max_copy_texture_chromium_size_262144 +max_msaa_sample_count_2 max_msaa_sample_count_4 max_texture_size_limit_4096 msaa_is_slow @@ -103,7 +104,6 @@ unpack_image_height_workaround_with_unpack_buffer unpack_overlapping_rows_separately_unpack_buffer use_client_side_arrays_for_stream_buffers use_es2_for_oopr -use_framebuffer_cmaa use_gpu_driver_workaround_for_testing use_intermediary_for_copy_texture_image use_non_zero_size_for_client_side_stream_buffers @@ -116,3 +116,5 @@ use_eqaa_storage_samples_2 max_3d_array_texture_size_1024 disable_half_float_for_gmb prefer_draw_to_copy +remove_dynamic_indexing_of_swizzled_vector +disable_dual_source_blending_support diff --git a/chromium/gpu/config/process_json.py b/chromium/gpu/config/process_json.py index d8852c7db19..1c527687812 100755 --- a/chromium/gpu/config/process_json.py +++ b/chromium/gpu/config/process_json.py @@ -385,6 +385,7 @@ def write_conditions(entry_id, is_exception, exception_id, entry, machine_model_name = None machine_model_version = None exception_count = 0 + subpixel_font_rendering = None # process the entry for key in entry: if key == 'id': @@ -464,6 +465,8 @@ def write_conditions(entry_id, is_exception, exception_id, entry, machine_model_name = entry[key] elif key == 'machine_model_version': machine_model_version = entry[key] + elif key == 'subpixel_font_rendering': + subpixel_font_rendering = entry[key] elif key == 'exceptions': assert not is_exception assert exception_count == 0 @@ -503,12 +506,14 @@ def write_conditions(entry_id, is_exception, exception_id, entry, # group a bunch of less used conditions if (gl_version != None or pixel_shader_version != None or in_process_gpu or gl_reset_notification_strategy != None or direct_rendering_version != None - or gpu_count != None or hardware_overlay != None or test_group != 0): + or gpu_count != None or hardware_overlay != None or test_group != 0 or + subpixel_font_rendering != None): write_entry_more_data(entry_id, is_exception, exception_id, gl_type, gl_version, pixel_shader_version, in_process_gpu, gl_reset_notification_strategy, direct_rendering_version, gpu_count, hardware_overlay, - test_group, data_file, data_helper_file) + test_group, subpixel_font_rendering, + data_file, data_helper_file) else: data_file.write('nullptr, // more conditions\n') @@ -555,7 +560,8 @@ def write_entry_more_data(entry_id, is_exception, exception_id, gl_type, gl_version, pixel_shader_version, in_process_gpu, gl_reset_notification_strategy, direct_rendering_version, gpu_count, hardware_overlay, - test_group, data_file, data_helper_file): + test_group, subpixel_font_rendering, data_file, + data_helper_file): # write more data # Generate a unique name for jumbo build which concatenates multiple @@ -581,6 +587,8 @@ def write_entry_more_data(entry_id, is_exception, 
exception_id, gl_type, write_version(gpu_count, 'gpu_count', data_helper_file) write_supported_or_not(hardware_overlay, 'hardware_overlay', data_helper_file) write_integer_value(test_group, 'test_group', data_helper_file) + write_supported_or_not(subpixel_font_rendering, 'subpixel_font_rendering', + data_helper_file) data_helper_file.write('};\n\n') # reference more data in entry data_file.write('&%s, // more data\n' % var_name) diff --git a/chromium/gpu/config/software_rendering_list.json b/chromium/gpu/config/software_rendering_list.json index ea1294e4ce7..545aeb58cb2 100644 --- a/chromium/gpu/config/software_rendering_list.json +++ b/chromium/gpu/config/software_rendering_list.json @@ -1342,8 +1342,8 @@ }, { "id": 137, - "description": "GPU rasterization on CrOS is blacklisted on anything but Intel, Imagination, or AMD GPUs for now.", - "cr_bugs": [684094], + "description": "GPU rasterization on CrOS is blacklisted on anything but Intel, Mali T8/G, Imagination, Freedreno, or AMD GPU/drivers for now.", + "cr_bugs": [684094, 996858, 1020500], "os": { "type": "chromeos" }, @@ -1352,6 +1352,11 @@ ], "exceptions": [ { "vendor_id": "0x8086" }, + { "gl_vendor": "freedreno" }, + { "gl_renderer": "Mali-T8.*", + "subpixel_font_rendering": "unsupported"}, + { "gl_renderer": "Mali-G.*", + "subpixel_font_rendering": "unsupported"}, { "gl_renderer": "PowerVR.*" }, { "vendor_id": "0x1002" } ] @@ -1582,6 +1587,7 @@ "features": [ "all", {"exceptions": [ + "accelerated_gl", "accelerated_webgl" ]} ] diff --git a/chromium/gpu/config/vulkan_info.cc b/chromium/gpu/config/vulkan_info.cc new file mode 100644 index 00000000000..36e8207571d --- /dev/null +++ b/chromium/gpu/config/vulkan_info.cc @@ -0,0 +1,78 @@ +// Copyright 2019 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
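One detail worth noting before the vulkan_info.cc implementation below: enabled_instance_extensions stores raw const char* pointers into the extensionName arrays held by instance_extensions, so a memberwise copy would leave the new object pointing into the source object's storage. That is why VulkanInfo defines its own operator= that copies the vectors first and then re-resolves the enabled names via SetEnabledInstanceExtensions. A reduced sketch of the hazard and the fix, with Props and Info as illustrative stand-ins rather than the actual classes:

#include <cstring>
#include <vector>

struct Props { char name[64]; };  // stands in for VkExtensionProperties

struct Info {
  std::vector<Props> all;            // owning storage
  std::vector<const char*> enabled;  // non-owning, points into |all|

  Info() = default;
  Info(const Info& other) { *this = other; }
  Info& operator=(const Info& other) {
    all = other.all;  // copy the owning storage first
    // Re-resolve each enabled name against our own storage; copying
    // other.enabled verbatim would dangle into other.all.
    enabled.clear();
    for (const char* name : other.enabled) {
      for (const auto& props : all) {
        if (std::strcmp(name, props.name) == 0) {
          enabled.push_back(props.name);
          break;
        }
      }
    }
    return *this;
  }
};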
+ +#include "gpu/config/vulkan_info.h" + +#include "gpu/ipc/common/vulkan_info.mojom.h" +#include "gpu/ipc/common/vulkan_info_mojom_traits.h" + +namespace gpu { + +VulkanPhysicalDeviceInfo::VulkanPhysicalDeviceInfo() = default; +VulkanPhysicalDeviceInfo::VulkanPhysicalDeviceInfo( + const VulkanPhysicalDeviceInfo& other) = default; +VulkanPhysicalDeviceInfo::~VulkanPhysicalDeviceInfo() = default; +VulkanPhysicalDeviceInfo& VulkanPhysicalDeviceInfo::operator=( + const VulkanPhysicalDeviceInfo& info) = default; + +VulkanInfo::VulkanInfo() = default; +VulkanInfo::~VulkanInfo() = default; + +VulkanInfo::VulkanInfo(const VulkanInfo& other) { + *this = other; +} + +VulkanInfo& VulkanInfo::operator=(const VulkanInfo& other) { + api_version = other.api_version; + used_api_version = other.used_api_version; + instance_extensions = other.instance_extensions; + instance_layers = other.instance_layers; + physical_devices = other.physical_devices; + SetEnabledInstanceExtensions(other.enabled_instance_extensions); + return *this; +} + +std::vector<uint8_t> VulkanInfo::Serialize() const { + return gpu::mojom::VulkanInfo::Serialize(this); +} + +void VulkanInfo::SetEnabledInstanceExtensions( + const std::vector<const char*>& extensions) { + enabled_instance_extensions.clear(); + for (const auto* const extension : extensions) { + bool found = false; + for (const auto& instance_extension : instance_extensions) { + if (strcmp(extension, instance_extension.extensionName) == 0) { + enabled_instance_extensions.push_back(instance_extension.extensionName); + found = true; + break; + } + } + if (!found) { + LOG(ERROR) << "The enabled extension '" << extension + << "' is not in instance_extensions!"; + } + } +} + +void VulkanInfo::SetEnabledInstanceExtensions( + const std::vector<base::StringPiece>& extensions) { + enabled_instance_extensions.clear(); + for (const auto& extension : extensions) { + bool found = false; + for (const auto& instance_extension : instance_extensions) { + if (extension == instance_extension.extensionName) { + enabled_instance_extensions.push_back(instance_extension.extensionName); + found = true; + break; + } + } + if (!found) { + LOG(ERROR) << "The enabled extension '" << extension + << "' is not in instance_extensions!"; + } + } +} + +} // namespace gpu diff --git a/chromium/gpu/config/vulkan_info.h b/chromium/gpu/config/vulkan_info.h new file mode 100644 index 00000000000..57b951ef349 --- /dev/null +++ b/chromium/gpu/config/vulkan_info.h @@ -0,0 +1,62 @@ +// Copyright 2019 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#ifndef GPU_CONFIG_VULKAN_INFO_H_ +#define GPU_CONFIG_VULKAN_INFO_H_ + +#include <vulkan/vulkan.h> +#include <vector> + +#include "base/macros.h" +#include "base/strings/string_piece.h" +#include "gpu/gpu_export.h" +#include "ui/gfx/extension_set.h" + +namespace gpu { + +class GPU_EXPORT VulkanPhysicalDeviceInfo { + public: + VulkanPhysicalDeviceInfo(); + VulkanPhysicalDeviceInfo(const VulkanPhysicalDeviceInfo& other); + ~VulkanPhysicalDeviceInfo(); + VulkanPhysicalDeviceInfo& operator=(const VulkanPhysicalDeviceInfo& other); + + // This is a local variable in GPU process, it will not be sent via IPC. 
+ VkPhysicalDevice device = VK_NULL_HANDLE; + + VkPhysicalDeviceProperties properties = {}; + std::vector<VkLayerProperties> layers; + + VkPhysicalDeviceFeatures features = {}; + // Extended physical device features: + bool feature_sampler_ycbcr_conversion = false; + bool feature_protected_memory = false; + + std::vector<VkQueueFamilyProperties> queue_families; +}; + +class GPU_EXPORT VulkanInfo { + public: + VulkanInfo(); + VulkanInfo(const VulkanInfo& other); + ~VulkanInfo(); + VulkanInfo& operator=(const VulkanInfo& other); + + std::vector<uint8_t> Serialize() const; + + void SetEnabledInstanceExtensions(const std::vector<const char*>& extensions); + void SetEnabledInstanceExtensions( + const std::vector<base::StringPiece>& extensions); + + uint32_t api_version = VK_MAKE_VERSION(1, 0, 0); + uint32_t used_api_version = VK_MAKE_VERSION(1, 0, 0); + std::vector<VkExtensionProperties> instance_extensions; + std::vector<const char*> enabled_instance_extensions; + std::vector<VkLayerProperties> instance_layers; + std::vector<VulkanPhysicalDeviceInfo> physical_devices; +}; + +} // namespace gpu + +#endif // GPU_CONFIG_VULKAN_INFO_H_ diff --git a/chromium/gpu/gles2_conform_support/egl/thread_state.cc b/chromium/gpu/gles2_conform_support/egl/thread_state.cc index 4312979b764..bdacb4887a6 100644 --- a/chromium/gpu/gles2_conform_support/egl/thread_state.cc +++ b/chromium/gpu/gles2_conform_support/egl/thread_state.cc @@ -13,7 +13,6 @@ #include "base/strings/utf_string_conversions.h" #include "gpu/command_buffer/client/gles2_lib.h" #include "gpu/command_buffer/common/thread_local.h" -#include "gpu/command_buffer/service/gpu_switches.h" #include "gpu/config/gpu_info_collector.h" #include "gpu/config/gpu_preferences.h" #include "gpu/config/gpu_util.h" @@ -23,6 +22,7 @@ #include "gpu/gles2_conform_support/egl/test_support.h" #include "ui/gl/gl_context.h" #include "ui/gl/gl_surface.h" +#include "ui/gl/gl_switches.h" #include "ui/gl/init/gl_factory.h" // Thread local key for ThreadState instance. Accessed when holding g_egl_lock @@ -79,7 +79,7 @@ egl::ThreadState* ThreadState::Get() { // Need to call both Init and InitFromArgv, since Windows does not use // argc, argv in CommandLine::Init(argc, argv). 
command_line->InitFromArgv(argv); - gl::init::InitializeGLNoExtensionsOneOff(); + gl::init::InitializeGLNoExtensionsOneOff(/*init_bindings*/ true); gpu::GpuFeatureInfo gpu_feature_info; if (!command_line->HasSwitch(switches::kDisableGpuDriverBugWorkarounds)) { gpu::GPUInfo gpu_info; diff --git a/chromium/gpu/ipc/client/command_buffer_proxy_impl.cc b/chromium/gpu/ipc/client/command_buffer_proxy_impl.cc index 1ea1ee14598..2ba4395ef04 100644 --- a/chromium/gpu/ipc/client/command_buffer_proxy_impl.cc +++ b/chromium/gpu/ipc/client/command_buffer_proxy_impl.cc @@ -38,6 +38,7 @@ #include "ui/gfx/geometry/size.h" #include "ui/gfx/gpu_fence.h" #include "ui/gl/gl_bindings.h" +#include "ui/gl/gpu_preference.h" namespace gpu { @@ -190,9 +191,10 @@ void CommandBufferProxyImpl::OnConsoleMessage( message.id); } -void CommandBufferProxyImpl::OnGpuSwitched() { +void CommandBufferProxyImpl::OnGpuSwitched( + gl::GpuPreference active_gpu_heuristic) { if (gpu_control_client_) - gpu_control_client_->OnGpuSwitched(); + gpu_control_client_->OnGpuSwitched(active_gpu_heuristic); } void CommandBufferProxyImpl::AddDeletionObserver(DeletionObserver* observer) { diff --git a/chromium/gpu/ipc/client/command_buffer_proxy_impl.h b/chromium/gpu/ipc/client/command_buffer_proxy_impl.h index 67cecbdf8ee..b2be647ee62 100644 --- a/chromium/gpu/ipc/client/command_buffer_proxy_impl.h +++ b/chromium/gpu/ipc/client/command_buffer_proxy_impl.h @@ -178,7 +178,7 @@ class GPU_EXPORT CommandBufferProxyImpl : public gpu::CommandBuffer, void OnDestroyed(gpu::error::ContextLostReason reason, gpu::error::Error error); void OnConsoleMessage(const GPUCommandBufferConsoleMessage& message); - void OnGpuSwitched(); + void OnGpuSwitched(gl::GpuPreference active_gpu_heuristic); void OnSignalAck(uint32_t id, const CommandBuffer::State& state); void OnSwapBuffersCompleted(const SwapBuffersCompleteParams& params); void OnBufferPresented(uint64_t swap_id, diff --git a/chromium/gpu/ipc/client/gpu_channel_host.cc b/chromium/gpu/ipc/client/gpu_channel_host.cc index 4b8c1374bb0..505affa44fa 100644 --- a/chromium/gpu/ipc/client/gpu_channel_host.cc +++ b/chromium/gpu/ipc/client/gpu_channel_host.cc @@ -21,6 +21,7 @@ #include "gpu/ipc/common/gpu_watchdog_timeout.h" #include "ipc/ipc_channel_mojo.h" #include "ipc/ipc_sync_message.h" +#include "mojo/public/cpp/bindings/lib/message_quota_checker.h" #include "url/gurl.h" using base::AutoLock; @@ -274,11 +275,13 @@ operator=(OrderingBarrierInfo&&) = default; GpuChannelHost::Listener::Listener( mojo::ScopedMessagePipeHandle handle, scoped_refptr<base::SingleThreadTaskRunner> io_task_runner) - : channel_(IPC::ChannelMojo::Create(std::move(handle), - IPC::Channel::MODE_CLIENT, - this, - io_task_runner, - base::ThreadTaskRunnerHandle::Get())) { + : channel_(IPC::ChannelMojo::Create( + std::move(handle), + IPC::Channel::MODE_CLIENT, + this, + io_task_runner, + base::ThreadTaskRunnerHandle::Get(), + mojo::internal::MessageQuotaChecker::MaybeCreate())) { DCHECK(channel_); DCHECK(io_task_runner->BelongsToCurrentThread()); bool result = channel_->Connect(); diff --git a/chromium/gpu/ipc/client/gpu_channel_host.h b/chromium/gpu/ipc/client/gpu_channel_host.h index c6e46a23fe3..1af60880511 100644 --- a/chromium/gpu/ipc/client/gpu_channel_host.h +++ b/chromium/gpu/ipc/client/gpu_channel_host.h @@ -100,7 +100,7 @@ class GPU_EXPORT GpuChannelHost // Ensure that the all deferred messages prior upto |deferred_message_id| have // been flushed. Pass UINT32_MAX to force all pending deferred messages to be // flushed. 
- void EnsureFlush(uint32_t deferred_message_id); + virtual void EnsureFlush(uint32_t deferred_message_id); // Verify that the all deferred messages prior upto |deferred_message_id| have // reached the service. Pass UINT32_MAX to force all pending deferred messages diff --git a/chromium/gpu/ipc/client/image_decode_accelerator_proxy.cc b/chromium/gpu/ipc/client/image_decode_accelerator_proxy.cc index 76029007347..630cd3ddb45 100644 --- a/chromium/gpu/ipc/client/image_decode_accelerator_proxy.cc +++ b/chromium/gpu/ipc/client/image_decode_accelerator_proxy.cc @@ -35,6 +35,7 @@ bool IsSupportedImageSize( image_size = image_data->coded_size.value(); else image_size = image_data->image_size; + DCHECK(!image_size.IsEmpty()); return image_size.width() >= supported_profile.min_encoded_dimensions.width() && diff --git a/chromium/gpu/ipc/client/image_decode_accelerator_proxy.h b/chromium/gpu/ipc/client/image_decode_accelerator_proxy.h index a47df5d6957..1fd6f911f23 100644 --- a/chromium/gpu/ipc/client/image_decode_accelerator_proxy.h +++ b/chromium/gpu/ipc/client/image_decode_accelerator_proxy.h @@ -9,6 +9,7 @@ #include "base/synchronization/lock.h" #include "base/thread_annotations.h" #include "gpu/command_buffer/client/image_decode_accelerator_interface.h" +#include "gpu/gpu_export.h" namespace gpu { class GpuChannelHost; @@ -45,7 +46,8 @@ class GpuChannelHost; // Objects of this class are thread-safe. // // TODO(andrescj): actually put the decoder's capabilities in GpuInfo. -class ImageDecodeAcceleratorProxy : public ImageDecodeAcceleratorInterface { +class GPU_EXPORT ImageDecodeAcceleratorProxy + : public ImageDecodeAcceleratorInterface { public: ImageDecodeAcceleratorProxy(GpuChannelHost* host, int32_t route_id); ~ImageDecodeAcceleratorProxy() override; @@ -53,7 +55,6 @@ class ImageDecodeAcceleratorProxy : public ImageDecodeAcceleratorInterface { // Determines if |image_metadata| corresponds to an image that can be decoded // using hardware decode acceleration. The ScheduleImageDecode() method should // only be called for images for which IsImageSupported() returns true. - // Otherwise, the client faces a GPU channel teardown if the decode fails. bool IsImageSupported( const cc::ImageHeaderMetadata* image_metadata) const override; diff --git a/chromium/gpu/ipc/client/image_decode_accelerator_proxy_unittest.cc b/chromium/gpu/ipc/client/image_decode_accelerator_proxy_unittest.cc new file mode 100644 index 00000000000..e6bb414290a --- /dev/null +++ b/chromium/gpu/ipc/client/image_decode_accelerator_proxy_unittest.cc @@ -0,0 +1,152 @@ +// Copyright 2019 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#include <memory> +#include <utility> +#include <vector> + +#include "base/stl_util.h" +#include "base/test/task_environment.h" +#include "gpu/ipc/client/gpu_channel_host.h" +#include "gpu/ipc/client/image_decode_accelerator_proxy.h" +#include "gpu/ipc/common/command_buffer_id.h" +#include "gpu/ipc/common/gpu_messages.h" +#include "testing/gmock/include/gmock/gmock.h" +#include "testing/gtest/include/gtest/gtest.h" +#include "ui/gfx/color_space.h" + +using ::testing::DeleteArg; +using ::testing::DoAll; +using ::testing::Return; +using ::testing::StrictMock; + +namespace gpu { + +namespace { +constexpr int kChannelId = 5; +constexpr int32_t kRasterCmdBufferRouteId = 3; +constexpr gfx::Size kOutputSize(2, 2); + +MATCHER_P(IpcMessageEqualTo, expected, "") { + // Get params from actual IPC message. 
+ GpuChannelMsg_ScheduleImageDecode::Param actual_param_tuple; + if (!GpuChannelMsg_ScheduleImageDecode::Read(arg, &actual_param_tuple)) + return false; + + GpuChannelMsg_ScheduleImageDecode_Params params = + std::get<0>(actual_param_tuple); + const uint64_t release_count = std::get<1>(actual_param_tuple); + + // Get params from expected IPC Message. + GpuChannelMsg_ScheduleImageDecode::Param expected_param_tuple; + if (!GpuChannelMsg_ScheduleImageDecode::Read(expected, &expected_param_tuple)) + return false; + + GpuChannelMsg_ScheduleImageDecode_Params expected_params = + std::get<0>(expected_param_tuple); + const uint64_t expected_release_count = std::get<1>(expected_param_tuple); + + // Compare all relevant fields. + return arg->routing_id() == expected->routing_id() && + release_count == expected_release_count && + params.encoded_data == expected_params.encoded_data && + params.output_size == expected_params.output_size && + params.raster_decoder_route_id == + expected_params.raster_decoder_route_id && + params.transfer_cache_entry_id == + expected_params.transfer_cache_entry_id && + params.discardable_handle_shm_id == + expected_params.discardable_handle_shm_id && + params.discardable_handle_shm_offset == + expected_params.discardable_handle_shm_offset && + params.discardable_handle_release_count == + expected_params.discardable_handle_release_count && + params.target_color_space == expected_params.target_color_space && + params.needs_mips == expected_params.needs_mips; +} + +} // namespace + +class MockGpuChannelHost : public GpuChannelHost { + public: + MockGpuChannelHost() + : GpuChannelHost(kChannelId, + GPUInfo(), + GpuFeatureInfo(), + mojo::ScopedMessagePipeHandle(mojo::MessagePipeHandle( + mojo::kInvalidHandleValue))) {} + + MOCK_METHOD1(Send, bool(IPC::Message*)); + + protected: + ~MockGpuChannelHost() override {} +}; + +class ImageDecodeAcceleratorProxyTest : public ::testing::Test { + public: + ImageDecodeAcceleratorProxyTest() + : gpu_channel_host_( + base::MakeRefCounted<StrictMock<MockGpuChannelHost>>()), + proxy_(gpu_channel_host_.get(), + (int32_t)GpuChannelReservedRoutes::kImageDecodeAccelerator) {} + + ~ImageDecodeAcceleratorProxyTest() override = default; + + protected: + base::test::SingleThreadTaskEnvironment task_environment_; + scoped_refptr<StrictMock<MockGpuChannelHost>> gpu_channel_host_; + ImageDecodeAcceleratorProxy proxy_; +}; + +TEST_F(ImageDecodeAcceleratorProxyTest, ScheduleImageDecodeSendsMessage) { + const uint8_t image[4] = {1, 2, 3, 4}; + base::span<const uint8_t> encoded_data = + base::span<const uint8_t>(image, base::size(image)); + + const gfx::ColorSpace color_space = gfx::ColorSpace::CreateSRGB(); + + GpuChannelMsg_ScheduleImageDecode_Params expected_params; + expected_params.encoded_data = + std::vector<uint8_t>(encoded_data.cbegin(), encoded_data.cend()); + expected_params.output_size = kOutputSize; + expected_params.raster_decoder_route_id = kRasterCmdBufferRouteId; + expected_params.transfer_cache_entry_id = 1u; + expected_params.discardable_handle_shm_id = 2; + expected_params.discardable_handle_shm_offset = 3u; + expected_params.discardable_handle_release_count = 4u; + expected_params.target_color_space = color_space; + expected_params.needs_mips = false; + + GpuChannelMsg_ScheduleImageDecode expected_message( + static_cast<int32_t>(GpuChannelReservedRoutes::kImageDecodeAccelerator), + std::move(expected_params), /*release_count=*/1u); + + { + EXPECT_CALL(*gpu_channel_host_, Send(IpcMessageEqualTo(&expected_message))) + .Times(1) + 
.WillOnce(DoAll(DeleteArg<0>(), + Return(false))); // Delete object passed to Send. + } + + SyncToken token = proxy_.ScheduleImageDecode( + encoded_data, kOutputSize, + CommandBufferIdFromChannelAndRoute(kChannelId, kRasterCmdBufferRouteId), + /*transfer_cache_entry_id=*/1u, + /*discardable_handle_shm_id=*/2, + /*discardable_handle_shm_offset=*/3u, + /*discardable_handle_release_count=*/4u, color_space, + /*needs_mips=*/false); + + task_environment_.RunUntilIdle(); + testing::Mock::VerifyAndClearExpectations(gpu_channel_host_.get()); + + EXPECT_EQ(ChannelIdFromCommandBufferId(token.command_buffer_id()), + kChannelId); + EXPECT_EQ( + RouteIdFromCommandBufferId(token.command_buffer_id()), + static_cast<int32_t>(GpuChannelReservedRoutes::kImageDecodeAccelerator)); + EXPECT_EQ(token.release_count(), 1u); +} + +} // namespace gpu diff --git a/chromium/gpu/ipc/common/BUILD.gn b/chromium/gpu/ipc/common/BUILD.gn index 9ff12f1a402..4f885a6d4eb 100644 --- a/chromium/gpu/ipc/common/BUILD.gn +++ b/chromium/gpu/ipc/common/BUILD.gn @@ -3,6 +3,7 @@ # found in the LICENSE file. import("//build/config/ui.gni") +import("//gpu/vulkan/features.gni") import("//mojo/public/tools/bindings/mojom.gni") import("//ui/ozone/ozone.gni") @@ -199,7 +200,19 @@ component("vulkan_ycbcr_info") { configs += [ "//gpu:gpu_implementation" ] } +source_set("vulkan_types") { + sources = [ + "vulkan_types.h", + ] + public_deps = [ + "//ui/gfx", + ] + all_dependent_configs = [ "//third_party/vulkan:vulkan_config" ] + configs += [ "//gpu:gpu_implementation" ] +} + mojom("interfaces") { + generate_java = true sources = [ "capabilities.mojom", "context_result.mojom", @@ -221,9 +234,14 @@ mojom("interfaces") { "//ui/gfx/geometry/mojom", "//ui/gfx/mojom", ] + if (enable_vulkan) { + public_deps += [ ":vulkan_interface" ] + enabled_features = [ "supports_vulkan" ] + } } mojom("gpu_preferences_interface") { + generate_java = true sources = [ "gpu_preferences.mojom", ] @@ -239,6 +257,20 @@ mojom("gpu_preferences_interface") { } } +mojom("vulkan_interface") { + generate_java = true + sources = [ + "vulkan_info.mojom", + "vulkan_types.mojom", + ] + + public_deps = [ + "//mojo/public/mojom/base", + ] + + js_generate_struct_deserializers = true +} + mojom("test_interfaces") { testonly = true sources = [ @@ -249,6 +281,21 @@ mojom("test_interfaces") { ":gpu_preferences_interface", ":interfaces", ] + + if (enable_vulkan) { + public_deps += [ ":vulkan_interface" ] + } +} + +source_set("vulkan_types_mojom_traits") { + sources = [ + "vulkan_types_mojom_traits.h", + ] + + deps = [ + ":vulkan_interface_shared_cpp_sources", + ":vulkan_types", + ] } source_set("mojom_traits") { @@ -271,4 +318,7 @@ source_set("mojom_traits") { if (is_android) { sources += [ "vulkan_ycbcr_info_mojom_traits.h" ] } + if (enable_vulkan) { + deps += [ ":vulkan_types_mojom_traits" ] + } } diff --git a/chromium/gpu/ipc/common/OWNERS b/chromium/gpu/ipc/common/OWNERS index 94f052bc57f..02933e17756 100644 --- a/chromium/gpu/ipc/common/OWNERS +++ b/chromium/gpu/ipc/common/OWNERS @@ -3,6 +3,9 @@ set noparent file://ipc/SECURITY_OWNERS +per-file generate_vulkan_types.py=file://gpu/OWNERS +per-file gpu_watchdog_timeout.h=file://gpu/OWNERS + # The following lines are redundant, they're just to silence the presubmit per-file *_messages*.h=set noparent per-file *_messages*.h=file://ipc/SECURITY_OWNERS diff --git a/chromium/gpu/ipc/common/PRESUBMIT.py b/chromium/gpu/ipc/common/PRESUBMIT.py new file mode 100644 index 00000000000..d30db350f6f --- /dev/null +++ 
b/chromium/gpu/ipc/common/PRESUBMIT.py @@ -0,0 +1,55 @@ +# Copyright 2019 The Chromium Authors. All rights reserved. +# Use of this source code is governed by a BSD-style license that can be +# found in the LICENSE file. + +"""Enforces that the Vulkan types autogen files match the generator script's output. + +See http://dev.chromium.org/developers/how-tos/depottools/presubmit-scripts +for more details on the presubmit API built into depot_tools. +""" + +import os.path + + +def CommonChecks(input_api, output_api): + generating_files = input_api.AffectedFiles( + file_filter=lambda x: os.path.basename(x.LocalPath()) in [ + 'generate_vulkan_types.py']) + generated_files = input_api.AffectedFiles( + file_filter=lambda x: os.path.basename(x.LocalPath()) in [ + 'vulkan_types.mojom', 'vulkan_types_mojom_traits.h', + 'vulkan_types_mojom_traits.cc', 'vulkan_types.typemap' + ]) + + + messages = [] + + if generated_files and not generating_files: + long_text = 'Changed files:\n' + for file in generated_files: + long_text += file.LocalPath() + '\n' + long_text += '\n' + messages.append(output_api.PresubmitError( + 'Vulkan types generated files changed but the generator ' + 'did not.', long_text=long_text)) + + with input_api.temporary_directory() as temp_dir: + commands = [] + if generating_files: + commands.append(input_api.Command(name='generate_vulkan_types', + cmd=[input_api.python_executable, + 'generate_vulkan_types.py', + '--check', + '--output-dir=' + temp_dir], + kwargs={}, + message=output_api.PresubmitError)) + if commands: + messages.extend(input_api.RunTests(commands)) + + return messages + +def CheckChangeOnUpload(input_api, output_api): + return CommonChecks(input_api, output_api) + +def CheckChangeOnCommit(input_api, output_api): + return CommonChecks(input_api, output_api) diff --git a/chromium/gpu/ipc/common/generate_vulkan_types.py new file mode 100755 index 00000000000..ef27fab9b6e --- /dev/null +++ b/chromium/gpu/ipc/common/generate_vulkan_types.py @@ -0,0 +1,598 @@ +#!/usr/bin/env python +# Copyright 2019 The Chromium Authors. All rights reserved. +# Use of this source code is governed by a BSD-style license that can be +# found in the LICENSE file. 
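For orientation while reading the parser that follows: generate_vulkan_types.py walks vulkan_core.h line by line and only needs to recognize a handful of declaration shapes. Two of the structs listed in _STRUCTS look like this in the Vulkan headers (declarations as in vulkan_core.h); the fixed-size char arrays are why the script maps char fields to mojo strings and why the generated traits copy through base::StringPiece:

typedef struct VkExtensionProperties {
    char        extensionName[VK_MAX_EXTENSION_NAME_SIZE];
    uint32_t    specVersion;
} VkExtensionProperties;

typedef struct VkLayerProperties {
    char        layerName[VK_MAX_EXTENSION_NAME_SIZE];
    uint32_t    specVersion;
    uint32_t    implementationVersion;
    char        description[VK_MAX_DESCRIPTION_SIZE];
} VkLayerProperties;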
+ +import filecmp +import optparse +import os.path +import platform +import re +import subprocess +import sys + +_VULKAN_HEADER_FILE = "third_party/vulkan/include/vulkan/vulkan_core.h" + +_STRUCTS = [ + "VkExtensionProperties", + "VkLayerProperties", + "VkPhysicalDeviceProperties", + "VkPhysicalDeviceFeatures", + "VkQueueFamilyProperties", +] + +_SELF_LOCATION = os.path.dirname(os.path.abspath(__file__)) + +_MOJO_TYPES = set([ + "uint8", + "uint16", + "uint32", + "int8", + "int16", + "int32", + "float", + "string", +]) + +_VULKAN_BASIC_TYPE_MAP = set([ + "uint8_t", + "uint16_t", + "uint32_t", + "uint64_t", + "int8_t", + "int16_t", + "int32_t", + "int64_t", + "size_t", + "VkBool32", + "float", + "char", +]) + +# types to mojo type +_type_map = { + "uint8_t" : "uint8", + "uint16_t" : "uint16", + "uint32_t" : "uint32", + "uint64_t" : "uint64", + "int8_t" : "int8", + "int16_t" : "int16", + "int32_t" : "int32", + "int64_t" : "int64", + "size_t" : "uint64", + "VkBool32" : "bool", + "float" : "float", + "char" : "char", +} + +_structs = {} +_enums = {} +_defines = {} +_handles = set([]) +_generated_types = [] + + +def ValueNameToVALUE_NAME(name): + return re.sub( + r'(?<=[a-z])[A-Z]|(?<!^)[A-Z](?=[a-z])', r"_\g<0>", name).upper() + + +def ParseHandle(line): + if line.startswith("VK_DEFINE_HANDLE("): + name = line[len("VK_DEFINE_HANDLE("):-1] + elif line.startswith("VK_DEFINE_NON_DISPATCHABLE_HANDLE("): + name = line[len("VK_DEFINE_NON_DISPATCHABLE_HANDLE("):-1] + elif line.startswith("VK_DEFINE_DISPATCHABLE_HANDLE("): + name = line[len("VK_DEFINE_DISPATCHABLE_HANDLE("):-1] + else: + return + _handles.add(name) + + +def ParseTypedef(line): + # typedef Type1 Type1; + line = line.rstrip(';') + line = line.split() + if len(line) == 3: + typedef, t1, t2 = line + assert typedef == "typedef" + # We would like to use bool instead uint32 for VkBool32 + if t2 == "VkBool32": + return + if t1 in _type_map: + _type_map[t2] = _type_map[t1] + else: + assert t1 in _structs or t1 in _enums or t1 in _handles, \ + "Undefined type '%s'" % t1 + else: + pass + # skip typdef for function pointer + + +def ParseEnum(line, header_file): + # typedef enum kName { + # ... + # } kName; + name = line.split()[2] + + # Skip VkResult and NameBits + if name == "VkResult": + value_name_prefix = "VK" + elif name.endswith("FlagBits"): + value_name_prefix = ValueNameToVALUE_NAME(name[:-len("FlagBits")]) + elif name.endswith("FlagBitsKHR"): + value_name_prefix = ValueNameToVALUE_NAME(name[:-len("FlagBitsKHR")]) + else: + value_name_prefix = ValueNameToVALUE_NAME(name) + + values = [] + while True: + line = header_file.readline().strip() + # } kName; + if line == "} %s;" % name: + break + # VK_NAME = value, + value_name, value = line.rstrip(',').split(" = ") + if not value.isdigit(): + # Ignore VK_NAME_BEGIN_RANGE + # Ignore VK_NAME_END_RANGE + # Ignore VK_NAME_RANGE_SIZE + # Ignore VK_NAME_MAX_ENUM = 0x7FFFFFFF + continue + assert len(value_name_prefix) + 1 < len(value_name), \ + "Wrong enum value name `%s`" % value_name + mojom_value_name = value_name[len(value_name_prefix) + 1:] + values.append((value_name, value, mojom_value_name)) + assert name not in _enums, "enum '%s' has been defined." % name + _enums[name] = values + + +def ParseStruct(line, header_file): + # typedef struct kName { + # ... 
+  # } kName;
+  name = line.split()[2]
+
+  fields = []
+  while True:
+    line = header_file.readline().strip()
+    # } kName;
+    if line == "} %s;" % name:
+      break
+    # type name;
+    # const type name;
+    # type name[L];
+    line = line.rstrip(";")
+    field_type, field_name = line.rsplit(None, 1)
+    array_len = None
+    if '[' in field_name:
+      assert ']' in field_name
+      field_name, array_len = field_name.rstrip(']').split('[')
+      assert array_len.isdigit() or array_len in _defines
+    fields.append((field_name, field_type, array_len))
+  assert name not in _structs, "struct '%s' has been defined." % name
+  _structs[name] = fields
+
+
+def ParseDefine(line):
+  # Skip multi-line macros.
+  if line.endswith('\\'):
+    return
+  # Skip function-like macros: #define NAME() ...
+  if '(' in line or ')' in line:
+    return
+
+  define, name, value = line.split()
+  assert define == "#define"
+  assert name not in _defines, "macro '%s' has been defined." % name
+  _defines[name] = value
+
+
+def ParseVulkanHeaderFile(path):
+  with open(path) as header_file:
+    while True:
+      line = header_file.readline()
+      if not line:
+        break
+      line = line.strip()
+
+      if line.startswith("#define"):
+        ParseDefine(line)
+      elif line.startswith("typedef enum "):
+        ParseEnum(line, header_file)
+      elif line.startswith("typedef struct "):
+        ParseStruct(line, header_file)
+      elif line.startswith("typedef "):
+        ParseTypedef(line)
+      elif line.startswith("VK_DEFINE_"):
+        ParseHandle(line)
+
+
+def WriteMojomEnum(name, mojom_file):
+  if name in _generated_types:
+    return
+  _generated_types.append(name)
+
+  values = _enums[name]
+  mojom_file.write("\n")
+  mojom_file.write("enum %s {\n" % name)
+  for _, value, mojom_value_name in values:
+    mojom_file.write("  %s = %s,\n" % (mojom_value_name, value))
+  mojom_file.write("  INVALID_VALUE = -1,\n")
+  mojom_file.write("};\n")
+
+
+def WriteMojomStruct(name, mojom_file):
+  if name in _generated_types:
+    return
+  _generated_types.append(name)
+
+  fields = _structs[name]
+  deps = []
+  for field_name, field_type, array_len in fields:
+    if field_type in _structs or field_type in _enums:
+      deps.append(field_type)
+  WriteMojomTypes(deps, mojom_file)
+
+  mojom_file.write("\n")
+  mojom_file.write("struct %s {\n" % name)
+  for field_name, field_type, array_len in fields:
+    if field_type in _type_map:
+      field_type = _type_map[field_type]
+    else:
+      assert field_type in _structs or field_type in _enums or \
+          field_type in _handles, "Undefined type: '%s'" % field_type
+    if field_type == "char":
+      assert array_len
+      array_len = _defines[array_len]
+      mojom_file.write("  string %s;\n" % field_name)
+    elif not array_len:
+      mojom_file.write("  %s %s;\n" % (field_type, field_name))
+    else:
+      if not array_len.isdigit():
+        array_len = _defines[array_len]
+      assert array_len.isdigit(), "%s is not a digit." % array_len
+      mojom_file.write(
+          "  array<%s, %s> %s;\n" % (field_type, array_len, field_name))
+  mojom_file.write("};\n")
+
+
+def WriteMojomTypes(types, mojom_file):
+  for t in types:
+    if t in _structs:
+      WriteMojomStruct(t, mojom_file)
+    elif t in _enums:
+      WriteMojomEnum(t, mojom_file)
+    else:
+      pass
+
+
+def GenerateMojom(mojom_file):
+  mojom_file.write(
+'''// Copyright 2019 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file is auto-generated from
+// gpu/ipc/common/generate_vulkan_types.py
+// It's formatted by clang-format using chromium coding style:
+// clang-format -i -style=chromium filename
+// DO NOT EDIT!
+
+module gpu.mojom;
+''')
+  WriteMojomTypes(_STRUCTS, mojom_file)
+
+
+def WriteStructTraits(name, traits_header_file, traits_source_file):
+  traits_header_file.write(
+"""
+template <>
+struct StructTraits<gpu::mojom::%sDataView, %s> {
+""" % (name, name)
+  )
+
+  fields = _structs[name]
+  for field_name, field_type, array_len in fields:
+    if field_type == "VkBool32":
+      field_type = "bool"
+    elif field_type == "VkDeviceSize":
+      field_type = "bool"
+
+    if field_type == "char":
+      assert array_len
+      traits_header_file.write(
+"""
+  static base::StringPiece %s(const %s& input) {
+    return input.%s;
+  }
+""" % (field_name, name, field_name))
+    elif array_len:
+      traits_header_file.write(
+"""
+  static base::span<const %s> %s(const %s& input) {
+    return input.%s;
+  }
+""" % (field_type, field_name, name, field_name))
+    elif field_type in _structs:
+      traits_header_file.write(
+"""
+  static const %s& %s(const %s& input) {
+    return input.%s;
+  }
+""" % (field_type, field_name, name, field_name))
+    else:
+      traits_header_file.write(
+"""
+  static %s %s(const %s& input) {
+    return input.%s;
+  }
+""" % (field_type, field_name, name, field_name))
+
+  traits_header_file.write(
+"""
+  static bool Read(gpu::mojom::%sDataView data, %s* out);
+""" % (name, name))
+
+  traits_source_file.write(
+"""
+// static
+bool StructTraits<gpu::mojom::%sDataView, %s>::Read(
+    gpu::mojom::%sDataView data, %s* out) {
+""" % (name, name, name, name))
+
+  fields = _structs[name]
+  for field_name, field_type, array_len in fields:
+    if field_type == "VkBool32":
+      field_type = "bool"
+    elif field_type == "VkDeviceSize":
+      field_type = "bool"
+
+    if field_type == "char":
+      assert array_len
+      read_method = "Read%s%s" % (field_name[0].upper(), field_name[1:])
+      traits_source_file.write(
+"""
+  base::StringPiece %s;
+  if (!data.%s(&%s))
+    return false;
+  %s.copy(out->%s, sizeof(out->%s));
+""" % (field_name, read_method, field_name, field_name, field_name, field_name))
+    elif array_len:
+      read_method = "Read%s%s" % (field_name[0].upper(), field_name[1:])
+      traits_source_file.write(
+"""
+  base::span<%s> %s(out->%s);
+  if (!data.%s(&%s))
+    return false;
+""" % (field_type, field_name, field_name, read_method, field_name))
+    elif field_type in _structs or field_type in _enums:
+      traits_source_file.write(
+"""
+  if (!data.Read%s%s(&out->%s))
+    return false;
+""" % (field_name[0].upper(), field_name[1:], field_name))
+    else:
+      traits_source_file.write(
+"""
+  out->%s = data.%s();
+""" % (field_name, field_name))
+
+
+  traits_source_file.write(
+"""
+  return true;
+}
+""")
+
+
+  traits_header_file.write("};\n")
+
+
+def WriteEnumTraits(name, traits_header_file):
+  traits_header_file.write(
+"""
+template <>
+struct EnumTraits<gpu::mojom::%s, %s> {
+  static gpu::mojom::%s ToMojom(%s input) {
+    switch (input) {
+""" % (name, name, name, name))
+
+  for value_name, _, mojom_value_name in _enums[name]:
+    traits_header_file.write(
+"""
+      case %s::%s:
+        return gpu::mojom::%s::%s;"""
+      % (name, value_name, name, mojom_value_name))
+
+  traits_header_file.write(
+"""
+      default:
+        NOTREACHED();
+        return gpu::mojom::%s::INVALID_VALUE;
+    }
+  }
+
+  static bool FromMojom(gpu::mojom::%s input, %s* out) {
+    switch (input) {
+""" % (name, name, name))
+
+  for value_name, _, mojom_value_name in _enums[name]:
+    traits_header_file.write(
+"""
+      case gpu::mojom::%s::%s:
+        *out = %s::%s;
+        return true;""" % (name, mojom_value_name, name, value_name))
+
+  traits_header_file.write(
+"""
+      case gpu::mojom::%s::INVALID_VALUE:
+        NOTREACHED();
+        return false;
+
+    }
+    NOTREACHED();
+    return false;
+  }
+};""" % name)
+
+
+def GenerateTraitsFile(traits_header_file, traits_source_file):
+  traits_header_file.write(
+"""// Copyright 2019 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file is auto-generated from
+// gpu/ipc/common/generate_vulkan_types.py
+// It's formatted by clang-format using chromium coding style:
+// clang-format -i -style=chromium filename
+// DO NOT EDIT!
+
+#ifndef GPU_IPC_COMMON_VULKAN_TYPES_MOJOM_TRAITS_H_
+#define GPU_IPC_COMMON_VULKAN_TYPES_MOJOM_TRAITS_H_
+
+#include "base/containers/span.h"
+#include "base/strings/string_piece.h"
+#include "gpu/ipc/common/vulkan_types.h"
+#include "gpu/ipc/common/vulkan_types.mojom-shared.h"
+
+namespace mojo {
+""")
+
+  traits_source_file.write(
+"""// Copyright 2019 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file is auto-generated from
+// gpu/ipc/common/generate_vulkan_types.py
+// It's formatted by clang-format using chromium coding style:
+// clang-format -i -style=chromium filename
+// DO NOT EDIT!
+
+#include "gpu/ipc/common/vulkan_info_mojom_traits.h"
+
+namespace mojo {
+""")
+
+  for t in _generated_types:
+    if t in _structs:
+      WriteStructTraits(t, traits_header_file, traits_source_file)
+    elif t in _enums:
+      WriteEnumTraits(t, traits_header_file)
+
+  traits_header_file.write(
+"""
+}  // namespace mojo
+
+#endif  // GPU_IPC_COMMON_VULKAN_TYPES_MOJOM_TRAITS_H_""")
+
+  traits_source_file.write(
+"""
+}  // namespace mojo""")
+
+
+def GenerateTypemapFile(typemap_file):
+  typemap_file.write(
+"""# Copyright 2019 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+# This file is auto-generated from
+# gpu/ipc/common/generate_vulkan_types.py
+# DO NOT EDIT!
+
+mojom = "//gpu/ipc/common/vulkan_types.mojom"
+public_headers = [ "//gpu/ipc/common/vulkan_types.h" ]
+traits_headers = [ "//gpu/ipc/common/vulkan_types_mojom_traits.h" ]
+sources = [
+  "//gpu/ipc/common/vulkan_types_mojom_traits.cc",
+]
+public_deps = [
+  "//gpu/ipc/common:vulkan_types",
+]
+type_mappings = [
+""")
+  for t in _generated_types:
+    typemap_file.write("  \"gpu.mojom.%s=::%s\",\n" % (t, t))
+  typemap_file.write("]\n")
+
+
+def main(argv):
+  """Generates the Vulkan types mojom, traits, and typemap files."""
+
+  parser = optparse.OptionParser()
+  parser.add_option(
+      "--output-dir",
+      help="Output directory for generated files. Defaults to this script's "
+      "directory.")
+  parser.add_option(
+      "-c", "--check", action="store_true",
+      help="Check if output files match generated files in chromium root "
+      "directory. Use this in PRESUBMIT scripts with --output-dir.")
+
+  (options, _) = parser.parse_args(args=argv)
+
+  # Support generating files for PRESUBMIT.
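+  # The presubmit passes --check together with --output-dir=<temp dir>, so
+  # the comparison below runs against freshly generated copies without
+  # touching the checked-in files.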
+  if options.output_dir:
+    output_dir = options.output_dir
+  else:
+    output_dir = _SELF_LOCATION
+
+  def ClangFormat(filename):
+    formatter = "clang-format"
+    if platform.system() == "Windows":
+      formatter += ".bat"
+    subprocess.call([formatter, "-i", "-style=chromium", filename])
+
+  vulkan_header_file_path = os.path.join(
+      _SELF_LOCATION, "../../..", _VULKAN_HEADER_FILE)
+  ParseVulkanHeaderFile(vulkan_header_file_path)
+
+  mojom_file_name = "vulkan_types.mojom"
+  mojom_file = open(
+      os.path.join(output_dir, mojom_file_name), 'wb')
+  GenerateMojom(mojom_file)
+  mojom_file.close()
+  ClangFormat(mojom_file.name)
+
+  traits_header_file_name = "vulkan_types_mojom_traits.h"
+  traits_header_file = \
+      open(os.path.join(output_dir, traits_header_file_name), 'wb')
+  traits_source_file_name = "vulkan_types_mojom_traits.cc"
+  traits_source_file = \
+      open(os.path.join(output_dir, traits_source_file_name), 'wb')
+  GenerateTraitsFile(traits_header_file, traits_source_file)
+  traits_header_file.close()
+  ClangFormat(traits_header_file.name)
+  traits_source_file.close()
+  ClangFormat(traits_source_file.name)
+
+  typemap_file_name = "vulkan_types.typemap"
+  typemap_file = open(
+      os.path.join(output_dir, typemap_file_name), 'wb')
+  GenerateTypemapFile(typemap_file)
+  typemap_file.close()
+
+  check_failed_filenames = []
+  if options.check:
+    for filename in [mojom_file_name, traits_header_file_name,
+                     traits_source_file_name, typemap_file_name]:
+      if not filecmp.cmp(os.path.join(output_dir, filename),
+                         os.path.join(_SELF_LOCATION, filename)):
+        check_failed_filenames.append(filename)
+
+  if check_failed_filenames:
+    print('Please run gpu/ipc/common/generate_vulkan_types.py')
+    print('Failed check on generated files:')
+    for filename in check_failed_filenames:
+      print(filename)
+    return 1
+
+  return 0
+
+
+if __name__ == '__main__':
+  sys.exit(main(sys.argv))
diff --git a/chromium/gpu/ipc/common/gpu_info.mojom b/chromium/gpu/ipc/common/gpu_info.mojom
index d0f8b34f23f..9942b5eb9da 100644
--- a/chromium/gpu/ipc/common/gpu_info.mojom
+++ b/chromium/gpu/ipc/common/gpu_info.mojom
@@ -9,6 +9,9 @@ import "gpu/ipc/common/dx_diag_node.mojom";
 import "mojo/public/mojom/base/time.mojom";
 import "ui/gfx/geometry/mojom/geometry.mojom";
 
+[EnableIf=supports_vulkan]
+import "gpu/ipc/common/vulkan_info.mojom";
+
 // gpu::GPUInfo::GPUDevice
 struct GpuDevice {
   uint32 vendor_id;
@@ -172,4 +175,8 @@ struct GpuInfo {
   uint64 system_visual;
   uint64 rgba_visual;
   bool oop_rasterization_supported;
+  bool subpixel_font_rendering;
+
+  [EnableIf=supports_vulkan]
+  VulkanInfo?
vulkan_info; }; diff --git a/chromium/gpu/ipc/common/gpu_info_mojom_traits.cc b/chromium/gpu/ipc/common/gpu_info_mojom_traits.cc index f777a5195e6..248daaf61ce 100644 --- a/chromium/gpu/ipc/common/gpu_info_mojom_traits.cc +++ b/chromium/gpu/ipc/common/gpu_info_mojom_traits.cc @@ -8,6 +8,10 @@ #include "base/logging.h" #include "mojo/public/cpp/base/time_mojom_traits.h" +#if BUILDFLAG(ENABLE_VULKAN) +#include "gpu/ipc/common/vulkan_info_mojom_traits.h" +#endif + namespace mojo { // static @@ -379,6 +383,7 @@ bool StructTraits<gpu::mojom::GpuInfoDataView, gpu::GPUInfo>::Read( out->rgba_visual = data.rgba_visual(); #endif out->oop_rasterization_supported = data.oop_rasterization_supported(); + out->subpixel_font_rendering = data.subpixel_font_rendering(); #if defined(OS_WIN) out->direct_composition = data.direct_composition(); @@ -412,7 +417,11 @@ bool StructTraits<gpu::mojom::GpuInfoDataView, gpu::GPUInfo>::Read( data.ReadVideoEncodeAcceleratorSupportedProfiles( &out->video_encode_accelerator_supported_profiles) && data.ReadImageDecodeAcceleratorSupportedProfiles( - &out->image_decode_accelerator_supported_profiles); + &out->image_decode_accelerator_supported_profiles) && +#if BUILDFLAG(ENABLE_VULKAN) + data.ReadVulkanInfo(&out->vulkan_info) && +#endif + true; } } // namespace mojo diff --git a/chromium/gpu/ipc/common/gpu_info_mojom_traits.h b/chromium/gpu/ipc/common/gpu_info_mojom_traits.h index b617a1781e6..b8b22696781 100644 --- a/chromium/gpu/ipc/common/gpu_info_mojom_traits.h +++ b/chromium/gpu/ipc/common/gpu_info_mojom_traits.h @@ -391,6 +391,17 @@ struct StructTraits<gpu::mojom::GpuInfoDataView, gpu::GPUInfo> { static bool oop_rasterization_supported(const gpu::GPUInfo& input) { return input.oop_rasterization_supported; } + + static bool subpixel_font_rendering(const gpu::GPUInfo& input) { + return input.subpixel_font_rendering; + } + +#if BUILDFLAG(ENABLE_VULKAN) + static const base::Optional<gpu::VulkanInfo> vulkan_info( + const gpu::GPUInfo& input) { + return input.vulkan_info; + } +#endif }; } // namespace mojo diff --git a/chromium/gpu/ipc/common/gpu_messages.h b/chromium/gpu/ipc/common/gpu_messages.h index 2b8e00f1350..ace22e5c450 100644 --- a/chromium/gpu/ipc/common/gpu_messages.h +++ b/chromium/gpu/ipc/common/gpu_messages.h @@ -13,7 +13,6 @@ #include <string> #include <vector> -#include "base/memory/shared_memory.h" #include "base/optional.h" #include "base/unguessable_token.h" #include "build/build_config.h" @@ -42,6 +41,7 @@ #include "ui/gfx/native_widget_types.h" #include "ui/gfx/presentation_feedback.h" #include "ui/gfx/swap_result.h" +#include "ui/gl/gpu_preference.h" #include "url/ipc/url_param_traits.h" #if defined(OS_MACOSX) @@ -296,7 +296,8 @@ IPC_MESSAGE_ROUTED1(GpuCommandBufferMsg_ConsoleMsg, GPUCommandBufferConsoleMessage /* msg */) // Sent by the GPU process to notify the renderer process of a GPU switch. -IPC_MESSAGE_ROUTED0(GpuCommandBufferMsg_GpuSwitched) +IPC_MESSAGE_ROUTED1(GpuCommandBufferMsg_GpuSwitched, + gl::GpuPreference /* active_gpu_heuristic */) // Register an existing shared memory transfer buffer. The id that can be // used to identify the transfer buffer from a command buffer. diff --git a/chromium/gpu/ipc/common/gpu_preferences.mojom b/chromium/gpu/ipc/common/gpu_preferences.mojom index 3fd75af7210..305d8a294c7 100644 --- a/chromium/gpu/ipc/common/gpu_preferences.mojom +++ b/chromium/gpu/ipc/common/gpu_preferences.mojom @@ -19,6 +19,15 @@ enum VulkanImplementationName { kLast = kSwiftshader, }; +// Corresponds to gpu::GrContextType. 
+enum GrContextType { + kGL = 0, + kVulkan = 1, + kMetal = 2, + kDawn = 3, + kLast = kDawn, +}; + // gpu::GpuPreferences struct GpuPreferences { bool disable_accelerated_video_decode; @@ -59,12 +68,12 @@ struct GpuPreferences { bool disable_biplanar_gpu_memory_buffers_for_video_frames; array<gfx.mojom.BufferUsageAndFormat> texture_target_exception_list; - bool disable_gpu_driver_bug_workarounds; bool ignore_gpu_blacklist; bool enable_oop_rasterization; bool disable_oop_rasterization; bool enable_oop_rasterization_ddl; bool watchdog_starts_backgrounded; + GrContextType gr_context_type; VulkanImplementationName use_vulkan; bool enforce_vulkan_protected_memory; bool disable_vulkan_surface; @@ -72,6 +81,7 @@ struct GpuPreferences { bool enable_metal; bool enable_gpu_benchmarking_extension; bool enable_webgpu; + bool enable_gpu_blocked_time_metric; [EnableIf=use_ozone] mojo_base.mojom.MessagePumpType message_pump_type; diff --git a/chromium/gpu/ipc/common/gpu_preferences_mojom_traits.h b/chromium/gpu/ipc/common/gpu_preferences_mojom_traits.h index a481a354458..48441160c64 100644 --- a/chromium/gpu/ipc/common/gpu_preferences_mojom_traits.h +++ b/chromium/gpu/ipc/common/gpu_preferences_mojom_traits.h @@ -19,6 +19,42 @@ namespace mojo { template <> +struct EnumTraits<gpu::mojom::GrContextType, gpu::GrContextType> { + static gpu::mojom::GrContextType ToMojom(gpu::GrContextType input) { + switch (input) { + case gpu::GrContextType::kGL: + return gpu::mojom::GrContextType::kGL; + case gpu::GrContextType::kVulkan: + return gpu::mojom::GrContextType::kVulkan; + case gpu::GrContextType::kMetal: + return gpu::mojom::GrContextType::kMetal; + case gpu::GrContextType::kDawn: + return gpu::mojom::GrContextType::kDawn; + } + NOTREACHED(); + return gpu::mojom::GrContextType::kGL; + } + static bool FromMojom(gpu::mojom::GrContextType input, + gpu::GrContextType* out) { + switch (input) { + case gpu::mojom::GrContextType::kGL: + *out = gpu::GrContextType::kGL; + return true; + case gpu::mojom::GrContextType::kVulkan: + *out = gpu::GrContextType::kVulkan; + return true; + case gpu::mojom::GrContextType::kMetal: + *out = gpu::GrContextType::kMetal; + return true; + case gpu::mojom::GrContextType::kDawn: + *out = gpu::GrContextType::kDawn; + return true; + } + return false; + } +}; + +template <> struct EnumTraits<gpu::mojom::VulkanImplementationName, gpu::VulkanImplementationName> { static gpu::mojom::VulkanImplementationName ToMojom( @@ -113,13 +149,13 @@ struct StructTraits<gpu::mojom::GpuPreferencesDataView, gpu::GpuPreferences> { out->texture_target_exception_list.push_back(usage_format); } - out->disable_gpu_driver_bug_workarounds = - prefs.disable_gpu_driver_bug_workarounds(); out->ignore_gpu_blacklist = prefs.ignore_gpu_blacklist(); out->enable_oop_rasterization = prefs.enable_oop_rasterization(); out->disable_oop_rasterization = prefs.disable_oop_rasterization(); out->enable_oop_rasterization_ddl = prefs.enable_oop_rasterization_ddl(); out->watchdog_starts_backgrounded = prefs.watchdog_starts_backgrounded(); + if (!prefs.ReadGrContextType(&out->gr_context_type)) + return false; if (!prefs.ReadUseVulkan(&out->use_vulkan)) return false; out->enforce_vulkan_protected_memory = @@ -131,6 +167,8 @@ struct StructTraits<gpu::mojom::GpuPreferencesDataView, gpu::GpuPreferences> { out->enable_gpu_benchmarking_extension = prefs.enable_gpu_benchmarking_extension(); out->enable_webgpu = prefs.enable_webgpu(); + out->enable_gpu_blocked_time_metric = + prefs.enable_gpu_blocked_time_metric(); #if defined(USE_OZONE) if 
(!prefs.ReadMessagePumpType(&out->message_pump_type)) @@ -246,10 +284,6 @@ struct StructTraits<gpu::mojom::GpuPreferencesDataView, gpu::GpuPreferences> { texture_target_exception_list(const gpu::GpuPreferences& prefs) { return prefs.texture_target_exception_list; } - static bool disable_gpu_driver_bug_workarounds( - const gpu::GpuPreferences& prefs) { - return prefs.disable_gpu_driver_bug_workarounds; - } static bool ignore_gpu_blacklist(const gpu::GpuPreferences& prefs) { return prefs.ignore_gpu_blacklist; } @@ -265,6 +299,9 @@ struct StructTraits<gpu::mojom::GpuPreferencesDataView, gpu::GpuPreferences> { static bool watchdog_starts_backgrounded(const gpu::GpuPreferences& prefs) { return prefs.watchdog_starts_backgrounded; } + static gpu::GrContextType gr_context_type(const gpu::GpuPreferences& prefs) { + return prefs.gr_context_type; + } static gpu::VulkanImplementationName use_vulkan( const gpu::GpuPreferences& prefs) { return prefs.use_vulkan; @@ -290,6 +327,9 @@ struct StructTraits<gpu::mojom::GpuPreferencesDataView, gpu::GpuPreferences> { static bool enable_webgpu(const gpu::GpuPreferences& prefs) { return prefs.enable_webgpu; } + static bool enable_gpu_blocked_time_metric(const gpu::GpuPreferences& prefs) { + return prefs.enable_gpu_blocked_time_metric; + } #if defined(USE_OZONE) static base::MessagePumpType message_pump_type( const gpu::GpuPreferences& prefs) { diff --git a/chromium/gpu/ipc/common/gpu_watchdog_timeout.h b/chromium/gpu/ipc/common/gpu_watchdog_timeout.h index 07332c723ae..f67352aa7d2 100644 --- a/chromium/gpu/ipc/common/gpu_watchdog_timeout.h +++ b/chromium/gpu/ipc/common/gpu_watchdog_timeout.h @@ -33,7 +33,7 @@ constexpr int kRestartFactor = 2; // It takes longer to initialize GPU process in Windows. See // https://crbug.com/949839 for details. #if defined(OS_WIN) -constexpr int kInitFactor = 4; +constexpr int kInitFactor = 2; #else constexpr int kInitFactor = 1; #endif diff --git a/chromium/gpu/ipc/common/typemaps.gni b/chromium/gpu/ipc/common/typemaps.gni index 397b2b00c21..e27bfe23ec2 100644 --- a/chromium/gpu/ipc/common/typemaps.gni +++ b/chromium/gpu/ipc/common/typemaps.gni @@ -16,4 +16,6 @@ typemaps = [ "//gpu/ipc/common/surface_handle.typemap", "//gpu/ipc/common/sync_token.typemap", "//gpu/ipc/common/vulkan_ycbcr_info.typemap", + "//gpu/ipc/common/vulkan_info.typemap", + "//gpu/ipc/common/vulkan_types.typemap", ] diff --git a/chromium/gpu/ipc/common/vulkan_info.mojom b/chromium/gpu/ipc/common/vulkan_info.mojom new file mode 100644 index 00000000000..f80cc07f3e4 --- /dev/null +++ b/chromium/gpu/ipc/common/vulkan_info.mojom @@ -0,0 +1,26 @@ +// Copyright 2019 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
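
[Editorial aside, not part of this commit.] In the generator shown earlier, ParseVulkanHeaderFile() records each struct as a list of (field_name, field_type, array_len) tuples, and WriteMojomStruct() maps the C types through _type_map to produce the mojom definitions that follow. A self-contained sketch of that emission step (a simplified re-implementation, not the generator itself), using VkExtent3D, whose generated form appears in vulkan_types.mojom later in this change:

_TYPE_MAP = {"uint32_t": "uint32"}  # subset of the generator's _type_map

def emit_mojom_struct(name, fields):
  # Mirrors WriteMojomStruct(): plain fields become "<type> <name>;",
  # fixed-size C arrays become "array<type, N> name;".
  lines = ["struct %s {" % name]
  for field_name, field_type, array_len in fields:
    mojo_type = _TYPE_MAP[field_type]
    if array_len:
      lines.append("  array<%s, %s> %s;" % (mojo_type, array_len, field_name))
    else:
      lines.append("  %s %s;" % (mojo_type, field_name))
  lines.append("};")
  return "\n".join(lines)

# VkExtent3D as the parser would record it: three plain uint32_t fields.
print(emit_mojom_struct("VkExtent3D", [("width", "uint32_t", None),
                                       ("height", "uint32_t", None),
                                       ("depth", "uint32_t", None)]))
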
+ +// gpu/config/vulkan_info.h +module gpu.mojom; + +import "gpu/ipc/common/vulkan_types.mojom"; + +struct VulkanPhysicalDeviceInfo { + VkPhysicalDeviceProperties properties; + array<VkLayerProperties> layers; + VkPhysicalDeviceFeatures features; + bool feature_sampler_ycbcr_conversion; + bool feature_protected_memory; + array<VkQueueFamilyProperties> queue_families; +}; + +struct VulkanInfo { + uint32 api_version; + uint32 used_api_version; + array<VkExtensionProperties> instance_extensions; + array<string> enabled_instance_extensions; + array<VkLayerProperties> instance_layers; + array<VulkanPhysicalDeviceInfo> physical_devices; +}; diff --git a/chromium/gpu/ipc/common/vulkan_info.typemap b/chromium/gpu/ipc/common/vulkan_info.typemap new file mode 100644 index 00000000000..d61d1095f12 --- /dev/null +++ b/chromium/gpu/ipc/common/vulkan_info.typemap @@ -0,0 +1,16 @@ +# Copyright 2019 The Chromium Authors. All rights reserved. +# Use of this source code is governed by a BSD-style license that can be +# found in the LICENSE file. + +mojom = "//gpu/ipc/common/vulkan_info.mojom" +public_headers = [ "//gpu/config/vulkan_info.h" ] +traits_headers = [ "//gpu/ipc/common/vulkan_info_mojom_traits.h" ] +public_deps = [ + # "//gpu/config", + "//gpu/ipc/common:vulkan_types", + "//gpu/ipc/common:vulkan_types_mojom_traits", +] +type_mappings = [ + "gpu.mojom.VulkanPhysicalDeviceInfo=::gpu::VulkanPhysicalDeviceInfo", + "gpu.mojom.VulkanInfo=::gpu::VulkanInfo", +] diff --git a/chromium/gpu/ipc/common/vulkan_info_mojom_traits.h b/chromium/gpu/ipc/common/vulkan_info_mojom_traits.h new file mode 100644 index 00000000000..9b67d962139 --- /dev/null +++ b/chromium/gpu/ipc/common/vulkan_info_mojom_traits.h @@ -0,0 +1,118 @@ +// Copyright 2019 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
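
[Editorial aside, not part of this commit.] The mojom enum value names used throughout the generated files below (e.g. INTEGRATED_GPU for VK_PHYSICAL_DEVICE_TYPE_INTEGRATED_GPU) come from the generator's ValueNameToVALUE_NAME(), which upper-snake-cases the enum type name and then strips it as a prefix from each C enumerator. A standalone demo of that derivation:

import re

def value_name_to_VALUE_NAME(name):
  # Same regex as the generator: insert "_" before interior capitals.
  return re.sub(
      r'(?<=[a-z])[A-Z]|(?<!^)[A-Z](?=[a-z])', r"_\g<0>", name).upper()

prefix = value_name_to_VALUE_NAME("VkPhysicalDeviceType")
assert prefix == "VK_PHYSICAL_DEVICE_TYPE"
# Strip "<prefix>_" from the C enumerator to get the mojom value name:
c_value = "VK_PHYSICAL_DEVICE_TYPE_INTEGRATED_GPU"
assert c_value[len(prefix) + 1:] == "INTEGRATED_GPU"
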
+ +#ifndef GPU_IPC_COMMON_VULKAN_INFO_MOJOM_TRAITS_H_ +#define GPU_IPC_COMMON_VULKAN_INFO_MOJOM_TRAITS_H_ + +#include "base/containers/span.h" +#include "base/strings/string_piece.h" +#include "gpu/config/vulkan_info.h" +#include "gpu/ipc/common/vulkan_info.mojom-shared.h" +#include "gpu/ipc/common/vulkan_types_mojom_traits.h" + +namespace mojo { + +template <> +struct StructTraits<gpu::mojom::VulkanPhysicalDeviceInfoDataView, + gpu::VulkanPhysicalDeviceInfo> { + static const VkPhysicalDeviceProperties& properties( + const gpu::VulkanPhysicalDeviceInfo& input) { + return input.properties; + } + + static const std::vector<VkLayerProperties>& layers( + const gpu::VulkanPhysicalDeviceInfo& input) { + return input.layers; + } + + static const VkPhysicalDeviceFeatures& features( + const gpu::VulkanPhysicalDeviceInfo& input) { + return input.features; + } + + static bool feature_sampler_ycbcr_conversion( + const gpu::VulkanPhysicalDeviceInfo& input) { + return input.feature_sampler_ycbcr_conversion; + } + + static bool feature_protected_memory( + const gpu::VulkanPhysicalDeviceInfo& input) { + return input.feature_protected_memory; + } + + static const std::vector<VkQueueFamilyProperties>& queue_families( + const gpu::VulkanPhysicalDeviceInfo& input) { + return input.queue_families; + } + + static bool Read(gpu::mojom::VulkanPhysicalDeviceInfoDataView data, + gpu::VulkanPhysicalDeviceInfo* out) { + if (!data.ReadProperties(&out->properties)) + return false; + if (!data.ReadLayers(&out->layers)) + return false; + if (!data.ReadFeatures(&out->features)) + return false; + out->feature_sampler_ycbcr_conversion = + data.feature_sampler_ycbcr_conversion(); + out->feature_protected_memory = data.feature_protected_memory(); + if (!data.ReadQueueFamilies(&out->queue_families)) + return false; + return true; + } +}; + +template <> +struct StructTraits<gpu::mojom::VulkanInfoDataView, gpu::VulkanInfo> { + static uint32_t api_version(const gpu::VulkanInfo& input) { + return input.api_version; + } + + static uint32_t used_api_version(const gpu::VulkanInfo& input) { + return input.used_api_version; + } + + static const std::vector<VkExtensionProperties>& instance_extensions( + const gpu::VulkanInfo& input) { + return input.instance_extensions; + } + + static std::vector<base::StringPiece> enabled_instance_extensions( + const gpu::VulkanInfo& input) { + std::vector<base::StringPiece> extensions; + extensions.reserve(input.enabled_instance_extensions.size()); + for (const char* extension : input.enabled_instance_extensions) + extensions.emplace_back(extension); + return extensions; + } + + static const std::vector<VkLayerProperties>& instance_layers( + const gpu::VulkanInfo& input) { + return input.instance_layers; + } + + static const std::vector<gpu::VulkanPhysicalDeviceInfo>& physical_devices( + const gpu::VulkanInfo& input) { + return input.physical_devices; + } + + static bool Read(gpu::mojom::VulkanInfoDataView data, gpu::VulkanInfo* out) { + out->api_version = data.api_version(); + out->used_api_version = data.used_api_version(); + + if (!data.ReadInstanceExtensions(&out->instance_extensions)) + return false; + + std::vector<base::StringPiece> extensions; + if (!data.ReadEnabledInstanceExtensions(&extensions)) + return false; + out->SetEnabledInstanceExtensions(extensions); + return data.ReadInstanceLayers(&out->instance_layers) && + data.ReadPhysicalDevices(&out->physical_devices); + } +}; + +} // namespace mojo + +#endif // GPU_IPC_COMMON_VULKAN_INFO_MOJOM_TRAITS_H_ diff --git 
a/chromium/gpu/ipc/common/vulkan_types.h b/chromium/gpu/ipc/common/vulkan_types.h new file mode 100644 index 00000000000..0d65cd9235f --- /dev/null +++ b/chromium/gpu/ipc/common/vulkan_types.h @@ -0,0 +1,10 @@ +// Copyright 2019 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#ifndef GPU_IPC_COMMON_VULKAN_TYPES_H_ +#define GPU_IPC_COMMON_VULKAN_TYPES_H_ + +#include <vulkan/vulkan.h> + +#endif // GPU_IPC_COMMON_VULKAN_TYPES_H_ diff --git a/chromium/gpu/ipc/common/vulkan_types.mojom b/chromium/gpu/ipc/common/vulkan_types.mojom new file mode 100644 index 00000000000..8f13e182cd6 --- /dev/null +++ b/chromium/gpu/ipc/common/vulkan_types.mojom @@ -0,0 +1,232 @@ +// Copyright 2019 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +// This file is auto-generated from +// gpu/ipc/common/generate_vulkan_types.py +// It's formatted by clang-format using chromium coding style: +// clang-format -i -style=chromium filename +// DO NOT EDIT! + +module gpu.mojom; + +struct VkExtensionProperties { + string extensionName; + uint32 specVersion; +}; + +struct VkLayerProperties { + string layerName; + uint32 specVersion; + uint32 implementationVersion; + string description; +}; + +enum VkPhysicalDeviceType { + OTHER = 0, + INTEGRATED_GPU = 1, + DISCRETE_GPU = 2, + VIRTUAL_GPU = 3, + CPU = 4, + INVALID_VALUE = -1, +}; + +struct VkPhysicalDeviceLimits { + uint32 maxImageDimension1D; + uint32 maxImageDimension2D; + uint32 maxImageDimension3D; + uint32 maxImageDimensionCube; + uint32 maxImageArrayLayers; + uint32 maxTexelBufferElements; + uint32 maxUniformBufferRange; + uint32 maxStorageBufferRange; + uint32 maxPushConstantsSize; + uint32 maxMemoryAllocationCount; + uint32 maxSamplerAllocationCount; + uint64 bufferImageGranularity; + uint64 sparseAddressSpaceSize; + uint32 maxBoundDescriptorSets; + uint32 maxPerStageDescriptorSamplers; + uint32 maxPerStageDescriptorUniformBuffers; + uint32 maxPerStageDescriptorStorageBuffers; + uint32 maxPerStageDescriptorSampledImages; + uint32 maxPerStageDescriptorStorageImages; + uint32 maxPerStageDescriptorInputAttachments; + uint32 maxPerStageResources; + uint32 maxDescriptorSetSamplers; + uint32 maxDescriptorSetUniformBuffers; + uint32 maxDescriptorSetUniformBuffersDynamic; + uint32 maxDescriptorSetStorageBuffers; + uint32 maxDescriptorSetStorageBuffersDynamic; + uint32 maxDescriptorSetSampledImages; + uint32 maxDescriptorSetStorageImages; + uint32 maxDescriptorSetInputAttachments; + uint32 maxVertexInputAttributes; + uint32 maxVertexInputBindings; + uint32 maxVertexInputAttributeOffset; + uint32 maxVertexInputBindingStride; + uint32 maxVertexOutputComponents; + uint32 maxTessellationGenerationLevel; + uint32 maxTessellationPatchSize; + uint32 maxTessellationControlPerVertexInputComponents; + uint32 maxTessellationControlPerVertexOutputComponents; + uint32 maxTessellationControlPerPatchOutputComponents; + uint32 maxTessellationControlTotalOutputComponents; + uint32 maxTessellationEvaluationInputComponents; + uint32 maxTessellationEvaluationOutputComponents; + uint32 maxGeometryShaderInvocations; + uint32 maxGeometryInputComponents; + uint32 maxGeometryOutputComponents; + uint32 maxGeometryOutputVertices; + uint32 maxGeometryTotalOutputComponents; + uint32 maxFragmentInputComponents; + uint32 maxFragmentOutputAttachments; + uint32 maxFragmentDualSrcAttachments; + uint32 
maxFragmentCombinedOutputResources; + uint32 maxComputeSharedMemorySize; + array<uint32, 3> maxComputeWorkGroupCount; + uint32 maxComputeWorkGroupInvocations; + array<uint32, 3> maxComputeWorkGroupSize; + uint32 subPixelPrecisionBits; + uint32 subTexelPrecisionBits; + uint32 mipmapPrecisionBits; + uint32 maxDrawIndexedIndexValue; + uint32 maxDrawIndirectCount; + float maxSamplerLodBias; + float maxSamplerAnisotropy; + uint32 maxViewports; + array<uint32, 2> maxViewportDimensions; + array<float, 2> viewportBoundsRange; + uint32 viewportSubPixelBits; + uint64 minMemoryMapAlignment; + uint64 minTexelBufferOffsetAlignment; + uint64 minUniformBufferOffsetAlignment; + uint64 minStorageBufferOffsetAlignment; + int32 minTexelOffset; + uint32 maxTexelOffset; + int32 minTexelGatherOffset; + uint32 maxTexelGatherOffset; + float minInterpolationOffset; + float maxInterpolationOffset; + uint32 subPixelInterpolationOffsetBits; + uint32 maxFramebufferWidth; + uint32 maxFramebufferHeight; + uint32 maxFramebufferLayers; + uint32 framebufferColorSampleCounts; + uint32 framebufferDepthSampleCounts; + uint32 framebufferStencilSampleCounts; + uint32 framebufferNoAttachmentsSampleCounts; + uint32 maxColorAttachments; + uint32 sampledImageColorSampleCounts; + uint32 sampledImageIntegerSampleCounts; + uint32 sampledImageDepthSampleCounts; + uint32 sampledImageStencilSampleCounts; + uint32 storageImageSampleCounts; + uint32 maxSampleMaskWords; + bool timestampComputeAndGraphics; + float timestampPeriod; + uint32 maxClipDistances; + uint32 maxCullDistances; + uint32 maxCombinedClipAndCullDistances; + uint32 discreteQueuePriorities; + array<float, 2> pointSizeRange; + array<float, 2> lineWidthRange; + float pointSizeGranularity; + float lineWidthGranularity; + bool strictLines; + bool standardSampleLocations; + uint64 optimalBufferCopyOffsetAlignment; + uint64 optimalBufferCopyRowPitchAlignment; + uint64 nonCoherentAtomSize; +}; + +struct VkPhysicalDeviceSparseProperties { + bool residencyStandard2DBlockShape; + bool residencyStandard2DMultisampleBlockShape; + bool residencyStandard3DBlockShape; + bool residencyAlignedMipSize; + bool residencyNonResidentStrict; +}; + +struct VkPhysicalDeviceProperties { + uint32 apiVersion; + uint32 driverVersion; + uint32 vendorID; + uint32 deviceID; + VkPhysicalDeviceType deviceType; + string deviceName; + array<uint8, 16> pipelineCacheUUID; + VkPhysicalDeviceLimits limits; + VkPhysicalDeviceSparseProperties sparseProperties; +}; + +struct VkPhysicalDeviceFeatures { + bool robustBufferAccess; + bool fullDrawIndexUint32; + bool imageCubeArray; + bool independentBlend; + bool geometryShader; + bool tessellationShader; + bool sampleRateShading; + bool dualSrcBlend; + bool logicOp; + bool multiDrawIndirect; + bool drawIndirectFirstInstance; + bool depthClamp; + bool depthBiasClamp; + bool fillModeNonSolid; + bool depthBounds; + bool wideLines; + bool largePoints; + bool alphaToOne; + bool multiViewport; + bool samplerAnisotropy; + bool textureCompressionETC2; + bool textureCompressionASTC_LDR; + bool textureCompressionBC; + bool occlusionQueryPrecise; + bool pipelineStatisticsQuery; + bool vertexPipelineStoresAndAtomics; + bool fragmentStoresAndAtomics; + bool shaderTessellationAndGeometryPointSize; + bool shaderImageGatherExtended; + bool shaderStorageImageExtendedFormats; + bool shaderStorageImageMultisample; + bool shaderStorageImageReadWithoutFormat; + bool shaderStorageImageWriteWithoutFormat; + bool shaderUniformBufferArrayDynamicIndexing; + bool 
shaderSampledImageArrayDynamicIndexing; + bool shaderStorageBufferArrayDynamicIndexing; + bool shaderStorageImageArrayDynamicIndexing; + bool shaderClipDistance; + bool shaderCullDistance; + bool shaderFloat64; + bool shaderInt64; + bool shaderInt16; + bool shaderResourceResidency; + bool shaderResourceMinLod; + bool sparseBinding; + bool sparseResidencyBuffer; + bool sparseResidencyImage2D; + bool sparseResidencyImage3D; + bool sparseResidency2Samples; + bool sparseResidency4Samples; + bool sparseResidency8Samples; + bool sparseResidency16Samples; + bool sparseResidencyAliased; + bool variableMultisampleRate; + bool inheritedQueries; +}; + +struct VkExtent3D { + uint32 width; + uint32 height; + uint32 depth; +}; + +struct VkQueueFamilyProperties { + uint32 queueFlags; + uint32 queueCount; + uint32 timestampValidBits; + VkExtent3D minImageTransferGranularity; +}; diff --git a/chromium/gpu/ipc/common/vulkan_types.typemap b/chromium/gpu/ipc/common/vulkan_types.typemap new file mode 100644 index 00000000000..9506337e412 --- /dev/null +++ b/chromium/gpu/ipc/common/vulkan_types.typemap @@ -0,0 +1,28 @@ +# Copyright 2019 The Chromium Authors. All rights reserved. +# Use of this source code is governed by a BSD-style license that can be +# found in the LICENSE file. + +# This file is auto-generated from +# gpu/ipc/common/generate_vulkan_types.py +# DO NOT EDIT! + +mojom = "//gpu/ipc/common/vulkan_types.mojom" +public_headers = [ "//gpu/ipc/common/vulkan_types.h" ] +traits_headers = [ "//gpu/ipc/common/vulkan_types_mojom_traits.h" ] +sources = [ + "//gpu/ipc/common/vulkan_types_mojom_traits.cc", +] +public_deps = [ + "//gpu/ipc/common:vulkan_types", +] +type_mappings = [ + "gpu.mojom.VkExtensionProperties=::VkExtensionProperties", + "gpu.mojom.VkLayerProperties=::VkLayerProperties", + "gpu.mojom.VkPhysicalDeviceProperties=::VkPhysicalDeviceProperties", + "gpu.mojom.VkPhysicalDeviceType=::VkPhysicalDeviceType", + "gpu.mojom.VkPhysicalDeviceLimits=::VkPhysicalDeviceLimits", + "gpu.mojom.VkPhysicalDeviceSparseProperties=::VkPhysicalDeviceSparseProperties", + "gpu.mojom.VkPhysicalDeviceFeatures=::VkPhysicalDeviceFeatures", + "gpu.mojom.VkQueueFamilyProperties=::VkQueueFamilyProperties", + "gpu.mojom.VkExtent3D=::VkExtent3D", +] diff --git a/chromium/gpu/ipc/common/vulkan_types_mojom_traits.cc b/chromium/gpu/ipc/common/vulkan_types_mojom_traits.cc new file mode 100644 index 00000000000..9dc3878dcc8 --- /dev/null +++ b/chromium/gpu/ipc/common/vulkan_types_mojom_traits.cc @@ -0,0 +1,510 @@ +// Copyright 2019 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +// This file is auto-generated from +// gpu/ipc/common/generate_vulkan_types.py +// It's formatted by clang-format using chromium coding style: +// clang-format -i -style=chromium filename +// DO NOT EDIT! 
+ +#include "gpu/ipc/common/vulkan_info_mojom_traits.h" + +namespace mojo { + +// static +bool StructTraits< + gpu::mojom::VkExtensionPropertiesDataView, + VkExtensionProperties>::Read(gpu::mojom::VkExtensionPropertiesDataView data, + VkExtensionProperties* out) { + base::StringPiece extensionName; + if (!data.ReadExtensionName(&extensionName)) + return false; + extensionName.copy(out->extensionName, sizeof(out->extensionName)); + + out->specVersion = data.specVersion(); + + return true; +} + +// static +bool StructTraits<gpu::mojom::VkLayerPropertiesDataView, VkLayerProperties>:: + Read(gpu::mojom::VkLayerPropertiesDataView data, VkLayerProperties* out) { + base::StringPiece layerName; + if (!data.ReadLayerName(&layerName)) + return false; + layerName.copy(out->layerName, sizeof(out->layerName)); + + out->specVersion = data.specVersion(); + + out->implementationVersion = data.implementationVersion(); + + base::StringPiece description; + if (!data.ReadDescription(&description)) + return false; + description.copy(out->description, sizeof(out->description)); + + return true; +} + +// static +bool StructTraits<gpu::mojom::VkPhysicalDevicePropertiesDataView, + VkPhysicalDeviceProperties>:: + Read(gpu::mojom::VkPhysicalDevicePropertiesDataView data, + VkPhysicalDeviceProperties* out) { + out->apiVersion = data.apiVersion(); + + out->driverVersion = data.driverVersion(); + + out->vendorID = data.vendorID(); + + out->deviceID = data.deviceID(); + + if (!data.ReadDeviceType(&out->deviceType)) + return false; + + base::StringPiece deviceName; + if (!data.ReadDeviceName(&deviceName)) + return false; + deviceName.copy(out->deviceName, sizeof(out->deviceName)); + + base::span<uint8_t> pipelineCacheUUID(out->pipelineCacheUUID); + if (!data.ReadPipelineCacheUUID(&pipelineCacheUUID)) + return false; + + if (!data.ReadLimits(&out->limits)) + return false; + + if (!data.ReadSparseProperties(&out->sparseProperties)) + return false; + + return true; +} + +// static +bool StructTraits<gpu::mojom::VkPhysicalDeviceLimitsDataView, + VkPhysicalDeviceLimits>:: + Read(gpu::mojom::VkPhysicalDeviceLimitsDataView data, + VkPhysicalDeviceLimits* out) { + out->maxImageDimension1D = data.maxImageDimension1D(); + + out->maxImageDimension2D = data.maxImageDimension2D(); + + out->maxImageDimension3D = data.maxImageDimension3D(); + + out->maxImageDimensionCube = data.maxImageDimensionCube(); + + out->maxImageArrayLayers = data.maxImageArrayLayers(); + + out->maxTexelBufferElements = data.maxTexelBufferElements(); + + out->maxUniformBufferRange = data.maxUniformBufferRange(); + + out->maxStorageBufferRange = data.maxStorageBufferRange(); + + out->maxPushConstantsSize = data.maxPushConstantsSize(); + + out->maxMemoryAllocationCount = data.maxMemoryAllocationCount(); + + out->maxSamplerAllocationCount = data.maxSamplerAllocationCount(); + + out->bufferImageGranularity = data.bufferImageGranularity(); + + out->sparseAddressSpaceSize = data.sparseAddressSpaceSize(); + + out->maxBoundDescriptorSets = data.maxBoundDescriptorSets(); + + out->maxPerStageDescriptorSamplers = data.maxPerStageDescriptorSamplers(); + + out->maxPerStageDescriptorUniformBuffers = + data.maxPerStageDescriptorUniformBuffers(); + + out->maxPerStageDescriptorStorageBuffers = + data.maxPerStageDescriptorStorageBuffers(); + + out->maxPerStageDescriptorSampledImages = + data.maxPerStageDescriptorSampledImages(); + + out->maxPerStageDescriptorStorageImages = + data.maxPerStageDescriptorStorageImages(); + + out->maxPerStageDescriptorInputAttachments = + 
data.maxPerStageDescriptorInputAttachments(); + + out->maxPerStageResources = data.maxPerStageResources(); + + out->maxDescriptorSetSamplers = data.maxDescriptorSetSamplers(); + + out->maxDescriptorSetUniformBuffers = data.maxDescriptorSetUniformBuffers(); + + out->maxDescriptorSetUniformBuffersDynamic = + data.maxDescriptorSetUniformBuffersDynamic(); + + out->maxDescriptorSetStorageBuffers = data.maxDescriptorSetStorageBuffers(); + + out->maxDescriptorSetStorageBuffersDynamic = + data.maxDescriptorSetStorageBuffersDynamic(); + + out->maxDescriptorSetSampledImages = data.maxDescriptorSetSampledImages(); + + out->maxDescriptorSetStorageImages = data.maxDescriptorSetStorageImages(); + + out->maxDescriptorSetInputAttachments = + data.maxDescriptorSetInputAttachments(); + + out->maxVertexInputAttributes = data.maxVertexInputAttributes(); + + out->maxVertexInputBindings = data.maxVertexInputBindings(); + + out->maxVertexInputAttributeOffset = data.maxVertexInputAttributeOffset(); + + out->maxVertexInputBindingStride = data.maxVertexInputBindingStride(); + + out->maxVertexOutputComponents = data.maxVertexOutputComponents(); + + out->maxTessellationGenerationLevel = data.maxTessellationGenerationLevel(); + + out->maxTessellationPatchSize = data.maxTessellationPatchSize(); + + out->maxTessellationControlPerVertexInputComponents = + data.maxTessellationControlPerVertexInputComponents(); + + out->maxTessellationControlPerVertexOutputComponents = + data.maxTessellationControlPerVertexOutputComponents(); + + out->maxTessellationControlPerPatchOutputComponents = + data.maxTessellationControlPerPatchOutputComponents(); + + out->maxTessellationControlTotalOutputComponents = + data.maxTessellationControlTotalOutputComponents(); + + out->maxTessellationEvaluationInputComponents = + data.maxTessellationEvaluationInputComponents(); + + out->maxTessellationEvaluationOutputComponents = + data.maxTessellationEvaluationOutputComponents(); + + out->maxGeometryShaderInvocations = data.maxGeometryShaderInvocations(); + + out->maxGeometryInputComponents = data.maxGeometryInputComponents(); + + out->maxGeometryOutputComponents = data.maxGeometryOutputComponents(); + + out->maxGeometryOutputVertices = data.maxGeometryOutputVertices(); + + out->maxGeometryTotalOutputComponents = + data.maxGeometryTotalOutputComponents(); + + out->maxFragmentInputComponents = data.maxFragmentInputComponents(); + + out->maxFragmentOutputAttachments = data.maxFragmentOutputAttachments(); + + out->maxFragmentDualSrcAttachments = data.maxFragmentDualSrcAttachments(); + + out->maxFragmentCombinedOutputResources = + data.maxFragmentCombinedOutputResources(); + + out->maxComputeSharedMemorySize = data.maxComputeSharedMemorySize(); + + base::span<uint32_t> maxComputeWorkGroupCount(out->maxComputeWorkGroupCount); + if (!data.ReadMaxComputeWorkGroupCount(&maxComputeWorkGroupCount)) + return false; + + out->maxComputeWorkGroupInvocations = data.maxComputeWorkGroupInvocations(); + + base::span<uint32_t> maxComputeWorkGroupSize(out->maxComputeWorkGroupSize); + if (!data.ReadMaxComputeWorkGroupSize(&maxComputeWorkGroupSize)) + return false; + + out->subPixelPrecisionBits = data.subPixelPrecisionBits(); + + out->subTexelPrecisionBits = data.subTexelPrecisionBits(); + + out->mipmapPrecisionBits = data.mipmapPrecisionBits(); + + out->maxDrawIndexedIndexValue = data.maxDrawIndexedIndexValue(); + + out->maxDrawIndirectCount = data.maxDrawIndirectCount(); + + out->maxSamplerLodBias = data.maxSamplerLodBias(); + + out->maxSamplerAnisotropy = 
data.maxSamplerAnisotropy(); + + out->maxViewports = data.maxViewports(); + + base::span<uint32_t> maxViewportDimensions(out->maxViewportDimensions); + if (!data.ReadMaxViewportDimensions(&maxViewportDimensions)) + return false; + + base::span<float> viewportBoundsRange(out->viewportBoundsRange); + if (!data.ReadViewportBoundsRange(&viewportBoundsRange)) + return false; + + out->viewportSubPixelBits = data.viewportSubPixelBits(); + + out->minMemoryMapAlignment = data.minMemoryMapAlignment(); + + out->minTexelBufferOffsetAlignment = data.minTexelBufferOffsetAlignment(); + + out->minUniformBufferOffsetAlignment = data.minUniformBufferOffsetAlignment(); + + out->minStorageBufferOffsetAlignment = data.minStorageBufferOffsetAlignment(); + + out->minTexelOffset = data.minTexelOffset(); + + out->maxTexelOffset = data.maxTexelOffset(); + + out->minTexelGatherOffset = data.minTexelGatherOffset(); + + out->maxTexelGatherOffset = data.maxTexelGatherOffset(); + + out->minInterpolationOffset = data.minInterpolationOffset(); + + out->maxInterpolationOffset = data.maxInterpolationOffset(); + + out->subPixelInterpolationOffsetBits = data.subPixelInterpolationOffsetBits(); + + out->maxFramebufferWidth = data.maxFramebufferWidth(); + + out->maxFramebufferHeight = data.maxFramebufferHeight(); + + out->maxFramebufferLayers = data.maxFramebufferLayers(); + + out->framebufferColorSampleCounts = data.framebufferColorSampleCounts(); + + out->framebufferDepthSampleCounts = data.framebufferDepthSampleCounts(); + + out->framebufferStencilSampleCounts = data.framebufferStencilSampleCounts(); + + out->framebufferNoAttachmentsSampleCounts = + data.framebufferNoAttachmentsSampleCounts(); + + out->maxColorAttachments = data.maxColorAttachments(); + + out->sampledImageColorSampleCounts = data.sampledImageColorSampleCounts(); + + out->sampledImageIntegerSampleCounts = data.sampledImageIntegerSampleCounts(); + + out->sampledImageDepthSampleCounts = data.sampledImageDepthSampleCounts(); + + out->sampledImageStencilSampleCounts = data.sampledImageStencilSampleCounts(); + + out->storageImageSampleCounts = data.storageImageSampleCounts(); + + out->maxSampleMaskWords = data.maxSampleMaskWords(); + + out->timestampComputeAndGraphics = data.timestampComputeAndGraphics(); + + out->timestampPeriod = data.timestampPeriod(); + + out->maxClipDistances = data.maxClipDistances(); + + out->maxCullDistances = data.maxCullDistances(); + + out->maxCombinedClipAndCullDistances = data.maxCombinedClipAndCullDistances(); + + out->discreteQueuePriorities = data.discreteQueuePriorities(); + + base::span<float> pointSizeRange(out->pointSizeRange); + if (!data.ReadPointSizeRange(&pointSizeRange)) + return false; + + base::span<float> lineWidthRange(out->lineWidthRange); + if (!data.ReadLineWidthRange(&lineWidthRange)) + return false; + + out->pointSizeGranularity = data.pointSizeGranularity(); + + out->lineWidthGranularity = data.lineWidthGranularity(); + + out->strictLines = data.strictLines(); + + out->standardSampleLocations = data.standardSampleLocations(); + + out->optimalBufferCopyOffsetAlignment = + data.optimalBufferCopyOffsetAlignment(); + + out->optimalBufferCopyRowPitchAlignment = + data.optimalBufferCopyRowPitchAlignment(); + + out->nonCoherentAtomSize = data.nonCoherentAtomSize(); + + return true; +} + +// static +bool StructTraits<gpu::mojom::VkPhysicalDeviceSparsePropertiesDataView, + VkPhysicalDeviceSparseProperties>:: + Read(gpu::mojom::VkPhysicalDeviceSparsePropertiesDataView data, + VkPhysicalDeviceSparseProperties* out) { + 
out->residencyStandard2DBlockShape = data.residencyStandard2DBlockShape(); + + out->residencyStandard2DMultisampleBlockShape = + data.residencyStandard2DMultisampleBlockShape(); + + out->residencyStandard3DBlockShape = data.residencyStandard3DBlockShape(); + + out->residencyAlignedMipSize = data.residencyAlignedMipSize(); + + out->residencyNonResidentStrict = data.residencyNonResidentStrict(); + + return true; +} + +// static +bool StructTraits<gpu::mojom::VkPhysicalDeviceFeaturesDataView, + VkPhysicalDeviceFeatures>:: + Read(gpu::mojom::VkPhysicalDeviceFeaturesDataView data, + VkPhysicalDeviceFeatures* out) { + out->robustBufferAccess = data.robustBufferAccess(); + + out->fullDrawIndexUint32 = data.fullDrawIndexUint32(); + + out->imageCubeArray = data.imageCubeArray(); + + out->independentBlend = data.independentBlend(); + + out->geometryShader = data.geometryShader(); + + out->tessellationShader = data.tessellationShader(); + + out->sampleRateShading = data.sampleRateShading(); + + out->dualSrcBlend = data.dualSrcBlend(); + + out->logicOp = data.logicOp(); + + out->multiDrawIndirect = data.multiDrawIndirect(); + + out->drawIndirectFirstInstance = data.drawIndirectFirstInstance(); + + out->depthClamp = data.depthClamp(); + + out->depthBiasClamp = data.depthBiasClamp(); + + out->fillModeNonSolid = data.fillModeNonSolid(); + + out->depthBounds = data.depthBounds(); + + out->wideLines = data.wideLines(); + + out->largePoints = data.largePoints(); + + out->alphaToOne = data.alphaToOne(); + + out->multiViewport = data.multiViewport(); + + out->samplerAnisotropy = data.samplerAnisotropy(); + + out->textureCompressionETC2 = data.textureCompressionETC2(); + + out->textureCompressionASTC_LDR = data.textureCompressionASTC_LDR(); + + out->textureCompressionBC = data.textureCompressionBC(); + + out->occlusionQueryPrecise = data.occlusionQueryPrecise(); + + out->pipelineStatisticsQuery = data.pipelineStatisticsQuery(); + + out->vertexPipelineStoresAndAtomics = data.vertexPipelineStoresAndAtomics(); + + out->fragmentStoresAndAtomics = data.fragmentStoresAndAtomics(); + + out->shaderTessellationAndGeometryPointSize = + data.shaderTessellationAndGeometryPointSize(); + + out->shaderImageGatherExtended = data.shaderImageGatherExtended(); + + out->shaderStorageImageExtendedFormats = + data.shaderStorageImageExtendedFormats(); + + out->shaderStorageImageMultisample = data.shaderStorageImageMultisample(); + + out->shaderStorageImageReadWithoutFormat = + data.shaderStorageImageReadWithoutFormat(); + + out->shaderStorageImageWriteWithoutFormat = + data.shaderStorageImageWriteWithoutFormat(); + + out->shaderUniformBufferArrayDynamicIndexing = + data.shaderUniformBufferArrayDynamicIndexing(); + + out->shaderSampledImageArrayDynamicIndexing = + data.shaderSampledImageArrayDynamicIndexing(); + + out->shaderStorageBufferArrayDynamicIndexing = + data.shaderStorageBufferArrayDynamicIndexing(); + + out->shaderStorageImageArrayDynamicIndexing = + data.shaderStorageImageArrayDynamicIndexing(); + + out->shaderClipDistance = data.shaderClipDistance(); + + out->shaderCullDistance = data.shaderCullDistance(); + + out->shaderFloat64 = data.shaderFloat64(); + + out->shaderInt64 = data.shaderInt64(); + + out->shaderInt16 = data.shaderInt16(); + + out->shaderResourceResidency = data.shaderResourceResidency(); + + out->shaderResourceMinLod = data.shaderResourceMinLod(); + + out->sparseBinding = data.sparseBinding(); + + out->sparseResidencyBuffer = data.sparseResidencyBuffer(); + + out->sparseResidencyImage2D = 
data.sparseResidencyImage2D(); + + out->sparseResidencyImage3D = data.sparseResidencyImage3D(); + + out->sparseResidency2Samples = data.sparseResidency2Samples(); + + out->sparseResidency4Samples = data.sparseResidency4Samples(); + + out->sparseResidency8Samples = data.sparseResidency8Samples(); + + out->sparseResidency16Samples = data.sparseResidency16Samples(); + + out->sparseResidencyAliased = data.sparseResidencyAliased(); + + out->variableMultisampleRate = data.variableMultisampleRate(); + + out->inheritedQueries = data.inheritedQueries(); + + return true; +} + +// static +bool StructTraits<gpu::mojom::VkQueueFamilyPropertiesDataView, + VkQueueFamilyProperties>:: + Read(gpu::mojom::VkQueueFamilyPropertiesDataView data, + VkQueueFamilyProperties* out) { + out->queueFlags = data.queueFlags(); + + out->queueCount = data.queueCount(); + + out->timestampValidBits = data.timestampValidBits(); + + if (!data.ReadMinImageTransferGranularity(&out->minImageTransferGranularity)) + return false; + + return true; +} + +// static +bool StructTraits<gpu::mojom::VkExtent3DDataView, VkExtent3D>::Read( + gpu::mojom::VkExtent3DDataView data, + VkExtent3D* out) { + out->width = data.width(); + + out->height = data.height(); + + out->depth = data.depth(); + + return true; +} + +} // namespace mojo
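
[Editorial aside, not part of this commit.] The traits source above, together with the EnumTraits emitted into vulkan_types_mojom_traits.h below, gives every generated enum a total round trip in which INVALID_VALUE is rejected on deserialization. The same contract, modeled in Python purely for illustration with the VkPhysicalDeviceType values from vulkan_types.mojom:

C_TO_MOJOM = {
    "VK_PHYSICAL_DEVICE_TYPE_OTHER": "OTHER",
    "VK_PHYSICAL_DEVICE_TYPE_INTEGRATED_GPU": "INTEGRATED_GPU",
    "VK_PHYSICAL_DEVICE_TYPE_DISCRETE_GPU": "DISCRETE_GPU",
    "VK_PHYSICAL_DEVICE_TYPE_VIRTUAL_GPU": "VIRTUAL_GPU",
    "VK_PHYSICAL_DEVICE_TYPE_CPU": "CPU",
}
MOJOM_TO_C = {v: k for k, v in C_TO_MOJOM.items()}

def from_mojom(value):
  # Mirrors FromMojom(): INVALID_VALUE (and anything unknown) fails the read.
  return MOJOM_TO_C.get(value)  # None corresponds to "return false" in C++

for c_value, mojom_value in C_TO_MOJOM.items():
  assert from_mojom(mojom_value) == c_value  # lossless round trip
assert from_mojom("INVALID_VALUE") is None   # never accepted on receive
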
\ No newline at end of file diff --git a/chromium/gpu/ipc/common/vulkan_types_mojom_traits.h b/chromium/gpu/ipc/common/vulkan_types_mojom_traits.h new file mode 100644 index 00000000000..fa4aef92498 --- /dev/null +++ b/chromium/gpu/ipc/common/vulkan_types_mojom_traits.h @@ -0,0 +1,951 @@ +// Copyright 2019 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +// This file is auto-generated from +// gpu/ipc/common/generate_vulkan_types.py +// It's formatted by clang-format using chromium coding style: +// clang-format -i -style=chromium filename +// DO NOT EDIT! + +#ifndef GPU_IPC_COMMON_VULKAN_TYPES_MOJOM_TRAITS_H_ +#define GPU_IPC_COMMON_VULKAN_TYPES_MOJOM_TRAITS_H_ + +#include "base/containers/span.h" +#include "base/strings/string_piece.h" +#include "gpu/ipc/common/vulkan_types.h" +#include "gpu/ipc/common/vulkan_types.mojom-shared.h" + +namespace mojo { + +template <> +struct StructTraits<gpu::mojom::VkExtensionPropertiesDataView, + VkExtensionProperties> { + static base::StringPiece extensionName(const VkExtensionProperties& input) { + return input.extensionName; + } + + static uint32_t specVersion(const VkExtensionProperties& input) { + return input.specVersion; + } + + static bool Read(gpu::mojom::VkExtensionPropertiesDataView data, + VkExtensionProperties* out); +}; + +template <> +struct StructTraits<gpu::mojom::VkLayerPropertiesDataView, VkLayerProperties> { + static base::StringPiece layerName(const VkLayerProperties& input) { + return input.layerName; + } + + static uint32_t specVersion(const VkLayerProperties& input) { + return input.specVersion; + } + + static uint32_t implementationVersion(const VkLayerProperties& input) { + return input.implementationVersion; + } + + static base::StringPiece description(const VkLayerProperties& input) { + return input.description; + } + + static bool Read(gpu::mojom::VkLayerPropertiesDataView data, + VkLayerProperties* out); +}; + +template <> +struct StructTraits<gpu::mojom::VkPhysicalDevicePropertiesDataView, + VkPhysicalDeviceProperties> { + static uint32_t apiVersion(const VkPhysicalDeviceProperties& input) { + return input.apiVersion; + } + + static uint32_t driverVersion(const VkPhysicalDeviceProperties& input) { + return input.driverVersion; + } + + static uint32_t vendorID(const VkPhysicalDeviceProperties& input) { + return input.vendorID; + } + + static uint32_t deviceID(const VkPhysicalDeviceProperties& input) { + return input.deviceID; + } + + static VkPhysicalDeviceType deviceType( + const VkPhysicalDeviceProperties& input) { + return input.deviceType; + } + + static base::StringPiece deviceName(const VkPhysicalDeviceProperties& input) { + return input.deviceName; + } + + static base::span<const uint8_t> pipelineCacheUUID( + const VkPhysicalDeviceProperties& input) { + return input.pipelineCacheUUID; + } + + static const VkPhysicalDeviceLimits& limits( + const VkPhysicalDeviceProperties& input) { + return input.limits; + } + + static const VkPhysicalDeviceSparseProperties& sparseProperties( + const VkPhysicalDeviceProperties& input) { + return input.sparseProperties; + } + + static bool Read(gpu::mojom::VkPhysicalDevicePropertiesDataView data, + VkPhysicalDeviceProperties* out); +}; + +template <> +struct EnumTraits<gpu::mojom::VkPhysicalDeviceType, VkPhysicalDeviceType> { + static gpu::mojom::VkPhysicalDeviceType ToMojom(VkPhysicalDeviceType input) { + switch (input) { + case VkPhysicalDeviceType::VK_PHYSICAL_DEVICE_TYPE_OTHER: + 
return gpu::mojom::VkPhysicalDeviceType::OTHER; + case VkPhysicalDeviceType::VK_PHYSICAL_DEVICE_TYPE_INTEGRATED_GPU: + return gpu::mojom::VkPhysicalDeviceType::INTEGRATED_GPU; + case VkPhysicalDeviceType::VK_PHYSICAL_DEVICE_TYPE_DISCRETE_GPU: + return gpu::mojom::VkPhysicalDeviceType::DISCRETE_GPU; + case VkPhysicalDeviceType::VK_PHYSICAL_DEVICE_TYPE_VIRTUAL_GPU: + return gpu::mojom::VkPhysicalDeviceType::VIRTUAL_GPU; + case VkPhysicalDeviceType::VK_PHYSICAL_DEVICE_TYPE_CPU: + return gpu::mojom::VkPhysicalDeviceType::CPU; + default: + NOTREACHED(); + return gpu::mojom::VkPhysicalDeviceType::INVALID_VALUE; + } + } + + static bool FromMojom(gpu::mojom::VkPhysicalDeviceType input, + VkPhysicalDeviceType* out) { + switch (input) { + case gpu::mojom::VkPhysicalDeviceType::OTHER: + *out = VkPhysicalDeviceType::VK_PHYSICAL_DEVICE_TYPE_OTHER; + return true; + case gpu::mojom::VkPhysicalDeviceType::INTEGRATED_GPU: + *out = VkPhysicalDeviceType::VK_PHYSICAL_DEVICE_TYPE_INTEGRATED_GPU; + return true; + case gpu::mojom::VkPhysicalDeviceType::DISCRETE_GPU: + *out = VkPhysicalDeviceType::VK_PHYSICAL_DEVICE_TYPE_DISCRETE_GPU; + return true; + case gpu::mojom::VkPhysicalDeviceType::VIRTUAL_GPU: + *out = VkPhysicalDeviceType::VK_PHYSICAL_DEVICE_TYPE_VIRTUAL_GPU; + return true; + case gpu::mojom::VkPhysicalDeviceType::CPU: + *out = VkPhysicalDeviceType::VK_PHYSICAL_DEVICE_TYPE_CPU; + return true; + case gpu::mojom::VkPhysicalDeviceType::INVALID_VALUE: + NOTREACHED(); + return false; + } + NOTREACHED(); + return false; + } +}; +template <> +struct StructTraits<gpu::mojom::VkPhysicalDeviceLimitsDataView, + VkPhysicalDeviceLimits> { + static uint32_t maxImageDimension1D(const VkPhysicalDeviceLimits& input) { + return input.maxImageDimension1D; + } + + static uint32_t maxImageDimension2D(const VkPhysicalDeviceLimits& input) { + return input.maxImageDimension2D; + } + + static uint32_t maxImageDimension3D(const VkPhysicalDeviceLimits& input) { + return input.maxImageDimension3D; + } + + static uint32_t maxImageDimensionCube(const VkPhysicalDeviceLimits& input) { + return input.maxImageDimensionCube; + } + + static uint32_t maxImageArrayLayers(const VkPhysicalDeviceLimits& input) { + return input.maxImageArrayLayers; + } + + static uint32_t maxTexelBufferElements(const VkPhysicalDeviceLimits& input) { + return input.maxTexelBufferElements; + } + + static uint32_t maxUniformBufferRange(const VkPhysicalDeviceLimits& input) { + return input.maxUniformBufferRange; + } + + static uint32_t maxStorageBufferRange(const VkPhysicalDeviceLimits& input) { + return input.maxStorageBufferRange; + } + + static uint32_t maxPushConstantsSize(const VkPhysicalDeviceLimits& input) { + return input.maxPushConstantsSize; + } + + static uint32_t maxMemoryAllocationCount( + const VkPhysicalDeviceLimits& input) { + return input.maxMemoryAllocationCount; + } + + static uint32_t maxSamplerAllocationCount( + const VkPhysicalDeviceLimits& input) { + return input.maxSamplerAllocationCount; + } + + static bool bufferImageGranularity(const VkPhysicalDeviceLimits& input) { + return input.bufferImageGranularity; + } + + static bool sparseAddressSpaceSize(const VkPhysicalDeviceLimits& input) { + return input.sparseAddressSpaceSize; + } + + static uint32_t maxBoundDescriptorSets(const VkPhysicalDeviceLimits& input) { + return input.maxBoundDescriptorSets; + } + + static uint32_t maxPerStageDescriptorSamplers( + const VkPhysicalDeviceLimits& input) { + return input.maxPerStageDescriptorSamplers; + } + + static uint32_t 
maxPerStageDescriptorUniformBuffers( + const VkPhysicalDeviceLimits& input) { + return input.maxPerStageDescriptorUniformBuffers; + } + + static uint32_t maxPerStageDescriptorStorageBuffers( + const VkPhysicalDeviceLimits& input) { + return input.maxPerStageDescriptorStorageBuffers; + } + + static uint32_t maxPerStageDescriptorSampledImages( + const VkPhysicalDeviceLimits& input) { + return input.maxPerStageDescriptorSampledImages; + } + + static uint32_t maxPerStageDescriptorStorageImages( + const VkPhysicalDeviceLimits& input) { + return input.maxPerStageDescriptorStorageImages; + } + + static uint32_t maxPerStageDescriptorInputAttachments( + const VkPhysicalDeviceLimits& input) { + return input.maxPerStageDescriptorInputAttachments; + } + + static uint32_t maxPerStageResources(const VkPhysicalDeviceLimits& input) { + return input.maxPerStageResources; + } + + static uint32_t maxDescriptorSetSamplers( + const VkPhysicalDeviceLimits& input) { + return input.maxDescriptorSetSamplers; + } + + static uint32_t maxDescriptorSetUniformBuffers( + const VkPhysicalDeviceLimits& input) { + return input.maxDescriptorSetUniformBuffers; + } + + static uint32_t maxDescriptorSetUniformBuffersDynamic( + const VkPhysicalDeviceLimits& input) { + return input.maxDescriptorSetUniformBuffersDynamic; + } + + static uint32_t maxDescriptorSetStorageBuffers( + const VkPhysicalDeviceLimits& input) { + return input.maxDescriptorSetStorageBuffers; + } + + static uint32_t maxDescriptorSetStorageBuffersDynamic( + const VkPhysicalDeviceLimits& input) { + return input.maxDescriptorSetStorageBuffersDynamic; + } + + static uint32_t maxDescriptorSetSampledImages( + const VkPhysicalDeviceLimits& input) { + return input.maxDescriptorSetSampledImages; + } + + static uint32_t maxDescriptorSetStorageImages( + const VkPhysicalDeviceLimits& input) { + return input.maxDescriptorSetStorageImages; + } + + static uint32_t maxDescriptorSetInputAttachments( + const VkPhysicalDeviceLimits& input) { + return input.maxDescriptorSetInputAttachments; + } + + static uint32_t maxVertexInputAttributes( + const VkPhysicalDeviceLimits& input) { + return input.maxVertexInputAttributes; + } + + static uint32_t maxVertexInputBindings(const VkPhysicalDeviceLimits& input) { + return input.maxVertexInputBindings; + } + + static uint32_t maxVertexInputAttributeOffset( + const VkPhysicalDeviceLimits& input) { + return input.maxVertexInputAttributeOffset; + } + + static uint32_t maxVertexInputBindingStride( + const VkPhysicalDeviceLimits& input) { + return input.maxVertexInputBindingStride; + } + + static uint32_t maxVertexOutputComponents( + const VkPhysicalDeviceLimits& input) { + return input.maxVertexOutputComponents; + } + + static uint32_t maxTessellationGenerationLevel( + const VkPhysicalDeviceLimits& input) { + return input.maxTessellationGenerationLevel; + } + + static uint32_t maxTessellationPatchSize( + const VkPhysicalDeviceLimits& input) { + return input.maxTessellationPatchSize; + } + + static uint32_t maxTessellationControlPerVertexInputComponents( + const VkPhysicalDeviceLimits& input) { + return input.maxTessellationControlPerVertexInputComponents; + } + + static uint32_t maxTessellationControlPerVertexOutputComponents( + const VkPhysicalDeviceLimits& input) { + return input.maxTessellationControlPerVertexOutputComponents; + } + + static uint32_t maxTessellationControlPerPatchOutputComponents( + const VkPhysicalDeviceLimits& input) { + return input.maxTessellationControlPerPatchOutputComponents; + } + + static uint32_t 
maxTessellationControlTotalOutputComponents( + const VkPhysicalDeviceLimits& input) { + return input.maxTessellationControlTotalOutputComponents; + } + + static uint32_t maxTessellationEvaluationInputComponents( + const VkPhysicalDeviceLimits& input) { + return input.maxTessellationEvaluationInputComponents; + } + + static uint32_t maxTessellationEvaluationOutputComponents( + const VkPhysicalDeviceLimits& input) { + return input.maxTessellationEvaluationOutputComponents; + } + + static uint32_t maxGeometryShaderInvocations( + const VkPhysicalDeviceLimits& input) { + return input.maxGeometryShaderInvocations; + } + + static uint32_t maxGeometryInputComponents( + const VkPhysicalDeviceLimits& input) { + return input.maxGeometryInputComponents; + } + + static uint32_t maxGeometryOutputComponents( + const VkPhysicalDeviceLimits& input) { + return input.maxGeometryOutputComponents; + } + + static uint32_t maxGeometryOutputVertices( + const VkPhysicalDeviceLimits& input) { + return input.maxGeometryOutputVertices; + } + + static uint32_t maxGeometryTotalOutputComponents( + const VkPhysicalDeviceLimits& input) { + return input.maxGeometryTotalOutputComponents; + } + + static uint32_t maxFragmentInputComponents( + const VkPhysicalDeviceLimits& input) { + return input.maxFragmentInputComponents; + } + + static uint32_t maxFragmentOutputAttachments( + const VkPhysicalDeviceLimits& input) { + return input.maxFragmentOutputAttachments; + } + + static uint32_t maxFragmentDualSrcAttachments( + const VkPhysicalDeviceLimits& input) { + return input.maxFragmentDualSrcAttachments; + } + + static uint32_t maxFragmentCombinedOutputResources( + const VkPhysicalDeviceLimits& input) { + return input.maxFragmentCombinedOutputResources; + } + + static uint32_t maxComputeSharedMemorySize( + const VkPhysicalDeviceLimits& input) { + return input.maxComputeSharedMemorySize; + } + + static base::span<const uint32_t> maxComputeWorkGroupCount( + const VkPhysicalDeviceLimits& input) { + return input.maxComputeWorkGroupCount; + } + + static uint32_t maxComputeWorkGroupInvocations( + const VkPhysicalDeviceLimits& input) { + return input.maxComputeWorkGroupInvocations; + } + + static base::span<const uint32_t> maxComputeWorkGroupSize( + const VkPhysicalDeviceLimits& input) { + return input.maxComputeWorkGroupSize; + } + + static uint32_t subPixelPrecisionBits(const VkPhysicalDeviceLimits& input) { + return input.subPixelPrecisionBits; + } + + static uint32_t subTexelPrecisionBits(const VkPhysicalDeviceLimits& input) { + return input.subTexelPrecisionBits; + } + + static uint32_t mipmapPrecisionBits(const VkPhysicalDeviceLimits& input) { + return input.mipmapPrecisionBits; + } + + static uint32_t maxDrawIndexedIndexValue( + const VkPhysicalDeviceLimits& input) { + return input.maxDrawIndexedIndexValue; + } + + static uint32_t maxDrawIndirectCount(const VkPhysicalDeviceLimits& input) { + return input.maxDrawIndirectCount; + } + + static float maxSamplerLodBias(const VkPhysicalDeviceLimits& input) { + return input.maxSamplerLodBias; + } + + static float maxSamplerAnisotropy(const VkPhysicalDeviceLimits& input) { + return input.maxSamplerAnisotropy; + } + + static uint32_t maxViewports(const VkPhysicalDeviceLimits& input) { + return input.maxViewports; + } + + static base::span<const uint32_t> maxViewportDimensions( + const VkPhysicalDeviceLimits& input) { + return input.maxViewportDimensions; + } + + static base::span<const float> viewportBoundsRange( + const VkPhysicalDeviceLimits& input) { + return input.viewportBoundsRange; 
+ } + + static uint32_t viewportSubPixelBits(const VkPhysicalDeviceLimits& input) { + return input.viewportSubPixelBits; + } + + static size_t minMemoryMapAlignment(const VkPhysicalDeviceLimits& input) { + return input.minMemoryMapAlignment; + } + + static bool minTexelBufferOffsetAlignment( + const VkPhysicalDeviceLimits& input) { + return input.minTexelBufferOffsetAlignment; + } + + static bool minUniformBufferOffsetAlignment( + const VkPhysicalDeviceLimits& input) { + return input.minUniformBufferOffsetAlignment; + } + + static bool minStorageBufferOffsetAlignment( + const VkPhysicalDeviceLimits& input) { + return input.minStorageBufferOffsetAlignment; + } + + static int32_t minTexelOffset(const VkPhysicalDeviceLimits& input) { + return input.minTexelOffset; + } + + static uint32_t maxTexelOffset(const VkPhysicalDeviceLimits& input) { + return input.maxTexelOffset; + } + + static int32_t minTexelGatherOffset(const VkPhysicalDeviceLimits& input) { + return input.minTexelGatherOffset; + } + + static uint32_t maxTexelGatherOffset(const VkPhysicalDeviceLimits& input) { + return input.maxTexelGatherOffset; + } + + static float minInterpolationOffset(const VkPhysicalDeviceLimits& input) { + return input.minInterpolationOffset; + } + + static float maxInterpolationOffset(const VkPhysicalDeviceLimits& input) { + return input.maxInterpolationOffset; + } + + static uint32_t subPixelInterpolationOffsetBits( + const VkPhysicalDeviceLimits& input) { + return input.subPixelInterpolationOffsetBits; + } + + static uint32_t maxFramebufferWidth(const VkPhysicalDeviceLimits& input) { + return input.maxFramebufferWidth; + } + + static uint32_t maxFramebufferHeight(const VkPhysicalDeviceLimits& input) { + return input.maxFramebufferHeight; + } + + static uint32_t maxFramebufferLayers(const VkPhysicalDeviceLimits& input) { + return input.maxFramebufferLayers; + } + + static VkSampleCountFlags framebufferColorSampleCounts( + const VkPhysicalDeviceLimits& input) { + return input.framebufferColorSampleCounts; + } + + static VkSampleCountFlags framebufferDepthSampleCounts( + const VkPhysicalDeviceLimits& input) { + return input.framebufferDepthSampleCounts; + } + + static VkSampleCountFlags framebufferStencilSampleCounts( + const VkPhysicalDeviceLimits& input) { + return input.framebufferStencilSampleCounts; + } + + static VkSampleCountFlags framebufferNoAttachmentsSampleCounts( + const VkPhysicalDeviceLimits& input) { + return input.framebufferNoAttachmentsSampleCounts; + } + + static uint32_t maxColorAttachments(const VkPhysicalDeviceLimits& input) { + return input.maxColorAttachments; + } + + static VkSampleCountFlags sampledImageColorSampleCounts( + const VkPhysicalDeviceLimits& input) { + return input.sampledImageColorSampleCounts; + } + + static VkSampleCountFlags sampledImageIntegerSampleCounts( + const VkPhysicalDeviceLimits& input) { + return input.sampledImageIntegerSampleCounts; + } + + static VkSampleCountFlags sampledImageDepthSampleCounts( + const VkPhysicalDeviceLimits& input) { + return input.sampledImageDepthSampleCounts; + } + + static VkSampleCountFlags sampledImageStencilSampleCounts( + const VkPhysicalDeviceLimits& input) { + return input.sampledImageStencilSampleCounts; + } + + static VkSampleCountFlags storageImageSampleCounts( + const VkPhysicalDeviceLimits& input) { + return input.storageImageSampleCounts; + } + + static uint32_t maxSampleMaskWords(const VkPhysicalDeviceLimits& input) { + return input.maxSampleMaskWords; + } + + static bool timestampComputeAndGraphics(const 
VkPhysicalDeviceLimits& input) { + return input.timestampComputeAndGraphics; + } + + static float timestampPeriod(const VkPhysicalDeviceLimits& input) { + return input.timestampPeriod; + } + + static uint32_t maxClipDistances(const VkPhysicalDeviceLimits& input) { + return input.maxClipDistances; + } + + static uint32_t maxCullDistances(const VkPhysicalDeviceLimits& input) { + return input.maxCullDistances; + } + + static uint32_t maxCombinedClipAndCullDistances( + const VkPhysicalDeviceLimits& input) { + return input.maxCombinedClipAndCullDistances; + } + + static uint32_t discreteQueuePriorities(const VkPhysicalDeviceLimits& input) { + return input.discreteQueuePriorities; + } + + static base::span<const float> pointSizeRange( + const VkPhysicalDeviceLimits& input) { + return input.pointSizeRange; + } + + static base::span<const float> lineWidthRange( + const VkPhysicalDeviceLimits& input) { + return input.lineWidthRange; + } + + static float pointSizeGranularity(const VkPhysicalDeviceLimits& input) { + return input.pointSizeGranularity; + } + + static float lineWidthGranularity(const VkPhysicalDeviceLimits& input) { + return input.lineWidthGranularity; + } + + static bool strictLines(const VkPhysicalDeviceLimits& input) { + return input.strictLines; + } + + static bool standardSampleLocations(const VkPhysicalDeviceLimits& input) { + return input.standardSampleLocations; + } + + static bool optimalBufferCopyOffsetAlignment( + const VkPhysicalDeviceLimits& input) { + return input.optimalBufferCopyOffsetAlignment; + } + + static bool optimalBufferCopyRowPitchAlignment( + const VkPhysicalDeviceLimits& input) { + return input.optimalBufferCopyRowPitchAlignment; + } + + static bool nonCoherentAtomSize(const VkPhysicalDeviceLimits& input) { + return input.nonCoherentAtomSize; + } + + static bool Read(gpu::mojom::VkPhysicalDeviceLimitsDataView data, + VkPhysicalDeviceLimits* out); +}; + +template <> +struct StructTraits<gpu::mojom::VkPhysicalDeviceSparsePropertiesDataView, + VkPhysicalDeviceSparseProperties> { + static bool residencyStandard2DBlockShape( + const VkPhysicalDeviceSparseProperties& input) { + return input.residencyStandard2DBlockShape; + } + + static bool residencyStandard2DMultisampleBlockShape( + const VkPhysicalDeviceSparseProperties& input) { + return input.residencyStandard2DMultisampleBlockShape; + } + + static bool residencyStandard3DBlockShape( + const VkPhysicalDeviceSparseProperties& input) { + return input.residencyStandard3DBlockShape; + } + + static bool residencyAlignedMipSize( + const VkPhysicalDeviceSparseProperties& input) { + return input.residencyAlignedMipSize; + } + + static bool residencyNonResidentStrict( + const VkPhysicalDeviceSparseProperties& input) { + return input.residencyNonResidentStrict; + } + + static bool Read(gpu::mojom::VkPhysicalDeviceSparsePropertiesDataView data, + VkPhysicalDeviceSparseProperties* out); +}; + +template <> +struct StructTraits<gpu::mojom::VkPhysicalDeviceFeaturesDataView, + VkPhysicalDeviceFeatures> { + static bool robustBufferAccess(const VkPhysicalDeviceFeatures& input) { + return input.robustBufferAccess; + } + + static bool fullDrawIndexUint32(const VkPhysicalDeviceFeatures& input) { + return input.fullDrawIndexUint32; + } + + static bool imageCubeArray(const VkPhysicalDeviceFeatures& input) { + return input.imageCubeArray; + } + + static bool independentBlend(const VkPhysicalDeviceFeatures& input) { + return input.independentBlend; + } + + static bool geometryShader(const VkPhysicalDeviceFeatures& input) { + return 
input.geometryShader; + } + + static bool tessellationShader(const VkPhysicalDeviceFeatures& input) { + return input.tessellationShader; + } + + static bool sampleRateShading(const VkPhysicalDeviceFeatures& input) { + return input.sampleRateShading; + } + + static bool dualSrcBlend(const VkPhysicalDeviceFeatures& input) { + return input.dualSrcBlend; + } + + static bool logicOp(const VkPhysicalDeviceFeatures& input) { + return input.logicOp; + } + + static bool multiDrawIndirect(const VkPhysicalDeviceFeatures& input) { + return input.multiDrawIndirect; + } + + static bool drawIndirectFirstInstance(const VkPhysicalDeviceFeatures& input) { + return input.drawIndirectFirstInstance; + } + + static bool depthClamp(const VkPhysicalDeviceFeatures& input) { + return input.depthClamp; + } + + static bool depthBiasClamp(const VkPhysicalDeviceFeatures& input) { + return input.depthBiasClamp; + } + + static bool fillModeNonSolid(const VkPhysicalDeviceFeatures& input) { + return input.fillModeNonSolid; + } + + static bool depthBounds(const VkPhysicalDeviceFeatures& input) { + return input.depthBounds; + } + + static bool wideLines(const VkPhysicalDeviceFeatures& input) { + return input.wideLines; + } + + static bool largePoints(const VkPhysicalDeviceFeatures& input) { + return input.largePoints; + } + + static bool alphaToOne(const VkPhysicalDeviceFeatures& input) { + return input.alphaToOne; + } + + static bool multiViewport(const VkPhysicalDeviceFeatures& input) { + return input.multiViewport; + } + + static bool samplerAnisotropy(const VkPhysicalDeviceFeatures& input) { + return input.samplerAnisotropy; + } + + static bool textureCompressionETC2(const VkPhysicalDeviceFeatures& input) { + return input.textureCompressionETC2; + } + + static bool textureCompressionASTC_LDR( + const VkPhysicalDeviceFeatures& input) { + return input.textureCompressionASTC_LDR; + } + + static bool textureCompressionBC(const VkPhysicalDeviceFeatures& input) { + return input.textureCompressionBC; + } + + static bool occlusionQueryPrecise(const VkPhysicalDeviceFeatures& input) { + return input.occlusionQueryPrecise; + } + + static bool pipelineStatisticsQuery(const VkPhysicalDeviceFeatures& input) { + return input.pipelineStatisticsQuery; + } + + static bool vertexPipelineStoresAndAtomics( + const VkPhysicalDeviceFeatures& input) { + return input.vertexPipelineStoresAndAtomics; + } + + static bool fragmentStoresAndAtomics(const VkPhysicalDeviceFeatures& input) { + return input.fragmentStoresAndAtomics; + } + + static bool shaderTessellationAndGeometryPointSize( + const VkPhysicalDeviceFeatures& input) { + return input.shaderTessellationAndGeometryPointSize; + } + + static bool shaderImageGatherExtended(const VkPhysicalDeviceFeatures& input) { + return input.shaderImageGatherExtended; + } + + static bool shaderStorageImageExtendedFormats( + const VkPhysicalDeviceFeatures& input) { + return input.shaderStorageImageExtendedFormats; + } + + static bool shaderStorageImageMultisample( + const VkPhysicalDeviceFeatures& input) { + return input.shaderStorageImageMultisample; + } + + static bool shaderStorageImageReadWithoutFormat( + const VkPhysicalDeviceFeatures& input) { + return input.shaderStorageImageReadWithoutFormat; + } + + static bool shaderStorageImageWriteWithoutFormat( + const VkPhysicalDeviceFeatures& input) { + return input.shaderStorageImageWriteWithoutFormat; + } + + static bool shaderUniformBufferArrayDynamicIndexing( + const VkPhysicalDeviceFeatures& input) { + return input.shaderUniformBufferArrayDynamicIndexing; + 
} + + static bool shaderSampledImageArrayDynamicIndexing( + const VkPhysicalDeviceFeatures& input) { + return input.shaderSampledImageArrayDynamicIndexing; + } + + static bool shaderStorageBufferArrayDynamicIndexing( + const VkPhysicalDeviceFeatures& input) { + return input.shaderStorageBufferArrayDynamicIndexing; + } + + static bool shaderStorageImageArrayDynamicIndexing( + const VkPhysicalDeviceFeatures& input) { + return input.shaderStorageImageArrayDynamicIndexing; + } + + static bool shaderClipDistance(const VkPhysicalDeviceFeatures& input) { + return input.shaderClipDistance; + } + + static bool shaderCullDistance(const VkPhysicalDeviceFeatures& input) { + return input.shaderCullDistance; + } + + static bool shaderFloat64(const VkPhysicalDeviceFeatures& input) { + return input.shaderFloat64; + } + + static bool shaderInt64(const VkPhysicalDeviceFeatures& input) { + return input.shaderInt64; + } + + static bool shaderInt16(const VkPhysicalDeviceFeatures& input) { + return input.shaderInt16; + } + + static bool shaderResourceResidency(const VkPhysicalDeviceFeatures& input) { + return input.shaderResourceResidency; + } + + static bool shaderResourceMinLod(const VkPhysicalDeviceFeatures& input) { + return input.shaderResourceMinLod; + } + + static bool sparseBinding(const VkPhysicalDeviceFeatures& input) { + return input.sparseBinding; + } + + static bool sparseResidencyBuffer(const VkPhysicalDeviceFeatures& input) { + return input.sparseResidencyBuffer; + } + + static bool sparseResidencyImage2D(const VkPhysicalDeviceFeatures& input) { + return input.sparseResidencyImage2D; + } + + static bool sparseResidencyImage3D(const VkPhysicalDeviceFeatures& input) { + return input.sparseResidencyImage3D; + } + + static bool sparseResidency2Samples(const VkPhysicalDeviceFeatures& input) { + return input.sparseResidency2Samples; + } + + static bool sparseResidency4Samples(const VkPhysicalDeviceFeatures& input) { + return input.sparseResidency4Samples; + } + + static bool sparseResidency8Samples(const VkPhysicalDeviceFeatures& input) { + return input.sparseResidency8Samples; + } + + static bool sparseResidency16Samples(const VkPhysicalDeviceFeatures& input) { + return input.sparseResidency16Samples; + } + + static bool sparseResidencyAliased(const VkPhysicalDeviceFeatures& input) { + return input.sparseResidencyAliased; + } + + static bool variableMultisampleRate(const VkPhysicalDeviceFeatures& input) { + return input.variableMultisampleRate; + } + + static bool inheritedQueries(const VkPhysicalDeviceFeatures& input) { + return input.inheritedQueries; + } + + static bool Read(gpu::mojom::VkPhysicalDeviceFeaturesDataView data, + VkPhysicalDeviceFeatures* out); +}; + +template <> +struct StructTraits<gpu::mojom::VkQueueFamilyPropertiesDataView, + VkQueueFamilyProperties> { + static VkQueueFlags queueFlags(const VkQueueFamilyProperties& input) { + return input.queueFlags; + } + + static uint32_t queueCount(const VkQueueFamilyProperties& input) { + return input.queueCount; + } + + static uint32_t timestampValidBits(const VkQueueFamilyProperties& input) { + return input.timestampValidBits; + } + + static const VkExtent3D& minImageTransferGranularity( + const VkQueueFamilyProperties& input) { + return input.minImageTransferGranularity; + } + + static bool Read(gpu::mojom::VkQueueFamilyPropertiesDataView data, + VkQueueFamilyProperties* out); +}; + +template <> +struct StructTraits<gpu::mojom::VkExtent3DDataView, VkExtent3D> { + static uint32_t width(const VkExtent3D& input) { return input.width; } + + 
static uint32_t height(const VkExtent3D& input) { return input.height; } + + static uint32_t depth(const VkExtent3D& input) { return input.depth; } + + static bool Read(gpu::mojom::VkExtent3DDataView data, VkExtent3D* out); +}; + +} // namespace mojo + +#endif // GPU_IPC_COMMON_VULKAN_TYPES_MOJOM_TRAITS_H_
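For readers new to the pattern the generated header above follows: a mojo StructTraits specialization maps a native C++ struct onto a mojom struct. During serialization mojo invokes one static getter per mojom field; during deserialization it calls the static Read(), which returns false to reject a malformed message. The sketch below is illustrative only; NativeExtent and example::mojom::ExtentDataView are hypothetical stand-ins, not types from this commit.

// Minimal sketch of a hand-written StructTraits specialization, assuming a
// mojom struct Extent { uint32 width; uint32 height; }; was defined and its
// generated *-shared.h header is available. All names here are hypothetical.
#include <cstdint>

#include "example/mojom/extent.mojom-shared.h"  // hypothetical generated header
#include "mojo/public/cpp/bindings/struct_traits.h"

struct NativeExtent {
  uint32_t width = 0;
  uint32_t height = 0;
};

namespace mojo {

template <>
struct StructTraits<example::mojom::ExtentDataView, NativeExtent> {
  // Serialization: mojo calls one static getter per mojom field.
  static uint32_t width(const NativeExtent& input) { return input.width; }
  static uint32_t height(const NativeExtent& input) { return input.height; }

  // Deserialization: fill the native struct from the wire data; returning
  // false rejects the message and closes the pipe.
  static bool Read(example::mojom::ExtentDataView data, NativeExtent* out) {
    out->width = data.width();
    out->height = data.height();
    return true;
  }
};

}  // namespace mojo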
\ No newline at end of file diff --git a/chromium/gpu/ipc/host/shader_disk_cache.cc b/chromium/gpu/ipc/host/shader_disk_cache.cc index 73b90030a5c..52dbcde835d 100644 --- a/chromium/gpu/ipc/host/shader_disk_cache.cc +++ b/chromium/gpu/ipc/host/shader_disk_cache.cc @@ -556,8 +556,9 @@ void ShaderDiskCache::Init() { int rv = disk_cache::CreateCacheBackend( net::SHADER_CACHE, net::CACHE_BACKEND_DEFAULT, - cache_path_.Append(kGpuCachePath), CacheSizeBytes(), true, nullptr, - &backend_, base::BindOnce(&ShaderDiskCache::CacheCreatedCallback, this)); + cache_path_.Append(kGpuCachePath), CacheSizeBytes(), + disk_cache::ResetHandling::kResetOnError, nullptr, &backend_, + base::BindOnce(&ShaderDiskCache::CacheCreatedCallback, this)); if (rv == net::OK) cache_available_ = true; diff --git a/chromium/gpu/ipc/in_process_command_buffer.cc b/chromium/gpu/ipc/in_process_command_buffer.cc index 4b98aab3ca3..08162a8e826 100644 --- a/chromium/gpu/ipc/in_process_command_buffer.cc +++ b/chromium/gpu/ipc/in_process_command_buffer.cc @@ -645,6 +645,11 @@ gpu::ContextResult InProcessCommandBuffer::InitializeOnGpuThread( use_virtualized_gl_context_ ? gl_share_group_->GetSharedContext(surface_.get()) : nullptr; + if (real_context && + (!real_context->MakeCurrent(surface_.get()) || + real_context->CheckStickyGraphicsResetStatus() != GL_NO_ERROR)) { + real_context = nullptr; + } if (!real_context) { real_context = gl::init::CreateGLContext( gl_share_group_.get(), surface_.get(), @@ -680,7 +685,8 @@ gpu::ContextResult InProcessCommandBuffer::InitializeOnGpuThread( if (!context_state_) { context_state_ = base::MakeRefCounted<SharedContextState>( gl_share_group_, surface_, real_context, - use_virtualized_gl_context_, base::DoNothing()); + use_virtualized_gl_context_, base::DoNothing(), + task_executor_->gpu_preferences().gr_context_type); context_state_->InitializeGL(task_executor_->gpu_preferences(), context_group_->feature_info()); context_state_->InitializeGrContext(workarounds, params.gr_shader_cache, @@ -1771,6 +1777,16 @@ viz::GpuVSyncCallback InProcessCommandBuffer::GetGpuVSyncCallback() { std::move(handle_gpu_vsync_callback)); } +base::TimeDelta InProcessCommandBuffer::GetGpuBlockedTimeSinceLastSwap() { + // Some examples and tests create InProcessCommandBuffer without + // GpuChannelManagerDelegate. + if (!gpu_channel_manager_delegate_) + return base::TimeDelta::Min(); + + return gpu_channel_manager_delegate_->GetGpuScheduler() + ->TakeTotalBlockingTime(); +} + void InProcessCommandBuffer::HandleGpuVSyncOnOriginThread( base::TimeTicks vsync_time, base::TimeDelta vsync_interval) { diff --git a/chromium/gpu/ipc/in_process_command_buffer.h b/chromium/gpu/ipc/in_process_command_buffer.h index 6c842b835da..939e083cf05 100644 --- a/chromium/gpu/ipc/in_process_command_buffer.h +++ b/chromium/gpu/ipc/in_process_command_buffer.h @@ -178,6 +178,7 @@ class GL_IN_PROCESS_CONTEXT_EXPORT InProcessCommandBuffer const GpuPreferences& GetGpuPreferences() const override; void BufferPresented(const gfx::PresentationFeedback& feedback) override; viz::GpuVSyncCallback GetGpuVSyncCallback() override; + base::TimeDelta GetGpuBlockedTimeSinceLastSwap() override; // Upstream this function to GpuControl if needs arise. Can be called on any // thread. 
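The same defensive guard appears three times in this commit (InProcessCommandBuffer::InitializeOnGpuThread above, and GLES2CommandBufferStub::Initialize and GpuChannelManager::GetSharedContextState below): before reusing a cached virtualized GL context, make it current and query the sticky reset status, and fall back to creating a fresh context if the driver reported a reset. Below is a sketch of that guard factored into a helper; ReuseContextIfHealthy is a hypothetical name, while the MakeCurrent() and CheckStickyGraphicsResetStatus() calls are the ones used in the hunks.

// Hypothetical helper equivalent to the inline checks this commit adds.
// Returns the context if it is still usable, or nullptr so the caller
// falls through to gl::init::CreateGLContext() and builds a new one.
#include "base/memory/scoped_refptr.h"
#include "ui/gl/gl_bindings.h"
#include "ui/gl/gl_context.h"
#include "ui/gl/gl_surface.h"

scoped_refptr<gl::GLContext> ReuseContextIfHealthy(
    scoped_refptr<gl::GLContext> context,
    gl::GLSurface* surface) {
  if (context && (!context->MakeCurrent(surface) ||
                  context->CheckStickyGraphicsResetStatus() != GL_NO_ERROR)) {
    // The shared context was lost (e.g. after a GPU reset); drop it.
    context = nullptr;
  }
  return context;
}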
diff --git a/chromium/gpu/ipc/in_process_gpu_thread_holder.cc b/chromium/gpu/ipc/in_process_gpu_thread_holder.cc index 3b9c6aeaf55..21ad0705ac8 100644 --- a/chromium/gpu/ipc/in_process_gpu_thread_holder.cc +++ b/chromium/gpu/ipc/in_process_gpu_thread_holder.cc @@ -66,8 +66,8 @@ CommandBufferTaskExecutor* InProcessGpuThreadHolder::GetTaskExecutor() { void InProcessGpuThreadHolder::InitializeOnGpuThread( base::WaitableEvent* completion) { sync_point_manager_ = std::make_unique<SyncPointManager>(); - scheduler_ = - std::make_unique<Scheduler>(task_runner(), sync_point_manager_.get()); + scheduler_ = std::make_unique<Scheduler>( + task_runner(), sync_point_manager_.get(), gpu_preferences_); mailbox_manager_ = gles2::CreateMailboxManager(gpu_preferences_); shared_image_manager_ = std::make_unique<SharedImageManager>(); task_executor_ = std::make_unique<GpuInProcessThreadService>( diff --git a/chromium/gpu/ipc/service/BUILD.gn b/chromium/gpu/ipc/service/BUILD.gn index 082ac932cfa..c3ca00c900f 100644 --- a/chromium/gpu/ipc/service/BUILD.gn +++ b/chromium/gpu/ipc/service/BUILD.gn @@ -10,6 +10,10 @@ if (is_mac) { import("//build/config/mac/mac_sdk.gni") } +declare_args() { + subpixel_font_rendering_disabled = false +} + jumbo_component("service") { output_name = "gpu_ipc_service" sources = [ @@ -52,9 +56,13 @@ jumbo_component("service") { if (is_chromecast) { defines += [ "IS_CHROMECAST" ] } + if (subpixel_font_rendering_disabled) { + defines += [ "SUBPIXEL_FONT_RENDERING_DISABLED" ] + } public_deps = [ "//base", "//components/viz/common", + "//gpu/config", "//ipc", "//ui/base", "//ui/display", diff --git a/chromium/gpu/ipc/service/command_buffer_stub.cc b/chromium/gpu/ipc/service/command_buffer_stub.cc index e430c664365..dea796c946d 100644 --- a/chromium/gpu/ipc/service/command_buffer_stub.cc +++ b/chromium/gpu/ipc/service/command_buffer_stub.cc @@ -12,7 +12,7 @@ #include "base/json/json_writer.h" #include "base/macros.h" #include "base/memory/ptr_util.h" -#include "base/memory/shared_memory.h" +#include "base/memory/unsafe_shared_memory_region.h" #include "base/no_destructor.h" #include "base/single_thread_task_runner.h" #include "base/time/time.h" diff --git a/chromium/gpu/ipc/service/gles2_command_buffer_stub.cc b/chromium/gpu/ipc/service/gles2_command_buffer_stub.cc index 98a27387fe5..28a7a9dd845 100644 --- a/chromium/gpu/ipc/service/gles2_command_buffer_stub.cc +++ b/chromium/gpu/ipc/service/gles2_command_buffer_stub.cc @@ -13,7 +13,7 @@ #include "base/json/json_writer.h" #include "base/macros.h" #include "base/memory/memory_pressure_listener.h" -#include "base/memory/shared_memory.h" +#include "base/memory/unsafe_shared_memory_region.h" #include "base/metrics/histogram_macros.h" #include "base/single_thread_task_runner.h" #include "base/time/time.h" @@ -31,6 +31,7 @@ #include "gpu/command_buffer/service/logger.h" #include "gpu/command_buffer/service/mailbox_manager.h" #include "gpu/command_buffer/service/memory_tracking.h" +#include "gpu/command_buffer/service/scheduler.h" #include "gpu/command_buffer/service/service_utils.h" #include "gpu/command_buffer/service/sync_point_manager.h" #include "gpu/command_buffer/service/transfer_buffer_manager.h" @@ -261,6 +262,10 @@ gpu::ContextResult GLES2CommandBufferStub::Initialize( scoped_refptr<gl::GLContext> context; if (use_virtualized_gl_context_ && share_group_) { context = share_group_->GetSharedContext(surface_.get()); + if (context && (!context->MakeCurrent(surface_.get()) || + context->CheckStickyGraphicsResetStatus() != GL_NO_ERROR)) { + 
context = nullptr; + } if (!context) { context = gl::init::CreateGLContext( share_group_.get(), surface_.get(), @@ -423,12 +428,17 @@ viz::GpuVSyncCallback GLES2CommandBufferStub::GetGpuVSyncCallback() { return viz::GpuVSyncCallback(); } +base::TimeDelta GLES2CommandBufferStub::GetGpuBlockedTimeSinceLastSwap() { + return channel_->scheduler()->TakeTotalBlockingTime(); +} + MemoryTracker* GLES2CommandBufferStub::GetMemoryTracker() const { return context_group_->memory_tracker(); } -void GLES2CommandBufferStub::OnGpuSwitched() { - Send(new GpuCommandBufferMsg_GpuSwitched(route_id_)); +void GLES2CommandBufferStub::OnGpuSwitched( + gl::GpuPreference active_gpu_heuristic) { + Send(new GpuCommandBufferMsg_GpuSwitched(route_id_, active_gpu_heuristic)); } bool GLES2CommandBufferStub::HandleMessage(const IPC::Message& message) { diff --git a/chromium/gpu/ipc/service/gles2_command_buffer_stub.h b/chromium/gpu/ipc/service/gles2_command_buffer_stub.h index 9b3f695c233..953919de878 100644 --- a/chromium/gpu/ipc/service/gles2_command_buffer_stub.h +++ b/chromium/gpu/ipc/service/gles2_command_buffer_stub.h @@ -40,7 +40,7 @@ class GPU_IPC_SERVICE_EXPORT GLES2CommandBufferStub MemoryTracker* GetMemoryTracker() const override; // DecoderClient implementation. - void OnGpuSwitched() override; + void OnGpuSwitched(gl::GpuPreference active_gpu_heuristic) override; // ImageTransportSurfaceDelegate implementation: #if defined(OS_WIN) @@ -53,6 +53,7 @@ class GPU_IPC_SERVICE_EXPORT GLES2CommandBufferStub const GpuPreferences& GetGpuPreferences() const override; void BufferPresented(const gfx::PresentationFeedback& feedback) override; viz::GpuVSyncCallback GetGpuVSyncCallback() override; + base::TimeDelta GetGpuBlockedTimeSinceLastSwap() override; private: bool HandleMessage(const IPC::Message& message) override; diff --git a/chromium/gpu/ipc/service/gpu_channel_manager.cc b/chromium/gpu/ipc/service/gpu_channel_manager.cc index 1cacaa4c14c..39544aca67e 100644 --- a/chromium/gpu/ipc/service/gpu_channel_manager.cc +++ b/chromium/gpu/ipc/service/gpu_channel_manager.cc @@ -110,7 +110,8 @@ GpuChannelManager::GpuChannelManager( scoped_refptr<gl::GLSurface> default_offscreen_surface, ImageDecodeAcceleratorWorker* image_decode_accelerator_worker, viz::VulkanContextProvider* vulkan_context_provider, - viz::MetalContextProvider* metal_context_provider) + viz::MetalContextProvider* metal_context_provider, + viz::DawnContextProvider* dawn_context_provider) : task_runner_(task_runner), io_task_runner_(io_task_runner), gpu_preferences_(gpu_preferences), @@ -133,7 +134,8 @@ GpuChannelManager::GpuChannelManager( base::BindRepeating(&GpuChannelManager::HandleMemoryPressure, base::Unretained(this))), vulkan_context_provider_(vulkan_context_provider), - metal_context_provider_(metal_context_provider) { + metal_context_provider_(metal_context_provider), + dawn_context_provider_(dawn_context_provider) { DCHECK(task_runner->BelongsToCurrentThread()); DCHECK(io_task_runner); DCHECK(scheduler); @@ -284,6 +286,9 @@ void GpuChannelManager::GetVideoMemoryUsageStats( .video_memory += size; } + if (shared_context_state_ && !shared_context_state_->context_lost()) + total_size += shared_context_state_->GetMemoryUsage(); + // Assign the total across all processes in the GPU process video_memory_usage_stats->process_map[base::GetCurrentProcId()].video_memory = total_size; @@ -438,6 +443,10 @@ scoped_refptr<SharedContextState> GpuChannelManager::GetSharedContextState( scoped_refptr<gl::GLContext> context = use_virtualized_gl_contexts ? 
share_group->GetSharedContext(surface.get()) : nullptr; + if (context && (!context->MakeCurrent(surface.get()) || + context->CheckStickyGraphicsResetStatus() != GL_NO_ERROR)) { + context = nullptr; + } if (!context) { gl::GLContextAttribs attribs = gles2::GenerateGLContextAttribs( ContextCreationAttribs(), use_passthrough_decoder); @@ -481,7 +490,8 @@ scoped_refptr<SharedContextState> GpuChannelManager::GetSharedContextState( use_virtualized_gl_contexts, base::BindOnce(&GpuChannelManager::OnContextLost, base::Unretained(this), /*synthetic_loss=*/false), - vulkan_context_provider_, metal_context_provider_); + gpu_preferences_.gr_context_type, vulkan_context_provider_, + metal_context_provider_, dawn_context_provider_, peak_memory_monitor()); // OOP-R needs GrContext for raster tiles. bool need_gr_context = @@ -492,7 +502,7 @@ scoped_refptr<SharedContextState> GpuChannelManager::GetSharedContextState( need_gr_context |= features::IsUsingSkiaRenderer(); if (need_gr_context) { - if (!vulkan_context_provider_ && !metal_context_provider_) { + if (gpu_preferences_.gr_context_type == gpu::GrContextType::kGL) { auto feature_info = base::MakeRefCounted<gles2::FeatureInfo>( gpu_driver_bug_workarounds(), gpu_feature_info()); if (!shared_context_state_->InitializeGL(gpu_preferences_, diff --git a/chromium/gpu/ipc/service/gpu_channel_manager.h b/chromium/gpu/ipc/service/gpu_channel_manager.h index a8abfe5f4b0..217adb652cb 100644 --- a/chromium/gpu/ipc/service/gpu_channel_manager.h +++ b/chromium/gpu/ipc/service/gpu_channel_manager.h @@ -83,7 +83,8 @@ class GPU_IPC_SERVICE_EXPORT GpuChannelManager scoped_refptr<gl::GLSurface> default_offscreen_surface, ImageDecodeAcceleratorWorker* image_decode_accelerator_worker, viz::VulkanContextProvider* vulkan_context_provider = nullptr, - viz::MetalContextProvider* metal_context_provider = nullptr); + viz::MetalContextProvider* metal_context_provider = nullptr, + viz::DawnContextProvider* dawn_context_provider = nullptr); ~GpuChannelManager() override; GpuChannelManagerDelegate* delegate() const { return delegate_; } @@ -288,13 +289,18 @@ class GPU_IPC_SERVICE_EXPORT GpuChannelManager scoped_refptr<SharedContextState> shared_context_state_; // With --enable-vulkan, |vulkan_context_provider_| will be set from - // viz::GpuServiceImpl. The raster decoders will use it for rasterization. + // viz::GpuServiceImpl. The raster decoders will use it for rasterization if + // --gr-context-type is also set to Vulkan. viz::VulkanContextProvider* vulkan_context_provider_ = nullptr; // If features::SkiaOnMetal, |metal_context_provider_| will be set from // viz::GpuServiceImpl. The raster decoders will use it for rasterization. viz::MetalContextProvider* metal_context_provider_ = nullptr; + // With --gr-context-type=dawn, |dawn_context_provider_| will be set from + // viz::GpuServiceImpl. The raster decoders will use it for rasterization. + viz::DawnContextProvider* dawn_context_provider_ = nullptr; + GpuPeakMemoryMonitor peak_memory_monitor_; // Member variables should appear before the WeakPtrFactory, to ensure diff --git a/chromium/gpu/ipc/service/gpu_channel_manager_delegate.h b/chromium/gpu/ipc/service/gpu_channel_manager_delegate.h index 9209fa995ea..9e6809b7779 100644 --- a/chromium/gpu/ipc/service/gpu_channel_manager_delegate.h +++ b/chromium/gpu/ipc/service/gpu_channel_manager_delegate.h @@ -60,6 +60,9 @@ class GpuChannelManagerDelegate { // thread.
virtual bool IsExiting() const = 0; + + // Returns the GPU scheduler. + virtual gpu::Scheduler* GetGpuScheduler() = 0; + #if defined(OS_WIN) // Tells the delegate that |child_window| was created in the GPU process and // to send an IPC to make SetParent() syscall. This syscall is blocked by the diff --git a/chromium/gpu/ipc/service/gpu_channel_test_common.cc b/chromium/gpu/ipc/service/gpu_channel_test_common.cc index dd1894b2a06..22966507b37 100644 --- a/chromium/gpu/ipc/service/gpu_channel_test_common.cc +++ b/chromium/gpu/ipc/service/gpu_channel_test_common.cc @@ -4,7 +4,7 @@ #include "gpu/ipc/service/gpu_channel_test_common.h" -#include "base/memory/shared_memory.h" +#include "base/memory/unsafe_shared_memory_region.h" #include "base/test/test_simple_task_runner.h" #include "base/threading/thread_task_runner_handle.h" #include "gpu/command_buffer/common/activity_flags.h" @@ -23,7 +23,7 @@ namespace gpu { class TestGpuChannelManagerDelegate : public GpuChannelManagerDelegate { public: - TestGpuChannelManagerDelegate() = default; + TestGpuChannelManagerDelegate(Scheduler* scheduler) : scheduler_(scheduler) {} ~TestGpuChannelManagerDelegate() override = default; // GpuChannelManagerDelegate implementation: @@ -47,8 +47,11 @@ class TestGpuChannelManagerDelegate : public GpuChannelManagerDelegate { SurfaceHandle child_window) override {} #endif + Scheduler* GetGpuScheduler() override { return scheduler_; } + private: bool is_exiting_ = false; + Scheduler* const scheduler_; DISALLOW_COPY_AND_ASSIGN(TestGpuChannelManagerDelegate); }; @@ -63,8 +66,11 @@ GpuChannelTestCommon::GpuChannelTestCommon( io_task_runner_(new base::TestSimpleTaskRunner), sync_point_manager_(new SyncPointManager()), shared_image_manager_(new SharedImageManager(false /* thread_safe */)), - scheduler_(new Scheduler(task_runner_, sync_point_manager_.get())), - channel_manager_delegate_(new TestGpuChannelManagerDelegate()) { + scheduler_(new Scheduler(task_runner_, + sync_point_manager_.get(), + GpuPreferences())), + channel_manager_delegate_( + new TestGpuChannelManagerDelegate(scheduler_.get())) { // We need GL bindings to actually initialize command buffers. if (use_stub_bindings) gl::GLSurfaceTestSupport::InitializeOneOffWithStubBindings(); diff --git a/chromium/gpu/ipc/service/gpu_init.cc b/chromium/gpu/ipc/service/gpu_init.cc index 0aa6832893e..1aeba67edd4 100644 --- a/chromium/gpu/ipc/service/gpu_init.cc +++ b/chromium/gpu/ipc/service/gpu_init.cc @@ -51,13 +51,13 @@ #if BUILDFLAG(ENABLE_VULKAN) #include "gpu/vulkan/init/vulkan_factory.h" #include "gpu/vulkan/vulkan_implementation.h" +#include "gpu/vulkan/vulkan_instance.h" #endif namespace gpu { namespace { -bool CollectGraphicsInfo(GPUInfo* gpu_info, - const GpuPreferences& gpu_preferences) { +bool CollectGraphicsInfo(GPUInfo* gpu_info) { DCHECK(gpu_info); TRACE_EVENT0("gpu,startup", "Collect Graphics Info"); base::TimeTicks before_collect_context_graphics_info = base::TimeTicks::Now(); @@ -164,6 +164,11 @@ bool GpuInit::InitializeAndStartSandbox(base::CommandLine* command_line, // Set keys for crash logging based on preliminary gpu info, in case we // crash during feature collection.
gpu::SetKeysForCrashLogging(gpu_info_); +#if defined(SUBPIXEL_FONT_RENDERING_DISABLED) + gpu_info_.subpixel_font_rendering = false; +#else + gpu_info_.subpixel_font_rendering = true; +#endif #if defined(OS_LINUX) && !defined(OS_CHROMEOS) if (gpu_info_.gpu.vendor_id == 0x10de && // NVIDIA @@ -178,7 +183,6 @@ bool GpuInit::InitializeAndStartSandbox(base::CommandLine* command_line, } #endif // !OS_ANDROID && !IS_CHROMECAST gpu_info_.in_process_gpu = false; - bool use_swiftshader = false; // GL bindings may have already been initialized, specifically on MacOSX. @@ -196,7 +200,8 @@ bool GpuInit::InitializeAndStartSandbox(base::CommandLine* command_line, } bool enable_watchdog = !gpu_preferences_.disable_gpu_watchdog && - !command_line->HasSwitch(switches::kHeadless); + !command_line->HasSwitch(switches::kHeadless) && + !use_swiftshader; // Disable the watchdog in debug builds because they tend to only be run by // developers who will not appreciate the watchdog killing the GPU process. @@ -216,6 +221,11 @@ bool GpuInit::InitializeAndStartSandbox(base::CommandLine* command_line, delayed_watchdog_enable = true; #endif + // PreSandboxStartup() is mainly for resource handling and is not related + // to the GPU driver, so it doesn't need the GPU watchdog. LoadLibrary() may + // take a long time, so killing and restarting the GPU process would not + // help. + sandbox_helper_->PreSandboxStartup(); + // Start the GPU watchdog only after anything that is expected to be time // consuming has completed, otherwise the process is liable to be aborted. if (enable_watchdog && !delayed_watchdog_enable) { @@ -243,8 +253,6 @@ bool GpuInit::InitializeAndStartSandbox(base::CommandLine* command_line, #endif // OS_WIN } - sandbox_helper_->PreSandboxStartup(); - bool attempted_startsandbox = false; #if defined(OS_LINUX) // On Chrome OS ARM Mali, GPU driver userspace creates threads when @@ -289,12 +297,30 @@ bool GpuInit::InitializeAndStartSandbox(base::CommandLine* command_line, gl_initialized = false; #endif // OS_LINUX } - if (!gl_initialized) - gl_initialized = gl::init::InitializeGLNoExtensionsOneOff(); + if (!gl_initialized) { - VLOG(1) << "gl::init::InitializeGLNoExtensionsOneOff failed"; - return false; + // Pause the watchdog. LoadLibrary in GLBindings may take a long time. + if (watchdog_thread_) + watchdog_thread_->PauseWatchdog(); + gl_initialized = gl::init::InitializeStaticGLBindingsOneOff(); + + if (!gl_initialized) { + VLOG(1) << "gl::init::InitializeStaticGLBindingsOneOff failed"; + return false; + } + + if (watchdog_thread_) + watchdog_thread_->ResumeWatchdog(); + if (gl::GetGLImplementation() != gl::kGLImplementationDisabled) { + gl_initialized = + gl::init::InitializeGLNoExtensionsOneOff(/*init_bindings*/ false); + if (!gl_initialized) { + VLOG(1) << "gl::init::InitializeGLNoExtensionsOneOff failed"; + return false; + } + } } + bool gl_disabled = gl::GetGLImplementation() == gl::kGLImplementationDisabled; // Compute passthrough decoder status before ComputeGpuFeatureInfo below. @@ -305,7 +331,7 @@ bool GpuInit::InitializeAndStartSandbox(base::CommandLine* command_line, // We need to collect GL strings (VENDOR, RENDERER) for blacklisting purposes.
if (!gl_disabled) { if (!use_swiftshader) { - if (!CollectGraphicsInfo(&gpu_info_, gpu_preferences_)) + if (!CollectGraphicsInfo(&gpu_info_)) return false; gpu::SetKeysForCrashLogging(gpu_info_); gpu_feature_info_ = gpu::ComputeGpuFeatureInfo( @@ -320,7 +346,9 @@ bool GpuInit::InitializeAndStartSandbox(base::CommandLine* command_line, return false; #else gl::init::ShutdownGL(true); - if (!gl::init::InitializeGLNoExtensionsOneOff()) { + watchdog_thread_ = nullptr; + watchdog_init.SetGpuWatchdogPtr(nullptr); + if (!gl::init::InitializeGLNoExtensionsOneOff(/*init_bindings*/ true)) { VLOG(1) << "gl::init::InitializeGLNoExtensionsOneOff with SwiftShader " << "failed"; @@ -331,10 +359,10 @@ bool GpuInit::InitializeAndStartSandbox(base::CommandLine* command_line, } else { // use_swiftshader == true switch (gpu_preferences_.use_vulkan) { case gpu::VulkanImplementationName::kNative: { - // Collect GPU info, so we can use backlist to disable vulkan if it is - // needed. + // Collect GPU info, so we can use blacklist to disable vulkan if it + // is needed. gpu::GPUInfo gpu_info; - if (!CollectGraphicsInfo(&gpu_info, gpu_preferences_)) + if (!CollectGraphicsInfo(&gpu_info)) return false; auto gpu_feature_info = gpu::ComputeGpuFeatureInfo( gpu_info, gpu_preferences_, command_line, nullptr); @@ -355,38 +383,7 @@ bool GpuInit::InitializeAndStartSandbox(base::CommandLine* command_line, } } -#if BUILDFLAG(ENABLE_VULKAN) - if (gpu_feature_info_.status_values[gpu::GPU_FEATURE_TYPE_VULKAN] == - gpu::kGpuFeatureStatusEnabled) { - DCHECK_NE(gpu_preferences_.use_vulkan, - gpu::VulkanImplementationName::kNone); - bool vulkan_use_swiftshader = gpu_preferences_.use_vulkan == - gpu::VulkanImplementationName::kSwiftshader; - const bool enforce_protected_memory = - gpu_preferences_.enforce_vulkan_protected_memory; - vulkan_implementation_ = gpu::CreateVulkanImplementation( - vulkan_use_swiftshader, - enforce_protected_memory ? true : false /* allow_protected_memory */, - enforce_protected_memory); - if (!vulkan_implementation_ || - !vulkan_implementation_->InitializeVulkanInstance( - !gpu_preferences_.disable_vulkan_surface)) { - DLOG(ERROR) << "Failed to create and initialize Vulkan implementation."; - vulkan_implementation_ = nullptr; - CHECK(!gpu_preferences_.disable_vulkan_fallback_to_gl_for_testing); - } - } - if (!vulkan_implementation_) { - gpu_preferences_.use_vulkan = gpu::VulkanImplementationName::kNone; - gpu_feature_info_.status_values[gpu::GPU_FEATURE_TYPE_VULKAN] = - gpu::kGpuFeatureStatusDisabled; - } - -#else - gpu_preferences_.use_vulkan = gpu::VulkanImplementationName::kNone; - gpu_feature_info_.status_values[gpu::GPU_FEATURE_TYPE_VULKAN] = - gpu::kGpuFeatureStatusDisabled; -#endif + InitializeVulkan(); // Collect GPU process info if (!gl_disabled) { @@ -418,7 +415,7 @@ bool GpuInit::InitializeAndStartSandbox(base::CommandLine* command_line, // information on Linux platform. Try to collect graphics information // based on core profile context after disabling platform extensions. 
if (!gl_disabled && !use_swiftshader) { - if (!CollectGraphicsInfo(&gpu_info_, gpu_preferences_)) + if (!CollectGraphicsInfo(&gpu_info_)) return false; gpu::SetKeysForCrashLogging(gpu_info_); gpu_feature_info_ = gpu::ComputeGpuFeatureInfo(gpu_info_, gpu_preferences_, @@ -462,8 +459,9 @@ bool GpuInit::InitializeAndStartSandbox(base::CommandLine* command_line, if (use_swiftshader || gl::GetGLImplementation() == gl::GetSoftwareGLImplementation()) { gpu_info_.software_rendering = true; - if (watchdog_thread_) - watchdog_thread_->Stop(); + watchdog_thread_ = nullptr; + watchdog_init.SetGpuWatchdogPtr(nullptr); + } else if (gl_disabled) { watchdog_thread_ = nullptr; watchdog_init.SetGpuWatchdogPtr(nullptr); } else if (enable_watchdog && delayed_watchdog_enable) { @@ -519,6 +517,7 @@ void GpuInit::InitializeInProcess(base::CommandLine* command_line, InitializeGLThreadSafe(command_line, gpu_preferences_, &gpu_info_, &gpu_feature_info_); + InitializeVulkan(); default_offscreen_surface_ = gl::init::CreateOffscreenGLSurface(gfx::Size()); @@ -551,6 +550,11 @@ void GpuInit::InitializeInProcess(base::CommandLine* command_line, if (!PopGPUInfoCache(&gpu_info_)) { CollectBasicGraphicsInfo(command_line, &gpu_info_); } +#if defined(SUBPIXEL_FONT_RENDERING_DISABLED) + gpu_info_.subpixel_font_rendering = false; +#else + gpu_info_.subpixel_font_rendering = true; +#endif if (!PopGpuFeatureInfoCache(&gpu_feature_info_)) { gpu_feature_info_ = ComputeGpuFeatureInfo(gpu_info_, gpu_preferences_, command_line, &needs_more_info); @@ -564,7 +568,7 @@ void GpuInit::InitializeInProcess(base::CommandLine* command_line, bool use_swiftshader = EnableSwiftShaderIfNeeded( command_line, gpu_feature_info_, gpu_preferences_.disable_software_rasterizer, needs_more_info); - if (!gl::init::InitializeGLNoExtensionsOneOff()) { + if (!gl::init::InitializeGLNoExtensionsOneOff(/*init_bindings*/ true)) { VLOG(1) << "gl::init::InitializeGLNoExtensionsOneOff failed"; return; } @@ -579,7 +583,7 @@ void GpuInit::InitializeInProcess(base::CommandLine* command_line, gpu_preferences_.disable_software_rasterizer, false); if (use_swiftshader) { gl::init::ShutdownGL(true); - if (!gl::init::InitializeGLNoExtensionsOneOff()) { + if (!gl::init::InitializeGLNoExtensionsOneOff(/*init_bindings*/ true)) { VLOG(1) << "gl::init::InitializeGLNoExtensionsOneOff failed " << "with SwiftShader"; return; @@ -617,7 +621,7 @@ void GpuInit::InitializeInProcess(base::CommandLine* command_line, gpu_preferences_.disable_software_rasterizer, false); if (use_swiftshader) { gl::init::ShutdownGL(true); - if (!gl::init::InitializeGLNoExtensionsOneOff()) { + if (!gl::init::InitializeGLNoExtensionsOneOff(/*init_bindings*/ true)) { VLOG(1) << "gl::init::InitializeGLNoExtensionsOneOff failed " << "with SwiftShader"; return; @@ -651,4 +655,59 @@ scoped_refptr<gl::GLSurface> GpuInit::TakeDefaultOffscreenSurface() { return std::move(default_offscreen_surface_); } +void GpuInit::InitializeVulkan() { +#if BUILDFLAG(ENABLE_VULKAN) + if (gpu_feature_info_.status_values[gpu::GPU_FEATURE_TYPE_VULKAN] == + gpu::kGpuFeatureStatusEnabled) { + DCHECK_NE(gpu_preferences_.use_vulkan, + gpu::VulkanImplementationName::kNone); + bool vulkan_use_swiftshader = gpu_preferences_.use_vulkan == + gpu::VulkanImplementationName::kSwiftshader; + const bool enforce_protected_memory = + gpu_preferences_.enforce_vulkan_protected_memory; + vulkan_implementation_ = gpu::CreateVulkanImplementation( + vulkan_use_swiftshader, + enforce_protected_memory ? 
true : false /* allow_protected_memory */, + enforce_protected_memory); + if (!vulkan_implementation_ || + !vulkan_implementation_->InitializeVulkanInstance( + !gpu_preferences_.disable_vulkan_surface)) { + DLOG(ERROR) << "Failed to create and initialize Vulkan implementation."; + vulkan_implementation_ = nullptr; + CHECK(!gpu_preferences_.disable_vulkan_fallback_to_gl_for_testing); + } + // TODO(penghuang): Remove GPU.SupportsVulkan and GPU.VulkanVersion from + // //gpu/config/gpu_info_collector_win.cc once Vulkan is launched via Finch + // on Windows. + if (!vulkan_use_swiftshader) { + const bool supports_vulkan = !!vulkan_implementation_; + UMA_HISTOGRAM_BOOLEAN("GPU.SupportsVulkan", supports_vulkan); + uint32_t vulkan_version = 0; + if (supports_vulkan) { + const auto& vulkan_info = + vulkan_implementation_->GetVulkanInstance()->vulkan_info(); + vulkan_version = vulkan_info.used_api_version; + } + UMA_HISTOGRAM_ENUMERATION( + "GPU.VulkanVersion", ConvertToHistogramVulkanVersion(vulkan_version)); + } + } + if (!vulkan_implementation_) { + if (gpu_preferences_.gr_context_type == gpu::GrContextType::kVulkan) { + gpu_preferences_.gr_context_type = gpu::GrContextType::kGL; + } + gpu_preferences_.use_vulkan = gpu::VulkanImplementationName::kNone; + gpu_feature_info_.status_values[gpu::GPU_FEATURE_TYPE_VULKAN] = + gpu::kGpuFeatureStatusDisabled; + } else { + gpu_info_.vulkan_info = + vulkan_implementation_->GetVulkanInstance()->vulkan_info(); + } +#else + gpu_preferences_.use_vulkan = gpu::VulkanImplementationName::kNone; + gpu_feature_info_.status_values[gpu::GPU_FEATURE_TYPE_VULKAN] = + gpu::kGpuFeatureStatusDisabled; +#endif // BUILDFLAG(ENABLE_VULKAN) +} + } // namespace gpu diff --git a/chromium/gpu/ipc/service/gpu_init.h b/chromium/gpu/ipc/service/gpu_init.h index 0545b9dfb73..5263436f3dc 100644 --- a/chromium/gpu/ipc/service/gpu_init.h +++ b/chromium/gpu/ipc/service/gpu_init.h @@ -79,6 +79,8 @@ class GPU_IPC_SERVICE_EXPORT GpuInit { #endif private: + void InitializeVulkan(); + GpuSandboxHelper* sandbox_helper_ = nullptr; std::unique_ptr<GpuWatchdogThread> watchdog_thread_; GPUInfo gpu_info_; diff --git a/chromium/gpu/ipc/service/gpu_memory_buffer_factory_android_hardware_buffer.cc b/chromium/gpu/ipc/service/gpu_memory_buffer_factory_android_hardware_buffer.cc index 50cd9b9bcba..b7811234c24 100644 --- a/chromium/gpu/ipc/service/gpu_memory_buffer_factory_android_hardware_buffer.cc +++ b/chromium/gpu/ipc/service/gpu_memory_buffer_factory_android_hardware_buffer.cc @@ -6,7 +6,6 @@ #include "base/android/android_hardware_buffer_compat.h" #include "base/logging.h" -#include "base/memory/shared_memory_handle.h" #include "base/stl_util.h" #include "build/build_config.h" #include "gpu/ipc/common/gpu_memory_buffer_impl_android_hardware_buffer.h" diff --git a/chromium/gpu/ipc/service/gpu_memory_buffer_factory_native_pixmap.cc b/chromium/gpu/ipc/service/gpu_memory_buffer_factory_native_pixmap.cc index d10e5e21cc1..dd1e489ad49 100644 --- a/chromium/gpu/ipc/service/gpu_memory_buffer_factory_native_pixmap.cc +++ b/chromium/gpu/ipc/service/gpu_memory_buffer_factory_native_pixmap.cc @@ -165,13 +165,15 @@ GpuMemoryBufferFactoryNativePixmap::CreateAnonymousImage( #endif if (!pixmap.get()) { LOG(ERROR) << "Failed to create pixmap " << size.ToString() << ", " - << gfx::BufferFormatToString(format); + << gfx::BufferFormatToString(format) << ", usage " + << gfx::BufferUsageToString(usage); return nullptr; } auto image = base::MakeRefCounted<gl::GLImageNativePixmap>(size, format); if
(!image->Initialize(std::move(pixmap))) { LOG(ERROR) << "Failed to create GLImage " << size.ToString() << ", " - << gfx::BufferFormatToString(format); + << gfx::BufferFormatToString(format) << ", usage " + << gfx::BufferUsageToString(usage); return nullptr; } *is_cleared = true; diff --git a/chromium/gpu/ipc/service/gpu_watchdog_thread.cc b/chromium/gpu/ipc/service/gpu_watchdog_thread.cc index b4b8b36c805..873527c6fcf 100644 --- a/chromium/gpu/ipc/service/gpu_watchdog_thread.cc +++ b/chromium/gpu/ipc/service/gpu_watchdog_thread.cc @@ -10,6 +10,7 @@ #include "base/files/file_util.h" #include "base/format_macros.h" #include "base/message_loop/message_loop_current.h" +#include "base/metrics/histogram_functions.h" #include "base/power_monitor/power_monitor.h" #include "base/single_thread_task_runner.h" #include "base/strings/string_number_conversions.h" @@ -81,7 +82,6 @@ GpuWatchdogThreadImplV1::GpuWatchdogThreadImplV1() host_tty_ = GetActiveTTY(); #endif base::MessageLoopCurrent::Get()->AddTaskObserver(&task_observer_); - GpuWatchdogHistogram(GpuWatchdogThreadEvent::kGpuWatchdogStart); } // static @@ -124,7 +124,7 @@ void GpuWatchdogThreadImplV1::OnForegrounded() { void GpuWatchdogThreadImplV1::GpuWatchdogHistogram( GpuWatchdogThreadEvent thread_event) { - UMA_HISTOGRAM_ENUMERATION("GPU.WatchdogThread.Event", thread_event); + base::UmaHistogramEnumeration("GPU.WatchdogThread.Event", thread_event); } bool GpuWatchdogThreadImplV1::IsGpuHangDetectedForTesting() { @@ -149,7 +149,8 @@ GpuWatchdogThreadImplV1::GpuWatchdogTaskObserver::~GpuWatchdogTaskObserver() = default; void GpuWatchdogThreadImplV1::GpuWatchdogTaskObserver::WillProcessTask( - const base::PendingTask& pending_task) { + const base::PendingTask& pending_task, + bool was_blocked_or_low_priority) { watchdog_->CheckArmed(); } @@ -330,6 +331,14 @@ void GpuWatchdogThreadImplV1::DeliberatelyTerminateToRecoverFromHang() { // Should not get here while the system is suspended. DCHECK(!suspension_counter_.HasRefs()); + // If this metric is added too early (e.g. at watchdog creation time), it + // cannot be persistent. The histogram data will be lost after a crash or + // browser exit. Delay the recording of kGpuWatchdogStart until the first + // OnCheckTimeout(). + if (!is_watchdog_start_histogram_recorded) { + is_watchdog_start_histogram_recorded = true; + GpuWatchdogHistogram(GpuWatchdogThreadEvent::kGpuWatchdogStart); + } + // If the watchdog woke up significantly behind schedule, disarm and reset // the watchdog check. This is to prevent the watchdog thread from terminating // when a machine wakes up from sleep or hibernation, which would otherwise @@ -375,6 +384,7 @@ void GpuWatchdogThreadImplV1::DeliberatelyTerminateToRecoverFromHang() { // Don't crash if we're not on the TTY of our host X11 server. int active_tty = GetActiveTTY(); if (host_tty_ != -1 && active_tty != -1 && host_tty_ != active_tty) { + OnAcknowledge(); return; } #endif diff --git a/chromium/gpu/ipc/service/gpu_watchdog_thread.h b/chromium/gpu/ipc/service/gpu_watchdog_thread.h index 3112cef2008..7d128d5d362 100644 --- a/chromium/gpu/ipc/service/gpu_watchdog_thread.h +++ b/chromium/gpu/ipc/service/gpu_watchdog_thread.h @@ -58,6 +58,13 @@ class GPU_IPC_SERVICE_EXPORT GpuWatchdogThread : public base::Thread, // viz::GpuServiceImpl::~GpuServiceImpl() virtual void OnGpuProcessTearDown() = 0; + // Pause the GPU watchdog to stop the timeout task. If the current heavy task + // is not running on the GPU driver, the watchdog can be paused to avoid + // an unneeded crash.
+ virtual void PauseWatchdog() = 0; + // Continue the watchdog after a pause. + virtual void ResumeWatchdog() = 0; + virtual void GpuWatchdogHistogram(GpuWatchdogThreadEvent thread_event) = 0; // For gpu testing only. Return status for the watchdog tests @@ -86,6 +93,8 @@ class GPU_IPC_SERVICE_EXPORT GpuWatchdogThreadImplV1 void OnForegrounded() override; void OnInitComplete() override {} void OnGpuProcessTearDown() override {} + void ResumeWatchdog() override {} + void PauseWatchdog() override {} void GpuWatchdogHistogram(GpuWatchdogThreadEvent thread_event) override; bool IsGpuHangDetectedForTesting() override; @@ -105,7 +114,8 @@ class GPU_IPC_SERVICE_EXPORT GpuWatchdogThreadImplV1 ~GpuWatchdogTaskObserver() override; // Implements TaskObserver. - void WillProcessTask(const base::PendingTask& pending_task) override; + void WillProcessTask(const base::PendingTask& pending_task, + bool was_blocked_or_low_priority) override; void DidProcessTask(const base::PendingTask& pending_task) override; private: @@ -219,6 +229,9 @@ class GPU_IPC_SERVICE_EXPORT GpuWatchdogThreadImplV1 base::Time check_time_; base::TimeTicks check_timeticks_; + // Whether GpuWatchdogThreadEvent::kGpuWatchdogStart has been recorded. + bool is_watchdog_start_histogram_recorded = false; + #if defined(USE_X11) FILE* tty_file_; int host_tty_; diff --git a/chromium/gpu/ipc/service/gpu_watchdog_thread_unittest.cc b/chromium/gpu/ipc/service/gpu_watchdog_thread_unittest.cc index 6beb6aad8d4..925457ef637 100644 --- a/chromium/gpu/ipc/service/gpu_watchdog_thread_unittest.cc +++ b/chromium/gpu/ipc/service/gpu_watchdog_thread_unittest.cc @@ -2,15 +2,16 @@ // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. +#include "base/test/task_environment.h" #include "gpu/ipc/service/gpu_watchdog_thread_v2.h" -#include "base/message_loop/message_loop.h" #include "base/message_loop/message_loop_current.h" #include "base/power_monitor/power_monitor.h" #include "base/power_monitor/power_monitor_source.h" #include "base/test/power_monitor_test_base.h" #include "base/threading/thread_task_runner_handle.h" #include "base/time/time.h" +#include "build/build_config.h" #include "testing/gtest/include/gtest/gtest.h" namespace gpu { @@ -19,6 +20,9 @@ namespace { constexpr auto kGpuWatchdogTimeoutForTesting = base::TimeDelta::FromMilliseconds(1000); +constexpr base::TimeDelta kMaxWaitTimeForTesting = + base::TimeDelta::FromMilliseconds(4000); + // This task will run for duration_ms milliseconds. void SimpleTask(base::TimeDelta duration) { base::PlatformThread::Sleep(duration); @@ -41,7 +45,7 @@ class GpuWatchdogTest : public testing::Test { protected: ~GpuWatchdogTest() override = default; - base::MessageLoop main_loop; + base::test::SingleThreadTaskEnvironment task_environment_; base::RunLoop run_loop; std::unique_ptr<gpu::GpuWatchdogThread> watchdog_thread_; }; @@ -70,6 +74,7 @@ void GpuWatchdogTest::SetUp() { watchdog_thread_ = gpu::GpuWatchdogThreadImplV2::Create( /*start_backgrounded*/ false, /*timeout*/ kGpuWatchdogTimeoutForTesting, + /*max_wait_time*/ kMaxWaitTimeForTesting, /*test_mode*/ true); } @@ -136,9 +141,16 @@ void GpuWatchdogPowerTest::LongTaskOnResume( // GPU Hang In Initialization TEST_F(GpuWatchdogTest, GpuInitializationHang) { - // Gpu init (5000 ms) takes longer than timeout (2000 ms). + // GPU init takes longer than timeout; the Windows duration breakdown is sketched below.
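// A worked breakdown of the Windows sleep durations used in these tests, with
// the constants visible in this patch (timeout = 1000 ms, max wait = 4000 ms,
// kMaxCountOfMoreGpuThreadTimeAllowed = 4 from gpu_watchdog_thread_v2.h); the
// value of kInitFactor is not shown here, so assume kInitFactor = 2 purely for
// illustration:
//   1000 ms * kInitFactor (= 2000)  init timeout
// + 1000 ms * 4           (= 4000)  extra on-thread time Windows may grant
// + 4000 ms                         max wait for the GPU to respond
// + 3000 ms                         safety margin
// = 13000 ms of blocking work, enough to guarantee a reported hang.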
+#if defined(OS_WIN) + SimpleTask(kGpuWatchdogTimeoutForTesting * kInitFactor + + kGpuWatchdogTimeoutForTesting * + kMaxCountOfMoreGpuThreadTimeAllowed + + kMaxWaitTimeForTesting + base::TimeDelta::FromMilliseconds(3000)); +#else SimpleTask(kGpuWatchdogTimeoutForTesting * kInitFactor + - base::TimeDelta::FromMilliseconds(3000)); + kMaxWaitTimeForTesting + base::TimeDelta::FromMilliseconds(3000)); +#endif // Gpu hangs. OnInitComplete() is not called @@ -154,23 +166,24 @@ TEST_F(GpuWatchdogTest, GpuInitializationAndRunningTasks) { // Start running GPU tasks. Watchdog function WillProcessTask(), // DidProcessTask() and ReportProgress() are tested. - main_loop.task_runner()->PostTask( + task_environment_.GetMainThreadTaskRunner()->PostTask( FROM_HERE, base::BindOnce(&SimpleTask, base::TimeDelta::FromMilliseconds(500))); - main_loop.task_runner()->PostTask( + task_environment_.GetMainThreadTaskRunner()->PostTask( FROM_HERE, base::BindOnce(&SimpleTask, base::TimeDelta::FromMilliseconds(500))); // This long task takes 3000 milliseconds to finish, longer than timeout. // But it reports progress every 500 milliseconds - main_loop.task_runner()->PostTask( + task_environment_.GetMainThreadTaskRunner()->PostTask( FROM_HERE, base::BindOnce(&GpuWatchdogTest::LongTaskWithReportProgress, base::Unretained(this), kGpuWatchdogTimeoutForTesting + base::TimeDelta::FromMilliseconds(2000), base::TimeDelta::FromMilliseconds(500))); - main_loop.task_runner()->PostTask(FROM_HERE, run_loop.QuitClosure()); + task_environment_.GetMainThreadTaskRunner()->PostTask(FROM_HERE, + run_loop.QuitClosure()); run_loop.Run(); // Everything should be fine. No GPU hang detected. @@ -183,13 +196,25 @@ TEST_F(GpuWatchdogTest, GpuRunningATaskHang) { // Report gpu init complete watchdog_thread_->OnInitComplete(); - // Start running a GPU task. This long task takes 6000 milliseconds to finish. - main_loop.task_runner()->PostTask( + // Start running a GPU task. +#if defined(OS_WIN) + task_environment_.GetMainThreadTaskRunner()->PostTask( + FROM_HERE, + base::BindOnce(&SimpleTask, kGpuWatchdogTimeoutForTesting * 2 + + kGpuWatchdogTimeoutForTesting * + kMaxCountOfMoreGpuThreadTimeAllowed + + kMaxWaitTimeForTesting + + base::TimeDelta::FromMilliseconds(4000))); +#else + task_environment_.GetMainThreadTaskRunner()->PostTask( FROM_HERE, base::BindOnce(&SimpleTask, kGpuWatchdogTimeoutForTesting * 2 + + kMaxWaitTimeForTesting + base::TimeDelta::FromMilliseconds(4000))); +#endif - main_loop.task_runner()->PostTask(FROM_HERE, run_loop.QuitClosure()); + task_environment_.GetMainThreadTaskRunner()->PostTask(FROM_HERE, + run_loop.QuitClosure()); run_loop.Run(); // This GPU task takes too long. A GPU hang should be detected. @@ -209,11 +234,12 @@ TEST_F(GpuWatchdogTest, ChromeInBackground) { watchdog_thread_->OnInitComplete(); // Run a task that takes longer (3000 milliseconds) than timeout. - main_loop.task_runner()->PostTask( + task_environment_.GetMainThreadTaskRunner()->PostTask( FROM_HERE, base::BindOnce(&SimpleTask, kGpuWatchdogTimeoutForTesting * 2 + base::TimeDelta::FromMilliseconds(1000))); - main_loop.task_runner()->PostTask(FROM_HERE, run_loop.QuitClosure()); + task_environment_.GetMainThreadTaskRunner()->PostTask(FROM_HERE, + run_loop.QuitClosure()); run_loop.Run(); // The gpu might be slow when running in the background. This is ok. @@ -228,16 +254,32 @@ TEST_F(GpuWatchdogTest, GpuSwitchingToForegroundHang) { // A task stays in the background for 200 milliseconds, and then // switches to the foreground and runs for 6000 milliseconds. 
This is longer // than the first-time foreground watchdog timeout (2000 ms). - main_loop.task_runner()->PostTask( +#if defined(OS_WIN) + task_environment_.GetMainThreadTaskRunner()->PostTask( + FROM_HERE, + base::BindOnce(&GpuWatchdogTest::LongTaskFromBackgroundToForeground, + base::Unretained(this), + /*duration*/ kGpuWatchdogTimeoutForTesting * 2 + + kGpuWatchdogTimeoutForTesting * + kMaxCountOfMoreGpuThreadTimeAllowed + + kMaxWaitTimeForTesting + + base::TimeDelta::FromMilliseconds(4200), + /*time_to_switch_to_foreground*/ + base::TimeDelta::FromMilliseconds(200))); +#else + task_environment_.GetMainThreadTaskRunner()->PostTask( FROM_HERE, base::BindOnce(&GpuWatchdogTest::LongTaskFromBackgroundToForeground, base::Unretained(this), /*duration*/ kGpuWatchdogTimeoutForTesting * 2 + + kMaxWaitTimeForTesting + base::TimeDelta::FromMilliseconds(4200), /*time_to_switch_to_foreground*/ base::TimeDelta::FromMilliseconds(200))); +#endif - main_loop.task_runner()->PostTask(FROM_HERE, run_loop.QuitClosure()); + task_environment_.GetMainThreadTaskRunner()->PostTask(FROM_HERE, + run_loop.QuitClosure()); run_loop.Run(); // It takes too long to finish a task after switching to the foreground. @@ -246,6 +288,37 @@ TEST_F(GpuWatchdogTest, GpuSwitchingToForegroundHang) { EXPECT_TRUE(result); } +TEST_F(GpuWatchdogTest, GpuInitializationPause) { + // Running for 100 ms in the beginning of GPU init. + SimpleTask(base::TimeDelta::FromMilliseconds(100)); + watchdog_thread_->PauseWatchdog(); + + // The Gpu init continues for another (init timeout + 1000) ms after the pause + SimpleTask(kGpuWatchdogTimeoutForTesting * kInitFactor + + base::TimeDelta::FromMilliseconds(1000)); + + // No GPU hang is detected when the watchdog is paused. + bool result = watchdog_thread_->IsGpuHangDetectedForTesting(); + EXPECT_FALSE(result); + + // Continue the watchdog now. + watchdog_thread_->ResumeWatchdog(); + // The Gpu init continues for (init timeout + 4000) ms. +#if defined(OS_WIN) + SimpleTask(kGpuWatchdogTimeoutForTesting * kInitFactor + + kGpuWatchdogTimeoutForTesting * + kMaxCountOfMoreGpuThreadTimeAllowed + + kMaxWaitTimeForTesting + base::TimeDelta::FromMilliseconds(4000)); +#else + SimpleTask(kGpuWatchdogTimeoutForTesting * kInitFactor + + kMaxWaitTimeForTesting + base::TimeDelta::FromMilliseconds(4000)); +#endif + + // A GPU hang should be detected. + result = watchdog_thread_->IsGpuHangDetectedForTesting(); + EXPECT_TRUE(result); +} + TEST_F(GpuWatchdogPowerTest, GpuOnSuspend) { // watchdog_thread_->OnInitComplete() is called in SetUp @@ -253,11 +326,12 @@ TEST_F(GpuWatchdogPowerTest, GpuOnSuspend) { power_monitor_source_->GenerateSuspendEvent(); // Run a task that takes longer (5000 milliseconds) than timeout. - main_loop.task_runner()->PostTask( + task_environment_.GetMainThreadTaskRunner()->PostTask( FROM_HERE, base::BindOnce(&SimpleTask, kGpuWatchdogTimeoutForTesting * 2 + base::TimeDelta::FromMilliseconds(3000))); - main_loop.task_runner()->PostTask(FROM_HERE, run_loop.QuitClosure()); + task_environment_.GetMainThreadTaskRunner()->PostTask(FROM_HERE, + run_loop.QuitClosure()); run_loop.Run(); // A task might take long time to finish after entering suspension mode. @@ -272,16 +346,30 @@ TEST_F(GpuWatchdogPowerTest, GpuOnResumeHang) { // This task stays in the suspension mode for 200 milliseconds, and it // wakes up on power resume and then runs for 6000 milliseconds. This is // longer than the watchdog resume timeout (2000 ms). 
- main_loop.task_runner()->PostTask( +#if defined(OS_WIN) + task_environment_.GetMainThreadTaskRunner()->PostTask( + FROM_HERE, + base::BindOnce( + &GpuWatchdogPowerTest::LongTaskOnResume, base::Unretained(this), + /*duration*/ kGpuWatchdogTimeoutForTesting * kRestartFactor + + kGpuWatchdogTimeoutForTesting * + kMaxCountOfMoreGpuThreadTimeAllowed + + kMaxWaitTimeForTesting + base::TimeDelta::FromMilliseconds(4200), + /*time_to_power_resume*/ + base::TimeDelta::FromMilliseconds(200))); +#else + task_environment_.GetMainThreadTaskRunner()->PostTask( FROM_HERE, base::BindOnce( &GpuWatchdogPowerTest::LongTaskOnResume, base::Unretained(this), /*duration*/ kGpuWatchdogTimeoutForTesting * kRestartFactor + - base::TimeDelta::FromMilliseconds(4200), + kMaxWaitTimeForTesting + base::TimeDelta::FromMilliseconds(4200), /*time_to_power_resume*/ base::TimeDelta::FromMilliseconds(200))); +#endif - main_loop.task_runner()->PostTask(FROM_HERE, run_loop.QuitClosure()); + task_environment_.GetMainThreadTaskRunner()->PostTask(FROM_HERE, + run_loop.QuitClosure()); run_loop.Run(); // It takes too long to finish this task after power resume. A GPU hang should diff --git a/chromium/gpu/ipc/service/gpu_watchdog_thread_v2.cc b/chromium/gpu/ipc/service/gpu_watchdog_thread_v2.cc index 9677124367a..970e6e56022 100644 --- a/chromium/gpu/ipc/service/gpu_watchdog_thread_v2.cc +++ b/chromium/gpu/ipc/service/gpu_watchdog_thread_v2.cc @@ -7,9 +7,16 @@ #include "base/atomicops.h" #include "base/bind.h" #include "base/bind_helpers.h" +#include "base/bit_cast.h" #include "base/debug/alias.h" +#include "base/files/file_path.h" #include "base/message_loop/message_loop_current.h" +#include "base/metrics/histogram_functions.h" +#include "base/metrics/persistent_histogram_allocator.h" +#include "base/native_library.h" #include "base/power_monitor/power_monitor.h" +#include "base/strings/string_number_conversions.h" +#include "base/threading/platform_thread.h" #include "base/threading/thread_task_runner_handle.h" #include "base/time/time.h" #include "build/build_config.h" @@ -18,11 +25,25 @@ namespace gpu { GpuWatchdogThreadImplV2::GpuWatchdogThreadImplV2(base::TimeDelta timeout, + base::TimeDelta max_wait_time, bool is_test_mode) : watchdog_timeout_(timeout), + in_gpu_initialization_(true), + max_wait_time_(max_wait_time), is_test_mode_(is_test_mode), watched_gpu_task_runner_(base::ThreadTaskRunnerHandle::Get()) { base::MessageLoopCurrent::Get()->AddTaskObserver(this); +#if defined(OS_WIN) + // GetCurrentThread returns a pseudo-handle that cannot be used by one thread + // to identify another. DuplicateHandle creates a "real" handle that can be + // used for this purpose. 
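// (Per the Win32 documentation, THREAD_QUERY_INFORMATION is the access right
// GetThreadTimes() needs, which is why the duplicated handle below requests
// exactly that right; the destructor closes it again with CloseHandle().)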
+ if (!DuplicateHandle(GetCurrentProcess(), GetCurrentThread(), + GetCurrentProcess(), &watched_thread_handle_, + THREAD_QUERY_INFORMATION, FALSE, 0)) { + watched_thread_handle_ = nullptr; + } +#endif + Arm(); } @@ -33,15 +54,20 @@ GpuWatchdogThreadImplV2::~GpuWatchdogThreadImplV2() { base::MessageLoopCurrent::Get()->RemoveTaskObserver(this); base::PowerMonitor::RemoveObserver(this); GpuWatchdogHistogram(GpuWatchdogThreadEvent::kGpuWatchdogEnd); +#if defined(OS_WIN) + if (watched_thread_handle_) + CloseHandle(watched_thread_handle_); +#endif } // static std::unique_ptr<GpuWatchdogThreadImplV2> GpuWatchdogThreadImplV2::Create( bool start_backgrounded, base::TimeDelta timeout, + base::TimeDelta max_wait_time, bool is_test_mode) { - auto watchdog_thread = - base::WrapUnique(new GpuWatchdogThreadImplV2(timeout, is_test_mode)); + auto watchdog_thread = base::WrapUnique( + new GpuWatchdogThreadImplV2(timeout, max_wait_time, is_test_mode)); base::Thread::Options options; options.timer_slack = base::TIMER_SLACK_MAXIMUM; watchdog_thread->StartWithOptions(options); @@ -53,7 +79,7 @@ std::unique_ptr<GpuWatchdogThreadImplV2> GpuWatchdogThreadImplV2::Create( // static std::unique_ptr<GpuWatchdogThreadImplV2> GpuWatchdogThreadImplV2::Create( bool start_backgrounded) { - return Create(start_backgrounded, kGpuWatchdogTimeout, false); + return Create(start_backgrounded, kGpuWatchdogTimeout, kMaxWaitTime, false); } // Do not add power observer during watchdog init, PowerMonitor might not be up @@ -70,25 +96,30 @@ void GpuWatchdogThreadImplV2::AddPowerObserver() { base::Unretained(this))); } -// Called from the gpu thread. +// Android Chrome goes to the background. Called from the gpu thread. void GpuWatchdogThreadImplV2::OnBackgrounded() { task_runner()->PostTask( FROM_HERE, - base::BindOnce(&GpuWatchdogThreadImplV2::OnWatchdogBackgrounded, - base::Unretained(this))); + base::BindOnce(&GpuWatchdogThreadImplV2::StopWatchdogTimeoutTask, + base::Unretained(this), kAndroidBackgroundForeground)); } -// Called from the gpu thread. +// Android Chrome goes to the foreground. Called from the gpu thread. void GpuWatchdogThreadImplV2::OnForegrounded() { task_runner()->PostTask( FROM_HERE, - base::BindOnce(&GpuWatchdogThreadImplV2::OnWatchdogForegrounded, - base::Unretained(this))); + base::BindOnce(&GpuWatchdogThreadImplV2::RestartWatchdogTimeoutTask, + base::Unretained(this), kAndroidBackgroundForeground)); } // Called from the gpu thread when gpu init has completed. void GpuWatchdogThreadImplV2::OnInitComplete() { DCHECK(watched_gpu_task_runner_->BelongsToCurrentThread()); + + task_runner()->PostTask( + FROM_HERE, + base::BindOnce(&GpuWatchdogThreadImplV2::UpdateInitializationFlag, + base::Unretained(this))); Disarm(); } @@ -104,6 +135,26 @@ void GpuWatchdogThreadImplV2::OnGpuProcessTearDown() { Arm(); } +// Called from the gpu main thread. +void GpuWatchdogThreadImplV2::PauseWatchdog() { + DCHECK(watched_gpu_task_runner_->BelongsToCurrentThread()); + + task_runner()->PostTask( + FROM_HERE, + base::BindOnce(&GpuWatchdogThreadImplV2::StopWatchdogTimeoutTask, + base::Unretained(this), kGeneralGpuFlow)); +} + +// Called from the gpu main thread. +void GpuWatchdogThreadImplV2::ResumeWatchdog() { + DCHECK(watched_gpu_task_runner_->BelongsToCurrentThread()); + + task_runner()->PostTask( + FROM_HERE, + base::BindOnce(&GpuWatchdogThreadImplV2::RestartWatchdogTimeoutTask, + base::Unretained(this), kGeneralGpuFlow)); +} + // Running on the watchdog thread. // On Linux, Init() will be called twice for Sandbox Initialization. 
The // watchdog is stopped and then restarted in StartSandboxLinux(). Everything @@ -113,17 +164,24 @@ void GpuWatchdogThreadImplV2::Init() { // Get and Invalidate weak_ptr should be done on the watchdog thread only. weak_ptr_ = weak_factory_.GetWeakPtr(); + base::TimeDelta timeout = watchdog_timeout_ * kInitFactor; task_runner()->PostDelayedTask( FROM_HERE, base::BindOnce(&GpuWatchdogThreadImplV2::OnWatchdogTimeout, weak_ptr_), - watchdog_timeout_ * kInitFactor); + timeout); last_arm_disarm_counter_ = base::subtle::NoBarrier_Load(&arm_disarm_counter_); watchdog_start_timeticks_ = base::TimeTicks::Now(); last_on_watchdog_timeout_timeticks_ = watchdog_start_timeticks_; - last_on_watchdog_timeout_time_ = base::Time::Now(); - GpuWatchdogHistogram(GpuWatchdogThreadEvent::kGpuWatchdogStart); +#if defined(OS_WIN) + if (watched_thread_handle_) { + if (base::ThreadTicks::IsSupported()) + base::ThreadTicks::WaitUntilInitialized(); + last_on_watchdog_timeout_thread_ticks_ = GetWatchedThreadTime(); + remaining_watched_thread_ticks_ = timeout; + } +#endif } // Running on the watchdog thread. @@ -138,7 +196,8 @@ void GpuWatchdogThreadImplV2::ReportProgress() { } void GpuWatchdogThreadImplV2::WillProcessTask( - const base::PendingTask& pending_task) { + const base::PendingTask& pending_task, + bool was_blocked_or_low_priority) { DCHECK(watched_gpu_task_runner_->BelongsToCurrentThread()); // The watchdog is armed at the beginning of the gpu process teardown. @@ -160,23 +219,14 @@ void GpuWatchdogThreadImplV2::DidProcessTask( Disarm(); } -// Running on the watchdog thread. +// Power Suspends. Running on the watchdog thread. void GpuWatchdogThreadImplV2::OnSuspend() { - DCHECK(watchdog_thread_task_runner_->BelongsToCurrentThread()); - in_power_suspension_ = true; - // Revoke any pending watchdog timeout task - weak_factory_.InvalidateWeakPtrs(); - suspend_timeticks_ = base::TimeTicks::Now(); + StopWatchdogTimeoutTask(kPowerSuspendResume); } -// Running on the watchdog thread. +// Power Resumes. Running on the watchdog thread. void GpuWatchdogThreadImplV2::OnResume() { - DCHECK(watchdog_thread_task_runner_->BelongsToCurrentThread()); - - in_power_suspension_ = false; - RestartWatchdogTimeoutTask(); - resume_timeticks_ = base::TimeTicks::Now(); - is_first_timeout_after_power_resume = true; + RestartWatchdogTimeoutTask(kPowerSuspendResume); } // Running on the watchdog thread. @@ -188,41 +238,92 @@ void GpuWatchdogThreadImplV2::OnAddPowerObserver() { } // Running on the watchdog thread. -void GpuWatchdogThreadImplV2::OnWatchdogBackgrounded() { - DCHECK(watchdog_thread_task_runner_->BelongsToCurrentThread()); - - is_backgrounded_ = true; - // Revoke any pending watchdog timeout task - weak_factory_.InvalidateWeakPtrs(); - backgrounded_timeticks_ = base::TimeTicks::Now(); -} - -// Running on the watchdog thread. -void GpuWatchdogThreadImplV2::OnWatchdogForegrounded() { - DCHECK(watchdog_thread_task_runner_->BelongsToCurrentThread()); - - is_backgrounded_ = false; - RestartWatchdogTimeoutTask(); - foregrounded_timeticks_ = base::TimeTicks::Now(); -} - -// Running on the watchdog thread. 
-void GpuWatchdogThreadImplV2::RestartWatchdogTimeoutTask() { +void GpuWatchdogThreadImplV2::RestartWatchdogTimeoutTask( + PauseResumeSource source_of_request) { DCHECK(watchdog_thread_task_runner_->BelongsToCurrentThread()); + base::TimeDelta timeout; + + switch (source_of_request) { + case kAndroidBackgroundForeground: + if (!is_backgrounded_) + return; + is_backgrounded_ = false; + timeout = watchdog_timeout_ * kRestartFactor; + foregrounded_timeticks_ = base::TimeTicks::Now(); + foregrounded_event_ = true; + num_of_timeout_after_foregrounded_ = 0; + break; + case kPowerSuspendResume: + if (!in_power_suspension_) + return; + in_power_suspension_ = false; + timeout = watchdog_timeout_ * kRestartFactor; + power_resume_timeticks_ = base::TimeTicks::Now(); + power_resumed_event_ = true; + num_of_timeout_after_power_resume_ = 0; + break; + case kGeneralGpuFlow: + if (!is_paused_) + return; + is_paused_ = false; + timeout = watchdog_timeout_ * kInitFactor; + watchdog_resume_timeticks_ = base::TimeTicks::Now(); + break; + } - if (!is_backgrounded_ && !in_power_suspension_) { - // Make the timeout twice long. The system/gpu might be very slow right - // after resume or foregrounded. + if (!is_backgrounded_ && !in_power_suspension_ && !is_paused_) { weak_ptr_ = weak_factory_.GetWeakPtr(); task_runner()->PostDelayedTask( FROM_HERE, base::BindOnce(&GpuWatchdogThreadImplV2::OnWatchdogTimeout, weak_ptr_), - watchdog_timeout_ * kRestartFactor); + timeout); last_on_watchdog_timeout_timeticks_ = base::TimeTicks::Now(); - last_on_watchdog_timeout_time_ = base::Time::Now(); + last_arm_disarm_counter_ = + base::subtle::NoBarrier_Load(&arm_disarm_counter_); +#if defined(OS_WIN) + if (watched_thread_handle_) { + last_on_watchdog_timeout_thread_ticks_ = GetWatchedThreadTime(); + remaining_watched_thread_ticks_ = timeout; + } +#endif } } +void GpuWatchdogThreadImplV2::StopWatchdogTimeoutTask( + PauseResumeSource source_of_request) { + DCHECK(watchdog_thread_task_runner_->BelongsToCurrentThread()); + + switch (source_of_request) { + case kAndroidBackgroundForeground: + if (is_backgrounded_) + return; + is_backgrounded_ = true; + backgrounded_timeticks_ = base::TimeTicks::Now(); + foregrounded_event_ = false; + break; + case kPowerSuspendResume: + if (in_power_suspension_) + return; + in_power_suspension_ = true; + power_suspend_timeticks_ = base::TimeTicks::Now(); + power_resumed_event_ = false; + break; + case kGeneralGpuFlow: + if (is_paused_) + return; + is_paused_ = true; + watchdog_pause_timeticks_ = base::TimeTicks::Now(); + break; + } + + // Revoke any pending watchdog timeout task. + weak_factory_.InvalidateWeakPtrs(); +} + +void GpuWatchdogThreadImplV2::UpdateInitializationFlag() { + in_gpu_initialization_ = false; +} + // Called from the gpu main thread. // The watchdog is armed only in these three functions - // GpuWatchdogThreadImplV2(), WillProcessTask(), and OnGpuProcessTearDown() @@ -264,19 +365,52 @@ void GpuWatchdogThreadImplV2::OnWatchdogTimeout() { DCHECK(watchdog_thread_task_runner_->BelongsToCurrentThread()); DCHECK(!is_backgrounded_); DCHECK(!in_power_suspension_); + DCHECK(!is_paused_); + + // If this metric is added too early (e.g., at watchdog creation time), it cannot + // be persistent. The histogram data will be lost after a crash or browser exit. + // Delay the recording of kGpuWatchdogStart until the first + // OnWatchdogTimeout() to ensure this metric is created in the persistent + // memory.
+ if (!is_watchdog_start_histogram_recorded) { + is_watchdog_start_histogram_recorded = true; + GpuWatchdogHistogram(GpuWatchdogThreadEvent::kGpuWatchdogStart); + } + base::subtle::Atomic32 arm_disarm_counter = base::subtle::NoBarrier_Load(&arm_disarm_counter_); - - // disarmed is true if it's an even number. - bool disarmed = arm_disarm_counter % 2 == 0; + GpuWatchdogTimeoutHistogram(GpuWatchdogTimeoutEvent::kTimeout); + if (power_resumed_event_) + num_of_timeout_after_power_resume_++; + if (foregrounded_event_) + num_of_timeout_after_foregrounded_++; + + // Collect all needed info for gpu hang detection. + bool disarmed = arm_disarm_counter % 2 == 0; // even number + bool gpu_makes_progress = arm_disarm_counter != last_arm_disarm_counter_; - last_arm_disarm_counter_ = arm_disarm_counter; + bool watched_thread_needs_more_time = + WatchedThreadNeedsMoreTime(disarmed || gpu_makes_progress); + + // No gpu hang is detected. Continue with another OnWatchdogTimeout task. + if (disarmed || gpu_makes_progress || watched_thread_needs_more_time) { + last_on_watchdog_timeout_timeticks_ = base::TimeTicks::Now(); + last_arm_disarm_counter_ = + base::subtle::NoBarrier_Load(&arm_disarm_counter_); + + task_runner()->PostDelayedTask( + FROM_HERE, + base::BindOnce(&GpuWatchdogThreadImplV2::OnWatchdogTimeout, weak_ptr_), + watchdog_timeout_); + return; + } - // No gpu hang is detected. Continue with another OnWatchdogTimeout - if (disarmed || gpu_makes_progress) { + // An experiment for all platforms: Wait for max_wait_time_ and see if the GPU + // will respond. + GpuWatchdogTimeoutHistogram(GpuWatchdogTimeoutEvent::kTimeoutWait); + if (GpuRespondsAfterWaiting()) { last_on_watchdog_timeout_timeticks_ = base::TimeTicks::Now(); - last_on_watchdog_timeout_time_ = base::Time::Now(); - is_first_timeout_after_power_resume = false; + last_arm_disarm_counter_ = + base::subtle::NoBarrier_Load(&arm_disarm_counter_); task_runner()->PostDelayedTask( FROM_HERE, @@ -286,9 +420,117 @@ } // Still armed without any progress. GPU possibly hangs. + GpuWatchdogTimeoutHistogram(GpuWatchdogTimeoutEvent::kKill); DeliberatelyTerminateToRecoverFromHang(); } +bool GpuWatchdogThreadImplV2::GpuIsAlive() { + base::subtle::Atomic32 arm_disarm_counter = + base::subtle::NoBarrier_Load(&arm_disarm_counter_); + bool gpu_makes_progress = arm_disarm_counter != last_arm_disarm_counter_; + + return (gpu_makes_progress); +} + +bool GpuWatchdogThreadImplV2::WatchedThreadNeedsMoreTime( + bool no_gpu_hang_detected) { +#if defined(OS_WIN) + if (!watched_thread_handle_) + return false; + + // For metrics only - If count_of_more_gpu_thread_time_allowed_ > 0, we know + // extra time was extended in the previous OnWatchdogTimeout(). Now we find + // the gpu makes progress. Record this case. + if (no_gpu_hang_detected && count_of_more_gpu_thread_time_allowed_ > 0) { + GpuWatchdogTimeoutHistogram( + GpuWatchdogTimeoutEvent::kProgressAfterMoreThreadTime); + WindowsNumOfExtraTimeoutsHistogram(); + } + // For metrics only - The extra time was given in timeouts. + time_in_extra_timeouts_ = + count_of_more_gpu_thread_time_allowed_ * watchdog_timeout_; + + // Calculate how many thread ticks the watched thread spent doing the work.
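// Aside: GetWatchedThreadTime() (defined further below) measures on-thread
// time, not wall-clock time. In its GetThreadTimes() fallback, FILETIME values
// are expressed in 100-nanosecond units, so dividing the bit_cast int64_t by
// 10 yields microseconds. A quick check of that conversion: 1,000,000 FILETIME
// ticks = 100,000,000 ns = 100,000 us, and 1,000,000 / 10 = 100,000 us, which
// agrees.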
+ base::ThreadTicks now = GetWatchedThreadTime(); + base::TimeDelta thread_time_elapsed = + now - last_on_watchdog_timeout_thread_ticks_; + last_on_watchdog_timeout_thread_ticks_ = now; + remaining_watched_thread_ticks_ -= thread_time_elapsed; + + if (no_gpu_hang_detected || + count_of_more_gpu_thread_time_allowed_ >= + kMaxCountOfMoreGpuThreadTimeAllowed || + thread_time_elapsed < base::TimeDelta() /* bogus data */ || + remaining_watched_thread_ticks_ <= base::TimeDelta()) { + // Reset the remaining thread ticks. + remaining_watched_thread_ticks_ = watchdog_timeout_; + count_of_more_gpu_thread_time_allowed_ = 0; + return false; + } else { + count_of_more_gpu_thread_time_allowed_++; + // Only record it once for all extended timeouts on the same detected gpu + // hang, so we know this is equivalent to one crash in our crash reports. + if (count_of_more_gpu_thread_time_allowed_ == 1) + GpuWatchdogTimeoutHistogram(GpuWatchdogTimeoutEvent::kMoreThreadTime); + + return true; + } +#else + return false; +#endif +} + +#if defined(OS_WIN) +base::ThreadTicks GpuWatchdogThreadImplV2::GetWatchedThreadTime() { + DCHECK(watched_thread_handle_); + + if (base::ThreadTicks::IsSupported()) { + // Note: GetForThread() might return bogus results if running on different + // CPUs between two calls. + return base::ThreadTicks::GetForThread( + base::PlatformThreadHandle(watched_thread_handle_)); + } else { + FILETIME creation_time; + FILETIME exit_time; + FILETIME kernel_time; + FILETIME user_time; + BOOL result = GetThreadTimes(watched_thread_handle_, &creation_time, + &exit_time, &kernel_time, &user_time); + if (!result) + return base::ThreadTicks(); + + // Need to bit_cast to fix alignment, then divide by 10 to convert + // 100-nanosecond units to microseconds. + int64_t user_time_us = bit_cast<int64_t, FILETIME>(user_time) / 10; + int64_t kernel_time_us = bit_cast<int64_t, FILETIME>(kernel_time) / 10; + + return base::ThreadTicks() + + base::TimeDelta::FromMicroseconds(user_time_us + kernel_time_us); + } } +#endif + +// This is an experiment on all platforms to see whether the GPU will respond +// after waiting longer. +bool GpuWatchdogThreadImplV2::GpuRespondsAfterWaiting() { + base::TimeDelta duration; + base::TimeTicks start_timeticks = base::TimeTicks::Now(); + + while (duration < max_wait_time_) { + // Sleep for 1 second each time and check if the GPU makes progress. + base::PlatformThread::Sleep(base::TimeDelta::FromSeconds(1)); + duration = base::TimeTicks::Now() - start_timeticks; + + if (GpuIsAlive()) { + GpuWatchdogTimeoutHistogram(GpuWatchdogTimeoutEvent::kProgressAfterWait); + GpuWatchdogWaitTimeHistogram(duration); + return true; + } + } + + return false; +} + void GpuWatchdogThreadImplV2::DeliberatelyTerminateToRecoverFromHang() { DCHECK(watchdog_thread_task_runner_->BelongsToCurrentThread()); // If this is for gpu testing, do not terminate the gpu process. @@ -304,13 +546,18 @@ void GpuWatchdogThreadImplV2::DeliberatelyTerminateToRecoverFromHang() { // Store variables so they're available in crash dumps to help determine the // cause of any hang.
- base::TimeTicks current_timeticks = base::TimeTicks::Now(); - base::debug::Alias(&current_timeticks); + base::TimeTicks function_begin_timeticks = base::TimeTicks::Now(); + base::debug::Alias(&in_gpu_initialization_); + base::debug::Alias(&num_of_timeout_after_power_resume_); + base::debug::Alias(&num_of_timeout_after_foregrounded_); + base::debug::Alias(&function_begin_timeticks); base::debug::Alias(&watchdog_start_timeticks_); - base::debug::Alias(&suspend_timeticks_); - base::debug::Alias(&resume_timeticks_); + base::debug::Alias(&power_suspend_timeticks_); + base::debug::Alias(&power_resume_timeticks_); base::debug::Alias(&backgrounded_timeticks_); base::debug::Alias(&foregrounded_timeticks_); + base::debug::Alias(&watchdog_pause_timeticks_); + base::debug::Alias(&watchdog_resume_timeticks_); base::debug::Alias(&in_power_suspension_); base::debug::Alias(&in_gpu_process_teardown_); base::debug::Alias(&is_backgrounded_); @@ -318,21 +565,19 @@ void GpuWatchdogThreadImplV2::DeliberatelyTerminateToRecoverFromHang() { base::debug::Alias(&is_power_observer_added_); base::debug::Alias(&last_on_watchdog_timeout_timeticks_); base::TimeDelta timeticks_elapses = - current_timeticks - last_on_watchdog_timeout_timeticks_; + function_begin_timeticks - last_on_watchdog_timeout_timeticks_; base::debug::Alias(&timeticks_elapses); - - // If clock_time_elapses is much longer than time_elapses, it might be a sign - // of a busy system. - base::Time current_time = base::Time::Now(); - base::TimeDelta time_elapses = current_time - last_on_watchdog_timeout_time_; - base::debug::Alias(&current_time); - base::debug::Alias(&last_on_watchdog_timeout_time_); - base::debug::Alias(&time_elapses); +#if defined(OS_WIN) + base::debug::Alias(&remaining_watched_thread_ticks_); +#endif GpuWatchdogHistogram(GpuWatchdogThreadEvent::kGpuWatchdogKill); + crash_keys::gpu_watchdog_crashed_in_gpu_init.Set( + in_gpu_initialization_ ? "1" : "0"); + crash_keys::gpu_watchdog_kill_after_power_resume.Set( - is_first_timeout_after_power_resume ? "1" : "0"); + WithinOneMinFromPowerResumed() ? "1" : "0"); // Deliberately crash the process to create a crash dump.
*((volatile int*)0) = 0xdeadface; @@ -340,8 +585,130 @@ void GpuWatchdogThreadImplV2::GpuWatchdogHistogram( GpuWatchdogThreadEvent thread_event) { - UMA_HISTOGRAM_ENUMERATION("GPU.WatchdogThread.Event.V2", thread_event); - UMA_HISTOGRAM_ENUMERATION("GPU.WatchdogThread.Event", thread_event); + base::UmaHistogramEnumeration("GPU.WatchdogThread.Event.V2", thread_event); + base::UmaHistogramEnumeration("GPU.WatchdogThread.Event", thread_event); +} + +void GpuWatchdogThreadImplV2::GpuWatchdogTimeoutHistogram( + GpuWatchdogTimeoutEvent timeout_event) { + base::UmaHistogramEnumeration("GPU.WatchdogThread.Timeout", timeout_event); + + bool recorded = false; + if (in_gpu_initialization_) { + base::UmaHistogramEnumeration("GPU.WatchdogThread.Timeout.Init", + timeout_event); + recorded = true; + } + + if (WithinOneMinFromPowerResumed()) { + base::UmaHistogramEnumeration("GPU.WatchdogThread.Timeout.PowerResume", + timeout_event); + recorded = true; + } + + if (WithinOneMinFromForegrounded()) { + base::UmaHistogramEnumeration("GPU.WatchdogThread.Timeout.Foregrounded", + timeout_event); + recorded = true; + } + + if (!recorded) { + base::UmaHistogramEnumeration("GPU.WatchdogThread.Timeout.Normal", + timeout_event); + } +} + +#if defined(OS_WIN) +void GpuWatchdogThreadImplV2::WindowsNumOfExtraTimeoutsHistogram() { + // Record the number of timeouts the GPU main thread needs to make progress + // after GPU OnWatchdogTimeout() is triggered. The maximum count is 6, which + // is more than kMaxCountOfMoreGpuThreadTimeAllowed (4). + constexpr int kMin = 1; + constexpr int kMax = 6; + constexpr int kBuckets = 6; + int count = count_of_more_gpu_thread_time_allowed_; + bool recorded = false; + + base::UmaHistogramCustomCounts("GPU.WatchdogThread.ExtraThreadTime", count, + kMin, kMax, kBuckets); + + if (in_gpu_initialization_) { + base::UmaHistogramCustomCounts("GPU.WatchdogThread.ExtraThreadTime.Init", + count, kMin, kMax, kBuckets); + recorded = true; + } + + if (WithinOneMinFromPowerResumed()) { + base::UmaHistogramCustomCounts( + "GPU.WatchdogThread.ExtraThreadTime.PowerResume", count, kMin, kMax, + kBuckets); + recorded = true; + } + + if (WithinOneMinFromForegrounded()) { + base::UmaHistogramCustomCounts( + "GPU.WatchdogThread.ExtraThreadTime.Foregrounded", count, kMin, kMax, + kBuckets); + recorded = true; + } + + if (!recorded) { + base::UmaHistogramCustomCounts("GPU.WatchdogThread.ExtraThreadTime.Normal", + count, kMin, kMax, kBuckets); + } +} +#endif + +void GpuWatchdogThreadImplV2::GpuWatchdogWaitTimeHistogram( + base::TimeDelta wait_time) { +#if defined(OS_WIN) + // Add the extra thread time the GPU main thread was given. + wait_time += time_in_extra_timeouts_; +#endif + + // Record the wait time in OnWatchdogTimeout() for the GPU main thread to + // make progress. The maximum recording time is 150 seconds because + // Windows needs to add the time spent before reaching here (max 60 sec).
+ constexpr base::TimeDelta kMin = base::TimeDelta::FromSeconds(1); + constexpr base::TimeDelta kMax = base::TimeDelta::FromSeconds(150); + constexpr int kBuckets = 50; + bool recorded = false; + + base::UmaHistogramCustomTimes("GPU.WatchdogThread.WaitTime", wait_time, kMin, + kMax, kBuckets); + + if (in_gpu_initialization_) { + base::UmaHistogramCustomTimes("GPU.WatchdogThread.WaitTime.Init", wait_time, + kMin, kMax, kBuckets); + recorded = true; + } + + if (WithinOneMinFromPowerResumed()) { + base::UmaHistogramCustomTimes("GPU.WatchdogThread.WaitTime.PowerResume", + wait_time, kMin, kMax, kBuckets); + recorded = true; + } + + if (WithinOneMinFromForegrounded()) { + base::UmaHistogramCustomTimes("GPU.WatchdogThread.WaitTime.Foregrounded", + wait_time, kMin, kMax, kBuckets); + recorded = true; + } + + if (!recorded) { + base::UmaHistogramCustomTimes("GPU.WatchdogThread.WaitTime.Normal", + wait_time, kMin, kMax, kBuckets); + } +} + +bool GpuWatchdogThreadImplV2::WithinOneMinFromPowerResumed() { + size_t count = base::TimeDelta::FromSeconds(60) / watchdog_timeout_; + return power_resumed_event_ && num_of_timeout_after_power_resume_ <= count; +} + +bool GpuWatchdogThreadImplV2::WithinOneMinFromForegrounded() { + size_t count = base::TimeDelta::FromSeconds(60) / watchdog_timeout_; + return foregrounded_event_ && num_of_timeout_after_foregrounded_ <= count; } // For gpu testing only. Return whether a GPU hang was detected or not. diff --git a/chromium/gpu/ipc/service/gpu_watchdog_thread_v2.h b/chromium/gpu/ipc/service/gpu_watchdog_thread_v2.h index fe5d4d94521..f9a63c7d953 100644 --- a/chromium/gpu/ipc/service/gpu_watchdog_thread_v2.h +++ b/chromium/gpu/ipc/service/gpu_watchdog_thread_v2.h @@ -8,6 +8,36 @@ #include "gpu/ipc/service/gpu_watchdog_thread.h" namespace gpu { +#if defined(OS_WIN) +// If the actual time the watched GPU thread spent doing actual work is less +// than the watchdog timeout, the GPU thread can continue running through +// OnGPUWatchdogTimeout for at most 4 times before the gpu thread is killed. +constexpr int kMaxCountOfMoreGpuThreadTimeAllowed = 4; +#endif +constexpr base::TimeDelta kMaxWaitTime = base::TimeDelta::FromSeconds(60); + +// These values are persisted to logs. Entries should not be renumbered and +// numeric values should never be reused. +enum class GpuWatchdogTimeoutEvent { + // Recorded each time OnWatchdogTimeout() is called. + kTimeout, + // Recorded when a GPU main thread is killed for a detected hang. + kKill, + // Windows only: Recorded when a hang is detected but we allow the GPU main + // thread to continue until it has spent the full + // thread time doing the work. + kMoreThreadTime, + // Windows only: The GPU makes progress after being given more thread time. + // The GPU main thread is not killed. + kProgressAfterMoreThreadTime, + // A gpu hang is detected but the watchdog waits for 60 seconds before taking + // action. + kTimeoutWait, + // The GPU makes progress within 60 sec in OnWatchdogTimeout(). The GPU main + // thread is not killed.
+ kProgressAfterWait, + kMaxValue = kProgressAfterWait, +}; class GPU_IPC_SERVICE_EXPORT GpuWatchdogThreadImplV2 : public GpuWatchdogThread, @@ -16,8 +46,11 @@ class GPU_IPC_SERVICE_EXPORT GpuWatchdogThreadImplV2 static std::unique_ptr<GpuWatchdogThreadImplV2> Create( bool start_backgrounded); - static std::unique_ptr<GpuWatchdogThreadImplV2> - Create(bool start_backgrounded, base::TimeDelta timeout, bool test_mode); + static std::unique_ptr<GpuWatchdogThreadImplV2> Create( + bool start_backgrounded, + base::TimeDelta timeout, + base::TimeDelta max_wait_time, + bool test_mode); ~GpuWatchdogThreadImplV2() override; @@ -27,6 +60,8 @@ class GPU_IPC_SERVICE_EXPORT GpuWatchdogThreadImplV2 void OnForegrounded() override; void OnInitComplete() override; void OnGpuProcessTearDown() override; + void ResumeWatchdog() override; + void PauseWatchdog() override; void GpuWatchdogHistogram(GpuWatchdogThreadEvent thread_event) override; bool IsGpuHangDetectedForTesting() override; void WaitForPowerObserverAddedForTesting() override; @@ -39,7 +74,8 @@ class GPU_IPC_SERVICE_EXPORT GpuWatchdogThreadImplV2 void ReportProgress() override; // Implements TaskObserver. - void WillProcessTask(const base::PendingTask& pending_task) override; + void WillProcessTask(const base::PendingTask& pending_task, + bool was_blocked_or_low_priority) override; void DidProcessTask(const base::PendingTask& pending_task) override; // Implements base::PowerObserver. @@ -47,47 +83,98 @@ class GPU_IPC_SERVICE_EXPORT GpuWatchdogThreadImplV2 void OnResume() override; private: - GpuWatchdogThreadImplV2(base::TimeDelta timeout, bool test_mode); + enum PauseResumeSource { + kAndroidBackgroundForeground = 0, + kPowerSuspendResume = 1, + kGeneralGpuFlow = 2, + }; + + GpuWatchdogThreadImplV2(base::TimeDelta timeout, + base::TimeDelta max_wait_time, + bool test_mode); void OnAddPowerObserver(); - void OnWatchdogBackgrounded(); - void OnWatchdogForegrounded(); - void RestartWatchdogTimeoutTask(); + void RestartWatchdogTimeoutTask(PauseResumeSource source_of_request); + void StopWatchdogTimeoutTask(PauseResumeSource source_of_request); + void UpdateInitializationFlag(); void Arm(); void Disarm(); void InProgress(); bool IsArmed(); void OnWatchdogTimeout(); + bool GpuIsAlive(); + bool WatchedThreadNeedsMoreTime(bool no_gpu_hang_detected); +#if defined(OS_WIN) + base::ThreadTicks GetWatchedThreadTime(); +#endif + bool GpuRespondsAfterWaiting(); // Do not change the function name. It is used for [GPU HANG] crash reports. void DeliberatelyTerminateToRecoverFromHang(); + // Histogram recorded in OnWatchdogTimeout(). + void GpuWatchdogTimeoutHistogram(GpuWatchdogTimeoutEvent timeout_event); + +#if defined(OS_WIN) + // The number of extra timeouts the GPU main thread needed to make progress. + void WindowsNumOfExtraTimeoutsHistogram(); +#endif + + // The wait time in OnWatchdogTimeout() for the GPU main thread to make + // progress. + void GpuWatchdogWaitTimeHistogram(base::TimeDelta wait_time); + + // Used for metrics. True if within one minute of the event. + bool WithinOneMinFromPowerResumed(); + bool WithinOneMinFromForegrounded(); + // This counter is only written on the gpu thread, and read on both threads. base::subtle::Atomic32 arm_disarm_counter_ = 0; // The counter number read in the last OnWatchdogTimeout() on the watchdog // thread. int32_t last_arm_disarm_counter_ = 0; - // Timeout on the watchdog thread to check if gpu hangs
+ // Timeout on the watchdog thread to check if gpu hangs. base::TimeDelta watchdog_timeout_; - // The time the gpu watchdog was created + // The time the gpu watchdog was created. base::TimeTicks watchdog_start_timeticks_; // The time the last OnSuspend and OnResume was called. - base::TimeTicks suspend_timeticks_; - base::TimeTicks resume_timeticks_; + base::TimeTicks power_suspend_timeticks_; + base::TimeTicks power_resume_timeticks_; // The time the last OnBackgrounded and OnForegrounded was called. base::TimeTicks backgrounded_timeticks_; base::TimeTicks foregrounded_timeticks_; - // Time: Interpreting the wall-clock time provided by a remote system. + // The time PauseWatchdog and ResumeWatchdog were called. + base::TimeTicks watchdog_pause_timeticks_; + base::TimeTicks watchdog_resume_timeticks_; + // TimeTicks: Tracking the amount of time a task runs. Executing delayed // tasks at the right time. + // ThreadTicks: Use this timer to (approximately) measure how much time the + // calling thread spent doing actual work vs. being de-scheduled. // The time the last OnWatchdogTimeout() was called. base::TimeTicks last_on_watchdog_timeout_timeticks_; - base::Time last_on_watchdog_timeout_time_; +#if defined(OS_WIN) + base::ThreadTicks last_on_watchdog_timeout_thread_ticks_; + + // The difference between the timeout and the actual time the watched thread + // spent doing actual work. + base::TimeDelta remaining_watched_thread_ticks_; + + // The Windows thread handle of the watched GPU main thread. + void* watched_thread_handle_ = nullptr; + + // After a GPU hang is detected, how many times the GPU thread has been + // allowed to continue due to not enough thread time. + int count_of_more_gpu_thread_time_allowed_ = 0; + + // The accumulated timeout time the GPU main thread was given. + base::TimeDelta time_in_extra_timeouts_; +#endif // The system has entered the power suspension mode. bool in_power_suspension_ = false; @@ -95,18 +182,32 @@ // The GPU process has started tearing down. Accessed only in the gpu process. bool in_gpu_process_teardown_ = false; - // OnWatchdogTimeout() is called for the first time after power resume. - bool is_first_timeout_after_power_resume = false; - // Chrome is running on the background on Android. Gpu is probably very slow // or stalled. bool is_backgrounded_ = false; + // The GPU watchdog is paused. The timeout task is temporarily stopped. + bool is_paused_ = false; + // Whether the watchdog thread has been called and added to the power monitor // observer. bool is_add_power_observer_called_ = false; bool is_power_observer_added_ = false; + // Whether GpuWatchdogThreadEvent::kGpuWatchdogStart has been recorded. + bool is_watchdog_start_histogram_recorded = false; + + // Read/Write by the watchdog thread only after initialized in the + // constructor. + bool in_gpu_initialization_ = false; + + // For the experiment and debugging purposes. + size_t num_of_timeout_after_power_resume_ = 0; + size_t num_of_timeout_after_foregrounded_ = 0; + bool foregrounded_event_ = false; + bool power_resumed_event_ = false; + base::TimeDelta max_wait_time_; + // For gpu testing only. const bool is_test_mode_; // Set by the watchdog thread and Read by the test thread.
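The hang check in OnWatchdogTimeout() above rests on a single atomic counter with parity semantics: Arm() leaves it odd, Disarm() leaves it even, and any change between two timeouts counts as progress. A minimal self-contained sketch of that protocol, using std::atomic in place of base::subtle purely for illustration:

#include <atomic>
#include <cstdint>

class ArmDisarmCounter {
 public:
  // Called on the watched (GPU) thread. Arm() is only called while disarmed
  // and Disarm() only while armed, so the counter's parity tracks the state.
  void Arm() { counter_.fetch_add(1, std::memory_order_relaxed); }     // odd
  void Disarm() { counter_.fetch_add(1, std::memory_order_relaxed); }  // even

  // Called on the watchdog thread once per timeout period.
  bool HangDetected() {
    const uint32_t now = counter_.load(std::memory_order_relaxed);
    const bool disarmed = now % 2 == 0;            // even: no task in flight
    const bool made_progress = now != last_seen_;  // counter moved since last check
    last_seen_ = now;
    return !disarmed && !made_progress;  // armed and stuck: possible hang
  }

 private:
  std::atomic<uint32_t> counter_{0};
  uint32_t last_seen_ = 0;  // local to the watchdog thread
};

Because neither side ever blocks the other, the watched thread pays only an atomic increment per task boundary, and the watchdog can distinguish idle (disarmed) from busy-but-advancing (counter changed) from stuck (armed and unchanged).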
diff --git a/chromium/gpu/ipc/service/image_decode_accelerator_stub.cc b/chromium/gpu/ipc/service/image_decode_accelerator_stub.cc index 21aa72df856..f61a9798e97 100644 --- a/chromium/gpu/ipc/service/image_decode_accelerator_stub.cc +++ b/chromium/gpu/ipc/service/image_decode_accelerator_stub.cc @@ -153,27 +153,11 @@ void ImageDecodeAcceleratorStub::OnScheduleImageDecode( uint64_t release_count) { DCHECK(io_task_runner_->BelongsToCurrentThread()); base::AutoLock lock(lock_); - if (!channel_ || destroying_channel_) { + if (!channel_) { // The channel is no longer available, so don't do anything. return; } - // Make sure the decode sync token is ordered with respect to the last decode - // request. - if (release_count <= last_release_count_) { - DLOG(ERROR) << "Out-of-order decode sync token"; - OnError(); - return; - } - last_release_count_ = release_count; - - // Make sure the output dimensions are not too small. - if (decode_params.output_size.IsEmpty()) { - DLOG(ERROR) << "Output dimensions are too small"; - OnError(); - return; - } - // Start the actual decode. worker_->Decode( std::move(decode_params.encoded_data), decode_params.output_size, @@ -200,7 +184,7 @@ void ImageDecodeAcceleratorStub::ProcessCompletedDecode( uint64_t decode_release_count) { DCHECK(main_task_runner_->BelongsToCurrentThread()); base::AutoLock lock(lock_); - if (!channel_ || destroying_channel_) { + if (!channel_) { // The channel is no longer available, so don't do anything. return; } @@ -208,6 +192,29 @@ void ImageDecodeAcceleratorStub::ProcessCompletedDecode( DCHECK(!pending_completed_decodes_.empty()); std::unique_ptr<ImageDecodeAcceleratorWorker::DecodeResult> completed_decode = std::move(pending_completed_decodes_.front()); + pending_completed_decodes_.pop(); + + // Regardless of what happens next, make sure the sync token gets released and + // the sequence gets disabled if there are no more completed decodes after + // this. base::Unretained(this) is safe because *this outlives the + // ScopedClosureRunner. + base::ScopedClosureRunner finalizer( + base::BindOnce(&ImageDecodeAcceleratorStub::FinishCompletedDecode, + base::Unretained(this), decode_release_count)); + + if (!completed_decode) { + DLOG(ERROR) << "The image could not be decoded"; + return; + } + + // TODO(crbug.com/995883): the output_size parameter is going away, so this + // validation is not needed. Checking if the size is too small should happen + // at the level of the decoder (since that's the component that's aware of its + // own capabilities). + if (params.output_size.IsEmpty()) { + DLOG(ERROR) << "Output dimensions are too small"; + return; + } // Gain access to the transfer cache through the GpuChannelManager's // SharedContextState. We will also use that to get a GrContext that will be @@ -217,7 +224,6 @@ void ImageDecodeAcceleratorStub::ProcessCompletedDecode( channel_->gpu_channel_manager()->GetSharedContextState(&context_result); if (context_result != ContextResult::kSuccess) { DLOG(ERROR) << "Unable to obtain the SharedContextState"; - OnError(); return; } DCHECK(shared_context_state); @@ -227,17 +233,14 @@ void ImageDecodeAcceleratorStub::ProcessCompletedDecode( // other graphics APIs). 
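// Aside: the base::ScopedClosureRunner |finalizer| created earlier in this
// function is what lets all of the early returns below release the decode sync
// token exactly once. A minimal sketch of the pattern, assuming only
// base/callback_helpers.h; ProcessAndAlwaysRelease(), Validate(), DoWork() and
// ReleaseFenceSync() are illustrative names, not part of this patch:
//
//   void ProcessAndAlwaysRelease(uint64_t release_count) {
//     base::ScopedClosureRunner finalizer(
//         base::BindOnce(&ReleaseFenceSync, release_count));
//     if (!Validate())
//       return;  // |finalizer| runs its closure here...
//     DoWork();
//   }            // ...or here; either way, exactly once.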
if (!shared_context_state->IsGLInitialized()) { DLOG(ERROR) << "GL has not been initialized"; - OnError(); return; } if (!shared_context_state->gr_context()) { DLOG(ERROR) << "Could not get the GrContext"; - OnError(); return; } if (!shared_context_state->MakeCurrent(nullptr /* surface */)) { DLOG(ERROR) << "Could not MakeCurrent the shared context"; - OnError(); return; } @@ -269,7 +272,6 @@ void ImageDecodeAcceleratorStub::ProcessCompletedDecode( if (!safe_uv_width.AssignIfValid(&uv_width) || !safe_uv_height.AssignIfValid(&uv_height)) { DLOG(ERROR) << "Could not calculate subsampled dimensions"; - OnError(); return; } gfx::Size uv_plane_size = gfx::Size(uv_width, uv_height); @@ -343,13 +345,11 @@ void ImageDecodeAcceleratorStub::ProcessCompletedDecode( } if (!plane_image) { DLOG(ERROR) << "Could not create GL image"; - OnError(); return; } resource->gl_image = std::move(plane_image); if (!resource->gl_image->BindTexImage(GL_TEXTURE_EXTERNAL_OES)) { DLOG(ERROR) << "Could not bind GL image to texture"; - OnError(); return; } @@ -372,7 +372,6 @@ void ImageDecodeAcceleratorStub::ProcessCompletedDecode( resource); if (!plane_sk_images[plane]) { DLOG(ERROR) << "Could not create planar SkImage"; - OnError(); return; } // No need for us to call the resource cleaner. Skia should do that. @@ -383,7 +382,6 @@ void ImageDecodeAcceleratorStub::ProcessCompletedDecode( // |native_pixmap_handle| member of a GpuMemoryBufferHandle. NOTIMPLEMENTED() << "Image decode acceleration is unsupported for this platform"; - OnError(); return; #endif @@ -395,7 +393,6 @@ void ImageDecodeAcceleratorStub::ProcessCompletedDecode( channel_->LookupCommandBuffer(params.raster_decoder_route_id); if (!command_buffer) { DLOG(ERROR) << "Could not find the command buffer"; - OnError(); return; } scoped_refptr<Buffer> handle_buffer = @@ -403,13 +400,11 @@ void ImageDecodeAcceleratorStub::ProcessCompletedDecode( if (!DiscardableHandleBase::ValidateParameters( handle_buffer.get(), params.discardable_handle_shm_offset)) { DLOG(ERROR) << "Could not validate the discardable handle parameters"; - OnError(); return; } DCHECK(command_buffer->decoder_context()); if (command_buffer->decoder_context()->GetRasterDecoderId() < 0) { DLOG(ERROR) << "Could not get the raster decoder ID"; - OnError(); return; } @@ -441,21 +436,18 @@ void ImageDecodeAcceleratorStub::ProcessCompletedDecode( completed_decode->yuv_color_space, completed_decode->buffer_byte_size, params.needs_mips)) { DLOG(ERROR) << "Could not create and insert the transfer cache entry"; - OnError(); return; } } DCHECK(notify_gl_state_changed); notify_gl_state_changed->RunAndReset(); +} - // All done! The decoded image can now be used for rasterization, so we can - // release the decode sync token. +void ImageDecodeAcceleratorStub::FinishCompletedDecode( + uint64_t decode_release_count) { + DCHECK(main_task_runner_->BelongsToCurrentThread()); + lock_.AssertAcquired(); sync_point_client_state_->ReleaseFenceSync(decode_release_count); - - // If there are no more completed decodes to be processed, we can disable the - // sequence: when the next decode is completed, the sequence will be - // re-enabled. 
- pending_completed_decodes_.pop(); if (pending_completed_decodes_.empty()) channel_->scheduler()->DisableSequence(sequence_); } @@ -464,19 +456,13 @@ void ImageDecodeAcceleratorStub::OnDecodeCompleted( gfx::Size expected_output_size, std::unique_ptr<ImageDecodeAcceleratorWorker::DecodeResult> result) { base::AutoLock lock(lock_); - if (!channel_ || destroying_channel_) { + if (!channel_) { // The channel is no longer available, so don't do anything. return; } - if (!result) { - DLOG(ERROR) << "The decode failed"; - OnError(); - return; - } - // A sanity check on the output of the decoder. - DCHECK(expected_output_size == result->visible_size); + DCHECK(!result || expected_output_size == result->visible_size); // The decode is ready to be processed: add it to |pending_completed_decodes_| // so that ProcessCompletedDecode() can pick it up. @@ -488,19 +474,4 @@ void ImageDecodeAcceleratorStub::OnDecodeCompleted( channel_->scheduler()->EnableSequence(sequence_); } -void ImageDecodeAcceleratorStub::OnError() { - lock_.AssertAcquired(); - DCHECK(channel_); - - // Trigger the destruction of the channel and stop processing further - // completed decodes, even if they're successful. We can't call - // GpuChannel::OnChannelError() directly because that will end up calling - // ImageDecodeAcceleratorStub::Shutdown() while |lock_| is still acquired. So, - // we post a task to the main thread instead. - destroying_channel_ = true; - channel_->task_runner()->PostTask( - FROM_HERE, - base::BindOnce(&GpuChannel::OnChannelError, channel_->AsWeakPtr())); -} - } // namespace gpu diff --git a/chromium/gpu/ipc/service/image_decode_accelerator_stub.h b/chromium/gpu/ipc/service/image_decode_accelerator_stub.h index 41256a3067e..b3552f98573 100644 --- a/chromium/gpu/ipc/service/image_decode_accelerator_stub.h +++ b/chromium/gpu/ipc/service/image_decode_accelerator_stub.h @@ -76,23 +76,22 @@ class GPU_IPC_SERVICE_EXPORT ImageDecodeAcceleratorStub uint64_t release_count); // Creates the service-side cache entry for a completed decode and releases - // the decode sync token. + // the decode sync token. If the decode was unsuccessful, no cache entry is + // created but the decode sync token is still released. void ProcessCompletedDecode(GpuChannelMsg_ScheduleImageDecode_Params params, uint64_t decode_release_count); - // The |worker_| calls this when a decode is completed. If the decode is - // successful, |sequence_| will be enabled so that ProcessCompletedDecode() is - // called. If the decode is not successful, we destroy the channel (see - // OnError()). + // Releases the decode sync token corresponding to |decode_release_count| and + // disables |sequence_| if there are no more decodes to process for now. + void FinishCompletedDecode(uint64_t decode_release_count) + EXCLUSIVE_LOCKS_REQUIRED(lock_); + + // The |worker_| calls this when a decode is completed. |result| is enqueued + // and |sequence_| is enabled so that ProcessCompletedDecode() picks it up. void OnDecodeCompleted( gfx::Size expected_output_size, std::unique_ptr<ImageDecodeAcceleratorWorker::DecodeResult> result); - // Triggers the destruction of the channel asynchronously and makes it so that - // we stop accepting completed decodes. On entry, |channel_| must not be - // nullptr. - void OnError() EXCLUSIVE_LOCKS_REQUIRED(lock_); - // The object to which the actual decoding can be delegated. 
ImageDecodeAcceleratorWorker* worker_ = nullptr; @@ -103,8 +102,6 @@ class GPU_IPC_SERVICE_EXPORT ImageDecodeAcceleratorStub GUARDED_BY(lock_); base::queue<std::unique_ptr<ImageDecodeAcceleratorWorker::DecodeResult>> pending_completed_decodes_ GUARDED_BY(lock_); - bool destroying_channel_ GUARDED_BY(lock_) = false; - uint64_t last_release_count_ GUARDED_BY(lock_) = 0; ImageFactory* external_image_factory_for_testing_ = nullptr; diff --git a/chromium/gpu/ipc/service/image_decode_accelerator_stub_unittest.cc b/chromium/gpu/ipc/service/image_decode_accelerator_stub_unittest.cc index 23830d15ddd..b190cfcc37d 100644 --- a/chromium/gpu/ipc/service/image_decode_accelerator_stub_unittest.cc +++ b/chromium/gpu/ipc/service/image_decode_accelerator_stub_unittest.cc @@ -73,6 +73,11 @@ class MemoryTracker; namespace { +struct ExpectedCacheEntry { + uint32_t id = 0u; + SkISize dimensions; +}; + std::unique_ptr<MemoryTracker> CreateMockMemoryTracker( const GPUCreateCommandBufferConfig& init_params) { return std::make_unique<gles2::MockMemoryTracker>(); @@ -204,8 +209,7 @@ class ImageDecodeAcceleratorStubTest int GetRasterDecoderId() { GpuChannel* channel = channel_manager()->LookupChannel(kChannelId); - if (!channel) - return -1; + DCHECK(channel); CommandBufferStub* command_buffer = channel->LookupCommandBuffer(kCommandBufferRouteId); if (!command_buffer || !command_buffer->decoder_context()) @@ -283,7 +287,7 @@ class ImageDecodeAcceleratorStubTest scoped_refptr<Buffer> buffer, uint64_t handle_release_count) { GpuChannel* channel = channel_manager()->LookupChannel(kChannelId); - CHECK(channel); + DCHECK(channel); CommandBufferStub* command_buffer = channel->LookupCommandBuffer(kCommandBufferRouteId); CHECK(command_buffer); @@ -295,12 +299,11 @@ class ImageDecodeAcceleratorStubTest // the raster sequence) to register the handle's buffer and release the sync // token corresponding to |handle_release_count| (see the // RegisterDiscardableHandleBuffer() method). Returns an invalid handle if the - // GPU channel or the command buffer doesn't exist. + // command buffer doesn't exist. ClientDiscardableHandle CreateDiscardableHandle( uint64_t handle_release_count) { GpuChannel* channel = channel_manager()->LookupChannel(kChannelId); - if (!channel) - return ClientDiscardableHandle(); + DCHECK(channel); CommandBufferStub* command_buffer = channel->LookupCommandBuffer(kCommandBufferRouteId); if (!command_buffer) @@ -324,20 +327,14 @@ class ImageDecodeAcceleratorStubTest // (|decode_release_count|), the transfer cache entry ID // (|transfer_cache_entry_id|), and the release count of the sync token that // is signaled after the discardable handle's buffer has been registered in - // the TransferBufferManager. If the channel does not exist or the discardable - // handle can't be created, this function returns an empty sync token. + // the TransferBufferManager. If the discardable handle can't be created, this + // function returns an empty sync token. SyncToken SendDecodeRequest(const gfx::Size& output_size, uint64_t decode_release_count, uint32_t transfer_cache_entry_id, uint64_t handle_release_count) { GpuChannel* channel = channel_manager()->LookupChannel(kChannelId); - if (!channel) { - // It's possible that the channel was destroyed as part of an earlier - // SendDecodeRequest() call. This would happen if - // ImageDecodeAcceleratorStub::OnScheduleImageDecode decides to destroy - // the channel. 
- return SyncToken(); - } + DCHECK(channel); // Create the decode sync token for the decode request so that we can test // that it's actually released. @@ -383,7 +380,8 @@ class ImageDecodeAcceleratorStubTest } } - void CheckTransferCacheEntries(std::vector<SkISize> expected_sizes) { + void CheckTransferCacheEntries( + const std::vector<ExpectedCacheEntry>& expected_entries) { ServiceTransferCache* transfer_cache = GetServiceTransferCache(); ASSERT_TRUE(transfer_cache); @@ -391,8 +389,8 @@ class ImageDecodeAcceleratorStubTest // expected. const size_t num_actual_cache_entries = transfer_cache->entries_count_for_testing(); - ASSERT_EQ(expected_sizes.size(), num_actual_cache_entries); - if (expected_sizes.empty()) + ASSERT_EQ(expected_entries.size(), num_actual_cache_entries); + if (expected_entries.empty()) return; // Then, check the dimensions of the entries to make sure they are as @@ -402,7 +400,8 @@ class ImageDecodeAcceleratorStubTest for (size_t i = 0; i < num_actual_cache_entries; i++) { auto* decode_entry = static_cast<cc::ServiceImageTransferCacheEntry*>( transfer_cache->GetEntry(ServiceTransferCache::EntryKey( - raster_decoder_id, cc::TransferCacheEntryType::kImage, i + 1))); + raster_decoder_id, cc::TransferCacheEntryType::kImage, + expected_entries[i].id))); ASSERT_TRUE(decode_entry); ASSERT_EQ(gfx::NumberOfPlanesForLinearBufferFormat(GetParam()), decode_entry->plane_images().size()); @@ -412,9 +411,9 @@ class ImageDecodeAcceleratorStubTest EXPECT_TRUE(decode_entry->plane_images()[plane]->isTextureBacked()); } ASSERT_TRUE(decode_entry->image()); - EXPECT_EQ(expected_sizes[i].width(), + EXPECT_EQ(expected_entries[i].dimensions.width(), decode_entry->image()->dimensions().width()); - EXPECT_EQ(expected_sizes[i].height(), + EXPECT_EQ(expected_entries[i].dimensions.height(), decode_entry->image()->dimensions().height()); } } @@ -471,11 +470,9 @@ TEST_P(ImageDecodeAcceleratorStubTest, EXPECT_TRUE(sync_point_manager()->IsSyncTokenReleased(decode1_sync_token)); EXPECT_TRUE(sync_point_manager()->IsSyncTokenReleased(decode2_sync_token)); - // The channel should still exist at the end. - EXPECT_TRUE(channel_manager()->LookupChannel(kChannelId)); - // Check that the decoded images are in the transfer cache. - CheckTransferCacheEntries({SkISize::Make(100, 100), SkISize::Make(200, 200)}); + CheckTransferCacheEntries( + {{1u, SkISize::Make(100, 100)}, {2u, SkISize::Make(200, 200)}}); } // Tests the following flow: three decode requests are sent. The first decode @@ -521,18 +518,14 @@ TEST_P(ImageDecodeAcceleratorStubTest, EXPECT_TRUE(sync_point_manager()->IsSyncTokenReleased(decode2_sync_token)); EXPECT_TRUE(sync_point_manager()->IsSyncTokenReleased(decode3_sync_token)); - // The channel should still exist at the end. - EXPECT_TRUE(channel_manager()->LookupChannel(kChannelId)); - // Check that the decoded images are in the transfer cache. - CheckTransferCacheEntries({SkISize::Make(100, 100), SkISize::Make(200, 200), - SkISize::Make(300, 300)}); + CheckTransferCacheEntries({{1u, SkISize::Make(100, 100)}, + {2u, SkISize::Make(200, 200)}, + {3u, SkISize::Make(300, 300)}}); } // Tests the following flow: three decode requests are sent. The first decode -// fails which should trigger the destruction of the channel. The second -// succeeds and the third one fails. Regardless, the channel should still be -// destroyed and all sync tokens should be released. +// fails, the second succeeds, and the third one fails. 
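+// All three decode sync tokens should still be released. A minimal sketch of
+// the release path this relies on (illustrative only; it follows the
+// FinishCompletedDecode() declaration added in
+// image_decode_accelerator_stub.h above, and assumes the stub holds a
+// sync_point_client_state_ for releasing fence syncs):
+//
+//   void ImageDecodeAcceleratorStub::FinishCompletedDecode(
+//       uint64_t decode_release_count) {
+//     lock_.AssertAcquired();
+//     // Release the sync token even for a failed decode so that no client
+//     // waits forever on a decode that will never produce a cache entry.
+//     sync_point_client_state_->ReleaseFenceSync(decode_release_count);
+//     pending_completed_decodes_.pop();
+//     if (pending_completed_decodes_.empty())
+//       channel_->scheduler()->DisableSequence(sequence_);
+//   }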
TEST_P(ImageDecodeAcceleratorStubTest, FailedDecodes) { { InSequence call_sequence; @@ -561,25 +554,29 @@ TEST_P(ImageDecodeAcceleratorStubTest, FailedDecodes) { EXPECT_FALSE(sync_point_manager()->IsSyncTokenReleased(decode1_sync_token)); EXPECT_FALSE(sync_point_manager()->IsSyncTokenReleased(decode2_sync_token)); EXPECT_FALSE(sync_point_manager()->IsSyncTokenReleased(decode3_sync_token)); + + // All decode sync tokens should be released after completing all the decodes. image_decode_accelerator_worker_.FinishOneDecode(false); image_decode_accelerator_worker_.FinishOneDecode(true); image_decode_accelerator_worker_.FinishOneDecode(false); - - // We expect the destruction of the ImageDecodeAcceleratorStub, which also - // implies that all decode sync tokens should be released. RunTasksUntilIdle(); - EXPECT_FALSE(channel_manager()->LookupChannel(kChannelId)); EXPECT_TRUE(sync_point_manager()->IsSyncTokenReleased(decode1_sync_token)); EXPECT_TRUE(sync_point_manager()->IsSyncTokenReleased(decode2_sync_token)); EXPECT_TRUE(sync_point_manager()->IsSyncTokenReleased(decode3_sync_token)); - // We expect no entries in the transfer cache. - CheckTransferCacheEntries({}); + // There should only be one image in the transfer cache (the one that + // succeeded). + CheckTransferCacheEntries({{2u, SkISize::Make(200, 200)}}); } TEST_P(ImageDecodeAcceleratorStubTest, OutOfOrderDecodeSyncTokens) { - EXPECT_CALL(image_decode_accelerator_worker_, DoDecode(gfx::Size(100, 100))) - .Times(1); + { + InSequence call_sequence; + EXPECT_CALL(image_decode_accelerator_worker_, DoDecode(gfx::Size(100, 100))) + .Times(1); + EXPECT_CALL(image_decode_accelerator_worker_, DoDecode(gfx::Size(200, 200))) + .Times(1); + } const SyncToken decode1_sync_token = SendDecodeRequest( gfx::Size(100, 100) /* output_size */, 2u /* decode_release_count */, 1u /* transfer_cache_entry_id */, 1u /* handle_release_count */); @@ -590,62 +587,87 @@ TEST_P(ImageDecodeAcceleratorStubTest, OutOfOrderDecodeSyncTokens) { 2u /* transfer_cache_entry_id */, 2u /* handle_release_count */); ASSERT_TRUE(decode2_sync_token.HasData()); - // We expect the destruction of the ImageDecodeAcceleratorStub, which also - // implies that all decode sync tokens should be released. + // A decode sync token should not be released before a decode is finished. + RunTasksUntilIdle(); + EXPECT_FALSE(sync_point_manager()->IsSyncTokenReleased(decode1_sync_token)); + EXPECT_FALSE(sync_point_manager()->IsSyncTokenReleased(decode2_sync_token)); + + // Since the sync tokens are out of order, releasing the first one should also + // release the second one. + image_decode_accelerator_worker_.FinishOneDecode(true); RunTasksUntilIdle(); - EXPECT_FALSE(channel_manager()->LookupChannel(kChannelId)); EXPECT_TRUE(sync_point_manager()->IsSyncTokenReleased(decode1_sync_token)); EXPECT_TRUE(sync_point_manager()->IsSyncTokenReleased(decode2_sync_token)); - // We expect no entries in the transfer cache. - CheckTransferCacheEntries({}); + // We only expect the first image in the transfer cache. + CheckTransferCacheEntries({{1u, SkISize::Make(100, 100)}}); + + // Finishing the second decode should not "unrelease" the first sync token. 
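+  // (In other words, the stub's release count only ever moves forward:
+  // completing the decode tied to the lower release count after the higher
+  // one has been released must leave both tokens in the released state.)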
+ image_decode_accelerator_worker_.FinishOneDecode(true); + RunTasksUntilIdle(); + EXPECT_TRUE(sync_point_manager()->IsSyncTokenReleased(decode1_sync_token)); + EXPECT_TRUE(sync_point_manager()->IsSyncTokenReleased(decode2_sync_token)); + CheckTransferCacheEntries( + {{1u, SkISize::Make(100, 100)}, {2u, SkISize::Make(200, 200)}}); } TEST_P(ImageDecodeAcceleratorStubTest, ZeroReleaseCountDecodeSyncToken) { + EXPECT_CALL(image_decode_accelerator_worker_, DoDecode(gfx::Size(100, 100))) + .Times(1); const SyncToken decode_sync_token = SendDecodeRequest( gfx::Size(100, 100) /* output_size */, 0u /* decode_release_count */, 1u /* transfer_cache_entry_id */, 1u /* handle_release_count */); ASSERT_TRUE(decode_sync_token.HasData()); - // We expect the destruction of the ImageDecodeAcceleratorStub, which also - // implies that all decode sync tokens should be released. + // A zero-release count sync token is always considered released. RunTasksUntilIdle(); - EXPECT_FALSE(channel_manager()->LookupChannel(kChannelId)); EXPECT_TRUE(sync_point_manager()->IsSyncTokenReleased(decode_sync_token)); - // We expect no entries in the transfer cache. - CheckTransferCacheEntries({}); + // Even though the release count is not really valid, we can still finish the + // decode. + image_decode_accelerator_worker_.FinishOneDecode(true); + RunTasksUntilIdle(); + EXPECT_TRUE(sync_point_manager()->IsSyncTokenReleased(decode_sync_token)); + CheckTransferCacheEntries({{1u, SkISize::Make(100, 100)}}); } TEST_P(ImageDecodeAcceleratorStubTest, ZeroWidthOutputSize) { + EXPECT_CALL(image_decode_accelerator_worker_, DoDecode(gfx::Size(0, 100))) + .Times(1); const SyncToken decode_sync_token = SendDecodeRequest( gfx::Size(0, 100) /* output_size */, 1u /* decode_release_count */, 1u /* transfer_cache_entry_id */, 1u /* handle_release_count */); ASSERT_TRUE(decode_sync_token.HasData()); - // We expect the destruction of the ImageDecodeAcceleratorStub, which also - // implies that all decode sync tokens should be released. + // A decode sync token should not be released before a decode is finished. RunTasksUntilIdle(); - EXPECT_FALSE(channel_manager()->LookupChannel(kChannelId)); - EXPECT_TRUE(sync_point_manager()->IsSyncTokenReleased(decode_sync_token)); + EXPECT_FALSE(sync_point_manager()->IsSyncTokenReleased(decode_sync_token)); - // We expect no entries in the transfer cache. + // Even though the output size is not valid, we can still finish the decode. + // We just shouldn't get any entries in the transfer cache. + image_decode_accelerator_worker_.FinishOneDecode(true); + RunTasksUntilIdle(); + EXPECT_TRUE(sync_point_manager()->IsSyncTokenReleased(decode_sync_token)); CheckTransferCacheEntries({}); } TEST_P(ImageDecodeAcceleratorStubTest, ZeroHeightOutputSize) { + EXPECT_CALL(image_decode_accelerator_worker_, DoDecode(gfx::Size(100, 0))) + .Times(1); const SyncToken decode_sync_token = SendDecodeRequest( gfx::Size(100, 0) /* output_size */, 1u /* decode_release_count */, 1u /* transfer_cache_entry_id */, 1u /* handle_release_count */); ASSERT_TRUE(decode_sync_token.HasData()); - // We expect the destruction of the ImageDecodeAcceleratorStub, which also - // implies that all decode sync tokens should be released. + // A decode sync token should not be released before a decode is finished. 
RunTasksUntilIdle(); - EXPECT_FALSE(channel_manager()->LookupChannel(kChannelId)); - EXPECT_TRUE(sync_point_manager()->IsSyncTokenReleased(decode_sync_token)); + EXPECT_FALSE(sync_point_manager()->IsSyncTokenReleased(decode_sync_token)); - // We expect no entries in the transfer cache. + // Even though the output size is not valid, we can still finish the decode. + // We just shouldn't get any entries in the transfer cache. + image_decode_accelerator_worker_.FinishOneDecode(true); + RunTasksUntilIdle(); + EXPECT_TRUE(sync_point_manager()->IsSyncTokenReleased(decode_sync_token)); CheckTransferCacheEntries({}); } @@ -683,14 +705,6 @@ TEST_P(ImageDecodeAcceleratorStubTest, WaitForDiscardableHandleRegistration) { RunTasksUntilIdle(); EXPECT_FALSE(sync_point_manager()->IsSyncTokenReleased(decode_sync_token)); - // Let's make sure that the channel and the command buffer are still alive - // because if we didn't wait for the discardable handle's buffer to be - // registered, we could have caused a channel teardown. - ASSERT_TRUE(channel_manager()->LookupChannel(kChannelId)); - ASSERT_TRUE(channel_manager() - ->LookupChannel(kChannelId) - ->LookupCommandBuffer(kCommandBufferRouteId)); - // Now let's register the discardable handle's buffer by re-enabling the // raster sequence. This should trigger the processing of the completed decode // and the subsequent release of the decode sync token. @@ -698,17 +712,14 @@ TEST_P(ImageDecodeAcceleratorStubTest, WaitForDiscardableHandleRegistration) { RunTasksUntilIdle(); EXPECT_TRUE(sync_point_manager()->IsSyncTokenReleased(decode_sync_token)); - // The channel should still exist at the end. - EXPECT_TRUE(channel_manager()->LookupChannel(kChannelId)); - // Check that the decoded images are in the transfer cache. - CheckTransferCacheEntries({SkISize::Make(100, 100)}); + CheckTransferCacheEntries({{1u, SkISize::Make(100, 100)}}); } // TODO(andrescj): test the deletion of transfer cache entries. INSTANTIATE_TEST_SUITE_P( - , + All, ImageDecodeAcceleratorStubTest, ::testing::Values(gfx::BufferFormat::YVU_420, gfx::BufferFormat::YUV_420_BIPLANAR)); diff --git a/chromium/gpu/ipc/service/image_transport_surface_delegate.h b/chromium/gpu/ipc/service/image_transport_surface_delegate.h index 44430c0ed95..1e1319d7535 100644 --- a/chromium/gpu/ipc/service/image_transport_surface_delegate.h +++ b/chromium/gpu/ipc/service/image_transport_surface_delegate.h @@ -47,6 +47,9 @@ class GPU_IPC_SERVICE_EXPORT ImageTransportSurfaceDelegate { // Callback for GPU vsync signal. May be called on a different thread. virtual viz::GpuVSyncCallback GetGpuVSyncCallback() = 0; + // Returns how long GpuThread was blocked since last swap. Used for metrics. 
+ virtual base::TimeDelta GetGpuBlockedTimeSinceLastSwap() = 0; + protected: virtual ~ImageTransportSurfaceDelegate() = default; }; diff --git a/chromium/gpu/ipc/service/image_transport_surface_linux.cc b/chromium/gpu/ipc/service/image_transport_surface_linux.cc index 41a2d297482..c5c4d6ce7ed 100644 --- a/chromium/gpu/ipc/service/image_transport_surface_linux.cc +++ b/chromium/gpu/ipc/service/image_transport_surface_linux.cc @@ -23,8 +23,10 @@ scoped_refptr<gl::GLSurface> ImageTransportSurface::CreateNativeSurface( #endif if (!surface) { surface = gl::init::CreateViewGLSurface(surface_handle); - if (gl::GetGLImplementation() == gl::kGLImplementationDesktopGL) + if (gl::GetGLImplementation() == gl::kGLImplementationDesktopGL || + gl::GetGLImplementation() == gl::kGLImplementationEGLANGLE) { override_vsync_for_multi_window_swap = true; + } } if (!surface) return surface; diff --git a/chromium/gpu/ipc/service/image_transport_surface_overlay_mac.h b/chromium/gpu/ipc/service/image_transport_surface_overlay_mac.h index f343b7e0e04..f65ad035e90 100644 --- a/chromium/gpu/ipc/service/image_transport_surface_overlay_mac.h +++ b/chromium/gpu/ipc/service/image_transport_surface_overlay_mac.h @@ -82,7 +82,7 @@ class ImageTransportSurfaceOverlayMacBase : public BaseClass, bool IsSurfaceless() const override; // ui::GpuSwitchingObserver implementation. - void OnGpuSwitched() override; + void OnGpuSwitched(gl::GpuPreference active_gpu_heuristic) override; private: ~ImageTransportSurfaceOverlayMacBase() override; diff --git a/chromium/gpu/ipc/service/image_transport_surface_overlay_mac.mm b/chromium/gpu/ipc/service/image_transport_surface_overlay_mac.mm index fa1ef9c99b2..c1af03a268d 100644 --- a/chromium/gpu/ipc/service/image_transport_surface_overlay_mac.mm +++ b/chromium/gpu/ipc/service/image_transport_surface_overlay_mac.mm @@ -316,7 +316,8 @@ bool ImageTransportSurfaceOverlayMacBase<BaseClass>::Resize( } template <typename BaseClass> -void ImageTransportSurfaceOverlayMacBase<BaseClass>::OnGpuSwitched() { +void ImageTransportSurfaceOverlayMacBase<BaseClass>::OnGpuSwitched( + gl::GpuPreference active_gpu_heuristic) { // Create a new context, and use the GL renderer ID that the new context gets. scoped_refptr<ui::IOSurfaceContext> context_on_new_gpu = ui::IOSurfaceContext::Get(ui::IOSurfaceContext::kCALayerContext); diff --git a/chromium/gpu/ipc/service/pass_through_image_transport_surface.cc b/chromium/gpu/ipc/service/pass_through_image_transport_surface.cc index 0230484ec25..fa58d426738 100644 --- a/chromium/gpu/ipc/service/pass_through_image_transport_surface.cc +++ b/chromium/gpu/ipc/service/pass_through_image_transport_surface.cc @@ -9,6 +9,7 @@ #include "base/bind.h" #include "base/bind_helpers.h" #include "base/command_line.h" +#include "base/metrics/histogram_macros.h" #include "build/build_config.h" #include "gpu/command_buffer/common/swap_buffers_complete_params.h" #include "ui/gfx/vsync_provider.h" @@ -169,6 +170,29 @@ void PassThroughImageTransportSurface::SetVSyncEnabled(bool enabled) { GLSurfaceAdapter::SetVSyncEnabled(enabled); } +void PassThroughImageTransportSurface::TrackMultiSurfaceSwap() { + // This code is a simple way of enforcing that we only vsync if one surface + // is swapping per frame. This provides single window cases a stable refresh + // while allowing multi-window cases to not slow down due to multiple syncs + // on a single thread. A better way to fix this problem would be to have + // each surface present on its own thread. 
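+    // Worked example: if surfaces A and B both swap in the same generation,
+    // g_num_swaps_in_current_swap_generation_ reaches 2, so every swapping
+    // surface computes multiple_surfaces_swapped_ == true below and vsync is
+    // overridden; only after a single surface has been the sole swapper for
+    // kMultiWindowSwapEnableVSyncDelay consecutive generations does vsync
+    // come back on.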
+ if (g_current_swap_generation_ == swap_generation_) { + // No other surface has swapped since we swapped last time. + if (g_num_swaps_in_current_swap_generation_ > 1) + g_last_multi_window_swap_generation_ = g_current_swap_generation_; + g_num_swaps_in_current_swap_generation_ = 0; + g_current_swap_generation_++; + } + + swap_generation_ = g_current_swap_generation_; + g_num_swaps_in_current_swap_generation_++; + + multiple_surfaces_swapped_ = + (g_num_swaps_in_current_swap_generation_ > 1) || + (g_current_swap_generation_ - g_last_multi_window_swap_generation_ < + kMultiWindowSwapEnableVSyncDelay); +} + void PassThroughImageTransportSurface::UpdateVSyncEnabled() { if (is_gpu_vsync_disabled_) { SetVSyncEnabled(false); @@ -177,33 +201,14 @@ void PassThroughImageTransportSurface::UpdateVSyncEnabled() { bool should_override_vsync = false; if (is_multi_window_swap_vsync_override_enabled_) { - // This code is a simple way of enforcing that we only vsync if one surface - // is swapping per frame. This provides single window cases a stable refresh - // while allowing multi-window cases to not slow down due to multiple syncs - // on a single thread. A better way to fix this problem would be to have - // each surface present on its own thread. - - if (g_current_swap_generation_ == swap_generation_) { - // No other surface has swapped since we swapped last time. - if (g_num_swaps_in_current_swap_generation_ > 1) - g_last_multi_window_swap_generation_ = g_current_swap_generation_; - g_num_swaps_in_current_swap_generation_ = 0; - g_current_swap_generation_++; - } - - swap_generation_ = g_current_swap_generation_; - g_num_swaps_in_current_swap_generation_++; - - should_override_vsync = - (g_num_swaps_in_current_swap_generation_ > 1) || - (g_current_swap_generation_ - g_last_multi_window_swap_generation_ < - kMultiWindowSwapEnableVSyncDelay); + should_override_vsync = multiple_surfaces_swapped_; } SetVSyncEnabled(!should_override_vsync); } void PassThroughImageTransportSurface::StartSwapBuffers( gfx::SwapResponse* response) { + TrackMultiSurfaceSwap(); UpdateVSyncEnabled(); #if DCHECK_IS_ON() @@ -231,6 +236,32 @@ void PassThroughImageTransportSurface::FinishSwapBuffers( #endif if (delegate_) { + auto blocked_time_since_last_swap = + delegate_->GetGpuBlockedTimeSinceLastSwap(); + + if (!multiple_surfaces_swapped_) { + static constexpr base::TimeDelta kTimingMetricsHistogramMin = + base::TimeDelta::FromMicroseconds(5); + static constexpr base::TimeDelta kTimingMetricsHistogramMax = + base::TimeDelta::FromMilliseconds(500); + static constexpr uint32_t kTimingMetricsHistogramBuckets = 50; + + base::TimeDelta delta = + response.timings.swap_end - response.timings.swap_start; + UMA_HISTOGRAM_CUSTOM_MICROSECONDS_TIMES( + "GPU.SwapTimeUs", delta, kTimingMetricsHistogramMin, + kTimingMetricsHistogramMax, kTimingMetricsHistogramBuckets); + + // Report only if collection is enabled and supported on current platform + // See gpu::Scheduler::TakeTotalBlockingTime for details. 
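+      // (base::TimeDelta::Min() acts as the "not collected" sentinel, so
+      // is_min() distinguishes an unavailable metric from a genuine, possibly
+      // zero, blocked duration.)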
+      if (!blocked_time_since_last_swap.is_min()) {
+        UMA_HISTOGRAM_CUSTOM_MICROSECONDS_TIMES(
+            "GPU.GpuBlockedBetweenSwapsUs2", blocked_time_since_last_swap,
+            kTimingMetricsHistogramMin, kTimingMetricsHistogramMax,
+            kTimingMetricsHistogramBuckets);
+      }
+    }
+
    SwapBuffersCompleteParams params;
    params.swap_response = std::move(response);
    delegate_->DidSwapBuffersComplete(std::move(params));
diff --git a/chromium/gpu/ipc/service/pass_through_image_transport_surface.h b/chromium/gpu/ipc/service/pass_through_image_transport_surface.h
index b11596dac75..e463dc1e95b 100644
--- a/chromium/gpu/ipc/service/pass_through_image_transport_surface.h
+++ b/chromium/gpu/ipc/service/pass_through_image_transport_surface.h
@@ -55,6 +55,7 @@ class PassThroughImageTransportSurface : public gl::GLSurfaceAdapter {
 private:
  ~PassThroughImageTransportSurface() override;
+  void TrackMultiSurfaceSwap();
  void UpdateVSyncEnabled();
  void StartSwapBuffers(gfx::SwapResponse* response);
@@ -74,6 +75,7 @@ class PassThroughImageTransportSurface : public gl::GLSurfaceAdapter {
  base::WeakPtr<ImageTransportSurfaceDelegate> delegate_;
  int swap_generation_ = 0;
  bool vsync_enabled_ = true;
+  bool multiple_surfaces_swapped_ = false;
 // Local swap ids, which are used to make sure the swap order is correct and
 // the presentation callbacks are not called earlier than the swap ack of the
diff --git a/chromium/gpu/ipc/service/raster_command_buffer_stub.cc b/chromium/gpu/ipc/service/raster_command_buffer_stub.cc
index d2d4bb303bc..e9c23ac950b 100644
--- a/chromium/gpu/ipc/service/raster_command_buffer_stub.cc
+++ b/chromium/gpu/ipc/service/raster_command_buffer_stub.cc
@@ -8,7 +8,7 @@
 #include <utility>
 #include "base/macros.h"
-#include "base/memory/shared_memory.h"
+#include "base/memory/unsafe_shared_memory_region.h"
 #include "base/trace_event/trace_event.h"
 #include "build/build_config.h"
 #include "gpu/command_buffer/common/constants.h"
@@ -138,7 +138,10 @@ gpu::ContextResult RasterCommandBufferStub::Initialize(
 : "0");
 scoped_refptr<gl::GLContext> context = shared_context_state->context();
- if (!shared_context_state->MakeCurrent(nullptr)) {
+ // The raster decoder needs a GL context for GPUTracing.
+ // TODO(penghuang): get rid of the GL dependency when GL is not used for
+ // raster.
https://crbug.com/c/1018725 + if (!shared_context_state->MakeCurrent(nullptr, true /* needs_gl */)) { LOG(ERROR) << "ContextResult::kTransientFailure: " "Failed to make context current."; return gpu::ContextResult::kTransientFailure; diff --git a/chromium/gpu/ipc/service/webgpu_command_buffer_stub.cc b/chromium/gpu/ipc/service/webgpu_command_buffer_stub.cc index ad83c5bf8eb..11d71cc56db 100644 --- a/chromium/gpu/ipc/service/webgpu_command_buffer_stub.cc +++ b/chromium/gpu/ipc/service/webgpu_command_buffer_stub.cc @@ -8,7 +8,7 @@ #include <utility> #include "base/macros.h" -#include "base/memory/shared_memory.h" +#include "base/memory/unsafe_shared_memory_region.h" #include "base/trace_event/trace_event.h" #include "build/build_config.h" #include "gpu/command_buffer/common/constants.h" diff --git a/chromium/gpu/vulkan/BUILD.gn b/chromium/gpu/vulkan/BUILD.gn index 7d1331f1ec1..64500bfc7fc 100644 --- a/chromium/gpu/vulkan/BUILD.gn +++ b/chromium/gpu/vulkan/BUILD.gn @@ -43,8 +43,6 @@ if (enable_vulkan) { "vulkan_function_pointers.h", "vulkan_implementation.cc", "vulkan_implementation.h", - "vulkan_info.cc", - "vulkan_info.h", "vulkan_instance.cc", "vulkan_instance.h", "vulkan_surface.cc", @@ -69,7 +67,9 @@ if (enable_vulkan) { "//base", "//ui/gfx", ] - public_deps = [] + public_deps = [ + "//gpu/config:vulkan_info", + ] data_deps = [] if (is_posix) { diff --git a/chromium/gpu/vulkan/PRESUBMIT.py b/chromium/gpu/vulkan/PRESUBMIT.py index c5f3fd8991b..8d61974cead 100644 --- a/chromium/gpu/vulkan/PRESUBMIT.py +++ b/chromium/gpu/vulkan/PRESUBMIT.py @@ -22,7 +22,7 @@ def CommonChecks(input_api, output_api): messages = [] - if (len(generated_files) > 0 and len(generating_files) == 0): + if generated_files and not generating_files: long_text = 'Changed files:\n' for file in generated_files: long_text += file.LocalPath() + '\n' @@ -33,7 +33,7 @@ def CommonChecks(input_api, output_api): with input_api.temporary_directory() as temp_dir: commands = [] - if len(generating_files) > 0: + if generating_files: commands.append(input_api.Command(name='generate_bindings', cmd=[input_api.python_executable, 'generate_bindings.py', @@ -41,7 +41,7 @@ def CommonChecks(input_api, output_api): '--output-dir=' + temp_dir], kwargs={}, message=output_api.PresubmitError)) - if len(commands) > 0: + if commands: messages.extend(input_api.RunTests(commands)) return messages diff --git a/chromium/gpu/vulkan/demo/vulkan_demo.h b/chromium/gpu/vulkan/demo/vulkan_demo.h index e1ca7307804..3d8cd19cb6c 100644 --- a/chromium/gpu/vulkan/demo/vulkan_demo.h +++ b/chromium/gpu/vulkan/demo/vulkan_demo.h @@ -12,7 +12,7 @@ #include "gpu/vulkan/vulkan_swap_chain.h" #include "third_party/skia/include/core/SkRefCnt.h" #include "ui/gfx/geometry/size.h" -#include "ui/platform_window/platform_window_base.h" +#include "ui/platform_window/platform_window.h" #include "ui/platform_window/platform_window_delegate.h" class SkCanvas; @@ -66,7 +66,7 @@ class VulkanDemo : public ui::PlatformWindowDelegate { scoped_refptr<viz::VulkanContextProvider> vulkan_context_provider_; gfx::AcceleratedWidget accelerated_widget_ = gfx::kNullAcceleratedWidget; std::unique_ptr<ui::PlatformEventSource> event_source_; - std::unique_ptr<ui::PlatformWindowBase> window_; + std::unique_ptr<ui::PlatformWindow> window_; std::unique_ptr<VulkanSurface> vulkan_surface_; base::Optional<VulkanSwapChain::ScopedWrite> scoped_write_; sk_sp<SkSurface> sk_surface_; diff --git a/chromium/gpu/vulkan/vulkan_device_queue.cc b/chromium/gpu/vulkan/vulkan_device_queue.cc index 
0406917672d..55266dac0ab 100644 --- a/chromium/gpu/vulkan/vulkan_device_queue.cc +++ b/chromium/gpu/vulkan/vulkan_device_queue.cc @@ -8,10 +8,10 @@ #include <utility> #include <vector> +#include "gpu/config/vulkan_info.h" #include "gpu/vulkan/vulkan_command_pool.h" #include "gpu/vulkan/vulkan_fence_helper.h" #include "gpu/vulkan/vulkan_function_pointers.h" -#include "gpu/vulkan/vulkan_info.h" namespace gpu { diff --git a/chromium/gpu/vulkan/vulkan_info.cc b/chromium/gpu/vulkan/vulkan_info.cc deleted file mode 100644 index 548a1233806..00000000000 --- a/chromium/gpu/vulkan/vulkan_info.cc +++ /dev/null @@ -1,18 +0,0 @@ -// Copyright 2019 The Chromium Authors. All rights reserved. -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -#include "gpu/vulkan/vulkan_info.h" - -namespace gpu { - -VulkanInfo::VulkanInfo() = default; -VulkanInfo::~VulkanInfo() = default; -VulkanInfo::PhysicalDeviceInfo::PhysicalDeviceInfo() = default; -VulkanInfo::PhysicalDeviceInfo::PhysicalDeviceInfo( - const PhysicalDeviceInfo& other) = default; -VulkanInfo::PhysicalDeviceInfo::~PhysicalDeviceInfo() = default; -VulkanInfo::PhysicalDeviceInfo& VulkanInfo::PhysicalDeviceInfo::operator=( - const PhysicalDeviceInfo& info) = default; - -} // namespace gpu diff --git a/chromium/gpu/vulkan/vulkan_info.h b/chromium/gpu/vulkan/vulkan_info.h deleted file mode 100644 index babd2f93581..00000000000 --- a/chromium/gpu/vulkan/vulkan_info.h +++ /dev/null @@ -1,51 +0,0 @@ -// Copyright 2019 The Chromium Authors. All rights reserved. -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -#ifndef GPU_VULKAN_VULKAN_INFO_H_ -#define GPU_VULKAN_VULKAN_INFO_H_ - -#include <vulkan/vulkan.h> -#include <vector> - -#include "base/macros.h" -#include "gpu/vulkan/vulkan_export.h" -#include "ui/gfx/extension_set.h" - -namespace gpu { - -class VULKAN_EXPORT VulkanInfo { - public: - VulkanInfo(); - ~VulkanInfo(); - - class PhysicalDeviceInfo { - public: - PhysicalDeviceInfo(); - PhysicalDeviceInfo(const PhysicalDeviceInfo& other); - ~PhysicalDeviceInfo(); - PhysicalDeviceInfo& operator=(const PhysicalDeviceInfo& other); - - VkPhysicalDevice device = VK_NULL_HANDLE; - VkPhysicalDeviceProperties properties = {}; - std::vector<VkLayerProperties> layers; - - VkPhysicalDeviceFeatures features = {}; - // Extended physical device features: - bool feature_sampler_ycbcr_conversion = false; - bool feature_protected_memory = false; - - std::vector<VkQueueFamilyProperties> queue_families; - }; - - uint32_t api_version = VK_MAKE_VERSION(1, 0, 0); - uint32_t used_api_version = VK_MAKE_VERSION(1, 0, 0); - std::vector<VkExtensionProperties> instance_extensions; - std::vector<const char*> enabled_instance_extensions; - std::vector<VkLayerProperties> instance_layers; - std::vector<PhysicalDeviceInfo> physical_devices; -}; - -} // namespace gpu - -#endif // GPU_VULKAN_VULKAN_INFO_H_ diff --git a/chromium/gpu/vulkan/vulkan_instance.cc b/chromium/gpu/vulkan/vulkan_instance.cc index 8bd39f54cb1..d1638b65dc1 100644 --- a/chromium/gpu/vulkan/vulkan_instance.cc +++ b/chromium/gpu/vulkan/vulkan_instance.cc @@ -284,6 +284,7 @@ void VulkanInstance::CollectInfo() { ycbcr_converson_features.pNext = &protected_memory_feature; vkGetPhysicalDeviceFeatures2(device, &features_2); + info.features = features_2.features; info.feature_sampler_ycbcr_conversion = ycbcr_converson_features.samplerYcbcrConversion; info.feature_protected_memory = 
protected_memory_feature.protectedMemory;
diff --git a/chromium/gpu/vulkan/vulkan_instance.h b/chromium/gpu/vulkan/vulkan_instance.h
index a4656d0b132..bd0092e11bf 100644
--- a/chromium/gpu/vulkan/vulkan_instance.h
+++ b/chromium/gpu/vulkan/vulkan_instance.h
@@ -10,8 +10,8 @@
 #include "base/logging.h"
 #include "base/macros.h"
+#include "gpu/config/vulkan_info.h"
 #include "gpu/vulkan/vulkan_export.h"
-#include "gpu/vulkan/vulkan_info.h"
 #include "ui/gfx/extension_set.h"
 namespace gpu {
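For reference on the vulkan_instance.cc hunk above: the added `info.features = features_2.features;` line preserves the core feature block that vkGetPhysicalDeviceFeatures2() returns alongside the chained extension structs. Below is a minimal, illustrative sketch of that chained-query pattern, assuming a Vulkan 1.1 physical device; the function name is hypothetical and the real implementation lives in VulkanInstance::CollectInfo():

#include <vulkan/vulkan.h>

void CollectFeatureInfo(VkPhysicalDevice device) {
  // Extended feature structs are linked into a pNext chain before the query.
  VkPhysicalDeviceProtectedMemoryFeatures protected_memory_features = {};
  protected_memory_features.sType =
      VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROTECTED_MEMORY_FEATURES;

  VkPhysicalDeviceSamplerYcbcrConversionFeatures ycbcr_conversion_features = {};
  ycbcr_conversion_features.sType =
      VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SAMPLER_YCBCR_CONVERSION_FEATURES;
  ycbcr_conversion_features.pNext = &protected_memory_features;

  VkPhysicalDeviceFeatures2 features_2 = {};
  features_2.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FEATURES_2;
  features_2.pNext = &ycbcr_conversion_features;

  // A single call fills every struct in the chain.
  vkGetPhysicalDeviceFeatures2(device, &features_2);

  // The core VkPhysicalDeviceFeatures block comes back in features_2.features
  // (the member the new `info.features = features_2.features;` line copies),
  // while each chained struct reports its own capability bits.
  const bool has_ycbcr_conversion =
      ycbcr_conversion_features.samplerYcbcrConversion == VK_TRUE;
  const bool has_protected_memory =
      protected_memory_features.protectedMemory == VK_TRUE;
  (void)has_ycbcr_conversion;
  (void)has_protected_memory;
}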