summaryrefslogtreecommitdiff
path: root/chromium/gpu/command_buffer
diff options
context:
space:
mode:
authorAllan Sandfeld Jensen <allan.jensen@qt.io>2019-08-30 10:22:43 +0200
committerAllan Sandfeld Jensen <allan.jensen@qt.io>2019-08-30 12:36:28 +0000
commit271a6c3487a14599023a9106329505597638d793 (patch)
treee040d58ffc86c1480b79ca8528020ca9ec919bf8 /chromium/gpu/command_buffer
parent7b2ffa587235a47d4094787d72f38102089f402a (diff)
downloadqtwebengine-chromium-271a6c3487a14599023a9106329505597638d793.tar.gz
BASELINE: Update Chromium to 77.0.3865.59
Change-Id: I1e89a5f3b009a9519a6705102ad65c92fe736f21 Reviewed-by: Michael Brüning <michael.bruning@qt.io>
Diffstat (limited to 'chromium/gpu/command_buffer')
-rw-r--r--chromium/gpu/command_buffer/PRESUBMIT.py109
-rw-r--r--chromium/gpu/command_buffer/build_cmd_buffer_lib.py86
-rwxr-xr-xchromium/gpu/command_buffer/build_gles2_cmd_buffer.py11
-rw-r--r--chromium/gpu/command_buffer/client/cmd_buffer_helper.cc10
-rw-r--r--chromium/gpu/command_buffer/client/cmd_buffer_helper.h11
-rw-r--r--chromium/gpu/command_buffer/client/cmd_buffer_helper_test.cc4
-rw-r--r--chromium/gpu/command_buffer/client/fenced_allocator.cc3
-rw-r--r--chromium/gpu/command_buffer/client/fenced_allocator_test.cc4
-rw-r--r--chromium/gpu/command_buffer/client/gles2_c_lib_autogen.h11
-rw-r--r--chromium/gpu/command_buffer/client/gles2_cmd_helper_autogen.h8
-rw-r--r--chromium/gpu/command_buffer/client/gles2_implementation.cc26
-rw-r--r--chromium/gpu/command_buffer/client/gles2_implementation.h3
-rw-r--r--chromium/gpu/command_buffer/client/gles2_implementation_autogen.h3
-rw-r--r--chromium/gpu/command_buffer/client/gles2_implementation_impl_autogen.h12
-rw-r--r--chromium/gpu/command_buffer/client/gles2_implementation_unittest_autogen.h11
-rw-r--r--chromium/gpu/command_buffer/client/gles2_interface_autogen.h13
-rw-r--r--chromium/gpu/command_buffer/client/gles2_interface_stub_autogen.h2
-rw-r--r--chromium/gpu/command_buffer/client/gles2_interface_stub_impl_autogen.h2
-rw-r--r--chromium/gpu/command_buffer/client/gles2_trace_implementation_autogen.h2
-rw-r--r--chromium/gpu/command_buffer/client/gles2_trace_implementation_impl_autogen.h7
-rw-r--r--chromium/gpu/command_buffer/client/implementation_base.cc3
-rw-r--r--chromium/gpu/command_buffer/client/implementation_base.h2
-rw-r--r--chromium/gpu/command_buffer/client/mapped_memory_unittest.cc4
-rw-r--r--chromium/gpu/command_buffer/client/raster_implementation.cc17
-rw-r--r--chromium/gpu/command_buffer/client/raster_interface.h4
-rw-r--r--chromium/gpu/command_buffer/client/ring_buffer_test.cc4
-rw-r--r--chromium/gpu/command_buffer/client/shared_image_interface.h13
-rw-r--r--chromium/gpu/command_buffer/client/shared_memory_limits.h4
-rw-r--r--chromium/gpu/command_buffer/client/webgpu_implementation.cc7
-rw-r--r--chromium/gpu/command_buffer/common/capabilities.h1
-rw-r--r--chromium/gpu/command_buffer/common/gles2_cmd_format_autogen.h33
-rw-r--r--chromium/gpu/command_buffer/common/gles2_cmd_format_test_autogen.h11
-rw-r--r--chromium/gpu/command_buffer/common/gles2_cmd_ids_autogen.h197
-rw-r--r--chromium/gpu/command_buffer/common/gles2_cmd_utils.cc4
-rw-r--r--chromium/gpu/command_buffer/common/gles2_cmd_utils_implementation_autogen.h4
-rw-r--r--chromium/gpu/command_buffer/common/gpu_memory_buffer_support.cc7
-rw-r--r--chromium/gpu/command_buffer/gles2_cmd_buffer_functions.txt3
-rw-r--r--chromium/gpu/command_buffer/service/BUILD.gn3
-rw-r--r--chromium/gpu/command_buffer/service/DEPS1
-rw-r--r--chromium/gpu/command_buffer/service/abstract_texture_impl_shared_context_state.cc89
-rw-r--r--chromium/gpu/command_buffer/service/abstract_texture_impl_shared_context_state.h31
-rw-r--r--chromium/gpu/command_buffer/service/context_state_test_helpers.cc63
-rw-r--r--chromium/gpu/command_buffer/service/context_state_test_helpers.h44
-rw-r--r--chromium/gpu/command_buffer/service/context_state_test_helpers_autogen.h135
-rw-r--r--chromium/gpu/command_buffer/service/external_vk_image_backing.cc434
-rw-r--r--chromium/gpu/command_buffer/service/external_vk_image_backing.h40
-rw-r--r--chromium/gpu/command_buffer/service/external_vk_image_gl_representation.cc127
-rw-r--r--chromium/gpu/command_buffer/service/external_vk_image_skia_representation.cc15
-rw-r--r--chromium/gpu/command_buffer/service/feature_info.cc32
-rw-r--r--chromium/gpu/command_buffer/service/feature_info.h8
-rw-r--r--chromium/gpu/command_buffer/service/framebuffer_manager.cc22
-rw-r--r--chromium/gpu/command_buffer/service/gl_context_virtual.cc7
-rw-r--r--chromium/gpu/command_buffer/service/gl_context_virtual.h2
-rw-r--r--chromium/gpu/command_buffer/service/gl_context_virtual_unittest.cc44
-rw-r--r--chromium/gpu/command_buffer/service/gl_utils.cc73
-rw-r--r--chromium/gpu/command_buffer/service/gl_utils.h12
-rw-r--r--chromium/gpu/command_buffer/service/gles2_cmd_apply_framebuffer_attachment_cmaa_intel.cc5
-rw-r--r--chromium/gpu/command_buffer/service/gles2_cmd_copy_texture_chromium.cc7
-rw-r--r--chromium/gpu/command_buffer/service/gles2_cmd_decoder.cc216
-rw-r--r--chromium/gpu/command_buffer/service/gles2_cmd_decoder_autogen.h30
-rw-r--r--chromium/gpu/command_buffer/service/gles2_cmd_decoder_mock.cc3
-rw-r--r--chromium/gpu/command_buffer/service/gles2_cmd_decoder_mock.h2
-rw-r--r--chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough.cc152
-rw-r--r--chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough.h23
-rw-r--r--chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough_doer_prototypes.h15
-rw-r--r--chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough_doers.cc282
-rw-r--r--chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough_handlers.cc20
-rw-r--r--chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough_handlers_autogen.cc16
-rw-r--r--chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough_unittest_textures.cc4
-rw-r--r--chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest.cc8
-rw-r--r--chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_0_autogen.h134
-rw-r--r--chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_base.cc99
-rw-r--r--chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_base.h49
-rw-r--r--chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_context_lost.cc24
-rw-r--r--chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_textures.cc4
-rw-r--r--chromium/gpu/command_buffer/service/gpu_service_test.h4
-rw-r--r--chromium/gpu/command_buffer/service/gpu_switches.cc18
-rw-r--r--chromium/gpu/command_buffer/service/gpu_switches.h12
-rw-r--r--chromium/gpu/command_buffer/service/gr_cache_controller.cc12
-rw-r--r--chromium/gpu/command_buffer/service/gr_cache_controller.h1
-rw-r--r--chromium/gpu/command_buffer/service/mailbox_manager_sync.cc9
-rw-r--r--chromium/gpu/command_buffer/service/memory_program_cache.cc6
-rw-r--r--chromium/gpu/command_buffer/service/program_cache.cc27
-rw-r--r--chromium/gpu/command_buffer/service/program_cache.h12
-rw-r--r--chromium/gpu/command_buffer/service/program_cache_unittest.cc46
-rw-r--r--chromium/gpu/command_buffer/service/program_manager.cc8
-rw-r--r--chromium/gpu/command_buffer/service/program_manager.h3
-rw-r--r--chromium/gpu/command_buffer/service/raster_decoder.cc347
-rw-r--r--chromium/gpu/command_buffer/service/raster_decoder_unittest.cc7
-rw-r--r--chromium/gpu/command_buffer/service/raster_decoder_unittest_0_autogen.h135
-rw-r--r--chromium/gpu/command_buffer/service/raster_decoder_unittest_base.cc54
-rw-r--r--chromium/gpu/command_buffer/service/raster_decoder_unittest_base.h15
-rw-r--r--chromium/gpu/command_buffer/service/raster_decoder_unittest_context_lost.cc2
-rw-r--r--chromium/gpu/command_buffer/service/scheduler.cc5
-rw-r--r--chromium/gpu/command_buffer/service/scheduler.h6
-rw-r--r--chromium/gpu/command_buffer/service/service_font_manager.cc24
-rw-r--r--chromium/gpu/command_buffer/service/service_transfer_cache.cc80
-rw-r--r--chromium/gpu/command_buffer/service/service_transfer_cache.h6
-rw-r--r--chromium/gpu/command_buffer/service/service_utils.cc31
-rw-r--r--chromium/gpu/command_buffer/service/shader_manager.h1
-rw-r--r--chromium/gpu/command_buffer/service/shared_context_state.cc54
-rw-r--r--chromium/gpu/command_buffer/service/shared_context_state.h10
-rw-r--r--chromium/gpu/command_buffer/service/shared_context_state_unittest.cc92
-rw-r--r--chromium/gpu/command_buffer/service/shared_image_backing.h6
-rw-r--r--chromium/gpu/command_buffer/service/shared_image_backing_factory_ahardwarebuffer.cc177
-rw-r--r--chromium/gpu/command_buffer/service/shared_image_backing_factory_gl_texture.cc50
-rw-r--r--chromium/gpu/command_buffer/service/shared_image_backing_factory_gl_texture.h1
-rw-r--r--chromium/gpu/command_buffer/service/shared_image_backing_factory_gl_texture_unittest.cc2
-rw-r--r--chromium/gpu/command_buffer/service/shared_image_backing_factory_iosurface.mm41
-rw-r--r--chromium/gpu/command_buffer/service/shared_image_factory.cc21
-rw-r--r--chromium/gpu/command_buffer/service/shared_image_factory.h7
-rw-r--r--chromium/gpu/command_buffer/service/shared_image_manager.cc3
-rw-r--r--chromium/gpu/command_buffer/service/shared_image_manager_unittest.cc2
-rw-r--r--chromium/gpu/command_buffer/service/shared_image_representation.cc42
-rw-r--r--chromium/gpu/command_buffer/service/shared_image_representation.h49
-rw-r--r--chromium/gpu/command_buffer/service/shared_image_representation_skia_gl.cc141
-rw-r--r--chromium/gpu/command_buffer/service/shared_image_representation_skia_gl.h63
-rw-r--r--chromium/gpu/command_buffer/service/skia_utils.cc24
-rw-r--r--chromium/gpu/command_buffer/service/skia_utils.h6
-rw-r--r--chromium/gpu/command_buffer/service/swap_chain_factory_dxgi.cc20
-rw-r--r--chromium/gpu/command_buffer/service/swap_chain_factory_dxgi.h7
-rw-r--r--chromium/gpu/command_buffer/service/swap_chain_factory_dxgi_unittest.cc320
-rw-r--r--chromium/gpu/command_buffer/service/sync_point_manager.cc5
-rw-r--r--chromium/gpu/command_buffer/service/texture_definition.cc7
-rw-r--r--chromium/gpu/command_buffer/service/texture_definition.h1
-rw-r--r--chromium/gpu/command_buffer/service/texture_manager.cc66
-rw-r--r--chromium/gpu/command_buffer/service/texture_manager.h33
-rw-r--r--chromium/gpu/command_buffer/service/webgpu_decoder_impl.cc12
-rw-r--r--chromium/gpu/command_buffer/service/wrapped_sk_image.cc39
129 files changed, 3514 insertions, 1589 deletions
diff --git a/chromium/gpu/command_buffer/PRESUBMIT.py b/chromium/gpu/command_buffer/PRESUBMIT.py
index 170c9ce1e3e..c1f484c8c6b 100644
--- a/chromium/gpu/command_buffer/PRESUBMIT.py
+++ b/chromium/gpu/command_buffer/PRESUBMIT.py
@@ -1,7 +1,6 @@
# Copyright 2019 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
-
"""Enforces command buffer autogen matches script output.
See http://dev.chromium.org/developers/how-tos/depottools/presubmit-scripts
@@ -11,60 +10,84 @@ for more details on the presubmit API built into depot_tools.
import os.path
+def _IsGLES2CmdBufferFile(file):
+ filename = os.path.basename(file.LocalPath())
+ if filename in [
+ 'build_cmd_buffer_lib.py', 'build_gles2_cmd_buffer.py',
+ 'gles2_cmd_buffer_functions.txt', 'gl2.h', 'gl2ext.h', 'gl3.h', 'gl31.h',
+ 'gl2chromium.h', 'gl2extchromium.h'
+ ]:
+ return True
+
+ return ((filename.startswith('gles2') or filename.startswith('context_state')
+ or filename.startswith('client_context_state')) and
+ filename.endswith('_autogen.h'))
+
+
+def _IsRasterCmdBufferFile(file):
+ filename = os.path.basename(file.LocalPath())
+ if filename in [
+ 'build_cmd_buffer_lib.py', 'build_raster_cmd_buffer.py',
+ 'raster_cmd_buffer_functions.txt'
+ ]:
+ return True
+
+ return filename.startswith('raster') and filename.endswith('_autogen.h')
+
+
+def _IsWebGPUCmdBufferFile(file):
+ filename = os.path.basename(file.LocalPath())
+ if filename in [
+ 'build_cmd_buffer_lib.py', 'build_webgpu_cmd_buffer.py',
+ 'webgpu_cmd_buffer_functions.txt'
+ ]:
+ return True
+
+ return filename.startswith('webgpu') and filename.endswith('_autogen.h')
+
+
def CommonChecks(input_api, output_api):
gles2_cmd_buffer_files = input_api.AffectedFiles(
- file_filter=lambda x: os.path.basename(x.LocalPath()) in [
- 'build_cmd_buffer_lib.py', 'build_gles2_cmd_buffer.py',
- 'gles2_cmd_buffer_functions.txt'])
+ file_filter=_IsGLES2CmdBufferFile)
raster_cmd_buffer_files = input_api.AffectedFiles(
- file_filter=lambda x: os.path.basename(x.LocalPath()) in [
- 'build_cmd_buffer_lib.py', 'build_raster_cmd_buffer.py',
- 'raster_cmd_buffer_functions.txt'])
+ file_filter=_IsRasterCmdBufferFile)
webgpu_cmd_buffer_files = input_api.AffectedFiles(
- file_filter=lambda x: os.path.basename(x.LocalPath()) in [
- 'build_cmd_buffer_lib.py', 'build_webgpu_cmd_buffer.py',
- 'webgpu_cmd_buffer_functions.txt'])
-
- autogen_files = input_api.AffectedFiles(
- file_filter=lambda x: x.LocalPath().endswith('_autogen.h'))
-
- # Use input_api.change.AffectedFiles() to get files outside this directory.
- external_gl_headers = input_api.change.AffectedFiles(
- file_filter=lambda x: os.path.basename(x.LocalPath()) in [
- 'gl2.h', 'gl2ext.h', 'gl3.h', 'gl31.h', 'gl2chromium.h',
- 'gl2extchromium.h'
- ])
+ file_filter=_IsWebGPUCmdBufferFile)
messages = []
- if (len(autogen_files) > 0 and len(gles2_cmd_buffer_files) == 0 and
- len(external_gl_headers) == 0 and len(raster_cmd_buffer_files) == 0 and
- len(webgpu_cmd_buffer_files) == 0):
- long_text = 'Changed files:\n'
- for file in autogen_files:
- long_text += file.LocalPath() + '\n'
- long_text += '\n'
- messages.append(output_api.PresubmitError(
- 'Command buffer autogenerated files changed but generators did not.',
- long_text=long_text))
-
with input_api.temporary_directory() as temp_dir:
commands = []
if len(gles2_cmd_buffer_files) > 0:
- commands.append(input_api.Command(name='build_gles2_cmd_buffer',
- cmd=[input_api.python_executable, 'build_gles2_cmd_buffer.py',
- '--check', '--output-dir=' + temp_dir],
- kwargs={}, message=output_api.PresubmitError))
+ commands.append(
+ input_api.Command(
+ name='build_gles2_cmd_buffer',
+ cmd=[
+ input_api.python_executable, 'build_gles2_cmd_buffer.py',
+ '--check', '--output-dir=' + temp_dir
+ ],
+ kwargs={},
+ message=output_api.PresubmitError))
if len(raster_cmd_buffer_files) > 0:
- commands.append(input_api.Command(name='build_raster_cmd_buffer',
- cmd=[input_api.python_executable, 'build_raster_cmd_buffer.py',
- '--check', '--output-dir=' + temp_dir],
- kwargs={}, message=output_api.PresubmitError))
+ commands.append(
+ input_api.Command(
+ name='build_raster_cmd_buffer',
+ cmd=[
+ input_api.python_executable, 'build_raster_cmd_buffer.py',
+ '--check', '--output-dir=' + temp_dir
+ ],
+ kwargs={},
+ message=output_api.PresubmitError))
if len(webgpu_cmd_buffer_files) > 0:
- commands.append(input_api.Command(name='build_webgpu_cmd_buffer',
- cmd=[input_api.python_executable, 'build_webgpu_cmd_buffer.py',
- '--check', '--output-dir=' + temp_dir],
- kwargs={}, message=output_api.PresubmitError))
+ commands.append(
+ input_api.Command(
+ name='build_webgpu_cmd_buffer',
+ cmd=[
+ input_api.python_executable, 'build_webgpu_cmd_buffer.py',
+ '--check', '--output-dir=' + temp_dir
+ ],
+ kwargs={},
+ message=output_api.PresubmitError))
if len(commands) > 0:
messages.extend(input_api.RunTests(commands))
diff --git a/chromium/gpu/command_buffer/build_cmd_buffer_lib.py b/chromium/gpu/command_buffer/build_cmd_buffer_lib.py
index 9d66d120638..14004259867 100644
--- a/chromium/gpu/command_buffer/build_cmd_buffer_lib.py
+++ b/chromium/gpu/command_buffer/build_cmd_buffer_lib.py
@@ -576,25 +576,23 @@ _STATE_INFO = {
'name': 'scissor_x',
'type': 'GLint',
'default': '0',
- 'expected': 'kViewportX',
},
{
'name': 'scissor_y',
'type': 'GLint',
'default': '0',
- 'expected': 'kViewportY',
},
{
'name': 'scissor_width',
'type': 'GLsizei',
'default': '1',
- 'expected': 'kViewportWidth',
+ 'expected': 'initial_size.width()',
},
{
'name': 'scissor_height',
'type': 'GLsizei',
'default': '1',
- 'expected': 'kViewportHeight',
+ 'expected': 'initial_size.height()',
},
],
},
@@ -608,25 +606,23 @@ _STATE_INFO = {
'name': 'viewport_x',
'type': 'GLint',
'default': '0',
- 'expected': 'kViewportX',
},
{
'name': 'viewport_y',
'type': 'GLint',
'default': '0',
- 'expected': 'kViewportY',
},
{
'name': 'viewport_width',
'type': 'GLsizei',
'default': '1',
- 'expected': 'kViewportWidth',
+ 'expected': 'initial_size.width()',
},
{
'name': 'viewport_height',
'type': 'GLsizei',
'default': '1',
- 'expected': 'kViewportHeight',
+ 'expected': 'initial_size.height()',
},
],
},
@@ -780,7 +776,7 @@ def CachedStateName(item):
return 'cached_' + item['name']
return item['name']
-def GuardState(state, operation):
+def GuardState(state, operation, feature_info):
if 'manual' in state:
assert state['manual']
return ""
@@ -789,11 +785,11 @@ def GuardState(state, operation):
result_end = []
if 'es3' in state:
assert state['es3']
- result.append(" if (feature_info_->IsES3Capable()) {\n");
+ result.append(" if (%s->IsES3Capable()) {\n" % feature_info);
result_end.append(" }\n")
if 'extension_flag' in state:
- result.append(" if (feature_info_->feature_flags().%s) {\n " %
- (state['extension_flag']))
+ result.append(" if (%s->feature_flags().%s) {\n " %
+ (feature_info, state['extension_flag']))
result_end.append(" }\n")
if 'gl_version_flag' in state:
name = state['gl_version_flag']
@@ -801,8 +797,8 @@ def GuardState(state, operation):
if name[0] == '!':
inverted = '!'
name = name[1:]
- result.append(" if (%sfeature_info_->gl_version_info().%s) {\n" %
- (inverted, name))
+ result.append(" if (%s%s->gl_version_info().%s) {\n" %
+ (inverted, feature_info, name))
result_end.append(" }\n")
result.append(operation)
@@ -1868,7 +1864,7 @@ class StateSetNamedParameter(TypeHandler):
if not func.GetInfo("no_gl"):
operation = " %s(%s);\n" % \
(func.GetGLFunctionName(), func.MakeOriginalArgString(""))
- f.write(GuardState(state, operation))
+ f.write(GuardState(state, operation, "feature_info_"))
f.write(" }\n")
f.write(" break;\n")
f.write(" default:\n")
@@ -1898,15 +1894,18 @@ class CustomHandler(TypeHandler):
def WriteServiceImplementation(self, func, f):
"""Overrriden from TypeHandler."""
- pass
+ if func.IsES31():
+ TypeHandler.WriteServiceImplementation(self, func, f)
def WriteImmediateServiceImplementation(self, func, f):
"""Overrriden from TypeHandler."""
- pass
+ if func.IsES31():
+ TypeHandler.WriteImmediateServiceImplementation(self, func, f)
def WriteBucketServiceImplementation(self, func, f):
"""Overrriden from TypeHandler."""
- pass
+ if func.IsES31():
+ TypeHandler.WriteBucketServiceImplementation(self, func, f)
def WritePassthroughServiceImplementation(self, func, f):
"""Overrriden from TypeHandler."""
@@ -2620,7 +2619,7 @@ class DeleteHandler(TypeHandler):
def WriteServiceImplementation(self, func, f):
"""Overrriden from TypeHandler."""
- if func.IsES3():
+ if func.IsES3() or func.IsES31():
TypeHandler.WriteServiceImplementation(self, func, f)
# HandleDeleteShader and HandleDeleteProgram are manually written.
@@ -4739,7 +4738,8 @@ TEST_P(%(test_name)s, %(name)sInvalidArgs) {
def WriteServiceImplementation(self, func, f):
"""Overrriden from TypeHandler."""
- pass
+ if func.IsES31():
+ TypeHandler.WriteServiceImplementation(self, func, f)
def WritePassthroughServiceImplementation(self, func, f):
"""Overrriden from TypeHandler."""
@@ -6800,7 +6800,8 @@ void ContextState::InitState(const ContextState *prev_state) const {
if test_prev:
operation.append(" }")
- guarded_operation = GuardState(item, ''.join(operation))
+ guarded_operation = GuardState(item, ''.join(operation),
+ "feature_info_")
f.write(guarded_operation)
else:
if 'extension_flag' in state:
@@ -6998,14 +6999,16 @@ namespace gles2 {
})
self.generated_cpp_filenames.append(filename)
- comment = ("// It is included by %s_cmd_decoder_unittest_base.cc\n"
- % _lower_prefix)
- filename = filename_pattern % 0
+
+ def WriteServiceContextStateTestHelpers(self, filename):
+ comment = "// It is included by context_state_test_helpers.cc\n"
with CHeaderWriter(filename, self.year, comment) as f:
if self.capability_flags:
f.write(
-"""void %sDecoderTestBase::SetupInitCapabilitiesExpectations(
- bool es3_capable) {""" % _prefix)
+ """void ContextStateTestHelpers::SetupInitCapabilitiesExpectations(
+ MockGL* gl,
+ gles2::FeatureInfo* feature_info) {
+""")
for capability in self.capability_flags:
capability_no_init = 'no_init' in capability and \
capability['no_init'] == True
@@ -7015,28 +7018,30 @@ namespace gles2 {
if capability_es3:
continue
if 'extension_flag' in capability:
- f.write(" if (feature_info()->feature_flags().%s) {\n" %
+ f.write(" if (feature_info->feature_flags().%s) {\n" %
capability['extension_flag'])
f.write(" ")
- f.write(" ExpectEnableDisable(GL_%s, %s);\n" %
+ f.write(" ExpectEnableDisable(gl, GL_%s, %s);\n" %
(capability['name'].upper(),
('false', 'true')['default' in capability]))
if 'extension_flag' in capability:
f.write(" }")
- f.write(" if (es3_capable) {")
+ f.write(" if (feature_info->IsES3Capable()) {")
for capability in self.capability_flags:
capability_es3 = 'es3' in capability and capability['es3'] == True
if capability_es3:
- f.write(" ExpectEnableDisable(GL_%s, %s);\n" %
+ f.write(" ExpectEnableDisable(gl, GL_%s, %s);\n" %
(capability['name'].upper(),
('false', 'true')['default' in capability]))
f.write(""" }
}
""")
f.write("""
-void %sDecoderTestBase::SetupInitStateExpectations(bool es3_capable) {
- auto* feature_info_ = feature_info();
-""" % _prefix)
+void ContextStateTestHelpers::SetupInitStateExpectations(
+ MockGL* gl,
+ gles2::FeatureInfo* feature_info,
+ const gfx::Size& initial_size) {
+""")
# We need to sort the keys so the expectations match
for state_name in sorted(_STATE_INFO.keys()):
state = _STATE_INFO[state_name]
@@ -7051,7 +7056,7 @@ void %sDecoderTestBase::SetupInitStateExpectations(bool es3_capable) {
else:
args.append(item['default'])
f.write(
- " EXPECT_CALL(*gl_, %s(%s, %s))\n" %
+ " EXPECT_CALL(*gl, %s(%s, %s))\n" %
(state['func'], ('GL_FRONT', 'GL_BACK')[ndx],
", ".join(args)))
f.write(" .Times(1)\n")
@@ -7065,7 +7070,7 @@ void %sDecoderTestBase::SetupInitStateExpectations(bool es3_capable) {
operation = []
operation.append(
- " EXPECT_CALL(*gl_, %s(%s, %s))\n" %
+ " EXPECT_CALL(*gl, %s(%s, %s))\n" %
(state['func'],
(item['enum_set']
if 'enum_set' in item else item['enum']),
@@ -7073,11 +7078,12 @@ void %sDecoderTestBase::SetupInitStateExpectations(bool es3_capable) {
operation.append(" .Times(1)\n")
operation.append(" .RetiresOnSaturation();\n")
- guarded_operation = GuardState(item, ''.join(operation))
+ guarded_operation = GuardState(item, ''.join(operation),
+ "feature_info")
f.write(guarded_operation)
elif 'no_init' not in state:
if 'extension_flag' in state:
- f.write(" if (feature_info()->feature_flags().%s) {\n" %
+ f.write(" if (feature_info->feature_flags().%s) {\n" %
state['extension_flag'])
f.write(" ")
args = []
@@ -7089,16 +7095,16 @@ void %sDecoderTestBase::SetupInitStateExpectations(bool es3_capable) {
# TODO: Currently we do not check array values.
args = ["_" if isinstance(arg, list) else arg for arg in args]
if 'custom_function' in state:
- f.write(" SetupInitStateManualExpectationsFor%s(%s);\n" %
+ f.write(" SetupInitStateManualExpectationsFor%s(gl, %s);\n" %
(state['func'], ", ".join(args)))
else:
- f.write(" EXPECT_CALL(*gl_, %s(%s))\n" %
+ f.write(" EXPECT_CALL(*gl, %s(%s))\n" %
(state['func'], ", ".join(args)))
f.write(" .Times(1)\n")
f.write(" .RetiresOnSaturation();\n")
if 'extension_flag' in state:
f.write(" }\n")
- f.write(" SetupInitStateManualExpectations(es3_capable);\n")
+ f.write(" SetupInitStateManualExpectations(gl, feature_info);\n")
f.write("}\n")
self.generated_cpp_filenames.append(filename)
diff --git a/chromium/gpu/command_buffer/build_gles2_cmd_buffer.py b/chromium/gpu/command_buffer/build_gles2_cmd_buffer.py
index e830f36dcb8..ccf87291d02 100755
--- a/chromium/gpu/command_buffer/build_gles2_cmd_buffer.py
+++ b/chromium/gpu/command_buffer/build_gles2_cmd_buffer.py
@@ -2202,6 +2202,12 @@ _FUNCTION_INFO = {
'es31': True,
'unit_test': False,
},
+ 'DispatchComputeIndirect': {
+ 'cmd_args': 'GLintptrNotNegative offset',
+ 'trace_level': 2,
+ 'es31': True,
+ 'unit_test': False,
+ },
'DrawArrays': {
'type': 'Custom',
'impl_func': False,
@@ -3899,7 +3905,8 @@ _FUNCTION_INFO = {
'impl_func': False,
'client_test': False,
'cmd_args': 'GLfloat opacity, GLboolean is_clipped, '
- 'GLint sorting_context_id, GLuint shm_id, GLuint shm_offset',
+ 'GLint sorting_context_id, '
+ 'GLuint shm_id, GLuint shm_offset',
'extension': 'CHROMIUM_schedule_ca_layer',
},
'ScheduleCALayerCHROMIUM': {
@@ -4408,6 +4415,8 @@ def main(argv):
"gpu/command_buffer/service/context_state_autogen.h")
gen.WriteServiceContextStateImpl(
"gpu/command_buffer/service/context_state_impl_autogen.h")
+ gen.WriteServiceContextStateTestHelpers(
+ "gpu/command_buffer/service/context_state_test_helpers_autogen.h")
gen.WriteClientContextStateHeader(
"gpu/command_buffer/client/client_context_state_autogen.h")
gen.WriteClientContextStateImpl(
diff --git a/chromium/gpu/command_buffer/client/cmd_buffer_helper.cc b/chromium/gpu/command_buffer/client/cmd_buffer_helper.cc
index 5a764e48d45..620fa6535a8 100644
--- a/chromium/gpu/command_buffer/client/cmd_buffer_helper.cc
+++ b/chromium/gpu/command_buffer/client/cmd_buffer_helper.cc
@@ -260,8 +260,18 @@ bool CommandBufferHelper::HasTokenPassed(int32_t token) {
// Don't update state if we don't have to.
if (token <= cached_last_token_read_)
return true;
+ RefreshCachedToken();
+ return token <= cached_last_token_read_;
+}
+
+void CommandBufferHelper::RefreshCachedToken() {
CommandBuffer::State last_state = command_buffer_->GetLastState();
UpdateCachedState(last_state);
+}
+
+bool CommandBufferHelper::HasCachedTokenPassed(int32_t token) {
+ if (token > token_)
+ return true;
return token <= cached_last_token_read_;
}
diff --git a/chromium/gpu/command_buffer/client/cmd_buffer_helper.h b/chromium/gpu/command_buffer/client/cmd_buffer_helper.h
index c8bbb11237e..eca26c60e79 100644
--- a/chromium/gpu/command_buffer/client/cmd_buffer_helper.h
+++ b/chromium/gpu/command_buffer/client/cmd_buffer_helper.h
@@ -102,11 +102,20 @@ class GPU_EXPORT CommandBufferHelper {
// shutdown.
int32_t InsertToken();
- // Returns true if the token has passed.
+ // Returns true if the token has passed. This combines RefreshCachedToken
+ // and HasCachedTokenPassed. Don't call this function if you have to call
+ // it repeatedly, and instead use those alternative functions.
// Parameters:
// the value of the token to check whether it has passed
bool HasTokenPassed(int32_t token);
+ // Returns true if the token has passed, but doesn't take a lock and check
+ // for what the latest token state is.
+ bool HasCachedTokenPassed(int32_t token);
+
+ // Update the state of the latest passed token.
+ void RefreshCachedToken();
+
// Waits until the token of a particular value has passed through the command
// stream (i.e. commands inserted before that token have been executed).
// NOTE: This will call Flush if it needs to block.
diff --git a/chromium/gpu/command_buffer/client/cmd_buffer_helper_test.cc b/chromium/gpu/command_buffer/client/cmd_buffer_helper_test.cc
index 8e2755df7f4..23e9fa3acbf 100644
--- a/chromium/gpu/command_buffer/client/cmd_buffer_helper_test.cc
+++ b/chromium/gpu/command_buffer/client/cmd_buffer_helper_test.cc
@@ -13,8 +13,8 @@
#include "base/bind.h"
#include "base/bind_helpers.h"
#include "base/macros.h"
-#include "base/message_loop/message_loop.h"
#include "base/run_loop.h"
+#include "base/test/scoped_task_environment.h"
#include "gpu/command_buffer/client/cmd_buffer_helper.h"
#include "gpu/command_buffer/client/command_buffer_direct_locked.h"
#include "gpu/command_buffer/service/mocks.h"
@@ -202,7 +202,7 @@ class CommandBufferHelperTest : public testing::Test {
std::vector<std::unique_ptr<CommandBufferEntry[]>> test_command_args_;
unsigned int test_command_next_id_;
Sequence sequence_;
- base::MessageLoop message_loop_;
+ base::test::ScopedTaskEnvironment scoped_task_environment_;
};
// Checks immediate_entry_count_ changes based on RingBuffer state.
diff --git a/chromium/gpu/command_buffer/client/fenced_allocator.cc b/chromium/gpu/command_buffer/client/fenced_allocator.cc
index 50a96aa4cb0..52960d8848d 100644
--- a/chromium/gpu/command_buffer/client/fenced_allocator.cc
+++ b/chromium/gpu/command_buffer/client/fenced_allocator.cc
@@ -220,10 +220,11 @@ FencedAllocator::BlockIndex FencedAllocator::WaitForTokenAndFreeBlock(
// Frees any blocks pending a token for which the token has been read.
void FencedAllocator::FreeUnused() {
+ helper_->RefreshCachedToken();
for (uint32_t i = 0; i < blocks_.size();) {
Block& block = blocks_[i];
if (block.state == FREE_PENDING_TOKEN &&
- helper_->HasTokenPassed(block.token)) {
+ helper_->HasCachedTokenPassed(block.token)) {
block.state = FREE;
i = CollapseFreeBlock(i);
} else {
diff --git a/chromium/gpu/command_buffer/client/fenced_allocator_test.cc b/chromium/gpu/command_buffer/client/fenced_allocator_test.cc
index be304f1d755..a5f7cd1a174 100644
--- a/chromium/gpu/command_buffer/client/fenced_allocator_test.cc
+++ b/chromium/gpu/command_buffer/client/fenced_allocator_test.cc
@@ -11,8 +11,8 @@
#include "base/bind.h"
#include "base/bind_helpers.h"
#include "base/memory/aligned_memory.h"
-#include "base/message_loop/message_loop.h"
#include "base/run_loop.h"
+#include "base/test/scoped_task_environment.h"
#include "gpu/command_buffer/client/cmd_buffer_helper.h"
#include "gpu/command_buffer/client/fenced_allocator.h"
#include "gpu/command_buffer/service/command_buffer_direct.h"
@@ -58,7 +58,7 @@ class BaseFencedAllocatorTest : public testing::Test {
std::unique_ptr<CommandBufferDirect> command_buffer_;
std::unique_ptr<AsyncAPIMock> api_mock_;
std::unique_ptr<CommandBufferHelper> helper_;
- base::MessageLoop message_loop_;
+ base::test::ScopedTaskEnvironment scoped_task_environment_;
};
#ifndef _MSC_VER
diff --git a/chromium/gpu/command_buffer/client/gles2_c_lib_autogen.h b/chromium/gpu/command_buffer/client/gles2_c_lib_autogen.h
index 1772133baf4..8f261f43a19 100644
--- a/chromium/gpu/command_buffer/client/gles2_c_lib_autogen.h
+++ b/chromium/gpu/command_buffer/client/gles2_c_lib_autogen.h
@@ -1272,6 +1272,9 @@ void GL_APIENTRY GLES2DispatchCompute(GLuint num_groups_x,
gles2::GetGLContext()->DispatchCompute(num_groups_x, num_groups_y,
num_groups_z);
}
+void GL_APIENTRY GLES2DispatchComputeIndirect(GLintptr offset) {
+ gles2::GetGLContext()->DispatchComputeIndirect(offset);
+}
void GL_APIENTRY GLES2GetProgramInterfaceiv(GLuint program,
GLenum program_interface,
GLenum pname,
@@ -1574,10 +1577,12 @@ void GL_APIENTRY
GLES2ScheduleCALayerSharedStateCHROMIUM(GLfloat opacity,
GLboolean is_clipped,
const GLfloat* clip_rect,
+ const GLfloat* rounded_corner_bounds,
GLint sorting_context_id,
const GLfloat* transform) {
gles2::GetGLContext()->ScheduleCALayerSharedStateCHROMIUM(
- opacity, is_clipped, clip_rect, sorting_context_id, transform);
+ opacity, is_clipped, clip_rect, rounded_corner_bounds, sorting_context_id,
+ transform);
}
void GL_APIENTRY GLES2ScheduleCALayerCHROMIUM(GLuint contents_texture_id,
const GLfloat* contents_rect,
@@ -2967,6 +2972,10 @@ extern const NameToFunc g_gles2_function_table[] = {
reinterpret_cast<GLES2FunctionPointer>(glDispatchCompute),
},
{
+ "glDispatchComputeIndirect",
+ reinterpret_cast<GLES2FunctionPointer>(glDispatchComputeIndirect),
+ },
+ {
"glGetProgramInterfaceiv",
reinterpret_cast<GLES2FunctionPointer>(glGetProgramInterfaceiv),
},
diff --git a/chromium/gpu/command_buffer/client/gles2_cmd_helper_autogen.h b/chromium/gpu/command_buffer/client/gles2_cmd_helper_autogen.h
index dbe1d6080d2..cc930c0f828 100644
--- a/chromium/gpu/command_buffer/client/gles2_cmd_helper_autogen.h
+++ b/chromium/gpu/command_buffer/client/gles2_cmd_helper_autogen.h
@@ -2486,6 +2486,14 @@ void DispatchCompute(GLuint num_groups_x,
}
}
+void DispatchComputeIndirect(GLintptr offset) {
+ gles2::cmds::DispatchComputeIndirect* c =
+ GetCmdSpace<gles2::cmds::DispatchComputeIndirect>();
+ if (c) {
+ c->Init(offset);
+ }
+}
+
void GetProgramInterfaceiv(GLuint program,
GLenum program_interface,
GLenum pname,
diff --git a/chromium/gpu/command_buffer/client/gles2_implementation.cc b/chromium/gpu/command_buffer/client/gles2_implementation.cc
index 1784043bfdc..5703d8c81d7 100644
--- a/chromium/gpu/command_buffer/client/gles2_implementation.cc
+++ b/chromium/gpu/command_buffer/client/gles2_implementation.cc
@@ -208,6 +208,7 @@ GLES2Implementation::GLES2Implementation(
bound_atomic_counter_buffer_(0),
bound_copy_read_buffer_(0),
bound_copy_write_buffer_(0),
+ bound_dispatch_indirect_buffer_(0),
bound_pixel_pack_buffer_(0),
bound_pixel_unpack_buffer_(0),
bound_shader_storage_buffer_(0),
@@ -223,8 +224,7 @@ GLES2Implementation::GLES2Implementation(
max_extra_transfer_buffer_size_(0),
current_trace_stack_(0),
aggressively_free_resources_(false),
- cached_extension_string_(nullptr),
- weak_ptr_factory_(this) {
+ cached_extension_string_(nullptr) {
DCHECK(helper);
std::stringstream ss;
@@ -1054,6 +1054,9 @@ bool GLES2Implementation::GetHelper(GLenum pname, GLint* params) {
case GL_ATOMIC_COUNTER_BUFFER_BINDING:
*params = bound_atomic_counter_buffer_;
return true;
+ case GL_DISPATCH_INDIRECT_BUFFER_BINDING:
+ *params = bound_dispatch_indirect_buffer_;
+ return true;
case GL_SHADER_STORAGE_BUFFER_BINDING:
*params = bound_shader_storage_buffer_;
return true;
@@ -4654,6 +4657,12 @@ void GLES2Implementation::BindBufferHelper(GLenum target, GLuint buffer_id) {
changed = true;
}
break;
+ case GL_DISPATCH_INDIRECT_BUFFER:
+ if (bound_dispatch_indirect_buffer_ != buffer_id) {
+ bound_dispatch_indirect_buffer_ = buffer_id;
+ changed = true;
+ }
+ break;
case GL_ELEMENT_ARRAY_BUFFER:
changed = vertex_array_object_manager_->BindElementArray(buffer_id);
break;
@@ -4976,6 +4985,9 @@ void GLES2Implementation::DeleteBuffersHelper(GLsizei n,
if (buffers[ii] == bound_copy_write_buffer_) {
bound_copy_write_buffer_ = 0;
}
+ if (buffers[ii] == bound_dispatch_indirect_buffer_) {
+ bound_dispatch_indirect_buffer_ = 0;
+ }
if (buffers[ii] == bound_pixel_pack_buffer_) {
bound_pixel_pack_buffer_ = 0;
}
@@ -5385,9 +5397,11 @@ void GLES2Implementation::ScheduleCALayerSharedStateCHROMIUM(
GLfloat opacity,
GLboolean is_clipped,
const GLfloat* clip_rect,
+ const GLfloat* rounded_corner_bounds,
GLint sorting_context_id,
const GLfloat* transform) {
- uint32_t shm_size = 20 * sizeof(GLfloat);
+ // 4 for clip_rect, 5 for rounded_corner_rect, 16 for transform.
+ uint32_t shm_size = 25 * sizeof(GLfloat);
ScopedTransferBufferPtr buffer(shm_size, helper_, transfer_buffer_);
if (!buffer.valid() || buffer.size() < shm_size) {
SetGLError(GL_OUT_OF_MEMORY, "GLES2::ScheduleCALayerSharedStateCHROMIUM",
@@ -5396,7 +5410,8 @@ void GLES2Implementation::ScheduleCALayerSharedStateCHROMIUM(
}
GLfloat* mem = static_cast<GLfloat*>(buffer.address());
memcpy(mem + 0, clip_rect, 4 * sizeof(GLfloat));
- memcpy(mem + 4, transform, 16 * sizeof(GLfloat));
+ memcpy(mem + 4, rounded_corner_bounds, 5 * sizeof(GLfloat));
+ memcpy(mem + 9, transform, 16 * sizeof(GLfloat));
helper_->ScheduleCALayerSharedStateCHROMIUM(opacity, is_clipped,
sorting_context_id,
buffer.shm_id(), buffer.offset());
@@ -5678,6 +5693,7 @@ GLboolean GLES2Implementation::UnmapBuffer(GLenum target) {
case GL_ELEMENT_ARRAY_BUFFER:
case GL_COPY_READ_BUFFER:
case GL_COPY_WRITE_BUFFER:
+ case GL_DISPATCH_INDIRECT_BUFFER:
case GL_PIXEL_PACK_BUFFER:
case GL_PIXEL_UNPACK_BUFFER:
case GL_SHADER_STORAGE_BUFFER:
@@ -6700,6 +6716,8 @@ bool CreateImageValidInternalFormat(GLenum internalformat,
return capabilities.texture_norm16;
case GL_RGB10_A2_EXT:
return capabilities.image_xr30 || capabilities.image_xb30;
+ case GL_RGB_YCBCR_P010_CHROMIUM:
+ return capabilities.image_ycbcr_p010;
case GL_RED:
case GL_RG_EXT:
case GL_RGB:
diff --git a/chromium/gpu/command_buffer/client/gles2_implementation.h b/chromium/gpu/command_buffer/client/gles2_implementation.h
index 99e350d9b8d..cb24f56098d 100644
--- a/chromium/gpu/command_buffer/client/gles2_implementation.h
+++ b/chromium/gpu/command_buffer/client/gles2_implementation.h
@@ -750,6 +750,7 @@ class GLES2_IMPL_EXPORT GLES2Implementation : public GLES2Interface,
GLuint bound_atomic_counter_buffer_;
GLuint bound_copy_read_buffer_;
GLuint bound_copy_write_buffer_;
+ GLuint bound_dispatch_indirect_buffer_;
GLuint bound_pixel_pack_buffer_;
GLuint bound_pixel_unpack_buffer_;
GLuint bound_shader_storage_buffer_;
@@ -854,7 +855,7 @@ class GLES2_IMPL_EXPORT GLES2Implementation : public GLES2Interface,
std::string last_active_url_;
- base::WeakPtrFactory<GLES2Implementation> weak_ptr_factory_;
+ base::WeakPtrFactory<GLES2Implementation> weak_ptr_factory_{this};
DISALLOW_COPY_AND_ASSIGN(GLES2Implementation);
};
diff --git a/chromium/gpu/command_buffer/client/gles2_implementation_autogen.h b/chromium/gpu/command_buffer/client/gles2_implementation_autogen.h
index dd25fd00046..af1eb25984b 100644
--- a/chromium/gpu/command_buffer/client/gles2_implementation_autogen.h
+++ b/chromium/gpu/command_buffer/client/gles2_implementation_autogen.h
@@ -890,6 +890,8 @@ void DispatchCompute(GLuint num_groups_x,
GLuint num_groups_y,
GLuint num_groups_z) override;
+void DispatchComputeIndirect(GLintptr offset) override;
+
void GetProgramInterfaceiv(GLuint program,
GLenum program_interface,
GLenum pname,
@@ -1111,6 +1113,7 @@ void ScheduleOverlayPlaneCHROMIUM(GLint plane_z_order,
void ScheduleCALayerSharedStateCHROMIUM(GLfloat opacity,
GLboolean is_clipped,
const GLfloat* clip_rect,
+ const GLfloat* rounded_corner_bounds,
GLint sorting_context_id,
const GLfloat* transform) override;
diff --git a/chromium/gpu/command_buffer/client/gles2_implementation_impl_autogen.h b/chromium/gpu/command_buffer/client/gles2_implementation_impl_autogen.h
index 123fcfe07b9..b485a6371d3 100644
--- a/chromium/gpu/command_buffer/client/gles2_implementation_impl_autogen.h
+++ b/chromium/gpu/command_buffer/client/gles2_implementation_impl_autogen.h
@@ -3116,6 +3116,18 @@ void GLES2Implementation::DispatchCompute(GLuint num_groups_x,
CheckGLError();
}
+void GLES2Implementation::DispatchComputeIndirect(GLintptr offset) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glDispatchComputeIndirect("
+ << offset << ")");
+ if (offset < 0) {
+ SetGLError(GL_INVALID_VALUE, "glDispatchComputeIndirect", "offset < 0");
+ return;
+ }
+ helper_->DispatchComputeIndirect(offset);
+ CheckGLError();
+}
+
void GLES2Implementation::GetProgramInterfaceiv(GLuint program,
GLenum program_interface,
GLenum pname,
diff --git a/chromium/gpu/command_buffer/client/gles2_implementation_unittest_autogen.h b/chromium/gpu/command_buffer/client/gles2_implementation_unittest_autogen.h
index bc43ccd8a98..0127b476ba8 100644
--- a/chromium/gpu/command_buffer/client/gles2_implementation_unittest_autogen.h
+++ b/chromium/gpu/command_buffer/client/gles2_implementation_unittest_autogen.h
@@ -2724,6 +2724,17 @@ TEST_F(GLES2ImplementationTest, DispatchCompute) {
EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
}
+TEST_F(GLES2ImplementationTest, DispatchComputeIndirect) {
+ struct Cmds {
+ cmds::DispatchComputeIndirect cmd;
+ };
+ Cmds expected;
+ expected.cmd.Init(1);
+
+ gl_->DispatchComputeIndirect(1);
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+}
+
TEST_F(GLES2ImplementationTest, GetProgramInterfaceiv) {
struct Cmds {
cmds::GetProgramInterfaceiv cmd;
diff --git a/chromium/gpu/command_buffer/client/gles2_interface_autogen.h b/chromium/gpu/command_buffer/client/gles2_interface_autogen.h
index dcf283b6555..2ae7aef998e 100644
--- a/chromium/gpu/command_buffer/client/gles2_interface_autogen.h
+++ b/chromium/gpu/command_buffer/client/gles2_interface_autogen.h
@@ -656,6 +656,7 @@ virtual void BindImageTexture(GLuint unit,
virtual void DispatchCompute(GLuint num_groups_x,
GLuint num_groups_y,
GLuint num_groups_z) = 0;
+virtual void DispatchComputeIndirect(GLintptr offset) = 0;
virtual void GetProgramInterfaceiv(GLuint program,
GLenum program_interface,
GLenum pname,
@@ -824,11 +825,13 @@ virtual void ScheduleOverlayPlaneCHROMIUM(GLint plane_z_order,
GLfloat uv_height,
GLboolean enable_blend,
GLuint gpu_fence_id) = 0;
-virtual void ScheduleCALayerSharedStateCHROMIUM(GLfloat opacity,
- GLboolean is_clipped,
- const GLfloat* clip_rect,
- GLint sorting_context_id,
- const GLfloat* transform) = 0;
+virtual void ScheduleCALayerSharedStateCHROMIUM(
+ GLfloat opacity,
+ GLboolean is_clipped,
+ const GLfloat* clip_rect,
+ const GLfloat* rounded_corner_bounds,
+ GLint sorting_context_id,
+ const GLfloat* transform) = 0;
virtual void ScheduleCALayerCHROMIUM(GLuint contents_texture_id,
const GLfloat* contents_rect,
GLuint background_color,
diff --git a/chromium/gpu/command_buffer/client/gles2_interface_stub_autogen.h b/chromium/gpu/command_buffer/client/gles2_interface_stub_autogen.h
index bd728edb570..4951913a5cf 100644
--- a/chromium/gpu/command_buffer/client/gles2_interface_stub_autogen.h
+++ b/chromium/gpu/command_buffer/client/gles2_interface_stub_autogen.h
@@ -633,6 +633,7 @@ void BindImageTexture(GLuint unit,
void DispatchCompute(GLuint num_groups_x,
GLuint num_groups_y,
GLuint num_groups_z) override;
+void DispatchComputeIndirect(GLintptr offset) override;
void GetProgramInterfaceiv(GLuint program,
GLenum program_interface,
GLenum pname,
@@ -804,6 +805,7 @@ void ScheduleOverlayPlaneCHROMIUM(GLint plane_z_order,
void ScheduleCALayerSharedStateCHROMIUM(GLfloat opacity,
GLboolean is_clipped,
const GLfloat* clip_rect,
+ const GLfloat* rounded_corner_bounds,
GLint sorting_context_id,
const GLfloat* transform) override;
void ScheduleCALayerCHROMIUM(GLuint contents_texture_id,
diff --git a/chromium/gpu/command_buffer/client/gles2_interface_stub_impl_autogen.h b/chromium/gpu/command_buffer/client/gles2_interface_stub_impl_autogen.h
index 30573101205..9ed76ef4b8f 100644
--- a/chromium/gpu/command_buffer/client/gles2_interface_stub_impl_autogen.h
+++ b/chromium/gpu/command_buffer/client/gles2_interface_stub_impl_autogen.h
@@ -859,6 +859,7 @@ void GLES2InterfaceStub::BindImageTexture(GLuint /* unit */,
void GLES2InterfaceStub::DispatchCompute(GLuint /* num_groups_x */,
GLuint /* num_groups_y */,
GLuint /* num_groups_z */) {}
+void GLES2InterfaceStub::DispatchComputeIndirect(GLintptr /* offset */) {}
void GLES2InterfaceStub::GetProgramInterfaceiv(GLuint /* program */,
GLenum /* program_interface */,
GLenum /* pname */,
@@ -1076,6 +1077,7 @@ void GLES2InterfaceStub::ScheduleCALayerSharedStateCHROMIUM(
GLfloat /* opacity */,
GLboolean /* is_clipped */,
const GLfloat* /* clip_rect */,
+ const GLfloat* /* rounded_corner_bounds */,
GLint /* sorting_context_id */,
const GLfloat* /* transform */) {}
void GLES2InterfaceStub::ScheduleCALayerCHROMIUM(
diff --git a/chromium/gpu/command_buffer/client/gles2_trace_implementation_autogen.h b/chromium/gpu/command_buffer/client/gles2_trace_implementation_autogen.h
index 6f6b8dde72c..033b9b60543 100644
--- a/chromium/gpu/command_buffer/client/gles2_trace_implementation_autogen.h
+++ b/chromium/gpu/command_buffer/client/gles2_trace_implementation_autogen.h
@@ -633,6 +633,7 @@ void BindImageTexture(GLuint unit,
void DispatchCompute(GLuint num_groups_x,
GLuint num_groups_y,
GLuint num_groups_z) override;
+void DispatchComputeIndirect(GLintptr offset) override;
void GetProgramInterfaceiv(GLuint program,
GLenum program_interface,
GLenum pname,
@@ -804,6 +805,7 @@ void ScheduleOverlayPlaneCHROMIUM(GLint plane_z_order,
void ScheduleCALayerSharedStateCHROMIUM(GLfloat opacity,
GLboolean is_clipped,
const GLfloat* clip_rect,
+ const GLfloat* rounded_corner_bounds,
GLint sorting_context_id,
const GLfloat* transform) override;
void ScheduleCALayerCHROMIUM(GLuint contents_texture_id,
diff --git a/chromium/gpu/command_buffer/client/gles2_trace_implementation_impl_autogen.h b/chromium/gpu/command_buffer/client/gles2_trace_implementation_impl_autogen.h
index 4a0a5288fa7..1b492607f2f 100644
--- a/chromium/gpu/command_buffer/client/gles2_trace_implementation_impl_autogen.h
+++ b/chromium/gpu/command_buffer/client/gles2_trace_implementation_impl_autogen.h
@@ -1843,6 +1843,11 @@ void GLES2TraceImplementation::DispatchCompute(GLuint num_groups_x,
gl_->DispatchCompute(num_groups_x, num_groups_y, num_groups_z);
}
+void GLES2TraceImplementation::DispatchComputeIndirect(GLintptr offset) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::DispatchComputeIndirect");
+ gl_->DispatchComputeIndirect(offset);
+}
+
void GLES2TraceImplementation::GetProgramInterfaceiv(GLuint program,
GLenum program_interface,
GLenum pname,
@@ -2260,11 +2265,13 @@ void GLES2TraceImplementation::ScheduleCALayerSharedStateCHROMIUM(
GLfloat opacity,
GLboolean is_clipped,
const GLfloat* clip_rect,
+ const GLfloat* rounded_corner_bounds,
GLint sorting_context_id,
const GLfloat* transform) {
TRACE_EVENT_BINARY_EFFICIENT0(
"gpu", "GLES2Trace::ScheduleCALayerSharedStateCHROMIUM");
gl_->ScheduleCALayerSharedStateCHROMIUM(opacity, is_clipped, clip_rect,
+ rounded_corner_bounds,
sorting_context_id, transform);
}
diff --git a/chromium/gpu/command_buffer/client/implementation_base.cc b/chromium/gpu/command_buffer/client/implementation_base.cc
index 1adb73b0251..66c1d1f65b2 100644
--- a/chromium/gpu/command_buffer/client/implementation_base.cc
+++ b/chromium/gpu/command_buffer/client/implementation_base.cc
@@ -30,8 +30,7 @@ ImplementationBase::ImplementationBase(CommandBufferHelper* helper,
: transfer_buffer_(transfer_buffer),
gpu_control_(gpu_control),
capabilities_(gpu_control->GetCapabilities()),
- helper_(helper),
- weak_ptr_factory_(this) {}
+ helper_(helper) {}
ImplementationBase::~ImplementationBase() {
// The gpu_control_ outlives this class, so clear the client on it before we
diff --git a/chromium/gpu/command_buffer/client/implementation_base.h b/chromium/gpu/command_buffer/client/implementation_base.h
index 8a302e6eaab..a4e8485080b 100644
--- a/chromium/gpu/command_buffer/client/implementation_base.h
+++ b/chromium/gpu/command_buffer/client/implementation_base.h
@@ -156,7 +156,7 @@ class GLES2_IMPL_EXPORT ImplementationBase
CommandBufferHelper* helper_;
- base::WeakPtrFactory<ImplementationBase> weak_ptr_factory_;
+ base::WeakPtrFactory<ImplementationBase> weak_ptr_factory_{this};
DISALLOW_COPY_AND_ASSIGN(ImplementationBase);
};
diff --git a/chromium/gpu/command_buffer/client/mapped_memory_unittest.cc b/chromium/gpu/command_buffer/client/mapped_memory_unittest.cc
index d1844a0443c..1a574fc6d9e 100644
--- a/chromium/gpu/command_buffer/client/mapped_memory_unittest.cc
+++ b/chromium/gpu/command_buffer/client/mapped_memory_unittest.cc
@@ -11,8 +11,8 @@
#include <memory>
#include "base/bind.h"
-#include "base/message_loop/message_loop.h"
#include "base/run_loop.h"
+#include "base/test/scoped_task_environment.h"
#include "gpu/command_buffer/client/cmd_buffer_helper.h"
#include "gpu/command_buffer/client/command_buffer_direct_locked.h"
#include "gpu/command_buffer/service/mocks.h"
@@ -55,7 +55,7 @@ class MappedMemoryTestBase : public testing::Test {
std::unique_ptr<CommandBufferDirectLocked> command_buffer_;
std::unique_ptr<AsyncAPIMock> api_mock_;
std::unique_ptr<CommandBufferHelper> helper_;
- base::MessageLoop message_loop_;
+ base::test::ScopedTaskEnvironment scoped_task_environment_;
};
#ifndef _MSC_VER
diff --git a/chromium/gpu/command_buffer/client/raster_implementation.cc b/chromium/gpu/command_buffer/client/raster_implementation.cc
index 37b9361f451..2328f1faab1 100644
--- a/chromium/gpu/command_buffer/client/raster_implementation.cc
+++ b/chromium/gpu/command_buffer/client/raster_implementation.cc
@@ -91,21 +91,6 @@ namespace {
const uint32_t kMaxTransferCacheEntrySizeForTransferBuffer = 1024;
-void RecordPaintOpSize(size_t size) {
- constexpr size_t kMinPaintOpSize = 512 * 1024;
- constexpr size_t kMaxPaintOpSize = 16 * 1024 * 1024;
-
- // Serialization failure, record max size.
- if (size == 0u)
- size = kMaxPaintOpSize;
-
- if (size < kMinPaintOpSize)
- return;
-
- UMA_HISTOGRAM_CUSTOM_COUNTS("GPU.OopRaster.PaintOpSerializationSize", size,
- kMinPaintOpSize, kMaxPaintOpSize, 50);
-}
-
} // namespace
// Helper to copy data to the GPU service over the transfer cache.
@@ -258,13 +243,11 @@ class RasterImplementation::PaintOpSerializer {
}
if (!size) {
- RecordPaintOpSize(0u);
LOG(ERROR) << "Failed to serialize op in " << block_size << " bytes.";
return 0u;
}
}
- RecordPaintOpSize(size);
DCHECK_LE(size, free_bytes_);
DCHECK(base::CheckAdd<uint32_t>(written_bytes_, size).IsValid());
diff --git a/chromium/gpu/command_buffer/client/raster_interface.h b/chromium/gpu/command_buffer/client/raster_interface.h
index 74646c7e3dc..ccd2db389f3 100644
--- a/chromium/gpu/command_buffer/client/raster_interface.h
+++ b/chromium/gpu/command_buffer/client/raster_interface.h
@@ -57,7 +57,9 @@ class RasterInterface : public InterfaceBase {
const gfx::ColorSpace& color_space,
const GLbyte* mailbox) = 0;
- static constexpr size_t kDefaultMaxOpSizeHint = 512 * 1024;
+ // Heuristic decided on UMA data. This covers 85% of the cases where we need
+ // to serialize ops > 512k.
+ static constexpr size_t kDefaultMaxOpSizeHint = 600 * 1024;
virtual void RasterCHROMIUM(const cc::DisplayItemList* list,
cc::ImageProvider* provider,
const gfx::Size& content_size,
diff --git a/chromium/gpu/command_buffer/client/ring_buffer_test.cc b/chromium/gpu/command_buffer/client/ring_buffer_test.cc
index 4dd4f680c9c..f7c0b6e3526 100644
--- a/chromium/gpu/command_buffer/client/ring_buffer_test.cc
+++ b/chromium/gpu/command_buffer/client/ring_buffer_test.cc
@@ -12,8 +12,8 @@
#include "base/bind.h"
#include "base/bind_helpers.h"
-#include "base/message_loop/message_loop.h"
#include "base/run_loop.h"
+#include "base/test/scoped_task_environment.h"
#include "gpu/command_buffer/client/cmd_buffer_helper.h"
#include "gpu/command_buffer/service/command_buffer_direct.h"
#include "gpu/command_buffer/service/mocks.h"
@@ -85,7 +85,7 @@ class BaseRingBufferTest : public testing::Test {
std::unique_ptr<int8_t[]> buffer_;
int8_t* buffer_start_;
- base::MessageLoop message_loop_;
+ base::test::ScopedTaskEnvironment scoped_task_environment_;
};
#ifndef _MSC_VER
diff --git a/chromium/gpu/command_buffer/client/shared_image_interface.h b/chromium/gpu/command_buffer/client/shared_image_interface.h
index 0db8109c414..962506a17f9 100644
--- a/chromium/gpu/command_buffer/client/shared_image_interface.h
+++ b/chromium/gpu/command_buffer/client/shared_image_interface.h
@@ -14,6 +14,7 @@
namespace gfx {
class ColorSpace;
+class GpuFence;
class GpuMemoryBuffer;
class Size;
} // namespace gfx
@@ -84,6 +85,15 @@ class SharedImageInterface {
virtual void UpdateSharedImage(const SyncToken& sync_token,
const Mailbox& mailbox) = 0;
+ // Updates a shared image after its GpuMemoryBuffer (if any) was modified on
+ // the CPU or through external devices, after |sync_token| has been released.
+ // If |acquire_fence| is not null, the fence is inserted in the GPU command
+ // stream and a server side wait is issued before any GPU command referring
+ // to this shared imaged is executed on the GPU.
+ virtual void UpdateSharedImage(const SyncToken& sync_token,
+ std::unique_ptr<gfx::GpuFence> acquire_fence,
+ const Mailbox& mailbox) = 0;
+
// Destroys the shared image, unregistering its mailbox, after |sync_token|
// has been released. After this call, the mailbox can't be used to reference
// the image any more, however if the image was imported into other APIs,
@@ -124,6 +134,9 @@ class SharedImageInterface {
// Generates a verified SyncToken that is released after all previous
// commands on this interface have executed on the service side.
virtual SyncToken GenVerifiedSyncToken() = 0;
+
+ // Flush the SharedImageInterface, issuing any deferred IPCs.
+ virtual void Flush() = 0;
};
} // namespace gpu
diff --git a/chromium/gpu/command_buffer/client/shared_memory_limits.h b/chromium/gpu/command_buffer/client/shared_memory_limits.h
index eb68c2f9bdd..90cf07bf3b7 100644
--- a/chromium/gpu/command_buffer/client/shared_memory_limits.h
+++ b/chromium/gpu/command_buffer/client/shared_memory_limits.h
@@ -80,8 +80,8 @@ struct SharedMemoryLimits {
DCHECK(!screen_size.IsEmpty());
SharedMemoryLimits limits;
- constexpr size_t kBytesPerPixel = 4;
- const size_t full_screen_texture_size_in_bytes =
+ constexpr uint32_t kBytesPerPixel = 4;
+ const uint32_t full_screen_texture_size_in_bytes =
screen_size.width() * screen_size.height() * kBytesPerPixel;
// Android uses a smaller command buffer for the display compositor. Meant
diff --git a/chromium/gpu/command_buffer/client/webgpu_implementation.cc b/chromium/gpu/command_buffer/client/webgpu_implementation.cc
index 7d875ed907e..9af1f5af3b4 100644
--- a/chromium/gpu/command_buffer/client/webgpu_implementation.cc
+++ b/chromium/gpu/command_buffer/client/webgpu_implementation.cc
@@ -29,7 +29,12 @@ WebGPUImplementation::WebGPUImplementation(
: ImplementationBase(helper, transfer_buffer, gpu_control),
helper_(helper),
#if BUILDFLAG(USE_DAWN)
- wire_client_(new dawn_wire::WireClient(this)),
+ wire_client_([this]() {
+ dawn_wire::WireClientDescriptor descriptor = {};
+ descriptor.serializer = this;
+
+ return new dawn_wire::WireClient(descriptor);
+ }()),
procs_(wire_client_->GetProcs()),
#endif
c2s_buffer_(helper, transfer_buffer) {
diff --git a/chromium/gpu/command_buffer/common/capabilities.h b/chromium/gpu/command_buffer/common/capabilities.h
index b9ea9011915..53cfce903db 100644
--- a/chromium/gpu/command_buffer/common/capabilities.h
+++ b/chromium/gpu/command_buffer/common/capabilities.h
@@ -156,6 +156,7 @@ struct GPU_EXPORT Capabilities {
bool image_ycbcr_420v_disabled_for_video_frames = false;
bool image_xr30 = false;
bool image_xb30 = false;
+ bool image_ycbcr_p010 = false;
bool render_buffer_format_bgra8888 = false;
bool occlusion_query = false;
bool occlusion_query_boolean = false;
diff --git a/chromium/gpu/command_buffer/common/gles2_cmd_format_autogen.h b/chromium/gpu/command_buffer/common/gles2_cmd_format_autogen.h
index cf009da4127..01e551a8c6d 100644
--- a/chromium/gpu/command_buffer/common/gles2_cmd_format_autogen.h
+++ b/chromium/gpu/command_buffer/common/gles2_cmd_format_autogen.h
@@ -12368,6 +12368,39 @@ static_assert(offsetof(DispatchCompute, num_groups_y) == 8,
static_assert(offsetof(DispatchCompute, num_groups_z) == 12,
"offset of DispatchCompute num_groups_z should be 12");
+struct DispatchComputeIndirect {
+ typedef DispatchComputeIndirect ValueType;
+ static const CommandId kCmdId = kDispatchComputeIndirect;
+ static const cmd::ArgFlags kArgFlags = cmd::kFixed;
+ static const uint8_t cmd_flags = CMD_FLAG_SET_TRACE_LEVEL(2);
+
+ static uint32_t ComputeSize() {
+ return static_cast<uint32_t>(sizeof(ValueType)); // NOLINT
+ }
+
+ void SetHeader() { header.SetCmd<ValueType>(); }
+
+ void Init(GLintptr _offset) {
+ SetHeader();
+ offset = _offset;
+ }
+
+ void* Set(void* cmd, GLintptr _offset) {
+ static_cast<ValueType*>(cmd)->Init(_offset);
+ return NextCmdAddress<ValueType>(cmd);
+ }
+
+ gpu::CommandHeader header;
+ int32_t offset;
+};
+
+static_assert(sizeof(DispatchComputeIndirect) == 8,
+ "size of DispatchComputeIndirect should be 8");
+static_assert(offsetof(DispatchComputeIndirect, header) == 0,
+ "offset of DispatchComputeIndirect header should be 0");
+static_assert(offsetof(DispatchComputeIndirect, offset) == 4,
+ "offset of DispatchComputeIndirect offset should be 4");
+
struct GetProgramInterfaceiv {
typedef GetProgramInterfaceiv ValueType;
static const CommandId kCmdId = kGetProgramInterfaceiv;
diff --git a/chromium/gpu/command_buffer/common/gles2_cmd_format_test_autogen.h b/chromium/gpu/command_buffer/common/gles2_cmd_format_test_autogen.h
index ee423c7663f..64a5457e0c1 100644
--- a/chromium/gpu/command_buffer/common/gles2_cmd_format_test_autogen.h
+++ b/chromium/gpu/command_buffer/common/gles2_cmd_format_test_autogen.h
@@ -4142,6 +4142,17 @@ TEST_F(GLES2FormatTest, DispatchCompute) {
CheckBytesWrittenMatchesExpectedSize(next_cmd, sizeof(cmd));
}
+TEST_F(GLES2FormatTest, DispatchComputeIndirect) {
+ cmds::DispatchComputeIndirect& cmd =
+ *GetBufferAs<cmds::DispatchComputeIndirect>();
+ void* next_cmd = cmd.Set(&cmd, static_cast<GLintptr>(11));
+ EXPECT_EQ(static_cast<uint32_t>(cmds::DispatchComputeIndirect::kCmdId),
+ cmd.header.command);
+ EXPECT_EQ(sizeof(cmd), cmd.header.size * 4u);
+ EXPECT_EQ(static_cast<GLintptr>(11), cmd.offset);
+ CheckBytesWrittenMatchesExpectedSize(next_cmd, sizeof(cmd));
+}
+
TEST_F(GLES2FormatTest, GetProgramInterfaceiv) {
cmds::GetProgramInterfaceiv& cmd =
*GetBufferAs<cmds::GetProgramInterfaceiv>();
diff --git a/chromium/gpu/command_buffer/common/gles2_cmd_ids_autogen.h b/chromium/gpu/command_buffer/common/gles2_cmd_ids_autogen.h
index 3f3f159828a..5b3e2e69514 100644
--- a/chromium/gpu/command_buffer/common/gles2_cmd_ids_autogen.h
+++ b/chromium/gpu/command_buffer/common/gles2_cmd_ids_autogen.h
@@ -263,104 +263,105 @@
OP(FramebufferParameteri) /* 504 */ \
OP(BindImageTexture) /* 505 */ \
OP(DispatchCompute) /* 506 */ \
- OP(GetProgramInterfaceiv) /* 507 */ \
- OP(GetProgramResourceIndex) /* 508 */ \
- OP(GetProgramResourceName) /* 509 */ \
- OP(GetProgramResourceiv) /* 510 */ \
- OP(GetProgramResourceLocation) /* 511 */ \
- OP(MemoryBarrierEXT) /* 512 */ \
- OP(MemoryBarrierByRegion) /* 513 */ \
- OP(SwapBuffers) /* 514 */ \
- OP(GetMaxValueInBufferCHROMIUM) /* 515 */ \
- OP(EnableFeatureCHROMIUM) /* 516 */ \
- OP(MapBufferRange) /* 517 */ \
- OP(UnmapBuffer) /* 518 */ \
- OP(FlushMappedBufferRange) /* 519 */ \
- OP(ResizeCHROMIUM) /* 520 */ \
- OP(GetRequestableExtensionsCHROMIUM) /* 521 */ \
- OP(RequestExtensionCHROMIUM) /* 522 */ \
- OP(GetProgramInfoCHROMIUM) /* 523 */ \
- OP(GetUniformBlocksCHROMIUM) /* 524 */ \
- OP(GetTransformFeedbackVaryingsCHROMIUM) /* 525 */ \
- OP(GetUniformsES3CHROMIUM) /* 526 */ \
- OP(DescheduleUntilFinishedCHROMIUM) /* 527 */ \
- OP(GetTranslatedShaderSourceANGLE) /* 528 */ \
- OP(PostSubBufferCHROMIUM) /* 529 */ \
- OP(CopyTextureCHROMIUM) /* 530 */ \
- OP(CopySubTextureCHROMIUM) /* 531 */ \
- OP(DrawArraysInstancedANGLE) /* 532 */ \
- OP(DrawElementsInstancedANGLE) /* 533 */ \
- OP(VertexAttribDivisorANGLE) /* 534 */ \
- OP(ProduceTextureDirectCHROMIUMImmediate) /* 535 */ \
- OP(CreateAndConsumeTextureINTERNALImmediate) /* 536 */ \
- OP(BindUniformLocationCHROMIUMBucket) /* 537 */ \
- OP(BindTexImage2DCHROMIUM) /* 538 */ \
- OP(BindTexImage2DWithInternalformatCHROMIUM) /* 539 */ \
- OP(ReleaseTexImage2DCHROMIUM) /* 540 */ \
- OP(TraceBeginCHROMIUM) /* 541 */ \
- OP(TraceEndCHROMIUM) /* 542 */ \
- OP(DiscardFramebufferEXTImmediate) /* 543 */ \
- OP(LoseContextCHROMIUM) /* 544 */ \
- OP(UnpremultiplyAndDitherCopyCHROMIUM) /* 545 */ \
- OP(DrawBuffersEXTImmediate) /* 546 */ \
- OP(DiscardBackbufferCHROMIUM) /* 547 */ \
- OP(ScheduleOverlayPlaneCHROMIUM) /* 548 */ \
- OP(ScheduleCALayerSharedStateCHROMIUM) /* 549 */ \
- OP(ScheduleCALayerCHROMIUM) /* 550 */ \
- OP(ScheduleCALayerInUseQueryCHROMIUMImmediate) /* 551 */ \
- OP(CommitOverlayPlanesCHROMIUM) /* 552 */ \
- OP(FlushDriverCachesCHROMIUM) /* 553 */ \
- OP(ScheduleDCLayerCHROMIUM) /* 554 */ \
- OP(SetActiveURLCHROMIUM) /* 555 */ \
- OP(MatrixLoadfCHROMIUMImmediate) /* 556 */ \
- OP(MatrixLoadIdentityCHROMIUM) /* 557 */ \
- OP(GenPathsCHROMIUM) /* 558 */ \
- OP(DeletePathsCHROMIUM) /* 559 */ \
- OP(IsPathCHROMIUM) /* 560 */ \
- OP(PathCommandsCHROMIUM) /* 561 */ \
- OP(PathParameterfCHROMIUM) /* 562 */ \
- OP(PathParameteriCHROMIUM) /* 563 */ \
- OP(PathStencilFuncCHROMIUM) /* 564 */ \
- OP(StencilFillPathCHROMIUM) /* 565 */ \
- OP(StencilStrokePathCHROMIUM) /* 566 */ \
- OP(CoverFillPathCHROMIUM) /* 567 */ \
- OP(CoverStrokePathCHROMIUM) /* 568 */ \
- OP(StencilThenCoverFillPathCHROMIUM) /* 569 */ \
- OP(StencilThenCoverStrokePathCHROMIUM) /* 570 */ \
- OP(StencilFillPathInstancedCHROMIUM) /* 571 */ \
- OP(StencilStrokePathInstancedCHROMIUM) /* 572 */ \
- OP(CoverFillPathInstancedCHROMIUM) /* 573 */ \
- OP(CoverStrokePathInstancedCHROMIUM) /* 574 */ \
- OP(StencilThenCoverFillPathInstancedCHROMIUM) /* 575 */ \
- OP(StencilThenCoverStrokePathInstancedCHROMIUM) /* 576 */ \
- OP(BindFragmentInputLocationCHROMIUMBucket) /* 577 */ \
- OP(ProgramPathFragmentInputGenCHROMIUM) /* 578 */ \
- OP(CoverageModulationCHROMIUM) /* 579 */ \
- OP(BlendBarrierKHR) /* 580 */ \
- OP(ApplyScreenSpaceAntialiasingCHROMIUM) /* 581 */ \
- OP(BindFragDataLocationIndexedEXTBucket) /* 582 */ \
- OP(BindFragDataLocationEXTBucket) /* 583 */ \
- OP(GetFragDataIndexEXT) /* 584 */ \
- OP(UniformMatrix4fvStreamTextureMatrixCHROMIUMImmediate) /* 585 */ \
- OP(OverlayPromotionHintCHROMIUM) /* 586 */ \
- OP(SwapBuffersWithBoundsCHROMIUMImmediate) /* 587 */ \
- OP(SetDrawRectangleCHROMIUM) /* 588 */ \
- OP(SetEnableDCLayersCHROMIUM) /* 589 */ \
- OP(InitializeDiscardableTextureCHROMIUM) /* 590 */ \
- OP(UnlockDiscardableTextureCHROMIUM) /* 591 */ \
- OP(LockDiscardableTextureCHROMIUM) /* 592 */ \
- OP(TexStorage2DImageCHROMIUM) /* 593 */ \
- OP(SetColorSpaceMetadataCHROMIUM) /* 594 */ \
- OP(WindowRectanglesEXTImmediate) /* 595 */ \
- OP(CreateGpuFenceINTERNAL) /* 596 */ \
- OP(WaitGpuFenceCHROMIUM) /* 597 */ \
- OP(DestroyGpuFenceCHROMIUM) /* 598 */ \
- OP(SetReadbackBufferShadowAllocationINTERNAL) /* 599 */ \
- OP(FramebufferTextureMultiviewOVR) /* 600 */ \
- OP(MaxShaderCompilerThreadsKHR) /* 601 */ \
- OP(CreateAndTexStorage2DSharedImageINTERNALImmediate) /* 602 */ \
- OP(BeginSharedImageAccessDirectCHROMIUM) /* 603 */ \
- OP(EndSharedImageAccessDirectCHROMIUM) /* 604 */
+ OP(DispatchComputeIndirect) /* 507 */ \
+ OP(GetProgramInterfaceiv) /* 508 */ \
+ OP(GetProgramResourceIndex) /* 509 */ \
+ OP(GetProgramResourceName) /* 510 */ \
+ OP(GetProgramResourceiv) /* 511 */ \
+ OP(GetProgramResourceLocation) /* 512 */ \
+ OP(MemoryBarrierEXT) /* 513 */ \
+ OP(MemoryBarrierByRegion) /* 514 */ \
+ OP(SwapBuffers) /* 515 */ \
+ OP(GetMaxValueInBufferCHROMIUM) /* 516 */ \
+ OP(EnableFeatureCHROMIUM) /* 517 */ \
+ OP(MapBufferRange) /* 518 */ \
+ OP(UnmapBuffer) /* 519 */ \
+ OP(FlushMappedBufferRange) /* 520 */ \
+ OP(ResizeCHROMIUM) /* 521 */ \
+ OP(GetRequestableExtensionsCHROMIUM) /* 522 */ \
+ OP(RequestExtensionCHROMIUM) /* 523 */ \
+ OP(GetProgramInfoCHROMIUM) /* 524 */ \
+ OP(GetUniformBlocksCHROMIUM) /* 525 */ \
+ OP(GetTransformFeedbackVaryingsCHROMIUM) /* 526 */ \
+ OP(GetUniformsES3CHROMIUM) /* 527 */ \
+ OP(DescheduleUntilFinishedCHROMIUM) /* 528 */ \
+ OP(GetTranslatedShaderSourceANGLE) /* 529 */ \
+ OP(PostSubBufferCHROMIUM) /* 530 */ \
+ OP(CopyTextureCHROMIUM) /* 531 */ \
+ OP(CopySubTextureCHROMIUM) /* 532 */ \
+ OP(DrawArraysInstancedANGLE) /* 533 */ \
+ OP(DrawElementsInstancedANGLE) /* 534 */ \
+ OP(VertexAttribDivisorANGLE) /* 535 */ \
+ OP(ProduceTextureDirectCHROMIUMImmediate) /* 536 */ \
+ OP(CreateAndConsumeTextureINTERNALImmediate) /* 537 */ \
+ OP(BindUniformLocationCHROMIUMBucket) /* 538 */ \
+ OP(BindTexImage2DCHROMIUM) /* 539 */ \
+ OP(BindTexImage2DWithInternalformatCHROMIUM) /* 540 */ \
+ OP(ReleaseTexImage2DCHROMIUM) /* 541 */ \
+ OP(TraceBeginCHROMIUM) /* 542 */ \
+ OP(TraceEndCHROMIUM) /* 543 */ \
+ OP(DiscardFramebufferEXTImmediate) /* 544 */ \
+ OP(LoseContextCHROMIUM) /* 545 */ \
+ OP(UnpremultiplyAndDitherCopyCHROMIUM) /* 546 */ \
+ OP(DrawBuffersEXTImmediate) /* 547 */ \
+ OP(DiscardBackbufferCHROMIUM) /* 548 */ \
+ OP(ScheduleOverlayPlaneCHROMIUM) /* 549 */ \
+ OP(ScheduleCALayerSharedStateCHROMIUM) /* 550 */ \
+ OP(ScheduleCALayerCHROMIUM) /* 551 */ \
+ OP(ScheduleCALayerInUseQueryCHROMIUMImmediate) /* 552 */ \
+ OP(CommitOverlayPlanesCHROMIUM) /* 553 */ \
+ OP(FlushDriverCachesCHROMIUM) /* 554 */ \
+ OP(ScheduleDCLayerCHROMIUM) /* 555 */ \
+ OP(SetActiveURLCHROMIUM) /* 556 */ \
+ OP(MatrixLoadfCHROMIUMImmediate) /* 557 */ \
+ OP(MatrixLoadIdentityCHROMIUM) /* 558 */ \
+ OP(GenPathsCHROMIUM) /* 559 */ \
+ OP(DeletePathsCHROMIUM) /* 560 */ \
+ OP(IsPathCHROMIUM) /* 561 */ \
+ OP(PathCommandsCHROMIUM) /* 562 */ \
+ OP(PathParameterfCHROMIUM) /* 563 */ \
+ OP(PathParameteriCHROMIUM) /* 564 */ \
+ OP(PathStencilFuncCHROMIUM) /* 565 */ \
+ OP(StencilFillPathCHROMIUM) /* 566 */ \
+ OP(StencilStrokePathCHROMIUM) /* 567 */ \
+ OP(CoverFillPathCHROMIUM) /* 568 */ \
+ OP(CoverStrokePathCHROMIUM) /* 569 */ \
+ OP(StencilThenCoverFillPathCHROMIUM) /* 570 */ \
+ OP(StencilThenCoverStrokePathCHROMIUM) /* 571 */ \
+ OP(StencilFillPathInstancedCHROMIUM) /* 572 */ \
+ OP(StencilStrokePathInstancedCHROMIUM) /* 573 */ \
+ OP(CoverFillPathInstancedCHROMIUM) /* 574 */ \
+ OP(CoverStrokePathInstancedCHROMIUM) /* 575 */ \
+ OP(StencilThenCoverFillPathInstancedCHROMIUM) /* 576 */ \
+ OP(StencilThenCoverStrokePathInstancedCHROMIUM) /* 577 */ \
+ OP(BindFragmentInputLocationCHROMIUMBucket) /* 578 */ \
+ OP(ProgramPathFragmentInputGenCHROMIUM) /* 579 */ \
+ OP(CoverageModulationCHROMIUM) /* 580 */ \
+ OP(BlendBarrierKHR) /* 581 */ \
+ OP(ApplyScreenSpaceAntialiasingCHROMIUM) /* 582 */ \
+ OP(BindFragDataLocationIndexedEXTBucket) /* 583 */ \
+ OP(BindFragDataLocationEXTBucket) /* 584 */ \
+ OP(GetFragDataIndexEXT) /* 585 */ \
+ OP(UniformMatrix4fvStreamTextureMatrixCHROMIUMImmediate) /* 586 */ \
+ OP(OverlayPromotionHintCHROMIUM) /* 587 */ \
+ OP(SwapBuffersWithBoundsCHROMIUMImmediate) /* 588 */ \
+ OP(SetDrawRectangleCHROMIUM) /* 589 */ \
+ OP(SetEnableDCLayersCHROMIUM) /* 590 */ \
+ OP(InitializeDiscardableTextureCHROMIUM) /* 591 */ \
+ OP(UnlockDiscardableTextureCHROMIUM) /* 592 */ \
+ OP(LockDiscardableTextureCHROMIUM) /* 593 */ \
+ OP(TexStorage2DImageCHROMIUM) /* 594 */ \
+ OP(SetColorSpaceMetadataCHROMIUM) /* 595 */ \
+ OP(WindowRectanglesEXTImmediate) /* 596 */ \
+ OP(CreateGpuFenceINTERNAL) /* 597 */ \
+ OP(WaitGpuFenceCHROMIUM) /* 598 */ \
+ OP(DestroyGpuFenceCHROMIUM) /* 599 */ \
+ OP(SetReadbackBufferShadowAllocationINTERNAL) /* 600 */ \
+ OP(FramebufferTextureMultiviewOVR) /* 601 */ \
+ OP(MaxShaderCompilerThreadsKHR) /* 602 */ \
+ OP(CreateAndTexStorage2DSharedImageINTERNALImmediate) /* 603 */ \
+ OP(BeginSharedImageAccessDirectCHROMIUM) /* 604 */ \
+ OP(EndSharedImageAccessDirectCHROMIUM) /* 605 */
enum CommandId {
kOneBeforeStartPoint =
diff --git a/chromium/gpu/command_buffer/common/gles2_cmd_utils.cc b/chromium/gpu/command_buffer/common/gles2_cmd_utils.cc
index 10cb22921f3..e2ff04d62bd 100644
--- a/chromium/gpu/command_buffer/common/gles2_cmd_utils.cc
+++ b/chromium/gpu/command_buffer/common/gles2_cmd_utils.cc
@@ -290,6 +290,8 @@ int GLES2Util::GLGetNumValuesReturned(int id) const {
return 1;
case GL_ATOMIC_COUNTER_BUFFER_START:
return 1;
+ case GL_DISPATCH_INDIRECT_BUFFER_BINDING:
+ return 1;
case GL_MAX_ATOMIC_COUNTER_BUFFER_BINDINGS:
return 1;
case GL_MAX_SHADER_STORAGE_BUFFER_BINDINGS:
@@ -1674,6 +1676,8 @@ uint32_t GLES2Util::MapBufferTargetToBindingEnum(uint32_t target) {
return GL_COPY_READ_BUFFER_BINDING;
case GL_COPY_WRITE_BUFFER:
return GL_COPY_WRITE_BUFFER_BINDING;
+ case GL_DISPATCH_INDIRECT_BUFFER:
+ return GL_DISPATCH_INDIRECT_BUFFER_BINDING;
case GL_ELEMENT_ARRAY_BUFFER:
return GL_ELEMENT_ARRAY_BUFFER_BINDING;
case GL_PIXEL_PACK_BUFFER:
diff --git a/chromium/gpu/command_buffer/common/gles2_cmd_utils_implementation_autogen.h b/chromium/gpu/command_buffer/common/gles2_cmd_utils_implementation_autogen.h
index 20750bb6e80..c2c4bcd3170 100644
--- a/chromium/gpu/command_buffer/common/gles2_cmd_utils_implementation_autogen.h
+++ b/chromium/gpu/command_buffer/common/gles2_cmd_utils_implementation_autogen.h
@@ -1041,6 +1041,10 @@ static const GLES2Util::EnumToString enum_to_string_table[] = {
"GL_RGB_YCBCR_420V_CHROMIUM",
},
{
+ 0x78FD,
+ "GL_RGB_YCBCR_P010_CHROMIUM",
+ },
+ {
0x8,
"GL_CA_LAYER_EDGE_TOP_CHROMIUM",
},
diff --git a/chromium/gpu/command_buffer/common/gpu_memory_buffer_support.cc b/chromium/gpu/command_buffer/common/gpu_memory_buffer_support.cc
index 481230cf411..12b03e0f9b3 100644
--- a/chromium/gpu/command_buffer/common/gpu_memory_buffer_support.cc
+++ b/chromium/gpu/command_buffer/common/gpu_memory_buffer_support.cc
@@ -46,6 +46,8 @@ unsigned InternalFormatForGpuMemoryBufferFormat(gfx::BufferFormat format) {
return GL_RGB_YCBCR_420V_CHROMIUM;
case gfx::BufferFormat::UYVY_422:
return GL_RGB_YCBCR_422_CHROMIUM;
+ case gfx::BufferFormat::P010:
+ return GL_RGB_YCBCR_P010_CHROMIUM;
default:
NOTREACHED();
return 0;
@@ -76,6 +78,7 @@ bool IsImageSizeValidForGpuMemoryBufferFormat(const gfx::Size& size,
return true;
case gfx::BufferFormat::YVU_420:
case gfx::BufferFormat::YUV_420_BIPLANAR:
+ case gfx::BufferFormat::P010:
// U and V planes are subsampled by a factor of 2.
return size.width() % 2 == 0 && size.height() % 2 == 0;
case gfx::BufferFormat::UYVY_422:
@@ -104,8 +107,8 @@ uint32_t GetPlatformSpecificTextureTarget() {
GPU_EXPORT uint32_t GetBufferTextureTarget(gfx::BufferUsage usage,
gfx::BufferFormat format,
const Capabilities& capabilities) {
- bool found = base::ContainsValue(capabilities.texture_target_exception_list,
- gfx::BufferUsageAndFormat(usage, format));
+ bool found = base::Contains(capabilities.texture_target_exception_list,
+ gfx::BufferUsageAndFormat(usage, format));
return found ? gpu::GetPlatformSpecificTextureTarget() : GL_TEXTURE_2D;
}
diff --git a/chromium/gpu/command_buffer/gles2_cmd_buffer_functions.txt b/chromium/gpu/command_buffer/gles2_cmd_buffer_functions.txt
index 7c3d121e54d..32e1907f3dd 100644
--- a/chromium/gpu/command_buffer/gles2_cmd_buffer_functions.txt
+++ b/chromium/gpu/command_buffer/gles2_cmd_buffer_functions.txt
@@ -272,6 +272,7 @@ GL_APICALL void GL_APIENTRY glFramebufferParameteri (GLenumFramebufferTa
GL_APICALL void GL_APIENTRY glBindImageTexture (GLuint unit, GLuint texture, GLint level, GLboolean layered, GLint layer, GLenum access, GLenum format);
GL_APICALL void GL_APIENTRY glDispatchCompute (GLuint num_groups_x, GLuint num_groups_y, GLuint num_groups_z);
+GL_APICALL void GL_APIENTRY glDispatchComputeIndirect (GLintptrNotNegative offset);
GL_APICALL void GL_APIENTRY glGetProgramInterfaceiv (GLidProgram program, GLenum program_interface, GLenum pname, GLint* params);
GL_APICALL GLuint GL_APIENTRY glGetProgramResourceIndex (GLidProgram program, GLenum program_interface, const char* name);
@@ -327,7 +328,7 @@ GL_APICALL void GL_APIENTRY glUnpremultiplyAndDitherCopyCHROMIUM (GLuint
GL_APICALL void GL_APIENTRY glDrawBuffersEXT (GLsizei count, const GLenum* bufs);
GL_APICALL void GL_APIENTRY glDiscardBackbufferCHROMIUM (void);
GL_APICALL void GL_APIENTRY glScheduleOverlayPlaneCHROMIUM (GLint plane_z_order, GLenum plane_transform, GLuint overlay_texture_id, GLint bounds_x, GLint bounds_y, GLint bounds_width, GLint bounds_height, GLfloat uv_x, GLfloat uv_y, GLfloat uv_width, GLfloat uv_height, GLboolean enable_blend, GLuint gpu_fence_id);
-GL_APICALL void GL_APIENTRY glScheduleCALayerSharedStateCHROMIUM (GLfloat opacity, GLboolean is_clipped, const GLfloat* clip_rect, GLint sorting_context_id, const GLfloat* transform);
+GL_APICALL void GL_APIENTRY glScheduleCALayerSharedStateCHROMIUM (GLfloat opacity, GLboolean is_clipped, const GLfloat* clip_rect, const GLfloat* rounded_corner_bounds, GLint sorting_context_id, const GLfloat* transform);
GL_APICALL void GL_APIENTRY glScheduleCALayerCHROMIUM (GLuint contents_texture_id, const GLfloat* contents_rect, GLuint background_color, GLuint edge_aa_mask, const GLfloat* bounds_rect, GLuint filter);
GL_APICALL void GL_APIENTRY glScheduleCALayerInUseQueryCHROMIUM (GLsizei count, const GLuint* textures);
GL_APICALL void GL_APIENTRY glCommitOverlayPlanesCHROMIUM (GLuint64 swap_id, GLbitfieldSwapBuffersFlags flags = 0);
diff --git a/chromium/gpu/command_buffer/service/BUILD.gn b/chromium/gpu/command_buffer/service/BUILD.gn
index 5a84425376f..96c3ceacb64 100644
--- a/chromium/gpu/command_buffer/service/BUILD.gn
+++ b/chromium/gpu/command_buffer/service/BUILD.gn
@@ -240,6 +240,8 @@ target(link_target_type, "gles2_sources") {
"shared_image_manager.h",
"shared_image_representation.cc",
"shared_image_representation.h",
+ "shared_image_representation_skia_gl.cc",
+ "shared_image_representation_skia_gl.h",
"skia_utils.cc",
"skia_utils.h",
"texture_definition.cc",
@@ -297,6 +299,7 @@ target(link_target_type, "gles2_sources") {
":service",
"//base",
"//base/third_party/dynamic_annotations",
+ "//components/crash/core/common",
"//components/viz/common:resource_format_utils",
"//gpu/command_buffer/client",
"//gpu/command_buffer/common:gles2_utils",
diff --git a/chromium/gpu/command_buffer/service/DEPS b/chromium/gpu/command_buffer/service/DEPS
index 00595bae252..ee6d2c147ac 100644
--- a/chromium/gpu/command_buffer/service/DEPS
+++ b/chromium/gpu/command_buffer/service/DEPS
@@ -1,6 +1,7 @@
include_rules = [
"+cc/paint",
"+third_party/skia",
+ "+components/crash/core/common/crash_key.h",
"+components/viz/common/gpu/metal_context_provider.h",
"+components/viz/common/gpu/vulkan_context_provider.h",
"+components/viz/common/resources/resource_format.h",
diff --git a/chromium/gpu/command_buffer/service/abstract_texture_impl_shared_context_state.cc b/chromium/gpu/command_buffer/service/abstract_texture_impl_shared_context_state.cc
index f133e9a574d..aed8ac6d9a6 100644
--- a/chromium/gpu/command_buffer/service/abstract_texture_impl_shared_context_state.cc
+++ b/chromium/gpu/command_buffer/service/abstract_texture_impl_shared_context_state.cc
@@ -52,7 +52,7 @@ AbstractTextureImplOnSharedContext::AbstractTextureImplOnSharedContext(
gfx::Rect cleared_rect;
texture_->SetLevelInfo(target, 0, internal_format, width, height, depth,
border, format, type, cleared_rect);
- texture_->SetImmutable(true);
+ texture_->SetImmutable(true, false);
shared_context_state_->AddContextLostObserver(this);
}
@@ -87,7 +87,11 @@ void AbstractTextureImplOnSharedContext::SetParameteri(GLenum pname,
void AbstractTextureImplOnSharedContext::BindStreamTextureImage(
GLStreamTextureImage* image,
GLuint service_id) {
- NOTIMPLEMENTED();
+ const GLint level = 0;
+ const GLuint target = texture_->target();
+ texture_->SetLevelStreamTextureImage(
+ target, level, image, Texture::ImageState::UNBOUND, service_id);
+ texture_->SetLevelCleared(target, level, true);
}
void AbstractTextureImplOnSharedContext::BindImage(gl::GLImage* image,
@@ -116,5 +120,86 @@ void AbstractTextureImplOnSharedContext::OnContextLost() {
shared_context_state_.reset();
}
+AbstractTextureImplOnSharedContextPassthrough::
+ AbstractTextureImplOnSharedContextPassthrough(
+ GLenum target,
+ scoped_refptr<gpu::SharedContextState> shared_context_state)
+ : shared_context_state_(std::move(shared_context_state)) {
+ DCHECK(shared_context_state_);
+
+ // The calling code which wants to create this abstract texture should have
+ // already made the shared context current.
+ DCHECK(shared_context_state_->IsCurrent(nullptr));
+
+ // Create a gles2 Texture.
+ GLuint service_id = 0;
+ auto* api = gl::g_current_gl_context;
+ api->glGenTexturesFn(1, &service_id);
+
+ GLint prev_texture = 0;
+ api->glGetIntegervFn(GetTextureBindingQuery(target), &prev_texture);
+
+ api->glBindTextureFn(target, service_id);
+ api->glTexParameteriFn(target, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
+ api->glTexParameteriFn(target, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
+ api->glTexParameteriFn(target, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
+ api->glTexParameteriFn(target, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
+
+ glBindTexture(target, prev_texture);
+
+ texture_ = new TexturePassthrough(service_id, target);
+ shared_context_state_->AddContextLostObserver(this);
+}
+
+AbstractTextureImplOnSharedContextPassthrough::
+ ~AbstractTextureImplOnSharedContextPassthrough() {
+ if (cleanup_cb_)
+ std::move(cleanup_cb_).Run(this);
+}
+
+TextureBase* AbstractTextureImplOnSharedContextPassthrough::GetTextureBase()
+ const {
+ return texture_.get();
+}
+
+void AbstractTextureImplOnSharedContextPassthrough::SetParameteri(GLenum pname,
+ GLint param) {
+ NOTIMPLEMENTED();
+}
+
+void AbstractTextureImplOnSharedContextPassthrough::BindStreamTextureImage(
+ GLStreamTextureImage* image,
+ GLuint service_id) {
+ NOTIMPLEMENTED();
+}
+
+void AbstractTextureImplOnSharedContextPassthrough::BindImage(
+ gl::GLImage* image,
+ bool client_managed) {
+ NOTIMPLEMENTED();
+}
+
+gl::GLImage* AbstractTextureImplOnSharedContextPassthrough::GetImage() const {
+ NOTIMPLEMENTED();
+ return nullptr;
+}
+
+void AbstractTextureImplOnSharedContextPassthrough::SetCleared() {
+ NOTIMPLEMENTED();
+}
+
+void AbstractTextureImplOnSharedContextPassthrough::SetCleanupCallback(
+ CleanupCallback cb) {
+ cleanup_cb_ = std::move(cb);
+}
+
+void AbstractTextureImplOnSharedContextPassthrough::OnContextLost() {
+ if (cleanup_cb_)
+ std::move(cleanup_cb_).Run(this);
+ texture_->MarkContextLost();
+ shared_context_state_->RemoveContextLostObserver(this);
+ shared_context_state_ = nullptr;
+}
+
} // namespace gles2
} // namespace gpu
diff --git a/chromium/gpu/command_buffer/service/abstract_texture_impl_shared_context_state.h b/chromium/gpu/command_buffer/service/abstract_texture_impl_shared_context_state.h
index f9b87579450..8630e7bf011 100644
--- a/chromium/gpu/command_buffer/service/abstract_texture_impl_shared_context_state.h
+++ b/chromium/gpu/command_buffer/service/abstract_texture_impl_shared_context_state.h
@@ -15,6 +15,7 @@ class SharedContextState;
namespace gles2 {
class Texture;
+class TexturePassthrough;
// Implementation of AbstractTexture which will be used to create
// AbstractTextures on ShareContextState.
@@ -53,6 +54,36 @@ class GPU_GLES2_EXPORT AbstractTextureImplOnSharedContext
CleanupCallback cleanup_cb_;
};
+// Implementation of AbstractTexture which will be used to create
+// AbstractTextures on SharedContextState for the passthrough command decoder.
+class GPU_GLES2_EXPORT AbstractTextureImplOnSharedContextPassthrough
+ : public AbstractTexture,
+ public SharedContextState::ContextLostObserver {
+ public:
+ AbstractTextureImplOnSharedContextPassthrough(
+ GLenum target,
+ scoped_refptr<gpu::SharedContextState> shared_context_state);
+ ~AbstractTextureImplOnSharedContextPassthrough() override;
+
+ // AbstractTexture implementation.
+ TextureBase* GetTextureBase() const override;
+ void SetParameteri(GLenum pname, GLint param) override;
+ void BindStreamTextureImage(GLStreamTextureImage* image,
+ GLuint service_id) override;
+ void BindImage(gl::GLImage* image, bool client_managed) override;
+ gl::GLImage* GetImage() const override;
+ void SetCleared() override;
+ void SetCleanupCallback(CleanupCallback cb) override;
+
+ // SharedContextState::ContextLostObserver implementation.
+ void OnContextLost() override;
+
+ private:
+ scoped_refptr<TexturePassthrough> texture_;
+ scoped_refptr<SharedContextState> shared_context_state_;
+ CleanupCallback cleanup_cb_;
+};
+
} // namespace gles2
} // namespace gpu
diff --git a/chromium/gpu/command_buffer/service/context_state_test_helpers.cc b/chromium/gpu/command_buffer/service/context_state_test_helpers.cc
new file mode 100644
index 00000000000..70e7ac19d03
--- /dev/null
+++ b/chromium/gpu/command_buffer/service/context_state_test_helpers.cc
@@ -0,0 +1,63 @@
+// Copyright 2019 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "gpu/command_buffer/service/context_state_test_helpers.h"
+
+#include "gpu/command_buffer/service/feature_info.h"
+#include "ui/gfx/geometry/size.h"
+#include "ui/gl/gl_version_info.h"
+
+using ::testing::_;
+
+namespace gpu {
+// Include the auto-generated part of this file. We split this because it means
+// we can easily edit the non-auto generated parts right here in this file
+// instead of having to edit some template or the code generator.
+#include "gpu/command_buffer/service/context_state_test_helpers_autogen.h"
+
+void ContextStateTestHelpers::SetupInitState(MockGL* gl,
+ gles2::FeatureInfo* feature_info,
+ const gfx::Size& initial_size) {
+ SetupInitCapabilitiesExpectations(gl, feature_info);
+ SetupInitStateExpectations(gl, feature_info, initial_size);
+}
+
+void ContextStateTestHelpers::SetupInitStateManualExpectations(
+ MockGL* gl,
+ gles2::FeatureInfo* feature_info) {
+ if (feature_info->IsES3Capable()) {
+ EXPECT_CALL(*gl, PixelStorei(GL_PACK_ROW_LENGTH, 0))
+ .Times(1)
+ .RetiresOnSaturation();
+ EXPECT_CALL(*gl, PixelStorei(GL_UNPACK_ROW_LENGTH, 0))
+ .Times(1)
+ .RetiresOnSaturation();
+ EXPECT_CALL(*gl, PixelStorei(GL_UNPACK_IMAGE_HEIGHT, 0))
+ .Times(1)
+ .RetiresOnSaturation();
+ if (feature_info->feature_flags().ext_window_rectangles) {
+ EXPECT_CALL(*gl, WindowRectanglesEXT(GL_EXCLUSIVE_EXT, 0, nullptr))
+ .Times(1)
+ .RetiresOnSaturation();
+ }
+ }
+}
+
+void ContextStateTestHelpers::SetupInitStateManualExpectationsForDoLineWidth(
+ MockGL* gl,
+ GLfloat width) {
+ EXPECT_CALL(*gl, LineWidth(width)).Times(1).RetiresOnSaturation();
+}
+
+void ContextStateTestHelpers::ExpectEnableDisable(MockGL* gl,
+ GLenum cap,
+ bool enable) {
+ if (enable) {
+ EXPECT_CALL(*gl, Enable(cap)).Times(1).RetiresOnSaturation();
+ } else {
+ EXPECT_CALL(*gl, Disable(cap)).Times(1).RetiresOnSaturation();
+ }
+}
+
+} // namespace gpu
diff --git a/chromium/gpu/command_buffer/service/context_state_test_helpers.h b/chromium/gpu/command_buffer/service/context_state_test_helpers.h
new file mode 100644
index 00000000000..4ee751c137f
--- /dev/null
+++ b/chromium/gpu/command_buffer/service/context_state_test_helpers.h
@@ -0,0 +1,44 @@
+// Copyright 2019 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+#ifndef GPU_COMMAND_BUFFER_SERVICE_CONTEXT_STATE_TEST_HELPERS_H_
+#define GPU_COMMAND_BUFFER_SERVICE_CONTEXT_STATE_TEST_HELPERS_H_
+
+#include "testing/gtest/include/gtest/gtest.h"
+#include "ui/gl/gl_mock.h"
+
+namespace gfx {
+class Size;
+} // namespace gfx
+
+namespace gpu {
+namespace gles2 {
+class FeatureInfo;
+} // namespace gles2
+
+class ContextStateTestHelpers {
+ public:
+ using MockGL = ::testing::StrictMock<::gl::MockGLInterface>;
+ static void SetupInitState(MockGL* gl,
+ gles2::FeatureInfo* feature_info,
+ const gfx::Size& initial_size);
+
+ private:
+ static void SetupInitCapabilitiesExpectations(
+ MockGL* gl,
+ gles2::FeatureInfo* feature_info);
+ static void SetupInitStateExpectations(MockGL* gl,
+ gles2::FeatureInfo* feature_info,
+ const gfx::Size& initial_size);
+ static void SetupInitStateManualExpectations(
+ MockGL* gl,
+ gles2::FeatureInfo* feature_info);
+ static void SetupInitStateManualExpectationsForDoLineWidth(MockGL* gl,
+ GLfloat width);
+ static void ExpectEnableDisable(MockGL* gl, GLenum cap, bool enable);
+};
+
+} // namespace gpu
+
+#endif // GPU_COMMAND_BUFFER_SERVICE_CONTEXT_STATE_TEST_HELPERS_H_
diff --git a/chromium/gpu/command_buffer/service/context_state_test_helpers_autogen.h b/chromium/gpu/command_buffer/service/context_state_test_helpers_autogen.h
new file mode 100644
index 00000000000..6fa1c4930bd
--- /dev/null
+++ b/chromium/gpu/command_buffer/service/context_state_test_helpers_autogen.h
@@ -0,0 +1,135 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file is auto-generated from
+// gpu/command_buffer/build_gles2_cmd_buffer.py
+// It's formatted by clang-format using chromium coding style:
+// clang-format -i -style=chromium filename
+// DO NOT EDIT!
+
+// It is included by context_state_test_helpers.cc
+#ifndef GPU_COMMAND_BUFFER_SERVICE_CONTEXT_STATE_TEST_HELPERS_AUTOGEN_H_
+#define GPU_COMMAND_BUFFER_SERVICE_CONTEXT_STATE_TEST_HELPERS_AUTOGEN_H_
+
+void ContextStateTestHelpers::SetupInitCapabilitiesExpectations(
+ MockGL* gl,
+ gles2::FeatureInfo* feature_info) {
+ ExpectEnableDisable(gl, GL_BLEND, false);
+ ExpectEnableDisable(gl, GL_CULL_FACE, false);
+ ExpectEnableDisable(gl, GL_DEPTH_TEST, false);
+ ExpectEnableDisable(gl, GL_DITHER, true);
+ ExpectEnableDisable(gl, GL_POLYGON_OFFSET_FILL, false);
+ ExpectEnableDisable(gl, GL_SAMPLE_ALPHA_TO_COVERAGE, false);
+ ExpectEnableDisable(gl, GL_SAMPLE_COVERAGE, false);
+ ExpectEnableDisable(gl, GL_SCISSOR_TEST, false);
+ ExpectEnableDisable(gl, GL_STENCIL_TEST, false);
+ if (feature_info->feature_flags().ext_multisample_compatibility) {
+ ExpectEnableDisable(gl, GL_MULTISAMPLE_EXT, true);
+ }
+ if (feature_info->feature_flags().ext_multisample_compatibility) {
+ ExpectEnableDisable(gl, GL_SAMPLE_ALPHA_TO_ONE_EXT, false);
+ }
+ if (feature_info->IsES3Capable()) {
+ ExpectEnableDisable(gl, GL_RASTERIZER_DISCARD, false);
+ ExpectEnableDisable(gl, GL_PRIMITIVE_RESTART_FIXED_INDEX, false);
+ }
+}
+
+void ContextStateTestHelpers::SetupInitStateExpectations(
+ MockGL* gl,
+ gles2::FeatureInfo* feature_info,
+ const gfx::Size& initial_size) {
+ EXPECT_CALL(*gl, BlendColor(0.0f, 0.0f, 0.0f, 0.0f))
+ .Times(1)
+ .RetiresOnSaturation();
+ EXPECT_CALL(*gl, BlendEquationSeparate(GL_FUNC_ADD, GL_FUNC_ADD))
+ .Times(1)
+ .RetiresOnSaturation();
+ EXPECT_CALL(*gl, BlendFuncSeparate(GL_ONE, GL_ZERO, GL_ONE, GL_ZERO))
+ .Times(1)
+ .RetiresOnSaturation();
+ EXPECT_CALL(*gl, ClearColor(0.0f, 0.0f, 0.0f, 0.0f))
+ .Times(1)
+ .RetiresOnSaturation();
+ EXPECT_CALL(*gl, ClearDepth(1.0f)).Times(1).RetiresOnSaturation();
+ EXPECT_CALL(*gl, ClearStencil(0)).Times(1).RetiresOnSaturation();
+ EXPECT_CALL(*gl, ColorMask(true, true, true, true))
+ .Times(1)
+ .RetiresOnSaturation();
+ if (feature_info->feature_flags().chromium_framebuffer_mixed_samples) {
+ EXPECT_CALL(*gl, CoverageModulationNV(GL_NONE))
+ .Times(1)
+ .RetiresOnSaturation();
+ }
+ EXPECT_CALL(*gl, CullFace(GL_BACK)).Times(1).RetiresOnSaturation();
+ EXPECT_CALL(*gl, DepthFunc(GL_LESS)).Times(1).RetiresOnSaturation();
+ EXPECT_CALL(*gl, DepthMask(true)).Times(1).RetiresOnSaturation();
+ EXPECT_CALL(*gl, DepthRange(0.0f, 1.0f)).Times(1).RetiresOnSaturation();
+ EXPECT_CALL(*gl, FrontFace(GL_CCW)).Times(1).RetiresOnSaturation();
+ if (!feature_info->gl_version_info().is_desktop_core_profile) {
+ EXPECT_CALL(*gl, Hint(GL_GENERATE_MIPMAP_HINT, GL_DONT_CARE))
+ .Times(1)
+ .RetiresOnSaturation();
+ }
+ if (feature_info->feature_flags().oes_standard_derivatives) {
+ EXPECT_CALL(*gl, Hint(GL_FRAGMENT_SHADER_DERIVATIVE_HINT_OES, GL_DONT_CARE))
+ .Times(1)
+ .RetiresOnSaturation();
+ }
+ if (feature_info->feature_flags().chromium_texture_filtering_hint) {
+ EXPECT_CALL(*gl, Hint(GL_TEXTURE_FILTERING_HINT_CHROMIUM, GL_NICEST))
+ .Times(1)
+ .RetiresOnSaturation();
+ }
+ SetupInitStateManualExpectationsForDoLineWidth(gl, 1.0f);
+ if (feature_info->feature_flags().chromium_path_rendering) {
+ EXPECT_CALL(*gl, MatrixLoadfEXT(GL_PATH_MODELVIEW_CHROMIUM, _))
+ .Times(1)
+ .RetiresOnSaturation();
+ }
+ if (feature_info->feature_flags().chromium_path_rendering) {
+ EXPECT_CALL(*gl, MatrixLoadfEXT(GL_PATH_PROJECTION_CHROMIUM, _))
+ .Times(1)
+ .RetiresOnSaturation();
+ }
+ if (feature_info->feature_flags().chromium_path_rendering) {
+ EXPECT_CALL(*gl, PathStencilFuncNV(GL_ALWAYS, 0, 0xFFFFFFFFU))
+ .Times(1)
+ .RetiresOnSaturation();
+ }
+ EXPECT_CALL(*gl, PixelStorei(GL_PACK_ALIGNMENT, 4))
+ .Times(1)
+ .RetiresOnSaturation();
+ EXPECT_CALL(*gl, PixelStorei(GL_UNPACK_ALIGNMENT, 4))
+ .Times(1)
+ .RetiresOnSaturation();
+ EXPECT_CALL(*gl, PolygonOffset(0.0f, 0.0f)).Times(1).RetiresOnSaturation();
+ EXPECT_CALL(*gl, SampleCoverage(1.0f, false)).Times(1).RetiresOnSaturation();
+ EXPECT_CALL(*gl, Scissor(0, 0, initial_size.width(), initial_size.height()))
+ .Times(1)
+ .RetiresOnSaturation();
+ EXPECT_CALL(*gl, StencilFuncSeparate(GL_FRONT, GL_ALWAYS, 0, 0xFFFFFFFFU))
+ .Times(1)
+ .RetiresOnSaturation();
+ EXPECT_CALL(*gl, StencilFuncSeparate(GL_BACK, GL_ALWAYS, 0, 0xFFFFFFFFU))
+ .Times(1)
+ .RetiresOnSaturation();
+ EXPECT_CALL(*gl, StencilMaskSeparate(GL_FRONT, 0xFFFFFFFFU))
+ .Times(1)
+ .RetiresOnSaturation();
+ EXPECT_CALL(*gl, StencilMaskSeparate(GL_BACK, 0xFFFFFFFFU))
+ .Times(1)
+ .RetiresOnSaturation();
+ EXPECT_CALL(*gl, StencilOpSeparate(GL_FRONT, GL_KEEP, GL_KEEP, GL_KEEP))
+ .Times(1)
+ .RetiresOnSaturation();
+ EXPECT_CALL(*gl, StencilOpSeparate(GL_BACK, GL_KEEP, GL_KEEP, GL_KEEP))
+ .Times(1)
+ .RetiresOnSaturation();
+ EXPECT_CALL(*gl, Viewport(0, 0, initial_size.width(), initial_size.height()))
+ .Times(1)
+ .RetiresOnSaturation();
+ SetupInitStateManualExpectations(gl, feature_info);
+}
+#endif // GPU_COMMAND_BUFFER_SERVICE_CONTEXT_STATE_TEST_HELPERS_AUTOGEN_H_
diff --git a/chromium/gpu/command_buffer/service/external_vk_image_backing.cc b/chromium/gpu/command_buffer/service/external_vk_image_backing.cc
index 7be2287e19a..5617b2e4f85 100644
--- a/chromium/gpu/command_buffer/service/external_vk_image_backing.cc
+++ b/chromium/gpu/command_buffer/service/external_vk_image_backing.cc
@@ -14,7 +14,6 @@
#include "components/viz/common/resources/resource_sizes.h"
#include "gpu/command_buffer/service/external_vk_image_gl_representation.h"
#include "gpu/command_buffer/service/external_vk_image_skia_representation.h"
-#include "gpu/command_buffer/service/mailbox_manager.h"
#include "gpu/vulkan/vulkan_command_buffer.h"
#include "gpu/vulkan/vulkan_command_pool.h"
#include "gpu/vulkan/vulkan_fence_helper.h"
@@ -34,11 +33,22 @@ namespace gpu {
namespace {
-VkResult CreateExternalVkImage(SharedContextState* context_state,
- VkFormat format,
- const gfx::Size& size,
- bool is_transfer_dst,
- VkImage* image) {
+GrVkImageInfo CreateGrVkImageInfo(VkImage image,
+ VkFormat vk_format,
+ VkDeviceMemory memory,
+ size_t memory_size) {
+ GrVkAlloc alloc(memory, 0 /* offset */, memory_size, 0 /* flags */);
+ return GrVkImageInfo(image, alloc, VK_IMAGE_TILING_OPTIMAL,
+ VK_IMAGE_LAYOUT_UNDEFINED, vk_format,
+ 1 /* levelCount */);
+}
+
+VkResult CreateVkImage(SharedContextState* context_state,
+ VkFormat format,
+ const gfx::Size& size,
+ bool is_transfer_dst,
+ bool is_external,
+ VkImage* image) {
VkExternalMemoryImageCreateInfoKHR external_info = {
.sType = VK_STRUCTURE_TYPE_EXTERNAL_MEMORY_IMAGE_CREATE_INFO_KHR,
.handleTypes = context_state->vk_context_provider()
@@ -52,7 +62,7 @@ VkResult CreateExternalVkImage(SharedContextState* context_state,
VkImageCreateInfo create_info = {
.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO,
- .pNext = &external_info,
+ .pNext = is_external ? &external_info : nullptr,
.flags = 0,
.imageType = VK_IMAGE_TYPE_2D,
.format = format,
@@ -73,28 +83,6 @@ VkResult CreateExternalVkImage(SharedContextState* context_state,
return vkCreateImage(device, &create_info, nullptr, image);
}
-void TransitionToColorAttachment(VkImage image,
- SharedContextState* context_state,
- VulkanCommandPool* command_pool) {
- auto command_buffer = command_pool->CreatePrimaryCommandBuffer();
- CHECK(command_buffer->Initialize());
-
- {
- ScopedSingleUseCommandBufferRecorder recorder(*command_buffer);
- command_buffer->TransitionImageLayout(
- image, VK_IMAGE_LAYOUT_UNDEFINED,
- VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL);
- }
-
- // TODO(penghuang): get rid of this submission if poosible.
- command_buffer->Submit(0, nullptr, 0, nullptr);
-
- context_state->vk_context_provider()
- ->GetDeviceQueue()
- ->GetFenceHelper()
- ->EnqueueVulkanObjectCleanupForSubmittedWork(std::move(command_buffer));
-}
-
uint32_t FindMemoryTypeIndex(SharedContextState* context_state,
const VkMemoryRequirements& requirements,
VkMemoryPropertyFlags flags) {
@@ -115,6 +103,28 @@ uint32_t FindMemoryTypeIndex(SharedContextState* context_state,
return kInvalidTypeIndex;
}
+class ScopedPixelStore {
+ public:
+ ScopedPixelStore(gl::GLApi* api, GLenum name, GLint value)
+ : api_(api), name_(name), value_(value) {
+ api_->glGetIntegervFn(name_, &old_value_);
+ if (value_ != old_value_)
+ api->glPixelStoreiFn(name_, value_);
+ }
+ ~ScopedPixelStore() {
+ if (value_ != old_value_)
+ api_->glPixelStoreiFn(name_, old_value_);
+ }
+
+ private:
+ gl::GLApi* const api_;
+ const GLenum name_;
+ const GLint value_;
+ GLint old_value_;
+
+ DISALLOW_COPY_AND_ASSIGN(ScopedPixelStore);
+};
+
} // namespace
// static
@@ -132,9 +142,10 @@ std::unique_ptr<ExternalVkImageBacking> ExternalVkImageBacking::Create(
context_state->vk_context_provider()->GetDeviceQueue()->GetVulkanDevice();
VkFormat vk_format = ToVkFormat(format);
VkImage image;
- bool is_transfer_dst = using_gmb || !pixel_data.empty();
- VkResult result = CreateExternalVkImage(context_state, vk_format, size,
- is_transfer_dst, &image);
+ bool is_external = context_state->support_vulkan_external_object();
+ bool is_transfer_dst = using_gmb || !pixel_data.empty() || !is_external;
+ VkResult result = CreateVkImage(context_state, vk_format, size,
+ is_transfer_dst, is_external, &image);
if (result != VK_SUCCESS) {
DLOG(ERROR) << "Failed to create external VkImage: " << result;
return nullptr;
@@ -159,7 +170,7 @@ std::unique_ptr<ExternalVkImageBacking> ExternalVkImageBacking::Create(
VkMemoryAllocateInfo mem_alloc_info = {
.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO,
- .pNext = &external_info,
+ .pNext = is_external ? &external_info : nullptr,
.allocationSize = requirements.size,
.memoryTypeIndex = FindMemoryTypeIndex(
context_state, requirements, VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT),
@@ -184,16 +195,17 @@ std::unique_ptr<ExternalVkImageBacking> ExternalVkImageBacking::Create(
return nullptr;
}
- // TODO(penghuang): track image layout to avoid unnecessary image layout
- // transition. https://crbug.com/965955
- TransitionToColorAttachment(image, context_state, command_pool);
-
auto backing = base::WrapUnique(new ExternalVkImageBacking(
mailbox, format, size, color_space, usage, context_state, image, memory,
requirements.size, vk_format, command_pool));
- if (!pixel_data.empty())
- backing->WritePixels(pixel_data, 0);
+ if (!pixel_data.empty()) {
+ backing->WritePixels(
+ pixel_data.size(), 0,
+ base::BindOnce([](const void* data, size_t size,
+ void* buffer) { memcpy(buffer, data, size); },
+ pixel_data.data(), pixel_data.size()));
+ }
return backing;
}
@@ -208,7 +220,7 @@ std::unique_ptr<ExternalVkImageBacking> ExternalVkImageBacking::CreateFromGMB(
const gfx::Size& size,
const gfx::ColorSpace& color_space,
uint32_t usage) {
- if (gfx::NumberOfPlanesForBufferFormat(buffer_format) != 1) {
+ if (gfx::NumberOfPlanesForLinearBufferFormat(buffer_format) != 1) {
DLOG(ERROR) << "Invalid image format.";
return nullptr;
}
@@ -245,10 +257,6 @@ std::unique_ptr<ExternalVkImageBacking> ExternalVkImageBacking::CreateFromGMB(
return nullptr;
}
- // TODO(penghuang): track image layout to avoid unnecessary image layout
- // transition. https://crbug.com/965955
- TransitionToColorAttachment(vk_image, context_state, command_pool);
-
return base::WrapUnique(new ExternalVkImageBacking(
mailbox, viz::GetResourceFormat(buffer_format), size, color_space,
usage, context_state, vk_image, vk_device_memory, memory_size,
@@ -355,39 +363,36 @@ ExternalVkImageBacking::ExternalVkImageBacking(
memory_size,
false /* is_thread_safe */),
context_state_(context_state),
- image_(image),
- memory_(memory),
- memory_size_(memory_size),
- vk_format_(vk_format),
+ backend_texture_(
+ size.width(),
+ size.height(),
+ CreateGrVkImageInfo(image, vk_format, memory, memory_size)),
command_pool_(command_pool) {}
ExternalVkImageBacking::~ExternalVkImageBacking() {
- DCHECK(image_ == VK_NULL_HANDLE);
- DCHECK(memory_ == VK_NULL_HANDLE);
+ DCHECK(!backend_texture_.isValid());
}
bool ExternalVkImageBacking::BeginAccess(
bool readonly,
- std::vector<SemaphoreHandle>* semaphore_handles) {
- if (readonly) {
- if (reads_in_progress_ == 0 && shared_memory_mapping_.IsValid() &&
- shared_memory_is_updated_) {
- if (!WritePixels(
- shared_memory_mapping_.GetMemoryAsSpan<const uint8_t>().subspan(
- memory_offset_),
- stride_))
- return false;
- shared_memory_is_updated_ = false;
- }
- }
+ std::vector<SemaphoreHandle>* semaphore_handles,
+ bool is_gl) {
+ if (readonly && !reads_in_progress_)
+ UpdateContent(is_gl ? kInGLTexture : kInVkImage);
return BeginAccessInternal(readonly, semaphore_handles);
}
void ExternalVkImageBacking::EndAccess(bool readonly,
- SemaphoreHandle semaphore_handle) {
+ SemaphoreHandle semaphore_handle,
+ bool is_gl) {
EndAccessInternal(readonly, std::move(semaphore_handle));
- // TODO(penghuang): read pixels back from VkImage to shared memory GMB, if
- // this feature is needed.
+ if (!readonly) {
+ if (use_separate_gl_texture()) {
+ latest_content_ = is_gl ? kInGLTexture : kInVkImage;
+ } else {
+ latest_content_ = kInVkImage | kInGLTexture;
+ }
+ }
}
bool ExternalVkImageBacking::IsCleared() const {
@@ -398,24 +403,29 @@ void ExternalVkImageBacking::SetCleared() {
is_cleared_ = true;
}
-void ExternalVkImageBacking::Update() {
- shared_memory_is_updated_ = true;
+void ExternalVkImageBacking::Update(std::unique_ptr<gfx::GpuFence> in_fence) {
+ DCHECK(!in_fence);
+ latest_content_ = kInSharedMemory;
}
void ExternalVkImageBacking::Destroy() {
+ GrVkImageInfo image_info;
+ bool result = backend_texture_.getVkImageInfo(&image_info);
+ DCHECK(result);
+
auto* fence_helper = context_state()
->vk_context_provider()
->GetDeviceQueue()
->GetFenceHelper();
- fence_helper->EnqueueImageCleanupForSubmittedWork(image_, memory_);
- image_ = VK_NULL_HANDLE;
- memory_ = VK_NULL_HANDLE;
+ fence_helper->EnqueueImageCleanupForSubmittedWork(image_info.fImage,
+ image_info.fAlloc.fMemory);
+ backend_texture_ = GrBackendTexture();
if (texture_) {
// Ensure that a context is current before removing the ref and calling
// glDeleteTextures.
- if (!context_state()->context()->IsCurrent(nullptr))
- context_state()->context()->MakeCurrent(context_state()->surface());
+ if (!gl::g_current_gl_context)
+ context_state()->MakeCurrent(nullptr, true /* need_gl */);
texture_->RemoveLightweightRef(have_context());
}
}
@@ -440,48 +450,54 @@ ExternalVkImageBacking::ProduceGLTexture(SharedImageManager* manager,
NOTIMPLEMENTED_LOG_ONCE();
return nullptr;
#elif defined(OS_LINUX)
+ GrVkImageInfo image_info;
+ bool result = backend_texture_.getVkImageInfo(&image_info);
+ DCHECK(result);
if (!texture_) {
- VkMemoryGetFdInfoKHR get_fd_info;
- get_fd_info.sType = VK_STRUCTURE_TYPE_MEMORY_GET_FD_INFO_KHR;
- get_fd_info.pNext = nullptr;
- get_fd_info.memory = memory_;
- get_fd_info.handleType = VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT_KHR;
-
- int memory_fd = -1;
- vkGetMemoryFdKHR(device(), &get_fd_info, &memory_fd);
- if (memory_fd < 0) {
- DLOG(ERROR)
- << "Unable to extract file descriptor out of external VkImage";
- return nullptr;
- }
-
gl::GLApi* api = gl::g_current_gl_context;
+ GLuint memory_object = 0;
+ if (!use_separate_gl_texture()) {
+ VkMemoryGetFdInfoKHR get_fd_info;
+ get_fd_info.sType = VK_STRUCTURE_TYPE_MEMORY_GET_FD_INFO_KHR;
+ get_fd_info.pNext = nullptr;
+ get_fd_info.memory = image_info.fAlloc.fMemory;
+ get_fd_info.handleType = VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT_KHR;
+
+ int memory_fd = -1;
+ vkGetMemoryFdKHR(device(), &get_fd_info, &memory_fd);
+ if (memory_fd < 0) {
+ DLOG(ERROR)
+ << "Unable to extract file descriptor out of external VkImage";
+ return nullptr;
+ }
- constexpr GLenum target = GL_TEXTURE_2D;
- constexpr GLenum get_target = GL_TEXTURE_BINDING_2D;
- GLuint internal_format = viz::TextureStorageFormat(format());
+ api->glCreateMemoryObjectsEXTFn(1, &memory_object);
+ api->glImportMemoryFdEXTFn(memory_object, image_info.fAlloc.fSize,
+ GL_HANDLE_TYPE_OPAQUE_FD_EXT, memory_fd);
+ }
- GLuint memory_object;
- api->glCreateMemoryObjectsEXTFn(1, &memory_object);
- api->glImportMemoryFdEXTFn(memory_object, memory_size_,
- GL_HANDLE_TYPE_OPAQUE_FD_EXT, memory_fd);
+ GLuint internal_format = viz::TextureStorageFormat(format());
+ GLint old_texture_binding = 0;
+ api->glGetIntegervFn(GL_TEXTURE_BINDING_2D, &old_texture_binding);
GLuint texture_service_id;
api->glGenTexturesFn(1, &texture_service_id);
-
- GLint old_texture_binding = 0;
- api->glGetIntegervFn(get_target, &old_texture_binding);
- api->glBindTextureFn(target, texture_service_id);
- api->glTexParameteriFn(target, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
- api->glTexParameteriFn(target, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
- api->glTexParameteriFn(target, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
- api->glTexParameteriFn(target, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
- api->glTexStorageMem2DEXTFn(GL_TEXTURE_2D, 1, internal_format,
- size().width(), size().height(), memory_object,
- 0);
-
+ api->glBindTextureFn(GL_TEXTURE_2D, texture_service_id);
+ api->glTexParameteriFn(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
+ api->glTexParameteriFn(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
+ api->glTexParameteriFn(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
+ api->glTexParameteriFn(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
+ if (use_separate_gl_texture()) {
+ api->glTexStorage2DEXTFn(GL_TEXTURE_2D, 1, internal_format,
+ size().width(), size().height());
+ } else {
+ DCHECK(memory_object);
+ api->glTexStorageMem2DEXTFn(GL_TEXTURE_2D, 1, internal_format,
+ size().width(), size().height(),
+ memory_object, 0);
+ }
texture_ = new gles2::Texture(texture_service_id);
texture_->SetLightweightRef();
- texture_->SetTarget(target, 1);
+ texture_->SetTarget(GL_TEXTURE_2D, 1);
texture_->sampler_state_.min_filter = GL_LINEAR;
texture_->sampler_state_.mag_filter = GL_LINEAR;
texture_->sampler_state_.wrap_t = GL_CLAMP_TO_EDGE;
@@ -493,12 +509,12 @@ ExternalVkImageBacking::ProduceGLTexture(SharedImageManager* manager,
GLenum gl_format = viz::GLDataFormat(format());
GLenum gl_type = viz::GLDataType(format());
- texture_->SetLevelInfo(target, 0, internal_format, size().width(),
+ texture_->SetLevelInfo(GL_TEXTURE_2D, 0, internal_format, size().width(),
size().height(), 1, 0, gl_format, gl_type,
cleared_rect);
- texture_->SetImmutable(true);
+ texture_->SetImmutable(true, true);
- api->glBindTextureFn(target, old_texture_binding);
+ api->glBindTextureFn(GL_TEXTURE_2D, old_texture_binding);
}
return std::make_unique<ExternalVkImageGlRepresentation>(
manager, this, tracker, texture_, texture_->service_id());
@@ -537,16 +553,60 @@ void ExternalVkImageBacking::InstallSharedMemory(
shared_memory_mapping_ = std::move(shared_memory_mapping);
stride_ = stride;
memory_offset_ = memory_offset;
- Update();
+ Update(nullptr);
}
-bool ExternalVkImageBacking::WritePixels(
- const base::span<const uint8_t>& pixel_data,
- size_t stride) {
- DCHECK(stride == 0 || size().height() * stride <= pixel_data.size());
+void ExternalVkImageBacking::UpdateContent(uint32_t content_flags) {
+ // Only support one backing for now.
+ DCHECK(content_flags == kInVkImage || content_flags == kInGLTexture ||
+ content_flags == kInSharedMemory);
+
+ if ((latest_content_ & content_flags) == content_flags)
+ return;
+
+ if (content_flags == kInGLTexture && !use_separate_gl_texture())
+ content_flags = kInVkImage;
+
+ if (content_flags == kInVkImage) {
+ if (latest_content_ & kInSharedMemory) {
+ if (!shared_memory_mapping_.IsValid())
+ return;
+ auto pixel_data =
+ shared_memory_mapping_.GetMemoryAsSpan<const uint8_t>().subspan(
+ memory_offset_);
+ if (!WritePixels(
+ pixel_data.size(), stride_,
+ base::BindOnce([](const void* data, size_t size,
+ void* buffer) { memcpy(buffer, data, size); },
+ pixel_data.data(), pixel_data.size()))) {
+ return;
+ }
+ latest_content_ |=
+ use_separate_gl_texture() ? kInVkImage : kInVkImage | kInGLTexture;
+ return;
+ }
+ if ((latest_content_ & kInGLTexture) && use_separate_gl_texture()) {
+ CopyPixelsFromGLTexture();
+ latest_content_ |= kInVkImage;
+ return;
+ }
+ } else if (content_flags == kInGLTexture) {
+ // TODO(penghuang): support updating content in gl texture.
+ NOTIMPLEMENTED_LOG_ONCE();
+ } else if (content_flags == kInSharedMemory) {
+ // TODO(penghuang): read pixels back from VkImage to shared memory GMB, if
+ // this feature is needed.
+ NOTIMPLEMENTED_LOG_ONCE();
+ }
+}
+
+bool ExternalVkImageBacking::WritePixels(size_t data_size,
+ size_t stride,
+ FillBufferCallback callback) {
+ DCHECK(stride == 0 || size().height() * stride <= data_size);
VkBufferCreateInfo buffer_create_info = {
.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO,
- .size = pixel_data.size(),
+ .size = data_size,
.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT,
.sharingMode = VK_SHARING_MODE_EXCLUSIVE,
};
@@ -590,41 +650,18 @@ bool ExternalVkImageBacking::WritePixels(
return false;
}
- void* data = nullptr;
- result = vkMapMemory(device(), stage_memory, 0 /* memoryOffset */,
- pixel_data.size(), 0, &data);
+ void* buffer = nullptr;
+ result = vkMapMemory(device(), stage_memory, 0 /* memoryOffset */, data_size,
+ 0, &buffer);
if (result != VK_SUCCESS) {
DLOG(ERROR) << "vkMapMemory() failed. " << result;
vkDestroyBuffer(device(), stage_buffer, nullptr /* pAllocator */);
vkFreeMemory(device(), stage_memory, nullptr /* pAllocator */);
return false;
}
- memcpy(data, pixel_data.data(), pixel_data.size());
- vkUnmapMemory(device(), stage_memory);
- auto command_buffer = command_pool_->CreatePrimaryCommandBuffer();
- CHECK(command_buffer->Initialize());
-
- {
- ScopedSingleUseCommandBufferRecorder recorder(*command_buffer);
-
- // TODO(penghuang): track image layout to avoid unnecessary image layout
- // transition. https://crbug.com/965955
- command_buffer->TransitionImageLayout(
- image(), VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL,
- VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL);
- uint32_t buffer_width =
- stride ? stride * 8 / BitsPerPixel(format()) : size().width();
- command_buffer->CopyBufferToImage(stage_buffer, image(), buffer_width,
- size().height(), size().width(),
- size().height());
-
- // TODO(penghuang): track image layout to avoid unnecessary image layout
- // transition. https://crbug.com/965955
- command_buffer->TransitionImageLayout(
- image(), VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
- VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL);
- }
+ std::move(callback).Run(buffer);
+ vkUnmapMemory(device(), stage_memory);
std::vector<gpu::SemaphoreHandle> handles;
if (!BeginAccessInternal(false /* readonly */, &handles)) {
@@ -634,6 +671,26 @@ bool ExternalVkImageBacking::WritePixels(
return false;
}
+ auto command_buffer = command_pool_->CreatePrimaryCommandBuffer();
+ CHECK(command_buffer->Initialize());
+ {
+ ScopedSingleUseCommandBufferRecorder recorder(*command_buffer);
+ GrVkImageInfo image_info;
+ bool result = backend_texture_.getVkImageInfo(&image_info);
+ DCHECK(result);
+ if (image_info.fImageLayout != VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL) {
+ command_buffer->TransitionImageLayout(
+ image_info.fImage, image_info.fImageLayout,
+ VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL);
+ backend_texture_.setVkImageLayout(VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL);
+ }
+ uint32_t buffer_width =
+ stride ? stride * 8 / BitsPerPixel(format()) : size().width();
+ command_buffer->CopyBufferToImage(stage_buffer, image_info.fImage,
+ buffer_width, size().height(),
+ size().width(), size().height());
+ }
+
if (!need_sychronization()) {
DCHECK(handles.empty());
command_buffer->Submit(0, nullptr, 0, nullptr);
@@ -682,6 +739,97 @@ bool ExternalVkImageBacking::WritePixels(
return true;
}
+void ExternalVkImageBacking::CopyPixelsFromGLTexture() {
+ DCHECK(use_separate_gl_texture());
+ DCHECK(texture_);
+
+ GLenum gl_format = GL_NONE;
+ GLenum gl_type = GL_NONE;
+ size_t bytes_per_pixel = 0;
+ switch (ToVkFormat(format())) {
+ case VK_FORMAT_R8G8B8A8_UNORM:
+ gl_format = GL_RGBA;
+ gl_type = GL_UNSIGNED_BYTE;
+ bytes_per_pixel = 4;
+ break;
+ case VK_FORMAT_B8G8R8A8_UNORM:
+ gl_format = GL_BGRA;
+ gl_type = GL_UNSIGNED_BYTE;
+ bytes_per_pixel = 4;
+ break;
+ case VK_FORMAT_R8_UNORM:
+ gl_format = GL_RED;
+ gl_type = GL_UNSIGNED_BYTE;
+ bytes_per_pixel = 1;
+ break;
+ case VK_FORMAT_R4G4B4A4_UNORM_PACK16:
+ gl_format = GL_RGBA;
+ gl_type = GL_UNSIGNED_SHORT_4_4_4_4;
+ bytes_per_pixel = 2;
+ break;
+ case VK_FORMAT_R5G6B5_UNORM_PACK16:
+ gl_format = GL_RGB;
+ gl_type = GL_UNSIGNED_SHORT_5_6_5;
+ bytes_per_pixel = 2;
+ break;
+ case VK_FORMAT_R16_UNORM:
+ gl_format = GL_RED;
+ gl_type = GL_UNSIGNED_SHORT;
+ bytes_per_pixel = 2;
+ break;
+ case VK_FORMAT_A2B10G10R10_UNORM_PACK32:
+ gl_format = GL_RGBA;
+ gl_type = GL_UNSIGNED_INT_2_10_10_10_REV;
+ bytes_per_pixel = 4;
+ break;
+ default:
+ NOTREACHED() << "Not supported resource format=" << format();
+ return;
+ }
+
+ // Make sure GrContext is not using GL. So we don't need reset GrContext
+ DCHECK(!context_state_->GrContextIsGL());
+
+ // Make sure a gl context is current, since textures are shared between all gl
+ // contexts, we don't care which gl context is current.
+ if (!gl::g_current_gl_context &&
+ !context_state_->MakeCurrent(nullptr, true /* needs_gl */))
+ return;
+
+ gl::GLApi* api = gl::g_current_gl_context;
+ GLuint framebuffer;
+ GLint old_framebuffer;
+ api->glGetIntegervFn(GL_READ_FRAMEBUFFER_BINDING, &old_framebuffer);
+ api->glGenFramebuffersEXTFn(1, &framebuffer);
+ api->glBindFramebufferEXTFn(GL_READ_FRAMEBUFFER, framebuffer);
+ api->glFramebufferTexture2DEXTFn(GL_READ_FRAMEBUFFER, GL_COLOR_ATTACHMENT0,
+ GL_TEXTURE_2D, texture_->service_id(), 0);
+ GLenum status = api->glCheckFramebufferStatusEXTFn(GL_READ_FRAMEBUFFER);
+ DCHECK_EQ(status, static_cast<GLenum>(GL_FRAMEBUFFER_COMPLETE))
+ << "CheckFramebufferStatusEXT() failed.";
+
+ base::CheckedNumeric<size_t> checked_size = bytes_per_pixel;
+ checked_size *= size().width();
+ checked_size *= size().height();
+ DCHECK(checked_size.IsValid());
+
+ ScopedPixelStore pack_row_length(api, GL_PACK_ROW_LENGTH, 0);
+ ScopedPixelStore pack_skip_pixels(api, GL_PACK_SKIP_PIXELS, 0);
+ ScopedPixelStore pack_skip_rows(api, GL_PACK_SKIP_ROWS, 0);
+ ScopedPixelStore pack_aligment(api, GL_PACK_ALIGNMENT, 1);
+
+ WritePixels(checked_size.ValueOrDie(), 0,
+ base::BindOnce(
+ [](gl::GLApi* api, const gfx::Size& size, GLenum format,
+ GLenum type, void* buffer) {
+ api->glReadPixelsFn(0, 0, size.width(), size.height(),
+ format, type, buffer);
+ },
+ api, size(), gl_format, gl_type));
+ api->glBindFramebufferEXTFn(GL_READ_FRAMEBUFFER, old_framebuffer);
+ api->glDeleteFramebuffersEXTFn(1, &framebuffer);
+}
+
bool ExternalVkImageBacking::BeginAccessInternal(
bool readonly,
std::vector<SemaphoreHandle>* semaphore_handles) {
diff --git a/chromium/gpu/command_buffer/service/external_vk_image_backing.h b/chromium/gpu/command_buffer/service/external_vk_image_backing.h
index e008b9e04b7..54d73b4ea16 100644
--- a/chromium/gpu/command_buffer/service/external_vk_image_backing.h
+++ b/chromium/gpu/command_buffer/service/external_vk_image_backing.h
@@ -48,11 +48,8 @@ class ExternalVkImageBacking : public SharedImageBacking {
~ExternalVkImageBacking() override;
- VkImage image() const { return image_; }
- VkDeviceMemory memory() const { return memory_; }
- size_t memory_size() const { return memory_size_; }
- VkFormat vk_format() const { return vk_format_; }
SharedContextState* context_state() const { return context_state_; }
+ const GrBackendTexture& backend_texture() const { return backend_texture_; }
VulkanImplementation* vulkan_implementation() const {
return context_state()->vk_context_provider()->GetVulkanImplementation();
}
@@ -63,28 +60,35 @@ class ExternalVkImageBacking : public SharedImageBacking {
->GetVulkanDevice();
}
bool need_sychronization() const {
+ if (use_separate_gl_texture())
+ return false;
return usage() & SHARED_IMAGE_USAGE_GLES2;
}
+ bool use_separate_gl_texture() const {
+ return !context_state()->support_vulkan_external_object();
+ }
// Notifies the backing that an access will start. Return false if there is
// currently any other conflict access in progress. Otherwise, returns true
// and semaphore handles which will be waited on before accessing.
bool BeginAccess(bool readonly,
- std::vector<SemaphoreHandle>* semaphore_handles);
+ std::vector<SemaphoreHandle>* semaphore_handles,
+ bool is_gl);
// Notifies the backing that an access has ended. The representation must
// provide a semaphore handle that has been signaled at the end of the write
// access.
- void EndAccess(bool readonly, SemaphoreHandle semaphore_handle);
+ void EndAccess(bool readonly, SemaphoreHandle semaphore_handle, bool is_gl);
// SharedImageBacking implementation.
bool IsCleared() const override;
void SetCleared() override;
- void Update() override;
+ void Update(std::unique_ptr<gfx::GpuFence> in_fence) override;
void Destroy() override;
bool ProduceLegacyMailbox(MailboxManager* mailbox_manager) override;
protected:
+ void UpdateContent(uint32_t content_flags);
bool BeginAccessInternal(bool readonly,
std::vector<SemaphoreHandle>* semaphore_handles);
void EndAccessInternal(bool readonly, SemaphoreHandle semaphore_handle);
@@ -120,28 +124,36 @@ class ExternalVkImageBacking : public SharedImageBacking {
size_t stride,
size_t memory_offset);
- bool WritePixels(const base::span<const uint8_t>& pixel_data, size_t stride);
+ using FillBufferCallback = base::OnceCallback<void(void* buffer)>;
+ bool WritePixels(size_t data_size,
+ size_t stride,
+ FillBufferCallback callback);
+ void CopyPixelsFromGLTexture();
SharedContextState* const context_state_;
- VkImage image_ = VK_NULL_HANDLE;
- VkDeviceMemory memory_ = VK_NULL_HANDLE;
+ GrBackendTexture backend_texture_;
+ VulkanCommandPool* const command_pool_;
+
SemaphoreHandle write_semaphore_handle_;
std::vector<SemaphoreHandle> read_semaphore_handles_;
- const size_t memory_size_;
bool is_cleared_ = false;
- const VkFormat vk_format_;
- VulkanCommandPool* const command_pool_;
bool is_write_in_progress_ = false;
uint32_t reads_in_progress_ = 0;
gles2::Texture* texture_ = nullptr;
// GMB related stuff.
- bool shared_memory_is_updated_ = false;
base::WritableSharedMemoryMapping shared_memory_mapping_;
size_t stride_ = 0;
size_t memory_offset_ = 0;
+ enum LatestContent {
+ kInVkImage = 1 << 0,
+ kInSharedMemory = 1 << 1,
+ kInGLTexture = 1 << 2,
+ };
+ uint32_t latest_content_ = 0;
+
DISALLOW_COPY_AND_ASSIGN(ExternalVkImageBacking);
};
diff --git a/chromium/gpu/command_buffer/service/external_vk_image_gl_representation.cc b/chromium/gpu/command_buffer/service/external_vk_image_gl_representation.cc
index e46de306e74..cb889f309ff 100644
--- a/chromium/gpu/command_buffer/service/external_vk_image_gl_representation.cc
+++ b/chromium/gpu/command_buffer/service/external_vk_image_gl_representation.cc
@@ -12,11 +12,52 @@
#include "gpu/vulkan/vulkan_function_pointers.h"
#include "gpu/vulkan/vulkan_implementation.h"
+#define GL_LAYOUT_GENERAL_EXT 0x958D
#define GL_LAYOUT_COLOR_ATTACHMENT_EXT 0x958E
+#define GL_LAYOUT_DEPTH_STENCIL_ATTACHMENT_EXT 0x958F
+#define GL_LAYOUT_DEPTH_STENCIL_READ_ONLY_EXT 0x9590
+#define GL_LAYOUT_SHADER_READ_ONLY_EXT 0x9591
+#define GL_LAYOUT_TRANSFER_SRC_EXT 0x9592
+#define GL_LAYOUT_TRANSFER_DST_EXT 0x9593
+#define GL_LAYOUT_DEPTH_READ_ONLY_STENCIL_ATTACHMENT_EXT 0x9530
+#define GL_LAYOUT_DEPTH_ATTACHMENT_STENCIL_READ_ONLY_EXT 0x9531
+
#define GL_HANDLE_TYPE_OPAQUE_FD_EXT 0x9586
namespace gpu {
+namespace {
+
+GLenum ToGLImageLayout(VkImageLayout layout) {
+ switch (layout) {
+ case VK_IMAGE_LAYOUT_UNDEFINED:
+ return GL_NONE;
+ case VK_IMAGE_LAYOUT_GENERAL:
+ return GL_LAYOUT_GENERAL_EXT;
+ case VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL:
+ return GL_LAYOUT_COLOR_ATTACHMENT_EXT;
+ case VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL:
+ return GL_LAYOUT_DEPTH_STENCIL_ATTACHMENT_EXT;
+ case VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL:
+ return GL_LAYOUT_DEPTH_STENCIL_READ_ONLY_EXT;
+ case VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL:
+ return GL_LAYOUT_SHADER_READ_ONLY_EXT;
+ case VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL:
+ return GL_LAYOUT_TRANSFER_SRC_EXT;
+ case VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL:
+ return GL_LAYOUT_TRANSFER_DST_EXT;
+ case VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_STENCIL_ATTACHMENT_OPTIMAL_KHR:
+ return GL_LAYOUT_DEPTH_READ_ONLY_STENCIL_ATTACHMENT_EXT;
+ case VK_IMAGE_LAYOUT_DEPTH_ATTACHMENT_STENCIL_READ_ONLY_OPTIMAL_KHR:
+ return GL_LAYOUT_DEPTH_ATTACHMENT_STENCIL_READ_ONLY_EXT;
+ default:
+ NOTREACHED() << "Invalid image layout " << layout;
+ return GL_NONE;
+ }
+}
+
+} // namespace
+
ExternalVkImageGlRepresentation::ExternalVkImageGlRepresentation(
SharedImageManager* manager,
SharedImageBacking* backing,
@@ -48,13 +89,16 @@ bool ExternalVkImageGlRepresentation::BeginAccess(GLenum mode) {
std::vector<SemaphoreHandle> handles;
- if (!backing_impl()->BeginAccess(readonly, &handles))
+ if (!backing_impl()->BeginAccess(readonly, &handles, true /* is_gl */))
return false;
for (auto& handle : handles) {
GLuint gl_semaphore = ImportVkSemaphoreIntoGL(std::move(handle));
if (gl_semaphore) {
- GLenum src_layout = GL_LAYOUT_COLOR_ATTACHMENT_EXT;
+ GrVkImageInfo info;
+ auto result = backing_impl()->backend_texture().getVkImageInfo(&info);
+ DCHECK(result);
+ GLenum src_layout = ToGLImageLayout(info.fImageLayout);
api()->glWaitSemaphoreEXTFn(gl_semaphore, 0, nullptr, 1,
&texture_service_id_, &src_layout);
api()->glDeleteSemaphoresEXTFn(1, &gl_semaphore);
@@ -80,45 +124,56 @@ void ExternalVkImageGlRepresentation::EndAccess() {
(current_access_mode_ == GL_SHARED_IMAGE_ACCESS_MODE_READ_CHROMIUM);
current_access_mode_ = 0;
- VkSemaphore semaphore =
- vk_implementation()->CreateExternalSemaphore(backing_impl()->device());
- if (semaphore == VK_NULL_HANDLE) {
- // TODO(crbug.com/933452): We should be able to handle this failure more
- // gracefully rather than shutting down the whole process.
- LOG(FATAL) << "Unable to create a VkSemaphore in "
- << "ExternalVkImageGlRepresentation for synchronization with "
- << "Vulkan";
- return;
- }
+ VkSemaphore semaphore = VK_NULL_HANDLE;
+ SemaphoreHandle semaphore_handle;
+ GLuint gl_semaphore = 0;
+ if (backing_impl()->need_sychronization()) {
+ semaphore =
+ vk_implementation()->CreateExternalSemaphore(backing_impl()->device());
+ if (semaphore == VK_NULL_HANDLE) {
+ // TODO(crbug.com/933452): We should be able to handle this failure more
+ // gracefully rather than shutting down the whole process.
+ LOG(FATAL) << "Unable to create a VkSemaphore in "
+ << "ExternalVkImageGlRepresentation for synchronization with "
+ << "Vulkan";
+ return;
+ }
- SemaphoreHandle semaphore_handle =
- vk_implementation()->GetSemaphoreHandle(vk_device(), semaphore);
- vkDestroySemaphore(backing_impl()->device(), semaphore, nullptr);
- if (!semaphore_handle.is_valid()) {
- LOG(FATAL) << "Unable to export VkSemaphore into GL in "
- << "ExternalVkImageGlRepresentation for synchronization with "
- << "Vulkan";
- return;
- }
+ semaphore_handle =
+ vk_implementation()->GetSemaphoreHandle(vk_device(), semaphore);
+ vkDestroySemaphore(backing_impl()->device(), semaphore, nullptr);
+ if (!semaphore_handle.is_valid()) {
+ LOG(FATAL) << "Unable to export VkSemaphore into GL in "
+ << "ExternalVkImageGlRepresentation for synchronization with "
+ << "Vulkan";
+ return;
+ }
- SemaphoreHandle dup_semaphore_handle = semaphore_handle.Duplicate();
- GLuint gl_semaphore =
- ImportVkSemaphoreIntoGL(std::move(dup_semaphore_handle));
+ SemaphoreHandle dup_semaphore_handle = semaphore_handle.Duplicate();
+ gl_semaphore = ImportVkSemaphoreIntoGL(std::move(dup_semaphore_handle));
- if (!gl_semaphore) {
- // TODO(crbug.com/933452): We should be able to semaphore_handle this
- // failure more gracefully rather than shutting down the whole process.
- LOG(FATAL) << "Unable to export VkSemaphore into GL in "
- << "ExternalVkImageGlRepresentation for synchronization with "
- << "Vulkan";
- return;
+ if (!gl_semaphore) {
+ // TODO(crbug.com/933452): We should be able to semaphore_handle this
+ // failure more gracefully rather than shutting down the whole process.
+ LOG(FATAL) << "Unable to export VkSemaphore into GL in "
+ << "ExternalVkImageGlRepresentation for synchronization with "
+ << "Vulkan";
+ return;
+ }
+ }
+
+ GrVkImageInfo info;
+ auto result = backing_impl()->backend_texture().getVkImageInfo(&info);
+ DCHECK(result);
+ GLenum dst_layout = ToGLImageLayout(info.fImageLayout);
+ if (backing_impl()->need_sychronization()) {
+ api()->glSignalSemaphoreEXTFn(gl_semaphore, 0, nullptr, 1,
+ &texture_service_id_, &dst_layout);
+ api()->glDeleteSemaphoresEXTFn(1, &gl_semaphore);
}
- GLenum dst_layout = GL_LAYOUT_COLOR_ATTACHMENT_EXT;
- api()->glSignalSemaphoreEXTFn(gl_semaphore, 0, nullptr, 1,
- &texture_service_id_, &dst_layout);
- api()->glDeleteSemaphoresEXTFn(1, &gl_semaphore);
- backing_impl()->EndAccess(readonly, std::move(semaphore_handle));
+ backing_impl()->EndAccess(readonly, std::move(semaphore_handle),
+ true /* is_gl */);
}
GLuint ExternalVkImageGlRepresentation::ImportVkSemaphoreIntoGL(
diff --git a/chromium/gpu/command_buffer/service/external_vk_image_skia_representation.cc b/chromium/gpu/command_buffer/service/external_vk_image_skia_representation.cc
index 935ae6ef46d..d4faa916394 100644
--- a/chromium/gpu/command_buffer/service/external_vk_image_skia_representation.cc
+++ b/chromium/gpu/command_buffer/service/external_vk_image_skia_representation.cc
@@ -93,7 +93,7 @@ sk_sp<SkPromiseImageTexture> ExternalVkImageSkiaRepresentation::BeginAccess(
DCHECK(end_access_semaphore_ == VK_NULL_HANDLE);
std::vector<SemaphoreHandle> handles;
- if (!backing_impl()->BeginAccess(readonly, &handles))
+ if (!backing_impl()->BeginAccess(readonly, &handles, false /* is_gl */))
return nullptr;
for (auto& handle : handles) {
@@ -115,16 +115,7 @@ sk_sp<SkPromiseImageTexture> ExternalVkImageSkiaRepresentation::BeginAccess(
end_semaphores->back().initVulkan(end_access_semaphore_);
}
- // Create backend texture from the VkImage.
- GrVkAlloc alloc(backing_impl()->memory(), 0 /* offset */,
- backing_impl()->memory_size(), 0 /* flags */);
- GrVkImageInfo vk_image_info(backing_impl()->image(), alloc,
- VK_IMAGE_TILING_OPTIMAL,
- VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL,
- backing_impl()->vk_format(), 1 /* levelCount */);
-
- return SkPromiseImageTexture::Make(
- GrBackendTexture(size().width(), size().height(), vk_image_info));
+ return SkPromiseImageTexture::Make(backing_impl()->backend_texture());
}
void ExternalVkImageSkiaRepresentation::EndAccess(bool readonly) {
@@ -146,7 +137,7 @@ void ExternalVkImageSkiaRepresentation::EndAccess(bool readonly) {
DCHECK(end_access_semaphore_ == VK_NULL_HANDLE);
}
- backing_impl()->EndAccess(readonly, std::move(handle));
+ backing_impl()->EndAccess(readonly, std::move(handle), false /* is_gl */);
}
} // namespace gpu \ No newline at end of file
diff --git a/chromium/gpu/command_buffer/service/feature_info.cc b/chromium/gpu/command_buffer/service/feature_info.cc
index 817d686d984..7ca5cc52a5f 100644
--- a/chromium/gpu/command_buffer/service/feature_info.cc
+++ b/chromium/gpu/command_buffer/service/feature_info.cc
@@ -223,12 +223,18 @@ FeatureInfo::FeatureInfo(
gpu::kGpuFeatureStatusEnabled;
#if defined(OS_CHROMEOS)
- feature_flags_.chromium_image_ycbcr_420v = base::ContainsValue(
+ feature_flags_.chromium_image_ycbcr_420v = base::Contains(
gpu_feature_info.supported_buffer_formats_for_allocation_and_texturing,
gfx::BufferFormat::YUV_420_BIPLANAR);
#elif defined(OS_MACOSX)
feature_flags_.chromium_image_ycbcr_420v = true;
#endif
+
+#if defined(OS_CHROMEOS)
+ feature_flags_.chromium_image_ycbcr_p010 = base::Contains(
+ gpu_feature_info.supported_buffer_formats_for_allocation_and_texturing,
+ gfx::BufferFormat::P010);
+#endif
}
void FeatureInfo::InitializeBasicState(const base::CommandLine* command_line) {
@@ -1029,11 +1035,9 @@ void FeatureInfo::InitializeFeatures() {
validators_.g_l_state.AddValue(GL_TEXTURE_BINDING_EXTERNAL_OES);
}
- // TODO(kainino): If we add a way to query whether ANGLE is exposing
- // native support for ETC1 textures, require that here. Otherwise, we could
- // co-opt the native-ETC2-support query discussed below.
- if (gfx::HasExtension(extensions, "GL_OES_compressed_ETC1_RGB8_texture") &&
- !gl_version_info_->is_angle) {
+ // ANGLE only exposes this extension when it has native support of the
+ // GL_ETC1_RGB8 format.
+ if (gfx::HasExtension(extensions, "GL_OES_compressed_ETC1_RGB8_texture")) {
AddExtensionString("GL_OES_compressed_ETC1_RGB8_texture");
feature_flags_.oes_compressed_etc1_rgb8_texture = true;
validators_.compressed_texture_format.AddValue(GL_ETC1_RGB8_OES);
@@ -1143,6 +1147,11 @@ void FeatureInfo::InitializeFeatures() {
gfx::BufferFormat::RGBX_1010102);
}
+ if (feature_flags_.chromium_image_ycbcr_p010) {
+ AddExtensionString("GL_CHROMIUM_ycbcr_p010_image");
+ feature_flags_.gpu_memory_buffer_formats.Add(gfx::BufferFormat::P010);
+ }
+
// TODO(gman): Add support for these extensions.
// GL_OES_depth32
@@ -1492,12 +1501,6 @@ void FeatureInfo::InitializeFeatures() {
gfx::HasExtension(extensions, "GL_ANGLE_request_extension");
feature_flags_.ext_debug_marker =
gfx::HasExtension(extensions, "GL_EXT_debug_marker");
- feature_flags_.arb_robustness =
- gfx::HasExtension(extensions, "GL_ARB_robustness");
- feature_flags_.khr_robustness =
- gfx::HasExtension(extensions, "GL_KHR_robustness");
- feature_flags_.ext_robustness =
- gfx::HasExtension(extensions, "GL_EXT_robustness");
feature_flags_.ext_pixel_buffer_object =
gfx::HasExtension(extensions, "GL_ARB_pixel_buffer_object") ||
gfx::HasExtension(extensions, "GL_NV_pixel_buffer_object");
@@ -1680,8 +1683,9 @@ void FeatureInfo::InitializeFloatAndHalfFloatFeatures(
}
}
- // Assume all desktop (!gl_version_info_->is_es) supports float blend
- if (!gl_version_info_->is_es ||
+ // Assume all desktop (!gl_version_info_->is_es) supports float blend.
+ // Floating-point format blending is core of ES 3.2.
+ if (!gl_version_info_->is_es || gl_version_info_->IsAtLeastGLES(3, 2) ||
gfx::HasExtension(extensions, "GL_EXT_float_blend")) {
if (!disallowed_features_.ext_float_blend) {
EnableEXTFloatBlend();
diff --git a/chromium/gpu/command_buffer/service/feature_info.h b/chromium/gpu/command_buffer/service/feature_info.h
index 80c36fe5384..7c1d2d29f98 100644
--- a/chromium/gpu/command_buffer/service/feature_info.h
+++ b/chromium/gpu/command_buffer/service/feature_info.h
@@ -102,6 +102,7 @@ class GPU_GLES2_EXPORT FeatureInfo : public base::RefCounted<FeatureInfo> {
bool chromium_image_ycbcr_422 = false;
bool chromium_image_xr30 = false;
bool chromium_image_xb30 = false;
+ bool chromium_image_ycbcr_p010 = false;
bool emulate_primitive_restart_fixed_index = false;
bool ext_render_buffer_format_bgra8888 = false;
bool ext_multisample_compatibility = false;
@@ -123,9 +124,6 @@ class GPU_GLES2_EXPORT FeatureInfo : public base::RefCounted<FeatureInfo> {
bool angle_client_arrays = false;
bool angle_request_extension = false;
bool ext_debug_marker = false;
- bool arb_robustness = false;
- bool khr_robustness = false;
- bool ext_robustness = false;
bool ext_pixel_buffer_object = false;
bool ext_unpack_subimage = false;
bool oes_rgb8_rgba8 = false;
@@ -227,6 +225,10 @@ class GPU_GLES2_EXPORT FeatureInfo : public base::RefCounted<FeatureInfo> {
return oes_texture_half_float_linear_available_;
}
+ bool is_passthrough_cmd_decoder() const {
+ return is_passthrough_cmd_decoder_;
+ }
+
private:
friend class base::RefCounted<FeatureInfo>;
friend class BufferManagerClientSideArraysTest;
diff --git a/chromium/gpu/command_buffer/service/framebuffer_manager.cc b/chromium/gpu/command_buffer/service/framebuffer_manager.cc
index 30a62773a3f..96dea3656f3 100644
--- a/chromium/gpu/command_buffer/service/framebuffer_manager.cc
+++ b/chromium/gpu/command_buffer/service/framebuffer_manager.cc
@@ -709,6 +709,8 @@ GLenum Framebuffer::IsPossiblyComplete(const FeatureInfo* feature_info) const {
GLsizei width = -1;
GLsizei height = -1;
GLsizei samples = -1;
+ uint32_t colorbufferSize = 0;
+ bool colorbufferSizeValid = false;
const bool kSamplesMustMatch = feature_info->IsWebGLContext() ||
!feature_info->feature_flags().chromium_framebuffer_mixed_samples;
@@ -751,10 +753,26 @@ GLenum Framebuffer::IsPossiblyComplete(const FeatureInfo* feature_info) const {
return GL_FRAMEBUFFER_INCOMPLETE_ATTACHMENT;
}
- // Attaching an image to more than one color attachment point should return
- // FRAMEBUFFER_UNSUPPORTED.
if (it->first >= GL_COLOR_ATTACHMENT0 &&
it->first < GL_COLOR_ATTACHMENT0 + manager_->max_color_attachments_) {
+ // in GLES 2.0, all color attachments attachments must have the same
+ // number of bitplanes.
+ // in GLES 3.0, there is no such restriction.
+ if (feature_info->context_type() == CONTEXT_TYPE_WEBGL1) {
+ if (colorbufferSizeValid) {
+ if (colorbufferSize !=
+ GLES2Util::GetGLTypeSizeForTextures(attachment->texture_type())) {
+ return GL_FRAMEBUFFER_UNSUPPORTED;
+ }
+ } else {
+ colorbufferSize =
+ GLES2Util::GetGLTypeSizeForTextures(attachment->texture_type());
+ colorbufferSizeValid = true;
+ }
+ }
+
+ // Attaching an image to more than one color attachment point should
+ // return FRAMEBUFFER_UNSUPPORTED.
for (GLenum i = it->first + 1;
i < GL_COLOR_ATTACHMENT0 + manager_->max_color_attachments_; i++) {
const Attachment* other = GetAttachment(i);
diff --git a/chromium/gpu/command_buffer/service/gl_context_virtual.cc b/chromium/gpu/command_buffer/service/gl_context_virtual.cc
index 8eea6084343..81703702c64 100644
--- a/chromium/gpu/command_buffer/service/gl_context_virtual.cc
+++ b/chromium/gpu/command_buffer/service/gl_context_virtual.cc
@@ -97,8 +97,11 @@ void GLContextVirtual::SetSafeToForceGpuSwitch() {
return shared_context_->SetSafeToForceGpuSwitch();
}
-bool GLContextVirtual::WasAllocatedUsingRobustnessExtension() {
- return shared_context_->WasAllocatedUsingRobustnessExtension();
+unsigned int GLContextVirtual::CheckStickyGraphicsResetStatus() {
+ // Don't pretend we know which one of the virtual contexts was responsible.
+ unsigned int reset_status = shared_context_->CheckStickyGraphicsResetStatus();
+ return reset_status == GL_NO_ERROR ? GL_NO_ERROR
+ : GL_UNKNOWN_CONTEXT_RESET_ARB;
}
void GLContextVirtual::SetUnbindFboOnMakeCurrent() {
diff --git a/chromium/gpu/command_buffer/service/gl_context_virtual.h b/chromium/gpu/command_buffer/service/gl_context_virtual.h
index dc3d4d54de1..3e6f297a0ab 100644
--- a/chromium/gpu/command_buffer/service/gl_context_virtual.h
+++ b/chromium/gpu/command_buffer/service/gl_context_virtual.h
@@ -42,7 +42,7 @@ class GPU_GLES2_EXPORT GLContextVirtual : public gl::GLContext {
std::string GetGLRenderer() override;
const gfx::ExtensionSet& GetExtensions() override;
void SetSafeToForceGpuSwitch() override;
- bool WasAllocatedUsingRobustnessExtension() override;
+ unsigned int CheckStickyGraphicsResetStatus() override;
void SetUnbindFboOnMakeCurrent() override;
gl::YUVToRGBConverter* GetYUVToRGBConverter(
const gfx::ColorSpace& color_space) override;
diff --git a/chromium/gpu/command_buffer/service/gl_context_virtual_unittest.cc b/chromium/gpu/command_buffer/service/gl_context_virtual_unittest.cc
index b9c6bcd4b58..67975fb4078 100644
--- a/chromium/gpu/command_buffer/service/gl_context_virtual_unittest.cc
+++ b/chromium/gpu/command_buffer/service/gl_context_virtual_unittest.cc
@@ -47,25 +47,57 @@ TEST_F(GLContextVirtualTest, Reinitialize) {
.Times(AnyNumber())
.WillRepeatedly(Return(reinterpret_cast<unsigned const char *>("")));
{
- scoped_refptr<gl::GLContextStub> base_context = new gl::GLContextStub;
+ auto base_context = base::MakeRefCounted<gl::GLContextStub>();
gl::GLShareGroup* share_group = base_context->share_group();
share_group->SetSharedContext(GetGLSurface(), base_context.get());
- scoped_refptr<GLContextVirtual> context(new GLContextVirtual(
- share_group, base_context.get(), decoder_->AsWeakPtr()));
+ auto context = base::MakeRefCounted<GLContextVirtual>(
+ share_group, base_context.get(), decoder_->AsWeakPtr());
EXPECT_TRUE(context->Initialize(GetGLSurface(), gl::GLContextAttribs()));
EXPECT_TRUE(context->MakeCurrent(GetGLSurface()));
}
{
- scoped_refptr<gl::GLContextStub> base_context = new gl::GLContextStub;
+ auto base_context = base::MakeRefCounted<gl::GLContextStub>();
gl::GLShareGroup* share_group = base_context->share_group();
share_group->SetSharedContext(GetGLSurface(), base_context.get());
- scoped_refptr<GLContextVirtual> context(new GLContextVirtual(
- share_group, base_context.get(), decoder_->AsWeakPtr()));
+ auto context = base::MakeRefCounted<GLContextVirtual>(
+ share_group, base_context.get(), decoder_->AsWeakPtr());
EXPECT_TRUE(context->Initialize(GetGLSurface(), gl::GLContextAttribs()));
EXPECT_TRUE(context->MakeCurrent(GetGLSurface()));
}
}
+// Tests that CheckStickyGraphicsResetStatus gets the state from the real
+// context, but "virtualizes" the guilty party (i.e. makes it unknown).
+TEST_F(GLContextVirtualTest, CheckStickyGraphicsResetStatus) {
+ EXPECT_CALL(*gl_, GetError())
+ .Times(AnyNumber())
+ .WillRepeatedly(Return(GL_NO_ERROR));
+ auto base_context = base::MakeRefCounted<gl::GLContextStub>();
+ const char gl_extensions[] = "GL_KHR_robustness";
+ base_context->SetExtensionsString(gl_extensions);
+
+ gl::GLShareGroup* share_group = base_context->share_group();
+ share_group->SetSharedContext(GetGLSurface(), base_context.get());
+ auto context = base::MakeRefCounted<GLContextVirtual>(
+ share_group, base_context.get(), decoder_->AsWeakPtr());
+ EXPECT_TRUE(context->Initialize(GetGLSurface(), gl::GLContextAttribs()));
+ EXPECT_TRUE(context->MakeCurrent(GetGLSurface()));
+
+ // If no reset, GLContextVirtual should report no error.
+ EXPECT_CALL(*gl_, GetGraphicsResetStatusARB()).WillOnce(Return(GL_NO_ERROR));
+ EXPECT_EQ(unsigned{GL_NO_ERROR}, context->CheckStickyGraphicsResetStatus());
+
+ // If reset, GLContextVirtual should report an error, but with an unknown
+ // guilty context.
+ EXPECT_CALL(*gl_, GetGraphicsResetStatusARB())
+ .WillOnce(Return(GL_GUILTY_CONTEXT_RESET_ARB));
+ EXPECT_EQ(unsigned{GL_UNKNOWN_CONTEXT_RESET_ARB},
+ context->CheckStickyGraphicsResetStatus());
+ // The underlying real context still knows, though.
+ EXPECT_EQ(unsigned{GL_GUILTY_CONTEXT_RESET_ARB},
+ base_context->CheckStickyGraphicsResetStatus());
+}
+
} // anonymous namespace
} // namespace gles2
} // namespace gpu
diff --git a/chromium/gpu/command_buffer/service/gl_utils.cc b/chromium/gpu/command_buffer/service/gl_utils.cc
index ee8cd5d2303..87ceecf75a1 100644
--- a/chromium/gpu/command_buffer/service/gl_utils.cc
+++ b/chromium/gpu/command_buffer/service/gl_utils.cc
@@ -1025,6 +1025,7 @@ bool ValidateCopyTextureCHROMIUMInternalFormats(const FeatureInfo* feature_info,
source_internal_format == GL_BGRA8_EXT ||
source_internal_format == GL_RGB_YCBCR_420V_CHROMIUM ||
source_internal_format == GL_RGB_YCBCR_422_CHROMIUM ||
+ source_internal_format == GL_RGB_YCBCR_P010_CHROMIUM ||
source_internal_format == GL_R16_EXT ||
source_internal_format == GL_RGB10_A2;
if (!valid_source_format) {
@@ -1041,5 +1042,77 @@ bool ValidateCopyTextureCHROMIUMInternalFormats(const FeatureInfo* feature_info,
return true;
}
+GLenum GetTextureBindingQuery(GLenum texture_type) {
+ switch (texture_type) {
+ case GL_TEXTURE_2D:
+ return GL_TEXTURE_BINDING_2D;
+ case GL_TEXTURE_2D_ARRAY:
+ return GL_TEXTURE_BINDING_2D_ARRAY;
+ case GL_TEXTURE_2D_MULTISAMPLE:
+ return GL_TEXTURE_BINDING_2D_MULTISAMPLE;
+ case GL_TEXTURE_2D_MULTISAMPLE_ARRAY:
+ return GL_TEXTURE_BINDING_2D_MULTISAMPLE_ARRAY;
+ case GL_TEXTURE_3D:
+ return GL_TEXTURE_BINDING_3D;
+ case GL_TEXTURE_EXTERNAL_OES:
+ return GL_TEXTURE_BINDING_EXTERNAL_OES;
+ case GL_TEXTURE_RECTANGLE:
+ return GL_TEXTURE_BINDING_RECTANGLE;
+ case GL_TEXTURE_CUBE_MAP:
+ return GL_TEXTURE_BINDING_CUBE_MAP;
+ default:
+ NOTREACHED();
+ return 0;
+ }
+}
+
+gfx::OverlayTransform GetGFXOverlayTransform(GLenum plane_transform) {
+ switch (plane_transform) {
+ case GL_OVERLAY_TRANSFORM_NONE_CHROMIUM:
+ return gfx::OVERLAY_TRANSFORM_NONE;
+ case GL_OVERLAY_TRANSFORM_FLIP_HORIZONTAL_CHROMIUM:
+ return gfx::OVERLAY_TRANSFORM_FLIP_HORIZONTAL;
+ case GL_OVERLAY_TRANSFORM_FLIP_VERTICAL_CHROMIUM:
+ return gfx::OVERLAY_TRANSFORM_FLIP_VERTICAL;
+ case GL_OVERLAY_TRANSFORM_ROTATE_90_CHROMIUM:
+ return gfx::OVERLAY_TRANSFORM_ROTATE_90;
+ case GL_OVERLAY_TRANSFORM_ROTATE_180_CHROMIUM:
+ return gfx::OVERLAY_TRANSFORM_ROTATE_180;
+ case GL_OVERLAY_TRANSFORM_ROTATE_270_CHROMIUM:
+ return gfx::OVERLAY_TRANSFORM_ROTATE_270;
+ default:
+ return gfx::OVERLAY_TRANSFORM_INVALID;
+ }
+}
+
+bool GetGFXBufferFormat(GLenum internal_format, gfx::BufferFormat* out_format) {
+ switch (internal_format) {
+ case GL_RGBA8_OES:
+ *out_format = gfx::BufferFormat::RGBA_8888;
+ return true;
+ case GL_BGRA8_EXT:
+ *out_format = gfx::BufferFormat::BGRA_8888;
+ return true;
+ case GL_RGBA16F_EXT:
+ *out_format = gfx::BufferFormat::RGBA_F16;
+ return true;
+ case GL_R8_EXT:
+ *out_format = gfx::BufferFormat::R_8;
+ return true;
+ default:
+ return false;
+ }
+}
+
+bool GetGFXBufferUsage(GLenum buffer_usage, gfx::BufferUsage* out_usage) {
+ switch (buffer_usage) {
+ case GL_SCANOUT_CHROMIUM:
+ *out_usage = gfx::BufferUsage::SCANOUT;
+ return true;
+ default:
+ return false;
+ }
+}
+
} // namespace gles2
} // namespace gpu
diff --git a/chromium/gpu/command_buffer/service/gl_utils.h b/chromium/gpu/command_buffer/service/gl_utils.h
index 28e38fca0e7..9e76d767954 100644
--- a/chromium/gpu/command_buffer/service/gl_utils.h
+++ b/chromium/gpu/command_buffer/service/gl_utils.h
@@ -13,7 +13,10 @@
#include "build/build_config.h"
#include "gpu/command_buffer/common/constants.h"
+#include "ui/gfx/buffer_types.h"
#include "ui/gfx/geometry/rect.h"
+#include "ui/gfx/overlay_transform.h"
+#include "ui/gfx/rrect_f.h"
#include "ui/gfx/transform.h"
#include "ui/gl/gl_bindings.h"
@@ -56,6 +59,7 @@ struct CALayerSharedState {
float opacity;
bool is_clipped;
gfx::Rect clip_rect;
+ gfx::RRectF rounded_corner_bounds;
int sorting_context_id;
gfx::Transform transform;
};
@@ -149,6 +153,14 @@ bool ValidateCopyTextureCHROMIUMInternalFormats(const FeatureInfo* feature_info,
GLenum source_internal_format,
GLenum dest_internal_format,
std::string* output_error_msg);
+
+GLenum GetTextureBindingQuery(GLenum texture_type);
+
+gfx::OverlayTransform GetGFXOverlayTransform(GLenum plane_transform);
+
+bool GetGFXBufferFormat(GLenum internal_format, gfx::BufferFormat* out_format);
+bool GetGFXBufferUsage(GLenum buffer_usage, gfx::BufferUsage* out_usage);
+
} // namespace gles2
} // namespace gpu
diff --git a/chromium/gpu/command_buffer/service/gles2_cmd_apply_framebuffer_attachment_cmaa_intel.cc b/chromium/gpu/command_buffer/service/gles2_cmd_apply_framebuffer_attachment_cmaa_intel.cc
index d5492209fea..655f84e0f03 100644
--- a/chromium/gpu/command_buffer/service/gles2_cmd_apply_framebuffer_attachment_cmaa_intel.cc
+++ b/chromium/gpu/command_buffer/service/gles2_cmd_apply_framebuffer_attachment_cmaa_intel.cc
@@ -235,7 +235,7 @@ void ApplyFramebufferAttachmentCMAAINTELResourceManager::
TextureRef* texture =
texture_manager->GetTexture(attachment->object_name());
const bool rgba_immutable =
- texture->texture()->IsImmutable() &&
+ texture->texture()->HasImmutableStorage() &&
TextureManager::ExtractFormatFromStorageFormat(internal_format) ==
GL_RGBA;
const bool do_copy = !rgba_immutable;
@@ -477,9 +477,6 @@ void ApplyFramebufferAttachmentCMAAINTELResourceManager::ApplyCMAAEffectTexture(
}
glBindImageTextureEXT(1, dest_texture, 0, GL_FALSE, 0, GL_WRITE_ONLY,
GL_RGBA8);
- // TODO(samans): Investigate why there is an error. https://crbug.com/938597
- GLenum error = glGetError();
- DLOG_IF(ERROR, error != GL_NO_ERROR) << "GL ERROR: " << error;
glActiveTexture(GL_TEXTURE0);
glBindTexture(GL_TEXTURE_2D, working_color_texture_);
diff --git a/chromium/gpu/command_buffer/service/gles2_cmd_copy_texture_chromium.cc b/chromium/gpu/command_buffer/service/gles2_cmd_copy_texture_chromium.cc
index a58de9551ac..6259c99d424 100644
--- a/chromium/gpu/command_buffer/service/gles2_cmd_copy_texture_chromium.cc
+++ b/chromium/gpu/command_buffer/service/gles2_cmd_copy_texture_chromium.cc
@@ -44,6 +44,7 @@ enum {
S_FORMAT_RGB_YCBCR_422_CHROMIUM,
S_FORMAT_COMPRESSED,
S_FORMAT_RGB10_A2,
+ S_FORMAT_RGB_YCBCR_P010_CHROMIUM,
NUM_S_FORMAT
};
@@ -189,6 +190,9 @@ ShaderId GetFragmentShaderId(bool premultiply_alpha,
case GL_RGB10_A2:
sourceFormatIndex = S_FORMAT_RGB10_A2;
break;
+ case GL_RGB_YCBCR_P010_CHROMIUM:
+ sourceFormatIndex = S_FORMAT_RGB_YCBCR_P010_CHROMIUM;
+ break;
default:
NOTREACHED() << "Invalid source format "
<< gl::GLEnums::GetStringEnum(source_format);
@@ -301,10 +305,11 @@ ShaderId GetFragmentShaderId(bool premultiply_alpha,
const char* kShaderPrecisionPreamble =
"#ifdef GL_ES\n"
- "precision mediump float;\n"
"#ifdef GL_FRAGMENT_PRECISION_HIGH\n"
+ "precision highp float;\n"
"#define TexCoordPrecision highp\n"
"#else\n"
+ "precision mediump float;\n"
"#define TexCoordPrecision mediump\n"
"#endif\n"
"#else\n"
diff --git a/chromium/gpu/command_buffer/service/gles2_cmd_decoder.cc b/chromium/gpu/command_buffer/service/gles2_cmd_decoder.cc
index 9d5d01c2879..7756f497b81 100644
--- a/chromium/gpu/command_buffer/service/gles2_cmd_decoder.cc
+++ b/chromium/gpu/command_buffer/service/gles2_cmd_decoder.cc
@@ -97,7 +97,6 @@
#include "ui/gfx/gpu_fence.h"
#include "ui/gfx/gpu_memory_buffer.h"
#include "ui/gfx/ipc/color/gfx_param_traits.h"
-#include "ui/gfx/overlay_transform.h"
#include "ui/gfx/transform.h"
#include "ui/gl/ca_renderer_layer_params.h"
#include "ui/gl/dc_renderer_layer_params.h"
@@ -133,25 +132,6 @@ const char kWEBGLMultiDrawExtension[] = "GL_WEBGL_multi_draw";
const char kWEBGLMultiDrawInstancedExtension[] =
"GL_WEBGL_multi_draw_instanced";
-gfx::OverlayTransform GetGFXOverlayTransform(GLenum plane_transform) {
- switch (plane_transform) {
- case GL_OVERLAY_TRANSFORM_NONE_CHROMIUM:
- return gfx::OVERLAY_TRANSFORM_NONE;
- case GL_OVERLAY_TRANSFORM_FLIP_HORIZONTAL_CHROMIUM:
- return gfx::OVERLAY_TRANSFORM_FLIP_HORIZONTAL;
- case GL_OVERLAY_TRANSFORM_FLIP_VERTICAL_CHROMIUM:
- return gfx::OVERLAY_TRANSFORM_FLIP_VERTICAL;
- case GL_OVERLAY_TRANSFORM_ROTATE_90_CHROMIUM:
- return gfx::OVERLAY_TRANSFORM_ROTATE_90;
- case GL_OVERLAY_TRANSFORM_ROTATE_180_CHROMIUM:
- return gfx::OVERLAY_TRANSFORM_ROTATE_180;
- case GL_OVERLAY_TRANSFORM_ROTATE_270_CHROMIUM:
- return gfx::OVERLAY_TRANSFORM_ROTATE_270;
- default:
- return gfx::OVERLAY_TRANSFORM_INVALID;
- }
-}
-
template <typename MANAGER_TYPE, typename OBJECT_TYPE>
GLuint GetClientId(const MANAGER_TYPE* manager, const OBJECT_TYPE* object) {
DCHECK(manager);
@@ -2379,11 +2359,6 @@ class GLES2DecoderImpl : public GLES2Decoder, public ErrorStateClient {
surface_->DeferDraws();
}
- bool IsRobustnessSupported() {
- return has_robustness_extension_ &&
- context_->WasAllocatedUsingRobustnessExtension();
- }
-
error::Error WillAccessBoundFramebufferForDraw() {
if (ShouldDeferDraws())
return error::kDeferCommandUntilLater;
@@ -2684,7 +2659,6 @@ class GLES2DecoderImpl : public GLES2Decoder, public ErrorStateClient {
// Number of commands remaining to be processed in DoCommands().
int commands_to_process_;
- bool has_robustness_extension_;
bool context_was_lost_;
bool reset_by_robustness_extension_;
bool supports_post_sub_buffer_;
@@ -2789,7 +2763,7 @@ class GLES2DecoderImpl : public GLES2Decoder, public ErrorStateClient {
// future when our context is current.
std::set<scoped_refptr<TextureRef>> texture_refs_pending_destruction_;
- base::WeakPtrFactory<GLES2DecoderImpl> weak_ptr_factory_;
+ base::WeakPtrFactory<GLES2DecoderImpl> weak_ptr_factory_{this};
DISALLOW_COPY_AND_ASSIGN(GLES2DecoderImpl);
};
@@ -3461,7 +3435,6 @@ GLES2DecoderImpl::GLES2DecoderImpl(
validators_(group_->feature_info()->validators()),
feature_info_(group_->feature_info()),
frame_number_(0),
- has_robustness_extension_(false),
context_was_lost_(false),
reset_by_robustness_extension_(false),
supports_post_sub_buffer_(false),
@@ -3491,8 +3464,7 @@ GLES2DecoderImpl::GLES2DecoderImpl(
validation_fbo_multisample_(0),
validation_fbo_(0),
texture_manager_service_id_generation_(0),
- force_shader_name_hashing_for_test(false),
- weak_ptr_factory_(this) {
+ force_shader_name_hashing_for_test(false) {
DCHECK(client);
DCHECK(group);
}
@@ -3537,6 +3509,9 @@ gpu::ContextResult GLES2DecoderImpl::Initialize(
if (workarounds().rely_on_implicit_sync_for_swap_buffers)
surface_->SetRelyOnImplicitSync();
+ if (workarounds().force_gl_flush_on_swap_buffers)
+ surface_->SetForceGlFlushOnSwapBuffers();
+
// Create GPU Tracer for timing values.
gpu_tracer_.reset(new GPUTracer(this));
@@ -3913,10 +3888,6 @@ gpu::ContextResult GLES2DecoderImpl::Initialize(
api()->glEnableFn(GL_TEXTURE_CUBE_MAP_SEAMLESS);
}
- has_robustness_extension_ = features().arb_robustness ||
- features().khr_robustness ||
- features().ext_robustness;
-
GLint range[2] = {0, 0};
GLint precision = 0;
QueryShaderPrecisionFormat(gl_version_info(), GL_FRAGMENT_SHADER,
@@ -4082,6 +4053,18 @@ gpu::ContextResult GLES2DecoderImpl::Initialize(
api()->glHintFn(GL_TEXTURE_FILTERING_HINT_CHROMIUM, GL_NICEST);
}
+ if (CheckResetStatus()) {
+ // If the context was lost at any point before or during initialization, the
+ // values queried from the driver could be bogus, and potentially
+ // inconsistent between various ContextStates on the same underlying real GL
+ // context. Make sure to report the failure early, to not allow virtualized
+ // context switches in that case.
+ LOG(ERROR)
+ << " GLES2DecoderImpl: Context reset detected after initialization.";
+ group_->LoseContexts(error::kUnknown);
+ return gpu::ContextResult::kTransientFailure;
+ }
+
return gpu::ContextResult::kSuccess;
}
@@ -4264,6 +4247,8 @@ Capabilities GLES2DecoderImpl::GetCapabilities() {
.disable_biplanar_gpu_memory_buffers_for_video_frames;
caps.image_xr30 = feature_info_->feature_flags().chromium_image_xr30;
caps.image_xb30 = feature_info_->feature_flags().chromium_image_xb30;
+ caps.image_ycbcr_p010 =
+ feature_info_->feature_flags().chromium_image_ycbcr_p010;
caps.max_copy_texture_chromium_size =
workarounds().max_copy_texture_chromium_size;
caps.render_buffer_format_bgra8888 =
@@ -9780,9 +9765,12 @@ void GLES2DecoderImpl::DoSetDrawRectangleCHROMIUM(GLint x,
if (!surface_->SetDrawRectangle(rect)) {
LOCAL_SET_GL_ERROR(GL_INVALID_OPERATION, "glSetDrawRectangleCHROMIUM",
"failed on surface");
+ // If SetDrawRectangle failed, we may not have a current context any
+ // more, make sure to report lost context.
LOG(ERROR) << "Context lost because SetDrawRectangleCHROMIUM failed.";
MarkContextLost(error::kUnknown);
group_->LoseContexts(error::kUnknown);
+ return;
}
OnFboChanged();
}
@@ -9802,6 +9790,11 @@ void GLES2DecoderImpl::DoSetEnableDCLayersCHROMIUM(GLboolean enable) {
if (!surface_->SetEnableDCLayers(!!enable)) {
LOCAL_SET_GL_ERROR(GL_INVALID_OPERATION, "glSetEnableDCLayersCHROMIUM",
"failed on surface");
+ // If SetEnableDCLayers failed, we may not have a current context any
+ // more, make sure to report lost context.
+ LOG(ERROR) << "Context lost because SetEnableDCLayers failed.";
+ MarkContextLost(error::kUnknown);
+ group_->LoseContexts(error::kUnknown);
}
}
@@ -10745,8 +10738,8 @@ bool GLES2DecoderImpl::PrepareTexturesForRender(bool* textures_set,
LOCAL_RENDER_WARNING(
std::string("texture bound to texture unit ") +
base::NumberToString(texture_unit_index) +
- " is not renderable. It maybe non-power-of-2 and have"
- " incompatible texture filtering.");
+ " is not renderable. It might be non-power-of-2 or have"
+ " incompatible texture filtering.");
}
continue;
} else if (!texture_ref->texture()->CompatibleWithSamplerUniformType(
@@ -11999,6 +11992,21 @@ void GLES2DecoderImpl::DoGetShaderiv(GLuint shader_id,
return;
}
+ if (pname == GL_COMPILE_STATUS) {
+ if (shader->HasCompiled()) {
+ *params = compile_shader_always_succeeds_ ? true : shader->valid();
+ return;
+ }
+ // Lookup if there is compiled shader cache
+ if (program_manager()->HasCachedCompileStatus(shader)) {
+ // Only successful compile is cached
+ // Fail-to-compile shader is not cached as needs compiling
+ // to get log info
+ *params = true;
+ return;
+ }
+ }
+
// Compile now for statuses that require it.
switch (pname) {
case GL_COMPILE_STATUS:
@@ -12094,34 +12102,6 @@ error::Error GLES2DecoderImpl::HandleGetProgramInfoLog(
return error::kNoError;
}
-error::Error GLES2DecoderImpl::HandleGetProgramResourceiv(
- uint32_t immediate_data_size,
- const volatile void* cmd_data) {
- // Unimplemented for WebGL 2.0 Compute context.
- return error::kUnknownCommand;
-}
-
-error::Error GLES2DecoderImpl::HandleGetProgramResourceIndex(
- uint32_t immediate_data_size,
- const volatile void* cmd_data) {
- // Unimplemented for WebGL 2.0 Compute context.
- return error::kUnknownCommand;
-}
-
-error::Error GLES2DecoderImpl::HandleGetProgramResourceLocation(
- uint32_t immediate_data_size,
- const volatile void* cmd_data) {
- // Unimplemented for WebGL 2.0 Compute context.
- return error::kUnknownCommand;
-}
-
-error::Error GLES2DecoderImpl::HandleGetProgramResourceName(
- uint32_t immediate_data_size,
- const volatile void* cmd_data) {
- // Unimplemented for WebGL 2.0 Compute context.
- return error::kUnknownCommand;
-}
-
error::Error GLES2DecoderImpl::HandleGetShaderInfoLog(
uint32_t immediate_data_size,
const volatile void* cmd_data) {
@@ -13484,20 +13464,22 @@ error::Error GLES2DecoderImpl::HandleScheduleCALayerSharedStateCHROMIUM(
const volatile gles2::cmds::ScheduleCALayerSharedStateCHROMIUM*>(
cmd_data);
+ // 4 for |clip_rect|, 5 for |rounded_corner_bounds|, 16 for |transform|.
const GLfloat* mem = GetSharedMemoryAs<const GLfloat*>(c.shm_id, c.shm_offset,
- 20 * sizeof(GLfloat));
+ 25 * sizeof(GLfloat));
if (!mem) {
return error::kOutOfBounds;
}
gfx::RectF clip_rect(mem[0], mem[1], mem[2], mem[3]);
- gfx::Transform transform(mem[4], mem[8], mem[12], mem[16],
- mem[5], mem[9], mem[13], mem[17],
- mem[6], mem[10], mem[14], mem[18],
- mem[7], mem[11], mem[15], mem[19]);
+ gfx::RRectF rounded_corner_bounds(mem[4], mem[5], mem[6], mem[7], mem[8]);
+ gfx::Transform transform(mem[9], mem[13], mem[17], mem[21], mem[10], mem[14],
+ mem[18], mem[22], mem[11], mem[15], mem[19], mem[23],
+ mem[12], mem[16], mem[20], mem[24]);
ca_layer_shared_state_.reset(new CALayerSharedState);
ca_layer_shared_state_->opacity = c.opacity;
ca_layer_shared_state_->is_clipped = c.is_clipped ? true : false;
ca_layer_shared_state_->clip_rect = gfx::ToEnclosingRect(clip_rect);
+ ca_layer_shared_state_->rounded_corner_bounds = rounded_corner_bounds;
ca_layer_shared_state_->sorting_context_id = c.sorting_context_id;
ca_layer_shared_state_->transform = transform;
return error::kNoError;
@@ -13552,6 +13534,7 @@ error::Error GLES2DecoderImpl::HandleScheduleCALayerCHROMIUM(
ui::CARendererLayerParams params = ui::CARendererLayerParams(
ca_layer_shared_state_->is_clipped, ca_layer_shared_state_->clip_rect,
+ ca_layer_shared_state_->rounded_corner_bounds,
ca_layer_shared_state_->sorting_context_id,
ca_layer_shared_state_->transform, image, contents_rect,
gfx::ToEnclosingRect(bounds_rect), c.background_color, c.edge_aa_mask,
@@ -16659,8 +16642,10 @@ void GLES2DecoderImpl::FinishAsyncSwapBuffers(
void GLES2DecoderImpl::FinishSwapBuffers(gfx::SwapResult result) {
if (result == gfx::SwapResult::SWAP_FAILED) {
+ // If SwapBuffers/SwapBuffersWithBounds/PostSubBuffer failed, we may not
+ // have a current context any more.
LOG(ERROR) << "Context lost because SwapBuffers failed.";
- if (!CheckResetStatus()) {
+ if (!context_->IsCurrent(surface_.get()) || !CheckResetStatus()) {
MarkContextLost(error::kUnknown);
group_->LoseContexts(error::kUnknown);
}
@@ -17021,39 +17006,32 @@ bool GLES2DecoderImpl::CheckResetStatus() {
DCHECK(!WasContextLost());
DCHECK(context_->IsCurrent(nullptr));
- if (IsRobustnessSupported()) {
- // If the reason for the call was a GL error, we can try to determine the
- // reset status more accurately.
- GLenum driver_status = api()->glGetGraphicsResetStatusARBFn();
- if (driver_status == GL_NO_ERROR)
- return false;
-
- LOG(ERROR) << (surface_->IsOffscreen() ? "Offscreen" : "Onscreen")
- << " context lost via ARB/EXT_robustness. Reset status = "
- << GLES2Util::GetStringEnum(driver_status);
+ // If the reason for the call was a GL error, we can try to determine the
+ // reset status more accurately.
+ GLenum driver_status = context_->CheckStickyGraphicsResetStatus();
+ if (driver_status == GL_NO_ERROR)
+ return false;
- // Don't pretend we know which client was responsible.
- if (workarounds().use_virtualized_gl_contexts)
- driver_status = GL_UNKNOWN_CONTEXT_RESET_ARB;
+ LOG(ERROR) << (surface_->IsOffscreen() ? "Offscreen" : "Onscreen")
+ << " context lost via ARB/EXT_robustness. Reset status = "
+ << GLES2Util::GetStringEnum(driver_status);
- switch (driver_status) {
- case GL_GUILTY_CONTEXT_RESET_ARB:
- MarkContextLost(error::kGuilty);
- break;
- case GL_INNOCENT_CONTEXT_RESET_ARB:
- MarkContextLost(error::kInnocent);
- break;
- case GL_UNKNOWN_CONTEXT_RESET_ARB:
- MarkContextLost(error::kUnknown);
- break;
- default:
- NOTREACHED();
- return false;
- }
- reset_by_robustness_extension_ = true;
- return true;
+ switch (driver_status) {
+ case GL_GUILTY_CONTEXT_RESET_ARB:
+ MarkContextLost(error::kGuilty);
+ break;
+ case GL_INNOCENT_CONTEXT_RESET_ARB:
+ MarkContextLost(error::kInnocent);
+ break;
+ case GL_UNKNOWN_CONTEXT_RESET_ARB:
+ MarkContextLost(error::kUnknown);
+ break;
+ default:
+ NOTREACHED();
+ return false;
}
- return false;
+ reset_by_robustness_extension_ = true;
+ return true;
}
error::Error GLES2DecoderImpl::HandleDescheduleUntilFinishedCHROMIUM(
@@ -18284,7 +18262,7 @@ void GLES2DecoderImpl::TexStorageImpl(GLenum target,
level_depth = std::max(1, level_depth >> 1);
}
texture->ApplyFormatWorkarounds(feature_info_.get());
- texture->SetImmutable(true);
+ texture->SetImmutable(true, true);
}
if (workarounds().reset_base_mipmap_level_before_texstorage &&
@@ -18367,26 +18345,18 @@ void GLES2DecoderImpl::DoTexStorage2DImageCHROMIUM(GLenum target,
}
gfx::BufferFormat buffer_format;
- switch (internal_format) {
- case GL_RGBA8_OES:
- buffer_format = gfx::BufferFormat::RGBA_8888;
- break;
- case GL_BGRA8_EXT:
- buffer_format = gfx::BufferFormat::BGRA_8888;
- break;
- case GL_RGBA16F_EXT:
- buffer_format = gfx::BufferFormat::RGBA_F16;
- break;
- case GL_R8_EXT:
- buffer_format = gfx::BufferFormat::R_8;
- break;
- default:
- LOCAL_SET_GL_ERROR(GL_INVALID_ENUM, "glTexStorage2DImageCHROMIUM",
- "Invalid buffer format");
- return;
+ if (!GetGFXBufferFormat(internal_format, &buffer_format)) {
+ LOCAL_SET_GL_ERROR(GL_INVALID_ENUM, "glTexStorage2DImageCHROMIUM",
+ "Invalid buffer format");
+ return;
}
- DCHECK_EQ(buffer_usage, static_cast<GLenum>(GL_SCANOUT_CHROMIUM));
+ gfx::BufferUsage gfx_buffer_usage;
+ if (!GetGFXBufferUsage(buffer_usage, &gfx_buffer_usage)) {
+ LOCAL_SET_GL_ERROR(GL_INVALID_ENUM, "glTexStorage2DImageCHROMIUM",
+ "Invalid buffer usage");
+ return;
+ }
if (!GetContextGroup()->image_factory()) {
LOCAL_SET_GL_ERROR(GL_INVALID_OPERATION, "glTexStorage2DImageCHROMIUM",
@@ -18397,7 +18367,7 @@ void GLES2DecoderImpl::DoTexStorage2DImageCHROMIUM(GLenum target,
bool is_cleared = false;
scoped_refptr<gl::GLImage> image =
GetContextGroup()->image_factory()->CreateAnonymousImage(
- gfx::Size(width, height), buffer_format, gfx::BufferUsage::SCANOUT,
+ gfx::Size(width, height), buffer_format, gfx_buffer_usage,
&is_cleared);
if (!image || !image->BindTexImage(target)) {
LOCAL_SET_GL_ERROR(GL_INVALID_OPERATION, "glTexStorage2DImageCHROMIUM",
@@ -18420,7 +18390,7 @@ void GLES2DecoderImpl::DoTexStorage2DImageCHROMIUM(GLenum target,
if (texture->IsAttachedToFramebuffer())
framebuffer_state_.clear_state_dirty = true;
- texture->SetImmutable(true);
+ texture->SetImmutable(true, false);
}
void GLES2DecoderImpl::DoProduceTextureDirectCHROMIUM(
@@ -19438,7 +19408,7 @@ void GLES2DecoderImpl::DoScheduleDCLayerCHROMIUM(GLuint y_texture_id,
GLint clip_height,
GLuint protected_video_type) {
if (protected_video_type >
- static_cast<GLuint>(ui::ProtectedVideoType::kMaxValue)) {
+ static_cast<GLuint>(gfx::ProtectedVideoType::kMaxValue)) {
LOCAL_SET_GL_ERROR(GL_INVALID_VALUE, "glScheduleDCLayerCHROMIUM",
"invalid protected video type");
return;
@@ -19483,7 +19453,7 @@ void GLES2DecoderImpl::DoScheduleDCLayerCHROMIUM(GLuint y_texture_id,
params.is_clipped = is_clipped;
params.clip_rect = gfx::Rect(clip_x, clip_y, clip_width, clip_height);
params.protected_video_type =
- static_cast<ui::ProtectedVideoType>(protected_video_type);
+ static_cast<gfx::ProtectedVideoType>(protected_video_type);
if (!surface_->ScheduleDCLayer(params)) {
LOCAL_SET_GL_ERROR(GL_INVALID_OPERATION, "glScheduleDCLayerCHROMIUM",
diff --git a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_autogen.h b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_autogen.h
index d79dc201c6e..617ee4f1fb9 100644
--- a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_autogen.h
+++ b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_autogen.h
@@ -4724,12 +4724,42 @@ error::Error GLES2DecoderImpl::HandleDispatchCompute(
return error::kUnknownCommand;
}
+error::Error GLES2DecoderImpl::HandleDispatchComputeIndirect(
+ uint32_t immediate_data_size,
+ const volatile void* cmd_data) {
+ return error::kUnknownCommand;
+}
+
error::Error GLES2DecoderImpl::HandleGetProgramInterfaceiv(
uint32_t immediate_data_size,
const volatile void* cmd_data) {
return error::kUnknownCommand;
}
+error::Error GLES2DecoderImpl::HandleGetProgramResourceIndex(
+ uint32_t immediate_data_size,
+ const volatile void* cmd_data) {
+ return error::kUnknownCommand;
+}
+
+error::Error GLES2DecoderImpl::HandleGetProgramResourceName(
+ uint32_t immediate_data_size,
+ const volatile void* cmd_data) {
+ return error::kUnknownCommand;
+}
+
+error::Error GLES2DecoderImpl::HandleGetProgramResourceiv(
+ uint32_t immediate_data_size,
+ const volatile void* cmd_data) {
+ return error::kUnknownCommand;
+}
+
+error::Error GLES2DecoderImpl::HandleGetProgramResourceLocation(
+ uint32_t immediate_data_size,
+ const volatile void* cmd_data) {
+ return error::kUnknownCommand;
+}
+
error::Error GLES2DecoderImpl::HandleMemoryBarrierEXT(
uint32_t immediate_data_size,
const volatile void* cmd_data) {
diff --git a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_mock.cc b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_mock.cc
index 7797ac41650..96c1f595156 100644
--- a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_mock.cc
+++ b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_mock.cc
@@ -13,8 +13,7 @@ MockGLES2Decoder::MockGLES2Decoder(
DecoderClient* client,
CommandBufferServiceBase* command_buffer_service,
Outputter* outputter)
- : GLES2Decoder(client, command_buffer_service, outputter),
- weak_ptr_factory_(this) {
+ : GLES2Decoder(client, command_buffer_service, outputter) {
ON_CALL(*this, MakeCurrent())
.WillByDefault(testing::Return(true));
}
diff --git a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_mock.h b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_mock.h
index 71423102dd5..be417a0d848 100644
--- a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_mock.h
+++ b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_mock.h
@@ -177,7 +177,7 @@ class MockGLES2Decoder : public GLES2Decoder {
void(CopyTexImageResourceManager* copy_texture_resource_manager));
private:
- base::WeakPtrFactory<MockGLES2Decoder> weak_ptr_factory_;
+ base::WeakPtrFactory<MockGLES2Decoder> weak_ptr_factory_{this};
DISALLOW_COPY_AND_ASSIGN(MockGLES2Decoder);
};
diff --git a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough.cc b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough.cc
index 15fd3f99101..fe16e764a15 100644
--- a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough.cc
+++ b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough.cc
@@ -11,12 +11,14 @@
#include "base/callback.h"
#include "base/stl_util.h"
#include "base/strings/string_split.h"
+#include "build/build_config.h"
#include "gpu/command_buffer/service/command_buffer_service.h"
#include "gpu/command_buffer/service/decoder_client.h"
#include "gpu/command_buffer/service/feature_info.h"
#include "gpu/command_buffer/service/gl_utils.h"
#include "gpu/command_buffer/service/gpu_fence_manager.h"
#include "gpu/command_buffer/service/gpu_tracer.h"
+#include "gpu/command_buffer/service/image_factory.h"
#include "gpu/command_buffer/service/multi_draw_manager.h"
#include "gpu/command_buffer/service/passthrough_discardable_manager.h"
#include "gpu/command_buffer/service/program_cache.h"
@@ -569,11 +571,9 @@ GLES2DecoderPassthroughImpl::GLES2DecoderPassthroughImpl(
gpu_trace_level_(2),
gpu_trace_commands_(false),
gpu_debug_commands_(false),
- has_robustness_extension_(false),
context_lost_(false),
reset_by_robustness_extension_(false),
- lose_context_when_out_of_memory_(false),
- weak_ptr_factory_(this) {
+ lose_context_when_out_of_memory_(false) {
DCHECK(client);
DCHECK(group);
}
@@ -737,9 +737,20 @@ gpu::ContextResult GLES2DecoderPassthroughImpl::Initialize(
gl::GetRequestableGLExtensionsFromCurrentContext());
static constexpr const char* kRequiredFunctionalityExtensions[] = {
- "GL_ANGLE_memory_size", "GL_CHROMIUM_bind_uniform_location",
- "GL_CHROMIUM_sync_query", "GL_EXT_debug_marker",
- "GL_KHR_debug", "GL_NV_fence",
+ "GL_ANGLE_memory_size",
+ "GL_ANGLE_native_id",
+ "GL_ANGLE_texture_storage_external",
+ "GL_CHROMIUM_bind_uniform_location",
+ "GL_CHROMIUM_sync_query",
+ "GL_EXT_debug_marker",
+ "GL_KHR_debug",
+ "GL_NV_fence",
+ "GL_OES_EGL_image",
+ "GL_OES_EGL_image_external",
+ "GL_OES_EGL_image_external_essl3",
+#if defined(OS_MACOSX)
+ "GL_ANGLE_texture_rectangle",
+#endif
};
RequestExtensions(api(), requestable_extensions,
kRequiredFunctionalityExtensions,
@@ -780,9 +791,6 @@ gpu::ContextResult GLES2DecoderPassthroughImpl::Initialize(
"GL_NV_pack_subimage",
"GL_OES_compressed_ETC1_RGB8_texture",
"GL_OES_depth32",
- "GL_OES_EGL_image",
- "GL_OES_EGL_image_external",
- "GL_OES_EGL_image_external_essl3",
"GL_OES_packed_depth_stencil",
"GL_OES_rgb8_rgba8",
"GL_OES_vertex_array_object",
@@ -799,9 +807,15 @@ gpu::ContextResult GLES2DecoderPassthroughImpl::Initialize(
// Each context initializes its own feature info because some extensions may
// be enabled dynamically. Don't disallow any features, leave it up to ANGLE
// to dynamically enable extensions.
- feature_info_->Initialize(attrib_helper.context_type,
- true /* is_passthrough_cmd_decoder */,
- DisallowedFeatures());
+ InitializeFeatureInfo(attrib_helper.context_type, DisallowedFeatures(),
+ false);
+
+ // Support for CHROMIUM_texture_storage_image depends on the underlying
+ // ImageFactory's ability to create anonymous images.
+ gpu::ImageFactory* image_factory = group_->image_factory();
+ if (image_factory && image_factory->SupportsCreateAnonymousImage()) {
+ feature_info_->EnableCHROMIUMTextureStorageImage();
+ }
// Check for required extensions
// TODO(geofflang): verify
@@ -888,8 +902,6 @@ gpu::ContextResult GLES2DecoderPassthroughImpl::Initialize(
api()->glHintFn(GL_TEXTURE_FILTERING_HINT_CHROMIUM, GL_NICEST);
}
- has_robustness_extension_ = feature_info_->feature_flags().khr_robustness ||
- feature_info_->feature_flags().ext_robustness;
lose_context_when_out_of_memory_ =
attrib_helper.lose_context_when_out_of_memory;
@@ -1346,6 +1358,8 @@ gpu::Capabilities GLES2DecoderPassthroughImpl::GetCapabilities() {
.disable_biplanar_gpu_memory_buffers_for_video_frames;
caps.image_xr30 = feature_info_->feature_flags().chromium_image_xr30;
caps.image_xb30 = feature_info_->feature_flags().chromium_image_xb30;
+ caps.image_ycbcr_p010 =
+ feature_info_->feature_flags().chromium_image_ycbcr_p010;
caps.max_copy_texture_chromium_size =
feature_info_->workarounds().max_copy_texture_chromium_size;
caps.render_buffer_format_bgra8888 =
@@ -1358,6 +1372,7 @@ gpu::Capabilities GLES2DecoderPassthroughImpl::GetCapabilities() {
.status_values[GPU_FEATURE_TYPE_GPU_RASTERIZATION] ==
kGpuFeatureStatusEnabled;
caps.post_sub_buffer = surface_->SupportsPostSubBuffer();
+ caps.swap_buffers_with_bounds = surface_->SupportsSwapBuffersWithBounds();
caps.surfaceless = !offscreen_ && surface_->IsSurfaceless();
caps.flips_vertically = !offscreen_ && surface_->FlipsVertically();
caps.msaa_is_slow = feature_info_->workarounds().msaa_is_slow;
@@ -1371,6 +1386,8 @@ gpu::Capabilities GLES2DecoderPassthroughImpl::GetCapabilities() {
caps.protected_video_swap_chain = surface_->SupportsProtectedVideo();
caps.gpu_vsync = surface_->SupportsGpuVSync();
caps.texture_npot = feature_info_->feature_flags().npot_ok;
+ caps.texture_storage_image =
+ feature_info_->feature_flags().chromium_texture_storage_image;
caps.chromium_gpu_fence = feature_info_->feature_flags().chromium_gpu_fence;
caps.chromium_nonblocking_readback = true;
caps.num_surface_buffers = surface_->GetBufferCount();
@@ -1577,7 +1594,8 @@ GLES2DecoderPassthroughImpl::CreateAbstractTexture(GLenum target,
GLuint service_id = 0;
api()->glGenTexturesFn(1, &service_id);
scoped_refptr<TexturePassthrough> texture(
- new TexturePassthrough(service_id, target));
+ new TexturePassthrough(service_id, target, internal_format, width, height,
+ depth, border, format, type));
// Unretained is safe, because of the destruction cb.
std::unique_ptr<PassthroughAbstractTextureImpl> abstract_texture =
@@ -1627,10 +1645,24 @@ void GLES2DecoderPassthroughImpl::BeginDecoding() {
gpu_tracer_->BeginDecoding();
gpu_trace_commands_ = gpu_tracer_->IsTracing() && *gpu_decoder_category_;
gpu_debug_commands_ = log_commands() || debug() || gpu_trace_commands_;
+
+ auto it = active_queries_.find(GL_COMMANDS_ISSUED_CHROMIUM);
+ if (it != active_queries_.end()) {
+ DCHECK_EQ(it->second.command_processing_start_time, base::TimeTicks());
+ it->second.command_processing_start_time = base::TimeTicks::Now();
+ }
}
void GLES2DecoderPassthroughImpl::EndDecoding() {
gpu_tracer_->EndDecoding();
+
+ auto it = active_queries_.find(GL_COMMANDS_ISSUED_CHROMIUM);
+ if (it != active_queries_.end()) {
+ DCHECK_NE(it->second.command_processing_start_time, base::TimeTicks());
+ it->second.active_time +=
+ (base::TimeTicks::Now() - it->second.command_processing_start_time);
+ it->second.command_processing_start_time = base::TimeTicks();
+ }
}
const gpu::gles2::ContextState* GLES2DecoderPassthroughImpl::GetContextState() {
@@ -1749,6 +1781,19 @@ void GLES2DecoderPassthroughImpl::SetOptionalExtensionsRequestedForTesting(
request_optional_extensions_ = request_extensions;
}
+void GLES2DecoderPassthroughImpl::InitializeFeatureInfo(
+ ContextType context_type,
+ const DisallowedFeatures& disallowed_features,
+ bool force_reinitialize) {
+ feature_info_->Initialize(context_type, true /* is_passthrough_cmd_decoder */,
+ disallowed_features, force_reinitialize);
+
+ gpu::ImageFactory* image_factory = group_->image_factory();
+ if (image_factory && image_factory->SupportsCreateAnonymousImage()) {
+ feature_info_->EnableCHROMIUMTextureStorageImage();
+ }
+}
+
void* GLES2DecoderPassthroughImpl::GetScratchMemory(size_t size) {
if (scratch_memory_.size() < size) {
scratch_memory_.resize(size, 0);
@@ -1789,6 +1834,7 @@ error::Error GLES2DecoderPassthroughImpl::PatchGetNumericResults(GLenum pname,
case GL_COPY_READ_BUFFER_BINDING:
case GL_COPY_WRITE_BUFFER_BINDING:
case GL_UNIFORM_BUFFER_BINDING:
+ case GL_DISPATCH_INDIRECT_BUFFER_BINDING:
if (*params != 0 &&
!GetClientID(&resources_->buffer_id_map, *params, params)) {
return error::kInvalidArguments;
@@ -2019,37 +2065,29 @@ bool GLES2DecoderPassthroughImpl::CheckResetStatus() {
DCHECK(!WasContextLost());
DCHECK(context_->IsCurrent(nullptr));
- if (IsRobustnessSupported()) {
- // If the reason for the call was a GL error, we can try to determine the
- // reset status more accurately.
- GLenum driver_status = api()->glGetGraphicsResetStatusARBFn();
- if (driver_status == GL_NO_ERROR) {
- return false;
- }
-
- switch (driver_status) {
- case GL_GUILTY_CONTEXT_RESET_ARB:
- MarkContextLost(error::kGuilty);
- break;
- case GL_INNOCENT_CONTEXT_RESET_ARB:
- MarkContextLost(error::kInnocent);
- break;
- case GL_UNKNOWN_CONTEXT_RESET_ARB:
- MarkContextLost(error::kUnknown);
- break;
- default:
- NOTREACHED();
- return false;
- }
- reset_by_robustness_extension_ = true;
- return true;
+ // If the reason for the call was a GL error, we can try to determine the
+ // reset status more accurately.
+ GLenum driver_status = context_->CheckStickyGraphicsResetStatus();
+ if (driver_status == GL_NO_ERROR) {
+ return false;
}
- return false;
-}
-bool GLES2DecoderPassthroughImpl::IsRobustnessSupported() {
- return has_robustness_extension_ &&
- context_->WasAllocatedUsingRobustnessExtension();
+ switch (driver_status) {
+ case GL_GUILTY_CONTEXT_RESET_ARB:
+ MarkContextLost(error::kGuilty);
+ break;
+ case GL_INNOCENT_CONTEXT_RESET_ARB:
+ MarkContextLost(error::kInnocent);
+ break;
+ case GL_UNKNOWN_CONTEXT_RESET_ARB:
+ MarkContextLost(error::kUnknown);
+ break;
+ default:
+ NOTREACHED();
+ return false;
+ }
+ reset_by_robustness_extension_ = true;
+ return true;
}
bool GLES2DecoderPassthroughImpl::IsEmulatedQueryTarget(GLenum target) const {
@@ -2095,7 +2133,7 @@ error::Error GLES2DecoderPassthroughImpl::ProcessQueries(bool did_finish) {
case GL_COMMANDS_ISSUED_CHROMIUM:
result_available = GL_TRUE;
- result = GL_TRUE;
+ result = query.commands_issued_time.InMicroseconds();
break;
case GL_LATENCY_QUERY_CHROMIUM:
@@ -2508,7 +2546,9 @@ error::Error GLES2DecoderPassthroughImpl::BindTexImage2DCHROMIUMImpl(
GLenum target,
GLenum internalformat,
GLint imageId) {
- if (target != GL_TEXTURE_2D) {
+ TextureTarget target_enum = GLenumToTextureTarget(target);
+ if (target_enum == TextureTarget::kCubeMap ||
+ target_enum == TextureTarget::kUnkown) {
InsertError(GL_INVALID_ENUM, "Invalid target");
return error::kNoError;
}
@@ -2520,8 +2560,7 @@ error::Error GLES2DecoderPassthroughImpl::BindTexImage2DCHROMIUMImpl(
}
const BoundTexture& bound_texture =
- bound_textures_[static_cast<size_t>(TextureTarget::k2D)]
- [active_texture_unit_];
+ bound_textures_[static_cast<size_t>(target_enum)][active_texture_unit_];
if (bound_texture.texture == nullptr) {
InsertError(GL_INVALID_OPERATION, "No texture bound");
return error::kNoError;
@@ -2574,12 +2613,27 @@ bool GLES2DecoderPassthroughImpl::IsEmulatedFramebufferBound(
return false;
}
+void GLES2DecoderPassthroughImpl::CheckSwapBuffersAsyncResult(
+ const char* function_name,
+ uint64_t swap_id,
+ gfx::SwapResult result,
+ std::unique_ptr<gfx::GpuFence> gpu_fence) {
+ TRACE_EVENT_ASYNC_END0("gpu", "AsyncSwapBuffers", swap_id);
+ // Handling of the out-fence should have already happened before reaching
+ // this function, so we don't expect to get a valid fence here.
+ DCHECK(!gpu_fence);
+
+ CheckSwapBuffersResult(result, function_name);
+}
+
error::Error GLES2DecoderPassthroughImpl::CheckSwapBuffersResult(
gfx::SwapResult result,
const char* function_name) {
if (result == gfx::SwapResult::SWAP_FAILED) {
+ // If SwapBuffers/SwapBuffersWithBounds/PostSubBuffer failed, we may not
+ // have a current context any more.
LOG(ERROR) << "Context lost because " << function_name << " failed.";
- if (!CheckResetStatus()) {
+ if (!context_->IsCurrent(surface_.get()) || !CheckResetStatus()) {
MarkContextLost(error::kUnknown);
group_->LoseContexts(error::kUnknown);
return error::kLostContext;
diff --git a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough.h b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough.h
index 1fa45e8311c..91bef722774 100644
--- a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough.h
+++ b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough.h
@@ -377,6 +377,10 @@ class GPU_GLES2_EXPORT GLES2DecoderPassthroughImpl : public GLES2Decoder {
void SetOptionalExtensionsRequestedForTesting(bool request_extensions);
+ void InitializeFeatureInfo(ContextType context_type,
+ const DisallowedFeatures& disallowed_features,
+ bool force_reinitialize);
+
void* GetScratchMemory(size_t size);
template <typename T>
@@ -427,8 +431,6 @@ class GPU_GLES2_EXPORT GLES2DecoderPassthroughImpl : public GLES2Decoder {
GLenum PopError();
bool FlushErrors();
- bool IsRobustnessSupported();
-
bool IsEmulatedQueryTarget(GLenum target) const;
error::Error ProcessQueries(bool did_finish);
void RemovePendingQuery(GLuint service_id);
@@ -467,6 +469,10 @@ class GPU_GLES2_EXPORT GLES2DecoderPassthroughImpl : public GLES2Decoder {
void ExitCommandProcessingEarly() override;
+ void CheckSwapBuffersAsyncResult(const char* function_name,
+ uint64_t swap_id,
+ gfx::SwapResult result,
+ std::unique_ptr<gfx::GpuFence> gpu_fence);
error::Error CheckSwapBuffersResult(gfx::SwapResult result,
const char* function_name);
@@ -669,6 +675,7 @@ class GPU_GLES2_EXPORT GLES2DecoderPassthroughImpl : public GLES2Decoder {
base::subtle::Atomic32 submit_count = 0;
std::unique_ptr<gl::GLFence> commands_completed_fence;
+ base::TimeDelta commands_issued_time;
std::vector<base::OnceClosure> callbacks;
std::unique_ptr<gl::GLFence> buffer_shadow_update_fence = nullptr;
@@ -689,6 +696,12 @@ class GPU_GLES2_EXPORT GLES2DecoderPassthroughImpl : public GLES2Decoder {
GLuint service_id = 0;
scoped_refptr<gpu::Buffer> shm;
QuerySync* sync = nullptr;
+
+ // Time at which the commands for this query started processing. This is
+ // used to ensure we only include the time when the decoder is scheduled in
+ // the |active_time|. Used for GL_COMMANDS_ISSUED_CHROMIUM type query.
+ base::TimeTicks command_processing_start_time;
+ base::TimeDelta active_time;
};
std::unordered_map<GLenum, ActiveQuery> active_queries_;
@@ -840,7 +853,6 @@ class GPU_GLES2_EXPORT GLES2DecoderPassthroughImpl : public GLES2Decoder {
bool gpu_debug_commands_;
// Context lost state
- bool has_robustness_extension_;
bool context_lost_;
bool reset_by_robustness_extension_;
bool lose_context_when_out_of_memory_;
@@ -855,7 +867,10 @@ class GPU_GLES2_EXPORT GLES2DecoderPassthroughImpl : public GLES2Decoder {
GLuint linking_program_service_id_ = 0u;
- base::WeakPtrFactory<GLES2DecoderPassthroughImpl> weak_ptr_factory_;
+ // CA Layer state
+ std::unique_ptr<CALayerSharedState> ca_layer_shared_state_;
+
+ base::WeakPtrFactory<GLES2DecoderPassthroughImpl> weak_ptr_factory_{this};
// Include the prototypes of all the doer functions from a separate header to
// keep this file clean.
diff --git a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough_doer_prototypes.h b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough_doer_prototypes.h
index 1fb7aa15683..c2eac26c2bb 100644
--- a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough_doer_prototypes.h
+++ b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough_doer_prototypes.h
@@ -172,6 +172,7 @@ error::Error DoDisableVertexAttribArray(GLuint index);
error::Error DoDispatchCompute(GLuint num_groups_x,
GLuint num_groups_y,
GLuint num_groups_z);
+error::Error DoDispatchComputeIndirect(GLintptr offset);
error::Error DoDrawArrays(GLenum mode, GLint first, GLsizei count);
error::Error DoDrawElements(GLenum mode,
GLsizei count,
@@ -893,16 +894,20 @@ error::Error DoScheduleOverlayPlaneCHROMIUM(GLint plane_z_order,
GLfloat uv_y,
GLfloat uv_width,
GLfloat uv_height,
+ bool enable_blend,
GLuint gpu_fence_id);
-error::Error DoScheduleCALayerSharedStateCHROMIUM(GLfloat opacity,
- GLboolean is_clipped,
- const GLfloat* clip_rect,
- GLint sorting_context_id,
- const GLfloat* transform);
+error::Error DoScheduleCALayerSharedStateCHROMIUM(
+ GLfloat opacity,
+ GLboolean is_clipped,
+ const GLfloat* clip_rect,
+ const GLfloat* rounded_corner_bounds,
+ GLint sorting_context_id,
+ const GLfloat* transform);
error::Error DoScheduleCALayerCHROMIUM(GLuint contents_texture_id,
const GLfloat* contents_rect,
GLuint background_color,
GLuint edge_aa_mask,
+ GLenum filter,
const GLfloat* bounds_rect);
error::Error DoScheduleCALayerInUseQueryCHROMIUM(
GLuint n,
diff --git a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough_doers.cc b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough_doers.cc
index c8113522fa4..0e1006c987f 100644
--- a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough_doers.cc
+++ b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough_doers.cc
@@ -12,11 +12,13 @@
#include "gpu/command_buffer/service/gl_stream_texture_image.h"
#include "gpu/command_buffer/service/gpu_fence_manager.h"
#include "gpu/command_buffer/service/gpu_tracer.h"
+#include "gpu/command_buffer/service/image_factory.h"
#include "gpu/command_buffer/service/multi_draw_manager.h"
#include "gpu/command_buffer/service/passthrough_discardable_manager.h"
#include "gpu/command_buffer/service/shared_image_factory.h"
#include "gpu/command_buffer/service/shared_image_representation.h"
#include "ui/gfx/geometry/rect_conversions.h"
+#include "ui/gl/ca_renderer_layer_params.h"
#include "ui/gl/dc_renderer_layer_params.h"
#include "ui/gl/gl_version_info.h"
@@ -1124,6 +1126,15 @@ error::Error GLES2DecoderPassthroughImpl::DoDispatchCompute(
return error::kNoError;
}
+error::Error GLES2DecoderPassthroughImpl::DoDispatchComputeIndirect(
+ GLintptr offset) {
+ BindPendingImagesForSamplersIfNeeded();
+ // TODO(jiajie.hu@intel.com): Use glDispatchComputeIndirectRobustANGLEFn()
+ // when it's ready in ANGLE.
+ api()->glDispatchComputeIndirectFn(offset);
+ return error::kNoError;
+}
+
error::Error GLES2DecoderPassthroughImpl::DoDrawArrays(GLenum mode,
GLint first,
GLsizei count) {
@@ -3251,8 +3262,51 @@ error::Error GLES2DecoderPassthroughImpl::DoTexStorage2DImageCHROMIUM(
GLenum bufferUsage,
GLsizei width,
GLsizei height) {
- NOTIMPLEMENTED();
- // Call UpdateTextureSizeFromTarget(target) if implemented
+ TextureTarget target_enum = GLenumToTextureTarget(target);
+ if (target_enum == TextureTarget::kCubeMap ||
+ target_enum == TextureTarget::kUnkown) {
+ InsertError(GL_INVALID_ENUM, "Invalid target");
+ return error::kNoError;
+ }
+
+ const BoundTexture& bound_texture =
+ bound_textures_[static_cast<size_t>(target_enum)][active_texture_unit_];
+ if (bound_texture.texture == nullptr) {
+ InsertError(GL_INVALID_OPERATION, "No texture bound");
+ return error::kNoError;
+ }
+
+ gfx::BufferFormat buffer_format;
+ if (!GetGFXBufferFormat(internalFormat, &buffer_format)) {
+ InsertError(GL_INVALID_ENUM, "Invalid buffer format");
+ return error::kNoError;
+ }
+
+ gfx::BufferUsage buffer_usage;
+ if (!GetGFXBufferUsage(bufferUsage, &buffer_usage)) {
+ InsertError(GL_INVALID_ENUM, "Invalid buffer usage");
+ return error::kNoError;
+ }
+
+ if (!GetContextGroup()->image_factory()) {
+ InsertError(GL_INVALID_OPERATION, "Cannot create GL image");
+ return error::kNoError;
+ }
+
+ bool is_cleared;
+ scoped_refptr<gl::GLImage> image =
+ GetContextGroup()->image_factory()->CreateAnonymousImage(
+ gfx::Size(width, height), buffer_format, buffer_usage, &is_cleared);
+ if (!image || !image->BindTexImage(target)) {
+ InsertError(GL_INVALID_OPERATION, "Failed to create or bind GL Image");
+ return error::kNoError;
+ }
+
+ bound_texture.texture->SetLevelImage(target, 0, image.get());
+
+ // Target is already validated
+ UpdateTextureSizeFromTarget(target);
+
return error::kNoError;
}
@@ -3405,6 +3459,8 @@ error::Error GLES2DecoderPassthroughImpl::DoBeginQueryEXT(
query.service_id = service_id;
query.shm = std::move(buffer);
query.sync = sync;
+ if (target == GL_COMMANDS_ISSUED_CHROMIUM)
+ query.command_processing_start_time = base::TimeTicks::Now();
active_queries_[target] = std::move(query);
return error::kNoError;
@@ -3467,6 +3523,12 @@ error::Error GLES2DecoderPassthroughImpl::DoEndQueryEXT(GLenum target,
pending_query.program_service_id = linking_program_service_id_;
break;
+ case GL_COMMANDS_ISSUED_CHROMIUM:
+ pending_query.commands_issued_time =
+ active_query.active_time +
+ (base::TimeTicks::Now() - active_query.command_processing_start_time);
+ break;
+
default:
break;
}
@@ -3537,6 +3599,8 @@ error::Error GLES2DecoderPassthroughImpl::DoBindVertexArrayOES(GLuint array) {
error::Error GLES2DecoderPassthroughImpl::DoSwapBuffers(uint64_t swap_id,
GLbitfield flags) {
+ ca_layer_shared_state_ = nullptr;
+
if (offscreen_) {
if (offscreen_single_buffer_) {
return error::kNoError;
@@ -3587,8 +3651,18 @@ error::Error GLES2DecoderPassthroughImpl::DoSwapBuffers(uint64_t swap_id,
}
client()->OnSwapBuffers(swap_id, flags);
- return CheckSwapBuffersResult(surface_->SwapBuffers(base::DoNothing()),
- "SwapBuffers");
+ if (surface_->SupportsAsyncSwap()) {
+ TRACE_EVENT_ASYNC_BEGIN0("gpu", "AsyncSwapBuffers", swap_id);
+ surface_->SwapBuffersAsync(
+ base::BindOnce(
+ &GLES2DecoderPassthroughImpl::CheckSwapBuffersAsyncResult,
+ weak_ptr_factory_.GetWeakPtr(), "SwapBuffers", swap_id),
+ base::DoNothing());
+ return error::kNoError;
+ } else {
+ return CheckSwapBuffersResult(surface_->SwapBuffers(base::DoNothing()),
+ "SwapBuffers");
+ }
}
error::Error GLES2DecoderPassthroughImpl::DoGetMaxValueInBufferCHROMIUM(
@@ -3784,9 +3858,15 @@ error::Error GLES2DecoderPassthroughImpl::DoRequestExtensionCHROMIUM(
// Make sure newly enabled extensions are exposed and usable.
context_->ReinitializeDynamicBindings();
- feature_info_->Initialize(
- feature_info_->context_type(), true /* is_passthrough_cmd_decoder */,
- feature_info_->disallowed_features(), true /* force_reinitialize */);
+ InitializeFeatureInfo(feature_info_->context_type(),
+ feature_info_->disallowed_features(), true);
+
+ // Support for CHROMIUM_texture_storage_image depends on the underlying
+ // ImageFactory's ability to create anonymous images.
+ gpu::ImageFactory* image_factory = group_->image_factory();
+ if (image_factory && image_factory->SupportsCreateAnonymousImage()) {
+ feature_info_->EnableCHROMIUMTextureStorageImage();
+ }
return error::kNoError;
}
@@ -4165,6 +4245,8 @@ error::Error GLES2DecoderPassthroughImpl::DoSwapBuffersWithBoundsCHROMIUM(
return error::kNoError;
}
+ ca_layer_shared_state_ = nullptr;
+
std::vector<gfx::Rect> bounds(count);
for (GLsizei i = 0; i < count; ++i) {
bounds[i] = gfx::Rect(rects[i * 4 + 0], rects[i * 4 + 1], rects[i * 4 + 2],
@@ -4190,10 +4272,23 @@ error::Error GLES2DecoderPassthroughImpl::DoPostSubBufferCHROMIUM(
return error::kNoError;
}
+ ca_layer_shared_state_ = nullptr;
+
client()->OnSwapBuffers(swap_id, flags);
- return CheckSwapBuffersResult(
- surface_->PostSubBuffer(x, y, width, height, base::DoNothing()),
- "PostSubBuffer");
+ if (surface_->SupportsAsyncSwap()) {
+ TRACE_EVENT_ASYNC_BEGIN0("gpu", "AsyncSwapBuffers", swap_id);
+ surface_->PostSubBufferAsync(
+ x, y, width, height,
+ base::BindOnce(
+ &GLES2DecoderPassthroughImpl::CheckSwapBuffersAsyncResult,
+ weak_ptr_factory_.GetWeakPtr(), "PostSubBuffer", swap_id),
+ base::DoNothing());
+ return error::kNoError;
+ } else {
+ return CheckSwapBuffersResult(
+ surface_->PostSubBuffer(x, y, width, height, base::DoNothing()),
+ "PostSubBuffer");
+ }
}
error::Error GLES2DecoderPassthroughImpl::DoCopyTextureCHROMIUM(
@@ -4344,14 +4439,15 @@ GLES2DecoderPassthroughImpl::DoBindTexImage2DWithInternalformatCHROMIUM(
error::Error GLES2DecoderPassthroughImpl::DoReleaseTexImage2DCHROMIUM(
GLenum target,
GLint imageId) {
- if (target != GL_TEXTURE_2D) {
+ TextureTarget target_enum = GLenumToTextureTarget(target);
+ if (target_enum == TextureTarget::kCubeMap ||
+ target_enum == TextureTarget::kUnkown) {
InsertError(GL_INVALID_ENUM, "Invalid target");
return error::kNoError;
}
const BoundTexture& bound_texture =
- bound_textures_[static_cast<size_t>(TextureTarget::k2D)]
- [active_texture_unit_];
+ bound_textures_[static_cast<size_t>(target_enum)][active_texture_unit_];
if (bound_texture.texture == nullptr) {
InsertError(GL_INVALID_OPERATION, "No texture bound");
return error::kNoError;
@@ -4481,8 +4577,47 @@ error::Error GLES2DecoderPassthroughImpl::DoScheduleOverlayPlaneCHROMIUM(
GLfloat uv_y,
GLfloat uv_width,
GLfloat uv_height,
+ bool enable_blend,
GLuint gpu_fence_id) {
- NOTIMPLEMENTED();
+ scoped_refptr<TexturePassthrough> passthrough_texture = nullptr;
+ if (!resources_->texture_object_map.GetServiceID(overlay_texture_id,
+ &passthrough_texture) ||
+ passthrough_texture == nullptr) {
+ InsertError(GL_INVALID_VALUE, "invalid texture id");
+ return error::kNoError;
+ }
+
+ gl::GLImage* image =
+ passthrough_texture->GetLevelImage(passthrough_texture->target(), 0);
+ if (!image) {
+ InsertError(GL_INVALID_VALUE, "texture has no image");
+ return error::kNoError;
+ }
+
+ gfx::OverlayTransform transform = GetGFXOverlayTransform(plane_transform);
+ if (transform == gfx::OVERLAY_TRANSFORM_INVALID) {
+ InsertError(GL_INVALID_ENUM, "invalid transform enum");
+ return error::kNoError;
+ }
+
+ std::unique_ptr<gfx::GpuFence> gpu_fence;
+ if (gpu_fence_id != 0) {
+ gpu_fence = GetGpuFenceManager()->GetGpuFence(gpu_fence_id);
+ if (!gpu_fence) {
+ InsertError(GL_INVALID_ENUM, "unknown fence");
+ return error::kNoError;
+ }
+ }
+
+ if (!surface_->ScheduleOverlayPlane(
+ plane_z_order, transform, image,
+ gfx::Rect(bounds_x, bounds_y, bounds_width, bounds_height),
+ gfx::RectF(uv_x, uv_y, uv_width, uv_height), enable_blend,
+ std::move(gpu_fence))) {
+ InsertError(GL_INVALID_OPERATION, "failed to schedule overlay");
+ return error::kNoError;
+ }
+
return error::kNoError;
}
@@ -4490,9 +4625,28 @@ error::Error GLES2DecoderPassthroughImpl::DoScheduleCALayerSharedStateCHROMIUM(
GLfloat opacity,
GLboolean is_clipped,
const GLfloat* clip_rect,
+ const GLfloat* rounded_corner_bounds,
GLint sorting_context_id,
const GLfloat* transform) {
- NOTIMPLEMENTED();
+ if (!ca_layer_shared_state_) {
+ ca_layer_shared_state_.reset(new CALayerSharedState);
+ }
+
+ ca_layer_shared_state_->opacity = opacity;
+ ca_layer_shared_state_->is_clipped = is_clipped;
+ ca_layer_shared_state_->clip_rect = gfx::ToEnclosingRect(
+ gfx::RectF(clip_rect[0], clip_rect[1], clip_rect[2], clip_rect[3]));
+
+ ca_layer_shared_state_->rounded_corner_bounds =
+ gfx::RRectF(rounded_corner_bounds[0], rounded_corner_bounds[1],
+ rounded_corner_bounds[2], rounded_corner_bounds[3],
+ rounded_corner_bounds[4]);
+ ca_layer_shared_state_->sorting_context_id = sorting_context_id;
+ ca_layer_shared_state_->transform =
+ gfx::Transform(transform[0], transform[4], transform[8], transform[12],
+ transform[1], transform[5], transform[9], transform[13],
+ transform[2], transform[6], transform[10], transform[14],
+ transform[3], transform[7], transform[11], transform[15]);
return error::kNoError;
}
@@ -4501,15 +4655,76 @@ error::Error GLES2DecoderPassthroughImpl::DoScheduleCALayerCHROMIUM(
const GLfloat* contents_rect,
GLuint background_color,
GLuint edge_aa_mask,
+ GLenum filter,
const GLfloat* bounds_rect) {
- NOTIMPLEMENTED();
+ if (!ca_layer_shared_state_) {
+ InsertError(GL_INVALID_OPERATION,
+ "glScheduleCALayerSharedStateCHROMIUM has not been called");
+ return error::kNoError;
+ }
+
+ gl::GLImage* image = nullptr;
+ if (contents_texture_id) {
+ scoped_refptr<TexturePassthrough> passthrough_texture;
+ if (!resources_->texture_object_map.GetServiceID(contents_texture_id,
+ &passthrough_texture) &&
+ passthrough_texture) {
+ InsertError(GL_INVALID_VALUE, "unknown texture");
+ return error::kNoError;
+ }
+ DCHECK(passthrough_texture);
+ image =
+ passthrough_texture->GetLevelImage(passthrough_texture->target(), 0);
+ if (!image) {
+ InsertError(GL_INVALID_VALUE, "unsupported texture format");
+ return error::kNoError;
+ }
+ }
+
+ ui::CARendererLayerParams params = ui::CARendererLayerParams(
+ ca_layer_shared_state_->is_clipped, ca_layer_shared_state_->clip_rect,
+ ca_layer_shared_state_->rounded_corner_bounds,
+ ca_layer_shared_state_->sorting_context_id,
+ ca_layer_shared_state_->transform, image,
+ gfx::RectF(contents_rect[0], contents_rect[1], contents_rect[2],
+ contents_rect[3]),
+ gfx::ToEnclosingRect(gfx::RectF(bounds_rect[0], bounds_rect[1],
+ bounds_rect[2], bounds_rect[3])),
+ background_color, edge_aa_mask, ca_layer_shared_state_->opacity, filter);
+ if (!surface_->ScheduleCALayer(params)) {
+ InsertError(GL_INVALID_OPERATION, "failed to schedule CALayer");
+ return error::kNoError;
+ }
+
return error::kNoError;
}
error::Error GLES2DecoderPassthroughImpl::DoScheduleCALayerInUseQueryCHROMIUM(
GLuint n,
const volatile GLuint* textures) {
- NOTIMPLEMENTED();
+ std::vector<gl::GLSurface::CALayerInUseQuery> queries;
+ queries.reserve(n);
+ for (GLuint i = 0; i < n; ++i) {
+ gl::GLImage* image = nullptr;
+ GLuint texture_id = textures[i];
+ if (texture_id) {
+ scoped_refptr<TexturePassthrough> passthrough_texture;
+ if (!resources_->texture_object_map.GetServiceID(texture_id,
+ &passthrough_texture) &&
+ passthrough_texture) {
+ InsertError(GL_INVALID_VALUE, "unknown texture");
+ return error::kNoError;
+ }
+ image =
+ passthrough_texture->GetLevelImage(passthrough_texture->target(), 0);
+ }
+ gl::GLSurface::CALayerInUseQuery query;
+ query.image = image;
+ query.texture = texture_id;
+ queries.push_back(query);
+ }
+
+ surface_->ScheduleCALayerInUseQuery(std::move(queries));
return error::kNoError;
}
@@ -4538,7 +4753,7 @@ error::Error GLES2DecoderPassthroughImpl::DoScheduleDCLayerCHROMIUM(
GLint clip_height,
GLuint protected_video_type) {
if (protected_video_type >
- static_cast<GLuint>(ui::ProtectedVideoType::kMaxValue)) {
+ static_cast<GLuint>(gfx::ProtectedVideoType::kMaxValue)) {
InsertError(GL_INVALID_VALUE, "invalid protected video type");
return error::kNoError;
}
@@ -4581,7 +4796,7 @@ error::Error GLES2DecoderPassthroughImpl::DoScheduleDCLayerCHROMIUM(
params.is_clipped = is_clipped;
params.clip_rect = gfx::Rect(clip_x, clip_y, clip_width, clip_height);
params.protected_video_type =
- static_cast<ui::ProtectedVideoType>(protected_video_type);
+ static_cast<gfx::ProtectedVideoType>(protected_video_type);
if (!surface_->ScheduleDCLayer(params))
InsertError(GL_INVALID_OPERATION, "failed to schedule DCLayer");
@@ -4598,9 +4813,22 @@ error::Error GLES2DecoderPassthroughImpl::DoCommitOverlayPlanesCHROMIUM(
return error::kNoError;
}
+ ca_layer_shared_state_ = nullptr;
+
client()->OnSwapBuffers(swap_id, flags);
- return CheckSwapBuffersResult(
- surface_->CommitOverlayPlanes(base::DoNothing()), "CommitOverlayPlanes");
+ if (surface_->SupportsAsyncSwap()) {
+ TRACE_EVENT_ASYNC_BEGIN0("gpu", "AsyncSwapBuffers", swap_id);
+ surface_->CommitOverlayPlanesAsync(
+ base::BindOnce(
+ &GLES2DecoderPassthroughImpl::CheckSwapBuffersAsyncResult,
+ weak_ptr_factory_.GetWeakPtr(), "CommitOverlayPlanes", swap_id),
+ base::DoNothing());
+ return error::kNoError;
+ } else {
+ return CheckSwapBuffersResult(
+ surface_->CommitOverlayPlanes(base::DoNothing()),
+ "CommitOverlayPlanes");
+ }
}
error::Error GLES2DecoderPassthroughImpl::DoSetColorSpaceMetadataCHROMIUM(
@@ -4998,7 +5226,11 @@ error::Error GLES2DecoderPassthroughImpl::DoSetDrawRectangleCHROMIUM(
gfx::Rect rect(x, y, width, height);
if (!surface_->SetDrawRectangle(rect)) {
InsertError(GL_INVALID_OPERATION, "SetDrawRectangle failed on surface");
- return error::kNoError;
+ // If SetDrawRectangle failed, we may not have a current context any
+ // more, make sure to report lost context.
+ MarkContextLost(error::kUnknown);
+ group_->LoseContexts(error::kUnknown);
+ return error::kLostContext;
}
ApplySurfaceDrawOffset();
@@ -5023,7 +5255,11 @@ error::Error GLES2DecoderPassthroughImpl::DoSetEnableDCLayersCHROMIUM(
if (!surface_->SetEnableDCLayers(!!enable)) {
InsertError(GL_INVALID_OPERATION, "SetEnableDCLayers failed on surface.");
- return error::kNoError;
+ // If SetEnableDCLayers failed, we may not have a current context any
+ // more, make sure to report lost context.
+ MarkContextLost(error::kUnknown);
+ group_->LoseContexts(error::kUnknown);
+ return error::kLostContext;
}
return error::kNoError;
diff --git a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough_handlers.cc b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough_handlers.cc
index 5a3c40210e8..50baa6cf95b 100644
--- a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough_handlers.cc
+++ b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough_handlers.cc
@@ -2041,12 +2041,13 @@ error::Error GLES2DecoderPassthroughImpl::HandleScheduleOverlayPlaneCHROMIUM(
GLfloat uv_y = static_cast<GLfloat>(c.uv_y);
GLfloat uv_width = static_cast<GLfloat>(c.uv_width);
GLfloat uv_height = static_cast<GLfloat>(c.uv_height);
+ bool enable_blend = static_cast<bool>(c.enable_blend);
GLuint gpu_fence_id = static_cast<GLuint>(c.gpu_fence_id);
- return DoScheduleOverlayPlaneCHROMIUM(plane_z_order, plane_transform,
- overlay_texture_id, bounds_x, bounds_y,
- bounds_width, bounds_height, uv_x, uv_y,
- uv_width, uv_height, gpu_fence_id);
+ return DoScheduleOverlayPlaneCHROMIUM(
+ plane_z_order, plane_transform, overlay_texture_id, bounds_x, bounds_y,
+ bounds_width, bounds_height, uv_x, uv_y, uv_width, uv_height,
+ enable_blend, gpu_fence_id);
}
error::Error
@@ -2063,14 +2064,17 @@ GLES2DecoderPassthroughImpl::HandleScheduleCALayerSharedStateCHROMIUM(
uint32_t shm_id = c.shm_id;
uint32_t shm_offset = c.shm_offset;
+ // 4 for |clip_rect|, 5 for |rounded_corner_bounds|, 16 for |transform|.
const GLfloat* mem = GetSharedMemoryAs<const GLfloat*>(shm_id, shm_offset,
- 20 * sizeof(GLfloat));
+ 25 * sizeof(GLfloat));
if (!mem) {
return error::kOutOfBounds;
}
const GLfloat* clip_rect = mem + 0;
- const GLfloat* transform = mem + 4;
+ const GLfloat* rounded_corner_bounds = mem + 4;
+ const GLfloat* transform = mem + 9;
return DoScheduleCALayerSharedStateCHROMIUM(opacity, is_clipped, clip_rect,
+ rounded_corner_bounds,
sorting_context_id, transform);
}
@@ -2083,6 +2087,7 @@ error::Error GLES2DecoderPassthroughImpl::HandleScheduleCALayerCHROMIUM(
GLuint contents_texture_id = static_cast<GLint>(c.contents_texture_id);
GLuint background_color = static_cast<GLuint>(c.background_color);
GLuint edge_aa_mask = static_cast<GLuint>(c.edge_aa_mask);
+ GLenum filter = static_cast<GLenum>(c.filter);
uint32_t shm_id = c.shm_id;
uint32_t shm_offset = c.shm_offset;
@@ -2094,7 +2099,8 @@ error::Error GLES2DecoderPassthroughImpl::HandleScheduleCALayerCHROMIUM(
const GLfloat* contents_rect = mem;
const GLfloat* bounds_rect = mem + 4;
return DoScheduleCALayerCHROMIUM(contents_texture_id, contents_rect,
- background_color, edge_aa_mask, bounds_rect);
+ background_color, edge_aa_mask, filter,
+ bounds_rect);
}
error::Error GLES2DecoderPassthroughImpl::HandleSetColorSpaceMetadataCHROMIUM(
diff --git a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough_handlers_autogen.cc b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough_handlers_autogen.cc
index 0abeb0b7d3a..20c29481305 100644
--- a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough_handlers_autogen.cc
+++ b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough_handlers_autogen.cc
@@ -4020,6 +4020,22 @@ error::Error GLES2DecoderPassthroughImpl::HandleDispatchCompute(
return error::kNoError;
}
+error::Error GLES2DecoderPassthroughImpl::HandleDispatchComputeIndirect(
+ uint32_t immediate_data_size,
+ const volatile void* cmd_data) {
+ if (!feature_info_->IsWebGL2ComputeContext())
+ return error::kUnknownCommand;
+ const volatile gles2::cmds::DispatchComputeIndirect& c =
+ *static_cast<const volatile gles2::cmds::DispatchComputeIndirect*>(
+ cmd_data);
+ GLintptr offset = static_cast<GLintptr>(c.offset);
+ error::Error error = DoDispatchComputeIndirect(offset);
+ if (error != error::kNoError) {
+ return error;
+ }
+ return error::kNoError;
+}
+
error::Error GLES2DecoderPassthroughImpl::HandleGetProgramInterfaceiv(
uint32_t immediate_data_size,
const volatile void* cmd_data) {
diff --git a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough_unittest_textures.cc b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough_unittest_textures.cc
index 8eb307fb6bd..4b7c4dbe8cb 100644
--- a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough_unittest_textures.cc
+++ b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough_unittest_textures.cc
@@ -61,7 +61,9 @@ class TestSharedImageBackingPassthrough : public SharedImageBacking {
void SetCleared() override {}
- void Update() override {}
+ void Update(std::unique_ptr<gfx::GpuFence> in_fence) override {
+ DCHECK(!in_fence);
+ }
bool ProduceLegacyMailbox(MailboxManager* mailbox_manager) override {
return false;
diff --git a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest.cc b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest.cc
index af6398cc69c..797187adef0 100644
--- a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest.cc
+++ b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest.cc
@@ -582,6 +582,14 @@ TEST_P(GLES3DecoderTest, WaitSyncValidArgs) {
EXPECT_EQ(GL_NO_ERROR, GetGLError());
}
+TEST_P(GLES2DecoderManualInitTest, InitFailsIfLostContext) {
+ InitState init;
+ init.extensions = "GL_KHR_robustness";
+ init.lose_context_on_init = true;
+ EXPECT_EQ(ContextResult::kTransientFailure,
+ MaybeInitDecoderWithWorkarounds(init, GpuDriverBugWorkarounds()));
+}
+
TEST_P(GLES2DecoderManualInitTest, BindGeneratesResourceFalse) {
InitState init;
InitDecoder(init);
diff --git a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_0_autogen.h b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_0_autogen.h
deleted file mode 100644
index 09a217d1fde..00000000000
--- a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_0_autogen.h
+++ /dev/null
@@ -1,134 +0,0 @@
-// Copyright 2014 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// This file is auto-generated from
-// gpu/command_buffer/build_gles2_cmd_buffer.py
-// It's formatted by clang-format using chromium coding style:
-// clang-format -i -style=chromium filename
-// DO NOT EDIT!
-
-// It is included by gles2_cmd_decoder_unittest_base.cc
-#ifndef GPU_COMMAND_BUFFER_SERVICE_GLES2_CMD_DECODER_UNITTEST_0_AUTOGEN_H_
-#define GPU_COMMAND_BUFFER_SERVICE_GLES2_CMD_DECODER_UNITTEST_0_AUTOGEN_H_
-
-void GLES2DecoderTestBase::SetupInitCapabilitiesExpectations(bool es3_capable) {
- ExpectEnableDisable(GL_BLEND, false);
- ExpectEnableDisable(GL_CULL_FACE, false);
- ExpectEnableDisable(GL_DEPTH_TEST, false);
- ExpectEnableDisable(GL_DITHER, true);
- ExpectEnableDisable(GL_POLYGON_OFFSET_FILL, false);
- ExpectEnableDisable(GL_SAMPLE_ALPHA_TO_COVERAGE, false);
- ExpectEnableDisable(GL_SAMPLE_COVERAGE, false);
- ExpectEnableDisable(GL_SCISSOR_TEST, false);
- ExpectEnableDisable(GL_STENCIL_TEST, false);
- if (feature_info()->feature_flags().ext_multisample_compatibility) {
- ExpectEnableDisable(GL_MULTISAMPLE_EXT, true);
- }
- if (feature_info()->feature_flags().ext_multisample_compatibility) {
- ExpectEnableDisable(GL_SAMPLE_ALPHA_TO_ONE_EXT, false);
- }
- if (es3_capable) {
- ExpectEnableDisable(GL_RASTERIZER_DISCARD, false);
- ExpectEnableDisable(GL_PRIMITIVE_RESTART_FIXED_INDEX, false);
- }
-}
-
-void GLES2DecoderTestBase::SetupInitStateExpectations(bool es3_capable) {
- auto* feature_info_ = feature_info();
- EXPECT_CALL(*gl_, BlendColor(0.0f, 0.0f, 0.0f, 0.0f))
- .Times(1)
- .RetiresOnSaturation();
- EXPECT_CALL(*gl_, BlendEquationSeparate(GL_FUNC_ADD, GL_FUNC_ADD))
- .Times(1)
- .RetiresOnSaturation();
- EXPECT_CALL(*gl_, BlendFuncSeparate(GL_ONE, GL_ZERO, GL_ONE, GL_ZERO))
- .Times(1)
- .RetiresOnSaturation();
- EXPECT_CALL(*gl_, ClearColor(0.0f, 0.0f, 0.0f, 0.0f))
- .Times(1)
- .RetiresOnSaturation();
- EXPECT_CALL(*gl_, ClearDepth(1.0f)).Times(1).RetiresOnSaturation();
- EXPECT_CALL(*gl_, ClearStencil(0)).Times(1).RetiresOnSaturation();
- EXPECT_CALL(*gl_, ColorMask(true, true, true, true))
- .Times(1)
- .RetiresOnSaturation();
- if (feature_info()->feature_flags().chromium_framebuffer_mixed_samples) {
- EXPECT_CALL(*gl_, CoverageModulationNV(GL_NONE))
- .Times(1)
- .RetiresOnSaturation();
- }
- EXPECT_CALL(*gl_, CullFace(GL_BACK)).Times(1).RetiresOnSaturation();
- EXPECT_CALL(*gl_, DepthFunc(GL_LESS)).Times(1).RetiresOnSaturation();
- EXPECT_CALL(*gl_, DepthMask(true)).Times(1).RetiresOnSaturation();
- EXPECT_CALL(*gl_, DepthRange(0.0f, 1.0f)).Times(1).RetiresOnSaturation();
- EXPECT_CALL(*gl_, FrontFace(GL_CCW)).Times(1).RetiresOnSaturation();
- if (!feature_info_->gl_version_info().is_desktop_core_profile) {
- EXPECT_CALL(*gl_, Hint(GL_GENERATE_MIPMAP_HINT, GL_DONT_CARE))
- .Times(1)
- .RetiresOnSaturation();
- }
- if (feature_info_->feature_flags().oes_standard_derivatives) {
- EXPECT_CALL(*gl_,
- Hint(GL_FRAGMENT_SHADER_DERIVATIVE_HINT_OES, GL_DONT_CARE))
- .Times(1)
- .RetiresOnSaturation();
- }
- if (feature_info_->feature_flags().chromium_texture_filtering_hint) {
- EXPECT_CALL(*gl_, Hint(GL_TEXTURE_FILTERING_HINT_CHROMIUM, GL_NICEST))
- .Times(1)
- .RetiresOnSaturation();
- }
- SetupInitStateManualExpectationsForDoLineWidth(1.0f);
- if (feature_info_->feature_flags().chromium_path_rendering) {
- EXPECT_CALL(*gl_, MatrixLoadfEXT(GL_PATH_MODELVIEW_CHROMIUM, _))
- .Times(1)
- .RetiresOnSaturation();
- }
- if (feature_info_->feature_flags().chromium_path_rendering) {
- EXPECT_CALL(*gl_, MatrixLoadfEXT(GL_PATH_PROJECTION_CHROMIUM, _))
- .Times(1)
- .RetiresOnSaturation();
- }
- if (feature_info()->feature_flags().chromium_path_rendering) {
- EXPECT_CALL(*gl_, PathStencilFuncNV(GL_ALWAYS, 0, 0xFFFFFFFFU))
- .Times(1)
- .RetiresOnSaturation();
- }
- EXPECT_CALL(*gl_, PixelStorei(GL_PACK_ALIGNMENT, 4))
- .Times(1)
- .RetiresOnSaturation();
- EXPECT_CALL(*gl_, PixelStorei(GL_UNPACK_ALIGNMENT, 4))
- .Times(1)
- .RetiresOnSaturation();
- EXPECT_CALL(*gl_, PolygonOffset(0.0f, 0.0f)).Times(1).RetiresOnSaturation();
- EXPECT_CALL(*gl_, SampleCoverage(1.0f, false)).Times(1).RetiresOnSaturation();
- EXPECT_CALL(*gl_,
- Scissor(kViewportX, kViewportY, kViewportWidth, kViewportHeight))
- .Times(1)
- .RetiresOnSaturation();
- EXPECT_CALL(*gl_, StencilFuncSeparate(GL_FRONT, GL_ALWAYS, 0, 0xFFFFFFFFU))
- .Times(1)
- .RetiresOnSaturation();
- EXPECT_CALL(*gl_, StencilFuncSeparate(GL_BACK, GL_ALWAYS, 0, 0xFFFFFFFFU))
- .Times(1)
- .RetiresOnSaturation();
- EXPECT_CALL(*gl_, StencilMaskSeparate(GL_FRONT, 0xFFFFFFFFU))
- .Times(1)
- .RetiresOnSaturation();
- EXPECT_CALL(*gl_, StencilMaskSeparate(GL_BACK, 0xFFFFFFFFU))
- .Times(1)
- .RetiresOnSaturation();
- EXPECT_CALL(*gl_, StencilOpSeparate(GL_FRONT, GL_KEEP, GL_KEEP, GL_KEEP))
- .Times(1)
- .RetiresOnSaturation();
- EXPECT_CALL(*gl_, StencilOpSeparate(GL_BACK, GL_KEEP, GL_KEEP, GL_KEEP))
- .Times(1)
- .RetiresOnSaturation();
- EXPECT_CALL(*gl_,
- Viewport(kViewportX, kViewportY, kViewportWidth, kViewportHeight))
- .Times(1)
- .RetiresOnSaturation();
- SetupInitStateManualExpectations(es3_capable);
-}
-#endif // GPU_COMMAND_BUFFER_SERVICE_GLES2_CMD_DECODER_UNITTEST_0_AUTOGEN_H_
diff --git a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_base.cc b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_base.cc
index 34bfa2459f7..1a303316be9 100644
--- a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_base.cc
+++ b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_base.cc
@@ -19,6 +19,7 @@
#include "gpu/command_buffer/common/gles2_cmd_format.h"
#include "gpu/command_buffer/common/gles2_cmd_utils.h"
#include "gpu/command_buffer/service/context_group.h"
+#include "gpu/command_buffer/service/context_state_test_helpers.h"
#include "gpu/command_buffer/service/copy_texture_chromium_mock.h"
#include "gpu/command_buffer/service/gpu_switches.h"
#include "gpu/command_buffer/service/logger.h"
@@ -168,21 +169,10 @@ void GLES2DecoderTestBase::AddExpectationsForVertexAttribManager() {
}
}
-GLES2DecoderTestBase::InitState::InitState()
- : extensions("GL_EXT_framebuffer_object"),
- gl_version("2.1"),
- has_alpha(false),
- has_depth(false),
- has_stencil(false),
- request_alpha(false),
- request_depth(false),
- request_stencil(false),
- bind_generates_resource(false),
- lose_context_when_out_of_memory(false),
- use_native_vao(true),
- context_type(CONTEXT_TYPE_OPENGLES2) {}
-
+GLES2DecoderTestBase::InitState::InitState() = default;
GLES2DecoderTestBase::InitState::InitState(const InitState& other) = default;
+GLES2DecoderTestBase::InitState& GLES2DecoderTestBase::InitState::operator=(
+ const InitState& other) = default;
void GLES2DecoderTestBase::InitDecoder(const InitState& init) {
gpu::GpuDriverBugWorkarounds workarounds;
@@ -192,6 +182,13 @@ void GLES2DecoderTestBase::InitDecoder(const InitState& init) {
void GLES2DecoderTestBase::InitDecoderWithWorkarounds(
const InitState& init,
const gpu::GpuDriverBugWorkarounds& workarounds) {
+ ContextResult result = MaybeInitDecoderWithWorkarounds(init, workarounds);
+ ASSERT_EQ(result, gpu::ContextResult::kSuccess);
+}
+
+ContextResult GLES2DecoderTestBase::MaybeInitDecoderWithWorkarounds(
+ const InitState& init,
+ const gpu::GpuDriverBugWorkarounds& workarounds) {
InitState normalized_init = init;
NormalizeInitState(&normalized_init);
// For easier substring/extension matching
@@ -438,8 +435,9 @@ void GLES2DecoderTestBase::InitDecoderWithWorkarounds(
.RetiresOnSaturation();
}
- SetupInitCapabilitiesExpectations(group_->feature_info()->IsES3Capable());
- SetupInitStateExpectations(group_->feature_info()->IsES3Capable());
+ ContextStateTestHelpers::SetupInitState(
+ gl_.get(), group_->feature_info(),
+ gfx::Size(kBackBufferWidth, kBackBufferHeight));
EXPECT_CALL(*gl_, ActiveTexture(GL_TEXTURE0))
.Times(1)
@@ -480,6 +478,12 @@ void GLES2DecoderTestBase::InitDecoderWithWorkarounds(
}
#endif
+ if (context_->HasRobustness()) {
+ EXPECT_CALL(*gl_, GetGraphicsResetStatusARB())
+ .WillOnce(Return(init.lose_context_on_init ? GL_GUILTY_CONTEXT_RESET_ARB
+ : GL_NO_ERROR));
+ }
+
scoped_refptr<gpu::Buffer> buffer =
command_buffer_service_->CreateTransferBufferHelper(kSharedBufferSize,
&shared_memory_id_);
@@ -510,12 +514,17 @@ void GLES2DecoderTestBase::InitDecoderWithWorkarounds(
decoder_->SetCopyTexImageBlitterForTest(copy_tex_image_blitter_);
}
- ASSERT_EQ(decoder_->Initialize(surface_, context_, false,
- DisallowedFeatures(), attribs),
- gpu::ContextResult::kSuccess);
+ gpu::ContextResult result = decoder_->Initialize(
+ surface_, context_, false, DisallowedFeatures(), attribs);
+ if (result != gpu::ContextResult::kSuccess) {
+ decoder_->Destroy(false /* have_context */);
+ decoder_.reset();
+ group_->Destroy(mock_decoder_.get(), false);
+ return result;
+ }
EXPECT_CALL(*context_, MakeCurrent(surface_.get())).WillOnce(Return(true));
- if (context_->WasAllocatedUsingRobustnessExtension()) {
+ if (context_->HasRobustness()) {
EXPECT_CALL(*gl_, GetGraphicsResetStatusARB())
.WillOnce(Return(GL_NO_ERROR));
}
@@ -564,6 +573,7 @@ void GLES2DecoderTestBase::InitDecoderWithWorkarounds(
}
EXPECT_EQ(GL_NO_ERROR, GetGLError());
+ return result;
}
void GLES2DecoderTestBase::ResetDecoder() {
@@ -616,19 +626,6 @@ void GLES2DecoderTestBase::TearDown() {
ResetDecoder();
}
-void GLES2DecoderTestBase::ExpectEnableDisable(GLenum cap, bool enable) {
- if (enable) {
- EXPECT_CALL(*gl_, Enable(cap))
- .Times(1)
- .RetiresOnSaturation();
- } else {
- EXPECT_CALL(*gl_, Disable(cap))
- .Times(1)
- .RetiresOnSaturation();
- }
-}
-
-
GLint GLES2DecoderTestBase::GetGLError() {
EXPECT_CALL(*gl_, GetError())
.WillOnce(Return(GL_NO_ERROR))
@@ -1799,11 +1796,6 @@ const GLint GLES2DecoderTestBase::kMaxVertexUniformVectors;
const GLint GLES2DecoderTestBase::kMaxViewportWidth;
const GLint GLES2DecoderTestBase::kMaxViewportHeight;
-const GLint GLES2DecoderTestBase::kViewportX;
-const GLint GLES2DecoderTestBase::kViewportY;
-const GLint GLES2DecoderTestBase::kViewportWidth;
-const GLint GLES2DecoderTestBase::kViewportHeight;
-
const GLuint GLES2DecoderTestBase::kServiceAttrib0BufferId;
const GLuint GLES2DecoderTestBase::kServiceFixedAttribBufferId;
@@ -2361,30 +2353,6 @@ void GLES2DecoderTestBase::SetupMockGLBehaviors() {
&GLES2DecoderTestBase::MockGLStates::OnVertexAttribNullPointer));
}
-void GLES2DecoderTestBase::SetupInitStateManualExpectations(bool es3_capable) {
- if (es3_capable) {
- EXPECT_CALL(*gl_, PixelStorei(GL_PACK_ROW_LENGTH, 0))
- .Times(1)
- .RetiresOnSaturation();
- EXPECT_CALL(*gl_, PixelStorei(GL_UNPACK_ROW_LENGTH, 0))
- .Times(1)
- .RetiresOnSaturation();
- EXPECT_CALL(*gl_, PixelStorei(GL_UNPACK_IMAGE_HEIGHT, 0))
- .Times(1)
- .RetiresOnSaturation();
- if (group_->feature_info()->feature_flags().ext_window_rectangles) {
- EXPECT_CALL(*gl_, WindowRectanglesEXT(GL_EXCLUSIVE_EXT, 0, nullptr))
- .Times(1)
- .RetiresOnSaturation();
- }
- }
-}
-
-void GLES2DecoderTestBase::SetupInitStateManualExpectationsForDoLineWidth(
- GLfloat width) {
- EXPECT_CALL(*gl_, LineWidth(width)).Times(1).RetiresOnSaturation();
-}
-
void GLES2DecoderWithShaderTestBase::SetUp() {
GLES2DecoderTestBase::SetUp();
SetupDefaultProgram();
@@ -2414,11 +2382,6 @@ void GLES2DecoderTestBase::DoLockDiscardableTextureCHROMIUM(GLuint texture_id) {
EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
}
-// Include the auto-generated part of this file. We split this because it means
-// we can easily edit the non-auto generated parts right here in this file
-// instead of having to edit some template or the code generator.
-#include "gpu/command_buffer/service/gles2_cmd_decoder_unittest_0_autogen.h"
-
namespace {
GpuPreferences GenerateGpuPreferencesForPassthroughTests() {
@@ -2465,7 +2428,7 @@ void GLES2DecoderPassthroughTestBase::SetUp() {
context_creation_attribs_.stencil_size = 8;
context_creation_attribs_.bind_generates_resource = true;
- gl::init::InitializeGLOneOffImplementation(gl::kGLImplementationEGLGLES2,
+ gl::init::InitializeGLOneOffImplementation(gl::kGLImplementationEGLANGLE,
false, false, false, true);
scoped_refptr<gles2::FeatureInfo> feature_info = new gles2::FeatureInfo();
diff --git a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_base.h b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_base.h
index d81387cf5f9..74ace1d1e3a 100644
--- a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_base.h
+++ b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_base.h
@@ -11,7 +11,7 @@
#include <array>
#include <memory>
-#include "base/message_loop/message_loop.h"
+#include "base/test/scoped_task_environment.h"
#include "gpu/command_buffer/client/client_test_helper.h"
#include "gpu/command_buffer/common/gles2_cmd_format.h"
#include "gpu/command_buffer/common/gles2_cmd_utils.h"
@@ -216,24 +216,29 @@ class GLES2DecoderTestBase : public ::testing::TestWithParam<bool>,
struct InitState {
InitState();
InitState(const InitState& other);
-
- std::string extensions;
- std::string gl_version;
- bool has_alpha;
- bool has_depth;
- bool has_stencil;
- bool request_alpha;
- bool request_depth;
- bool request_stencil;
- bool bind_generates_resource;
- bool lose_context_when_out_of_memory;
- bool use_native_vao; // default is true.
- ContextType context_type;
+ InitState& operator=(const InitState& other);
+
+ std::string extensions = "GL_EXT_framebuffer_object";
+ std::string gl_version = "2.1";
+ bool has_alpha = false;
+ bool has_depth = false;
+ bool has_stencil = false;
+ bool request_alpha = false;
+ bool request_depth = false;
+ bool request_stencil = false;
+ bool bind_generates_resource = false;
+ bool lose_context_when_out_of_memory = false;
+ bool lose_context_on_init = false;
+ bool use_native_vao = true;
+ ContextType context_type = CONTEXT_TYPE_OPENGLES2;
};
void InitDecoder(const InitState& init);
void InitDecoderWithWorkarounds(const InitState& init,
const GpuDriverBugWorkarounds& workarounds);
+ ContextResult MaybeInitDecoderWithWorkarounds(
+ const InitState& init,
+ const GpuDriverBugWorkarounds& workarounds);
void ResetDecoder();
@@ -273,10 +278,6 @@ class GLES2DecoderTestBase : public ::testing::TestWithParam<bool>,
GLuint vertex_shader_client_id, GLuint vertex_shader_service_id,
GLuint fragment_shader_client_id, GLuint fragment_shader_service_id);
- void SetupInitCapabilitiesExpectations(bool es3_capable);
- void SetupInitStateExpectations(bool es3_capable);
- void ExpectEnableDisable(GLenum cap, bool enable);
-
// Setups up a shader for testing glUniform.
void SetupShaderForUniform(GLenum uniform_type);
void SetupDefaultProgram();
@@ -561,11 +562,6 @@ class GLES2DecoderTestBase : public ::testing::TestWithParam<bool>,
static const GLint kMaxViewportWidth = 8192;
static const GLint kMaxViewportHeight = 8192;
- static const GLint kViewportX = 0;
- static const GLint kViewportY = 0;
- static const GLint kViewportWidth = kBackBufferWidth;
- static const GLint kViewportHeight = kBackBufferHeight;
-
static const GLuint kServiceAttrib0BufferId = 801;
static const GLuint kServiceFixedAttribBufferId = 802;
@@ -799,11 +795,6 @@ class GLES2DecoderTestBase : public ::testing::TestWithParam<bool>,
void AddExpectationsForVertexAttribManager();
void SetupMockGLBehaviors();
- void SetupInitStateManualExpectations(bool es3_capable);
- void SetupInitStateManualExpectationsForWindowRectanglesEXT(GLenum mode,
- GLint count);
- void SetupInitStateManualExpectationsForDoLineWidth(GLfloat width);
-
GpuPreferences gpu_preferences_;
MailboxManagerImpl mailbox_manager_;
ShaderTranslatorCache shader_translator_cache_;
@@ -813,7 +804,7 @@ class GLES2DecoderTestBase : public ::testing::TestWithParam<bool>,
SharedImageManager shared_image_manager_;
scoped_refptr<ContextGroup> group_;
MockGLStates gl_states_;
- base::MessageLoop message_loop_;
+ base::test::ScopedTaskEnvironment scoped_task_environment_;
MockCopyTextureResourceManager* copy_texture_manager_; // not owned
MockCopyTexImageResourceManager* copy_tex_image_blitter_; // not owned
diff --git a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_context_lost.cc b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_context_lost.cc
index a2df617cc6a..7b18a6ff287 100644
--- a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_context_lost.cc
+++ b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_context_lost.cc
@@ -35,7 +35,7 @@ class GLES2DecoderDrawOOMTest : public GLES2DecoderManualInitTest {
error::ContextLostReason expected_other_reason) {
const GLsizei kFakeLargeCount = 0x1234;
SetupTexture();
- if (context_->WasAllocatedUsingRobustnessExtension()) {
+ if (context_->HasRobustness()) {
EXPECT_CALL(*gl_, GetGraphicsResetStatusARB())
.WillOnce(Return(reset_status));
}
@@ -108,15 +108,6 @@ class GLES2DecoderLostContextTest : public GLES2DecoderManualInitTest {
InitDecoder(init);
}
- void InitWithVirtualContextsAndRobustness() {
- gpu::GpuDriverBugWorkarounds workarounds;
- workarounds.use_virtualized_gl_contexts = true;
- InitState init;
- init.gl_version = "OpenGL ES 2.0";
- init.extensions = "GL_KHR_robustness";
- InitDecoderWithWorkarounds(init, workarounds);
- }
-
void DoGetErrorWithContextLost(GLenum reset_status) {
DCHECK(context_->HasExtension("GL_KHR_robustness"));
EXPECT_CALL(*gl_, GetError())
@@ -309,19 +300,6 @@ TEST_P(GLES2DecoderLostContextTest, LoseInnocentFromGLError) {
EXPECT_EQ(error::kInnocent, GetContextLostReason());
}
-TEST_P(GLES2DecoderLostContextTest, LoseVirtualContextWithRobustness) {
- InitWithVirtualContextsAndRobustness();
- EXPECT_CALL(*mock_decoder_, MarkContextLost(error::kUnknown))
- .Times(1);
- // Signal guilty....
- DoGetErrorWithContextLost(GL_GUILTY_CONTEXT_RESET_KHR);
- EXPECT_TRUE(decoder_->WasContextLost());
- EXPECT_TRUE(decoder_->WasContextLostByRobustnessExtension());
- // ...but make sure we don't pretend, since for virtual contexts we don't
- // know if this was really the guilty client.
- EXPECT_EQ(error::kUnknown, GetContextLostReason());
-}
-
TEST_P(GLES2DecoderLostContextTest, LoseGroupFromRobustness) {
// If one context in a group is lost through robustness,
// the other ones should also get lost and query the reset status.
diff --git a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_textures.cc b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_textures.cc
index 23d989601e7..8aa4c2e97e7 100644
--- a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_textures.cc
+++ b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_textures.cc
@@ -3161,7 +3161,9 @@ class TestSharedImageBacking : public SharedImageBacking {
void SetCleared() override {}
- void Update() override {}
+ void Update(std::unique_ptr<gfx::GpuFence> in_fence) override {
+ DCHECK(!in_fence);
+ }
bool ProduceLegacyMailbox(MailboxManager* mailbox_manager) override {
return false;
diff --git a/chromium/gpu/command_buffer/service/gpu_service_test.h b/chromium/gpu/command_buffer/service/gpu_service_test.h
index 2163a4e3abe..8cf3a834d24 100644
--- a/chromium/gpu/command_buffer/service/gpu_service_test.h
+++ b/chromium/gpu/command_buffer/service/gpu_service_test.h
@@ -8,7 +8,7 @@
#include <memory>
#include "base/memory/ref_counted.h"
-#include "base/message_loop/message_loop.h"
+#include "base/test/scoped_task_environment.h"
#include "testing/gmock/include/gmock/gmock.h"
#include "testing/gtest/include/gtest/gtest.h"
#include "ui/gl/gl_mock.h"
@@ -42,7 +42,7 @@ class GpuServiceTest : public testing::Test {
bool ran_teardown_;
scoped_refptr<gl::GLContextStub> context_;
scoped_refptr<gl::GLSurfaceStub> surface_;
- base::MessageLoop message_loop_;
+ base::test::ScopedTaskEnvironment scoped_task_environment_;
};
} // namespace gles2
diff --git a/chromium/gpu/command_buffer/service/gpu_switches.cc b/chromium/gpu/command_buffer/service/gpu_switches.cc
index e920902415e..c515c94d91a 100644
--- a/chromium/gpu/command_buffer/service/gpu_switches.cc
+++ b/chromium/gpu/command_buffer/service/gpu_switches.cc
@@ -6,13 +6,6 @@
#include "base/macros.h"
-namespace gpu {
-
-const char kCmdDecoderValidatingName[] = "validating";
-const char kCmdDecoderPassthroughName[] = "passthrough";
-
-} // namespace gpu
-
namespace switches {
// Always return success when compiling a shader. Linking will still fail.
@@ -69,12 +62,11 @@ const char kGLShaderIntermOutput[] = "gl-shader-interm-output";
// round intermediate values in ANGLE.
const char kEmulateShaderPrecision[] = "emulate-shader-precision";
-// Use the Pass-through command decoder, skipping all validation and state
-// tracking.
-const char kUseCmdDecoder[] = "use-cmd-decoder";
-
-// Enable Vulkan support, must also have ENABLE_VULKAN defined.
-const char kEnableVulkan[] = "enable-vulkan";
+// Enable Vulkan support and select Vulkan implementation, must also have
+// ENABLE_VULKAN defined.
+const char kUseVulkan[] = "use-vulkan";
+const char kVulkanImplementationNameNative[] = "native";
+const char kVulkanImplementationNameSwiftshader[] = "swiftshader";
// Disables VK_KHR_surface extension. Instead of using swapchain, bitblt will be
// used for present render result on screen.
diff --git a/chromium/gpu/command_buffer/service/gpu_switches.h b/chromium/gpu/command_buffer/service/gpu_switches.h
index bff68afe6e1..9c71810c217 100644
--- a/chromium/gpu/command_buffer/service/gpu_switches.h
+++ b/chromium/gpu/command_buffer/service/gpu_switches.h
@@ -10,13 +10,6 @@
#include "gpu/config/gpu_switches.h"
#include "gpu/gpu_export.h"
-namespace gpu {
-
-// The command decoder names that can be passed to --use-cmd-decoder.
-GPU_EXPORT extern const char kCmdDecoderValidatingName[];
-GPU_EXPORT extern const char kCmdDecoderPassthroughName[];
-} // namespace gpu
-
namespace switches {
GPU_EXPORT extern const char kCompileShaderAlwaysSucceeds[];
@@ -35,8 +28,9 @@ GPU_EXPORT extern const char kDisableGpuShaderDiskCache[];
GPU_EXPORT extern const char kEnableThreadedTextureMailboxes[];
GPU_EXPORT extern const char kGLShaderIntermOutput[];
GPU_EXPORT extern const char kEmulateShaderPrecision[];
-GPU_EXPORT extern const char kUseCmdDecoder[];
-GPU_EXPORT extern const char kEnableVulkan[];
+GPU_EXPORT extern const char kUseVulkan[];
+GPU_EXPORT extern const char kVulkanImplementationNameNative[];
+GPU_EXPORT extern const char kVulkanImplementationNameSwiftshader[];
GPU_EXPORT extern const char kDisableVulkanSurface[];
GPU_EXPORT extern const char kDisableVulkanFallbackToGLForTesting[];
diff --git a/chromium/gpu/command_buffer/service/gr_cache_controller.cc b/chromium/gpu/command_buffer/service/gr_cache_controller.cc
index 8e26fe334b1..84bbf6585a6 100644
--- a/chromium/gpu/command_buffer/service/gr_cache_controller.cc
+++ b/chromium/gpu/command_buffer/service/gr_cache_controller.cc
@@ -7,6 +7,7 @@
#include <chrono>
#include "base/bind.h"
+#include "base/metrics/histogram_macros.h"
#include "gpu/command_buffer/service/shared_context_state.h"
#include "ui/gl/gl_context.h"
@@ -31,6 +32,9 @@ void GrCacheController::ScheduleGrContextCleanup() {
if (!purge_gr_cache_cb_.IsCancelled())
return;
+ // Record memory usage periodically.
+ RecordGrContextMemory();
+
constexpr int kOldResourceCleanupDelaySeconds = 5;
// Here we ask GrContext to free any resources that haven't been used in
// a long while even if it is under budget. Below we set a call back to
@@ -68,5 +72,13 @@ void GrCacheController::PurgeGrCache(uint64_t idle_id) {
context_state_->gr_context()->freeGpuResources();
}
+void GrCacheController::RecordGrContextMemory() const {
+ int resource_count = 0;
+ size_t resource_bytes = 0;
+ context_state_->gr_context()->getResourceCacheUsage(&resource_count,
+ &resource_bytes);
+ UMA_HISTOGRAM_MEMORY_KB("GPU.GrContextMemoryKb", resource_bytes / 1000);
+}
+
} // namespace raster
} // namespace gpu
diff --git a/chromium/gpu/command_buffer/service/gr_cache_controller.h b/chromium/gpu/command_buffer/service/gr_cache_controller.h
index d207a923ec8..8e578d79824 100644
--- a/chromium/gpu/command_buffer/service/gr_cache_controller.h
+++ b/chromium/gpu/command_buffer/service/gr_cache_controller.h
@@ -30,6 +30,7 @@ class GPU_GLES2_EXPORT GrCacheController {
private:
void PurgeGrCache(uint64_t idle_id);
+ void RecordGrContextMemory() const;
// The |current_idle_id_| is used to avoid continuously posting tasks to clear
// the GrContext. Each time the context is used this id is incremented and
diff --git a/chromium/gpu/command_buffer/service/mailbox_manager_sync.cc b/chromium/gpu/command_buffer/service/mailbox_manager_sync.cc
index ec2f6d950ff..14339bf317c 100644
--- a/chromium/gpu/command_buffer/service/mailbox_manager_sync.cc
+++ b/chromium/gpu/command_buffer/service/mailbox_manager_sync.cc
@@ -51,8 +51,13 @@ void CreateFenceLocked(const SyncToken& sync_token) {
sync_points.pop();
}
// Need to use EGL fences since we are likely not in a single share group.
- auto fence = gl::GLFenceEGL::Create();
- CHECK(fence) << "eglCreateSyncKHR failed";
+ std::unique_ptr<gl::GLFence> fence = gl::GLFenceEGL::Create();
+ if (!fence) {
+ // Fall back to glFinish instead crashing such as in crbug.com/995376.
+ LOG(ERROR) << "eglCreateSyncKHR failed";
+ glFinish();
+ return;
+ }
std::pair<SyncTokenToFenceMap::iterator, bool> result =
sync_point_to_fence.insert(
std::make_pair(sync_token, std::move(fence)));
diff --git a/chromium/gpu/command_buffer/service/memory_program_cache.cc b/chromium/gpu/command_buffer/service/memory_program_cache.cc
index 13dc774c017..734436a8ab0 100644
--- a/chromium/gpu/command_buffer/service/memory_program_cache.cc
+++ b/chromium/gpu/command_buffer/service/memory_program_cache.cc
@@ -569,7 +569,7 @@ void MemoryProgramCache::LoadProgram(const std::string& key,
UMA_HISTOGRAM_COUNTS_1M("GPU.ProgramCache.MemorySizeAfterKb",
curr_size_bytes_ / 1024);
} else {
- LOG(ERROR) << "Failed to parse proto file.";
+ DVLOG(2) << "Failed to parse proto file.";
}
}
@@ -620,12 +620,14 @@ MemoryProgramCache::ProgramCacheValue::ProgramCacheValue(
interface_block_map_1_(interface_block_map_1),
program_cache_(program_cache) {
program_cache_->curr_size_bytes_ += data_.size();
+ program_cache->CompiledShaderCacheSuccess(shader_0_hash_);
+ program_cache->CompiledShaderCacheSuccess(shader_1_hash_);
program_cache_->LinkedProgramCacheSuccess(program_hash);
}
MemoryProgramCache::ProgramCacheValue::~ProgramCacheValue() {
program_cache_->curr_size_bytes_ -= data_.size();
- program_cache_->Evict(program_hash_);
+ program_cache_->Evict(program_hash_, shader_0_hash_, shader_1_hash_);
}
} // namespace gles2
diff --git a/chromium/gpu/command_buffer/service/program_cache.cc b/chromium/gpu/command_buffer/service/program_cache.cc
index 31bcca0ef41..7e76fdae38d 100644
--- a/chromium/gpu/command_buffer/service/program_cache.cc
+++ b/chromium/gpu/command_buffer/service/program_cache.cc
@@ -33,6 +33,19 @@ ProgramCache::~ProgramCache() = default;
void ProgramCache::Clear() {
ClearBackend();
link_status_.clear();
+ compiled_shaders_.clear();
+}
+
+bool ProgramCache::HasSuccessfullyCompiledShader(
+ const std::string& shader_signature) const {
+ char sha[kHashLength];
+ ComputeShaderHash(shader_signature, sha);
+ const std::string sha_string(sha, kHashLength);
+
+ if (compiled_shaders_.find(sha_string) != compiled_shaders_.end()) {
+ return true;
+ }
+ return false;
}
ProgramCache::LinkedProgramStatus ProgramCache::GetLinkedProgramStatus(
@@ -80,8 +93,12 @@ void ProgramCache::LinkedProgramCacheSuccess(
transform_feedback_varyings,
transform_feedback_buffer_mode,
sha);
+ const std::string a_sha_string(a_sha, kHashLength);
+ const std::string b_sha_string(b_sha, kHashLength);
const std::string sha_string(sha, kHashLength);
+ CompiledShaderCacheSuccess(a_sha_string);
+ CompiledShaderCacheSuccess(b_sha_string);
LinkedProgramCacheSuccess(sha_string);
}
@@ -89,6 +106,10 @@ void ProgramCache::LinkedProgramCacheSuccess(const std::string& program_hash) {
link_status_[program_hash] = LINK_SUCCEEDED;
}
+void ProgramCache::CompiledShaderCacheSuccess(const std::string& shader_hash) {
+ compiled_shaders_.insert(shader_hash);
+}
+
void ProgramCache::ComputeShaderHash(
const std::string& str,
char* result) const {
@@ -96,8 +117,12 @@ void ProgramCache::ComputeShaderHash(
str.length(), reinterpret_cast<unsigned char*>(result));
}
-void ProgramCache::Evict(const std::string& program_hash) {
+void ProgramCache::Evict(const std::string& program_hash,
+ const std::string& shader_0_hash,
+ const std::string& shader_1_hash) {
link_status_.erase(program_hash);
+ compiled_shaders_.erase(shader_0_hash);
+ compiled_shaders_.erase(shader_1_hash);
}
namespace {
diff --git a/chromium/gpu/command_buffer/service/program_cache.h b/chromium/gpu/command_buffer/service/program_cache.h
index cdb6eba8d6c..cb8e48d03e7 100644
--- a/chromium/gpu/command_buffer/service/program_cache.h
+++ b/chromium/gpu/command_buffer/service/program_cache.h
@@ -10,6 +10,7 @@
#include <map>
#include <string>
#include <unordered_map>
+#include <unordered_set>
#include "base/hash/sha1.h"
#include "base/macros.h"
@@ -59,6 +60,8 @@ class GPU_GLES2_EXPORT ProgramCache {
explicit ProgramCache(size_t max_cache_size_bytes);
virtual ~ProgramCache();
+ bool HasSuccessfullyCompiledShader(const std::string& shader_signature) const;
+
LinkedProgramStatus GetLinkedProgramStatus(
const std::string& shader_signature_a,
const std::string& shader_signature_b,
@@ -115,6 +118,8 @@ class GPU_GLES2_EXPORT ProgramCache {
// called by implementing class after a shader was successfully cached
void LinkedProgramCacheSuccess(const std::string& program_hash);
+ void CompiledShaderCacheSuccess(const std::string& shader_hash);
+
// result is not null terminated
void ComputeShaderHash(const std::string& shader,
char* result) const;
@@ -129,7 +134,9 @@ class GPU_GLES2_EXPORT ProgramCache {
GLenum transform_feedback_buffer_mode,
char* result) const;
- void Evict(const std::string& program_hash);
+ void Evict(const std::string& program_hash,
+ const std::string& shader_0_hash,
+ const std::string& shader_1_hash);
// Used by the passthrough program cache to notify when a new blob is
// inserted.
@@ -137,12 +144,15 @@ class GPU_GLES2_EXPORT ProgramCache {
private:
typedef std::unordered_map<std::string, LinkedProgramStatus> LinkStatusMap;
+ typedef std::unordered_set<std::string> CachedCompiledShaderSet;
// called to clear the backend cache
virtual void ClearBackend() = 0;
const size_t max_size_bytes_;
LinkStatusMap link_status_;
+ // only cache the hash of successfully compiled shaders
+ CachedCompiledShaderSet compiled_shaders_;
DISALLOW_COPY_AND_ASSIGN(ProgramCache);
};
diff --git a/chromium/gpu/command_buffer/service/program_cache_unittest.cc b/chromium/gpu/command_buffer/service/program_cache_unittest.cc
index e97f5ef5450..99773989eea 100644
--- a/chromium/gpu/command_buffer/service/program_cache_unittest.cc
+++ b/chromium/gpu/command_buffer/service/program_cache_unittest.cc
@@ -59,8 +59,12 @@ class NoBackendProgramCache : public ProgramCache {
varyings,
buffer_mode,
sha);
+ const std::string a_shaString(a_sha, kHashLength);
+ const std::string b_shaString(b_sha, kHashLength);
const std::string shaString(sha, kHashLength);
+ CompiledShaderCacheSuccess(a_shaString);
+ CompiledShaderCacheSuccess(b_shaString);
LinkedProgramCacheSuccess(shaString);
}
@@ -84,8 +88,10 @@ class NoBackendProgramCache : public ProgramCache {
result);
}
- void Evict(const std::string& program_hash) {
- ProgramCache::Evict(program_hash);
+ void Evict(const std::string& program_hash,
+ const std::string& shader_0_hash,
+ const std::string& shader_1_hash) {
+ ProgramCache::Evict(program_hash, shader_0_hash, shader_1_hash);
}
size_t Trim(size_t limit) override { return 0; }
@@ -155,7 +161,9 @@ TEST_F(ProgramCacheTest, StatusEviction) {
char sha[ProgramCache::kHashLength];
cache_->ComputeProgramHash(a_sha, b_sha, nullptr, varyings_, GL_NONE, sha);
- cache_->Evict(std::string(sha, ProgramCache::kHashLength));
+ cache_->Evict(std::string(sha, ProgramCache::kHashLength),
+ std::string(a_sha, ProgramCache::kHashLength),
+ std::string(b_sha, ProgramCache::kHashLength));
EXPECT_EQ(ProgramCache::LINK_UNKNOWN,
cache_->GetLinkedProgramStatus(shader1, shader2, nullptr, varyings_,
GL_NONE));
@@ -177,7 +185,9 @@ TEST_F(ProgramCacheTest, EvictionWithReusedShader) {
char sha[ProgramCache::kHashLength];
cache_->ComputeProgramHash(a_sha, b_sha, nullptr, varyings_, GL_NONE, sha);
- cache_->Evict(std::string(sha, ProgramCache::kHashLength));
+ cache_->Evict(std::string(sha, ProgramCache::kHashLength),
+ std::string(a_sha, ProgramCache::kHashLength),
+ std::string(b_sha, ProgramCache::kHashLength));
EXPECT_EQ(ProgramCache::LINK_UNKNOWN,
cache_->GetLinkedProgramStatus(shader1, shader2, nullptr, varyings_,
GL_NONE));
@@ -186,7 +196,9 @@ TEST_F(ProgramCacheTest, EvictionWithReusedShader) {
GL_NONE));
cache_->ComputeProgramHash(a_sha, c_sha, nullptr, varyings_, GL_NONE, sha);
- cache_->Evict(std::string(sha, ProgramCache::kHashLength));
+ cache_->Evict(std::string(sha, ProgramCache::kHashLength),
+ std::string(a_sha, ProgramCache::kHashLength),
+ std::string(c_sha, ProgramCache::kHashLength));
EXPECT_EQ(ProgramCache::LINK_UNKNOWN,
cache_->GetLinkedProgramStatus(shader1, shader2, nullptr, varyings_,
GL_NONE));
@@ -227,5 +239,29 @@ TEST_F(ProgramCacheTest, LinkUnknownOnTransformFeedbackChange) {
GL_INTERLEAVED_ATTRIBS));
}
+TEST_F(ProgramCacheTest, ShaderCompileStatus) {
+ const std::string shader1 = "abcd1234";
+ const std::string shader2 = "abcda sda b1~#4 bbbbb1234";
+ {
+ std::string shader_a = shader1;
+ std::string shader_b = shader2;
+
+ EXPECT_EQ(cache_->HasSuccessfullyCompiledShader(shader1), false);
+ EXPECT_EQ(cache_->HasSuccessfullyCompiledShader(shader2), false);
+ cache_->SaySuccessfullyCached(shader_a, shader_b, nullptr, varyings_,
+ GL_NONE);
+
+ shader_a.clear();
+ shader_b.clear();
+ }
+ // make sure it was copied
+ EXPECT_EQ(ProgramCache::LINK_SUCCEEDED,
+ cache_->GetLinkedProgramStatus(shader1, shader2, nullptr, varyings_,
+ GL_NONE));
+
+ EXPECT_EQ(cache_->HasSuccessfullyCompiledShader(shader1), true);
+ EXPECT_EQ(cache_->HasSuccessfullyCompiledShader(shader2), true);
+}
+
} // namespace gles2
} // namespace gpu
diff --git a/chromium/gpu/command_buffer/service/program_manager.cc b/chromium/gpu/command_buffer/service/program_manager.cc
index 12ba131a213..5d98b20cf2a 100644
--- a/chromium/gpu/command_buffer/service/program_manager.cc
+++ b/chromium/gpu/command_buffer/service/program_manager.cc
@@ -2702,6 +2702,14 @@ bool ProgramManager::IsOwned(Program* program) const {
return false;
}
+bool ProgramManager::HasCachedCompileStatus(Shader* shader) const {
+ if (program_cache_) {
+ return program_cache_->HasSuccessfullyCompiledShader(
+ shader->last_compiled_signature());
+ }
+ return false;
+}
+
void ProgramManager::RemoveProgramInfoIfUnused(
ShaderManager* shader_manager, Program* program) {
DCHECK(shader_manager);
diff --git a/chromium/gpu/command_buffer/service/program_manager.h b/chromium/gpu/command_buffer/service/program_manager.h
index f3452ea9f19..b4646ba080f 100644
--- a/chromium/gpu/command_buffer/service/program_manager.h
+++ b/chromium/gpu/command_buffer/service/program_manager.h
@@ -700,6 +700,9 @@ class GPU_GLES2_EXPORT ProgramManager {
// Check if a Program is owned by this ProgramManager.
bool IsOwned(Program* program) const;
+ // Return true if this shader has compiled status cached.
+ bool HasCachedCompileStatus(Shader* shader) const;
+
static int32_t MakeFakeLocation(int32_t index, int32_t element);
uint32_t max_varying_vectors() const { return max_varying_vectors_; }
diff --git a/chromium/gpu/command_buffer/service/raster_decoder.cc b/chromium/gpu/command_buffer/service/raster_decoder.cc
index 208fb4b5809..54659e675be 100644
--- a/chromium/gpu/command_buffer/service/raster_decoder.cc
+++ b/chromium/gpu/command_buffer/service/raster_decoder.cc
@@ -61,6 +61,7 @@
#include "gpu/vulkan/buildflags.h"
#include "third_party/skia/include/core/SkCanvas.h"
#include "third_party/skia/include/core/SkDeferredDisplayListRecorder.h"
+#include "third_party/skia/include/core/SkPromiseImageTexture.h"
#include "third_party/skia/include/core/SkSurface.h"
#include "third_party/skia/include/core/SkSurfaceProps.h"
#include "third_party/skia/include/core/SkTypeface.h"
@@ -69,6 +70,7 @@
#include "third_party/skia/include/gpu/GrContext.h"
#include "third_party/skia/include/gpu/GrTypes.h"
#include "ui/gfx/buffer_format_util.h"
+#include "ui/gfx/skia_util.h"
#include "ui/gl/gl_context.h"
#include "ui/gl/gl_gl_api_implementation.h"
#include "ui/gl/gl_surface.h"
@@ -391,16 +393,13 @@ class RasterDecoderImpl final : public RasterDecoder,
if (gr_context())
gr_context()->flush();
api()->glFlushFn();
+
+ // Flushes can be expensive, yield to allow interruption after each flush.
+ ExitCommandProcessingEarly();
}
#endif
}
- bool IsRobustnessSupported() {
- return has_robustness_extension_ &&
- shared_context_state_->context()
- ->WasAllocatedUsingRobustnessExtension();
- }
-
const gl::GLVersionInfo& gl_version_info() {
return feature_info()->gl_version_info();
}
@@ -431,6 +430,30 @@ class RasterDecoderImpl final : public RasterDecoder,
GLsizei width,
GLsizei height,
const volatile GLbyte* mailboxes);
+ void DoCopySubTextureINTERNALGLPassthrough(GLint xoffset,
+ GLint yoffset,
+ GLint x,
+ GLint y,
+ GLsizei width,
+ GLsizei height,
+ const Mailbox& source_mailbox,
+ const Mailbox& dest_mailbox);
+ void DoCopySubTextureINTERNALGL(GLint xoffset,
+ GLint yoffset,
+ GLint x,
+ GLint y,
+ GLsizei width,
+ GLsizei height,
+ const Mailbox& source_mailbox,
+ const Mailbox& dest_mailbox);
+ void DoCopySubTextureINTERNALSkia(GLint xoffset,
+ GLint yoffset,
+ GLint x,
+ GLint y,
+ GLsizei width,
+ GLsizei height,
+ const Mailbox& source_mailbox,
+ const Mailbox& dest_mailbox);
void DoLoseContextCHROMIUM(GLenum current, GLenum other) { NOTIMPLEMENTED(); }
void DoBeginRasterCHROMIUM(GLuint sk_color,
GLuint msaa_sample_count,
@@ -522,7 +545,6 @@ class RasterDecoderImpl final : public RasterDecoder,
bool use_passthrough_ = false;
bool use_ddl_ = false;
- bool has_robustness_extension_ = false;
bool reset_by_robustness_extension_ = false;
// The current decoder error communicates the decoder error through command
@@ -582,7 +604,7 @@ class RasterDecoderImpl final : public RasterDecoder,
gl::GLApi* api_ = nullptr;
- base::WeakPtrFactory<DecoderContext> weak_ptr_factory_;
+ base::WeakPtrFactory<DecoderContext> weak_ptr_factory_{this};
DISALLOW_COPY_AND_ASSIGN(RasterDecoderImpl);
};
@@ -685,8 +707,7 @@ RasterDecoderImpl::RasterDecoderImpl(
memory_tracker),
gpu_decoder_category_(TRACE_EVENT_API_GET_CATEGORY_GROUP_ENABLED(
TRACE_DISABLED_BY_DEFAULT("gpu.decoder"))),
- font_manager_(base::MakeRefCounted<ServiceFontManager>(this)),
- weak_ptr_factory_(this) {
+ font_manager_(base::MakeRefCounted<ServiceFontManager>(this)) {
DCHECK(shared_context_state_);
}
@@ -735,10 +756,6 @@ ContextResult RasterDecoderImpl::Initialize(
query_manager_ = std::make_unique<QueryManager>();
- has_robustness_extension_ = features().arb_robustness ||
- features().khr_robustness ||
- features().ext_robustness;
-
if (attrib_helper.enable_oop_rasterization) {
if (!features().chromium_raster_transport) {
LOG(ERROR) << "ContextResult::kFatalFailure: "
@@ -864,6 +881,7 @@ gl::GLSurface* RasterDecoderImpl::GetGLSurface() {
}
Capabilities RasterDecoderImpl::GetCapabilities() {
+ // TODO(enne): reconcile this with gles2_cmd_decoder's capability settings.
Capabilities caps;
caps.gpu_rasterization = supports_gpu_raster_;
caps.supports_oop_raster = supports_oop_raster_;
@@ -908,6 +926,9 @@ Capabilities RasterDecoderImpl::GetCapabilities() {
feature_info()->workarounds().max_3d_array_texture_size);
}
caps.sync_query = feature_info()->feature_flags().chromium_sync_query;
+ caps.msaa_is_slow = feature_info()->workarounds().msaa_is_slow;
+ caps.avoid_stencil_buffers =
+ feature_info()->workarounds().avoid_stencil_buffers;
if (gr_context()) {
caps.context_supports_distance_field_text =
@@ -1075,39 +1096,33 @@ bool RasterDecoderImpl::CheckResetStatus() {
DCHECK(!WasContextLost());
DCHECK(shared_context_state_->context()->IsCurrent(nullptr));
- if (IsRobustnessSupported()) {
- // If the reason for the call was a GL error, we can try to determine the
- // reset status more accurately.
- GLenum driver_status = api()->glGetGraphicsResetStatusARBFn();
- if (driver_status == GL_NO_ERROR)
- return false;
+ // If the reason for the call was a GL error, we can try to determine the
+ // reset status more accurately.
+ GLenum driver_status =
+ shared_context_state_->context()->CheckStickyGraphicsResetStatus();
+ if (driver_status == GL_NO_ERROR)
+ return false;
- LOG(ERROR) << "RasterDecoder context lost via ARB/EXT_robustness. Reset "
- "status = "
- << gles2::GLES2Util::GetStringEnum(driver_status);
-
- // Don't pretend we know which client was responsible.
- if (workarounds().use_virtualized_gl_contexts)
- driver_status = GL_UNKNOWN_CONTEXT_RESET_ARB;
-
- switch (driver_status) {
- case GL_GUILTY_CONTEXT_RESET_ARB:
- MarkContextLost(error::kGuilty);
- break;
- case GL_INNOCENT_CONTEXT_RESET_ARB:
- MarkContextLost(error::kInnocent);
- break;
- case GL_UNKNOWN_CONTEXT_RESET_ARB:
- MarkContextLost(error::kUnknown);
- break;
- default:
- NOTREACHED();
- return false;
- }
- reset_by_robustness_extension_ = true;
- return true;
+ LOG(ERROR) << "RasterDecoder context lost via ARB/EXT_robustness. Reset "
+ "status = "
+ << gles2::GLES2Util::GetStringEnum(driver_status);
+
+ switch (driver_status) {
+ case GL_GUILTY_CONTEXT_RESET_ARB:
+ MarkContextLost(error::kGuilty);
+ break;
+ case GL_INNOCENT_CONTEXT_RESET_ARB:
+ MarkContextLost(error::kInnocent);
+ break;
+ case GL_UNKNOWN_CONTEXT_RESET_ARB:
+ MarkContextLost(error::kUnknown);
+ break;
+ default:
+ NOTREACHED();
+ return false;
}
- return false;
+ reset_by_robustness_extension_ = true;
+ return true;
}
gles2::Logger* RasterDecoderImpl::GetLogger() {
@@ -1686,55 +1701,88 @@ void RasterDecoderImpl::DoCopySubTextureINTERNAL(
return;
}
- if (use_passthrough_) {
- std::unique_ptr<SharedImageRepresentationGLTexturePassthrough>
- source_shared_image =
- shared_image_representation_factory_.ProduceGLTexturePassthrough(
- source_mailbox);
- std::unique_ptr<SharedImageRepresentationGLTexturePassthrough>
- dest_shared_image =
- shared_image_representation_factory_.ProduceGLTexturePassthrough(
- dest_mailbox);
- if (!source_shared_image || !dest_shared_image) {
- LOCAL_SET_GL_ERROR(GL_INVALID_VALUE, "glCopySubTexture",
- "unknown mailbox");
- return;
- }
+ if (!shared_context_state_->GrContextIsGL()) {
+ // Use Skia to copy texture if raster's gr_context() is not using GL.
+ DoCopySubTextureINTERNALSkia(xoffset, yoffset, x, y, width, height,
+ source_mailbox, dest_mailbox);
+ } else if (use_passthrough_) {
+ DoCopySubTextureINTERNALGLPassthrough(xoffset, yoffset, x, y, width, height,
+ source_mailbox, dest_mailbox);
+ } else {
+ DoCopySubTextureINTERNALGL(xoffset, yoffset, x, y, width, height,
+ source_mailbox, dest_mailbox);
+ }
+}
- SharedImageRepresentationGLTexturePassthrough::ScopedAccess source_access(
- source_shared_image.get(), GL_SHARED_IMAGE_ACCESS_MODE_READ_CHROMIUM);
- if (!source_access.success()) {
- LOCAL_SET_GL_ERROR(GL_INVALID_VALUE, "glCopySubTexture",
- "unable to access source for read");
- return;
- }
+void RasterDecoderImpl::DoCopySubTextureINTERNALGLPassthrough(
+ GLint xoffset,
+ GLint yoffset,
+ GLint x,
+ GLint y,
+ GLsizei width,
+ GLsizei height,
+ const Mailbox& source_mailbox,
+ const Mailbox& dest_mailbox) {
+ DCHECK(source_mailbox != dest_mailbox);
+ DCHECK(use_passthrough_);
+
+ std::unique_ptr<SharedImageRepresentationGLTexturePassthrough>
+ source_shared_image =
+ shared_image_representation_factory_.ProduceGLTexturePassthrough(
+ source_mailbox);
+ std::unique_ptr<SharedImageRepresentationGLTexturePassthrough>
+ dest_shared_image =
+ shared_image_representation_factory_.ProduceGLTexturePassthrough(
+ dest_mailbox);
+ if (!source_shared_image || !dest_shared_image) {
+ LOCAL_SET_GL_ERROR(GL_INVALID_VALUE, "glCopySubTexture", "unknown mailbox");
+ return;
+ }
- SharedImageRepresentationGLTexturePassthrough::ScopedAccess dest_access(
- dest_shared_image.get(),
- GL_SHARED_IMAGE_ACCESS_MODE_READWRITE_CHROMIUM);
- if (!dest_access.success()) {
- LOCAL_SET_GL_ERROR(GL_INVALID_VALUE, "glCopySubTexture",
- "unable to access destination for write");
- return;
- }
+ SharedImageRepresentationGLTexturePassthrough::ScopedAccess source_access(
+ source_shared_image.get(), GL_SHARED_IMAGE_ACCESS_MODE_READ_CHROMIUM);
+ if (!source_access.success()) {
+ LOCAL_SET_GL_ERROR(GL_INVALID_VALUE, "glCopySubTexture",
+ "unable to access source for read");
+ return;
+ }
- gles2::TexturePassthrough* source_texture =
- source_shared_image->GetTexturePassthrough().get();
- gles2::TexturePassthrough* dest_texture =
- dest_shared_image->GetTexturePassthrough().get();
- DCHECK(!source_texture->is_bind_pending());
- DCHECK_NE(source_texture->service_id(), dest_texture->service_id());
-
- api()->glCopySubTextureCHROMIUMFn(
- source_texture->service_id(), /*source_level=*/0,
- dest_texture->target(), dest_texture->service_id(),
- /*dest_level=*/0, xoffset, yoffset, x, y, width, height,
- /*unpack_flip_y=*/false, /*unpack_premultiply_alpha=*/false,
- /*unpack_unmultiply_alpha=*/false);
- LOCAL_COPY_REAL_GL_ERRORS_TO_WRAPPER("glCopySubTexture");
+ SharedImageRepresentationGLTexturePassthrough::ScopedAccess dest_access(
+ dest_shared_image.get(), GL_SHARED_IMAGE_ACCESS_MODE_READWRITE_CHROMIUM);
+ if (!dest_access.success()) {
+ LOCAL_SET_GL_ERROR(GL_INVALID_VALUE, "glCopySubTexture",
+ "unable to access destination for write");
return;
}
+ gles2::TexturePassthrough* source_texture =
+ source_shared_image->GetTexturePassthrough().get();
+ gles2::TexturePassthrough* dest_texture =
+ dest_shared_image->GetTexturePassthrough().get();
+ DCHECK(!source_texture->is_bind_pending());
+ DCHECK_NE(source_texture->service_id(), dest_texture->service_id());
+
+ api()->glCopySubTextureCHROMIUMFn(
+ source_texture->service_id(), /*source_level=*/0, dest_texture->target(),
+ dest_texture->service_id(),
+ /*dest_level=*/0, xoffset, yoffset, x, y, width, height,
+ /*unpack_flip_y=*/false, /*unpack_premultiply_alpha=*/false,
+ /*unpack_unmultiply_alpha=*/false);
+ LOCAL_COPY_REAL_GL_ERRORS_TO_WRAPPER("glCopySubTexture");
+}
+
+void RasterDecoderImpl::DoCopySubTextureINTERNALGL(
+ GLint xoffset,
+ GLint yoffset,
+ GLint x,
+ GLint y,
+ GLsizei width,
+ GLsizei height,
+ const Mailbox& source_mailbox,
+ const Mailbox& dest_mailbox) {
+ DCHECK(source_mailbox != dest_mailbox);
+ DCHECK(shared_context_state_->GrContextIsGL());
+
std::unique_ptr<SharedImageRepresentationGLTexture> source_shared_image =
shared_image_representation_factory_.ProduceGLTexture(source_mailbox);
std::unique_ptr<SharedImageRepresentationGLTexture> dest_shared_image =
@@ -1951,6 +1999,102 @@ void RasterDecoderImpl::DoCopySubTextureINTERNAL(
}
}
+void RasterDecoderImpl::DoCopySubTextureINTERNALSkia(
+ GLint xoffset,
+ GLint yoffset,
+ GLint x,
+ GLint y,
+ GLsizei width,
+ GLsizei height,
+ const Mailbox& source_mailbox,
+ const Mailbox& dest_mailbox) {
+ DCHECK(source_mailbox != dest_mailbox);
+
+ // Use Skia to copy texture if raster's gr_context() is not using GL.
+ auto source_shared_image = shared_image_representation_factory_.ProduceSkia(
+ source_mailbox, shared_context_state_);
+ auto dest_shared_image = shared_image_representation_factory_.ProduceSkia(
+ dest_mailbox, shared_context_state_);
+ if (!source_shared_image || !dest_shared_image) {
+ LOCAL_SET_GL_ERROR(GL_INVALID_VALUE, "glCopySubTexture", "unknown mailbox");
+ return;
+ }
+
+ gfx::Size source_size = source_shared_image->size();
+ gfx::Rect source_rect(x, y, width, height);
+ if (!gfx::Rect(source_size).Contains(source_rect)) {
+ LOCAL_SET_GL_ERROR(GL_INVALID_VALUE, "glCopySubTexture",
+ "source texture bad dimensions.");
+ return;
+ }
+
+ gfx::Size dest_size = dest_shared_image->size();
+ gfx::Rect dest_rect(xoffset, yoffset, width, height);
+ if (!gfx::Rect(dest_size).Contains(dest_rect)) {
+ LOCAL_SET_GL_ERROR(GL_INVALID_VALUE, "glCopySubTexture",
+ "destination texture bad dimensions.");
+ return;
+ }
+
+ std::vector<GrBackendSemaphore> begin_semaphores;
+ std::vector<GrBackendSemaphore> end_semaphores;
+
+ SharedImageRepresentationSkia::ScopedWriteAccess dest_scoped_access(
+ dest_shared_image.get(), &begin_semaphores, &end_semaphores);
+ if (!dest_scoped_access.success()) {
+ LOCAL_SET_GL_ERROR(GL_INVALID_VALUE, "glCopySubTexture",
+ "Dest shared image is not writable");
+ return;
+ }
+
+ // With OneCopyRasterBufferProvider, source_shared_image->BeginReadAccess()
+ // will copy pixels from SHM GMB to the texture in |source_shared_image|,
+ // and then use drawImageRect() to draw that texure to the target
+ // |dest_shared_image|. We can save one copy by drawing the SHM GMB to the
+ // target |dest_shared_image| directly.
+ // TODO(penghuang): get rid of the one extra copy. https://crbug.com/984045
+ SharedImageRepresentationSkia::ScopedReadAccess source_scoped_access(
+ source_shared_image.get(), &begin_semaphores, &end_semaphores);
+
+ if (!begin_semaphores.empty()) {
+ bool result = dest_scoped_access.surface()->wait(begin_semaphores.size(),
+ begin_semaphores.data());
+ DCHECK(result);
+ }
+
+ if (!source_scoped_access.success()) {
+ LOCAL_SET_GL_ERROR(GL_INVALID_VALUE, "glCopySubTexture",
+ "Source shared image is not accessable");
+ } else {
+ auto color_type = viz::ResourceFormatToClosestSkColorType(
+ true /* gpu_compositing */, source_shared_image->format());
+ auto source_image = SkImage::MakeFromTexture(
+ shared_context_state_->gr_context(),
+ source_scoped_access.promise_image_texture()->backendTexture(),
+ kTopLeft_GrSurfaceOrigin, color_type, kUnpremul_SkAlphaType,
+ nullptr /* colorSpace */);
+
+ auto* canvas = dest_scoped_access.surface()->getCanvas();
+ SkPaint paint;
+ paint.setBlendMode(SkBlendMode::kSrc);
+ canvas->drawImageRect(source_image, gfx::RectToSkRect(source_rect),
+ gfx::RectToSkRect(dest_rect), &paint);
+ }
+
+ // Always flush the surface even if source_scoped_access.success() is false,
+ // so the begin_semaphores can be released, and end_semaphores can be
+ // signalled.
+ GrFlushInfo flush_info = {
+ .fFlags = kNone_GrFlushFlags,
+ .fNumSemaphores = end_semaphores.size(),
+ .fSignalSemaphores = end_semaphores.data(),
+ };
+ gpu::AddVulkanCleanupTaskForSkiaFlush(
+ shared_context_state_->vk_context_provider(), &flush_info);
+ dest_scoped_access.surface()->flush(
+ SkSurface::BackendSurfaceAccess::kNoAccess, flush_info);
+}
+
namespace {
// Helper to read client data from transfer cache.
@@ -2035,7 +2179,7 @@ void RasterDecoderImpl::DoBeginRasterCHROMIUM(
// commands between BeginRaster and EndRaster will not flush).
FlushToWorkAroundMacCrashes();
- if (!gr_context()) {
+ if (!gr_context() || !supports_oop_raster_) {
LOCAL_SET_GL_ERROR(GL_INVALID_OPERATION, "glBeginRasterCHROMIUM",
"chromium_raster_transport not enabled via attribs");
return;
@@ -2241,16 +2385,9 @@ void RasterDecoderImpl::DoEndRasterCHROMIUM() {
};
AddVulkanCleanupTaskForSkiaFlush(
shared_context_state_->vk_context_provider(), &flush_info);
- if (use_ddl_) {
- // TODO(penghuang): Switch to sk_surface_->flush() when skia flush bug is
- // fixed. https://crbug.com/958055
- auto result = gr_context()->flush(flush_info);
- DCHECK(result == GrSemaphoresSubmitted::kYes || end_semaphores_.empty());
- } else {
- auto result = sk_surface_->flush(
- SkSurface::BackendSurfaceAccess::kPresent, flush_info);
- DCHECK(result == GrSemaphoresSubmitted::kYes || end_semaphores_.empty());
- }
+ auto result = sk_surface_->flush(SkSurface::BackendSurfaceAccess::kPresent,
+ flush_info);
+ DCHECK(result == GrSemaphoresSubmitted::kYes || end_semaphores_.empty());
end_semaphores_.clear();
}
@@ -2274,9 +2411,9 @@ void RasterDecoderImpl::DoEndRasterCHROMIUM() {
locked_handles_.clear();
// We just flushed a tile's worth of GPU work from the SkSurface in
- // flush above. Use kDeferLaterCommands to ensure we yield to
- // the Scheduler before processing more commands.
- current_decoder_error_ = error::kDeferLaterCommands;
+ // flush above. Yield to the Scheduler to allow pre-emption before
+ // processing more commands.
+ ExitCommandProcessingEarly();
}
void RasterDecoderImpl::DoCreateTransferCacheEntryINTERNAL(
@@ -2340,6 +2477,12 @@ void RasterDecoderImpl::DoCreateTransferCacheEntryINTERNAL(
"Failure to deserialize transfer cache entry.");
return;
}
+
+ // The only entry using the GrContext are image transfer cache entries for
+ // image uploads. Since this tends to a slow operation, yield to allow the
+ // decoder to be pre-empted.
+ if (context_for_entry)
+ ExitCommandProcessingEarly();
}
void RasterDecoderImpl::DoUnlockTransferCacheEntryINTERNAL(
diff --git a/chromium/gpu/command_buffer/service/raster_decoder_unittest.cc b/chromium/gpu/command_buffer/service/raster_decoder_unittest.cc
index e6caf57d827..06bb255c190 100644
--- a/chromium/gpu/command_buffer/service/raster_decoder_unittest.cc
+++ b/chromium/gpu/command_buffer/service/raster_decoder_unittest.cc
@@ -257,13 +257,6 @@ TEST_P(RasterDecoderManualInitTest, CopyTexSubImage2DValidateColorFormat) {
EXPECT_EQ(GL_INVALID_OPERATION, GetGLError());
}
-TEST_P(RasterDecoderTest, YieldAfterEndRasterCHROMIUM) {
- GetDecoder()->SetUpForRasterCHROMIUMForTest();
- cmds::EndRasterCHROMIUM end_raster_cmd;
- end_raster_cmd.Init();
- EXPECT_EQ(error::kDeferLaterCommands, ExecuteCmd(end_raster_cmd));
-}
-
class RasterDecoderOOPTest : public testing::Test, DecoderClient {
public:
void SetUp() override {
diff --git a/chromium/gpu/command_buffer/service/raster_decoder_unittest_0_autogen.h b/chromium/gpu/command_buffer/service/raster_decoder_unittest_0_autogen.h
deleted file mode 100644
index 200cbb57aba..00000000000
--- a/chromium/gpu/command_buffer/service/raster_decoder_unittest_0_autogen.h
+++ /dev/null
@@ -1,135 +0,0 @@
-// Copyright 2018 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// This file is auto-generated from
-// gpu/command_buffer/build_raster_cmd_buffer.py
-// It's formatted by clang-format using chromium coding style:
-// clang-format -i -style=chromium filename
-// DO NOT EDIT!
-
-// It is included by raster_cmd_decoder_unittest_base.cc
-#ifndef GPU_COMMAND_BUFFER_SERVICE_RASTER_DECODER_UNITTEST_0_AUTOGEN_H_
-#define GPU_COMMAND_BUFFER_SERVICE_RASTER_DECODER_UNITTEST_0_AUTOGEN_H_
-
-void RasterDecoderTestBase::SetupInitCapabilitiesExpectations(
- bool es3_capable) {
- ExpectEnableDisable(GL_BLEND, false);
- ExpectEnableDisable(GL_CULL_FACE, false);
- ExpectEnableDisable(GL_DEPTH_TEST, false);
- ExpectEnableDisable(GL_DITHER, true);
- ExpectEnableDisable(GL_POLYGON_OFFSET_FILL, false);
- ExpectEnableDisable(GL_SAMPLE_ALPHA_TO_COVERAGE, false);
- ExpectEnableDisable(GL_SAMPLE_COVERAGE, false);
- ExpectEnableDisable(GL_SCISSOR_TEST, false);
- ExpectEnableDisable(GL_STENCIL_TEST, false);
- if (feature_info()->feature_flags().ext_multisample_compatibility) {
- ExpectEnableDisable(GL_MULTISAMPLE_EXT, true);
- }
- if (feature_info()->feature_flags().ext_multisample_compatibility) {
- ExpectEnableDisable(GL_SAMPLE_ALPHA_TO_ONE_EXT, false);
- }
- if (es3_capable) {
- ExpectEnableDisable(GL_RASTERIZER_DISCARD, false);
- ExpectEnableDisable(GL_PRIMITIVE_RESTART_FIXED_INDEX, false);
- }
-}
-
-void RasterDecoderTestBase::SetupInitStateExpectations(bool es3_capable) {
- auto* feature_info_ = feature_info();
- EXPECT_CALL(*gl_, BlendColor(0.0f, 0.0f, 0.0f, 0.0f))
- .Times(1)
- .RetiresOnSaturation();
- EXPECT_CALL(*gl_, BlendEquationSeparate(GL_FUNC_ADD, GL_FUNC_ADD))
- .Times(1)
- .RetiresOnSaturation();
- EXPECT_CALL(*gl_, BlendFuncSeparate(GL_ONE, GL_ZERO, GL_ONE, GL_ZERO))
- .Times(1)
- .RetiresOnSaturation();
- EXPECT_CALL(*gl_, ClearColor(0.0f, 0.0f, 0.0f, 0.0f))
- .Times(1)
- .RetiresOnSaturation();
- EXPECT_CALL(*gl_, ClearDepth(1.0f)).Times(1).RetiresOnSaturation();
- EXPECT_CALL(*gl_, ClearStencil(0)).Times(1).RetiresOnSaturation();
- EXPECT_CALL(*gl_, ColorMask(true, true, true, true))
- .Times(1)
- .RetiresOnSaturation();
- if (feature_info()->feature_flags().chromium_framebuffer_mixed_samples) {
- EXPECT_CALL(*gl_, CoverageModulationNV(GL_NONE))
- .Times(1)
- .RetiresOnSaturation();
- }
- EXPECT_CALL(*gl_, CullFace(GL_BACK)).Times(1).RetiresOnSaturation();
- EXPECT_CALL(*gl_, DepthFunc(GL_LESS)).Times(1).RetiresOnSaturation();
- EXPECT_CALL(*gl_, DepthMask(true)).Times(1).RetiresOnSaturation();
- EXPECT_CALL(*gl_, DepthRange(0.0f, 1.0f)).Times(1).RetiresOnSaturation();
- EXPECT_CALL(*gl_, FrontFace(GL_CCW)).Times(1).RetiresOnSaturation();
- if (!feature_info_->gl_version_info().is_desktop_core_profile) {
- EXPECT_CALL(*gl_, Hint(GL_GENERATE_MIPMAP_HINT, GL_DONT_CARE))
- .Times(1)
- .RetiresOnSaturation();
- }
- if (feature_info_->feature_flags().oes_standard_derivatives) {
- EXPECT_CALL(*gl_,
- Hint(GL_FRAGMENT_SHADER_DERIVATIVE_HINT_OES, GL_DONT_CARE))
- .Times(1)
- .RetiresOnSaturation();
- }
- if (feature_info_->feature_flags().chromium_texture_filtering_hint) {
- EXPECT_CALL(*gl_, Hint(GL_TEXTURE_FILTERING_HINT_CHROMIUM, GL_NICEST))
- .Times(1)
- .RetiresOnSaturation();
- }
- SetupInitStateManualExpectationsForDoLineWidth(1.0f);
- if (feature_info_->feature_flags().chromium_path_rendering) {
- EXPECT_CALL(*gl_, MatrixLoadfEXT(GL_PATH_MODELVIEW_CHROMIUM, _))
- .Times(1)
- .RetiresOnSaturation();
- }
- if (feature_info_->feature_flags().chromium_path_rendering) {
- EXPECT_CALL(*gl_, MatrixLoadfEXT(GL_PATH_PROJECTION_CHROMIUM, _))
- .Times(1)
- .RetiresOnSaturation();
- }
- if (feature_info()->feature_flags().chromium_path_rendering) {
- EXPECT_CALL(*gl_, PathStencilFuncNV(GL_ALWAYS, 0, 0xFFFFFFFFU))
- .Times(1)
- .RetiresOnSaturation();
- }
- EXPECT_CALL(*gl_, PixelStorei(GL_PACK_ALIGNMENT, 4))
- .Times(1)
- .RetiresOnSaturation();
- EXPECT_CALL(*gl_, PixelStorei(GL_UNPACK_ALIGNMENT, 4))
- .Times(1)
- .RetiresOnSaturation();
- EXPECT_CALL(*gl_, PolygonOffset(0.0f, 0.0f)).Times(1).RetiresOnSaturation();
- EXPECT_CALL(*gl_, SampleCoverage(1.0f, false)).Times(1).RetiresOnSaturation();
- EXPECT_CALL(*gl_,
- Scissor(kViewportX, kViewportY, kViewportWidth, kViewportHeight))
- .Times(1)
- .RetiresOnSaturation();
- EXPECT_CALL(*gl_, StencilFuncSeparate(GL_FRONT, GL_ALWAYS, 0, 0xFFFFFFFFU))
- .Times(1)
- .RetiresOnSaturation();
- EXPECT_CALL(*gl_, StencilFuncSeparate(GL_BACK, GL_ALWAYS, 0, 0xFFFFFFFFU))
- .Times(1)
- .RetiresOnSaturation();
- EXPECT_CALL(*gl_, StencilMaskSeparate(GL_FRONT, 0xFFFFFFFFU))
- .Times(1)
- .RetiresOnSaturation();
- EXPECT_CALL(*gl_, StencilMaskSeparate(GL_BACK, 0xFFFFFFFFU))
- .Times(1)
- .RetiresOnSaturation();
- EXPECT_CALL(*gl_, StencilOpSeparate(GL_FRONT, GL_KEEP, GL_KEEP, GL_KEEP))
- .Times(1)
- .RetiresOnSaturation();
- EXPECT_CALL(*gl_, StencilOpSeparate(GL_BACK, GL_KEEP, GL_KEEP, GL_KEEP))
- .Times(1)
- .RetiresOnSaturation();
- EXPECT_CALL(*gl_,
- Viewport(kViewportX, kViewportY, kViewportWidth, kViewportHeight))
- .Times(1)
- .RetiresOnSaturation();
- SetupInitStateManualExpectations(es3_capable);
-}
-#endif // GPU_COMMAND_BUFFER_SERVICE_RASTER_DECODER_UNITTEST_0_AUTOGEN_H_
diff --git a/chromium/gpu/command_buffer/service/raster_decoder_unittest_base.cc b/chromium/gpu/command_buffer/service/raster_decoder_unittest_base.cc
index 323c8516286..86f850909b1 100644
--- a/chromium/gpu/command_buffer/service/raster_decoder_unittest_base.cc
+++ b/chromium/gpu/command_buffer/service/raster_decoder_unittest_base.cc
@@ -22,6 +22,7 @@
#include "gpu/command_buffer/common/raster_cmd_format.h"
#include "gpu/command_buffer/common/shared_image_usage.h"
#include "gpu/command_buffer/service/context_group.h"
+#include "gpu/command_buffer/service/context_state_test_helpers.h"
#include "gpu/command_buffer/service/copy_texture_chromium_mock.h"
#include "gpu/command_buffer/service/gpu_switches.h"
#include "gpu/command_buffer/service/logger.h"
@@ -113,38 +114,6 @@ void RasterDecoderTestBase::AddExpectationsForRestoreAttribState(
}
}
-void RasterDecoderTestBase::SetupInitStateManualExpectations(bool es3_capable) {
- if (es3_capable) {
- EXPECT_CALL(*gl_, PixelStorei(GL_PACK_ROW_LENGTH, 0))
- .Times(1)
- .RetiresOnSaturation();
- EXPECT_CALL(*gl_, PixelStorei(GL_UNPACK_ROW_LENGTH, 0))
- .Times(1)
- .RetiresOnSaturation();
- EXPECT_CALL(*gl_, PixelStorei(GL_UNPACK_IMAGE_HEIGHT, 0))
- .Times(1)
- .RetiresOnSaturation();
- if (feature_info()->feature_flags().ext_window_rectangles) {
- EXPECT_CALL(*gl_, WindowRectanglesEXT(GL_EXCLUSIVE_EXT, 0, nullptr))
- .Times(1)
- .RetiresOnSaturation();
- }
- }
-}
-
-void RasterDecoderTestBase::SetupInitStateManualExpectationsForDoLineWidth(
- GLfloat width) {
- EXPECT_CALL(*gl_, LineWidth(width)).Times(1).RetiresOnSaturation();
-}
-
-void RasterDecoderTestBase::ExpectEnableDisable(GLenum cap, bool enable) {
- if (enable) {
- EXPECT_CALL(*gl_, Enable(cap)).Times(1).RetiresOnSaturation();
- } else {
- EXPECT_CALL(*gl_, Disable(cap)).Times(1).RetiresOnSaturation();
- }
-}
-
gpu::Mailbox RasterDecoderTestBase::CreateFakeTexture(
GLuint service_id,
viz::ResourceFormat resource_format,
@@ -203,8 +172,13 @@ void RasterDecoderTestBase::InitDecoder(const InitState& init) {
EXPECT_CALL(*gl_, GetIntegerv(GL_MAX_VERTEX_ATTRIBS, _))
.WillOnce(SetArgPointee<1>(8u))
.RetiresOnSaturation();
- SetupInitCapabilitiesExpectations(feature_info()->IsES3Capable());
- SetupInitStateExpectations(feature_info()->IsES3Capable());
+ ContextStateTestHelpers::SetupInitState(gl_.get(), feature_info(),
+ gfx::Size(1, 1));
+
+ if (context_->HasRobustness()) {
+ EXPECT_CALL(*gl_, GetGraphicsResetStatusARB())
+ .WillOnce(Return(GL_NO_ERROR));
+ }
shared_context_state_ = base::MakeRefCounted<SharedContextState>(
new gl::GLShareGroup(), surface_, context_,
@@ -236,7 +210,7 @@ void RasterDecoderTestBase::InitDecoder(const InitState& init) {
gpu::ContextResult::kSuccess);
EXPECT_CALL(*context_, MakeCurrent(surface_.get())).WillOnce(Return(true));
- if (context_->WasAllocatedUsingRobustnessExtension()) {
+ if (context_->HasRobustness()) {
EXPECT_CALL(*gl_, GetGraphicsResetStatusARB())
.WillOnce(Return(GL_NO_ERROR));
}
@@ -400,21 +374,11 @@ void RasterDecoderTestBase::SetupClearTextureExpectations(
#endif
}
-// Include the auto-generated part of this file. We split this because it means
-// we can easily edit the non-auto generated parts right here in this file
-// instead of having to edit some template or the code generator.
-#include "gpu/command_buffer/service/raster_decoder_unittest_0_autogen.h"
-
// GCC requires these declarations, but MSVC requires they not be present
#ifndef COMPILER_MSVC
const GLint RasterDecoderTestBase::kMaxTextureSize;
const GLint RasterDecoderTestBase::kNumTextureUnits;
-const GLint RasterDecoderTestBase::kViewportX;
-const GLint RasterDecoderTestBase::kViewportY;
-const GLint RasterDecoderTestBase::kViewportWidth;
-const GLint RasterDecoderTestBase::kViewportHeight;
-
const GLuint RasterDecoderTestBase::kServiceBufferId;
const GLuint RasterDecoderTestBase::kServiceTextureId;
const GLuint RasterDecoderTestBase::kServiceVertexArrayId;
diff --git a/chromium/gpu/command_buffer/service/raster_decoder_unittest_base.h b/chromium/gpu/command_buffer/service/raster_decoder_unittest_base.h
index 9e72426645b..042994e346e 100644
--- a/chromium/gpu/command_buffer/service/raster_decoder_unittest_base.h
+++ b/chromium/gpu/command_buffer/service/raster_decoder_unittest_base.h
@@ -14,7 +14,7 @@
#include <string>
#include <vector>
-#include "base/message_loop/message_loop.h"
+#include "base/test/scoped_task_environment.h"
#include "components/viz/common/resources/resource_format.h"
#include "gpu/command_buffer/client/client_test_helper.h"
#include "gpu/command_buffer/common/raster_cmd_format.h"
@@ -153,12 +153,6 @@ class RasterDecoderTestBase : public ::testing::TestWithParam<bool>,
typedef gles2::TestHelper::AttribInfo AttribInfo;
typedef gles2::TestHelper::UniformInfo UniformInfo;
- void SetupInitCapabilitiesExpectations(bool es3_capable);
- void SetupInitStateExpectations(bool es3_capable);
- void SetupInitStateManualExpectations(bool es3_capable);
- void SetupInitStateManualExpectationsForDoLineWidth(GLfloat width);
- void ExpectEnableDisable(GLenum cap, bool enable);
-
gpu::Mailbox CreateFakeTexture(GLuint service_id,
viz::ResourceFormat resource_format,
GLsizei width,
@@ -198,11 +192,6 @@ class RasterDecoderTestBase : public ::testing::TestWithParam<bool>,
static const GLint kNumTextureUnits = 8;
static const GLint kNumVertexAttribs = 16;
- static const GLint kViewportX = 0;
- static const GLint kViewportY = 0;
- static const GLint kViewportWidth = 1;
- static const GLint kViewportHeight = 1;
-
static const GLuint kServiceBufferId = 301;
static const GLuint kServiceTextureId = 304;
static const GLuint kServiceVertexArrayId = 310;
@@ -246,7 +235,7 @@ class RasterDecoderTestBase : public ::testing::TestWithParam<bool>,
MemoryTypeTracker memory_tracker_;
std::vector<std::unique_ptr<SharedImageRepresentationFactoryRef>>
shared_images_;
- base::MessageLoop message_loop_;
+ base::test::ScopedTaskEnvironment scoped_task_environment_;
gles2::MockCopyTextureResourceManager* copy_texture_manager_; // not owned
GLuint next_fake_texture_client_id_ = 271828;
};
diff --git a/chromium/gpu/command_buffer/service/raster_decoder_unittest_context_lost.cc b/chromium/gpu/command_buffer/service/raster_decoder_unittest_context_lost.cc
index 2574c192163..054a41e19a9 100644
--- a/chromium/gpu/command_buffer/service/raster_decoder_unittest_context_lost.cc
+++ b/chromium/gpu/command_buffer/service/raster_decoder_unittest_context_lost.cc
@@ -33,7 +33,7 @@ class RasterDecoderOOMTest : public RasterDecoderManualInitTest {
void OOM(GLenum reset_status,
error::ContextLostReason expected_other_reason) {
- if (context_->WasAllocatedUsingRobustnessExtension()) {
+ if (context_->HasRobustness()) {
EXPECT_CALL(*gl_, GetGraphicsResetStatusARB())
.WillOnce(Return(reset_status));
}
diff --git a/chromium/gpu/command_buffer/service/scheduler.cc b/chromium/gpu/command_buffer/service/scheduler.cc
index c0ff111b12d..7c52490bff7 100644
--- a/chromium/gpu/command_buffer/service/scheduler.cc
+++ b/chromium/gpu/command_buffer/service/scheduler.cc
@@ -291,8 +291,7 @@ void Scheduler::Sequence::RemoveClientWait(CommandBufferId command_buffer_id) {
Scheduler::Scheduler(scoped_refptr<base::SingleThreadTaskRunner> task_runner,
SyncPointManager* sync_point_manager)
: task_runner_(std::move(task_runner)),
- sync_point_manager_(sync_point_manager),
- weak_factory_(this) {
+ sync_point_manager_(sync_point_manager) {
DCHECK(thread_checker_.CalledOnValidThread());
// Store weak ptr separately because calling GetWeakPtr() is not thread safe.
weak_ptr_ = weak_factory_.GetWeakPtr();
@@ -303,7 +302,6 @@ Scheduler::~Scheduler() {
}
SequenceId Scheduler::CreateSequence(SchedulingPriority priority) {
- DCHECK(thread_checker_.CalledOnValidThread());
base::AutoLock auto_lock(lock_);
scoped_refptr<SyncPointOrderData> order_data =
sync_point_manager_->CreateSyncPointOrderData();
@@ -315,7 +313,6 @@ SequenceId Scheduler::CreateSequence(SchedulingPriority priority) {
}
void Scheduler::DestroySequence(SequenceId sequence_id) {
- DCHECK(thread_checker_.CalledOnValidThread());
base::AutoLock auto_lock(lock_);
Sequence* sequence = GetSequence(sequence_id);
diff --git a/chromium/gpu/command_buffer/service/scheduler.h b/chromium/gpu/command_buffer/service/scheduler.h
index 41ac5cfca3d..b9ed75c3f04 100644
--- a/chromium/gpu/command_buffer/service/scheduler.h
+++ b/chromium/gpu/command_buffer/service/scheduler.h
@@ -56,9 +56,11 @@ class GPU_EXPORT Scheduler {
// Create a sequence with given priority. Returns an identifier for the
// sequence that can be used with SyncPonintManager for creating sync point
// release clients. Sequences start off as enabled (see |EnableSequence|).
+ // Sequence could be created outside of GPU thread.
SequenceId CreateSequence(SchedulingPriority priority);
- // Destroy the sequence and run any scheduled tasks immediately.
+ // Destroy the sequence and run any scheduled tasks immediately. Sequence
+ // could be destroyed outside of GPU thread.
void DestroySequence(SequenceId sequence_id);
// Enables the sequence so that its tasks may be scheduled.
@@ -335,7 +337,7 @@ class GPU_EXPORT Scheduler {
// Invalidated on main thread.
base::WeakPtr<Scheduler> weak_ptr_;
- base::WeakPtrFactory<Scheduler> weak_factory_;
+ base::WeakPtrFactory<Scheduler> weak_factory_{this};
private:
FRIEND_TEST_ALL_PREFIXES(SchedulerTest, StreamPriorities);
diff --git a/chromium/gpu/command_buffer/service/service_font_manager.cc b/chromium/gpu/command_buffer/service/service_font_manager.cc
index 3feef451a3f..f025d0ce966 100644
--- a/chromium/gpu/command_buffer/service/service_font_manager.cc
+++ b/chromium/gpu/command_buffer/service/service_font_manager.cc
@@ -4,9 +4,13 @@
#include "gpu/command_buffer/service/service_font_manager.h"
+#include <inttypes.h>
+
#include "base/debug/dump_without_crashing.h"
#include "base/metrics/histogram_macros.h"
#include "base/rand_util.h"
+#include "base/strings/stringprintf.h"
+#include "components/crash/core/common/crash_key.h"
#include "gpu/command_buffer/common/buffer.h"
#include "gpu/command_buffer/common/discardable_handle.h"
@@ -86,6 +90,8 @@ class ServiceFontManager::SkiaDiscardableManager
: font_manager_(std::move(font_manager)) {}
~SkiaDiscardableManager() override = default;
+ static constexpr int kMaxDumps = 5;
+
bool deleteHandle(SkDiscardableHandleId handle_id) override {
if (!font_manager_)
return true;
@@ -104,13 +110,29 @@ class ServiceFontManager::SkiaDiscardableManager
type == SkStrikeClient::kGlyphPath ||
type == SkStrikeClient::kGlyphImage);
- constexpr int kMaxDumps = 5;
if (no_fallback && dump_count_ < kMaxDumps && base::RandInt(1, 100) == 1) {
++dump_count_;
base::debug::DumpWithoutCrashing();
}
}
+ void notifyReadFailure(
+ const DiscardableHandleManager::ReadFailureData& data) override {
+ if (dump_count_ >= kMaxDumps)
+ return;
+
+ std::string str = base::StringPrintf(
+ "ms: %zd, br: %zd, ts: %" PRIu64 ", sc: %" PRIu64 ", gic: %" PRIu64
+ ", gpc: %" PRIu64,
+ data.memorySize, data.bytesRead, data.typefaceSize, data.strikeCount,
+ data.glyphImagesCount, data.glyphPathsCount);
+ static crash_reporter::CrashKeyString<128> crash_key("oop_read_failure");
+ crash_reporter::ScopedCrashKeyString auto_clear(&crash_key, str);
+
+ ++dump_count_;
+ base::debug::DumpWithoutCrashing();
+ }
+
private:
int dump_count_ = 0;
scoped_refptr<ServiceFontManager> font_manager_;
diff --git a/chromium/gpu/command_buffer/service/service_transfer_cache.cc b/chromium/gpu/command_buffer/service/service_transfer_cache.cc
index aaf3d0fd5a1..83ad1fe9e72 100644
--- a/chromium/gpu/command_buffer/service/service_transfer_cache.cc
+++ b/chromium/gpu/command_buffer/service/service_transfer_cache.cc
@@ -16,6 +16,7 @@
#include "cc/paint/image_transfer_cache_entry.h"
#include "gpu/command_buffer/service/service_discardable_manager.h"
#include "third_party/skia/include/core/SkImage.h"
+#include "third_party/skia/include/core/SkYUVAIndex.h"
#include "third_party/skia/include/gpu/GrBackendSurface.h"
#include "ui/gl/trace_util.h"
@@ -26,6 +27,9 @@ namespace {
// unbounded handle growth with tiny entries.
static size_t kMaxCacheEntries = 2000;
+// Alias the image entry to its skia counterpart, taking ownership of the
+// memory and preventing double counting.
+//
// TODO(ericrk): Move this into ServiceImageTransferCacheEntry - here for now
// due to ui/gl dependency.
void DumpMemoryForImageTransferCacheEntry(
@@ -33,18 +37,12 @@ void DumpMemoryForImageTransferCacheEntry(
const std::string& dump_name,
const cc::ServiceImageTransferCacheEntry* entry) {
using base::trace_event::MemoryAllocatorDump;
+ DCHECK(entry->image());
MemoryAllocatorDump* dump = pmd->CreateAllocatorDump(dump_name);
dump->AddScalar(MemoryAllocatorDump::kNameSize,
MemoryAllocatorDump::kUnitsBytes, entry->CachedSize());
- // Alias the image entry to its skia counterpart, taking ownership of the
- // memory and preventing double counting.
- //
- // TODO(andrescj): if entry->image() is backed by multiple textures,
- // getBackendTexture() would end up flattening them which is undesirable:
- // figure out how to report memory usage for those cases.
- DCHECK(entry->image());
GrBackendTexture image_backend_texture =
entry->image()->getBackendTexture(false /* flushPendingGrContextIO */);
GrGLTextureInfo info;
@@ -58,6 +56,57 @@ void DumpMemoryForImageTransferCacheEntry(
}
}
+// Alias each texture of the YUV image entry to its Skia texture counterpart,
+// taking ownership of the memory and preventing double counting.
+//
+// Because hardware-decoded images do not have knowledge of the individual plane
+// sizes, we allow |plane_sizes| to be empty and report the aggregate size for
+// plane_0 and give plane_1 and plane_2 size 0.
+//
+// TODO(ericrk): Move this into ServiceImageTransferCacheEntry - here for now
+// due to ui/gl dependency.
+void DumpMemoryForYUVImageTransferCacheEntry(
+ base::trace_event::ProcessMemoryDump* pmd,
+ const std::string& dump_base_name,
+ const cc::ServiceImageTransferCacheEntry* entry) {
+ using base::trace_event::MemoryAllocatorDump;
+ DCHECK(entry->image());
+ DCHECK(entry->is_yuv());
+
+ std::vector<size_t> plane_sizes = entry->GetPlaneCachedSizes();
+ for (size_t i = 0u; i < entry->num_planes(); ++i) {
+ MemoryAllocatorDump* dump = pmd->CreateAllocatorDump(
+ dump_base_name +
+ base::StringPrintf("/plane_%0u", base::checked_cast<uint32_t>(i)));
+ if (plane_sizes.empty()) {
+ // Hardware-decoded image case.
+ dump->AddScalar(MemoryAllocatorDump::kNameSize,
+ MemoryAllocatorDump::kUnitsBytes,
+ (i == SkYUVAIndex::kY_Index) ? entry->CachedSize() : 0u);
+ } else {
+ DCHECK_EQ(plane_sizes.size(), entry->num_planes());
+ dump->AddScalar(MemoryAllocatorDump::kNameSize,
+ MemoryAllocatorDump::kUnitsBytes, plane_sizes.at(i));
+ }
+
+ // If entry->image() is backed by multiple textures,
+ // getBackendTexture() would end up flattening them to RGB, which is
+ // undesirable.
+ GrBackendTexture image_backend_texture =
+ entry->GetPlaneImage(i)->getBackendTexture(
+ false /* flushPendingGrContextIO */);
+ GrGLTextureInfo info;
+ if (image_backend_texture.getGLTextureInfo(&info)) {
+ auto guid = gl::GetGLTextureRasterGUIDForTracing(info.fID);
+ pmd->CreateSharedGlobalAllocatorDump(guid);
+ // Importance of 3 gives this dump priority over the dump made by Skia
+ // (importance 2), attributing memory here.
+ const int kImportance = 3;
+ pmd->AddOwnershipEdge(dump->guid(), guid, kImportance);
+ }
+ }
+}
+
} // namespace
ServiceTransferCache::CacheEntryInternal::CacheEntryInternal(
@@ -217,9 +266,9 @@ bool ServiceTransferCache::CreateLockedHardwareDecodedImageEntry(
ServiceDiscardableHandle handle,
GrContext* context,
std::vector<sk_sp<SkImage>> plane_images,
+ cc::YUVDecodeFormat plane_images_format,
size_t buffer_byte_size,
- bool needs_mips,
- sk_sp<SkColorSpace> target_color_space) {
+ bool needs_mips) {
EntryKey key(decoder_id, cc::TransferCacheEntryType::kImage, entry_id);
auto found = entries_.Peek(key);
if (found != entries_.end())
@@ -228,8 +277,8 @@ bool ServiceTransferCache::CreateLockedHardwareDecodedImageEntry(
// Create the service-side image transfer cache entry.
auto entry = std::make_unique<cc::ServiceImageTransferCacheEntry>();
if (!entry->BuildFromHardwareDecodedImage(context, std::move(plane_images),
- buffer_byte_size, needs_mips,
- std::move(target_color_space))) {
+ plane_images_format,
+ buffer_byte_size, needs_mips)) {
return false;
}
@@ -269,11 +318,16 @@ bool ServiceTransferCache::OnMemoryDump(
}
if (image_entry && image_entry->fits_on_gpu()) {
- std::string dump_name = base::StringPrintf(
+ std::string dump_base_name = base::StringPrintf(
"gpu/transfer_cache/cache_0x%" PRIXPTR "/gpu/entry_0x%" PRIXPTR,
reinterpret_cast<uintptr_t>(this),
reinterpret_cast<uintptr_t>(entry));
- DumpMemoryForImageTransferCacheEntry(pmd, dump_name, image_entry);
+ if (image_entry->is_yuv()) {
+ DumpMemoryForYUVImageTransferCacheEntry(pmd, dump_base_name,
+ image_entry);
+ } else {
+ DumpMemoryForImageTransferCacheEntry(pmd, dump_base_name, image_entry);
+ }
} else {
std::string dump_name = base::StringPrintf(
"gpu/transfer_cache/cache_0x%" PRIXPTR "/cpu/entry_0x%" PRIXPTR,
diff --git a/chromium/gpu/command_buffer/service/service_transfer_cache.h b/chromium/gpu/command_buffer/service/service_transfer_cache.h
index 54ab52f847d..6afe8293b1b 100644
--- a/chromium/gpu/command_buffer/service/service_transfer_cache.h
+++ b/chromium/gpu/command_buffer/service/service_transfer_cache.h
@@ -14,6 +14,7 @@
#include "base/containers/mru_cache.h"
#include "base/containers/span.h"
#include "base/memory/memory_pressure_listener.h"
+#include "cc/paint/image_transfer_cache_entry.h"
#include "cc/paint/transfer_cache_entry.h"
#include "gpu/command_buffer/common/discardable_handle.h"
#include "gpu/command_buffer/service/context_group.h"
@@ -21,7 +22,6 @@
#include "third_party/skia/include/core/SkRefCnt.h"
class GrContext;
-class SkColorSpace;
class SkImage;
namespace gpu {
@@ -73,9 +73,9 @@ class GPU_GLES2_EXPORT ServiceTransferCache
ServiceDiscardableHandle handle,
GrContext* context,
std::vector<sk_sp<SkImage>> plane_images,
+ cc::YUVDecodeFormat plane_images_format,
size_t buffer_byte_size,
- bool needs_mips,
- sk_sp<SkColorSpace> target_color_space);
+ bool needs_mips);
void PurgeMemory(
base::MemoryPressureListener::MemoryPressureLevel memory_pressure_level);
diff --git a/chromium/gpu/command_buffer/service/service_utils.cc b/chromium/gpu/command_buffer/service/service_utils.cc
index ca291cdb445..a1ed819c1b9 100644
--- a/chromium/gpu/command_buffer/service/service_utils.cc
+++ b/chromium/gpu/command_buffer/service/service_utils.cc
@@ -11,8 +11,8 @@
#include "gpu/command_buffer/common/gles2_cmd_utils.h"
#include "gpu/command_buffer/service/context_group.h"
#include "gpu/command_buffer/service/gpu_switches.h"
-#include "gpu/config/gpu_finch_features.h"
#include "ui/gl/gl_switches.h"
+#include "ui/gl/gl_utils.h"
#if defined(USE_EGL)
#include "ui/gl/gl_surface_egl.h"
@@ -86,20 +86,7 @@ gl::GLContextAttribs GenerateGLContextAttribs(
}
bool UsePassthroughCommandDecoder(const base::CommandLine* command_line) {
- std::string switch_value;
- if (command_line->HasSwitch(switches::kUseCmdDecoder)) {
- switch_value = command_line->GetSwitchValueASCII(switches::kUseCmdDecoder);
- }
-
- if (switch_value == kCmdDecoderPassthroughName) {
- return true;
- } else if (switch_value == kCmdDecoderValidatingName) {
- return false;
- } else {
- // Unrecognized or missing switch, use the default.
- return base::FeatureList::IsEnabled(
- features::kDefaultPassthroughCommandDecoder);
- }
+ return gl::UsePassthroughCommandDecoder(command_line);
}
bool PassthroughCommandDecoderSupported() {
@@ -167,8 +154,18 @@ GpuPreferences ParseGpuPreferences(const base::CommandLine* command_line) {
command_line->HasSwitch(switches::kIgnoreGpuBlacklist);
gpu_preferences.enable_webgpu =
command_line->HasSwitch(switches::kEnableUnsafeWebGPU);
- gpu_preferences.enable_vulkan =
- command_line->HasSwitch(switches::kEnableVulkan);
+ if (command_line->HasSwitch(switches::kUseVulkan)) {
+ auto value = command_line->GetSwitchValueASCII(switches::kUseVulkan);
+ if (value.empty() || value == switches::kVulkanImplementationNameNative) {
+ gpu_preferences.use_vulkan = VulkanImplementationName::kNative;
+ } else if (value == switches::kVulkanImplementationNameSwiftshader) {
+ gpu_preferences.use_vulkan = VulkanImplementationName::kSwiftshader;
+ } else {
+ gpu_preferences.use_vulkan = VulkanImplementationName::kNone;
+ }
+ } else {
+ gpu_preferences.use_vulkan = VulkanImplementationName::kNone;
+ }
gpu_preferences.disable_vulkan_surface =
command_line->HasSwitch(switches::kDisableVulkanSurface);
return gpu_preferences;
diff --git a/chromium/gpu/command_buffer/service/shader_manager.h b/chromium/gpu/command_buffer/service/shader_manager.h
index 251311a693a..deb4a491486 100644
--- a/chromium/gpu/command_buffer/service/shader_manager.h
+++ b/chromium/gpu/command_buffer/service/shader_manager.h
@@ -58,6 +58,7 @@ class GPU_GLES2_EXPORT Shader : public base::RefCounted<Shader> {
// Returns true if we are ready to call DoCompile. If we have not yet called
// RequestCompile or if we've already compiled, returns false.
bool CanCompile() { return shader_state_ == kShaderStateCompileRequested; }
+ bool HasCompiled() { return shader_state_ == kShaderStateCompiled; }
void DoCompile();
void RefreshTranslatedShaderSource();
diff --git a/chromium/gpu/command_buffer/service/shared_context_state.cc b/chromium/gpu/command_buffer/service/shared_context_state.cc
index 2010eb46707..792fa50d984 100644
--- a/chromium/gpu/command_buffer/service/shared_context_state.cc
+++ b/chromium/gpu/command_buffer/service/shared_context_state.cc
@@ -17,6 +17,7 @@
#include "ui/gl/gl_context.h"
#include "ui/gl/gl_share_group.h"
#include "ui/gl/gl_surface.h"
+#include "ui/gl/gl_version_info.h"
#include "ui/gl/init/create_gr_gl_interface.h"
#if BUILDFLAG(ENABLE_VULKAN)
@@ -48,8 +49,9 @@ SharedContextState::SharedContextState(
share_group_(std::move(share_group)),
context_(context),
real_context_(std::move(context)),
- surface_(std::move(surface)),
- weak_ptr_factory_(this) {
+ surface_(std::move(surface)) {
+ raster::DetermineGrCacheLimitsFromAvailableMemory(
+ &max_resource_cache_bytes_, &glyph_cache_max_texture_bytes_);
if (GrContextIsVulkan()) {
#if BUILDFLAG(ENABLE_VULKAN)
gr_context_ = vk_context_provider_->GetGrContext();
@@ -142,23 +144,24 @@ void SharedContextState::InitializeGrContext(
options.fDriverBugWorkarounds =
GrDriverBugWorkarounds(workarounds.ToIntSet());
options.fDisableCoverageCountingPaths = true;
- size_t max_resource_cache_bytes = 0u;
- raster::DetermineGrCacheLimitsFromAvailableMemory(
- &max_resource_cache_bytes, &glyph_cache_max_texture_bytes_);
options.fGlyphCacheTextureMaximumBytes = glyph_cache_max_texture_bytes_;
options.fPersistentCache = cache;
options.fAvoidStencilBuffers = workarounds.avoid_stencil_buffers;
options.fDisallowGLSLBinaryCaching = workarounds.disable_program_disk_cache;
+ // TODO(csmartdalton): enable internal multisampling after the related Skia
+ // rolls are in.
+ options.fInternalMultisampleCount = 0;
owned_gr_context_ = GrContext::MakeGL(std::move(interface), options);
gr_context_ = owned_gr_context_.get();
- if (!gr_context_) {
- LOG(ERROR) << "OOP raster support disabled: GrContext creation "
- "failed.";
- } else {
- constexpr int kMaxGaneshResourceCacheCount = 16384;
- gr_context_->setResourceCacheLimits(kMaxGaneshResourceCacheCount,
- max_resource_cache_bytes);
- }
+ }
+
+ if (!gr_context_) {
+ LOG(ERROR) << "OOP raster support disabled: GrContext creation "
+ "failed.";
+ } else {
+ constexpr int kMaxGaneshResourceCacheCount = 16384;
+ gr_context_->setResourceCacheLimits(kMaxGaneshResourceCacheCount,
+ max_resource_cache_bytes_);
}
transfer_cache_ = std::make_unique<ServiceTransferCache>();
}
@@ -211,6 +214,18 @@ bool SharedContextState::InitializeGL(
context_state_->InitCapabilities(nullptr);
context_state_->InitState(nullptr);
+ GLenum driver_status = real_context_->CheckStickyGraphicsResetStatus();
+ if (driver_status != GL_NO_ERROR) {
+ // If the context was lost at any point before or during initialization,
+ // the values queried from the driver could be bogus, and potentially
+ // inconsistent between various ContextStates on the same underlying real
+ // GL context. Make sure to report the failure early, to not allow
+ // virtualized context switches in that case.
+ feature_info_ = nullptr;
+ context_state_ = nullptr;
+ return false;
+ }
+
if (use_virtualized_gl_contexts_) {
auto virtual_context = base::MakeRefCounted<GLContextVirtual>(
share_group_.get(), real_context_.get(),
@@ -223,11 +238,20 @@ bool SharedContextState::InitializeGL(
context_ = std::move(virtual_context);
MakeCurrent(nullptr);
}
+
+ // Swiftshader GL and Vulkan report supporting external objects extensions,
+ // but they don't.
+ support_vulkan_external_object_ =
+ !gl::g_current_gl_version->is_swiftshader &&
+ gpu_preferences.use_vulkan == gpu::VulkanImplementationName::kNative &&
+ gl::g_current_gl_driver->ext.b_GL_EXT_memory_object_fd &&
+ gl::g_current_gl_driver->ext.b_GL_EXT_semaphore_fd;
+
return true;
}
-bool SharedContextState::MakeCurrent(gl::GLSurface* surface) {
- if (!GrContextIsGL())
+bool SharedContextState::MakeCurrent(gl::GLSurface* surface, bool needs_gl) {
+ if (!GrContextIsGL() && !needs_gl)
return true;
if (context_lost_)
diff --git a/chromium/gpu/command_buffer/service/shared_context_state.h b/chromium/gpu/command_buffer/service/shared_context_state.h
index 5c57a6232ee..ca131252d71 100644
--- a/chromium/gpu/command_buffer/service/shared_context_state.h
+++ b/chromium/gpu/command_buffer/service/shared_context_state.h
@@ -72,7 +72,7 @@ class GPU_GLES2_EXPORT SharedContextState
scoped_refptr<gles2::FeatureInfo> feature_info);
bool IsGLInitialized() const { return !!feature_info_; }
- bool MakeCurrent(gl::GLSurface* surface);
+ bool MakeCurrent(gl::GLSurface* surface, bool needs_gl = false);
void MarkContextLost();
bool IsCurrent(gl::GLSurface* surface);
@@ -104,12 +104,16 @@ class GPU_GLES2_EXPORT SharedContextState
std::vector<uint8_t>* scratch_deserialization_buffer() {
return &scratch_deserialization_buffer_;
}
+ size_t max_resource_cache_bytes() const { return max_resource_cache_bytes_; }
size_t glyph_cache_max_texture_bytes() const {
return glyph_cache_max_texture_bytes_;
}
bool use_virtualized_gl_contexts() const {
return use_virtualized_gl_contexts_;
}
+ bool support_vulkan_external_object() const {
+ return support_vulkan_external_object_;
+ }
// base::trace_event::MemoryDumpProvider implementation.
bool OnMemoryDump(const base::trace_event::MemoryDumpArgs& args,
@@ -152,6 +156,7 @@ class GPU_GLES2_EXPORT SharedContextState
QueryManager* GetQueryManager() override;
bool use_virtualized_gl_contexts_ = false;
+ bool support_vulkan_external_object_ = false;
base::OnceClosure context_lost_callback_;
viz::VulkanContextProvider* const vk_context_provider_;
viz::MetalContextProvider* const metal_context_provider_;
@@ -169,6 +174,7 @@ class GPU_GLES2_EXPORT SharedContextState
gl::ProgressReporter* progress_reporter_ = nullptr;
sk_sp<GrContext> owned_gr_context_;
std::unique_ptr<ServiceTransferCache> transfer_cache_;
+ size_t max_resource_cache_bytes_ = 0u;
size_t glyph_cache_max_texture_bytes_ = 0u;
std::vector<uint8_t> scratch_deserialization_buffer_;
@@ -179,7 +185,7 @@ class GPU_GLES2_EXPORT SharedContextState
bool context_lost_ = false;
base::ObserverList<ContextLostObserver>::Unchecked context_lost_observers_;
- base::WeakPtrFactory<SharedContextState> weak_ptr_factory_;
+ base::WeakPtrFactory<SharedContextState> weak_ptr_factory_{this};
DISALLOW_COPY_AND_ASSIGN(SharedContextState);
};
diff --git a/chromium/gpu/command_buffer/service/shared_context_state_unittest.cc b/chromium/gpu/command_buffer/service/shared_context_state_unittest.cc
new file mode 100644
index 00000000000..a6a3ad3adf6
--- /dev/null
+++ b/chromium/gpu/command_buffer/service/shared_context_state_unittest.cc
@@ -0,0 +1,92 @@
+// Copyright 2019 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "gpu/command_buffer/service/shared_context_state.h"
+
+#include <limits>
+#include <memory>
+#include <string>
+#include <utility>
+
+#include "base/bind_helpers.h"
+#include "base/command_line.h"
+#include "base/memory/ptr_util.h"
+#include "gpu/command_buffer/common/gles2_cmd_utils.h"
+#include "gpu/command_buffer/service/context_state_test_helpers.h"
+#include "gpu/command_buffer/service/feature_info.h"
+#include "gpu/command_buffer/service/shared_context_state.h"
+#include "gpu/command_buffer/service/test_helper.h"
+#include "gpu/config/gpu_driver_bug_workarounds.h"
+#include "gpu/config/gpu_feature_info.h"
+#include "gpu/config/gpu_preferences.h"
+#include "testing/gtest/include/gtest/gtest.h"
+#include "ui/gl/gl_context_stub.h"
+#include "ui/gl/gl_mock.h"
+#include "ui/gl/gl_surface_stub.h"
+#include "ui/gl/init/gl_factory.h"
+#include "ui/gl/test/gl_surface_test_support.h"
+
+using ::testing::_;
+using ::testing::InSequence;
+using ::testing::Return;
+using ::testing::SetArgPointee;
+using ::testing::StrictMock;
+
+namespace gpu {
+
+class SharedContextStateTest : public ::testing::Test {
+ public:
+ SharedContextStateTest() = default;
+};
+
+TEST_F(SharedContextStateTest, InitFailsIfLostContext) {
+ const ContextType context_type = CONTEXT_TYPE_OPENGLES2;
+
+ // For easier substring/extension matching
+ gl::SetGLGetProcAddressProc(gl::MockGLInterface::GetGLProcAddress);
+ gl::GLSurfaceTestSupport::InitializeOneOffWithMockBindings();
+
+ StrictMock<gl::MockGLInterface> gl_interface;
+ gl::MockGLInterface::SetGLInterface(&gl_interface);
+
+ InSequence sequence;
+
+ auto surface = base::MakeRefCounted<gl::GLSurfaceStub>();
+ auto context = base::MakeRefCounted<gl::GLContextStub>();
+ const char gl_version[] = "2.1";
+ context->SetGLVersionString(gl_version);
+ const char gl_extensions[] = "GL_KHR_robustness";
+ context->SetExtensionsString(gl_extensions);
+
+ context->MakeCurrent(surface.get());
+
+ GpuFeatureInfo gpu_feature_info;
+ GpuDriverBugWorkarounds workarounds;
+ auto feature_info =
+ base::MakeRefCounted<gles2::FeatureInfo>(workarounds, gpu_feature_info);
+ gles2::TestHelper::SetupFeatureInfoInitExpectationsWithGLVersion(
+ &gl_interface, gl_extensions, "", gl_version, context_type);
+ feature_info->Initialize(gpu::CONTEXT_TYPE_OPENGLES2, false /* passthrough */,
+ gles2::DisallowedFeatures());
+
+ // Setup expectations for SharedContextState::InitializeGL().
+ EXPECT_CALL(gl_interface, GetIntegerv(GL_MAX_VERTEX_ATTRIBS, _))
+ .WillOnce(SetArgPointee<1>(8u))
+ .RetiresOnSaturation();
+ ContextStateTestHelpers::SetupInitState(&gl_interface, feature_info.get(),
+ gfx::Size(1, 1));
+
+ EXPECT_CALL(gl_interface, GetGraphicsResetStatusARB())
+ .WillOnce(Return(GL_GUILTY_CONTEXT_RESET_ARB));
+
+ auto shared_context_state = base::MakeRefCounted<SharedContextState>(
+ new gl::GLShareGroup(), surface, context,
+ false /* use_virtualized_gl_contexts */, base::DoNothing());
+
+ bool result =
+ shared_context_state->InitializeGL(GpuPreferences(), feature_info);
+ EXPECT_FALSE(result);
+}
+
+} // namespace gpu
diff --git a/chromium/gpu/command_buffer/service/shared_image_backing.h b/chromium/gpu/command_buffer/service/shared_image_backing.h
index 88b29155d69..0c7566e99a2 100644
--- a/chromium/gpu/command_buffer/service/shared_image_backing.h
+++ b/chromium/gpu/command_buffer/service/shared_image_backing.h
@@ -27,6 +27,10 @@ class MemoryAllocatorDump;
} // namespace trace_event
} // namespace base
+namespace gfx {
+class GpuFence;
+} // namespace gfx
+
namespace gpu {
class MailboxManager;
class SharedContextState;
@@ -74,7 +78,7 @@ class GPU_GLES2_EXPORT SharedImageBacking {
// unintiailized pixels.
virtual void SetCleared() = 0;
- virtual void Update() = 0;
+ virtual void Update(std::unique_ptr<gfx::GpuFence> in_fence) = 0;
// Destroys the underlying backing. Must be called before destruction.
virtual void Destroy() = 0;
diff --git a/chromium/gpu/command_buffer/service/shared_image_backing_factory_ahardwarebuffer.cc b/chromium/gpu/command_buffer/service/shared_image_backing_factory_ahardwarebuffer.cc
index 0d597bb04ef..867f320f070 100644
--- a/chromium/gpu/command_buffer/service/shared_image_backing_factory_ahardwarebuffer.cc
+++ b/chromium/gpu/command_buffer/service/shared_image_backing_factory_ahardwarebuffer.cc
@@ -29,6 +29,7 @@
#include "gpu/command_buffer/service/shared_context_state.h"
#include "gpu/command_buffer/service/shared_image_backing.h"
#include "gpu/command_buffer/service/shared_image_representation.h"
+#include "gpu/command_buffer/service/shared_image_representation_skia_gl.h"
#include "gpu/command_buffer/service/skia_utils.h"
#include "gpu/command_buffer/service/texture_manager.h"
#include "gpu/ipc/common/android/android_image_reader_utils.h"
@@ -53,27 +54,6 @@
namespace gpu {
namespace {
-enum class RepresentationAccessMode {
- kNone,
- kRead,
- kWrite,
-};
-
-std::ostream& operator<<(std::ostream& os, RepresentationAccessMode mode) {
- switch (mode) {
- case RepresentationAccessMode::kNone:
- os << "kNone";
- break;
- case RepresentationAccessMode::kRead:
- os << "kRead";
- break;
- case RepresentationAccessMode::kWrite:
- os << "kWrite";
- break;
- }
- return os;
-}
-
sk_sp<SkPromiseImageTexture> CreatePromiseTexture(
viz::VulkanContextProvider* context_provider,
base::android::ScopedHardwareBufferHandle ahb_handle,
@@ -154,7 +134,7 @@ class SharedImageBackingAHB : public SharedImageBacking {
bool IsCleared() const override;
void SetCleared() override;
- void Update() override;
+ void Update(std::unique_ptr<gfx::GpuFence> in_fence) override;
bool ProduceLegacyMailbox(MailboxManager* mailbox_manager) override;
void Destroy() override;
base::android::ScopedHardwareBufferHandle GetAhbHandle() const;
@@ -269,137 +249,6 @@ class SharedImageRepresentationGLTextureAHB
DISALLOW_COPY_AND_ASSIGN(SharedImageRepresentationGLTextureAHB);
};
-// GL backed Skia representation of SharedImageBackingAHB.
-class SharedImageRepresentationSkiaGLAHB
- : public SharedImageRepresentationSkia {
- public:
- SharedImageRepresentationSkiaGLAHB(
- SharedImageManager* manager,
- SharedImageBacking* backing,
- scoped_refptr<SharedContextState> context_state,
- sk_sp<SkPromiseImageTexture> promise_texture,
- MemoryTypeTracker* tracker,
- gles2::Texture* texture)
- : SharedImageRepresentationSkia(manager, backing, tracker),
- context_state_(std::move(context_state)),
- promise_texture_(std::move(promise_texture)),
- texture_(std::move(texture)) {
-#if DCHECK_IS_ON()
- context_ = gl::GLContext::GetCurrent();
-#endif
- }
-
- ~SharedImageRepresentationSkiaGLAHB() override {
- DCHECK_EQ(RepresentationAccessMode::kNone, mode_);
- DCHECK(!surface_);
- if (texture_)
- texture_->RemoveLightweightRef(has_context());
- }
-
- sk_sp<SkSurface> BeginWriteAccess(
- int final_msaa_count,
- const SkSurfaceProps& surface_props,
- std::vector<GrBackendSemaphore>* begin_semaphores,
- std::vector<GrBackendSemaphore>* end_semaphores) override {
- DCHECK_EQ(mode_, RepresentationAccessMode::kNone);
- CheckContext();
-
- // if there is already a surface_, it means previous BeginWriteAccess
- // doesn't have a corresponding EndWriteAccess.
- DCHECK(!surface_);
-
- base::ScopedFD sync_fd;
- if (!ahb_backing()->BeginWrite(&sync_fd))
- return nullptr;
-
- if (!InsertEglFenceAndWait(std::move(sync_fd)))
- return nullptr;
-
- if (!promise_texture_)
- return nullptr;
-
- SkColorType sk_color_type = viz::ResourceFormatToClosestSkColorType(
- /*gpu_compositing=*/true, format());
- auto surface = SkSurface::MakeFromBackendTextureAsRenderTarget(
- context_state_->gr_context(), promise_texture_->backendTexture(),
- kTopLeft_GrSurfaceOrigin, final_msaa_count, sk_color_type,
- backing()->color_space().ToSkColorSpace(), &surface_props);
- surface_ = surface.get();
- mode_ = RepresentationAccessMode::kWrite;
- return surface;
- }
-
- void EndWriteAccess(sk_sp<SkSurface> surface) override {
- DCHECK_EQ(mode_, RepresentationAccessMode::kWrite);
- DCHECK(surface_);
- DCHECK_EQ(surface.get(), surface_);
- DCHECK(surface->unique());
- EndAccess(false /* readonly */);
- }
-
- sk_sp<SkPromiseImageTexture> BeginReadAccess(
- std::vector<GrBackendSemaphore>* begin_semaphores,
- std::vector<GrBackendSemaphore>* end_semaphores) override {
- DCHECK_EQ(mode_, RepresentationAccessMode::kNone);
- CheckContext();
-
- base::ScopedFD write_sync_fd;
- if (!ahb_backing()->BeginRead(this, &write_sync_fd))
- return nullptr;
- if (!InsertEglFenceAndWait(std::move(write_sync_fd)))
- return nullptr;
-
- mode_ = RepresentationAccessMode::kRead;
-
- return promise_texture_;
- }
-
- void EndReadAccess() override {
- DCHECK_EQ(mode_, RepresentationAccessMode::kRead);
- CheckContext();
- EndAccess(true /* readonly */);
- }
-
- private:
- SharedImageBackingAHB* ahb_backing() {
- return static_cast<SharedImageBackingAHB*>(backing());
- }
-
- void CheckContext() {
-#if DCHECK_IS_ON()
- DCHECK(gl::GLContext::GetCurrent() == context_);
-#endif
- }
-
- void EndAccess(bool readonly) {
- CheckContext();
-
- // Insert a gl fence to signal the write completion.
- base::ScopedFD sync_fd = CreateEglFenceAndExportFd();
- if (readonly)
- ahb_backing()->EndRead(this, std::move(sync_fd));
- else
- ahb_backing()->EndWrite(std::move(sync_fd));
-
- if (texture_ && !readonly) {
- if (texture_->IsLevelCleared(texture_->target(), 0))
- backing()->SetCleared();
- }
-
- mode_ = RepresentationAccessMode::kNone;
- surface_ = nullptr;
- }
-
- scoped_refptr<SharedContextState> context_state_;
- sk_sp<SkPromiseImageTexture> promise_texture_;
- gles2::Texture* texture_;
- SkSurface* surface_ = nullptr;
- RepresentationAccessMode mode_ = RepresentationAccessMode::kNone;
-#if DCHECK_IS_ON()
- gl::GLContext* context_;
-#endif
-};
-
// Vk backed Skia representation of SharedImageBackingAHB.
class SharedImageRepresentationSkiaVkAHB
: public SharedImageRepresentationSkia {
@@ -624,7 +473,9 @@ void SharedImageBackingAHB::SetCleared() {
is_cleared_ = true;
}
-void SharedImageBackingAHB::Update() {}
+void SharedImageBackingAHB::Update(std::unique_ptr<gfx::GpuFence> in_fence) {
+ DCHECK(!in_fence);
+}
bool SharedImageBackingAHB::ProduceLegacyMailbox(
MailboxManager* mailbox_manager) {
@@ -692,16 +543,12 @@ SharedImageBackingAHB::ProduceSkia(
auto* texture = GenGLTexture();
if (!texture)
return nullptr;
-
- GrBackendTexture backend_texture;
- GetGrBackendTexture(gl::GLContext::GetCurrent()->GetVersionInfo(),
- texture->target(), size(), texture->service_id(),
- format(), &backend_texture);
- sk_sp<SkPromiseImageTexture> promise_texture =
- SkPromiseImageTexture::Make(backend_texture);
- return std::make_unique<SharedImageRepresentationSkiaGLAHB>(
- manager, this, std::move(context_state), std::move(promise_texture),
- tracker, std::move(texture));
+ auto gl_representation =
+ std::make_unique<SharedImageRepresentationGLTextureAHB>(
+ manager, this, tracker, std::move(texture));
+ return SharedImageRepresentationSkiaGL::Create(std::move(gl_representation),
+ std::move(context_state),
+ manager, this, tracker);
}
bool SharedImageBackingAHB::BeginWrite(base::ScopedFD* fd_to_wait_on) {
@@ -837,7 +684,7 @@ gles2::Texture* SharedImageBackingAHB::GenGLTexture() {
size().width(), size().height(), 1, 0, gl_format,
gl_type, cleared_rect);
texture->SetLevelImage(target, 0, egl_image.get(), gles2::Texture::BOUND);
- texture->SetImmutable(true);
+ texture->SetImmutable(true, false);
api->glBindTextureFn(target, old_texture_binding);
DCHECK_EQ(egl_image->GetInternalFormat(), gl_format);
return texture;
diff --git a/chromium/gpu/command_buffer/service/shared_image_backing_factory_gl_texture.cc b/chromium/gpu/command_buffer/service/shared_image_backing_factory_gl_texture.cc
index 0126a74c8a7..c3c17d1938b 100644
--- a/chromium/gpu/command_buffer/service/shared_image_backing_factory_gl_texture.cc
+++ b/chromium/gpu/command_buffer/service/shared_image_backing_factory_gl_texture.cc
@@ -27,9 +27,11 @@
#include "gpu/config/gpu_finch_features.h"
#include "gpu/config/gpu_preferences.h"
#include "third_party/skia/include/core/SkPromiseImageTexture.h"
+#include "ui/gfx/buffer_format_util.h"
#include "ui/gfx/color_space.h"
#include "ui/gfx/geometry/size.h"
#include "ui/gl/gl_bindings.h"
+#include "ui/gl/gl_fence.h"
#include "ui/gl/gl_gl_api_implementation.h"
#include "ui/gl/gl_image_shared_memory.h"
#include "ui/gl/gl_version_info.h"
@@ -382,7 +384,7 @@ class SharedImageBackingGLTexture : public SharedImageBackingWithReadAccess {
texture_->SetLevelCleared(texture_->target(), 0, true);
}
- void Update() override {
+ void Update(std::unique_ptr<gfx::GpuFence> in_fence) override {
GLenum target = texture_->target();
gl::GLApi* api = gl::g_current_gl_context;
ScopedRestoreTexture scoped_restore(api, target);
@@ -394,6 +396,16 @@ class SharedImageBackingGLTexture : public SharedImageBackingWithReadAccess {
return;
if (old_state == gles2::Texture::BOUND)
image->ReleaseTexImage(target);
+
+ if (in_fence) {
+ // TODO(dcastagna): Don't wait for the fence if the SharedImage is going
+ // to be scanned out as an HW overlay. Currently we don't know that at
+ // this point and we always bind the image, therefore we need to wait for
+ // the fence.
+ std::unique_ptr<gl::GLFence> egl_fence =
+ gl::GLFence::CreateFromGpuFence(*in_fence.get());
+ egl_fence->ServerWait();
+ }
gles2::Texture::ImageState new_state = gles2::Texture::UNBOUND;
if (image->ShouldBindOrCopy() == gl::GLImage::BIND &&
image->BindTexImage(target)) {
@@ -518,7 +530,7 @@ class SharedImageBackingGLTexture : public SharedImageBackingWithReadAccess {
format, type, info->cleared_rect);
rgb_emulation_texture_->SetLevelImage(target, 0, image, image_state);
- rgb_emulation_texture_->SetImmutable(true);
+ rgb_emulation_texture_->SetImmutable(true, false);
}
return std::make_unique<SharedImageRepresentationGLTextureImpl>(
@@ -576,7 +588,7 @@ class SharedImageBackingPassthroughGLTexture
bool IsCleared() const override { return is_cleared_; }
void SetCleared() override { is_cleared_ = true; }
- void Update() override {
+ void Update(std::unique_ptr<gfx::GpuFence> in_fence) override {
GLenum target = texture_passthrough_->target();
gl::GLApi* api = gl::g_current_gl_context;
ScopedRestoreTexture scoped_restore(api, target);
@@ -752,9 +764,9 @@ SharedImageBackingFactoryGLTexture::SharedImageBackingFactoryGLTexture(
info.buffer_format = buffer_format;
DCHECK_EQ(info.gl_format,
gpu::InternalFormatForGpuMemoryBufferFormat(buffer_format));
- if (base::ContainsValue(gpu_preferences.texture_target_exception_list,
- gfx::BufferUsageAndFormat(gfx::BufferUsage::SCANOUT,
- buffer_format)))
+ if (base::Contains(gpu_preferences.texture_target_exception_list,
+ gfx::BufferUsageAndFormat(gfx::BufferUsage::SCANOUT,
+ buffer_format)))
info.target_for_scanout = gpu::GetPlatformSpecificTextureTarget();
}
}
@@ -866,6 +878,7 @@ SharedImageBackingFactoryGLTexture::CreateSharedImage(
GLuint level_info_internal_format = format_info.gl_format;
bool is_cleared = false;
bool needs_subimage_upload = false;
+ bool has_immutable_storage = false;
if (use_buffer) {
image = image_factory_->CreateAnonymousImage(
size, format_info.buffer_format, gfx::BufferUsage::SCANOUT,
@@ -884,6 +897,7 @@ SharedImageBackingFactoryGLTexture::CreateSharedImage(
} else if (format_info.supports_storage) {
api->glTexStorage2DEXTFn(target, 1, format_info.storage_internal_format,
size.width(), size.height());
+ has_immutable_storage = true;
needs_subimage_upload = !pixel_data.empty();
} else if (format_info.is_compressed) {
ScopedResetAndRestoreUnpackState scoped_unpack_state(api, attribs,
@@ -910,12 +924,12 @@ SharedImageBackingFactoryGLTexture::CreateSharedImage(
pixel_data.data());
}
- return MakeBacking(use_passthrough_, mailbox, target, service_id, image,
- gles2::Texture::BOUND, level_info_internal_format,
- format_info.gl_format, format_info.gl_type,
- format_info.swizzle,
- pixel_data.empty() ? is_cleared : true, format, size,
- color_space, usage, attribs);
+ return MakeBacking(
+ use_passthrough_, mailbox, target, service_id, image,
+ gles2::Texture::BOUND, level_info_internal_format, format_info.gl_format,
+ format_info.gl_type, format_info.swizzle,
+ pixel_data.empty() ? is_cleared : true, has_immutable_storage, format,
+ size, color_space, usage, attribs);
}
std::unique_ptr<SharedImageBacking>
@@ -934,7 +948,8 @@ SharedImageBackingFactoryGLTexture::CreateSharedImage(
}
if (!gpu::IsImageSizeValidForGpuMemoryBufferFormat(size, buffer_format)) {
- LOG(ERROR) << "Invalid image size for format.";
+ LOG(ERROR) << "Invalid image size " << size.ToString() << " for "
+ << gfx::BufferFormatToString(buffer_format);
return nullptr;
}
@@ -1001,7 +1016,7 @@ SharedImageBackingFactoryGLTexture::CreateSharedImage(
return MakeBacking(use_passthrough_, mailbox, target, service_id, image,
image_state, internal_format, gl_format, gl_type, nullptr,
- true, format, size, color_space, usage, attribs);
+ true, false, format, size, color_space, usage, attribs);
}
std::unique_ptr<SharedImageBacking>
@@ -1016,8 +1031,8 @@ SharedImageBackingFactoryGLTexture::CreateSharedImageForTest(
return MakeBacking(false, mailbox, target, service_id, nullptr,
gles2::Texture::UNBOUND, viz::GLInternalFormat(format),
viz::GLDataFormat(format), viz::GLDataType(format),
- nullptr, is_cleared, format, size, gfx::ColorSpace(),
- usage, UnpackStateAttribs());
+ nullptr, is_cleared, false, format, size,
+ gfx::ColorSpace(), usage, UnpackStateAttribs());
}
scoped_refptr<gl::GLImage> SharedImageBackingFactoryGLTexture::MakeGLImage(
@@ -1066,6 +1081,7 @@ SharedImageBackingFactoryGLTexture::MakeBacking(
GLuint gl_type,
const gles2::Texture::CompatibilitySwizzle* swizzle,
bool is_cleared,
+ bool has_immutable_storage,
viz::ResourceFormat format,
const gfx::Size& size,
const gfx::ColorSpace& color_space,
@@ -1102,7 +1118,7 @@ SharedImageBackingFactoryGLTexture::MakeBacking(
texture->SetCompatibilitySwizzle(swizzle);
if (image)
texture->SetLevelImage(target, 0, image.get(), image_state);
- texture->SetImmutable(true);
+ texture->SetImmutable(true, has_immutable_storage);
return std::make_unique<SharedImageBackingGLTexture>(
mailbox, format, size, color_space, usage, texture, attribs);
diff --git a/chromium/gpu/command_buffer/service/shared_image_backing_factory_gl_texture.h b/chromium/gpu/command_buffer/service/shared_image_backing_factory_gl_texture.h
index d077ef449d4..bf49d246fcb 100644
--- a/chromium/gpu/command_buffer/service/shared_image_backing_factory_gl_texture.h
+++ b/chromium/gpu/command_buffer/service/shared_image_backing_factory_gl_texture.h
@@ -101,6 +101,7 @@ class GPU_GLES2_EXPORT SharedImageBackingFactoryGLTexture
GLuint gl_type,
const gles2::Texture::CompatibilitySwizzle* swizzle,
bool is_cleared,
+ bool has_immutable_storage,
viz::ResourceFormat format,
const gfx::Size& size,
const gfx::ColorSpace& color_space,
diff --git a/chromium/gpu/command_buffer/service/shared_image_backing_factory_gl_texture_unittest.cc b/chromium/gpu/command_buffer/service/shared_image_backing_factory_gl_texture_unittest.cc
index b2cbef5e4ef..68ab474acc9 100644
--- a/chromium/gpu/command_buffer/service/shared_image_backing_factory_gl_texture_unittest.cc
+++ b/chromium/gpu/command_buffer/service/shared_image_backing_factory_gl_texture_unittest.cc
@@ -668,7 +668,7 @@ TEST_P(SharedImageBackingFactoryGLTextureWithGMBTest,
auto* stub_image = static_cast<StubImage*>(image.get());
EXPECT_TRUE(stub_image->bound());
int update_counter = stub_image->update_counter();
- ref->Update();
+ ref->Update(nullptr);
EXPECT_TRUE(stub_image->bound());
EXPECT_GT(stub_image->update_counter(), update_counter);
}
diff --git a/chromium/gpu/command_buffer/service/shared_image_backing_factory_iosurface.mm b/chromium/gpu/command_buffer/service/shared_image_backing_factory_iosurface.mm
index 3d78e84a77e..dbdfc99565a 100644
--- a/chromium/gpu/command_buffer/service/shared_image_backing_factory_iosurface.mm
+++ b/chromium/gpu/command_buffer/service/shared_image_backing_factory_iosurface.mm
@@ -95,10 +95,10 @@ base::Optional<DawnTextureFormat> GetDawnFormat(viz::ResourceFormat format) {
case viz::LUMINANCE_8:
return DAWN_TEXTURE_FORMAT_R8_UNORM;
case viz::RG_88:
- return DAWN_TEXTURE_FORMAT_R8_G8_UNORM;
+ return DAWN_TEXTURE_FORMAT_RG8_UNORM;
case viz::RGBA_8888:
case viz::BGRA_8888:
- return DAWN_TEXTURE_FORMAT_B8_G8_R8_A8_UNORM;
+ return DAWN_TEXTURE_FORMAT_BGRA8_UNORM;
default:
return {};
}
@@ -109,11 +109,11 @@ base::Optional<DawnTextureFormat> GetDawnFormat(gfx::BufferFormat format) {
case gfx::BufferFormat::R_8:
return DAWN_TEXTURE_FORMAT_R8_UNORM;
case gfx::BufferFormat::RG_88:
- return DAWN_TEXTURE_FORMAT_R8_G8_UNORM;
+ return DAWN_TEXTURE_FORMAT_RG8_UNORM;
case gfx::BufferFormat::RGBX_8888:
case gfx::BufferFormat::RGBA_8888:
case gfx::BufferFormat::BGRX_8888:
- return DAWN_TEXTURE_FORMAT_B8_G8_R8_A8_UNORM;
+ return DAWN_TEXTURE_FORMAT_BGRA8_UNORM;
default:
return {};
}
@@ -363,7 +363,12 @@ class SharedImageRepresentationDawnIOSurface
};
#endif // BUILDFLAG(USE_DAWN)
-// Implementation of SharedImageBacking by wrapping IOSurfaces
+// Implementation of SharedImageBacking by wrapping IOSurfaces. Disable
+// unguarded availability warnings because they are incompatible with using a
+// scoped_nsprotocol for the id<MTLTexture> and because all access to Metal is
+// guarded on the context provider already successfully using Metal.
+#pragma clang diagnostic push
+#pragma clang diagnostic ignored "-Wunguarded-availability"
class SharedImageBackingIOSurface : public SharedImageBacking {
public:
SharedImageBackingIOSurface(const Mailbox& mailbox,
@@ -396,7 +401,7 @@ class SharedImageBackingIOSurface : public SharedImageBacking {
is_cleared_ = true;
}
- void Update() final {}
+ void Update(std::unique_ptr<gfx::GpuFence> in_fence) final {}
bool ProduceLegacyMailbox(MailboxManager* mailbox_manager) final {
DCHECK(io_surface_);
@@ -416,7 +421,7 @@ class SharedImageBackingIOSurface : public SharedImageBacking {
legacy_texture_->RemoveLightweightRef(have_context());
legacy_texture_ = nullptr;
}
-
+ mtl_texture_.reset();
io_surface_.reset();
}
@@ -449,21 +454,17 @@ class SharedImageBackingIOSurface : public SharedImageBacking {
&gr_backend_texture);
}
if (context_state->GrContextIsMetal()) {
- if (@available(macOS 10.11, *)) {
+ if (!mtl_texture_) {
id<MTLDevice> mtl_device =
context_state->metal_context_provider()->GetMTLDevice();
- base::scoped_nsprotocol<id<MTLTexture>> mtl_texture =
+ mtl_texture_ =
CreateMetalTexture(mtl_device, io_surface_, size(), format());
- DCHECK(mtl_texture);
- // GrBackendTexture will take ownership of the MTLTexture passed in the
- // GrMtlTextureInfo argument, so pass in a retained pointer.
- GrMtlTextureInfo info;
- info.fTexture = [mtl_texture retain];
- gr_backend_texture = GrBackendTexture(size().width(), size().height(),
- GrMipMapped::kNo, info);
- } else {
- return nullptr;
+ DCHECK(mtl_texture_);
}
+ GrMtlTextureInfo info;
+ info.fTexture.retain(mtl_texture_.get());
+ gr_backend_texture = GrBackendTexture(size().width(), size().height(),
+ GrMipMapped::kNo, info);
}
sk_sp<SkPromiseImageTexture> promise_texture =
SkPromiseImageTexture::Make(gr_backend_texture);
@@ -549,7 +550,7 @@ class SharedImageBackingIOSurface : public SharedImageBacking {
gl_info.type, cleared_rect);
texture->SetLevelImage(GL_TEXTURE_RECTANGLE, 0, image.get(),
gles2::Texture::BOUND);
- texture->SetImmutable(true);
+ texture->SetImmutable(true, false);
DCHECK_EQ(image->GetInternalFormat(), gl_info.format);
@@ -559,6 +560,7 @@ class SharedImageBackingIOSurface : public SharedImageBacking {
base::ScopedCFTypeRef<IOSurfaceRef> io_surface_;
base::Optional<DawnTextureFormat> dawn_format_;
+ base::scoped_nsprotocol<id<MTLTexture>> mtl_texture_;
bool is_cleared_ = false;
// A texture for the associated legacy mailbox.
@@ -566,6 +568,7 @@ class SharedImageBackingIOSurface : public SharedImageBacking {
DISALLOW_COPY_AND_ASSIGN(SharedImageBackingIOSurface);
};
+#pragma clang diagnostic pop
// Implementation of SharedImageBackingFactoryIOSurface that creates
// SharedImageBackings wrapping IOSurfaces.
diff --git a/chromium/gpu/command_buffer/service/shared_image_factory.cc b/chromium/gpu/command_buffer/service/shared_image_factory.cc
index 399b7151dad..15082dc349b 100644
--- a/chromium/gpu/command_buffer/service/shared_image_factory.cc
+++ b/chromium/gpu/command_buffer/service/shared_image_factory.cc
@@ -152,7 +152,9 @@ bool SharedImageFactory::CreateSharedImage(const Mailbox& mailbox,
// TODO(ericrk): Make this generic in the future.
bool allow_legacy_mailbox = false;
SharedImageBackingFactory* factory = nullptr;
- if (!using_vulkan_) {
+ if (backing_factory_for_testing_) {
+ factory = backing_factory_for_testing_;
+ } else if (!using_vulkan_) {
allow_legacy_mailbox = true;
factory = gl_backing_factory_.get();
} else {
@@ -186,12 +188,18 @@ bool SharedImageFactory::CreateSharedImage(const Mailbox& mailbox,
}
bool SharedImageFactory::UpdateSharedImage(const Mailbox& mailbox) {
+ return UpdateSharedImage(mailbox, nullptr);
+}
+
+bool SharedImageFactory::UpdateSharedImage(
+ const Mailbox& mailbox,
+ std::unique_ptr<gfx::GpuFence> in_fence) {
auto it = shared_images_.find(mailbox);
if (it == shared_images_.end()) {
LOG(ERROR) << "UpdateSharedImage: Could not find shared image mailbox";
return false;
}
- (*it)->Update();
+ (*it)->Update(std::move(in_fence));
return true;
}
@@ -256,6 +264,11 @@ bool SharedImageFactory::OnMemoryDump(
return true;
}
+void SharedImageFactory::RegisterSharedImageBackingFactoryForTesting(
+ SharedImageBackingFactory* factory) {
+ backing_factory_for_testing_ = factory;
+}
+
bool SharedImageFactory::IsSharedBetweenThreads(uint32_t usage) {
// If |shared_image_manager_| is thread safe, it means the display is running
// on a separate thread (which uses a separate GL context or VkDeviceQueue).
@@ -267,6 +280,9 @@ SharedImageBackingFactory* SharedImageFactory::GetFactoryByUsage(
uint32_t usage,
bool* allow_legacy_mailbox,
gfx::GpuMemoryBufferType gmb_type) {
+ if (backing_factory_for_testing_)
+ return backing_factory_for_testing_;
+
bool using_dawn = usage & SHARED_IMAGE_USAGE_WEBGPU;
bool vulkan_usage = using_vulkan_ && (usage & SHARED_IMAGE_USAGE_DISPLAY);
bool gl_usage = usage & SHARED_IMAGE_USAGE_GLES2;
@@ -277,6 +293,7 @@ SharedImageBackingFactory* SharedImageFactory::GetFactoryByUsage(
bool using_interop_factory = share_between_threads ||
share_between_gl_vulkan || using_dawn ||
share_between_gl_metal;
+
// wrapped_sk_image_factory_ is only used for OOPR and supports
// a limited number of flags (e.g. no SHARED_IMAGE_USAGE_SCANOUT).
constexpr auto kWrappedSkImageUsage = SHARED_IMAGE_USAGE_RASTER |
diff --git a/chromium/gpu/command_buffer/service/shared_image_factory.h b/chromium/gpu/command_buffer/service/shared_image_factory.h
index e5401df8623..eacfd41f1ca 100644
--- a/chromium/gpu/command_buffer/service/shared_image_factory.h
+++ b/chromium/gpu/command_buffer/service/shared_image_factory.h
@@ -74,6 +74,8 @@ class GPU_GLES2_EXPORT SharedImageFactory {
const gfx::ColorSpace& color_space,
uint32_t usage);
bool UpdateSharedImage(const Mailbox& mailbox);
+ bool UpdateSharedImage(const Mailbox& mailbox,
+ std::unique_ptr<gfx::GpuFence> in_fence);
bool DestroySharedImage(const Mailbox& mailbox);
bool HasImages() const { return !shared_images_.empty(); }
void DestroyAllSharedImages(bool have_context);
@@ -95,6 +97,9 @@ class GPU_GLES2_EXPORT SharedImageFactory {
bool RegisterBacking(std::unique_ptr<SharedImageBacking> backing,
bool allow_legacy_mailbox);
+ void RegisterSharedImageBackingFactoryForTesting(
+ SharedImageBackingFactory* factory);
+
private:
bool IsSharedBetweenThreads(uint32_t usage);
SharedImageBackingFactory* GetFactoryByUsage(
@@ -126,6 +131,8 @@ class GPU_GLES2_EXPORT SharedImageFactory {
// Used for creating DXGI Swap Chain.
std::unique_ptr<SwapChainFactoryDXGI> swap_chain_factory_;
#endif // OS_WIN
+
+ SharedImageBackingFactory* backing_factory_for_testing_ = nullptr;
};
class GPU_GLES2_EXPORT SharedImageRepresentationFactory {
diff --git a/chromium/gpu/command_buffer/service/shared_image_manager.cc b/chromium/gpu/command_buffer/service/shared_image_manager.cc
index 6efe9cce6d0..c38b0608dd5 100644
--- a/chromium/gpu/command_buffer/service/shared_image_manager.cc
+++ b/chromium/gpu/command_buffer/service/shared_image_manager.cc
@@ -118,7 +118,8 @@ SharedImageManager::ProduceGLTexture(const Mailbox& mailbox,
auto found = images_.find(mailbox);
if (found == images_.end()) {
LOG(ERROR) << "SharedImageManager::ProduceGLTexture: Trying to produce a "
- "representation from a non-existent mailbox.";
+ "representation from a non-existent mailbox. "
+ << mailbox.ToDebugString();
return nullptr;
}
diff --git a/chromium/gpu/command_buffer/service/shared_image_manager_unittest.cc b/chromium/gpu/command_buffer/service/shared_image_manager_unittest.cc
index 7c2f84ddbf7..f1b39a2986d 100644
--- a/chromium/gpu/command_buffer/service/shared_image_manager_unittest.cc
+++ b/chromium/gpu/command_buffer/service/shared_image_manager_unittest.cc
@@ -59,7 +59,7 @@ class MockSharedImageBacking : public SharedImageBacking {
MOCK_CONST_METHOD0(IsCleared, bool());
MOCK_METHOD0(SetCleared, void());
- MOCK_METHOD0(Update, void());
+ MOCK_METHOD1(Update, void(std::unique_ptr<gfx::GpuFence>));
MOCK_METHOD0(Destroy, void());
MOCK_METHOD1(ProduceLegacyMailbox, bool(MailboxManager*));
diff --git a/chromium/gpu/command_buffer/service/shared_image_representation.cc b/chromium/gpu/command_buffer/service/shared_image_representation.cc
index 9cedf2e408e..cc60dd1a080 100644
--- a/chromium/gpu/command_buffer/service/shared_image_representation.cc
+++ b/chromium/gpu/command_buffer/service/shared_image_representation.cc
@@ -4,6 +4,8 @@
#include "gpu/command_buffer/service/shared_image_representation.h"
+#include "third_party/skia/include/core/SkPromiseImageTexture.h"
+
namespace gpu {
SharedImageRepresentation::SharedImageRepresentation(
@@ -27,4 +29,44 @@ bool SharedImageRepresentationGLTexturePassthrough::BeginAccess(GLenum mode) {
return true;
}
+SharedImageRepresentationSkia::ScopedWriteAccess::ScopedWriteAccess(
+ SharedImageRepresentationSkia* representation,
+ int final_msaa_count,
+ const SkSurfaceProps& surface_props,
+ std::vector<GrBackendSemaphore>* begin_semaphores,
+ std::vector<GrBackendSemaphore>* end_semaphores)
+ : representation_(representation),
+ surface_(representation_->BeginWriteAccess(final_msaa_count,
+ surface_props,
+ begin_semaphores,
+ end_semaphores)) {}
+
+SharedImageRepresentationSkia::ScopedWriteAccess::ScopedWriteAccess(
+ SharedImageRepresentationSkia* representation,
+ std::vector<GrBackendSemaphore>* begin_semaphores,
+ std::vector<GrBackendSemaphore>* end_semaphores)
+ : ScopedWriteAccess(representation,
+ 0 /* final_msaa_count */,
+ SkSurfaceProps(0 /* flags */, kUnknown_SkPixelGeometry),
+ begin_semaphores,
+ end_semaphores) {}
+
+SharedImageRepresentationSkia::ScopedWriteAccess::~ScopedWriteAccess() {
+ if (success())
+ representation_->EndWriteAccess(std::move(surface_));
+}
+
+SharedImageRepresentationSkia::ScopedReadAccess::ScopedReadAccess(
+ SharedImageRepresentationSkia* representation,
+ std::vector<GrBackendSemaphore>* begin_semaphores,
+ std::vector<GrBackendSemaphore>* end_semaphores)
+ : representation_(representation),
+ promise_image_texture_(
+ representation_->BeginReadAccess(begin_semaphores, end_semaphores)) {}
+
+SharedImageRepresentationSkia::ScopedReadAccess::~ScopedReadAccess() {
+ if (success())
+ representation_->EndReadAccess();
+}
+
} // namespace gpu
diff --git a/chromium/gpu/command_buffer/service/shared_image_representation.h b/chromium/gpu/command_buffer/service/shared_image_representation.h
index 35f409d48b9..703c6435be9 100644
--- a/chromium/gpu/command_buffer/service/shared_image_representation.h
+++ b/chromium/gpu/command_buffer/service/shared_image_representation.h
@@ -16,6 +16,7 @@
#include "third_party/skia/include/core/SkSurface.h"
#include "ui/gfx/color_space.h"
#include "ui/gfx/geometry/size.h"
+#include "ui/gfx/gpu_fence.h"
typedef unsigned int GLenum;
class SkPromiseImageTexture;
@@ -26,6 +27,12 @@ class Texture;
class TexturePassthrough;
} // namespace gles2
+enum class RepresentationAccessMode {
+ kNone,
+ kRead,
+ kWrite,
+};
+
// A representation of a SharedImageBacking for use with a specific use case /
// api.
class GPU_GLES2_EXPORT SharedImageRepresentation {
@@ -51,6 +58,7 @@ class GPU_GLES2_EXPORT SharedImageRepresentation {
}
protected:
+ SharedImageManager* manager() const { return manager_; }
SharedImageBacking* backing() const { return backing_; }
bool has_context() const { return has_context_; }
@@ -69,7 +77,9 @@ class SharedImageRepresentationFactoryRef : public SharedImageRepresentation {
: SharedImageRepresentation(manager, backing, tracker) {}
const Mailbox& mailbox() const { return backing()->mailbox(); }
- void Update() { backing()->Update(); }
+ void Update(std::unique_ptr<gfx::GpuFence> in_fence) {
+ backing()->Update(std::move(in_fence));
+ }
#if defined(OS_WIN)
void PresentSwapChain() { backing()->PresentSwapChain(); }
#endif // OS_WIN
@@ -149,6 +159,43 @@ class GPU_GLES2_EXPORT SharedImageRepresentationGLTexturePassthrough
class SharedImageRepresentationSkia : public SharedImageRepresentation {
public:
+ class ScopedWriteAccess {
+ public:
+ ScopedWriteAccess(SharedImageRepresentationSkia* representation,
+ int final_msaa_count,
+ const SkSurfaceProps& surface_props,
+ std::vector<GrBackendSemaphore>* begin_semaphores,
+ std::vector<GrBackendSemaphore>* end_semaphores);
+ ScopedWriteAccess(SharedImageRepresentationSkia* representation,
+ std::vector<GrBackendSemaphore>* begin_semaphores,
+ std::vector<GrBackendSemaphore>* end_semaphores);
+ ~ScopedWriteAccess();
+
+ bool success() const { return !!surface_; }
+ SkSurface* surface() const { return surface_.get(); }
+
+ private:
+ SharedImageRepresentationSkia* const representation_;
+ sk_sp<SkSurface> surface_;
+ };
+
+ class ScopedReadAccess {
+ public:
+ ScopedReadAccess(SharedImageRepresentationSkia* representation,
+ std::vector<GrBackendSemaphore>* begin_semaphores,
+ std::vector<GrBackendSemaphore>* end_semaphores);
+ ~ScopedReadAccess();
+
+ bool success() const { return !!promise_image_texture_; }
+ SkPromiseImageTexture* promise_image_texture() const {
+ return promise_image_texture_.get();
+ }
+
+ private:
+ SharedImageRepresentationSkia* const representation_;
+ sk_sp<SkPromiseImageTexture> promise_image_texture_;
+ };
+
SharedImageRepresentationSkia(SharedImageManager* manager,
SharedImageBacking* backing,
MemoryTypeTracker* tracker)
diff --git a/chromium/gpu/command_buffer/service/shared_image_representation_skia_gl.cc b/chromium/gpu/command_buffer/service/shared_image_representation_skia_gl.cc
new file mode 100644
index 00000000000..746300a2c24
--- /dev/null
+++ b/chromium/gpu/command_buffer/service/shared_image_representation_skia_gl.cc
@@ -0,0 +1,141 @@
+// Copyright 2019 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "gpu/command_buffer/service/shared_image_representation_skia_gl.h"
+
+#include "components/viz/common/resources/resource_format_utils.h"
+#include "gpu/command_buffer/service/shared_context_state.h"
+#include "gpu/command_buffer/service/skia_utils.h"
+#include "gpu/command_buffer/service/texture_manager.h"
+#include "third_party/skia/include/core/SkPromiseImageTexture.h"
+#include "third_party/skia/include/gpu/GrBackendSurface.h"
+#include "ui/gl/gl_bindings.h"
+
+namespace gpu {
+
+std::ostream& operator<<(std::ostream& os, RepresentationAccessMode mode) {
+ switch (mode) {
+ case RepresentationAccessMode::kNone:
+ os << "kNone";
+ break;
+ case RepresentationAccessMode::kRead:
+ os << "kRead";
+ break;
+ case RepresentationAccessMode::kWrite:
+ os << "kWrite";
+ break;
+ }
+ return os;
+}
+
+// static method.
+std::unique_ptr<SharedImageRepresentationSkiaGL>
+SharedImageRepresentationSkiaGL::Create(
+ std::unique_ptr<SharedImageRepresentationGLTexture> gl_representation,
+ scoped_refptr<SharedContextState> context_state,
+ SharedImageManager* manager,
+ SharedImageBacking* backing,
+ MemoryTypeTracker* tracker) {
+ GrBackendTexture backend_texture;
+ if (!GetGrBackendTexture(gl::GLContext::GetCurrent()->GetVersionInfo(),
+ gl_representation->GetTexture()->target(),
+ backing->size(),
+ gl_representation->GetTexture()->service_id(),
+ backing->format(), &backend_texture)) {
+ return nullptr;
+ }
+ auto promise_texture = SkPromiseImageTexture::Make(backend_texture);
+ if (!promise_texture)
+ return nullptr;
+ return base::WrapUnique(new SharedImageRepresentationSkiaGL(
+ std::move(gl_representation), std::move(promise_texture),
+ std::move(context_state), manager, backing, tracker));
+}
+
+SharedImageRepresentationSkiaGL::SharedImageRepresentationSkiaGL(
+ std::unique_ptr<SharedImageRepresentationGLTexture> gl_representation,
+ sk_sp<SkPromiseImageTexture> promise_texture,
+ scoped_refptr<SharedContextState> context_state,
+ SharedImageManager* manager,
+ SharedImageBacking* backing,
+ MemoryTypeTracker* tracker)
+ : SharedImageRepresentationSkia(manager, backing, tracker),
+ gl_representation_(std::move(gl_representation)),
+ promise_texture_(std::move(promise_texture)),
+ context_state_(std::move(context_state)) {
+#if DCHECK_IS_ON()
+ context_ = gl::GLContext::GetCurrent();
+#endif
+}
+
+SharedImageRepresentationSkiaGL::~SharedImageRepresentationSkiaGL() {
+ DCHECK_EQ(RepresentationAccessMode::kNone, mode_);
+ DCHECK(!surface_);
+}
+
+sk_sp<SkSurface> SharedImageRepresentationSkiaGL::BeginWriteAccess(
+ int final_msaa_count,
+ const SkSurfaceProps& surface_props,
+ std::vector<GrBackendSemaphore>* begin_semaphores,
+ std::vector<GrBackendSemaphore>* end_semaphores) {
+ DCHECK_EQ(mode_, RepresentationAccessMode::kNone);
+ DCHECK(!surface_);
+ CheckContext();
+
+ if (!gl_representation_->BeginAccess(
+ GL_SHARED_IMAGE_ACCESS_MODE_READWRITE_CHROMIUM)) {
+ return nullptr;
+ }
+
+ SkColorType sk_color_type = viz::ResourceFormatToClosestSkColorType(
+ /*gpu_compositing=*/true, format());
+ auto surface = SkSurface::MakeFromBackendTextureAsRenderTarget(
+ context_state_->gr_context(), promise_texture_->backendTexture(),
+ kTopLeft_GrSurfaceOrigin, final_msaa_count, sk_color_type,
+ backing()->color_space().ToSkColorSpace(), &surface_props);
+ surface_ = surface.get();
+ mode_ = RepresentationAccessMode::kWrite;
+ return surface;
+}
+
+void SharedImageRepresentationSkiaGL::EndWriteAccess(sk_sp<SkSurface> surface) {
+ DCHECK_EQ(mode_, RepresentationAccessMode::kWrite);
+ DCHECK(surface_);
+ DCHECK_EQ(surface.get(), surface_);
+ DCHECK(surface->unique());
+
+ gl_representation_->EndAccess();
+ mode_ = RepresentationAccessMode::kNone;
+ surface_ = nullptr;
+}
+
+sk_sp<SkPromiseImageTexture> SharedImageRepresentationSkiaGL::BeginReadAccess(
+ std::vector<GrBackendSemaphore>* begin_semaphores,
+ std::vector<GrBackendSemaphore>* end_semaphores) {
+ DCHECK_EQ(mode_, RepresentationAccessMode::kNone);
+ CheckContext();
+
+ if (!gl_representation_->BeginAccess(
+ GL_SHARED_IMAGE_ACCESS_MODE_READ_CHROMIUM))
+ return nullptr;
+ mode_ = RepresentationAccessMode::kRead;
+ return promise_texture_;
+}
+
+void SharedImageRepresentationSkiaGL::EndReadAccess() {
+ DCHECK_EQ(mode_, RepresentationAccessMode::kRead);
+ CheckContext();
+
+ gl_representation_->EndAccess();
+ mode_ = RepresentationAccessMode::kNone;
+ surface_ = nullptr;
+}
+
+void SharedImageRepresentationSkiaGL::CheckContext() {
+#if DCHECK_IS_ON()
+ DCHECK(gl::GLContext::GetCurrent() == context_);
+#endif
+}
+
+} // namespace gpu
diff --git a/chromium/gpu/command_buffer/service/shared_image_representation_skia_gl.h b/chromium/gpu/command_buffer/service/shared_image_representation_skia_gl.h
new file mode 100644
index 00000000000..5ad4ab8b11d
--- /dev/null
+++ b/chromium/gpu/command_buffer/service/shared_image_representation_skia_gl.h
@@ -0,0 +1,63 @@
+// Copyright 2019 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef GPU_COMMAND_BUFFER_SERVICE_SHARED_IMAGE_REPRESENTATION_SKIA_GL_H_
+#define GPU_COMMAND_BUFFER_SERVICE_SHARED_IMAGE_REPRESENTATION_SKIA_GL_H_
+
+#include "gpu/command_buffer/service/shared_image_representation.h"
+#include "ui/gl/gl_context.h"
+
+namespace gpu {
+// This is a wrapper class for SharedImageRepresentationSkia to be used in GL
+// mode. For most of the SharedImageBackings, SharedImageRepresentationGLTexture
+// and SharedImageRepresentationSkia implementations do the same work which
+// results in duplicate code. Hence instead of implementing
+// SharedImageRepresentationSkia, this wrapper can be directly used or
+// implemented by the backings.
+class GPU_GLES2_EXPORT SharedImageRepresentationSkiaGL
+ : public SharedImageRepresentationSkia {
+ public:
+ static std::unique_ptr<SharedImageRepresentationSkiaGL> Create(
+ std::unique_ptr<SharedImageRepresentationGLTexture> gl_representation,
+ scoped_refptr<SharedContextState> context_state,
+ SharedImageManager* manager,
+ SharedImageBacking* backing,
+ MemoryTypeTracker* tracker);
+
+ ~SharedImageRepresentationSkiaGL() override;
+
+ sk_sp<SkSurface> BeginWriteAccess(
+ int final_msaa_count,
+ const SkSurfaceProps& surface_props,
+ std::vector<GrBackendSemaphore>* begin_semaphores,
+ std::vector<GrBackendSemaphore>* end_semaphores) override;
+ void EndWriteAccess(sk_sp<SkSurface> surface) override;
+ sk_sp<SkPromiseImageTexture> BeginReadAccess(
+ std::vector<GrBackendSemaphore>* begin_semaphores,
+ std::vector<GrBackendSemaphore>* end_semaphores) override;
+ void EndReadAccess() override;
+
+ private:
+ SharedImageRepresentationSkiaGL(
+ std::unique_ptr<SharedImageRepresentationGLTexture> gl_representation,
+ sk_sp<SkPromiseImageTexture> promise_texture,
+ scoped_refptr<SharedContextState> context_state,
+ SharedImageManager* manager,
+ SharedImageBacking* backing,
+ MemoryTypeTracker* tracker);
+ void CheckContext();
+
+ std::unique_ptr<SharedImageRepresentationGLTexture> gl_representation_;
+ sk_sp<SkPromiseImageTexture> promise_texture_;
+ scoped_refptr<SharedContextState> context_state_;
+ SkSurface* surface_ = nullptr;
+ RepresentationAccessMode mode_ = RepresentationAccessMode::kNone;
+#if DCHECK_IS_ON()
+ gl::GLContext* context_;
+#endif
+};
+
+} // namespace gpu
+
+#endif // GPU_COMMAND_BUFFER_SERVICE_SHARED_IMAGE_REPRESENTATION_SKIA_GL_H_
diff --git a/chromium/gpu/command_buffer/service/skia_utils.cc b/chromium/gpu/command_buffer/service/skia_utils.cc
index ec81f696f4e..bb82e21d78c 100644
--- a/chromium/gpu/command_buffer/service/skia_utils.cc
+++ b/chromium/gpu/command_buffer/service/skia_utils.cc
@@ -7,6 +7,7 @@
#include "base/logging.h"
#include "components/viz/common/gpu/vulkan_context_provider.h"
#include "components/viz/common/resources/resource_format_utils.h"
+#include "gpu/command_buffer/service/shared_context_state.h"
#include "third_party/skia/include/gpu/GrBackendSurface.h"
#include "third_party/skia/include/gpu/gl/GrGLTypes.h"
#include "ui/gfx/geometry/size.h"
@@ -90,4 +91,27 @@ void AddVulkanCleanupTaskForSkiaFlush(
#endif
}
+void DeleteGrBackendTexture(SharedContextState* context_state,
+ GrBackendTexture* backend_texture) {
+ DCHECK(backend_texture && backend_texture->isValid());
+ if (!context_state->GrContextIsVulkan()) {
+ context_state->gr_context()->deleteBackendTexture(
+ std::move(*backend_texture));
+ return;
+ }
+
+#if BUILDFLAG(ENABLE_VULKAN)
+ auto* fence_helper =
+ context_state->vk_context_provider()->GetDeviceQueue()->GetFenceHelper();
+ fence_helper->EnqueueCleanupTaskForSubmittedWork(base::BindOnce(
+ [](const sk_sp<GrContext>& gr_context, GrBackendTexture backend_texture,
+ gpu::VulkanDeviceQueue* device_queue, bool is_lost) {
+ // If underlying Vulkan device is destroyed, gr_context should have been
+ // abandoned, the deleteBackendTexture() should be noop.
+ gr_context->deleteBackendTexture(std::move(backend_texture));
+ },
+ sk_ref_sp(context_state->gr_context()), std::move(*backend_texture)));
+#endif
+}
+
} // namespace gpu
diff --git a/chromium/gpu/command_buffer/service/skia_utils.h b/chromium/gpu/command_buffer/service/skia_utils.h
index 3edf8b47187..e5e1a965e03 100644
--- a/chromium/gpu/command_buffer/service/skia_utils.h
+++ b/chromium/gpu/command_buffer/service/skia_utils.h
@@ -31,6 +31,9 @@ class VulkanContextProvider;
} // namespace viz
namespace gpu {
+
+class SharedContextState;
+
// Creates a GrBackendTexture from a service ID. Skia does not take ownership.
// Returns true on success.
GPU_GLES2_EXPORT bool GetGrBackendTexture(const gl::GLVersionInfo* version_info,
@@ -50,6 +53,9 @@ GPU_GLES2_EXPORT void AddVulkanCleanupTaskForSkiaFlush(
viz::VulkanContextProvider* context_provider,
GrFlushInfo* flush_info);
+GPU_GLES2_EXPORT void DeleteGrBackendTexture(
+ SharedContextState* context_state,
+ GrBackendTexture* backend_textures);
} // namespace gpu
#endif // GPU_COMMAND_BUFFER_SERVICE_SKIA_UTILS_H_
diff --git a/chromium/gpu/command_buffer/service/swap_chain_factory_dxgi.cc b/chromium/gpu/command_buffer/service/swap_chain_factory_dxgi.cc
index 00447c1d48b..3f3fd71e9a5 100644
--- a/chromium/gpu/command_buffer/service/swap_chain_factory_dxgi.cc
+++ b/chromium/gpu/command_buffer/service/swap_chain_factory_dxgi.cc
@@ -13,6 +13,7 @@
#include "gpu/command_buffer/service/shared_image_backing.h"
#include "gpu/command_buffer/service/shared_image_representation.h"
#include "gpu/command_buffer/service/texture_manager.h"
+#include "ui/gl/direct_composition_surface_win.h"
#include "ui/gl/gl_angle_util_win.h"
#include "ui/gl/gl_image_dxgi_swap_chain.h"
#include "ui/gl/trace_util.h"
@@ -158,7 +159,7 @@ class SharedImageBackingDXGISwapChain : public SharedImageBacking {
void SetCleared() override {}
- void Update() override {
+ void Update(std::unique_ptr<gfx::GpuFence> in_fence) override {
DLOG(ERROR) << "SharedImageBackingDXGISwapChain::Update : Trying to update "
"Shared Images associated with swap chain.";
}
@@ -229,15 +230,15 @@ class SharedImageBackingDXGISwapChain : public SharedImageBacking {
gl::GLImage* image;
unsigned target = GL_TEXTURE_2D;
- gles2::Texture::ImageState image_state;
if (texture_) {
+ gles2::Texture::ImageState image_state;
image = texture_->GetLevelImage(target, 0, &image_state);
+ DCHECK_EQ(image_state, gles2::Texture::BOUND);
} else {
DCHECK(texture_passthrough_);
image = texture_passthrough_->GetLevelImage(target, 0);
}
DCHECK(image);
- DCHECK_EQ(image_state, gles2::Texture::BOUND);
if (!image->BindTexImage(target)) {
DLOG(ERROR) << "Failed to rebind texture to new surface.";
@@ -291,6 +292,11 @@ SwapChainFactoryDXGI::SwapChainBackings&
SwapChainFactoryDXGI::SwapChainBackings::operator=(
SwapChainFactoryDXGI::SwapChainBackings&&) = default;
+// static
+bool SwapChainFactoryDXGI::IsSupported() {
+ return gl::DirectCompositionSurfaceWin::IsDirectCompositionSupported();
+}
+
std::unique_ptr<SharedImageBacking> SwapChainFactoryDXGI::MakeBacking(
const Mailbox& mailbox,
viz::ResourceFormat format,
@@ -314,6 +320,10 @@ std::unique_ptr<SharedImageBacking> SwapChainFactoryDXGI::MakeBacking(
auto image = base::MakeRefCounted<gl::GLImageDXGISwapChain>(
size, viz::BufferFormat(format), d3d11_texture, swap_chain);
+ if (!image->Initialize()) {
+ DLOG(ERROR) << "Failed to create EGL image";
+ return nullptr;
+ }
if (!image->BindTexImage(target)) {
DLOG(ERROR) << "Failed to bind image to swap chain D3D11 texture.";
return nullptr;
@@ -349,7 +359,7 @@ std::unique_ptr<SharedImageBacking> SwapChainFactoryDXGI::MakeBacking(
size.height(), 1, 0, gl_format, gl_type,
gfx::Rect(size));
texture->SetLevelImage(target, 0, image.get(), gles2::Texture::BOUND);
- texture->SetImmutable(true);
+ texture->SetImmutable(true, false);
}
return std::make_unique<SharedImageBackingDXGISwapChain>(
@@ -393,7 +403,7 @@ SwapChainFactoryDXGI::SwapChainBackings SwapChainFactoryDXGI::CreateSwapChain(
desc.Stereo = FALSE;
desc.SampleDesc.Count = 1;
desc.BufferCount = 2;
- desc.BufferUsage = DXGI_USAGE_RENDER_TARGET_OUTPUT;
+ desc.BufferUsage = DXGI_USAGE_RENDER_TARGET_OUTPUT | DXGI_USAGE_SHADER_INPUT;
desc.Scaling = DXGI_SCALING_STRETCH;
desc.SwapEffect = DXGI_SWAP_EFFECT_FLIP_SEQUENTIAL;
desc.Flags = 0;
diff --git a/chromium/gpu/command_buffer/service/swap_chain_factory_dxgi.h b/chromium/gpu/command_buffer/service/swap_chain_factory_dxgi.h
index ba21b19f62e..07a1d98d251 100644
--- a/chromium/gpu/command_buffer/service/swap_chain_factory_dxgi.h
+++ b/chromium/gpu/command_buffer/service/swap_chain_factory_dxgi.h
@@ -28,7 +28,10 @@ class GPU_GLES2_EXPORT SwapChainFactoryDXGI {
explicit SwapChainFactoryDXGI(bool use_passthrough);
~SwapChainFactoryDXGI();
- struct SwapChainBackings {
+ // Returns true if DXGI swap chain shared images for overlays are supported.
+ static bool IsSupported();
+
+ struct GPU_GLES2_EXPORT SwapChainBackings {
SwapChainBackings(std::unique_ptr<SharedImageBacking> front_buffer,
std::unique_ptr<SharedImageBacking> back_buffer);
~SwapChainBackings();
@@ -71,4 +74,4 @@ class GPU_GLES2_EXPORT SwapChainFactoryDXGI {
} // namespace gpu
-#endif // GPU_COMMAND_BUFFER_SERVICE_SWAP_CHAIN_FACTORY_DXGI_H_ \ No newline at end of file
+#endif // GPU_COMMAND_BUFFER_SERVICE_SWAP_CHAIN_FACTORY_DXGI_H_
diff --git a/chromium/gpu/command_buffer/service/swap_chain_factory_dxgi_unittest.cc b/chromium/gpu/command_buffer/service/swap_chain_factory_dxgi_unittest.cc
new file mode 100644
index 00000000000..2e2ed8d90f0
--- /dev/null
+++ b/chromium/gpu/command_buffer/service/swap_chain_factory_dxgi_unittest.cc
@@ -0,0 +1,320 @@
+// Copyright 2019 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "gpu/command_buffer/service/swap_chain_factory_dxgi.h"
+
+#include <memory>
+#include <utility>
+
+#include "base/bind_helpers.h"
+#include "gpu/command_buffer/common/shared_image_usage.h"
+#include "gpu/command_buffer/service/service_utils.h"
+#include "gpu/command_buffer/service/shared_context_state.h"
+#include "gpu/command_buffer/service/shared_image_factory.h"
+#include "gpu/command_buffer/service/shared_image_manager.h"
+#include "gpu/command_buffer/service/shared_image_representation.h"
+#include "testing/gtest/include/gtest/gtest.h"
+#include "ui/gl/gl_angle_util_win.h"
+#include "ui/gl/gl_context.h"
+#include "ui/gl/gl_image_dxgi_swap_chain.h"
+#include "ui/gl/gl_surface.h"
+#include "ui/gl/init/gl_factory.h"
+
+namespace gpu {
+namespace {
+
+class SwapChainFactoryDXGITest : public testing::Test {
+ public:
+ void SetUp() override {
+ surface_ = gl::init::CreateOffscreenGLSurface(gfx::Size());
+ ASSERT_TRUE(surface_);
+ context_ = gl::init::CreateGLContext(nullptr, surface_.get(),
+ gl::GLContextAttribs());
+ ASSERT_TRUE(context_);
+ bool result = context_->MakeCurrent(surface_.get());
+ ASSERT_TRUE(result);
+
+ memory_type_tracker_ = std::make_unique<MemoryTypeTracker>(nullptr);
+ shared_image_representation_factory_ =
+ std::make_unique<SharedImageRepresentationFactory>(
+ &shared_image_manager_, nullptr);
+ }
+
+ protected:
+ bool UsesPassthrough() const {
+ return gles2::PassthroughCommandDecoderSupported();
+ }
+ void CreateAndPresentSwapChain(bool uses_passthrough_texture);
+
+ scoped_refptr<gl::GLSurface> surface_;
+ scoped_refptr<gl::GLContext> context_;
+ SharedImageManager shared_image_manager_;
+ std::unique_ptr<MemoryTypeTracker> memory_type_tracker_;
+ std::unique_ptr<SharedImageRepresentationFactory>
+ shared_image_representation_factory_;
+};
+
+void SwapChainFactoryDXGITest::CreateAndPresentSwapChain(
+ bool uses_passthrough_texture) {
+ DCHECK(SwapChainFactoryDXGI::IsSupported());
+ std::unique_ptr<SwapChainFactoryDXGI> swap_chain_factory_ =
+ std::make_unique<SwapChainFactoryDXGI>(uses_passthrough_texture);
+ auto front_buffer_mailbox = Mailbox::GenerateForSharedImage();
+ auto back_buffer_mailbox = Mailbox::GenerateForSharedImage();
+ auto format = viz::RGBA_8888;
+ gfx::Size size(1, 1);
+ auto color_space = gfx::ColorSpace::CreateSRGB();
+ uint32_t usage = gpu::SHARED_IMAGE_USAGE_GLES2 |
+ gpu::SHARED_IMAGE_USAGE_GLES2_FRAMEBUFFER_HINT |
+ gpu::SHARED_IMAGE_USAGE_DISPLAY |
+ gpu::SHARED_IMAGE_USAGE_SCANOUT;
+
+ auto backings = swap_chain_factory_->CreateSwapChain(
+ front_buffer_mailbox, back_buffer_mailbox, format, size, color_space,
+ usage);
+ ASSERT_TRUE(backings.front_buffer);
+ ASSERT_TRUE(backings.back_buffer);
+
+ GLenum expected_target = GL_TEXTURE_2D;
+ Microsoft::WRL::ComPtr<ID3D11Texture2D> d3d11_texture;
+ scoped_refptr<gles2::TexturePassthrough> back_passthrough_texture;
+ gles2::Texture* back_texture = nullptr;
+ gl::GLImageDXGISwapChain* back_image = nullptr;
+ gl::GLImageDXGISwapChain* front_image = nullptr;
+
+ std::unique_ptr<SharedImageRepresentationFactoryRef> back_factory_ref =
+ shared_image_manager_.Register(std::move(backings.back_buffer),
+ memory_type_tracker_.get());
+ std::unique_ptr<SharedImageRepresentationFactoryRef> front_factory_ref =
+ shared_image_manager_.Register(std::move(backings.front_buffer),
+ memory_type_tracker_.get());
+
+ if (uses_passthrough_texture) {
+ auto back_gl_representation =
+ shared_image_representation_factory_->ProduceGLTexturePassthrough(
+ back_buffer_mailbox);
+ ASSERT_TRUE(back_gl_representation);
+ back_passthrough_texture = back_gl_representation->GetTexturePassthrough();
+ EXPECT_TRUE(back_passthrough_texture);
+ EXPECT_EQ(TextureBase::Type::kPassthrough,
+ back_passthrough_texture->GetType());
+ EXPECT_EQ(expected_target, back_passthrough_texture->target());
+ back_gl_representation.reset();
+
+ back_image = gl::GLImageDXGISwapChain::FromGLImage(
+ back_passthrough_texture->GetLevelImage(
+ back_passthrough_texture->target(), 0));
+ ASSERT_TRUE(back_image);
+
+ auto front_gl_representation =
+ shared_image_representation_factory_->ProduceGLTexturePassthrough(
+ front_buffer_mailbox);
+ ASSERT_TRUE(front_gl_representation);
+ auto front_passthrough_texture =
+ front_gl_representation->GetTexturePassthrough();
+ EXPECT_TRUE(front_passthrough_texture);
+ EXPECT_EQ(TextureBase::Type::kPassthrough,
+ front_passthrough_texture->GetType());
+ EXPECT_EQ(expected_target, front_passthrough_texture->target());
+ front_gl_representation.reset();
+
+ front_image = gl::GLImageDXGISwapChain::FromGLImage(
+ front_passthrough_texture->GetLevelImage(
+ front_passthrough_texture->target(), 0));
+ ASSERT_TRUE(front_image);
+ front_passthrough_texture.reset();
+ } else {
+ auto back_gl_representation =
+ shared_image_representation_factory_->ProduceGLTexture(
+ back_buffer_mailbox);
+ ASSERT_TRUE(back_gl_representation);
+ back_texture = back_gl_representation->GetTexture();
+ EXPECT_TRUE(back_texture);
+ EXPECT_EQ(TextureBase::Type::kValidated, back_texture->GetType());
+ EXPECT_EQ(expected_target, back_texture->target());
+ // Ensures that back buffer is explicitly cleared.
+ EXPECT_TRUE(back_texture->IsLevelCleared(back_texture->target(), 0));
+ back_gl_representation.reset();
+
+ gles2::Texture::ImageState image_state;
+ back_image = gl::GLImageDXGISwapChain::FromGLImage(
+ back_texture->GetLevelImage(back_texture->target(), 0, &image_state));
+ ASSERT_TRUE(back_image);
+ EXPECT_EQ(gles2::Texture::BOUND, image_state);
+
+ auto front_gl_representation =
+ shared_image_representation_factory_->ProduceGLTexture(
+ front_buffer_mailbox);
+ ASSERT_TRUE(front_gl_representation);
+ gles2::Texture* front_texture = front_gl_representation->GetTexture();
+ EXPECT_TRUE(front_texture);
+ EXPECT_EQ(TextureBase::Type::kValidated, front_texture->GetType());
+ EXPECT_EQ(expected_target, front_texture->target());
+ // Ensures that front buffer is explicitly cleared.
+ EXPECT_TRUE(front_texture->IsLevelCleared(front_texture->target(), 0));
+ front_gl_representation.reset();
+
+ front_image = gl::GLImageDXGISwapChain::FromGLImage(
+ front_texture->GetLevelImage(front_texture->target(), 0, &image_state));
+ ASSERT_TRUE(front_image);
+ EXPECT_EQ(gles2::Texture::BOUND, image_state);
+ }
+
+ EXPECT_EQ(S_OK, back_image->swap_chain()->GetBuffer(
+ 0 /* buffer_index */, IID_PPV_ARGS(&d3d11_texture)));
+ EXPECT_TRUE(d3d11_texture);
+ EXPECT_EQ(d3d11_texture, back_image->texture());
+ d3d11_texture.Reset();
+
+ EXPECT_EQ(S_OK, front_image->swap_chain()->GetBuffer(
+ 1 /* buffer_index */, IID_PPV_ARGS(&d3d11_texture)));
+ EXPECT_TRUE(d3d11_texture);
+ EXPECT_EQ(d3d11_texture, front_image->texture());
+ d3d11_texture.Reset();
+
+ GLenum target;
+ GLuint service_id;
+ if (uses_passthrough_texture) {
+ target = back_passthrough_texture->target();
+ service_id = back_passthrough_texture->service_id();
+ back_passthrough_texture.reset();
+ } else {
+ target = back_texture->target();
+ service_id = back_texture->service_id();
+ }
+
+ // Create an FBO.
+ GLuint fbo = 0;
+ gl::GLApi* api = gl::g_current_gl_context;
+ api->glGenFramebuffersEXTFn(1, &fbo);
+ api->glBindFramebufferEXTFn(GL_FRAMEBUFFER, fbo);
+ api->glBindTextureFn(target, service_id);
+ ASSERT_EQ(api->glGetErrorFn(), static_cast<GLenum>(GL_NO_ERROR));
+
+ // Attach the texture to FBO.
+ api->glFramebufferTexture2DEXTFn(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, target,
+ service_id, 0);
+ EXPECT_EQ(api->glCheckFramebufferStatusEXTFn(GL_FRAMEBUFFER),
+ static_cast<unsigned>(GL_FRAMEBUFFER_COMPLETE));
+ ASSERT_EQ(api->glGetErrorFn(), static_cast<GLenum>(GL_NO_ERROR));
+
+ api->glViewportFn(0, 0, size.width(), size.height());
+ // Set the clear color to green.
+ api->glClearColorFn(0.0f, 1.0f, 0.0f, 1.0f);
+ api->glClearFn(GL_COLOR_BUFFER_BIT);
+ ASSERT_EQ(api->glGetErrorFn(), static_cast<GLenum>(GL_NO_ERROR));
+
+ {
+ GLubyte pixel_color[4];
+ const uint8_t expected_color[4] = {0, 255, 0, 255};
+ // Checks if rendering to back buffer was successful.
+ api->glReadPixelsFn(0, 0, 1, 1, GL_RGBA, GL_UNSIGNED_BYTE, pixel_color);
+ EXPECT_EQ(expected_color[0], pixel_color[0]);
+ EXPECT_EQ(expected_color[1], pixel_color[1]);
+ EXPECT_EQ(expected_color[2], pixel_color[2]);
+ EXPECT_EQ(expected_color[3], pixel_color[3]);
+ }
+
+ back_factory_ref->PresentSwapChain();
+
+ {
+ GLubyte pixel_color[4];
+ const uint8_t expected_color[4] = {0, 0, 0, 255};
+ // After present, back buffer should now have a clear texture.
+ api->glReadPixelsFn(0, 0, 1, 1, GL_RGBA, GL_UNSIGNED_BYTE, pixel_color);
+ EXPECT_EQ(expected_color[0], pixel_color[0]);
+ EXPECT_EQ(expected_color[1], pixel_color[1]);
+ EXPECT_EQ(expected_color[2], pixel_color[2]);
+ EXPECT_EQ(expected_color[3], pixel_color[3]);
+ }
+
+ {
+ // A staging texture must be used to check front buffer since it cannot be
+ // bound to an FBO or use ReadPixels.
+ D3D11_TEXTURE2D_DESC desc = {};
+ desc.Width = 1;
+ desc.Height = 1;
+ desc.Format = DXGI_FORMAT_B8G8R8A8_UNORM;
+ desc.MipLevels = 1;
+ desc.ArraySize = 1;
+ desc.Usage = D3D11_USAGE_STAGING;
+ desc.CPUAccessFlags = D3D11_CPU_ACCESS_READ;
+ desc.SampleDesc.Count = 1;
+
+ auto d3d11_device = gl::QueryD3D11DeviceObjectFromANGLE();
+ ASSERT_TRUE(d3d11_device);
+
+ Microsoft::WRL::ComPtr<ID3D11Texture2D> staging_texture;
+ ASSERT_TRUE(SUCCEEDED(
+ d3d11_device->CreateTexture2D(&desc, nullptr, &staging_texture)));
+
+ Microsoft::WRL::ComPtr<ID3D11DeviceContext> context;
+ d3d11_device->GetImmediateContext(&context);
+ ASSERT_TRUE(context);
+
+ context->CopyResource(staging_texture.Get(), front_image->texture().Get());
+
+ D3D11_MAPPED_SUBRESOURCE mapped_resource;
+ ASSERT_TRUE(SUCCEEDED(context->Map(staging_texture.Get(), 0, D3D11_MAP_READ,
+ 0, &mapped_resource)));
+ ASSERT_GE(mapped_resource.RowPitch, 4u);
+ // After present, front buffer should have color rendered to back buffer.
+ const uint8_t* pixel_color =
+ static_cast<const uint8_t*>(mapped_resource.pData);
+ const uint8_t expected_color[4] = {0, 255, 0, 255};
+ EXPECT_EQ(expected_color[0], pixel_color[0]);
+ EXPECT_EQ(expected_color[1], pixel_color[1]);
+ EXPECT_EQ(expected_color[2], pixel_color[2]);
+ EXPECT_EQ(expected_color[3], pixel_color[3]);
+
+ context->Unmap(staging_texture.Get(), 0);
+ }
+
+ api->glDeleteFramebuffersEXTFn(1, &fbo);
+}
+
+TEST_F(SwapChainFactoryDXGITest, InvalidFormat) {
+ if (!SwapChainFactoryDXGI::IsSupported())
+ return;
+ std::unique_ptr<SwapChainFactoryDXGI> swap_chain_factory_ =
+ std::make_unique<SwapChainFactoryDXGI>(false /* use_passthrough */);
+ auto front_buffer_mailbox = Mailbox::GenerateForSharedImage();
+ auto back_buffer_mailbox = Mailbox::GenerateForSharedImage();
+ gfx::Size size(1, 1);
+ auto color_space = gfx::ColorSpace::CreateSRGB();
+ uint32_t usage = gpu::SHARED_IMAGE_USAGE_SCANOUT;
+ {
+ auto valid_format = viz::RGBA_8888;
+ auto backings = swap_chain_factory_->CreateSwapChain(
+ front_buffer_mailbox, back_buffer_mailbox, valid_format, size,
+ color_space, usage);
+ EXPECT_TRUE(backings.front_buffer);
+ EXPECT_TRUE(backings.back_buffer);
+ backings.front_buffer->Destroy();
+ backings.back_buffer->Destroy();
+ }
+ {
+ auto invalid_format = viz::BGRA_8888;
+ auto backings = swap_chain_factory_->CreateSwapChain(
+ front_buffer_mailbox, back_buffer_mailbox, invalid_format, size,
+ color_space, usage);
+ EXPECT_FALSE(backings.front_buffer);
+ EXPECT_FALSE(backings.back_buffer);
+ }
+}
+
+TEST_F(SwapChainFactoryDXGITest, CreateAndPresentSwapChain) {
+ if (!SwapChainFactoryDXGI::IsSupported() || UsesPassthrough())
+ return;
+ CreateAndPresentSwapChain(false /* uses_passthrough_texture */);
+}
+
+TEST_F(SwapChainFactoryDXGITest, CreateAndPresentSwapChain_PassthroughTexture) {
+ if (!SwapChainFactoryDXGI::IsSupported() || !UsesPassthrough())
+ return;
+ CreateAndPresentSwapChain(true /* uses_passthrough_texture */);
+}
+
+} // anonymous namespace
+} // namespace gpu
diff --git a/chromium/gpu/command_buffer/service/sync_point_manager.cc b/chromium/gpu/command_buffer/service/sync_point_manager.cc
index 740ffaa6e71..9df666db47f 100644
--- a/chromium/gpu/command_buffer/service/sync_point_manager.cc
+++ b/chromium/gpu/command_buffer/service/sync_point_manager.cc
@@ -46,7 +46,10 @@ SyncPointOrderData::OrderFence::~OrderFence() = default;
SyncPointOrderData::SyncPointOrderData(SyncPointManager* sync_point_manager,
SequenceId sequence_id)
- : sync_point_manager_(sync_point_manager), sequence_id_(sequence_id) {}
+ : sync_point_manager_(sync_point_manager), sequence_id_(sequence_id) {
+ // Creation could happen outside of GPU thread.
+ DETACH_FROM_THREAD(processing_thread_checker_);
+}
SyncPointOrderData::~SyncPointOrderData() {
DCHECK(destroyed_);
diff --git a/chromium/gpu/command_buffer/service/texture_definition.cc b/chromium/gpu/command_buffer/service/texture_definition.cc
index 91288c6fd84..0ecb37588e7 100644
--- a/chromium/gpu/command_buffer/service/texture_definition.cc
+++ b/chromium/gpu/command_buffer/service/texture_definition.cc
@@ -270,6 +270,7 @@ scoped_refptr<NativeImageBuffer> NativeImageBuffer::Create(GLuint texture_id) {
switch (gl::GetGLImplementation()) {
#if !defined(OS_MACOSX)
case gl::kGLImplementationEGLGLES2:
+ case gl::kGLImplementationEGLANGLE:
return NativeImageBufferEGL::Create(texture_id);
#endif
case gl::kGLImplementationMockGL:
@@ -330,6 +331,7 @@ TextureDefinition::TextureDefinition()
wrap_t_(0),
usage_(0),
immutable_(true),
+ immutable_storage_(false),
defined_(false) {}
TextureDefinition::TextureDefinition(
@@ -344,7 +346,8 @@ TextureDefinition::TextureDefinition(
wrap_s_(texture->wrap_s()),
wrap_t_(texture->wrap_t()),
usage_(texture->usage()),
- immutable_(texture->IsImmutable()) {
+ immutable_(texture->IsImmutable()),
+ immutable_storage_(texture->HasImmutableStorage()) {
const Texture::LevelInfo* level = texture->GetLevelInfo(target_, 0);
defined_ = !!level;
DCHECK(!image_buffer_.get() || defined_);
@@ -421,7 +424,7 @@ void TextureDefinition::UpdateTextureInternal(Texture* texture) const {
}
texture->target_ = target_;
- texture->SetImmutable(immutable_);
+ texture->SetImmutable(immutable_, immutable_storage_);
texture->sampler_state_.min_filter = min_filter_;
texture->sampler_state_.mag_filter = mag_filter_;
texture->sampler_state_.wrap_s = wrap_s_;
diff --git a/chromium/gpu/command_buffer/service/texture_definition.h b/chromium/gpu/command_buffer/service/texture_definition.h
index 780a8f05d08..07b65bc1ca8 100644
--- a/chromium/gpu/command_buffer/service/texture_definition.h
+++ b/chromium/gpu/command_buffer/service/texture_definition.h
@@ -101,6 +101,7 @@ class TextureDefinition {
GLenum wrap_t_;
GLenum usage_;
bool immutable_;
+ bool immutable_storage_;
bool defined_;
// Only support textures with one face and one level.
diff --git a/chromium/gpu/command_buffer/service/texture_manager.cc b/chromium/gpu/command_buffer/service/texture_manager.cc
index 7e545b9888b..a1d115822a7 100644
--- a/chromium/gpu/command_buffer/service/texture_manager.cc
+++ b/chromium/gpu/command_buffer/service/texture_manager.cc
@@ -516,6 +516,27 @@ TexturePassthrough::TexturePassthrough(GLuint service_id, GLenum target)
TextureBase::SetTarget(target);
}
+TexturePassthrough::TexturePassthrough(GLuint service_id,
+ GLenum target,
+ GLenum internal_format,
+ GLsizei width,
+ GLsizei height,
+ GLsizei depth,
+ GLint border,
+ GLenum format,
+ GLenum type)
+ : TexturePassthrough(service_id, target) {
+ DCHECK(target != GL_TEXTURE_CUBE_MAP);
+ LevelInfo* level_info = GetLevelInfo(target, 0);
+ level_info->internal_format = internal_format;
+ level_info->width = width;
+ level_info->height = height;
+ level_info->depth = depth;
+ level_info->border = border;
+ level_info->format = format;
+ level_info->type = type;
+}
+
TexturePassthrough::~TexturePassthrough() {
DeleteFromMailboxManager();
if (have_context_) {
@@ -608,6 +629,32 @@ void TexturePassthrough::SetLevelImageInternal(
gl::GLImage* image,
GLStreamTextureImage* stream_texture_image,
GLuint service_id) {
+ LevelInfo* level_info = GetLevelInfo(target, level);
+ level_info->image = image;
+ level_info->stream_texture_image = stream_texture_image;
+
+ if (service_id != 0 && service_id != service_id_) {
+ service_id_ = service_id;
+ }
+
+ if (stream_texture_image &&
+ gl::g_current_gl_driver->ext.b_GL_ANGLE_texture_external_update) {
+ // Notify the texture that its size has changed
+ GLint prev_texture = 0;
+ glGetIntegerv(GetTextureBindingQuery(target_), &prev_texture);
+ glBindTexture(target_, service_id_);
+
+ glTexImage2DExternalANGLE(target_, level, level_info->internal_format,
+ level_info->width, level_info->height,
+ level_info->border, level_info->format,
+ level_info->type);
+
+ glBindTexture(target_, prev_texture);
+ }
+}
+
+TexturePassthrough::LevelInfo* TexturePassthrough::GetLevelInfo(GLenum target,
+ GLint level) {
size_t face_idx = GLES2Util::GLTargetToFaceIndex(target);
DCHECK(face_idx < level_images_.size());
DCHECK(level >= 0);
@@ -617,12 +664,7 @@ void TexturePassthrough::SetLevelImageInternal(
level_images_[face_idx].resize(level + 1);
}
- level_images_[face_idx][level].image = image;
- level_images_[face_idx][level].stream_texture_image = stream_texture_image;
-
- if (service_id != 0 && service_id != service_id_) {
- service_id_ = service_id;
- }
+ return &level_images_[face_idx][level];
}
Texture::Texture(GLuint service_id)
@@ -1702,10 +1744,14 @@ bool Texture::ClearRenderableLevels(DecoderContext* decoder) {
return true;
}
-void Texture::SetImmutable(bool immutable) {
- if (immutable_ == immutable)
+void Texture::SetImmutable(bool immutable, bool immutable_storage) {
+ DCHECK(!immutable_storage || immutable);
+
+ if (immutable_ == immutable && immutable_storage_ == immutable_storage)
return;
+
immutable_ = immutable;
+ immutable_storage_ = immutable_storage;
UpdateNumMipLevels();
}
@@ -1893,6 +1939,10 @@ void Texture::SetLevelImageState(GLenum target, GLint level, ImageState state) {
Texture::LevelInfo& info = face_infos_[face_index].level_infos[level];
DCHECK_EQ(info.target, target);
DCHECK_EQ(info.level, level);
+ // Workaround for StreamTexture which must be re-copied on each access.
+ // TODO(ericrk): Remove this once SharedImage transition is complete.
+ if (info.image && !info.image->HasMutableState())
+ return;
info.image_state = state;
}
diff --git a/chromium/gpu/command_buffer/service/texture_manager.h b/chromium/gpu/command_buffer/service/texture_manager.h
index b1e0cce58b2..cdc031e1d79 100644
--- a/chromium/gpu/command_buffer/service/texture_manager.h
+++ b/chromium/gpu/command_buffer/service/texture_manager.h
@@ -73,6 +73,15 @@ class GPU_GLES2_EXPORT TexturePassthrough final
public base::SupportsWeakPtr<TexturePassthrough> {
public:
TexturePassthrough(GLuint service_id, GLenum target);
+ TexturePassthrough(GLuint service_id,
+ GLenum target,
+ GLenum internal_format,
+ GLsizei width,
+ GLsizei height,
+ GLsizei depth,
+ GLint border,
+ GLenum format,
+ GLenum type);
// TextureBase implementation:
TextureBase::Type GetType() const override;
@@ -129,10 +138,20 @@ class GPU_GLES2_EXPORT TexturePassthrough final
LevelInfo(const LevelInfo& rhs);
~LevelInfo();
+ GLenum internal_format = 0;
+ GLsizei width = 0;
+ GLsizei height = 0;
+ GLsizei depth = 0;
+ GLint border = 0;
+ GLenum format = 0;
+ GLenum type = 0;
+
scoped_refptr<gl::GLImage> image;
scoped_refptr<GLStreamTextureImage> stream_texture_image;
};
+ LevelInfo* GetLevelInfo(GLenum target, GLint level);
+
std::vector<std::vector<LevelInfo>> level_images_;
DISALLOW_COPY_AND_ASSIGN(TexturePassthrough);
@@ -341,12 +360,20 @@ class GPU_GLES2_EXPORT Texture final : public TextureBase {
--framebuffer_attachment_count_;
}
- void SetImmutable(bool immutable);
+ // |immutable| indicates that the GPU clients cannot modify the format or
+ // dimensions of the texture object. This is an artificial restriction imposed
+ // by the GPU service on its clients. |immutable_storage| indicates that the
+ // storage for the texture is allocated using glTexStorage* functions and it
+ // is equivalent to the definition of immutability as defined in OpenGL
+ // specifications.
+ void SetImmutable(bool immutable, bool immutable_storage);
bool IsImmutable() const {
return immutable_;
}
+ bool HasImmutableStorage() const { return immutable_storage_; }
+
// Return 0 if it's not immutable.
GLint GetImmutableLevels() const;
@@ -720,6 +747,10 @@ class GPU_GLES2_EXPORT Texture final : public TextureBase {
// or dimensions of the texture object can be made.
bool immutable_ = false;
+ // Indicates that the storage for the texture is allocated using glTexStorage*
+ // functions.
+ bool immutable_storage_ = false;
+
// Whether or not this texture has images.
bool has_images_ = false;
diff --git a/chromium/gpu/command_buffer/service/webgpu_decoder_impl.cc b/chromium/gpu/command_buffer/service/webgpu_decoder_impl.cc
index d5a19011e7b..52e27ae1286 100644
--- a/chromium/gpu/command_buffer/service/webgpu_decoder_impl.cc
+++ b/chromium/gpu/command_buffer/service/webgpu_decoder_impl.cc
@@ -397,8 +397,12 @@ ContextResult WebGPUDecoderImpl::Initialize() {
return ContextResult::kFatalFailure;
}
- wire_server_ = std::make_unique<dawn_wire::WireServer>(
- dawn_device_, dawn_procs_, wire_serializer_.get());
+ dawn_wire::WireServerDescriptor descriptor = {};
+ descriptor.device = dawn_device_;
+ descriptor.procs = &dawn_procs_;
+ descriptor.serializer = wire_serializer_.get();
+
+ wire_server_ = std::make_unique<dawn_wire::WireServer>(descriptor);
return ContextResult::kSuccess;
}
@@ -585,8 +589,8 @@ error::Error WebGPUDecoderImpl::HandleAssociateMailboxImmediate(
}
static constexpr uint32_t kAllowedTextureUsages = static_cast<uint32_t>(
- DAWN_TEXTURE_USAGE_BIT_TRANSFER_SRC |
- DAWN_TEXTURE_USAGE_BIT_TRANSFER_DST | DAWN_TEXTURE_USAGE_BIT_SAMPLED |
+ DAWN_TEXTURE_USAGE_BIT_COPY_SRC | DAWN_TEXTURE_USAGE_BIT_COPY_DST |
+ DAWN_TEXTURE_USAGE_BIT_SAMPLED |
DAWN_TEXTURE_USAGE_BIT_OUTPUT_ATTACHMENT);
if (usage & ~kAllowedTextureUsages) {
DLOG(ERROR) << "AssociateMailbox: Invalid usage";
diff --git a/chromium/gpu/command_buffer/service/wrapped_sk_image.cc b/chromium/gpu/command_buffer/service/wrapped_sk_image.cc
index 448104240c3..3f5c1deaed8 100644
--- a/chromium/gpu/command_buffer/service/wrapped_sk_image.cc
+++ b/chromium/gpu/command_buffer/service/wrapped_sk_image.cc
@@ -17,6 +17,7 @@
#include "gpu/command_buffer/service/shared_context_state.h"
#include "gpu/command_buffer/service/shared_image_backing.h"
#include "gpu/command_buffer/service/shared_image_representation.h"
+#include "gpu/command_buffer/service/skia_utils.h"
#include "third_party/skia/include/core/SkPromiseImageTexture.h"
#include "third_party/skia/include/core/SkSurface.h"
#include "third_party/skia/include/core/SkSurfaceProps.h"
@@ -47,14 +48,14 @@ class WrappedSkImage : public SharedImageBacking {
void Destroy() override {
DCHECK(backend_texture_.isValid());
- context_state_->gr_context()->deleteBackendTexture(backend_texture_);
+ DeleteGrBackendTexture(context_state_, &backend_texture_);
}
bool IsCleared() const override { return cleared_; }
void SetCleared() override { cleared_ = true; }
- void Update() override {}
+ void Update(std::unique_ptr<gfx::GpuFence> in_fence) override {}
void OnMemoryDump(const std::string& dump_name,
base::trace_event::MemoryAllocatorDump* dump,
@@ -124,40 +125,32 @@ class WrappedSkImage : public SharedImageBacking {
context_state_->set_need_context_state_reset(true);
+ // Initializing to bright green makes it obvious if the pixels are not
+ // properly set before they are displayed (e.g. https://crbug.com/956555).
+ // We don't do this on release builds because there is a slight overhead.
+#if DCHECK_IS_ON()
+ backend_texture_ = context_state_->gr_context()->createBackendTexture(
+ size().width(), size().height(), GetSkColorType(), SkColors::kGreen,
+ GrMipMapped::kNo, GrRenderable::kYes);
+#else
backend_texture_ = context_state_->gr_context()->createBackendTexture(
size().width(), size().height(), GetSkColorType(), GrMipMapped::kNo,
GrRenderable::kYes);
+#endif
if (!backend_texture_.isValid())
return false;
-#if DCHECK_IS_ON()
- bool need_temporary_surface = true;
-#else
- bool need_temporary_surface = !data.empty();
-#endif
-
- sk_sp<SkSurface> surface =
- need_temporary_surface
- ? SkSurface::MakeFromBackendTexture(
- context_state_->gr_context(), backend_texture_,
- kTopLeft_GrSurfaceOrigin, /*sampleCnt=*/0, GetSkColorType(),
- color_space().ToSkColorSpace(), /*surfaceProps=*/nullptr)
- : nullptr;
-
-#if DCHECK_IS_ON()
- {
- auto* canvas = surface->getCanvas();
- canvas->clear(SK_ColorGREEN);
- }
-#endif
-
if (!data.empty()) {
SkBitmap bitmap;
if (!bitmap.installPixels(info, const_cast<uint8_t*>(data.data()),
info.minRowBytes())) {
return false;
}
+ sk_sp<SkSurface> surface = SkSurface::MakeFromBackendTexture(
+ context_state_->gr_context(), backend_texture_,
+ kTopLeft_GrSurfaceOrigin, /*sampleCnt=*/0, GetSkColorType(),
+ color_space().ToSkColorSpace(), /*surfaceProps=*/nullptr);
surface->writePixels(bitmap, /*dstX=*/0, /*dstY=*/0);
}