summaryrefslogtreecommitdiff
path: root/chromium/gpu
diff options
context:
space:
mode:
authorAllan Sandfeld Jensen <allan.jensen@qt.io>2018-05-03 13:42:47 +0200
committerAllan Sandfeld Jensen <allan.jensen@qt.io>2018-05-15 10:27:51 +0000
commit8c5c43c7b138c9b4b0bf56d946e61d3bbc111bec (patch)
treed29d987c4d7b173cf853279b79a51598f104b403 /chromium/gpu
parent830c9e163d31a9180fadca926b3e1d7dfffb5021 (diff)
downloadqtwebengine-chromium-8c5c43c7b138c9b4b0bf56d946e61d3bbc111bec.tar.gz
BASELINE: Update Chromium to 66.0.3359.156
Change-Id: I0c9831ad39911a086b6377b16f995ad75a51e441 Reviewed-by: Michal Klocek <michal.klocek@qt.io>
Diffstat (limited to 'chromium/gpu')
-rw-r--r--chromium/gpu/BUILD.gn24
-rw-r--r--chromium/gpu/GLES2/extensions/CHROMIUM/CHROMIUM_unpremultiply_and_dither_copy.txt89
-rw-r--r--chromium/gpu/GLES2/gl2chromium_autogen.h2
-rw-r--r--chromium/gpu/GLES2/gl2extchromium.h22
-rw-r--r--chromium/gpu/command_buffer/build_cmd_buffer_lib.py535
-rwxr-xr-xchromium/gpu/command_buffer/build_gles2_cmd_buffer.py37
-rwxr-xr-xchromium/gpu/command_buffer/build_raster_cmd_buffer.py954
-rw-r--r--chromium/gpu/command_buffer/client/BUILD.gn58
-rw-r--r--chromium/gpu/command_buffer/client/client_discardable_texture_manager.cc62
-rw-r--r--chromium/gpu/command_buffer/client/client_discardable_texture_manager.h14
-rw-r--r--chromium/gpu/command_buffer/client/client_test_helper.h21
-rw-r--r--chromium/gpu/command_buffer/client/client_transfer_cache.cc109
-rw-r--r--chromium/gpu/command_buffer/client/client_transfer_cache.h56
-rw-r--r--chromium/gpu/command_buffer/client/cmd_buffer_helper.cc1
-rw-r--r--chromium/gpu/command_buffer/client/command_buffer_direct_locked.h5
-rw-r--r--chromium/gpu/command_buffer/client/context_support.h31
-rw-r--r--chromium/gpu/command_buffer/client/gles2_c_lib_autogen.h30
-rw-r--r--chromium/gpu/command_buffer/client/gles2_cmd_helper_autogen.h18
-rw-r--r--chromium/gpu/command_buffer/client/gles2_implementation.cc526
-rw-r--r--chromium/gpu/command_buffer/client/gles2_implementation.h244
-rw-r--r--chromium/gpu/command_buffer/client/gles2_implementation_autogen.h10
-rw-r--r--chromium/gpu/command_buffer/client/gles2_implementation_impl_autogen.h54
-rw-r--r--chromium/gpu/command_buffer/client/gles2_implementation_unittest.cc357
-rw-r--r--chromium/gpu/command_buffer/client/gles2_implementation_unittest_autogen.h4
-rw-r--r--chromium/gpu/command_buffer/client/gles2_interface_autogen.h9
-rw-r--r--chromium/gpu/command_buffer/client/gles2_interface_stub_autogen.h9
-rw-r--r--chromium/gpu/command_buffer/client/gles2_interface_stub_impl_autogen.h10
-rw-r--r--chromium/gpu/command_buffer/client/gles2_trace_implementation_autogen.h9
-rw-r--r--chromium/gpu/command_buffer/client/gles2_trace_implementation_impl_autogen.h18
-rw-r--r--chromium/gpu/command_buffer/client/gpu_control.h4
-rw-r--r--chromium/gpu/command_buffer/client/implementation_base.cc340
-rw-r--r--chromium/gpu/command_buffer/client/implementation_base.h162
-rw-r--r--chromium/gpu/command_buffer/client/logging.cc23
-rw-r--r--chromium/gpu/command_buffer/client/logging.h68
-rw-r--r--chromium/gpu/command_buffer/client/mock_transfer_buffer.cc202
-rw-r--r--chromium/gpu/command_buffer/client/mock_transfer_buffer.h96
-rw-r--r--chromium/gpu/command_buffer/client/query_tracker.cc80
-rw-r--r--chromium/gpu/command_buffer/client/query_tracker.h43
-rw-r--r--chromium/gpu/command_buffer/client/raster_cmd_helper.cc16
-rw-r--r--chromium/gpu/command_buffer/client/raster_cmd_helper.h36
-rw-r--r--chromium/gpu/command_buffer/client/raster_cmd_helper_autogen.h248
-rw-r--r--chromium/gpu/command_buffer/client/raster_implementation.cc1076
-rw-r--r--chromium/gpu/command_buffer/client/raster_implementation.h336
-rw-r--r--chromium/gpu/command_buffer/client/raster_implementation_autogen.h78
-rw-r--r--chromium/gpu/command_buffer/client/raster_implementation_gles.cc72
-rw-r--r--chromium/gpu/command_buffer/client/raster_implementation_gles.h32
-rw-r--r--chromium/gpu/command_buffer/client/raster_implementation_gles_unittest.cc45
-rw-r--r--chromium/gpu/command_buffer/client/raster_implementation_impl_autogen.h171
-rw-r--r--chromium/gpu/command_buffer/client/raster_implementation_unittest.cc1019
-rw-r--r--chromium/gpu/command_buffer/client/raster_implementation_unittest_autogen.h135
-rw-r--r--chromium/gpu/command_buffer/client/raster_interface.h72
-rw-r--r--chromium/gpu/command_buffer/client/raster_interface_autogen.h52
-rw-r--r--chromium/gpu/command_buffer/client/ring_buffer.cc13
-rw-r--r--chromium/gpu/command_buffer/client/ring_buffer.h1
-rw-r--r--chromium/gpu/command_buffer/client/ring_buffer_test.cc9
-rw-r--r--chromium/gpu/command_buffer/client/transfer_buffer_unittest.cc10
-rw-r--r--chromium/gpu/command_buffer/common/BUILD.gn7
-rw-r--r--chromium/gpu/command_buffer/common/capabilities.h4
-rw-r--r--chromium/gpu/command_buffer/common/gl2_types.h5
-rw-r--r--chromium/gpu/command_buffer/common/gles2_cmd_format_autogen.h99
-rw-r--r--chromium/gpu/command_buffer/common/gles2_cmd_format_test_autogen.h26
-rw-r--r--chromium/gpu/command_buffer/common/gles2_cmd_ids_autogen.h123
-rw-r--r--chromium/gpu/command_buffer/common/gles2_cmd_utils.cc3
-rw-r--r--chromium/gpu/command_buffer/common/gpu_memory_buffer_support.cc10
-rw-r--r--chromium/gpu/command_buffer/common/presentation_feedback.h6
-rw-r--r--chromium/gpu/command_buffer/common/raster_cmd_format.cc33
-rw-r--r--chromium/gpu/command_buffer/common/raster_cmd_format.h89
-rw-r--r--chromium/gpu/command_buffer/common/raster_cmd_format_autogen.h1158
-rw-r--r--chromium/gpu/command_buffer/common/raster_cmd_format_test.cc65
-rw-r--r--chromium/gpu/command_buffer/common/raster_cmd_format_test_autogen.h381
-rw-r--r--chromium/gpu/command_buffer/common/raster_cmd_ids.h22
-rw-r--r--chromium/gpu/command_buffer/common/raster_cmd_ids_autogen.h52
-rw-r--r--chromium/gpu/command_buffer/gles2_cmd_buffer_functions.txt (renamed from chromium/gpu/command_buffer/cmd_buffer_functions.txt)3
-rw-r--r--chromium/gpu/command_buffer/raster_cmd_buffer_functions.txt49
-rw-r--r--chromium/gpu/command_buffer/service/command_buffer_direct.cc11
-rw-r--r--chromium/gpu/command_buffer/service/command_buffer_direct.h2
-rw-r--r--chromium/gpu/command_buffer/service/context_group.cc3
-rw-r--r--chromium/gpu/command_buffer/service/feature_info.cc47
-rw-r--r--chromium/gpu/command_buffer/service/feature_info.h5
-rw-r--r--chromium/gpu/command_buffer/service/feature_info_unittest.cc51
-rw-r--r--chromium/gpu/command_buffer/service/gl_utils.cc21
-rw-r--r--chromium/gpu/command_buffer/service/gl_utils.h11
-rw-r--r--chromium/gpu/command_buffer/service/gles2_cmd_apply_framebuffer_attachment_cmaa_intel.cc2
-rw-r--r--chromium/gpu/command_buffer/service/gles2_cmd_copy_texture_chromium.cc57
-rw-r--r--chromium/gpu/command_buffer/service/gles2_cmd_copy_texture_chromium.h5
-rw-r--r--chromium/gpu/command_buffer/service/gles2_cmd_decoder.cc465
-rw-r--r--chromium/gpu/command_buffer/service/gles2_cmd_decoder_autogen.h39
-rw-r--r--chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough.cc109
-rw-r--r--chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough.h23
-rw-r--r--chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough_doer_prototypes.h9
-rw-r--r--chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough_doers.cc161
-rw-r--r--chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough_handlers_autogen.cc32
-rw-r--r--chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough_unittest_framebuffers.cc8
-rw-r--r--chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest.cc16
-rw-r--r--chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_4.cc59
-rw-r--r--chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_4_autogen.h57
-rw-r--r--chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_base.cc4
-rw-r--r--chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_base.h1
-rw-r--r--chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_framebuffers.cc13
-rw-r--r--chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_textures.cc39
-rw-r--r--chromium/gpu/command_buffer/service/gpu_fence_manager.cc6
-rw-r--r--chromium/gpu/command_buffer/service/gpu_preferences.h6
-rw-r--r--chromium/gpu/command_buffer/service/mailbox_manager_sync.cc2
-rw-r--r--chromium/gpu/command_buffer/service/mailbox_manager_unittest.cc62
-rw-r--r--chromium/gpu/command_buffer/service/raster_cmd_decoder_autogen.h535
-rw-r--r--chromium/gpu/command_buffer/service/raster_decoder.cc873
-rw-r--r--chromium/gpu/command_buffer/service/raster_decoder.h170
-rw-r--r--chromium/gpu/command_buffer/service/raster_decoder_autogen.h368
-rw-r--r--chromium/gpu/command_buffer/service/raster_decoder_mock.cc25
-rw-r--r--chromium/gpu/command_buffer/service/raster_decoder_mock.h121
-rw-r--r--chromium/gpu/command_buffer/service/raster_decoder_unittest_0_autogen.h15
-rw-r--r--chromium/gpu/command_buffer/service/raster_decoder_unittest_1.cc47
-rw-r--r--chromium/gpu/command_buffer/service/raster_decoder_unittest_1_autogen.h200
-rw-r--r--chromium/gpu/command_buffer/service/raster_decoder_unittest_base.cc327
-rw-r--r--chromium/gpu/command_buffer/service/raster_decoder_unittest_base.h225
-rw-r--r--chromium/gpu/command_buffer/service/texture_definition.cc12
-rw-r--r--chromium/gpu/command_buffer/service/texture_manager.cc42
-rw-r--r--chromium/gpu/command_buffer/service/texture_manager.h3
-rw-r--r--chromium/gpu/config/gpu_driver_bug_list.json142
-rw-r--r--chromium/gpu/config/gpu_driver_bug_workaround_type.h18
-rw-r--r--chromium/gpu/config/gpu_info.cc18
-rw-r--r--chromium/gpu/config/gpu_info.h28
-rw-r--r--chromium/gpu/config/gpu_info_collector.cc96
-rw-r--r--chromium/gpu/config/gpu_info_collector.h31
-rw-r--r--chromium/gpu/config/gpu_info_collector_android.cc258
-rw-r--r--chromium/gpu/config/gpu_info_collector_fuchsia.cc15
-rw-r--r--chromium/gpu/config/gpu_info_collector_linux.cc34
-rw-r--r--chromium/gpu/config/gpu_info_collector_mac.mm28
-rw-r--r--chromium/gpu/config/gpu_info_collector_unittest.cc2
-rw-r--r--chromium/gpu/config/gpu_info_collector_win.cc120
-rw-r--r--chromium/gpu/config/gpu_info_unittest.cc6
-rw-r--r--chromium/gpu/config/gpu_lists_version.h2
-rw-r--r--chromium/gpu/config/gpu_switches.cc34
-rw-r--r--chromium/gpu/config/gpu_switches.h11
-rw-r--r--chromium/gpu/config/gpu_test_config.cc12
-rw-r--r--chromium/gpu/config/gpu_util.cc194
-rw-r--r--chromium/gpu/config/gpu_util.h31
-rw-r--r--chromium/gpu/config/gpu_util_unittest.cc85
-rw-r--r--chromium/gpu/gles2_conform_support/egl/config.cc1
-rw-r--r--chromium/gpu/gles2_conform_support/egl/context.cc4
-rw-r--r--chromium/gpu/gles2_conform_support/egl/context.h4
-rw-r--r--chromium/gpu/gles2_conform_support/egl/thread_state.cc6
-rw-r--r--chromium/gpu/ipc/client/command_buffer_proxy_impl.cc16
-rw-r--r--chromium/gpu/ipc/client/command_buffer_proxy_impl.h6
-rw-r--r--chromium/gpu/ipc/client/gpu_context_tests.h5
-rw-r--r--chromium/gpu/ipc/client/gpu_memory_buffer_impl_android_hardware_buffer.cc3
-rw-r--r--chromium/gpu/ipc/client/gpu_memory_buffer_impl_dxgi.cc3
-rw-r--r--chromium/gpu/ipc/client/gpu_memory_buffer_impl_io_surface.cc24
-rw-r--r--chromium/gpu/ipc/client/gpu_memory_buffer_impl_shared_memory.cc4
-rw-r--r--chromium/gpu/ipc/common/gpu_command_buffer_traits_multi.h3
-rw-r--r--chromium/gpu/ipc/common/gpu_info.mojom15
-rw-r--r--chromium/gpu/ipc/common/gpu_info_struct_traits.cc44
-rw-r--r--chromium/gpu/ipc/common/gpu_info_struct_traits.h35
-rw-r--r--chromium/gpu/ipc/common/gpu_memory_buffer_support.cc15
-rw-r--r--chromium/gpu/ipc/common/gpu_preferences.mojom2
-rw-r--r--chromium/gpu/ipc/common/gpu_preferences_struct_traits.h5
-rw-r--r--chromium/gpu/ipc/common/gpu_preferences_util_unittest.cc103
-rw-r--r--chromium/gpu/ipc/common/struct_traits_unittest.cc15
-rw-r--r--chromium/gpu/ipc/gpu_in_process_thread_service.cc17
-rw-r--r--chromium/gpu/ipc/gpu_in_process_thread_service.h10
-rw-r--r--chromium/gpu/ipc/host/gpu_memory_buffer_support.cc23
-rw-r--r--chromium/gpu/ipc/in_process_command_buffer.cc253
-rw-r--r--chromium/gpu/ipc/in_process_command_buffer.h40
-rw-r--r--chromium/gpu/ipc/service/BUILD.gn3
-rw-r--r--chromium/gpu/ipc/service/direct_composition_child_surface_win.cc4
-rw-r--r--chromium/gpu/ipc/service/direct_composition_surface_win.cc20
-rw-r--r--chromium/gpu/ipc/service/direct_composition_surface_win.h5
-rw-r--r--chromium/gpu/ipc/service/direct_composition_surface_win_unittest.cc30
-rw-r--r--chromium/gpu/ipc/service/gpu_channel_test_common.h5
-rw-r--r--chromium/gpu/ipc/service/gpu_init.cc146
-rw-r--r--chromium/gpu/ipc/service/gpu_init.h7
-rw-r--r--chromium/gpu/ipc/service/gpu_memory_buffer_factory_io_surface.cc101
-rw-r--r--chromium/gpu/ipc/service/gpu_vsync_provider_win.cc6
-rw-r--r--chromium/gpu/ipc/service/gpu_vsync_provider_win.h3
-rw-r--r--chromium/gpu/ipc/service/gpu_watchdog_thread.cc102
-rw-r--r--chromium/gpu/ipc/service/image_transport_surface_fuchsia.cc12
-rw-r--r--chromium/gpu/ipc/service/image_transport_surface_mac.mm1
-rw-r--r--chromium/gpu/ipc/service/image_transport_surface_overlay_mac.h5
-rw-r--r--chromium/gpu/ipc/service/image_transport_surface_overlay_mac.mm29
-rw-r--r--chromium/gpu/ipc/service/raster_command_buffer_stub.cc4
-rw-r--r--chromium/gpu/perftests/texture_upload_perftest.cc1
-rw-r--r--chromium/gpu/raster_export.h29
-rw-r--r--chromium/gpu/skia_bindings/grcontext_for_gles2_interface.cc1
183 files changed, 12884 insertions, 3632 deletions
diff --git a/chromium/gpu/BUILD.gn b/chromium/gpu/BUILD.gn
index 4b51ca880ce..87f7d152359 100644
--- a/chromium/gpu/BUILD.gn
+++ b/chromium/gpu/BUILD.gn
@@ -19,6 +19,10 @@ config("gpu_util_implementation") {
defines = [ "GPU_UTIL_IMPLEMENTATION" ]
}
+config("raster_implementation") {
+ defines = [ "RASTER_IMPLEMENTATION" ]
+}
+
component("gpu") {
public_deps = [
"//gpu/command_buffer/client:client_sources",
@@ -38,6 +42,12 @@ component("gles2") {
]
}
+component("raster") {
+ public_deps = [
+ "//gpu/command_buffer/client:raster_sources",
+ ]
+}
+
component("gpu_util") {
public_deps = [
"//gpu/ipc/common:gpu_preferences_util_sources",
@@ -137,6 +147,8 @@ static_library("test_support") {
"command_buffer/service/error_state_mock.h",
"command_buffer/service/gles2_cmd_decoder_mock.cc",
"command_buffer/service/gles2_cmd_decoder_mock.h",
+ "command_buffer/service/raster_decoder_mock.cc",
+ "command_buffer/service/raster_decoder_mock.h",
]
public_deps = [
@@ -183,6 +195,7 @@ test("gl_tests") {
"command_buffer/tests/gl_map_buffer_range_unittest.cc",
"command_buffer/tests/gl_native_gmb_backbuffer_unittest.cc",
"command_buffer/tests/gl_object_bindings_unittest.cc",
+ "command_buffer/tests/gl_offscreen_surface_unittest.cc",
"command_buffer/tests/gl_oob_attrib_unittest.cc",
"command_buffer/tests/gl_pointcoord_unittest.cc",
"command_buffer/tests/gl_program_unittest.cc",
@@ -261,9 +274,12 @@ test("gpu_unittests") {
"command_buffer/client/fenced_allocator_test.cc",
"command_buffer/client/gles2_implementation_unittest.cc",
"command_buffer/client/mapped_memory_unittest.cc",
+ "command_buffer/client/mock_transfer_buffer.cc",
+ "command_buffer/client/mock_transfer_buffer.h",
"command_buffer/client/program_info_manager_unittest.cc",
"command_buffer/client/query_tracker_unittest.cc",
"command_buffer/client/raster_implementation_gles_unittest.cc",
+ "command_buffer/client/raster_implementation_unittest.cc",
"command_buffer/client/ring_buffer_test.cc",
"command_buffer/client/transfer_buffer_unittest.cc",
"command_buffer/client/vertex_array_object_manager_unittest.cc",
@@ -278,6 +294,8 @@ test("gpu_unittests") {
"command_buffer/common/gles2_cmd_utils_unittest.cc",
"command_buffer/common/id_allocator_test.cc",
"command_buffer/common/id_type_unittest.cc",
+ "command_buffer/common/raster_cmd_format_test.cc",
+ "command_buffer/common/raster_cmd_format_test_autogen.h",
"command_buffer/common/unittest_main.cc",
"command_buffer/service/buffer_manager_unittest.cc",
"command_buffer/service/client_service_map_unittest.cc",
@@ -301,6 +319,8 @@ test("gpu_unittests") {
"command_buffer/service/gles2_cmd_decoder_unittest_2_autogen.h",
"command_buffer/service/gles2_cmd_decoder_unittest_3.cc",
"command_buffer/service/gles2_cmd_decoder_unittest_3_autogen.h",
+ "command_buffer/service/gles2_cmd_decoder_unittest_4.cc",
+ "command_buffer/service/gles2_cmd_decoder_unittest_4_autogen.h",
"command_buffer/service/gles2_cmd_decoder_unittest_attribs.cc",
"command_buffer/service/gles2_cmd_decoder_unittest_base.cc",
"command_buffer/service/gles2_cmd_decoder_unittest_base.h",
@@ -325,6 +345,9 @@ test("gpu_unittests") {
"command_buffer/service/program_cache_unittest.cc",
"command_buffer/service/program_manager_unittest.cc",
"command_buffer/service/query_manager_unittest.cc",
+ "command_buffer/service/raster_decoder_unittest_1.cc",
+ "command_buffer/service/raster_decoder_unittest_base.cc",
+ "command_buffer/service/raster_decoder_unittest_base.h",
"command_buffer/service/renderbuffer_manager_unittest.cc",
"command_buffer/service/scheduler_unittest.cc",
"command_buffer/service/service_discardable_manager_unittest.cc",
@@ -408,6 +431,7 @@ test("gpu_unittests") {
"//components/viz/common:resource_format",
"//gpu/command_buffer/client:gles2_c_lib",
"//gpu/command_buffer/client:gles2_implementation",
+ "//gpu/command_buffer/client:raster",
"//gpu/command_buffer/common:gles2_utils",
"//gpu/ipc:gl_in_process_context",
"//gpu/ipc/common:gpu_preferences_util",
diff --git a/chromium/gpu/GLES2/extensions/CHROMIUM/CHROMIUM_unpremultiply_and_dither_copy.txt b/chromium/gpu/GLES2/extensions/CHROMIUM/CHROMIUM_unpremultiply_and_dither_copy.txt
new file mode 100644
index 00000000000..be6f178cc86
--- /dev/null
+++ b/chromium/gpu/GLES2/extensions/CHROMIUM/CHROMIUM_unpremultiply_and_dither_copy.txt
@@ -0,0 +1,89 @@
+Name
+
+ CHROMIUM_unpremultiply_and_dither_copy
+
+Name Strings
+
+ GL_CHROMIUM_unpremultiply_and_dither_copy
+
+Version
+
+ Last Modifed Date: Feb 01, 2018
+
+Dependencies
+
+ OpenGL ES 2.0 or OpenGL ES 3.0 is required.
+
+ EXT_texture_format_BGRA8888 affects the definition of this extension.
+
+Overview
+
+ This extension performs a copy from one texture to a second texture,
+ unpremultiplying and dithering the texture in the process. Additionally,
+ it allows a subrect to be specified for the copy.
+
+ Both textures:
+ - Must be the same size
+ - Have the same subrect
+
+ The source texture:
+ - Must be RGBA. If EXT_texture_format_BGRA8888 or GLES3 is available,
+ the texture may be BGRA
+ - Will have data copied from level 0
+
+ The dest texture:
+ - Must be RGBA
+ - Must have type GL_UNSIGNED_SHORT_4_4_4_4
+ - Will have data copied to level 0
+
+ In addition to the above, this extension inherits all source/dest
+ limitations from CHROMIUM_copy_texture.
+
+New Procedures and Functions
+
+ void UnpremultiplyAndDitherCopyCHROMIUM(uint sourceId,
+ uint destId,
+ int x,
+ int y,
+ sizei width,
+ sizei height)
+
+Additions to the OpenGL ES 2.0 Specification
+
+ The command
+
+ UnpremultiplyAndDitherCopyCHROMIUM
+
+ Copies the contents of level 0 of <sourceId> texture to level 0 and of
+ <destId> texture, performing an unpremultiply and dither during the copy.
+
+ <destId> must be initialized and have:
+ - a target of TEXTURE_2D, TEXTURE_RECTANGLE_ARB.
+ - a format of GL_RGBA, or GL_BGRA if EXT_texture_format_BGRA8888
+ or GLES3 is available.
+ - a data type of GL_UNSIGNED_SHORT_4_4_4_4.
+
+ <sourceId> must be initialized and have:
+ - a target of TEXTURE_2D, TEXTURE_RECTANGLE_ARB, TEXTURE_EXTERNAL_OES.
+ - a format of GL_RGBA, or GL_BGRA if EXT_texture_format_BGRA8888
+ or GLES3 is available.
+
+ INVALID_OPERATION is generated if any of the above requirements is not met,
+ or if the source / dest combination violates any requirements of
+ CHROMIUM_copy_texture.
+
+Errors
+
+ None.
+
+New Tokens
+
+ None.
+
+New State
+
+ None.
+
+Revision History
+
+ 02/01/2018 Documented the extension
diff --git a/chromium/gpu/GLES2/gl2chromium_autogen.h b/chromium/gpu/GLES2/gl2chromium_autogen.h
index 65d8dfd501d..e580227d872 100644
--- a/chromium/gpu/GLES2/gl2chromium_autogen.h
+++ b/chromium/gpu/GLES2/gl2chromium_autogen.h
@@ -320,6 +320,8 @@
GLES2_GET_FUN(GenUnverifiedSyncTokenCHROMIUM)
#define glVerifySyncTokensCHROMIUM GLES2_GET_FUN(VerifySyncTokensCHROMIUM)
#define glWaitSyncTokenCHROMIUM GLES2_GET_FUN(WaitSyncTokenCHROMIUM)
+#define glUnpremultiplyAndDitherCopyCHROMIUM \
+ GLES2_GET_FUN(UnpremultiplyAndDitherCopyCHROMIUM)
#define glDrawBuffersEXT GLES2_GET_FUN(DrawBuffersEXT)
#define glDiscardBackbufferCHROMIUM GLES2_GET_FUN(DiscardBackbufferCHROMIUM)
#define glScheduleOverlayPlaneCHROMIUM \
diff --git a/chromium/gpu/GLES2/gl2extchromium.h b/chromium/gpu/GLES2/gl2extchromium.h
index 25a07e4e3e7..443da7435fc 100644
--- a/chromium/gpu/GLES2/gl2extchromium.h
+++ b/chromium/gpu/GLES2/gl2extchromium.h
@@ -1241,6 +1241,28 @@ typedef void(GL_APIENTRYP PFNGLSETCOLORSPACEMETADATACHROMIUM)(
GLColorSpace color_space);
#endif /* GL_CHROMIUM_color_space_metadata */
+/* GL_CHROMIUM_dither_and_premultiply_copy */
+#ifndef GL_CHROMIUM_unpremultiply_and_dither_copy
+#define GL_CHROMIUM_unpremultiply_and_dither_copy 1
+
+#ifdef GL_GLEXT_PROTOTYPES
+GL_APICALL void GL_APIENTRY
+glUnpremultiplyAndDitherCopyCHROMIUM(GLenum source_id,
+ GLenum dest_id,
+ GLint x,
+ GLint y,
+ GLsizei width,
+ GLsizei height);
+#endif
+typedef void(GL_APIENTRYP PFNGLUNPREMULTIPLYANDDITHERCOPYCHROMIUMPROC)(
+ GLenum source_id,
+ GLenum dest_id,
+ GLint x,
+ GLint y,
+ GLsizei width,
+ GLsizei height);
+#endif /* GL_CHROMIUM_unpremultiply_and_dither_copy */
+
#ifdef __cplusplus
}
#endif
diff --git a/chromium/gpu/command_buffer/build_cmd_buffer_lib.py b/chromium/gpu/command_buffer/build_cmd_buffer_lib.py
index dd650917d43..6e869597c1d 100644
--- a/chromium/gpu/command_buffer/build_cmd_buffer_lib.py
+++ b/chromium/gpu/command_buffer/build_cmd_buffer_lib.py
@@ -13,14 +13,14 @@ _SIZE_OF_UINT32 = 4
_SIZE_OF_COMMAND_HEADER = 4
_FIRST_SPECIFIC_COMMAND_ID = 256
-_LICENSE = """// Copyright 2014 The Chromium Authors. All rights reserved.
+_LICENSE = """// Copyright %s The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
"""
_DO_NOT_EDIT_WARNING = """// This file is auto-generated from
-// gpu/command_buffer/build_gles2_cmd_buffer.py
+// gpu/command_buffer/build_%s_cmd_buffer.py
// It's formatted by clang-format using chromium coding style:
// clang-format -i -style=chromium filename
// DO NOT EDIT!
@@ -93,6 +93,36 @@ _PEPPER_INTERFACES = [
]
+_prefix = None
+_upper_prefix = None
+_lower_prefix = None
+def InitializePrefix(mixed_case_prefix):
+ """Initialize prefix used for autogenerated code.
+
+ Must be called before autogenerating code. Prefixes are used by autogenerated
+ code in many places: class names, filenames, namespaces, constants,
+ defines. Given a single mixed case prefix suitable for a class name, we also
+ initialize lower and upper case prefixes for other uses (e.g. filenames and
+ #defines).
+ """
+ global _prefix
+ if _prefix:
+ raise AssertionError
+ _prefix = mixed_case_prefix
+
+ global _upper_prefix
+ _upper_prefix = mixed_case_prefix.upper()
+
+ global _lower_prefix
+ _lower_prefix = mixed_case_prefix.lower()
+
+
+def _Namespace():
+ if _lower_prefix != 'gles2':
+ return 'gles2::'
+ return ''
+
+
def Grouper(n, iterable, fillvalue=None):
"""Collect data into fixed-length chunks or blocks"""
args = [iter(iterable)] * n
@@ -229,10 +259,10 @@ class CWriter(object):
myfile.write("hello")
# type(myfile) == file
"""
- def __init__(self, filename):
+ def __init__(self, filename, year):
self.filename = filename
self._file = open(filename, 'wb')
- self._ENTER_MSG = _LICENSE + _DO_NOT_EDIT_WARNING
+ self._ENTER_MSG = _LICENSE % year + _DO_NOT_EDIT_WARNING % _lower_prefix
self._EXIT_MSG = ""
def __enter__(self):
@@ -251,8 +281,8 @@ class CHeaderWriter(CWriter):
around it. If `file_comment` is set, it will write that before the #ifdef
guard.
"""
- def __init__(self, filename, file_comment=None):
- super(CHeaderWriter, self).__init__(filename)
+ def __init__(self, filename, year, file_comment=None):
+ super(CHeaderWriter, self).__init__(filename, year)
guard = self._get_guard()
if file_comment is None:
file_comment = ""
@@ -387,7 +417,7 @@ static_assert(offsetof(%(cmd_name)s::Result, %(field_name)s) == %(offset)d,
def WriteFormatTest(self, func, f):
"""Writes a format test for a command."""
- f.write("TEST_F(GLES2FormatTest, %s) {\n" % func.name)
+ f.write("TEST_F(%sFormatTest, %s) {\n" % (_prefix, func.name))
f.write(" cmds::%s& cmd = *GetBufferAs<cmds::%s>();\n" %
(func.name, func.name))
f.write(" void* next_cmd = cmd.Set(\n")
@@ -428,17 +458,17 @@ static_assert(offsetof(%(cmd_name)s::Result, %(field_name)s) == %(offset)d,
def WriteServiceHandlerFunctionHeader(self, func, f):
"""Writes function header for service implementation handlers."""
- f.write("""error::Error GLES2DecoderImpl::Handle%(name)s(
+ f.write("""error::Error %(prefix)sDecoderImpl::Handle%(name)s(
uint32_t immediate_data_size, const volatile void* cmd_data) {
- """ % {'name': func.name})
+ """ % {'name': func.name, 'prefix' : _prefix})
if func.IsES3():
f.write("""if (!feature_info_->IsWebGL2OrES3Context())
return error::kUnknownCommand;
""")
if func.GetCmdArgs():
- f.write("""const volatile gles2::cmds::%(name)s& c =
- *static_cast<const volatile gles2::cmds::%(name)s*>(cmd_data);
- """ % {'name': func.name})
+ f.write("""const volatile %(prefix)s::cmds::%(name)s& c =
+ *static_cast<const volatile %(prefix)s::cmds::%(name)s*>(cmd_data);
+ """ % {'name': func.name, 'prefix': _lower_prefix})
def WriteServiceHandlerArgGetCode(self, func, f):
"""Writes the argument unpack code for service handlers."""
@@ -774,8 +804,8 @@ TEST_P(%(test_name)s, %(name)sInvalidArgs%(arg_index)d_%(value_index)d) {
"""Writes the GLES2 Implemention."""
impl_func = func.GetInfo('impl_func', True)
if func.can_auto_generate and impl_func:
- f.write("%s GLES2Implementation::%s(%s) {\n" %
- (func.return_type, func.original_name,
+ f.write("%s %sImplementation::%s(%s) {\n" %
+ (func.return_type, _prefix, func.original_name,
func.MakeTypedOriginalArgString("")))
f.write(" GPU_CLIENT_SINGLE_THREAD_CHECK();\n")
self.WriteClientGLCallLog(func, f)
@@ -817,7 +847,7 @@ TEST_P(%(test_name)s, %(name)sInvalidArgs%(arg_index)d_%(value_index)d) {
client_test = func.GetInfo('client_test', True)
if func.can_auto_generate and client_test:
code = """
-TEST_F(GLES2ImplementationTest, %(name)s) {
+TEST_F(%(prefix)sImplementationTest, %(name)s) {
struct Cmds {
cmds::%(name)s cmd;
};
@@ -837,6 +867,7 @@ TEST_F(GLES2ImplementationTest, %(name)s) {
]
f.write(code % {
+ 'prefix' : _prefix,
'name': func.name,
'args': ", ".join(gl_arg_strings),
'cmd_args': ", ".join(cmd_arg_strings),
@@ -847,7 +878,8 @@ TEST_F(GLES2ImplementationTest, %(name)s) {
constants = [arg for arg in func.GetOriginalArgs() if arg.IsConstant()]
if constants:
code = """
-TEST_F(GLES2ImplementationTest, %(name)sInvalidConstantArg%(invalid_index)d) {
+TEST_F(%(prefix)sImplementationTest,
+ %(name)sInvalidConstantArg%(invalid_index)d) {
gl_->%(name)s(%(args)s);
EXPECT_TRUE(NoCommandsWritten());
EXPECT_EQ(%(gl_error)s, CheckError());
@@ -863,6 +895,7 @@ TEST_F(GLES2ImplementationTest, %(name)sInvalidConstantArg%(invalid_index)d) {
gl_arg_strings.append(arg.GetValidClientSideArg(func))
f.write(code % {
+ 'prefix' : _prefix,
'name': func.name,
'invalid_index': func.GetOriginalArgs().index(invalid_arg),
'args': ", ".join(gl_arg_strings),
@@ -875,8 +908,8 @@ TEST_F(GLES2ImplementationTest, %(name)sInvalidConstantArg%(invalid_index)d) {
arg.WriteDestinationInitalizationValidation(f, func)
def WriteTraceEvent(self, func, f):
- f.write(' TRACE_EVENT0("gpu", "GLES2Implementation::%s");\n' %
- func.original_name)
+ f.write(' TRACE_EVENT0("gpu", "%sImplementation::%s");\n' %
+ (_prefix, func.original_name))
def WriteImmediateCmdComputeSize(self, _func, f):
"""Writes the size computation code for the immediate version of a cmd."""
@@ -905,7 +938,7 @@ TEST_F(GLES2ImplementationTest, %(name)sInvalidConstantArg%(invalid_index)d) {
def WriteCmdHelper(self, func, f):
"""Writes the cmd helper definition for a cmd."""
code = """ void %(name)s(%(typed_args)s) {
- gles2::cmds::%(name)s* c = GetCmdSpace<gles2::cmds::%(name)s>();
+ %(lp)s::cmds::%(name)s* c = GetCmdSpace<%(lp)s::cmds::%(name)s>();
if (c) {
c->Init(%(args)s);
}
@@ -913,6 +946,7 @@ TEST_F(GLES2ImplementationTest, %(name)sInvalidConstantArg%(invalid_index)d) {
"""
f.write(code % {
+ "lp" : _lower_prefix,
"name": func.name,
"typed_args": func.MakeTypedCmdArgString(""),
"args": func.MakeCmdArgString(""),
@@ -922,8 +956,8 @@ TEST_F(GLES2ImplementationTest, %(name)sInvalidConstantArg%(invalid_index)d) {
"""Writes the cmd helper definition for the immediate version of a cmd."""
code = """ void %(name)s(%(typed_args)s) {
const uint32_t s = 0;
- gles2::cmds::%(name)s* c =
- GetImmediateCmdSpaceTotalSize<gles2::cmds::%(name)s>(s);
+ %(lp)s::cmds::%(name)s* c =
+ GetImmediateCmdSpaceTotalSize<%(lp)s::cmds::%(name)s>(s);
if (c) {
c->Init(%(args)s);
}
@@ -931,6 +965,7 @@ TEST_F(GLES2ImplementationTest, %(name)sInvalidConstantArg%(invalid_index)d) {
"""
f.write(code % {
+ "lp" : _lower_prefix,
"name": func.name,
"typed_args": func.MakeTypedCmdArgString(""),
"args": func.MakeCmdArgString(""),
@@ -1457,8 +1492,8 @@ TEST_P(%(test_name)s, %(name)sInvalidArgs%(arg_index)d_%(value_index)d) {
impl_func = func.GetInfo('impl_func', True)
if func.can_auto_generate and impl_func:
- f.write("%s GLES2Implementation::%s(%s) {\n" %
- (func.return_type, func.original_name,
+ f.write("%s %sImplementation::%s(%s) {\n" %
+ (func.return_type, _prefix, func.original_name,
func.MakeTypedOriginalArgString("")))
f.write(" GPU_CLIENT_SINGLE_THREAD_CHECK();\n")
func.WriteDestinationInitalizationValidation(f)
@@ -1490,7 +1525,7 @@ TEST_P(%(test_name)s, %(name)sInvalidArgs%(arg_index)d_%(value_index)d) {
if not client_test:
return
code = """
-TEST_F(GLES2ImplementationTest, %(name)s) {
+TEST_F(%(prefix)sImplementationTest, %(name)s) {
struct Cmds {
cmds::%(name)s cmd;
};
@@ -1515,6 +1550,7 @@ TEST_F(GLES2ImplementationTest, %(name)s) {
]
f.write(code % {
+ 'prefix' : _prefix,
'name': func.name,
'args': ", ".join(gl_arg_strings),
'cmd_args': ", ".join(cmd_arg_strings),
@@ -1570,6 +1606,7 @@ class GENnHandler(TypeHandler):
args = {
'log_code': log_code,
'return_type': func.return_type,
+ 'prefix' : _prefix,
'name': func.original_name,
'typed_args': func.MakeTypedOriginalArgString(""),
'args': func.MakeOriginalArgString(""),
@@ -1577,7 +1614,8 @@ class GENnHandler(TypeHandler):
'count_name': func.GetOriginalArgs()[0].name,
}
f.write(
- "%(return_type)s GLES2Implementation::%(name)s(%(typed_args)s) {\n" %
+ "%(return_type)s %(prefix)sImplementation::"
+ "%(name)s(%(typed_args)s) {\n" %
args)
func.WriteDestinationInitalizationValidation(f)
self.WriteClientGLCallLog(func, f)
@@ -1618,7 +1656,7 @@ class GENnHandler(TypeHandler):
def WriteGLES2ImplementationUnitTest(self, func, f):
"""Overrriden from TypeHandler."""
code = """
-TEST_F(GLES2ImplementationTest, %(name)s) {
+TEST_F(%(prefix)sImplementationTest, %(name)s) {
GLuint ids[2] = { 0, };
struct Cmds {
cmds::%(name)sImmediate gen;
@@ -1635,6 +1673,7 @@ TEST_F(GLES2ImplementationTest, %(name)s) {
}
"""
f.write(code % {
+ 'prefix' : _prefix,
'name': func.name,
'types': func.GetInfo('resource_types'),
})
@@ -1750,9 +1789,9 @@ TEST_P(%(test_name)s, %(name)sInvalidArgs) {
def WriteImmediateCmdHelper(self, func, f):
"""Overrriden from TypeHandler."""
code = """ void %(name)s(%(typed_args)s) {
- const uint32_t size = gles2::cmds::%(name)s::ComputeSize(n);
- gles2::cmds::%(name)s* c =
- GetImmediateCmdSpaceTotalSize<gles2::cmds::%(name)s>(size);
+ const uint32_t size = %(lp)s::cmds::%(name)s::ComputeSize(n);
+ %(lp)s::cmds::%(name)s* c =
+ GetImmediateCmdSpaceTotalSize<%(lp)s::cmds::%(name)s>(size);
if (c) {
c->Init(%(args)s);
}
@@ -1760,6 +1799,7 @@ TEST_P(%(test_name)s, %(name)sInvalidArgs) {
"""
f.write(code % {
+ "lp" : _lower_prefix,
"name": func.name,
"typed_args": func.MakeTypedOriginalArgString(""),
"args": func.MakeOriginalArgString(""),
@@ -1767,7 +1807,7 @@ TEST_P(%(test_name)s, %(name)sInvalidArgs) {
def WriteImmediateFormatTest(self, func, f):
"""Overrriden from TypeHandler."""
- f.write("TEST_F(GLES2FormatTest, %s) {\n" % func.name)
+ f.write("TEST_F(%sFormatTest, %s) {\n" % (_prefix, func.name))
f.write(" static GLuint ids[] = { 12, 23, 34, };\n")
f.write(" cmds::%s& cmd = *GetBufferAs<cmds::%s>();\n" %
(func.name, func.name))
@@ -1891,8 +1931,8 @@ TEST_P(%(test_name)s, %(name)sInvalidArgs%(arg_index)d_%(value_index)d) {
def WriteGLES2Implementation(self, func, f):
"""Overrriden from TypeHandler."""
- f.write("%s GLES2Implementation::%s(%s) {\n" %
- (func.return_type, func.original_name,
+ f.write("%s %sImplementation::%s(%s) {\n" %
+ (func.return_type, _prefix, func.original_name,
func.MakeTypedOriginalArgString("")))
f.write(" GPU_CLIENT_SINGLE_THREAD_CHECK();\n")
func.WriteDestinationInitalizationValidation(f)
@@ -1942,8 +1982,8 @@ class DeleteHandler(TypeHandler):
def WriteGLES2Implementation(self, func, f):
"""Overrriden from TypeHandler."""
- f.write("%s GLES2Implementation::%s(%s) {\n" %
- (func.return_type, func.original_name,
+ f.write("%s %sImplementation::%s(%s) {\n" %
+ (func.return_type, _prefix, func.original_name,
func.MakeTypedOriginalArgString("")))
f.write(" GPU_CLIENT_SINGLE_THREAD_CHECK();\n")
func.WriteDestinationInitalizationValidation(f)
@@ -1988,7 +2028,7 @@ class DELnHandler(TypeHandler):
def WriteGLES2ImplementationUnitTest(self, func, f):
"""Overrriden from TypeHandler."""
code = """
-TEST_F(GLES2ImplementationTest, %(name)s) {
+TEST_F(%(prefix)sImplementationTest, %(name)s) {
GLuint ids[2] = { k%(types)sStartId, k%(types)sStartId + 1 };
struct Cmds {
cmds::%(name)sImmediate del;
@@ -2003,6 +2043,7 @@ TEST_F(GLES2ImplementationTest, %(name)s) {
}
"""
f.write(code % {
+ 'prefix' : _prefix,
'name': func.name,
'types': func.GetInfo('resource_types'),
})
@@ -2090,6 +2131,7 @@ TEST_P(%(test_name)s, %(name)sInvalidArgs) {
if impl_func:
args = {
'return_type': func.return_type,
+ 'prefix' : _prefix,
'name': func.original_name,
'typed_args': func.MakeTypedOriginalArgString(""),
'args': func.MakeOriginalArgString(""),
@@ -2097,7 +2139,8 @@ TEST_P(%(test_name)s, %(name)sInvalidArgs) {
'count_name': func.GetOriginalArgs()[0].name,
}
f.write(
- "%(return_type)s GLES2Implementation::%(name)s(%(typed_args)s) {\n" %
+ "%(return_type)s %(prefix)sImplementation::"
+ "%(name)s(%(typed_args)s) {\n" %
args)
f.write(" GPU_CLIENT_SINGLE_THREAD_CHECK();\n")
func.WriteDestinationInitalizationValidation(f)
@@ -2176,9 +2219,9 @@ TEST_P(%(test_name)s, %(name)sInvalidArgs) {
def WriteImmediateCmdHelper(self, func, f):
"""Overrriden from TypeHandler."""
code = """ void %(name)s(%(typed_args)s) {
- const uint32_t size = gles2::cmds::%(name)s::ComputeSize(n);
- gles2::cmds::%(name)s* c =
- GetImmediateCmdSpaceTotalSize<gles2::cmds::%(name)s>(size);
+ const uint32_t size = %(lp)s::cmds::%(name)s::ComputeSize(n);
+ %(lp)s::cmds::%(name)s* c =
+ GetImmediateCmdSpaceTotalSize<%(lp)s::cmds::%(name)s>(size);
if (c) {
c->Init(%(args)s);
}
@@ -2186,6 +2229,7 @@ TEST_P(%(test_name)s, %(name)sInvalidArgs) {
"""
f.write(code % {
+ "lp" : _lower_prefix,
"name": func.name,
"typed_args": func.MakeTypedOriginalArgString(""),
"args": func.MakeOriginalArgString(""),
@@ -2193,7 +2237,7 @@ TEST_P(%(test_name)s, %(name)sInvalidArgs) {
def WriteImmediateFormatTest(self, func, f):
"""Overrriden from TypeHandler."""
- f.write("TEST_F(GLES2FormatTest, %s) {\n" % func.name)
+ f.write("TEST_F(%sFormatTest, %s) {\n" % (_prefix, func.name))
f.write(" static GLuint ids[] = { 12, 23, 34, };\n")
f.write(" cmds::%s& cmd = *GetBufferAs<cmds::%s>();\n" %
(func.name, func.name))
@@ -2331,8 +2375,8 @@ class GETnHandler(TypeHandler):
"""Overrriden from TypeHandler."""
impl_func = func.GetInfo('impl_func', True)
if impl_func:
- f.write("%s GLES2Implementation::%s(%s) {\n" %
- (func.return_type, func.original_name,
+ f.write("%s %sImplementation::%s(%s) {\n" %
+ (func.return_type, _prefix, func.original_name,
func.MakeTypedOriginalArgString("")))
f.write(" GPU_CLIENT_SINGLE_THREAD_CHECK();\n")
func.WriteDestinationInitalizationValidation(f)
@@ -2395,7 +2439,7 @@ class GETnHandler(TypeHandler):
def WriteGLES2ImplementationUnitTest(self, func, f):
"""Writes the GLES2 Implemention unit test."""
code = """
-TEST_F(GLES2ImplementationTest, %(name)s) {
+TEST_F(%(prefix)sImplementationTest, %(name)s) {
struct Cmds {
cmds::%(name)s cmd;
};
@@ -2428,6 +2472,7 @@ TEST_F(GLES2ImplementationTest, %(name)s) {
gl_arg_strings.append(arg.GetValidClientSideArg(func))
f.write(code % {
+ 'prefix' : _prefix,
'name': func.name,
'args': ", ".join(gl_arg_strings),
'cmd_args': ", ".join(cmd_arg_strings),
@@ -2671,8 +2716,8 @@ TEST_P(%(test_name)s, %(name)sInvalidArgs%(arg_index)d_%(value_index)d) {
impl_func = func.GetInfo('impl_func')
if (impl_func != None and impl_func != True):
return;
- f.write("%s GLES2Implementation::%s(%s) {\n" %
- (func.return_type, func.original_name,
+ f.write("%s %sImplementation::%s(%s) {\n" %
+ (func.return_type, _prefix, func.original_name,
func.MakeTypedOriginalArgString("")))
f.write(" GPU_CLIENT_SINGLE_THREAD_CHECK();\n")
func.WriteDestinationInitalizationValidation(f)
@@ -2701,7 +2746,7 @@ TEST_P(%(test_name)s, %(name)sInvalidArgs%(arg_index)d_%(value_index)d) {
if not client_test:
return;
code = """
-TEST_F(GLES2ImplementationTest, %(name)s) {
+TEST_F(%(prefix)sImplementationTest, %(name)s) {
%(type)s data[%(count)d] = {0};
struct Cmds {
cmds::%(name)sImmediate cmd;
@@ -2725,6 +2770,7 @@ TEST_F(GLES2ImplementationTest, %(name)s) {
]
f.write(code % {
+ 'prefix' : _prefix,
'name': func.name,
'type': self.GetArrayType(func),
'count': self.GetArrayCount(func),
@@ -2808,9 +2854,9 @@ TEST_F(GLES2ImplementationTest, %(name)s) {
def WriteImmediateCmdHelper(self, func, f):
"""Overrriden from TypeHandler."""
code = """ void %(name)s(%(typed_args)s) {
- const uint32_t size = gles2::cmds::%(name)s::ComputeSize();
- gles2::cmds::%(name)s* c =
- GetImmediateCmdSpaceTotalSize<gles2::cmds::%(name)s>(size);
+ const uint32_t size = %(lp)s::cmds::%(name)s::ComputeSize();
+ %(lp)s::cmds::%(name)s* c =
+ GetImmediateCmdSpaceTotalSize<%(lp)s::cmds::%(name)s>(size);
if (c) {
c->Init(%(args)s);
}
@@ -2818,6 +2864,7 @@ TEST_F(GLES2ImplementationTest, %(name)s) {
"""
f.write(code % {
+ "lp" : _lower_prefix,
"name": func.name,
"typed_args": func.MakeTypedOriginalArgString(""),
"args": func.MakeOriginalArgString(""),
@@ -2825,7 +2872,7 @@ TEST_F(GLES2ImplementationTest, %(name)s) {
def WriteImmediateFormatTest(self, func, f):
"""Overrriden from TypeHandler."""
- f.write("TEST_F(GLES2FormatTest, %s) {\n" % func.name)
+ f.write("TEST_F(%sFormatTest, %s) {\n" % (_prefix, func.name))
f.write(" const int kSomeBaseValueToTestWith = 51;\n")
f.write(" static %s data[] = {\n" % self.GetArrayType(func))
for v in range(0, self.GetArrayCount(func)):
@@ -2966,8 +3013,8 @@ TEST_P(%(test_name)s, %(name)sInvalidArgs%(arg_index)d_%(value_index)d) {
impl_func = func.GetInfo('impl_func')
if (impl_func != None and impl_func != True):
return;
- f.write("%s GLES2Implementation::%s(%s) {\n" %
- (func.return_type, func.original_name,
+ f.write("%s %sImplementation::%s(%s) {\n" %
+ (func.return_type, _prefix, func.original_name,
func.MakeTypedOriginalArgString("")))
f.write(" GPU_CLIENT_SINGLE_THREAD_CHECK();\n")
func.WriteDestinationInitalizationValidation(f)
@@ -2997,7 +3044,7 @@ TEST_P(%(test_name)s, %(name)sInvalidArgs%(arg_index)d_%(value_index)d) {
return;
code = """
-TEST_F(GLES2ImplementationTest, %(name)s) {
+TEST_F(%(prefix)sImplementationTest, %(name)s) {
%(type)s data[%(count_param)d][%(count)d] = {{0}};
struct Cmds {
cmds::%(name)sImmediate cmd;
@@ -3034,6 +3081,7 @@ TEST_F(GLES2ImplementationTest, %(name)s) {
if arg.name == "count":
count_param = int(valid_value)
f.write(code % {
+ 'prefix' : _prefix,
'name': func.name,
'type': self.GetArrayType(func),
'count': self.GetArrayCount(func),
@@ -3051,7 +3099,8 @@ TEST_F(GLES2ImplementationTest, %(name)s) {
return
code = """
-TEST_F(GLES2ImplementationTest, %(name)sInvalidConstantArg%(invalid_index)d) {
+TEST_F(%(prefix)sImplementationTest,
+ %(name)sInvalidConstantArg%(invalid_index)d) {
%(type)s data[%(count_param)d][%(count)d] = {{0}};
for (int ii = 0; ii < %(count_param)d; ++ii) {
for (int jj = 0; jj < %(count)d; ++jj) {
@@ -3078,6 +3127,7 @@ TEST_F(GLES2ImplementationTest, %(name)sInvalidConstantArg%(invalid_index)d) {
count_param = int(valid_value)
f.write(code % {
+ 'prefix' : _prefix,
'name': func.name,
'invalid_index': func.GetOriginalArgs().index(invalid_arg),
'type': self.GetArrayType(func),
@@ -3140,9 +3190,9 @@ TEST_F(GLES2ImplementationTest, %(name)sInvalidConstantArg%(invalid_index)d) {
def WriteImmediateCmdHelper(self, func, f):
"""Overrriden from TypeHandler."""
code = """ void %(name)s(%(typed_args)s) {
- const uint32_t size = gles2::cmds::%(name)s::ComputeSize(count);
- gles2::cmds::%(name)s* c =
- GetImmediateCmdSpaceTotalSize<gles2::cmds::%(name)s>(size);
+ const uint32_t size = %(lp)s::cmds::%(name)s::ComputeSize(count);
+ %(lp)s::cmds::%(name)s* c =
+ GetImmediateCmdSpaceTotalSize<%(lp)s::cmds::%(name)s>(size);
if (c) {
c->Init(%(args)s);
}
@@ -3150,6 +3200,7 @@ TEST_F(GLES2ImplementationTest, %(name)sInvalidConstantArg%(invalid_index)d) {
"""
f.write(code % {
+ "lp" : _lower_prefix,
"name": func.name,
"typed_args": func.MakeTypedInitString(""),
"args": func.MakeInitString("")
@@ -3162,7 +3213,7 @@ TEST_F(GLES2ImplementationTest, %(name)sInvalidConstantArg%(invalid_index)d) {
for arg in args:
if arg.name == "count":
count_param = int(arg.GetValidClientSideCmdArg(func))
- f.write("TEST_F(GLES2FormatTest, %s) {\n" % func.name)
+ f.write("TEST_F(%sFormatTest, %s) {\n" % (_prefix, func.name))
f.write(" const int kSomeBaseValueToTestWith = 51;\n")
f.write(" static %s data[] = {\n" % self.GetArrayType(func))
for v in range(0, self.GetArrayCount(func) * count_param):
@@ -3220,8 +3271,8 @@ class PUTSTRHandler(ArrayArgTypeHandler):
def WriteGLES2Implementation(self, func, f):
"""Overrriden from TypeHandler."""
- f.write("%s GLES2Implementation::%s(%s) {\n" %
- (func.return_type, func.original_name,
+ f.write("%s %sImplementation::%s(%s) {\n" %
+ (func.return_type, _prefix, func.original_name,
func.MakeTypedOriginalArgString("")))
f.write(" GPU_CLIENT_SINGLE_THREAD_CHECK();\n")
func.WriteDestinationInitalizationValidation(f)
@@ -3284,8 +3335,8 @@ class PUTSTRHandler(ArrayArgTypeHandler):
def WriteGLES2ImplementationUnitTest(self, func, f):
"""Overrriden from TypeHandler."""
code = """
-TEST_F(GLES2ImplementationTest, %(name)s) {
- const uint32_t kBucketId = GLES2Implementation::kResultBucketId;
+TEST_F(%(prefix)sImplementationTest, %(name)s) {
+ const uint32_t kBucketId = %(prefix)sImplementation::kResultBucketId;
const char* kString1 = "happy";
const char* kString2 = "ending";
const size_t kString1Size = ::strlen(kString1) + 1;
@@ -3347,6 +3398,7 @@ TEST_F(GLES2ImplementationTest, %(name)s) {
gl_args.append(arg.GetValidClientSideArg(func))
bucket_args.append(arg.GetValidClientSideArg(func))
f.write(code % {
+ 'prefix' : _prefix,
'name': func.name,
'gl_args': ", ".join(gl_args),
'bucket_args': ", ".join(bucket_args),
@@ -3355,8 +3407,8 @@ TEST_F(GLES2ImplementationTest, %(name)s) {
if self.__GetLengthArg(func) == None:
return
code = """
-TEST_F(GLES2ImplementationTest, %(name)sWithLength) {
- const uint32_t kBucketId = GLES2Implementation::kResultBucketId;
+TEST_F(%(prefix)sImplementationTest, %(name)sWithLength) {
+ const uint32_t kBucketId = %(prefix)sImplementation::kResultBucketId;
const char* kString = "foobar******";
const size_t kStringSize = 6; // We only need "foobar".
const size_t kHeaderSize = sizeof(GLint) * 2;
@@ -3405,6 +3457,7 @@ TEST_F(GLES2ImplementationTest, %(name)sWithLength) {
else:
gl_args.append(arg.GetValidClientSideArg(func))
f.write(code % {
+ 'prefix' : _prefix,
'name': func.name,
'gl_args': ", ".join(gl_args),
'bucket_args': ", ".join(bucket_args),
@@ -3629,8 +3682,8 @@ class GLcharHandler(CustomHandler):
"""Overrriden from TypeHandler."""
code = """ void %(name)s(%(typed_args)s) {
const uint32_t data_size = strlen(name);
- gles2::cmds::%(name)s* c =
- GetImmediateCmdSpace<gles2::cmds::%(name)s>(data_size);
+ %(lp)s::cmds::%(name)s* c =
+ GetImmediateCmdSpace<%(lp)s::cmds::%(name)s>(data_size);
if (c) {
c->Init(%(args)s, data_size);
}
@@ -3638,6 +3691,7 @@ class GLcharHandler(CustomHandler):
"""
f.write(code % {
+ "lp" : _lower_prefix,
"name": func.name,
"typed_args": func.MakeTypedOriginalArgString(""),
"args": func.MakeOriginalArgString(""),
@@ -3655,7 +3709,7 @@ class GLcharHandler(CustomHandler):
check_code.append(" EXPECT_EQ(static_cast<%s>(%d), cmd.%s);" %
(arg.type, value + 11, arg.name))
code = """
-TEST_F(GLES2FormatTest, %(func_name)s) {
+TEST_F(%(prefix)sFormatTest, %(func_name)s) {
cmds::%(func_name)s& cmd = *GetBufferAs<cmds::%(func_name)s>();
static const char* const test_str = \"test string\";
void* next_cmd = cmd.Set(
@@ -3682,6 +3736,7 @@ TEST_F(GLES2FormatTest, %(func_name)s) {
"""
f.write(code % {
+ 'prefix': _prefix,
'func_name': func.name,
'init_code': "\n".join(init_code),
'check_code': "\n".join(check_code),
@@ -3828,8 +3883,8 @@ TEST_P(%(test_name)s, %(name)sInvalidArgsBadSharedMemoryId) {
impl_func = func.GetInfo('impl_func', True)
if impl_func:
error_value = func.GetInfo("error_value") or "GL_FALSE"
- f.write("%s GLES2Implementation::%s(%s) {\n" %
- (func.return_type, func.original_name,
+ f.write("%s %sImplementation::%s(%s) {\n" %
+ (func.return_type, _prefix, func.original_name,
func.MakeTypedOriginalArgString("")))
f.write(" GPU_CLIENT_SINGLE_THREAD_CHECK();\n")
self.WriteTraceEvent(func, f)
@@ -3865,7 +3920,7 @@ TEST_P(%(test_name)s, %(name)sInvalidArgsBadSharedMemoryId) {
client_test = func.GetInfo('client_test', True)
if client_test:
code = """
-TEST_F(GLES2ImplementationTest, %(name)s) {
+TEST_F(%(prefix)sImplementationTest, %(name)s) {
struct Cmds {
cmds::%(name)s cmd;
};
@@ -3887,6 +3942,7 @@ TEST_F(GLES2ImplementationTest, %(name)s) {
args = func.GetOriginalArgs()
assert len(args) == 1
f.write(code % {
+ 'prefix' : _prefix,
'name': func.name,
'cmd_id_value': args[0].GetValidClientSideCmdArg(func),
'gl_id_value': args[0].GetValidClientSideArg(func) })
@@ -3915,7 +3971,8 @@ class STRnHandler(TypeHandler):
def WriteGLES2Implementation(self, func, f):
"""Overrriden from TypeHandler."""
- code_1 = """%(return_type)s GLES2Implementation::%(func_name)s(%(args)s) {
+ code_1 = """%(return_type)s %(prefix)sImplementation::%(func_name)s(
+ %(args)s) {
GPU_CLIENT_SINGLE_THREAD_CHECK();
"""
code_2 = """ GPU_CLIENT_LOG("[" << GetLogPrefix()
@@ -3945,6 +4002,7 @@ class STRnHandler(TypeHandler):
"""
args = func.GetOriginalArgs()
str_args = {
+ 'prefix' : _prefix,
'return_type': func.return_type,
'func_name': func.original_name,
'args': func.MakeTypedOriginalArgString(""),
@@ -4821,7 +4879,7 @@ class Int64Argument(Argument):
"""Writes specialized accessor for compound members."""
f.write(" %s %s() const volatile {\n" % (self.type, self.name))
f.write(" return static_cast<%s>(\n" % self.type)
- f.write(" GLES2Util::MapTwoUint32ToUint64(\n")
+ f.write(" %sGLES2Util::MapTwoUint32ToUint64(\n" % _Namespace())
f.write(" %s_0,\n" % self.name)
f.write(" %s_1));\n" % self.name)
f.write(" }\n")
@@ -4833,8 +4891,8 @@ class Int64Argument(Argument):
def WriteSetCode(self, f, indent, var):
indent_str = ' ' * indent
- f.write("%sGLES2Util::MapUint64ToTwoUint32(static_cast<uint64_t>(%s),\n" %
- (indent_str, var))
+ f.write("%s%sGLES2Util::MapUint64ToTwoUint32(static_cast<uint64_t>(%s),\n" %
+ (indent_str, _Namespace(), var))
f.write("%s &%s_0,\n" %
(indent_str, self.name))
f.write("%s &%s_1);\n" %
@@ -5525,11 +5583,12 @@ class GLGenerator(object):
_comment_re = re.compile(r'^//.*$')
_function_re = re.compile(r'^GL_APICALL(.*?)GL_APIENTRY (.*?) \((.*?)\);$')
- def __init__(self, verbose, function_info, named_type_info, state_info,
- capability_flags):
+ def __init__(self, verbose, year, function_info, named_type_info,
+ state_info, capability_flags):
self.original_functions = []
self.functions = []
self.verbose = verbose
+ self.year = year
self.errors = 0
self.pepper_interfaces = []
self.interface_info = {}
@@ -5659,8 +5718,8 @@ class GLGenerator(object):
def WriteCommandIds(self, filename):
"""Writes the command buffer format"""
- with CHeaderWriter(filename) as f:
- f.write("#define GLES2_COMMAND_LIST(OP) \\\n")
+ with CHeaderWriter(filename, self.year) as f:
+ f.write("#define %s_COMMAND_LIST(OP) \\\n" % _upper_prefix)
cmd_id = 256
for func in self.functions:
f.write(" %-60s /* %d */ \\\n" %
@@ -5670,26 +5729,27 @@ class GLGenerator(object):
f.write("enum CommandId {\n")
f.write(" kOneBeforeStartPoint = cmd::kLastCommonId, "
- "// All GLES2 commands start after this.\n")
- f.write("#define GLES2_CMD_OP(name) k ## name,\n")
- f.write(" GLES2_COMMAND_LIST(GLES2_CMD_OP)\n")
- f.write("#undef GLES2_CMD_OP\n")
+ "// All %s commands start after this.\n" % _prefix)
+ f.write("#define %s_CMD_OP(name) k ## name,\n" % _upper_prefix)
+ f.write(" %s_COMMAND_LIST(%s_CMD_OP)\n" %
+ (_upper_prefix, _upper_prefix))
+ f.write("#undef %s_CMD_OP\n" % _upper_prefix)
f.write(" kNumCommands,\n")
- f.write(" kFirstGLES2Command = kOneBeforeStartPoint + 1\n")
+ f.write(" kFirst%sCommand = kOneBeforeStartPoint + 1\n" % _prefix)
f.write("};\n")
f.write("\n")
self.generated_cpp_filenames.append(filename)
def WriteFormat(self, filename):
"""Writes the command buffer format"""
- with CHeaderWriter(filename) as f:
+ with CHeaderWriter(filename, self.year) as f:
# Forward declaration of a few enums used in constant argument
# to avoid including GL header files.
- enum_defines = {
- 'GL_SCANOUT_CHROMIUM': '0x6000',
- 'GL_SYNC_GPU_COMMANDS_COMPLETE': '0x9117',
- 'GL_SYNC_FLUSH_COMMANDS_BIT': '0x00000001',
- }
+ enum_defines = {'GL_SCANOUT_CHROMIUM': '0x6000'}
+ if 'FenceSync' in self.function_info:
+ enum_defines['GL_SYNC_GPU_COMMANDS_COMPLETE'] = '0x9117'
+ if 'ClientWaitSync' in self.function_info:
+ enum_defines['GL_SYNC_FLUSH_COMMANDS_BIT'] = '0x00000001'
f.write('\n')
for enum in enum_defines:
f.write("#define %s %s\n" % (enum, enum_defines[enum]))
@@ -5701,7 +5761,7 @@ class GLGenerator(object):
def WriteDocs(self, filename):
"""Writes the command buffer doc version of the commands"""
- with CHeaderWriter(filename) as f:
+ with CHeaderWriter(filename, self.year) as f:
for func in self.functions:
func.WriteDocs(f)
f.write("\n")
@@ -5709,16 +5769,17 @@ class GLGenerator(object):
def WriteFormatTest(self, filename):
"""Writes the command buffer format test."""
- comment = ("// This file contains unit tests for gles2 commmands\n"
- "// It is included by gles2_cmd_format_test.cc\n\n")
- with CHeaderWriter(filename, comment) as f:
+ comment = ("// This file contains unit tests for %s commmands\n"
+ "// It is included by %s_cmd_format_test.cc\n\n" %
+ (_lower_prefix, _lower_prefix))
+ with CHeaderWriter(filename, self.year, comment) as f:
for func in self.functions:
func.WriteFormatTest(f)
self.generated_cpp_filenames.append(filename)
def WriteCmdHelperHeader(self, filename):
"""Writes the gles2 command helper."""
- with CHeaderWriter(filename) as f:
+ with CHeaderWriter(filename, self.year) as f:
for func in self.functions:
func.WriteCmdHelper(f)
self.generated_cpp_filenames.append(filename)
@@ -5726,7 +5787,7 @@ class GLGenerator(object):
def WriteServiceContextStateHeader(self, filename):
"""Writes the service context state header."""
comment = "// It is included by context_state.h\n"
- with CHeaderWriter(filename, comment) as f:
+ with CHeaderWriter(filename, self.year, comment) as f:
f.write("struct EnableFlags {\n")
f.write(" EnableFlags();\n")
for capability in self.capability_flags:
@@ -5783,7 +5844,7 @@ class GLGenerator(object):
def WriteClientContextStateHeader(self, filename):
"""Writes the client context state header."""
comment = "// It is included by client_context_state.h\n"
- with CHeaderWriter(filename, comment) as f:
+ with CHeaderWriter(filename, self.year, comment) as f:
f.write("struct EnableFlags {\n")
f.write(" EnableFlags();\n")
for capability in self.capability_flags:
@@ -5855,7 +5916,7 @@ bool %s::GetStateAs%s(
def WriteServiceContextStateImpl(self, filename):
"""Writes the context state service implementation."""
comment = "// It is included by context_state.cc\n"
- with CHeaderWriter(filename, comment) as f:
+ with CHeaderWriter(filename, self.year, comment) as f:
code = []
for capability in self.capability_flags:
code.append("%s(%s)" %
@@ -6029,7 +6090,7 @@ void ContextState::InitState(const ContextState *prev_state) const {
def WriteClientContextStateImpl(self, filename):
"""Writes the context state client side implementation."""
comment = "// It is included by client_context_state.cc\n"
- with CHeaderWriter(filename, comment) as f:
+ with CHeaderWriter(filename, self.year, comment) as f:
code = []
for capability in self.capability_flags:
if 'extension_flag' in capability:
@@ -6082,20 +6143,20 @@ bool ClientContextState::SetCapabilityState(
def WriteServiceImplementation(self, filename):
"""Writes the service decorder implementation."""
- comment = "// It is included by gles2_cmd_decoder.cc\n"
- with CHeaderWriter(filename, comment) as f:
+ comment = "// It is included by %s_cmd_decoder.cc\n" % _lower_prefix
+ with CHeaderWriter(filename, self.year, comment) as f:
for func in self.functions:
func.WriteServiceImplementation(f)
-
- f.write("""
+ if self.capability_flags:
+ f.write("""
bool GLES2DecoderImpl::SetCapabilityState(GLenum cap, bool enabled) {
switch (cap) {
""")
- for capability in self.capability_flags:
- f.write(" case GL_%s:\n" % capability['name'].upper())
- if 'state_flag' in capability:
+ for capability in self.capability_flags:
+ f.write(" case GL_%s:\n" % capability['name'].upper())
+ if 'state_flag' in capability:
- f.write("""\
+ f.write("""\
state_.enable_flags.%(name)s = enabled;
if (state_.enable_flags.cached_%(name)s != enabled
|| state_.ignore_cached_state) {
@@ -6103,8 +6164,8 @@ bool GLES2DecoderImpl::SetCapabilityState(GLenum cap, bool enabled) {
}
return false;
""" % capability)
- else:
- f.write("""\
+ else:
+ f.write("""\
state_.enable_flags.%(name)s = enabled;
if (state_.enable_flags.cached_%(name)s != enabled
|| state_.ignore_cached_state) {
@@ -6113,7 +6174,7 @@ bool GLES2DecoderImpl::SetCapabilityState(GLenum cap, bool enabled) {
}
return false;
""" % capability)
- f.write(""" default:
+ f.write(""" default:
NOTREACHED();
return false;
}
@@ -6123,7 +6184,7 @@ bool GLES2DecoderImpl::SetCapabilityState(GLenum cap, bool enabled) {
def WritePassthroughServiceImplementation(self, filename):
"""Writes the passthrough service decorder implementation."""
- with CWriter(filename) as f:
+ with CWriter(filename, self.year) as f:
header = """
#include \"gpu/command_buffer/service/gles2_cmd_decoder_passthrough.h\"
@@ -6152,15 +6213,15 @@ namespace gles2 {
for test_num in range(0, num_tests, FUNCTIONS_PER_FILE):
count += 1
filename = filename_pattern % count
- comment = "// It is included by gles2_cmd_decoder_unittest_%d.cc\n" \
- % count
- with CHeaderWriter(filename, comment) as f:
+ comment = "// It is included by %s_cmd_decoder_unittest_%d.cc\n" \
+ % (_lower_prefix, count)
+ with CHeaderWriter(filename, self.year, comment) as f:
end = test_num + FUNCTIONS_PER_FILE
if end > num_tests:
end = num_tests
for idx in range(test_num, end):
func = self.functions[idx]
- test_name = 'GLES2DecoderTest%d' % count
+ test_name = '%sDecoderTest%d' % (_prefix, count)
if func.IsES3():
test_name = 'GLES3DecoderTest%d' % count
@@ -6176,103 +6237,109 @@ namespace gles2 {
})
self.generated_cpp_filenames.append(filename)
- comment = "// It is included by gles2_cmd_decoder_unittest_base.cc\n"
+ comment = ("// It is included by %s_cmd_decoder_unittest_base.cc\n"
+ % _lower_prefix)
filename = filename_pattern % 0
- with CHeaderWriter(filename, comment) as f:
- f.write(
+ with CHeaderWriter(filename, self.year, comment) as f:
+ if self.capability_flags:
+ f.write(
"""void GLES2DecoderTestBase::SetupInitCapabilitiesExpectations(
bool es3_capable) {""")
- for capability in self.capability_flags:
- capability_no_init = 'no_init' in capability and \
- capability['no_init'] == True
- if capability_no_init:
+ for capability in self.capability_flags:
+ capability_no_init = 'no_init' in capability and \
+ capability['no_init'] == True
+ if capability_no_init:
+ continue
+ capability_es3 = 'es3' in capability and capability['es3'] == True
+ if capability_es3:
continue
- capability_es3 = 'es3' in capability and capability['es3'] == True
- if capability_es3:
- continue
- if 'extension_flag' in capability:
- f.write(" if (group_->feature_info()->feature_flags().%s) {\n" %
- capability['extension_flag'])
- f.write(" ")
- f.write(" ExpectEnableDisable(GL_%s, %s);\n" %
- (capability['name'].upper(),
- ('false', 'true')['default' in capability]))
- if 'extension_flag' in capability:
- f.write(" }")
- f.write(" if (es3_capable) {")
- for capability in self.capability_flags:
- capability_es3 = 'es3' in capability and capability['es3'] == True
- if capability_es3:
- f.write(" ExpectEnableDisable(GL_%s, %s);\n" %
- (capability['name'].upper(),
- ('false', 'true')['default' in capability]))
- f.write(""" }
+ if 'extension_flag' in capability:
+ f.write(" if (group_->feature_info()->feature_flags().%s) {\n" %
+ capability['extension_flag'])
+ f.write(" ")
+ f.write(" ExpectEnableDisable(GL_%s, %s);\n" %
+ (capability['name'].upper(),
+ ('false', 'true')['default' in capability]))
+ if 'extension_flag' in capability:
+ f.write(" }")
+ f.write(" if (es3_capable) {")
+ for capability in self.capability_flags:
+ capability_es3 = 'es3' in capability and capability['es3'] == True
+ if capability_es3:
+ f.write(" ExpectEnableDisable(GL_%s, %s);\n" %
+ (capability['name'].upper(),
+ ('false', 'true')['default' in capability]))
+ f.write(""" }
}
-
+""")
+ if _prefix != 'Raster':
+ f.write("""
void GLES2DecoderTestBase::SetupInitStateExpectations(bool es3_capable) {
auto* feature_info_ = group_->feature_info();
""")
- # We need to sort the keys so the expectations match
- for state_name in sorted(self.state_info.keys()):
- state = self.state_info[state_name]
- if state['type'] == 'FrontBack':
- num_states = len(state['states'])
- for ndx, group in enumerate(Grouper(num_states / 2, state['states'])):
+ # We need to sort the keys so the expectations match
+ for state_name in sorted(self.state_info.keys()):
+ state = self.state_info[state_name]
+ if state['type'] == 'FrontBack':
+ num_states = len(state['states'])
+ for ndx, group in enumerate(Grouper(num_states / 2,
+ state['states'])):
+ args = []
+ for item in group:
+ if 'expected' in item:
+ args.append(item['expected'])
+ else:
+ args.append(item['default'])
+ f.write(
+ " EXPECT_CALL(*gl_, %s(%s, %s))\n" %
+ (state['func'], ('GL_FRONT', 'GL_BACK')[ndx],
+ ", ".join(args)))
+ f.write(" .Times(1)\n")
+ f.write(" .RetiresOnSaturation();\n")
+ elif state['type'] == 'NamedParameter':
+ for item in state['states']:
+ expect_value = item['default']
+ if isinstance(expect_value, list):
+ # TODO: Currently we do not check array values.
+ expect_value = "_"
+
+ operation = []
+ operation.append(
+ " EXPECT_CALL(*gl_, %s(%s, %s))\n" %
+ (state['func'],
+ (item['enum_set']
+ if 'enum_set' in item else item['enum']),
+ expect_value))
+ operation.append(" .Times(1)\n")
+ operation.append(" .RetiresOnSaturation();\n")
+
+ guarded_operation = GuardState(item, ''.join(operation))
+ f.write(guarded_operation)
+ elif 'no_init' not in state:
+ if 'extension_flag' in state:
+ f.write(" if (group_->feature_info()->feature_flags().%s) {\n" %
+ state['extension_flag'])
+ f.write(" ")
args = []
- for item in group:
+ for item in state['states']:
if 'expected' in item:
args.append(item['expected'])
else:
args.append(item['default'])
- f.write(
- " EXPECT_CALL(*gl_, %s(%s, %s))\n" %
- (state['func'], ('GL_FRONT', 'GL_BACK')[ndx], ", ".join(args)))
- f.write(" .Times(1)\n")
- f.write(" .RetiresOnSaturation();\n")
- elif state['type'] == 'NamedParameter':
- for item in state['states']:
- expect_value = item['default']
- if isinstance(expect_value, list):
- # TODO: Currently we do not check array values.
- expect_value = "_"
-
- operation = []
- operation.append(
- " EXPECT_CALL(*gl_, %s(%s, %s))\n" %
- (state['func'],
- (item['enum_set']
- if 'enum_set' in item else item['enum']),
- expect_value))
- operation.append(" .Times(1)\n")
- operation.append(" .RetiresOnSaturation();\n")
-
- guarded_operation = GuardState(item, ''.join(operation))
- f.write(guarded_operation)
- elif 'no_init' not in state:
- if 'extension_flag' in state:
- f.write(" if (group_->feature_info()->feature_flags().%s) {\n" %
- state['extension_flag'])
- f.write(" ")
- args = []
- for item in state['states']:
- if 'expected' in item:
- args.append(item['expected'])
+ # TODO: Currently we do not check array values.
+ args = ["_" if isinstance(arg, list) else arg for arg in args]
+ if 'custom_function' in state:
+ f.write(" SetupInitStateManualExpectationsFor%s(%s);\n" %
+ (state['func'], ", ".join(args)))
else:
- args.append(item['default'])
- # TODO: Currently we do not check array values.
- args = ["_" if isinstance(arg, list) else arg for arg in args]
- if 'custom_function' in state:
- f.write(" SetupInitStateManualExpectationsFor%s(%s);\n" %
- (state['func'], ", ".join(args)))
- else:
- f.write(" EXPECT_CALL(*gl_, %s(%s))\n" %
- (state['func'], ", ".join(args)))
- f.write(" .Times(1)\n")
- f.write(" .RetiresOnSaturation();\n")
- if 'extension_flag' in state:
- f.write(" }\n")
- f.write(" SetupInitStateManualExpectations(es3_capable);\n")
- f.write("}\n")
+ f.write(" EXPECT_CALL(*gl_, %s(%s))\n" %
+ (state['func'], ", ".join(args)))
+ f.write(" .Times(1)\n")
+ f.write(" .RetiresOnSaturation();\n")
+ if 'extension_flag' in state:
+ f.write(" }\n")
+ f.write(" SetupInitStateManualExpectations(es3_capable);\n")
+ f.write("}\n")
self.generated_cpp_filenames.append(filename)
def WriteServiceUnitTestsForExtensions(self, filename):
@@ -6283,7 +6350,7 @@ void GLES2DecoderTestBase::SetupInitStateExpectations(bool es3_capable) {
"""
functions = [f for f in self.functions if f.GetInfo('extension_flag')]
comment = "// It is included by gles2_cmd_decoder_unittest_extensions.cc\n"
- with CHeaderWriter(filename, comment) as f:
+ with CHeaderWriter(filename, self.year, comment) as f:
for func in functions:
if True:
if func.GetInfo('unit_test') != False:
@@ -6300,7 +6367,7 @@ void GLES2DecoderTestBase::SetupInitStateExpectations(bool es3_capable) {
def WriteGLES2Header(self, filename):
"""Writes the GLES2 header."""
comment = "// This file contains Chromium-specific GLES2 declarations.\n\n"
- with CHeaderWriter(filename, comment) as f:
+ with CHeaderWriter(filename, self.year, comment) as f:
for func in self.original_functions:
func.WriteGLES2Header(f)
f.write("\n")
@@ -6309,7 +6376,7 @@ void GLES2DecoderTestBase::SetupInitStateExpectations(bool es3_capable) {
def WriteGLES2CLibImplementation(self, filename):
"""Writes the GLES2 c lib implementation."""
comment = "// These functions emulate GLES2 over command buffers.\n"
- with CHeaderWriter(filename, comment) as f:
+ with CHeaderWriter(filename, self.year, comment) as f:
for func in self.original_functions:
func.WriteGLES2CLibImplementation(f)
f.write("""
@@ -6330,9 +6397,9 @@ extern const NameToFunc g_gles2_function_table[] = {
def WriteGLES2InterfaceHeader(self, filename):
"""Writes the GLES2 interface header."""
- comment = ("// This file is included by gles2_interface.h to declare the\n"
- "// GL api functions.\n")
- with CHeaderWriter(filename, comment) as f:
+ comment = ("// This file is included by %s_interface.h to declare the\n"
+ "// GL api functions.\n" % _lower_prefix)
+ with CHeaderWriter(filename, self.year, comment) as f:
for func in self.original_functions:
func.WriteGLES2InterfaceHeader(f)
self.generated_cpp_filenames.append(filename)
@@ -6340,7 +6407,7 @@ extern const NameToFunc g_gles2_function_table[] = {
def WriteGLES2InterfaceStub(self, filename):
"""Writes the GLES2 interface stub header."""
comment = "// This file is included by gles2_interface_stub.h.\n"
- with CHeaderWriter(filename, comment) as f:
+ with CHeaderWriter(filename, self.year, comment) as f:
for func in self.original_functions:
func.WriteGLES2InterfaceStub(f)
self.generated_cpp_filenames.append(filename)
@@ -6348,7 +6415,7 @@ extern const NameToFunc g_gles2_function_table[] = {
def WriteGLES2InterfaceStubImpl(self, filename):
"""Writes the GLES2 interface header."""
comment = "// This file is included by gles2_interface_stub.cc.\n"
- with CHeaderWriter(filename, comment) as f:
+ with CHeaderWriter(filename, self.year, comment) as f:
for func in self.original_functions:
func.WriteGLES2InterfaceStubImpl(f)
self.generated_cpp_filenames.append(filename)
@@ -6356,9 +6423,9 @@ extern const NameToFunc g_gles2_function_table[] = {
def WriteGLES2ImplementationHeader(self, filename):
"""Writes the GLES2 Implementation header."""
comment = \
- ("// This file is included by gles2_implementation.h to declare the\n"
- "// GL api functions.\n")
- with CHeaderWriter(filename, comment) as f:
+ ("// This file is included by %s_implementation.h to declare the\n"
+ "// GL api functions.\n" % _lower_prefix)
+ with CHeaderWriter(filename, self.year, comment) as f:
for func in self.original_functions:
func.WriteGLES2ImplementationHeader(f)
self.generated_cpp_filenames.append(filename)
@@ -6366,9 +6433,9 @@ extern const NameToFunc g_gles2_function_table[] = {
def WriteGLES2Implementation(self, filename):
"""Writes the GLES2 Implementation."""
comment = \
- ("// This file is included by gles2_implementation.cc to define the\n"
- "// GL api functions.\n")
- with CHeaderWriter(filename, comment) as f:
+ ("// This file is included by %s_implementation.cc to define the\n"
+ "// GL api functions.\n" % _lower_prefix)
+ with CHeaderWriter(filename, self.year, comment) as f:
for func in self.original_functions:
func.WriteGLES2Implementation(f)
self.generated_cpp_filenames.append(filename)
@@ -6376,7 +6443,7 @@ extern const NameToFunc g_gles2_function_table[] = {
def WriteGLES2TraceImplementationHeader(self, filename):
"""Writes the GLES2 Trace Implementation header."""
comment = "// This file is included by gles2_trace_implementation.h\n"
- with CHeaderWriter(filename, comment) as f:
+ with CHeaderWriter(filename, self.year, comment) as f:
for func in self.original_functions:
func.WriteGLES2TraceImplementationHeader(f)
self.generated_cpp_filenames.append(filename)
@@ -6384,7 +6451,7 @@ extern const NameToFunc g_gles2_function_table[] = {
def WriteGLES2TraceImplementation(self, filename):
"""Writes the GLES2 Trace Implementation."""
comment = "// This file is included by gles2_trace_implementation.cc\n"
- with CHeaderWriter(filename, comment) as f:
+ with CHeaderWriter(filename, self.year, comment) as f:
for func in self.original_functions:
func.WriteGLES2TraceImplementation(f)
self.generated_cpp_filenames.append(filename)
@@ -6392,16 +6459,16 @@ extern const NameToFunc g_gles2_function_table[] = {
def WriteGLES2ImplementationUnitTests(self, filename):
"""Writes the GLES2 helper header."""
comment = \
- ("// This file is included by gles2_implementation.h to declare the\n"
- "// GL api functions.\n")
- with CHeaderWriter(filename, comment) as f:
+ ("// This file is included by %s_implementation.h to declare the\n"
+ "// GL api functions.\n" % _lower_prefix)
+ with CHeaderWriter(filename, self.year, comment) as f:
for func in self.original_functions:
func.WriteGLES2ImplementationUnitTest(f)
self.generated_cpp_filenames.append(filename)
def WriteServiceUtilsHeader(self, filename):
"""Writes the gles2 auto generated utility header."""
- with CHeaderWriter(filename) as f:
+ with CHeaderWriter(filename, self.year) as f:
for name in sorted(self.named_type_info.keys()):
named_type = NamedType(self.named_type_info[name])
if not named_type.CreateValidator():
@@ -6429,7 +6496,7 @@ extern const NameToFunc g_gles2_function_table[] = {
def WriteServiceUtilsImplementation(self, filename):
"""Writes the gles2 auto generated utility implementation."""
- with CHeaderWriter(filename) as f:
+ with CHeaderWriter(filename, self.year) as f:
names = sorted(self.named_type_info.keys())
for name in names:
named_type = NamedType(self.named_type_info[name])
@@ -6540,7 +6607,7 @@ extern const NameToFunc g_gles2_function_table[] = {
def WriteCommonUtilsHeader(self, filename):
"""Writes the gles2 common utility header."""
- with CHeaderWriter(filename) as f:
+ with CHeaderWriter(filename, self.year) as f:
type_infos = sorted(self.named_type_info.keys())
for type_info in type_infos:
if self.named_type_info[type_info]['type'] == 'GLenum':
@@ -6572,25 +6639,25 @@ extern const NameToFunc g_gles2_function_table[] = {
self.Error("code collision: %s and %s have the same code %s" %
(define_dict[value], name, value))
- with CHeaderWriter(filename) as f:
- f.write("static const GLES2Util::EnumToString "
- "enum_to_string_table[] = {\n")
+ with CHeaderWriter(filename, self.year) as f:
+ f.write("static const %sUtil::EnumToString "
+ "enum_to_string_table[] = {\n" % _prefix)
for value in sorted(define_dict):
f.write(' { %s, "%s", },\n' % (value, define_dict[value]))
f.write("""};
-const GLES2Util::EnumToString* const GLES2Util::enum_to_string_table_ =
+const %(p)sUtil::EnumToString* const %(p)sUtil::enum_to_string_table_ =
enum_to_string_table;
-const size_t GLES2Util::enum_to_string_table_len_ =
+const size_t %(p)sUtil::enum_to_string_table_len_ =
sizeof(enum_to_string_table) / sizeof(enum_to_string_table[0]);
-""")
+""" % { 'p' : _prefix})
enums = sorted(self.named_type_info.keys())
for enum in enums:
if self.named_type_info[enum]['type'] == 'GLenum':
- f.write("std::string GLES2Util::GetString%s(uint32_t value) {\n" %
- enum)
+ f.write("std::string %sUtil::GetString%s(uint32_t value) {\n" %
+ (_prefix, enum))
valid_list = self.named_type_info[enum]['valid']
if 'valid_es3' in self.named_type_info[enum]:
for es3_enum in self.named_type_info[enum]['valid_es3']:
@@ -6602,22 +6669,22 @@ const size_t GLES2Util::enum_to_string_table_len_ =
for value in valid_list:
f.write(' { %s, "%s" },\n' % (value, value))
f.write(""" };
- return GLES2Util::GetQualifiedEnumString(
+ return %sUtil::GetQualifiedEnumString(
string_table, arraysize(string_table), value);
}
-""")
+""" % _prefix)
else:
- f.write(""" return GLES2Util::GetQualifiedEnumString(
+ f.write(""" return %sUtil::GetQualifiedEnumString(
NULL, 0, value);
}
-""")
+""" % _prefix)
self.generated_cpp_filenames.append(filename)
def WritePepperGLES2Interface(self, filename, dev):
"""Writes the Pepper OpenGLES interface definition."""
- with CWriter(filename) as f:
+ with CWriter(filename, self.year) as f:
f.write("label Chrome {\n")
f.write(" M39 = 1.0\n")
f.write("};\n\n")
@@ -6678,7 +6745,7 @@ const size_t GLES2Util::enum_to_string_table_len_ =
def WritePepperGLES2Implementation(self, filename):
"""Writes the Pepper OpenGLES interface implementation."""
- with CWriter(filename) as f:
+ with CWriter(filename, self.year) as f:
f.write("#include \"ppapi/shared_impl/ppb_opengles2_shared.h\"\n\n")
f.write("#include \"base/logging.h\"\n")
f.write("#include \"gpu/command_buffer/client/gles2_implementation.h\"\n")
@@ -6749,7 +6816,7 @@ const size_t GLES2Util::enum_to_string_table_len_ =
def WriteGLES2ToPPAPIBridge(self, filename):
"""Connects GLES2 helper library to PPB_OpenGLES2 interface"""
- with CWriter(filename) as f:
+ with CWriter(filename, self.year) as f:
f.write("#ifndef GL_GLEXT_PROTOTYPES\n")
f.write("#define GL_GLEXT_PROTOTYPES\n")
f.write("#endif\n")
diff --git a/chromium/gpu/command_buffer/build_gles2_cmd_buffer.py b/chromium/gpu/command_buffer/build_gles2_cmd_buffer.py
index ac5169560f9..e9744ec0994 100755
--- a/chromium/gpu/command_buffer/build_gles2_cmd_buffer.py
+++ b/chromium/gpu/command_buffer/build_gles2_cmd_buffer.py
@@ -615,7 +615,7 @@ _STATE_INFO = {
# Named type info object represents a named type that is used in OpenGL call
# arguments. Each named type defines a set of valid OpenGL call arguments. The
-# named types are used in 'cmd_buffer_functions.txt'.
+# named types are used in 'gles2_cmd_buffer_functions.txt'.
# type: The actual GL type of the named type.
# valid: The list of values that are valid for both the client and the service.
# valid_es3: The list of values that are valid in OpenGL ES 3, but not ES 2.
@@ -2277,11 +2277,11 @@ _NAMED_TYPE_INFO = {
# A function info object specifies the type and other special data for the
# command that will be generated. A base function info object is generated by
-# parsing the "cmd_buffer_functions.txt", one for each function in the
+# parsing the "gles2_cmd_buffer_functions.txt", one for each function in the
# file. These function info objects can be augmented and their values can be
# overridden by adding an object to the table below.
#
-# Must match function names specified in "cmd_buffer_functions.txt".
+# Must match function names specified in "gles2_cmd_buffer_functions.txt".
#
# cmd_comment: A comment added to the cmd format.
# type: defines which handler will be used to generate code.
@@ -3581,6 +3581,7 @@ _FUNCTION_INFO = {
'decoder_func': 'DoSwapBuffersWithBoundsCHROMIUM',
'impl_func': False,
'client_test': False,
+ 'unit_test': False,
'extension': True,
},
'SwapInterval': {
@@ -4489,10 +4490,12 @@ _FUNCTION_INFO = {
},
'SetDrawRectangleCHROMIUM': {
'decoder_func': 'DoSetDrawRectangleCHROMIUM',
+ 'unit_test': False,
'extension': 'CHROMIUM_set_draw_rectangle',
},
'SetEnableDCLayersCHROMIUM': {
'decoder_func': 'DoSetEnableDCLayersCHROMIUM',
+ 'unit_test': False,
'extension': 'CHROMIUM_dc_layers',
},
'InitializeDiscardableTextureCHROMIUM': {
@@ -4547,7 +4550,7 @@ _FUNCTION_INFO = {
'extension': 'CHROMIUM_raster_transport',
'extension_flag': 'chromium_raster_transport',
},
- "CreateTransferCacheEntryINTERNAL": {
+ 'CreateTransferCacheEntryINTERNAL': {
'decoder_func': 'DoCreateTransferCacheEntryINTERNAL',
'cmd_args': 'GLuint entry_type, GLuint entry_id, GLuint handle_shm_id, '
'GLuint handle_shm_offset, GLuint data_shm_id, '
@@ -4555,22 +4558,25 @@ _FUNCTION_INFO = {
'internal': True,
'impl_func': True,
'client_test': False,
+ 'unit_test': False,
'extension': True,
},
- "DeleteTransferCacheEntryINTERNAL": {
+ 'DeleteTransferCacheEntryINTERNAL': {
'decoder_func': 'DoDeleteTransferCacheEntryINTERNAL',
'cmd_args': 'GLuint entry_type, GLuint entry_id',
'internal': True,
'impl_func': True,
'client_test': False,
+ 'unit_test': False,
'extension': True,
},
- "UnlockTransferCacheEntryINTERNAL": {
+ 'UnlockTransferCacheEntryINTERNAL': {
'decoder_func': 'DoUnlockTransferCacheEntryINTERNAL',
'cmd_args': 'GLuint entry_type, GLuint entry_id',
'internal': True,
'impl_func': True,
'client_test': False,
+ 'unit_test': False,
'extension': True,
},
'TexStorage2DImageCHROMIUM': {
@@ -4630,6 +4636,16 @@ _FUNCTION_INFO = {
'extension': 'CHROMIUM_gpu_fence',
'extension_flag': 'chromium_gpu_fence',
},
+ 'UnpremultiplyAndDitherCopyCHROMIUM': {
+ 'decoder_func': 'DoUnpremultiplyAndDitherCopyCHROMIUM',
+ 'cmd_args': 'GLuint source_id, GLuint dest_id, GLint x, GLint y, '
+ 'GLsizei width, GLsizei height',
+ 'client_test': False,
+ 'unit_test': False,
+ 'impl_func': True,
+ 'extension': 'CHROMIUM_unpremultiply_and_dither_copy',
+ 'extension_flag': 'unpremultiply_and_dither_copy',
+ }
}
@@ -4677,10 +4693,11 @@ def main(argv):
# This script lives under gpu/command_buffer, cd to base directory.
os.chdir(os.path.dirname(__file__) + "/../..")
base_dir = os.getcwd()
- gen = build_cmd_buffer_lib.GLGenerator(options.verbose, _FUNCTION_INFO,
- _NAMED_TYPE_INFO, _STATE_INFO,
- _CAPABILITY_FLAGS)
- gen.ParseGLH("gpu/command_buffer/cmd_buffer_functions.txt")
+ build_cmd_buffer_lib.InitializePrefix("GLES2")
+ gen = build_cmd_buffer_lib.GLGenerator(options.verbose, "2014",
+ _FUNCTION_INFO, _NAMED_TYPE_INFO,
+ _STATE_INFO, _CAPABILITY_FLAGS)
+ gen.ParseGLH("gpu/command_buffer/gles2_cmd_buffer_functions.txt")
# Support generating files under gen/
if options.output_dir != None:
diff --git a/chromium/gpu/command_buffer/build_raster_cmd_buffer.py b/chromium/gpu/command_buffer/build_raster_cmd_buffer.py
new file mode 100755
index 00000000000..cd9d64ca52d
--- /dev/null
+++ b/chromium/gpu/command_buffer/build_raster_cmd_buffer.py
@@ -0,0 +1,954 @@
+#!/usr/bin/env python
+# Copyright 2018 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+"""code generator for raster command buffers."""
+
+import os
+import os.path
+import sys
+from optparse import OptionParser
+
+import build_cmd_buffer_lib
+
+# Empty flags because raster interface does not support glEnable
+_CAPABILITY_FLAGS = []
+
+_STATE_INFO = {}
+
+# TODO(backer): Figure out which of these enums are actually valid.
+#
+# Named type info object represents a named type that is used in OpenGL call
+# arguments. Each named type defines a set of valid OpenGL call arguments. The
+# named types are used in 'raster_cmd_buffer_functions.txt'.
+# type: The actual GL type of the named type.
+# valid: The list of values that are valid for both the client and the service.
+# valid_es3: The list of values that are valid in OpenGL ES 3, but not ES 2.
+# invalid: Examples of invalid values for the type. At least these values
+# should be tested to be invalid.
+# deprecated_es3: The list of values that are valid in OpenGL ES 2, but
+# deprecated in ES 3.
+# is_complete: The list of valid values of type are final and will not be
+# modified during runtime.
+# validator: If set to False will prevent creation of a ValueValidator. Values
+# are still expected to be checked for validity and will be tested.
+_NAMED_TYPE_INFO = {
+ 'CompressedTextureFormat': {
+ 'type': 'GLenum',
+ 'valid': [
+ ],
+ 'valid_es3': [
+ ],
+ },
+ 'GLState': {
+ 'type': 'GLenum',
+ 'valid': [
+      # NOTE: State and Capability entries added later.
+ 'GL_ACTIVE_TEXTURE',
+ 'GL_ALIASED_LINE_WIDTH_RANGE',
+ 'GL_ALIASED_POINT_SIZE_RANGE',
+ 'GL_ALPHA_BITS',
+ 'GL_ARRAY_BUFFER_BINDING',
+ 'GL_BLUE_BITS',
+ 'GL_COMPRESSED_TEXTURE_FORMATS',
+ 'GL_CURRENT_PROGRAM',
+ 'GL_DEPTH_BITS',
+ 'GL_DEPTH_RANGE',
+ 'GL_ELEMENT_ARRAY_BUFFER_BINDING',
+ 'GL_FRAMEBUFFER_BINDING',
+ 'GL_GENERATE_MIPMAP_HINT',
+ 'GL_GREEN_BITS',
+ 'GL_IMPLEMENTATION_COLOR_READ_FORMAT',
+ 'GL_IMPLEMENTATION_COLOR_READ_TYPE',
+ 'GL_MAX_COMBINED_TEXTURE_IMAGE_UNITS',
+ 'GL_MAX_CUBE_MAP_TEXTURE_SIZE',
+ 'GL_MAX_FRAGMENT_UNIFORM_VECTORS',
+ 'GL_MAX_RENDERBUFFER_SIZE',
+ 'GL_MAX_TEXTURE_IMAGE_UNITS',
+ 'GL_MAX_TEXTURE_SIZE',
+ 'GL_MAX_VARYING_VECTORS',
+ 'GL_MAX_VERTEX_ATTRIBS',
+ 'GL_MAX_VERTEX_TEXTURE_IMAGE_UNITS',
+ 'GL_MAX_VERTEX_UNIFORM_VECTORS',
+ 'GL_MAX_VIEWPORT_DIMS',
+ 'GL_NUM_COMPRESSED_TEXTURE_FORMATS',
+ 'GL_NUM_SHADER_BINARY_FORMATS',
+ 'GL_PACK_ALIGNMENT',
+ 'GL_RED_BITS',
+ 'GL_RENDERBUFFER_BINDING',
+ 'GL_SAMPLE_BUFFERS',
+ 'GL_SAMPLE_COVERAGE_INVERT',
+ 'GL_SAMPLE_COVERAGE_VALUE',
+ 'GL_SAMPLES',
+ 'GL_SCISSOR_BOX',
+ 'GL_SHADER_BINARY_FORMATS',
+ 'GL_SHADER_COMPILER',
+ 'GL_SUBPIXEL_BITS',
+ 'GL_STENCIL_BITS',
+ 'GL_TEXTURE_BINDING_2D',
+ 'GL_TEXTURE_BINDING_CUBE_MAP',
+ 'GL_UNPACK_ALIGNMENT',
+ 'GL_BIND_GENERATES_RESOURCE_CHROMIUM',
+ # we can add this because we emulate it if the driver does not support it.
+ 'GL_VERTEX_ARRAY_BINDING_OES',
+ 'GL_VIEWPORT',
+ ],
+ 'valid_es3': [
+ 'GL_COPY_READ_BUFFER_BINDING',
+ 'GL_COPY_WRITE_BUFFER_BINDING',
+ 'GL_DRAW_BUFFER0',
+ 'GL_DRAW_BUFFER1',
+ 'GL_DRAW_BUFFER2',
+ 'GL_DRAW_BUFFER3',
+ 'GL_DRAW_BUFFER4',
+ 'GL_DRAW_BUFFER5',
+ 'GL_DRAW_BUFFER6',
+ 'GL_DRAW_BUFFER7',
+ 'GL_DRAW_BUFFER8',
+ 'GL_DRAW_BUFFER9',
+ 'GL_DRAW_BUFFER10',
+ 'GL_DRAW_BUFFER11',
+ 'GL_DRAW_BUFFER12',
+ 'GL_DRAW_BUFFER13',
+ 'GL_DRAW_BUFFER14',
+ 'GL_DRAW_BUFFER15',
+ 'GL_DRAW_FRAMEBUFFER_BINDING',
+ 'GL_FRAGMENT_SHADER_DERIVATIVE_HINT',
+ 'GL_GPU_DISJOINT_EXT',
+ 'GL_MAJOR_VERSION',
+ 'GL_MAX_3D_TEXTURE_SIZE',
+ 'GL_MAX_ARRAY_TEXTURE_LAYERS',
+ 'GL_MAX_COLOR_ATTACHMENTS',
+ 'GL_MAX_COMBINED_FRAGMENT_UNIFORM_COMPONENTS',
+ 'GL_MAX_COMBINED_UNIFORM_BLOCKS',
+ 'GL_MAX_COMBINED_VERTEX_UNIFORM_COMPONENTS',
+ 'GL_MAX_DRAW_BUFFERS',
+ 'GL_MAX_ELEMENT_INDEX',
+ 'GL_MAX_ELEMENTS_INDICES',
+ 'GL_MAX_ELEMENTS_VERTICES',
+ 'GL_MAX_FRAGMENT_INPUT_COMPONENTS',
+ 'GL_MAX_FRAGMENT_UNIFORM_BLOCKS',
+ 'GL_MAX_FRAGMENT_UNIFORM_COMPONENTS',
+ 'GL_MAX_PROGRAM_TEXEL_OFFSET',
+ 'GL_MAX_SAMPLES',
+ 'GL_MAX_SERVER_WAIT_TIMEOUT',
+ 'GL_MAX_TEXTURE_LOD_BIAS',
+ 'GL_MAX_TRANSFORM_FEEDBACK_INTERLEAVED_COMPONENTS',
+ 'GL_MAX_TRANSFORM_FEEDBACK_SEPARATE_ATTRIBS',
+ 'GL_MAX_TRANSFORM_FEEDBACK_SEPARATE_COMPONENTS',
+ 'GL_MAX_UNIFORM_BLOCK_SIZE',
+ 'GL_MAX_UNIFORM_BUFFER_BINDINGS',
+ 'GL_MAX_VARYING_COMPONENTS',
+ 'GL_MAX_VERTEX_OUTPUT_COMPONENTS',
+ 'GL_MAX_VERTEX_UNIFORM_BLOCKS',
+ 'GL_MAX_VERTEX_UNIFORM_COMPONENTS',
+ 'GL_MIN_PROGRAM_TEXEL_OFFSET',
+ 'GL_MINOR_VERSION',
+ 'GL_NUM_EXTENSIONS',
+ 'GL_NUM_PROGRAM_BINARY_FORMATS',
+ 'GL_PACK_ROW_LENGTH',
+ 'GL_PACK_SKIP_PIXELS',
+ 'GL_PACK_SKIP_ROWS',
+ 'GL_PIXEL_PACK_BUFFER_BINDING',
+ 'GL_PIXEL_UNPACK_BUFFER_BINDING',
+ 'GL_PROGRAM_BINARY_FORMATS',
+ 'GL_READ_BUFFER',
+ 'GL_READ_FRAMEBUFFER_BINDING',
+ 'GL_SAMPLER_BINDING',
+ 'GL_TIMESTAMP_EXT',
+ 'GL_TEXTURE_BINDING_2D_ARRAY',
+ 'GL_TEXTURE_BINDING_3D',
+ 'GL_TRANSFORM_FEEDBACK_BINDING',
+ 'GL_TRANSFORM_FEEDBACK_ACTIVE',
+ 'GL_TRANSFORM_FEEDBACK_BUFFER_BINDING',
+ 'GL_TRANSFORM_FEEDBACK_PAUSED',
+ 'GL_TRANSFORM_FEEDBACK_BUFFER_SIZE',
+ 'GL_TRANSFORM_FEEDBACK_BUFFER_START',
+ 'GL_UNIFORM_BUFFER_BINDING',
+ 'GL_UNIFORM_BUFFER_OFFSET_ALIGNMENT',
+ 'GL_UNIFORM_BUFFER_SIZE',
+ 'GL_UNIFORM_BUFFER_START',
+ 'GL_UNPACK_IMAGE_HEIGHT',
+ 'GL_UNPACK_ROW_LENGTH',
+ 'GL_UNPACK_SKIP_IMAGES',
+ 'GL_UNPACK_SKIP_PIXELS',
+ 'GL_UNPACK_SKIP_ROWS',
+ # GL_VERTEX_ARRAY_BINDING is the same as GL_VERTEX_ARRAY_BINDING_OES
+ # 'GL_VERTEX_ARRAY_BINDING',
+ ],
+ 'invalid': [
+ 'GL_FOG_HINT',
+ ],
+ },
+ 'BufferUsage': {
+ 'type': 'GLenum',
+ 'is_complete': True,
+ 'valid': [
+ 'GL_STREAM_DRAW',
+ 'GL_STATIC_DRAW',
+ 'GL_DYNAMIC_DRAW',
+ ],
+ 'valid_es3': [
+ 'GL_STREAM_READ',
+ 'GL_STREAM_COPY',
+ 'GL_STATIC_READ',
+ 'GL_STATIC_COPY',
+ 'GL_DYNAMIC_READ',
+ 'GL_DYNAMIC_COPY',
+ ],
+ 'invalid': [
+ 'GL_NONE',
+ ],
+ },
+ 'TextureTarget': {
+ 'type': 'GLenum',
+ 'valid': [
+ 'GL_TEXTURE_2D',
+ 'GL_TEXTURE_CUBE_MAP_POSITIVE_X',
+ 'GL_TEXTURE_CUBE_MAP_NEGATIVE_X',
+ 'GL_TEXTURE_CUBE_MAP_POSITIVE_Y',
+ 'GL_TEXTURE_CUBE_MAP_NEGATIVE_Y',
+ 'GL_TEXTURE_CUBE_MAP_POSITIVE_Z',
+ 'GL_TEXTURE_CUBE_MAP_NEGATIVE_Z',
+ ],
+ 'invalid': [
+ 'GL_PROXY_TEXTURE_CUBE_MAP',
+ ]
+ },
+ 'TextureBindTarget': {
+ 'type': 'GLenum',
+ 'valid': [
+ 'GL_TEXTURE_2D',
+ 'GL_TEXTURE_CUBE_MAP',
+ ],
+ 'valid_es3': [
+ 'GL_TEXTURE_3D',
+ 'GL_TEXTURE_2D_ARRAY',
+ ],
+ 'invalid': [
+ 'GL_TEXTURE_1D',
+ 'GL_TEXTURE_3D',
+ ],
+ },
+ 'QueryObjectParameter': {
+ 'type': 'GLenum',
+ 'is_complete': True,
+ 'valid': [
+ 'GL_QUERY_RESULT_EXT',
+ 'GL_QUERY_RESULT_AVAILABLE_EXT',
+ ],
+ },
+ 'QueryTarget': {
+ 'type': 'GLenum',
+ 'is_complete': True,
+ 'valid': [
+ 'GL_SAMPLES_PASSED_ARB',
+ 'GL_ANY_SAMPLES_PASSED_EXT',
+ 'GL_ANY_SAMPLES_PASSED_CONSERVATIVE_EXT',
+ 'GL_COMMANDS_ISSUED_CHROMIUM',
+ 'GL_LATENCY_QUERY_CHROMIUM',
+ 'GL_ASYNC_PIXEL_PACK_COMPLETED_CHROMIUM',
+ 'GL_COMMANDS_COMPLETED_CHROMIUM',
+ ],
+ },
+ 'TextureParameter': {
+ 'type': 'GLenum',
+ 'valid': [
+ 'GL_TEXTURE_MAG_FILTER',
+ 'GL_TEXTURE_MIN_FILTER',
+ 'GL_TEXTURE_WRAP_S',
+ 'GL_TEXTURE_WRAP_T',
+ ],
+ 'valid_es3': [
+ 'GL_TEXTURE_BASE_LEVEL',
+ 'GL_TEXTURE_COMPARE_FUNC',
+ 'GL_TEXTURE_COMPARE_MODE',
+ 'GL_TEXTURE_IMMUTABLE_FORMAT',
+ 'GL_TEXTURE_IMMUTABLE_LEVELS',
+ 'GL_TEXTURE_MAX_LEVEL',
+ 'GL_TEXTURE_MAX_LOD',
+ 'GL_TEXTURE_MIN_LOD',
+ 'GL_TEXTURE_WRAP_R',
+ ],
+ 'invalid': [
+ 'GL_GENERATE_MIPMAP',
+ ],
+ },
+ 'PixelStore': {
+ 'type': 'GLenum',
+ 'valid': [
+ 'GL_PACK_ALIGNMENT',
+ 'GL_UNPACK_ALIGNMENT',
+ ],
+ 'valid_es3': [
+ 'GL_PACK_ROW_LENGTH',
+ 'GL_PACK_SKIP_PIXELS',
+ 'GL_PACK_SKIP_ROWS',
+ 'GL_UNPACK_ROW_LENGTH',
+ 'GL_UNPACK_IMAGE_HEIGHT',
+ 'GL_UNPACK_SKIP_PIXELS',
+ 'GL_UNPACK_SKIP_ROWS',
+ 'GL_UNPACK_SKIP_IMAGES',
+ ],
+ 'invalid': [
+ 'GL_PACK_SWAP_BYTES',
+ 'GL_UNPACK_SWAP_BYTES',
+ ],
+ },
+ 'PixelStoreAlignment': {
+ 'type': 'GLint',
+ 'is_complete': True,
+ 'valid': [
+ '1',
+ '2',
+ '4',
+ '8',
+ ],
+ 'invalid': [
+ '3',
+ '9',
+ ],
+ },
+ 'PixelType': {
+ 'type': 'GLenum',
+ 'valid': [
+ 'GL_UNSIGNED_BYTE',
+ 'GL_UNSIGNED_SHORT_5_6_5',
+ 'GL_UNSIGNED_SHORT_4_4_4_4',
+ 'GL_UNSIGNED_SHORT_5_5_5_1',
+ ],
+ 'valid_es3': [
+ 'GL_BYTE',
+ 'GL_UNSIGNED_SHORT',
+ 'GL_SHORT',
+ 'GL_UNSIGNED_INT',
+ 'GL_INT',
+ 'GL_HALF_FLOAT',
+ 'GL_FLOAT',
+ 'GL_UNSIGNED_INT_2_10_10_10_REV',
+ 'GL_UNSIGNED_INT_10F_11F_11F_REV',
+ 'GL_UNSIGNED_INT_5_9_9_9_REV',
+ 'GL_UNSIGNED_INT_24_8',
+ 'GL_FLOAT_32_UNSIGNED_INT_24_8_REV',
+ ],
+ 'invalid': [
+ 'GL_UNSIGNED_BYTE_3_3_2',
+ ],
+ },
+ 'TextureFormat': {
+ 'type': 'GLenum',
+ 'valid': [
+ 'GL_ALPHA',
+ 'GL_LUMINANCE',
+ 'GL_LUMINANCE_ALPHA',
+ 'GL_RGB',
+ 'GL_RGBA',
+ ],
+ 'valid_es3': [
+ 'GL_RED',
+ 'GL_RED_INTEGER',
+ 'GL_RG',
+ 'GL_RG_INTEGER',
+ 'GL_RGB_INTEGER',
+ 'GL_RGBA_INTEGER',
+ 'GL_DEPTH_COMPONENT',
+ 'GL_DEPTH_STENCIL',
+ ],
+ 'invalid': [
+ 'GL_BGRA',
+ 'GL_BGR',
+ ],
+ },
+ 'TextureInternalFormat': {
+ 'type': 'GLenum',
+ 'valid': [
+ 'GL_ALPHA',
+ 'GL_LUMINANCE',
+ 'GL_LUMINANCE_ALPHA',
+ 'GL_RGB',
+ 'GL_RGBA',
+ ],
+ 'valid_es3': [
+ 'GL_R8',
+ 'GL_R8_SNORM',
+ 'GL_R16F',
+ 'GL_R32F',
+ 'GL_R8UI',
+ 'GL_R8I',
+ 'GL_R16UI',
+ 'GL_R16I',
+ 'GL_R32UI',
+ 'GL_R32I',
+ 'GL_RG8',
+ 'GL_RG8_SNORM',
+ 'GL_RG16F',
+ 'GL_RG32F',
+ 'GL_RG8UI',
+ 'GL_RG8I',
+ 'GL_RG16UI',
+ 'GL_RG16I',
+ 'GL_RG32UI',
+ 'GL_RG32I',
+ 'GL_RGB8',
+ 'GL_SRGB8',
+ 'GL_RGB565',
+ 'GL_RGB8_SNORM',
+ 'GL_R11F_G11F_B10F',
+ 'GL_RGB9_E5',
+ 'GL_RGB16F',
+ 'GL_RGB32F',
+ 'GL_RGB8UI',
+ 'GL_RGB8I',
+ 'GL_RGB16UI',
+ 'GL_RGB16I',
+ 'GL_RGB32UI',
+ 'GL_RGB32I',
+ 'GL_RGBA8',
+ 'GL_SRGB8_ALPHA8',
+ 'GL_RGBA8_SNORM',
+ 'GL_RGB5_A1',
+ 'GL_RGBA4',
+ 'GL_RGB10_A2',
+ 'GL_RGBA16F',
+ 'GL_RGBA32F',
+ 'GL_RGBA8UI',
+ 'GL_RGBA8I',
+ 'GL_RGB10_A2UI',
+ 'GL_RGBA16UI',
+ 'GL_RGBA16I',
+ 'GL_RGBA32UI',
+ 'GL_RGBA32I',
+ # The DEPTH/STENCIL formats are not supported in CopyTexImage2D.
+ # We will reject them dynamically in GPU command buffer.
+ 'GL_DEPTH_COMPONENT16',
+ 'GL_DEPTH_COMPONENT24',
+ 'GL_DEPTH_COMPONENT32F',
+ 'GL_DEPTH24_STENCIL8',
+ 'GL_DEPTH32F_STENCIL8',
+ ],
+ 'invalid': [
+ 'GL_BGRA',
+ 'GL_BGR',
+ ],
+ },
+ 'TextureInternalFormatStorage': {
+ 'type': 'GLenum',
+ 'valid': [
+ 'GL_RGB565',
+ 'GL_RGBA4',
+ 'GL_RGB5_A1',
+ 'GL_ALPHA8_EXT',
+ 'GL_LUMINANCE8_EXT',
+ 'GL_LUMINANCE8_ALPHA8_EXT',
+ 'GL_RGB8_OES',
+ 'GL_RGBA8_OES',
+ ],
+ 'valid_es3': [
+ 'GL_R8',
+ 'GL_R8_SNORM',
+ 'GL_R16F',
+ 'GL_R32F',
+ 'GL_R8UI',
+ 'GL_R8I',
+ 'GL_R16UI',
+ 'GL_R16I',
+ 'GL_R32UI',
+ 'GL_R32I',
+ 'GL_RG8',
+ 'GL_RG8_SNORM',
+ 'GL_RG16F',
+ 'GL_RG32F',
+ 'GL_RG8UI',
+ 'GL_RG8I',
+ 'GL_RG16UI',
+ 'GL_RG16I',
+ 'GL_RG32UI',
+ 'GL_RG32I',
+ 'GL_RGB8',
+ 'GL_SRGB8',
+ 'GL_RGB8_SNORM',
+ 'GL_R11F_G11F_B10F',
+ 'GL_RGB9_E5',
+ 'GL_RGB16F',
+ 'GL_RGB32F',
+ 'GL_RGB8UI',
+ 'GL_RGB8I',
+ 'GL_RGB16UI',
+ 'GL_RGB16I',
+ 'GL_RGB32UI',
+ 'GL_RGB32I',
+ 'GL_RGBA8',
+ 'GL_SRGB8_ALPHA8',
+ 'GL_RGBA8_SNORM',
+ 'GL_RGB10_A2',
+ 'GL_RGBA16F',
+ 'GL_RGBA32F',
+ 'GL_RGBA8UI',
+ 'GL_RGBA8I',
+ 'GL_RGB10_A2UI',
+ 'GL_RGBA16UI',
+ 'GL_RGBA16I',
+ 'GL_RGBA32UI',
+ 'GL_RGBA32I',
+ 'GL_DEPTH_COMPONENT16',
+ 'GL_DEPTH_COMPONENT24',
+ 'GL_DEPTH_COMPONENT32F',
+ 'GL_DEPTH24_STENCIL8',
+ 'GL_DEPTH32F_STENCIL8',
+ ],
+ 'deprecated_es3': [
+ 'GL_ALPHA8_EXT',
+ 'GL_LUMINANCE8_EXT',
+ 'GL_LUMINANCE8_ALPHA8_EXT',
+ 'GL_ALPHA16F_EXT',
+ 'GL_LUMINANCE16F_EXT',
+ 'GL_LUMINANCE_ALPHA16F_EXT',
+ 'GL_ALPHA32F_EXT',
+ 'GL_LUMINANCE32F_EXT',
+ 'GL_LUMINANCE_ALPHA32F_EXT',
+ ],
+ },
+ 'TextureBorder': {
+ 'type': 'GLint',
+ 'is_complete': True,
+ 'valid': [
+ '0',
+ ],
+ 'invalid': [
+ '1',
+ ],
+ },
+ 'ResetStatus': {
+ 'type': 'GLenum',
+ 'is_complete': True,
+ 'valid': [
+ 'GL_GUILTY_CONTEXT_RESET_ARB',
+ 'GL_INNOCENT_CONTEXT_RESET_ARB',
+ 'GL_UNKNOWN_CONTEXT_RESET_ARB',
+ ],
+ },
+ 'ClientBufferUsage': {
+ 'type': 'GLenum',
+ 'is_complete': True,
+ 'valid': [
+ 'GL_SCANOUT_CHROMIUM',
+ ],
+ 'invalid': [
+ 'GL_NONE',
+ ],
+ },
+}
+
+# A function info object specifies the type and other special data for the
+# command that will be generated. A base function info object is generated by
+# parsing the "raster_cmd_buffer_functions.txt", one for each function in the
+# file. These function info objects can be augmented and their values can be
+# overridden by adding an object to the table below.
+#
+# Must match function names specified in "raster_cmd_buffer_functions.txt".
+#
+# cmd_comment: A comment added to the cmd format.
+# type: defines which handler will be used to generate code.
+# decoder_func: defines which function to call in the decoder to execute the
+# corresponding GL command. If not specified the GL command will
+# be called directly.
+# gl_test_func: GL function that is expected to be called when testing.
+# cmd_args: The arguments to use for the command. This overrides generating
+# them based on the GL function arguments.
+# data_transfer_methods: Array of methods that are used for transferring the
+# pointer data. Possible values: 'immediate', 'shm', 'bucket'.
+# The default is 'immediate' if the command has one pointer
+# argument, otherwise 'shm'. One command is generated for each
+# transfer method. Affects only commands which are not of type
+# 'GETn' or 'GLcharN'.
+# Note: the command arguments that affect this are the final args,
+# taking cmd_args override into consideration.
+# impl_func: Whether or not to generate the GLES2Implementation part of this
+# command.
+# internal: If true, this is an internal command only, not exposed to the
+# client.
+# needs_size: If True a data_size field is added to the command.
+# count: The number of units per element. For PUTn or PUT types.
+# use_count_func: If True the actual data count needs to be computed; the count
+# argument specifies the maximum count.
+# unit_test: If False no service side unit test will be generated.
+# client_test: If False no client side unit test will be generated.
+# expectation: If False the unit test will have no expected calls.
+# gen_func: Name of function that generates GL resource for corresponding
+# bind function.
+# states: array of states that get set by this function corresponding to
+# the given arguments
+# state_flag: name of flag that is set to true when function is called.
+# no_gl: no GL function is called.
+# valid_args: A dictionary of argument indices to args to use in unit tests
+# when they can not be automatically determined.
+# pepper_interface: The pepper interface that is used for this extension
+# pepper_name: The name of the function as exposed to pepper.
+# pepper_args: A string representing the argument list (what would appear in
+# C/C++ between the parentheses for the function declaration)
+# that the Pepper API expects for this function. Use this only if
+# the stable Pepper API differs from the GLES2 argument list.
+# invalid_test: False if no invalid test needed.
+# shadowed: True = the value is shadowed so no glGetXXX call will be made.
+# first_element_only: For PUT types, True if only the first element of an
+# array is used and we end up calling the single value
+# corresponding function. eg. TexParameteriv -> TexParameteri
+# extension: Function is an extension to GL and should not be exposed to
+# pepper unless pepper_interface is defined.
+# extension_flag: Function is an extension and should be enabled only when
+# the corresponding feature info flag is enabled. Implies
+# 'extension': True.
+# not_shared: For GENn types, True if objects can't be shared between contexts
+# es3: ES3 API. True if the function requires an ES3 or WebGL2 context.
+
+_FUNCTION_INFO = {
+ # TODO(backer): Kept for unittests remove once new raster API implemented.
+ 'BindTexture': {
+ 'type': 'Bind',
+ 'internal' : True,
+ 'decoder_func': 'DoBindTexture',
+ 'gen_func': 'GenTextures',
+ # TODO: remove this once client side caching works.
+ 'client_test': False,
+ 'unit_test': False,
+ 'trace_level': 2,
+ },
+ 'CompressedTexSubImage2D': {
+ 'type': 'Custom',
+ 'data_transfer_methods': ['bucket', 'shm'],
+ 'trace_level': 1,
+ },
+ 'CopyTexImage2D': {
+ 'decoder_func': 'DoCopyTexImage2D',
+ 'unit_test': False,
+ 'trace_level': 1,
+ },
+ 'CopyTexSubImage2D': {
+ 'decoder_func': 'DoCopyTexSubImage2D',
+ 'trace_level': 1,
+ },
+ 'CreateImageCHROMIUM': {
+ 'type': 'NoCommand',
+ 'cmd_args':
+ 'ClientBuffer buffer, GLsizei width, GLsizei height, '
+ 'GLenum internalformat',
+ 'result': ['GLuint'],
+ 'extension': "CHROMIUM_image",
+ 'trace_level': 1,
+ },
+ 'DestroyImageCHROMIUM': {
+ 'type': 'NoCommand',
+ 'extension': "CHROMIUM_image",
+ 'trace_level': 1,
+ },
+ 'DeleteTextures': {
+ 'type': 'DELn',
+ 'resource_type': 'Texture',
+ 'resource_types': 'Textures',
+ },
+ 'Finish': {
+ 'impl_func': False,
+ 'client_test': False,
+ 'decoder_func': 'DoFinish',
+ 'trace_level': 1,
+ },
+ 'Flush': {
+ 'impl_func': False,
+ 'decoder_func': 'DoFlush',
+ 'trace_level': 1,
+ },
+ # TODO(backer): Kept for unittests remove once new raster API implemented.
+ 'GenTextures': {
+ 'type': 'GENn',
+ 'internal' : True,
+ 'gl_test_func': 'glGenTextures',
+ 'resource_type': 'Texture',
+ 'resource_types': 'Textures',
+ },
+ 'GetError': {
+ 'type': 'Is',
+ 'decoder_func': 'GetErrorState()->GetGLError',
+ 'impl_func': False,
+ 'result': ['GLenum'],
+ 'client_test': False,
+ },
+ 'GetGraphicsResetStatusKHR': {
+ 'type': 'NoCommand',
+ 'extension': True,
+ 'trace_level': 1,
+ },
+ 'GetIntegerv': {
+ 'type': 'GETn',
+ 'result': ['SizedResult<GLint>'],
+ 'decoder_func': 'DoGetIntegerv',
+ 'client_test': False,
+ },
+ 'TexParameteri': {
+ 'decoder_func': 'DoTexParameteri',
+ 'valid_args': {
+ '2': 'GL_NEAREST'
+ },
+ },
+ 'WaitSync': {
+ 'type': 'Custom',
+ 'cmd_args': 'GLuint sync, GLbitfieldSyncFlushFlags flags, '
+ 'GLuint64 timeout',
+ 'impl_func': False,
+ 'client_test': False,
+ 'es3': True,
+ 'trace_level': 1,
+ },
+ 'CompressedCopyTextureCHROMIUM': {
+ 'decoder_func': 'DoCompressedCopyTextureCHROMIUM',
+ 'unit_test': False,
+ 'extension': 'CHROMIUM_copy_compressed_texture',
+ },
+ 'GenQueriesEXT': {
+ 'type': 'GENn',
+ 'gl_test_func': 'glGenQueriesARB',
+ 'resource_type': 'Query',
+ 'resource_types': 'Queries',
+ 'unit_test': False,
+ 'pepper_interface': 'Query',
+ 'not_shared': 'True',
+ 'extension': "occlusion_query_EXT",
+ },
+ 'DeleteQueriesEXT': {
+ 'type': 'DELn',
+ 'gl_test_func': 'glDeleteQueriesARB',
+ 'resource_type': 'Query',
+ 'resource_types': 'Queries',
+ 'unit_test': False,
+ 'pepper_interface': 'Query',
+ 'extension': "occlusion_query_EXT",
+ },
+ 'BeginQueryEXT': {
+ 'type': 'Custom',
+ 'impl_func': False,
+ 'cmd_args': 'GLenumQueryTarget target, GLidQuery id, void* sync_data',
+ 'data_transfer_methods': ['shm'],
+ 'gl_test_func': 'glBeginQuery',
+ 'pepper_interface': 'Query',
+ 'extension': "occlusion_query_EXT",
+ },
+ 'EndQueryEXT': {
+ 'type': 'Custom',
+ 'impl_func': False,
+ 'cmd_args': 'GLenumQueryTarget target, GLuint submit_count',
+ 'gl_test_func': 'glEndnQuery',
+ 'client_test': False,
+ 'pepper_interface': 'Query',
+ 'extension': "occlusion_query_EXT",
+ },
+ 'GetQueryObjectuivEXT': {
+ 'type': 'NoCommand',
+ 'gl_test_func': 'glGetQueryObjectuiv',
+ 'pepper_interface': 'Query',
+ 'extension': "occlusion_query_EXT",
+ },
+ 'ShallowFlushCHROMIUM': {
+ 'type': 'NoCommand',
+ 'extension': 'CHROMIUM_ordering_barrier',
+ },
+ 'OrderingBarrierCHROMIUM': {
+ 'type': 'NoCommand',
+ 'extension': 'CHROMIUM_ordering_barrier',
+ },
+ 'InsertFenceSyncCHROMIUM': {
+ 'type': 'Custom',
+ 'internal': True,
+ 'impl_func': False,
+ 'cmd_args': 'GLuint64 release_count',
+ 'extension': "CHROMIUM_sync_point",
+ 'trace_level': 1,
+ },
+ 'LoseContextCHROMIUM': {
+ 'decoder_func': 'DoLoseContextCHROMIUM',
+ 'unit_test': False,
+ 'extension': 'CHROMIUM_lose_context',
+ 'trace_level': 1,
+ },
+ 'GenSyncTokenCHROMIUM': {
+ 'type': 'NoCommand',
+ 'extension': "CHROMIUM_sync_point",
+ },
+ 'GenUnverifiedSyncTokenCHROMIUM': {
+ 'type': 'NoCommand',
+ 'extension': "CHROMIUM_sync_point",
+ },
+ 'VerifySyncTokensCHROMIUM' : {
+ 'type': 'NoCommand',
+ 'extension': "CHROMIUM_sync_point",
+ },
+ 'WaitSyncTokenCHROMIUM': {
+ 'type': 'Custom',
+ 'impl_func': False,
+ 'cmd_args': 'GLint namespace_id, '
+ 'GLuint64 command_buffer_id, '
+ 'GLuint64 release_count',
+ 'client_test': False,
+ 'extension': "CHROMIUM_sync_point",
+ },
+ 'InitializeDiscardableTextureCHROMIUM': {
+ 'type': 'Custom',
+ 'cmd_args': 'GLuint texture_id, uint32_t shm_id, '
+ 'uint32_t shm_offset',
+ 'impl_func': False,
+ 'client_test': False,
+ 'extension': True,
+ },
+ 'UnlockDiscardableTextureCHROMIUM': {
+ 'type': 'Custom',
+ 'cmd_args': 'GLuint texture_id',
+ 'impl_func': False,
+ 'client_test': False,
+ 'extension': True,
+ },
+ 'LockDiscardableTextureCHROMIUM': {
+ 'type': 'Custom',
+ 'cmd_args': 'GLuint texture_id',
+ 'impl_func': False,
+ 'client_test': False,
+ 'extension': True,
+ },
+ 'BeginRasterCHROMIUM': {
+ 'decoder_func': 'DoBeginRasterCHROMIUM',
+ 'internal': True,
+ 'impl_func': False,
+ 'unit_test': False,
+ 'extension': 'CHROMIUM_raster_transport',
+ 'extension_flag': 'chromium_raster_transport',
+ },
+ 'RasterCHROMIUM': {
+ 'type': 'Data',
+ 'internal': True,
+ 'decoder_func': 'DoRasterCHROMIUM',
+ 'data_transfer_methods': ['shm'],
+ 'extension': 'CHROMIUM_raster_transport',
+ 'extension_flag': 'chromium_raster_transport',
+ },
+ 'EndRasterCHROMIUM': {
+ 'decoder_func': 'DoEndRasterCHROMIUM',
+ 'impl_func': True,
+ 'unit_test': False,
+ 'extension': 'CHROMIUM_raster_transport',
+ 'extension_flag': 'chromium_raster_transport',
+ },
+ 'CreateTransferCacheEntryINTERNAL': {
+ 'decoder_func': 'DoCreateTransferCacheEntryINTERNAL',
+ 'cmd_args': 'GLuint entry_type, GLuint entry_id, GLuint handle_shm_id, '
+ 'GLuint handle_shm_offset, GLuint data_shm_id, '
+ 'GLuint data_shm_offset, GLuint data_size',
+ 'internal': True,
+ 'impl_func': True,
+ 'client_test': False,
+ 'unit_test': False,
+ 'extension': True,
+ },
+ 'DeleteTransferCacheEntryINTERNAL': {
+ 'decoder_func': 'DoDeleteTransferCacheEntryINTERNAL',
+ 'cmd_args': 'GLuint entry_type, GLuint entry_id',
+ 'internal': True,
+ 'impl_func': True,
+ 'client_test': False,
+ 'unit_test': False,
+ 'extension': True,
+ },
+ 'UnlockTransferCacheEntryINTERNAL': {
+ 'decoder_func': 'DoUnlockTransferCacheEntryINTERNAL',
+ 'cmd_args': 'GLuint entry_type, GLuint entry_id',
+ 'internal': True,
+ 'impl_func': True,
+ 'client_test': False,
+ 'unit_test': False,
+ 'extension': True,
+ },
+ 'UnpremultiplyAndDitherCopyCHROMIUM': {
+ 'decoder_func': 'DoUnpremultiplyAndDitherCopyCHROMIUM',
+ 'cmd_args': 'GLuint source_id, GLuint dest_id, GLint x, GLint y, '
+ 'GLsizei width, GLsizei height',
+ 'client_test': False,
+ 'unit_test': False,
+ 'impl_func': True,
+ 'extension': 'CHROMIUM_unpremultiply_and_dither_copy',
+ 'extension_flag': 'unpremultiply_and_dither_copy',
+ },
+}
+
+
+def main(argv):
+ """This is the main function."""
+ parser = OptionParser()
+ parser.add_option(
+ "--output-dir",
+ help="base directory for resulting files, under chrome/src. default is "
+ "empty. Use this if you want the result stored under gen.")
+ parser.add_option(
+ "-v", "--verbose", action="store_true",
+ help="prints more output.")
+
+ (options, _) = parser.parse_args(args=argv)
+
+ # This script lives under gpu/command_buffer, cd to base directory.
+ os.chdir(os.path.dirname(__file__) + "/../..")
+ base_dir = os.getcwd()
+ build_cmd_buffer_lib.InitializePrefix("Raster")
+ gen = build_cmd_buffer_lib.GLGenerator(options.verbose, "2018",
+ _FUNCTION_INFO, _NAMED_TYPE_INFO,
+ _STATE_INFO, _CAPABILITY_FLAGS)
+ gen.ParseGLH("gpu/command_buffer/raster_cmd_buffer_functions.txt")
+
+ # Support generating files under gen/
+ if options.output_dir != None:
+ os.chdir(options.output_dir)
+
+ os.chdir(base_dir)
+
+ # TODO(backer): Uncomment once the output looks good.
+ gen.WriteCommandIds("gpu/command_buffer/common/raster_cmd_ids_autogen.h")
+ gen.WriteFormat("gpu/command_buffer/common/raster_cmd_format_autogen.h")
+ gen.WriteFormatTest(
+ "gpu/command_buffer/common/raster_cmd_format_test_autogen.h")
+ gen.WriteGLES2InterfaceHeader(
+ "gpu/command_buffer/client/raster_interface_autogen.h")
+ # gen.WriteGLES2InterfaceStub(
+ # "gpu/command_buffer/client/raster_interface_stub_autogen.h")
+ # gen.WriteGLES2InterfaceStubImpl(
+ # "gpu/command_buffer/client/raster_interface_stub_impl_autogen.h")
+ gen.WriteGLES2ImplementationHeader(
+ "gpu/command_buffer/client/raster_implementation_autogen.h")
+ gen.WriteGLES2Implementation(
+ "gpu/command_buffer/client/raster_implementation_impl_autogen.h")
+ gen.WriteGLES2ImplementationUnitTests(
+ "gpu/command_buffer/client/raster_implementation_unittest_autogen.h")
+ # gen.WriteGLES2TraceImplementationHeader(
+ # "gpu/command_buffer/client/raster_trace_implementation_autogen.h")
+ # gen.WriteGLES2TraceImplementation(
+ # "gpu/command_buffer/client/raster_trace_implementation_impl_autogen.h")
+ # gen.WriteGLES2CLibImplementation(
+ # "gpu/command_buffer/client/raster_c_lib_autogen.h")
+ gen.WriteCmdHelperHeader(
+ "gpu/command_buffer/client/raster_cmd_helper_autogen.h")
+ gen.WriteServiceImplementation(
+ "gpu/command_buffer/service/raster_decoder_autogen.h")
+ gen.WriteServiceUnitTests(
+ "gpu/command_buffer/service/raster_decoder_unittest_%d_autogen.h")
+ # gen.WriteServiceUnitTestsForExtensions(
+ # "gpu/command_buffer/service/"
+ # "raster_cmd_decoder_unittest_extensions_autogen.h")
+ # gen.WriteServiceUtilsHeader(
+ # "gpu/command_buffer/service/raster_cmd_validation_autogen.h")
+ # gen.WriteServiceUtilsImplementation(
+ # "gpu/command_buffer/service/"
+ # "raster_cmd_validation_implementation_autogen.h")
+
+ build_cmd_buffer_lib.Format(gen.generated_cpp_filenames)
+
+ if gen.errors > 0:
+ print "%d errors" % gen.errors
+ return 1
+ return 0
+
+
+if __name__ == '__main__':
+ sys.exit(main(sys.argv[1:]))
diff --git a/chromium/gpu/command_buffer/client/BUILD.gn b/chromium/gpu/command_buffer/client/BUILD.gn
index e0475003fd1..383839d6218 100644
--- a/chromium/gpu/command_buffer/client/BUILD.gn
+++ b/chromium/gpu/command_buffer/client/BUILD.gn
@@ -29,6 +29,18 @@ group("gles2_cmd_helper") {
}
}
+group("raster") {
+ if (is_component_build) {
+ public_deps = [
+ "//gpu:raster",
+ ]
+ } else {
+ public_deps = [
+ ":raster_sources",
+ ]
+ }
+}
+
source_set("client_sources") {
# External code should depend on this via //gpu/client above rather than
# depending on this directly or the component build will break.
@@ -115,6 +127,10 @@ gles2_implementation_source_files = [
"gles2_trace_implementation_impl_autogen.h",
"gpu_switches.cc",
"gpu_switches.h",
+ "implementation_base.cc",
+ "implementation_base.h",
+ "logging.cc",
+ "logging.h",
"program_info_manager.cc",
"program_info_manager.h",
"query_tracker.cc",
@@ -169,20 +185,48 @@ component("gles2_implementation") {
if (!is_nacl) {
deps += [
- ":raster_interface",
- "//cc/paint",
"//components/viz/common:resource_format",
"//ui/gfx:color_space",
"//ui/gfx/ipc/color",
]
-
- sources += [
- "raster_implementation_gles.cc",
- "raster_implementation_gles.h",
- ]
}
}
+source_set("raster_sources") {
+ # External code should depend on this via //gpu:raster above rather than
+ # depending on this directly or the component build will break.
+ visibility = [ "//gpu/*" ]
+ all_dependent_configs = [ "//third_party/khronos:khronos_headers" ]
+
+ configs += [ "//gpu:raster_implementation" ]
+ deps = [
+ ":client",
+ ":gles2_implementation",
+ ":gles2_interface",
+ ":raster_interface",
+ "//base",
+ "//cc/paint",
+ "//components/viz/common:resource_format",
+ "//gpu/command_buffer/common",
+ "//gpu/command_buffer/common:gles2",
+ "//gpu/command_buffer/common:gles2_utils",
+ "//ui/gfx:color_space",
+ "//ui/gfx/geometry",
+ "//ui/gfx/ipc/color",
+ ]
+ sources = [
+ "raster_cmd_helper.cc",
+ "raster_cmd_helper.h",
+ "raster_cmd_helper_autogen.h",
+ "raster_implementation.cc",
+ "raster_implementation.h",
+ "raster_implementation_autogen.h",
+ "raster_implementation_gles.cc",
+ "raster_implementation_gles.h",
+ "raster_implementation_impl_autogen.h",
+ ]
+}
+
# Library emulates GLES2 using command_buffers.
component("gles2_implementation_no_check") {
sources = gles2_implementation_source_files
diff --git a/chromium/gpu/command_buffer/client/client_discardable_texture_manager.cc b/chromium/gpu/command_buffer/client/client_discardable_texture_manager.cc
index 3446b3cfb55..cc336a570e7 100644
--- a/chromium/gpu/command_buffer/client/client_discardable_texture_manager.cc
+++ b/chromium/gpu/command_buffer/client/client_discardable_texture_manager.cc
@@ -6,6 +6,15 @@
namespace gpu {
+ClientDiscardableTextureManager::TextureEntry::TextureEntry(
+ ClientDiscardableHandle::Id id)
+ : id(id) {}
+ClientDiscardableTextureManager::TextureEntry::TextureEntry(
+ const TextureEntry& other) = default;
+ClientDiscardableTextureManager::TextureEntry&
+ClientDiscardableTextureManager::TextureEntry::operator=(
+ const TextureEntry& other) = default;
+
ClientDiscardableTextureManager::ClientDiscardableTextureManager() = default;
ClientDiscardableTextureManager::~ClientDiscardableTextureManager() = default;
@@ -13,56 +22,73 @@ ClientDiscardableHandle ClientDiscardableTextureManager::InitializeTexture(
CommandBuffer* command_buffer,
uint32_t texture_id) {
base::AutoLock hold(lock_);
- DCHECK(texture_id_to_handle_id_.find(texture_id) ==
- texture_id_to_handle_id_.end());
+ DCHECK(texture_entries_.find(texture_id) == texture_entries_.end());
ClientDiscardableHandle::Id handle_id =
discardable_manager_.CreateHandle(command_buffer);
if (handle_id.is_null())
return ClientDiscardableHandle();
- texture_id_to_handle_id_[texture_id] = handle_id;
+ texture_entries_.emplace(texture_id, TextureEntry(handle_id));
return discardable_manager_.GetHandle(handle_id);
}
bool ClientDiscardableTextureManager::LockTexture(uint32_t texture_id) {
base::AutoLock hold(lock_);
- auto found = texture_id_to_handle_id_.find(texture_id);
- DCHECK(found != texture_id_to_handle_id_.end());
- return discardable_manager_.LockHandle(found->second);
+ auto found = texture_entries_.find(texture_id);
+ DCHECK(found != texture_entries_.end());
+ TextureEntry& entry = found->second;
+ if (!discardable_manager_.LockHandle(entry.id)) {
+ DCHECK_EQ(0u, entry.client_lock_count);
+ return false;
+ }
+
+ ++entry.client_lock_count;
+ return true;
+}
+
+void ClientDiscardableTextureManager::UnlockTexture(
+ uint32_t texture_id,
+ bool* should_unbind_texture) {
+ base::AutoLock hold(lock_);
+ auto found = texture_entries_.find(texture_id);
+ DCHECK(found != texture_entries_.end());
+ TextureEntry& entry = found->second;
+ DCHECK_GT(entry.client_lock_count, 0u);
+ --entry.client_lock_count;
+ *should_unbind_texture = (0u == entry.client_lock_count);
}
void ClientDiscardableTextureManager::FreeTexture(uint32_t texture_id) {
base::AutoLock hold(lock_);
- auto found = texture_id_to_handle_id_.find(texture_id);
- if (found == texture_id_to_handle_id_.end())
+ auto found = texture_entries_.find(texture_id);
+ if (found == texture_entries_.end())
return;
- ClientDiscardableHandle::Id discardable_id = found->second;
- texture_id_to_handle_id_.erase(found);
+ ClientDiscardableHandle::Id discardable_id = found->second.id;
+ texture_entries_.erase(found);
return discardable_manager_.FreeHandle(discardable_id);
}
bool ClientDiscardableTextureManager::TextureIsValid(
uint32_t texture_id) const {
base::AutoLock hold(lock_);
- return texture_id_to_handle_id_.find(texture_id) !=
- texture_id_to_handle_id_.end();
+ return texture_entries_.find(texture_id) != texture_entries_.end();
}
bool ClientDiscardableTextureManager::TextureIsDeletedForTracing(
uint32_t texture_id) const {
base::AutoLock hold(lock_);
- auto found = texture_id_to_handle_id_.find(texture_id);
- if (found == texture_id_to_handle_id_.end())
+ auto found = texture_entries_.find(texture_id);
+ if (found == texture_entries_.end())
return true;
- return discardable_manager_.HandleIsDeletedForTracing(found->second);
+ return discardable_manager_.HandleIsDeletedForTracing(found->second.id);
}
ClientDiscardableHandle ClientDiscardableTextureManager::GetHandleForTesting(
uint32_t texture_id) {
base::AutoLock hold(lock_);
- auto found = texture_id_to_handle_id_.find(texture_id);
- DCHECK(found != texture_id_to_handle_id_.end());
- return discardable_manager_.GetHandle(found->second);
+ auto found = texture_entries_.find(texture_id);
+ DCHECK(found != texture_entries_.end());
+ return discardable_manager_.GetHandle(found->second.id);
}
} // namespace gpu
diff --git a/chromium/gpu/command_buffer/client/client_discardable_texture_manager.h b/chromium/gpu/command_buffer/client/client_discardable_texture_manager.h
index b9ba5133b5b..9460bd641a6 100644
--- a/chromium/gpu/command_buffer/client/client_discardable_texture_manager.h
+++ b/chromium/gpu/command_buffer/client/client_discardable_texture_manager.h
@@ -27,6 +27,7 @@ class GPU_EXPORT ClientDiscardableTextureManager {
ClientDiscardableHandle InitializeTexture(CommandBuffer* command_buffer,
uint32_t texture_id);
bool LockTexture(uint32_t texture_id);
+ void UnlockTexture(uint32_t texture_id, bool* should_unbind_texture);
// Must be called by the GLES2Implementation when a texture is being deleted
// to allow tracking memory to be reclaimed.
void FreeTexture(uint32_t texture_id);
@@ -42,9 +43,20 @@ class GPU_EXPORT ClientDiscardableTextureManager {
ClientDiscardableHandle GetHandleForTesting(uint32_t texture_id);
private:
+ struct TextureEntry {
+ TextureEntry(ClientDiscardableHandle::Id id);
+ TextureEntry(const TextureEntry& other);
+ TextureEntry& operator=(const TextureEntry& other);
+
+ ClientDiscardableHandle::Id id;
+ // Tracks the lock count of the given texture. Used to unbind the
+ // texture once it is fully unlocked.
+ uint32_t client_lock_count = 1;
+ };
+
// Access to other members must always be done with |lock_| held.
mutable base::Lock lock_;
- std::map<uint32_t, ClientDiscardableHandle::Id> texture_id_to_handle_id_;
+ std::map<uint32_t, TextureEntry> texture_entries_;
ClientDiscardableManager discardable_manager_;
DISALLOW_COPY_AND_ASSIGN(ClientDiscardableTextureManager);
diff --git a/chromium/gpu/command_buffer/client/client_test_helper.h b/chromium/gpu/command_buffer/client/client_test_helper.h
index d7b46885c4f..f54a44f2e5d 100644
--- a/chromium/gpu/command_buffer/client/client_test_helper.h
+++ b/chromium/gpu/command_buffer/client/client_test_helper.h
@@ -109,8 +109,14 @@ class MockClientGpuControl : public GpuControl {
size_t height,
unsigned internalformat));
MOCK_METHOD1(DestroyImage, void(int32_t id));
- MOCK_METHOD2(SignalQuery,
- void(uint32_t query, const base::Closure& callback));
+
+ // Workaround for move-only args in GMock.
+ MOCK_METHOD2(DoSignalQuery,
+ void(uint32_t query, base::OnceClosure* callback));
+ void SignalQuery(uint32_t query, base::OnceClosure callback) override {
+ DoSignalQuery(query, &callback);
+ }
+
MOCK_METHOD1(CreateStreamTexture, uint32_t(uint32_t));
MOCK_METHOD1(SetLock, void(base::Lock*));
MOCK_METHOD0(EnsureWorkVisible, void());
@@ -119,8 +125,15 @@ class MockClientGpuControl : public GpuControl {
MOCK_METHOD0(FlushPendingWork, void());
MOCK_METHOD0(GenerateFenceSyncRelease, uint64_t());
MOCK_METHOD1(IsFenceSyncReleased, bool(uint64_t release));
- MOCK_METHOD2(SignalSyncToken, void(const SyncToken& sync_token,
- const base::Closure& callback));
+
+ // Workaround for move-only args in GMock.
+ MOCK_METHOD2(DoSignalSyncToken,
+ void(const SyncToken& sync_token, base::OnceClosure* callback));
+ void SignalSyncToken(const SyncToken& sync_token,
+ base::OnceClosure callback) override {
+ DoSignalSyncToken(sync_token, &callback);
+ }
+
MOCK_METHOD1(WaitSyncTokenHint, void(const SyncToken&));
MOCK_METHOD1(CanWaitUnverifiedSyncToken, bool(const SyncToken&));
MOCK_METHOD0(SetSnapshotRequested, void());
diff --git a/chromium/gpu/command_buffer/client/client_transfer_cache.cc b/chromium/gpu/command_buffer/client/client_transfer_cache.cc
index 701add85bc4..e91703fd541 100644
--- a/chromium/gpu/command_buffer/client/client_transfer_cache.cc
+++ b/chromium/gpu/command_buffer/client/client_transfer_cache.cc
@@ -3,45 +3,51 @@
// found in the LICENSE file.
#include "gpu/command_buffer/client/client_transfer_cache.h"
-#include "gpu/command_buffer/client/gles2_cmd_helper.h"
-#include "gpu/command_buffer/client/mapped_memory.h"
namespace gpu {
-ClientTransferCache::ClientTransferCache() = default;
+ClientTransferCache::ClientTransferCache(Client* client) : client_(client) {}
+
ClientTransferCache::~ClientTransferCache() = default;
-void ClientTransferCache::CreateCacheEntry(
- gles2::GLES2CmdHelper* helper,
- MappedMemoryManager* mapped_memory,
- const cc::ClientTransferCacheEntry& entry) {
+void* ClientTransferCache::MapEntry(MappedMemoryManager* mapped_memory,
+ size_t size) {
+ DCHECK(!mapped_ptr_);
+ mapped_ptr_.emplace(size, client_->cmd_buffer_helper(), mapped_memory);
+ if (!mapped_ptr_->valid()) {
+ mapped_ptr_ = base::nullopt;
+ return nullptr;
+ } else {
+ return mapped_ptr_->address();
+ }
+}
+
+void ClientTransferCache::UnmapAndCreateEntry(uint32_t type, uint32_t id) {
+ DCHECK(mapped_ptr_);
+ EntryKey key(type, id);
+
base::AutoLock hold(lock_);
- ScopedMappedMemoryPtr mapped_alloc(entry.SerializedSize(), helper,
- mapped_memory);
- DCHECK(mapped_alloc.valid());
- bool succeeded = entry.Serialize(base::make_span(
- reinterpret_cast<uint8_t*>(mapped_alloc.address()), mapped_alloc.size()));
- DCHECK(succeeded);
-
- ClientDiscardableHandle::Id id =
- discardable_manager_.CreateHandle(helper->command_buffer());
- ClientDiscardableHandle handle = discardable_manager_.GetHandle(id);
-
- // Store the mapping from the given namespace/id to the transfer cache id.
- DCHECK(FindDiscardableHandleId(entry.Type(), entry.Id()).is_null());
- DiscardableHandleIdMap(entry.Type())[entry.Id()] = id;
-
- helper->CreateTransferCacheEntryINTERNAL(
- static_cast<uint32_t>(entry.Type()), entry.Id(), handle.shm_id(),
- handle.byte_offset(), mapped_alloc.shm_id(), mapped_alloc.offset(),
- mapped_alloc.size());
+ ClientDiscardableHandle::Id discardable_handle_id =
+ discardable_manager_.CreateHandle(client_->command_buffer());
+ ClientDiscardableHandle handle =
+ discardable_manager_.GetHandle(discardable_handle_id);
+
+ // Store the mapping from the given entry's (type, id) key to the
+ // discardable handle id allocated for it.
+ DCHECK(FindDiscardableHandleId(key).is_null());
+ discardable_handle_id_map_.emplace(key, discardable_handle_id);
+
+ client_->IssueCreateTransferCacheEntry(
+ type, id, handle.shm_id(), handle.byte_offset(), mapped_ptr_->shm_id(),
+ mapped_ptr_->offset(), mapped_ptr_->size());
+ mapped_ptr_ = base::nullopt;
}
-bool ClientTransferCache::LockTransferCacheEntry(
- cc::TransferCacheEntryType type,
- uint32_t id) {
+bool ClientTransferCache::LockEntry(uint32_t type, uint32_t id) {
+ EntryKey key(type, id);
+
base::AutoLock hold(lock_);
- auto discardable_handle_id = FindDiscardableHandleId(type, id);
+ auto discardable_handle_id = FindDiscardableHandleId(key);
if (discardable_handle_id.is_null())
return false;
@@ -49,55 +55,38 @@ bool ClientTransferCache::LockTransferCacheEntry(
return true;
// Could not lock. Entry is already deleted service side.
- DiscardableHandleIdMap(type).erase(id);
+ discardable_handle_id_map_.erase(key);
return false;
}
-void ClientTransferCache::UnlockTransferCacheEntries(
- gles2::GLES2CmdHelper* helper,
- const std::vector<std::pair<cc::TransferCacheEntryType, uint32_t>>&
- entries) {
+void ClientTransferCache::UnlockEntries(
+ const std::vector<std::pair<uint32_t, uint32_t>>& entries) {
base::AutoLock hold(lock_);
for (const auto& entry : entries) {
- auto type = entry.first;
- auto id = entry.second;
- DCHECK(!FindDiscardableHandleId(type, id).is_null());
- helper->UnlockTransferCacheEntryINTERNAL(static_cast<uint32_t>(type), id);
+ DCHECK(!FindDiscardableHandleId(entry).is_null());
+ client_->IssueUnlockTransferCacheEntry(entry.first, entry.second);
}
}
-void ClientTransferCache::DeleteTransferCacheEntry(
- gles2::GLES2CmdHelper* helper,
- cc::TransferCacheEntryType type,
- uint32_t id) {
+void ClientTransferCache::DeleteEntry(uint32_t type, uint32_t id) {
+ EntryKey key(type, id);
base::AutoLock hold(lock_);
- auto discardable_handle_id = FindDiscardableHandleId(type, id);
+ auto discardable_handle_id = FindDiscardableHandleId(key);
if (discardable_handle_id.is_null())
return;
discardable_manager_.FreeHandle(discardable_handle_id);
- helper->DeleteTransferCacheEntryINTERNAL(static_cast<uint32_t>(type), id);
- DiscardableHandleIdMap(type).erase(id);
+ client_->IssueDeleteTransferCacheEntry(type, id);
+ discardable_handle_id_map_.erase(key);
}
ClientDiscardableHandle::Id ClientTransferCache::FindDiscardableHandleId(
- cc::TransferCacheEntryType type,
- uint32_t id) {
+ const EntryKey& key) {
lock_.AssertAcquired();
- const auto& id_map = DiscardableHandleIdMap(type);
- auto id_map_it = id_map.find(id);
- if (id_map_it == id_map.end())
+ auto id_map_it = discardable_handle_id_map_.find(key);
+ if (id_map_it == discardable_handle_id_map_.end())
return ClientDiscardableHandle::Id();
return id_map_it->second;
}
-std::map<uint32_t, ClientDiscardableHandle::Id>&
-ClientTransferCache::DiscardableHandleIdMap(
- cc::TransferCacheEntryType entry_type) {
- lock_.AssertAcquired();
- DCHECK_LE(static_cast<uint32_t>(entry_type),
- static_cast<uint32_t>(cc::TransferCacheEntryType::kLast));
- return discardable_handle_id_map_[static_cast<uint32_t>(entry_type)];
-}
-
} // namespace gpu
diff --git a/chromium/gpu/command_buffer/client/client_transfer_cache.h b/chromium/gpu/command_buffer/client/client_transfer_cache.h
index ee5444e676b..315ff8a486f 100644
--- a/chromium/gpu/command_buffer/client/client_transfer_cache.h
+++ b/chromium/gpu/command_buffer/client/client_transfer_cache.h
@@ -7,16 +7,14 @@
#include <map>
+#include "base/optional.h"
#include "base/synchronization/lock.h"
-#include "cc/paint/transfer_cache_entry.h"
#include "gpu/command_buffer/client/client_discardable_manager.h"
#include "gpu/command_buffer/client/gles2_impl_export.h"
#include "gpu/command_buffer/client/gles2_interface.h"
+#include "gpu/command_buffer/client/mapped_memory.h"
namespace gpu {
-namespace gles2 {
-class GLES2CmdHelper;
-}
class MappedMemoryManager;
// ClientTransferCache allows for ClientTransferCacheEntries to be inserted
@@ -41,34 +39,46 @@ class MappedMemoryManager;
// the context lock be held.
class GLES2_IMPL_EXPORT ClientTransferCache {
public:
- ClientTransferCache();
+ class Client {
+ public:
+ virtual void IssueCreateTransferCacheEntry(GLuint entry_type,
+ GLuint entry_id,
+ GLuint handle_shm_id,
+ GLuint handle_shm_offset,
+ GLuint data_shm_id,
+ GLuint data_shm_offset,
+ GLuint data_size) = 0;
+ virtual void IssueDeleteTransferCacheEntry(GLuint entry_type,
+ GLuint entry_id) = 0;
+ virtual void IssueUnlockTransferCacheEntry(GLuint entry_type,
+ GLuint entry_id) = 0;
+ virtual CommandBufferHelper* cmd_buffer_helper() = 0;
+ virtual CommandBuffer* command_buffer() const = 0;
+ };
+
+ explicit ClientTransferCache(Client* client);
~ClientTransferCache();
- void CreateCacheEntry(gles2::GLES2CmdHelper* helper,
- MappedMemoryManager* mapped_memory,
- const cc::ClientTransferCacheEntry& entry);
- bool LockTransferCacheEntry(cc::TransferCacheEntryType type, uint32_t id);
- void UnlockTransferCacheEntries(
- gles2::GLES2CmdHelper* helper,
- const std::vector<std::pair<cc::TransferCacheEntryType, uint32_t>>&
- entries);
- void DeleteTransferCacheEntry(gles2::GLES2CmdHelper* helper,
- cc::TransferCacheEntryType type,
- uint32_t id);
+ void* MapEntry(MappedMemoryManager* mapped_memory, size_t size);
+ void UnmapAndCreateEntry(uint32_t type, uint32_t id);
+ bool LockEntry(uint32_t type, uint32_t id);
+ void UnlockEntries(const std::vector<std::pair<uint32_t, uint32_t>>& entries);
+ void DeleteEntry(uint32_t type, uint32_t id);
private:
- ClientDiscardableHandle::Id FindDiscardableHandleId(
- cc::TransferCacheEntryType type,
- uint32_t id);
+ using EntryKey = std::pair<uint32_t, uint32_t>;
+ ClientDiscardableHandle::Id FindDiscardableHandleId(const EntryKey& key);
+
+ Client* const client_; // not owned --- client_ outlives this
- std::map<uint32_t, ClientDiscardableHandle::Id>& DiscardableHandleIdMap(
- cc::TransferCacheEntryType entry_type);
+ base::Optional<ScopedMappedMemoryPtr> mapped_ptr_;
// Access to other members must always be done with |lock_| held.
base::Lock lock_;
ClientDiscardableManager discardable_manager_;
- std::map<uint32_t, ClientDiscardableHandle::Id> discardable_handle_id_map_
- [static_cast<uint32_t>(cc::TransferCacheEntryType::kLast) + 1];
+ std::map<EntryKey, ClientDiscardableHandle::Id> discardable_handle_id_map_;
+
+ DISALLOW_COPY_AND_ASSIGN(ClientTransferCache);
};
} // namespace gpu
diff --git a/chromium/gpu/command_buffer/client/cmd_buffer_helper.cc b/chromium/gpu/command_buffer/client/cmd_buffer_helper.cc
index a5247a092c4..6aa43745b66 100644
--- a/chromium/gpu/command_buffer/client/cmd_buffer_helper.cc
+++ b/chromium/gpu/command_buffer/client/cmd_buffer_helper.cc
@@ -168,6 +168,7 @@ bool CommandBufferHelper::WaitForGetOffsetInRange(int32_t start, int32_t end) {
}
void CommandBufferHelper::Flush() {
+ TRACE_EVENT0("gpu", "CommandBufferHelper::Flush");
// Wrap put_ before flush.
if (put_ == total_entry_count_)
put_ = 0;
diff --git a/chromium/gpu/command_buffer/client/command_buffer_direct_locked.h b/chromium/gpu/command_buffer/client/command_buffer_direct_locked.h
index 6a88e42b4a9..0d55f1186b0 100644
--- a/chromium/gpu/command_buffer/client/command_buffer_direct_locked.h
+++ b/chromium/gpu/command_buffer/client/command_buffer_direct_locked.h
@@ -2,6 +2,9 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#ifndef GPU_COMMAND_BUFFER_CLIENT_COMMAND_BUFFER_DIRECT_LOCKED_H_
+#define GPU_COMMAND_BUFFER_CLIENT_COMMAND_BUFFER_DIRECT_LOCKED_H_
+
#include "gpu/command_buffer/service/command_buffer_direct.h"
namespace gpu {
@@ -50,3 +53,5 @@ class CommandBufferDirectLocked : public CommandBufferDirect {
};
} // namespace gpu
+
+#endif // GPU_COMMAND_BUFFER_CLIENT_COMMAND_BUFFER_DIRECT_LOCKED_H_
diff --git a/chromium/gpu/command_buffer/client/context_support.h b/chromium/gpu/command_buffer/client/context_support.h
index 76ed7654e28..5f781981741 100644
--- a/chromium/gpu/command_buffer/client/context_support.h
+++ b/chromium/gpu/command_buffer/client/context_support.h
@@ -9,13 +9,8 @@
#include <vector>
#include "base/callback.h"
-#include "cc/paint/transfer_cache_entry.h"
#include "ui/gfx/overlay_transform.h"
-namespace cc {
-class ClientTransferCacheEntry;
-}
-
namespace gfx {
class GpuFence;
class Rect;
@@ -98,15 +93,25 @@ class ContextSupport {
// Access to transfer cache functionality for OOP raster. Only
// ThreadsafeLockTransferCacheEntry can be accessed without holding the
// context lock.
- virtual void CreateTransferCacheEntry(
- const cc::ClientTransferCacheEntry& entry) = 0;
- virtual bool ThreadsafeLockTransferCacheEntry(cc::TransferCacheEntryType type,
- uint32_t id) = 0;
+
+ // Maps a buffer that will receive serialized data for an entry to be created.
+ // Returns nullptr on failure. If success, must be paired with a call to
+ // UnmapAndCreateTransferCacheEntry.
+ virtual void* MapTransferCacheEntry(size_t serialized_size) = 0;
+
+ // Unmaps the buffer and creates a transfer cache entry with the serialized
+ // data.
+ virtual void UnmapAndCreateTransferCacheEntry(uint32_t type, uint32_t id) = 0;
+
+ // Locks a transfer cache entry. May be called on any thread.
+ virtual bool ThreadsafeLockTransferCacheEntry(uint32_t type, uint32_t id) = 0;
+
+ // Unlocks transfer cache entries.
virtual void UnlockTransferCacheEntries(
- const std::vector<std::pair<cc::TransferCacheEntryType, uint32_t>>&
- entries) = 0;
- virtual void DeleteTransferCacheEntry(cc::TransferCacheEntryType type,
- uint32_t id) = 0;
+ const std::vector<std::pair<uint32_t, uint32_t>>& entries) = 0;
+
+ // Delete a transfer cache entry.
+ virtual void DeleteTransferCacheEntry(uint32_t type, uint32_t id) = 0;
virtual unsigned int GetTransferBufferFreeSize() const = 0;
diff --git a/chromium/gpu/command_buffer/client/gles2_c_lib_autogen.h b/chromium/gpu/command_buffer/client/gles2_c_lib_autogen.h
index 69e9a35c9e3..32b5dead53e 100644
--- a/chromium/gpu/command_buffer/client/gles2_c_lib_autogen.h
+++ b/chromium/gpu/command_buffer/client/gles2_c_lib_autogen.h
@@ -1444,6 +1444,15 @@ void GL_APIENTRY GLES2VerifySyncTokensCHROMIUM(GLbyte** sync_tokens,
void GL_APIENTRY GLES2WaitSyncTokenCHROMIUM(const GLbyte* sync_token) {
gles2::GetGLContext()->WaitSyncTokenCHROMIUM(sync_token);
}
+void GL_APIENTRY GLES2UnpremultiplyAndDitherCopyCHROMIUM(GLuint source_id,
+ GLuint dest_id,
+ GLint x,
+ GLint y,
+ GLsizei width,
+ GLsizei height) {
+ gles2::GetGLContext()->UnpremultiplyAndDitherCopyCHROMIUM(
+ source_id, dest_id, x, y, width, height);
+}
void GL_APIENTRY GLES2DrawBuffersEXT(GLsizei count, const GLenum* bufs) {
gles2::GetGLContext()->DrawBuffersEXT(count, bufs);
}
@@ -1755,15 +1764,17 @@ void GL_APIENTRY GLES2UnlockDiscardableTextureCHROMIUM(GLuint texture_id) {
bool GL_APIENTRY GLES2LockDiscardableTextureCHROMIUM(GLuint texture_id) {
return gles2::GetGLContext()->LockDiscardableTextureCHROMIUM(texture_id);
}
-void GL_APIENTRY GLES2BeginRasterCHROMIUM(GLuint texture_id,
- GLuint sk_color,
- GLuint msaa_sample_count,
- GLboolean can_use_lcd_text,
- GLboolean use_distance_field_text,
- GLint pixel_config) {
+void GL_APIENTRY
+GLES2BeginRasterCHROMIUM(GLuint texture_id,
+ GLuint sk_color,
+ GLuint msaa_sample_count,
+ GLboolean can_use_lcd_text,
+ GLboolean use_distance_field_text,
+ GLint color_type,
+ GLuint color_space_transfer_cache_id) {
gles2::GetGLContext()->BeginRasterCHROMIUM(
texture_id, sk_color, msaa_sample_count, can_use_lcd_text,
- use_distance_field_text, pixel_config);
+ use_distance_field_text, color_type, color_space_transfer_cache_id);
}
void* GL_APIENTRY GLES2MapRasterCHROMIUM(GLsizeiptr size) {
return gles2::GetGLContext()->MapRasterCHROMIUM(size);
@@ -2908,6 +2919,11 @@ extern const NameToFunc g_gles2_function_table[] = {
reinterpret_cast<GLES2FunctionPointer>(glWaitSyncTokenCHROMIUM),
},
{
+ "glUnpremultiplyAndDitherCopyCHROMIUM",
+ reinterpret_cast<GLES2FunctionPointer>(
+ glUnpremultiplyAndDitherCopyCHROMIUM),
+ },
+ {
"glDrawBuffersEXT",
reinterpret_cast<GLES2FunctionPointer>(glDrawBuffersEXT),
},
diff --git a/chromium/gpu/command_buffer/client/gles2_cmd_helper_autogen.h b/chromium/gpu/command_buffer/client/gles2_cmd_helper_autogen.h
index e11011fb6a4..a146fc08f8b 100644
--- a/chromium/gpu/command_buffer/client/gles2_cmd_helper_autogen.h
+++ b/chromium/gpu/command_buffer/client/gles2_cmd_helper_autogen.h
@@ -2701,6 +2701,19 @@ void WaitSyncTokenCHROMIUM(GLint namespace_id,
}
}
+void UnpremultiplyAndDitherCopyCHROMIUM(GLuint source_id,
+ GLuint dest_id,
+ GLint x,
+ GLint y,
+ GLsizei width,
+ GLsizei height) {
+ gles2::cmds::UnpremultiplyAndDitherCopyCHROMIUM* c =
+ GetCmdSpace<gles2::cmds::UnpremultiplyAndDitherCopyCHROMIUM>();
+ if (c) {
+ c->Init(source_id, dest_id, x, y, width, height);
+ }
+}
+
void DrawBuffersEXTImmediate(GLsizei count, const GLenum* bufs) {
const uint32_t size =
gles2::cmds::DrawBuffersEXTImmediate::ComputeSize(count);
@@ -3259,12 +3272,13 @@ void BeginRasterCHROMIUM(GLuint texture_id,
GLuint msaa_sample_count,
GLboolean can_use_lcd_text,
GLboolean use_distance_field_text,
- GLint pixel_config) {
+ GLint color_type,
+ GLuint color_space_transfer_cache_id) {
gles2::cmds::BeginRasterCHROMIUM* c =
GetCmdSpace<gles2::cmds::BeginRasterCHROMIUM>();
if (c) {
c->Init(texture_id, sk_color, msaa_sample_count, can_use_lcd_text,
- use_distance_field_text, pixel_config);
+ use_distance_field_text, color_type, color_space_transfer_cache_id);
}
}
diff --git a/chromium/gpu/command_buffer/client/gles2_implementation.cc b/chromium/gpu/command_buffer/client/gles2_implementation.cc
index 12581cc64a2..d1805665277 100644
--- a/chromium/gpu/command_buffer/client/gles2_implementation.cc
+++ b/chromium/gpu/command_buffer/client/gles2_implementation.cc
@@ -45,16 +45,54 @@
#include "ui/gfx/geometry/rect.h"
#include "ui/gfx/geometry/rect_f.h"
-#if defined(GPU_CLIENT_DEBUG)
-#include "base/command_line.h"
-#include "gpu/command_buffer/client/gpu_switches.h"
-#endif
-
#if !defined(__native_client__)
#include "ui/gfx/color_space.h"
#include "ui/gfx/ipc/color/gfx_param_traits.h"
#endif
+#if defined(GPU_CLIENT_DEBUG)
+#define GPU_CLIENT_SINGLE_THREAD_CHECK() SingleThreadChecker checker(this);
+#else // !defined(GPU_CLIENT_DEBUG)
+#define GPU_CLIENT_SINGLE_THREAD_CHECK()
+#endif // defined(GPU_CLIENT_DEBUG)
+
+// Check that destination pointers point to initialized memory.
+// When the context is lost, calling GL function has no effect so if destination
+// pointers point to initialized memory it can often lead to crash bugs. eg.
+//
+// GLsizei len;
+// glGetShaderSource(shader, max_size, &len, buffer);
+// std::string src(buffer, buffer + len); // len can be uninitialized here!!!
+//
+// Because this check is not official GL this check happens only on Chrome code,
+// not Pepper.
+//
+// If it was up to us we'd just always write to the destination but the OpenGL
+// spec defines the behavior of OpenGL functions, not us. :-(
+#if defined(__native_client__) || defined(GLES2_CONFORMANCE_TESTS)
+#define GPU_CLIENT_VALIDATE_DESTINATION_INITALIZATION_ASSERT(v)
+#define GPU_CLIENT_DCHECK(v)
+#elif defined(GPU_DCHECK)
+#define GPU_CLIENT_VALIDATE_DESTINATION_INITALIZATION_ASSERT(v) GPU_DCHECK(v)
+#define GPU_CLIENT_DCHECK(v) GPU_DCHECK(v)
+#elif defined(DCHECK)
+#define GPU_CLIENT_VALIDATE_DESTINATION_INITALIZATION_ASSERT(v) DCHECK(v)
+#define GPU_CLIENT_DCHECK(v) DCHECK(v)
+#else
+#define GPU_CLIENT_VALIDATE_DESTINATION_INITALIZATION_ASSERT(v) ASSERT(v)
+#define GPU_CLIENT_DCHECK(v) ASSERT(v)
+#endif
+
+#define GPU_CLIENT_VALIDATE_DESTINATION_INITALIZATION(type, ptr) \
+ GPU_CLIENT_VALIDATE_DESTINATION_INITALIZATION_ASSERT( \
+ ptr && \
+ (ptr[0] == static_cast<type>(0) || ptr[0] == static_cast<type>(-1)));
+
+#define GPU_CLIENT_VALIDATE_DESTINATION_OPTIONAL_INITALIZATION(type, ptr) \
+ GPU_CLIENT_VALIDATE_DESTINATION_INITALIZATION_ASSERT( \
+ !ptr || \
+ (ptr[0] == static_cast<type>(0) || ptr[0] == static_cast<type>(-1)));
+
namespace gpu {
namespace gles2 {
@@ -95,11 +133,6 @@ uint32_t GenerateNextFlushId() {
} // anonymous namespace
-#if !defined(_MSC_VER)
-const size_t GLES2Implementation::kMaxSizeOfSimpleResult;
-const unsigned int GLES2Implementation::kStartingOffset;
-#endif
-
GLES2Implementation::GLStaticState::GLStaticState() = default;
GLES2Implementation::GLStaticState::~GLStaticState() = default;
@@ -124,8 +157,8 @@ GLES2Implementation::GLES2Implementation(
bool lose_context_when_out_of_memory,
bool support_client_side_arrays,
GpuControl* gpu_control)
- : helper_(helper),
- transfer_buffer_(transfer_buffer),
+ : ImplementationBase(helper, transfer_buffer, gpu_control),
+ helper_(helper),
chromium_framebuffer_multisample_(kUnknownExtensionStatus),
pack_alignment_(4),
pack_row_length_(0),
@@ -147,36 +180,26 @@ GLES2Implementation::GLES2Implementation(
bound_copy_write_buffer_(0),
bound_pixel_pack_buffer_(0),
bound_pixel_unpack_buffer_(0),
- bound_transform_feedback_buffer_(0),
bound_uniform_buffer_(0),
bound_pixel_pack_transfer_buffer_id_(0),
bound_pixel_unpack_transfer_buffer_id_(0),
error_bits_(0),
- debug_(false),
lose_context_when_out_of_memory_(lose_context_when_out_of_memory),
support_client_side_arrays_(support_client_side_arrays),
use_count_(0),
flush_id_(0),
max_extra_transfer_buffer_size_(0),
current_trace_stack_(0),
- gpu_control_(gpu_control),
capabilities_(gpu_control->GetCapabilities()),
aggressively_free_resources_(false),
cached_extension_string_(nullptr),
weak_ptr_factory_(this) {
DCHECK(helper);
- DCHECK(transfer_buffer);
- DCHECK(gpu_control);
std::stringstream ss;
ss << std::hex << this;
this_in_hex_ = ss.str();
- GPU_CLIENT_LOG_CODE_BLOCK({
- debug_ = base::CommandLine::ForCurrentProcess()->HasSwitch(
- switches::kEnableGPUClientLogging);
- });
-
share_group_ =
(share_group ? std::move(share_group)
: new ShareGroup(
@@ -190,27 +213,12 @@ GLES2Implementation::GLES2Implementation(
gpu::ContextResult GLES2Implementation::Initialize(
const SharedMemoryLimits& limits) {
TRACE_EVENT0("gpu", "GLES2Implementation::Initialize");
- DCHECK_GE(limits.start_transfer_buffer_size, limits.min_transfer_buffer_size);
- DCHECK_LE(limits.start_transfer_buffer_size, limits.max_transfer_buffer_size);
- DCHECK_GE(limits.min_transfer_buffer_size, kStartingOffset);
-
- gpu_control_->SetGpuControlClient(this);
-
- if (!transfer_buffer_->Initialize(
- limits.start_transfer_buffer_size, kStartingOffset,
- limits.min_transfer_buffer_size, limits.max_transfer_buffer_size,
- kAlignment, kSizeToFlush)) {
- // TransferBuffer::Initialize doesn't fail for transient reasons such as if
- // the context was lost. See http://crrev.com/c/720269
- LOG(ERROR) << "ContextResult::kFatalFailure: "
- << "TransferBuffer::Initailize() failed";
- return gpu::ContextResult::kFatalFailure;
+ auto result = ImplementationBase::Initialize(limits);
+ if (result != gpu::ContextResult::kSuccess) {
+ return result;
}
max_extra_transfer_buffer_size_ = limits.max_mapped_memory_for_texture_upload;
- mapped_memory_ = std::make_unique<MappedMemoryManager>(
- helper_, limits.mapped_memory_reclaim_limit);
- mapped_memory_->set_chunk_size_multiple(limits.mapped_memory_chunk_size);
GLStaticState::ShaderPrecisionMap* shader_precisions =
&static_state_.shader_precisions;
@@ -229,7 +237,6 @@ gpu::ContextResult GLES2Implementation::Initialize(
texture_units_ = std::make_unique<TextureUnit[]>(
capabilities_.max_combined_texture_image_units);
- query_tracker_ = std::make_unique<QueryTracker>(mapped_memory_.get());
buffer_tracker_ = std::make_unique<BufferTracker>(mapped_memory_.get());
for (int i = 0; i < static_cast<int>(IdNamespaces::kNumIdNamespaces); ++i)
@@ -266,6 +273,7 @@ GLES2Implementation::~GLES2Implementation() {
// by the queries. The GPU process when validating that memory is still
// shared will fail and abort (ie, it will stop running).
WaitForCmd();
+
query_tracker_.reset();
// GLES2Implementation::Initialize() could fail before allocating
@@ -285,10 +293,6 @@ GLES2Implementation::~GLES2Implementation() {
// Make sure the commands make it the service.
WaitForCmd();
-
- // The gpu_control_ outlives this class, so clear the client on it before we
- // self-destruct.
- gpu_control_->SetGpuControlClient(nullptr);
}
GLES2CmdHelper* GLES2Implementation::helper() const {
@@ -315,8 +319,9 @@ void GLES2Implementation::OnGpuControlLostContext() {
DCHECK(!lost_context_callback_run_);
lost_context_callback_run_ = true;
share_group_->Lose();
- if (!lost_context_callback_.is_null())
- lost_context_callback_.Run();
+ if (!lost_context_callback_.is_null()) {
+ std::move(lost_context_callback_).Run();
+ }
}
void GLES2Implementation::OnGpuControlLostContextMaybeReentrant() {
@@ -332,79 +337,10 @@ void GLES2Implementation::OnGpuControlErrorMessage(const char* message,
error_message_callback_.Run(message, id);
}
-void* GLES2Implementation::GetResultBuffer() {
- return transfer_buffer_->GetResultBuffer();
-}
-
-int32_t GLES2Implementation::GetResultShmId() {
- return transfer_buffer_->GetShmId();
-}
-
-uint32_t GLES2Implementation::GetResultShmOffset() {
- return transfer_buffer_->GetResultOffset();
-}
-
-void GLES2Implementation::FreeUnusedSharedMemory() {
- mapped_memory_->FreeUnused();
-}
-
-void GLES2Implementation::FreeEverything() {
- query_tracker_->Shrink(helper_);
- FreeUnusedSharedMemory();
- transfer_buffer_->Free();
- helper_->FreeRingBuffer();
-}
-
void GLES2Implementation::FreeSharedMemory(void* mem) {
mapped_memory_->FreePendingToken(mem, helper_->InsertToken());
}
-void GLES2Implementation::RunIfContextNotLost(base::OnceClosure callback) {
- if (!lost_context_callback_run_)
- std::move(callback).Run();
-}
-
-void GLES2Implementation::FlushPendingWork() {
- gpu_control_->FlushPendingWork();
-}
-
-void GLES2Implementation::SignalSyncToken(const gpu::SyncToken& sync_token,
- base::OnceClosure callback) {
- SyncToken verified_sync_token;
- if (sync_token.HasData() &&
- GetVerifiedSyncTokenForIPC(sync_token, &verified_sync_token)) {
- // We can only send verified sync tokens across IPC.
- gpu_control_->SignalSyncToken(
- verified_sync_token,
- base::Bind(&GLES2Implementation::RunIfContextNotLost,
- weak_ptr_factory_.GetWeakPtr(), base::Passed(&callback)));
- } else {
- // Invalid sync token, just call the callback immediately.
- std::move(callback).Run();
- }
-}
-
-// This may be called from any thread. It's safe to access gpu_control_ without
-// the lock because it is const.
-bool GLES2Implementation::IsSyncTokenSignaled(
- const gpu::SyncToken& sync_token) {
- // Check that the sync token belongs to this context.
- DCHECK_EQ(gpu_control_->GetNamespaceID(), sync_token.namespace_id());
- DCHECK_EQ(gpu_control_->GetCommandBufferID(), sync_token.command_buffer_id());
- return gpu_control_->IsFenceSyncReleased(sync_token.release_count());
-}
-
-void GLES2Implementation::SignalQuery(uint32_t query,
- base::OnceClosure callback) {
- // Flush previously entered commands to ensure ordering with any
- // glBeginQueryEXT() calls that may have been put into the context.
- ShallowFlushCHROMIUM();
- gpu_control_->SignalQuery(
- query,
- base::Bind(&GLES2Implementation::RunIfContextNotLost,
- weak_ptr_factory_.GetWeakPtr(), base::Passed(&callback)));
-}
-
GLuint GLES2Implementation::CreateGpuFenceCHROMIUM() {
GLuint client_id = GetIdAllocator(IdNamespaces::kGpuFences)
->AllocateIDAtOrAbove(last_gpu_fence_id_ + 1);
@@ -440,15 +376,6 @@ GLuint GLES2Implementation::CreateClientGpuFenceCHROMIUM(
return client_id;
}
-void GLES2Implementation::GetGpuFence(
- uint32_t gpu_fence_id,
- base::OnceCallback<void(std::unique_ptr<gfx::GpuFence>)> callback) {
- // This ShallowFlush is required to ensure that the GetGpuFence
- // call is processed after the preceding CreateGpuFenceCHROMIUM call.
- ShallowFlushCHROMIUM();
- gpu_control_->GetGpuFence(gpu_fence_id, std::move(callback));
-}
-
void GLES2Implementation::DestroyGpuFenceCHROMIUMHelper(GLuint client_id) {
if (GetIdAllocator(IdNamespaces::kGpuFences)->InUse(client_id)) {
GetIdAllocator(IdNamespaces::kGpuFences)->FreeID(client_id);
@@ -477,53 +404,6 @@ void GLES2Implementation::SetAggressivelyFreeResources(
}
}
-bool GLES2Implementation::OnMemoryDump(
- const base::trace_event::MemoryDumpArgs& args,
- base::trace_event::ProcessMemoryDump* pmd) {
- using base::trace_event::MemoryAllocatorDump;
- using base::trace_event::MemoryDumpLevelOfDetail;
-
- // Dump owned MappedMemoryManager memory as well.
- mapped_memory_->OnMemoryDump(args, pmd);
-
- if (!transfer_buffer_->HaveBuffer())
- return true;
-
- const uint64_t tracing_process_id =
- base::trace_event::MemoryDumpManager::GetInstance()
- ->GetTracingProcessId();
-
- MemoryAllocatorDump* dump = pmd->CreateAllocatorDump(base::StringPrintf(
- "gpu/transfer_buffer_memory/buffer_%d", transfer_buffer_->GetShmId()));
- dump->AddScalar(MemoryAllocatorDump::kNameSize,
- MemoryAllocatorDump::kUnitsBytes,
- transfer_buffer_->GetSize());
-
- if (args.level_of_detail != MemoryDumpLevelOfDetail::BACKGROUND) {
- dump->AddScalar("free_size", MemoryAllocatorDump::kUnitsBytes,
- transfer_buffer_->GetFragmentedFreeSize());
- auto shared_memory_guid =
- transfer_buffer_->shared_memory_handle().GetGUID();
- const int kImportance = 2;
- if (!shared_memory_guid.is_empty()) {
- pmd->CreateSharedMemoryOwnershipEdge(dump->guid(), shared_memory_guid,
- kImportance);
- } else {
- auto guid = GetBufferGUIDForTracing(tracing_process_id,
- transfer_buffer_->GetShmId());
- pmd->CreateSharedGlobalAllocatorDump(guid);
- pmd->AddOwnershipEdge(dump->guid(), guid, kImportance);
- }
- }
-
- return true;
-}
-
-void GLES2Implementation::WaitForCmd() {
- TRACE_EVENT0("gpu", "GLES2::WaitForCmd");
- helper_->CommandBufferHelper::Finish();
-}
-
bool GLES2Implementation::IsExtensionAvailable(const char* ext) {
const char* extensions =
reinterpret_cast<const char*>(GetStringHelper(GL_EXTENSIONS));
@@ -578,22 +458,6 @@ GLenum GLES2Implementation::GetError() {
return err;
}
-GLenum GLES2Implementation::GetClientSideGLError() {
- if (error_bits_ == 0) {
- return GL_NO_ERROR;
- }
-
- GLenum error = GL_NO_ERROR;
- for (uint32_t mask = 1; mask != 0; mask = mask << 1) {
- if ((error_bits_ & mask) != 0) {
- error = GLES2Util::GLErrorBitToGLError(mask);
- break;
- }
- }
- error_bits_ &= ~GLES2Util::GLErrorToErrorBit(error);
- return error;
-}
-
GLenum GLES2Implementation::GetGLError() {
TRACE_EVENT0("gpu", "GLES2::GetGLError");
// Check the GL error first, then our wrapped error.
@@ -657,109 +521,6 @@ void GLES2Implementation::SetGLErrorInvalidEnum(
GLES2Util::GetStringEnum(value)).c_str());
}
-bool GLES2Implementation::GetBucketContents(uint32_t bucket_id,
- std::vector<int8_t>* data) {
- TRACE_EVENT0("gpu", "GLES2::GetBucketContents");
- DCHECK(data);
- const uint32_t kStartSize = 32 * 1024;
- ScopedTransferBufferPtr buffer(kStartSize, helper_, transfer_buffer_);
- if (!buffer.valid()) {
- return false;
- }
- typedef cmd::GetBucketStart::Result Result;
- Result* result = GetResultAs<Result*>();
- if (!result) {
- return false;
- }
- *result = 0;
- helper_->GetBucketStart(
- bucket_id, GetResultShmId(), GetResultShmOffset(),
- buffer.size(), buffer.shm_id(), buffer.offset());
- WaitForCmd();
- uint32_t size = *result;
- data->resize(size);
- if (size > 0u) {
- uint32_t offset = 0;
- while (size) {
- if (!buffer.valid()) {
- buffer.Reset(size);
- if (!buffer.valid()) {
- return false;
- }
- helper_->GetBucketData(
- bucket_id, offset, buffer.size(), buffer.shm_id(), buffer.offset());
- WaitForCmd();
- }
- uint32_t size_to_copy = std::min(size, buffer.size());
- memcpy(&(*data)[offset], buffer.address(), size_to_copy);
- offset += size_to_copy;
- size -= size_to_copy;
- buffer.Release();
- }
- // Free the bucket. This is not required but it does free up the memory.
- // and we don't have to wait for the result so from the client's perspective
- // it's cheap.
- helper_->SetBucketSize(bucket_id, 0);
- }
- return true;
-}
-
-void GLES2Implementation::SetBucketContents(uint32_t bucket_id,
- const void* data,
- size_t size) {
- DCHECK(data);
- helper_->SetBucketSize(bucket_id, size);
- if (size > 0u) {
- uint32_t offset = 0;
- while (size) {
- ScopedTransferBufferPtr buffer(size, helper_, transfer_buffer_);
- if (!buffer.valid()) {
- return;
- }
- memcpy(buffer.address(), static_cast<const int8_t*>(data) + offset,
- buffer.size());
- helper_->SetBucketData(
- bucket_id, offset, buffer.size(), buffer.shm_id(), buffer.offset());
- offset += buffer.size();
- size -= buffer.size();
- }
- }
-}
-
-void GLES2Implementation::SetBucketAsCString(uint32_t bucket_id,
- const char* str) {
- // NOTE: strings are passed NULL terminated. That means the empty
- // string will have a size of 1 and no-string will have a size of 0
- if (str) {
- SetBucketContents(bucket_id, str, strlen(str) + 1);
- } else {
- helper_->SetBucketSize(bucket_id, 0);
- }
-}
-
-bool GLES2Implementation::GetBucketAsString(uint32_t bucket_id,
- std::string* str) {
- DCHECK(str);
- std::vector<int8_t> data;
- // NOTE: strings are passed NULL terminated. That means the empty
- // string will have a size of 1 and no-string will have a size of 0
- if (!GetBucketContents(bucket_id, &data)) {
- return false;
- }
- if (data.empty()) {
- return false;
- }
- str->assign(&data[0], &data[0] + data.size() - 1);
- return true;
-}
-
-void GLES2Implementation::SetBucketAsString(uint32_t bucket_id,
- const std::string& str) {
- // NOTE: strings are passed NULL terminated. That means the empty
- // string will have a size of 1 and no-string will have a size of 0
- SetBucketContents(bucket_id, str.c_str(), str.size() + 1);
-}
-
void GLES2Implementation::Disable(GLenum cap) {
GPU_CLIENT_SINGLE_THREAD_CHECK();
GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glDisable("
@@ -1117,9 +878,6 @@ bool GLES2Implementation::GetHelper(GLenum pname, GLint* params) {
case GL_PIXEL_UNPACK_BUFFER_BINDING:
*params = bound_pixel_unpack_buffer_;
return true;
- case GL_TRANSFORM_FEEDBACK_BUFFER_BINDING:
- *params = bound_transform_feedback_buffer_;
- return true;
case GL_UNIFORM_BUFFER_BINDING:
*params = bound_uniform_buffer_;
return true;
@@ -1167,6 +925,7 @@ bool GLES2Implementation::GetHelper(GLenum pname, GLint* params) {
case GL_TRANSFORM_FEEDBACK_BINDING:
case GL_TRANSFORM_FEEDBACK_ACTIVE:
case GL_TRANSFORM_FEEDBACK_PAUSED:
+ case GL_TRANSFORM_FEEDBACK_BUFFER_BINDING:
case GL_TRANSFORM_FEEDBACK_BUFFER_SIZE:
case GL_TRANSFORM_FEEDBACK_BUFFER_START:
case GL_UNIFORM_BUFFER_SIZE:
@@ -1453,13 +1212,17 @@ void GLES2Implementation::Flush() {
FlushHelper();
}
-void GLES2Implementation::ShallowFlushCHROMIUM() {
+void GLES2Implementation::IssueShallowFlush() {
GPU_CLIENT_SINGLE_THREAD_CHECK();
GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glShallowFlushCHROMIUM()");
flush_id_ = GenerateNextFlushId();
FlushHelper();
}
+void GLES2Implementation::ShallowFlushCHROMIUM() {
+ IssueShallowFlush();
+}
+
void GLES2Implementation::FlushHelper() {
// Flush our command buffer
// (tell the service to execute up to the flush cmd.)
@@ -4330,12 +4093,6 @@ void GLES2Implementation::BindBufferHelper(
case GL_PIXEL_UNPACK_TRANSFER_BUFFER_CHROMIUM:
bound_pixel_unpack_transfer_buffer_id_ = buffer_id;
break;
- case GL_TRANSFORM_FEEDBACK_BUFFER:
- if (bound_transform_feedback_buffer_ != buffer_id) {
- bound_transform_feedback_buffer_ = buffer_id;
- changed = true;
- }
- break;
case GL_UNIFORM_BUFFER:
if (bound_uniform_buffer_ != buffer_id) {
bound_uniform_buffer_ = buffer_id;
@@ -4373,9 +4130,6 @@ void GLES2Implementation::BindBufferBaseHelper(
"glBindBufferBase", "index out of range");
return;
}
- if (bound_transform_feedback_buffer_ != buffer_id) {
- bound_transform_feedback_buffer_ = buffer_id;
- }
break;
case GL_UNIFORM_BUFFER:
if (index >=
@@ -4600,9 +4354,6 @@ void GLES2Implementation::DeleteBuffersHelper(
if (buffers[ii] == bound_pixel_unpack_buffer_) {
bound_pixel_unpack_buffer_ = 0;
}
- if (buffers[ii] == bound_transform_feedback_buffer_) {
- bound_transform_feedback_buffer_ = 0;
- }
if (buffers[ii] == bound_uniform_buffer_) {
bound_uniform_buffer_ = 0;
}
@@ -4675,7 +4426,13 @@ void GLES2Implementation::DeleteTexturesHelper(
}
for (GLsizei ii = 0; ii < n; ++ii) {
share_group_->discardable_texture_manager()->FreeTexture(textures[ii]);
+ }
+ UnbindTexturesHelper(n, textures);
+}
+void GLES2Implementation::UnbindTexturesHelper(GLsizei n,
+ const GLuint* textures) {
+ for (GLsizei ii = 0; ii < n; ++ii) {
for (GLint tt = 0; tt < capabilities_.max_combined_texture_image_units;
++tt) {
TextureUnit& unit = texture_units_[tt];
@@ -5178,8 +4935,27 @@ void GLES2Implementation::UnmapBufferSubDataCHROMIUM(const void* mem) {
GLuint GLES2Implementation::GetBoundBufferHelper(GLenum target) {
GLenum binding = GLES2Util::MapBufferTargetToBindingEnum(target);
GLint id = 0;
- bool cached = GetHelper(binding, &id);
- DCHECK(cached);
+ if (target == GL_TRANSFORM_FEEDBACK_BUFFER) {
+ // GL_TRANSFORM_FEEDBACK_BUFFER is not cached locally, so we need to call
+ // the server here. We don't cache it because it's part of the transform
+ // feedback object state, which means that it's modified by things other
+ // than glBindBuffer calls, specifically glBindTransformFeedback, the
+ // success of which depends on a bunch of other states.
+ // TODO(jdarpinian): This is slow. We should audit callers of this function
+ // to figure out if they really need this information, and skip this if
+ // they don't.
+ auto* result = GetResultAs<cmds::GetIntegerv::Result*>();
+ DCHECK(result);
+ result->SetNumResults(0);
+ helper_->GetIntegerv(GL_TRANSFORM_FEEDBACK_BUFFER_BINDING, GetResultShmId(),
+ GetResultShmOffset());
+ WaitForCmd();
+ DCHECK(result->GetNumResults() == 1);
+ result->CopyResult(&id);
+ } else {
+ bool cached = GetHelper(binding, &id);
+ DCHECK(cached);
+ }
return static_cast<GLuint>(id);
}
@@ -5700,7 +5476,7 @@ void GLES2Implementation::BeginQueryEXT(GLenum target, GLuint id) {
case GL_TRANSFORM_FEEDBACK_PRIMITIVES_WRITTEN:
if (capabilities_.major_version >= 3)
break;
- // Fall through
+ FALLTHROUGH;
default:
SetGLError(
GL_INVALID_ENUM, "glBeginQueryEXT", "unknown query target");
@@ -6148,38 +5924,6 @@ bool GLES2Implementation::ThreadsafeDiscardableTextureIsDeletedForTracing(
return manager->TextureIsDeletedForTracing(texture_id);
}
-void GLES2Implementation::CreateTransferCacheEntry(
- const cc::ClientTransferCacheEntry& entry) {
- transfer_cache_.CreateCacheEntry(helper_, mapped_memory_.get(), entry);
-}
-
-bool GLES2Implementation::ThreadsafeLockTransferCacheEntry(
- cc::TransferCacheEntryType type,
- uint32_t id) {
- return transfer_cache_.LockTransferCacheEntry(type, id);
-}
-
-void GLES2Implementation::UnlockTransferCacheEntries(
- const std::vector<std::pair<cc::TransferCacheEntryType, uint32_t>>&
- entries) {
- transfer_cache_.UnlockTransferCacheEntries(helper_, entries);
-}
-
-void GLES2Implementation::DeleteTransferCacheEntry(
- cc::TransferCacheEntryType type,
- uint32_t id) {
- transfer_cache_.DeleteTransferCacheEntry(helper_, type, id);
-}
-
-unsigned int GLES2Implementation::GetTransferBufferFreeSize() const {
- return transfer_buffer_->GetFreeSize();
-}
-
-void GLES2Implementation::SetLostContextCallback(
- const base::Closure& callback) {
- lost_context_callback_ = callback;
-}
-
void GLES2Implementation::GenSyncTokenCHROMIUM(GLbyte* sync_token) {
if (!sync_token) {
SetGLError(GL_INVALID_VALUE, "glGenSyncTokenCHROMIUM", "empty sync_token");
@@ -6272,21 +6016,6 @@ void GLES2Implementation::WaitSyncTokenCHROMIUM(const GLbyte* sync_token_data) {
gpu_control_->WaitSyncTokenHint(verified_sync_token);
}
-bool GLES2Implementation::GetVerifiedSyncTokenForIPC(
- const SyncToken& sync_token,
- SyncToken* verified_sync_token) {
- DCHECK(sync_token.HasData());
- DCHECK(verified_sync_token);
-
- if (!sync_token.verified_flush() &&
- !gpu_control_->CanWaitUnverifiedSyncToken(sync_token))
- return false;
-
- *verified_sync_token = sync_token;
- verified_sync_token->SetVerifyFlush();
- return true;
-}
-
namespace {
bool CreateImageValidInternalFormat(GLenum internalformat,
@@ -6303,6 +6032,8 @@ bool CreateImageValidInternalFormat(GLenum internalformat,
return capabilities.texture_format_etc1;
case GL_R16_EXT:
return capabilities.texture_norm16;
+ case GL_RGB10_A2_EXT:
+ return capabilities.image_xr30 || capabilities.image_xb30;
case GL_RED:
case GL_RG_EXT:
case GL_RGB:
@@ -7140,6 +6871,14 @@ void GLES2Implementation::UnlockDiscardableTextureCHROMIUM(GLuint texture_id) {
"Texture ID not initialized");
return;
}
+
+ // |should_unbind_texture| will be set to true if the texture has been fully
+ // unlocked. In this case, ensure the texture is unbound.
+ bool should_unbind_texture = false;
+ manager->UnlockTexture(texture_id, &should_unbind_texture);
+ if (should_unbind_texture)
+ UnbindTexturesHelper(1, &texture_id);
+
helper_->UnlockDiscardableTextureCHROMIUM(texture_id);
}
@@ -7230,6 +6969,79 @@ void GLES2Implementation::UnmapRasterCHROMIUM(GLsizeiptr written_size) {
CheckGLError();
}
+void GLES2Implementation::IssueBeginQuery(GLenum target,
+ GLuint id,
+ uint32_t sync_data_shm_id,
+ uint32_t sync_data_shm_offset) {
+ helper_->BeginQueryEXT(target, id, sync_data_shm_id, sync_data_shm_offset);
+}
+
+void GLES2Implementation::IssueEndQuery(GLenum target, GLuint submit_count) {
+ helper_->EndQueryEXT(target, submit_count);
+}
+
+void GLES2Implementation::IssueQueryCounter(GLuint id,
+ GLenum target,
+ uint32_t sync_data_shm_id,
+ uint32_t sync_data_shm_offset,
+ GLuint submit_count) {
+ helper_->QueryCounterEXT(id, target, sync_data_shm_id, sync_data_shm_offset,
+ submit_count);
+}
+
+void GLES2Implementation::IssueSetDisjointValueSync(
+ uint32_t sync_data_shm_id,
+ uint32_t sync_data_shm_offset) {
+ helper_->SetDisjointValueSyncCHROMIUM(sync_data_shm_id, sync_data_shm_offset);
+}
+
+GLenum GLES2Implementation::GetClientSideGLError() {
+ if (error_bits_ == 0) {
+ return GL_NO_ERROR;
+ }
+
+ GLenum error = GL_NO_ERROR;
+ for (uint32_t mask = 1; mask != 0; mask = mask << 1) {
+ if ((error_bits_ & mask) != 0) {
+ error = GLES2Util::GLErrorBitToGLError(mask);
+ break;
+ }
+ }
+ error_bits_ &= ~GLES2Util::GLErrorToErrorBit(error);
+ return error;
+}
+
+CommandBufferHelper* GLES2Implementation::cmd_buffer_helper() {
+ return helper_;
+}
+
+void GLES2Implementation::IssueCreateTransferCacheEntry(
+ GLuint entry_type,
+ GLuint entry_id,
+ GLuint handle_shm_id,
+ GLuint handle_shm_offset,
+ GLuint data_shm_id,
+ GLuint data_shm_offset,
+ GLuint data_size) {
+ helper_->CreateTransferCacheEntryINTERNAL(entry_type, entry_id, handle_shm_id,
+ handle_shm_offset, data_shm_id,
+ data_shm_offset, data_size);
+}
+
+void GLES2Implementation::IssueDeleteTransferCacheEntry(GLuint entry_type,
+ GLuint entry_id) {
+ helper_->DeleteTransferCacheEntryINTERNAL(entry_type, entry_id);
+}
+
+void GLES2Implementation::IssueUnlockTransferCacheEntry(GLuint entry_type,
+ GLuint entry_id) {
+ helper_->UnlockTransferCacheEntryINTERNAL(entry_type, entry_id);
+}
+
+CommandBuffer* GLES2Implementation::command_buffer() const {
+ return helper_->command_buffer();
+}
+
// Include the auto-generated part of this file. We split this because it means
// we can easily edit the non-auto generated parts right here in this file
// instead of having to edit some template or the code generator.
diff --git a/chromium/gpu/command_buffer/client/gles2_implementation.h b/chromium/gpu/command_buffer/client/gles2_implementation.h
index 74738eb0c98..bcd3dd2535e 100644
--- a/chromium/gpu/command_buffer/client/gles2_implementation.h
+++ b/chromium/gpu/command_buffer/client/gles2_implementation.h
@@ -29,7 +29,10 @@
#include "gpu/command_buffer/client/gles2_impl_export.h"
#include "gpu/command_buffer/client/gles2_interface.h"
#include "gpu/command_buffer/client/gpu_control_client.h"
+#include "gpu/command_buffer/client/implementation_base.h"
+#include "gpu/command_buffer/client/logging.h"
#include "gpu/command_buffer/client/mapped_memory.h"
+#include "gpu/command_buffer/client/query_tracker.h"
#include "gpu/command_buffer/client/ref_counted.h"
#include "gpu/command_buffer/client/share_group.h"
#include "gpu/command_buffer/client/transfer_buffer.h"
@@ -38,83 +41,14 @@
#include "gpu/command_buffer/common/context_result.h"
#include "gpu/command_buffer/common/debug_marker_manager.h"
-#if DCHECK_IS_ON() && !defined(__native_client__) && \
- !defined(GLES2_CONFORMANCE_TESTS)
- #if defined(GLES2_INLINE_OPTIMIZATION)
- // TODO(gman): Replace with macros that work with inline optmization.
- #define GPU_CLIENT_SINGLE_THREAD_CHECK()
- #define GPU_CLIENT_LOG(args)
- #define GPU_CLIENT_LOG_CODE_BLOCK(code)
- #define GPU_CLIENT_DCHECK_CODE_BLOCK(code)
- #else
- #include "base/logging.h"
- #define GPU_CLIENT_SINGLE_THREAD_CHECK() SingleThreadChecker checker(this);
- #define GPU_CLIENT_LOG(args) DLOG_IF(INFO, debug_) << args;
- #define GPU_CLIENT_LOG_CODE_BLOCK(code) code
- #define GPU_CLIENT_DCHECK_CODE_BLOCK(code) code
- #define GPU_CLIENT_DEBUG
- #endif
-#else
- #define GPU_CLIENT_SINGLE_THREAD_CHECK()
- #define GPU_CLIENT_LOG(args)
- #define GPU_CLIENT_LOG_CODE_BLOCK(code)
- #define GPU_CLIENT_DCHECK_CODE_BLOCK(code)
-#endif
-
-#if defined(GPU_CLIENT_DEBUG)
- // Set to 1 to have the client fail when a GL error is generated.
- // This helps find bugs in the renderer since the debugger stops on the error.
-# if 0
-# define GL_CLIENT_FAIL_GL_ERRORS
-# endif
-#endif
-
-// Check that destination pointers point to initialized memory.
-// When the context is lost, calling GL function has no effect so if destination
-// pointers point to initialized memory it can often lead to crash bugs. eg.
-//
-// GLsizei len;
-// glGetShaderSource(shader, max_size, &len, buffer);
-// std::string src(buffer, buffer + len); // len can be uninitialized here!!!
-//
-// Because this check is not official GL this check happens only on Chrome code,
-// not Pepper.
-//
-// If it was up to us we'd just always write to the destination but the OpenGL
-// spec defines the behavior of OpenGL functions, not us. :-(
-#if defined(__native_client__) || defined(GLES2_CONFORMANCE_TESTS)
- #define GPU_CLIENT_VALIDATE_DESTINATION_INITALIZATION_ASSERT(v)
- #define GPU_CLIENT_DCHECK(v)
-#elif defined(GPU_DCHECK)
- #define GPU_CLIENT_VALIDATE_DESTINATION_INITALIZATION_ASSERT(v) GPU_DCHECK(v)
- #define GPU_CLIENT_DCHECK(v) GPU_DCHECK(v)
-#elif defined(DCHECK)
- #define GPU_CLIENT_VALIDATE_DESTINATION_INITALIZATION_ASSERT(v) DCHECK(v)
- #define GPU_CLIENT_DCHECK(v) DCHECK(v)
-#else
- #define GPU_CLIENT_VALIDATE_DESTINATION_INITALIZATION_ASSERT(v) ASSERT(v)
- #define GPU_CLIENT_DCHECK(v) ASSERT(v)
-#endif
-
-#define GPU_CLIENT_VALIDATE_DESTINATION_INITALIZATION(type, ptr) \
- GPU_CLIENT_VALIDATE_DESTINATION_INITALIZATION_ASSERT(ptr && \
- (ptr[0] == static_cast<type>(0) || ptr[0] == static_cast<type>(-1)));
-
-#define GPU_CLIENT_VALIDATE_DESTINATION_OPTIONAL_INITALIZATION(type, ptr) \
- GPU_CLIENT_VALIDATE_DESTINATION_INITALIZATION_ASSERT(!ptr || \
- (ptr[0] == static_cast<type>(0) || ptr[0] == static_cast<type>(-1)));
-
namespace gpu {
-class GpuControl;
class IdAllocator;
-struct SharedMemoryLimits;
namespace gles2 {
class GLES2CmdHelper;
class VertexArrayObjectManager;
-class QueryTracker;
// This class emulates GLES2 over command buffers. It can be used by a client
// program so that the program does not need deal with shared memory and command
@@ -122,11 +56,9 @@ class QueryTracker;
// be had by changing your code to use command buffers directly by using the
// GLES2CmdHelper but that entails changing your code to use and deal with
// shared memory and synchronization issues.
-class GLES2_IMPL_EXPORT GLES2Implementation
- : public GLES2Interface,
- public ContextSupport,
- public GpuControlClient,
- public base::trace_event::MemoryDumpProvider {
+class GLES2_IMPL_EXPORT GLES2Implementation : public GLES2Interface,
+ public ImplementationBase,
+ public QueryTrackerClient {
public:
// Stores GL state that never changes.
struct GLES2_IMPL_EXPORT GLStaticState {
@@ -140,22 +72,6 @@ class GLES2_IMPL_EXPORT GLES2Implementation
ShaderPrecisionMap shader_precisions;
};
- // The maximum result size from simple GL get commands.
- static const size_t kMaxSizeOfSimpleResult =
- 16 * sizeof(uint32_t); // NOLINT.
-
- // used for testing only. If more things are reseved add them here.
- static const unsigned int kStartingOffset = kMaxSizeOfSimpleResult;
-
- // Size in bytes to issue async flush for transfer buffer.
- static const unsigned int kSizeToFlush = 256 * 1024;
-
- // The bucket used for results. Public for testing only.
- static const uint32_t kResultBucketId = 1;
-
- // Alignment of allocations.
- static const unsigned int kAlignment = 16;
-
// GL names for the buffers used to emulate client side buffers.
static const GLuint kClientSideArrayId = 0xFEDCBA98u;
static const GLuint kClientSideElementArrayId = 0xFEDCBA99u;
@@ -179,9 +95,6 @@ class GLES2_IMPL_EXPORT GLES2Implementation
// this to issue cmds at a lower level for certain kinds of optimization.
GLES2CmdHelper* helper() const;
- // Gets client side generated errors.
- GLenum GetClientSideGLError();
-
// GLES2Interface implementation
void FreeSharedMemory(void*) override;
@@ -191,14 +104,6 @@ class GLES2_IMPL_EXPORT GLES2Implementation
#include "gpu/command_buffer/client/gles2_implementation_autogen.h"
// ContextSupport implementation.
- void FlushPendingWork() override;
- void SignalSyncToken(const gpu::SyncToken& sync_token,
- base::OnceClosure callback) override;
- bool IsSyncTokenSignaled(const gpu::SyncToken& sync_token) override;
- void SignalQuery(uint32_t query, base::OnceClosure callback) override;
- void GetGpuFence(uint32_t gpu_fence_id,
- base::OnceCallback<void(std::unique_ptr<gfx::GpuFence>)>
- callback) override;
void SetAggressivelyFreeResources(bool aggressively_free_resources) override;
void Swap() override;
void SwapWithBounds(const std::vector<gfx::Rect>& rects) override;
@@ -218,20 +123,6 @@ class GLES2_IMPL_EXPORT GLES2Implementation
uint32_t texture_id) override;
bool ThreadsafeDiscardableTextureIsDeletedForTracing(
uint32_t texture_id) override;
- void CreateTransferCacheEntry(
- const cc::ClientTransferCacheEntry& entry) override;
- bool ThreadsafeLockTransferCacheEntry(cc::TransferCacheEntryType type,
- uint32_t id) override;
- void UnlockTransferCacheEntries(
- const std::vector<std::pair<cc::TransferCacheEntryType, uint32_t>>&
- entries) override;
- void DeleteTransferCacheEntry(cc::TransferCacheEntryType type,
- uint32_t id) override;
- unsigned int GetTransferBufferFreeSize() const override;
-
- // TODO(danakj): Move to ContextSupport once ContextProvider doesn't need to
- // intercept it.
- void SetLostContextCallback(const base::Closure& callback);
void GetProgramInfoCHROMIUMHelper(GLuint program,
std::vector<int8_t>* result);
@@ -271,17 +162,6 @@ class GLES2_IMPL_EXPORT GLES2Implementation
bool GetQueryObjectValueHelper(
const char* function_name, GLuint id, GLenum pname, GLuint64* params);
- void FreeUnusedSharedMemory();
- void FreeEverything();
-
- // Helper to set verified bit on sync token if allowed by gpu control.
- bool GetVerifiedSyncTokenForIPC(const gpu::SyncToken& sync_token,
- gpu::SyncToken* verified_sync_token);
-
- // base::trace_event::MemoryDumpProvider implementation.
- bool OnMemoryDump(const base::trace_event::MemoryDumpArgs& args,
- base::trace_event::ProcessMemoryDump* pmd) override;
-
const scoped_refptr<ShareGroup>& share_group() const { return share_group_; }
const Capabilities& capabilities() const {
@@ -296,6 +176,39 @@ class GLES2_IMPL_EXPORT GLES2Implementation
return &share_group_context_data_;
}
+ // QueryTrackerClient implementation.
+ void IssueBeginQuery(GLenum target,
+ GLuint id,
+ uint32_t sync_data_shm_id,
+ uint32_t sync_data_shm_offset) override;
+ void IssueEndQuery(GLenum target, GLuint submit_count) override;
+ void IssueQueryCounter(GLuint id,
+ GLenum target,
+ uint32_t sync_data_shm_id,
+ uint32_t sync_data_shm_offset,
+ GLuint submit_count) override;
+ void IssueSetDisjointValueSync(uint32_t sync_data_shm_id,
+ uint32_t sync_data_shm_offset) override;
+ GLenum GetClientSideGLError() override;
+ CommandBufferHelper* cmd_buffer_helper() override;
+ void SetGLError(GLenum error,
+ const char* function_name,
+ const char* msg) override;
+
+ // ClientTransferCache::Client implementation.
+ void IssueCreateTransferCacheEntry(GLuint entry_type,
+ GLuint entry_id,
+ GLuint handle_shm_id,
+ GLuint handle_shm_offset,
+ GLuint data_shm_id,
+ GLuint data_shm_offset,
+ GLuint data_size) override;
+ void IssueDeleteTransferCacheEntry(GLuint entry_type,
+ GLuint entry_id) override;
+ void IssueUnlockTransferCacheEntry(GLuint entry_type,
+ GLuint entry_id) override;
+ CommandBuffer* command_buffer() const override;
+
private:
friend class GLES2ImplementationTest;
friend class VertexArrayObjectManager;
@@ -305,25 +218,23 @@ class GLES2_IMPL_EXPORT GLES2Implementation
// Used to track whether an extension is available
enum ExtensionStatus {
- kAvailableExtensionStatus,
- kUnavailableExtensionStatus,
- kUnknownExtensionStatus
+ kAvailableExtensionStatus,
+ kUnavailableExtensionStatus,
+ kUnknownExtensionStatus
};
enum Dimension {
- k2D,
- k3D,
+ k2D,
+ k3D,
};
-
// Base class for mapped resources.
struct MappedResource {
MappedResource(GLenum _access, int _shm_id, void* mem, unsigned int offset)
: access(_access),
shm_id(_shm_id),
shm_memory(mem),
- shm_offset(offset) {
- }
+ shm_offset(offset) {}
// access mode. Currently only GL_WRITE_ONLY is valid
GLenum access;
@@ -425,21 +336,14 @@ class GLES2_IMPL_EXPORT GLES2Implementation
GLES2Implementation* gles2_implementation_;
};
- // Gets the value of the result.
- template <typename T>
- T GetResultAs() {
- return static_cast<T>(GetResultBuffer());
- }
+ // ImplementationBase implementation.
+ void IssueShallowFlush() override;
// GpuControlClient implementation.
void OnGpuControlLostContext() final;
void OnGpuControlLostContextMaybeReentrant() final;
void OnGpuControlErrorMessage(const char* message, int32_t id) final;
- void* GetResultBuffer();
- int32_t GetResultShmId();
- uint32_t GetResultShmOffset();
-
bool IsChromiumFramebufferMultisampleAvailable();
bool IsExtensionAvailableHelper(
@@ -449,7 +353,6 @@ class GLES2_IMPL_EXPORT GLES2Implementation
GLenum GetGLError();
// Sets our wrapper for the GLError.
- void SetGLError(GLenum error, const char* function_name, const char* msg);
void SetGLErrorInvalidEnum(
const char* function_name, GLenum value, const char* label);
@@ -458,29 +361,6 @@ class GLES2_IMPL_EXPORT GLES2Implementation
return last_error_;
}
- // Waits for all commands to execute.
- void WaitForCmd();
-
- // TODO(gman): These bucket functions really seem like they belong in
- // CommandBufferHelper (or maybe BucketHelper?). Unfortunately they need
- // a transfer buffer to function which is currently managed by this class.
-
- // Gets the contents of a bucket.
- bool GetBucketContents(uint32_t bucket_id, std::vector<int8_t>* data);
-
- // Sets the contents of a bucket.
- void SetBucketContents(uint32_t bucket_id, const void* data, size_t size);
-
- // Sets the contents of a bucket as a string.
- void SetBucketAsCString(uint32_t bucket_id, const char* str);
-
- // Gets the contents of a bucket as a string. Returns false if there is no
- // string available which is a separate case from the empty string.
- bool GetBucketAsString(uint32_t bucket_id, std::string* str);
-
- // Sets the contents of a bucket as a string.
- void SetBucketAsString(uint32_t bucket_id, const std::string& str);
-
// Returns true if id is reserved.
bool IsBufferReservedId(GLuint id);
bool IsFramebufferReservedId(GLuint id) { return false; }
@@ -523,6 +403,7 @@ class GLES2_IMPL_EXPORT GLES2Implementation
void DeleteFramebuffersHelper(GLsizei n, const GLuint* framebuffers);
void DeleteRenderbuffersHelper(GLsizei n, const GLuint* renderbuffers);
void DeleteTexturesHelper(GLsizei n, const GLuint* textures);
+ void UnbindTexturesHelper(GLsizei n, const GLuint* textures);
bool DeleteProgramHelper(GLuint program);
bool DeleteShaderHelper(GLuint shader);
void DeleteQueriesEXTHelper(GLsizei n, const GLuint* queries);
@@ -684,6 +565,15 @@ class GLES2_IMPL_EXPORT GLES2Implementation
size_t* out_paths_offset,
uint32_t* out_transforms_shm_id,
size_t* out_transforms_offset);
+
+// Set to 1 to have the client fail when a GL error is generated.
+// This helps find bugs in the renderer since the debugger stops on the error.
+#if DCHECK_IS_ON()
+#if 0
+#define GL_CLIENT_FAIL_GL_ERRORS
+#endif
+#endif
+
#if defined(GL_CLIENT_FAIL_GL_ERRORS)
void CheckGLError();
void FailGLError(GLenum error);
@@ -705,13 +595,11 @@ class GLES2_IMPL_EXPORT GLES2Implementation
GLES2Util util_;
GLES2CmdHelper* helper_;
- TransferBufferInterface* transfer_buffer_;
std::string last_error_;
DebugMarkerManager debug_marker_manager_;
std::string this_in_hex_;
base::queue<int32_t> swap_buffers_tokens_;
- base::queue<int32_t> rate_limit_tokens_;
ExtensionStatus chromium_framebuffer_multisample_;
@@ -765,8 +653,13 @@ class GLES2_IMPL_EXPORT GLES2Implementation
GLuint bound_copy_write_buffer_;
GLuint bound_pixel_pack_buffer_;
GLuint bound_pixel_unpack_buffer_;
- GLuint bound_transform_feedback_buffer_;
GLuint bound_uniform_buffer_;
+ // We don't cache the currently bound transform feedback buffer, because
+ // it is part of the current transform feedback object. Caching the transform
+ // feedback object state correctly requires predicting if a call to
+ // glBeginTransformFeedback will succeed or fail, which in turn requires
+ // caching a whole bunch of other states such as the transform feedback
+ // varyings of the current program.
// The currently bound pixel transfer buffers.
GLuint bound_pixel_pack_transfer_buffer_id_;
@@ -781,8 +674,7 @@ class GLES2_IMPL_EXPORT GLES2Implementation
// Current GL error bits.
uint32_t error_bits_;
- // Whether or not to print debugging info.
- bool debug_;
+ LogSettings log_settings_;
// When true, the context is lost when a GL_OUT_OF_MEMORY error occurs.
const bool lose_context_when_out_of_memory_;
@@ -820,28 +712,20 @@ class GLES2_IMPL_EXPORT GLES2Implementation
typedef std::map<const void*, MappedTexture> MappedTextureMap;
MappedTextureMap mapped_textures_;
- std::unique_ptr<MappedMemoryManager> mapped_memory_;
-
scoped_refptr<ShareGroup> share_group_;
ShareGroupContextData share_group_context_data_;
- std::unique_ptr<QueryTracker> query_tracker_;
std::unique_ptr<IdAllocator>
id_allocators_[static_cast<int>(IdNamespaces::kNumIdNamespaces)];
std::unique_ptr<BufferTracker> buffer_tracker_;
- ClientTransferCache transfer_cache_;
base::Optional<ScopedTransferBufferPtr> raster_mapped_buffer_;
base::Callback<void(const char*, int32_t)> error_message_callback_;
- base::Closure lost_context_callback_;
- bool lost_context_callback_run_ = false;
int current_trace_stack_;
- GpuControl* const gpu_control_;
-
Capabilities capabilities_;
// Flag to indicate whether the implementation can retain resources, or
diff --git a/chromium/gpu/command_buffer/client/gles2_implementation_autogen.h b/chromium/gpu/command_buffer/client/gles2_implementation_autogen.h
index d32003b250b..1c762323411 100644
--- a/chromium/gpu/command_buffer/client/gles2_implementation_autogen.h
+++ b/chromium/gpu/command_buffer/client/gles2_implementation_autogen.h
@@ -1016,6 +1016,13 @@ void VerifySyncTokensCHROMIUM(GLbyte** sync_tokens, GLsizei count) override;
void WaitSyncTokenCHROMIUM(const GLbyte* sync_token) override;
+void UnpremultiplyAndDitherCopyCHROMIUM(GLuint source_id,
+ GLuint dest_id,
+ GLint x,
+ GLint y,
+ GLsizei width,
+ GLsizei height) override;
+
void DrawBuffersEXT(GLsizei count, const GLenum* bufs) override;
void DiscardBackbufferCHROMIUM() override;
@@ -1237,7 +1244,8 @@ void BeginRasterCHROMIUM(GLuint texture_id,
GLuint msaa_sample_count,
GLboolean can_use_lcd_text,
GLboolean use_distance_field_text,
- GLint pixel_config) override;
+ GLint color_type,
+ GLuint color_space_transfer_cache_id) override;
void* MapRasterCHROMIUM(GLsizeiptr size) override;
diff --git a/chromium/gpu/command_buffer/client/gles2_implementation_impl_autogen.h b/chromium/gpu/command_buffer/client/gles2_implementation_impl_autogen.h
index 11dd2828534..9641e07a22f 100644
--- a/chromium/gpu/command_buffer/client/gles2_implementation_impl_autogen.h
+++ b/chromium/gpu/command_buffer/client/gles2_implementation_impl_autogen.h
@@ -3249,6 +3249,32 @@ void GLES2Implementation::LoseContextCHROMIUM(GLenum current, GLenum other) {
CheckGLError();
}
+void GLES2Implementation::UnpremultiplyAndDitherCopyCHROMIUM(GLuint source_id,
+ GLuint dest_id,
+ GLint x,
+ GLint y,
+ GLsizei width,
+ GLsizei height) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix()
+ << "] glUnpremultiplyAndDitherCopyCHROMIUM(" << source_id
+ << ", " << dest_id << ", " << x << ", " << y << ", "
+ << width << ", " << height << ")");
+ if (width < 0) {
+ SetGLError(GL_INVALID_VALUE, "glUnpremultiplyAndDitherCopyCHROMIUM",
+ "width < 0");
+ return;
+ }
+ if (height < 0) {
+ SetGLError(GL_INVALID_VALUE, "glUnpremultiplyAndDitherCopyCHROMIUM",
+ "height < 0");
+ return;
+ }
+ helper_->UnpremultiplyAndDitherCopyCHROMIUM(source_id, dest_id, x, y, width,
+ height);
+ CheckGLError();
+}
+
void GLES2Implementation::DrawBuffersEXT(GLsizei count, const GLenum* bufs) {
GPU_CLIENT_SINGLE_THREAD_CHECK();
GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glDrawBuffersEXT(" << count << ", "
@@ -3547,22 +3573,24 @@ void GLES2Implementation::SetEnableDCLayersCHROMIUM(GLboolean enabled) {
CheckGLError();
}
-void GLES2Implementation::BeginRasterCHROMIUM(GLuint texture_id,
- GLuint sk_color,
- GLuint msaa_sample_count,
- GLboolean can_use_lcd_text,
- GLboolean use_distance_field_text,
- GLint pixel_config) {
+void GLES2Implementation::BeginRasterCHROMIUM(
+ GLuint texture_id,
+ GLuint sk_color,
+ GLuint msaa_sample_count,
+ GLboolean can_use_lcd_text,
+ GLboolean use_distance_field_text,
+ GLint color_type,
+ GLuint color_space_transfer_cache_id) {
GPU_CLIENT_SINGLE_THREAD_CHECK();
- GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glBeginRasterCHROMIUM("
- << texture_id << ", " << sk_color << ", "
- << msaa_sample_count << ", "
- << GLES2Util::GetStringBool(can_use_lcd_text) << ", "
- << GLES2Util::GetStringBool(use_distance_field_text)
- << ", " << pixel_config << ")");
+ GPU_CLIENT_LOG(
+ "[" << GetLogPrefix() << "] glBeginRasterCHROMIUM(" << texture_id << ", "
+ << sk_color << ", " << msaa_sample_count << ", "
+ << GLES2Util::GetStringBool(can_use_lcd_text) << ", "
+ << GLES2Util::GetStringBool(use_distance_field_text) << ", "
+ << color_type << ", " << color_space_transfer_cache_id << ")");
helper_->BeginRasterCHROMIUM(texture_id, sk_color, msaa_sample_count,
can_use_lcd_text, use_distance_field_text,
- pixel_config);
+ color_type, color_space_transfer_cache_id);
CheckGLError();
}
diff --git a/chromium/gpu/command_buffer/client/gles2_implementation_unittest.cc b/chromium/gpu/command_buffer/client/gles2_implementation_unittest.cc
index e41c7eeecb7..c25f145e0b1 100644
--- a/chromium/gpu/command_buffer/client/gles2_implementation_unittest.cc
+++ b/chromium/gpu/command_buffer/client/gles2_implementation_unittest.cc
@@ -19,11 +19,11 @@
#include "base/compiler_specific.h"
#include "gpu/command_buffer/client/client_test_helper.h"
#include "gpu/command_buffer/client/gles2_cmd_helper.h"
+#include "gpu/command_buffer/client/mock_transfer_buffer.h"
#include "gpu/command_buffer/client/program_info_manager.h"
#include "gpu/command_buffer/client/query_tracker.h"
#include "gpu/command_buffer/client/ring_buffer.h"
#include "gpu/command_buffer/client/shared_memory_limits.h"
-#include "gpu/command_buffer/client/transfer_buffer.h"
#include "gpu/command_buffer/common/command_buffer.h"
#include "gpu/command_buffer/common/sync_token.h"
#include "testing/gmock/include/gmock/gmock.h"
@@ -95,254 +95,6 @@ struct Str7 {
};
#pragma pack(pop)
-class MockTransferBuffer : public TransferBufferInterface {
- public:
- struct ExpectedMemoryInfo {
- uint32_t offset;
- int32_t id;
- uint8_t* ptr;
- };
-
- MockTransferBuffer(
- CommandBuffer* command_buffer,
- unsigned int size,
- unsigned int result_size,
- unsigned int alignment,
- bool initialize_fail)
- : command_buffer_(command_buffer),
- size_(size),
- result_size_(result_size),
- alignment_(alignment),
- actual_buffer_index_(0),
- expected_buffer_index_(0),
- last_alloc_(NULL),
- expected_offset_(result_size),
- actual_offset_(result_size),
- initialize_fail_(initialize_fail) {
- // We have to allocate the buffers here because
- // we need to know their address before GLES2Implementation::Initialize
- // is called.
- for (int ii = 0; ii < kNumBuffers; ++ii) {
- buffers_[ii] = command_buffer_->CreateTransferBuffer(
- size_ + ii * alignment_,
- &buffer_ids_[ii]);
- EXPECT_NE(-1, buffer_ids_[ii]);
- }
- }
-
- ~MockTransferBuffer() override = default;
-
- base::SharedMemoryHandle shared_memory_handle() const override;
- bool Initialize(unsigned int starting_buffer_size,
- unsigned int result_size,
- unsigned int /* min_buffer_size */,
- unsigned int /* max_buffer_size */,
- unsigned int alignment,
- unsigned int size_to_flush) override;
- int GetShmId() override;
- void* GetResultBuffer() override;
- int GetResultOffset() override;
- void Free() override;
- bool HaveBuffer() const override;
- void* AllocUpTo(unsigned int size, unsigned int* size_allocated) override;
- void* Alloc(unsigned int size) override;
- RingBuffer::Offset GetOffset(void* pointer) const override;
- void DiscardBlock(void* p) override;
- void FreePendingToken(void* p, unsigned int /* token */) override;
- unsigned int GetSize() const override;
- unsigned int GetFreeSize() const override;
- unsigned int GetFragmentedFreeSize() const override;
- void ShrinkLastBlock(unsigned int new_size) override;
-
- size_t MaxTransferBufferSize() {
- return size_ - result_size_;
- }
-
- unsigned int RoundToAlignment(unsigned int size) {
- return (size + alignment_ - 1) & ~(alignment_ - 1);
- }
-
- bool InSync() {
- return expected_buffer_index_ == actual_buffer_index_ &&
- expected_offset_ == actual_offset_;
- }
-
- ExpectedMemoryInfo GetExpectedMemory(size_t size) {
- ExpectedMemoryInfo mem;
- mem.offset = AllocateExpectedTransferBuffer(size);
- mem.id = GetExpectedTransferBufferId();
- mem.ptr = static_cast<uint8_t*>(
- GetExpectedTransferAddressFromOffset(mem.offset, size));
- return mem;
- }
-
- ExpectedMemoryInfo GetExpectedResultMemory(size_t size) {
- ExpectedMemoryInfo mem;
- mem.offset = GetExpectedResultBufferOffset();
- mem.id = GetExpectedResultBufferId();
- mem.ptr = static_cast<uint8_t*>(
- GetExpectedTransferAddressFromOffset(mem.offset, size));
- return mem;
- }
-
- private:
- static const int kNumBuffers = 2;
-
- uint8_t* actual_buffer() const {
- return static_cast<uint8_t*>(buffers_[actual_buffer_index_]->memory());
- }
-
- uint8_t* expected_buffer() const {
- return static_cast<uint8_t*>(buffers_[expected_buffer_index_]->memory());
- }
-
- uint32_t AllocateExpectedTransferBuffer(size_t size) {
- EXPECT_LE(size, MaxTransferBufferSize());
-
- // Toggle which buffer we get each time to simulate the buffer being
- // reallocated.
- expected_buffer_index_ = (expected_buffer_index_ + 1) % kNumBuffers;
-
- if (expected_offset_ + size > size_) {
- expected_offset_ = result_size_;
- }
- uint32_t offset = expected_offset_;
- expected_offset_ += RoundToAlignment(size);
-
- // Make sure each buffer has a different offset.
- return offset + expected_buffer_index_ * alignment_;
- }
-
- void* GetExpectedTransferAddressFromOffset(uint32_t offset, size_t size) {
- EXPECT_GE(offset, expected_buffer_index_ * alignment_);
- EXPECT_LE(offset + size, size_ + expected_buffer_index_ * alignment_);
- return expected_buffer() + offset;
- }
-
- int GetExpectedResultBufferId() {
- return buffer_ids_[expected_buffer_index_];
- }
-
- uint32_t GetExpectedResultBufferOffset() {
- return expected_buffer_index_ * alignment_;
- }
-
- int GetExpectedTransferBufferId() {
- return buffer_ids_[expected_buffer_index_];
- }
-
- CommandBuffer* command_buffer_;
- size_t size_;
- size_t result_size_;
- uint32_t alignment_;
- int buffer_ids_[kNumBuffers];
- scoped_refptr<Buffer> buffers_[kNumBuffers];
- int actual_buffer_index_;
- int expected_buffer_index_;
- void* last_alloc_;
- uint32_t expected_offset_;
- uint32_t actual_offset_;
- bool initialize_fail_;
-
- DISALLOW_COPY_AND_ASSIGN(MockTransferBuffer);
-};
-
-base::SharedMemoryHandle MockTransferBuffer::shared_memory_handle() const {
- return base::SharedMemoryHandle();
-}
-
-bool MockTransferBuffer::Initialize(
- unsigned int starting_buffer_size,
- unsigned int result_size,
- unsigned int /* min_buffer_size */,
- unsigned int /* max_buffer_size */,
- unsigned int alignment,
- unsigned int /* size_to_flush */) {
- // Just check they match.
- return size_ == starting_buffer_size &&
- result_size_ == result_size &&
- alignment_ == alignment && !initialize_fail_;
-};
-
-int MockTransferBuffer::GetShmId() {
- return buffer_ids_[actual_buffer_index_];
-}
-
-void* MockTransferBuffer::GetResultBuffer() {
- return actual_buffer() + actual_buffer_index_ * alignment_;
-}
-
-int MockTransferBuffer::GetResultOffset() {
- return actual_buffer_index_ * alignment_;
-}
-
-void MockTransferBuffer::Free() {
- NOTREACHED();
-}
-
-bool MockTransferBuffer::HaveBuffer() const {
- return true;
-}
-
-void* MockTransferBuffer::AllocUpTo(
- unsigned int size, unsigned int* size_allocated) {
- EXPECT_TRUE(size_allocated != NULL);
- EXPECT_TRUE(last_alloc_ == NULL);
-
- // Toggle which buffer we get each time to simulate the buffer being
- // reallocated.
- actual_buffer_index_ = (actual_buffer_index_ + 1) % kNumBuffers;
-
- size = std::min(static_cast<size_t>(size), MaxTransferBufferSize());
- if (actual_offset_ + size > size_) {
- actual_offset_ = result_size_;
- }
- uint32_t offset = actual_offset_;
- actual_offset_ += RoundToAlignment(size);
- *size_allocated = size;
-
- // Make sure each buffer has a different offset.
- last_alloc_ = actual_buffer() + offset + actual_buffer_index_ * alignment_;
- return last_alloc_;
-}
-
-void* MockTransferBuffer::Alloc(unsigned int size) {
- EXPECT_LE(size, MaxTransferBufferSize());
- unsigned int temp = 0;
- void* p = AllocUpTo(size, &temp);
- EXPECT_EQ(temp, size);
- return p;
-}
-
-RingBuffer::Offset MockTransferBuffer::GetOffset(void* pointer) const {
- // Make sure each buffer has a different offset.
- return static_cast<uint8_t*>(pointer) - actual_buffer();
-}
-
-void MockTransferBuffer::DiscardBlock(void* p) {
- EXPECT_EQ(last_alloc_, p);
- last_alloc_ = NULL;
-}
-
-void MockTransferBuffer::FreePendingToken(void* p, unsigned int /* token */) {
- EXPECT_EQ(last_alloc_, p);
- last_alloc_ = NULL;
-}
-
-unsigned int MockTransferBuffer::GetSize() const {
- return 0;
-}
-
-unsigned int MockTransferBuffer::GetFreeSize() const {
- return 0;
-}
-
-unsigned int MockTransferBuffer::GetFragmentedFreeSize() const {
- return 0;
-}
-
-void MockTransferBuffer::ShrinkLastBlock(unsigned int new_size) {}
-
// API wrapper for Buffers.
class GenBuffersAPI {
public:
@@ -4385,21 +4137,22 @@ TEST_F(GLES3ImplementationTest, GetBufferSubDataAsyncCHROMIUM) {
const GLuint kBufferId = 123;
void* mem;
- const int TARGET_COUNT = 8;
- GLenum targets[TARGET_COUNT] = {
- GL_ARRAY_BUFFER,
- GL_ELEMENT_ARRAY_BUFFER,
- GL_COPY_READ_BUFFER,
- GL_COPY_WRITE_BUFFER,
- GL_TRANSFORM_FEEDBACK_BUFFER,
- GL_UNIFORM_BUFFER,
- GL_PIXEL_PACK_BUFFER,
- GL_PIXEL_UNPACK_BUFFER,
+ std::vector<GLenum> targets = {
+ GL_ARRAY_BUFFER, GL_ELEMENT_ARRAY_BUFFER, GL_COPY_READ_BUFFER,
+ GL_COPY_WRITE_BUFFER, GL_TRANSFORM_FEEDBACK_BUFFER, GL_UNIFORM_BUFFER,
+ GL_PIXEL_PACK_BUFFER, GL_PIXEL_UNPACK_BUFFER,
};
// Positive tests
- for (int i = 0; i < TARGET_COUNT; i++) {
+ for (size_t i = 0; i < targets.size(); i++) {
gl_->BindBuffer(targets[i], kBufferId);
+ if (targets[i] == GL_TRANSFORM_FEEDBACK_BUFFER) {
+ ExpectedMemoryInfo result =
+ GetExpectedResultMemory(sizeof(cmds::GetIntegerv::Result));
+ EXPECT_CALL(*command_buffer(), OnFlush())
+ .WillOnce(SetMemory(result.ptr, SizedResultHelper<GLuint>(1)))
+ .RetiresOnSaturation();
+ }
mem = gl_->GetBufferSubDataAsyncCHROMIUM(targets[i], 10, 64);
EXPECT_TRUE(mem != nullptr);
EXPECT_EQ(GL_NO_ERROR, CheckError());
@@ -4409,9 +4162,16 @@ TEST_F(GLES3ImplementationTest, GetBufferSubDataAsyncCHROMIUM) {
}
// Negative tests: invalid target
- for (int i = 0; i < TARGET_COUNT; i++) {
- GLenum wrong_target = targets[(i + 1) % TARGET_COUNT];
+ for (size_t i = 0; i < targets.size(); i++) {
+ GLenum wrong_target = targets[(i + 1) % targets.size()];
gl_->BindBuffer(targets[i], kBufferId);
+ if (wrong_target == GL_TRANSFORM_FEEDBACK_BUFFER) {
+ ExpectedMemoryInfo result =
+ GetExpectedResultMemory(sizeof(cmds::GetIntegerv::Result));
+ EXPECT_CALL(*command_buffer(), OnFlush())
+ .WillOnce(SetMemory(result.ptr, SizedResultHelper<GLuint>(0)))
+ .RetiresOnSaturation();
+ }
mem = gl_->GetBufferSubDataAsyncCHROMIUM(wrong_target, 10, 64);
EXPECT_TRUE(mem == nullptr);
EXPECT_EQ(GL_INVALID_OPERATION, CheckError());
@@ -4479,15 +4239,19 @@ TEST_F(GLES2ImplementationTest, SignalSyncToken) {
// Request a signal sync token, which gives a callback to the GpuControl to
// run when the sync token is reached.
- base::Closure signal_closure;
- EXPECT_CALL(*gpu_control_, SignalSyncToken(_, _))
- .WillOnce(SaveArg<1>(&signal_closure));
- gl_->SignalSyncToken(sync_token, base::Bind(&CountCallback, &signaled_count));
+ base::OnceClosure signal_closure;
+ EXPECT_CALL(*gpu_control_, DoSignalSyncToken(_, _))
+ .WillOnce(Invoke([&signal_closure](const SyncToken& sync_token,
+ base::OnceClosure* callback) {
+ signal_closure = std::move(*callback);
+ }));
+ gl_->SignalSyncToken(sync_token,
+ base::BindOnce(&CountCallback, &signaled_count));
EXPECT_EQ(0, signaled_count);
// When GpuControl runs the callback, the original callback we gave to
// GLES2Implementation is run.
- signal_closure.Run();
+ std::move(signal_closure).Run();
EXPECT_EQ(1, signaled_count);
}
@@ -4509,10 +4273,14 @@ TEST_F(GLES2ImplementationTest, SignalSyncTokenAfterContextLoss) {
// Request a signal sync token, which gives a callback to the GpuControl to
// run when the sync token is reached.
- base::Closure signal_closure;
- EXPECT_CALL(*gpu_control_, SignalSyncToken(_, _))
- .WillOnce(SaveArg<1>(&signal_closure));
- gl_->SignalSyncToken(sync_token, base::Bind(&CountCallback, &signaled_count));
+ base::OnceClosure signal_closure;
+ EXPECT_CALL(*gpu_control_, DoSignalSyncToken(_, _))
+ .WillOnce(Invoke([&signal_closure](const SyncToken& sync_token,
+ base::OnceClosure* callback) {
+ signal_closure = std::move(*callback);
+ }));
+ gl_->SignalSyncToken(sync_token,
+ base::BindOnce(&CountCallback, &signaled_count));
EXPECT_EQ(0, signaled_count);
// Inform the GLES2Implementation that the context is lost.
@@ -4522,14 +4290,14 @@ TEST_F(GLES2ImplementationTest, SignalSyncTokenAfterContextLoss) {
// When GpuControl runs the callback, the original callback we gave to
// GLES2Implementation is *not* run, since the context is lost and we
// have already run the lost context callback.
- signal_closure.Run();
+ std::move(signal_closure).Run();
EXPECT_EQ(0, signaled_count);
}
TEST_F(GLES2ImplementationTest, ReportLoss) {
GpuControlClient* gl_as_client = gl_;
int lost_count = 0;
- gl_->SetLostContextCallback(base::Bind(&CountCallback, &lost_count));
+ gl_->SetLostContextCallback(base::BindOnce(&CountCallback, &lost_count));
EXPECT_EQ(0, lost_count);
EXPECT_EQ(static_cast<GLenum>(GL_NO_ERROR), gl_->GetGraphicsResetStatusKHR());
@@ -4543,7 +4311,7 @@ TEST_F(GLES2ImplementationTest, ReportLoss) {
TEST_F(GLES2ImplementationTest, ReportLossReentrant) {
GpuControlClient* gl_as_client = gl_;
int lost_count = 0;
- gl_->SetLostContextCallback(base::Bind(&CountCallback, &lost_count));
+ gl_->SetLostContextCallback(base::BindOnce(&CountCallback, &lost_count));
EXPECT_EQ(0, lost_count);
EXPECT_EQ(static_cast<GLenum>(GL_NO_ERROR), gl_->GetGraphicsResetStatusKHR());
@@ -4588,13 +4356,16 @@ TEST_F(GLES2ImplementationTest, DiscardableMemoryDelete) {
share_group_->discardable_texture_manager()->TextureIsValid(texture_id));
}
-TEST_F(GLES2ImplementationTest, DiscardableMemoryLockFail) {
+TEST_F(GLES2ImplementationTest, DiscardableTextureLockFail) {
const GLuint texture_id = 1;
gl_->InitializeDiscardableTextureCHROMIUM(texture_id);
EXPECT_TRUE(
share_group_->discardable_texture_manager()->TextureIsValid(texture_id));
- // Unlock and delete the handle.
+ // Unlock the handle on the client side.
+ gl_->UnlockDiscardableTextureCHROMIUM(texture_id);
+
+ // Unlock and delete the handle on the service side.
ClientDiscardableHandle client_handle =
share_group_->discardable_texture_manager()->GetHandleForTesting(
texture_id);
@@ -4610,7 +4381,7 @@ TEST_F(GLES2ImplementationTest, DiscardableMemoryLockFail) {
share_group_->discardable_texture_manager()->TextureIsValid(texture_id));
}
-TEST_F(GLES2ImplementationTest, DiscardableMemoryDoubleInitError) {
+TEST_F(GLES2ImplementationTest, DiscardableTextureDoubleInitError) {
const GLuint texture_id = 1;
gl_->InitializeDiscardableTextureCHROMIUM(texture_id);
EXPECT_EQ(GL_NO_ERROR, CheckError());
@@ -4618,12 +4389,42 @@ TEST_F(GLES2ImplementationTest, DiscardableMemoryDoubleInitError) {
EXPECT_EQ(GL_INVALID_VALUE, CheckError());
}
-TEST_F(GLES2ImplementationTest, DiscardableMemoryLockError) {
+TEST_F(GLES2ImplementationTest, DiscardableTextureLockError) {
const GLuint texture_id = 1;
EXPECT_FALSE(gl_->LockDiscardableTextureCHROMIUM(texture_id));
EXPECT_EQ(GL_INVALID_VALUE, CheckError());
}
+TEST_F(GLES2ImplementationTest, DiscardableTextureLockCounting) {
+ const GLint texture_id = 1;
+ gl_->InitializeDiscardableTextureCHROMIUM(texture_id);
+ EXPECT_TRUE(
+ share_group_->discardable_texture_manager()->TextureIsValid(texture_id));
+
+ // Bind the texture.
+ gl_->BindTexture(GL_TEXTURE_2D, texture_id);
+ GLint bound_texture_id = 0;
+ gl_->GetIntegerv(GL_TEXTURE_BINDING_2D, &bound_texture_id);
+ EXPECT_EQ(texture_id, bound_texture_id);
+
+ // Lock the texture 3 more times (for 4 locks total).
+ for (int i = 0; i < 3; ++i) {
+ gl_->LockDiscardableTextureCHROMIUM(texture_id);
+ }
+
+ // Unlock 4 times. Only after the last unlock should the texture be unbound.
+ for (int i = 0; i < 4; ++i) {
+ gl_->UnlockDiscardableTextureCHROMIUM(texture_id);
+ bound_texture_id = 0;
+ gl_->GetIntegerv(GL_TEXTURE_BINDING_2D, &bound_texture_id);
+ if (i < 3) {
+ EXPECT_EQ(texture_id, bound_texture_id);
+ } else {
+ EXPECT_EQ(0, bound_texture_id);
+ }
+ }
+}
+
#include "base/macros.h"
#include "gpu/command_buffer/client/gles2_implementation_unittest_autogen.h"
diff --git a/chromium/gpu/command_buffer/client/gles2_implementation_unittest_autogen.h b/chromium/gpu/command_buffer/client/gles2_implementation_unittest_autogen.h
index 5e1a610d9cf..61e1ec9af94 100644
--- a/chromium/gpu/command_buffer/client/gles2_implementation_unittest_autogen.h
+++ b/chromium/gpu/command_buffer/client/gles2_implementation_unittest_autogen.h
@@ -3087,9 +3087,9 @@ TEST_F(GLES2ImplementationTest, BeginRasterCHROMIUM) {
cmds::BeginRasterCHROMIUM cmd;
};
Cmds expected;
- expected.cmd.Init(1, 2, 3, true, true, 6);
+ expected.cmd.Init(1, 2, 3, true, true, 6, 7);
- gl_->BeginRasterCHROMIUM(1, 2, 3, true, true, 6);
+ gl_->BeginRasterCHROMIUM(1, 2, 3, true, true, 6, 7);
EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
}
diff --git a/chromium/gpu/command_buffer/client/gles2_interface_autogen.h b/chromium/gpu/command_buffer/client/gles2_interface_autogen.h
index fd6e4c9e24a..aa47322b8e4 100644
--- a/chromium/gpu/command_buffer/client/gles2_interface_autogen.h
+++ b/chromium/gpu/command_buffer/client/gles2_interface_autogen.h
@@ -744,6 +744,12 @@ virtual void GenSyncTokenCHROMIUM(GLbyte* sync_token) = 0;
virtual void GenUnverifiedSyncTokenCHROMIUM(GLbyte* sync_token) = 0;
virtual void VerifySyncTokensCHROMIUM(GLbyte** sync_tokens, GLsizei count) = 0;
virtual void WaitSyncTokenCHROMIUM(const GLbyte* sync_token) = 0;
+virtual void UnpremultiplyAndDitherCopyCHROMIUM(GLuint source_id,
+ GLuint dest_id,
+ GLint x,
+ GLint y,
+ GLsizei width,
+ GLsizei height) = 0;
virtual void DrawBuffersEXT(GLsizei count, const GLenum* bufs) = 0;
virtual void DiscardBackbufferCHROMIUM() = 0;
virtual void ScheduleOverlayPlaneCHROMIUM(GLint plane_z_order,
@@ -919,7 +925,8 @@ virtual void BeginRasterCHROMIUM(GLuint texture_id,
GLuint msaa_sample_count,
GLboolean can_use_lcd_text,
GLboolean use_distance_field_text,
- GLint pixel_config) = 0;
+ GLint color_type,
+ GLuint color_space_transfer_cache_id) = 0;
virtual void* MapRasterCHROMIUM(GLsizeiptr size) = 0;
virtual void UnmapRasterCHROMIUM(GLsizeiptr written_size) = 0;
virtual void EndRasterCHROMIUM() = 0;
diff --git a/chromium/gpu/command_buffer/client/gles2_interface_stub_autogen.h b/chromium/gpu/command_buffer/client/gles2_interface_stub_autogen.h
index afd4c2d19d1..d19d098877d 100644
--- a/chromium/gpu/command_buffer/client/gles2_interface_stub_autogen.h
+++ b/chromium/gpu/command_buffer/client/gles2_interface_stub_autogen.h
@@ -722,6 +722,12 @@ void GenSyncTokenCHROMIUM(GLbyte* sync_token) override;
void GenUnverifiedSyncTokenCHROMIUM(GLbyte* sync_token) override;
void VerifySyncTokensCHROMIUM(GLbyte** sync_tokens, GLsizei count) override;
void WaitSyncTokenCHROMIUM(const GLbyte* sync_token) override;
+void UnpremultiplyAndDitherCopyCHROMIUM(GLuint source_id,
+ GLuint dest_id,
+ GLint x,
+ GLint y,
+ GLsizei width,
+ GLsizei height) override;
void DrawBuffersEXT(GLsizei count, const GLenum* bufs) override;
void DiscardBackbufferCHROMIUM() override;
void ScheduleOverlayPlaneCHROMIUM(GLint plane_z_order,
@@ -892,7 +898,8 @@ void BeginRasterCHROMIUM(GLuint texture_id,
GLuint msaa_sample_count,
GLboolean can_use_lcd_text,
GLboolean use_distance_field_text,
- GLint pixel_config) override;
+ GLint color_type,
+ GLuint color_space_transfer_cache_id) override;
void* MapRasterCHROMIUM(GLsizeiptr size) override;
void UnmapRasterCHROMIUM(GLsizeiptr written_size) override;
void EndRasterCHROMIUM() override;
diff --git a/chromium/gpu/command_buffer/client/gles2_interface_stub_impl_autogen.h b/chromium/gpu/command_buffer/client/gles2_interface_stub_impl_autogen.h
index eaa3c24553a..0eab75fcf87 100644
--- a/chromium/gpu/command_buffer/client/gles2_interface_stub_impl_autogen.h
+++ b/chromium/gpu/command_buffer/client/gles2_interface_stub_impl_autogen.h
@@ -982,6 +982,13 @@ void GLES2InterfaceStub::VerifySyncTokensCHROMIUM(GLbyte** /* sync_tokens */,
GLsizei /* count */) {}
void GLES2InterfaceStub::WaitSyncTokenCHROMIUM(const GLbyte* /* sync_token */) {
}
+void GLES2InterfaceStub::UnpremultiplyAndDitherCopyCHROMIUM(
+ GLuint /* source_id */,
+ GLuint /* dest_id */,
+ GLint /* x */,
+ GLint /* y */,
+ GLsizei /* width */,
+ GLsizei /* height */) {}
void GLES2InterfaceStub::DrawBuffersEXT(GLsizei /* count */,
const GLenum* /* bufs */) {}
void GLES2InterfaceStub::DiscardBackbufferCHROMIUM() {}
@@ -1199,7 +1206,8 @@ void GLES2InterfaceStub::BeginRasterCHROMIUM(
GLuint /* msaa_sample_count */,
GLboolean /* can_use_lcd_text */,
GLboolean /* use_distance_field_text */,
- GLint /* pixel_config */) {}
+ GLint /* color_type */,
+ GLuint /* color_space_transfer_cache_id */) {}
void* GLES2InterfaceStub::MapRasterCHROMIUM(GLsizeiptr /* size */) {
return 0;
}
diff --git a/chromium/gpu/command_buffer/client/gles2_trace_implementation_autogen.h b/chromium/gpu/command_buffer/client/gles2_trace_implementation_autogen.h
index 15b63b534fb..0aa7f89c0cb 100644
--- a/chromium/gpu/command_buffer/client/gles2_trace_implementation_autogen.h
+++ b/chromium/gpu/command_buffer/client/gles2_trace_implementation_autogen.h
@@ -722,6 +722,12 @@ void GenSyncTokenCHROMIUM(GLbyte* sync_token) override;
void GenUnverifiedSyncTokenCHROMIUM(GLbyte* sync_token) override;
void VerifySyncTokensCHROMIUM(GLbyte** sync_tokens, GLsizei count) override;
void WaitSyncTokenCHROMIUM(const GLbyte* sync_token) override;
+void UnpremultiplyAndDitherCopyCHROMIUM(GLuint source_id,
+ GLuint dest_id,
+ GLint x,
+ GLint y,
+ GLsizei width,
+ GLsizei height) override;
void DrawBuffersEXT(GLsizei count, const GLenum* bufs) override;
void DiscardBackbufferCHROMIUM() override;
void ScheduleOverlayPlaneCHROMIUM(GLint plane_z_order,
@@ -892,7 +898,8 @@ void BeginRasterCHROMIUM(GLuint texture_id,
GLuint msaa_sample_count,
GLboolean can_use_lcd_text,
GLboolean use_distance_field_text,
- GLint pixel_config) override;
+ GLint color_type,
+ GLuint color_space_transfer_cache_id) override;
void* MapRasterCHROMIUM(GLsizeiptr size) override;
void UnmapRasterCHROMIUM(GLsizeiptr written_size) override;
void EndRasterCHROMIUM() override;
diff --git a/chromium/gpu/command_buffer/client/gles2_trace_implementation_impl_autogen.h b/chromium/gpu/command_buffer/client/gles2_trace_implementation_impl_autogen.h
index 1e65bb56e59..f6656262986 100644
--- a/chromium/gpu/command_buffer/client/gles2_trace_implementation_impl_autogen.h
+++ b/chromium/gpu/command_buffer/client/gles2_trace_implementation_impl_autogen.h
@@ -2105,6 +2105,19 @@ void GLES2TraceImplementation::WaitSyncTokenCHROMIUM(const GLbyte* sync_token) {
gl_->WaitSyncTokenCHROMIUM(sync_token);
}
+void GLES2TraceImplementation::UnpremultiplyAndDitherCopyCHROMIUM(
+ GLuint source_id,
+ GLuint dest_id,
+ GLint x,
+ GLint y,
+ GLsizei width,
+ GLsizei height) {
+ TRACE_EVENT_BINARY_EFFICIENT0(
+ "gpu", "GLES2Trace::UnpremultiplyAndDitherCopyCHROMIUM");
+ gl_->UnpremultiplyAndDitherCopyCHROMIUM(source_id, dest_id, x, y, width,
+ height);
+}
+
void GLES2TraceImplementation::DrawBuffersEXT(GLsizei count,
const GLenum* bufs) {
TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::DrawBuffersEXT");
@@ -2561,11 +2574,12 @@ void GLES2TraceImplementation::BeginRasterCHROMIUM(
GLuint msaa_sample_count,
GLboolean can_use_lcd_text,
GLboolean use_distance_field_text,
- GLint pixel_config) {
+ GLint color_type,
+ GLuint color_space_transfer_cache_id) {
TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::BeginRasterCHROMIUM");
gl_->BeginRasterCHROMIUM(texture_id, sk_color, msaa_sample_count,
can_use_lcd_text, use_distance_field_text,
- pixel_config);
+ color_type, color_space_transfer_cache_id);
}
void* GLES2TraceImplementation::MapRasterCHROMIUM(GLsizeiptr size) {
diff --git a/chromium/gpu/command_buffer/client/gpu_control.h b/chromium/gpu/command_buffer/client/gpu_control.h
index b9e6ccd408e..39b9bd60a92 100644
--- a/chromium/gpu/command_buffer/client/gpu_control.h
+++ b/chromium/gpu/command_buffer/client/gpu_control.h
@@ -55,7 +55,7 @@ class GPU_EXPORT GpuControl {
// Runs |callback| when a query created via glCreateQueryEXT() has cleared
// passed the glEndQueryEXT() point.
- virtual void SignalQuery(uint32_t query, const base::Closure& callback) = 0;
+ virtual void SignalQuery(uint32_t query, base::OnceClosure callback) = 0;
virtual void CreateGpuFence(uint32_t gpu_fence_id, ClientGpuFence source) = 0;
virtual void GetGpuFence(
@@ -102,7 +102,7 @@ class GPU_EXPORT GpuControl {
// Runs |callback| when sync token is signaled.
virtual void SignalSyncToken(const SyncToken& sync_token,
- const base::Closure& callback) = 0;
+ base::OnceClosure callback) = 0;
// This allows the command buffer proxy to mark the next flush with sync token
// dependencies for the gpu scheduler. This is used in addition to the
diff --git a/chromium/gpu/command_buffer/client/implementation_base.cc b/chromium/gpu/command_buffer/client/implementation_base.cc
new file mode 100644
index 00000000000..ff791c5a3d6
--- /dev/null
+++ b/chromium/gpu/command_buffer/client/implementation_base.cc
@@ -0,0 +1,340 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "gpu/command_buffer/client/implementation_base.h"
+
+#include "base/strings/stringprintf.h"
+#include "base/trace_event/memory_dump_manager.h"
+#include "base/trace_event/trace_event.h"
+#include "gpu/command_buffer/client/cmd_buffer_helper.h"
+#include "gpu/command_buffer/client/gpu_control.h"
+#include "gpu/command_buffer/client/mapped_memory.h"
+#include "gpu/command_buffer/client/query_tracker.h"
+#include "gpu/command_buffer/client/shared_memory_limits.h"
+#include "gpu/command_buffer/common/sync_token.h"
+
+namespace gpu {
+
+#if !defined(_MSC_VER)
+const size_t ImplementationBase::kMaxSizeOfSimpleResult;
+const unsigned int ImplementationBase::kStartingOffset;
+#endif
+
+ImplementationBase::ImplementationBase(CommandBufferHelper* helper,
+ TransferBufferInterface* transfer_buffer,
+ GpuControl* gpu_control)
+ : transfer_buffer_(transfer_buffer),
+ gpu_control_(gpu_control),
+ helper_(helper),
+ transfer_cache_(this),
+ weak_ptr_factory_(this) {}
+
+ImplementationBase::~ImplementationBase() {
+ // The gpu_control_ outlives this class, so clear the client on it before we
+ // self-destruct.
+ gpu_control_->SetGpuControlClient(nullptr);
+}
+
+void ImplementationBase::FreeUnusedSharedMemory() {
+ mapped_memory_->FreeUnused();
+}
+
+void ImplementationBase::FreeEverything() {
+ query_tracker_->Shrink(helper_);
+ FreeUnusedSharedMemory();
+ transfer_buffer_->Free();
+ helper_->FreeRingBuffer();
+}
+
+void ImplementationBase::SetLostContextCallback(base::OnceClosure callback) {
+ lost_context_callback_ = std::move(callback);
+}
+
+void ImplementationBase::FlushPendingWork() {
+ gpu_control_->FlushPendingWork();
+}
+
+void ImplementationBase::SignalSyncToken(const SyncToken& sync_token,
+ base::OnceClosure callback) {
+ SyncToken verified_sync_token;
+ if (sync_token.HasData() &&
+ GetVerifiedSyncTokenForIPC(sync_token, &verified_sync_token)) {
+ // We can only send verified sync tokens across IPC.
+ gpu_control_->SignalSyncToken(
+ verified_sync_token,
+ base::BindOnce(&ImplementationBase::RunIfContextNotLost,
+ weak_ptr_factory_.GetWeakPtr(), std::move(callback)));
+ } else {
+ // Invalid sync token, just call the callback immediately.
+ std::move(callback).Run();
+ }
+}
+
+// This may be called from any thread. It's safe to access gpu_control_ without
+// the lock because it is const.
+bool ImplementationBase::IsSyncTokenSignaled(const SyncToken& sync_token) {
+ // Check that the sync token belongs to this context.
+ DCHECK_EQ(gpu_control_->GetNamespaceID(), sync_token.namespace_id());
+ DCHECK_EQ(gpu_control_->GetCommandBufferID(), sync_token.command_buffer_id());
+ return gpu_control_->IsFenceSyncReleased(sync_token.release_count());
+}
+
+void ImplementationBase::SignalQuery(uint32_t query,
+ base::OnceClosure callback) {
+ // Flush previously entered commands to ensure ordering with any
+ // glBeginQueryEXT() calls that may have been put into the context.
+ IssueShallowFlush();
+ gpu_control_->SignalQuery(
+ query,
+ base::BindOnce(&ImplementationBase::RunIfContextNotLost,
+ weak_ptr_factory_.GetWeakPtr(), std::move(callback)));
+}
+
+void ImplementationBase::GetGpuFence(
+ uint32_t gpu_fence_id,
+ base::OnceCallback<void(std::unique_ptr<gfx::GpuFence>)> callback) {
+ // This ShallowFlush is required to ensure that the GetGpuFence
+ // call is processed after the preceding CreateGpuFenceCHROMIUM call.
+ IssueShallowFlush();
+ gpu_control_->GetGpuFence(gpu_fence_id, std::move(callback));
+}
+
+void* ImplementationBase::MapTransferCacheEntry(size_t serialized_size) {
+ return transfer_cache_.MapEntry(mapped_memory_.get(), serialized_size);
+}
+
+void ImplementationBase::UnmapAndCreateTransferCacheEntry(uint32_t type,
+ uint32_t id) {
+ transfer_cache_.UnmapAndCreateEntry(type, id);
+}
+
+bool ImplementationBase::ThreadsafeLockTransferCacheEntry(uint32_t type,
+ uint32_t id) {
+ return transfer_cache_.LockEntry(type, id);
+}
+
+void ImplementationBase::UnlockTransferCacheEntries(
+ const std::vector<std::pair<uint32_t, uint32_t>>& entries) {
+ transfer_cache_.UnlockEntries(entries);
+}
+
+void ImplementationBase::DeleteTransferCacheEntry(uint32_t type, uint32_t id) {
+ transfer_cache_.DeleteEntry(type, id);
+}
+
+unsigned int ImplementationBase::GetTransferBufferFreeSize() const {
+ return transfer_buffer_->GetFreeSize();
+}
+
+bool ImplementationBase::OnMemoryDump(
+ const base::trace_event::MemoryDumpArgs& args,
+ base::trace_event::ProcessMemoryDump* pmd) {
+ using base::trace_event::MemoryAllocatorDump;
+ using base::trace_event::MemoryDumpLevelOfDetail;
+
+ // Dump owned MappedMemoryManager memory as well.
+ mapped_memory_->OnMemoryDump(args, pmd);
+
+ if (!transfer_buffer_->HaveBuffer())
+ return true;
+
+ const uint64_t tracing_process_id =
+ base::trace_event::MemoryDumpManager::GetInstance()
+ ->GetTracingProcessId();
+
+ MemoryAllocatorDump* dump = pmd->CreateAllocatorDump(base::StringPrintf(
+ "gpu/transfer_buffer_memory/buffer_%d", transfer_buffer_->GetShmId()));
+ dump->AddScalar(MemoryAllocatorDump::kNameSize,
+ MemoryAllocatorDump::kUnitsBytes,
+ transfer_buffer_->GetSize());
+
+ if (args.level_of_detail != MemoryDumpLevelOfDetail::BACKGROUND) {
+ dump->AddScalar("free_size", MemoryAllocatorDump::kUnitsBytes,
+ transfer_buffer_->GetFragmentedFreeSize());
+ auto shared_memory_guid =
+ transfer_buffer_->shared_memory_handle().GetGUID();
+ const int kImportance = 2;
+ if (!shared_memory_guid.is_empty()) {
+ pmd->CreateSharedMemoryOwnershipEdge(dump->guid(), shared_memory_guid,
+ kImportance);
+ } else {
+ auto guid = GetBufferGUIDForTracing(tracing_process_id,
+ transfer_buffer_->GetShmId());
+ pmd->CreateSharedGlobalAllocatorDump(guid);
+ pmd->AddOwnershipEdge(dump->guid(), guid, kImportance);
+ }
+ }
+
+ return true;
+}
+
+gpu::ContextResult ImplementationBase::Initialize(
+ const SharedMemoryLimits& limits) {
+ TRACE_EVENT0("gpu", "ImplementationBase::Initialize");
+ DCHECK_GE(limits.start_transfer_buffer_size, limits.min_transfer_buffer_size);
+ DCHECK_LE(limits.start_transfer_buffer_size, limits.max_transfer_buffer_size);
+ DCHECK_GE(limits.min_transfer_buffer_size, kStartingOffset);
+
+ gpu_control_->SetGpuControlClient(this);
+
+ if (!transfer_buffer_->Initialize(
+ limits.start_transfer_buffer_size, kStartingOffset,
+ limits.min_transfer_buffer_size, limits.max_transfer_buffer_size,
+ kAlignment, kSizeToFlush)) {
+ // TransferBuffer::Initialize doesn't fail for transient reasons such as if
+ // the context was lost. See http://crrev.com/c/720269
+ LOG(ERROR) << "ContextResult::kFatalFailure: "
+ << "TransferBuffer::Initialize() failed";
+ return gpu::ContextResult::kFatalFailure;
+ }
+
+ mapped_memory_ = std::make_unique<MappedMemoryManager>(
+ helper_, limits.mapped_memory_reclaim_limit);
+ mapped_memory_->set_chunk_size_multiple(limits.mapped_memory_chunk_size);
+ query_tracker_ = std::make_unique<gles2::QueryTracker>(mapped_memory_.get());
+
+ return gpu::ContextResult::kSuccess;
+}
+
+void ImplementationBase::WaitForCmd() {
+ TRACE_EVENT0("gpu", "ImplementationBase::WaitForCmd");
+ helper_->Finish();
+}
+
+void* ImplementationBase::GetResultBuffer() {
+ return transfer_buffer_->GetResultBuffer();
+}
+
+int32_t ImplementationBase::GetResultShmId() {
+ return transfer_buffer_->GetShmId();
+}
+
+uint32_t ImplementationBase::GetResultShmOffset() {
+ return transfer_buffer_->GetResultOffset();
+}
+
+bool ImplementationBase::GetBucketContents(uint32_t bucket_id,
+ std::vector<int8_t>* data) {
+ TRACE_EVENT0("gpu", "ImplementationBase::GetBucketContents");
+ DCHECK(data);
+ const uint32_t kStartSize = 32 * 1024;
+ ScopedTransferBufferPtr buffer(kStartSize, helper_, transfer_buffer_);
+ if (!buffer.valid()) {
+ return false;
+ }
+ typedef cmd::GetBucketStart::Result Result;
+ Result* result = GetResultAs<Result*>();
+ if (!result) {
+ return false;
+ }
+ *result = 0;
+ helper_->GetBucketStart(bucket_id, GetResultShmId(), GetResultShmOffset(),
+ buffer.size(), buffer.shm_id(), buffer.offset());
+ WaitForCmd();
+ uint32_t size = *result;
+ data->resize(size);
+ if (size > 0u) {
+ uint32_t offset = 0;
+ while (size) {
+ if (!buffer.valid()) {
+ buffer.Reset(size);
+ if (!buffer.valid()) {
+ return false;
+ }
+ helper_->GetBucketData(bucket_id, offset, buffer.size(),
+ buffer.shm_id(), buffer.offset());
+ WaitForCmd();
+ }
+ uint32_t size_to_copy = std::min(size, buffer.size());
+ memcpy(&(*data)[offset], buffer.address(), size_to_copy);
+ offset += size_to_copy;
+ size -= size_to_copy;
+ buffer.Release();
+ }
+    // Free the bucket. This is not required, but it does free up the memory,
+    // and we don't have to wait for the result, so from the client's perspective
+ // it's cheap.
+ helper_->SetBucketSize(bucket_id, 0);
+ }
+ return true;
+}
+
+void ImplementationBase::SetBucketContents(uint32_t bucket_id,
+ const void* data,
+ size_t size) {
+ DCHECK(data);
+ helper_->SetBucketSize(bucket_id, size);
+ if (size > 0u) {
+ uint32_t offset = 0;
+ while (size) {
+ ScopedTransferBufferPtr buffer(size, helper_, transfer_buffer_);
+ if (!buffer.valid()) {
+ return;
+ }
+ memcpy(buffer.address(), static_cast<const int8_t*>(data) + offset,
+ buffer.size());
+ helper_->SetBucketData(bucket_id, offset, buffer.size(), buffer.shm_id(),
+ buffer.offset());
+ offset += buffer.size();
+ size -= buffer.size();
+ }
+ }
+}
+
+void ImplementationBase::SetBucketAsCString(uint32_t bucket_id,
+ const char* str) {
+ // NOTE: strings are passed NULL terminated. That means the empty
+ // string will have a size of 1 and no-string will have a size of 0
+ if (str) {
+ SetBucketContents(bucket_id, str, strlen(str) + 1);
+ } else {
+ helper_->SetBucketSize(bucket_id, 0);
+ }
+}
+
+bool ImplementationBase::GetBucketAsString(uint32_t bucket_id,
+ std::string* str) {
+ DCHECK(str);
+ std::vector<int8_t> data;
+ // NOTE: strings are passed NULL terminated. That means the empty
+ // string will have a size of 1 and no-string will have a size of 0
+ if (!GetBucketContents(bucket_id, &data)) {
+ return false;
+ }
+ if (data.empty()) {
+ return false;
+ }
+ str->assign(&data[0], &data[0] + data.size() - 1);
+ return true;
+}
+
+void ImplementationBase::SetBucketAsString(uint32_t bucket_id,
+ const std::string& str) {
+ // NOTE: strings are passed NULL terminated. That means the empty
+ // string will have a size of 1 and no-string will have a size of 0
+ SetBucketContents(bucket_id, str.c_str(), str.size() + 1);
+}
+
+bool ImplementationBase::GetVerifiedSyncTokenForIPC(
+ const SyncToken& sync_token,
+ SyncToken* verified_sync_token) {
+ DCHECK(sync_token.HasData());
+ DCHECK(verified_sync_token);
+
+ if (!sync_token.verified_flush() &&
+ !gpu_control_->CanWaitUnverifiedSyncToken(sync_token))
+ return false;
+
+ *verified_sync_token = sync_token;
+ verified_sync_token->SetVerifyFlush();
+ return true;
+}
+
+void ImplementationBase::RunIfContextNotLost(base::OnceClosure callback) {
+ if (!lost_context_callback_run_) {
+ std::move(callback).Run();
+ }
+}
+
+} // namespace gpu
diff --git a/chromium/gpu/command_buffer/client/implementation_base.h b/chromium/gpu/command_buffer/client/implementation_base.h
new file mode 100644
index 00000000000..9257810ae72
--- /dev/null
+++ b/chromium/gpu/command_buffer/client/implementation_base.h
@@ -0,0 +1,162 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef GPU_COMMAND_BUFFER_CLIENT_IMPLEMENTATION_BASE_H_
+#define GPU_COMMAND_BUFFER_CLIENT_IMPLEMENTATION_BASE_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include <memory>
+#include <string>
+#include <utility>
+#include <vector>
+
+#include "base/callback_forward.h"
+#include "base/macros.h"
+#include "base/memory/weak_ptr.h"
+#include "base/trace_event/memory_dump_provider.h"
+#include "gpu/command_buffer/client/client_transfer_cache.h"
+#include "gpu/command_buffer/client/context_support.h"
+#include "gpu/command_buffer/client/gles2_impl_export.h"
+#include "gpu/command_buffer/client/gpu_control_client.h"
+#include "gpu/command_buffer/client/query_tracker.h"
+#include "gpu/command_buffer/client/transfer_buffer.h"
+#include "gpu/command_buffer/common/context_result.h"
+
+namespace gpu {
+
+namespace gles2 {
+class QueryTracker;
+}
+
+class CommandBufferHelper;
+class GpuControl;
+class MappedMemoryManager;
+struct SharedMemoryLimits;
+
+// Base class with functionality shared between GLES2Implementation and
+// RasterImplementation.
+class GLES2_IMPL_EXPORT ImplementationBase
+ : public base::trace_event::MemoryDumpProvider,
+ public ContextSupport,
+ public GpuControlClient,
+ public ClientTransferCache::Client {
+ public:
+ // The maximum result size from simple GL get commands.
+ static const size_t kMaxSizeOfSimpleResult =
+ 16 * sizeof(uint32_t); // NOLINT.
+
+  // Used for testing only. If more things are reserved, add them here.
+ static const unsigned int kStartingOffset = kMaxSizeOfSimpleResult;
+
+ // Size in bytes to issue async flush for transfer buffer.
+ static const unsigned int kSizeToFlush = 256 * 1024;
+
+ // Alignment of allocations.
+ static const unsigned int kAlignment = 16;
+
+ // The bucket used for results. Public for testing only.
+ static const uint32_t kResultBucketId = 1;
+
+ ImplementationBase(CommandBufferHelper* helper,
+ TransferBufferInterface* transfer_buffer,
+ GpuControl* gpu_control);
+ ~ImplementationBase() override;
+
+ void FreeUnusedSharedMemory();
+ void FreeEverything();
+
+ // TODO(danakj): Move to ContextSupport once ContextProvider doesn't need to
+ // intercept it.
+ void SetLostContextCallback(base::OnceClosure callback);
+
+ // ContextSupport implementation.
+ void FlushPendingWork() override;
+ void SignalSyncToken(const gpu::SyncToken& sync_token,
+ base::OnceClosure callback) override;
+ bool IsSyncTokenSignaled(const gpu::SyncToken& sync_token) override;
+ void SignalQuery(uint32_t query, base::OnceClosure callback) override;
+ void GetGpuFence(uint32_t gpu_fence_id,
+ base::OnceCallback<void(std::unique_ptr<gfx::GpuFence>)>
+ callback) override;
+ void* MapTransferCacheEntry(size_t serialized_size) override;
+ void UnmapAndCreateTransferCacheEntry(uint32_t type, uint32_t id) override;
+ bool ThreadsafeLockTransferCacheEntry(uint32_t type, uint32_t id) override;
+ void UnlockTransferCacheEntries(
+ const std::vector<std::pair<uint32_t, uint32_t>>& entries) override;
+ void DeleteTransferCacheEntry(uint32_t type, uint32_t id) override;
+ unsigned int GetTransferBufferFreeSize() const override;
+
+ // base::trace_event::MemoryDumpProvider implementation.
+ bool OnMemoryDump(const base::trace_event::MemoryDumpArgs& args,
+ base::trace_event::ProcessMemoryDump* pmd) override;
+
+ protected:
+ gpu::ContextResult Initialize(const SharedMemoryLimits& limits);
+
+ // Waits for all commands to execute.
+ void WaitForCmd();
+
+ // Gets the value of the result.
+ template <typename T>
+ T GetResultAs() {
+ return static_cast<T>(GetResultBuffer());
+ }
+
+ void* GetResultBuffer();
+ int32_t GetResultShmId();
+ uint32_t GetResultShmOffset();
+
+ // TODO(gman): These bucket functions really seem like they belong in
+ // CommandBufferHelper (or maybe BucketHelper?). Unfortunately they need
+ // a transfer buffer to function which is currently managed by this class.
+
+ // Gets the contents of a bucket.
+ bool GetBucketContents(uint32_t bucket_id, std::vector<int8_t>* data);
+
+ // Sets the contents of a bucket.
+ void SetBucketContents(uint32_t bucket_id, const void* data, size_t size);
+
+ // Sets the contents of a bucket as a string.
+ void SetBucketAsCString(uint32_t bucket_id, const char* str);
+
+ // Gets the contents of a bucket as a string. Returns false if there is no
+ // string available which is a separate case from the empty string.
+ bool GetBucketAsString(uint32_t bucket_id, std::string* str);
+
+ // Sets the contents of a bucket as a string.
+ void SetBucketAsString(uint32_t bucket_id, const std::string& str);
+
+ bool GetVerifiedSyncTokenForIPC(const SyncToken& sync_token,
+ SyncToken* verified_sync_token);
+
+ void RunIfContextNotLost(base::OnceClosure callback);
+
+ TransferBufferInterface* transfer_buffer_;
+
+ std::unique_ptr<MappedMemoryManager> mapped_memory_;
+
+ std::unique_ptr<gles2::QueryTracker> query_tracker_;
+
+ base::OnceClosure lost_context_callback_;
+ bool lost_context_callback_run_ = false;
+
+ GpuControl* const gpu_control_;
+
+ private:
+ virtual void IssueShallowFlush() = 0;
+
+ CommandBufferHelper* helper_;
+
+ ClientTransferCache transfer_cache_;
+
+ base::WeakPtrFactory<ImplementationBase> weak_ptr_factory_;
+
+ DISALLOW_COPY_AND_ASSIGN(ImplementationBase);
+};
+
+} // namespace gpu
+
+#endif // GPU_COMMAND_BUFFER_CLIENT_IMPLEMENTATION_BASE_H_
diff --git a/chromium/gpu/command_buffer/client/logging.cc b/chromium/gpu/command_buffer/client/logging.cc
new file mode 100644
index 00000000000..c92dbf6f733
--- /dev/null
+++ b/chromium/gpu/command_buffer/client/logging.cc
@@ -0,0 +1,23 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "gpu/command_buffer/client/logging.h"
+
+#if defined(GPU_CLIENT_DEBUG)
+#include "base/command_line.h"
+#include "gpu/command_buffer/client/gpu_switches.h"
+#endif // defined(GPU_CLIENT_DEBUG)
+
+namespace gpu {
+
+LogSettings::LogSettings() : enabled_(false) {
+ GPU_CLIENT_LOG_CODE_BLOCK({
+ enabled_ = base::CommandLine::ForCurrentProcess()->HasSwitch(
+ switches::kEnableGPUClientLogging);
+ });
+}
+
+LogSettings::~LogSettings() = default;
+
+} // namespace gpu
diff --git a/chromium/gpu/command_buffer/client/logging.h b/chromium/gpu/command_buffer/client/logging.h
new file mode 100644
index 00000000000..a56e75cc849
--- /dev/null
+++ b/chromium/gpu/command_buffer/client/logging.h
@@ -0,0 +1,68 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef GPU_COMMAND_BUFFER_CLIENT_LOGGING_H_
+#define GPU_COMMAND_BUFFER_CLIENT_LOGGING_H_
+
+#include "base/logging.h"
+#include "base/macros.h"
+#include "gpu/command_buffer/client/gles2_impl_export.h"
+
+// Macros to log information if DCHECK_IS_ON() and --enable-gpu-client-logging
+// flag is set. Code is optimized out if DCHECK is disabled. Requires that a
+// LogSettings named log_settings_ is in scope whenever a macro is used.
+//
+// Example usage:
+//
+// class Foo {
+// public:
+// Foo() {
+// GPU_CLIENT_LOG("[" << LogPrefix() << "] Hello world");
+// GPU_CLIENT_LOG_CODE_BLOCK({
+// for (int i = 0; i < 10; ++i) {
+// GPU_CLIENT_LOG_CODE_BLOCK("Hello again");
+// }
+// });
+// }
+//
+// std::string LogPrefix() { return "Foo"; }
+//
+// private:
+// LogSettings log_settings_;
+// };
+
+#if DCHECK_IS_ON() && !defined(__native_client__) && \
+ !defined(GLES2_CONFORMANCE_TESTS) && !defined(GLES2_INLINE_OPTIMIZATION)
+#define GPU_CLIENT_DEBUG
+#endif
+
+#if defined(GPU_CLIENT_DEBUG)
+#define GPU_CLIENT_LOG(args) DLOG_IF(INFO, log_settings_.enabled()) << args;
+#define GPU_CLIENT_LOG_CODE_BLOCK(code) code
+#define GPU_CLIENT_DCHECK_CODE_BLOCK(code) code
+#else // !defined(GPU_CLIENT_DEBUG)
+#define GPU_CLIENT_LOG(args)
+#define GPU_CLIENT_LOG_CODE_BLOCK(code)
+#define GPU_CLIENT_DCHECK_CODE_BLOCK(code)
+#endif // defined(GPU_CLIENT_DEBUG)
+
+namespace gpu {
+
+// Caches whether --enable-gpu-client-logging is set.
+class GLES2_IMPL_EXPORT LogSettings {
+ public:
+ LogSettings();
+ ~LogSettings();
+
+ bool enabled() { return enabled_; }
+
+ private:
+ bool enabled_;
+
+ DISALLOW_COPY_AND_ASSIGN(LogSettings);
+};
+
+} // namespace gpu
+
+#endif // GPU_COMMAND_BUFFER_CLIENT_LOGGING_H_
diff --git a/chromium/gpu/command_buffer/client/mock_transfer_buffer.cc b/chromium/gpu/command_buffer/client/mock_transfer_buffer.cc
new file mode 100644
index 00000000000..f371c257c6d
--- /dev/null
+++ b/chromium/gpu/command_buffer/client/mock_transfer_buffer.cc
@@ -0,0 +1,202 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "gpu/command_buffer/client/mock_transfer_buffer.h"
+
+#include "gpu/command_buffer/common/command_buffer.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace gpu {
+
+MockTransferBuffer::MockTransferBuffer(CommandBuffer* command_buffer,
+ unsigned int size,
+ unsigned int result_size,
+ unsigned int alignment,
+ bool initialize_fail)
+ : command_buffer_(command_buffer),
+ size_(size),
+ result_size_(result_size),
+ alignment_(alignment),
+ actual_buffer_index_(0),
+ expected_buffer_index_(0),
+ last_alloc_(NULL),
+ expected_offset_(result_size),
+ actual_offset_(result_size),
+ initialize_fail_(initialize_fail) {
+ // We have to allocate the buffers here because
+ // we need to know their address before
+ // {Raster,GLES2}Implementation::Initialize is called.
+ for (int ii = 0; ii < kNumBuffers; ++ii) {
+ buffers_[ii] = command_buffer_->CreateTransferBuffer(
+ size_ + ii * alignment_, &buffer_ids_[ii]);
+ EXPECT_NE(-1, buffer_ids_[ii]);
+ }
+}
+
+MockTransferBuffer::~MockTransferBuffer() = default;
+
+base::SharedMemoryHandle MockTransferBuffer::shared_memory_handle() const {
+ return base::SharedMemoryHandle();
+}
+
+bool MockTransferBuffer::Initialize(unsigned int starting_buffer_size,
+ unsigned int result_size,
+ unsigned int /* min_buffer_size */,
+ unsigned int /* max_buffer_size */,
+ unsigned int alignment,
+ unsigned int /* size_to_flush */) {
+ // Just check they match.
+ return size_ == starting_buffer_size && result_size_ == result_size &&
+ alignment_ == alignment && !initialize_fail_;
+};
+
+int MockTransferBuffer::GetShmId() {
+ return buffer_ids_[actual_buffer_index_];
+}
+
+void* MockTransferBuffer::GetResultBuffer() {
+ return actual_buffer() + actual_buffer_index_ * alignment_;
+}
+
+int MockTransferBuffer::GetResultOffset() {
+ return actual_buffer_index_ * alignment_;
+}
+
+void MockTransferBuffer::Free() {
+ NOTREACHED();
+}
+
+bool MockTransferBuffer::HaveBuffer() const {
+ return true;
+}
+
+void* MockTransferBuffer::AllocUpTo(unsigned int size,
+ unsigned int* size_allocated) {
+ EXPECT_TRUE(size_allocated != nullptr);
+ EXPECT_TRUE(last_alloc_ == nullptr);
+
+ // Toggle which buffer we get each time to simulate the buffer being
+ // reallocated.
+ actual_buffer_index_ = (actual_buffer_index_ + 1) % kNumBuffers;
+
+ size = std::min(static_cast<size_t>(size), MaxTransferBufferSize());
+ if (actual_offset_ + size > size_) {
+ actual_offset_ = result_size_;
+ }
+ uint32_t offset = actual_offset_;
+ actual_offset_ += RoundToAlignment(size);
+ *size_allocated = size;
+
+ // Make sure each buffer has a different offset.
+ last_alloc_ = actual_buffer() + offset + actual_buffer_index_ * alignment_;
+ return last_alloc_;
+}
+
+void* MockTransferBuffer::Alloc(unsigned int size) {
+ EXPECT_LE(size, MaxTransferBufferSize());
+ unsigned int temp = 0;
+ void* p = AllocUpTo(size, &temp);
+ EXPECT_EQ(temp, size);
+ return p;
+}
+
+RingBuffer::Offset MockTransferBuffer::GetOffset(void* pointer) const {
+ // Make sure each buffer has a different offset.
+ return static_cast<uint8_t*>(pointer) - actual_buffer();
+}
+
+void MockTransferBuffer::DiscardBlock(void* p) {
+ EXPECT_EQ(last_alloc_, p);
+ last_alloc_ = nullptr;
+}
+
+void MockTransferBuffer::FreePendingToken(void* p, unsigned int /* token */) {
+ EXPECT_EQ(last_alloc_, p);
+ last_alloc_ = nullptr;
+}
+
+unsigned int MockTransferBuffer::GetSize() const {
+ return 0;
+}
+
+unsigned int MockTransferBuffer::GetFreeSize() const {
+ return 0;
+}
+
+unsigned int MockTransferBuffer::GetFragmentedFreeSize() const {
+ return 0;
+}
+
+void MockTransferBuffer::ShrinkLastBlock(unsigned int new_size) {}
+
+size_t MockTransferBuffer::MaxTransferBufferSize() {
+ return size_ - result_size_;
+}
+
+unsigned int MockTransferBuffer::RoundToAlignment(unsigned int size) {
+ return (size + alignment_ - 1) & ~(alignment_ - 1);
+}
+
+bool MockTransferBuffer::InSync() {
+ return expected_buffer_index_ == actual_buffer_index_ &&
+ expected_offset_ == actual_offset_;
+}
+
+MockTransferBuffer::ExpectedMemoryInfo MockTransferBuffer::GetExpectedMemory(
+ size_t size) {
+ ExpectedMemoryInfo mem;
+ mem.offset = AllocateExpectedTransferBuffer(size);
+ mem.id = GetExpectedTransferBufferId();
+ mem.ptr = static_cast<uint8_t*>(
+ GetExpectedTransferAddressFromOffset(mem.offset, size));
+ return mem;
+}
+
+MockTransferBuffer::ExpectedMemoryInfo
+MockTransferBuffer::GetExpectedResultMemory(size_t size) {
+ ExpectedMemoryInfo mem;
+ mem.offset = GetExpectedResultBufferOffset();
+ mem.id = GetExpectedResultBufferId();
+ mem.ptr = static_cast<uint8_t*>(
+ GetExpectedTransferAddressFromOffset(mem.offset, size));
+ return mem;
+}
+
+uint32_t MockTransferBuffer::AllocateExpectedTransferBuffer(size_t size) {
+ EXPECT_LE(size, MaxTransferBufferSize());
+
+ // Toggle which buffer we get each time to simulate the buffer being
+ // reallocated.
+ expected_buffer_index_ = (expected_buffer_index_ + 1) % kNumBuffers;
+
+ if (expected_offset_ + size > size_) {
+ expected_offset_ = result_size_;
+ }
+ uint32_t offset = expected_offset_;
+ expected_offset_ += RoundToAlignment(size);
+
+ // Make sure each buffer has a different offset.
+ return offset + expected_buffer_index_ * alignment_;
+}
+
+void* MockTransferBuffer::GetExpectedTransferAddressFromOffset(uint32_t offset,
+ size_t size) {
+ EXPECT_GE(offset, expected_buffer_index_ * alignment_);
+ EXPECT_LE(offset + size, size_ + expected_buffer_index_ * alignment_);
+ return expected_buffer() + offset;
+}
+
+int MockTransferBuffer::GetExpectedResultBufferId() {
+ return buffer_ids_[expected_buffer_index_];
+}
+
+uint32_t MockTransferBuffer::GetExpectedResultBufferOffset() {
+ return expected_buffer_index_ * alignment_;
+}
+
+int MockTransferBuffer::GetExpectedTransferBufferId() {
+ return buffer_ids_[expected_buffer_index_];
+}
+
+} // namespace gpu
diff --git a/chromium/gpu/command_buffer/client/mock_transfer_buffer.h b/chromium/gpu/command_buffer/client/mock_transfer_buffer.h
new file mode 100644
index 00000000000..a07e00f2398
--- /dev/null
+++ b/chromium/gpu/command_buffer/client/mock_transfer_buffer.h
@@ -0,0 +1,96 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef GPU_COMMAND_BUFFER_CLIENT_MOCK_TRANSFER_BUFFER_H_
+#define GPU_COMMAND_BUFFER_CLIENT_MOCK_TRANSFER_BUFFER_H_
+
+#include "base/macros.h"
+#include "base/memory/shared_memory_handle.h"
+#include "gpu/command_buffer/client/ring_buffer.h"
+#include "gpu/command_buffer/client/transfer_buffer.h"
+
+namespace gpu {
+
+class CommandBuffer;
+
+class MockTransferBuffer : public TransferBufferInterface {
+ public:
+ struct ExpectedMemoryInfo {
+ uint32_t offset;
+ int32_t id;
+ uint8_t* ptr;
+ };
+
+ MockTransferBuffer(CommandBuffer* command_buffer,
+ unsigned int size,
+ unsigned int result_size,
+ unsigned int alignment,
+ bool initialize_fail);
+
+ ~MockTransferBuffer() override;
+
+ base::SharedMemoryHandle shared_memory_handle() const override;
+ bool Initialize(unsigned int starting_buffer_size,
+ unsigned int result_size,
+ unsigned int /* min_buffer_size */,
+ unsigned int /* max_buffer_size */,
+ unsigned int alignment,
+ unsigned int size_to_flush) override;
+ int GetShmId() override;
+ void* GetResultBuffer() override;
+ int GetResultOffset() override;
+ void Free() override;
+ bool HaveBuffer() const override;
+ void* AllocUpTo(unsigned int size, unsigned int* size_allocated) override;
+ void* Alloc(unsigned int size) override;
+ RingBuffer::Offset GetOffset(void* pointer) const override;
+ void DiscardBlock(void* p) override;
+ void FreePendingToken(void* p, unsigned int /* token */) override;
+ unsigned int GetSize() const override;
+ unsigned int GetFreeSize() const override;
+ unsigned int GetFragmentedFreeSize() const override;
+ void ShrinkLastBlock(unsigned int new_size) override;
+
+ size_t MaxTransferBufferSize();
+ unsigned int RoundToAlignment(unsigned int size);
+ bool InSync();
+ ExpectedMemoryInfo GetExpectedMemory(size_t size);
+ ExpectedMemoryInfo GetExpectedResultMemory(size_t size);
+
+ private:
+ static const int kNumBuffers = 2;
+
+ uint8_t* actual_buffer() const {
+ return static_cast<uint8_t*>(buffers_[actual_buffer_index_]->memory());
+ }
+
+ uint8_t* expected_buffer() const {
+ return static_cast<uint8_t*>(buffers_[expected_buffer_index_]->memory());
+ }
+
+ uint32_t AllocateExpectedTransferBuffer(size_t size);
+ void* GetExpectedTransferAddressFromOffset(uint32_t offset, size_t size);
+ int GetExpectedResultBufferId();
+ uint32_t GetExpectedResultBufferOffset();
+ int GetExpectedTransferBufferId();
+
+ CommandBuffer* command_buffer_;
+ size_t size_;
+ size_t result_size_;
+ uint32_t alignment_;
+ int buffer_ids_[kNumBuffers];
+ scoped_refptr<Buffer> buffers_[kNumBuffers];
+ int actual_buffer_index_;
+ int expected_buffer_index_;
+ void* last_alloc_;
+ uint32_t expected_offset_;
+ uint32_t actual_offset_;
+ bool initialize_fail_;
+
+ DISALLOW_COPY_AND_ASSIGN(MockTransferBuffer);
+};
+
+} // namespace gpu
+
+#endif // GPU_COMMAND_BUFFER_CLIENT_MOCK_TRANSFER_BUFFER_H_
diff --git a/chromium/gpu/command_buffer/client/query_tracker.cc b/chromium/gpu/command_buffer/client/query_tracker.cc
index 60e1a76e1f7..5a6c8e590da 100644
--- a/chromium/gpu/command_buffer/client/query_tracker.cc
+++ b/chromium/gpu/command_buffer/client/query_tracker.cc
@@ -150,7 +150,7 @@ QueryTracker::Query::Query(GLuint id,
client_begin_time_us_(0),
result_(0) {}
-void QueryTracker::Query::Begin(GLES2Implementation* gl) {
+void QueryTracker::Query::Begin(QueryTrackerClient* client) {
// init memory, inc count
MarkAsActive();
@@ -161,24 +161,24 @@ void QueryTracker::Query::Begin(GLES2Implementation* gl) {
case GL_LATENCY_QUERY_CHROMIUM:
client_begin_time_us_ = MicrosecondsSinceOriginOfTime();
// tell service about id, shared memory and count
- gl->helper()->BeginQueryEXT(target(), id(), shm_id(), shm_offset());
+ client->IssueBeginQuery(target(), id(), shm_id(), shm_offset());
break;
case GL_ASYNC_PIXEL_PACK_COMPLETED_CHROMIUM:
default:
// tell service about id, shared memory and count
- gl->helper()->BeginQueryEXT(target(), id(), shm_id(), shm_offset());
+ client->IssueBeginQuery(target(), id(), shm_id(), shm_offset());
break;
}
}
-void QueryTracker::Query::End(GLES2Implementation* gl) {
+void QueryTracker::Query::End(QueryTrackerClient* client) {
switch (target()) {
case GL_GET_ERROR_QUERY_CHROMIUM: {
- GLenum error = gl->GetClientSideGLError();
+ GLenum error = client->GetClientSideGLError();
if (error == GL_NO_ERROR) {
// There was no error so start the query on the service.
// it will end immediately.
- gl->helper()->BeginQueryEXT(target(), id(), shm_id(), shm_offset());
+ client->IssueBeginQuery(target(), id(), shm_id(), shm_offset());
} else {
// There's an error on the client, no need to bother the service. Just
// set the query as completed and return the error.
@@ -190,19 +190,19 @@ void QueryTracker::Query::End(GLES2Implementation* gl) {
}
}
}
- flush_count_ = gl->helper()->flush_generation();
+ flush_count_ = client->cmd_buffer_helper()->flush_generation();
int32_t submit_count = NextSubmitCount();
- gl->helper()->EndQueryEXT(target(), submit_count);
- MarkAsPending(gl->helper()->InsertToken(), submit_count);
+ client->IssueEndQuery(target(), submit_count);
+ MarkAsPending(client->cmd_buffer_helper()->InsertToken(), submit_count);
}
-void QueryTracker::Query::QueryCounter(GLES2Implementation* gl) {
+void QueryTracker::Query::QueryCounter(QueryTrackerClient* client) {
MarkAsActive();
- flush_count_ = gl->helper()->flush_generation();
+ flush_count_ = client->cmd_buffer_helper()->flush_generation();
int32_t submit_count = NextSubmitCount();
- gl->helper()->QueryCounterEXT(id(), target(), shm_id(), shm_offset(),
- submit_count);
- MarkAsPending(gl->helper()->InsertToken(), submit_count);
+ client->IssueQueryCounter(id(), target(), shm_id(), shm_offset(),
+ submit_count);
+ MarkAsPending(client->cmd_buffer_helper()->InsertToken(), submit_count);
}
bool QueryTracker::Query::CheckResultsAvailable(
@@ -211,12 +211,12 @@ bool QueryTracker::Query::CheckResultsAvailable(
bool processed_all = base::subtle::Acquire_Load(
&info_.sync->process_count) == submit_count();
// We check lost on the command buffer itself here instead of checking the
- // GLES2Implementation because the GLES2Implementation will not hear about
+ // QueryTrackerClient because the QueryTrackerClient will not hear about
// the loss until we exit out of this call stack (to avoid re-entrancy), and
// we need be able to enter kComplete state on context loss.
- // TODO(danakj): If GLES2Implementation can handle being notified of loss
+ // TODO(danakj): If QueryTrackerClient can handle being notified of loss
// re-entrantly (without calling its clients re-entrantly), then we could
- // call GLES2Implementation::GetGraphicsResetStatusKHR() here and remove
+ // call QueryTrackerClient::GetGraphicsResetStatusKHR() here and remove
// this method from CommandBufferHelper.
if (processed_all || helper->IsContextLost()) {
switch (target()) {
@@ -314,65 +314,63 @@ void QueryTracker::Shrink(CommandBufferHelper* helper) {
query_sync_manager_.Shrink(helper);
}
-bool QueryTracker::BeginQuery(GLuint id, GLenum target,
- GLES2Implementation* gl) {
+bool QueryTracker::BeginQuery(GLuint id,
+ GLenum target,
+ QueryTrackerClient* client) {
QueryTracker::Query* query = GetQuery(id);
if (!query) {
query = CreateQuery(id, target);
if (!query) {
- gl->SetGLError(GL_OUT_OF_MEMORY,
- "glBeginQueryEXT",
- "transfer buffer allocation failed");
+ client->SetGLError(GL_OUT_OF_MEMORY, "glBeginQueryEXT",
+ "transfer buffer allocation failed");
return false;
}
} else if (query->target() != target) {
- gl->SetGLError(GL_INVALID_OPERATION,
- "glBeginQueryEXT",
- "target does not match");
+ client->SetGLError(GL_INVALID_OPERATION, "glBeginQueryEXT",
+ "target does not match");
return false;
}
current_queries_[query->target()] = query;
- query->Begin(gl);
+ query->Begin(client);
return true;
}
-bool QueryTracker::EndQuery(GLenum target, GLES2Implementation* gl) {
+bool QueryTracker::EndQuery(GLenum target, QueryTrackerClient* client) {
QueryTargetMap::iterator target_it = current_queries_.find(target);
if (target_it == current_queries_.end()) {
- gl->SetGLError(GL_INVALID_OPERATION,
- "glEndQueryEXT", "no active query");
+ client->SetGLError(GL_INVALID_OPERATION, "glEndQueryEXT",
+ "no active query");
return false;
}
- target_it->second->End(gl);
+ target_it->second->End(client);
current_queries_.erase(target_it);
return true;
}
-bool QueryTracker::QueryCounter(GLuint id, GLenum target,
- GLES2Implementation* gl) {
+bool QueryTracker::QueryCounter(GLuint id,
+ GLenum target,
+ QueryTrackerClient* client) {
QueryTracker::Query* query = GetQuery(id);
if (!query) {
query = CreateQuery(id, target);
if (!query) {
- gl->SetGLError(GL_OUT_OF_MEMORY,
- "glQueryCounterEXT",
- "transfer buffer allocation failed");
+ client->SetGLError(GL_OUT_OF_MEMORY, "glQueryCounterEXT",
+ "transfer buffer allocation failed");
return false;
}
} else if (query->target() != target) {
- gl->SetGLError(GL_INVALID_OPERATION,
- "glQueryCounterEXT",
- "target does not match");
+ client->SetGLError(GL_INVALID_OPERATION, "glQueryCounterEXT",
+ "target does not match");
return false;
}
- query->QueryCounter(gl);
+ query->QueryCounter(client);
return true;
}
-bool QueryTracker::SetDisjointSync(GLES2Implementation* gl) {
+bool QueryTracker::SetDisjointSync(QueryTrackerClient* client) {
if (!disjoint_count_sync_) {
// Allocate memory for disjoint value sync.
int32_t shm_id = -1;
@@ -385,7 +383,7 @@ bool QueryTracker::SetDisjointSync(GLES2Implementation* gl) {
disjoint_count_sync_shm_offset_ = shm_offset;
disjoint_count_sync_ = static_cast<DisjointValueSync*>(mem);
disjoint_count_sync_->Reset();
- gl->helper()->SetDisjointValueSyncCHROMIUM(shm_id, shm_offset);
+ client->IssueSetDisjointValueSync(shm_id, shm_offset);
}
}
return disjoint_count_sync_ != nullptr;
diff --git a/chromium/gpu/command_buffer/client/query_tracker.h b/chromium/gpu/command_buffer/client/query_tracker.h
index cbb6a28f20c..dd0ba6f42f0 100644
--- a/chromium/gpu/command_buffer/client/query_tracker.h
+++ b/chromium/gpu/command_buffer/client/query_tracker.h
@@ -30,8 +30,6 @@ class MappedMemoryManager;
namespace gles2 {
-class GLES2Implementation;
-
// Manages buckets of QuerySync instances in mapped memory.
class GLES2_IMPL_EXPORT QuerySyncManager {
public:
@@ -83,6 +81,33 @@ class GLES2_IMPL_EXPORT QuerySyncManager {
DISALLOW_COPY_AND_ASSIGN(QuerySyncManager);
};
+class GLES2_IMPL_EXPORT QueryTrackerClient {
+ public:
+ // Issue commands directly to the command buffer.
+ virtual void IssueBeginQuery(GLenum target,
+ GLuint id,
+ uint32_t sync_data_shm_id,
+ uint32_t sync_data_shm_offset) = 0;
+ virtual void IssueEndQuery(GLenum target, GLuint submit_count) = 0;
+ virtual void IssueQueryCounter(GLuint id,
+ GLenum target,
+ uint32_t sync_data_shm_id,
+ uint32_t sync_data_shm_offset,
+ GLuint submit_count) = 0;
+ virtual void IssueSetDisjointValueSync(uint32_t sync_data_shm_id,
+ uint32_t sync_data_shm_offset) = 0;
+
+ // Check for client side errors.
+ virtual GLenum GetClientSideGLError() = 0;
+
+ // Set client side error.
+ virtual void SetGLError(GLenum error,
+ const char* function_name,
+ const char* msg) = 0;
+
+ virtual CommandBufferHelper* cmd_buffer_helper() = 0;
+};
+
// Tracks queries for client side of command buffer.
class GLES2_IMPL_EXPORT QueryTracker {
public:
@@ -152,9 +177,9 @@ class GLES2_IMPL_EXPORT QueryTracker {
friend class QueryTracker;
friend class QueryTrackerTest;
- void Begin(GLES2Implementation* gl);
- void End(GLES2Implementation* gl);
- void QueryCounter(GLES2Implementation* gl);
+ void Begin(QueryTrackerClient* client);
+ void End(QueryTrackerClient* client);
+ void QueryCounter(QueryTrackerClient* client);
GLuint id_;
GLenum target_;
@@ -175,10 +200,10 @@ class GLES2_IMPL_EXPORT QueryTracker {
void RemoveQuery(GLuint id);
void Shrink(CommandBufferHelper* helper);
- bool BeginQuery(GLuint id, GLenum target, GLES2Implementation* gl);
- bool EndQuery(GLenum target, GLES2Implementation* gl);
- bool QueryCounter(GLuint id, GLenum target, GLES2Implementation* gl);
- bool SetDisjointSync(GLES2Implementation* gl);
+ bool BeginQuery(GLuint id, GLenum target, QueryTrackerClient* client);
+ bool EndQuery(GLenum target, QueryTrackerClient* client);
+ bool QueryCounter(GLuint id, GLenum target, QueryTrackerClient* client);
+ bool SetDisjointSync(QueryTrackerClient* client);
bool CheckAndResetDisjoint();
int32_t DisjointCountSyncShmID() const {
diff --git a/chromium/gpu/command_buffer/client/raster_cmd_helper.cc b/chromium/gpu/command_buffer/client/raster_cmd_helper.cc
new file mode 100644
index 00000000000..191dfa3b36a
--- /dev/null
+++ b/chromium/gpu/command_buffer/client/raster_cmd_helper.cc
@@ -0,0 +1,16 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "gpu/command_buffer/client/raster_cmd_helper.h"
+
+namespace gpu {
+namespace raster {
+
+RasterCmdHelper::RasterCmdHelper(CommandBuffer* command_buffer)
+ : CommandBufferHelper(command_buffer) {}
+
+RasterCmdHelper::~RasterCmdHelper() = default;
+
+} // namespace raster
+} // namespace gpu
diff --git a/chromium/gpu/command_buffer/client/raster_cmd_helper.h b/chromium/gpu/command_buffer/client/raster_cmd_helper.h
new file mode 100644
index 00000000000..cab4dddd514
--- /dev/null
+++ b/chromium/gpu/command_buffer/client/raster_cmd_helper.h
@@ -0,0 +1,36 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef GPU_COMMAND_BUFFER_CLIENT_RASTER_CMD_HELPER_H_
+#define GPU_COMMAND_BUFFER_CLIENT_RASTER_CMD_HELPER_H_
+
+#include <stdint.h>
+
+#include "base/macros.h"
+#include "gpu/command_buffer/client/cmd_buffer_helper.h"
+#include "gpu/command_buffer/common/raster_cmd_format.h"
+#include "gpu/raster_export.h"
+
+namespace gpu {
+namespace raster {
+
+// A class that helps write GL command buffers.
+class RASTER_EXPORT RasterCmdHelper : public CommandBufferHelper {
+ public:
+ explicit RasterCmdHelper(CommandBuffer* command_buffer);
+ ~RasterCmdHelper() override;
+
+// Include the auto-generated part of this class. We split this because it
+// means we can easily edit the non-auto generated parts right here in this
+// file instead of having to edit some template or the code generator.
+#include "gpu/command_buffer/client/raster_cmd_helper_autogen.h"
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(RasterCmdHelper);
+};
+
+} // namespace raster
+} // namespace gpu
+
+#endif // GPU_COMMAND_BUFFER_CLIENT_RASTER_CMD_HELPER_H_
diff --git a/chromium/gpu/command_buffer/client/raster_cmd_helper_autogen.h b/chromium/gpu/command_buffer/client/raster_cmd_helper_autogen.h
new file mode 100644
index 00000000000..e67d590c4dc
--- /dev/null
+++ b/chromium/gpu/command_buffer/client/raster_cmd_helper_autogen.h
@@ -0,0 +1,248 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file is auto-generated from
+// gpu/command_buffer/build_raster_cmd_buffer.py
+// It's formatted by clang-format using chromium coding style:
+// clang-format -i -style=chromium filename
+// DO NOT EDIT!
+
+#ifndef GPU_COMMAND_BUFFER_CLIENT_RASTER_CMD_HELPER_AUTOGEN_H_
+#define GPU_COMMAND_BUFFER_CLIENT_RASTER_CMD_HELPER_AUTOGEN_H_
+
+void BindTexture(GLenum target, GLuint texture) {
+ raster::cmds::BindTexture* c = GetCmdSpace<raster::cmds::BindTexture>();
+ if (c) {
+ c->Init(target, texture);
+ }
+}
+
+void DeleteTexturesImmediate(GLsizei n, const GLuint* textures) {
+ const uint32_t size = raster::cmds::DeleteTexturesImmediate::ComputeSize(n);
+ raster::cmds::DeleteTexturesImmediate* c =
+ GetImmediateCmdSpaceTotalSize<raster::cmds::DeleteTexturesImmediate>(
+ size);
+ if (c) {
+ c->Init(n, textures);
+ }
+}
+
+void Finish() {
+ raster::cmds::Finish* c = GetCmdSpace<raster::cmds::Finish>();
+ if (c) {
+ c->Init();
+ }
+}
+
+void Flush() {
+ raster::cmds::Flush* c = GetCmdSpace<raster::cmds::Flush>();
+ if (c) {
+ c->Init();
+ }
+}
+
+void GenTexturesImmediate(GLsizei n, GLuint* textures) {
+ const uint32_t size = raster::cmds::GenTexturesImmediate::ComputeSize(n);
+ raster::cmds::GenTexturesImmediate* c =
+ GetImmediateCmdSpaceTotalSize<raster::cmds::GenTexturesImmediate>(size);
+ if (c) {
+ c->Init(n, textures);
+ }
+}
+
+void GetError(uint32_t result_shm_id, uint32_t result_shm_offset) {
+ raster::cmds::GetError* c = GetCmdSpace<raster::cmds::GetError>();
+ if (c) {
+ c->Init(result_shm_id, result_shm_offset);
+ }
+}
+
+void GetIntegerv(GLenum pname,
+ uint32_t params_shm_id,
+ uint32_t params_shm_offset) {
+ raster::cmds::GetIntegerv* c = GetCmdSpace<raster::cmds::GetIntegerv>();
+ if (c) {
+ c->Init(pname, params_shm_id, params_shm_offset);
+ }
+}
+
+void TexParameteri(GLenum target, GLenum pname, GLint param) {
+ raster::cmds::TexParameteri* c = GetCmdSpace<raster::cmds::TexParameteri>();
+ if (c) {
+ c->Init(target, pname, param);
+ }
+}
+
+void GenQueriesEXTImmediate(GLsizei n, GLuint* queries) {
+ const uint32_t size = raster::cmds::GenQueriesEXTImmediate::ComputeSize(n);
+ raster::cmds::GenQueriesEXTImmediate* c =
+ GetImmediateCmdSpaceTotalSize<raster::cmds::GenQueriesEXTImmediate>(size);
+ if (c) {
+ c->Init(n, queries);
+ }
+}
+
+void DeleteQueriesEXTImmediate(GLsizei n, const GLuint* queries) {
+ const uint32_t size = raster::cmds::DeleteQueriesEXTImmediate::ComputeSize(n);
+ raster::cmds::DeleteQueriesEXTImmediate* c =
+ GetImmediateCmdSpaceTotalSize<raster::cmds::DeleteQueriesEXTImmediate>(
+ size);
+ if (c) {
+ c->Init(n, queries);
+ }
+}
+
+void BeginQueryEXT(GLenum target,
+ GLuint id,
+ uint32_t sync_data_shm_id,
+ uint32_t sync_data_shm_offset) {
+ raster::cmds::BeginQueryEXT* c = GetCmdSpace<raster::cmds::BeginQueryEXT>();
+ if (c) {
+ c->Init(target, id, sync_data_shm_id, sync_data_shm_offset);
+ }
+}
+
+void EndQueryEXT(GLenum target, GLuint submit_count) {
+ raster::cmds::EndQueryEXT* c = GetCmdSpace<raster::cmds::EndQueryEXT>();
+ if (c) {
+ c->Init(target, submit_count);
+ }
+}
+
+void CompressedCopyTextureCHROMIUM(GLuint source_id, GLuint dest_id) {
+ raster::cmds::CompressedCopyTextureCHROMIUM* c =
+ GetCmdSpace<raster::cmds::CompressedCopyTextureCHROMIUM>();
+ if (c) {
+ c->Init(source_id, dest_id);
+ }
+}
+
+void LoseContextCHROMIUM(GLenum current, GLenum other) {
+ raster::cmds::LoseContextCHROMIUM* c =
+ GetCmdSpace<raster::cmds::LoseContextCHROMIUM>();
+ if (c) {
+ c->Init(current, other);
+ }
+}
+
+void InsertFenceSyncCHROMIUM(GLuint64 release_count) {
+ raster::cmds::InsertFenceSyncCHROMIUM* c =
+ GetCmdSpace<raster::cmds::InsertFenceSyncCHROMIUM>();
+ if (c) {
+ c->Init(release_count);
+ }
+}
+
+void WaitSyncTokenCHROMIUM(GLint namespace_id,
+ GLuint64 command_buffer_id,
+ GLuint64 release_count) {
+ raster::cmds::WaitSyncTokenCHROMIUM* c =
+ GetCmdSpace<raster::cmds::WaitSyncTokenCHROMIUM>();
+ if (c) {
+ c->Init(namespace_id, command_buffer_id, release_count);
+ }
+}
+
+void UnpremultiplyAndDitherCopyCHROMIUM(GLuint source_id,
+ GLuint dest_id,
+ GLint x,
+ GLint y,
+ GLsizei width,
+ GLsizei height) {
+ raster::cmds::UnpremultiplyAndDitherCopyCHROMIUM* c =
+ GetCmdSpace<raster::cmds::UnpremultiplyAndDitherCopyCHROMIUM>();
+ if (c) {
+ c->Init(source_id, dest_id, x, y, width, height);
+ }
+}
+
+void InitializeDiscardableTextureCHROMIUM(GLuint texture_id,
+ uint32_t shm_id,
+ uint32_t shm_offset) {
+ raster::cmds::InitializeDiscardableTextureCHROMIUM* c =
+ GetCmdSpace<raster::cmds::InitializeDiscardableTextureCHROMIUM>();
+ if (c) {
+ c->Init(texture_id, shm_id, shm_offset);
+ }
+}
+
+void UnlockDiscardableTextureCHROMIUM(GLuint texture_id) {
+ raster::cmds::UnlockDiscardableTextureCHROMIUM* c =
+ GetCmdSpace<raster::cmds::UnlockDiscardableTextureCHROMIUM>();
+ if (c) {
+ c->Init(texture_id);
+ }
+}
+
+void LockDiscardableTextureCHROMIUM(GLuint texture_id) {
+ raster::cmds::LockDiscardableTextureCHROMIUM* c =
+ GetCmdSpace<raster::cmds::LockDiscardableTextureCHROMIUM>();
+ if (c) {
+ c->Init(texture_id);
+ }
+}
+
+void BeginRasterCHROMIUM(GLuint texture_id,
+ GLuint sk_color,
+ GLuint msaa_sample_count,
+ GLboolean can_use_lcd_text,
+ GLboolean use_distance_field_text,
+ GLint color_type) {
+ raster::cmds::BeginRasterCHROMIUM* c =
+ GetCmdSpace<raster::cmds::BeginRasterCHROMIUM>();
+ if (c) {
+ c->Init(texture_id, sk_color, msaa_sample_count, can_use_lcd_text,
+ use_distance_field_text, color_type);
+ }
+}
+
+void RasterCHROMIUM(GLsizeiptr size,
+ uint32_t list_shm_id,
+ uint32_t list_shm_offset) {
+ raster::cmds::RasterCHROMIUM* c = GetCmdSpace<raster::cmds::RasterCHROMIUM>();
+ if (c) {
+ c->Init(size, list_shm_id, list_shm_offset);
+ }
+}
+
+void EndRasterCHROMIUM() {
+ raster::cmds::EndRasterCHROMIUM* c =
+ GetCmdSpace<raster::cmds::EndRasterCHROMIUM>();
+ if (c) {
+ c->Init();
+ }
+}
+
+void CreateTransferCacheEntryINTERNAL(GLuint entry_type,
+ GLuint entry_id,
+ GLuint handle_shm_id,
+ GLuint handle_shm_offset,
+ GLuint data_shm_id,
+ GLuint data_shm_offset,
+ GLuint data_size) {
+ raster::cmds::CreateTransferCacheEntryINTERNAL* c =
+ GetCmdSpace<raster::cmds::CreateTransferCacheEntryINTERNAL>();
+ if (c) {
+ c->Init(entry_type, entry_id, handle_shm_id, handle_shm_offset, data_shm_id,
+ data_shm_offset, data_size);
+ }
+}
+
+void DeleteTransferCacheEntryINTERNAL(GLuint entry_type, GLuint entry_id) {
+ raster::cmds::DeleteTransferCacheEntryINTERNAL* c =
+ GetCmdSpace<raster::cmds::DeleteTransferCacheEntryINTERNAL>();
+ if (c) {
+ c->Init(entry_type, entry_id);
+ }
+}
+
+void UnlockTransferCacheEntryINTERNAL(GLuint entry_type, GLuint entry_id) {
+ raster::cmds::UnlockTransferCacheEntryINTERNAL* c =
+ GetCmdSpace<raster::cmds::UnlockTransferCacheEntryINTERNAL>();
+ if (c) {
+ c->Init(entry_type, entry_id);
+ }
+}
+
+#endif // GPU_COMMAND_BUFFER_CLIENT_RASTER_CMD_HELPER_AUTOGEN_H_
diff --git a/chromium/gpu/command_buffer/client/raster_implementation.cc b/chromium/gpu/command_buffer/client/raster_implementation.cc
new file mode 100644
index 00000000000..92be4cad2da
--- /dev/null
+++ b/chromium/gpu/command_buffer/client/raster_implementation.cc
@@ -0,0 +1,1076 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "gpu/command_buffer/client/raster_implementation.h"
+
+#include <GLES2/gl2.h>
+#include <GLES2/gl2ext.h>
+#include <GLES2/gl2extchromium.h>
+#include <GLES3/gl3.h>
+#include <stddef.h>
+#include <stdint.h>
+#include <algorithm>
+#include <sstream>
+#include <string>
+#include "base/atomic_sequence_num.h"
+#include "base/bits.h"
+#include "base/compiler_specific.h"
+#include "base/numerics/safe_math.h"
+#include "base/strings/stringprintf.h"
+#include "base/trace_event/memory_allocator_dump.h"
+#include "base/trace_event/memory_dump_manager.h"
+#include "base/trace_event/process_memory_dump.h"
+#include "base/trace_event/trace_event.h"
+#include "build/build_config.h"
+#include "gpu/command_buffer/client/gpu_control.h"
+#include "gpu/command_buffer/client/query_tracker.h"
+#include "gpu/command_buffer/client/raster_cmd_helper.h"
+#include "gpu/command_buffer/client/shared_memory_limits.h"
+#include "gpu/command_buffer/client/transfer_buffer.h"
+#include "gpu/command_buffer/common/sync_token.h"
+#include "ui/gfx/geometry/rect.h"
+#include "ui/gfx/geometry/rect_f.h"
+#include "ui/gfx/ipc/color/gfx_param_traits.h"
+
+#if defined(GPU_CLIENT_DEBUG)
+#define GPU_CLIENT_SINGLE_THREAD_CHECK() SingleThreadChecker checker(this);
+#else // !defined(GPU_CLIENT_DEBUG)
+#define GPU_CLIENT_SINGLE_THREAD_CHECK()
+#endif // defined(GPU_CLIENT_DEBUG)
+
+// TODO(backer): Update APIs to always write to the destination? See below.
+//
+// Check that destination pointers point to initialized memory.
+// When the context is lost, calling GL function has no effect so if destination
+// pointers point to initialized memory it can often lead to crash bugs. eg.
+//
+// If it was up to us we'd just always write to the destination but the OpenGL
+// spec defines the behavior of OpenGL functions, not us. :-(
+#if defined(GPU_DCHECK)
+#define GPU_CLIENT_VALIDATE_DESTINATION_INITALIZATION_ASSERT(v) GPU_DCHECK(v)
+#define GPU_CLIENT_DCHECK(v) GPU_DCHECK(v)
+#elif defined(DCHECK)
+#define GPU_CLIENT_VALIDATE_DESTINATION_INITALIZATION_ASSERT(v) DCHECK(v)
+#define GPU_CLIENT_DCHECK(v) DCHECK(v)
+#else
+#define GPU_CLIENT_VALIDATE_DESTINATION_INITALIZATION_ASSERT(v) ASSERT(v)
+#define GPU_CLIENT_DCHECK(v) ASSERT(v)
+#endif
+
+#define GPU_CLIENT_VALIDATE_DESTINATION_INITALIZATION(type, ptr) \
+ GPU_CLIENT_VALIDATE_DESTINATION_INITALIZATION_ASSERT( \
+ ptr && \
+ (ptr[0] == static_cast<type>(0) || ptr[0] == static_cast<type>(-1)));
+
+#define GPU_CLIENT_VALIDATE_DESTINATION_OPTIONAL_INITALIZATION(type, ptr) \
+ GPU_CLIENT_VALIDATE_DESTINATION_INITALIZATION_ASSERT( \
+ !ptr || \
+ (ptr[0] == static_cast<type>(0) || ptr[0] == static_cast<type>(-1)));
+
+using gpu::gles2::GLES2Util;
+
+namespace gpu {
+namespace raster {
+
+RasterImplementation::SingleThreadChecker::SingleThreadChecker(
+ RasterImplementation* raster_implementation)
+ : raster_implementation_(raster_implementation) {
+ CHECK_EQ(0, raster_implementation_->use_count_);
+ ++raster_implementation_->use_count_;
+}
+
+RasterImplementation::SingleThreadChecker::~SingleThreadChecker() {
+ --raster_implementation_->use_count_;
+ CHECK_EQ(0, raster_implementation_->use_count_);
+}
+
+RasterImplementation::RasterImplementation(
+ RasterCmdHelper* helper,
+ TransferBufferInterface* transfer_buffer,
+ bool bind_generates_resource,
+ bool lose_context_when_out_of_memory,
+ GpuControl* gpu_control)
+ : ImplementationBase(helper, transfer_buffer, gpu_control),
+ helper_(helper),
+ active_texture_unit_(0),
+ error_bits_(0),
+ lose_context_when_out_of_memory_(lose_context_when_out_of_memory),
+ use_count_(0),
+ current_trace_stack_(0),
+ capabilities_(gpu_control->GetCapabilities()),
+ aggressively_free_resources_(false),
+ lost_(false) {
+ DCHECK(helper);
+ DCHECK(transfer_buffer);
+ DCHECK(gpu_control);
+
+ std::stringstream ss;
+ ss << std::hex << this;
+ this_in_hex_ = ss.str();
+}
+
+gpu::ContextResult RasterImplementation::Initialize(
+ const SharedMemoryLimits& limits) {
+ TRACE_EVENT0("gpu", "RasterImplementation::Initialize");
+
+ auto result = ImplementationBase::Initialize(limits);
+ if (result != gpu::ContextResult::kSuccess) {
+ return result;
+ }
+
+ texture_units_ = std::make_unique<TextureUnit[]>(
+ capabilities_.max_combined_texture_image_units);
+
+ return gpu::ContextResult::kSuccess;
+}
+
+RasterImplementation::~RasterImplementation() {
+ // Make sure the queries are finished otherwise we'll delete the
+ // shared memory (mapped_memory_) which will free the memory used
+ // by the queries. The GPU process when validating that memory is still
+ // shared will fail and abort (ie, it will stop running).
+ WaitForCmd();
+
+ query_tracker_.reset();
+
+ // Make sure the commands make it the service.
+ WaitForCmd();
+}
+
+RasterCmdHelper* RasterImplementation::helper() const {
+ return helper_;
+}
+
+IdAllocator* RasterImplementation::GetIdAllocator(IdNamespaces namespace_id) {
+ DCHECK_EQ(namespace_id, IdNamespaces::kQueries);
+ return &query_id_allocator_;
+}
+
+void RasterImplementation::OnGpuControlLostContext() {
+ OnGpuControlLostContextMaybeReentrant();
+
+ // This should never occur more than once.
+ DCHECK(!lost_context_callback_run_);
+ lost_context_callback_run_ = true;
+ if (!lost_context_callback_.is_null()) {
+ std::move(lost_context_callback_).Run();
+ }
+}
+
+void RasterImplementation::OnGpuControlLostContextMaybeReentrant() {
+ {
+ base::AutoLock hold(lost_lock_);
+ lost_ = true;
+ }
+}
+
+void RasterImplementation::OnGpuControlErrorMessage(const char* message,
+ int32_t id) {
+ if (!error_message_callback_.is_null())
+ error_message_callback_.Run(message, id);
+}
+
+void RasterImplementation::SetAggressivelyFreeResources(
+ bool aggressively_free_resources) {
+ TRACE_EVENT1("gpu", "RasterImplementation::SetAggressivelyFreeResources",
+ "aggressively_free_resources", aggressively_free_resources);
+ aggressively_free_resources_ = aggressively_free_resources;
+
+ if (aggressively_free_resources_ && helper_->HaveRingBuffer()) {
+ // Flush will delete transfer buffer resources if
+ // |aggressively_free_resources_| is true.
+ Flush();
+ } else {
+ ShallowFlushCHROMIUM();
+ }
+}
+
+void RasterImplementation::Swap() {
+ NOTREACHED();
+}
+
+void RasterImplementation::SwapWithBounds(const std::vector<gfx::Rect>&
+ /* rects */) {
+ NOTREACHED();
+}
+
+void RasterImplementation::PartialSwapBuffers(
+ const gfx::Rect& /* sub_buffer */) {
+ NOTREACHED();
+}
+
+void RasterImplementation::CommitOverlayPlanes() {
+ NOTREACHED();
+}
+
+void RasterImplementation::ScheduleOverlayPlane(
+ int /* plane_z_order */,
+ gfx::OverlayTransform /* plane_transform */,
+ unsigned /* overlay_texture_id */,
+ const gfx::Rect& /* display_bounds */,
+ const gfx::RectF& /* uv_rect */) {
+ NOTREACHED();
+}
+
+uint64_t RasterImplementation::ShareGroupTracingGUID() const {
+ NOTREACHED();
+ return 0;
+}
+
+void RasterImplementation::SetErrorMessageCallback(
+ base::RepeatingCallback<void(const char*, int32_t)> callback) {
+ error_message_callback_ = std::move(callback);
+}
+
+void RasterImplementation::SetSnapshotRequested() {
+ // Should only be called for real GL contexts.
+ NOTREACHED();
+}
+
+bool RasterImplementation::ThreadSafeShallowLockDiscardableTexture(
+ uint32_t texture_id) {
+ return discardable_texture_manager_.TextureIsValid(texture_id) &&
+ discardable_texture_manager_.LockTexture(texture_id);
+}
+
+void RasterImplementation::CompleteLockDiscardableTexureOnContextThread(
+ uint32_t texture_id) {
+ helper_->LockDiscardableTextureCHROMIUM(texture_id);
+}
+
+bool RasterImplementation::ThreadsafeDiscardableTextureIsDeletedForTracing(
+ uint32_t texture_id) {
+ return discardable_texture_manager_.TextureIsDeletedForTracing(texture_id);
+}
+
+const std::string& RasterImplementation::GetLogPrefix() const {
+ const std::string& prefix(debug_marker_manager_.GetMarker());
+ return prefix.empty() ? this_in_hex_ : prefix;
+}
+
+GLenum RasterImplementation::GetError() {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glGetError()");
+ GLenum err = GetGLError();
+ GPU_CLIENT_LOG("returned " << GLES2Util::GetStringError(err));
+ return err;
+}
+
+void RasterImplementation::IssueBeginQuery(GLenum target,
+ GLuint id,
+ uint32_t sync_data_shm_id,
+ uint32_t sync_data_shm_offset) {
+ helper_->BeginQueryEXT(target, id, sync_data_shm_id, sync_data_shm_offset);
+}
+
+void RasterImplementation::IssueEndQuery(GLenum target, GLuint submit_count) {
+ helper_->EndQueryEXT(target, submit_count);
+}
+
+void RasterImplementation::IssueQueryCounter(GLuint id,
+ GLenum target,
+ uint32_t sync_data_shm_id,
+ uint32_t sync_data_shm_offset,
+ GLuint submit_count) {
+ NOTIMPLEMENTED();
+}
+
+void RasterImplementation::IssueSetDisjointValueSync(
+ uint32_t sync_data_shm_id,
+ uint32_t sync_data_shm_offset) {
+ NOTIMPLEMENTED();
+}
+
+// Returns (and clears) the lowest-order client-synthesized GL error bit,
+// mirroring GL's one-error-per-GetError semantics.
+GLenum RasterImplementation::GetClientSideGLError() {
+  if (error_bits_ == 0) {
+    return GL_NO_ERROR;
+  }
+
+  GLenum error = GL_NO_ERROR;
+  // Find the lowest error bit that is currently set.
+  for (uint32_t mask = 1; mask != 0; mask = mask << 1) {
+    if ((error_bits_ & mask) != 0) {
+      error = GLES2Util::GLErrorBitToGLError(mask);
+      break;
+    }
+  }
+  // Clear the bit so each error is reported exactly once.
+  error_bits_ &= ~GLES2Util::GLErrorToErrorBit(error);
+  return error;
+}
+
+CommandBufferHelper* RasterImplementation::cmd_buffer_helper() {
+  return helper_;
+}
+
+// ClientTransferCache::Client implementation: forwards transfer-cache entry
+// create/delete/unlock operations to the command buffer.
+void RasterImplementation::IssueCreateTransferCacheEntry(
+    GLuint entry_type,
+    GLuint entry_id,
+    GLuint handle_shm_id,
+    GLuint handle_shm_offset,
+    GLuint data_shm_id,
+    GLuint data_shm_offset,
+    GLuint data_size) {
+  helper_->CreateTransferCacheEntryINTERNAL(entry_type, entry_id, handle_shm_id,
+                                            handle_shm_offset, data_shm_id,
+                                            data_shm_offset, data_size);
+}
+
+void RasterImplementation::IssueDeleteTransferCacheEntry(GLuint entry_type,
+                                                         GLuint entry_id) {
+  helper_->DeleteTransferCacheEntryINTERNAL(entry_type, entry_id);
+}
+
+void RasterImplementation::IssueUnlockTransferCacheEntry(GLuint entry_type,
+                                                         GLuint entry_id) {
+  helper_->UnlockTransferCacheEntryINTERNAL(entry_type, entry_id);
+}
+
+CommandBuffer* RasterImplementation::command_buffer() const {
+  return helper_->command_buffer();
+}
+
+// Fetches the GL error from the service via the shared result buffer. This is
+// a blocking round trip (WaitForCmd); client-synthesized errors are returned
+// only when the service reports GL_NO_ERROR.
+GLenum RasterImplementation::GetGLError() {
+  TRACE_EVENT0("gpu", "RasterImplementation::GetGLError");
+  // Check the GL error first, then our wrapped error.
+  typedef cmds::GetError::Result Result;
+  Result* result = GetResultAs<Result*>();
+  // If we couldn't allocate a result the context is lost.
+  if (!result) {
+    return GL_NO_ERROR;
+  }
+  *result = GL_NO_ERROR;
+  helper_->GetError(GetResultShmId(), GetResultShmOffset());
+  // Blocks until the service has written the result into shared memory.
+  WaitForCmd();
+  GLenum error = *result;
+  if (error == GL_NO_ERROR) {
+    error = GetClientSideGLError();
+  } else {
+    // There was an error, clear the corresponding wrapped error.
+    error_bits_ &= ~GLES2Util::GLErrorToErrorBit(error);
+  }
+  return error;
+}
+
+#if defined(RASTER_CLIENT_FAIL_GL_ERRORS)
+// Debug-only helpers (see RASTER_CLIENT_FAIL_GL_ERRORS in the header): crash
+// immediately when a GL error is generated so the debugger stops at the site.
+void RasterImplementation::FailGLError(GLenum error) {
+  if (error != GL_NO_ERROR) {
+    NOTREACHED() << "Error";
+  }
+}
+// NOTE: Calling GetGLError overwrites data in the result buffer.
+void RasterImplementation::CheckGLError() {
+  FailGLError(GetGLError());
+}
+#endif  // defined(RASTER_CLIENT_FAIL_GL_ERRORS)
+
+// Records a client-synthesized GL error: logs it, remembers the message,
+// invokes the error callback, and sets the corresponding bit in
+// |error_bits_|. May lose the context on GL_OUT_OF_MEMORY (see below).
+void RasterImplementation::SetGLError(GLenum error,
+                                      const char* function_name,
+                                      const char* msg) {
+  GPU_CLIENT_LOG("[" << GetLogPrefix() << "] Client Synthesized Error: "
+                     << GLES2Util::GetStringError(error) << ": "
+                     << function_name << ": " << msg);
+  FailGLError(error);
+  if (msg) {
+    last_error_ = msg;
+  }
+  if (!error_message_callback_.is_null()) {
+    std::string temp(GLES2Util::GetStringError(error) + " : " + function_name +
+                     ": " + (msg ? msg : ""));
+    error_message_callback_.Run(temp.c_str(), 0);
+  }
+  error_bits_ |= GLES2Util::GLErrorToErrorBit(error);
+
+  // Deliberately lose the context when OOM occurs and the embedder asked for
+  // that behavior at construction time.
+  if (error == GL_OUT_OF_MEMORY && lose_context_when_out_of_memory_) {
+    helper_->LoseContextCHROMIUM(GL_GUILTY_CONTEXT_RESET_ARB,
+                                 GL_UNKNOWN_CONTEXT_RESET_ARB);
+  }
+}
+
+// Convenience wrapper for the common "<label> was <enum>" INVALID_ENUM error.
+void RasterImplementation::SetGLErrorInvalidEnum(const char* function_name,
+                                                 GLenum value,
+                                                 const char* label) {
+  SetGLError(
+      GL_INVALID_ENUM, function_name,
+      (std::string(label) + " was " + GLES2Util::GetStringEnum(value)).c_str());
+}
+
+// Answers glGetIntegerv queries that can be satisfied purely from client-side
+// state; returns false for pnames this implementation does not track.
+bool RasterImplementation::GetIntegervHelper(GLenum pname, GLint* params) {
+  switch (pname) {
+    case GL_ACTIVE_TEXTURE:
+      *params = active_texture_unit_ + GL_TEXTURE0;
+      return true;
+    case GL_MAX_TEXTURE_SIZE:
+      *params = capabilities_.max_texture_size;
+      return true;
+    case GL_TEXTURE_BINDING_2D:
+      *params = texture_units_[active_texture_unit_].bound_texture_2d;
+      return true;
+    default:
+      return false;
+  }
+}
+
+// Shared implementation for glGetQueryObject*EXT. Validates the query id and
+// state, then reads the requested value. For GL_QUERY_RESULT_EXT this may
+// block (WaitForToken, then a full Finish) until the result is available.
+// Returns true iff *params was written with a valid value.
+bool RasterImplementation::GetQueryObjectValueHelper(const char* function_name,
+                                                     GLuint id,
+                                                     GLenum pname,
+                                                     GLuint64* params) {
+  GPU_CLIENT_SINGLE_THREAD_CHECK();
+  GPU_CLIENT_LOG("[" << GetLogPrefix() << "] GetQueryObjectValueHelper(" << id
+                     << ", " << GLES2Util::GetStringQueryObjectParameter(pname)
+                     << ", " << static_cast<const void*>(params) << ")");
+
+  gles2::QueryTracker::Query* query = query_tracker_->GetQuery(id);
+  if (!query) {
+    SetGLError(GL_INVALID_OPERATION, function_name, "unknown query id");
+    return false;
+  }
+
+  if (query->Active()) {
+    SetGLError(GL_INVALID_OPERATION, function_name,
+               "query active. Did you call glEndQueryEXT?");
+    return false;
+  }
+
+  if (query->NeverUsed()) {
+    SetGLError(GL_INVALID_OPERATION, function_name,
+               "Never used. Did you call glBeginQueryEXT?");
+    return false;
+  }
+
+  bool valid_value = false;
+  switch (pname) {
+    case GL_QUERY_RESULT_EXT:
+      if (!query->CheckResultsAvailable(helper_)) {
+        // First wait for the query's token; if still unavailable, force a
+        // full Finish, after which the result must be present.
+        helper_->WaitForToken(query->token());
+        if (!query->CheckResultsAvailable(helper_)) {
+          FinishHelper();
+          CHECK(query->CheckResultsAvailable(helper_));
+        }
+      }
+      *params = query->GetResult();
+      valid_value = true;
+      break;
+    case GL_QUERY_RESULT_AVAILABLE_EXT:
+      // Non-blocking availability check.
+      *params = query->CheckResultsAvailable(helper_);
+      valid_value = true;
+      break;
+    default:
+      SetGLErrorInvalidEnum(function_name, pname, "pname");
+      break;
+  }
+  GPU_CLIENT_LOG("  " << *params);
+  CheckGLError();
+  return valid_value;
+}
+
+// glFlush: inserts a service-side flush command, then flushes this client's
+// command buffer.
+void RasterImplementation::Flush() {
+  GPU_CLIENT_SINGLE_THREAD_CHECK();
+  GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glFlush()");
+  // Insert the cmd to call glFlush
+  helper_->Flush();
+  FlushHelper();
+}
+
+// Shallow flush: flushes only the command buffer, without inserting a
+// service-side glFlush command.
+void RasterImplementation::IssueShallowFlush() {
+  GPU_CLIENT_SINGLE_THREAD_CHECK();
+  GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glShallowFlushCHROMIUM()");
+  FlushHelper();
+}
+
+void RasterImplementation::ShallowFlushCHROMIUM() {
+  IssueShallowFlush();
+}
+
+void RasterImplementation::FlushHelper() {
+  // Flush our command buffer
+  // (tell the service to execute up to the flush cmd.)
+  helper_->CommandBufferHelper::Flush();
+
+  // Honor SetAggressivelyFreeResources() by releasing caches after each flush.
+  if (aggressively_free_resources_)
+    FreeEverything();
+}
+
+void RasterImplementation::OrderingBarrierCHROMIUM() {
+  GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glOrderingBarrierCHROMIUM");
+  // Flush command buffer at the GPU channel level.  May be implemented as
+  // Flush().
+  helper_->CommandBufferHelper::OrderingBarrier();
+}
+
+void RasterImplementation::Finish() {
+  GPU_CLIENT_SINGLE_THREAD_CHECK();
+  FinishHelper();
+}
+
+// glFinish: inserts a service-side finish command and blocks until the
+// service has executed it.
+void RasterImplementation::FinishHelper() {
+  GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glFinish()");
+  TRACE_EVENT0("gpu", "RasterImplementation::Finish");
+  // Insert the cmd to call glFinish
+  helper_->Finish();
+  // Finish our command buffer
+  // (tell the service to execute up to the Finish cmd and wait for it to
+  // execute.)
+  helper_->CommandBufferHelper::Finish();
+
+  if (aggressively_free_resources_)
+    FreeEverything();
+}
+
+// Query ids are allocated client-side; nothing to do at generation time.
+void RasterImplementation::GenQueriesEXTHelper(GLsizei /* n */,
+                                               const GLuint* /* queries */) {}
+
+// Deletes textures on the service, releases their client-side ids and any
+// discardable-texture bookkeeping, and unbinds them from all texture units.
+void RasterImplementation::DeleteTexturesHelper(GLsizei n,
+                                                const GLuint* textures) {
+  helper_->DeleteTexturesImmediate(n, textures);
+  for (GLsizei ii = 0; ii < n; ++ii) {
+    texture_id_allocator_.FreeID(textures[ii]);
+    discardable_texture_manager_.FreeTexture(textures[ii]);
+  }
+  UnbindTexturesHelper(n, textures);
+}
+
+// Clears the client-side 2D binding of each listed texture on every unit.
+void RasterImplementation::UnbindTexturesHelper(GLsizei n,
+                                                const GLuint* textures) {
+  for (GLsizei ii = 0; ii < n; ++ii) {
+    for (GLint tt = 0; tt < capabilities_.max_combined_texture_image_units;
+         ++tt) {
+      TextureUnit& unit = texture_units_[tt];
+      if (textures[ii] == unit.bound_texture_2d) {
+        unit.bound_texture_2d = 0;
+      }
+    }
+  }
+}
+
+// Reports context loss state recorded by the GpuControlClient callbacks.
+GLenum RasterImplementation::GetGraphicsResetStatusKHR() {
+  GPU_CLIENT_SINGLE_THREAD_CHECK();
+  GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glGetGraphicsResetStatusKHR()");
+
+  base::AutoLock hold(lost_lock_);
+  if (lost_)
+    return GL_UNKNOWN_CONTEXT_RESET_KHR;
+  return GL_NO_ERROR;
+}
+
+void RasterImplementation::DeleteQueriesEXTHelper(GLsizei n,
+                                                  const GLuint* queries) {
+  IdAllocator* id_allocator = GetIdAllocator(IdNamespaces::kQueries);
+  for (GLsizei ii = 0; ii < n; ++ii) {
+    query_tracker_->RemoveQuery(queries[ii]);
+    id_allocator->FreeID(queries[ii]);
+  }
+
+  helper_->DeleteQueriesEXTImmediate(n, queries);
+}
+
+// glBeginQueryEXT. Only GL_COMMANDS_COMPLETED_CHROMIUM is accepted as a
+// target, and only when the sync_query capability is available.
+void RasterImplementation::BeginQueryEXT(GLenum target, GLuint id) {
+  GPU_CLIENT_SINGLE_THREAD_CHECK();
+  GPU_CLIENT_LOG("[" << GetLogPrefix() << "] BeginQueryEXT("
+                     << GLES2Util::GetStringQueryTarget(target) << ", " << id
+                     << ")");
+
+  switch (target) {
+    case GL_COMMANDS_COMPLETED_CHROMIUM:
+      if (!capabilities_.sync_query) {
+        SetGLError(GL_INVALID_OPERATION, "glBeginQueryEXT",
+                   "not enabled for commands completed queries");
+        return;
+      }
+      break;
+    default:
+      SetGLError(GL_INVALID_ENUM, "glBeginQueryEXT", "unknown query target");
+      return;
+  }
+
+  // if any outstanding queries INV_OP
+  if (query_tracker_->GetCurrentQuery(target)) {
+    SetGLError(GL_INVALID_OPERATION, "glBeginQueryEXT",
+               "query already in progress");
+    return;
+  }
+
+  if (id == 0) {
+    SetGLError(GL_INVALID_OPERATION, "glBeginQueryEXT", "id is 0");
+    return;
+  }
+
+  // The id must have been allocated via GenQueriesEXT.
+  if (!GetIdAllocator(IdNamespaces::kQueries)->InUse(id)) {
+    SetGLError(GL_INVALID_OPERATION, "glBeginQueryEXT", "invalid id");
+    return;
+  }
+
+  if (query_tracker_->BeginQuery(id, target, this))
+    CheckGLError();
+}
+
+void RasterImplementation::EndQueryEXT(GLenum target) {
+  GPU_CLIENT_SINGLE_THREAD_CHECK();
+  GPU_CLIENT_LOG("[" << GetLogPrefix() << "] EndQueryEXT("
+                     << GLES2Util::GetStringQueryTarget(target) << ")");
+  if (query_tracker_->EndQuery(target, this))
+    CheckGLError();
+}
+
+// glGetQueryObjectuivEXT: narrows the 64-bit helper result with saturation.
+void RasterImplementation::GetQueryObjectuivEXT(GLuint id,
+                                                GLenum pname,
+                                                GLuint* params) {
+  GLuint64 result = 0;
+  if (GetQueryObjectValueHelper("glGetQueryObjectuivEXT", id, pname, &result))
+    *params = base::saturated_cast<GLuint>(result);
+}
+
+// Generates a *verified* sync token: the fence sync is flushed and
+// EnsureWorkVisible() is called before the token (with its verify bit set)
+// is copied into |sync_token|.
+void RasterImplementation::GenSyncTokenCHROMIUM(GLbyte* sync_token) {
+  if (!sync_token) {
+    SetGLError(GL_INVALID_VALUE, "glGenSyncTokenCHROMIUM", "empty sync_token");
+    return;
+  }
+
+  uint64_t fence_sync = gpu_control_->GenerateFenceSyncRelease();
+  helper_->InsertFenceSyncCHROMIUM(fence_sync);
+  helper_->CommandBufferHelper::OrderingBarrier();
+  gpu_control_->EnsureWorkVisible();
+
+  // Copy the data over after setting the data to ensure alignment.
+  SyncToken sync_token_data(gpu_control_->GetNamespaceID(),
+                            gpu_control_->GetCommandBufferID(), fence_sync);
+  sync_token_data.SetVerifyFlush();
+  memcpy(sync_token, &sync_token_data, sizeof(sync_token_data));
+}
+
+// Generates an *unverified* sync token: same as above but without
+// EnsureWorkVisible() or the verify bit; callers must verify it (e.g. via
+// VerifySyncTokensCHROMIUM) before sending it across process boundaries.
+void RasterImplementation::GenUnverifiedSyncTokenCHROMIUM(GLbyte* sync_token) {
+  if (!sync_token) {
+    SetGLError(GL_INVALID_VALUE, "glGenUnverifiedSyncTokenCHROMIUM",
+               "empty sync_token");
+    return;
+  }
+
+  uint64_t fence_sync = gpu_control_->GenerateFenceSyncRelease();
+  helper_->InsertFenceSyncCHROMIUM(fence_sync);
+  helper_->CommandBufferHelper::OrderingBarrier();
+
+  // Copy the data over after setting the data to ensure alignment.
+  SyncToken sync_token_data(gpu_control_->GetNamespaceID(),
+                            gpu_control_->GetCommandBufferID(), fence_sync);
+  memcpy(sync_token, &sync_token_data, sizeof(sync_token_data));
+}
+
+// Marks the given sync tokens as verified, calling EnsureWorkVisible() once
+// at the end if any token actually required verification.
+void RasterImplementation::VerifySyncTokensCHROMIUM(GLbyte** sync_tokens,
+                                                    GLsizei count) {
+  bool requires_synchronization = false;
+  for (GLsizei i = 0; i < count; ++i) {
+    if (sync_tokens[i]) {
+      // Copy to a local to guarantee alignment before accessing fields.
+      SyncToken sync_token;
+      memcpy(&sync_token, sync_tokens[i], sizeof(sync_token));
+
+      if (sync_token.HasData() && !sync_token.verified_flush()) {
+        if (!GetVerifiedSyncTokenForIPC(sync_token, &sync_token)) {
+          SetGLError(GL_INVALID_VALUE, "glVerifySyncTokensCHROMIUM",
+                     "Cannot verify sync token using this context.");
+          return;
+        }
+        requires_synchronization = true;
+        DCHECK(sync_token.verified_flush());
+      }
+
+      // Set verify bit on empty sync tokens too.
+      sync_token.SetVerifyFlush();
+
+      memcpy(sync_tokens[i], &sync_token, sizeof(sync_token));
+    }
+  }
+
+  // Ensure all the fence syncs are visible on GPU service.
+  if (requires_synchronization)
+    gpu_control_->EnsureWorkVisible();
+}
+
+// Inserts a wait on a (verified) sync token into the command stream.
+void RasterImplementation::WaitSyncTokenCHROMIUM(
+    const GLbyte* sync_token_data) {
+  if (!sync_token_data)
+    return;
+
+  // Copy the data over before data access to ensure alignment.
+  SyncToken sync_token, verified_sync_token;
+  memcpy(&sync_token, sync_token_data, sizeof(SyncToken));
+
+  if (!sync_token.HasData())
+    return;
+
+  if (!GetVerifiedSyncTokenForIPC(sync_token, &verified_sync_token)) {
+    SetGLError(GL_INVALID_VALUE, "glWaitSyncTokenCHROMIUM",
+               "Cannot wait on sync_token which has not been verified");
+    return;
+  }
+
+  helper_->WaitSyncTokenCHROMIUM(
+      static_cast<GLint>(sync_token.namespace_id()),
+      sync_token.command_buffer_id().GetUnsafeValue(),
+      sync_token.release_count());
+
+  // Enqueue sync token in flush after inserting command so that it's not
+  // included in an automatic flush.
+  gpu_control_->WaitSyncTokenHint(verified_sync_token);
+}
+
+namespace {
+
+// Returns true if |internalformat| is valid for CreateImageCHROMIUM on this
+// context: compressed/norm16/xr30 formats are gated on the corresponding
+// capability bits; the common uncompressed formats are always accepted.
+bool CreateImageValidInternalFormat(GLenum internalformat,
+                                    const Capabilities& capabilities) {
+  switch (internalformat) {
+    case GL_ATC_RGB_AMD:
+    case GL_ATC_RGBA_INTERPOLATED_ALPHA_AMD:
+      return capabilities.texture_format_atc;
+    case GL_COMPRESSED_RGB_S3TC_DXT1_EXT:
+      return capabilities.texture_format_dxt1;
+    case GL_COMPRESSED_RGBA_S3TC_DXT5_EXT:
+      return capabilities.texture_format_dxt5;
+    case GL_ETC1_RGB8_OES:
+      return capabilities.texture_format_etc1;
+    case GL_R16_EXT:
+      return capabilities.texture_norm16;
+    case GL_RGB10_A2_EXT:
+      return capabilities.image_xr30;
+    case GL_RED:
+    case GL_RG_EXT:
+    case GL_RGB:
+    case GL_RGBA:
+    case GL_RGB_YCBCR_422_CHROMIUM:
+    case GL_RGB_YCBCR_420V_CHROMIUM:
+    case GL_RGB_YCRCB_420_CHROMIUM:
+    case GL_BGRA_EXT:
+      return true;
+    default:
+      return false;
+  }
+}
+
+}  // namespace
+
+// Validates arguments and asks the GpuControl to create a GpuMemoryBuffer
+// image. Returns 0 (and raises a GL error) on failure.
+GLuint RasterImplementation::CreateImageCHROMIUMHelper(ClientBuffer buffer,
+                                                       GLsizei width,
+                                                       GLsizei height,
+                                                       GLenum internalformat) {
+  if (width <= 0) {
+    SetGLError(GL_INVALID_VALUE, "glCreateImageCHROMIUM", "width <= 0");
+    return 0;
+  }
+
+  if (height <= 0) {
+    SetGLError(GL_INVALID_VALUE, "glCreateImageCHROMIUM", "height <= 0");
+    return 0;
+  }
+
+  if (!CreateImageValidInternalFormat(internalformat, capabilities_)) {
+    SetGLError(GL_INVALID_VALUE, "glCreateImageCHROMIUM", "invalid format");
+    return 0;
+  }
+
+  // CreateImage creates a fence sync so we must flush first to ensure all
+  // previously created fence syncs are flushed first.
+  FlushHelper();
+
+  int32_t image_id =
+      gpu_control_->CreateImage(buffer, width, height, internalformat);
+  if (image_id < 0) {
+    SetGLError(GL_OUT_OF_MEMORY, "glCreateImageCHROMIUM", "image_id < 0");
+    return 0;
+  }
+  return image_id;
+}
+
+GLuint RasterImplementation::CreateImageCHROMIUM(ClientBuffer buffer,
+                                                 GLsizei width,
+                                                 GLsizei height,
+                                                 GLenum internalformat) {
+  GPU_CLIENT_SINGLE_THREAD_CHECK();
+  GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glCreateImageCHROMIUM(" << width
+                     << ", " << height << ", "
+                     << GLES2Util::GetStringImageInternalFormat(internalformat)
+                     << ")");
+  GLuint image_id =
+      CreateImageCHROMIUMHelper(buffer, width, height, internalformat);
+  CheckGLError();
+  return image_id;
+}
+
+void RasterImplementation::DestroyImageCHROMIUMHelper(GLuint image_id) {
+  // Flush the command stream to make sure all pending commands
+  // that may refer to the image_id are executed on the service side.
+  helper_->CommandBufferHelper::Flush();
+  gpu_control_->DestroyImage(image_id);
+}
+
+void RasterImplementation::DestroyImageCHROMIUM(GLuint image_id) {
+  GPU_CLIENT_SINGLE_THREAD_CHECK();
+  GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glDestroyImageCHROMIUM("
+                     << image_id << ")");
+  DestroyImageCHROMIUMHelper(image_id);
+  CheckGLError();
+}
+
+// Registers |texture_id| with the discardable texture manager and tells the
+// service where the shared discardable handle lives. Initializing the same
+// id twice is an error.
+void RasterImplementation::InitializeDiscardableTextureCHROMIUM(
+    GLuint texture_id) {
+  if (discardable_texture_manager_.TextureIsValid(texture_id)) {
+    SetGLError(GL_INVALID_VALUE, "glInitializeDiscardableTextureCHROMIUM",
+               "Texture ID already initialized");
+    return;
+  }
+  ClientDiscardableHandle handle =
+      discardable_texture_manager_.InitializeTexture(helper_->command_buffer(),
+                                                     texture_id);
+  // Handle allocation can fail (e.g. on shared-memory failure); in that case
+  // no command is issued.
+  if (!handle.IsValid())
+    return;
+
+  helper_->InitializeDiscardableTextureCHROMIUM(texture_id, handle.shm_id(),
+                                                handle.byte_offset());
+}
+
+// Drops one lock on a discardable texture; if it becomes fully unlocked the
+// texture is also unbound client-side, and the service is notified.
+void RasterImplementation::UnlockDiscardableTextureCHROMIUM(GLuint texture_id) {
+  if (!discardable_texture_manager_.TextureIsValid(texture_id)) {
+    SetGLError(GL_INVALID_VALUE, "glUnlockDiscardableTextureCHROMIUM",
+               "Texture ID not initialized");
+    return;
+  }
+
+  // |should_unbind_texture| will be set to true if the texture has been fully
+  // unlocked. In this case, ensure the texture is unbound.
+  bool should_unbind_texture = false;
+  discardable_texture_manager_.UnlockTexture(texture_id,
+                                             &should_unbind_texture);
+  if (should_unbind_texture)
+    UnbindTexturesHelper(1, &texture_id);
+
+  helper_->UnlockDiscardableTextureCHROMIUM(texture_id);
+}
+
+// Attempts to re-lock a discardable texture. Returns false (after deleting
+// the client-side texture) if the service already discarded it.
+bool RasterImplementation::LockDiscardableTextureCHROMIUM(GLuint texture_id) {
+  if (!discardable_texture_manager_.TextureIsValid(texture_id)) {
+    SetGLError(GL_INVALID_VALUE, "glLockDiscardableTextureCHROMIUM",
+               "Texture ID not initialized");
+    return false;
+  }
+  if (!discardable_texture_manager_.LockTexture(texture_id)) {
+    // Failure to lock means that this texture has been deleted on the service
+    // side. Delete it here as well.
+    DeleteTexturesHelper(1, &texture_id);
+    return false;
+  }
+  helper_->LockDiscardableTextureCHROMIUM(texture_id);
+  return true;
+}
+
+// Maps a transfer-buffer region of |size| bytes for serialized raster
+// commands. Only one mapping may be outstanding at a time; returns nullptr
+// (with a GL error) on invalid size, double-map, or allocation failure.
+void* RasterImplementation::MapRasterCHROMIUM(GLsizeiptr size) {
+  if (size < 0) {
+    SetGLError(GL_INVALID_VALUE, "glMapRasterCHROMIUM", "negative size");
+    return nullptr;
+  }
+  if (raster_mapped_buffer_) {
+    SetGLError(GL_INVALID_OPERATION, "glMapRasterCHROMIUM", "already mapped");
+    return nullptr;
+  }
+  raster_mapped_buffer_.emplace(size, helper_, transfer_buffer_);
+  if (!raster_mapped_buffer_->valid()) {
+    SetGLError(GL_INVALID_OPERATION, "glMapRasterCHROMIUM", "size too big");
+    raster_mapped_buffer_ = base::nullopt;
+    return nullptr;
+  }
+  return raster_mapped_buffer_->address();
+}
+
+// Unmaps the buffer obtained from MapRasterCHROMIUM. |written_size| == 0
+// discards the buffer without issuing a RasterCHROMIUM command; otherwise
+// the buffer is shrunk to the written size and submitted.
+void RasterImplementation::UnmapRasterCHROMIUM(GLsizeiptr written_size) {
+  if (written_size < 0) {
+    SetGLError(GL_INVALID_VALUE, "glUnmapRasterCHROMIUM",
+               "negative written_size");
+    return;
+  }
+  if (!raster_mapped_buffer_) {
+    SetGLError(GL_INVALID_OPERATION, "glUnmapRasterCHROMIUM", "not mapped");
+    return;
+  }
+  DCHECK(raster_mapped_buffer_->valid());
+  if (written_size == 0) {
+    raster_mapped_buffer_->Discard();
+    raster_mapped_buffer_ = base::nullopt;
+    return;
+  }
+  raster_mapped_buffer_->Shrink(written_size);
+  helper_->RasterCHROMIUM(written_size, raster_mapped_buffer_->shm_id(),
+                          raster_mapped_buffer_->offset());
+  raster_mapped_buffer_ = base::nullopt;
+  CheckGLError();
+}
+
+// Include the auto-generated part of this file. We split this because it means
+// we can easily edit the non-auto generated parts right here in this file
+// instead of having to edit some template or the code generator.
+#include "gpu/command_buffer/client/raster_implementation_impl_autogen.h"
+
+// glGenTextures: ids are allocated purely client-side for now; the service
+// is not yet informed (see the TODO below).
+void RasterImplementation::GenTextures(GLsizei n, GLuint* textures) {
+  GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glGenTextures(" << n << ", "
+                     << static_cast<const void*>(textures) << ")");
+  if (n < 0) {
+    SetGLError(GL_INVALID_VALUE, "glGenTextures", "n < 0");
+    return;
+  }
+  GPU_CLIENT_SINGLE_THREAD_CHECK();
+  for (int ii = 0; ii < n; ++ii) {
+    textures[ii] = texture_id_allocator_.AllocateID();
+  }
+  // TODO(backer): Send some signal to service side.
+  // helper_->GenTexturesImmediate(n, textures);
+  // if (share_group_->bind_generates_resource())
+  //   helper_->CommandBufferHelper::Flush();
+
+  GPU_CLIENT_LOG_CODE_BLOCK({
+    for (GLsizei i = 0; i < n; ++i) {
+      GPU_CLIENT_LOG("  " << i << ": " << textures[i]);
+    }
+  });
+  CheckGLError();
+}
+
+// glBindTexture: only GL_TEXTURE_2D is supported; updates client-side
+// binding state only (service-side bind is a TODO).
+// NOTE(review): the log statement below omits |target| and formats the
+// texture *id* via GetStringEnum — likely a copy/paste artifact; verify
+// against the GLES2 implementation's logging.
+void RasterImplementation::BindTexture(GLenum target, GLuint texture) {
+  GPU_CLIENT_SINGLE_THREAD_CHECK();
+  GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glBindTexture("
+                     << GLES2Util::GetStringEnum(texture) << ")");
+  DCHECK_EQ(target, static_cast<GLenum>(GL_TEXTURE_2D));
+  if (target != GL_TEXTURE_2D) {
+    return;
+  }
+  TextureUnit& unit = texture_units_[active_texture_unit_];
+  unit.bound_texture_2d = texture;
+  // TODO(backer): Update bound texture on the server side.
+  // helper_->BindTexture(target, texture);
+  texture_id_allocator_.MarkAsUsed(texture);
+}
+
+// glActiveTexture: validates |texture| against the combined texture image
+// unit count and updates the client-side active unit (service-side update
+// is a TODO).
+void RasterImplementation::ActiveTexture(GLenum texture) {
+  GPU_CLIENT_SINGLE_THREAD_CHECK();
+  GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glActiveTexture("
+                     << GLES2Util::GetStringEnum(texture) << ")");
+  GLuint texture_index = texture - GL_TEXTURE0;
+  if (texture_index >=
+      static_cast<GLuint>(capabilities_.max_combined_texture_image_units)) {
+    SetGLErrorInvalidEnum("glActiveTexture", texture, "texture");
+    return;
+  }
+
+  active_texture_unit_ = texture_index;
+  // TODO(backer): Update active texture on the server side.
+  // helper_->ActiveTexture(texture);
+  CheckGLError();
+}
+
+// The remaining RasterInterface entry points are not yet implemented in the
+// command-buffer-backed RasterImplementation; each one just records
+// NOTIMPLEMENTED().
+void RasterImplementation::GenerateMipmap(GLenum target) {
+  NOTIMPLEMENTED();
+}
+void RasterImplementation::SetColorSpaceMetadataCHROMIUM(
+    GLuint texture_id,
+    GLColorSpace color_space) {
+  NOTIMPLEMENTED();
+}
+void RasterImplementation::GenMailboxCHROMIUM(GLbyte* mailbox) {
+  NOTIMPLEMENTED();
+}
+void RasterImplementation::ProduceTextureDirectCHROMIUM(GLuint texture,
+                                                        const GLbyte* mailbox) {
+  NOTIMPLEMENTED();
+}
+GLuint RasterImplementation::CreateAndConsumeTextureCHROMIUM(
+    const GLbyte* mailbox) {
+  NOTIMPLEMENTED();
+  return 0;
+}
+void RasterImplementation::BindTexImage2DCHROMIUM(GLenum target,
+                                                  GLint imageId) {
+  NOTIMPLEMENTED();
+}
+void RasterImplementation::ReleaseTexImage2DCHROMIUM(GLenum target,
+                                                     GLint imageId) {
+  NOTIMPLEMENTED();
+}
+void RasterImplementation::TexImage2D(GLenum target,
+                                      GLint level,
+                                      GLint internalformat,
+                                      GLsizei width,
+                                      GLsizei height,
+                                      GLint border,
+                                      GLenum format,
+                                      GLenum type,
+                                      const void* pixels) {
+  NOTIMPLEMENTED();
+}
+void RasterImplementation::TexSubImage2D(GLenum target,
+                                         GLint level,
+                                         GLint xoffset,
+                                         GLint yoffset,
+                                         GLsizei width,
+                                         GLsizei height,
+                                         GLenum format,
+                                         GLenum type,
+                                         const void* pixels) {
+  NOTIMPLEMENTED();
+}
+void RasterImplementation::CompressedTexImage2D(GLenum target,
+                                                GLint level,
+                                                GLenum internalformat,
+                                                GLsizei width,
+                                                GLsizei height,
+                                                GLint border,
+                                                GLsizei imageSize,
+                                                const void* data) {
+  NOTIMPLEMENTED();
+}
+void RasterImplementation::TexStorageForRaster(GLenum target,
+                                               viz::ResourceFormat format,
+                                               GLsizei width,
+                                               GLsizei height,
+                                               RasterTexStorageFlags flags) {
+  NOTIMPLEMENTED();
+}
+void RasterImplementation::CopySubTextureCHROMIUM(
+    GLuint source_id,
+    GLint source_level,
+    GLenum dest_target,
+    GLuint dest_id,
+    GLint dest_level,
+    GLint xoffset,
+    GLint yoffset,
+    GLint x,
+    GLint y,
+    GLsizei width,
+    GLsizei height,
+    GLboolean unpack_flip_y,
+    GLboolean unpack_premultiply_alpha,
+    GLboolean unpack_unmultiply_alpha) {
+  NOTIMPLEMENTED();
+}
+void RasterImplementation::BeginRasterCHROMIUM(
+    GLuint texture_id,
+    GLuint sk_color,
+    GLuint msaa_sample_count,
+    GLboolean can_use_lcd_text,
+    GLboolean use_distance_field_text,
+    GLint pixel_config,
+    const cc::RasterColorSpace& raster_color_space) {
+  NOTIMPLEMENTED();
+}
+void RasterImplementation::RasterCHROMIUM(const cc::DisplayItemList* list,
+                                          cc::ImageProvider* provider,
+                                          const gfx::Size& content_size,
+                                          const gfx::Rect& full_raster_rect,
+                                          const gfx::Rect& playback_rect,
+                                          const gfx::Vector2dF& post_translate,
+                                          GLfloat post_scale,
+                                          bool requires_clear) {
+  NOTIMPLEMENTED();
+}
+void RasterImplementation::BeginGpuRaster() {
+  NOTIMPLEMENTED();
+}
+void RasterImplementation::EndGpuRaster() {
+  NOTIMPLEMENTED();
+}
+
+} // namespace raster
+} // namespace gpu
diff --git a/chromium/gpu/command_buffer/client/raster_implementation.h b/chromium/gpu/command_buffer/client/raster_implementation.h
new file mode 100644
index 00000000000..f01843fafd9
--- /dev/null
+++ b/chromium/gpu/command_buffer/client/raster_implementation.h
@@ -0,0 +1,336 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef GPU_COMMAND_BUFFER_CLIENT_RASTER_IMPLEMENTATION_H_
+#define GPU_COMMAND_BUFFER_CLIENT_RASTER_IMPLEMENTATION_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include <memory>
+#include <string>
+#include <utility>
+#include <vector>
+
+#include "base/macros.h"
+#include "base/optional.h"
+#include "base/trace_event/memory_dump_provider.h"
+#include "gpu/command_buffer/client/client_discardable_texture_manager.h"
+#include "gpu/command_buffer/client/context_support.h"
+#include "gpu/command_buffer/client/gpu_control_client.h"
+#include "gpu/command_buffer/client/implementation_base.h"
+#include "gpu/command_buffer/client/logging.h"
+#include "gpu/command_buffer/client/mapped_memory.h"
+#include "gpu/command_buffer/client/raster_interface.h"
+#include "gpu/command_buffer/client/transfer_buffer.h"
+#include "gpu/command_buffer/common/capabilities.h"
+#include "gpu/command_buffer/common/context_result.h"
+#include "gpu/command_buffer/common/debug_marker_manager.h"
+#include "gpu/command_buffer/common/id_allocator.h"
+#include "gpu/raster_export.h"
+
+namespace gpu {
+
+class GpuControl;
+struct SharedMemoryLimits;
+
+namespace raster {
+
+class RasterCmdHelper;
+
+// This class emulates Raster over command buffers. It can be used by a client
+// program so that the program does not need deal with shared memory and command
+// buffer management.
+// This class emulates Raster over command buffers. It can be used by a client
+// program so that the program does not need deal with shared memory and command
+// buffer management.
+class RASTER_EXPORT RasterImplementation : public RasterInterface,
+                                           public ImplementationBase,
+                                           public gles2::QueryTrackerClient {
+ public:
+  RasterImplementation(RasterCmdHelper* helper,
+                       TransferBufferInterface* transfer_buffer,
+                       bool bind_generates_resource,
+                       bool lose_context_when_out_of_memory,
+                       GpuControl* gpu_control);
+
+  ~RasterImplementation() override;
+
+  gpu::ContextResult Initialize(const SharedMemoryLimits& limits);
+
+  // The RasterCmdHelper being used by this RasterImplementation. You can use
+  // this to issue cmds at a lower level for certain kinds of optimization.
+  RasterCmdHelper* helper() const;
+
+  // QueryTrackerClient implementation.
+  void IssueBeginQuery(GLenum target,
+                       GLuint id,
+                       uint32_t sync_data_shm_id,
+                       uint32_t sync_data_shm_offset) override;
+  void IssueEndQuery(GLenum target, GLuint submit_count) override;
+  void IssueQueryCounter(GLuint id,
+                         GLenum target,
+                         uint32_t sync_data_shm_id,
+                         uint32_t sync_data_shm_offset,
+                         GLuint submit_count) override;
+  void IssueSetDisjointValueSync(uint32_t sync_data_shm_id,
+                                 uint32_t sync_data_shm_offset) override;
+  GLenum GetClientSideGLError() override;
+  CommandBufferHelper* cmd_buffer_helper() override;
+  void SetGLError(GLenum error,
+                  const char* function_name,
+                  const char* msg) override;
+
+  // ClientTransferCache::Client implementation.
+  void IssueCreateTransferCacheEntry(GLuint entry_type,
+                                     GLuint entry_id,
+                                     GLuint handle_shm_id,
+                                     GLuint handle_shm_offset,
+                                     GLuint data_shm_id,
+                                     GLuint data_shm_offset,
+                                     GLuint data_size) override;
+  void IssueDeleteTransferCacheEntry(GLuint entry_type,
+                                     GLuint entry_id) override;
+  void IssueUnlockTransferCacheEntry(GLuint entry_type,
+                                     GLuint entry_id) override;
+  CommandBuffer* command_buffer() const override;
+
+// Include the auto-generated part of this class. We split this because
+// it means we can easily edit the non-auto generated parts right here in
+// this file instead of having to edit some template or the code generator.
+#include "gpu/command_buffer/client/raster_implementation_autogen.h"
+
+  // RasterInterface implementation.
+  void GenTextures(GLsizei n, GLuint* textures) override;
+  void BindTexture(GLenum target, GLuint texture) override;
+  void ActiveTexture(GLenum texture) override;
+  void GenerateMipmap(GLenum target) override;
+  void SetColorSpaceMetadataCHROMIUM(GLuint texture_id,
+                                     GLColorSpace color_space) override;
+  void GenMailboxCHROMIUM(GLbyte* mailbox) override;
+  void ProduceTextureDirectCHROMIUM(GLuint texture,
+                                    const GLbyte* mailbox) override;
+  GLuint CreateAndConsumeTextureCHROMIUM(const GLbyte* mailbox) override;
+  void BindTexImage2DCHROMIUM(GLenum target, GLint imageId) override;
+  void ReleaseTexImage2DCHROMIUM(GLenum target, GLint imageId) override;
+  void TexImage2D(GLenum target,
+                  GLint level,
+                  GLint internalformat,
+                  GLsizei width,
+                  GLsizei height,
+                  GLint border,
+                  GLenum format,
+                  GLenum type,
+                  const void* pixels) override;
+  void TexSubImage2D(GLenum target,
+                     GLint level,
+                     GLint xoffset,
+                     GLint yoffset,
+                     GLsizei width,
+                     GLsizei height,
+                     GLenum format,
+                     GLenum type,
+                     const void* pixels) override;
+  void CompressedTexImage2D(GLenum target,
+                            GLint level,
+                            GLenum internalformat,
+                            GLsizei width,
+                            GLsizei height,
+                            GLint border,
+                            GLsizei imageSize,
+                            const void* data) override;
+  void TexStorageForRaster(GLenum target,
+                           viz::ResourceFormat format,
+                           GLsizei width,
+                           GLsizei height,
+                           RasterTexStorageFlags flags) override;
+  void CopySubTextureCHROMIUM(GLuint source_id,
+                              GLint source_level,
+                              GLenum dest_target,
+                              GLuint dest_id,
+                              GLint dest_level,
+                              GLint xoffset,
+                              GLint yoffset,
+                              GLint x,
+                              GLint y,
+                              GLsizei width,
+                              GLsizei height,
+                              GLboolean unpack_flip_y,
+                              GLboolean unpack_premultiply_alpha,
+                              GLboolean unpack_unmultiply_alpha) override;
+  void BeginRasterCHROMIUM(
+      GLuint texture_id,
+      GLuint sk_color,
+      GLuint msaa_sample_count,
+      GLboolean can_use_lcd_text,
+      GLboolean use_distance_field_text,
+      GLint pixel_config,
+      const cc::RasterColorSpace& raster_color_space) override;
+  void RasterCHROMIUM(const cc::DisplayItemList* list,
+                      cc::ImageProvider* provider,
+                      const gfx::Size& content_size,
+                      const gfx::Rect& full_raster_rect,
+                      const gfx::Rect& playback_rect,
+                      const gfx::Vector2dF& post_translate,
+                      GLfloat post_scale,
+                      bool requires_clear) override;
+  void BeginGpuRaster() override;
+  void EndGpuRaster() override;
+
+  // ContextSupport implementation.
+  void SetAggressivelyFreeResources(bool aggressively_free_resources) override;
+  void Swap() override;
+  void SwapWithBounds(const std::vector<gfx::Rect>& rects) override;
+  void PartialSwapBuffers(const gfx::Rect& sub_buffer) override;
+  void CommitOverlayPlanes() override;
+  void ScheduleOverlayPlane(int plane_z_order,
+                            gfx::OverlayTransform plane_transform,
+                            unsigned overlay_texture_id,
+                            const gfx::Rect& display_bounds,
+                            const gfx::RectF& uv_rect) override;
+  uint64_t ShareGroupTracingGUID() const override;
+  void SetErrorMessageCallback(
+      base::RepeatingCallback<void(const char*, int32_t)> callback) override;
+  void SetSnapshotRequested() override;
+  bool ThreadSafeShallowLockDiscardableTexture(uint32_t texture_id) override;
+  void CompleteLockDiscardableTexureOnContextThread(
+      uint32_t texture_id) override;
+  bool ThreadsafeDiscardableTextureIsDeletedForTracing(
+      uint32_t texture_id) override;
+
+  // Shared implementation for the glGetQueryObject* entry points. Returns
+  // true iff *params was written.
+  bool GetQueryObjectValueHelper(const char* function_name,
+                                 GLuint id,
+                                 GLenum pname,
+                                 GLuint64* params);
+
+ private:
+  friend class RasterImplementationTest;
+
+  using IdNamespaces = gles2::id_namespaces::IdNamespaces;
+
+  // Client-side shadow of one texture unit's 2D binding.
+  struct TextureUnit {
+    TextureUnit() : bound_texture_2d(0) {}
+    // texture currently bound to this unit's GL_TEXTURE_2D with glBindTexture
+    GLuint bound_texture_2d;
+  };
+
+  // Checks for single threaded access.
+  class SingleThreadChecker {
+   public:
+    explicit SingleThreadChecker(RasterImplementation* raster_implementation);
+    ~SingleThreadChecker();
+
+   private:
+    RasterImplementation* raster_implementation_;
+  };
+
+  // ImplementationBase implementation.
+  void IssueShallowFlush() override;
+
+  // GpuControlClient implementation.
+  void OnGpuControlLostContext() final;
+  void OnGpuControlLostContextMaybeReentrant() final;
+  void OnGpuControlErrorMessage(const char* message, int32_t id) final;
+
+  // Gets the GLError through our wrapper.
+  GLenum GetGLError();
+
+  // Sets our wrapper for the GLError.
+  void SetGLErrorInvalidEnum(const char* function_name,
+                             GLenum value,
+                             const char* label);
+
+  // Returns the last error and clears it. Useful for debugging.
+  const std::string& GetLastError() { return last_error_; }
+
+  void GenQueriesEXTHelper(GLsizei n, const GLuint* queries);
+
+  void DeleteTexturesHelper(GLsizei n, const GLuint* textures);
+  void UnbindTexturesHelper(GLsizei n, const GLuint* textures);
+  void DeleteQueriesEXTHelper(GLsizei n, const GLuint* queries);
+
+  GLuint CreateImageCHROMIUMHelper(ClientBuffer buffer,
+                                   GLsizei width,
+                                   GLsizei height,
+                                   GLenum internalformat);
+  void DestroyImageCHROMIUMHelper(GLuint image_id);
+
+  // Helpers for query functions.
+  bool GetIntegervHelper(GLenum pname, GLint* params);
+  bool GetTexParameterivHelper(GLenum target, GLenum pname, GLint* params);
+
+  // IdAllocators for objects that can't be shared among contexts.
+  IdAllocator* GetIdAllocator(IdNamespaces id_namespace);
+
+  void FinishHelper();
+  void FlushHelper();
+
+  void RunIfContextNotLost(base::OnceClosure callback);
+
+  const std::string& GetLogPrefix() const;
+
+// Set to 1 to have the client fail when a GL error is generated.
+// This helps find bugs in the renderer since the debugger stops on the error.
+#if DCHECK_IS_ON()
+#if 0
+#define RASTER_CLIENT_FAIL_GL_ERRORS
+#endif
+#endif
+
+#if defined(RASTER_CLIENT_FAIL_GL_ERRORS)
+  void CheckGLError();
+  void FailGLError(GLenum error);
+#else
+  void CheckGLError() {}
+  void FailGLError(GLenum /* error */) {}
+#endif
+
+  // Maps/unmaps the transfer buffer region used for serialized raster
+  // commands (see raster_implementation.cc).
+  void* MapRasterCHROMIUM(GLsizeiptr size);
+  void UnmapRasterCHROMIUM(GLsizeiptr written_size);
+
+  RasterCmdHelper* helper_;
+  std::string last_error_;
+  gles2::DebugMarkerManager debug_marker_manager_;
+  // Hex representation of |this|, used as the default log prefix.
+  std::string this_in_hex_;
+
+  std::unique_ptr<TextureUnit[]> texture_units_;
+
+  // 0 to capabilities_.max_combined_texture_image_units.
+  GLuint active_texture_unit_;
+
+  // Current GL error bits.
+  uint32_t error_bits_;
+
+  LogSettings log_settings_;
+
+  // When true, the context is lost when a GL_OUT_OF_MEMORY error occurs.
+  const bool lose_context_when_out_of_memory_;
+
+  // Used to check for single threaded access.
+  int use_count_;
+
+  // Outstanding MapRasterCHROMIUM mapping, if any.
+  base::Optional<ScopedTransferBufferPtr> raster_mapped_buffer_;
+
+  base::RepeatingCallback<void(const char*, int32_t)> error_message_callback_;
+
+  int current_trace_stack_;
+
+  Capabilities capabilities_;
+
+  // Flag to indicate whether the implementation can retain resources, or
+  // whether it should aggressively free them.
+  bool aggressively_free_resources_;
+
+  IdAllocator texture_id_allocator_;
+  IdAllocator query_id_allocator_;
+
+  ClientDiscardableTextureManager discardable_texture_manager_;
+
+  // Guards |lost_|, which may be set from GpuControlClient callbacks.
+  mutable base::Lock lost_lock_;
+  bool lost_;
+
+  DISALLOW_COPY_AND_ASSIGN(RasterImplementation);
+};
+
+} // namespace raster
+} // namespace gpu
+
+#endif // GPU_COMMAND_BUFFER_CLIENT_RASTER_IMPLEMENTATION_H_
diff --git a/chromium/gpu/command_buffer/client/raster_implementation_autogen.h b/chromium/gpu/command_buffer/client/raster_implementation_autogen.h
new file mode 100644
index 00000000000..2c50af89bb5
--- /dev/null
+++ b/chromium/gpu/command_buffer/client/raster_implementation_autogen.h
@@ -0,0 +1,78 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file is auto-generated from
+// gpu/command_buffer/build_raster_cmd_buffer.py
+// It's formatted by clang-format using chromium coding style:
+// clang-format -i -style=chromium filename
+// DO NOT EDIT!
+
+// This file is included by raster_implementation.h to declare the
+// GL api functions.
+#ifndef GPU_COMMAND_BUFFER_CLIENT_RASTER_IMPLEMENTATION_AUTOGEN_H_
+#define GPU_COMMAND_BUFFER_CLIENT_RASTER_IMPLEMENTATION_AUTOGEN_H_
+
+void DeleteTextures(GLsizei n, const GLuint* textures) override;
+
+void Finish() override;
+
+void Flush() override;
+
+GLenum GetError() override;
+
+void GetIntegerv(GLenum pname, GLint* params) override;
+
+void ShallowFlushCHROMIUM() override;
+
+void OrderingBarrierCHROMIUM() override;
+
+void TexParameteri(GLenum target, GLenum pname, GLint param) override;
+
+void GenQueriesEXT(GLsizei n, GLuint* queries) override;
+
+void DeleteQueriesEXT(GLsizei n, const GLuint* queries) override;
+
+void BeginQueryEXT(GLenum target, GLuint id) override;
+
+void EndQueryEXT(GLenum target) override;
+
+void GetQueryObjectuivEXT(GLuint id, GLenum pname, GLuint* params) override;
+
+GLuint CreateImageCHROMIUM(ClientBuffer buffer,
+ GLsizei width,
+ GLsizei height,
+ GLenum internalformat) override;
+
+void DestroyImageCHROMIUM(GLuint image_id) override;
+
+void CompressedCopyTextureCHROMIUM(GLuint source_id, GLuint dest_id) override;
+
+void LoseContextCHROMIUM(GLenum current, GLenum other) override;
+
+void GenSyncTokenCHROMIUM(GLbyte* sync_token) override;
+
+void GenUnverifiedSyncTokenCHROMIUM(GLbyte* sync_token) override;
+
+void VerifySyncTokensCHROMIUM(GLbyte** sync_tokens, GLsizei count) override;
+
+void WaitSyncTokenCHROMIUM(const GLbyte* sync_token) override;
+
+void UnpremultiplyAndDitherCopyCHROMIUM(GLuint source_id,
+ GLuint dest_id,
+ GLint x,
+ GLint y,
+ GLsizei width,
+ GLsizei height) override;
+
+GLenum GetGraphicsResetStatusKHR() override;
+
+void InitializeDiscardableTextureCHROMIUM(GLuint texture_id) override;
+
+void UnlockDiscardableTextureCHROMIUM(GLuint texture_id) override;
+
+bool LockDiscardableTextureCHROMIUM(GLuint texture_id) override;
+
+void EndRasterCHROMIUM() override;
+
+#endif // GPU_COMMAND_BUFFER_CLIENT_RASTER_IMPLEMENTATION_AUTOGEN_H_
diff --git a/chromium/gpu/command_buffer/client/raster_implementation_gles.cc b/chromium/gpu/command_buffer/client/raster_implementation_gles.cc
index 39572ee5bf4..e27042aa080 100644
--- a/chromium/gpu/command_buffer/client/raster_implementation_gles.cc
+++ b/chromium/gpu/command_buffer/client/raster_implementation_gles.cc
@@ -5,6 +5,7 @@
#include "gpu/command_buffer/client/raster_implementation_gles.h"
#include "base/logging.h"
+#include "cc/paint/color_space_transfer_cache_entry.h"
#include "cc/paint/decode_stashing_image_provider.h"
#include "cc/paint/display_item_list.h" // nogncheck
#include "cc/paint/paint_op_buffer_serializer.h"
@@ -13,7 +14,7 @@
#include "components/viz/common/resources/resource_format_utils.h"
#include "gpu/GLES2/gl2extchromium.h"
#include "gpu/command_buffer/client/context_support.h"
-#include "gpu/command_buffer/client/gles2_implementation.h"
+#include "gpu/command_buffer/client/gles2_interface.h"
#include "gpu/command_buffer/common/capabilities.h"
#include "ui/gfx/geometry/rect_conversions.h"
#include "ui/gfx/skia_util.h"
@@ -31,18 +32,28 @@ class TransferCacheSerializeHelperImpl
~TransferCacheSerializeHelperImpl() final = default;
private:
- bool LockEntryInternal(cc::TransferCacheEntryType type, uint32_t id) final {
- return support_->ThreadsafeLockTransferCacheEntry(type, id);
+ bool LockEntryInternal(const EntryKey& key) final {
+ return support_->ThreadsafeLockTransferCacheEntry(
+ static_cast<uint32_t>(key.first), key.second);
}
void CreateEntryInternal(const cc::ClientTransferCacheEntry& entry) final {
- support_->CreateTransferCacheEntry(entry);
+ size_t size = entry.SerializedSize();
+ void* data = support_->MapTransferCacheEntry(size);
+ // TODO(piman): handle error (failed to allocate/map shm)
+ DCHECK(data);
+ bool succeeded = entry.Serialize(
+ base::make_span(reinterpret_cast<uint8_t*>(data), size));
+ DCHECK(succeeded);
+ support_->UnmapAndCreateTransferCacheEntry(entry.UnsafeType(), entry.Id());
}
- void FlushEntriesInternal(
- const std::vector<std::pair<cc::TransferCacheEntryType, uint32_t>>&
- entries) final {
- support_->UnlockTransferCacheEntries(entries);
+ void FlushEntriesInternal(std::set<EntryKey> entries) final {
+ std::vector<std::pair<uint32_t, uint32_t>> transformed;
+ transformed.reserve(entries.size());
+ for (const auto& e : entries)
+ transformed.emplace_back(static_cast<uint32_t>(e.first), e.second);
+ support_->UnlockTransferCacheEntries(transformed);
}
ContextSupport* support_;
@@ -308,6 +319,17 @@ void RasterImplementationGLES::CompressedTexImage2D(GLenum target,
border, imageSize, data);
}
+void RasterImplementationGLES::UnpremultiplyAndDitherCopyCHROMIUM(
+ GLuint source_id,
+ GLuint dest_id,
+ GLint x,
+ GLint y,
+ GLsizei width,
+ GLsizei height) {
+ gl_->UnpremultiplyAndDitherCopyCHROMIUM(source_id, dest_id, x, y, width,
+ height);
+}
+
void RasterImplementationGLES::TexStorageForRaster(
GLenum target,
viz::ResourceFormat format,
@@ -376,19 +398,35 @@ void RasterImplementationGLES::BeginRasterCHROMIUM(
GLuint msaa_sample_count,
GLboolean can_use_lcd_text,
GLboolean use_distance_field_text,
- GLint pixel_config) {
+ GLint color_type,
+ const cc::RasterColorSpace& raster_color_space) {
+ TransferCacheSerializeHelperImpl transfer_cache_serialize_helper(support_);
+ if (!transfer_cache_serialize_helper.LockEntry(
+ cc::TransferCacheEntryType::kColorSpace,
+ raster_color_space.color_space_id)) {
+ transfer_cache_serialize_helper.CreateEntry(
+ cc::ClientColorSpaceTransferCacheEntry(raster_color_space));
+ }
+ transfer_cache_serialize_helper.AssertLocked(
+ cc::TransferCacheEntryType::kColorSpace,
+ raster_color_space.color_space_id);
+
gl_->BeginRasterCHROMIUM(texture_id, sk_color, msaa_sample_count,
can_use_lcd_text, use_distance_field_text,
- pixel_config);
+ color_type, raster_color_space.color_space_id);
+ transfer_cache_serialize_helper.FlushEntries();
+ background_color_ = sk_color;
};
void RasterImplementationGLES::RasterCHROMIUM(
const cc::DisplayItemList* list,
cc::ImageProvider* provider,
- const gfx::Vector2d& translate,
+ const gfx::Size& content_size,
+ const gfx::Rect& full_raster_rect,
const gfx::Rect& playback_rect,
const gfx::Vector2dF& post_translate,
- GLfloat post_scale) {
+ GLfloat post_scale,
+ bool requires_clear) {
if (std::abs(post_scale) < std::numeric_limits<float>::epsilon())
return;
@@ -406,17 +444,18 @@ void RasterImplementationGLES::RasterCHROMIUM(
// This section duplicates RasterSource::PlaybackToCanvas setup preamble.
cc::PaintOpBufferSerializer::Preamble preamble;
- preamble.translation = translate;
+ preamble.content_size = content_size;
+ preamble.full_raster_rect = full_raster_rect;
preamble.playback_rect = playback_rect;
preamble.post_translation = post_translate;
- preamble.post_scale = post_scale;
+ preamble.post_scale = gfx::SizeF(post_scale, post_scale);
+ preamble.requires_clear = requires_clear;
+ preamble.background_color = background_color_;
// Wrap the provided provider in a stashing provider so that we can delay
// unrefing images until we have serialized dependent commands.
- provider->BeginRaster();
cc::DecodeStashingImageProvider stashing_image_provider(provider);
- // TODO(enne): need to implement alpha folding optimization from POB.
// TODO(enne): don't access private members of DisplayItemList.
TransferCacheSerializeHelperImpl transfer_cache_serialize_helper(support_);
PaintOpSerializer op_serializer(free_size, gl_, &stashing_image_provider,
@@ -429,7 +468,6 @@ void RasterImplementationGLES::RasterCHROMIUM(
serializer.Serialize(&list->paint_op_buffer_, &offsets, preamble);
// TODO(piman): raise error if !serializer.valid()?
op_serializer.SendSerializedData();
- provider->EndRaster();
}
void RasterImplementationGLES::EndRasterCHROMIUM() {
diff --git a/chromium/gpu/command_buffer/client/raster_implementation_gles.h b/chromium/gpu/command_buffer/client/raster_implementation_gles.h
index 3357df1fc74..841b6e25087 100644
--- a/chromium/gpu/command_buffer/client/raster_implementation_gles.h
+++ b/chromium/gpu/command_buffer/client/raster_implementation_gles.h
@@ -6,10 +6,11 @@
#define GPU_COMMAND_BUFFER_CLIENT_RASTER_IMPLEMENTATION_GLES_H_
#include "base/macros.h"
-#include "gles2_impl_export.h"
#include "gpu/command_buffer/client/gles2_interface.h"
#include "gpu/command_buffer/client/raster_interface.h"
#include "gpu/command_buffer/common/capabilities.h"
+#include "gpu/raster_export.h"
+#include "third_party/skia/include/core/SkColor.h"
namespace gpu {
@@ -20,7 +21,7 @@ namespace raster {
struct Capabilities;
// An implementation of RasterInterface on top of GLES2Interface.
-class GLES2_IMPL_EXPORT RasterImplementationGLES : public RasterInterface {
+class RASTER_EXPORT RasterImplementationGLES : public RasterInterface {
public:
RasterImplementationGLES(gles2::GLES2Interface* gl,
ContextSupport* support,
@@ -125,6 +126,12 @@ class GLES2_IMPL_EXPORT RasterImplementationGLES : public RasterInterface {
GLboolean unpack_premultiply_alpha,
GLboolean unpack_unmultiply_alpha) override;
void CompressedCopyTextureCHROMIUM(GLuint source_id, GLuint dest_id) override;
+ void UnpremultiplyAndDitherCopyCHROMIUM(GLuint source_id,
+ GLuint dest_id,
+ GLint x,
+ GLint y,
+ GLsizei width,
+ GLsizei height) override;
// Discardable textures.
void InitializeDiscardableTextureCHROMIUM(GLuint texture_id) override;
@@ -132,18 +139,22 @@ class GLES2_IMPL_EXPORT RasterImplementationGLES : public RasterInterface {
bool LockDiscardableTextureCHROMIUM(GLuint texture_id) override;
// OOP-Raster
- void BeginRasterCHROMIUM(GLuint texture_id,
- GLuint sk_color,
- GLuint msaa_sample_count,
- GLboolean can_use_lcd_text,
- GLboolean use_distance_field_text,
- GLint pixel_config) override;
+ void BeginRasterCHROMIUM(
+ GLuint texture_id,
+ GLuint sk_color,
+ GLuint msaa_sample_count,
+ GLboolean can_use_lcd_text,
+ GLboolean use_distance_field_text,
+ GLint color_type,
+ const cc::RasterColorSpace& raster_color_space) override;
void RasterCHROMIUM(const cc::DisplayItemList* list,
cc::ImageProvider* provider,
- const gfx::Vector2d& translate,
+ const gfx::Size& content_size,
+ const gfx::Rect& full_raster_rect,
const gfx::Rect& playback_rect,
const gfx::Vector2dF& post_translate,
- GLfloat post_scale) override;
+ GLfloat post_scale,
+ bool requires_clear) override;
void EndRasterCHROMIUM() override;
// Raster via GrContext.
@@ -152,6 +163,7 @@ class GLES2_IMPL_EXPORT RasterImplementationGLES : public RasterInterface {
private:
gles2::GLES2Interface* gl_;
+ SkColor background_color_;
ContextSupport* support_;
bool use_texture_storage_;
bool use_texture_storage_image_;
diff --git a/chromium/gpu/command_buffer/client/raster_implementation_gles_unittest.cc b/chromium/gpu/command_buffer/client/raster_implementation_gles_unittest.cc
index 28b5a6e7177..b2bff23187f 100644
--- a/chromium/gpu/command_buffer/client/raster_implementation_gles_unittest.cc
+++ b/chromium/gpu/command_buffer/client/raster_implementation_gles_unittest.cc
@@ -9,6 +9,7 @@
#include <GLES2/gl2extchromium.h>
#include <GLES3/gl3.h>
+#include "cc/paint/color_space_transfer_cache_entry.h"
#include "cc/paint/display_item_list.h"
#include "components/viz/common/resources/resource_format_utils.h"
#include "gpu/GLES2/gl2extchromium.h"
@@ -137,13 +138,14 @@ class RasterMockGLES2Interface : public gles2::GLES2InterfaceStub {
MOCK_METHOD1(LockDiscardableTextureCHROMIUM, bool(GLuint texture_id));
// OOP-Raster
- MOCK_METHOD6(BeginRasterCHROMIUM,
+ MOCK_METHOD7(BeginRasterCHROMIUM,
void(GLuint texture_id,
GLuint sk_color,
GLuint msaa_sample_count,
GLboolean can_use_lcd_text,
GLboolean use_distance_field_text,
- GLint pixel_config));
+ GLint color_type,
+ GLuint color_space_id));
MOCK_METHOD2(RasterCHROMIUM, void(GLsizeiptr size, const void* list));
MOCK_METHOD1(MapRasterCHROMIUM, void*(GLsizeiptr size));
MOCK_METHOD1(UnmapRasterCHROMIUM, void(GLsizeiptr written));
@@ -194,18 +196,23 @@ class ContextSupportStub : public ContextSupport {
uint32_t texture_id) override {
return false;
}
- void CreateTransferCacheEntry(
- const cc::ClientTransferCacheEntry& entry) override {}
- bool ThreadsafeLockTransferCacheEntry(cc::TransferCacheEntryType type,
- uint32_t id) override {
+ void* MapTransferCacheEntry(size_t serialized_size) override {
+ mapped_transfer_cache_entry_.reset(new char[serialized_size]);
+ return mapped_transfer_cache_entry_.get();
+ }
+ void UnmapAndCreateTransferCacheEntry(uint32_t type, uint32_t id) override {
+ mapped_transfer_cache_entry_.reset();
+ }
+ bool ThreadsafeLockTransferCacheEntry(uint32_t type, uint32_t id) override {
return true;
}
void UnlockTransferCacheEntries(
- const std::vector<std::pair<cc::TransferCacheEntryType, uint32_t>>&
- entries) override {}
- void DeleteTransferCacheEntry(cc::TransferCacheEntryType type,
- uint32_t id) override {}
+ const std::vector<std::pair<uint32_t, uint32_t>>& entries) override {}
+ void DeleteTransferCacheEntry(uint32_t type, uint32_t id) override {}
unsigned int GetTransferBufferFreeSize() const override { return 0; }
+
+ private:
+ std::unique_ptr<char[]> mapped_transfer_cache_entry_;
};
class ImageProviderStub : public cc::ImageProvider {
@@ -666,14 +673,16 @@ TEST_F(RasterImplementationGLESTest, BeginRasterCHROMIUM) {
const GLuint msaa_sample_count = 4;
const GLboolean can_use_lcd_text = GL_TRUE;
const GLboolean use_distance_field_text = GL_FALSE;
- const GLint pixel_config = kRGBA_8888_GrPixelConfig;
+ const GLint color_type = kRGBA_8888_SkColorType;
+ const auto raster_color_space =
+ cc::RasterColorSpace(gfx::ColorSpace::CreateSRGB(), 2);
EXPECT_CALL(*gl_, BeginRasterCHROMIUM(texture_id, sk_color, msaa_sample_count,
can_use_lcd_text,
- use_distance_field_text, pixel_config))
+ use_distance_field_text, color_type, 2))
.Times(1);
ri_->BeginRasterCHROMIUM(texture_id, sk_color, msaa_sample_count,
can_use_lcd_text, use_distance_field_text,
- pixel_config);
+ color_type, raster_color_space);
}
TEST_F(RasterImplementationGLESTest, RasterCHROMIUM) {
@@ -684,10 +693,12 @@ TEST_F(RasterImplementationGLESTest, RasterCHROMIUM) {
display_list->Finalize();
ImageProviderStub image_provider;
- const gfx::Vector2d translate(1, 2);
+ const gfx::Size content_size(100, 200);
+ const gfx::Rect full_raster_rect(2, 3, 8, 9);
const gfx::Rect playback_rect(3, 4, 5, 6);
const gfx::Vector2dF post_translate(7.0f, 8.0f);
const GLfloat post_scale = 9.0f;
+ bool requires_clear = false;
constexpr const GLsizeiptr kBufferSize = 16 << 10;
char buffer[kBufferSize];
@@ -695,8 +706,10 @@ TEST_F(RasterImplementationGLESTest, RasterCHROMIUM) {
EXPECT_CALL(*gl_, MapRasterCHROMIUM(Le(kBufferSize)))
.WillOnce(Return(buffer));
EXPECT_CALL(*gl_, UnmapRasterCHROMIUM(Gt(0))).Times(1);
- ri_->RasterCHROMIUM(display_list.get(), &image_provider, translate,
- playback_rect, post_translate, post_scale);
+
+ ri_->RasterCHROMIUM(display_list.get(), &image_provider, content_size,
+ full_raster_rect, playback_rect, post_translate,
+ post_scale, requires_clear);
}
TEST_F(RasterImplementationGLESTest, EndRasterCHROMIUM) {
diff --git a/chromium/gpu/command_buffer/client/raster_implementation_impl_autogen.h b/chromium/gpu/command_buffer/client/raster_implementation_impl_autogen.h
new file mode 100644
index 00000000000..2b281ec232c
--- /dev/null
+++ b/chromium/gpu/command_buffer/client/raster_implementation_impl_autogen.h
@@ -0,0 +1,171 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file is auto-generated from
+// gpu/command_buffer/build_raster_cmd_buffer.py
+// It's formatted by clang-format using chromium coding style:
+// clang-format -i -style=chromium filename
+// DO NOT EDIT!
+
+// This file is included by raster_implementation.cc to define the
+// GL api functions.
+#ifndef GPU_COMMAND_BUFFER_CLIENT_RASTER_IMPLEMENTATION_IMPL_AUTOGEN_H_
+#define GPU_COMMAND_BUFFER_CLIENT_RASTER_IMPLEMENTATION_IMPL_AUTOGEN_H_
+
+void RasterImplementation::DeleteTextures(GLsizei n, const GLuint* textures) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glDeleteTextures(" << n << ", "
+ << static_cast<const void*>(textures) << ")");
+ GPU_CLIENT_LOG_CODE_BLOCK({
+ for (GLsizei i = 0; i < n; ++i) {
+ GPU_CLIENT_LOG(" " << i << ": " << textures[i]);
+ }
+ });
+ GPU_CLIENT_DCHECK_CODE_BLOCK({
+ for (GLsizei i = 0; i < n; ++i) {
+ DCHECK(textures[i] != 0);
+ }
+ });
+ if (n < 0) {
+ SetGLError(GL_INVALID_VALUE, "glDeleteTextures", "n < 0");
+ return;
+ }
+ DeleteTexturesHelper(n, textures);
+ CheckGLError();
+}
+
+void RasterImplementation::GetIntegerv(GLenum pname, GLint* params) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_VALIDATE_DESTINATION_INITALIZATION(GLint, params);
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glGetIntegerv("
+ << GLES2Util::GetStringGLState(pname) << ", "
+ << static_cast<const void*>(params) << ")");
+ TRACE_EVENT0("gpu", "RasterImplementation::GetIntegerv");
+ if (GetIntegervHelper(pname, params)) {
+ return;
+ }
+ typedef cmds::GetIntegerv::Result Result;
+ Result* result = GetResultAs<Result*>();
+ if (!result) {
+ return;
+ }
+ result->SetNumResults(0);
+ helper_->GetIntegerv(pname, GetResultShmId(), GetResultShmOffset());
+ WaitForCmd();
+ result->CopyResult(params);
+ GPU_CLIENT_LOG_CODE_BLOCK({
+ for (int32_t i = 0; i < result->GetNumResults(); ++i) {
+ GPU_CLIENT_LOG(" " << i << ": " << result->GetData()[i]);
+ }
+ });
+ CheckGLError();
+}
+void RasterImplementation::TexParameteri(GLenum target,
+ GLenum pname,
+ GLint param) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glTexParameteri("
+ << GLES2Util::GetStringTextureBindTarget(target) << ", "
+ << GLES2Util::GetStringTextureParameter(pname) << ", "
+ << param << ")");
+ helper_->TexParameteri(target, pname, param);
+ CheckGLError();
+}
+
+void RasterImplementation::GenQueriesEXT(GLsizei n, GLuint* queries) {
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glGenQueriesEXT(" << n << ", "
+ << static_cast<const void*>(queries) << ")");
+ if (n < 0) {
+ SetGLError(GL_INVALID_VALUE, "glGenQueriesEXT", "n < 0");
+ return;
+ }
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ IdAllocator* id_allocator = GetIdAllocator(IdNamespaces::kQueries);
+ for (GLsizei ii = 0; ii < n; ++ii)
+ queries[ii] = id_allocator->AllocateID();
+ GenQueriesEXTHelper(n, queries);
+ helper_->GenQueriesEXTImmediate(n, queries);
+ GPU_CLIENT_LOG_CODE_BLOCK({
+ for (GLsizei i = 0; i < n; ++i) {
+ GPU_CLIENT_LOG(" " << i << ": " << queries[i]);
+ }
+ });
+ CheckGLError();
+}
+
+void RasterImplementation::DeleteQueriesEXT(GLsizei n, const GLuint* queries) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glDeleteQueriesEXT(" << n << ", "
+ << static_cast<const void*>(queries) << ")");
+ GPU_CLIENT_LOG_CODE_BLOCK({
+ for (GLsizei i = 0; i < n; ++i) {
+ GPU_CLIENT_LOG(" " << i << ": " << queries[i]);
+ }
+ });
+ GPU_CLIENT_DCHECK_CODE_BLOCK({
+ for (GLsizei i = 0; i < n; ++i) {
+ DCHECK(queries[i] != 0);
+ }
+ });
+ if (n < 0) {
+ SetGLError(GL_INVALID_VALUE, "glDeleteQueriesEXT", "n < 0");
+ return;
+ }
+ DeleteQueriesEXTHelper(n, queries);
+ CheckGLError();
+}
+
+void RasterImplementation::CompressedCopyTextureCHROMIUM(GLuint source_id,
+ GLuint dest_id) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glCompressedCopyTextureCHROMIUM("
+ << source_id << ", " << dest_id << ")");
+ helper_->CompressedCopyTextureCHROMIUM(source_id, dest_id);
+ CheckGLError();
+}
+
+void RasterImplementation::LoseContextCHROMIUM(GLenum current, GLenum other) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glLoseContextCHROMIUM("
+ << GLES2Util::GetStringResetStatus(current) << ", "
+ << GLES2Util::GetStringResetStatus(other) << ")");
+ helper_->LoseContextCHROMIUM(current, other);
+ CheckGLError();
+}
+
+void RasterImplementation::UnpremultiplyAndDitherCopyCHROMIUM(GLuint source_id,
+ GLuint dest_id,
+ GLint x,
+ GLint y,
+ GLsizei width,
+ GLsizei height) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix()
+ << "] glUnpremultiplyAndDitherCopyCHROMIUM(" << source_id
+ << ", " << dest_id << ", " << x << ", " << y << ", "
+ << width << ", " << height << ")");
+ if (width < 0) {
+ SetGLError(GL_INVALID_VALUE, "glUnpremultiplyAndDitherCopyCHROMIUM",
+ "width < 0");
+ return;
+ }
+ if (height < 0) {
+ SetGLError(GL_INVALID_VALUE, "glUnpremultiplyAndDitherCopyCHROMIUM",
+ "height < 0");
+ return;
+ }
+ helper_->UnpremultiplyAndDitherCopyCHROMIUM(source_id, dest_id, x, y, width,
+ height);
+ CheckGLError();
+}
+
+void RasterImplementation::EndRasterCHROMIUM() {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glEndRasterCHROMIUM("
+ << ")");
+ helper_->EndRasterCHROMIUM();
+ CheckGLError();
+}
+
+#endif // GPU_COMMAND_BUFFER_CLIENT_RASTER_IMPLEMENTATION_IMPL_AUTOGEN_H_
diff --git a/chromium/gpu/command_buffer/client/raster_implementation_unittest.cc b/chromium/gpu/command_buffer/client/raster_implementation_unittest.cc
new file mode 100644
index 00000000000..7e38ec7a92a
--- /dev/null
+++ b/chromium/gpu/command_buffer/client/raster_implementation_unittest.cc
@@ -0,0 +1,1019 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Tests for RasterImplementation.
+
+#include "gpu/command_buffer/client/raster_implementation.h"
+
+#include <GLES2/gl2.h>
+#include <GLES2/gl2extchromium.h>
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include <memory>
+
+#include "base/compiler_specific.h"
+#include "gpu/command_buffer/client/client_test_helper.h"
+#include "gpu/command_buffer/client/mock_transfer_buffer.h"
+#include "gpu/command_buffer/client/query_tracker.h"
+#include "gpu/command_buffer/client/raster_cmd_helper.h"
+#include "gpu/command_buffer/client/ring_buffer.h"
+#include "gpu/command_buffer/client/shared_memory_limits.h"
+#include "gpu/command_buffer/client/transfer_buffer.h"
+#include "gpu/command_buffer/common/command_buffer.h"
+#include "gpu/command_buffer/common/sync_token.h"
+#include "testing/gmock/include/gmock/gmock.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+using gpu::gles2::QueryTracker;
+using testing::_;
+using testing::AtLeast;
+using testing::AnyNumber;
+using testing::DoAll;
+using testing::InSequence;
+using testing::Invoke;
+using testing::Mock;
+using testing::Sequence;
+using testing::StrictMock;
+using testing::Return;
+using testing::ReturnRef;
+
+namespace gpu {
+namespace raster {
+
+ACTION_P2(SetMemory, dst, obj) {
+ memcpy(dst, &obj, sizeof(obj));
+}
+
+ACTION_P3(SetMemoryFromArray, dst, array, size) {
+ memcpy(dst, array, size);
+}
+
+// Used to help set the transfer buffer result to SizedResult of a single value.
+template <typename T>
+class SizedResultHelper {
+ public:
+ explicit SizedResultHelper(T result) : size_(sizeof(result)) {
+ memcpy(result_, &result, sizeof(T));
+ }
+
+ private:
+ uint32_t size_;
+ char result_[sizeof(T)];
+};
+
+class RasterImplementationTest : public testing::Test {
+ protected:
+ static const uint8_t kInitialValue = 0xBD;
+ static const int32_t kNumCommandEntries = 500;
+ static const int32_t kCommandBufferSizeBytes =
+ kNumCommandEntries * sizeof(CommandBufferEntry);
+ static const size_t kTransferBufferSize = 512;
+
+ static const GLint kMaxCombinedTextureImageUnits = 8;
+ static const GLint kMaxTextureImageUnits = 8;
+ static const GLint kMaxTextureSize = 128;
+ static const GLint kNumCompressedTextureFormats = 0;
+ static const GLuint kStartId = 1024;
+ static const GLuint kBuffersStartId = 1;
+ static const GLuint kTexturesStartId = 1;
+ static const GLuint kQueriesStartId = 1;
+
+ typedef MockTransferBuffer::ExpectedMemoryInfo ExpectedMemoryInfo;
+
+ class TestContext {
+ public:
+ TestContext() : commands_(nullptr), token_(0) {}
+
+ bool Initialize(bool bind_generates_resource_client,
+ bool bind_generates_resource_service,
+ bool lose_context_when_out_of_memory,
+ bool transfer_buffer_initialize_fail,
+ bool sync_query) {
+ SharedMemoryLimits limits = SharedMemoryLimitsForTesting();
+ command_buffer_.reset(new StrictMock<MockClientCommandBuffer>());
+
+ transfer_buffer_.reset(new MockTransferBuffer(
+ command_buffer_.get(), kTransferBufferSize,
+ RasterImplementation::kStartingOffset,
+ RasterImplementation::kAlignment, transfer_buffer_initialize_fail));
+
+ helper_.reset(new RasterCmdHelper(command_buffer()));
+ helper_->Initialize(limits.command_buffer_size);
+
+ gpu_control_.reset(new StrictMock<MockClientGpuControl>());
+ capabilities_.max_combined_texture_image_units =
+ kMaxCombinedTextureImageUnits;
+ capabilities_.max_texture_image_units = kMaxTextureImageUnits;
+ capabilities_.max_texture_size = kMaxTextureSize;
+ capabilities_.num_compressed_texture_formats =
+ kNumCompressedTextureFormats;
+ capabilities_.bind_generates_resource_chromium =
+ bind_generates_resource_service ? 1 : 0;
+ capabilities_.sync_query = sync_query;
+ EXPECT_CALL(*gpu_control_, GetCapabilities())
+ .WillOnce(ReturnRef(capabilities_));
+
+ {
+ InSequence sequence;
+
+ gl_.reset(new RasterImplementation(
+ helper_.get(), transfer_buffer_.get(),
+ bind_generates_resource_client, lose_context_when_out_of_memory,
+ gpu_control_.get()));
+ }
+
+ // The client should be set to something non-null.
+ EXPECT_CALL(*gpu_control_, SetGpuControlClient(gl_.get())).Times(1);
+
+ if (gl_->Initialize(limits) != gpu::ContextResult::kSuccess)
+ return false;
+
+ helper_->CommandBufferHelper::Finish();
+ Mock::VerifyAndClearExpectations(gl_.get());
+
+ scoped_refptr<Buffer> ring_buffer = helper_->get_ring_buffer();
+ commands_ = static_cast<CommandBufferEntry*>(ring_buffer->memory()) +
+ command_buffer()->GetServicePutOffset();
+ ClearCommands();
+ EXPECT_TRUE(transfer_buffer_->InSync());
+
+ Mock::VerifyAndClearExpectations(command_buffer());
+ return true;
+ }
+
+ void TearDown() {
+ Mock::VerifyAndClear(gl_.get());
+ EXPECT_CALL(*command_buffer(), OnFlush()).Times(AnyNumber());
+ // For command buffer.
+ EXPECT_CALL(*command_buffer(), DestroyTransferBuffer(_))
+ .Times(AtLeast(1));
+ // The client should be unset.
+ EXPECT_CALL(*gpu_control_, SetGpuControlClient(nullptr)).Times(1);
+ gl_.reset();
+ }
+
+ MockClientCommandBuffer* command_buffer() const {
+ return command_buffer_.get();
+ }
+
+ int GetNextToken() { return ++token_; }
+
+ void ClearCommands() {
+ scoped_refptr<Buffer> ring_buffer = helper_->get_ring_buffer();
+ memset(ring_buffer->memory(), kInitialValue, ring_buffer->size());
+ }
+
+ std::unique_ptr<MockClientCommandBuffer> command_buffer_;
+ std::unique_ptr<MockClientGpuControl> gpu_control_;
+ std::unique_ptr<RasterCmdHelper> helper_;
+ std::unique_ptr<MockTransferBuffer> transfer_buffer_;
+ std::unique_ptr<RasterImplementation> gl_;
+ CommandBufferEntry* commands_;
+ int token_;
+ Capabilities capabilities_;
+ };
+
+ RasterImplementationTest() : commands_(nullptr) {}
+
+ void SetUp() override;
+ void TearDown() override;
+
+ bool NoCommandsWritten() {
+ scoped_refptr<Buffer> ring_buffer = helper_->get_ring_buffer();
+ const uint8_t* cmds =
+ reinterpret_cast<const uint8_t*>(ring_buffer->memory());
+ const uint8_t* end = cmds + ring_buffer->size();
+ for (; cmds < end; ++cmds) {
+ if (*cmds != kInitialValue) {
+ return false;
+ }
+ }
+ return true;
+ }
+
+ QueryTracker::Query* GetQuery(GLuint id) {
+ return gl_->query_tracker_->GetQuery(id);
+ }
+
+ QueryTracker* GetQueryTracker() { return gl_->query_tracker_.get(); }
+
+ ClientDiscardableTextureManager* discardable_texture_manager() {
+ return &gl_->discardable_texture_manager_;
+ }
+
+ void* MapRasterCHROMIUM(GLsizeiptr size) {
+ return gl_->MapRasterCHROMIUM(size);
+ }
+ void UnmapRasterCHROMIUM(GLsizeiptr written_size) {
+ gl_->UnmapRasterCHROMIUM(written_size);
+ }
+
+ struct ContextInitOptions {
+ ContextInitOptions()
+ : bind_generates_resource_client(true),
+ bind_generates_resource_service(true),
+ lose_context_when_out_of_memory(false),
+ transfer_buffer_initialize_fail(false),
+ sync_query(true) {}
+ bool bind_generates_resource_client;
+ bool bind_generates_resource_service;
+ bool lose_context_when_out_of_memory;
+ bool transfer_buffer_initialize_fail;
+ bool sync_query;
+ };
+
+ bool Initialize(const ContextInitOptions& init_options) {
+ bool success = true;
+ if (!test_context_.Initialize(init_options.bind_generates_resource_client,
+ init_options.bind_generates_resource_service,
+ init_options.lose_context_when_out_of_memory,
+ init_options.transfer_buffer_initialize_fail,
+ init_options.sync_query)) {
+ success = false;
+ }
+
+ // Default to test context 0.
+ gpu_control_ = test_context_.gpu_control_.get();
+ helper_ = test_context_.helper_.get();
+ transfer_buffer_ = test_context_.transfer_buffer_.get();
+ gl_ = test_context_.gl_.get();
+ commands_ = test_context_.commands_;
+ return success;
+ }
+
+ MockClientCommandBuffer* command_buffer() const {
+ return test_context_.command_buffer_.get();
+ }
+
+ int GetNextToken() { return test_context_.GetNextToken(); }
+
+ const void* GetPut() { return helper_->GetSpace(0); }
+
+ void ClearCommands() {
+ scoped_refptr<Buffer> ring_buffer = helper_->get_ring_buffer();
+ memset(ring_buffer->memory(), kInitialValue, ring_buffer->size());
+ }
+
+ size_t MaxTransferBufferSize() {
+ return transfer_buffer_->MaxTransferBufferSize();
+ }
+
+ void SetMappedMemoryLimit(size_t limit) {
+ gl_->mapped_memory_->set_max_allocated_bytes(limit);
+ }
+
+ ExpectedMemoryInfo GetExpectedMemory(size_t size) {
+ return transfer_buffer_->GetExpectedMemory(size);
+ }
+
+ ExpectedMemoryInfo GetExpectedResultMemory(size_t size) {
+ return transfer_buffer_->GetExpectedResultMemory(size);
+ }
+
+ ExpectedMemoryInfo GetExpectedMappedMemory(size_t size) {
+ ExpectedMemoryInfo mem;
+
+ // Temporarily allocate memory and expect that memory block to be reused.
+ mem.ptr = static_cast<uint8_t*>(
+ gl_->mapped_memory_->Alloc(size, &mem.id, &mem.offset));
+ gl_->mapped_memory_->Free(mem.ptr);
+
+ return mem;
+ }
+
+ int CheckError() {
+ ExpectedMemoryInfo result =
+ GetExpectedResultMemory(sizeof(cmds::GetError::Result));
+ EXPECT_CALL(*command_buffer(), OnFlush())
+ .WillOnce(SetMemory(result.ptr, GLuint(GL_NO_ERROR)))
+ .RetiresOnSaturation();
+ return gl_->GetError();
+ }
+
+ const std::string& GetLastError() { return gl_->GetLastError(); }
+
+ bool GetBucketContents(uint32_t bucket_id, std::vector<int8_t>* data) {
+ return gl_->GetBucketContents(bucket_id, data);
+ }
+
+ static SharedMemoryLimits SharedMemoryLimitsForTesting() {
+ SharedMemoryLimits limits;
+ limits.command_buffer_size = kCommandBufferSizeBytes;
+ limits.start_transfer_buffer_size = kTransferBufferSize;
+ limits.min_transfer_buffer_size = kTransferBufferSize;
+ limits.max_transfer_buffer_size = kTransferBufferSize;
+ limits.mapped_memory_reclaim_limit = SharedMemoryLimits::kNoLimit;
+ return limits;
+ }
+
+ TestContext test_context_;
+
+ MockClientGpuControl* gpu_control_;
+ RasterCmdHelper* helper_;
+ MockTransferBuffer* transfer_buffer_;
+ RasterImplementation* gl_;
+ CommandBufferEntry* commands_;
+};
+
+void RasterImplementationTest::SetUp() {
+ ContextInitOptions init_options;
+ ASSERT_TRUE(Initialize(init_options));
+}
+
+void RasterImplementationTest::TearDown() {
+ test_context_.TearDown();
+}
+
+class RasterImplementationManualInitTest : public RasterImplementationTest {
+ protected:
+ void SetUp() override {}
+};
+
+// GCC requires these declarations, but MSVC requires they not be present
+#ifndef _MSC_VER
+const uint8_t RasterImplementationTest::kInitialValue;
+const int32_t RasterImplementationTest::kNumCommandEntries;
+const int32_t RasterImplementationTest::kCommandBufferSizeBytes;
+const size_t RasterImplementationTest::kTransferBufferSize;
+const GLint RasterImplementationTest::kMaxCombinedTextureImageUnits;
+const GLint RasterImplementationTest::kMaxTextureImageUnits;
+const GLint RasterImplementationTest::kMaxTextureSize;
+const GLint RasterImplementationTest::kNumCompressedTextureFormats;
+const GLuint RasterImplementationTest::kStartId;
+const GLuint RasterImplementationTest::kBuffersStartId;
+const GLuint RasterImplementationTest::kTexturesStartId;
+const GLuint RasterImplementationTest::kQueriesStartId;
+#endif
+
+TEST_F(RasterImplementationTest, GetBucketContents) {
+ const uint32_t kBucketId = RasterImplementation::kResultBucketId;
+ const uint32_t kTestSize = MaxTransferBufferSize() + 32;
+
+ std::unique_ptr<uint8_t[]> buf(new uint8_t[kTestSize]);
+ uint8_t* expected_data = buf.get();
+ for (uint32_t ii = 0; ii < kTestSize; ++ii) {
+ expected_data[ii] = ii * 3;
+ }
+
+ struct Cmds {
+ cmd::GetBucketStart get_bucket_start;
+ cmd::SetToken set_token1;
+ cmd::GetBucketData get_bucket_data;
+ cmd::SetToken set_token2;
+ cmd::SetBucketSize set_bucket_size2;
+ };
+
+ ExpectedMemoryInfo mem1 = GetExpectedMemory(MaxTransferBufferSize());
+ ExpectedMemoryInfo result1 = GetExpectedResultMemory(sizeof(uint32_t));
+ ExpectedMemoryInfo mem2 =
+ GetExpectedMemory(kTestSize - MaxTransferBufferSize());
+
+ Cmds expected;
+ expected.get_bucket_start.Init(kBucketId, result1.id, result1.offset,
+ MaxTransferBufferSize(), mem1.id, mem1.offset);
+ expected.set_token1.Init(GetNextToken());
+ expected.get_bucket_data.Init(kBucketId, MaxTransferBufferSize(),
+ kTestSize - MaxTransferBufferSize(), mem2.id,
+ mem2.offset);
+ expected.set_bucket_size2.Init(kBucketId, 0);
+ expected.set_token2.Init(GetNextToken());
+
+ EXPECT_CALL(*command_buffer(), OnFlush())
+ .WillOnce(DoAll(
+ SetMemory(result1.ptr, kTestSize),
+ SetMemoryFromArray(mem1.ptr, expected_data, MaxTransferBufferSize())))
+ .WillOnce(SetMemoryFromArray(mem2.ptr,
+ expected_data + MaxTransferBufferSize(),
+ kTestSize - MaxTransferBufferSize()))
+ .RetiresOnSaturation();
+
+ std::vector<int8_t> data;
+ GetBucketContents(kBucketId, &data);
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+ ASSERT_EQ(kTestSize, data.size());
+ EXPECT_EQ(0, memcmp(expected_data, &data[0], data.size()));
+}
+
+// Test that things are cached
+TEST_F(RasterImplementationTest, GetIntegerCacheRead) {
+ struct PNameValue {
+ GLenum pname;
+ GLint expected;
+ };
+ const PNameValue pairs[] = {{
+ GL_ACTIVE_TEXTURE, GL_TEXTURE0,
+ },
+ {
+ GL_TEXTURE_BINDING_2D, 0,
+ }};
+ size_t num_pairs = sizeof(pairs) / sizeof(pairs[0]);
+ for (size_t ii = 0; ii < num_pairs; ++ii) {
+ const PNameValue& pv = pairs[ii];
+ GLint v = -1;
+ gl_->GetIntegerv(pv.pname, &v);
+ EXPECT_TRUE(NoCommandsWritten());
+ EXPECT_EQ(pv.expected, v);
+ }
+
+ ExpectedMemoryInfo result1 =
+ GetExpectedResultMemory(sizeof(cmds::GetError::Result));
+
+ EXPECT_CALL(*command_buffer(), OnFlush())
+ .WillOnce(SetMemory(result1.ptr, GLuint(GL_NO_ERROR)))
+ .RetiresOnSaturation();
+ EXPECT_EQ(static_cast<GLenum>(GL_NO_ERROR), gl_->GetError());
+}
+
+TEST_F(RasterImplementationTest, GetIntegerCacheWrite) {
+ struct PNameValue {
+ GLenum pname;
+ GLint expected;
+ };
+ gl_->ActiveTexture(GL_TEXTURE4);
+ gl_->BindTexture(GL_TEXTURE_2D, 6);
+
+ const PNameValue pairs[] = {{
+ GL_ACTIVE_TEXTURE, GL_TEXTURE4,
+ },
+ {
+ GL_TEXTURE_BINDING_2D, 6,
+ }};
+ size_t num_pairs = sizeof(pairs) / sizeof(pairs[0]);
+ for (size_t ii = 0; ii < num_pairs; ++ii) {
+ const PNameValue& pv = pairs[ii];
+ GLint v = -1;
+ gl_->GetIntegerv(pv.pname, &v);
+ EXPECT_EQ(pv.expected, v);
+ }
+
+ ExpectedMemoryInfo result1 =
+ GetExpectedResultMemory(sizeof(cmds::GetError::Result));
+
+ EXPECT_CALL(*command_buffer(), OnFlush())
+ .WillOnce(SetMemory(result1.ptr, GLuint(GL_NO_ERROR)))
+ .RetiresOnSaturation();
+ EXPECT_EQ(static_cast<GLenum>(GL_NO_ERROR), gl_->GetError());
+}
+
+TEST_F(RasterImplementationTest, BeginEndQueryEXT) {
+ // GL_COMMANDS_COMPLETED_CHROMIUM,
+ // GL_CURRENT_QUERY_EXT
+
+ GLuint expected_ids[2] = {1, 2}; // These must match what's actually genned.
+ struct GenCmds {
+ cmds::GenQueriesEXTImmediate gen;
+ GLuint data[2];
+ };
+ GenCmds expected_gen_cmds;
+ expected_gen_cmds.gen.Init(arraysize(expected_ids), &expected_ids[0]);
+ GLuint ids[arraysize(expected_ids)] = {
+ 0,
+ };
+ gl_->GenQueriesEXT(arraysize(expected_ids), &ids[0]);
+ EXPECT_EQ(0,
+ memcmp(&expected_gen_cmds, commands_, sizeof(expected_gen_cmds)));
+ GLuint id1 = ids[0];
+ GLuint id2 = ids[1];
+ ClearCommands();
+
+ // Test BeginQueryEXT fails if id = 0.
+ gl_->BeginQueryEXT(GL_COMMANDS_COMPLETED_CHROMIUM, 0);
+ EXPECT_TRUE(NoCommandsWritten());
+ EXPECT_EQ(GL_INVALID_OPERATION, CheckError());
+
+ // Test BeginQueryEXT inserts command.
+ struct BeginCmds {
+ cmds::BeginQueryEXT begin_query;
+ };
+ BeginCmds expected_begin_cmds;
+ const void* commands = GetPut();
+ gl_->BeginQueryEXT(GL_COMMANDS_COMPLETED_CHROMIUM, id1);
+ QueryTracker::Query* query = GetQuery(id1);
+ ASSERT_TRUE(query != nullptr);
+ expected_begin_cmds.begin_query.Init(GL_COMMANDS_COMPLETED_CHROMIUM, id1,
+ query->shm_id(), query->shm_offset());
+ EXPECT_EQ(
+ 0, memcmp(&expected_begin_cmds, commands, sizeof(expected_begin_cmds)));
+ ClearCommands();
+
+ // Test BeginQueryEXT fails if between Begin/End.
+ gl_->BeginQueryEXT(GL_COMMANDS_COMPLETED_CHROMIUM, id2);
+ EXPECT_TRUE(NoCommandsWritten());
+ EXPECT_EQ(GL_INVALID_OPERATION, CheckError());
+
+ // Test EndQueryEXT sends command
+ struct EndCmds {
+ cmds::EndQueryEXT end_query;
+ };
+ commands = GetPut();
+ gl_->EndQueryEXT(GL_COMMANDS_COMPLETED_CHROMIUM);
+ EndCmds expected_end_cmds;
+ expected_end_cmds.end_query.Init(GL_COMMANDS_COMPLETED_CHROMIUM,
+ query->submit_count());
+ EXPECT_EQ(0, memcmp(&expected_end_cmds, commands, sizeof(expected_end_cmds)));
+
+ // Test EndQueryEXT fails if no current query.
+ ClearCommands();
+ gl_->EndQueryEXT(GL_COMMANDS_COMPLETED_CHROMIUM);
+ EXPECT_TRUE(NoCommandsWritten());
+ EXPECT_EQ(GL_INVALID_OPERATION, CheckError());
+
+ // Test 2nd Begin/End increments count.
+ base::subtle::Atomic32 old_submit_count = query->submit_count();
+ gl_->BeginQueryEXT(GL_COMMANDS_COMPLETED_CHROMIUM, id1);
+ EXPECT_EQ(old_submit_count, query->submit_count());
+ commands = GetPut();
+ gl_->EndQueryEXT(GL_COMMANDS_COMPLETED_CHROMIUM);
+ EXPECT_NE(old_submit_count, query->submit_count());
+ expected_end_cmds.end_query.Init(GL_COMMANDS_COMPLETED_CHROMIUM,
+ query->submit_count());
+ EXPECT_EQ(0, memcmp(&expected_end_cmds, commands, sizeof(expected_end_cmds)));
+
+ // Test GetQueryObjectuivEXT fails if unused id
+ GLuint available = 0xBDu;
+ ClearCommands();
+ gl_->GetQueryObjectuivEXT(id2, GL_QUERY_RESULT_AVAILABLE_EXT, &available);
+ EXPECT_TRUE(NoCommandsWritten());
+ EXPECT_EQ(0xBDu, available);
+ EXPECT_EQ(GL_INVALID_OPERATION, CheckError());
+
+ // Test GetQueryObjectuivEXT fails if bad id
+ ClearCommands();
+ gl_->GetQueryObjectuivEXT(4567, GL_QUERY_RESULT_AVAILABLE_EXT, &available);
+ EXPECT_TRUE(NoCommandsWritten());
+ EXPECT_EQ(0xBDu, available);
+ EXPECT_EQ(GL_INVALID_OPERATION, CheckError());
+
+ // Test GetQueryObjectuivEXT CheckResultsAvailable
+ ClearCommands();
+ gl_->GetQueryObjectuivEXT(id1, GL_QUERY_RESULT_AVAILABLE_EXT, &available);
+ EXPECT_EQ(0u, available);
+}
+
+TEST_F(RasterImplementationManualInitTest, BadQueryTargets) {
+ ContextInitOptions init_options;
+ init_options.sync_query = false;
+ ASSERT_TRUE(Initialize(init_options));
+
+ GLuint id = 0;
+ gl_->GenQueriesEXT(1, &id);
+ ClearCommands();
+
+ gl_->BeginQueryEXT(GL_COMMANDS_COMPLETED_CHROMIUM, id);
+ EXPECT_EQ(GL_INVALID_OPERATION, CheckError());
+ EXPECT_EQ(nullptr, GetQuery(id));
+
+ gl_->BeginQueryEXT(0x123, id);
+ EXPECT_EQ(GL_INVALID_ENUM, CheckError());
+ EXPECT_EQ(nullptr, GetQuery(id));
+}
+
+TEST_F(RasterImplementationTest, GenSyncTokenCHROMIUM) {
+ const CommandBufferNamespace kNamespaceId = CommandBufferNamespace::GPU_IO;
+ const CommandBufferId kCommandBufferId =
+ CommandBufferId::FromUnsafeValue(234u);
+ const GLuint64 kFenceSync = 123u;
+ SyncToken sync_token;
+
+ EXPECT_CALL(*gpu_control_, GetNamespaceID())
+ .WillRepeatedly(Return(kNamespaceId));
+ EXPECT_CALL(*gpu_control_, GetCommandBufferID())
+ .WillRepeatedly(Return(kCommandBufferId));
+
+ gl_->GenSyncTokenCHROMIUM(nullptr);
+ EXPECT_TRUE(NoCommandsWritten());
+ EXPECT_EQ(GL_INVALID_VALUE, CheckError());
+
+ const void* commands = GetPut();
+ cmds::InsertFenceSyncCHROMIUM insert_fence_sync;
+ insert_fence_sync.Init(kFenceSync);
+
+ EXPECT_CALL(*gpu_control_, GenerateFenceSyncRelease())
+ .WillOnce(Return(kFenceSync));
+ EXPECT_CALL(*gpu_control_, EnsureWorkVisible());
+ gl_->GenSyncTokenCHROMIUM(sync_token.GetData());
+ EXPECT_EQ(0, memcmp(&insert_fence_sync, commands, sizeof(insert_fence_sync)));
+ EXPECT_EQ(GL_NO_ERROR, CheckError());
+
+ EXPECT_TRUE(sync_token.verified_flush());
+ EXPECT_EQ(kNamespaceId, sync_token.namespace_id());
+ EXPECT_EQ(kCommandBufferId, sync_token.command_buffer_id());
+ EXPECT_EQ(kFenceSync, sync_token.release_count());
+}
+
+TEST_F(RasterImplementationTest, GenUnverifiedSyncTokenCHROMIUM) {
+ const CommandBufferNamespace kNamespaceId = CommandBufferNamespace::GPU_IO;
+ const CommandBufferId kCommandBufferId =
+ CommandBufferId::FromUnsafeValue(234u);
+ const GLuint64 kFenceSync = 123u;
+ SyncToken sync_token;
+
+ EXPECT_CALL(*gpu_control_, GetNamespaceID())
+ .WillRepeatedly(Return(kNamespaceId));
+ EXPECT_CALL(*gpu_control_, GetCommandBufferID())
+ .WillRepeatedly(Return(kCommandBufferId));
+
+ gl_->GenUnverifiedSyncTokenCHROMIUM(nullptr);
+ EXPECT_TRUE(NoCommandsWritten());
+ EXPECT_EQ(GL_INVALID_VALUE, CheckError());
+
+ const void* commands = GetPut();
+ cmds::InsertFenceSyncCHROMIUM insert_fence_sync;
+ insert_fence_sync.Init(kFenceSync);
+
+ EXPECT_CALL(*gpu_control_, GenerateFenceSyncRelease())
+ .WillOnce(Return(kFenceSync));
+ gl_->GenUnverifiedSyncTokenCHROMIUM(sync_token.GetData());
+ EXPECT_EQ(0, memcmp(&insert_fence_sync, commands, sizeof(insert_fence_sync)));
+ EXPECT_EQ(GL_NO_ERROR, CheckError());
+
+ EXPECT_FALSE(sync_token.verified_flush());
+ EXPECT_EQ(kNamespaceId, sync_token.namespace_id());
+ EXPECT_EQ(kCommandBufferId, sync_token.command_buffer_id());
+ EXPECT_EQ(kFenceSync, sync_token.release_count());
+}
+
+TEST_F(RasterImplementationTest, VerifySyncTokensCHROMIUM) {
+ ExpectedMemoryInfo result =
+ GetExpectedResultMemory(sizeof(cmds::GetError::Result));
+ EXPECT_CALL(*command_buffer(), OnFlush())
+ .WillRepeatedly(SetMemory(result.ptr, GLuint(GL_NO_ERROR)))
+ .RetiresOnSaturation();
+
+ const CommandBufferNamespace kNamespaceId = CommandBufferNamespace::GPU_IO;
+ const CommandBufferId kCommandBufferId =
+ CommandBufferId::FromUnsafeValue(234u);
+ const GLuint64 kFenceSync = 123u;
+ gpu::SyncToken sync_token;
+ GLbyte* sync_token_datas[] = {sync_token.GetData()};
+
+ EXPECT_CALL(*gpu_control_, GetNamespaceID())
+ .WillRepeatedly(Return(kNamespaceId));
+ EXPECT_CALL(*gpu_control_, GetCommandBufferID())
+ .WillRepeatedly(Return(kCommandBufferId));
+
+ EXPECT_CALL(*gpu_control_, GenerateFenceSyncRelease())
+ .WillOnce(Return(kFenceSync));
+ gl_->GenUnverifiedSyncTokenCHROMIUM(sync_token.GetData());
+ EXPECT_TRUE(sync_token.HasData());
+ EXPECT_FALSE(sync_token.verified_flush());
+
+ ClearCommands();
+ EXPECT_CALL(*gpu_control_, CanWaitUnverifiedSyncToken(sync_token))
+ .WillOnce(Return(false));
+ gl_->VerifySyncTokensCHROMIUM(sync_token_datas, 1);
+ EXPECT_TRUE(NoCommandsWritten());
+ EXPECT_EQ(static_cast<GLenum>(GL_INVALID_VALUE), gl_->GetError());
+ EXPECT_FALSE(sync_token.verified_flush());
+
+ ClearCommands();
+ EXPECT_CALL(*gpu_control_, CanWaitUnverifiedSyncToken(sync_token))
+ .WillOnce(Return(true));
+ EXPECT_CALL(*gpu_control_, EnsureWorkVisible());
+ gl_->VerifySyncTokensCHROMIUM(sync_token_datas, arraysize(sync_token_datas));
+ EXPECT_TRUE(NoCommandsWritten());
+ EXPECT_EQ(GL_NO_ERROR, CheckError());
+
+ EXPECT_EQ(kNamespaceId, sync_token.namespace_id());
+ EXPECT_EQ(kCommandBufferId, sync_token.command_buffer_id());
+ EXPECT_EQ(kFenceSync, sync_token.release_count());
+ EXPECT_TRUE(sync_token.verified_flush());
+}
+
+TEST_F(RasterImplementationTest, VerifySyncTokensCHROMIUM_Sequence) {
+  // To verify sync tokens, the sync tokens must all be verified after
+  // CanWaitUnverifiedSyncToken() is called for each of them. This test
+  // ensures the right sequence.
+ ExpectedMemoryInfo result =
+ GetExpectedResultMemory(sizeof(cmds::GetError::Result));
+ EXPECT_CALL(*command_buffer(), OnFlush())
+ .WillRepeatedly(SetMemory(result.ptr, GLuint(GL_NO_ERROR)))
+ .RetiresOnSaturation();
+
+ const CommandBufferNamespace kNamespaceId = CommandBufferNamespace::GPU_IO;
+ const CommandBufferId kCommandBufferId =
+ CommandBufferId::FromUnsafeValue(234u);
+ const GLuint64 kFenceSync1 = 123u;
+ const GLuint64 kFenceSync2 = 234u;
+ gpu::SyncToken sync_token1;
+ gpu::SyncToken sync_token2;
+ GLbyte* sync_token_datas[] = {sync_token1.GetData(), sync_token2.GetData()};
+
+ EXPECT_CALL(*gpu_control_, GetNamespaceID())
+ .WillRepeatedly(Return(kNamespaceId));
+ EXPECT_CALL(*gpu_control_, GetCommandBufferID())
+ .WillRepeatedly(Return(kCommandBufferId));
+
+ // Generate sync token 1.
+ EXPECT_CALL(*gpu_control_, GenerateFenceSyncRelease())
+ .WillOnce(Return(kFenceSync1));
+ gl_->GenUnverifiedSyncTokenCHROMIUM(sync_token1.GetData());
+ EXPECT_TRUE(sync_token1.HasData());
+ EXPECT_FALSE(sync_token1.verified_flush());
+
+ // Generate sync token 2.
+ EXPECT_CALL(*gpu_control_, GenerateFenceSyncRelease())
+ .WillOnce(Return(kFenceSync2));
+ gl_->GenUnverifiedSyncTokenCHROMIUM(sync_token2.GetData());
+ EXPECT_TRUE(sync_token2.HasData());
+ EXPECT_FALSE(sync_token2.verified_flush());
+
+ // Ensure proper sequence of checking and validating.
+ Sequence sequence;
+ EXPECT_CALL(*gpu_control_, CanWaitUnverifiedSyncToken(sync_token1))
+ .InSequence(sequence)
+ .WillOnce(Return(true));
+ EXPECT_CALL(*gpu_control_, CanWaitUnverifiedSyncToken(sync_token2))
+ .InSequence(sequence)
+ .WillOnce(Return(true));
+ EXPECT_CALL(*gpu_control_, EnsureWorkVisible()).InSequence(sequence);
+ gl_->VerifySyncTokensCHROMIUM(sync_token_datas, arraysize(sync_token_datas));
+ EXPECT_EQ(GL_NO_ERROR, CheckError());
+
+ EXPECT_TRUE(sync_token1.verified_flush());
+ EXPECT_TRUE(sync_token2.verified_flush());
+}
+
+TEST_F(RasterImplementationTest, VerifySyncTokensCHROMIUM_EmptySyncToken) {
+  // Empty (default-constructed) sync tokens must be skipped entirely: no
+  // CanWaitUnverifiedSyncToken() checks and no EnsureWorkVisible() call,
+  // while the tokens are still marked as verified.
+ ExpectedMemoryInfo result =
+ GetExpectedResultMemory(sizeof(cmds::GetError::Result));
+ EXPECT_CALL(*command_buffer(), OnFlush())
+ .WillRepeatedly(SetMemory(result.ptr, GLuint(GL_NO_ERROR)))
+ .RetiresOnSaturation();
+
+ gpu::SyncToken sync_token1, sync_token2;
+ GLbyte* sync_token_datas[] = {sync_token1.GetData(), sync_token2.GetData()};
+
+ // Ensure proper sequence of checking and validating.
+ EXPECT_CALL(*gpu_control_, CanWaitUnverifiedSyncToken(_)).Times(0);
+ EXPECT_CALL(*gpu_control_, EnsureWorkVisible()).Times(0);
+ gl_->VerifySyncTokensCHROMIUM(sync_token_datas, arraysize(sync_token_datas));
+ EXPECT_TRUE(NoCommandsWritten());
+ EXPECT_EQ(GL_NO_ERROR, CheckError());
+
+ EXPECT_TRUE(sync_token1.verified_flush());
+ EXPECT_TRUE(sync_token2.verified_flush());
+}
+
+TEST_F(RasterImplementationTest, WaitSyncTokenCHROMIUM) {
+ const CommandBufferNamespace kNamespaceId = CommandBufferNamespace::GPU_IO;
+ const CommandBufferId kCommandBufferId =
+ CommandBufferId::FromUnsafeValue(234u);
+ const GLuint64 kFenceSync = 456u;
+
+ gpu::SyncToken sync_token;
+ GLbyte* sync_token_data = sync_token.GetData();
+
+ struct Cmds {
+ cmds::InsertFenceSyncCHROMIUM insert_fence_sync;
+ cmds::WaitSyncTokenCHROMIUM wait_sync_token;
+ };
+ Cmds expected;
+ expected.insert_fence_sync.Init(kFenceSync);
+ expected.wait_sync_token.Init(kNamespaceId, kCommandBufferId.GetUnsafeValue(),
+ kFenceSync);
+
+ EXPECT_CALL(*gpu_control_, GetNamespaceID()).WillOnce(Return(kNamespaceId));
+ EXPECT_CALL(*gpu_control_, GetCommandBufferID())
+ .WillOnce(Return(kCommandBufferId));
+ EXPECT_CALL(*gpu_control_, GenerateFenceSyncRelease())
+ .WillOnce(Return(kFenceSync));
+ EXPECT_CALL(*gpu_control_, EnsureWorkVisible());
+ gl_->GenSyncTokenCHROMIUM(sync_token_data);
+
+ EXPECT_CALL(*gpu_control_, WaitSyncTokenHint(sync_token));
+ gl_->WaitSyncTokenCHROMIUM(sync_token_data);
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+}
+
+TEST_F(RasterImplementationTest, WaitSyncTokenCHROMIUMErrors) {
+ ExpectedMemoryInfo result =
+ GetExpectedResultMemory(sizeof(cmds::GetError::Result));
+ EXPECT_CALL(*command_buffer(), OnFlush())
+ .WillRepeatedly(SetMemory(result.ptr, GLuint(GL_NO_ERROR)))
+ .RetiresOnSaturation();
+
+  // Empty sync tokens should produce no error and be a nop.
+ ClearCommands();
+ gl_->WaitSyncTokenCHROMIUM(nullptr);
+ EXPECT_TRUE(NoCommandsWritten());
+ EXPECT_EQ(static_cast<GLenum>(GL_NO_ERROR), gl_->GetError());
+
+ // Invalid sync tokens should produce no error and be a nop.
+ ClearCommands();
+ gpu::SyncToken invalid_sync_token;
+ gl_->WaitSyncTokenCHROMIUM(invalid_sync_token.GetConstData());
+ EXPECT_TRUE(NoCommandsWritten());
+ EXPECT_EQ(static_cast<GLenum>(GL_NO_ERROR), gl_->GetError());
+
+  // Unverified sync tokens that cannot be waited on should produce
+ ClearCommands();
+ gpu::SyncToken unverified_sync_token(CommandBufferNamespace::GPU_IO,
+ gpu::CommandBufferId(), 0);
+ EXPECT_CALL(*gpu_control_, CanWaitUnverifiedSyncToken(unverified_sync_token))
+ .WillOnce(Return(false));
+ gl_->WaitSyncTokenCHROMIUM(unverified_sync_token.GetConstData());
+ EXPECT_TRUE(NoCommandsWritten());
+ EXPECT_EQ(static_cast<GLenum>(GL_INVALID_VALUE), gl_->GetError());
+}
+
+static void CountCallback(int* count) {
+ (*count)++;
+}
+
+TEST_F(RasterImplementationTest, SignalSyncToken) {
+ const CommandBufferNamespace kNamespaceId = CommandBufferNamespace::GPU_IO;
+ const CommandBufferId kCommandBufferId = CommandBufferId::FromUnsafeValue(1);
+ const uint64_t kFenceSync = 123u;
+
+ EXPECT_CALL(*gpu_control_, GetNamespaceID())
+ .WillRepeatedly(Return(kNamespaceId));
+ EXPECT_CALL(*gpu_control_, GetCommandBufferID())
+ .WillRepeatedly(Return(kCommandBufferId));
+
+ EXPECT_CALL(*gpu_control_, GenerateFenceSyncRelease())
+ .WillOnce(Return(kFenceSync));
+ EXPECT_CALL(*gpu_control_, EnsureWorkVisible());
+ gpu::SyncToken sync_token;
+ gl_->GenSyncTokenCHROMIUM(sync_token.GetData());
+
+ int signaled_count = 0;
+
+ // Request a signal sync token, which gives a callback to the GpuControl to
+ // run when the sync token is reached.
+ base::OnceClosure signal_closure;
+ EXPECT_CALL(*gpu_control_, DoSignalSyncToken(_, _))
+ .WillOnce(Invoke([&signal_closure](const SyncToken& sync_token,
+ base::OnceClosure* callback) {
+ signal_closure = std::move(*callback);
+ }));
+ gl_->SignalSyncToken(sync_token,
+ base::BindOnce(&CountCallback, &signaled_count));
+ EXPECT_EQ(0, signaled_count);
+
+ // When GpuControl runs the callback, the original callback we gave to
+ // RasterImplementation is run.
+ std::move(signal_closure).Run();
+ EXPECT_EQ(1, signaled_count);
+}
+
+TEST_F(RasterImplementationTest, SignalSyncTokenAfterContextLoss) {
+ const CommandBufferNamespace kNamespaceId = CommandBufferNamespace::GPU_IO;
+ const CommandBufferId kCommandBufferId = CommandBufferId::FromUnsafeValue(1);
+ const uint64_t kFenceSync = 123u;
+
+ EXPECT_CALL(*gpu_control_, GetNamespaceID()).WillOnce(Return(kNamespaceId));
+ EXPECT_CALL(*gpu_control_, GetCommandBufferID())
+ .WillOnce(Return(kCommandBufferId));
+ EXPECT_CALL(*gpu_control_, GenerateFenceSyncRelease())
+ .WillOnce(Return(kFenceSync));
+ EXPECT_CALL(*gpu_control_, EnsureWorkVisible());
+ gpu::SyncToken sync_token;
+ gl_->GenSyncTokenCHROMIUM(sync_token.GetData());
+
+ int signaled_count = 0;
+
+ // Request a signal sync token, which gives a callback to the GpuControl to
+ // run when the sync token is reached.
+ base::OnceClosure signal_closure;
+ EXPECT_CALL(*gpu_control_, DoSignalSyncToken(_, _))
+ .WillOnce(Invoke([&signal_closure](const SyncToken& sync_token,
+ base::OnceClosure* callback) {
+ signal_closure = std::move(*callback);
+ }));
+ gl_->SignalSyncToken(sync_token,
+ base::BindOnce(&CountCallback, &signaled_count));
+ EXPECT_EQ(0, signaled_count);
+
+ // Inform the RasterImplementation that the context is lost.
+ GpuControlClient* gl_as_client = gl_;
+ gl_as_client->OnGpuControlLostContext();
+
+ // When GpuControl runs the callback, the original callback we gave to
+ // RasterImplementation is *not* run, since the context is lost and we
+ // have already run the lost context callback.
+ std::move(signal_closure).Run();
+ EXPECT_EQ(0, signaled_count);
+}
+
+TEST_F(RasterImplementationTest, ReportLoss) {
+ GpuControlClient* gl_as_client = gl_;
+ int lost_count = 0;
+ gl_->SetLostContextCallback(base::BindOnce(&CountCallback, &lost_count));
+ EXPECT_EQ(0, lost_count);
+
+ EXPECT_EQ(static_cast<GLenum>(GL_NO_ERROR), gl_->GetGraphicsResetStatusKHR());
+ gl_as_client->OnGpuControlLostContext();
+ EXPECT_NE(static_cast<GLenum>(GL_NO_ERROR), gl_->GetGraphicsResetStatusKHR());
+ // The lost context callback should be run when RasterImplementation is
+ // notified of the loss.
+ EXPECT_EQ(1, lost_count);
+}
+
+TEST_F(RasterImplementationTest, ReportLossReentrant) {
+ GpuControlClient* gl_as_client = gl_;
+ int lost_count = 0;
+ gl_->SetLostContextCallback(base::BindOnce(&CountCallback, &lost_count));
+ EXPECT_EQ(0, lost_count);
+
+ EXPECT_EQ(static_cast<GLenum>(GL_NO_ERROR), gl_->GetGraphicsResetStatusKHR());
+ gl_as_client->OnGpuControlLostContextMaybeReentrant();
+ EXPECT_NE(static_cast<GLenum>(GL_NO_ERROR), gl_->GetGraphicsResetStatusKHR());
+ // The lost context callback should not be run yet to avoid calling back into
+ // clients re-entrantly, and having them re-enter RasterImplementation.
+ EXPECT_EQ(0, lost_count);
+}
+
+TEST_F(RasterImplementationManualInitTest, FailInitOnTransferBufferFail) {
+ ContextInitOptions init_options;
+ init_options.transfer_buffer_initialize_fail = true;
+ EXPECT_FALSE(Initialize(init_options));
+}
+
+TEST_F(RasterImplementationTest, DiscardableMemoryDelete) {
+ const GLuint texture_id = 1;
+ EXPECT_FALSE(discardable_texture_manager()->TextureIsValid(texture_id));
+ gl_->InitializeDiscardableTextureCHROMIUM(texture_id);
+ EXPECT_TRUE(discardable_texture_manager()->TextureIsValid(texture_id));
+
+ // Deleting a texture should clear its discardable entry.
+ gl_->DeleteTextures(1, &texture_id);
+ EXPECT_FALSE(discardable_texture_manager()->TextureIsValid(texture_id));
+}
+
+TEST_F(RasterImplementationTest, DiscardableTextureLockFail) {
+ const GLuint texture_id = 1;
+ gl_->InitializeDiscardableTextureCHROMIUM(texture_id);
+ EXPECT_TRUE(discardable_texture_manager()->TextureIsValid(texture_id));
+
+ // Unlock the handle on the client side.
+ gl_->UnlockDiscardableTextureCHROMIUM(texture_id);
+
+ // Unlock and delete the handle on the service side.
+ ClientDiscardableHandle client_handle =
+ discardable_texture_manager()->GetHandleForTesting(texture_id);
+ ServiceDiscardableHandle service_handle(client_handle.BufferForTesting(),
+ client_handle.byte_offset(),
+ client_handle.shm_id());
+ service_handle.Unlock();
+ EXPECT_TRUE(service_handle.Delete());
+
+ // Trying to re-lock the texture via GL should fail and delete the entry.
+ EXPECT_FALSE(gl_->LockDiscardableTextureCHROMIUM(texture_id));
+ EXPECT_FALSE(discardable_texture_manager()->TextureIsValid(texture_id));
+}
+
+TEST_F(RasterImplementationTest, DiscardableTextureDoubleInitError) {
+ const GLuint texture_id = 1;
+ gl_->InitializeDiscardableTextureCHROMIUM(texture_id);
+ EXPECT_EQ(GL_NO_ERROR, CheckError());
+ gl_->InitializeDiscardableTextureCHROMIUM(texture_id);
+ EXPECT_EQ(GL_INVALID_VALUE, CheckError());
+}
+
+TEST_F(RasterImplementationTest, DiscardableTextureLockError) {
+ const GLuint texture_id = 1;
+ EXPECT_FALSE(gl_->LockDiscardableTextureCHROMIUM(texture_id));
+ EXPECT_EQ(GL_INVALID_VALUE, CheckError());
+}
+
+TEST_F(RasterImplementationTest, DiscardableTextureLockCounting) {
+ const GLint texture_id = 1;
+ gl_->InitializeDiscardableTextureCHROMIUM(texture_id);
+ EXPECT_TRUE(discardable_texture_manager()->TextureIsValid(texture_id));
+
+ // Bind the texture.
+ gl_->BindTexture(GL_TEXTURE_2D, texture_id);
+ GLint bound_texture_id = 0;
+ gl_->GetIntegerv(GL_TEXTURE_BINDING_2D, &bound_texture_id);
+ EXPECT_EQ(texture_id, bound_texture_id);
+
+ // Lock the texture 3 more times (for 4 locks total).
+ for (int i = 0; i < 3; ++i) {
+ gl_->LockDiscardableTextureCHROMIUM(texture_id);
+ }
+
+ // Unlock 4 times. Only after the last unlock should the texture be unbound.
+ for (int i = 0; i < 4; ++i) {
+ gl_->UnlockDiscardableTextureCHROMIUM(texture_id);
+ bound_texture_id = 0;
+ gl_->GetIntegerv(GL_TEXTURE_BINDING_2D, &bound_texture_id);
+ if (i < 3) {
+ EXPECT_EQ(texture_id, bound_texture_id);
+ } else {
+ EXPECT_EQ(0, bound_texture_id);
+ }
+ }
+}
+
+#include "base/macros.h"
+#include "gpu/command_buffer/client/raster_implementation_unittest_autogen.h"
+
+} // namespace raster
+} // namespace gpu
diff --git a/chromium/gpu/command_buffer/client/raster_implementation_unittest_autogen.h b/chromium/gpu/command_buffer/client/raster_implementation_unittest_autogen.h
new file mode 100644
index 00000000000..345a7d595b9
--- /dev/null
+++ b/chromium/gpu/command_buffer/client/raster_implementation_unittest_autogen.h
@@ -0,0 +1,135 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file is auto-generated from
+// gpu/command_buffer/build_raster_cmd_buffer.py
+// It's formatted by clang-format using chromium coding style:
+// clang-format -i -style=chromium filename
+// DO NOT EDIT!
+
+// This file is included by raster_implementation_unittest.cc to test the
+// GL api function implementations.
+#ifndef GPU_COMMAND_BUFFER_CLIENT_RASTER_IMPLEMENTATION_UNITTEST_AUTOGEN_H_
+#define GPU_COMMAND_BUFFER_CLIENT_RASTER_IMPLEMENTATION_UNITTEST_AUTOGEN_H_
+
+TEST_F(RasterImplementationTest, DeleteTextures) {
+ GLuint ids[2] = {kTexturesStartId, kTexturesStartId + 1};
+ struct Cmds {
+ cmds::DeleteTexturesImmediate del;
+ GLuint data[2];
+ };
+ Cmds expected;
+ expected.del.Init(arraysize(ids), &ids[0]);
+ expected.data[0] = kTexturesStartId;
+ expected.data[1] = kTexturesStartId + 1;
+ gl_->DeleteTextures(arraysize(ids), &ids[0]);
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+}
+
+TEST_F(RasterImplementationTest, Flush) {
+ struct Cmds {
+ cmds::Flush cmd;
+ };
+ Cmds expected;
+ expected.cmd.Init();
+
+ gl_->Flush();
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+}
+
+TEST_F(RasterImplementationTest, GetIntegerv) {
+ struct Cmds {
+ cmds::GetIntegerv cmd;
+ };
+ typedef cmds::GetIntegerv::Result::Type ResultType;
+ ResultType result = 0;
+ Cmds expected;
+ ExpectedMemoryInfo result1 =
+ GetExpectedResultMemory(sizeof(uint32_t) + sizeof(ResultType));
+ expected.cmd.Init(123, result1.id, result1.offset);
+ EXPECT_CALL(*command_buffer(), OnFlush())
+ .WillOnce(SetMemory(result1.ptr, SizedResultHelper<ResultType>(1)))
+ .RetiresOnSaturation();
+ gl_->GetIntegerv(123, &result);
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+ EXPECT_EQ(static_cast<ResultType>(1), result);
+}
+
+TEST_F(RasterImplementationTest, TexParameteri) {
+ struct Cmds {
+ cmds::TexParameteri cmd;
+ };
+ Cmds expected;
+ expected.cmd.Init(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
+
+ gl_->TexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+}
+
+TEST_F(RasterImplementationTest, GenQueriesEXT) {
+ GLuint ids[2] = {
+ 0,
+ };
+ struct Cmds {
+ cmds::GenQueriesEXTImmediate gen;
+ GLuint data[2];
+ };
+ Cmds expected;
+ expected.gen.Init(arraysize(ids), &ids[0]);
+ expected.data[0] = kQueriesStartId;
+ expected.data[1] = kQueriesStartId + 1;
+ gl_->GenQueriesEXT(arraysize(ids), &ids[0]);
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+ EXPECT_EQ(kQueriesStartId, ids[0]);
+ EXPECT_EQ(kQueriesStartId + 1, ids[1]);
+}
+
+TEST_F(RasterImplementationTest, DeleteQueriesEXT) {
+ GLuint ids[2] = {kQueriesStartId, kQueriesStartId + 1};
+ struct Cmds {
+ cmds::DeleteQueriesEXTImmediate del;
+ GLuint data[2];
+ };
+ Cmds expected;
+ expected.del.Init(arraysize(ids), &ids[0]);
+ expected.data[0] = kQueriesStartId;
+ expected.data[1] = kQueriesStartId + 1;
+ gl_->DeleteQueriesEXT(arraysize(ids), &ids[0]);
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+}
+
+TEST_F(RasterImplementationTest, CompressedCopyTextureCHROMIUM) {
+ struct Cmds {
+ cmds::CompressedCopyTextureCHROMIUM cmd;
+ };
+ Cmds expected;
+ expected.cmd.Init(1, 2);
+
+ gl_->CompressedCopyTextureCHROMIUM(1, 2);
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+}
+
+TEST_F(RasterImplementationTest, LoseContextCHROMIUM) {
+ struct Cmds {
+ cmds::LoseContextCHROMIUM cmd;
+ };
+ Cmds expected;
+ expected.cmd.Init(GL_GUILTY_CONTEXT_RESET_ARB, GL_GUILTY_CONTEXT_RESET_ARB);
+
+ gl_->LoseContextCHROMIUM(GL_GUILTY_CONTEXT_RESET_ARB,
+ GL_GUILTY_CONTEXT_RESET_ARB);
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+}
+
+TEST_F(RasterImplementationTest, EndRasterCHROMIUM) {
+ struct Cmds {
+ cmds::EndRasterCHROMIUM cmd;
+ };
+ Cmds expected;
+ expected.cmd.Init();
+
+ gl_->EndRasterCHROMIUM();
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+}
+#endif // GPU_COMMAND_BUFFER_CLIENT_RASTER_IMPLEMENTATION_UNITTEST_AUTOGEN_H_
diff --git a/chromium/gpu/command_buffer/client/raster_interface.h b/chromium/gpu/command_buffer/client/raster_interface.h
index 279310723e7..530e177645c 100644
--- a/chromium/gpu/command_buffer/client/raster_interface.h
+++ b/chromium/gpu/command_buffer/client/raster_interface.h
@@ -12,11 +12,12 @@
namespace cc {
class DisplayItemList;
class ImageProvider;
+struct RasterColorSpace;
} // namespace cc
namespace gfx {
class Rect;
-class Vector2d;
+class Size;
class Vector2dF;
} // namespace gfx
@@ -33,43 +34,13 @@ class RasterInterface {
RasterInterface() {}
virtual ~RasterInterface() {}
- // Command buffer Flush / Finish.
- virtual void Finish() = 0;
- virtual void Flush() = 0;
- virtual void ShallowFlushCHROMIUM() = 0;
- virtual void OrderingBarrierCHROMIUM() = 0;
-
- // SyncTokens.
- virtual void GenSyncTokenCHROMIUM(GLbyte* sync_token) = 0;
- virtual void GenUnverifiedSyncTokenCHROMIUM(GLbyte* sync_token) = 0;
- virtual void VerifySyncTokensCHROMIUM(GLbyte** sync_tokens,
- GLsizei count) = 0;
- virtual void WaitSyncTokenCHROMIUM(const GLbyte* sync_token) = 0;
-
- // Command buffer state.
- virtual GLenum GetError() = 0;
- virtual GLenum GetGraphicsResetStatusKHR() = 0;
- virtual void GetIntegerv(GLenum pname, GLint* params) = 0;
- virtual void LoseContextCHROMIUM(GLenum current, GLenum other) = 0;
-
- // Queries: GL_COMMANDS_ISSUED_CHROMIUM / GL_COMMANDS_COMPLETED_CHROMIUM.
- virtual void GenQueriesEXT(GLsizei n, GLuint* queries) = 0;
- virtual void DeleteQueriesEXT(GLsizei n, const GLuint* queries) = 0;
- virtual void BeginQueryEXT(GLenum target, GLuint id) = 0;
- virtual void EndQueryEXT(GLenum target) = 0;
- virtual void GetQueryObjectuivEXT(GLuint id,
- GLenum pname,
- GLuint* params) = 0;
-
// Texture objects.
virtual void GenTextures(GLsizei n, GLuint* textures) = 0;
- virtual void DeleteTextures(GLsizei n, const GLuint* textures) = 0;
virtual void BindTexture(GLenum target, GLuint texture) = 0;
virtual void ActiveTexture(GLenum texture) = 0;
virtual void GenerateMipmap(GLenum target) = 0;
virtual void SetColorSpaceMetadataCHROMIUM(GLuint texture_id,
GLColorSpace color_space) = 0;
- virtual void TexParameteri(GLenum target, GLenum pname, GLint param) = 0;
// Mailboxes.
virtual void GenMailboxCHROMIUM(GLbyte* mailbox) = 0;
@@ -78,13 +49,8 @@ class RasterInterface {
virtual GLuint CreateAndConsumeTextureCHROMIUM(const GLbyte* mailbox) = 0;
// Image objects.
- virtual GLuint CreateImageCHROMIUM(ClientBuffer buffer,
- GLsizei width,
- GLsizei height,
- GLenum internalformat) = 0;
virtual void BindTexImage2DCHROMIUM(GLenum target, GLint imageId) = 0;
virtual void ReleaseTexImage2DCHROMIUM(GLenum target, GLint imageId) = 0;
- virtual void DestroyImageCHROMIUM(GLuint image_id) = 0;
// Texture allocation and copying.
virtual void TexImage2D(GLenum target,
@@ -132,32 +98,32 @@ class RasterInterface {
GLboolean unpack_flip_y,
GLboolean unpack_premultiply_alpha,
GLboolean unpack_unmultiply_alpha) = 0;
- virtual void CompressedCopyTextureCHROMIUM(GLuint source_id,
- GLuint dest_id) = 0;
-
- // Discardable textures.
- virtual void InitializeDiscardableTextureCHROMIUM(GLuint texture_id) = 0;
- virtual void UnlockDiscardableTextureCHROMIUM(GLuint texture_id) = 0;
- virtual bool LockDiscardableTextureCHROMIUM(GLuint texture_id) = 0;
-
// OOP-Raster
- virtual void BeginRasterCHROMIUM(GLuint texture_id,
- GLuint sk_color,
- GLuint msaa_sample_count,
- GLboolean can_use_lcd_text,
- GLboolean use_distance_field_text,
- GLint pixel_config) = 0;
+ virtual void BeginRasterCHROMIUM(
+ GLuint texture_id,
+ GLuint sk_color,
+ GLuint msaa_sample_count,
+ GLboolean can_use_lcd_text,
+ GLboolean use_distance_field_text,
+ GLint pixel_config,
+ const cc::RasterColorSpace& raster_color_space) = 0;
virtual void RasterCHROMIUM(const cc::DisplayItemList* list,
cc::ImageProvider* provider,
- const gfx::Vector2d& translate,
+ const gfx::Size& content_size,
+ const gfx::Rect& full_raster_rect,
const gfx::Rect& playback_rect,
const gfx::Vector2dF& post_translate,
- GLfloat post_scale) = 0;
- virtual void EndRasterCHROMIUM() = 0;
+ GLfloat post_scale,
+ bool requires_clear) = 0;
// Raster via GrContext.
virtual void BeginGpuRaster() = 0;
virtual void EndGpuRaster() = 0;
+
+// Include the auto-generated part of this class. We split this because
+// it means we can easily edit the non-auto generated parts right here in
+// this file instead of having to edit some template or the code generator.
+#include "gpu/command_buffer/client/raster_interface_autogen.h"
};
} // namespace raster
diff --git a/chromium/gpu/command_buffer/client/raster_interface_autogen.h b/chromium/gpu/command_buffer/client/raster_interface_autogen.h
new file mode 100644
index 00000000000..bc6be3ec473
--- /dev/null
+++ b/chromium/gpu/command_buffer/client/raster_interface_autogen.h
@@ -0,0 +1,52 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file is auto-generated from
+// gpu/command_buffer/build_raster_cmd_buffer.py
+// It's formatted by clang-format using chromium coding style:
+// clang-format -i -style=chromium filename
+// DO NOT EDIT!
+
+// This file is included by raster_interface.h to declare the
+// GL api functions.
+#ifndef GPU_COMMAND_BUFFER_CLIENT_RASTER_INTERFACE_AUTOGEN_H_
+#define GPU_COMMAND_BUFFER_CLIENT_RASTER_INTERFACE_AUTOGEN_H_
+
+virtual void DeleteTextures(GLsizei n, const GLuint* textures) = 0;
+virtual void Finish() = 0;
+virtual void Flush() = 0;
+virtual GLenum GetError() = 0;
+virtual void GetIntegerv(GLenum pname, GLint* params) = 0;
+virtual void ShallowFlushCHROMIUM() = 0;
+virtual void OrderingBarrierCHROMIUM() = 0;
+virtual void TexParameteri(GLenum target, GLenum pname, GLint param) = 0;
+virtual void GenQueriesEXT(GLsizei n, GLuint* queries) = 0;
+virtual void DeleteQueriesEXT(GLsizei n, const GLuint* queries) = 0;
+virtual void BeginQueryEXT(GLenum target, GLuint id) = 0;
+virtual void EndQueryEXT(GLenum target) = 0;
+virtual void GetQueryObjectuivEXT(GLuint id, GLenum pname, GLuint* params) = 0;
+virtual GLuint CreateImageCHROMIUM(ClientBuffer buffer,
+ GLsizei width,
+ GLsizei height,
+ GLenum internalformat) = 0;
+virtual void DestroyImageCHROMIUM(GLuint image_id) = 0;
+virtual void CompressedCopyTextureCHROMIUM(GLuint source_id,
+ GLuint dest_id) = 0;
+virtual void LoseContextCHROMIUM(GLenum current, GLenum other) = 0;
+virtual void GenSyncTokenCHROMIUM(GLbyte* sync_token) = 0;
+virtual void GenUnverifiedSyncTokenCHROMIUM(GLbyte* sync_token) = 0;
+virtual void VerifySyncTokensCHROMIUM(GLbyte** sync_tokens, GLsizei count) = 0;
+virtual void WaitSyncTokenCHROMIUM(const GLbyte* sync_token) = 0;
+virtual void UnpremultiplyAndDitherCopyCHROMIUM(GLuint source_id,
+ GLuint dest_id,
+ GLint x,
+ GLint y,
+ GLsizei width,
+ GLsizei height) = 0;
+virtual GLenum GetGraphicsResetStatusKHR() = 0;
+virtual void InitializeDiscardableTextureCHROMIUM(GLuint texture_id) = 0;
+virtual void UnlockDiscardableTextureCHROMIUM(GLuint texture_id) = 0;
+virtual bool LockDiscardableTextureCHROMIUM(GLuint texture_id) = 0;
+virtual void EndRasterCHROMIUM() = 0;
+#endif // GPU_COMMAND_BUFFER_CLIENT_RASTER_INTERFACE_AUTOGEN_H_
diff --git a/chromium/gpu/command_buffer/client/ring_buffer.cc b/chromium/gpu/command_buffer/client/ring_buffer.cc
index 4410142b491..78b64d13f2e 100644
--- a/chromium/gpu/command_buffer/client/ring_buffer.cc
+++ b/chromium/gpu/command_buffer/client/ring_buffer.cc
@@ -66,7 +66,7 @@ void* RingBuffer::Alloc(unsigned int size) {
size = RoundToAlignment(size);
// Wait until there is enough room.
- while (size > GetLargestFreeSizeNoWaiting()) {
+ while (size > GetLargestFreeSizeNoWaitingInternal()) {
FreeOldestBlock();
}
@@ -145,6 +145,12 @@ void RingBuffer::DiscardBlock(void* pointer) {
}
unsigned int RingBuffer::GetLargestFreeSizeNoWaiting() {
+ unsigned int size = GetLargestFreeSizeNoWaitingInternal();
+ DCHECK_EQ(size, RoundToAlignment(size));
+ return size;
+}
+
+unsigned int RingBuffer::GetLargestFreeSizeNoWaitingInternal() {
while (!blocks_.empty()) {
Block& block = blocks_.front();
if (!helper_->HasTokenPassed(block.token) || block.state == IN_USE) break;
@@ -169,7 +175,7 @@ unsigned int RingBuffer::GetLargestFreeSizeNoWaiting() {
}
unsigned int RingBuffer::GetTotalFreeSizeNoWaiting() {
- unsigned int largest_free_size = GetLargestFreeSizeNoWaiting();
+ unsigned int largest_free_size = GetLargestFreeSizeNoWaitingInternal();
if (free_offset_ > in_use_offset_) {
// It's free from free_offset_ to size_ and from 0 to in_use_offset_.
return size_ - free_offset_ + in_use_offset_;
@@ -188,6 +194,9 @@ void RingBuffer::ShrinkLastBlock(unsigned int new_size) {
// Can't shrink to size 0, see comments in Alloc.
new_size = std::max(new_size, 1u);
+ // Allocate rounded to alignment size so that the offsets are always
+ // memory-aligned.
+ new_size = RoundToAlignment(new_size);
free_offset_ = block.offset + new_size;
block.size = new_size;
}
diff --git a/chromium/gpu/command_buffer/client/ring_buffer.h b/chromium/gpu/command_buffer/client/ring_buffer.h
index b67ac7b59c0..44a87a81702 100644
--- a/chromium/gpu/command_buffer/client/ring_buffer.h
+++ b/chromium/gpu/command_buffer/client/ring_buffer.h
@@ -120,6 +120,7 @@ class GPU_EXPORT RingBuffer {
using BlockIndex = unsigned int;
void FreeOldestBlock();
+ unsigned int GetLargestFreeSizeNoWaitingInternal();
CommandBufferHelper* helper_;
diff --git a/chromium/gpu/command_buffer/client/ring_buffer_test.cc b/chromium/gpu/command_buffer/client/ring_buffer_test.cc
index 1f24f218998..7c5d20975df 100644
--- a/chromium/gpu/command_buffer/client/ring_buffer_test.cc
+++ b/chromium/gpu/command_buffer/client/ring_buffer_test.cc
@@ -419,4 +419,13 @@ TEST_F(RingBufferTest, DiscardAllPaddingFromBeginningTest) {
EXPECT_EQ(kAlloc1 + kAlloc2, allocator_->GetLargestFreeSizeNoWaiting());
}
+TEST_F(RingBufferTest, LargestFreeSizeNoWaiting) {
+ // GetLargestFreeSizeNoWaiting should return the largest free aligned size.
+ void* ptr = allocator_->Alloc(kBufferSize);
+ EXPECT_EQ(0u, allocator_->GetLargestFreeSizeNoWaiting());
+ allocator_->ShrinkLastBlock(kBufferSize - 2 * kAlignment - 1);
+ EXPECT_EQ(2 * kAlignment, allocator_->GetLargestFreeSizeNoWaiting());
+ allocator_->FreePendingToken(ptr, helper_.get()->InsertToken());
+}
+
} // namespace gpu
diff --git a/chromium/gpu/command_buffer/client/transfer_buffer_unittest.cc b/chromium/gpu/command_buffer/client/transfer_buffer_unittest.cc
index c6d6561bc01..c247a713917 100644
--- a/chromium/gpu/command_buffer/client/transfer_buffer_unittest.cc
+++ b/chromium/gpu/command_buffer/client/transfer_buffer_unittest.cc
@@ -527,20 +527,20 @@ TEST_F(TransferBufferExpandContractTest, Shrink) {
EXPECT_EQ(0u, transfer_buffer_->GetFreeSize());
// Shrink once.
- const unsigned int shrink_size1 = 80;
+ const unsigned int shrink_size1 = 64;
EXPECT_LT(shrink_size1, alloc_size);
- transfer_buffer_->ShrinkLastBlock(shrink_size1);
+ transfer_buffer_->ShrinkLastBlock(shrink_size1 - kAlignment + 1);
EXPECT_EQ(alloc_size - shrink_size1, transfer_buffer_->GetFreeSize());
// Shrink again.
- const unsigned int shrink_size2 = 30;
+ const unsigned int shrink_size2 = 32;
EXPECT_LT(shrink_size2, shrink_size1);
transfer_buffer_->ShrinkLastBlock(shrink_size2);
EXPECT_EQ(alloc_size - shrink_size2, transfer_buffer_->GetFreeSize());
- // Shrink to zero (minimum size is 1).
+ // Shrink to zero (minimum size is kAlignment).
transfer_buffer_->ShrinkLastBlock(0);
- EXPECT_EQ(alloc_size - 1, transfer_buffer_->GetFreeSize());
+ EXPECT_EQ(alloc_size - kAlignment, transfer_buffer_->GetFreeSize());
transfer_buffer_->FreePendingToken(ptr, 1);
}
diff --git a/chromium/gpu/command_buffer/common/BUILD.gn b/chromium/gpu/command_buffer/common/BUILD.gn
index eec7989148e..7b11ef44955 100644
--- a/chromium/gpu/command_buffer/common/BUILD.gn
+++ b/chromium/gpu/command_buffer/common/BUILD.gn
@@ -101,6 +101,13 @@ source_set("gles2_sources") {
"gles2_cmd_format_autogen.h",
"gles2_cmd_ids.h",
"gles2_cmd_ids_autogen.h",
+
+ # TODO(backer): Separate into distinct raster target.
+ "raster_cmd_format.cc",
+ "raster_cmd_format.h",
+ "raster_cmd_format_autogen.h",
+ "raster_cmd_ids.h",
+ "raster_cmd_ids_autogen.h",
]
configs += [ "//gpu:gpu_gles2_implementation" ]
diff --git a/chromium/gpu/command_buffer/common/capabilities.h b/chromium/gpu/command_buffer/common/capabilities.h
index a27f69e2cce..95add62e2a2 100644
--- a/chromium/gpu/command_buffer/common/capabilities.h
+++ b/chromium/gpu/command_buffer/common/capabilities.h
@@ -147,6 +147,8 @@ struct GPU_EXPORT Capabilities {
bool image_ycbcr_422 = false;
bool image_ycbcr_420v = false;
bool image_ycbcr_420v_disabled_for_video_frames = false;
+ bool image_xr30 = false;
+ bool image_xb30 = false;
bool render_buffer_format_bgra8888 = false;
bool occlusion_query = false;
bool occlusion_query_boolean = false;
@@ -182,6 +184,8 @@ struct GPU_EXPORT Capabilities {
bool chromium_gpu_fence = false;
+ bool unpremultiply_and_dither_copy = false;
+
int major_version = 2;
int minor_version = 0;
diff --git a/chromium/gpu/command_buffer/common/gl2_types.h b/chromium/gpu/command_buffer/common/gl2_types.h
index b8dca84e12e..3ee532cd5aa 100644
--- a/chromium/gpu/command_buffer/common/gl2_types.h
+++ b/chromium/gpu/command_buffer/common/gl2_types.h
@@ -2,6 +2,9 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#ifndef GPU_COMMAND_BUFFER_COMMON_GL2_TYPES_H_
+#define GPU_COMMAND_BUFFER_COMMON_GL2_TYPES_H_
+
#include <KHR/khrplatform.h>
// GL types are forward declared to avoid including the GL headers. The problem
@@ -28,3 +31,5 @@ typedef khronos_ssize_t GLsizeiptr;
typedef struct __GLsync* GLsync;
typedef int64_t GLint64;
typedef uint64_t GLuint64;
+
+#endif // GPU_COMMAND_BUFFER_COMMON_GL2_TYPES_H_
diff --git a/chromium/gpu/command_buffer/common/gles2_cmd_format_autogen.h b/chromium/gpu/command_buffer/common/gles2_cmd_format_autogen.h
index 17fb5ca0002..7997790a9f1 100644
--- a/chromium/gpu/command_buffer/common/gles2_cmd_format_autogen.h
+++ b/chromium/gpu/command_buffer/common/gles2_cmd_format_autogen.h
@@ -13340,6 +13340,76 @@ static_assert(offsetof(WaitSyncTokenCHROMIUM, release_count_0) == 16,
static_assert(offsetof(WaitSyncTokenCHROMIUM, release_count_1) == 20,
"offset of WaitSyncTokenCHROMIUM release_count_1 should be 20");
+struct UnpremultiplyAndDitherCopyCHROMIUM {
+ typedef UnpremultiplyAndDitherCopyCHROMIUM ValueType;
+ static const CommandId kCmdId = kUnpremultiplyAndDitherCopyCHROMIUM;
+ static const cmd::ArgFlags kArgFlags = cmd::kFixed;
+ static const uint8_t cmd_flags = CMD_FLAG_SET_TRACE_LEVEL(3);
+
+ static uint32_t ComputeSize() {
+ return static_cast<uint32_t>(sizeof(ValueType)); // NOLINT
+ }
+
+ void SetHeader() { header.SetCmd<ValueType>(); }
+
+ void Init(GLuint _source_id,
+ GLuint _dest_id,
+ GLint _x,
+ GLint _y,
+ GLsizei _width,
+ GLsizei _height) {
+ SetHeader();
+ source_id = _source_id;
+ dest_id = _dest_id;
+ x = _x;
+ y = _y;
+ width = _width;
+ height = _height;
+ }
+
+ void* Set(void* cmd,
+ GLuint _source_id,
+ GLuint _dest_id,
+ GLint _x,
+ GLint _y,
+ GLsizei _width,
+ GLsizei _height) {
+ static_cast<ValueType*>(cmd)->Init(_source_id, _dest_id, _x, _y, _width,
+ _height);
+ return NextCmdAddress<ValueType>(cmd);
+ }
+
+ gpu::CommandHeader header;
+ uint32_t source_id;
+ uint32_t dest_id;
+ int32_t x;
+ int32_t y;
+ int32_t width;
+ int32_t height;
+};
+
+static_assert(sizeof(UnpremultiplyAndDitherCopyCHROMIUM) == 28,
+ "size of UnpremultiplyAndDitherCopyCHROMIUM should be 28");
+static_assert(
+ offsetof(UnpremultiplyAndDitherCopyCHROMIUM, header) == 0,
+ "offset of UnpremultiplyAndDitherCopyCHROMIUM header should be 0");
+static_assert(
+ offsetof(UnpremultiplyAndDitherCopyCHROMIUM, source_id) == 4,
+ "offset of UnpremultiplyAndDitherCopyCHROMIUM source_id should be 4");
+static_assert(
+ offsetof(UnpremultiplyAndDitherCopyCHROMIUM, dest_id) == 8,
+ "offset of UnpremultiplyAndDitherCopyCHROMIUM dest_id should be 8");
+static_assert(offsetof(UnpremultiplyAndDitherCopyCHROMIUM, x) == 12,
+ "offset of UnpremultiplyAndDitherCopyCHROMIUM x should be 12");
+static_assert(offsetof(UnpremultiplyAndDitherCopyCHROMIUM, y) == 16,
+ "offset of UnpremultiplyAndDitherCopyCHROMIUM y should be 16");
+static_assert(
+ offsetof(UnpremultiplyAndDitherCopyCHROMIUM, width) == 20,
+ "offset of UnpremultiplyAndDitherCopyCHROMIUM width should be 20");
+static_assert(
+ offsetof(UnpremultiplyAndDitherCopyCHROMIUM, height) == 24,
+ "offset of UnpremultiplyAndDitherCopyCHROMIUM height should be 24");
+
struct DrawBuffersEXTImmediate {
typedef DrawBuffersEXTImmediate ValueType;
static const CommandId kCmdId = kDrawBuffersEXTImmediate;
@@ -16006,14 +16076,16 @@ struct BeginRasterCHROMIUM {
GLuint _msaa_sample_count,
GLboolean _can_use_lcd_text,
GLboolean _use_distance_field_text,
- GLint _pixel_config) {
+ GLint _color_type,
+ GLuint _color_space_transfer_cache_id) {
SetHeader();
texture_id = _texture_id;
sk_color = _sk_color;
msaa_sample_count = _msaa_sample_count;
can_use_lcd_text = _can_use_lcd_text;
use_distance_field_text = _use_distance_field_text;
- pixel_config = _pixel_config;
+ color_type = _color_type;
+ color_space_transfer_cache_id = _color_space_transfer_cache_id;
}
void* Set(void* cmd,
@@ -16022,10 +16094,11 @@ struct BeginRasterCHROMIUM {
GLuint _msaa_sample_count,
GLboolean _can_use_lcd_text,
GLboolean _use_distance_field_text,
- GLint _pixel_config) {
- static_cast<ValueType*>(cmd)->Init(_texture_id, _sk_color,
- _msaa_sample_count, _can_use_lcd_text,
- _use_distance_field_text, _pixel_config);
+ GLint _color_type,
+ GLuint _color_space_transfer_cache_id) {
+ static_cast<ValueType*>(cmd)->Init(
+ _texture_id, _sk_color, _msaa_sample_count, _can_use_lcd_text,
+ _use_distance_field_text, _color_type, _color_space_transfer_cache_id);
return NextCmdAddress<ValueType>(cmd);
}
@@ -16035,11 +16108,12 @@ struct BeginRasterCHROMIUM {
uint32_t msaa_sample_count;
uint32_t can_use_lcd_text;
uint32_t use_distance_field_text;
- int32_t pixel_config;
+ int32_t color_type;
+ uint32_t color_space_transfer_cache_id;
};
-static_assert(sizeof(BeginRasterCHROMIUM) == 28,
- "size of BeginRasterCHROMIUM should be 28");
+static_assert(sizeof(BeginRasterCHROMIUM) == 32,
+ "size of BeginRasterCHROMIUM should be 32");
static_assert(offsetof(BeginRasterCHROMIUM, header) == 0,
"offset of BeginRasterCHROMIUM header should be 0");
static_assert(offsetof(BeginRasterCHROMIUM, texture_id) == 4,
@@ -16053,8 +16127,11 @@ static_assert(offsetof(BeginRasterCHROMIUM, can_use_lcd_text) == 16,
static_assert(
offsetof(BeginRasterCHROMIUM, use_distance_field_text) == 20,
"offset of BeginRasterCHROMIUM use_distance_field_text should be 20");
-static_assert(offsetof(BeginRasterCHROMIUM, pixel_config) == 24,
- "offset of BeginRasterCHROMIUM pixel_config should be 24");
+static_assert(offsetof(BeginRasterCHROMIUM, color_type) == 24,
+ "offset of BeginRasterCHROMIUM color_type should be 24");
+static_assert(
+ offsetof(BeginRasterCHROMIUM, color_space_transfer_cache_id) == 28,
+ "offset of BeginRasterCHROMIUM color_space_transfer_cache_id should be 28");
struct RasterCHROMIUM {
typedef RasterCHROMIUM ValueType;
diff --git a/chromium/gpu/command_buffer/common/gles2_cmd_format_test_autogen.h b/chromium/gpu/command_buffer/common/gles2_cmd_format_test_autogen.h
index 4f4085d5cd2..5dd2ab16d92 100644
--- a/chromium/gpu/command_buffer/common/gles2_cmd_format_test_autogen.h
+++ b/chromium/gpu/command_buffer/common/gles2_cmd_format_test_autogen.h
@@ -4474,6 +4474,26 @@ TEST_F(GLES2FormatTest, WaitSyncTokenCHROMIUM) {
CheckBytesWrittenMatchesExpectedSize(next_cmd, sizeof(cmd));
}
+TEST_F(GLES2FormatTest, UnpremultiplyAndDitherCopyCHROMIUM) {
+ cmds::UnpremultiplyAndDitherCopyCHROMIUM& cmd =
+ *GetBufferAs<cmds::UnpremultiplyAndDitherCopyCHROMIUM>();
+ void* next_cmd =
+ cmd.Set(&cmd, static_cast<GLuint>(11), static_cast<GLuint>(12),
+ static_cast<GLint>(13), static_cast<GLint>(14),
+ static_cast<GLsizei>(15), static_cast<GLsizei>(16));
+ EXPECT_EQ(
+ static_cast<uint32_t>(cmds::UnpremultiplyAndDitherCopyCHROMIUM::kCmdId),
+ cmd.header.command);
+ EXPECT_EQ(sizeof(cmd), cmd.header.size * 4u);
+ EXPECT_EQ(static_cast<GLuint>(11), cmd.source_id);
+ EXPECT_EQ(static_cast<GLuint>(12), cmd.dest_id);
+ EXPECT_EQ(static_cast<GLint>(13), cmd.x);
+ EXPECT_EQ(static_cast<GLint>(14), cmd.y);
+ EXPECT_EQ(static_cast<GLsizei>(15), cmd.width);
+ EXPECT_EQ(static_cast<GLsizei>(16), cmd.height);
+ CheckBytesWrittenMatchesExpectedSize(next_cmd, sizeof(cmd));
+}
+
TEST_F(GLES2FormatTest, DrawBuffersEXTImmediate) {
const int kSomeBaseValueToTestWith = 51;
static GLenum data[] = {
@@ -5317,7 +5337,8 @@ TEST_F(GLES2FormatTest, BeginRasterCHROMIUM) {
void* next_cmd =
cmd.Set(&cmd, static_cast<GLuint>(11), static_cast<GLuint>(12),
static_cast<GLuint>(13), static_cast<GLboolean>(14),
- static_cast<GLboolean>(15), static_cast<GLint>(16));
+ static_cast<GLboolean>(15), static_cast<GLint>(16),
+ static_cast<GLuint>(17));
EXPECT_EQ(static_cast<uint32_t>(cmds::BeginRasterCHROMIUM::kCmdId),
cmd.header.command);
EXPECT_EQ(sizeof(cmd), cmd.header.size * 4u);
@@ -5326,7 +5347,8 @@ TEST_F(GLES2FormatTest, BeginRasterCHROMIUM) {
EXPECT_EQ(static_cast<GLuint>(13), cmd.msaa_sample_count);
EXPECT_EQ(static_cast<GLboolean>(14), cmd.can_use_lcd_text);
EXPECT_EQ(static_cast<GLboolean>(15), cmd.use_distance_field_text);
- EXPECT_EQ(static_cast<GLint>(16), cmd.pixel_config);
+ EXPECT_EQ(static_cast<GLint>(16), cmd.color_type);
+ EXPECT_EQ(static_cast<GLuint>(17), cmd.color_space_transfer_cache_id);
CheckBytesWrittenMatchesExpectedSize(next_cmd, sizeof(cmd));
}
diff --git a/chromium/gpu/command_buffer/common/gles2_cmd_ids_autogen.h b/chromium/gpu/command_buffer/common/gles2_cmd_ids_autogen.h
index dcc7e2afb99..f843757d76d 100644
--- a/chromium/gpu/command_buffer/common/gles2_cmd_ids_autogen.h
+++ b/chromium/gpu/command_buffer/common/gles2_cmd_ids_autogen.h
@@ -287,67 +287,68 @@
OP(LoseContextCHROMIUM) /* 528 */ \
OP(InsertFenceSyncCHROMIUM) /* 529 */ \
OP(WaitSyncTokenCHROMIUM) /* 530 */ \
- OP(DrawBuffersEXTImmediate) /* 531 */ \
- OP(DiscardBackbufferCHROMIUM) /* 532 */ \
- OP(ScheduleOverlayPlaneCHROMIUM) /* 533 */ \
- OP(ScheduleCALayerSharedStateCHROMIUM) /* 534 */ \
- OP(ScheduleCALayerCHROMIUM) /* 535 */ \
- OP(ScheduleCALayerInUseQueryCHROMIUMImmediate) /* 536 */ \
- OP(CommitOverlayPlanesCHROMIUM) /* 537 */ \
- OP(SwapInterval) /* 538 */ \
- OP(FlushDriverCachesCHROMIUM) /* 539 */ \
- OP(ScheduleDCLayerSharedStateCHROMIUM) /* 540 */ \
- OP(ScheduleDCLayerCHROMIUM) /* 541 */ \
- OP(MatrixLoadfCHROMIUMImmediate) /* 542 */ \
- OP(MatrixLoadIdentityCHROMIUM) /* 543 */ \
- OP(GenPathsCHROMIUM) /* 544 */ \
- OP(DeletePathsCHROMIUM) /* 545 */ \
- OP(IsPathCHROMIUM) /* 546 */ \
- OP(PathCommandsCHROMIUM) /* 547 */ \
- OP(PathParameterfCHROMIUM) /* 548 */ \
- OP(PathParameteriCHROMIUM) /* 549 */ \
- OP(PathStencilFuncCHROMIUM) /* 550 */ \
- OP(StencilFillPathCHROMIUM) /* 551 */ \
- OP(StencilStrokePathCHROMIUM) /* 552 */ \
- OP(CoverFillPathCHROMIUM) /* 553 */ \
- OP(CoverStrokePathCHROMIUM) /* 554 */ \
- OP(StencilThenCoverFillPathCHROMIUM) /* 555 */ \
- OP(StencilThenCoverStrokePathCHROMIUM) /* 556 */ \
- OP(StencilFillPathInstancedCHROMIUM) /* 557 */ \
- OP(StencilStrokePathInstancedCHROMIUM) /* 558 */ \
- OP(CoverFillPathInstancedCHROMIUM) /* 559 */ \
- OP(CoverStrokePathInstancedCHROMIUM) /* 560 */ \
- OP(StencilThenCoverFillPathInstancedCHROMIUM) /* 561 */ \
- OP(StencilThenCoverStrokePathInstancedCHROMIUM) /* 562 */ \
- OP(BindFragmentInputLocationCHROMIUMBucket) /* 563 */ \
- OP(ProgramPathFragmentInputGenCHROMIUM) /* 564 */ \
- OP(GetBufferSubDataAsyncCHROMIUM) /* 565 */ \
- OP(CoverageModulationCHROMIUM) /* 566 */ \
- OP(BlendBarrierKHR) /* 567 */ \
- OP(ApplyScreenSpaceAntialiasingCHROMIUM) /* 568 */ \
- OP(BindFragDataLocationIndexedEXTBucket) /* 569 */ \
- OP(BindFragDataLocationEXTBucket) /* 570 */ \
- OP(GetFragDataIndexEXT) /* 571 */ \
- OP(UniformMatrix4fvStreamTextureMatrixCHROMIUMImmediate) /* 572 */ \
- OP(OverlayPromotionHintCHROMIUM) /* 573 */ \
- OP(SwapBuffersWithBoundsCHROMIUMImmediate) /* 574 */ \
- OP(SetDrawRectangleCHROMIUM) /* 575 */ \
- OP(SetEnableDCLayersCHROMIUM) /* 576 */ \
- OP(InitializeDiscardableTextureCHROMIUM) /* 577 */ \
- OP(UnlockDiscardableTextureCHROMIUM) /* 578 */ \
- OP(LockDiscardableTextureCHROMIUM) /* 579 */ \
- OP(BeginRasterCHROMIUM) /* 580 */ \
- OP(RasterCHROMIUM) /* 581 */ \
- OP(EndRasterCHROMIUM) /* 582 */ \
- OP(CreateTransferCacheEntryINTERNAL) /* 583 */ \
- OP(DeleteTransferCacheEntryINTERNAL) /* 584 */ \
- OP(UnlockTransferCacheEntryINTERNAL) /* 585 */ \
- OP(TexStorage2DImageCHROMIUM) /* 586 */ \
- OP(SetColorSpaceMetadataCHROMIUM) /* 587 */ \
- OP(WindowRectanglesEXTImmediate) /* 588 */ \
- OP(CreateGpuFenceINTERNAL) /* 589 */ \
- OP(WaitGpuFenceCHROMIUM) /* 590 */ \
- OP(DestroyGpuFenceCHROMIUM) /* 591 */
+ OP(UnpremultiplyAndDitherCopyCHROMIUM) /* 531 */ \
+ OP(DrawBuffersEXTImmediate) /* 532 */ \
+ OP(DiscardBackbufferCHROMIUM) /* 533 */ \
+ OP(ScheduleOverlayPlaneCHROMIUM) /* 534 */ \
+ OP(ScheduleCALayerSharedStateCHROMIUM) /* 535 */ \
+ OP(ScheduleCALayerCHROMIUM) /* 536 */ \
+ OP(ScheduleCALayerInUseQueryCHROMIUMImmediate) /* 537 */ \
+ OP(CommitOverlayPlanesCHROMIUM) /* 538 */ \
+ OP(SwapInterval) /* 539 */ \
+ OP(FlushDriverCachesCHROMIUM) /* 540 */ \
+ OP(ScheduleDCLayerSharedStateCHROMIUM) /* 541 */ \
+ OP(ScheduleDCLayerCHROMIUM) /* 542 */ \
+ OP(MatrixLoadfCHROMIUMImmediate) /* 543 */ \
+ OP(MatrixLoadIdentityCHROMIUM) /* 544 */ \
+ OP(GenPathsCHROMIUM) /* 545 */ \
+ OP(DeletePathsCHROMIUM) /* 546 */ \
+ OP(IsPathCHROMIUM) /* 547 */ \
+ OP(PathCommandsCHROMIUM) /* 548 */ \
+ OP(PathParameterfCHROMIUM) /* 549 */ \
+ OP(PathParameteriCHROMIUM) /* 550 */ \
+ OP(PathStencilFuncCHROMIUM) /* 551 */ \
+ OP(StencilFillPathCHROMIUM) /* 552 */ \
+ OP(StencilStrokePathCHROMIUM) /* 553 */ \
+ OP(CoverFillPathCHROMIUM) /* 554 */ \
+ OP(CoverStrokePathCHROMIUM) /* 555 */ \
+ OP(StencilThenCoverFillPathCHROMIUM) /* 556 */ \
+ OP(StencilThenCoverStrokePathCHROMIUM) /* 557 */ \
+ OP(StencilFillPathInstancedCHROMIUM) /* 558 */ \
+ OP(StencilStrokePathInstancedCHROMIUM) /* 559 */ \
+ OP(CoverFillPathInstancedCHROMIUM) /* 560 */ \
+ OP(CoverStrokePathInstancedCHROMIUM) /* 561 */ \
+ OP(StencilThenCoverFillPathInstancedCHROMIUM) /* 562 */ \
+ OP(StencilThenCoverStrokePathInstancedCHROMIUM) /* 563 */ \
+ OP(BindFragmentInputLocationCHROMIUMBucket) /* 564 */ \
+ OP(ProgramPathFragmentInputGenCHROMIUM) /* 565 */ \
+ OP(GetBufferSubDataAsyncCHROMIUM) /* 566 */ \
+ OP(CoverageModulationCHROMIUM) /* 567 */ \
+ OP(BlendBarrierKHR) /* 568 */ \
+ OP(ApplyScreenSpaceAntialiasingCHROMIUM) /* 569 */ \
+ OP(BindFragDataLocationIndexedEXTBucket) /* 570 */ \
+ OP(BindFragDataLocationEXTBucket) /* 571 */ \
+ OP(GetFragDataIndexEXT) /* 572 */ \
+ OP(UniformMatrix4fvStreamTextureMatrixCHROMIUMImmediate) /* 573 */ \
+ OP(OverlayPromotionHintCHROMIUM) /* 574 */ \
+ OP(SwapBuffersWithBoundsCHROMIUMImmediate) /* 575 */ \
+ OP(SetDrawRectangleCHROMIUM) /* 576 */ \
+ OP(SetEnableDCLayersCHROMIUM) /* 577 */ \
+ OP(InitializeDiscardableTextureCHROMIUM) /* 578 */ \
+ OP(UnlockDiscardableTextureCHROMIUM) /* 579 */ \
+ OP(LockDiscardableTextureCHROMIUM) /* 580 */ \
+ OP(BeginRasterCHROMIUM) /* 581 */ \
+ OP(RasterCHROMIUM) /* 582 */ \
+ OP(EndRasterCHROMIUM) /* 583 */ \
+ OP(CreateTransferCacheEntryINTERNAL) /* 584 */ \
+ OP(DeleteTransferCacheEntryINTERNAL) /* 585 */ \
+ OP(UnlockTransferCacheEntryINTERNAL) /* 586 */ \
+ OP(TexStorage2DImageCHROMIUM) /* 587 */ \
+ OP(SetColorSpaceMetadataCHROMIUM) /* 588 */ \
+ OP(WindowRectanglesEXTImmediate) /* 589 */ \
+ OP(CreateGpuFenceINTERNAL) /* 590 */ \
+ OP(WaitGpuFenceCHROMIUM) /* 591 */ \
+ OP(DestroyGpuFenceCHROMIUM) /* 592 */
enum CommandId {
kOneBeforeStartPoint =
diff --git a/chromium/gpu/command_buffer/common/gles2_cmd_utils.cc b/chromium/gpu/command_buffer/common/gles2_cmd_utils.cc
index 6c807a7657a..41dd0691589 100644
--- a/chromium/gpu/command_buffer/common/gles2_cmd_utils.cc
+++ b/chromium/gpu/command_buffer/common/gles2_cmd_utils.cc
@@ -1383,10 +1383,13 @@ void GLES2Util::GetColorFormatComponentSizes(
switch (internal_format) {
case GL_ALPHA8_EXT:
*a = 8;
+ break;
case GL_ALPHA16F_EXT:
*a = 16;
+ break;
case GL_ALPHA32F_EXT:
*a = 32;
+ break;
case GL_RGB8_OES:
case GL_SRGB8:
case GL_RGB8_SNORM:
diff --git a/chromium/gpu/command_buffer/common/gpu_memory_buffer_support.cc b/chromium/gpu/command_buffer/common/gpu_memory_buffer_support.cc
index d86a011fc1c..918d521135c 100644
--- a/chromium/gpu/command_buffer/common/gpu_memory_buffer_support.cc
+++ b/chromium/gpu/command_buffer/common/gpu_memory_buffer_support.cc
@@ -74,8 +74,10 @@ bool IsImageFormatCompatibleWithGpuMemoryBufferFormat(
return format == BufferFormatForInternalFormat(internalformat);
case gfx::BufferFormat::BGR_565:
case gfx::BufferFormat::RGBX_8888:
- case gfx::BufferFormat::BGRX_1010102:
return internalformat == GL_RGB;
+ case gfx::BufferFormat::BGRX_1010102:
+ case gfx::BufferFormat::RGBX_1010102:
+ return internalformat == GL_RGB10_A2_EXT;
case gfx::BufferFormat::RGBA_4444:
case gfx::BufferFormat::RGBA_F16:
return internalformat == GL_RGBA;
@@ -108,11 +110,14 @@ bool IsImageFromGpuMemoryBufferFormatSupported(
return capabilities.texture_rg;
case gfx::BufferFormat::UYVY_422:
return capabilities.image_ycbcr_422;
+ case gfx::BufferFormat::BGRX_1010102:
+ return capabilities.image_xr30;
+ case gfx::BufferFormat::RGBX_1010102:
+ return capabilities.image_xb30;
case gfx::BufferFormat::BGR_565:
case gfx::BufferFormat::RGBA_4444:
case gfx::BufferFormat::RGBA_8888:
case gfx::BufferFormat::RGBX_8888:
- case gfx::BufferFormat::BGRX_1010102:
case gfx::BufferFormat::YVU_420:
return true;
case gfx::BufferFormat::RGBA_F16:
@@ -146,6 +151,7 @@ bool IsImageSizeValidForGpuMemoryBufferFormat(const gfx::Size& size,
case gfx::BufferFormat::BGRA_8888:
case gfx::BufferFormat::BGRX_8888:
case gfx::BufferFormat::BGRX_1010102:
+ case gfx::BufferFormat::RGBX_1010102:
case gfx::BufferFormat::RGBA_F16:
return true;
case gfx::BufferFormat::YVU_420:
diff --git a/chromium/gpu/command_buffer/common/presentation_feedback.h b/chromium/gpu/command_buffer/common/presentation_feedback.h
index cc8585b6b58..5c6413963f8 100644
--- a/chromium/gpu/command_buffer/common/presentation_feedback.h
+++ b/chromium/gpu/command_buffer/common/presentation_feedback.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef UI_GFX_PRESENTAION_FEEDBACK_H_
-#define UI_GFX_PRESENTAION_FEEDBACK_H_
+#ifndef GPU_COMMAND_BUFFER_PRESENTATION_FEEDBACK_H_
+#define GPU_COMMAND_BUFFER_PRESENTATION_FEEDBACK_H_
#include "base/time/time.h"
@@ -48,4 +48,4 @@ struct PresentationFeedback {
} // namespace gfx
-#endif // UI_GFX_PRESENTAION_FEEDBACK_H_
+#endif // GPU_COMMAND_BUFFER_PRESENTATION_FEEDBACK_H_
diff --git a/chromium/gpu/command_buffer/common/raster_cmd_format.cc b/chromium/gpu/command_buffer/common/raster_cmd_format.cc
new file mode 100644
index 00000000000..1a21bed58e5
--- /dev/null
+++ b/chromium/gpu/command_buffer/common/raster_cmd_format.cc
@@ -0,0 +1,33 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file contains the binary format definition of the command buffer and
+// command buffer commands.
+
+// We explicitly do NOT include raster_cmd_format.h here because client side
+// and service side have different requirements.
+#include "gpu/command_buffer/common/cmd_buffer_common.h"
+
+namespace gpu {
+namespace raster {
+
+#include <stddef.h>
+
+#include "gpu/command_buffer/common/raster_cmd_ids_autogen.h"
+
+const char* GetCommandName(CommandId id) {
+ static const char* const names[] = {
+#define RASTER_CMD_OP(name) "k" #name,
+
+ RASTER_COMMAND_LIST(RASTER_CMD_OP)
+
+#undef RASTER_CMD_OP
+ };
+
+ size_t index = static_cast<size_t>(id) - kFirstRasterCommand;
+ return (index < arraysize(names)) ? names[index] : "*unknown-command*";
+}
+
+} // namespace raster
+} // namespace gpu
diff --git a/chromium/gpu/command_buffer/common/raster_cmd_format.h b/chromium/gpu/command_buffer/common/raster_cmd_format.h
new file mode 100644
index 00000000000..b8a7e98c22c
--- /dev/null
+++ b/chromium/gpu/command_buffer/common/raster_cmd_format.h
@@ -0,0 +1,89 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file defines the raster command buffer commands.
+
+#ifndef GPU_COMMAND_BUFFER_COMMON_RASTER_CMD_FORMAT_H_
+#define GPU_COMMAND_BUFFER_COMMON_RASTER_CMD_FORMAT_H_
+
+#include <stddef.h>
+#include <stdint.h>
+#include <string.h>
+
+#include "base/atomicops.h"
+#include "base/logging.h"
+#include "base/macros.h"
+#include "gpu/command_buffer/common/bitfield_helpers.h"
+#include "gpu/command_buffer/common/cmd_buffer_common.h"
+#include "gpu/command_buffer/common/constants.h"
+#include "gpu/command_buffer/common/gl2_types.h"
+#include "gpu/command_buffer/common/gles2_cmd_utils.h"
+#include "gpu/command_buffer/common/raster_cmd_ids.h"
+
+namespace gpu {
+namespace raster {
+
+// Command buffer is GPU_COMMAND_BUFFER_ENTRY_ALIGNMENT byte aligned.
+#pragma pack(push, 4)
+static_assert(GPU_COMMAND_BUFFER_ENTRY_ALIGNMENT == 4,
+ "pragma pack alignment must be equal to "
+ "GPU_COMMAND_BUFFER_ENTRY_ALIGNMENT");
+
+// Used for some glGetXXX commands that return a result through a pointer. We
+// need to know if the command succeeded or not and the size of the result. If
+// the command failed its result size will 0.
+template <typename T>
+struct SizedResult {
+ typedef T Type;
+
+ T* GetData() { return static_cast<T*>(static_cast<void*>(&data)); }
+
+ // Returns the total size in bytes of the SizedResult for a given number of
+ // results including the size field.
+ static size_t ComputeSize(size_t num_results) {
+ return sizeof(T) * num_results + sizeof(uint32_t); // NOLINT
+ }
+
+ // Returns the maximum number of results for a given buffer size.
+ static uint32_t ComputeMaxResults(size_t size_of_buffer) {
+ return (size_of_buffer >= sizeof(uint32_t))
+ ? ((size_of_buffer - sizeof(uint32_t)) / sizeof(T))
+ : 0; // NOLINT
+ }
+
+ // Set the size for a given number of results.
+ void SetNumResults(size_t num_results) {
+ size = sizeof(T) * num_results; // NOLINT
+ }
+
+ // Get the number of elements in the result
+ int32_t GetNumResults() const {
+ return size / sizeof(T); // NOLINT
+ }
+
+ // Copy the result.
+ void CopyResult(void* dst) const { memcpy(dst, &data, size); }
+
+ uint32_t size; // in bytes.
+ int32_t data; // this is just here to get an offset.
+};
+
+static_assert(sizeof(SizedResult<int8_t>) == 8,
+ "size of SizedResult<int8_t> should be 8");
+static_assert(offsetof(SizedResult<int8_t>, size) == 0,
+ "offset of SizedResult<int8_t>.size should be 0");
+static_assert(offsetof(SizedResult<int8_t>, data) == 4,
+ "offset of SizedResult<int8_t>.data should be 4");
+
+namespace cmds {
+
+#include "gpu/command_buffer/common/raster_cmd_format_autogen.h"
+
+#pragma pack(pop)
+
+} // namespace cmd
+} // namespace raster
+} // namespace gpu
+
+#endif // GPU_COMMAND_BUFFER_COMMON_RASTER_CMD_FORMAT_H_
diff --git a/chromium/gpu/command_buffer/common/raster_cmd_format_autogen.h b/chromium/gpu/command_buffer/common/raster_cmd_format_autogen.h
new file mode 100644
index 00000000000..27b2434e2c0
--- /dev/null
+++ b/chromium/gpu/command_buffer/common/raster_cmd_format_autogen.h
@@ -0,0 +1,1158 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file is auto-generated from
+// gpu/command_buffer/build_raster_cmd_buffer.py
+// It's formatted by clang-format using chromium coding style:
+// clang-format -i -style=chromium filename
+// DO NOT EDIT!
+
+#ifndef GPU_COMMAND_BUFFER_COMMON_RASTER_CMD_FORMAT_AUTOGEN_H_
+#define GPU_COMMAND_BUFFER_COMMON_RASTER_CMD_FORMAT_AUTOGEN_H_
+
+#define GL_SCANOUT_CHROMIUM 0x6000
+
+struct BindTexture {
+ typedef BindTexture ValueType;
+ static const CommandId kCmdId = kBindTexture;
+ static const cmd::ArgFlags kArgFlags = cmd::kFixed;
+ static const uint8_t cmd_flags = CMD_FLAG_SET_TRACE_LEVEL(2);
+
+ static uint32_t ComputeSize() {
+ return static_cast<uint32_t>(sizeof(ValueType)); // NOLINT
+ }
+
+ void SetHeader() { header.SetCmd<ValueType>(); }
+
+ void Init(GLenum _target, GLuint _texture) {
+ SetHeader();
+ target = _target;
+ texture = _texture;
+ }
+
+ void* Set(void* cmd, GLenum _target, GLuint _texture) {
+ static_cast<ValueType*>(cmd)->Init(_target, _texture);
+ return NextCmdAddress<ValueType>(cmd);
+ }
+
+ gpu::CommandHeader header;
+ uint32_t target;
+ uint32_t texture;
+};
+
+static_assert(sizeof(BindTexture) == 12, "size of BindTexture should be 12");
+static_assert(offsetof(BindTexture, header) == 0,
+ "offset of BindTexture header should be 0");
+static_assert(offsetof(BindTexture, target) == 4,
+ "offset of BindTexture target should be 4");
+static_assert(offsetof(BindTexture, texture) == 8,
+ "offset of BindTexture texture should be 8");
+
+struct DeleteTexturesImmediate {
+ typedef DeleteTexturesImmediate ValueType;
+ static const CommandId kCmdId = kDeleteTexturesImmediate;
+ static const cmd::ArgFlags kArgFlags = cmd::kAtLeastN;
+ static const uint8_t cmd_flags = CMD_FLAG_SET_TRACE_LEVEL(3);
+
+ static uint32_t ComputeDataSize(GLsizei _n) {
+ return static_cast<uint32_t>(sizeof(GLuint) * _n); // NOLINT
+ }
+
+ static uint32_t ComputeSize(GLsizei _n) {
+ return static_cast<uint32_t>(sizeof(ValueType) +
+ ComputeDataSize(_n)); // NOLINT
+ }
+
+ void SetHeader(GLsizei _n) {
+ header.SetCmdByTotalSize<ValueType>(ComputeSize(_n));
+ }
+
+ void Init(GLsizei _n, const GLuint* _textures) {
+ SetHeader(_n);
+ n = _n;
+ memcpy(ImmediateDataAddress(this), _textures, ComputeDataSize(_n));
+ }
+
+ void* Set(void* cmd, GLsizei _n, const GLuint* _textures) {
+ static_cast<ValueType*>(cmd)->Init(_n, _textures);
+ const uint32_t size = ComputeSize(_n);
+ return NextImmediateCmdAddressTotalSize<ValueType>(cmd, size);
+ }
+
+ gpu::CommandHeader header;
+ int32_t n;
+};
+
+static_assert(sizeof(DeleteTexturesImmediate) == 8,
+ "size of DeleteTexturesImmediate should be 8");
+static_assert(offsetof(DeleteTexturesImmediate, header) == 0,
+ "offset of DeleteTexturesImmediate header should be 0");
+static_assert(offsetof(DeleteTexturesImmediate, n) == 4,
+ "offset of DeleteTexturesImmediate n should be 4");
+
+struct Finish {
+ typedef Finish ValueType;
+ static const CommandId kCmdId = kFinish;
+ static const cmd::ArgFlags kArgFlags = cmd::kFixed;
+ static const uint8_t cmd_flags = CMD_FLAG_SET_TRACE_LEVEL(1);
+
+ static uint32_t ComputeSize() {
+ return static_cast<uint32_t>(sizeof(ValueType)); // NOLINT
+ }
+
+ void SetHeader() { header.SetCmd<ValueType>(); }
+
+ void Init() { SetHeader(); }
+
+ void* Set(void* cmd) {
+ static_cast<ValueType*>(cmd)->Init();
+ return NextCmdAddress<ValueType>(cmd);
+ }
+
+ gpu::CommandHeader header;
+};
+
+static_assert(sizeof(Finish) == 4, "size of Finish should be 4");
+static_assert(offsetof(Finish, header) == 0,
+ "offset of Finish header should be 0");
+
+struct Flush {
+ typedef Flush ValueType;
+ static const CommandId kCmdId = kFlush;
+ static const cmd::ArgFlags kArgFlags = cmd::kFixed;
+ static const uint8_t cmd_flags = CMD_FLAG_SET_TRACE_LEVEL(1);
+
+ static uint32_t ComputeSize() {
+ return static_cast<uint32_t>(sizeof(ValueType)); // NOLINT
+ }
+
+ void SetHeader() { header.SetCmd<ValueType>(); }
+
+ void Init() { SetHeader(); }
+
+ void* Set(void* cmd) {
+ static_cast<ValueType*>(cmd)->Init();
+ return NextCmdAddress<ValueType>(cmd);
+ }
+
+ gpu::CommandHeader header;
+};
+
+static_assert(sizeof(Flush) == 4, "size of Flush should be 4");
+static_assert(offsetof(Flush, header) == 0,
+ "offset of Flush header should be 0");
+
+struct GenTexturesImmediate {
+ typedef GenTexturesImmediate ValueType;
+ static const CommandId kCmdId = kGenTexturesImmediate;
+ static const cmd::ArgFlags kArgFlags = cmd::kAtLeastN;
+ static const uint8_t cmd_flags = CMD_FLAG_SET_TRACE_LEVEL(3);
+
+ static uint32_t ComputeDataSize(GLsizei _n) {
+ return static_cast<uint32_t>(sizeof(GLuint) * _n); // NOLINT
+ }
+
+ static uint32_t ComputeSize(GLsizei _n) {
+ return static_cast<uint32_t>(sizeof(ValueType) +
+ ComputeDataSize(_n)); // NOLINT
+ }
+
+ void SetHeader(GLsizei _n) {
+ header.SetCmdByTotalSize<ValueType>(ComputeSize(_n));
+ }
+
+ void Init(GLsizei _n, GLuint* _textures) {
+ SetHeader(_n);
+ n = _n;
+ memcpy(ImmediateDataAddress(this), _textures, ComputeDataSize(_n));
+ }
+
+ void* Set(void* cmd, GLsizei _n, GLuint* _textures) {
+ static_cast<ValueType*>(cmd)->Init(_n, _textures);
+ const uint32_t size = ComputeSize(_n);
+ return NextImmediateCmdAddressTotalSize<ValueType>(cmd, size);
+ }
+
+ gpu::CommandHeader header;
+ int32_t n;
+};
+
+static_assert(sizeof(GenTexturesImmediate) == 8,
+ "size of GenTexturesImmediate should be 8");
+static_assert(offsetof(GenTexturesImmediate, header) == 0,
+ "offset of GenTexturesImmediate header should be 0");
+static_assert(offsetof(GenTexturesImmediate, n) == 4,
+ "offset of GenTexturesImmediate n should be 4");
+
+struct GetError {
+ typedef GetError ValueType;
+ static const CommandId kCmdId = kGetError;
+ static const cmd::ArgFlags kArgFlags = cmd::kFixed;
+ static const uint8_t cmd_flags = CMD_FLAG_SET_TRACE_LEVEL(3);
+
+ typedef GLenum Result;
+
+ static uint32_t ComputeSize() {
+ return static_cast<uint32_t>(sizeof(ValueType)); // NOLINT
+ }
+
+ void SetHeader() { header.SetCmd<ValueType>(); }
+
+ void Init(uint32_t _result_shm_id, uint32_t _result_shm_offset) {
+ SetHeader();
+ result_shm_id = _result_shm_id;
+ result_shm_offset = _result_shm_offset;
+ }
+
+ void* Set(void* cmd, uint32_t _result_shm_id, uint32_t _result_shm_offset) {
+ static_cast<ValueType*>(cmd)->Init(_result_shm_id, _result_shm_offset);
+ return NextCmdAddress<ValueType>(cmd);
+ }
+
+ gpu::CommandHeader header;
+ uint32_t result_shm_id;
+ uint32_t result_shm_offset;
+};
+
+static_assert(sizeof(GetError) == 12, "size of GetError should be 12");
+static_assert(offsetof(GetError, header) == 0,
+ "offset of GetError header should be 0");
+static_assert(offsetof(GetError, result_shm_id) == 4,
+ "offset of GetError result_shm_id should be 4");
+static_assert(offsetof(GetError, result_shm_offset) == 8,
+ "offset of GetError result_shm_offset should be 8");
+
+struct GetIntegerv {
+ typedef GetIntegerv ValueType;
+ static const CommandId kCmdId = kGetIntegerv;
+ static const cmd::ArgFlags kArgFlags = cmd::kFixed;
+ static const uint8_t cmd_flags = CMD_FLAG_SET_TRACE_LEVEL(3);
+
+ typedef SizedResult<GLint> Result;
+
+ static uint32_t ComputeSize() {
+ return static_cast<uint32_t>(sizeof(ValueType)); // NOLINT
+ }
+
+ void SetHeader() { header.SetCmd<ValueType>(); }
+
+ void Init(GLenum _pname,
+ uint32_t _params_shm_id,
+ uint32_t _params_shm_offset) {
+ SetHeader();
+ pname = _pname;
+ params_shm_id = _params_shm_id;
+ params_shm_offset = _params_shm_offset;
+ }
+
+ void* Set(void* cmd,
+ GLenum _pname,
+ uint32_t _params_shm_id,
+ uint32_t _params_shm_offset) {
+ static_cast<ValueType*>(cmd)->Init(_pname, _params_shm_id,
+ _params_shm_offset);
+ return NextCmdAddress<ValueType>(cmd);
+ }
+
+ gpu::CommandHeader header;
+ uint32_t pname;
+ uint32_t params_shm_id;
+ uint32_t params_shm_offset;
+};
+
+static_assert(sizeof(GetIntegerv) == 16, "size of GetIntegerv should be 16");
+static_assert(offsetof(GetIntegerv, header) == 0,
+ "offset of GetIntegerv header should be 0");
+static_assert(offsetof(GetIntegerv, pname) == 4,
+ "offset of GetIntegerv pname should be 4");
+static_assert(offsetof(GetIntegerv, params_shm_id) == 8,
+ "offset of GetIntegerv params_shm_id should be 8");
+static_assert(offsetof(GetIntegerv, params_shm_offset) == 12,
+ "offset of GetIntegerv params_shm_offset should be 12");
+
+struct TexParameteri {
+ typedef TexParameteri ValueType;
+ static const CommandId kCmdId = kTexParameteri;
+ static const cmd::ArgFlags kArgFlags = cmd::kFixed;
+ static const uint8_t cmd_flags = CMD_FLAG_SET_TRACE_LEVEL(3);
+
+ static uint32_t ComputeSize() {
+ return static_cast<uint32_t>(sizeof(ValueType)); // NOLINT
+ }
+
+ void SetHeader() { header.SetCmd<ValueType>(); }
+
+ void Init(GLenum _target, GLenum _pname, GLint _param) {
+ SetHeader();
+ target = _target;
+ pname = _pname;
+ param = _param;
+ }
+
+ void* Set(void* cmd, GLenum _target, GLenum _pname, GLint _param) {
+ static_cast<ValueType*>(cmd)->Init(_target, _pname, _param);
+ return NextCmdAddress<ValueType>(cmd);
+ }
+
+ gpu::CommandHeader header;
+ uint32_t target;
+ uint32_t pname;
+ int32_t param;
+};
+
+static_assert(sizeof(TexParameteri) == 16,
+ "size of TexParameteri should be 16");
+static_assert(offsetof(TexParameteri, header) == 0,
+ "offset of TexParameteri header should be 0");
+static_assert(offsetof(TexParameteri, target) == 4,
+ "offset of TexParameteri target should be 4");
+static_assert(offsetof(TexParameteri, pname) == 8,
+ "offset of TexParameteri pname should be 8");
+static_assert(offsetof(TexParameteri, param) == 12,
+ "offset of TexParameteri param should be 12");
+
+struct GenQueriesEXTImmediate {
+ typedef GenQueriesEXTImmediate ValueType;
+ static const CommandId kCmdId = kGenQueriesEXTImmediate;
+ static const cmd::ArgFlags kArgFlags = cmd::kAtLeastN;
+ static const uint8_t cmd_flags = CMD_FLAG_SET_TRACE_LEVEL(3);
+
+ static uint32_t ComputeDataSize(GLsizei _n) {
+ return static_cast<uint32_t>(sizeof(GLuint) * _n); // NOLINT
+ }
+
+ static uint32_t ComputeSize(GLsizei _n) {
+ return static_cast<uint32_t>(sizeof(ValueType) +
+ ComputeDataSize(_n)); // NOLINT
+ }
+
+ void SetHeader(GLsizei _n) {
+ header.SetCmdByTotalSize<ValueType>(ComputeSize(_n));
+ }
+
+ void Init(GLsizei _n, GLuint* _queries) {
+ SetHeader(_n);
+ n = _n;
+ memcpy(ImmediateDataAddress(this), _queries, ComputeDataSize(_n));
+ }
+
+ void* Set(void* cmd, GLsizei _n, GLuint* _queries) {
+ static_cast<ValueType*>(cmd)->Init(_n, _queries);
+ const uint32_t size = ComputeSize(_n);
+ return NextImmediateCmdAddressTotalSize<ValueType>(cmd, size);
+ }
+
+ gpu::CommandHeader header;
+ int32_t n;
+};
+
+static_assert(sizeof(GenQueriesEXTImmediate) == 8,
+ "size of GenQueriesEXTImmediate should be 8");
+static_assert(offsetof(GenQueriesEXTImmediate, header) == 0,
+ "offset of GenQueriesEXTImmediate header should be 0");
+static_assert(offsetof(GenQueriesEXTImmediate, n) == 4,
+ "offset of GenQueriesEXTImmediate n should be 4");
+
+struct DeleteQueriesEXTImmediate {
+ typedef DeleteQueriesEXTImmediate ValueType;
+ static const CommandId kCmdId = kDeleteQueriesEXTImmediate;
+ static const cmd::ArgFlags kArgFlags = cmd::kAtLeastN;
+ static const uint8_t cmd_flags = CMD_FLAG_SET_TRACE_LEVEL(3);
+
+ static uint32_t ComputeDataSize(GLsizei _n) {
+ return static_cast<uint32_t>(sizeof(GLuint) * _n); // NOLINT
+ }
+
+ static uint32_t ComputeSize(GLsizei _n) {
+ return static_cast<uint32_t>(sizeof(ValueType) +
+ ComputeDataSize(_n)); // NOLINT
+ }
+
+ void SetHeader(GLsizei _n) {
+ header.SetCmdByTotalSize<ValueType>(ComputeSize(_n));
+ }
+
+ void Init(GLsizei _n, const GLuint* _queries) {
+ SetHeader(_n);
+ n = _n;
+ memcpy(ImmediateDataAddress(this), _queries, ComputeDataSize(_n));
+ }
+
+ void* Set(void* cmd, GLsizei _n, const GLuint* _queries) {
+ static_cast<ValueType*>(cmd)->Init(_n, _queries);
+ const uint32_t size = ComputeSize(_n);
+ return NextImmediateCmdAddressTotalSize<ValueType>(cmd, size);
+ }
+
+ gpu::CommandHeader header;
+ int32_t n;
+};
+
+static_assert(sizeof(DeleteQueriesEXTImmediate) == 8,
+ "size of DeleteQueriesEXTImmediate should be 8");
+static_assert(offsetof(DeleteQueriesEXTImmediate, header) == 0,
+ "offset of DeleteQueriesEXTImmediate header should be 0");
+static_assert(offsetof(DeleteQueriesEXTImmediate, n) == 4,
+ "offset of DeleteQueriesEXTImmediate n should be 4");
+
+struct BeginQueryEXT {
+ typedef BeginQueryEXT ValueType;
+ static const CommandId kCmdId = kBeginQueryEXT;
+ static const cmd::ArgFlags kArgFlags = cmd::kFixed;
+ static const uint8_t cmd_flags = CMD_FLAG_SET_TRACE_LEVEL(3);
+
+ static uint32_t ComputeSize() {
+ return static_cast<uint32_t>(sizeof(ValueType)); // NOLINT
+ }
+
+ void SetHeader() { header.SetCmd<ValueType>(); }
+
+ void Init(GLenum _target,
+ GLuint _id,
+ uint32_t _sync_data_shm_id,
+ uint32_t _sync_data_shm_offset) {
+ SetHeader();
+ target = _target;
+ id = _id;
+ sync_data_shm_id = _sync_data_shm_id;
+ sync_data_shm_offset = _sync_data_shm_offset;
+ }
+
+ void* Set(void* cmd,
+ GLenum _target,
+ GLuint _id,
+ uint32_t _sync_data_shm_id,
+ uint32_t _sync_data_shm_offset) {
+ static_cast<ValueType*>(cmd)->Init(_target, _id, _sync_data_shm_id,
+ _sync_data_shm_offset);
+ return NextCmdAddress<ValueType>(cmd);
+ }
+
+ gpu::CommandHeader header;
+ uint32_t target;
+ uint32_t id;
+ uint32_t sync_data_shm_id;
+ uint32_t sync_data_shm_offset;
+};
+
+static_assert(sizeof(BeginQueryEXT) == 20,
+ "size of BeginQueryEXT should be 20");
+static_assert(offsetof(BeginQueryEXT, header) == 0,
+ "offset of BeginQueryEXT header should be 0");
+static_assert(offsetof(BeginQueryEXT, target) == 4,
+ "offset of BeginQueryEXT target should be 4");
+static_assert(offsetof(BeginQueryEXT, id) == 8,
+ "offset of BeginQueryEXT id should be 8");
+static_assert(offsetof(BeginQueryEXT, sync_data_shm_id) == 12,
+ "offset of BeginQueryEXT sync_data_shm_id should be 12");
+static_assert(offsetof(BeginQueryEXT, sync_data_shm_offset) == 16,
+ "offset of BeginQueryEXT sync_data_shm_offset should be 16");
+
+struct EndQueryEXT {
+ typedef EndQueryEXT ValueType;
+ static const CommandId kCmdId = kEndQueryEXT;
+ static const cmd::ArgFlags kArgFlags = cmd::kFixed;
+ static const uint8_t cmd_flags = CMD_FLAG_SET_TRACE_LEVEL(3);
+
+ static uint32_t ComputeSize() {
+ return static_cast<uint32_t>(sizeof(ValueType)); // NOLINT
+ }
+
+ void SetHeader() { header.SetCmd<ValueType>(); }
+
+ void Init(GLenum _target, GLuint _submit_count) {
+ SetHeader();
+ target = _target;
+ submit_count = _submit_count;
+ }
+
+ void* Set(void* cmd, GLenum _target, GLuint _submit_count) {
+ static_cast<ValueType*>(cmd)->Init(_target, _submit_count);
+ return NextCmdAddress<ValueType>(cmd);
+ }
+
+ gpu::CommandHeader header;
+ uint32_t target;
+ uint32_t submit_count;
+};
+
+static_assert(sizeof(EndQueryEXT) == 12, "size of EndQueryEXT should be 12");
+static_assert(offsetof(EndQueryEXT, header) == 0,
+ "offset of EndQueryEXT header should be 0");
+static_assert(offsetof(EndQueryEXT, target) == 4,
+ "offset of EndQueryEXT target should be 4");
+static_assert(offsetof(EndQueryEXT, submit_count) == 8,
+ "offset of EndQueryEXT submit_count should be 8");
+
+struct CompressedCopyTextureCHROMIUM {
+ typedef CompressedCopyTextureCHROMIUM ValueType;
+ static const CommandId kCmdId = kCompressedCopyTextureCHROMIUM;
+ static const cmd::ArgFlags kArgFlags = cmd::kFixed;
+ static const uint8_t cmd_flags = CMD_FLAG_SET_TRACE_LEVEL(3);
+
+ static uint32_t ComputeSize() {
+ return static_cast<uint32_t>(sizeof(ValueType)); // NOLINT
+ }
+
+ void SetHeader() { header.SetCmd<ValueType>(); }
+
+ void Init(GLuint _source_id, GLuint _dest_id) {
+ SetHeader();
+ source_id = _source_id;
+ dest_id = _dest_id;
+ }
+
+ void* Set(void* cmd, GLuint _source_id, GLuint _dest_id) {
+ static_cast<ValueType*>(cmd)->Init(_source_id, _dest_id);
+ return NextCmdAddress<ValueType>(cmd);
+ }
+
+ gpu::CommandHeader header;
+ uint32_t source_id;
+ uint32_t dest_id;
+};
+
+static_assert(sizeof(CompressedCopyTextureCHROMIUM) == 12,
+ "size of CompressedCopyTextureCHROMIUM should be 12");
+static_assert(offsetof(CompressedCopyTextureCHROMIUM, header) == 0,
+ "offset of CompressedCopyTextureCHROMIUM header should be 0");
+static_assert(offsetof(CompressedCopyTextureCHROMIUM, source_id) == 4,
+ "offset of CompressedCopyTextureCHROMIUM source_id should be 4");
+static_assert(offsetof(CompressedCopyTextureCHROMIUM, dest_id) == 8,
+ "offset of CompressedCopyTextureCHROMIUM dest_id should be 8");
+
+struct LoseContextCHROMIUM {
+ typedef LoseContextCHROMIUM ValueType;
+ static const CommandId kCmdId = kLoseContextCHROMIUM;
+ static const cmd::ArgFlags kArgFlags = cmd::kFixed;
+ static const uint8_t cmd_flags = CMD_FLAG_SET_TRACE_LEVEL(1);
+
+ static uint32_t ComputeSize() {
+ return static_cast<uint32_t>(sizeof(ValueType)); // NOLINT
+ }
+
+ void SetHeader() { header.SetCmd<ValueType>(); }
+
+ void Init(GLenum _current, GLenum _other) {
+ SetHeader();
+ current = _current;
+ other = _other;
+ }
+
+ void* Set(void* cmd, GLenum _current, GLenum _other) {
+ static_cast<ValueType*>(cmd)->Init(_current, _other);
+ return NextCmdAddress<ValueType>(cmd);
+ }
+
+ gpu::CommandHeader header;
+ uint32_t current;
+ uint32_t other;
+};
+
+static_assert(sizeof(LoseContextCHROMIUM) == 12,
+ "size of LoseContextCHROMIUM should be 12");
+static_assert(offsetof(LoseContextCHROMIUM, header) == 0,
+ "offset of LoseContextCHROMIUM header should be 0");
+static_assert(offsetof(LoseContextCHROMIUM, current) == 4,
+ "offset of LoseContextCHROMIUM current should be 4");
+static_assert(offsetof(LoseContextCHROMIUM, other) == 8,
+ "offset of LoseContextCHROMIUM other should be 8");
+
+struct InsertFenceSyncCHROMIUM {
+ typedef InsertFenceSyncCHROMIUM ValueType;
+ static const CommandId kCmdId = kInsertFenceSyncCHROMIUM;
+ static const cmd::ArgFlags kArgFlags = cmd::kFixed;
+ static const uint8_t cmd_flags = CMD_FLAG_SET_TRACE_LEVEL(1);
+
+ static uint32_t ComputeSize() {
+ return static_cast<uint32_t>(sizeof(ValueType)); // NOLINT
+ }
+
+ void SetHeader() { header.SetCmd<ValueType>(); }
+
+ void Init(GLuint64 _release_count) {
+ SetHeader();
+ gles2::GLES2Util::MapUint64ToTwoUint32(
+ static_cast<uint64_t>(_release_count), &release_count_0,
+ &release_count_1);
+ }
+
+ void* Set(void* cmd, GLuint64 _release_count) {
+ static_cast<ValueType*>(cmd)->Init(_release_count);
+ return NextCmdAddress<ValueType>(cmd);
+ }
+
+ GLuint64 release_count() const volatile {
+ return static_cast<GLuint64>(gles2::GLES2Util::MapTwoUint32ToUint64(
+ release_count_0, release_count_1));
+ }
+
+ gpu::CommandHeader header;
+ uint32_t release_count_0;
+ uint32_t release_count_1;
+};
+
+static_assert(sizeof(InsertFenceSyncCHROMIUM) == 12,
+ "size of InsertFenceSyncCHROMIUM should be 12");
+static_assert(offsetof(InsertFenceSyncCHROMIUM, header) == 0,
+ "offset of InsertFenceSyncCHROMIUM header should be 0");
+static_assert(offsetof(InsertFenceSyncCHROMIUM, release_count_0) == 4,
+ "offset of InsertFenceSyncCHROMIUM release_count_0 should be 4");
+static_assert(offsetof(InsertFenceSyncCHROMIUM, release_count_1) == 8,
+ "offset of InsertFenceSyncCHROMIUM release_count_1 should be 8");
+
+struct WaitSyncTokenCHROMIUM {
+ typedef WaitSyncTokenCHROMIUM ValueType;
+ static const CommandId kCmdId = kWaitSyncTokenCHROMIUM;
+ static const cmd::ArgFlags kArgFlags = cmd::kFixed;
+ static const uint8_t cmd_flags = CMD_FLAG_SET_TRACE_LEVEL(3);
+
+ static uint32_t ComputeSize() {
+ return static_cast<uint32_t>(sizeof(ValueType)); // NOLINT
+ }
+
+ void SetHeader() { header.SetCmd<ValueType>(); }
+
+ void Init(GLint _namespace_id,
+ GLuint64 _command_buffer_id,
+ GLuint64 _release_count) {
+ SetHeader();
+ namespace_id = _namespace_id;
+ gles2::GLES2Util::MapUint64ToTwoUint32(
+ static_cast<uint64_t>(_command_buffer_id), &command_buffer_id_0,
+ &command_buffer_id_1);
+ gles2::GLES2Util::MapUint64ToTwoUint32(
+ static_cast<uint64_t>(_release_count), &release_count_0,
+ &release_count_1);
+ }
+
+ void* Set(void* cmd,
+ GLint _namespace_id,
+ GLuint64 _command_buffer_id,
+ GLuint64 _release_count) {
+ static_cast<ValueType*>(cmd)->Init(_namespace_id, _command_buffer_id,
+ _release_count);
+ return NextCmdAddress<ValueType>(cmd);
+ }
+
+ GLuint64 command_buffer_id() const volatile {
+ return static_cast<GLuint64>(gles2::GLES2Util::MapTwoUint32ToUint64(
+ command_buffer_id_0, command_buffer_id_1));
+ }
+
+ GLuint64 release_count() const volatile {
+ return static_cast<GLuint64>(gles2::GLES2Util::MapTwoUint32ToUint64(
+ release_count_0, release_count_1));
+ }
+
+ gpu::CommandHeader header;
+ int32_t namespace_id;
+ uint32_t command_buffer_id_0;
+ uint32_t command_buffer_id_1;
+ uint32_t release_count_0;
+ uint32_t release_count_1;
+};
+
+static_assert(sizeof(WaitSyncTokenCHROMIUM) == 24,
+ "size of WaitSyncTokenCHROMIUM should be 24");
+static_assert(offsetof(WaitSyncTokenCHROMIUM, header) == 0,
+ "offset of WaitSyncTokenCHROMIUM header should be 0");
+static_assert(offsetof(WaitSyncTokenCHROMIUM, namespace_id) == 4,
+ "offset of WaitSyncTokenCHROMIUM namespace_id should be 4");
+static_assert(
+ offsetof(WaitSyncTokenCHROMIUM, command_buffer_id_0) == 8,
+ "offset of WaitSyncTokenCHROMIUM command_buffer_id_0 should be 8");
+static_assert(
+ offsetof(WaitSyncTokenCHROMIUM, command_buffer_id_1) == 12,
+ "offset of WaitSyncTokenCHROMIUM command_buffer_id_1 should be 12");
+static_assert(offsetof(WaitSyncTokenCHROMIUM, release_count_0) == 16,
+ "offset of WaitSyncTokenCHROMIUM release_count_0 should be 16");
+static_assert(offsetof(WaitSyncTokenCHROMIUM, release_count_1) == 20,
+ "offset of WaitSyncTokenCHROMIUM release_count_1 should be 20");
+
+struct UnpremultiplyAndDitherCopyCHROMIUM {
+ typedef UnpremultiplyAndDitherCopyCHROMIUM ValueType;
+ static const CommandId kCmdId = kUnpremultiplyAndDitherCopyCHROMIUM;
+ static const cmd::ArgFlags kArgFlags = cmd::kFixed;
+ static const uint8_t cmd_flags = CMD_FLAG_SET_TRACE_LEVEL(3);
+
+ static uint32_t ComputeSize() {
+ return static_cast<uint32_t>(sizeof(ValueType)); // NOLINT
+ }
+
+ void SetHeader() { header.SetCmd<ValueType>(); }
+
+ void Init(GLuint _source_id,
+ GLuint _dest_id,
+ GLint _x,
+ GLint _y,
+ GLsizei _width,
+ GLsizei _height) {
+ SetHeader();
+ source_id = _source_id;
+ dest_id = _dest_id;
+ x = _x;
+ y = _y;
+ width = _width;
+ height = _height;
+ }
+
+ void* Set(void* cmd,
+ GLuint _source_id,
+ GLuint _dest_id,
+ GLint _x,
+ GLint _y,
+ GLsizei _width,
+ GLsizei _height) {
+ static_cast<ValueType*>(cmd)->Init(_source_id, _dest_id, _x, _y, _width,
+ _height);
+ return NextCmdAddress<ValueType>(cmd);
+ }
+
+ gpu::CommandHeader header;
+ uint32_t source_id;
+ uint32_t dest_id;
+ int32_t x;
+ int32_t y;
+ int32_t width;
+ int32_t height;
+};
+
+static_assert(sizeof(UnpremultiplyAndDitherCopyCHROMIUM) == 28,
+ "size of UnpremultiplyAndDitherCopyCHROMIUM should be 28");
+static_assert(
+ offsetof(UnpremultiplyAndDitherCopyCHROMIUM, header) == 0,
+ "offset of UnpremultiplyAndDitherCopyCHROMIUM header should be 0");
+static_assert(
+ offsetof(UnpremultiplyAndDitherCopyCHROMIUM, source_id) == 4,
+ "offset of UnpremultiplyAndDitherCopyCHROMIUM source_id should be 4");
+static_assert(
+ offsetof(UnpremultiplyAndDitherCopyCHROMIUM, dest_id) == 8,
+ "offset of UnpremultiplyAndDitherCopyCHROMIUM dest_id should be 8");
+static_assert(offsetof(UnpremultiplyAndDitherCopyCHROMIUM, x) == 12,
+ "offset of UnpremultiplyAndDitherCopyCHROMIUM x should be 12");
+static_assert(offsetof(UnpremultiplyAndDitherCopyCHROMIUM, y) == 16,
+ "offset of UnpremultiplyAndDitherCopyCHROMIUM y should be 16");
+static_assert(
+ offsetof(UnpremultiplyAndDitherCopyCHROMIUM, width) == 20,
+ "offset of UnpremultiplyAndDitherCopyCHROMIUM width should be 20");
+static_assert(
+ offsetof(UnpremultiplyAndDitherCopyCHROMIUM, height) == 24,
+ "offset of UnpremultiplyAndDitherCopyCHROMIUM height should be 24");
+
+struct InitializeDiscardableTextureCHROMIUM {
+ typedef InitializeDiscardableTextureCHROMIUM ValueType;
+ static const CommandId kCmdId = kInitializeDiscardableTextureCHROMIUM;
+ static const cmd::ArgFlags kArgFlags = cmd::kFixed;
+ static const uint8_t cmd_flags = CMD_FLAG_SET_TRACE_LEVEL(3);
+
+ static uint32_t ComputeSize() {
+ return static_cast<uint32_t>(sizeof(ValueType)); // NOLINT
+ }
+
+ void SetHeader() { header.SetCmd<ValueType>(); }
+
+ void Init(GLuint _texture_id, uint32_t _shm_id, uint32_t _shm_offset) {
+ SetHeader();
+ texture_id = _texture_id;
+ shm_id = _shm_id;
+ shm_offset = _shm_offset;
+ }
+
+ void* Set(void* cmd,
+ GLuint _texture_id,
+ uint32_t _shm_id,
+ uint32_t _shm_offset) {
+ static_cast<ValueType*>(cmd)->Init(_texture_id, _shm_id, _shm_offset);
+ return NextCmdAddress<ValueType>(cmd);
+ }
+
+ gpu::CommandHeader header;
+ uint32_t texture_id;
+ uint32_t shm_id;
+ uint32_t shm_offset;
+};
+
+static_assert(sizeof(InitializeDiscardableTextureCHROMIUM) == 16,
+ "size of InitializeDiscardableTextureCHROMIUM should be 16");
+static_assert(
+ offsetof(InitializeDiscardableTextureCHROMIUM, header) == 0,
+ "offset of InitializeDiscardableTextureCHROMIUM header should be 0");
+static_assert(
+ offsetof(InitializeDiscardableTextureCHROMIUM, texture_id) == 4,
+ "offset of InitializeDiscardableTextureCHROMIUM texture_id should be 4");
+static_assert(
+ offsetof(InitializeDiscardableTextureCHROMIUM, shm_id) == 8,
+ "offset of InitializeDiscardableTextureCHROMIUM shm_id should be 8");
+static_assert(
+ offsetof(InitializeDiscardableTextureCHROMIUM, shm_offset) == 12,
+ "offset of InitializeDiscardableTextureCHROMIUM shm_offset should be 12");
+
+struct UnlockDiscardableTextureCHROMIUM {
+ typedef UnlockDiscardableTextureCHROMIUM ValueType;
+ static const CommandId kCmdId = kUnlockDiscardableTextureCHROMIUM;
+ static const cmd::ArgFlags kArgFlags = cmd::kFixed;
+ static const uint8_t cmd_flags = CMD_FLAG_SET_TRACE_LEVEL(3);
+
+ static uint32_t ComputeSize() {
+ return static_cast<uint32_t>(sizeof(ValueType)); // NOLINT
+ }
+
+ void SetHeader() { header.SetCmd<ValueType>(); }
+
+ void Init(GLuint _texture_id) {
+ SetHeader();
+ texture_id = _texture_id;
+ }
+
+ void* Set(void* cmd, GLuint _texture_id) {
+ static_cast<ValueType*>(cmd)->Init(_texture_id);
+ return NextCmdAddress<ValueType>(cmd);
+ }
+
+ gpu::CommandHeader header;
+ uint32_t texture_id;
+};
+
+static_assert(sizeof(UnlockDiscardableTextureCHROMIUM) == 8,
+ "size of UnlockDiscardableTextureCHROMIUM should be 8");
+static_assert(offsetof(UnlockDiscardableTextureCHROMIUM, header) == 0,
+ "offset of UnlockDiscardableTextureCHROMIUM header should be 0");
+static_assert(
+ offsetof(UnlockDiscardableTextureCHROMIUM, texture_id) == 4,
+ "offset of UnlockDiscardableTextureCHROMIUM texture_id should be 4");
+
+struct LockDiscardableTextureCHROMIUM {
+ typedef LockDiscardableTextureCHROMIUM ValueType;
+ static const CommandId kCmdId = kLockDiscardableTextureCHROMIUM;
+ static const cmd::ArgFlags kArgFlags = cmd::kFixed;
+ static const uint8_t cmd_flags = CMD_FLAG_SET_TRACE_LEVEL(3);
+
+ static uint32_t ComputeSize() {
+ return static_cast<uint32_t>(sizeof(ValueType)); // NOLINT
+ }
+
+ void SetHeader() { header.SetCmd<ValueType>(); }
+
+ void Init(GLuint _texture_id) {
+ SetHeader();
+ texture_id = _texture_id;
+ }
+
+ void* Set(void* cmd, GLuint _texture_id) {
+ static_cast<ValueType*>(cmd)->Init(_texture_id);
+ return NextCmdAddress<ValueType>(cmd);
+ }
+
+ gpu::CommandHeader header;
+ uint32_t texture_id;
+};
+
+static_assert(sizeof(LockDiscardableTextureCHROMIUM) == 8,
+ "size of LockDiscardableTextureCHROMIUM should be 8");
+static_assert(offsetof(LockDiscardableTextureCHROMIUM, header) == 0,
+ "offset of LockDiscardableTextureCHROMIUM header should be 0");
+static_assert(
+ offsetof(LockDiscardableTextureCHROMIUM, texture_id) == 4,
+ "offset of LockDiscardableTextureCHROMIUM texture_id should be 4");
+
+struct BeginRasterCHROMIUM {
+ typedef BeginRasterCHROMIUM ValueType;
+ static const CommandId kCmdId = kBeginRasterCHROMIUM;
+ static const cmd::ArgFlags kArgFlags = cmd::kFixed;
+ static const uint8_t cmd_flags = CMD_FLAG_SET_TRACE_LEVEL(3);
+
+ static uint32_t ComputeSize() {
+ return static_cast<uint32_t>(sizeof(ValueType)); // NOLINT
+ }
+
+ void SetHeader() { header.SetCmd<ValueType>(); }
+
+ void Init(GLuint _texture_id,
+ GLuint _sk_color,
+ GLuint _msaa_sample_count,
+ GLboolean _can_use_lcd_text,
+ GLboolean _use_distance_field_text,
+ GLint _color_type) {
+ SetHeader();
+ texture_id = _texture_id;
+ sk_color = _sk_color;
+ msaa_sample_count = _msaa_sample_count;
+ can_use_lcd_text = _can_use_lcd_text;
+ use_distance_field_text = _use_distance_field_text;
+ color_type = _color_type;
+ }
+
+ void* Set(void* cmd,
+ GLuint _texture_id,
+ GLuint _sk_color,
+ GLuint _msaa_sample_count,
+ GLboolean _can_use_lcd_text,
+ GLboolean _use_distance_field_text,
+ GLint _color_type) {
+ static_cast<ValueType*>(cmd)->Init(_texture_id, _sk_color,
+ _msaa_sample_count, _can_use_lcd_text,
+ _use_distance_field_text, _color_type);
+ return NextCmdAddress<ValueType>(cmd);
+ }
+
+ gpu::CommandHeader header;
+ uint32_t texture_id;
+ uint32_t sk_color;
+ uint32_t msaa_sample_count;
+ uint32_t can_use_lcd_text;
+ uint32_t use_distance_field_text;
+ int32_t color_type;
+};
+
+static_assert(sizeof(BeginRasterCHROMIUM) == 28,
+ "size of BeginRasterCHROMIUM should be 28");
+static_assert(offsetof(BeginRasterCHROMIUM, header) == 0,
+ "offset of BeginRasterCHROMIUM header should be 0");
+static_assert(offsetof(BeginRasterCHROMIUM, texture_id) == 4,
+ "offset of BeginRasterCHROMIUM texture_id should be 4");
+static_assert(offsetof(BeginRasterCHROMIUM, sk_color) == 8,
+ "offset of BeginRasterCHROMIUM sk_color should be 8");
+static_assert(offsetof(BeginRasterCHROMIUM, msaa_sample_count) == 12,
+ "offset of BeginRasterCHROMIUM msaa_sample_count should be 12");
+static_assert(offsetof(BeginRasterCHROMIUM, can_use_lcd_text) == 16,
+ "offset of BeginRasterCHROMIUM can_use_lcd_text should be 16");
+static_assert(
+ offsetof(BeginRasterCHROMIUM, use_distance_field_text) == 20,
+ "offset of BeginRasterCHROMIUM use_distance_field_text should be 20");
+static_assert(offsetof(BeginRasterCHROMIUM, color_type) == 24,
+ "offset of BeginRasterCHROMIUM color_type should be 24");
+
+struct RasterCHROMIUM {
+ typedef RasterCHROMIUM ValueType;
+ static const CommandId kCmdId = kRasterCHROMIUM;
+ static const cmd::ArgFlags kArgFlags = cmd::kFixed;
+ static const uint8_t cmd_flags = CMD_FLAG_SET_TRACE_LEVEL(3);
+
+ static uint32_t ComputeSize() {
+ return static_cast<uint32_t>(sizeof(ValueType)); // NOLINT
+ }
+
+ void SetHeader() { header.SetCmd<ValueType>(); }
+
+ void Init(GLsizeiptr _size,
+ uint32_t _list_shm_id,
+ uint32_t _list_shm_offset) {
+ SetHeader();
+ size = _size;
+ list_shm_id = _list_shm_id;
+ list_shm_offset = _list_shm_offset;
+ }
+
+ void* Set(void* cmd,
+ GLsizeiptr _size,
+ uint32_t _list_shm_id,
+ uint32_t _list_shm_offset) {
+ static_cast<ValueType*>(cmd)->Init(_size, _list_shm_id, _list_shm_offset);
+ return NextCmdAddress<ValueType>(cmd);
+ }
+
+ gpu::CommandHeader header;
+ int32_t size;
+ uint32_t list_shm_id;
+ uint32_t list_shm_offset;
+};
+
+static_assert(sizeof(RasterCHROMIUM) == 16,
+ "size of RasterCHROMIUM should be 16");
+static_assert(offsetof(RasterCHROMIUM, header) == 0,
+ "offset of RasterCHROMIUM header should be 0");
+static_assert(offsetof(RasterCHROMIUM, size) == 4,
+ "offset of RasterCHROMIUM size should be 4");
+static_assert(offsetof(RasterCHROMIUM, list_shm_id) == 8,
+ "offset of RasterCHROMIUM list_shm_id should be 8");
+static_assert(offsetof(RasterCHROMIUM, list_shm_offset) == 12,
+ "offset of RasterCHROMIUM list_shm_offset should be 12");
+
+struct EndRasterCHROMIUM {
+ typedef EndRasterCHROMIUM ValueType;
+ static const CommandId kCmdId = kEndRasterCHROMIUM;
+ static const cmd::ArgFlags kArgFlags = cmd::kFixed;
+ static const uint8_t cmd_flags = CMD_FLAG_SET_TRACE_LEVEL(3);
+
+ static uint32_t ComputeSize() {
+ return static_cast<uint32_t>(sizeof(ValueType)); // NOLINT
+ }
+
+ void SetHeader() { header.SetCmd<ValueType>(); }
+
+ void Init() { SetHeader(); }
+
+ void* Set(void* cmd) {
+ static_cast<ValueType*>(cmd)->Init();
+ return NextCmdAddress<ValueType>(cmd);
+ }
+
+ gpu::CommandHeader header;
+};
+
+static_assert(sizeof(EndRasterCHROMIUM) == 4,
+ "size of EndRasterCHROMIUM should be 4");
+static_assert(offsetof(EndRasterCHROMIUM, header) == 0,
+ "offset of EndRasterCHROMIUM header should be 0");
+
+struct CreateTransferCacheEntryINTERNAL {
+ typedef CreateTransferCacheEntryINTERNAL ValueType;
+ static const CommandId kCmdId = kCreateTransferCacheEntryINTERNAL;
+ static const cmd::ArgFlags kArgFlags = cmd::kFixed;
+ static const uint8_t cmd_flags = CMD_FLAG_SET_TRACE_LEVEL(3);
+
+ static uint32_t ComputeSize() {
+ return static_cast<uint32_t>(sizeof(ValueType)); // NOLINT
+ }
+
+ void SetHeader() { header.SetCmd<ValueType>(); }
+
+ void Init(GLuint _entry_type,
+ GLuint _entry_id,
+ GLuint _handle_shm_id,
+ GLuint _handle_shm_offset,
+ GLuint _data_shm_id,
+ GLuint _data_shm_offset,
+ GLuint _data_size) {
+ SetHeader();
+ entry_type = _entry_type;
+ entry_id = _entry_id;
+ handle_shm_id = _handle_shm_id;
+ handle_shm_offset = _handle_shm_offset;
+ data_shm_id = _data_shm_id;
+ data_shm_offset = _data_shm_offset;
+ data_size = _data_size;
+ }
+
+ void* Set(void* cmd,
+ GLuint _entry_type,
+ GLuint _entry_id,
+ GLuint _handle_shm_id,
+ GLuint _handle_shm_offset,
+ GLuint _data_shm_id,
+ GLuint _data_shm_offset,
+ GLuint _data_size) {
+ static_cast<ValueType*>(cmd)->Init(_entry_type, _entry_id, _handle_shm_id,
+ _handle_shm_offset, _data_shm_id,
+ _data_shm_offset, _data_size);
+ return NextCmdAddress<ValueType>(cmd);
+ }
+
+ gpu::CommandHeader header;
+ uint32_t entry_type;
+ uint32_t entry_id;
+ uint32_t handle_shm_id;
+ uint32_t handle_shm_offset;
+ uint32_t data_shm_id;
+ uint32_t data_shm_offset;
+ uint32_t data_size;
+};
+
+static_assert(sizeof(CreateTransferCacheEntryINTERNAL) == 32,
+ "size of CreateTransferCacheEntryINTERNAL should be 32");
+static_assert(offsetof(CreateTransferCacheEntryINTERNAL, header) == 0,
+ "offset of CreateTransferCacheEntryINTERNAL header should be 0");
+static_assert(
+ offsetof(CreateTransferCacheEntryINTERNAL, entry_type) == 4,
+ "offset of CreateTransferCacheEntryINTERNAL entry_type should be 4");
+static_assert(
+ offsetof(CreateTransferCacheEntryINTERNAL, entry_id) == 8,
+ "offset of CreateTransferCacheEntryINTERNAL entry_id should be 8");
+static_assert(
+ offsetof(CreateTransferCacheEntryINTERNAL, handle_shm_id) == 12,
+ "offset of CreateTransferCacheEntryINTERNAL handle_shm_id should be 12");
+static_assert(offsetof(CreateTransferCacheEntryINTERNAL, handle_shm_offset) ==
+ 16,
+ "offset of CreateTransferCacheEntryINTERNAL handle_shm_offset "
+ "should be 16");
+static_assert(
+ offsetof(CreateTransferCacheEntryINTERNAL, data_shm_id) == 20,
+ "offset of CreateTransferCacheEntryINTERNAL data_shm_id should be 20");
+static_assert(
+ offsetof(CreateTransferCacheEntryINTERNAL, data_shm_offset) == 24,
+ "offset of CreateTransferCacheEntryINTERNAL data_shm_offset should be 24");
+static_assert(
+ offsetof(CreateTransferCacheEntryINTERNAL, data_size) == 28,
+ "offset of CreateTransferCacheEntryINTERNAL data_size should be 28");
+
+struct DeleteTransferCacheEntryINTERNAL {
+ typedef DeleteTransferCacheEntryINTERNAL ValueType;
+ static const CommandId kCmdId = kDeleteTransferCacheEntryINTERNAL;
+ static const cmd::ArgFlags kArgFlags = cmd::kFixed;
+ static const uint8_t cmd_flags = CMD_FLAG_SET_TRACE_LEVEL(3);
+
+ static uint32_t ComputeSize() {
+ return static_cast<uint32_t>(sizeof(ValueType)); // NOLINT
+ }
+
+ void SetHeader() { header.SetCmd<ValueType>(); }
+
+ void Init(GLuint _entry_type, GLuint _entry_id) {
+ SetHeader();
+ entry_type = _entry_type;
+ entry_id = _entry_id;
+ }
+
+ void* Set(void* cmd, GLuint _entry_type, GLuint _entry_id) {
+ static_cast<ValueType*>(cmd)->Init(_entry_type, _entry_id);
+ return NextCmdAddress<ValueType>(cmd);
+ }
+
+ gpu::CommandHeader header;
+ uint32_t entry_type;
+ uint32_t entry_id;
+};
+
+static_assert(sizeof(DeleteTransferCacheEntryINTERNAL) == 12,
+ "size of DeleteTransferCacheEntryINTERNAL should be 12");
+static_assert(offsetof(DeleteTransferCacheEntryINTERNAL, header) == 0,
+ "offset of DeleteTransferCacheEntryINTERNAL header should be 0");
+static_assert(
+ offsetof(DeleteTransferCacheEntryINTERNAL, entry_type) == 4,
+ "offset of DeleteTransferCacheEntryINTERNAL entry_type should be 4");
+static_assert(
+ offsetof(DeleteTransferCacheEntryINTERNAL, entry_id) == 8,
+ "offset of DeleteTransferCacheEntryINTERNAL entry_id should be 8");
+
+struct UnlockTransferCacheEntryINTERNAL {
+ typedef UnlockTransferCacheEntryINTERNAL ValueType;
+ static const CommandId kCmdId = kUnlockTransferCacheEntryINTERNAL;
+ static const cmd::ArgFlags kArgFlags = cmd::kFixed;
+ static const uint8_t cmd_flags = CMD_FLAG_SET_TRACE_LEVEL(3);
+
+ static uint32_t ComputeSize() {
+ return static_cast<uint32_t>(sizeof(ValueType)); // NOLINT
+ }
+
+ void SetHeader() { header.SetCmd<ValueType>(); }
+
+ void Init(GLuint _entry_type, GLuint _entry_id) {
+ SetHeader();
+ entry_type = _entry_type;
+ entry_id = _entry_id;
+ }
+
+ void* Set(void* cmd, GLuint _entry_type, GLuint _entry_id) {
+ static_cast<ValueType*>(cmd)->Init(_entry_type, _entry_id);
+ return NextCmdAddress<ValueType>(cmd);
+ }
+
+ gpu::CommandHeader header;
+ uint32_t entry_type;
+ uint32_t entry_id;
+};
+
+static_assert(sizeof(UnlockTransferCacheEntryINTERNAL) == 12,
+ "size of UnlockTransferCacheEntryINTERNAL should be 12");
+static_assert(offsetof(UnlockTransferCacheEntryINTERNAL, header) == 0,
+ "offset of UnlockTransferCacheEntryINTERNAL header should be 0");
+static_assert(
+ offsetof(UnlockTransferCacheEntryINTERNAL, entry_type) == 4,
+ "offset of UnlockTransferCacheEntryINTERNAL entry_type should be 4");
+static_assert(
+ offsetof(UnlockTransferCacheEntryINTERNAL, entry_id) == 8,
+ "offset of UnlockTransferCacheEntryINTERNAL entry_id should be 8");
+
+#endif // GPU_COMMAND_BUFFER_COMMON_RASTER_CMD_FORMAT_AUTOGEN_H_
diff --git a/chromium/gpu/command_buffer/common/raster_cmd_format_test.cc b/chromium/gpu/command_buffer/common/raster_cmd_format_test.cc
new file mode 100644
index 00000000000..fa15cc2b345
--- /dev/null
+++ b/chromium/gpu/command_buffer/common/raster_cmd_format_test.cc
@@ -0,0 +1,65 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file contains unit tests for raster commands
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include <limits>
+
+#include "base/bind.h"
+#include "base/location.h"
+#include "base/single_thread_task_runner.h"
+#include "base/synchronization/waitable_event.h"
+#include "base/threading/thread.h"
+#include "gpu/command_buffer/common/raster_cmd_format.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace gpu {
+namespace raster {
+
+class RasterFormatTest : public testing::Test {
+ protected:
+ static const unsigned char kInitialValue = 0xBD;
+
+ void SetUp() override { memset(buffer_, kInitialValue, sizeof(buffer_)); }
+
+ void TearDown() override {}
+
+ template <typename T>
+ T* GetBufferAs() {
+ return static_cast<T*>(static_cast<void*>(&buffer_));
+ }
+
+ void CheckBytesWritten(const void* end,
+ size_t expected_size,
+ size_t written_size) {
+ size_t actual_size = static_cast<const unsigned char*>(end) -
+ GetBufferAs<const unsigned char>();
+ EXPECT_LT(actual_size, sizeof(buffer_));
+ EXPECT_GT(actual_size, 0u);
+ EXPECT_EQ(expected_size, actual_size);
+ EXPECT_EQ(kInitialValue, buffer_[written_size]);
+ EXPECT_NE(kInitialValue, buffer_[written_size - 1]);
+ }
+
+ void CheckBytesWrittenMatchesExpectedSize(const void* end,
+ size_t expected_size) {
+ CheckBytesWritten(end, expected_size, expected_size);
+ }
+
+ private:
+ unsigned char buffer_[1024];
+};
+
+// GCC requires these declarations, but MSVC requires they not be present
+#ifndef _MSC_VER
+const unsigned char RasterFormatTest::kInitialValue;
+#endif
+
+#include "gpu/command_buffer/common/raster_cmd_format_test_autogen.h"
+
+} // namespace raster
+} // namespace gpu
diff --git a/chromium/gpu/command_buffer/common/raster_cmd_format_test_autogen.h b/chromium/gpu/command_buffer/common/raster_cmd_format_test_autogen.h
new file mode 100644
index 00000000000..da157420fdf
--- /dev/null
+++ b/chromium/gpu/command_buffer/common/raster_cmd_format_test_autogen.h
@@ -0,0 +1,381 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file is auto-generated from
+// gpu/command_buffer/build_raster_cmd_buffer.py
+// It's formatted by clang-format using chromium coding style:
+// clang-format -i -style=chromium filename
+// DO NOT EDIT!
+
+// This file contains unit tests for raster commands
+// It is included by raster_cmd_format_test.cc
+
+#ifndef GPU_COMMAND_BUFFER_COMMON_RASTER_CMD_FORMAT_TEST_AUTOGEN_H_
+#define GPU_COMMAND_BUFFER_COMMON_RASTER_CMD_FORMAT_TEST_AUTOGEN_H_
+
+TEST_F(RasterFormatTest, BindTexture) {
+ cmds::BindTexture& cmd = *GetBufferAs<cmds::BindTexture>();
+ void* next_cmd =
+ cmd.Set(&cmd, static_cast<GLenum>(11), static_cast<GLuint>(12));
+ EXPECT_EQ(static_cast<uint32_t>(cmds::BindTexture::kCmdId),
+ cmd.header.command);
+ EXPECT_EQ(sizeof(cmd), cmd.header.size * 4u);
+ EXPECT_EQ(static_cast<GLenum>(11), cmd.target);
+ EXPECT_EQ(static_cast<GLuint>(12), cmd.texture);
+ CheckBytesWrittenMatchesExpectedSize(next_cmd, sizeof(cmd));
+}
+
+TEST_F(RasterFormatTest, DeleteTexturesImmediate) {
+ static GLuint ids[] = {
+ 12, 23, 34,
+ };
+ cmds::DeleteTexturesImmediate& cmd =
+ *GetBufferAs<cmds::DeleteTexturesImmediate>();
+ void* next_cmd = cmd.Set(&cmd, static_cast<GLsizei>(arraysize(ids)), ids);
+ EXPECT_EQ(static_cast<uint32_t>(cmds::DeleteTexturesImmediate::kCmdId),
+ cmd.header.command);
+ EXPECT_EQ(sizeof(cmd) + RoundSizeToMultipleOfEntries(cmd.n * 4u),
+ cmd.header.size * 4u);
+ EXPECT_EQ(static_cast<GLsizei>(arraysize(ids)), cmd.n);
+ CheckBytesWrittenMatchesExpectedSize(
+ next_cmd,
+ sizeof(cmd) + RoundSizeToMultipleOfEntries(arraysize(ids) * 4u));
+ EXPECT_EQ(0, memcmp(ids, ImmediateDataAddress(&cmd), sizeof(ids)));
+}
+
+TEST_F(RasterFormatTest, Finish) {
+ cmds::Finish& cmd = *GetBufferAs<cmds::Finish>();
+ void* next_cmd = cmd.Set(&cmd);
+ EXPECT_EQ(static_cast<uint32_t>(cmds::Finish::kCmdId), cmd.header.command);
+ EXPECT_EQ(sizeof(cmd), cmd.header.size * 4u);
+ CheckBytesWrittenMatchesExpectedSize(next_cmd, sizeof(cmd));
+}
+
+TEST_F(RasterFormatTest, Flush) {
+ cmds::Flush& cmd = *GetBufferAs<cmds::Flush>();
+ void* next_cmd = cmd.Set(&cmd);
+ EXPECT_EQ(static_cast<uint32_t>(cmds::Flush::kCmdId), cmd.header.command);
+ EXPECT_EQ(sizeof(cmd), cmd.header.size * 4u);
+ CheckBytesWrittenMatchesExpectedSize(next_cmd, sizeof(cmd));
+}
+
+TEST_F(RasterFormatTest, GenTexturesImmediate) {
+ static GLuint ids[] = {
+ 12, 23, 34,
+ };
+ cmds::GenTexturesImmediate& cmd = *GetBufferAs<cmds::GenTexturesImmediate>();
+ void* next_cmd = cmd.Set(&cmd, static_cast<GLsizei>(arraysize(ids)), ids);
+ EXPECT_EQ(static_cast<uint32_t>(cmds::GenTexturesImmediate::kCmdId),
+ cmd.header.command);
+ EXPECT_EQ(sizeof(cmd) + RoundSizeToMultipleOfEntries(cmd.n * 4u),
+ cmd.header.size * 4u);
+ EXPECT_EQ(static_cast<GLsizei>(arraysize(ids)), cmd.n);
+ CheckBytesWrittenMatchesExpectedSize(
+ next_cmd,
+ sizeof(cmd) + RoundSizeToMultipleOfEntries(arraysize(ids) * 4u));
+ EXPECT_EQ(0, memcmp(ids, ImmediateDataAddress(&cmd), sizeof(ids)));
+}
+
+TEST_F(RasterFormatTest, GetError) {
+ cmds::GetError& cmd = *GetBufferAs<cmds::GetError>();
+ void* next_cmd =
+ cmd.Set(&cmd, static_cast<uint32_t>(11), static_cast<uint32_t>(12));
+ EXPECT_EQ(static_cast<uint32_t>(cmds::GetError::kCmdId), cmd.header.command);
+ EXPECT_EQ(sizeof(cmd), cmd.header.size * 4u);
+ EXPECT_EQ(static_cast<uint32_t>(11), cmd.result_shm_id);
+ EXPECT_EQ(static_cast<uint32_t>(12), cmd.result_shm_offset);
+ CheckBytesWrittenMatchesExpectedSize(next_cmd, sizeof(cmd));
+}
+
+TEST_F(RasterFormatTest, GetIntegerv) {
+ cmds::GetIntegerv& cmd = *GetBufferAs<cmds::GetIntegerv>();
+ void* next_cmd =
+ cmd.Set(&cmd, static_cast<GLenum>(11), static_cast<uint32_t>(12),
+ static_cast<uint32_t>(13));
+ EXPECT_EQ(static_cast<uint32_t>(cmds::GetIntegerv::kCmdId),
+ cmd.header.command);
+ EXPECT_EQ(sizeof(cmd), cmd.header.size * 4u);
+ EXPECT_EQ(static_cast<GLenum>(11), cmd.pname);
+ EXPECT_EQ(static_cast<uint32_t>(12), cmd.params_shm_id);
+ EXPECT_EQ(static_cast<uint32_t>(13), cmd.params_shm_offset);
+ CheckBytesWrittenMatchesExpectedSize(next_cmd, sizeof(cmd));
+}
+
+TEST_F(RasterFormatTest, TexParameteri) {
+ cmds::TexParameteri& cmd = *GetBufferAs<cmds::TexParameteri>();
+ void* next_cmd = cmd.Set(&cmd, static_cast<GLenum>(11),
+ static_cast<GLenum>(12), static_cast<GLint>(13));
+ EXPECT_EQ(static_cast<uint32_t>(cmds::TexParameteri::kCmdId),
+ cmd.header.command);
+ EXPECT_EQ(sizeof(cmd), cmd.header.size * 4u);
+ EXPECT_EQ(static_cast<GLenum>(11), cmd.target);
+ EXPECT_EQ(static_cast<GLenum>(12), cmd.pname);
+ EXPECT_EQ(static_cast<GLint>(13), cmd.param);
+ CheckBytesWrittenMatchesExpectedSize(next_cmd, sizeof(cmd));
+}
+
+TEST_F(RasterFormatTest, GenQueriesEXTImmediate) {
+ static GLuint ids[] = {
+ 12, 23, 34,
+ };
+ cmds::GenQueriesEXTImmediate& cmd =
+ *GetBufferAs<cmds::GenQueriesEXTImmediate>();
+ void* next_cmd = cmd.Set(&cmd, static_cast<GLsizei>(arraysize(ids)), ids);
+ EXPECT_EQ(static_cast<uint32_t>(cmds::GenQueriesEXTImmediate::kCmdId),
+ cmd.header.command);
+ EXPECT_EQ(sizeof(cmd) + RoundSizeToMultipleOfEntries(cmd.n * 4u),
+ cmd.header.size * 4u);
+ EXPECT_EQ(static_cast<GLsizei>(arraysize(ids)), cmd.n);
+ CheckBytesWrittenMatchesExpectedSize(
+ next_cmd,
+ sizeof(cmd) + RoundSizeToMultipleOfEntries(arraysize(ids) * 4u));
+ EXPECT_EQ(0, memcmp(ids, ImmediateDataAddress(&cmd), sizeof(ids)));
+}
+
+TEST_F(RasterFormatTest, DeleteQueriesEXTImmediate) {
+ static GLuint ids[] = {
+ 12, 23, 34,
+ };
+ cmds::DeleteQueriesEXTImmediate& cmd =
+ *GetBufferAs<cmds::DeleteQueriesEXTImmediate>();
+ void* next_cmd = cmd.Set(&cmd, static_cast<GLsizei>(arraysize(ids)), ids);
+ EXPECT_EQ(static_cast<uint32_t>(cmds::DeleteQueriesEXTImmediate::kCmdId),
+ cmd.header.command);
+ EXPECT_EQ(sizeof(cmd) + RoundSizeToMultipleOfEntries(cmd.n * 4u),
+ cmd.header.size * 4u);
+ EXPECT_EQ(static_cast<GLsizei>(arraysize(ids)), cmd.n);
+ CheckBytesWrittenMatchesExpectedSize(
+ next_cmd,
+ sizeof(cmd) + RoundSizeToMultipleOfEntries(arraysize(ids) * 4u));
+ EXPECT_EQ(0, memcmp(ids, ImmediateDataAddress(&cmd), sizeof(ids)));
+}
+
+TEST_F(RasterFormatTest, BeginQueryEXT) {
+ cmds::BeginQueryEXT& cmd = *GetBufferAs<cmds::BeginQueryEXT>();
+ void* next_cmd =
+ cmd.Set(&cmd, static_cast<GLenum>(11), static_cast<GLuint>(12),
+ static_cast<uint32_t>(13), static_cast<uint32_t>(14));
+ EXPECT_EQ(static_cast<uint32_t>(cmds::BeginQueryEXT::kCmdId),
+ cmd.header.command);
+ EXPECT_EQ(sizeof(cmd), cmd.header.size * 4u);
+ EXPECT_EQ(static_cast<GLenum>(11), cmd.target);
+ EXPECT_EQ(static_cast<GLuint>(12), cmd.id);
+ EXPECT_EQ(static_cast<uint32_t>(13), cmd.sync_data_shm_id);
+ EXPECT_EQ(static_cast<uint32_t>(14), cmd.sync_data_shm_offset);
+ CheckBytesWrittenMatchesExpectedSize(next_cmd, sizeof(cmd));
+}
+
+TEST_F(RasterFormatTest, EndQueryEXT) {
+ cmds::EndQueryEXT& cmd = *GetBufferAs<cmds::EndQueryEXT>();
+ void* next_cmd =
+ cmd.Set(&cmd, static_cast<GLenum>(11), static_cast<GLuint>(12));
+ EXPECT_EQ(static_cast<uint32_t>(cmds::EndQueryEXT::kCmdId),
+ cmd.header.command);
+ EXPECT_EQ(sizeof(cmd), cmd.header.size * 4u);
+ EXPECT_EQ(static_cast<GLenum>(11), cmd.target);
+ EXPECT_EQ(static_cast<GLuint>(12), cmd.submit_count);
+ CheckBytesWrittenMatchesExpectedSize(next_cmd, sizeof(cmd));
+}
+
+TEST_F(RasterFormatTest, CompressedCopyTextureCHROMIUM) {
+ cmds::CompressedCopyTextureCHROMIUM& cmd =
+ *GetBufferAs<cmds::CompressedCopyTextureCHROMIUM>();
+ void* next_cmd =
+ cmd.Set(&cmd, static_cast<GLuint>(11), static_cast<GLuint>(12));
+ EXPECT_EQ(static_cast<uint32_t>(cmds::CompressedCopyTextureCHROMIUM::kCmdId),
+ cmd.header.command);
+ EXPECT_EQ(sizeof(cmd), cmd.header.size * 4u);
+ EXPECT_EQ(static_cast<GLuint>(11), cmd.source_id);
+ EXPECT_EQ(static_cast<GLuint>(12), cmd.dest_id);
+ CheckBytesWrittenMatchesExpectedSize(next_cmd, sizeof(cmd));
+}
+
+TEST_F(RasterFormatTest, LoseContextCHROMIUM) {
+ cmds::LoseContextCHROMIUM& cmd = *GetBufferAs<cmds::LoseContextCHROMIUM>();
+ void* next_cmd =
+ cmd.Set(&cmd, static_cast<GLenum>(11), static_cast<GLenum>(12));
+ EXPECT_EQ(static_cast<uint32_t>(cmds::LoseContextCHROMIUM::kCmdId),
+ cmd.header.command);
+ EXPECT_EQ(sizeof(cmd), cmd.header.size * 4u);
+ EXPECT_EQ(static_cast<GLenum>(11), cmd.current);
+ EXPECT_EQ(static_cast<GLenum>(12), cmd.other);
+ CheckBytesWrittenMatchesExpectedSize(next_cmd, sizeof(cmd));
+}
+
+TEST_F(RasterFormatTest, InsertFenceSyncCHROMIUM) {
+ cmds::InsertFenceSyncCHROMIUM& cmd =
+ *GetBufferAs<cmds::InsertFenceSyncCHROMIUM>();
+ void* next_cmd = cmd.Set(&cmd, static_cast<GLuint64>(11));
+ EXPECT_EQ(static_cast<uint32_t>(cmds::InsertFenceSyncCHROMIUM::kCmdId),
+ cmd.header.command);
+ EXPECT_EQ(sizeof(cmd), cmd.header.size * 4u);
+ EXPECT_EQ(static_cast<GLuint64>(11), cmd.release_count());
+ CheckBytesWrittenMatchesExpectedSize(next_cmd, sizeof(cmd));
+}
+
+TEST_F(RasterFormatTest, WaitSyncTokenCHROMIUM) {
+ cmds::WaitSyncTokenCHROMIUM& cmd =
+ *GetBufferAs<cmds::WaitSyncTokenCHROMIUM>();
+ void* next_cmd =
+ cmd.Set(&cmd, static_cast<GLint>(11), static_cast<GLuint64>(12),
+ static_cast<GLuint64>(13));
+ EXPECT_EQ(static_cast<uint32_t>(cmds::WaitSyncTokenCHROMIUM::kCmdId),
+ cmd.header.command);
+ EXPECT_EQ(sizeof(cmd), cmd.header.size * 4u);
+ EXPECT_EQ(static_cast<GLint>(11), cmd.namespace_id);
+ EXPECT_EQ(static_cast<GLuint64>(12), cmd.command_buffer_id());
+ EXPECT_EQ(static_cast<GLuint64>(13), cmd.release_count());
+ CheckBytesWrittenMatchesExpectedSize(next_cmd, sizeof(cmd));
+}
+
+TEST_F(RasterFormatTest, UnpremultiplyAndDitherCopyCHROMIUM) {
+ cmds::UnpremultiplyAndDitherCopyCHROMIUM& cmd =
+ *GetBufferAs<cmds::UnpremultiplyAndDitherCopyCHROMIUM>();
+ void* next_cmd =
+ cmd.Set(&cmd, static_cast<GLuint>(11), static_cast<GLuint>(12),
+ static_cast<GLint>(13), static_cast<GLint>(14),
+ static_cast<GLsizei>(15), static_cast<GLsizei>(16));
+ EXPECT_EQ(
+ static_cast<uint32_t>(cmds::UnpremultiplyAndDitherCopyCHROMIUM::kCmdId),
+ cmd.header.command);
+ EXPECT_EQ(sizeof(cmd), cmd.header.size * 4u);
+ EXPECT_EQ(static_cast<GLuint>(11), cmd.source_id);
+ EXPECT_EQ(static_cast<GLuint>(12), cmd.dest_id);
+ EXPECT_EQ(static_cast<GLint>(13), cmd.x);
+ EXPECT_EQ(static_cast<GLint>(14), cmd.y);
+ EXPECT_EQ(static_cast<GLsizei>(15), cmd.width);
+ EXPECT_EQ(static_cast<GLsizei>(16), cmd.height);
+ CheckBytesWrittenMatchesExpectedSize(next_cmd, sizeof(cmd));
+}
+
+TEST_F(RasterFormatTest, InitializeDiscardableTextureCHROMIUM) {
+ cmds::InitializeDiscardableTextureCHROMIUM& cmd =
+ *GetBufferAs<cmds::InitializeDiscardableTextureCHROMIUM>();
+ void* next_cmd =
+ cmd.Set(&cmd, static_cast<GLuint>(11), static_cast<uint32_t>(12),
+ static_cast<uint32_t>(13));
+ EXPECT_EQ(
+ static_cast<uint32_t>(cmds::InitializeDiscardableTextureCHROMIUM::kCmdId),
+ cmd.header.command);
+ EXPECT_EQ(sizeof(cmd), cmd.header.size * 4u);
+ EXPECT_EQ(static_cast<GLuint>(11), cmd.texture_id);
+ EXPECT_EQ(static_cast<uint32_t>(12), cmd.shm_id);
+ EXPECT_EQ(static_cast<uint32_t>(13), cmd.shm_offset);
+ CheckBytesWrittenMatchesExpectedSize(next_cmd, sizeof(cmd));
+}
+
+TEST_F(RasterFormatTest, UnlockDiscardableTextureCHROMIUM) {
+ cmds::UnlockDiscardableTextureCHROMIUM& cmd =
+ *GetBufferAs<cmds::UnlockDiscardableTextureCHROMIUM>();
+ void* next_cmd = cmd.Set(&cmd, static_cast<GLuint>(11));
+ EXPECT_EQ(
+ static_cast<uint32_t>(cmds::UnlockDiscardableTextureCHROMIUM::kCmdId),
+ cmd.header.command);
+ EXPECT_EQ(sizeof(cmd), cmd.header.size * 4u);
+ EXPECT_EQ(static_cast<GLuint>(11), cmd.texture_id);
+ CheckBytesWrittenMatchesExpectedSize(next_cmd, sizeof(cmd));
+}
+
+TEST_F(RasterFormatTest, LockDiscardableTextureCHROMIUM) {
+ cmds::LockDiscardableTextureCHROMIUM& cmd =
+ *GetBufferAs<cmds::LockDiscardableTextureCHROMIUM>();
+ void* next_cmd = cmd.Set(&cmd, static_cast<GLuint>(11));
+ EXPECT_EQ(static_cast<uint32_t>(cmds::LockDiscardableTextureCHROMIUM::kCmdId),
+ cmd.header.command);
+ EXPECT_EQ(sizeof(cmd), cmd.header.size * 4u);
+ EXPECT_EQ(static_cast<GLuint>(11), cmd.texture_id);
+ CheckBytesWrittenMatchesExpectedSize(next_cmd, sizeof(cmd));
+}
+
+TEST_F(RasterFormatTest, BeginRasterCHROMIUM) {
+ cmds::BeginRasterCHROMIUM& cmd = *GetBufferAs<cmds::BeginRasterCHROMIUM>();
+ void* next_cmd =
+ cmd.Set(&cmd, static_cast<GLuint>(11), static_cast<GLuint>(12),
+ static_cast<GLuint>(13), static_cast<GLboolean>(14),
+ static_cast<GLboolean>(15), static_cast<GLint>(16));
+ EXPECT_EQ(static_cast<uint32_t>(cmds::BeginRasterCHROMIUM::kCmdId),
+ cmd.header.command);
+ EXPECT_EQ(sizeof(cmd), cmd.header.size * 4u);
+ EXPECT_EQ(static_cast<GLuint>(11), cmd.texture_id);
+ EXPECT_EQ(static_cast<GLuint>(12), cmd.sk_color);
+ EXPECT_EQ(static_cast<GLuint>(13), cmd.msaa_sample_count);
+ EXPECT_EQ(static_cast<GLboolean>(14), cmd.can_use_lcd_text);
+ EXPECT_EQ(static_cast<GLboolean>(15), cmd.use_distance_field_text);
+ EXPECT_EQ(static_cast<GLint>(16), cmd.color_type);
+ CheckBytesWrittenMatchesExpectedSize(next_cmd, sizeof(cmd));
+}
+
+TEST_F(RasterFormatTest, RasterCHROMIUM) {
+ cmds::RasterCHROMIUM& cmd = *GetBufferAs<cmds::RasterCHROMIUM>();
+ void* next_cmd =
+ cmd.Set(&cmd, static_cast<GLsizeiptr>(11), static_cast<uint32_t>(12),
+ static_cast<uint32_t>(13));
+ EXPECT_EQ(static_cast<uint32_t>(cmds::RasterCHROMIUM::kCmdId),
+ cmd.header.command);
+ EXPECT_EQ(sizeof(cmd), cmd.header.size * 4u);
+ EXPECT_EQ(static_cast<GLsizeiptr>(11), cmd.size);
+ EXPECT_EQ(static_cast<uint32_t>(12), cmd.list_shm_id);
+ EXPECT_EQ(static_cast<uint32_t>(13), cmd.list_shm_offset);
+ CheckBytesWrittenMatchesExpectedSize(next_cmd, sizeof(cmd));
+}
+
+TEST_F(RasterFormatTest, EndRasterCHROMIUM) {
+ cmds::EndRasterCHROMIUM& cmd = *GetBufferAs<cmds::EndRasterCHROMIUM>();
+ void* next_cmd = cmd.Set(&cmd);
+ EXPECT_EQ(static_cast<uint32_t>(cmds::EndRasterCHROMIUM::kCmdId),
+ cmd.header.command);
+ EXPECT_EQ(sizeof(cmd), cmd.header.size * 4u);
+ CheckBytesWrittenMatchesExpectedSize(next_cmd, sizeof(cmd));
+}
+
+TEST_F(RasterFormatTest, CreateTransferCacheEntryINTERNAL) {
+ cmds::CreateTransferCacheEntryINTERNAL& cmd =
+ *GetBufferAs<cmds::CreateTransferCacheEntryINTERNAL>();
+ void* next_cmd = cmd.Set(&cmd, static_cast<GLuint>(11),
+ static_cast<GLuint>(12), static_cast<GLuint>(13),
+ static_cast<GLuint>(14), static_cast<GLuint>(15),
+ static_cast<GLuint>(16), static_cast<GLuint>(17));
+ EXPECT_EQ(
+ static_cast<uint32_t>(cmds::CreateTransferCacheEntryINTERNAL::kCmdId),
+ cmd.header.command);
+ EXPECT_EQ(sizeof(cmd), cmd.header.size * 4u);
+ EXPECT_EQ(static_cast<GLuint>(11), cmd.entry_type);
+ EXPECT_EQ(static_cast<GLuint>(12), cmd.entry_id);
+ EXPECT_EQ(static_cast<GLuint>(13), cmd.handle_shm_id);
+ EXPECT_EQ(static_cast<GLuint>(14), cmd.handle_shm_offset);
+ EXPECT_EQ(static_cast<GLuint>(15), cmd.data_shm_id);
+ EXPECT_EQ(static_cast<GLuint>(16), cmd.data_shm_offset);
+ EXPECT_EQ(static_cast<GLuint>(17), cmd.data_size);
+ CheckBytesWrittenMatchesExpectedSize(next_cmd, sizeof(cmd));
+}
+
+TEST_F(RasterFormatTest, DeleteTransferCacheEntryINTERNAL) {
+ cmds::DeleteTransferCacheEntryINTERNAL& cmd =
+ *GetBufferAs<cmds::DeleteTransferCacheEntryINTERNAL>();
+ void* next_cmd =
+ cmd.Set(&cmd, static_cast<GLuint>(11), static_cast<GLuint>(12));
+ EXPECT_EQ(
+ static_cast<uint32_t>(cmds::DeleteTransferCacheEntryINTERNAL::kCmdId),
+ cmd.header.command);
+ EXPECT_EQ(sizeof(cmd), cmd.header.size * 4u);
+ EXPECT_EQ(static_cast<GLuint>(11), cmd.entry_type);
+ EXPECT_EQ(static_cast<GLuint>(12), cmd.entry_id);
+ CheckBytesWrittenMatchesExpectedSize(next_cmd, sizeof(cmd));
+}
+
+TEST_F(RasterFormatTest, UnlockTransferCacheEntryINTERNAL) {
+ cmds::UnlockTransferCacheEntryINTERNAL& cmd =
+ *GetBufferAs<cmds::UnlockTransferCacheEntryINTERNAL>();
+ void* next_cmd =
+ cmd.Set(&cmd, static_cast<GLuint>(11), static_cast<GLuint>(12));
+ EXPECT_EQ(
+ static_cast<uint32_t>(cmds::UnlockTransferCacheEntryINTERNAL::kCmdId),
+ cmd.header.command);
+ EXPECT_EQ(sizeof(cmd), cmd.header.size * 4u);
+ EXPECT_EQ(static_cast<GLuint>(11), cmd.entry_type);
+ EXPECT_EQ(static_cast<GLuint>(12), cmd.entry_id);
+ CheckBytesWrittenMatchesExpectedSize(next_cmd, sizeof(cmd));
+}
+
+#endif // GPU_COMMAND_BUFFER_COMMON_RASTER_CMD_FORMAT_TEST_AUTOGEN_H_
diff --git a/chromium/gpu/command_buffer/common/raster_cmd_ids.h b/chromium/gpu/command_buffer/common/raster_cmd_ids.h
new file mode 100644
index 00000000000..6e9c7e697ec
--- /dev/null
+++ b/chromium/gpu/command_buffer/common/raster_cmd_ids.h
@@ -0,0 +1,22 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file defines the raster command buffer commands.
+
+#ifndef GPU_COMMAND_BUFFER_COMMON_RASTER_CMD_IDS_H_
+#define GPU_COMMAND_BUFFER_COMMON_RASTER_CMD_IDS_H_
+
+#include "gpu/command_buffer/common/cmd_buffer_common.h"
+
+namespace gpu {
+namespace raster {
+
+#include "gpu/command_buffer/common/raster_cmd_ids_autogen.h"
+
+const char* GetCommandName(CommandId command_id);
+
+} // namespace raster
+} // namespace gpu
+
+#endif // GPU_COMMAND_BUFFER_COMMON_RASTER_CMD_IDS_H_
diff --git a/chromium/gpu/command_buffer/common/raster_cmd_ids_autogen.h b/chromium/gpu/command_buffer/common/raster_cmd_ids_autogen.h
new file mode 100644
index 00000000000..e3e1258d690
--- /dev/null
+++ b/chromium/gpu/command_buffer/common/raster_cmd_ids_autogen.h
@@ -0,0 +1,52 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file is auto-generated from
+// gpu/command_buffer/build_raster_cmd_buffer.py
+// It's formatted by clang-format using chromium coding style:
+// clang-format -i -style=chromium filename
+// DO NOT EDIT!
+
+#ifndef GPU_COMMAND_BUFFER_COMMON_RASTER_CMD_IDS_AUTOGEN_H_
+#define GPU_COMMAND_BUFFER_COMMON_RASTER_CMD_IDS_AUTOGEN_H_
+
+#define RASTER_COMMAND_LIST(OP) \
+ OP(BindTexture) /* 256 */ \
+ OP(DeleteTexturesImmediate) /* 257 */ \
+ OP(Finish) /* 258 */ \
+ OP(Flush) /* 259 */ \
+ OP(GenTexturesImmediate) /* 260 */ \
+ OP(GetError) /* 261 */ \
+ OP(GetIntegerv) /* 262 */ \
+ OP(TexParameteri) /* 263 */ \
+ OP(GenQueriesEXTImmediate) /* 264 */ \
+ OP(DeleteQueriesEXTImmediate) /* 265 */ \
+ OP(BeginQueryEXT) /* 266 */ \
+ OP(EndQueryEXT) /* 267 */ \
+ OP(CompressedCopyTextureCHROMIUM) /* 268 */ \
+ OP(LoseContextCHROMIUM) /* 269 */ \
+ OP(InsertFenceSyncCHROMIUM) /* 270 */ \
+ OP(WaitSyncTokenCHROMIUM) /* 271 */ \
+ OP(UnpremultiplyAndDitherCopyCHROMIUM) /* 272 */ \
+ OP(InitializeDiscardableTextureCHROMIUM) /* 273 */ \
+ OP(UnlockDiscardableTextureCHROMIUM) /* 274 */ \
+ OP(LockDiscardableTextureCHROMIUM) /* 275 */ \
+ OP(BeginRasterCHROMIUM) /* 276 */ \
+ OP(RasterCHROMIUM) /* 277 */ \
+ OP(EndRasterCHROMIUM) /* 278 */ \
+ OP(CreateTransferCacheEntryINTERNAL) /* 279 */ \
+ OP(DeleteTransferCacheEntryINTERNAL) /* 280 */ \
+ OP(UnlockTransferCacheEntryINTERNAL) /* 281 */
+
+enum CommandId {
+ kOneBeforeStartPoint =
+ cmd::kLastCommonId, // All Raster commands start after this.
+#define RASTER_CMD_OP(name) k##name,
+ RASTER_COMMAND_LIST(RASTER_CMD_OP)
+#undef RASTER_CMD_OP
+ kNumCommands,
+ kFirstRasterCommand = kOneBeforeStartPoint + 1
+};
+
+#endif // GPU_COMMAND_BUFFER_COMMON_RASTER_CMD_IDS_AUTOGEN_H_
diff --git a/chromium/gpu/command_buffer/cmd_buffer_functions.txt b/chromium/gpu/command_buffer/gles2_cmd_buffer_functions.txt
index 7f79d83a94c..6372acc872f 100644
--- a/chromium/gpu/command_buffer/cmd_buffer_functions.txt
+++ b/chromium/gpu/command_buffer/gles2_cmd_buffer_functions.txt
@@ -302,6 +302,7 @@ GL_APICALL void GL_APIENTRY glGenSyncTokenCHROMIUM (GLbyte* sync_token);
GL_APICALL void GL_APIENTRY glGenUnverifiedSyncTokenCHROMIUM (GLbyte* sync_token);
GL_APICALL void GL_APIENTRY glVerifySyncTokensCHROMIUM (GLbyte** sync_tokens, GLsizei count);
GL_APICALL void GL_APIENTRY glWaitSyncTokenCHROMIUM (const GLbyte* sync_token);
+GL_APICALL void GL_APIENTRY glUnpremultiplyAndDitherCopyCHROMIUM (GLuint source_id, GLuint dest_id, GLint x, GLint y, GLsizei width, GLsizei height);
GL_APICALL void GL_APIENTRY glDrawBuffersEXT (GLsizei count, const GLenum* bufs);
GL_APICALL void GL_APIENTRY glDiscardBackbufferCHROMIUM (void);
GL_APICALL void GL_APIENTRY glScheduleOverlayPlaneCHROMIUM (GLint plane_z_order, GLenum plane_transform, GLuint overlay_texture_id, GLint bounds_x, GLint bounds_y, GLint bounds_width, GLint bounds_height, GLfloat uv_x, GLfloat uv_y, GLfloat uv_width, GLfloat uv_height);
@@ -377,7 +378,7 @@ GL_APICALL void GL_APIENTRY glUnlockDiscardableTextureCHROMIUM (GLuint t
GL_APICALL bool GL_APIENTRY glLockDiscardableTextureCHROMIUM (GLuint texture_id);
// Extension CHROMIUM_raster_transport
-GL_APICALL void GL_APIENTRY glBeginRasterCHROMIUM (GLuint texture_id, GLuint sk_color, GLuint msaa_sample_count, GLboolean can_use_lcd_text, GLboolean use_distance_field_text, GLint pixel_config);
+GL_APICALL void GL_APIENTRY glBeginRasterCHROMIUM (GLuint texture_id, GLuint sk_color, GLuint msaa_sample_count, GLboolean can_use_lcd_text, GLboolean use_distance_field_text, GLint color_type, GLuint color_space_transfer_cache_id);
GL_APICALL void GL_APIENTRY glRasterCHROMIUM (GLsizeiptr size, const void* list);
GL_APICALL void* GL_APIENTRY glMapRasterCHROMIUM (GLsizeiptr size);
GL_APICALL void GL_APIENTRY glUnmapRasterCHROMIUM (GLsizeiptr written_size);
diff --git a/chromium/gpu/command_buffer/raster_cmd_buffer_functions.txt b/chromium/gpu/command_buffer/raster_cmd_buffer_functions.txt
new file mode 100644
index 00000000000..f4450ba5602
--- /dev/null
+++ b/chromium/gpu/command_buffer/raster_cmd_buffer_functions.txt
@@ -0,0 +1,49 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file is read by build_raster_cmd_buffer.py to generate commands.
+
+GL_APICALL void GL_APIENTRY glBindTexture (GLenumTextureBindTarget target, GLidBindTexture texture);
+GL_APICALL void GL_APIENTRY glDeleteTextures (GLsizeiNotNegative n, const GLuint* textures);
+GL_APICALL void GL_APIENTRY glFinish (void);
+GL_APICALL void GL_APIENTRY glFlush (void);
+GL_APICALL void GL_APIENTRY glGenTextures (GLsizeiNotNegative n, GLuint* textures);
+GL_APICALL GLenum GL_APIENTRY glGetError (void);
+GL_APICALL void GL_APIENTRY glGetIntegerv (GLenumGLState pname, GLint* params);
+GL_APICALL void GL_APIENTRY glShallowFlushCHROMIUM (void);
+GL_APICALL void GL_APIENTRY glOrderingBarrierCHROMIUM (void);
+GL_APICALL void GL_APIENTRY glTexParameteri (GLenumTextureBindTarget target, GLenumTextureParameter pname, GLint param);
+GL_APICALL void GL_APIENTRY glGenQueriesEXT (GLsizeiNotNegative n, GLuint* queries);
+GL_APICALL void GL_APIENTRY glDeleteQueriesEXT (GLsizeiNotNegative n, const GLuint* queries);
+GL_APICALL void GL_APIENTRY glBeginQueryEXT (GLenumQueryTarget target, GLidQuery id);
+GL_APICALL void GL_APIENTRY glEndQueryEXT (GLenumQueryTarget target);
+GL_APICALL void GL_APIENTRY glGetQueryObjectuivEXT (GLidQuery id, GLenumQueryObjectParameter pname, GLuint* params);
+
+// Non-GL commands.
+GL_APICALL GLuint GL_APIENTRY glCreateImageCHROMIUM (ClientBuffer buffer, GLsizei width, GLsizei height, GLenum internalformat);
+GL_APICALL void GL_APIENTRY glDestroyImageCHROMIUM (GLuint image_id);
+GL_APICALL void GL_APIENTRY glCompressedCopyTextureCHROMIUM (GLuint source_id, GLuint dest_id);
+GL_APICALL void GL_APIENTRY glLoseContextCHROMIUM (GLenumResetStatus current, GLenumResetStatus other);
+GL_APICALL GLuint64 GL_APIENTRY glInsertFenceSyncCHROMIUM (void);
+GL_APICALL void GL_APIENTRY glGenSyncTokenCHROMIUM (GLbyte* sync_token);
+GL_APICALL void GL_APIENTRY glGenUnverifiedSyncTokenCHROMIUM (GLbyte* sync_token);
+GL_APICALL void GL_APIENTRY glVerifySyncTokensCHROMIUM (GLbyte** sync_tokens, GLsizei count);
+GL_APICALL void GL_APIENTRY glWaitSyncTokenCHROMIUM (const GLbyte* sync_token);
+GL_APICALL void GL_APIENTRY glUnpremultiplyAndDitherCopyCHROMIUM (GLuint source_id, GLuint dest_id, GLint x, GLint y, GLsizei width, GLsizei height);
+
+// Extension KHR_robustness
+GL_APICALL GLenum GL_APIENTRY glGetGraphicsResetStatusKHR (void);
+
+// Extension CHROMIUM_discardable_textures
+GL_APICALL void GL_APIENTRY glInitializeDiscardableTextureCHROMIUM (GLuint texture_id);
+GL_APICALL void GL_APIENTRY glUnlockDiscardableTextureCHROMIUM (GLuint texture_id);
+GL_APICALL bool GL_APIENTRY glLockDiscardableTextureCHROMIUM (GLuint texture_id);
+
+// Extension CHROMIUM_raster_transport
+GL_APICALL void GL_APIENTRY glBeginRasterCHROMIUM (GLuint texture_id, GLuint sk_color, GLuint msaa_sample_count, GLboolean can_use_lcd_text, GLboolean use_distance_field_text, GLint color_type);
+GL_APICALL void GL_APIENTRY glRasterCHROMIUM (GLsizeiptr size, const void* list);
+GL_APICALL void GL_APIENTRY glEndRasterCHROMIUM (void);
+GL_APICALL void GL_APIENTRY glCreateTransferCacheEntryINTERNAL (GLuint entry_type, GLuint entry_id, GLuint handle_shm_id, GLuint handle_shm_offset, GLuint data_shm_id, GLuint data_shm_offset, GLuint data_size);
+GL_APICALL void GL_APIENTRY glDeleteTransferCacheEntryINTERNAL (GLuint entry_type, GLuint entry_id);
+GL_APICALL void GL_APIENTRY glUnlockTransferCacheEntryINTERNAL (GLuint entry_type, GLuint entry_id);
diff --git a/chromium/gpu/command_buffer/service/command_buffer_direct.cc b/chromium/gpu/command_buffer/service/command_buffer_direct.cc
index 890cd6bfd68..98f38c193fe 100644
--- a/chromium/gpu/command_buffer/service/command_buffer_direct.cc
+++ b/chromium/gpu/command_buffer/service/command_buffer_direct.cc
@@ -5,6 +5,7 @@
#include "gpu/command_buffer/service/command_buffer_direct.h"
#include "base/bind.h"
+#include "base/callback_helpers.h"
#include "gpu/command_buffer/service/sync_point_manager.h"
#include "gpu/command_buffer/service/transfer_buffer_manager.h"
@@ -168,17 +169,19 @@ void CommandBufferDirect::SetCommandsPaused(bool paused) {
}
void CommandBufferDirect::SignalSyncToken(const gpu::SyncToken& sync_token,
- const base::Closure& callback) {
+ base::OnceClosure callback) {
if (sync_point_manager_) {
DCHECK(!paused_order_num_);
uint32_t order_num =
sync_point_order_data_->GenerateUnprocessedOrderNumber();
sync_point_order_data_->BeginProcessingOrderNumber(order_num);
- if (!sync_point_client_state_->Wait(sync_token, callback))
- callback.Run();
+ base::RepeatingClosure maybe_pass_callback =
+ base::AdaptCallbackForRepeating(std::move(callback));
+ if (!sync_point_client_state_->Wait(sync_token, maybe_pass_callback))
+ maybe_pass_callback.Run();
sync_point_order_data_->FinishProcessingOrderNumber(order_num);
} else {
- callback.Run();
+ std::move(callback).Run();
}
}
diff --git a/chromium/gpu/command_buffer/service/command_buffer_direct.h b/chromium/gpu/command_buffer/service/command_buffer_direct.h
index f5ac3587726..d3de8808f6e 100644
--- a/chromium/gpu/command_buffer/service/command_buffer_direct.h
+++ b/chromium/gpu/command_buffer/service/command_buffer_direct.h
@@ -65,7 +65,7 @@ class GPU_EXPORT CommandBufferDirect : public CommandBuffer,
void SetCommandsPaused(bool paused);
void SignalSyncToken(const gpu::SyncToken& sync_token,
- const base::Closure& callback);
+ base::OnceClosure callback);
scoped_refptr<Buffer> CreateTransferBufferWithId(size_t size, int32_t id);
diff --git a/chromium/gpu/command_buffer/service/context_group.cc b/chromium/gpu/command_buffer/service/context_group.cc
index d6f1c5e8094..3def1f3170c 100644
--- a/chromium/gpu/command_buffer/service/context_group.cc
+++ b/chromium/gpu/command_buffer/service/context_group.cc
@@ -163,7 +163,8 @@ gpu::ContextResult ContextGroup::Initialize(
DisallowedFeatures adjusted_disallowed_features =
AdjustDisallowedFeatures(context_type, disallowed_features);
- feature_info_->Initialize(context_type, adjusted_disallowed_features);
+ feature_info_->Initialize(context_type, use_passthrough_cmd_decoder_,
+ adjusted_disallowed_features);
const GLint kMinRenderbufferSize = 512; // GL says 1 pixel!
GLint max_renderbuffer_size = 0;
diff --git a/chromium/gpu/command_buffer/service/feature_info.cc b/chromium/gpu/command_buffer/service/feature_info.cc
index 4323e8100ea..68044240af5 100644
--- a/chromium/gpu/command_buffer/service/feature_info.cc
+++ b/chromium/gpu/command_buffer/service/feature_info.cc
@@ -25,6 +25,8 @@
#if !defined(OS_MACOSX)
#include "ui/gl/gl_fence_egl.h"
+#else
+#include "base/mac/mac_util.h"
#endif
namespace gpu {
@@ -250,23 +252,28 @@ void FeatureInfo::InitializeBasicState(const base::CommandLine* command_line) {
}
void FeatureInfo::Initialize(ContextType context_type,
+ bool is_passthrough_cmd_decoder,
const DisallowedFeatures& disallowed_features) {
disallowed_features_ = disallowed_features;
context_type_ = context_type;
+ is_passthrough_cmd_decoder_ = is_passthrough_cmd_decoder;
InitializeFeatures();
}
void FeatureInfo::InitializeForTesting(
const DisallowedFeatures& disallowed_features) {
- Initialize(CONTEXT_TYPE_OPENGLES2, disallowed_features);
+ Initialize(CONTEXT_TYPE_OPENGLES2, false /* is_passthrough_cmd_decoder */,
+ disallowed_features);
}
void FeatureInfo::InitializeForTesting() {
- Initialize(CONTEXT_TYPE_OPENGLES2, DisallowedFeatures());
+ Initialize(CONTEXT_TYPE_OPENGLES2, false /* is_passthrough_cmd_decoder */,
+ DisallowedFeatures());
}
void FeatureInfo::InitializeForTesting(ContextType context_type) {
- Initialize(context_type, DisallowedFeatures());
+ Initialize(context_type, false /* is_passthrough_cmd_decoder */,
+ DisallowedFeatures());
}
bool IsGL_REDSupportedOnFBOs() {
@@ -1072,6 +1079,26 @@ void FeatureInfo::InitializeFeatures() {
feature_flags_.chromium_image_ycbcr_422 = true;
}
+#if defined(OS_MACOSX)
+ // Mac can create GLImages out of XR30 IOSurfaces only after High Sierra.
+ feature_flags_.chromium_image_xr30 = base::mac::IsAtLeastOS10_13();
+#elif !defined(OS_WIN)
+ // TODO(mcasas): connect in Windows, https://crbug.com/803451
+ // XB30 support was introduced in GLES 3.0/ OpenGL 3.3, before that it was
+ // signalled via a specific extension.
+ feature_flags_.chromium_image_xb30 =
+ gl_version_info_->IsAtLeastGL(3, 3) ||
+ gl_version_info_->IsAtLeastGLES(3, 0) ||
+ gl::HasExtension(extensions, "GL_EXT_texture_type_2_10_10_10_REV");
+#endif
+ if (feature_flags_.chromium_image_xr30 ||
+ feature_flags_.chromium_image_xb30) {
+ validators_.texture_internal_format.AddValue(GL_RGB10_A2_EXT);
+ validators_.render_buffer_format.AddValue(GL_RGB10_A2_EXT);
+ validators_.texture_internal_format_storage.AddValue(GL_RGB10_A2_EXT);
+ validators_.pixel_type.AddValue(GL_UNSIGNED_INT_2_10_10_10_REV);
+ }
+
// TODO(gman): Add support for these extensions.
// GL_OES_depth32
@@ -1108,11 +1135,10 @@ void FeatureInfo::InitializeFeatures() {
!have_arb_occlusion_query2;
}
- if (!workarounds_.disable_angle_instanced_arrays &&
- (gl::HasExtension(extensions, "GL_ANGLE_instanced_arrays") ||
- (gl::HasExtension(extensions, "GL_ARB_instanced_arrays") &&
- gl::HasExtension(extensions, "GL_ARB_draw_instanced")) ||
- gl_version_info_->is_es3 || gl_version_info_->is_desktop_core_profile)) {
+ if (gl::HasExtension(extensions, "GL_ANGLE_instanced_arrays") ||
+ (gl::HasExtension(extensions, "GL_ARB_instanced_arrays") &&
+ gl::HasExtension(extensions, "GL_ARB_draw_instanced")) ||
+ gl_version_info_->is_es3 || gl_version_info_->is_desktop_core_profile) {
AddExtensionString("GL_ANGLE_instanced_arrays");
feature_flags_.angle_instanced_arrays = true;
validators_.vertex_attribute.AddValue(GL_VERTEX_ATTRIB_ARRAY_DIVISOR_ANGLE);
@@ -1438,6 +1464,11 @@ void FeatureInfo::InitializeFeatures() {
feature_flags_.angle_robust_resource_initialization =
gl::HasExtension(extensions, "GL_ANGLE_robust_resource_initialization");
feature_flags_.nv_fence = gl::HasExtension(extensions, "GL_NV_fence");
+
+ // UnpremultiplyAndDitherCopyCHROMIUM is only implemented on the full decoder.
+ feature_flags_.unpremultiply_and_dither_copy = !is_passthrough_cmd_decoder_;
+ if (feature_flags_.unpremultiply_and_dither_copy)
+ AddExtensionString("GL_CHROMIUM_unpremultiply_and_dither_copy");
}
void FeatureInfo::InitializeFloatAndHalfFloatFeatures(
diff --git a/chromium/gpu/command_buffer/service/feature_info.h b/chromium/gpu/command_buffer/service/feature_info.h
index 2548add7ff0..d11afc0e665 100644
--- a/chromium/gpu/command_buffer/service/feature_info.h
+++ b/chromium/gpu/command_buffer/service/feature_info.h
@@ -93,6 +93,8 @@ class GPU_GLES2_EXPORT FeatureInfo : public base::RefCounted<FeatureInfo> {
bool ext_texture_norm16 = false;
bool chromium_image_ycbcr_420v = false;
bool chromium_image_ycbcr_422 = false;
+ bool chromium_image_xr30 = false;
+ bool chromium_image_xb30 = false;
bool emulate_primitive_restart_fixed_index = false;
bool ext_render_buffer_format_bgra8888 = false;
bool ext_multisample_compatibility = false;
@@ -124,6 +126,7 @@ class GPU_GLES2_EXPORT FeatureInfo : public base::RefCounted<FeatureInfo> {
bool chromium_texture_storage_image = false;
bool ext_window_rectangles = false;
bool chromium_gpu_fence = false;
+ bool unpremultiply_and_dither_copy = false;
};
FeatureInfo();
@@ -134,6 +137,7 @@ class GPU_GLES2_EXPORT FeatureInfo : public base::RefCounted<FeatureInfo> {
// Initializes the feature information. Needs a current GL context.
void Initialize(ContextType context_type,
+ bool is_passthrough_cmd_decoder,
const DisallowedFeatures& disallowed_features);
// Helper that defaults to no disallowed features and a GLES2 context.
@@ -214,6 +218,7 @@ class GPU_GLES2_EXPORT FeatureInfo : public base::RefCounted<FeatureInfo> {
DisallowedFeatures disallowed_features_;
ContextType context_type_ = CONTEXT_TYPE_OPENGLES2;
+ bool is_passthrough_cmd_decoder_ = false;
// The set of extensions returned by glGetString(GL_EXTENSIONS);
gl::ExtensionSet extensions_;
diff --git a/chromium/gpu/command_buffer/service/feature_info_unittest.cc b/chromium/gpu/command_buffer/service/feature_info_unittest.cc
index a03fe2c32ed..f7c020bc4e4 100644
--- a/chromium/gpu/command_buffer/service/feature_info_unittest.cc
+++ b/chromium/gpu/command_buffer/service/feature_info_unittest.cc
@@ -43,7 +43,11 @@ enum MockedGLVersionKind {
ES2_on_Version3_0,
ES2_on_Version3_2Compatibility,
ES3_on_Version3_0,
- ES3_on_Version3_2Compatibility
+ ES3_on_Version3_2Compatibility,
+
+ // Currently, nothing cares about both ES version and passthrough, so just
+ // create one representative passthrough case.
+ ES2_on_Version3_0_Passthrough
};
class FeatureInfoTest
@@ -58,6 +62,7 @@ class FeatureInfoTest
// OpenGL compatibility profile.
switch (GetParam()) {
case ES2_on_Version3_0:
+ case ES2_on_Version3_0_Passthrough:
case ES3_on_Version3_0:
SetupInitExpectationsWithGLVersion(extensions_str.c_str(), "", "3.0");
break;
@@ -77,6 +82,7 @@ class FeatureInfoTest
ContextType GetContextType() {
switch (GetParam()) {
case ES2_on_Version3_0:
+ case ES2_on_Version3_0_Passthrough:
case ES2_on_Version3_2Compatibility:
return CONTEXT_TYPE_OPENGLES2;
case ES3_on_Version3_0:
@@ -88,13 +94,29 @@ class FeatureInfoTest
}
}
+ bool IsPassthroughCmdDecoder() {
+ switch (GetParam()) {
+ case ES2_on_Version3_0_Passthrough:
+ return true;
+ case ES2_on_Version3_0:
+ case ES2_on_Version3_2Compatibility:
+ case ES3_on_Version3_0:
+ case ES3_on_Version3_2Compatibility:
+ return false;
+ default:
+ NOTREACHED();
+ return false;
+ }
+ }
+
void SetupInitExpectationsWithGLVersion(
const char* extensions, const char* renderer, const char* version) {
GpuServiceTest::SetUpWithGLVersion(version, extensions);
TestHelper::SetupFeatureInfoInitExpectationsWithGLVersion(
gl_.get(), extensions, renderer, version, GetContextType());
info_ = new FeatureInfo();
- info_->Initialize(GetContextType(), DisallowedFeatures());
+ info_->Initialize(GetContextType(), IsPassthroughCmdDecoder(),
+ DisallowedFeatures());
}
void SetupInitExpectationsWithGLVersionAndDisallowedFeatures(
@@ -106,7 +128,8 @@ class FeatureInfoTest
TestHelper::SetupFeatureInfoInitExpectationsWithGLVersion(
gl_.get(), extensions, renderer, version, GetContextType());
info_ = new FeatureInfo();
- info_->Initialize(GetContextType(), disallowed_features);
+ info_->Initialize(GetContextType(), IsPassthroughCmdDecoder(),
+ disallowed_features);
}
void SetupWithWorkarounds(const gpu::GpuDriverBugWorkarounds& workarounds) {
@@ -121,7 +144,8 @@ class FeatureInfoTest
TestHelper::SetupFeatureInfoInitExpectationsWithGLVersion(
gl_.get(), extensions, "", "", GetContextType());
info_ = new FeatureInfo(workarounds);
- info_->Initialize(GetContextType(), DisallowedFeatures());
+ info_->Initialize(GetContextType(), IsPassthroughCmdDecoder(),
+ DisallowedFeatures());
}
void SetupWithoutInit() {
@@ -1686,5 +1710,24 @@ TEST_P(FeatureInfoTest, InitializeCHROMIUM_ycbcr_422_imageTrue) {
EXPECT_TRUE(info_->feature_flags().chromium_image_ycbcr_422);
}
+TEST_P(FeatureInfoTest, InitializeCHROMIUM_unpremultiply_and_dither_copy) {
+ SetupInitExpectations("");
+ switch (GetParam()) {
+ case ES2_on_Version3_0_Passthrough:
+ EXPECT_FALSE(info_->feature_flags().unpremultiply_and_dither_copy);
+ EXPECT_FALSE(gl::HasExtension(
+ info_->extensions(), "GL_CHROMIUM_unpremultiply_and_dither_copy"));
+ break;
+ case ES2_on_Version3_0:
+ case ES2_on_Version3_2Compatibility:
+ case ES3_on_Version3_0:
+ case ES3_on_Version3_2Compatibility:
+ EXPECT_TRUE(info_->feature_flags().unpremultiply_and_dither_copy);
+ EXPECT_TRUE(gl::HasExtension(
+ info_->extensions(), "GL_CHROMIUM_unpremultiply_and_dither_copy"));
+ break;
+ }
+}
+
} // namespace gles2
} // namespace gpu
diff --git a/chromium/gpu/command_buffer/service/gl_utils.cc b/chromium/gpu/command_buffer/service/gl_utils.cc
index 468e5416515..5b9e33cbb37 100644
--- a/chromium/gpu/command_buffer/service/gl_utils.cc
+++ b/chromium/gpu/command_buffer/service/gl_utils.cc
@@ -265,15 +265,14 @@ const char* GetServiceShadingLanguageVersionString(
return "OpenGL ES GLSL ES 1.0 Chromium";
}
-static void APIENTRY LogGLDebugMessage(GLenum source,
- GLenum type,
- GLuint id,
- GLenum severity,
- GLsizei length,
- const GLchar* message,
- GLvoid* user_param) {
+void LogGLDebugMessage(GLenum source,
+ GLenum type,
+ GLuint id,
+ GLenum severity,
+ GLsizei length,
+ const GLchar* message,
+ Logger* error_logger) {
std::string id_string = GLES2Util::GetStringEnum(id);
- Logger* error_logger = static_cast<Logger*>(user_param);
if (type == GL_DEBUG_TYPE_ERROR && source == GL_DEBUG_SOURCE_API) {
error_logger->LogMessage(__FILE__, __LINE__,
" " + id_string + ": " + message);
@@ -286,7 +285,9 @@ static void APIENTRY LogGLDebugMessage(GLenum source,
}
}
-void InitializeGLDebugLogging(bool log_non_errors, Logger* error_logger) {
+void InitializeGLDebugLogging(bool log_non_errors,
+ GLDEBUGPROC callback,
+ const void* user_param) {
glEnable(GL_DEBUG_OUTPUT);
glEnable(GL_DEBUG_OUTPUT_SYNCHRONOUS);
@@ -305,7 +306,7 @@ void InitializeGLDebugLogging(bool log_non_errors, Logger* error_logger) {
GL_DEBUG_SEVERITY_NOTIFICATION, 0, nullptr, GL_FALSE);
}
- glDebugMessageCallback(&LogGLDebugMessage, error_logger);
+ glDebugMessageCallback(callback, user_param);
}
bool ValidContextLostReason(GLenum reason) {
diff --git a/chromium/gpu/command_buffer/service/gl_utils.h b/chromium/gpu/command_buffer/service/gl_utils.h
index 19d9b01009d..34da7e71fe9 100644
--- a/chromium/gpu/command_buffer/service/gl_utils.h
+++ b/chromium/gpu/command_buffer/service/gl_utils.h
@@ -80,7 +80,16 @@ const char* GetServiceVersionString(const FeatureInfo* feature_info);
const char* GetServiceShadingLanguageVersionString(
const FeatureInfo* feature_info);
-void InitializeGLDebugLogging(bool log_non_errors, Logger* error_logger);
+void LogGLDebugMessage(GLenum source,
+ GLenum type,
+ GLuint id,
+ GLenum severity,
+ GLsizei length,
+ const GLchar* message,
+ Logger* error_logger);
+void InitializeGLDebugLogging(bool log_non_errors,
+ GLDEBUGPROC callback,
+ const void* user_param);
bool ValidContextLostReason(GLenum reason);
error::ContextLostReason GetContextLostReasonFromResetStatus(
diff --git a/chromium/gpu/command_buffer/service/gles2_cmd_apply_framebuffer_attachment_cmaa_intel.cc b/chromium/gpu/command_buffer/service/gles2_cmd_apply_framebuffer_attachment_cmaa_intel.cc
index 78c8850deb0..7f821fb796b 100644
--- a/chromium/gpu/command_buffer/service/gles2_cmd_apply_framebuffer_attachment_cmaa_intel.cc
+++ b/chromium/gpu/command_buffer/service/gles2_cmd_apply_framebuffer_attachment_cmaa_intel.cc
@@ -282,7 +282,7 @@ void ApplyFramebufferAttachmentCMAAINTELResourceManager::
GL_RGBA8, GL_TEXTURE_2D, source_texture, 0,
internal_format, 0, 0, 0, 0, width_, height_,
width_, height_, width_, height_, false, false,
- false, method, nullptr);
+ false, false, method, nullptr);
} else {
ApplyCMAAEffectTexture(source_texture, source_texture, do_copy);
}
diff --git a/chromium/gpu/command_buffer/service/gles2_cmd_copy_texture_chromium.cc b/chromium/gpu/command_buffer/service/gles2_cmd_copy_texture_chromium.cc
index 92ed35cd682..85087b27c68 100644
--- a/chromium/gpu/command_buffer/service/gles2_cmd_copy_texture_chromium.cc
+++ b/chromium/gpu/command_buffer/service/gles2_cmd_copy_texture_chromium.cc
@@ -79,9 +79,11 @@ enum {
NUM_D_FORMAT
};
+const unsigned kAlphaSize = 4;
+const unsigned kDitherSize = 2;
const unsigned kNumVertexShaders = NUM_SAMPLERS;
const unsigned kNumFragmentShaders =
- 4 * NUM_SAMPLERS * NUM_S_FORMAT * NUM_D_FORMAT;
+ kAlphaSize * kDitherSize * NUM_SAMPLERS * NUM_S_FORMAT * NUM_D_FORMAT;
typedef unsigned ShaderId;
@@ -108,16 +110,19 @@ ShaderId GetVertexShaderId(GLenum target) {
// the premultiply alpha pixel store settings and target.
ShaderId GetFragmentShaderId(bool premultiply_alpha,
bool unpremultiply_alpha,
+ bool dither,
GLenum target,
GLenum source_format,
GLenum dest_format) {
unsigned alphaIndex = 0;
+ unsigned ditherIndex = 0;
unsigned targetIndex = 0;
unsigned sourceFormatIndex = 0;
unsigned destFormatIndex = 0;
alphaIndex = (premultiply_alpha ? (1 << 0) : 0) |
(unpremultiply_alpha ? (1 << 1) : 0);
+ ditherIndex = dither ? 1 : 0;
switch (target) {
case GL_TEXTURE_2D:
@@ -280,8 +285,11 @@ ShaderId GetFragmentShaderId(bool premultiply_alpha,
break;
}
- return alphaIndex + targetIndex * 4 + sourceFormatIndex * 4 * NUM_SAMPLERS +
- destFormatIndex * 4 * NUM_SAMPLERS * NUM_S_FORMAT;
+ return alphaIndex + ditherIndex * kAlphaSize +
+ targetIndex * kAlphaSize * kDitherSize +
+ sourceFormatIndex * kAlphaSize * kDitherSize * NUM_SAMPLERS +
+ destFormatIndex * kAlphaSize * kDitherSize * NUM_SAMPLERS *
+ NUM_S_FORMAT;
}
const char* kShaderPrecisionPreamble =
@@ -342,6 +350,7 @@ std::string GetVertexShaderSource(const gl::GLVersionInfo& gl_version_info,
std::string GetFragmentShaderSource(const gl::GLVersionInfo& gl_version_info,
bool premultiply_alpha,
bool unpremultiply_alpha,
+ bool dither,
bool nv_egl_stream_consumer_external,
GLenum target,
GLenum source_format,
@@ -441,6 +450,27 @@ std::string GetFragmentShaderSource(const gl::GLVersionInfo& gl_version_info,
source += " }\n";
}
+ // Dither after moving us to our desired alpha format.
+ if (dither) {
+ // Simulate a 4x4 dither pattern using mod/step. This code was tested for
+ // performance in Skia.
+ source +=
+ " float range = 1.0 / 15.0;\n"
+ " vec4 modValues = mod(gl_FragCoord.xyxy, vec4(2.0, 2.0, 4.0, 4.0));\n"
+ " vec4 stepValues = step(modValues, vec4(1.0, 1.0, 2.0, 2.0));\n"
+ " float dither_value = \n"
+ " dot(stepValues, \n"
+ " vec4(8.0 / 16.0, 4.0 / 16.0, 2.0 / 16.0, 1.0 / 16.0)) -\n"
+ " 15.0 / 32.0;\n";
+ // Apply the dither offset to the color. Only dither alpha if non-opaque.
+ source +=
+ " if (color.a < 1.0) {\n"
+ " color += dither_value * range;\n"
+ " } else {\n"
+ " color.rgb += dither_value * range;\n"
+ " }\n";
+ }
+
source += " FRAGCOLOR = TextureType(color * ScaleValue);\n";
// Main function end.
@@ -928,6 +958,7 @@ void CopyTextureCHROMIUMResourceManager::DoCopyTexture(
bool flip_y,
bool premultiply_alpha,
bool unpremultiply_alpha,
+ bool dither,
CopyTextureMethod method,
gpu::gles2::CopyTexImageResourceManager* luma_emulation_blitter) {
if (method == DIRECT_COPY) {
@@ -968,7 +999,7 @@ void CopyTextureCHROMIUMResourceManager::DoCopyTexture(
source_internal_format, dest_target, dest_texture,
dest_level, dest_internal_format, width, height,
flip_y, premultiply_alpha, unpremultiply_alpha,
- kIdentityMatrix, luma_emulation_blitter);
+ dither, kIdentityMatrix, luma_emulation_blitter);
if (method == DRAW_AND_COPY || method == DRAW_AND_READBACK) {
source_level = 0;
@@ -1010,6 +1041,7 @@ void CopyTextureCHROMIUMResourceManager::DoCopySubTexture(
bool flip_y,
bool premultiply_alpha,
bool unpremultiply_alpha,
+ bool dither,
CopyTextureMethod method,
gpu::gles2::CopyTexImageResourceManager* luma_emulation_blitter) {
if (method == DIRECT_COPY) {
@@ -1056,7 +1088,7 @@ void CopyTextureCHROMIUMResourceManager::DoCopySubTexture(
decoder, source_target, source_id, source_level, source_internal_format,
dest_target, dest_texture, dest_level, dest_internal_format, dest_xoffset,
dest_yoffset, x, y, width, height, dest_width, dest_height, source_width,
- source_height, flip_y, premultiply_alpha, unpremultiply_alpha,
+ source_height, flip_y, premultiply_alpha, unpremultiply_alpha, dither,
kIdentityMatrix, luma_emulation_blitter);
if (method == DRAW_AND_COPY || method == DRAW_AND_READBACK) {
@@ -1101,13 +1133,14 @@ void CopyTextureCHROMIUMResourceManager::DoCopySubTextureWithTransform(
bool flip_y,
bool premultiply_alpha,
bool unpremultiply_alpha,
+ bool dither,
const GLfloat transform_matrix[16],
gpu::gles2::CopyTexImageResourceManager* luma_emulation_blitter) {
DoCopyTextureInternal(
decoder, source_target, source_id, source_level, source_internal_format,
dest_target, dest_id, dest_level, dest_internal_format, xoffset, yoffset,
x, y, width, height, dest_width, dest_height, source_width, source_height,
- flip_y, premultiply_alpha, unpremultiply_alpha, transform_matrix,
+ flip_y, premultiply_alpha, unpremultiply_alpha, dither, transform_matrix,
luma_emulation_blitter);
}
@@ -1126,6 +1159,7 @@ void CopyTextureCHROMIUMResourceManager::DoCopyTextureWithTransform(
bool flip_y,
bool premultiply_alpha,
bool unpremultiply_alpha,
+ bool dither,
const GLfloat transform_matrix[16],
gpu::gles2::CopyTexImageResourceManager* luma_emulation_blitter) {
GLsizei dest_width = width;
@@ -1134,7 +1168,7 @@ void CopyTextureCHROMIUMResourceManager::DoCopyTextureWithTransform(
decoder, source_target, source_id, source_level, source_format,
dest_target, dest_id, dest_level, dest_format, 0, 0, 0, 0, width, height,
dest_width, dest_height, width, height, flip_y, premultiply_alpha,
- unpremultiply_alpha, transform_matrix, luma_emulation_blitter);
+ unpremultiply_alpha, dither, transform_matrix, luma_emulation_blitter);
}
void CopyTextureCHROMIUMResourceManager::DoCopyTextureInternal(
@@ -1160,6 +1194,7 @@ void CopyTextureCHROMIUMResourceManager::DoCopyTextureInternal(
bool flip_y,
bool premultiply_alpha,
bool unpremultiply_alpha,
+ bool dither,
const GLfloat transform_matrix[16],
gpu::gles2::CopyTexImageResourceManager* luma_emulation_blitter) {
DCHECK(source_target == GL_TEXTURE_2D ||
@@ -1198,9 +1233,9 @@ void CopyTextureCHROMIUMResourceManager::DoCopyTextureInternal(
ShaderId vertex_shader_id = GetVertexShaderId(source_target);
DCHECK_LT(static_cast<size_t>(vertex_shader_id), vertex_shaders_.size());
- ShaderId fragment_shader_id = GetFragmentShaderId(
- premultiply_alpha, unpremultiply_alpha, source_target,
- source_format, dest_format);
+ ShaderId fragment_shader_id =
+ GetFragmentShaderId(premultiply_alpha, unpremultiply_alpha, dither,
+ source_target, source_format, dest_format);
DCHECK_LT(static_cast<size_t>(fragment_shader_id), fragment_shaders_.size());
ProgramMapKey key(fragment_shader_id);
@@ -1220,7 +1255,7 @@ void CopyTextureCHROMIUMResourceManager::DoCopyTextureInternal(
if (!*fragment_shader) {
*fragment_shader = glCreateShader(GL_FRAGMENT_SHADER);
std::string source = GetFragmentShaderSource(
- gl_version_info, premultiply_alpha, unpremultiply_alpha,
+ gl_version_info, premultiply_alpha, unpremultiply_alpha, dither,
nv_egl_stream_consumer_external_, source_target, source_format,
dest_format);
CompileShader(*fragment_shader, source.c_str());
diff --git a/chromium/gpu/command_buffer/service/gles2_cmd_copy_texture_chromium.h b/chromium/gpu/command_buffer/service/gles2_cmd_copy_texture_chromium.h
index e17b29ea729..b36c057adad 100644
--- a/chromium/gpu/command_buffer/service/gles2_cmd_copy_texture_chromium.h
+++ b/chromium/gpu/command_buffer/service/gles2_cmd_copy_texture_chromium.h
@@ -67,6 +67,7 @@ class GPU_GLES2_EXPORT CopyTextureCHROMIUMResourceManager {
bool flip_y,
bool premultiply_alpha,
bool unpremultiply_alpha,
+ bool dither,
CopyTextureMethod method,
CopyTexImageResourceManager* luma_emulation_blitter);
@@ -92,6 +93,7 @@ class GPU_GLES2_EXPORT CopyTextureCHROMIUMResourceManager {
bool flip_y,
bool premultiply_alpha,
bool unpremultiply_alpha,
+ bool dither,
CopyTextureMethod method,
CopyTexImageResourceManager* luma_emulation_blitter);
@@ -118,6 +120,7 @@ class GPU_GLES2_EXPORT CopyTextureCHROMIUMResourceManager {
bool flip_y,
bool premultiply_alpha,
bool unpremultiply_alpha,
+ bool dither,
const GLfloat transform_matrix[16],
CopyTexImageResourceManager* luma_emulation_blitter);
@@ -140,6 +143,7 @@ class GPU_GLES2_EXPORT CopyTextureCHROMIUMResourceManager {
bool flip_y,
bool premultiply_alpha,
bool unpremultiply_alpha,
+ bool dither,
const GLfloat transform_matrix[16],
CopyTexImageResourceManager* luma_emulation_blitter);
@@ -196,6 +200,7 @@ class GPU_GLES2_EXPORT CopyTextureCHROMIUMResourceManager {
bool flip_y,
bool premultiply_alpha,
bool unpremultiply_alpha,
+ bool dither,
const GLfloat transform_matrix[16],
CopyTexImageResourceManager* luma_emulation_blitter);
diff --git a/chromium/gpu/command_buffer/service/gles2_cmd_decoder.cc b/chromium/gpu/command_buffer/service/gles2_cmd_decoder.cc
index bb809b0ba7e..6937c93388b 100644
--- a/chromium/gpu/command_buffer/service/gles2_cmd_decoder.cc
+++ b/chromium/gpu/command_buffer/service/gles2_cmd_decoder.cc
@@ -27,6 +27,7 @@
#include "base/strings/stringprintf.h"
#include "base/trace_event/trace_event.h"
#include "build/build_config.h"
+#include "cc/paint/color_space_transfer_cache_entry.h"
#include "cc/paint/paint_op_buffer.h"
#include "cc/paint/transfer_cache_entry.h"
#include "gpu/command_buffer/common/debug_marker_manager.h"
@@ -75,6 +76,7 @@
#include "gpu/command_buffer/service/vertex_attrib_manager.h"
#include "third_party/angle/src/image_util/loadimage.h"
#include "third_party/skia/include/core/SkCanvas.h"
+#include "third_party/skia/include/core/SkColorSpaceXformCanvas.h"
#include "third_party/skia/include/core/SkSurface.h"
#include "third_party/skia/include/core/SkSurfaceProps.h"
#include "third_party/skia/include/core/SkTypeface.h"
@@ -97,6 +99,7 @@
#include "ui/gl/gl_bindings.h"
#include "ui/gl/gl_context.h"
#include "ui/gl/gl_fence.h"
+#include "ui/gl/gl_gl_api_implementation.h"
#include "ui/gl/gl_image.h"
#include "ui/gl/gl_implementation.h"
#include "ui/gl/gl_surface.h"
@@ -204,7 +207,16 @@ bool AnyOtherBitsSet(GLbitfield bits, GLbitfield ref) {
return ((bits & mask) != 0);
}
-void EmptyPresentation(const gfx::PresentationFeedback&) {}
+void APIENTRY GLDebugMessageCallback(GLenum source,
+ GLenum type,
+ GLuint id,
+ GLenum severity,
+ GLsizei length,
+ const GLchar* message,
+ GLvoid* user_param) {
+ Logger* error_logger = static_cast<Logger*>(user_param);
+ LogGLDebugMessage(source, type, id, severity, length, message, error_logger);
+}
} // namespace
@@ -1993,7 +2005,8 @@ class GLES2DecoderImpl : public GLES2Decoder, public ErrorStateClient {
GLuint msaa_sample_count,
GLboolean can_use_lcd_text,
GLboolean use_distance_field_text,
- GLint pixel_config);
+ GLint color_type,
+ GLuint color_space_transfer_cache_id);
void DoRasterCHROMIUM(GLsizeiptr size, const void* list);
void DoEndRasterCHROMIUM();
@@ -2007,6 +2020,13 @@ class GLES2DecoderImpl : public GLES2Decoder, public ErrorStateClient {
void DoUnlockTransferCacheEntryINTERNAL(GLuint entry_type, GLuint entry_id);
void DoDeleteTransferCacheEntryINTERNAL(GLuint entry_type, GLuint entry_id);
+ void DoUnpremultiplyAndDitherCopyCHROMIUM(GLuint source_id,
+ GLuint dest_id,
+ GLint x,
+ GLint y,
+ GLsizei width,
+ GLsizei height);
+
void DoWindowRectanglesEXT(GLenum mode, GLsizei n, const volatile GLint* box);
// Returns false if textures were replaced.
@@ -2152,10 +2172,27 @@ class GLES2DecoderImpl : public GLES2Decoder, public ErrorStateClient {
GLenum dest_internal_format,
bool flip_y,
bool premultiply_alpha,
- bool unpremultiply_alpha);
+ bool unpremultiply_alpha,
+ bool dither);
bool ValidateCompressedCopyTextureCHROMIUM(const char* function_name,
TextureRef* source_texture_ref,
TextureRef* dest_texture_ref);
+ void CopySubTextureHelper(const char* function_name,
+ GLuint source_id,
+ GLint source_level,
+ GLenum dest_target,
+ GLuint dest_id,
+ GLint dest_level,
+ GLint xoffset,
+ GLint yoffset,
+ GLint x,
+ GLint y,
+ GLsizei width,
+ GLsizei height,
+ GLboolean unpack_flip_y,
+ GLboolean unpack_premultiply_alpha,
+ GLboolean unpack_unmultiply_alpha,
+ GLboolean dither);
void RenderWarning(const char* filename, int line, const std::string& msg);
void PerformanceWarning(
@@ -2377,6 +2414,8 @@ class GLES2DecoderImpl : public GLES2Decoder, public ErrorStateClient {
GLsizei offscreen_target_samples_;
GLboolean offscreen_target_buffer_preserved_;
+ GLint max_offscreen_framebuffer_size_;
+
// Whether or not offscreen color buffers exist in front/back pairs that
// can be swapped.
GLboolean offscreen_single_buffer_;
@@ -2578,6 +2617,7 @@ class GLES2DecoderImpl : public GLES2Decoder, public ErrorStateClient {
// Raster helpers.
sk_sp<GrContext> gr_context_;
sk_sp<SkSurface> sk_surface_;
+ std::unique_ptr<SkCanvas> raster_canvas_;
base::WeakPtrFactory<GLES2DecoderImpl> weak_ptr_factory_;
@@ -3197,6 +3237,7 @@ GLES2DecoderImpl::GLES2DecoderImpl(
offscreen_target_stencil_format_(0),
offscreen_target_samples_(0),
offscreen_target_buffer_preserved_(true),
+ max_offscreen_framebuffer_size_(0),
offscreen_single_buffer_(false),
offscreen_saved_color_format_(0),
offscreen_buffer_should_have_alpha_(false),
@@ -3551,6 +3592,10 @@ gpu::ContextResult GLES2DecoderImpl::Initialize(
? GL_RGBA
: GL_RGB;
+ max_offscreen_framebuffer_size_ =
+ std::min(renderbuffer_manager()->max_renderbuffer_size(),
+ texture_manager()->MaxSizeForTarget(GL_TEXTURE_2D));
+
gfx::Size initial_size = attrib_helper.offscreen_framebuffer_size;
if (initial_size.IsEmpty()) {
// If we're an offscreen surface with zero width and/or height, set to a
@@ -3779,10 +3824,6 @@ gpu::ContextResult GLES2DecoderImpl::Initialize(
supports_dc_layers_ = !offscreen && surface->SupportsDCLayers();
- if (workarounds().reverse_point_sprite_coord_origin) {
- api()->glPointParameteriFn(GL_POINT_SPRITE_COORD_ORIGIN, GL_LOWER_LEFT);
- }
-
if (workarounds().unbind_fbo_on_context_switch) {
context_->SetUnbindFboOnMakeCurrent();
}
@@ -3801,7 +3842,7 @@ gpu::ContextResult GLES2DecoderImpl::Initialize(
if (group_->gpu_preferences().enable_gpu_driver_debug_logging &&
feature_info_->feature_flags().khr_debug) {
- InitializeGLDebugLogging(true, &logger_);
+ InitializeGLDebugLogging(true, GLDebugMessageCallback, &logger_);
}
if (feature_info_->feature_flags().chromium_texture_filtering_hint &&
@@ -4029,6 +4070,8 @@ Capabilities GLES2DecoderImpl::GetCapabilities() {
caps.image_ycbcr_420v_disabled_for_video_frames =
group_->gpu_preferences()
.disable_biplanar_gpu_memory_buffers_for_video_frames;
+ caps.image_xr30 = feature_info_->feature_flags().chromium_image_xr30;
+ caps.image_xb30 = feature_info_->feature_flags().chromium_image_xb30;
caps.max_copy_texture_chromium_size =
workarounds().max_copy_texture_chromium_size;
caps.render_buffer_format_bgra8888 =
@@ -4054,6 +4097,8 @@ Capabilities GLES2DecoderImpl::GetCapabilities() {
feature_info_->feature_flags().chromium_texture_storage_image;
caps.supports_oop_raster = supports_oop_raster_;
caps.chromium_gpu_fence = feature_info_->feature_flags().chromium_gpu_fence;
+ caps.unpremultiply_and_dither_copy =
+ feature_info_->feature_flags().unpremultiply_and_dither_copy;
caps.texture_target_exception_list =
group_->gpu_preferences().texture_target_exception_list;
@@ -5313,7 +5358,8 @@ bool GLES2DecoderImpl::ResizeOffscreenFramebuffer(const gfx::Size& size) {
offscreen_size_ = size;
int w = offscreen_size_.width();
int h = offscreen_size_.height();
- if (w < 0 || h < 0 || h >= (INT_MAX / 4) / (w ? w : 1)) {
+ if (w < 0 || h < 0 || w > max_offscreen_framebuffer_size_ ||
+ h > max_offscreen_framebuffer_size_) {
LOG(ERROR) << "GLES2DecoderImpl::ResizeOffscreenFramebuffer failed "
<< "to allocate storage due to excessive dimensions.";
return false;
@@ -5430,8 +5476,12 @@ error::Error GLES2DecoderImpl::HandleResizeCHROMIUM(
GLboolean has_alpha = c.alpha;
TRACE_EVENT2("gpu", "glResizeChromium", "width", width, "height", height);
- width = std::max(1U, width);
- height = std::max(1U, height);
+ // gfx::Size uses integers, make sure width and height do not overflow
+ static_assert(sizeof(GLuint) >= sizeof(int), "Unexpected GLuint size.");
+ static const GLuint kMaxDimension =
+ static_cast<GLuint>(std::numeric_limits<int>::max());
+ width = std::min(std::max(1U, width), kMaxDimension);
+ height = std::min(std::max(1U, height), kMaxDimension);
gl::GLSurface::ColorSpace surface_color_space =
gl::GLSurface::ColorSpace::UNSPECIFIED;
@@ -6547,17 +6597,6 @@ void GLES2DecoderImpl::DoGenerateMipmap(GLenum target) {
}
LOCAL_COPY_REAL_GL_ERRORS_TO_WRAPPER("glGenerateMipmap");
- // Workaround for Mac driver bug. In the large scheme of things setting
- // glTexParamter twice for glGenerateMipmap is probably not a lage performance
- // hit so there's probably no need to make this conditional. The bug appears
- // to be that if the filtering mode is set to something that doesn't require
- // mipmaps for rendering, or is never set to something other than the default,
- // then glGenerateMipmap misbehaves.
- if (workarounds().set_texture_filter_before_generating_mipmap) {
- api()->glTexParameteriFn(target, GL_TEXTURE_MIN_FILTER,
- GL_NEAREST_MIPMAP_NEAREST);
- }
-
// Workaround for Mac driver bug. If the base level is non-zero but the zero
// level of a texture has not been set glGenerateMipmaps sets the entire mip
// chain to opaque black. If the zero level is set at all, however, the mip
@@ -6613,10 +6652,6 @@ void GLES2DecoderImpl::DoGenerateMipmap(GLenum target) {
nullptr);
}
- if (workarounds().set_texture_filter_before_generating_mipmap) {
- api()->glTexParameteriFn(target, GL_TEXTURE_MIN_FILTER,
- texture_ref->texture()->min_filter());
- }
GLenum error = LOCAL_PEEK_GL_ERROR("glGenerateMipmap");
if (error == GL_NO_ERROR) {
texture_manager()->MarkMipmapsGenerated(texture_ref);
@@ -11775,53 +11810,6 @@ void GLES2DecoderImpl::FinishReadPixels(GLsizei width,
if (result != NULL) {
result->success = 1;
}
-
- uint32_t channels_exist = GLES2Util::GetChannelsForFormat(read_format);
- if ((channels_exist & 0x0008) == 0 &&
- workarounds().clear_alpha_in_readpixels) {
- // Set the alpha to 255 because some drivers are buggy in this regard.
- uint32_t temp_size;
-
- uint32_t unpadded_row_size;
- uint32_t padded_row_size;
- if (!GLES2Util::ComputeImageDataSizes(
- width, 2, 1, format, type, pack_alignment, &temp_size,
- &unpadded_row_size, &padded_row_size)) {
- return;
- }
-
- uint32_t channel_count = 0;
- uint32_t alpha_channel = 0;
- switch (format) {
- case GL_RGBA:
- case GL_BGRA_EXT:
- channel_count = 4;
- alpha_channel = 3;
- break;
- case GL_ALPHA:
- channel_count = 1;
- alpha_channel = 0;
- break;
- }
-
- if (channel_count > 0) {
- switch (type) {
- case GL_UNSIGNED_BYTE:
- WriteAlphaData<uint8_t>(pixels, height, channel_count, alpha_channel,
- unpadded_row_size, padded_row_size, 0xFF);
- break;
- case GL_FLOAT:
- WriteAlphaData<float>(
- pixels, height, channel_count, alpha_channel, unpadded_row_size,
- padded_row_size, 1.0f);
- break;
- case GL_HALF_FLOAT:
- WriteAlphaData<uint16_t>(pixels, height, channel_count, alpha_channel,
- unpadded_row_size, padded_row_size, 0x3C00);
- break;
- }
- }
- }
}
error::Error GLES2DecoderImpl::HandleReadPixels(uint32_t immediate_data_size,
@@ -12268,8 +12256,7 @@ void GLES2DecoderImpl::DoSwapBuffersWithBoundsCHROMIUM(
bounds[i] = gfx::Rect(rects[i * 4 + 0], rects[i * 4 + 1], rects[i * 4 + 2],
rects[i * 4 + 3]);
}
- FinishSwapBuffers(
- surface_->SwapBuffersWithBounds(bounds, base::Bind(&EmptyPresentation)));
+ FinishSwapBuffers(surface_->SwapBuffersWithBounds(bounds, base::DoNothing()));
}
error::Error GLES2DecoderImpl::HandlePostSubBufferCHROMIUM(
@@ -12308,7 +12295,7 @@ error::Error GLES2DecoderImpl::HandlePostSubBufferCHROMIUM(
c.x, c.y, c.width, c.height,
base::Bind(&GLES2DecoderImpl::FinishAsyncSwapBuffers,
weak_ptr_factory_.GetWeakPtr()),
- base::Bind(&EmptyPresentation));
+ base::DoNothing());
} else {
// TODO(sunnyps): Remove Alias calls after crbug.com/724999 is fixed.
gl::GLContext* current = gl::GLContext::GetCurrent();
@@ -12320,7 +12307,7 @@ error::Error GLES2DecoderImpl::HandlePostSubBufferCHROMIUM(
bool is_current = context_->IsCurrent(surface_.get());
base::debug::Alias(&is_current);
FinishSwapBuffers(surface_->PostSubBuffer(c.x, c.y, c.width, c.height,
- base::Bind(&EmptyPresentation)));
+ base::DoNothing()));
}
return error::kNoError;
@@ -16106,7 +16093,7 @@ void GLES2DecoderImpl::DoSwapBuffers() {
surface_->SwapBuffersAsync(
base::Bind(&GLES2DecoderImpl::FinishAsyncSwapBuffers,
weak_ptr_factory_.GetWeakPtr()),
- base::Bind(&EmptyPresentation));
+ base::DoNothing());
} else {
// TODO(sunnyps): Remove Alias calls after crbug.com/724999 is fixed.
gl::GLContext* current = gl::GLContext::GetCurrent();
@@ -16117,7 +16104,7 @@ void GLES2DecoderImpl::DoSwapBuffers() {
base::debug::Alias(&context);
bool is_current = context_->IsCurrent(surface_.get());
base::debug::Alias(&is_current);
- FinishSwapBuffers(surface_->SwapBuffers(base::Bind(&EmptyPresentation)));
+ FinishSwapBuffers(surface_->SwapBuffers(base::DoNothing()));
}
// This may be a slow command. Exit command processing to allow for
@@ -16163,10 +16150,9 @@ void GLES2DecoderImpl::DoCommitOverlayPlanes() {
surface_->CommitOverlayPlanesAsync(
base::Bind(&GLES2DecoderImpl::FinishSwapBuffers,
weak_ptr_factory_.GetWeakPtr()),
- base::Bind(&EmptyPresentation));
+ base::DoNothing());
} else {
- FinishSwapBuffers(
- surface_->CommitOverlayPlanes(base::Bind(&EmptyPresentation)));
+ FinishSwapBuffers(surface_->CommitOverlayPlanes(base::DoNothing()));
}
}
@@ -16233,7 +16219,8 @@ error::Error GLES2DecoderImpl::HandleGetRequestableExtensionsCHROMIUM(
scoped_refptr<FeatureInfo> info(new FeatureInfo(workarounds()));
DisallowedFeatures disallowed_features = feature_info_->disallowed_features();
disallowed_features.AllowExtensions();
- info->Initialize(feature_info_->context_type(), disallowed_features);
+ info->Initialize(feature_info_->context_type(),
+ false /* is_passthrough_cmd_decoder */, disallowed_features);
bucket->SetFromString(gl::MakeExtensionString(info->extensions()).c_str());
return error::kNoError;
}
@@ -16737,7 +16724,7 @@ error::Error GLES2DecoderImpl::HandleBeginQueryEXT(
if (feature_info_->IsWebGL2OrES3Context()) {
break;
}
- // Fall through.
+ FALLTHROUGH;
default:
LOCAL_SET_GL_ERROR(
GL_INVALID_ENUM, "glBeginQueryEXT",
@@ -17178,7 +17165,8 @@ CopyTextureMethod GLES2DecoderImpl::getCopyTextureCHROMIUMMethod(
GLenum dest_internal_format,
bool flip_y,
bool premultiply_alpha,
- bool unpremultiply_alpha) {
+ bool unpremultiply_alpha,
+ bool dither) {
bool premultiply_alpha_change = premultiply_alpha ^ unpremultiply_alpha;
bool source_format_color_renderable =
Texture::ColorRenderable(GetFeatureInfo(), source_internal_format, false);
@@ -17239,7 +17227,7 @@ CopyTextureMethod GLES2DecoderImpl::getCopyTextureCHROMIUMMethod(
if (source_target == GL_TEXTURE_2D &&
(dest_target == GL_TEXTURE_2D || dest_target == GL_TEXTURE_CUBE_MAP) &&
source_format_color_renderable && copy_tex_image_format_valid &&
- source_level == 0 && !flip_y && !premultiply_alpha_change)
+ source_level == 0 && !flip_y && !premultiply_alpha_change && !dither)
return DIRECT_COPY;
if (dest_format_color_renderable && dest_level == 0 &&
dest_target != GL_TEXTURE_CUBE_MAP)
@@ -17472,8 +17460,8 @@ void GLES2DecoderImpl::DoCopyTextureCHROMIUM(
source_internal_format, dest_target, dest_texture->service_id(),
dest_level, internal_format, source_width, source_height,
unpack_flip_y == GL_TRUE, unpack_premultiply_alpha == GL_TRUE,
- unpack_unmultiply_alpha == GL_TRUE, transform_matrix,
- copy_tex_image_blit_.get());
+ unpack_unmultiply_alpha == GL_TRUE, false /* dither */,
+ transform_matrix, copy_tex_image_blit_.get());
return;
}
}
@@ -17482,44 +17470,43 @@ void GLES2DecoderImpl::DoCopyTextureCHROMIUM(
source_target, source_level, source_internal_format, source_type,
dest_binding_target, dest_level, internal_format,
unpack_flip_y == GL_TRUE, unpack_premultiply_alpha == GL_TRUE,
- unpack_unmultiply_alpha == GL_TRUE);
+ unpack_unmultiply_alpha == GL_TRUE, false /* dither */);
copy_texture_CHROMIUM_->DoCopyTexture(
this, source_target, source_texture->service_id(), source_level,
source_internal_format, dest_target, dest_texture->service_id(),
dest_level, internal_format, source_width, source_height,
unpack_flip_y == GL_TRUE, unpack_premultiply_alpha == GL_TRUE,
- unpack_unmultiply_alpha == GL_TRUE, method, copy_tex_image_blit_.get());
-}
-
-void GLES2DecoderImpl::DoCopySubTextureCHROMIUM(
- GLuint source_id,
- GLint source_level,
- GLenum dest_target,
- GLuint dest_id,
- GLint dest_level,
- GLint xoffset,
- GLint yoffset,
- GLint x,
- GLint y,
- GLsizei width,
- GLsizei height,
- GLboolean unpack_flip_y,
- GLboolean unpack_premultiply_alpha,
- GLboolean unpack_unmultiply_alpha) {
- TRACE_EVENT0("gpu", "GLES2DecoderImpl::DoCopySubTextureCHROMIUM");
-
- static const char kFunctionName[] = "glCopySubTextureCHROMIUM";
+ unpack_unmultiply_alpha == GL_TRUE, false /* dither */, method,
+ copy_tex_image_blit_.get());
+}
+
+void GLES2DecoderImpl::CopySubTextureHelper(const char* function_name,
+ GLuint source_id,
+ GLint source_level,
+ GLenum dest_target,
+ GLuint dest_id,
+ GLint dest_level,
+ GLint xoffset,
+ GLint yoffset,
+ GLint x,
+ GLint y,
+ GLsizei width,
+ GLsizei height,
+ GLboolean unpack_flip_y,
+ GLboolean unpack_premultiply_alpha,
+ GLboolean unpack_unmultiply_alpha,
+ GLboolean dither) {
TextureRef* source_texture_ref = GetTexture(source_id);
TextureRef* dest_texture_ref = GetTexture(dest_id);
if (!ValidateCopyTextureCHROMIUMTextures(
- kFunctionName, dest_target, source_texture_ref, dest_texture_ref)) {
+ function_name, dest_target, source_texture_ref, dest_texture_ref)) {
return;
}
if (source_level < 0 || dest_level < 0 ||
(feature_info_->IsWebGL1OrES2Context() && source_level > 0)) {
- LOCAL_SET_GL_ERROR(GL_INVALID_VALUE, kFunctionName,
+ LOCAL_SET_GL_ERROR(GL_INVALID_VALUE, function_name,
"source_level or dest_level out of range");
return;
}
@@ -17537,7 +17524,7 @@ void GLES2DecoderImpl::DoCopySubTextureCHROMIUM(
source_width = size.width();
source_height = size.height();
if (source_width <= 0 || source_height <= 0) {
- LOCAL_SET_GL_ERROR(GL_INVALID_VALUE, kFunctionName, "invalid image size");
+ LOCAL_SET_GL_ERROR(GL_INVALID_VALUE, function_name, "invalid image size");
return;
}
@@ -17551,14 +17538,14 @@ void GLES2DecoderImpl::DoCopySubTextureCHROMIUM(
int32_t max_y;
if (!SafeAddInt32(x, width, &max_x) || !SafeAddInt32(y, height, &max_y) ||
x < 0 || y < 0 || max_x > source_width || max_y > source_height) {
- LOCAL_SET_GL_ERROR(GL_INVALID_VALUE, kFunctionName,
+ LOCAL_SET_GL_ERROR(GL_INVALID_VALUE, function_name,
"source texture bad dimensions");
return;
}
} else {
if (!source_texture->GetLevelSize(source_target, source_level,
&source_width, &source_height, nullptr)) {
- LOCAL_SET_GL_ERROR(GL_INVALID_VALUE, kFunctionName,
+ LOCAL_SET_GL_ERROR(GL_INVALID_VALUE, function_name,
"source texture has no data for level");
return;
}
@@ -17566,14 +17553,14 @@ void GLES2DecoderImpl::DoCopySubTextureCHROMIUM(
// Check that this type of texture is allowed.
if (!texture_manager()->ValidForTarget(source_target, source_level,
source_width, source_height, 1)) {
- LOCAL_SET_GL_ERROR(GL_INVALID_VALUE, kFunctionName,
+ LOCAL_SET_GL_ERROR(GL_INVALID_VALUE, function_name,
"source texture bad dimensions");
return;
}
if (!source_texture->ValidForTexture(source_target, source_level, x, y, 0,
width, height, 1)) {
- LOCAL_SET_GL_ERROR(GL_INVALID_VALUE, kFunctionName,
+ LOCAL_SET_GL_ERROR(GL_INVALID_VALUE, function_name,
"source texture bad dimensions.");
return;
}
@@ -17589,19 +17576,19 @@ void GLES2DecoderImpl::DoCopySubTextureCHROMIUM(
bool dest_level_defined = dest_texture->GetLevelType(
dest_target, dest_level, &dest_type, &dest_internal_format);
if (!dest_level_defined) {
- LOCAL_SET_GL_ERROR(GL_INVALID_OPERATION, kFunctionName,
+ LOCAL_SET_GL_ERROR(GL_INVALID_OPERATION, function_name,
"destination texture is not defined");
return;
}
if (!dest_texture->ValidForTexture(dest_target, dest_level, xoffset, yoffset,
0, width, height, 1)) {
- LOCAL_SET_GL_ERROR(GL_INVALID_VALUE, kFunctionName,
+ LOCAL_SET_GL_ERROR(GL_INVALID_VALUE, function_name,
"destination texture bad dimensions.");
return;
}
if (!ValidateCopyTextureCHROMIUMInternalFormats(
- kFunctionName, source_internal_format, dest_internal_format)) {
+ function_name, source_internal_format, dest_internal_format)) {
return;
}
@@ -17617,12 +17604,12 @@ void GLES2DecoderImpl::DoCopySubTextureCHROMIUM(
// Clear the source texture if necessary.
if (!texture_manager()->ClearTextureLevel(this, source_texture_ref,
source_target, source_level)) {
- LOCAL_SET_GL_ERROR(GL_OUT_OF_MEMORY, kFunctionName,
+ LOCAL_SET_GL_ERROR(GL_OUT_OF_MEMORY, function_name,
"source texture dimensions too big");
return;
}
- if (!InitializeCopyTextureCHROMIUM(kFunctionName))
+ if (!InitializeCopyTextureCHROMIUM(function_name))
return;
int dest_width = 0;
@@ -17646,7 +17633,7 @@ void GLES2DecoderImpl::DoCopySubTextureCHROMIUM(
// Otherwise clear part of texture level that is not already cleared.
if (!texture_manager()->ClearTextureLevel(this, dest_texture_ref,
dest_target, dest_level)) {
- LOCAL_SET_GL_ERROR(GL_OUT_OF_MEMORY, kFunctionName,
+ LOCAL_SET_GL_ERROR(GL_OUT_OF_MEMORY, function_name,
"destination texture dimensions too big");
return;
}
@@ -17661,7 +17648,8 @@ void GLES2DecoderImpl::DoCopySubTextureCHROMIUM(
(unpack_premultiply_alpha ^ unpack_unmultiply_alpha) != 0;
// TODO(qiankun.miao@intel.com): Support level > 0 for CopyTexSubImage.
if (image && dest_internal_format == source_internal_format &&
- dest_level == 0 && !unpack_flip_y && !unpack_premultiply_alpha_change) {
+ dest_level == 0 && !unpack_flip_y && !unpack_premultiply_alpha_change &&
+ !dither) {
ScopedTextureBinder binder(&state_, dest_texture->service_id(),
dest_binding_target);
if (image->CopyTexSubImage(dest_target, gfx::Point(xoffset, yoffset),
@@ -17686,8 +17674,8 @@ void GLES2DecoderImpl::DoCopySubTextureCHROMIUM(
dest_level, dest_internal_format, xoffset, yoffset, x, y, width,
height, dest_width, dest_height, source_width, source_height,
unpack_flip_y == GL_TRUE, unpack_premultiply_alpha == GL_TRUE,
- unpack_unmultiply_alpha == GL_TRUE, transform_matrix,
- copy_tex_image_blit_.get());
+ unpack_unmultiply_alpha == GL_TRUE, dither == GL_TRUE,
+ transform_matrix, copy_tex_image_blit_.get());
return;
}
}
@@ -17696,12 +17684,13 @@ void GLES2DecoderImpl::DoCopySubTextureCHROMIUM(
source_target, source_level, source_internal_format, source_type,
dest_binding_target, dest_level, dest_internal_format,
unpack_flip_y == GL_TRUE, unpack_premultiply_alpha == GL_TRUE,
- unpack_unmultiply_alpha == GL_TRUE);
+ unpack_unmultiply_alpha == GL_TRUE, dither == GL_TRUE);
#if defined(OS_CHROMEOS) && defined(ARCH_CPU_X86_FAMILY)
// glDrawArrays is faster than glCopyTexSubImage2D on IA Mesa driver,
// although opposite in Android.
// TODO(dshwang): After Mesa fixes this issue, remove this hack.
- // https://bugs.freedesktop.org/show_bug.cgi?id=98478, crbug.com/535198.
+ // https://bugs.freedesktop.org/show_bug.cgi?id=98478,
+ // https://crbug.com/535198.
if (Texture::ColorRenderable(GetFeatureInfo(), dest_internal_format,
dest_texture->IsImmutable()) &&
method == DIRECT_COPY) {
@@ -17715,7 +17704,31 @@ void GLES2DecoderImpl::DoCopySubTextureCHROMIUM(
dest_level, dest_internal_format, xoffset, yoffset, x, y, width, height,
dest_width, dest_height, source_width, source_height,
unpack_flip_y == GL_TRUE, unpack_premultiply_alpha == GL_TRUE,
- unpack_unmultiply_alpha == GL_TRUE, method, copy_tex_image_blit_.get());
+ unpack_unmultiply_alpha == GL_TRUE, dither == GL_TRUE, method,
+ copy_tex_image_blit_.get());
+}
+
+void GLES2DecoderImpl::DoCopySubTextureCHROMIUM(
+ GLuint source_id,
+ GLint source_level,
+ GLenum dest_target,
+ GLuint dest_id,
+ GLint dest_level,
+ GLint xoffset,
+ GLint yoffset,
+ GLint x,
+ GLint y,
+ GLsizei width,
+ GLsizei height,
+ GLboolean unpack_flip_y,
+ GLboolean unpack_premultiply_alpha,
+ GLboolean unpack_unmultiply_alpha) {
+ TRACE_EVENT0("gpu", "GLES2DecoderImpl::DoCopySubTextureCHROMIUM");
+ static const char kFunctionName[] = "glCopySubTextureCHROMIUM";
+ CopySubTextureHelper(kFunctionName, source_id, source_level, dest_target,
+ dest_id, dest_level, xoffset, yoffset, x, y, width,
+ height, unpack_flip_y, unpack_premultiply_alpha,
+ unpack_unmultiply_alpha, GL_FALSE /* dither */);
}
bool GLES2DecoderImpl::InitializeCopyTexImageBlitter(
@@ -17904,7 +17917,7 @@ void GLES2DecoderImpl::DoCompressedCopyTextureCHROMIUM(GLuint source_id,
this, source_texture->target(), source_texture->service_id(), 0,
source_internal_format, dest_texture->target(),
dest_texture->service_id(), 0, GL_RGBA, source_width, source_height,
- false, false, false, DIRECT_DRAW, copy_tex_image_blit_.get());
+ false, false, false, false, DIRECT_DRAW, copy_tex_image_blit_.get());
}
void GLES2DecoderImpl::TexStorageImpl(GLenum target,
@@ -18003,7 +18016,13 @@ void GLES2DecoderImpl::TexStorageImpl(GLenum target,
}
}
- GLenum compatibility_internal_format = internal_format;
+ // First lookup compatibility format via texture manager for swizzling legacy
+ // LUMINANCE/ALPHA formats.
+ GLenum compatibility_internal_format =
+ texture_manager()->AdjustTexStorageFormat(feature_info_.get(),
+ internal_format);
+
+ // Then lookup compatibility format for compressed formats.
const CompressedFormatInfo* format_info =
GetCompressedFormatInfo(internal_format);
if (format_info != nullptr && !format_info->support_check(*feature_info_)) {
@@ -18054,6 +18073,7 @@ void GLES2DecoderImpl::TexStorageImpl(GLenum target,
if (target == GL_TEXTURE_3D)
level_depth = std::max(1, level_depth >> 1);
}
+ texture->ApplyFormatWorkarounds(feature_info_.get());
texture->SetImmutable(true);
}
}
@@ -20302,12 +20322,33 @@ error::Error GLES2DecoderImpl::HandleLockDiscardableTextureCHROMIUM(
return error::kNoError;
}
-void GLES2DecoderImpl::DoBeginRasterCHROMIUM(GLuint texture_id,
- GLuint sk_color,
- GLuint msaa_sample_count,
- GLboolean can_use_lcd_text,
- GLboolean use_distance_field_text,
- GLint pixel_config) {
+class TransferCacheDeserializeHelperImpl
+ : public cc::TransferCacheDeserializeHelper {
+ public:
+ explicit TransferCacheDeserializeHelperImpl(
+ ServiceTransferCache* transfer_cache)
+ : transfer_cache_(transfer_cache) {
+ DCHECK(transfer_cache_);
+ }
+ ~TransferCacheDeserializeHelperImpl() override = default;
+
+ private:
+ cc::ServiceTransferCacheEntry* GetEntryInternal(
+ cc::TransferCacheEntryType entry_type,
+ uint32_t entry_id) override {
+ return transfer_cache_->GetEntry(entry_type, entry_id);
+ }
+ ServiceTransferCache* transfer_cache_;
+};
+
+void GLES2DecoderImpl::DoBeginRasterCHROMIUM(
+ GLuint texture_id,
+ GLuint sk_color,
+ GLuint msaa_sample_count,
+ GLboolean can_use_lcd_text,
+ GLboolean use_distance_field_text,
+ GLint color_type,
+ GLuint color_space_transfer_cache_id) {
if (!gr_context_) {
LOCAL_SET_GL_ERROR(GL_INVALID_OPERATION, "glBeginRasterCHROMIUM",
"chromium_raster_transport not enabled via attribs");
@@ -20319,6 +20360,7 @@ void GLES2DecoderImpl::DoBeginRasterCHROMIUM(GLuint texture_id,
return;
}
+ DCHECK(!raster_canvas_);
gr_context_->resetContext();
// This function should look identical to
@@ -20356,32 +20398,46 @@ void GLES2DecoderImpl::DoBeginRasterCHROMIUM(GLuint texture_id,
return;
}
- switch (pixel_config) {
- case kRGBA_4444_GrPixelConfig:
- case kRGBA_8888_GrPixelConfig:
- case kSRGBA_8888_GrPixelConfig:
+ // GetInternalFormat may return a base internal format but Skia requires a
+ // sized internal format. So this may be adjusted below.
+ texture_info.fFormat = GetInternalFormat(&gl_version_info(), internal_format);
+ switch (color_type) {
+ case kARGB_4444_SkColorType:
+ if (internal_format != GL_RGBA4 && internal_format != GL_RGBA) {
+ LOCAL_SET_GL_ERROR(GL_INVALID_OPERATION, "glBeginRasterCHROMIUM",
+ "color type mismatch");
+ return;
+ }
+ if (texture_info.fFormat == GL_RGBA)
+ texture_info.fFormat = GL_RGBA4;
+ break;
+ case kRGBA_8888_SkColorType:
if (internal_format != GL_RGBA8_OES && internal_format != GL_RGBA) {
LOCAL_SET_GL_ERROR(GL_INVALID_OPERATION, "glBeginRasterCHROMIUM",
- "pixel config mismatch");
+ "color type mismatch");
return;
}
+ if (texture_info.fFormat == GL_RGBA)
+ texture_info.fFormat = GL_RGBA8_OES;
break;
- case kBGRA_8888_GrPixelConfig:
- case kSBGRA_8888_GrPixelConfig:
+ case kBGRA_8888_SkColorType:
if (internal_format != GL_BGRA_EXT && internal_format != GL_BGRA8_EXT) {
LOCAL_SET_GL_ERROR(GL_INVALID_OPERATION, "glBeginRasterCHROMIUM",
- "pixel config mismatch");
+ "color type mismatch");
return;
}
+ if (texture_info.fFormat == GL_BGRA_EXT)
+ texture_info.fFormat = GL_BGRA8_EXT;
+ if (texture_info.fFormat == GL_RGBA)
+ texture_info.fFormat = GL_RGBA8_OES;
break;
default:
LOCAL_SET_GL_ERROR(GL_INVALID_OPERATION, "glBeginRasterCHROMIUM",
- "unsupported pixel config");
+ "unsupported color type");
return;
}
- GrBackendTexture gr_texture(
- width, height, static_cast<GrPixelConfig>(pixel_config), texture_info);
+ GrBackendTexture gr_texture(width, height, GrMipMapped::kNo, texture_info);
uint32_t flags =
use_distance_field_text ? SkSurfaceProps::kUseDistanceFieldFonts_Flag : 0;
@@ -20393,12 +20449,14 @@ void GLES2DecoderImpl::DoBeginRasterCHROMIUM(GLuint texture_id,
SkSurfaceProps(flags, SkSurfaceProps::kLegacyFontHost_InitType);
}
+ SkColorType sk_color_type = static_cast<SkColorType>(color_type);
// Resolve requested msaa samples with GrGpu capabilities.
- int final_msaa_count = gr_context_->caps()->getSampleCount(
- msaa_sample_count, static_cast<GrPixelConfig>(pixel_config));
+ int final_msaa_count =
+ std::min(static_cast<int>(msaa_sample_count),
+ gr_context_->maxSurfaceSampleCountForColorType(sk_color_type));
sk_surface_ = SkSurface::MakeFromBackendTextureAsRenderTarget(
gr_context_.get(), gr_texture, kTopLeft_GrSurfaceOrigin, final_msaa_count,
- nullptr, &surface_props);
+ sk_color_type, nullptr, &surface_props);
if (!sk_surface_) {
LOCAL_SET_GL_ERROR(GL_INVALID_OPERATION, "glBeginRasterCHROMIUM",
@@ -20406,6 +20464,21 @@ void GLES2DecoderImpl::DoBeginRasterCHROMIUM(GLuint texture_id,
return;
}
+ TransferCacheDeserializeHelperImpl transfer_cache_deserializer(
+ transfer_cache_.get());
+ auto* color_space_entry =
+ transfer_cache_deserializer
+ .GetEntryAs<cc::ServiceColorSpaceTransferCacheEntry>(
+ color_space_transfer_cache_id);
+ if (!color_space_entry || !color_space_entry->color_space().IsValid()) {
+ LOCAL_SET_GL_ERROR(GL_INVALID_OPERATION, "glBeginRasterCHROMIUM",
+ "failed to find valid color space");
+ return;
+ }
+ raster_canvas_ = SkCreateColorSpaceXformCanvas(
+ sk_surface_->getCanvas(),
+ color_space_entry->color_space().ToSkColorSpace());
+
// All or nothing clearing, as no way to validate the client's input on what
// is the "used" part of the texture.
if (texture->IsLevelCleared(texture->target(), 0))
@@ -20414,29 +20487,10 @@ void GLES2DecoderImpl::DoBeginRasterCHROMIUM(GLuint texture_id,
// TODO(enne): this doesn't handle the case where the background color
// changes and so any extra pixels outside the raster area that get
// sampled may be incorrect.
- sk_surface_->getCanvas()->drawColor(sk_color);
+ raster_canvas_->drawColor(sk_color);
texture_manager()->SetLevelCleared(texture_ref, texture->target(), 0, true);
}
-class TransferCacheDeserializeHelperImpl
- : public cc::TransferCacheDeserializeHelper {
- public:
- explicit TransferCacheDeserializeHelperImpl(
- ServiceTransferCache* transfer_cache)
- : transfer_cache_(transfer_cache) {
- DCHECK(transfer_cache_);
- }
- ~TransferCacheDeserializeHelperImpl() override = default;
-
- private:
- cc::ServiceTransferCacheEntry* GetEntryInternal(
- cc::TransferCacheEntryType entry_type,
- uint32_t entry_id) override {
- return transfer_cache_->GetEntry(entry_type, entry_id);
- }
- ServiceTransferCache* transfer_cache_;
-};
-
void GLES2DecoderImpl::DoRasterCHROMIUM(GLsizeiptr size, const void* list) {
if (!sk_surface_) {
LOCAL_SET_GL_ERROR(GL_INVALID_OPERATION, "glRasterCHROMIUM",
@@ -20449,7 +20503,7 @@ void GLES2DecoderImpl::DoRasterCHROMIUM(GLsizeiptr size, const void* list) {
cc::PaintOpBuffer::PaintOpAlign) char data[sizeof(cc::LargestPaintOp)];
const char* buffer = static_cast<const char*>(list);
- SkCanvas* canvas = sk_surface_->getCanvas();
+ SkCanvas* canvas = raster_canvas_.get();
SkMatrix original_ctm;
cc::PlaybackParams playback_params(nullptr, original_ctm);
cc::PaintOp::DeserializeOptions options;
@@ -20484,6 +20538,7 @@ void GLES2DecoderImpl::DoEndRasterCHROMIUM() {
return;
}
+ raster_canvas_ = nullptr;
sk_surface_->prepareForExternalIO();
sk_surface_.reset();
@@ -20614,6 +20669,72 @@ void GLES2DecoderImpl::DoDeleteTransferCacheEntryINTERNAL(GLuint raw_entry_type,
}
}
+void GLES2DecoderImpl::DoUnpremultiplyAndDitherCopyCHROMIUM(GLuint source_id,
+ GLuint dest_id,
+ GLint x,
+ GLint y,
+ GLsizei width,
+ GLsizei height) {
+ TRACE_EVENT0("gpu", "GLES2DecoderImpl::DoUnpremultiplyAndDitherCopyCHROMIUM");
+ static const char kFunctionName[] = "glUnpremultiplyAndDitherCopyCHROMIUM";
+
+ // Do basic validation of our params. Because we don't rely on the caller to
+ // provide the targets / formats of src / dst, we read them here before
+ // forwarding to CopySubTextureHelper. This extension always deals with level
+ // 0.
+ const GLint kLevel = 0;
+
+ TextureRef* source_texture_ref = GetTexture(source_id);
+ TextureRef* dest_texture_ref = GetTexture(dest_id);
+ if (!source_texture_ref || !dest_texture_ref) {
+ LOCAL_SET_GL_ERROR(GL_INVALID_VALUE, kFunctionName, "unknown texture id");
+ return;
+ }
+
+ Texture* source_texture = source_texture_ref->texture();
+ GLenum source_target = source_texture->target();
+ Texture* dest_texture = dest_texture_ref->texture();
+ GLenum dest_target = dest_texture->target();
+ if ((source_target != GL_TEXTURE_2D &&
+ source_target != GL_TEXTURE_RECTANGLE_ARB &&
+ source_target != GL_TEXTURE_EXTERNAL_OES) ||
+ (dest_target != GL_TEXTURE_2D &&
+ dest_target != GL_TEXTURE_RECTANGLE_ARB)) {
+ LOCAL_SET_GL_ERROR(GL_INVALID_VALUE, kFunctionName,
+ "invalid texture target");
+ return;
+ }
+
+ GLenum source_type = 0;
+ GLenum source_internal_format = 0;
+ source_texture->GetLevelType(source_target, kLevel, &source_type,
+ &source_internal_format);
+
+ GLenum dest_type = 0;
+ GLenum dest_internal_format = 0;
+ dest_texture->GetLevelType(dest_target, kLevel, &dest_type,
+ &dest_internal_format);
+ GLenum format =
+ TextureManager::ExtractFormatFromStorageFormat(dest_internal_format);
+
+ if (format != GL_BGRA && format != GL_RGBA) {
+ LOCAL_SET_GL_ERROR(GL_INVALID_OPERATION, kFunctionName, "invalid format");
+ return;
+ }
+
+ if (dest_type != GL_UNSIGNED_SHORT_4_4_4_4) {
+ LOCAL_SET_GL_ERROR(GL_INVALID_OPERATION, kFunctionName,
+ "invalid destination type");
+ return;
+ }
+
+ CopySubTextureHelper(
+ kFunctionName, source_id, kLevel, dest_target, dest_id, kLevel, x, y, x,
+ y, width, height, GL_FALSE /* unpack_flip_y */,
+ GL_FALSE /* unpack_premultiply_alpha */,
+ GL_TRUE /* unpack_unmultiply_alpha */, GL_TRUE /* dither */);
+}
+
// Include the auto-generated part of this file. We split this because it means
// we can easily edit the non-auto generated parts right here in this file
// instead of having to edit some template or the code generator.
diff --git a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_autogen.h b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_autogen.h
index 05d738c98f8..0884f7b8552 100644
--- a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_autogen.h
+++ b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_autogen.h
@@ -4814,6 +4814,37 @@ error::Error GLES2DecoderImpl::HandleLoseContextCHROMIUM(
return error::kNoError;
}
+error::Error GLES2DecoderImpl::HandleUnpremultiplyAndDitherCopyCHROMIUM(
+ uint32_t immediate_data_size,
+ const volatile void* cmd_data) {
+ const volatile gles2::cmds::UnpremultiplyAndDitherCopyCHROMIUM& c =
+ *static_cast<
+ const volatile gles2::cmds::UnpremultiplyAndDitherCopyCHROMIUM*>(
+ cmd_data);
+ if (!features().unpremultiply_and_dither_copy) {
+ return error::kUnknownCommand;
+ }
+
+ GLuint source_id = static_cast<GLuint>(c.source_id);
+ GLuint dest_id = static_cast<GLuint>(c.dest_id);
+ GLint x = static_cast<GLint>(c.x);
+ GLint y = static_cast<GLint>(c.y);
+ GLsizei width = static_cast<GLsizei>(c.width);
+ GLsizei height = static_cast<GLsizei>(c.height);
+ if (width < 0) {
+ LOCAL_SET_GL_ERROR(GL_INVALID_VALUE, "glUnpremultiplyAndDitherCopyCHROMIUM",
+ "width < 0");
+ return error::kNoError;
+ }
+ if (height < 0) {
+ LOCAL_SET_GL_ERROR(GL_INVALID_VALUE, "glUnpremultiplyAndDitherCopyCHROMIUM",
+ "height < 0");
+ return error::kNoError;
+ }
+ DoUnpremultiplyAndDitherCopyCHROMIUM(source_id, dest_id, x, y, width, height);
+ return error::kNoError;
+}
+
error::Error GLES2DecoderImpl::HandleDrawBuffersEXTImmediate(
uint32_t immediate_data_size,
const volatile void* cmd_data) {
@@ -5150,10 +5181,12 @@ error::Error GLES2DecoderImpl::HandleBeginRasterCHROMIUM(
GLboolean can_use_lcd_text = static_cast<GLboolean>(c.can_use_lcd_text);
GLboolean use_distance_field_text =
static_cast<GLboolean>(c.use_distance_field_text);
- GLint pixel_config = static_cast<GLint>(c.pixel_config);
+ GLint color_type = static_cast<GLint>(c.color_type);
+ GLuint color_space_transfer_cache_id =
+ static_cast<GLuint>(c.color_space_transfer_cache_id);
DoBeginRasterCHROMIUM(texture_id, sk_color, msaa_sample_count,
- can_use_lcd_text, use_distance_field_text,
- pixel_config);
+ can_use_lcd_text, use_distance_field_text, color_type,
+ color_space_transfer_cache_id);
return error::kNoError;
}
diff --git a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough.cc b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough.cc
index af67c911cb9..0e877c7e686 100644
--- a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough.cc
+++ b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough.cc
@@ -76,6 +76,21 @@ void RequestExtensions(gl::GLApi* api,
}
}
+void APIENTRY GLDebugMessageCallback(GLenum source,
+ GLenum type,
+ GLuint id,
+ GLenum severity,
+ GLsizei length,
+ const GLchar* message,
+ GLvoid* user_param) {
+ DCHECK(user_param != nullptr);
+ GLES2DecoderPassthroughImpl* command_decoder =
+ static_cast<GLES2DecoderPassthroughImpl*>(const_cast<void*>(user_param));
+ command_decoder->OnDebugMessage(source, type, id, severity, length, message);
+ LogGLDebugMessage(source, type, id, severity, length, message,
+ command_decoder->GetLogger());
+}
+
} // anonymous namespace
PassthroughResources::PassthroughResources() : texture_object_map(nullptr) {}
@@ -353,6 +368,7 @@ void GLES2DecoderPassthroughImpl::EmulatedDefaultFramebuffer::Blit(
bool GLES2DecoderPassthroughImpl::EmulatedDefaultFramebuffer::Resize(
const gfx::Size& new_size,
const FeatureInfo* feature_info) {
+ DCHECK(!new_size.IsEmpty());
if (size == new_size) {
return true;
}
@@ -600,8 +616,11 @@ gpu::ContextResult GLES2DecoderPassthroughImpl::Initialize(
gl::GetRequestableGLExtensionsFromCurrentContext());
static constexpr const char* kRequiredFunctionalityExtensions[] = {
- "GL_CHROMIUM_bind_uniform_location", "GL_CHROMIUM_sync_query",
- "GL_EXT_debug_marker", "GL_NV_fence",
+ "GL_CHROMIUM_bind_uniform_location",
+ "GL_CHROMIUM_sync_query",
+ "GL_EXT_debug_marker",
+ "GL_KHR_debug",
+ "GL_NV_fence",
};
RequestExtensions(api(), requestable_extensions,
kRequiredFunctionalityExtensions,
@@ -655,7 +674,9 @@ gpu::ContextResult GLES2DecoderPassthroughImpl::Initialize(
// Each context initializes its own feature info because some extensions may
// be enabled dynamically. Don't disallow any features, leave it up to ANGLE
// to dynamically enable extensions.
- feature_info_->Initialize(attrib_helper.context_type, DisallowedFeatures());
+ feature_info_->Initialize(attrib_helper.context_type,
+ true /* is_passthrough_cmd_decoder */,
+ DisallowedFeatures());
// Check for required extensions
// TODO(geofflang): verify
@@ -668,7 +689,8 @@ gpu::ContextResult GLES2DecoderPassthroughImpl::Initialize(
api()->glIsEnabledFn(GL_CLIENT_ARRAYS_ANGLE) != GL_FALSE ||
feature_info_->feature_flags().angle_webgl_compatibility !=
IsWebGLContextType(attrib_helper.context_type) ||
- !feature_info_->feature_flags().angle_request_extension) {
+ !feature_info_->feature_flags().angle_request_extension ||
+ !feature_info_->feature_flags().khr_debug) {
Destroy(true);
LOG(ERROR) << "ContextResult::kFatalFailure: "
"missing required extension";
@@ -723,15 +745,11 @@ gpu::ContextResult GLES2DecoderPassthroughImpl::Initialize(
bound_buffers_[GL_DISPATCH_INDIRECT_BUFFER] = 0;
}
- if (feature_info_->feature_flags().khr_debug) {
- // For WebGL contexts, log GL errors so they appear in devtools. Otherwise
- // only enable debug logging if requested.
- bool log_non_errors =
- group_->gpu_preferences().enable_gpu_driver_debug_logging;
- if (IsWebGLContextType(attrib_helper.context_type) || log_non_errors) {
- InitializeGLDebugLogging(log_non_errors, &logger_);
- }
- }
+ // For WebGL contexts, log GL errors so they appear in devtools. Otherwise
+ // only enable debug logging if requested.
+ bool log_non_errors =
+ group_->gpu_preferences().enable_gpu_driver_debug_logging;
+ InitializeGLDebugLogging(log_non_errors, GLDebugMessageCallback, this);
if (feature_info_->feature_flags().chromium_texture_filtering_hint &&
feature_info_->feature_flags().is_swiftshader) {
@@ -744,6 +762,9 @@ gpu::ContextResult GLES2DecoderPassthroughImpl::Initialize(
attrib_helper.lose_context_when_out_of_memory;
api()->glGetIntegervFn(GL_MAX_TEXTURE_SIZE, &max_2d_texture_size_);
+ api()->glGetIntegervFn(GL_MAX_RENDERBUFFER_SIZE, &max_renderbuffer_size_);
+ max_offscreen_framebuffer_size_ =
+ std::min(max_2d_texture_size_, max_renderbuffer_size_);
if (offscreen_) {
offscreen_single_buffer_ = attrib_helper.single_buffer;
@@ -795,11 +816,15 @@ gpu::ContextResult GLES2DecoderPassthroughImpl::Initialize(
}
}
- FlushErrors();
+ CheckErrorCallbackState();
emulated_back_buffer_ = std::make_unique<EmulatedDefaultFramebuffer>(
api(), emulated_default_framebuffer_format_, feature_info_.get());
- if (!emulated_back_buffer_->Resize(attrib_helper.offscreen_framebuffer_size,
- feature_info_.get())) {
+ // Make sure to use a non-empty offscreen surface so that the framebuffer is
+ // complete.
+ gfx::Size initial_size(
+ std::max(1, attrib_helper.offscreen_framebuffer_size.width()),
+ std::max(1, attrib_helper.offscreen_framebuffer_size.height()));
+ if (!emulated_back_buffer_->Resize(initial_size, feature_info_.get())) {
bool was_lost = CheckResetStatus();
Destroy(true);
LOG(ERROR) << (was_lost ? "ContextResult::kTransientFailure: "
@@ -809,7 +834,7 @@ gpu::ContextResult GLES2DecoderPassthroughImpl::Initialize(
: gpu::ContextResult::kFatalFailure;
}
- if (FlushErrors()) {
+ if (CheckErrorCallbackState()) {
Destroy(true);
// Errors are considered fatal, including OOM.
LOG(ERROR)
@@ -1020,14 +1045,14 @@ bool GLES2DecoderPassthroughImpl::ResizeOffscreenFramebuffer(
}
if (size.width() < 0 || size.height() < 0 ||
- size.width() > max_2d_texture_size_ ||
- size.height() > max_2d_texture_size_) {
+ size.width() > max_offscreen_framebuffer_size_ ||
+ size.height() > max_offscreen_framebuffer_size_) {
LOG(ERROR) << "GLES2DecoderPassthroughImpl::ResizeOffscreenFramebuffer "
"failed to allocate storage due to excessive dimensions.";
return false;
}
- FlushErrors();
+ CheckErrorCallbackState();
if (!emulated_back_buffer_->Resize(size, feature_info_.get())) {
LOG(ERROR) << "GLES2DecoderPassthroughImpl::ResizeOffscreenFramebuffer "
@@ -1035,7 +1060,7 @@ bool GLES2DecoderPassthroughImpl::ResizeOffscreenFramebuffer(
return false;
}
- if (FlushErrors()) {
+ if (CheckErrorCallbackState()) {
LOG(ERROR) << "GLES2DecoderPassthroughImpl::ResizeOffscreenFramebuffer "
"failed to resize the emulated framebuffer because errors "
"were generated.";
@@ -1164,6 +1189,8 @@ gpu::Capabilities GLES2DecoderPassthroughImpl::GetCapabilities() {
caps.image_ycbcr_420v_disabled_for_video_frames =
group_->gpu_preferences()
.disable_biplanar_gpu_memory_buffers_for_video_frames;
+ caps.image_xr30 = feature_info_->feature_flags().chromium_image_xr30;
+ caps.image_xb30 = feature_info_->feature_flags().chromium_image_xb30;
caps.max_copy_texture_chromium_size =
feature_info_->workarounds().max_copy_texture_chromium_size;
caps.render_buffer_format_bgra8888 =
@@ -1451,6 +1478,17 @@ void GLES2DecoderPassthroughImpl::BindImage(uint32_t client_texture_id,
passthrough_texture->SetLevelImage(texture_target, 0, image);
}
+void GLES2DecoderPassthroughImpl::OnDebugMessage(GLenum source,
+ GLenum type,
+ GLuint id,
+ GLenum severity,
+ GLsizei length,
+ const GLchar* message) {
+ if (type == GL_DEBUG_TYPE_ERROR && source == GL_DEBUG_SOURCE_API) {
+ had_error_callback_ = true;
+ }
+}
+
const char* GLES2DecoderPassthroughImpl::GetCommandName(
unsigned int command_id) const {
if (command_id >= kFirstGLES2Command && command_id < kNumCommands) {
@@ -1678,19 +1716,8 @@ GLenum GLES2DecoderPassthroughImpl::PopError() {
}
bool GLES2DecoderPassthroughImpl::FlushErrors() {
- auto get_next_error = [this]() {
- // Always read a real GL error so that it can be replaced by the injected
- // error
- GLenum error = api()->glGetErrorFn();
- if (!injected_driver_errors_.empty()) {
- error = injected_driver_errors_.front();
- injected_driver_errors_.pop_front();
- }
- return error;
- };
-
bool had_error = false;
- GLenum error = get_next_error();
+ GLenum error = glGetError();
while (error != GL_NO_ERROR) {
errors_.insert(error);
had_error = true;
@@ -1709,15 +1736,11 @@ bool GLES2DecoderPassthroughImpl::FlushErrors() {
break;
}
- error = get_next_error();
+ error = glGetError();
}
return had_error;
}
-void GLES2DecoderPassthroughImpl::InjectDriverError(GLenum error) {
- injected_driver_errors_.push_back(error);
-}
-
bool GLES2DecoderPassthroughImpl::CheckResetStatus() {
DCHECK(!WasContextLost());
DCHECK(context_->IsCurrent(nullptr));
@@ -2047,6 +2070,16 @@ GLES2DecoderPassthroughImpl::GLenumToTextureTarget(GLenum target) {
}
}
+bool GLES2DecoderPassthroughImpl::CheckErrorCallbackState() {
+ bool had_error_ = had_error_callback_;
+ had_error_callback_ = false;
+ if (had_error_) {
+ // Make sure lose-context-on-OOM logic is triggered as early as possible.
+ FlushErrors();
+ }
+ return had_error_;
+}
+
#define GLES2_CMD_OP(name) \
{ \
&GLES2DecoderPassthroughImpl::Handle##name, cmds::name::kArgFlags, \
diff --git a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough.h b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough.h
index cbb75760e77..716080ce348 100644
--- a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough.h
+++ b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough.h
@@ -305,6 +305,13 @@ class GPU_GLES2_EXPORT GLES2DecoderPassthroughImpl : public GLES2Decoder {
gl::GLImage* image,
bool can_bind_to_sampler) override;
+ void OnDebugMessage(GLenum source,
+ GLenum type,
+ GLuint id,
+ GLenum severity,
+ GLsizei length,
+ const GLchar* message);
+
private:
// Allow unittests to inspect internal state tracking
friend class GLES2DecoderPassthroughTestBase;
@@ -363,10 +370,6 @@ class GPU_GLES2_EXPORT GLES2DecoderPassthroughImpl : public GLES2Decoder {
GLenum PopError();
bool FlushErrors();
- // Inject a driver-level GL error that will replace the result of the next
- // call to glGetError
- void InjectDriverError(GLenum error);
-
bool IsRobustnessSupported();
bool IsEmulatedQueryTarget(GLenum target) const;
@@ -559,9 +562,13 @@ class GPU_GLES2_EXPORT GLES2DecoderPassthroughImpl : public GLES2Decoder {
base::circular_deque<PendingReadPixels> pending_read_pixels_;
// Error state
- base::circular_deque<GLenum> injected_driver_errors_;
std::set<GLenum> errors_;
+ // Checks if an error has been generated since the last call to
+ // CheckErrorCallbackState
+ bool CheckErrorCallbackState();
+ bool had_error_callback_ = false;
+
// Default framebuffer emulation
struct EmulatedDefaultFramebufferFormat {
GLenum color_renderbuffer_internal_format = GL_NONE;
@@ -644,8 +651,10 @@ class GPU_GLES2_EXPORT GLES2DecoderPassthroughImpl : public GLES2Decoder {
std::vector<std::unique_ptr<EmulatedColorBuffer>> available_color_textures_;
size_t create_color_buffer_count_for_test_;
- // Maximum 2D texture size for limiting offscreen framebuffer sizes
- GLint max_2d_texture_size_;
+ // Maximum 2D resource sizes for limiting offscreen framebuffer sizes
+ GLint max_2d_texture_size_ = 0;
+ GLint max_renderbuffer_size_ = 0;
+ GLint max_offscreen_framebuffer_size_ = 0;
// State tracking of currently bound draw and read framebuffers (client IDs)
GLuint bound_draw_framebuffer_;
diff --git a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough_doer_prototypes.h b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough_doer_prototypes.h
index 54131613cea..26c7dbf3f88 100644
--- a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough_doer_prototypes.h
+++ b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough_doer_prototypes.h
@@ -1006,7 +1006,8 @@ error::Error DoBeginRasterCHROMIUM(GLuint texture_id,
GLuint msaa_sample_count,
GLboolean can_use_lcd_text,
GLboolean use_distance_field_text,
- GLint pixel_config);
+ GLint color_type,
+ GLuint color_space_transfer_cache_id);
error::Error DoRasterCHROMIUM(GLsizeiptr size, const void* list);
error::Error DoEndRasterCHROMIUM();
error::Error DoCreateTransferCacheEntryINTERNAL(GLuint entry_type,
@@ -1026,3 +1027,9 @@ error::Error DoWindowRectanglesEXT(GLenum mode,
error::Error DoCreateGpuFenceINTERNAL(GLuint gpu_fence_id);
error::Error DoWaitGpuFenceCHROMIUM(GLuint gpu_fence_id);
error::Error DoDestroyGpuFenceCHROMIUM(GLuint gpu_fence_id);
+error::Error DoUnpremultiplyAndDitherCopyCHROMIUM(GLuint src_texture,
+ GLuint dst_texture,
+ GLint x,
+ GLint y,
+ GLsizei width,
+ GLsizei height);
diff --git a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough_doers.cc b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough_doers.cc
index 4d34083425b..6b9680741d0 100644
--- a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough_doers.cc
+++ b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough_doers.cc
@@ -4,6 +4,7 @@
#include "gpu/command_buffer/service/gles2_cmd_decoder_passthrough.h"
+#include "base/bind_helpers.h"
#include "base/strings/string_number_conversions.h"
#include "gpu/command_buffer/service/decoder_client.h"
#include "gpu/command_buffer/service/gpu_fence_manager.h"
@@ -320,9 +321,9 @@ bool ModifyAttachmentsForEmulatedFramebuffer(std::vector<GLenum>* attachments) {
// Implementations of commands
error::Error GLES2DecoderPassthroughImpl::DoActiveTexture(GLenum texture) {
- FlushErrors();
+ CheckErrorCallbackState();
api()->glActiveTextureFn(texture);
- if (FlushErrors()) {
+ if (CheckErrorCallbackState()) {
return error::kNoError;
}
@@ -350,10 +351,10 @@ error::Error GLES2DecoderPassthroughImpl::DoBindAttribLocation(
error::Error GLES2DecoderPassthroughImpl::DoBindBuffer(GLenum target,
GLuint buffer) {
- FlushErrors();
+ CheckErrorCallbackState();
api()->glBindBufferFn(target, GetBufferServiceID(api(), buffer, resources_,
bind_generates_resource_));
- if (FlushErrors()) {
+ if (CheckErrorCallbackState()) {
return error::kNoError;
}
@@ -366,11 +367,11 @@ error::Error GLES2DecoderPassthroughImpl::DoBindBuffer(GLenum target,
error::Error GLES2DecoderPassthroughImpl::DoBindBufferBase(GLenum target,
GLuint index,
GLuint buffer) {
- FlushErrors();
+ CheckErrorCallbackState();
api()->glBindBufferBaseFn(
target, index,
GetBufferServiceID(api(), buffer, resources_, bind_generates_resource_));
- if (FlushErrors()) {
+ if (CheckErrorCallbackState()) {
return error::kNoError;
}
@@ -385,12 +386,12 @@ error::Error GLES2DecoderPassthroughImpl::DoBindBufferRange(GLenum target,
GLuint buffer,
GLintptr offset,
GLsizeiptr size) {
- FlushErrors();
+ CheckErrorCallbackState();
api()->glBindBufferRangeFn(
target, index,
GetBufferServiceID(api(), buffer, resources_, bind_generates_resource_),
offset, size);
- if (FlushErrors()) {
+ if (CheckErrorCallbackState()) {
return error::kNoError;
}
@@ -403,11 +404,11 @@ error::Error GLES2DecoderPassthroughImpl::DoBindBufferRange(GLenum target,
error::Error GLES2DecoderPassthroughImpl::DoBindFramebuffer(
GLenum target,
GLuint framebuffer) {
- FlushErrors();
+ CheckErrorCallbackState();
api()->glBindFramebufferEXTFn(
target, GetFramebufferServiceID(api(), framebuffer, &framebuffer_id_map_,
bind_generates_resource_));
- if (FlushErrors()) {
+ if (CheckErrorCallbackState()) {
return error::kNoError;
}
@@ -454,12 +455,12 @@ error::Error GLES2DecoderPassthroughImpl::DoBindTexture(GLenum target,
GLuint service_id =
GetTextureServiceID(api(), texture, resources_, bind_generates_resource_);
- FlushErrors();
+ CheckErrorCallbackState();
api()->glBindTextureFn(target, service_id);
// Only update tracking if no error was generated in the bind call
- if (FlushErrors()) {
+ if (CheckErrorCallbackState()) {
return error::kNoError;
}
@@ -536,9 +537,9 @@ error::Error GLES2DecoderPassthroughImpl::DoBufferData(GLenum target,
GLsizeiptr size,
const void* data,
GLenum usage) {
- FlushErrors();
+ CheckErrorCallbackState();
api()->glBufferDataFn(target, size, data, usage);
- if (FlushErrors()) {
+ if (CheckErrorCallbackState()) {
return error::kNoError;
}
@@ -1051,10 +1052,10 @@ error::Error GLES2DecoderPassthroughImpl::DoFenceSync(GLenum condition,
return error::kInvalidArguments;
}
- FlushErrors();
+ CheckErrorCallbackState();
GLsync service_id = api()->glFenceSyncFn(condition, flags);
- if (FlushErrors()) {
- return error::kInvalidArguments;
+ if (CheckErrorCallbackState()) {
+ return error::kNoError;
}
resources_->sync_id_map.SetIDMapping(client_id,
@@ -1254,13 +1255,13 @@ error::Error GLES2DecoderPassthroughImpl::DoGetActiveAttrib(GLuint program,
GLenum* type,
std::string* name,
int32_t* success) {
- FlushErrors();
+ CheckErrorCallbackState();
GLuint service_id = GetProgramServiceID(program, resources_);
GLint active_attribute_max_length = 0;
api()->glGetProgramivFn(service_id, GL_ACTIVE_ATTRIBUTE_MAX_LENGTH,
&active_attribute_max_length);
- if (FlushErrors()) {
+ if (CheckErrorCallbackState()) {
*success = 0;
return error::kNoError;
}
@@ -1269,7 +1270,7 @@ error::Error GLES2DecoderPassthroughImpl::DoGetActiveAttrib(GLuint program,
api()->glGetActiveAttribFn(service_id, index, name_buffer.size(), nullptr,
size, type, name_buffer.data());
*name = std::string(name_buffer.data());
- *success = FlushErrors() ? 0 : 1;
+ *success = CheckErrorCallbackState() ? 0 : 1;
return error::kNoError;
}
@@ -1279,13 +1280,13 @@ error::Error GLES2DecoderPassthroughImpl::DoGetActiveUniform(GLuint program,
GLenum* type,
std::string* name,
int32_t* success) {
- FlushErrors();
+ CheckErrorCallbackState();
GLuint service_id = GetProgramServiceID(program, resources_);
GLint active_uniform_max_length = 0;
api()->glGetProgramivFn(service_id, GL_ACTIVE_UNIFORM_MAX_LENGTH,
&active_uniform_max_length);
- if (FlushErrors()) {
+ if (CheckErrorCallbackState()) {
*success = 0;
return error::kNoError;
}
@@ -1294,7 +1295,7 @@ error::Error GLES2DecoderPassthroughImpl::DoGetActiveUniform(GLuint program,
api()->glGetActiveUniformFn(service_id, index, name_buffer.size(), nullptr,
size, type, name_buffer.data());
*name = std::string(name_buffer.data());
- *success = FlushErrors() ? 0 : 1;
+ *success = CheckErrorCallbackState() ? 0 : 1;
return error::kNoError;
}
@@ -1315,7 +1316,7 @@ error::Error GLES2DecoderPassthroughImpl::DoGetActiveUniformBlockName(
GLuint program,
GLuint index,
std::string* name) {
- FlushErrors();
+ CheckErrorCallbackState();
GLuint program_service_id = GetProgramServiceID(program, resources_);
GLint max_name_length = 0;
@@ -1323,7 +1324,7 @@ error::Error GLES2DecoderPassthroughImpl::DoGetActiveUniformBlockName(
GL_ACTIVE_UNIFORM_BLOCK_MAX_NAME_LENGTH,
&max_name_length);
- if (FlushErrors()) {
+ if (CheckErrorCallbackState()) {
return error::kNoError;
}
@@ -1371,11 +1372,11 @@ error::Error GLES2DecoderPassthroughImpl::DoGetBufferSubDataAsyncCHROMIUM(
GLintptr offset,
GLsizeiptr size,
uint8_t* mem) {
- FlushErrors();
+ CheckErrorCallbackState();
void* mapped_ptr =
api()->glMapBufferRangeFn(target, offset, size, GL_MAP_READ_BIT);
- if (FlushErrors() || mapped_ptr == nullptr) {
+ if (CheckErrorCallbackState() || mapped_ptr == nullptr) {
// Had an error while mapping, don't copy any data
return error::kNoError;
}
@@ -1404,10 +1405,10 @@ error::Error GLES2DecoderPassthroughImpl::DoGetBufferParameteri64v(
GLsizei bufsize,
GLsizei* length,
GLint64* params) {
- FlushErrors();
+ CheckErrorCallbackState();
api()->glGetBufferParameteri64vRobustANGLEFn(target, pname, bufsize, length,
params);
- if (FlushErrors()) {
+ if (CheckErrorCallbackState()) {
return error::kNoError;
}
PatchGetBufferResults(target, pname, bufsize, length, params);
@@ -1420,10 +1421,10 @@ error::Error GLES2DecoderPassthroughImpl::DoGetBufferParameteriv(
GLsizei bufsize,
GLsizei* length,
GLint* params) {
- FlushErrors();
+ CheckErrorCallbackState();
api()->glGetBufferParameterivRobustANGLEFn(target, pname, bufsize, length,
params);
- if (FlushErrors()) {
+ if (CheckErrorCallbackState()) {
return error::kNoError;
}
PatchGetBufferResults(target, pname, bufsize, length, params);
@@ -1485,14 +1486,14 @@ error::Error GLES2DecoderPassthroughImpl::DoGetFramebufferAttachmentParameteriv(
}
}
- FlushErrors();
+ CheckErrorCallbackState();
// Get a scratch buffer to hold the result of the query
GLint* scratch_params = GetTypedScratchMemory<GLint>(bufsize);
api()->glGetFramebufferAttachmentParameterivRobustANGLEFn(
target, updated_attachment, pname, bufsize, length, scratch_params);
- if (FlushErrors()) {
+ if (CheckErrorCallbackState()) {
DCHECK(*length == 0);
return error::kNoError;
}
@@ -1576,12 +1577,12 @@ error::Error GLES2DecoderPassthroughImpl::DoGetProgramiv(GLuint program,
error::Error GLES2DecoderPassthroughImpl::DoGetProgramInfoLog(
GLuint program,
std::string* infolog) {
- FlushErrors();
+ CheckErrorCallbackState();
GLint info_log_len = 0;
api()->glGetProgramivFn(GetProgramServiceID(program, resources_),
GL_INFO_LOG_LENGTH, &info_log_len);
- if (FlushErrors()) {
+ if (CheckErrorCallbackState()) {
return error::kNoError;
}
@@ -1640,12 +1641,12 @@ error::Error GLES2DecoderPassthroughImpl::DoGetShaderiv(GLuint shader,
error::Error GLES2DecoderPassthroughImpl::DoGetShaderInfoLog(
GLuint shader,
std::string* infolog) {
- FlushErrors();
+ CheckErrorCallbackState();
GLuint service_id = GetShaderServiceID(shader, resources_);
GLint info_log_len = 0;
api()->glGetShaderivFn(service_id, GL_INFO_LOG_LENGTH, &info_log_len);
- if (FlushErrors()) {
+ if (CheckErrorCallbackState()) {
return error::kNoError;
}
@@ -1663,23 +1664,23 @@ error::Error GLES2DecoderPassthroughImpl::DoGetShaderPrecisionFormat(
GLint* range,
GLint* precision,
int32_t* success) {
- FlushErrors();
+ CheckErrorCallbackState();
api()->glGetShaderPrecisionFormatFn(shadertype, precisiontype, range,
precision);
- *success = FlushErrors() ? 0 : 1;
+ *success = CheckErrorCallbackState() ? 0 : 1;
return error::kNoError;
}
error::Error GLES2DecoderPassthroughImpl::DoGetShaderSource(
GLuint shader,
std::string* source) {
- FlushErrors();
+ CheckErrorCallbackState();
GLuint shader_service_id = GetShaderServiceID(shader, resources_);
GLint shader_source_length = 0;
api()->glGetShaderivFn(shader_service_id, GL_SHADER_SOURCE_LENGTH,
&shader_source_length);
- if (FlushErrors()) {
+ if (CheckErrorCallbackState()) {
return error::kNoError;
}
@@ -1757,13 +1758,13 @@ error::Error GLES2DecoderPassthroughImpl::DoGetTransformFeedbackVarying(
GLenum* type,
std::string* name,
int32_t* success) {
- FlushErrors();
+ CheckErrorCallbackState();
GLuint service_id = GetProgramServiceID(program, resources_);
GLint transform_feedback_varying_max_length = 0;
api()->glGetProgramivFn(service_id, GL_TRANSFORM_FEEDBACK_VARYING_MAX_LENGTH,
&transform_feedback_varying_max_length);
- if (FlushErrors()) {
+ if (CheckErrorCallbackState()) {
*success = 0;
return error::kNoError;
}
@@ -1773,7 +1774,7 @@ error::Error GLES2DecoderPassthroughImpl::DoGetTransformFeedbackVarying(
nullptr, size, type,
name_buffer.data());
*name = std::string(name_buffer.data());
- *success = FlushErrors() ? 0 : 1;
+ *success = CheckErrorCallbackState() ? 0 : 1;
return error::kNoError;
}
@@ -2077,12 +2078,12 @@ error::Error GLES2DecoderPassthroughImpl::DoReadPixels(GLint x,
GLsizei* rows,
void* pixels,
int32_t* success) {
- FlushErrors();
+ CheckErrorCallbackState();
ScopedPackStateRowLengthReset reset_row_length(
api(), bufsize != 0 && feature_info_->gl_version_info().is_es3);
api()->glReadPixelsRobustANGLEFn(x, y, width, height, format, type, bufsize,
length, columns, rows, pixels);
- *success = FlushErrors() ? 0 : 1;
+ *success = CheckErrorCallbackState() ? 0 : 1;
return error::kNoError;
}
@@ -2104,7 +2105,7 @@ error::Error GLES2DecoderPassthroughImpl::DoReadPixelsAsync(
DCHECK(feature_info_->feature_flags().use_async_readpixels &&
bound_buffers_[GL_PIXEL_PACK_BUFFER] == 0);
- FlushErrors();
+ CheckErrorCallbackState();
ScopedPackStateRowLengthReset reset_row_length(
api(), bufsize != 0 && feature_info_->gl_version_info().is_es3);
@@ -2151,13 +2152,13 @@ error::Error GLES2DecoderPassthroughImpl::DoReadPixelsAsync(
api()->glBindBufferFn(GL_PIXEL_PACK_BUFFER_ARB, 0);
// Test for errors now before creating a fence
- if (FlushErrors()) {
+ if (CheckErrorCallbackState()) {
return error::kNoError;
}
pending_read_pixels.fence.reset(gl::GLFence::Create());
- if (FlushErrors()) {
+ if (CheckErrorCallbackState()) {
return error::kNoError;
}
@@ -3009,12 +3010,12 @@ error::Error GLES2DecoderPassthroughImpl::DoQueryCounterEXT(
GLuint service_id = GetQueryServiceID(id, &query_id_map_);
// Flush all previous errors
- FlushErrors();
+ CheckErrorCallbackState();
api()->glQueryCounterFn(service_id, target);
// Check if a new error was generated
- if (FlushErrors()) {
+ if (CheckErrorCallbackState()) {
return error::kNoError;
}
@@ -3070,12 +3071,12 @@ error::Error GLES2DecoderPassthroughImpl::DoBeginQueryEXT(
}
} else {
// Flush all previous errors
- FlushErrors();
+ CheckErrorCallbackState();
api()->glBeginQueryFn(target, service_id);
// Check if a new error was generated
- if (FlushErrors()) {
+ if (CheckErrorCallbackState()) {
return error::kNoError;
}
}
@@ -3117,12 +3118,12 @@ error::Error GLES2DecoderPassthroughImpl::DoEndQueryEXT(GLenum target,
}
} else {
// Flush all previous errors
- FlushErrors();
+ CheckErrorCallbackState();
api()->glEndQueryFn(target);
// Check if a new error was generated
- if (FlushErrors()) {
+ if (CheckErrorCallbackState()) {
return error::kNoError;
}
}
@@ -3253,8 +3254,7 @@ error::Error GLES2DecoderPassthroughImpl::DoSwapBuffers() {
return error::kNoError;
}
- gfx::SwapResult result = surface_->SwapBuffers(
- base::Bind([](const gfx::PresentationFeedback&) {}));
+ gfx::SwapResult result = surface_->SwapBuffers(base::DoNothing());
if (result == gfx::SwapResult::SWAP_FAILED) {
LOG(ERROR) << "Context lost because SwapBuffers failed.";
if (!CheckResetStatus()) {
@@ -3292,7 +3292,7 @@ error::Error GLES2DecoderPassthroughImpl::DoMapBufferRange(
int32_t data_shm_id,
uint32_t data_shm_offset,
uint32_t* result) {
- FlushErrors();
+ CheckErrorCallbackState();
GLbitfield filtered_access = access;
@@ -3315,7 +3315,7 @@ error::Error GLES2DecoderPassthroughImpl::DoMapBufferRange(
void* mapped_ptr =
api()->glMapBufferRangeFn(target, offset, size, filtered_access);
- if (FlushErrors() || mapped_ptr == nullptr) {
+ if (CheckErrorCallbackState() || mapped_ptr == nullptr) {
// Had an error while mapping, don't copy any data
*result = 0;
return error::kNoError;
@@ -3391,8 +3391,14 @@ error::Error GLES2DecoderPassthroughImpl::DoResizeCHROMIUM(GLuint width,
GLfloat scale_factor,
GLenum color_space,
GLboolean alpha) {
+ // gfx::Size uses integers, make sure width and height do not overflow
+ static_assert(sizeof(GLuint) >= sizeof(int), "Unexpected GLuint size.");
+ static const GLuint kMaxDimension =
+ static_cast<GLuint>(std::numeric_limits<int>::max());
+ gfx::Size safe_size(std::min(std::max(1U, width), kMaxDimension),
+ std::min(std::max(1U, height), kMaxDimension));
if (offscreen_) {
- if (!ResizeOffscreenFramebuffer(gfx::Size(width, height))) {
+ if (!ResizeOffscreenFramebuffer(safe_size)) {
LOG(ERROR) << "GLES2DecoderPassthroughImpl: Context lost because "
<< "ResizeOffscreenFramebuffer failed.";
return error::kLostContext;
@@ -3418,8 +3424,8 @@ error::Error GLES2DecoderPassthroughImpl::DoResizeCHROMIUM(GLuint width,
"specified color space was invalid.";
return error::kLostContext;
}
- if (!surface_->Resize(gfx::Size(width, height), scale_factor,
- surface_color_space, !!alpha)) {
+ if (!surface_->Resize(safe_size, scale_factor, surface_color_space,
+ !!alpha)) {
LOG(ERROR)
<< "GLES2DecoderPassthroughImpl: Context lost because resize failed.";
return error::kLostContext;
@@ -3452,6 +3458,7 @@ error::Error GLES2DecoderPassthroughImpl::DoRequestExtensionCHROMIUM(
// Make sure newly enabled extensions are exposed and usable.
context_->ReinitializeDynamicBindings();
feature_info_->Initialize(feature_info_->context_type(),
+ true /* is_passthrough_cmd_decoder */,
feature_info_->disallowed_features());
return error::kNoError;
@@ -3804,12 +3811,12 @@ error::Error GLES2DecoderPassthroughImpl::DoGetUniformsES3CHROMIUM(
error::Error GLES2DecoderPassthroughImpl::DoGetTranslatedShaderSourceANGLE(
GLuint shader,
std::string* source) {
- FlushErrors();
+ CheckErrorCallbackState();
GLuint service_id = GetShaderServiceID(shader, resources_);
GLint translated_source_length = 0;
api()->glGetShaderivFn(service_id, GL_TRANSLATED_SHADER_SOURCE_LENGTH_ANGLE,
&translated_source_length);
- if (FlushErrors()) {
+ if (CheckErrorCallbackState()) {
return error::kNoError;
}
@@ -3835,8 +3842,8 @@ error::Error GLES2DecoderPassthroughImpl::DoSwapBuffersWithBoundsCHROMIUM(
bounds[i] = gfx::Rect(rects[i * 4 + 0], rects[i * 4 + 1], rects[i * 4 + 2],
rects[i * 4 + 3]);
}
- gfx::SwapResult result = surface_->SwapBuffersWithBounds(
- bounds, base::Bind([](const gfx::PresentationFeedback&) {}));
+ gfx::SwapResult result =
+ surface_->SwapBuffersWithBounds(bounds, base::DoNothing());
if (result == gfx::SwapResult::SWAP_FAILED) {
LOG(ERROR) << "Context lost because SwapBuffersWithBounds failed.";
}
@@ -3855,8 +3862,8 @@ error::Error GLES2DecoderPassthroughImpl::DoPostSubBufferCHROMIUM(
return error::kNoError;
}
- gfx::SwapResult result = surface_->PostSubBuffer(
- x, y, width, height, base::Bind([](const gfx::PresentationFeedback&) {}));
+ gfx::SwapResult result =
+ surface_->PostSubBuffer(x, y, width, height, base::DoNothing());
if (result == gfx::SwapResult::SWAP_FAILED) {
LOG(ERROR) << "Context lost because PostSubBuffer failed.";
if (!CheckResetStatus()) {
@@ -4583,8 +4590,6 @@ error::Error GLES2DecoderPassthroughImpl::DoSetDrawRectangleCHROMIUM(
GLint y,
GLint width,
GLint height) {
- FlushErrors();
-
GLint current_framebuffer = 0;
api()->glGetIntegervFn(GL_FRAMEBUFFER_BINDING, &current_framebuffer);
if (current_framebuffer != 0) {
@@ -4609,8 +4614,6 @@ error::Error GLES2DecoderPassthroughImpl::DoSetDrawRectangleCHROMIUM(
error::Error GLES2DecoderPassthroughImpl::DoSetEnableDCLayersCHROMIUM(
GLboolean enable) {
- FlushErrors();
-
GLint current_framebuffer = 0;
api()->glGetIntegervFn(GL_FRAMEBUFFER_BINDING, &current_framebuffer);
if (current_framebuffer != 0) {
@@ -4638,7 +4641,8 @@ error::Error GLES2DecoderPassthroughImpl::DoBeginRasterCHROMIUM(
GLuint msaa_sample_count,
GLboolean can_use_lcd_text,
GLboolean use_distance_field_text,
- GLint pixel_config) {
+ GLint color_type,
+ GLuint color_space_transfer_cache_id) {
NOTIMPLEMENTED();
return error::kNoError;
}
@@ -4718,5 +4722,16 @@ error::Error GLES2DecoderPassthroughImpl::DoDestroyGpuFenceCHROMIUM(
return error::kNoError;
}
+error::Error GLES2DecoderPassthroughImpl::DoUnpremultiplyAndDitherCopyCHROMIUM(
+ GLuint src_texture,
+ GLuint dst_texture,
+ GLint x,
+ GLint y,
+ GLsizei width,
+ GLsizei height) {
+ NOTIMPLEMENTED();
+ return error::kNoError;
+}
+
} // namespace gles2
} // namespace gpu
diff --git a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough_handlers_autogen.cc b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough_handlers_autogen.cc
index c81b0a8ac49..74fdfdbba86 100644
--- a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough_handlers_autogen.cc
+++ b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough_handlers_autogen.cc
@@ -4190,6 +4190,32 @@ error::Error GLES2DecoderPassthroughImpl::HandleLoseContextCHROMIUM(
return error::kNoError;
}
+error::Error
+GLES2DecoderPassthroughImpl::HandleUnpremultiplyAndDitherCopyCHROMIUM(
+ uint32_t immediate_data_size,
+ const volatile void* cmd_data) {
+ const volatile gles2::cmds::UnpremultiplyAndDitherCopyCHROMIUM& c =
+ *static_cast<
+ const volatile gles2::cmds::UnpremultiplyAndDitherCopyCHROMIUM*>(
+ cmd_data);
+ if (!features().unpremultiply_and_dither_copy) {
+ return error::kUnknownCommand;
+ }
+
+ GLuint source_id = static_cast<GLuint>(c.source_id);
+ GLuint dest_id = static_cast<GLuint>(c.dest_id);
+ GLint x = static_cast<GLint>(c.x);
+ GLint y = static_cast<GLint>(c.y);
+ GLsizei width = static_cast<GLsizei>(c.width);
+ GLsizei height = static_cast<GLsizei>(c.height);
+ error::Error error = DoUnpremultiplyAndDitherCopyCHROMIUM(
+ source_id, dest_id, x, y, width, height);
+ if (error != error::kNoError) {
+ return error;
+ }
+ return error::kNoError;
+}
+
error::Error GLES2DecoderPassthroughImpl::HandleDrawBuffersEXTImmediate(
uint32_t immediate_data_size,
const volatile void* cmd_data) {
@@ -4540,10 +4566,12 @@ error::Error GLES2DecoderPassthroughImpl::HandleBeginRasterCHROMIUM(
GLboolean can_use_lcd_text = static_cast<GLboolean>(c.can_use_lcd_text);
GLboolean use_distance_field_text =
static_cast<GLboolean>(c.use_distance_field_text);
- GLint pixel_config = static_cast<GLint>(c.pixel_config);
+ GLint color_type = static_cast<GLint>(c.color_type);
+ GLuint color_space_transfer_cache_id =
+ static_cast<GLuint>(c.color_space_transfer_cache_id);
error::Error error = DoBeginRasterCHROMIUM(
texture_id, sk_color, msaa_sample_count, can_use_lcd_text,
- use_distance_field_text, pixel_config);
+ use_distance_field_text, color_type, color_space_transfer_cache_id);
if (error != error::kNoError) {
return error;
}
diff --git a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough_unittest_framebuffers.cc b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough_unittest_framebuffers.cc
index c8fd6c14956..56d10fc8394 100644
--- a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough_unittest_framebuffers.cc
+++ b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough_unittest_framebuffers.cc
@@ -349,12 +349,10 @@ TEST_F(GLES2DecoderPassthroughTest, ReadPixelsAsyncError) {
uint32_t pixels_shm_id = shared_memory_id_;
uint32_t pixels_shm_offset = kSharedMemoryOffset + sizeof(Result);
- // Inject an INVALID_OPERATION error on the call to ReadPixels
- InjectGLError(GL_NO_ERROR);
- InjectGLError(GL_INVALID_OPERATION);
-
+ // Provide parameters that will cause glReadPixels to fail with
+ // GL_INVALID_OPERATION
ReadPixels cmd;
- cmd.Init(0, 0, kWidth, kHeight, GL_RGBA, GL_UNSIGNED_BYTE, pixels_shm_id,
+ cmd.Init(0, 0, kWidth, kHeight, GL_RGBA, GL_UNSIGNED_SHORT, pixels_shm_id,
pixels_shm_offset, result_shm_id, result_shm_offset, true);
result->success = 0;
EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
diff --git a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest.cc b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest.cc
index ecd91f4541c..a0276a82459 100644
--- a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest.cc
+++ b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest.cc
@@ -54,13 +54,9 @@ namespace gles2 {
using namespace cmds;
void GLES2DecoderRGBBackbufferTest::SetUp() {
- // Test codepath with workaround clear_alpha_in_readpixels because
- // ReadPixelsEmulator emulates the incorrect driver behavior.
- gpu::GpuDriverBugWorkarounds workarounds;
- workarounds.clear_alpha_in_readpixels = true;
InitState init;
init.bind_generates_resource = true;
- InitDecoderWithWorkarounds(init, workarounds);
+ InitDecoder(init);
SetupDefaultProgram();
}
@@ -1581,7 +1577,11 @@ TEST_P(GLES2DecoderDoCommandsTest, DoCommandsBadArgSize) {
decoder_->DoCommands(
2, &cmds_, entries_per_cmd_ * 2 + 1, &num_processed));
EXPECT_EQ(GL_NO_ERROR, GetGLError());
- EXPECT_EQ(entries_per_cmd_ + cmds_[1].header.size, num_processed);
+ // gpu::CommandHeader::size is a 21-bit field, so casting it to int is safe.
+ // Without the explicit cast, Visual Studio ends up promoting the left hand
+ // side to unsigned, and emits a sign mismatch warning.
+ EXPECT_EQ(entries_per_cmd_ + static_cast<int>(cmds_[1].header.size),
+ num_processed);
}
class GLES2DecoderDescheduleUntilFinishedTest : public GLES2DecoderTest {
@@ -1683,13 +1683,11 @@ void GLES3DecoderWithShaderTest::SetUp() {
}
void GLES3DecoderRGBBackbufferTest::SetUp() {
- gpu::GpuDriverBugWorkarounds workarounds;
- workarounds.clear_alpha_in_readpixels = true;
InitState init;
init.gl_version = "OpenGL ES 3.0";
init.bind_generates_resource = true;
init.context_type = CONTEXT_TYPE_OPENGLES3;
- InitDecoderWithWorkarounds(init, workarounds);
+ InitDecoder(init);
SetupDefaultProgram();
}
diff --git a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_4.cc b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_4.cc
new file mode 100644
index 00000000000..d5b10acf9d5
--- /dev/null
+++ b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_4.cc
@@ -0,0 +1,59 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "gpu/command_buffer/service/gles2_cmd_decoder.h"
+
+#include <stdint.h>
+
+#include "base/command_line.h"
+#include "gpu/command_buffer/common/gles2_cmd_format.h"
+#include "gpu/command_buffer/common/gles2_cmd_utils.h"
+#include "gpu/command_buffer/service/context_group.h"
+#include "gpu/command_buffer/service/gles2_cmd_decoder_unittest_base.h"
+#include "gpu/command_buffer/service/program_manager.h"
+#include "testing/gtest/include/gtest/gtest.h"
+#include "ui/gl/gl_mock.h"
+
+using ::gl::MockGLInterface;
+using ::testing::_;
+using ::testing::DoAll;
+using ::testing::InSequence;
+using ::testing::MatcherCast;
+using ::testing::Pointee;
+using ::testing::Return;
+using ::testing::SetArrayArgument;
+using ::testing::SetArgPointee;
+using ::testing::StrEq;
+
+namespace gpu {
+namespace gles2 {
+
+using namespace cmds;
+
+class GLES2DecoderTest4 : public GLES2DecoderTestBase {
+ public:
+ GLES2DecoderTest4() = default;
+};
+
+class GLES3DecoderTest4 : public GLES2DecoderTest4 {
+ public:
+ GLES3DecoderTest4() { shader_language_version_ = 300; }
+
+ protected:
+ void SetUp() override {
+ InitState init;
+ init.gl_version = "OpenGL ES 3.0";
+ init.bind_generates_resource = true;
+ init.context_type = CONTEXT_TYPE_OPENGLES3;
+ InitDecoder(init);
+ }
+};
+
+INSTANTIATE_TEST_CASE_P(Service, GLES2DecoderTest4, ::testing::Bool());
+INSTANTIATE_TEST_CASE_P(Service, GLES3DecoderTest4, ::testing::Bool());
+
+#include "gpu/command_buffer/service/gles2_cmd_decoder_unittest_4_autogen.h"
+
+} // namespace gles2
+} // namespace gpu
diff --git a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_4_autogen.h b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_4_autogen.h
index ba73447d73b..9abba521520 100644
--- a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_4_autogen.h
+++ b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_4_autogen.h
@@ -12,61 +12,4 @@
#ifndef GPU_COMMAND_BUFFER_SERVICE_GLES2_CMD_DECODER_UNITTEST_4_AUTOGEN_H_
#define GPU_COMMAND_BUFFER_SERVICE_GLES2_CMD_DECODER_UNITTEST_4_AUTOGEN_H_
-TEST_P(GLES2DecoderTest4, SwapBuffersWithBoundsCHROMIUMImmediateValidArgs) {
- cmds::SwapBuffersWithBoundsCHROMIUMImmediate& cmd =
- *GetImmediateAs<cmds::SwapBuffersWithBoundsCHROMIUMImmediate>();
- SpecializedSetup<cmds::SwapBuffersWithBoundsCHROMIUMImmediate, 0>(true);
- GLint temp[4 * 2] = {
- 0,
- };
- EXPECT_CALL(*gl_, SwapBuffersWithBoundsCHROMIUM(1, PointsToArray(temp, 4)));
- cmd.Init(1, &temp[0]);
- EXPECT_EQ(error::kNoError, ExecuteImmediateCmd(cmd, sizeof(temp)));
- EXPECT_EQ(GL_NO_ERROR, GetGLError());
-}
-
-TEST_P(GLES2DecoderTest4, SetDrawRectangleCHROMIUMValidArgs) {
- EXPECT_CALL(*gl_, SetDrawRectangleCHROMIUM(1, 2, 3, 4));
- SpecializedSetup<cmds::SetDrawRectangleCHROMIUM, 0>(true);
- cmds::SetDrawRectangleCHROMIUM cmd;
- cmd.Init(1, 2, 3, 4);
- EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
- EXPECT_EQ(GL_NO_ERROR, GetGLError());
-}
-
-TEST_P(GLES2DecoderTest4, SetEnableDCLayersCHROMIUMValidArgs) {
- EXPECT_CALL(*gl_, SetEnableDCLayersCHROMIUM(true));
- SpecializedSetup<cmds::SetEnableDCLayersCHROMIUM, 0>(true);
- cmds::SetEnableDCLayersCHROMIUM cmd;
- cmd.Init(true);
- EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
- EXPECT_EQ(GL_NO_ERROR, GetGLError());
-}
-
-TEST_P(GLES2DecoderTest4, CreateTransferCacheEntryINTERNALValidArgs) {
- EXPECT_CALL(*gl_, CreateTransferCacheEntryINTERNAL(1, 2, 3, 4, 5, 6, 7));
- SpecializedSetup<cmds::CreateTransferCacheEntryINTERNAL, 0>(true);
- cmds::CreateTransferCacheEntryINTERNAL cmd;
- cmd.Init(1, 2, 3, 4, 5, 6, 7);
- EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
- EXPECT_EQ(GL_NO_ERROR, GetGLError());
-}
-
-TEST_P(GLES2DecoderTest4, DeleteTransferCacheEntryINTERNALValidArgs) {
- EXPECT_CALL(*gl_, DeleteTransferCacheEntryINTERNAL(1, 2));
- SpecializedSetup<cmds::DeleteTransferCacheEntryINTERNAL, 0>(true);
- cmds::DeleteTransferCacheEntryINTERNAL cmd;
- cmd.Init(1, 2);
- EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
- EXPECT_EQ(GL_NO_ERROR, GetGLError());
-}
-
-TEST_P(GLES2DecoderTest4, UnlockTransferCacheEntryINTERNALValidArgs) {
- EXPECT_CALL(*gl_, UnlockTransferCacheEntryINTERNAL(1, 2));
- SpecializedSetup<cmds::UnlockTransferCacheEntryINTERNAL, 0>(true);
- cmds::UnlockTransferCacheEntryINTERNAL cmd;
- cmd.Init(1, 2);
- EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
- EXPECT_EQ(GL_NO_ERROR, GetGLError());
-}
#endif // GPU_COMMAND_BUFFER_SERVICE_GLES2_CMD_DECODER_UNITTEST_4_AUTOGEN_H_
diff --git a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_base.cc b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_base.cc
index 09ef3e7f8bc..365b482ea9a 100644
--- a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_base.cc
+++ b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_base.cc
@@ -2452,10 +2452,6 @@ GLint GLES2DecoderPassthroughTestBase::GetGLError() {
return static_cast<GLint>(*GetSharedMemoryAs<GLenum*>());
}
-void GLES2DecoderPassthroughTestBase::InjectGLError(GLenum error) {
- decoder_->InjectDriverError(error);
-}
-
void GLES2DecoderPassthroughTestBase::DoRequestExtension(
const char* extension) {
DCHECK(extension != nullptr);
diff --git a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_base.h b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_base.h
index d37d4e481b2..9bdcfb1bbff 100644
--- a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_base.h
+++ b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_base.h
@@ -921,7 +921,6 @@ class GLES2DecoderPassthroughTestBase : public testing::Test,
}
GLint GetGLError();
- void InjectGLError(GLenum error);
protected:
void DoRequestExtension(const char* extension);
diff --git a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_framebuffers.cc b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_framebuffers.cc
index 74630da0beb..1367c1e5ea4 100644
--- a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_framebuffers.cc
+++ b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_framebuffers.cc
@@ -1079,11 +1079,6 @@ TEST_P(GLES2DecoderRGBBackbufferTest, ReadPixelsNoAlphaBackbuffer) {
const GLsizei kHeight = 3;
const GLint kBytesPerPixel = 4;
const GLint kPackAlignment = 4;
- static const uint8_t kExpectedPixels[kWidth * kHeight * kBytesPerPixel] = {
- 12, 13, 14, 255, 19, 18, 19, 255, 13, 14, 18, 255,
- 29, 28, 23, 255, 21, 22, 21, 255, 28, 23, 22, 255,
- 31, 34, 39, 255, 32, 37, 32, 255, 34, 39, 37, 255,
- };
static const uint8_t kSrcPixels[kWidth * kHeight * kBytesPerPixel] = {
12, 13, 14, 18, 19, 18, 19, 12, 13, 14, 18, 19, 29, 28, 23, 22, 21, 22,
21, 29, 28, 23, 22, 21, 31, 34, 39, 37, 32, 37, 32, 31, 34, 39, 37, 32,
@@ -1091,12 +1086,8 @@ TEST_P(GLES2DecoderRGBBackbufferTest, ReadPixelsNoAlphaBackbuffer) {
surface_->SetSize(gfx::Size(INT_MAX, INT_MAX));
- ReadPixelsEmulator emu(kWidth,
- kHeight,
- kBytesPerPixel,
- kSrcPixels,
- kExpectedPixels,
- kPackAlignment);
+ ReadPixelsEmulator emu(kWidth, kHeight, kBytesPerPixel, kSrcPixels,
+ kSrcPixels, kPackAlignment);
typedef ReadPixels::Result Result;
Result* result = GetSharedMemoryAs<Result*>();
uint32_t result_shm_id = shared_memory_id_;
diff --git a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_textures.cc b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_textures.cc
index 17d773b3569..4f603ea837d 100644
--- a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_textures.cc
+++ b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_textures.cc
@@ -148,45 +148,6 @@ TEST_P(GLES3DecoderTest, GenerateMipmapBaseLevel) {
EXPECT_EQ(GL_NO_ERROR, GetGLError());
}
-// Same as GenerateMipmapClearsUnclearedTexture, but with workaround
-// |set_texture_filters_before_generating_mipmap|.
-TEST_P(GLES2DecoderManualInitTest, SetTextureFiltersBeforeGenerateMipmap) {
- gpu::GpuDriverBugWorkarounds workarounds;
- workarounds.set_texture_filter_before_generating_mipmap = true;
- InitState init;
- init.bind_generates_resource = true;
- InitDecoderWithWorkarounds(init, workarounds);
-
- EXPECT_CALL(*gl_, GenerateMipmapEXT(_)).Times(0);
- DoBindTexture(GL_TEXTURE_2D, client_texture_id_, kServiceTextureId);
- DoTexImage2D(
- GL_TEXTURE_2D, 0, GL_RGBA, 2, 2, 0, GL_RGBA, GL_UNSIGNED_BYTE, 0, 0);
- SetupClearTextureExpectations(kServiceTextureId, kServiceTextureId,
- GL_TEXTURE_2D, GL_TEXTURE_2D, 0, GL_RGBA,
- GL_UNSIGNED_BYTE, 0, 0, 2, 2, 0);
- EXPECT_CALL(
- *gl_,
- TexParameteri(
- GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST_MIPMAP_NEAREST))
- .Times(1)
- .RetiresOnSaturation();
- EXPECT_CALL(*gl_, GenerateMipmapEXT(GL_TEXTURE_2D));
- EXPECT_CALL(
- *gl_,
- TexParameteri(
- GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST_MIPMAP_LINEAR))
- .Times(1)
- .RetiresOnSaturation();
- EXPECT_CALL(*gl_, GetError())
- .WillOnce(Return(GL_NO_ERROR))
- .WillOnce(Return(GL_NO_ERROR))
- .RetiresOnSaturation();
- GenerateMipmap cmd;
- cmd.Init(GL_TEXTURE_2D);
- EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
- EXPECT_EQ(GL_NO_ERROR, GetGLError());
-}
-
TEST_P(GLES2DecoderTest, ActiveTextureValidArgs) {
EXPECT_CALL(*gl_, ActiveTexture(GL_TEXTURE1));
SpecializedSetup<ActiveTexture, 0>(true);
diff --git a/chromium/gpu/command_buffer/service/gpu_fence_manager.cc b/chromium/gpu/command_buffer/service/gpu_fence_manager.cc
index 68cb18ff474..00cf3de04d7 100644
--- a/chromium/gpu/command_buffer/service/gpu_fence_manager.cc
+++ b/chromium/gpu/command_buffer/service/gpu_fence_manager.cc
@@ -4,6 +4,8 @@
#include "gpu/command_buffer/service/gpu_fence_manager.h"
+#include <memory>
+
#include "base/bind.h"
#include "base/logging.h"
#include "ui/gfx/gpu_fence.h"
@@ -28,7 +30,7 @@ bool GpuFenceManager::CreateGpuFence(uint32_t client_id) {
if (it != gpu_fence_entries_.end())
return false;
- auto entry = base::MakeUnique<GpuFenceEntry>();
+ auto entry = std::make_unique<GpuFenceEntry>();
entry->gl_fence_ = gl::GLFence::CreateForGpuFence();
if (!entry->gl_fence_)
return false;
@@ -51,7 +53,7 @@ bool GpuFenceManager::CreateGpuFenceFromHandle(
return false;
gfx::GpuFence gpu_fence(handle);
- auto entry = base::MakeUnique<GpuFenceEntry>();
+ auto entry = std::make_unique<GpuFenceEntry>();
entry->gl_fence_ = gl::GLFence::CreateFromGpuFence(gpu_fence);
if (!entry->gl_fence_)
return false;
diff --git a/chromium/gpu/command_buffer/service/gpu_preferences.h b/chromium/gpu/command_buffer/service/gpu_preferences.h
index ed0650d490e..ce22a338ded 100644
--- a/chromium/gpu/command_buffer/service/gpu_preferences.h
+++ b/chromium/gpu/command_buffer/service/gpu_preferences.h
@@ -59,9 +59,6 @@ struct GPU_EXPORT GpuPreferences {
// Starts the GPU sandbox before creating a GL context.
bool gpu_sandbox_start_early = false;
- // Disables HW encode acceleration for WebRTC.
- bool disable_web_rtc_hw_encoding = false;
-
// Enables experimental hardware acceleration for VP8/VP9 video decoding.
// Bitmask - 0x1=Microsoft, 0x2=AMD, 0x03=Try all. Windows only.
VpxDecodeVendors enable_accelerated_vpx_decode = VPX_VENDOR_MICROSOFT;
@@ -171,6 +168,9 @@ struct GPU_EXPORT GpuPreferences {
// Ignores GPU blacklist.
bool ignore_gpu_blacklist = false;
+
+ // Please update gpu_preferences_util_unittest.cc when making additions or
+ // changes to this struct.
};
} // namespace gpu
diff --git a/chromium/gpu/command_buffer/service/mailbox_manager_sync.cc b/chromium/gpu/command_buffer/service/mailbox_manager_sync.cc
index a5b06f451e5..cb38432d704 100644
--- a/chromium/gpu/command_buffer/service/mailbox_manager_sync.cc
+++ b/chromium/gpu/command_buffer/service/mailbox_manager_sync.cc
@@ -53,7 +53,7 @@ void CreateFenceLocked(const SyncToken& sync_token) {
}
// Need to use EGL fences since we are likely not in a single share group.
auto fence = gl::GLFenceEGL::Create();
- DCHECK(fence);
+ CHECK(fence) << "eglCreateSyncKHR failed";
std::pair<SyncTokenToFenceMap::iterator, bool> result =
sync_point_to_fence.insert(
std::make_pair(sync_token, std::move(fence)));
diff --git a/chromium/gpu/command_buffer/service/mailbox_manager_unittest.cc b/chromium/gpu/command_buffer/service/mailbox_manager_unittest.cc
index fd6552034f6..cd7f6075eed 100644
--- a/chromium/gpu/command_buffer/service/mailbox_manager_unittest.cc
+++ b/chromium/gpu/command_buffer/service/mailbox_manager_unittest.cc
@@ -687,6 +687,68 @@ TEST_F(MailboxManagerSyncTest, ProduceBothWays) {
DestroyTexture(new_texture);
}
+// Test for crbug.com/816693
+// A: produce texture (without images) into M, B: consume into new_texture
+// B: push updates
+TEST_F(MailboxManagerSyncTest, ProduceTextureNotDefined) {
+ const GLuint kNewTextureId = 1234;
+ InSequence sequence;
+
+ Texture* texture = CreateTexture();
+ const GLsizei levels_needed = TextureManager::ComputeMipMapCount(
+ GL_TEXTURE_2D, kMaxTextureWidth, kMaxTextureHeight, kMaxTextureDepth);
+ SetTarget(texture, GL_TEXTURE_2D, levels_needed);
+ SetParameter(texture, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
+ SetParameter(texture, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
+ Mailbox name = Mailbox::Generate();
+
+ manager_->ProduceTexture(name, texture);
+
+ // Share
+ manager_->PushTextureUpdates(g_sync_token);
+ EXPECT_CALL(*gl_, GenTextures(1, _))
+ .WillOnce(SetArgPointee<1>(kNewTextureId));
+ SetupUpdateTexParamExpectations(kNewTextureId, GL_LINEAR, GL_LINEAR,
+ GL_REPEAT, GL_REPEAT);
+ TextureBase* new_texture = manager2_->ConsumeTexture(name);
+ EXPECT_EQ(kNewTextureId, new_texture->service_id());
+
+ // Change something so that the push recreates the TextureDefinition.
+ SetParameter(static_cast<Texture*>(new_texture), GL_TEXTURE_MIN_FILTER,
+ GL_NEAREST);
+
+ // Synchronize manager2 -> manager
+ manager2_->PushTextureUpdates(g_sync_token);
+ SetupUpdateTexParamExpectations(1, GL_NEAREST, GL_LINEAR, GL_REPEAT,
+ GL_REPEAT);
+ manager_->PullTextureUpdates(g_sync_token);
+
+ DestroyTexture(texture);
+ DestroyTexture(new_texture);
+}
+
+TEST_F(MailboxManagerSyncTest, ProduceTextureNotBound) {
+ InSequence sequence;
+
+ Texture* texture = CreateTexture();
+ Mailbox name = Mailbox::Generate();
+
+ manager_->ProduceTexture(name, texture);
+
+ // Share
+ manager_->PushTextureUpdates(g_sync_token);
+
+ // Consume should fail.
+ TextureBase* new_texture = manager2_->ConsumeTexture(name);
+ EXPECT_EQ(nullptr, new_texture);
+
+ // Synchronize manager2 -> manager
+ manager2_->PushTextureUpdates(g_sync_token);
+ manager_->PullTextureUpdates(g_sync_token);
+
+ DestroyTexture(texture);
+}
+
// TODO: Texture::level_infos_[][].size()
// TODO: unsupported targets and formats
diff --git a/chromium/gpu/command_buffer/service/raster_cmd_decoder_autogen.h b/chromium/gpu/command_buffer/service/raster_cmd_decoder_autogen.h
new file mode 100644
index 00000000000..d1ba94c6b6d
--- /dev/null
+++ b/chromium/gpu/command_buffer/service/raster_cmd_decoder_autogen.h
@@ -0,0 +1,535 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file is auto-generated from
+// gpu/command_buffer/build_raster_cmd_buffer.py
+// It's formatted by clang-format using chromium coding style:
+// clang-format -i -style=chromium filename
+// DO NOT EDIT!
+
+// It is included by raster_cmd_decoder.cc
+#ifndef GPU_COMMAND_BUFFER_SERVICE_RASTER_CMD_DECODER_AUTOGEN_H_
+#define GPU_COMMAND_BUFFER_SERVICE_RASTER_CMD_DECODER_AUTOGEN_H_
+
+error::Error RasterDecoderImpl::HandleActiveTexture(
+ uint32_t immediate_data_size,
+ const volatile void* cmd_data) {
+ const volatile raster::cmds::ActiveTexture& c =
+ *static_cast<const volatile raster::cmds::ActiveTexture*>(cmd_data);
+ GLenum texture = static_cast<GLenum>(c.texture);
+ DoActiveTexture(texture);
+ return error::kNoError;
+}
+
+error::Error RasterDecoderImpl::HandleBindTexture(
+ uint32_t immediate_data_size,
+ const volatile void* cmd_data) {
+ const volatile raster::cmds::BindTexture& c =
+ *static_cast<const volatile raster::cmds::BindTexture*>(cmd_data);
+ GLenum target = static_cast<GLenum>(c.target);
+ GLuint texture = c.texture;
+ if (!validators_->texture_bind_target.IsValid(target)) {
+ LOCAL_SET_GL_ERROR_INVALID_ENUM("glBindTexture", target, "target");
+ return error::kNoError;
+ }
+ DoBindTexture(target, texture);
+ return error::kNoError;
+}
+
+error::Error RasterDecoderImpl::HandleDeleteTexturesImmediate(
+ uint32_t immediate_data_size,
+ const volatile void* cmd_data) {
+ const volatile raster::cmds::DeleteTexturesImmediate& c =
+ *static_cast<const volatile raster::cmds::DeleteTexturesImmediate*>(
+ cmd_data);
+ GLsizei n = static_cast<GLsizei>(c.n);
+ uint32_t data_size;
+ if (!SafeMultiplyUint32(n, sizeof(GLuint), &data_size)) {
+ return error::kOutOfBounds;
+ }
+ volatile const GLuint* textures = GetImmediateDataAs<volatile const GLuint*>(
+ c, data_size, immediate_data_size);
+ if (textures == NULL) {
+ return error::kOutOfBounds;
+ }
+ DeleteTexturesHelper(n, textures);
+ return error::kNoError;
+}
+
+error::Error RasterDecoderImpl::HandleFinish(uint32_t immediate_data_size,
+ const volatile void* cmd_data) {
+ DoFinish();
+ return error::kNoError;
+}
+
+error::Error RasterDecoderImpl::HandleFlush(uint32_t immediate_data_size,
+ const volatile void* cmd_data) {
+ DoFlush();
+ return error::kNoError;
+}
+
+error::Error RasterDecoderImpl::HandleGenTexturesImmediate(
+ uint32_t immediate_data_size,
+ const volatile void* cmd_data) {
+ const volatile raster::cmds::GenTexturesImmediate& c =
+ *static_cast<const volatile raster::cmds::GenTexturesImmediate*>(
+ cmd_data);
+ GLsizei n = static_cast<GLsizei>(c.n);
+ uint32_t data_size;
+ if (!SafeMultiplyUint32(n, sizeof(GLuint), &data_size)) {
+ return error::kOutOfBounds;
+ }
+ volatile GLuint* textures =
+ GetImmediateDataAs<volatile GLuint*>(c, data_size, immediate_data_size);
+ if (textures == NULL) {
+ return error::kOutOfBounds;
+ }
+ auto textures_copy = std::make_unique<GLuint[]>(n);
+ GLuint* textures_safe = textures_copy.get();
+ std::copy(textures, textures + n, textures_safe);
+ if (!CheckUniqueAndNonNullIds(n, textures_safe) ||
+ !GenTexturesHelper(n, textures_safe)) {
+ return error::kInvalidArguments;
+ }
+ return error::kNoError;
+}
+
+error::Error RasterDecoderImpl::HandleGetError(uint32_t immediate_data_size,
+ const volatile void* cmd_data) {
+ const volatile raster::cmds::GetError& c =
+ *static_cast<const volatile raster::cmds::GetError*>(cmd_data);
+ typedef cmds::GetError::Result Result;
+ Result* result_dst = GetSharedMemoryAs<Result*>(
+ c.result_shm_id, c.result_shm_offset, sizeof(*result_dst));
+ if (!result_dst) {
+ return error::kOutOfBounds;
+ }
+ *result_dst = GetErrorState()->GetGLError();
+ return error::kNoError;
+}
+
+error::Error RasterDecoderImpl::HandleGetIntegerv(
+ uint32_t immediate_data_size,
+ const volatile void* cmd_data) {
+ const volatile raster::cmds::GetIntegerv& c =
+ *static_cast<const volatile raster::cmds::GetIntegerv*>(cmd_data);
+ GLenum pname = static_cast<GLenum>(c.pname);
+ typedef cmds::GetIntegerv::Result Result;
+ GLsizei num_values = 0;
+ if (!GetNumValuesReturnedForGLGet(pname, &num_values)) {
+ LOCAL_SET_GL_ERROR_INVALID_ENUM(":GetIntegerv", pname, "pname");
+ return error::kNoError;
+ }
+ Result* result = GetSharedMemoryAs<Result*>(
+ c.params_shm_id, c.params_shm_offset, Result::ComputeSize(num_values));
+ GLint* params = result ? result->GetData() : NULL;
+ if (!validators_->g_l_state.IsValid(pname)) {
+ LOCAL_SET_GL_ERROR_INVALID_ENUM("glGetIntegerv", pname, "pname");
+ return error::kNoError;
+ }
+ if (params == NULL) {
+ return error::kOutOfBounds;
+ }
+ LOCAL_COPY_REAL_GL_ERRORS_TO_WRAPPER("GetIntegerv");
+ // Check that the client initialized the result.
+ if (result->size != 0) {
+ return error::kInvalidArguments;
+ }
+ DoGetIntegerv(pname, params, num_values);
+ GLenum error = LOCAL_PEEK_GL_ERROR("GetIntegerv");
+ if (error == GL_NO_ERROR) {
+ result->SetNumResults(num_values);
+ }
+ return error::kNoError;
+}
+
+error::Error RasterDecoderImpl::HandleTexParameteri(
+ uint32_t immediate_data_size,
+ const volatile void* cmd_data) {
+ const volatile raster::cmds::TexParameteri& c =
+ *static_cast<const volatile raster::cmds::TexParameteri*>(cmd_data);
+ GLenum target = static_cast<GLenum>(c.target);
+ GLenum pname = static_cast<GLenum>(c.pname);
+ GLint param = static_cast<GLint>(c.param);
+ if (!validators_->texture_bind_target.IsValid(target)) {
+ LOCAL_SET_GL_ERROR_INVALID_ENUM("glTexParameteri", target, "target");
+ return error::kNoError;
+ }
+ if (!validators_->texture_parameter.IsValid(pname)) {
+ LOCAL_SET_GL_ERROR_INVALID_ENUM("glTexParameteri", pname, "pname");
+ return error::kNoError;
+ }
+ DoTexParameteri(target, pname, param);
+ return error::kNoError;
+}
+
+error::Error RasterDecoderImpl::HandleTexStorage2DEXT(
+ uint32_t immediate_data_size,
+ const volatile void* cmd_data) {
+ const volatile raster::cmds::TexStorage2DEXT& c =
+ *static_cast<const volatile raster::cmds::TexStorage2DEXT*>(cmd_data);
+ if (!features().ext_texture_storage) {
+ return error::kUnknownCommand;
+ }
+
+ GLenum target = static_cast<GLenum>(c.target);
+ GLsizei levels = static_cast<GLsizei>(c.levels);
+ GLenum internalFormat = static_cast<GLenum>(c.internalFormat);
+ GLsizei width = static_cast<GLsizei>(c.width);
+ GLsizei height = static_cast<GLsizei>(c.height);
+ if (!validators_->texture_bind_target.IsValid(target)) {
+ LOCAL_SET_GL_ERROR_INVALID_ENUM("glTexStorage2DEXT", target, "target");
+ return error::kNoError;
+ }
+ if (levels < 0) {
+ LOCAL_SET_GL_ERROR(GL_INVALID_VALUE, "glTexStorage2DEXT", "levels < 0");
+ return error::kNoError;
+ }
+ if (!validators_->texture_internal_format_storage.IsValid(internalFormat)) {
+ LOCAL_SET_GL_ERROR_INVALID_ENUM("glTexStorage2DEXT", internalFormat,
+ "internalFormat");
+ return error::kNoError;
+ }
+ if (width < 0) {
+ LOCAL_SET_GL_ERROR(GL_INVALID_VALUE, "glTexStorage2DEXT", "width < 0");
+ return error::kNoError;
+ }
+ if (height < 0) {
+ LOCAL_SET_GL_ERROR(GL_INVALID_VALUE, "glTexStorage2DEXT", "height < 0");
+ return error::kNoError;
+ }
+ DoTexStorage2DEXT(target, levels, internalFormat, width, height);
+ return error::kNoError;
+}
+
+error::Error RasterDecoderImpl::HandleGenQueriesEXTImmediate(
+ uint32_t immediate_data_size,
+ const volatile void* cmd_data) {
+ const volatile raster::cmds::GenQueriesEXTImmediate& c =
+ *static_cast<const volatile raster::cmds::GenQueriesEXTImmediate*>(
+ cmd_data);
+ GLsizei n = static_cast<GLsizei>(c.n);
+ uint32_t data_size;
+ if (!SafeMultiplyUint32(n, sizeof(GLuint), &data_size)) {
+ return error::kOutOfBounds;
+ }
+ volatile GLuint* queries =
+ GetImmediateDataAs<volatile GLuint*>(c, data_size, immediate_data_size);
+ if (queries == NULL) {
+ return error::kOutOfBounds;
+ }
+ auto queries_copy = std::make_unique<GLuint[]>(n);
+ GLuint* queries_safe = queries_copy.get();
+ std::copy(queries, queries + n, queries_safe);
+ if (!CheckUniqueAndNonNullIds(n, queries_safe) ||
+ !GenQueriesEXTHelper(n, queries_safe)) {
+ return error::kInvalidArguments;
+ }
+ return error::kNoError;
+}
+
+error::Error RasterDecoderImpl::HandleDeleteQueriesEXTImmediate(
+ uint32_t immediate_data_size,
+ const volatile void* cmd_data) {
+ const volatile raster::cmds::DeleteQueriesEXTImmediate& c =
+ *static_cast<const volatile raster::cmds::DeleteQueriesEXTImmediate*>(
+ cmd_data);
+ GLsizei n = static_cast<GLsizei>(c.n);
+ uint32_t data_size;
+ if (!SafeMultiplyUint32(n, sizeof(GLuint), &data_size)) {
+ return error::kOutOfBounds;
+ }
+ volatile const GLuint* queries = GetImmediateDataAs<volatile const GLuint*>(
+ c, data_size, immediate_data_size);
+ if (queries == NULL) {
+ return error::kOutOfBounds;
+ }
+ DeleteQueriesEXTHelper(n, queries);
+ return error::kNoError;
+}
+
+error::Error RasterDecoderImpl::HandleCopySubTextureCHROMIUM(
+ uint32_t immediate_data_size,
+ const volatile void* cmd_data) {
+ const volatile raster::cmds::CopySubTextureCHROMIUM& c =
+ *static_cast<const volatile raster::cmds::CopySubTextureCHROMIUM*>(
+ cmd_data);
+ GLuint source_id = static_cast<GLuint>(c.source_id);
+ GLint source_level = static_cast<GLint>(c.source_level);
+ GLenum dest_target = static_cast<GLenum>(c.dest_target);
+ GLuint dest_id = static_cast<GLuint>(c.dest_id);
+ GLint dest_level = static_cast<GLint>(c.dest_level);
+ GLint xoffset = static_cast<GLint>(c.xoffset);
+ GLint yoffset = static_cast<GLint>(c.yoffset);
+ GLint x = static_cast<GLint>(c.x);
+ GLint y = static_cast<GLint>(c.y);
+ GLsizei width = static_cast<GLsizei>(c.width);
+ GLsizei height = static_cast<GLsizei>(c.height);
+ GLboolean unpack_flip_y = static_cast<GLboolean>(c.unpack_flip_y);
+ GLboolean unpack_premultiply_alpha =
+ static_cast<GLboolean>(c.unpack_premultiply_alpha);
+ GLboolean unpack_unmultiply_alpha =
+ static_cast<GLboolean>(c.unpack_unmultiply_alpha);
+ if (!validators_->texture_target.IsValid(dest_target)) {
+ LOCAL_SET_GL_ERROR_INVALID_ENUM("glCopySubTextureCHROMIUM", dest_target,
+ "dest_target");
+ return error::kNoError;
+ }
+ if (width < 0) {
+ LOCAL_SET_GL_ERROR(GL_INVALID_VALUE, "glCopySubTextureCHROMIUM",
+ "width < 0");
+ return error::kNoError;
+ }
+ if (height < 0) {
+ LOCAL_SET_GL_ERROR(GL_INVALID_VALUE, "glCopySubTextureCHROMIUM",
+ "height < 0");
+ return error::kNoError;
+ }
+ DoCopySubTextureCHROMIUM(source_id, source_level, dest_target, dest_id,
+ dest_level, xoffset, yoffset, x, y, width, height,
+ unpack_flip_y, unpack_premultiply_alpha,
+ unpack_unmultiply_alpha);
+ return error::kNoError;
+}
+
+error::Error RasterDecoderImpl::HandleCompressedCopyTextureCHROMIUM(
+ uint32_t immediate_data_size,
+ const volatile void* cmd_data) {
+ const volatile raster::cmds::CompressedCopyTextureCHROMIUM& c =
+ *static_cast<const volatile raster::cmds::CompressedCopyTextureCHROMIUM*>(
+ cmd_data);
+ GLuint source_id = static_cast<GLuint>(c.source_id);
+ GLuint dest_id = static_cast<GLuint>(c.dest_id);
+ DoCompressedCopyTextureCHROMIUM(source_id, dest_id);
+ return error::kNoError;
+}
+
+error::Error RasterDecoderImpl::HandleProduceTextureDirectCHROMIUMImmediate(
+ uint32_t immediate_data_size,
+ const volatile void* cmd_data) {
+ const volatile raster::cmds::ProduceTextureDirectCHROMIUMImmediate& c =
+ *static_cast<
+ const volatile raster::cmds::ProduceTextureDirectCHROMIUMImmediate*>(
+ cmd_data);
+ GLuint texture = c.texture;
+ uint32_t data_size;
+ if (!GLES2Util::ComputeDataSize<GLbyte, 16>(1, &data_size)) {
+ return error::kOutOfBounds;
+ }
+ if (data_size > immediate_data_size) {
+ return error::kOutOfBounds;
+ }
+ volatile const GLbyte* mailbox = GetImmediateDataAs<volatile const GLbyte*>(
+ c, data_size, immediate_data_size);
+ if (mailbox == NULL) {
+ return error::kOutOfBounds;
+ }
+ DoProduceTextureDirectCHROMIUM(texture, mailbox);
+ return error::kNoError;
+}
+
+error::Error RasterDecoderImpl::HandleBindTexImage2DCHROMIUM(
+ uint32_t immediate_data_size,
+ const volatile void* cmd_data) {
+ const volatile raster::cmds::BindTexImage2DCHROMIUM& c =
+ *static_cast<const volatile raster::cmds::BindTexImage2DCHROMIUM*>(
+ cmd_data);
+ GLenum target = static_cast<GLenum>(c.target);
+ GLint imageId = static_cast<GLint>(c.imageId);
+ if (!validators_->texture_bind_target.IsValid(target)) {
+ LOCAL_SET_GL_ERROR_INVALID_ENUM("glBindTexImage2DCHROMIUM", target,
+ "target");
+ return error::kNoError;
+ }
+ DoBindTexImage2DCHROMIUM(target, imageId);
+ return error::kNoError;
+}
+
+error::Error RasterDecoderImpl::HandleReleaseTexImage2DCHROMIUM(
+ uint32_t immediate_data_size,
+ const volatile void* cmd_data) {
+ const volatile raster::cmds::ReleaseTexImage2DCHROMIUM& c =
+ *static_cast<const volatile raster::cmds::ReleaseTexImage2DCHROMIUM*>(
+ cmd_data);
+ GLenum target = static_cast<GLenum>(c.target);
+ GLint imageId = static_cast<GLint>(c.imageId);
+ if (!validators_->texture_bind_target.IsValid(target)) {
+ LOCAL_SET_GL_ERROR_INVALID_ENUM("glReleaseTexImage2DCHROMIUM", target,
+ "target");
+ return error::kNoError;
+ }
+ DoReleaseTexImage2DCHROMIUM(target, imageId);
+ return error::kNoError;
+}
+
+error::Error RasterDecoderImpl::HandleTraceEndCHROMIUM(
+ uint32_t immediate_data_size,
+ const volatile void* cmd_data) {
+ DoTraceEndCHROMIUM();
+ return error::kNoError;
+}
+
+error::Error RasterDecoderImpl::HandleLoseContextCHROMIUM(
+ uint32_t immediate_data_size,
+ const volatile void* cmd_data) {
+ const volatile raster::cmds::LoseContextCHROMIUM& c =
+ *static_cast<const volatile raster::cmds::LoseContextCHROMIUM*>(cmd_data);
+ GLenum current = static_cast<GLenum>(c.current);
+ GLenum other = static_cast<GLenum>(c.other);
+ if (!validators_->reset_status.IsValid(current)) {
+ LOCAL_SET_GL_ERROR_INVALID_ENUM("glLoseContextCHROMIUM", current,
+ "current");
+ return error::kNoError;
+ }
+ if (!validators_->reset_status.IsValid(other)) {
+ LOCAL_SET_GL_ERROR_INVALID_ENUM("glLoseContextCHROMIUM", other, "other");
+ return error::kNoError;
+ }
+ DoLoseContextCHROMIUM(current, other);
+ return error::kNoError;
+}
+
+error::Error RasterDecoderImpl::HandleBeginRasterCHROMIUM(
+ uint32_t immediate_data_size,
+ const volatile void* cmd_data) {
+ const volatile raster::cmds::BeginRasterCHROMIUM& c =
+ *static_cast<const volatile raster::cmds::BeginRasterCHROMIUM*>(cmd_data);
+ if (!features().chromium_raster_transport) {
+ return error::kUnknownCommand;
+ }
+
+ GLuint texture_id = static_cast<GLuint>(c.texture_id);
+ GLuint sk_color = static_cast<GLuint>(c.sk_color);
+ GLuint msaa_sample_count = static_cast<GLuint>(c.msaa_sample_count);
+ GLboolean can_use_lcd_text = static_cast<GLboolean>(c.can_use_lcd_text);
+ GLboolean use_distance_field_text =
+ static_cast<GLboolean>(c.use_distance_field_text);
+ GLint color_type = static_cast<GLint>(c.color_type);
+ DoBeginRasterCHROMIUM(texture_id, sk_color, msaa_sample_count,
+ can_use_lcd_text, use_distance_field_text, color_type);
+ return error::kNoError;
+}
+
+error::Error RasterDecoderImpl::HandleRasterCHROMIUM(
+ uint32_t immediate_data_size,
+ const volatile void* cmd_data) {
+ const volatile raster::cmds::RasterCHROMIUM& c =
+ *static_cast<const volatile raster::cmds::RasterCHROMIUM*>(cmd_data);
+ if (!features().chromium_raster_transport) {
+ return error::kUnknownCommand;
+ }
+
+ GLsizeiptr size = static_cast<GLsizeiptr>(c.size);
+ uint32_t data_size = size;
+ const void* list = GetSharedMemoryAs<const void*>(
+ c.list_shm_id, c.list_shm_offset, data_size);
+ if (size < 0) {
+ LOCAL_SET_GL_ERROR(GL_INVALID_VALUE, "glRasterCHROMIUM", "size < 0");
+ return error::kNoError;
+ }
+ if (list == NULL) {
+ return error::kOutOfBounds;
+ }
+ DoRasterCHROMIUM(size, list);
+ return error::kNoError;
+}
+
+error::Error RasterDecoderImpl::HandleEndRasterCHROMIUM(
+ uint32_t immediate_data_size,
+ const volatile void* cmd_data) {
+ if (!features().chromium_raster_transport) {
+ return error::kUnknownCommand;
+ }
+
+ DoEndRasterCHROMIUM();
+ return error::kNoError;
+}
+
+error::Error RasterDecoderImpl::HandleCreateTransferCacheEntryINTERNAL(
+ uint32_t immediate_data_size,
+ const volatile void* cmd_data) {
+ const volatile raster::cmds::CreateTransferCacheEntryINTERNAL& c =
+ *static_cast<
+ const volatile raster::cmds::CreateTransferCacheEntryINTERNAL*>(
+ cmd_data);
+ GLuint entry_type = static_cast<GLuint>(c.entry_type);
+ GLuint entry_id = static_cast<GLuint>(c.entry_id);
+ GLuint handle_shm_id = static_cast<GLuint>(c.handle_shm_id);
+ GLuint handle_shm_offset = static_cast<GLuint>(c.handle_shm_offset);
+ GLuint data_shm_id = static_cast<GLuint>(c.data_shm_id);
+ GLuint data_shm_offset = static_cast<GLuint>(c.data_shm_offset);
+ GLuint data_size = static_cast<GLuint>(c.data_size);
+ DoCreateTransferCacheEntryINTERNAL(entry_type, entry_id, handle_shm_id,
+ handle_shm_offset, data_shm_id,
+ data_shm_offset, data_size);
+ return error::kNoError;
+}
+
+error::Error RasterDecoderImpl::HandleDeleteTransferCacheEntryINTERNAL(
+ uint32_t immediate_data_size,
+ const volatile void* cmd_data) {
+ const volatile raster::cmds::DeleteTransferCacheEntryINTERNAL& c =
+ *static_cast<
+ const volatile raster::cmds::DeleteTransferCacheEntryINTERNAL*>(
+ cmd_data);
+ GLuint entry_type = static_cast<GLuint>(c.entry_type);
+ GLuint entry_id = static_cast<GLuint>(c.entry_id);
+ DoDeleteTransferCacheEntryINTERNAL(entry_type, entry_id);
+ return error::kNoError;
+}
+
+error::Error RasterDecoderImpl::HandleUnlockTransferCacheEntryINTERNAL(
+ uint32_t immediate_data_size,
+ const volatile void* cmd_data) {
+ const volatile raster::cmds::UnlockTransferCacheEntryINTERNAL& c =
+ *static_cast<
+ const volatile raster::cmds::UnlockTransferCacheEntryINTERNAL*>(
+ cmd_data);
+ GLuint entry_type = static_cast<GLuint>(c.entry_type);
+ GLuint entry_id = static_cast<GLuint>(c.entry_id);
+ DoUnlockTransferCacheEntryINTERNAL(entry_type, entry_id);
+ return error::kNoError;
+}
+
+error::Error RasterDecoderImpl::HandleTexStorage2DImageCHROMIUM(
+ uint32_t immediate_data_size,
+ const volatile void* cmd_data) {
+ const volatile raster::cmds::TexStorage2DImageCHROMIUM& c =
+ *static_cast<const volatile raster::cmds::TexStorage2DImageCHROMIUM*>(
+ cmd_data);
+ if (!features().chromium_texture_storage_image) {
+ return error::kUnknownCommand;
+ }
+
+ GLenum target = static_cast<GLenum>(c.target);
+ GLenum internalFormat = static_cast<GLenum>(c.internalFormat);
+ GLenum bufferUsage = static_cast<GLenum>(c.bufferUsage);
+ GLsizei width = static_cast<GLsizei>(c.width);
+ GLsizei height = static_cast<GLsizei>(c.height);
+ if (!validators_->texture_bind_target.IsValid(target)) {
+ LOCAL_SET_GL_ERROR_INVALID_ENUM("glTexStorage2DImageCHROMIUM", target,
+ "target");
+ return error::kNoError;
+ }
+ if (!validators_->texture_internal_format_storage.IsValid(internalFormat)) {
+ LOCAL_SET_GL_ERROR_INVALID_ENUM("glTexStorage2DImageCHROMIUM",
+ internalFormat, "internalFormat");
+ return error::kNoError;
+ }
+ if (width < 0) {
+ LOCAL_SET_GL_ERROR(GL_INVALID_VALUE, "glTexStorage2DImageCHROMIUM",
+ "width < 0");
+ return error::kNoError;
+ }
+ if (height < 0) {
+ LOCAL_SET_GL_ERROR(GL_INVALID_VALUE, "glTexStorage2DImageCHROMIUM",
+ "height < 0");
+ return error::kNoError;
+ }
+ DoTexStorage2DImageCHROMIUM(target, internalFormat, bufferUsage, width,
+ height);
+ return error::kNoError;
+}
+
+#endif // GPU_COMMAND_BUFFER_SERVICE_RASTER_CMD_DECODER_AUTOGEN_H_
diff --git a/chromium/gpu/command_buffer/service/raster_decoder.cc b/chromium/gpu/command_buffer/service/raster_decoder.cc
index e8c1f724a36..f69fc6357cd 100644
--- a/chromium/gpu/command_buffer/service/raster_decoder.cc
+++ b/chromium/gpu/command_buffer/service/raster_decoder.cc
@@ -4,18 +4,32 @@
#include "gpu/command_buffer/service/raster_decoder.h"
+#include <stdint.h>
+
+#include <string>
+
#include "base/logging.h"
+#include "base/memory/ref_counted.h"
+#include "base/memory/weak_ptr.h"
#include "base/trace_event/trace_event.h"
#include "gpu/command_buffer/common/capabilities.h"
+#include "gpu/command_buffer/common/command_buffer_id.h"
+#include "gpu/command_buffer/common/constants.h"
#include "gpu/command_buffer/common/context_result.h"
-#include "gpu/command_buffer/common/gles2_cmd_ids.h"
+#include "gpu/command_buffer/common/debug_marker_manager.h"
+#include "gpu/command_buffer/common/raster_cmd_format.h"
+#include "gpu/command_buffer/common/raster_cmd_ids.h"
#include "gpu/command_buffer/common/sync_token.h"
#include "gpu/command_buffer/service/context_group.h"
+#include "gpu/command_buffer/service/context_state.h"
#include "gpu/command_buffer/service/decoder_client.h"
-#include "gpu/command_buffer/service/gl_utils.h"
-#include "gpu/command_buffer/service/shader_translator.h"
+#include "gpu/command_buffer/service/error_state.h"
+#include "gpu/command_buffer/service/feature_info.h"
+#include "gpu/command_buffer/service/gles2_cmd_validation.h"
+#include "gpu/command_buffer/service/logger.h"
#include "ui/gl/gl_context.h"
#include "ui/gl/gl_surface.h"
+#include "ui/gl/gl_version_info.h"
// Local versions of the SET_GL_ERROR macros
#define LOCAL_SET_GL_ERROR(error, function_name, msg) \
@@ -39,23 +53,367 @@ using namespace gpu::gles2;
namespace gpu {
namespace raster {
-// TODO(backer): Use a different set of commands.
-RasterDecoder::CommandInfo RasterDecoder::command_info[] = {
-#define GLES2_CMD_OP(name) \
- { \
- nullptr, cmds::name::kArgFlags, cmds::name::cmd_flags, \
- sizeof(cmds::name) / sizeof(CommandBufferEntry) - 1, \
+class RasterDecoderImpl : public RasterDecoder, public gles2::ErrorStateClient {
+ public:
+ RasterDecoderImpl(DecoderClient* client,
+ CommandBufferServiceBase* command_buffer_service,
+ gles2::Outputter* outputter,
+ gles2::ContextGroup* group);
+ ~RasterDecoderImpl() override;
+
+ GLES2Util* GetGLES2Util() override { return &util_; }
+
+ // DecoderContext implementation.
+ base::WeakPtr<DecoderContext> AsWeakPtr() override;
+ gpu::ContextResult Initialize(
+ const scoped_refptr<gl::GLSurface>& surface,
+ const scoped_refptr<gl::GLContext>& context,
+ bool offscreen,
+ const gles2::DisallowedFeatures& disallowed_features,
+ const ContextCreationAttribs& attrib_helper) override;
+ const gles2::ContextState* GetContextState() override;
+ void Destroy(bool have_context) override;
+ bool MakeCurrent() override;
+ gl::GLContext* GetGLContext() override;
+ Capabilities GetCapabilities() override;
+ void RestoreState(const gles2::ContextState* prev_state) override;
+ void RestoreActiveTexture() const override;
+ void RestoreAllTextureUnitAndSamplerBindings(
+ const gles2::ContextState* prev_state) const override;
+ void RestoreActiveTextureUnitBinding(unsigned int target) const override;
+ void RestoreBufferBinding(unsigned int target) override;
+ void RestoreBufferBindings() const override;
+ void RestoreFramebufferBindings() const override;
+ void RestoreRenderbufferBindings() override;
+ void RestoreProgramBindings() const override;
+ void RestoreTextureState(unsigned service_id) const override;
+ void RestoreTextureUnitBindings(unsigned unit) const override;
+ void RestoreVertexAttribArray(unsigned index) override;
+ void RestoreAllExternalTextureBindingsIfNeeded() override;
+ gles2::QueryManager* GetQueryManager() override;
+ gles2::GpuFenceManager* GetGpuFenceManager() override;
+ bool HasPendingQueries() const override;
+ void ProcessPendingQueries(bool did_finish) override;
+ bool HasMoreIdleWork() const override;
+ void PerformIdleWork() override;
+ bool HasPollingWork() const override;
+ void PerformPollingWork() override;
+ TextureBase* GetTextureBase(uint32_t client_id) override;
+ bool WasContextLost() const override;
+ bool WasContextLostByRobustnessExtension() const override;
+ void MarkContextLost(error::ContextLostReason reason) override;
+ bool CheckResetStatus() override;
+ void BeginDecoding() override;
+ void EndDecoding() override;
+ const char* GetCommandName(unsigned int command_id) const;
+ error::Error DoCommands(unsigned int num_commands,
+ const volatile void* buffer,
+ int num_entries,
+ int* entries_processed) override;
+ base::StringPiece GetLogPrefix() override;
+ void BindImage(uint32_t client_texture_id,
+ uint32_t texture_target,
+ gl::GLImage* image,
+ bool can_bind_to_sampler) override;
+ gles2::ContextGroup* GetContextGroup() override;
+ gles2::ErrorState* GetErrorState() override;
+
+ // ErrorClientState implementation.
+ void OnContextLostError() override;
+ void OnOutOfMemoryError() override;
+
+ Logger* GetLogger() override;
+
+ void SetIgnoreCachedStateForTest(bool ignore) override;
+
+ private:
+ gl::GLApi* api() const { return state_.api(); }
+
+ const FeatureInfo::FeatureFlags& features() const {
+ return feature_info_->feature_flags();
+ }
+
+ const gl::GLVersionInfo& gl_version_info() {
+ return feature_info_->gl_version_info();
+ }
+
+ const TextureManager* texture_manager() const {
+ return group_->texture_manager();
+ }
+
+ TextureManager* texture_manager() { return group_->texture_manager(); }
+
+ // Creates a Texture for the given texture.
+ TextureRef* CreateTexture(GLuint client_id, GLuint service_id) {
+ return texture_manager()->CreateTexture(client_id, service_id);
+ }
+
+ // Gets the texture info for the given texture. Returns nullptr if none
+ // exists.
+ TextureRef* GetTexture(GLuint client_id) const {
+ return texture_manager()->GetTexture(client_id);
+ }
+
+ // Deletes the texture info for the given texture.
+ void RemoveTexture(GLuint client_id) {
+ texture_manager()->RemoveTexture(client_id);
+ }
+
+ void UnbindTexture(TextureRef* texture_ref) {
+ // Unbind texture_ref from texture_ref units.
+ state_.UnbindTexture(texture_ref);
+ }
+
+ // Set remaining commands to process to 0 to force DoCommands to return
+ // and allow context preemption and GPU watchdog checks in
+ // CommandExecutor().
+ void ExitCommandProcessingEarly() { commands_to_process_ = 0; }
+
+ template <bool DebugImpl>
+ error::Error DoCommandsImpl(unsigned int num_commands,
+ const volatile void* buffer,
+ int num_entries,
+ int* entries_processed);
+
+ // Helper for glGetIntegerv. Returns false if pname is unhandled.
+ bool GetHelper(GLenum pname, GLint* params, GLsizei* num_written);
+
+ // Gets the number of values that will be returned by glGetXXX. Returns
+ // false if pname is unknown.
+ bool GetNumValuesReturnedForGLGet(GLenum pname, GLsizei* num_values);
+
+ void DoActiveTexture(GLenum texture_unit) { NOTIMPLEMENTED(); }
+ void DoBindTexture(GLenum target, GLuint texture);
+ void DeleteTexturesHelper(GLsizei n, const volatile GLuint* client_ids);
+ bool GenTexturesHelper(GLsizei n, const GLuint* client_ids);
+ bool GenQueriesEXTHelper(GLsizei n, const GLuint* client_ids) {
+ NOTIMPLEMENTED();
+ return true;
+ }
+ void DeleteQueriesEXTHelper(GLsizei n, const volatile GLuint* client_ids) {
+ NOTIMPLEMENTED();
+ }
+ void DoFinish();
+ void DoFlush();
+ void DoGetIntegerv(GLenum pname, GLint* params, GLsizei params_size);
+ void DoTexParameteri(GLenum target, GLenum pname, GLint param);
+ void DoTexStorage2DEXT(GLenum target,
+ GLsizei levels,
+ GLenum internal_format,
+ GLsizei width,
+ GLsizei height) {
+ NOTIMPLEMENTED();
+ }
+ void DoTexStorage2DImageCHROMIUM(GLenum target,
+ GLenum internal_format,
+ GLenum buffer_usage,
+ GLsizei width,
+ GLsizei height) {
+ NOTIMPLEMENTED();
+ }
+ void DoCopySubTextureCHROMIUM(GLuint source_id,
+ GLint source_level,
+ GLenum dest_target,
+ GLuint dest_id,
+ GLint dest_level,
+ GLint xoffset,
+ GLint yoffset,
+ GLint x,
+ GLint y,
+ GLsizei width,
+ GLsizei height,
+ GLboolean unpack_flip_y,
+ GLboolean unpack_premultiply_alpha,
+ GLboolean unpack_unmultiply_alpha) {
+ NOTIMPLEMENTED();
+ }
+ void DoCompressedCopyTextureCHROMIUM(GLuint source_id, GLuint dest_id) {
+ NOTIMPLEMENTED();
+ }
+ void DoProduceTextureDirectCHROMIUM(GLuint texture,
+ const volatile GLbyte* key) {
+ NOTIMPLEMENTED();
+ }
+ void DoBindTexImage2DCHROMIUM(GLenum target, GLint image_id) {
+ NOTIMPLEMENTED();
+ }
+ void DoReleaseTexImage2DCHROMIUM(GLenum target, GLint image_id) {
+ NOTIMPLEMENTED();
+ }
+ void DoTraceEndCHROMIUM();
+ void DoLoseContextCHROMIUM(GLenum current, GLenum other) { NOTIMPLEMENTED(); }
+ void DoBeginRasterCHROMIUM(GLuint texture_id,
+ GLuint sk_color,
+ GLuint msaa_sample_count,
+ GLboolean can_use_lcd_text,
+ GLboolean use_distance_field_text,
+ GLint color_type) {
+ NOTIMPLEMENTED();
+ }
+ void DoRasterCHROMIUM(GLsizeiptr size, const void* list) { NOTIMPLEMENTED(); }
+ void DoEndRasterCHROMIUM() { NOTIMPLEMENTED(); }
+ void DoCreateTransferCacheEntryINTERNAL(GLuint entry_type,
+ GLuint entry_id,
+ GLuint handle_shm_id,
+ GLuint handle_shm_offset,
+ GLuint data_shm_id,
+ GLuint data_shm_offset,
+ GLuint data_size) {
+ NOTIMPLEMENTED();
+ }
+ void DoUnlockTransferCacheEntryINTERNAL(GLuint entry_type, GLuint entry_id) {
+ NOTIMPLEMENTED();
+ }
+ void DoDeleteTransferCacheEntryINTERNAL(GLuint entry_type, GLuint entry_id) {
+ NOTIMPLEMENTED();
+ }
+ void DoUnpremultiplyAndDitherCopyCHROMIUM(GLuint source_id,
+ GLuint dest_id,
+ GLint x,
+ GLint y,
+ GLsizei width,
+ GLsizei height) {
+ NOTIMPLEMENTED();
+ }
+
+#if defined(NDEBUG)
+ void LogClientServiceMapping(const char* /* function_name */,
+ GLuint /* client_id */,
+ GLuint /* service_id */) {}
+ template <typename T>
+ void LogClientServiceForInfo(T* /* info */,
+ GLuint /* client_id */,
+ const char* /* function_name */) {}
+#else
+ void LogClientServiceMapping(const char* function_name,
+ GLuint client_id,
+ GLuint service_id) {
+ if (service_logging_) {
+ VLOG(1) << "[" << logger_.GetLogPrefix() << "] " << function_name
+ << ": client_id = " << client_id
+ << ", service_id = " << service_id;
+ }
+ }
+ template <typename T>
+ void LogClientServiceForInfo(T* info,
+ GLuint client_id,
+ const char* function_name) {
+ if (info) {
+ LogClientServiceMapping(function_name, client_id, info->service_id());
+ }
+ }
+#endif
+
+// Generate a member function prototype for each command in an automated and
+// typesafe way.
+#define RASTER_CMD_OP(name) \
+ Error Handle##name(uint32_t immediate_data_size, const volatile void* data);
+
+ RASTER_COMMAND_LIST(RASTER_CMD_OP)
+#undef RASTER_CMD_OP
+
+ typedef error::Error (RasterDecoderImpl::*CmdHandler)(
+ uint32_t immediate_data_size,
+ const volatile void* data);
+
+ // A struct to hold info about each command.
+ struct CommandInfo {
+ CmdHandler cmd_handler;
+ uint8_t arg_flags; // How to handle the arguments for this command
+ uint8_t cmd_flags; // How to handle this command
+ uint16_t arg_count; // How many arguments are expected for this command.
+ };
+
+ // A table of CommandInfo for all the commands.
+ static const CommandInfo command_info[kNumCommands - kFirstRasterCommand];
+
+ // Number of commands remaining to be processed in DoCommands().
+ int commands_to_process_;
+
+ // The current decoder error communicates the decoder error through command
+ // processing functions that do not return the error value. Should be set
+ // only if not returning an error.
+ error::Error current_decoder_error_;
+
+ scoped_refptr<gl::GLSurface> surface_;
+ scoped_refptr<gl::GLContext> context_;
+
+ DecoderClient* client_;
+
+ gles2::DebugMarkerManager debug_marker_manager_;
+ gles2::Logger logger_;
+
+ // The ContextGroup for this decoder uses to track resources.
+ scoped_refptr<gles2::ContextGroup> group_;
+ const gles2::Validators* validators_;
+ scoped_refptr<gles2::FeatureInfo> feature_info_;
+
+ // All the state for this context.
+ gles2::ContextState state_;
+
+ GLES2Util util_;
+
+ bool gpu_debug_commands_;
+
+ // Log extra info.
+ bool service_logging_;
+
+ base::WeakPtrFactory<DecoderContext> weak_ptr_factory_;
+
+ DISALLOW_COPY_AND_ASSIGN(RasterDecoderImpl);
+};
+
+constexpr RasterDecoderImpl::CommandInfo RasterDecoderImpl::command_info[] = {
+#define RASTER_CMD_OP(name) \
+ { \
+ &RasterDecoderImpl::Handle##name, cmds::name::kArgFlags, \
+ cmds::name::cmd_flags, \
+ sizeof(cmds::name) / sizeof(CommandBufferEntry) - 1, \
}, /* NOLINT */
- GLES2_COMMAND_LIST(GLES2_CMD_OP)
-#undef GLES2_CMD_OP
+ RASTER_COMMAND_LIST(RASTER_CMD_OP)
+#undef RASTER_CMD_OP
};
-RasterDecoder::RasterDecoder(DecoderClient* client,
- CommandBufferServiceBase* command_buffer_service,
- Outputter* outputter,
- ContextGroup* group)
+// static
+RasterDecoder* RasterDecoder::Create(
+ DecoderClient* client,
+ CommandBufferServiceBase* command_buffer_service,
+ Outputter* outputter,
+ ContextGroup* group) {
+ return new RasterDecoderImpl(client, command_buffer_service, outputter,
+ group);
+}
+
+RasterDecoder::RasterDecoder(CommandBufferServiceBase* command_buffer_service)
: CommonDecoder(command_buffer_service),
initialized_(false),
+ debug_(false),
+ log_commands_(false) {}
+
+RasterDecoder::~RasterDecoder() {}
+
+bool RasterDecoder::initialized() const {
+ return initialized_;
+}
+
+TextureBase* RasterDecoder::GetTextureBase(uint32_t client_id) {
+ return nullptr;
+}
+
+void RasterDecoder::BeginDecoding() {}
+
+void RasterDecoder::EndDecoding() {}
+
+base::StringPiece RasterDecoder::GetLogPrefix() {
+ return GetLogger()->GetLogPrefix();
+}
+
+RasterDecoderImpl::RasterDecoderImpl(
+ DecoderClient* client,
+ CommandBufferServiceBase* command_buffer_service,
+ Outputter* outputter,
+ ContextGroup* group)
+ : RasterDecoder(command_buffer_service),
commands_to_process_(0),
current_decoder_error_(error::kNoError),
client_(client),
@@ -64,55 +422,36 @@ RasterDecoder::RasterDecoder(DecoderClient* client,
validators_(group_->feature_info()->validators()),
feature_info_(group_->feature_info()),
state_(group_->feature_info(), this, &logger_),
- debug_(false),
- log_commands_(false),
+ service_logging_(
+ group_->gpu_preferences().enable_gpu_service_logging_gpu),
weak_ptr_factory_(this) {}
-RasterDecoder::~RasterDecoder() {}
+RasterDecoderImpl::~RasterDecoderImpl() {}
-base::WeakPtr<DecoderContext> RasterDecoder::AsWeakPtr() {
+base::WeakPtr<DecoderContext> RasterDecoderImpl::AsWeakPtr() {
return weak_ptr_factory_.GetWeakPtr();
}
-gpu::ContextResult RasterDecoder::Initialize(
+gpu::ContextResult RasterDecoderImpl::Initialize(
const scoped_refptr<gl::GLSurface>& surface,
const scoped_refptr<gl::GLContext>& context,
bool offscreen,
const DisallowedFeatures& disallowed_features,
const ContextCreationAttribs& attrib_helper) {
- TRACE_EVENT0("gpu", "RasterDecoder::Initialize");
+ TRACE_EVENT0("gpu", "RasterDecoderImpl::Initialize");
DCHECK(context->IsCurrent(surface.get()));
DCHECK(!context_.get());
state_.set_api(gl::g_current_gl_context);
- initialized_ = true;
-
- // TODO(backer): Remove temporary hack once we use a separate set of
- // commands. Thread safe because Initialize is always called from CrGpuMain
- // thread.
- static bool updated_command_info = false;
- if (!updated_command_info) {
- updated_command_info = true;
- command_info[cmds::GetString::kCmdId - kFirstGLES2Command].cmd_handler =
- &RasterDecoder::HandleGetString;
- command_info[cmds::TraceBeginCHROMIUM::kCmdId - kFirstGLES2Command]
- .cmd_handler = &RasterDecoder::HandleTraceBeginCHROMIUM;
- command_info[cmds::TraceEndCHROMIUM::kCmdId - kFirstGLES2Command]
- .cmd_handler = &RasterDecoder::HandleTraceEndCHROMIUM;
- command_info[cmds::InsertFenceSyncCHROMIUM::kCmdId - kFirstGLES2Command]
- .cmd_handler = &RasterDecoder::HandleInsertFenceSyncCHROMIUM;
- command_info[cmds::WaitSyncTokenCHROMIUM::kCmdId - kFirstGLES2Command]
- .cmd_handler = &RasterDecoder::HandleWaitSyncTokenCHROMIUM;
- }
+ set_initialized();
if (!offscreen) {
return gpu::ContextResult::kFatalFailure;
}
- // FIXME(backer):
- // if (group_->gpu_preferences().enable_gpu_debugging)
- // set_debug(true);
+ if (group_->gpu_preferences().enable_gpu_debugging)
+ set_debug(true);
if (group_->gpu_preferences().enable_gpu_command_logging)
set_log_commands(true);
@@ -130,33 +469,55 @@ gpu::ContextResult RasterDecoder::Initialize(
}
CHECK_GL_ERROR();
- return gpu::ContextResult::kSuccess;
-}
+ state_.texture_units.resize(group_->max_texture_units());
+ state_.sampler_units.resize(group_->max_texture_units());
+ for (uint32_t tt = 0; tt < state_.texture_units.size(); ++tt) {
+ api()->glActiveTextureFn(GL_TEXTURE0 + tt);
+ TextureRef* ref;
+ ref = texture_manager()->GetDefaultTextureInfo(GL_TEXTURE_2D);
+ state_.texture_units[tt].bound_texture_2d = ref;
+ api()->glBindTextureFn(GL_TEXTURE_2D, ref ? ref->service_id() : 0);
+ }
+ api()->glActiveTextureFn(GL_TEXTURE0);
+ CHECK_GL_ERROR();
-bool RasterDecoder::initialized() const {
- return initialized_;
+ return gpu::ContextResult::kSuccess;
}
-const gles2::ContextState* RasterDecoder::GetContextState() {
+const gles2::ContextState* RasterDecoderImpl::GetContextState() {
NOTIMPLEMENTED();
return nullptr;
}
-void RasterDecoder::Destroy(bool have_context) {}
+void RasterDecoderImpl::Destroy(bool have_context) {
+ if (group_.get()) {
+ group_->Destroy(this, have_context);
+ group_ = NULL;
+ }
+
+ // Destroy the surface before the context, some surface destructors make GL
+ // calls.
+ surface_ = nullptr;
+
+ if (context_.get()) {
+ context_->ReleaseCurrent(NULL);
+ context_ = NULL;
+ }
+}
// Make this decoder's GL context current.
-bool RasterDecoder::MakeCurrent() {
+bool RasterDecoderImpl::MakeCurrent() {
DCHECK(surface_);
if (!context_.get())
return false;
if (WasContextLost()) {
- LOG(ERROR) << " GLES2DecoderImpl: Trying to make lost context current.";
+ LOG(ERROR) << " RasterDecoderImpl: Trying to make lost context current.";
return false;
}
if (!context_->MakeCurrent(surface_.get())) {
- LOG(ERROR) << " GLES2DecoderImpl: Context lost during MakeCurrent.";
+ LOG(ERROR) << " RasterDecoderImpl: Context lost during MakeCurrent.";
MarkContextLost(error::kMakeCurrentFailed);
group_->LoseContexts(error::kUnknown);
return false;
@@ -164,11 +525,11 @@ bool RasterDecoder::MakeCurrent() {
return true;
}
-gl::GLContext* RasterDecoder::GetGLContext() {
+gl::GLContext* RasterDecoderImpl::GetGLContext() {
return context_.get();
}
-Capabilities RasterDecoder::GetCapabilities() {
+Capabilities RasterDecoderImpl::GetCapabilities() {
gpu::Capabilities caps;
caps.gpu_rasterization = true;
caps.supports_oop_raster = true;
@@ -177,140 +538,149 @@ Capabilities RasterDecoder::GetCapabilities() {
return caps;
}
-void RasterDecoder::RestoreState(const ContextState* prev_state) {
+void RasterDecoderImpl::RestoreState(const ContextState* prev_state) {
NOTIMPLEMENTED();
}
-void RasterDecoder::RestoreActiveTexture() const {
+void RasterDecoderImpl::RestoreActiveTexture() const {
NOTIMPLEMENTED();
}
-void RasterDecoder::RestoreAllTextureUnitAndSamplerBindings(
+void RasterDecoderImpl::RestoreAllTextureUnitAndSamplerBindings(
const ContextState* prev_state) const {
NOTIMPLEMENTED();
}
-void RasterDecoder::RestoreActiveTextureUnitBinding(unsigned int target) const {
+void RasterDecoderImpl::RestoreActiveTextureUnitBinding(
+ unsigned int target) const {
NOTIMPLEMENTED();
}
-void RasterDecoder::RestoreBufferBinding(unsigned int target) {
+void RasterDecoderImpl::RestoreBufferBinding(unsigned int target) {
NOTIMPLEMENTED();
}
-void RasterDecoder::RestoreBufferBindings() const {
+void RasterDecoderImpl::RestoreBufferBindings() const {
NOTIMPLEMENTED();
}
-void RasterDecoder::RestoreFramebufferBindings() const {
+void RasterDecoderImpl::RestoreFramebufferBindings() const {
NOTIMPLEMENTED();
}
-void RasterDecoder::RestoreRenderbufferBindings() {
+void RasterDecoderImpl::RestoreRenderbufferBindings() {
NOTIMPLEMENTED();
}
-void RasterDecoder::RestoreProgramBindings() const {
+void RasterDecoderImpl::RestoreProgramBindings() const {
NOTIMPLEMENTED();
}
-void RasterDecoder::RestoreTextureState(unsigned service_id) const {
+void RasterDecoderImpl::RestoreTextureState(unsigned service_id) const {
NOTIMPLEMENTED();
}
-void RasterDecoder::RestoreTextureUnitBindings(unsigned unit) const {
+void RasterDecoderImpl::RestoreTextureUnitBindings(unsigned unit) const {
NOTIMPLEMENTED();
}
-void RasterDecoder::RestoreVertexAttribArray(unsigned index) {
+void RasterDecoderImpl::RestoreVertexAttribArray(unsigned index) {
NOTIMPLEMENTED();
}
-void RasterDecoder::RestoreAllExternalTextureBindingsIfNeeded() {
+void RasterDecoderImpl::RestoreAllExternalTextureBindingsIfNeeded() {
NOTIMPLEMENTED();
}
-QueryManager* RasterDecoder::GetQueryManager() {
+QueryManager* RasterDecoderImpl::GetQueryManager() {
NOTIMPLEMENTED();
return nullptr;
}
-GpuFenceManager* RasterDecoder::GetGpuFenceManager() {
+GpuFenceManager* RasterDecoderImpl::GetGpuFenceManager() {
NOTIMPLEMENTED();
return nullptr;
}
-bool RasterDecoder::HasPendingQueries() const {
+bool RasterDecoderImpl::HasPendingQueries() const {
NOTIMPLEMENTED();
return false;
}
-void RasterDecoder::ProcessPendingQueries(bool did_finish) {
+void RasterDecoderImpl::ProcessPendingQueries(bool did_finish) {
NOTIMPLEMENTED();
}
-bool RasterDecoder::HasMoreIdleWork() const {
+bool RasterDecoderImpl::HasMoreIdleWork() const {
NOTIMPLEMENTED();
return false;
}
-void RasterDecoder::PerformIdleWork() {
+void RasterDecoderImpl::PerformIdleWork() {
NOTIMPLEMENTED();
}
-bool RasterDecoder::HasPollingWork() const {
+bool RasterDecoderImpl::HasPollingWork() const {
NOTIMPLEMENTED();
return false;
}
-void RasterDecoder::PerformPollingWork() {
+void RasterDecoderImpl::PerformPollingWork() {
NOTIMPLEMENTED();
}
-TextureBase* RasterDecoder::GetTextureBase(uint32_t client_id) {
+TextureBase* RasterDecoderImpl::GetTextureBase(uint32_t client_id) {
NOTIMPLEMENTED();
return nullptr;
}
-bool RasterDecoder::WasContextLost() const {
+bool RasterDecoderImpl::WasContextLost() const {
return false;
}
-bool RasterDecoder::WasContextLostByRobustnessExtension() const {
+bool RasterDecoderImpl::WasContextLostByRobustnessExtension() const {
NOTIMPLEMENTED();
return false;
}
-void RasterDecoder::MarkContextLost(error::ContextLostReason reason) {
+void RasterDecoderImpl::MarkContextLost(error::ContextLostReason reason) {
NOTIMPLEMENTED();
}
-bool RasterDecoder::CheckResetStatus() {
+bool RasterDecoderImpl::CheckResetStatus() {
NOTIMPLEMENTED();
return false;
}
-void RasterDecoder::BeginDecoding() {
+Logger* RasterDecoderImpl::GetLogger() {
+ return &logger_;
+}
+
+void RasterDecoderImpl::SetIgnoreCachedStateForTest(bool ignore) {
+ state_.SetIgnoreCachedStateForTest(ignore);
+}
+
+void RasterDecoderImpl::BeginDecoding() {
// TODO(backer): Add support the tracing commands.
gpu_debug_commands_ = log_commands() || debug();
}
-void RasterDecoder::EndDecoding() {
+void RasterDecoderImpl::EndDecoding() {
NOTIMPLEMENTED();
}
-const char* RasterDecoder::GetCommandName(unsigned int command_id) const {
- if (command_id >= kFirstGLES2Command && command_id < kNumCommands) {
- return gles2::GetCommandName(static_cast<CommandId>(command_id));
+const char* RasterDecoderImpl::GetCommandName(unsigned int command_id) const {
+ if (command_id >= kFirstRasterCommand && command_id < kNumCommands) {
+ return raster::GetCommandName(static_cast<CommandId>(command_id));
}
return GetCommonCommandName(static_cast<cmd::CommandId>(command_id));
}
template <bool DebugImpl>
-error::Error RasterDecoder::DoCommandsImpl(unsigned int num_commands,
- const volatile void* buffer,
- int num_entries,
- int* entries_processed) {
+error::Error RasterDecoderImpl::DoCommandsImpl(unsigned int num_commands,
+ const volatile void* buffer,
+ int num_entries,
+ int* entries_processed) {
DCHECK(entries_processed);
commands_to_process_ = num_commands;
error::Error result = error::kNoError;
@@ -340,7 +710,7 @@ error::Error RasterDecoder::DoCommandsImpl(unsigned int num_commands,
}
const unsigned int arg_count = size - 1;
- unsigned int command_index = command - kFirstGLES2Command;
+ unsigned int command_index = command - kFirstRasterCommand;
if (command_index < arraysize(command_info)) {
const CommandInfo& info = command_info[command_index];
unsigned int info_arg_count = static_cast<unsigned int>(info.arg_count);
@@ -348,20 +718,14 @@ error::Error RasterDecoder::DoCommandsImpl(unsigned int num_commands,
(info.arg_flags == cmd::kAtLeastN && arg_count >= info_arg_count)) {
uint32_t immediate_data_size = (arg_count - info_arg_count) *
sizeof(CommandBufferEntry); // NOLINT
- if (info.cmd_handler == nullptr) {
- LOG(ERROR) << "[" << logger_.GetLogPrefix() << "] "
- << GetCommandName(command) << "(" << command << ", "
- << command_index << ") is NOTIMPLEMENTED";
- } else {
- result = (this->*info.cmd_handler)(immediate_data_size, cmd_data);
- if (DebugImpl && debug() && !WasContextLost()) {
- GLenum error;
- while ((error = api()->glGetErrorFn()) != GL_NO_ERROR) {
- LOG(ERROR) << "[" << logger_.GetLogPrefix() << "] "
- << "GL ERROR: " << GLES2Util::GetStringEnum(error)
- << " : " << GetCommandName(command);
- LOCAL_SET_GL_ERROR(error, "DoCommand", "GL error from driver");
- }
+ result = (this->*info.cmd_handler)(immediate_data_size, cmd_data);
+ if (DebugImpl && debug() && !WasContextLost()) {
+ GLenum error;
+ while ((error = api()->glGetErrorFn()) != GL_NO_ERROR) {
+ LOG(ERROR) << "[" << logger_.GetLogPrefix() << "] "
+ << "GL ERROR: " << GLES2Util::GetStringEnum(error)
+ << " : " << GetCommandName(command);
+ LOCAL_SET_GL_ERROR(error, "DoCommand", "GL error from driver");
}
}
} else {
@@ -393,10 +757,10 @@ error::Error RasterDecoder::DoCommandsImpl(unsigned int num_commands,
return result;
}
-error::Error RasterDecoder::DoCommands(unsigned int num_commands,
- const volatile void* buffer,
- int num_entries,
- int* entries_processed) {
+error::Error RasterDecoderImpl::DoCommands(unsigned int num_commands,
+ const volatile void* buffer,
+ int num_entries,
+ int* entries_processed) {
if (gpu_debug_commands_) {
return DoCommandsImpl<true>(num_commands, buffer, num_entries,
entries_processed);
@@ -406,79 +770,73 @@ error::Error RasterDecoder::DoCommands(unsigned int num_commands,
}
}
-base::StringPiece RasterDecoder::GetLogPrefix() {
- return logger_.GetLogPrefix();
-}
+bool RasterDecoderImpl::GetHelper(GLenum pname,
+ GLint* params,
+ GLsizei* num_written) {
+ DCHECK(num_written);
+ switch (pname) {
+ case GL_MAX_TEXTURE_SIZE:
+ *num_written = 1;
+ if (params) {
+ params[0] = texture_manager()->MaxSizeForTarget(GL_TEXTURE_2D);
+ }
+ return true;
+ default:
+ *num_written = util_.GLGetNumValuesReturned(pname);
+ if (*num_written)
+ break;
-void RasterDecoder::BindImage(uint32_t client_texture_id,
- uint32_t texture_target,
- gl::GLImage* image,
- bool can_bind_to_sampler) {
- NOTIMPLEMENTED();
-}
+ return false;
+ }
-gles2::ContextGroup* RasterDecoder::GetContextGroup() {
- return group_.get();
+ // TODO(backer): Only GL_ACTIVE_TEXTURE supported?
+ if (pname != GL_ACTIVE_TEXTURE) {
+ return false;
+ }
+
+ if (params) {
+ api()->glGetIntegervFn(pname, params);
+ }
+ return true;
}
-gles2::ErrorState* RasterDecoder::GetErrorState() {
- return state_.GetErrorState();
+bool RasterDecoderImpl::GetNumValuesReturnedForGLGet(GLenum pname,
+ GLsizei* num_values) {
+ *num_values = 0;
+ if (state_.GetStateAsGLint(pname, NULL, num_values)) {
+ return true;
+ }
+ return GetHelper(pname, NULL, num_values);
}
-void RasterDecoder::OnContextLostError() {
- NOTIMPLEMENTED();
+base::StringPiece RasterDecoderImpl::GetLogPrefix() {
+ return logger_.GetLogPrefix();
}
-void RasterDecoder::OnOutOfMemoryError() {
+void RasterDecoderImpl::BindImage(uint32_t client_texture_id,
+ uint32_t texture_target,
+ gl::GLImage* image,
+ bool can_bind_to_sampler) {
NOTIMPLEMENTED();
}
-error::Error RasterDecoder::HandleTraceBeginCHROMIUM(
- uint32_t immediate_data_size,
- const volatile void* cmd_data) {
- const volatile gles2::cmds::TraceBeginCHROMIUM& c =
- *static_cast<const volatile gles2::cmds::TraceBeginCHROMIUM*>(cmd_data);
- Bucket* category_bucket = GetBucket(c.category_bucket_id);
- Bucket* name_bucket = GetBucket(c.name_bucket_id);
- if (!category_bucket || category_bucket->size() == 0 || !name_bucket ||
- name_bucket->size() == 0) {
- return error::kInvalidArguments;
- }
-
- std::string category_name;
- std::string trace_name;
- if (!category_bucket->GetAsString(&category_name) ||
- !name_bucket->GetAsString(&trace_name)) {
- return error::kInvalidArguments;
- }
-
- debug_marker_manager_.PushGroup(trace_name);
- return error::kNoError;
+gles2::ContextGroup* RasterDecoderImpl::GetContextGroup() {
+ return group_.get();
}
-error::Error RasterDecoder::HandleTraceEndCHROMIUM(
- uint32_t immediate_data_size,
- const volatile void* cmd_data) {
- debug_marker_manager_.PopGroup();
- return error::kNoError;
+gles2::ErrorState* RasterDecoderImpl::GetErrorState() {
+ return state_.GetErrorState();
}
-error::Error RasterDecoder::HandleInsertFenceSyncCHROMIUM(
- uint32_t immediate_data_size,
- const volatile void* cmd_data) {
- const volatile gles2::cmds::InsertFenceSyncCHROMIUM& c =
- *static_cast<const volatile gles2::cmds::InsertFenceSyncCHROMIUM*>(
- cmd_data);
+void RasterDecoderImpl::OnContextLostError() {
+ NOTIMPLEMENTED();
+}
- const uint64_t release_count = c.release_count();
- client_->OnFenceSyncRelease(release_count);
- // Exit inner command processing loop so that we check the scheduling state
- // and yield if necessary as we may have unblocked a higher priority context.
- ExitCommandProcessingEarly();
- return error::kNoError;
+void RasterDecoderImpl::OnOutOfMemoryError() {
+ NOTIMPLEMENTED();
}
-error::Error RasterDecoder::HandleWaitSyncTokenCHROMIUM(
+error::Error RasterDecoderImpl::HandleWaitSyncTokenCHROMIUM(
uint32_t immediate_data_size,
const volatile void* cmd_data) {
const volatile gles2::cmds::WaitSyncTokenCHROMIUM& c =
@@ -506,41 +864,172 @@ error::Error RasterDecoder::HandleWaitSyncTokenCHROMIUM(
: error::kNoError;
}
-error::Error RasterDecoder::HandleGetString(uint32_t immediate_data_size,
- const volatile void* cmd_data) {
- const volatile gles2::cmds::GetString& c =
- *static_cast<const volatile gles2::cmds::GetString*>(cmd_data);
- GLenum name = static_cast<GLenum>(c.name);
+error::Error RasterDecoderImpl::HandleBeginQueryEXT(
+ uint32_t immediate_data_size,
+ const volatile void* cmd_data) {
+ NOTIMPLEMENTED();
+ return error::kNoError;
+}
+
+error::Error RasterDecoderImpl::HandleEndQueryEXT(
+ uint32_t immediate_data_size,
+ const volatile void* cmd_data) {
+ NOTIMPLEMENTED();
+ return error::kNoError;
+}
+
+error::Error RasterDecoderImpl::HandleInitializeDiscardableTextureCHROMIUM(
+ uint32_t immediate_data_size,
+ const volatile void* cmd_data) {
+ NOTIMPLEMENTED();
+ return error::kNoError;
+}
+
+error::Error RasterDecoderImpl::HandleUnlockDiscardableTextureCHROMIUM(
+ uint32_t immediate_data_size,
+ const volatile void* cmd_data) {
+ NOTIMPLEMENTED();
+ return error::kNoError;
+}
+
+error::Error RasterDecoderImpl::HandleLockDiscardableTextureCHROMIUM(
+ uint32_t immediate_data_size,
+ const volatile void* cmd_data) {
+ NOTIMPLEMENTED();
+ return error::kNoError;
+}
+
+error::Error RasterDecoderImpl::HandleInsertFenceSyncCHROMIUM(
+ uint32_t immediate_data_size,
+ const volatile void* cmd_data) {
+ NOTIMPLEMENTED();
+ return error::kNoError;
+}
+
+void RasterDecoderImpl::DoFinish() {
+ api()->glFinishFn();
+ ProcessPendingQueries(true);
+}
+
+void RasterDecoderImpl::DoFlush() {
+ api()->glFlushFn();
+ ProcessPendingQueries(false);
+}
- // TODO(backer): Passthrough decoder does not validate. It's possible that
- // we don't have a validator there.
- if (!validators_->string_type.IsValid(name)) {
- LOCAL_SET_GL_ERROR_INVALID_ENUM("glGetString", name, "name");
- return error::kNoError;
+void RasterDecoderImpl::DoGetIntegerv(GLenum pname,
+ GLint* params,
+ GLsizei params_size) {
+ DCHECK(params);
+ GLsizei num_written = 0;
+ if (state_.GetStateAsGLint(pname, params, &num_written) ||
+ GetHelper(pname, params, &num_written)) {
+ DCHECK_EQ(num_written, params_size);
+ return;
}
+ NOTREACHED() << "Unhandled enum " << pname;
+}
- const char* str = nullptr;
- std::string extensions;
- switch (name) {
- case GL_VERSION:
- str = GetServiceVersionString(feature_info_.get());
- break;
- case GL_SHADING_LANGUAGE_VERSION:
- str = GetServiceShadingLanguageVersionString(feature_info_.get());
- break;
- case GL_EXTENSIONS: {
- str = "";
- NOTIMPLEMENTED();
- break;
+void RasterDecoderImpl::DeleteTexturesHelper(
+ GLsizei n,
+ const volatile GLuint* client_ids) {
+ for (GLsizei ii = 0; ii < n; ++ii) {
+ GLuint client_id = client_ids[ii];
+ TextureRef* texture_ref = GetTexture(client_id);
+ if (texture_ref) {
+ UnbindTexture(texture_ref);
+ RemoveTexture(client_id);
}
- default:
- str = reinterpret_cast<const char*>(api()->glGetStringFn(name));
- break;
}
- Bucket* bucket = CreateBucket(c.bucket_id);
- bucket->SetFromString(str);
- return error::kNoError;
}
+bool RasterDecoderImpl::GenTexturesHelper(GLsizei n, const GLuint* client_ids) {
+ for (GLsizei ii = 0; ii < n; ++ii) {
+ if (GetTexture(client_ids[ii])) {
+ return false;
+ }
+ }
+ std::unique_ptr<GLuint[]> service_ids(new GLuint[n]);
+ api()->glGenTexturesFn(n, service_ids.get());
+ for (GLsizei ii = 0; ii < n; ++ii) {
+ CreateTexture(client_ids[ii], service_ids[ii]);
+ }
+ return true;
+}
+
+void RasterDecoderImpl::DoTexParameteri(GLenum target,
+ GLenum pname,
+ GLint param) {
+ TextureRef* texture =
+ texture_manager()->GetTextureInfoForTarget(&state_, target);
+ if (!texture) {
+ LOCAL_SET_GL_ERROR(GL_INVALID_VALUE, "glTexParameteri", "unknown texture");
+ return;
+ }
+
+ texture_manager()->SetParameteri("glTexParameteri", GetErrorState(), texture,
+ pname, param);
+}
+
+void RasterDecoderImpl::DoBindTexture(GLenum target, GLuint client_id) {
+ TextureRef* texture_ref = NULL;
+ GLuint service_id = 0;
+ if (client_id != 0) {
+ texture_ref = GetTexture(client_id);
+ if (!texture_ref) {
+ if (!group_->bind_generates_resource()) {
+ LOCAL_SET_GL_ERROR(GL_INVALID_OPERATION, "glBindTexture",
+ "id not generated by glGenTextures");
+ return;
+ }
+
+ // It's a new id so make a texture texture for it.
+ api()->glGenTexturesFn(1, &service_id);
+ DCHECK_NE(0u, service_id);
+ CreateTexture(client_id, service_id);
+ texture_ref = GetTexture(client_id);
+ }
+ } else {
+ texture_ref = texture_manager()->GetDefaultTextureInfo(target);
+ }
+
+ // Check the texture exists
+ if (texture_ref) {
+ Texture* texture = texture_ref->texture();
+ // Check that we are not trying to bind it to a different target.
+ if (texture->target() != 0 && texture->target() != target) {
+ LOCAL_SET_GL_ERROR(GL_INVALID_OPERATION, "glBindTexture",
+ "texture bound to more than 1 target.");
+ return;
+ }
+ LogClientServiceForInfo(texture, client_id, "glBindTexture");
+ api()->glBindTextureFn(target, texture->service_id());
+ if (texture->target() == 0) {
+ texture_manager()->SetTarget(texture_ref, target);
+ if (!gl_version_info().BehavesLikeGLES() &&
+ gl_version_info().IsAtLeastGL(3, 2)) {
+ // In Desktop GL core profile and GL ES, depth textures are always
+ // sampled to the RED channel, whereas on Desktop GL compatibility
+ // proifle, they are sampled to RED, LUMINANCE, INTENSITY, or ALPHA
+ // channel, depending on the DEPTH_TEXTURE_MODE value.
+ // In theory we only need to apply this for depth textures, but it is
+ // simpler to apply to all textures.
+ api()->glTexParameteriFn(target, GL_DEPTH_TEXTURE_MODE, GL_RED);
+ }
+ }
+ } else {
+ api()->glBindTextureFn(target, 0);
+ }
+
+ TextureUnit& unit = state_.texture_units[state_.active_texture_unit];
+ unit.bind_target = target;
+ unit.SetInfoForTarget(target, texture_ref);
+}
+
+// Include the auto-generated part of this file. We split this because it means
+// we can easily edit the non-auto generated parts right here in this file
+// instead of having to edit some template or the code generator.
+#include "base/macros.h"
+#include "gpu/command_buffer/service/raster_decoder_autogen.h"
+
} // namespace raster
} // namespace gpu
diff --git a/chromium/gpu/command_buffer/service/raster_decoder.h b/chromium/gpu/command_buffer/service/raster_decoder.h
index 2cabd9369b0..e86e577785f 100644
--- a/chromium/gpu/command_buffer/service/raster_decoder.h
+++ b/chromium/gpu/command_buffer/service/raster_decoder.h
@@ -5,193 +5,63 @@
#ifndef GPU_COMMAND_BUFFER_SERVICE_RASTER_DECODER_H_
#define GPU_COMMAND_BUFFER_SERVICE_RASTER_DECODER_H_
-#include <stdint.h>
-
-#include <string>
-#include <vector>
-
-#include "base/callback_forward.h"
#include "base/macros.h"
-#include "base/memory/ref_counted.h"
-#include "base/memory/weak_ptr.h"
-#include "base/time/time.h"
-#include "build/build_config.h"
-#include "gpu/command_buffer/common/capabilities.h"
-#include "gpu/command_buffer/common/command_buffer_id.h"
-#include "gpu/command_buffer/common/constants.h"
-#include "gpu/command_buffer/common/context_result.h"
-#include "gpu/command_buffer/common/debug_marker_manager.h"
-#include "gpu/command_buffer/common/gles2_cmd_format.h"
#include "gpu/command_buffer/service/common_decoder.h"
-#include "gpu/command_buffer/service/context_state.h"
#include "gpu/command_buffer/service/decoder_context.h"
-#include "gpu/command_buffer/service/error_state.h"
-#include "gpu/command_buffer/service/feature_info.h"
-#include "gpu/command_buffer/service/gles2_cmd_validation.h"
-#include "gpu/command_buffer/service/logger.h"
#include "gpu/gpu_gles2_export.h"
namespace gpu {
-class TextureBase;
class DecoderClient;
+namespace gles2 {
+class GLES2Util;
+class Logger;
+class Outputter;
+} // namespace gles2
+
namespace raster {
// This class implements the AsyncAPIInterface interface, decoding
// RasterInterface commands and calling GL.
class GPU_GLES2_EXPORT RasterDecoder : public DecoderContext,
- public CommonDecoder,
- public gles2::ErrorStateClient {
+ public CommonDecoder {
public:
- RasterDecoder(DecoderClient* client,
- CommandBufferServiceBase* command_buffer_service,
- gles2::Outputter* outputter,
- gles2::ContextGroup* group);
+ static RasterDecoder* Create(DecoderClient* client,
+ CommandBufferServiceBase* command_buffer_service,
+ gles2::Outputter* outputter,
+ gles2::ContextGroup* group);
~RasterDecoder() override;
// DecoderContext implementation.
- base::WeakPtr<DecoderContext> AsWeakPtr() override;
- gpu::ContextResult Initialize(
- const scoped_refptr<gl::GLSurface>& surface,
- const scoped_refptr<gl::GLContext>& context,
- bool offscreen,
- const gles2::DisallowedFeatures& disallowed_features,
- const ContextCreationAttribs& attrib_helper) override;
bool initialized() const override;
- const gles2::ContextState* GetContextState() override;
- void Destroy(bool have_context) override;
- bool MakeCurrent() override;
- gl::GLContext* GetGLContext() override;
- Capabilities GetCapabilities() override;
- void RestoreState(const gles2::ContextState* prev_state) override;
- void RestoreActiveTexture() const override;
- void RestoreAllTextureUnitAndSamplerBindings(
- const gles2::ContextState* prev_state) const override;
- void RestoreActiveTextureUnitBinding(unsigned int target) const override;
- void RestoreBufferBinding(unsigned int target) override;
- void RestoreBufferBindings() const override;
- void RestoreFramebufferBindings() const override;
- void RestoreRenderbufferBindings() override;
- void RestoreProgramBindings() const override;
- void RestoreTextureState(unsigned service_id) const override;
- void RestoreTextureUnitBindings(unsigned unit) const override;
- void RestoreVertexAttribArray(unsigned index) override;
- void RestoreAllExternalTextureBindingsIfNeeded() override;
- gles2::QueryManager* GetQueryManager() override;
- gles2::GpuFenceManager* GetGpuFenceManager() override;
- bool HasPendingQueries() const override;
- void ProcessPendingQueries(bool did_finish) override;
- bool HasMoreIdleWork() const override;
- void PerformIdleWork() override;
- bool HasPollingWork() const override;
- void PerformPollingWork() override;
TextureBase* GetTextureBase(uint32_t client_id) override;
- bool WasContextLost() const override;
- bool WasContextLostByRobustnessExtension() const override;
- void MarkContextLost(error::ContextLostReason reason) override;
- bool CheckResetStatus() override;
void BeginDecoding() override;
void EndDecoding() override;
- const char* GetCommandName(unsigned int command_id) const;
- error::Error DoCommands(unsigned int num_commands,
- const volatile void* buffer,
- int num_entries,
- int* entries_processed) override;
base::StringPiece GetLogPrefix() override;
- void BindImage(uint32_t client_texture_id,
- uint32_t texture_target,
- gl::GLImage* image,
- bool can_bind_to_sampler) override;
- gles2::ContextGroup* GetContextGroup() override;
- gles2::ErrorState* GetErrorState() override;
- // ErrorClientState implementation.
- void OnContextLostError() override;
- void OnOutOfMemoryError() override;
+ virtual gles2::GLES2Util* GetGLES2Util() = 0;
+ virtual gles2::Logger* GetLogger() = 0;
+ virtual void SetIgnoreCachedStateForTest(bool ignore) = 0;
- bool debug() const { return debug_; }
+ void set_initialized() { initialized_ = true; }
// Set to true to call glGetError after every command.
void set_debug(bool debug) { debug_ = debug; }
-
- bool log_commands() const { return log_commands_; }
+ bool debug() const { return debug_; }
// Set to true to LOG every command.
void set_log_commands(bool log_commands) { log_commands_ = log_commands; }
+ bool log_commands() const { return log_commands_; }
- private:
- gl::GLApi* api() const { return state_.api(); }
-
- // Set remaining commands to process to 0 to force DoCommands to return
- // and allow context preemption and GPU watchdog checks in CommandExecutor().
- void ExitCommandProcessingEarly() { commands_to_process_ = 0; }
-
- error::Error HandleGetString(uint32_t immediate_data_size,
- const volatile void* cmd_data);
- error::Error HandleTraceBeginCHROMIUM(uint32_t immediate_data_size,
- const volatile void* cmd_data);
- error::Error HandleTraceEndCHROMIUM(uint32_t immediate_data_size,
- const volatile void* cmd_data);
- error::Error HandleInsertFenceSyncCHROMIUM(uint32_t immediate_data_size,
- const volatile void* cmd_data);
- error::Error HandleWaitSyncTokenCHROMIUM(uint32_t immediate_data_size,
- const volatile void* cmd_data);
-
- template <bool DebugImpl>
- error::Error DoCommandsImpl(unsigned int num_commands,
- const volatile void* buffer,
- int num_entries,
- int* entries_processed);
-
- typedef gpu::gles2::GLES2Decoder::Error (RasterDecoder::*CmdHandler)(
- uint32_t immediate_data_size,
- const volatile void* data);
-
- // A struct to hold info about each command.
- struct CommandInfo {
- CmdHandler cmd_handler;
- uint8_t arg_flags; // How to handle the arguments for this command
- uint8_t cmd_flags; // How to handle this command
- uint16_t arg_count; // How many arguments are expected for this command.
- };
-
- // A table of CommandInfo for all the commands.
- static CommandInfo
- command_info[gles2::kNumCommands - gles2::kFirstGLES2Command];
+ protected:
+ RasterDecoder(CommandBufferServiceBase* command_buffer_service);
+ private:
bool initialized_;
-
- // Number of commands remaining to be processed in DoCommands().
- int commands_to_process_;
-
- // The current decoder error communicates the decoder error through command
- // processing functions that do not return the error value. Should be set
- // only if not returning an error.
- error::Error current_decoder_error_;
-
- scoped_refptr<gl::GLSurface> surface_;
- scoped_refptr<gl::GLContext> context_;
-
- DecoderClient* client_;
-
- gles2::DebugMarkerManager debug_marker_manager_;
- gles2::Logger logger_;
-
- // The ContextGroup for this decoder uses to track resources.
- scoped_refptr<gles2::ContextGroup> group_;
- const gles2::Validators* validators_;
- scoped_refptr<gles2::FeatureInfo> feature_info_;
-
- // All the state for this context.
- gles2::ContextState state_;
-
bool debug_;
bool log_commands_;
- bool gpu_debug_commands_;
-
- base::WeakPtrFactory<DecoderContext> weak_ptr_factory_;
DISALLOW_COPY_AND_ASSIGN(RasterDecoder);
};
diff --git a/chromium/gpu/command_buffer/service/raster_decoder_autogen.h b/chromium/gpu/command_buffer/service/raster_decoder_autogen.h
new file mode 100644
index 00000000000..425e749ece0
--- /dev/null
+++ b/chromium/gpu/command_buffer/service/raster_decoder_autogen.h
@@ -0,0 +1,368 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file is auto-generated from
+// gpu/command_buffer/build_raster_cmd_buffer.py
+// It's formatted by clang-format using chromium coding style:
+// clang-format -i -style=chromium filename
+// DO NOT EDIT!
+
+// It is included by raster_cmd_decoder.cc
+#ifndef GPU_COMMAND_BUFFER_SERVICE_RASTER_DECODER_AUTOGEN_H_
+#define GPU_COMMAND_BUFFER_SERVICE_RASTER_DECODER_AUTOGEN_H_
+
+error::Error RasterDecoderImpl::HandleBindTexture(
+ uint32_t immediate_data_size,
+ const volatile void* cmd_data) {
+ const volatile raster::cmds::BindTexture& c =
+ *static_cast<const volatile raster::cmds::BindTexture*>(cmd_data);
+ GLenum target = static_cast<GLenum>(c.target);
+ GLuint texture = c.texture;
+ if (!validators_->texture_bind_target.IsValid(target)) {
+ LOCAL_SET_GL_ERROR_INVALID_ENUM("glBindTexture", target, "target");
+ return error::kNoError;
+ }
+ DoBindTexture(target, texture);
+ return error::kNoError;
+}
+
+error::Error RasterDecoderImpl::HandleDeleteTexturesImmediate(
+ uint32_t immediate_data_size,
+ const volatile void* cmd_data) {
+ const volatile raster::cmds::DeleteTexturesImmediate& c =
+ *static_cast<const volatile raster::cmds::DeleteTexturesImmediate*>(
+ cmd_data);
+ GLsizei n = static_cast<GLsizei>(c.n);
+ uint32_t data_size;
+ if (!SafeMultiplyUint32(n, sizeof(GLuint), &data_size)) {
+ return error::kOutOfBounds;
+ }
+ volatile const GLuint* textures = GetImmediateDataAs<volatile const GLuint*>(
+ c, data_size, immediate_data_size);
+ if (textures == NULL) {
+ return error::kOutOfBounds;
+ }
+ DeleteTexturesHelper(n, textures);
+ return error::kNoError;
+}
+
+error::Error RasterDecoderImpl::HandleFinish(uint32_t immediate_data_size,
+ const volatile void* cmd_data) {
+ DoFinish();
+ return error::kNoError;
+}
+
+error::Error RasterDecoderImpl::HandleFlush(uint32_t immediate_data_size,
+ const volatile void* cmd_data) {
+ DoFlush();
+ return error::kNoError;
+}
+
+error::Error RasterDecoderImpl::HandleGenTexturesImmediate(
+ uint32_t immediate_data_size,
+ const volatile void* cmd_data) {
+ const volatile raster::cmds::GenTexturesImmediate& c =
+ *static_cast<const volatile raster::cmds::GenTexturesImmediate*>(
+ cmd_data);
+ GLsizei n = static_cast<GLsizei>(c.n);
+ uint32_t data_size;
+ if (!SafeMultiplyUint32(n, sizeof(GLuint), &data_size)) {
+ return error::kOutOfBounds;
+ }
+ volatile GLuint* textures =
+ GetImmediateDataAs<volatile GLuint*>(c, data_size, immediate_data_size);
+ if (textures == NULL) {
+ return error::kOutOfBounds;
+ }
+ auto textures_copy = std::make_unique<GLuint[]>(n);
+ GLuint* textures_safe = textures_copy.get();
+ std::copy(textures, textures + n, textures_safe);
+ if (!CheckUniqueAndNonNullIds(n, textures_safe) ||
+ !GenTexturesHelper(n, textures_safe)) {
+ return error::kInvalidArguments;
+ }
+ return error::kNoError;
+}
+
+error::Error RasterDecoderImpl::HandleGetError(uint32_t immediate_data_size,
+ const volatile void* cmd_data) {
+ const volatile raster::cmds::GetError& c =
+ *static_cast<const volatile raster::cmds::GetError*>(cmd_data);
+ typedef cmds::GetError::Result Result;
+ Result* result_dst = GetSharedMemoryAs<Result*>(
+ c.result_shm_id, c.result_shm_offset, sizeof(*result_dst));
+ if (!result_dst) {
+ return error::kOutOfBounds;
+ }
+ *result_dst = GetErrorState()->GetGLError();
+ return error::kNoError;
+}
+
+error::Error RasterDecoderImpl::HandleGetIntegerv(
+ uint32_t immediate_data_size,
+ const volatile void* cmd_data) {
+ const volatile raster::cmds::GetIntegerv& c =
+ *static_cast<const volatile raster::cmds::GetIntegerv*>(cmd_data);
+ GLenum pname = static_cast<GLenum>(c.pname);
+ typedef cmds::GetIntegerv::Result Result;
+ GLsizei num_values = 0;
+ if (!GetNumValuesReturnedForGLGet(pname, &num_values)) {
+ LOCAL_SET_GL_ERROR_INVALID_ENUM(":GetIntegerv", pname, "pname");
+ return error::kNoError;
+ }
+ Result* result = GetSharedMemoryAs<Result*>(
+ c.params_shm_id, c.params_shm_offset, Result::ComputeSize(num_values));
+ GLint* params = result ? result->GetData() : NULL;
+ if (!validators_->g_l_state.IsValid(pname)) {
+ LOCAL_SET_GL_ERROR_INVALID_ENUM("glGetIntegerv", pname, "pname");
+ return error::kNoError;
+ }
+ if (params == NULL) {
+ return error::kOutOfBounds;
+ }
+ LOCAL_COPY_REAL_GL_ERRORS_TO_WRAPPER("GetIntegerv");
+ // Check that the client initialized the result.
+ if (result->size != 0) {
+ return error::kInvalidArguments;
+ }
+ DoGetIntegerv(pname, params, num_values);
+ GLenum error = LOCAL_PEEK_GL_ERROR("GetIntegerv");
+ if (error == GL_NO_ERROR) {
+ result->SetNumResults(num_values);
+ }
+ return error::kNoError;
+}
+
+error::Error RasterDecoderImpl::HandleTexParameteri(
+ uint32_t immediate_data_size,
+ const volatile void* cmd_data) {
+ const volatile raster::cmds::TexParameteri& c =
+ *static_cast<const volatile raster::cmds::TexParameteri*>(cmd_data);
+ GLenum target = static_cast<GLenum>(c.target);
+ GLenum pname = static_cast<GLenum>(c.pname);
+ GLint param = static_cast<GLint>(c.param);
+ if (!validators_->texture_bind_target.IsValid(target)) {
+ LOCAL_SET_GL_ERROR_INVALID_ENUM("glTexParameteri", target, "target");
+ return error::kNoError;
+ }
+ if (!validators_->texture_parameter.IsValid(pname)) {
+ LOCAL_SET_GL_ERROR_INVALID_ENUM("glTexParameteri", pname, "pname");
+ return error::kNoError;
+ }
+ DoTexParameteri(target, pname, param);
+ return error::kNoError;
+}
+
+error::Error RasterDecoderImpl::HandleGenQueriesEXTImmediate(
+ uint32_t immediate_data_size,
+ const volatile void* cmd_data) {
+ const volatile raster::cmds::GenQueriesEXTImmediate& c =
+ *static_cast<const volatile raster::cmds::GenQueriesEXTImmediate*>(
+ cmd_data);
+ GLsizei n = static_cast<GLsizei>(c.n);
+ uint32_t data_size;
+ if (!SafeMultiplyUint32(n, sizeof(GLuint), &data_size)) {
+ return error::kOutOfBounds;
+ }
+ volatile GLuint* queries =
+ GetImmediateDataAs<volatile GLuint*>(c, data_size, immediate_data_size);
+ if (queries == NULL) {
+ return error::kOutOfBounds;
+ }
+ auto queries_copy = std::make_unique<GLuint[]>(n);
+ GLuint* queries_safe = queries_copy.get();
+ std::copy(queries, queries + n, queries_safe);
+ if (!CheckUniqueAndNonNullIds(n, queries_safe) ||
+ !GenQueriesEXTHelper(n, queries_safe)) {
+ return error::kInvalidArguments;
+ }
+ return error::kNoError;
+}
+
+error::Error RasterDecoderImpl::HandleDeleteQueriesEXTImmediate(
+ uint32_t immediate_data_size,
+ const volatile void* cmd_data) {
+ const volatile raster::cmds::DeleteQueriesEXTImmediate& c =
+ *static_cast<const volatile raster::cmds::DeleteQueriesEXTImmediate*>(
+ cmd_data);
+ GLsizei n = static_cast<GLsizei>(c.n);
+ uint32_t data_size;
+ if (!SafeMultiplyUint32(n, sizeof(GLuint), &data_size)) {
+ return error::kOutOfBounds;
+ }
+ volatile const GLuint* queries = GetImmediateDataAs<volatile const GLuint*>(
+ c, data_size, immediate_data_size);
+ if (queries == NULL) {
+ return error::kOutOfBounds;
+ }
+ DeleteQueriesEXTHelper(n, queries);
+ return error::kNoError;
+}
+
+error::Error RasterDecoderImpl::HandleCompressedCopyTextureCHROMIUM(
+ uint32_t immediate_data_size,
+ const volatile void* cmd_data) {
+ const volatile raster::cmds::CompressedCopyTextureCHROMIUM& c =
+ *static_cast<const volatile raster::cmds::CompressedCopyTextureCHROMIUM*>(
+ cmd_data);
+ GLuint source_id = static_cast<GLuint>(c.source_id);
+ GLuint dest_id = static_cast<GLuint>(c.dest_id);
+ DoCompressedCopyTextureCHROMIUM(source_id, dest_id);
+ return error::kNoError;
+}
+
+error::Error RasterDecoderImpl::HandleLoseContextCHROMIUM(
+ uint32_t immediate_data_size,
+ const volatile void* cmd_data) {
+ const volatile raster::cmds::LoseContextCHROMIUM& c =
+ *static_cast<const volatile raster::cmds::LoseContextCHROMIUM*>(cmd_data);
+ GLenum current = static_cast<GLenum>(c.current);
+ GLenum other = static_cast<GLenum>(c.other);
+ if (!validators_->reset_status.IsValid(current)) {
+ LOCAL_SET_GL_ERROR_INVALID_ENUM("glLoseContextCHROMIUM", current,
+ "current");
+ return error::kNoError;
+ }
+ if (!validators_->reset_status.IsValid(other)) {
+ LOCAL_SET_GL_ERROR_INVALID_ENUM("glLoseContextCHROMIUM", other, "other");
+ return error::kNoError;
+ }
+ DoLoseContextCHROMIUM(current, other);
+ return error::kNoError;
+}
+
+error::Error RasterDecoderImpl::HandleUnpremultiplyAndDitherCopyCHROMIUM(
+ uint32_t immediate_data_size,
+ const volatile void* cmd_data) {
+ const volatile raster::cmds::UnpremultiplyAndDitherCopyCHROMIUM& c =
+ *static_cast<
+ const volatile raster::cmds::UnpremultiplyAndDitherCopyCHROMIUM*>(
+ cmd_data);
+ if (!features().unpremultiply_and_dither_copy) {
+ return error::kUnknownCommand;
+ }
+
+ GLuint source_id = static_cast<GLuint>(c.source_id);
+ GLuint dest_id = static_cast<GLuint>(c.dest_id);
+ GLint x = static_cast<GLint>(c.x);
+ GLint y = static_cast<GLint>(c.y);
+ GLsizei width = static_cast<GLsizei>(c.width);
+ GLsizei height = static_cast<GLsizei>(c.height);
+ if (width < 0) {
+ LOCAL_SET_GL_ERROR(GL_INVALID_VALUE, "glUnpremultiplyAndDitherCopyCHROMIUM",
+ "width < 0");
+ return error::kNoError;
+ }
+ if (height < 0) {
+ LOCAL_SET_GL_ERROR(GL_INVALID_VALUE, "glUnpremultiplyAndDitherCopyCHROMIUM",
+ "height < 0");
+ return error::kNoError;
+ }
+ DoUnpremultiplyAndDitherCopyCHROMIUM(source_id, dest_id, x, y, width, height);
+ return error::kNoError;
+}
+
+error::Error RasterDecoderImpl::HandleBeginRasterCHROMIUM(
+ uint32_t immediate_data_size,
+ const volatile void* cmd_data) {
+ const volatile raster::cmds::BeginRasterCHROMIUM& c =
+ *static_cast<const volatile raster::cmds::BeginRasterCHROMIUM*>(cmd_data);
+ if (!features().chromium_raster_transport) {
+ return error::kUnknownCommand;
+ }
+
+ GLuint texture_id = static_cast<GLuint>(c.texture_id);
+ GLuint sk_color = static_cast<GLuint>(c.sk_color);
+ GLuint msaa_sample_count = static_cast<GLuint>(c.msaa_sample_count);
+ GLboolean can_use_lcd_text = static_cast<GLboolean>(c.can_use_lcd_text);
+ GLboolean use_distance_field_text =
+ static_cast<GLboolean>(c.use_distance_field_text);
+ GLint color_type = static_cast<GLint>(c.color_type);
+ DoBeginRasterCHROMIUM(texture_id, sk_color, msaa_sample_count,
+ can_use_lcd_text, use_distance_field_text, color_type);
+ return error::kNoError;
+}
+
+error::Error RasterDecoderImpl::HandleRasterCHROMIUM(
+ uint32_t immediate_data_size,
+ const volatile void* cmd_data) {
+ const volatile raster::cmds::RasterCHROMIUM& c =
+ *static_cast<const volatile raster::cmds::RasterCHROMIUM*>(cmd_data);
+ if (!features().chromium_raster_transport) {
+ return error::kUnknownCommand;
+ }
+
+ GLsizeiptr size = static_cast<GLsizeiptr>(c.size);
+ uint32_t data_size = size;
+ const void* list = GetSharedMemoryAs<const void*>(
+ c.list_shm_id, c.list_shm_offset, data_size);
+ if (size < 0) {
+ LOCAL_SET_GL_ERROR(GL_INVALID_VALUE, "glRasterCHROMIUM", "size < 0");
+ return error::kNoError;
+ }
+ if (list == NULL) {
+ return error::kOutOfBounds;
+ }
+ DoRasterCHROMIUM(size, list);
+ return error::kNoError;
+}
+
+error::Error RasterDecoderImpl::HandleEndRasterCHROMIUM(
+ uint32_t immediate_data_size,
+ const volatile void* cmd_data) {
+ if (!features().chromium_raster_transport) {
+ return error::kUnknownCommand;
+ }
+
+ DoEndRasterCHROMIUM();
+ return error::kNoError;
+}
+
+error::Error RasterDecoderImpl::HandleCreateTransferCacheEntryINTERNAL(
+ uint32_t immediate_data_size,
+ const volatile void* cmd_data) {
+ const volatile raster::cmds::CreateTransferCacheEntryINTERNAL& c =
+ *static_cast<
+ const volatile raster::cmds::CreateTransferCacheEntryINTERNAL*>(
+ cmd_data);
+ GLuint entry_type = static_cast<GLuint>(c.entry_type);
+ GLuint entry_id = static_cast<GLuint>(c.entry_id);
+ GLuint handle_shm_id = static_cast<GLuint>(c.handle_shm_id);
+ GLuint handle_shm_offset = static_cast<GLuint>(c.handle_shm_offset);
+ GLuint data_shm_id = static_cast<GLuint>(c.data_shm_id);
+ GLuint data_shm_offset = static_cast<GLuint>(c.data_shm_offset);
+ GLuint data_size = static_cast<GLuint>(c.data_size);
+ DoCreateTransferCacheEntryINTERNAL(entry_type, entry_id, handle_shm_id,
+ handle_shm_offset, data_shm_id,
+ data_shm_offset, data_size);
+ return error::kNoError;
+}
+
+error::Error RasterDecoderImpl::HandleDeleteTransferCacheEntryINTERNAL(
+ uint32_t immediate_data_size,
+ const volatile void* cmd_data) {
+ const volatile raster::cmds::DeleteTransferCacheEntryINTERNAL& c =
+ *static_cast<
+ const volatile raster::cmds::DeleteTransferCacheEntryINTERNAL*>(
+ cmd_data);
+ GLuint entry_type = static_cast<GLuint>(c.entry_type);
+ GLuint entry_id = static_cast<GLuint>(c.entry_id);
+ DoDeleteTransferCacheEntryINTERNAL(entry_type, entry_id);
+ return error::kNoError;
+}
+
+error::Error RasterDecoderImpl::HandleUnlockTransferCacheEntryINTERNAL(
+ uint32_t immediate_data_size,
+ const volatile void* cmd_data) {
+ const volatile raster::cmds::UnlockTransferCacheEntryINTERNAL& c =
+ *static_cast<
+ const volatile raster::cmds::UnlockTransferCacheEntryINTERNAL*>(
+ cmd_data);
+ GLuint entry_type = static_cast<GLuint>(c.entry_type);
+ GLuint entry_id = static_cast<GLuint>(c.entry_id);
+ DoUnlockTransferCacheEntryINTERNAL(entry_type, entry_id);
+ return error::kNoError;
+}
+
+#endif // GPU_COMMAND_BUFFER_SERVICE_RASTER_DECODER_AUTOGEN_H_
diff --git a/chromium/gpu/command_buffer/service/raster_decoder_mock.cc b/chromium/gpu/command_buffer/service/raster_decoder_mock.cc
new file mode 100644
index 00000000000..ccef0fb62ac
--- /dev/null
+++ b/chromium/gpu/command_buffer/service/raster_decoder_mock.cc
@@ -0,0 +1,25 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "gpu/command_buffer/service/raster_decoder_mock.h"
+
+#include "gpu/command_buffer/service/gles2_cmd_decoder.h"
+
+namespace gpu {
+namespace raster {
+
+MockRasterDecoder::MockRasterDecoder(
+ CommandBufferServiceBase* command_buffer_service)
+ : RasterDecoder(command_buffer_service), weak_ptr_factory_(this) {
+ ON_CALL(*this, MakeCurrent()).WillByDefault(testing::Return(true));
+}
+
+MockRasterDecoder::~MockRasterDecoder() = default;
+
+base::WeakPtr<DecoderContext> MockRasterDecoder::AsWeakPtr() {
+ return weak_ptr_factory_.GetWeakPtr();
+}
+
+} // namespace raster
+} // namespace gpu
diff --git a/chromium/gpu/command_buffer/service/raster_decoder_mock.h b/chromium/gpu/command_buffer/service/raster_decoder_mock.h
new file mode 100644
index 00000000000..9928d4e0185
--- /dev/null
+++ b/chromium/gpu/command_buffer/service/raster_decoder_mock.h
@@ -0,0 +1,121 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file contains the mock RasterDecoder class.
+
+#ifndef GPU_COMMAND_BUFFER_SERVICE_RASTER_DECODER_MOCK_H_
+#define GPU_COMMAND_BUFFER_SERVICE_RASTER_DECODER_MOCK_H_
+
+#include <stdint.h>
+
+#include <vector>
+
+#include "base/callback.h"
+#include "base/macros.h"
+#include "gpu/command_buffer/common/context_creation_attribs.h"
+#include "gpu/command_buffer/common/mailbox.h"
+#include "gpu/command_buffer/service/raster_decoder.h"
+#include "gpu/command_buffer/service/shader_translator.h"
+#include "testing/gmock/include/gmock/gmock.h"
+#include "ui/gfx/geometry/size.h"
+
+namespace gl {
+class GLContext;
+class GLSurface;
+} // namespace gl
+
+namespace gpu {
+
+namespace gles2 {
+class ContextGroup;
+class ErrorState;
+class GpuFenceManager;
+class QueryManager;
+class GLES2Util;
+struct ContextState;
+class FeatureInfo;
+class Logger;
+} // namespace gles2
+
+namespace raster {
+
+// GMock double for RasterDecoder, used by the decoder unit tests to drive
+// ContextGroup initialization without a real decoder.
+class MockRasterDecoder : public RasterDecoder {
+ public:
+  // |command_buffer_service| is not owned and must outlive this decoder.
+  // explicit: prevents accidental implicit conversion from a raw
+  // CommandBufferServiceBase* (single-argument constructor).
+  explicit MockRasterDecoder(CommandBufferServiceBase* command_buffer_service);
+  ~MockRasterDecoder() override;
+
+  base::WeakPtr<DecoderContext> AsWeakPtr() override;
+
+  MOCK_METHOD5(
+      Initialize,
+      gpu::ContextResult(const scoped_refptr<gl::GLSurface>& surface,
+                         const scoped_refptr<gl::GLContext>& context,
+                         bool offscreen,
+                         const gles2::DisallowedFeatures& disallowed_features,
+                         const ContextCreationAttribs& attrib_helper));
+  MOCK_METHOD1(Destroy, void(bool have_context));
+  MOCK_METHOD0(MakeCurrent, bool());
+  MOCK_METHOD1(GetServiceIdForTesting, uint32_t(uint32_t client_id));
+  MOCK_METHOD0(GetGLES2Util, gles2::GLES2Util*());
+  MOCK_METHOD0(GetGLSurface, gl::GLSurface*());
+  MOCK_METHOD0(GetGLContext, gl::GLContext*());
+  MOCK_METHOD0(GetContextGroup, gles2::ContextGroup*());
+  MOCK_CONST_METHOD0(GetFeatureInfo, const gles2::FeatureInfo*());
+  MOCK_METHOD0(GetContextState, const gles2::ContextState*());
+  MOCK_METHOD0(GetCapabilities, Capabilities());
+  MOCK_CONST_METHOD0(HasPendingQueries, bool());
+  MOCK_METHOD1(ProcessPendingQueries, void(bool));
+  MOCK_CONST_METHOD0(HasMoreIdleWork, bool());
+  MOCK_METHOD0(PerformIdleWork, void());
+  MOCK_CONST_METHOD0(HasPollingWork, bool());
+  MOCK_METHOD0(PerformPollingWork, void());
+  MOCK_METHOD1(RestoreState, void(const gles2::ContextState* prev_state));
+  MOCK_CONST_METHOD0(RestoreActiveTexture, void());
+  MOCK_CONST_METHOD1(RestoreAllTextureUnitAndSamplerBindings,
+                     void(const gles2::ContextState* state));
+  MOCK_CONST_METHOD1(RestoreActiveTextureUnitBinding,
+                     void(unsigned int target));
+  MOCK_METHOD0(RestoreAllExternalTextureBindingsIfNeeded, void());
+  MOCK_METHOD1(RestoreBufferBinding, void(unsigned int target));
+  MOCK_CONST_METHOD0(RestoreBufferBindings, void());
+  MOCK_CONST_METHOD0(RestoreFramebufferBindings, void());
+  MOCK_CONST_METHOD0(RestoreProgramBindings, void());
+  MOCK_METHOD0(RestoreRenderbufferBindings, void());
+  MOCK_CONST_METHOD1(RestoreTextureState, void(unsigned service_id));
+  MOCK_CONST_METHOD1(RestoreTextureUnitBindings, void(unsigned unit));
+  MOCK_METHOD1(RestoreVertexAttribArray, void(unsigned index));
+
+  MOCK_METHOD0(GetQueryManager, gles2::QueryManager*());
+  MOCK_METHOD0(GetGpuFenceManager, gpu::gles2::GpuFenceManager*());
+  MOCK_METHOD1(SetIgnoreCachedStateForTest, void(bool ignore));
+  MOCK_METHOD4(DoCommands,
+               error::Error(unsigned int num_commands,
+                            const volatile void* buffer,
+                            int num_entries,
+                            int* entries_processed));
+  MOCK_METHOD2(GetServiceTextureId,
+               bool(uint32_t client_texture_id, uint32_t* service_texture_id));
+  MOCK_METHOD0(GetErrorState, gles2::ErrorState*());
+
+  MOCK_METHOD0(GetLogger, gles2::Logger*());
+  MOCK_CONST_METHOD0(WasContextLost, bool());
+  MOCK_CONST_METHOD0(WasContextLostByRobustnessExtension, bool());
+  MOCK_METHOD1(MarkContextLost, void(gpu::error::ContextLostReason reason));
+  MOCK_METHOD0(CheckResetStatus, bool());
+  MOCK_METHOD4(BindImage,
+               void(uint32_t client_texture_id,
+                    uint32_t texture_target,
+                    gl::GLImage* image,
+                    bool can_bind_to_sampler));
+
+ private:
+  // Declared last so outstanding WeakPtrs are invalidated before any other
+  // member is destroyed (standard base::WeakPtrFactory ordering rule).
+  base::WeakPtrFactory<MockRasterDecoder> weak_ptr_factory_;
+
+  DISALLOW_COPY_AND_ASSIGN(MockRasterDecoder);
+};
+
+} // namespace raster
+} // namespace gpu
+
+#endif // GPU_COMMAND_BUFFER_SERVICE_RASTER_DECODER_MOCK_H_
diff --git a/chromium/gpu/command_buffer/service/raster_decoder_unittest_0_autogen.h b/chromium/gpu/command_buffer/service/raster_decoder_unittest_0_autogen.h
new file mode 100644
index 00000000000..baaeb8d5688
--- /dev/null
+++ b/chromium/gpu/command_buffer/service/raster_decoder_unittest_0_autogen.h
@@ -0,0 +1,15 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file is auto-generated from
+// gpu/command_buffer/build_raster_cmd_buffer.py
+// It's formatted by clang-format using chromium coding style:
+// clang-format -i -style=chromium filename
+// DO NOT EDIT!
+
+// It is included by raster_decoder_unittest_base.cc
+#ifndef GPU_COMMAND_BUFFER_SERVICE_RASTER_DECODER_UNITTEST_0_AUTOGEN_H_
+#define GPU_COMMAND_BUFFER_SERVICE_RASTER_DECODER_UNITTEST_0_AUTOGEN_H_
+
+#endif // GPU_COMMAND_BUFFER_SERVICE_RASTER_DECODER_UNITTEST_0_AUTOGEN_H_
diff --git a/chromium/gpu/command_buffer/service/raster_decoder_unittest_1.cc b/chromium/gpu/command_buffer/service/raster_decoder_unittest_1.cc
new file mode 100644
index 00000000000..6c68be0cb69
--- /dev/null
+++ b/chromium/gpu/command_buffer/service/raster_decoder_unittest_1.cc
@@ -0,0 +1,47 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "gpu/command_buffer/service/raster_decoder.h"
+
+#include "base/command_line.h"
+#include "gpu/command_buffer/common/gles2_cmd_utils.h"
+#include "gpu/command_buffer/common/raster_cmd_format.h"
+#include "gpu/command_buffer/service/context_group.h"
+#include "gpu/command_buffer/service/program_manager.h"
+#include "gpu/command_buffer/service/raster_decoder_unittest_base.h"
+#include "gpu/command_buffer/service/test_helper.h"
+#include "testing/gtest/include/gtest/gtest.h"
+#include "ui/gl/gl_mock.h"
+
+using ::gl::MockGLInterface;
+using ::testing::_;
+using ::testing::DoAll;
+using ::testing::InSequence;
+using ::testing::MatcherCast;
+using ::testing::Pointee;
+using ::testing::Return;
+using ::testing::SetArrayArgument;
+using ::testing::SetArgPointee;
+using ::testing::StrEq;
+
+namespace gpu {
+namespace raster {
+
+class RasterDecoderTest1 : public RasterDecoderTestBase {
+ public:
+ RasterDecoderTest1() = default;
+};
+
+INSTANTIATE_TEST_CASE_P(Service, RasterDecoderTest1, ::testing::Bool());
+
+// TexParameteri needs a texture bound before the command executes; bind the
+// test client texture to GL_TEXTURE_2D for both the valid and invalid cases.
+template <>
+void RasterDecoderTestBase::SpecializedSetup<cmds::TexParameteri, 0>(
+    bool /* valid */) {
+  DoBindTexture(GL_TEXTURE_2D, client_texture_id_, kServiceTextureId);
+}
+
+#include "gpu/command_buffer/service/raster_decoder_unittest_1_autogen.h"
+
+} // namespace raster
+} // namespace gpu
diff --git a/chromium/gpu/command_buffer/service/raster_decoder_unittest_1_autogen.h b/chromium/gpu/command_buffer/service/raster_decoder_unittest_1_autogen.h
new file mode 100644
index 00000000000..8349f94fd3e
--- /dev/null
+++ b/chromium/gpu/command_buffer/service/raster_decoder_unittest_1_autogen.h
@@ -0,0 +1,200 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file is auto-generated from
+// gpu/command_buffer/build_raster_cmd_buffer.py
+// It's formatted by clang-format using chromium coding style:
+// clang-format -i -style=chromium filename
+// DO NOT EDIT!
+
+// It is included by raster_decoder_unittest_1.cc
+#ifndef GPU_COMMAND_BUFFER_SERVICE_RASTER_DECODER_UNITTEST_1_AUTOGEN_H_
+#define GPU_COMMAND_BUFFER_SERVICE_RASTER_DECODER_UNITTEST_1_AUTOGEN_H_
+
+TEST_P(RasterDecoderTest1, DeleteTexturesImmediateValidArgs) {
+ EXPECT_CALL(*gl_, DeleteTextures(1, Pointee(kServiceTextureId))).Times(1);
+ cmds::DeleteTexturesImmediate& cmd =
+ *GetImmediateAs<cmds::DeleteTexturesImmediate>();
+ SpecializedSetup<cmds::DeleteTexturesImmediate, 0>(true);
+ cmd.Init(1, &client_texture_id_);
+ EXPECT_EQ(error::kNoError,
+ ExecuteImmediateCmd(cmd, sizeof(client_texture_id_)));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+ EXPECT_TRUE(GetTexture(client_texture_id_) == NULL);
+}
+
+TEST_P(RasterDecoderTest1, DeleteTexturesImmediateInvalidArgs) {
+ cmds::DeleteTexturesImmediate& cmd =
+ *GetImmediateAs<cmds::DeleteTexturesImmediate>();
+ SpecializedSetup<cmds::DeleteTexturesImmediate, 0>(false);
+ GLuint temp = kInvalidClientId;
+ cmd.Init(1, &temp);
+ EXPECT_EQ(error::kNoError, ExecuteImmediateCmd(cmd, sizeof(temp)));
+}
+
+TEST_P(RasterDecoderTest1, FinishValidArgs) {
+ EXPECT_CALL(*gl_, Finish());
+ SpecializedSetup<cmds::Finish, 0>(true);
+ cmds::Finish cmd;
+ cmd.Init();
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+
+TEST_P(RasterDecoderTest1, FlushValidArgs) {
+ EXPECT_CALL(*gl_, Flush());
+ SpecializedSetup<cmds::Flush, 0>(true);
+ cmds::Flush cmd;
+ cmd.Init();
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+
+TEST_P(RasterDecoderTest1, GenTexturesImmediateValidArgs) {
+ EXPECT_CALL(*gl_, GenTextures(1, _))
+ .WillOnce(SetArgPointee<1>(kNewServiceId));
+ cmds::GenTexturesImmediate* cmd =
+ GetImmediateAs<cmds::GenTexturesImmediate>();
+ GLuint temp = kNewClientId;
+ SpecializedSetup<cmds::GenTexturesImmediate, 0>(true);
+ cmd->Init(1, &temp);
+ EXPECT_EQ(error::kNoError, ExecuteImmediateCmd(*cmd, sizeof(temp)));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+ EXPECT_TRUE(GetTexture(kNewClientId) != NULL);
+}
+
+TEST_P(RasterDecoderTest1, GenTexturesImmediateDuplicateOrNullIds) {
+ EXPECT_CALL(*gl_, GenTextures(_, _)).Times(0);
+ cmds::GenTexturesImmediate* cmd =
+ GetImmediateAs<cmds::GenTexturesImmediate>();
+ GLuint temp[3] = {kNewClientId, kNewClientId + 1, kNewClientId};
+ SpecializedSetup<cmds::GenTexturesImmediate, 1>(true);
+ cmd->Init(3, temp);
+ EXPECT_EQ(error::kInvalidArguments, ExecuteImmediateCmd(*cmd, sizeof(temp)));
+ EXPECT_TRUE(GetTexture(kNewClientId) == NULL);
+ EXPECT_TRUE(GetTexture(kNewClientId + 1) == NULL);
+ GLuint null_id[2] = {kNewClientId, 0};
+ cmd->Init(2, null_id);
+ EXPECT_EQ(error::kInvalidArguments, ExecuteImmediateCmd(*cmd, sizeof(temp)));
+ EXPECT_TRUE(GetTexture(kNewClientId) == NULL);
+}
+
+TEST_P(RasterDecoderTest1, GenTexturesImmediateInvalidArgs) {
+ EXPECT_CALL(*gl_, GenTextures(_, _)).Times(0);
+ cmds::GenTexturesImmediate* cmd =
+ GetImmediateAs<cmds::GenTexturesImmediate>();
+ SpecializedSetup<cmds::GenTexturesImmediate, 0>(false);
+ cmd->Init(1, &client_texture_id_);
+ EXPECT_EQ(error::kInvalidArguments,
+ ExecuteImmediateCmd(*cmd, sizeof(&client_texture_id_)));
+}
+
+TEST_P(RasterDecoderTest1, GetErrorValidArgs) {
+ EXPECT_CALL(*gl_, GetError());
+ SpecializedSetup<cmds::GetError, 0>(true);
+ cmds::GetError cmd;
+ cmd.Init(shared_memory_id_, shared_memory_offset_);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+
+TEST_P(RasterDecoderTest1, GetErrorInvalidArgsBadSharedMemoryId) {
+ EXPECT_CALL(*gl_, GetError()).Times(0);
+ SpecializedSetup<cmds::GetError, 0>(false);
+ cmds::GetError cmd;
+ cmd.Init(kInvalidSharedMemoryId, shared_memory_offset_);
+ EXPECT_EQ(error::kOutOfBounds, ExecuteCmd(cmd));
+ cmd.Init(shared_memory_id_, kInvalidSharedMemoryOffset);
+ EXPECT_EQ(error::kOutOfBounds, ExecuteCmd(cmd));
+}
+
+TEST_P(RasterDecoderTest1, GetIntegervValidArgs) {
+ EXPECT_CALL(*gl_, GetError()).WillRepeatedly(Return(GL_NO_ERROR));
+ SpecializedSetup<cmds::GetIntegerv, 0>(true);
+ typedef cmds::GetIntegerv::Result Result;
+ Result* result = static_cast<Result*>(shared_memory_address_);
+ EXPECT_CALL(*gl_, GetIntegerv(GL_ACTIVE_TEXTURE, result->GetData()));
+ result->size = 0;
+ cmds::GetIntegerv cmd;
+ cmd.Init(GL_ACTIVE_TEXTURE, shared_memory_id_, shared_memory_offset_);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(decoder_->GetGLES2Util()->GLGetNumValuesReturned(GL_ACTIVE_TEXTURE),
+ result->GetNumResults());
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+
+TEST_P(RasterDecoderTest1, GetIntegervInvalidArgs0_0) {
+ EXPECT_CALL(*gl_, GetIntegerv(_, _)).Times(0);
+ SpecializedSetup<cmds::GetIntegerv, 0>(false);
+ cmds::GetIntegerv::Result* result =
+ static_cast<cmds::GetIntegerv::Result*>(shared_memory_address_);
+ result->size = 0;
+ cmds::GetIntegerv cmd;
+ cmd.Init(GL_FOG_HINT, shared_memory_id_, shared_memory_offset_);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(0u, result->size);
+ EXPECT_EQ(GL_INVALID_ENUM, GetGLError());
+}
+
+TEST_P(RasterDecoderTest1, GetIntegervInvalidArgs1_0) {
+ EXPECT_CALL(*gl_, GetIntegerv(_, _)).Times(0);
+ SpecializedSetup<cmds::GetIntegerv, 0>(false);
+ cmds::GetIntegerv::Result* result =
+ static_cast<cmds::GetIntegerv::Result*>(shared_memory_address_);
+ result->size = 0;
+ cmds::GetIntegerv cmd;
+ cmd.Init(GL_ACTIVE_TEXTURE, kInvalidSharedMemoryId, 0);
+ EXPECT_EQ(error::kOutOfBounds, ExecuteCmd(cmd));
+ EXPECT_EQ(0u, result->size);
+}
+
+TEST_P(RasterDecoderTest1, GetIntegervInvalidArgs1_1) {
+ EXPECT_CALL(*gl_, GetIntegerv(_, _)).Times(0);
+ SpecializedSetup<cmds::GetIntegerv, 0>(false);
+ cmds::GetIntegerv::Result* result =
+ static_cast<cmds::GetIntegerv::Result*>(shared_memory_address_);
+ result->size = 0;
+ cmds::GetIntegerv cmd;
+ cmd.Init(GL_ACTIVE_TEXTURE, shared_memory_id_, kInvalidSharedMemoryOffset);
+ EXPECT_EQ(error::kOutOfBounds, ExecuteCmd(cmd));
+ EXPECT_EQ(0u, result->size);
+}
+
+TEST_P(RasterDecoderTest1, TexParameteriValidArgs) {
+ EXPECT_CALL(*gl_,
+ TexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST));
+ SpecializedSetup<cmds::TexParameteri, 0>(true);
+ cmds::TexParameteri cmd;
+ cmd.Init(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+
+TEST_P(RasterDecoderTest1, TexParameteriInvalidArgs0_0) {
+ EXPECT_CALL(*gl_, TexParameteri(_, _, _)).Times(0);
+ SpecializedSetup<cmds::TexParameteri, 0>(false);
+ cmds::TexParameteri cmd;
+ cmd.Init(GL_TEXTURE_1D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_INVALID_ENUM, GetGLError());
+}
+
+TEST_P(RasterDecoderTest1, TexParameteriInvalidArgs0_1) {
+ EXPECT_CALL(*gl_, TexParameteri(_, _, _)).Times(0);
+ SpecializedSetup<cmds::TexParameteri, 0>(false);
+ cmds::TexParameteri cmd;
+ cmd.Init(GL_TEXTURE_3D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_INVALID_ENUM, GetGLError());
+}
+
+TEST_P(RasterDecoderTest1, TexParameteriInvalidArgs1_0) {
+ EXPECT_CALL(*gl_, TexParameteri(_, _, _)).Times(0);
+ SpecializedSetup<cmds::TexParameteri, 0>(false);
+ cmds::TexParameteri cmd;
+ cmd.Init(GL_TEXTURE_2D, GL_GENERATE_MIPMAP, GL_NEAREST);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_INVALID_ENUM, GetGLError());
+}
+#endif // GPU_COMMAND_BUFFER_SERVICE_RASTER_DECODER_UNITTEST_1_AUTOGEN_H_
diff --git a/chromium/gpu/command_buffer/service/raster_decoder_unittest_base.cc b/chromium/gpu/command_buffer/service/raster_decoder_unittest_base.cc
new file mode 100644
index 00000000000..98123abe7b4
--- /dev/null
+++ b/chromium/gpu/command_buffer/service/raster_decoder_unittest_base.cc
@@ -0,0 +1,327 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "gpu/command_buffer/service/raster_decoder_unittest_base.h"
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include <algorithm>
+#include <memory>
+#include <string>
+#include <vector>
+
+#include "base/command_line.h"
+#include "base/strings/string_number_conversions.h"
+#include "base/strings/string_split.h"
+#include "gpu/command_buffer/common/gles2_cmd_utils.h"
+#include "gpu/command_buffer/common/raster_cmd_format.h"
+#include "gpu/command_buffer/service/context_group.h"
+#include "gpu/command_buffer/service/gpu_switches.h"
+#include "gpu/command_buffer/service/logger.h"
+#include "gpu/command_buffer/service/mailbox_manager.h"
+#include "gpu/command_buffer/service/program_manager.h"
+#include "gpu/command_buffer/service/service_utils.h"
+#include "gpu/command_buffer/service/test_helper.h"
+#include "gpu/command_buffer/service/vertex_attrib_manager.h"
+#include "testing/gtest/include/gtest/gtest.h"
+#include "ui/gl/gl_mock.h"
+#include "ui/gl/init/gl_factory.h"
+#include "ui/gl/test/gl_surface_test_support.h"
+
+using ::gl::MockGLInterface;
+using ::testing::_;
+using ::testing::AnyNumber;
+using ::testing::AtMost;
+using ::testing::InSequence;
+using ::testing::Pointee;
+using ::testing::Return;
+using ::testing::SetArgPointee;
+using ::testing::StrictMock;
+
+using namespace gpu::gles2;
+
+namespace gpu {
+namespace raster {
+
+// Default-constructs the fixture with no GL objects yet; real setup happens
+// in InitDecoderWithWorkarounds() (called from SetUp()).
+RasterDecoderTestBase::RasterDecoderTestBase()
+    : surface_(nullptr),
+      context_(nullptr),
+      memory_tracker_(nullptr),
+      client_texture_id_(106),
+      shared_memory_id_(0),
+      shared_memory_offset_(0),
+      shared_memory_address_(nullptr),
+      shared_memory_base_(nullptr),
+      ignore_cached_state_for_test_(GetParam()),
+      shader_translator_cache_(gpu_preferences_) {
+  // Poison the immediate buffer so use of uninitialized bytes is detectable.
+  memset(immediate_buffer_, 0xEE, sizeof(immediate_buffer_));
+}
+
+RasterDecoderTestBase::~RasterDecoderTestBase() = default;
+
+void RasterDecoderTestBase::OnConsoleMessage(int32_t id,
+ const std::string& message) {}
+void RasterDecoderTestBase::CacheShader(const std::string& key,
+ const std::string& shader) {}
+void RasterDecoderTestBase::OnFenceSyncRelease(uint64_t release) {}
+bool RasterDecoderTestBase::OnWaitSyncToken(const gpu::SyncToken&) {
+ return false;
+}
+void RasterDecoderTestBase::OnDescheduleUntilFinished() {}
+void RasterDecoderTestBase::OnRescheduleAfterFinished() {}
+
+void RasterDecoderTestBase::SetUp() {
+ InitDecoderWithWorkarounds();
+}
+
+void RasterDecoderTestBase::InitDecoderWithWorkarounds() {
+ const std::string extensions("GL_EXT_framebuffer_object ");
+ const std::string gl_version("2.1");
+ const bool bind_generates_resource(false);
+ const bool lose_context_when_out_of_memory(false);
+ const ContextType context_type(CONTEXT_TYPE_OPENGLES2);
+
+ // For easier substring/extension matching
+ gl::SetGLGetProcAddressProc(gl::MockGLInterface::GetGLProcAddress);
+ gl::GLSurfaceTestSupport::InitializeOneOffWithMockBindings();
+
+ gl_.reset(new StrictMock<MockGLInterface>());
+ ::gl::MockGLInterface::SetGLInterface(gl_.get());
+
+ gpu::GpuDriverBugWorkarounds workarounds;
+ scoped_refptr<FeatureInfo> feature_info = new FeatureInfo(workarounds);
+
+ group_ = scoped_refptr<ContextGroup>(new ContextGroup(
+ gpu_preferences_, false, &mailbox_manager_, memory_tracker_,
+ &shader_translator_cache_, &framebuffer_completeness_cache_, feature_info,
+ bind_generates_resource, &image_manager_, nullptr /* image_factory */,
+ nullptr /* progress_reporter */, GpuFeatureInfo(),
+ &discardable_manager_));
+
+ InSequence sequence;
+
+ surface_ = new gl::GLSurfaceStub;
+
+  // Context needs to be created before initializing ContextGroup, which will
+ // in turn initialize FeatureInfo, which needs a context to determine
+ // extension support.
+ context_ = new StrictMock<GLContextMock>();
+ context_->SetExtensionsString(extensions.c_str());
+ context_->SetGLVersionString(gl_version.c_str());
+
+ context_->GLContextStub::MakeCurrent(surface_.get());
+
+ TestHelper::SetupContextGroupInitExpectations(
+ gl_.get(), DisallowedFeatures(), extensions.c_str(), gl_version.c_str(),
+ context_type, bind_generates_resource);
+
+ // We initialize the ContextGroup with a MockRasterDecoder so that
+ // we can use the ContextGroup to figure out how the real RasterDecoder
+ // will initialize itself.
+ command_buffer_service_.reset(new FakeCommandBufferServiceBase());
+ mock_decoder_.reset(new MockRasterDecoder(command_buffer_service_.get()));
+
+ EXPECT_EQ(group_->Initialize(mock_decoder_.get(), context_type,
+ DisallowedFeatures()),
+ gpu::ContextResult::kSuccess);
+
+ scoped_refptr<gpu::Buffer> buffer =
+ command_buffer_service_->CreateTransferBufferHelper(kSharedBufferSize,
+ &shared_memory_id_);
+ shared_memory_offset_ = kSharedMemoryOffset;
+ shared_memory_address_ =
+ reinterpret_cast<int8_t*>(buffer->memory()) + shared_memory_offset_;
+ shared_memory_base_ = buffer->memory();
+ ClearSharedMemory();
+
+ ContextCreationAttribs attribs;
+ attribs.lose_context_when_out_of_memory = lose_context_when_out_of_memory;
+ attribs.context_type = context_type;
+
+ bool use_default_textures = bind_generates_resource;
+ for (GLint tt = 0; tt < TestHelper::kNumTextureUnits; ++tt) {
+ EXPECT_CALL(*gl_, ActiveTexture(GL_TEXTURE0 + tt))
+ .Times(1)
+ .RetiresOnSaturation();
+ EXPECT_CALL(*gl_, BindTexture(GL_TEXTURE_2D,
+ use_default_textures
+ ? TestHelper::kServiceDefaultTexture2dId
+ : 0))
+ .Times(1)
+ .RetiresOnSaturation();
+ }
+ EXPECT_CALL(*gl_, ActiveTexture(GL_TEXTURE0)).Times(1).RetiresOnSaturation();
+
+ decoder_.reset(RasterDecoder::Create(this, command_buffer_service_.get(),
+ &outputter_, group_.get()));
+ decoder_->SetIgnoreCachedStateForTest(ignore_cached_state_for_test_);
+ decoder_->GetLogger()->set_log_synthesized_gl_errors(false);
+ ASSERT_EQ(decoder_->Initialize(surface_, context_, true, DisallowedFeatures(),
+ attribs),
+ gpu::ContextResult::kSuccess);
+
+ EXPECT_CALL(*context_, MakeCurrent(surface_.get())).WillOnce(Return(true));
+ if (context_->WasAllocatedUsingRobustnessExtension()) {
+ EXPECT_CALL(*gl_, GetGraphicsResetStatusARB())
+ .WillOnce(Return(GL_NO_ERROR));
+ }
+ decoder_->MakeCurrent();
+ decoder_->BeginDecoding();
+
+ EXPECT_CALL(*gl_, GenTextures(_, _))
+ .WillOnce(SetArgPointee<1>(kServiceTextureId))
+ .RetiresOnSaturation();
+ GenHelper<cmds::GenTexturesImmediate>(client_texture_id_);
+
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+
+void RasterDecoderTestBase::ResetDecoder() {
+ if (!decoder_.get())
+ return;
+ // All Tests should have read all their GLErrors before getting here.
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+
+ decoder_->EndDecoding();
+ decoder_->Destroy(!decoder_->WasContextLost());
+ decoder_.reset();
+ group_->Destroy(mock_decoder_.get(), false);
+ command_buffer_service_.reset();
+ ::gl::MockGLInterface::SetGLInterface(NULL);
+ gl_.reset();
+ gl::init::ShutdownGL(false);
+}
+
+void RasterDecoderTestBase::TearDown() {
+ ResetDecoder();
+}
+
+GLint RasterDecoderTestBase::GetGLError() {
+ EXPECT_CALL(*gl_, GetError())
+ .WillOnce(Return(GL_NO_ERROR))
+ .RetiresOnSaturation();
+ cmds::GetError cmd;
+ cmd.Init(shared_memory_id_, shared_memory_offset_);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ return static_cast<GLint>(*GetSharedMemoryAs<GLenum*>());
+}
+
+void RasterDecoderTestBase::SetBucketData(uint32_t bucket_id,
+ const void* data,
+ uint32_t data_size) {
+ DCHECK(data || data_size == 0);
+ cmd::SetBucketSize cmd1;
+ cmd1.Init(bucket_id, data_size);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd1));
+ if (data) {
+ memcpy(shared_memory_address_, data, data_size);
+ cmd::SetBucketData cmd2;
+ cmd2.Init(bucket_id, 0, data_size, shared_memory_id_, kSharedMemoryOffset);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd2));
+ ClearSharedMemory();
+ }
+}
+
+void RasterDecoderTestBase::SetBucketAsCString(uint32_t bucket_id,
+ const char* str) {
+ SetBucketData(bucket_id, str, str ? (strlen(str) + 1) : 0);
+}
+
+void RasterDecoderTestBase::SetBucketAsCStrings(uint32_t bucket_id,
+ GLsizei count,
+ const char** str,
+ GLsizei count_in_header,
+ char str_end) {
+ uint32_t header_size = sizeof(GLint) * (count + 1);
+ uint32_t total_size = header_size;
+ std::unique_ptr<GLint[]> header(new GLint[count + 1]);
+ header[0] = static_cast<GLint>(count_in_header);
+ for (GLsizei ii = 0; ii < count; ++ii) {
+ header[ii + 1] = str && str[ii] ? strlen(str[ii]) : 0;
+ total_size += header[ii + 1] + 1;
+ }
+ cmd::SetBucketSize cmd1;
+ cmd1.Init(bucket_id, total_size);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd1));
+ memcpy(shared_memory_address_, header.get(), header_size);
+ uint32_t offset = header_size;
+ for (GLsizei ii = 0; ii < count; ++ii) {
+ if (str && str[ii]) {
+ size_t str_len = strlen(str[ii]);
+ memcpy(reinterpret_cast<char*>(shared_memory_address_) + offset, str[ii],
+ str_len);
+ offset += str_len;
+ }
+ memcpy(reinterpret_cast<char*>(shared_memory_address_) + offset, &str_end,
+ 1);
+ offset += 1;
+ }
+ cmd::SetBucketData cmd2;
+ cmd2.Init(bucket_id, 0, total_size, shared_memory_id_, kSharedMemoryOffset);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd2));
+ ClearSharedMemory();
+}
+
+void RasterDecoderTestBase::DoBindTexture(GLenum target,
+ GLuint client_id,
+ GLuint service_id) {
+ EXPECT_CALL(*gl_, BindTexture(target, service_id))
+ .Times(1)
+ .RetiresOnSaturation();
+ if (!group_->feature_info()->gl_version_info().BehavesLikeGLES() &&
+ group_->feature_info()->gl_version_info().IsAtLeastGL(3, 2)) {
+ EXPECT_CALL(*gl_, TexParameteri(target, GL_DEPTH_TEXTURE_MODE, GL_RED))
+ .Times(AtMost(1));
+ }
+ cmds::BindTexture cmd;
+ cmd.Init(target, client_id);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+}
+
+void RasterDecoderTestBase::DoDeleteTexture(GLuint client_id,
+ GLuint service_id) {
+ {
+ InSequence s;
+
+ // Calling DoDeleteTexture will unbind the texture from any texture units
+ // it's currently bound to.
+ EXPECT_CALL(*gl_, BindTexture(_, 0)).Times(AnyNumber());
+
+ EXPECT_CALL(*gl_, DeleteTextures(1, Pointee(service_id)))
+ .Times(1)
+ .RetiresOnSaturation();
+
+ GenHelper<cmds::DeleteTexturesImmediate>(client_id);
+ }
+}
+
+
+// Include the auto-generated part of this file. We split this because it means
+// we can easily edit the non-auto generated parts right here in this file
+// instead of having to edit some template or the code generator.
+#include "gpu/command_buffer/service/raster_decoder_unittest_0_autogen.h"
+
+// GCC requires these declarations, but MSVC requires they not be present
+#ifndef COMPILER_MSVC
+const GLint RasterDecoderTestBase::kMaxTextureSize;
+const GLint RasterDecoderTestBase::kNumTextureUnits;
+
+const GLuint RasterDecoderTestBase::kServiceBufferId;
+const GLuint RasterDecoderTestBase::kServiceTextureId;
+
+const size_t RasterDecoderTestBase::kSharedBufferSize;
+const uint32_t RasterDecoderTestBase::kSharedMemoryOffset;
+const int32_t RasterDecoderTestBase::kInvalidSharedMemoryId;
+const uint32_t RasterDecoderTestBase::kInvalidSharedMemoryOffset;
+const uint32_t RasterDecoderTestBase::kInitialResult;
+const uint8_t RasterDecoderTestBase::kInitialMemoryValue;
+
+const uint32_t RasterDecoderTestBase::kNewClientId;
+const uint32_t RasterDecoderTestBase::kNewServiceId;
+const uint32_t RasterDecoderTestBase::kInvalidClientId;
+#endif
+
+} // namespace raster
+} // namespace gpu
diff --git a/chromium/gpu/command_buffer/service/raster_decoder_unittest_base.h b/chromium/gpu/command_buffer/service/raster_decoder_unittest_base.h
new file mode 100644
index 00000000000..44d947bbc17
--- /dev/null
+++ b/chromium/gpu/command_buffer/service/raster_decoder_unittest_base.h
@@ -0,0 +1,225 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef GPU_COMMAND_BUFFER_SERVICE_RASTER_DECODER_UNITTEST_BASE_H_
+#define GPU_COMMAND_BUFFER_SERVICE_RASTER_DECODER_UNITTEST_BASE_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include <array>
+#include <memory>
+
+#include "base/message_loop/message_loop.h"
+#include "gpu/command_buffer/client/client_test_helper.h"
+#include "gpu/command_buffer/common/gles2_cmd_utils.h"
+#include "gpu/command_buffer/common/raster_cmd_format.h"
+#include "gpu/command_buffer/service/buffer_manager.h"
+#include "gpu/command_buffer/service/context_group.h"
+#include "gpu/command_buffer/service/decoder_client.h"
+#include "gpu/command_buffer/service/framebuffer_manager.h"
+#include "gpu/command_buffer/service/gl_context_mock.h"
+#include "gpu/command_buffer/service/gpu_preferences.h"
+#include "gpu/command_buffer/service/gpu_tracer.h"
+#include "gpu/command_buffer/service/image_manager.h"
+#include "gpu/command_buffer/service/mailbox_manager_impl.h"
+#include "gpu/command_buffer/service/raster_decoder.h"
+#include "gpu/command_buffer/service/raster_decoder_mock.h"
+#include "gpu/command_buffer/service/service_discardable_manager.h"
+#include "gpu/command_buffer/service/shader_manager.h"
+#include "gpu/command_buffer/service/test_helper.h"
+#include "gpu/command_buffer/service/texture_manager.h"
+#include "gpu/config/gpu_driver_bug_workarounds.h"
+#include "testing/gtest/include/gtest/gtest.h"
+#include "ui/gl/gl_mock.h"
+#include "ui/gl/gl_surface_stub.h"
+#include "ui/gl/gl_version_info.h"
+
+namespace gpu {
+
+namespace gles2 {
+class MemoryTracker;
+} // namespace gles2
+
+namespace raster {
+
+class RasterDecoderTestBase : public ::testing::TestWithParam<bool>,
+ public DecoderClient {
+ public:
+ RasterDecoderTestBase();
+ ~RasterDecoderTestBase() override;
+
+ void OnConsoleMessage(int32_t id, const std::string& message) override;
+ void CacheShader(const std::string& key, const std::string& shader) override;
+ void OnFenceSyncRelease(uint64_t release) override;
+ bool OnWaitSyncToken(const gpu::SyncToken&) override;
+ void OnDescheduleUntilFinished() override;
+ void OnRescheduleAfterFinished() override;
+
+ // Template to call glGenXXX functions.
+ template <typename T>
+ void GenHelper(GLuint client_id) {
+ int8_t buffer[sizeof(T) + sizeof(client_id)];
+ T& cmd = *reinterpret_cast<T*>(&buffer);
+ cmd.Init(1, &client_id);
+ EXPECT_EQ(error::kNoError, ExecuteImmediateCmd(cmd, sizeof(client_id)));
+ }
+
+ // This template exists solely so we can specialize it for
+ // certain commands.
+ template <typename T, int id>
+ void SpecializedSetup(bool valid) {}
+
+ template <typename T>
+ T* GetImmediateAs() {
+ return reinterpret_cast<T*>(immediate_buffer_);
+ }
+
+ void ClearSharedMemory() {
+ memset(shared_memory_base_, kInitialMemoryValue, kSharedBufferSize);
+ }
+
+ void SetUp() override;
+ void TearDown() override;
+
+ template <typename T>
+ error::Error ExecuteCmd(const T& cmd) {
+ static_assert(T::kArgFlags == cmd::kFixed,
+ "T::kArgFlags should equal cmd::kFixed");
+ int entries_processed = 0;
+ return decoder_->DoCommands(1, (const void*)&cmd,
+ ComputeNumEntries(sizeof(cmd)),
+ &entries_processed);
+ }
+
+ template <typename T>
+ error::Error ExecuteImmediateCmd(const T& cmd, size_t data_size) {
+ static_assert(T::kArgFlags == cmd::kAtLeastN,
+ "T::kArgFlags should equal cmd::kAtLeastN");
+ int entries_processed = 0;
+ return decoder_->DoCommands(1, (const void*)&cmd,
+ ComputeNumEntries(sizeof(cmd) + data_size),
+ &entries_processed);
+ }
+
+ template <typename T>
+ T GetSharedMemoryAs() {
+ return reinterpret_cast<T>(shared_memory_address_);
+ }
+
+ template <typename T>
+ T GetSharedMemoryAsWithOffset(uint32_t offset) {
+ void* ptr = reinterpret_cast<int8_t*>(shared_memory_address_) + offset;
+ return reinterpret_cast<T>(ptr);
+ }
+
+ gles2::TextureRef* GetTexture(GLuint client_id) {
+ return group_->texture_manager()->GetTexture(client_id);
+ }
+
+ void SetBucketData(uint32_t bucket_id, const void* data, uint32_t data_size);
+ void SetBucketAsCString(uint32_t bucket_id, const char* str);
+ // If we want a valid bucket, just set |count_in_header| as |count|,
+ // and set |str_end| as 0.
+ void SetBucketAsCStrings(uint32_t bucket_id,
+ GLsizei count,
+ const char** str,
+ GLsizei count_in_header,
+ char str_end);
+
+ void InitDecoderWithWorkarounds();
+
+ void ResetDecoder();
+
+ const gles2::ContextGroup& group() const { return *group_.get(); }
+
+ void LoseContexts(error::ContextLostReason reason) const {
+ group_->LoseContexts(reason);
+ }
+
+ error::ContextLostReason GetContextLostReason() const {
+ return command_buffer_service_->GetState().context_lost_reason;
+ }
+
+ ::testing::StrictMock<::gl::MockGLInterface>* GetGLMock() const {
+ return gl_.get();
+ }
+
+ RasterDecoder* GetDecoder() const { return decoder_.get(); }
+
+ typedef gles2::TestHelper::AttribInfo AttribInfo;
+ typedef gles2::TestHelper::UniformInfo UniformInfo;
+
+ void SetupInitStateExpectations(bool es3_capable);
+
+ void SetupTexture();
+
+ // Note that the error is returned as GLint instead of GLenum.
+ // This is because there is a mismatch in the types of GLenum and
+ // the error values GL_NO_ERROR, GL_INVALID_ENUM, etc. GLenum is
+ // typedef'd as unsigned int while the error values are defined as
+ // integers. This is problematic for template functions such as
+ // EXPECT_EQ that expect both types to be the same.
+ GLint GetGLError();
+
+ void DoBindTexture(GLenum target, GLuint client_id, GLuint service_id);
+ void DoDeleteTexture(GLuint client_id, GLuint service_id);
+
+ GLvoid* BufferOffset(unsigned i) { return static_cast<int8_t*>(NULL) + (i); }
+
+ protected:
+ static const GLint kMaxTextureSize = 2048;
+ static const GLint kNumTextureUnits = 8;
+
+ static const GLuint kServiceBufferId = 301;
+ static const GLuint kServiceTextureId = 304;
+
+ static const size_t kSharedBufferSize = 2048;
+ static const uint32_t kSharedMemoryOffset = 132;
+ static const int32_t kInvalidSharedMemoryId =
+ FakeCommandBufferServiceBase::kTransferBufferBaseId - 1;
+ static const uint32_t kInvalidSharedMemoryOffset = kSharedBufferSize + 1;
+ static const uint32_t kInitialResult = 0xBDBDBDBDu;
+ static const uint8_t kInitialMemoryValue = 0xBDu;
+
+ static const uint32_t kNewClientId = 501;
+ static const uint32_t kNewServiceId = 502;
+ static const uint32_t kInvalidClientId = 601;
+
+ // Use StrictMock to make 100% sure we know how GL will be called.
+ std::unique_ptr<::testing::StrictMock<::gl::MockGLInterface>> gl_;
+ scoped_refptr<gl::GLSurfaceStub> surface_;
+ scoped_refptr<GLContextMock> context_;
+ std::unique_ptr<FakeCommandBufferServiceBase> command_buffer_service_;
+ gles2::TraceOutputter outputter_;
+ std::unique_ptr<MockRasterDecoder> mock_decoder_;
+ std::unique_ptr<RasterDecoder> decoder_;
+ gles2::MemoryTracker* memory_tracker_;
+
+ GLuint client_texture_id_;
+
+ int32_t shared_memory_id_;
+ uint32_t shared_memory_offset_;
+ void* shared_memory_address_;
+ void* shared_memory_base_;
+
+ uint32_t immediate_buffer_[64];
+
+ const bool ignore_cached_state_for_test_;
+
+ private:
+ GpuPreferences gpu_preferences_;
+ gles2::MailboxManagerImpl mailbox_manager_;
+ gles2::ShaderTranslatorCache shader_translator_cache_;
+ gles2::FramebufferCompletenessCache framebuffer_completeness_cache_;
+ gles2::ImageManager image_manager_;
+ ServiceDiscardableManager discardable_manager_;
+ scoped_refptr<gles2::ContextGroup> group_;
+ base::MessageLoop message_loop_;
+};
+
+} // namespace raster
+} // namespace gpu
+
+#endif // GPU_COMMAND_BUFFER_SERVICE_RASTER_DECODER_UNITTEST_BASE_H_
diff --git a/chromium/gpu/command_buffer/service/texture_definition.cc b/chromium/gpu/command_buffer/service/texture_definition.cc
index 4105482a188..231de63f3f2 100644
--- a/chromium/gpu/command_buffer/service/texture_definition.cc
+++ b/chromium/gpu/command_buffer/service/texture_definition.cc
@@ -319,8 +319,8 @@ TextureDefinition::TextureDefinition()
wrap_s_(0),
wrap_t_(0),
usage_(0),
- immutable_(true) {
-}
+ immutable_(true),
+ defined_(false) {}
TextureDefinition::TextureDefinition(
Texture* texture,
@@ -342,6 +342,7 @@ TextureDefinition::TextureDefinition(
DCHECK(image_buffer_.get());
}
+ DCHECK(!texture->face_infos_.empty());
const Texture::FaceInfo& first_face = texture->face_infos_[0];
if (image_buffer_.get()) {
scoped_refptr<gl::GLImage> gl_image(new GLImageSync(
@@ -350,6 +351,7 @@ TextureDefinition::TextureDefinition(
texture->SetLevelImage(target_, 0, gl_image.get(), Texture::BOUND);
}
+ DCHECK(!first_face.level_infos.empty());
const Texture::LevelInfo& level = first_face.level_infos[0];
level_info_ = LevelInfo(level.target, level.internal_format, level.width,
level.height, level.depth, level.border, level.format,
@@ -361,6 +363,8 @@ TextureDefinition::TextureDefinition(const TextureDefinition& other) = default;
TextureDefinition::~TextureDefinition() = default;
Texture* TextureDefinition::CreateTexture() const {
+ if (!target_)
+ return nullptr;
GLuint texture_id;
glGenTextures(1, &texture_id);
@@ -385,9 +389,9 @@ void TextureDefinition::UpdateTextureInternal(Texture* texture) const {
}
}
+ texture->face_infos_.resize(1);
+ texture->face_infos_[0].level_infos.resize(1);
if (defined_) {
- texture->face_infos_.resize(1);
- texture->face_infos_[0].level_infos.resize(1);
texture->SetLevelInfo(level_info_.target, 0,
level_info_.internal_format, level_info_.width,
level_info_.height, level_info_.depth,
diff --git a/chromium/gpu/command_buffer/service/texture_manager.cc b/chromium/gpu/command_buffer/service/texture_manager.cc
index fd60d2c9886..36fccf08050 100644
--- a/chromium/gpu/command_buffer/service/texture_manager.cc
+++ b/chromium/gpu/command_buffer/service/texture_manager.cc
@@ -361,6 +361,12 @@ bool SizedFormatAvailable(const FeatureInfo* feature_info,
return true;
}
+ if (internal_format == GL_RGB10_A2_EXT &&
+ (feature_info->feature_flags().chromium_image_xr30 ||
+ feature_info->feature_flags().chromium_image_xb30)) {
+ return true;
+ }
+
// TODO(dshwang): check if it's possible to remove
// CHROMIUM_color_buffer_float_rgb. crbug.com/329605
if (feature_info->feature_flags().chromium_color_buffer_float_rgb &&
@@ -2794,7 +2800,8 @@ void TextureManager::ValidateAndDoTexImage(
}
}
- if (texture_state->unpack_alignment_workaround_with_unpack_buffer && buffer) {
+ if (texture_state->unpack_alignment_workaround_with_unpack_buffer && buffer &&
+ args.width && args.height && args.depth) {
uint32_t buffer_size = static_cast<uint32_t>(buffer->size());
if (buffer_size - args.pixels_size - ToGLuint(args.pixels) < args.padding) {
// In ValidateTexImage(), we already made sure buffer size is no less
@@ -3023,7 +3030,8 @@ void TextureManager::ValidateAndDoTexSubImage(
}
}
- if (texture_state->unpack_alignment_workaround_with_unpack_buffer && buffer) {
+ if (texture_state->unpack_alignment_workaround_with_unpack_buffer && buffer &&
+ args.width && args.height && args.depth) {
uint32_t buffer_size = static_cast<uint32_t>(buffer->size());
if (buffer_size - args.pixels_size - ToGLuint(args.pixels) < args.padding) {
DoTexSubImageWithAlignmentWorkaround(texture_state, state, args);
@@ -3307,6 +3315,36 @@ GLenum TextureManager::AdjustTexFormat(const gles2::FeatureInfo* feature_info,
return format;
}
+// static
+GLenum TextureManager::AdjustTexStorageFormat(
+ const gles2::FeatureInfo* feature_info,
+ GLenum format) {
+ // We need to emulate luminance/alpha on core profile only.
+ if (feature_info->gl_version_info().is_desktop_core_profile) {
+ switch (format) {
+ case GL_ALPHA8_EXT:
+ return GL_R8_EXT;
+ case GL_LUMINANCE8_EXT:
+ return GL_R8_EXT;
+ case GL_LUMINANCE8_ALPHA8_EXT:
+ return GL_RG8_EXT;
+ case GL_ALPHA16F_EXT:
+ return GL_R16F_EXT;
+ case GL_LUMINANCE16F_EXT:
+ return GL_R16F_EXT;
+ case GL_LUMINANCE_ALPHA16F_EXT:
+ return GL_RG16F_EXT;
+ case GL_ALPHA32F_EXT:
+ return GL_R32F_EXT;
+ case GL_LUMINANCE32F_EXT:
+ return GL_R32F_EXT;
+ case GL_LUMINANCE_ALPHA32F_EXT:
+ return GL_RG32F_EXT;
+ }
+ }
+ return format;
+}
+
void TextureManager::DoTexImage(
DecoderTextureState* texture_state,
ContextState* state,
diff --git a/chromium/gpu/command_buffer/service/texture_manager.h b/chromium/gpu/command_buffer/service/texture_manager.h
index fe57445c87a..3fa41a3a098 100644
--- a/chromium/gpu/command_buffer/service/texture_manager.h
+++ b/chromium/gpu/command_buffer/service/texture_manager.h
@@ -1092,6 +1092,9 @@ class GPU_GLES2_EXPORT TextureManager
static GLenum AdjustTexFormat(const gles2::FeatureInfo* feature_info,
GLenum format);
+ static GLenum AdjustTexStorageFormat(const gles2::FeatureInfo* feature_info,
+ GLenum format);
+
void WorkaroundCopyTexImageCubeMap(
DecoderTextureState* texture_state,
ContextState* state,
diff --git a/chromium/gpu/config/gpu_driver_bug_list.json b/chromium/gpu/config/gpu_driver_bug_list.json
index 0b1ed081cc6..8a9b224ac2a 100644
--- a/chromium/gpu/config/gpu_driver_bug_list.json
+++ b/chromium/gpu/config/gpu_driver_bug_list.json
@@ -4,6 +4,7 @@
{
"id": 1,
"description": "Imagination driver doesn't like uploading lots of buffer data constantly",
+ "cr_bugs": [178093],
"os": {
"type": "android"
},
@@ -15,6 +16,7 @@
{
"id": 2,
"description": "ARM driver doesn't like uploading lots of buffer data constantly",
+ "cr_bugs": [178093],
"os": {
"type": "android"
},
@@ -157,20 +159,6 @@
]
},
{
- "id": 27,
- "cr_bugs": [265115],
- "description": "Async Readpixels with GL_BGRA format is broken on Haswell chipset on Macs",
- "os": {
- "type": "macosx"
- },
- "vendor_id": "0x8086",
- "device_id": ["0x0402", "0x0406", "0x040a", "0x0412", "0x0416", "0x041a",
- "0x0a04", "0x0a16", "0x0a22", "0x0a26", "0x0a2a"],
- "features": [
- "swizzle_rgba_for_async_readpixels"
- ]
- },
- {
"id": 31,
"cr_bugs": [154715, 10068, 269829, 294779, 285292],
"description": "The Mali-Txxx driver does not guarantee flush ordering",
@@ -1016,6 +1004,9 @@
"gl_renderer": "Adreno \\(TM\\) 3.*",
"features": [
"disable_timestamp_queries"
+ ],
+ "disabled_extensions": [
+ "GL_EXT_disjoint_timer_query"
]
},
{
@@ -1425,21 +1416,6 @@
]
},
{
- "id": 148,
- "description": "Mali-4xx GPU on JB doesn't support DetachGLContext",
- "os": {
- "type": "android",
- "version": {
- "op": "<=",
- "value": "4.4.4"
- }
- },
- "gl_renderer": ".*Mali-4.*",
- "features": [
- "surface_texture_cant_detach"
- ]
- },
- {
"id": 149,
"description": "Direct composition flashes black initially on Win <10",
"cr_bugs": [588588],
@@ -1503,24 +1479,6 @@
]
},
{
- "id": 156,
- "cr_bugs": [598474],
- "description": "glEGLImageTargetTexture2DOES crashes",
- "os": {
- "type": "android",
- "version": {
- "op": "between",
- "value": "4.4",
- "value2": "4.4.4"
- }
- },
- "gl_vendor": "Imagination.*",
- "gl_renderer": "PowerVR SGX 544MP",
- "features": [
- "avda_dont_copy_pictures"
- ]
- },
- {
"id": 157,
"description": "Testing fences was broken on Mali ES2 drivers for specific phone models",
"cr_bugs": [589814],
@@ -1658,19 +1616,6 @@
]
},
{
- "id": 167,
- "cr_bugs": [610516],
- "description": "glEGLImageTargetTexture2DOES crashes on Mali-400",
- "os": {
- "type": "android"
- },
- "gl_vendor": "ARM.*",
- "gl_renderer": ".*Mali-4.*",
- "features": [
- "avda_dont_copy_pictures"
- ]
- },
- {
"id": 168,
"description": "VirtualBox driver doesn't correctly support partial swaps.",
"cr_bugs": [613722],
@@ -2537,18 +2482,6 @@
]
},
{
- "id": 234,
- "description": "Avoid using EGL_IMAGE_EXTERNAL_FLUSH_EXT with eglCreateImageKHR on NVIDIA",
- "cr_bugs": [727462],
- "os": {
- "type": "chromeos"
- },
- "gl_vendor": "NVIDIA.*",
- "features": [
- "avoid_using_image_flush_external_with_egl_create_image"
- ]
- },
- {
"id": 235,
"description": "Avoid waiting on a egl fence before pageflipping and rely on implicit sync.",
"cr_bugs": [721463],
@@ -2619,7 +2552,7 @@
"value": "9.0"
}
},
- "gl_renderer": "Adreno \\(TM\\) 5[34]0",
+ "gl_renderer": "Adreno \\(TM\\) (3.*|5[34]0)",
"disabled_extensions": [
"GL_EXT_disjoint_timer_query",
"GL_EXT_disjoint_timer_query_webgl2"
@@ -2736,13 +2669,15 @@
},
{
"id": 250,
- "description": "Depth/stencil renderbuffers can't be resized on NVIDIA on macOS 10.13",
+ "description": "Depth/stencil renderbuffers can't be resized on NVIDIA on early macOS 10.13",
"cr_bugs": [775202],
"os": {
"type": "macosx",
"version": {
- "op": ">=",
- "value": "10.13.0"
+ "op": "between",
+ "value": "10.13.0",
+ "value2": "10.13.3",
+ "comment": "Fixed in 10.13.4."
}
},
"vendor_id": "0x10de",
@@ -2790,6 +2725,21 @@
]
},
{
+ "id": 255,
+ "description": "Fullscreen video crashes macOS 10.11 WindowServer.",
+ "cr_bugs": [806506],
+ "os": {
+ "type": "macosx",
+ "version": {
+ "op": "<",
+ "value": "10.12"
+ }
+ },
+ "features": [
+ "disable_av_sample_buffer_display_layer"
+ ]
+ },
+ {
"id": 256,
"description": "Don't expose disjoint_timer_query extensions to WebGL",
"cr_bugs": [808744],
@@ -2832,6 +2782,46 @@
"features": [
"max_msaa_sample_count_4"
]
+ },
+ {
+ "id": 260,
+ "cr_bugs": [760389],
+ "description": "eglClientWaitSyncKHR with nonzero timeout waits too long",
+ "comment": [
+ "This bug is specific to Samsung S8/S8+ on Android N, but there ",
+ "isn't currently a way to restrict the filter to those devices. The ",
+ "functionality is currently only used for WebVR on Daydream ready ",
+ "devices, and the non-Samsung Daydream devices generally use ",
+ "Android O, so an overbroad match seems acceptable."
+ ],
+ "os": {
+ "type": "android",
+ "version": {
+ "op": "<",
+ "value": "8.0"
+ }
+ },
+ "gl_renderer": "Adreno \\(TM\\) 540",
+ "features": [
+ "dont_use_eglclientwaitsync_with_timeout"
+ ]
+ },
+ {
+ "id": 261,
+ "cr_bugs": [811661],
+ "description": "Crash in glQueryCounter() in VMware driver",
+ "os": {
+ "type": "linux"
+ },
+ "vendor_id": "0x15ad",
+ "device_id": ["0x0405"],
+ "features": [
+ "disable_timestamp_queries"
+ ],
+ "disabled_extensions": [
+ "GL_EXT_disjoint_timer_query",
+ "GL_EXT_disjoint_timer_query_webgl2"
+ ]
}
]
}
diff --git a/chromium/gpu/config/gpu_driver_bug_workaround_type.h b/chromium/gpu/config/gpu_driver_bug_workaround_type.h
index e5ee729ae61..690ad3c9038 100644
--- a/chromium/gpu/config/gpu_driver_bug_workaround_type.h
+++ b/chromium/gpu/config/gpu_driver_bug_workaround_type.h
@@ -17,20 +17,14 @@
add_and_true_to_loop_condition) \
GPU_OP(ADJUST_SRC_DST_REGION_FOR_BLITFRAMEBUFFER, \
adjust_src_dst_region_for_blitframebuffer) \
- GPU_OP(AVDA_DONT_COPY_PICTURES, \
- avda_dont_copy_pictures) \
GPU_OP(AVOID_EGL_IMAGE_TARGET_TEXTURE_REUSE, \
avoid_egl_image_target_texture_reuse) \
GPU_OP(AVOID_ONE_COMPONENT_EGL_IMAGES, \
avoid_one_component_egl_images) \
GPU_OP(AVOID_STENCIL_BUFFERS, \
avoid_stencil_buffers) \
- GPU_OP(AVOID_USING_IMAGE_FLUSH_EXTERNAL_WITH_EGL_CREATE_IMAGE, \
- avoid_using_image_flush_external_with_egl_create_image) \
GPU_OP(BROKEN_EGL_IMAGE_REF_COUNTING, \
broken_egl_image_ref_counting) \
- GPU_OP(CLEAR_ALPHA_IN_READPIXELS, \
- clear_alpha_in_readpixels) \
GPU_OP(CLEAR_TO_ZERO_OR_ONE_BROKEN, \
clear_to_zero_or_one_broken) \
GPU_OP(CLEAR_UNIFORMS_BEFORE_FIRST_PROGRAM_USE, \
@@ -43,8 +37,6 @@
depth_stencil_renderbuffer_resize_emulation) \
GPU_OP(DISABLE_ACCELERATED_VPX_DECODE, \
disable_accelerated_vpx_decode) \
- GPU_OP(DISABLE_ANGLE_INSTANCED_ARRAYS, \
- disable_angle_instanced_arrays) \
GPU_OP(DISABLE_ASYNC_READPIXELS, \
disable_async_readpixels) \
GPU_OP(DISABLE_AV_SAMPLE_BUFFER_DISPLAY_LAYER, \
@@ -109,6 +101,8 @@
dont_initialize_uninitialized_locals) \
GPU_OP(DONT_REMOVE_INVARIANT_FOR_FRAGMENT_INPUT, \
dont_remove_invariant_for_fragment_input) \
+ GPU_OP(DONT_USE_EGLCLIENTWAITSYNC_WITH_TIMEOUT, \
+ dont_use_eglclientwaitsync_with_timeout) \
GPU_OP(DONT_USE_LOOPS_TO_INITIALIZE_VARIABLES, \
dont_use_loops_to_initialize_variables) \
GPU_OP(ETC1_POWER_OF_TWO_ONLY, \
@@ -187,8 +181,6 @@
reset_teximage2d_base_level) \
GPU_OP(RESTORE_SCISSOR_ON_FBO_CHANGE, \
restore_scissor_on_fbo_change) \
- GPU_OP(REVERSE_POINT_SPRITE_COORD_ORIGIN, \
- reverse_point_sprite_coord_origin) \
GPU_OP(REWRITE_DO_WHILE_LOOPS, \
rewrite_do_while_loops) \
GPU_OP(REWRITE_FLOAT_UNARY_MINUS_OPERATOR, \
@@ -197,16 +189,10 @@
rewrite_texelfetchoffset_to_texelfetch) \
GPU_OP(SCALARIZE_VEC_AND_MAT_CONSTRUCTOR_ARGS, \
scalarize_vec_and_mat_constructor_args) \
- GPU_OP(SET_TEXTURE_FILTER_BEFORE_GENERATING_MIPMAP, \
- set_texture_filter_before_generating_mipmap) \
GPU_OP(SET_ZERO_LEVEL_BEFORE_GENERATING_MIPMAP, \
set_zero_level_before_generating_mipmap) \
GPU_OP(SIMULATE_OUT_OF_MEMORY_ON_LARGE_TEXTURES, \
simulate_out_of_memory_on_large_textures) \
- GPU_OP(SURFACE_TEXTURE_CANT_DETACH, \
- surface_texture_cant_detach) \
- GPU_OP(SWIZZLE_RGBA_FOR_ASYNC_READPIXELS, \
- swizzle_rgba_for_async_readpixels) \
GPU_OP(TEXSUBIMAGE_FASTER_THAN_TEXIMAGE, \
texsubimage_faster_than_teximage) \
GPU_OP(UNBIND_ATTACHMENTS_ON_BOUND_RENDER_FBO_DELETE, \
diff --git a/chromium/gpu/config/gpu_info.cc b/chromium/gpu/config/gpu_info.cc
index d1dc27eb792..b2a49004201 100644
--- a/chromium/gpu/config/gpu_info.cc
+++ b/chromium/gpu/config/gpu_info.cc
@@ -73,14 +73,8 @@ GPUInfo::GPUInfo()
software_rendering(false),
direct_rendering(true),
sandboxed(false),
- process_crash_count(0),
in_process_gpu(true),
passthrough_cmd_decoder(false),
- basic_info_state(kCollectInfoNone),
- context_info_state(kCollectInfoNone),
-#if defined(OS_WIN)
- dx_diagnostics_info_state(kCollectInfoNone),
-#endif
jpeg_decode_accelerator_supported(false)
#if defined(USE_X11)
,
@@ -95,7 +89,7 @@ GPUInfo::GPUInfo(const GPUInfo& other) = default;
GPUInfo::~GPUInfo() = default;
const GPUInfo::GPUDevice& GPUInfo::active_gpu() const {
- if (gpu.active)
+ if (gpu.active || secondary_gpus.empty())
return gpu;
for (const GPUDevice& secondary_gpu : secondary_gpus) {
if (secondary_gpu.active)
@@ -131,16 +125,12 @@ void GPUInfo::EnumerateFields(Enumerator* enumerator) const {
bool software_rendering;
bool direct_rendering;
bool sandboxed;
- int process_crash_count;
bool in_process_gpu;
bool passthrough_cmd_decoder;
bool direct_composition;
bool supports_overlays;
bool can_support_threaded_texture_mailbox;
- CollectInfoResult basic_info_state;
- CollectInfoResult context_info_state;
#if defined(OS_WIN)
- CollectInfoResult dx_diagnostics_info_state;
DxDiagNode dx_diagnostics;
#endif
VideoDecodeAcceleratorCapabilities video_decode_accelerator_capabilities;
@@ -192,18 +182,12 @@ void GPUInfo::EnumerateFields(Enumerator* enumerator) const {
enumerator->AddBool("softwareRendering", software_rendering);
enumerator->AddBool("directRendering", direct_rendering);
enumerator->AddBool("sandboxed", sandboxed);
- enumerator->AddInt("processCrashCount", process_crash_count);
enumerator->AddBool("inProcessGpu", in_process_gpu);
enumerator->AddBool("passthroughCmdDecoder", passthrough_cmd_decoder);
enumerator->AddBool("directComposition", direct_composition);
enumerator->AddBool("supportsOverlays", supports_overlays);
enumerator->AddBool("canSupportThreadedTextureMailbox",
can_support_threaded_texture_mailbox);
- enumerator->AddInt("basicInfoState", basic_info_state);
- enumerator->AddInt("contextInfoState", context_info_state);
-#if defined(OS_WIN)
- enumerator->AddInt("DxDiagnosticsInfoState", dx_diagnostics_info_state);
-#endif
// TODO(kbr): add dx_diagnostics on Windows.
enumerator->AddInt("videoDecodeAcceleratorFlags",
video_decode_accelerator_capabilities.flags);
diff --git a/chromium/gpu/config/gpu_info.h b/chromium/gpu/config/gpu_info.h
index 7c5650b535e..35705e41a56 100644
--- a/chromium/gpu/config/gpu_info.h
+++ b/chromium/gpu/config/gpu_info.h
@@ -26,19 +26,6 @@ typedef unsigned long VisualID;
namespace gpu {
-// Result for the various Collect*Info* functions below.
-// Fatal failures are for cases where we can't create a context at all or
-// something, making the use of the GPU impossible.
-// Non-fatal failures are for cases where we could gather most info, but maybe
-// some is missing (e.g. unable to parse a version string or to detect the exact
-// model).
-enum CollectInfoResult {
- kCollectInfoNone = 0,
- kCollectInfoSuccess = 1,
- kCollectInfoNonFatalFailure = 2,
- kCollectInfoFatalFailure = 3
-};
-
// Video profile. This *must* match media::VideoCodecProfile.
enum VideoCodecProfile {
VIDEO_CODEC_PROFILE_UNKNOWN = -1,
@@ -129,6 +116,9 @@ struct GPU_EXPORT GPUInfo {
GPUInfo(const GPUInfo& other);
~GPUInfo();
+ // The currently active gpu.
+ const GPUDevice& active_gpu() const;
+
// The amount of time taken to get from the process starting to the message
// loop being pumped.
base::TimeDelta initialization_time;
@@ -145,9 +135,6 @@ struct GPU_EXPORT GPUInfo {
// Secondary GPUs, for example, the integrated GPU in a dual GPU machine.
std::vector<GPUDevice> secondary_gpus;
- // The currently active gpu.
- const GPUDevice& active_gpu() const;
-
// The vendor of the graphics driver currently installed.
std::string driver_vendor;
@@ -213,9 +200,6 @@ struct GPU_EXPORT GPUInfo {
// Whether the gpu process is running in a sandbox.
bool sandboxed;
- // Number of GPU process crashes recorded.
- int process_crash_count;
-
// True if the GPU is running in the browser process instead of its own.
bool in_process_gpu;
@@ -233,13 +217,7 @@ struct GPU_EXPORT GPUInfo {
// is only implemented on Android.
bool can_support_threaded_texture_mailbox = false;
- // The state of whether the basic/context/DxDiagnostics info is collected and
- // if the collection fails or not.
- CollectInfoResult basic_info_state;
- CollectInfoResult context_info_state;
#if defined(OS_WIN)
- CollectInfoResult dx_diagnostics_info_state;
-
// The information returned by the DirectX Diagnostics Tool.
DxDiagNode dx_diagnostics;
#endif
diff --git a/chromium/gpu/config/gpu_info_collector.cc b/chromium/gpu/config/gpu_info_collector.cc
index ba9e8262aad..688c414203c 100644
--- a/chromium/gpu/config/gpu_info_collector.cc
+++ b/chromium/gpu/config/gpu_info_collector.cc
@@ -24,6 +24,7 @@
#include "ui/gl/gl_context.h"
#include "ui/gl/gl_implementation.h"
#include "ui/gl/gl_surface.h"
+#include "ui/gl/gl_switches.h"
#include "ui/gl/gl_version_info.h"
#include "ui/gl/init/gl_factory.h"
@@ -111,20 +112,46 @@ int StringContainsName(
namespace gpu {
-CollectInfoResult CollectGraphicsInfoGL(GPUInfo* gpu_info) {
+bool CollectBasicGraphicsInfo(const base::CommandLine* command_line,
+ GPUInfo* gpu_info) {
+ const char* software_gl_impl_name =
+ gl::GetGLImplementationName(gl::GetSoftwareGLImplementation());
+ if ((command_line->GetSwitchValueASCII(switches::kUseGL) ==
+ software_gl_impl_name) ||
+ command_line->HasSwitch(switches::kOverrideUseSoftwareGLForTests)) {
+ // If using the software GL implementation, use fake vendor and
+ // device ids to make sure it never gets blacklisted. It allows us
+ // to proceed with loading the blacklist which may have non-device
+ // specific entries we want to apply anyways (e.g., OS version
+ // blacklisting).
+ gpu_info->gpu.vendor_id = 0xffff;
+ gpu_info->gpu.device_id = 0xffff;
+
+ // Also declare the driver_vendor to be <software GL> to be able to
+ // specify exceptions based on driver_vendor==<software GL> for some
+ // blacklist rules.
+ gpu_info->driver_vendor = software_gl_impl_name;
+
+ return true;
+ }
+
+ return CollectBasicGraphicsInfo(gpu_info);
+}
+
+bool CollectGraphicsInfoGL(GPUInfo* gpu_info) {
TRACE_EVENT0("startup", "gpu_info_collector::CollectGraphicsInfoGL");
DCHECK_NE(gl::GetGLImplementation(), gl::kGLImplementationNone);
scoped_refptr<gl::GLSurface> surface(InitializeGLSurface());
if (!surface.get()) {
LOG(ERROR) << "Could not create surface for info collection.";
- return kCollectInfoFatalFailure;
+ return false;
}
scoped_refptr<gl::GLContext> context(InitializeGLContext(surface.get()));
if (!context.get()) {
LOG(ERROR) << "Could not create context for info collection.";
- return kCollectInfoFatalFailure;
+ return false;
}
gpu_info->gl_renderer = GetGLString(GL_RENDERER);
@@ -193,58 +220,8 @@ CollectInfoResult CollectGraphicsInfoGL(GPUInfo* gpu_info) {
gpu_info->vertex_shader_version = glsl_version;
IdentifyActiveGPU(gpu_info);
- return CollectDriverInfoGL(gpu_info);
-}
-
-void MergeGPUInfoGL(GPUInfo* basic_gpu_info,
- const GPUInfo& context_gpu_info) {
- DCHECK(basic_gpu_info);
- // Copy over GPUs because which one is active could change.
- basic_gpu_info->gpu = context_gpu_info.gpu;
- basic_gpu_info->secondary_gpus = context_gpu_info.secondary_gpus;
-
- basic_gpu_info->gl_renderer = context_gpu_info.gl_renderer;
- basic_gpu_info->gl_vendor = context_gpu_info.gl_vendor;
- basic_gpu_info->gl_version = context_gpu_info.gl_version;
- basic_gpu_info->gl_extensions = context_gpu_info.gl_extensions;
- basic_gpu_info->pixel_shader_version =
- context_gpu_info.pixel_shader_version;
- basic_gpu_info->vertex_shader_version =
- context_gpu_info.vertex_shader_version;
- basic_gpu_info->max_msaa_samples =
- context_gpu_info.max_msaa_samples;
- basic_gpu_info->gl_ws_vendor = context_gpu_info.gl_ws_vendor;
- basic_gpu_info->gl_ws_version = context_gpu_info.gl_ws_version;
- basic_gpu_info->gl_ws_extensions = context_gpu_info.gl_ws_extensions;
- basic_gpu_info->gl_reset_notification_strategy =
- context_gpu_info.gl_reset_notification_strategy;
- basic_gpu_info->software_rendering = context_gpu_info.software_rendering;
-
- if (!context_gpu_info.driver_vendor.empty())
- basic_gpu_info->driver_vendor = context_gpu_info.driver_vendor;
- if (!context_gpu_info.driver_version.empty())
- basic_gpu_info->driver_version = context_gpu_info.driver_version;
-
- basic_gpu_info->sandboxed = context_gpu_info.sandboxed;
- basic_gpu_info->direct_rendering = context_gpu_info.direct_rendering;
- basic_gpu_info->in_process_gpu = context_gpu_info.in_process_gpu;
- basic_gpu_info->passthrough_cmd_decoder =
- context_gpu_info.passthrough_cmd_decoder;
- basic_gpu_info->direct_composition = context_gpu_info.direct_composition;
- basic_gpu_info->supports_overlays = context_gpu_info.supports_overlays;
- basic_gpu_info->context_info_state = context_gpu_info.context_info_state;
- basic_gpu_info->initialization_time = context_gpu_info.initialization_time;
- basic_gpu_info->video_decode_accelerator_capabilities =
- context_gpu_info.video_decode_accelerator_capabilities;
- basic_gpu_info->video_encode_accelerator_supported_profiles =
- context_gpu_info.video_encode_accelerator_supported_profiles;
- basic_gpu_info->jpeg_decode_accelerator_supported =
- context_gpu_info.jpeg_decode_accelerator_supported;
-
-#if defined(USE_X11)
- basic_gpu_info->system_visual = context_gpu_info.system_visual;
- basic_gpu_info->rgba_visual = context_gpu_info.rgba_visual;
-#endif
+ CollectDriverInfoGL(gpu_info);
+ return true;
}
void IdentifyActiveGPU(GPUInfo* gpu_info) {
@@ -356,4 +333,13 @@ void FillGPUInfoFromSystemInfo(GPUInfo* gpu_info,
gpu_info->machine_model_version = system_info->machineModelVersion;
}
+void CollectGraphicsInfoForTesting(GPUInfo* gpu_info) {
+ DCHECK(gpu_info);
+#if defined(OS_ANDROID)
+ CollectContextGraphicsInfo(gpu_info);
+#else
+ CollectBasicGraphicsInfo(gpu_info);
+#endif // OS_ANDROID
+}
+
} // namespace gpu
diff --git a/chromium/gpu/config/gpu_info_collector.h b/chromium/gpu/config/gpu_info_collector.h
index aa873336213..7a15eed2e42 100644
--- a/chromium/gpu/config/gpu_info_collector.h
+++ b/chromium/gpu/config/gpu_info_collector.h
@@ -15,17 +15,26 @@ namespace angle {
struct SystemInfo;
}
+namespace base {
+class CommandLine;
+}
+
namespace gpu {
// Collects basic GPU info without creating a GL/DirectX context (and without
// the danger of crashing), including vendor_id and device_id.
// This is called at browser process startup time.
// The subset each platform collects may be different.
-GPU_EXPORT CollectInfoResult CollectBasicGraphicsInfo(GPUInfo* gpu_info);
+GPU_EXPORT bool CollectBasicGraphicsInfo(GPUInfo* gpu_info);
+
+// Similar to above, except it handles the case where the software renderer of
+// the platform is used.
+GPU_EXPORT bool CollectBasicGraphicsInfo(const base::CommandLine* command_line,
+ GPUInfo* gpu_info);
// Create a GL/DirectX context and collect related info.
// This is called at GPU process startup time.
-GPU_EXPORT CollectInfoResult CollectContextGraphicsInfo(GPUInfo* gpu_info);
+GPU_EXPORT bool CollectContextGraphicsInfo(GPUInfo* gpu_info);
#if defined(OS_WIN)
// Collect the DirectX Disagnostics information about the attached displays.
@@ -33,20 +42,10 @@ GPU_EXPORT bool GetDxDiagnostics(DxDiagNode* output);
#endif // OS_WIN
// Create a GL context and collect GL strings and versions.
-GPU_EXPORT CollectInfoResult CollectGraphicsInfoGL(GPUInfo* gpu_info);
+GPU_EXPORT bool CollectGraphicsInfoGL(GPUInfo* gpu_info);
// Each platform stores the driver version on the GL_VERSION string differently
-GPU_EXPORT CollectInfoResult CollectDriverInfoGL(GPUInfo* gpu_info);
-
-// Merge GPUInfo from CollectContextGraphicsInfo into basic GPUInfo.
-// This is platform specific, depending on which info are collected at which
-// stage.
-GPU_EXPORT void MergeGPUInfo(GPUInfo* basic_gpu_info,
- const GPUInfo& context_gpu_info);
-
-// MergeGPUInfo() when GL driver is used.
-GPU_EXPORT void MergeGPUInfoGL(GPUInfo* basic_gpu_info,
- const GPUInfo& context_gpu_info);
+GPU_EXPORT void CollectDriverInfoGL(GPUInfo* gpu_info);
// If more than one GPUs are identified, and GL strings are available,
// identify the active GPU based on GL strings.
@@ -57,6 +56,10 @@ GPU_EXPORT void IdentifyActiveGPU(GPUInfo* gpu_info);
void FillGPUInfoFromSystemInfo(GPUInfo* gpu_info,
angle::SystemInfo* system_info);
+// On Android, this calls CollectContextGraphicsInfo().
+// On other platforms, this calls CollectBasicGraphicsInfo().
+GPU_EXPORT void CollectGraphicsInfoForTesting(GPUInfo* gpu_info);
+
} // namespace gpu
#endif // GPU_CONFIG_GPU_INFO_COLLECTOR_H_
diff --git a/chromium/gpu/config/gpu_info_collector_android.cc b/chromium/gpu/config/gpu_info_collector_android.cc
index 26611976cb8..2750a1b5f9c 100644
--- a/chromium/gpu/config/gpu_info_collector_android.cc
+++ b/chromium/gpu/config/gpu_info_collector_android.cc
@@ -5,29 +5,11 @@
#include "gpu/config/gpu_info_collector.h"
#include <stddef.h>
-#include <stdint.h>
#include "base/android/build_info.h"
#include "base/android/jni_android.h"
-#include "base/command_line.h"
-#include "base/files/file_path.h"
#include "base/logging.h"
-#include "base/native_library.h"
-#include "base/strings/string_number_conversions.h"
-#include "base/strings/string_piece.h"
#include "base/strings/string_split.h"
-#include "base/strings/string_util.h"
-#include "base/strings/stringprintf.h"
-#include "gpu/config/gpu_switches.h"
-#include "ui/gl/egl_util.h"
-#include "ui/gl/gl_bindings.h"
-#include "ui/gl/gl_context.h"
-#include "ui/gl/gl_features.h"
-#include "ui/gl/gl_surface.h"
-
-#if BUILDFLAG(USE_STATIC_ANGLE)
-#include <EGL/egl.h>
-#endif // BUILDFLAG(USE_STATIC_ANGLE)
namespace {
@@ -72,217 +54,11 @@ std::string GetDriverVersionFromString(const std::string& version_string) {
return driver_version.first;
}
-gpu::CollectInfoResult CollectDriverInfo(gpu::GPUInfo* gpu_info) {
-#if BUILDFLAG(USE_STATIC_ANGLE)
-#pragma push_macro("eglGetProcAddress")
-#undef eglGetProcAddress
-#define LOOKUP_FUNC(x) \
- auto x##Fn = reinterpret_cast<gl::x##Proc>(eglGetProcAddress(#x))
-#else // BUILDFLAG(USE_STATIC_ANGLE)
- // Go through the process of loading GL libs and initializing an EGL
- // context so that we can get GL vendor/version/renderer strings.
- base::NativeLibrary gles_library, egl_library;
- base::NativeLibraryLoadError error;
- gles_library =
- base::LoadNativeLibrary(base::FilePath("libGLESv2.so"), &error);
- if (!gles_library) {
- LOG(ERROR) << "Failed to load libGLESv2.so";
- return gpu::kCollectInfoFatalFailure;
- }
-
- egl_library = base::LoadNativeLibrary(base::FilePath("libEGL.so"), &error);
- if (!egl_library) {
- LOG(ERROR) << "Failed to load libEGL.so";
- return gpu::kCollectInfoFatalFailure;
- }
-
- typedef void* (*eglGetProcAddressProc)(const char* name);
-
- auto eglGetProcAddressFn = reinterpret_cast<eglGetProcAddressProc>(
- base::GetFunctionPointerFromNativeLibrary(egl_library,
- "eglGetProcAddress"));
- if (!eglGetProcAddressFn) {
- LOG(ERROR) << "eglGetProcAddress not found.";
- return gpu::kCollectInfoFatalFailure;
- }
-
- auto get_func = [eglGetProcAddressFn, gles_library, egl_library](
- const char* name) {
- void *proc;
- proc = base::GetFunctionPointerFromNativeLibrary(egl_library, name);
- if (proc)
- return proc;
- proc = base::GetFunctionPointerFromNativeLibrary(gles_library, name);
- if (proc)
- return proc;
- proc = eglGetProcAddressFn(name);
- if (proc)
- return proc;
- LOG(FATAL) << "Failed to look up " << name;
- return (void *)nullptr;
- };
-
-#define LOOKUP_FUNC(x) auto x##Fn = reinterpret_cast<gl::x##Proc>(get_func(#x))
-#endif // BUILDFLAG(USE_STATIC_ANGLE)
-
- LOOKUP_FUNC(eglGetError);
- LOOKUP_FUNC(eglQueryString);
- LOOKUP_FUNC(eglGetCurrentContext);
- LOOKUP_FUNC(eglGetCurrentDisplay);
- LOOKUP_FUNC(eglGetCurrentSurface);
- LOOKUP_FUNC(eglGetDisplay);
- LOOKUP_FUNC(eglInitialize);
- LOOKUP_FUNC(eglChooseConfig);
- LOOKUP_FUNC(eglCreateContext);
- LOOKUP_FUNC(eglCreatePbufferSurface);
- LOOKUP_FUNC(eglMakeCurrent);
- LOOKUP_FUNC(eglDestroySurface);
- LOOKUP_FUNC(eglDestroyContext);
-
- LOOKUP_FUNC(glGetString);
- LOOKUP_FUNC(glGetIntegerv);
-
-#undef LOOKUP_FUNC
-#if BUILDFLAG(USE_STATIC_ANGLE)
-#pragma pop_macro("eglGetProcAddress")
-#endif // BUILDFLAG(USE_STATIC_ANGLE)
-
- EGLDisplay curr_display = eglGetCurrentDisplayFn();
- EGLContext curr_context = eglGetCurrentContextFn();
- EGLSurface curr_draw_surface = eglGetCurrentSurfaceFn(EGL_DRAW);
- EGLSurface curr_read_surface = eglGetCurrentSurfaceFn(EGL_READ);
-
- EGLDisplay temp_display = EGL_NO_DISPLAY;
- EGLContext temp_context = EGL_NO_CONTEXT;
- EGLSurface temp_surface = EGL_NO_SURFACE;
-
- const EGLint kConfigAttribs[] = {
- EGL_RENDERABLE_TYPE, EGL_OPENGL_ES2_BIT,
- EGL_SURFACE_TYPE, EGL_PBUFFER_BIT,
- EGL_NONE};
- const EGLint kContextAttribs[] = {
- EGL_CONTEXT_OPENGL_RESET_NOTIFICATION_STRATEGY_EXT,
- EGL_LOSE_CONTEXT_ON_RESET_EXT,
- EGL_CONTEXT_CLIENT_VERSION, 2,
- EGL_NONE};
- const EGLint kSurfaceAttribs[] = {
- EGL_WIDTH, 1,
- EGL_HEIGHT, 1,
- EGL_NONE};
-
- EGLint major, minor;
-
- EGLConfig config;
- EGLint num_configs;
-
- auto errorstr = [eglGetErrorFn]() {
- uint32_t err = eglGetErrorFn();
- return base::StringPrintf("%s (%x)", ui::GetEGLErrorString(err), err);
- };
-
- temp_display = eglGetDisplayFn(EGL_DEFAULT_DISPLAY);
-
- if (temp_display == EGL_NO_DISPLAY) {
- LOG(ERROR) << "failed to get display. " << errorstr();
- return gpu::kCollectInfoFatalFailure;
- }
-
- eglInitializeFn(temp_display, &major, &minor);
-
- bool egl_create_context_robustness_supported =
- strstr(reinterpret_cast<const char*>(
- eglQueryStringFn(temp_display, EGL_EXTENSIONS)),
- "EGL_EXT_create_context_robustness") != NULL;
-
- if (!eglChooseConfigFn(temp_display, kConfigAttribs, &config, 1,
- &num_configs)) {
- LOG(ERROR) << "failed to choose an egl config. " << errorstr();
- return gpu::kCollectInfoFatalFailure;
- }
-
- temp_context = eglCreateContextFn(
- temp_display, config, EGL_NO_CONTEXT,
- kContextAttribs + (egl_create_context_robustness_supported ? 0 : 2));
- if (temp_context == EGL_NO_CONTEXT) {
- LOG(ERROR)
- << "failed to create a temporary context for fetching driver strings. "
- << errorstr();
- return gpu::kCollectInfoFatalFailure;
- }
-
- temp_surface =
- eglCreatePbufferSurfaceFn(temp_display, config, kSurfaceAttribs);
-
- if (temp_surface == EGL_NO_SURFACE) {
- eglDestroyContextFn(temp_display, temp_context);
- LOG(ERROR)
- << "failed to create a pbuffer surface for fetching driver strings. "
- << errorstr();
- return gpu::kCollectInfoFatalFailure;
- }
-
- eglMakeCurrentFn(temp_display, temp_surface, temp_surface, temp_context);
-
- gpu_info->gl_vendor = reinterpret_cast<const char*>(glGetStringFn(GL_VENDOR));
- gpu_info->gl_version =
- reinterpret_cast<const char*>(glGetStringFn(GL_VERSION));
- gpu_info->gl_renderer =
- reinterpret_cast<const char*>(glGetStringFn(GL_RENDERER));
- gpu_info->gl_extensions =
- reinterpret_cast<const char*>(glGetStringFn(GL_EXTENSIONS));
-
- std::string egl_extensions = eglQueryStringFn(temp_display, EGL_EXTENSIONS);
-
- GLint max_samples = 0;
- glGetIntegervFn(GL_MAX_SAMPLES, &max_samples);
- gpu_info->max_msaa_samples = base::IntToString(max_samples);
-
- bool supports_robustness =
- gpu_info->gl_extensions.find("GL_EXT_robustness") != std::string::npos ||
- gpu_info->gl_extensions.find("GL_KHR_robustness") != std::string::npos ||
- gpu_info->gl_extensions.find("GL_ARB_robustness") != std::string::npos;
-
- if (supports_robustness) {
- glGetIntegervFn(
- GL_RESET_NOTIFICATION_STRATEGY_ARB,
- reinterpret_cast<GLint*>(&gpu_info->gl_reset_notification_strategy));
- }
-
- gpu_info->can_support_threaded_texture_mailbox =
- egl_extensions.find("EGL_KHR_fence_sync") != std::string::npos &&
- egl_extensions.find("EGL_KHR_image_base") != std::string::npos &&
- egl_extensions.find("EGL_KHR_gl_texture_2D_image") != std::string::npos &&
- gpu_info->gl_extensions.find("GL_OES_EGL_image") != std::string::npos;
-
- std::string glsl_version_string;
- if (const char* glsl_version_cstring = reinterpret_cast<const char*>(
- glGetStringFn(GL_SHADING_LANGUAGE_VERSION)))
- glsl_version_string = glsl_version_cstring;
-
- std::string glsl_version = GetVersionFromString(glsl_version_string).first;
- gpu_info->pixel_shader_version = glsl_version;
- gpu_info->vertex_shader_version = glsl_version;
-
- if (curr_display != EGL_NO_DISPLAY &&
- curr_context != EGL_NO_CONTEXT) {
- eglMakeCurrentFn(curr_display, curr_draw_surface, curr_read_surface,
- curr_context);
- } else {
- eglMakeCurrentFn(temp_display, EGL_NO_SURFACE, EGL_NO_SURFACE,
- EGL_NO_CONTEXT);
- }
-
- eglDestroySurfaceFn(temp_display, temp_surface);
- eglDestroyContextFn(temp_display, temp_context);
-
- return gpu::kCollectInfoSuccess;
-}
-
}
namespace gpu {
-CollectInfoResult CollectContextGraphicsInfo(GPUInfo* gpu_info) {
+bool CollectContextGraphicsInfo(GPUInfo* gpu_info) {
// When command buffer is compiled as a standalone library, the process might
// not have a Java environment.
if (base::android::IsVMInitialized()) {
@@ -291,41 +67,19 @@ CollectInfoResult CollectContextGraphicsInfo(GPUInfo* gpu_info) {
}
// At this point GL bindings have been initialized already.
- CollectInfoResult result = CollectGraphicsInfoGL(gpu_info);
- gpu_info->basic_info_state = result;
- gpu_info->context_info_state = result;
- return result;
+ return CollectGraphicsInfoGL(gpu_info);
}
-CollectInfoResult CollectBasicGraphicsInfo(GPUInfo* gpu_info) {
- // When command buffer is compiled as a standalone library, the process might
- // not have a Java environment.
- if (base::android::IsVMInitialized()) {
- gpu_info->machine_model_name =
- base::android::BuildInfo::GetInstance()->model();
- }
-
- // Create a short-lived context on the UI thread to collect the GL strings.
- // Make sure we restore the existing context if there is one.
- CollectInfoResult result = CollectDriverInfo(gpu_info);
- if (result == kCollectInfoSuccess)
- result = CollectDriverInfoGL(gpu_info);
- gpu_info->basic_info_state = result;
- gpu_info->context_info_state = result;
- return result;
+bool CollectBasicGraphicsInfo(GPUInfo* gpu_info) {
+ NOTREACHED();
+ return false;
}
-CollectInfoResult CollectDriverInfoGL(GPUInfo* gpu_info) {
+void CollectDriverInfoGL(GPUInfo* gpu_info) {
gpu_info->driver_version = GetDriverVersionFromString(
gpu_info->gl_version);
gpu_info->gpu.vendor_string = gpu_info->gl_vendor;
gpu_info->gpu.device_string = gpu_info->gl_renderer;
- return kCollectInfoSuccess;
-}
-
-void MergeGPUInfo(GPUInfo* basic_gpu_info,
- const GPUInfo& context_gpu_info) {
- MergeGPUInfoGL(basic_gpu_info, context_gpu_info);
}
} // namespace gpu
diff --git a/chromium/gpu/config/gpu_info_collector_fuchsia.cc b/chromium/gpu/config/gpu_info_collector_fuchsia.cc
index 95dac4c9f8e..a23a24251e2 100644
--- a/chromium/gpu/config/gpu_info_collector_fuchsia.cc
+++ b/chromium/gpu/config/gpu_info_collector_fuchsia.cc
@@ -6,26 +6,21 @@
namespace gpu {
-CollectInfoResult CollectContextGraphicsInfo(GPUInfo* gpu_info) {
+bool CollectContextGraphicsInfo(GPUInfo* gpu_info) {
// TODO(crbug.com/707031): Implement this.
NOTIMPLEMENTED();
- return kCollectInfoFatalFailure;
+ return false;
}
-CollectInfoResult CollectBasicGraphicsInfo(GPUInfo* gpu_info) {
+bool CollectBasicGraphicsInfo(GPUInfo* gpu_info) {
// TODO(crbug.com/707031): Implement this.
NOTIMPLEMENTED();
- return kCollectInfoFatalFailure;
+ return false;
}
-CollectInfoResult CollectDriverInfoGL(GPUInfo* gpu_info) {
+void CollectDriverInfoGL(GPUInfo* gpu_info) {
// TODO(crbug.com/707031): Implement this.
NOTIMPLEMENTED();
- return kCollectInfoFatalFailure;
-}
-
-void MergeGPUInfo(GPUInfo* basic_gpu_info, const GPUInfo& context_gpu_info) {
- MergeGPUInfoGL(basic_gpu_info, context_gpu_info);
}
} // namespace gpu
diff --git a/chromium/gpu/config/gpu_info_collector_linux.cc b/chromium/gpu/config/gpu_info_collector_linux.cc
index a1782d9fdb5..62a02a8f817 100644
--- a/chromium/gpu/config/gpu_info_collector_linux.cc
+++ b/chromium/gpu/config/gpu_info_collector_linux.cc
@@ -12,38 +12,28 @@
namespace gpu {
-CollectInfoResult CollectContextGraphicsInfo(GPUInfo* gpu_info) {
+bool CollectContextGraphicsInfo(GPUInfo* gpu_info) {
DCHECK(gpu_info);
TRACE_EVENT0("gpu", "gpu_info_collector::CollectGraphicsInfo");
- CollectInfoResult result = CollectGraphicsInfoGL(gpu_info);
- gpu_info->context_info_state = result;
- return result;
+ return CollectGraphicsInfoGL(gpu_info);
}
-CollectInfoResult CollectBasicGraphicsInfo(GPUInfo* gpu_info) {
+bool CollectBasicGraphicsInfo(GPUInfo* gpu_info) {
DCHECK(gpu_info);
angle::SystemInfo system_info;
bool success = angle::GetSystemInfo(&system_info);
FillGPUInfoFromSystemInfo(gpu_info, &system_info);
-
- if (success) {
- gpu_info->basic_info_state = kCollectInfoSuccess;
- } else {
- gpu_info->basic_info_state = kCollectInfoNonFatalFailure;
- }
-
- return gpu_info->basic_info_state;
+ return success;
}
-CollectInfoResult CollectDriverInfoGL(GPUInfo* gpu_info) {
+void CollectDriverInfoGL(GPUInfo* gpu_info) {
DCHECK(gpu_info);
- if (!gpu_info->driver_vendor.empty() && !gpu_info->driver_version.empty()) {
- return kCollectInfoSuccess;
- }
+ if (!gpu_info->driver_vendor.empty() && !gpu_info->driver_version.empty())
+ return;
std::string gl_version = gpu_info->gl_version;
std::vector<std::string> pieces = base::SplitString(
@@ -52,7 +42,7 @@ CollectInfoResult CollectDriverInfoGL(GPUInfo* gpu_info) {
// In linux, the gl version string might be in the format of
// GLVersion DriverVendor DriverVersion
if (pieces.size() < 3)
- return kCollectInfoNonFatalFailure;
+ return;
// Search from the end for the first piece that starts with major.minor or
// major.minor.micro but assume the driver version cannot be in the first two
@@ -68,16 +58,10 @@ CollectInfoResult CollectDriverInfoGL(GPUInfo* gpu_info) {
}
if (driver_version.empty())
- return kCollectInfoNonFatalFailure;
+ return;
gpu_info->driver_vendor = *(++it);
gpu_info->driver_version = driver_version;
- return kCollectInfoSuccess;
-}
-
-void MergeGPUInfo(GPUInfo* basic_gpu_info,
- const GPUInfo& context_gpu_info) {
- MergeGPUInfoGL(basic_gpu_info, context_gpu_info);
}
} // namespace gpu
diff --git a/chromium/gpu/config/gpu_info_collector_mac.mm b/chromium/gpu/config/gpu_info_collector_mac.mm
index 0488da5cf2a..6d34d3adc86 100644
--- a/chromium/gpu/config/gpu_info_collector_mac.mm
+++ b/chromium/gpu/config/gpu_info_collector_mac.mm
@@ -9,33 +9,23 @@
namespace gpu {
-CollectInfoResult CollectContextGraphicsInfo(GPUInfo* gpu_info) {
+bool CollectContextGraphicsInfo(GPUInfo* gpu_info) {
DCHECK(gpu_info);
TRACE_EVENT0("gpu", "gpu_info_collector::CollectGraphicsInfo");
-
- CollectInfoResult result = CollectGraphicsInfoGL(gpu_info);
- gpu_info->context_info_state = result;
- return result;
+ return CollectGraphicsInfoGL(gpu_info);
}
-CollectInfoResult CollectBasicGraphicsInfo(GPUInfo* gpu_info) {
+bool CollectBasicGraphicsInfo(GPUInfo* gpu_info) {
DCHECK(gpu_info);
angle::SystemInfo system_info;
bool success = angle::GetSystemInfo(&system_info);
FillGPUInfoFromSystemInfo(gpu_info, &system_info);
-
- if (success) {
- gpu_info->basic_info_state = kCollectInfoSuccess;
- } else {
- gpu_info->basic_info_state = kCollectInfoNonFatalFailure;
- }
-
- return gpu_info->basic_info_state;
+ return success;
}
-CollectInfoResult CollectDriverInfoGL(GPUInfo* gpu_info) {
+void CollectDriverInfoGL(GPUInfo* gpu_info) {
DCHECK(gpu_info);
// Extract the OpenGL driver version string from the GL_VERSION string.
@@ -44,14 +34,8 @@ CollectInfoResult CollectDriverInfoGL(GPUInfo* gpu_info) {
// Use some jiggery-pokery to turn that utf8 string into a std::wstring.
size_t pos = gpu_info->gl_version.find_last_of('-');
if (pos == std::string::npos)
- return kCollectInfoNonFatalFailure;
+ return;
gpu_info->driver_version = gpu_info->gl_version.substr(pos + 1);
- return kCollectInfoSuccess;
-}
-
-void MergeGPUInfo(GPUInfo* basic_gpu_info,
- const GPUInfo& context_gpu_info) {
- MergeGPUInfoGL(basic_gpu_info, context_gpu_info);
}
} // namespace gpu
diff --git a/chromium/gpu/config/gpu_info_collector_unittest.cc b/chromium/gpu/config/gpu_info_collector_unittest.cc
index 1b63526bc2e..695d25b9043 100644
--- a/chromium/gpu/config/gpu_info_collector_unittest.cc
+++ b/chromium/gpu/config/gpu_info_collector_unittest.cc
@@ -380,7 +380,7 @@ TEST_F(CollectDriverInfoGLTest, CollectDriverInfoGL) {
gpu_info.gl_renderer = testStrings.gl_renderer;
gpu_info.gl_vendor = testStrings.gl_vendor;
gpu_info.gl_version = testStrings.gl_version;
- EXPECT_EQ(kCollectInfoSuccess, CollectDriverInfoGL(&gpu_info));
+ CollectDriverInfoGL(&gpu_info);
EXPECT_EQ(testStrings.expected_driver_version, gpu_info.driver_version);
if (testStrings.expected_driver_vendor) {
EXPECT_EQ(testStrings.expected_driver_vendor, gpu_info.driver_vendor);
diff --git a/chromium/gpu/config/gpu_info_collector_win.cc b/chromium/gpu/config/gpu_info_collector_win.cc
index dea8fdbb914..ea4c97a9b2d 100644
--- a/chromium/gpu/config/gpu_info_collector_win.cc
+++ b/chromium/gpu/config/gpu_info_collector_win.cc
@@ -68,8 +68,7 @@ void GetAMDVideocardInfo(GPUInfo* gpu_info) {
}
#endif
-CollectInfoResult CollectDriverInfoD3D(const std::wstring& device_id,
- GPUInfo* gpu_info) {
+bool CollectDriverInfoD3D(const std::wstring& device_id, GPUInfo* gpu_info) {
TRACE_EVENT0("gpu", "CollectDriverInfoD3D");
// Display adapter class GUID from
@@ -84,7 +83,7 @@ CollectInfoResult CollectDriverInfoD3D(const std::wstring& device_id,
::SetupDiGetClassDevs(&display_class, NULL, NULL, DIGCF_PRESENT);
if (device_info == INVALID_HANDLE_VALUE) {
LOG(ERROR) << "Creating device info failed";
- return kCollectInfoNonFatalFailure;
+ return false;
}
struct GPUDriver {
@@ -224,19 +223,16 @@ CollectInfoResult CollectDriverInfoD3D(const std::wstring& device_id,
}
}
- return found ? kCollectInfoSuccess : kCollectInfoNonFatalFailure;
+ return found;
}
-CollectInfoResult CollectContextGraphicsInfo(GPUInfo* gpu_info) {
+bool CollectContextGraphicsInfo(GPUInfo* gpu_info) {
TRACE_EVENT0("gpu", "CollectGraphicsInfo");
DCHECK(gpu_info);
- CollectInfoResult result = CollectGraphicsInfoGL(gpu_info);
- if (result != kCollectInfoSuccess) {
- gpu_info->context_info_state = result;
- return result;
- }
+ if (!CollectGraphicsInfoGL(gpu_info))
+ return false;
// ANGLE's renderer strings are of the form:
// ANGLE (<adapter_identifier> Direct3D<version> vs_x_x ps_x_x)
@@ -267,17 +263,39 @@ CollectInfoResult CollectContextGraphicsInfo(GPUInfo* gpu_info) {
pixel_shader_major_version,
pixel_shader_minor_version);
+ DCHECK(!gpu_info->vertex_shader_version.empty());
+ // Note: do not reorder, used by UMA_HISTOGRAM below
+ enum ShaderModel {
+ SHADER_MODEL_UNKNOWN,
+ SHADER_MODEL_2_0,
+ SHADER_MODEL_3_0,
+ SHADER_MODEL_4_0,
+ SHADER_MODEL_4_1,
+ SHADER_MODEL_5_0,
+ NUM_SHADER_MODELS
+ };
+ ShaderModel shader_model = SHADER_MODEL_UNKNOWN;
+ if (gpu_info->vertex_shader_version == "5.0") {
+ shader_model = SHADER_MODEL_5_0;
+ } else if (gpu_info->vertex_shader_version == "4.1") {
+ shader_model = SHADER_MODEL_4_1;
+ } else if (gpu_info->vertex_shader_version == "4.0") {
+ shader_model = SHADER_MODEL_4_0;
+ } else if (gpu_info->vertex_shader_version == "3.0") {
+ shader_model = SHADER_MODEL_3_0;
+ } else if (gpu_info->vertex_shader_version == "2.0") {
+ shader_model = SHADER_MODEL_2_0;
+ }
+ UMA_HISTOGRAM_ENUMERATION("GPU.D3DShaderModel", shader_model,
+ NUM_SHADER_MODELS);
+
// DirectX diagnostics are collected asynchronously because it takes a
// couple of seconds.
- } else {
- gpu_info->dx_diagnostics_info_state = kCollectInfoNonFatalFailure;
}
-
- gpu_info->context_info_state = kCollectInfoSuccess;
- return kCollectInfoSuccess;
+ return true;
}
-CollectInfoResult CollectBasicGraphicsInfo(GPUInfo* gpu_info) {
+bool CollectBasicGraphicsInfo(GPUInfo* gpu_info) {
TRACE_EVENT0("gpu", "CollectPreliminaryGraphicsInfo");
DCHECK(gpu_info);
@@ -306,82 +324,24 @@ CollectInfoResult CollectBasicGraphicsInfo(GPUInfo* gpu_info) {
// Chained DD" or the citrix display driver.
if (wcscmp(dd.DeviceString, L"RDPUDD Chained DD") != 0 &&
wcscmp(dd.DeviceString, L"Citrix Systems Inc. Display Driver") != 0) {
- gpu_info->basic_info_state = kCollectInfoNonFatalFailure;
- return kCollectInfoNonFatalFailure;
+ return false;
}
}
DeviceIDToVendorAndDevice(id, &gpu_info->gpu.vendor_id,
&gpu_info->gpu.device_id);
// TODO(zmo): we only need to call CollectDriverInfoD3D() if we use ANGLE.
- if (!CollectDriverInfoD3D(id, gpu_info)) {
- gpu_info->basic_info_state = kCollectInfoNonFatalFailure;
- return kCollectInfoNonFatalFailure;
- }
-
- gpu_info->basic_info_state = kCollectInfoSuccess;
- return kCollectInfoSuccess;
+ return CollectDriverInfoD3D(id, gpu_info);
}
-CollectInfoResult CollectDriverInfoGL(GPUInfo* gpu_info) {
+void CollectDriverInfoGL(GPUInfo* gpu_info) {
TRACE_EVENT0("gpu", "CollectDriverInfoGL");
if (!gpu_info->driver_version.empty())
- return kCollectInfoSuccess;
-
- bool parsed = RE2::PartialMatch(
- gpu_info->gl_version, "([\\d\\.]+)$", &gpu_info->driver_version);
- return parsed ? kCollectInfoSuccess : kCollectInfoNonFatalFailure;
-}
-
-void MergeGPUInfo(GPUInfo* basic_gpu_info,
- const GPUInfo& context_gpu_info) {
- DCHECK(basic_gpu_info);
-
- // Track D3D Shader Model (if available)
- const std::string& shader_version =
- context_gpu_info.vertex_shader_version;
-
- // Only gather if this is the first time we're seeing
- // a non-empty shader version string.
- if (!shader_version.empty() &&
- basic_gpu_info->vertex_shader_version.empty()) {
-
- // Note: do not reorder, used by UMA_HISTOGRAM below
- enum ShaderModel {
- SHADER_MODEL_UNKNOWN,
- SHADER_MODEL_2_0,
- SHADER_MODEL_3_0,
- SHADER_MODEL_4_0,
- SHADER_MODEL_4_1,
- SHADER_MODEL_5_0,
- NUM_SHADER_MODELS
- };
-
- ShaderModel shader_model = SHADER_MODEL_UNKNOWN;
-
- if (shader_version == "5.0") {
- shader_model = SHADER_MODEL_5_0;
- } else if (shader_version == "4.1") {
- shader_model = SHADER_MODEL_4_1;
- } else if (shader_version == "4.0") {
- shader_model = SHADER_MODEL_4_0;
- } else if (shader_version == "3.0") {
- shader_model = SHADER_MODEL_3_0;
- } else if (shader_version == "2.0") {
- shader_model = SHADER_MODEL_2_0;
- }
-
- UMA_HISTOGRAM_ENUMERATION("GPU.D3DShaderModel",
- shader_model,
- NUM_SHADER_MODELS);
- }
-
- MergeGPUInfoGL(basic_gpu_info, context_gpu_info);
+ return;
- basic_gpu_info->dx_diagnostics_info_state =
- context_gpu_info.dx_diagnostics_info_state;
- basic_gpu_info->dx_diagnostics = context_gpu_info.dx_diagnostics;
+ RE2::PartialMatch(gpu_info->gl_version, "([\\d\\.]+)$",
+ &gpu_info->driver_version);
}
} // namespace gpu
diff --git a/chromium/gpu/config/gpu_info_unittest.cc b/chromium/gpu/config/gpu_info_unittest.cc
index 0ef15fa6a7a..bcdbcbb36ec 100644
--- a/chromium/gpu/config/gpu_info_unittest.cc
+++ b/chromium/gpu/config/gpu_info_unittest.cc
@@ -27,11 +27,6 @@ TEST(GPUInfoBasicTest, EmptyGPUInfo) {
EXPECT_EQ(gpu_info.gl_ws_vendor, "");
EXPECT_EQ(gpu_info.gl_ws_version, "");
EXPECT_EQ(gpu_info.gl_ws_extensions, "");
- EXPECT_EQ(gpu_info.basic_info_state, kCollectInfoNone);
- EXPECT_EQ(gpu_info.context_info_state, kCollectInfoNone);
-#if defined(OS_WIN)
- EXPECT_EQ(gpu_info.dx_diagnostics_info_state, kCollectInfoNone);
-#endif
EXPECT_EQ(gpu_info.video_decode_accelerator_capabilities.flags, 0u);
EXPECT_EQ(
gpu_info.video_decode_accelerator_capabilities.supported_profiles.size(),
@@ -40,4 +35,3 @@ TEST(GPUInfoBasicTest, EmptyGPUInfo) {
}
} // namespace gpu
-
diff --git a/chromium/gpu/config/gpu_lists_version.h b/chromium/gpu/config/gpu_lists_version.h
index 2cc6499bd40..3338ed2b803 100644
--- a/chromium/gpu/config/gpu_lists_version.h
+++ b/chromium/gpu/config/gpu_lists_version.h
@@ -3,6 +3,6 @@
#ifndef GPU_CONFIG_GPU_LISTS_VERSION_H_
#define GPU_CONFIG_GPU_LISTS_VERSION_H_
-#define GPU_LISTS_VERSION "926d27280b919a1aeb0091392d56f63a38d2bc0b"
+#define GPU_LISTS_VERSION "ecf119fc6a3bde1fefd18d2ef67abf0e21a35fe9"
#endif // GPU_CONFIG_GPU_LISTS_VERSION_H_
diff --git a/chromium/gpu/config/gpu_switches.cc b/chromium/gpu/config/gpu_switches.cc
index 05a0d464a5d..68ba5773aae 100644
--- a/chromium/gpu/config/gpu_switches.cc
+++ b/chromium/gpu/config/gpu_switches.cc
@@ -6,9 +6,6 @@
namespace switches {
-// Passes if it's AMD switchable dual GPUs from browser process to GPU process.
-const char kAMDSwitchable[] = "amd-switchable";
-
// Disable workarounds for various GPU driver bugs.
const char kDisableGpuDriverBugWorkarounds[] =
"disable-gpu-driver-bug-workarounds";
@@ -29,37 +26,6 @@ const char kEnableOOPRasterization[] = "enable-oop-rasterization";
// Passes encoded GpuPreferences to GPU process.
const char kGpuPreferences[] = "gpu-preferences";
-// Passes active gpu vendor id from browser process to GPU process.
-const char kGpuActiveVendorID[] = "gpu-active-vendor-id";
-
-// Passes active gpu device id from browser process to GPU process.
-const char kGpuActiveDeviceID[] = "gpu-active-device-id";
-
-// Passes gpu device_id from browser process to GPU process.
-const char kGpuDeviceID[] = "gpu-device-id";
-
-// Passes gpu driver_vendor from browser process to GPU process.
-const char kGpuDriverVendor[] = "gpu-driver-vendor";
-
-// Passes gpu driver_version from browser process to GPU process.
-const char kGpuDriverVersion[] = "gpu-driver-version";
-
-// Passes gpu driver_date from browser process to GPU process.
-const char kGpuDriverDate[] = "gpu-driver-date";
-
-// Passes secondary gpu vendor ids from browser process to GPU process.
-const char kGpuSecondaryVendorIDs[] = "gpu-secondary-vendor-ids";
-
-// Passes secondary gpu device ids from browser process to GPU process.
-const char kGpuSecondaryDeviceIDs[] = "gpu-secondary-device-ids";
-
-// Testing switch to not launch the gpu process for full gpu info collection.
-const char kGpuTestingNoCompleteInfoCollection[] =
- "gpu-no-complete-info-collection";
-
-// Passes gpu vendor_id from browser process to GPU process.
-const char kGpuVendorID[] = "gpu-vendor-id";
-
// Ignores GPU blacklist.
const char kIgnoreGpuBlacklist[] = "ignore-gpu-blacklist";
diff --git a/chromium/gpu/config/gpu_switches.h b/chromium/gpu/config/gpu_switches.h
index bcd5440e8d0..b71176645c4 100644
--- a/chromium/gpu/config/gpu_switches.h
+++ b/chromium/gpu/config/gpu_switches.h
@@ -9,22 +9,11 @@
namespace switches {
-GPU_EXPORT extern const char kAMDSwitchable[];
GPU_EXPORT extern const char kDisableGpuDriverBugWorkarounds[];
GPU_EXPORT extern const char kDisableGpuRasterization[];
GPU_EXPORT extern const char kEnableGpuRasterization[];
GPU_EXPORT extern const char kEnableOOPRasterization[];
GPU_EXPORT extern const char kGpuPreferences[];
-GPU_EXPORT extern const char kGpuActiveVendorID[];
-GPU_EXPORT extern const char kGpuActiveDeviceID[];
-GPU_EXPORT extern const char kGpuDeviceID[];
-GPU_EXPORT extern const char kGpuDriverVendor[];
-GPU_EXPORT extern const char kGpuDriverVersion[];
-GPU_EXPORT extern const char kGpuDriverDate[];
-GPU_EXPORT extern const char kGpuSecondaryVendorIDs[];
-GPU_EXPORT extern const char kGpuSecondaryDeviceIDs[];
-GPU_EXPORT extern const char kGpuTestingNoCompleteInfoCollection[];
-GPU_EXPORT extern const char kGpuVendorID[];
GPU_EXPORT extern const char kIgnoreGpuBlacklist[];
GPU_EXPORT extern const char kGpuBlacklistTestGroup[];
GPU_EXPORT extern const char kGpuDriverBugListTestGroup[];
diff --git a/chromium/gpu/config/gpu_test_config.cc b/chromium/gpu/config/gpu_test_config.cc
index ed1a831bbe9..fb7a8358327 100644
--- a/chromium/gpu/config/gpu_test_config.cc
+++ b/chromium/gpu/config/gpu_test_config.cc
@@ -9,6 +9,7 @@
#include "base/logging.h"
#include "base/sys_info.h"
+#include "build/build_config.h"
#include "gpu/config/gpu_info.h"
#include "gpu/config/gpu_info_collector.h"
#include "gpu/config/gpu_test_expectations_parser.h"
@@ -244,15 +245,19 @@ bool GPUTestBotConfig::Matches(const std::string& config_data) const {
bool GPUTestBotConfig::LoadCurrentConfig(const GPUInfo* gpu_info) {
bool rt;
- if (gpu_info == NULL) {
+ if (!gpu_info) {
+#if defined(OS_ANDROID)
+ // TODO(zmo): Implement this.
+ rt = false;
+#else
GPUInfo my_gpu_info;
- CollectInfoResult result = CollectBasicGraphicsInfo(&my_gpu_info);
- if (result != kCollectInfoSuccess) {
+ if (!CollectBasicGraphicsInfo(&my_gpu_info)) {
LOG(ERROR) << "Fail to identify GPU";
rt = false;
} else {
rt = SetGPUInfo(my_gpu_info);
}
+#endif // OS_ANDROID
} else {
rt = SetGPUInfo(*gpu_info);
}
@@ -296,4 +301,3 @@ bool GPUTestBotConfig::GpuBlacklistedOnBot() {
}
} // namespace gpu
-
diff --git a/chromium/gpu/config/gpu_util.cc b/chromium/gpu/config/gpu_util.cc
index d68eef65ed5..384c48011cd 100644
--- a/chromium/gpu/config/gpu_util.cc
+++ b/chromium/gpu/config/gpu_util.cc
@@ -8,12 +8,9 @@
#include <vector>
#include "base/command_line.h"
-#include "base/logging.h"
#include "base/strings/string_number_conversions.h"
-#include "base/strings/string_piece.h"
#include "base/strings/string_split.h"
#include "base/strings/stringprintf.h"
-#include "base/sys_info.h"
#include "gpu/config/gpu_blacklist.h"
#include "gpu/config/gpu_crash_keys.h"
#include "gpu/config/gpu_driver_bug_list.h"
@@ -25,22 +22,16 @@
#include "ui/gl/extension_set.h"
#include "ui/gl/gl_switches.h"
+#if defined(OS_ANDROID)
+#include "base/no_destructor.h"
+#include "base/synchronization/lock.h"
+#include "ui/gl/init/gl_factory.h"
+#endif // OS_ANDROID
+
namespace gpu {
namespace {
-// |str| is in the format of "0x040a;0x10de;...;hex32_N".
-void StringToIds(const std::string& str, std::vector<uint32_t>* list) {
- DCHECK(list);
- for (const base::StringPiece& piece : base::SplitStringPiece(
- str, ";", base::TRIM_WHITESPACE, base::SPLIT_WANT_ALL)) {
- uint32_t id = 0;
- bool succeed = base::HexStringToUInt(piece, &id);
- DCHECK(succeed);
- list->push_back(id);
- }
-}
-
GpuFeatureStatus GetGpuRasterizationFeatureStatus(
const std::set<int>& blacklisted_features,
const base::CommandLine& command_line) {
@@ -64,7 +55,7 @@ GpuFeatureStatus GetWebGLFeatureStatus(
const std::set<int>& blacklisted_features,
bool use_swift_shader) {
if (use_swift_shader)
- return kGpuFeatureStatusSoftware;
+ return kGpuFeatureStatusEnabled;
if (blacklisted_features.count(GPU_FEATURE_TYPE_ACCELERATED_WEBGL))
return kGpuFeatureStatusBlacklisted;
return kGpuFeatureStatusEnabled;
@@ -74,7 +65,7 @@ GpuFeatureStatus GetWebGL2FeatureStatus(
const std::set<int>& blacklisted_features,
bool use_swift_shader) {
if (use_swift_shader)
- return kGpuFeatureStatusSoftware;
+ return kGpuFeatureStatusEnabled;
if (blacklisted_features.count(GPU_FEATURE_TYPE_ACCELERATED_WEBGL2))
return kGpuFeatureStatusBlacklisted;
return kGpuFeatureStatusEnabled;
@@ -183,93 +174,6 @@ GpuFeatureInfo* g_gpu_feature_info_cache = nullptr;
} // namespace anonymous
-void ParseSecondaryGpuDevicesFromCommandLine(
- const base::CommandLine& command_line,
- GPUInfo* gpu_info) {
- DCHECK(gpu_info);
-
- const char* secondary_vendor_switch_key = switches::kGpuSecondaryVendorIDs;
- const char* secondary_device_switch_key = switches::kGpuSecondaryDeviceIDs;
-
- if (!command_line.HasSwitch(secondary_vendor_switch_key) ||
- !command_line.HasSwitch(secondary_device_switch_key)) {
- return;
- }
-
- std::vector<uint32_t> vendor_ids;
- std::vector<uint32_t> device_ids;
- StringToIds(command_line.GetSwitchValueASCII(secondary_vendor_switch_key),
- &vendor_ids);
- StringToIds(command_line.GetSwitchValueASCII(secondary_device_switch_key),
- &device_ids);
-
- DCHECK(vendor_ids.size() == device_ids.size());
- gpu_info->secondary_gpus.clear();
- for (size_t i = 0; i < vendor_ids.size() && i < device_ids.size(); ++i) {
- gpu::GPUInfo::GPUDevice secondary_device;
- secondary_device.active = false;
- secondary_device.vendor_id = vendor_ids[i];
- secondary_device.device_id = device_ids[i];
- gpu_info->secondary_gpus.push_back(secondary_device);
- }
-}
-
-void GetGpuInfoFromCommandLine(const base::CommandLine& command_line,
- GPUInfo* gpu_info) {
- DCHECK(gpu_info);
-
- if (!command_line.HasSwitch(switches::kGpuVendorID) ||
- !command_line.HasSwitch(switches::kGpuDeviceID) ||
- !command_line.HasSwitch(switches::kGpuDriverVersion))
- return;
- bool success = base::HexStringToUInt(
- command_line.GetSwitchValueASCII(switches::kGpuVendorID),
- &gpu_info->gpu.vendor_id);
- DCHECK(success);
- success = base::HexStringToUInt(
- command_line.GetSwitchValueASCII(switches::kGpuDeviceID),
- &gpu_info->gpu.device_id);
- DCHECK(success);
- gpu_info->driver_vendor =
- command_line.GetSwitchValueASCII(switches::kGpuDriverVendor);
- gpu_info->driver_version =
- command_line.GetSwitchValueASCII(switches::kGpuDriverVersion);
- gpu_info->driver_date =
- command_line.GetSwitchValueASCII(switches::kGpuDriverDate);
- gpu::ParseSecondaryGpuDevicesFromCommandLine(command_line, gpu_info);
-
- // Set active gpu device.
- if (command_line.HasSwitch(switches::kGpuActiveVendorID) &&
- command_line.HasSwitch(switches::kGpuActiveDeviceID)) {
- uint32_t active_vendor_id = 0;
- uint32_t active_device_id = 0;
- success = base::HexStringToUInt(
- command_line.GetSwitchValueASCII(switches::kGpuActiveVendorID),
- &active_vendor_id);
- DCHECK(success);
- success = base::HexStringToUInt(
- command_line.GetSwitchValueASCII(switches::kGpuActiveDeviceID),
- &active_device_id);
- DCHECK(success);
- if (gpu_info->gpu.vendor_id == active_vendor_id &&
- gpu_info->gpu.device_id == active_device_id) {
- gpu_info->gpu.active = true;
- } else {
- for (size_t i = 0; i < gpu_info->secondary_gpus.size(); ++i) {
- if (gpu_info->secondary_gpus[i].vendor_id == active_vendor_id &&
- gpu_info->secondary_gpus[i].device_id == active_device_id) {
- gpu_info->secondary_gpus[i].active = true;
- break;
- }
- }
- }
- }
-
- if (command_line.HasSwitch(switches::kAMDSwitchable)) {
- gpu_info->amd_switchable = true;
- }
-}
-
GpuFeatureInfo ComputeGpuFeatureInfoWithHardwareAccelerationDisabled() {
GpuFeatureInfo gpu_feature_info;
gpu_feature_info.status_values[GPU_FEATURE_TYPE_ACCELERATED_2D_CANVAS] =
@@ -298,6 +202,34 @@ GpuFeatureInfo ComputeGpuFeatureInfoWithHardwareAccelerationDisabled() {
return gpu_feature_info;
}
+GpuFeatureInfo ComputeGpuFeatureInfoWithNoGpuProcess() {
+ GpuFeatureInfo gpu_feature_info;
+ gpu_feature_info.status_values[GPU_FEATURE_TYPE_ACCELERATED_2D_CANVAS] =
+ kGpuFeatureStatusSoftware;
+ gpu_feature_info.status_values[GPU_FEATURE_TYPE_GPU_COMPOSITING] =
+ kGpuFeatureStatusDisabled;
+ gpu_feature_info.status_values[GPU_FEATURE_TYPE_ACCELERATED_WEBGL] =
+ kGpuFeatureStatusDisabled;
+ gpu_feature_info.status_values[GPU_FEATURE_TYPE_FLASH3D] =
+ kGpuFeatureStatusDisabled;
+ gpu_feature_info.status_values[GPU_FEATURE_TYPE_FLASH_STAGE3D] =
+ kGpuFeatureStatusDisabled;
+ gpu_feature_info.status_values[GPU_FEATURE_TYPE_ACCELERATED_VIDEO_DECODE] =
+ kGpuFeatureStatusDisabled;
+ gpu_feature_info.status_values[GPU_FEATURE_TYPE_FLASH_STAGE3D_BASELINE] =
+ kGpuFeatureStatusDisabled;
+ gpu_feature_info.status_values[GPU_FEATURE_TYPE_GPU_RASTERIZATION] =
+ kGpuFeatureStatusDisabled;
+ gpu_feature_info.status_values[GPU_FEATURE_TYPE_ACCELERATED_WEBGL2] =
+ kGpuFeatureStatusDisabled;
+#if DCHECK_IS_ON()
+ for (int ii = 0; ii < NUMBER_OF_GPU_FEATURE_TYPES; ++ii) {
+ DCHECK_NE(kGpuFeatureStatusUndefined, gpu_feature_info.status_values[ii]);
+ }
+#endif
+ return gpu_feature_info;
+}
+
GpuFeatureInfo ComputeGpuFeatureInfoForSwiftShader() {
GpuFeatureInfo gpu_feature_info;
gpu_feature_info.status_values[GPU_FEATURE_TYPE_ACCELERATED_2D_CANVAS] =
@@ -330,7 +262,9 @@ GpuFeatureInfo ComputeGpuFeatureInfo(const GPUInfo& gpu_info,
bool ignore_gpu_blacklist,
bool disable_gpu_driver_bug_workarounds,
bool log_gpu_control_list_decisions,
- base::CommandLine* command_line) {
+ base::CommandLine* command_line,
+ bool* needs_more_info) {
+ DCHECK(!needs_more_info || !(*needs_more_info));
bool use_swift_shader = false;
bool use_swift_shader_for_webgl = false;
if (command_line->HasSwitch(switches::kUseGL)) {
@@ -345,7 +279,8 @@ GpuFeatureInfo ComputeGpuFeatureInfo(const GPUInfo& gpu_info,
GpuFeatureInfo gpu_feature_info;
std::set<int> blacklisted_features;
- if (!ignore_gpu_blacklist) {
+ if (!ignore_gpu_blacklist &&
+ !command_line->HasSwitch(switches::kUseGpuInTests)) {
std::unique_ptr<GpuBlacklist> list(GpuBlacklist::Create());
if (log_gpu_control_list_decisions)
list->EnableControlListLogging("gpu_blacklist");
@@ -359,6 +294,9 @@ GpuFeatureInfo ComputeGpuFeatureInfo(const GPUInfo& gpu_info,
blacklisted_features = list->MakeDecision(
GpuControlList::kOsAny, std::string(), gpu_info, target_test_group);
gpu_feature_info.applied_gpu_blacklist_entries = list->GetActiveEntries();
+ if (needs_more_info) {
+ *needs_more_info = list->needs_more_info();
+ }
}
gpu_feature_info.status_values[GPU_FEATURE_TYPE_GPU_RASTERIZATION] =
@@ -499,4 +437,48 @@ bool PopGpuFeatureInfoCache(GpuFeatureInfo* gpu_feature_info) {
return true;
}
+#if defined(OS_ANDROID)
+bool InitializeGLThreadSafe(base::CommandLine* command_line,
+ bool ignore_gpu_blacklist,
+ bool disable_gpu_driver_bug_workarounds,
+ bool log_gpu_control_list_decisions,
+ GPUInfo* out_gpu_info,
+ GpuFeatureInfo* out_gpu_feature_info) {
+ static base::NoDestructor<base::Lock> gl_bindings_initialization_lock;
+ base::AutoLock auto_lock(*gl_bindings_initialization_lock);
+ DCHECK(command_line);
+ DCHECK(out_gpu_info && out_gpu_feature_info);
+ bool gpu_info_cached = PopGPUInfoCache(out_gpu_info);
+ bool gpu_feature_info_cached = PopGpuFeatureInfoCache(out_gpu_feature_info);
+ DCHECK_EQ(gpu_info_cached, gpu_feature_info_cached);
+ if (gpu_info_cached) {
+ // GL bindings have already been initialized in another thread.
+ DCHECK_NE(gl::kGLImplementationNone, gl::GetGLImplementation());
+ return true;
+ }
+ if (gl::GetGLImplementation() == gl::kGLImplementationNone) {
+ // Some tests initialize bindings by themselves.
+ if (!gl::init::InitializeGLNoExtensionsOneOff()) {
+ VLOG(1) << "gl::init::InitializeGLNoExtensionsOneOff failed";
+ return false;
+ }
+ }
+ CollectContextGraphicsInfo(out_gpu_info);
+ *out_gpu_feature_info = ComputeGpuFeatureInfo(
+ *out_gpu_info, ignore_gpu_blacklist, disable_gpu_driver_bug_workarounds,
+ log_gpu_control_list_decisions, command_line, nullptr);
+ if (!out_gpu_feature_info->disabled_extensions.empty()) {
+ gl::init::SetDisabledExtensionsPlatform(
+ out_gpu_feature_info->disabled_extensions);
+ }
+ if (!gl::init::InitializeExtensionSettingsOneOffPlatform()) {
+ VLOG(1) << "gl::init::InitializeExtensionSettingsOneOffPlatform failed";
+ return false;
+ }
+ CacheGPUInfo(*out_gpu_info);
+ CacheGpuFeatureInfo(*out_gpu_feature_info);
+ return true;
+}
+#endif // OS_ANDROID
+
} // namespace gpu
diff --git a/chromium/gpu/config/gpu_util.h b/chromium/gpu/config/gpu_util.h
index b4ac30f127f..dfcc3074ed6 100644
--- a/chromium/gpu/config/gpu_util.h
+++ b/chromium/gpu/config/gpu_util.h
@@ -17,22 +17,13 @@ namespace gpu {
struct GPUInfo;
-// With provided command line, fill gpu_info->secondary_gpus with parsed
-// secondary vendor and device ids.
-GPU_EXPORT void ParseSecondaryGpuDevicesFromCommandLine(
- const base::CommandLine& command_line,
- GPUInfo* gpu_info);
-
-// Command line contains basic GPU info collected at browser startup time in
-// GpuDataManagerImplPrivate::Initialize().
-// TODO(zmo): Obsolete this.
-GPU_EXPORT void GetGpuInfoFromCommandLine(const base::CommandLine& command_line,
- GPUInfo* gpu_info);
-
// Set GPU feature status if hardware acceleration is disabled.
GPU_EXPORT GpuFeatureInfo
ComputeGpuFeatureInfoWithHardwareAccelerationDisabled();
+// Set GPU feature status if GPU process is blocked.
+GPU_EXPORT GpuFeatureInfo ComputeGpuFeatureInfoWithNoGpuProcess();
+
// Set GPU feature status for SwiftShader.
GPU_EXPORT GpuFeatureInfo ComputeGpuFeatureInfoForSwiftShader();
@@ -45,7 +36,8 @@ ComputeGpuFeatureInfo(const GPUInfo& gpu_info,
bool ignore_gpu_blacklist,
bool disable_gpu_driver_bug_workarounds,
bool log_gpu_control_list_decisions,
- base::CommandLine* command_line);
+ base::CommandLine* command_line,
+ bool* needs_more_info);
GPU_EXPORT void SetKeysForCrashLogging(const GPUInfo& gpu_info);
@@ -63,6 +55,19 @@ GPU_EXPORT void CacheGpuFeatureInfo(const GpuFeatureInfo& gpu_feature_info);
// return true; otherwise, return false;
GPU_EXPORT bool PopGpuFeatureInfoCache(GpuFeatureInfo* gpu_feature_info);
+#if defined(OS_ANDROID)
+// Checks if GL bindings are initialized. If not, initializes GL
+// bindings, creates a GL context, collects GPUInfo, and makes blacklist
+// and GPU driver bug workaround decisions. This is intended to be
+// called by the Android WebView render thread and in-process GPU thread.
+GPU_EXPORT bool InitializeGLThreadSafe(base::CommandLine* command_line,
+ bool ignore_gpu_blacklist,
+ bool disable_gpu_driver_bug_workarounds,
+ bool log_gpu_control_list_decisions,
+ GPUInfo* out_gpu_info,
+ GpuFeatureInfo* out_gpu_feature_info);
+#endif // OS_ANDROID
+
} // namespace gpu
#endif // GPU_CONFIG_GPU_UTIL_H_
diff --git a/chromium/gpu/config/gpu_util_unittest.cc b/chromium/gpu/config/gpu_util_unittest.cc
index 17b3af1e96b..fae90ab68d7 100644
--- a/chromium/gpu/config/gpu_util_unittest.cc
+++ b/chromium/gpu/config/gpu_util_unittest.cc
@@ -5,7 +5,6 @@
#include "gpu/config/gpu_util.h"
#include "base/command_line.h"
-#include "base/strings/stringprintf.h"
#include "gpu/config/gpu_driver_bug_workaround_type.h"
#include "gpu/config/gpu_info.h"
#include "gpu/config/gpu_switches.h"
@@ -13,80 +12,12 @@
namespace gpu {
-TEST(GpuUtilTest, ParseSecondaryGpuDevicesFromCommandLine_Simple) {
- base::CommandLine command_line(base::CommandLine::NO_PROGRAM);
- command_line.AppendSwitchASCII(switches::kGpuSecondaryVendorIDs, "0x10de");
- command_line.AppendSwitchASCII(switches::kGpuSecondaryDeviceIDs, "0x0de1");
-
- GPUInfo gpu_info;
- ParseSecondaryGpuDevicesFromCommandLine(command_line, &gpu_info);
-
- EXPECT_EQ(gpu_info.secondary_gpus.size(), 1ul);
- EXPECT_EQ(gpu_info.secondary_gpus[0].vendor_id, 0x10deul);
- EXPECT_EQ(gpu_info.secondary_gpus[0].device_id, 0x0de1ul);
-}
-
-TEST(GpuUtilTest, ParseSecondaryGpuDevicesFromCommandLine_Multiple) {
- std::vector<std::pair<uint32_t, uint32_t>> gpu_devices = {
- {0x10de, 0x0de1}, {0x1002, 0x6779}, {0x8086, 0x040a}};
-
- base::CommandLine command_line(base::CommandLine::NO_PROGRAM);
- command_line.AppendSwitchASCII(switches::kGpuSecondaryVendorIDs,
- "0x10de;0x1002;0x8086");
- command_line.AppendSwitchASCII(switches::kGpuSecondaryDeviceIDs,
- "0x0de1;0x6779;0x040a");
-
- GPUInfo gpu_info;
- ParseSecondaryGpuDevicesFromCommandLine(command_line, &gpu_info);
- EXPECT_EQ(gpu_info.secondary_gpus.size(), 3ul);
- EXPECT_EQ(gpu_info.secondary_gpus.size(), gpu_devices.size());
-
- for (size_t i = 0; i < gpu_info.secondary_gpus.size(); ++i) {
- EXPECT_EQ(gpu_info.secondary_gpus[i].vendor_id, gpu_devices[i].first);
- EXPECT_EQ(gpu_info.secondary_gpus[i].device_id, gpu_devices[i].second);
- }
-}
-
-TEST(GpuUtilTest, ParseSecondaryGpuDevicesFromCommandLine_Generated) {
- base::CommandLine command_line(base::CommandLine::NO_PROGRAM);
- std::vector<std::pair<uint32_t, uint32_t>> gpu_devices;
-
- std::string vendor_ids_str;
- std::string device_ids_str;
- for (uint32_t i = 0x80000000; i > 1; i >>= 1) {
- gpu_devices.push_back(std::pair<uint32_t, uint32_t>(i, i + 1));
-
- if (!vendor_ids_str.empty())
- vendor_ids_str += ";";
- if (!device_ids_str.empty())
- device_ids_str += ";";
- vendor_ids_str += base::StringPrintf("0x%04x", i);
- device_ids_str += base::StringPrintf("0x%04x", i + 1);
- }
-
- command_line.AppendSwitchASCII(switches::kGpuSecondaryVendorIDs,
- vendor_ids_str);
- command_line.AppendSwitchASCII(switches::kGpuSecondaryDeviceIDs,
- device_ids_str);
-
- GPUInfo gpu_info;
- ParseSecondaryGpuDevicesFromCommandLine(command_line, &gpu_info);
-
- EXPECT_EQ(gpu_devices.size(), 31ul);
- EXPECT_EQ(gpu_info.secondary_gpus.size(), gpu_devices.size());
-
- for (size_t i = 0; i < gpu_info.secondary_gpus.size(); ++i) {
- EXPECT_EQ(gpu_info.secondary_gpus[i].vendor_id, gpu_devices[i].first);
- EXPECT_EQ(gpu_info.secondary_gpus[i].device_id, gpu_devices[i].second);
- }
-}
-
TEST(GpuUtilTest, GetGpuFeatureInfo_WorkaroundFromCommandLine) {
{
base::CommandLine command_line(base::CommandLine::NO_PROGRAM);
GPUInfo gpu_info;
- GpuFeatureInfo gpu_feature_info =
- ComputeGpuFeatureInfo(gpu_info, false, false, false, &command_line);
+ GpuFeatureInfo gpu_feature_info = ComputeGpuFeatureInfo(
+ gpu_info, false, false, false, &command_line, nullptr);
EXPECT_FALSE(gpu_feature_info.IsWorkaroundEnabled(
USE_GPU_DRIVER_WORKAROUND_FOR_TESTING));
}
@@ -97,8 +28,8 @@ TEST(GpuUtilTest, GetGpuFeatureInfo_WorkaroundFromCommandLine) {
USE_GPU_DRIVER_WORKAROUND_FOR_TESTING),
"1");
GPUInfo gpu_info;
- GpuFeatureInfo gpu_feature_info =
- ComputeGpuFeatureInfo(gpu_info, false, false, false, &command_line);
+ GpuFeatureInfo gpu_feature_info = ComputeGpuFeatureInfo(
+ gpu_info, false, false, false, &command_line, nullptr);
EXPECT_TRUE(gpu_feature_info.IsWorkaroundEnabled(
USE_GPU_DRIVER_WORKAROUND_FOR_TESTING));
}
@@ -108,8 +39,8 @@ TEST(GpuUtilTest, GetGpuFeatureInfo_WorkaroundFromCommandLine) {
command_line.AppendSwitchASCII(switches::kGpuDriverBugListTestGroup, "1");
// See gpu/config/gpu_driver_bug_list.json, test_group 1, entry 215.
GPUInfo gpu_info;
- GpuFeatureInfo gpu_feature_info =
- ComputeGpuFeatureInfo(gpu_info, false, false, false, &command_line);
+ GpuFeatureInfo gpu_feature_info = ComputeGpuFeatureInfo(
+ gpu_info, false, false, false, &command_line, nullptr);
EXPECT_TRUE(gpu_feature_info.IsWorkaroundEnabled(
USE_GPU_DRIVER_WORKAROUND_FOR_TESTING));
}
@@ -122,8 +53,8 @@ TEST(GpuUtilTest, GetGpuFeatureInfo_WorkaroundFromCommandLine) {
"0");
// See gpu/config/gpu_driver_bug_list.json, test_group 1, entry 215.
GPUInfo gpu_info;
- GpuFeatureInfo gpu_feature_info =
- ComputeGpuFeatureInfo(gpu_info, false, false, false, &command_line);
+ GpuFeatureInfo gpu_feature_info = ComputeGpuFeatureInfo(
+ gpu_info, false, false, false, &command_line, nullptr);
EXPECT_FALSE(gpu_feature_info.IsWorkaroundEnabled(
USE_GPU_DRIVER_WORKAROUND_FOR_TESTING));
}
diff --git a/chromium/gpu/gles2_conform_support/egl/config.cc b/chromium/gpu/gles2_conform_support/egl/config.cc
index 5c4767e5198..72baeff68c4 100644
--- a/chromium/gpu/gles2_conform_support/egl/config.cc
+++ b/chromium/gpu/gles2_conform_support/egl/config.cc
@@ -56,6 +56,7 @@ bool Config::Matches(const EGLint* attrib_list) const {
(requested_surface_type & surface_type_) !=
requested_surface_type)
return false;
+ break;
}
default:
break;
diff --git a/chromium/gpu/gles2_conform_support/egl/context.cc b/chromium/gpu/gles2_conform_support/egl/context.cc
index fae825b1bd3..cdd99a07b11 100644
--- a/chromium/gpu/gles2_conform_support/egl/context.cc
+++ b/chromium/gpu/gles2_conform_support/egl/context.cc
@@ -183,7 +183,7 @@ void Context::DestroyImage(int32_t id) {
NOTIMPLEMENTED();
}
-void Context::SignalQuery(uint32_t query, const base::Closure& callback) {
+void Context::SignalQuery(uint32_t query, base::OnceClosure callback) {
NOTIMPLEMENTED();
}
@@ -227,7 +227,7 @@ bool Context::IsFenceSyncReleased(uint64_t release) {
}
void Context::SignalSyncToken(const gpu::SyncToken& sync_token,
- const base::Closure& callback) {
+ base::OnceClosure callback) {
NOTIMPLEMENTED();
}
diff --git a/chromium/gpu/gles2_conform_support/egl/context.h b/chromium/gpu/gles2_conform_support/egl/context.h
index d9237ef1f83..518b6ba5ca2 100644
--- a/chromium/gpu/gles2_conform_support/egl/context.h
+++ b/chromium/gpu/gles2_conform_support/egl/context.h
@@ -65,7 +65,7 @@ class Context : public base::RefCountedThreadSafe<Context>,
size_t height,
unsigned internalformat) override;
void DestroyImage(int32_t id) override;
- void SignalQuery(uint32_t query, const base::Closure& callback) override;
+ void SignalQuery(uint32_t query, base::OnceClosure callback) override;
void CreateGpuFence(uint32_t gpu_fence_id, ClientGpuFence source) override;
void GetGpuFence(uint32_t gpu_fence_id,
base::OnceCallback<void(std::unique_ptr<gfx::GpuFence>)>
@@ -78,7 +78,7 @@ class Context : public base::RefCountedThreadSafe<Context>,
uint64_t GenerateFenceSyncRelease() override;
bool IsFenceSyncReleased(uint64_t release) override;
void SignalSyncToken(const gpu::SyncToken& sync_token,
- const base::Closure& callback) override;
+ base::OnceClosure callback) override;
void WaitSyncTokenHint(const gpu::SyncToken& sync_token) override;
bool CanWaitUnverifiedSyncToken(const gpu::SyncToken& sync_token) override;
void SetSnapshotRequested() override;
diff --git a/chromium/gpu/gles2_conform_support/egl/thread_state.cc b/chromium/gpu/gles2_conform_support/egl/thread_state.cc
index 282ccd6d119..4eaaa4f3ba4 100644
--- a/chromium/gpu/gles2_conform_support/egl/thread_state.cc
+++ b/chromium/gpu/gles2_conform_support/egl/thread_state.cc
@@ -78,20 +78,20 @@ egl::ThreadState* ThreadState::Get() {
// Need to call both Init and InitFromArgv, since Windows does not use
// argc, argv in CommandLine::Init(argc, argv).
command_line->InitFromArgv(argv);
+ gl::init::InitializeGLNoExtensionsOneOff();
gpu::GpuFeatureInfo gpu_feature_info;
if (!command_line->HasSwitch(switches::kDisableGpuDriverBugWorkarounds)) {
gpu::GPUInfo gpu_info;
- gpu::CollectBasicGraphicsInfo(&gpu_info);
+ gpu::CollectGraphicsInfoForTesting(&gpu_info);
gpu_feature_info = gpu::ComputeGpuFeatureInfo(
gpu_info,
false, // ignore_gpu_blacklist
false, // disable_gpu_driver_bug_workarounds
false, // log_gpu_control_list_decisions
- command_line);
+ command_line, nullptr);
Context::SetPlatformGpuFeatureInfo(gpu_feature_info);
}
- gl::init::InitializeGLNoExtensionsOneOff();
gl::init::SetDisabledExtensionsPlatform(
gpu_feature_info.disabled_extensions);
gl::init::InitializeExtensionSettingsOneOffPlatform();
diff --git a/chromium/gpu/ipc/client/command_buffer_proxy_impl.cc b/chromium/gpu/ipc/client/command_buffer_proxy_impl.cc
index 8b2929b1ab3..7f60970a94b 100644
--- a/chromium/gpu/ipc/client/command_buffer_proxy_impl.cc
+++ b/chromium/gpu/ipc/client/command_buffer_proxy_impl.cc
@@ -4,6 +4,7 @@
#include "gpu/ipc/client/command_buffer_proxy_impl.h"
+#include <memory>
#include <utility>
#include <vector>
@@ -11,7 +12,6 @@
#include "base/command_line.h"
#include "base/location.h"
#include "base/logging.h"
-#include "base/memory/ptr_util.h"
#include "base/memory/shared_memory.h"
#include "base/optional.h"
#include "base/stl_util.h"
@@ -234,9 +234,9 @@ void CommandBufferProxyImpl::OnSignalAck(uint32_t id,
gpu::error::kLostContext);
return;
}
- base::Closure callback = it->second;
+ base::OnceClosure callback = std::move(it->second);
signal_tasks_.erase(it);
- callback.Run();
+ std::move(callback).Run();
}
CommandBuffer::State CommandBufferProxyImpl::GetLastState() {
@@ -575,7 +575,7 @@ bool CommandBufferProxyImpl::IsFenceSyncReleased(uint64_t release) {
}
void CommandBufferProxyImpl::SignalSyncToken(const gpu::SyncToken& sync_token,
- const base::Closure& callback) {
+ base::OnceClosure callback) {
CheckLock();
base::AutoLock lock(last_state_lock_);
if (last_state_.error != gpu::error::kNoError)
@@ -584,7 +584,7 @@ void CommandBufferProxyImpl::SignalSyncToken(const gpu::SyncToken& sync_token,
uint32_t signal_id = next_signal_id_++;
Send(new GpuCommandBufferMsg_SignalSyncToken(route_id_, sync_token,
signal_id));
- signal_tasks_.insert(std::make_pair(signal_id, callback));
+ signal_tasks_.insert(std::make_pair(signal_id, std::move(callback)));
}
void CommandBufferProxyImpl::WaitSyncTokenHint(
@@ -614,7 +614,7 @@ void CommandBufferProxyImpl::SetSnapshotRequested() {
}
void CommandBufferProxyImpl::SignalQuery(uint32_t query,
- const base::Closure& callback) {
+ base::OnceClosure callback) {
CheckLock();
base::AutoLock lock(last_state_lock_);
if (last_state_.error != gpu::error::kNoError)
@@ -630,7 +630,7 @@ void CommandBufferProxyImpl::SignalQuery(uint32_t query,
// called, leading to stalled threads and/or memory leaks.
uint32_t signal_id = next_signal_id_++;
Send(new GpuCommandBufferMsg_SignalQuery(route_id_, query, signal_id));
- signal_tasks_.insert(std::make_pair(signal_id, callback));
+ signal_tasks_.insert(std::make_pair(signal_id, std::move(callback)));
}
void CommandBufferProxyImpl::CreateGpuFence(uint32_t gpu_fence_id,
@@ -667,7 +667,7 @@ void CommandBufferProxyImpl::OnGetGpuFenceHandleComplete(
uint32_t gpu_fence_id,
const gfx::GpuFenceHandle& handle) {
// Always consume the provided handle to avoid leaks on error.
- auto gpu_fence = base::MakeUnique<gfx::GpuFence>(handle);
+ auto gpu_fence = std::make_unique<gfx::GpuFence>(handle);
GetGpuFenceTaskMap::iterator it = get_gpu_fence_tasks_.find(gpu_fence_id);
if (it == get_gpu_fence_tasks_.end()) {
diff --git a/chromium/gpu/ipc/client/command_buffer_proxy_impl.h b/chromium/gpu/ipc/client/command_buffer_proxy_impl.h
index afa5eb77e7f..5786d9cbde5 100644
--- a/chromium/gpu/ipc/client/command_buffer_proxy_impl.h
+++ b/chromium/gpu/ipc/client/command_buffer_proxy_impl.h
@@ -116,7 +116,7 @@ class GPU_EXPORT CommandBufferProxyImpl : public gpu::CommandBuffer,
size_t height,
unsigned internal_format) override;
void DestroyImage(int32_t id) override;
- void SignalQuery(uint32_t query, const base::Closure& callback) override;
+ void SignalQuery(uint32_t query, base::OnceClosure callback) override;
void CreateGpuFence(uint32_t gpu_fence_id, ClientGpuFence source) override;
void GetGpuFence(uint32_t gpu_fence_id,
base::OnceCallback<void(std::unique_ptr<gfx::GpuFence>)>
@@ -130,7 +130,7 @@ class GPU_EXPORT CommandBufferProxyImpl : public gpu::CommandBuffer,
uint64_t GenerateFenceSyncRelease() override;
bool IsFenceSyncReleased(uint64_t release) override;
void SignalSyncToken(const gpu::SyncToken& sync_token,
- const base::Closure& callback) override;
+ base::OnceClosure callback) override;
void WaitSyncTokenHint(const gpu::SyncToken& sync_token) override;
bool CanWaitUnverifiedSyncToken(const gpu::SyncToken& sync_token) override;
void SetSnapshotRequested() override;
@@ -172,7 +172,7 @@ class GPU_EXPORT CommandBufferProxyImpl : public gpu::CommandBuffer,
private:
typedef std::map<int32_t, scoped_refptr<gpu::Buffer>> TransferBufferMap;
- typedef base::hash_map<uint32_t, base::Closure> SignalTaskMap;
+ typedef base::hash_map<uint32_t, base::OnceClosure> SignalTaskMap;
void CheckLock() {
if (lock_) {
diff --git a/chromium/gpu/ipc/client/gpu_context_tests.h b/chromium/gpu/ipc/client/gpu_context_tests.h
index 1118afd0463..b5b40cc4729 100644
--- a/chromium/gpu/ipc/client/gpu_context_tests.h
+++ b/chromium/gpu/ipc/client/gpu_context_tests.h
@@ -2,6 +2,9 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#ifndef GPU_IPC_CLIENT_GPU_CONTEXT_TESTS_H_
+#define GPU_IPC_CLIENT_GPU_CONTEXT_TESTS_H_
+
// These tests are run twice:
// Once in a gpu test with an in-process command buffer.
// Once in a browsertest with an out-of-process command buffer and gpu-process.
@@ -197,3 +200,5 @@ CONTEXT_TEST_F(GpuFenceTest, BasicGpuFenceTest) {
#endif // defined(OS_ANDROID)
}; // namespace
+
+#endif // GPU_IPC_CLIENT_GPU_CONTEXT_TESTS_H_
diff --git a/chromium/gpu/ipc/client/gpu_memory_buffer_impl_android_hardware_buffer.cc b/chromium/gpu/ipc/client/gpu_memory_buffer_impl_android_hardware_buffer.cc
index d85a90bf4a6..3242c742e0a 100644
--- a/chromium/gpu/ipc/client/gpu_memory_buffer_impl_android_hardware_buffer.cc
+++ b/chromium/gpu/ipc/client/gpu_memory_buffer_impl_android_hardware_buffer.cc
@@ -8,6 +8,7 @@
#include "base/android/android_hardware_buffer_compat.h"
#include "base/bind.h"
+#include "base/bind_helpers.h"
#include "base/logging.h"
#include "base/memory/ptr_util.h"
#include "base/memory/shared_memory_handle.h"
@@ -150,7 +151,7 @@ base::Closure GpuMemoryBufferImplAndroidHardwareBuffer::AllocateForTesting(
DCHECK(buffer);
handle->handle =
base::SharedMemoryHandle(buffer, 0, base::UnguessableToken::Create());
- return base::Bind(&base::DoNothing);
+ return base::DoNothing();
}
} // namespace gpu
diff --git a/chromium/gpu/ipc/client/gpu_memory_buffer_impl_dxgi.cc b/chromium/gpu/ipc/client/gpu_memory_buffer_impl_dxgi.cc
index 36ee720e877..43796ba66c3 100644
--- a/chromium/gpu/ipc/client/gpu_memory_buffer_impl_dxgi.cc
+++ b/chromium/gpu/ipc/client/gpu_memory_buffer_impl_dxgi.cc
@@ -5,6 +5,7 @@
#include <wrl.h>
#include "base/bind.h"
+#include "base/bind_helpers.h"
#include "base/command_line.h"
#include "base/memory/ptr_util.h"
#include "gpu/ipc/client/gpu_memory_buffer_impl_dxgi.h"
@@ -85,7 +86,7 @@ base::Closure GpuMemoryBufferImplDXGI::AllocateForTesting(
base::UnguessableToken::Create());
handle->type = gfx::DXGI_SHARED_HANDLE;
handle->id = kBufferId;
- return base::Bind(&base::DoNothing);
+ return base::DoNothing();
}
bool GpuMemoryBufferImplDXGI::Map() {
diff --git a/chromium/gpu/ipc/client/gpu_memory_buffer_impl_io_surface.cc b/chromium/gpu/ipc/client/gpu_memory_buffer_impl_io_surface.cc
index 40e592b900b..ae26b9e2f27 100644
--- a/chromium/gpu/ipc/client/gpu_memory_buffer_impl_io_surface.cc
+++ b/chromium/gpu/ipc/client/gpu_memory_buffer_impl_io_surface.cc
@@ -5,6 +5,8 @@
#include "gpu/ipc/client/gpu_memory_buffer_impl_io_surface.h"
#include "base/bind.h"
+#include "base/bind_helpers.h"
+#include "base/debug/dump_without_crashing.h"
#include "base/logging.h"
#include "base/memory/ptr_util.h"
#include "gpu/ipc/common/gpu_memory_buffer_support.h"
@@ -14,6 +16,10 @@
namespace gpu {
namespace {
+// The maximum number of times to dump before throttling (to avoid sending
+// thousands of crash dumps).
+const int kMaxCrashDumps = 10;
+
uint32_t LockFlags(gfx::BufferUsage usage) {
switch (usage) {
case gfx::BufferUsage::GPU_READ_CPU_READ_WRITE:
@@ -30,8 +36,6 @@ uint32_t LockFlags(gfx::BufferUsage usage) {
return 0;
}
-void NoOp() {}
-
} // namespace
GpuMemoryBufferImplIOSurface::GpuMemoryBufferImplIOSurface(
@@ -55,10 +59,22 @@ GpuMemoryBufferImplIOSurface::CreateFromHandle(
gfx::BufferFormat format,
gfx::BufferUsage usage,
const DestructionCallback& callback) {
+ if (!handle.mach_port) {
+ LOG(ERROR) << "Invalid IOSurface mach port returned to client.";
+ return nullptr;
+ }
+
base::ScopedCFTypeRef<IOSurfaceRef> io_surface(
IOSurfaceLookupFromMachPort(handle.mach_port.get()));
- if (!io_surface)
+ if (!io_surface) {
+ LOG(ERROR) << "Failed to open IOSurface via mach port returned to client.";
+ static int dump_counter = kMaxCrashDumps;
+ if (dump_counter) {
+ dump_counter -= 1;
+ base::debug::DumpWithoutCrashing();
+ }
return nullptr;
+ }
return base::WrapUnique(
new GpuMemoryBufferImplIOSurface(handle.id, size, format, callback,
@@ -85,7 +101,7 @@ base::Closure GpuMemoryBufferImplIOSurface::AllocateForTesting(
handle->type = gfx::IO_SURFACE_BUFFER;
handle->id = kBufferId;
handle->mach_port.reset(IOSurfaceCreateMachPort(io_surface));
- return base::Bind(&NoOp);
+ return base::DoNothing();
}
bool GpuMemoryBufferImplIOSurface::Map() {
diff --git a/chromium/gpu/ipc/client/gpu_memory_buffer_impl_shared_memory.cc b/chromium/gpu/ipc/client/gpu_memory_buffer_impl_shared_memory.cc
index 07506ca0553..2adc835aec7 100644
--- a/chromium/gpu/ipc/client/gpu_memory_buffer_impl_shared_memory.cc
+++ b/chromium/gpu/ipc/client/gpu_memory_buffer_impl_shared_memory.cc
@@ -8,6 +8,7 @@
#include <utility>
#include "base/bind.h"
+#include "base/bind_helpers.h"
#include "base/format_macros.h"
#include "base/memory/ptr_util.h"
#include "base/numerics/safe_math.h"
@@ -149,6 +150,7 @@ bool GpuMemoryBufferImplSharedMemory::IsSizeValidForFormat(
case gfx::BufferFormat::BGRA_8888:
case gfx::BufferFormat::BGRX_8888:
case gfx::BufferFormat::BGRX_1010102:
+ case gfx::BufferFormat::RGBX_1010102:
case gfx::BufferFormat::RGBA_F16:
return true;
case gfx::BufferFormat::YVU_420:
@@ -176,7 +178,7 @@ base::Closure GpuMemoryBufferImplSharedMemory::AllocateForTesting(
gfx::BufferUsage usage,
gfx::GpuMemoryBufferHandle* handle) {
*handle = CreateGpuMemoryBuffer(handle->id, size, format, usage);
- return base::Bind(&base::DoNothing);
+ return base::DoNothing();
}
bool GpuMemoryBufferImplSharedMemory::Map() {
diff --git a/chromium/gpu/ipc/common/gpu_command_buffer_traits_multi.h b/chromium/gpu/ipc/common/gpu_command_buffer_traits_multi.h
index 237dbd38f90..ea6796fff98 100644
--- a/chromium/gpu/ipc/common/gpu_command_buffer_traits_multi.h
+++ b/chromium/gpu/ipc/common/gpu_command_buffer_traits_multi.h
@@ -119,6 +119,8 @@ IPC_STRUCT_TRAITS_BEGIN(gpu::Capabilities)
IPC_STRUCT_TRAITS_MEMBER(image_ycbcr_422)
IPC_STRUCT_TRAITS_MEMBER(image_ycbcr_420v)
IPC_STRUCT_TRAITS_MEMBER(image_ycbcr_420v_disabled_for_video_frames)
+ IPC_STRUCT_TRAITS_MEMBER(image_xr30)
+ IPC_STRUCT_TRAITS_MEMBER(image_xb30)
IPC_STRUCT_TRAITS_MEMBER(render_buffer_format_bgra8888)
IPC_STRUCT_TRAITS_MEMBER(occlusion_query)
IPC_STRUCT_TRAITS_MEMBER(occlusion_query_boolean)
@@ -138,6 +140,7 @@ IPC_STRUCT_TRAITS_BEGIN(gpu::Capabilities)
IPC_STRUCT_TRAITS_MEMBER(texture_storage_image)
IPC_STRUCT_TRAITS_MEMBER(supports_oop_raster)
IPC_STRUCT_TRAITS_MEMBER(chromium_gpu_fence)
+ IPC_STRUCT_TRAITS_MEMBER(unpremultiply_and_dither_copy)
IPC_STRUCT_TRAITS_MEMBER(major_version)
IPC_STRUCT_TRAITS_MEMBER(minor_version)
diff --git a/chromium/gpu/ipc/common/gpu_info.mojom b/chromium/gpu/ipc/common/gpu_info.mojom
index bd27a69a558..5d9d4033a36 100644
--- a/chromium/gpu/ipc/common/gpu_info.mojom
+++ b/chromium/gpu/ipc/common/gpu_info.mojom
@@ -19,14 +19,6 @@ struct GpuDevice {
string device_string;
};
-// gpu::CollectInfoResult
-enum CollectInfoResult {
- kCollectInfoNone = 0,
- kCollectInfoSuccess = 1,
- kCollectInfoNonFatalFailure = 2,
- kCollectInfoFatalFailure = 3
-};
-
// gpu::VideoCodecProfile
enum VideoCodecProfile {
VIDEO_CODEC_PROFILE_UNKNOWN = -1,
@@ -105,16 +97,13 @@ struct GpuInfo {
bool software_rendering;
bool direct_rendering;
bool sandboxed;
- int32 process_crash_count;
bool in_process_gpu;
bool passthrough_cmd_decoder;
bool direct_composition;
bool supports_overlays;
bool can_support_threaded_texture_mailbox;
- CollectInfoResult basic_info_state;
- CollectInfoResult context_info_state;
- CollectInfoResult dx_diagnostics_info_state;
- DxDiagNode? dx_diagnostics;
+ [EnableIf=is_win]
+ DxDiagNode dx_diagnostics;
VideoDecodeAcceleratorCapabilities video_decode_accelerator_capabilities;
array<VideoEncodeAcceleratorSupportedProfile>
video_encode_accelerator_supported_profiles;
diff --git a/chromium/gpu/ipc/common/gpu_info_struct_traits.cc b/chromium/gpu/ipc/common/gpu_info_struct_traits.cc
index 4ea3e562209..c97c165ab6c 100644
--- a/chromium/gpu/ipc/common/gpu_info_struct_traits.cc
+++ b/chromium/gpu/ipc/common/gpu_info_struct_traits.cc
@@ -20,46 +20,6 @@ bool StructTraits<gpu::mojom::GpuDeviceDataView, gpu::GPUInfo::GPUDevice>::Read(
}
// static
-gpu::mojom::CollectInfoResult
-EnumTraits<gpu::mojom::CollectInfoResult, gpu::CollectInfoResult>::ToMojom(
- gpu::CollectInfoResult collect_info_result) {
- switch (collect_info_result) {
- case gpu::CollectInfoResult::kCollectInfoNone:
- return gpu::mojom::CollectInfoResult::kCollectInfoNone;
- case gpu::CollectInfoResult::kCollectInfoSuccess:
- return gpu::mojom::CollectInfoResult::kCollectInfoSuccess;
- case gpu::CollectInfoResult::kCollectInfoNonFatalFailure:
- return gpu::mojom::CollectInfoResult::kCollectInfoNonFatalFailure;
- case gpu::CollectInfoResult::kCollectInfoFatalFailure:
- return gpu::mojom::CollectInfoResult::kCollectInfoFatalFailure;
- }
- NOTREACHED() << "Invalid CollectInfoResult value:" << collect_info_result;
- return gpu::mojom::CollectInfoResult::kCollectInfoNone;
-}
-
-// static
-bool EnumTraits<gpu::mojom::CollectInfoResult, gpu::CollectInfoResult>::
- FromMojom(gpu::mojom::CollectInfoResult input,
- gpu::CollectInfoResult* out) {
- switch (input) {
- case gpu::mojom::CollectInfoResult::kCollectInfoNone:
- *out = gpu::CollectInfoResult::kCollectInfoNone;
- return true;
- case gpu::mojom::CollectInfoResult::kCollectInfoSuccess:
- *out = gpu::CollectInfoResult::kCollectInfoSuccess;
- return true;
- case gpu::mojom::CollectInfoResult::kCollectInfoNonFatalFailure:
- *out = gpu::CollectInfoResult::kCollectInfoNonFatalFailure;
- return true;
- case gpu::mojom::CollectInfoResult::kCollectInfoFatalFailure:
- *out = gpu::CollectInfoResult::kCollectInfoFatalFailure;
- return true;
- }
- NOTREACHED() << "Invalid CollectInfoResult value:" << input;
- return false;
-}
-
-// static
gpu::mojom::VideoCodecProfile
EnumTraits<gpu::mojom::VideoCodecProfile, gpu::VideoCodecProfile>::ToMojom(
gpu::VideoCodecProfile video_codec_profile) {
@@ -258,7 +218,6 @@ bool StructTraits<gpu::mojom::GpuInfoDataView, gpu::GPUInfo>::Read(
out->supports_overlays = data.supports_overlays();
out->can_support_threaded_texture_mailbox =
data.can_support_threaded_texture_mailbox();
- out->process_crash_count = data.process_crash_count();
out->jpeg_decode_accelerator_supported =
data.jpeg_decode_accelerator_supported();
@@ -285,10 +244,7 @@ bool StructTraits<gpu::mojom::GpuInfoDataView, gpu::GPUInfo>::Read(
data.ReadGlWsVendor(&out->gl_ws_vendor) &&
data.ReadGlWsVersion(&out->gl_ws_version) &&
data.ReadGlWsExtensions(&out->gl_ws_extensions) &&
- data.ReadBasicInfoState(&out->basic_info_state) &&
- data.ReadContextInfoState(&out->context_info_state) &&
#if defined(OS_WIN)
- data.ReadDxDiagnosticsInfoState(&out->dx_diagnostics_info_state) &&
data.ReadDxDiagnostics(&out->dx_diagnostics) &&
#endif
data.ReadVideoDecodeAcceleratorCapabilities(
diff --git a/chromium/gpu/ipc/common/gpu_info_struct_traits.h b/chromium/gpu/ipc/common/gpu_info_struct_traits.h
index e928acb18b8..28d52e983b1 100644
--- a/chromium/gpu/ipc/common/gpu_info_struct_traits.h
+++ b/chromium/gpu/ipc/common/gpu_info_struct_traits.h
@@ -43,15 +43,6 @@ struct StructTraits<gpu::mojom::GpuDeviceDataView, gpu::GPUInfo::GPUDevice> {
};
template <>
-struct EnumTraits<gpu::mojom::CollectInfoResult, gpu::CollectInfoResult> {
- static gpu::mojom::CollectInfoResult ToMojom(
- gpu::CollectInfoResult collect_info_result);
-
- static bool FromMojom(gpu::mojom::CollectInfoResult input,
- gpu::CollectInfoResult* out);
-};
-
-template <>
struct EnumTraits<gpu::mojom::VideoCodecProfile, gpu::VideoCodecProfile> {
static gpu::mojom::VideoCodecProfile ToMojom(
gpu::VideoCodecProfile video_codec_profile);
@@ -228,10 +219,6 @@ struct StructTraits<gpu::mojom::GpuInfoDataView, gpu::GPUInfo> {
static bool sandboxed(const gpu::GPUInfo& input) { return input.sandboxed; }
- static int process_crash_count(const gpu::GPUInfo& input) {
- return input.process_crash_count;
- }
-
static bool in_process_gpu(const gpu::GPUInfo& input) {
return input.in_process_gpu;
}
@@ -252,33 +239,11 @@ struct StructTraits<gpu::mojom::GpuInfoDataView, gpu::GPUInfo> {
return input.can_support_threaded_texture_mailbox;
}
- static gpu::CollectInfoResult basic_info_state(const gpu::GPUInfo& input) {
- return input.basic_info_state;
- }
-
- static gpu::CollectInfoResult context_info_state(const gpu::GPUInfo& input) {
- return input.context_info_state;
- }
#if defined(OS_WIN)
static const gpu::DxDiagNode& dx_diagnostics(const gpu::GPUInfo& input) {
return input.dx_diagnostics;
}
-#else
- static const base::Optional<gpu::DxDiagNode>& dx_diagnostics(
- const gpu::GPUInfo& input) {
- static const base::Optional<gpu::DxDiagNode> dx_diag_node(base::nullopt);
- return dx_diag_node;
- }
-#endif
-
- static gpu::CollectInfoResult dx_diagnostics_info_state(
- const gpu::GPUInfo& input) {
-#if defined(OS_WIN)
- return input.dx_diagnostics_info_state;
-#else
- return gpu::CollectInfoResult::kCollectInfoNone;
#endif
- }
static const gpu::VideoDecodeAcceleratorCapabilities&
video_decode_accelerator_capabilities(const gpu::GPUInfo& input) {
diff --git a/chromium/gpu/ipc/common/gpu_memory_buffer_support.cc b/chromium/gpu/ipc/common/gpu_memory_buffer_support.cc
index 6d4a1ec5fe6..18bb4fd156b 100644
--- a/chromium/gpu/ipc/common/gpu_memory_buffer_support.cc
+++ b/chromium/gpu/ipc/common/gpu_memory_buffer_support.cc
@@ -40,22 +40,15 @@ bool IsNativeGpuMemoryBufferConfigurationSupported(gfx::BufferFormat format,
switch (usage) {
case gfx::BufferUsage::GPU_READ:
case gfx::BufferUsage::SCANOUT:
+ case gfx::BufferUsage::SCANOUT_CPU_READ_WRITE:
+ case gfx::BufferUsage::GPU_READ_CPU_READ_WRITE:
+ case gfx::BufferUsage::GPU_READ_CPU_READ_WRITE_PERSISTENT:
return format == gfx::BufferFormat::BGRA_8888 ||
format == gfx::BufferFormat::RGBA_8888 ||
format == gfx::BufferFormat::BGRX_8888 ||
format == gfx::BufferFormat::R_8 ||
format == gfx::BufferFormat::RGBA_F16 ||
- format == gfx::BufferFormat::UYVY_422 ||
- format == gfx::BufferFormat::YUV_420_BIPLANAR;
- case gfx::BufferUsage::SCANOUT_CPU_READ_WRITE:
- return format == gfx::BufferFormat::BGRA_8888 ||
- format == gfx::BufferFormat::RGBA_8888 ||
- format == gfx::BufferFormat::BGRX_8888;
- case gfx::BufferUsage::GPU_READ_CPU_READ_WRITE:
- case gfx::BufferUsage::GPU_READ_CPU_READ_WRITE_PERSISTENT:
- return format == gfx::BufferFormat::R_8 ||
- format == gfx::BufferFormat::BGRA_8888 ||
- format == gfx::BufferFormat::RGBA_F16 ||
+ format == gfx::BufferFormat::BGRX_1010102 ||
format == gfx::BufferFormat::UYVY_422 ||
format == gfx::BufferFormat::YUV_420_BIPLANAR;
case gfx::BufferUsage::SCANOUT_VDA_WRITE:
diff --git a/chromium/gpu/ipc/common/gpu_preferences.mojom b/chromium/gpu/ipc/common/gpu_preferences.mojom
index 13fa84087eb..c0cd8762587 100644
--- a/chromium/gpu/ipc/common/gpu_preferences.mojom
+++ b/chromium/gpu/ipc/common/gpu_preferences.mojom
@@ -25,8 +25,6 @@ struct GpuPreferences {
bool disable_gpu_watchdog;
bool gpu_sandbox_start_early;
- bool disable_web_rtc_hw_encoding;
-
// TODO(http://crbug.com/676224) Support preprocessing of mojoms. Following
// variables should be used on Windows only.
VpxDecodeVendors enable_accelerated_vpx_decode;
diff --git a/chromium/gpu/ipc/common/gpu_preferences_struct_traits.h b/chromium/gpu/ipc/common/gpu_preferences_struct_traits.h
index f9245b9bfb1..cc43de1a729 100644
--- a/chromium/gpu/ipc/common/gpu_preferences_struct_traits.h
+++ b/chromium/gpu/ipc/common/gpu_preferences_struct_traits.h
@@ -63,7 +63,6 @@ struct StructTraits<gpu::mojom::GpuPreferencesDataView, gpu::GpuPreferences> {
out->gpu_startup_dialog = prefs.gpu_startup_dialog();
out->disable_gpu_watchdog = prefs.disable_gpu_watchdog();
out->gpu_sandbox_start_early = prefs.gpu_sandbox_start_early();
- out->disable_web_rtc_hw_encoding = prefs.disable_web_rtc_hw_encoding();
if (!prefs.ReadEnableAcceleratedVpxDecode(
&out->enable_accelerated_vpx_decode))
return false;
@@ -142,10 +141,6 @@ struct StructTraits<gpu::mojom::GpuPreferencesDataView, gpu::GpuPreferences> {
return prefs.gpu_sandbox_start_early;
}
- static bool disable_web_rtc_hw_encoding(const gpu::GpuPreferences& prefs) {
- return prefs.disable_web_rtc_hw_encoding;
- }
-
static gpu::GpuPreferences::VpxDecodeVendors enable_accelerated_vpx_decode(
const gpu::GpuPreferences& prefs) {
return prefs.enable_accelerated_vpx_decode;
diff --git a/chromium/gpu/ipc/common/gpu_preferences_util_unittest.cc b/chromium/gpu/ipc/common/gpu_preferences_util_unittest.cc
index 89e6819b7b2..268d76b7d67 100644
--- a/chromium/gpu/ipc/common/gpu_preferences_util_unittest.cc
+++ b/chromium/gpu/ipc/common/gpu_preferences_util_unittest.cc
@@ -2,6 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#include <algorithm>
#include <cstring>
#include "build/build_config.h"
@@ -13,46 +14,79 @@ namespace gpu {
namespace {
-class ScopedGpuPreferences {
- public:
- ScopedGpuPreferences() {
- // To make sure paddings are zeroed so we can use memcmp() in the tests.
- memset(buffer_, 0, sizeof(buffer_));
- prefs_ = new (buffer_) GpuPreferences();
- }
-
- ~ScopedGpuPreferences() { prefs_->~GpuPreferences(); }
-
- GpuPreferences& Ref() { return *prefs_; }
-
- private:
- GpuPreferences* prefs_;
- alignas(GpuPreferences) char buffer_[sizeof(GpuPreferences)];
-};
+void CheckGpuPreferencesEqual(GpuPreferences left, GpuPreferences right) {
+ EXPECT_EQ(left.single_process, right.single_process);
+ EXPECT_EQ(left.in_process_gpu, right.in_process_gpu);
+ EXPECT_EQ(left.disable_accelerated_video_decode,
+ right.disable_accelerated_video_decode);
+ EXPECT_EQ(left.disable_accelerated_video_encode,
+ right.disable_accelerated_video_encode);
+ EXPECT_EQ(left.gpu_startup_dialog, right.gpu_startup_dialog);
+ EXPECT_EQ(left.disable_gpu_watchdog, right.disable_gpu_watchdog);
+ EXPECT_EQ(left.gpu_sandbox_start_early, right.gpu_sandbox_start_early);
+ EXPECT_EQ(left.enable_accelerated_vpx_decode,
+ right.enable_accelerated_vpx_decode);
+ EXPECT_EQ(left.enable_low_latency_dxva, right.enable_low_latency_dxva);
+ EXPECT_EQ(left.enable_zero_copy_dxgi_video,
+ right.enable_zero_copy_dxgi_video);
+ EXPECT_EQ(left.enable_nv12_dxgi_video, right.enable_nv12_dxgi_video);
+ EXPECT_EQ(left.enable_media_foundation_vea_on_windows7,
+ right.enable_media_foundation_vea_on_windows7);
+ EXPECT_EQ(left.disable_software_rasterizer,
+ right.disable_software_rasterizer);
+ EXPECT_EQ(left.log_gpu_control_list_decisions,
+ right.log_gpu_control_list_decisions);
+ EXPECT_EQ(left.compile_shader_always_succeeds,
+ right.compile_shader_always_succeeds);
+ EXPECT_EQ(left.disable_gl_error_limit, right.disable_gl_error_limit);
+ EXPECT_EQ(left.disable_glsl_translator, right.disable_glsl_translator);
+ EXPECT_EQ(left.disable_shader_name_hashing,
+ right.disable_shader_name_hashing);
+ EXPECT_EQ(left.enable_gpu_command_logging, right.enable_gpu_command_logging);
+ EXPECT_EQ(left.enable_gpu_debugging, right.enable_gpu_debugging);
+ EXPECT_EQ(left.enable_gpu_service_logging_gpu,
+ right.enable_gpu_service_logging_gpu);
+ EXPECT_EQ(left.enable_gpu_driver_debug_logging,
+ right.enable_gpu_driver_debug_logging);
+ EXPECT_EQ(left.disable_gpu_program_cache, right.disable_gpu_program_cache);
+ EXPECT_EQ(left.enforce_gl_minimums, right.enforce_gl_minimums);
+ EXPECT_EQ(left.force_gpu_mem_available, right.force_gpu_mem_available);
+ EXPECT_EQ(left.gpu_program_cache_size, right.gpu_program_cache_size);
+ EXPECT_EQ(left.disable_gpu_shader_disk_cache,
+ right.disable_gpu_shader_disk_cache);
+ EXPECT_EQ(left.enable_threaded_texture_mailboxes,
+ right.enable_threaded_texture_mailboxes);
+ EXPECT_EQ(left.gl_shader_interm_output, right.gl_shader_interm_output);
+ EXPECT_EQ(left.emulate_shader_precision, right.emulate_shader_precision);
+ EXPECT_EQ(left.enable_raster_decoder, right.enable_raster_decoder);
+ EXPECT_EQ(left.enable_gpu_service_logging, right.enable_gpu_service_logging);
+ EXPECT_EQ(left.enable_gpu_service_tracing, right.enable_gpu_service_tracing);
+ EXPECT_EQ(left.use_passthrough_cmd_decoder,
+ right.use_passthrough_cmd_decoder);
+ EXPECT_EQ(left.disable_biplanar_gpu_memory_buffers_for_video_frames,
+ right.disable_biplanar_gpu_memory_buffers_for_video_frames);
+ EXPECT_EQ(left.texture_target_exception_list,
+ right.texture_target_exception_list);
+ EXPECT_EQ(left.disable_gpu_driver_bug_workarounds,
+ right.disable_gpu_driver_bug_workarounds);
+ EXPECT_EQ(left.ignore_gpu_blacklist, right.ignore_gpu_blacklist);
+}
} // namespace
-// TODO(https://crbug.com/799458): Fix this test.
-#if defined(OS_WIN)
-#define MAYBE_EncodeDecode DISABLED_EncodeDecode
-#else
-#define MAYBE_EncodeDecode EncodeDecode
-#endif
-TEST(GpuPreferencesUtilTest, MAYBE_EncodeDecode) {
+TEST(GpuPreferencesUtilTest, EncodeDecode) {
{ // Testing default values.
- ScopedGpuPreferences scoped_input_prefs, scoped_decoded_prefs;
- GpuPreferences& input_prefs = scoped_input_prefs.Ref();
- GpuPreferences& decoded_prefs = scoped_decoded_prefs.Ref();
+ GpuPreferences input_prefs;
+ GpuPreferences decoded_prefs;
std::string encoded = GpuPreferencesToSwitchValue(input_prefs);
bool flag = SwitchValueToGpuPreferences(encoded, &decoded_prefs);
EXPECT_TRUE(flag);
- EXPECT_EQ(0, memcmp(&input_prefs, &decoded_prefs, sizeof(input_prefs)));
+ CheckGpuPreferencesEqual(input_prefs, decoded_prefs);
}
{ // Change all fields to non default values.
- ScopedGpuPreferences scoped_input_prefs, scoped_decoded_prefs;
- GpuPreferences& input_prefs = scoped_input_prefs.Ref();
- GpuPreferences& decoded_prefs = scoped_decoded_prefs.Ref();
+ GpuPreferences input_prefs;
+ GpuPreferences decoded_prefs;
GpuPreferences default_prefs;
mojom::GpuPreferences prefs_mojom;
@@ -76,7 +110,6 @@ TEST(GpuPreferencesUtilTest, MAYBE_EncodeDecode) {
GPU_PREFERENCES_FIELD(gpu_startup_dialog, true)
GPU_PREFERENCES_FIELD(disable_gpu_watchdog, true)
GPU_PREFERENCES_FIELD(gpu_sandbox_start_early, true)
- GPU_PREFERENCES_FIELD(disable_web_rtc_hw_encoding, true)
GPU_PREFERENCES_FIELD(enable_accelerated_vpx_decode,
GpuPreferences::VPX_VENDOR_AMD)
GPU_PREFERENCES_FIELD(enable_low_latency_dxva, false)
@@ -102,6 +135,7 @@ TEST(GpuPreferencesUtilTest, MAYBE_EncodeDecode) {
GPU_PREFERENCES_FIELD(enable_threaded_texture_mailboxes, true)
GPU_PREFERENCES_FIELD(gl_shader_interm_output, true)
GPU_PREFERENCES_FIELD(emulate_shader_precision, true)
+ GPU_PREFERENCES_FIELD(enable_raster_decoder, true)
GPU_PREFERENCES_FIELD(enable_gpu_service_logging, true)
GPU_PREFERENCES_FIELD(enable_gpu_service_tracing, true)
GPU_PREFERENCES_FIELD(use_passthrough_cmd_decoder, true)
@@ -110,11 +144,16 @@ TEST(GpuPreferencesUtilTest, MAYBE_EncodeDecode) {
GPU_PREFERENCES_FIELD(disable_gpu_driver_bug_workarounds, true)
GPU_PREFERENCES_FIELD(ignore_gpu_blacklist, true)
+ input_prefs.texture_target_exception_list.emplace_back(
+ gfx::BufferUsage::SCANOUT, gfx::BufferFormat::RGBA_8888);
+ input_prefs.texture_target_exception_list.emplace_back(
+ gfx::BufferUsage::GPU_READ, gfx::BufferFormat::BGRA_8888);
+
// Make sure every field is encoded/decoded.
std::string encoded = GpuPreferencesToSwitchValue(input_prefs);
bool flag = SwitchValueToGpuPreferences(encoded, &decoded_prefs);
EXPECT_TRUE(flag);
- EXPECT_EQ(0, memcmp(&input_prefs, &decoded_prefs, sizeof(input_prefs)));
+ CheckGpuPreferencesEqual(input_prefs, decoded_prefs);
}
}
diff --git a/chromium/gpu/ipc/common/struct_traits_unittest.cc b/chromium/gpu/ipc/common/struct_traits_unittest.cc
index ddf0a7944a4..7c30aebb965 100644
--- a/chromium/gpu/ipc/common/struct_traits_unittest.cc
+++ b/chromium/gpu/ipc/common/struct_traits_unittest.cc
@@ -150,18 +150,11 @@ TEST_F(StructTraitsTest, GpuInfo) {
const bool software_rendering = true;
const bool direct_rendering = true;
const bool sandboxed = true;
- const int process_crash_count = 0xdead;
const bool in_process_gpu = true;
const bool passthrough_cmd_decoder = true;
const bool direct_composition = true;
const bool supports_overlays = true;
- const gpu::CollectInfoResult basic_info_state =
- gpu::CollectInfoResult::kCollectInfoSuccess;
- const gpu::CollectInfoResult context_info_state =
- gpu::CollectInfoResult::kCollectInfoSuccess;
#if defined(OS_WIN)
- const gpu::CollectInfoResult dx_diagnostics_info_state =
- gpu::CollectInfoResult::kCollectInfoSuccess;
const DxDiagNode dx_diagnostics;
#endif
const gpu::VideoDecodeAcceleratorCapabilities
@@ -199,15 +192,11 @@ TEST_F(StructTraitsTest, GpuInfo) {
input.software_rendering = software_rendering;
input.direct_rendering = direct_rendering;
input.sandboxed = sandboxed;
- input.process_crash_count = process_crash_count;
input.in_process_gpu = in_process_gpu;
input.passthrough_cmd_decoder = passthrough_cmd_decoder;
input.direct_composition = direct_composition;
input.supports_overlays = supports_overlays;
- input.basic_info_state = basic_info_state;
- input.context_info_state = context_info_state;
#if defined(OS_WIN)
- input.dx_diagnostics_info_state = dx_diagnostics_info_state;
input.dx_diagnostics = dx_diagnostics;
#endif
input.video_decode_accelerator_capabilities =
@@ -261,15 +250,11 @@ TEST_F(StructTraitsTest, GpuInfo) {
EXPECT_EQ(software_rendering, output.software_rendering);
EXPECT_EQ(direct_rendering, output.direct_rendering);
EXPECT_EQ(sandboxed, output.sandboxed);
- EXPECT_EQ(process_crash_count, output.process_crash_count);
EXPECT_EQ(in_process_gpu, output.in_process_gpu);
EXPECT_EQ(passthrough_cmd_decoder, output.passthrough_cmd_decoder);
EXPECT_EQ(direct_composition, output.direct_composition);
EXPECT_EQ(supports_overlays, output.supports_overlays);
- EXPECT_EQ(basic_info_state, output.basic_info_state);
- EXPECT_EQ(context_info_state, output.context_info_state);
#if defined(OS_WIN)
- EXPECT_EQ(output.dx_diagnostics_info_state, dx_diagnostics_info_state);
EXPECT_EQ(dx_diagnostics.values, output.dx_diagnostics.values);
#endif
EXPECT_EQ(output.video_decode_accelerator_capabilities.flags,
diff --git a/chromium/gpu/ipc/gpu_in_process_thread_service.cc b/chromium/gpu/ipc/gpu_in_process_thread_service.cc
index 7115c15302d..4fc78253abc 100644
--- a/chromium/gpu/ipc/gpu_in_process_thread_service.cc
+++ b/chromium/gpu/ipc/gpu_in_process_thread_service.cc
@@ -10,28 +10,31 @@
namespace gpu {
GpuInProcessThreadService::GpuInProcessThreadService(
+ bool use_virtualized_gl_context,
scoped_refptr<base::SingleThreadTaskRunner> task_runner,
gpu::SyncPointManager* sync_point_manager,
gpu::MailboxManager* mailbox_manager,
scoped_refptr<gl::GLShareGroup> share_group,
- const GpuFeatureInfo& gpu_feature_info)
- : gpu::InProcessCommandBuffer::Service(GpuPreferences(),
+ const GpuFeatureInfo& gpu_feature_info,
+ const GpuPreferences& gpu_preferences)
+ : gpu::InProcessCommandBuffer::Service(gpu_preferences,
mailbox_manager,
share_group,
gpu_feature_info),
+ use_virtualized_gl_context_(use_virtualized_gl_context),
task_runner_(task_runner),
sync_point_manager_(sync_point_manager) {}
-void GpuInProcessThreadService::ScheduleTask(const base::Closure& task) {
- task_runner_->PostTask(FROM_HERE, task);
+void GpuInProcessThreadService::ScheduleTask(base::OnceClosure task) {
+ task_runner_->PostTask(FROM_HERE, std::move(task));
}
-void GpuInProcessThreadService::ScheduleDelayedWork(const base::Closure& task) {
- task_runner_->PostDelayedTask(FROM_HERE, task,
+void GpuInProcessThreadService::ScheduleDelayedWork(base::OnceClosure task) {
+ task_runner_->PostDelayedTask(FROM_HERE, std::move(task),
base::TimeDelta::FromMilliseconds(2));
}
bool GpuInProcessThreadService::UseVirtualizedGLContexts() {
- return true;
+ return use_virtualized_gl_context_;
}
gpu::SyncPointManager* GpuInProcessThreadService::sync_point_manager() {
diff --git a/chromium/gpu/ipc/gpu_in_process_thread_service.h b/chromium/gpu/ipc/gpu_in_process_thread_service.h
index a9bcece67c3..714d9745ab2 100644
--- a/chromium/gpu/ipc/gpu_in_process_thread_service.h
+++ b/chromium/gpu/ipc/gpu_in_process_thread_service.h
@@ -21,15 +21,17 @@ class GL_IN_PROCESS_CONTEXT_EXPORT GpuInProcessThreadService
public base::RefCountedThreadSafe<GpuInProcessThreadService> {
public:
GpuInProcessThreadService(
+ bool use_virtualized_gl_context,
scoped_refptr<base::SingleThreadTaskRunner> task_runner,
gpu::SyncPointManager* sync_point_manager,
gpu::MailboxManager* mailbox_manager,
scoped_refptr<gl::GLShareGroup> share_group,
- const GpuFeatureInfo& gpu_feature_info);
+ const GpuFeatureInfo& gpu_feature_info,
+ const GpuPreferences& gpu_preferences);
// gpu::InProcessCommandBuffer::Service implementation.
- void ScheduleTask(const base::Closure& task) override;
- void ScheduleDelayedWork(const base::Closure& task) override;
+ void ScheduleTask(base::OnceClosure task) override;
+ void ScheduleDelayedWork(base::OnceClosure task) override;
bool UseVirtualizedGLContexts() override;
gpu::SyncPointManager* sync_point_manager() override;
void AddRef() const override;
@@ -41,6 +43,8 @@ class GL_IN_PROCESS_CONTEXT_EXPORT GpuInProcessThreadService
~GpuInProcessThreadService() override;
+ const bool use_virtualized_gl_context_;
+
scoped_refptr<base::SingleThreadTaskRunner> task_runner_;
gpu::SyncPointManager* sync_point_manager_; // Non-owning.
diff --git a/chromium/gpu/ipc/host/gpu_memory_buffer_support.cc b/chromium/gpu/ipc/host/gpu_memory_buffer_support.cc
index 78c7c82c458..e0c5420dc7e 100644
--- a/chromium/gpu/ipc/host/gpu_memory_buffer_support.cc
+++ b/chromium/gpu/ipc/host/gpu_memory_buffer_support.cc
@@ -40,15 +40,24 @@ GpuMemoryBufferConfigurationSet GetNativeGpuMemoryBufferConfigurations() {
defined(OS_ANDROID)
if (AreNativeGpuMemoryBuffersEnabled()) {
const gfx::BufferFormat kNativeFormats[] = {
- gfx::BufferFormat::R_8, gfx::BufferFormat::RG_88,
- gfx::BufferFormat::R_16, gfx::BufferFormat::BGR_565,
- gfx::BufferFormat::RGBA_4444, gfx::BufferFormat::RGBA_8888,
- gfx::BufferFormat::BGRA_8888, gfx::BufferFormat::BGRX_1010102,
- gfx::BufferFormat::RGBA_F16, gfx::BufferFormat::UYVY_422,
- gfx::BufferFormat::YVU_420, gfx::BufferFormat::YUV_420_BIPLANAR};
+ gfx::BufferFormat::R_8,
+ gfx::BufferFormat::RG_88,
+ gfx::BufferFormat::R_16,
+ gfx::BufferFormat::BGR_565,
+ gfx::BufferFormat::RGBA_4444,
+ gfx::BufferFormat::RGBA_8888,
+ gfx::BufferFormat::BGRA_8888,
+ gfx::BufferFormat::BGRX_1010102,
+ gfx::BufferFormat::RGBX_1010102,
+ gfx::BufferFormat::RGBA_F16,
+ gfx::BufferFormat::UYVY_422,
+ gfx::BufferFormat::YVU_420,
+ gfx::BufferFormat::YUV_420_BIPLANAR};
const gfx::BufferUsage kNativeUsages[] = {
- gfx::BufferUsage::GPU_READ, gfx::BufferUsage::SCANOUT,
+ gfx::BufferUsage::GPU_READ,
+ gfx::BufferUsage::SCANOUT,
gfx::BufferUsage::SCANOUT_CAMERA_READ_WRITE,
+ gfx::BufferUsage::SCANOUT_CPU_READ_WRITE,
gfx::BufferUsage::GPU_READ_CPU_READ_WRITE,
gfx::BufferUsage::GPU_READ_CPU_READ_WRITE_PERSISTENT};
for (auto format : kNativeFormats) {
diff --git a/chromium/gpu/ipc/in_process_command_buffer.cc b/chromium/gpu/ipc/in_process_command_buffer.cc
index 229618261e2..b19f9a4fe8e 100644
--- a/chromium/gpu/ipc/in_process_command_buffer.cc
+++ b/chromium/gpu/ipc/in_process_command_buffer.cc
@@ -13,6 +13,7 @@
#include "base/atomic_sequence_num.h"
#include "base/bind.h"
#include "base/bind_helpers.h"
+#include "base/callback_helpers.h"
#include "base/command_line.h"
#include "base/containers/queue.h"
#include "base/lazy_instance.h"
@@ -72,11 +73,15 @@ base::AtomicSequenceNumber g_next_command_buffer_id;
base::AtomicSequenceNumber g_next_image_id;
template <typename T>
-static void RunTaskWithResult(base::Callback<T(void)> task,
- T* result,
- base::WaitableEvent* completion) {
- *result = task.Run();
- completion->Signal();
+base::OnceClosure WrapTaskWithResult(base::OnceCallback<T(void)> task,
+ T* result,
+ base::WaitableEvent* completion) {
+ auto wrapper = [](base::OnceCallback<T(void)> task, T* result,
+ base::WaitableEvent* completion) {
+ *result = std::move(task).Run();
+ completion->Signal();
+ };
+ return base::BindOnce(wrapper, std::move(task), result, completion);
}
class GpuInProcessThreadHolder : public base::Thread {
@@ -95,9 +100,10 @@ class GpuInProcessThreadHolder : public base::Thread {
const scoped_refptr<InProcessCommandBuffer::Service>& GetGpuThreadService() {
if (!gpu_thread_service_) {
- gpu_thread_service_ = new GpuInProcessThreadService(
- task_runner(), sync_point_manager_.get(), nullptr, nullptr,
- gpu_feature_info_);
+ gpu_thread_service_ = base::MakeRefCounted<GpuInProcessThreadService>(
+ true /* use_virtualized_gl_context */, task_runner(),
+ sync_point_manager_.get(), nullptr, nullptr, gpu_feature_info_,
+ GpuPreferences());
}
return gpu_thread_service_;
}
@@ -137,6 +143,8 @@ scoped_refptr<InProcessCommandBuffer::Service> GetInitialService(
} // anonyous namespace
+const int InProcessCommandBuffer::kGpuMemoryBufferClientId = 1;
+
InProcessCommandBuffer::Service::Service(
const GpuPreferences& gpu_preferences,
MailboxManager* mailbox_manager,
@@ -270,16 +278,16 @@ gpu::ContextResult InProcessCommandBuffer::Initialize(
InitializeOnGpuThreadParams params(is_offscreen, window, attribs,
&capabilities, share_group, image_factory);
- base::Callback<gpu::ContextResult(void)> init_task =
- base::Bind(&InProcessCommandBuffer::InitializeOnGpuThread,
- base::Unretained(this), params);
+ base::OnceCallback<gpu::ContextResult(void)> init_task =
+ base::BindOnce(&InProcessCommandBuffer::InitializeOnGpuThread,
+ base::Unretained(this), params);
base::WaitableEvent completion(
base::WaitableEvent::ResetPolicy::MANUAL,
base::WaitableEvent::InitialState::NOT_SIGNALED);
gpu::ContextResult result = gpu::ContextResult::kSuccess;
- QueueTask(true, base::Bind(&RunTaskWithResult<gpu::ContextResult>, init_task,
- &result, &completion));
+ QueueOnceTask(true,
+ WrapTaskWithResult(std::move(init_task), &result, &completion));
completion.Wait();
if (result == gpu::ContextResult::kSuccess)
@@ -314,7 +322,7 @@ gpu::ContextResult InProcessCommandBuffer::InitializeOnGpuThread(
service_->framebuffer_completeness_cache(), feature_info,
bind_generates_resource, service_->image_manager(),
nullptr /* image_factory */, nullptr /* progress_reporter */,
- GpuFeatureInfo(), service_->discardable_manager());
+ service_->gpu_feature_info(), service_->discardable_manager());
command_buffer_ = std::make_unique<CommandBufferService>(
this, transfer_buffer_manager_.get());
@@ -352,6 +360,8 @@ gpu::ContextResult InProcessCommandBuffer::InitializeOnGpuThread(
GetNamespaceID(), GetCommandBufferID(),
sync_point_order_data_->sequence_id());
+ // TODO(crbug.com/811979): Unify logic for using virtualized contexts in
+ // InProcessCommandBuffer and GLES2CommandBufferStub.
use_virtualized_gl_context_ =
service_->UseVirtualizedGLContexts() || decoder_->GetContextGroup()
->feature_info()
@@ -440,10 +450,10 @@ void InProcessCommandBuffer::Destroy() {
base::WaitableEvent::ResetPolicy::MANUAL,
base::WaitableEvent::InitialState::NOT_SIGNALED);
bool result = false;
- base::Callback<bool(void)> destroy_task = base::Bind(
+ base::OnceCallback<bool(void)> destroy_task = base::BindOnce(
&InProcessCommandBuffer::DestroyOnGpuThread, base::Unretained(this));
- QueueTask(true, base::Bind(&RunTaskWithResult<bool>, destroy_task, &result,
- &completion));
+ QueueOnceTask(
+ true, WrapTaskWithResult(std::move(destroy_task), &result, &completion));
completion.Wait();
}
@@ -460,8 +470,14 @@ bool InProcessCommandBuffer::DestroyOnGpuThread() {
decoder_.reset();
}
command_buffer_.reset();
- context_ = nullptr;
+
+ // Destroy the surface with the context current, some surface destructors make
+ // GL calls.
+ if (context_)
+ context_->MakeCurrent(surface_.get());
surface_ = nullptr;
+
+ context_ = nullptr;
if (sync_point_order_data_) {
sync_point_order_data_->Destroy();
sync_point_order_data_ = nullptr;
@@ -510,10 +526,10 @@ void InProcessCommandBuffer::OnContextLost() {
gpu_control_client_->OnGpuControlLostContext();
}
-void InProcessCommandBuffer::QueueTask(bool out_of_order,
- const base::Closure& task) {
+void InProcessCommandBuffer::QueueOnceTask(bool out_of_order,
+ base::OnceClosure task) {
if (out_of_order) {
- service_->ScheduleTask(task);
+ service_->ScheduleTask(std::move(task));
return;
}
// Release the |task_queue_lock_| before calling ScheduleTask because
@@ -521,7 +537,23 @@ void InProcessCommandBuffer::QueueTask(bool out_of_order,
uint32_t order_num = sync_point_order_data_->GenerateUnprocessedOrderNumber();
{
base::AutoLock lock(task_queue_lock_);
- task_queue_.push(std::make_unique<GpuTask>(task, order_num));
+ std::unique_ptr<GpuTask> gpu_task =
+ std::make_unique<GpuTask>(std::move(task), order_num);
+ task_queue_.push(std::move(gpu_task));
+ }
+ service_->ScheduleTask(base::BindOnce(
+ &InProcessCommandBuffer::ProcessTasksOnGpuThread, gpu_thread_weak_ptr_));
+}
+
+void InProcessCommandBuffer::QueueRepeatableTask(base::RepeatingClosure task) {
+ // Release the |task_queue_lock_| before calling ScheduleTask because
+ // the callback may get called immediately and attempt to acquire the lock.
+ uint32_t order_num = sync_point_order_data_->GenerateUnprocessedOrderNumber();
+ {
+ base::AutoLock lock(task_queue_lock_);
+ std::unique_ptr<GpuTask> gpu_task =
+ std::make_unique<GpuTask>(std::move(task), order_num);
+ task_queue_.push(std::move(gpu_task));
}
service_->ScheduleTask(base::Bind(
&InProcessCommandBuffer::ProcessTasksOnGpuThread, gpu_thread_weak_ptr_));
@@ -536,16 +568,17 @@ void InProcessCommandBuffer::ProcessTasksOnGpuThread() {
if (task_queue_.empty())
break;
GpuTask* task = task_queue_.front().get();
- sync_point_order_data_->BeginProcessingOrderNumber(task->order_number);
- task->callback.Run();
+ sync_point_order_data_->BeginProcessingOrderNumber(task->order_number());
+ task->Run();
if (!command_buffer_->scheduled() &&
!service_->BlockThreadOnWaitSyncToken()) {
- sync_point_order_data_->PauseProcessingOrderNumber(task->order_number);
+ sync_point_order_data_->PauseProcessingOrderNumber(task->order_number());
// Don't pop the task if it was preempted - it may have been preempted, so
// we need to execute it again later.
+ DCHECK(task->is_repeatable());
return;
}
- sync_point_order_data_->FinishProcessingOrderNumber(task->order_number);
+ sync_point_order_data_->FinishProcessingOrderNumber(task->order_number());
task_queue_.pop();
}
}
@@ -625,11 +658,11 @@ void InProcessCommandBuffer::Flush(int32_t put_offset) {
return;
last_put_offset_ = put_offset;
- base::Closure task =
- base::Bind(&InProcessCommandBuffer::FlushOnGpuThread,
- gpu_thread_weak_ptr_, put_offset, snapshot_requested_);
+ base::RepeatingClosure task = base::BindRepeating(
+ &InProcessCommandBuffer::FlushOnGpuThread, gpu_thread_weak_ptr_,
+ put_offset, snapshot_requested_);
snapshot_requested_ = false;
- QueueTask(false, task);
+ QueueRepeatableTask(std::move(task));
flushed_fence_sync_release_ = next_fence_sync_release_ - 1;
}
@@ -673,10 +706,10 @@ void InProcessCommandBuffer::SetGetBuffer(int32_t shm_id) {
base::WaitableEvent completion(
base::WaitableEvent::ResetPolicy::MANUAL,
base::WaitableEvent::InitialState::NOT_SIGNALED);
- base::Closure task =
- base::Bind(&InProcessCommandBuffer::SetGetBufferOnGpuThread,
- base::Unretained(this), shm_id, &completion);
- QueueTask(false, task);
+ base::OnceClosure task =
+ base::BindOnce(&InProcessCommandBuffer::SetGetBufferOnGpuThread,
+ base::Unretained(this), shm_id, &completion);
+ QueueOnceTask(false, std::move(task));
completion.Wait();
last_put_offset_ = 0;
@@ -701,11 +734,11 @@ scoped_refptr<Buffer> InProcessCommandBuffer::CreateTransferBuffer(
void InProcessCommandBuffer::DestroyTransferBuffer(int32_t id) {
CheckSequencedThread();
- base::Closure task =
- base::Bind(&InProcessCommandBuffer::DestroyTransferBufferOnGpuThread,
- base::Unretained(this), id);
+ base::OnceClosure task =
+ base::BindOnce(&InProcessCommandBuffer::DestroyTransferBufferOnGpuThread,
+ base::Unretained(this), id);
- QueueTask(false, task);
+ QueueOnceTask(false, std::move(task));
}
void InProcessCommandBuffer::DestroyTransferBufferOnGpuThread(int32_t id) {
@@ -758,13 +791,14 @@ int32_t InProcessCommandBuffer::CreateImage(ClientBuffer buffer,
DCHECK_EQ(fence_sync - 1, flushed_fence_sync_release_);
}
- QueueTask(false, base::Bind(&InProcessCommandBuffer::CreateImageOnGpuThread,
- base::Unretained(this), new_id, handle,
- gfx::Size(base::checked_cast<int>(width),
- base::checked_cast<int>(height)),
- gpu_memory_buffer->GetFormat(),
- base::checked_cast<uint32_t>(internalformat),
- fence_sync));
+ QueueOnceTask(
+ false,
+ base::BindOnce(&InProcessCommandBuffer::CreateImageOnGpuThread,
+ base::Unretained(this), new_id, handle,
+ gfx::Size(base::checked_cast<int>(width),
+ base::checked_cast<int>(height)),
+ gpu_memory_buffer->GetFormat(),
+ base::checked_cast<uint32_t>(internalformat), fence_sync));
if (fence_sync) {
flushed_fence_sync_release_ = fence_sync;
@@ -814,12 +848,9 @@ void InProcessCommandBuffer::CreateImageOnGpuThread(
return;
}
- // Note: this assumes that client ID is always 0.
- const int kClientId = 0;
-
scoped_refptr<gl::GLImage> image =
image_factory_->CreateImageForGpuMemoryBuffer(
- handle, size, format, internalformat, kClientId,
+ handle, size, format, internalformat, kGpuMemoryBufferClientId,
kNullSurfaceHandle);
if (!image.get()) {
LOG(ERROR) << "Failed to create image for buffer.";
@@ -838,8 +869,9 @@ void InProcessCommandBuffer::CreateImageOnGpuThread(
void InProcessCommandBuffer::DestroyImage(int32_t id) {
CheckSequencedThread();
- QueueTask(false, base::Bind(&InProcessCommandBuffer::DestroyImageOnGpuThread,
- base::Unretained(this), id));
+ QueueOnceTask(false,
+ base::BindOnce(&InProcessCommandBuffer::DestroyImageOnGpuThread,
+ base::Unretained(this), id));
}
void InProcessCommandBuffer::DestroyImageOnGpuThread(int32_t id) {
@@ -940,30 +972,37 @@ void InProcessCommandBuffer::OnRescheduleAfterFinished() {
void InProcessCommandBuffer::SignalSyncTokenOnGpuThread(
const SyncToken& sync_token,
- const base::Closure& callback) {
- if (!sync_point_client_state_->Wait(sync_token, WrapCallback(callback)))
- callback.Run();
+ base::OnceClosure callback) {
+ base::RepeatingClosure maybe_pass_callback =
+ base::AdaptCallbackForRepeating(WrapCallback(std::move(callback)));
+ if (!sync_point_client_state_->Wait(sync_token, maybe_pass_callback)) {
+ maybe_pass_callback.Run();
+ }
}
void InProcessCommandBuffer::SignalQuery(unsigned query_id,
- const base::Closure& callback) {
+ base::OnceClosure callback) {
CheckSequencedThread();
- QueueTask(false, base::Bind(&InProcessCommandBuffer::SignalQueryOnGpuThread,
- base::Unretained(this), query_id,
- WrapCallback(callback)));
+ QueueOnceTask(false,
+ base::BindOnce(&InProcessCommandBuffer::SignalQueryOnGpuThread,
+ base::Unretained(this), query_id,
+ WrapCallback(std::move(callback))));
}
void InProcessCommandBuffer::SignalQueryOnGpuThread(
unsigned query_id,
- const base::Closure& callback) {
- gles2::QueryManager* query_manager_ = decoder_->GetQueryManager();
- DCHECK(query_manager_);
+ base::OnceClosure callback) {
+ gles2::QueryManager* query_manager = decoder_->GetQueryManager();
+ if (query_manager) {
+ gles2::QueryManager::Query* query = query_manager->GetQuery(query_id);
+ if (query) {
+ query->AddCallback(base::AdaptCallbackForRepeating(std::move(callback)));
+ return;
+ }
+ }
- gles2::QueryManager::Query* query = query_manager_->GetQuery(query_id);
- if (!query)
- callback.Run();
- else
- query->AddCallback(callback);
+ // Something went wrong, run callback immediately.
+ std::move(callback).Run();
}
void InProcessCommandBuffer::CreateGpuFence(uint32_t gpu_fence_id,
@@ -976,11 +1015,9 @@ void InProcessCommandBuffer::CreateGpuFence(uint32_t gpu_fence_id,
gfx::GpuFenceHandle handle =
gfx::CloneHandleForIPC(gpu_fence->GetGpuFenceHandle());
- // TODO(crbug.com/789349): Should be base::BindOnce, but QueueTask requires a
- // RepeatingClosure.
- QueueTask(false, base::BindRepeating(
- &InProcessCommandBuffer::CreateGpuFenceOnGpuThread,
- base::Unretained(this), gpu_fence_id, handle));
+ QueueOnceTask(
+ false, base::BindOnce(&InProcessCommandBuffer::CreateGpuFenceOnGpuThread,
+ base::Unretained(this), gpu_fence_id, handle));
}
void InProcessCommandBuffer::CreateGpuFenceOnGpuThread(
@@ -1007,17 +1044,13 @@ void InProcessCommandBuffer::GetGpuFence(
uint32_t gpu_fence_id,
base::OnceCallback<void(std::unique_ptr<gfx::GpuFence>)> callback) {
CheckSequencedThread();
- // TODO(crbug.com/789349): QueueTask requires a RepeatingClosure,
- // but it's actually single use, hence AdaptCallbackForRepeating.
- // Cf. WrapCallback, which we can't use directly since our callback
- // still needs an argument, so it's not a Closure.
auto task_runner = base::ThreadTaskRunnerHandle::IsSet()
? base::ThreadTaskRunnerHandle::Get()
: nullptr;
- QueueTask(false, base::AdaptCallbackForRepeating(base::BindOnce(
- &InProcessCommandBuffer::GetGpuFenceOnGpuThread,
- base::Unretained(this), gpu_fence_id, task_runner,
- std::move(callback))));
+ QueueOnceTask(
+ false, base::BindOnce(&InProcessCommandBuffer::GetGpuFenceOnGpuThread,
+ base::Unretained(this), gpu_fence_id, task_runner,
+ std::move(callback)));
}
void InProcessCommandBuffer::GetGpuFenceOnGpuThread(
@@ -1084,12 +1117,12 @@ bool InProcessCommandBuffer::IsFenceSyncReleased(uint64_t release) {
}
void InProcessCommandBuffer::SignalSyncToken(const SyncToken& sync_token,
- const base::Closure& callback) {
+ base::OnceClosure callback) {
CheckSequencedThread();
- QueueTask(
- false,
- base::Bind(&InProcessCommandBuffer::SignalSyncTokenOnGpuThread,
- base::Unretained(this), sync_token, WrapCallback(callback)));
+ QueueOnceTask(
+ false, base::BindOnce(&InProcessCommandBuffer::SignalSyncTokenOnGpuThread,
+ base::Unretained(this), sync_token,
+ WrapCallback(std::move(callback))));
}
void InProcessCommandBuffer::WaitSyncTokenHint(const SyncToken& sync_token) {}
@@ -1110,6 +1143,9 @@ void InProcessCommandBuffer::DidCreateAcceleratedSurfaceChildWindow(
// In the browser process call ::SetParent() directly.
if (!gpu_channel_manager_delegate_) {
::SetParent(child_window, parent_window);
+ // Move D3D window behind Chrome's window to avoid losing some messages.
+ ::SetWindowPos(child_window, HWND_BOTTOM, 0, 0, 0, 0,
+ SWP_NOMOVE | SWP_NOSIZE);
return;
}
@@ -1223,42 +1259,55 @@ namespace {
void PostCallback(
const scoped_refptr<base::SingleThreadTaskRunner>& task_runner,
- const base::Closure& callback) {
+ base::OnceClosure callback) {
// The task_runner.get() check is to support using InProcessCommandBuffer on
// a thread without a message loop.
if (task_runner.get() && !task_runner->BelongsToCurrentThread()) {
- task_runner->PostTask(FROM_HERE, callback);
+ task_runner->PostTask(FROM_HERE, std::move(callback));
} else {
- callback.Run();
+ std::move(callback).Run();
}
}
-void RunOnTargetThread(std::unique_ptr<base::Closure> callback) {
- DCHECK(callback.get());
- callback->Run();
+void RunOnTargetThread(base::OnceClosure callback) {
+ DCHECK(!callback.is_null());
+ std::move(callback).Run();
}
} // anonymous namespace
-base::Closure InProcessCommandBuffer::WrapCallback(
- const base::Closure& callback) {
+base::OnceClosure InProcessCommandBuffer::WrapCallback(
+ base::OnceClosure callback) {
// Make sure the callback gets deleted on the target thread by passing
// ownership.
- std::unique_ptr<base::Closure> scoped_callback(new base::Closure(callback));
- base::Closure callback_on_client_thread =
- base::Bind(&RunOnTargetThread, base::Passed(&scoped_callback));
- base::Closure wrapped_callback =
- base::Bind(&PostCallback, base::ThreadTaskRunnerHandle::IsSet()
- ? base::ThreadTaskRunnerHandle::Get()
- : nullptr,
- callback_on_client_thread);
+ base::OnceClosure callback_on_client_thread =
+ base::BindOnce(&RunOnTargetThread, std::move(callback));
+ base::OnceClosure wrapped_callback =
+ base::BindOnce(&PostCallback,
+ base::ThreadTaskRunnerHandle::IsSet()
+ ? base::ThreadTaskRunnerHandle::Get()
+ : nullptr,
+ std::move(callback_on_client_thread));
return wrapped_callback;
}
-InProcessCommandBuffer::GpuTask::GpuTask(const base::Closure& callback,
+InProcessCommandBuffer::GpuTask::GpuTask(base::OnceClosure callback,
+ uint32_t order_number)
+ : once_closure_(std::move(callback)), order_number_(order_number) {}
+
+InProcessCommandBuffer::GpuTask::GpuTask(base::RepeatingClosure callback,
uint32_t order_number)
- : callback(callback), order_number(order_number) {}
+ : repeating_closure_(std::move(callback)), order_number_(order_number) {}
InProcessCommandBuffer::GpuTask::~GpuTask() = default;
+void InProcessCommandBuffer::GpuTask::Run() {
+ if (once_closure_) {
+ std::move(once_closure_).Run();
+ return;
+ }
+ DCHECK(repeating_closure_) << "Trying to run a OnceClosure more than once.";
+ repeating_closure_.Run();
+}
+
} // namespace gpu
diff --git a/chromium/gpu/ipc/in_process_command_buffer.h b/chromium/gpu/ipc/in_process_command_buffer.h
index ee55d73a258..818b968a998 100644
--- a/chromium/gpu/ipc/in_process_command_buffer.h
+++ b/chromium/gpu/ipc/in_process_command_buffer.h
@@ -131,7 +131,7 @@ class GL_IN_PROCESS_CONTEXT_EXPORT InProcessCommandBuffer
size_t height,
unsigned internalformat) override;
void DestroyImage(int32_t id) override;
- void SignalQuery(uint32_t query_id, const base::Closure& callback) override;
+ void SignalQuery(uint32_t query_id, base::OnceClosure callback) override;
void CreateGpuFence(uint32_t gpu_fence_id, ClientGpuFence source) override;
void GetGpuFence(uint32_t gpu_fence_id,
base::OnceCallback<void(std::unique_ptr<gfx::GpuFence>)>
@@ -144,7 +144,7 @@ class GL_IN_PROCESS_CONTEXT_EXPORT InProcessCommandBuffer
uint64_t GenerateFenceSyncRelease() override;
bool IsFenceSyncReleased(uint64_t release) override;
void SignalSyncToken(const SyncToken& sync_token,
- const base::Closure& callback) override;
+ base::OnceClosure callback) override;
void WaitSyncTokenHint(const SyncToken& sync_token) override;
bool CanWaitUnverifiedSyncToken(const SyncToken& sync_token) override;
void SetSnapshotRequested() override;
@@ -215,6 +215,8 @@ class GL_IN_PROCESS_CONTEXT_EXPORT InProcessCommandBuffer
return decoder_->GetTransferCacheForTest();
}
+ static const int kGpuMemoryBufferClientId;
+
// The serializer interface to the GPU service (i.e. thread).
class Service {
public:
@@ -229,11 +231,11 @@ class GL_IN_PROCESS_CONTEXT_EXPORT InProcessCommandBuffer
virtual void Release() const = 0;
// Queues a task to run as soon as possible.
- virtual void ScheduleTask(const base::Closure& task) = 0;
+ virtual void ScheduleTask(base::OnceClosure task) = 0;
// Schedules |callback| to run at an appropriate time for performing delayed
// work.
- virtual void ScheduleDelayedWork(const base::Closure& task) = 0;
+ virtual void ScheduleDelayedWork(base::OnceClosure task) = 0;
virtual bool UseVirtualizedGLContexts() = 0;
virtual SyncPointManager* sync_point_manager() = 0;
@@ -303,14 +305,17 @@ class GL_IN_PROCESS_CONTEXT_EXPORT InProcessCommandBuffer
void UpdateLastStateOnGpuThread();
void ScheduleDelayedWorkOnGpuThread();
bool MakeCurrent();
- base::Closure WrapCallback(const base::Closure& callback);
- void QueueTask(bool out_of_order, const base::Closure& task);
+ base::OnceClosure WrapCallback(base::OnceClosure callback);
+
+ void QueueOnceTask(bool out_of_order, base::OnceClosure task);
+ void QueueRepeatableTask(base::RepeatingClosure task);
+
void ProcessTasksOnGpuThread();
void CheckSequencedThread();
void OnWaitSyncTokenCompleted(const SyncToken& sync_token);
void SignalSyncTokenOnGpuThread(const SyncToken& sync_token,
- const base::Closure& callback);
- void SignalQueryOnGpuThread(unsigned query_id, const base::Closure& callback);
+ base::OnceClosure callback);
+ void SignalQueryOnGpuThread(unsigned query_id, base::OnceClosure callback);
void DestroyTransferBufferOnGpuThread(int32_t id);
void CreateImageOnGpuThread(int32_t id,
const gfx::GpuMemoryBufferHandle& handle,
@@ -386,11 +391,22 @@ class GL_IN_PROCESS_CONTEXT_EXPORT InProcessCommandBuffer
std::unique_ptr<base::SequenceChecker> sequence_checker_;
base::Lock task_queue_lock_;
- struct GpuTask {
- GpuTask(const base::Closure& callback, uint32_t order_number);
+ class GpuTask {
+ public:
+ GpuTask(base::OnceClosure callback, uint32_t order_number);
+ GpuTask(base::RepeatingClosure callback, uint32_t order_number);
~GpuTask();
- base::Closure callback;
- uint32_t order_number;
+
+ uint32_t order_number() { return order_number_; }
+ bool is_repeatable() { return !!repeating_closure_; }
+ void Run();
+
+ private:
+ base::OnceClosure once_closure_;
+ base::RepeatingClosure repeating_closure_;
+ uint32_t order_number_;
+
+ DISALLOW_COPY_AND_ASSIGN(GpuTask);
};
base::queue<std::unique_ptr<GpuTask>> task_queue_;
diff --git a/chromium/gpu/ipc/service/BUILD.gn b/chromium/gpu/ipc/service/BUILD.gn
index 11b63a8a7ea..6b22443e3ab 100644
--- a/chromium/gpu/ipc/service/BUILD.gn
+++ b/chromium/gpu/ipc/service/BUILD.gn
@@ -43,6 +43,9 @@ component("service") {
"switches.h",
]
defines = [ "GPU_IPC_SERVICE_IMPLEMENTATION" ]
+ if (is_chromecast) {
+ defines += [ "IS_CHROMECAST" ]
+ }
public_deps = [
"//base",
"//ipc",
diff --git a/chromium/gpu/ipc/service/direct_composition_child_surface_win.cc b/chromium/gpu/ipc/service/direct_composition_child_surface_win.cc
index b8c5711996a..b95e029c853 100644
--- a/chromium/gpu/ipc/service/direct_composition_child_surface_win.cc
+++ b/chromium/gpu/ipc/service/direct_composition_child_surface_win.cc
@@ -210,7 +210,9 @@ void* DirectCompositionChildSurfaceWin::GetHandle() {
gfx::SwapResult DirectCompositionChildSurfaceWin::SwapBuffers(
const PresentationCallback& callback) {
- // TODO(penghuang): Provide presentation feedback. https://crbug.com/776877
+ // PresentationCallback is handled by DirectCompositionSurfaceWin. The child
+ // surface doesn't need provide presentation feedback.
+ DCHECK(!callback);
ReleaseDrawTexture(false);
return gfx::SwapResult::SWAP_ACK;
}
diff --git a/chromium/gpu/ipc/service/direct_composition_surface_win.cc b/chromium/gpu/ipc/service/direct_composition_surface_win.cc
index 0df3457c877..9b009bbd275 100644
--- a/chromium/gpu/ipc/service/direct_composition_surface_win.cc
+++ b/chromium/gpu/ipc/service/direct_composition_surface_win.cc
@@ -35,6 +35,7 @@
#include "ui/gl/gl_image_dxgi.h"
#include "ui/gl/gl_image_memory.h"
#include "ui/gl/gl_surface_egl.h"
+#include "ui/gl/gl_surface_presentation_helper.h"
#include "ui/gl/scoped_make_current.h"
#ifndef EGL_ANGLE_flexible_surface_compatibility
@@ -1205,10 +1206,16 @@ bool DirectCompositionSurfaceWin::Initialize(gl::GLSurfaceFormat format) {
return false;
}
- return RecreateRootSurface();
+ if (!RecreateRootSurface())
+ return false;
+
+ presentation_helper_ =
+ std::make_unique<gl::GLSurfacePresentationHelper>(vsync_provider_.get());
+ return true;
}
void DirectCompositionSurfaceWin::Destroy() {
+ presentation_helper_ = nullptr;
if (default_surface_) {
if (!eglDestroySurface(GetDisplay(), default_surface_)) {
DLOG(ERROR) << "eglDestroySurface failed with error "
@@ -1254,12 +1261,15 @@ bool DirectCompositionSurfaceWin::Resize(const gfx::Size& size,
gfx::SwapResult DirectCompositionSurfaceWin::SwapBuffers(
const PresentationCallback& callback) {
+ gl::GLSurfacePresentationHelper::ScopedSwapBuffers scoped_swap_buffers(
+ presentation_helper_.get(), callback);
ui::ScopedReleaseCurrent release_current;
- root_surface_->SwapBuffers(callback);
+ root_surface_->SwapBuffers(PresentationCallback());
layer_tree_->CommitAndClearPendingOverlays();
child_window_.ClearInvalidContents();
- return release_current.Restore() ? gfx::SwapResult::SWAP_ACK
- : gfx::SwapResult::SWAP_FAILED;
+ if (!release_current.Restore())
+ scoped_swap_buffers.set_result(gfx::SwapResult::SWAP_FAILED);
+ return scoped_swap_buffers.result();
}
gfx::SwapResult DirectCompositionSurfaceWin::PostSubBuffer(
@@ -1298,6 +1308,8 @@ bool DirectCompositionSurfaceWin::SupportsPostSubBuffer() {
}
bool DirectCompositionSurfaceWin::OnMakeCurrent(gl::GLContext* context) {
+ if (presentation_helper_)
+ presentation_helper_->OnMakeCurrent(context, this);
if (root_surface_)
return root_surface_->OnMakeCurrent(context);
return true;
diff --git a/chromium/gpu/ipc/service/direct_composition_surface_win.h b/chromium/gpu/ipc/service/direct_composition_surface_win.h
index f135c1dfd20..a7c1b0d5001 100644
--- a/chromium/gpu/ipc/service/direct_composition_surface_win.h
+++ b/chromium/gpu/ipc/service/direct_composition_surface_win.h
@@ -18,6 +18,10 @@
#include "ui/gl/gl_image.h"
#include "ui/gl/gl_surface_egl.h"
+namespace gl {
+class GLSurfacePresentationHelper;
+}
+
namespace gpu {
class DCLayerTree;
@@ -107,6 +111,7 @@ class GPU_IPC_SERVICE_EXPORT DirectCompositionSurfaceWin
bool is_hdr_ = false;
bool has_alpha_ = true;
std::unique_ptr<gfx::VSyncProvider> vsync_provider_;
+ std::unique_ptr<gl::GLSurfacePresentationHelper> presentation_helper_;
scoped_refptr<DirectCompositionChildSurfaceWin> root_surface_;
std::unique_ptr<DCLayerTree> layer_tree_;
diff --git a/chromium/gpu/ipc/service/direct_composition_surface_win_unittest.cc b/chromium/gpu/ipc/service/direct_composition_surface_win_unittest.cc
index 09c951772ce..fe35a50a82b 100644
--- a/chromium/gpu/ipc/service/direct_composition_surface_win_unittest.cc
+++ b/chromium/gpu/ipc/service/direct_composition_surface_win_unittest.cc
@@ -4,6 +4,7 @@
#include "gpu/ipc/service/direct_composition_surface_win.h"
+#include "base/bind_helpers.h"
#include "base/memory/ref_counted_memory.h"
#include "base/memory/weak_ptr.h"
#include "base/run_loop.h"
@@ -42,8 +43,6 @@ bool CheckIfDCSupported() {
return true;
}
-void EmptyPresentation(const gfx::PresentationFeedback&) {}
-
class TestImageTransportSurfaceDelegate
: public ImageTransportSurfaceDelegate,
public base::SupportsWeakPtr<TestImageTransportSurfaceDelegate> {
@@ -178,7 +177,7 @@ TEST(DirectCompositionSurfaceTest, TestMakeCurrent) {
EXPECT_TRUE(context1->MakeCurrent(surface1.get()));
EXPECT_EQ(gfx::SwapResult::SWAP_ACK,
- surface1->SwapBuffers(base::Bind(&EmptyPresentation)));
+ surface1->SwapBuffers(base::DoNothing()));
EXPECT_TRUE(context1->IsCurrent(surface1.get()));
@@ -247,8 +246,7 @@ TEST(DirectCompositionSurfaceTest, DXGIDCLayerSwitch) {
EXPECT_FALSE(surface->SetDrawRectangle(gfx::Rect(0, 0, 100, 100)));
EXPECT_TRUE(context->MakeCurrent(surface.get()));
- EXPECT_EQ(gfx::SwapResult::SWAP_ACK,
- surface->SwapBuffers(base::Bind(&EmptyPresentation)));
+ EXPECT_EQ(gfx::SwapResult::SWAP_ACK, surface->SwapBuffers(base::DoNothing()));
EXPECT_TRUE(context->IsCurrent(surface.get()));
@@ -263,8 +261,7 @@ TEST(DirectCompositionSurfaceTest, DXGIDCLayerSwitch) {
surface->SetEnableDCLayers(false);
- EXPECT_EQ(gfx::SwapResult::SWAP_ACK,
- surface->SwapBuffers(base::Bind(&EmptyPresentation)));
+ EXPECT_EQ(gfx::SwapResult::SWAP_ACK, surface->SwapBuffers(base::DoNothing()));
// Surface switched to use IDXGISwapChain, so must draw to entire
// surface.
@@ -360,8 +357,7 @@ TEST(DirectCompositionSurfaceTest, NoPresentTwice) {
surface->GetLayerSwapChainForTesting(1);
ASSERT_FALSE(swap_chain);
- EXPECT_EQ(gfx::SwapResult::SWAP_ACK,
- surface->SwapBuffers(base::Bind(&EmptyPresentation)));
+ EXPECT_EQ(gfx::SwapResult::SWAP_ACK, surface->SwapBuffers(base::DoNothing()));
swap_chain = surface->GetLayerSwapChainForTesting(1);
ASSERT_TRUE(swap_chain);
@@ -374,8 +370,7 @@ TEST(DirectCompositionSurfaceTest, NoPresentTwice) {
EXPECT_EQ(2u, last_present_count);
surface->ScheduleDCLayer(params);
- EXPECT_EQ(gfx::SwapResult::SWAP_ACK,
- surface->SwapBuffers(base::Bind(&EmptyPresentation)));
+ EXPECT_EQ(gfx::SwapResult::SWAP_ACK, surface->SwapBuffers(base::DoNothing()));
Microsoft::WRL::ComPtr<IDXGISwapChain1> swap_chain2 =
surface->GetLayerSwapChainForTesting(1);
@@ -393,8 +388,7 @@ TEST(DirectCompositionSurfaceTest, NoPresentTwice) {
0);
surface->ScheduleDCLayer(params2);
- EXPECT_EQ(gfx::SwapResult::SWAP_ACK,
- surface->SwapBuffers(base::Bind(&EmptyPresentation)));
+ EXPECT_EQ(gfx::SwapResult::SWAP_ACK, surface->SwapBuffers(base::DoNothing()));
Microsoft::WRL::ComPtr<IDXGISwapChain1> swap_chain3 =
surface->GetLayerSwapChainForTesting(1);
@@ -469,7 +463,7 @@ class DirectCompositionPixelTest : public testing::Test {
glClear(GL_COLOR_BUFFER_BIT);
EXPECT_EQ(gfx::SwapResult::SWAP_ACK,
- surface_->SwapBuffers(base::Bind(&EmptyPresentation)));
+ surface_->SwapBuffers(base::DoNothing()));
// Ensure DWM swap completed.
Sleep(1000);
@@ -544,7 +538,7 @@ class DirectCompositionVideoPixelTest : public DirectCompositionPixelTest {
surface_->ScheduleDCLayer(params);
EXPECT_EQ(gfx::SwapResult::SWAP_ACK,
- surface_->SwapBuffers(base::Bind(&EmptyPresentation)));
+ surface_->SwapBuffers(base::DoNothing()));
// Scaling up the swapchain with the same image should cause it to be
// transformed again, but not presented again.
@@ -556,7 +550,7 @@ class DirectCompositionVideoPixelTest : public DirectCompositionPixelTest {
surface_->ScheduleDCLayer(params2);
EXPECT_EQ(gfx::SwapResult::SWAP_ACK,
- surface_->SwapBuffers(base::Bind(&EmptyPresentation)));
+ surface_->SwapBuffers(base::DoNothing()));
Sleep(1000);
if (check_color) {
@@ -640,7 +634,7 @@ TEST_F(DirectCompositionPixelTest, SoftwareVideoSwapchain) {
surface_->ScheduleDCLayer(params);
EXPECT_EQ(gfx::SwapResult::SWAP_ACK,
- surface_->SwapBuffers(base::Bind(&EmptyPresentation)));
+ surface_->SwapBuffers(base::DoNothing()));
Sleep(1000);
SkColor expected_color = SkColorSetRGB(0xff, 0xb7, 0xff);
@@ -690,7 +684,7 @@ TEST_F(DirectCompositionPixelTest, VideoHandleSwapchain) {
surface_->ScheduleDCLayer(params);
EXPECT_EQ(gfx::SwapResult::SWAP_ACK,
- surface_->SwapBuffers(base::Bind(&EmptyPresentation)));
+ surface_->SwapBuffers(base::DoNothing()));
Sleep(1000);
diff --git a/chromium/gpu/ipc/service/gpu_channel_test_common.h b/chromium/gpu/ipc/service/gpu_channel_test_common.h
index 9e20b24417e..03cd2152ff5 100644
--- a/chromium/gpu/ipc/service/gpu_channel_test_common.h
+++ b/chromium/gpu/ipc/service/gpu_channel_test_common.h
@@ -2,6 +2,9 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#ifndef GPU_IPC_SERVICE_GPU_CHANNEL_TEST_COMMON_H_
+#define GPU_IPC_SERVICE_GPU_CHANNEL_TEST_COMMON_H_
+
#include <memory>
#include "base/memory/ref_counted.h"
@@ -50,3 +53,5 @@ class GpuChannelTestCommon : public testing::Test {
};
} // namespace gpu
+
+#endif // GPU_IPC_SERVICE_GPU_CHANNEL_TEST_COMMON_H_
diff --git a/chromium/gpu/ipc/service/gpu_init.cc b/chromium/gpu/ipc/service/gpu_init.cc
index a6a630a8bc1..f4cd63bae6d 100644
--- a/chromium/gpu/ipc/service/gpu_init.cc
+++ b/chromium/gpu/ipc/service/gpu_init.cc
@@ -19,6 +19,7 @@
#include "gpu/config/gpu_util.h"
#include "gpu/ipc/service/gpu_watchdog_thread.h"
#include "gpu/ipc/service/switches.h"
+#include "ui/base/ui_base_features.h"
#include "ui/gfx/switches.h"
#include "ui/gl/gl_features.h"
#include "ui/gl/gl_implementation.h"
@@ -28,6 +29,7 @@
#if defined(USE_OZONE)
#include "ui/ozone/public/ozone_platform.h"
+#include "ui/ozone/public/ozone_switches.h"
#endif
#if defined(OS_WIN)
@@ -39,29 +41,13 @@ namespace gpu {
namespace {
#if !defined(OS_MACOSX)
-void CollectGraphicsInfo(GPUInfo* gpu_info) {
+bool CollectGraphicsInfo(GPUInfo* gpu_info) {
DCHECK(gpu_info);
-#if defined(OS_FUCHSIA)
- // TODO(crbug.com/707031): Implement this.
- NOTIMPLEMENTED();
- return;
-#else
TRACE_EVENT0("gpu,startup", "Collect Graphics Info");
base::TimeTicks before_collect_context_graphics_info = base::TimeTicks::Now();
- CollectInfoResult result = CollectContextGraphicsInfo(gpu_info);
- switch (result) {
- case kCollectInfoFatalFailure:
- LOG(ERROR) << "gpu::CollectGraphicsInfo failed (fatal).";
- break;
- case kCollectInfoNonFatalFailure:
- DVLOG(1) << "gpu::CollectGraphicsInfo failed (non-fatal).";
- break;
- case kCollectInfoNone:
- NOTREACHED();
- break;
- case kCollectInfoSuccess:
- break;
- }
+ bool success = CollectContextGraphicsInfo(gpu_info);
+ if (!success)
+ LOG(ERROR) << "gpu::CollectGraphicsInfo failed.";
#if defined(OS_WIN)
if (gl::GetGLImplementation() == gl::kGLImplementationEGLGLES2 &&
@@ -75,16 +61,16 @@ void CollectGraphicsInfo(GPUInfo* gpu_info) {
}
#endif // defined(OS_WIN)
- if (result != kCollectInfoFatalFailure) {
+ if (success) {
base::TimeDelta collect_context_time =
base::TimeTicks::Now() - before_collect_context_graphics_info;
UMA_HISTOGRAM_TIMES("GPU.CollectContextGraphicsInfo", collect_context_time);
}
-#endif // defined(OS_FUCHSIA)
+ return success;
}
#endif // defined(OS_MACOSX)
-#if defined(OS_LINUX) && !defined(OS_CHROMEOS)
+#if defined(OS_LINUX) && !defined(OS_CHROMEOS) && !defined(IS_CHROMECAST)
bool CanAccessNvidiaDeviceFile() {
bool res = true;
base::AssertBlockingAllowed();
@@ -94,7 +80,7 @@ bool CanAccessNvidiaDeviceFile() {
}
return res;
}
-#endif // defined(OS_LINUX) && !defined(OS_CHROMEOS)
+#endif // OS_LINUX && !OS_CHROMEOS && !IS_CHROMECAST
} // namespace
@@ -107,13 +93,13 @@ GpuInit::~GpuInit() {
bool GpuInit::InitializeAndStartSandbox(base::CommandLine* command_line,
const GpuPreferences& gpu_preferences) {
gpu_preferences_ = gpu_preferences;
-#if !defined(OS_ANDROID)
+ // Blacklist decisions based on basic GPUInfo may not be final. It might
+ // need more context based GPUInfo. In such situations, switching to
+ // SwiftShader needs to wait until creating a context.
+ bool needs_more_info = false;
+#if !defined(OS_ANDROID) && !defined(IS_CHROMECAST)
if (!PopGPUInfoCache(&gpu_info_)) {
- // Get vendor_id, device_id, driver_version from browser process through
- // commandline switches.
- // TODO(zmo): Collect basic GPU info (without a context) here instead of
- // passing from browser process.
- GetGpuInfoFromCommandLine(*command_line, &gpu_info_);
+ CollectBasicGraphicsInfo(command_line, &gpu_info_);
}
// Set keys for crash logging based on preliminary gpu info, in case we
@@ -131,13 +117,14 @@ bool GpuInit::InitializeAndStartSandbox(base::CommandLine* command_line,
gpu_feature_info_ = gpu::ComputeGpuFeatureInfo(
gpu_info_, gpu_preferences.ignore_gpu_blacklist,
gpu_preferences.disable_gpu_driver_bug_workarounds,
- gpu_preferences.log_gpu_control_list_decisions, command_line);
+ gpu_preferences.log_gpu_control_list_decisions, command_line,
+ &needs_more_info);
}
if (gpu::SwitchableGPUsSupported(gpu_info_, *command_line)) {
gpu::InitializeSwitchableGPUs(
gpu_feature_info_.enabled_gpu_driver_bug_workarounds);
}
-#endif // OS_ANDROID
+#endif // !OS_ANDROID && !IS_CHROMECAST
gpu_info_.in_process_gpu = false;
bool enable_watchdog = !gpu_preferences.disable_gpu_watchdog &&
@@ -197,10 +184,11 @@ bool GpuInit::InitializeAndStartSandbox(base::CommandLine* command_line,
// may also have started at this point.
ui::OzonePlatform::InitParams params;
params.single_process = false;
+ params.using_mojo = command_line->HasSwitch(switches::kEnableDrmMojo);
ui::OzonePlatform::InitializeForGPU(params);
#endif
- bool use_swiftshader = ShouldEnableSwiftShader(command_line);
+ bool use_swiftshader = ShouldEnableSwiftShader(command_line, needs_more_info);
// Load and initialize the GL implementation and locate the GL entry points if
// needed. This initialization may have already happened if running in the
// browser process, for example.
@@ -224,19 +212,17 @@ bool GpuInit::InitializeAndStartSandbox(base::CommandLine* command_line,
// because the basic GPU information is passed down from the host process.
#if !defined(OS_MACOSX)
if (!use_swiftshader) {
- CollectGraphicsInfo(&gpu_info_);
- if (gpu_info_.context_info_state == gpu::kCollectInfoFatalFailure)
+ if (!CollectGraphicsInfo(&gpu_info_))
return false;
gpu::SetKeysForCrashLogging(gpu_info_);
gpu_feature_info_ = gpu::ComputeGpuFeatureInfo(
gpu_info_, gpu_preferences.ignore_gpu_blacklist,
gpu_preferences.disable_gpu_driver_bug_workarounds,
- gpu_preferences.log_gpu_control_list_decisions, command_line);
- use_swiftshader = ShouldEnableSwiftShader(command_line);
+ gpu_preferences.log_gpu_control_list_decisions, command_line, nullptr);
+ use_swiftshader = ShouldEnableSwiftShader(command_line, false);
if (use_swiftshader) {
gl::init::ShutdownGL(true);
- gl_initialized = gl::init::InitializeGLNoExtensionsOneOff();
- if (!gl_initialized) {
+ if (!gl::init::InitializeGLNoExtensionsOneOff()) {
VLOG(1) << "gl::init::InitializeGLNoExtensionsOneOff with SwiftShader "
<< "failed";
return false;
@@ -257,8 +243,7 @@ bool GpuInit::InitializeAndStartSandbox(base::CommandLine* command_line,
gl::init::SetDisabledExtensionsPlatform(
gpu_feature_info_.disabled_extensions);
}
- gl_initialized = gl::init::InitializeExtensionSettingsOneOffPlatform();
- if (!gl_initialized) {
+ if (!gl::init::InitializeExtensionSettingsOneOffPlatform()) {
VLOG(1) << "gl::init::InitializeExtensionSettingsOneOffPlatform failed";
return false;
}
@@ -300,56 +285,72 @@ bool GpuInit::InitializeAndStartSandbox(base::CommandLine* command_line,
gles2::PassthroughCommandDecoderSupported();
init_successful_ = true;
+#if defined(USE_OZONE)
+ ui::OzonePlatform::GetInstance()->AfterSandboxEntry();
+#endif
return true;
}
+#if defined(OS_ANDROID)
+void GpuInit::InitializeInProcess(base::CommandLine* command_line,
+ const GpuPreferences& gpu_preferences) {
+ gpu_preferences_ = gpu_preferences;
+ init_successful_ = true;
+ DCHECK(!ShouldEnableSwiftShader(command_line, false));
+
+ InitializeGLThreadSafe(command_line, gpu_preferences.ignore_gpu_blacklist,
+ gpu_preferences.disable_gpu_driver_bug_workarounds,
+ gpu_preferences.log_gpu_control_list_decisions,
+ &gpu_info_, &gpu_feature_info_);
+}
+#else
void GpuInit::InitializeInProcess(base::CommandLine* command_line,
- const GpuPreferences& gpu_preferences,
- const GPUInfo* gpu_info,
- const GpuFeatureInfo* gpu_feature_info) {
+ const GpuPreferences& gpu_preferences) {
gpu_preferences_ = gpu_preferences;
init_successful_ = true;
#if defined(USE_OZONE)
ui::OzonePlatform::InitParams params;
params.single_process = true;
- ui::OzonePlatform::InitializeForGPU(params);
+#if defined(OS_CHROMEOS)
+ params.using_mojo = base::FeatureList::IsEnabled(features::kMash) ||
+ command_line->HasSwitch(switches::kEnableDrmMojo);
+#else
+ params.using_mojo = command_line->HasSwitch(switches::kEnableDrmMojo);
#endif
-
- if (gpu_info && gpu_feature_info) {
- gpu_info_ = *gpu_info;
- gpu_feature_info_ = *gpu_feature_info;
- } else {
-#if !defined(OS_ANDROID)
- if (!PopGPUInfoCache(&gpu_info_)) {
- // TODO(zmo): Collect basic GPU info here instead.
- gpu::GetGpuInfoFromCommandLine(*command_line, &gpu_info_);
- }
- if (!PopGpuFeatureInfoCache(&gpu_feature_info_)) {
- gpu_feature_info_ = gpu::ComputeGpuFeatureInfo(
- gpu_info_, gpu_preferences.ignore_gpu_blacklist,
- gpu_preferences.disable_gpu_driver_bug_workarounds,
- gpu_preferences.log_gpu_control_list_decisions, command_line);
- }
+ ui::OzonePlatform::InitializeForGPU(params);
+ ui::OzonePlatform::GetInstance()->AfterSandboxEntry();
#endif
+ bool needs_more_info = false;
+#if !defined(IS_CHROMECAST)
+ if (!PopGPUInfoCache(&gpu_info_)) {
+ CollectBasicGraphicsInfo(command_line, &gpu_info_);
}
- if (gpu::SwitchableGPUsSupported(gpu_info_, *command_line)) {
- gpu::InitializeSwitchableGPUs(
+ if (!PopGpuFeatureInfoCache(&gpu_feature_info_)) {
+ gpu_feature_info_ = ComputeGpuFeatureInfo(
+ gpu_info_, gpu_preferences.ignore_gpu_blacklist,
+ gpu_preferences.disable_gpu_driver_bug_workarounds,
+ gpu_preferences.log_gpu_control_list_decisions, command_line,
+ &needs_more_info);
+ }
+ if (SwitchableGPUsSupported(gpu_info_, *command_line)) {
+ InitializeSwitchableGPUs(
gpu_feature_info_.enabled_gpu_driver_bug_workarounds);
}
+#endif // !IS_CHROMECAST
- bool use_swiftshader = ShouldEnableSwiftShader(command_line);
+ bool use_swiftshader = ShouldEnableSwiftShader(command_line, needs_more_info);
if (!gl::init::InitializeGLNoExtensionsOneOff()) {
VLOG(1) << "gl::init::InitializeGLNoExtensionsOneOff failed";
return;
}
if (!use_swiftshader) {
- gpu::CollectContextGraphicsInfo(&gpu_info_);
- gpu_feature_info_ = gpu::ComputeGpuFeatureInfo(
+ CollectContextGraphicsInfo(&gpu_info_);
+ gpu_feature_info_ = ComputeGpuFeatureInfo(
gpu_info_, gpu_preferences.ignore_gpu_blacklist,
gpu_preferences.disable_gpu_driver_bug_workarounds,
- gpu_preferences.log_gpu_control_list_decisions, command_line);
- use_swiftshader = ShouldEnableSwiftShader(command_line);
+ gpu_preferences.log_gpu_control_list_decisions, command_line, nullptr);
+ use_swiftshader = ShouldEnableSwiftShader(command_line, false);
if (use_swiftshader) {
gl::init::ShutdownGL(true);
if (!gl::init::InitializeGLNoExtensionsOneOff()) {
@@ -370,16 +371,19 @@ void GpuInit::InitializeInProcess(base::CommandLine* command_line,
VLOG(1) << "gl::init::InitializeExtensionSettingsOneOffPlatform failed";
}
}
+#endif // OS_ANDROID
-bool GpuInit::ShouldEnableSwiftShader(base::CommandLine* command_line) {
+bool GpuInit::ShouldEnableSwiftShader(base::CommandLine* command_line,
+ bool blacklist_needs_more_info) {
#if BUILDFLAG(ENABLE_SWIFTSHADER)
if (gpu_preferences_.disable_software_rasterizer)
return false;
// Don't overwrite user preference.
if (command_line->HasSwitch(switches::kUseGL))
return false;
- if (gpu_feature_info_.status_values[GPU_FEATURE_TYPE_ACCELERATED_WEBGL] !=
- kGpuFeatureStatusEnabled) {
+ if (!blacklist_needs_more_info &&
+ gpu_feature_info_.status_values[GPU_FEATURE_TYPE_ACCELERATED_WEBGL] !=
+ kGpuFeatureStatusEnabled) {
command_line->AppendSwitchASCII(
switches::kUseGL, gl::kGLImplementationSwiftShaderForWebGLName);
return true;
diff --git a/chromium/gpu/ipc/service/gpu_init.h b/chromium/gpu/ipc/service/gpu_init.h
index 9fd6b89d964..9ee296c3716 100644
--- a/chromium/gpu/ipc/service/gpu_init.h
+++ b/chromium/gpu/ipc/service/gpu_init.h
@@ -44,9 +44,7 @@ class GPU_IPC_SERVICE_EXPORT GpuInit {
bool InitializeAndStartSandbox(base::CommandLine* command_line,
const GpuPreferences& gpu_preferences);
void InitializeInProcess(base::CommandLine* command_line,
- const GpuPreferences& gpu_preferences,
- const GPUInfo* gpu_info = nullptr,
- const GpuFeatureInfo* gpu_feature_info = nullptr);
+ const GpuPreferences& gpu_preferences);
const GPUInfo& gpu_info() const { return gpu_info_; }
const GpuFeatureInfo& gpu_feature_info() const { return gpu_feature_info_; }
@@ -64,7 +62,8 @@ class GPU_IPC_SERVICE_EXPORT GpuInit {
GpuPreferences gpu_preferences_;
bool init_successful_ = false;
- bool ShouldEnableSwiftShader(base::CommandLine* command_line);
+ bool ShouldEnableSwiftShader(base::CommandLine* command_line,
+ bool blacklist_needs_more_info);
void AdjustInfoToSwiftShader();
DISALLOW_COPY_AND_ASSIGN(GpuInit);
};
diff --git a/chromium/gpu/ipc/service/gpu_memory_buffer_factory_io_surface.cc b/chromium/gpu/ipc/service/gpu_memory_buffer_factory_io_surface.cc
index 472612bb40a..d739aad3c19 100644
--- a/chromium/gpu/ipc/service/gpu_memory_buffer_factory_io_surface.cc
+++ b/chromium/gpu/ipc/service/gpu_memory_buffer_factory_io_surface.cc
@@ -6,6 +6,7 @@
#include <vector>
+#include "base/debug/dump_without_crashing.h"
#include "base/logging.h"
#include "gpu/GLES2/gl2extchromium.h"
#include "ui/gfx/buffer_format_util.h"
@@ -14,6 +15,15 @@
namespace gpu {
+namespace {
+// A GpuMemoryBuffer with client_id = 0 behaves like anonymous shared memory.
+const int kAnonymousClientId = 0;
+
+// The maximum number of times to dump before throttling (to avoid sending
+// thousands of crash dumps).
+const int kMaxCrashDumps = 10;
+} // namespace
+
GpuMemoryBufferFactoryIOSurface::GpuMemoryBufferFactoryIOSurface() {
}
@@ -28,26 +38,48 @@ GpuMemoryBufferFactoryIOSurface::CreateGpuMemoryBuffer(
gfx::BufferUsage usage,
int client_id,
SurfaceHandle surface_handle) {
- // Don't clear anonymous io surfaces.
- bool should_clear = (client_id != 0);
+ DCHECK_NE(client_id, kAnonymousClientId);
+
+ bool should_clear = true;
base::ScopedCFTypeRef<IOSurfaceRef> io_surface(
gfx::CreateIOSurface(size, format, should_clear));
- if (!io_surface)
+ if (!io_surface) {
+ LOG(ERROR) << "Failed to allocate IOSurface.";
return gfx::GpuMemoryBufferHandle();
+ }
- // A GpuMemoryBuffer with client_id = 0 behaves like anonymous shared memory.
- if (client_id != 0) {
- base::AutoLock lock(io_surfaces_lock_);
+ gfx::GpuMemoryBufferHandle handle;
+ handle.type = gfx::IO_SURFACE_BUFFER;
+ handle.id = id;
+ handle.mach_port.reset(IOSurfaceCreateMachPort(io_surface));
+ CHECK(handle.mach_port);
+
+ // This IOSurface will be opened via mach port in the client process. It has
+ // been observed in https://crbug.com/574014 that these ports sometimes fail
+ // to be opened in the client process. It has further been observed in
+ // https://crbug.com/795649#c30 that these ports fail to be opened in creating
+ // process. To determine if these failures are independent, attempt to open
+ // the creating process first (and don't not return those that fail).
+ base::ScopedCFTypeRef<IOSurfaceRef> io_surface_from_mach_port(
+ IOSurfaceLookupFromMachPort(handle.mach_port.get()));
+ if (!io_surface_from_mach_port) {
+ LOG(ERROR) << "Failed to locally open IOSurface from mach port to be "
+ "returned to client, not returning to client.";
+ static int dump_counter = kMaxCrashDumps;
+ if (dump_counter) {
+ dump_counter -= 1;
+ base::debug::DumpWithoutCrashing();
+ }
+ return gfx::GpuMemoryBufferHandle();
+ }
+ {
+ base::AutoLock lock(io_surfaces_lock_);
IOSurfaceMapKey key(id, client_id);
DCHECK(io_surfaces_.find(key) == io_surfaces_.end());
io_surfaces_[key] = io_surface;
}
- gfx::GpuMemoryBufferHandle handle;
- handle.type = gfx::IO_SURFACE_BUFFER;
- handle.id = id;
- handle.mach_port.reset(IOSurfaceCreateMachPort(io_surface));
return handle;
}
@@ -80,13 +112,17 @@ GpuMemoryBufferFactoryIOSurface::CreateImageForGpuMemoryBuffer(
DCHECK_EQ(handle.type, gfx::IO_SURFACE_BUFFER);
IOSurfaceMapKey key(handle.id, client_id);
IOSurfaceMap::iterator it = io_surfaces_.find(key);
- if (it == io_surfaces_.end())
+ if (it == io_surfaces_.end()) {
+ DLOG(ERROR) << "Failed to find IOSurface based on key.";
return scoped_refptr<gl::GLImage>();
+ }
scoped_refptr<gl::GLImageIOSurface> image(
gl::GLImageIOSurface::Create(size, internalformat));
- if (!image->Initialize(it->second.get(), handle.id, format))
+ if (!image->Initialize(it->second.get(), handle.id, format)) {
+ DLOG(ERROR) << "Failed to initialize GLImage for IOSurface.";
return scoped_refptr<gl::GLImage>();
+ }
return image;
}
@@ -97,20 +133,43 @@ GpuMemoryBufferFactoryIOSurface::CreateAnonymousImage(const gfx::Size& size,
gfx::BufferUsage usage,
unsigned internalformat,
bool* is_cleared) {
- // Note that the child id doesn't matter since the texture will never be
- // directly exposed to other processes, only via a mailbox.
- gfx::GpuMemoryBufferHandle handle = CreateGpuMemoryBuffer(
- gfx::GpuMemoryBufferId(next_anonymous_image_id_++), size, format, usage,
- 0 /* client_id */, gpu::kNullSurfaceHandle);
+ bool should_clear = false;
+ base::ScopedCFTypeRef<IOSurfaceRef> io_surface(
+ gfx::CreateIOSurface(size, format, should_clear));
+ if (!io_surface) {
+ LOG(ERROR) << "Failed to allocate IOSurface.";
+ return nullptr;
+ }
- base::ScopedCFTypeRef<IOSurfaceRef> io_surface;
- io_surface.reset(IOSurfaceLookupFromMachPort(handle.mach_port.get()));
- DCHECK_NE(nullptr, io_surface.get());
+ // This IOSurface does not require passing via a mach port, but attempt to
+ // locally open via a mach port to gather data to include in a Radar about
+ // this failure.
+ // https://crbug.com/795649
+ gfx::ScopedRefCountedIOSurfaceMachPort mach_port(
+ IOSurfaceCreateMachPort(io_surface));
+ if (mach_port) {
+ base::ScopedCFTypeRef<IOSurfaceRef> io_surface_from_mach_port(
+ IOSurfaceLookupFromMachPort(mach_port.get()));
+ if (!io_surface_from_mach_port) {
+ LOG(ERROR) << "Failed to locally open anonymous IOSurface mach port "
+ "(ignoring failure).";
+ static int dump_counter = kMaxCrashDumps;
+ if (dump_counter) {
+ dump_counter -= 1;
+ base::debug::DumpWithoutCrashing();
+ }
+ }
+ } else {
+ LOG(ERROR) << "Failed to create IOSurface mach port.";
+ }
+ gfx::GenericSharedMemoryId image_id(++next_anonymous_image_id_);
scoped_refptr<gl::GLImageIOSurface> image(
gl::GLImageIOSurface::Create(size, internalformat));
- if (!image->Initialize(io_surface.get(), handle.id, format))
+ if (!image->Initialize(io_surface.get(), image_id, format)) {
+ DLOG(ERROR) << "Failed to initialize anonymous GLImage.";
return scoped_refptr<gl::GLImage>();
+ }
*is_cleared = false;
return image;
diff --git a/chromium/gpu/ipc/service/gpu_vsync_provider_win.cc b/chromium/gpu/ipc/service/gpu_vsync_provider_win.cc
index 7c387fec43b..4b1daf15372 100644
--- a/chromium/gpu/ipc/service/gpu_vsync_provider_win.cc
+++ b/chromium/gpu/ipc/service/gpu_vsync_provider_win.cc
@@ -669,7 +669,11 @@ bool GpuVSyncProviderWin::GetVSyncParametersIfAvailable(
return false;
}
-bool GpuVSyncProviderWin::SupportGetVSyncParametersIfAvailable() {
+bool GpuVSyncProviderWin::SupportGetVSyncParametersIfAvailable() const {
+ return false;
+}
+
+bool GpuVSyncProviderWin::IsHWClock() const {
return false;
}
diff --git a/chromium/gpu/ipc/service/gpu_vsync_provider_win.h b/chromium/gpu/ipc/service/gpu_vsync_provider_win.h
index d9ab1d3f17c..a18fb2f7426 100644
--- a/chromium/gpu/ipc/service/gpu_vsync_provider_win.h
+++ b/chromium/gpu/ipc/service/gpu_vsync_provider_win.h
@@ -33,7 +33,8 @@ class GPU_IPC_SERVICE_EXPORT GpuVSyncProviderWin : public gfx::VSyncProvider {
void GetVSyncParameters(const UpdateVSyncCallback& callback) override;
bool GetVSyncParametersIfAvailable(base::TimeTicks* timebase,
base::TimeDelta* interval) override;
- bool SupportGetVSyncParametersIfAvailable() override;
+ bool SupportGetVSyncParametersIfAvailable() const override;
+ bool IsHWClock() const override;
private:
void OnVSync(base::TimeTicks timestamp, base::TimeDelta interval);
diff --git a/chromium/gpu/ipc/service/gpu_watchdog_thread.cc b/chromium/gpu/ipc/service/gpu_watchdog_thread.cc
index de3e362ea54..a39c10054eb 100644
--- a/chromium/gpu/ipc/service/gpu_watchdog_thread.cc
+++ b/chromium/gpu/ipc/service/gpu_watchdog_thread.cc
@@ -158,8 +158,11 @@ GpuWatchdogThread::~GpuWatchdogThread() {
#if defined(USE_X11)
if (tty_file_)
fclose(tty_file_);
- XDestroyWindow(display_, window_);
- XCloseDisplay(display_);
+ if (display_) {
+ DCHECK(window_);
+ XDestroyWindow(display_, window_);
+ XCloseDisplay(display_);
+ }
#endif
watched_message_loop_->RemoveTaskObserver(&task_observer_);
@@ -243,8 +246,7 @@ void GpuWatchdogThread::OnCheck(bool after_suspend) {
// Post a task to the monitored thread that does nothing but wake up the
// TaskObserver. Any other tasks that are pending on the watched thread will
// also wake up the observer. This simply ensures there is at least one.
- watched_message_loop_->task_runner()->PostTask(FROM_HERE,
- base::Bind(&base::DoNothing));
+ watched_message_loop_->task_runner()->PostTask(FROM_HERE, base::DoNothing());
// Post a task to the watchdog thread to exit if the monitored thread does
// not respond in time.
@@ -283,8 +285,8 @@ void GpuWatchdogThread::OnCheckTimeout() {
// Post a task that does nothing on the watched thread to bump its priority
// and make it more likely to get scheduled.
- watched_message_loop_->task_runner()->PostTask(
- FROM_HERE, base::Bind(&base::DoNothing));
+ watched_message_loop_->task_runner()->PostTask(FROM_HERE,
+ base::DoNothing());
return;
}
@@ -312,46 +314,48 @@ void GpuWatchdogThread::DeliberatelyTerminateToRecoverFromHang() {
#endif
#if defined(USE_X11)
- XWindowAttributes attributes;
- XGetWindowAttributes(display_, window_, &attributes);
-
- XSelectInput(display_, window_, PropertyChangeMask);
- SetupXChangeProp();
-
- XFlush(display_);
-
- // We wait for the property change event with a timeout. If it arrives we know
- // that X is responsive and is not the cause of the watchdog trigger, so we
- // should
- // terminate. If it times out, it may be due to X taking a long time, but
- // terminating won't help, so ignore the watchdog trigger.
- XEvent event_return;
- base::TimeTicks deadline = base::TimeTicks::Now() + timeout_;
- while (true) {
- base::TimeDelta delta = deadline - base::TimeTicks::Now();
- if (delta < base::TimeDelta()) {
- return;
- } else {
- while (XCheckWindowEvent(display_, window_, PropertyChangeMask,
- &event_return)) {
- if (MatchXEventAtom(&event_return))
- break;
- }
- struct pollfd fds[1];
- fds[0].fd = XConnectionNumber(display_);
- fds[0].events = POLLIN;
- int status = poll(fds, 1, delta.InMilliseconds());
- if (status == -1) {
- if (errno == EINTR) {
- continue;
- } else {
- LOG(FATAL) << "Lost X connection, aborting.";
- break;
- }
- } else if (status == 0) {
+ if (display_) {
+ DCHECK(window_);
+ XWindowAttributes attributes;
+ XGetWindowAttributes(display_, window_, &attributes);
+
+ XSelectInput(display_, window_, PropertyChangeMask);
+ SetupXChangeProp();
+
+ XFlush(display_);
+
+ // We wait for the property change event with a timeout. If it arrives we
+ // know that X is responsive and is not the cause of the watchdog trigger,
+ // so we should terminate. If it times out, it may be due to X taking a long
+ // time, but terminating won't help, so ignore the watchdog trigger.
+ XEvent event_return;
+ base::TimeTicks deadline = base::TimeTicks::Now() + timeout_;
+ while (true) {
+ base::TimeDelta delta = deadline - base::TimeTicks::Now();
+ if (delta < base::TimeDelta()) {
return;
} else {
- continue;
+ while (XCheckWindowEvent(display_, window_, PropertyChangeMask,
+ &event_return)) {
+ if (MatchXEventAtom(&event_return))
+ break;
+ }
+ struct pollfd fds[1];
+ fds[0].fd = XConnectionNumber(display_);
+ fds[0].events = POLLIN;
+ int status = poll(fds, 1, delta.InMilliseconds());
+ if (status == -1) {
+ if (errno == EINTR) {
+ continue;
+ } else {
+ LOG(FATAL) << "Lost X connection, aborting.";
+ break;
+ }
+ } else if (status == 0) {
+ return;
+ } else {
+ continue;
+ }
}
}
}
@@ -426,13 +430,17 @@ void GpuWatchdogThread::DeliberatelyTerminateToRecoverFromHang() {
#if defined(USE_X11)
void GpuWatchdogThread::SetupXServer() {
display_ = XOpenDisplay(NULL);
- window_ = XCreateWindow(display_, DefaultRootWindow(display_), 0, 0, 1, 1, 0,
- CopyFromParent, InputOutput, CopyFromParent, 0, NULL);
- atom_ = XInternAtom(display_, "CHECK", x11::False);
+ if (display_) {
+ window_ =
+ XCreateWindow(display_, DefaultRootWindow(display_), 0, 0, 1, 1, 0,
+ CopyFromParent, InputOutput, CopyFromParent, 0, NULL);
+ atom_ = XInternAtom(display_, "CHECK", x11::False);
+ }
host_tty_ = GetActiveTTY();
}
void GpuWatchdogThread::SetupXChangeProp() {
+ DCHECK(display_);
XChangeProperty(display_, window_, atom_, XA_STRING, 8, PropModeReplace, text,
(arraysize(text) - 1));
}
diff --git a/chromium/gpu/ipc/service/image_transport_surface_fuchsia.cc b/chromium/gpu/ipc/service/image_transport_surface_fuchsia.cc
index 8818adfe765..f2feaed4c02 100644
--- a/chromium/gpu/ipc/service/image_transport_surface_fuchsia.cc
+++ b/chromium/gpu/ipc/service/image_transport_surface_fuchsia.cc
@@ -6,8 +6,8 @@
#include "base/logging.h"
#include "ui/gl/gl_surface.h"
-#include "ui/gl/gl_surface_osmesa.h"
#include "ui/gl/gl_surface_stub.h"
+#include "ui/gl/init/gl_factory.h"
namespace gpu {
@@ -16,14 +16,12 @@ scoped_refptr<gl::GLSurface> ImageTransportSurface::CreateNativeSurface(
base::WeakPtr<ImageTransportSurfaceDelegate> delegate,
SurfaceHandle surface_handle,
gl::GLSurfaceFormat format) {
- if (gl::GetGLImplementation() == gl::kGLImplementationOSMesaGL) {
- return gl::InitializeGLSurfaceWithFormat(
- new gl::GLSurfaceOSMesa(format, gfx::Size(1, 1)), format);
+ if (gl::GetGLImplementation() == gl::kGLImplementationMockGL ||
+ gl::GetGLImplementation() == gl::kGLImplementationStubGL) {
+ return new gl::GLSurfaceStub;
}
- DCHECK(gl::GetGLImplementation() == gl::kGLImplementationMockGL ||
- gl::GetGLImplementation() == gl::kGLImplementationStubGL);
- return new gl::GLSurfaceStub;
+ return gl::init::CreateViewGLSurface(surface_handle);
}
} // namespace gpu
diff --git a/chromium/gpu/ipc/service/image_transport_surface_mac.mm b/chromium/gpu/ipc/service/image_transport_surface_mac.mm
index d67f9edc4c2..b13d6397734 100644
--- a/chromium/gpu/ipc/service/image_transport_surface_mac.mm
+++ b/chromium/gpu/ipc/service/image_transport_surface_mac.mm
@@ -53,6 +53,7 @@ scoped_refptr<gl::GLSurface> ImageTransportSurface::CreateNativeSurface(
case gl::kGLImplementationDesktopGL:
case gl::kGLImplementationDesktopGLCoreProfile:
case gl::kGLImplementationAppleGL:
+ case gl::kGLImplementationEGLGLES2:
return base::WrapRefCounted<gl::GLSurface>(
new ImageTransportSurfaceOverlayMac(delegate));
case gl::kGLImplementationMockGL:
diff --git a/chromium/gpu/ipc/service/image_transport_surface_overlay_mac.h b/chromium/gpu/ipc/service/image_transport_surface_overlay_mac.h
index 3838521c04d..89abf863160 100644
--- a/chromium/gpu/ipc/service/image_transport_surface_overlay_mac.h
+++ b/chromium/gpu/ipc/service/image_transport_surface_overlay_mac.h
@@ -5,15 +5,12 @@
#ifndef GPU_IPC_SERVICE_IMAGE_TRANSPORT_SURFACE_OVERLAY_MAC_H_
#define GPU_IPC_SERVICE_IMAGE_TRANSPORT_SURFACE_OVERLAY_MAC_H_
-#include <list>
-#include <memory>
#include <vector>
#import "base/mac/scoped_nsobject.h"
-#include "base/timer/timer.h"
#include "gpu/ipc/service/command_buffer_stub.h"
#include "gpu/ipc/service/image_transport_surface.h"
-#include "ui/base/cocoa/remote_layer_api.h"
+#include "ui/gl/gl_bindings.h"
#include "ui/gl/gl_surface.h"
#include "ui/gl/gpu_switching_observer.h"
diff --git a/chromium/gpu/ipc/service/image_transport_surface_overlay_mac.mm b/chromium/gpu/ipc/service/image_transport_surface_overlay_mac.mm
index 8cad9b07e04..3ac5d2293c7 100644
--- a/chromium/gpu/ipc/service/image_transport_surface_overlay_mac.mm
+++ b/chromium/gpu/ipc/service/image_transport_surface_overlay_mac.mm
@@ -4,24 +4,7 @@
#include "gpu/ipc/service/image_transport_surface_overlay_mac.h"
-#include <CoreGraphics/CoreGraphics.h>
-#include <IOSurface/IOSurface.h>
-#include <OpenGL/CGLRenderers.h>
-#include <OpenGL/CGLTypes.h>
-#include <OpenGL/gl.h>
-#include <stddef.h>
-
-#include <algorithm>
-
-// This type consistently causes problem on Mac, and needs to be dealt with
-// in a systemic way.
-// http://crbug.com/517208
-#ifndef GL_OES_EGL_image
-typedef void* GLeglImageOES;
-#endif
-
#include "base/bind.h"
-#include "base/bind_helpers.h"
#include "base/command_line.h"
#include "base/metrics/histogram_macros.h"
#include "base/threading/thread_task_runner_handle.h"
@@ -32,12 +15,8 @@ typedef void* GLeglImageOES;
#include "gpu/ipc/service/image_transport_surface_delegate.h"
#include "ui/accelerated_widget_mac/ca_layer_tree_coordinator.h"
#include "ui/accelerated_widget_mac/io_surface_context.h"
-#include "ui/base/cocoa/animation_utils.h"
#include "ui/base/cocoa/remote_layer_api.h"
#include "ui/base/ui_base_switches.h"
-#include "ui/gfx/geometry/rect_conversions.h"
-#include "ui/gfx/swap_result.h"
-#include "ui/gfx/transform.h"
#include "ui/gl/ca_renderer_layer_params.h"
#include "ui/gl/gl_context.h"
#include "ui/gl/gl_fence.h"
@@ -54,9 +33,6 @@ void CheckGLErrors(const char* msg) {
}
}
-void IOSurfaceContextNoOp(scoped_refptr<ui::IOSurfaceContext>) {
-}
-
} // namespace
namespace gpu {
@@ -390,7 +366,10 @@ void ImageTransportSurfaceOverlayMac::OnGpuSwitched() {
// this is to avoid creating-then-destroying the context for every image
// transport surface that is observing the GPU switch.
base::ThreadTaskRunnerHandle::Get()->PostTask(
- FROM_HERE, base::Bind(&IOSurfaceContextNoOp, context_on_new_gpu));
+ FROM_HERE,
+ base::Bind(
+ base::DoNothing::Repeatedly<scoped_refptr<ui::IOSurfaceContext>>(),
+ context_on_new_gpu));
}
} // namespace gpu
diff --git a/chromium/gpu/ipc/service/raster_command_buffer_stub.cc b/chromium/gpu/ipc/service/raster_command_buffer_stub.cc
index de0648d5a7b..49a08a3bf49 100644
--- a/chromium/gpu/ipc/service/raster_command_buffer_stub.cc
+++ b/chromium/gpu/ipc/service/raster_command_buffer_stub.cc
@@ -133,8 +133,8 @@ gpu::ContextResult RasterCommandBufferStub::Initialize(
command_buffer_ = std::make_unique<CommandBufferService>(
this, context_group_->transfer_buffer_manager());
- auto decoder = std::make_unique<raster::RasterDecoder>(
- this, command_buffer_.get(), manager->outputter(), context_group_.get());
+ std::unique_ptr<raster::RasterDecoder> decoder(raster::RasterDecoder::Create(
+ this, command_buffer_.get(), manager->outputter(), context_group_.get()));
sync_point_client_state_ =
channel_->sync_point_manager()->CreateSyncPointClientState(
diff --git a/chromium/gpu/perftests/texture_upload_perftest.cc b/chromium/gpu/perftests/texture_upload_perftest.cc
index 6ad71c7cd67..bc683b1a946 100644
--- a/chromium/gpu/perftests/texture_upload_perftest.cc
+++ b/chromium/gpu/perftests/texture_upload_perftest.cc
@@ -149,6 +149,7 @@ bool CompareBufferToRGBABuffer(GLenum format,
case GL_LUMINANCE: // (L_t, L_t, L_t, 1)
expected[1] = pixels[pixels_index];
expected[2] = pixels[pixels_index];
+ FALLTHROUGH;
case GL_RED: // (R_t, 0, 0, 1)
expected[0] = pixels[pixels_index];
expected[3] = 255;
diff --git a/chromium/gpu/raster_export.h b/chromium/gpu/raster_export.h
new file mode 100644
index 00000000000..6e1641dc1b2
--- /dev/null
+++ b/chromium/gpu/raster_export.h
@@ -0,0 +1,29 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef GPU_RASTER_EXPORT_H_
+#define GPU_RASTER_EXPORT_H_
+
+#if defined(COMPONENT_BUILD) && !defined(NACL_WIN64)
+#if defined(WIN32)
+
+#if defined(RASTER_IMPLEMENTATION)
+#define RASTER_EXPORT __declspec(dllexport)
+#else
+#define RASTER_EXPORT __declspec(dllimport)
+#endif // defined(RASTER_IMPLEMENTATION)
+
+#else // defined(WIN32)
+#if defined(RASTER_IMPLEMENTATION)
+#define RASTER_EXPORT __attribute__((visibility("default")))
+#else
+#define RASTER_EXPORT
+#endif
+#endif
+
+#else // defined(COMPONENT_BUILD)
+#define RASTER_EXPORT
+#endif
+
+#endif // GPU_RASTER_EXPORT_H_
diff --git a/chromium/gpu/skia_bindings/grcontext_for_gles2_interface.cc b/chromium/gpu/skia_bindings/grcontext_for_gles2_interface.cc
index 7ace97fd6ed..311b27a6e21 100644
--- a/chromium/gpu/skia_bindings/grcontext_for_gles2_interface.cc
+++ b/chromium/gpu/skia_bindings/grcontext_for_gles2_interface.cc
@@ -70,6 +70,7 @@ GrContextForGLES2Interface::GrContextForGLES2Interface(
options.fGlyphCacheTextureMaximumBytes = max_glyph_cache_texture_bytes;
options.fAvoidStencilBuffers = capabilities.avoid_stencil_buffers;
options.fAllowPathMaskCaching = false;
+ options.fSharpenMipmappedTextures = true;
sk_sp<GrGLInterface> interface(
skia_bindings::CreateGLES2InterfaceBindings(gl));
gr_context_ = GrContext::MakeGL(std::move(interface), options);