author     Allan Sandfeld Jensen <allan.jensen@qt.io>  2017-11-20 10:33:36 +0100
committer  Allan Sandfeld Jensen <allan.jensen@qt.io>  2017-11-22 11:45:12 +0000
commit     be59a35641616a4cf23c4a13fa0632624b021c1b (patch)
tree       9da183258bdf9cc413f7562079d25ace6955467f /chromium/gpu
parent     d702e4b6a64574e97fc7df8fe3238cde70242080 (diff)
download   qtwebengine-chromium-be59a35641616a4cf23c4a13fa0632624b021c1b.tar.gz
BASELINE: Update Chromium to 62.0.3202.101
Change-Id: I2d5eca8117600df6d331f6166ab24d943d9814ac
Reviewed-by: Alexandru Croitor <alexandru.croitor@qt.io>
Diffstat (limited to 'chromium/gpu')
-rw-r--r--  chromium/gpu/BUILD.gn | 10
-rw-r--r--  chromium/gpu/DEPS | 1
-rw-r--r--  chromium/gpu/GLES2/extensions/CHROMIUM/CHROMIUM_resize.txt | 81
-rw-r--r--  chromium/gpu/GLES2/extensions/CHROMIUM/CHROMIUM_texture_filtering_hint.txt | 86
-rw-r--r--  chromium/gpu/GLES2/gl2chromium_autogen.h | 5
-rw-r--r--  chromium/gpu/GLES2/gl2extchromium.h | 31
-rwxr-xr-x  chromium/gpu/command_buffer/build_gles2_cmd_buffer.py | 34
-rw-r--r--  chromium/gpu/command_buffer/client/BUILD.gn | 21
-rw-r--r--  chromium/gpu/command_buffer/client/DEPS | 2
-rw-r--r--  chromium/gpu/command_buffer/client/client_test_helper.h | 3
-rw-r--r--  chromium/gpu/command_buffer/client/cmd_buffer_helper.cc | 107
-rw-r--r--  chromium/gpu/command_buffer/client/cmd_buffer_helper.h | 19
-rw-r--r--  chromium/gpu/command_buffer/client/cmd_buffer_helper_test.cc | 93
-rw-r--r--  chromium/gpu/command_buffer/client/command_buffer_direct_locked.cc | 55
-rw-r--r--  chromium/gpu/command_buffer/client/command_buffer_direct_locked.h | 52
-rw-r--r--  chromium/gpu/command_buffer/client/context_support.h | 7
-rw-r--r--  chromium/gpu/command_buffer/client/fenced_allocator.cc | 31
-rw-r--r--  chromium/gpu/command_buffer/client/fenced_allocator.h | 26
-rw-r--r--  chromium/gpu/command_buffer/client/fenced_allocator_test.cc | 14
-rw-r--r--  chromium/gpu/command_buffer/client/gles2_c_lib_autogen.h | 56
-rw-r--r--  chromium/gpu/command_buffer/client/gles2_cmd_helper_autogen.h | 56
-rw-r--r--  chromium/gpu/command_buffer/client/gles2_implementation.cc | 195
-rw-r--r--  chromium/gpu/command_buffer/client/gles2_implementation.h | 22
-rw-r--r--  chromium/gpu/command_buffer/client/gles2_implementation_autogen.h | 23
-rw-r--r--  chromium/gpu/command_buffer/client/gles2_implementation_impl_autogen.h | 60
-rw-r--r--  chromium/gpu/command_buffer/client/gles2_implementation_unittest.cc | 63
-rw-r--r--  chromium/gpu/command_buffer/client/gles2_implementation_unittest_autogen.h | 45
-rw-r--r--  chromium/gpu/command_buffer/client/gles2_interface.h | 5
-rw-r--r--  chromium/gpu/command_buffer/client/gles2_interface_autogen.h | 19
-rw-r--r--  chromium/gpu/command_buffer/client/gles2_interface_stub_autogen.h | 19
-rw-r--r--  chromium/gpu/command_buffer/client/gles2_interface_stub_impl_autogen.h | 21
-rw-r--r--  chromium/gpu/command_buffer/client/gles2_trace_implementation.h | 3
-rw-r--r--  chromium/gpu/command_buffer/client/gles2_trace_implementation_autogen.h | 19
-rw-r--r--  chromium/gpu/command_buffer/client/gles2_trace_implementation_impl_autogen.h | 44
-rw-r--r--  chromium/gpu/command_buffer/client/gpu_control.h | 8
-rw-r--r--  chromium/gpu/command_buffer/client/mapped_memory.cc | 23
-rw-r--r--  chromium/gpu/command_buffer/client/mapped_memory.h | 16
-rw-r--r--  chromium/gpu/command_buffer/client/mapped_memory_unittest.cc | 51
-rw-r--r--  chromium/gpu/command_buffer/client/query_tracker.cc | 155
-rw-r--r--  chromium/gpu/command_buffer/client/query_tracker.h | 77
-rw-r--r--  chromium/gpu/command_buffer/client/query_tracker_unittest.cc | 184
-rw-r--r--  chromium/gpu/command_buffer/client/ring_buffer.cc | 20
-rw-r--r--  chromium/gpu/command_buffer/client/ring_buffer.h | 3
-rw-r--r--  chromium/gpu/command_buffer/client/shared_memory_limits.h | 31
-rw-r--r--  chromium/gpu/command_buffer/client/transfer_buffer.cc | 13
-rw-r--r--  chromium/gpu/command_buffer/client/transfer_buffer.h | 6
-rw-r--r--  chromium/gpu/command_buffer/client/transfer_buffer_unittest.cc | 48
-rw-r--r--  chromium/gpu/command_buffer/cmd_buffer_functions.txt | 16
-rw-r--r--  chromium/gpu/command_buffer/common/BUILD.gn | 1
-rw-r--r--  chromium/gpu/command_buffer/common/capabilities.h | 3
-rw-r--r--  chromium/gpu/command_buffer/common/constants.h | 2
-rw-r--r--  chromium/gpu/command_buffer/common/gl2_types.h | 30
-rw-r--r--  chromium/gpu/command_buffer/common/gles2_cmd_format.h | 29
-rw-r--r--  chromium/gpu/command_buffer/common/gles2_cmd_format_autogen.h | 263
-rw-r--r--  chromium/gpu/command_buffer/common/gles2_cmd_format_test_autogen.h | 77
-rw-r--r--  chromium/gpu/command_buffer/common/gles2_cmd_ids_autogen.h | 92
-rw-r--r--  chromium/gpu/command_buffer/common/gles2_cmd_utils.cc | 19
-rw-r--r--  chromium/gpu/command_buffer/common/gles2_cmd_utils.h | 37
-rw-r--r--  chromium/gpu/command_buffer/common/gles2_cmd_utils_implementation_autogen.h | 15
-rw-r--r--  chromium/gpu/command_buffer/common/mailbox_holder.h | 3
-rw-r--r--  chromium/gpu/command_buffer/service/BUILD.gn | 7
-rw-r--r--  chromium/gpu/command_buffer/service/DEPS | 2
-rw-r--r--  chromium/gpu/command_buffer/service/buffer_manager.cc | 8
-rw-r--r--  chromium/gpu/command_buffer/service/command_buffer_service.cc | 12
-rw-r--r--  chromium/gpu/command_buffer/service/command_buffer_service.h | 1
-rw-r--r--  chromium/gpu/command_buffer/service/command_buffer_service_unittest.cc | 34
-rw-r--r--  chromium/gpu/command_buffer/service/context_group.cc | 4
-rw-r--r--  chromium/gpu/command_buffer/service/context_group.h | 6
-rw-r--r--  chromium/gpu/command_buffer/service/context_group_unittest.cc | 5
-rw-r--r--  chromium/gpu/command_buffer/service/context_state.cc | 14
-rw-r--r--  chromium/gpu/command_buffer/service/create_gr_gl_interface.cc | 490
-rw-r--r--  chromium/gpu/command_buffer/service/create_gr_gl_interface.h | 25
-rw-r--r--  chromium/gpu/command_buffer/service/disk_cache_proto.proto | 2
-rw-r--r--  chromium/gpu/command_buffer/service/feature_info.cc | 369
-rw-r--r--  chromium/gpu/command_buffer/service/feature_info.h | 12
-rw-r--r--  chromium/gpu/command_buffer/service/feature_info_unittest.cc | 91
-rw-r--r--  chromium/gpu/command_buffer/service/framebuffer_manager.cc | 6
-rw-r--r--  chromium/gpu/command_buffer/service/gl_context_virtual.cc | 11
-rw-r--r--  chromium/gpu/command_buffer/service/gl_context_virtual.h | 6
-rw-r--r--  chromium/gpu/command_buffer/service/gl_stream_texture_image.h | 4
-rw-r--r--  chromium/gpu/command_buffer/service/gl_surface_mock.h | 7
-rw-r--r--  chromium/gpu/command_buffer/service/gl_utils.h | 18
-rw-r--r--  chromium/gpu/command_buffer/service/gles2_cmd_decoder.cc | 498
-rw-r--r--  chromium/gpu/command_buffer/service/gles2_cmd_decoder.h | 14
-rw-r--r--  chromium/gpu/command_buffer/service/gles2_cmd_decoder_autogen.h | 54
-rw-r--r--  chromium/gpu/command_buffer/service/gles2_cmd_decoder_mock.h | 5
-rw-r--r--  chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough.cc | 291
-rw-r--r--  chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough.h | 40
-rw-r--r--  chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough_doer_prototypes.h | 13
-rw-r--r--  chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough_doers.cc | 323
-rw-r--r--  chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough_handlers.cc | 14
-rw-r--r--  chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough_handlers_autogen.cc | 38
-rw-r--r--  chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough_unittest_buffers.cc | 706
-rw-r--r--  chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest.cc | 16
-rw-r--r--  chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest.h | 12
-rw-r--r--  chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_1_autogen.h | 18
-rw-r--r--  chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_attribs.cc | 48
-rw-r--r--  chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_base.cc | 205
-rw-r--r--  chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_base.h | 129
-rw-r--r--  chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_context_lost.cc | 8
-rw-r--r--  chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_drawing.cc | 8
-rw-r--r--  chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_extensions.cc | 20
-rw-r--r--  chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_framebuffers.cc | 26
-rw-r--r--  chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_programs.cc | 8
-rw-r--r--  chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_textures.cc | 72
-rw-r--r--  chromium/gpu/command_buffer/service/gpu_preferences.cc | 8
-rw-r--r--  chromium/gpu/command_buffer/service/gpu_switches.cc | 4
-rw-r--r--  chromium/gpu/command_buffer/service/gpu_switches.h | 1
-rw-r--r--  chromium/gpu/command_buffer/service/gpu_tracer.cc | 8
-rw-r--r--  chromium/gpu/command_buffer/service/memory_program_cache.cc | 156
-rw-r--r--  chromium/gpu/command_buffer/service/memory_program_cache.h | 25
-rw-r--r--  chromium/gpu/command_buffer/service/memory_program_cache_unittest.cc | 10
-rw-r--r--  chromium/gpu/command_buffer/service/mocks.h | 5
-rw-r--r--  chromium/gpu/command_buffer/service/passthrough_program_cache.cc | 138
-rw-r--r--  chromium/gpu/command_buffer/service/passthrough_program_cache.h | 57
-rw-r--r--  chromium/gpu/command_buffer/service/program_manager.cc | 1
-rw-r--r--  chromium/gpu/command_buffer/service/program_manager_unittest.cc | 65
-rw-r--r--  chromium/gpu/command_buffer/service/renderbuffer_manager.cc | 2
-rw-r--r--  chromium/gpu/command_buffer/service/scheduler.cc | 30
-rw-r--r--  chromium/gpu/command_buffer/service/scheduler.h | 21
-rw-r--r--  chromium/gpu/command_buffer/service/scheduler_unittest.cc | 158
-rw-r--r--  chromium/gpu/command_buffer/service/service_discardable_manager_unittest.cc | 2
-rw-r--r--  chromium/gpu/command_buffer/service/service_utils.cc | 26
-rw-r--r--  chromium/gpu/command_buffer/service/service_utils.h | 7
-rw-r--r--  chromium/gpu/command_buffer/service/shader_manager.cc | 7
-rw-r--r--  chromium/gpu/command_buffer/service/shader_manager.h | 7
-rw-r--r--  chromium/gpu/command_buffer/service/shader_manager_unittest.cc | 14
-rw-r--r--  chromium/gpu/command_buffer/service/shader_translator.cc | 17
-rw-r--r--  chromium/gpu/command_buffer/service/shader_translator.h | 12
-rw-r--r--  chromium/gpu/command_buffer/service/shader_translator_cache.h | 2
-rw-r--r--  chromium/gpu/command_buffer/service/shader_translator_unittest.cc | 15
-rw-r--r--  chromium/gpu/command_buffer/service/test_helper.cc | 126
-rw-r--r--  chromium/gpu/command_buffer/service/test_helper.h | 23
-rw-r--r--  chromium/gpu/command_buffer/service/texture_manager.cc | 36
-rw-r--r--  chromium/gpu/command_buffer/service/texture_manager.h | 6
-rw-r--r--  chromium/gpu/command_buffer/service/texture_manager_unittest.cc | 36
-rw-r--r--  chromium/gpu/command_buffer/service/transfer_buffer_manager.cc | 8
-rw-r--r--  chromium/gpu/command_buffer/service/transfer_buffer_manager.h | 4
-rw-r--r--  chromium/gpu/command_buffer/service/vertex_attrib_manager.cc | 27
-rw-r--r--  chromium/gpu/command_buffer/service/vertex_attrib_manager.h | 22
-rw-r--r--  chromium/gpu/config/BUILD.gn | 6
-rw-r--r--  chromium/gpu/config/gpu_control_list.cc | 45
-rw-r--r--  chromium/gpu/config/gpu_control_list.h | 37
-rw-r--r--  chromium/gpu/config/gpu_control_list_unittest.cc | 11
-rw-r--r--  chromium/gpu/config/gpu_driver_bug_list.cc | 9
-rw-r--r--  chromium/gpu/config/gpu_driver_bug_list.json | 67
-rw-r--r--  chromium/gpu/config/gpu_driver_bug_workaround_type.h | 8
-rw-r--r--  chromium/gpu/config/gpu_driver_bug_workarounds.cc | 74
-rw-r--r--  chromium/gpu/config/gpu_driver_bug_workarounds.h | 25
-rw-r--r--  chromium/gpu/config/gpu_dx_diagnostics_win.cc | 15
-rw-r--r--  chromium/gpu/config/gpu_feature_info.cc | 35
-rw-r--r--  chromium/gpu/config/gpu_feature_info.h | 30
-rw-r--r--  chromium/gpu/config/gpu_info.cc | 6
-rw-r--r--  chromium/gpu/config/gpu_info.h | 4
-rw-r--r--  chromium/gpu/config/gpu_info_collector.cc | 45
-rw-r--r--  chromium/gpu/config/gpu_info_collector_fuchsia.cc | 31
-rw-r--r--  chromium/gpu/config/gpu_info_collector_linux.cc | 6
-rw-r--r--  chromium/gpu/config/gpu_info_collector_mac.mm | 6
-rw-r--r--  chromium/gpu/config/gpu_info_collector_win.cc | 15
-rw-r--r--  chromium/gpu/config/gpu_switches.cc | 14
-rw-r--r--  chromium/gpu/config/gpu_switches.h | 4
-rw-r--r--  chromium/gpu/config/gpu_switching.cc | 100
-rw-r--r--  chromium/gpu/config/gpu_switching.h | 39
-rw-r--r--  chromium/gpu/config/gpu_test_config.cc | 6
-rw-r--r--  chromium/gpu/config/gpu_util.cc | 187
-rw-r--r--  chromium/gpu/config/gpu_util.h | 27
-rw-r--r--  chromium/gpu/config/gpu_util_unittest.cc | 100
-rwxr-xr-x  chromium/gpu/config/process_json.py | 5
-rw-r--r--  chromium/gpu/config/software_rendering_list.json | 34
-rw-r--r--  chromium/gpu/gles2_conform_support/egl/context.cc | 34
-rw-r--r--  chromium/gpu/gles2_conform_support/egl/context.h | 8
-rw-r--r--  chromium/gpu/gles2_conform_support/egl/thread_state.cc | 9
-rw-r--r--  chromium/gpu/ipc/client/DEPS | 6
-rw-r--r--  chromium/gpu/ipc/client/command_buffer_proxy_impl.cc | 153
-rw-r--r--  chromium/gpu/ipc/client/command_buffer_proxy_impl.h | 27
-rw-r--r--  chromium/gpu/ipc/client/gpu_channel_host.cc | 169
-rw-r--r--  chromium/gpu/ipc/client/gpu_channel_host.h | 70
-rw-r--r--  chromium/gpu/ipc/client/gpu_in_process_context_tests.cc | 74
-rw-r--r--  chromium/gpu/ipc/client/gpu_memory_buffer_impl_io_surface.cc | 10
-rw-r--r--  chromium/gpu/ipc/client/gpu_memory_buffer_impl_shared_memory.cc | 8
-rw-r--r--  chromium/gpu/ipc/client/gpu_memory_buffer_impl_test_template.h | 5
-rw-r--r--  chromium/gpu/ipc/common/BUILD.gn | 2
-rw-r--r--  chromium/gpu/ipc/common/flush_params.cc | 16
-rw-r--r--  chromium/gpu/ipc/common/flush_params.h | 41
-rw-r--r--  chromium/gpu/ipc/common/gpu_command_buffer_traits_multi.h | 3
-rw-r--r--  chromium/gpu/ipc/common/gpu_feature_info.mojom | 13
-rw-r--r--  chromium/gpu/ipc/common/gpu_feature_info_struct_traits.h | 21
-rw-r--r--  chromium/gpu/ipc/common/gpu_info_struct_traits.cc | 2
-rw-r--r--  chromium/gpu/ipc/common/gpu_info_struct_traits.h | 4
-rw-r--r--  chromium/gpu/ipc/common/gpu_memory_buffer_support.cc | 3
-rw-r--r--  chromium/gpu/ipc/common/gpu_messages.h | 13
-rw-r--r--  chromium/gpu/ipc/common/gpu_param_traits_macros.h | 13
-rw-r--r--  chromium/gpu/ipc/common/mailbox_struct_traits.cc | 11
-rw-r--r--  chromium/gpu/ipc/common/mailbox_struct_traits.h | 9
-rw-r--r--  chromium/gpu/ipc/common/struct_traits_unittest.cc | 6
-rw-r--r--  chromium/gpu/ipc/gl_in_process_context.cc | 6
-rw-r--r--  chromium/gpu/ipc/gpu_in_process_thread_service.cc | 8
-rw-r--r--  chromium/gpu/ipc/gpu_in_process_thread_service.h | 5
-rw-r--r--  chromium/gpu/ipc/host/gpu_memory_buffer_support.cc | 6
-rw-r--r--  chromium/gpu/ipc/host/shader_disk_cache.cc | 27
-rw-r--r--  chromium/gpu/ipc/host/shader_disk_cache.h | 13
-rw-r--r--  chromium/gpu/ipc/host/shader_disk_cache_unittest.cc | 26
-rw-r--r--  chromium/gpu/ipc/in_process_command_buffer.cc | 83
-rw-r--r--  chromium/gpu/ipc/in_process_command_buffer.h | 27
-rw-r--r--  chromium/gpu/ipc/service/BUILD.gn | 5
-rw-r--r--  chromium/gpu/ipc/service/OWNERS | 3
-rw-r--r--  chromium/gpu/ipc/service/child_window_surface_win.cc | 176
-rw-r--r--  chromium/gpu/ipc/service/child_window_surface_win.h | 46
-rw-r--r--  chromium/gpu/ipc/service/direct_composition_child_surface_win.cc | 26
-rw-r--r--  chromium/gpu/ipc/service/direct_composition_child_surface_win.h | 11
-rw-r--r--  chromium/gpu/ipc/service/direct_composition_surface_win.cc | 137
-rw-r--r--  chromium/gpu/ipc/service/direct_composition_surface_win.h | 5
-rw-r--r--  chromium/gpu/ipc/service/direct_composition_surface_win_unittest.cc | 200
-rw-r--r--  chromium/gpu/ipc/service/gpu_channel.cc | 71
-rw-r--r--  chromium/gpu/ipc/service/gpu_channel_manager.cc | 29
-rw-r--r--  chromium/gpu/ipc/service/gpu_channel_manager.h | 4
-rw-r--r--  chromium/gpu/ipc/service/gpu_channel_test_common.cc | 1
-rw-r--r--  chromium/gpu/ipc/service/gpu_command_buffer_stub.cc | 59
-rw-r--r--  chromium/gpu/ipc/service/gpu_command_buffer_stub.h | 12
-rw-r--r--  chromium/gpu/ipc/service/gpu_init.cc | 191
-rw-r--r--  chromium/gpu/ipc/service/gpu_init.h | 3
-rw-r--r--  chromium/gpu/ipc/service/gpu_memory_buffer_factory_io_surface.h | 2
-rw-r--r--  chromium/gpu/ipc/service/gpu_memory_buffer_factory_test_template.h | 5
-rw-r--r--  chromium/gpu/ipc/service/gpu_vsync_provider_unittest_win.cc | 5
-rw-r--r--  chromium/gpu/ipc/service/gpu_vsync_provider_win.cc | 10
-rw-r--r--  chromium/gpu/ipc/service/gpu_watchdog_thread.cc | 2
-rw-r--r--  chromium/gpu/ipc/service/image_transport_surface_delegate.h | 3
-rw-r--r--  chromium/gpu/ipc/service/image_transport_surface_fuchsia.cc | 22
-rw-r--r--  chromium/gpu/ipc/service/image_transport_surface_overlay_mac.h | 1
-rw-r--r--  chromium/gpu/ipc/service/image_transport_surface_overlay_mac.mm | 1
-rw-r--r--  chromium/gpu/ipc/service/image_transport_surface_win.cc | 28
-rw-r--r--  chromium/gpu/ipc/service/stream_texture_android.cc | 4
-rw-r--r--  chromium/gpu/ipc/service/switches.cc | 2
-rw-r--r--  chromium/gpu/skia_bindings/gl_bindings_skia_cmd_buffer.cc | 3
-rw-r--r--  chromium/gpu/tools/compositor_model_bench/BUILD.gn | 2
235 files changed, 7575 insertions, 2752 deletions
diff --git a/chromium/gpu/BUILD.gn b/chromium/gpu/BUILD.gn
index 86b3d704ba7..4f0042e2bb6 100644
--- a/chromium/gpu/BUILD.gn
+++ b/chromium/gpu/BUILD.gn
@@ -163,6 +163,7 @@ test("gl_tests") {
"command_buffer/tests/gl_query_unittest.cc",
"command_buffer/tests/gl_readback_unittest.cc",
"command_buffer/tests/gl_request_extension_unittest.cc",
+ "command_buffer/tests/gl_set_aggressively_free_resources_unittest.cc",
"command_buffer/tests/gl_shared_resources_unittest.cc",
"command_buffer/tests/gl_stream_draw_unittest.cc",
"command_buffer/tests/gl_test_utils.cc",
@@ -191,6 +192,7 @@ test("gl_tests") {
"//base",
"//base/test:test_support",
"//base/third_party/dynamic_annotations",
+ "//components/viz/test:test_support",
"//gpu/command_buffer/client:gles2_c_lib",
"//gpu/command_buffer/client:gles2_implementation",
"//gpu/command_buffer/common:gles2_utils",
@@ -224,6 +226,8 @@ test("gpu_unittests") {
"command_buffer/client/buffer_tracker_unittest.cc",
"command_buffer/client/client_discardable_manager_unittest.cc",
"command_buffer/client/cmd_buffer_helper_test.cc",
+ "command_buffer/client/command_buffer_direct_locked.cc",
+ "command_buffer/client/command_buffer_direct_locked.h",
"command_buffer/client/fenced_allocator_test.cc",
"command_buffer/client/gles2_implementation_unittest.cc",
"command_buffer/client/mapped_memory_unittest.cc",
@@ -333,6 +337,12 @@ test("gpu_unittests") {
sources += [ "ipc/client/gpu_memory_buffer_impl_native_pixmap_unittest.cc" ]
}
+ # TODO(geofflang): Run passthrough command decoder unittests on more platforms
+ # once initialization of ANGLE's NULL context is supported
+ if ((is_win || (is_linux && !use_ozone)) && !is_asan) {
+ sources += [ "command_buffer/service/gles2_cmd_decoder_passthrough_unittest_buffers.cc" ]
+ }
+
configs += [
"//build/config:precompiled_headers",
diff --git a/chromium/gpu/DEPS b/chromium/gpu/DEPS
index a395bb7ce2f..0273fe34a2f 100644
--- a/chromium/gpu/DEPS
+++ b/chromium/gpu/DEPS
@@ -5,6 +5,7 @@ include_rules = [
"+third_party/smhasher",
"+third_party/swiftshader",
"+third_party/protbuf",
+ "+third_party/zlib",
"+crypto",
"+ui/gfx",
"+ui/gl",
diff --git a/chromium/gpu/GLES2/extensions/CHROMIUM/CHROMIUM_resize.txt b/chromium/gpu/GLES2/extensions/CHROMIUM/CHROMIUM_resize.txt
new file mode 100644
index 00000000000..18ec563405f
--- /dev/null
+++ b/chromium/gpu/GLES2/extensions/CHROMIUM/CHROMIUM_resize.txt
@@ -0,0 +1,81 @@
+Name
+
+ CHROMIUM_resize
+
+Name Strings
+
+ GL_CHROMIUM_resize
+
+Version
+
+ Last Modified Date: July 26, 2017
+
+Dependencies
+
+ OpenGL ES 2.0 is required.
+
+Overview
+
+ This extension allows a client to request that the output surface be
+ dynamically resized.
+
+Issues
+
+ None
+
+New Tokens
+
+ Accepted by the <color_space> parameter of glResizeCHROMIUM:
+ GL_COLOR_SPACE_UNSPECIFIED_CHROMIUM 0x8AF1
+ GL_COLOR_SPACE_SCRGB_LINEAR_CHROMIUM 0x8AF2
+ GL_COLOR_SPACE_SRGB_CHROMIUM 0x8AF3
+ GL_COLOR_SPACE_DISPLAY_P3_CHROMIUM 0x8AF4
+
+New Procedures and Functions
+
+ The command
+
+ glResizeCHROMIUM(GLint width,
+ GLint height,
+ GLfloat scale_factor,
+ GLenum color_space,
+ GLboolean alpha);
+
+ changes the size of the current output surface.
+ <width> and <height> specify the dimensions for the surface in pixels.
+ <scale_factor> specifies the device scale factor for the surface.
+ <color_space> specifies the color space in which the pixels of the surface
+ should be interpreted by the display system. Note that this value does not
+ impact blending. All blending will be done on the raw pixel values.
+ The values have the following interpretation:
+ GL_COLOR_SPACE_UNSPECIFIED_CHROMIUM: Indicates that the display system
+ should use whatever its default interpretation of color values is.
+ GL_COLOR_SPACE_SRGB_CHROMIUM: Indicates that the display system should
+ interpret output colors as being sRGB values. On EGL-based systems this
+ corresponds to using the default value, EGL_GL_COLORSPACE_LINEAR_KHR,
+ for the EGL_GL_COLORSPACE_KHR property in EGL_KHR_gl_colorspace.
+ GL_COLOR_SPACE_DISPLAY_P3_CHROMIUM: Indicates that the display system
+ should interpret output colors as being in P3 D65 color space. As above,
+ this corresponds to the EGL_GL_COLORSPACE_DISPLAY_P3_LINEAR_EXT value
+ from EGL_EXT_gl_colorspace_display_p3_linear.
+ GL_COLOR_SPACE_SCRGB_LINEAR_CHROMIUM: Indicates that the display system
+ should interpret output colors as being in linear-gamma extended scRGB
+ color space. On Windows, this will result in HDR being enabled for the
+ surface, when possible.
+ <alpha> indicates whether or not the surface must allocate an alpha channel.
+
+Errors
+
+ A lost context will result when this call fails, either because of an
+ invalid parameter value or because of a runtime error such as an out of
+ memory condition.
+
+New State
+
+ None.
+
+Revision History
+
+ 7/24/2017 Initial checkin. This extension had been in use for several
+ years without documentation prior to this.
+
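For illustration, a minimal client-side sketch of the entry point documented above (assumes a current GLES2 command-buffer context; the handler name and its parameters are placeholders, not part of the extension):

    #include <GLES2/gl2.h>
    #include <GLES2/gl2extchromium.h>

    // Resize the output surface to the new window size, ask the display
    // system to interpret its pixels as sRGB, and keep an alpha channel.
    void OnWindowResized(int width, int height, float device_scale_factor) {
      glResizeCHROMIUM(width, height, device_scale_factor,
                       GL_COLOR_SPACE_SRGB_CHROMIUM, GL_TRUE);
    }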
diff --git a/chromium/gpu/GLES2/extensions/CHROMIUM/CHROMIUM_texture_filtering_hint.txt b/chromium/gpu/GLES2/extensions/CHROMIUM/CHROMIUM_texture_filtering_hint.txt
new file mode 100644
index 00000000000..09170d4ff18
--- /dev/null
+++ b/chromium/gpu/GLES2/extensions/CHROMIUM/CHROMIUM_texture_filtering_hint.txt
@@ -0,0 +1,86 @@
+Name
+
+ CHROMIUM_texture_filtering_hint
+
+Name Strings
+
+ GL_CHROMIUM_texture_filtering_hint
+
+Contributors
+
+ Alexis Hetu, Google Inc.
+ Nicolas Capens, Google Inc.
+ Shannon Woods, Google Inc.
+
+Contact
+
+ Alexis Hetu, Google Inc. (sugoi 'at' chromium 'dot' org)
+
+Version
+
+ Last Modified Date: July 18, 2017
+
+Dependencies
+
+ This extension is written against the OpenGL ES 2.0 specification.
+
+ OpenGL ES 2.0 is required.
+
+Overview
+
+ This extension defines a way to request high precision texture filtering
+ using a new target for Hint.
+
+ When this extension is enabled, TEXTURE_FILTERING_HINT_CHROMIUM can be used
+ by the implementation as a means to distinguish between a performance
+ focused implementation, using FASTEST, and a precision focused
+ implementation, using NICEST.
+
+ Like other hints, either option is spec compliant and the behavior of
+ DONT_CARE is implementation specific.
+
+New Tokens
+
+ Accepted by the <pname> parameter of GetIntegerv, GetFloatv and GetBooleanv
+ and by the <target> parameter of Hint:
+
+ TEXTURE_FILTERING_HINT_CHROMIUM 0x8AF0
+
+New Procedures and Functions
+
+ None.
+
+Errors
+
+ None.
+
+New State
+
+ None.
+
+Issues
+
+ 1) When does the hint take effect?
+
+ At the time of the next draw call, and all subsequent draw calls.
+
+ 2) Does the first draw call after the filtering hint is changed use the
+ updated filtering method?
+
+ Yes
+
+ 3) Can I switch it back and forth between every draw call, multiple times
+ during a single frame?
+
+ Yes
+
+ 4) Do program objects which were created before the filtering hint was
+ changed and which contain sampling instructions use the filtering method
+ from when they were created, or the method at the time of draw call?
+
+ At the time of draw call.
+
+Revision History
+
+ 2/7/2014 Documented the extension
+
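A minimal usage sketch for the hint described above (assumes a context that exposes GL_CHROMIUM_texture_filtering_hint; glHint itself is core OpenGL ES 2.0):

    #include <GLES2/gl2.h>
    #include <GLES2/gl2extchromium.h>

    // Request the precision-focused filtering path; per issue 1) above it
    // takes effect starting with the next draw call.
    void PreferPreciseTextureFiltering() {
      glHint(GL_TEXTURE_FILTERING_HINT_CHROMIUM, GL_NICEST);
    }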
diff --git a/chromium/gpu/GLES2/gl2chromium_autogen.h b/chromium/gpu/GLES2/gl2chromium_autogen.h
index 37f2d8fd4ff..a33d7ba1eee 100644
--- a/chromium/gpu/GLES2/gl2chromium_autogen.h
+++ b/chromium/gpu/GLES2/gl2chromium_autogen.h
@@ -330,6 +330,8 @@
#define glScheduleCALayerSharedStateCHROMIUM \
GLES2_GET_FUN(ScheduleCALayerSharedStateCHROMIUM)
#define glScheduleCALayerCHROMIUM GLES2_GET_FUN(ScheduleCALayerCHROMIUM)
+#define glSetColorSpaceForScanoutCHROMIUM \
+ GLES2_GET_FUN(SetColorSpaceForScanoutCHROMIUM)
#define glScheduleCALayerInUseQueryCHROMIUM \
GLES2_GET_FUN(ScheduleCALayerInUseQueryCHROMIUM)
#define glCommitOverlayPlanesCHROMIUM GLES2_GET_FUN(CommitOverlayPlanesCHROMIUM)
@@ -397,5 +399,8 @@
GLES2_GET_FUN(UnlockDiscardableTextureCHROMIUM)
#define glLockDiscardableTextureCHROMIUM \
GLES2_GET_FUN(LockDiscardableTextureCHROMIUM)
+#define glBeginRasterCHROMIUM GLES2_GET_FUN(BeginRasterCHROMIUM)
+#define glRasterCHROMIUM GLES2_GET_FUN(RasterCHROMIUM)
+#define glEndRasterCHROMIUM GLES2_GET_FUN(EndRasterCHROMIUM)
#endif // GPU_GLES2_GL2CHROMIUM_AUTOGEN_H_
diff --git a/chromium/gpu/GLES2/gl2extchromium.h b/chromium/gpu/GLES2/gl2extchromium.h
index 8c1f3df16cf..ce76cba53ba 100644
--- a/chromium/gpu/GLES2/gl2extchromium.h
+++ b/chromium/gpu/GLES2/gl2extchromium.h
@@ -621,10 +621,32 @@ typedef void (GL_APIENTRYP PFNGLDRAWBUFFERSEXTPROC) (
GL_APICALL void GL_APIENTRY glResizeCHROMIUM(GLuint width,
GLuint height,
GLfloat scale_factor,
+ GLenum color_space,
GLboolean alpha);
+
+#endif
+typedef void(GL_APIENTRYP PFNGLRESIZECHROMIUMPROC)(GLuint width,
+ GLuint height,
+ GLfloat scale_factor,
+ GLenum color_space,
+ GLboolean alpha);
+
+#ifndef GL_COLOR_SPACE_UNSPECIFIED_CHROMIUM
+#define GL_COLOR_SPACE_UNSPECIFIED_CHROMIUM 0x8AF1
#endif
-typedef void (GL_APIENTRYP PFNGLRESIZECHROMIUMPROC) (
- GLuint width, GLuint height);
+
+#ifndef GL_COLOR_SPACE_SCRGB_LINEAR_CHROMIUM
+#define GL_COLOR_SPACE_SCRGB_LINEAR_CHROMIUM 0x8AF2
+#endif
+
+#ifndef GL_COLOR_SPACE_SRGB_CHROMIUM
+#define GL_COLOR_SPACE_SRGB_CHROMIUM 0x8AF3
+#endif
+
+#ifndef GL_COLOR_SPACE_DISPLAY_P3_CHROMIUM
+#define GL_COLOR_SPACE_DISPLAY_P3_CHROMIUM 0x8AF4
+#endif
+
#endif /* GL_CHROMIUM_resize */
/* GL_CHROMIUM_get_multiple */
@@ -1199,6 +1221,11 @@ GL_APICALL void GL_APIENTRY glCoverageModulationCHROMIUM(GLenum components);
#define GL_SAMPLES_PASSED_ARB 0x8914
#endif /* GL_ARB_occlusion_query */
+#ifndef GL_CHROMIUM_texture_filtering_hint
+#define GL_CHROMIUM_texture_filtering_hint 1
+#define GL_TEXTURE_FILTERING_HINT_CHROMIUM 0x8AF0
+#endif /* GL_CHROMIUM_texture_filtering_hint */
+
#ifdef __cplusplus
}
#endif
diff --git a/chromium/gpu/command_buffer/build_gles2_cmd_buffer.py b/chromium/gpu/command_buffer/build_gles2_cmd_buffer.py
index fa67855e8cd..c11416239e4 100755
--- a/chromium/gpu/command_buffer/build_gles2_cmd_buffer.py
+++ b/chromium/gpu/command_buffer/build_gles2_cmd_buffer.py
@@ -2817,6 +2817,7 @@ _FUNCTION_INFO = {
'DisableVertexAttribArray': {
'decoder_func': 'DoDisableVertexAttribArray',
'impl_func': False,
+ 'unit_test': False,
},
'DrawArrays': {
'type': 'Custom',
@@ -2846,6 +2847,7 @@ _FUNCTION_INFO = {
'EnableVertexAttribArray': {
'decoder_func': 'DoEnableVertexAttribArray',
'impl_func': False,
+ 'unit_test': False,
},
'FenceSync': {
'type': 'Create',
@@ -4412,6 +4414,14 @@ _FUNCTION_INFO = {
'GLuint shm_offset',
'extension': 'CHROMIUM_schedule_ca_layer',
},
+ 'SetColorSpaceForScanoutCHROMIUM': {
+ 'type': 'Custom',
+ 'impl_func': False,
+ 'client_test': False,
+ 'cmd_args': 'GLuint texture_id, GLuint shm_id, GLuint shm_offset, '
+ 'GLsizei color_space_size',
+ 'extension': 'CHROMIUM_schedule_ca_layer',
+ },
'CommitOverlayPlanesCHROMIUM': {
'impl_func': False,
'decoder_func': 'DoCommitOverlayPlanes',
@@ -4582,6 +4592,30 @@ _FUNCTION_INFO = {
'client_test': False,
'extension': True,
},
+ 'BeginRasterCHROMIUM': {
+ 'decoder_func': 'DoBeginRasterCHROMIUM',
+ 'impl_func': True,
+ 'unit_test': False,
+ 'extension': 'CHROMIUM_raster_transport',
+ 'extension_flag': 'chromium_raster_transport',
+ },
+ 'RasterCHROMIUM': {
+ 'type': 'Custom',
+ 'decoder_func': 'DoRasterCHROMIUM',
+ 'impl_func': False,
+ 'immediate': False,
+ 'data_transfer_methods': ['shm'],
+ 'needs_size': True,
+ 'extension': 'CHROMIUM_raster_transport',
+ 'extension_flag': 'chromium_raster_transport',
+ },
+ 'EndRasterCHROMIUM': {
+ 'decoder_func': 'DoEndRasterCHROMIUM',
+ 'impl_func': True,
+ 'unit_test': False,
+ 'extension': 'CHROMIUM_raster_transport',
+ 'extension_flag': 'chromium_raster_transport',
+ },
}
diff --git a/chromium/gpu/command_buffer/client/BUILD.gn b/chromium/gpu/command_buffer/client/BUILD.gn
index 30c6f60f5d7..45919991d18 100644
--- a/chromium/gpu/command_buffer/client/BUILD.gn
+++ b/chromium/gpu/command_buffer/client/BUILD.gn
@@ -15,6 +15,13 @@ group("client") {
":client_sources",
]
}
+
+ if (!is_nacl) {
+ deps = [
+ "//cc/paint",
+ "//skia",
+ ]
+ }
}
group("gles2_cmd_helper") {
@@ -161,6 +168,13 @@ component("gles2_implementation") {
"//gpu/command_buffer/common:gles2_utils",
"//ui/gfx/geometry",
]
+
+ if (!is_nacl) {
+ deps += [
+ "//ui/gfx:color_space",
+ "//ui/gfx/ipc/color",
+ ]
+ }
}
# Library emulates GLES2 using command_buffers.
@@ -185,6 +199,13 @@ component("gles2_implementation_no_check") {
"//ui/gfx",
"//ui/gfx/geometry",
]
+
+ if (!is_nacl) {
+ deps += [
+ "//ui/gfx:color_space",
+ "//ui/gfx/ipc/color",
+ ]
+ }
}
component("gles2_c_lib") {
diff --git a/chromium/gpu/command_buffer/client/DEPS b/chromium/gpu/command_buffer/client/DEPS
index 867a547a663..58bc085b699 100644
--- a/chromium/gpu/command_buffer/client/DEPS
+++ b/chromium/gpu/command_buffer/client/DEPS
@@ -1,3 +1,5 @@
include_rules = [
"+ui/latency",
+ "+cc/paint",
+ "+third_party/skia",
]
diff --git a/chromium/gpu/command_buffer/client/client_test_helper.h b/chromium/gpu/command_buffer/client/client_test_helper.h
index 8881436a1d5..25a05ada70c 100644
--- a/chromium/gpu/command_buffer/client/client_test_helper.h
+++ b/chromium/gpu/command_buffer/client/client_test_helper.h
@@ -117,8 +117,7 @@ class MockClientGpuControl : public GpuControl {
MOCK_METHOD0(EnsureWorkVisible, void());
MOCK_CONST_METHOD0(GetNamespaceID, CommandBufferNamespace());
MOCK_CONST_METHOD0(GetCommandBufferID, CommandBufferId());
- MOCK_CONST_METHOD0(GetStreamId, int32_t());
- MOCK_METHOD1(FlushOrderingBarrierOnStream, void(int32_t));
+ MOCK_METHOD0(FlushPendingWork, void());
MOCK_METHOD0(GenerateFenceSyncRelease, uint64_t());
MOCK_METHOD1(IsFenceSyncRelease, bool(uint64_t release));
MOCK_METHOD1(IsFenceSyncFlushed, bool(uint64_t release));
diff --git a/chromium/gpu/command_buffer/client/cmd_buffer_helper.cc b/chromium/gpu/command_buffer/client/cmd_buffer_helper.cc
index afa6c3902cf..f29f7449bb5 100644
--- a/chromium/gpu/command_buffer/client/cmd_buffer_helper.cc
+++ b/chromium/gpu/command_buffer/client/cmd_buffer_helper.cc
@@ -60,8 +60,9 @@ bool CommandBufferHelper::IsContextLost() {
void CommandBufferHelper::CalcImmediateEntries(int waiting_count) {
DCHECK_GE(waiting_count, 0);
- // Check if usable & allocated.
- if (!usable() || !HaveRingBuffer()) {
+ // If not allocated, no entries are available. If not usable, it will not be
+ // allocated.
+ if (!HaveRingBuffer()) {
immediate_entry_count_ = 0;
return;
}
@@ -111,17 +112,27 @@ bool CommandBufferHelper::AllocateRingBuffer() {
scoped_refptr<Buffer> buffer =
command_buffer_->CreateTransferBuffer(ring_buffer_size_, &id);
if (id < 0) {
- ClearUsable();
- DCHECK(context_lost_);
+ usable_ = false;
+ context_lost_ = true;
+ CalcImmediateEntries(0);
return false;
}
- ring_buffer_ = buffer;
- ring_buffer_id_ = id;
+ SetGetBuffer(id, std::move(buffer));
+ return true;
+}
+
+void CommandBufferHelper::SetGetBuffer(int32_t id,
+ scoped_refptr<Buffer> buffer) {
command_buffer_->SetGetBuffer(id);
+ ring_buffer_ = std::move(buffer);
+ ring_buffer_id_ = id;
++set_get_buffer_count_;
- entries_ = static_cast<CommandBufferEntry*>(ring_buffer_->memory());
- total_entry_count_ = ring_buffer_size_ / sizeof(CommandBufferEntry);
+ entries_ = ring_buffer_
+ ? static_cast<CommandBufferEntry*>(ring_buffer_->memory())
+ : 0;
+ total_entry_count_ =
+ ring_buffer_ ? ring_buffer_size_ / sizeof(CommandBufferEntry) : 0;
// Call to SetGetBuffer(id) above resets get and put offsets to 0.
// No need to query it through IPC.
put_ = 0;
@@ -129,32 +140,23 @@ bool CommandBufferHelper::AllocateRingBuffer() {
cached_get_offset_ = 0;
service_on_old_buffer_ = true;
CalcImmediateEntries(0);
- return true;
}
-void CommandBufferHelper::FreeResources() {
+void CommandBufferHelper::FreeRingBuffer() {
if (HaveRingBuffer()) {
+ FlushLazy();
command_buffer_->DestroyTransferBuffer(ring_buffer_id_);
- ring_buffer_id_ = -1;
- CalcImmediateEntries(0);
- entries_ = nullptr;
- ring_buffer_ = nullptr;
+ SetGetBuffer(-1, nullptr);
}
}
-void CommandBufferHelper::FreeRingBuffer() {
- CHECK((put_ == cached_get_offset_) ||
- error::IsError(command_buffer_->GetLastState().error));
- FreeResources();
-}
-
bool CommandBufferHelper::Initialize(int32_t ring_buffer_size) {
ring_buffer_size_ = ring_buffer_size;
return AllocateRingBuffer();
}
CommandBufferHelper::~CommandBufferHelper() {
- FreeResources();
+ FreeRingBuffer();
}
void CommandBufferHelper::UpdateCachedState(const CommandBuffer::State& state) {
@@ -171,9 +173,6 @@ void CommandBufferHelper::UpdateCachedState(const CommandBuffer::State& state) {
bool CommandBufferHelper::WaitForGetOffsetInRange(int32_t start, int32_t end) {
DCHECK(start >= 0 && start <= total_entry_count_);
DCHECK(end >= 0 && end <= total_entry_count_);
- if (!usable()) {
- return false;
- }
CommandBuffer::State last_state = command_buffer_->WaitForGetOffsetInRange(
set_get_buffer_count_, start, end);
UpdateCachedState(last_state);
@@ -185,7 +184,7 @@ void CommandBufferHelper::Flush() {
if (put_ == total_entry_count_)
put_ = 0;
- if (usable()) {
+ if (HaveRingBuffer()) {
last_flush_time_ = base::TimeTicks::Now();
last_put_sent_ = put_;
command_buffer_->Flush(put_);
@@ -194,12 +193,18 @@ void CommandBufferHelper::Flush() {
}
}
+void CommandBufferHelper::FlushLazy() {
+ if (put_ == last_put_sent_)
+ return;
+ Flush();
+}
+
void CommandBufferHelper::OrderingBarrier() {
// Wrap put_ before setting the barrier.
if (put_ == total_entry_count_)
put_ = 0;
- if (usable()) {
+ if (HaveRingBuffer()) {
command_buffer_->OrderingBarrier(put_);
++flush_generation_;
CalcImmediateEntries(0);
@@ -220,17 +225,11 @@ void CommandBufferHelper::PeriodicFlushCheck() {
// error is set.
bool CommandBufferHelper::Finish() {
TRACE_EVENT0("gpu", "CommandBufferHelper::Finish");
- if (!usable()) {
- return false;
- }
// If there is no work just exit.
if (put_ == cached_get_offset_ && !service_on_old_buffer_) {
- return true;
+ return !context_lost_;
}
- DCHECK(HaveRingBuffer() ||
- error::IsError(command_buffer_->GetLastState().error));
- if (last_put_sent_ != put_)
- Flush();
+ FlushLazy();
if (!WaitForGetOffsetInRange(put_, put_))
return false;
DCHECK_EQ(cached_get_offset_, put_);
@@ -243,18 +242,14 @@ bool CommandBufferHelper::Finish() {
// Inserts a new token into the command stream. It uses an increasing value
// scheme so that we don't lose tokens (a token has passed if the current token
// value is higher than that token). Calls Finish() if the token value wraps,
-// which will be rare.
+// which will be rare. If we can't allocate a command buffer, token doesn't
+// increase, ensuring WaitForToken eventually returns.
int32_t CommandBufferHelper::InsertToken() {
- AllocateRingBuffer();
- if (!usable()) {
- return token_;
- }
- DCHECK(HaveRingBuffer());
// Increment token as 31-bit integer. Negative values are used to signal an
// error.
- token_ = (token_ + 1) & 0x7FFFFFFF;
cmd::SetToken* cmd = GetCmdSpace<cmd::SetToken>();
if (cmd) {
+ token_ = (token_ + 1) & 0x7FFFFFFF;
cmd->Init(token_);
if (token_ == 0) {
TRACE_EVENT0("gpu", "CommandBufferHelper::InsertToken(wrapped)");
@@ -280,20 +275,10 @@ bool CommandBufferHelper::HasTokenPassed(int32_t token) {
// Waits until the current token value is greater or equal to the value passed
// in argument.
void CommandBufferHelper::WaitForToken(int32_t token) {
- if (!usable() || !HaveRingBuffer()) {
+ DCHECK_GE(token, 0);
+ if (HasTokenPassed(token))
return;
- }
- // Return immediately if corresponding InsertToken failed.
- if (token < 0)
- return;
- if (token > token_)
- return; // we wrapped
- if (cached_last_token_read_ >= token)
- return;
- UpdateCachedState(command_buffer_->GetLastState());
- if (cached_last_token_read_ >= token)
- return;
- Flush();
+ FlushLazy();
CommandBuffer::State last_state =
command_buffer_->WaitForTokenInRange(token, token_);
UpdateCachedState(last_state);
@@ -305,10 +290,8 @@ void CommandBufferHelper::WaitForToken(int32_t token) {
// function will return early if an error occurs, in which case the available
// space may not be available.
void CommandBufferHelper::WaitForAvailableEntries(int32_t count) {
- AllocateRingBuffer();
- if (!usable()) {
+ if (!AllocateRingBuffer())
return;
- }
DCHECK(HaveRingBuffer());
DCHECK(count < total_entry_count_);
if (put_ + count > total_entry_count_) {
@@ -320,7 +303,7 @@ void CommandBufferHelper::WaitForAvailableEntries(int32_t count) {
int32_t curr_get = cached_get_offset_;
if (curr_get > put_ || curr_get == 0) {
TRACE_EVENT0("gpu", "CommandBufferHelper::WaitForAvailableEntries");
- Flush();
+ FlushLazy();
if (!WaitForGetOffsetInRange(1, put_))
return;
curr_get = cached_get_offset_;
@@ -342,7 +325,7 @@ void CommandBufferHelper::WaitForAvailableEntries(int32_t count) {
CalcImmediateEntries(count);
if (immediate_entry_count_ < count) {
// Try again with a shallow Flush().
- Flush();
+ FlushLazy();
CalcImmediateEntries(count);
if (immediate_entry_count_ < count) {
// Buffer is full. Need to wait for entries.
@@ -390,12 +373,12 @@ bool CommandBufferHelper::OnMemoryDump(
GetTotalFreeEntriesNoWaiting() * sizeof(CommandBufferEntry));
base::UnguessableToken shared_memory_guid =
ring_buffer_->backing()->shared_memory_handle().GetGUID();
- auto guid = GetBufferGUIDForTracing(tracing_process_id, ring_buffer_id_);
const int kImportance = 2;
if (!shared_memory_guid.is_empty()) {
- pmd->CreateSharedMemoryOwnershipEdge(dump->guid(), guid,
- shared_memory_guid, kImportance);
+ pmd->CreateSharedMemoryOwnershipEdge(dump->guid(), shared_memory_guid,
+ kImportance);
} else {
+ auto guid = GetBufferGUIDForTracing(tracing_process_id, ring_buffer_id_);
pmd->CreateSharedGlobalAllocatorDump(guid);
pmd->AddOwnershipEdge(dump->guid(), guid, kImportance);
}
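The helper changes above relax the FreeRingBuffer() contract: it now flushes pending commands instead of requiring the service to have caught up, and the ring buffer is reallocated lazily. A rough sketch of the resulting client-side behaviour, assuming an initialized CommandBufferHelper* helper and that reallocation succeeds:

    helper->FreeRingBuffer();            // FlushLazy() pending commands, then
                                         // release the transfer buffer without
                                         // waiting for the service.
    DCHECK(!helper->HaveRingBuffer());
    helper->WaitForAvailableEntries(1);  // Reallocates a ring buffer on demand
                                         // when command space is next needed.
    DCHECK(helper->HaveRingBuffer());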
diff --git a/chromium/gpu/command_buffer/client/cmd_buffer_helper.h b/chromium/gpu/command_buffer/client/cmd_buffer_helper.h
index bdf7bbad1d2..5c38d0ed0f5 100644
--- a/chromium/gpu/command_buffer/client/cmd_buffer_helper.h
+++ b/chromium/gpu/command_buffer/client/cmd_buffer_helper.h
@@ -74,6 +74,9 @@ class GPU_EXPORT CommandBufferHelper
// returns, the command buffer service is aware of all pending commands.
void Flush();
+ // Flushes if the put pointer has changed since the last flush.
+ void FlushLazy();
+
// Ensures that commands up to the put pointer will be processed in the
// command buffer service before any future commands on other command buffers
// sharing a channel.
@@ -255,24 +258,24 @@ class GPU_EXPORT CommandBufferHelper
void FreeRingBuffer();
- bool HaveRingBuffer() const { return ring_buffer_id_ != -1; }
+ bool HaveRingBuffer() const {
+ bool have_ring_buffer = !!ring_buffer_;
+ DCHECK(usable() || !have_ring_buffer);
+ return have_ring_buffer;
+ }
bool usable() const { return usable_; }
- void ClearUsable() {
- usable_ = false;
- context_lost_ = true;
- CalcImmediateEntries(0);
- }
-
// Overridden from base::trace_event::MemoryDumpProvider:
bool OnMemoryDump(const base::trace_event::MemoryDumpArgs& args,
base::trace_event::ProcessMemoryDump* pmd) override;
+ int32_t GetPutOffsetForTest() const { return put_; }
+
private:
void CalcImmediateEntries(int waiting_count);
bool AllocateRingBuffer();
- void FreeResources();
+ void SetGetBuffer(int32_t id, scoped_refptr<Buffer> buffer);
// Waits for the get offset to be in a specific range, inclusive. Returns
// false if there was an error.
diff --git a/chromium/gpu/command_buffer/client/cmd_buffer_helper_test.cc b/chromium/gpu/command_buffer/client/cmd_buffer_helper_test.cc
index 9027ae50670..bdc776450e0 100644
--- a/chromium/gpu/command_buffer/client/cmd_buffer_helper_test.cc
+++ b/chromium/gpu/command_buffer/client/cmd_buffer_helper_test.cc
@@ -17,7 +17,7 @@
#include "base/message_loop/message_loop.h"
#include "base/run_loop.h"
#include "gpu/command_buffer/client/cmd_buffer_helper.h"
-#include "gpu/command_buffer/service/command_buffer_direct.h"
+#include "gpu/command_buffer/client/command_buffer_direct_locked.h"
#include "gpu/command_buffer/service/mocks.h"
#include "gpu/command_buffer/service/transfer_buffer_manager.h"
#include "testing/gtest/include/gtest/gtest.h"
@@ -37,60 +37,6 @@ const int32_t kCommandBufferSizeBytes =
kTotalNumCommandEntries * sizeof(CommandBufferEntry);
const int32_t kUnusedCommandId = 5; // we use 0 and 2 currently.
-// Override CommandBufferDirect::Flush() to lock flushing and simulate
-// the buffer becoming full in asynchronous mode.
-class CommandBufferDirectLocked : public CommandBufferDirect {
- public:
- explicit CommandBufferDirectLocked(
- TransferBufferManager* transfer_buffer_manager)
- : CommandBufferDirect(transfer_buffer_manager),
- flush_locked_(false),
- last_flush_(-1),
- previous_put_offset_(0),
- flush_count_(0) {}
- ~CommandBufferDirectLocked() override {}
-
- // Overridden from CommandBufferDirect
- void Flush(int32_t put_offset) override {
- flush_count_++;
- if (!flush_locked_) {
- last_flush_ = -1;
- previous_put_offset_ = put_offset;
- CommandBufferDirect::Flush(put_offset);
- } else {
- last_flush_ = put_offset;
- }
- }
-
- void LockFlush() { flush_locked_ = true; }
-
- void UnlockFlush() { flush_locked_ = false; }
-
- int FlushCount() { return flush_count_; }
-
- State WaitForGetOffsetInRange(uint32_t set_get_buffer_count,
- int32_t start,
- int32_t end) override {
- // Flush only if it's required to unblock this Wait.
- if (last_flush_ != -1 && !InRange(start, end, previous_put_offset_)) {
- previous_put_offset_ = last_flush_;
- CommandBufferDirect::Flush(last_flush_);
- last_flush_ = -1;
- }
- return CommandBufferDirect::WaitForGetOffsetInRange(set_get_buffer_count,
- start, end);
- }
-
- int GetServicePutOffset() { return previous_put_offset_; }
-
- private:
- bool flush_locked_;
- int last_flush_;
- int previous_put_offset_;
- int flush_count_;
- DISALLOW_COPY_AND_ASSIGN(CommandBufferDirectLocked);
-};
-
// Test fixture for CommandBufferHelper test - Creates a CommandBufferHelper,
// using a CommandBufferServiceLocked with a mock AsyncAPIInterface for its
// interface (calling it directly, not through the RPC mechanism).
@@ -260,21 +206,16 @@ class CommandBufferHelperTest : public testing::Test {
base::MessageLoop message_loop_;
};
-// Checks immediate_entry_count_ changes based on 'usable' state.
-TEST_F(CommandBufferHelperTest, TestCalcImmediateEntriesNotUsable) {
- // Auto flushing mode is tested separately.
- helper_->SetAutomaticFlushes(false);
- EXPECT_EQ(helper_->usable(), true);
- EXPECT_EQ(ImmediateEntryCount(), kTotalNumCommandEntries - 1);
- helper_->ClearUsable();
- EXPECT_EQ(ImmediateEntryCount(), 0);
-}
-
// Checks immediate_entry_count_ changes based on RingBuffer state.
TEST_F(CommandBufferHelperTest, TestCalcImmediateEntriesNoRingBuffer) {
helper_->SetAutomaticFlushes(false);
EXPECT_EQ(ImmediateEntryCount(), kTotalNumCommandEntries - 1);
helper_->FreeRingBuffer();
+ EXPECT_TRUE(helper_->usable());
+ EXPECT_EQ(ImmediateEntryCount(), 0);
+ command_buffer_->set_fail_create_transfer_buffer(true);
+ helper_->WaitForAvailableEntries(1);
+ EXPECT_FALSE(helper_->usable());
EXPECT_EQ(ImmediateEntryCount(), 0);
}
@@ -647,6 +588,28 @@ TEST_F(CommandBufferHelperTest, FreeRingBuffer) {
// Check that the commands did happen.
Mock::VerifyAndClearExpectations(api_mock_.get());
+
+ // Test that FreeRingBuffer doesn't force a finish
+ AddCommandWithExpect(error::kNoError, kUnusedCommandId, 0, NULL);
+ EXPECT_TRUE(helper_->HaveRingBuffer());
+ int32_t old_get_offset = command_buffer_->GetLastState().get_offset;
+ EXPECT_NE(helper_->GetPutOffsetForTest(), old_get_offset);
+ int old_flush_count = command_buffer_->FlushCount();
+
+ helper_->FreeRingBuffer();
+ EXPECT_FALSE(helper_->HaveRingBuffer());
+ // FreeRingBuffer should have caused a flush.
+ EXPECT_EQ(command_buffer_->FlushCount(), old_flush_count + 1);
+ // However it shouldn't force a finish.
+ EXPECT_EQ(command_buffer_->GetLastState().get_offset, old_get_offset);
+
+ // Finish should not cause extra flushes, or recreate the ring buffer, but it
+ // should work.
+ helper_->Finish();
+ EXPECT_FALSE(helper_->HaveRingBuffer());
+ EXPECT_EQ(command_buffer_->FlushCount(), old_flush_count + 1);
+ EXPECT_EQ(command_buffer_->GetLastState().get_offset,
+ helper_->GetPutOffsetForTest());
}
TEST_F(CommandBufferHelperTest, Noop) {
diff --git a/chromium/gpu/command_buffer/client/command_buffer_direct_locked.cc b/chromium/gpu/command_buffer/client/command_buffer_direct_locked.cc
new file mode 100644
index 00000000000..f51210e97b0
--- /dev/null
+++ b/chromium/gpu/command_buffer/client/command_buffer_direct_locked.cc
@@ -0,0 +1,55 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "gpu/command_buffer/client/command_buffer_direct_locked.h"
+
+namespace gpu {
+
+void CommandBufferDirectLocked::Flush(int32_t put_offset) {
+ flush_count_++;
+ client_put_offset_ = put_offset;
+ if (!flush_locked_)
+ DoFlush();
+}
+
+CommandBuffer::State CommandBufferDirectLocked::WaitForTokenInRange(
+ int32_t start,
+ int32_t end) {
+ State state = GetLastState();
+ if (state.error != error::kNoError || InRange(start, end, state.token)) {
+ return state;
+ } else {
+ DoFlush();
+ return CommandBufferDirect::WaitForTokenInRange(start, end);
+ }
+}
+
+CommandBuffer::State CommandBufferDirectLocked::WaitForGetOffsetInRange(
+ uint32_t set_get_buffer_count,
+ int32_t start,
+ int32_t end) {
+ State state = GetLastState();
+ if (state.error != error::kNoError ||
+ (InRange(start, end, state.get_offset) &&
+ (set_get_buffer_count == state.set_get_buffer_count))) {
+ return state;
+ } else {
+ DoFlush();
+ return CommandBufferDirect::WaitForGetOffsetInRange(set_get_buffer_count,
+ start, end);
+ }
+}
+
+scoped_refptr<Buffer> CommandBufferDirectLocked::CreateTransferBuffer(
+ size_t size,
+ int32_t* id) {
+ if (fail_create_transfer_buffer_) {
+ *id = -1;
+ return nullptr;
+ } else {
+ return CommandBufferDirect::CreateTransferBuffer(size, id);
+ }
+}
+
+} // namespace gpu
diff --git a/chromium/gpu/command_buffer/client/command_buffer_direct_locked.h b/chromium/gpu/command_buffer/client/command_buffer_direct_locked.h
new file mode 100644
index 00000000000..a69ba113843
--- /dev/null
+++ b/chromium/gpu/command_buffer/client/command_buffer_direct_locked.h
@@ -0,0 +1,52 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "gpu/command_buffer/service/command_buffer_direct.h"
+
+namespace gpu {
+
+// A CommandBuffer that allows "locking" flushes, that is delaying progress
+// until either it gets unlocked or the client waits for progress.
+class CommandBufferDirectLocked : public CommandBufferDirect {
+ public:
+ explicit CommandBufferDirectLocked(
+ TransferBufferManager* transfer_buffer_manager)
+ : CommandBufferDirect(transfer_buffer_manager) {}
+ ~CommandBufferDirectLocked() override {}
+
+ // Overridden from CommandBufferDirect
+ void Flush(int32_t put_offset) override;
+ CommandBuffer::State WaitForTokenInRange(int32_t start, int32_t end) override;
+ CommandBuffer::State WaitForGetOffsetInRange(uint32_t set_get_buffer_count,
+ int32_t start,
+ int32_t end) override;
+ scoped_refptr<Buffer> CreateTransferBuffer(size_t size, int32_t* id) override;
+
+ void LockFlush() { flush_locked_ = true; }
+
+ void UnlockFlush() { flush_locked_ = false; }
+
+ int FlushCount() { return flush_count_; }
+
+ int GetServicePutOffset() { return service_put_offset_; }
+
+ void set_fail_create_transfer_buffer(bool fail) {
+ fail_create_transfer_buffer_ = fail;
+ }
+
+ private:
+ void DoFlush() {
+ CommandBufferDirect::Flush(client_put_offset_);
+ service_put_offset_ = client_put_offset_;
+ }
+
+ bool fail_create_transfer_buffer_ = false;
+ bool flush_locked_ = false;
+ int client_put_offset_ = 0;
+ int service_put_offset_ = 0;
+ int flush_count_ = 0;
+ DISALLOW_COPY_AND_ASSIGN(CommandBufferDirectLocked);
+};
+
+} // namespace gpu
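A rough sketch of how a test can drive the class above, assuming a fixture that owns a CommandBufferDirectLocked* command_buffer_ and a CommandBufferHelper* helper_ attached to it (as in cmd_buffer_helper_test.cc):

    int flushes = command_buffer_->FlushCount();
    command_buffer_->LockFlush();   // Record flushes without forwarding them.
    helper_->Noop(1);               // Put one command into the ring buffer.
    helper_->Flush();               // Counted, but DoFlush() stays deferred.
    EXPECT_EQ(command_buffer_->FlushCount(), flushes + 1);
    helper_->Finish();              // The wait performs the deferred DoFlush(),
                                    // letting the service make progress.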
diff --git a/chromium/gpu/command_buffer/client/context_support.h b/chromium/gpu/command_buffer/client/context_support.h
index dd5c601fa4f..32ccd1ee5bb 100644
--- a/chromium/gpu/command_buffer/client/context_support.h
+++ b/chromium/gpu/command_buffer/client/context_support.h
@@ -26,11 +26,8 @@ struct SyncToken;
class ContextSupport {
public:
- // Returns the stream id for this context.
- virtual int32_t GetStreamId() const = 0;
-
- // Flush any outstanding ordering barriers on given stream.
- virtual void FlushOrderingBarrierOnStream(int32_t stream_id) = 0;
+ // Flush any outstanding ordering barriers for all contexts.
+ virtual void FlushPendingWork() = 0;
// Runs |callback| when the given sync token is signalled. The sync token may
// belong to any context.
diff --git a/chromium/gpu/command_buffer/client/fenced_allocator.cc b/chromium/gpu/command_buffer/client/fenced_allocator.cc
index 47450d31542..4ca5ff9fffd 100644
--- a/chromium/gpu/command_buffer/client/fenced_allocator.cc
+++ b/chromium/gpu/command_buffer/client/fenced_allocator.cc
@@ -36,15 +36,10 @@ FencedAllocator::FencedAllocator(unsigned int size, CommandBufferHelper* helper)
}
FencedAllocator::~FencedAllocator() {
- // Free blocks pending tokens.
- for (unsigned int i = 0; i < blocks_.size(); ++i) {
- if (blocks_[i].state == FREE_PENDING_TOKEN) {
- i = WaitForTokenAndFreeBlock(i);
- }
- }
-
- DCHECK_EQ(blocks_.size(), 1u);
- DCHECK_EQ(blocks_[0].state, FREE);
+ // All IN_USE blocks should be released at this point. There may still be
+ // FREE_PENDING_TOKEN blocks, the assumption is that the underlying memory
+ // will not be re-used without higher level synchronization.
+ DCHECK_EQ(bytes_in_use_, 0u);
}
// Looks for a non-allocated block that is big enough. Search in the FREE
@@ -86,8 +81,9 @@ FencedAllocator::Offset FencedAllocator::Alloc(unsigned int size) {
// necessary.
void FencedAllocator::Free(FencedAllocator::Offset offset) {
BlockIndex index = GetBlockByOffset(offset);
- DCHECK_NE(blocks_[index].state, FREE);
Block &block = blocks_[index];
+ DCHECK_NE(block.state, FREE);
+ DCHECK_EQ(block.offset, offset);
if (block.state == IN_USE)
bytes_in_use_ -= block.size;
@@ -101,6 +97,7 @@ void FencedAllocator::FreePendingToken(FencedAllocator::Offset offset,
int32_t token) {
BlockIndex index = GetBlockByOffset(offset);
Block &block = blocks_[index];
+ DCHECK_EQ(block.offset, offset);
if (block.state == IN_USE)
bytes_in_use_ -= block.size;
block.state = FREE_PENDING_TOKEN;
@@ -171,10 +168,20 @@ bool FencedAllocator::CheckConsistency() {
// Returns false if all blocks are actually FREE, in which
// case they would be coalesced into one block, true otherwise.
-bool FencedAllocator::InUse() {
+bool FencedAllocator::InUseOrFreePending() {
return blocks_.size() != 1 || blocks_[0].state != FREE;
}
+FencedAllocator::State FencedAllocator::GetBlockStatusForTest(
+ Offset offset,
+ int32_t* token_if_pending) {
+ BlockIndex index = GetBlockByOffset(offset);
+ Block& block = blocks_[index];
+ if ((block.state == FREE_PENDING_TOKEN) && token_if_pending)
+ *token_if_pending = block.token;
+ return block.state;
+}
+
// Collapse the block to the next one, then to the previous one. Provided the
// structure is consistent, those are the only blocks eligible for collapse.
FencedAllocator::BlockIndex FencedAllocator::CollapseFreeBlock(
@@ -247,7 +254,7 @@ FencedAllocator::BlockIndex FencedAllocator::GetBlockByOffset(Offset offset) {
Block templ = { IN_USE, offset, 0, kUnusedToken };
Container::iterator it = std::lower_bound(blocks_.begin(), blocks_.end(),
templ, OffsetCmp());
- DCHECK(it != blocks_.end() && it->offset == offset);
+ DCHECK(it != blocks_.end());
return it-blocks_.begin();
}
diff --git a/chromium/gpu/command_buffer/client/fenced_allocator.h b/chromium/gpu/command_buffer/client/fenced_allocator.h
index 0e8c64c9f83..9c42d34ec13 100644
--- a/chromium/gpu/command_buffer/client/fenced_allocator.h
+++ b/chromium/gpu/command_buffer/client/fenced_allocator.h
@@ -39,6 +39,9 @@ class GPU_EXPORT FencedAllocator {
// Allocation alignment, must be a power of two.
enum : unsigned int { kAllocAlignment = 16 };
+ // Status of a block of memory, for book-keeping.
+ enum State { IN_USE, FREE, FREE_PENDING_TOKEN };
+
// Creates a FencedAllocator. Note that the size of the buffer is passed, but
// not its base address: everything is handled as offsets into the buffer.
FencedAllocator(unsigned int size, CommandBufferHelper* helper);
@@ -90,19 +93,16 @@ class GPU_EXPORT FencedAllocator {
bool CheckConsistency();
// True if any memory is allocated.
- bool InUse();
+ bool InUseOrFreePending();
// Return bytes of memory that is IN_USE
size_t bytes_in_use() const { return bytes_in_use_; }
- private:
- // Status of a block of memory, for book-keeping.
- enum State {
- IN_USE,
- FREE,
- FREE_PENDING_TOKEN
- };
+ // Gets the status of a block, as well as the corresponding token if
+ // FREE_PENDING_TOKEN.
+ State GetBlockStatusForTest(Offset offset, int32_t* token_if_pending);
+ private:
// Book-keeping structure that describes a block of memory.
struct Block {
State state;
@@ -252,14 +252,18 @@ class FencedAllocatorWrapper {
}
// True if any memory is allocated.
- bool InUse() {
- return allocator_.InUse();
- }
+ bool InUseOrFreePending() { return allocator_.InUseOrFreePending(); }
FencedAllocator &allocator() { return allocator_; }
size_t bytes_in_use() const { return allocator_.bytes_in_use(); }
+ FencedAllocator::State GetPointerStatusForTest(void* pointer,
+ int32_t* token_if_pending) {
+ return allocator_.GetBlockStatusForTest(GetOffset(pointer),
+ token_if_pending);
+ }
+
private:
FencedAllocator allocator_;
void* base_;
diff --git a/chromium/gpu/command_buffer/client/fenced_allocator_test.cc b/chromium/gpu/command_buffer/client/fenced_allocator_test.cc
index 000738d7a49..6f968102cc9 100644
--- a/chromium/gpu/command_buffer/client/fenced_allocator_test.cc
+++ b/chromium/gpu/command_buffer/client/fenced_allocator_test.cc
@@ -96,17 +96,17 @@ class FencedAllocatorTest : public BaseFencedAllocatorTest {
// Checks basic alloc and free.
TEST_F(FencedAllocatorTest, TestBasic) {
allocator_->CheckConsistency();
- EXPECT_FALSE(allocator_->InUse());
+ EXPECT_FALSE(allocator_->InUseOrFreePending());
const unsigned int kSize = 16;
FencedAllocator::Offset offset = allocator_->Alloc(kSize);
- EXPECT_TRUE(allocator_->InUse());
+ EXPECT_TRUE(allocator_->InUseOrFreePending());
EXPECT_NE(FencedAllocator::kInvalidOffset, offset);
EXPECT_GE(kBufferSize, offset+kSize);
EXPECT_TRUE(allocator_->CheckConsistency());
allocator_->Free(offset);
- EXPECT_FALSE(allocator_->InUse());
+ EXPECT_FALSE(allocator_->InUseOrFreePending());
EXPECT_TRUE(allocator_->CheckConsistency());
}
@@ -114,7 +114,7 @@ TEST_F(FencedAllocatorTest, TestBasic) {
TEST_F(FencedAllocatorTest, TestAllocZero) {
FencedAllocator::Offset offset = allocator_->Alloc(0);
EXPECT_EQ(FencedAllocator::kInvalidOffset, offset);
- EXPECT_FALSE(allocator_->InUse());
+ EXPECT_FALSE(allocator_->InUseOrFreePending());
EXPECT_TRUE(allocator_->CheckConsistency());
}
@@ -224,7 +224,7 @@ TEST_F(FencedAllocatorTest, FreeUnused) {
EXPECT_GE(kBufferSize, offsets[i]+kSize);
EXPECT_TRUE(allocator_->CheckConsistency());
}
- EXPECT_TRUE(allocator_->InUse());
+ EXPECT_TRUE(allocator_->InUseOrFreePending());
// No memory should be available.
EXPECT_EQ(0u, allocator_->GetLargestFreeSize());
@@ -261,14 +261,14 @@ TEST_F(FencedAllocatorTest, FreeUnused) {
// Check that the new largest free size takes into account the unused blocks.
EXPECT_EQ(kSize * 3, allocator_->GetLargestFreeSize());
- EXPECT_TRUE(allocator_->InUse());
+ EXPECT_TRUE(allocator_->InUseOrFreePending());
// Free up everything.
for (unsigned int i = 3; i < kAllocCount; ++i) {
allocator_->Free(offsets[i]);
EXPECT_TRUE(allocator_->CheckConsistency());
}
- EXPECT_FALSE(allocator_->InUse());
+ EXPECT_FALSE(allocator_->InUseOrFreePending());
}
// Tests GetLargestFreeSize
diff --git a/chromium/gpu/command_buffer/client/gles2_c_lib_autogen.h b/chromium/gpu/command_buffer/client/gles2_c_lib_autogen.h
index 22759a38e94..f0d6e361514 100644
--- a/chromium/gpu/command_buffer/client/gles2_c_lib_autogen.h
+++ b/chromium/gpu/command_buffer/client/gles2_c_lib_autogen.h
@@ -1269,8 +1269,10 @@ void GL_APIENTRY GLES2UnmapTexSubImage2DCHROMIUM(const void* mem) {
void GL_APIENTRY GLES2ResizeCHROMIUM(GLuint width,
GLuint height,
GLfloat scale_factor,
+ GLenum color_space,
GLboolean alpha) {
- gles2::GetGLContext()->ResizeCHROMIUM(width, height, scale_factor, alpha);
+ gles2::GetGLContext()->ResizeCHROMIUM(width, height, scale_factor,
+ color_space, alpha);
}
const GLchar* GL_APIENTRY GLES2GetRequestableExtensionsCHROMIUM() {
return gles2::GetGLContext()->GetRequestableExtensionsCHROMIUM();
@@ -1499,6 +1501,12 @@ void GL_APIENTRY GLES2ScheduleCALayerCHROMIUM(GLuint contents_texture_id,
bounds_rect, filter);
}
void GL_APIENTRY
+GLES2SetColorSpaceForScanoutCHROMIUM(GLuint texture_id,
+ GLColorSpace color_space) {
+ gles2::GetGLContext()->SetColorSpaceForScanoutCHROMIUM(texture_id,
+ color_space);
+}
+void GL_APIENTRY
GLES2ScheduleCALayerInUseQueryCHROMIUM(GLsizei count, const GLuint* textures) {
gles2::GetGLContext()->ScheduleCALayerInUseQueryCHROMIUM(count, textures);
}
@@ -1740,9 +1748,12 @@ GLES2UniformMatrix4fvStreamTextureMatrixCHROMIUM(GLint location,
void GL_APIENTRY GLES2OverlayPromotionHintCHROMIUM(GLuint texture,
GLboolean promotion_hint,
GLint display_x,
- GLint display_y) {
- gles2::GetGLContext()->OverlayPromotionHintCHROMIUM(texture, promotion_hint,
- display_x, display_y);
+ GLint display_y,
+ GLint display_width,
+ GLint display_height) {
+ gles2::GetGLContext()->OverlayPromotionHintCHROMIUM(
+ texture, promotion_hint, display_x, display_y, display_width,
+ display_height);
}
void GL_APIENTRY GLES2SwapBuffersWithBoundsCHROMIUM(GLsizei count,
const GLint* rects) {
@@ -1766,6 +1777,26 @@ void GL_APIENTRY GLES2UnlockDiscardableTextureCHROMIUM(GLuint texture_id) {
bool GL_APIENTRY GLES2LockDiscardableTextureCHROMIUM(GLuint texture_id) {
return gles2::GetGLContext()->LockDiscardableTextureCHROMIUM(texture_id);
}
+void GL_APIENTRY GLES2BeginRasterCHROMIUM(GLuint texture_id,
+ GLuint sk_color,
+ GLuint msaa_sample_count,
+ GLboolean can_use_lcd_text,
+ GLboolean use_distance_field_text,
+ GLint pixel_config) {
+ gles2::GetGLContext()->BeginRasterCHROMIUM(
+ texture_id, sk_color, msaa_sample_count, can_use_lcd_text,
+ use_distance_field_text, pixel_config);
+}
+void GL_APIENTRY GLES2RasterCHROMIUM(const cc::DisplayItemList* list,
+ GLint x,
+ GLint y,
+ GLint w,
+ GLint h) {
+ gles2::GetGLContext()->RasterCHROMIUM(list, x, y, w, h);
+}
+void GL_APIENTRY GLES2EndRasterCHROMIUM() {
+ gles2::GetGLContext()->EndRasterCHROMIUM();
+}
namespace gles2 {
@@ -2904,6 +2935,11 @@ extern const NameToFunc g_gles2_function_table[] = {
reinterpret_cast<GLES2FunctionPointer>(glScheduleCALayerCHROMIUM),
},
{
+ "glSetColorSpaceForScanoutCHROMIUM",
+ reinterpret_cast<GLES2FunctionPointer>(
+ glSetColorSpaceForScanoutCHROMIUM),
+ },
+ {
"glScheduleCALayerInUseQueryCHROMIUM",
reinterpret_cast<GLES2FunctionPointer>(
glScheduleCALayerInUseQueryCHROMIUM),
@@ -3106,6 +3142,18 @@ extern const NameToFunc g_gles2_function_table[] = {
glLockDiscardableTextureCHROMIUM),
},
{
+ "glBeginRasterCHROMIUM",
+ reinterpret_cast<GLES2FunctionPointer>(glBeginRasterCHROMIUM),
+ },
+ {
+ "glRasterCHROMIUM",
+ reinterpret_cast<GLES2FunctionPointer>(glRasterCHROMIUM),
+ },
+ {
+ "glEndRasterCHROMIUM",
+ reinterpret_cast<GLES2FunctionPointer>(glEndRasterCHROMIUM),
+ },
+ {
NULL, NULL,
},
};
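
The three new raster entry points bracket the serialization of a recorded cc::DisplayItemList. A hedged usage sketch; texture_id, pixel_config and the list pointer are placeholders supplied by the caller, and the literal values are illustrative rather than documented defaults:

    // Sketch only: assumes a current GLES2 context; "list" is a recorded
    // cc::DisplayItemList*, "texture_id" a texture to raster into, and
    // "pixel_config" whatever the service-side raster decoder expects.
    glBeginRasterCHROMIUM(texture_id,
                          /*sk_color=*/0xFF000000u,
                          /*msaa_sample_count=*/0,
                          /*can_use_lcd_text=*/GL_FALSE,
                          /*use_distance_field_text=*/GL_FALSE,
                          pixel_config);
    glRasterCHROMIUM(list, /*x=*/0, /*y=*/0, /*w=*/256, /*h=*/256);
    glEndRasterCHROMIUM();
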
diff --git a/chromium/gpu/command_buffer/client/gles2_cmd_helper_autogen.h b/chromium/gpu/command_buffer/client/gles2_cmd_helper_autogen.h
index 77d259db84c..b4ee0e9e764 100644
--- a/chromium/gpu/command_buffer/client/gles2_cmd_helper_autogen.h
+++ b/chromium/gpu/command_buffer/client/gles2_cmd_helper_autogen.h
@@ -2425,10 +2425,11 @@ void FlushMappedBufferRange(GLenum target, GLintptr offset, GLsizeiptr size) {
void ResizeCHROMIUM(GLuint width,
GLuint height,
GLfloat scale_factor,
+ GLenum color_space,
GLboolean alpha) {
gles2::cmds::ResizeCHROMIUM* c = GetCmdSpace<gles2::cmds::ResizeCHROMIUM>();
if (c) {
- c->Init(width, height, scale_factor, alpha);
+ c->Init(width, height, scale_factor, color_space, alpha);
}
}
@@ -2788,6 +2789,17 @@ void ScheduleCALayerCHROMIUM(GLuint contents_texture_id,
}
}
+void SetColorSpaceForScanoutCHROMIUM(GLuint texture_id,
+ GLuint shm_id,
+ GLuint shm_offset,
+ GLsizei color_space_size) {
+ gles2::cmds::SetColorSpaceForScanoutCHROMIUM* c =
+ GetCmdSpace<gles2::cmds::SetColorSpaceForScanoutCHROMIUM>();
+ if (c) {
+ c->Init(texture_id, shm_id, shm_offset, color_space_size);
+ }
+}
+
void ScheduleCALayerInUseQueryCHROMIUMImmediate(GLsizei count,
const GLuint* textures) {
const uint32_t size =
@@ -3213,11 +3225,14 @@ void UniformMatrix4fvStreamTextureMatrixCHROMIUMImmediate(
void OverlayPromotionHintCHROMIUM(GLuint texture,
GLboolean promotion_hint,
GLint display_x,
- GLint display_y) {
+ GLint display_y,
+ GLint display_width,
+ GLint display_height) {
gles2::cmds::OverlayPromotionHintCHROMIUM* c =
GetCmdSpace<gles2::cmds::OverlayPromotionHintCHROMIUM>();
if (c) {
- c->Init(texture, promotion_hint, display_x, display_y);
+ c->Init(texture, promotion_hint, display_x, display_y, display_width,
+ display_height);
}
}
@@ -3274,4 +3289,39 @@ void LockDiscardableTextureCHROMIUM(GLuint texture_id) {
}
}
+void BeginRasterCHROMIUM(GLuint texture_id,
+ GLuint sk_color,
+ GLuint msaa_sample_count,
+ GLboolean can_use_lcd_text,
+ GLboolean use_distance_field_text,
+ GLint pixel_config) {
+ gles2::cmds::BeginRasterCHROMIUM* c =
+ GetCmdSpace<gles2::cmds::BeginRasterCHROMIUM>();
+ if (c) {
+ c->Init(texture_id, sk_color, msaa_sample_count, can_use_lcd_text,
+ use_distance_field_text, pixel_config);
+ }
+}
+
+void RasterCHROMIUM(uint32_t list_shm_id,
+ uint32_t list_shm_offset,
+ GLint x,
+ GLint y,
+ GLint w,
+ GLint h,
+ uint32_t data_size) {
+ gles2::cmds::RasterCHROMIUM* c = GetCmdSpace<gles2::cmds::RasterCHROMIUM>();
+ if (c) {
+ c->Init(list_shm_id, list_shm_offset, x, y, w, h, data_size);
+ }
+}
+
+void EndRasterCHROMIUM() {
+ gles2::cmds::EndRasterCHROMIUM* c =
+ GetCmdSpace<gles2::cmds::EndRasterCHROMIUM>();
+ if (c) {
+ c->Init();
+ }
+}
+
#endif // GPU_COMMAND_BUFFER_CLIENT_GLES2_CMD_HELPER_AUTOGEN_H_
diff --git a/chromium/gpu/command_buffer/client/gles2_implementation.cc b/chromium/gpu/command_buffer/client/gles2_implementation.cc
index 8cd75fe4e74..9d63f5f174c 100644
--- a/chromium/gpu/command_buffer/client/gles2_implementation.cc
+++ b/chromium/gpu/command_buffer/client/gles2_implementation.cc
@@ -29,6 +29,7 @@
#include "base/trace_event/memory_dump_manager.h"
#include "base/trace_event/process_memory_dump.h"
#include "base/trace_event/trace_event.h"
+#include "build/build_config.h"
#include "gpu/command_buffer/client/buffer_tracker.h"
#include "gpu/command_buffer/client/gles2_cmd_helper.h"
#include "gpu/command_buffer/client/gpu_control.h"
@@ -48,6 +49,15 @@
#include "gpu/command_buffer/client/gpu_switches.h"
#endif
+#if !defined(OS_NACL)
+#include "cc/paint/display_item_list.h" // nogncheck
+#endif
+
+#if !defined(__native_client__)
+#include "ui/gfx/color_space.h"
+#include "ui/gfx/ipc/color/gfx_param_traits.h"
+#endif
+
namespace gpu {
namespace gles2 {
@@ -152,17 +162,7 @@ GLES2Implementation::GLES2Implementation(
support_client_side_arrays_(support_client_side_arrays),
use_count_(0),
flush_id_(0),
- max_extra_transfer_buffer_size_(
-#if defined(OS_NACL)
- 0),
-#else
- // Do not use more than 5% of extra shared memory, and do not
- // use any extra for memory contrained devices (<=1GB).
- base::SysInfo::AmountOfPhysicalMemory() > 1024 * 1024 * 1024
- ? base::saturated_cast<uint32_t>(
- base::SysInfo::AmountOfPhysicalMemory() / 20)
- : 0),
-#endif
+ max_extra_transfer_buffer_size_(0),
current_trace_stack_(0),
gpu_control_(gpu_control),
capabilities_(gpu_control->GetCapabilities()),
@@ -192,38 +192,25 @@ GLES2Implementation::GLES2Implementation(
memset(&reserved_ids_, 0, sizeof(reserved_ids_));
}
-bool GLES2Implementation::Initialize(
- unsigned int starting_transfer_buffer_size,
- unsigned int min_transfer_buffer_size,
- unsigned int max_transfer_buffer_size,
- unsigned int mapped_memory_limit) {
+bool GLES2Implementation::Initialize(const SharedMemoryLimits& limits) {
TRACE_EVENT0("gpu", "GLES2Implementation::Initialize");
- DCHECK_GE(starting_transfer_buffer_size, min_transfer_buffer_size);
- DCHECK_LE(starting_transfer_buffer_size, max_transfer_buffer_size);
- DCHECK_GE(min_transfer_buffer_size, kStartingOffset);
+ DCHECK_GE(limits.start_transfer_buffer_size, limits.min_transfer_buffer_size);
+ DCHECK_LE(limits.start_transfer_buffer_size, limits.max_transfer_buffer_size);
+ DCHECK_GE(limits.min_transfer_buffer_size, kStartingOffset);
gpu_control_->SetGpuControlClient(this);
if (!transfer_buffer_->Initialize(
- starting_transfer_buffer_size,
- kStartingOffset,
- min_transfer_buffer_size,
- max_transfer_buffer_size,
- kAlignment,
- kSizeToFlush)) {
+ limits.start_transfer_buffer_size, kStartingOffset,
+ limits.min_transfer_buffer_size, limits.max_transfer_buffer_size,
+ kAlignment, kSizeToFlush)) {
return false;
}
- mapped_memory_.reset(new MappedMemoryManager(helper_, mapped_memory_limit));
-
- unsigned chunk_size = 2 * 1024 * 1024;
- if (mapped_memory_limit != SharedMemoryLimits::kNoLimit) {
- // Use smaller chunks if the client is very memory conscientious.
- chunk_size = std::min(mapped_memory_limit / 4, chunk_size);
- chunk_size = base::bits::Align(chunk_size,
- FencedAllocator::kAllocAlignment);
- }
- mapped_memory_->set_chunk_size_multiple(chunk_size);
+ max_extra_transfer_buffer_size_ = limits.max_mapped_memory_for_texture_upload;
+ mapped_memory_.reset(
+ new MappedMemoryManager(helper_, limits.mapped_memory_reclaim_limit));
+ mapped_memory_->set_chunk_size_multiple(limits.mapped_memory_chunk_size);
GLStaticState::ShaderPrecisionMap* shader_precisions =
&static_state_.shader_precisions;
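
Initialize() now takes the whole SharedMemoryLimits struct instead of four separate sizes, so callers configure every limit in one place. A hedged sketch of a call site, using the field names that appear in this patch; the concrete sizes are placeholders:

    // Sketch only: "gl" is an already constructed GLES2Implementation and the
    // sizes are illustrative.
    SharedMemoryLimits limits;
    limits.command_buffer_size = 1024 * 1024;
    limits.start_transfer_buffer_size = 64 * 1024;
    limits.min_transfer_buffer_size = 64 * 1024;
    limits.max_transfer_buffer_size = 16 * 1024 * 1024;
    limits.mapped_memory_reclaim_limit = SharedMemoryLimits::kNoLimit;
    if (!gl->Initialize(limits))
      return false;  // transfer buffer could not be set up
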
@@ -360,8 +347,7 @@ void GLES2Implementation::FreeUnusedSharedMemory() {
}
void GLES2Implementation::FreeEverything() {
- WaitForCmd();
- query_tracker_->Shrink();
+ query_tracker_->Shrink(helper_);
FreeUnusedSharedMemory();
transfer_buffer_->Free();
helper_->FreeRingBuffer();
@@ -376,12 +362,8 @@ void GLES2Implementation::RunIfContextNotLost(const base::Closure& callback) {
callback.Run();
}
-int32_t GLES2Implementation::GetStreamId() const {
- return gpu_control_->GetStreamId();
-}
-
-void GLES2Implementation::FlushOrderingBarrierOnStream(int32_t stream_id) {
- gpu_control_->FlushOrderingBarrierOnStream(stream_id);
+void GLES2Implementation::FlushPendingWork() {
+ gpu_control_->FlushPendingWork();
}
void GLES2Implementation::SignalSyncToken(const gpu::SyncToken& sync_token,
@@ -465,15 +447,15 @@ bool GLES2Implementation::OnMemoryDump(
if (args.level_of_detail != MemoryDumpLevelOfDetail::BACKGROUND) {
dump->AddScalar("free_size", MemoryAllocatorDump::kUnitsBytes,
transfer_buffer_->GetFreeSize());
- auto guid = GetBufferGUIDForTracing(tracing_process_id,
- transfer_buffer_->GetShmId());
auto shared_memory_guid =
transfer_buffer_->shared_memory_handle().GetGUID();
const int kImportance = 2;
if (!shared_memory_guid.is_empty()) {
- pmd->CreateSharedMemoryOwnershipEdge(dump->guid(), guid,
- shared_memory_guid, kImportance);
+ pmd->CreateSharedMemoryOwnershipEdge(dump->guid(), shared_memory_guid,
+ kImportance);
} else {
+ auto guid = GetBufferGUIDForTracing(tracing_process_id,
+ transfer_buffer_->GetShmId());
pmd->CreateSharedGlobalAllocatorDump(guid);
pmd->AddOwnershipEdge(dump->guid(), guid, kImportance);
}
@@ -4995,6 +4977,33 @@ void GLES2Implementation::ScheduleDCLayerSharedStateCHROMIUM(
buffer.shm_id(), buffer.offset());
}
+void GLES2Implementation::SetColorSpaceForScanoutCHROMIUM(
+ GLuint texture_id,
+ GLColorSpace color_space) {
+#if defined(__native_client__)
+ // Including gfx::ColorSpace would bring Skia and a lot of other code into
+ // NaCl's IRT.
+ SetGLError(GL_INVALID_VALUE, "GLES2::SetColorSpaceForScanoutCHROMIUM",
+ "not supported");
+#else
+ gfx::ColorSpace* gfx_color_space =
+ reinterpret_cast<gfx::ColorSpace*>(color_space);
+ base::Pickle color_space_data;
+ IPC::ParamTraits<gfx::ColorSpace>::Write(&color_space_data, *gfx_color_space);
+
+ ScopedTransferBufferPtr buffer(color_space_data.size(), helper_,
+ transfer_buffer_);
+ if (!buffer.valid() || buffer.size() < color_space_data.size()) {
+ SetGLError(GL_OUT_OF_MEMORY, "GLES2::SetColorSpaceForScanoutCHROMIUM",
+ "out of memory");
+ return;
+ }
+ memcpy(buffer.address(), color_space_data.data(), color_space_data.size());
+ helper_->SetColorSpaceForScanoutCHROMIUM(
+ texture_id, buffer.shm_id(), buffer.offset(), color_space_data.size());
+#endif
+}
+
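
On the client side a GLColorSpace handle is simply a type-erased gfx::ColorSpace pointer, pickled through IPC::ParamTraits as above. A hedged caller sketch for non-NaCl builds; the texture id is a placeholder:

    // Sketch only: "gl" is a GLES2Interface* for a live context and "texture"
    // is a scanout-capable texture id owned by the caller.
    // Requires ui/gfx/color_space.h (not available in NaCl builds).
    gfx::ColorSpace color_space = gfx::ColorSpace::CreateSRGB();
    gl->SetColorSpaceForScanoutCHROMIUM(
        texture, reinterpret_cast<GLColorSpace>(&color_space));
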
void GLES2Implementation::ScheduleDCLayerCHROMIUM(
GLsizei num_textures,
const GLuint* contents_texture_ids,
@@ -5316,11 +5325,12 @@ void GLES2Implementation::UnmapTexSubImage2DCHROMIUM(const void* mem) {
void GLES2Implementation::ResizeCHROMIUM(GLuint width,
GLuint height,
float scale_factor,
+ GLenum color_space,
GLboolean alpha) {
GPU_CLIENT_SINGLE_THREAD_CHECK();
GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glResizeCHROMIUM(" << width << ", "
<< height << ", " << scale_factor << ", " << alpha << ")");
- helper_->ResizeCHROMIUM(width, height, scale_factor, alpha);
+ helper_->ResizeCHROMIUM(width, height, scale_factor, color_space, alpha);
CheckGLError();
}
@@ -6130,8 +6140,7 @@ void GLES2Implementation::GenSyncTokenCHROMIUM(GLuint64 fence_sync,
}
// Copy the data over after setting the data to ensure alignment.
- SyncToken sync_token_data(gpu_control_->GetNamespaceID(),
- gpu_control_->GetStreamId(),
+ SyncToken sync_token_data(gpu_control_->GetNamespaceID(), 0,
gpu_control_->GetCommandBufferID(), fence_sync);
sync_token_data.SetVerifyFlush();
memcpy(sync_token, &sync_token_data, sizeof(sync_token_data));
@@ -6154,8 +6163,7 @@ void GLES2Implementation::GenUnverifiedSyncTokenCHROMIUM(GLuint64 fence_sync,
}
// Copy the data over after setting the data to ensure alignment.
- SyncToken sync_token_data(gpu_control_->GetNamespaceID(),
- gpu_control_->GetStreamId(),
+ SyncToken sync_token_data(gpu_control_->GetNamespaceID(), 0,
gpu_control_->GetCommandBufferID(), fence_sync);
memcpy(sync_token, &sync_token_data, sizeof(sync_token_data));
}
@@ -6176,20 +6184,18 @@ void GLES2Implementation::VerifySyncTokensCHROMIUM(GLbyte **sync_tokens,
}
requires_synchronization = true;
DCHECK(sync_token.verified_flush());
- memcpy(sync_tokens[i], &sync_token, sizeof(sync_token));
}
+
+ // Set verify bit on empty sync tokens too.
+ sync_token.SetVerifyFlush();
+
+ memcpy(sync_tokens[i], &sync_token, sizeof(sync_token));
}
}
- // This step must be done after all unverified tokens have finished processing
- // CanWaitUnverifiedSyncToken(), command buffers use that to do any necessary
- // flushes.
- if (requires_synchronization) {
- // Make sure we have no pending ordering barriers by flushing now.
- FlushHelper();
- // Ensure all the fence syncs are visible on GPU service.
+ // Ensure all the fence syncs are visible on GPU service.
+ if (requires_synchronization)
gpu_control_->EnsureWorkVisible();
- }
}
void GLES2Implementation::WaitSyncTokenCHROMIUM(const GLbyte* sync_token_data) {
@@ -7130,6 +7136,73 @@ void GLES2Implementation::Viewport(GLint x,
CheckGLError();
}
+void GLES2Implementation::RasterCHROMIUM(const cc::DisplayItemList* list,
+ GLint x,
+ GLint y,
+ GLint w,
+ GLint h) {
+#if defined(OS_NACL)
+ NOTREACHED();
+#else
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glRasterChromium(" << list << ", "
+ << x << ", " << y << ", " << w << ", " << h << ")");
+
+ // TODO(enne): tune these numbers
+ // TODO(enne): convert these types here and in transfer buffer to be size_t.
+ static constexpr unsigned int kMinAlloc = 16 * 1024;
+ static constexpr unsigned int kBlockAlloc = 512 * 1024;
+
+ unsigned int free_size = std::max(transfer_buffer_->GetFreeSize(), kMinAlloc);
+ ScopedTransferBufferPtr buffer(free_size, helper_, transfer_buffer_);
+ DCHECK(buffer.valid());
+
+ char* memory = static_cast<char*>(buffer.address());
+ size_t written_bytes = 0;
+ size_t free_bytes = buffer.size();
+
+ cc::PaintOp::SerializeOptions options;
+
+ // TODO(enne): need to implement alpha folding optimization from POB.
+ // TODO(enne): don't access private members of DisplayItemList.
+ gfx::Rect playback_rect(x, y, w, h);
+ std::vector<size_t> indices = list->rtree_.Search(playback_rect);
+ for (cc::PaintOpBuffer::FlatteningIterator iter(&list->paint_op_buffer_,
+ &indices);
+ iter; ++iter) {
+ const cc::PaintOp* op = *iter;
+ size_t size = op->Serialize(memory + written_bytes, free_bytes, options);
+ if (!size) {
+ buffer.Shrink(written_bytes);
+ helper_->RasterCHROMIUM(buffer.shm_id(), buffer.offset(), x, y, w, h,
+ written_bytes);
+ buffer.Reset(kBlockAlloc);
+ memory = static_cast<char*>(buffer.address());
+ written_bytes = 0;
+ free_bytes = buffer.size();
+
+ size = op->Serialize(memory + written_bytes, free_bytes, options);
+ }
+ DCHECK_GE(size, 4u);
+ DCHECK_EQ(size % cc::PaintOpBuffer::PaintOpAlign, 0u);
+ DCHECK_LE(size, free_bytes);
+ DCHECK_EQ(free_bytes + written_bytes, buffer.size());
+
+ written_bytes += size;
+ free_bytes -= size;
+ }
+
+ buffer.Shrink(written_bytes);
+
+ if (!written_bytes)
+ return;
+ helper_->RasterCHROMIUM(buffer.shm_id(), buffer.offset(), x, y, w, h,
+ buffer.size());
+
+ CheckGLError();
+#endif
+}
+
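
The loop above serializes paint ops directly into the transfer buffer and, when an op no longer fits, emits what has accumulated as one RasterCHROMIUM command before continuing in a fresh block. A self-contained toy of that flush-and-retry pattern, with strings standing in for serialized ops:

    // Toy illustration only; std::string stands in for a serialized PaintOp
    // and each returned element stands in for one RasterCHROMIUM command.
    // Assumes any single op fits within chunk_size.
    #include <string>
    #include <vector>

    std::vector<std::string> ChunkSerialize(const std::vector<std::string>& ops,
                                            size_t chunk_size) {
      std::vector<std::string> chunks;
      std::string current;
      for (const std::string& op : ops) {
        if (!current.empty() && current.size() + op.size() > chunk_size) {
          chunks.push_back(current);  // helper_->RasterCHROMIUM(...)
          current.clear();            // buffer.Reset(kBlockAlloc)
        }
        current += op;                // op->Serialize(...)
      }
      if (!current.empty())
        chunks.push_back(current);    // final helper_->RasterCHROMIUM(...)
      return chunks;
    }
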
// Include the auto-generated part of this file. We split this because it means
// we can easily edit the non-auto generated parts right here in this file
// instead of having to edit some template or the code generator.
diff --git a/chromium/gpu/command_buffer/client/gles2_implementation.h b/chromium/gpu/command_buffer/client/gles2_implementation.h
index 2a3941279fb..17f77992128 100644
--- a/chromium/gpu/command_buffer/client/gles2_implementation.h
+++ b/chromium/gpu/command_buffer/client/gles2_implementation.h
@@ -105,6 +105,7 @@ namespace gpu {
class GpuControl;
class IdAllocator;
class ScopedTransferBufferPtr;
+struct SharedMemoryLimits;
class TransferBufferInterface;
namespace gles2 {
@@ -120,10 +121,10 @@ class QueryTracker;
// GLES2CmdHelper but that entails changing your code to use and deal with
// shared memory and synchronization issues.
class GLES2_IMPL_EXPORT GLES2Implementation
- : NON_EXPORTED_BASE(public GLES2Interface),
- NON_EXPORTED_BASE(public ContextSupport),
- NON_EXPORTED_BASE(public GpuControlClient),
- NON_EXPORTED_BASE(public base::trace_event::MemoryDumpProvider) {
+ : public GLES2Interface,
+ public ContextSupport,
+ public GpuControlClient,
+ public base::trace_event::MemoryDumpProvider {
public:
// Stores GL state that never changes.
struct GLES2_IMPL_EXPORT GLStaticState {
@@ -170,11 +171,7 @@ class GLES2_IMPL_EXPORT GLES2Implementation
~GLES2Implementation() override;
- bool Initialize(
- unsigned int starting_transfer_buffer_size,
- unsigned int min_transfer_buffer_size,
- unsigned int max_transfer_buffer_size,
- unsigned int mapped_memory_limit);
+ bool Initialize(const SharedMemoryLimits& limits);
// The GLES2CmdHelper being used by this GLES2Implementation. You can use
// this to issue cmds at a lower level for certain kinds of optimization.
@@ -192,8 +189,7 @@ class GLES2_IMPL_EXPORT GLES2Implementation
#include "gpu/command_buffer/client/gles2_implementation_autogen.h"
// ContextSupport implementation.
- int32_t GetStreamId() const override;
- void FlushOrderingBarrierOnStream(int32_t stream_id) override;
+ void FlushPendingWork() override;
void SignalSyncToken(const gpu::SyncToken& sync_token,
const base::Closure& callback) override;
bool IsSyncTokenSignaled(const gpu::SyncToken& sync_token) override;
@@ -785,9 +781,7 @@ class GLES2_IMPL_EXPORT GLES2Implementation
// Maximum amount of extra memory from the mapped memory pool to use when
// needing to transfer something exceeding the default transfer buffer.
- // This should be 0 for low memory devices since they are already memory
- // constrained.
- const uint32_t max_extra_transfer_buffer_size_;
+ uint32_t max_extra_transfer_buffer_size_;
// Set of strings returned from glGetString. We need to cache these because
// the pointer passed back to the client has to remain valid for eternity.
diff --git a/chromium/gpu/command_buffer/client/gles2_implementation_autogen.h b/chromium/gpu/command_buffer/client/gles2_implementation_autogen.h
index c868c2c90de..0540db6e597 100644
--- a/chromium/gpu/command_buffer/client/gles2_implementation_autogen.h
+++ b/chromium/gpu/command_buffer/client/gles2_implementation_autogen.h
@@ -891,6 +891,7 @@ void UnmapTexSubImage2DCHROMIUM(const void* mem) override;
void ResizeCHROMIUM(GLuint width,
GLuint height,
GLfloat scale_factor,
+ GLenum color_space,
GLboolean alpha) override;
const GLchar* GetRequestableExtensionsCHROMIUM() override;
@@ -1053,6 +1054,9 @@ void ScheduleCALayerCHROMIUM(GLuint contents_texture_id,
const GLfloat* bounds_rect,
GLuint filter) override;
+void SetColorSpaceForScanoutCHROMIUM(GLuint texture_id,
+ GLColorSpace color_space) override;
+
void ScheduleCALayerInUseQueryCHROMIUM(GLsizei count,
const GLuint* textures) override;
@@ -1221,7 +1225,9 @@ void UniformMatrix4fvStreamTextureMatrixCHROMIUM(
void OverlayPromotionHintCHROMIUM(GLuint texture,
GLboolean promotion_hint,
GLint display_x,
- GLint display_y) override;
+ GLint display_y,
+ GLint display_width,
+ GLint display_height) override;
void SwapBuffersWithBoundsCHROMIUM(GLsizei count, const GLint* rects) override;
@@ -1238,4 +1244,19 @@ void UnlockDiscardableTextureCHROMIUM(GLuint texture_id) override;
bool LockDiscardableTextureCHROMIUM(GLuint texture_id) override;
+void BeginRasterCHROMIUM(GLuint texture_id,
+ GLuint sk_color,
+ GLuint msaa_sample_count,
+ GLboolean can_use_lcd_text,
+ GLboolean use_distance_field_text,
+ GLint pixel_config) override;
+
+void RasterCHROMIUM(const cc::DisplayItemList* list,
+ GLint x,
+ GLint y,
+ GLint w,
+ GLint h) override;
+
+void EndRasterCHROMIUM() override;
+
#endif // GPU_COMMAND_BUFFER_CLIENT_GLES2_IMPLEMENTATION_AUTOGEN_H_
diff --git a/chromium/gpu/command_buffer/client/gles2_implementation_impl_autogen.h b/chromium/gpu/command_buffer/client/gles2_implementation_impl_autogen.h
index 2be26ed3e2d..e5655de73dc 100644
--- a/chromium/gpu/command_buffer/client/gles2_implementation_impl_autogen.h
+++ b/chromium/gpu/command_buffer/client/gles2_implementation_impl_autogen.h
@@ -3122,9 +3122,10 @@ void GLES2Implementation::CopyTextureCHROMIUM(
GPU_CLIENT_SINGLE_THREAD_CHECK();
GPU_CLIENT_LOG(
"[" << GetLogPrefix() << "] glCopyTextureCHROMIUM(" << source_id << ", "
- << source_level << ", " << GLES2Util::GetStringEnum(dest_target)
- << ", " << dest_id << ", " << dest_level << ", " << internalformat
- << ", " << GLES2Util::GetStringPixelType(dest_type) << ", "
+ << source_level << ", "
+ << GLES2Util::GetStringTextureTarget(dest_target) << ", " << dest_id
+ << ", " << dest_level << ", " << internalformat << ", "
+ << GLES2Util::GetStringPixelType(dest_type) << ", "
<< GLES2Util::GetStringBool(unpack_flip_y) << ", "
<< GLES2Util::GetStringBool(unpack_premultiply_alpha) << ", "
<< GLES2Util::GetStringBool(unpack_unmultiply_alpha) << ")");
@@ -3154,9 +3155,9 @@ void GLES2Implementation::CopySubTextureCHROMIUM(
GPU_CLIENT_LOG(
"[" << GetLogPrefix() << "] glCopySubTextureCHROMIUM(" << source_id
<< ", " << source_level << ", "
- << GLES2Util::GetStringEnum(dest_target) << ", " << dest_id << ", "
- << dest_level << ", " << xoffset << ", " << yoffset << ", " << x
- << ", " << y << ", " << width << ", " << height << ", "
+ << GLES2Util::GetStringTextureTarget(dest_target) << ", " << dest_id
+ << ", " << dest_level << ", " << xoffset << ", " << yoffset << ", "
+ << x << ", " << y << ", " << width << ", " << height << ", "
<< GLES2Util::GetStringBool(unpack_flip_y) << ", "
<< GLES2Util::GetStringBool(unpack_premultiply_alpha) << ", "
<< GLES2Util::GetStringBool(unpack_unmultiply_alpha) << ")");
@@ -3198,11 +3199,11 @@ void GLES2Implementation::BindTexImage2DWithInternalformatCHROMIUM(
GLenum internalformat,
GLint imageId) {
GPU_CLIENT_SINGLE_THREAD_CHECK();
- GPU_CLIENT_LOG("[" << GetLogPrefix()
- << "] glBindTexImage2DWithInternalformatCHROMIUM("
- << GLES2Util::GetStringTextureBindTarget(target) << ", "
- << GLES2Util::GetStringEnum(internalformat) << ", "
- << imageId << ")");
+ GPU_CLIENT_LOG(
+ "[" << GetLogPrefix() << "] glBindTexImage2DWithInternalformatCHROMIUM("
+ << GLES2Util::GetStringTextureBindTarget(target) << ", "
+ << GLES2Util::GetStringTextureInternalFormat(internalformat) << ", "
+ << imageId << ")");
helper_->BindTexImage2DWithInternalformatCHROMIUM(target, internalformat,
imageId);
CheckGLError();
@@ -3512,14 +3513,18 @@ void GLES2Implementation::UniformMatrix4fvStreamTextureMatrixCHROMIUM(
void GLES2Implementation::OverlayPromotionHintCHROMIUM(GLuint texture,
GLboolean promotion_hint,
GLint display_x,
- GLint display_y) {
+ GLint display_y,
+ GLint display_width,
+ GLint display_height) {
GPU_CLIENT_SINGLE_THREAD_CHECK();
GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glOverlayPromotionHintCHROMIUM("
<< texture << ", "
<< GLES2Util::GetStringBool(promotion_hint) << ", "
- << display_x << ", " << display_y << ")");
+ << display_x << ", " << display_y << ", " << display_width
+ << ", " << display_height << ")");
helper_->OverlayPromotionHintCHROMIUM(texture, promotion_hint, display_x,
- display_y);
+ display_y, display_width,
+ display_height);
CheckGLError();
}
@@ -3542,4 +3547,31 @@ void GLES2Implementation::SetEnableDCLayersCHROMIUM(GLboolean enabled) {
CheckGLError();
}
+void GLES2Implementation::BeginRasterCHROMIUM(GLuint texture_id,
+ GLuint sk_color,
+ GLuint msaa_sample_count,
+ GLboolean can_use_lcd_text,
+ GLboolean use_distance_field_text,
+ GLint pixel_config) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glBeginRasterCHROMIUM("
+ << texture_id << ", " << sk_color << ", "
+ << msaa_sample_count << ", "
+ << GLES2Util::GetStringBool(can_use_lcd_text) << ", "
+ << GLES2Util::GetStringBool(use_distance_field_text)
+ << ", " << pixel_config << ")");
+ helper_->BeginRasterCHROMIUM(texture_id, sk_color, msaa_sample_count,
+ can_use_lcd_text, use_distance_field_text,
+ pixel_config);
+ CheckGLError();
+}
+
+void GLES2Implementation::EndRasterCHROMIUM() {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glEndRasterCHROMIUM("
+ << ")");
+ helper_->EndRasterCHROMIUM();
+ CheckGLError();
+}
+
#endif // GPU_COMMAND_BUFFER_CLIENT_GLES2_IMPLEMENTATION_IMPL_AUTOGEN_H_
diff --git a/chromium/gpu/command_buffer/client/gles2_implementation_unittest.cc b/chromium/gpu/command_buffer/client/gles2_implementation_unittest.cc
index 6bb2dc50fec..386ff6afb4f 100644
--- a/chromium/gpu/command_buffer/client/gles2_implementation_unittest.cc
+++ b/chromium/gpu/command_buffer/client/gles2_implementation_unittest.cc
@@ -150,6 +150,7 @@ class MockTransferBuffer : public TransferBufferInterface {
void FreePendingToken(void* p, unsigned int /* token */) override;
unsigned int GetSize() const override;
unsigned int GetFreeSize() const override;
+ void ShrinkLastBlock(unsigned int new_size) override;
size_t MaxTransferBufferSize() {
return size_ - result_size_;
@@ -334,6 +335,8 @@ unsigned int MockTransferBuffer::GetFreeSize() const {
return 0;
}
+void MockTransferBuffer::ShrinkLastBlock(unsigned int new_size) {}
+
// API wrapper for Buffers.
class GenBuffersAPI {
public:
@@ -428,6 +431,7 @@ class GLES2ImplementationTest : public testing::Test {
bool timer_queries,
int major_version,
int minor_version) {
+ SharedMemoryLimits limits = SharedMemoryLimitsForTesting();
command_buffer_.reset(new StrictMock<MockClientCommandBuffer>());
transfer_buffer_.reset(
@@ -438,7 +442,7 @@ class GLES2ImplementationTest : public testing::Test {
transfer_buffer_initialize_fail));
helper_.reset(new GLES2CmdHelper(command_buffer()));
- helper_->Initialize(kCommandBufferSizeBytes);
+ helper_->Initialize(limits.command_buffer_size);
gpu_control_.reset(new StrictMock<MockClientGpuControl>());
Capabilities capabilities;
@@ -494,8 +498,7 @@ class GLES2ImplementationTest : public testing::Test {
// The client should be set to something non-null.
EXPECT_CALL(*gpu_control_, SetGpuControlClient(gl_.get())).Times(1);
- if (!gl_->Initialize(kTransferBufferSize, kTransferBufferSize,
- kTransferBufferSize, SharedMemoryLimits::kNoLimit))
+ if (!gl_->Initialize(limits))
return false;
helper_->CommandBufferHelper::Finish();
@@ -689,6 +692,16 @@ class GLES2ImplementationTest : public testing::Test {
return gl_->max_extra_transfer_buffer_size_ > 0;
}
+ static SharedMemoryLimits SharedMemoryLimitsForTesting() {
+ SharedMemoryLimits limits;
+ limits.command_buffer_size = kCommandBufferSizeBytes;
+ limits.start_transfer_buffer_size = kTransferBufferSize;
+ limits.min_transfer_buffer_size = kTransferBufferSize;
+ limits.max_transfer_buffer_size = kTransferBufferSize;
+ limits.mapped_memory_reclaim_limit = SharedMemoryLimits::kNoLimit;
+ return limits;
+ }
+
TestContext test_contexts_[kNumTestContexts];
scoped_refptr<ShareGroup> share_group_;
@@ -3376,11 +3389,11 @@ TEST_F(GLES2ImplementationTest, BeginEndQueryEXT) {
struct EndCmds {
cmds::EndQueryEXT end_query;
};
+ commands = GetPut();
+ gl_->EndQueryEXT(GL_ANY_SAMPLES_PASSED_EXT);
EndCmds expected_end_cmds;
expected_end_cmds.end_query.Init(
GL_ANY_SAMPLES_PASSED_EXT, query->submit_count());
- commands = GetPut();
- gl_->EndQueryEXT(GL_ANY_SAMPLES_PASSED_EXT);
EXPECT_EQ(0, memcmp(
&expected_end_cmds, commands, sizeof(expected_end_cmds)));
@@ -3393,11 +3406,12 @@ TEST_F(GLES2ImplementationTest, BeginEndQueryEXT) {
// Test 2nd Begin/End increments count.
base::subtle::Atomic32 old_submit_count = query->submit_count();
gl_->BeginQueryEXT(GL_ANY_SAMPLES_PASSED_EXT, id1);
+ EXPECT_EQ(old_submit_count, query->submit_count());
+ commands = GetPut();
+ gl_->EndQueryEXT(GL_ANY_SAMPLES_PASSED_EXT);
EXPECT_NE(old_submit_count, query->submit_count());
expected_end_cmds.end_query.Init(
GL_ANY_SAMPLES_PASSED_EXT, query->submit_count());
- commands = GetPut();
- gl_->EndQueryEXT(GL_ANY_SAMPLES_PASSED_EXT);
EXPECT_EQ(0, memcmp(
&expected_end_cmds, commands, sizeof(expected_end_cmds)));
@@ -3606,13 +3620,13 @@ TEST_F(GLES2ImplementationTest, ErrorQuery) {
cmds::BeginQueryEXT begin_query;
cmds::EndQueryEXT end_query;
};
+ const void* commands = GetPut();
+ gl_->EndQueryEXT(GL_GET_ERROR_QUERY_CHROMIUM);
EndCmds expected_end_cmds;
expected_end_cmds.begin_query.Init(
GL_GET_ERROR_QUERY_CHROMIUM, id, query->shm_id(), query->shm_offset());
expected_end_cmds.end_query.Init(
GL_GET_ERROR_QUERY_CHROMIUM, query->submit_count());
- const void* commands = GetPut();
- gl_->EndQueryEXT(GL_GET_ERROR_QUERY_CHROMIUM);
EXPECT_EQ(0, memcmp(
&expected_end_cmds, commands, sizeof(expected_end_cmds)));
ClearCommands();
@@ -3956,7 +3970,6 @@ TEST_F(GLES2ImplementationTest, GenSyncTokenCHROMIUM) {
.WillRepeatedly(Return(kNamespaceId));
EXPECT_CALL(*gpu_control_, GetCommandBufferID())
.WillRepeatedly(Return(kCommandBufferId));
- EXPECT_CALL(*gpu_control_, GetStreamId()).WillRepeatedly(Return(0));
gl_->GenSyncTokenCHROMIUM(kFenceSync, nullptr);
EXPECT_EQ(GL_INVALID_VALUE, CheckError());
@@ -4001,7 +4014,6 @@ TEST_F(GLES2ImplementationTest, GenUnverifiedSyncTokenCHROMIUM) {
.WillRepeatedly(Return(kNamespaceId));
EXPECT_CALL(*gpu_control_, GetCommandBufferID())
.WillRepeatedly(Return(kCommandBufferId));
- EXPECT_CALL(*gpu_control_, GetStreamId()).WillRepeatedly(Return(0));
gl_->GenUnverifiedSyncTokenCHROMIUM(kFenceSync, nullptr);
EXPECT_EQ(GL_INVALID_VALUE, CheckError());
@@ -4053,7 +4065,6 @@ TEST_F(GLES2ImplementationTest, VerifySyncTokensCHROMIUM) {
.WillRepeatedly(Return(kNamespaceId));
EXPECT_CALL(*gpu_control_, GetCommandBufferID())
.WillRepeatedly(Return(kCommandBufferId));
- EXPECT_CALL(*gpu_control_, GetStreamId()).WillRepeatedly(Return(0));
EXPECT_CALL(*gpu_control_, IsFenceSyncRelease(kFenceSync))
.WillOnce(Return(true));
@@ -4108,7 +4119,6 @@ TEST_F(GLES2ImplementationTest, VerifySyncTokensCHROMIUM_Sequence) {
.WillRepeatedly(Return(kNamespaceId));
EXPECT_CALL(*gpu_control_, GetCommandBufferID())
.WillRepeatedly(Return(kCommandBufferId));
- EXPECT_CALL(*gpu_control_, GetStreamId()).WillRepeatedly(Return(0));
// Generate sync token 1.
EXPECT_CALL(*gpu_control_, IsFenceSyncRelease(kFenceSync1))
@@ -4146,6 +4156,30 @@ TEST_F(GLES2ImplementationTest, VerifySyncTokensCHROMIUM_Sequence) {
EXPECT_TRUE(sync_token2.verified_flush());
}
+TEST_F(GLES2ImplementationTest, VerifySyncTokensCHROMIUM_EmptySyncToken) {
+ // To verify sync tokens, all of the sync tokens must be verified after
+ // CanWaitUnverifiedSyncToken() is called. This test ensures the right
+ // sequence.
+ ExpectedMemoryInfo result =
+ GetExpectedResultMemory(sizeof(cmds::GetError::Result));
+ EXPECT_CALL(*command_buffer(), OnFlush())
+ .WillRepeatedly(SetMemory(result.ptr, GLuint(GL_NO_ERROR)))
+ .RetiresOnSaturation();
+
+ gpu::SyncToken sync_token1, sync_token2;
+ GLbyte* sync_token_datas[] = {sync_token1.GetData(), sync_token2.GetData()};
+
+ // Ensure proper sequence of checking and validating.
+ EXPECT_CALL(*gpu_control_, CanWaitUnverifiedSyncToken(_)).Times(0);
+ EXPECT_CALL(*gpu_control_, EnsureWorkVisible()).Times(0);
+ gl_->VerifySyncTokensCHROMIUM(sync_token_datas, arraysize(sync_token_datas));
+ EXPECT_TRUE(NoCommandsWritten());
+ EXPECT_EQ(GL_NO_ERROR, CheckError());
+
+ EXPECT_TRUE(sync_token1.verified_flush());
+ EXPECT_TRUE(sync_token2.verified_flush());
+}
+
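
For context, a hedged sketch of the client-side flow the production path expects in this revision: unverified tokens are generated first and then verified as a batch, after which every token in the array, including default-constructed ones, reports verified_flush():

    // Sketch only: "gl" is a GLES2Interface* for a live context;
    // arraysize() comes from base/macros.h, as in the tests above.
    gpu::SyncToken token;
    const GLuint64 fence = gl->InsertFenceSyncCHROMIUM();
    gl->GenUnverifiedSyncTokenCHROMIUM(fence, token.GetData());

    GLbyte* tokens[] = {token.GetData()};
    gl->VerifySyncTokensCHROMIUM(tokens, arraysize(tokens));
    // token.verified_flush() is now true.
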
TEST_F(GLES2ImplementationTest, WaitSyncTokenCHROMIUM) {
CommandBufferNamespace kNamespaceId = CommandBufferNamespace::GPU_IO;
CommandBufferId kCommandBufferId = CommandBufferId::FromUnsafeValue(234u);
@@ -4161,7 +4195,6 @@ TEST_F(GLES2ImplementationTest, WaitSyncTokenCHROMIUM) {
EXPECT_CALL(*gpu_control_, GetNamespaceID()).WillOnce(Return(kNamespaceId));
EXPECT_CALL(*gpu_control_, GetCommandBufferID())
.WillOnce(Return(kCommandBufferId));
- EXPECT_CALL(*gpu_control_, GetStreamId()).WillOnce(Return(0));
gl_->GenSyncTokenCHROMIUM(kFenceSync, sync_token_data);
struct Cmds {
@@ -4499,7 +4532,6 @@ TEST_F(GLES2ImplementationTest, SignalSyncToken) {
EXPECT_CALL(*gpu_control_, IsFenceSyncFlushReceived(fence_sync))
.WillOnce(Return(true));
EXPECT_CALL(*gpu_control_, GetNamespaceID()).WillOnce(Return(GPU_IO));
- EXPECT_CALL(*gpu_control_, GetStreamId()).WillOnce(Return(0));
EXPECT_CALL(*gpu_control_, GetCommandBufferID())
.WillOnce(Return(CommandBufferId::FromUnsafeValue(1)));
gpu::SyncToken sync_token;
@@ -4531,7 +4563,6 @@ TEST_F(GLES2ImplementationTest, SignalSyncTokenAfterContextLoss) {
EXPECT_CALL(*gpu_control_, IsFenceSyncFlushReceived(fence_sync))
.WillOnce(Return(true));
EXPECT_CALL(*gpu_control_, GetNamespaceID()).WillOnce(Return(GPU_IO));
- EXPECT_CALL(*gpu_control_, GetStreamId()).WillOnce(Return(0));
EXPECT_CALL(*gpu_control_, GetCommandBufferID())
.WillOnce(Return(CommandBufferId::FromUnsafeValue(1)));
gpu::SyncToken sync_token;
diff --git a/chromium/gpu/command_buffer/client/gles2_implementation_unittest_autogen.h b/chromium/gpu/command_buffer/client/gles2_implementation_unittest_autogen.h
index 55e10eaec65..95c61b34b4f 100644
--- a/chromium/gpu/command_buffer/client/gles2_implementation_unittest_autogen.h
+++ b/chromium/gpu/command_buffer/client/gles2_implementation_unittest_autogen.h
@@ -2695,9 +2695,9 @@ TEST_F(GLES2ImplementationTest, ResizeCHROMIUM) {
cmds::ResizeCHROMIUM cmd;
};
Cmds expected;
- expected.cmd.Init(1, 2, 3, true);
+ expected.cmd.Init(1, 2, 3, 4, true);
- gl_->ResizeCHROMIUM(1, 2, 3, true);
+ gl_->ResizeCHROMIUM(1, 2, 3, 4, true);
EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
}
@@ -2717,11 +2717,11 @@ TEST_F(GLES2ImplementationTest, CopyTextureCHROMIUM) {
cmds::CopyTextureCHROMIUM cmd;
};
Cmds expected;
- expected.cmd.Init(1, 2, 3, 4, 5, GL_ALPHA, GL_UNSIGNED_BYTE, true, true,
- true);
+ expected.cmd.Init(1, 2, GL_TEXTURE_2D, 4, 5, GL_ALPHA, GL_UNSIGNED_BYTE, true,
+ true, true);
- gl_->CopyTextureCHROMIUM(1, 2, 3, 4, 5, GL_ALPHA, GL_UNSIGNED_BYTE, true,
- true, true);
+ gl_->CopyTextureCHROMIUM(1, 2, GL_TEXTURE_2D, 4, 5, GL_ALPHA,
+ GL_UNSIGNED_BYTE, true, true, true);
EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
}
@@ -2730,10 +2730,11 @@ TEST_F(GLES2ImplementationTest, CopySubTextureCHROMIUM) {
cmds::CopySubTextureCHROMIUM cmd;
};
Cmds expected;
- expected.cmd.Init(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, true, true, true);
+ expected.cmd.Init(1, 2, GL_TEXTURE_2D, 4, 5, 6, 7, 8, 9, 10, 11, true, true,
+ true);
- gl_->CopySubTextureCHROMIUM(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, true, true,
- true);
+ gl_->CopySubTextureCHROMIUM(1, 2, GL_TEXTURE_2D, 4, 5, 6, 7, 8, 9, 10, 11,
+ true, true, true);
EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
}
@@ -2786,9 +2787,9 @@ TEST_F(GLES2ImplementationTest, BindTexImage2DWithInternalformatCHROMIUM) {
cmds::BindTexImage2DWithInternalformatCHROMIUM cmd;
};
Cmds expected;
- expected.cmd.Init(GL_TEXTURE_2D, 2, 3);
+ expected.cmd.Init(GL_TEXTURE_2D, GL_ALPHA, 3);
- gl_->BindTexImage2DWithInternalformatCHROMIUM(GL_TEXTURE_2D, 2, 3);
+ gl_->BindTexImage2DWithInternalformatCHROMIUM(GL_TEXTURE_2D, GL_ALPHA, 3);
EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
}
@@ -3080,4 +3081,26 @@ TEST_F(GLES2ImplementationTest, SetEnableDCLayersCHROMIUM) {
gl_->SetEnableDCLayersCHROMIUM(true);
EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
}
+
+TEST_F(GLES2ImplementationTest, BeginRasterCHROMIUM) {
+ struct Cmds {
+ cmds::BeginRasterCHROMIUM cmd;
+ };
+ Cmds expected;
+ expected.cmd.Init(1, 2, 3, true, true, 6);
+
+ gl_->BeginRasterCHROMIUM(1, 2, 3, true, true, 6);
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+}
+
+TEST_F(GLES2ImplementationTest, EndRasterCHROMIUM) {
+ struct Cmds {
+ cmds::EndRasterCHROMIUM cmd;
+ };
+ Cmds expected;
+ expected.cmd.Init();
+
+ gl_->EndRasterCHROMIUM();
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+}
#endif // GPU_COMMAND_BUFFER_CLIENT_GLES2_IMPLEMENTATION_UNITTEST_AUTOGEN_H_
diff --git a/chromium/gpu/command_buffer/client/gles2_interface.h b/chromium/gpu/command_buffer/client/gles2_interface.h
index 0457d5eda1a..030ecdf29a9 100644
--- a/chromium/gpu/command_buffer/client/gles2_interface.h
+++ b/chromium/gpu/command_buffer/client/gles2_interface.h
@@ -9,7 +9,12 @@
#include "base/compiler_specific.h"
+namespace cc {
+class DisplayItemList;
+}
+
extern "C" typedef struct _ClientBuffer* ClientBuffer;
+extern "C" typedef struct _GLColorSpace* GLColorSpace;
namespace gpu {
namespace gles2 {
diff --git a/chromium/gpu/command_buffer/client/gles2_interface_autogen.h b/chromium/gpu/command_buffer/client/gles2_interface_autogen.h
index d42c6570a80..24698754755 100644
--- a/chromium/gpu/command_buffer/client/gles2_interface_autogen.h
+++ b/chromium/gpu/command_buffer/client/gles2_interface_autogen.h
@@ -651,6 +651,7 @@ virtual void UnmapTexSubImage2DCHROMIUM(const void* mem) = 0;
virtual void ResizeCHROMIUM(GLuint width,
GLuint height,
GLfloat scale_factor,
+ GLenum color_space,
GLboolean alpha) = 0;
virtual const GLchar* GetRequestableExtensionsCHROMIUM() = 0;
virtual void RequestExtensionCHROMIUM(const char* extension) = 0;
@@ -773,6 +774,8 @@ virtual void ScheduleCALayerCHROMIUM(GLuint contents_texture_id,
GLuint edge_aa_mask,
const GLfloat* bounds_rect,
GLuint filter) = 0;
+virtual void SetColorSpaceForScanoutCHROMIUM(GLuint texture_id,
+ GLColorSpace color_space) = 0;
virtual void ScheduleCALayerInUseQueryCHROMIUM(GLsizei count,
const GLuint* textures) = 0;
virtual void CommitOverlayPlanesCHROMIUM() = 0;
@@ -906,7 +909,9 @@ virtual void UniformMatrix4fvStreamTextureMatrixCHROMIUM(
virtual void OverlayPromotionHintCHROMIUM(GLuint texture,
GLboolean promotion_hint,
GLint display_x,
- GLint display_y) = 0;
+ GLint display_y,
+ GLint display_width,
+ GLint display_height) = 0;
virtual void SwapBuffersWithBoundsCHROMIUM(GLsizei count,
const GLint* rects) = 0;
virtual void SetDrawRectangleCHROMIUM(GLint x,
@@ -917,4 +922,16 @@ virtual void SetEnableDCLayersCHROMIUM(GLboolean enabled) = 0;
virtual void InitializeDiscardableTextureCHROMIUM(GLuint texture_id) = 0;
virtual void UnlockDiscardableTextureCHROMIUM(GLuint texture_id) = 0;
virtual bool LockDiscardableTextureCHROMIUM(GLuint texture_id) = 0;
+virtual void BeginRasterCHROMIUM(GLuint texture_id,
+ GLuint sk_color,
+ GLuint msaa_sample_count,
+ GLboolean can_use_lcd_text,
+ GLboolean use_distance_field_text,
+ GLint pixel_config) = 0;
+virtual void RasterCHROMIUM(const cc::DisplayItemList* list,
+ GLint x,
+ GLint y,
+ GLint w,
+ GLint h) = 0;
+virtual void EndRasterCHROMIUM() = 0;
#endif // GPU_COMMAND_BUFFER_CLIENT_GLES2_INTERFACE_AUTOGEN_H_
diff --git a/chromium/gpu/command_buffer/client/gles2_interface_stub_autogen.h b/chromium/gpu/command_buffer/client/gles2_interface_stub_autogen.h
index f70d6713eeb..c810f9e3607 100644
--- a/chromium/gpu/command_buffer/client/gles2_interface_stub_autogen.h
+++ b/chromium/gpu/command_buffer/client/gles2_interface_stub_autogen.h
@@ -630,6 +630,7 @@ void UnmapTexSubImage2DCHROMIUM(const void* mem) override;
void ResizeCHROMIUM(GLuint width,
GLuint height,
GLfloat scale_factor,
+ GLenum color_space,
GLboolean alpha) override;
const GLchar* GetRequestableExtensionsCHROMIUM() override;
void RequestExtensionCHROMIUM(const char* extension) override;
@@ -751,6 +752,8 @@ void ScheduleCALayerCHROMIUM(GLuint contents_texture_id,
GLuint edge_aa_mask,
const GLfloat* bounds_rect,
GLuint filter) override;
+void SetColorSpaceForScanoutCHROMIUM(GLuint texture_id,
+ GLColorSpace color_space) override;
void ScheduleCALayerInUseQueryCHROMIUM(GLsizei count,
const GLuint* textures) override;
void CommitOverlayPlanesCHROMIUM() override;
@@ -880,7 +883,9 @@ void UniformMatrix4fvStreamTextureMatrixCHROMIUM(
void OverlayPromotionHintCHROMIUM(GLuint texture,
GLboolean promotion_hint,
GLint display_x,
- GLint display_y) override;
+ GLint display_y,
+ GLint display_width,
+ GLint display_height) override;
void SwapBuffersWithBoundsCHROMIUM(GLsizei count, const GLint* rects) override;
void SetDrawRectangleCHROMIUM(GLint x,
GLint y,
@@ -890,4 +895,16 @@ void SetEnableDCLayersCHROMIUM(GLboolean enabled) override;
void InitializeDiscardableTextureCHROMIUM(GLuint texture_id) override;
void UnlockDiscardableTextureCHROMIUM(GLuint texture_id) override;
bool LockDiscardableTextureCHROMIUM(GLuint texture_id) override;
+void BeginRasterCHROMIUM(GLuint texture_id,
+ GLuint sk_color,
+ GLuint msaa_sample_count,
+ GLboolean can_use_lcd_text,
+ GLboolean use_distance_field_text,
+ GLint pixel_config) override;
+void RasterCHROMIUM(const cc::DisplayItemList* list,
+ GLint x,
+ GLint y,
+ GLint w,
+ GLint h) override;
+void EndRasterCHROMIUM() override;
#endif // GPU_COMMAND_BUFFER_CLIENT_GLES2_INTERFACE_STUB_AUTOGEN_H_
diff --git a/chromium/gpu/command_buffer/client/gles2_interface_stub_impl_autogen.h b/chromium/gpu/command_buffer/client/gles2_interface_stub_impl_autogen.h
index 93ab09263c9..ab4e8d06f9c 100644
--- a/chromium/gpu/command_buffer/client/gles2_interface_stub_impl_autogen.h
+++ b/chromium/gpu/command_buffer/client/gles2_interface_stub_impl_autogen.h
@@ -868,6 +868,7 @@ void GLES2InterfaceStub::UnmapTexSubImage2DCHROMIUM(const void* /* mem */) {}
void GLES2InterfaceStub::ResizeCHROMIUM(GLuint /* width */,
GLuint /* height */,
GLfloat /* scale_factor */,
+ GLenum /* color_space */,
GLboolean /* alpha */) {}
const GLchar* GLES2InterfaceStub::GetRequestableExtensionsCHROMIUM() {
return 0;
@@ -1020,6 +1021,9 @@ void GLES2InterfaceStub::ScheduleCALayerCHROMIUM(
GLuint /* edge_aa_mask */,
const GLfloat* /* bounds_rect */,
GLuint /* filter */) {}
+void GLES2InterfaceStub::SetColorSpaceForScanoutCHROMIUM(
+ GLuint /* texture_id */,
+ GLColorSpace /* color_space */) {}
void GLES2InterfaceStub::ScheduleCALayerInUseQueryCHROMIUM(
GLsizei /* count */,
const GLuint* /* textures */) {}
@@ -1184,7 +1188,9 @@ void GLES2InterfaceStub::OverlayPromotionHintCHROMIUM(
GLuint /* texture */,
GLboolean /* promotion_hint */,
GLint /* display_x */,
- GLint /* display_y */) {}
+ GLint /* display_y */,
+ GLint /* display_width */,
+ GLint /* display_height */) {}
void GLES2InterfaceStub::SwapBuffersWithBoundsCHROMIUM(
GLsizei /* count */,
const GLint* /* rects */) {}
@@ -1201,4 +1207,17 @@ bool GLES2InterfaceStub::LockDiscardableTextureCHROMIUM(
GLuint /* texture_id */) {
return 0;
}
+void GLES2InterfaceStub::BeginRasterCHROMIUM(
+ GLuint /* texture_id */,
+ GLuint /* sk_color */,
+ GLuint /* msaa_sample_count */,
+ GLboolean /* can_use_lcd_text */,
+ GLboolean /* use_distance_field_text */,
+ GLint /* pixel_config */) {}
+void GLES2InterfaceStub::RasterCHROMIUM(const cc::DisplayItemList* /* list */,
+ GLint /* x */,
+ GLint /* y */,
+ GLint /* w */,
+ GLint /* h */) {}
+void GLES2InterfaceStub::EndRasterCHROMIUM() {}
#endif // GPU_COMMAND_BUFFER_CLIENT_GLES2_INTERFACE_STUB_IMPL_AUTOGEN_H_
diff --git a/chromium/gpu/command_buffer/client/gles2_trace_implementation.h b/chromium/gpu/command_buffer/client/gles2_trace_implementation.h
index 780a6087e2e..9af6bada31a 100644
--- a/chromium/gpu/command_buffer/client/gles2_trace_implementation.h
+++ b/chromium/gpu/command_buffer/client/gles2_trace_implementation.h
@@ -13,8 +13,7 @@ namespace gpu {
namespace gles2 {
// GLES2TraceImplementation calls TRACE for every GL call.
-class GLES2_IMPL_EXPORT GLES2TraceImplementation
- : NON_EXPORTED_BASE(public GLES2Interface) {
+class GLES2_IMPL_EXPORT GLES2TraceImplementation : public GLES2Interface {
public:
explicit GLES2TraceImplementation(GLES2Interface* gl);
~GLES2TraceImplementation() override;
diff --git a/chromium/gpu/command_buffer/client/gles2_trace_implementation_autogen.h b/chromium/gpu/command_buffer/client/gles2_trace_implementation_autogen.h
index c8dd078740d..45300bf33af 100644
--- a/chromium/gpu/command_buffer/client/gles2_trace_implementation_autogen.h
+++ b/chromium/gpu/command_buffer/client/gles2_trace_implementation_autogen.h
@@ -630,6 +630,7 @@ void UnmapTexSubImage2DCHROMIUM(const void* mem) override;
void ResizeCHROMIUM(GLuint width,
GLuint height,
GLfloat scale_factor,
+ GLenum color_space,
GLboolean alpha) override;
const GLchar* GetRequestableExtensionsCHROMIUM() override;
void RequestExtensionCHROMIUM(const char* extension) override;
@@ -751,6 +752,8 @@ void ScheduleCALayerCHROMIUM(GLuint contents_texture_id,
GLuint edge_aa_mask,
const GLfloat* bounds_rect,
GLuint filter) override;
+void SetColorSpaceForScanoutCHROMIUM(GLuint texture_id,
+ GLColorSpace color_space) override;
void ScheduleCALayerInUseQueryCHROMIUM(GLsizei count,
const GLuint* textures) override;
void CommitOverlayPlanesCHROMIUM() override;
@@ -880,7 +883,9 @@ void UniformMatrix4fvStreamTextureMatrixCHROMIUM(
void OverlayPromotionHintCHROMIUM(GLuint texture,
GLboolean promotion_hint,
GLint display_x,
- GLint display_y) override;
+ GLint display_y,
+ GLint display_width,
+ GLint display_height) override;
void SwapBuffersWithBoundsCHROMIUM(GLsizei count, const GLint* rects) override;
void SetDrawRectangleCHROMIUM(GLint x,
GLint y,
@@ -890,4 +895,16 @@ void SetEnableDCLayersCHROMIUM(GLboolean enabled) override;
void InitializeDiscardableTextureCHROMIUM(GLuint texture_id) override;
void UnlockDiscardableTextureCHROMIUM(GLuint texture_id) override;
bool LockDiscardableTextureCHROMIUM(GLuint texture_id) override;
+void BeginRasterCHROMIUM(GLuint texture_id,
+ GLuint sk_color,
+ GLuint msaa_sample_count,
+ GLboolean can_use_lcd_text,
+ GLboolean use_distance_field_text,
+ GLint pixel_config) override;
+void RasterCHROMIUM(const cc::DisplayItemList* list,
+ GLint x,
+ GLint y,
+ GLint w,
+ GLint h) override;
+void EndRasterCHROMIUM() override;
#endif // GPU_COMMAND_BUFFER_CLIENT_GLES2_TRACE_IMPLEMENTATION_AUTOGEN_H_
diff --git a/chromium/gpu/command_buffer/client/gles2_trace_implementation_impl_autogen.h b/chromium/gpu/command_buffer/client/gles2_trace_implementation_impl_autogen.h
index 5d229f5e3f4..83bff0b573d 100644
--- a/chromium/gpu/command_buffer/client/gles2_trace_implementation_impl_autogen.h
+++ b/chromium/gpu/command_buffer/client/gles2_trace_implementation_impl_autogen.h
@@ -1850,9 +1850,10 @@ void GLES2TraceImplementation::UnmapTexSubImage2DCHROMIUM(const void* mem) {
void GLES2TraceImplementation::ResizeCHROMIUM(GLuint width,
GLuint height,
GLfloat scale_factor,
+ GLenum color_space,
GLboolean alpha) {
TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::ResizeCHROMIUM");
- gl_->ResizeCHROMIUM(width, height, scale_factor, alpha);
+ gl_->ResizeCHROMIUM(width, height, scale_factor, color_space, alpha);
}
const GLchar* GLES2TraceImplementation::GetRequestableExtensionsCHROMIUM() {
@@ -2180,6 +2181,14 @@ void GLES2TraceImplementation::ScheduleCALayerCHROMIUM(
filter);
}
+void GLES2TraceImplementation::SetColorSpaceForScanoutCHROMIUM(
+ GLuint texture_id,
+ GLColorSpace color_space) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu",
+ "GLES2Trace::SetColorSpaceForScanoutCHROMIUM");
+ gl_->SetColorSpaceForScanoutCHROMIUM(texture_id, color_space);
+}
+
void GLES2TraceImplementation::ScheduleCALayerInUseQueryCHROMIUM(
GLsizei count,
const GLuint* textures) {
@@ -2524,11 +2533,13 @@ void GLES2TraceImplementation::OverlayPromotionHintCHROMIUM(
GLuint texture,
GLboolean promotion_hint,
GLint display_x,
- GLint display_y) {
+ GLint display_y,
+ GLint display_width,
+ GLint display_height) {
TRACE_EVENT_BINARY_EFFICIENT0("gpu",
"GLES2Trace::OverlayPromotionHintCHROMIUM");
gl_->OverlayPromotionHintCHROMIUM(texture, promotion_hint, display_x,
- display_y);
+ display_y, display_width, display_height);
}
void GLES2TraceImplementation::SwapBuffersWithBoundsCHROMIUM(
@@ -2573,4 +2584,31 @@ bool GLES2TraceImplementation::LockDiscardableTextureCHROMIUM(
return gl_->LockDiscardableTextureCHROMIUM(texture_id);
}
+void GLES2TraceImplementation::BeginRasterCHROMIUM(
+ GLuint texture_id,
+ GLuint sk_color,
+ GLuint msaa_sample_count,
+ GLboolean can_use_lcd_text,
+ GLboolean use_distance_field_text,
+ GLint pixel_config) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::BeginRasterCHROMIUM");
+ gl_->BeginRasterCHROMIUM(texture_id, sk_color, msaa_sample_count,
+ can_use_lcd_text, use_distance_field_text,
+ pixel_config);
+}
+
+void GLES2TraceImplementation::RasterCHROMIUM(const cc::DisplayItemList* list,
+ GLint x,
+ GLint y,
+ GLint w,
+ GLint h) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::RasterCHROMIUM");
+ gl_->RasterCHROMIUM(list, x, y, w, h);
+}
+
+void GLES2TraceImplementation::EndRasterCHROMIUM() {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::EndRasterCHROMIUM");
+ gl_->EndRasterCHROMIUM();
+}
+
#endif // GPU_COMMAND_BUFFER_CLIENT_GLES2_TRACE_IMPLEMENTATION_IMPL_AUTOGEN_H_
diff --git a/chromium/gpu/command_buffer/client/gpu_control.h b/chromium/gpu/command_buffer/client/gpu_control.h
index f8657455267..7c6384bd4ea 100644
--- a/chromium/gpu/command_buffer/client/gpu_control.h
+++ b/chromium/gpu/command_buffer/client/gpu_control.h
@@ -78,12 +78,8 @@ class GPU_EXPORT GpuControl {
virtual CommandBufferNamespace GetNamespaceID() const = 0;
virtual CommandBufferId GetCommandBufferID() const = 0;
- // Returns the stream id for this context. Only relevant for IPC command
- // buffer proxy. Used as extra command buffer data in sync tokens.
- virtual int32_t GetStreamId() const = 0;
-
- // Flush any outstanding ordering barriers on given stream.
- virtual void FlushOrderingBarrierOnStream(int32_t stream_id) = 0;
+ // Flush any outstanding ordering barriers on all contexts.
+ virtual void FlushPendingWork() = 0;
// Generates a fence sync which should be inserted into the GL command stream.
// When the service executes the fence sync it is released. Fence syncs are
diff --git a/chromium/gpu/command_buffer/client/mapped_memory.cc b/chromium/gpu/command_buffer/client/mapped_memory.cc
index d6c72448c28..be85f0fe97b 100644
--- a/chromium/gpu/command_buffer/client/mapped_memory.cc
+++ b/chromium/gpu/command_buffer/client/mapped_memory.cc
@@ -50,6 +50,7 @@ MappedMemoryManager::MappedMemoryManager(CommandBufferHelper* helper,
}
MappedMemoryManager::~MappedMemoryManager() {
+ helper_->FlushLazy();
CommandBuffer* cmd_buf = helper_->command_buffer();
for (auto& chunk : chunks_) {
cmd_buf->DestroyTransferBuffer(chunk->shm_id());
@@ -146,7 +147,9 @@ void MappedMemoryManager::FreeUnused() {
while (iter != chunks_.end()) {
MemoryChunk* chunk = (*iter).get();
chunk->FreeUnused();
- if (!chunk->InUse()) {
+ if (chunk->bytes_in_use() == 0u) {
+ if (chunk->InUseOrFreePending())
+ helper_->FlushLazy();
cmd_buf->DestroyTransferBuffer(chunk->shm_id());
allocated_memory_ -= chunk->GetSize();
iter = chunks_.erase(iter);
@@ -186,15 +189,14 @@ bool MappedMemoryManager::OnMemoryDump(
dump->AddScalar("free_size", MemoryAllocatorDump::kUnitsBytes,
chunk->GetFreeSize());
- auto guid = GetBufferGUIDForTracing(tracing_process_id, chunk->shm_id());
-
auto shared_memory_guid =
chunk->shared_memory()->backing()->shared_memory_handle().GetGUID();
const int kImportance = 2;
if (!shared_memory_guid.is_empty()) {
- pmd->CreateSharedMemoryOwnershipEdge(dump->guid(), guid,
- shared_memory_guid, kImportance);
+ pmd->CreateSharedMemoryOwnershipEdge(dump->guid(), shared_memory_guid,
+ kImportance);
} else {
+ auto guid = GetBufferGUIDForTracing(tracing_process_id, chunk->shm_id());
pmd->CreateSharedGlobalAllocatorDump(guid);
pmd->AddOwnershipEdge(dump->guid(), guid, kImportance);
}
@@ -203,6 +205,17 @@ bool MappedMemoryManager::OnMemoryDump(
return true;
}
+FencedAllocator::State MappedMemoryManager::GetPointerStatusForTest(
+ void* pointer,
+ int32_t* token_if_pending) {
+ for (auto& chunk : chunks_) {
+ if (chunk->IsInChunk(pointer)) {
+ return chunk->GetPointerStatusForTest(pointer, token_if_pending);
+ }
+ }
+ return FencedAllocator::FREE;
+}
+
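
The new hook walks the chunks and reports the allocation state of a pointer, returning FREE for anything it does not own. A hedged test-side sketch, mirroring the fixtures in mapped_memory_unittest.cc; the size is illustrative:

    // Sketch only: "manager_" and "helper_" are set up as in
    // MappedMemoryManagerTest.
    int32_t id = -1;
    unsigned int offset = 0;
    void* mem = manager_->Alloc(1024, &id, &offset);
    int32_t pending = 0;
    EXPECT_EQ(FencedAllocator::IN_USE,
              manager_->GetPointerStatusForTest(mem, &pending));

    manager_->FreePendingToken(mem, helper_->InsertToken());
    EXPECT_EQ(FencedAllocator::FREE_PENDING_TOKEN,
              manager_->GetPointerStatusForTest(mem, &pending));
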
void ScopedMappedMemoryPtr::Release() {
if (buffer_) {
mapped_memory_manager_->FreePendingToken(buffer_, helper_->InsertToken());
diff --git a/chromium/gpu/command_buffer/client/mapped_memory.h b/chromium/gpu/command_buffer/client/mapped_memory.h
index ac4c33fcece..c23e60259d3 100644
--- a/chromium/gpu/command_buffer/client/mapped_memory.h
+++ b/chromium/gpu/command_buffer/client/mapped_memory.h
@@ -105,15 +105,18 @@ class GPU_EXPORT MemoryChunk {
reinterpret_cast<const int8_t*>(shm_->memory()) + shm_->size();
}
- // Returns true of any memory in this chunk is in use.
- bool InUse() {
- return allocator_.InUse();
- }
+ // Returns true if any memory in this chunk is in use or free pending token.
+ bool InUseOrFreePending() { return allocator_.InUseOrFreePending(); }
size_t bytes_in_use() const {
return allocator_.bytes_in_use();
}
+ FencedAllocator::State GetPointerStatusForTest(void* pointer,
+ int32_t* token_if_pending) {
+ return allocator_.GetPointerStatusForTest(pointer, token_if_pending);
+ }
+
private:
int32_t shm_id_;
scoped_refptr<gpu::Buffer> shm_;
@@ -202,6 +205,11 @@ class GPU_EXPORT MappedMemoryManager {
return allocated_memory_;
}
+ // Gets the status of a previous allocation, as well as the corresponding
+ // token if FREE_PENDING_TOKEN (and token_if_pending is not null).
+ FencedAllocator::State GetPointerStatusForTest(void* pointer,
+ int32_t* token_if_pending);
+
private:
typedef std::vector<std::unique_ptr<MemoryChunk>> MemoryChunkVector;
diff --git a/chromium/gpu/command_buffer/client/mapped_memory_unittest.cc b/chromium/gpu/command_buffer/client/mapped_memory_unittest.cc
index c36f66358e0..f1786218ff3 100644
--- a/chromium/gpu/command_buffer/client/mapped_memory_unittest.cc
+++ b/chromium/gpu/command_buffer/client/mapped_memory_unittest.cc
@@ -15,7 +15,7 @@
#include "base/message_loop/message_loop.h"
#include "base/run_loop.h"
#include "gpu/command_buffer/client/cmd_buffer_helper.h"
-#include "gpu/command_buffer/service/command_buffer_direct.h"
+#include "gpu/command_buffer/client/command_buffer_direct_locked.h"
#include "gpu/command_buffer/service/mocks.h"
#include "gpu/command_buffer/service/transfer_buffer_manager.h"
#include "testing/gtest/include/gtest/gtest.h"
@@ -37,7 +37,7 @@ class MappedMemoryTestBase : public testing::Test {
void SetUp() override {
transfer_buffer_manager_ = base::MakeUnique<TransferBufferManager>(nullptr);
command_buffer_.reset(
- new CommandBufferDirect(transfer_buffer_manager_.get()));
+ new CommandBufferDirectLocked(transfer_buffer_manager_.get()));
api_mock_.reset(new AsyncAPIMock(true, command_buffer_->service()));
command_buffer_->set_handler(api_mock_.get());
@@ -57,7 +57,7 @@ class MappedMemoryTestBase : public testing::Test {
int32_t GetToken() { return command_buffer_->GetLastState().token; }
std::unique_ptr<TransferBufferManager> transfer_buffer_manager_;
- std::unique_ptr<CommandBufferDirect> command_buffer_;
+ std::unique_ptr<CommandBufferDirectLocked> command_buffer_;
std::unique_ptr<AsyncAPIMock> api_mock_;
std::unique_ptr<CommandBufferHelper> helper_;
base::MessageLoop message_loop_;
@@ -242,23 +242,60 @@ TEST_F(MappedMemoryManagerTest, FreePendingToken) {
}
TEST_F(MappedMemoryManagerTest, FreeUnused) {
+ command_buffer_->LockFlush();
int32_t id = -1;
unsigned int offset = 0xFFFFFFFFU;
- void* m1 = manager_->Alloc(kBufferSize, &id, &offset);
- void* m2 = manager_->Alloc(kBufferSize, &id, &offset);
+ const unsigned int kAllocSize = 2048;
+ manager_->set_chunk_size_multiple(kAllocSize * 2);
+
+ void* m1 = manager_->Alloc(kAllocSize, &id, &offset);
+ void* m2 = manager_->Alloc(kAllocSize, &id, &offset);
ASSERT_TRUE(m1 != NULL);
ASSERT_TRUE(m2 != NULL);
+ // m1 and m2 fit in one chunk
+ EXPECT_EQ(1u, manager_->num_chunks());
+
+ void* m3 = manager_->Alloc(kAllocSize, &id, &offset);
+ ASSERT_TRUE(m3 != NULL);
+ // m3 needs another chunk
EXPECT_EQ(2u, manager_->num_chunks());
+
+ // Nothing to free, both chunks are in-use.
manager_->FreeUnused();
EXPECT_EQ(2u, manager_->num_chunks());
- manager_->Free(m2);
+
+ manager_->Free(m3);
EXPECT_EQ(2u, manager_->num_chunks());
+ // The second chunk is no longer in use, so we can remove it.
manager_->FreeUnused();
EXPECT_EQ(1u, manager_->num_chunks());
- manager_->Free(m1);
+
+ int32_t token = helper_->InsertToken();
+ manager_->FreePendingToken(m1, token);
+ // The way we hooked up the helper and engine, it won't process commands
+ // until it has to wait for something. Which means the token shouldn't have
+ // passed yet at this point.
+ EXPECT_GT(token, GetToken());
EXPECT_EQ(1u, manager_->num_chunks());
+
+ int old_flush_count = command_buffer_->FlushCount();
+ // The remaining chunk is still busy, can't free it.
+ manager_->FreeUnused();
+ EXPECT_EQ(1u, manager_->num_chunks());
+ // This should not have caused a Flush or a Finish.
+ EXPECT_GT(token, GetToken());
+ EXPECT_EQ(old_flush_count, command_buffer_->FlushCount());
+
+ manager_->Free(m2);
+ EXPECT_EQ(1u, manager_->num_chunks());
+ // The remaining chunk is free pending token, we can release the shared
+ // memory.
manager_->FreeUnused();
EXPECT_EQ(0u, manager_->num_chunks());
+ // This should have triggered a Flush, but not forced a Finish, i.e. the token
+ // shouldn't have passed yet.
+ EXPECT_EQ(old_flush_count + 1, command_buffer_->FlushCount());
+ EXPECT_GT(token, GetToken());
}
TEST_F(MappedMemoryManagerTest, ChunkSizeMultiple) {
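
The expectations in the reworked FreeUnused test boil down to a simple policy: chunks with in-use blocks are kept, fully idle chunks are released immediately, and chunks whose only busy blocks are free-pending-token are released after issuing a Flush (never a Finish). A standalone sketch of that policy with simplified stand-in chunks:

#include <cstddef>
#include <vector>

struct ChunkModel {
  bool has_in_use = false;              // some block is still IN_USE
  bool has_free_pending_token = false;  // some block awaits a token
};

// Returns how many chunks were released; sets |flushed| if releasing a
// free-pending-token chunk required a Flush (the real code never Finishes).
size_t FreeUnused(std::vector<ChunkModel>* chunks, bool* flushed) {
  std::vector<ChunkModel> kept;
  size_t released = 0;
  for (const ChunkModel& chunk : *chunks) {
    if (chunk.has_in_use) {
      kept.push_back(chunk);
    } else {
      if (chunk.has_free_pending_token)
        *flushed = true;
      ++released;
    }
  }
  chunks->swap(kept);
  return released;
}
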
diff --git a/chromium/gpu/command_buffer/client/query_tracker.cc b/chromium/gpu/command_buffer/client/query_tracker.cc
index 4e241d6446a..a4b76f5f0b3 100644
--- a/chromium/gpu/command_buffer/client/query_tracker.cc
+++ b/chromium/gpu/command_buffer/client/query_tracker.cc
@@ -13,6 +13,7 @@
#include <stdint.h>
#include "base/atomicops.h"
+#include "base/memory/ptr_util.h"
#include "base/numerics/safe_conversions.h"
#include "gpu/command_buffer/client/gles2_cmd_helper.h"
#include "gpu/command_buffer/client/gles2_implementation.h"
@@ -29,6 +30,22 @@ QuerySyncManager::Bucket::Bucket(QuerySync* sync_mem,
QuerySyncManager::Bucket::~Bucket() = default;
+void QuerySyncManager::Bucket::FreePendingSyncs() {
+ auto it =
+ std::remove_if(pending_syncs.begin(), pending_syncs.end(),
+ [this](const PendingSync& pending) {
+ QuerySync* sync = this->syncs + pending.index;
+ if (base::subtle::Acquire_Load(&sync->process_count) ==
+ pending.submit_count) {
+ this->in_use_query_syncs[pending.index] = false;
+ return true;
+ } else {
+ return false;
+ }
+ });
+ pending_syncs.erase(it, pending_syncs.end());
+}
+
QuerySyncManager::QuerySyncManager(MappedMemoryManager* manager)
: mapped_memory_(manager) {
DCHECK(manager);
@@ -37,7 +54,6 @@ QuerySyncManager::QuerySyncManager(MappedMemoryManager* manager)
QuerySyncManager::~QuerySyncManager() {
while (!buckets_.empty()) {
mapped_memory_->Free(buckets_.front()->syncs);
- delete buckets_.front();
buckets_.pop_front();
}
}
@@ -45,11 +61,10 @@ QuerySyncManager::~QuerySyncManager() {
bool QuerySyncManager::Alloc(QuerySyncManager::QueryInfo* info) {
DCHECK(info);
Bucket* bucket = nullptr;
- for (Bucket* bucket_candidate : buckets_) {
- // In C++11 STL this could be replaced with
- // if (!bucket_candidate->in_use_queries.all()) { ... }
- if (bucket_candidate->in_use_queries.count() != kSyncsPerBucket) {
- bucket = bucket_candidate;
+ for (auto& bucket_candidate : buckets_) {
+ bucket_candidate->FreePendingSyncs();
+ if (!bucket_candidate->in_use_query_syncs.all()) {
+ bucket = bucket_candidate.get();
break;
}
}
@@ -62,62 +77,79 @@ bool QuerySyncManager::Alloc(QuerySyncManager::QueryInfo* info) {
return false;
}
QuerySync* syncs = static_cast<QuerySync*>(mem);
- bucket = new Bucket(syncs, shm_id, shm_offset);
- buckets_.push_back(bucket);
+ buckets_.push_back(base::MakeUnique<Bucket>(syncs, shm_id, shm_offset));
+ bucket = buckets_.back().get();
}
size_t index_in_bucket = 0;
for (size_t i = 0; i < kSyncsPerBucket; i++) {
- if (!bucket->in_use_queries[i]) {
+ if (!bucket->in_use_query_syncs[i]) {
index_in_bucket = i;
break;
}
}
- uint32_t shm_offset =
- bucket->base_shm_offset + index_in_bucket * sizeof(QuerySync);
- QuerySync* sync = bucket->syncs + index_in_bucket;
- *info = QueryInfo(bucket, bucket->shm_id, shm_offset, sync);
+ *info = QueryInfo(bucket, index_in_bucket);
info->sync->Reset();
- bucket->in_use_queries[index_in_bucket] = true;
+ bucket->in_use_query_syncs[index_in_bucket] = true;
return true;
}
void QuerySyncManager::Free(const QuerySyncManager::QueryInfo& info) {
- DCHECK_NE(info.bucket->in_use_queries.count(), 0u);
- unsigned short index_in_bucket = info.sync - info.bucket->syncs;
- DCHECK(info.bucket->in_use_queries[index_in_bucket]);
- info.bucket->in_use_queries[index_in_bucket] = false;
+ DCHECK_NE(info.bucket->in_use_query_syncs.count(), 0u);
+ unsigned short index_in_bucket = info.index();
+ DCHECK(info.bucket->in_use_query_syncs[index_in_bucket]);
+ if (base::subtle::Acquire_Load(&info.sync->process_count) !=
+ info.submit_count) {
+ // When you delete a query you can't mark its memory as unused until it's
+ // completed.
+ info.bucket->pending_syncs.push_back(
+ Bucket::PendingSync{index_in_bucket, info.submit_count});
+ } else {
+ info.bucket->in_use_query_syncs[index_in_bucket] = false;
+ }
}
-void QuerySyncManager::Shrink() {
- std::deque<Bucket*> new_buckets;
+void QuerySyncManager::Shrink(CommandBufferHelper* helper) {
+ std::deque<std::unique_ptr<Bucket>> new_buckets;
+ bool has_token = false;
+ uint32_t token = 0;
while (!buckets_.empty()) {
- Bucket* bucket = buckets_.front();
- if (bucket->in_use_queries.any()) {
- new_buckets.push_back(bucket);
+ std::unique_ptr<Bucket>& bucket = buckets_.front();
+ bucket->FreePendingSyncs();
+ if (bucket->in_use_query_syncs.any()) {
+ if (bucket->in_use_query_syncs.count() == bucket->pending_syncs.size()) {
+ // Every QuerySync that is in-use is just pending completion. We know
+ // the query has been deleted, so nothing on the service side will
+ // access the shared memory after current commands, so we can
+ // free-pending-token.
+ token = helper->InsertToken();
+ has_token = true;
+ mapped_memory_->FreePendingToken(bucket->syncs, token);
+ } else {
+ new_buckets.push_back(std::move(bucket));
+ }
} else {
+ // Every QuerySync is free or completed, so we know the service side won't
+ // access it any more, so we can free immediately.
mapped_memory_->Free(bucket->syncs);
- delete bucket;
}
buckets_.pop_front();
}
buckets_.swap(new_buckets);
}
-QueryTracker::Query::Query(GLuint id, GLenum target,
+QueryTracker::Query::Query(GLuint id,
+ GLenum target,
const QuerySyncManager::QueryInfo& info)
: id_(id),
target_(target),
info_(info),
state_(kUninitialized),
- submit_count_(0),
token_(0),
flush_count_(0),
client_begin_time_us_(0),
- result_(0) {
- }
-
+ result_(0) {}
void QueryTracker::Query::Begin(GLES2Implementation* gl) {
// init memory, inc count
@@ -160,23 +192,25 @@ void QueryTracker::Query::End(GLES2Implementation* gl) {
}
}
flush_count_ = gl->helper()->flush_generation();
- gl->helper()->EndQueryEXT(target(), submit_count());
- MarkAsPending(gl->helper()->InsertToken());
+ int32_t submit_count = NextSubmitCount();
+ gl->helper()->EndQueryEXT(target(), submit_count);
+ MarkAsPending(gl->helper()->InsertToken(), submit_count);
}
void QueryTracker::Query::QueryCounter(GLES2Implementation* gl) {
MarkAsActive();
flush_count_ = gl->helper()->flush_generation();
+ int32_t submit_count = NextSubmitCount();
gl->helper()->QueryCounterEXT(id(), target(), shm_id(), shm_offset(),
- submit_count());
- MarkAsPending(gl->helper()->InsertToken());
+ submit_count);
+ MarkAsPending(gl->helper()->InsertToken(), submit_count);
}
bool QueryTracker::Query::CheckResultsAvailable(
CommandBufferHelper* helper) {
if (Pending()) {
- bool processed_all =
- base::subtle::Acquire_Load(&info_.sync->process_count) == submit_count_;
+ bool processed_all = base::subtle::Acquire_Load(
+ &info_.sync->process_count) == submit_count();
// We check lost on the command buffer itself here instead of checking the
// GLES2Implementation because the GLES2Implementation will not hear about
// the loss until we exit out of this call stack (to avoid re-entrancy), and
@@ -228,14 +262,8 @@ QueryTracker::QueryTracker(MappedMemoryManager* manager)
}
QueryTracker::~QueryTracker() {
- while (!queries_.empty()) {
- delete queries_.begin()->second;
- queries_.erase(queries_.begin());
- }
- while (!removed_queries_.empty()) {
- delete removed_queries_.front();
- removed_queries_.pop_front();
- }
+ for (auto& kv : queries_)
+ query_sync_manager_.Free(kv.second->info_);
if (disjoint_count_sync_) {
mapped_memory_->Free(disjoint_count_sync_);
disjoint_count_sync_ = nullptr;
@@ -244,21 +272,21 @@ QueryTracker::~QueryTracker() {
QueryTracker::Query* QueryTracker::CreateQuery(GLuint id, GLenum target) {
DCHECK_NE(0u, id);
- FreeCompletedQueries();
QuerySyncManager::QueryInfo info;
if (!query_sync_manager_.Alloc(&info)) {
return nullptr;
}
- Query* query = new Query(id, target, info);
+ auto query = base::MakeUnique<Query>(id, target, info);
+ Query* query_ptr = query.get();
std::pair<QueryIdMap::iterator, bool> result =
- queries_.insert(std::make_pair(id, query));
+ queries_.emplace(id, std::move(query));
DCHECK(result.second);
- return query;
+ return query_ptr;
}
QueryTracker::Query* QueryTracker::GetQuery(GLuint client_id) {
QueryIdMap::iterator it = queries_.find(client_id);
- return it != queries_.end() ? it->second : nullptr;
+ return it != queries_.end() ? it->second.get() : nullptr;
}
QueryTracker::Query* QueryTracker::GetCurrentQuery(GLenum target) {
@@ -269,7 +297,7 @@ QueryTracker::Query* QueryTracker::GetCurrentQuery(GLenum target) {
void QueryTracker::RemoveQuery(GLuint client_id) {
QueryIdMap::iterator it = queries_.find(client_id);
if (it != queries_.end()) {
- Query* query = it->second;
+ Query* query = it->second.get();
// Erase from current targets map if it is the current target.
const GLenum target = query->target();
@@ -278,36 +306,13 @@ void QueryTracker::RemoveQuery(GLuint client_id) {
current_queries_.erase(target_it);
}
- // When you delete a query you can't mark its memory as unused until it's
- // completed.
- // Note: If you don't do this you won't mess up the service but you will
- // mess up yourself.
- removed_queries_.push_back(query);
+ query_sync_manager_.Free(query->info_);
queries_.erase(it);
- FreeCompletedQueries();
}
}
-void QueryTracker::Shrink() {
- FreeCompletedQueries();
- query_sync_manager_.Shrink();
-}
-
-void QueryTracker::FreeCompletedQueries() {
- QueryList::iterator it = removed_queries_.begin();
- while (it != removed_queries_.end()) {
- Query* query = *it;
- if (query->Pending() &&
- base::subtle::Acquire_Load(&query->info_.sync->process_count) !=
- query->submit_count()) {
- ++it;
- continue;
- }
-
- query_sync_manager_.Free(query->info_);
- it = removed_queries_.erase(it);
- delete query;
- }
+void QueryTracker::Shrink(CommandBufferHelper* helper) {
+ query_sync_manager_.Shrink(helper);
}
bool QueryTracker::BeginQuery(GLuint id, GLenum target,
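
Bucket::FreePendingSyncs is the core of the new deferred-free scheme: a slot released while its query was still pending is only reclaimed once the service-side process_count catches up with the submit_count recorded at Free() time. A standalone sketch of that remove_if pattern (the real code does an acquire-load on shared memory; here the counts are simply passed in as a plain array):

#include <algorithm>
#include <bitset>
#include <cstdint>
#include <vector>

struct PendingSync {
  uint32_t index;
  int32_t submit_count;
};

struct BucketModel {
  static constexpr size_t kSlots = 8;
  std::bitset<kSlots> in_use;
  std::vector<PendingSync> pending;

  // Reclaim slots whose query has completed on the service side.
  void FreePendingSyncs(const int32_t* process_counts) {
    auto it = std::remove_if(pending.begin(), pending.end(),
                             [&](const PendingSync& p) {
                               if (process_counts[p.index] != p.submit_count)
                                 return false;  // still pending on the service
                               in_use[p.index] = false;
                               return true;
                             });
    pending.erase(it, pending.end());
  }
};
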
diff --git a/chromium/gpu/command_buffer/client/query_tracker.h b/chromium/gpu/command_buffer/client/query_tracker.h
index c4505f1d55b..a52910055d2 100644
--- a/chromium/gpu/command_buffer/client/query_tracker.h
+++ b/chromium/gpu/command_buffer/client/query_tracker.h
@@ -13,9 +13,12 @@
#include <bitset>
#include <deque>
#include <list>
+#include <memory>
#include "base/atomicops.h"
+#include "base/containers/flat_map.h"
#include "base/containers/hash_tables.h"
+#include "base/gtest_prod_util.h"
#include "base/macros.h"
#include "gles2_impl_export.h"
#include "gpu/command_buffer/common/gles2_cmd_format.h"
@@ -34,29 +37,34 @@ class GLES2_IMPL_EXPORT QuerySyncManager {
public:
static const size_t kSyncsPerBucket = 256;
- struct Bucket {
+ struct GLES2_IMPL_EXPORT Bucket {
Bucket(QuerySync* sync_mem, int32_t shm_id, uint32_t shm_offset);
~Bucket();
+
+ void FreePendingSyncs();
+
QuerySync* syncs;
int32_t shm_id;
uint32_t base_shm_offset;
- std::bitset<kSyncsPerBucket> in_use_queries;
+ std::bitset<kSyncsPerBucket> in_use_query_syncs;
+
+ struct PendingSync {
+ uint32_t index;
+ int32_t submit_count;
+ };
+ std::vector<PendingSync> pending_syncs;
};
+
struct QueryInfo {
- QueryInfo(Bucket* bucket, int32_t id, uint32_t offset, QuerySync* sync_mem)
- : bucket(bucket), shm_id(id), shm_offset(offset), sync(sync_mem) {}
-
- QueryInfo()
- : bucket(NULL),
- shm_id(0),
- shm_offset(0),
- sync(NULL) {
- }
+ QueryInfo(Bucket* bucket, uint32_t index)
+ : bucket(bucket), sync(bucket->syncs + index) {}
+ QueryInfo() {}
- Bucket* bucket;
- int32_t shm_id;
- uint32_t shm_offset;
- QuerySync* sync;
+ uint32_t index() const { return sync - bucket->syncs; }
+
+ Bucket* bucket = nullptr;
+ QuerySync* sync = nullptr;
+ int32_t submit_count = 0;
};
explicit QuerySyncManager(MappedMemoryManager* manager);
@@ -64,11 +72,13 @@ class GLES2_IMPL_EXPORT QuerySyncManager {
bool Alloc(QueryInfo* info);
void Free(const QueryInfo& sync);
- void Shrink();
+ void Shrink(CommandBufferHelper* helper);
private:
+ FRIEND_TEST_ALL_PREFIXES(QuerySyncManagerTest, Shrink);
+
MappedMemoryManager* mapped_memory_;
- std::deque<Bucket*> buckets_;
+ std::deque<std::unique_ptr<Bucket>> buckets_;
DISALLOW_COPY_AND_ASSIGN(QuerySyncManager);
};
@@ -95,23 +105,30 @@ class GLES2_IMPL_EXPORT QueryTracker {
return id_;
}
- int32_t shm_id() const { return info_.shm_id; }
+ int32_t shm_id() const { return info_.bucket->shm_id; }
- uint32_t shm_offset() const { return info_.shm_offset; }
+ uint32_t shm_offset() const {
+ return info_.bucket->base_shm_offset + sizeof(QuerySync) * info_.index();
+ }
void MarkAsActive() {
state_ = kActive;
- ++submit_count_;
- if (submit_count_ == INT_MAX)
- submit_count_ = 1;
}
- void MarkAsPending(int32_t token) {
+ int32_t NextSubmitCount() const {
+ int32_t submit_count = info_.submit_count + 1;
+ if (submit_count == INT_MAX)
+ submit_count = 1;
+ return submit_count;
+ }
+
+ void MarkAsPending(int32_t token, int32_t submit_count) {
+ info_.submit_count = submit_count;
token_ = token;
state_ = kPending;
}
- base::subtle::Atomic32 submit_count() const { return submit_count_; }
+ base::subtle::Atomic32 submit_count() const { return info_.submit_count; }
int32_t token() const { return token_; }
@@ -143,22 +160,20 @@ class GLES2_IMPL_EXPORT QueryTracker {
GLenum target_;
QuerySyncManager::QueryInfo info_;
State state_;
- base::subtle::Atomic32 submit_count_;
int32_t token_;
uint32_t flush_count_;
uint64_t client_begin_time_us_; // Only used for latency query target.
uint64_t result_;
};
- QueryTracker(MappedMemoryManager* manager);
+ explicit QueryTracker(MappedMemoryManager* manager);
~QueryTracker();
Query* CreateQuery(GLuint id, GLenum target);
Query* GetQuery(GLuint id);
Query* GetCurrentQuery(GLenum target);
void RemoveQuery(GLuint id);
- void Shrink();
- void FreeCompletedQueries();
+ void Shrink(CommandBufferHelper* helper);
bool BeginQuery(GLuint id, GLenum target, GLES2Implementation* gl);
bool EndQuery(GLenum target, GLES2Implementation* gl);
@@ -175,13 +190,11 @@ class GLES2_IMPL_EXPORT QueryTracker {
}
private:
- typedef base::hash_map<GLuint, Query*> QueryIdMap;
- typedef base::hash_map<GLenum, Query*> QueryTargetMap;
- typedef std::list<Query*> QueryList;
+ typedef base::hash_map<GLuint, std::unique_ptr<Query>> QueryIdMap;
+ typedef base::flat_map<GLenum, Query*> QueryTargetMap;
QueryIdMap queries_;
QueryTargetMap current_queries_;
- QueryList removed_queries_;
QuerySyncManager query_sync_manager_;
// The shared memory used for synchronizing timer disjoint values.
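
QueryInfo no longer stores shm_id/shm_offset; both are derived from the bucket and the slot index, exactly as the new index() and shm_offset() accessors do. A small standalone illustration of that pointer arithmetic (QuerySyncStandIn is a dummy 16-byte type, not the real QuerySync, and the base offset is an arbitrary assumption):

#include <cstdint>
#include <cstdio>

struct QuerySyncStandIn { unsigned char data[16]; };  // stand-in, not the real QuerySync

int main() {
  QuerySyncStandIn syncs[256] = {};
  const uint32_t base_shm_offset = 4096;     // assumed bucket base offset
  const QuerySyncStandIn* sync = &syncs[5];  // slot handed out by Alloc()

  // index() and shm_offset() as computed in the diff.
  uint32_t index = static_cast<uint32_t>(sync - syncs);
  uint32_t shm_offset =
      base_shm_offset + static_cast<uint32_t>(sizeof(QuerySyncStandIn)) * index;
  std::printf("index=%u shm_offset=%u\n", index, shm_offset);  // 5, 4176
  return 0;
}
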
diff --git a/chromium/gpu/command_buffer/client/query_tracker_unittest.cc b/chromium/gpu/command_buffer/client/query_tracker_unittest.cc
index 78ab936d11d..8b5191e2608 100644
--- a/chromium/gpu/command_buffer/client/query_tracker_unittest.cc
+++ b/chromium/gpu/command_buffer/client/query_tracker_unittest.cc
@@ -20,6 +20,9 @@
#include "testing/gmock/include/gmock/gmock.h"
#include "testing/gtest/include/gtest/gtest.h"
+using testing::_;
+using testing::AnyNumber;
+
namespace gpu {
namespace gles2 {
@@ -39,13 +42,14 @@ class QuerySyncManagerTest : public testing::Test {
}
void TearDown() override {
+ EXPECT_CALL(*command_buffer_, DestroyTransferBuffer(_)).Times(AnyNumber());
sync_manager_.reset();
mapped_memory_.reset();
helper_.reset();
command_buffer_.reset();
}
- std::unique_ptr<CommandBuffer> command_buffer_;
+ std::unique_ptr<MockClientCommandBuffer> command_buffer_;
std::unique_ptr<GLES2CmdHelper> helper_;
std::unique_ptr<MappedMemoryManager> mapped_memory_;
std::unique_ptr<QuerySyncManager> sync_manager_;
@@ -57,10 +61,10 @@ TEST_F(QuerySyncManagerTest, Basic) {
for (size_t ii = 0; ii < arraysize(infos); ++ii) {
EXPECT_TRUE(sync_manager_->Alloc(&infos[ii]));
- EXPECT_NE(0, infos[ii].shm_id);
ASSERT_TRUE(infos[ii].sync != NULL);
EXPECT_EQ(0, infos[ii].sync->process_count);
EXPECT_EQ(0u, infos[ii].sync->result);
+ EXPECT_EQ(0, infos[ii].submit_count);
}
for (size_t ii = 0; ii < arraysize(infos); ++ii) {
@@ -77,6 +81,126 @@ TEST_F(QuerySyncManagerTest, DontFree) {
}
}
+TEST_F(QuerySyncManagerTest, FreePendingSyncs) {
+ QuerySyncManager::QueryInfo info;
+ EXPECT_TRUE(sync_manager_->Alloc(&info));
+ QuerySyncManager::Bucket* bucket = info.bucket;
+
+ // Mark the query as in-use.
+ ++info.submit_count;
+
+ // Freeing the QueryInfo should keep the QuerySync busy as it's still in-use,
+ // but should be tracked in pending_syncs.
+ sync_manager_->Free(info);
+ EXPECT_FALSE(bucket->pending_syncs.empty());
+ EXPECT_TRUE(bucket->in_use_query_syncs.any());
+
+ // FreePendingSyncs should not free in-use QuerySync.
+ bucket->FreePendingSyncs();
+ EXPECT_FALSE(bucket->pending_syncs.empty());
+ EXPECT_TRUE(bucket->in_use_query_syncs.any());
+
+ // Mark the query as completed.
+ info.sync->process_count = info.submit_count;
+
+ // FreePendingSyncs should free the QuerySync.
+ bucket->FreePendingSyncs();
+ EXPECT_TRUE(bucket->pending_syncs.empty());
+ EXPECT_FALSE(bucket->in_use_query_syncs.any());
+
+ // Allocate a new Query, mark it in-use
+ EXPECT_TRUE(sync_manager_->Alloc(&info));
+ bucket = info.bucket;
+ ++info.submit_count;
+
+ // Mark the query as completed
+ info.sync->process_count = info.submit_count;
+
+ // FreePendingSyncs should not free the QuerySync. Even though the query is
+ // completed, it has not been deleted yet.
+ bucket->FreePendingSyncs();
+ EXPECT_TRUE(bucket->in_use_query_syncs.any());
+
+ // Free the QueryInfo, it should be immediately freed.
+ sync_manager_->Free(info);
+ EXPECT_TRUE(bucket->pending_syncs.empty());
+ EXPECT_FALSE(bucket->in_use_query_syncs.any());
+}
+
+TEST_F(QuerySyncManagerTest, Shrink) {
+ QuerySyncManager::QueryInfo info;
+ EXPECT_TRUE(sync_manager_->Alloc(&info));
+ QuerySyncManager::Bucket* bucket = info.bucket;
+ QuerySync* syncs = bucket->syncs;
+
+ FencedAllocator::State state =
+ mapped_memory_->GetPointerStatusForTest(syncs, nullptr);
+ EXPECT_EQ(FencedAllocator::IN_USE, state);
+
+ // Shrink while a query is allocated - should not release anything.
+ sync_manager_->Shrink(helper_.get());
+ state = mapped_memory_->GetPointerStatusForTest(syncs, nullptr);
+ EXPECT_EQ(FencedAllocator::IN_USE, state);
+
+ // Free query that was never submitted.
+ sync_manager_->Free(info);
+ EXPECT_TRUE(bucket->pending_syncs.empty());
+ EXPECT_FALSE(bucket->in_use_query_syncs.any());
+
+ // Shrink should release the memory immediately.
+ sync_manager_->Shrink(helper_.get());
+ EXPECT_TRUE(sync_manager_->buckets_.empty());
+ state = mapped_memory_->GetPointerStatusForTest(syncs, nullptr);
+ EXPECT_EQ(FencedAllocator::FREE, state);
+
+ EXPECT_TRUE(sync_manager_->Alloc(&info));
+ bucket = info.bucket;
+ syncs = bucket->syncs;
+
+ state = mapped_memory_->GetPointerStatusForTest(syncs, nullptr);
+ EXPECT_EQ(FencedAllocator::IN_USE, state);
+
+ // Free a query that was submitted, but not completed.
+ ++info.submit_count;
+ sync_manager_->Free(info);
+ EXPECT_FALSE(bucket->pending_syncs.empty());
+ EXPECT_TRUE(bucket->in_use_query_syncs.any());
+
+ int32_t last_token = helper_->InsertToken();
+
+ // Shrink should release the memory, pending a new token.
+ sync_manager_->Shrink(helper_.get());
+ EXPECT_TRUE(sync_manager_->buckets_.empty());
+ int32_t token = 0;
+ state = mapped_memory_->GetPointerStatusForTest(syncs, &token);
+ EXPECT_EQ(FencedAllocator::FREE_PENDING_TOKEN, state);
+ EXPECT_EQ(last_token + 1, token);
+
+ EXPECT_TRUE(sync_manager_->Alloc(&info));
+ bucket = info.bucket;
+ syncs = bucket->syncs;
+
+ state = mapped_memory_->GetPointerStatusForTest(syncs, nullptr);
+ EXPECT_EQ(FencedAllocator::IN_USE, state);
+
+ // Free a query that was submitted, but not completed yet.
+ ++info.submit_count;
+ int32_t submit_count = info.submit_count;
+ QuerySync* sync = info.sync;
+ sync_manager_->Free(info);
+ EXPECT_FALSE(bucket->pending_syncs.empty());
+ EXPECT_TRUE(bucket->in_use_query_syncs.any());
+
+ // Complete the query after Free.
+ sync->process_count = submit_count;
+
+ // Shrink should free the memory immediately since the query is completed.
+ sync_manager_->Shrink(helper_.get());
+ EXPECT_TRUE(sync_manager_->buckets_.empty());
+ state = mapped_memory_->GetPointerStatusForTest(syncs, nullptr);
+ EXPECT_EQ(FencedAllocator::FREE, state);
+}
+
class QueryTrackerTest : public testing::Test {
protected:
static const int32_t kNumCommandEntries = 400;
@@ -93,6 +217,8 @@ class QueryTrackerTest : public testing::Test {
}
void TearDown() override {
+ helper_->CommandBufferHelper::Flush();
+ EXPECT_CALL(*command_buffer_, DestroyTransferBuffer(_)).Times(AnyNumber());
query_tracker_.reset();
mapped_memory_.reset();
helper_.reset();
@@ -108,12 +234,12 @@ class QueryTrackerTest : public testing::Test {
}
uint32_t GetBucketUsedCount(QuerySyncManager::Bucket* bucket) {
- return bucket->in_use_queries.count();
+ return bucket->in_use_query_syncs.count();
}
uint32_t GetFlushGeneration() { return helper_->flush_generation(); }
- std::unique_ptr<CommandBuffer> command_buffer_;
+ std::unique_ptr<MockClientCommandBuffer> command_buffer_;
std::unique_ptr<GLES2CmdHelper> helper_;
std::unique_ptr<MappedMemoryManager> mapped_memory_;
std::unique_ptr<QueryTracker> query_tracker_;
@@ -156,10 +282,11 @@ TEST_F(QueryTrackerTest, Query) {
EXPECT_FALSE(query->NeverUsed());
EXPECT_FALSE(query->Pending());
EXPECT_EQ(0, query->token());
- EXPECT_EQ(1, query->submit_count());
+ EXPECT_EQ(0, query->submit_count());
+ EXPECT_EQ(1, query->NextSubmitCount());
// Check MarkAsPending.
- query->MarkAsPending(kToken);
+ query->MarkAsPending(kToken, query->NextSubmitCount());
EXPECT_FALSE(query->NeverUsed());
EXPECT_TRUE(query->Pending());
EXPECT_EQ(kToken, query->token());
@@ -216,7 +343,9 @@ TEST_F(QueryTrackerTest, Remove) {
EXPECT_EQ(1u, GetBucketUsedCount(bucket));
query->MarkAsActive();
- query->MarkAsPending(kToken);
+ int32_t submit_count = query->NextSubmitCount();
+ query->MarkAsPending(kToken, submit_count);
+ QuerySync* sync = GetSync(query);
query_tracker_->RemoveQuery(kId1);
// Check we get nothing for a non-existent query.
@@ -224,17 +353,39 @@ TEST_F(QueryTrackerTest, Remove) {
// Check that memory was not freed.
EXPECT_EQ(1u, GetBucketUsedCount(bucket));
+ EXPECT_EQ(1u, bucket->pending_syncs.size());
// Simulate GPU process marking it as available.
- QuerySync* sync = GetSync(query);
- sync->process_count = query->submit_count();
sync->result = kResult;
+ sync->process_count = submit_count;
- // Check FreeCompletedQueries.
- query_tracker_->FreeCompletedQueries();
+ // Check FreePendingSyncs.
+ bucket->FreePendingSyncs();
EXPECT_EQ(0u, GetBucketUsedCount(bucket));
}
+TEST_F(QueryTrackerTest, RemoveActive) {
+ const GLuint kId1 = 123;
+
+ // Create a Query.
+ QueryTracker::Query* query =
+ query_tracker_->CreateQuery(kId1, GL_ANY_SAMPLES_PASSED_EXT);
+ ASSERT_TRUE(query != NULL);
+
+ QuerySyncManager::Bucket* bucket = GetBucket(query);
+ EXPECT_EQ(1u, GetBucketUsedCount(bucket));
+
+ query->MarkAsActive();
+
+ query_tracker_->RemoveQuery(kId1);
+ // Check we get nothing for a non-existent query.
+ EXPECT_TRUE(query_tracker_->GetQuery(kId1) == NULL);
+
+ // Check that memory was freed.
+ EXPECT_EQ(0u, GetBucketUsedCount(bucket));
+ EXPECT_EQ(0u, bucket->pending_syncs.size());
+}
+
TEST_F(QueryTrackerTest, ManyQueries) {
const GLuint kId1 = 123;
const int32_t kToken = 46;
@@ -264,11 +415,13 @@ TEST_F(QueryTrackerTest, ManyQueries) {
GLuint query_id = kId1 + queries.size();
EXPECT_EQ(query_id, query->id());
query->MarkAsActive();
- query->MarkAsPending(kToken);
+ int32_t submit_count = query->NextSubmitCount();
+ query->MarkAsPending(kToken, submit_count);
+ QuerySync* sync = GetSync(query);
QuerySyncManager::Bucket* bucket = GetBucket(query);
uint32_t use_count_before_remove = GetBucketUsedCount(bucket);
- query_tracker_->FreeCompletedQueries();
+ bucket->FreePendingSyncs();
EXPECT_EQ(use_count_before_remove, GetBucketUsedCount(bucket));
query_tracker_->RemoveQuery(query_id);
// Check we get nothing for a non-existent query.
@@ -278,12 +431,11 @@ TEST_F(QueryTrackerTest, ManyQueries) {
EXPECT_EQ(use_count_before_remove, GetBucketUsedCount(bucket));
// Simulate GPU process marking it as available.
- QuerySync* sync = GetSync(query);
- sync->process_count = query->submit_count();
+ sync->process_count = submit_count;
sync->result = kResult;
// Check FreeCompletedQueries.
- query_tracker_->FreeCompletedQueries();
+ bucket->FreePendingSyncs();
EXPECT_EQ(use_count_before_remove - 1, GetBucketUsedCount(bucket));
}
}
diff --git a/chromium/gpu/command_buffer/client/ring_buffer.cc b/chromium/gpu/command_buffer/client/ring_buffer.cc
index 118dc57276e..db83391c484 100644
--- a/chromium/gpu/command_buffer/client/ring_buffer.cc
+++ b/chromium/gpu/command_buffer/client/ring_buffer.cc
@@ -29,10 +29,8 @@ RingBuffer::RingBuffer(unsigned int alignment,
base_(static_cast<int8_t*>(base) - base_offset) {}
RingBuffer::~RingBuffer() {
- // Free blocks pending tokens.
- while (!blocks_.empty()) {
- FreeOldestBlock();
- }
+ for (const auto& block : blocks_)
+ DCHECK(block.state != IN_USE);
}
void RingBuffer::FreeOldestBlock() {
@@ -179,4 +177,18 @@ unsigned int RingBuffer::GetTotalFreeSizeNoWaiting() {
}
}
+void RingBuffer::ShrinkLastBlock(unsigned int new_size) {
+ if (blocks_.empty())
+ return;
+ auto& block = blocks_.back();
+ DCHECK_LT(new_size, block.size);
+ DCHECK_EQ(block.state, IN_USE);
+
+ // Can't shrink to size 0, see comments in Alloc.
+ new_size = std::max(new_size, 1u);
+
+ free_offset_ = block.offset + new_size;
+ block.size = new_size;
+}
+
} // namespace gpu
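
ShrinkLastBlock only touches the most recently allocated, still in-use block, never shrinks it below 1 byte, and pulls the free offset back so the released tail becomes allocatable again. A standalone sketch with simplified block bookkeeping:

#include <algorithm>
#include <cassert>
#include <deque>

struct BlockModel {
  unsigned int offset;
  unsigned int size;
  bool in_use;
};

struct RingModel {
  std::deque<BlockModel> blocks;
  unsigned int free_offset = 0;

  void ShrinkLastBlock(unsigned int new_size) {
    if (blocks.empty())
      return;
    BlockModel& block = blocks.back();
    assert(new_size < block.size);
    assert(block.in_use);
    new_size = std::max(new_size, 1u);  // can't shrink to zero
    free_offset = block.offset + new_size;
    block.size = new_size;
  }
};
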
diff --git a/chromium/gpu/command_buffer/client/ring_buffer.h b/chromium/gpu/command_buffer/client/ring_buffer.h
index 083cb018ff5..48ba789ec9d 100644
--- a/chromium/gpu/command_buffer/client/ring_buffer.h
+++ b/chromium/gpu/command_buffer/client/ring_buffer.h
@@ -90,6 +90,9 @@ class GPU_EXPORT RingBuffer {
return (size + alignment_ - 1) & ~(alignment_ - 1);
}
+ // Shrinks the last block. new_size must be smaller than the current size
+ // and the block must still be in use in order to shrink.
+ void ShrinkLastBlock(unsigned int new_size);
private:
enum State {
diff --git a/chromium/gpu/command_buffer/client/shared_memory_limits.h b/chromium/gpu/command_buffer/client/shared_memory_limits.h
index 53881de0dc1..ed62e956cfb 100644
--- a/chromium/gpu/command_buffer/client/shared_memory_limits.h
+++ b/chromium/gpu/command_buffer/client/shared_memory_limits.h
@@ -7,18 +7,43 @@
#include <stddef.h>
+#include "base/sys_info.h"
+#include "build/build_config.h"
+
namespace gpu {
struct SharedMemoryLimits {
- SharedMemoryLimits() = default;
+ SharedMemoryLimits() {
+// We can't call AmountOfPhysicalMemory under NACL, so leave the default.
+#if !defined(OS_NACL)
+ // Max mapped memory to use for texture uploads depends on device RAM.
+ // Do not use more than 5% of extra shared memory, and do not use any extra
+ // for memory-constrained devices (<=1GB).
+ max_mapped_memory_for_texture_upload =
+ base::SysInfo::AmountOfPhysicalMemory() > 1024 * 1024 * 1024
+ ? base::saturated_cast<uint32_t>(
+ base::SysInfo::AmountOfPhysicalMemory() / 20)
+ : 0;
+
+ // On memory constrained devices, switch to lower limits.
+ if (base::SysInfo::AmountOfPhysicalMemoryMB() < 512) {
+ command_buffer_size = 512 * 1024;
+ start_transfer_buffer_size = 256 * 1024;
+ min_transfer_buffer_size = 128 * 1024;
+ mapped_memory_chunk_size = 256 * 1024;
+ }
+#endif
+ }
int32_t command_buffer_size = 1024 * 1024;
- uint32_t start_transfer_buffer_size = 1 * 1024 * 1024;
- uint32_t min_transfer_buffer_size = 1 * 256 * 1024;
+ uint32_t start_transfer_buffer_size = 1024 * 1024;
+ uint32_t min_transfer_buffer_size = 256 * 1024;
uint32_t max_transfer_buffer_size = 16 * 1024 * 1024;
static constexpr uint32_t kNoLimit = 0;
uint32_t mapped_memory_reclaim_limit = kNoLimit;
+ uint32_t mapped_memory_chunk_size = 2 * 1024 * 1024;
+ uint32_t max_mapped_memory_for_texture_upload = 0;
// These are limits for contexts only used for creating textures, mailboxing
// them and dealing with synchronization.
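
The new constructor derives the texture-upload mapped-memory cap from physical RAM: nothing extra on devices with at most 1 GB, 5% of RAM otherwise (saturated to uint32_t in the real code), and it also drops the buffer sizes on sub-512 MB devices. A standalone sketch of the cap computation with the memory amount passed in rather than read from base::SysInfo:

#include <cstdint>
#include <cstdio>

// Same arithmetic as the diff: 0 for <= 1 GB of RAM, otherwise RAM / 20.
// A plain cast stands in for base::saturated_cast here.
uint32_t MaxMappedMemoryForTextureUpload(int64_t physical_bytes) {
  if (physical_bytes <= 1024LL * 1024 * 1024)
    return 0;
  return static_cast<uint32_t>(physical_bytes / 20);
}

int main() {
  // e.g. a 4 GiB device gets 4 GiB / 20 = 214748364 bytes (~205 MiB).
  std::printf("%u\n",
              MaxMappedMemoryForTextureUpload(4LL * 1024 * 1024 * 1024));
  return 0;
}
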
diff --git a/chromium/gpu/command_buffer/client/transfer_buffer.cc b/chromium/gpu/command_buffer/client/transfer_buffer.cc
index 0aa21049e55..76afc59f6eb 100644
--- a/chromium/gpu/command_buffer/client/transfer_buffer.cc
+++ b/chromium/gpu/command_buffer/client/transfer_buffer.cc
@@ -65,7 +65,7 @@ bool TransferBuffer::Initialize(
void TransferBuffer::Free() {
if (HaveBuffer()) {
TRACE_EVENT0("gpu", "TransferBuffer::Free");
- helper_->Finish();
+ helper_->FlushLazy();
helper_->command_buffer()->DestroyTransferBuffer(buffer_id_);
buffer_id_ = -1;
buffer_ = NULL;
@@ -105,6 +105,10 @@ unsigned int TransferBuffer::GetFreeSize() const {
return HaveBuffer() ? ring_buffer_->GetTotalFreeSizeNoWaiting() : 0;
}
+void TransferBuffer::ShrinkLastBlock(unsigned int new_size) {
+ ring_buffer_->ShrinkLastBlock(new_size);
+}
+
void TransferBuffer::AllocateRingBuffer(unsigned int size) {
for (;size >= min_buffer_size_; size /= 2) {
int32_t id = -1;
@@ -230,4 +234,11 @@ void ScopedTransferBufferPtr::Reset(unsigned int new_size) {
buffer_ = transfer_buffer_->AllocUpTo(new_size, &size_);
}
+void ScopedTransferBufferPtr::Shrink(unsigned int new_size) {
+ if (!transfer_buffer_->HaveBuffer() || new_size >= size_)
+ return;
+ transfer_buffer_->ShrinkLastBlock(new_size);
+ size_ = new_size;
+}
+
} // namespace gpu
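
ScopedTransferBufferPtr::Shrink is aimed at callers that reserve a worst-case block, serialize into it, and then hand the unused tail back to the ring buffer before issuing the command. A standalone sketch of that pattern with simplified stand-in types (not the real gpu client classes):

#include <algorithm>
#include <cstring>

struct ScopedBufferModel {
  char* ptr = nullptr;
  unsigned int size = 0;
  // No-op unless the new size is actually smaller, mirroring the diff.
  void Shrink(unsigned int new_size) { size = std::min(size, new_size); }
};

// Serialize up to |data_size| bytes, then return the unused tail.
unsigned int SerializeInto(ScopedBufferModel& buffer, const char* data,
                           unsigned int data_size) {
  unsigned int used = std::min(buffer.size, data_size);
  std::memcpy(buffer.ptr, data, used);
  buffer.Shrink(used);  // give the unused tail back to the ring buffer
  return used;
}
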
diff --git a/chromium/gpu/command_buffer/client/transfer_buffer.h b/chromium/gpu/command_buffer/client/transfer_buffer.h
index 155e8b0094e..fda4ac26a62 100644
--- a/chromium/gpu/command_buffer/client/transfer_buffer.h
+++ b/chromium/gpu/command_buffer/client/transfer_buffer.h
@@ -69,6 +69,8 @@ class GPU_EXPORT TransferBufferInterface {
virtual unsigned int GetSize() const = 0;
virtual unsigned int GetFreeSize() const = 0;
+
+ virtual void ShrinkLastBlock(unsigned int new_size) = 0;
};
// Class that manages the transfer buffer.
@@ -97,6 +99,7 @@ class GPU_EXPORT TransferBuffer : public TransferBufferInterface {
void FreePendingToken(void* p, unsigned int token) override;
unsigned int GetSize() const override;
unsigned int GetFreeSize() const override;
+ void ShrinkLastBlock(unsigned int new_size) override;
// These are for testing.
unsigned int GetCurrentMaxAllocationWithoutRealloc() const;
@@ -200,6 +203,9 @@ class GPU_EXPORT ScopedTransferBufferPtr {
void Reset(unsigned int new_size);
+ // Shrinks the current allocation (the most recent block) to a given size.
+ void Shrink(unsigned int new_size);
+
private:
void* buffer_;
unsigned int size_;
diff --git a/chromium/gpu/command_buffer/client/transfer_buffer_unittest.cc b/chromium/gpu/command_buffer/client/transfer_buffer_unittest.cc
index bb67d53d6fe..c83e1d3e4a8 100644
--- a/chromium/gpu/command_buffer/client/transfer_buffer_unittest.cc
+++ b/chromium/gpu/command_buffer/client/transfer_buffer_unittest.cc
@@ -141,6 +141,7 @@ TEST_F(TransferBufferTest, Free) {
EXPECT_FALSE(transfer_buffer_->HaveBuffer());
EXPECT_EQ(base::UnguessableToken(),
transfer_buffer_->shared_memory_handle().GetGUID());
+
// See that it gets reallocated.
EXPECT_TRUE(transfer_buffer_->GetResultBuffer() != NULL);
EXPECT_TRUE(transfer_buffer_->HaveBuffer());
@@ -156,6 +157,7 @@ TEST_F(TransferBufferTest, Free) {
EXPECT_FALSE(transfer_buffer_->HaveBuffer());
EXPECT_EQ(base::UnguessableToken(),
transfer_buffer_->shared_memory_handle().GetGUID());
+
// See that it gets reallocated.
unsigned int size = 0;
void* data = transfer_buffer_->AllocUpTo(1, &size);
@@ -163,9 +165,12 @@ TEST_F(TransferBufferTest, Free) {
EXPECT_TRUE(transfer_buffer_->HaveBuffer());
EXPECT_NE(base::UnguessableToken(),
transfer_buffer_->shared_memory_handle().GetGUID());
- transfer_buffer_->FreePendingToken(data, 1);
+ int32_t token = helper_->InsertToken();
+ int32_t put_offset = helper_->GetPutOffsetForTest();
+ transfer_buffer_->FreePendingToken(data, token);
- // Free buffer.
+ // Free buffer. Should cause a Flush.
+ EXPECT_CALL(*command_buffer(), Flush(_)).Times(AtMost(1));
EXPECT_CALL(*command_buffer(), DestroyTransferBuffer(_))
.Times(1)
.RetiresOnSaturation();
@@ -174,6 +179,11 @@ TEST_F(TransferBufferTest, Free) {
EXPECT_FALSE(transfer_buffer_->HaveBuffer());
EXPECT_EQ(base::UnguessableToken(),
transfer_buffer_->shared_memory_handle().GetGUID());
+ // Free should have flushed.
+ EXPECT_EQ(put_offset, command_buffer_->GetServicePutOffset());
+ // However it shouldn't have caused a finish.
+ EXPECT_LT(command_buffer_->GetState().get_offset, put_offset);
+
// See that it gets reallocated.
transfer_buffer_->GetResultOffset();
EXPECT_TRUE(transfer_buffer_->HaveBuffer());
@@ -210,11 +220,11 @@ TEST_F(TransferBufferTest, MemoryAlignmentAfterZeroAllocation) {
Initialize(32u);
void* ptr = transfer_buffer_->Alloc(0);
EXPECT_EQ((reinterpret_cast<uintptr_t>(ptr) & (kAlignment - 1)), 0u);
- transfer_buffer_->FreePendingToken(ptr, static_cast<unsigned int>(-1));
+ transfer_buffer_->FreePendingToken(ptr, helper_->InsertToken());
// Check that the pointer is aligned on the following allocation.
ptr = transfer_buffer_->Alloc(4);
EXPECT_EQ((reinterpret_cast<uintptr_t>(ptr) & (kAlignment - 1)), 0u);
- transfer_buffer_->FreePendingToken(ptr, 1);
+ transfer_buffer_->FreePendingToken(ptr, helper_->InsertToken());
}
TEST_F(TransferBufferTest, Flush) {
@@ -505,4 +515,34 @@ TEST_F(TransferBufferExpandContractTest, ReallocsToDefault) {
transfer_buffer_->GetCurrentMaxAllocationWithoutRealloc());
}
+TEST_F(TransferBufferExpandContractTest, Shrink) {
+ unsigned int alloc_size = transfer_buffer_->GetFreeSize();
+ EXPECT_EQ(kStartTransferBufferSize - kStartingOffset, alloc_size);
+ unsigned int size_allocated = 0;
+ void* ptr = transfer_buffer_->AllocUpTo(alloc_size, &size_allocated);
+
+ ASSERT_NE(ptr, nullptr);
+ EXPECT_EQ(alloc_size, size_allocated);
+ EXPECT_GT(alloc_size, 0u);
+ EXPECT_EQ(0u, transfer_buffer_->GetFreeSize());
+
+ // Shrink once.
+ const unsigned int shrink_size1 = 80;
+ EXPECT_LT(shrink_size1, alloc_size);
+ transfer_buffer_->ShrinkLastBlock(shrink_size1);
+ EXPECT_EQ(alloc_size - shrink_size1, transfer_buffer_->GetFreeSize());
+
+ // Shrink again.
+ const unsigned int shrink_size2 = 30;
+ EXPECT_LT(shrink_size2, shrink_size1);
+ transfer_buffer_->ShrinkLastBlock(shrink_size2);
+ EXPECT_EQ(alloc_size - shrink_size2, transfer_buffer_->GetFreeSize());
+
+ // Shrink to zero (minimum size is 1).
+ transfer_buffer_->ShrinkLastBlock(0);
+ EXPECT_EQ(alloc_size - 1, transfer_buffer_->GetFreeSize());
+
+ transfer_buffer_->FreePendingToken(ptr, 1);
+}
+
} // namespace gpu
diff --git a/chromium/gpu/command_buffer/cmd_buffer_functions.txt b/chromium/gpu/command_buffer/cmd_buffer_functions.txt
index f806109bdc7..d8f976f118a 100644
--- a/chromium/gpu/command_buffer/cmd_buffer_functions.txt
+++ b/chromium/gpu/command_buffer/cmd_buffer_functions.txt
@@ -267,7 +267,7 @@ GL_APICALL GLboolean GL_APIENTRY glUnmapBuffer (GLenumBufferTarget target);
GL_APICALL void GL_APIENTRY glFlushMappedBufferRange (GLenumBufferTarget target, GLintptrNotNegative offset, GLsizeiptr size);
GL_APICALL void* GL_APIENTRY glMapTexSubImage2DCHROMIUM (GLenum target, GLint level, GLint xoffset, GLint yoffset, GLsizei width, GLsizei height, GLenum format, GLenum type, GLenum access);
GL_APICALL void GL_APIENTRY glUnmapTexSubImage2DCHROMIUM (const void* mem);
-GL_APICALL void GL_APIENTRY glResizeCHROMIUM (GLuint width, GLuint height, GLfloat scale_factor, GLboolean alpha);
+GL_APICALL void GL_APIENTRY glResizeCHROMIUM (GLuint width, GLuint height, GLfloat scale_factor, GLenum color_space, GLboolean alpha);
GL_APICALL const GLchar* GL_APIENTRY glGetRequestableExtensionsCHROMIUM (void);
GL_APICALL void GL_APIENTRY glRequestExtensionCHROMIUM (const char* extension);
GL_APICALL void GL_APIENTRY glGetProgramInfoCHROMIUM (GLidProgram program, GLsizeiNotNegative bufsize, GLsizei* size, void* info);
@@ -279,8 +279,8 @@ GL_APICALL void GL_APIENTRY glDestroyImageCHROMIUM (GLuint image_id);
GL_APICALL void GL_APIENTRY glDescheduleUntilFinishedCHROMIUM (void);
GL_APICALL void GL_APIENTRY glGetTranslatedShaderSourceANGLE (GLidShader shader, GLsizeiNotNegative bufsize, GLsizeiOptional* length, char* source);
GL_APICALL void GL_APIENTRY glPostSubBufferCHROMIUM (GLint x, GLint y, GLint width, GLint height);
-GL_APICALL void GL_APIENTRY glCopyTextureCHROMIUM (GLuint source_id, GLint source_level, GLenum dest_target, GLuint dest_id, GLint dest_level, GLintTextureInternalFormat internalformat, GLenumPixelType dest_type, GLboolean unpack_flip_y, GLboolean unpack_premultiply_alpha, GLboolean unpack_unmultiply_alpha);
-GL_APICALL void GL_APIENTRY glCopySubTextureCHROMIUM (GLuint source_id, GLint source_level, GLenum dest_target, GLuint dest_id, GLint dest_level, GLint xoffset, GLint yoffset, GLint x, GLint y, GLsizei width, GLsizei height, GLboolean unpack_flip_y, GLboolean unpack_premultiply_alpha, GLboolean unpack_unmultiply_alpha);
+GL_APICALL void GL_APIENTRY glCopyTextureCHROMIUM (GLuint source_id, GLint source_level, GLenumTextureTarget dest_target, GLuint dest_id, GLint dest_level, GLintTextureInternalFormat internalformat, GLenumPixelType dest_type, GLboolean unpack_flip_y, GLboolean unpack_premultiply_alpha, GLboolean unpack_unmultiply_alpha);
+GL_APICALL void GL_APIENTRY glCopySubTextureCHROMIUM (GLuint source_id, GLint source_level, GLenumTextureTarget dest_target, GLuint dest_id, GLint dest_level, GLint xoffset, GLint yoffset, GLint x, GLint y, GLsizei width, GLsizei height, GLboolean unpack_flip_y, GLboolean unpack_premultiply_alpha, GLboolean unpack_unmultiply_alpha);
GL_APICALL void GL_APIENTRY glCompressedCopyTextureCHROMIUM (GLuint source_id, GLuint dest_id);
GL_APICALL void GL_APIENTRY glDrawArraysInstancedANGLE (GLenumDrawMode mode, GLint first, GLsizei count, GLsizei primcount);
GL_APICALL void GL_APIENTRY glDrawElementsInstancedANGLE (GLenumDrawMode mode, GLsizei count, GLenumIndexType type, const void* indices, GLsizei primcount);
@@ -293,7 +293,7 @@ GL_APICALL GLuint GL_APIENTRY glCreateAndConsumeTextureCHROMIUM (GLenumTex
GL_APICALL void GL_APIENTRY glCreateAndConsumeTextureINTERNAL (GLenumTextureBindTarget target, GLuint texture, const GLbyte* mailbox);
GL_APICALL void GL_APIENTRY glBindUniformLocationCHROMIUM (GLidProgram program, GLint location, const char* name);
GL_APICALL void GL_APIENTRY glBindTexImage2DCHROMIUM (GLenumTextureBindTarget target, GLint imageId);
-GL_APICALL void GL_APIENTRY glBindTexImage2DWithInternalformatCHROMIUM (GLenumTextureBindTarget target, GLenum internalformat, GLint imageId);
+GL_APICALL void GL_APIENTRY glBindTexImage2DWithInternalformatCHROMIUM (GLenumTextureBindTarget target, GLenumTextureInternalFormat internalformat, GLint imageId);
GL_APICALL void GL_APIENTRY glReleaseTexImage2DCHROMIUM (GLenumTextureBindTarget target, GLint imageId);
GL_APICALL void GL_APIENTRY glTraceBeginCHROMIUM (const char* category_name, const char* trace_name);
GL_APICALL void GL_APIENTRY glTraceEndCHROMIUM (void);
@@ -309,6 +309,7 @@ GL_APICALL void GL_APIENTRY glDiscardBackbufferCHROMIUM (void);
GL_APICALL void GL_APIENTRY glScheduleOverlayPlaneCHROMIUM (GLint plane_z_order, GLenum plane_transform, GLuint overlay_texture_id, GLint bounds_x, GLint bounds_y, GLint bounds_width, GLint bounds_height, GLfloat uv_x, GLfloat uv_y, GLfloat uv_width, GLfloat uv_height);
GL_APICALL void GL_APIENTRY glScheduleCALayerSharedStateCHROMIUM (GLfloat opacity, GLboolean is_clipped, const GLfloat* clip_rect, GLint sorting_context_id, const GLfloat* transform);
GL_APICALL void GL_APIENTRY glScheduleCALayerCHROMIUM (GLuint contents_texture_id, const GLfloat* contents_rect, GLuint background_color, GLuint edge_aa_mask, const GLfloat* bounds_rect, GLuint filter);
+GL_APICALL void GL_APIENTRY glSetColorSpaceForScanoutCHROMIUM (GLuint texture_id, GLColorSpace color_space);
GL_APICALL void GL_APIENTRY glScheduleCALayerInUseQueryCHROMIUM (GLsizei count, const GLuint* textures);
GL_APICALL void GL_APIENTRY glCommitOverlayPlanesCHROMIUM (void);
GL_APICALL void GL_APIENTRY glSwapInterval (GLint interval);
@@ -363,7 +364,7 @@ GL_APICALL GLint GL_APIENTRY glGetFragDataIndexEXT (GLidProgram program,
// Extension CHROMIUM_stream_texture_matrix
GL_APICALL void GL_APIENTRY glUniformMatrix4fvStreamTextureMatrixCHROMIUM (GLintUniformLocation location, GLbooleanFalseOnly transpose, const GLfloat* transform);
-GL_APICALL void GL_APIENTRY glOverlayPromotionHintCHROMIUM (GLidBindTexture texture, GLboolean promotion_hint, GLint display_x, GLint display_y);
+GL_APICALL void GL_APIENTRY glOverlayPromotionHintCHROMIUM (GLidBindTexture texture, GLboolean promotion_hint, GLint display_x, GLint display_y, GLint display_width, GLint display_height);
GL_APICALL void GL_APIENTRY glSwapBuffersWithBoundsCHROMIUM (GLsizei count, const GLint* rects);
@@ -377,3 +378,8 @@ GL_APICALL void GL_APIENTRY glSetEnableDCLayersCHROMIUM (GLboolean enabl
GL_APICALL void GL_APIENTRY glInitializeDiscardableTextureCHROMIUM (GLuint texture_id);
GL_APICALL void GL_APIENTRY glUnlockDiscardableTextureCHROMIUM (GLuint texture_id);
GL_APICALL bool GL_APIENTRY glLockDiscardableTextureCHROMIUM (GLuint texture_id);
+
+// Extension CHROMIUM_raster_transport
+GL_APICALL void GL_APIENTRY glBeginRasterCHROMIUM (GLuint texture_id, GLuint sk_color, GLuint msaa_sample_count, GLboolean can_use_lcd_text, GLboolean use_distance_field_text, GLint pixel_config);
+GL_APICALL void GL_APIENTRY glRasterCHROMIUM (const cc::DisplayItemList* list, GLint x, GLint y, GLint w, GLint h);
+GL_APICALL void GL_APIENTRY glEndRasterCHROMIUM (void);
diff --git a/chromium/gpu/command_buffer/common/BUILD.gn b/chromium/gpu/command_buffer/common/BUILD.gn
index 633e79119d3..061318b3f1c 100644
--- a/chromium/gpu/command_buffer/common/BUILD.gn
+++ b/chromium/gpu/command_buffer/common/BUILD.gn
@@ -39,6 +39,7 @@ source_set("common_sources") {
"debug_marker_manager.h",
"discardable_handle.cc",
"discardable_handle.h",
+ "gl2_types.h",
"gles2_cmd_format.cc",
"gles2_cmd_format.h",
"gles2_cmd_format_autogen.h",
diff --git a/chromium/gpu/command_buffer/common/capabilities.h b/chromium/gpu/command_buffer/common/capabilities.h
index 243e10b806e..e49f3004ac1 100644
--- a/chromium/gpu/command_buffer/common/capabilities.h
+++ b/chromium/gpu/command_buffer/common/capabilities.h
@@ -159,6 +159,7 @@ struct GPU_EXPORT Capabilities {
bool multisample_compatibility = false;
// True if DirectComposition layers are enabled.
bool dc_layers = false;
+ bool use_dc_overlays_for_video = false;
// When this parameter is true, a CHROMIUM image created with RGB format will
// actually have RGBA format. The client is responsible for handling most of
@@ -178,6 +179,8 @@ struct GPU_EXPORT Capabilities {
// When true, non-empty post sub buffer calls are unsupported.
bool disable_non_empty_post_sub_buffers = false;
+ bool disable_2d_canvas_copy_on_write = false;
+
int major_version = 2;
int minor_version = 0;
};
diff --git a/chromium/gpu/command_buffer/common/constants.h b/chromium/gpu/command_buffer/common/constants.h
index 40fe3d579b8..2326799238b 100644
--- a/chromium/gpu/command_buffer/common/constants.h
+++ b/chromium/gpu/command_buffer/common/constants.h
@@ -75,7 +75,7 @@ const int32_t kCommandBufferSharedMemoryId = 4;
const size_t kDefaultMaxProgramCacheMemoryBytes = 6 * 1024 * 1024;
#else
const size_t kDefaultMaxProgramCacheMemoryBytes = 2 * 1024 * 1024;
-const size_t kLowEndMaxProgramCacheMemoryBytes = 512 * 1024;
+const size_t kLowEndMaxProgramCacheMemoryBytes = 128 * 1024;
#endif
// Namespace used to separate various command buffer types.
diff --git a/chromium/gpu/command_buffer/common/gl2_types.h b/chromium/gpu/command_buffer/common/gl2_types.h
new file mode 100644
index 00000000000..b8dca84e12e
--- /dev/null
+++ b/chromium/gpu/command_buffer/common/gl2_types.h
@@ -0,0 +1,30 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <KHR/khrplatform.h>
+
+// GL types are forward declared to avoid including the GL headers. The problem
+// is determining which GL headers to include from code that is common to the
+// client and service sides (GLES2 or one of several GL implementations).
+typedef unsigned int GLenum;
+typedef unsigned int GLbitfield;
+typedef unsigned int GLuint;
+typedef int GLint;
+typedef int GLsizei;
+typedef unsigned char GLboolean;
+typedef signed char GLbyte;
+typedef short GLshort;
+typedef unsigned char GLubyte;
+typedef unsigned short GLushort;
+typedef unsigned long GLulong;
+typedef float GLfloat;
+typedef float GLclampf;
+typedef double GLdouble;
+typedef double GLclampd;
+typedef void GLvoid;
+typedef khronos_intptr_t GLintptr;
+typedef khronos_ssize_t GLsizeiptr;
+typedef struct __GLsync* GLsync;
+typedef int64_t GLint64;
+typedef uint64_t GLuint64;
diff --git a/chromium/gpu/command_buffer/common/gles2_cmd_format.h b/chromium/gpu/command_buffer/common/gles2_cmd_format.h
index 2e8fdf9b544..7bb5aef6f09 100644
--- a/chromium/gpu/command_buffer/common/gles2_cmd_format.h
+++ b/chromium/gpu/command_buffer/common/gles2_cmd_format.h
@@ -7,9 +7,6 @@
#ifndef GPU_COMMAND_BUFFER_COMMON_GLES2_CMD_FORMAT_H_
#define GPU_COMMAND_BUFFER_COMMON_GLES2_CMD_FORMAT_H_
-
-#include <KHR/khrplatform.h>
-
#include <stddef.h>
#include <stdint.h>
#include <string.h>
@@ -20,34 +17,10 @@
#include "gpu/command_buffer/common/bitfield_helpers.h"
#include "gpu/command_buffer/common/cmd_buffer_common.h"
#include "gpu/command_buffer/common/constants.h"
+#include "gpu/command_buffer/common/gl2_types.h"
#include "gpu/command_buffer/common/gles2_cmd_ids.h"
#include "gpu/command_buffer/common/gles2_cmd_utils.h"
-// GL types are forward declared to avoid including the GL headers. The problem
-// is determining which GL headers to include from code that is common to the
-// client and service sides (GLES2 or one of several GL implementations).
-typedef unsigned int GLenum;
-typedef unsigned int GLbitfield;
-typedef unsigned int GLuint;
-typedef int GLint;
-typedef int GLsizei;
-typedef unsigned char GLboolean;
-typedef signed char GLbyte;
-typedef short GLshort;
-typedef unsigned char GLubyte;
-typedef unsigned short GLushort;
-typedef unsigned long GLulong;
-typedef float GLfloat;
-typedef float GLclampf;
-typedef double GLdouble;
-typedef double GLclampd;
-typedef void GLvoid;
-typedef khronos_intptr_t GLintptr;
-typedef khronos_ssize_t GLsizeiptr;
-typedef struct __GLsync *GLsync;
-typedef int64_t GLint64;
-typedef uint64_t GLuint64;
-
namespace gpu {
namespace gles2 {
diff --git a/chromium/gpu/command_buffer/common/gles2_cmd_format_autogen.h b/chromium/gpu/command_buffer/common/gles2_cmd_format_autogen.h
index 1407ca840dc..22636d1b3e7 100644
--- a/chromium/gpu/command_buffer/common/gles2_cmd_format_autogen.h
+++ b/chromium/gpu/command_buffer/common/gles2_cmd_format_autogen.h
@@ -12047,11 +12047,13 @@ struct ResizeCHROMIUM {
void Init(GLuint _width,
GLuint _height,
GLfloat _scale_factor,
+ GLenum _color_space,
GLboolean _alpha) {
SetHeader();
width = _width;
height = _height;
scale_factor = _scale_factor;
+ color_space = _color_space;
alpha = _alpha;
}
@@ -12059,8 +12061,10 @@ struct ResizeCHROMIUM {
GLuint _width,
GLuint _height,
GLfloat _scale_factor,
+ GLenum _color_space,
GLboolean _alpha) {
- static_cast<ValueType*>(cmd)->Init(_width, _height, _scale_factor, _alpha);
+ static_cast<ValueType*>(cmd)->Init(_width, _height, _scale_factor,
+ _color_space, _alpha);
return NextCmdAddress<ValueType>(cmd);
}
@@ -12068,11 +12072,12 @@ struct ResizeCHROMIUM {
uint32_t width;
uint32_t height;
float scale_factor;
+ uint32_t color_space;
uint32_t alpha;
};
-static_assert(sizeof(ResizeCHROMIUM) == 20,
- "size of ResizeCHROMIUM should be 20");
+static_assert(sizeof(ResizeCHROMIUM) == 24,
+ "size of ResizeCHROMIUM should be 24");
static_assert(offsetof(ResizeCHROMIUM, header) == 0,
"offset of ResizeCHROMIUM header should be 0");
static_assert(offsetof(ResizeCHROMIUM, width) == 4,
@@ -12081,8 +12086,10 @@ static_assert(offsetof(ResizeCHROMIUM, height) == 8,
"offset of ResizeCHROMIUM height should be 8");
static_assert(offsetof(ResizeCHROMIUM, scale_factor) == 12,
"offset of ResizeCHROMIUM scale_factor should be 12");
-static_assert(offsetof(ResizeCHROMIUM, alpha) == 16,
- "offset of ResizeCHROMIUM alpha should be 16");
+static_assert(offsetof(ResizeCHROMIUM, color_space) == 16,
+ "offset of ResizeCHROMIUM color_space should be 16");
+static_assert(offsetof(ResizeCHROMIUM, alpha) == 20,
+ "offset of ResizeCHROMIUM alpha should be 20");
struct GetRequestableExtensionsCHROMIUM {
typedef GetRequestableExtensionsCHROMIUM ValueType;
@@ -13730,6 +13737,62 @@ static_assert(offsetof(ScheduleCALayerCHROMIUM, shm_id) == 20,
static_assert(offsetof(ScheduleCALayerCHROMIUM, shm_offset) == 24,
"offset of ScheduleCALayerCHROMIUM shm_offset should be 24");
+struct SetColorSpaceForScanoutCHROMIUM {
+ typedef SetColorSpaceForScanoutCHROMIUM ValueType;
+ static const CommandId kCmdId = kSetColorSpaceForScanoutCHROMIUM;
+ static const cmd::ArgFlags kArgFlags = cmd::kFixed;
+ static const uint8_t cmd_flags = CMD_FLAG_SET_TRACE_LEVEL(3);
+
+ static uint32_t ComputeSize() {
+ return static_cast<uint32_t>(sizeof(ValueType)); // NOLINT
+ }
+
+ void SetHeader() { header.SetCmd<ValueType>(); }
+
+ void Init(GLuint _texture_id,
+ GLuint _shm_id,
+ GLuint _shm_offset,
+ GLsizei _color_space_size) {
+ SetHeader();
+ texture_id = _texture_id;
+ shm_id = _shm_id;
+ shm_offset = _shm_offset;
+ color_space_size = _color_space_size;
+ }
+
+ void* Set(void* cmd,
+ GLuint _texture_id,
+ GLuint _shm_id,
+ GLuint _shm_offset,
+ GLsizei _color_space_size) {
+ static_cast<ValueType*>(cmd)->Init(_texture_id, _shm_id, _shm_offset,
+ _color_space_size);
+ return NextCmdAddress<ValueType>(cmd);
+ }
+
+ gpu::CommandHeader header;
+ uint32_t texture_id;
+ uint32_t shm_id;
+ uint32_t shm_offset;
+ int32_t color_space_size;
+};
+
+static_assert(sizeof(SetColorSpaceForScanoutCHROMIUM) == 20,
+ "size of SetColorSpaceForScanoutCHROMIUM should be 20");
+static_assert(offsetof(SetColorSpaceForScanoutCHROMIUM, header) == 0,
+ "offset of SetColorSpaceForScanoutCHROMIUM header should be 0");
+static_assert(
+ offsetof(SetColorSpaceForScanoutCHROMIUM, texture_id) == 4,
+ "offset of SetColorSpaceForScanoutCHROMIUM texture_id should be 4");
+static_assert(offsetof(SetColorSpaceForScanoutCHROMIUM, shm_id) == 8,
+ "offset of SetColorSpaceForScanoutCHROMIUM shm_id should be 8");
+static_assert(
+ offsetof(SetColorSpaceForScanoutCHROMIUM, shm_offset) == 12,
+ "offset of SetColorSpaceForScanoutCHROMIUM shm_offset should be 12");
+static_assert(
+ offsetof(SetColorSpaceForScanoutCHROMIUM, color_space_size) == 16,
+ "offset of SetColorSpaceForScanoutCHROMIUM color_space_size should be 16");
+
struct ScheduleCALayerInUseQueryCHROMIUMImmediate {
typedef ScheduleCALayerInUseQueryCHROMIUMImmediate ValueType;
static const CommandId kCmdId = kScheduleCALayerInUseQueryCHROMIUMImmediate;
@@ -15783,21 +15846,28 @@ struct OverlayPromotionHintCHROMIUM {
void Init(GLuint _texture,
GLboolean _promotion_hint,
GLint _display_x,
- GLint _display_y) {
+ GLint _display_y,
+ GLint _display_width,
+ GLint _display_height) {
SetHeader();
texture = _texture;
promotion_hint = _promotion_hint;
display_x = _display_x;
display_y = _display_y;
+ display_width = _display_width;
+ display_height = _display_height;
}
void* Set(void* cmd,
GLuint _texture,
GLboolean _promotion_hint,
GLint _display_x,
- GLint _display_y) {
+ GLint _display_y,
+ GLint _display_width,
+ GLint _display_height) {
static_cast<ValueType*>(cmd)->Init(_texture, _promotion_hint, _display_x,
- _display_y);
+ _display_y, _display_width,
+ _display_height);
return NextCmdAddress<ValueType>(cmd);
}
@@ -15806,10 +15876,12 @@ struct OverlayPromotionHintCHROMIUM {
uint32_t promotion_hint;
int32_t display_x;
int32_t display_y;
+ int32_t display_width;
+ int32_t display_height;
};
-static_assert(sizeof(OverlayPromotionHintCHROMIUM) == 20,
- "size of OverlayPromotionHintCHROMIUM should be 20");
+static_assert(sizeof(OverlayPromotionHintCHROMIUM) == 28,
+ "size of OverlayPromotionHintCHROMIUM should be 28");
static_assert(offsetof(OverlayPromotionHintCHROMIUM, header) == 0,
"offset of OverlayPromotionHintCHROMIUM header should be 0");
static_assert(offsetof(OverlayPromotionHintCHROMIUM, texture) == 4,
@@ -15821,6 +15893,12 @@ static_assert(offsetof(OverlayPromotionHintCHROMIUM, display_x) == 12,
"offset of OverlayPromotionHintCHROMIUM display_x should be 12");
static_assert(offsetof(OverlayPromotionHintCHROMIUM, display_y) == 16,
"offset of OverlayPromotionHintCHROMIUM display_y should be 16");
+static_assert(
+ offsetof(OverlayPromotionHintCHROMIUM, display_width) == 20,
+ "offset of OverlayPromotionHintCHROMIUM display_width should be 20");
+static_assert(
+ offsetof(OverlayPromotionHintCHROMIUM, display_height) == 24,
+ "offset of OverlayPromotionHintCHROMIUM display_height should be 24");
struct SwapBuffersWithBoundsCHROMIUMImmediate {
typedef SwapBuffersWithBoundsCHROMIUMImmediate ValueType;
@@ -16060,4 +16138,169 @@ static_assert(
offsetof(LockDiscardableTextureCHROMIUM, texture_id) == 4,
"offset of LockDiscardableTextureCHROMIUM texture_id should be 4");
+struct BeginRasterCHROMIUM {
+ typedef BeginRasterCHROMIUM ValueType;
+ static const CommandId kCmdId = kBeginRasterCHROMIUM;
+ static const cmd::ArgFlags kArgFlags = cmd::kFixed;
+ static const uint8_t cmd_flags = CMD_FLAG_SET_TRACE_LEVEL(3);
+
+ static uint32_t ComputeSize() {
+ return static_cast<uint32_t>(sizeof(ValueType)); // NOLINT
+ }
+
+ void SetHeader() { header.SetCmd<ValueType>(); }
+
+ void Init(GLuint _texture_id,
+ GLuint _sk_color,
+ GLuint _msaa_sample_count,
+ GLboolean _can_use_lcd_text,
+ GLboolean _use_distance_field_text,
+ GLint _pixel_config) {
+ SetHeader();
+ texture_id = _texture_id;
+ sk_color = _sk_color;
+ msaa_sample_count = _msaa_sample_count;
+ can_use_lcd_text = _can_use_lcd_text;
+ use_distance_field_text = _use_distance_field_text;
+ pixel_config = _pixel_config;
+ }
+
+ void* Set(void* cmd,
+ GLuint _texture_id,
+ GLuint _sk_color,
+ GLuint _msaa_sample_count,
+ GLboolean _can_use_lcd_text,
+ GLboolean _use_distance_field_text,
+ GLint _pixel_config) {
+ static_cast<ValueType*>(cmd)->Init(_texture_id, _sk_color,
+ _msaa_sample_count, _can_use_lcd_text,
+ _use_distance_field_text, _pixel_config);
+ return NextCmdAddress<ValueType>(cmd);
+ }
+
+ gpu::CommandHeader header;
+ uint32_t texture_id;
+ uint32_t sk_color;
+ uint32_t msaa_sample_count;
+ uint32_t can_use_lcd_text;
+ uint32_t use_distance_field_text;
+ int32_t pixel_config;
+};
+
+static_assert(sizeof(BeginRasterCHROMIUM) == 28,
+ "size of BeginRasterCHROMIUM should be 28");
+static_assert(offsetof(BeginRasterCHROMIUM, header) == 0,
+ "offset of BeginRasterCHROMIUM header should be 0");
+static_assert(offsetof(BeginRasterCHROMIUM, texture_id) == 4,
+ "offset of BeginRasterCHROMIUM texture_id should be 4");
+static_assert(offsetof(BeginRasterCHROMIUM, sk_color) == 8,
+ "offset of BeginRasterCHROMIUM sk_color should be 8");
+static_assert(offsetof(BeginRasterCHROMIUM, msaa_sample_count) == 12,
+ "offset of BeginRasterCHROMIUM msaa_sample_count should be 12");
+static_assert(offsetof(BeginRasterCHROMIUM, can_use_lcd_text) == 16,
+ "offset of BeginRasterCHROMIUM can_use_lcd_text should be 16");
+static_assert(
+ offsetof(BeginRasterCHROMIUM, use_distance_field_text) == 20,
+ "offset of BeginRasterCHROMIUM use_distance_field_text should be 20");
+static_assert(offsetof(BeginRasterCHROMIUM, pixel_config) == 24,
+ "offset of BeginRasterCHROMIUM pixel_config should be 24");
+
+struct RasterCHROMIUM {
+ typedef RasterCHROMIUM ValueType;
+ static const CommandId kCmdId = kRasterCHROMIUM;
+ static const cmd::ArgFlags kArgFlags = cmd::kFixed;
+ static const uint8_t cmd_flags = CMD_FLAG_SET_TRACE_LEVEL(3);
+
+ static uint32_t ComputeSize() {
+ return static_cast<uint32_t>(sizeof(ValueType)); // NOLINT
+ }
+
+ void SetHeader() { header.SetCmd<ValueType>(); }
+
+ void Init(uint32_t _list_shm_id,
+ uint32_t _list_shm_offset,
+ GLint _x,
+ GLint _y,
+ GLint _w,
+ GLint _h,
+ uint32_t _data_size) {
+ SetHeader();
+ list_shm_id = _list_shm_id;
+ list_shm_offset = _list_shm_offset;
+ x = _x;
+ y = _y;
+ w = _w;
+ h = _h;
+ data_size = _data_size;
+ }
+
+ void* Set(void* cmd,
+ uint32_t _list_shm_id,
+ uint32_t _list_shm_offset,
+ GLint _x,
+ GLint _y,
+ GLint _w,
+ GLint _h,
+ uint32_t _data_size) {
+ static_cast<ValueType*>(cmd)->Init(_list_shm_id, _list_shm_offset, _x, _y,
+ _w, _h, _data_size);
+ return NextCmdAddress<ValueType>(cmd);
+ }
+
+ gpu::CommandHeader header;
+ uint32_t list_shm_id;
+ uint32_t list_shm_offset;
+ int32_t x;
+ int32_t y;
+ int32_t w;
+ int32_t h;
+ uint32_t data_size;
+};
+
+static_assert(sizeof(RasterCHROMIUM) == 32,
+ "size of RasterCHROMIUM should be 32");
+static_assert(offsetof(RasterCHROMIUM, header) == 0,
+ "offset of RasterCHROMIUM header should be 0");
+static_assert(offsetof(RasterCHROMIUM, list_shm_id) == 4,
+ "offset of RasterCHROMIUM list_shm_id should be 4");
+static_assert(offsetof(RasterCHROMIUM, list_shm_offset) == 8,
+ "offset of RasterCHROMIUM list_shm_offset should be 8");
+static_assert(offsetof(RasterCHROMIUM, x) == 12,
+ "offset of RasterCHROMIUM x should be 12");
+static_assert(offsetof(RasterCHROMIUM, y) == 16,
+ "offset of RasterCHROMIUM y should be 16");
+static_assert(offsetof(RasterCHROMIUM, w) == 20,
+ "offset of RasterCHROMIUM w should be 20");
+static_assert(offsetof(RasterCHROMIUM, h) == 24,
+ "offset of RasterCHROMIUM h should be 24");
+static_assert(offsetof(RasterCHROMIUM, data_size) == 28,
+ "offset of RasterCHROMIUM data_size should be 28");
+
+struct EndRasterCHROMIUM {
+ typedef EndRasterCHROMIUM ValueType;
+ static const CommandId kCmdId = kEndRasterCHROMIUM;
+ static const cmd::ArgFlags kArgFlags = cmd::kFixed;
+ static const uint8_t cmd_flags = CMD_FLAG_SET_TRACE_LEVEL(3);
+
+ static uint32_t ComputeSize() {
+ return static_cast<uint32_t>(sizeof(ValueType)); // NOLINT
+ }
+
+ void SetHeader() { header.SetCmd<ValueType>(); }
+
+ void Init() { SetHeader(); }
+
+ void* Set(void* cmd) {
+ static_cast<ValueType*>(cmd)->Init();
+ return NextCmdAddress<ValueType>(cmd);
+ }
+
+ gpu::CommandHeader header;
+};
+
+static_assert(sizeof(EndRasterCHROMIUM) == 4,
+ "size of EndRasterCHROMIUM should be 4");
+static_assert(offsetof(EndRasterCHROMIUM, header) == 0,
+ "offset of EndRasterCHROMIUM header should be 0");
+
#endif // GPU_COMMAND_BUFFER_COMMON_GLES2_CMD_FORMAT_AUTOGEN_H_
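
The three raster commands above follow the same fixed-size Init()/Set() pattern as every other command in this header. As a minimal client-side sketch (the GetCmdSpace<T>() allocator and the argument variables stand in for a CommandBufferHelper-style caller and are not part of this patch), emitting BeginRasterCHROMIUM looks like:

  gles2::cmds::BeginRasterCHROMIUM* c =
      GetCmdSpace<gles2::cmds::BeginRasterCHROMIUM>();
  if (c) {
    // Init() writes the header plus all six arguments into command buffer
    // memory at the fixed offsets asserted above.
    c->Init(texture_id, sk_color, msaa_sample_count, can_use_lcd_text,
            use_distance_field_text, pixel_config);
  }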
diff --git a/chromium/gpu/command_buffer/common/gles2_cmd_format_test_autogen.h b/chromium/gpu/command_buffer/common/gles2_cmd_format_test_autogen.h
index 2d00751e477..860ff542969 100644
--- a/chromium/gpu/command_buffer/common/gles2_cmd_format_test_autogen.h
+++ b/chromium/gpu/command_buffer/common/gles2_cmd_format_test_autogen.h
@@ -4029,16 +4029,17 @@ TEST_F(GLES2FormatTest, FlushMappedBufferRange) {
TEST_F(GLES2FormatTest, ResizeCHROMIUM) {
cmds::ResizeCHROMIUM& cmd = *GetBufferAs<cmds::ResizeCHROMIUM>();
- void* next_cmd =
- cmd.Set(&cmd, static_cast<GLuint>(11), static_cast<GLuint>(12),
- static_cast<GLfloat>(13), static_cast<GLboolean>(14));
+ void* next_cmd = cmd.Set(&cmd, static_cast<GLuint>(11),
+ static_cast<GLuint>(12), static_cast<GLfloat>(13),
+ static_cast<GLenum>(14), static_cast<GLboolean>(15));
EXPECT_EQ(static_cast<uint32_t>(cmds::ResizeCHROMIUM::kCmdId),
cmd.header.command);
EXPECT_EQ(sizeof(cmd), cmd.header.size * 4u);
EXPECT_EQ(static_cast<GLuint>(11), cmd.width);
EXPECT_EQ(static_cast<GLuint>(12), cmd.height);
EXPECT_EQ(static_cast<GLfloat>(13), cmd.scale_factor);
- EXPECT_EQ(static_cast<GLboolean>(14), cmd.alpha);
+ EXPECT_EQ(static_cast<GLenum>(14), cmd.color_space);
+ EXPECT_EQ(static_cast<GLboolean>(15), cmd.alpha);
CheckBytesWrittenMatchesExpectedSize(next_cmd, sizeof(cmd));
}
@@ -4635,6 +4636,23 @@ TEST_F(GLES2FormatTest, ScheduleCALayerCHROMIUM) {
CheckBytesWrittenMatchesExpectedSize(next_cmd, sizeof(cmd));
}
+TEST_F(GLES2FormatTest, SetColorSpaceForScanoutCHROMIUM) {
+ cmds::SetColorSpaceForScanoutCHROMIUM& cmd =
+ *GetBufferAs<cmds::SetColorSpaceForScanoutCHROMIUM>();
+ void* next_cmd =
+ cmd.Set(&cmd, static_cast<GLuint>(11), static_cast<GLuint>(12),
+ static_cast<GLuint>(13), static_cast<GLsizei>(14));
+ EXPECT_EQ(
+ static_cast<uint32_t>(cmds::SetColorSpaceForScanoutCHROMIUM::kCmdId),
+ cmd.header.command);
+ EXPECT_EQ(sizeof(cmd), cmd.header.size * 4u);
+ EXPECT_EQ(static_cast<GLuint>(11), cmd.texture_id);
+ EXPECT_EQ(static_cast<GLuint>(12), cmd.shm_id);
+ EXPECT_EQ(static_cast<GLuint>(13), cmd.shm_offset);
+ EXPECT_EQ(static_cast<GLsizei>(14), cmd.color_space_size);
+ CheckBytesWrittenMatchesExpectedSize(next_cmd, sizeof(cmd));
+}
+
TEST_F(GLES2FormatTest, ScheduleCALayerInUseQueryCHROMIUMImmediate) {
const int kSomeBaseValueToTestWith = 51;
static GLuint data[] = {
@@ -5279,7 +5297,8 @@ TEST_F(GLES2FormatTest, OverlayPromotionHintCHROMIUM) {
*GetBufferAs<cmds::OverlayPromotionHintCHROMIUM>();
void* next_cmd =
cmd.Set(&cmd, static_cast<GLuint>(11), static_cast<GLboolean>(12),
- static_cast<GLint>(13), static_cast<GLint>(14));
+ static_cast<GLint>(13), static_cast<GLint>(14),
+ static_cast<GLint>(15), static_cast<GLint>(16));
EXPECT_EQ(static_cast<uint32_t>(cmds::OverlayPromotionHintCHROMIUM::kCmdId),
cmd.header.command);
EXPECT_EQ(sizeof(cmd), cmd.header.size * 4u);
@@ -5287,6 +5306,8 @@ TEST_F(GLES2FormatTest, OverlayPromotionHintCHROMIUM) {
EXPECT_EQ(static_cast<GLboolean>(12), cmd.promotion_hint);
EXPECT_EQ(static_cast<GLint>(13), cmd.display_x);
EXPECT_EQ(static_cast<GLint>(14), cmd.display_y);
+ EXPECT_EQ(static_cast<GLint>(15), cmd.display_width);
+ EXPECT_EQ(static_cast<GLint>(16), cmd.display_height);
CheckBytesWrittenMatchesExpectedSize(next_cmd, sizeof(cmd));
}
@@ -5378,4 +5399,50 @@ TEST_F(GLES2FormatTest, LockDiscardableTextureCHROMIUM) {
CheckBytesWrittenMatchesExpectedSize(next_cmd, sizeof(cmd));
}
+TEST_F(GLES2FormatTest, BeginRasterCHROMIUM) {
+ cmds::BeginRasterCHROMIUM& cmd = *GetBufferAs<cmds::BeginRasterCHROMIUM>();
+ void* next_cmd =
+ cmd.Set(&cmd, static_cast<GLuint>(11), static_cast<GLuint>(12),
+ static_cast<GLuint>(13), static_cast<GLboolean>(14),
+ static_cast<GLboolean>(15), static_cast<GLint>(16));
+ EXPECT_EQ(static_cast<uint32_t>(cmds::BeginRasterCHROMIUM::kCmdId),
+ cmd.header.command);
+ EXPECT_EQ(sizeof(cmd), cmd.header.size * 4u);
+ EXPECT_EQ(static_cast<GLuint>(11), cmd.texture_id);
+ EXPECT_EQ(static_cast<GLuint>(12), cmd.sk_color);
+ EXPECT_EQ(static_cast<GLuint>(13), cmd.msaa_sample_count);
+ EXPECT_EQ(static_cast<GLboolean>(14), cmd.can_use_lcd_text);
+ EXPECT_EQ(static_cast<GLboolean>(15), cmd.use_distance_field_text);
+ EXPECT_EQ(static_cast<GLint>(16), cmd.pixel_config);
+ CheckBytesWrittenMatchesExpectedSize(next_cmd, sizeof(cmd));
+}
+
+TEST_F(GLES2FormatTest, RasterCHROMIUM) {
+ cmds::RasterCHROMIUM& cmd = *GetBufferAs<cmds::RasterCHROMIUM>();
+ void* next_cmd = cmd.Set(&cmd, static_cast<uint32_t>(11),
+ static_cast<uint32_t>(12), static_cast<GLint>(13),
+ static_cast<GLint>(14), static_cast<GLint>(15),
+ static_cast<GLint>(16), static_cast<uint32_t>(17));
+ EXPECT_EQ(static_cast<uint32_t>(cmds::RasterCHROMIUM::kCmdId),
+ cmd.header.command);
+ EXPECT_EQ(sizeof(cmd), cmd.header.size * 4u);
+ EXPECT_EQ(static_cast<uint32_t>(11), cmd.list_shm_id);
+ EXPECT_EQ(static_cast<uint32_t>(12), cmd.list_shm_offset);
+ EXPECT_EQ(static_cast<GLint>(13), cmd.x);
+ EXPECT_EQ(static_cast<GLint>(14), cmd.y);
+ EXPECT_EQ(static_cast<GLint>(15), cmd.w);
+ EXPECT_EQ(static_cast<GLint>(16), cmd.h);
+ EXPECT_EQ(static_cast<uint32_t>(17), cmd.data_size);
+ CheckBytesWrittenMatchesExpectedSize(next_cmd, sizeof(cmd));
+}
+
+TEST_F(GLES2FormatTest, EndRasterCHROMIUM) {
+ cmds::EndRasterCHROMIUM& cmd = *GetBufferAs<cmds::EndRasterCHROMIUM>();
+ void* next_cmd = cmd.Set(&cmd);
+ EXPECT_EQ(static_cast<uint32_t>(cmds::EndRasterCHROMIUM::kCmdId),
+ cmd.header.command);
+ EXPECT_EQ(sizeof(cmd), cmd.header.size * 4u);
+ CheckBytesWrittenMatchesExpectedSize(next_cmd, sizeof(cmd));
+}
+
#endif // GPU_COMMAND_BUFFER_COMMON_GLES2_CMD_FORMAT_TEST_AUTOGEN_H_
diff --git a/chromium/gpu/command_buffer/common/gles2_cmd_ids_autogen.h b/chromium/gpu/command_buffer/common/gles2_cmd_ids_autogen.h
index 21f3fffe477..c5eb1742fd7 100644
--- a/chromium/gpu/command_buffer/common/gles2_cmd_ids_autogen.h
+++ b/chromium/gpu/command_buffer/common/gles2_cmd_ids_autogen.h
@@ -294,50 +294,54 @@
OP(ScheduleOverlayPlaneCHROMIUM) /* 535 */ \
OP(ScheduleCALayerSharedStateCHROMIUM) /* 536 */ \
OP(ScheduleCALayerCHROMIUM) /* 537 */ \
- OP(ScheduleCALayerInUseQueryCHROMIUMImmediate) /* 538 */ \
- OP(CommitOverlayPlanesCHROMIUM) /* 539 */ \
- OP(SwapInterval) /* 540 */ \
- OP(FlushDriverCachesCHROMIUM) /* 541 */ \
- OP(ScheduleDCLayerSharedStateCHROMIUM) /* 542 */ \
- OP(ScheduleDCLayerCHROMIUM) /* 543 */ \
- OP(MatrixLoadfCHROMIUMImmediate) /* 544 */ \
- OP(MatrixLoadIdentityCHROMIUM) /* 545 */ \
- OP(GenPathsCHROMIUM) /* 546 */ \
- OP(DeletePathsCHROMIUM) /* 547 */ \
- OP(IsPathCHROMIUM) /* 548 */ \
- OP(PathCommandsCHROMIUM) /* 549 */ \
- OP(PathParameterfCHROMIUM) /* 550 */ \
- OP(PathParameteriCHROMIUM) /* 551 */ \
- OP(PathStencilFuncCHROMIUM) /* 552 */ \
- OP(StencilFillPathCHROMIUM) /* 553 */ \
- OP(StencilStrokePathCHROMIUM) /* 554 */ \
- OP(CoverFillPathCHROMIUM) /* 555 */ \
- OP(CoverStrokePathCHROMIUM) /* 556 */ \
- OP(StencilThenCoverFillPathCHROMIUM) /* 557 */ \
- OP(StencilThenCoverStrokePathCHROMIUM) /* 558 */ \
- OP(StencilFillPathInstancedCHROMIUM) /* 559 */ \
- OP(StencilStrokePathInstancedCHROMIUM) /* 560 */ \
- OP(CoverFillPathInstancedCHROMIUM) /* 561 */ \
- OP(CoverStrokePathInstancedCHROMIUM) /* 562 */ \
- OP(StencilThenCoverFillPathInstancedCHROMIUM) /* 563 */ \
- OP(StencilThenCoverStrokePathInstancedCHROMIUM) /* 564 */ \
- OP(BindFragmentInputLocationCHROMIUMBucket) /* 565 */ \
- OP(ProgramPathFragmentInputGenCHROMIUM) /* 566 */ \
- OP(GetBufferSubDataAsyncCHROMIUM) /* 567 */ \
- OP(CoverageModulationCHROMIUM) /* 568 */ \
- OP(BlendBarrierKHR) /* 569 */ \
- OP(ApplyScreenSpaceAntialiasingCHROMIUM) /* 570 */ \
- OP(BindFragDataLocationIndexedEXTBucket) /* 571 */ \
- OP(BindFragDataLocationEXTBucket) /* 572 */ \
- OP(GetFragDataIndexEXT) /* 573 */ \
- OP(UniformMatrix4fvStreamTextureMatrixCHROMIUMImmediate) /* 574 */ \
- OP(OverlayPromotionHintCHROMIUM) /* 575 */ \
- OP(SwapBuffersWithBoundsCHROMIUMImmediate) /* 576 */ \
- OP(SetDrawRectangleCHROMIUM) /* 577 */ \
- OP(SetEnableDCLayersCHROMIUM) /* 578 */ \
- OP(InitializeDiscardableTextureCHROMIUM) /* 579 */ \
- OP(UnlockDiscardableTextureCHROMIUM) /* 580 */ \
- OP(LockDiscardableTextureCHROMIUM) /* 581 */
+ OP(SetColorSpaceForScanoutCHROMIUM) /* 538 */ \
+ OP(ScheduleCALayerInUseQueryCHROMIUMImmediate) /* 539 */ \
+ OP(CommitOverlayPlanesCHROMIUM) /* 540 */ \
+ OP(SwapInterval) /* 541 */ \
+ OP(FlushDriverCachesCHROMIUM) /* 542 */ \
+ OP(ScheduleDCLayerSharedStateCHROMIUM) /* 543 */ \
+ OP(ScheduleDCLayerCHROMIUM) /* 544 */ \
+ OP(MatrixLoadfCHROMIUMImmediate) /* 545 */ \
+ OP(MatrixLoadIdentityCHROMIUM) /* 546 */ \
+ OP(GenPathsCHROMIUM) /* 547 */ \
+ OP(DeletePathsCHROMIUM) /* 548 */ \
+ OP(IsPathCHROMIUM) /* 549 */ \
+ OP(PathCommandsCHROMIUM) /* 550 */ \
+ OP(PathParameterfCHROMIUM) /* 551 */ \
+ OP(PathParameteriCHROMIUM) /* 552 */ \
+ OP(PathStencilFuncCHROMIUM) /* 553 */ \
+ OP(StencilFillPathCHROMIUM) /* 554 */ \
+ OP(StencilStrokePathCHROMIUM) /* 555 */ \
+ OP(CoverFillPathCHROMIUM) /* 556 */ \
+ OP(CoverStrokePathCHROMIUM) /* 557 */ \
+ OP(StencilThenCoverFillPathCHROMIUM) /* 558 */ \
+ OP(StencilThenCoverStrokePathCHROMIUM) /* 559 */ \
+ OP(StencilFillPathInstancedCHROMIUM) /* 560 */ \
+ OP(StencilStrokePathInstancedCHROMIUM) /* 561 */ \
+ OP(CoverFillPathInstancedCHROMIUM) /* 562 */ \
+ OP(CoverStrokePathInstancedCHROMIUM) /* 563 */ \
+ OP(StencilThenCoverFillPathInstancedCHROMIUM) /* 564 */ \
+ OP(StencilThenCoverStrokePathInstancedCHROMIUM) /* 565 */ \
+ OP(BindFragmentInputLocationCHROMIUMBucket) /* 566 */ \
+ OP(ProgramPathFragmentInputGenCHROMIUM) /* 567 */ \
+ OP(GetBufferSubDataAsyncCHROMIUM) /* 568 */ \
+ OP(CoverageModulationCHROMIUM) /* 569 */ \
+ OP(BlendBarrierKHR) /* 570 */ \
+ OP(ApplyScreenSpaceAntialiasingCHROMIUM) /* 571 */ \
+ OP(BindFragDataLocationIndexedEXTBucket) /* 572 */ \
+ OP(BindFragDataLocationEXTBucket) /* 573 */ \
+ OP(GetFragDataIndexEXT) /* 574 */ \
+ OP(UniformMatrix4fvStreamTextureMatrixCHROMIUMImmediate) /* 575 */ \
+ OP(OverlayPromotionHintCHROMIUM) /* 576 */ \
+ OP(SwapBuffersWithBoundsCHROMIUMImmediate) /* 577 */ \
+ OP(SetDrawRectangleCHROMIUM) /* 578 */ \
+ OP(SetEnableDCLayersCHROMIUM) /* 579 */ \
+ OP(InitializeDiscardableTextureCHROMIUM) /* 580 */ \
+ OP(UnlockDiscardableTextureCHROMIUM) /* 581 */ \
+ OP(LockDiscardableTextureCHROMIUM) /* 582 */ \
+ OP(BeginRasterCHROMIUM) /* 583 */ \
+ OP(RasterCHROMIUM) /* 584 */ \
+ OP(EndRasterCHROMIUM) /* 585 */
enum CommandId {
kOneBeforeStartPoint =
diff --git a/chromium/gpu/command_buffer/common/gles2_cmd_utils.cc b/chromium/gpu/command_buffer/common/gles2_cmd_utils.cc
index 0a451f64679..6a52eca7ce2 100644
--- a/chromium/gpu/command_buffer/common/gles2_cmd_utils.cc
+++ b/chromium/gpu/command_buffer/common/gles2_cmd_utils.cc
@@ -1896,24 +1896,7 @@ bool IsWebGL2OrES3ContextType(ContextType context_type) {
return false;
}
-ContextCreationAttribHelper::ContextCreationAttribHelper()
- : gpu_preference(gl::PreferIntegratedGpu),
- alpha_size(-1),
- blue_size(-1),
- green_size(-1),
- red_size(-1),
- depth_size(-1),
- stencil_size(-1),
- samples(-1),
- sample_buffers(-1),
- buffer_preserved(true),
- bind_generates_resource(true),
- fail_if_major_perf_caveat(false),
- lose_context_when_out_of_memory(false),
- should_use_native_gmb_for_backbuffer(false),
- own_offscreen_surface(false),
- single_buffer(false),
- context_type(CONTEXT_TYPE_OPENGLES2) {}
+ContextCreationAttribHelper::ContextCreationAttribHelper() = default;
ContextCreationAttribHelper::ContextCreationAttribHelper(
const ContextCreationAttribHelper& other) = default;
diff --git a/chromium/gpu/command_buffer/common/gles2_cmd_utils.h b/chromium/gpu/command_buffer/common/gles2_cmd_utils.h
index 9ae3f2a2295..c6230a53456 100644
--- a/chromium/gpu/command_buffer/common/gles2_cmd_utils.h
+++ b/chromium/gpu/command_buffer/common/gles2_cmd_utils.h
@@ -326,25 +326,26 @@ struct GLES2_UTILS_EXPORT ContextCreationAttribHelper {
bool Parse(const std::vector<int32_t>& attribs);
gfx::Size offscreen_framebuffer_size;
- gl::GpuPreference gpu_preference;
+ gl::GpuPreference gpu_preference = gl::PreferIntegratedGpu;
// -1 if invalid or unspecified.
- int32_t alpha_size;
- int32_t blue_size;
- int32_t green_size;
- int32_t red_size;
- int32_t depth_size;
- int32_t stencil_size;
- int32_t samples;
- int32_t sample_buffers;
- bool buffer_preserved;
- bool bind_generates_resource;
- bool fail_if_major_perf_caveat;
- bool lose_context_when_out_of_memory;
- bool should_use_native_gmb_for_backbuffer;
- bool own_offscreen_surface;
- bool single_buffer;
-
- ContextType context_type;
+ int32_t alpha_size = -1;
+ int32_t blue_size = -1;
+ int32_t green_size = -1;
+ int32_t red_size = -1;
+ int32_t depth_size = -1;
+ int32_t stencil_size = -1;
+ int32_t samples = -1;
+ int32_t sample_buffers = -1;
+ bool buffer_preserved = true;
+ bool bind_generates_resource = true;
+ bool fail_if_major_perf_caveat = false;
+ bool lose_context_when_out_of_memory = false;
+ bool should_use_native_gmb_for_backbuffer = false;
+ bool own_offscreen_surface = false;
+ bool single_buffer = false;
+ bool enable_oop_rasterization = false;
+
+ ContextType context_type = CONTEXT_TYPE_OPENGLES2;
ColorSpace color_space = COLOR_SPACE_UNSPECIFIED;
};
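
This hunk trades the long member-initializer-list constructor for in-class default member initializers plus a defaulted constructor, so each default sits next to its declaration and new fields such as enable_oop_rasterization get a safe default without touching any constructor. A minimal sketch of the pattern, using a hypothetical type rather than the real struct:

  struct Example {
    Example() = default;     // compiler-generated; picks up the defaults below
    int32_t samples = -1;    // -1 means invalid or unspecified
    bool single_buffer = false;
  };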
diff --git a/chromium/gpu/command_buffer/common/gles2_cmd_utils_implementation_autogen.h b/chromium/gpu/command_buffer/common/gles2_cmd_utils_implementation_autogen.h
index ba8396bdeab..b6a91d84d79 100644
--- a/chromium/gpu/command_buffer/common/gles2_cmd_utils_implementation_autogen.h
+++ b/chromium/gpu/command_buffer/common/gles2_cmd_utils_implementation_autogen.h
@@ -1912,6 +1912,21 @@ static const GLES2Util::EnumToString enum_to_string_table[] = {
0x8A57, "GL_COMPRESSED_SRGB_ALPHA_PVRTC_4BPPV1_EXT",
},
{
+ 0x8AF0, "GL_TEXTURE_FILTERING_HINT_CHROMIUM",
+ },
+ {
+ 0x8AF1, "GL_COLOR_SPACE_UNSPECIFIED_CHROMIUM",
+ },
+ {
+ 0x8AF2, "GL_COLOR_SPACE_SCRGB_LINEAR_CHROMIUM",
+ },
+ {
+ 0x8AF3, "GL_COLOR_SPACE_SRGB_CHROMIUM",
+ },
+ {
+ 0x8AF4, "GL_COLOR_SPACE_DISPLAY_P3_CHROMIUM",
+ },
+ {
0x8B30, "GL_FRAGMENT_SHADER",
},
{
diff --git a/chromium/gpu/command_buffer/common/mailbox_holder.h b/chromium/gpu/command_buffer/common/mailbox_holder.h
index b1cc3dfad42..bc0d88d65d3 100644
--- a/chromium/gpu/command_buffer/common/mailbox_holder.h
+++ b/chromium/gpu/command_buffer/common/mailbox_holder.h
@@ -8,6 +8,7 @@
#include <stdint.h>
#include <string.h>
+#include "gpu/command_buffer/common/gl2_types.h"
#include "gpu/command_buffer/common/mailbox.h"
#include "gpu/command_buffer/common/sync_token.h"
#include "gpu/gpu_export.h"
@@ -29,7 +30,7 @@ struct GPU_EXPORT MailboxHolder {
gpu::Mailbox mailbox;
gpu::SyncToken sync_token;
- uint32_t texture_target;
+ GLenum texture_target;
};
} // namespace gpu
diff --git a/chromium/gpu/command_buffer/service/BUILD.gn b/chromium/gpu/command_buffer/service/BUILD.gn
index c87a3f7d859..458eea927d4 100644
--- a/chromium/gpu/command_buffer/service/BUILD.gn
+++ b/chromium/gpu/command_buffer/service/BUILD.gn
@@ -44,6 +44,8 @@ target(link_target_type, "service_sources") {
"context_state.h",
"context_state_autogen.h",
"context_state_impl_autogen.h",
+ "create_gr_gl_interface.cc",
+ "create_gr_gl_interface.h",
"error_state.cc",
"error_state.h",
"feature_info.cc",
@@ -107,6 +109,8 @@ target(link_target_type, "service_sources") {
"mailbox_manager_sync.h",
"memory_program_cache.cc",
"memory_program_cache.h",
+ "passthrough_program_cache.cc",
+ "passthrough_program_cache.h",
"path_manager.cc",
"path_manager.h",
"preemption_flag.h",
@@ -165,6 +169,7 @@ target(link_target_type, "service_sources") {
":disk_cache_proto",
"//base",
"//base/third_party/dynamic_annotations",
+ "//cc/paint",
"//crypto",
"//gpu/command_buffer/client:client_sources",
"//gpu/command_buffer/common:gles2_utils",
@@ -177,8 +182,10 @@ target(link_target_type, "service_sources") {
"//third_party/protobuf:protobuf_lite",
"//third_party/re2",
"//third_party/smhasher:cityhash",
+ "//third_party/zlib",
"//ui/gfx",
"//ui/gfx/geometry",
+ "//ui/gfx/ipc/color",
"//ui/gl",
"//ui/gl/init",
]
diff --git a/chromium/gpu/command_buffer/service/DEPS b/chromium/gpu/command_buffer/service/DEPS
index d1cd8cdbe07..831a3d249a7 100644
--- a/chromium/gpu/command_buffer/service/DEPS
+++ b/chromium/gpu/command_buffer/service/DEPS
@@ -1,3 +1,5 @@
include_rules = [
+ "+cc/paint",
"+media/media_features.h",
+ "+third_party/skia",
]
diff --git a/chromium/gpu/command_buffer/service/buffer_manager.cc b/chromium/gpu/command_buffer/service/buffer_manager.cc
index e625cc016c5..4668db70121 100644
--- a/chromium/gpu/command_buffer/service/buffer_manager.cc
+++ b/chromium/gpu/command_buffer/service/buffer_manager.cc
@@ -737,17 +737,17 @@ bool BufferManager::OnMemoryDump(const base::trace_event::MemoryDumpArgs& args,
MemoryAllocatorDump::kUnitsBytes,
static_cast<uint64_t>(buffer->size()));
- auto guid = gl::GetGLBufferGUIDForTracing(share_group_tracing_guid,
- client_buffer_id);
auto* mapped_range = buffer->GetMappedRange();
if (!mapped_range)
continue;
auto shared_memory_guid =
mapped_range->shm->backing()->shared_memory_handle().GetGUID();
if (!shared_memory_guid.is_empty()) {
- pmd->CreateSharedMemoryOwnershipEdge(
- dump->guid(), guid, shared_memory_guid, 0 /* importance */);
+ pmd->CreateSharedMemoryOwnershipEdge(dump->guid(), shared_memory_guid,
+ 0 /* importance */);
} else {
+ auto guid = gl::GetGLBufferGUIDForTracing(share_group_tracing_guid,
+ client_buffer_id);
pmd->CreateSharedGlobalAllocatorDump(guid);
pmd->AddOwnershipEdge(dump->guid(), guid);
}
diff --git a/chromium/gpu/command_buffer/service/command_buffer_service.cc b/chromium/gpu/command_buffer/service/command_buffer_service.cc
index 74865e90df3..5a39572733d 100644
--- a/chromium/gpu/command_buffer/service/command_buffer_service.cc
+++ b/chromium/gpu/command_buffer/service/command_buffer_service.cc
@@ -117,7 +117,8 @@ void CommandBufferService::Flush(int32_t put_offset,
}
void CommandBufferService::SetGetBuffer(int32_t transfer_buffer_id) {
- DCHECK_EQ(put_offset_, state_.get_offset); // Only if it's empty.
+ DCHECK((put_offset_ == state_.get_offset) ||
+ (state_.error != error::kNoError));
put_offset_ = 0;
state_.get_offset = 0;
++state_.set_get_buffer_count;
@@ -125,7 +126,6 @@ void CommandBufferService::SetGetBuffer(int32_t transfer_buffer_id) {
// If the buffer is invalid we handle it gracefully.
// This means ring_buffer_ can be NULL.
ring_buffer_ = GetTransferBuffer(transfer_buffer_id);
- ring_buffer_id_ = transfer_buffer_id;
if (ring_buffer_) {
int32_t size = ring_buffer_->size();
volatile void* memory = ring_buffer_->memory();
@@ -178,14 +178,6 @@ scoped_refptr<Buffer> CommandBufferService::CreateTransferBuffer(size_t size,
void CommandBufferService::DestroyTransferBuffer(int32_t id) {
transfer_buffer_manager_->DestroyTransferBuffer(id);
- if (id == ring_buffer_id_) {
- ring_buffer_id_ = -1;
- ring_buffer_ = nullptr;
- buffer_ = nullptr;
- num_entries_ = 0;
- state_.get_offset = 0;
- put_offset_ = 0;
- }
}
scoped_refptr<Buffer> CommandBufferService::GetTransferBuffer(int32_t id) {
diff --git a/chromium/gpu/command_buffer/service/command_buffer_service.h b/chromium/gpu/command_buffer/service/command_buffer_service.h
index 12f83731f98..24b08526378 100644
--- a/chromium/gpu/command_buffer/service/command_buffer_service.h
+++ b/chromium/gpu/command_buffer/service/command_buffer_service.h
@@ -127,7 +127,6 @@ class GPU_EXPORT CommandBufferService : public CommandBufferServiceBase {
CommandBuffer::State state_;
int32_t put_offset_ = 0;
- int32_t ring_buffer_id_ = -1;
int32_t num_entries_ = 0;
scoped_refptr<Buffer> ring_buffer_;
volatile CommandBufferEntry* buffer_ = nullptr;
diff --git a/chromium/gpu/command_buffer/service/command_buffer_service_unittest.cc b/chromium/gpu/command_buffer/service/command_buffer_service_unittest.cc
index 7531d5f83bb..584b57c1665 100644
--- a/chromium/gpu/command_buffer/service/command_buffer_service_unittest.cc
+++ b/chromium/gpu/command_buffer/service/command_buffer_service_unittest.cc
@@ -323,13 +323,13 @@ TEST_F(CommandBufferServiceTest, TestError) {
}
TEST_F(CommandBufferServiceTest, SetBuffer) {
- MakeService(3);
+ MakeService(5);
AdvancePut(2);
// We should have advanced 2 entries.
EXPECT_EQ(2, GetGet());
CommandBuffer::State state1 = command_buffer_service()->GetState();
- int32_t id = SetNewGetBuffer(3 * sizeof(CommandBufferEntry));
+ int32_t id = SetNewGetBuffer(5 * sizeof(CommandBufferEntry));
CommandBuffer::State state2 = command_buffer_service()->GetState();
// The put and get should have reset to 0.
EXPECT_EQ(0, GetGet());
@@ -342,22 +342,34 @@ TEST_F(CommandBufferServiceTest, SetBuffer) {
// We should have advanced 2 entries.
EXPECT_EQ(2, GetGet());
- // Destroy current get buffer, should reset.
+ // Destroy current get buffer, should not reset.
command_buffer_service()->DestroyTransferBuffer(id);
CommandBuffer::State state3 = command_buffer_service()->GetState();
- EXPECT_EQ(0, GetGet());
- EXPECT_EQ(0, GetPut());
+ EXPECT_EQ(2, GetGet());
+ EXPECT_EQ(2, GetPut());
EXPECT_EQ(error::kNoError, state3.error);
- // Should not update the set_get_buffer_count however, since SetGetBuffer was
- // not called.
+ // Should not update the set_get_buffer_count either.
EXPECT_EQ(state2.set_get_buffer_count, state3.set_get_buffer_count);
- // Trying to execute commands should fail however.
+ AdvancePut(2);
+ // We should have advanced 2 entries.
+ EXPECT_EQ(4, GetGet());
+
+  // Resetting the get buffer should reset get and put.
+ command_buffer_service()->SetGetBuffer(-1);
+ CommandBuffer::State state4 = command_buffer_service()->GetState();
+ EXPECT_EQ(0, GetGet());
+ EXPECT_EQ(0, GetPut());
+ EXPECT_EQ(error::kNoError, state4.error);
+ // Should not update the set_get_buffer_count either.
+ EXPECT_EQ(state3.set_get_buffer_count + 1, state4.set_get_buffer_count);
+
+ // Trying to execute commands should now fail.
EXPECT_CALL(*this, OnParseError()).Times(1);
command_buffer_service()->Flush(2, api_mock());
- CommandBuffer::State state4 = command_buffer_service()->GetState();
+ CommandBuffer::State state5 = command_buffer_service()->GetState();
EXPECT_EQ(0, GetPut());
- EXPECT_EQ(error::kOutOfBounds, state4.error);
+ EXPECT_EQ(error::kOutOfBounds, state5.error);
Mock::VerifyAndClearExpectations(this);
}
@@ -366,7 +378,7 @@ TEST_F(CommandBufferServiceTest, InvalidSetBuffer) {
CommandBuffer::State state1 = command_buffer_service()->GetState();
// Set an invalid transfer buffer, should succeed.
- command_buffer_service()->SetGetBuffer(0);
+ command_buffer_service()->SetGetBuffer(-1);
CommandBuffer::State state2 = command_buffer_service()->GetState();
EXPECT_EQ(0, GetGet());
EXPECT_EQ(0, GetPut());
diff --git a/chromium/gpu/command_buffer/service/context_group.cc b/chromium/gpu/command_buffer/service/context_group.cc
index abbf7846e22..11a8eb90e8d 100644
--- a/chromium/gpu/command_buffer/service/context_group.cc
+++ b/chromium/gpu/command_buffer/service/context_group.cc
@@ -61,6 +61,7 @@ DisallowedFeatures AdjustDisallowedFeatures(
ContextGroup::ContextGroup(
const GpuPreferences& gpu_preferences,
+ bool supports_passthrough_command_decoders,
MailboxManager* mailbox_manager,
const scoped_refptr<MemoryTracker>& memory_tracker,
ShaderTranslatorCache* shader_translator_cache,
@@ -110,6 +111,7 @@ ContextGroup::ContextGroup(
feature_info_(feature_info),
image_manager_(image_manager),
image_factory_(image_factory),
+ use_passthrough_cmd_decoder_(false),
passthrough_resources_(new PassthroughResources),
progress_reporter_(progress_reporter),
gpu_feature_info_(gpu_feature_info),
@@ -119,6 +121,8 @@ ContextGroup::ContextGroup(
DCHECK(mailbox_manager_);
transfer_buffer_manager_ =
base::MakeUnique<TransferBufferManager>(memory_tracker_.get());
+ use_passthrough_cmd_decoder_ = supports_passthrough_command_decoders &&
+ gpu_preferences_.use_passthrough_cmd_decoder;
}
bool ContextGroup::Initialize(GLES2Decoder* decoder,
diff --git a/chromium/gpu/command_buffer/service/context_group.h b/chromium/gpu/command_buffer/service/context_group.h
index 3411e9726a5..b072dbe937e 100644
--- a/chromium/gpu/command_buffer/service/context_group.h
+++ b/chromium/gpu/command_buffer/service/context_group.h
@@ -58,6 +58,7 @@ DisallowedFeatures AdjustDisallowedFeatures(
class GPU_EXPORT ContextGroup : public base::RefCounted<ContextGroup> {
public:
ContextGroup(const GpuPreferences& gpu_preferences,
+ bool supports_passthrough_command_decoders,
MailboxManager* mailbox_manager,
const scoped_refptr<MemoryTracker>& memory_tracker,
ShaderTranslatorCache* shader_translator_cache,
@@ -228,6 +229,10 @@ class GPU_EXPORT ContextGroup : public base::RefCounted<ContextGroup> {
syncs_id_map_.erase(client_id);
}
+ bool use_passthrough_cmd_decoder() const {
+ return use_passthrough_cmd_decoder_;
+ }
+
PassthroughResources* passthrough_resources() const {
return passthrough_resources_.get();
}
@@ -304,6 +309,7 @@ class GPU_EXPORT ContextGroup : public base::RefCounted<ContextGroup> {
// Mappings from client side IDs to service side IDs.
base::hash_map<GLuint, GLsync> syncs_id_map_;
+ bool use_passthrough_cmd_decoder_;
std::unique_ptr<PassthroughResources> passthrough_resources_;
// Used to notify the watchdog thread of progress during destruction,
diff --git a/chromium/gpu/command_buffer/service/context_group_unittest.cc b/chromium/gpu/command_buffer/service/context_group_unittest.cc
index dada3dc9931..28fb63a9d35 100644
--- a/chromium/gpu/command_buffer/service/context_group_unittest.cc
+++ b/chromium/gpu/command_buffer/service/context_group_unittest.cc
@@ -46,8 +46,8 @@ class ContextGroupTest : public GpuServiceTest {
decoder_.reset(new MockGLES2Decoder(&command_buffer_service_));
scoped_refptr<FeatureInfo> feature_info = new FeatureInfo;
group_ = scoped_refptr<ContextGroup>(new ContextGroup(
- gpu_preferences_, &mailbox_manager_, nullptr /* memory_tracker */,
- nullptr /* shader_translator_cache */,
+ gpu_preferences_, false, &mailbox_manager_,
+ nullptr /* memory_tracker */, nullptr /* shader_translator_cache */,
nullptr /* framebuffer_completeness_cache */, feature_info,
kBindGeneratesResource, &image_manager_, nullptr /* image_factory */,
nullptr /* progress_reporter */, GpuFeatureInfo(),
@@ -77,6 +77,7 @@ TEST_F(ContextGroupTest, Basic) {
EXPECT_TRUE(group_->texture_manager() == NULL);
EXPECT_TRUE(group_->program_manager() == NULL);
EXPECT_TRUE(group_->shader_manager() == NULL);
+ EXPECT_FALSE(group_->use_passthrough_cmd_decoder());
}
TEST_F(ContextGroupTest, InitializeNoExtensions) {
diff --git a/chromium/gpu/command_buffer/service/context_state.cc b/chromium/gpu/command_buffer/service/context_state.cc
index 3c71d38b25e..78a77fcb239 100644
--- a/chromium/gpu/command_buffer/service/context_state.cc
+++ b/chromium/gpu/command_buffer/service/context_state.cc
@@ -467,16 +467,10 @@ void ContextState::RestoreVertexAttribArrays(
if (feature_info_->feature_flags().angle_instanced_arrays)
glVertexAttribDivisorANGLE(attrib_index, attrib->divisor());
- // Never touch vertex attribute 0's state (in particular, never
- // disable it) when running on desktop GL with compatibility profile
- // because it will never be re-enabled.
- if (attrib_index != 0 ||
- feature_info_->gl_version_info().BehavesLikeGLES()) {
- if (attrib->enabled()) {
- glEnableVertexAttribArray(attrib_index);
- } else {
- glDisableVertexAttribArray(attrib_index);
- }
+ if (attrib->enabled_in_driver()) {
+ glEnableVertexAttribArray(attrib_index);
+ } else {
+ glDisableVertexAttribArray(attrib_index);
}
}
}
diff --git a/chromium/gpu/command_buffer/service/create_gr_gl_interface.cc b/chromium/gpu/command_buffer/service/create_gr_gl_interface.cc
new file mode 100644
index 00000000000..08c3c64a6c2
--- /dev/null
+++ b/chromium/gpu/command_buffer/service/create_gr_gl_interface.cc
@@ -0,0 +1,490 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "gpu/command_buffer/service/create_gr_gl_interface.h"
+#include "ui/gl/gl_bindings.h"
+#include "ui/gl/gl_version_info.h"
+
+namespace gpu {
+namespace gles2 {
+
+namespace {
+
+const GLubyte* GetStringHook(const char* version_string, GLenum name) {
+ switch (name) {
+ case GL_VERSION:
+ return reinterpret_cast<const GLubyte*>(version_string);
+ default:
+ return glGetString(name);
+ }
+}
+
+const char* kBlacklistExtensions[] = {
+ "GL_APPLE_framebuffer_multisample",
+ "GL_APPLE_sync",
+ "GL_ARB_ES3_1_compatibility",
+ "GL_ARB_draw_indirect",
+ "GL_ARB_invalidate_subdata",
+ "GL_ARB_multi_draw_indirect",
+ "GL_ARB_sample_shading",
+ "GL_ARB_texture_barrier",
+ "GL_EXT_direct_state_access",
+ "GL_EXT_multi_draw_indirect",
+ "GL_EXT_raster_multisample",
+ "GL_EXT_window_rectangles",
+ "GL_NV_bindless_texture",
+ "GL_NV_texture_barrier",
+ "GL_OES_sample_shading",
+};
+
+} // anonymous namespace
+
+sk_sp<const GrGLInterface> CreateGrGLInterface(
+ const gl::GLVersionInfo& version_info) {
+ gl::ProcsGL* gl = &gl::g_current_gl_driver->fn;
+
+ GrGLStandard standard =
+ version_info.is_es ? kGLES_GrGLStandard : kGL_GrGLStandard;
+
+ // Depending on the advertised version and extensions, skia checks for
+  // existence of entrypoints. However, some of those we don't yet handle in
+ // gl_bindings, so we need to fake the version to the maximum fully supported
+ // by the bindings (GL 3.2 or ES 3.0), and blacklist extensions that skia
+ // handles but bindings don't.
+ // TODO(piman): add bindings for missing entrypoints.
+ GrGLFunction<GrGLGetStringProc> get_string;
+ if (version_info.IsAtLeastGL(3, 3) || version_info.IsAtLeastGLES(3, 1)) {
+ const char* fake_version = version_info.is_es ? "OpenGL ES 3.0" : "3.2";
+ get_string = [fake_version](GLenum name) {
+ return GetStringHook(fake_version, name);
+ };
+ } else {
+ get_string = gl->glGetStringFn;
+ }
+
+ GrGLExtensions extensions;
+ if (!extensions.init(standard, get_string, gl->glGetStringiFn,
+ gl->glGetIntegervFn)) {
+ LOG(ERROR) << "Failed to initialize extensions";
+ return nullptr;
+ }
+ for (const char* extension : kBlacklistExtensions)
+ extensions.remove(extension);
+
+ GrGLInterface* interface = new GrGLInterface();
+ GrGLInterface::Functions* functions = &interface->fFunctions;
+ functions->fActiveTexture = gl->glActiveTextureFn;
+ functions->fAttachShader = gl->glAttachShaderFn;
+ functions->fBindAttribLocation = gl->glBindAttribLocationFn;
+ functions->fBindBuffer = gl->glBindBufferFn;
+ functions->fBindFragDataLocation = gl->glBindFragDataLocationFn;
+ functions->fBeginQuery = gl->glBeginQueryFn;
+ functions->fBindTexture = gl->glBindTextureFn;
+
+ functions->fBlendBarrier = gl->glBlendBarrierKHRFn;
+
+ functions->fBlendColor = gl->glBlendColorFn;
+ functions->fBlendEquation = gl->glBlendEquationFn;
+ functions->fBlendFunc = gl->glBlendFuncFn;
+ functions->fBufferData = gl->glBufferDataFn;
+ functions->fBufferSubData = gl->glBufferSubDataFn;
+ functions->fClear = gl->glClearFn;
+ functions->fClearColor = gl->glClearColorFn;
+ functions->fClearStencil = gl->glClearStencilFn;
+
+ // Not used
+ // functions->fClearTexImage = nullptr;
+ // functions->fClearTexSubImage = nullptr;
+
+ functions->fColorMask = gl->glColorMaskFn;
+ functions->fCompileShader = gl->glCompileShaderFn;
+ functions->fCompressedTexImage2D = gl->glCompressedTexImage2DFn;
+ functions->fCompressedTexSubImage2D = gl->glCompressedTexSubImage2DFn;
+ functions->fCopyTexSubImage2D = gl->glCopyTexSubImage2DFn;
+ functions->fCreateProgram = gl->glCreateProgramFn;
+ functions->fCreateShader = gl->glCreateShaderFn;
+ functions->fCullFace = gl->glCullFaceFn;
+ functions->fDeleteBuffers = gl->glDeleteBuffersARBFn;
+ functions->fDeleteProgram = gl->glDeleteProgramFn;
+ functions->fDeleteQueries = gl->glDeleteQueriesFn;
+ functions->fDeleteShader = gl->glDeleteShaderFn;
+ functions->fDeleteTextures = gl->glDeleteTexturesFn;
+ functions->fDepthMask = gl->glDepthMaskFn;
+ functions->fDisable = gl->glDisableFn;
+ functions->fDisableVertexAttribArray = gl->glDisableVertexAttribArrayFn;
+ functions->fDrawArrays = gl->glDrawArraysFn;
+ functions->fDrawBuffer = gl->glDrawBufferFn;
+ functions->fDrawBuffers = gl->glDrawBuffersARBFn;
+ functions->fDrawElements = gl->glDrawElementsFn;
+
+ functions->fDrawArraysInstanced = gl->glDrawArraysInstancedANGLEFn;
+ functions->fDrawElementsInstanced = gl->glDrawElementsInstancedANGLEFn;
+
+ // GL 4.0 or GL_ARB_draw_indirect or ES 3.1
+ // functions->fDrawArraysIndirect = gl->glDrawArraysIndirectFn;
+ // functions->fDrawElementsIndirect = gl->glDrawElementsIndirectFn;
+
+ functions->fDrawRangeElements = gl->glDrawRangeElementsFn;
+ functions->fEnable = gl->glEnableFn;
+ functions->fEnableVertexAttribArray = gl->glEnableVertexAttribArrayFn;
+ functions->fEndQuery = gl->glEndQueryFn;
+ functions->fFinish = gl->glFinishFn;
+ functions->fFlush = gl->glFlushFn;
+ functions->fFrontFace = gl->glFrontFaceFn;
+ functions->fGenBuffers = gl->glGenBuffersARBFn;
+ functions->fGetBufferParameteriv = gl->glGetBufferParameterivFn;
+ functions->fGetError = gl->glGetErrorFn;
+ functions->fGetIntegerv = gl->glGetIntegervFn;
+ functions->fGetMultisamplefv = gl->glGetMultisamplefvFn;
+ functions->fGetQueryObjectiv = gl->glGetQueryObjectivFn;
+ functions->fGetQueryObjectuiv = gl->glGetQueryObjectuivFn;
+ functions->fGetQueryObjecti64v = gl->glGetQueryObjecti64vFn;
+ functions->fGetQueryObjectui64v = gl->glGetQueryObjectui64vFn;
+ functions->fQueryCounter = gl->glQueryCounterFn;
+ functions->fGetQueryiv = gl->glGetQueryivFn;
+ functions->fGetProgramInfoLog = gl->glGetProgramInfoLogFn;
+ functions->fGetProgramiv = gl->glGetProgramivFn;
+ functions->fGetShaderInfoLog = gl->glGetShaderInfoLogFn;
+ functions->fGetShaderiv = gl->glGetShaderivFn;
+ functions->fGetString = get_string;
+ functions->fGetStringi = gl->glGetStringiFn;
+ functions->fGetShaderPrecisionFormat = gl->glGetShaderPrecisionFormatFn;
+ functions->fGetTexLevelParameteriv = gl->glGetTexLevelParameterivFn;
+ functions->fGenQueries = gl->glGenQueriesFn;
+ functions->fGenTextures = gl->glGenTexturesFn;
+ functions->fGetUniformLocation = gl->glGetUniformLocationFn;
+ functions->fIsTexture = gl->glIsTextureFn;
+ functions->fLineWidth = gl->glLineWidthFn;
+ functions->fLinkProgram = gl->glLinkProgramFn;
+ functions->fMapBuffer = gl->glMapBufferFn;
+
+ // GL 4.3 or GL_ARB_multi_draw_indirect or ES+GL_EXT_multi_draw_indirect
+ // functions->fMultiDrawArraysIndirect = gl->glMultiDrawArraysIndirectFn;
+ // functions->fMultiDrawElementsIndirect = gl->glMultiDrawElementsIndirectFn;
+
+ functions->fPixelStorei = gl->glPixelStoreiFn;
+ functions->fPolygonMode = gl->glPolygonModeFn;
+
+ // GL_EXT_raster_multisample
+ // functions->fRasterSamples = gl->glRasterSamplesEXTFn;
+
+ functions->fReadBuffer = gl->glReadBufferFn;
+ functions->fReadPixels = gl->glReadPixelsFn;
+ functions->fScissor = gl->glScissorFn;
+ functions->fShaderSource = gl->glShaderSourceFn;
+ functions->fStencilFunc = gl->glStencilFuncFn;
+ functions->fStencilFuncSeparate = gl->glStencilFuncSeparateFn;
+ functions->fStencilMask = gl->glStencilMaskFn;
+ functions->fStencilMaskSeparate = gl->glStencilMaskSeparateFn;
+ functions->fStencilOp = gl->glStencilOpFn;
+ functions->fStencilOpSeparate = gl->glStencilOpSeparateFn;
+ functions->fTexBuffer = gl->glTexBufferFn;
+ functions->fTexBufferRange = gl->glTexBufferRangeFn;
+ functions->fTexImage2D = gl->glTexImage2DFn;
+ functions->fTexParameteri = gl->glTexParameteriFn;
+ functions->fTexParameteriv = gl->glTexParameterivFn;
+ functions->fTexStorage2D = gl->glTexStorage2DEXTFn;
+ functions->fTexSubImage2D = gl->glTexSubImage2DFn;
+
+ // GL 4.5 or GL_ARB_texture_barrier or GL_NV_texture_barrier
+ // functions->fTextureBarrier = gl->glTextureBarrierFn;
+ // functions->fTextureBarrier = gl->glTextureBarrierNVFn;
+
+ functions->fUniform1f = gl->glUniform1fFn;
+ functions->fUniform1i = gl->glUniform1iFn;
+ functions->fUniform1fv = gl->glUniform1fvFn;
+ functions->fUniform1iv = gl->glUniform1ivFn;
+ functions->fUniform2f = gl->glUniform2fFn;
+ functions->fUniform2i = gl->glUniform2iFn;
+ functions->fUniform2fv = gl->glUniform2fvFn;
+ functions->fUniform2iv = gl->glUniform2ivFn;
+ functions->fUniform3f = gl->glUniform3fFn;
+ functions->fUniform3i = gl->glUniform3iFn;
+ functions->fUniform3fv = gl->glUniform3fvFn;
+ functions->fUniform3iv = gl->glUniform3ivFn;
+ functions->fUniform4f = gl->glUniform4fFn;
+ functions->fUniform4i = gl->glUniform4iFn;
+ functions->fUniform4fv = gl->glUniform4fvFn;
+ functions->fUniform4iv = gl->glUniform4ivFn;
+ functions->fUniformMatrix2fv = gl->glUniformMatrix2fvFn;
+ functions->fUniformMatrix3fv = gl->glUniformMatrix3fvFn;
+ functions->fUniformMatrix4fv = gl->glUniformMatrix4fvFn;
+ functions->fUnmapBuffer = gl->glUnmapBufferFn;
+ functions->fUseProgram = gl->glUseProgramFn;
+ functions->fVertexAttrib1f = gl->glVertexAttrib1fFn;
+ functions->fVertexAttrib2fv = gl->glVertexAttrib2fvFn;
+ functions->fVertexAttrib3fv = gl->glVertexAttrib3fvFn;
+ functions->fVertexAttrib4fv = gl->glVertexAttrib4fvFn;
+
+ functions->fVertexAttribDivisor = gl->glVertexAttribDivisorANGLEFn;
+
+ functions->fVertexAttribIPointer = gl->glVertexAttribIPointerFn;
+
+ functions->fVertexAttribPointer = gl->glVertexAttribPointerFn;
+ functions->fViewport = gl->glViewportFn;
+ functions->fBindFragDataLocationIndexed = gl->glBindFragDataLocationIndexedFn;
+
+ functions->fBindVertexArray = gl->glBindVertexArrayOESFn;
+ functions->fGenVertexArrays = gl->glGenVertexArraysOESFn;
+ functions->fDeleteVertexArrays = gl->glDeleteVertexArraysOESFn;
+
+ functions->fMapBufferRange = gl->glMapBufferRangeFn;
+ functions->fFlushMappedBufferRange = gl->glFlushMappedBufferRangeFn;
+
+ functions->fGenerateMipmap = gl->glGenerateMipmapEXTFn;
+ functions->fGenFramebuffers = gl->glGenFramebuffersEXTFn;
+ functions->fGetFramebufferAttachmentParameteriv =
+ gl->glGetFramebufferAttachmentParameterivEXTFn;
+ functions->fGetRenderbufferParameteriv =
+ gl->glGetRenderbufferParameterivEXTFn;
+ functions->fBindFramebuffer = gl->glBindFramebufferEXTFn;
+ functions->fFramebufferTexture2D = gl->glFramebufferTexture2DEXTFn;
+ functions->fCheckFramebufferStatus = gl->glCheckFramebufferStatusEXTFn;
+ functions->fDeleteFramebuffers = gl->glDeleteFramebuffersEXTFn;
+ functions->fRenderbufferStorage = gl->glRenderbufferStorageEXTFn;
+ functions->fGenRenderbuffers = gl->glGenRenderbuffersEXTFn;
+ functions->fDeleteRenderbuffers = gl->glDeleteRenderbuffersEXTFn;
+ functions->fFramebufferRenderbuffer = gl->glFramebufferRenderbufferEXTFn;
+ functions->fBindRenderbuffer = gl->glBindRenderbufferEXTFn;
+ functions->fRenderbufferStorageMultisample =
+ gl->glRenderbufferStorageMultisampleEXTFn;
+ functions->fBlitFramebuffer = gl->glBlitFramebufferFn;
+
+ functions->fMatrixLoadf = gl->glMatrixLoadfEXTFn;
+ functions->fMatrixLoadIdentity = gl->glMatrixLoadIdentityEXTFn;
+ functions->fPathCommands = gl->glPathCommandsNVFn;
+ functions->fPathParameteri = gl->glPathParameteriNVFn;
+ functions->fPathParameterf = gl->glPathParameterfNVFn;
+ functions->fGenPaths = gl->glGenPathsNVFn;
+ functions->fDeletePaths = gl->glDeletePathsNVFn;
+ functions->fIsPath = gl->glIsPathNVFn;
+ functions->fPathStencilFunc = gl->glPathStencilFuncNVFn;
+ functions->fStencilFillPath = gl->glStencilFillPathNVFn;
+ functions->fStencilStrokePath = gl->glStencilStrokePathNVFn;
+ functions->fStencilFillPathInstanced = gl->glStencilFillPathInstancedNVFn;
+ functions->fStencilStrokePathInstanced = gl->glStencilStrokePathInstancedNVFn;
+ functions->fCoverFillPath = gl->glCoverFillPathNVFn;
+ functions->fCoverStrokePath = gl->glCoverStrokePathNVFn;
+ functions->fCoverFillPathInstanced = gl->glCoverFillPathInstancedNVFn;
+ functions->fCoverStrokePathInstanced = gl->glCoverStrokePathInstancedNVFn;
+ functions->fStencilThenCoverFillPath = gl->glStencilThenCoverFillPathNVFn;
+ functions->fStencilThenCoverStrokePath = gl->glStencilThenCoverStrokePathNVFn;
+ functions->fStencilThenCoverFillPathInstanced =
+ gl->glStencilThenCoverFillPathInstancedNVFn;
+ functions->fStencilThenCoverStrokePathInstanced =
+ gl->glStencilThenCoverStrokePathInstancedNVFn;
+ functions->fProgramPathFragmentInputGen =
+ gl->glProgramPathFragmentInputGenNVFn;
+
+ functions->fCoverageModulation = gl->glCoverageModulationNVFn;
+
+ functions->fInsertEventMarker = gl->glInsertEventMarkerEXTFn;
+ functions->fPushGroupMarker = gl->glPushGroupMarkerEXTFn;
+ functions->fPopGroupMarker = gl->glPopGroupMarkerEXTFn;
+
+ // GL 4.3 or GL_ARB_invalidate_subdata
+ // functions->fInvalidateBufferData = gl->glInvalidateBufferDataFn;
+ // functions->fInvalidateBufferSubData = gl->glInvalidateBufferSubDataFn;
+ // functions->fInvalidateTexImage = gl->glInvalidateTexImageFn;
+ // functions->fInvalidateTexSubImage = gl->glInvalidateTexSubImageFn;
+
+ functions->fInvalidateFramebuffer = gl->glInvalidateFramebufferFn;
+ functions->fInvalidateSubFramebuffer = gl->glInvalidateSubFramebufferFn;
+
+ functions->fGetProgramResourceLocation = gl->glGetProgramResourceLocationFn;
+
+ // GL_NV_bindless_texture
+ // functions->fGetTextureHandle = gl->glGetTextureHandleNVFn;
+ // functions->fGetTextureSamplerHandle = gl->glGetTextureSamplerHandleNVFn;
+ // functions->fMakeTextureHandleResident =
+ // gl->glMakeTextureHandleResidentNVFn;
+ // functions->fMakeTextureHandleNonResident =
+ // gl->glMakeTextureHandleNonResidentNVFn;
+ // functions->fGetImageHandle = gl->glGetImageHandleNVFn;
+ // functions->fMakeImageHandleResident = gl->glMakeImageHandleResidentNVFn;
+ // functions->fMakeImageHandleNonResident =
+ // gl->glMakeImageHandleNonResidentNVFn;
+ // functions->fIsTextureHandleResident = gl->glIsTextureHandleResidentNVFn;
+ // functions->fIsImageHandleResident = gl->glIsImageHandleResidentNVFn;
+ // functions->fUniformHandleui64 = gl->glUniformHandleui64NVFn;
+ // functions->fUniformHandleui64v = gl->glUniformHandleui64vNVFn;
+ // functions->fProgramUniformHandleui64 = gl->glProgramUniformHandleui64NVFn;
+ // functions->fProgramUniformHandleui64v =
+ // gl->glProgramUniformHandleui64vNVFn;
+
+ // GL_EXT_direct_state_access
+ // functions->fTextureParameteri = gl->glTextureParameteriEXTFn;
+ // functions->fTextureParameteriv = gl->glTextureParameterivEXTFn;
+ // functions->fTextureParameterf = gl->glTextureParameterfEXTFn;
+ // functions->fTextureParameterfv = gl->glTextureParameterfvEXTFn;
+ // functions->fTextureImage1D = gl->glTextureImage1DEXTFn;
+ // functions->fTextureImage2D = gl->glTextureImage2DEXTFn;
+ // functions->fTextureSubImage1D = gl->glTextureSubImage1DEXTFn;
+ // functions->fTextureSubImage2D = gl->glTextureSubImage2DEXTFn;
+ // functions->fCopyTextureImage1D = gl->glCopyTextureImage1DEXTFn;
+ // functions->fCopyTextureImage2D = gl->glCopyTextureImage2DEXTFn;
+ // functions->fCopyTextureSubImage1D = gl->glCopyTextureSubImage1DEXTFn;
+ // functions->fCopyTextureSubImage2D = gl->glCopyTextureSubImage2DEXTFn;
+ // functions->fGetNamedBufferParameteriv =
+ // gl->glGetNamedBufferParameterivEXTFn;
+ // functions->fGetNamedBufferPointerv = gl->glGetNamedBufferPointervEXTFn;
+ // functions->fGetNamedBufferSubData = gl->glGetNamedBufferSubDataEXTFn;
+ // functions->fGetTextureImage = gl->glGetTextureImageEXTFn;
+ // functions->fGetTextureParameterfv = gl->glGetTextureParameterfvEXTFn;
+ // functions->fGetTextureParameteriv = gl->glGetTextureParameterivEXTFn;
+ // functions->fGetTextureLevelParameterfv =
+ // gl->glGetTextureLevelParameterfvEXTFn;
+ // functions->fGetTextureLevelParameteriv =
+ // gl->glGetTextureLevelParameterivEXTFn;
+ // functions->fMapNamedBuffer = gl->glMapNamedBufferEXTFn;
+ // functions->fNamedBufferData = gl->glNamedBufferDataEXTFn;
+ // functions->fNamedBufferSubData = gl->glNamedBufferSubDataEXTFn;
+ // functions->fProgramUniform1f = gl->glProgramUniform1fEXTFn;
+ // functions->fProgramUniform2f = gl->glProgramUniform2fEXTFn;
+ // functions->fProgramUniform3f = gl->glProgramUniform3fEXTFn;
+ // functions->fProgramUniform4f = gl->glProgramUniform4fEXTFn;
+ // functions->fProgramUniform1i = gl->glProgramUniform1iEXTFn;
+ // functions->fProgramUniform2i = gl->glProgramUniform2iEXTFn;
+ // functions->fProgramUniform3i = gl->glProgramUniform3iEXTFn;
+ // functions->fProgramUniform4i = gl->glProgramUniform4iEXTFn;
+ // functions->fProgramUniform1fv = gl->glProgramUniform1fvEXTFn;
+ // functions->fProgramUniform2fv = gl->glProgramUniform2fvEXTFn;
+ // functions->fProgramUniform3fv = gl->glProgramUniform3fvEXTFn;
+ // functions->fProgramUniform4fv = gl->glProgramUniform4fvEXTFn;
+ // functions->fProgramUniform1iv = gl->glProgramUniform1ivEXTFn;
+ // functions->fProgramUniform2iv = gl->glProgramUniform2ivEXTFn;
+ // functions->fProgramUniform3iv = gl->glProgramUniform3ivEXTFn;
+ // functions->fProgramUniform4iv = gl->glProgramUniform4ivEXTFn;
+ // functions->fProgramUniformMatrix2fv = gl->glProgramUniformMatrix2fvEXTFn;
+ // functions->fProgramUniformMatrix3fv = gl->glProgramUniformMatrix3fvEXTFn;
+ // functions->fProgramUniformMatrix4fv = gl->glProgramUniformMatrix4fvEXTFn;
+ // functions->fUnmapNamedBuffer = gl->glUnmapNamedBufferEXTFn;
+ // functions->fTextureImage3D = gl->glTextureImage3DEXTFn;
+ // functions->fTextureSubImage3D = gl->glTextureSubImage3DEXTFn;
+ // functions->fCopyTextureSubImage3D = gl->glCopyTextureSubImage3DEXTFn;
+ // functions->fCompressedTextureImage3D = gl->glCompressedTextureImage3DEXTFn;
+ // functions->fCompressedTextureImage2D = gl->glCompressedTextureImage2DEXTFn;
+ // functions->fCompressedTextureImage1D = gl->glCompressedTextureImage1DEXTFn;
+ // functions->fCompressedTextureSubImage3D =
+ // gl->glCompressedTextureSubImage3DEXTFn;
+ // functions->fCompressedTextureSubImage2D =
+ // gl->glCompressedTextureSubImage2DEXTFn;
+ // functions->fCompressedTextureSubImage1D =
+ // gl->glCompressedTextureSubImage1DEXTFn;
+ // functions->fGetCompressedTextureImage =
+ // gl->glGetCompressedTextureImageEXTFn;
+ // functions->fProgramUniformMatrix2x3fv =
+ // gl->glProgramUniformMatrix2x3fvEXTFn;
+ // functions->fProgramUniformMatrix3x2fv =
+ // gl->glProgramUniformMatrix3x2fvEXTFn;
+ // functions->fProgramUniformMatrix2x4fv =
+ // gl->glProgramUniformMatrix2x4fvEXTFn;
+ // functions->fProgramUniformMatrix4x2fv =
+ // gl->glProgramUniformMatrix4x2fvEXTFn;
+ // functions->fProgramUniformMatrix3x4fv =
+ // gl->glProgramUniformMatrix3x4fvEXTFn;
+ // functions->fProgramUniformMatrix4x3fv =
+ // gl->glProgramUniformMatrix4x3fvEXTFn;
+ // functions->fNamedRenderbufferStorage = gl->glNamedRenderbufferStorageEXTFn;
+ // functions->fGetNamedRenderbufferParameteriv =
+ // gl->glGetNamedRenderbufferParameterivEXTFn;
+ // functions->fNamedRenderbufferStorageMultisample =
+ // gl->glNamedRenderbufferStorageMultisampleEXTFn;
+ // functions->fCheckNamedFramebufferStatus =
+ // gl->glCheckNamedFramebufferStatusEXTFn;
+ // functions->fNamedFramebufferTexture1D =
+ // gl->glNamedFramebufferTexture1DEXTFn;
+ // functions->fNamedFramebufferTexture2D =
+ // gl->glNamedFramebufferTexture2DEXTFn;
+ // functions->fNamedFramebufferTexture3D =
+ // gl->glNamedFramebufferTexture3DEXTFn;
+ // functions->fNamedFramebufferRenderbuffer =
+ // gl->glNamedFramebufferRenderbufferEXTFn;
+ // functions->fGetNamedFramebufferAttachmentParameteriv =
+ // gl->glGetNamedFramebufferAttachmentParameterivEXTFn;
+ // functions->fGenerateTextureMipmap = gl->glGenerateTextureMipmapEXTFn;
+ // functions->fFramebufferDrawBuffer = gl->glFramebufferDrawBufferEXTFn;
+ // functions->fFramebufferDrawBuffers = gl->glFramebufferDrawBuffersEXTFn;
+ // functions->fFramebufferReadBuffer = gl->glFramebufferReadBufferEXTFn;
+ // functions->fGetFramebufferParameteriv =
+ // gl->glGetFramebufferParameterivEXTFn;
+ // functions->fNamedCopyBufferSubData = gl->glNamedCopyBufferSubDataEXTFn;
+ // functions->fVertexArrayVertexOffset = gl->glVertexArrayVertexOffsetEXTFn;
+ // functions->fVertexArrayColorOffset = gl->glVertexArrayColorOffsetEXTFn;
+ // functions->fVertexArrayEdgeFlagOffset =
+ // gl->glVertexArrayEdgeFlagOffsetEXTFn;
+ // functions->fVertexArrayIndexOffset = gl->glVertexArrayIndexOffsetEXTFn;
+ // functions->fVertexArrayNormalOffset = gl->glVertexArrayNormalOffsetEXTFn;
+ // functions->fVertexArrayTexCoordOffset =
+ // gl->glVertexArrayTexCoordOffsetEXTFn;
+ // functions->fVertexArrayMultiTexCoordOffset =
+ // gl->glVertexArrayMultiTexCoordOffsetEXTFn;
+ // functions->fVertexArrayFogCoordOffset =
+ // gl->glVertexArrayFogCoordOffsetEXTFn;
+ // functions->fVertexArraySecondaryColorOffset =
+ // gl->glVertexArraySecondaryColorOffsetEXTFn;
+ // functions->fVertexArrayVertexAttribOffset =
+ // gl->glVertexArrayVertexAttribOffsetEXTFn;
+ // functions->fVertexArrayVertexAttribIOffset =
+ // gl->glVertexArrayVertexAttribIOffsetEXTFn;
+ // functions->fEnableVertexArray = gl->glEnableVertexArrayEXTFn;
+ // functions->fDisableVertexArray = gl->glDisableVertexArrayEXTFn;
+ // functions->fEnableVertexArrayAttrib = gl->glEnableVertexArrayAttribEXTFn;
+ // functions->fDisableVertexArrayAttrib = gl->glDisableVertexArrayAttribEXTFn;
+ // functions->fGetVertexArrayIntegerv = gl->glGetVertexArrayIntegervEXTFn;
+ // functions->fGetVertexArrayPointerv = gl->glGetVertexArrayPointervEXTFn;
+ // functions->fGetVertexArrayIntegeri_v = gl->glGetVertexArrayIntegeri_vEXTFn;
+ // functions->fGetVertexArrayPointeri_v = gl->glGetVertexArrayPointeri_vEXTFn;
+ // functions->fMapNamedBufferRange = gl->glMapNamedBufferRangeEXTFn;
+ // functions->fFlushMappedNamedBufferRange =
+ // gl->glFlushMappedNamedBufferRangeEXTFn;
+ // functions->fTextureBuffer = gl->glTextureBufferEXTFn;
+
+ functions->fDebugMessageControl = gl->glDebugMessageControlFn;
+ functions->fDebugMessageInsert = gl->glDebugMessageInsertFn;
+ // TODO(piman): Our GL headers are out-of-date and define GLDEBUGPROC
+ // incorrectly wrt const-ness.
+ functions->fDebugMessageCallback =
+ reinterpret_cast<GrGLDebugMessageCallbackProc>(
+ gl->glDebugMessageCallbackFn);
+ functions->fGetDebugMessageLog =
+ reinterpret_cast<GrGLGetDebugMessageLogProc>(gl->glGetDebugMessageLogFn);
+ functions->fPushDebugGroup = gl->glPushDebugGroupFn;
+ functions->fPopDebugGroup = gl->glPopDebugGroupFn;
+ functions->fObjectLabel = gl->glObjectLabelFn;
+
+ // GL_EXT_window_rectangles
+ // functions->fWindowRectangles = gl->glWindowRectanglesEXTFn;
+
+ // EGL_KHR_image / EGL_KHR_image_base
+ // functions->fCreateImage = nullptr;
+ // functions->fDestroyImage = nullptr;
+
+ // GL 4.0 or GL_ARB_sample_shading or ES+GL_OES_sample_shading
+ // functions->fMinSampleShading = gl->glMinSampleShadingFn;
+
+ functions->fFenceSync = gl->glFenceSyncFn;
+ functions->fIsSync = gl->glIsSyncFn;
+ functions->fClientWaitSync = gl->glClientWaitSyncFn;
+ functions->fWaitSync = gl->glWaitSyncFn;
+ functions->fDeleteSync = gl->glDeleteSyncFn;
+
+ functions->fBindImageTexture = gl->glBindImageTextureEXTFn;
+ // TODO(piman): skia type is wrong.
+ functions->fMemoryBarrier =
+ reinterpret_cast<GrGLMemoryBarrierProc>(gl->glMemoryBarrierEXTFn);
+
+ // GL 4.5 or GL_ARB_ES3_1_compatibility or ES 3.1
+ // functions->fMemoryBarrierByRegion = gl->glMemoryBarrierByRegionFn;
+
+ functions->fGetInternalformativ = gl->glGetInternalformativFn;
+
+ interface->fStandard = standard;
+ interface->fExtensions.swap(&extensions);
+ sk_sp<const GrGLInterface> returned(interface);
+ return returned;
+}
+
+} // namespace gles2
+} // namespace gpu
diff --git a/chromium/gpu/command_buffer/service/create_gr_gl_interface.h b/chromium/gpu/command_buffer/service/create_gr_gl_interface.h
new file mode 100644
index 00000000000..b8feeb464a7
--- /dev/null
+++ b/chromium/gpu/command_buffer/service/create_gr_gl_interface.h
@@ -0,0 +1,25 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef GPU_COMMAND_BUFFER_SERVICE_CREATE_GR_GL_INTERFACE_H_
+#define GPU_COMMAND_BUFFER_SERVICE_CREATE_GR_GL_INTERFACE_H_
+
+#include "third_party/skia/include/gpu/gl/GrGLInterface.h"
+
+namespace gl {
+struct GLVersionInfo;
+}
+
+namespace gpu {
+namespace gles2 {
+
+// Creates a GrGLInterface by taking function pointers from the current
+// GL bindings.
+sk_sp<const GrGLInterface> CreateGrGLInterface(
+ const gl::GLVersionInfo& version_info);
+
+} // namespace gles2
+} // namespace gpu
+
+#endif // GPU_COMMAND_BUFFER_SERVICE_CREATE_GR_GL_INTERFACE_H_
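
CreateGrGLInterface() populates a GrGLInterface from the current GL bindings so the GPU service can drive Skia directly, which the raster commands added elsewhere in this patch rely on. A hedged usage sketch (GrContext::MakeGL and the source of version_info are assumptions, not shown in this patch):

  // version_info would typically come from the decoder's FeatureInfo.
  sk_sp<const GrGLInterface> interface =
      gpu::gles2::CreateGrGLInterface(version_info);
  if (!interface)
    return;  // extension query failed, see the LOG(ERROR) path above
  sk_sp<GrContext> gr_context = GrContext::MakeGL(std::move(interface));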
diff --git a/chromium/gpu/command_buffer/service/disk_cache_proto.proto b/chromium/gpu/command_buffer/service/disk_cache_proto.proto
index b1f47fe5238..784bf4ee63a 100644
--- a/chromium/gpu/command_buffer/service/disk_cache_proto.proto
+++ b/chromium/gpu/command_buffer/service/disk_cache_proto.proto
@@ -61,6 +61,8 @@ message GpuProgramProto {
optional bytes sha = 1;
optional uint32 format = 2;
optional bytes program = 3;
+ optional bool program_is_compressed = 6;
+ optional uint32 program_decompressed_length = 7;
optional ShaderProto vertex_shader = 4;
optional ShaderProto fragment_shader = 5;
diff --git a/chromium/gpu/command_buffer/service/feature_info.cc b/chromium/gpu/command_buffer/service/feature_info.cc
index 5682264773c..f038996b228 100644
--- a/chromium/gpu/command_buffer/service/feature_info.cc
+++ b/chromium/gpu/command_buffer/service/feature_info.cc
@@ -39,49 +39,6 @@ struct FormatInfo {
} // anonymous namespace.
-class FeatureInfo::StringSet {
- public:
- StringSet() {}
-
- StringSet(const char* s) {
- Init(s);
- }
-
- StringSet(const std::string& str) {
- Init(str);
- }
-
- StringSet(const std::vector<std::string>& strs) {
- string_set_.insert(strs.begin(), strs.end());
- }
-
- void Init(const char* s) {
- std::string str(s ? s : "");
- Init(str);
- }
-
- void Init(const std::string& str) {
- std::vector<std::string> tokens = base::SplitString(
- str, " ", base::KEEP_WHITESPACE, base::SPLIT_WANT_NONEMPTY);
- string_set_.insert(tokens.begin(), tokens.end());
- }
-
- bool Contains(const char* s) const {
- return string_set_.find(s) != string_set_.end();
- }
-
- bool Contains(const std::string& s) const {
- return string_set_.find(s) != string_set_.end();
- }
-
- const std::set<std::string>& GetImpl() {
- return string_set_;
- }
-
- private:
- std::set<std::string> string_set_;
-};
-
namespace {
class ScopedPixelUnpackBufferOverride {
@@ -236,13 +193,6 @@ FeatureInfo::FeatureInfo(
: nullptr);
}
-FeatureInfo::FeatureInfo(
- const base::CommandLine& command_line,
- const GpuDriverBugWorkarounds& gpu_driver_bug_workarounds)
- : workarounds_(gpu_driver_bug_workarounds) {
- InitializeBasicState(&command_line);
-}
-
void FeatureInfo::InitializeBasicState(const base::CommandLine* command_line) {
if (!command_line)
return;
@@ -254,6 +204,13 @@ void FeatureInfo::InitializeBasicState(const base::CommandLine* command_line) {
(command_line->GetSwitchValueASCII(switches::kUseGL) ==
gl::kGLImplementationSwiftShaderForWebGLName);
+ feature_flags_.is_swiftshader =
+ (command_line->GetSwitchValueASCII(switches::kUseGL) ==
+ gl::kGLImplementationSwiftShaderName);
+
+ feature_flags_.chromium_raster_transport =
+ command_line->HasSwitch(switches::kEnableOOPRasterization);
+
// The shader translator is needed to translate from WebGL-conformant GLES SL
// to normal GLES SL, enforce WebGL conformance, translate from GLES SL 1.0 to
// target context GLSL, implement emulation of OpenGL ES features on OpenGL,
@@ -405,7 +362,8 @@ void FeatureInfo::EnableOESTextureHalfFloatLinear() {
void FeatureInfo::InitializeFeatures() {
// Figure out what extensions to turn on.
- StringSet extensions(gl::GetGLExtensionsFromCurrentContext());
+ std::string extensions_string(gl::GetGLExtensionsFromCurrentContext());
+ gl::ExtensionSet extensions(gl::MakeExtensionSet(extensions_string));
const char* version_str =
reinterpret_cast<const char*>(glGetString(GL_VERSION));
@@ -413,7 +371,7 @@ void FeatureInfo::InitializeFeatures() {
reinterpret_cast<const char*>(glGetString(GL_RENDERER));
gl_version_info_.reset(
- new gl::GLVersionInfo(version_str, renderer_str, extensions.GetImpl()));
+ new gl::GLVersionInfo(version_str, renderer_str, extensions));
bool enable_es3 = IsWebGL2OrES3Context();
@@ -451,7 +409,7 @@ void FeatureInfo::InitializeFeatures() {
if (!disallowed_features_.gpu_memory_manager)
AddExtensionString("GL_CHROMIUM_gpu_memory_manager");
- if (extensions.Contains("GL_ANGLE_translated_shader_source")) {
+ if (gl::HasExtension(extensions, "GL_ANGLE_translated_shader_source")) {
feature_flags_.angle_translated_shader_source = true;
}
@@ -460,13 +418,17 @@ void FeatureInfo::InitializeFeatures() {
bool enable_dxt1 = false;
bool enable_dxt3 = false;
bool enable_dxt5 = false;
- bool have_s3tc = extensions.Contains("GL_EXT_texture_compression_s3tc");
+ bool have_s3tc =
+ gl::HasExtension(extensions, "GL_EXT_texture_compression_s3tc");
bool have_dxt3 =
- have_s3tc || extensions.Contains("GL_ANGLE_texture_compression_dxt3");
+ have_s3tc ||
+ gl::HasExtension(extensions, "GL_ANGLE_texture_compression_dxt3");
bool have_dxt5 =
- have_s3tc || extensions.Contains("GL_ANGLE_texture_compression_dxt5");
+ have_s3tc ||
+ gl::HasExtension(extensions, "GL_ANGLE_texture_compression_dxt5");
- if (extensions.Contains("GL_EXT_texture_compression_dxt1") || have_s3tc) {
+ if (gl::HasExtension(extensions, "GL_EXT_texture_compression_dxt1") ||
+ have_s3tc) {
enable_dxt1 = true;
}
if (have_dxt3) {
@@ -515,7 +477,8 @@ void FeatureInfo::InitializeFeatures() {
GL_COMPRESSED_RGBA_S3TC_DXT5_EXT);
}
- bool have_astc = extensions.Contains("GL_KHR_texture_compression_astc_ldr");
+ bool have_astc =
+ gl::HasExtension(extensions, "GL_KHR_texture_compression_astc_ldr");
if (have_astc) {
feature_flags_.ext_texture_format_astc = true;
AddExtensionString("GL_KHR_texture_compression_astc_ldr");
@@ -523,18 +486,23 @@ void FeatureInfo::InitializeFeatures() {
// GL_COMPRESSED_RGBA_ASTC(0x93B0 ~ 0x93BD)
GLint astc_format_it = GL_COMPRESSED_RGBA_ASTC_4x4_KHR;
GLint astc_format_max = GL_COMPRESSED_RGBA_ASTC_12x12_KHR;
- for (; astc_format_it <= astc_format_max; astc_format_it++)
- validators_.compressed_texture_format.AddValue(astc_format_it);
+ for (; astc_format_it <= astc_format_max; astc_format_it++) {
+ validators_.compressed_texture_format.AddValue(astc_format_it);
+ validators_.texture_internal_format_storage.AddValue(astc_format_it);
+ }
// GL_COMPRESSED_SRGB8_ALPHA8_ASTC(0x93D0 ~ 0x93DD)
astc_format_it = GL_COMPRESSED_SRGB8_ALPHA8_ASTC_4x4_KHR;
astc_format_max = GL_COMPRESSED_SRGB8_ALPHA8_ASTC_12x12_KHR;
- for (; astc_format_it <= astc_format_max; astc_format_it++)
- validators_.compressed_texture_format.AddValue(astc_format_it);
+ for (; astc_format_it <= astc_format_max; astc_format_it++) {
+ validators_.compressed_texture_format.AddValue(astc_format_it);
+ validators_.texture_internal_format_storage.AddValue(astc_format_it);
+ }
}
- bool have_atc = extensions.Contains("GL_AMD_compressed_ATC_texture") ||
- extensions.Contains("GL_ATI_texture_compression_atitc");
+ bool have_atc =
+ gl::HasExtension(extensions, "GL_AMD_compressed_ATC_texture") ||
+ gl::HasExtension(extensions, "GL_ATI_texture_compression_atitc");
if (have_atc) {
feature_flags_.ext_texture_format_atc = true;
@@ -549,7 +517,7 @@ void FeatureInfo::InitializeFeatures() {
}
// Check if we should enable GL_EXT_texture_filter_anisotropic.
- if (extensions.Contains("GL_EXT_texture_filter_anisotropic")) {
+ if (gl::HasExtension(extensions, "GL_EXT_texture_filter_anisotropic")) {
AddExtensionString("GL_EXT_texture_filter_anisotropic");
validators_.texture_parameter.AddValue(
GL_TEXTURE_MAX_ANISOTROPY_EXT);
@@ -572,9 +540,9 @@ void FeatureInfo::InitializeFeatures() {
bool enable_depth_texture = false;
GLenum depth_texture_format = GL_NONE;
if (!workarounds_.disable_depth_texture &&
- (extensions.Contains("GL_ARB_depth_texture") ||
- extensions.Contains("GL_OES_depth_texture") ||
- extensions.Contains("GL_ANGLE_depth_texture") ||
+ (gl::HasExtension(extensions, "GL_ARB_depth_texture") ||
+ gl::HasExtension(extensions, "GL_OES_depth_texture") ||
+ gl::HasExtension(extensions, "GL_ANGLE_depth_texture") ||
gl_version_info_->is_desktop_core_profile)) {
    // Note that we don't expose depth_texture extension on top of ES3 if
// the depth_texture extension isn't exposed by the ES3 driver.
@@ -583,7 +551,7 @@ void FeatureInfo::InitializeFeatures() {
enable_depth_texture = true;
depth_texture_format = GL_DEPTH_COMPONENT;
feature_flags_.angle_depth_texture =
- extensions.Contains("GL_ANGLE_depth_texture");
+ gl::HasExtension(extensions, "GL_ANGLE_depth_texture");
}
if (enable_depth_texture) {
@@ -598,10 +566,9 @@ void FeatureInfo::InitializeFeatures() {
}
GLenum depth_stencil_texture_format = GL_NONE;
- if (extensions.Contains("GL_EXT_packed_depth_stencil") ||
- extensions.Contains("GL_OES_packed_depth_stencil") ||
- gl_version_info_->is_es3 ||
- gl_version_info_->is_desktop_core_profile) {
+ if (gl::HasExtension(extensions, "GL_EXT_packed_depth_stencil") ||
+ gl::HasExtension(extensions, "GL_OES_packed_depth_stencil") ||
+ gl_version_info_->is_es3 || gl_version_info_->is_desktop_core_profile) {
AddExtensionString("GL_OES_packed_depth_stencil");
feature_flags_.packed_depth24_stencil8 = true;
if (enable_depth_texture) {
@@ -631,11 +598,10 @@ void FeatureInfo::InitializeFeatures() {
}
}
- if (gl_version_info_->is_es3 ||
- gl_version_info_->is_desktop_core_profile ||
- extensions.Contains("GL_OES_vertex_array_object") ||
- extensions.Contains("GL_ARB_vertex_array_object") ||
- extensions.Contains("GL_APPLE_vertex_array_object")) {
+ if (gl_version_info_->is_es3 || gl_version_info_->is_desktop_core_profile ||
+ gl::HasExtension(extensions, "GL_OES_vertex_array_object") ||
+ gl::HasExtension(extensions, "GL_ARB_vertex_array_object") ||
+ gl::HasExtension(extensions, "GL_APPLE_vertex_array_object")) {
feature_flags_.native_vertex_array_object = true;
}
@@ -647,7 +613,7 @@ void FeatureInfo::InitializeFeatures() {
}
if (gl_version_info_->is_es3 ||
- extensions.Contains("GL_OES_element_index_uint") ||
+ gl::HasExtension(extensions, "GL_OES_element_index_uint") ||
gl::HasDesktopGLFeatures()) {
AddExtensionString("GL_OES_element_index_uint");
validators_.index_type.AddValue(GL_UNSIGNED_INT);
@@ -656,8 +622,8 @@ void FeatureInfo::InitializeFeatures() {
bool has_srgb_framebuffer_support = false;
if (gl_version_info_->IsAtLeastGL(3, 2) ||
(gl_version_info_->IsAtLeastGL(2, 0) &&
- (extensions.Contains("GL_EXT_framebuffer_sRGB") ||
- extensions.Contains("GL_ARB_framebuffer_sRGB")))) {
+ (gl::HasExtension(extensions, "GL_EXT_framebuffer_sRGB") ||
+ gl::HasExtension(extensions, "GL_ARB_framebuffer_sRGB")))) {
feature_flags_.desktop_srgb_support = true;
has_srgb_framebuffer_support = true;
}
@@ -668,10 +634,10 @@ void FeatureInfo::InitializeFeatures() {
// <format> in this case. So, even with GLES3 explicitly check for
// GL_EXT_sRGB.
if ((((gl_version_info_->is_es3 ||
- extensions.Contains("GL_OES_rgb8_rgba8")) &&
- extensions.Contains("GL_EXT_sRGB")) ||
+ gl::HasExtension(extensions, "GL_OES_rgb8_rgba8")) &&
+ gl::HasExtension(extensions, "GL_EXT_sRGB")) ||
feature_flags_.desktop_srgb_support) &&
- IsWebGL1OrES2Context()) {
+ IsWebGL1OrES2Context()) {
feature_flags_.ext_srgb = true;
AddExtensionString("GL_EXT_sRGB");
validators_.texture_internal_format.AddValue(GL_SRGB_EXT);
@@ -694,7 +660,7 @@ void FeatureInfo::InitializeFeatures() {
// and the desktop extension GL_ARB_framebuffer_sRGB (part of the core in
// 3.0).
if (feature_flags_.desktop_srgb_support ||
- extensions.Contains("GL_EXT_sRGB_write_control")) {
+ gl::HasExtension(extensions, "GL_EXT_sRGB_write_control")) {
feature_flags_.ext_srgb_write_control = true;
AddExtensionString("GL_EXT_sRGB_write_control");
validators_.capability.AddValue(GL_FRAMEBUFFER_SRGB_EXT);
@@ -702,7 +668,8 @@ void FeatureInfo::InitializeFeatures() {
}
// The extension GL_EXT_texture_sRGB_decode is the same on desktop and GLES.
- if (extensions.Contains("GL_EXT_texture_sRGB_decode") && !IsWebGLContext()) {
+ if (gl::HasExtension(extensions, "GL_EXT_texture_sRGB_decode") &&
+ !IsWebGLContext()) {
AddExtensionString("GL_EXT_texture_sRGB_decode");
validators_.texture_parameter.AddValue(GL_TEXTURE_SRGB_DECODE_EXT);
}
@@ -712,17 +679,19 @@ void FeatureInfo::InitializeFeatures() {
// On mobile, the only extension that supports S3TC+sRGB is NV_sRGB_formats.
// The draft extension EXT_texture_compression_s3tc_srgb also supports it
// and is used if available (e.g. if ANGLE exposes it).
- have_s3tc_srgb = extensions.Contains("GL_NV_sRGB_formats") ||
- extensions.Contains("GL_EXT_texture_compression_s3tc_srgb");
+ have_s3tc_srgb =
+ gl::HasExtension(extensions, "GL_NV_sRGB_formats") ||
+ gl::HasExtension(extensions, "GL_EXT_texture_compression_s3tc_srgb");
} else {
// On desktop, strictly-speaking, S3TC+sRGB is only available if both
// EXT_texture_sRGB and EXT_texture_compression_s3tc_srgb are available.
//
// However, on macOS, S3TC+sRGB is supported on OpenGL 4.1 with only
// EXT_texture_compression_s3tc_srgb, so we allow that as well.
- if (extensions.Contains("GL_EXT_texture_sRGB") ||
+ if (gl::HasExtension(extensions, "GL_EXT_texture_sRGB") ||
gl_version_info_->IsAtLeastGL(4, 1)) {
- have_s3tc_srgb = extensions.Contains("GL_EXT_texture_compression_s3tc");
+ have_s3tc_srgb =
+ gl::HasExtension(extensions, "GL_EXT_texture_compression_s3tc");
}
}
@@ -762,13 +731,17 @@ void FeatureInfo::InitializeFeatures() {
  // In WebGL contexts, BGRA is used for hardware overlay and WebGL 2.0 exposes
  // glTexStorage2D. WebGL never uses both BGRA and glTexStorage2D together
  // because the WebGL API doesn't expose the BGRA format. So allow both.
- bool has_apple_bgra = extensions.Contains("GL_APPLE_texture_format_BGRA8888");
- bool has_ext_bgra = extensions.Contains("GL_EXT_texture_format_BGRA8888");
+ bool has_apple_bgra =
+ gl::HasExtension(extensions, "GL_APPLE_texture_format_BGRA8888");
+ bool has_ext_bgra =
+ gl::HasExtension(extensions, "GL_EXT_texture_format_BGRA8888");
bool enable_texture_format_bgra8888 =
has_ext_bgra || has_apple_bgra || !gl_version_info_->is_es;
- bool has_ext_texture_storage = extensions.Contains("GL_EXT_texture_storage");
- bool has_arb_texture_storage = extensions.Contains("GL_ARB_texture_storage");
+ bool has_ext_texture_storage =
+ gl::HasExtension(extensions, "GL_EXT_texture_storage");
+ bool has_arb_texture_storage =
+ gl::HasExtension(extensions, "GL_ARB_texture_storage");
bool has_texture_storage =
!workarounds_.disable_texture_storage &&
(has_ext_texture_storage || has_arb_texture_storage ||
@@ -831,7 +804,7 @@ void FeatureInfo::InitializeFeatures() {
// require. On ES, support is indicated by the GL_EXT_read_format_bgra
// extension.
bool enable_read_format_bgra =
- extensions.Contains("GL_EXT_read_format_bgra") ||
+ gl::HasExtension(extensions, "GL_EXT_read_format_bgra") ||
!gl_version_info_->is_es;
if (enable_read_format_bgra) {
@@ -843,7 +816,7 @@ void FeatureInfo::InitializeFeatures() {
// GL_ARB_ES3_compatibility adds support for some ES3 texture formats that are
// not supported in desktop GL
feature_flags_.arb_es3_compatibility =
- extensions.Contains("GL_ARB_ES3_compatibility") &&
+ gl::HasExtension(extensions, "GL_ARB_ES3_compatibility") &&
!gl_version_info_->is_es;
// glGetInteger64v for timestamps is implemented on the client side in a way
@@ -853,14 +826,15 @@ void FeatureInfo::InitializeFeatures() {
// ES2. Thus we can enable GL_EXT_disjoint_timer_query on ES2 contexts even
// though it does not support glGetInteger64v due to a specification bug.
feature_flags_.ext_disjoint_timer_query =
- extensions.Contains("GL_EXT_disjoint_timer_query");
+ gl::HasExtension(extensions, "GL_EXT_disjoint_timer_query");
if (feature_flags_.ext_disjoint_timer_query ||
- extensions.Contains("GL_ARB_timer_query") ||
- extensions.Contains("GL_EXT_timer_query")) {
+ gl::HasExtension(extensions, "GL_ARB_timer_query") ||
+ gl::HasExtension(extensions, "GL_EXT_timer_query")) {
AddExtensionString("GL_EXT_disjoint_timer_query");
}
- if (extensions.Contains("GL_OES_rgb8_rgba8") || gl::HasDesktopGLFeatures()) {
+ if (gl::HasExtension(extensions, "GL_OES_rgb8_rgba8") ||
+ gl::HasDesktopGLFeatures()) {
AddExtensionString("GL_OES_rgb8_rgba8");
validators_.render_buffer_format.AddValue(GL_RGB8_OES);
validators_.render_buffer_format.AddValue(GL_RGBA8_OES);
@@ -868,10 +842,9 @@ void FeatureInfo::InitializeFeatures() {
// Check if we should allow GL_OES_texture_npot
if (!disallowed_features_.npot_support &&
- (gl_version_info_->is_es3 ||
- gl_version_info_->is_desktop_core_profile ||
- extensions.Contains("GL_ARB_texture_non_power_of_two") ||
- extensions.Contains("GL_OES_texture_npot"))) {
+ (gl_version_info_->is_es3 || gl_version_info_->is_desktop_core_profile ||
+ gl::HasExtension(extensions, "GL_ARB_texture_non_power_of_two") ||
+ gl::HasExtension(extensions, "GL_OES_texture_npot"))) {
AddExtensionString("GL_OES_texture_npot");
feature_flags_.npot_ok = true;
}
@@ -881,12 +854,11 @@ void FeatureInfo::InitializeFeatures() {
// Check for multisample support
if (!workarounds_.disable_chromium_framebuffer_multisample) {
bool ext_has_multisample =
- extensions.Contains("GL_EXT_framebuffer_multisample") ||
- gl_version_info_->is_es3 ||
- gl_version_info_->is_desktop_core_profile;
+ gl::HasExtension(extensions, "GL_EXT_framebuffer_multisample") ||
+ gl_version_info_->is_es3 || gl_version_info_->is_desktop_core_profile;
if (gl_version_info_->is_angle || gl_version_info_->is_swiftshader) {
feature_flags_.angle_framebuffer_multisample =
- extensions.Contains("GL_ANGLE_framebuffer_multisample");
+ gl::HasExtension(extensions, "GL_ANGLE_framebuffer_multisample");
ext_has_multisample |= feature_flags_.angle_framebuffer_multisample;
}
feature_flags_.use_core_framebuffer_multisample =
@@ -902,9 +874,10 @@ void FeatureInfo::InitializeFeatures() {
}
}
- if (extensions.Contains("GL_EXT_multisampled_render_to_texture")) {
+ if (gl::HasExtension(extensions, "GL_EXT_multisampled_render_to_texture")) {
feature_flags_.multisampled_render_to_texture = true;
- } else if (extensions.Contains("GL_IMG_multisampled_render_to_texture")) {
+ } else if (gl::HasExtension(extensions,
+ "GL_IMG_multisampled_render_to_texture")) {
feature_flags_.multisampled_render_to_texture = true;
feature_flags_.use_img_for_multisampled_render_to_texture = true;
}
@@ -918,39 +891,43 @@ void FeatureInfo::InitializeFeatures() {
}
if (!gl_version_info_->is_es ||
- extensions.Contains("GL_EXT_multisample_compatibility")) {
+ gl::HasExtension(extensions, "GL_EXT_multisample_compatibility")) {
AddExtensionString("GL_EXT_multisample_compatibility");
feature_flags_.ext_multisample_compatibility = true;
validators_.capability.AddValue(GL_MULTISAMPLE_EXT);
validators_.capability.AddValue(GL_SAMPLE_ALPHA_TO_ONE_EXT);
}
- if (extensions.Contains("GL_INTEL_framebuffer_CMAA")) {
+ if (gl::HasExtension(extensions, "GL_INTEL_framebuffer_CMAA")) {
feature_flags_.chromium_screen_space_antialiasing = true;
AddExtensionString("GL_CHROMIUM_screen_space_antialiasing");
} else if (!workarounds_.disable_framebuffer_cmaa &&
(gl_version_info_->IsAtLeastGLES(3, 1) ||
(gl_version_info_->IsAtLeastGL(3, 0) &&
- extensions.Contains("GL_ARB_shading_language_420pack") &&
- extensions.Contains("GL_ARB_texture_storage") &&
- extensions.Contains("GL_ARB_texture_gather") &&
- extensions.Contains("GL_ARB_explicit_uniform_location") &&
- extensions.Contains("GL_ARB_explicit_attrib_location") &&
- extensions.Contains("GL_ARB_shader_image_load_store")))) {
+ gl::HasExtension(extensions,
+ "GL_ARB_shading_language_420pack") &&
+ gl::HasExtension(extensions, "GL_ARB_texture_storage") &&
+ gl::HasExtension(extensions, "GL_ARB_texture_gather") &&
+ gl::HasExtension(extensions,
+ "GL_ARB_explicit_uniform_location") &&
+ gl::HasExtension(extensions,
+ "GL_ARB_explicit_attrib_location") &&
+ gl::HasExtension(extensions,
+ "GL_ARB_shader_image_load_store")))) {
feature_flags_.chromium_screen_space_antialiasing = true;
feature_flags_.use_chromium_screen_space_antialiasing_via_shaders = true;
AddExtensionString("GL_CHROMIUM_screen_space_antialiasing");
}
- if (extensions.Contains("GL_OES_depth24") || gl::HasDesktopGLFeatures() ||
- gl_version_info_->is_es3) {
+ if (gl::HasExtension(extensions, "GL_OES_depth24") ||
+ gl::HasDesktopGLFeatures() || gl_version_info_->is_es3) {
AddExtensionString("GL_OES_depth24");
feature_flags_.oes_depth24 = true;
validators_.render_buffer_format.AddValue(GL_DEPTH_COMPONENT24);
}
if (gl_version_info_->is_es3 ||
- extensions.Contains("GL_OES_standard_derivatives") ||
+ gl::HasExtension(extensions, "GL_OES_standard_derivatives") ||
gl::HasDesktopGLFeatures()) {
AddExtensionString("GL_OES_standard_derivatives");
feature_flags_.oes_standard_derivatives = true;
@@ -958,11 +935,11 @@ void FeatureInfo::InitializeFeatures() {
validators_.g_l_state.AddValue(GL_FRAGMENT_SHADER_DERIVATIVE_HINT_OES);
}
- if (extensions.Contains("GL_OES_EGL_image_external")) {
+ if (gl::HasExtension(extensions, "GL_OES_EGL_image_external")) {
AddExtensionString("GL_OES_EGL_image_external");
feature_flags_.oes_egl_image_external = true;
}
- if (extensions.Contains("GL_NV_EGL_stream_consumer_external")) {
+ if (gl::HasExtension(extensions, "GL_NV_EGL_stream_consumer_external")) {
AddExtensionString("GL_NV_EGL_stream_consumer_external");
feature_flags_.nv_egl_stream_consumer_external = true;
}
@@ -978,7 +955,7 @@ void FeatureInfo::InitializeFeatures() {
// TODO(kainino): If we add a way to query whether ANGLE is exposing
// native support for ETC1 textures, require that here. Otherwise, we could
// co-opt the native-ETC2-support query discussed below.
- if (extensions.Contains("GL_OES_compressed_ETC1_RGB8_texture") &&
+ if (gl::HasExtension(extensions, "GL_OES_compressed_ETC1_RGB8_texture") &&
!gl_version_info_->is_angle) {
AddExtensionString("GL_OES_compressed_ETC1_RGB8_texture");
feature_flags_.oes_compressed_etc1_rgb8_texture = true;
@@ -993,7 +970,7 @@ void FeatureInfo::InitializeFeatures() {
validators_.UpdateETCCompressedTextureFormats();
}
- if (extensions.Contains("GL_AMD_compressed_ATC_texture")) {
+ if (gl::HasExtension(extensions, "GL_AMD_compressed_ATC_texture")) {
AddExtensionString("GL_AMD_compressed_ATC_texture");
validators_.compressed_texture_format.AddValue(
GL_ATC_RGB_AMD);
@@ -1010,7 +987,7 @@ void FeatureInfo::InitializeFeatures() {
GL_ATC_RGBA_INTERPOLATED_ALPHA_AMD);
}
- if (extensions.Contains("GL_IMG_texture_compression_pvrtc")) {
+ if (gl::HasExtension(extensions, "GL_IMG_texture_compression_pvrtc")) {
AddExtensionString("GL_IMG_texture_compression_pvrtc");
validators_.compressed_texture_format.AddValue(
GL_COMPRESSED_RGB_PVRTC_4BPPV1_IMG);
@@ -1035,7 +1012,8 @@ void FeatureInfo::InitializeFeatures() {
// IOSurface backed textures. We don't want applications to start using it;
// they should use ordinary non-power-of-two textures. However, for unit
// testing purposes we expose it on all supported platforms.
- if (extensions.Contains("GL_ARB_texture_rectangle") ||
+ if (gl::HasExtension(extensions, "GL_ARB_texture_rectangle") ||
+ gl::HasExtension(extensions, "GL_ANGLE_texture_rectangle") ||
gl_version_info_->is_desktop_core_profile) {
AddExtensionString("GL_ARB_texture_rectangle");
feature_flags_.arb_texture_rectangle = true;
@@ -1057,7 +1035,7 @@ void FeatureInfo::InitializeFeatures() {
}
#endif
- if (extensions.Contains("GL_APPLE_ycbcr_422")) {
+ if (gl::HasExtension(extensions, "GL_APPLE_ycbcr_422")) {
AddExtensionString("GL_CHROMIUM_ycbcr_422_image");
feature_flags_.chromium_image_ycbcr_422 = true;
}
@@ -1065,7 +1043,7 @@ void FeatureInfo::InitializeFeatures() {
// TODO(gman): Add support for these extensions.
// GL_OES_depth32
- if (extensions.Contains("GL_ANGLE_texture_usage")) {
+ if (gl::HasExtension(extensions, "GL_ANGLE_texture_usage")) {
feature_flags_.angle_texture_usage = true;
AddExtensionString("GL_ANGLE_texture_usage");
validators_.texture_parameter.AddValue(GL_TEXTURE_USAGE_ANGLE);
@@ -1075,13 +1053,13 @@ void FeatureInfo::InitializeFeatures() {
gl_version_info_->IsAtLeastGLES(3, 0) ||
gl_version_info_->IsAtLeastGL(3, 3);
bool have_ext_occlusion_query_boolean =
- extensions.Contains("GL_EXT_occlusion_query_boolean");
+ gl::HasExtension(extensions, "GL_EXT_occlusion_query_boolean");
bool have_arb_occlusion_query2 =
- extensions.Contains("GL_ARB_occlusion_query2");
+ gl::HasExtension(extensions, "GL_ARB_occlusion_query2");
bool have_arb_occlusion_query =
(gl_version_info_->is_desktop_core_profile &&
gl_version_info_->IsAtLeastGL(1, 5)) ||
- extensions.Contains("GL_ARB_occlusion_query");
+ gl::HasExtension(extensions, "GL_ARB_occlusion_query");
if (have_occlusion_query ||
have_ext_occlusion_query_boolean ||
@@ -1102,11 +1080,10 @@ void FeatureInfo::InitializeFeatures() {
}
if (!workarounds_.disable_angle_instanced_arrays &&
- (extensions.Contains("GL_ANGLE_instanced_arrays") ||
- (extensions.Contains("GL_ARB_instanced_arrays") &&
- extensions.Contains("GL_ARB_draw_instanced")) ||
- gl_version_info_->is_es3 ||
- gl_version_info_->is_desktop_core_profile)) {
+ (gl::HasExtension(extensions, "GL_ANGLE_instanced_arrays") ||
+ (gl::HasExtension(extensions, "GL_ARB_instanced_arrays") &&
+ gl::HasExtension(extensions, "GL_ARB_draw_instanced")) ||
+ gl_version_info_->is_es3 || gl_version_info_->is_desktop_core_profile)) {
AddExtensionString("GL_ANGLE_instanced_arrays");
feature_flags_.angle_instanced_arrays = true;
validators_.vertex_attribute.AddValue(GL_VERTEX_ATTRIB_ARRAY_DIVISOR_ANGLE);
@@ -1114,12 +1091,13 @@ void FeatureInfo::InitializeFeatures() {
bool have_es2_draw_buffers_vendor_agnostic =
gl_version_info_->is_desktop_core_profile ||
- extensions.Contains("GL_ARB_draw_buffers") ||
- extensions.Contains("GL_EXT_draw_buffers");
+ gl::HasExtension(extensions, "GL_ARB_draw_buffers") ||
+ gl::HasExtension(extensions, "GL_EXT_draw_buffers");
bool can_emulate_es2_draw_buffers_on_es3_nv =
- gl_version_info_->is_es3 && extensions.Contains("GL_NV_draw_buffers");
+ gl_version_info_->is_es3 &&
+ gl::HasExtension(extensions, "GL_NV_draw_buffers");
bool is_webgl_compatbility_context =
- extensions.Contains("GL_ANGLE_webgl_compatibility");
+ gl::HasExtension(extensions, "GL_ANGLE_webgl_compatibility");
bool have_es2_draw_buffers =
!workarounds_.disable_ext_draw_buffers &&
(have_es2_draw_buffers_vendor_agnostic ||
@@ -1168,7 +1146,8 @@ void FeatureInfo::InitializeFeatures() {
}
}
- if (gl_version_info_->is_es3 || extensions.Contains("GL_EXT_blend_minmax") ||
+ if (gl_version_info_->is_es3 ||
+ gl::HasExtension(extensions, "GL_EXT_blend_minmax") ||
gl::HasDesktopGLFeatures()) {
AddExtensionString("GL_EXT_blend_minmax");
validators_.equation.AddValue(GL_MIN_EXT);
@@ -1178,12 +1157,13 @@ void FeatureInfo::InitializeFeatures() {
}
// TODO(dshwang): GLES3 supports gl_FragDepth, not gl_FragDepthEXT.
- if (extensions.Contains("GL_EXT_frag_depth") || gl::HasDesktopGLFeatures()) {
+ if (gl::HasExtension(extensions, "GL_EXT_frag_depth") ||
+ gl::HasDesktopGLFeatures()) {
AddExtensionString("GL_EXT_frag_depth");
feature_flags_.ext_frag_depth = true;
}
- if (extensions.Contains("GL_EXT_shader_texture_lod") ||
+ if (gl::HasExtension(extensions, "GL_EXT_shader_texture_lod") ||
gl::HasDesktopGLFeatures()) {
AddExtensionString("GL_EXT_shader_texture_lod");
feature_flags_.ext_shader_texture_lod = true;
@@ -1193,18 +1173,16 @@ void FeatureInfo::InitializeFeatures() {
UMA_HISTOGRAM_BOOLEAN("GPU.FenceSupport", ui_gl_fence_works);
feature_flags_.map_buffer_range =
- gl_version_info_->is_es3 ||
- gl_version_info_->is_desktop_core_profile ||
- extensions.Contains("GL_ARB_map_buffer_range") ||
- extensions.Contains("GL_EXT_map_buffer_range");
+ gl_version_info_->is_es3 || gl_version_info_->is_desktop_core_profile ||
+ gl::HasExtension(extensions, "GL_ARB_map_buffer_range") ||
+ gl::HasExtension(extensions, "GL_EXT_map_buffer_range");
// Really it's part of core OpenGL 2.1 and up, but let's assume the
// extension is still advertised.
bool has_pixel_buffers =
- gl_version_info_->is_es3 ||
- gl_version_info_->is_desktop_core_profile ||
- extensions.Contains("GL_ARB_pixel_buffer_object") ||
- extensions.Contains("GL_NV_pixel_buffer_object");
+ gl_version_info_->is_es3 || gl_version_info_->is_desktop_core_profile ||
+ gl::HasExtension(extensions, "GL_ARB_pixel_buffer_object") ||
+ gl::HasExtension(extensions, "GL_NV_pixel_buffer_object");
// We will use either glMapBuffer() or glMapBufferRange() for async readbacks.
if (has_pixel_buffers && ui_gl_fence_works &&
@@ -1213,14 +1191,14 @@ void FeatureInfo::InitializeFeatures() {
}
if (gl_version_info_->is_es3 ||
- extensions.Contains("GL_ARB_sampler_objects")) {
+ gl::HasExtension(extensions, "GL_ARB_sampler_objects")) {
feature_flags_.enable_samplers = true;
// TODO(dsinclair): Add AddExtensionString("GL_CHROMIUM_sampler_objects")
// when available.
}
if ((gl_version_info_->is_es3 ||
- extensions.Contains("GL_EXT_discard_framebuffer")) &&
+ gl::HasExtension(extensions, "GL_EXT_discard_framebuffer")) &&
!workarounds_.disable_discard_framebuffer) {
// DiscardFramebufferEXT is automatically bound to InvalidateFramebuffer.
AddExtensionString("GL_EXT_discard_framebuffer");
@@ -1234,12 +1212,13 @@ void FeatureInfo::InitializeFeatures() {
if (!workarounds_.disable_blend_equation_advanced) {
bool blend_equation_advanced_coherent =
- extensions.Contains("GL_NV_blend_equation_advanced_coherent") ||
- extensions.Contains("GL_KHR_blend_equation_advanced_coherent");
+ gl::HasExtension(extensions,
+ "GL_NV_blend_equation_advanced_coherent") ||
+ gl::HasExtension(extensions, "GL_KHR_blend_equation_advanced_coherent");
if (blend_equation_advanced_coherent ||
- extensions.Contains("GL_NV_blend_equation_advanced") ||
- extensions.Contains("GL_KHR_blend_equation_advanced")) {
+ gl::HasExtension(extensions, "GL_NV_blend_equation_advanced") ||
+ gl::HasExtension(extensions, "GL_KHR_blend_equation_advanced")) {
const GLenum equations[] = {GL_MULTIPLY_KHR,
GL_SCREEN_KHR,
GL_OVERLAY_KHR,
@@ -1268,17 +1247,18 @@ void FeatureInfo::InitializeFeatures() {
}
}
- if (extensions.Contains("GL_NV_framebuffer_mixed_samples")) {
+ if (gl::HasExtension(extensions, "GL_NV_framebuffer_mixed_samples")) {
AddExtensionString("GL_CHROMIUM_framebuffer_mixed_samples");
feature_flags_.chromium_framebuffer_mixed_samples = true;
validators_.g_l_state.AddValue(GL_COVERAGE_MODULATION_CHROMIUM);
}
- if (extensions.Contains("GL_NV_path_rendering")) {
+ if (gl::HasExtension(extensions, "GL_NV_path_rendering")) {
bool has_dsa = gl_version_info_->IsAtLeastGL(4, 5) ||
- extensions.Contains("GL_EXT_direct_state_access");
- bool has_piq = gl_version_info_->IsAtLeastGL(4, 3) ||
- extensions.Contains("GL_ARB_program_interface_query");
+ gl::HasExtension(extensions, "GL_EXT_direct_state_access");
+ bool has_piq =
+ gl_version_info_->IsAtLeastGL(4, 3) ||
+ gl::HasExtension(extensions, "GL_ARB_program_interface_query");
bool has_fms = feature_flags_.chromium_framebuffer_mixed_samples;
if ((gl_version_info_->IsAtLeastGLES(3, 1) ||
(gl_version_info_->IsAtLeastGL(3, 2) && has_dsa && has_piq)) &&
@@ -1294,8 +1274,8 @@ void FeatureInfo::InitializeFeatures() {
}
if ((gl_version_info_->is_es3 || gl_version_info_->is_desktop_core_profile ||
- extensions.Contains("GL_EXT_texture_rg") ||
- extensions.Contains("GL_ARB_texture_rg")) &&
+ gl::HasExtension(extensions, "GL_EXT_texture_rg") ||
+ gl::HasExtension(extensions, "GL_ARB_texture_rg")) &&
IsGL_REDSupportedOnFBOs()) {
feature_flags_.ext_texture_rg = true;
AddExtensionString("GL_EXT_texture_rg");
@@ -1316,7 +1296,7 @@ void FeatureInfo::InitializeFeatures() {
UMA_HISTOGRAM_BOOLEAN("GPU.TextureRG", feature_flags_.ext_texture_rg);
if (gl_version_info_->is_desktop_core_profile ||
- extensions.Contains("GL_EXT_texture_norm16")) {
+ gl::HasExtension(extensions, "GL_EXT_texture_norm16")) {
feature_flags_.ext_texture_norm16 = true;
AddExtensionString("GL_EXT_texture_norm16");
@@ -1331,13 +1311,12 @@ void FeatureInfo::InitializeFeatures() {
bool has_opengl_dual_source_blending =
gl_version_info_->IsAtLeastGL(3, 3) ||
(gl_version_info_->IsAtLeastGL(3, 2) &&
- extensions.Contains("GL_ARB_blend_func_extended"));
- if (!disable_shader_translator_ &&
- !workarounds_.get_frag_data_info_bug &&
+ gl::HasExtension(extensions, "GL_ARB_blend_func_extended"));
+ if (!disable_shader_translator_ && !workarounds_.get_frag_data_info_bug &&
((gl_version_info_->IsAtLeastGL(3, 2) &&
has_opengl_dual_source_blending) ||
(gl_version_info_->IsAtLeastGLES(3, 0) &&
- extensions.Contains("GL_EXT_blend_func_extended")))) {
+ gl::HasExtension(extensions, "GL_EXT_blend_func_extended")))) {
// Note: to simplify the code, we do not expose EXT_blend_func_extended
// unless the service context supports ES 3.0. This means the theoretical ES
// 2.0 implementation with EXT_blend_func_extended is not sufficient.
@@ -1377,31 +1356,39 @@ void FeatureInfo::InitializeFeatures() {
}
feature_flags_.angle_robust_client_memory =
- extensions.Contains("GL_ANGLE_robust_client_memory");
+ gl::HasExtension(extensions, "GL_ANGLE_robust_client_memory");
feature_flags_.khr_debug = gl_version_info_->IsAtLeastGL(4, 3) ||
gl_version_info_->IsAtLeastGLES(3, 2) ||
- extensions.Contains("GL_KHR_debug");
+ gl::HasExtension(extensions, "GL_KHR_debug");
feature_flags_.chromium_bind_generates_resource =
- extensions.Contains("GL_CHROMIUM_bind_generates_resource");
+ gl::HasExtension(extensions, "GL_CHROMIUM_bind_generates_resource");
feature_flags_.angle_webgl_compatibility = is_webgl_compatbility_context;
feature_flags_.chromium_copy_texture =
- extensions.Contains("GL_CHROMIUM_copy_texture");
+ gl::HasExtension(extensions, "GL_CHROMIUM_copy_texture");
feature_flags_.chromium_copy_compressed_texture =
- extensions.Contains("GL_CHROMIUM_copy_compressed_texture");
+ gl::HasExtension(extensions, "GL_CHROMIUM_copy_compressed_texture");
feature_flags_.angle_client_arrays =
- extensions.Contains("GL_ANGLE_client_arrays");
+ gl::HasExtension(extensions, "GL_ANGLE_client_arrays");
feature_flags_.angle_request_extension =
- extensions.Contains("GL_ANGLE_request_extension");
- feature_flags_.ext_debug_marker = extensions.Contains("GL_EXT_debug_marker");
- feature_flags_.arb_robustness = extensions.Contains("GL_ARB_robustness");
- feature_flags_.khr_robustness = extensions.Contains("GL_KHR_robustness");
- feature_flags_.ext_robustness = extensions.Contains("GL_EXT_robustness");
+ gl::HasExtension(extensions, "GL_ANGLE_request_extension");
+ feature_flags_.ext_debug_marker =
+ gl::HasExtension(extensions, "GL_EXT_debug_marker");
+ feature_flags_.arb_robustness =
+ gl::HasExtension(extensions, "GL_ARB_robustness");
+ feature_flags_.khr_robustness =
+ gl::HasExtension(extensions, "GL_KHR_robustness");
+ feature_flags_.ext_robustness =
+ gl::HasExtension(extensions, "GL_EXT_robustness");
+ feature_flags_.chromium_texture_filtering_hint =
+ gl::HasExtension(extensions, "GL_CHROMIUM_texture_filtering_hint");
+ feature_flags_.ext_pixel_buffer_object =
+ gl::HasExtension(extensions, "GL_NV_pixel_buffer_object");
}
void FeatureInfo::InitializeFloatAndHalfFloatFeatures(
- const StringSet& extensions) {
+ const gl::ExtensionSet& extensions) {
// Check if we should allow GL_OES_texture_float, GL_OES_texture_half_float,
// GL_OES_texture_float_linear, GL_OES_texture_half_float_linear
bool enable_texture_float = false;
@@ -1417,12 +1404,12 @@ void FeatureInfo::InitializeFloatAndHalfFloatFeatures(
// These extensions allow a variety of floating point formats to be
// rendered to via framebuffer objects.
- if (extensions.Contains("GL_EXT_color_buffer_float"))
+ if (gl::HasExtension(extensions, "GL_EXT_color_buffer_float"))
enable_ext_color_buffer_float = true;
- if (extensions.Contains("GL_EXT_color_buffer_half_float"))
+ if (gl::HasExtension(extensions, "GL_EXT_color_buffer_half_float"))
enable_ext_color_buffer_half_float = true;
- if (extensions.Contains("GL_ARB_texture_float") ||
+ if (gl::HasExtension(extensions, "GL_ARB_texture_float") ||
gl_version_info_->is_desktop_core_profile) {
enable_texture_float = true;
enable_texture_float_linear = true;
@@ -1432,24 +1419,24 @@ void FeatureInfo::InitializeFloatAndHalfFloatFeatures(
} else {
// GLES3 adds support for Float type by default but it doesn't support all
    // formats as GL_OES_texture_float (i.e. LUMINANCE_ALPHA, LUMINANCE and Alpha)
- if (extensions.Contains("GL_OES_texture_float")) {
+ if (gl::HasExtension(extensions, "GL_OES_texture_float")) {
enable_texture_float = true;
if (enable_ext_color_buffer_float) {
may_enable_chromium_color_buffer_float = true;
}
}
- if (extensions.Contains("GL_OES_texture_float_linear")) {
+ if (gl::HasExtension(extensions, "GL_OES_texture_float_linear")) {
enable_texture_float_linear = true;
}
// TODO(dshwang): GLES3 supports half float by default but GL_HALF_FLOAT_OES
// isn't equal to GL_HALF_FLOAT.
- if (extensions.Contains("GL_OES_texture_half_float")) {
+ if (gl::HasExtension(extensions, "GL_OES_texture_half_float")) {
enable_texture_half_float = true;
}
- if (extensions.Contains("GL_OES_texture_half_float_linear")) {
+ if (gl::HasExtension(extensions, "GL_OES_texture_half_float_linear")) {
enable_texture_half_float_linear = true;
}
}
@@ -1479,7 +1466,7 @@ void FeatureInfo::InitializeFloatAndHalfFloatFeatures(
}
bool had_native_chromium_color_buffer_float_ext = false;
- if (extensions.Contains("GL_CHROMIUM_color_buffer_float_rgb")) {
+ if (gl::HasExtension(extensions, "GL_CHROMIUM_color_buffer_float_rgb")) {
had_native_chromium_color_buffer_float_ext = true;
feature_flags_.chromium_color_buffer_float_rgb = true;
if (!disallowed_features_.chromium_color_buffer_float_rgb) {
@@ -1487,7 +1474,7 @@ void FeatureInfo::InitializeFloatAndHalfFloatFeatures(
}
}
- if (extensions.Contains("GL_CHROMIUM_color_buffer_float_rgba")) {
+ if (gl::HasExtension(extensions, "GL_CHROMIUM_color_buffer_float_rgba")) {
had_native_chromium_color_buffer_float_ext = true;
feature_flags_.chromium_color_buffer_float_rgba = true;
if (!disallowed_features_.chromium_color_buffer_float_rgba) {
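
The removed FeatureInfo::StringSet is replaced throughout this file by ui/gl's ExtensionSet; condensed, the new query pattern used above amounts to the following. The free function is illustrative only, while the gl:: calls are the ones the patch itself uses.

// Condensed form of the migration above: split the extension string once into
// a gl::ExtensionSet, then test membership with gl::HasExtension().
#include <string>
#include "ui/gl/extension_set.h"

bool HasS3TC(const std::string& extensions_string) {
  gl::ExtensionSet extensions(gl::MakeExtensionSet(extensions_string));
  return gl::HasExtension(extensions, "GL_EXT_texture_compression_s3tc");
}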
diff --git a/chromium/gpu/command_buffer/service/feature_info.h b/chromium/gpu/command_buffer/service/feature_info.h
index 0d3299adf81..6fe9fbe527e 100644
--- a/chromium/gpu/command_buffer/service/feature_info.h
+++ b/chromium/gpu/command_buffer/service/feature_info.h
@@ -14,6 +14,7 @@
#include "gpu/command_buffer/service/gles2_cmd_validation.h"
#include "gpu/config/gpu_driver_bug_workarounds.h"
#include "gpu/gpu_export.h"
+#include "ui/gl/extension_set.h"
namespace base {
class CommandLine;
@@ -79,9 +80,12 @@ class GPU_EXPORT FeatureInfo : public base::RefCounted<FeatureInfo> {
bool ext_discard_framebuffer = false;
bool angle_depth_texture = false;
bool is_swiftshader_for_webgl = false;
+ bool is_swiftshader = false;
+ bool chromium_texture_filtering_hint = false;
bool angle_texture_usage = false;
bool ext_texture_storage = false;
bool chromium_path_rendering = false;
+ bool chromium_raster_transport = false;
bool chromium_framebuffer_mixed_samples = false;
bool blend_equation_advanced = false;
bool blend_equation_advanced_coherent = false;
@@ -114,6 +118,7 @@ class GPU_EXPORT FeatureInfo : public base::RefCounted<FeatureInfo> {
bool arb_robustness = false;
bool khr_robustness = false;
bool ext_robustness = false;
+ bool ext_pixel_buffer_object = false;
};
FeatureInfo();
@@ -122,10 +127,6 @@ class GPU_EXPORT FeatureInfo : public base::RefCounted<FeatureInfo> {
explicit FeatureInfo(
const GpuDriverBugWorkarounds& gpu_driver_bug_workarounds);
- // Constructor with workarounds taken from |command_line|.
- FeatureInfo(const base::CommandLine& command_line,
- const GpuDriverBugWorkarounds& gpu_driver_bug_workarounds);
-
// Initializes the feature information. Needs a current GL context.
bool Initialize(ContextType context_type,
const DisallowedFeatures& disallowed_features);
@@ -189,14 +190,13 @@ class GPU_EXPORT FeatureInfo : public base::RefCounted<FeatureInfo> {
private:
friend class base::RefCounted<FeatureInfo>;
friend class BufferManagerClientSideArraysTest;
- class StringSet;
~FeatureInfo();
void AddExtensionString(const char* s);
void InitializeBasicState(const base::CommandLine* command_line);
void InitializeFeatures();
- void InitializeFloatAndHalfFloatFeatures(const StringSet& extensions);
+ void InitializeFloatAndHalfFloatFeatures(const gl::ExtensionSet& extensions);
Validators validators_;
diff --git a/chromium/gpu/command_buffer/service/feature_info_unittest.cc b/chromium/gpu/command_buffer/service/feature_info_unittest.cc
index d98963182d0..0e18fab448e 100644
--- a/chromium/gpu/command_buffer/service/feature_info_unittest.cc
+++ b/chromium/gpu/command_buffer/service/feature_info_unittest.cc
@@ -8,13 +8,11 @@
#include <memory>
-#include "base/command_line.h"
-#include "base/strings/string_number_conversions.h"
#include "gpu/command_buffer/service/gpu_service_test.h"
#include "gpu/command_buffer/service/gpu_switches.h"
#include "gpu/command_buffer/service/test_helper.h"
#include "gpu/command_buffer/service/texture_manager.h"
-#include "gpu/config/gpu_driver_bug_workaround_type.h"
+#include "gpu/config/gpu_driver_bug_workarounds.h"
#include "gpu/config/gpu_switches.h"
#include "testing/gtest/include/gtest/gtest.h"
#include "ui/gl/gl_fence.h"
@@ -112,33 +110,18 @@ class FeatureInfoTest
info_->Initialize(GetContextType(), disallowed_features);
}
- void SetupInitExpectationsWithGLVersionAndCommandLine(
- const char* extensions,
- const char* renderer,
- const char* version,
- const base::CommandLine& command_line) {
- GpuServiceTest::SetUpWithGLVersion(version, extensions);
- TestHelper::SetupFeatureInfoInitExpectationsWithGLVersion(
- gl_.get(), extensions, renderer, version, GetContextType());
- GpuDriverBugWorkarounds gpu_driver_bug_workaround(&command_line);
- info_ = new FeatureInfo(command_line, gpu_driver_bug_workaround);
- info_->Initialize(GetContextType(), DisallowedFeatures());
- }
-
- void SetupWithCommandLine(const base::CommandLine& command_line) {
+ void SetupWithWorkarounds(const gpu::GpuDriverBugWorkarounds& workarounds) {
GpuServiceTest::SetUp();
- GpuDriverBugWorkarounds gpu_driver_bug_workaround(&command_line);
- info_ = new FeatureInfo(command_line, gpu_driver_bug_workaround);
+ info_ = new FeatureInfo(workarounds);
}
- void SetupInitExpectationsWithCommandLine(
+ void SetupInitExpectationsWithWorkarounds(
const char* extensions,
- const base::CommandLine& command_line) {
+ const gpu::GpuDriverBugWorkarounds& workarounds) {
GpuServiceTest::SetUpWithGLVersion("2.0", extensions);
TestHelper::SetupFeatureInfoInitExpectationsWithGLVersion(
gl_.get(), extensions, "", "", GetContextType());
- GpuDriverBugWorkarounds gpu_driver_bug_workaround(&command_line);
- info_ = new FeatureInfo(command_line, gpu_driver_bug_workaround);
+ info_ = new FeatureInfo(workarounds);
info_->Initialize(GetContextType(), DisallowedFeatures());
}
@@ -1388,12 +1371,10 @@ TEST_P(FeatureInfoTest, InitializeOES_element_index_uint) {
}
TEST_P(FeatureInfoTest, InitializeVAOsWithClientSideArrays) {
- base::CommandLine command_line(0, NULL);
- command_line.AppendSwitchASCII(
- switches::kGpuDriverBugWorkarounds,
- base::IntToString(gpu::USE_CLIENT_SIDE_ARRAYS_FOR_STREAM_BUFFERS));
- SetupInitExpectationsWithCommandLine("GL_OES_vertex_array_object",
- command_line);
+ gpu::GpuDriverBugWorkarounds workarounds;
+ workarounds.use_client_side_arrays_for_stream_buffers = true;
+ SetupInitExpectationsWithWorkarounds("GL_OES_vertex_array_object",
+ workarounds);
EXPECT_TRUE(info_->workarounds().use_client_side_arrays_for_stream_buffers);
EXPECT_FALSE(info_->feature_flags().native_vertex_array_object);
}
@@ -1494,23 +1475,19 @@ TEST_P(FeatureInfoTest, InitializeWithoutSamplers) {
}
TEST_P(FeatureInfoTest, ParseDriverBugWorkaroundsSingle) {
- base::CommandLine command_line(0, NULL);
- command_line.AppendSwitchASCII(
- switches::kGpuDriverBugWorkarounds,
- base::IntToString(gpu::EXIT_ON_CONTEXT_LOST));
+ gpu::GpuDriverBugWorkarounds workarounds;
+ workarounds.exit_on_context_lost = true;
// Workarounds should get parsed without the need for a context.
- SetupWithCommandLine(command_line);
+ SetupWithWorkarounds(workarounds);
EXPECT_TRUE(info_->workarounds().exit_on_context_lost);
}
TEST_P(FeatureInfoTest, ParseDriverBugWorkaroundsMultiple) {
- base::CommandLine command_line(0, NULL);
- command_line.AppendSwitchASCII(
- switches::kGpuDriverBugWorkarounds,
- base::IntToString(gpu::EXIT_ON_CONTEXT_LOST) + "," +
- base::IntToString(gpu::MAX_TEXTURE_SIZE_LIMIT_4096));
+ gpu::GpuDriverBugWorkarounds workarounds;
+ workarounds.exit_on_context_lost = true;
+ workarounds.max_texture_size = 4096;
// Workarounds should get parsed without the need for a context.
- SetupWithCommandLine(command_line);
+ SetupWithWorkarounds(workarounds);
EXPECT_TRUE(info_->workarounds().exit_on_context_lost);
EXPECT_EQ(4096, info_->workarounds().max_texture_size);
}
@@ -1543,59 +1520,51 @@ TEST_P(FeatureInfoTest, InitializeWithPreferredEXTDrawBuffers) {
}
TEST_P(FeatureInfoTest, BlendEquationAdvancedDisabled) {
- base::CommandLine command_line(0, NULL);
- command_line.AppendSwitchASCII(
- switches::kGpuDriverBugWorkarounds,
- base::IntToString(gpu::DISABLE_BLEND_EQUATION_ADVANCED));
- SetupInitExpectationsWithCommandLine(
+ gpu::GpuDriverBugWorkarounds workarounds;
+ workarounds.disable_blend_equation_advanced = true;
+ SetupInitExpectationsWithWorkarounds(
"GL_KHR_blend_equation_advanced_coherent GL_KHR_blend_equation_advanced",
- command_line);
+ workarounds);
EXPECT_FALSE(info_->feature_flags().blend_equation_advanced);
EXPECT_FALSE(info_->feature_flags().blend_equation_advanced_coherent);
}
TEST_P(FeatureInfoTest, InitializeCHROMIUM_path_rendering) {
- base::CommandLine command_line(0, NULL);
- SetupInitExpectationsWithGLVersionAndCommandLine(
+ SetupInitExpectationsWithGLVersion(
"GL_ARB_compatibility GL_NV_path_rendering GL_EXT_direct_state_access "
"GL_NV_framebuffer_mixed_samples",
- "", "4.3", command_line);
+ "", "4.3");
EXPECT_TRUE(info_->feature_flags().chromium_path_rendering);
EXPECT_THAT(info_->extensions(), HasSubstr("GL_CHROMIUM_path_rendering"));
}
TEST_P(FeatureInfoTest, InitializeCHROMIUM_path_rendering2) {
- base::CommandLine command_line(0, NULL);
- SetupInitExpectationsWithGLVersionAndCommandLine(
+ SetupInitExpectationsWithGLVersion(
"GL_NV_path_rendering GL_NV_framebuffer_mixed_samples", "",
- "OpenGL ES 3.1", command_line);
+ "OpenGL ES 3.1");
EXPECT_TRUE(info_->feature_flags().chromium_path_rendering);
EXPECT_THAT(info_->extensions(), HasSubstr("GL_CHROMIUM_path_rendering"));
}
TEST_P(FeatureInfoTest, InitializeNoCHROMIUM_path_rendering) {
- base::CommandLine command_line(0, NULL);
- SetupInitExpectationsWithGLVersionAndCommandLine("GL_ARB_compatibility", "",
- "4.3", command_line);
+ SetupInitExpectationsWithGLVersion("GL_ARB_compatibility", "", "4.3");
EXPECT_FALSE(info_->feature_flags().chromium_path_rendering);
EXPECT_THAT(info_->extensions(),
Not(HasSubstr("GL_CHROMIUM_path_rendering")));
}
TEST_P(FeatureInfoTest, InitializeNoCHROMIUM_path_rendering2) {
- base::CommandLine command_line(0, NULL);
- SetupInitExpectationsWithGLVersionAndCommandLine(
- "GL_ARB_compatibility GL_NV_path_rendering", "", "4.3", command_line);
+ SetupInitExpectationsWithGLVersion(
+ "GL_ARB_compatibility GL_NV_path_rendering", "", "4.3");
EXPECT_FALSE(info_->feature_flags().chromium_path_rendering);
EXPECT_THAT(info_->extensions(),
Not(HasSubstr("GL_CHROMIUM_path_rendering")));
}
TEST_P(FeatureInfoTest, InitializeNoCHROMIUM_path_rendering3) {
- base::CommandLine command_line(0, NULL);
// Missing framebuffer mixed samples.
- SetupInitExpectationsWithGLVersionAndCommandLine(
- "GL_NV_path_rendering", "", "OpenGL ES 3.1", command_line);
+ SetupInitExpectationsWithGLVersion("GL_NV_path_rendering", "",
+ "OpenGL ES 3.1");
EXPECT_FALSE(info_->feature_flags().chromium_path_rendering);
EXPECT_THAT(info_->extensions(),
Not(HasSubstr("GL_CHROMIUM_path_rendering")));
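
With the command-line plumbing gone, the tests configure workarounds by setting fields on GpuDriverBugWorkarounds directly; condensed, the new fixture pattern amounts to the sketch below. The field and constructor names are the ones the tests above use; the helper function itself is illustrative.

// Condensed setup pattern from the updated tests: workarounds are plain struct
// fields, assigned directly instead of being serialized onto a command line
// and re-parsed.
#include "base/memory/ref_counted.h"
#include "gpu/command_buffer/service/feature_info.h"
#include "gpu/config/gpu_driver_bug_workarounds.h"

scoped_refptr<gpu::gles2::FeatureInfo> MakeFeatureInfoForTest() {
  gpu::GpuDriverBugWorkarounds workarounds;
  workarounds.exit_on_context_lost = true;
  workarounds.max_texture_size = 4096;
  return new gpu::gles2::FeatureInfo(workarounds);
}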
diff --git a/chromium/gpu/command_buffer/service/framebuffer_manager.cc b/chromium/gpu/command_buffer/service/framebuffer_manager.cc
index e2333d1beeb..366c5085c87 100644
--- a/chromium/gpu/command_buffer/service/framebuffer_manager.cc
+++ b/chromium/gpu/command_buffer/service/framebuffer_manager.cc
@@ -986,7 +986,7 @@ void Framebuffer::DoUnbindGLAttachmentsForWorkaround(GLenum target) {
void Framebuffer::AttachRenderbuffer(
GLenum attachment, Renderbuffer* renderbuffer) {
- DCHECK(attachment != GL_DEPTH_STENCIL_ATTACHMENT);
+ DCHECK_NE(static_cast<GLenum>(GL_DEPTH_STENCIL_ATTACHMENT), attachment);
const Attachment* a = GetAttachment(attachment);
if (a)
a->DetachFromFramebuffer(this, attachment);
@@ -1003,7 +1003,7 @@ void Framebuffer::AttachRenderbuffer(
void Framebuffer::AttachTexture(
GLenum attachment, TextureRef* texture_ref, GLenum target,
GLint level, GLsizei samples) {
- DCHECK(attachment != GL_DEPTH_STENCIL_ATTACHMENT);
+ DCHECK_NE(static_cast<GLenum>(GL_DEPTH_STENCIL_ATTACHMENT), attachment);
const Attachment* a = GetAttachment(attachment);
if (a)
a->DetachFromFramebuffer(this, attachment);
@@ -1020,7 +1020,7 @@ void Framebuffer::AttachTexture(
void Framebuffer::AttachTextureLayer(
GLenum attachment, TextureRef* texture_ref, GLenum target,
GLint level, GLint layer) {
- DCHECK(attachment != GL_DEPTH_STENCIL_ATTACHMENT);
+ DCHECK_NE(static_cast<GLenum>(GL_DEPTH_STENCIL_ATTACHMENT), attachment);
const Attachment* a = GetAttachment(attachment);
if (a)
a->DetachFromFramebuffer(this, attachment);
diff --git a/chromium/gpu/command_buffer/service/gl_context_virtual.cc b/chromium/gpu/command_buffer/service/gl_context_virtual.cc
index 9615f233bb1..a5005589c15 100644
--- a/chromium/gpu/command_buffer/service/gl_context_virtual.cc
+++ b/chromium/gpu/command_buffer/service/gl_context_virtual.cc
@@ -77,7 +77,7 @@ std::string GLContextVirtual::GetGLRenderer() {
return shared_context_->GetGLRenderer();
}
-std::string GLContextVirtual::GetExtensions() {
+const gl::ExtensionSet& GLContextVirtual::GetExtensions() {
return shared_context_->GetExtensions();
}
@@ -96,8 +96,9 @@ void GLContextVirtual::SetUnbindFboOnMakeCurrent() {
shared_context_->SetUnbindFboOnMakeCurrent();
}
-gl::YUVToRGBConverter* GLContextVirtual::GetYUVToRGBConverter() {
- return shared_context_->GetYUVToRGBConverter();
+gl::YUVToRGBConverter* GLContextVirtual::GetYUVToRGBConverter(
+ const gfx::ColorSpace& color_space) {
+ return shared_context_->GetYUVToRGBConverter(color_space);
}
void GLContextVirtual::ForceReleaseVirtuallyCurrent() {
@@ -108,4 +109,8 @@ GLContextVirtual::~GLContextVirtual() {
Destroy();
}
+void GLContextVirtual::ResetExtensions() {
+ shared_context_->ResetExtensions();
+}
+
} // namespace gpu
diff --git a/chromium/gpu/command_buffer/service/gl_context_virtual.h b/chromium/gpu/command_buffer/service/gl_context_virtual.h
index 7af79b819a0..f9768f589fe 100644
--- a/chromium/gpu/command_buffer/service/gl_context_virtual.h
+++ b/chromium/gpu/command_buffer/service/gl_context_virtual.h
@@ -42,15 +42,17 @@ class GPU_EXPORT GLContextVirtual : public gl::GLContext {
void OnSetSwapInterval(int interval) override;
std::string GetGLVersion() override;
std::string GetGLRenderer() override;
- std::string GetExtensions() override;
+ const gl::ExtensionSet& GetExtensions() override;
void SetSafeToForceGpuSwitch() override;
bool WasAllocatedUsingRobustnessExtension() override;
void SetUnbindFboOnMakeCurrent() override;
- gl::YUVToRGBConverter* GetYUVToRGBConverter() override;
+ gl::YUVToRGBConverter* GetYUVToRGBConverter(
+ const gfx::ColorSpace& color_space) override;
void ForceReleaseVirtuallyCurrent() override;
protected:
~GLContextVirtual() override;
+ void ResetExtensions() override;
private:
void Destroy();
diff --git a/chromium/gpu/command_buffer/service/gl_stream_texture_image.h b/chromium/gpu/command_buffer/service/gl_stream_texture_image.h
index 5ab4f751601..b040da84496 100644
--- a/chromium/gpu/command_buffer/service/gl_stream_texture_image.h
+++ b/chromium/gpu/command_buffer/service/gl_stream_texture_image.h
@@ -27,7 +27,9 @@ class GPU_EXPORT GLStreamTextureImage : public gl::GLImage {
virtual void NotifyPromotionHint(bool promotion_hint,
int display_x,
- int display_y) {}
+ int display_y,
+ int display_width,
+ int display_height) {}
protected:
~GLStreamTextureImage() override {}
diff --git a/chromium/gpu/command_buffer/service/gl_surface_mock.h b/chromium/gpu/command_buffer/service/gl_surface_mock.h
index 10e81debbac..a0f0bf2c31e 100644
--- a/chromium/gpu/command_buffer/service/gl_surface_mock.h
+++ b/chromium/gpu/command_buffer/service/gl_surface_mock.h
@@ -18,8 +18,11 @@ class GLSurfaceMock : public gl::GLSurface {
MOCK_METHOD1(Initialize, bool(gl::GLSurfaceFormat format));
MOCK_METHOD0(Destroy, void());
- MOCK_METHOD3(Resize,
- bool(const gfx::Size& size, float scale_factor, bool alpha));
+ MOCK_METHOD4(Resize,
+ bool(const gfx::Size& size,
+ float scale_factor,
+ ColorSpace color_space,
+ bool alpha));
MOCK_METHOD0(IsOffscreen, bool());
MOCK_METHOD0(SwapBuffers, gfx::SwapResult());
MOCK_METHOD4(PostSubBuffer,
diff --git a/chromium/gpu/command_buffer/service/gl_utils.h b/chromium/gpu/command_buffer/service/gl_utils.h
index 8995692b736..f80a619b6b7 100644
--- a/chromium/gpu/command_buffer/service/gl_utils.h
+++ b/chromium/gpu/command_buffer/service/gl_utils.h
@@ -12,6 +12,8 @@
#include "build/build_config.h"
#include "gpu/command_buffer/common/constants.h"
+#include "ui/gfx/geometry/rect.h"
+#include "ui/gfx/transform.h"
#include "ui/gl/gl_bindings.h"
// Define this for extra GL error debugging (slower).
@@ -36,6 +38,22 @@ class FeatureInfo;
namespace gles2 {
+struct CALayerSharedState {
+ float opacity;
+ bool is_clipped;
+ gfx::Rect clip_rect;
+ int sorting_context_id;
+ gfx::Transform transform;
+};
+
+struct DCLayerSharedState {
+ float opacity;
+ bool is_clipped;
+ gfx::Rect clip_rect;
+ int z_order;
+ gfx::Transform transform;
+};
+
std::vector<int> GetAllGLErrors();
bool PrecisionMeetsSpecForHighpFloat(GLint rangeMin,
diff --git a/chromium/gpu/command_buffer/service/gles2_cmd_decoder.cc b/chromium/gpu/command_buffer/service/gles2_cmd_decoder.cc
index 521a929fafc..2da74f06c8a 100644
--- a/chromium/gpu/command_buffer/service/gles2_cmd_decoder.cc
+++ b/chromium/gpu/command_buffer/service/gles2_cmd_decoder.cc
@@ -26,6 +26,7 @@
#include "base/strings/stringprintf.h"
#include "base/trace_event/trace_event.h"
#include "build/build_config.h"
+#include "cc/paint/paint_op_buffer.h"
#include "gpu/command_buffer/common/debug_marker_manager.h"
#include "gpu/command_buffer/common/gles2_cmd_format.h"
#include "gpu/command_buffer/common/gles2_cmd_utils.h"
@@ -35,6 +36,7 @@
#include "gpu/command_buffer/service/command_buffer_service.h"
#include "gpu/command_buffer/service/context_group.h"
#include "gpu/command_buffer/service/context_state.h"
+#include "gpu/command_buffer/service/create_gr_gl_interface.h"
#include "gpu/command_buffer/service/error_state.h"
#include "gpu/command_buffer/service/feature_info.h"
#include "gpu/command_buffer/service/framebuffer_manager.h"
@@ -68,13 +70,21 @@
#include "gpu/command_buffer/service/vertex_array_manager.h"
#include "gpu/command_buffer/service/vertex_attrib_manager.h"
#include "third_party/angle/src/image_util/loadimage.h"
+#include "third_party/skia/include/core/SkCanvas.h"
+#include "third_party/skia/include/core/SkSurface.h"
+#include "third_party/skia/include/core/SkSurfaceProps.h"
+#include "third_party/skia/include/core/SkTypeface.h"
+#include "third_party/skia/include/gpu/GrBackendSurface.h"
+#include "third_party/skia/include/gpu/GrContext.h"
#include "third_party/smhasher/src/City.h"
#include "ui/gfx/buffer_types.h"
+#include "ui/gfx/color_space.h"
#include "ui/gfx/geometry/point.h"
#include "ui/gfx/geometry/rect.h"
#include "ui/gfx/geometry/rect_conversions.h"
#include "ui/gfx/geometry/size.h"
#include "ui/gfx/gpu_memory_buffer.h"
+#include "ui/gfx/ipc/color/gfx_param_traits.h"
#include "ui/gfx/overlay_transform.h"
#include "ui/gfx/transform.h"
#include "ui/gl/ca_renderer_layer_params.h"
@@ -86,7 +96,6 @@
#include "ui/gl/gl_implementation.h"
#include "ui/gl/gl_surface.h"
#include "ui/gl/gl_version_info.h"
-#include "ui/gl/gl_workarounds.h"
#include "ui/gl/gpu_timing.h"
#if defined(OS_MACOSX)
@@ -479,6 +488,10 @@ bool GLES2Decoder::GetServiceTextureId(uint32_t client_texture_id,
return false;
}
+TextureBase* GLES2Decoder::GetTextureBase(uint32_t client_id) {
+ return nullptr;
+}
+
uint32_t GLES2Decoder::GetAndClearBackbufferClearBitsForTest() {
return 0;
}
@@ -613,6 +626,7 @@ class GLES2DecoderImpl : public GLES2Decoder, public ErrorStateClient {
bool GetServiceTextureId(uint32_t client_texture_id,
uint32_t* service_texture_id) override;
+ TextureBase* GetTextureBase(uint32_t client_id) override;
// Restores the current state to the user's settings.
void RestoreCurrentFramebufferBindings();
@@ -684,6 +698,7 @@ class GLES2DecoderImpl : public GLES2Decoder, public ErrorStateClient {
// Initialize or re-initialize the shader translator.
bool InitializeShaderTranslator();
+ void DestroyShaderTranslator();
void UpdateCapabilities();
@@ -1035,6 +1050,10 @@ class GLES2DecoderImpl : public GLES2Decoder, public ErrorStateClient {
const volatile GLbyte* key);
void DoApplyScreenSpaceAntialiasingCHROMIUM();
+ void BindImage(uint32_t client_texture_id,
+ uint32_t texture_target,
+ gl::GLImage* image,
+ bool can_bind_to_sampler) override;
void DoBindTexImage2DCHROMIUM(
GLenum target,
GLint image_id);
@@ -1709,7 +1728,9 @@ class GLES2DecoderImpl : public GLES2Decoder, public ErrorStateClient {
void DoOverlayPromotionHintCHROMIUM(GLuint client_id,
GLboolean promotion_hint,
GLint display_x,
- GLint display_y);
+ GLint display_y,
+ GLint display_width,
+ GLint display_height);
// Wrapper for glSetDrawRectangleCHROMIUM
void DoSetDrawRectangleCHROMIUM(GLint x, GLint y, GLint width, GLint height);
@@ -1750,7 +1771,7 @@ class GLES2DecoderImpl : public GLES2Decoder, public ErrorStateClient {
GLuint renderbuffer, GLenum format);
// Wrapper for glReleaseShaderCompiler.
- void DoReleaseShaderCompiler() { }
+ void DoReleaseShaderCompiler();
// Wrappers for glSamplerParameter functions.
void DoSamplerParameterf(GLuint client_id, GLenum pname, GLfloat param);
@@ -1928,6 +1949,14 @@ class GLES2DecoderImpl : public GLES2Decoder, public ErrorStateClient {
GLenum textarget,
GLuint texture_unit);
+ void DoBeginRasterCHROMIUM(GLuint texture_id,
+ GLuint sk_color,
+ GLuint msaa_sample_count,
+ GLboolean can_use_lcd_text,
+ GLboolean use_distance_field_text,
+ GLint pixel_config);
+ void DoEndRasterCHROMIUM();
+
// Returns false if textures were replaced.
bool PrepareTexturesForRender();
void RestoreStateForTextures();
@@ -2371,6 +2400,7 @@ class GLES2DecoderImpl : public GLES2Decoder, public ErrorStateClient {
// if not returning an error.
error::Error current_decoder_error_;
+ bool has_fragment_precision_high_ = false;
scoped_refptr<ShaderTranslatorInterface> vertex_translator_;
scoped_refptr<ShaderTranslatorInterface> fragment_translator_;
@@ -2479,26 +2509,13 @@ class GLES2DecoderImpl : public GLES2Decoder, public ErrorStateClient {
SamplerState default_sampler_state_;
- struct CALayerSharedState{
- float opacity;
- bool is_clipped;
- gfx::Rect clip_rect;
- int sorting_context_id;
- gfx::Transform transform;
- };
-
std::unique_ptr<CALayerSharedState> ca_layer_shared_state_;
-
- struct DCLayerSharedState {
- float opacity;
- bool is_clipped;
- gfx::Rect clip_rect;
- int z_order;
- gfx::Transform transform;
- };
-
std::unique_ptr<DCLayerSharedState> dc_layer_shared_state_;
+ // Raster helpers.
+ sk_sp<GrContext> gr_context_;
+ sk_sp<SkSurface> sk_surface_;
+
base::WeakPtrFactory<GLES2DecoderImpl> weak_ptr_factory_;
DISALLOW_COPY_AND_ASSIGN(GLES2DecoderImpl);
@@ -3080,7 +3097,7 @@ GLES2Decoder* GLES2Decoder::Create(
GLES2DecoderClient* client,
CommandBufferServiceBase* command_buffer_service,
ContextGroup* group) {
- if (group->gpu_preferences().use_passthrough_cmd_decoder) {
+ if (group->use_passthrough_cmd_decoder()) {
return new GLES2DecoderPassthroughImpl(client, command_buffer_service,
group);
}
@@ -3198,13 +3215,6 @@ bool GLES2DecoderImpl::Initialize(
// Create GPU Tracer for timing values.
gpu_tracer_.reset(new GPUTracer(this));
- // Pass some workarounds to GLContext so that we can apply them in RealGLApi.
- gl::GLWorkarounds gl_workarounds;
- if (workarounds().clear_to_zero_or_one_broken) {
- gl_workarounds.clear_to_zero_or_one_broken = true;
- }
- GetGLContext()->SetGLWorkarounds(gl_workarounds);
-
if (workarounds().disable_timestamp_queries) {
// Forcing time elapsed query for any GPU Timing Client forces it for all
// clients in the context.
@@ -3323,7 +3333,7 @@ bool GLES2DecoderImpl::Initialize(
// We have to enable vertex array 0 on GL with compatibility profile or it
// won't render. Note that ES or GL with core profile does not have this
// issue.
- glEnableVertexAttribArray(0);
+ state_.vertex_attrib_manager->SetDriverVertexAttribEnabled(0, true);
}
glGenBuffersARB(1, &attrib_0_buffer_id_);
glBindBuffer(GL_ARRAY_BUFFER, attrib_0_buffer_id_);
@@ -3547,9 +3557,12 @@ bool GLES2DecoderImpl::Initialize(
features().khr_robustness ||
features().ext_robustness;
- if (!InitializeShaderTranslator()) {
- return false;
- }
+ GLint range[2] = {0, 0};
+ GLint precision = 0;
+ QueryShaderPrecisionFormat(gl_version_info(), GL_FRAGMENT_SHADER,
+ GL_HIGH_FLOAT, range, &precision);
+ has_fragment_precision_high_ =
+ PrecisionMeetsSpecForHighpFloat(range[0], range[1], precision);
GLint viewport_params[4] = { 0 };
glGetIntegerv(GL_MAX_VIEWPORT_DIMS, viewport_params);
@@ -3691,6 +3704,41 @@ bool GLES2DecoderImpl::Initialize(
InitializeGLDebugLogging();
}
+ if (feature_info_->feature_flags().chromium_texture_filtering_hint &&
+ feature_info_->feature_flags().is_swiftshader) {
+ glHint(GL_TEXTURE_FILTERING_HINT_CHROMIUM, GL_NICEST);
+ }
+
+ if (attrib_helper.enable_oop_rasterization) {
+ if (!features().chromium_raster_transport)
+ return false;
+ sk_sp<const GrGLInterface> interface(
+ CreateGrGLInterface(gl_version_info()));
+ // TODO(enne): if this or gr_context creation below fails in practice for
+ // different reasons than the ones the renderer would fail on for gpu
+ // raster, expose this in gpu::Capabilities so the renderer can handle it.
+ if (!interface)
+ return false;
+
+ gr_context_ = sk_sp<GrContext>(
+ GrContext::Create(kOpenGL_GrBackend,
+ reinterpret_cast<GrBackendContext>(interface.get())));
+ if (!gr_context_) {
+ LOG(ERROR) << "Could not create GrContext";
+ return false;
+ }
+
+ // TODO(enne): this cache is per-decoder, so every decoder ends up with
+ // its own cache, which is pretty unfortunate. This really needs to be
+ // rethought before shipping; most likely a separate command buffer
+ // context for raster-in-gpu, with a shared gl context / gr context
+ // that different decoders can use.
+ static const int kMaxGaneshResourceCacheCount = 8196;
+ static const size_t kMaxGaneshResourceCacheBytes = 96 * 1024 * 1024;
+ gr_context_->setResourceCacheLimits(kMaxGaneshResourceCacheCount,
+ kMaxGaneshResourceCacheBytes);
+ }
+
return true;
}
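
The OOP-raster setup above only runs when the context was created with the matching attribute; as a minimal sketch (header path assumed from the attrib_helper usage above, not verified against the rest of the tree), a host could request it like this:

    #include "gpu/command_buffer/common/gles2_cmd_utils.h"  // assumed location

    // Hypothetical helper: build context-creation attributes that opt into
    // out-of-process raster. GLES2DecoderImpl::Initialize() fails the context
    // if GL_CHROMIUM_raster_transport is unavailable, so callers need a
    // fallback path.
    gpu::gles2::ContextCreationAttribHelper MakeOopRasterAttribs() {
      gpu::gles2::ContextCreationAttribHelper attribs;
      attribs.enable_oop_rasterization = true;
      return attribs;
    }
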
@@ -3848,6 +3896,7 @@ Capabilities GLES2DecoderImpl::GetCapabilities() {
caps.multisample_compatibility =
feature_info_->feature_flags().ext_multisample_compatibility;
caps.dc_layers = supports_dc_layers_;
+ caps.use_dc_overlays_for_video = surface_->UseOverlaysForVideo();
caps.blend_equation_advanced =
feature_info_->feature_flags().blend_equation_advanced;
@@ -3889,6 +3938,10 @@ Capabilities GLES2DecoderImpl::GetCapabilities() {
!surface_->IsOffscreen()) {
caps.disable_non_empty_post_sub_buffers = true;
}
+ if (workarounds().broken_egl_image_ref_counting &&
+ group_->gpu_preferences().enable_threaded_texture_mailboxes) {
+ caps.disable_2d_canvas_copy_on_write = true;
+ }
return caps;
}
@@ -3905,6 +3958,10 @@ bool GLES2DecoderImpl::InitializeShaderTranslator() {
if (feature_info_->disable_shader_translator()) {
return true;
}
+ if (vertex_translator_ || fragment_translator_) {
+ DCHECK(vertex_translator_ && fragment_translator_);
+ return true;
+ }
ShBuiltInResources resources;
sh::InitBuiltInResources(&resources);
resources.MaxVertexAttribs = group_->max_vertex_attribs();
@@ -3931,12 +3988,7 @@ bool GLES2DecoderImpl::InitializeShaderTranslator() {
resources.MinProgramTexelOffset = group_->min_program_texel_offset();
}
- GLint range[2] = { 0, 0 };
- GLint precision = 0;
- QueryShaderPrecisionFormat(gl_version_info(), GL_FRAGMENT_SHADER,
- GL_HIGH_FLOAT, range, &precision);
- resources.FragmentPrecisionHigh =
- PrecisionMeetsSpecForHighpFloat(range[0], range[1], precision);
+ resources.FragmentPrecisionHigh = has_fragment_precision_high_;
ShShaderSpec shader_spec;
switch (feature_info_->context_type()) {
@@ -4055,6 +4107,11 @@ bool GLES2DecoderImpl::InitializeShaderTranslator() {
return true;
}
+void GLES2DecoderImpl::DestroyShaderTranslator() {
+ vertex_translator_ = nullptr;
+ fragment_translator_ = nullptr;
+}
+
bool GLES2DecoderImpl::GenBuffersHelper(GLsizei n, const GLuint* client_ids) {
for (GLsizei ii = 0; ii < n; ++ii) {
if (GetBuffer(client_ids[ii])) {
@@ -4746,6 +4803,11 @@ bool GLES2DecoderImpl::GetServiceTextureId(uint32_t client_texture_id,
return false;
}
+TextureBase* GLES2DecoderImpl::GetTextureBase(uint32_t client_id) {
+ TextureRef* texture_ref = texture_manager()->GetTexture(client_id);
+ return texture_ref ? texture_ref->texture() : nullptr;
+}
+
void GLES2DecoderImpl::Destroy(bool have_context) {
if (!initialized())
return;
@@ -4916,8 +4978,7 @@ void GLES2DecoderImpl::Destroy(bool have_context) {
// Need to release these before releasing |group_| which may own the
// ShaderTranslatorCache.
- fragment_translator_ = NULL;
- vertex_translator_ = NULL;
+ DestroyShaderTranslator();
// Destroy the GPU Tracer which may own some in process GPU Timings.
if (gpu_tracer_) {
@@ -5192,12 +5253,33 @@ error::Error GLES2DecoderImpl::HandleResizeCHROMIUM(
GLuint width = static_cast<GLuint>(c.width);
GLuint height = static_cast<GLuint>(c.height);
GLfloat scale_factor = c.scale_factor;
+ GLenum color_space = c.color_space;
GLboolean has_alpha = c.alpha;
TRACE_EVENT2("gpu", "glResizeChromium", "width", width, "height", height);
width = std::max(1U, width);
height = std::max(1U, height);
+ gl::GLSurface::ColorSpace surface_color_space =
+ gl::GLSurface::ColorSpace::UNSPECIFIED;
+ switch (color_space) {
+ case GL_COLOR_SPACE_UNSPECIFIED_CHROMIUM:
+ surface_color_space = gl::GLSurface::ColorSpace::UNSPECIFIED;
+ break;
+ case GL_COLOR_SPACE_SCRGB_LINEAR_CHROMIUM:
+ surface_color_space = gl::GLSurface::ColorSpace::SCRGB_LINEAR;
+ break;
+ case GL_COLOR_SPACE_SRGB_CHROMIUM:
+ surface_color_space = gl::GLSurface::ColorSpace::SRGB;
+ break;
+ case GL_COLOR_SPACE_DISPLAY_P3_CHROMIUM:
+ surface_color_space = gl::GLSurface::ColorSpace::DISPLAY_P3;
+ break;
+ default:
+ LOG(ERROR) << "GLES2DecoderImpl: Context lost because specified color "
+ << "space was invalid.";
+ return error::kLostContext;
+ }
bool is_offscreen = !!offscreen_target_frame_buffer_.get();
if (is_offscreen) {
if (!ResizeOffscreenFramebuffer(gfx::Size(width, height))) {
@@ -5207,7 +5289,7 @@ error::Error GLES2DecoderImpl::HandleResizeCHROMIUM(
}
} else {
if (!surface_->Resize(gfx::Size(width, height), scale_factor,
- !!has_alpha)) {
+ surface_color_space, !!has_alpha)) {
LOG(ERROR) << "GLES2DecoderImpl: Context lost because resize failed.";
return error::kLostContext;
}
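
The switch above is the decoder-side half of the new color-space plumbing in glResizeCHROMIUM; a rough client-side sketch, assuming the GL_COLOR_SPACE_*_CHROMIUM tokens from gl2extchromium.h and a signature whose color_space parameter sits where the decoder reads it (between scale_factor and alpha):

    #include <GLES2/gl2.h>
    #include <GLES2/gl2extchromium.h>  // CHROMIUM_resize tokens (assumed)

    // Resize the onscreen surface and request a Display P3 default
    // framebuffer. Any token not handled by the switch above loses the
    // context, so only pass the documented CHROMIUM color-space enums.
    void ResizeToDisplayP3(GLuint width, GLuint height, GLfloat scale_factor,
                           GLboolean has_alpha) {
      glResizeCHROMIUM(width, height, scale_factor,
                       GL_COLOR_SPACE_DISPLAY_P3_CHROMIUM, has_alpha);
    }
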
@@ -5675,7 +5757,7 @@ void GLES2DecoderImpl::ClearAllAttributes() const {
for (uint32_t i = 0; i < group_->max_vertex_attribs(); ++i) {
if (i != 0) // Never disable attribute 0
- glDisableVertexAttribArray(i);
+ state_.vertex_attrib_manager->SetDriverVertexAttribEnabled(i, false);
if (features().angle_instanced_arrays)
glVertexAttribDivisorANGLE(i, 0);
}
@@ -6026,7 +6108,7 @@ void GLES2DecoderImpl::DoResumeTransformFeedback() {
void GLES2DecoderImpl::DoDisableVertexAttribArray(GLuint index) {
if (state_.vertex_attrib_manager->Enable(index, false)) {
if (index != 0 || gl_version_info().BehavesLikeGLES()) {
- glDisableVertexAttribArray(index);
+ state_.vertex_attrib_manager->SetDriverVertexAttribEnabled(index, false);
}
} else {
LOCAL_SET_GL_ERROR(
@@ -6235,7 +6317,7 @@ void GLES2DecoderImpl::DoInvalidateSubFramebuffer(
void GLES2DecoderImpl::DoEnableVertexAttribArray(GLuint index) {
if (state_.vertex_attrib_manager->Enable(index, true)) {
- glEnableVertexAttribArray(index);
+ state_.vertex_attrib_manager->SetDriverVertexAttribEnabled(index, true);
} else {
LOCAL_SET_GL_ERROR(
GL_INVALID_VALUE, "glEnableVertexAttribArray", "index out of range");
@@ -8734,6 +8816,10 @@ bool GLES2DecoderImpl::VerifyMultisampleRenderbufferIntegrity(
pixel[2] == 0xFF);
}
+void GLES2DecoderImpl::DoReleaseShaderCompiler() {
+ DestroyShaderTranslator();
+}
+
void GLES2DecoderImpl::DoRenderbufferStorage(
GLenum target, GLenum internalformat, GLsizei width, GLsizei height) {
Renderbuffer* renderbuffer =
@@ -8813,7 +8899,9 @@ void GLES2DecoderImpl::DoLinkProgram(GLuint program_id) {
void GLES2DecoderImpl::DoOverlayPromotionHintCHROMIUM(GLuint client_id,
GLboolean promotion_hint,
GLint display_x,
- GLint display_y) {
+ GLint display_y,
+ GLint display_width,
+ GLint display_height) {
if (client_id == 0)
return;
@@ -8832,7 +8920,8 @@ void GLES2DecoderImpl::DoOverlayPromotionHintCHROMIUM(GLuint client_id,
return;
}
- image->NotifyPromotionHint(promotion_hint != GL_FALSE, display_x, display_y);
+ image->NotifyPromotionHint(promotion_hint != GL_FALSE, display_x, display_y,
+ display_width, display_height);
}
void GLES2DecoderImpl::DoSetDrawRectangleCHROMIUM(GLint x,
@@ -8854,6 +8943,9 @@ void GLES2DecoderImpl::DoSetDrawRectangleCHROMIUM(GLint x,
if (!surface_->SetDrawRectangle(rect)) {
LOCAL_SET_GL_ERROR(GL_INVALID_OPERATION, "glSetDrawRectangleCHROMIUM",
"failed on surface");
+ LOG(ERROR) << "Context lost because SetDrawRectangleCHROMIUM failed.";
+ MarkContextLost(error::kUnknown);
+ group_->LoseContexts(error::kUnknown);
}
OnFboChanged();
}
@@ -10023,7 +10115,11 @@ void GLES2DecoderImpl::RestoreStateForAttrib(
// when running on desktop GL with compatibility profile because it will
// never be re-enabled.
if (attrib_index != 0 || gl_version_info().BehavesLikeGLES()) {
- if (attrib->enabled()) {
+ // Restore the vertex attrib array enable-state according to
+ // the VertexAttrib enabled_in_driver value (which really represents the
+ // state of the virtual context - not the driver - notably, above the
+ // vertex array object emulation layer).
+ if (attrib->enabled_in_driver()) {
glEnableVertexAttribArray(attrib_index);
} else {
glDisableVertexAttribArray(attrib_index);
@@ -10563,6 +10659,9 @@ void GLES2DecoderImpl::DoTransformFeedbackVaryings(
scoped_refptr<ShaderTranslatorInterface> GLES2DecoderImpl::GetTranslator(
GLenum type) {
+ if (!InitializeShaderTranslator()) {
+ return nullptr;
+ }
return type == GL_VERTEX_SHADER ? vertex_translator_ : fragment_translator_;
}
@@ -12244,6 +12343,48 @@ error::Error GLES2DecoderImpl::HandleScheduleDCLayerCHROMIUM(
return error::kNoError;
}
+error::Error GLES2DecoderImpl::HandleSetColorSpaceForScanoutCHROMIUM(
+ uint32_t immediate_data_size,
+ const volatile void* cmd_data) {
+ const volatile gles2::cmds::SetColorSpaceForScanoutCHROMIUM& c = *static_cast<
+ const volatile gles2::cmds::SetColorSpaceForScanoutCHROMIUM*>(cmd_data);
+
+ GLuint texture_id = c.texture_id;
+ GLsizei color_space_size = c.color_space_size;
+ const char* data = static_cast<const char*>(
+ GetAddressAndCheckSize(c.shm_id, c.shm_offset, color_space_size));
+ if (!data)
+ return error::kOutOfBounds;
+
+ // Make a copy to reduce the risk of a time-of-check-to-time-of-use attack.
+ std::vector<char> color_space_data(data, data + color_space_size);
+ base::Pickle color_space_pickle(color_space_data.data(), color_space_size);
+ base::PickleIterator iterator(color_space_pickle);
+ gfx::ColorSpace color_space;
+ if (!IPC::ParamTraits<gfx::ColorSpace>::Read(&color_space_pickle, &iterator,
+ &color_space))
+ return error::kOutOfBounds;
+
+ scoped_refptr<gl::GLImage> image;
+ TextureRef* ref = texture_manager()->GetTexture(texture_id);
+ if (!ref) {
+ LOCAL_SET_GL_ERROR(GL_INVALID_VALUE, "glSetColorSpaceForScanoutCHROMIUM",
+ "unknown texture");
+ return error::kNoError;
+ }
+ Texture::ImageState image_state;
+ image =
+ ref->texture()->GetLevelImage(ref->texture()->target(), 0, &image_state);
+ if (!image) {
+ LOCAL_SET_GL_ERROR(GL_INVALID_VALUE, "glSetColorSpaceForScanoutCHROMIUM",
+ "unsupported texture format");
+ return error::kNoError;
+ }
+
+ image->SetColorSpaceForScanout(color_space);
+ return error::kNoError;
+}
+
void GLES2DecoderImpl::DoScheduleCALayerInUseQueryCHROMIUM(
GLsizei count,
const volatile GLuint* textures) {
@@ -13505,6 +13646,66 @@ bool GLES2DecoderImpl::ValidateCompressedTexSubDimensions(
}
return true;
}
+ case GL_COMPRESSED_RGBA_ASTC_4x4_KHR:
+ case GL_COMPRESSED_RGBA_ASTC_5x4_KHR:
+ case GL_COMPRESSED_RGBA_ASTC_5x5_KHR:
+ case GL_COMPRESSED_RGBA_ASTC_6x5_KHR:
+ case GL_COMPRESSED_RGBA_ASTC_6x6_KHR:
+ case GL_COMPRESSED_RGBA_ASTC_8x5_KHR:
+ case GL_COMPRESSED_RGBA_ASTC_8x6_KHR:
+ case GL_COMPRESSED_RGBA_ASTC_8x8_KHR:
+ case GL_COMPRESSED_RGBA_ASTC_10x5_KHR:
+ case GL_COMPRESSED_RGBA_ASTC_10x6_KHR:
+ case GL_COMPRESSED_RGBA_ASTC_10x8_KHR:
+ case GL_COMPRESSED_RGBA_ASTC_10x10_KHR:
+ case GL_COMPRESSED_RGBA_ASTC_12x10_KHR:
+ case GL_COMPRESSED_RGBA_ASTC_12x12_KHR:
+ case GL_COMPRESSED_SRGB8_ALPHA8_ASTC_4x4_KHR:
+ case GL_COMPRESSED_SRGB8_ALPHA8_ASTC_5x4_KHR:
+ case GL_COMPRESSED_SRGB8_ALPHA8_ASTC_5x5_KHR:
+ case GL_COMPRESSED_SRGB8_ALPHA8_ASTC_6x5_KHR:
+ case GL_COMPRESSED_SRGB8_ALPHA8_ASTC_6x6_KHR:
+ case GL_COMPRESSED_SRGB8_ALPHA8_ASTC_8x5_KHR:
+ case GL_COMPRESSED_SRGB8_ALPHA8_ASTC_8x6_KHR:
+ case GL_COMPRESSED_SRGB8_ALPHA8_ASTC_8x8_KHR:
+ case GL_COMPRESSED_SRGB8_ALPHA8_ASTC_10x5_KHR:
+ case GL_COMPRESSED_SRGB8_ALPHA8_ASTC_10x6_KHR:
+ case GL_COMPRESSED_SRGB8_ALPHA8_ASTC_10x8_KHR:
+ case GL_COMPRESSED_SRGB8_ALPHA8_ASTC_10x10_KHR:
+ case GL_COMPRESSED_SRGB8_ALPHA8_ASTC_12x10_KHR:
+ case GL_COMPRESSED_SRGB8_ALPHA8_ASTC_12x12_KHR: {
+ const int index =
+ (format < GL_COMPRESSED_SRGB8_ALPHA8_ASTC_4x4_KHR)
+ ? static_cast<int>(format - GL_COMPRESSED_RGBA_ASTC_4x4_KHR)
+ : static_cast<int>(format -
+ GL_COMPRESSED_SRGB8_ALPHA8_ASTC_4x4_KHR);
+
+ const int kBlockWidth = kASTCBlockArray[index].blockWidth;
+ const int kBlockHeight = kASTCBlockArray[index].blockHeight;
+
+ if ((xoffset % kBlockWidth) || (yoffset % kBlockHeight)) {
+ LOCAL_SET_GL_ERROR(GL_INVALID_OPERATION, function_name,
+ "xoffset or yoffset not a multiple of the block size");
+ return false;
+ }
+ GLsizei tex_width = 0;
+ GLsizei tex_height = 0;
+ if (!texture->GetLevelSize(target, level, &tex_width, &tex_height,
+ nullptr) ||
+ width - xoffset > tex_width || height - yoffset > tex_height) {
+ LOCAL_SET_GL_ERROR(GL_INVALID_OPERATION, function_name,
+ "dimensions out of range");
+ return false;
+ }
+ if ((((width % kBlockWidth) != 0) && (width + xoffset != tex_width)) ||
+ (((height % kBlockHeight) != 0) &&
+ (height + yoffset != tex_height))) {
+ LOCAL_SET_GL_ERROR(GL_INVALID_OPERATION, function_name,
+ "dimensions do not align to a block boundary");
+ return false;
+ }
+ return true;
+ }
case GL_ATC_RGB_AMD:
case GL_ATC_RGBA_EXPLICIT_ALPHA_AMD:
case GL_ATC_RGBA_INTERPOLATED_ALPHA_AMD: {
@@ -13572,6 +13773,8 @@ bool GLES2DecoderImpl::ValidateCompressedTexSubDimensions(
return true;
}
default:
+ LOCAL_SET_GL_ERROR(GL_INVALID_ENUM, function_name,
+ "unknown compressed texture format");
return false;
}
}
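
As a self-contained illustration of the alignment rule enforced above (offsets must be block-aligned, and a partial block is only allowed on the right or bottom edge of the level), here is a standalone sketch; it uses the conventional xoffset + width bound rather than the width - xoffset form in the hunk, and the block sizes stand in for the kASTCBlockArray lookup:

    #include <cstdio>

    // Returns true when an ASTC sub-image update is legal for a level of
    // tex_w x tex_h texels and a block size of block_w x block_h.
    bool AstcSubImageOk(int xoffset, int yoffset, int width, int height,
                        int block_w, int block_h, int tex_w, int tex_h) {
      if (xoffset % block_w || yoffset % block_h)
        return false;  // offsets must sit on a block boundary
      if (xoffset + width > tex_w || yoffset + height > tex_h)
        return false;  // update must stay inside the level
      if (width % block_w && xoffset + width != tex_w)
        return false;  // partial blocks only at the right edge
      if (height % block_h && yoffset + height != tex_h)
        return false;  // partial blocks only at the bottom edge
      return true;
    }

    int main() {
      // 10x8 blocks in a 25x17 level: an update at (10, 8) is fine, but one
      // at (5, 8) is rejected because xoffset is not block-aligned.
      std::printf("%d %d\n",
                  AstcSubImageOk(10, 8, 10, 8, 10, 8, 25, 17),
                  AstcSubImageOk(5, 8, 10, 8, 10, 8, 25, 17));
      return 0;
    }
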
@@ -15836,7 +16039,7 @@ error::Error GLES2DecoderImpl::HandleRequestExtensionCHROMIUM(
frag_depth_explicitly_enabled_ |= desire_frag_depth;
draw_buffers_explicitly_enabled_ |= desire_draw_buffers;
shader_texture_lod_explicitly_enabled_ |= desire_shader_texture_lod;
- InitializeShaderTranslator();
+ DestroyShaderTranslator();
}
if (feature_str.find("GL_CHROMIUM_color_buffer_float_rgba ") !=
@@ -16991,8 +17194,8 @@ void GLES2DecoderImpl::DoCopyTextureCHROMIUM(
bool unpack_premultiply_alpha_change =
(unpack_premultiply_alpha ^ unpack_unmultiply_alpha) != 0;
// TODO(qiankun.miao@intel.com): Support level > 0 for CopyTexImage.
- if (image && dest_level == 0 && !unpack_flip_y &&
- !unpack_premultiply_alpha_change) {
+ if (image && internal_format == source_internal_format && dest_level == 0 &&
+ !unpack_flip_y && !unpack_premultiply_alpha_change) {
glBindTexture(dest_binding_target, dest_texture->service_id());
if (image->CopyTexImage(dest_target))
return;
@@ -17201,8 +17404,8 @@ void GLES2DecoderImpl::DoCopySubTextureCHROMIUM(
bool unpack_premultiply_alpha_change =
(unpack_premultiply_alpha ^ unpack_unmultiply_alpha) != 0;
// TODO(qiankun.miao@intel.com): Support level > 0 for CopyTexSubImage.
- if (image && dest_level == 0 && !unpack_flip_y &&
- !unpack_premultiply_alpha_change) {
+ if (image && dest_internal_format == source_internal_format &&
+ dest_level == 0 && !unpack_flip_y && !unpack_premultiply_alpha_change) {
ScopedTextureBinder binder(&state_, dest_texture->service_id(),
dest_binding_target);
if (image->CopyTexSubImage(dest_target, gfx::Point(xoffset, yoffset),
@@ -17873,6 +18076,26 @@ void GLES2DecoderImpl::DoPushGroupMarkerEXT(
void GLES2DecoderImpl::DoPopGroupMarkerEXT(void) {
}
+void GLES2DecoderImpl::BindImage(uint32_t client_texture_id,
+ uint32_t texture_target,
+ gl::GLImage* image,
+ bool can_bind_to_sampler) {
+ TextureRef* ref = texture_manager()->GetTexture(client_texture_id);
+ if (!ref) {
+ return;
+ }
+
+ GLenum bind_target = GLES2Util::GLFaceTargetToTextureTarget(texture_target);
+ if (ref->texture()->target() != bind_target) {
+ return;
+ }
+
+ texture_manager()->SetLevelImage(ref, texture_target, 0, image,
+ can_bind_to_sampler
+ ? gpu::gles2::Texture::BOUND
+ : gpu::gles2::Texture::UNBOUND);
+}
+
void GLES2DecoderImpl::DoBindTexImage2DCHROMIUM(
GLenum target, GLint image_id) {
TRACE_EVENT0("gpu", "GLES2DecoderImpl::DoBindTexImage2DCHROMIUM");
@@ -19803,6 +20026,177 @@ error::Error GLES2DecoderImpl::HandleLockDiscardableTextureCHROMIUM(
return error::kNoError;
}
+void GLES2DecoderImpl::DoBeginRasterCHROMIUM(GLuint texture_id,
+ GLuint sk_color,
+ GLuint msaa_sample_count,
+ GLboolean can_use_lcd_text,
+ GLboolean use_distance_field_text,
+ GLint pixel_config) {
+ if (!gr_context_) {
+ LOCAL_SET_GL_ERROR(GL_INVALID_OPERATION, "glBeginRasterCHROMIUM",
+ "chromium_raster_transport not enabled via attribs");
+ return;
+ }
+ if (sk_surface_) {
+ LOCAL_SET_GL_ERROR(GL_INVALID_OPERATION, "glBeginRasterCHROMIUM",
+ "BeginRasterCHROMIUM without EndRasterCHROMIUM");
+ return;
+ }
+
+ gr_context_->resetContext();
+
+ // This function should look identical to
+ // ResourceProvider::ScopedSkSurfaceProvider.
+ GrGLTextureInfo texture_info;
+ auto* texture_ref = GetTexture(texture_id);
+ if (!texture_ref) {
+ LOCAL_SET_GL_ERROR(GL_INVALID_OPERATION, "glBeginRasterCHROMIUM",
+ "unknown texture id");
+ return;
+ }
+ auto* texture = texture_ref->texture();
+ int width;
+ int height;
+ int depth;
+ if (!texture->GetLevelSize(texture->target(), 0, &width, &height, &depth)) {
+ LOCAL_SET_GL_ERROR(GL_INVALID_OPERATION, "glBeginRasterCHROMIUM",
+ "missing texture size info");
+ return;
+ }
+ GLenum type;
+ GLenum internal_format;
+ if (!texture->GetLevelType(texture->target(), 0, &type, &internal_format)) {
+ LOCAL_SET_GL_ERROR(GL_INVALID_OPERATION, "glBeginRasterCHROMIUM",
+ "missing texture type info");
+ return;
+ }
+ texture_info.fID = texture_ref->service_id();
+ texture_info.fTarget = texture->target();
+
+ if (texture->target() != GL_TEXTURE_2D &&
+ texture->target() != GL_TEXTURE_RECTANGLE_ARB) {
+ LOCAL_SET_GL_ERROR(GL_INVALID_OPERATION, "glBeginRasterCHROMIUM",
+ "invalid texture target");
+ return;
+ }
+
+ switch (pixel_config) {
+ case kRGBA_4444_GrPixelConfig:
+ case kRGBA_8888_GrPixelConfig:
+ case kSRGBA_8888_GrPixelConfig:
+ if (internal_format != GL_RGBA8_OES && internal_format != GL_RGBA) {
+ LOCAL_SET_GL_ERROR(GL_INVALID_OPERATION, "glBeginRasterCHROMIUM",
+ "pixel config mismatch");
+ return;
+ }
+ break;
+ case kBGRA_8888_GrPixelConfig:
+ case kSBGRA_8888_GrPixelConfig:
+ if (internal_format != GL_BGRA_EXT && internal_format != GL_BGRA8_EXT) {
+ LOCAL_SET_GL_ERROR(GL_INVALID_OPERATION, "glBeginRasterCHROMIUM",
+ "pixel config mismatch");
+ return;
+ }
+ break;
+ default:
+ LOCAL_SET_GL_ERROR(GL_INVALID_OPERATION, "glBeginRasterCHROMIUM",
+ "unsupported pixel config");
+ return;
+ }
+
+ GrBackendTexture gr_texture(
+ width, height, static_cast<GrPixelConfig>(pixel_config), texture_info);
+
+ uint32_t flags =
+ use_distance_field_text ? SkSurfaceProps::kUseDistanceFieldFonts_Flag : 0;
+ // Use unknown pixel geometry to disable LCD text.
+ SkSurfaceProps surface_props(flags, kUnknown_SkPixelGeometry);
+ if (can_use_lcd_text) {
+ // LegacyFontHost will get LCD text and skia figures out what type to use.
+ surface_props =
+ SkSurfaceProps(flags, SkSurfaceProps::kLegacyFontHost_InitType);
+ }
+
+ // Resolve requested msaa samples with GrGpu capabilities.
+ int final_msaa_count = gr_context_->caps()->getSampleCount(
+ msaa_sample_count, gr_texture.config());
+ sk_surface_ = SkSurface::MakeFromBackendTextureAsRenderTarget(
+ gr_context_.get(), gr_texture, kTopLeft_GrSurfaceOrigin, final_msaa_count,
+ nullptr, &surface_props);
+
+ if (!sk_surface_) {
+ LOCAL_SET_GL_ERROR(GL_INVALID_OPERATION, "glBeginRasterCHROMIUM",
+ "failed to create surface");
+ return;
+ }
+
+ // All-or-nothing clearing, as there is no way to validate the client's
+ // input on what the "used" part of the texture is.
+ if (texture->IsLevelCleared(texture->target(), 0))
+ return;
+
+ // TODO(enne): this doesn't handle the case where the background color
+ // changes and so any extra pixels outside the raster area that get
+ // sampled may be incorrect.
+ sk_surface_->getCanvas()->drawColor(sk_color);
+ texture_manager()->SetLevelCleared(texture_ref, texture->target(), 0, true);
+}
+
+error::Error GLES2DecoderImpl::HandleRasterCHROMIUM(
+ uint32_t immediate_data_size,
+ const volatile void* cmd_data) {
+ if (!sk_surface_) {
+ LOG(ERROR) << "RasterCHROMIUM without BeginRasterCHROMIUM";
+ return error::kInvalidArguments;
+ }
+
+ alignas(
+ cc::PaintOpBuffer::PaintOpAlign) char data[sizeof(cc::LargestPaintOp)];
+ auto& c = *static_cast<const volatile gles2::cmds::RasterCHROMIUM*>(cmd_data);
+ size_t size = c.data_size;
+ char* buffer =
+ GetSharedMemoryAs<char*>(c.list_shm_id, c.list_shm_offset, size);
+ if (!buffer)
+ return error::kOutOfBounds;
+
+ SkCanvas* canvas = sk_surface_->getCanvas();
+ SkMatrix original_ctm;
+ cc::PlaybackParams playback_params(nullptr, original_ctm);
+
+ int op_idx = 0;
+ while (size > 4) {
+ size_t skip = 0;
+ cc::PaintOp* deserialized_op = cc::PaintOp::Deserialize(
+ buffer, size, &data[0], sizeof(cc::LargestPaintOp), &skip);
+ if (!deserialized_op) {
+ LOG(ERROR) << "RasterCHROMIUM: bad op: " << op_idx;
+ return error::kInvalidArguments;
+ }
+
+ deserialized_op->Raster(canvas, playback_params);
+ deserialized_op->DestroyThis();
+
+ size -= skip;
+ buffer += skip;
+ op_idx++;
+ }
+
+ return error::kNoError;
+}
+
+void GLES2DecoderImpl::DoEndRasterCHROMIUM() {
+ if (!sk_surface_) {
+ LOCAL_SET_GL_ERROR(GL_INVALID_OPERATION, "glEndRasterCHROMIUM",
+ "EndRasterCHROMIUM without BeginRasterCHROMIUM");
+ return;
+ }
+
+ sk_surface_->prepareForExternalIO();
+ sk_surface_.reset();
+
+ RestoreState(nullptr);
+}
+
// Include the auto-generated part of this file. We split this because it means
// we can easily edit the non-auto generated parts right here in this file
// instead of having to edit some template or the code generator.
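
The BeginRaster / Raster / EndRaster handlers added above form a bracketed sequence; a rough sketch of the client-side usage follows, where glBeginRasterCHROMIUM and glEndRasterCHROMIUM mirror the Do* parameters shown above, and the glRasterCHROMIUM signature is an assumption based on the (data_size, list_shm_id, list_shm_offset) fields the decoder reads:

    #include <GLES2/gl2.h>
    #include <GLES2/gl2extchromium.h>                   // raster entry points (assumed)
    #include "third_party/skia/include/gpu/GrTypes.h"   // GrPixelConfig values

    // Hypothetical driver of the OOP-raster path: set up the target texture,
    // replay a serialized cc::PaintOpBuffer placed in shared memory, then
    // flush the SkSurface via EndRaster.
    void RasterTile(GLuint texture_id, GLuint data_size, GLuint list_shm_id,
                    GLuint list_shm_offset) {
      glBeginRasterCHROMIUM(texture_id, /*sk_color=*/0xFF000000u,
                            /*msaa_sample_count=*/0,
                            /*can_use_lcd_text=*/GL_FALSE,
                            /*use_distance_field_text=*/GL_FALSE,
                            /*pixel_config=*/kRGBA_8888_GrPixelConfig);
      glRasterCHROMIUM(data_size, list_shm_id, list_shm_offset);  // assumed
      glEndRasterCHROMIUM();
    }
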
diff --git a/chromium/gpu/command_buffer/service/gles2_cmd_decoder.h b/chromium/gpu/command_buffer/service/gles2_cmd_decoder.h
index 8035e93ae0f..688537a3ba3 100644
--- a/chromium/gpu/command_buffer/service/gles2_cmd_decoder.h
+++ b/chromium/gpu/command_buffer/service/gles2_cmd_decoder.h
@@ -27,6 +27,7 @@
namespace gl {
class GLContext;
class GLSurface;
+class GLImage;
}
namespace gfx {
@@ -50,6 +51,7 @@ class Logger;
class QueryManager;
class ShaderTranslatorInterface;
class Texture;
+class TextureBase;
class TransformFeedbackManager;
class VertexArrayManager;
struct ContextCreationAttribHelper;
@@ -113,8 +115,7 @@ class GPU_EXPORT GLES2DecoderClient {
// This class implements the AsyncAPIInterface interface, decoding GLES2
// commands and calling GL.
-class GPU_EXPORT GLES2Decoder : public CommonDecoder,
- NON_EXPORTED_BASE(public AsyncAPIInterface) {
+class GPU_EXPORT GLES2Decoder : public CommonDecoder, public AsyncAPIInterface {
public:
typedef error::Error Error;
@@ -272,6 +273,10 @@ class GPU_EXPORT GLES2Decoder : public CommonDecoder,
virtual bool GetServiceTextureId(uint32_t client_texture_id,
uint32_t* service_texture_id);
+ // Gets the texture object associated with the client ID. null is returned on
+ // failure or if the texture has not been bound yet.
+ virtual TextureBase* GetTextureBase(uint32_t client_id);
+
// Clears a level sub area of a 2D texture.
// Returns false if a GL error should be generated.
virtual bool ClearLevel(Texture* texture,
@@ -331,6 +336,11 @@ class GPU_EXPORT GLES2Decoder : public CommonDecoder,
virtual scoped_refptr<ShaderTranslatorInterface> GetTranslator(
unsigned int type) = 0;
+ virtual void BindImage(uint32_t client_texture_id,
+ uint32_t texture_target,
+ gl::GLImage* image,
+ bool can_bind_to_sampler) = 0;
+
protected:
explicit GLES2Decoder(CommandBufferServiceBase* command_buffer_service);
diff --git a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_autogen.h b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_autogen.h
index fef99fa0728..9bbab23274f 100644
--- a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_autogen.h
+++ b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_autogen.h
@@ -4571,6 +4571,11 @@ error::Error GLES2DecoderImpl::HandleCopyTextureCHROMIUM(
static_cast<GLboolean>(c.unpack_premultiply_alpha);
GLboolean unpack_unmultiply_alpha =
static_cast<GLboolean>(c.unpack_unmultiply_alpha);
+ if (!validators_->texture_target.IsValid(dest_target)) {
+ LOCAL_SET_GL_ERROR_INVALID_ENUM("glCopyTextureCHROMIUM", dest_target,
+ "dest_target");
+ return error::kNoError;
+ }
if (!validators_->texture_internal_format.IsValid(internalformat)) {
LOCAL_SET_GL_ERROR(GL_INVALID_VALUE, "glCopyTextureCHROMIUM",
"internalformat GL_INVALID_VALUE");
@@ -4609,6 +4614,11 @@ error::Error GLES2DecoderImpl::HandleCopySubTextureCHROMIUM(
static_cast<GLboolean>(c.unpack_premultiply_alpha);
GLboolean unpack_unmultiply_alpha =
static_cast<GLboolean>(c.unpack_unmultiply_alpha);
+ if (!validators_->texture_target.IsValid(dest_target)) {
+ LOCAL_SET_GL_ERROR_INVALID_ENUM("glCopySubTextureCHROMIUM", dest_target,
+ "dest_target");
+ return error::kNoError;
+ }
if (width < 0) {
LOCAL_SET_GL_ERROR(GL_INVALID_VALUE, "glCopySubTextureCHROMIUM",
"width < 0");
@@ -4782,6 +4792,12 @@ error::Error GLES2DecoderImpl::HandleBindTexImage2DWithInternalformatCHROMIUM(
"glBindTexImage2DWithInternalformatCHROMIUM", target, "target");
return error::kNoError;
}
+ if (!validators_->texture_internal_format.IsValid(internalformat)) {
+ LOCAL_SET_GL_ERROR_INVALID_ENUM(
+ "glBindTexImage2DWithInternalformatCHROMIUM", internalformat,
+ "internalformat");
+ return error::kNoError;
+ }
DoBindTexImage2DWithInternalformatCHROMIUM(target, internalformat, imageId);
return error::kNoError;
}
@@ -5129,7 +5145,10 @@ error::Error GLES2DecoderImpl::HandleOverlayPromotionHintCHROMIUM(
GLboolean promotion_hint = static_cast<GLboolean>(c.promotion_hint);
GLint display_x = static_cast<GLint>(c.display_x);
GLint display_y = static_cast<GLint>(c.display_y);
- DoOverlayPromotionHintCHROMIUM(texture, promotion_hint, display_x, display_y);
+ GLint display_width = static_cast<GLint>(c.display_width);
+ GLint display_height = static_cast<GLint>(c.display_height);
+ DoOverlayPromotionHintCHROMIUM(texture, promotion_hint, display_x, display_y,
+ display_width, display_height);
return error::kNoError;
}
@@ -5187,6 +5206,39 @@ error::Error GLES2DecoderImpl::HandleSetEnableDCLayersCHROMIUM(
return error::kNoError;
}
+error::Error GLES2DecoderImpl::HandleBeginRasterCHROMIUM(
+ uint32_t immediate_data_size,
+ const volatile void* cmd_data) {
+ const volatile gles2::cmds::BeginRasterCHROMIUM& c =
+ *static_cast<const volatile gles2::cmds::BeginRasterCHROMIUM*>(cmd_data);
+ if (!features().chromium_raster_transport) {
+ return error::kUnknownCommand;
+ }
+
+ GLuint texture_id = static_cast<GLuint>(c.texture_id);
+ GLuint sk_color = static_cast<GLuint>(c.sk_color);
+ GLuint msaa_sample_count = static_cast<GLuint>(c.msaa_sample_count);
+ GLboolean can_use_lcd_text = static_cast<GLboolean>(c.can_use_lcd_text);
+ GLboolean use_distance_field_text =
+ static_cast<GLboolean>(c.use_distance_field_text);
+ GLint pixel_config = static_cast<GLint>(c.pixel_config);
+ DoBeginRasterCHROMIUM(texture_id, sk_color, msaa_sample_count,
+ can_use_lcd_text, use_distance_field_text,
+ pixel_config);
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderImpl::HandleEndRasterCHROMIUM(
+ uint32_t immediate_data_size,
+ const volatile void* cmd_data) {
+ if (!features().chromium_raster_transport) {
+ return error::kUnknownCommand;
+ }
+
+ DoEndRasterCHROMIUM();
+ return error::kNoError;
+}
+
bool GLES2DecoderImpl::SetCapabilityState(GLenum cap, bool enabled) {
switch (cap) {
case GL_BLEND:
diff --git a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_mock.h b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_mock.h
index 8ecfab802c7..a694ba2e8e2 100644
--- a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_mock.h
+++ b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_mock.h
@@ -144,6 +144,11 @@ class MockGLES2Decoder : public GLES2Decoder {
MOCK_CONST_METHOD0(WasContextLost, bool());
MOCK_CONST_METHOD0(WasContextLostByRobustnessExtension, bool());
MOCK_METHOD1(MarkContextLost, void(gpu::error::ContextLostReason reason));
+ MOCK_METHOD4(BindImage,
+ void(uint32_t client_texture_id,
+ uint32_t texture_target,
+ gl::GLImage* image,
+ bool can_bind_to_sampler));
private:
base::WeakPtrFactory<MockGLES2Decoder> weak_ptr_factory_;
diff --git a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough.cc b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough.cc
index fa071874c4e..f033ced36c0 100644
--- a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough.cc
+++ b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough.cc
@@ -4,6 +4,7 @@
#include "gpu/command_buffer/service/gles2_cmd_decoder_passthrough.h"
+#include "base/callback.h"
#include "base/strings/string_split.h"
#include "gpu/command_buffer/service/command_buffer_service.h"
#include "gpu/command_buffer/service/feature_info.h"
@@ -11,6 +12,10 @@
#include "gpu/command_buffer/service/gpu_tracer.h"
#include "ui/gl/gl_version_info.h"
+#if defined(USE_EGL)
+#include "ui/gl/angle_platform_impl.h"
+#endif // defined(USE_EGL)
+
namespace gpu {
namespace gles2 {
@@ -21,7 +26,8 @@ void DeleteServiceObjects(ClientServiceMap<ClientType, ServiceType>* id_map,
DeleteFunction delete_function) {
if (have_context) {
for (auto client_service_id_pair : *id_map) {
- delete_function(client_service_id_pair.second);
+ delete_function(client_service_id_pair.first,
+ client_service_id_pair.second);
}
}
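
Every DeleteServiceObjects call site in this file now passes a two-argument functor; the template change above is only partially visible, so for reference here is a minimal sketch of the shape it implies (the iteration yielding client/service pairs comes from the loop shown above; the final map reset is elided because its exact API is not shown in this hunk):

    // Sketch only: call |delete_function(client_id, service_id)| for each
    // tracked object while a GL context is still current.
    template <typename ClientType, typename ServiceType, typename DeleteFunction>
    void DeleteServiceObjects(ClientServiceMap<ClientType, ServiceType>* id_map,
                              bool have_context,
                              DeleteFunction delete_function) {
      if (have_context) {
        for (auto client_service_id_pair : *id_map) {
          delete_function(client_service_id_pair.first,
                          client_service_id_pair.second);
        }
      }
      // ...map cleanup as before (not shown in this hunk)...
    }
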
@@ -47,22 +53,34 @@ PassthroughResources::PassthroughResources() {}
PassthroughResources::~PassthroughResources() {}
void PassthroughResources::Destroy(bool have_context) {
- DeleteServiceObjects(&texture_id_map, have_context,
- [](GLuint texture) { glDeleteTextures(1, &texture); });
- DeleteServiceObjects(&buffer_id_map, have_context,
- [](GLuint buffer) { glDeleteBuffersARB(1, &buffer); });
+ // Only delete textures that are not referenced by a TexturePassthrough
+ // object; those handle their own deletion once all references are lost.
+ DeleteServiceObjects(
+ &texture_id_map, have_context, [this](GLuint client_id, GLuint texture) {
+ if (texture_object_map.find(client_id) == texture_object_map.end()) {
+ glDeleteTextures(1, &texture);
+ }
+ });
+ DeleteServiceObjects(
+ &buffer_id_map, have_context,
+ [](GLuint client_id, GLuint buffer) { glDeleteBuffersARB(1, &buffer); });
+ DeleteServiceObjects(&renderbuffer_id_map, have_context,
+ [](GLuint client_id, GLuint renderbuffer) {
+ glDeleteRenderbuffersEXT(1, &renderbuffer);
+ });
+ DeleteServiceObjects(
+ &sampler_id_map, have_context,
+ [](GLuint client_id, GLuint sampler) { glDeleteSamplers(1, &sampler); });
DeleteServiceObjects(
- &renderbuffer_id_map, have_context,
- [](GLuint renderbuffer) { glDeleteRenderbuffersEXT(1, &renderbuffer); });
- DeleteServiceObjects(&sampler_id_map, have_context,
- [](GLuint sampler) { glDeleteSamplers(1, &sampler); });
- DeleteServiceObjects(&program_id_map, have_context,
- [](GLuint program) { glDeleteProgram(program); });
- DeleteServiceObjects(&shader_id_map, have_context,
- [](GLuint shader) { glDeleteShader(shader); });
- DeleteServiceObjects(&sync_id_map, have_context, [](uintptr_t sync) {
- glDeleteSync(reinterpret_cast<GLsync>(sync));
- });
+ &program_id_map, have_context,
+ [](GLuint client_id, GLuint program) { glDeleteProgram(program); });
+ DeleteServiceObjects(
+ &shader_id_map, have_context,
+ [](GLuint client_id, GLuint shader) { glDeleteShader(shader); });
+ DeleteServiceObjects(&sync_id_map, have_context,
+ [](GLuint client_id, uintptr_t sync) {
+ glDeleteSync(reinterpret_cast<GLsync>(sync));
+ });
if (!have_context) {
for (auto passthrough_texture : texture_object_map) {
@@ -95,6 +113,18 @@ GLES2DecoderPassthroughImpl::ActiveQuery::operator=(const ActiveQuery&) =
GLES2DecoderPassthroughImpl::ActiveQuery&
GLES2DecoderPassthroughImpl::ActiveQuery::operator=(ActiveQuery&&) = default;
+GLES2DecoderPassthroughImpl::BoundTexture::BoundTexture() = default;
+GLES2DecoderPassthroughImpl::BoundTexture::~BoundTexture() = default;
+GLES2DecoderPassthroughImpl::BoundTexture::BoundTexture(const BoundTexture&) =
+ default;
+GLES2DecoderPassthroughImpl::BoundTexture::BoundTexture(BoundTexture&&) =
+ default;
+GLES2DecoderPassthroughImpl::BoundTexture&
+GLES2DecoderPassthroughImpl::BoundTexture::operator=(const BoundTexture&) =
+ default;
+GLES2DecoderPassthroughImpl::BoundTexture&
+GLES2DecoderPassthroughImpl::BoundTexture::operator=(BoundTexture&&) = default;
+
GLES2DecoderPassthroughImpl::GLES2DecoderPassthroughImpl(
GLES2DecoderClient* client,
CommandBufferServiceBase* command_buffer_service,
@@ -108,7 +138,7 @@ GLES2DecoderPassthroughImpl::GLES2DecoderPassthroughImpl(
context_(),
offscreen_(false),
group_(group),
- feature_info_(new FeatureInfo),
+ feature_info_(new FeatureInfo(group->feature_info()->workarounds())),
gpu_decoder_category_(TRACE_EVENT_API_GET_CATEGORY_GROUP_ENABLED(
TRACE_DISABLED_BY_DEFAULT("gpu_decoder"))),
gpu_trace_level_(2),
@@ -188,6 +218,10 @@ GLES2Decoder::Error GLES2DecoderPassthroughImpl::DoCommandsImpl(
}
}
+ if (DebugImpl) {
+ VerifyServiceTextureObjectsExist();
+ }
+
uint32_t immediate_data_size = (arg_count - info_arg_count) *
sizeof(CommandBufferEntry); // NOLINT
if (info.cmd_handler) {
@@ -270,6 +304,11 @@ bool GLES2DecoderPassthroughImpl::Initialize(
return false;
}
+ if (attrib_helper.enable_oop_rasterization) {
+ Destroy(true);
+ return false;
+ }
+
bind_generates_resource_ = group_->bind_generates_resource();
resources_ = group_->passthrough_resources();
@@ -281,18 +320,39 @@ bool GLES2DecoderPassthroughImpl::Initialize(
glGetIntegerv(GL_MAX_COMBINED_TEXTURE_IMAGE_UNITS, &num_texture_units);
active_texture_unit_ = 0;
- bound_textures_[GL_TEXTURE_2D].resize(num_texture_units, 0);
- bound_textures_[GL_TEXTURE_CUBE_MAP].resize(num_texture_units, 0);
+ bound_textures_[GL_TEXTURE_2D].resize(num_texture_units);
+ bound_textures_[GL_TEXTURE_CUBE_MAP].resize(num_texture_units);
if (feature_info_->gl_version_info().IsAtLeastGLES(3, 0)) {
- bound_textures_[GL_TEXTURE_2D_ARRAY].resize(num_texture_units, 0);
- bound_textures_[GL_TEXTURE_3D].resize(num_texture_units, 0);
+ bound_textures_[GL_TEXTURE_2D_ARRAY].resize(num_texture_units);
+ bound_textures_[GL_TEXTURE_3D].resize(num_texture_units);
}
if (feature_info_->gl_version_info().IsAtLeastGLES(3, 1)) {
- bound_textures_[GL_TEXTURE_2D_MULTISAMPLE].resize(num_texture_units, 0);
+ bound_textures_[GL_TEXTURE_2D_MULTISAMPLE].resize(num_texture_units);
}
if (feature_info_->feature_flags().oes_egl_image_external ||
feature_info_->feature_flags().nv_egl_stream_consumer_external) {
- bound_textures_[GL_TEXTURE_EXTERNAL_OES].resize(num_texture_units, 0);
+ bound_textures_[GL_TEXTURE_EXTERNAL_OES].resize(num_texture_units);
+ }
+
+ // Initialize the tracked buffer bindings
+ bound_buffers_[GL_ARRAY_BUFFER] = 0;
+ bound_buffers_[GL_ELEMENT_ARRAY_BUFFER] = 0;
+ if (feature_info_->gl_version_info().IsAtLeastGLES(3, 0) ||
+ feature_info_->feature_flags().ext_pixel_buffer_object) {
+ bound_buffers_[GL_PIXEL_PACK_BUFFER] = 0;
+ bound_buffers_[GL_PIXEL_UNPACK_BUFFER] = 0;
+ }
+ if (feature_info_->gl_version_info().IsAtLeastGLES(3, 0)) {
+ bound_buffers_[GL_COPY_READ_BUFFER] = 0;
+ bound_buffers_[GL_COPY_WRITE_BUFFER] = 0;
+ bound_buffers_[GL_TRANSFORM_FEEDBACK_BUFFER] = 0;
+ bound_buffers_[GL_UNIFORM_BUFFER] = 0;
+ }
+ if (feature_info_->gl_version_info().IsAtLeastGLES(3, 1)) {
+ bound_buffers_[GL_ATOMIC_COUNTER_BUFFER] = 0;
+ bound_buffers_[GL_SHADER_STORAGE_BUFFER] = 0;
+ bound_buffers_[GL_DRAW_INDIRECT_BUFFER] = 0;
+ bound_buffers_[GL_DISPATCH_INDIRECT_BUFFER] = 0;
}
if (group_->gpu_preferences().enable_gpu_driver_debug_logging &&
@@ -300,6 +360,11 @@ bool GLES2DecoderPassthroughImpl::Initialize(
InitializeGLDebugLogging();
}
+ if (feature_info_->feature_flags().chromium_texture_filtering_hint &&
+ feature_info_->feature_flags().is_swiftshader) {
+ glHint(GL_TEXTURE_FILTERING_HINT_CHROMIUM, GL_NICEST);
+ }
+
has_robustness_extension_ = feature_info_->feature_flags().khr_robustness ||
feature_info_->feature_flags().ext_robustness;
@@ -312,18 +377,30 @@ void GLES2DecoderPassthroughImpl::Destroy(bool have_context) {
FlushErrors();
}
- DeleteServiceObjects(
- &framebuffer_id_map_, have_context,
- [](GLuint framebuffer) { glDeleteFramebuffersEXT(1, &framebuffer); });
+ if (!have_context) {
+ for (const auto& bound_texture_type : bound_textures_) {
+ for (const auto& bound_texture : bound_texture_type.second) {
+ bound_texture.texture->MarkContextLost();
+ }
+ }
+ }
+ bound_textures_.clear();
+
+ DeleteServiceObjects(&framebuffer_id_map_, have_context,
+ [](GLuint client_id, GLuint framebuffer) {
+ glDeleteFramebuffersEXT(1, &framebuffer);
+ });
DeleteServiceObjects(&transform_feedback_id_map_, have_context,
- [](GLuint transform_feedback) {
+ [](GLuint client_id, GLuint transform_feedback) {
glDeleteTransformFeedbacks(1, &transform_feedback);
});
- DeleteServiceObjects(&query_id_map_, have_context,
- [](GLuint query) { glDeleteQueries(1, &query); });
DeleteServiceObjects(
- &vertex_array_id_map_, have_context,
- [](GLuint vertex_array) { glDeleteVertexArraysOES(1, &vertex_array); });
+ &query_id_map_, have_context,
+ [](GLuint client_id, GLuint query) { glDeleteQueries(1, &query); });
+ DeleteServiceObjects(&vertex_array_id_map_, have_context,
+ [](GLuint client_id, GLuint vertex_array) {
+ glDeleteVertexArraysOES(1, &vertex_array);
+ });
// Destroy the GPU Tracer which may own some in process GPU Timings.
if (gpu_tracer_) {
@@ -336,6 +413,13 @@ void GLES2DecoderPassthroughImpl::Destroy(bool have_context) {
surface_ = nullptr;
if (group_) {
+#if defined(USE_EGL)
+ // Clear the program binary caching callback.
+ if (group_->has_program_cache()) {
+ angle::ResetCacheProgramCallback();
+ }
+#endif // defined(USE_EGL)
+
group_->Destroy(this, have_context);
group_ = nullptr;
}
@@ -399,6 +483,15 @@ bool GLES2DecoderPassthroughImpl::MakeCurrent() {
return false;
}
+#if defined(USE_EGL)
+ // Establish the program binary caching callback.
+ if (group_->has_program_cache()) {
+ auto program_callback = base::BindRepeating(
+ &GLES2DecoderClient::CacheShader, base::Unretained(client_));
+ angle::SetCacheProgramCallback(program_callback);
+ }
+#endif // defined(USE_EGL)
+
return true;
}
@@ -479,9 +572,11 @@ gpu::Capabilities GLES2DecoderPassthroughImpl::GetCapabilities() {
caps.flips_vertically = !offscreen_ && surface_->FlipsVertically();
caps.multisample_compatibility =
feature_info_->feature_flags().ext_multisample_compatibility;
+ caps.dc_layers = !offscreen_ && surface_->SupportsDCLayers();
// TODO:
// caps.commit_overlay_planes
+ // caps.use_dc_overlays_for_video = surface_->UseOverlaysForVideo();
return caps;
}
@@ -592,6 +687,13 @@ bool GLES2DecoderPassthroughImpl::GetServiceTextureId(
service_texture_id);
}
+TextureBase* GLES2DecoderPassthroughImpl::GetTextureBase(uint32_t client_id) {
+ auto texture_object_iter = resources_->texture_object_map.find(client_id);
+ return (texture_object_iter != resources_->texture_object_map.end())
+ ? texture_object_iter->second.get()
+ : nullptr;
+}
+
bool GLES2DecoderPassthroughImpl::ClearLevel(Texture* texture,
unsigned target,
int level,
@@ -677,6 +779,51 @@ GLES2DecoderPassthroughImpl::GetTranslator(GLenum type) {
return nullptr;
}
+void GLES2DecoderPassthroughImpl::BindImage(uint32_t client_texture_id,
+ uint32_t texture_target,
+ gl::GLImage* image,
+ bool can_bind_to_sampler) {
+ auto passthrough_texture_iter =
+ resources_->texture_object_map.find(client_texture_id);
+ if (passthrough_texture_iter == resources_->texture_object_map.end()) {
+ return;
+ }
+
+ TexturePassthrough* passthrough_texture =
+ passthrough_texture_iter->second.get();
+ DCHECK(passthrough_texture != nullptr);
+
+ GLenum bind_target = GLES2Util::GLFaceTargetToTextureTarget(texture_target);
+ if (passthrough_texture->target() != bind_target) {
+ return;
+ }
+
+ if (can_bind_to_sampler) {
+ // Binding an image to a texture requires that the texture is currently
+ // bound.
+ scoped_refptr<TexturePassthrough> current_texture =
+ bound_textures_[bind_target][active_texture_unit_].texture;
+ bool bind_new_texture = current_texture != passthrough_texture;
+ if (bind_new_texture) {
+ glBindTexture(bind_target, passthrough_texture->service_id());
+ }
+
+ if (!image->BindTexImage(texture_target)) {
+ image->CopyTexImage(texture_target);
+ }
+
+ // Re-bind the old texture
+ if (bind_new_texture) {
+ GLuint current_service_texture =
+ current_texture ? current_texture->service_id() : 0;
+ glBindTexture(bind_target, current_service_texture);
+ }
+ }
+
+ // Reference the image even if it is not bound as a sampler.
+ passthrough_texture->SetLevelImage(texture_target, 0, image);
+}
+
const char* GLES2DecoderPassthroughImpl::GetCommandName(
unsigned int command_id) const {
if (command_id >= kFirstGLES2Command && command_id < kNumCommands) {
@@ -783,6 +930,47 @@ INSTANTIATE_PATCH_NUMERIC_RESULTS(GLfloat);
INSTANTIATE_PATCH_NUMERIC_RESULTS(GLboolean);
#undef INSTANTIATE_PATCH_NUMERIC_RESULTS
+template <typename T>
+error::Error GLES2DecoderPassthroughImpl::PatchGetBufferResults(GLenum target,
+ GLenum pname,
+ GLsizei bufsize,
+ GLsizei* length,
+ T* params) {
+ if (pname != GL_BUFFER_ACCESS_FLAGS) {
+ return error::kNoError;
+ }
+
+ // If there was no error, the buffer target should exist
+ DCHECK(bound_buffers_.find(target) != bound_buffers_.end());
+ GLuint current_client_buffer = bound_buffers_[target];
+
+ auto mapped_buffer_info_iter =
+ resources_->mapped_buffer_map.find(current_client_buffer);
+ if (mapped_buffer_info_iter == resources_->mapped_buffer_map.end()) {
+ // Buffer is not mapped, nothing to do
+ return error::kNoError;
+ }
+
+ // Buffer is mapped, patch the result with the original access flags
+ DCHECK(bufsize >= 1);
+ DCHECK(*length == 1);
+ params[0] = mapped_buffer_info_iter->second.original_access;
+ return error::kNoError;
+}
+
+template error::Error GLES2DecoderPassthroughImpl::PatchGetBufferResults(
+ GLenum target,
+ GLenum pname,
+ GLsizei bufsize,
+ GLsizei* length,
+ GLint64* params);
+template error::Error GLES2DecoderPassthroughImpl::PatchGetBufferResults(
+ GLenum target,
+ GLenum pname,
+ GLsizei bufsize,
+ GLsizei* length,
+ GLint* params);
+
error::Error
GLES2DecoderPassthroughImpl::PatchGetFramebufferAttachmentParameter(
GLenum target,
@@ -1000,16 +1188,17 @@ void GLES2DecoderPassthroughImpl::RemovePendingQuery(GLuint service_id) {
}
}
-void GLES2DecoderPassthroughImpl::UpdateTextureBinding(GLenum target,
- GLuint client_id,
- GLuint service_id) {
+void GLES2DecoderPassthroughImpl::UpdateTextureBinding(
+ GLenum target,
+ GLuint client_id,
+ TexturePassthrough* texture) {
+ GLuint texture_service_id = texture ? texture->service_id() : 0;
size_t cur_texture_unit = active_texture_unit_;
- const auto& target_bound_textures = bound_textures_.at(target);
+ auto& target_bound_textures = bound_textures_.at(target);
for (size_t bound_texture_index = 0;
bound_texture_index < target_bound_textures.size();
bound_texture_index++) {
- GLuint bound_client_id = target_bound_textures[bound_texture_index];
- if (bound_client_id == client_id) {
+ if (target_bound_textures[bound_texture_index].client_id == client_id) {
// Update the active texture unit if needed
if (bound_texture_index != cur_texture_unit) {
glActiveTexture(static_cast<GLenum>(GL_TEXTURE0 + bound_texture_index));
@@ -1017,7 +1206,8 @@ void GLES2DecoderPassthroughImpl::UpdateTextureBinding(GLenum target,
}
// Update the texture binding
- glBindTexture(target, service_id);
+ glBindTexture(target, texture_service_id);
+ target_bound_textures[bound_texture_index].texture = texture;
}
}
@@ -1042,6 +1232,13 @@ error::Error GLES2DecoderPassthroughImpl::BindTexImage2DCHROMIUMImpl(
return error::kNoError;
}
+ const BoundTexture& bound_texture =
+ bound_textures_[GL_TEXTURE_2D][active_texture_unit_];
+ if (bound_texture.texture == nullptr) {
+ InsertError(GL_INVALID_OPERATION, "No texture bound");
+ return error::kNoError;
+ }
+
if (internalformat) {
if (!image->BindTexImageWithInternalformat(target, internalformat)) {
image->CopyTexImage(target);
@@ -1052,6 +1249,24 @@ error::Error GLES2DecoderPassthroughImpl::BindTexImage2DCHROMIUMImpl(
}
}
+ DCHECK(bound_texture.texture != nullptr);
+ bound_texture.texture->SetLevelImage(target, 0, image);
+
+ return error::kNoError;
+}
+
+void GLES2DecoderPassthroughImpl::VerifyServiceTextureObjectsExist() {
+ for (const auto& texture_mapping : resources_->texture_object_map) {
+ DCHECK_EQ(GL_TRUE, glIsTexture(texture_mapping.second->service_id()));
+ }
+}
+
+error::Error GLES2DecoderPassthroughImpl::HandleRasterCHROMIUM(
+ uint32_t immediate_data_size,
+ const volatile void* cmd_data) {
+ // TODO(enne): Add CHROMIUM_raster_transport extension support to the
+ // passthrough command buffer.
+ NOTIMPLEMENTED();
return error::kNoError;
}
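
PatchGetBufferResults above exists so that GL_BUFFER_ACCESS_FLAGS queries on a mapped buffer report the access bits the client originally requested rather than the filtered set the service used for its own mapping; roughly, from the client's side (standard GLES3 calls, behaviour as implied by the patching code):

    #include <GLES3/gl3.h>

    // Map a range, then read back GL_BUFFER_ACCESS_FLAGS. Through the
    // passthrough decoder the reported value is expected to equal |access|,
    // even if the service-side mapping used modified flags internally.
    GLint QueryMappedAccess(GLenum target, GLintptr offset, GLsizeiptr size,
                            GLbitfield access) {
      GLint reported_access = 0;
      void* ptr = glMapBufferRange(target, offset, size, access);
      if (ptr) {
        glGetBufferParameteriv(target, GL_BUFFER_ACCESS_FLAGS, &reported_access);
        glUnmapBuffer(target);
      }
      return reported_access;
    }
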
diff --git a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough.h b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough.h
index 90048c427d0..2d85fa65cc3 100644
--- a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough.h
+++ b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough.h
@@ -33,7 +33,8 @@ class GPUTracer;
struct MappedBuffer {
GLsizeiptr size;
- GLbitfield access;
+ GLbitfield original_access;
+ GLbitfield filtered_access;
uint8_t* map_ptr;
int32_t data_shm_id;
uint32_t data_shm_offset;
@@ -68,7 +69,7 @@ struct PassthroughResources {
std::unordered_map<GLuint, MappedBuffer> mapped_buffer_map;
};
-class GLES2DecoderPassthroughImpl : public GLES2Decoder {
+class GPU_EXPORT GLES2DecoderPassthroughImpl : public GLES2Decoder {
public:
GLES2DecoderPassthroughImpl(GLES2DecoderClient* client,
CommandBufferServiceBase* command_buffer_service,
@@ -186,6 +187,7 @@ class GLES2DecoderPassthroughImpl : public GLES2Decoder {
bool GetServiceTextureId(uint32_t client_texture_id,
uint32_t* service_texture_id) override;
+ TextureBase* GetTextureBase(uint32_t client_id) override;
// Provides detail about a lost context if one occurred.
// Clears a level sub area of a texture
@@ -246,6 +248,11 @@ class GLES2DecoderPassthroughImpl : public GLES2Decoder {
const ContextState* GetContextState() override;
scoped_refptr<ShaderTranslatorInterface> GetTranslator(GLenum type) override;
+ void BindImage(uint32_t client_texture_id,
+ uint32_t texture_target,
+ gl::GLImage* image,
+ bool can_bind_to_sampler) override;
+
private:
const char* GetCommandName(unsigned int command_id) const;
@@ -288,6 +295,13 @@ class GLES2DecoderPassthroughImpl : public GLES2Decoder {
GLsizei length,
GLint* params);
+ template <typename T>
+ error::Error PatchGetBufferResults(GLenum target,
+ GLenum pname,
+ GLsizei bufsize,
+ GLsizei* length,
+ T* params);
+
void InsertError(GLenum error, const std::string& message);
GLenum PopError();
bool FlushErrors();
@@ -299,12 +313,16 @@ class GLES2DecoderPassthroughImpl : public GLES2Decoder {
error::Error ProcessQueries(bool did_finish);
void RemovePendingQuery(GLuint service_id);
- void UpdateTextureBinding(GLenum target, GLuint client_id, GLuint service_id);
+ void UpdateTextureBinding(GLenum target,
+ GLuint client_id,
+ TexturePassthrough* texture);
error::Error BindTexImage2DCHROMIUMImpl(GLenum target,
GLenum internalformat,
GLint image_id);
+ void VerifyServiceTextureObjectsExist();
+
GLES2DecoderClient* client_;
int commands_to_process_;
@@ -361,7 +379,19 @@ class GLES2DecoderPassthroughImpl : public GLES2Decoder {
// State tracking of currently bound 2D textures (client IDs)
size_t active_texture_unit_;
- std::unordered_map<GLenum, std::vector<GLuint>> bound_textures_;
+
+ struct BoundTexture {
+ BoundTexture();
+ ~BoundTexture();
+ BoundTexture(const BoundTexture&);
+ BoundTexture(BoundTexture&&);
+ BoundTexture& operator=(const BoundTexture&);
+ BoundTexture& operator=(BoundTexture&&);
+
+ GLuint client_id = 0;
+ scoped_refptr<TexturePassthrough> texture;
+ };
+ std::unordered_map<GLenum, std::vector<BoundTexture>> bound_textures_;
// State tracking of currently bound buffers
std::unordered_map<GLenum, GLuint> bound_buffers_;
@@ -422,6 +452,8 @@ class GLES2DecoderPassthroughImpl : public GLES2Decoder {
// Cache of scratch memory
std::vector<uint8_t> scratch_memory_;
+ std::unique_ptr<DCLayerSharedState> dc_layer_shared_state_;
+
base::WeakPtrFactory<GLES2DecoderPassthroughImpl> weak_ptr_factory_;
// Include the prototypes of all the doer functions from a separate header to
diff --git a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough_doer_prototypes.h b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough_doer_prototypes.h
index 71e0ab6a7d8..a598f9027de 100644
--- a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough_doer_prototypes.h
+++ b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough_doer_prototypes.h
@@ -728,6 +728,7 @@ error::Error DoUnmapBuffer(GLenum target);
error::Error DoResizeCHROMIUM(GLuint width,
GLuint height,
GLfloat scale_factor,
+ GLenum color_space,
GLboolean alpha);
error::Error DoGetRequestableExtensionsCHROMIUM(const char** extensions);
error::Error DoRequestExtensionCHROMIUM(const char* extension);
@@ -849,6 +850,7 @@ error::Error DoScheduleDCLayerCHROMIUM(
const GLfloat* contents_rect,
GLuint background_color,
GLuint edge_aa_mask,
+ GLenum filter,
const GLfloat* bounds_rect);
error::Error DoCommitOverlayPlanesCHROMIUM();
error::Error DoSwapInterval(GLint interval);
@@ -978,9 +980,18 @@ error::Error DoUniformMatrix4fvStreamTextureMatrixCHROMIUM(
error::Error DoOverlayPromotionHintCHROMIUM(GLuint texture,
GLboolean promotion_hint,
GLint display_x,
- GLint display_y);
+ GLint display_y,
+ GLint display_width,
+ GLint display_height);
error::Error DoSetDrawRectangleCHROMIUM(GLint x,
GLint y,
GLint width,
GLint height);
error::Error DoSetEnableDCLayersCHROMIUM(GLboolean enable);
+error::Error DoBeginRasterCHROMIUM(GLuint texture_id,
+ GLuint sk_color,
+ GLuint msaa_sample_count,
+ GLboolean can_use_lcd_text,
+ GLboolean use_distance_field_text,
+ GLint pixel_config);
+error::Error DoEndRasterCHROMIUM();
diff --git a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough_doers.cc b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough_doers.cc
index 030c3be5342..e08bc9719c5 100644
--- a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough_doers.cc
+++ b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough_doers.cc
@@ -6,6 +6,8 @@
#include "base/strings/string_number_conversions.h"
#include "gpu/command_buffer/service/gpu_tracer.h"
+#include "ui/gfx/geometry/rect_conversions.h"
+#include "ui/gl/dc_renderer_layer_params.h"
#include "ui/gl/gl_version_info.h"
namespace gpu {
@@ -309,6 +311,7 @@ error::Error GLES2DecoderPassthroughImpl::DoBindBuffer(GLenum target,
return error::kNoError;
}
+ DCHECK(bound_buffers_.find(target) != bound_buffers_.end());
bound_buffers_[target] = buffer;
return error::kNoError;
@@ -317,8 +320,17 @@ error::Error GLES2DecoderPassthroughImpl::DoBindBuffer(GLenum target,
error::Error GLES2DecoderPassthroughImpl::DoBindBufferBase(GLenum target,
GLuint index,
GLuint buffer) {
- glBindBufferBase(target, index, GetBufferServiceID(buffer, resources_,
- bind_generates_resource_));
+ FlushErrors();
+ glBindBufferBase(
+ target, index,
+ GetBufferServiceID(buffer, resources_, bind_generates_resource_));
+ if (FlushErrors()) {
+ return error::kNoError;
+ }
+
+ DCHECK(bound_buffers_.find(target) != bound_buffers_.end());
+ bound_buffers_[target] = buffer;
+
return error::kNoError;
}
@@ -327,9 +339,18 @@ error::Error GLES2DecoderPassthroughImpl::DoBindBufferRange(GLenum target,
GLuint buffer,
GLintptr offset,
GLsizeiptr size) {
- glBindBufferRange(target, index, GetBufferServiceID(buffer, resources_,
- bind_generates_resource_),
- offset, size);
+ FlushErrors();
+ glBindBufferRange(
+ target, index,
+ GetBufferServiceID(buffer, resources_, bind_generates_resource_), offset,
+ size);
+ if (FlushErrors()) {
+ return error::kNoError;
+ }
+
+ DCHECK(bound_buffers_.find(target) != bound_buffers_.end());
+ bound_buffers_[target] = buffer;
+
return error::kNoError;
}
@@ -374,21 +395,27 @@ error::Error GLES2DecoderPassthroughImpl::DoBindTexture(GLenum target,
// Track the currently bound textures
DCHECK(bound_textures_.find(target) != bound_textures_.end());
DCHECK(bound_textures_[target].size() > active_texture_unit_);
- bound_textures_[target][active_texture_unit_] = texture;
+ scoped_refptr<TexturePassthrough> texture_passthrough = nullptr;
if (service_id != 0) {
// Create a new texture object to track this texture
auto texture_object_iter = resources_->texture_object_map.find(texture);
if (texture_object_iter == resources_->texture_object_map.end()) {
+ texture_passthrough = new TexturePassthrough(service_id, target);
resources_->texture_object_map.insert(
- std::make_pair(texture, new TexturePassthrough(service_id, target)));
+ std::make_pair(texture, texture_passthrough));
} else {
+ texture_passthrough = texture_object_iter->second.get();
// Shouldn't be possible to get here if this texture has a different
// target than the one it was just bound to
DCHECK(texture_object_iter->second->target() == target);
}
}
+ bound_textures_[target][active_texture_unit_].client_id = texture;
+ bound_textures_[target][active_texture_unit_].texture =
+ std::move(texture_passthrough);
+
return error::kNoError;
}
@@ -439,7 +466,15 @@ error::Error GLES2DecoderPassthroughImpl::DoBufferData(GLenum target,
GLsizeiptr size,
const void* data,
GLenum usage) {
+ FlushErrors();
glBufferData(target, size, data, usage);
+ if (FlushErrors()) {
+ return error::kNoError;
+ }
+
+ // Calling buffer data on a mapped buffer will implicitly unmap it
+ resources_->mapped_buffer_map.erase(bound_buffers_[target]);
+
return error::kNoError;
}
@@ -680,16 +715,26 @@ error::Error GLES2DecoderPassthroughImpl::DoDeleteBuffers(
InsertError(GL_INVALID_VALUE, "n cannot be negative.");
return error::kNoError;
}
- return DeleteHelper(n, buffers, &resources_->buffer_id_map,
- [this](GLsizei n, GLuint* buffers) {
- glDeleteBuffersARB(n, buffers);
- for (GLsizei i = 0; i < n; i++)
- for (auto buffer_binding : bound_buffers_) {
- if (buffer_binding.second == buffers[i]) {
- buffer_binding.second = 0;
- }
- }
- });
+
+ std::vector<GLuint> service_ids(n, 0);
+ for (GLsizei ii = 0; ii < n; ++ii) {
+ GLuint client_id = buffers[ii];
+
+ // Update the bound and mapped buffer state tracking
+ for (auto& buffer_binding : bound_buffers_) {
+ if (buffer_binding.second == client_id) {
+ buffer_binding.second = 0;
+ }
+ }
+ resources_->mapped_buffer_map.erase(client_id);
+
+ service_ids[ii] =
+ resources_->buffer_id_map.GetServiceIDOrInvalid(client_id);
+ resources_->buffer_id_map.RemoveClientID(client_id);
+ }
+ glDeleteBuffersARB(n, service_ids.data());
+
+ return error::kNoError;
}
error::Error GLES2DecoderPassthroughImpl::DoDeleteFramebuffers(
@@ -774,8 +819,10 @@ error::Error GLES2DecoderPassthroughImpl::DoDeleteTextures(
non_mailbox_client_ids.push_back(client_id);
} else {
// Deleted when unreferenced
+ scoped_refptr<TexturePassthrough> texture = texture_object_iter->second;
resources_->texture_id_map.RemoveClientID(client_id);
resources_->texture_object_map.erase(client_id);
+ UpdateTextureBinding(texture->target(), client_id, nullptr);
}
}
return DeleteHelper(
@@ -1186,7 +1233,12 @@ error::Error GLES2DecoderPassthroughImpl::DoGetBufferParameteri64v(
GLsizei bufsize,
GLsizei* length,
GLint64* params) {
+ FlushErrors();
glGetBufferParameteri64vRobustANGLE(target, pname, bufsize, length, params);
+ if (FlushErrors()) {
+ return error::kNoError;
+ }
+ PatchGetBufferResults(target, pname, bufsize, length, params);
return error::kNoError;
}
@@ -1196,7 +1248,12 @@ error::Error GLES2DecoderPassthroughImpl::DoGetBufferParameteriv(
GLsizei bufsize,
GLsizei* length,
GLint* params) {
+ FlushErrors();
glGetBufferParameterivRobustANGLE(target, pname, bufsize, length, params);
+ if (FlushErrors()) {
+ return error::kNoError;
+ }
+ PatchGetBufferResults(target, pname, bufsize, length, params);
return error::kNoError;
}
@@ -2530,6 +2587,10 @@ error::Error GLES2DecoderPassthroughImpl::DoFramebufferTexture2DMultisampleEXT(
GLuint texture,
GLint level,
GLsizei samples) {
+ if (!feature_info_->feature_flags().multisampled_render_to_texture) {
+ return error::kUnknownCommand;
+ }
+
glFramebufferTexture2DMultisampleEXT(
target, attachment, textarget,
GetTextureServiceID(texture, resources_, false), level, samples);
@@ -2892,7 +2953,8 @@ error::Error GLES2DecoderPassthroughImpl::DoMapBufferRange(
MappedBuffer mapped_buffer_info;
mapped_buffer_info.size = size;
- mapped_buffer_info.access = filtered_access;
+ mapped_buffer_info.original_access = access;
+ mapped_buffer_info.filtered_access = filtered_access;
mapped_buffer_info.map_ptr = static_cast<uint8_t*>(mapped_ptr);
mapped_buffer_info.data_shm_id = data_shm_id;
mapped_buffer_info.data_shm_offset = data_shm_offset;
@@ -2908,8 +2970,12 @@ error::Error GLES2DecoderPassthroughImpl::DoMapBufferRange(
error::Error GLES2DecoderPassthroughImpl::DoUnmapBuffer(GLenum target) {
auto bound_buffers_iter = bound_buffers_.find(target);
- if (bound_buffers_iter == bound_buffers_.end() ||
- bound_buffers_iter->second == 0) {
+ if (bound_buffers_iter == bound_buffers_.end()) {
+ InsertError(GL_INVALID_ENUM, "Invalid buffer target.");
+ return error::kNoError;
+ }
+
+ if (bound_buffers_iter->second == 0) {
InsertError(GL_INVALID_OPERATION, "No buffer bound to this target.");
return error::kNoError;
}
@@ -2923,8 +2989,8 @@ error::Error GLES2DecoderPassthroughImpl::DoUnmapBuffer(GLenum target) {
}
const MappedBuffer& map_info = mapped_buffer_info_iter->second;
- if ((map_info.access & GL_MAP_WRITE_BIT) != 0 &&
- (map_info.access & GL_MAP_FLUSH_EXPLICIT_BIT) == 0) {
+ if ((map_info.filtered_access & GL_MAP_WRITE_BIT) != 0 &&
+ (map_info.filtered_access & GL_MAP_FLUSH_EXPLICIT_BIT) == 0) {
uint8_t* mem = GetSharedMemoryAs<uint8_t*>(
map_info.data_shm_id, map_info.data_shm_offset, map_info.size);
if (!mem) {
@@ -2944,19 +3010,42 @@ error::Error GLES2DecoderPassthroughImpl::DoUnmapBuffer(GLenum target) {
error::Error GLES2DecoderPassthroughImpl::DoResizeCHROMIUM(GLuint width,
GLuint height,
GLfloat scale_factor,
+ GLenum color_space,
GLboolean alpha) {
+ gl::GLSurface::ColorSpace surface_color_space =
+ gl::GLSurface::ColorSpace::UNSPECIFIED;
+ switch (color_space) {
+ case GL_COLOR_SPACE_UNSPECIFIED_CHROMIUM:
+ surface_color_space = gl::GLSurface::ColorSpace::UNSPECIFIED;
+ break;
+ case GL_COLOR_SPACE_SCRGB_LINEAR_CHROMIUM:
+ surface_color_space = gl::GLSurface::ColorSpace::SCRGB_LINEAR;
+ break;
+ case GL_COLOR_SPACE_SRGB_CHROMIUM:
+ surface_color_space = gl::GLSurface::ColorSpace::SRGB;
+ break;
+ case GL_COLOR_SPACE_DISPLAY_P3_CHROMIUM:
+ surface_color_space = gl::GLSurface::ColorSpace::DISPLAY_P3;
+ break;
+ default:
+ LOG(ERROR) << "GLES2DecoderPassthroughImpl: Context lost because "
+ "specified color space was invalid.";
+ return error::kLostContext;
+ }
if (offscreen_) {
// TODO: crbug.com/665521
NOTIMPLEMENTED();
} else {
- if (!surface_->Resize(gfx::Size(width, height), scale_factor, !!alpha)) {
- LOG(ERROR) << "GLES2DecoderImpl: Context lost because resize failed.";
+ if (!surface_->Resize(gfx::Size(width, height), scale_factor,
+ surface_color_space, !!alpha)) {
+ LOG(ERROR)
+ << "GLES2DecoderPassthroughImpl: Context lost because resize failed.";
return error::kLostContext;
}
DCHECK(context_->IsCurrent(surface_.get()));
if (!context_->IsCurrent(surface_.get())) {
- LOG(ERROR) << "GLES2DecoderImpl: Context lost because context no longer "
- << "current after resize callback.";
+ LOG(ERROR) << "GLES2DecoderPassthroughImpl: Context lost because context "
+ "no longer current after resize callback.";
return error::kLostContext;
}
}
@@ -3483,17 +3572,16 @@ error::Error GLES2DecoderPassthroughImpl::DoProduceTextureCHROMIUM(
return error::kNoError;
}
- GLuint texture_client_id = bound_textures_iter->second[active_texture_unit_];
- auto texture_object_iter =
- resources_->texture_object_map.find(texture_client_id);
- if (texture_object_iter == resources_->texture_object_map.end()) {
+ const BoundTexture& bound_texture =
+ bound_textures_iter->second[active_texture_unit_];
+ if (bound_texture.texture == nullptr) {
InsertError(GL_INVALID_OPERATION, "Unknown texture for target.");
return error::kNoError;
}
const Mailbox& mb = Mailbox::FromVolatile(
*reinterpret_cast<const volatile Mailbox*>(mailbox));
- mailbox_manager_->ProduceTexture(mb, texture_object_iter->second.get());
+ mailbox_manager_->ProduceTexture(mb, bound_texture.texture.get());
return error::kNoError;
}
@@ -3529,8 +3617,9 @@ error::Error GLES2DecoderPassthroughImpl::DoConsumeTextureCHROMIUM(
return error::kNoError;
}
- GLuint client_id = bound_textures_iter->second[active_texture_unit_];
- if (client_id == 0) {
+ const BoundTexture& current_texture =
+ bound_textures_iter->second[active_texture_unit_];
+ if (current_texture.client_id == 0) {
InsertError(GL_INVALID_OPERATION, "Unknown texture for target.");
return error::kNoError;
}
@@ -3550,13 +3639,15 @@ error::Error GLES2DecoderPassthroughImpl::DoConsumeTextureCHROMIUM(
}
// Update id mappings
- resources_->texture_id_map.RemoveClientID(client_id);
- resources_->texture_id_map.SetIDMapping(client_id, texture->service_id());
- resources_->texture_object_map.erase(client_id);
- resources_->texture_object_map.insert(std::make_pair(client_id, texture));
+ resources_->texture_id_map.RemoveClientID(current_texture.client_id);
+ resources_->texture_id_map.SetIDMapping(current_texture.client_id,
+ texture->service_id());
+ resources_->texture_object_map.erase(current_texture.client_id);
+ resources_->texture_object_map.insert(
+ std::make_pair(current_texture.client_id, texture));
// Bind the service id that now represents this texture
- UpdateTextureBinding(target, client_id, texture->service_id());
+ UpdateTextureBinding(target, current_texture.client_id, texture.get());
return error::kNoError;
}
@@ -3593,7 +3684,7 @@ error::Error GLES2DecoderPassthroughImpl::DoCreateAndConsumeTextureINTERNAL(
std::make_pair(texture_client_id, texture));
// Bind the service id that now represents this texture
- UpdateTextureBinding(target, texture_client_id, texture->service_id());
+ UpdateTextureBinding(target, texture_client_id, texture.get());
return error::kNoError;
}
@@ -3629,13 +3720,25 @@ error::Error GLES2DecoderPassthroughImpl::DoReleaseTexImage2DCHROMIUM(
return error::kNoError;
}
+ const BoundTexture& bound_texture =
+ bound_textures_[GL_TEXTURE_2D][active_texture_unit_];
+ if (bound_texture.texture == nullptr) {
+ InsertError(GL_INVALID_OPERATION, "No texture bound");
+ return error::kNoError;
+ }
+
gl::GLImage* image = group_->image_manager()->LookupImage(imageId);
if (image == nullptr) {
InsertError(GL_INVALID_OPERATION, "No image found with the given ID");
return error::kNoError;
}
- image->ReleaseTexImage(target);
+ // Only release the image if it is currently bound
+  if (bound_texture.texture->GetLevelImage(target, 0) == image) {
+ image->ReleaseTexImage(target);
+ bound_texture.texture->SetLevelImage(target, 0, nullptr);
+ }
+
return error::kNoError;
}
@@ -3781,7 +3884,19 @@ error::Error GLES2DecoderPassthroughImpl::DoScheduleDCLayerSharedStateCHROMIUM(
const GLfloat* clip_rect,
GLint z_order,
const GLfloat* transform) {
- NOTIMPLEMENTED();
+ if (!dc_layer_shared_state_) {
+ dc_layer_shared_state_.reset(new DCLayerSharedState);
+ }
+ dc_layer_shared_state_->opacity = opacity;
+ dc_layer_shared_state_->is_clipped = is_clipped ? true : false;
+ dc_layer_shared_state_->clip_rect = gfx::ToEnclosingRect(
+ gfx::RectF(clip_rect[0], clip_rect[1], clip_rect[2], clip_rect[3]));
+ dc_layer_shared_state_->z_order = z_order;
+ dc_layer_shared_state_->transform = gfx::Transform(
+ transform[0], transform[1], transform[2], transform[3], transform[4],
+ transform[5], transform[6], transform[7], transform[8], transform[9],
+ transform[10], transform[11], transform[12], transform[13], transform[14],
+ transform[15]);
return error::kNoError;
}
@@ -3791,8 +3906,71 @@ error::Error GLES2DecoderPassthroughImpl::DoScheduleDCLayerCHROMIUM(
const GLfloat* contents_rect,
GLuint background_color,
GLuint edge_aa_mask,
+ GLenum filter,
const GLfloat* bounds_rect) {
- NOTIMPLEMENTED();
+ switch (filter) {
+ case GL_NEAREST:
+ case GL_LINEAR:
+ break;
+ default:
+ InsertError(GL_INVALID_OPERATION, "invalid filter.");
+ return error::kNoError;
+ }
+
+ if (!dc_layer_shared_state_) {
+ InsertError(GL_INVALID_OPERATION,
+ "glScheduleDCLayerSharedStateCHROMIUM has not been called.");
+ return error::kNoError;
+ }
+
+ if (num_textures < 0 || num_textures > 4) {
+ InsertError(GL_INVALID_OPERATION,
+ "number of textures greater than maximum of 4.");
+ return error::kNoError;
+ }
+
+ gfx::RectF contents_rect_object(contents_rect[0], contents_rect[1],
+ contents_rect[2], contents_rect[3]);
+ gfx::RectF bounds_rect_object(bounds_rect[0], bounds_rect[1], bounds_rect[2],
+ bounds_rect[3]);
+
+ std::vector<scoped_refptr<gl::GLImage>> images(num_textures);
+ for (int i = 0; i < num_textures; ++i) {
+ GLuint contents_texture_client_id = contents_texture_ids[i];
+ if (contents_texture_client_id != 0) {
+ auto texture_iter =
+ resources_->texture_object_map.find(contents_texture_client_id);
+ if (texture_iter == resources_->texture_object_map.end()) {
+ InsertError(GL_INVALID_VALUE, "unknown texture.");
+ return error::kNoError;
+ }
+
+ scoped_refptr<TexturePassthrough> passthrough_texture =
+ texture_iter->second;
+ DCHECK(passthrough_texture != nullptr);
+ DCHECK(passthrough_texture->target() == GL_TEXTURE_2D);
+
+ scoped_refptr<gl::GLImage> image =
+ passthrough_texture->GetLevelImage(GL_TEXTURE_2D, 0);
+ if (image == nullptr) {
+ InsertError(GL_INVALID_VALUE, "unsupported texture format");
+ return error::kNoError;
+ }
+ images[i] = image;
+ }
+ }
+
+ ui::DCRendererLayerParams params(
+ dc_layer_shared_state_->is_clipped, dc_layer_shared_state_->clip_rect,
+ dc_layer_shared_state_->z_order, dc_layer_shared_state_->transform,
+ images, contents_rect_object, gfx::ToEnclosingRect(bounds_rect_object),
+ background_color, edge_aa_mask, dc_layer_shared_state_->opacity, filter);
+
+ if (!surface_->ScheduleDCLayer(params)) {
+ InsertError(GL_INVALID_OPERATION, "failed to schedule DCLayer");
+ return error::kNoError;
+ }
+
return error::kNoError;
}
@@ -4097,7 +4275,9 @@ error::Error GLES2DecoderPassthroughImpl::DoOverlayPromotionHintCHROMIUM(
GLuint texture,
GLboolean promotion_hint,
GLint display_x,
- GLint display_y) {
+ GLint display_y,
+ GLint display_width,
+ GLint display_height) {
NOTIMPLEMENTED();
return error::kNoError;
}
@@ -4107,12 +4287,67 @@ error::Error GLES2DecoderPassthroughImpl::DoSetDrawRectangleCHROMIUM(
GLint y,
GLint width,
GLint height) {
- NOTIMPLEMENTED();
+ FlushErrors();
+
+ GLint current_framebuffer = 0;
+ glGetIntegerv(GL_FRAMEBUFFER_BINDING, &current_framebuffer);
+ if (current_framebuffer != 0) {
+ InsertError(GL_INVALID_OPERATION, "framebuffer must not be bound.");
+ return error::kNoError;
+ }
+
+ if (!surface_->SupportsDCLayers()) {
+ InsertError(GL_INVALID_OPERATION,
+ "surface doesn't support SetDrawRectangle.");
+ return error::kNoError;
+ }
+
+ gfx::Rect rect(x, y, width, height);
+ if (!surface_->SetDrawRectangle(rect)) {
+ InsertError(GL_INVALID_OPERATION, "SetDrawRectangle failed on surface");
+ return error::kNoError;
+ }
+
return error::kNoError;
}
error::Error GLES2DecoderPassthroughImpl::DoSetEnableDCLayersCHROMIUM(
GLboolean enable) {
+ FlushErrors();
+
+ GLint current_framebuffer = 0;
+ glGetIntegerv(GL_FRAMEBUFFER_BINDING, &current_framebuffer);
+ if (current_framebuffer != 0) {
+ InsertError(GL_INVALID_OPERATION, "framebuffer must not be bound.");
+ return error::kNoError;
+ }
+
+ if (!surface_->SupportsDCLayers()) {
+ InsertError(GL_INVALID_OPERATION,
+ "surface doesn't support SetDrawRectangle.");
+ return error::kNoError;
+ }
+
+ if (!surface_->SetEnableDCLayers(!!enable)) {
+ InsertError(GL_INVALID_OPERATION, "SetEnableDCLayers failed on surface.");
+ return error::kNoError;
+ }
+
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderPassthroughImpl::DoBeginRasterCHROMIUM(
+ GLuint texture_id,
+ GLuint sk_color,
+ GLuint msaa_sample_count,
+ GLboolean can_use_lcd_text,
+ GLboolean use_distance_field_text,
+ GLint pixel_config) {
+ NOTIMPLEMENTED();
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderPassthroughImpl::DoEndRasterCHROMIUM() {
NOTIMPLEMENTED();
return error::kNoError;
}
diff --git a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough_handlers.cc b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough_handlers.cc
index c18ea3dde93..75e33d2b5d4 100644
--- a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough_handlers.cc
+++ b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough_handlers.cc
@@ -1347,8 +1347,10 @@ error::Error GLES2DecoderPassthroughImpl::HandleResizeCHROMIUM(
GLuint width = static_cast<GLuint>(c.width);
GLuint height = static_cast<GLuint>(c.height);
GLfloat scale_factor = static_cast<GLfloat>(c.scale_factor);
+ GLenum color_space = static_cast<GLenum>(c.color_space);
GLboolean has_alpha = static_cast<GLboolean>(c.alpha);
- error::Error error = DoResizeCHROMIUM(width, height, scale_factor, has_alpha);
+ error::Error error =
+ DoResizeCHROMIUM(width, height, scale_factor, color_space, has_alpha);
if (error != error::kNoError) {
return error;
}
@@ -1827,16 +1829,24 @@ error::Error GLES2DecoderPassthroughImpl::HandleScheduleDCLayerCHROMIUM(
const GLfloat* contents_rect = mem;
GLuint background_color = static_cast<GLuint>(c.background_color);
GLuint edge_aa_mask = static_cast<GLuint>(c.edge_aa_mask);
+ GLenum filter = static_cast<GLenum>(c.filter);
const GLfloat* bounds_rect = mem + 4;
error::Error error = DoScheduleDCLayerCHROMIUM(
num_textures, contents_texture_ids, contents_rect, background_color,
- edge_aa_mask, bounds_rect);
+ edge_aa_mask, filter, bounds_rect);
if (error != error::kNoError) {
return error;
}
return error::kNoError;
}
+error::Error GLES2DecoderPassthroughImpl::HandleSetColorSpaceForScanoutCHROMIUM(
+ uint32_t immediate_data_size,
+ const volatile void* cmd_data) {
+ NOTIMPLEMENTED();
+ return error::kNoError;
+}
+
error::Error GLES2DecoderPassthroughImpl::HandleGenPathsCHROMIUM(
uint32_t immediate_data_size,
const volatile void* cmd_data) {
diff --git a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough_handlers_autogen.cc b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough_handlers_autogen.cc
index 8a2d17dd489..3c805fa140b 100644
--- a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough_handlers_autogen.cc
+++ b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough_handlers_autogen.cc
@@ -4336,8 +4336,11 @@ error::Error GLES2DecoderPassthroughImpl::HandleOverlayPromotionHintCHROMIUM(
GLboolean promotion_hint = static_cast<GLboolean>(c.promotion_hint);
GLint display_x = static_cast<GLint>(c.display_x);
GLint display_y = static_cast<GLint>(c.display_y);
- error::Error error = DoOverlayPromotionHintCHROMIUM(texture, promotion_hint,
- display_x, display_y);
+ GLint display_width = static_cast<GLint>(c.display_width);
+ GLint display_height = static_cast<GLint>(c.display_height);
+ error::Error error =
+ DoOverlayPromotionHintCHROMIUM(texture, promotion_hint, display_x,
+ display_y, display_width, display_height);
if (error != error::kNoError) {
return error;
}
@@ -4403,5 +4406,36 @@ error::Error GLES2DecoderPassthroughImpl::HandleSetEnableDCLayersCHROMIUM(
return error::kNoError;
}
+error::Error GLES2DecoderPassthroughImpl::HandleBeginRasterCHROMIUM(
+ uint32_t immediate_data_size,
+ const volatile void* cmd_data) {
+ const volatile gles2::cmds::BeginRasterCHROMIUM& c =
+ *static_cast<const volatile gles2::cmds::BeginRasterCHROMIUM*>(cmd_data);
+ GLuint texture_id = static_cast<GLuint>(c.texture_id);
+ GLuint sk_color = static_cast<GLuint>(c.sk_color);
+ GLuint msaa_sample_count = static_cast<GLuint>(c.msaa_sample_count);
+ GLboolean can_use_lcd_text = static_cast<GLboolean>(c.can_use_lcd_text);
+ GLboolean use_distance_field_text =
+ static_cast<GLboolean>(c.use_distance_field_text);
+ GLint pixel_config = static_cast<GLint>(c.pixel_config);
+ error::Error error = DoBeginRasterCHROMIUM(
+ texture_id, sk_color, msaa_sample_count, can_use_lcd_text,
+ use_distance_field_text, pixel_config);
+ if (error != error::kNoError) {
+ return error;
+ }
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderPassthroughImpl::HandleEndRasterCHROMIUM(
+ uint32_t immediate_data_size,
+ const volatile void* cmd_data) {
+ error::Error error = DoEndRasterCHROMIUM();
+ if (error != error::kNoError) {
+ return error;
+ }
+ return error::kNoError;
+}
+
} // namespace gles2
} // namespace gpu
diff --git a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough_unittest_buffers.cc b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough_unittest_buffers.cc
new file mode 100644
index 00000000000..43f1769a4d5
--- /dev/null
+++ b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough_unittest_buffers.cc
@@ -0,0 +1,706 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <stdint.h>
+
+#include "gpu/command_buffer/service/gles2_cmd_decoder.h"
+#include "gpu/command_buffer/service/gles2_cmd_decoder_unittest.h"
+
+namespace gpu {
+namespace gles2 {
+
+using namespace cmds;
+
+TEST_F(GLES3DecoderPassthroughTest, BindBufferBaseValidArgs) {
+ BindBufferBase bind_cmd;
+ bind_cmd.Init(GL_TRANSFORM_FEEDBACK_BUFFER, 2, kClientBufferId);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(bind_cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+
+TEST_F(GLES3DecoderPassthroughTest, BindBufferBaseValidArgsNewId) {
+ constexpr GLuint kNewClientId = 502;
+ BindBufferBase cmd;
+ cmd.Init(GL_TRANSFORM_FEEDBACK_BUFFER, 2, kNewClientId);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+ EXPECT_TRUE(IsObjectHelper<IsBuffer>(kNewClientId));
+}
+
+TEST_F(GLES3DecoderPassthroughTest, BindBufferRangeValidArgs) {
+ const GLenum kTarget = GL_TRANSFORM_FEEDBACK_BUFFER;
+ const GLintptr kRangeOffset = 4;
+ const GLsizeiptr kRangeSize = 8;
+ const GLsizeiptr kBufferSize = kRangeOffset + kRangeSize;
+
+ cmds::BindBuffer bind_cmd;
+ bind_cmd.Init(kTarget, kClientBufferId);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(bind_cmd));
+
+ cmds::BufferData buffer_data_cmd;
+ buffer_data_cmd.Init(kTarget, kBufferSize, 0, 0, GL_STREAM_DRAW);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(buffer_data_cmd));
+
+ BindBufferRange bind_buffer_range_cmd;
+ bind_buffer_range_cmd.Init(kTarget, 2, kClientBufferId, kRangeOffset,
+ kRangeSize);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(bind_buffer_range_cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+
+TEST_F(GLES3DecoderPassthroughTest, BindBufferRangeValidArgsWithNoData) {
+ const GLenum kTarget = GL_TRANSFORM_FEEDBACK_BUFFER;
+ const GLintptr kRangeOffset = 4;
+ const GLsizeiptr kRangeSize = 8;
+ DoBindBuffer(kTarget, kClientBufferId);
+ BindBufferRange cmd;
+ cmd.Init(kTarget, 2, kClientBufferId, kRangeOffset, kRangeSize);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+
+TEST_F(GLES3DecoderPassthroughTest, BindBufferRangeValidArgsWithLessData) {
+ const GLenum kTarget = GL_TRANSFORM_FEEDBACK_BUFFER;
+ const GLintptr kRangeOffset = 4;
+ const GLsizeiptr kRangeSize = 8;
+ const GLsizeiptr kBufferSize = kRangeOffset + kRangeSize - 4;
+ DoBindBuffer(kTarget, kClientBufferId);
+ DoBufferData(kTarget, kBufferSize, nullptr, GL_STREAM_DRAW);
+ BindBufferRange cmd;
+ cmd.Init(kTarget, 2, kClientBufferId, kRangeOffset, kRangeSize);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+
+TEST_F(GLES3DecoderPassthroughTest, BindBufferRangeValidArgsNewId) {
+ BindBufferRange cmd;
+ cmd.Init(GL_TRANSFORM_FEEDBACK_BUFFER, 2, kNewClientId, 4, 4);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+ EXPECT_TRUE(IsObjectHelper<IsBuffer>(kNewClientId));
+}
+
+TEST_F(GLES3DecoderPassthroughTest, MapBufferRangeUnmapBufferReadSucceeds) {
+ const GLenum kTarget = GL_ARRAY_BUFFER;
+ const GLintptr kOffset = 10;
+ const GLsizeiptr kSize = 64;
+ const GLbitfield kAccess = GL_MAP_READ_BIT;
+
+ uint32_t result_shm_id = shared_memory_id_;
+ uint32_t result_shm_offset = kSharedMemoryOffset;
+ uint32_t data_shm_id = shared_memory_id_;
+ // uint32_t is Result for both MapBufferRange and UnmapBuffer commands.
+ uint32_t data_shm_offset = kSharedMemoryOffset + sizeof(uint32_t);
+
+ DoBindBuffer(kTarget, kClientBufferId);
+ DoBufferData(kTarget, kSize + kOffset, nullptr, GL_STREAM_DRAW);
+
+ std::vector<int8_t> data(kSize);
+ for (GLsizeiptr ii = 0; ii < kSize; ++ii) {
+ data[ii] = static_cast<int8_t>(ii % 255);
+ }
+ DoBufferSubData(kTarget, kOffset, kSize, data.data());
+
+ { // MapBufferRange
+ typedef MapBufferRange::Result Result;
+ Result* result = GetSharedMemoryAs<Result*>();
+
+ MapBufferRange cmd;
+ cmd.Init(kTarget, kOffset, kSize, kAccess, data_shm_id, data_shm_offset,
+ result_shm_id, result_shm_offset);
+ *result = 0;
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ int8_t* mem = reinterpret_cast<int8_t*>(&result[1]);
+ EXPECT_EQ(0, memcmp(&data[0], mem, kSize));
+ EXPECT_EQ(1u, *result);
+ }
+
+ { // UnmapBuffer
+ UnmapBuffer cmd;
+ cmd.Init(kTarget);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ }
+
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+
+TEST_F(GLES3DecoderPassthroughTest, MapBufferRangeUnmapBufferWriteSucceeds) {
+ const GLenum kTarget = GL_ELEMENT_ARRAY_BUFFER;
+ const GLintptr kOffset = 10;
+ const GLsizeiptr kSize = 64;
+ const GLsizeiptr kTotalSize = kOffset + kSize;
+ const GLbitfield kAccess = GL_MAP_WRITE_BIT;
+ const GLbitfield kMappedAccess = GL_MAP_WRITE_BIT | GL_MAP_READ_BIT;
+
+ uint32_t result_shm_id = shared_memory_id_;
+ uint32_t result_shm_offset = kSharedMemoryOffset;
+ uint32_t data_shm_id = shared_memory_id_;
+ // uint32_t is Result for both MapBufferRange and UnmapBuffer commands.
+ uint32_t data_shm_offset = kSharedMemoryOffset + sizeof(uint32_t);
+
+ typedef MapBufferRange::Result Result;
+ Result* result = GetSharedMemoryAs<Result*>();
+ int8_t* client_data = GetSharedMemoryAs<int8_t*>() + sizeof(uint32_t);
+
+ DoBindBuffer(kTarget, kClientBufferId);
+ std::vector<int8_t> gpu_data(kTotalSize);
+ for (GLsizeiptr ii = 0; ii < kTotalSize; ++ii) {
+ gpu_data[ii] = static_cast<int8_t>(ii % 128);
+ }
+ DoBufferData(kTarget, kTotalSize, gpu_data.data(), GL_STREAM_DRAW);
+
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+
+ { // MapBufferRange succeeds
+ MapBufferRange cmd;
+ cmd.Init(kTarget, kOffset, kSize, kAccess, data_shm_id, data_shm_offset,
+ result_shm_id, result_shm_offset);
+ *result = 0;
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(1u, *result);
+
+ PassthroughResources* passthrough_resources = GetPassthroughResources();
+ auto mapped_buffer_info_iter =
+ passthrough_resources->mapped_buffer_map.find(kClientBufferId);
+ EXPECT_NE(mapped_buffer_info_iter,
+ passthrough_resources->mapped_buffer_map.end());
+ const MappedBuffer& mapped_buffer_info = mapped_buffer_info_iter->second;
+ EXPECT_EQ(mapped_buffer_info.original_access, kAccess);
+ EXPECT_EQ(mapped_buffer_info.filtered_access, kMappedAccess);
+
+ // Verify the buffer range from GPU is copied to client mem.
+ EXPECT_EQ(0, memcmp(&gpu_data[kOffset], client_data, kSize));
+ }
+ // Update the client mem.
+ const int8_t kValue0 = 21;
+ memset(client_data, kValue0, kSize);
+
+ { // UnmapBuffer succeeds
+ UnmapBuffer cmd;
+ cmd.Init(kTarget);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ }
+
+ // Reset the client data before mapping again
+ const int8_t kValue1 = 0;
+ memset(client_data, kValue1, kSize);
+
+ { // Re-map the buffer to verify the data
+ const GLbitfield kReadAccess = GL_MAP_READ_BIT;
+
+ MapBufferRange cmd;
+ cmd.Init(kTarget, 0, kTotalSize, kReadAccess, data_shm_id, data_shm_offset,
+ result_shm_id, result_shm_offset);
+ *result = 0;
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(1u, *result);
+
+ // Verify the GPU mem is updated
+ for (GLsizeiptr ii = 0; ii < kTotalSize; ++ii) {
+ if (ii < kOffset) {
+ EXPECT_EQ(static_cast<int8_t>(ii % 128), client_data[ii]);
+ } else {
+ EXPECT_EQ(kValue0, client_data[ii]);
+ }
+ }
+ }
+
+ { // UnmapBuffer succeeds
+ UnmapBuffer cmd;
+ cmd.Init(kTarget);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ }
+
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+
+TEST_F(GLES3DecoderPassthroughTest, FlushMappedBufferRangeSucceeds) {
+ const GLenum kTarget = GL_ELEMENT_ARRAY_BUFFER;
+ const GLintptr kMappedOffset = 10;
+ const GLsizeiptr kMappedSize = 64;
+ const GLintptr kFlushRangeOffset = 5;
+ const GLsizeiptr kFlushRangeSize = 32;
+ const GLsizeiptr kTotalSize = kMappedOffset + kMappedSize;
+ const GLbitfield kAccess = GL_MAP_WRITE_BIT | GL_MAP_FLUSH_EXPLICIT_BIT;
+ const GLbitfield kMappedAccess = kAccess | GL_MAP_READ_BIT;
+
+ uint32_t result_shm_id = shared_memory_id_;
+ uint32_t result_shm_offset = kSharedMemoryOffset;
+ uint32_t data_shm_id = shared_memory_id_;
+ // uint32_t is Result for both MapBufferRange and UnmapBuffer commands.
+ uint32_t data_shm_offset = kSharedMemoryOffset + sizeof(uint32_t);
+
+ typedef MapBufferRange::Result Result;
+ Result* result = GetSharedMemoryAs<Result*>();
+ int8_t* client_data = GetSharedMemoryAs<int8_t*>() + sizeof(uint32_t);
+
+ DoBindBuffer(kTarget, kClientBufferId);
+ std::vector<int8_t> gpu_data(kTotalSize);
+ for (GLsizeiptr ii = 0; ii < kTotalSize; ++ii) {
+ gpu_data[ii] = static_cast<int8_t>(ii % 128);
+ }
+ DoBufferData(kTarget, kTotalSize, gpu_data.data(), GL_STREAM_DRAW);
+
+ { // MapBufferRange succeeds
+ MapBufferRange cmd;
+ cmd.Init(kTarget, kMappedOffset, kMappedSize, kAccess, data_shm_id,
+ data_shm_offset, result_shm_id, result_shm_offset);
+ *result = 0;
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(1u, *result);
+ // Verify the buffer range from GPU is copied to client mem.
+ EXPECT_EQ(0, memcmp(&gpu_data[kMappedOffset], client_data, kMappedSize));
+
+ PassthroughResources* passthrough_resources = GetPassthroughResources();
+ auto mapped_buffer_info_iter =
+ passthrough_resources->mapped_buffer_map.find(kClientBufferId);
+ EXPECT_NE(mapped_buffer_info_iter,
+ passthrough_resources->mapped_buffer_map.end());
+ const MappedBuffer& mapped_buffer_info = mapped_buffer_info_iter->second;
+ EXPECT_EQ(mapped_buffer_info.original_access, kAccess);
+ EXPECT_EQ(mapped_buffer_info.filtered_access, kMappedAccess);
+ }
+
+ // Update the client mem, including data within and outside the flush range.
+ const int8_t kValue0 = 21;
+ memset(client_data, kValue0, kTotalSize);
+
+ { // FlushMappedBufferRange succeeds
+ FlushMappedBufferRange cmd;
+ cmd.Init(kTarget, kFlushRangeOffset, kFlushRangeSize);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ }
+
+ { // UnmapBuffer succeeds
+ UnmapBuffer cmd;
+ cmd.Init(kTarget);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ }
+
+ // Reset the client data before mapping again
+ const int8_t kValue1 = 0;
+ memset(client_data, kValue1, kTotalSize);
+
+ { // Re-map the buffer to verify the data
+ const GLbitfield kReadAccess = GL_MAP_READ_BIT;
+
+ MapBufferRange cmd;
+ cmd.Init(kTarget, 0, kTotalSize, kReadAccess, data_shm_id, data_shm_offset,
+ result_shm_id, result_shm_offset);
+ *result = 0;
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(1u, *result);
+
+    // Verify the GPU memory is updated, but only within the flushed range.
+ for (GLsizeiptr ii = 0; ii < kTotalSize; ++ii) {
+ if (ii >= kMappedOffset + kFlushRangeOffset &&
+ ii < kMappedOffset + kFlushRangeOffset + kFlushRangeSize) {
+ EXPECT_EQ(kValue0, client_data[ii]);
+ } else {
+ EXPECT_EQ(static_cast<int8_t>(ii % 128), client_data[ii]);
+ }
+ }
+ }
+
+ { // UnmapBuffer succeeds
+ UnmapBuffer cmd;
+ cmd.Init(kTarget);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ }
+
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+
+TEST_F(GLES3DecoderPassthroughTest, MapBufferRangeNotInitFails) {
+ const GLenum kTarget = GL_ARRAY_BUFFER;
+ const GLintptr kOffset = 10;
+ const GLsizeiptr kSize = 64;
+ const GLbitfield kAccess = GL_MAP_READ_BIT;
+ std::vector<int8_t> data(kSize);
+
+ typedef MapBufferRange::Result Result;
+ Result* result = GetSharedMemoryAs<Result*>();
+ *result = 1; // Any value other than 0.
+ uint32_t result_shm_id = shared_memory_id_;
+ uint32_t result_shm_offset = kSharedMemoryOffset;
+ uint32_t data_shm_id = shared_memory_id_;
+ uint32_t data_shm_offset = kSharedMemoryOffset + sizeof(*result);
+
+ MapBufferRange cmd;
+ cmd.Init(kTarget, kOffset, kSize, kAccess, data_shm_id, data_shm_offset,
+ result_shm_id, result_shm_offset);
+ EXPECT_NE(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+
+TEST_F(GLES3DecoderPassthroughTest,
+ MapBufferRangeWriteInvalidateRangeSucceeds) {
+ const GLenum kTarget = GL_ARRAY_BUFFER;
+ const GLintptr kOffset = 10;
+ const GLsizeiptr kSize = 64;
+ // With MAP_INVALIDATE_RANGE_BIT, no need to append MAP_READ_BIT.
+ const GLbitfield kAccess = GL_MAP_WRITE_BIT | GL_MAP_INVALIDATE_RANGE_BIT;
+
+ DoBindBuffer(kTarget, kClientBufferId);
+ DoBufferData(kTarget, kSize + kOffset, nullptr, GL_STREAM_DRAW);
+
+ typedef MapBufferRange::Result Result;
+ Result* result = GetSharedMemoryAs<Result*>();
+ *result = 0;
+ uint32_t result_shm_id = shared_memory_id_;
+ uint32_t result_shm_offset = kSharedMemoryOffset;
+ uint32_t data_shm_id = shared_memory_id_;
+ uint32_t data_shm_offset = kSharedMemoryOffset + sizeof(*result);
+
+ int8_t* mem = reinterpret_cast<int8_t*>(&result[1]);
+ memset(mem, 72, kSize); // Init to a random value other than 0.
+
+ MapBufferRange cmd;
+ cmd.Init(kTarget, kOffset, kSize, kAccess, data_shm_id, data_shm_offset,
+ result_shm_id, result_shm_offset);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+
+TEST_F(GLES3DecoderPassthroughTest,
+ MapBufferRangeWriteInvalidateBufferSucceeds) {
+ // Test INVALIDATE_BUFFER_BIT is mapped to INVALIDATE_RANGE_BIT.
+ const GLenum kTarget = GL_ARRAY_BUFFER;
+ const GLintptr kOffset = 10;
+ const GLsizeiptr kSize = 64;
+ const GLbitfield kAccess = GL_MAP_WRITE_BIT | GL_MAP_INVALIDATE_BUFFER_BIT;
+ const GLbitfield kFilteredAccess =
+ GL_MAP_WRITE_BIT | GL_MAP_INVALIDATE_RANGE_BIT;
+
+ DoBindBuffer(kTarget, kClientBufferId);
+ DoBufferData(kTarget, kSize + kOffset, nullptr, GL_STREAM_DRAW);
+
+ typedef MapBufferRange::Result Result;
+ Result* result = GetSharedMemoryAs<Result*>();
+ *result = 0;
+ uint32_t result_shm_id = shared_memory_id_;
+ uint32_t result_shm_offset = kSharedMemoryOffset;
+ uint32_t data_shm_id = shared_memory_id_;
+ uint32_t data_shm_offset = kSharedMemoryOffset + sizeof(*result);
+
+ int8_t* mem = reinterpret_cast<int8_t*>(&result[1]);
+ memset(mem, 72, kSize); // Init to a random value other than 0.
+
+ MapBufferRange cmd;
+ cmd.Init(kTarget, kOffset, kSize, kAccess, data_shm_id, data_shm_offset,
+ result_shm_id, result_shm_offset);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+
+ PassthroughResources* passthrough_resources = GetPassthroughResources();
+ auto mapped_buffer_info_iter =
+ passthrough_resources->mapped_buffer_map.find(kClientBufferId);
+ EXPECT_NE(mapped_buffer_info_iter,
+ passthrough_resources->mapped_buffer_map.end());
+ EXPECT_EQ(mapped_buffer_info_iter->second.original_access, kAccess);
+ EXPECT_EQ(mapped_buffer_info_iter->second.filtered_access, kFilteredAccess);
+}
+
+TEST_F(GLES3DecoderPassthroughTest, MapBufferRangeWriteUnsynchronizedBit) {
+ // Test UNSYNCHRONIZED_BIT is filtered out.
+ const GLenum kTarget = GL_ARRAY_BUFFER;
+ const GLintptr kOffset = 10;
+ const GLsizeiptr kSize = 64;
+ const GLbitfield kAccess = GL_MAP_WRITE_BIT | GL_MAP_UNSYNCHRONIZED_BIT;
+ const GLbitfield kFilteredAccess = GL_MAP_WRITE_BIT | GL_MAP_READ_BIT;
+
+ DoBindBuffer(kTarget, kClientBufferId);
+ DoBufferData(kTarget, kSize + kOffset, nullptr, GL_STREAM_DRAW);
+
+ typedef MapBufferRange::Result Result;
+ Result* result = GetSharedMemoryAs<Result*>();
+ *result = 0;
+ uint32_t result_shm_id = shared_memory_id_;
+ uint32_t result_shm_offset = kSharedMemoryOffset;
+ uint32_t data_shm_id = shared_memory_id_;
+ uint32_t data_shm_offset = kSharedMemoryOffset + sizeof(*result);
+
+ int8_t* mem = reinterpret_cast<int8_t*>(&result[1]);
+ memset(mem, 72, kSize); // Init to a random value other than 0.
+
+ MapBufferRange cmd;
+ cmd.Init(kTarget, kOffset, kSize, kAccess, data_shm_id, data_shm_offset,
+ result_shm_id, result_shm_offset);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+
+ PassthroughResources* passthrough_resources = GetPassthroughResources();
+ auto mapped_buffer_info_iter =
+ passthrough_resources->mapped_buffer_map.find(kClientBufferId);
+ EXPECT_NE(mapped_buffer_info_iter,
+ passthrough_resources->mapped_buffer_map.end());
+ EXPECT_EQ(mapped_buffer_info_iter->second.original_access, kAccess);
+ EXPECT_EQ(mapped_buffer_info_iter->second.filtered_access, kFilteredAccess);
+}
+
+TEST_F(GLES3DecoderPassthroughTest, MapBufferRangeWithError) {
+ const GLenum kTarget = GL_ARRAY_BUFFER;
+ const GLintptr kOffset = 10;
+ const GLsizeiptr kSize = 64;
+ const GLbitfield kAccess = GL_MAP_READ_BIT;
+ std::vector<int8_t> data(kSize);
+ for (GLsizeiptr ii = 0; ii < kSize; ++ii) {
+ data[ii] = static_cast<int8_t>(ii % 255);
+ }
+
+ typedef MapBufferRange::Result Result;
+ Result* result = GetSharedMemoryAs<Result*>();
+ *result = 0;
+ uint32_t result_shm_id = shared_memory_id_;
+ uint32_t result_shm_offset = kSharedMemoryOffset;
+ uint32_t data_shm_id = shared_memory_id_;
+ uint32_t data_shm_offset = kSharedMemoryOffset + sizeof(*result);
+
+ int8_t* mem = reinterpret_cast<int8_t*>(&result[1]);
+ memset(mem, 72, kSize); // Init to a random value other than 0.
+
+ MapBufferRange cmd;
+ cmd.Init(kTarget, kOffset, kSize, kAccess, data_shm_id, data_shm_offset,
+ result_shm_id, result_shm_offset);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ memset(&data[0], 72, kSize);
+ // Mem is untouched.
+ EXPECT_EQ(0, memcmp(&data[0], mem, kSize));
+ EXPECT_EQ(0u, *result);
+ EXPECT_EQ(GL_INVALID_OPERATION, GetGLError());
+
+ PassthroughResources* passthrough_resources = GetPassthroughResources();
+ auto mapped_buffer_info_iter =
+ passthrough_resources->mapped_buffer_map.find(kClientBufferId);
+ EXPECT_EQ(mapped_buffer_info_iter,
+ passthrough_resources->mapped_buffer_map.end());
+}
+
+TEST_F(GLES3DecoderPassthroughTest, MapBufferRangeBadSharedMemoryFails) {
+ const GLenum kTarget = GL_ARRAY_BUFFER;
+ const GLintptr kOffset = 10;
+ const GLsizeiptr kSize = 64;
+ const GLbitfield kAccess = GL_MAP_READ_BIT;
+
+ DoBindBuffer(kTarget, kClientBufferId);
+ DoBufferData(kTarget, kOffset + kSize, nullptr, GL_STREAM_DRAW);
+
+ typedef MapBufferRange::Result Result;
+ Result* result = GetSharedMemoryAs<Result*>();
+ *result = 0;
+ uint32_t result_shm_id = shared_memory_id_;
+ uint32_t result_shm_offset = kSharedMemoryOffset;
+ uint32_t data_shm_id = shared_memory_id_;
+ uint32_t data_shm_offset = kSharedMemoryOffset + sizeof(*result);
+
+ MapBufferRange cmd;
+ cmd.Init(kTarget, kOffset, kSize, kAccess, kInvalidSharedMemoryId,
+ data_shm_offset, result_shm_id, result_shm_offset);
+ EXPECT_NE(error::kNoError, ExecuteCmd(cmd));
+ cmd.Init(kTarget, kOffset, kSize, kAccess, data_shm_id, data_shm_offset,
+ kInvalidSharedMemoryId, result_shm_offset);
+ EXPECT_NE(error::kNoError, ExecuteCmd(cmd));
+ cmd.Init(kTarget, kOffset, kSize, kAccess, data_shm_id,
+ kInvalidSharedMemoryOffset, result_shm_id, result_shm_offset);
+ EXPECT_NE(error::kNoError, ExecuteCmd(cmd));
+ cmd.Init(kTarget, kOffset, kSize, kAccess, data_shm_id, data_shm_offset,
+ result_shm_id, kInvalidSharedMemoryOffset);
+ EXPECT_NE(error::kNoError, ExecuteCmd(cmd));
+}
+
+TEST_F(GLES3DecoderPassthroughTest, UnmapBufferWriteNotMappedFails) {
+ const GLenum kTarget = GL_ARRAY_BUFFER;
+
+ DoBindBuffer(kTarget, kClientBufferId);
+
+ UnmapBuffer cmd;
+ cmd.Init(kTarget);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_INVALID_OPERATION, GetGLError());
+}
+
+TEST_F(GLES3DecoderPassthroughTest, UnmapBufferWriteNoBoundBufferFails) {
+ const GLenum kTarget = GL_ARRAY_BUFFER;
+
+ UnmapBuffer cmd;
+ cmd.Init(kTarget);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_INVALID_OPERATION, GetGLError());
+}
+
+TEST_F(GLES3DecoderPassthroughTest, BufferDataDestroysDataStore) {
+ const GLenum kTarget = GL_ARRAY_BUFFER;
+ const GLintptr kOffset = 10;
+ const GLsizeiptr kSize = 64;
+ const GLbitfield kAccess = GL_MAP_WRITE_BIT;
+ const GLbitfield kFilteredAccess = GL_MAP_WRITE_BIT | GL_MAP_READ_BIT;
+
+ uint32_t result_shm_id = shared_memory_id_;
+ uint32_t result_shm_offset = kSharedMemoryOffset;
+ uint32_t data_shm_id = shared_memory_id_;
+ // uint32_t is Result for both MapBufferRange and UnmapBuffer commands.
+ uint32_t data_shm_offset = kSharedMemoryOffset + sizeof(uint32_t);
+
+ DoBindBuffer(kTarget, kClientBufferId);
+ DoBufferData(kTarget, kSize + kOffset, nullptr, GL_STREAM_DRAW);
+
+ { // MapBufferRange succeeds
+ typedef MapBufferRange::Result Result;
+ Result* result = GetSharedMemoryAs<Result*>();
+
+ MapBufferRange cmd;
+ cmd.Init(kTarget, kOffset, kSize, kAccess, data_shm_id, data_shm_offset,
+ result_shm_id, result_shm_offset);
+ *result = 0;
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(1u, *result);
+ }
+
+ { // Buffer is tracked as mapped
+ PassthroughResources* passthrough_resources = GetPassthroughResources();
+ auto mapped_buffer_info_iter =
+ passthrough_resources->mapped_buffer_map.find(kClientBufferId);
+ EXPECT_NE(mapped_buffer_info_iter,
+ passthrough_resources->mapped_buffer_map.end());
+ EXPECT_EQ(mapped_buffer_info_iter->second.original_access, kAccess);
+ EXPECT_EQ(mapped_buffer_info_iter->second.filtered_access, kFilteredAccess);
+ }
+
+ { // BufferData unmaps the data store.
+ DoBufferData(kTarget, kSize * 2, nullptr, GL_STREAM_DRAW);
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+ }
+
+ { // Buffer is no longer tracked as mapped
+ PassthroughResources* passthrough_resources = GetPassthroughResources();
+ EXPECT_EQ(passthrough_resources->mapped_buffer_map.find(kClientBufferId),
+ passthrough_resources->mapped_buffer_map.end());
+ }
+
+ { // UnmapBuffer fails.
+ UnmapBuffer cmd;
+ cmd.Init(kTarget);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_INVALID_OPERATION, GetGLError());
+ }
+}
+
+TEST_F(GLES3DecoderPassthroughTest, DeleteBuffersDestroysDataStore) {
+ const GLenum kTarget = GL_ARRAY_BUFFER;
+ const GLintptr kOffset = 10;
+ const GLsizeiptr kSize = 64;
+ const GLbitfield kAccess = GL_MAP_WRITE_BIT;
+ const GLbitfield kFilteredAccess = GL_MAP_WRITE_BIT | GL_MAP_READ_BIT;
+
+ uint32_t result_shm_id = shared_memory_id_;
+ uint32_t result_shm_offset = kSharedMemoryOffset;
+ uint32_t data_shm_id = shared_memory_id_;
+ // uint32_t is Result for both MapBufferRange and UnmapBuffer commands.
+ uint32_t data_shm_offset = kSharedMemoryOffset + sizeof(uint32_t);
+
+ DoBindBuffer(kTarget, kClientBufferId);
+ DoBufferData(kTarget, kSize + kOffset, nullptr, GL_STREAM_DRAW);
+
+ { // MapBufferRange succeeds
+ typedef MapBufferRange::Result Result;
+ Result* result = GetSharedMemoryAs<Result*>();
+
+ MapBufferRange cmd;
+ cmd.Init(kTarget, kOffset, kSize, kAccess, data_shm_id, data_shm_offset,
+ result_shm_id, result_shm_offset);
+ *result = 0;
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(1u, *result);
+ }
+
+ { // Buffer is tracked as mapped
+ PassthroughResources* passthrough_resources = GetPassthroughResources();
+ auto mapped_buffer_info_iter =
+ passthrough_resources->mapped_buffer_map.find(kClientBufferId);
+ EXPECT_NE(mapped_buffer_info_iter,
+ passthrough_resources->mapped_buffer_map.end());
+ EXPECT_EQ(mapped_buffer_info_iter->second.original_access, kAccess);
+ EXPECT_EQ(mapped_buffer_info_iter->second.filtered_access, kFilteredAccess);
+ }
+
+ { // DeleteBuffers unmaps the data store.
+ DoDeleteBuffer(kClientBufferId);
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+ }
+
+ { // Buffer is no longer tracked as mapped
+ PassthroughResources* passthrough_resources = GetPassthroughResources();
+ EXPECT_EQ(passthrough_resources->mapped_buffer_map.find(kClientBufferId),
+ passthrough_resources->mapped_buffer_map.end());
+ }
+
+ { // UnmapBuffer fails.
+ UnmapBuffer cmd;
+ cmd.Init(kTarget);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_INVALID_OPERATION, GetGLError());
+ }
+}
+
+TEST_F(GLES3DecoderPassthroughTest, MapUnmapBufferInvalidTarget) {
+ const GLenum kTarget = GL_TEXTURE_2D;
+ const GLintptr kOffset = 10;
+ const GLsizeiptr kSize = 64;
+ const GLbitfield kAccess = GL_MAP_WRITE_BIT;
+
+ uint32_t result_shm_id = shared_memory_id_;
+ uint32_t result_shm_offset = kSharedMemoryOffset;
+ uint32_t data_shm_id = shared_memory_id_;
+ // uint32_t is Result for both MapBufferRange and UnmapBuffer commands.
+ uint32_t data_shm_offset = kSharedMemoryOffset + sizeof(uint32_t);
+
+ typedef MapBufferRange::Result Result;
+ Result* result = GetSharedMemoryAs<Result*>();
+
+ {
+ MapBufferRange cmd;
+ cmd.Init(kTarget, kOffset, kSize, kAccess, data_shm_id, data_shm_offset,
+ result_shm_id, result_shm_offset);
+ *result = 0;
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(0u, *result);
+ EXPECT_EQ(GL_INVALID_ENUM, GetGLError());
+ }
+
+ {
+ UnmapBuffer cmd;
+ cmd.Init(kTarget);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_INVALID_ENUM, GetGLError());
+ }
+}
+
+TEST_F(GLES3DecoderPassthroughTest, CopyBufferSubDataValidArgs) {
+ const GLenum kTarget = GL_ELEMENT_ARRAY_BUFFER;
+ const GLsizeiptr kSize = 64;
+ const GLsizeiptr kHalfSize = kSize / 2;
+ const GLintptr kReadOffset = 0;
+ const GLintptr kWriteOffset = kHalfSize;
+ const GLsizeiptr kCopySize = 5;
+ const char kValue0 = 3;
+ const char kValue1 = 21;
+
+ // Set up the buffer so first half is kValue0 and second half is kValue1.
+ DoBindBuffer(kTarget, kClientBufferId);
+ DoBufferData(kTarget, kSize, nullptr, GL_STREAM_DRAW);
+ std::unique_ptr<char[]> data(new char[kHalfSize]);
+ memset(data.get(), kValue0, kHalfSize);
+ DoBufferSubData(kTarget, 0, kHalfSize, data.get());
+ memset(data.get(), kValue1, kHalfSize);
+ DoBufferSubData(kTarget, kHalfSize, kHalfSize, data.get());
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+
+ cmds::CopyBufferSubData cmd;
+ cmd.Init(kTarget, kTarget, kReadOffset, kWriteOffset, kCopySize);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+
+} // namespace gles2
+} // namespace gpu
diff --git a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest.cc b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest.cc
index e23813a3be3..d48fa5405be 100644
--- a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest.cc
+++ b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest.cc
@@ -56,13 +56,11 @@ using namespace cmds;
void GLES2DecoderRGBBackbufferTest::SetUp() {
// Test codepath with workaround clear_alpha_in_readpixels because
// ReadPixelsEmulator emulates the incorrect driver behavior.
- base::CommandLine command_line(0, NULL);
- command_line.AppendSwitchASCII(
- switches::kGpuDriverBugWorkarounds,
- base::IntToString(gpu::CLEAR_ALPHA_IN_READPIXELS));
+ gpu::GpuDriverBugWorkarounds workarounds;
+ workarounds.clear_alpha_in_readpixels = true;
InitState init;
init.bind_generates_resource = true;
- InitDecoderWithCommandLine(init, &command_line);
+ InitDecoderWithWorkarounds(init, workarounds);
SetupDefaultProgram();
}
@@ -1661,15 +1659,13 @@ void GLES3DecoderWithShaderTest::SetUp() {
}
void GLES3DecoderRGBBackbufferTest::SetUp() {
- base::CommandLine command_line(0, nullptr);
- command_line.AppendSwitchASCII(
- switches::kGpuDriverBugWorkarounds,
- base::IntToString(gpu::CLEAR_ALPHA_IN_READPIXELS));
+ gpu::GpuDriverBugWorkarounds workarounds;
+ workarounds.clear_alpha_in_readpixels = true;
InitState init;
init.gl_version = "OpenGL ES 3.0";
init.bind_generates_resource = true;
init.context_type = CONTEXT_TYPE_OPENGLES3;
- InitDecoderWithCommandLine(init, &command_line);
+ InitDecoderWithWorkarounds(init, workarounds);
SetupDefaultProgram();
}
diff --git a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest.h b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest.h
index b4a5f0d90b6..af6ed5b04e4 100644
--- a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest.h
+++ b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest.h
@@ -106,6 +106,18 @@ class GLES3DecoderManualInitTest : public GLES2DecoderManualInitTest {
GLES3DecoderManualInitTest() { shader_language_version_ = 300; }
};
+class GLES2DecoderPassthroughTest : public GLES2DecoderPassthroughTestBase {
+ public:
+ GLES2DecoderPassthroughTest()
+ : GLES2DecoderPassthroughTestBase(CONTEXT_TYPE_OPENGLES2) {}
+};
+
+class GLES3DecoderPassthroughTest : public GLES2DecoderPassthroughTestBase {
+ public:
+ GLES3DecoderPassthroughTest()
+ : GLES2DecoderPassthroughTestBase(CONTEXT_TYPE_OPENGLES3) {}
+};
+
} // namespace gles2
} // namespace gpu
diff --git a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_1_autogen.h b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_1_autogen.h
index 23e41af93d3..dd9df696e3f 100644
--- a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_1_autogen.h
+++ b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_1_autogen.h
@@ -536,15 +536,6 @@ TEST_P(GLES2DecoderTest1, DisableInvalidArgs0_1) {
EXPECT_EQ(GL_INVALID_ENUM, GetGLError());
}
-TEST_P(GLES2DecoderTest1, DisableVertexAttribArrayValidArgs) {
- EXPECT_CALL(*gl_, DisableVertexAttribArray(1));
- SpecializedSetup<cmds::DisableVertexAttribArray, 0>(true);
- cmds::DisableVertexAttribArray cmd;
- cmd.Init(1);
- EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
- EXPECT_EQ(GL_NO_ERROR, GetGLError());
-}
-
TEST_P(GLES2DecoderTest1, EnableValidArgs) {
SetupExpectationsForEnableDisable(GL_BLEND, true);
SpecializedSetup<cmds::Enable, 0>(true);
@@ -572,15 +563,6 @@ TEST_P(GLES2DecoderTest1, EnableInvalidArgs0_1) {
EXPECT_EQ(GL_INVALID_ENUM, GetGLError());
}
-TEST_P(GLES2DecoderTest1, EnableVertexAttribArrayValidArgs) {
- EXPECT_CALL(*gl_, EnableVertexAttribArray(1));
- SpecializedSetup<cmds::EnableVertexAttribArray, 0>(true);
- cmds::EnableVertexAttribArray cmd;
- cmd.Init(1);
- EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
- EXPECT_EQ(GL_NO_ERROR, GetGLError());
-}
-
TEST_P(GLES3DecoderTest1, FenceSyncValidArgs) {
const GLsync kNewServiceIdGLuint = reinterpret_cast<GLsync>(kNewServiceId);
EXPECT_CALL(*gl_, FenceSync(GL_SYNC_GPU_COMMANDS_COMPLETE, 0))
diff --git a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_attribs.cc b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_attribs.cc
index 9094ab01b4f..8cfa0a4af7e 100644
--- a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_attribs.cc
+++ b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_attribs.cc
@@ -51,6 +51,54 @@ namespace gles2 {
using namespace cmds;
+TEST_P(GLES2DecoderTest, DisableVertexAttribArrayValidArgs) {
+ SetDriverVertexAttribEnabled(1, false);
+ SpecializedSetup<cmds::DisableVertexAttribArray, 0>(true);
+ cmds::DisableVertexAttribArray cmd;
+ cmd.Init(1);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+
+TEST_P(GLES2DecoderTest, EnableVertexAttribArrayValidArgs) {
+ SetDriverVertexAttribEnabled(1, true);
+ SpecializedSetup<cmds::EnableVertexAttribArray, 0>(true);
+ cmds::EnableVertexAttribArray cmd;
+ cmd.Init(1);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+
+TEST_P(GLES2DecoderWithShaderTest, EnabledVertexAttribArrayIsDisabledIfUnused) {
+ SetupExpectationsForApplyingDefaultDirtyState();
+
+ // Set up and enable attribs 0, 1, 2
+ SetupAllNeededVertexBuffers();
+ // Enable attrib 3, and verify it's called in the driver
+ {
+ EXPECT_CALL(*gl_, EnableVertexAttribArray(3))
+ .Times(1)
+ .RetiresOnSaturation();
+ cmds::EnableVertexAttribArray cmd;
+ cmd.Init(3);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ }
+ DoVertexAttribPointer(3, 2, GL_FLOAT, 0, 0);
+
+  // Expect the draw call below to cause attrib 3 to be disabled in the driver
+ EXPECT_CALL(*gl_, DisableVertexAttribArray(3)).Times(1).RetiresOnSaturation();
+ // Perform a draw which uses only attributes 0, 1, 2 - not attrib 3
+ {
+ EXPECT_CALL(*gl_, DrawArrays(GL_TRIANGLES, 0, kNumVertices))
+ .Times(1)
+ .RetiresOnSaturation();
+ DrawArrays cmd;
+ cmd.Init(GL_TRIANGLES, 0, kNumVertices);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+ }
+}
+
TEST_P(GLES2DecoderWithShaderTest, GetVertexAttribPointervSucceeds) {
const GLuint kOffsetToTestFor = sizeof(float) * 4;
const GLuint kIndexToTest = 1;
diff --git a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_base.cc b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_base.cc
index bda92064832..0c39b3be25b 100644
--- a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_base.cc
+++ b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_base.cc
@@ -12,6 +12,7 @@
#include <string>
#include <vector>
+#include "base/command_line.h"
#include "base/strings/string_number_conversions.h"
#include "base/strings/string_split.h"
#include "gpu/command_buffer/common/gles2_cmd_format.h"
@@ -20,6 +21,7 @@
#include "gpu/command_buffer/service/logger.h"
#include "gpu/command_buffer/service/mailbox_manager.h"
#include "gpu/command_buffer/service/program_manager.h"
+#include "gpu/command_buffer/service/service_utils.h"
#include "gpu/command_buffer/service/test_helper.h"
#include "gpu/command_buffer/service/vertex_attrib_manager.h"
#include "testing/gtest/include/gtest/gtest.h"
@@ -183,12 +185,13 @@ GLES2DecoderTestBase::InitState::InitState()
GLES2DecoderTestBase::InitState::InitState(const InitState& other) = default;
void GLES2DecoderTestBase::InitDecoder(const InitState& init) {
- InitDecoderWithCommandLine(init, NULL);
+ gpu::GpuDriverBugWorkarounds workarounds;
+ InitDecoderWithWorkarounds(init, workarounds);
}
-void GLES2DecoderTestBase::InitDecoderWithCommandLine(
+void GLES2DecoderTestBase::InitDecoderWithWorkarounds(
const InitState& init,
- const base::CommandLine* command_line) {
+ const gpu::GpuDriverBugWorkarounds& workarounds) {
InitState normalized_init = init;
NormalizeInitState(&normalized_init);
// For easier substring/extension matching
@@ -202,14 +205,10 @@ void GLES2DecoderTestBase::InitDecoderWithCommandLine(
SetupMockGLBehaviors();
- scoped_refptr<FeatureInfo> feature_info = new FeatureInfo;
- if (command_line) {
- GpuDriverBugWorkarounds gpu_driver_bug_workaround(command_line);
- feature_info = new FeatureInfo(*command_line, gpu_driver_bug_workaround);
- }
+ scoped_refptr<FeatureInfo> feature_info = new FeatureInfo(workarounds);
group_ = scoped_refptr<ContextGroup>(new ContextGroup(
- gpu_preferences_, &mailbox_manager_, memory_tracker_,
+ gpu_preferences_, false, &mailbox_manager_, memory_tracker_,
&shader_translator_cache_, &framebuffer_completeness_cache_, feature_info,
normalized_init.bind_generates_resource, &image_manager_,
nullptr /* image_factory */, nullptr /* progress_reporter */,
@@ -279,9 +278,7 @@ void GLES2DecoderTestBase::InitDecoderWithCommandLine(
AddExpectationsForBindVertexArrayOES();
if (!group_->feature_info()->gl_version_info().BehavesLikeGLES()) {
- EXPECT_CALL(*gl_, EnableVertexAttribArray(0))
- .Times(1)
- .RetiresOnSaturation();
+ SetDriverVertexAttribEnabled(0, true);
}
static GLuint attrib_0_id[] = {
kServiceAttrib0BufferId,
@@ -1917,7 +1914,8 @@ void GLES2DecoderTestBase::SetupShader(
TestHelper::SetShaderStates(gl_.get(), GetShader(vertex_shader_client_id),
true, nullptr, nullptr, &shader_language_version_,
- nullptr, nullptr, nullptr, nullptr, nullptr);
+ nullptr, nullptr, nullptr, nullptr, nullptr,
+ nullptr);
OutputVariableList frag_output_variable_list;
frag_output_variable_list.push_back(TestHelper::ConstructOutputVariable(
@@ -1927,7 +1925,7 @@ void GLES2DecoderTestBase::SetupShader(
TestHelper::SetShaderStates(gl_.get(), GetShader(fragment_shader_client_id),
true, nullptr, nullptr, &shader_language_version_,
nullptr, nullptr, nullptr, nullptr,
- &frag_output_variable_list);
+ &frag_output_variable_list, nullptr);
cmds::AttachShader attach_cmd;
attach_cmd.Init(program_client_id, vertex_shader_client_id);
@@ -1974,10 +1972,26 @@ void GLES2DecoderTestBase::DoEnableDisable(GLenum cap, bool enable) {
}
}
+void GLES2DecoderTestBase::SetDriverVertexAttribEnabled(GLint index,
+ bool enable) {
+ DCHECK(index < static_cast<GLint>(attribs_enabled_.size()));
+ bool already_enabled = attribs_enabled_[index];
+ if (already_enabled != enable) {
+ attribs_enabled_[index] = enable;
+ if (enable) {
+ EXPECT_CALL(*gl_, EnableVertexAttribArray(index))
+ .Times(1)
+ .RetiresOnSaturation();
+ } else {
+ EXPECT_CALL(*gl_, DisableVertexAttribArray(index))
+ .Times(1)
+ .RetiresOnSaturation();
+ }
+ }
+}
+
void GLES2DecoderTestBase::DoEnableVertexAttribArray(GLint index) {
- EXPECT_CALL(*gl_, EnableVertexAttribArray(index))
- .Times(1)
- .RetiresOnSaturation();
+ SetDriverVertexAttribEnabled(index, true);
cmds::EnableVertexAttribArray cmd;
cmd.Init(index);
EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
@@ -2187,5 +2201,162 @@ void GLES2DecoderTestBase::DoLockDiscardableTextureCHROMIUM(GLuint texture_id) {
// instead of having to edit some template or the code generator.
#include "gpu/command_buffer/service/gles2_cmd_decoder_unittest_0_autogen.h"
+namespace {
+
+GpuPreferences GenerateGpuPreferencesForPassthroughTests() {
+ GpuPreferences preferences;
+ preferences.use_passthrough_cmd_decoder = true;
+ return preferences;
+}
+} // anonymous namespace
+
+GLES2DecoderPassthroughTestBase::GLES2DecoderPassthroughTestBase(
+ ContextType context_type)
+ : gpu_preferences_(GenerateGpuPreferencesForPassthroughTests()),
+ shader_translator_cache_(gpu_preferences_) {
+ context_creation_attribs_.context_type = context_type;
+}
+
+GLES2DecoderPassthroughTestBase::~GLES2DecoderPassthroughTestBase() {}
+
+void GLES2DecoderPassthroughTestBase::OnConsoleMessage(
+ int32_t id,
+ const std::string& message) {}
+void GLES2DecoderPassthroughTestBase::CacheShader(const std::string& key,
+ const std::string& shader) {}
+void GLES2DecoderPassthroughTestBase::OnFenceSyncRelease(uint64_t release) {}
+bool GLES2DecoderPassthroughTestBase::OnWaitSyncToken(const gpu::SyncToken&) {
+ return false;
+}
+void GLES2DecoderPassthroughTestBase::OnDescheduleUntilFinished() {}
+void GLES2DecoderPassthroughTestBase::OnRescheduleAfterFinished() {}
+
+void GLES2DecoderPassthroughTestBase::SetUp() {
+ base::CommandLine::Init(0, NULL);
+ auto* command_line = base::CommandLine::ForCurrentProcess();
+ command_line->AppendSwitch(switches::kUsePassthroughCmdDecoder);
+ command_line->AppendSwitchASCII(switches::kUseGL,
+ gl::kGLImplementationANGLEName);
+ command_line->AppendSwitchASCII(switches::kUseANGLE,
+ gl::kANGLEImplementationNullName);
+
+ context_creation_attribs_.offscreen_framebuffer_size = gfx::Size(4, 4);
+ context_creation_attribs_.alpha_size = 8;
+ context_creation_attribs_.blue_size = 8;
+ context_creation_attribs_.green_size = 8;
+ context_creation_attribs_.red_size = 8;
+ context_creation_attribs_.depth_size = 24;
+ context_creation_attribs_.stencil_size = 8;
+ context_creation_attribs_.bind_generates_resource = true;
+
+ gl::init::InitializeGLOneOffImplementation(gl::kGLImplementationEGLGLES2,
+ false, false, false, true);
+
+ scoped_refptr<gles2::FeatureInfo> feature_info = new gles2::FeatureInfo();
+ group_ = new gles2::ContextGroup(
+ gpu_preferences_, true, &mailbox_manager_, nullptr /* memory_tracker */,
+ &shader_translator_cache_, &framebuffer_completeness_cache_, feature_info,
+ context_creation_attribs_.bind_generates_resource, &image_manager_,
+ nullptr /* image_factory */, nullptr /* progress_reporter */,
+ GpuFeatureInfo(), &discardable_manager_);
+
+ surface_ = gl::init::CreateOffscreenGLSurface(
+ context_creation_attribs_.offscreen_framebuffer_size);
+ context_ = gl::init::CreateGLContext(
+ nullptr, surface_.get(),
+ GenerateGLContextAttribs(context_creation_attribs_, group_.get()));
+ context_->MakeCurrent(surface_.get());
+
+ command_buffer_service_.reset(new FakeCommandBufferServiceBase());
+
+ decoder_.reset(new GLES2DecoderPassthroughImpl(
+ this, command_buffer_service_.get(), group_.get()));
+ ASSERT_TRUE(group_->Initialize(decoder_.get(),
+ context_creation_attribs_.context_type,
+ DisallowedFeatures()));
+ ASSERT_TRUE(decoder_->Initialize(surface_, context_, false,
+ DisallowedFeatures(),
+ context_creation_attribs_));
+
+ scoped_refptr<gpu::Buffer> buffer =
+ command_buffer_service_->CreateTransferBufferHelper(kSharedBufferSize,
+ &shared_memory_id_);
+ shared_memory_offset_ = kSharedMemoryOffset;
+ shared_memory_address_ =
+ reinterpret_cast<int8_t*>(buffer->memory()) + shared_memory_offset_;
+ shared_memory_base_ = buffer->memory();
+
+ decoder_->MakeCurrent();
+ decoder_->BeginDecoding();
+}
+
+void GLES2DecoderPassthroughTestBase::TearDown() {
+ surface_ = nullptr;
+ context_ = nullptr;
+ decoder_->EndDecoding();
+ decoder_->Destroy(!decoder_->WasContextLost());
+ group_->Destroy(decoder_.get(), false);
+ decoder_.reset();
+ group_ = nullptr;
+ command_buffer_service_.reset();
+ gl::init::ShutdownGL();
+}
+
+GLint GLES2DecoderPassthroughTestBase::GetGLError() {
+ cmds::GetError cmd;
+ cmd.Init(shared_memory_id_, shared_memory_offset_);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ return static_cast<GLint>(*GetSharedMemoryAs<GLenum*>());
+}
+
+void GLES2DecoderPassthroughTestBase::DoBindBuffer(GLenum target,
+ GLuint client_id) {
+ cmds::BindBuffer cmd;
+ cmd.Init(target, client_id);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+}
+
+void GLES2DecoderPassthroughTestBase::DoDeleteBuffer(GLuint client_id) {
+ GenHelper<cmds::DeleteBuffersImmediate>(client_id);
+}
+
+void GLES2DecoderPassthroughTestBase::DoBufferData(GLenum target,
+ GLsizei size,
+ const void* data,
+ GLenum usage) {
+ cmds::BufferData cmd;
+ if (data) {
+ EXPECT_TRUE(size >= 0);
+ EXPECT_LT(static_cast<size_t>(size),
+ kSharedBufferSize - kSharedMemoryOffset);
+ memcpy(shared_memory_address_, data, size);
+ cmd.Init(target, size, shared_memory_id_, shared_memory_offset_, usage);
+ } else {
+ cmd.Init(target, size, 0, 0, usage);
+ }
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+}
+
+void GLES2DecoderPassthroughTestBase::DoBufferSubData(GLenum target,
+ GLint offset,
+ GLsizeiptr size,
+ const void* data) {
+ memcpy(shared_memory_address_, data, size);
+ cmds::BufferSubData cmd;
+ cmd.Init(target, offset, size, shared_memory_id_, shared_memory_offset_);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+}
+
+// GCC requires these declarations, but MSVC requires they not be present
+#ifndef COMPILER_MSVC
+const size_t GLES2DecoderPassthroughTestBase::kSharedBufferSize;
+const uint32_t GLES2DecoderPassthroughTestBase::kSharedMemoryOffset;
+const uint32_t GLES2DecoderPassthroughTestBase::kInvalidSharedMemoryOffset;
+const int32_t GLES2DecoderPassthroughTestBase::kInvalidSharedMemoryId;
+
+const uint32_t GLES2DecoderPassthroughTestBase::kNewClientId;
+const GLuint GLES2DecoderPassthroughTestBase::kClientBufferId;
+#endif
+
} // namespace gles2
} // namespace gpu
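
Editor's note: the SetDriverVertexAttribEnabled() helper added above only arms an EnableVertexAttribArray/DisableVertexAttribArray mock expectation when the tracked state in attribs_enabled_ actually flips, so repeated enables of the same index no longer over-count driver calls. The standalone C++ sketch below isolates that transition-tracking idea; the StubDriver type and the fixed size of 16 attributes are assumptions made only to keep the sketch compilable on its own.

#include <array>
#include <cstdio>

// Hypothetical stand-in for the mocked GL interface used by the tests.
struct StubDriver {
  void EnableVertexAttribArray(int index) { std::printf("enable %d\n", index); }
  void DisableVertexAttribArray(int index) { std::printf("disable %d\n", index); }
};

class AttribStateTracker {
 public:
  explicit AttribStateTracker(StubDriver* driver) : driver_(driver) {}

  // Only touch the driver when the cached state actually changes, mirroring
  // SetDriverVertexAttribEnabled() above. |index| must be < 16 in this sketch.
  void SetEnabled(int index, bool enable) {
    if (attribs_enabled_[index] == enable)
      return;  // no transition, so no driver call is expected
    attribs_enabled_[index] = enable;
    if (enable)
      driver_->EnableVertexAttribArray(index);
    else
      driver_->DisableVertexAttribArray(index);
  }

 private:
  StubDriver* driver_;
  std::array<bool, 16> attribs_enabled_ = {};  // plays the role of attribs_enabled_
};

int main() {
  StubDriver driver;
  AttribStateTracker tracker(&driver);
  tracker.SetEnabled(0, true);   // prints "enable 0"
  tracker.SetEnabled(0, true);   // duplicate request: no driver call
  tracker.SetEnabled(0, false);  // prints "disable 0"
  return 0;
}
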
diff --git a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_base.h b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_base.h
index 8d717f88b9e..adcef8523f6 100644
--- a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_base.h
+++ b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_base.h
@@ -8,6 +8,7 @@
#include <stddef.h>
#include <stdint.h>
+#include <array>
#include <memory>
#include "base/message_loop/message_loop.h"
@@ -20,6 +21,7 @@
#include "gpu/command_buffer/service/gl_context_mock.h"
#include "gpu/command_buffer/service/gles2_cmd_decoder.h"
#include "gpu/command_buffer/service/gles2_cmd_decoder_mock.h"
+#include "gpu/command_buffer/service/gles2_cmd_decoder_passthrough.h"
#include "gpu/command_buffer/service/gpu_preferences.h"
#include "gpu/command_buffer/service/image_manager.h"
#include "gpu/command_buffer/service/mailbox_manager_impl.h"
@@ -33,15 +35,12 @@
#include "gpu/command_buffer/service/texture_manager.h"
#include "gpu/command_buffer/service/transform_feedback_manager.h"
#include "gpu/command_buffer/service/vertex_array_manager.h"
+#include "gpu/config/gpu_driver_bug_workarounds.h"
#include "testing/gtest/include/gtest/gtest.h"
#include "ui/gl/gl_mock.h"
#include "ui/gl/gl_surface_stub.h"
#include "ui/gl/gl_version_info.h"
-namespace base {
-class CommandLine;
-}
-
namespace gpu {
namespace gles2 {
@@ -225,8 +224,8 @@ class GLES2DecoderTestBase : public ::testing::TestWithParam<bool>,
};
void InitDecoder(const InitState& init);
- void InitDecoderWithCommandLine(const InitState& init,
- const base::CommandLine* command_line);
+ void InitDecoderWithWorkarounds(const InitState& init,
+ const GpuDriverBugWorkarounds& workarounds);
void ResetDecoder();
@@ -385,6 +384,7 @@ class GLES2DecoderTestBase : public ::testing::TestWithParam<bool>,
void DoEnableDisable(GLenum cap, bool enable);
+ void SetDriverVertexAttribEnabled(GLint index, bool enable);
void DoEnableVertexAttribArray(GLint index);
void DoBufferData(GLenum target, GLsizei size);
@@ -725,6 +725,8 @@ class GLES2DecoderTestBase : public ::testing::TestWithParam<bool>,
int shader_language_version_;
+ std::array<bool, kNumVertexAttribs> attribs_enabled_ = {};
+
private:
// MockGLStates is used to track GL states and emulate driver
// behaviors on top of MockGLInterface.
@@ -801,6 +803,121 @@ MATCHER_P2(PointsToArray, array, size, "") {
return true;
}
+class GLES2DecoderPassthroughTestBase : public testing::Test,
+ public GLES2DecoderClient {
+ public:
+ GLES2DecoderPassthroughTestBase(ContextType context_type);
+ ~GLES2DecoderPassthroughTestBase() override;
+
+ void OnConsoleMessage(int32_t id, const std::string& message) override;
+ void CacheShader(const std::string& key, const std::string& shader) override;
+ void OnFenceSyncRelease(uint64_t release) override;
+ bool OnWaitSyncToken(const gpu::SyncToken&) override;
+ void OnDescheduleUntilFinished() override;
+ void OnRescheduleAfterFinished() override;
+
+ void SetUp() override;
+ void TearDown() override;
+
+ template <typename T>
+ void GenHelper(GLuint client_id) {
+ int8_t buffer[sizeof(T) + sizeof(client_id)];
+ T& cmd = *reinterpret_cast<T*>(&buffer);
+ cmd.Init(1, &client_id);
+ EXPECT_EQ(error::kNoError, ExecuteImmediateCmd(cmd, sizeof(client_id)));
+ }
+
+ template <typename Command>
+ bool IsObjectHelper(GLuint client_id) {
+ typename Command::Result* result =
+ static_cast<typename Command::Result*>(shared_memory_address_);
+ Command cmd;
+ cmd.Init(client_id, shared_memory_id_, kSharedMemoryOffset);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ bool isObject = static_cast<bool>(*result);
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+ return isObject;
+ }
+
+ template <typename T>
+ error::Error ExecuteCmd(const T& cmd) {
+ static_assert(T::kArgFlags == cmd::kFixed,
+ "T::kArgFlags should equal cmd::kFixed");
+ int entries_processed = 0;
+ return decoder_->DoCommands(1, (const void*)&cmd,
+ ComputeNumEntries(sizeof(cmd)),
+ &entries_processed);
+ }
+
+ template <typename T>
+ error::Error ExecuteImmediateCmd(const T& cmd, size_t data_size) {
+ static_assert(T::kArgFlags == cmd::kAtLeastN,
+ "T::kArgFlags should equal cmd::kAtLeastN");
+ int entries_processed = 0;
+ return decoder_->DoCommands(1, (const void*)&cmd,
+ ComputeNumEntries(sizeof(cmd) + data_size),
+ &entries_processed);
+ }
+
+ template <typename T>
+ T GetSharedMemoryAs() {
+ return reinterpret_cast<T>(shared_memory_address_);
+ }
+
+ template <typename T>
+ T GetSharedMemoryAsWithOffset(uint32_t offset) {
+ void* ptr = reinterpret_cast<int8_t*>(shared_memory_address_) + offset;
+ return reinterpret_cast<T>(ptr);
+ }
+
+ PassthroughResources* GetPassthroughResources() const {
+ return group_->passthrough_resources();
+ }
+
+ GLint GetGLError();
+
+ protected:
+ void DoBindBuffer(GLenum target, GLuint client_id);
+ void DoDeleteBuffer(GLuint client_id);
+ void DoBufferData(GLenum target,
+ GLsizei size,
+ const void* data,
+ GLenum usage);
+ void DoBufferSubData(GLenum target,
+ GLint offset,
+ GLsizeiptr size,
+ const void* data);
+
+ static const size_t kSharedBufferSize = 2048;
+ static const uint32_t kSharedMemoryOffset = 132;
+ static const uint32_t kInvalidSharedMemoryOffset = kSharedBufferSize + 1;
+ static const int32_t kInvalidSharedMemoryId =
+ FakeCommandBufferServiceBase::kTransferBufferBaseId - 1;
+
+ static const uint32_t kNewClientId = 501;
+ static const GLuint kClientBufferId = 100;
+
+ int32_t shared_memory_id_;
+ uint32_t shared_memory_offset_;
+ void* shared_memory_address_;
+ void* shared_memory_base_;
+
+ private:
+ ContextCreationAttribHelper context_creation_attribs_;
+ GpuPreferences gpu_preferences_;
+ MailboxManagerImpl mailbox_manager_;
+ ShaderTranslatorCache shader_translator_cache_;
+ FramebufferCompletenessCache framebuffer_completeness_cache_;
+ ImageManager image_manager_;
+ ServiceDiscardableManager discardable_manager_;
+
+ scoped_refptr<gl::GLSurface> surface_;
+ scoped_refptr<gl::GLContext> context_;
+ std::unique_ptr<FakeCommandBufferServiceBase> command_buffer_service_;
+ std::unique_ptr<GLES2DecoderPassthroughImpl> decoder_;
+ scoped_refptr<ContextGroup> group_;
+};
+
} // namespace gles2
} // namespace gpu
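
Editor's note: a minimal sketch of how a concrete test could sit on top of the GLES2DecoderPassthroughTestBase declared above, assuming it lives in a decoder unit test file that already includes this header and the GL/gtest headers. The fixture name, test name and assertions are illustrative; only the helpers (DoBindBuffer, DoBufferData, DoDeleteBuffer, GetGLError) and the kClientBufferId constant come from the class as added in this patch.

namespace gpu {
namespace gles2 {

// Hypothetical fixture: picks a plain ES2 context for the passthrough decoder.
class GLES2DecoderPassthroughBufferTest
    : public GLES2DecoderPassthroughTestBase {
 public:
  GLES2DecoderPassthroughBufferTest()
      : GLES2DecoderPassthroughTestBase(CONTEXT_TYPE_OPENGLES2) {}
};

TEST_F(GLES2DecoderPassthroughBufferTest, BufferDataRoundTrip) {
  // bind_generates_resource is true in SetUp(), so binding the client id
  // creates the buffer on the service side.
  const uint8_t kData[4] = {1, 2, 3, 4};
  DoBindBuffer(GL_ARRAY_BUFFER, kClientBufferId);
  DoBufferData(GL_ARRAY_BUFFER, sizeof(kData), kData, GL_STATIC_DRAW);
  EXPECT_EQ(GL_NO_ERROR, GetGLError());
  DoDeleteBuffer(kClientBufferId);
}

}  // namespace gles2
}  // namespace gpu
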
diff --git a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_context_lost.cc b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_context_lost.cc
index 60d66be5bb3..2b8820792c1 100644
--- a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_context_lost.cc
+++ b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_context_lost.cc
@@ -126,14 +126,12 @@ class GLES2DecoderLostContextTest : public GLES2DecoderManualInitTest {
}
void InitWithVirtualContextsAndRobustness() {
- base::CommandLine command_line(0, NULL);
- command_line.AppendSwitchASCII(
- switches::kGpuDriverBugWorkarounds,
- base::IntToString(USE_VIRTUALIZED_GL_CONTEXTS));
+ gpu::GpuDriverBugWorkarounds workarounds;
+ workarounds.use_virtualized_gl_contexts = true;
InitState init;
init.gl_version = "opengl es 2.0";
init.extensions = "GL_KHR_robustness";
- InitDecoderWithCommandLine(init, &command_line);
+ InitDecoderWithWorkarounds(init, workarounds);
}
void DoGetErrorWithContextLost(GLenum reset_status) {
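
Editor's note: the InitDecoderWithWorkarounds() pattern introduced here recurs in the drawing, framebuffer, program and texture unit tests further down; workarounds are now toggled as plain bool fields on gpu::GpuDriverBugWorkarounds instead of being serialized into a --gpu-driver-bug-workarounds command-line switch. Below is a hedged sketch of a manual-init helper that combines two of the workarounds exercised later in this patch; the fixture name is hypothetical, while both workaround fields and the InitState members appear verbatim elsewhere in the diff.

// Hypothetical fixture combining workarounds that the tests below enable
// one at a time.
class CombinedWorkaroundTest : public GLES2DecoderManualInitTest {
 protected:
  void InitWithCombinedWorkarounds() {
    gpu::GpuDriverBugWorkarounds workarounds;
    workarounds.init_vertex_attributes = true;
    workarounds.pack_parameters_workaround_with_pack_buffer = true;

    InitState init;
    init.gl_version = "OpenGL ES 3.0";
    init.context_type = CONTEXT_TYPE_OPENGLES3;
    init.bind_generates_resource = true;
    InitDecoderWithWorkarounds(init, workarounds);
  }
};
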
diff --git a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_drawing.cc b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_drawing.cc
index a657fd51cf2..40eab5966a0 100644
--- a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_drawing.cc
+++ b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_drawing.cc
@@ -818,17 +818,15 @@ TEST_P(GLES2DecoderWithShaderTest, DrawArraysValidAttributesSucceeds) {
// Same as DrawArraysValidAttributesSucceeds, but with workaround
// |init_vertex_attributes|.
TEST_P(GLES2DecoderManualInitTest, InitVertexAttributes) {
- base::CommandLine command_line(0, NULL);
- command_line.AppendSwitchASCII(
- switches::kGpuDriverBugWorkarounds,
- base::IntToString(gpu::INIT_VERTEX_ATTRIBUTES));
+ gpu::GpuDriverBugWorkarounds workarounds;
+ workarounds.init_vertex_attributes = true;
InitState init;
init.has_alpha = true;
init.has_depth = true;
init.request_alpha = true;
init.request_depth = true;
init.bind_generates_resource = true;
- InitDecoderWithCommandLine(init, &command_line);
+ InitDecoderWithWorkarounds(init, workarounds);
SetupDefaultProgram();
SetupTexture();
SetupVertexBuffer();
diff --git a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_extensions.cc b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_extensions.cc
index 7e080f10005..7812a2d9ab2 100644
--- a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_extensions.cc
+++ b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_extensions.cc
@@ -1724,6 +1724,26 @@ TEST_P(GLES2DecoderTestWithCHROMIUMPathRendering,
EXPECT_EQ(GL_INVALID_OPERATION, GetGLError());
}
+class GLES2DecoderTestWithCHROMIUMRasterTransport : public GLES2DecoderTest {
+ public:
+ GLES2DecoderTestWithCHROMIUMRasterTransport() {}
+ void SetUp() override {
+ InitState init;
+ init.gl_version = "opengl es 2.0";
+ init.has_alpha = true;
+ init.has_depth = true;
+ init.request_alpha = true;
+ init.request_depth = true;
+ init.bind_generates_resource = true;
+ init.extensions = "chromium_raster_transport";
+ InitDecoder(init);
+ }
+};
+
+INSTANTIATE_TEST_CASE_P(Service,
+ GLES2DecoderTestWithCHROMIUMRasterTransport,
+ ::testing::Bool());
+
#include "gpu/command_buffer/service/gles2_cmd_decoder_unittest_extensions_autogen.h"
} // namespace gles2
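
Editor's note: a minimal sketch of a test body attached to the new raster-transport fixture above, assuming it is placed in the same file; the test name and its single assertion are illustrative. Because the fixture's SetUp() already requests the chromium_raster_transport extension through InitState, a body only needs the parameterized TEST_P macro plus the usual base-class helpers.

TEST_P(GLES2DecoderTestWithCHROMIUMRasterTransport, InitializesCleanly) {
  // Nothing beyond the fixture's SetUp(); just confirm that requesting the
  // extension left the decoder in an error-free state.
  EXPECT_EQ(GL_NO_ERROR, GetGLError());
}
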
diff --git a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_framebuffers.cc b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_framebuffers.cc
index b07b2c0e670..7998a5a429b 100644
--- a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_framebuffers.cc
+++ b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_framebuffers.cc
@@ -908,15 +908,13 @@ TEST_P(GLES3DecoderTest, ReadPixelsPixelPackBufferIsNotLargeEnough) {
}
TEST_P(GLES2DecoderManualInitTest, ReadPixels2RowLengthWorkaround) {
- base::CommandLine command_line(0, NULL);
- command_line.AppendSwitchASCII(
- switches::kGpuDriverBugWorkarounds,
- base::IntToString(gpu::PACK_PARAMETERS_WORKAROUND_WITH_PACK_BUFFER));
+ gpu::GpuDriverBugWorkarounds workarounds;
+ workarounds.pack_parameters_workaround_with_pack_buffer = true;
InitState init;
init.gl_version = "OpenGL ES 3.0";
init.bind_generates_resource = true;
init.context_type = CONTEXT_TYPE_OPENGLES3;
- InitDecoderWithCommandLine(init, &command_line);
+ InitDecoderWithWorkarounds(init, workarounds);
const GLsizei kWidth = 5;
const GLsizei kHeight = 3;
@@ -961,15 +959,13 @@ TEST_P(GLES2DecoderManualInitTest, ReadPixels2RowLengthWorkaround) {
}
TEST_P(GLES2DecoderManualInitTest, ReadPixels2AlignmentWorkaround) {
- base::CommandLine command_line(0, NULL);
- command_line.AppendSwitchASCII(
- switches::kGpuDriverBugWorkarounds,
- base::IntToString(gpu::PACK_PARAMETERS_WORKAROUND_WITH_PACK_BUFFER));
+ gpu::GpuDriverBugWorkarounds workarounds;
+ workarounds.pack_parameters_workaround_with_pack_buffer = true;
InitState init;
init.gl_version = "OpenGL ES 3.0";
init.bind_generates_resource = true;
init.context_type = CONTEXT_TYPE_OPENGLES3;
- InitDecoderWithCommandLine(init, &command_line);
+ InitDecoderWithWorkarounds(init, workarounds);
const GLsizei kWidth = 5;
const GLsizei kHeight = 3;
@@ -1016,15 +1012,13 @@ TEST_P(GLES2DecoderManualInitTest, ReadPixels2AlignmentWorkaround) {
TEST_P(GLES2DecoderManualInitTest,
ReadPixels2RowLengthAndAlignmentWorkarounds) {
- base::CommandLine command_line(0, NULL);
- command_line.AppendSwitchASCII(
- switches::kGpuDriverBugWorkarounds,
- base::IntToString(gpu::PACK_PARAMETERS_WORKAROUND_WITH_PACK_BUFFER));
+ gpu::GpuDriverBugWorkarounds workarounds;
+ workarounds.pack_parameters_workaround_with_pack_buffer = true;
InitState init;
init.gl_version = "OpenGL ES 3.0";
init.bind_generates_resource = true;
init.context_type = CONTEXT_TYPE_OPENGLES3;
- InitDecoderWithCommandLine(init, &command_line);
+ InitDecoderWithWorkarounds(init, workarounds);
const GLsizei kWidth = 5;
const GLsizei kHeight = 3;
@@ -3358,7 +3352,7 @@ TEST_P(GLES2DecoderTest, ClearBackbufferBitsOnFlipSwap) {
EXPECT_CALL(*gl_, Finish()).Times(AnyNumber());
ResizeCHROMIUM& resize_cmd = *GetImmediateAs<ResizeCHROMIUM>();
- resize_cmd.Init(1, 1, 1.0f, GL_TRUE);
+ resize_cmd.Init(1, 1, 1.0f, GL_COLOR_SPACE_UNSPECIFIED_CHROMIUM, GL_TRUE);
EXPECT_EQ(error::kNoError, ExecuteCmd(resize_cmd));
EXPECT_EQ(GL_NO_ERROR, GetGLError());
EXPECT_EQ(static_cast<uint32_t>(GL_COLOR_BUFFER_BIT),
diff --git a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_programs.cc b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_programs.cc
index cdfd539ba8b..fd4d2f14b8a 100644
--- a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_programs.cc
+++ b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_programs.cc
@@ -2070,15 +2070,13 @@ TEST_P(GLES2DecoderWithShaderTest, BindUniformLocationCHROMIUMBucket) {
}
TEST_P(GLES2DecoderManualInitTest, ClearUniformsBeforeFirstProgramUse) {
- base::CommandLine command_line(0, NULL);
- command_line.AppendSwitchASCII(
- switches::kGpuDriverBugWorkarounds,
- base::IntToString(gpu::CLEAR_UNIFORMS_BEFORE_FIRST_PROGRAM_USE));
+ gpu::GpuDriverBugWorkarounds workarounds;
+ workarounds.clear_uniforms_before_first_program_use = true;
InitState init;
init.has_alpha = true;
init.request_alpha = true;
init.bind_generates_resource = true;
- InitDecoderWithCommandLine(init, &command_line);
+ InitDecoderWithWorkarounds(init, workarounds);
{
static AttribInfo attribs[] = {
{
diff --git a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_textures.cc b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_textures.cc
index a3ef80fa970..7524122ad36 100644
--- a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_textures.cc
+++ b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_textures.cc
@@ -151,13 +151,11 @@ TEST_P(GLES3DecoderTest, GenerateMipmapBaseLevel) {
// Same as GenerateMipmapClearsUnclearedTexture, but with workaround
// |set_texture_filters_before_generating_mipmap|.
TEST_P(GLES2DecoderManualInitTest, SetTextureFiltersBeforeGenerateMipmap) {
- base::CommandLine command_line(0, NULL);
- command_line.AppendSwitchASCII(
- switches::kGpuDriverBugWorkarounds,
- base::IntToString(gpu::SET_TEXTURE_FILTER_BEFORE_GENERATING_MIPMAP));
+ gpu::GpuDriverBugWorkarounds workarounds;
+ workarounds.set_texture_filter_before_generating_mipmap = true;
InitState init;
init.bind_generates_resource = true;
- InitDecoderWithCommandLine(init, &command_line);
+ InitDecoderWithWorkarounds(init, workarounds);
EXPECT_CALL(*gl_, GenerateMipmapEXT(_)).Times(0);
DoBindTexture(GL_TEXTURE_2D, client_texture_id_, kServiceTextureId);
@@ -2222,7 +2220,43 @@ TEST_P(GLES2DecoderManualInitTest, CompressedTexImage2DETC1) {
EXPECT_EQ(GL_INVALID_OPERATION, GetGLError());
}
+TEST_P(GLES2DecoderTest, CopyTextureCHROMIUMBadTarget) {
+ DoBindTexture(GL_TEXTURE_2D, client_texture_id_, kServiceTextureId);
+ DoTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, 16, 17, 0, GL_RGBA, GL_UNSIGNED_BYTE,
+ 0, 0);
+
+ EXPECT_CALL(*gl_, GenTextures(_, _))
+ .WillOnce(SetArgPointee<1>(kNewServiceId))
+ .RetiresOnSaturation();
+ GenHelper<GenTexturesImmediate>(kNewClientId);
+
+ const GLenum kBadTarget = GL_RGB;
+ CopyTextureCHROMIUM cmd;
+ cmd.Init(client_texture_id_, 0, kBadTarget, kNewClientId, 0, GL_RGBA,
+ GL_UNSIGNED_BYTE, GL_FALSE, GL_FALSE, GL_FALSE);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_INVALID_ENUM, GetGLError());
+}
+
+TEST_P(GLES2DecoderTest, CopySubTextureCHROMIUMBadTarget) {
+ DoBindTexture(GL_TEXTURE_2D, client_texture_id_, kServiceTextureId);
+ DoTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, 16, 17, 0, GL_RGBA, GL_UNSIGNED_BYTE,
+ 0, 0);
+
+ EXPECT_CALL(*gl_, GenTextures(_, _))
+ .WillOnce(SetArgPointee<1>(kNewServiceId))
+ .RetiresOnSaturation();
+ DoBindTexture(GL_TEXTURE_2D, kNewClientId, kNewServiceId);
+ DoTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, 16, 17, 0, GL_RGBA, GL_UNSIGNED_BYTE,
+ 0, 0);
+ const GLenum kBadTarget = GL_RGB;
+ CopySubTextureCHROMIUM cmd;
+ cmd.Init(client_texture_id_, 0, kBadTarget, kNewClientId, 0, 1, 1, 2, 2, 3, 3,
+ GL_FALSE, GL_FALSE, GL_FALSE);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_INVALID_ENUM, GetGLError());
+}
TEST_P(GLES2DecoderManualInitTest, EGLImageExternalBindTexture) {
InitState init;
@@ -2874,16 +2908,13 @@ TEST_P(GLES2DecoderTest, TexSubImage2DDoesNotClearAfterTexImage2DNULLThenData) {
EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
}
-TEST_P(
- GLES2DecoderManualInitTest,
- TexSubImage2DDoesNotClearAfterTexImage2DNULLThenDataWithTexImage2DIsFaster) {
- base::CommandLine command_line(0, NULL);
- command_line.AppendSwitchASCII(
- switches::kGpuDriverBugWorkarounds,
- base::IntToString(gpu::TEXSUBIMAGE_FASTER_THAN_TEXIMAGE));
+TEST_P(GLES2DecoderManualInitTest,
+ TexSubImage2DNotClearAfterTexImage2DNULLThenDataWithTexImage2DIsFaster) {
+ gpu::GpuDriverBugWorkarounds workarounds;
+ workarounds.texsubimage_faster_than_teximage = true;
InitState init;
init.bind_generates_resource = true;
- InitDecoderWithCommandLine(init, &command_line);
+ InitDecoderWithWorkarounds(init, workarounds);
DoBindTexture(GL_TEXTURE_2D, client_texture_id_, kServiceTextureId);
DoTexImage2D(
GL_TEXTURE_2D, 0, GL_RGBA, 2, 2, 0, GL_RGBA, GL_UNSIGNED_BYTE, 0, 0);
@@ -3605,6 +3636,18 @@ TEST_P(GLES2DecoderTest, BindTexImage2DCHROMIUMCubeMapNotAllowed) {
EXPECT_EQ(GL_INVALID_ENUM, GetGLError());
}
+TEST_P(GLES2DecoderTest,
+ BindTexImage2DWithInternalformatCHROMIUMBadInternalFormat) {
+ scoped_refptr<gl::GLImage> image(new gl::GLImageStub);
+ GetImageManagerForTest()->AddImage(image.get(), 1);
+ DoBindTexture(GL_TEXTURE_CUBE_MAP, client_texture_id_, kServiceTextureId);
+
+ BindTexImage2DWithInternalformatCHROMIUM bind_tex_image_2d_cmd;
+ bind_tex_image_2d_cmd.Init(GL_TEXTURE_2D, GL_BACK, 1); // Invalid enum
+ EXPECT_EQ(error::kNoError, ExecuteCmd(bind_tex_image_2d_cmd));
+ EXPECT_EQ(GL_INVALID_ENUM, GetGLError());
+}
+
TEST_P(GLES2DecoderTest, OrphanGLImageWithTexImage2D) {
scoped_refptr<gl::GLImage> image(new gl::GLImageStub);
GetImageManagerForTest()->AddImage(image.get(), 1);
@@ -4892,10 +4935,9 @@ TEST_P(GLES2DecoderTest, TestDeleteDiscardableTexture) {
TEST_P(GLES2DecoderManualInitTest,
TestDiscardableTextureUnusableWhileUnlocked) {
- base::CommandLine command_line(0, NULL);
InitState init;
init.bind_generates_resource = false;
- InitDecoderWithCommandLine(init, &command_line);
+ InitDecoder(init);
DoInitializeDiscardableTextureCHROMIUM(client_texture_id_);
DoBindTexture(GL_TEXTURE_2D, client_texture_id_, kServiceTextureId);
diff --git a/chromium/gpu/command_buffer/service/gpu_preferences.cc b/chromium/gpu/command_buffer/service/gpu_preferences.cc
index 1b7983d6420..3de69ee8e46 100644
--- a/chromium/gpu/command_buffer/service/gpu_preferences.cc
+++ b/chromium/gpu/command_buffer/service/gpu_preferences.cc
@@ -8,13 +8,7 @@
namespace gpu {
-GpuPreferences::GpuPreferences() {
- gpu_program_cache_size = kDefaultMaxProgramCacheMemoryBytes;
-#if defined(OS_ANDROID)
- if (base::SysInfo::IsLowEndDevice())
- gpu_program_cache_size = kLowEndMaxProgramCacheMemoryBytes;
-#endif
-}
+GpuPreferences::GpuPreferences() = default;
GpuPreferences::GpuPreferences(const GpuPreferences& other) = default;
diff --git a/chromium/gpu/command_buffer/service/gpu_switches.cc b/chromium/gpu/command_buffer/service/gpu_switches.cc
index b4bd8bf315e..50ff573a5cf 100644
--- a/chromium/gpu/command_buffer/service/gpu_switches.cc
+++ b/chromium/gpu/command_buffer/service/gpu_switches.cc
@@ -17,10 +17,6 @@ const char kDisableGLErrorLimit[] = "disable-gl-error-limit";
// Disable the GLSL translator.
const char kDisableGLSLTranslator[] = "disable-glsl-translator";
-// Disable workarounds for various GPU driver bugs.
-const char kDisableGpuDriverBugWorkarounds[] =
- "disable-gpu-driver-bug-workarounds";
-
// Turn off user-defined name hashing in shaders.
const char kDisableShaderNameHashing[] = "disable-shader-name-hashing";
diff --git a/chromium/gpu/command_buffer/service/gpu_switches.h b/chromium/gpu/command_buffer/service/gpu_switches.h
index f32b7b261d2..f1a75801430 100644
--- a/chromium/gpu/command_buffer/service/gpu_switches.h
+++ b/chromium/gpu/command_buffer/service/gpu_switches.h
@@ -15,7 +15,6 @@ namespace switches {
GPU_EXPORT extern const char kCompileShaderAlwaysSucceeds[];
GPU_EXPORT extern const char kDisableGLErrorLimit[];
GPU_EXPORT extern const char kDisableGLSLTranslator[];
-GPU_EXPORT extern const char kDisableGpuDriverBugWorkarounds[];
GPU_EXPORT extern const char kDisableShaderNameHashing[];
GPU_EXPORT extern const char kEnableGPUCommandLogging[];
GPU_EXPORT extern const char kEnableGPUDebugging[];
diff --git a/chromium/gpu/command_buffer/service/gpu_tracer.cc b/chromium/gpu/command_buffer/service/gpu_tracer.cc
index bd596102cb7..960dcdf65b3 100644
--- a/chromium/gpu/command_buffer/service/gpu_tracer.cc
+++ b/chromium/gpu/command_buffer/service/gpu_tracer.cc
@@ -27,10 +27,10 @@
namespace gpu {
namespace gles2 {
-static const char* kGpuTraceSourceNames[] = {
- "TraceCHROMIUM", // kTraceCHROMIUM,
- "TraceCmd", // kTraceDecoder,
- "Disjoint", // kTraceDisjoint, // Used internally.
+constexpr const char* kGpuTraceSourceNames[] = {
+ "TraceCHROMIUM", // kTraceCHROMIUM,
+ "TraceCmd", // kTraceDecoder,
+ "Disjoint", // kTraceDisjoint, // Used internally.
};
static_assert(NUM_TRACER_SOURCES == arraysize(kGpuTraceSourceNames),
"Trace source names must match enumeration.");
diff --git a/chromium/gpu/command_buffer/service/memory_program_cache.cc b/chromium/gpu/command_buffer/service/memory_program_cache.cc
index 2601dea81e4..ee60b50fea4 100644
--- a/chromium/gpu/command_buffer/service/memory_program_cache.cc
+++ b/chromium/gpu/command_buffer/service/memory_program_cache.cc
@@ -12,8 +12,11 @@
#include "base/command_line.h"
#include "base/metrics/histogram_functions.h"
#include "base/metrics/histogram_macros.h"
+#include "base/numerics/checked_math.h"
#include "base/sha1.h"
#include "base/strings/string_number_conversions.h"
+#include "base/sys_info.h"
+#include "build/build_config.h"
#include "gpu/command_buffer/common/activity_flags.h"
#include "gpu/command_buffer/common/constants.h"
#include "gpu/command_buffer/service/disk_cache_proto.pb.h"
@@ -21,8 +24,19 @@
#include "gpu/command_buffer/service/gles2_cmd_decoder.h"
#include "gpu/command_buffer/service/gpu_preferences.h"
#include "gpu/command_buffer/service/shader_manager.h"
+#include "third_party/zlib/zlib.h"
#include "ui/gl/gl_bindings.h"
+// Macro to help with logging times under 10ms.
+#define UMA_HISTOGRAM_VERY_SHORT_TIMES(name, time_delta) \
+ UMA_HISTOGRAM_CUSTOM_COUNTS( \
+ name, \
+ static_cast<base::HistogramBase::Sample>((time_delta).InMicroseconds()), \
+ 1, \
+ static_cast<base::HistogramBase::Sample>( \
+ base::TimeDelta::FromMilliseconds(10).InMicroseconds()), \
+ 50);
+
namespace gpu {
namespace gles2 {
@@ -209,6 +223,64 @@ bool ProgramBinaryExtensionsAvailable() {
gl::g_current_gl_driver->ext.b_GL_OES_get_program_binary);
}
+// Returns an empty vector if compression fails.
+std::vector<uint8_t> CompressData(const std::vector<uint8_t>& data) {
+ auto start_time = base::TimeTicks::Now();
+ uLongf compressed_size = compressBound(data.size());
+ std::vector<uint8_t> compressed_data(compressed_size);
+ // Level indicates a trade-off between compression and speed. Level 1
+ // indicates fastest speed (with worst compression).
+ auto result = compress2(compressed_data.data(), &compressed_size, data.data(),
+ data.size(), 1 /* level */);
+ // It should be impossible for compression to fail with the provided
+ // parameters.
+ bool success = Z_OK == result;
+ UMA_HISTOGRAM_BOOLEAN("GPU.ProgramCache.CompressDataSuccess", success);
+ if (!success)
+ return std::vector<uint8_t>();
+
+ compressed_data.resize(compressed_size);
+ compressed_data.shrink_to_fit();
+
+ UMA_HISTOGRAM_VERY_SHORT_TIMES("GPU.ProgramCache.CompressDataTime",
+ base::TimeTicks::Now() - start_time);
+ UMA_HISTOGRAM_PERCENTAGE("GPU.ProgramCache.CompressionPercentage",
+ (100 * compressed_size) / data.size());
+
+ return compressed_data;
+}
+
+// Returns an empty vector if decompression fails.
+std::vector<uint8_t> DecompressData(const std::vector<uint8_t>& data,
+ size_t decompressed_size,
+ size_t max_size_bytes) {
+ auto start_time = base::TimeTicks::Now();
+ std::vector<uint8_t> decompressed_data(decompressed_size);
+ uLongf decompressed_size_out =
+ static_cast<uLongf>(decompressed_size);
+ auto result = uncompress(decompressed_data.data(), &decompressed_size_out,
+ data.data(), data.size());
+
+ bool success =
+ result == Z_OK && decompressed_data.size() == decompressed_size_out;
+ UMA_HISTOGRAM_BOOLEAN("GPU.ProgramCache.DecompressDataSuccess", success);
+ if (!success)
+ return std::vector<uint8_t>();
+
+ UMA_HISTOGRAM_VERY_SHORT_TIMES("GPU.ProgramCache.DecompressDataTime",
+ base::TimeTicks::Now() - start_time);
+
+ return decompressed_data;
+}
+
+bool CompressProgramBinaries() {
+#if !defined(OS_ANDROID)
+ return false;
+#else // !defined(OS_ANDROID)
+ return base::SysInfo::IsLowEndDevice();
+#endif // !defined(OS_ANDROID)
+}
+
} // namespace
MemoryProgramCache::MemoryProgramCache(
@@ -220,6 +292,7 @@ MemoryProgramCache::MemoryProgramCache(
disable_gpu_shader_disk_cache_(disable_gpu_shader_disk_cache),
disable_program_caching_for_transform_feedback_(
disable_program_caching_for_transform_feedback),
+ compress_program_binaries_(CompressProgramBinaries()),
curr_size_bytes_(0),
store_(ProgramMRUCache::NO_AUTO_EVICT),
activity_flags_(activity_flags) {}
@@ -267,12 +340,22 @@ ProgramCache::ProgramLoadResult MemoryProgramCache::LoadLinkedProgram(
return PROGRAM_LOAD_FAILURE;
}
const scoped_refptr<ProgramCacheValue> value = found->second;
+ const std::vector<uint8_t>& decoded =
+ value->is_compressed()
+ ? DecompressData(value->data(), value->decompressed_length(),
+ max_size_bytes_)
+ : value->data();
+ if (decoded.empty()) {
+ // Decompression failure.
+ DCHECK(value->is_compressed());
+ return PROGRAM_LOAD_FAILURE;
+ }
{
GpuProcessActivityFlags::ScopedSetFlag scoped_set_flag(
activity_flags_, ActivityFlagsBase::FLAG_LOADING_PROGRAM_BINARY);
glProgramBinary(program, value->format(),
- static_cast<const GLvoid*>(value->data()), value->length());
+ static_cast<const GLvoid*>(decoded.data()), decoded.size());
}
GLint success = 0;
@@ -296,7 +379,9 @@ ProgramCache::ProgramLoadResult MemoryProgramCache::LoadLinkedProgram(
GpuProgramProto::default_instance().New());
proto->set_sha(sha, kHashLength);
proto->set_format(value->format());
- proto->set_program(value->data(), value->length());
+ proto->set_program(value->data().data(), value->data().size());
+ proto->set_program_is_compressed(value->is_compressed());
+ proto->set_program_decompressed_length(value->decompressed_length());
FillShaderProto(proto->mutable_vertex_shader(), a_sha, shader_a);
FillShaderProto(proto->mutable_fragment_shader(), b_sha, shader_b);
@@ -324,17 +409,23 @@ void MemoryProgramCache::SaveLinkedProgram(
}
GLenum format;
GLsizei length = 0;
- glGetProgramiv(program, GL_PROGRAM_BINARY_LENGTH_OES, &length);
+ glGetProgramiv(program, GL_PROGRAM_BINARY_LENGTH, &length);
if (length == 0 || static_cast<unsigned int>(length) > max_size_bytes_) {
return;
}
- std::unique_ptr<char[]> binary(new char[length]);
- glGetProgramBinary(program,
- length,
- NULL,
- &format,
- binary.get());
- UMA_HISTOGRAM_COUNTS("GPU.ProgramCache.ProgramBinarySizeBytes", length);
+ std::vector<uint8_t> binary(length);
+ glGetProgramBinary(program, length, NULL, &format,
+ reinterpret_cast<char*>(binary.data()));
+
+ if (compress_program_binaries_) {
+ binary = CompressData(binary);
+ if (binary.empty()) {
+ // Zero size indicates failure.
+ return;
+ }
+ }
+ UMA_HISTOGRAM_COUNTS("GPU.ProgramCache.ProgramBinarySizeBytes",
+ binary.size());
char a_sha[kHashLength];
char b_sha[kHashLength];
@@ -363,7 +454,7 @@ void MemoryProgramCache::SaveLinkedProgram(
if(existing != store_.end())
store_.Erase(existing);
- while (curr_size_bytes_ + length > max_size_bytes_) {
+ while (curr_size_bytes_ + binary.size() > max_size_bytes_) {
DCHECK(!store_.empty());
store_.Erase(store_.rbegin());
}
@@ -373,7 +464,9 @@ void MemoryProgramCache::SaveLinkedProgram(
GpuProgramProto::default_instance().New());
proto->set_sha(sha, kHashLength);
proto->set_format(format);
- proto->set_program(binary.get(), length);
+ proto->set_program(binary.data(), binary.size());
+ proto->set_program_decompressed_length(length);
+ proto->set_program_is_compressed(compress_program_binaries_);
FillShaderProto(proto->mutable_vertex_shader(), a_sha, shader_a);
FillShaderProto(proto->mutable_fragment_shader(), b_sha, shader_b);
@@ -383,13 +476,13 @@ void MemoryProgramCache::SaveLinkedProgram(
store_.Put(
sha_string,
new ProgramCacheValue(
- length, format, binary.release(), sha_string, a_sha,
- shader_a->attrib_map(), shader_a->uniform_map(),
+ format, std::move(binary), compress_program_binaries_, length,
+ sha_string, a_sha, shader_a->attrib_map(), shader_a->uniform_map(),
shader_a->varying_map(), shader_a->output_variable_list(),
- shader_a->interface_block_map(), b_sha,
- shader_b->attrib_map(), shader_b->uniform_map(),
- shader_b->varying_map(), shader_b->output_variable_list(),
- shader_b->interface_block_map(), this));
+ shader_a->interface_block_map(), b_sha, shader_b->attrib_map(),
+ shader_b->uniform_map(), shader_b->varying_map(),
+ shader_b->output_variable_list(), shader_b->interface_block_map(),
+ this));
UMA_HISTOGRAM_COUNTS("GPU.ProgramCache.MemorySizeAfterKb",
curr_size_bytes_ / 1024);
@@ -454,14 +547,17 @@ void MemoryProgramCache::LoadProgram(const std::string& key,
&fragment_interface_blocks);
}
- std::unique_ptr<char[]> binary(new char[proto->program().length()]);
- memcpy(binary.get(), proto->program().c_str(), proto->program().length());
+ std::vector<uint8_t> binary(proto->program().length());
+ memcpy(binary.data(), proto->program().c_str(), proto->program().length());
store_.Put(
proto->sha(),
new ProgramCacheValue(
- proto->program().length(), proto->format(), binary.release(),
- proto->sha(), proto->vertex_shader().sha().c_str(), vertex_attribs,
+ proto->format(), std::move(binary),
+ proto->has_program_is_compressed() &&
+ proto->program_is_compressed(),
+ proto->program_decompressed_length(), proto->sha(),
+ proto->vertex_shader().sha().c_str(), vertex_attribs,
vertex_uniforms, vertex_varyings, vertex_output_variables,
vertex_interface_blocks, proto->fragment_shader().sha().c_str(),
fragment_attribs, fragment_uniforms, fragment_varyings,
@@ -484,9 +580,10 @@ size_t MemoryProgramCache::Trim(size_t limit) {
}
MemoryProgramCache::ProgramCacheValue::ProgramCacheValue(
- GLsizei length,
GLenum format,
- const char* data,
+ std::vector<uint8_t> data,
+ bool is_compressed,
+ GLsizei decompressed_length,
const std::string& program_hash,
const char* shader_0_hash,
const AttributeMap& attrib_map_0,
@@ -501,9 +598,10 @@ MemoryProgramCache::ProgramCacheValue::ProgramCacheValue(
const OutputVariableList& output_variable_list_1,
const InterfaceBlockMap& interface_block_map_1,
MemoryProgramCache* program_cache)
- : length_(length),
- format_(format),
- data_(data),
+ : format_(format),
+ data_(std::move(data)),
+ is_compressed_(is_compressed),
+ decompressed_length_(decompressed_length),
program_hash_(program_hash),
shader_0_hash_(shader_0_hash, kHashLength),
attrib_map_0_(attrib_map_0),
@@ -518,12 +616,12 @@ MemoryProgramCache::ProgramCacheValue::ProgramCacheValue(
output_variable_list_1_(output_variable_list_1),
interface_block_map_1_(interface_block_map_1),
program_cache_(program_cache) {
- program_cache_->curr_size_bytes_ += length_;
+ program_cache_->curr_size_bytes_ += data_.size();
program_cache_->LinkedProgramCacheSuccess(program_hash);
}
MemoryProgramCache::ProgramCacheValue::~ProgramCacheValue() {
- program_cache_->curr_size_bytes_ -= length_;
+ program_cache_->curr_size_bytes_ -= data_.size();
program_cache_->Evict(program_hash_);
}
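
Editor's note: a self-contained sketch of the zlib round trip that the new CompressData()/DecompressData() helpers are built on: compressBound() sizes the scratch buffer, compress2() runs at level 1 for speed, and uncompress() inflates back using the recorded original length. The RoundTrip() wrapper below is illustrative; the real helpers additionally record UMA histograms and return an empty vector on failure so callers can fall back.

#include <cstdint>
#include <vector>

#include "third_party/zlib/zlib.h"

// Compresses |data| at level 1 and immediately decompresses it again;
// returns an empty vector if either step fails.
std::vector<uint8_t> RoundTrip(const std::vector<uint8_t>& data) {
  uLongf compressed_size = compressBound(data.size());
  std::vector<uint8_t> compressed(compressed_size);
  if (compress2(compressed.data(), &compressed_size, data.data(), data.size(),
                1 /* level: fastest, weakest compression */) != Z_OK)
    return std::vector<uint8_t>();
  compressed.resize(compressed_size);

  // Inflate back, sized by the known original length, as LoadLinkedProgram()
  // does with ProgramCacheValue::decompressed_length().
  std::vector<uint8_t> decompressed(data.size());
  uLongf decompressed_size = static_cast<uLongf>(decompressed.size());
  if (uncompress(decompressed.data(), &decompressed_size, compressed.data(),
                 compressed.size()) != Z_OK ||
      decompressed_size != data.size())
    return std::vector<uint8_t>();
  return decompressed;  // equal to |data| on success
}
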
diff --git a/chromium/gpu/command_buffer/service/memory_program_cache.h b/chromium/gpu/command_buffer/service/memory_program_cache.h
index 03881f2c888..b854fd26e72 100644
--- a/chromium/gpu/command_buffer/service/memory_program_cache.h
+++ b/chromium/gpu/command_buffer/service/memory_program_cache.h
@@ -59,9 +59,10 @@ class GPU_EXPORT MemoryProgramCache : public ProgramCache {
class ProgramCacheValue : public base::RefCounted<ProgramCacheValue> {
public:
- ProgramCacheValue(GLsizei length,
- GLenum format,
- const char* data,
+ ProgramCacheValue(GLenum format,
+ std::vector<uint8_t> data,
+ bool is_compressed,
+ GLsizei decompressed_length,
const std::string& program_hash,
const char* shader_0_hash,
const AttributeMap& attrib_map_0,
@@ -77,17 +78,15 @@ class GPU_EXPORT MemoryProgramCache : public ProgramCache {
const InterfaceBlockMap& interface_block_map_1,
MemoryProgramCache* program_cache);
- GLsizei length() const {
- return length_;
- }
-
GLenum format() const {
return format_;
}
- const char* data() const {
- return data_.get();
- }
+ const std::vector<uint8_t>& data() const { return data_; }
+
+ bool is_compressed() const { return is_compressed_; }
+
+ GLsizei decompressed_length() const { return decompressed_length_; }
const std::string& shader_0_hash() const {
return shader_0_hash_;
@@ -142,9 +141,10 @@ class GPU_EXPORT MemoryProgramCache : public ProgramCache {
~ProgramCacheValue();
- const GLsizei length_;
const GLenum format_;
- const std::unique_ptr<const char[]> data_;
+ const std::vector<uint8_t> data_;
+ const bool is_compressed_;
+ const GLsizei decompressed_length_;
const std::string program_hash_;
const std::string shader_0_hash_;
const AttributeMap attrib_map_0_;
@@ -171,6 +171,7 @@ class GPU_EXPORT MemoryProgramCache : public ProgramCache {
const size_t max_size_bytes_;
const bool disable_gpu_shader_disk_cache_;
const bool disable_program_caching_for_transform_feedback_;
+ const bool compress_program_binaries_;
size_t curr_size_bytes_;
ProgramMRUCache store_;
GpuProcessActivityFlags* activity_flags_;
diff --git a/chromium/gpu/command_buffer/service/memory_program_cache_unittest.cc b/chromium/gpu/command_buffer/service/memory_program_cache_unittest.cc
index b2c2f001c16..f51c5a38d1d 100644
--- a/chromium/gpu/command_buffer/service/memory_program_cache_unittest.cc
+++ b/chromium/gpu/command_buffer/service/memory_program_cache_unittest.cc
@@ -149,11 +149,11 @@ class MemoryProgramCacheTest : public GpuServiceTest,
TestHelper::SetShaderStates(gl_.get(), vertex_shader_, true, nullptr,
nullptr, nullptr, &vertex_attrib_map,
&vertex_uniform_map, &vertex_varying_map,
- nullptr, &vertex_output_variable_list);
- TestHelper::SetShaderStates(gl_.get(), fragment_shader_, true, nullptr,
- nullptr, nullptr, &fragment_attrib_map,
- &fragment_uniform_map, &fragment_varying_map,
- nullptr, &fragment_output_variable_list);
+ nullptr, &vertex_output_variable_list, nullptr);
+ TestHelper::SetShaderStates(
+ gl_.get(), fragment_shader_, true, nullptr, nullptr, nullptr,
+ &fragment_attrib_map, &fragment_uniform_map, &fragment_varying_map,
+ nullptr, &fragment_output_variable_list, nullptr);
}
void SetExpectationsForSaveLinkedProgram(
diff --git a/chromium/gpu/command_buffer/service/mocks.h b/chromium/gpu/command_buffer/service/mocks.h
index 25c23f00163..1f88ea69cd2 100644
--- a/chromium/gpu/command_buffer/service/mocks.h
+++ b/chromium/gpu/command_buffer/service/mocks.h
@@ -109,8 +109,9 @@ class MockShaderTranslator : public ShaderTranslatorInterface {
VaryingMap* varying_map,
InterfaceBlockMap* interface_block_map,
OutputVariableList* output_variable_list));
- MOCK_CONST_METHOD0(
- GetStringForOptionsThatWouldAffectCompilation, std::string());
+ MOCK_CONST_METHOD0(GetStringForOptionsThatWouldAffectCompilation,
+ OptionsAffectingCompilationString*());
+
private:
~MockShaderTranslator() override;
};
diff --git a/chromium/gpu/command_buffer/service/passthrough_program_cache.cc b/chromium/gpu/command_buffer/service/passthrough_program_cache.cc
new file mode 100644
index 00000000000..ae8bb020502
--- /dev/null
+++ b/chromium/gpu/command_buffer/service/passthrough_program_cache.cc
@@ -0,0 +1,138 @@
+// Copyright (c) 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "gpu/command_buffer/service/passthrough_program_cache.h"
+
+#include <stddef.h>
+
+#include "base/base64.h"
+#include "ui/gl/gl_bindings.h"
+
+#if defined(USE_EGL)
+#include "ui/gl/angle_platform_impl.h"
+#include "ui/gl/gl_surface_egl.h"
+#endif // defined(USE_EGL)
+
+#ifndef EGL_ANGLE_program_cache_control
+#define EGL_ANGLE_program_cache_control 1
+#define EGL_PROGRAM_CACHE_SIZE_ANGLE 0x3455
+#define EGL_PROGRAM_CACHE_KEY_LENGTH_ANGLE 0x3456
+#define EGL_PROGRAM_CACHE_RESIZE_ANGLE 0x3457
+#define EGL_PROGRAM_CACHE_TRIM_ANGLE 0x3458
+#define EGL_CONTEXT_PROGRAM_BINARY_CACHE_ENABLED_ANGLE 0x3459
+#endif /* EGL_ANGLE_program_cache_control */
+
+namespace gpu {
+namespace gles2 {
+
+namespace {
+
+bool ProgramCacheControlExtensionAvailable() {
+#if defined(USE_EGL)
+ // The display should be initialized if the extension is available.
+ return gl::g_driver_egl.ext.b_EGL_ANGLE_program_cache_control;
+#else
+ return false;
+#endif // defined(USE_EGL)
+}
+
+} // namespace
+
+PassthroughProgramCache::PassthroughProgramCache(
+ size_t max_cache_size_bytes,
+ bool disable_gpu_shader_disk_cache)
+ : disable_gpu_shader_disk_cache_(disable_gpu_shader_disk_cache) {
+ if (!CacheEnabled()) {
+ return;
+ }
+
+#if defined(USE_EGL)
+ EGLDisplay display = gl::GLSurfaceEGL::GetHardwareDisplay();
+ DCHECK(display != EGL_NO_DISPLAY);
+
+ eglProgramCacheResizeANGLE(display, max_cache_size_bytes,
+ EGL_PROGRAM_CACHE_RESIZE_ANGLE);
+#endif // defined(USE_EGL)
+}
+
+PassthroughProgramCache::~PassthroughProgramCache() {
+#if defined(USE_EGL)
+ // Ensure the program cache callback is cleared.
+ angle::ResetCacheProgramCallback();
+#endif // defined(USE_EGL)
+}
+
+void PassthroughProgramCache::ClearBackend() {
+ Trim(0);
+}
+
+ProgramCache::ProgramLoadResult PassthroughProgramCache::LoadLinkedProgram(
+ GLuint program,
+ Shader* shader_a,
+ Shader* shader_b,
+ const LocationMap* bind_attrib_location_map,
+ const std::vector<std::string>& transform_feedback_varyings,
+ GLenum transform_feedback_buffer_mode,
+ GLES2DecoderClient* client) {
+ NOTREACHED();
+ return PROGRAM_LOAD_FAILURE;
+}
+
+void PassthroughProgramCache::SaveLinkedProgram(
+ GLuint program,
+ const Shader* shader_a,
+ const Shader* shader_b,
+ const LocationMap* bind_attrib_location_map,
+ const std::vector<std::string>& transform_feedback_varyings,
+ GLenum transform_feedback_buffer_mode,
+ GLES2DecoderClient* client) {
+ NOTREACHED();
+}
+
+void PassthroughProgramCache::LoadProgram(const std::string& key,
+ const std::string& program) {
+ if (!CacheEnabled()) {
+ // Early exit if this display can't support cache control
+ return;
+ }
+
+#if defined(USE_EGL)
+ EGLDisplay display = gl::GLSurfaceEGL::GetHardwareDisplay();
+ DCHECK(display != EGL_NO_DISPLAY);
+
+ std::string key_decoded;
+ std::string program_decoded;
+ base::Base64Decode(key, &key_decoded);
+ base::Base64Decode(program, &program_decoded);
+
+ eglProgramCachePopulateANGLE(display, key_decoded.c_str(), key_decoded.size(),
+ program_decoded.c_str(), program_decoded.size());
+#endif // defined(USE_EGL)
+}
+
+size_t PassthroughProgramCache::Trim(size_t limit) {
+ if (!CacheEnabled()) {
+ // Early exit if this display can't support cache control
+ return 0;
+ }
+
+#if defined(USE_EGL)
+ EGLDisplay display = gl::GLSurfaceEGL::GetHardwareDisplay();
+ DCHECK(display != EGL_NO_DISPLAY);
+
+ EGLint trimmed =
+ eglProgramCacheResizeANGLE(display, limit, EGL_PROGRAM_CACHE_TRIM_ANGLE);
+ return static_cast<size_t>(trimmed);
+#else
+ return 0;
+#endif // defined(USE_EGL)
+}
+
+bool PassthroughProgramCache::CacheEnabled() const {
+ return ProgramCacheControlExtensionAvailable() &&
+ !disable_gpu_shader_disk_cache_;
+}
+
+} // namespace gles2
+} // namespace gpu
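
Editor's note: PassthroughProgramCache never holds binaries itself; it resizes, populates and trims ANGLE's internal cache through EGL_ANGLE_program_cache_control. The sketch below condenses that flow into one hypothetical SeedCacheFromDisk() helper, assuming USE_EGL and an initialized hardware display; the two ANGLE entry points, the enum value and the base64 handling are exactly the ones used in the file above.

#if defined(USE_EGL)
#include <string>

#include "base/base64.h"
#include "ui/gl/gl_bindings.h"

// Hypothetical helper: give ANGLE its byte budget, then replay one entry that
// was previously persisted (base64-encoded) by the disk cache layer.
void SeedCacheFromDisk(EGLDisplay display,
                       const std::string& key_base64,
                       const std::string& program_base64,
                       size_t max_cache_size_bytes) {
  eglProgramCacheResizeANGLE(display, max_cache_size_bytes,
                             EGL_PROGRAM_CACHE_RESIZE_ANGLE);

  std::string key_decoded;
  std::string program_decoded;
  base::Base64Decode(key_base64, &key_decoded);
  base::Base64Decode(program_base64, &program_decoded);

  eglProgramCachePopulateANGLE(display, key_decoded.c_str(), key_decoded.size(),
                               program_decoded.c_str(),
                               program_decoded.size());
}
#endif  // defined(USE_EGL)
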
diff --git a/chromium/gpu/command_buffer/service/passthrough_program_cache.h b/chromium/gpu/command_buffer/service/passthrough_program_cache.h
new file mode 100644
index 00000000000..5f73d041a57
--- /dev/null
+++ b/chromium/gpu/command_buffer/service/passthrough_program_cache.h
@@ -0,0 +1,57 @@
+// Copyright (c) 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef GPU_COMMAND_BUFFER_SERVICE_PASSTHROUGH_PROGRAM_CACHE_H_
+#define GPU_COMMAND_BUFFER_SERVICE_PASSTHROUGH_PROGRAM_CACHE_H_
+
+#include "base/macros.h"
+#include "gpu/command_buffer/service/gles2_cmd_decoder.h"
+#include "gpu/command_buffer/service/program_cache.h"
+
+namespace gpu {
+
+namespace gles2 {
+
+// Program cache that does not store binaries, but communicates with the
+// underlying implementation via the cache control extension.
+class GPU_EXPORT PassthroughProgramCache : public ProgramCache {
+ public:
+ PassthroughProgramCache(size_t max_cache_size_bytes,
+ bool disable_gpu_shader_disk_cache);
+ ~PassthroughProgramCache() override;
+
+ ProgramLoadResult LoadLinkedProgram(
+ GLuint program,
+ Shader* shader_a,
+ Shader* shader_b,
+ const LocationMap* bind_attrib_location_map,
+ const std::vector<std::string>& transform_feedback_varyings,
+ GLenum transform_feedback_buffer_mode,
+ GLES2DecoderClient* client) override;
+ void SaveLinkedProgram(
+ GLuint program,
+ const Shader* shader_a,
+ const Shader* shader_b,
+ const LocationMap* bind_attrib_location_map,
+ const std::vector<std::string>& transform_feedback_varyings,
+ GLenum transform_feedback_buffer_mode,
+ GLES2DecoderClient* client) override;
+
+ void LoadProgram(const std::string& key, const std::string& program) override;
+
+ size_t Trim(size_t limit) override;
+
+ private:
+ void ClearBackend() override;
+ bool CacheEnabled() const;
+
+ const bool disable_gpu_shader_disk_cache_;
+
+ DISALLOW_COPY_AND_ASSIGN(PassthroughProgramCache);
+};
+
+} // namespace gles2
+} // namespace gpu
+
+#endif // GPU_COMMAND_BUFFER_SERVICE_PASSTHROUGH_PROGRAM_CACHE_H_
diff --git a/chromium/gpu/command_buffer/service/program_manager.cc b/chromium/gpu/command_buffer/service/program_manager.cc
index c1ffc0a753e..46df984df45 100644
--- a/chromium/gpu/command_buffer/service/program_manager.cc
+++ b/chromium/gpu/command_buffer/service/program_manager.cc
@@ -1413,7 +1413,6 @@ bool Program::Link(ShaderManager* manager,
ExecuteProgramOutputBindCalls();
- before_time = TimeTicks::Now();
if (cache && gl::g_current_gl_driver->ext.b_GL_ARB_get_program_binary) {
glProgramParameteri(service_id(),
PROGRAM_BINARY_RETRIEVABLE_HINT,
diff --git a/chromium/gpu/command_buffer/service/program_manager_unittest.cc b/chromium/gpu/command_buffer/service/program_manager_unittest.cc
index e0269249bac..101d2784631 100644
--- a/chromium/gpu/command_buffer/service/program_manager_unittest.cc
+++ b/chromium/gpu/command_buffer/service/program_manager_unittest.cc
@@ -29,6 +29,7 @@
using ::testing::_;
using ::testing::DoAll;
+using ::testing::Exactly;
using ::testing::InSequence;
using ::testing::MatcherCast;
using ::testing::Pointee;
@@ -437,11 +438,11 @@ class ProgramManagerWithShaderTest : public ProgramManagerTestBase {
TestHelper::SetShaderStates(gl_.get(), vshader, true, nullptr, nullptr,
shader_version, &vertex_attrib_map,
&vertex_uniform_map, &vertex_varying_map,
- nullptr, &vertex_output_variable_list);
+ nullptr, &vertex_output_variable_list, nullptr);
TestHelper::SetShaderStates(gl_.get(), fshader, true, nullptr, nullptr,
shader_version, &frag_attrib_map,
&frag_uniform_map, &frag_varying_map, nullptr,
- &frag_output_variable_list);
+ &frag_output_variable_list, nullptr);
// Set up program
Program* program =
@@ -942,13 +943,13 @@ TEST_F(ProgramManagerWithShaderTest, GLDriverReturnsWrongTypeInfo) {
ASSERT_TRUE(vshader != NULL);
TestHelper::SetShaderStates(gl_.get(), vshader, true, nullptr, nullptr,
nullptr, &attrib_map, &uniform_map, &varying_map,
- nullptr, &output_variable_list);
+ nullptr, &output_variable_list, nullptr);
Shader* fshader = shader_manager_.CreateShader(
kFragmentShaderClientId, kFragmentShaderServiceId, GL_FRAGMENT_SHADER);
ASSERT_TRUE(fshader != NULL);
TestHelper::SetShaderStates(gl_.get(), fshader, true, nullptr, nullptr,
nullptr, &attrib_map, &uniform_map, &varying_map,
- nullptr, &output_variable_list);
+ nullptr, &output_variable_list, nullptr);
static ProgramManagerWithShaderTest::AttribInfo kAttribs[] = {
{ kAttrib1Name, kAttrib1Size, kAttrib1Type, kAttrib1Location, },
{ kAttrib2Name, kAttrib2Size, kAttrib2BadType, kAttrib2Location, },
@@ -1612,7 +1613,7 @@ TEST_F(ProgramManagerWithShaderTest, BindAttribLocationConflicts) {
// Set Status
TestHelper::SetShaderStates(gl_.get(), vshader, true, nullptr, nullptr,
nullptr, &attrib_map, nullptr, nullptr, nullptr,
- nullptr);
+ nullptr, nullptr);
// Check attrib infos got copied.
for (AttributeMap::const_iterator it = attrib_map.begin();
it != attrib_map.end(); ++it) {
@@ -1627,7 +1628,7 @@ TEST_F(ProgramManagerWithShaderTest, BindAttribLocationConflicts) {
}
TestHelper::SetShaderStates(gl_.get(), fshader, true, nullptr, nullptr,
nullptr, &attrib_map, nullptr, nullptr, nullptr,
- nullptr);
+ nullptr, nullptr);
// Set up program
Program* program =
manager_->CreateProgram(kClientProgramId, kServiceProgramId);
@@ -1695,10 +1696,10 @@ TEST_F(ProgramManagerWithShaderTest, UniformsPrecisionMismatch) {
// Set Status
TestHelper::SetShaderStates(gl_.get(), vshader, true, nullptr, nullptr,
nullptr, nullptr, &vertex_uniform_map, nullptr,
- nullptr, nullptr);
+ nullptr, nullptr, nullptr);
TestHelper::SetShaderStates(gl_.get(), fshader, true, nullptr, nullptr,
nullptr, nullptr, &frag_uniform_map, nullptr,
- nullptr, nullptr);
+ nullptr, nullptr, nullptr);
// Set up program
Program* program =
manager_->CreateProgram(kClientProgramId, kServiceProgramId);
@@ -1817,7 +1818,7 @@ TEST_F(ProgramManagerWithShaderTest, FragmentOutputTypes) {
kVertexShaderClientId, kVertexShaderServiceId, GL_VERTEX_SHADER);
TestHelper::SetShaderStates(gl_.get(), vshader, true, nullptr, nullptr,
nullptr, nullptr, nullptr, nullptr, nullptr,
- nullptr);
+ nullptr, nullptr);
Shader* fshader = shader_manager_.CreateShader(
kFragmentShaderClientId, kFragmentShaderServiceId, GL_FRAGMENT_SHADER);
ASSERT_TRUE(vshader && fshader);
@@ -1830,7 +1831,7 @@ TEST_F(ProgramManagerWithShaderTest, FragmentOutputTypes) {
{ // No outputs.
TestHelper::SetShaderStates(gl_.get(), fshader, true, nullptr, nullptr,
nullptr, nullptr, nullptr, nullptr, nullptr,
- nullptr);
+ nullptr, nullptr);
EXPECT_TRUE(LinkAsExpected(program, true));
EXPECT_EQ(0u, program->fragment_output_type_mask());
EXPECT_EQ(0u, program->fragment_output_written_mask());
@@ -1844,7 +1845,7 @@ TEST_F(ProgramManagerWithShaderTest, FragmentOutputTypes) {
fragment_outputs.push_back(var);
TestHelper::SetShaderStates(gl_.get(), fshader, true, nullptr, nullptr,
nullptr, nullptr, nullptr, nullptr, nullptr,
- &fragment_outputs);
+ &fragment_outputs, nullptr);
EXPECT_TRUE(LinkAsExpected(program, true));
EXPECT_EQ(0x3u, program->fragment_output_type_mask());
EXPECT_EQ(0x3u, program->fragment_output_written_mask());
@@ -1858,7 +1859,7 @@ TEST_F(ProgramManagerWithShaderTest, FragmentOutputTypes) {
fragment_outputs.push_back(var);
TestHelper::SetShaderStates(gl_.get(), fshader, true, nullptr, nullptr,
nullptr, nullptr, nullptr, nullptr, nullptr,
- &fragment_outputs);
+ &fragment_outputs, nullptr);
EXPECT_TRUE(LinkAsExpected(program, true));
EXPECT_EQ(0xFFFFu, program->fragment_output_type_mask());
EXPECT_EQ(0xFFFFu, program->fragment_output_written_mask());
@@ -1876,7 +1877,7 @@ TEST_F(ProgramManagerWithShaderTest, FragmentOutputTypes) {
fragment_outputs.push_back(var);
TestHelper::SetShaderStates(gl_.get(), fshader, true, nullptr, nullptr,
nullptr, nullptr, nullptr, nullptr, nullptr,
- &fragment_outputs);
+ &fragment_outputs, nullptr);
EXPECT_TRUE(LinkAsExpected(program, true));
EXPECT_EQ(0x3u, program->fragment_output_type_mask());
EXPECT_EQ(0x3u, program->fragment_output_written_mask());
@@ -1890,7 +1891,7 @@ TEST_F(ProgramManagerWithShaderTest, FragmentOutputTypes) {
fragment_outputs.push_back(var);
TestHelper::SetShaderStates(gl_.get(), fshader, true, nullptr, nullptr,
nullptr, nullptr, nullptr, nullptr, nullptr,
- &fragment_outputs);
+ &fragment_outputs, nullptr);
EXPECT_TRUE(LinkAsExpected(program, true));
EXPECT_EQ(0x2u, program->fragment_output_type_mask());
EXPECT_EQ(0x3u, program->fragment_output_written_mask());
@@ -1904,7 +1905,7 @@ TEST_F(ProgramManagerWithShaderTest, FragmentOutputTypes) {
fragment_outputs.push_back(var);
TestHelper::SetShaderStates(gl_.get(), fshader, true, nullptr, nullptr,
nullptr, nullptr, nullptr, nullptr, nullptr,
- &fragment_outputs);
+ &fragment_outputs, nullptr);
EXPECT_TRUE(LinkAsExpected(program, true));
EXPECT_EQ(0x2u, program->fragment_output_type_mask());
EXPECT_EQ(0x3u, program->fragment_output_written_mask());
@@ -1922,7 +1923,7 @@ TEST_F(ProgramManagerWithShaderTest, FragmentOutputTypes) {
fragment_outputs.push_back(var);
TestHelper::SetShaderStates(gl_.get(), fshader, true, nullptr, nullptr,
nullptr, nullptr, nullptr, nullptr, nullptr,
- &fragment_outputs);
+ &fragment_outputs, nullptr);
EXPECT_TRUE(LinkAsExpected(program, true));
EXPECT_EQ(0xF1u, program->fragment_output_type_mask());
EXPECT_EQ(0xF3u, program->fragment_output_written_mask());
@@ -2201,6 +2202,13 @@ class ProgramManagerWithCacheTest : public ProgramManagerTestBase {
TestHelper::SetShaderStates(gl_.get(), fragment_shader_, true);
}
+ void SetShadersCompiled(const std::string& compilation_options_string) {
+ TestHelper::SetShaderStates(gl_.get(), vertex_shader_, true,
+ compilation_options_string);
+ TestHelper::SetShaderStates(gl_.get(), fragment_shader_, true,
+ compilation_options_string);
+ }
+
void SetProgramCached() {
cache_->LinkedProgramCacheSuccess(
vertex_shader_->source(), fragment_shader_->source(),
@@ -2280,6 +2288,16 @@ class ProgramManagerWithCacheTest : public ProgramManagerTestBase {
nullptr, 0, service_program_id);
}
+ void SetExpectationsForProgramNotLoaded() {
+ EXPECT_CALL(*cache_.get(),
+ LoadLinkedProgram(
+ program_->service_id(), vertex_shader_, fragment_shader_,
+ &program_->bind_attrib_location_map(),
+ program_->effective_transform_feedback_varyings(),
+ program_->effective_transform_feedback_buffer_mode(), _))
+ .Times(Exactly(0));
+ }
+
void SetExpectationsForProgramLink() {
SetExpectationsForProgramLink(kServiceProgramId);
}
@@ -2368,6 +2386,17 @@ TEST_F(ProgramManagerWithCacheTest, LoadProgramOnProgramCacheHit) {
EXPECT_TRUE(program_->Link(NULL, Program::kCountOnlyStaticallyUsed, this));
}
+TEST_F(ProgramManagerWithCacheTest, RelinkOnChangedCompileOptions) {
+ SetShadersCompiled("a");
+ SetProgramCached();
+ SetExpectationsForProgramCached();
+
+ SetShadersCompiled("b");
+ SetExpectationsForProgramLink();
+ SetExpectationsForProgramNotLoaded();
+ EXPECT_TRUE(program_->Link(NULL, Program::kCountOnlyStaticallyUsed, this));
+}
+
class ProgramManagerWithPathRenderingTest
: public ProgramManagerWithShaderTest,
public testing::WithParamInterface<
@@ -2454,10 +2483,10 @@ TEST_P(ProgramManagerWithPathRenderingTest, BindFragmentInputLocation) {
kFragmentInput3StaticUse, kFragmentInput3Name);
TestHelper::SetShaderStates(gl_.get(), vshader, true, nullptr, nullptr,
nullptr, nullptr, nullptr, &varying_map, nullptr,
- nullptr);
+ nullptr, nullptr);
TestHelper::SetShaderStates(gl_.get(), fshader, true, nullptr, nullptr,
nullptr, nullptr, nullptr, &varying_map, nullptr,
- nullptr);
+ nullptr, nullptr);
Program* program =
manager_->CreateProgram(kClientProgramId, kServiceProgramId);
ASSERT_TRUE(program != NULL);
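
Editor's note: the new RelinkOnChangedCompileOptions test above relies on the string describing the translator options that affect compilation now feeding into the shader state (the extra argument threaded through TestHelper::SetShaderStates throughout this patch), so the same sources compiled under different options no longer hit the cached link. Below is a toy sketch of that keying idea; the HashKey() helper and its use of std::hash are assumptions, not the production hashing scheme (which uses SHA-1 elsewhere in the cache).

#include <functional>
#include <string>

// Hypothetical cache key: any change to the options string changes the key,
// which is what forces the relink observed in RelinkOnChangedCompileOptions.
size_t HashKey(const std::string& vertex_source,
               const std::string& fragment_source,
               const std::string& options_affecting_compilation) {
  return std::hash<std::string>()(vertex_source + '\0' + fragment_source +
                                  '\0' + options_affecting_compilation);
}
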
diff --git a/chromium/gpu/command_buffer/service/renderbuffer_manager.cc b/chromium/gpu/command_buffer/service/renderbuffer_manager.cc
index 111569a4429..ecdb81af79e 100644
--- a/chromium/gpu/command_buffer/service/renderbuffer_manager.cc
+++ b/chromium/gpu/command_buffer/service/renderbuffer_manager.cc
@@ -166,12 +166,14 @@ bool Renderbuffer::RegenerateAndBindBackingObjectIfNeeded() {
void Renderbuffer::AddFramebufferAttachmentPoint(Framebuffer* framebuffer,
GLenum attachment) {
+ DCHECK_NE(static_cast<GLenum>(GL_DEPTH_STENCIL_ATTACHMENT), attachment);
framebuffer_attachment_points_.insert(
std::make_pair(framebuffer, attachment));
}
void Renderbuffer::RemoveFramebufferAttachmentPoint(Framebuffer* framebuffer,
GLenum attachment) {
+ DCHECK_NE(static_cast<GLenum>(GL_DEPTH_STENCIL_ATTACHMENT), attachment);
framebuffer_attachment_points_.erase(std::make_pair(framebuffer, attachment));
}
diff --git a/chromium/gpu/command_buffer/service/scheduler.cc b/chromium/gpu/command_buffer/service/scheduler.cc
index d349165b70e..f240ab9a54e 100644
--- a/chromium/gpu/command_buffer/service/scheduler.cc
+++ b/chromium/gpu/command_buffer/service/scheduler.cc
@@ -137,6 +137,16 @@ class Scheduler::Sequence {
DISALLOW_COPY_AND_ASSIGN(Sequence);
};
+Scheduler::Task::Task(SequenceId sequence_id,
+ base::OnceClosure closure,
+ std::vector<SyncToken> sync_token_fences)
+ : sequence_id(sequence_id),
+ closure(std::move(closure)),
+ sync_token_fences(std::move(sync_token_fences)) {}
+Scheduler::Task::Task(Task&& other) = default;
+Scheduler::Task::~Task() = default;
+Scheduler::Task& Scheduler::Task::operator=(Task&& other) = default;
+
Scheduler::SchedulingState::SchedulingState() = default;
Scheduler::SchedulingState::SchedulingState(const SchedulingState& other) =
default;
@@ -324,16 +334,26 @@ void Scheduler::DisableSequence(SequenceId sequence_id) {
sequence->SetEnabled(false);
}
-void Scheduler::ScheduleTask(SequenceId sequence_id,
- base::OnceClosure closure,
- const std::vector<SyncToken>& sync_token_fences) {
+void Scheduler::ScheduleTask(Task task) {
+ base::AutoLock auto_lock(lock_);
+ ScheduleTaskHelper(std::move(task));
+}
+
+void Scheduler::ScheduleTasks(std::vector<Task> tasks) {
base::AutoLock auto_lock(lock_);
+ for (auto& task : tasks)
+ ScheduleTaskHelper(std::move(task));
+}
+
+void Scheduler::ScheduleTaskHelper(Task task) {
+ lock_.AssertAcquired();
+ SequenceId sequence_id = task.sequence_id;
Sequence* sequence = GetSequence(sequence_id);
DCHECK(sequence);
- uint32_t order_num = sequence->ScheduleTask(std::move(closure));
+ uint32_t order_num = sequence->ScheduleTask(std::move(task.closure));
- for (const SyncToken& sync_token : sync_token_fences) {
+ for (const SyncToken& sync_token : task.sync_token_fences) {
SequenceId release_id =
sync_point_manager_->GetSyncTokenReleaseSequenceId(sync_token);
Sequence* release_sequence = GetSequence(release_id);
diff --git a/chromium/gpu/command_buffer/service/scheduler.h b/chromium/gpu/command_buffer/service/scheduler.h
index 4aa3c708b85..850107ac883 100644
--- a/chromium/gpu/command_buffer/service/scheduler.h
+++ b/chromium/gpu/command_buffer/service/scheduler.h
@@ -31,6 +31,19 @@ class SyncPointManager;
class GPU_EXPORT Scheduler {
public:
+ struct GPU_EXPORT Task {
+ Task(SequenceId sequence_id,
+ base::OnceClosure closure,
+ std::vector<SyncToken> sync_token_fences);
+ Task(Task&& other);
+ ~Task();
+ Task& operator=(Task&& other);
+
+ SequenceId sequence_id;
+ base::OnceClosure closure;
+ std::vector<SyncToken> sync_token_fences;
+ };
+
Scheduler(scoped_refptr<base::SingleThreadTaskRunner> task_runner,
SyncPointManager* sync_point_manager);
@@ -53,9 +66,9 @@ class GPU_EXPORT Scheduler {
// Schedules task (closure) to run on the sequence. The task is blocked until
// the sync token fences are released or determined to be invalid. Tasks are
// run in the order in which they are submitted.
- void ScheduleTask(SequenceId sequence_id,
- base::OnceClosure closure,
- const std::vector<SyncToken>& sync_token_fences);
+ void ScheduleTask(Task task);
+
+ void ScheduleTasks(std::vector<Task> tasks);
// Continue running task on the sequence with the closure. This must be called
// while running a previously scheduled task.
@@ -95,6 +108,8 @@ class GPU_EXPORT Scheduler {
SequenceId release_sequence_id,
SequenceId waiting_sequence_id);
+ void ScheduleTaskHelper(Task task);
+
void TryScheduleSequence(Sequence* sequence);
void RebuildSchedulingQueue();
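
Scheduler::Task simply bundles the three arguments the old ScheduleTask() signature took, which is what enables the new batched ScheduleTasks() entry point. A short usage sketch, assuming a Scheduler* scheduler, a SequenceId sequence_id, and a SyncToken fence already exist in the caller:

    // Sketch: post two move-only tasks under a single lock acquisition.
    // Each closure runs once its sync token fences are released or found
    // invalid, in submission order on the sequence.
    std::vector<gpu::Scheduler::Task> tasks;
    tasks.emplace_back(sequence_id, base::BindOnce([] { /* first item */ }),
                       std::vector<gpu::SyncToken>());
    tasks.emplace_back(sequence_id,
                       base::BindOnce([] { /* runs after the fence */ }),
                       std::vector<gpu::SyncToken>{fence});
    scheduler->ScheduleTasks(std::move(tasks));
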
diff --git a/chromium/gpu/command_buffer/service/scheduler_unittest.cc b/chromium/gpu/command_buffer/service/scheduler_unittest.cc
index b2921a1cc41..1037ab9fdce 100644
--- a/chromium/gpu/command_buffer/service/scheduler_unittest.cc
+++ b/chromium/gpu/command_buffer/service/scheduler_unittest.cc
@@ -52,12 +52,12 @@ TEST_F(SchedulerTest, ScheduledTasksRunInOrder) {
scheduler()->CreateSequence(SchedulingPriority::kNormal);
bool ran1 = false;
- scheduler()->ScheduleTask(sequence_id, GetClosure([&] { ran1 = true; }),
- std::vector<SyncToken>());
+ scheduler()->ScheduleTask(Scheduler::Task(
+ sequence_id, GetClosure([&] { ran1 = true; }), std::vector<SyncToken>()));
bool ran2 = false;
- scheduler()->ScheduleTask(sequence_id, GetClosure([&] { ran2 = true; }),
- std::vector<SyncToken>());
+ scheduler()->ScheduleTask(Scheduler::Task(
+ sequence_id, GetClosure([&] { ran2 = true; }), std::vector<SyncToken>()));
task_runner()->RunPendingTasks();
EXPECT_TRUE(ran1);
@@ -72,17 +72,17 @@ TEST_F(SchedulerTest, ContinuedTasksRunFirst) {
bool ran1 = false;
bool continued1 = false;
- scheduler()->ScheduleTask(sequence_id, GetClosure([&] {
- scheduler()->ContinueTask(
- sequence_id,
+ scheduler()->ScheduleTask(Scheduler::Task(
+ sequence_id, GetClosure([&] {
+ scheduler()->ContinueTask(sequence_id,
GetClosure([&] { continued1 = true; }));
- ran1 = true;
- }),
- std::vector<SyncToken>());
+ ran1 = true;
+ }),
+ std::vector<SyncToken>()));
bool ran2 = false;
- scheduler()->ScheduleTask(sequence_id, GetClosure([&] { ran2 = true; }),
- std::vector<SyncToken>());
+ scheduler()->ScheduleTask(Scheduler::Task(
+ sequence_id, GetClosure([&] { ran2 = true; }), std::vector<SyncToken>()));
task_runner()->RunPendingTasks();
EXPECT_TRUE(ran1);
@@ -101,20 +101,23 @@ TEST_F(SchedulerTest, SequencesRunInPriorityOrder) {
SequenceId sequence_id1 =
scheduler()->CreateSequence(SchedulingPriority::kLowest);
bool ran1 = false;
- scheduler()->ScheduleTask(sequence_id1, GetClosure([&] { ran1 = true; }),
- std::vector<SyncToken>());
+ scheduler()->ScheduleTask(Scheduler::Task(sequence_id1,
+ GetClosure([&] { ran1 = true; }),
+ std::vector<SyncToken>()));
SequenceId sequence_id2 =
scheduler()->CreateSequence(SchedulingPriority::kNormal);
bool ran2 = false;
- scheduler()->ScheduleTask(sequence_id2, GetClosure([&] { ran2 = true; }),
- std::vector<SyncToken>());
+ scheduler()->ScheduleTask(Scheduler::Task(sequence_id2,
+ GetClosure([&] { ran2 = true; }),
+ std::vector<SyncToken>()));
SequenceId sequence_id3 =
scheduler()->CreateSequence(SchedulingPriority::kHighest);
bool ran3 = false;
- scheduler()->ScheduleTask(sequence_id3, GetClosure([&] { ran3 = true; }),
- std::vector<SyncToken>());
+ scheduler()->ScheduleTask(Scheduler::Task(sequence_id3,
+ GetClosure([&] { ran3 = true; }),
+ std::vector<SyncToken>()));
task_runner()->RunPendingTasks();
EXPECT_TRUE(ran3);
@@ -130,26 +133,30 @@ TEST_F(SchedulerTest, SequencesOfSamePriorityRunInOrder) {
SequenceId sequence_id1 =
scheduler()->CreateSequence(SchedulingPriority::kNormal);
bool ran1 = false;
- scheduler()->ScheduleTask(sequence_id1, GetClosure([&] { ran1 = true; }),
- std::vector<SyncToken>());
+ scheduler()->ScheduleTask(Scheduler::Task(sequence_id1,
+ GetClosure([&] { ran1 = true; }),
+ std::vector<SyncToken>()));
SequenceId sequence_id2 =
scheduler()->CreateSequence(SchedulingPriority::kNormal);
bool ran2 = false;
- scheduler()->ScheduleTask(sequence_id2, GetClosure([&] { ran2 = true; }),
- std::vector<SyncToken>());
+ scheduler()->ScheduleTask(Scheduler::Task(sequence_id2,
+ GetClosure([&] { ran2 = true; }),
+ std::vector<SyncToken>()));
SequenceId sequence_id3 =
scheduler()->CreateSequence(SchedulingPriority::kNormal);
bool ran3 = false;
- scheduler()->ScheduleTask(sequence_id3, GetClosure([&] { ran3 = true; }),
- std::vector<SyncToken>());
+ scheduler()->ScheduleTask(Scheduler::Task(sequence_id3,
+ GetClosure([&] { ran3 = true; }),
+ std::vector<SyncToken>()));
SequenceId sequence_id4 =
scheduler()->CreateSequence(SchedulingPriority::kNormal);
bool ran4 = false;
- scheduler()->ScheduleTask(sequence_id4, GetClosure([&] { ran4 = true; }),
- std::vector<SyncToken>());
+ scheduler()->ScheduleTask(Scheduler::Task(sequence_id4,
+ GetClosure([&] { ran4 = true; }),
+ std::vector<SyncToken>()));
task_runner()->RunPendingTasks();
EXPECT_TRUE(ran1);
@@ -179,18 +186,18 @@ TEST_F(SchedulerTest, SequenceWaitsForFence) {
uint64_t release = 1;
bool ran2 = false;
- scheduler()->ScheduleTask(sequence_id2, GetClosure([&] {
- release_state->ReleaseFenceSync(release);
- ran2 = true;
- }),
- std::vector<SyncToken>());
+ scheduler()->ScheduleTask(Scheduler::Task(sequence_id2, GetClosure([&] {
+ release_state->ReleaseFenceSync(
+ release);
+ ran2 = true;
+ }),
+ std::vector<SyncToken>()));
- SyncToken sync_token(namespace_id, 0 /* extra_data_field */,
- command_buffer_id, release);
+ SyncToken sync_token(namespace_id, 0, command_buffer_id, release);
bool ran1 = false;
- scheduler()->ScheduleTask(sequence_id1, GetClosure([&] { ran1 = true; }),
- {sync_token});
+ scheduler()->ScheduleTask(Scheduler::Task(
+ sequence_id1, GetClosure([&] { ran1 = true; }), {sync_token}));
task_runner()->RunPendingTasks();
EXPECT_FALSE(ran1);
@@ -216,21 +223,21 @@ TEST_F(SchedulerTest, SequenceDoesNotWaitForInvalidFence) {
namespace_id, command_buffer_id, sequence_id2);
uint64_t release = 1;
- SyncToken sync_token(namespace_id, 0 /* extra_data_field */,
- command_buffer_id, release);
+ SyncToken sync_token(namespace_id, 0, command_buffer_id, release);
bool ran1 = false;
- scheduler()->ScheduleTask(sequence_id1, GetClosure([&] { ran1 = true; }),
- {sync_token});
+ scheduler()->ScheduleTask(Scheduler::Task(
+ sequence_id1, GetClosure([&] { ran1 = true; }), {sync_token}));
// Release task is scheduled after wait task so release is treated as non-
// existent.
bool ran2 = false;
- scheduler()->ScheduleTask(sequence_id2, GetClosure([&] {
- release_state->ReleaseFenceSync(release);
- ran2 = true;
- }),
- std::vector<SyncToken>());
+ scheduler()->ScheduleTask(Scheduler::Task(sequence_id2, GetClosure([&] {
+ release_state->ReleaseFenceSync(
+ release);
+ ran2 = true;
+ }),
+ std::vector<SyncToken>()));
task_runner()->RunPendingTasks();
EXPECT_TRUE(ran1);
@@ -249,8 +256,9 @@ TEST_F(SchedulerTest, ReleaseSequenceIsPrioritized) {
scheduler()->CreateSequence(SchedulingPriority::kNormal);
bool ran1 = false;
- scheduler()->ScheduleTask(sequence_id1, GetClosure([&] { ran1 = true; }),
- std::vector<SyncToken>());
+ scheduler()->ScheduleTask(Scheduler::Task(sequence_id1,
+ GetClosure([&] { ran1 = true; }),
+ std::vector<SyncToken>()));
SequenceId sequence_id2 =
scheduler()->CreateSequence(SchedulingPriority::kLowest);
@@ -262,19 +270,19 @@ TEST_F(SchedulerTest, ReleaseSequenceIsPrioritized) {
uint64_t release = 1;
bool ran2 = false;
- scheduler()->ScheduleTask(sequence_id2, GetClosure([&] {
- release_state->ReleaseFenceSync(release);
- ran2 = true;
- }),
- std::vector<SyncToken>());
+ scheduler()->ScheduleTask(Scheduler::Task(sequence_id2, GetClosure([&] {
+ release_state->ReleaseFenceSync(
+ release);
+ ran2 = true;
+ }),
+ std::vector<SyncToken>()));
bool ran3 = false;
- SyncToken sync_token(namespace_id, 0 /* extra_data_field */,
- command_buffer_id, release);
+ SyncToken sync_token(namespace_id, 0, command_buffer_id, release);
SequenceId sequence_id3 =
scheduler()->CreateSequence(SchedulingPriority::kHighest);
- scheduler()->ScheduleTask(sequence_id3, GetClosure([&] { ran3 = true; }),
- {sync_token});
+ scheduler()->ScheduleTask(Scheduler::Task(
+ sequence_id3, GetClosure([&] { ran3 = true; }), {sync_token}));
task_runner()->RunPendingTasks();
EXPECT_FALSE(ran1);
@@ -304,21 +312,20 @@ TEST_F(SchedulerTest, ReleaseSequenceShouldYield) {
uint64_t release = 1;
bool ran1 = false;
scheduler()->ScheduleTask(
- sequence_id1, GetClosure([&] {
- EXPECT_FALSE(scheduler()->ShouldYield(sequence_id1));
- release_state->ReleaseFenceSync(release);
- EXPECT_TRUE(scheduler()->ShouldYield(sequence_id1));
- ran1 = true;
- }),
- std::vector<SyncToken>());
+ Scheduler::Task(sequence_id1, GetClosure([&] {
+ EXPECT_FALSE(scheduler()->ShouldYield(sequence_id1));
+ release_state->ReleaseFenceSync(release);
+ EXPECT_TRUE(scheduler()->ShouldYield(sequence_id1));
+ ran1 = true;
+ }),
+ std::vector<SyncToken>()));
bool ran2 = false;
- SyncToken sync_token(namespace_id, 0 /* extra_data_field */,
- command_buffer_id, release);
+ SyncToken sync_token(namespace_id, 0, command_buffer_id, release);
SequenceId sequence_id2 =
scheduler()->CreateSequence(SchedulingPriority::kHighest);
- scheduler()->ScheduleTask(sequence_id2, GetClosure([&] { ran2 = true; }),
- {sync_token});
+ scheduler()->ScheduleTask(Scheduler::Task(
+ sequence_id2, GetClosure([&] { ran2 = true; }), {sync_token}));
task_runner()->RunPendingTasks();
EXPECT_TRUE(ran1);
@@ -348,20 +355,20 @@ TEST_F(SchedulerTest, ReentrantEnableSequenceShouldNotDeadlock) {
namespace_id, command_buffer_id2, sequence_id2);
uint64_t release = 1;
- SyncToken sync_token(namespace_id, 0 /* extra_data_field */,
- command_buffer_id2, release);
+ SyncToken sync_token(namespace_id, 0, command_buffer_id2, release);
  bool ran1 = false, ran2 = false;
// Schedule task on sequence 2 first so that the sync token wait isn't a nop.
// BeginProcessingOrderNumber for this task will run the EnableSequence
// callback. This should not deadlock.
- scheduler()->ScheduleTask(sequence_id2, GetClosure([&] { ran2 = true; }),
- std::vector<SyncToken>());
+ scheduler()->ScheduleTask(Scheduler::Task(sequence_id2,
+ GetClosure([&] { ran2 = true; }),
+ std::vector<SyncToken>()));
// This will run first because of the higher priority and no scheduling sync
// token dependencies.
- scheduler()->ScheduleTask(
+ scheduler()->ScheduleTask(Scheduler::Task(
sequence_id1, GetClosure([&] {
ran1 = true;
release_state1->Wait(
@@ -370,7 +377,7 @@ TEST_F(SchedulerTest, ReentrantEnableSequenceShouldNotDeadlock) {
base::Unretained(scheduler()), sequence_id1));
scheduler()->DisableSequence(sequence_id1);
}),
- std::vector<SyncToken>());
+ std::vector<SyncToken>()));
task_runner()->RunPendingTasks();
EXPECT_TRUE(ran1);
@@ -400,11 +407,10 @@ TEST_F(SchedulerTest, WaitOnSelfShouldNotBlockSequence) {
sync_point_manager()->GenerateOrderNumber();
uint64_t release = 1;
- SyncToken sync_token(namespace_id, 0 /* extra_data_field */,
- command_buffer_id, release);
+ SyncToken sync_token(namespace_id, 0, command_buffer_id, release);
bool ran = false;
- scheduler()->ScheduleTask(sequence_id, GetClosure([&]() { ran = true; }),
- {sync_token});
+ scheduler()->ScheduleTask(Scheduler::Task(
+ sequence_id, GetClosure([&]() { ran = true; }), {sync_token}));
task_runner()->RunPendingTasks();
EXPECT_TRUE(ran);
EXPECT_FALSE(sync_point_manager()->IsSyncTokenReleased(sync_token));
diff --git a/chromium/gpu/command_buffer/service/service_discardable_manager_unittest.cc b/chromium/gpu/command_buffer/service/service_discardable_manager_unittest.cc
index 9ddcb12583e..32131398662 100644
--- a/chromium/gpu/command_buffer/service/service_discardable_manager_unittest.cc
+++ b/chromium/gpu/command_buffer/service/service_discardable_manager_unittest.cc
@@ -70,7 +70,7 @@ class ServiceDiscardableManagerTest : public GpuServiceTest {
decoder_.reset(new MockGLES2Decoder(&command_buffer_service_));
feature_info_ = new FeatureInfo();
context_group_ = scoped_refptr<ContextGroup>(new ContextGroup(
- gpu_preferences_, &mailbox_manager_, nullptr, nullptr, nullptr,
+ gpu_preferences_, false, &mailbox_manager_, nullptr, nullptr, nullptr,
feature_info_, false, &image_manager_, nullptr, nullptr,
GpuFeatureInfo(), &discardable_manager_));
TestHelper::SetupContextGroupInitExpectations(
diff --git a/chromium/gpu/command_buffer/service/service_utils.cc b/chromium/gpu/command_buffer/service/service_utils.cc
index a3b53116014..e8f2b6195ec 100644
--- a/chromium/gpu/command_buffer/service/service_utils.cc
+++ b/chromium/gpu/command_buffer/service/service_utils.cc
@@ -6,18 +6,23 @@
#include "base/command_line.h"
#include "gpu/command_buffer/common/gles2_cmd_utils.h"
-#include "gpu/command_buffer/service/gpu_preferences.h"
+#include "gpu/command_buffer/service/context_group.h"
#include "ui/gl/gl_switches.h"
+#if defined(USE_EGL)
+#include "ui/gl/gl_surface_egl.h"
+#endif // defined(USE_EGL)
+
namespace gpu {
namespace gles2 {
gl::GLContextAttribs GenerateGLContextAttribs(
const ContextCreationAttribHelper& attribs_helper,
- const GpuPreferences& gpu_preferences) {
+ const ContextGroup* context_group) {
+ DCHECK(context_group != nullptr);
gl::GLContextAttribs attribs;
attribs.gpu_preference = attribs_helper.gpu_preference;
- if (gpu_preferences.use_passthrough_cmd_decoder) {
+ if (context_group->use_passthrough_cmd_decoder()) {
attribs.bind_generates_resource = attribs_helper.bind_generates_resource;
attribs.webgl_compatibility_context =
IsWebGLContextType(attribs_helper.context_type);
@@ -50,5 +55,20 @@ gl::GLContextAttribs GenerateGLContextAttribs(
return attribs;
}
+bool PassthroughCommandDecoderSupported() {
+#if defined(USE_EGL)
+ // Using the passthrough command buffer requires that specific ANGLE
+ // extensions are exposed
+ return gl::GLSurfaceEGL::IsCreateContextBindGeneratesResourceSupported() &&
+ gl::GLSurfaceEGL::IsCreateContextWebGLCompatabilitySupported() &&
+ gl::GLSurfaceEGL::IsRobustResourceInitSupported() &&
+ gl::GLSurfaceEGL::IsDisplayTextureShareGroupSupported() &&
+ gl::GLSurfaceEGL::IsCreateContextClientArraysSupported();
+#else
+ // The passthrough command buffer is only supported on top of ANGLE/EGL
+ return false;
+#endif // defined(USE_EGL)
+}
+
} // namespace gles2
} // namespace gpu
diff --git a/chromium/gpu/command_buffer/service/service_utils.h b/chromium/gpu/command_buffer/service/service_utils.h
index af4e1e6f4bf..5eae2d3b8b1 100644
--- a/chromium/gpu/command_buffer/service/service_utils.h
+++ b/chromium/gpu/command_buffer/service/service_utils.h
@@ -9,14 +9,17 @@
#include "ui/gl/gl_context.h"
namespace gpu {
-struct GpuPreferences;
namespace gles2 {
struct ContextCreationAttribHelper;
+class ContextGroup;
GPU_EXPORT gl::GLContextAttribs GenerateGLContextAttribs(
const ContextCreationAttribHelper& attribs_helper,
- const GpuPreferences& gpu_preferences);
+ const ContextGroup* context_group);
+
+// Returns true if the driver supports creating passthrough command decoders
+GPU_EXPORT bool PassthroughCommandDecoderSupported();
} // namespace gles2
} // namespace gpu
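
GenerateGLContextAttribs() now consults the ContextGroup, which knows whether the passthrough decoder is actually in use, instead of raw GpuPreferences, and PassthroughCommandDecoderSupported() gates the decoder on the ANGLE/EGL extensions listed above. A sketch of the expected call pattern, assuming gpu_preferences, attribs_helper, and a scoped_refptr<gles2::ContextGroup> context_group already exist in the caller:

    // Sketch: fall back to the validating decoder when the driver lacks the
    // required ANGLE extensions, then derive context attributes from the
    // context group rather than from GpuPreferences.
    bool use_passthrough = gpu_preferences.use_passthrough_cmd_decoder &&
                           gpu::gles2::PassthroughCommandDecoderSupported();
    gl::GLContextAttribs attribs = gpu::gles2::GenerateGLContextAttribs(
        attribs_helper, context_group.get());
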
diff --git a/chromium/gpu/command_buffer/service/shader_manager.cc b/chromium/gpu/command_buffer/service/shader_manager.cc
index 8a77b519998..77e0df23e0d 100644
--- a/chromium/gpu/command_buffer/service/shader_manager.cc
+++ b/chromium/gpu/command_buffer/service/shader_manager.cc
@@ -51,6 +51,10 @@ void Shader::RequestCompile(scoped_refptr<ShaderTranslatorInterface> translator,
TranslatedShaderSourceType type) {
shader_state_ = kShaderStateCompileRequested;
translator_ = translator;
+ if (translator_) {
+ options_affecting_compilation_ =
+ translator_->GetStringForOptionsThatWouldAffectCompilation();
+ }
source_type_ = type;
last_compiled_source_ = source_;
}
@@ -121,6 +125,9 @@ void Shader::DoCompile() {
<< "\n--translated-shader--\n" << source_for_driver
<< "\n--info-log--\n" << log_info_;
}
+
+ // Translator is no longer required and can be released
+ translator_ = nullptr;
}
void Shader::RefreshTranslatedShaderSource() {
diff --git a/chromium/gpu/command_buffer/service/shader_manager.h b/chromium/gpu/command_buffer/service/shader_manager.h
index c9c2df3a469..4df84f0bcce 100644
--- a/chromium/gpu/command_buffer/service/shader_manager.h
+++ b/chromium/gpu/command_buffer/service/shader_manager.h
@@ -85,9 +85,8 @@ class GPU_EXPORT Shader : public base::RefCounted<Shader> {
}
std::string last_compiled_signature() const {
- if (translator_.get()) {
- return last_compiled_source_ +
- translator_->GetStringForOptionsThatWouldAffectCompilation();
+ if (options_affecting_compilation_) {
+ return last_compiled_source_ + options_affecting_compilation_->data;
}
return last_compiled_source_;
}
@@ -235,6 +234,8 @@ class GPU_EXPORT Shader : public base::RefCounted<Shader> {
// Translator to use, set when shader was last requested to be compiled.
scoped_refptr<ShaderTranslatorInterface> translator_;
+ scoped_refptr<OptionsAffectingCompilationString>
+ options_affecting_compilation_;
// True if compilation succeeded.
bool valid_;
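
The point of caching options_affecting_compilation_ on the Shader is that the program-cache signature stays stable even after DoCompile() releases the translator. Roughly, the accessor above composes the signature like this (a sketch of existing behaviour, not new code in the patch):

    // Sketch: source plus the translator's option string, when one was
    // captured at RequestCompile() time.
    std::string Signature(
        const std::string& last_compiled_source,
        const scoped_refptr<base::RefCountedData<std::string>>& options) {
      return options ? last_compiled_source + options->data
                     : last_compiled_source;
    }
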
diff --git a/chromium/gpu/command_buffer/service/shader_manager_unittest.cc b/chromium/gpu/command_buffer/service/shader_manager_unittest.cc
index bbf14703316..9c552cb3790 100644
--- a/chromium/gpu/command_buffer/service/shader_manager_unittest.cc
+++ b/chromium/gpu/command_buffer/service/shader_manager_unittest.cc
@@ -220,9 +220,10 @@ TEST_F(ShaderManagerTest, DoCompile) {
kInterfaceBlock1Name, kInterfaceBlock1InstanceName,
interface_block1_fields);
- TestHelper::SetShaderStates(
- gl_.get(), shader1, true, &kLog, &kTranslatedSource, nullptr, &attrib_map,
- &uniform_map, &varying_map, &interface_block_map, &output_variable_list);
+ TestHelper::SetShaderStates(gl_.get(), shader1, true, &kLog,
+ &kTranslatedSource, nullptr, &attrib_map,
+ &uniform_map, &varying_map, &interface_block_map,
+ &output_variable_list, nullptr);
EXPECT_TRUE(shader1->valid());
// When compilation succeeds, no log is recorded.
@@ -325,9 +326,10 @@ TEST_F(ShaderManagerTest, DoCompile) {
}
// Compile failure case.
- TestHelper::SetShaderStates(
- gl_.get(), shader1, false, &kLog, &kTranslatedSource, nullptr,
- &attrib_map, &uniform_map, &varying_map, nullptr, &output_variable_list);
+ TestHelper::SetShaderStates(gl_.get(), shader1, false, &kLog,
+ &kTranslatedSource, nullptr, &attrib_map,
+ &uniform_map, &varying_map, nullptr,
+ &output_variable_list, nullptr);
EXPECT_FALSE(shader1->valid());
EXPECT_STREQ(kLog.c_str(), shader1->log_info().c_str());
EXPECT_STREQ("", shader1->translated_source().c_str());
diff --git a/chromium/gpu/command_buffer/service/shader_translator.cc b/chromium/gpu/command_buffer/service/shader_translator.cc
index 40f45e8d104..a22b67970d9 100644
--- a/chromium/gpu/command_buffer/service/shader_translator.cc
+++ b/chromium/gpu/command_buffer/service/shader_translator.cc
@@ -185,6 +185,14 @@ bool ShaderTranslator::Init(GLenum shader_type,
break;
}
+ if (compiler_) {
+ options_affecting_compilation_ =
+ base::MakeRefCounted<OptionsAffectingCompilationString>(
+ std::string(":CompileOptions:" +
+ base::Uint64ToString(GetCompileOptions())) +
+ sh::GetBuiltInResourcesString(compiler_));
+ }
+
return compiler_ != NULL;
}
@@ -237,12 +245,9 @@ bool ShaderTranslator::Translate(
return success;
}
-std::string ShaderTranslator::GetStringForOptionsThatWouldAffectCompilation()
- const {
- DCHECK(compiler_ != NULL);
- return std::string(":CompileOptions:" +
- base::Uint64ToString(GetCompileOptions())) +
- sh::GetBuiltInResourcesString(compiler_);
+OptionsAffectingCompilationString*
+ShaderTranslator::GetStringForOptionsThatWouldAffectCompilation() const {
+ return options_affecting_compilation_.get();
}
void ShaderTranslator::AddDestructionObserver(
diff --git a/chromium/gpu/command_buffer/service/shader_translator.h b/chromium/gpu/command_buffer/service/shader_translator.h
index 43ae7df7289..5d344636f0b 100644
--- a/chromium/gpu/command_buffer/service/shader_translator.h
+++ b/chromium/gpu/command_buffer/service/shader_translator.h
@@ -27,6 +27,7 @@ typedef std::vector<sh::OutputVariable> OutputVariableList;
typedef base::hash_map<std::string, sh::Uniform> UniformMap;
typedef base::hash_map<std::string, sh::Varying> VaryingMap;
typedef base::hash_map<std::string, sh::InterfaceBlock> InterfaceBlockMap;
+typedef base::RefCountedData<std::string> OptionsAffectingCompilationString;
// Translates a GLSL ES 2.0 shader to desktop GLSL shader, or just
// validates GLSL ES 2.0 shaders on a true GLSL ES implementation.
@@ -61,7 +62,8 @@ class ShaderTranslatorInterface
  // Return a string that is unique for a specific set of options that would
// possibly affect compilation.
- virtual std::string GetStringForOptionsThatWouldAffectCompilation() const = 0;
+ virtual OptionsAffectingCompilationString*
+ GetStringForOptionsThatWouldAffectCompilation() const = 0;
protected:
virtual ~ShaderTranslatorInterface() {}
@@ -72,8 +74,7 @@ class ShaderTranslatorInterface
};
// Implementation of ShaderTranslatorInterface
-class GPU_EXPORT ShaderTranslator
- : NON_EXPORTED_BASE(public ShaderTranslatorInterface) {
+class GPU_EXPORT ShaderTranslator : public ShaderTranslatorInterface {
public:
class DestructionObserver {
public:
@@ -111,7 +112,8 @@ class GPU_EXPORT ShaderTranslator
InterfaceBlockMap* interface_block_map,
OutputVariableList* output_variable_list) const override;
- std::string GetStringForOptionsThatWouldAffectCompilation() const override;
+ OptionsAffectingCompilationString*
+ GetStringForOptionsThatWouldAffectCompilation() const override;
void AddDestructionObserver(DestructionObserver* observer);
void RemoveDestructionObserver(DestructionObserver* observer);
@@ -123,6 +125,8 @@ class GPU_EXPORT ShaderTranslator
ShHandle compiler_;
ShCompileOptions compile_options_;
+ scoped_refptr<OptionsAffectingCompilationString>
+ options_affecting_compilation_;
base::ObserverList<DestructionObserver> destruction_observers_;
};
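
OptionsAffectingCompilationString is just base::RefCountedData<std::string>: the translator builds it once in Init() and hands out a raw pointer, so shaders can keep a cheap scoped_refptr after the translator itself goes away. A minimal sketch of that pattern (the function name and values are illustrative):

    #include "base/memory/ref_counted.h"

    using OptionsString = base::RefCountedData<std::string>;

    scoped_refptr<OptionsString> BuildOptionsString(
        uint64_t compile_options, const std::string& built_in_resources) {
      // Built once; readers append options->data to their cache keys.
      return base::MakeRefCounted<OptionsString>(
          ":CompileOptions:" + std::to_string(compile_options) +
          built_in_resources);
    }
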
diff --git a/chromium/gpu/command_buffer/service/shader_translator_cache.h b/chromium/gpu/command_buffer/service/shader_translator_cache.h
index b764b754c8f..1b8cd588f07 100644
--- a/chromium/gpu/command_buffer/service/shader_translator_cache.h
+++ b/chromium/gpu/command_buffer/service/shader_translator_cache.h
@@ -27,7 +27,7 @@ namespace gles2 {
// TODO(backer): Investigate using glReleaseShaderCompiler as an alternative to
// to this cache.
class GPU_EXPORT ShaderTranslatorCache
- : public NON_EXPORTED_BASE(ShaderTranslator::DestructionObserver) {
+ : public ShaderTranslator::DestructionObserver {
public:
explicit ShaderTranslatorCache(const GpuPreferences& gpu_preferences);
~ShaderTranslatorCache() override;
diff --git a/chromium/gpu/command_buffer/service/shader_translator_unittest.cc b/chromium/gpu/command_buffer/service/shader_translator_unittest.cc
index d9e0a418e01..d548498259d 100644
--- a/chromium/gpu/command_buffer/service/shader_translator_unittest.cc
+++ b/chromium/gpu/command_buffer/service/shader_translator_unittest.cc
@@ -16,7 +16,7 @@ class ShaderTranslatorTest : public testing::Test {
ShaderTranslatorTest() {
shader_output_language_ =
ShaderTranslator::GetShaderOutputLanguageForContext(
- gl::GLVersionInfo("2.0", "", ""));
+ gl::GLVersionInfo("2.0", "", gl::ExtensionSet()));
}
~ShaderTranslatorTest() override {}
@@ -55,7 +55,7 @@ class ES3ShaderTranslatorTest : public testing::Test {
ES3ShaderTranslatorTest() {
shader_output_language_ =
ShaderTranslator::GetShaderOutputLanguageForContext(
- gl::GLVersionInfo("3.0", "", ""));
+ gl::GLVersionInfo("3.0", "", gl::ExtensionSet()));
}
~ES3ShaderTranslatorTest() override {}
@@ -418,13 +418,13 @@ TEST_F(ShaderTranslatorTest, OptionsString) {
false));
std::string options_1(
- translator_1->GetStringForOptionsThatWouldAffectCompilation());
+ translator_1->GetStringForOptionsThatWouldAffectCompilation()->data);
std::string options_2(
- translator_1->GetStringForOptionsThatWouldAffectCompilation());
+ translator_1->GetStringForOptionsThatWouldAffectCompilation()->data);
std::string options_3(
- translator_2->GetStringForOptionsThatWouldAffectCompilation());
+ translator_2->GetStringForOptionsThatWouldAffectCompilation()->data);
std::string options_4(
- translator_3->GetStringForOptionsThatWouldAffectCompilation());
+ translator_3->GetStringForOptionsThatWouldAffectCompilation()->data);
EXPECT_EQ(options_1, options_2);
EXPECT_NE(options_1, options_3);
@@ -505,7 +505,8 @@ TEST_P(ShaderTranslatorOutputVersionTest, HasCorrectOutputGLSLVersion) {
" gl_Position = vPosition;\n"
"}";
- gl::GLVersionInfo output_context_version(testing::get<0>(GetParam()), "", "");
+ gl::GLVersionInfo output_context_version(testing::get<0>(GetParam()), "",
+ gl::ExtensionSet());
scoped_refptr<ShaderTranslator> translator = new ShaderTranslator();
ShBuiltInResources resources;
diff --git a/chromium/gpu/command_buffer/service/test_helper.cc b/chromium/gpu/command_buffer/service/test_helper.cc
index 4b56f76f8fc..27686d752e8 100644
--- a/chromium/gpu/command_buffer/service/test_helper.cc
+++ b/chromium/gpu/command_buffer/service/test_helper.cc
@@ -11,8 +11,6 @@
#include <string>
#include "base/strings/string_number_conversions.h"
-#include "base/strings/string_split.h"
-#include "base/strings/string_tokenizer.h"
#include "gpu/command_buffer/service/buffer_manager.h"
#include "gpu/command_buffer/service/error_state_mock.h"
#include "gpu/command_buffer/service/feature_info.h"
@@ -207,7 +205,7 @@ void TestHelper::SetupTextureManagerInitExpectations(
bool is_es3_enabled,
bool is_es3_capable,
bool is_desktop_core_profile,
- const char* extensions,
+ const gl::ExtensionSet& extensions,
bool use_default_textures) {
InSequence sequence;
@@ -229,19 +227,11 @@ void TestHelper::SetupTextureManagerInitExpectations(
gl, GL_TEXTURE_2D_ARRAY, use_default_textures);
}
- bool ext_image_external = false;
- bool arb_texture_rectangle = is_desktop_core_profile;
- base::CStringTokenizer t(extensions, extensions + strlen(extensions), " ");
- while (t.GetNext()) {
- if (t.token() == "GL_OES_EGL_image_external") {
- ext_image_external = true;
- break;
- }
- if (t.token() == "GL_ARB_texture_rectangle") {
- arb_texture_rectangle = true;
- break;
- }
- }
+ bool ext_image_external =
+ gl::HasExtension(extensions, "GL_OES_EGL_image_external");
+ bool arb_texture_rectangle =
+ is_desktop_core_profile ||
+ gl::HasExtension(extensions, "GL_ARB_texture_rectangle");
if (ext_image_external) {
SetupTextureInitializationExpectations(
@@ -293,7 +283,7 @@ void TestHelper::SetupTextureManagerDestructionExpectations(
::gl::MockGLInterface* gl,
bool is_es3_enabled,
bool is_desktop_core_profile,
- const char* extensions,
+ const gl::ExtensionSet& extensions,
bool use_default_textures) {
SetupTextureDestructionExpectations(gl, GL_TEXTURE_2D, use_default_textures);
SetupTextureDestructionExpectations(
@@ -306,19 +296,11 @@ void TestHelper::SetupTextureManagerDestructionExpectations(
        gl, GL_TEXTURE_2D_ARRAY, use_default_textures);
}
- bool ext_image_external = false;
- bool arb_texture_rectangle = false;
- base::CStringTokenizer t(extensions, extensions + strlen(extensions), " ");
- while (t.GetNext()) {
- if (t.token() == "GL_OES_EGL_image_external") {
- ext_image_external = true;
- break;
- }
- if (t.token() == "GL_ARB_texture_rectangle") {
- arb_texture_rectangle = true;
- break;
- }
- }
+ bool ext_image_external =
+ gl::HasExtension(extensions, "GL_OES_EGL_image_external");
+ bool arb_texture_rectangle =
+ is_desktop_core_profile ||
+ gl::HasExtension(extensions, "GL_ARB_texture_rectangle");
if (ext_image_external) {
SetupTextureDestructionExpectations(
@@ -346,20 +328,23 @@ void TestHelper::SetupContextGroupInitExpectations(
bool enable_es3 = !(context_type == CONTEXT_TYPE_OPENGLES2 ||
context_type == CONTEXT_TYPE_WEBGL1);
- gl::GLVersionInfo gl_info(gl_version, "", extensions);
+ gl::ExtensionSet extension_set(gl::MakeExtensionSet(extensions));
+ gl::GLVersionInfo gl_info(gl_version, "", extension_set);
SetupFeatureInfoInitExpectationsWithGLVersion(gl, extensions, "", gl_version,
context_type);
EXPECT_CALL(*gl, GetIntegerv(GL_MAX_RENDERBUFFER_SIZE, _))
.WillOnce(SetArgPointee<1>(kMaxRenderbufferSize))
.RetiresOnSaturation();
- if (strstr(extensions, "GL_EXT_framebuffer_multisample") ||
- strstr(extensions, "GL_EXT_multisampled_render_to_texture") ||
+ if (gl::HasExtension(extension_set, "GL_EXT_framebuffer_multisample") ||
+ gl::HasExtension(extension_set,
+ "GL_EXT_multisampled_render_to_texture") ||
gl_info.is_es3 || gl_info.is_desktop_core_profile) {
EXPECT_CALL(*gl, GetIntegerv(GL_MAX_SAMPLES, _))
.WillOnce(SetArgPointee<1>(kMaxSamples))
.RetiresOnSaturation();
- } else if (strstr(extensions, "GL_IMG_multisampled_render_to_texture")) {
+ } else if (gl::HasExtension(extension_set,
+ "GL_IMG_multisampled_render_to_texture")) {
EXPECT_CALL(*gl, GetIntegerv(GL_MAX_SAMPLES_IMG, _))
.WillOnce(SetArgPointee<1>(kMaxSamples))
.RetiresOnSaturation();
@@ -368,9 +353,10 @@ void TestHelper::SetupContextGroupInitExpectations(
if (enable_es3 ||
(!enable_es3 &&
(gl_info.is_desktop_core_profile ||
- strstr(extensions, "GL_EXT_draw_buffers") ||
- strstr(extensions, "GL_ARB_draw_buffers") ||
- (gl_info.is_es3 && strstr(extensions, "GL_NV_draw_buffers"))))) {
+ gl::HasExtension(extension_set, "GL_EXT_draw_buffers") ||
+ gl::HasExtension(extension_set, "GL_ARB_draw_buffers") ||
+ (gl_info.is_es3 &&
+ gl::HasExtension(extension_set, "GL_NV_draw_buffers"))))) {
EXPECT_CALL(*gl, GetIntegerv(GL_MAX_COLOR_ATTACHMENTS_EXT, _))
.WillOnce(SetArgPointee<1>(8))
.RetiresOnSaturation();
@@ -381,8 +367,9 @@ void TestHelper::SetupContextGroupInitExpectations(
if (gl_info.IsAtLeastGL(3, 3) ||
(gl_info.IsAtLeastGL(3, 2) &&
- strstr(extensions, "GL_ARB_blend_func_extended")) ||
- (gl_info.is_es && strstr(extensions, "GL_EXT_blend_func_extended"))) {
+ gl::HasExtension(extension_set, "GL_ARB_blend_func_extended")) ||
+ (gl_info.is_es &&
+ gl::HasExtension(extension_set, "GL_EXT_blend_func_extended"))) {
EXPECT_CALL(*gl, GetIntegerv(GL_MAX_DUAL_SOURCE_DRAW_BUFFERS_EXT, _))
.WillOnce(SetArgPointee<1>(8))
.RetiresOnSaturation();
@@ -422,7 +409,7 @@ void TestHelper::SetupContextGroupInitExpectations(
.WillOnce(SetArgPointee<1>(kMaxArrayTextureLayers))
.RetiresOnSaturation();
}
- if (strstr(extensions, "GL_ARB_texture_rectangle") ||
+ if (gl::HasExtension(extension_set, "GL_ARB_texture_rectangle") ||
gl_info.is_desktop_core_profile) {
EXPECT_CALL(*gl, GetIntegerv(GL_MAX_RECTANGLE_TEXTURE_SIZE, _))
.WillOnce(SetArgPointee<1>(kMaxRectangleTextureSize))
@@ -475,9 +462,9 @@ void TestHelper::SetupContextGroupInitExpectations(
.RetiresOnSaturation();
bool use_default_textures = bind_generates_resource;
- SetupTextureManagerInitExpectations(
- gl, enable_es3, gl_info.is_es3_capable, gl_info.is_desktop_core_profile,
- extensions, use_default_textures);
+ SetupTextureManagerInitExpectations(gl, enable_es3, gl_info.is_es3_capable,
+ gl_info.is_desktop_core_profile,
+ extension_set, use_default_textures);
}
void TestHelper::SetupFeatureInfoInitExpectations(::gl::MockGLInterface* gl,
@@ -501,14 +488,11 @@ void TestHelper::SetupFeatureInfoInitExpectationsWithGLVersion(
.WillOnce(Return(reinterpret_cast<const uint8_t*>(gl_version)))
.RetiresOnSaturation();
+ gl::ExtensionSet extension_set(gl::MakeExtensionSet(extensions));
// Persistent storage is needed for the split extension string.
- split_extensions_.clear();
- if (extensions) {
- split_extensions_ = base::SplitString(
- extensions, " ", base::KEEP_WHITESPACE, base::SPLIT_WANT_NONEMPTY);
- }
-
- gl::GLVersionInfo gl_info(gl_version, gl_renderer, extensions);
+ split_extensions_ =
+ std::vector<std::string>(extension_set.begin(), extension_set.end());
+ gl::GLVersionInfo gl_info(gl_version, gl_renderer, extension_set);
if (!gl_info.is_es && gl_info.major_version >= 3) {
EXPECT_CALL(*gl, GetIntegerv(GL_NUM_EXTENSIONS, _))
.WillOnce(SetArgPointee<1>(split_extensions_.size()))
@@ -538,10 +522,11 @@ void TestHelper::SetupFeatureInfoInitExpectationsWithGLVersion(
.RetiresOnSaturation();
}
- if ((strstr(extensions, "GL_ARB_texture_float") ||
+ if ((gl::HasExtension(extension_set, "GL_ARB_texture_float") ||
gl_info.is_desktop_core_profile) ||
- (gl_info.is_es3 && strstr(extensions, "GL_OES_texture_float") &&
- strstr(extensions, "GL_EXT_color_buffer_float"))) {
+ (gl_info.is_es3 &&
+ gl::HasExtension(extension_set, "GL_OES_texture_float") &&
+ gl::HasExtension(extension_set, "GL_EXT_color_buffer_float"))) {
static const GLuint tx_ids[] = {101, 102};
static const GLuint fb_ids[] = {103, 104};
const GLsizei width = 16;
@@ -637,7 +622,8 @@ void TestHelper::SetupFeatureInfoInitExpectationsWithGLVersion(
.Times(1)
.RetiresOnSaturation();
}
- if (!enable_es3 && !strstr(extensions, "GL_EXT_color_buffer_half_float") &&
+ if (!enable_es3 &&
+ !gl::HasExtension(extension_set, "GL_EXT_color_buffer_half_float") &&
(gl_info.is_es || gl_info.IsAtLeastGL(3, 0))) {
EXPECT_CALL(
*gl,
@@ -687,9 +673,10 @@ void TestHelper::SetupFeatureInfoInitExpectationsWithGLVersion(
if (enable_es3 ||
(!enable_es3 &&
(gl_info.is_desktop_core_profile ||
- strstr(extensions, "GL_EXT_draw_buffers") ||
- strstr(extensions, "GL_ARB_draw_buffers") ||
- (gl_info.is_es3 && strstr(extensions, "GL_NV_draw_buffers"))))) {
+ gl::HasExtension(extension_set, "GL_EXT_draw_buffers") ||
+ gl::HasExtension(extension_set, "GL_ARB_draw_buffers") ||
+ (gl_info.is_es3 &&
+ gl::HasExtension(extension_set, "GL_NV_draw_buffers"))))) {
EXPECT_CALL(*gl, GetIntegerv(GL_MAX_COLOR_ATTACHMENTS_EXT, _))
.WillOnce(SetArgPointee<1>(8))
.RetiresOnSaturation();
@@ -699,8 +686,8 @@ void TestHelper::SetupFeatureInfoInitExpectationsWithGLVersion(
}
if (gl_info.is_es3 || gl_info.is_desktop_core_profile ||
- strstr(extensions, "GL_EXT_texture_rg") ||
- (strstr(extensions, "GL_ARB_texture_rg"))) {
+ gl::HasExtension(extension_set, "GL_EXT_texture_rg") ||
+ (gl::HasExtension(extension_set, "GL_ARB_texture_rg"))) {
static const GLuint tx_ids[] = {101, 102};
static const GLuint fb_ids[] = {103, 104};
const GLsizei width = 1;
@@ -1119,7 +1106,8 @@ void TestHelper::SetShaderStates(
const UniformMap* const expected_uniform_map,
const VaryingMap* const expected_varying_map,
const InterfaceBlockMap* const expected_interface_block_map,
- const OutputVariableList* const expected_output_variable_list) {
+ const OutputVariableList* const expected_output_variable_list,
+ OptionsAffectingCompilationString* options_affecting_compilation) {
const std::string empty_log_info;
const std::string* log_info = (expected_log_info && !expected_valid) ?
expected_log_info : &empty_log_info;
@@ -1167,6 +1155,9 @@ void TestHelper::SetShaderStates(
SetArgPointee<7>(*interface_block_map),
SetArgPointee<8>(*output_variable_list), Return(expected_valid)))
.RetiresOnSaturation();
+ EXPECT_CALL(*mock_translator, GetStringForOptionsThatWouldAffectCompilation())
+ .WillOnce(Return(options_affecting_compilation))
+ .RetiresOnSaturation();
if (expected_valid) {
EXPECT_CALL(*gl, ShaderSource(shader->service_id(), 1, _, NULL))
.Times(1)
@@ -1188,7 +1179,20 @@ void TestHelper::SetShaderStates(::gl::MockGLInterface* gl,
Shader* shader,
bool valid) {
SetShaderStates(gl, shader, valid, nullptr, nullptr, nullptr, nullptr,
- nullptr, nullptr, nullptr, nullptr);
+ nullptr, nullptr, nullptr, nullptr, nullptr);
+}
+
+// static
+void TestHelper::SetShaderStates(
+ ::gl::MockGLInterface* gl,
+ Shader* shader,
+ bool valid,
+ const std::string& options_affecting_compilation) {
+ scoped_refptr<OptionsAffectingCompilationString> options =
+ base::MakeRefCounted<OptionsAffectingCompilationString>(
+ options_affecting_compilation);
+ SetShaderStates(gl, shader, valid, nullptr, nullptr, nullptr, nullptr,
+ nullptr, nullptr, nullptr, nullptr, options.get());
}
// static
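
The strstr()/CStringTokenizer scans above are replaced by parsing the extension string once into a gl::ExtensionSet and querying it with gl::HasExtension(), matching the calls introduced throughout this file. A small sketch of the pattern (the helper and the extension string are arbitrary examples):

    // Sketch: one parse, then cheap membership checks instead of substring
    // scans over the raw extension string.
    bool HasDrawBuffers(const char* gl_extensions) {
      gl::ExtensionSet extensions = gl::MakeExtensionSet(gl_extensions);
      return gl::HasExtension(extensions, "GL_EXT_draw_buffers") ||
             gl::HasExtension(extensions, "GL_ARB_draw_buffers");
    }
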
diff --git a/chromium/gpu/command_buffer/service/test_helper.h b/chromium/gpu/command_buffer/service/test_helper.h
index f138507c255..e096a7a5295 100644
--- a/chromium/gpu/command_buffer/service/test_helper.h
+++ b/chromium/gpu/command_buffer/service/test_helper.h
@@ -117,17 +117,18 @@ class TestHelper {
const char* gl_renderer,
const char* gl_version,
ContextType context_type);
- static void SetupTextureManagerInitExpectations(::gl::MockGLInterface* gl,
- bool is_es3_enabled,
- bool is_es3_capable,
- bool is_desktop_core_profile,
- const char* extensions,
- bool use_default_textures);
+ static void SetupTextureManagerInitExpectations(
+ ::gl::MockGLInterface* gl,
+ bool is_es3_enabled,
+ bool is_es3_capable,
+ bool is_desktop_core_profile,
+ const gl::ExtensionSet& extensions,
+ bool use_default_textures);
static void SetupTextureManagerDestructionExpectations(
::gl::MockGLInterface* gl,
bool is_es3_enabled,
bool is_desktop_core_profile,
- const char* extensions,
+ const gl::ExtensionSet& extensions,
bool use_default_textures);
static void SetupExpectationsForClearingUniforms(::gl::MockGLInterface* gl,
@@ -197,12 +198,18 @@ class TestHelper {
const UniformMap* const expected_uniform_map,
const VaryingMap* const expected_varying_map,
const InterfaceBlockMap* const expected_interface_block_map,
- const OutputVariableList* const expected_output_variable_list);
+ const OutputVariableList* const expected_output_variable_list,
+ OptionsAffectingCompilationString* options_affecting_compilation);
static void SetShaderStates(::gl::MockGLInterface* gl,
Shader* shader,
bool valid);
+ static void SetShaderStates(::gl::MockGLInterface* gl,
+ Shader* shader,
+ bool valid,
+ const std::string& options_affecting_compilation);
+
static sh::Attribute ConstructAttribute(
GLenum type, GLint array_size, GLenum precision,
bool static_use, const std::string& name);
diff --git a/chromium/gpu/command_buffer/service/texture_manager.cc b/chromium/gpu/command_buffer/service/texture_manager.cc
index f56e04f4118..acd6afabe7d 100644
--- a/chromium/gpu/command_buffer/service/texture_manager.cc
+++ b/chromium/gpu/command_buffer/service/texture_manager.cc
@@ -488,7 +488,9 @@ void TextureBase::SetMailboxManager(MailboxManager* mailbox_manager) {
}
TexturePassthrough::TexturePassthrough(GLuint service_id, GLenum target)
- : TextureBase(service_id), have_context_(true) {
+ : TextureBase(service_id),
+ have_context_(true),
+ level_images_(target == GL_TEXTURE_CUBE_MAP ? 6 : 1) {
TextureBase::SetTarget(target);
}
@@ -503,6 +505,38 @@ void TexturePassthrough::MarkContextLost() {
have_context_ = false;
}
+void TexturePassthrough::SetLevelImage(GLenum target,
+ GLint level,
+ gl::GLImage* image) {
+ size_t face_idx = GLES2Util::GLTargetToFaceIndex(target);
+ DCHECK(face_idx < level_images_.size());
+ DCHECK(level >= 0);
+
+ // Don't allocate space for the images until needed
+ if (static_cast<GLint>(level_images_[face_idx].size()) <= level) {
+ level_images_[face_idx].resize(level + 1);
+ }
+
+ level_images_[face_idx][level] = image;
+}
+
+gl::GLImage* TexturePassthrough::GetLevelImage(GLenum target,
+ GLint level) const {
+ if (GLES2Util::GLFaceTargetToTextureTarget(target) != target_) {
+ return nullptr;
+ }
+
+ size_t face_idx = GLES2Util::GLTargetToFaceIndex(target);
+ DCHECK(face_idx < level_images_.size());
+ DCHECK(level >= 0);
+
+  if (static_cast<GLint>(level_images_[face_idx].size()) <= level) {
+ return nullptr;
+ }
+
+ return level_images_[face_idx][level].get();
+}
+
Texture::Texture(GLuint service_id)
: TextureBase(service_id),
memory_tracking_ref_(NULL),
diff --git a/chromium/gpu/command_buffer/service/texture_manager.h b/chromium/gpu/command_buffer/service/texture_manager.h
index 02c5c1119c7..77b7e7fef5c 100644
--- a/chromium/gpu/command_buffer/service/texture_manager.h
+++ b/chromium/gpu/command_buffer/service/texture_manager.h
@@ -88,6 +88,9 @@ class TexturePassthrough final : public TextureBase,
// native GL texture in the destructor
void MarkContextLost();
+ void SetLevelImage(GLenum target, GLint level, gl::GLImage* image);
+ gl::GLImage* GetLevelImage(GLenum target, GLint level) const;
+
protected:
~TexturePassthrough() override;
@@ -96,6 +99,9 @@ class TexturePassthrough final : public TextureBase,
bool have_context_;
+ // Bound images divided into faces and then levels
+ std::vector<std::vector<scoped_refptr<gl::GLImage>>> level_images_;
+
DISALLOW_COPY_AND_ASSIGN(TexturePassthrough);
};
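
level_images_ is indexed by cube-map face first (six inner vectors for GL_TEXTURE_CUBE_MAP, one otherwise) and by mip level second, with each face vector grown lazily in SetLevelImage(). A usage sketch from a hypothetical passthrough decoder, assuming a service_id and a scoped_refptr<gl::GLImage> image exist:

    // Sketch: attach a GLImage to level 0 of the +X cube face, then read it
    // back with the same face target (a mismatched target returns nullptr).
    scoped_refptr<gpu::gles2::TexturePassthrough> texture(
        new gpu::gles2::TexturePassthrough(service_id, GL_TEXTURE_CUBE_MAP));
    texture->SetLevelImage(GL_TEXTURE_CUBE_MAP_POSITIVE_X, 0, image.get());
    gl::GLImage* bound =
        texture->GetLevelImage(GL_TEXTURE_CUBE_MAP_POSITIVE_X, 0);
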
diff --git a/chromium/gpu/command_buffer/service/texture_manager_unittest.cc b/chromium/gpu/command_buffer/service/texture_manager_unittest.cc
index ad82cf301ec..92a2c7675c2 100644
--- a/chromium/gpu/command_buffer/service/texture_manager_unittest.cc
+++ b/chromium/gpu/command_buffer/service/texture_manager_unittest.cc
@@ -85,7 +85,7 @@ class TextureManagerTest : public GpuServiceTest {
kUseDefaultTextures, nullptr, &discardable_manager_));
SetupFeatureInfo("", "OpenGL ES 2.0", CONTEXT_TYPE_OPENGLES2);
TestHelper::SetupTextureManagerInitExpectations(
- gl_.get(), false, false, false, "", kUseDefaultTextures);
+ gl_.get(), false, false, false, {}, kUseDefaultTextures);
manager_->Initialize();
error_state_.reset(new ::testing::StrictMock<gles2::MockErrorState>());
}
@@ -240,8 +240,9 @@ TEST_F(TextureManagerTest, SetParameter) {
TEST_F(TextureManagerTest, UseDefaultTexturesTrue) {
bool use_default_textures = true;
- TestHelper::SetupTextureManagerInitExpectations(gl_.get(),
- false, false, false, "GL_ANGLE_texture_usage", use_default_textures);
+ TestHelper::SetupTextureManagerInitExpectations(
+ gl_.get(), false, false, false, {"GL_ANGLE_texture_usage"},
+ use_default_textures);
TextureManager manager(nullptr, feature_info_.get(), kMaxTextureSize,
kMaxCubeMapTextureSize, kMaxRectangleTextureSize,
kMax3DTextureSize, kMaxArrayTextureLayers,
@@ -258,8 +259,9 @@ TEST_F(TextureManagerTest, UseDefaultTexturesTrue) {
TEST_F(TextureManagerTest, UseDefaultTexturesFalse) {
bool use_default_textures = false;
- TestHelper::SetupTextureManagerInitExpectations(gl_.get(),
- false, false, false, "GL_ANGLE_texture_usage", use_default_textures);
+ TestHelper::SetupTextureManagerInitExpectations(
+ gl_.get(), false, false, false, {"GL_ANGLE_texture_usage"},
+ use_default_textures);
TextureManager manager(nullptr, feature_info_.get(), kMaxTextureSize,
kMaxCubeMapTextureSize, kMaxRectangleTextureSize,
kMax3DTextureSize, kMaxArrayTextureLayers,
@@ -277,8 +279,8 @@ TEST_F(TextureManagerTest, UseDefaultTexturesFalse) {
TEST_F(TextureManagerTest, UseDefaultTexturesTrueES3) {
bool use_default_textures = true;
SetupFeatureInfo("", "OpenGL ES 3.0", CONTEXT_TYPE_OPENGLES3);
- TestHelper::SetupTextureManagerInitExpectations(gl_.get(),
- true, true, false, "", use_default_textures);
+ TestHelper::SetupTextureManagerInitExpectations(gl_.get(), true, true, false,
+ {}, use_default_textures);
TextureManager manager(nullptr, feature_info_.get(), kMaxTextureSize,
kMaxCubeMapTextureSize, kMaxRectangleTextureSize,
kMax3DTextureSize, kMaxArrayTextureLayers,
@@ -294,8 +296,8 @@ TEST_F(TextureManagerTest, UseDefaultTexturesTrueES3) {
TEST_F(TextureManagerTest, UseDefaultTexturesFalseES3) {
bool use_default_textures = false;
SetupFeatureInfo("", "OpenGL ES 3.0", CONTEXT_TYPE_OPENGLES3);
- TestHelper::SetupTextureManagerInitExpectations(gl_.get(),
- true, true, false, "", use_default_textures);
+ TestHelper::SetupTextureManagerInitExpectations(gl_.get(), true, true, false,
+ {}, use_default_textures);
TextureManager manager(nullptr, feature_info_.get(), kMaxTextureSize,
kMaxCubeMapTextureSize, kMaxRectangleTextureSize,
kMax3DTextureSize, kMaxArrayTextureLayers,
@@ -310,7 +312,7 @@ TEST_F(TextureManagerTest, UseDefaultTexturesFalseES3) {
TEST_F(TextureManagerTest, TextureUsageExt) {
TestHelper::SetupTextureManagerInitExpectations(
- gl_.get(), false, false, false, "GL_ANGLE_texture_usage",
+ gl_.get(), false, false, false, {"GL_ANGLE_texture_usage"},
kUseDefaultTextures);
TextureManager manager(nullptr, feature_info_.get(), kMaxTextureSize,
kMaxCubeMapTextureSize, kMaxRectangleTextureSize,
@@ -336,7 +338,7 @@ TEST_F(TextureManagerTest, Destroy) {
const GLuint kClient1Id = 1;
const GLuint kService1Id = 11;
TestHelper::SetupTextureManagerInitExpectations(
- gl_.get(), false, false, false, "", kUseDefaultTextures);
+ gl_.get(), false, false, false, {}, kUseDefaultTextures);
TextureManager manager(nullptr, feature_info_.get(), kMaxTextureSize,
kMaxCubeMapTextureSize, kMaxRectangleTextureSize,
kMax3DTextureSize, kMaxArrayTextureLayers,
@@ -351,7 +353,7 @@ TEST_F(TextureManagerTest, Destroy) {
.Times(1)
.RetiresOnSaturation();
TestHelper::SetupTextureManagerDestructionExpectations(
- gl_.get(), false, false, "", kUseDefaultTextures);
+ gl_.get(), false, false, {}, kUseDefaultTextures);
manager.Destroy(true);
// Check that resources got freed.
texture = manager.GetTexture(kClient1Id);
@@ -512,8 +514,8 @@ TEST_F(TextureManagerTest, AlphaLuminanceCompatibilityProfile) {
const GLuint kServiceId = 11;
SetupFeatureInfo("", "2.1", CONTEXT_TYPE_OPENGLES2);
- TestHelper::SetupTextureManagerInitExpectations(gl_.get(), false, false,
- false, "", kUseDefaultTextures);
+ TestHelper::SetupTextureManagerInitExpectations(
+ gl_.get(), false, false, false, {}, kUseDefaultTextures);
TextureManager manager(nullptr, feature_info_.get(), kMaxTextureSize,
kMaxCubeMapTextureSize, kMaxRectangleTextureSize,
kMax3DTextureSize, kMaxArrayTextureLayers,
@@ -554,7 +556,7 @@ TEST_F(TextureManagerTest, AlphaLuminanceCoreProfileEmulation) {
SetupFeatureInfo("", "4.2", CONTEXT_TYPE_OPENGLES3);
TestHelper::SetupTextureManagerInitExpectations(gl_.get(), true, true, true,
- "", kUseDefaultTextures);
+ {}, kUseDefaultTextures);
TextureManager manager(nullptr, feature_info_.get(), kMaxTextureSize,
kMaxCubeMapTextureSize, kMaxRectangleTextureSize,
kMax3DTextureSize, kMaxArrayTextureLayers,
@@ -2212,10 +2214,10 @@ class SharedTextureTest : public GpuServiceTest {
nullptr, &discardable_manager_));
SetupFeatureInfo("", "OpenGL ES 2.0", CONTEXT_TYPE_OPENGLES2);
TestHelper::SetupTextureManagerInitExpectations(
- gl_.get(), false, false, false, "", kUseDefaultTextures);
+ gl_.get(), false, false, false, {}, kUseDefaultTextures);
texture_manager1_->Initialize();
TestHelper::SetupTextureManagerInitExpectations(
- gl_.get(), false, false, false, "", kUseDefaultTextures);
+ gl_.get(), false, false, false, {}, kUseDefaultTextures);
texture_manager2_->Initialize();
}
diff --git a/chromium/gpu/command_buffer/service/transfer_buffer_manager.cc b/chromium/gpu/command_buffer/service/transfer_buffer_manager.cc
index 34b164a8974..29359bd40d6 100644
--- a/chromium/gpu/command_buffer/service/transfer_buffer_manager.cc
+++ b/chromium/gpu/command_buffer/service/transfer_buffer_manager.cc
@@ -130,14 +130,14 @@ bool TransferBufferManager::OnMemoryDump(
dump->AddScalar(MemoryAllocatorDump::kNameSize,
MemoryAllocatorDump::kUnitsBytes, buffer->size());
- auto guid =
- GetBufferGUIDForTracing(memory_tracker_->ClientTracingId(), buffer_id);
auto shared_memory_guid =
buffer->backing()->shared_memory_handle().GetGUID();
if (!shared_memory_guid.is_empty()) {
- pmd->CreateSharedMemoryOwnershipEdge(
- dump->guid(), guid, shared_memory_guid, 0 /* importance */);
+ pmd->CreateSharedMemoryOwnershipEdge(dump->guid(), shared_memory_guid,
+ 0 /* importance */);
} else {
+ auto guid = GetBufferGUIDForTracing(memory_tracker_->ClientTracingId(),
+ buffer_id);
pmd->CreateSharedGlobalAllocatorDump(guid);
pmd->AddOwnershipEdge(dump->guid(), guid);
}
diff --git a/chromium/gpu/command_buffer/service/transfer_buffer_manager.h b/chromium/gpu/command_buffer/service/transfer_buffer_manager.h
index 6239f1b777b..efe7631490d 100644
--- a/chromium/gpu/command_buffer/service/transfer_buffer_manager.h
+++ b/chromium/gpu/command_buffer/service/transfer_buffer_manager.h
@@ -39,6 +39,10 @@ class GPU_EXPORT TransferBufferManager
void DestroyTransferBuffer(int32_t id);
scoped_refptr<Buffer> GetTransferBuffer(int32_t id);
+ size_t shared_memory_bytes_allocated() const {
+ return shared_memory_bytes_allocated_;
+ }
+
private:
typedef base::flat_map<int32_t, scoped_refptr<Buffer>> BufferMap;
BufferMap registered_buffers_;
diff --git a/chromium/gpu/command_buffer/service/vertex_attrib_manager.cc b/chromium/gpu/command_buffer/service/vertex_attrib_manager.cc
index 30d3fa1b3df..76eb9d9d7ef 100644
--- a/chromium/gpu/command_buffer/service/vertex_attrib_manager.cc
+++ b/chromium/gpu/command_buffer/service/vertex_attrib_manager.cc
@@ -29,6 +29,7 @@ namespace gles2 {
VertexAttrib::VertexAttrib()
: index_(0),
enabled_(false),
+ enabled_in_driver_(false),
size_(4),
type_(GL_FLOAT),
offset_(0),
@@ -38,8 +39,7 @@ VertexAttrib::VertexAttrib()
divisor_(0),
integer_(GL_FALSE),
is_client_side_array_(false),
- list_(NULL) {
-}
+ list_(NULL) {}
VertexAttrib::VertexAttrib(const VertexAttrib& other) = default;
@@ -210,6 +210,16 @@ bool VertexAttribManager::ValidateBindings(
}
const Program::VertexAttrib* attrib_info =
current_program->GetAttribInfoByLocation(attrib->index());
+
+  // Make sure that every attrib in enabled_vertex_attribs_ is enabled in the
+  // driver if and only if it is consumed by the current shader program.
+  // (Note that since the containing loop is over
+ // enabled_vertex_attribs_, not all vertex attribs, it doesn't erroneously
+ // enable any attribs that should be disabled.)
+ // This is for http://crbug.com/756293 but also subsumes some workaround
+ // code for use_client_side_arrays_for_stream_buffers.
+ SetDriverVertexAttribEnabled(attrib->index(), attrib_info != nullptr);
+
if (attrib_info) {
divisor0 |= (attrib->divisor() == 0);
have_enabled_active_attribs = true;
@@ -224,7 +234,6 @@ bool VertexAttribManager::ValidateBindings(
return false;
}
if (use_client_side_arrays_for_stream_buffers) {
- glEnableVertexAttribArray(attrib->index());
if (buffer->IsClientSideArray()) {
if (current_buffer_id != 0) {
current_buffer_id = 0;
@@ -257,18 +266,6 @@ bool VertexAttribManager::ValidateBindings(
ptr);
}
}
- } else {
- // This attrib is not used in the current program.
- if (use_client_side_arrays_for_stream_buffers) {
- // Disable client side arrays for unused attributes else we'll
- // read bad memory
- if (buffer->IsClientSideArray()) {
- // Don't disable attrib 0 since it's special.
- if (attrib->index() > 0) {
- glDisableVertexAttribArray(attrib->index());
- }
- }
- }
}
}
diff --git a/chromium/gpu/command_buffer/service/vertex_attrib_manager.h b/chromium/gpu/command_buffer/service/vertex_attrib_manager.h
index 4a2796bfeee..ced1e371c2e 100644
--- a/chromium/gpu/command_buffer/service/vertex_attrib_manager.h
+++ b/chromium/gpu/command_buffer/service/vertex_attrib_manager.h
@@ -78,6 +78,8 @@ class GPU_EXPORT VertexAttrib {
return enabled_;
}
+ bool enabled_in_driver() const { return enabled_in_driver_; }
+
// Find the maximum vertex accessed, accounting for instancing.
GLuint MaxVertexAccessed(GLsizei primcount,
GLuint max_vertex_accessed) const {
@@ -136,6 +138,9 @@ class GPU_EXPORT VertexAttrib {
// Whether or not this attribute is enabled.
bool enabled_;
+ // Whether or not this attribute is actually enabled in the driver.
+ bool enabled_in_driver_;
+
// number of components (1, 2, 3, 4)
GLint size_;
@@ -209,6 +214,23 @@ class GPU_EXPORT VertexAttribManager :
attrib_base_type_mask_[loc / 16] |= base_type << shift_bits;
}
+ // Sets the Enable/DisableVertexAttribArray state in the driver. This state
+ // is tracked for the current virtual context. Because of this, virtual
+ // context restore code should not call this function.
+ void SetDriverVertexAttribEnabled(GLuint index, bool enable) {
+ DCHECK_LT(index, vertex_attribs_.size());
+ VertexAttrib& attrib = vertex_attribs_[index];
+
+ if (enable != attrib.enabled_in_driver_) {
+ attrib.enabled_in_driver_ = enable;
+ if (enable) {
+ glEnableVertexAttribArray(index);
+ } else {
+ glDisableVertexAttribArray(index);
+ }
+ }
+ }
+
const std::vector<uint32_t>& attrib_base_type_mask() const {
return attrib_base_type_mask_;
}
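
SetDriverVertexAttribEnabled() shadows the driver-side enable bit per attrib so ValidateBindings() can reconcile it with what the current program actually consumes, issuing glEnableVertexAttribArray/glDisableVertexAttribArray only on transitions. The core of that state caching, reduced to a standalone sketch with illustrative names:

    // Sketch: only touch the GL API when the cached driver state changes.
    struct AttribState {
      bool enabled_in_driver = false;
    };

    void SetDriverEnabled(std::vector<AttribState>& attribs,
                          GLuint index,
                          bool enable) {
      if (attribs[index].enabled_in_driver == enable)
        return;  // No redundant GL call.
      attribs[index].enabled_in_driver = enable;
      if (enable)
        glEnableVertexAttribArray(index);
      else
        glDisableVertexAttribArray(index);
    }
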
diff --git a/chromium/gpu/config/BUILD.gn b/chromium/gpu/config/BUILD.gn
index d9db427b880..ea5d39bb63c 100644
--- a/chromium/gpu/config/BUILD.gn
+++ b/chromium/gpu/config/BUILD.gn
@@ -105,6 +105,8 @@ source_set("config_sources") {
"gpu_info_collector_win.cc",
"gpu_switches.cc",
"gpu_switches.h",
+ "gpu_switching.cc",
+ "gpu_switching.h",
"gpu_test_config.cc",
"gpu_test_config.h",
"gpu_test_expectations_parser.cc",
@@ -113,6 +115,10 @@ source_set("config_sources") {
"gpu_util.h",
]
+ if (is_fuchsia) {
+ sources += [ "gpu_info_collector_fuchsia.cc" ]
+ }
+
sources += process_json_outputs
configs += [
diff --git a/chromium/gpu/config/gpu_control_list.cc b/chromium/gpu/config/gpu_control_list.cc
index 339eaa0c0b2..4174e141d35 100644
--- a/chromium/gpu/config/gpu_control_list.cc
+++ b/chromium/gpu/config/gpu_control_list.cc
@@ -14,6 +14,7 @@
#include "base/strings/stringprintf.h"
#include "base/sys_info.h"
#include "base/values.h"
+#include "build/build_config.h"
#include "gpu/config/gpu_info.h"
#include "third_party/re2/src/re2/re2.h"
@@ -480,6 +481,11 @@ void GpuControlList::Entry::GetFeatureNames(
DCHECK(iter != feature_map.end());
feature_names->AppendString(iter->second);
}
+ for (size_t ii = 0; ii < disabled_extension_size; ++ii) {
+ std::string name =
+ base::StringPrintf("disable(%s)", disabled_extensions[ii]);
+ feature_names->AppendString(name);
+ }
}
GpuControlList::GpuControlList(const GpuControlListData& data)
@@ -497,19 +503,19 @@ GpuControlList::GpuControlList(const GpuControlListData& data)
GpuControlList::~GpuControlList() {
}
-std::set<int> GpuControlList::MakeDecision(GpuControlList::OsType os,
- const std::string& os_version,
- const GPUInfo& gpu_info) {
+std::set<int32_t> GpuControlList::MakeDecision(GpuControlList::OsType os,
+ const std::string& os_version,
+ const GPUInfo& gpu_info) {
active_entries_.clear();
std::set<int> features;
needs_more_info_ = false;
// Has all features permanently in the list without any possibility of
// removal in the future (subset of "features" set).
- std::set<int> permanent_features;
+ std::set<int32_t> permanent_features;
// Has all features absent from "features" set that could potentially be
// included later with more information.
- std::set<int> potential_features;
+ std::set<int32_t> potential_features;
if (os == kOsAny)
os = GetOsType();
@@ -535,7 +541,7 @@ std::set<int> GpuControlList::MakeDecision(GpuControlList::OsType os,
// set. If we don't have enough info for an exception, it's safer if we
// just ignore the exception and assume the exception doesn't apply.
for (size_t jj = 0; jj < entry.feature_size; ++jj) {
- int feature = entry.features[jj];
+ int32_t feature = entry.features[jj];
if (needs_more_info_main) {
if (!features.count(feature))
potential_features.insert(feature);
@@ -557,14 +563,18 @@ std::set<int> GpuControlList::MakeDecision(GpuControlList::OsType os,
return features;
}
-void GpuControlList::GetDecisionEntries(
- std::vector<uint32_t>* entry_ids) const {
- DCHECK(entry_ids);
- entry_ids->clear();
- for (auto index : active_entries_) {
+const std::vector<uint32_t>& GpuControlList::GetActiveEntries() const {
+ return active_entries_;
+}
+
+std::vector<uint32_t> GpuControlList::GetEntryIDsFromIndices(
+ const std::vector<uint32_t>& entry_indices) const {
+ std::vector<uint32_t> ids;
+ for (auto index : entry_indices) {
DCHECK_LT(index, entry_count_);
- entry_ids->push_back(entries_[index].id);
+ ids.push_back(entries_[index].id);
}
+ return ids;
}
std::vector<std::string> GpuControlList::GetDisabledExtensions() {
@@ -582,8 +592,15 @@ std::vector<std::string> GpuControlList::GetDisabledExtensions() {
void GpuControlList::GetReasons(base::ListValue* problem_list,
const std::string& tag) const {
+ GetReasons(problem_list, tag, active_entries_);
+}
+
+void GpuControlList::GetReasons(base::ListValue* problem_list,
+ const std::string& tag,
+ const std::vector<uint32_t>& entries) const {
DCHECK(problem_list);
- for (auto index : active_entries_) {
+ for (auto index : entries) {
+ DCHECK_LT(index, entry_count_);
const Entry& entry = entries_[index];
auto problem = base::MakeUnique<base::DictionaryValue>();
@@ -625,6 +642,8 @@ GpuControlList::OsType GpuControlList::GetOsType() {
return kOsWin;
#elif defined(OS_ANDROID)
return kOsAndroid;
+#elif defined(OS_FUCHSIA)
+ return kOsFuchsia;
#elif defined(OS_LINUX) || defined(OS_OPENBSD)
return kOsLinux;
#elif defined(OS_MACOSX)
diff --git a/chromium/gpu/config/gpu_control_list.h b/chromium/gpu/config/gpu_control_list.h
index 46199f41562..455f32e6f52 100644
--- a/chromium/gpu/config/gpu_control_list.h
+++ b/chromium/gpu/config/gpu_control_list.h
@@ -23,7 +23,15 @@ class GPU_EXPORT GpuControlList {
public:
typedef base::hash_map<int, std::string> FeatureMap;
- enum OsType { kOsLinux, kOsMacosx, kOsWin, kOsChromeOS, kOsAndroid, kOsAny };
+ enum OsType {
+ kOsLinux,
+ kOsMacosx,
+ kOsWin,
+ kOsChromeOS,
+ kOsAndroid,
+ kOsFuchsia,
+ kOsAny
+ };
enum OsFilter {
// In loading, ignore all entries that belong to other OS.
@@ -124,7 +132,7 @@ class GPU_EXPORT GpuControlList {
struct GPU_EXPORT MachineModelInfo {
size_t machine_model_name_size;
- const char** machine_model_names;
+ const char* const* machine_model_names;
Version machine_model_version;
bool Contains(const GPUInfo& gpu_info) const;
@@ -180,7 +188,7 @@ class GPU_EXPORT GpuControlList {
size_t feature_size;
const int* features;
size_t disabled_extension_size;
- const char** disabled_extensions;
+ const char* const* disabled_extensions;
size_t cr_bug_size;
const uint32_t* cr_bugs;
Conditions conditions;
@@ -212,12 +220,15 @@ class GPU_EXPORT GpuControlList {
// system and returns the union of features specified in each entry.
// If os is kOsAny, use the current OS; if os_version is empty, use the
// current OS version.
- std::set<int> MakeDecision(OsType os,
- const std::string& os_version,
- const GPUInfo& gpu_info);
+ std::set<int32_t> MakeDecision(OsType os,
+ const std::string& os_version,
+ const GPUInfo& gpu_info);
- // Collects the active entries from the last MakeDecision() call.
- void GetDecisionEntries(std::vector<uint32_t>* entry_ids) const;
+ // Return the active entry indices from the last MakeDecision() call.
+ const std::vector<uint32_t>& GetActiveEntries() const;
+ // Return corresponding entry IDs from entry indices.
+ std::vector<uint32_t> GetEntryIDsFromIndices(
+ const std::vector<uint32_t>& entry_indices) const;
// Collects all disabled extensions.
std::vector<std::string> GetDisabledExtensions();
@@ -231,6 +242,14 @@ class GPU_EXPORT GpuControlList {
// "crBugs": [1234],
// }
void GetReasons(base::ListValue* problem_list, const std::string& tag) const;
+ // Similar to the previous function, but uses the provided |entries| instead
+ // of the active entries from the last MakeDecision() call, which may never
+ // have happened in this process.
+ // The use case: compute the entries in the GPU process, send them to the
+ // browser process, and call GetReasons() there.
+ void GetReasons(base::ListValue* problem_list,
+ const std::string& tag,
+ const std::vector<uint32_t>& entries) const;
// Return the largest entry id. This is used for histogramming.
uint32_t max_entry_id() const;
@@ -268,7 +287,7 @@ class GPU_EXPORT GpuControlList {
// This records all the entries that are appliable to the current user
// machine. It is updated everytime MakeDecision() is called and is used
// later by GetDecisionEntries().
- std::vector<size_t> active_entries_;
+ std::vector<uint32_t> active_entries_;
uint32_t max_entry_id_;
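Together with the three-argument GetReasons() above, the new accessors split decision-making from reporting. A rough sketch of the intended flow, assuming both processes build the list from the same data (|browser_side_list| and the tag string are placeholders; the IPC in between is outside this file):

// GPU process: decide once, then ship the entry indices.
std::unique_ptr<gpu::GpuDriverBugList> list(gpu::GpuDriverBugList::Create());
list->MakeDecision(gpu::GpuControlList::kOsAny, std::string(), gpu_info);
std::vector<uint32_t> entry_indices = list->GetActiveEntries();
// ... send |entry_indices| to the browser process ...

// Browser process: report reasons without re-running MakeDecision().
base::ListValue problems;
browser_side_list->GetReasons(&problems, "workarounds", entry_indices);
// Entry IDs (the "id" fields from the JSON source), e.g. for histograms:
std::vector<uint32_t> ids =
    browser_side_list->GetEntryIDsFromIndices(entry_indices);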
diff --git a/chromium/gpu/config/gpu_control_list_unittest.cc b/chromium/gpu/config/gpu_control_list_unittest.cc
index 81de77debbb..a8bd4e6d0e5 100644
--- a/chromium/gpu/config/gpu_control_list_unittest.cc
+++ b/chromium/gpu/config/gpu_control_list_unittest.cc
@@ -76,8 +76,7 @@ TEST_F(GpuControlListTest, NeedsMoreInfo) {
GpuControlList::kOsWin, kOsVersion, gpu_info);
EXPECT_EMPTY_SET(features);
EXPECT_TRUE(control_list->needs_more_info());
- std::vector<uint32_t> decision_entries;
- control_list->GetDecisionEntries(&decision_entries);
+ std::vector<uint32_t> decision_entries = control_list->GetActiveEntries();
EXPECT_EQ(0u, decision_entries.size());
gpu_info.driver_version = "11";
@@ -85,8 +84,14 @@ TEST_F(GpuControlListTest, NeedsMoreInfo) {
GpuControlList::kOsWin, kOsVersion, gpu_info);
EXPECT_SINGLE_FEATURE(features, TEST_FEATURE_0);
EXPECT_FALSE(control_list->needs_more_info());
- control_list->GetDecisionEntries(&decision_entries);
+ decision_entries = control_list->GetActiveEntries();
EXPECT_EQ(1u, decision_entries.size());
+ EXPECT_EQ(0u, decision_entries[0]);
+ std::vector<uint32_t> entry_ids =
+ control_list->GetEntryIDsFromIndices(decision_entries);
+ EXPECT_EQ(1u, entry_ids.size());
+ EXPECT_EQ(static_cast<uint32_t>(kGpuControlListTest_NeedsMoreInfo + 1),
+ entry_ids[0]);
}
TEST_F(GpuControlListTest, NeedsMoreInfoForExceptions) {
diff --git a/chromium/gpu/config/gpu_driver_bug_list.cc b/chromium/gpu/config/gpu_driver_bug_list.cc
index 7a32c28569e..e703915176a 100644
--- a/chromium/gpu/config/gpu_driver_bug_list.cc
+++ b/chromium/gpu/config/gpu_driver_bug_list.cc
@@ -67,15 +67,6 @@ void GpuDriverBugList::AppendWorkaroundsFromCommandLine(
std::set<int>* workarounds,
const base::CommandLine& command_line) {
DCHECK(workarounds);
-
- if (command_line.HasSwitch(switches::kGpuDriverBugWorkarounds)) {
- std::string cmd_workarounds_str =
- command_line.GetSwitchValueASCII(switches::kGpuDriverBugWorkarounds);
- std::set<int> cmd_workarounds;
- gpu::StringToFeatureSet(cmd_workarounds_str, &cmd_workarounds);
- workarounds->insert(cmd_workarounds.begin(), cmd_workarounds.end());
- }
-
for (int i = 0; i < NUMBER_OF_GPU_DRIVER_BUG_WORKAROUND_TYPES; i++) {
if (command_line.HasSwitch(kFeatureList[i].name)) {
// Check for disabling workaround flag.
diff --git a/chromium/gpu/config/gpu_driver_bug_list.json b/chromium/gpu/config/gpu_driver_bug_list.json
index 2e3a5bec2f6..37451168891 100644
--- a/chromium/gpu/config/gpu_driver_bug_list.json
+++ b/chromium/gpu/config/gpu_driver_bug_list.json
@@ -1,6 +1,6 @@
{
"name": "gpu driver bug list",
- "version": "10.29",
+ "version": "10.30",
"entries": [
{
"id": 1,
@@ -167,17 +167,6 @@
]
},
{
- "id": 30,
- "cr_bugs": [237931],
- "description": "Multisampling is buggy on OSX when multiple monitors are connected",
- "os": {
- "type": "macosx"
- },
- "features": [
- "disable_multimonitor_multisampling"
- ]
- },
- {
"id": 31,
"cr_bugs": [154715, 10068, 269829, 294779, 285292],
"description": "The Mali-Txxx driver does not guarantee flush ordering",
@@ -835,10 +824,14 @@
{
"id": 100,
"description": "Disable Direct3D11 on systems with AMD switchable graphics",
- "cr_bugs": [451420],
+ "cr_bugs": [451420, 755722],
"os": {
"type": "win"
},
+ "driver_date": {
+ "op": "<",
+ "value": "2016.6"
+ },
"multi_gpu_style": "amd_switchable",
"features": [
"disable_d3d11"
@@ -2571,6 +2564,21 @@
]
},
{
+ "id": 237,
+ "description": "eglSwapBuffers intermittently fails on Android when app goes to background",
+ "cr_bugs": [744678],
+ "os": {
+ "type": "android",
+ "version": {
+ "op": "<",
+ "value": "9.0"
+ }
+ },
+ "features": [
+ "dont_disable_webgl_when_compositor_context_lost"
+ ]
+ },
+ {
"id": 238,
"description": "On Intel GPUs MSAA performance is not acceptable for GPU rasterization",
"cr_bugs": [527565, 751919],
@@ -2584,6 +2592,39 @@
]
},
{
+ "id": 239,
+ "description": "Reset TexImage2D base level to 0 on Intel Mac 10.12.4",
+ "cr_bugs": [705865],
+ "os": {
+ "type": "macosx",
+ "version": {
+ "op": ">=",
+ "value": "10.12.4"
+ }
+ },
+ "vendor_id": "0x8086",
+ "features": [
+ "reset_teximage2d_base_level"
+ ]
+ },
+ {
+ "id": 240,
+ "cr_bugs": [750306, 764526],
+ "description": "glGetQueryObject(GL_QUERY_RESULT_AVAILABLE) blocks unexpectedly on Adreno",
+ "os": {
+ "type": "android",
+ "version": {
+ "op": "<",
+ "value": "9.0"
+ }
+ },
+ "gl_renderer": "Adreno \\(TM\\) 5[34]0",
+ "disabled_extensions": [
+ "GL_EXT_disjoint_timer_query",
+ "GL_EXT_disjoint_timer_query_webgl2"
+ ]
+ },
+ {
"id": 241,
"description": "On Intel GPUs MSAA performance is not acceptable for GPU rasterization. Duplicate of 132 for Android",
"cr_bugs": [759471],
diff --git a/chromium/gpu/config/gpu_driver_bug_workaround_type.h b/chromium/gpu/config/gpu_driver_bug_workaround_type.h
index 20e347220e3..edd147f9ee0 100644
--- a/chromium/gpu/config/gpu_driver_bug_workaround_type.h
+++ b/chromium/gpu/config/gpu_driver_bug_workaround_type.h
@@ -35,8 +35,6 @@
clear_uniforms_before_first_program_use) \
GPU_OP(COUNT_ALL_IN_VARYINGS_PACKING, \
count_all_in_varyings_packing) \
- GPU_OP(CREATE_DEFAULT_GL_CONTEXT, \
- create_default_gl_context) \
GPU_OP(DECODE_ENCODE_SRGB_FOR_GENERATEMIPMAP, \
decode_encode_srgb_for_generatemipmap) \
GPU_OP(DISABLE_ACCELERATED_VPX_DECODE, \
@@ -73,8 +71,6 @@
disable_gl_rgb_format) \
GPU_OP(DISABLE_LARGER_THAN_SCREEN_OVERLAYS, \
disable_larger_than_screen_overlays) \
- GPU_OP(DISABLE_MULTIMONITOR_MULTISAMPLING, \
- disable_multimonitor_multisampling) \
GPU_OP(DISABLE_NON_EMPTY_POST_SUB_BUFFERS_FOR_ONSCREEN_SURFACES, \
disable_non_empty_post_sub_buffers_for_onscreen_surfaces) \
GPU_OP(DISABLE_NV12_DXGI_VIDEO, \
@@ -103,6 +99,8 @@
disable_webgl_rgb_multisampling_usage) \
GPU_OP(DISALLOW_LARGE_INSTANCED_DRAW, \
disallow_large_instanced_draw) \
+ GPU_OP(DONT_DISABLE_WEBGL_WHEN_COMPOSITOR_CONTEXT_LOST, \
+ dont_disable_webgl_when_compositor_context_lost) \
GPU_OP(DONT_REMOVE_INVARIANT_FOR_FRAGMENT_INPUT, \
dont_remove_invariant_for_fragment_input) \
GPU_OP(ETC1_POWER_OF_TWO_ONLY, \
@@ -171,6 +169,8 @@
remove_pow_with_constant_exponent) \
GPU_OP(RESET_BASE_MIPMAP_LEVEL_BEFORE_TEXSTORAGE, \
reset_base_mipmap_level_before_texstorage) \
+ GPU_OP(RESET_TEXIMAGE2D_BASE_LEVEL, \
+ reset_teximage2d_base_level) \
GPU_OP(RESTORE_SCISSOR_ON_FBO_CHANGE, \
restore_scissor_on_fbo_change) \
GPU_OP(REVERSE_POINT_SPRITE_COORD_ORIGIN, \
diff --git a/chromium/gpu/config/gpu_driver_bug_workarounds.cc b/chromium/gpu/config/gpu_driver_bug_workarounds.cc
index a13dba3120b..e46a32cbc58 100644
--- a/chromium/gpu/config/gpu_driver_bug_workarounds.cc
+++ b/chromium/gpu/config/gpu_driver_bug_workarounds.cc
@@ -4,23 +4,17 @@
#include "gpu/config/gpu_driver_bug_workarounds.h"
-#include "base/command_line.h"
-#include "base/strings/string_number_conversions.h"
-#include "base/strings/string_split.h"
-#include "gpu/config/gpu_switches.h"
+#include <algorithm>
+
+#include "base/logging.h"
namespace {
-// Process a string of wordaround type IDs (seperated by ',') and set up
-// the corresponding Workaround flags.
-void StringToWorkarounds(const std::string& types,
+// Construct GpuDriverBugWorkarounds from a set of enabled workaround IDs.
+void IntSetToWorkarounds(const std::vector<int32_t>& enabled_workarounds,
gpu::GpuDriverBugWorkarounds* workarounds) {
DCHECK(workarounds);
- for (const base::StringPiece& piece : base::SplitStringPiece(
- types, ",", base::TRIM_WHITESPACE, base::SPLIT_WANT_ALL)) {
- int number = 0;
- bool succeed = base::StringToInt(piece, &number);
- DCHECK(succeed);
- switch (number) {
+ for (auto ID : enabled_workarounds) {
+ switch (ID) {
#define GPU_OP(type, name) \
case gpu::type: \
workarounds->name = true; \
@@ -47,39 +41,23 @@ void StringToWorkarounds(const std::string& types,
workarounds->max_copy_texture_chromium_size = 262144;
}
+GLint LowerMax(GLint max0, GLint max1) {
+ if (max0 > 0 && max1 > 0)
+ return std::min(max0, max1);
+ if (max0 > 0)
+ return max0;
+ return max1;
+}
+
} // anonymous namespace
namespace gpu {
-GpuDriverBugWorkarounds::GpuDriverBugWorkarounds()
- :
-#define GPU_OP(type, name) name(false),
- GPU_DRIVER_BUG_WORKAROUNDS(GPU_OP)
-#undef GPU_OP
- max_texture_size(0),
- max_fragment_uniform_vectors(0),
- max_varying_vectors(0),
- max_vertex_uniform_vectors(0),
- max_copy_texture_chromium_size(0) {
-}
+GpuDriverBugWorkarounds::GpuDriverBugWorkarounds() {}
GpuDriverBugWorkarounds::GpuDriverBugWorkarounds(
- const base::CommandLine* command_line)
- :
-#define GPU_OP(type, name) name(false),
- GPU_DRIVER_BUG_WORKAROUNDS(GPU_OP)
-#undef GPU_OP
- max_texture_size(0),
- max_fragment_uniform_vectors(0),
- max_varying_vectors(0),
- max_vertex_uniform_vectors(0),
- max_copy_texture_chromium_size(0) {
- if (!command_line)
- return;
-
- std::string types =
- command_line->GetSwitchValueASCII(switches::kGpuDriverBugWorkarounds);
- StringToWorkarounds(types, this);
+ const std::vector<int>& enabled_driver_bug_workarounds) {
+ IntSetToWorkarounds(enabled_driver_bug_workarounds, this);
}
GpuDriverBugWorkarounds::GpuDriverBugWorkarounds(
@@ -87,4 +65,20 @@ GpuDriverBugWorkarounds::GpuDriverBugWorkarounds(
GpuDriverBugWorkarounds::~GpuDriverBugWorkarounds() {}
+void GpuDriverBugWorkarounds::Append(const GpuDriverBugWorkarounds& extra) {
+#define GPU_OP(type, name) name |= extra.name;
+ GPU_DRIVER_BUG_WORKAROUNDS(GPU_OP)
+#undef GPU_OP
+
+ max_texture_size = LowerMax(max_texture_size, extra.max_texture_size);
+ max_fragment_uniform_vectors = LowerMax(max_fragment_uniform_vectors,
+ extra.max_fragment_uniform_vectors);
+ max_varying_vectors =
+ LowerMax(max_varying_vectors, extra.max_varying_vectors);
+ max_vertex_uniform_vectors =
+ LowerMax(max_vertex_uniform_vectors, extra.max_vertex_uniform_vectors);
+ max_copy_texture_chromium_size = LowerMax(
+ max_copy_texture_chromium_size, extra.max_copy_texture_chromium_size);
+}
+
} // namespace gpu
diff --git a/chromium/gpu/config/gpu_driver_bug_workarounds.h b/chromium/gpu/config/gpu_driver_bug_workarounds.h
index 8194cd81d15..18cf6eeea65 100644
--- a/chromium/gpu/config/gpu_driver_bug_workarounds.h
+++ b/chromium/gpu/config/gpu_driver_bug_workarounds.h
@@ -5,6 +5,8 @@
#ifndef GPU_CONFIG_GPU_DRIVER_BUG_WORKAROUNDS_H_
#define GPU_CONFIG_GPU_DRIVER_BUG_WORKAROUNDS_H_
+#include <vector>
+
#include "base/macros.h"
#include "build/build_config.h"
#include "gpu/config/gpu_driver_bug_workaround_type.h"
@@ -13,31 +15,32 @@
// Forwardly declare a few GL types to avoid including GL header files.
typedef int GLint;
-namespace base {
-class CommandLine;
-}
-
namespace gpu {
class GPU_EXPORT GpuDriverBugWorkarounds {
public:
GpuDriverBugWorkarounds();
- explicit GpuDriverBugWorkarounds(const base::CommandLine* command_line);
+ explicit GpuDriverBugWorkarounds(const std::vector<int32_t>&);
GpuDriverBugWorkarounds(const GpuDriverBugWorkarounds& other);
~GpuDriverBugWorkarounds();
-#define GPU_OP(type, name) bool name;
+ // For boolean members, || is applied.
+ // For int members, the min() is applied if both are non-zero; if one is
+ // zero, then the other is applied.
+ void Append(const GpuDriverBugWorkarounds& extra);
+
+#define GPU_OP(type, name) bool name = false;
GPU_DRIVER_BUG_WORKAROUNDS(GPU_OP)
#undef GPU_OP
// Note: 0 here means use driver limit.
- GLint max_texture_size;
- GLint max_fragment_uniform_vectors;
- GLint max_varying_vectors;
- GLint max_vertex_uniform_vectors;
- GLint max_copy_texture_chromium_size;
+ GLint max_texture_size = 0;
+ GLint max_fragment_uniform_vectors = 0;
+ GLint max_varying_vectors = 0;
+ GLint max_vertex_uniform_vectors = 0;
+ GLint max_copy_texture_chromium_size = 0;
};
} // namespace gpu
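The in-class initializers plus Append() make the merge semantics explicit. A small illustrative example (the values are arbitrary, not taken from the patch):

std::vector<int32_t> ids = {gpu::DISABLE_D3D11};
gpu::GpuDriverBugWorkarounds a(ids);   // sets a.disable_d3d11 = true
gpu::GpuDriverBugWorkarounds b;
b.max_texture_size = 4096;             // 0 would mean "use the driver limit"
a.Append(b);
// Booleans are OR-ed; GLint limits take the lower non-zero value. So here
// a.disable_d3d11 stays true and a.max_texture_size becomes 4096.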
diff --git a/chromium/gpu/config/gpu_dx_diagnostics_win.cc b/chromium/gpu/config/gpu_dx_diagnostics_win.cc
index 0c09ded59cf..e36ecfe0b4d 100644
--- a/chromium/gpu/config/gpu_dx_diagnostics_win.cc
+++ b/chromium/gpu/config/gpu_dx_diagnostics_win.cc
@@ -11,7 +11,7 @@
#include "base/strings/string_number_conversions.h"
#include "base/strings/utf_string_conversions.h"
-#include "base/win/scoped_com_initializer.h"
+#include "base/win/com_init_util.h"
#include "gpu/config/gpu_info_collector.h"
namespace gpu {
@@ -92,16 +92,15 @@ void RecurseDiagnosticTree(DxDiagNode* output,
} // namespace anonymous
bool GetDxDiagnostics(DxDiagNode* output) {
+ // CLSID_DxDiagProvider is configured as an STA only object.
+ base::win::AssertComApartmentType(base::win::ComApartmentType::STA);
+
HRESULT hr;
bool success = false;
- base::win::ScopedCOMInitializer com_initializer;
-
IDxDiagProvider* provider = NULL;
- hr = CoCreateInstance(CLSID_DxDiagProvider,
- NULL,
- CLSCTX_INPROC_SERVER,
- IID_IDxDiagProvider,
- reinterpret_cast<void**>(&provider));
+ hr = CoCreateInstance(CLSID_DxDiagProvider, NULL, CLSCTX_INPROC_SERVER,
+ IID_IDxDiagProvider,
+ reinterpret_cast<void**>(&provider));
if (SUCCEEDED(hr)) {
DXDIAG_INIT_PARAMS params = { sizeof(params) };
params.dwDxDiagHeaderVersion = DXDIAG_DX9_SDK_VERSION;
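GetDxDiagnostics() no longer spins up COM itself; it now requires the calling thread to already be in a single-threaded apartment. A hedged sketch of a conforming call site (the wrapper function here is an assumption):

#include "base/win/scoped_com_initializer.h"

void CollectDxDiag(gpu::DxDiagNode* node) {
  // ScopedCOMInitializer defaults to an STA apartment, which satisfies the
  // AssertComApartmentType(STA) check inside GetDxDiagnostics().
  base::win::ScopedCOMInitializer com_initializer;
  gpu::GetDxDiagnostics(node);
}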
diff --git a/chromium/gpu/config/gpu_feature_info.cc b/chromium/gpu/config/gpu_feature_info.cc
index 5bdb9ea683b..eee64b5ee93 100644
--- a/chromium/gpu/config/gpu_feature_info.cc
+++ b/chromium/gpu/config/gpu_feature_info.cc
@@ -4,6 +4,11 @@
#include "gpu/config/gpu_feature_info.h"
+#include <algorithm>
+
+#include "gpu/config/gpu_driver_bug_workaround_type.h"
+#include "ui/gl/gl_context.h"
+
namespace gpu {
GpuFeatureInfo::GpuFeatureInfo() {
@@ -11,4 +16,34 @@ GpuFeatureInfo::GpuFeatureInfo() {
status = kGpuFeatureStatusUndefined;
}
+GpuFeatureInfo::GpuFeatureInfo(const GpuFeatureInfo&) = default;
+
+GpuFeatureInfo::GpuFeatureInfo(GpuFeatureInfo&&) = default;
+
+GpuFeatureInfo::~GpuFeatureInfo() {}
+
+GpuFeatureInfo& GpuFeatureInfo::operator=(const GpuFeatureInfo&) = default;
+
+GpuFeatureInfo& GpuFeatureInfo::operator=(GpuFeatureInfo&&) = default;
+
+void GpuFeatureInfo::ApplyToGLContext(gl::GLContext* gl_context) const {
+ DCHECK(gl_context);
+ gl::GLWorkarounds gl_workarounds;
+ if (IsWorkaroundEnabled(gpu::CLEAR_TO_ZERO_OR_ONE_BROKEN)) {
+ gl_workarounds.clear_to_zero_or_one_broken = true;
+ }
+ if (IsWorkaroundEnabled(RESET_TEXIMAGE2D_BASE_LEVEL)) {
+ gl_workarounds.reset_teximage2d_base_level = true;
+ }
+ gl_context->SetGLWorkarounds(gl_workarounds);
+ gl_context->SetDisabledGLExtensions(this->disabled_extensions);
+}
+
+bool GpuFeatureInfo::IsWorkaroundEnabled(int32_t workaround) const {
+ return std::find(this->enabled_gpu_driver_bug_workarounds.begin(),
+ this->enabled_gpu_driver_bug_workarounds.end(),
+ workaround) !=
+ this->enabled_gpu_driver_bug_workarounds.end();
+}
+
} // namespace gpu
diff --git a/chromium/gpu/config/gpu_feature_info.h b/chromium/gpu/config/gpu_feature_info.h
index fd4db8e4709..0074f92e182 100644
--- a/chromium/gpu/config/gpu_feature_info.h
+++ b/chromium/gpu/config/gpu_feature_info.h
@@ -5,9 +5,16 @@
#ifndef GPU_CONFIG_GPU_FEATURE_INFO_H_
#define GPU_CONFIG_GPU_FEATURE_INFO_H_
+#include <string>
+#include <vector>
+
#include "gpu/config/gpu_feature_type.h"
#include "gpu/gpu_export.h"
+namespace gl {
+class GLContext;
+} // namespace gl
+
namespace gpu {
// Flags indicating the status of a GPU feature (see gpu_feature_type.h).
@@ -19,11 +26,30 @@ enum GpuFeatureStatus {
kGpuFeatureStatusMax
};
-// A vector of GpuFeatureStatus values, one per GpuFeatureType. By default, all
-// features are disabled.
struct GPU_EXPORT GpuFeatureInfo {
GpuFeatureInfo();
+ GpuFeatureInfo(const GpuFeatureInfo&);
+ GpuFeatureInfo(GpuFeatureInfo&&);
+ ~GpuFeatureInfo();
+
+ // Set the GL workarounds and disabled GL extensions to the context.
+ void ApplyToGLContext(gl::GLContext* context) const;
+
+ bool IsWorkaroundEnabled(int32_t workaround) const;
+
+ GpuFeatureInfo& operator=(const GpuFeatureInfo&);
+ GpuFeatureInfo& operator=(GpuFeatureInfo&&);
+
+ // A vector of GpuFeatureStatus values, one per GpuFeatureType.
+ // By default, all features are disabled.
GpuFeatureStatus status_values[NUMBER_OF_GPU_FEATURE_TYPES];
+ // Active gpu driver bug workaround IDs.
+ // See gpu/config/gpu_driver_bug_workaround_type.h for ID mappings.
+ std::vector<int32_t> enabled_gpu_driver_bug_workarounds;
+ // Disabled extensions separated by whitespaces.
+ std::string disabled_extensions;
+ // Applied gpu driver bug list entry indices.
+ std::vector<uint32_t> applied_gpu_driver_bug_list_entries;
};
} // namespace gpu
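A short usage sketch for the two new methods (the surrounding variables are assumed; the egl::Context change later in this patch applies the same pattern to a freshly created context):

scoped_refptr<gl::GLContext> context =
    gl::init::CreateGLContext(nullptr, surface, attribs);
if (context) {
  // Installs the GL workarounds and the disabled-extension string.
  gpu_feature_info.ApplyToGLContext(context.get());
}
if (gpu_feature_info.IsWorkaroundEnabled(gpu::RESET_TEXIMAGE2D_BASE_LEVEL)) {
  // Branch on an individual workaround ID where needed.
}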
diff --git a/chromium/gpu/config/gpu_info.cc b/chromium/gpu/config/gpu_info.cc
index 4285645781e..7226c966d96 100644
--- a/chromium/gpu/config/gpu_info.cc
+++ b/chromium/gpu/config/gpu_info.cc
@@ -81,7 +81,7 @@ GPUInfo::GPUInfo()
dx_diagnostics_info_state(kCollectInfoNone),
#endif
jpeg_decode_accelerator_supported(false)
-#if defined(USE_X11) && !defined(OS_CHROMEOS)
+#if defined(USE_X11)
,
system_visual(0),
rgba_visual(0)
@@ -146,7 +146,7 @@ void GPUInfo::EnumerateFields(Enumerator* enumerator) const {
VideoEncodeAcceleratorSupportedProfiles
video_encode_accelerator_supported_profiles;
bool jpeg_decode_accelerator_supported;
-#if defined(USE_X11) && !defined(OS_CHROMEOS)
+#if defined(USE_X11)
VisualID system_visual;
VisualID rgba_visual;
#endif
@@ -213,7 +213,7 @@ void GPUInfo::EnumerateFields(Enumerator* enumerator) const {
EnumerateVideoEncodeAcceleratorSupportedProfile(profile, enumerator);
enumerator->AddBool("jpegDecodeAcceleratorSupported",
jpeg_decode_accelerator_supported);
-#if defined(USE_X11) && !defined(OS_CHROMEOS)
+#if defined(USE_X11)
enumerator->AddInt64("systemVisual", system_visual);
enumerator->AddInt64("rgbaVisual", rgba_visual);
#endif
diff --git a/chromium/gpu/config/gpu_info.h b/chromium/gpu/config/gpu_info.h
index abc127265cf..ce6f2424b81 100644
--- a/chromium/gpu/config/gpu_info.h
+++ b/chromium/gpu/config/gpu_info.h
@@ -20,7 +20,7 @@
#include "gpu/gpu_export.h"
#include "ui/gfx/geometry/size.h"
-#if defined(USE_X11) && !defined(OS_CHROMEOS)
+#if defined(USE_X11)
typedef unsigned long VisualID;
#endif
@@ -247,7 +247,7 @@ struct GPU_EXPORT GPUInfo {
video_encode_accelerator_supported_profiles;
bool jpeg_decode_accelerator_supported;
-#if defined(USE_X11) && !defined(OS_CHROMEOS)
+#if defined(USE_X11)
VisualID system_visual;
VisualID rgba_visual;
#endif
diff --git a/chromium/gpu/config/gpu_info_collector.cc b/chromium/gpu/config/gpu_info_collector.cc
index 29c50bbb3d6..b28048b803b 100644
--- a/chromium/gpu/config/gpu_info_collector.cc
+++ b/chromium/gpu/config/gpu_info_collector.cc
@@ -27,7 +27,7 @@
#include "ui/gl/gl_version_info.h"
#include "ui/gl/init/gl_factory.h"
-#if defined(USE_X11) && !defined(OS_CHROMEOS)
+#if defined(USE_X11)
#include "ui/gl/gl_visual_picker_glx.h"
#endif
@@ -123,6 +123,7 @@ CollectInfoResult CollectGraphicsInfoGL(GPUInfo* gpu_info) {
gpu_info->gl_renderer = GetGLString(GL_RENDERER);
gpu_info->gl_vendor = GetGLString(GL_VENDOR);
gpu_info->gl_version = GetGLString(GL_VERSION);
+ std::string glsl_version_string = GetGLString(GL_SHADING_LANGUAGE_VERSION);
base::CommandLine* command_line = base::CommandLine::ForCurrentProcess();
if (command_line->HasSwitch(switches::kGpuTestingGLVendor)) {
@@ -139,23 +140,19 @@ CollectInfoResult CollectGraphicsInfoGL(GPUInfo* gpu_info) {
}
gpu_info->gl_extensions = gl::GetGLExtensionsFromCurrentContext();
- std::string glsl_version_string = GetGLString(GL_SHADING_LANGUAGE_VERSION);
+ gl::ExtensionSet extension_set =
+ gl::MakeExtensionSet(gpu_info->gl_extensions);
gl::GLVersionInfo gl_info(gpu_info->gl_version.c_str(),
- gpu_info->gl_renderer.c_str(),
- gpu_info->gl_extensions.c_str());
+ gpu_info->gl_renderer.c_str(), extension_set);
GLint max_samples = 0;
if (gl_info.IsAtLeastGL(3, 0) || gl_info.IsAtLeastGLES(3, 0) ||
- gpu_info->gl_extensions.find("GL_ANGLE_framebuffer_multisample") !=
- std::string::npos ||
- gpu_info->gl_extensions.find("GL_APPLE_framebuffer_multisample") !=
- std::string::npos ||
- gpu_info->gl_extensions.find("GL_EXT_framebuffer_multisample") !=
- std::string::npos ||
- gpu_info->gl_extensions.find("GL_EXT_multisampled_render_to_texture") !=
- std::string::npos ||
- gpu_info->gl_extensions.find("GL_NV_framebuffer_multisample") !=
- std::string::npos) {
+ gl::HasExtension(extension_set, "GL_ANGLE_framebuffer_multisample") ||
+ gl::HasExtension(extension_set, "GL_APPLE_framebuffer_multisample") ||
+ gl::HasExtension(extension_set, "GL_EXT_framebuffer_multisample") ||
+ gl::HasExtension(extension_set,
+ "GL_EXT_multisampled_render_to_texture") ||
+ gl::HasExtension(extension_set, "GL_NV_framebuffer_multisample")) {
glGetIntegerv(GL_MAX_SAMPLES, &max_samples);
}
gpu_info->max_msaa_samples = base::IntToString(max_samples);
@@ -170,15 +167,15 @@ CollectInfoResult CollectGraphicsInfoGL(GPUInfo* gpu_info) {
}
bool supports_robustness =
- gpu_info->gl_extensions.find("GL_EXT_robustness") != std::string::npos ||
- gpu_info->gl_extensions.find("GL_KHR_robustness") != std::string::npos ||
- gpu_info->gl_extensions.find("GL_ARB_robustness") != std::string::npos;
+ gl::HasExtension(extension_set, "GL_EXT_robustness") ||
+ gl::HasExtension(extension_set, "GL_KHR_robustness") ||
+ gl::HasExtension(extension_set, "GL_ARB_robustness");
if (supports_robustness) {
glGetIntegerv(GL_RESET_NOTIFICATION_STRATEGY_ARB,
reinterpret_cast<GLint*>(&gpu_info->gl_reset_notification_strategy));
}
-#if defined(USE_X11) && !defined(OS_CHROMEOS)
+#if defined(USE_X11)
if (gl::GetGLImplementation() == gl::kGLImplementationDesktopGL) {
gl::GLVisualPickerGLX* visual_picker = gl::GLVisualPickerGLX::GetInstance();
gpu_info->system_visual = visual_picker->system_visual().visualid;
@@ -242,7 +239,7 @@ void MergeGPUInfoGL(GPUInfo* basic_gpu_info,
basic_gpu_info->jpeg_decode_accelerator_supported =
context_gpu_info.jpeg_decode_accelerator_supported;
-#if defined(USE_X11) && !defined(OS_CHROMEOS)
+#if defined(USE_X11)
basic_gpu_info->system_visual = context_gpu_info.system_visual;
basic_gpu_info->rgba_visual = context_gpu_info.rgba_visual;
#endif
@@ -309,7 +306,15 @@ void IdentifyActiveGPU(GPUInfo* gpu_info) {
void FillGPUInfoFromSystemInfo(GPUInfo* gpu_info,
angle::SystemInfo* system_info) {
- DCHECK(system_info->primaryGPUIndex >= 0);
+ // Fill gpu_info even when angle::GetSystemInfo() fails so that partial
+ // information is still available when GPU info collection fails. Handle
+ // malformed angle::SystemInfo first.
+ if (system_info->gpus.empty()) {
+ return;
+ }
+ if (system_info->primaryGPUIndex < 0) {
+ system_info->primaryGPUIndex = 0;
+ }
angle::GPUDeviceInfo* primary =
&system_info->gpus[system_info->primaryGPUIndex];
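Two behavioral notes on this file: extension checks now go through gl::ExtensionSet rather than substring searches on the raw string, and FillGPUInfoFromSystemInfo() now tolerates an empty or malformed angle::SystemInfo, so the per-platform collectors below can call it unconditionally. The extension pattern in isolation:

gl::ExtensionSet extension_set =
    gl::MakeExtensionSet(gpu_info->gl_extensions);
bool has_msaa_ext =
    gl::HasExtension(extension_set, "GL_EXT_framebuffer_multisample");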
diff --git a/chromium/gpu/config/gpu_info_collector_fuchsia.cc b/chromium/gpu/config/gpu_info_collector_fuchsia.cc
new file mode 100644
index 00000000000..95dac4c9f8e
--- /dev/null
+++ b/chromium/gpu/config/gpu_info_collector_fuchsia.cc
@@ -0,0 +1,31 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "gpu/config/gpu_info_collector.h"
+
+namespace gpu {
+
+CollectInfoResult CollectContextGraphicsInfo(GPUInfo* gpu_info) {
+ // TODO(crbug.com/707031): Implement this.
+ NOTIMPLEMENTED();
+ return kCollectInfoFatalFailure;
+}
+
+CollectInfoResult CollectBasicGraphicsInfo(GPUInfo* gpu_info) {
+ // TODO(crbug.com/707031): Implement this.
+ NOTIMPLEMENTED();
+ return kCollectInfoFatalFailure;
+}
+
+CollectInfoResult CollectDriverInfoGL(GPUInfo* gpu_info) {
+ // TODO(crbug.com/707031): Implement this.
+ NOTIMPLEMENTED();
+ return kCollectInfoFatalFailure;
+}
+
+void MergeGPUInfo(GPUInfo* basic_gpu_info, const GPUInfo& context_gpu_info) {
+ MergeGPUInfoGL(basic_gpu_info, context_gpu_info);
+}
+
+} // namespace gpu
diff --git a/chromium/gpu/config/gpu_info_collector_linux.cc b/chromium/gpu/config/gpu_info_collector_linux.cc
index a5d1b66cfe6..b15f9905654 100644
--- a/chromium/gpu/config/gpu_info_collector_linux.cc
+++ b/chromium/gpu/config/gpu_info_collector_linux.cc
@@ -26,9 +26,11 @@ CollectInfoResult CollectBasicGraphicsInfo(GPUInfo* gpu_info) {
DCHECK(gpu_info);
angle::SystemInfo system_info;
- if (angle::GetSystemInfo(&system_info)) {
+ bool success = angle::GetSystemInfo(&system_info);
+ FillGPUInfoFromSystemInfo(gpu_info, &system_info);
+
+ if (success) {
gpu_info->basic_info_state = kCollectInfoSuccess;
- FillGPUInfoFromSystemInfo(gpu_info, &system_info);
} else {
gpu_info->basic_info_state = kCollectInfoNonFatalFailure;
}
diff --git a/chromium/gpu/config/gpu_info_collector_mac.mm b/chromium/gpu/config/gpu_info_collector_mac.mm
index ab6f732fe2e..0488da5cf2a 100644
--- a/chromium/gpu/config/gpu_info_collector_mac.mm
+++ b/chromium/gpu/config/gpu_info_collector_mac.mm
@@ -23,9 +23,11 @@ CollectInfoResult CollectBasicGraphicsInfo(GPUInfo* gpu_info) {
DCHECK(gpu_info);
angle::SystemInfo system_info;
- if (angle::GetSystemInfo(&system_info)) {
+ bool success = angle::GetSystemInfo(&system_info);
+ FillGPUInfoFromSystemInfo(gpu_info, &system_info);
+
+ if (success) {
gpu_info->basic_info_state = kCollectInfoSuccess;
- FillGPUInfoFromSystemInfo(gpu_info, &system_info);
} else {
gpu_info->basic_info_state = kCollectInfoNonFatalFailure;
}
diff --git a/chromium/gpu/config/gpu_info_collector_win.cc b/chromium/gpu/config/gpu_info_collector_win.cc
index 7307f6d260a..fa9b38affff 100644
--- a/chromium/gpu/config/gpu_info_collector_win.cc
+++ b/chromium/gpu/config/gpu_info_collector_win.cc
@@ -34,7 +34,6 @@
#include "base/trace_event/trace_event.h"
#include "base/win/scoped_com_initializer.h"
#include "base/win/scoped_comptr.h"
-#include "base/win/windows_version.h"
#include "ui/gl/gl_implementation.h"
#include "ui/gl/gl_surface_egl.h"
@@ -84,18 +83,8 @@ CollectInfoResult CollectDriverInfoD3D(const std::wstring& device_id,
{0xbf, 0xc1, 0x08, 0x00, 0x2b, 0xe1, 0x03, 0x18}};
// create device info for the display device
- HDEVINFO device_info;
- if (base::win::GetVersion() <= base::win::VERSION_XP) {
- // Collection of information on all adapters is much slower on XP (almost
- // 100ms), and not very useful (as it's not going to use the GPU anyway), so
- // just collect information on the current device. http://crbug.com/456178
- device_info =
- SetupDiGetClassDevsW(NULL, device_id.c_str(), NULL,
- DIGCF_PRESENT | DIGCF_PROFILE | DIGCF_ALLCLASSES);
- } else {
- device_info =
- SetupDiGetClassDevsW(&display_class, NULL, NULL, DIGCF_PRESENT);
- }
+ HDEVINFO device_info =
+ ::SetupDiGetClassDevs(&display_class, NULL, NULL, DIGCF_PRESENT);
if (device_info == INVALID_HANDLE_VALUE) {
LOG(ERROR) << "Creating device info failed";
return kCollectInfoNonFatalFailure;
diff --git a/chromium/gpu/config/gpu_switches.cc b/chromium/gpu/config/gpu_switches.cc
index 83b39fe0714..ec8e9b8a72d 100644
--- a/chromium/gpu/config/gpu_switches.cc
+++ b/chromium/gpu/config/gpu_switches.cc
@@ -6,6 +6,13 @@
namespace switches {
+// Passes whether the system has AMD switchable dual GPUs from the browser
+// process to the GPU process.
+const char kAMDSwitchable[] = "amd-switchable";
+
+// Disable workarounds for various GPU driver bugs.
+const char kDisableGpuDriverBugWorkarounds[] =
+ "disable-gpu-driver-bug-workarounds";
+
// Disable GPU rasterization, i.e. rasterize on the CPU only.
// Overrides the kEnableGpuRasterization and kForceGpuRasterization flags.
const char kDisableGpuRasterization[] = "disable-gpu-rasterization";
@@ -15,6 +22,10 @@ const char kDisableGpuRasterization[] = "disable-gpu-rasterization";
// impl-side painting.
const char kEnableGpuRasterization[] = "enable-gpu-rasterization";
+// Turns on out of process raster for the renderer whenever gpu raster
+// would have been used. Enables the chromium_raster_transport extension.
+const char kEnableOOPRasterization[] = "enable-oop-rasterization";
+
// Passes active gpu vendor id from browser process to GPU process.
const char kGpuActiveVendorID[] = "gpu-active-vendor-id";
@@ -24,9 +35,6 @@ const char kGpuActiveDeviceID[] = "gpu-active-device-id";
// Passes gpu device_id from browser process to GPU process.
const char kGpuDeviceID[] = "gpu-device-id";
-// Pass a set of GpuDriverBugWorkaroundType ids, seperated by ','.
-const char kGpuDriverBugWorkarounds[] = "gpu-driver-bug-workarounds";
-
// Passes gpu driver_vendor from browser process to GPU process.
const char kGpuDriverVendor[] = "gpu-driver-vendor";
diff --git a/chromium/gpu/config/gpu_switches.h b/chromium/gpu/config/gpu_switches.h
index f2022266085..312c9a8d18a 100644
--- a/chromium/gpu/config/gpu_switches.h
+++ b/chromium/gpu/config/gpu_switches.h
@@ -9,12 +9,14 @@
namespace switches {
+GPU_EXPORT extern const char kAMDSwitchable[];
+GPU_EXPORT extern const char kDisableGpuDriverBugWorkarounds[];
GPU_EXPORT extern const char kDisableGpuRasterization[];
GPU_EXPORT extern const char kEnableGpuRasterization[];
+GPU_EXPORT extern const char kEnableOOPRasterization[];
GPU_EXPORT extern const char kGpuActiveVendorID[];
GPU_EXPORT extern const char kGpuActiveDeviceID[];
GPU_EXPORT extern const char kGpuDeviceID[];
-GPU_EXPORT extern const char kGpuDriverBugWorkarounds[];
GPU_EXPORT extern const char kGpuDriverVendor[];
GPU_EXPORT extern const char kGpuDriverVersion[];
GPU_EXPORT extern const char kGpuDriverDate[];
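With --gpu-driver-bug-workarounds gone, workaround IDs travel inside GpuFeatureInfo instead of the command line, and the remaining switches are coarser. Illustrative uses of the new switches (the call sites are assumptions):

// Browser process, when composing the GPU process command line:
if (gpu_info.amd_switchable)
  gpu_launch_command_line->AppendSwitch(switches::kAMDSwitchable);

// Opting out of the driver bug workaround machinery entirely:
command_line->AppendSwitch(switches::kDisableGpuDriverBugWorkarounds);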
diff --git a/chromium/gpu/config/gpu_switching.cc b/chromium/gpu/config/gpu_switching.cc
new file mode 100644
index 00000000000..10b3955e42d
--- /dev/null
+++ b/chromium/gpu/config/gpu_switching.cc
@@ -0,0 +1,100 @@
+// Copyright (c) 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "gpu/config/gpu_switching.h"
+
+#if defined(OS_MACOSX)
+#include <OpenGL/OpenGL.h>
+#endif
+
+#include <algorithm>
+#include <vector>
+
+#include "base/command_line.h"
+#include "base/logging.h"
+#include "gpu/config/gpu_driver_bug_workaround_type.h"
+#include "gpu/config/gpu_info.h"
+#include "ui/gl/gl_context.h"
+#include "ui/gl/gl_switches.h"
+#include "ui/gl/gpu_preference.h"
+
+namespace gpu {
+
+namespace {
+
+#if defined(OS_MACOSX)
+typedef CGLPixelFormatObj PlatformPixelFormatObj;
+#else
+typedef void* PlatformPixelFormatObj;
+#endif // OS_MACOSX
+
+PlatformPixelFormatObj g_discrete_pixel_format_obj = nullptr;
+
+bool ContainsWorkaround(const std::vector<int32_t>& workarounds,
+ int32_t workaround) {
+ return (std::find(workarounds.begin(), workarounds.end(), workaround) !=
+ workarounds.end());
+}
+
+void ForceDiscreteGPU() {
+ if (g_discrete_pixel_format_obj)
+ return;
+#if defined(OS_MACOSX)
+ CGLPixelFormatAttribute attribs[1];
+ attribs[0] = static_cast<CGLPixelFormatAttribute>(0);
+ GLint num_pixel_formats = 0;
+ CGLChoosePixelFormat(attribs, &g_discrete_pixel_format_obj,
+ &num_pixel_formats);
+#endif // OS_MACOSX
+}
+
+} // namespace anonymous
+
+bool SwitchableGPUsSupported(const GPUInfo& gpu_info,
+ const base::CommandLine& command_line) {
+#if defined(OS_MACOSX)
+ if (command_line.HasSwitch(switches::kUseGL) &&
+ command_line.GetSwitchValueASCII(switches::kUseGL) !=
+ gl::kGLImplementationDesktopName) {
+ return false;
+ }
+ if (gpu_info.secondary_gpus.size() != 1) {
+ return false;
+ }
+ // Only advertise that we have two GPUs to the rest of
+ // Chrome's code if we find an Intel GPU and some other
+ // vendor's GPU. Otherwise we don't understand the
+ // configuration and don't deal well with it (an example being
+ // the dual AMD GPUs in recent Mac Pros).
+ const uint32_t kVendorIntel = 0x8086;
+ return ((gpu_info.gpu.vendor_id == kVendorIntel &&
+ gpu_info.secondary_gpus[0].vendor_id != kVendorIntel) ||
+ (gpu_info.gpu.vendor_id != kVendorIntel &&
+ gpu_info.secondary_gpus[0].vendor_id == kVendorIntel));
+#else
+ return false;
+#endif // OS_MACOSX
+}
+
+void InitializeSwitchableGPUs(
+ const std::vector<int32_t>& driver_bug_workarounds) {
+ gl::GLContext::SetSwitchableGPUsSupported();
+ if (ContainsWorkaround(driver_bug_workarounds, FORCE_DISCRETE_GPU)) {
+ gl::GLContext::SetForcedGpuPreference(gl::PreferDiscreteGpu);
+ ForceDiscreteGPU();
+ } else if (ContainsWorkaround(driver_bug_workarounds, FORCE_INTEGRATED_GPU)) {
+ gl::GLContext::SetForcedGpuPreference(gl::PreferIntegratedGpu);
+ }
+}
+
+void StopForceDiscreteGPU() {
+#if defined(OS_MACOSX)
+ if (g_discrete_pixel_format_obj) {
+ CGLReleasePixelFormat(g_discrete_pixel_format_obj);
+ g_discrete_pixel_format_obj = nullptr;
+ }
+#endif // OS_MACOSX
+}
+
+} // namespace gpu
diff --git a/chromium/gpu/config/gpu_switching.h b/chromium/gpu/config/gpu_switching.h
new file mode 100644
index 00000000000..ab22f1669cd
--- /dev/null
+++ b/chromium/gpu/config/gpu_switching.h
@@ -0,0 +1,39 @@
+// Copyright (c) 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef GPU_CONFIG_GPU_SWITCHING_H_
+#define GPU_CONFIG_GPU_SWITCHING_H_
+
+#include <stdint.h>
+
+#include <vector>
+
+#include "build/build_config.h"
+#include "gpu/gpu_export.h"
+
+namespace base {
+class CommandLine;
+}
+
+namespace gpu {
+
+struct GPUInfo;
+
+// Returns true if GPU dynamic switching inside Chrome is supported.
+// Currently it's only for Mac with switchable dual GPUs.
+GPU_EXPORT bool SwitchableGPUsSupported(const GPUInfo& gpu_info,
+ const base::CommandLine& command_line);
+
+// Depending on the GPU driver bug workarounds, force use of the discrete GPU
+// or try to stay on the integrated GPU.
+// This should only be called if SwitchableGPUsSupported() returns true.
+GPU_EXPORT void InitializeSwitchableGPUs(
+ const std::vector<int32_t>& driver_bug_workarounds);
+
+// Destroy the CGLPixelFormatObj that's used to force discrete GPU.
+GPU_EXPORT void StopForceDiscreteGPU();
+
+} // namespace gpu
+
+#endif // GPU_CONFIG_GPU_SWITCHING_H_
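Expected call order for the new helpers, with the caller (for example GPU process startup) assumed:

if (gpu::SwitchableGPUsSupported(gpu_info, *command_line)) {
  gpu::InitializeSwitchableGPUs(
      gpu_feature_info.enabled_gpu_driver_bug_workarounds);
}
// Later, on Mac, once a forced-discrete pixel format is no longer needed:
gpu::StopForceDiscreteGPU();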
diff --git a/chromium/gpu/config/gpu_test_config.cc b/chromium/gpu/config/gpu_test_config.cc
index d63e283a638..f757f12625d 100644
--- a/chromium/gpu/config/gpu_test_config.cc
+++ b/chromium/gpu/config/gpu_test_config.cc
@@ -302,12 +302,6 @@ bool GPUTestBotConfig::CurrentConfigMatches(
// static
bool GPUTestBotConfig::GpuBlacklistedOnBot() {
-#if defined(OS_WIN)
- // Blacklist rule #79 disables all Gpu acceleration before Windows 7.
- if (base::win::GetVersion() <= base::win::VERSION_VISTA) {
- return true;
- }
-#endif
return false;
}
diff --git a/chromium/gpu/config/gpu_util.cc b/chromium/gpu/config/gpu_util.cc
index 3f89fb38477..e57c7112e55 100644
--- a/chromium/gpu/config/gpu_util.cc
+++ b/chromium/gpu/config/gpu_util.cc
@@ -25,36 +25,11 @@
#include "gpu/config/gpu_info_collector.h"
#include "gpu/config/gpu_switches.h"
#include "ui/gl/gl_switches.h"
-#include "ui/gl/gpu_switching_manager.h"
namespace gpu {
namespace {
-// Combine the integers into a string, seperated by ','.
-std::string IntSetToString(const std::set<int>& list) {
- std::string rt;
- for (std::set<int>::const_iterator it = list.begin();
- it != list.end(); ++it) {
- if (!rt.empty())
- rt += ",";
- rt += base::IntToString(*it);
- }
- return rt;
-}
-
-void StringToIntSet(const std::string& str, std::set<int>* list) {
- DCHECK(list);
- for (const base::StringPiece& piece :
- base::SplitStringPiece(str, ",", base::TRIM_WHITESPACE,
- base::SPLIT_WANT_ALL)) {
- int number = 0;
- bool succeed = base::StringToInt(piece, &number);
- DCHECK(succeed);
- list->insert(number);
- }
-}
-
// |str| is in the format of "0x040a;0x10de;...;hex32_N".
void StringToIds(const std::string& str, std::vector<uint32_t>* list) {
DCHECK(list);
@@ -93,51 +68,20 @@ GpuFeatureStatus GetGpuRasterizationFeatureStatus(
return kGpuFeatureStatusEnabled;
}
-} // namespace anonymous
-
-void ApplyGpuDriverBugWorkarounds(const GPUInfo& gpu_info,
- base::CommandLine* command_line) {
- std::unique_ptr<GpuDriverBugList> list(GpuDriverBugList::Create());
- std::set<int> workarounds = list->MakeDecision(
- GpuControlList::kOsAny, std::string(), gpu_info);
- GpuDriverBugList::AppendWorkaroundsFromCommandLine(
- &workarounds, *command_line);
- if (!workarounds.empty()) {
- command_line->AppendSwitchASCII(switches::kGpuDriverBugWorkarounds,
- IntSetToString(workarounds));
+void AppendWorkaroundsToCommandLine(const GpuFeatureInfo& gpu_feature_info,
+ base::CommandLine* command_line) {
+ if (gpu_feature_info.IsWorkaroundEnabled(DISABLE_D3D11)) {
+ command_line->AppendSwitch(switches::kDisableD3D11);
}
-
- std::vector<std::string> buglist_disabled_extensions =
- list->GetDisabledExtensions();
- std::set<base::StringPiece> disabled_extensions(
- buglist_disabled_extensions.begin(), buglist_disabled_extensions.end());
-
- // Must be outside if statement to remain in scope (referenced by
- // |disabled_extensions|).
- std::string command_line_disable_gl_extensions;
- if (command_line->HasSwitch(switches::kDisableGLExtensions)) {
- command_line_disable_gl_extensions =
- command_line->GetSwitchValueASCII(switches::kDisableGLExtensions);
- std::vector<base::StringPiece> existing_disabled_extensions =
- base::SplitStringPiece(command_line_disable_gl_extensions, " ",
- base::TRIM_WHITESPACE,
- base::SPLIT_WANT_NONEMPTY);
- disabled_extensions.insert(existing_disabled_extensions.begin(),
- existing_disabled_extensions.end());
+ if (gpu_feature_info.IsWorkaroundEnabled(DISABLE_ES3_GL_CONTEXT)) {
+ command_line->AppendSwitch(switches::kDisableES3GLContext);
}
-
- if (!disabled_extensions.empty()) {
- std::vector<base::StringPiece> v(disabled_extensions.begin(),
- disabled_extensions.end());
- command_line->AppendSwitchASCII(switches::kDisableGLExtensions,
- base::JoinString(v, " "));
+ if (gpu_feature_info.IsWorkaroundEnabled(DISABLE_DIRECT_COMPOSITION)) {
+ command_line->AppendSwitch(switches::kDisableDirectComposition);
}
}
-void StringToFeatureSet(
- const std::string& str, std::set<int>* feature_set) {
- StringToIntSet(str, feature_set);
-}
+} // namespace anonymous
void ParseSecondaryGpuDevicesFromCommandLine(
const base::CommandLine& command_line,
@@ -176,23 +120,67 @@ void ParseSecondaryGpuDevicesFromCommandLine(
}
}
-void InitializeDualGpusIfSupported(
- const std::set<int>& driver_bug_workarounds) {
- ui::GpuSwitchingManager* switching_manager =
- ui::GpuSwitchingManager::GetInstance();
- if (!switching_manager->SupportsDualGpus())
+void GetGpuInfoFromCommandLine(const base::CommandLine& command_line,
+ GPUInfo* gpu_info) {
+ DCHECK(gpu_info);
+
+ if (!command_line.HasSwitch(switches::kGpuVendorID) ||
+ !command_line.HasSwitch(switches::kGpuDeviceID) ||
+ !command_line.HasSwitch(switches::kGpuDriverVersion))
return;
- if (driver_bug_workarounds.count(gpu::FORCE_DISCRETE_GPU) == 1)
- ui::GpuSwitchingManager::GetInstance()->ForceUseOfDiscreteGpu();
- else if (driver_bug_workarounds.count(gpu::FORCE_INTEGRATED_GPU) == 1)
- ui::GpuSwitchingManager::GetInstance()->ForceUseOfIntegratedGpu();
+ bool success = base::HexStringToUInt(
+ command_line.GetSwitchValueASCII(switches::kGpuVendorID),
+ &gpu_info->gpu.vendor_id);
+ DCHECK(success);
+ success = base::HexStringToUInt(
+ command_line.GetSwitchValueASCII(switches::kGpuDeviceID),
+ &gpu_info->gpu.device_id);
+ DCHECK(success);
+ gpu_info->driver_vendor =
+ command_line.GetSwitchValueASCII(switches::kGpuDriverVendor);
+ gpu_info->driver_version =
+ command_line.GetSwitchValueASCII(switches::kGpuDriverVersion);
+ gpu_info->driver_date =
+ command_line.GetSwitchValueASCII(switches::kGpuDriverDate);
+ gpu::ParseSecondaryGpuDevicesFromCommandLine(command_line, gpu_info);
+
+ // Set active gpu device.
+ if (command_line.HasSwitch(switches::kGpuActiveVendorID) &&
+ command_line.HasSwitch(switches::kGpuActiveDeviceID)) {
+ uint32_t active_vendor_id = 0;
+ uint32_t active_device_id = 0;
+ success = base::HexStringToUInt(
+ command_line.GetSwitchValueASCII(switches::kGpuActiveVendorID),
+ &active_vendor_id);
+ DCHECK(success);
+ success = base::HexStringToUInt(
+ command_line.GetSwitchValueASCII(switches::kGpuActiveDeviceID),
+ &active_device_id);
+ DCHECK(success);
+ if (gpu_info->gpu.vendor_id == active_vendor_id &&
+ gpu_info->gpu.device_id == active_device_id) {
+ gpu_info->gpu.active = true;
+ } else {
+ for (size_t i = 0; i < gpu_info->secondary_gpus.size(); ++i) {
+ if (gpu_info->secondary_gpus[i].vendor_id == active_vendor_id &&
+ gpu_info->secondary_gpus[i].device_id == active_device_id) {
+ gpu_info->secondary_gpus[i].active = true;
+ break;
+ }
+ }
+ }
+ }
+
+ if (command_line.HasSwitch(switches::kAMDSwitchable)) {
+ gpu_info->amd_switchable = true;
+ }
}
-GpuFeatureInfo GetGpuFeatureInfo(const GPUInfo& gpu_info,
- const base::CommandLine& command_line) {
+GpuFeatureInfo ComputeGpuFeatureInfo(const GPUInfo& gpu_info,
+ base::CommandLine* command_line) {
GpuFeatureInfo gpu_feature_info;
std::set<int> blacklisted_features;
- if (!command_line.HasSwitch(switches::kIgnoreGpuBlacklist)) {
+ if (!command_line->HasSwitch(switches::kIgnoreGpuBlacklist)) {
std::unique_ptr<GpuBlacklist> list(GpuBlacklist::Create());
blacklisted_features =
list->MakeDecision(GpuControlList::kOsAny, std::string(), gpu_info);
@@ -200,7 +188,50 @@ GpuFeatureInfo GetGpuFeatureInfo(const GPUInfo& gpu_info,
// Currently only used for GPU rasterization.
gpu_feature_info.status_values[GPU_FEATURE_TYPE_GPU_RASTERIZATION] =
- GetGpuRasterizationFeatureStatus(blacklisted_features, command_line);
+ GetGpuRasterizationFeatureStatus(blacklisted_features, *command_line);
+
+ std::set<base::StringPiece> all_disabled_extensions;
+ std::string disabled_gl_extensions_value =
+ command_line->GetSwitchValueASCII(switches::kDisableGLExtensions);
+ if (!disabled_gl_extensions_value.empty()) {
+ std::vector<base::StringPiece> command_line_disabled_extensions =
+ base::SplitStringPiece(disabled_gl_extensions_value, ", ;",
+ base::KEEP_WHITESPACE,
+ base::SPLIT_WANT_NONEMPTY);
+ all_disabled_extensions.insert(command_line_disabled_extensions.begin(),
+ command_line_disabled_extensions.end());
+ }
+
+ std::set<int> enabled_driver_bug_workarounds;
+ std::vector<std::string> driver_bug_disabled_extensions;
+ if (!command_line->HasSwitch(switches::kDisableGpuDriverBugWorkarounds)) {
+ std::unique_ptr<gpu::GpuDriverBugList> list(GpuDriverBugList::Create());
+ enabled_driver_bug_workarounds =
+ list->MakeDecision(GpuControlList::kOsAny, std::string(), gpu_info);
+ gpu_feature_info.applied_gpu_driver_bug_list_entries =
+ list->GetActiveEntries();
+
+ driver_bug_disabled_extensions = list->GetDisabledExtensions();
+ all_disabled_extensions.insert(driver_bug_disabled_extensions.begin(),
+ driver_bug_disabled_extensions.end());
+ }
+ gpu::GpuDriverBugList::AppendWorkaroundsFromCommandLine(
+ &enabled_driver_bug_workarounds, *command_line);
+
+ gpu_feature_info.enabled_gpu_driver_bug_workarounds.insert(
+ gpu_feature_info.enabled_gpu_driver_bug_workarounds.begin(),
+ enabled_driver_bug_workarounds.begin(),
+ enabled_driver_bug_workarounds.end());
+
+ if (all_disabled_extensions.size()) {
+ std::vector<base::StringPiece> v(all_disabled_extensions.begin(),
+ all_disabled_extensions.end());
+ gpu_feature_info.disabled_extensions = base::JoinString(v, " ");
+ }
+
+ // TODO(zmo): Find a better way to communicate these settings to bindings
+ // initialization than commandline switches.
+ AppendWorkaroundsToCommandLine(gpu_feature_info, command_line);
return gpu_feature_info;
}
diff --git a/chromium/gpu/config/gpu_util.h b/chromium/gpu/config/gpu_util.h
index b6aa37e4c2b..c508302a143 100644
--- a/chromium/gpu/config/gpu_util.h
+++ b/chromium/gpu/config/gpu_util.h
@@ -5,10 +5,6 @@
#ifndef GPU_CONFIG_GPU_UTIL_H_
#define GPU_CONFIG_GPU_UTIL_H_
-#include <set>
-#include <string>
-
-#include "base/command_line.h"
#include "build/build_config.h"
#include "gpu/config/gpu_feature_info.h"
#include "gpu/gpu_export.h"
@@ -21,35 +17,26 @@ namespace gpu {
struct GPUInfo;
-// With provided GPUInfo, compute the driver bug workarounds and disabled
-// extensions for the current system, and append the |command_line|.
-GPU_EXPORT void ApplyGpuDriverBugWorkarounds(
- const GPUInfo& gpu_info,
- base::CommandLine* command_line);
-
-// |str| is in the format of "feature1,feature2,...,featureN".
-GPU_EXPORT void StringToFeatureSet(
- const std::string& str, std::set<int>* feature_set);
-
// With provided command line, fill gpu_info->secondary_gpus with parsed
// secondary vendor and device ids.
GPU_EXPORT void ParseSecondaryGpuDevicesFromCommandLine(
const base::CommandLine& command_line,
GPUInfo* gpu_info);
-GPU_EXPORT void InitializeDualGpusIfSupported(
- const std::set<int>& driver_bug_workarounds);
+// Command line contains basic GPU info collected at browser startup time in
+// GpuDataManagerImplPrivate::Initialize().
+// TODO(zmo): Obsolete this.
+GPU_EXPORT void GetGpuInfoFromCommandLine(const base::CommandLine& command_line,
+ GPUInfo* gpu_info);
// This function should only be called from the GPU process, or the Browser
// process while using in-process GPU. This function is safe to call at any
// point, and is not dependent on sandbox initialization.
+// This function also appends a few commandline switches caused by driver bugs.
GPU_EXPORT GpuFeatureInfo
-GetGpuFeatureInfo(const GPUInfo& gpu_info,
- const base::CommandLine& command_line);
+ComputeGpuFeatureInfo(const GPUInfo& gpu_info, base::CommandLine* command_line);
GPU_EXPORT void SetKeysForCrashLogging(const GPUInfo& gpu_info);
-
} // namespace gpu
#endif // GPU_CONFIG_GPU_UTIL_H_
-
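The old ApplyGpuDriverBugWorkarounds()/GetGpuFeatureInfo() pair collapses into one entry point. A sketch of the intended GPU-process flow (the call site itself is an assumption):

base::CommandLine* command_line = base::CommandLine::ForCurrentProcess();
gpu::GPUInfo gpu_info;
gpu::GetGpuInfoFromCommandLine(*command_line, &gpu_info);
gpu::GpuFeatureInfo gpu_feature_info =
    gpu::ComputeGpuFeatureInfo(gpu_info, command_line);
// gpu_feature_info now carries the enabled workaround IDs, the disabled
// extension string and the applied driver bug list entry indices; a few
// switches (kDisableD3D11 and friends) may also have been appended for GL
// bindings initialization.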
diff --git a/chromium/gpu/config/gpu_util_unittest.cc b/chromium/gpu/config/gpu_util_unittest.cc
index 271d821efd4..23d2881bd54 100644
--- a/chromium/gpu/config/gpu_util_unittest.cc
+++ b/chromium/gpu/config/gpu_util_unittest.cc
@@ -4,61 +4,15 @@
#include "gpu/config/gpu_util.h"
-#include <memory>
-
-#include "base/strings/string_split.h"
+#include "base/command_line.h"
#include "base/strings/stringprintf.h"
-#include "gpu/config/gpu_driver_bug_list.h"
+#include "gpu/config/gpu_driver_bug_workaround_type.h"
#include "gpu/config/gpu_info.h"
-#include "gpu/config/gpu_info_collector.h"
#include "gpu/config/gpu_switches.h"
#include "testing/gtest/include/gtest/gtest.h"
-#include "ui/gl/gl_switches.h"
namespace gpu {
-TEST(GpuUtilTest, StringToFeatureSet) {
- {
- // zero feature.
- std::set<int> features;
- StringToFeatureSet("", &features);
- EXPECT_EQ(0u, features.size());
- }
- {
- // One features.
- std::set<int> features;
- StringToFeatureSet("4", &features);
- EXPECT_EQ(1u, features.size());
- }
- {
- // Multiple features.
- std::set<int> features;
- StringToFeatureSet("1,9", &features);
- EXPECT_EQ(2u, features.size());
- }
-}
-
-TEST(GpuUtilTest,
- ApplyGpuDriverBugWorkarounds_DisabledExtensions) {
- GPUInfo gpu_info;
- CollectBasicGraphicsInfo(&gpu_info);
- std::unique_ptr<GpuDriverBugList> list(GpuDriverBugList::Create());
- list->MakeDecision(GpuControlList::kOsAny, std::string(), gpu_info);
- std::vector<std::string> expected_disabled_extensions =
- list->GetDisabledExtensions();
- base::CommandLine command_line(base::CommandLine::NO_PROGRAM);
- ApplyGpuDriverBugWorkarounds(gpu_info, &command_line);
-
- std::vector<std::string> actual_disabled_extensions = base::SplitString(
- command_line.GetSwitchValueASCII(switches::kDisableGLExtensions), ", ;",
- base::TRIM_WHITESPACE, base::SPLIT_WANT_NONEMPTY);
- sort(expected_disabled_extensions.begin(),
- expected_disabled_extensions.end());
- sort(actual_disabled_extensions.begin(), actual_disabled_extensions.end());
-
- EXPECT_EQ(expected_disabled_extensions, actual_disabled_extensions);
-}
-
TEST(GpuUtilTest, ParseSecondaryGpuDevicesFromCommandLine_Simple) {
base::CommandLine command_line(base::CommandLine::NO_PROGRAM);
command_line.AppendSwitchASCII(switches::kGpuSecondaryVendorIDs, "0x10de");
@@ -167,4 +121,54 @@ TEST(GpuUtilTest, ParseSecondaryGpuDevicesFromCommandLine_TestingClear) {
EXPECT_EQ(gpu_info.secondary_gpus.size(), 0ul);
}
+TEST(GpuUtilTest, GetGpuFeatureInfo_WorkaroundFromCommandLine) {
+ {
+ base::CommandLine command_line(base::CommandLine::NO_PROGRAM);
+ GPUInfo gpu_info;
+ GpuFeatureInfo gpu_feature_info =
+ ComputeGpuFeatureInfo(gpu_info, &command_line);
+ EXPECT_FALSE(gpu_feature_info.IsWorkaroundEnabled(
+ USE_GPU_DRIVER_WORKAROUND_FOR_TESTING));
+ }
+
+ {
+ base::CommandLine command_line(base::CommandLine::NO_PROGRAM);
+ command_line.AppendSwitchASCII(GpuDriverBugWorkaroundTypeToString(
+ USE_GPU_DRIVER_WORKAROUND_FOR_TESTING),
+ "1");
+ GPUInfo gpu_info;
+ GpuFeatureInfo gpu_feature_info =
+ ComputeGpuFeatureInfo(gpu_info, &command_line);
+ EXPECT_TRUE(gpu_feature_info.IsWorkaroundEnabled(
+ USE_GPU_DRIVER_WORKAROUND_FOR_TESTING));
+ }
+
+ {
+ base::CommandLine command_line(base::CommandLine::NO_PROGRAM);
+ GPUInfo gpu_info;
+ // See gpu/config/gpu_driver_bug_list.json, entry 215.
+ gpu_info.gpu.vendor_id = 0xbad9;
+ gpu_info.gpu.device_id = 0xbad9;
+ GpuFeatureInfo gpu_feature_info =
+ ComputeGpuFeatureInfo(gpu_info, &command_line);
+ EXPECT_TRUE(gpu_feature_info.IsWorkaroundEnabled(
+ USE_GPU_DRIVER_WORKAROUND_FOR_TESTING));
+ }
+
+ {
+ base::CommandLine command_line(base::CommandLine::NO_PROGRAM);
+ command_line.AppendSwitchASCII(GpuDriverBugWorkaroundTypeToString(
+ USE_GPU_DRIVER_WORKAROUND_FOR_TESTING),
+ "0");
+ GPUInfo gpu_info;
+ // See gpu/config/gpu_driver_bug_list.json, entry 215.
+ gpu_info.gpu.vendor_id = 0xbad9;
+ gpu_info.gpu.device_id = 0xbad9;
+ GpuFeatureInfo gpu_feature_info =
+ ComputeGpuFeatureInfo(gpu_info, &command_line);
+ EXPECT_FALSE(gpu_feature_info.IsWorkaroundEnabled(
+ USE_GPU_DRIVER_WORKAROUND_FOR_TESTING));
+ }
+}
+
} // namespace gpu
diff --git a/chromium/gpu/config/process_json.py b/chromium/gpu/config/process_json.py
index 648fb067010..86e143cce22 100755
--- a/chromium/gpu/config/process_json.py
+++ b/chromium/gpu/config/process_json.py
@@ -122,7 +122,8 @@ def write_disabled_extension_list(entry_id, data, data_file, data_helper_file):
if data:
var_name = 'kDisabledExtensionsForEntry' + str(entry_id)
# define the list
- data_helper_file.write('const char* %s[%d] = {\n' % (var_name, len(data)))
+ data_helper_file.write(
+ 'const char* const %s[%d] = {\n' % (var_name, len(data)))
for item in data:
write_string(item, data_helper_file)
data_helper_file.write(',\n')
@@ -253,7 +254,7 @@ def write_machine_model_info(entry_id, is_exception, exception_id,
model_name_var_name = 'kMachineModelNameForEntry' + str(entry_id)
if is_exception:
model_name_var_name += 'Exception' + str(exception_id)
- data_helper_file.write('const char* %s[%d] = {\n' %
+ data_helper_file.write('const char* const %s[%d] = {\n' %
(model_name_var_name, len(machine_model_name)))
for item in machine_model_name:
write_string(item, data_helper_file)
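The generated arrays become const-correct. For driver bug list entry 240 above, the emitted helper data would read roughly as follows (illustrative of the output shape only):

const char* const kDisabledExtensionsForEntry240[2] = {
    "GL_EXT_disjoint_timer_query",
    "GL_EXT_disjoint_timer_query_webgl2",
};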
diff --git a/chromium/gpu/config/software_rendering_list.json b/chromium/gpu/config/software_rendering_list.json
index ed5c6d6c0e3..2caa641a145 100644
--- a/chromium/gpu/config/software_rendering_list.json
+++ b/chromium/gpu/config/software_rendering_list.json
@@ -1,6 +1,6 @@
{
"name": "software rendering list",
- "version": "13.10",
+ "version": "13.13",
"entries": [
{
"id": 1,
@@ -496,8 +496,8 @@
},
{
"id": 68,
- "description": "VMware Fusion 4 has corrupt rendering with Win Vista+",
- "cr_bugs": [169470],
+ "description": "VMware has corrupt rendering on Windows",
+ "cr_bugs": [169470, 754435],
"os": {
"type": "win",
"version": {
@@ -506,10 +506,6 @@
}
},
"vendor_id": "0x15ad",
- "driver_version": {
- "op": "<=",
- "value": "7.14.1.1134"
- },
"features": [
"all"
]
@@ -1495,10 +1491,14 @@
{
"id": 146,
"description": "Disable D3D11/WebGL2 on AMD switchable graphics",
- "cr_bugs": [451420],
+ "cr_bugs": [451420, 755722],
"os": {
"type": "win"
},
+ "driver_date": {
+ "op": "<",
+ "value": "2016.6"
+ },
"multi_gpu_style": "amd_switchable",
"features": [
"webgl2"
@@ -1532,6 +1532,24 @@
"features": [
"gpu_rasterization"
]
+ },
+ {
+ "id": 150,
+ "description": "Macs with NVidia GPUs experience rendering issues on High Sierra",
+ "cr_bugs": [773705],
+ "os": {
+ "type": "macosx",
+ "version": {
+ "op": ">=",
+ "value": "10.13"
+ }
+ },
+ "vendor_id": "0x10de",
+ "multi_gpu_category": "any",
+ "features": [
+ "accelerated_2d_canvas",
+ "gpu_rasterization"
+ ]
}
],
"comment": [
diff --git a/chromium/gpu/gles2_conform_support/egl/context.cc b/chromium/gpu/gles2_conform_support/egl/context.cc
index d61fabbc169..5ba8b95acd0 100644
--- a/chromium/gpu/gles2_conform_support/egl/context.cc
+++ b/chromium/gpu/gles2_conform_support/egl/context.cc
@@ -40,20 +40,28 @@
// text in this case.
namespace {
-const int32_t kCommandBufferSize = 1024 * 1024;
-const int32_t kTransferBufferSize = 512 * 1024;
const bool kBindGeneratesResources = true;
const bool kLoseContextWhenOutOfMemory = false;
const bool kSupportClientSideArrays = true;
}
namespace egl {
+// static
+gpu::GpuFeatureInfo Context::platform_gpu_feature_info_;
+
+// static
+void Context::SetPlatformGpuFeatureInfo(
+ const gpu::GpuFeatureInfo& gpu_feature_info) {
+ platform_gpu_feature_info_ = gpu_feature_info;
+}
+
Context::Context(Display* display, const Config* config)
: display_(display),
config_(config),
is_current_in_some_thread_(false),
is_destroyed_(false),
- gpu_driver_bug_workarounds_(base::CommandLine::ForCurrentProcess()),
+ gpu_driver_bug_workarounds_(
+ platform_gpu_feature_info_.enabled_gpu_driver_bug_workarounds),
translator_cache_(gpu::GpuPreferences()) {}
Context::~Context() {
@@ -195,11 +203,7 @@ gpu::CommandBufferId Context::GetCommandBufferID() const {
return gpu::CommandBufferId();
}
-int32_t Context::GetStreamId() const {
- return 0;
-}
-
-void Context::FlushOrderingBarrierOnStream(int32_t stream_id) {
+void Context::FlushPendingWork() {
// This is only relevant for out-of-process command buffers.
}
@@ -255,12 +259,13 @@ void Context::ApplyContextReleased() {
}
bool Context::CreateService(gl::GLSurface* gl_surface) {
+ gpu::SharedMemoryLimits limits;
scoped_refptr<gpu::gles2::FeatureInfo> feature_info(
new gpu::gles2::FeatureInfo(gpu_driver_bug_workarounds_));
scoped_refptr<gpu::gles2::ContextGroup> group(new gpu::gles2::ContextGroup(
- gpu::GpuPreferences(), &mailbox_manager_, nullptr /* memory_tracker */,
- &translator_cache_, &completeness_cache_, feature_info, true,
- &image_manager_, nullptr /* image_factory */,
+ gpu::GpuPreferences(), true, &mailbox_manager_,
+ nullptr /* memory_tracker */, &translator_cache_, &completeness_cache_,
+ feature_info, true, &image_manager_, nullptr /* image_factory */,
nullptr /* progress_reporter */, gpu::GpuFeatureInfo(),
&discardable_manager_));
@@ -281,6 +286,7 @@ bool Context::CreateService(gl::GLSurface* gl_surface) {
gl::init::CreateGLContext(nullptr, gl_surface, context_attribs));
if (!gl_context)
return false;
+ platform_gpu_feature_info_.ApplyToGLContext(gl_context.get());
gl_context->MakeCurrent(gl_surface);
@@ -304,7 +310,7 @@ bool Context::CreateService(gl::GLSurface* gl_surface) {
std::unique_ptr<gpu::gles2::GLES2CmdHelper> gles2_cmd_helper(
new gpu::gles2::GLES2CmdHelper(command_buffer.get()));
- if (!gles2_cmd_helper->Initialize(kCommandBufferSize)) {
+ if (!gles2_cmd_helper->Initialize(limits.command_buffer_size)) {
decoder->Destroy(true);
return false;
}
@@ -324,9 +330,7 @@ bool Context::CreateService(gl::GLSurface* gl_surface) {
kBindGeneratesResources, kLoseContextWhenOutOfMemory,
kSupportClientSideArrays, this));
- if (!context->Initialize(kTransferBufferSize, kTransferBufferSize / 2,
- kTransferBufferSize * 2,
- gpu::SharedMemoryLimits::kNoLimit)) {
+ if (!context->Initialize(limits)) {
DestroyService();
return false;
}
diff --git a/chromium/gpu/gles2_conform_support/egl/context.h b/chromium/gpu/gles2_conform_support/egl/context.h
index f0bb5d53cb5..585d26ef9b1 100644
--- a/chromium/gpu/gles2_conform_support/egl/context.h
+++ b/chromium/gpu/gles2_conform_support/egl/context.h
@@ -69,8 +69,7 @@ class Context : public base::RefCountedThreadSafe<Context>,
void EnsureWorkVisible() override;
gpu::CommandBufferNamespace GetNamespaceID() const override;
gpu::CommandBufferId GetCommandBufferID() const override;
- int32_t GetStreamId() const override;
- void FlushOrderingBarrierOnStream(int32_t stream_id) override;
+ void FlushPendingWork() override;
uint64_t GenerateFenceSyncRelease() override;
bool IsFenceSyncRelease(uint64_t release) override;
bool IsFenceSyncFlushed(uint64_t release) override;
@@ -88,6 +87,9 @@ class Context : public base::RefCountedThreadSafe<Context>,
void ApplyCurrentContext(gl::GLSurface* current_surface);
static void ApplyContextReleased();
+ static void SetPlatformGpuFeatureInfo(
+ const gpu::GpuFeatureInfo& gpu_feature_info);
+
private:
friend class base::RefCountedThreadSafe<Context>;
~Context() override;
@@ -101,6 +103,8 @@ class Context : public base::RefCountedThreadSafe<Context>,
bool IsCompatibleSurface(Surface* surface) const;
bool Flush(gl::GLSurface* gl_surface);
+ static gpu::GpuFeatureInfo platform_gpu_feature_info_;
+
Display* display_;
const Config* config_;
bool is_current_in_some_thread_;
diff --git a/chromium/gpu/gles2_conform_support/egl/thread_state.cc b/chromium/gpu/gles2_conform_support/egl/thread_state.cc
index 0a7f77dc8e6..e0f02ae7116 100644
--- a/chromium/gpu/gles2_conform_support/egl/thread_state.cc
+++ b/chromium/gpu/gles2_conform_support/egl/thread_state.cc
@@ -78,13 +78,18 @@ egl::ThreadState* ThreadState::Get() {
// Need to call both Init and InitFromArgv, since Windows does not use
// argc, argv in CommandLine::Init(argc, argv).
command_line->InitFromArgv(argv);
+ gpu::GpuFeatureInfo gpu_feature_info;
if (!command_line->HasSwitch(switches::kDisableGpuDriverBugWorkarounds)) {
gpu::GPUInfo gpu_info;
gpu::CollectBasicGraphicsInfo(&gpu_info);
- gpu::ApplyGpuDriverBugWorkarounds(gpu_info, command_line);
+ gpu_feature_info = gpu::ComputeGpuFeatureInfo(gpu_info, command_line);
+ Context::SetPlatformGpuFeatureInfo(gpu_feature_info);
}
- gl::init::InitializeGLOneOff();
+ gl::init::InitializeGLNoExtensionsOneOff();
+ gl::init::SetDisabledExtensionsPlatform(
+ gpu_feature_info.disabled_extensions);
+ gl::init::InitializeExtensionSettingsOneOffPlatform();
}
g_egl_default_display = new egl::Display();
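
The reordering above matters: GL is first brought up without committing to an extension set, the workaround-driven extension blacklist is applied, and only then are extension-dependent settings finalized. A sketch of that sequence as a free function follows; the header paths are assumptions, not shown in the diff, and error handling is omitted:

    // Sketch only: initialize GL so extensions disabled by driver-bug
    // workarounds never become visible to the rest of the stack.
    #include "base/command_line.h"
    #include "gpu/config/gpu_feature_info.h"
    #include "gpu/config/gpu_info.h"
    #include "gpu/config/gpu_util.h"     // ComputeGpuFeatureInfo (assumed path)
    #include "ui/gl/init/gl_factory.h"   // gl::init::* (assumed path)

    gpu::GpuFeatureInfo InitializeGLWithWorkarounds(const gpu::GPUInfo& gpu_info,
                                                    base::CommandLine* cmd_line) {
      gpu::GpuFeatureInfo info = gpu::ComputeGpuFeatureInfo(gpu_info, cmd_line);
      gl::init::InitializeGLNoExtensionsOneOff();                   // 1. bring up GL
      gl::init::SetDisabledExtensionsPlatform(info.disabled_extensions);  // 2. apply
      gl::init::InitializeExtensionSettingsOneOffPlatform();        // 3. finalize
      return info;
    }
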
diff --git a/chromium/gpu/ipc/client/DEPS b/chromium/gpu/ipc/client/DEPS
index 191b73ce946..d428e56323f 100644
--- a/chromium/gpu/ipc/client/DEPS
+++ b/chromium/gpu/ipc/client/DEPS
@@ -4,3 +4,9 @@ include_rules = [
"+ui/base",
"+ui/latency",
]
+
+specific_include_rules = {
+ "gpu_in_process_context_tests.cc": [
+ "+components/viz/test/test_gpu_memory_buffer_manager.h",
+ ]
+}
diff --git a/chromium/gpu/ipc/client/command_buffer_proxy_impl.cc b/chromium/gpu/ipc/client/command_buffer_proxy_impl.cc
index 77c08ebb6a1..e6db37e8d49 100644
--- a/chromium/gpu/ipc/client/command_buffer_proxy_impl.cc
+++ b/chromium/gpu/ipc/client/command_buffer_proxy_impl.cc
@@ -57,7 +57,7 @@ CommandBufferProxyImpl::CommandBufferProxyImpl(int channel_id,
channel_id_(channel_id),
route_id_(route_id),
stream_id_(stream_id),
- weak_this_(AsWeakPtr()) {
+ weak_ptr_factory_(this) {
DCHECK(route_id);
}
@@ -216,7 +216,8 @@ bool CommandBufferProxyImpl::Initialize(
// Route must be added before sending the message, otherwise messages sent
// from the GPU process could race against adding ourselves to the filter.
- channel->AddRouteWithTaskRunner(route_id_, AsWeakPtr(), task_runner);
+ channel->AddRouteWithTaskRunner(route_id_, weak_ptr_factory_.GetWeakPtr(),
+ task_runner);
// We're blocking the UI thread, which is generally undesirable.
// In this case we need to wait for this before we can show any UI /anyway/,
@@ -252,32 +253,10 @@ void CommandBufferProxyImpl::Flush(int32_t put_offset) {
TRACE_EVENT1("gpu", "CommandBufferProxyImpl::Flush", "put_offset",
put_offset);
- bool put_offset_changed = last_put_offset_ != put_offset;
- last_put_offset_ = put_offset;
- last_barrier_put_offset_ = put_offset;
-
- if (channel_) {
- uint32_t highest_verified_flush_id;
- const uint32_t flush_id = channel_->OrderingBarrier(
- route_id_, stream_id_, put_offset, ++flush_count_, latency_info_,
- pending_sync_token_fences_, put_offset_changed, true,
- &highest_verified_flush_id);
- if (put_offset_changed) {
- DCHECK(flush_id);
- const uint64_t fence_sync_release = next_fence_sync_release_ - 1;
- if (fence_sync_release > flushed_fence_sync_release_) {
- flushed_fence_sync_release_ = fence_sync_release;
- flushed_release_flush_id_.push(
- std::make_pair(fence_sync_release, flush_id));
- }
- }
- CleanupFlushedReleases(highest_verified_flush_id);
- }
+ OrderingBarrierHelper(put_offset);
- if (put_offset_changed) {
- latency_info_.clear();
- pending_sync_token_fences_.clear();
- }
+ if (channel_)
+ channel_->EnsureFlush(last_flush_id_);
}
void CommandBufferProxyImpl::OrderingBarrier(int32_t put_offset) {
@@ -289,31 +268,26 @@ void CommandBufferProxyImpl::OrderingBarrier(int32_t put_offset) {
TRACE_EVENT1("gpu", "CommandBufferProxyImpl::OrderingBarrier", "put_offset",
put_offset);
- bool put_offset_changed = last_barrier_put_offset_ != put_offset;
- last_barrier_put_offset_ = put_offset;
+ OrderingBarrierHelper(put_offset);
+}
+
+void CommandBufferProxyImpl::OrderingBarrierHelper(int32_t put_offset) {
+ DCHECK(has_buffer_);
+
+ if (last_put_offset_ == put_offset)
+ return;
+ last_put_offset_ = put_offset;
if (channel_) {
- uint32_t highest_verified_flush_id;
- const uint32_t flush_id = channel_->OrderingBarrier(
- route_id_, stream_id_, put_offset, ++flush_count_, latency_info_,
- pending_sync_token_fences_, put_offset_changed, false,
- &highest_verified_flush_id);
-
- if (put_offset_changed) {
- DCHECK(flush_id);
- const uint64_t fence_sync_release = next_fence_sync_release_ - 1;
- if (fence_sync_release > flushed_fence_sync_release_) {
- flushed_fence_sync_release_ = fence_sync_release;
- flushed_release_flush_id_.push(
- std::make_pair(fence_sync_release, flush_id));
- }
- }
- CleanupFlushedReleases(highest_verified_flush_id);
- }
- if (put_offset_changed) {
- latency_info_.clear();
- pending_sync_token_fences_.clear();
+ last_flush_id_ = channel_->OrderingBarrier(
+ route_id_, put_offset, std::move(latency_info_),
+ std::move(pending_sync_token_fences_));
}
+
+ latency_info_.clear();
+ pending_sync_token_fences_.clear();
+
+ flushed_fence_sync_release_ = next_fence_sync_release_ - 1;
}
void CommandBufferProxyImpl::SetSwapBuffersCompletionCallback(
@@ -411,7 +385,7 @@ void CommandBufferProxyImpl::SetGetBuffer(int32_t shm_id) {
Send(new GpuCommandBufferMsg_SetGetBuffer(route_id_, shm_id));
last_put_offset_ = -1;
- last_barrier_put_offset_ = -1;
+ has_buffer_ = (shm_id > 0);
}
scoped_refptr<gpu::Buffer> CommandBufferProxyImpl::CreateTransferBuffer(
@@ -528,8 +502,8 @@ int32_t CommandBufferProxyImpl::CreateImage(ClientBuffer buffer,
Send(new GpuCommandBufferMsg_CreateImage(route_id_, params));
if (image_fence_sync) {
- gpu::SyncToken sync_token(GetNamespaceID(), GetStreamId(),
- GetCommandBufferID(), image_fence_sync);
+ gpu::SyncToken sync_token(GetNamespaceID(), 0, GetCommandBufferID(),
+ image_fence_sync);
// Force a synchronous IPC to validate sync token.
EnsureWorkVisible();
@@ -574,7 +548,7 @@ void CommandBufferProxyImpl::SetLock(base::Lock* lock) {
void CommandBufferProxyImpl::EnsureWorkVisible() {
if (channel_)
- channel_->ValidateFlushIDReachedServer(stream_id_, true);
+ channel_->VerifyFlush(UINT32_MAX);
}
gpu::CommandBufferNamespace CommandBufferProxyImpl::GetNamespaceID() const {
@@ -585,13 +559,9 @@ gpu::CommandBufferId CommandBufferProxyImpl::GetCommandBufferID() const {
return command_buffer_id_;
}
-int32_t CommandBufferProxyImpl::GetStreamId() const {
- return stream_id_;
-}
-
-void CommandBufferProxyImpl::FlushOrderingBarrierOnStream(int32_t stream_id) {
+void CommandBufferProxyImpl::FlushPendingWork() {
if (channel_)
- channel_->FlushPendingStream(stream_id);
+ channel_->EnsureFlush(UINT32_MAX);
}
uint64_t CommandBufferProxyImpl::GenerateFenceSyncRelease() {
@@ -601,38 +571,22 @@ uint64_t CommandBufferProxyImpl::GenerateFenceSyncRelease() {
bool CommandBufferProxyImpl::IsFenceSyncRelease(uint64_t release) {
CheckLock();
- return release != 0 && release < next_fence_sync_release_;
+ return release && release < next_fence_sync_release_;
}
bool CommandBufferProxyImpl::IsFenceSyncFlushed(uint64_t release) {
CheckLock();
- return release != 0 && release <= flushed_fence_sync_release_;
+ return release && release <= flushed_fence_sync_release_;
}
bool CommandBufferProxyImpl::IsFenceSyncFlushReceived(uint64_t release) {
CheckLock();
- base::AutoLock lock(last_state_lock_);
- if (last_state_.error != gpu::error::kNoError)
- return false;
-
- if (release <= verified_fence_sync_release_)
- return true;
-
- // Check if we have actually flushed the fence sync release.
- if (release <= flushed_fence_sync_release_) {
- DCHECK(!flushed_release_flush_id_.empty());
- // Check if it has already been validated by another context.
- UpdateVerifiedReleases(channel_->GetHighestValidatedFlushID(stream_id_));
- if (release <= verified_fence_sync_release_)
- return true;
-
- // Has not been validated, validate it now.
- UpdateVerifiedReleases(
- channel_->ValidateFlushIDReachedServer(stream_id_, false));
- return release <= verified_fence_sync_release_;
+ if (release > verified_fence_sync_release_) {
+ if (channel_)
+ channel_->VerifyFlush(last_flush_id_);
+ verified_fence_sync_release_ = flushed_fence_sync_release_;
}
-
- return false;
+ return release && release <= verified_fence_sync_release_;
}
// This can be called from any thread without holding |lock_|. Use a thread-safe
@@ -674,12 +628,6 @@ bool CommandBufferProxyImpl::CanWaitUnverifiedSyncToken(
sync_token_channel_id != channel_id_) {
return false;
}
-
- // If waiting on a different stream, flush pending commands on that stream.
- int32_t release_stream_id = sync_token.extra_data_field();
- if (channel_ && release_stream_id != stream_id_)
- channel_->FlushPendingStream(release_stream_id);
-
return true;
}
@@ -798,7 +746,7 @@ void CommandBufferProxyImpl::TryUpdateStateThreadSafe() {
callback_thread_->PostTask(
FROM_HERE,
base::Bind(&CommandBufferProxyImpl::LockAndDisconnectChannel,
- weak_this_));
+ weak_ptr_factory_.GetWeakPtr()));
}
}
}
@@ -809,29 +757,6 @@ void CommandBufferProxyImpl::TryUpdateStateDontReportError() {
shared_state()->Read(&last_state_);
}
-void CommandBufferProxyImpl::UpdateVerifiedReleases(uint32_t verified_flush) {
- while (!flushed_release_flush_id_.empty()) {
- const std::pair<uint64_t, uint32_t>& front_item =
- flushed_release_flush_id_.front();
- if (front_item.second > verified_flush)
- break;
- verified_fence_sync_release_ = front_item.first;
- flushed_release_flush_id_.pop();
- }
-}
-
-void CommandBufferProxyImpl::CleanupFlushedReleases(
- uint32_t highest_verified_flush_id) {
- DCHECK(channel_);
- static const uint32_t kMaxUnverifiedFlushes = 1000;
- if (flushed_release_flush_id_.size() > kMaxUnverifiedFlushes) {
- // Prevent list of unverified flushes from growing indefinitely.
- highest_verified_flush_id =
- channel_->ValidateFlushIDReachedServer(stream_id_, false);
- }
- UpdateVerifiedReleases(highest_verified_flush_id);
-}
-
gpu::CommandBufferSharedState* CommandBufferProxyImpl::shared_state() const {
return reinterpret_cast<gpu::CommandBufferSharedState*>(
shared_state_shm_->memory());
@@ -932,7 +857,7 @@ void CommandBufferProxyImpl::DisconnectChannelInFreshCallStack() {
// act fully on the lost context.
callback_thread_->PostTask(
FROM_HERE, base::Bind(&CommandBufferProxyImpl::LockAndDisconnectChannel,
- weak_this_));
+ weak_ptr_factory_.GetWeakPtr()));
}
void CommandBufferProxyImpl::LockAndDisconnectChannel() {
@@ -948,7 +873,7 @@ void CommandBufferProxyImpl::DisconnectChannel() {
// the client for lost context a single time.
if (!channel_)
return;
- channel_->FlushPendingStream(stream_id_);
+ channel_->VerifyFlush(UINT32_MAX);
channel_->Send(new GpuChannelMsg_DestroyCommandBuffer(route_id_));
channel_->RemoveRoute(route_id_);
channel_ = nullptr;
diff --git a/chromium/gpu/ipc/client/command_buffer_proxy_impl.h b/chromium/gpu/ipc/client/command_buffer_proxy_impl.h
index 835140b5afd..4984cded14e 100644
--- a/chromium/gpu/ipc/client/command_buffer_proxy_impl.h
+++ b/chromium/gpu/ipc/client/command_buffer_proxy_impl.h
@@ -59,11 +59,9 @@ class GpuChannelHost;
// Client side proxy that forwards messages synchronously to a
// CommandBufferStub.
-class GPU_EXPORT CommandBufferProxyImpl
- : public gpu::CommandBuffer,
- public gpu::GpuControl,
- public IPC::Listener,
- public base::SupportsWeakPtr<CommandBufferProxyImpl> {
+class GPU_EXPORT CommandBufferProxyImpl : public gpu::CommandBuffer,
+ public gpu::GpuControl,
+ public IPC::Listener {
public:
class DeletionObserver {
public:
@@ -119,8 +117,7 @@ class GPU_EXPORT CommandBufferProxyImpl
void EnsureWorkVisible() override;
gpu::CommandBufferNamespace GetNamespaceID() const override;
gpu::CommandBufferId GetCommandBufferID() const override;
- int32_t GetStreamId() const override;
- void FlushOrderingBarrierOnStream(int32_t stream_id) override;
+ void FlushPendingWork() override;
uint64_t GenerateFenceSyncRelease() override;
bool IsFenceSyncRelease(uint64_t release) override;
bool IsFenceSyncFlushed(uint64_t release) override;
@@ -183,6 +180,8 @@ class GPU_EXPORT CommandBufferProxyImpl
}
}
+ void OrderingBarrierHelper(int32_t put_offset);
+
// Send an IPC message over the GPU channel. This is private to fully
// encapsulate the channel; all callers of this function must explicitly
// verify that the context has not been lost.
@@ -198,10 +197,6 @@ class GPU_EXPORT CommandBufferProxyImpl
void OnUpdateVSyncParameters(base::TimeTicks timebase,
base::TimeDelta interval);
- // Updates the highest verified release fence sync.
- void UpdateVerifiedReleases(uint32_t verified_flush);
- void CleanupFlushedReleases(uint32_t highest_verified_flush_id);
-
// Try to read an updated copy of the state from shared memory, and calls
// OnGpuStateError() if the new state has an error.
void TryUpdateState();
@@ -264,18 +259,16 @@ class GPU_EXPORT CommandBufferProxyImpl
const int channel_id_;
const int32_t route_id_;
const int32_t stream_id_;
- uint32_t flush_count_ = 0;
+ uint32_t last_flush_id_ = 0;
int32_t last_put_offset_ = -1;
- int32_t last_barrier_put_offset_ = -1;
+ bool has_buffer_ = false;
// Next generated fence sync.
uint64_t next_fence_sync_release_ = 1;
+ // Sync token waits that haven't been flushed yet.
std::vector<SyncToken> pending_sync_token_fences_;
- // Unverified flushed fence syncs with their corresponding flush id.
- std::queue<std::pair<uint64_t, uint32_t>> flushed_release_flush_id_;
-
// Last flushed fence sync release, same as last item in queue if not empty.
uint64_t flushed_fence_sync_release_ = 0;
@@ -295,8 +288,8 @@ class GPU_EXPORT CommandBufferProxyImpl
SwapBuffersCompletionCallback swap_buffers_completion_callback_;
UpdateVSyncParametersCallback update_vsync_parameters_completion_callback_;
- base::WeakPtr<CommandBufferProxyImpl> weak_this_;
scoped_refptr<base::SequencedTaskRunner> callback_thread_;
+ base::WeakPtrFactory<CommandBufferProxyImpl> weak_ptr_factory_;
DISALLOW_COPY_AND_ASSIGN(CommandBufferProxyImpl);
};
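
Taken together, the proxy changes above leave a client with three strengths of flush. The sketch below is purely illustrative and assumes an already-initialized proxy with a get buffer set; the comments restate what the new code paths in this patch do:

    #include <cstdint>
    #include "gpu/ipc/client/command_buffer_proxy_impl.h"

    void FlushStrengthsSketch(gpu::CommandBufferProxyImpl* proxy,
                              int32_t put_offset) {
      // Weakest: record the put offset; the IPC is deferred and may be batched
      // with other command buffers on the same channel (OrderingBarrierHelper).
      proxy->OrderingBarrier(put_offset);

      // Stronger: also ask the channel to actually send the batched flush
      // (Flush == OrderingBarrierHelper + GpuChannelHost::EnsureFlush).
      proxy->Flush(put_offset);

      // Strongest: block until the GPU process has received everything
      // (EnsureWorkVisible -> GpuChannelHost::VerifyFlush(UINT32_MAX)).
      proxy->EnsureWorkVisible();
    }
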
diff --git a/chromium/gpu/ipc/client/gpu_channel_host.cc b/chromium/gpu/ipc/client/gpu_channel_host.cc
index 060ce66695e..8fc324c7751 100644
--- a/chromium/gpu/ipc/client/gpu_channel_host.cc
+++ b/chromium/gpu/ipc/client/gpu_channel_host.cc
@@ -29,21 +29,6 @@ base::AtomicSequenceNumber g_next_transfer_buffer_id;
} // namespace
-GpuChannelHost::StreamFlushInfo::StreamFlushInfo()
- : next_stream_flush_id(1),
- flushed_stream_flush_id(0),
- verified_stream_flush_id(0),
- flush_pending(false),
- route_id(MSG_ROUTING_NONE),
- put_offset(0),
- flush_count(0),
- flush_id(0) {}
-
-GpuChannelHost::StreamFlushInfo::StreamFlushInfo(const StreamFlushInfo& other) =
- default;
-
-GpuChannelHost::StreamFlushInfo::~StreamFlushInfo() {}
-
// static
scoped_refptr<GpuChannelHost> GpuChannelHost::Create(
GpuChannelHostFactory* factory,
@@ -129,66 +114,56 @@ bool GpuChannelHost::Send(IPC::Message* msg) {
uint32_t GpuChannelHost::OrderingBarrier(
int32_t route_id,
- int32_t stream_id,
int32_t put_offset,
- uint32_t flush_count,
- const std::vector<ui::LatencyInfo>& latency_info,
- const std::vector<SyncToken>& sync_token_fences,
- bool put_offset_changed,
- bool do_flush,
- uint32_t* highest_verified_flush_id) {
+ std::vector<ui::LatencyInfo> latency_info,
+ std::vector<SyncToken> sync_token_fences) {
AutoLock lock(context_lock_);
- StreamFlushInfo& flush_info = stream_flush_info_[stream_id];
- if (flush_info.flush_pending && flush_info.route_id != route_id)
- InternalFlush(&flush_info);
-
- *highest_verified_flush_id = flush_info.verified_stream_flush_id;
-
- if (put_offset_changed) {
- const uint32_t flush_id = flush_info.next_stream_flush_id++;
- flush_info.flush_pending = true;
- flush_info.route_id = route_id;
- flush_info.put_offset = put_offset;
- flush_info.flush_count = flush_count;
- flush_info.flush_id = flush_id;
- flush_info.latency_info.insert(flush_info.latency_info.end(),
- latency_info.begin(), latency_info.end());
- flush_info.sync_token_fences.insert(flush_info.sync_token_fences.end(),
- sync_token_fences.begin(),
- sync_token_fences.end());
-
- if (do_flush)
- InternalFlush(&flush_info);
-
- return flush_id;
- }
- return 0;
+
+ if (flush_list_.empty() || flush_list_.back().route_id != route_id)
+ flush_list_.push_back(FlushParams());
+
+ FlushParams& flush_params = flush_list_.back();
+ flush_params.flush_id = next_flush_id_++;
+ flush_params.route_id = route_id;
+ flush_params.put_offset = put_offset;
+ flush_params.latency_info.insert(
+ flush_params.latency_info.end(),
+ std::make_move_iterator(latency_info.begin()),
+ std::make_move_iterator(latency_info.end()));
+ flush_params.sync_token_fences.insert(
+ flush_params.sync_token_fences.end(),
+ std::make_move_iterator(sync_token_fences.begin()),
+ std::make_move_iterator(sync_token_fences.end()));
+ return flush_params.flush_id;
+}
+
+void GpuChannelHost::EnsureFlush(uint32_t flush_id) {
+ AutoLock lock(context_lock_);
+ InternalFlush(flush_id);
}
-void GpuChannelHost::FlushPendingStream(int32_t stream_id) {
+void GpuChannelHost::VerifyFlush(uint32_t flush_id) {
AutoLock lock(context_lock_);
- auto flush_info_iter = stream_flush_info_.find(stream_id);
- if (flush_info_iter == stream_flush_info_.end())
- return;
- StreamFlushInfo& flush_info = flush_info_iter->second;
- if (flush_info.flush_pending)
- InternalFlush(&flush_info);
+ InternalFlush(flush_id);
+
+ if (flush_id > verified_flush_id_) {
+ Send(new GpuChannelMsg_Nop());
+ verified_flush_id_ = next_flush_id_ - 1;
+ }
}
-void GpuChannelHost::InternalFlush(StreamFlushInfo* flush_info) {
+void GpuChannelHost::InternalFlush(uint32_t flush_id) {
context_lock_.AssertAcquired();
- DCHECK(flush_info);
- DCHECK(flush_info->flush_pending);
- DCHECK_LT(flush_info->flushed_stream_flush_id, flush_info->flush_id);
- Send(new GpuCommandBufferMsg_AsyncFlush(
- flush_info->route_id, flush_info->put_offset, flush_info->flush_count,
- flush_info->latency_info, flush_info->sync_token_fences));
- flush_info->latency_info.clear();
- flush_info->sync_token_fences.clear();
- flush_info->flush_pending = false;
-
- flush_info->flushed_stream_flush_id = flush_info->flush_id;
+
+ if (!flush_list_.empty() && flush_id > flushed_flush_id_) {
+ DCHECK_EQ(flush_list_.back().flush_id, next_flush_id_ - 1);
+
+ Send(new GpuChannelMsg_FlushCommandBuffers(std::move(flush_list_)));
+
+ flush_list_.clear();
+ flushed_flush_id_ = next_flush_id_ - 1;
+ }
}
void GpuChannelHost::DestroyChannel() {
@@ -210,9 +185,8 @@ void GpuChannelHost::AddRouteWithTaskRunner(
scoped_refptr<base::SingleThreadTaskRunner> io_task_runner =
factory_->GetIOThreadTaskRunner();
io_task_runner->PostTask(
- FROM_HERE,
- base::Bind(&GpuChannelHost::MessageFilter::AddRoute,
- channel_filter_, route_id, listener, task_runner));
+ FROM_HERE, base::Bind(&GpuChannelHost::MessageFilter::AddRoute,
+ channel_filter_, route_id, listener, task_runner));
}
void GpuChannelHost::RemoveRoute(int route_id) {
@@ -244,63 +218,6 @@ int32_t GpuChannelHost::GenerateRouteID() {
return next_route_id_.GetNext();
}
-uint32_t GpuChannelHost::ValidateFlushIDReachedServer(int32_t stream_id,
- bool force_validate) {
- // Store what flush ids we will be validating for all streams.
- base::hash_map<int32_t, uint32_t> validate_flushes;
- uint32_t flushed_stream_flush_id = 0;
- uint32_t verified_stream_flush_id = 0;
- {
- AutoLock lock(context_lock_);
- for (const auto& iter : stream_flush_info_) {
- const int32_t iter_stream_id = iter.first;
- const StreamFlushInfo& flush_info = iter.second;
- if (iter_stream_id == stream_id) {
- flushed_stream_flush_id = flush_info.flushed_stream_flush_id;
- verified_stream_flush_id = flush_info.verified_stream_flush_id;
- }
-
- if (flush_info.flushed_stream_flush_id >
- flush_info.verified_stream_flush_id) {
- validate_flushes.insert(
- std::make_pair(iter_stream_id, flush_info.flushed_stream_flush_id));
- }
- }
- }
-
- if (!force_validate && flushed_stream_flush_id == verified_stream_flush_id) {
- // Current stream has no unverified flushes.
- return verified_stream_flush_id;
- }
-
- if (Send(new GpuChannelMsg_Nop())) {
- // Update verified flush id for all streams.
- uint32_t highest_flush_id = 0;
- AutoLock lock(context_lock_);
- for (const auto& iter : validate_flushes) {
- const int32_t validated_stream_id = iter.first;
- const uint32_t validated_flush_id = iter.second;
- StreamFlushInfo& flush_info = stream_flush_info_[validated_stream_id];
- if (flush_info.verified_stream_flush_id < validated_flush_id) {
- flush_info.verified_stream_flush_id = validated_flush_id;
- }
-
- if (validated_stream_id == stream_id)
- highest_flush_id = flush_info.verified_stream_flush_id;
- }
-
- return highest_flush_id;
- }
-
- return 0;
-}
-
-uint32_t GpuChannelHost::GetHighestValidatedFlushID(int32_t stream_id) {
- AutoLock lock(context_lock_);
- StreamFlushInfo& flush_info = stream_flush_info_[stream_id];
- return flush_info.verified_stream_flush_id;
-}
-
GpuChannelHost::~GpuChannelHost() {
#if DCHECK_IS_ON()
AutoLock lock(context_lock_);
diff --git a/chromium/gpu/ipc/client/gpu_channel_host.h b/chromium/gpu/ipc/client/gpu_channel_host.h
index d71144e5fbf..167dec4bb15 100644
--- a/chromium/gpu/ipc/client/gpu_channel_host.h
+++ b/chromium/gpu/ipc/client/gpu_channel_host.h
@@ -21,6 +21,7 @@
#include "base/synchronization/lock.h"
#include "gpu/config/gpu_info.h"
#include "gpu/gpu_export.h"
+#include "gpu/ipc/common/flush_params.h"
#include "ipc/ipc_channel_handle.h"
#include "ipc/ipc_sync_channel.h"
#include "ipc/message_filter.h"
@@ -97,21 +98,22 @@ class GPU_EXPORT GpuChannelHost
// IPC::Sender implementation:
bool Send(IPC::Message* msg) override;
- // Set an ordering barrier. AsyncFlushes any pending barriers on other
- // routes. Combines multiple OrderingBarriers into a single AsyncFlush.
- // Returns the flush ID for the stream or 0 if put offset was not changed.
- // Outputs *highest_verified_flush_id.
+ // Enqueue an ordering barrier to defer the flush and return an identifier
+ // that can be used to ensure or verify the flush later.
uint32_t OrderingBarrier(int32_t route_id,
- int32_t stream_id,
int32_t put_offset,
- uint32_t flush_count,
- const std::vector<ui::LatencyInfo>& latency_info,
- const std::vector<SyncToken>& sync_token_fences,
- bool put_offset_changed,
- bool do_flush,
- uint32_t* highest_verified_flush_id);
+ std::vector<ui::LatencyInfo> latency_info,
+ std::vector<SyncToken> sync_token_fences);
- void FlushPendingStream(int32_t stream_id);
+  // Ensure that all ordering barriers up to |flush_id| have been flushed.
+  // Pass UINT32_MAX to force all pending ordering barriers to be
+  // flushed.

+ void EnsureFlush(uint32_t flush_id);
+
+  // Verify that all ordering barriers up to |flush_id| have reached the
+  // service. Pass UINT32_MAX to force all pending ordering barriers to be
+  // verified.
+ void VerifyFlush(uint32_t flush_id);
// Destroy this channel. Must be called on the main thread, before
// destruction.
@@ -150,19 +152,6 @@ class GPU_EXPORT GpuChannelHost
// Generate a route ID guaranteed to be unique for this channel.
int32_t GenerateRouteID();
- // Sends a synchronous nop to the server which validate that all previous IPC
- // messages have been received. Once the synchronous nop has been sent to the
- // server all previous flushes will all be marked as validated, including
- // flushes for other streams on the same channel. Once a validation has been
- // sent, it will return the highest validated flush id for the stream.
- // If the validation fails (which can only happen upon context lost), the
- // highest validated flush id will not change. If no flush ID were ever
- // validated then it will return 0 (Note the lowest valid flush ID is 1).
- uint32_t ValidateFlushIDReachedServer(int32_t stream_id, bool force_validate);
-
- // Returns the highest validated flush ID for a given stream.
- uint32_t GetHighestValidatedFlushID(int32_t stream_id);
-
private:
friend class base::RefCountedThreadSafe<GpuChannelHost>;
@@ -213,26 +202,6 @@ class GPU_EXPORT GpuChannelHost
bool lost_;
};
- struct StreamFlushInfo {
- StreamFlushInfo();
- StreamFlushInfo(const StreamFlushInfo& other);
- ~StreamFlushInfo();
-
- // These are global per stream.
- uint32_t next_stream_flush_id;
- uint32_t flushed_stream_flush_id;
- uint32_t verified_stream_flush_id;
-
- // These are local per context.
- bool flush_pending;
- int32_t route_id;
- int32_t put_offset;
- uint32_t flush_count;
- uint32_t flush_id;
- std::vector<ui::LatencyInfo> latency_info;
- std::vector<SyncToken> sync_token_fences;
- };
-
GpuChannelHost(GpuChannelHostFactory* factory,
int channel_id,
const gpu::GPUInfo& gpu_info,
@@ -241,13 +210,13 @@ class GPU_EXPORT GpuChannelHost
void Connect(const IPC::ChannelHandle& channel_handle,
base::WaitableEvent* shutdown_event);
bool InternalSend(IPC::Message* msg);
- void InternalFlush(StreamFlushInfo* flush_info);
+ void InternalFlush(uint32_t flush_id);
// Threading notes: all fields are constant during the lifetime of |this|
// except:
// - |next_image_id_|, atomic type
// - |next_route_id_|, atomic type
- // - |channel_| and |stream_flush_info_|, protected by |context_lock_|
+ // - |channel_| and |flush_list_|, protected by |context_lock_|
GpuChannelHostFactory* const factory_;
const int channel_id_;
@@ -266,10 +235,13 @@ class GPU_EXPORT GpuChannelHost
// Route IDs are allocated in sequence.
base::AtomicSequenceNumber next_route_id_;
- // Protects channel_ and stream_flush_info_.
+ // Protects channel_ and flush_list_.
mutable base::Lock context_lock_;
std::unique_ptr<IPC::SyncChannel> channel_;
- base::hash_map<int32_t, StreamFlushInfo> stream_flush_info_;
+ std::vector<FlushParams> flush_list_;
+ uint32_t next_flush_id_ = 1;
+ uint32_t flushed_flush_id_ = 0;
+ uint32_t verified_flush_id_ = 0;
DISALLOW_COPY_AND_ASSIGN(GpuChannelHost);
};
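
The channel-side model is easiest to see in isolation. The following is a stripped-down, hypothetical restatement of the new bookkeeping; MiniChannel and MiniFlushParams are invented names, and the real code sends GpuChannelMsg_FlushCommandBuffers and a synchronous GpuChannelMsg_Nop where the comments indicate:

    #include <cstdint>
    #include <vector>

    struct MiniFlushParams {
      int32_t route_id;
      int32_t put_offset;
      uint32_t flush_id;
    };

    class MiniChannel {
     public:
      uint32_t OrderingBarrier(int32_t route_id, int32_t put_offset) {
        // Barriers for the same route coalesce into the last pending entry.
        if (flush_list_.empty() || flush_list_.back().route_id != route_id)
          flush_list_.push_back({route_id, 0, 0});
        MiniFlushParams& p = flush_list_.back();
        p.flush_id = next_flush_id_++;
        p.put_offset = put_offset;  // later barriers overwrite the put offset
        return p.flush_id;
      }

      void EnsureFlush(uint32_t flush_id) {
        if (!flush_list_.empty() && flush_id > flushed_flush_id_) {
          // Real code: Send(new GpuChannelMsg_FlushCommandBuffers(flush_list_)).
          flush_list_.clear();
          flushed_flush_id_ = next_flush_id_ - 1;
        }
      }

      void VerifyFlush(uint32_t flush_id) {
        EnsureFlush(flush_id);
        if (flush_id > verified_flush_id_) {
          // Real code: synchronous GpuChannelMsg_Nop round trip.
          verified_flush_id_ = next_flush_id_ - 1;
        }
      }

     private:
      std::vector<MiniFlushParams> flush_list_;
      uint32_t next_flush_id_ = 1;
      uint32_t flushed_flush_id_ = 0;
      uint32_t verified_flush_id_ = 0;
    };
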
diff --git a/chromium/gpu/ipc/client/gpu_in_process_context_tests.cc b/chromium/gpu/ipc/client/gpu_in_process_context_tests.cc
index 7562908be44..ce6919349ae 100644
--- a/chromium/gpu/ipc/client/gpu_in_process_context_tests.cc
+++ b/chromium/gpu/ipc/client/gpu_in_process_context_tests.cc
@@ -10,6 +10,8 @@
#include <vector>
#include "base/threading/thread_task_runner_handle.h"
+#include "build/build_config.h"
+#include "components/viz/test/test_gpu_memory_buffer_manager.h"
#include "gpu/command_buffer/client/gles2_implementation.h"
#include "gpu/command_buffer/client/shared_memory_limits.h"
#include "gpu/ipc/common/surface_handle.h"
@@ -21,7 +23,7 @@ namespace {
class ContextTestBase : public testing::Test {
public:
- void SetUp() override {
+ std::unique_ptr<gpu::GLInProcessContext> CreateGLInProcessContext() {
gpu::gles2::ContextCreationAttribHelper attributes;
attributes.alpha_size = 8;
attributes.depth_size = 24;
@@ -33,25 +35,34 @@ class ContextTestBase : public testing::Test {
attributes.sample_buffers = 1;
attributes.bind_generates_resource = false;
- context_.reset(
- gpu::GLInProcessContext::Create(nullptr, /* service */
- nullptr, /* surface */
- true, /* offscreen */
- gpu::kNullSurfaceHandle, /* window */
- nullptr, /* share_context */
- attributes, gpu::SharedMemoryLimits(),
- nullptr, /* gpu_memory_buffer_manager */
- nullptr, /* image_factory */
- base::ThreadTaskRunnerHandle::Get()));
+ return base::WrapUnique(gpu::GLInProcessContext::Create(
+ nullptr, /* service */
+ nullptr, /* surface */
+ true, /* offscreen */
+ gpu::kNullSurfaceHandle, /* window */
+ nullptr, /* share_context */
+ attributes, gpu::SharedMemoryLimits(), gpu_memory_buffer_manager_.get(),
+ nullptr, /* image_factory */
+ base::ThreadTaskRunnerHandle::Get()));
+ }
+
+ void SetUp() override {
+ gpu_memory_buffer_manager_ =
+ std::make_unique<viz::TestGpuMemoryBufferManager>();
+ context_ = CreateGLInProcessContext();
gl_ = context_->GetImplementation();
context_support_ = context_->GetImplementation();
}
- void TearDown() override { context_.reset(NULL); }
+ void TearDown() override {
+ context_.reset();
+ gpu_memory_buffer_manager_.reset();
+ }
protected:
gpu::gles2::GLES2Interface* gl_;
gpu::ContextSupport* context_support_;
+ std::unique_ptr<gpu::GpuMemoryBufferManager> gpu_memory_buffer_manager_;
private:
std::unique_ptr<gpu::GLInProcessContext> context_;
@@ -62,3 +73,42 @@ class ContextTestBase : public testing::Test {
// Include the actual tests.
#define CONTEXT_TEST_F TEST_F
#include "gpu/ipc/client/gpu_context_tests.h"
+
+using InProcessCommandBufferTest = ContextTestBase;
+
+TEST_F(InProcessCommandBufferTest, CreateImage) {
+ constexpr gfx::BufferFormat kBufferFormat = gfx::BufferFormat::RGBA_8888;
+ constexpr gfx::BufferUsage kBufferUsage = gfx::BufferUsage::SCANOUT;
+ constexpr gfx::Size kBufferSize(100, 100);
+
+#if defined(OS_WIN)
+ // The IPC version of ContextTestBase::SetUpOnMainThread does not succeed on
+ // some platforms.
+ if (!gl_)
+ return;
+#endif
+
+ // Calling CreateImageCHROMIUM() should allocate an image id starting at 1.
+ std::unique_ptr<gfx::GpuMemoryBuffer> gpu_memory_buffer1 =
+ gpu_memory_buffer_manager_->CreateGpuMemoryBuffer(
+ kBufferSize, kBufferFormat, kBufferUsage, gpu::kNullSurfaceHandle);
+ int image_id1 = gl_->CreateImageCHROMIUM(gpu_memory_buffer1->AsClientBuffer(),
+ kBufferSize.width(),
+ kBufferSize.height(), GL_RGBA);
+
+ EXPECT_EQ(image_id1, 1);
+
+ // Create a second GLInProcessContext that is backed by a different
+ // InProcessCommandBuffer. Calling CreateImageCHROMIUM() should return a
+ // different id than the first call.
+ std::unique_ptr<gpu::GLInProcessContext> context2 =
+ CreateGLInProcessContext();
+ std::unique_ptr<gfx::GpuMemoryBuffer> buffer2 =
+ gpu_memory_buffer_manager_->CreateGpuMemoryBuffer(
+ kBufferSize, kBufferFormat, kBufferUsage, gpu::kNullSurfaceHandle);
+ int image_id2 = context2->GetImplementation()->CreateImageCHROMIUM(
+ buffer2->AsClientBuffer(), kBufferSize.width(), kBufferSize.height(),
+ GL_RGBA);
+
+ EXPECT_EQ(image_id2, 2);
+}
diff --git a/chromium/gpu/ipc/client/gpu_memory_buffer_impl_io_surface.cc b/chromium/gpu/ipc/client/gpu_memory_buffer_impl_io_surface.cc
index a87bd3046b6..9cb8fe9e839 100644
--- a/chromium/gpu/ipc/client/gpu_memory_buffer_impl_io_surface.cc
+++ b/chromium/gpu/ipc/client/gpu_memory_buffer_impl_io_surface.cc
@@ -21,7 +21,9 @@ uint32_t LockFlags(gfx::BufferUsage usage) {
return kIOSurfaceLockAvoidSync;
case gfx::BufferUsage::GPU_READ:
case gfx::BufferUsage::SCANOUT:
+ case gfx::BufferUsage::SCANOUT_CAMERA_READ_WRITE:
case gfx::BufferUsage::SCANOUT_CPU_READ_WRITE:
+ case gfx::BufferUsage::SCANOUT_VDA_WRITE:
case gfx::BufferUsage::GPU_READ_CPU_READ_WRITE_PERSISTENT:
return 0;
}
@@ -119,16 +121,16 @@ void GpuMemoryBufferImplIOSurface::SetColorSpaceForScanout(
color_space_ = color_space;
// Retrieve the ICC profile data.
- gfx::ICCProfile icc_profile;
- if (!color_space_.GetAsFullRangeRGB().GetICCProfile(&icc_profile)) {
+ std::vector<char> icc_profile_data;
+ if (!color_space_.GetAsFullRangeRGB().GetICCProfileData(&icc_profile_data)) {
DLOG(ERROR) << "Failed to set color space for scanout: no ICC profile.";
return;
}
// Package it as a CFDataRef and send it to the IOSurface.
base::ScopedCFTypeRef<CFDataRef> cf_data_icc_profile(CFDataCreate(
- nullptr, reinterpret_cast<const UInt8*>(icc_profile.GetData().data()),
- icc_profile.GetData().size()));
+ nullptr, reinterpret_cast<const UInt8*>(icc_profile_data.data()),
+ icc_profile_data.size()));
IOSurfaceSetValue(io_surface_, CFSTR("IOSurfaceColorSpace"),
cf_data_icc_profile);
}
diff --git a/chromium/gpu/ipc/client/gpu_memory_buffer_impl_shared_memory.cc b/chromium/gpu/ipc/client/gpu_memory_buffer_impl_shared_memory.cc
index d138b1d1e56..c78c4d276c1 100644
--- a/chromium/gpu/ipc/client/gpu_memory_buffer_impl_shared_memory.cc
+++ b/chromium/gpu/ipc/client/gpu_memory_buffer_impl_shared_memory.cc
@@ -8,11 +8,12 @@
#include <utility>
#include "base/bind.h"
+#include "base/format_macros.h"
#include "base/memory/ptr_util.h"
#include "base/numerics/safe_math.h"
#include "base/process/memory.h"
+#include "base/strings/stringprintf.h"
#include "ui/gfx/buffer_format_util.h"
-#include "ui/gfx/gpu_memory_buffer_tracing.h"
#include "ui/gl/gl_bindings.h"
namespace gpu {
@@ -102,6 +103,8 @@ bool GpuMemoryBufferImplSharedMemory::IsUsageSupported(gfx::BufferUsage usage) {
case gfx::BufferUsage::SCANOUT_CPU_READ_WRITE:
return true;
case gfx::BufferUsage::SCANOUT:
+ case gfx::BufferUsage::SCANOUT_CAMERA_READ_WRITE:
+ case gfx::BufferUsage::SCANOUT_VDA_WRITE:
return false;
}
NOTREACHED();
@@ -216,7 +219,8 @@ gfx::GpuMemoryBufferHandle GpuMemoryBufferImplSharedMemory::GetHandle() const {
base::trace_event::MemoryAllocatorDumpGuid
GpuMemoryBufferImplSharedMemory::GetGUIDForTracing(
uint64_t tracing_process_id) const {
- return gfx::GetSharedMemoryGUIDForTracing(tracing_process_id, id_);
+ return base::trace_event::MemoryAllocatorDumpGuid(base::StringPrintf(
+ "shared_memory_gpu/%" PRIx64 "/%d", tracing_process_id, id_.id));
}
} // namespace gpu
diff --git a/chromium/gpu/ipc/client/gpu_memory_buffer_impl_test_template.h b/chromium/gpu/ipc/client/gpu_memory_buffer_impl_test_template.h
index af67695c4ba..e84a7a72259 100644
--- a/chromium/gpu/ipc/client/gpu_memory_buffer_impl_test_template.h
+++ b/chromium/gpu/ipc/client/gpu_memory_buffer_impl_test_template.h
@@ -52,8 +52,11 @@ TYPED_TEST_P(GpuMemoryBufferImplTest, CreateFromHandle) {
for (auto format : gfx::GetBufferFormatsForTesting()) {
gfx::BufferUsage usages[] = {
- gfx::BufferUsage::GPU_READ, gfx::BufferUsage::SCANOUT,
+ gfx::BufferUsage::GPU_READ,
+ gfx::BufferUsage::SCANOUT,
+ gfx::BufferUsage::SCANOUT_CAMERA_READ_WRITE,
gfx::BufferUsage::SCANOUT_CPU_READ_WRITE,
+ gfx::BufferUsage::SCANOUT_VDA_WRITE,
gfx::BufferUsage::GPU_READ_CPU_READ_WRITE,
gfx::BufferUsage::GPU_READ_CPU_READ_WRITE_PERSISTENT};
for (auto usage : usages) {
diff --git a/chromium/gpu/ipc/common/BUILD.gn b/chromium/gpu/ipc/common/BUILD.gn
index 4ce7bddf0fd..da7e7660c48 100644
--- a/chromium/gpu/ipc/common/BUILD.gn
+++ b/chromium/gpu/ipc/common/BUILD.gn
@@ -65,6 +65,8 @@ source_set("ipc_common_sources") {
visibility = [ "//gpu/*" ]
sources = [
+ "flush_params.cc",
+ "flush_params.h",
"gpu_memory_buffer_support.cc",
"gpu_memory_buffer_support.h",
"gpu_message_generator.cc",
diff --git a/chromium/gpu/ipc/common/flush_params.cc b/chromium/gpu/ipc/common/flush_params.cc
new file mode 100644
index 00000000000..073d6d5e09b
--- /dev/null
+++ b/chromium/gpu/ipc/common/flush_params.cc
@@ -0,0 +1,16 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "gpu/ipc/common/flush_params.h"
+
+namespace gpu {
+
+FlushParams::FlushParams() = default;
+FlushParams::FlushParams(const FlushParams& other) = default;
+FlushParams::FlushParams(FlushParams&& other) = default;
+FlushParams::~FlushParams() = default;
+FlushParams& FlushParams::operator=(const FlushParams& other) = default;
+FlushParams& FlushParams::operator=(FlushParams&& other) = default;
+
+} // namespace gpu
diff --git a/chromium/gpu/ipc/common/flush_params.h b/chromium/gpu/ipc/common/flush_params.h
new file mode 100644
index 00000000000..5b5b7e04e55
--- /dev/null
+++ b/chromium/gpu/ipc/common/flush_params.h
@@ -0,0 +1,41 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef GPU_IPC_COMMON_FLUSH_PARAMS_H_
+#define GPU_IPC_COMMON_FLUSH_PARAMS_H_
+
+#include <stdint.h>
+#include <vector>
+
+#include "gpu/command_buffer/common/sync_token.h"
+#include "gpu/gpu_export.h"
+#include "ui/latency/latency_info.h"
+
+namespace gpu {
+
+struct FlushParams {
+ FlushParams();
+ FlushParams(const FlushParams& other);
+ FlushParams(FlushParams&& other);
+ ~FlushParams();
+ FlushParams& operator=(const FlushParams& other);
+ FlushParams& operator=(FlushParams&& other);
+
+ // Route ID of the command buffer for this flush.
+ int32_t route_id;
+ // Client put offset. Service get offset is updated in shared memory.
+ int32_t put_offset;
+ // Increasing counter for the flush.
+ uint32_t flush_id;
+ // Latency timestamps used for correlating swap buffers in the GPU process
+ // with input events.
+ std::vector<ui::LatencyInfo> latency_info;
+  // Sync token dependencies of the flush. These are the sync tokens that the
+  // commands in this flush wait on.
+ std::vector<SyncToken> sync_token_fences;
+};
+
+} // namespace gpu
+
+#endif // GPU_IPC_COMMON_FLUSH_PARAMS_H_
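
For orientation, one FlushParams describes one pending flush for one route, and a batch of them rides in the GpuChannelMsg_FlushCommandBuffers message added later in this patch. The example below is illustrative only; the values are made up, and real batches are built inside GpuChannelHost::OrderingBarrier:

    #include <utility>
    #include <vector>
    #include "gpu/ipc/common/flush_params.h"

    std::vector<gpu::FlushParams> BuildExampleBatch() {
      gpu::FlushParams params;
      params.route_id = 7;       // hypothetical command buffer route
      params.put_offset = 1024;  // client put offset for that route
      params.flush_id = 42;      // monotonically increasing per channel
      // latency_info and sync_token_fences left empty in this sketch.
      std::vector<gpu::FlushParams> batch;
      batch.push_back(std::move(params));
      // Real code: Send(new GpuChannelMsg_FlushCommandBuffers(std::move(batch)));
      return batch;
    }
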
diff --git a/chromium/gpu/ipc/common/gpu_command_buffer_traits_multi.h b/chromium/gpu/ipc/common/gpu_command_buffer_traits_multi.h
index 0594b759611..a7145c0dc4a 100644
--- a/chromium/gpu/ipc/common/gpu_command_buffer_traits_multi.h
+++ b/chromium/gpu/ipc/common/gpu_command_buffer_traits_multi.h
@@ -134,8 +134,10 @@ IPC_STRUCT_TRAITS_BEGIN(gpu::Capabilities)
IPC_STRUCT_TRAITS_MEMBER(emulate_rgb_buffer_with_rgba)
IPC_STRUCT_TRAITS_MEMBER(software_to_accelerated_canvas_upgrade)
IPC_STRUCT_TRAITS_MEMBER(dc_layers)
+ IPC_STRUCT_TRAITS_MEMBER(use_dc_overlays_for_video)
IPC_STRUCT_TRAITS_MEMBER(disable_non_empty_post_sub_buffers)
IPC_STRUCT_TRAITS_MEMBER(avoid_stencil_buffers)
+ IPC_STRUCT_TRAITS_MEMBER(disable_2d_canvas_copy_on_write)
IPC_STRUCT_TRAITS_MEMBER(major_version)
IPC_STRUCT_TRAITS_MEMBER(minor_version)
@@ -171,4 +173,5 @@ IPC_STRUCT_TRAITS_BEGIN(gpu::gles2::ContextCreationAttribHelper)
IPC_STRUCT_TRAITS_MEMBER(own_offscreen_surface)
IPC_STRUCT_TRAITS_MEMBER(single_buffer)
IPC_STRUCT_TRAITS_MEMBER(color_space)
+ IPC_STRUCT_TRAITS_MEMBER(enable_oop_rasterization)
IPC_STRUCT_TRAITS_END()
diff --git a/chromium/gpu/ipc/common/gpu_feature_info.mojom b/chromium/gpu/ipc/common/gpu_feature_info.mojom
index f7f6c14d5d9..f050dae4bc5 100644
--- a/chromium/gpu/ipc/common/gpu_feature_info.mojom
+++ b/chromium/gpu/ipc/common/gpu_feature_info.mojom
@@ -20,4 +20,17 @@ struct GpuFeatureInfo {
// array should be gpu::NUMBER_OF_GPU_FEATURE_TYPES. This is enforced during
// deserialization.
array<GpuFeatureStatus> status_values;
+
+ // The array contains a set of workaround IDs that apply in the current
+ // platform (OS, GPU, driver, etc). The IDs correspond to the enums defined
+ // in gpu/config/gpu_driver_bug_workaround_type.h.
+ array<int32> enabled_gpu_driver_bug_workarounds;
+
+ // GL extensions disabled by GpuDriverBugWorkarounds, separated by ' '.
+ string disabled_extensions;
+
+  // The array contains a list of gpu driver bug list entry indices that apply
+ // in the current platform. The entries are defined in
+ // gpu/config/gpu_driver_bug_list.json.
+ array<uint32> applied_gpu_driver_bug_list_entries;
};
diff --git a/chromium/gpu/ipc/common/gpu_feature_info_struct_traits.h b/chromium/gpu/ipc/common/gpu_feature_info_struct_traits.h
index 29ed84062b6..bc99f168c0b 100644
--- a/chromium/gpu/ipc/common/gpu_feature_info_struct_traits.h
+++ b/chromium/gpu/ipc/common/gpu_feature_info_struct_traits.h
@@ -61,7 +61,11 @@ struct StructTraits<gpu::mojom::GpuFeatureInfoDataView, gpu::GpuFeatureInfo> {
if (info_status.size() != gpu::NUMBER_OF_GPU_FEATURE_TYPES)
return false;
std::copy(info_status.begin(), info_status.end(), out->status_values);
- return true;
+ return data.ReadEnabledGpuDriverBugWorkarounds(
+ &out->enabled_gpu_driver_bug_workarounds) &&
+ data.ReadDisabledExtensions(&out->disabled_extensions) &&
+ data.ReadAppliedGpuDriverBugListEntries(
+ &out->applied_gpu_driver_bug_list_entries);
}
static std::vector<gpu::GpuFeatureStatus> status_values(
@@ -69,6 +73,21 @@ struct StructTraits<gpu::mojom::GpuFeatureInfoDataView, gpu::GpuFeatureInfo> {
return std::vector<gpu::GpuFeatureStatus>(info.status_values,
std::end(info.status_values));
}
+
+ static const std::vector<int32_t>& enabled_gpu_driver_bug_workarounds(
+ const gpu::GpuFeatureInfo& info) {
+ return info.enabled_gpu_driver_bug_workarounds;
+ }
+
+ static const std::string& disabled_extensions(
+ const gpu::GpuFeatureInfo& info) {
+ return info.disabled_extensions;
+ }
+
+ static const std::vector<uint32_t>& applied_gpu_driver_bug_list_entries(
+ const gpu::GpuFeatureInfo& info) {
+ return info.applied_gpu_driver_bug_list_entries;
+ }
};
} // namespace mojo
diff --git a/chromium/gpu/ipc/common/gpu_info_struct_traits.cc b/chromium/gpu/ipc/common/gpu_info_struct_traits.cc
index b5e56cfaf08..0b38785f75c 100644
--- a/chromium/gpu/ipc/common/gpu_info_struct_traits.cc
+++ b/chromium/gpu/ipc/common/gpu_info_struct_traits.cc
@@ -253,7 +253,7 @@ bool StructTraits<gpu::mojom::GpuInfoDataView, gpu::GPUInfo>::Read(
out->jpeg_decode_accelerator_supported =
data.jpeg_decode_accelerator_supported();
-#if defined(USE_X11) && !defined(OS_CHROMEOS)
+#if defined(USE_X11)
out->system_visual = data.system_visual();
out->rgba_visual = data.rgba_visual();
#endif
diff --git a/chromium/gpu/ipc/common/gpu_info_struct_traits.h b/chromium/gpu/ipc/common/gpu_info_struct_traits.h
index 1e6bee1de11..569a931e937 100644
--- a/chromium/gpu/ipc/common/gpu_info_struct_traits.h
+++ b/chromium/gpu/ipc/common/gpu_info_struct_traits.h
@@ -293,14 +293,14 @@ struct StructTraits<gpu::mojom::GpuInfoDataView, gpu::GPUInfo> {
}
static uint64_t system_visual(const gpu::GPUInfo& input) {
-#if defined(USE_X11) && !defined(OS_CHROMEOS)
+#if defined(USE_X11)
return input.system_visual;
#endif
return 0;
}
static uint64_t rgba_visual(const gpu::GPUInfo& input) {
-#if defined(USE_X11) && !defined(OS_CHROMEOS)
+#if defined(USE_X11)
return input.rgba_visual;
#endif
return 0;
diff --git a/chromium/gpu/ipc/common/gpu_memory_buffer_support.cc b/chromium/gpu/ipc/common/gpu_memory_buffer_support.cc
index 77f98359d26..5ac37c79a30 100644
--- a/chromium/gpu/ipc/common/gpu_memory_buffer_support.cc
+++ b/chromium/gpu/ipc/common/gpu_memory_buffer_support.cc
@@ -42,6 +42,9 @@ bool IsNativeGpuMemoryBufferConfigurationSupported(gfx::BufferFormat format,
format == gfx::BufferFormat::RGBA_F16 ||
format == gfx::BufferFormat::UYVY_422 ||
format == gfx::BufferFormat::YUV_420_BIPLANAR;
+ case gfx::BufferUsage::SCANOUT_VDA_WRITE:
+ case gfx::BufferUsage::SCANOUT_CAMERA_READ_WRITE:
+ return false;
}
NOTREACHED();
return false;
diff --git a/chromium/gpu/ipc/common/gpu_messages.h b/chromium/gpu/ipc/common/gpu_messages.h
index 49cf9396391..5639f4aecbf 100644
--- a/chromium/gpu/ipc/common/gpu_messages.h
+++ b/chromium/gpu/ipc/common/gpu_messages.h
@@ -111,6 +111,9 @@ IPC_SYNC_MESSAGE_CONTROL3_2(GpuChannelMsg_CreateCommandBuffer,
IPC_SYNC_MESSAGE_CONTROL1_0(GpuChannelMsg_DestroyCommandBuffer,
int32_t /* instance_id */)
+IPC_MESSAGE_CONTROL1(GpuChannelMsg_FlushCommandBuffers,
+ std::vector<gpu::FlushParams> /* flush_list */)
+
// Simple NOP message which can be used as fence to ensure all previous sent
// messages have been received.
IPC_SYNC_MESSAGE_CONTROL0_0(GpuChannelMsg_Nop)
@@ -175,11 +178,13 @@ IPC_SYNC_MESSAGE_ROUTED3_1(GpuCommandBufferMsg_WaitForGetOffsetInRange,
// Caller passes its current put offset. Current state (including get offset)
// is returned in shared memory. The input latency info for the current
// frame is also sent to the GPU process.
-IPC_MESSAGE_ROUTED4(GpuCommandBufferMsg_AsyncFlush,
+// TODO(sunnyps): This is an internal implementation detail of the gpu service
+// and is not sent by the client. Remove this once the non-scheduler code path
+// is removed.
+IPC_MESSAGE_ROUTED3(GpuCommandBufferMsg_AsyncFlush,
int32_t /* put_offset */,
- uint32_t /* flush_count */,
- std::vector<ui::LatencyInfo> /* latency_info */,
- std::vector<gpu::SyncToken> /* sync_token_fences */)
+ uint32_t /* flush_id */,
+ std::vector<ui::LatencyInfo> /* latency_info */)
// Sent by the GPU process to display messages in the console.
IPC_MESSAGE_ROUTED1(GpuCommandBufferMsg_ConsoleMsg,
diff --git a/chromium/gpu/ipc/common/gpu_param_traits_macros.h b/chromium/gpu/ipc/common/gpu_param_traits_macros.h
index 3887a014535..1b4c1311c12 100644
--- a/chromium/gpu/ipc/common/gpu_param_traits_macros.h
+++ b/chromium/gpu/ipc/common/gpu_param_traits_macros.h
@@ -11,9 +11,12 @@
#include "gpu/config/gpu_feature_info.h"
#include "gpu/config/gpu_info.h"
#include "gpu/gpu_export.h"
+#include "gpu/ipc/common/flush_params.h"
+#include "gpu/ipc/common/gpu_command_buffer_traits.h"
#include "ipc/ipc_message_macros.h"
#include "ui/gfx/ipc/geometry/gfx_param_traits.h"
#include "ui/gfx/ipc/gfx_param_traits.h"
+#include "ui/latency/ipc/latency_info_param_traits.h"
#include "url/ipc/url_param_traits.h"
#undef IPC_MESSAGE_EXPORT
@@ -38,6 +41,14 @@ IPC_STRUCT_TRAITS_BEGIN(gpu::VideoEncodeAcceleratorSupportedProfile)
IPC_STRUCT_TRAITS_MEMBER(max_framerate_denominator)
IPC_STRUCT_TRAITS_END()
+IPC_STRUCT_TRAITS_BEGIN(gpu::FlushParams)
+ IPC_STRUCT_TRAITS_MEMBER(route_id)
+ IPC_STRUCT_TRAITS_MEMBER(put_offset)
+ IPC_STRUCT_TRAITS_MEMBER(flush_id)
+ IPC_STRUCT_TRAITS_MEMBER(latency_info)
+ IPC_STRUCT_TRAITS_MEMBER(sync_token_fences)
+IPC_STRUCT_TRAITS_END()
+
IPC_ENUM_TRAITS_MAX_VALUE(gpu::SchedulingPriority,
gpu::SchedulingPriority::kLast)
IPC_ENUM_TRAITS_MAX_VALUE(gpu::MemoryAllocation::PriorityCutoff,
@@ -101,7 +112,7 @@ IPC_STRUCT_TRAITS_BEGIN(gpu::GPUInfo)
IPC_STRUCT_TRAITS_MEMBER(video_decode_accelerator_capabilities)
IPC_STRUCT_TRAITS_MEMBER(video_encode_accelerator_supported_profiles)
IPC_STRUCT_TRAITS_MEMBER(jpeg_decode_accelerator_supported)
-#if defined(USE_X11) && !defined(OS_CHROMEOS)
+#if defined(USE_X11)
IPC_STRUCT_TRAITS_MEMBER(system_visual)
IPC_STRUCT_TRAITS_MEMBER(rgba_visual)
#endif
diff --git a/chromium/gpu/ipc/common/mailbox_struct_traits.cc b/chromium/gpu/ipc/common/mailbox_struct_traits.cc
index 4f73996e22d..53520fbaed4 100644
--- a/chromium/gpu/ipc/common/mailbox_struct_traits.cc
+++ b/chromium/gpu/ipc/common/mailbox_struct_traits.cc
@@ -4,20 +4,15 @@
#include "gpu/ipc/common/mailbox_struct_traits.h"
-namespace mojo {
+#include "base/containers/span.h"
-// static
-MailboxName StructTraits<gpu::mojom::MailboxDataView, gpu::Mailbox>::name(
- const gpu::Mailbox& mailbox) {
- return {GL_MAILBOX_SIZE_CHROMIUM, GL_MAILBOX_SIZE_CHROMIUM,
- const_cast<int8_t*>(&mailbox.name[0])};
-}
+namespace mojo {
// static
bool StructTraits<gpu::mojom::MailboxDataView, gpu::Mailbox>::Read(
gpu::mojom::MailboxDataView data,
gpu::Mailbox* out) {
- MailboxName mailbox_name = {0, GL_MAILBOX_SIZE_CHROMIUM, &out->name[0]};
+ base::span<int8_t> mailbox_name(out->name);
return data.ReadName(&mailbox_name);
}
diff --git a/chromium/gpu/ipc/common/mailbox_struct_traits.h b/chromium/gpu/ipc/common/mailbox_struct_traits.h
index 50c5b61bb93..c7dbc90ed15 100644
--- a/chromium/gpu/ipc/common/mailbox_struct_traits.h
+++ b/chromium/gpu/ipc/common/mailbox_struct_traits.h
@@ -5,19 +5,18 @@
#ifndef GPU_IPC_COMMON_MAILBOX_STRUCT_TRAITS_H_
#define GPU_IPC_COMMON_MAILBOX_STRUCT_TRAITS_H_
+#include "base/containers/span.h"
#include "gpu/command_buffer/common/mailbox.h"
#include "gpu/ipc/common/mailbox.mojom-shared.h"
#include "mojo/public/cpp/bindings/array_traits.h"
namespace mojo {
-// A buffer used to read bytes directly from MailboxDataView to gpu::Mailbox
-// name.
-using MailboxName = CArray<int8_t>;
-
template <>
struct StructTraits<gpu::mojom::MailboxDataView, gpu::Mailbox> {
- static MailboxName name(const gpu::Mailbox& mailbox);
+ static base::span<const int8_t> name(const gpu::Mailbox& mailbox) {
+ return mailbox.name;
+ }
static bool Read(gpu::mojom::MailboxDataView data, gpu::Mailbox* out);
};
diff --git a/chromium/gpu/ipc/common/struct_traits_unittest.cc b/chromium/gpu/ipc/common/struct_traits_unittest.cc
index 3846468f04b..680cdb2bedb 100644
--- a/chromium/gpu/ipc/common/struct_traits_unittest.cc
+++ b/chromium/gpu/ipc/common/struct_traits_unittest.cc
@@ -168,7 +168,7 @@ TEST_F(StructTraitsTest, GpuInfo) {
const std::vector<gpu::VideoEncodeAcceleratorSupportedProfile>
video_encode_accelerator_supported_profiles;
const bool jpeg_decode_accelerator_supported = true;
-#if defined(USE_X11) && !defined(OS_CHROMEOS)
+#if defined(USE_X11)
const VisualID system_visual = 0x1234;
const VisualID rgba_visual = 0x5678;
#endif
@@ -213,7 +213,7 @@ TEST_F(StructTraitsTest, GpuInfo) {
input.video_encode_accelerator_supported_profiles =
video_encode_accelerator_supported_profiles;
input.jpeg_decode_accelerator_supported = jpeg_decode_accelerator_supported;
-#if defined(USE_X11) && !defined(OS_CHROMEOS)
+#if defined(USE_X11)
input.system_visual = system_visual;
input.rgba_visual = rgba_visual;
#endif
@@ -288,7 +288,7 @@ TEST_F(StructTraitsTest, GpuInfo) {
video_decode_accelerator_capabilities.supported_profiles.size());
EXPECT_EQ(output.video_encode_accelerator_supported_profiles.size(),
video_encode_accelerator_supported_profiles.size());
-#if defined(USE_X11) && !defined(OS_CHROMEOS)
+#if defined(USE_X11)
EXPECT_EQ(system_visual, output.system_visual);
EXPECT_EQ(rgba_visual, output.rgba_visual);
#endif
diff --git a/chromium/gpu/ipc/gl_in_process_context.cc b/chromium/gpu/ipc/gl_in_process_context.cc
index c854913758b..2c375edc6ea 100644
--- a/chromium/gpu/ipc/gl_in_process_context.cc
+++ b/chromium/gpu/ipc/gl_in_process_context.cc
@@ -171,11 +171,7 @@ bool GLInProcessContextImpl::Initialize(
bind_generates_resource, attribs.lose_context_when_out_of_memory,
support_client_side_arrays, command_buffer_.get()));
- if (!gles2_implementation_->Initialize(
- mem_limits.start_transfer_buffer_size,
- mem_limits.min_transfer_buffer_size,
- mem_limits.max_transfer_buffer_size,
- mem_limits.mapped_memory_reclaim_limit)) {
+ if (!gles2_implementation_->Initialize(mem_limits)) {
return false;
}
diff --git a/chromium/gpu/ipc/gpu_in_process_thread_service.cc b/chromium/gpu/ipc/gpu_in_process_thread_service.cc
index d1aedd68ddb..b0da25e533a 100644
--- a/chromium/gpu/ipc/gpu_in_process_thread_service.cc
+++ b/chromium/gpu/ipc/gpu_in_process_thread_service.cc
@@ -13,8 +13,12 @@ GpuInProcessThreadService::GpuInProcessThreadService(
scoped_refptr<base::SingleThreadTaskRunner> task_runner,
gpu::SyncPointManager* sync_point_manager,
gpu::gles2::MailboxManager* mailbox_manager,
- scoped_refptr<gl::GLShareGroup> share_group)
- : gpu::InProcessCommandBuffer::Service(mailbox_manager, share_group),
+ scoped_refptr<gl::GLShareGroup> share_group,
+ const GpuFeatureInfo& gpu_feature_info)
+ : gpu::InProcessCommandBuffer::Service(GpuPreferences(),
+ mailbox_manager,
+ share_group,
+ gpu_feature_info),
task_runner_(task_runner),
sync_point_manager_(sync_point_manager) {}
diff --git a/chromium/gpu/ipc/gpu_in_process_thread_service.h b/chromium/gpu/ipc/gpu_in_process_thread_service.h
index fdf27034269..464e713faba 100644
--- a/chromium/gpu/ipc/gpu_in_process_thread_service.h
+++ b/chromium/gpu/ipc/gpu_in_process_thread_service.h
@@ -17,14 +17,15 @@ namespace gpu {
// Default Service class when no service is specified. GpuInProcessThreadService
// is used by Mus and unit tests.
class GPU_EXPORT GpuInProcessThreadService
- : public NON_EXPORTED_BASE(gpu::InProcessCommandBuffer::Service),
+ : public gpu::InProcessCommandBuffer::Service,
public base::RefCountedThreadSafe<GpuInProcessThreadService> {
public:
GpuInProcessThreadService(
scoped_refptr<base::SingleThreadTaskRunner> task_runner,
gpu::SyncPointManager* sync_point_manager,
gpu::gles2::MailboxManager* mailbox_manager,
- scoped_refptr<gl::GLShareGroup> share_group);
+ scoped_refptr<gl::GLShareGroup> share_group,
+ const GpuFeatureInfo& gpu_feature_info);
// gpu::InProcessCommandBuffer::Service implementation.
void ScheduleTask(const base::Closure& task) override;
diff --git a/chromium/gpu/ipc/host/gpu_memory_buffer_support.cc b/chromium/gpu/ipc/host/gpu_memory_buffer_support.cc
index 813f3395c7a..2dc7ed71115 100644
--- a/chromium/gpu/ipc/host/gpu_memory_buffer_support.cc
+++ b/chromium/gpu/ipc/host/gpu_memory_buffer_support.cc
@@ -23,7 +23,7 @@ bool AreNativeGpuMemoryBuffersEnabled() {
return false;
}
-#if defined(OS_MACOSX) || defined(OS_CHROMEOS) && defined(ARCH_CPU_X86_FAMILY)
+#if defined(OS_MACOSX)
return !base::CommandLine::ForCurrentProcess()->HasSwitch(
switches::kDisableNativeGpuMemoryBuffers);
#else
@@ -51,6 +51,7 @@ GpuMemoryBufferConfigurationSet GetNativeGpuMemoryBufferConfigurations() {
gfx::BufferFormat::YUV_420_BIPLANAR};
const gfx::BufferUsage kNativeUsages[] = {
gfx::BufferUsage::GPU_READ, gfx::BufferUsage::SCANOUT,
+ gfx::BufferUsage::SCANOUT_CAMERA_READ_WRITE,
gfx::BufferUsage::GPU_READ_CPU_READ_WRITE,
gfx::BufferUsage::GPU_READ_CPU_READ_WRITE_PERSISTENT};
for (auto format : kNativeFormats) {
@@ -74,7 +75,8 @@ GpuMemoryBufferConfigurationSet GetNativeGpuMemoryBufferConfigurations() {
gfx::BufferFormat::YVU_420, gfx::BufferFormat::YUV_420_BIPLANAR};
const gfx::BufferUsage kGPUReadWriteUsages[] = {
gfx::BufferUsage::GPU_READ, gfx::BufferUsage::SCANOUT,
- gfx::BufferUsage::SCANOUT_CPU_READ_WRITE};
+ gfx::BufferUsage::SCANOUT_CPU_READ_WRITE,
+ gfx::BufferUsage::SCANOUT_VDA_WRITE};
for (auto format : kGPUReadWriteFormats) {
for (auto usage : kGPUReadWriteUsages) {
if (IsNativeGpuMemoryBufferConfigurationSupported(format, usage))
diff --git a/chromium/gpu/ipc/host/shader_disk_cache.cc b/chromium/gpu/ipc/host/shader_disk_cache.cc
index 144f9af4783..2daecb53414 100644
--- a/chromium/gpu/ipc/host/shader_disk_cache.cc
+++ b/chromium/gpu/ipc/host/shader_disk_cache.cc
@@ -6,8 +6,9 @@
#include "base/macros.h"
#include "base/memory/ptr_util.h"
-#include "base/single_thread_task_runner.h"
+#include "base/sys_info.h"
#include "base/threading/thread_checker.h"
+#include "build/build_config.h"
#include "gpu/command_buffer/common/constants.h"
#include "net/base/cache_type.h"
#include "net/base/io_buffer.h"
@@ -418,9 +419,7 @@ void ShaderClearHelper::DoClearShaderCache(int rv) {
////////////////////////////////////////////////////////////////////////////////
// ShaderCacheFactory
-ShaderCacheFactory::ShaderCacheFactory(
- scoped_refptr<base::SingleThreadTaskRunner> cache_task_runner)
- : cache_task_runner_(std::move(cache_task_runner)) {}
+ShaderCacheFactory::ShaderCacheFactory() {}
ShaderCacheFactory::~ShaderCacheFactory() {}
@@ -451,7 +450,7 @@ scoped_refptr<ShaderDiskCache> ShaderCacheFactory::GetByPath(
return iter->second;
ShaderDiskCache* cache = new ShaderDiskCache(this, path);
- cache->Init(cache_task_runner_);
+ cache->Init();
return cache;
}
@@ -544,8 +543,7 @@ ShaderDiskCache::~ShaderDiskCache() {
factory_->RemoveFromCache(cache_path_);
}
-void ShaderDiskCache::Init(
- scoped_refptr<base::SingleThreadTaskRunner> cache_task_runner) {
+void ShaderDiskCache::Init() {
if (is_initialized_) {
NOTREACHED(); // can't initialize disk cache twice.
return;
@@ -554,8 +552,7 @@ void ShaderDiskCache::Init(
int rv = disk_cache::CreateCacheBackend(
net::SHADER_CACHE, net::CACHE_BACKEND_DEFAULT,
- cache_path_.Append(kGpuCachePath),
- gpu::kDefaultMaxProgramCacheMemoryBytes, true, cache_task_runner, NULL,
+ cache_path_.Append(kGpuCachePath), CacheSizeBytes(), true, nullptr,
&backend_, base::Bind(&ShaderDiskCache::CacheCreatedCallback, this));
if (rv == net::OK)
@@ -637,4 +634,16 @@ int ShaderDiskCache::SetCacheCompleteCallback(
return net::ERR_IO_PENDING;
}
+// static
+size_t ShaderDiskCache::CacheSizeBytes() {
+#if !defined(OS_ANDROID)
+ return kDefaultMaxProgramCacheMemoryBytes;
+#else // !defined(OS_ANDROID)
+ if (!base::SysInfo::IsLowEndDevice())
+ return kDefaultMaxProgramCacheMemoryBytes;
+ else
+ return kLowEndMaxProgramCacheMemoryBytes;
+#endif // !defined(OS_ANDROID)
+}
+
} // namespace gpu
diff --git a/chromium/gpu/ipc/host/shader_disk_cache.h b/chromium/gpu/ipc/host/shader_disk_cache.h
index 32decebf1b1..1e8756e6db1 100644
--- a/chromium/gpu/ipc/host/shader_disk_cache.h
+++ b/chromium/gpu/ipc/host/shader_disk_cache.h
@@ -15,7 +15,6 @@
#include "base/files/file_path.h"
#include "base/macros.h"
#include "base/memory/ref_counted.h"
-#include "base/single_thread_task_runner.h"
#include "base/threading/thread_checker.h"
#include "net/disk_cache/disk_cache.h"
@@ -65,6 +64,9 @@ class ShaderDiskCache : public base::RefCounted<ShaderDiskCache> {
// been written to the cache.
int SetCacheCompleteCallback(const net::CompletionCallback& callback);
+ // Returns the size which should be used for the shader disk cache.
+ static size_t CacheSizeBytes();
+
private:
friend class base::RefCounted<ShaderDiskCache>;
friend class ShaderDiskCacheEntry;
@@ -75,7 +77,7 @@ class ShaderDiskCache : public base::RefCounted<ShaderDiskCache> {
const base::FilePath& cache_path);
~ShaderDiskCache();
- void Init(scoped_refptr<base::SingleThreadTaskRunner> cache_task_runner);
+ void Init();
void CacheCreatedCallback(int rv);
disk_cache::Backend* backend() { return backend_.get(); }
@@ -103,10 +105,9 @@ class ShaderDiskCache : public base::RefCounted<ShaderDiskCache> {
// ShaderCacheFactory maintains a cache of ShaderDiskCache objects
// so we only create one per profile directory.
-class ShaderCacheFactory : NON_EXPORTED_BASE(public base::ThreadChecker) {
+class ShaderCacheFactory : public base::ThreadChecker {
public:
- explicit ShaderCacheFactory(
- scoped_refptr<base::SingleThreadTaskRunner> cache_task_runner);
+ ShaderCacheFactory();
~ShaderCacheFactory();
// Clear the shader disk cache for the given |path|. This supports unbounded
@@ -143,8 +144,6 @@ class ShaderCacheFactory : NON_EXPORTED_BASE(public base::ThreadChecker) {
private:
friend class ShaderClearHelper;
- scoped_refptr<base::SingleThreadTaskRunner> cache_task_runner_;
-
scoped_refptr<ShaderDiskCache> GetByPath(const base::FilePath& path);
void CacheCleared(const base::FilePath& path);
diff --git a/chromium/gpu/ipc/host/shader_disk_cache_unittest.cc b/chromium/gpu/ipc/host/shader_disk_cache_unittest.cc
index 209b6304c67..80ecb3b87ab 100644
--- a/chromium/gpu/ipc/host/shader_disk_cache_unittest.cc
+++ b/chromium/gpu/ipc/host/shader_disk_cache_unittest.cc
@@ -2,13 +2,11 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#include "gpu/ipc/host/shader_disk_cache.h"
#include "base/files/scoped_temp_dir.h"
#include "base/macros.h"
#include "base/memory/ptr_util.h"
-#include "base/message_loop/message_loop.h"
-#include "base/threading/thread.h"
-#include "base/threading/thread_task_runner_handle.h"
-#include "gpu/ipc/host/shader_disk_cache.h"
+#include "base/test/scoped_task_environment.h"
#include "net/base/test_completion_callback.h"
#include "testing/gtest/include/gtest/gtest.h"
@@ -23,14 +21,7 @@ const char kCacheValue[] = "cached value";
class ShaderDiskCacheTest : public testing::Test {
public:
- ShaderDiskCacheTest()
- : cache_thread_("CacheThread") {
- base::Thread::Options options;
- options.message_loop_type = base::MessageLoop::TYPE_IO;
- CHECK(cache_thread_.StartWithOptions(options));
- factory_ =
- base::MakeUnique<ShaderCacheFactory>(cache_thread_.task_runner());
- }
+ ShaderDiskCacheTest() {}
~ShaderDiskCacheTest() override {}
@@ -38,18 +29,17 @@ class ShaderDiskCacheTest : public testing::Test {
void InitCache() {
ASSERT_TRUE(temp_dir_.CreateUniqueTempDir());
- factory_->SetCacheInfo(kDefaultClientId, cache_path());
+ factory_.SetCacheInfo(kDefaultClientId, cache_path());
}
- ShaderCacheFactory* factory() { return factory_.get(); }
+ ShaderCacheFactory* factory() { return &factory_; }
private:
- void TearDown() override { factory_->RemoveCacheInfo(kDefaultClientId); }
+ void TearDown() override { factory_.RemoveCacheInfo(kDefaultClientId); }
- std::unique_ptr<ShaderCacheFactory> factory_;
+ base::test::ScopedTaskEnvironment scoped_task_environment_;
base::ScopedTempDir temp_dir_;
- base::Thread cache_thread_;
- base::MessageLoopForIO message_loop_;
+ ShaderCacheFactory factory_;
DISALLOW_COPY_AND_ASSIGN(ShaderDiskCacheTest);
};
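
The test fixture above swaps a dedicated IO thread plus MessageLoopForIO for a single base::test::ScopedTaskEnvironment. A minimal sketch of that fixture shape, with placeholder names (MyCacheTest is not part of this patch):

  class MyCacheTest : public testing::Test {
   protected:
    // Provides the main-thread task runner the cache code now relies on,
    // replacing the manually managed cache thread and message loop.
    base::test::ScopedTaskEnvironment scoped_task_environment_;
    base::ScopedTempDir temp_dir_;
  };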
diff --git a/chromium/gpu/ipc/in_process_command_buffer.cc b/chromium/gpu/ipc/in_process_command_buffer.cc
index f5ac52f1744..15b4c537bd1 100644
--- a/chromium/gpu/ipc/in_process_command_buffer.cc
+++ b/chromium/gpu/ipc/in_process_command_buffer.cc
@@ -11,6 +11,7 @@
#include <set>
#include <utility>
+#include "base/atomic_sequence_num.h"
#include "base/bind.h"
#include "base/bind_helpers.h"
#include "base/command_line.h"
@@ -24,6 +25,7 @@
#include "base/sequence_checker.h"
#include "base/single_thread_task_runner.h"
#include "base/threading/thread_task_runner_handle.h"
+#include "build/build_config.h"
#include "gpu/command_buffer/client/gpu_control_client.h"
#include "gpu/command_buffer/client/gpu_memory_buffer_manager.h"
#include "gpu/command_buffer/common/gpu_memory_buffer_support.h"
@@ -65,6 +67,7 @@ namespace gpu {
namespace {
base::AtomicSequenceNumber g_next_command_buffer_id;
+base::AtomicSequenceNumber g_next_image_id;
template <typename T>
static void RunTaskWithResult(base::Callback<T(void)> task,
@@ -83,10 +86,16 @@ class GpuInProcessThreadHolder : public base::Thread {
~GpuInProcessThreadHolder() override { Stop(); }
+ void SetGpuFeatureInfo(const GpuFeatureInfo& gpu_feature_info) {
+ DCHECK(!gpu_thread_service_.get());
+ gpu_feature_info_ = gpu_feature_info;
+ }
+
const scoped_refptr<InProcessCommandBuffer::Service>& GetGpuThreadService() {
if (!gpu_thread_service_) {
gpu_thread_service_ = new GpuInProcessThreadService(
- task_runner(), sync_point_manager_.get(), nullptr, nullptr);
+ task_runner(), sync_point_manager_.get(), nullptr, nullptr,
+ gpu_feature_info_);
}
return gpu_thread_service_;
}
@@ -94,6 +103,7 @@ class GpuInProcessThreadHolder : public base::Thread {
private:
std::unique_ptr<SyncPointManager> sync_point_manager_;
scoped_refptr<InProcessCommandBuffer::Service> gpu_thread_service_;
+ GpuFeatureInfo gpu_feature_info_;
};
base::LazyInstance<GpuInProcessThreadHolder>::DestructorAtExit
@@ -125,20 +135,15 @@ scoped_refptr<InProcessCommandBuffer::Service> GetInitialService(
} // anonymous namespace
-InProcessCommandBuffer::Service::Service(const GpuPreferences& gpu_preferences)
- : Service(gpu_preferences, nullptr, nullptr) {}
-
-InProcessCommandBuffer::Service::Service(
- gpu::gles2::MailboxManager* mailbox_manager,
- scoped_refptr<gl::GLShareGroup> share_group)
- : Service(GpuPreferences(), mailbox_manager, share_group) {}
-
InProcessCommandBuffer::Service::Service(
const GpuPreferences& gpu_preferences,
gpu::gles2::MailboxManager* mailbox_manager,
- scoped_refptr<gl::GLShareGroup> share_group)
+ scoped_refptr<gl::GLShareGroup> share_group,
+ const GpuFeatureInfo& gpu_feature_info)
: gpu_preferences_(gpu_preferences),
- gpu_driver_bug_workarounds_(base::CommandLine::ForCurrentProcess()),
+ gpu_feature_info_(gpu_feature_info),
+ gpu_driver_bug_workarounds_(
+ gpu_feature_info.enabled_gpu_driver_bug_workarounds),
mailbox_manager_(mailbox_manager),
share_group_(share_group),
shader_translator_cache_(gpu_preferences_) {
@@ -206,13 +211,25 @@ InProcessCommandBuffer::InProcessCommandBuffer(
client_thread_weak_ptr_factory_(this),
gpu_thread_weak_ptr_factory_(this) {
DCHECK(service_.get());
- next_image_id_.GetNext();
}
InProcessCommandBuffer::~InProcessCommandBuffer() {
Destroy();
}
+// static
+void InProcessCommandBuffer::InitializeDefaultServiceForTesting(
+ const GpuFeatureInfo& gpu_feature_info) {
+ // Call base::ThreadTaskRunnerHandle::IsSet() to ensure that it is
+ // instantiated before we create the GPU thread, otherwise shutdown order will
+ // delete the ThreadTaskRunnerHandle before the GPU thread's message loop,
+ // and when the message loop is shut down, it will recreate
+ // ThreadTaskRunnerHandle, which will re-add a new task to the AtExitManager,
+ // which causes a deadlock because it's already locked.
+ base::ThreadTaskRunnerHandle::IsSet();
+ g_default_service.Get().SetGpuFeatureInfo(gpu_feature_info);
+}
+
bool InProcessCommandBuffer::MakeCurrent() {
CheckSequencedThread();
command_buffer_lock_.AssertAcquired();
@@ -297,8 +314,9 @@ bool InProcessCommandBuffer::InitializeOnGpuThread(
params.context_group
? params.context_group->decoder_->GetContextGroup()
: new gles2::ContextGroup(
- service_->gpu_preferences(), service_->mailbox_manager(),
- nullptr /* memory_tracker */,
+ service_->gpu_preferences(),
+ gles2::PassthroughCommandDecoderSupported(),
+ service_->mailbox_manager(), nullptr /* memory_tracker */,
service_->shader_translator_cache(),
service_->framebuffer_completeness_cache(), feature_info,
bind_generates_resource, service_->image_manager(),
@@ -353,18 +371,19 @@ bool InProcessCommandBuffer::InitializeOnGpuThread(
if (!context_.get()) {
context_ = gl::init::CreateGLContext(
gl_share_group_.get(), surface_.get(),
- GenerateGLContextAttribs(
- params.attribs, decoder_->GetContextGroup()->gpu_preferences()));
+ GenerateGLContextAttribs(params.attribs,
+ decoder_->GetContextGroup()));
+ if (context_.get()) {
+ service_->gpu_feature_info().ApplyToGLContext(context_.get());
+ }
gl_share_group_->SetSharedContext(surface_.get(), context_.get());
}
context_ = new GLContextVirtual(gl_share_group_.get(), context_.get(),
decoder_->AsWeakPtr());
if (context_->Initialize(
- surface_.get(),
- GenerateGLContextAttribs(
- params.attribs,
- decoder_->GetContextGroup()->gpu_preferences()))) {
+ surface_.get(), GenerateGLContextAttribs(
+ params.attribs, decoder_->GetContextGroup()))) {
VLOG(1) << "Created virtual GL context.";
} else {
context_ = NULL;
@@ -372,8 +391,10 @@ bool InProcessCommandBuffer::InitializeOnGpuThread(
} else {
context_ = gl::init::CreateGLContext(
gl_share_group_.get(), surface_.get(),
- GenerateGLContextAttribs(
- params.attribs, decoder_->GetContextGroup()->gpu_preferences()));
+ GenerateGLContextAttribs(params.attribs, decoder_->GetContextGroup()));
+ if (context_.get()) {
+ service_->gpu_feature_info().ApplyToGLContext(context_.get());
+ }
}
if (!context_.get()) {
@@ -722,7 +743,7 @@ int32_t InProcessCommandBuffer::CreateImage(ClientBuffer buffer,
reinterpret_cast<gfx::GpuMemoryBuffer*>(buffer);
DCHECK(gpu_memory_buffer);
- int32_t new_id = next_image_id_.GetNext();
+ int32_t new_id = g_next_image_id.GetNext() + 1;
DCHECK(gpu::IsImageFromGpuMemoryBufferFormatSupported(
gpu_memory_buffer->GetFormat(), capabilities_));
@@ -754,8 +775,7 @@ int32_t InProcessCommandBuffer::CreateImage(ClientBuffer buffer,
if (fence_sync) {
flushed_fence_sync_release_ = fence_sync;
- SyncToken sync_token(GetNamespaceID(), GetStreamId(), GetCommandBufferID(),
- fence_sync);
+ SyncToken sync_token(GetNamespaceID(), 0, GetCommandBufferID(), fence_sync);
sync_token.SetVerifyFlush();
gpu_memory_buffer_manager_->SetDestructionSyncToken(gpu_memory_buffer,
sync_token);
@@ -851,8 +871,7 @@ void InProcessCommandBuffer::CacheShader(const std::string& key,
}
void InProcessCommandBuffer::OnFenceSyncRelease(uint64_t release) {
- SyncToken sync_token(GetNamespaceID(), GetStreamId(), GetCommandBufferID(),
- release);
+ SyncToken sync_token(GetNamespaceID(), 0, GetCommandBufferID(), release);
gles2::MailboxManager* mailbox_manager =
decoder_->GetContextGroup()->mailbox_manager();
@@ -971,11 +990,7 @@ CommandBufferId InProcessCommandBuffer::GetCommandBufferID() const {
return command_buffer_id_;
}
-int32_t InProcessCommandBuffer::GetStreamId() const {
- return 0;
-}
-
-void InProcessCommandBuffer::FlushOrderingBarrierOnStream(int32_t stream_id) {
+void InProcessCommandBuffer::FlushPendingWork() {
// This is only relevant for out-of-process command buffers.
}
@@ -1045,6 +1060,10 @@ const gles2::FeatureInfo* InProcessCommandBuffer::GetFeatureInfo() const {
return context_group_->feature_info();
}
+const GpuPreferences& InProcessCommandBuffer::GetGpuPreferences() const {
+ return context_group_->gpu_preferences();
+}
+
void InProcessCommandBuffer::SetLatencyInfoCallback(
const LatencyInfoCallback& callback) {
latency_info_callback_ = callback;
diff --git a/chromium/gpu/ipc/in_process_command_buffer.h b/chromium/gpu/ipc/in_process_command_buffer.h
index c8ca5073ac9..6a808610dc9 100644
--- a/chromium/gpu/ipc/in_process_command_buffer.h
+++ b/chromium/gpu/ipc/in_process_command_buffer.h
@@ -12,7 +12,6 @@
#include <memory>
#include <vector>
-#include "base/atomic_sequence_num.h"
#include "base/callback.h"
#include "base/compiler_specific.h"
#include "base/macros.h"
@@ -32,6 +31,7 @@
#include "gpu/command_buffer/service/image_manager.h"
#include "gpu/command_buffer/service/service_discardable_manager.h"
#include "gpu/config/gpu_driver_bug_workarounds.h"
+#include "gpu/config/gpu_feature_info.h"
#include "gpu/gpu_export.h"
#include "gpu/ipc/service/image_transport_surface_delegate.h"
#include "ui/gfx/gpu_memory_buffer.h"
@@ -129,8 +129,7 @@ class GPU_EXPORT InProcessCommandBuffer : public CommandBuffer,
void EnsureWorkVisible() override;
CommandBufferNamespace GetNamespaceID() const override;
CommandBufferId GetCommandBufferID() const override;
- int32_t GetStreamId() const override;
- void FlushOrderingBarrierOnStream(int32_t stream_id) override;
+ void FlushPendingWork() override;
uint64_t GenerateFenceSyncRelease() override;
bool IsFenceSyncRelease(uint64_t release) override;
bool IsFenceSyncFlushed(uint64_t release) override;
@@ -163,6 +162,7 @@ class GPU_EXPORT InProcessCommandBuffer : public CommandBuffer,
#endif
void DidSwapBuffersComplete(SwapBuffersCompleteParams params) override;
const gles2::FeatureInfo* GetFeatureInfo() const override;
+ const GpuPreferences& GetGpuPreferences() const override;
void SetLatencyInfoCallback(const LatencyInfoCallback& callback) override;
void UpdateVSyncParameters(base::TimeTicks timebase,
base::TimeDelta interval) override;
@@ -186,12 +186,20 @@ class GPU_EXPORT InProcessCommandBuffer : public CommandBuffer,
void UpdateVSyncParametersOnOriginThread(base::TimeTicks timebase,
base::TimeDelta interval);
+ // Normally the GpuFeatureInfo from GpuInit is used to create a gpu thread
+ // service. In certain tests GpuInit is not part of the execution path, so
+ // the test suite needs to compute it and pass it to the default service.
+ // See "gpu/ipc/in_process_command_buffer.cc".
+ static void InitializeDefaultServiceForTesting(
+ const GpuFeatureInfo& gpu_feature_info);
+
// The serializer interface to the GPU service (i.e. thread).
class Service {
public:
- explicit Service(const gpu::GpuPreferences& gpu_preferences);
- Service(gles2::MailboxManager* mailbox_manager,
- scoped_refptr<gl::GLShareGroup> share_group);
+ Service(const GpuPreferences& gpu_preferences,
+ gles2::MailboxManager* mailbox_manager,
+ scoped_refptr<gl::GLShareGroup> share_group,
+ const GpuFeatureInfo& gpu_feature_info);
virtual ~Service();
@@ -210,6 +218,7 @@ class GPU_EXPORT InProcessCommandBuffer : public CommandBuffer,
virtual bool BlockThreadOnWaitSyncToken() const = 0;
const GpuPreferences& gpu_preferences();
+ const GpuFeatureInfo& gpu_feature_info() { return gpu_feature_info_; }
const GpuDriverBugWorkarounds& gpu_driver_bug_workarounds();
scoped_refptr<gl::GLShareGroup> share_group();
gles2::MailboxManager* mailbox_manager() { return mailbox_manager_; }
@@ -226,11 +235,8 @@ class GPU_EXPORT InProcessCommandBuffer : public CommandBuffer,
}
protected:
- Service(const gpu::GpuPreferences& gpu_preferences,
- gles2::MailboxManager* mailbox_manager,
- scoped_refptr<gl::GLShareGroup> share_group);
-
const GpuPreferences gpu_preferences_;
+ const GpuFeatureInfo gpu_feature_info_;
const GpuDriverBugWorkarounds gpu_driver_bug_workarounds_;
std::unique_ptr<gles2::MailboxManager> owned_mailbox_manager_;
gles2::MailboxManager* mailbox_manager_ = nullptr;
@@ -334,7 +340,6 @@ class GPU_EXPORT InProcessCommandBuffer : public CommandBuffer,
int32_t last_put_offset_;
gpu::Capabilities capabilities_;
GpuMemoryBufferManager* gpu_memory_buffer_manager_;
- base::AtomicSequenceNumber next_image_id_;
uint64_t next_fence_sync_release_;
uint64_t flushed_fence_sync_release_;
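
Per the comment added above, test suites that never run GpuInit have to seed the default in-process service themselves before the first command buffer is created. A hypothetical test-suite hook might look like this (only InitializeDefaultServiceForTesting() and GpuFeatureInfo come from this patch; the rest is illustrative):

  void MyGpuTestSuite::Initialize() {
    // In production this GpuFeatureInfo would be computed by GpuInit.
    gpu::GpuFeatureInfo gpu_feature_info;
    gpu::InProcessCommandBuffer::InitializeDefaultServiceForTesting(
        gpu_feature_info);
  }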
diff --git a/chromium/gpu/ipc/service/BUILD.gn b/chromium/gpu/ipc/service/BUILD.gn
index faeaefd800b..983715b6773 100644
--- a/chromium/gpu/ipc/service/BUILD.gn
+++ b/chromium/gpu/ipc/service/BUILD.gn
@@ -80,8 +80,6 @@ target(link_target_type, "ipc_service_sources") {
libs = []
if (is_win) {
sources += [
- "child_window_surface_win.cc",
- "child_window_surface_win.h",
"child_window_win.cc",
"child_window_win.h",
"direct_composition_child_surface_win.cc",
@@ -128,6 +126,9 @@ target(link_target_type, "ipc_service_sources") {
if (use_ozone) {
deps += [ "//ui/ozone" ]
}
+ if (is_fuchsia) {
+ sources += [ "image_transport_surface_fuchsia.cc" ]
+ }
}
source_set("test_support") {
diff --git a/chromium/gpu/ipc/service/OWNERS b/chromium/gpu/ipc/service/OWNERS
index caa4fba639c..c9aff041f6f 100644
--- a/chromium/gpu/ipc/service/OWNERS
+++ b/chromium/gpu/ipc/service/OWNERS
@@ -1,2 +1,5 @@
# Mac stuff.
per-file *_mac*=ccameron@chromium.org
+
+# Win stuff
+per-file *_win*=stanisc@chromium.org
diff --git a/chromium/gpu/ipc/service/child_window_surface_win.cc b/chromium/gpu/ipc/service/child_window_surface_win.cc
deleted file mode 100644
index 36b65a7ef87..00000000000
--- a/chromium/gpu/ipc/service/child_window_surface_win.cc
+++ /dev/null
@@ -1,176 +0,0 @@
-// Copyright 2015 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "gpu/ipc/service/child_window_surface_win.h"
-
-#include <memory>
-
-#include "base/compiler_specific.h"
-#include "base/memory/ptr_util.h"
-#include "gpu/ipc/common/gpu_messages.h"
-#include "gpu/ipc/service/gpu_channel_manager.h"
-#include "gpu/ipc/service/gpu_channel_manager_delegate.h"
-#include "ui/display/display_switches.h"
-#include "ui/gfx/native_widget_types.h"
-#include "ui/gl/egl_util.h"
-#include "ui/gl/gl_context.h"
-#include "ui/gl/gl_surface_egl.h"
-#include "ui/gl/scoped_make_current.h"
-
-namespace gpu {
-
-ChildWindowSurfaceWin::ChildWindowSurfaceWin(
- std::unique_ptr<gfx::VSyncProvider> vsync_provider,
- base::WeakPtr<ImageTransportSurfaceDelegate> delegate,
- HWND parent_window)
- : gl::NativeViewGLSurfaceEGL(0, std::move(vsync_provider)),
- child_window_(delegate, parent_window),
- alpha_(true),
- first_swap_(true) {
- // Don't use EGL_ANGLE_window_fixed_size so that we can avoid recreating the
- // window surface, which can cause flicker on DirectComposition.
- enable_fixed_size_angle_ = false;
-}
-
-EGLConfig ChildWindowSurfaceWin::GetConfig() {
- if (!config_) {
- int alpha_size = alpha_ ? 8 : EGL_DONT_CARE;
- int bits_per_channel =
- base::CommandLine::ForCurrentProcess()->HasSwitch(switches::kEnableHDR)
- ? 16
- : 8;
-
- EGLint config_attribs[] = {EGL_ALPHA_SIZE,
- alpha_size,
- EGL_BLUE_SIZE,
- bits_per_channel,
- EGL_GREEN_SIZE,
- bits_per_channel,
- EGL_RED_SIZE,
- bits_per_channel,
- EGL_RENDERABLE_TYPE,
- EGL_OPENGL_ES2_BIT,
- EGL_SURFACE_TYPE,
- EGL_WINDOW_BIT | EGL_PBUFFER_BIT,
- EGL_NONE};
-
- EGLDisplay display = GetHardwareDisplay();
- EGLint num_configs;
- if (!eglChooseConfig(display, config_attribs, NULL, 0, &num_configs)) {
- LOG(ERROR) << "eglChooseConfig failed with error "
- << ui::GetLastEGLErrorString();
- return NULL;
- }
- std::vector<EGLConfig> configs(num_configs);
- if (!eglChooseConfig(display, config_attribs, configs.data(), num_configs,
- &num_configs)) {
- LOG(ERROR) << "eglChooseConfig failed with error "
- << ui::GetLastEGLErrorString();
- return NULL;
- }
- config_ = configs[0];
- for (int i = 0; i < num_configs; i++) {
- EGLint red_bits;
- eglGetConfigAttrib(display, configs[i], EGL_RED_SIZE, &red_bits);
- // Try to pick a configuration with the right number of bits rather
- // than one that just has enough bits.
- if (red_bits == bits_per_channel) {
- config_ = configs[i];
- break;
- }
- }
- }
-
- return config_;
-}
-
-bool ChildWindowSurfaceWin::InitializeNativeWindow() {
- if (window_)
- return true;
-
- bool result = child_window_.Initialize();
- window_ = child_window_.window();
- return result;
-}
-
-bool ChildWindowSurfaceWin::Resize(const gfx::Size& size,
- float scale_factor,
- bool has_alpha) {
- if (!SupportsPostSubBuffer()) {
- if (!MoveWindow(window_, 0, 0, size.width(), size.height(), FALSE)) {
- return false;
- }
- alpha_ = has_alpha;
- return gl::NativeViewGLSurfaceEGL::Resize(size, scale_factor, has_alpha);
- } else {
- if (size == GetSize() && has_alpha == alpha_)
- return true;
-
- // Force a resize and redraw (but not a move, activate, etc.).
- if (!SetWindowPos(window_, nullptr, 0, 0, size.width(), size.height(),
- SWP_NOMOVE | SWP_NOACTIVATE | SWP_NOCOPYBITS |
- SWP_NOOWNERZORDER | SWP_NOZORDER)) {
- return false;
- }
- size_ = size;
- if (has_alpha == alpha_) {
- // A 0-size PostSubBuffer doesn't swap but forces the swap chain to resize
- // to match the window.
- PostSubBuffer(0, 0, 0, 0);
- } else {
- alpha_ = has_alpha;
- config_ = nullptr;
-
- std::unique_ptr<ui::ScopedMakeCurrent> scoped_make_current;
- gl::GLContext* current_context = gl::GLContext::GetCurrent();
- bool was_current = current_context && current_context->IsCurrent(this);
- if (was_current) {
- scoped_make_current.reset(
- new ui::ScopedMakeCurrent(current_context, this));
- current_context->ReleaseCurrent(this);
- }
-
- Destroy();
-
- if (!Initialize()) {
- LOG(ERROR) << "Failed to resize window.";
- return false;
- }
- }
- return true;
- }
-}
-
-gfx::SwapResult ChildWindowSurfaceWin::SwapBuffers() {
- gfx::SwapResult result = NativeViewGLSurfaceEGL::SwapBuffers();
- // Force the driver to finish drawing before clearing the contents to
- // transparent, to reduce or eliminate the period of time where the contents
- // have flashed black.
- if (first_swap_) {
- glFinish();
- first_swap_ = false;
- }
- child_window_.ClearInvalidContents();
- return result;
-}
-
-gfx::SwapResult ChildWindowSurfaceWin::PostSubBuffer(int x,
- int y,
- int width,
- int height) {
- gfx::SwapResult result =
- NativeViewGLSurfaceEGL::PostSubBuffer(x, y, width, height);
- child_window_.ClearInvalidContents();
- return result;
-}
-
-void ChildWindowSurfaceWin::WaitForSnapshotRendering() {
- DCHECK(gl::GLContext::GetCurrent()->IsCurrent(this));
- glFinish();
-}
-
-ChildWindowSurfaceWin::~ChildWindowSurfaceWin() {
-}
-
-} // namespace gpu
diff --git a/chromium/gpu/ipc/service/child_window_surface_win.h b/chromium/gpu/ipc/service/child_window_surface_win.h
deleted file mode 100644
index 77b62bd9f92..00000000000
--- a/chromium/gpu/ipc/service/child_window_surface_win.h
+++ /dev/null
@@ -1,46 +0,0 @@
-// Copyright 2015 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef GPU_IPC_SERVICE_CHILD_WINDOW_SURFACE_WIN_H_
-#define GPU_IPC_SERVICE_CHILD_WINDOW_SURFACE_WIN_H_
-
-#include "base/memory/weak_ptr.h"
-#include "gpu/ipc/service/child_window_win.h"
-#include "gpu/ipc/service/image_transport_surface_delegate.h"
-#include "ui/gl/gl_surface_egl.h"
-
-#include <windows.h>
-
-namespace gpu {
-
-class ChildWindowSurfaceWin : public gl::NativeViewGLSurfaceEGL {
- public:
- ChildWindowSurfaceWin(std::unique_ptr<gfx::VSyncProvider> vsync_provider,
- base::WeakPtr<ImageTransportSurfaceDelegate> delegate,
- HWND parent_window);
-
- // GLSurface implementation.
- EGLConfig GetConfig() override;
- bool Resize(const gfx::Size& size,
- float scale_factor,
- bool has_alpha) override;
- bool InitializeNativeWindow() override;
- gfx::SwapResult SwapBuffers() override;
- gfx::SwapResult PostSubBuffer(int x, int y, int width, int height) override;
- void WaitForSnapshotRendering() override;
-
- protected:
- ~ChildWindowSurfaceWin() override;
-
- private:
- ChildWindowWin child_window_;
- bool alpha_;
- bool first_swap_;
-
- DISALLOW_COPY_AND_ASSIGN(ChildWindowSurfaceWin);
-};
-
-} // namespace gpu
-
-#endif // GPU_IPC_SERVICE_CHILD_WINDOW_SURFACE_WIN_H_
diff --git a/chromium/gpu/ipc/service/direct_composition_child_surface_win.cc b/chromium/gpu/ipc/service/direct_composition_child_surface_win.cc
index e93aae934e9..1a7a6a3c26b 100644
--- a/chromium/gpu/ipc/service/direct_composition_child_surface_win.cc
+++ b/chromium/gpu/ipc/service/direct_composition_child_surface_win.cc
@@ -40,10 +40,12 @@ IDCompositionSurface* g_current_surface;
DirectCompositionChildSurfaceWin::DirectCompositionChildSurfaceWin(
const gfx::Size& size,
+ bool is_hdr,
bool has_alpha,
bool enable_dc_layers)
: gl::GLSurfaceEGL(),
size_(size),
+ is_hdr_(is_hdr),
has_alpha_(has_alpha),
enable_dc_layers_(enable_dc_layers) {}
@@ -79,15 +81,13 @@ void DirectCompositionChildSurfaceWin::ReleaseCurrentSurface() {
swap_chain_.Reset();
}
-void DirectCompositionChildSurfaceWin::InitializeSurface() {
+bool DirectCompositionChildSurfaceWin::InitializeSurface() {
TRACE_EVENT1("gpu", "DirectCompositionChildSurfaceWin::InitializeSurface()",
"enable_dc_layers_", enable_dc_layers_);
DCHECK(!dcomp_surface_);
DCHECK(!swap_chain_);
DXGI_FORMAT output_format =
- base::CommandLine::ForCurrentProcess()->HasSwitch(switches::kEnableHDR)
- ? DXGI_FORMAT_R16G16B16A16_FLOAT
- : DXGI_FORMAT_B8G8R8A8_UNORM;
+ is_hdr_ ? DXGI_FORMAT_R16G16B16A16_FLOAT : DXGI_FORMAT_B8G8R8A8_UNORM;
if (enable_dc_layers_) {
// Always treat as premultiplied, because an underlay could cause it to
// become transparent.
@@ -113,7 +113,8 @@ void DirectCompositionChildSurfaceWin::InitializeSurface() {
desc.Stereo = FALSE;
desc.SampleDesc.Count = 1;
desc.BufferCount = 2;
- desc.BufferUsage = DXGI_USAGE_RENDER_TARGET_OUTPUT;
+ desc.BufferUsage =
+ DXGI_USAGE_RENDER_TARGET_OUTPUT | DXGI_USAGE_SHADER_INPUT;
desc.Scaling = DXGI_SCALING_STRETCH;
desc.SwapEffect = DXGI_SWAP_EFFECT_FLIP_SEQUENTIAL;
desc.AlphaMode = alpha_mode;
@@ -122,8 +123,9 @@ void DirectCompositionChildSurfaceWin::InitializeSurface() {
d3d11_device_.Get(), &desc, nullptr, swap_chain_.GetAddressOf());
has_been_rendered_to_ = false;
first_swap_ = true;
- CHECK(SUCCEEDED(hr));
+ return SUCCEEDED(hr);
}
+ return true;
}
void DirectCompositionChildSurfaceWin::ReleaseDrawTexture(bool will_discard) {
@@ -136,6 +138,7 @@ void DirectCompositionChildSurfaceWin::ReleaseDrawTexture(bool will_discard) {
if (dcomp_surface_) {
HRESULT hr = dcomp_surface_->EndDraw();
CHECK(SUCCEEDED(hr));
+ dcomp_surface_serial_++;
} else if (!will_discard) {
DXGI_PRESENT_PARAMETERS params = {};
RECT dirty_rect = swap_rect_.ToRECT();
@@ -158,7 +161,7 @@ void DirectCompositionChildSurfaceWin::ReleaseDrawTexture(bool will_discard) {
}
}
}
- if (dcomp_surface_ == g_current_surface)
+ if (dcomp_surface_.Get() == g_current_surface)
g_current_surface = nullptr;
}
@@ -177,7 +180,7 @@ void DirectCompositionChildSurfaceWin::Destroy() {
}
real_surface_ = nullptr;
}
- if (dcomp_surface_ && (dcomp_surface_ == g_current_surface)) {
+ if (dcomp_surface_ && (dcomp_surface_.Get() == g_current_surface)) {
HRESULT hr = dcomp_surface_->EndDraw();
CHECK(SUCCEEDED(hr));
g_current_surface = nullptr;
@@ -212,7 +215,7 @@ bool DirectCompositionChildSurfaceWin::SupportsPostSubBuffer() {
}
bool DirectCompositionChildSurfaceWin::OnMakeCurrent(gl::GLContext* context) {
- if (g_current_surface != dcomp_surface_) {
+ if (g_current_surface != dcomp_surface_.Get()) {
if (g_current_surface) {
HRESULT hr = g_current_surface->SuspendDraw();
CHECK(SUCCEEDED(hr));
@@ -241,7 +244,10 @@ bool DirectCompositionChildSurfaceWin::SetDrawRectangle(
if ((enable_dc_layers_ && !dcomp_surface_) ||
(!enable_dc_layers_ && !swap_chain_)) {
ReleaseCurrentSurface();
- InitializeSurface();
+ if (!InitializeSurface()) {
+ LOG(ERROR) << "InitializeSurface failed";
+ return false;
+ }
}
if (!gfx::Rect(size_).Contains(rectangle)) {
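
The new is_hdr_ flag only affects which backbuffer format the surface allocates. A trivial restatement of that choice as a helper, for illustration (ChooseBackbufferFormat is not part of the patch):

  DXGI_FORMAT ChooseBackbufferFormat(bool is_hdr) {
    // 16-bit float channels for scRGB-linear HDR output; 8-bit BGRA otherwise.
    return is_hdr ? DXGI_FORMAT_R16G16B16A16_FLOAT
                  : DXGI_FORMAT_B8G8R8A8_UNORM;
  }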
diff --git a/chromium/gpu/ipc/service/direct_composition_child_surface_win.h b/chromium/gpu/ipc/service/direct_composition_child_surface_win.h
index 8d5dc5d6931..9d7b70353fd 100644
--- a/chromium/gpu/ipc/service/direct_composition_child_surface_win.h
+++ b/chromium/gpu/ipc/service/direct_composition_child_surface_win.h
@@ -18,6 +18,7 @@ namespace gpu {
class GPU_EXPORT DirectCompositionChildSurfaceWin : public gl::GLSurfaceEGL {
public:
DirectCompositionChildSurfaceWin(const gfx::Size& size,
+ bool is_hdr,
bool has_alpha,
bool enable_dc_layers);
@@ -44,12 +45,14 @@ class GPU_EXPORT DirectCompositionChildSurfaceWin : public gl::GLSurfaceEGL {
return swap_chain_;
}
+ uint64_t dcomp_surface_serial() const { return dcomp_surface_serial_; }
+
protected:
~DirectCompositionChildSurfaceWin() override;
private:
void ReleaseCurrentSurface();
- void InitializeSurface();
+ bool InitializeSurface();
// Release the texture that's currently being drawn to. If will_discard is
// true then the surface should be discarded without swapping any contents
// to it.
@@ -64,11 +67,17 @@ class GPU_EXPORT DirectCompositionChildSurfaceWin : public gl::GLSurfaceEGL {
EGLSurface real_surface_ = 0;
bool first_swap_ = true;
const gfx::Size size_;
+ const bool is_hdr_;
const bool has_alpha_;
const bool enable_dc_layers_;
gfx::Rect swap_rect_;
gfx::Vector2d draw_offset_;
+ // This is a number that increments once for every EndDraw on a surface, and
+ // is used to determine when the contents have changed so Commit() needs to
+ // be called on the device.
+ uint64_t dcomp_surface_serial_ = 0;
+
base::win::ScopedComPtr<ID3D11Device> d3d11_device_;
base::win::ScopedComPtr<IDCompositionDevice2> dcomp_device_;
base::win::ScopedComPtr<IDCompositionSurface> dcomp_surface_;
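
The dcomp_surface_serial_ counter gives the layer tree a cheap way to tell whether the root surface's contents changed since the last commit. A simplified, self-contained sketch of that producer/consumer handshake (all names here are hypothetical, not from the patch):

  #include <cstdint>

  class Producer {
   public:
    void EndDraw() { ++serial_; }              // one bump per finished draw
    uint64_t serial() const { return serial_; }
   private:
    uint64_t serial_ = 0;
  };

  class Consumer {
   public:
    bool NeedsCommit(const Producer& p) {
      if (p.serial() == last_seen_serial_)
        return false;                          // nothing new, skip Commit()
      last_seen_serial_ = p.serial();
      return true;                             // caller would Commit() here
    }
   private:
    uint64_t last_seen_serial_ = 0;
  };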
diff --git a/chromium/gpu/ipc/service/direct_composition_surface_win.cc b/chromium/gpu/ipc/service/direct_composition_surface_win.cc
index a22aa4febfe..cc7b344c11a 100644
--- a/chromium/gpu/ipc/service/direct_composition_surface_win.cc
+++ b/chromium/gpu/ipc/service/direct_composition_surface_win.cc
@@ -201,6 +201,7 @@ class DCLayerTree {
std::unique_ptr<SwapChainPresenter> swap_chain_presenter;
base::win::ScopedComPtr<IDXGISwapChain1> swap_chain;
base::win::ScopedComPtr<IDCompositionSurface> surface;
+ uint64_t dcomp_surface_serial = 0;
gfx::Rect bounds;
float swap_chain_scale_x = 0.0f;
@@ -210,12 +211,13 @@ class DCLayerTree {
gfx::Transform transform;
};
- void InitVisual(size_t i);
- void UpdateVisualForVideo(VisualInfo* visual_info,
+ // These functions return true if the visual tree was changed.
+ bool InitVisual(size_t i);
+ bool UpdateVisualForVideo(VisualInfo* visual_info,
const ui::DCRendererLayerParams& params);
- void UpdateVisualForBackbuffer(VisualInfo* visual_info,
+ bool UpdateVisualForBackbuffer(VisualInfo* visual_info,
const ui::DCRendererLayerParams& params);
- void UpdateVisualClip(VisualInfo* visual_info,
+ bool UpdateVisualClip(VisualInfo* visual_info,
const ui::DCRendererLayerParams& params);
DirectCompositionSurfaceWin* surface_;
@@ -590,6 +592,9 @@ void DCLayerTree::SwapChainPresenter::PresentToSwapChain(
// TODO(jbauman): Use correct colorspace.
gfx::ColorSpace src_color_space = gfx::ColorSpace::CreateREC709();
+ if (params.image[0]->color_space().IsValid()) {
+ src_color_space = params.image[0]->color_space();
+ }
base::win::ScopedComPtr<ID3D11VideoContext1> context1;
if (SUCCEEDED(video_context_.CopyTo(context1.GetAddressOf()))) {
context1->VideoProcessorSetStreamColorSpace1(
@@ -615,16 +620,47 @@ void DCLayerTree::SwapChainPresenter::PresentToSwapChain(
if (SUCCEEDED(swap_chain_.CopyTo(swap_chain3.GetAddressOf()))) {
DXGI_COLOR_SPACE_TYPE color_space =
gfx::ColorSpaceWin::GetDXGIColorSpace(output_color_space);
+ if (is_yuy2_swapchain_) {
+ // Swapchains with YUY2 textures can't have RGB color spaces.
+ switch (color_space) {
+ case DXGI_COLOR_SPACE_RGB_FULL_G22_NONE_P709:
+ case DXGI_COLOR_SPACE_RGB_FULL_G10_NONE_P709:
+ color_space = DXGI_COLOR_SPACE_YCBCR_FULL_G22_LEFT_P709;
+ break;
+
+ case DXGI_COLOR_SPACE_RGB_STUDIO_G22_NONE_P709:
+ color_space = DXGI_COLOR_SPACE_YCBCR_STUDIO_G22_LEFT_P709;
+ break;
+
+ case DXGI_COLOR_SPACE_RGB_STUDIO_G22_NONE_P2020:
+ color_space = DXGI_COLOR_SPACE_YCBCR_STUDIO_G22_LEFT_P2020;
+ break;
+
+ case DXGI_COLOR_SPACE_RGB_FULL_G2084_NONE_P2020:
+ case DXGI_COLOR_SPACE_RGB_STUDIO_G2084_NONE_P2020:
+ color_space = DXGI_COLOR_SPACE_YCBCR_STUDIO_G2084_LEFT_P2020;
+ break;
+
+ case DXGI_COLOR_SPACE_RGB_FULL_G22_NONE_P2020:
+ color_space = DXGI_COLOR_SPACE_YCBCR_FULL_G22_LEFT_P2020;
+ break;
+
+ default:
+ break;
+ }
+ }
HRESULT hr = swap_chain3->SetColorSpace1(color_space);
- CHECK(SUCCEEDED(hr));
- if (context1) {
- context1->VideoProcessorSetOutputColorSpace1(video_processor_.Get(),
- color_space);
- } else {
- D3D11_VIDEO_PROCESSOR_COLOR_SPACE d3d11_color_space =
- gfx::ColorSpaceWin::GetD3D11ColorSpace(output_color_space);
- video_context_->VideoProcessorSetOutputColorSpace(video_processor_.Get(),
- &d3d11_color_space);
+
+ if (SUCCEEDED(hr)) {
+ if (context1) {
+ context1->VideoProcessorSetOutputColorSpace1(video_processor_.Get(),
+ color_space);
+ } else {
+ D3D11_VIDEO_PROCESSOR_COLOR_SPACE d3d11_color_space =
+ gfx::ColorSpaceWin::GetD3D11ColorSpace(output_color_space);
+ video_context_->VideoProcessorSetOutputColorSpace(
+ video_processor_.Get(), &d3d11_color_space);
+ }
}
}
@@ -810,11 +846,11 @@ void DCLayerTree::SwapChainPresenter::ReallocateSwapChain(bool yuy2) {
out_view_.Reset();
}
-void DCLayerTree::InitVisual(size_t i) {
+bool DCLayerTree::InitVisual(size_t i) {
DCHECK_GT(visual_info_.size(), i);
VisualInfo* visual_info = &visual_info_[i];
if (visual_info->content_visual)
- return;
+ return false;
DCHECK(!visual_info->clip_visual);
base::win::ScopedComPtr<IDCompositionVisual2> visual;
dcomp_device_->CreateVisual(visual_info->clip_visual.GetAddressOf());
@@ -825,14 +861,16 @@ void DCLayerTree::InitVisual(size_t i) {
IDCompositionVisual2* last_visual =
(i > 0) ? visual_info_[i - 1].clip_visual.Get() : nullptr;
root_visual_->AddVisual(visual_info->clip_visual.Get(), TRUE, last_visual);
+ return true;
}
-void DCLayerTree::UpdateVisualForVideo(
+bool DCLayerTree::UpdateVisualForVideo(
VisualInfo* visual_info,
const ui::DCRendererLayerParams& params) {
base::win::ScopedComPtr<IDCompositionVisual2> dc_visual =
visual_info->content_visual;
+ bool changed = false;
gfx::Rect bounds_rect = params.rect;
visual_info->surface.Reset();
if (!visual_info->swap_chain_presenter) {
@@ -844,6 +882,7 @@ void DCLayerTree::UpdateVisualForVideo(
visual_info->swap_chain_presenter->swap_chain()) {
visual_info->swap_chain = visual_info->swap_chain_presenter->swap_chain();
dc_visual->SetContent(visual_info->swap_chain.Get());
+ changed = true;
}
if (visual_info->swap_chain_presenter->swap_chain_scale_x() !=
@@ -879,16 +918,19 @@ void DCLayerTree::UpdateVisualForVideo(
final_transform.matrix().get(3, 1)}}};
dcomp_transform->SetMatrix(d2d_matrix);
dc_visual->SetTransform(dcomp_transform.Get());
+ changed = true;
}
+ return changed;
}
-void DCLayerTree::UpdateVisualForBackbuffer(
+bool DCLayerTree::UpdateVisualForBackbuffer(
VisualInfo* visual_info,
const ui::DCRendererLayerParams& params) {
base::win::ScopedComPtr<IDCompositionVisual2> dc_visual =
visual_info->content_visual;
visual_info->swap_chain_presenter = nullptr;
+ bool changed = false;
if ((visual_info->surface != surface_->dcomp_surface()) ||
(visual_info->swap_chain != surface_->swap_chain())) {
visual_info->surface = surface_->dcomp_surface();
@@ -900,6 +942,7 @@ void DCLayerTree::UpdateVisualForBackbuffer(
} else {
dc_visual->SetContent(nullptr);
}
+ changed = true;
}
gfx::Rect bounds_rect = params.rect;
@@ -910,10 +953,17 @@ void DCLayerTree::UpdateVisualForBackbuffer(
visual_info->bounds = bounds_rect;
dc_visual->SetTransform(nullptr);
visual_info->transform = gfx::Transform();
+ changed = true;
+ }
+ if (surface_->dcomp_surface() &&
+ surface_->GetDCompSurfaceSerial() != visual_info->dcomp_surface_serial) {
+ changed = true;
+ visual_info->dcomp_surface_serial = surface_->GetDCompSurfaceSerial();
}
+ return changed;
}
-void DCLayerTree::UpdateVisualClip(VisualInfo* visual_info,
+bool DCLayerTree::UpdateVisualClip(VisualInfo* visual_info,
const ui::DCRendererLayerParams& params) {
if (params.is_clipped != visual_info->is_clipped ||
params.clip_rect != visual_info->clip_rect) {
@@ -934,7 +984,9 @@ void DCLayerTree::UpdateVisualClip(VisualInfo* visual_info,
} else {
visual_info->clip_visual->SetClip(nullptr);
}
+ return true;
}
+ return false;
}
bool DCLayerTree::CommitAndClearPendingOverlays() {
@@ -957,10 +1009,12 @@ bool DCLayerTree::CommitAndClearPendingOverlays() {
return a->z_order < b->z_order;
});
+ bool changed = false;
while (visual_info_.size() > pending_overlays_.size()) {
visual_info_.back().clip_visual->RemoveAllVisuals();
root_visual_->RemoveVisual(visual_info_.back().clip_visual.Get());
visual_info_.pop_back();
+ changed = true;
}
visual_info_.resize(pending_overlays_.size());
@@ -974,19 +1028,21 @@ bool DCLayerTree::CommitAndClearPendingOverlays() {
ui::DCRendererLayerParams& params = *pending_overlays_[i];
VisualInfo* visual_info = &visual_info_[i];
- InitVisual(i);
+ changed |= InitVisual(i);
if (params.image.size() >= 1 && params.image[0]) {
- UpdateVisualForVideo(visual_info, params);
+ changed |= UpdateVisualForVideo(visual_info, params);
} else if (params.image.empty()) {
- UpdateVisualForBackbuffer(visual_info, params);
+ changed |= UpdateVisualForBackbuffer(visual_info, params);
} else {
CHECK(false);
}
- UpdateVisualClip(visual_info, params);
+ changed |= UpdateVisualClip(visual_info, params);
}
- HRESULT hr = dcomp_device_->Commit();
- CHECK(SUCCEEDED(hr));
+ if (changed) {
+ HRESULT hr = dcomp_device_->Commit();
+ CHECK(SUCCEEDED(hr));
+ }
pending_overlays_.clear();
return true;
@@ -1013,10 +1069,20 @@ DirectCompositionSurfaceWin::~DirectCompositionSurfaceWin() {
// static
bool DirectCompositionSurfaceWin::AreOverlaysSupported() {
- if (!HardwareSupportsOverlays())
- return false;
+ static bool initialized;
+ static bool overlays_supported;
+ if (initialized)
+ return overlays_supported;
+
+ initialized = true;
- return base::FeatureList::IsEnabled(switches::kDirectCompositionOverlays);
+ overlays_supported =
+ HardwareSupportsOverlays() &&
+ base::FeatureList::IsEnabled(switches::kDirectCompositionOverlays);
+
+ UMA_HISTOGRAM_BOOLEAN("GPU.DirectComposition.OverlaysSupported",
+ overlays_supported);
+ return overlays_supported;
}
// static
@@ -1132,8 +1198,10 @@ void* DirectCompositionSurfaceWin::GetHandle() {
bool DirectCompositionSurfaceWin::Resize(const gfx::Size& size,
float scale_factor,
+ ColorSpace color_space,
bool has_alpha) {
- if ((size == GetSize()) && (has_alpha == has_alpha_))
+ bool is_hdr = color_space == ColorSpace::SCRGB_LINEAR;
+ if (size == GetSize() && has_alpha == has_alpha_ && is_hdr == is_hdr_)
return true;
// Force a resize and redraw (but not a move, activate, etc.).
@@ -1143,6 +1211,7 @@ bool DirectCompositionSurfaceWin::Resize(const gfx::Size& size,
return false;
}
size_ = size;
+ is_hdr_ = is_hdr;
has_alpha_ = has_alpha;
ui::ScopedReleaseCurrent release_current(this);
return RecreateRootSurface();
@@ -1204,6 +1273,10 @@ bool DirectCompositionSurfaceWin::SupportsDCLayers() const {
return true;
}
+bool DirectCompositionSurfaceWin::UseOverlaysForVideo() const {
+ return AreOverlaysSupported();
+}
+
bool DirectCompositionSurfaceWin::SetDrawRectangle(const gfx::Rect& rectangle) {
if (root_surface_)
return root_surface_->SetDrawRectangle(rectangle);
@@ -1222,8 +1295,8 @@ void DirectCompositionSurfaceWin::WaitForSnapshotRendering() {
}
bool DirectCompositionSurfaceWin::RecreateRootSurface() {
- root_surface_ = new DirectCompositionChildSurfaceWin(size_, has_alpha_,
- enable_dc_layers_);
+ root_surface_ = new DirectCompositionChildSurfaceWin(
+ size_, is_hdr_, has_alpha_, enable_dc_layers_);
return root_surface_->Initialize();
}
@@ -1237,6 +1310,10 @@ DirectCompositionSurfaceWin::swap_chain() const {
return root_surface_ ? root_surface_->swap_chain() : nullptr;
}
+uint64_t DirectCompositionSurfaceWin::GetDCompSurfaceSerial() const {
+ return root_surface_ ? root_surface_->dcomp_surface_serial() : 0;
+}
+
scoped_refptr<base::TaskRunner>
DirectCompositionSurfaceWin::GetWindowTaskRunnerForTesting() {
return child_window_.GetTaskRunnerForTesting();
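
Several hunks above convert the visual-tree update helpers to return whether they changed anything, so DCLayerTree::CommitAndClearPendingOverlays() can skip the IDCompositionDevice2 Commit() when nothing moved. The general shape of that optimisation, reduced to a standalone sketch (UpdateAndCommit and its parameters are illustrative, not from the patch):

  #include <functional>
  #include <vector>

  bool UpdateAndCommit(const std::vector<std::function<bool()>>& update_steps,
                       const std::function<void()>& commit) {
    bool changed = false;
    for (const auto& step : update_steps)
      changed |= step();   // each step reports whether it touched the tree
    if (changed)
      commit();            // the one expensive device commit
    return changed;
  }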
diff --git a/chromium/gpu/ipc/service/direct_composition_surface_win.h b/chromium/gpu/ipc/service/direct_composition_surface_win.h
index 145ed9f1b8e..7d948f56403 100644
--- a/chromium/gpu/ipc/service/direct_composition_surface_win.h
+++ b/chromium/gpu/ipc/service/direct_composition_surface_win.h
@@ -48,6 +48,7 @@ class GPU_EXPORT DirectCompositionSurfaceWin : public gl::GLSurfaceEGL {
void* GetHandle() override;
bool Resize(const gfx::Size& size,
float scale_factor,
+ ColorSpace color_space,
bool has_alpha) override;
gfx::SwapResult SwapBuffers() override;
gfx::SwapResult PostSubBuffer(int x, int y, int width, int height) override;
@@ -57,6 +58,7 @@ class GPU_EXPORT DirectCompositionSurfaceWin : public gl::GLSurfaceEGL {
bool SupportsPostSubBuffer() override;
bool OnMakeCurrent(gl::GLContext* context) override;
bool SupportsDCLayers() const override;
+ bool UseOverlaysForVideo() const override;
bool SetDrawRectangle(const gfx::Rect& rect) override;
gfx::Vector2d GetDrawOffset() const override;
void WaitForSnapshotRendering() override;
@@ -71,6 +73,8 @@ class GPU_EXPORT DirectCompositionSurfaceWin : public gl::GLSurfaceEGL {
const base::win::ScopedComPtr<IDCompositionSurface> dcomp_surface() const;
const base::win::ScopedComPtr<IDXGISwapChain1> swap_chain() const;
+ uint64_t GetDCompSurfaceSerial() const;
+
scoped_refptr<base::TaskRunner> GetWindowTaskRunnerForTesting();
base::win::ScopedComPtr<IDXGISwapChain1> GetLayerSwapChainForTesting(
@@ -95,6 +99,7 @@ class GPU_EXPORT DirectCompositionSurfaceWin : public gl::GLSurfaceEGL {
gfx::Size size_ = gfx::Size(1, 1);
bool enable_dc_layers_ = false;
+ bool is_hdr_ = false;
bool has_alpha_ = true;
std::unique_ptr<gfx::VSyncProvider> vsync_provider_;
scoped_refptr<DirectCompositionChildSurfaceWin> root_surface_;
diff --git a/chromium/gpu/ipc/service/direct_composition_surface_win_unittest.cc b/chromium/gpu/ipc/service/direct_composition_surface_win_unittest.cc
index 264c44bf65e..132769023ab 100644
--- a/chromium/gpu/ipc/service/direct_composition_surface_win_unittest.cc
+++ b/chromium/gpu/ipc/service/direct_composition_surface_win_unittest.cc
@@ -13,6 +13,7 @@
#include "base/win/scoped_hdc.h"
#include "base/win/scoped_select_object.h"
#include "gpu/command_buffer/service/feature_info.h"
+#include "gpu/command_buffer/service/gpu_preferences.h"
#include "testing/gtest/include/gtest/gtest.h"
#include "ui/base/win/hidden_window.h"
#include "ui/gfx/buffer_format_util.h"
@@ -60,6 +61,9 @@ class TestImageTransportSurfaceDelegate
const gles2::FeatureInfo* GetFeatureInfo() const override {
return feature_info_.get();
}
+ const GpuPreferences& GetGpuPreferences() const override {
+ return gpu_preferences_;
+ }
void SetLatencyInfoCallback(const LatencyInfoCallback& callback) override {}
void UpdateVSyncParameters(base::TimeTicks timebase,
base::TimeDelta interval) override {}
@@ -68,6 +72,7 @@ class TestImageTransportSurfaceDelegate
private:
scoped_refptr<gpu::gles2::FeatureInfo> feature_info_;
+ GpuPreferences gpu_preferences_;
};
class TestPlatformDelegate : public ui::PlatformWindowDelegate {
@@ -128,8 +133,8 @@ base::win::ScopedComPtr<ID3D11Texture2D> CreateNV12Texture(
}
std::vector<char> image_data(size.width() * size.height() * 3 / 2);
- // Y, U, and V should all be Oxff. Output color should be pink.
- memset(&image_data[0], 0xff, size.width() * size.height() * 3 / 2);
+ // Y, U, and V should all be 160. Output color should be pink.
+ memset(&image_data[0], 160, size.width() * size.height() * 3 / 2);
D3D11_SUBRESOURCE_DATA data = {};
data.pSysMem = (const void*)&image_data[0];
@@ -156,7 +161,8 @@ TEST(DirectCompositionSurfaceTest, TestMakeCurrent) {
scoped_refptr<gl::GLContext> context1 = gl::init::CreateGLContext(
nullptr, surface1.get(), gl::GLContextAttribs());
- EXPECT_TRUE(surface1->Resize(gfx::Size(100, 100), 1.0, true));
+ EXPECT_TRUE(surface1->Resize(gfx::Size(100, 100), 1.0,
+ gl::GLSurface::ColorSpace::UNSPECIFIED, true));
// First SetDrawRectangle must be full size of surface.
EXPECT_FALSE(surface1->SetDrawRectangle(gfx::Rect(0, 0, 50, 50)));
@@ -175,7 +181,8 @@ TEST(DirectCompositionSurfaceTest, TestMakeCurrent) {
EXPECT_TRUE(surface1->SetDrawRectangle(gfx::Rect(0, 0, 100, 100)));
EXPECT_TRUE(context1->IsCurrent(surface1.get()));
- EXPECT_TRUE(surface1->Resize(gfx::Size(50, 50), 1.0, true));
+ EXPECT_TRUE(surface1->Resize(gfx::Size(50, 50), 1.0,
+ gl::GLSurface::ColorSpace::UNSPECIFIED, true));
EXPECT_TRUE(context1->IsCurrent(surface1.get()));
EXPECT_TRUE(surface1->SetDrawRectangle(gfx::Rect(0, 0, 50, 50)));
EXPECT_TRUE(context1->IsCurrent(surface1.get()));
@@ -189,7 +196,8 @@ TEST(DirectCompositionSurfaceTest, TestMakeCurrent) {
nullptr, surface2.get(), gl::GLContextAttribs());
surface2->SetEnableDCLayers(true);
EXPECT_TRUE(context2->MakeCurrent(surface2.get()));
- EXPECT_TRUE(surface2->Resize(gfx::Size(100, 100), 1.0, true));
+ EXPECT_TRUE(surface2->Resize(gfx::Size(100, 100), 1.0,
+ gl::GLSurface::ColorSpace::UNSPECIFIED, true));
// The previous IDCompositionSurface should be suspended when another
// surface is being drawn to.
EXPECT_TRUE(surface2->SetDrawRectangle(gfx::Rect(0, 0, 100, 100)));
@@ -219,7 +227,8 @@ TEST(DirectCompositionSurfaceTest, DXGIDCLayerSwitch) {
scoped_refptr<gl::GLContext> context =
gl::init::CreateGLContext(nullptr, surface.get(), gl::GLContextAttribs());
- EXPECT_TRUE(surface->Resize(gfx::Size(100, 100), 1.0, true));
+ EXPECT_TRUE(surface->Resize(gfx::Size(100, 100), 1.0,
+ gl::GLSurface::ColorSpace::UNSPECIFIED, true));
EXPECT_FALSE(surface->swap_chain());
// First SetDrawRectangle must be full size of surface for DXGI
@@ -273,7 +282,8 @@ TEST(DirectCompositionSurfaceTest, SwitchAlpha) {
scoped_refptr<gl::GLContext> context =
gl::init::CreateGLContext(nullptr, surface.get(), gl::GLContextAttribs());
- EXPECT_TRUE(surface->Resize(gfx::Size(100, 100), 1.0, true));
+ EXPECT_TRUE(surface->Resize(gfx::Size(100, 100), 1.0,
+ gl::GLSurface::ColorSpace::UNSPECIFIED, true));
EXPECT_FALSE(surface->swap_chain());
EXPECT_TRUE(surface->SetDrawRectangle(gfx::Rect(0, 0, 100, 100)));
@@ -284,10 +294,12 @@ TEST(DirectCompositionSurfaceTest, SwitchAlpha) {
EXPECT_EQ(DXGI_ALPHA_MODE_PREMULTIPLIED, desc.AlphaMode);
// Resize to the same parameters should have no effect.
- EXPECT_TRUE(surface->Resize(gfx::Size(100, 100), 1.0, true));
+ EXPECT_TRUE(surface->Resize(gfx::Size(100, 100), 1.0,
+ gl::GLSurface::ColorSpace::UNSPECIFIED, true));
EXPECT_TRUE(surface->swap_chain());
- EXPECT_TRUE(surface->Resize(gfx::Size(100, 100), 1.0, false));
+ EXPECT_TRUE(surface->Resize(gfx::Size(100, 100), 1.0,
+ gl::GLSurface::ColorSpace::UNSPECIFIED, false));
EXPECT_FALSE(surface->swap_chain());
EXPECT_TRUE(surface->SetDrawRectangle(gfx::Rect(0, 0, 100, 100)));
@@ -327,6 +339,7 @@ TEST(DirectCompositionSurfaceTest, NoPresentTwice) {
scoped_refptr<gl::GLImageDXGI> image_dxgi(
new gl::GLImageDXGI(texture_size, nullptr));
image_dxgi->SetTexture(texture, 0);
+ image_dxgi->SetColorSpaceForScanout(gfx::ColorSpace::CreateREC709());
ui::DCRendererLayerParams params(
false, gfx::Rect(), 1, gfx::Transform(),
@@ -356,7 +369,7 @@ TEST(DirectCompositionSurfaceTest, NoPresentTwice) {
base::win::ScopedComPtr<IDXGISwapChain1> swap_chain2 =
surface->GetLayerSwapChainForTesting(1);
- EXPECT_EQ(swap_chain2, swap_chain);
+ EXPECT_EQ(swap_chain2.Get(), swap_chain.Get());
// It's the same image, so it should have the same swapchain.
EXPECT_TRUE(SUCCEEDED(swap_chain->GetLastPresentCount(&last_present_count)));
@@ -374,7 +387,7 @@ TEST(DirectCompositionSurfaceTest, NoPresentTwice) {
base::win::ScopedComPtr<IDXGISwapChain1> swap_chain3 =
surface->GetLayerSwapChainForTesting(1);
- EXPECT_NE(swap_chain2, swap_chain3);
+ EXPECT_NE(swap_chain2.Get(), swap_chain3.Get());
context = nullptr;
DestroySurface(std::move(surface));
@@ -436,7 +449,8 @@ class DirectCompositionPixelTest : public testing::Test {
scoped_refptr<gl::GLContext> context = gl::init::CreateGLContext(
nullptr, surface_.get(), gl::GLContextAttribs());
- EXPECT_TRUE(surface_->Resize(window_size, 1.0, true));
+ EXPECT_TRUE(surface_->Resize(window_size, 1.0,
+ gl::GLSurface::ColorSpace::UNSPECIFIED, true));
EXPECT_TRUE(surface_->SetDrawRectangle(gfx::Rect(window_size)));
EXPECT_TRUE(context->MakeCurrent(surface_.get()));
@@ -458,6 +472,32 @@ class DirectCompositionPixelTest : public testing::Test {
DestroySurface(std::move(surface_));
}
+ void PixelTestCopyTexture(bool layers_enabled) {
+ if (!CheckIfDCSupported())
+ return;
+ InitializeSurface();
+ surface_->SetEnableDCLayers(layers_enabled);
+ gfx::Size window_size(100, 100);
+
+ scoped_refptr<gl::GLContext> context = gl::init::CreateGLContext(
+ nullptr, surface_.get(), gl::GLContextAttribs());
+ EXPECT_TRUE(surface_->Resize(window_size, 1.0,
+ gl::GLSurface::ColorSpace::UNSPECIFIED, true));
+
+ EXPECT_TRUE(surface_->SetDrawRectangle(gfx::Rect(0, 0, 100, 100)));
+ EXPECT_TRUE(context->MakeCurrent(surface_.get()));
+
+ Sleep(1000);
+
+ GLuint texture = 0;
+ glGenTextures(1, &texture);
+ glBindTexture(GL_TEXTURE_2D, texture);
+ glCopyTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, 0, 0, 100, 100, 0);
+
+ context = nullptr;
+ DestroySurface(std::move(surface_));
+ }
+
TestPlatformDelegate platform_delegate_;
TestImageTransportSurfaceDelegate delegate_;
ui::WinWindow window_;
@@ -472,6 +512,14 @@ TEST_F(DirectCompositionPixelTest, DCLayersDisabled) {
PixelTestSwapChain(false);
}
+TEST_F(DirectCompositionPixelTest, CopyTextureFromSurfaceWithLayersEnabled) {
+ PixelTestCopyTexture(true);
+}
+
+TEST_F(DirectCompositionPixelTest, CopyTextureFromSurfaceWithLayersDisabled) {
+ PixelTestCopyTexture(false);
+}
+
bool AreColorsSimilar(int a, int b) {
// The precise colors may differ depending on the video processor, so allow
// a margin for error.
@@ -482,58 +530,93 @@ bool AreColorsSimilar(int a, int b) {
abs(SkColorGetB(a) - SkColorGetB(b)) < kMargin;
}
-TEST_F(DirectCompositionPixelTest, VideoSwapchain) {
- if (!CheckIfDCSupported())
- return;
- InitializeSurface();
- surface_->SetEnableDCLayers(true);
- gfx::Size window_size(100, 100);
+class DirectCompositionVideoPixelTest : public DirectCompositionPixelTest {
+ protected:
+ void TestVideo(const gfx::ColorSpace& color_space,
+ SkColor expected_color,
+ bool check_color) {
+ if (!CheckIfDCSupported())
+ return;
+ InitializeSurface();
+ surface_->SetEnableDCLayers(true);
+ gfx::Size window_size(100, 100);
- scoped_refptr<gl::GLContext> context = gl::init::CreateGLContext(
- nullptr, surface_.get(), gl::GLContextAttribs());
- EXPECT_TRUE(surface_->Resize(window_size, 1.0, true));
+ scoped_refptr<gl::GLContext> context = gl::init::CreateGLContext(
+ nullptr, surface_.get(), gl::GLContextAttribs());
+ EXPECT_TRUE(surface_->Resize(window_size, 1.0,
+ gl::GLSurface::ColorSpace::UNSPECIFIED, true));
- base::win::ScopedComPtr<ID3D11Device> d3d11_device =
- gl::QueryD3D11DeviceObjectFromANGLE();
+ base::win::ScopedComPtr<ID3D11Device> d3d11_device =
+ gl::QueryD3D11DeviceObjectFromANGLE();
- gfx::Size texture_size(50, 50);
- base::win::ScopedComPtr<ID3D11Texture2D> texture =
- CreateNV12Texture(d3d11_device, texture_size, false);
+ gfx::Size texture_size(50, 50);
+ base::win::ScopedComPtr<ID3D11Texture2D> texture =
+ CreateNV12Texture(d3d11_device, texture_size, false);
- scoped_refptr<gl::GLImageDXGI> image_dxgi(
- new gl::GLImageDXGI(texture_size, nullptr));
- image_dxgi->SetTexture(texture, 0);
+ scoped_refptr<gl::GLImageDXGI> image_dxgi(
+ new gl::GLImageDXGI(texture_size, nullptr));
+ image_dxgi->SetTexture(texture, 0);
+ image_dxgi->SetColorSpaceForScanout(color_space);
- ui::DCRendererLayerParams params(
- false, gfx::Rect(), 1, gfx::Transform(),
- std::vector<scoped_refptr<gl::GLImage>>{image_dxgi},
- gfx::RectF(gfx::Rect(texture_size)), gfx::Rect(texture_size), 0, 0, 1.0,
- 0);
- surface_->ScheduleDCLayer(params);
+ ui::DCRendererLayerParams params(
+ false, gfx::Rect(), 1, gfx::Transform(),
+ std::vector<scoped_refptr<gl::GLImage>>{image_dxgi},
+ gfx::RectF(gfx::Rect(texture_size)), gfx::Rect(texture_size), 0, 0, 1.0,
+ 0);
+ surface_->ScheduleDCLayer(params);
- EXPECT_EQ(gfx::SwapResult::SWAP_ACK, surface_->SwapBuffers());
+ EXPECT_EQ(gfx::SwapResult::SWAP_ACK, surface_->SwapBuffers());
- // Scaling up the swapchain with the same image should cause it to be
- // transformed again, but not presented again.
- ui::DCRendererLayerParams params2(
- false, gfx::Rect(), 1, gfx::Transform(),
- std::vector<scoped_refptr<gl::GLImage>>{image_dxgi},
- gfx::RectF(gfx::Rect(texture_size)), gfx::Rect(window_size), 0, 0, 1.0,
- 0);
- surface_->ScheduleDCLayer(params2);
+ // Scaling up the swapchain with the same image should cause it to be
+ // transformed again, but not presented again.
+ ui::DCRendererLayerParams params2(
+ false, gfx::Rect(), 1, gfx::Transform(),
+ std::vector<scoped_refptr<gl::GLImage>>{image_dxgi},
+ gfx::RectF(gfx::Rect(texture_size)), gfx::Rect(window_size), 0, 0, 1.0,
+ 0);
+ surface_->ScheduleDCLayer(params2);
- EXPECT_EQ(gfx::SwapResult::SWAP_ACK, surface_->SwapBuffers());
- Sleep(1000);
+ EXPECT_EQ(gfx::SwapResult::SWAP_ACK, surface_->SwapBuffers());
+ Sleep(1000);
- SkColor expected_color = SkColorSetRGB(0xff, 0xb7, 0xff);
- SkColor actual_color =
- ReadBackWindowPixel(window_.hwnd(), gfx::Point(75, 75));
- EXPECT_TRUE(AreColorsSimilar(expected_color, actual_color))
- << std::hex << "Expected " << expected_color << " Actual "
- << actual_color;
+ if (check_color) {
+ SkColor actual_color =
+ ReadBackWindowPixel(window_.hwnd(), gfx::Point(75, 75));
+ EXPECT_TRUE(AreColorsSimilar(expected_color, actual_color))
+ << std::hex << "Expected " << expected_color << " Actual "
+ << actual_color;
+ }
- context = nullptr;
- DestroySurface(std::move(surface_));
+ context = nullptr;
+ DestroySurface(std::move(surface_));
+ }
+};
+
+TEST_F(DirectCompositionVideoPixelTest, BT601) {
+ TestVideo(gfx::ColorSpace::CreateREC601(), SkColorSetRGB(0xdb, 0x81, 0xe8),
+ true);
+}
+
+TEST_F(DirectCompositionVideoPixelTest, BT709) {
+ TestVideo(gfx::ColorSpace::CreateREC709(), SkColorSetRGB(0xe1, 0x90, 0xeb),
+ true);
+}
+
+TEST_F(DirectCompositionVideoPixelTest, SRGB) {
+ // SRGB doesn't make sense on an NV12 input, but don't crash.
+ TestVideo(gfx::ColorSpace::CreateSRGB(), SkColorSetRGB(0xd7, 0x89, 0xe0),
+ false);
+}
+
+TEST_F(DirectCompositionVideoPixelTest, SCRGBLinear) {
+ // SCRGB doesn't make sense on an NV12 input, but don't crash.
+ TestVideo(gfx::ColorSpace::CreateSCRGBLinear(),
+ SkColorSetRGB(0xd7, 0x89, 0xe0), false);
+}
+
+TEST_F(DirectCompositionVideoPixelTest, InvalidColorSpace) {
+ // An invalid color space should be treated as BT.709.
+ TestVideo(gfx::ColorSpace(), SkColorSetRGB(0xe1, 0x90, 0xeb), true);
}
TEST_F(DirectCompositionPixelTest, SoftwareVideoSwapchain) {
@@ -545,7 +628,8 @@ TEST_F(DirectCompositionPixelTest, SoftwareVideoSwapchain) {
scoped_refptr<gl::GLContext> context = gl::init::CreateGLContext(
nullptr, surface_.get(), gl::GLContextAttribs());
- EXPECT_TRUE(surface_->Resize(window_size, 1.0, true));
+ EXPECT_TRUE(surface_->Resize(window_size, 1.0,
+ gl::GLSurface::ColorSpace::UNSPECIFIED, true));
base::win::ScopedComPtr<ID3D11Device> d3d11_device =
gl::QueryD3D11DeviceObjectFromANGLE();
@@ -567,6 +651,7 @@ TEST_F(DirectCompositionPixelTest, SoftwareVideoSwapchain) {
new gl::GLImageRefCountedMemory(uv_size, GL_BGRA_EXT));
uv_image->Initialize(new base::RefCountedBytes(uv_data),
gfx::BufferFormat::RG_88);
+ y_image->SetColorSpaceForScanout(gfx::ColorSpace::CreateREC709());
ui::DCRendererLayerParams params(
false, gfx::Rect(), 1, gfx::Transform(),
@@ -597,7 +682,8 @@ TEST_F(DirectCompositionPixelTest, VideoHandleSwapchain) {
scoped_refptr<gl::GLContext> context = gl::init::CreateGLContext(
nullptr, surface_.get(), gl::GLContextAttribs());
- EXPECT_TRUE(surface_->Resize(window_size, 1.0, true));
+ EXPECT_TRUE(surface_->Resize(window_size, 1.0,
+ gl::GLSurface::ColorSpace::UNSPECIFIED, true));
base::win::ScopedComPtr<ID3D11Device> d3d11_device =
gl::QueryD3D11DeviceObjectFromANGLE();
@@ -626,7 +712,7 @@ TEST_F(DirectCompositionPixelTest, VideoHandleSwapchain) {
Sleep(1000);
- SkColor expected_color = SkColorSetRGB(0xff, 0xb7, 0xff);
+ SkColor expected_color = SkColorSetRGB(0xe1, 0x90, 0xeb);
SkColor actual_color =
ReadBackWindowPixel(window_.hwnd(), gfx::Point(75, 75));
EXPECT_TRUE(AreColorsSimilar(expected_color, actual_color))
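
The BT601 and BT709 tests above expect different readback colors for the same NV12 texture because the two standards weight luma and chroma differently. A minimal sketch of that conversion, assuming full-range YUV and the standard coefficients; the sample value is arbitrary and the result is not meant to reproduce the exact colors the tests check, which also depend on the texture contents and the video processor's range handling.

// Illustrative full-range YUV -> RGB conversion with BT.601 or BT.709 luma
// weights. Not part of the patch.
#include <algorithm>
#include <cstdint>
#include <cstdio>

struct Rgb { uint8_t r, g, b; };

Rgb YuvToRgb(uint8_t y, uint8_t u, uint8_t v, bool bt709) {
  const double kr = bt709 ? 0.2126 : 0.299;  // red luma weight
  const double kb = bt709 ? 0.0722 : 0.114;  // blue luma weight
  const double kg = 1.0 - kr - kb;
  const double cb = u - 128.0;
  const double cr = v - 128.0;
  auto clamp = [](double x) {
    return static_cast<uint8_t>(std::min(255.0, std::max(0.0, x + 0.5)));
  };
  return {clamp(y + 2.0 * (1.0 - kr) * cr),
          clamp(y - (2.0 * kr * (1.0 - kr) * cr + 2.0 * kb * (1.0 - kb) * cb) / kg),
          clamp(y + 2.0 * (1.0 - kb) * cb)};
}

int main() {
  const Rgb bt601 = YuvToRgb(200, 128, 160, false);
  const Rgb bt709 = YuvToRgb(200, 128, 160, true);
  std::printf("BT.601 -> #%02x%02x%02x, BT.709 -> #%02x%02x%02x\n",
              bt601.r, bt601.g, bt601.b, bt709.r, bt709.g, bt709.b);
  return 0;
}
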
diff --git a/chromium/gpu/ipc/service/gpu_channel.cc b/chromium/gpu/ipc/service/gpu_channel.cc
index d36c9072a62..308457f6792 100644
--- a/chromium/gpu/ipc/service/gpu_channel.cc
+++ b/chromium/gpu/ipc/service/gpu_channel.cc
@@ -688,31 +688,64 @@ bool GpuChannelMessageFilter::OnMessageReceived(const IPC::Message& message) {
if (!gpu_channel_)
return MessageErrorHandler(message, "Channel destroyed");
- if (message.routing_id() == MSG_ROUTING_CONTROL ||
- message.type() == GpuCommandBufferMsg_WaitForTokenInRange::ID ||
- message.type() == GpuCommandBufferMsg_WaitForGetOffsetInRange::ID) {
+ // TODO(sunnyps): Remove the async flush message once the non-scheduler code
+ // path is removed.
+ if (message.type() == GpuCommandBufferMsg_AsyncFlush::ID)
+ return MessageErrorHandler(message, "Invalid flush message");
+
+ std::vector<Scheduler::Task> tasks;
+
+ if (message.type() == GpuChannelMsg_FlushCommandBuffers::ID) {
+ GpuChannelMsg_FlushCommandBuffers::Param params;
+
+ if (!GpuChannelMsg_FlushCommandBuffers::Read(&message, &params))
+ return MessageErrorHandler(message, "Invalid flush message");
+
+ std::vector<FlushParams> flush_list = std::get<0>(std::move(params));
+
+ for (auto& flush_info : flush_list) {
+ GpuCommandBufferMsg_AsyncFlush flush_message(
+ flush_info.route_id, flush_info.put_offset, flush_info.flush_id,
+ std::move(flush_info.latency_info));
+
+ if (scheduler_) {
+ auto it = route_sequences_.find(flush_info.route_id);
+ if (it == route_sequences_.end()) {
+ DLOG(ERROR) << "Invalid route id in flush list";
+ continue;
+ }
+ tasks.emplace_back(
+ it->second /* sequence_id */,
+ base::BindOnce(&GpuChannel::HandleMessage,
+ gpu_channel_->AsWeakPtr(), flush_message),
+ std::move(flush_info.sync_token_fences));
+ } else {
+ message_queue_->PushBackMessage(flush_message);
+ }
+ }
+
+ if (scheduler_)
+ scheduler_->ScheduleTasks(std::move(tasks));
+
+ } else if (message.routing_id() == MSG_ROUTING_CONTROL ||
+ message.type() == GpuCommandBufferMsg_WaitForTokenInRange::ID ||
+ message.type() ==
+ GpuCommandBufferMsg_WaitForGetOffsetInRange::ID) {
// It's OK to post task that may never run even for sync messages, because
// if the channel is destroyed, the client Send will fail.
main_task_runner_->PostTask(FROM_HERE,
base::Bind(&GpuChannel::HandleOutOfOrderMessage,
gpu_channel_->AsWeakPtr(), message));
} else if (scheduler_) {
- SequenceId sequence_id = route_sequences_[message.routing_id()];
- if (sequence_id.is_null())
- return MessageErrorHandler(message, "Invalid route");
-
- std::vector<SyncToken> sync_token_fences;
- if (message.type() == GpuCommandBufferMsg_AsyncFlush::ID) {
- GpuCommandBufferMsg_AsyncFlush::Param params;
- if (!GpuCommandBufferMsg_AsyncFlush::Read(&message, &params))
- return MessageErrorHandler(message, "Invalid flush message");
- sync_token_fences = std::get<3>(params);
- }
-
- scheduler_->ScheduleTask(sequence_id,
- base::BindOnce(&GpuChannel::HandleMessage,
- gpu_channel_->AsWeakPtr(), message),
- sync_token_fences);
+ auto it = route_sequences_.find(message.routing_id());
+ if (it == route_sequences_.end())
+ return MessageErrorHandler(message, "Invalid route id");
+
+ scheduler_->ScheduleTask(
+ Scheduler::Task(it->second /* sequence_id */,
+ base::BindOnce(&GpuChannel::HandleMessage,
+ gpu_channel_->AsWeakPtr(), message),
+ std::vector<SyncToken>()));
} else {
// Message queue takes care of PostTask.
message_queue_->PushBackMessage(message);
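
The GpuChannelMsg_FlushCommandBuffers branch above turns a batch of per-route flushes into one scheduler task per entry, keyed by the route's sequence id and skipping unknown routes. A simplified sketch of that grouping; FlushEntry, SequenceId, Task and handle_flush are illustrative stand-ins, not the real gpu:: types.

// Simplified stand-ins for the real gpu::Scheduler / FlushParams types.
#include <cstdint>
#include <functional>
#include <map>
#include <vector>

using SequenceId = int;
struct FlushEntry { int route_id; int32_t put_offset; uint32_t flush_id; };
struct Task { SequenceId sequence_id; std::function<void()> closure; };

// Maps each flush in the batch to a task on its route's sequence and skips
// entries whose route is unknown, mirroring the shape of the scheduler branch
// in GpuChannelMessageFilter::OnMessageReceived above.
std::vector<Task> BuildFlushTasks(
    const std::vector<FlushEntry>& flush_list,
    const std::map<int, SequenceId>& route_sequences,
    const std::function<void(const FlushEntry&)>& handle_flush) {
  std::vector<Task> tasks;
  for (const FlushEntry& flush : flush_list) {
    auto it = route_sequences.find(flush.route_id);
    if (it == route_sequences.end())
      continue;  // Invalid route id in the flush list.
    tasks.push_back({it->second, [handle_flush, flush] { handle_flush(flush); }});
  }
  return tasks;
}
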
diff --git a/chromium/gpu/ipc/service/gpu_channel_manager.cc b/chromium/gpu/ipc/service/gpu_channel_manager.cc
index 2a96109db40..6b2ced82f03 100644
--- a/chromium/gpu/ipc/service/gpu_channel_manager.cc
+++ b/chromium/gpu/ipc/service/gpu_channel_manager.cc
@@ -11,8 +11,8 @@
#include "base/command_line.h"
#include "base/location.h"
#include "base/memory/ptr_util.h"
-#include "base/message_loop/message_loop.h"
#include "base/metrics/histogram_macros.h"
+#include "base/run_loop.h"
#include "base/single_thread_task_runner.h"
#include "base/sys_info.h"
#include "base/threading/thread_task_runner_handle.h"
@@ -21,8 +21,10 @@
#include "gpu/command_buffer/service/feature_info.h"
#include "gpu/command_buffer/service/mailbox_manager.h"
#include "gpu/command_buffer/service/memory_program_cache.h"
+#include "gpu/command_buffer/service/passthrough_program_cache.h"
#include "gpu/command_buffer/service/preemption_flag.h"
#include "gpu/command_buffer/service/scheduler.h"
+#include "gpu/command_buffer/service/service_utils.h"
#include "gpu/command_buffer/service/sync_point_manager.h"
#include "gpu/ipc/common/gpu_messages.h"
#include "gpu/ipc/service/gpu_channel.h"
@@ -47,7 +49,6 @@ const int kMaxKeepAliveTimeMs = 200;
GpuChannelManager::GpuChannelManager(
const GpuPreferences& gpu_preferences,
- const GpuDriverBugWorkarounds& workarounds,
GpuChannelManagerDelegate* delegate,
GpuWatchdogThread* watchdog,
scoped_refptr<base::SingleThreadTaskRunner> task_runner,
@@ -60,7 +61,8 @@ GpuChannelManager::GpuChannelManager(
: task_runner_(task_runner),
io_task_runner_(io_task_runner),
gpu_preferences_(gpu_preferences),
- gpu_driver_bug_workarounds_(workarounds),
+ gpu_driver_bug_workarounds_(
+ gpu_feature_info.enabled_gpu_driver_bug_workarounds),
delegate_(delegate),
watchdog_(watchdog),
share_group_(new gl::GLShareGroup()),
@@ -103,16 +105,23 @@ GpuChannelManager::~GpuChannelManager() {
}
gles2::ProgramCache* GpuChannelManager::program_cache() {
- if (!program_cache_.get() &&
- !gpu_preferences_.disable_gpu_program_cache) {
+ if (!program_cache_.get()) {
const GpuDriverBugWorkarounds& workarounds = gpu_driver_bug_workarounds_;
bool disable_disk_cache =
gpu_preferences_.disable_gpu_shader_disk_cache ||
workarounds.disable_program_disk_cache;
- program_cache_.reset(new gles2::MemoryProgramCache(
- gpu_preferences_.gpu_program_cache_size, disable_disk_cache,
- workarounds.disable_program_caching_for_transform_feedback,
- &activity_flags_));
+
+ // Use the EGL cache control extension for the passthrough decoder.
+ if (gpu_preferences_.use_passthrough_cmd_decoder &&
+ gles2::PassthroughCommandDecoderSupported()) {
+ program_cache_.reset(new gles2::PassthroughProgramCache(
+ gpu_preferences_.gpu_program_cache_size, disable_disk_cache));
+ } else {
+ program_cache_.reset(new gles2::MemoryProgramCache(
+ gpu_preferences_.gpu_program_cache_size, disable_disk_cache,
+ workarounds.disable_program_caching_for_transform_feedback,
+ &activity_flags_));
+ }
}
return program_cache_.get();
}
@@ -189,7 +198,7 @@ void GpuChannelManager::MaybeExitOnContextLost() {
<< " from problems.";
// Signal the message loop to quit to shut down other threads
// gracefully.
- base::MessageLoop::current()->QuitNow();
+ base::RunLoop::QuitCurrentDeprecated();
exiting_for_lost_context_ = true;
}
}
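
On the program_cache() change above: with the passthrough command decoder, program binary caching is expected to happen on the driver side (the comment in the diff points at an EGL cache-control path), so a lighter cache object is chosen. A rough sketch of that selection using placeholder classes rather than the real gles2 ones.

// Placeholder classes; the real ones are gles2::MemoryProgramCache and
// gles2::PassthroughProgramCache.
#include <cstddef>
#include <memory>

struct ProgramCache { virtual ~ProgramCache() = default; };
struct MemoryProgramCache : ProgramCache {
  MemoryProgramCache(std::size_t /*max_size*/, bool /*disable_disk_cache*/) {}
};
struct PassthroughProgramCache : ProgramCache {
  PassthroughProgramCache(std::size_t /*max_size*/, bool /*disable_disk_cache*/) {}
};

// Chooses the cache implementation the same way the patched program_cache()
// does: the passthrough decoder defers binary caching to the driver, so it
// gets the lighter cache object.
std::unique_ptr<ProgramCache> MakeProgramCache(bool use_passthrough_decoder,
                                               std::size_t max_size,
                                               bool disable_disk_cache) {
  if (use_passthrough_decoder) {
    return std::make_unique<PassthroughProgramCache>(max_size,
                                                     disable_disk_cache);
  }
  return std::make_unique<MemoryProgramCache>(max_size, disable_disk_cache);
}
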
diff --git a/chromium/gpu/ipc/service/gpu_channel_manager.h b/chromium/gpu/ipc/service/gpu_channel_manager.h
index da6075b8f78..e064f85777c 100644
--- a/chromium/gpu/ipc/service/gpu_channel_manager.h
+++ b/chromium/gpu/ipc/service/gpu_channel_manager.h
@@ -42,7 +42,6 @@ class GLShareGroup;
}
namespace gpu {
-class GpuDriverBugWorkarounds;
struct GpuPreferences;
class PreemptionFlag;
class Scheduler;
@@ -66,7 +65,6 @@ class GpuWatchdogThread;
class GPU_EXPORT GpuChannelManager {
public:
GpuChannelManager(const GpuPreferences& gpu_preferences,
- const GpuDriverBugWorkarounds& workarounds,
GpuChannelManagerDelegate* delegate,
GpuWatchdogThread* watchdog,
scoped_refptr<base::SingleThreadTaskRunner> task_runner,
@@ -169,7 +167,7 @@ class GPU_EXPORT GpuChannelManager {
scoped_refptr<base::SingleThreadTaskRunner> io_task_runner_;
const GpuPreferences gpu_preferences_;
- GpuDriverBugWorkarounds gpu_driver_bug_workarounds_;
+ const GpuDriverBugWorkarounds gpu_driver_bug_workarounds_;
GpuChannelManagerDelegate* const delegate_;
diff --git a/chromium/gpu/ipc/service/gpu_channel_test_common.cc b/chromium/gpu/ipc/service/gpu_channel_test_common.cc
index 36b6cc756db..f2fa1be5cd7 100644
--- a/chromium/gpu/ipc/service/gpu_channel_test_common.cc
+++ b/chromium/gpu/ipc/service/gpu_channel_test_common.cc
@@ -77,7 +77,6 @@ GpuChannelTestCommon::GpuChannelTestCommon()
channel_manager_delegate_(new TestGpuChannelManagerDelegate()),
channel_manager_(
new GpuChannelManager(GpuPreferences(),
- GpuDriverBugWorkarounds(),
channel_manager_delegate_.get(),
nullptr, /* watchdog */
task_runner_.get(),
diff --git a/chromium/gpu/ipc/service/gpu_command_buffer_stub.cc b/chromium/gpu/ipc/service/gpu_command_buffer_stub.cc
index 75c30e88c80..c5ca7db627e 100644
--- a/chromium/gpu/ipc/service/gpu_command_buffer_stub.cc
+++ b/chromium/gpu/ipc/service/gpu_command_buffer_stub.cc
@@ -51,6 +51,7 @@
#include "ui/gl/gl_image.h"
#include "ui/gl/gl_implementation.h"
#include "ui/gl/gl_switches.h"
+#include "ui/gl/gl_workarounds.h"
#include "ui/gl/init/gl_factory.h"
#if defined(OS_WIN)
@@ -251,7 +252,7 @@ GpuCommandBufferStub::GpuCommandBufferStub(
sequence_id_(sequence_id),
stream_id_(stream_id),
route_id_(route_id),
- last_flush_count_(0),
+ last_flush_id_(0),
waiting_for_sync_point_(false),
previous_processed_num_(0),
active_url_(init_params.active_url),
@@ -369,6 +370,10 @@ const gles2::FeatureInfo* GpuCommandBufferStub::GetFeatureInfo() const {
return context_group_->feature_info();
}
+const GpuPreferences& GpuCommandBufferStub::GetGpuPreferences() const {
+ return context_group_->gpu_preferences();
+}
+
void GpuCommandBufferStub::SetLatencyInfoCallback(
const LatencyInfoCallback& callback) {
latency_info_callback_ = callback;
@@ -564,6 +569,11 @@ bool GpuCommandBufferStub::Initialize(
GpuCommandBufferStub* share_command_buffer_stub,
const GPUCreateCommandBufferConfig& init_params,
std::unique_ptr<base::SharedMemory> shared_state_shm) {
+#if defined(OS_FUCHSIA)
+ // TODO(crbug.com/707031): Implement this.
+ NOTIMPLEMENTED();
+ return false;
+#else
TRACE_EVENT0("gpu", "GpuCommandBufferStub::Initialize");
FastSetActiveURL(active_url_, active_url_hash_, channel_);
@@ -580,7 +590,8 @@ bool GpuCommandBufferStub::Initialize(
gpu::GpuMemoryBufferFactory* gmb_factory =
channel_->gpu_channel_manager()->gpu_memory_buffer_factory();
context_group_ = new gles2::ContextGroup(
- manager->gpu_preferences(), manager->mailbox_manager(),
+ manager->gpu_preferences(), gles2::PassthroughCommandDecoderSupported(),
+ manager->mailbox_manager(),
new GpuCommandBufferMemoryTracker(
channel_, command_buffer_id_.GetUnsafeValue(),
init_params.attribs.context_type, channel_->task_runner()),
@@ -711,7 +722,7 @@ bool GpuCommandBufferStub::Initialize(
}
}
- if (context_group_->gpu_preferences().use_passthrough_cmd_decoder) {
+ if (context_group_->use_passthrough_cmd_decoder()) {
// When using the passthrough command decoder, only share with other
// contexts in the explicitly requested share group
if (share_command_buffer_stub) {
@@ -735,8 +746,7 @@ bool GpuCommandBufferStub::Initialize(
if (!context.get()) {
context = gl::init::CreateGLContext(
share_group_.get(), surface_.get(),
- GenerateGLContextAttribs(init_params.attribs,
- context_group_->gpu_preferences()));
+ GenerateGLContextAttribs(init_params.attribs, context_group_.get()));
if (!context.get()) {
DLOG(ERROR) << "Failed to create shared context for virtualization.";
return false;
@@ -745,6 +755,10 @@ bool GpuCommandBufferStub::Initialize(
// group.
DCHECK(context->share_group() == share_group_.get());
share_group_->SetSharedContext(surface_.get(), context.get());
+
+ // This needs to be called against the real shared context, not the
+ // virtual context created below.
+ manager->gpu_feature_info().ApplyToGLContext(context.get());
}
// This should be either:
// (1) a non-virtual GL context, or
@@ -754,26 +768,25 @@ bool GpuCommandBufferStub::Initialize(
gl::GetGLImplementation() == gl::kGLImplementationStubGL);
context = new GLContextVirtual(share_group_.get(), context.get(),
decoder_->AsWeakPtr());
- if (!context->Initialize(
- surface_.get(),
- GenerateGLContextAttribs(init_params.attribs,
- context_group_->gpu_preferences()))) {
+ if (!context->Initialize(surface_.get(),
+ GenerateGLContextAttribs(init_params.attribs,
+ context_group_.get()))) {
// The real context created above for the default offscreen surface
// might not be compatible with this surface.
context = NULL;
DLOG(ERROR) << "Failed to initialize virtual GL context.";
return false;
}
- }
- if (!context.get()) {
+ } else {
context = gl::init::CreateGLContext(
share_group_.get(), surface_.get(),
- GenerateGLContextAttribs(init_params.attribs,
- context_group_->gpu_preferences()));
- }
- if (!context.get()) {
- DLOG(ERROR) << "Failed to create context.";
- return false;
+ GenerateGLContextAttribs(init_params.attribs, context_group_.get()));
+ if (!context.get()) {
+ DLOG(ERROR) << "Failed to create context.";
+ return false;
+ }
+
+ manager->gpu_feature_info().ApplyToGLContext(context.get());
}
if (!context->MakeCurrent(surface_.get())) {
@@ -830,6 +843,7 @@ bool GpuCommandBufferStub::Initialize(
initialized_ = true;
return true;
+#endif // defined(OS_FUCHSIA)
}
void GpuCommandBufferStub::OnCreateStreamTexture(uint32_t texture_id,
@@ -958,26 +972,25 @@ void GpuCommandBufferStub::CheckCompleteWaits() {
void GpuCommandBufferStub::OnAsyncFlush(
int32_t put_offset,
- uint32_t flush_count,
- const std::vector<ui::LatencyInfo>& latency_info,
- const std::vector<SyncToken>& sync_token_fences) {
+ uint32_t flush_id,
+ const std::vector<ui::LatencyInfo>& latency_info) {
TRACE_EVENT1(
"gpu", "GpuCommandBufferStub::OnAsyncFlush", "put_offset", put_offset);
DCHECK(command_buffer_);
// We received this message out-of-order. This should not happen but is here
// to catch regressions. Ignore the message.
- DVLOG_IF(0, flush_count - last_flush_count_ >= 0x8000000U)
+ DVLOG_IF(0, flush_id - last_flush_id_ >= 0x8000000U)
<< "Received a Flush message out-of-order";
- if (flush_count > last_flush_count_ &&
+ if (flush_id > last_flush_id_ &&
ui::LatencyInfo::Verify(latency_info,
"GpuCommandBufferStub::OnAsyncFlush") &&
!latency_info_callback_.is_null()) {
latency_info_callback_.Run(latency_info);
}
- last_flush_count_ = flush_count;
+ last_flush_id_ = flush_id;
CommandBuffer::State pre_state = command_buffer_->GetState();
FastSetActiveURL(active_url_, active_url_hash_, channel_);
command_buffer_->Flush(put_offset, decoder_.get());
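
The out-of-order check in OnAsyncFlush relies on unsigned wraparound: subtracting the last seen id from the incoming id gives a small value for normal forward progress, even across the 32-bit boundary, and a very large value when an older id arrives late. A tiny self-contained illustration of that comparison; the 0x8000000 threshold is the one used in the code above.

// Serial-number style comparison: with uint32_t wraparound, a small
// difference means the incoming id is at or ahead of the last one; a huge
// difference means it is actually older.
#include <cassert>
#include <cstdint>

bool IsOutOfOrder(uint32_t flush_id, uint32_t last_flush_id) {
  return flush_id - last_flush_id >= 0x8000000u;  // threshold from the code above
}

int main() {
  assert(!IsOutOfOrder(5u, 4u));           // normal forward progress
  assert(!IsOutOfOrder(3u, 0xffffffffu));  // forward progress across the wrap
  assert(IsOutOfOrder(4u, 5u));            // id 4 arriving after 5 is stale
  return 0;
}
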
diff --git a/chromium/gpu/ipc/service/gpu_command_buffer_stub.h b/chromium/gpu/ipc/service/gpu_command_buffer_stub.h
index 6916ab7f14e..98e917bffc8 100644
--- a/chromium/gpu/ipc/service/gpu_command_buffer_stub.h
+++ b/chromium/gpu/ipc/service/gpu_command_buffer_stub.h
@@ -113,6 +113,7 @@ class GPU_EXPORT GpuCommandBufferStub
#endif
void DidSwapBuffersComplete(SwapBuffersCompleteParams params) override;
const gles2::FeatureInfo* GetFeatureInfo() const override;
+ const GpuPreferences& GetGpuPreferences() const override;
void SetLatencyInfoCallback(const LatencyInfoCallback& callback) override;
void UpdateVSyncParameters(base::TimeTicks timebase,
base::TimeDelta interval) override;
@@ -176,9 +177,8 @@ class GPU_EXPORT GpuCommandBufferStub
int32_t end,
IPC::Message* reply_message);
void OnAsyncFlush(int32_t put_offset,
- uint32_t flush_count,
- const std::vector<ui::LatencyInfo>& latency_info,
- const std::vector<SyncToken>& sync_token_fences);
+ uint32_t flush_id,
+ const std::vector<ui::LatencyInfo>& latency_info);
void OnRegisterTransferBuffer(int32_t id,
base::SharedMemoryHandle transfer_buffer,
uint32_t size);
@@ -214,6 +214,10 @@ class GPU_EXPORT GpuCommandBufferStub
bool CheckContextLost();
void CheckCompleteWaits();
+ // Apply driver bug workarounds and disabled GL extensions to the context.
+ static void SetContextGpuFeatureInfo(gl::GLContext* context,
+ const GpuFeatureInfo& gpu_feature_info);
+
// The lifetime of objects of this class is managed by a GpuChannel. The
// GpuChannels destroy all the GpuCommandBufferStubs that they own when they
// are destroyed. So a raw pointer is safe.
@@ -229,7 +233,7 @@ class GPU_EXPORT GpuCommandBufferStub
const SequenceId sequence_id_;
const int32_t stream_id_;
const int32_t route_id_;
- uint32_t last_flush_count_;
+ uint32_t last_flush_id_;
std::unique_ptr<CommandBufferService> command_buffer_;
std::unique_ptr<gles2::GLES2Decoder> decoder_;
diff --git a/chromium/gpu/ipc/service/gpu_init.cc b/chromium/gpu/ipc/service/gpu_init.cc
index c0c0c5f010d..e9909a96274 100644
--- a/chromium/gpu/ipc/service/gpu_init.cc
+++ b/chromium/gpu/ipc/service/gpu_init.cc
@@ -10,10 +10,13 @@
#include "base/third_party/dynamic_annotations/dynamic_annotations.h"
#include "base/threading/thread_restrictions.h"
#include "base/trace_event/trace_event.h"
+#include "build/build_config.h"
#include "gpu/command_buffer/service/gpu_switches.h"
+#include "gpu/command_buffer/service/service_utils.h"
#include "gpu/config/gpu_driver_bug_list.h"
#include "gpu/config/gpu_info_collector.h"
#include "gpu/config/gpu_switches.h"
+#include "gpu/config/gpu_switching.h"
#include "gpu/config/gpu_util.h"
#include "gpu/ipc/service/gpu_watchdog_thread.h"
#include "gpu/ipc/service/switches.h"
@@ -28,80 +31,34 @@
#endif
#if defined(OS_WIN)
-#include "gpu/ipc/service/child_window_surface_win.h"
#include "gpu/ipc/service/direct_composition_surface_win.h"
#endif
namespace gpu {
namespace {
-
-void GetGpuInfoFromCommandLine(gpu::GPUInfo& gpu_info,
- const base::CommandLine& command_line) {
- if (!command_line.HasSwitch(switches::kGpuVendorID) ||
- !command_line.HasSwitch(switches::kGpuDeviceID) ||
- !command_line.HasSwitch(switches::kGpuDriverVersion))
- return;
- bool success = base::HexStringToUInt(
- command_line.GetSwitchValueASCII(switches::kGpuVendorID),
- &gpu_info.gpu.vendor_id);
- DCHECK(success);
- success = base::HexStringToUInt(
- command_line.GetSwitchValueASCII(switches::kGpuDeviceID),
- &gpu_info.gpu.device_id);
- DCHECK(success);
- gpu_info.driver_vendor =
- command_line.GetSwitchValueASCII(switches::kGpuDriverVendor);
- gpu_info.driver_version =
- command_line.GetSwitchValueASCII(switches::kGpuDriverVersion);
- gpu_info.driver_date =
- command_line.GetSwitchValueASCII(switches::kGpuDriverDate);
- gpu::ParseSecondaryGpuDevicesFromCommandLine(command_line, &gpu_info);
-
- // Set active gpu device.
- if (command_line.HasSwitch(switches::kGpuActiveVendorID) &&
- command_line.HasSwitch(switches::kGpuActiveDeviceID)) {
- uint32_t active_vendor_id = 0;
- uint32_t active_device_id = 0;
- success = base::HexStringToUInt(
- command_line.GetSwitchValueASCII(switches::kGpuActiveVendorID),
- &active_vendor_id);
- DCHECK(success);
- success = base::HexStringToUInt(
- command_line.GetSwitchValueASCII(switches::kGpuActiveDeviceID),
- &active_device_id);
- DCHECK(success);
- if (gpu_info.gpu.vendor_id == active_vendor_id &&
- gpu_info.gpu.device_id == active_device_id) {
- gpu_info.gpu.active = true;
- } else {
- for (size_t i = 0; i < gpu_info.secondary_gpus.size(); ++i) {
- if (gpu_info.secondary_gpus[i].vendor_id == active_vendor_id &&
- gpu_info.secondary_gpus[i].device_id == active_device_id) {
- gpu_info.secondary_gpus[i].active = true;
- break;
- }
- }
- }
- }
-}
-
#if !defined(OS_MACOSX)
-void CollectGraphicsInfo(gpu::GPUInfo& gpu_info) {
+void CollectGraphicsInfo(GPUInfo* gpu_info) {
+ DCHECK(gpu_info);
+#if defined(OS_FUCHSIA)
+ // TODO(crbug.com/707031): Implement this.
+ NOTIMPLEMENTED();
+ return;
+#else
TRACE_EVENT0("gpu,startup", "Collect Graphics Info");
-
- gpu::CollectInfoResult result = gpu::CollectContextGraphicsInfo(&gpu_info);
+ base::TimeTicks before_collect_context_graphics_info = base::TimeTicks::Now();
+ CollectInfoResult result = CollectContextGraphicsInfo(gpu_info);
switch (result) {
- case gpu::kCollectInfoFatalFailure:
+ case kCollectInfoFatalFailure:
LOG(ERROR) << "gpu::CollectGraphicsInfo failed (fatal).";
break;
- case gpu::kCollectInfoNonFatalFailure:
+ case kCollectInfoNonFatalFailure:
DVLOG(1) << "gpu::CollectGraphicsInfo failed (non-fatal).";
break;
- case gpu::kCollectInfoNone:
+ case kCollectInfoNone:
NOTREACHED();
break;
- case gpu::kCollectInfoSuccess:
+ case kCollectInfoSuccess:
break;
}
@@ -109,12 +66,19 @@ void CollectGraphicsInfo(gpu::GPUInfo& gpu_info) {
if (gl::GetGLImplementation() == gl::kGLImplementationEGLGLES2 &&
gl::GLSurfaceEGL::IsDirectCompositionSupported() &&
DirectCompositionSurfaceWin::AreOverlaysSupported()) {
- gpu_info.supports_overlays = true;
+ gpu_info->supports_overlays = true;
}
if (DirectCompositionSurfaceWin::IsHDRSupported()) {
- gpu_info.hdr = true;
+ gpu_info->hdr = true;
}
#endif // defined(OS_WIN)
+
+ if (result != kCollectInfoFatalFailure) {
+ base::TimeDelta collect_context_time =
+ base::TimeTicks::Now() - before_collect_context_graphics_info;
+ UMA_HISTOGRAM_TIMES("GPU.CollectContextGraphicsInfo", collect_context_time);
+ }
+#endif // defined(OS_FUCHSIA)
}
#endif // defined(OS_MACOSX)
@@ -134,23 +98,46 @@ bool CanAccessNvidiaDeviceFile() {
GpuInit::GpuInit() {}
-GpuInit::~GpuInit() {}
+GpuInit::~GpuInit() {
+ gpu::StopForceDiscreteGPU();
+}
-bool GpuInit::InitializeAndStartSandbox(const base::CommandLine& command_line) {
- if (command_line.HasSwitch(switches::kSupportsDualGpus)) {
- std::set<int> workarounds;
- gpu::GpuDriverBugList::AppendWorkaroundsFromCommandLine(&workarounds,
- command_line);
- gpu::InitializeDualGpusIfSupported(workarounds);
+bool GpuInit::InitializeAndStartSandbox(base::CommandLine* command_line,
+ bool in_process_gpu) {
+#if defined(OS_ANDROID)
+ // Android doesn't have PCI vendor/device IDs, so collecting GL strings early
+ // is necessary.
+ CollectGraphicsInfo(&gpu_info_);
+ if (gpu_info_.context_info_state == gpu::kCollectInfoFatalFailure)
+ return false;
+#else
+ // Get vendor_id, device_id, and driver_version from the browser process
+ // through command-line switches.
+ // TODO(zmo): Collect basic GPU info (without a context) here instead of
+ // passing from browser process.
+ GetGpuInfoFromCommandLine(*command_line, &gpu_info_);
+#endif // OS_ANDROID
+#if defined(OS_LINUX) && !defined(OS_CHROMEOS)
+ if (gpu_info_.gpu.vendor_id == 0x10de && // NVIDIA
+ gpu_info_.driver_vendor == "NVIDIA" && !CanAccessNvidiaDeviceFile())
+ return false;
+#endif
+ gpu_info_.in_process_gpu = in_process_gpu;
+
+ // Compute blacklist and driver bug workaround decisions based on basic GPU
+ // info.
+ gpu_feature_info_ = gpu::ComputeGpuFeatureInfo(gpu_info_, command_line);
+ if (gpu::SwitchableGPUsSupported(gpu_info_, *command_line)) {
+ gpu::InitializeSwitchableGPUs(
+ gpu_feature_info_.enabled_gpu_driver_bug_workarounds);
}
// In addition to disabling the watchdog if the command line switch is
// present, disable the watchdog on valgrind because the code is expected
// to run slowly in that case.
bool enable_watchdog =
- !command_line.HasSwitch(switches::kDisableGpuWatchdog) &&
- !command_line.HasSwitch(switches::kHeadless) &&
- !RunningOnValgrind();
+ !command_line->HasSwitch(switches::kDisableGpuWatchdog) &&
+ !command_line->HasSwitch(switches::kHeadless) && !RunningOnValgrind();
// Disable the watchdog in debug builds because they tend to only be run by
// developers who will not appreciate the watchdog killing the GPU process.
@@ -185,31 +172,18 @@ bool GpuInit::InitializeAndStartSandbox(const base::CommandLine& command_line) {
#endif // OS_WIN
}
- // Get vendor_id, device_id, driver_version from browser process through
- // commandline switches.
- GetGpuInfoFromCommandLine(gpu_info_, command_line);
-#if defined(OS_LINUX) && !defined(OS_CHROMEOS)
- if (gpu_info_.gpu.vendor_id == 0x10de && // NVIDIA
- gpu_info_.driver_vendor == "NVIDIA" && !CanAccessNvidiaDeviceFile())
- return false;
-#endif
- gpu_info_.in_process_gpu = false;
-
- gpu_info_.passthrough_cmd_decoder =
- gl::UsePassthroughCommandDecoder(&command_line);
-
sandbox_helper_->PreSandboxStartup();
bool attempted_startsandbox = false;
#if defined(OS_LINUX)
// On Chrome OS ARM Mali, GPU driver userspace creates threads when
// initializing a GL context, so start the sandbox early.
- if (command_line.HasSwitch(switches::kGpuSandboxStartEarly)) {
+ // TODO(zmo): Need to collect OS version before this.
+ if (command_line->HasSwitch(switches::kGpuSandboxStartEarly)) {
gpu_info_.sandboxed =
sandbox_helper_->EnsureSandboxInitialized(watchdog_thread_.get());
attempted_startsandbox = true;
}
-
#endif // defined(OS_LINUX)
base::TimeTicks before_initialize_one_off = base::TimeTicks::Now();
@@ -218,7 +192,7 @@ bool GpuInit::InitializeAndStartSandbox(const base::CommandLine& command_line) {
// Initialize Ozone GPU after the watchdog in case it hangs. The sandbox
// may also have started at this point.
ui::OzonePlatform::InitParams params;
- params.single_process = false;
+ params.single_process = in_process_gpu;
ui::OzonePlatform::InitializeForGPU(params);
#endif
@@ -227,10 +201,10 @@ bool GpuInit::InitializeAndStartSandbox(const base::CommandLine& command_line) {
// browser process, for example.
bool gl_initialized = gl::GetGLImplementation() != gl::kGLImplementationNone;
if (!gl_initialized)
- gl_initialized = gl::init::InitializeGLOneOff();
+ gl_initialized = gl::init::InitializeGLNoExtensionsOneOff();
if (!gl_initialized) {
- VLOG(1) << "gl::init::InitializeGLOneOff failed";
+ VLOG(1) << "gl::init::InitializeGLNoExtensionsOneOff failed";
return false;
}
@@ -240,31 +214,22 @@ bool GpuInit::InitializeAndStartSandbox(const base::CommandLine& command_line) {
// multiple seconds to finish, which in turn cause the GPU process to crash.
// By skipping the following code on Mac, we don't really lose anything,
// because the basic GPU information is passed down from the host process.
- base::TimeTicks before_collect_context_graphics_info = base::TimeTicks::Now();
-#if !defined(OS_MACOSX)
- CollectGraphicsInfo(gpu_info_);
+#if !defined(OS_MACOSX) && !defined(OS_ANDROID)
+ CollectGraphicsInfo(&gpu_info_);
if (gpu_info_.context_info_state == gpu::kCollectInfoFatalFailure)
return false;
+ gpu_feature_info_ = gpu::ComputeGpuFeatureInfo(gpu_info_, command_line);
+#endif
- // Recompute gpu driver bug workarounds.
- // This is necessary on systems where vendor_id/device_id aren't available
- // (Chrome OS, Android) or where workarounds may be dependent on GL_VENDOR
- // and GL_RENDERER strings which are lazily computed (Linux).
- if (!command_line.HasSwitch(switches::kDisableGpuDriverBugWorkarounds)) {
- // TODO: this can not affect disabled extensions, since they're already
- // initialized in the bindings. This should be moved before bindings
- // initialization. However, populating GPUInfo fully works only on Android.
- // Other platforms would need the bindings to query GL strings.
- gpu::ApplyGpuDriverBugWorkarounds(
- gpu_info_, const_cast<base::CommandLine*>(&command_line));
+ if (!gpu_feature_info_.disabled_extensions.empty()) {
+ gl::init::SetDisabledExtensionsPlatform(
+ gpu_feature_info_.disabled_extensions);
+ }
+ gl_initialized = gl::init::InitializeExtensionSettingsOneOffPlatform();
+ if (!gl_initialized) {
+ VLOG(1) << "gl::init::InitializeExtensionSettingsOneOffPlatform failed";
+ return false;
}
-#endif // !defined(OS_MACOSX)
-
- gpu_feature_info_ = gpu::GetGpuFeatureInfo(gpu_info_, command_line);
-
- base::TimeDelta collect_context_time =
- base::TimeTicks::Now() - before_collect_context_graphics_info;
- UMA_HISTOGRAM_TIMES("GPU.CollectContextGraphicsInfo", collect_context_time);
base::TimeDelta initialize_one_off_time =
base::TimeTicks::Now() - before_initialize_one_off;
@@ -285,6 +250,12 @@ bool GpuInit::InitializeAndStartSandbox(const base::CommandLine& command_line) {
gpu_info_.sandboxed =
sandbox_helper_->EnsureSandboxInitialized(watchdog_thread_.get());
}
+ UMA_HISTOGRAM_BOOLEAN("GPU.Sandbox.InitializedSuccessfully",
+ gpu_info_.sandboxed);
+
+ gpu_info_.passthrough_cmd_decoder =
+ gl::UsePassthroughCommandDecoder(command_line) &&
+ gles2::PassthroughCommandDecoderSupported();
return true;
}
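
The reworked InitializeAndStartSandbox splits GL setup into two phases: bindings first via InitializeGLNoExtensionsOneOff, then GPUInfo collection and GpuFeatureInfo computation, and only then the extension pass, so that extensions disabled by driver bug workarounds actually take effect. A condensed sketch of that ordering; the helpers below are trivial stubs standing in for the calls shown in the diff.

// Trivial stubs standing in for the gl::init / gpu:: calls named in the diff;
// each one just pretends to succeed so the ordering is self-contained.
#include <string>
#include <vector>

static bool InitializeGLNoExtensionsOneOff() { return true; }
static bool CollectContextGraphicsInfo() { return true; }
static std::vector<std::string> ComputeDisabledExtensions() { return {}; }
static void SetDisabledExtensionsPlatform(const std::vector<std::string>&) {}
static bool InitializeExtensionSettingsOneOffPlatform() { return true; }

// Bindings first, then feature decisions, then the extension pass: the list
// of disabled extensions has to exist before extension settings are applied.
static bool InitializeGLTwoPhase() {
  if (!InitializeGLNoExtensionsOneOff())
    return false;
  if (!CollectContextGraphicsInfo())
    return false;
  const std::vector<std::string> disabled = ComputeDisabledExtensions();
  if (!disabled.empty())
    SetDisabledExtensionsPlatform(disabled);
  return InitializeExtensionSettingsOneOffPlatform();
}
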
diff --git a/chromium/gpu/ipc/service/gpu_init.h b/chromium/gpu/ipc/service/gpu_init.h
index 268fffc6151..7b5a6a67d34 100644
--- a/chromium/gpu/ipc/service/gpu_init.h
+++ b/chromium/gpu/ipc/service/gpu_init.h
@@ -36,7 +36,8 @@ class GPU_EXPORT GpuInit {
sandbox_helper_ = helper;
}
- bool InitializeAndStartSandbox(const base::CommandLine& command_line);
+ bool InitializeAndStartSandbox(base::CommandLine* command_line,
+ bool in_process_gpu);
const GPUInfo& gpu_info() const { return gpu_info_; }
const GpuFeatureInfo& gpu_feature_info() const { return gpu_feature_info_; }
diff --git a/chromium/gpu/ipc/service/gpu_memory_buffer_factory_io_surface.h b/chromium/gpu/ipc/service/gpu_memory_buffer_factory_io_surface.h
index 0efd7ca3436..a1548fbcdf2 100644
--- a/chromium/gpu/ipc/service/gpu_memory_buffer_factory_io_surface.h
+++ b/chromium/gpu/ipc/service/gpu_memory_buffer_factory_io_surface.h
@@ -65,7 +65,7 @@ class GPU_EXPORT GpuMemoryBufferFactoryIOSurface
typedef std::pair<gfx::IOSurfaceId, int> IOSurfaceMapKey;
typedef base::hash_map<IOSurfaceMapKey, base::ScopedCFTypeRef<IOSurfaceRef>>
IOSurfaceMap;
- // TOOD(reveman): Remove |io_surfaces_| and allow IOSurface backed GMBs to be
+ // TODO(reveman): Remove |io_surfaces_| and allow IOSurface backed GMBs to be
// used with any GPU process by passing a mach_port to CreateImageCHROMIUM.
IOSurfaceMap io_surfaces_;
base::Lock io_surfaces_lock_;
diff --git a/chromium/gpu/ipc/service/gpu_memory_buffer_factory_test_template.h b/chromium/gpu/ipc/service/gpu_memory_buffer_factory_test_template.h
index 4d0ed2d89cb..8edaddbdddd 100644
--- a/chromium/gpu/ipc/service/gpu_memory_buffer_factory_test_template.h
+++ b/chromium/gpu/ipc/service/gpu_memory_buffer_factory_test_template.h
@@ -31,8 +31,11 @@ TYPED_TEST_P(GpuMemoryBufferFactoryTest, CreateGpuMemoryBuffer) {
for (auto format : gfx::GetBufferFormatsForTesting()) {
gfx::BufferUsage usages[] = {
- gfx::BufferUsage::GPU_READ, gfx::BufferUsage::SCANOUT,
+ gfx::BufferUsage::GPU_READ,
+ gfx::BufferUsage::SCANOUT,
+ gfx::BufferUsage::SCANOUT_CAMERA_READ_WRITE,
gfx::BufferUsage::SCANOUT_CPU_READ_WRITE,
+ gfx::BufferUsage::SCANOUT_VDA_WRITE,
gfx::BufferUsage::GPU_READ_CPU_READ_WRITE,
gfx::BufferUsage::GPU_READ_CPU_READ_WRITE_PERSISTENT};
for (auto usage : usages) {
diff --git a/chromium/gpu/ipc/service/gpu_vsync_provider_unittest_win.cc b/chromium/gpu/ipc/service/gpu_vsync_provider_unittest_win.cc
index 4e4eaafcea8..4822e350310 100644
--- a/chromium/gpu/ipc/service/gpu_vsync_provider_unittest_win.cc
+++ b/chromium/gpu/ipc/service/gpu_vsync_provider_unittest_win.cc
@@ -10,6 +10,7 @@
#include "base/synchronization/lock.h"
#include "base/synchronization/waitable_event.h"
#include "base/threading/thread.h"
+#include "gpu/command_buffer/service/gpu_preferences.h"
#include "gpu/ipc/common/gpu_messages.h"
#include "ipc/ipc_channel.h"
#include "ipc/ipc_message_macros.h"
@@ -113,6 +114,9 @@ class FakeDelegate : public ImageTransportSurfaceDelegate,
SurfaceHandle child_window) override {}
void DidSwapBuffersComplete(SwapBuffersCompleteParams params) override {}
const gles2::FeatureInfo* GetFeatureInfo() const override { return nullptr; }
+ const GpuPreferences& GetGpuPreferences() const override {
+ return gpu_preferences_;
+ }
void SetLatencyInfoCallback(const LatencyInfoCallback& callback) override {}
void UpdateVSyncParameters(base::TimeTicks timebase,
base::TimeDelta interval) override {
@@ -128,6 +132,7 @@ class FakeDelegate : public ImageTransportSurfaceDelegate,
private:
FakeChannel* channel_;
+ GpuPreferences gpu_preferences_;
DISALLOW_COPY_AND_ASSIGN(FakeDelegate);
};
diff --git a/chromium/gpu/ipc/service/gpu_vsync_provider_win.cc b/chromium/gpu/ipc/service/gpu_vsync_provider_win.cc
index c2d8c8a1487..47cf27fdbef 100644
--- a/chromium/gpu/ipc/service/gpu_vsync_provider_win.cc
+++ b/chromium/gpu/ipc/service/gpu_vsync_provider_win.cc
@@ -153,7 +153,7 @@ class GpuVSyncWorker : public base::Thread,
GpuVSyncWorker::GpuVSyncWorker(
const gfx::VSyncProvider::UpdateVSyncCallback& callback,
SurfaceHandle surface_handle)
- : base::Thread(base::StringPrintf("VSync-%d", surface_handle)),
+ : base::Thread(base::StringPrintf("VSync-%p", surface_handle)),
callback_(callback),
surface_handle_(surface_handle) {
HMODULE gdi32 = GetModuleHandle(L"gdi32");
@@ -558,8 +558,12 @@ GpuVSyncProviderWin::GpuVSyncProviderWin(
// Start the thread.
base::Thread::Options options;
- // TODO(stanisc): might consider even higher priority - REALTIME_AUDIO.
- options.priority = base::ThreadPriority::DISPLAY;
+ // Realtime priority is needed to minimize wakeup latency and to keep the
+ // thread from being preempted while it handles the v-blank wake-up. The
+ // thread sleeps most of the time and does only a tiny amount of work per
+ // cycle, so the elevated priority is mostly about achieving the lowest
+ // possible latency rather than throughput.
+ options.priority = base::ThreadPriority::REALTIME_AUDIO;
vsync_worker_->StartWithOptions(options);
}
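
A short usage sketch of the priority change above: the v-sync worker is an ordinary base::Thread whose Options carry the elevated priority before StartWithOptions. The wrapper function below is hypothetical; the types are the Chromium //base ones already used in this file.

// Sketch only; base::Thread and base::ThreadPriority are the //base types
// already used in this file, StartVSyncWorker is hypothetical.
#include "base/threading/thread.h"

void StartVSyncWorker(base::Thread* vsync_worker) {
  base::Thread::Options options;
  // The thread sleeps until v-blank and then does very little work, so the
  // elevated priority buys wakeup latency rather than throughput.
  options.priority = base::ThreadPriority::REALTIME_AUDIO;
  vsync_worker->StartWithOptions(options);
}
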
diff --git a/chromium/gpu/ipc/service/gpu_watchdog_thread.cc b/chromium/gpu/ipc/service/gpu_watchdog_thread.cc
index 4006edc693c..057de5a9be3 100644
--- a/chromium/gpu/ipc/service/gpu_watchdog_thread.cc
+++ b/chromium/gpu/ipc/service/gpu_watchdog_thread.cc
@@ -394,7 +394,7 @@ void GpuWatchdogThread::DeliberatelyTerminateToRecoverFromHang() {
bool using_thread_ticks = base::ThreadTicks::IsSupported();
base::debug::Alias(&using_thread_ticks);
- bool using_high_res_timer = base::Time::IsHighResolutionTimerInUse();
+ bool using_high_res_timer = base::TimeTicks::IsHighResolution();
base::debug::Alias(&using_high_res_timer);
#endif
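
For the watchdog change above, the value is copied into a local and passed to base::debug::Alias so the compiler keeps it alive and it shows up in the crash dump taken when the watchdog deliberately terminates. A minimal sketch of that pattern; the wrapper function is hypothetical.

// Sketch only; base::debug::Alias and base::TimeTicks are the //base APIs
// already used in this file, RecordTimerStateForCrashDump is hypothetical.
#include "base/debug/alias.h"
#include "base/time/time.h"

void RecordTimerStateForCrashDump() {
  // Copy the value into a local and alias it so the compiler keeps it alive;
  // it then appears in the dump produced by the deliberate termination.
  bool using_high_res_timer = base::TimeTicks::IsHighResolution();
  base::debug::Alias(&using_high_res_timer);
}
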
diff --git a/chromium/gpu/ipc/service/image_transport_surface_delegate.h b/chromium/gpu/ipc/service/image_transport_surface_delegate.h
index 44edc0f643d..9182004ac15 100644
--- a/chromium/gpu/ipc/service/image_transport_surface_delegate.h
+++ b/chromium/gpu/ipc/service/image_transport_surface_delegate.h
@@ -22,6 +22,7 @@ class MessageFilter;
}
namespace gpu {
+struct GpuPreferences;
namespace gles2 {
class FeatureInfo;
@@ -68,6 +69,8 @@ class GPU_EXPORT ImageTransportSurfaceDelegate {
// Returns the features available for the ContextGroup.
virtual const gles2::FeatureInfo* GetFeatureInfo() const = 0;
+ virtual const GpuPreferences& GetGpuPreferences() const = 0;
+
using LatencyInfoCallback =
base::Callback<void(const std::vector<ui::LatencyInfo>&)>;
// |callback| is called when the delegate has updated LatencyInfo available.
diff --git a/chromium/gpu/ipc/service/image_transport_surface_fuchsia.cc b/chromium/gpu/ipc/service/image_transport_surface_fuchsia.cc
new file mode 100644
index 00000000000..65d3013cc97
--- /dev/null
+++ b/chromium/gpu/ipc/service/image_transport_surface_fuchsia.cc
@@ -0,0 +1,22 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "gpu/ipc/service/image_transport_surface.h"
+
+#include "base/logging.h"
+#include "ui/gl/gl_surface_stub.h"
+
+namespace gpu {
+
+// static
+scoped_refptr<gl::GLSurface> ImageTransportSurface::CreateNativeSurface(
+ base::WeakPtr<ImageTransportSurfaceDelegate> delegate,
+ SurfaceHandle surface_handle,
+ gl::GLSurfaceFormat format) {
+ DCHECK(gl::GetGLImplementation() == gl::kGLImplementationMockGL ||
+ gl::GetGLImplementation() == gl::kGLImplementationStubGL);
+ return new gl::GLSurfaceStub;
+}
+
+} // namespace gpu
diff --git a/chromium/gpu/ipc/service/image_transport_surface_overlay_mac.h b/chromium/gpu/ipc/service/image_transport_surface_overlay_mac.h
index 8b1a0ac1ca8..53a6df00c49 100644
--- a/chromium/gpu/ipc/service/image_transport_surface_overlay_mac.h
+++ b/chromium/gpu/ipc/service/image_transport_surface_overlay_mac.h
@@ -43,6 +43,7 @@ class ImageTransportSurfaceOverlayMac : public gl::GLSurface,
void Destroy() override;
bool Resize(const gfx::Size& size,
float scale_factor,
+ ColorSpace color_space,
bool has_alpha) override;
bool IsOffscreen() override;
gfx::SwapResult SwapBuffers() override;
diff --git a/chromium/gpu/ipc/service/image_transport_surface_overlay_mac.mm b/chromium/gpu/ipc/service/image_transport_surface_overlay_mac.mm
index 1c75718e5a7..5a965bd8533 100644
--- a/chromium/gpu/ipc/service/image_transport_surface_overlay_mac.mm
+++ b/chromium/gpu/ipc/service/image_transport_surface_overlay_mac.mm
@@ -382,6 +382,7 @@ bool ImageTransportSurfaceOverlayMac::IsSurfaceless() const {
bool ImageTransportSurfaceOverlayMac::Resize(const gfx::Size& pixel_size,
float scale_factor,
+ ColorSpace color_space,
bool has_alpha) {
pixel_size_ = pixel_size;
scale_factor_ = scale_factor;
diff --git a/chromium/gpu/ipc/service/image_transport_surface_win.cc b/chromium/gpu/ipc/service/image_transport_surface_win.cc
index d4b5f75e6cb..996a32e70a6 100644
--- a/chromium/gpu/ipc/service/image_transport_surface_win.cc
+++ b/chromium/gpu/ipc/service/image_transport_surface_win.cc
@@ -6,9 +6,8 @@
#include <memory>
-#include "base/metrics/histogram_macros.h"
#include "base/win/windows_version.h"
-#include "gpu/ipc/service/child_window_surface_win.h"
+#include "gpu/command_buffer/service/gpu_preferences.h"
#include "gpu/ipc/service/direct_composition_surface_win.h"
#include "gpu/ipc/service/gpu_vsync_provider_win.h"
#include "gpu/ipc/service/pass_through_image_transport_surface.h"
@@ -52,25 +51,12 @@ scoped_refptr<gl::GLSurface> ImageTransportSurface::CreateNativeSurface(
vsync_provider.reset(new gl::VSyncProviderWin(surface_handle));
if (gl::GLSurfaceEGL::IsDirectCompositionSupported()) {
- bool overlays_supported =
- DirectCompositionSurfaceWin::AreOverlaysSupported();
- UMA_HISTOGRAM_BOOLEAN("GPU.DirectComposition.OverlaysSupported",
- overlays_supported);
- if (overlays_supported) {
- scoped_refptr<DirectCompositionSurfaceWin> egl_surface =
- make_scoped_refptr(new DirectCompositionSurfaceWin(
- std::move(vsync_provider), delegate, surface_handle));
- if (!egl_surface->Initialize())
- return nullptr;
- surface = egl_surface;
- } else {
- scoped_refptr<ChildWindowSurfaceWin> egl_surface =
- make_scoped_refptr(new ChildWindowSurfaceWin(
- std::move(vsync_provider), delegate, surface_handle));
- if (!egl_surface->Initialize())
- return nullptr;
- surface = egl_surface;
- }
+ scoped_refptr<DirectCompositionSurfaceWin> egl_surface =
+ make_scoped_refptr(new DirectCompositionSurfaceWin(
+ std::move(vsync_provider), delegate, surface_handle));
+ if (!egl_surface->Initialize())
+ return nullptr;
+ surface = egl_surface;
} else {
surface = gl::init::CreateNativeViewGLSurfaceEGL(
surface_handle, std::move(vsync_provider));
diff --git a/chromium/gpu/ipc/service/stream_texture_android.cc b/chromium/gpu/ipc/service/stream_texture_android.cc
index dea1f36a401..76332866b51 100644
--- a/chromium/gpu/ipc/service/stream_texture_android.cc
+++ b/chromium/gpu/ipc/service/stream_texture_android.cc
@@ -145,7 +145,7 @@ bool StreamTexture::CopyTexImage(unsigned target) {
return false;
if (!owner_stub_ || !surface_texture_.get())
- return true;
+ return false;
GLint texture_id;
glGetIntegerv(GL_TEXTURE_BINDING_EXTERNAL_OES, &texture_id);
@@ -221,12 +221,10 @@ void StreamTexture::OnForwardForSurfaceRequest(
}
bool StreamTexture::BindTexImage(unsigned target) {
- NOTREACHED();
return false;
}
void StreamTexture::ReleaseTexImage(unsigned target) {
- NOTREACHED();
}
bool StreamTexture::CopyTexSubImage(unsigned target,
diff --git a/chromium/gpu/ipc/service/switches.cc b/chromium/gpu/ipc/service/switches.cc
index 4a312eaec1e..1282e02ff4a 100644
--- a/chromium/gpu/ipc/service/switches.cc
+++ b/chromium/gpu/ipc/service/switches.cc
@@ -14,6 +14,6 @@ const char kDisableGpuWatchdog[] = "disable-gpu-watchdog";
const char kGpuSandboxStartEarly[] = "gpu-sandbox-start-early";
const base::Feature kDirectCompositionOverlays{
- "DirectCompositionOverlays", base::FEATURE_DISABLED_BY_DEFAULT};
+ "DirectCompositionOverlays", base::FEATURE_ENABLED_BY_DEFAULT};
} // namespace switches
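
DirectCompositionOverlays above is flipped to enabled by default; call sites still gate on the runtime FeatureList check, so the feature can be overridden from the command line or a field trial. A small usage sketch; the wrapper function is hypothetical, the Feature constant is the one defined above.

// Sketch only; OverlaysAllowed is hypothetical, the Feature constant is the
// one defined above.
#include "base/feature_list.h"
#include "gpu/ipc/service/switches.h"

bool OverlaysAllowed() {
  // Honors --enable-features / --disable-features and field trial overrides.
  return base::FeatureList::IsEnabled(switches::kDirectCompositionOverlays);
}
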
diff --git a/chromium/gpu/skia_bindings/gl_bindings_skia_cmd_buffer.cc b/chromium/gpu/skia_bindings/gl_bindings_skia_cmd_buffer.cc
index c9881d49efc..f6d723d3f01 100644
--- a/chromium/gpu/skia_bindings/gl_bindings_skia_cmd_buffer.cc
+++ b/chromium/gpu/skia_bindings/gl_bindings_skia_cmd_buffer.cc
@@ -108,6 +108,8 @@ sk_sp<GrGLInterface> CreateGLES2InterfaceBindings(GLES2Interface* impl) {
gles_bind(&GLES2Interface::GetBufferParameteriv, impl);
functions->fGetError = gles_bind(&GLES2Interface::GetError, impl);
functions->fGetIntegerv = get_integerv;
+ functions->fGetInternalformativ =
+ gles_bind(&GLES2Interface::GetInternalformativ, impl);
functions->fGetProgramInfoLog =
gles_bind(&GLES2Interface::GetProgramInfoLog, impl);
functions->fGetProgramiv = gles_bind(&GLES2Interface::GetProgramiv, impl);
@@ -129,6 +131,7 @@ sk_sp<GrGLInterface> CreateGLES2InterfaceBindings(GLES2Interface* impl) {
gles_bind(&GLES2Interface::InvalidateFramebuffer, impl);
functions->fInvalidateSubFramebuffer =
gles_bind(&GLES2Interface::InvalidateSubFramebuffer, impl);
+ functions->fIsSync = gles_bind(&GLES2Interface::IsSync, impl);
functions->fIsTexture = gles_bind(&GLES2Interface::IsTexture, impl);
functions->fLineWidth = gles_bind(&GLES2Interface::LineWidth, impl);
functions->fLinkProgram = gles_bind(&GLES2Interface::LinkProgram, impl);
diff --git a/chromium/gpu/tools/compositor_model_bench/BUILD.gn b/chromium/gpu/tools/compositor_model_bench/BUILD.gn
index 114b7d1d0cb..b6e0e9541d4 100644
--- a/chromium/gpu/tools/compositor_model_bench/BUILD.gn
+++ b/chromium/gpu/tools/compositor_model_bench/BUILD.gn
@@ -4,7 +4,7 @@
import("//build/config/ui.gni")
-if (is_linux && !is_chromeos && current_cpu != "arm" && use_x11) {
+if (is_linux && use_x11 && current_cpu != "arm") {
executable("compositor_model_bench") {
sources = [
"compositor_model_bench.cc",