author     Allan Sandfeld Jensen <allan.jensen@qt.io>   2017-04-05 14:08:31 +0200
committer  Allan Sandfeld Jensen <allan.jensen@qt.io>   2017-04-11 07:46:53 +0000
commit     6a4cabb866f66d4128a97cdc6d9d08ce074f1247 (patch)
tree       ab00f70a5e89278d6a0d16ff0c42578dc4d84a2d /chromium/gpu
parent     e733310db58160074f574c429d48f8308c0afe17 (diff)
download   qtwebengine-chromium-6a4cabb866f66d4128a97cdc6d9d08ce074f1247.tar.gz

BASELINE: Update Chromium to 57.0.2987.144

Change-Id: I29db402ff696c71a04c4dbaec822c2e53efe0267
Reviewed-by: Peter Varga <pvarga@inf.u-szeged.hu>
Diffstat (limited to 'chromium/gpu')
-rw-r--r--  chromium/gpu/BUILD.gn | 31
-rw-r--r--  chromium/gpu/DEPS | 1
-rw-r--r--  chromium/gpu/GLES2/extensions/CHROMIUM/CHROMIUM_stream_texture_matrix.txt | 25
-rw-r--r--  chromium/gpu/GLES2/gl2chromium_autogen.h | 2
-rw-r--r--  chromium/gpu/GLES2/gl2extchromium.h | 48
-rw-r--r--  chromium/gpu/angle_deqp_tests_main.cc | 7
-rw-r--r--  chromium/gpu/angle_unittest_main.cc | 4
-rwxr-xr-x  chromium/gpu/command_buffer/build_gles2_cmd_buffer.py | 28
-rw-r--r--  chromium/gpu/command_buffer/client/client_context_state.cc | 13
-rw-r--r--  chromium/gpu/command_buffer/client/client_context_state.h | 8
-rw-r--r--  chromium/gpu/command_buffer/client/client_test_helper.cc | 46
-rw-r--r--  chromium/gpu/command_buffer/client/client_test_helper.h | 9
-rw-r--r--  chromium/gpu/command_buffer/client/cmd_buffer_helper.cc | 78
-rw-r--r--  chromium/gpu/command_buffer/client/cmd_buffer_helper.h | 54
-rw-r--r--  chromium/gpu/command_buffer/client/cmd_buffer_helper_test.cc | 6
-rw-r--r--  chromium/gpu/command_buffer/client/context_support.h | 7
-rw-r--r--  chromium/gpu/command_buffer/client/gles2_c_lib_autogen.h | 24
-rw-r--r--  chromium/gpu/command_buffer/client/gles2_cmd_helper_autogen.h | 25
-rw-r--r--  chromium/gpu/command_buffer/client/gles2_implementation.cc | 99
-rw-r--r--  chromium/gpu/command_buffer/client/gles2_implementation.h | 3
-rw-r--r--  chromium/gpu/command_buffer/client/gles2_implementation_autogen.h | 9
-rw-r--r--  chromium/gpu/command_buffer/client/gles2_implementation_impl_autogen.h | 72
-rw-r--r--  chromium/gpu/command_buffer/client/gles2_implementation_unittest.cc | 4
-rw-r--r--  chromium/gpu/command_buffer/client/gles2_implementation_unittest_autogen.h | 17
-rw-r--r--  chromium/gpu/command_buffer/client/gles2_interface_autogen.h | 8
-rw-r--r--  chromium/gpu/command_buffer/client/gles2_interface_stub_autogen.h | 8
-rw-r--r--  chromium/gpu/command_buffer/client/gles2_interface_stub_impl_autogen.h | 9
-rw-r--r--  chromium/gpu/command_buffer/client/gles2_trace_implementation_autogen.h | 8
-rw-r--r--  chromium/gpu/command_buffer/client/gles2_trace_implementation_impl_autogen.h | 26
-rw-r--r--  chromium/gpu/command_buffer/client/gpu_control.h | 29
-rw-r--r--  chromium/gpu/command_buffer/client/gpu_memory_buffer_manager.h | 14
-rw-r--r--  chromium/gpu/command_buffer/client/mapped_memory.cc | 10
-rw-r--r--  chromium/gpu/command_buffer/client/mapped_memory.h | 9
-rw-r--r--  chromium/gpu/command_buffer/client/ring_buffer.cc | 3
-rw-r--r--  chromium/gpu/command_buffer/cmd_buffer_functions.txt | 7
-rw-r--r--  chromium/gpu/command_buffer/common/buffer.cc | 9
-rw-r--r--  chromium/gpu/command_buffer/common/buffer.h | 2
-rw-r--r--  chromium/gpu/command_buffer/common/capabilities.cc | 2
-rw-r--r--  chromium/gpu/command_buffer/common/capabilities.h | 3
-rw-r--r--  chromium/gpu/command_buffer/common/cmd_buffer_common.cc | 11
-rw-r--r--  chromium/gpu/command_buffer/common/command_buffer.h | 31
-rw-r--r--  chromium/gpu/command_buffer/common/command_buffer_mock.h | 9
-rw-r--r--  chromium/gpu/command_buffer/common/gles2_cmd_format_autogen.h | 168
-rw-r--r--  chromium/gpu/command_buffer/common/gles2_cmd_format_test_autogen.h | 73
-rw-r--r--  chromium/gpu/command_buffer/common/gles2_cmd_ids_autogen.h | 3
-rw-r--r--  chromium/gpu/command_buffer/common/gles2_cmd_utils.cc | 73
-rw-r--r--  chromium/gpu/command_buffer/common/gles2_cmd_utils.h | 2
-rw-r--r--  chromium/gpu/command_buffer/common/gles2_cmd_utils_autogen.h | 1
-rw-r--r--  chromium/gpu/command_buffer/common/gles2_cmd_utils_implementation_autogen.h | 9
-rw-r--r--  chromium/gpu/command_buffer/common/unittest_main.cc | 15
-rw-r--r--  chromium/gpu/command_buffer/service/BUILD.gn | 1
-rw-r--r--  chromium/gpu/command_buffer/service/DEPS | 3
-rw-r--r--  chromium/gpu/command_buffer/service/client_service_map.h | 18
-rw-r--r--  chromium/gpu/command_buffer/service/command_buffer_service.cc | 30
-rw-r--r--  chromium/gpu/command_buffer/service/command_buffer_service.h | 10
-rw-r--r--  chromium/gpu/command_buffer/service/context_state.cc | 15
-rw-r--r--  chromium/gpu/command_buffer/service/context_state.h | 7
-rw-r--r--  chromium/gpu/command_buffer/service/context_state_autogen.h | 8
-rw-r--r--  chromium/gpu/command_buffer/service/context_state_impl_autogen.h | 16
-rw-r--r--  chromium/gpu/command_buffer/service/feature_info.cc | 65
-rw-r--r--  chromium/gpu/command_buffer/service/feature_info.h | 2
-rw-r--r--  chromium/gpu/command_buffer/service/feature_info_unittest.cc | 13
-rw-r--r--  chromium/gpu/command_buffer/service/framebuffer_manager.cc | 38
-rw-r--r--  chromium/gpu/command_buffer/service/framebuffer_manager.h | 9
-rw-r--r--  chromium/gpu/command_buffer/service/gl_context_virtual.h | 1
-rw-r--r--  chromium/gpu/command_buffer/service/gl_stream_texture_image.h | 4
-rw-r--r--  chromium/gpu/command_buffer/service/gl_surface_mock.h | 4
-rw-r--r--  chromium/gpu/command_buffer/service/gl_utils.cc | 6
-rw-r--r--  chromium/gpu/command_buffer/service/gles2_cmd_apply_framebuffer_attachment_cmaa_intel.cc | 23
-rw-r--r--  chromium/gpu/command_buffer/service/gles2_cmd_apply_framebuffer_attachment_cmaa_intel.h | 12
-rw-r--r--  chromium/gpu/command_buffer/service/gles2_cmd_copy_texture_chromium.cc | 757
-rw-r--r--  chromium/gpu/command_buffer/service/gles2_cmd_copy_texture_chromium.h | 44
-rw-r--r--  chromium/gpu/command_buffer/service/gles2_cmd_decoder.cc | 739
-rw-r--r--  chromium/gpu/command_buffer/service/gles2_cmd_decoder_autogen.h | 43
-rw-r--r--  chromium/gpu/command_buffer/service/gles2_cmd_decoder_mock.h | 1
-rw-r--r--  chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough.cc | 327
-rw-r--r--  chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough.h | 81
-rw-r--r--  chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough_doer_prototypes.h | 37
-rw-r--r--  chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough_doers.cc | 439
-rw-r--r--  chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough_handlers.cc | 112
-rw-r--r--  chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough_handlers_autogen.cc | 31
-rw-r--r--  chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest.cc | 78
-rw-r--r--  chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest.h | 13
-rw-r--r--  chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_1_autogen.h | 4
-rw-r--r--  chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_base.cc | 65
-rw-r--r--  chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_base.h | 7
-rw-r--r--  chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_drawing.cc | 8
-rw-r--r--  chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_framebuffers.cc | 118
-rw-r--r--  chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_programs.cc | 3
-rw-r--r--  chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_textures.cc | 62
-rw-r--r--  chromium/gpu/command_buffer/service/gles2_cmd_srgb_converter.cc | 100
-rw-r--r--  chromium/gpu/command_buffer/service/gles2_cmd_validation_autogen.h | 6
-rw-r--r--  chromium/gpu/command_buffer/service/gles2_cmd_validation_implementation_autogen.h | 10
-rw-r--r--  chromium/gpu/command_buffer/service/gpu_preferences.h | 3
-rw-r--r--  chromium/gpu/command_buffer/service/gpu_service_test.cc | 2
-rw-r--r--  chromium/gpu/command_buffer/service/mailbox_manager_impl.h | 1
-rw-r--r--  chromium/gpu/command_buffer/service/memory_program_cache.cc | 12
-rw-r--r--  chromium/gpu/command_buffer/service/memory_program_cache.h | 6
-rw-r--r--  chromium/gpu/command_buffer/service/memory_program_cache_unittest.cc | 38
-rw-r--r--  chromium/gpu/command_buffer/service/program_manager.cc | 39
-rw-r--r--  chromium/gpu/command_buffer/service/sampler_manager.cc | 5
-rw-r--r--  chromium/gpu/command_buffer/service/sampler_manager.h | 9
-rw-r--r--  chromium/gpu/command_buffer/service/shader_translator.cc | 37
-rw-r--r--  chromium/gpu/command_buffer/service/shader_translator_cache_unittest.cc | 4
-rw-r--r--  chromium/gpu/command_buffer/service/shader_translator_unittest.cc | 10
-rw-r--r--  chromium/gpu/command_buffer/service/texture_manager.cc | 55
-rw-r--r--  chromium/gpu/command_buffer/service/texture_manager.h | 33
-rw-r--r--  chromium/gpu/command_buffer/service/texture_manager_unittest.cc | 28
-rw-r--r--  chromium/gpu/command_buffer/service/transfer_buffer_manager.cc | 27
-rw-r--r--  chromium/gpu/command_buffer/service/transform_feedback_manager.cc | 5
-rw-r--r--  chromium/gpu/command_buffer/service/transform_feedback_manager.h | 12
-rw-r--r--  chromium/gpu/config/gpu_control_list_jsons.h | 1
-rw-r--r--  chromium/gpu/config/gpu_driver_bug_list_json.cc | 134
-rw-r--r--  chromium/gpu/config/gpu_driver_bug_workaround_type.h | 6
-rw-r--r--  chromium/gpu/config/gpu_info.cc | 3
-rw-r--r--  chromium/gpu/config/gpu_info.h | 3
-rw-r--r--  chromium/gpu/config/gpu_info_collector.cc | 2
-rw-r--r--  chromium/gpu/config/gpu_info_collector_unittest.cc | 2
-rw-r--r--  chromium/gpu/config/gpu_info_collector_win.cc | 11
-rw-r--r--  chromium/gpu/config/software_rendering_list_json.cc | 115
-rw-r--r--  chromium/gpu/gles2_conform_support/egl/context.cc | 5
-rw-r--r--  chromium/gpu/gles2_conform_support/egl/context.h | 2
-rw-r--r--  chromium/gpu/ipc/BUILD.gn | 11
-rw-r--r--  chromium/gpu/ipc/client/BUILD.gn | 7
-rw-r--r--  chromium/gpu/ipc/client/android/in_process_surface_texture_manager.cc | 60
-rw-r--r--  chromium/gpu/ipc/client/android/in_process_surface_texture_manager.h | 48
-rw-r--r--  chromium/gpu/ipc/client/command_buffer_proxy_impl.cc | 164
-rw-r--r--  chromium/gpu/ipc/client/command_buffer_proxy_impl.h | 36
-rw-r--r--  chromium/gpu/ipc/client/gpu_channel_host.cc | 48
-rw-r--r--  chromium/gpu/ipc/client/gpu_channel_host.h | 10
-rw-r--r--  chromium/gpu/ipc/client/gpu_in_process_context_tests.cc | 21
-rw-r--r--  chromium/gpu/ipc/client/gpu_memory_buffer_impl.cc | 3
-rw-r--r--  chromium/gpu/ipc/client/gpu_memory_buffer_impl.h | 6
-rw-r--r--  chromium/gpu/ipc/client/gpu_memory_buffer_impl_ozone_native_pixmap.cc | 26
-rw-r--r--  chromium/gpu/ipc/client/gpu_memory_buffer_impl_shared_memory.cc | 19
-rw-r--r--  chromium/gpu/ipc/client/gpu_memory_buffer_impl_shared_memory.h | 2
-rw-r--r--  chromium/gpu/ipc/client/gpu_memory_buffer_impl_test_template.h | 10
-rw-r--r--  chromium/gpu/ipc/common/BUILD.gn | 3
-rw-r--r--  chromium/gpu/ipc/common/android/surface_texture_manager.cc | 28
-rw-r--r--  chromium/gpu/ipc/common/android/surface_texture_manager.h | 42
-rw-r--r--  chromium/gpu/ipc/common/gpu_info.mojom | 5
-rw-r--r--  chromium/gpu/ipc/common/gpu_info_struct_traits.cc | 3
-rw-r--r--  chromium/gpu/ipc/common/gpu_info_struct_traits.h | 9
-rw-r--r--  chromium/gpu/ipc/common/gpu_param_traits_macros.h | 1
-rw-r--r--  chromium/gpu/ipc/common/gpu_preferences.mojom | 53
-rw-r--r--  chromium/gpu/ipc/common/gpu_preferences.typemap | 16
-rw-r--r--  chromium/gpu/ipc/common/gpu_preferences_struct_traits.h | 238
-rw-r--r--  chromium/gpu/ipc/common/gpu_surface_tracker.cc | 4
-rw-r--r--  chromium/gpu/ipc/common/gpu_surface_tracker.h | 7
-rw-r--r--  chromium/gpu/ipc/common/struct_traits_unittest.cc | 37
-rw-r--r--  chromium/gpu/ipc/common/traits_test_service.mojom | 4
-rw-r--r--  chromium/gpu/ipc/common/typemaps.gni | 1
-rw-r--r--  chromium/gpu/ipc/gl_in_process_context.cc | 31
-rw-r--r--  chromium/gpu/ipc/gl_in_process_context.h | 23
-rw-r--r--  chromium/gpu/ipc/gpu_in_process_thread_service.cc | 69
-rw-r--r--  chromium/gpu/ipc/gpu_in_process_thread_service.h | 58
-rw-r--r--  chromium/gpu/ipc/host/BUILD.gn | 24
-rw-r--r--  chromium/gpu/ipc/host/DEPS | 3
-rw-r--r--  chromium/gpu/ipc/host/gpu_memory_buffer_support.cc | 113
-rw-r--r--  chromium/gpu/ipc/host/gpu_memory_buffer_support.h | 46
-rw-r--r--  chromium/gpu/ipc/host/gpu_switches.cc | 16
-rw-r--r--  chromium/gpu/ipc/host/gpu_switches.h | 17
-rw-r--r--  chromium/gpu/ipc/host/shader_disk_cache.cc | 629
-rw-r--r--  chromium/gpu/ipc/host/shader_disk_cache.h | 157
-rw-r--r--  chromium/gpu/ipc/host/shader_disk_cache_unittest.cc | 109
-rw-r--r--  chromium/gpu/ipc/in_process_command_buffer.cc | 502
-rw-r--r--  chromium/gpu/ipc/in_process_command_buffer.h | 121
-rw-r--r--  chromium/gpu/ipc/service/BUILD.gn | 9
-rw-r--r--  chromium/gpu/ipc/service/child_window_surface_win.cc | 188
-rw-r--r--  chromium/gpu/ipc/service/child_window_surface_win.h | 16
-rw-r--r--  chromium/gpu/ipc/service/child_window_win.cc | 210
-rw-r--r--  chromium/gpu/ipc/service/child_window_win.h | 47
-rw-r--r--  chromium/gpu/ipc/service/gpu_channel.cc | 21
-rw-r--r--  chromium/gpu/ipc/service/gpu_channel.h | 4
-rw-r--r--  chromium/gpu/ipc/service/gpu_channel_manager.cc | 14
-rw-r--r--  chromium/gpu/ipc/service/gpu_channel_manager.h | 20
-rw-r--r--  chromium/gpu/ipc/service/gpu_channel_manager_delegate.h | 6
-rw-r--r--  chromium/gpu/ipc/service/gpu_channel_unittest.cc | 2
-rw-r--r--  chromium/gpu/ipc/service/gpu_command_buffer_stub.cc | 78
-rw-r--r--  chromium/gpu/ipc/service/gpu_command_buffer_stub.h | 6
-rw-r--r--  chromium/gpu/ipc/service/gpu_init.cc | 5
-rw-r--r--  chromium/gpu/ipc/service/gpu_memory_buffer_factory_ozone_native_pixmap.cc | 38
-rw-r--r--  chromium/gpu/ipc/service/gpu_memory_buffer_factory_ozone_native_pixmap.h | 5
-rw-r--r--  chromium/gpu/ipc/service/gpu_memory_buffer_factory_test_template.h | 13
-rw-r--r--  chromium/gpu/ipc/service/gpu_vsync_provider.h | 48
-rw-r--r--  chromium/gpu/ipc/service/gpu_vsync_provider_posix.cc | 22
-rw-r--r--  chromium/gpu/ipc/service/gpu_vsync_provider_unittest_win.cc | 84
-rw-r--r--  chromium/gpu/ipc/service/gpu_vsync_provider_win.cc | 264
-rw-r--r--  chromium/gpu/ipc/service/image_transport_surface.h | 3
-rw-r--r--  chromium/gpu/ipc/service/image_transport_surface_android.cc | 2
-rw-r--r--  chromium/gpu/ipc/service/image_transport_surface_linux.cc | 2
-rw-r--r--  chromium/gpu/ipc/service/image_transport_surface_mac.mm | 6
-rw-r--r--  chromium/gpu/ipc/service/image_transport_surface_overlay_mac.h | 2
-rw-r--r--  chromium/gpu/ipc/service/image_transport_surface_overlay_mac.mm | 2
-rw-r--r--  chromium/gpu/ipc/service/image_transport_surface_win.cc | 2
-rw-r--r--  chromium/gpu/ipc/service/pass_through_image_transport_surface.cc | 2
-rw-r--r--  chromium/gpu/ipc/service/pass_through_image_transport_surface.h | 3
-rw-r--r--  chromium/gpu/swiftshader_tests_main.cc | 36
-rw-r--r--  chromium/gpu/tools/compositor_model_bench/render_tree.cc | 78
-rw-r--r--  chromium/gpu/vulkan/vulkan_implementation.h | 2
200 files changed, 7005 insertions, 2217 deletions
diff --git a/chromium/gpu/BUILD.gn b/chromium/gpu/BUILD.gn
index 4fc32198421..db9c0ec462a 100644
--- a/chromium/gpu/BUILD.gn
+++ b/chromium/gpu/BUILD.gn
@@ -147,6 +147,7 @@ test("gl_tests") {
"command_buffer/tests/gl_manager.h",
"command_buffer/tests/gl_map_buffer_range_unittest.cc",
"command_buffer/tests/gl_native_gmb_backbuffer_unittest.cc",
+ "command_buffer/tests/gl_object_bindings_unittest.cc",
"command_buffer/tests/gl_pointcoord_unittest.cc",
"command_buffer/tests/gl_program_unittest.cc",
"command_buffer/tests/gl_query_unittest.cc",
@@ -301,6 +302,7 @@ test("gpu_unittests") {
"ipc/client/gpu_memory_buffer_impl_shared_memory_unittest.cc",
"ipc/client/gpu_memory_buffer_impl_test_template.h",
"ipc/common/struct_traits_unittest.cc",
+ "ipc/host/shader_disk_cache_unittest.cc",
]
if (is_mac) {
@@ -325,8 +327,10 @@ test("gpu_unittests") {
"//gpu/command_buffer/common:gles2_utils",
"//gpu/ipc:gl_in_process_context",
"//gpu/ipc/common:test_interfaces",
+ "//gpu/ipc/host",
"//mojo/edk/system",
"//mojo/public/cpp/bindings",
+ "//net:test_support",
"//testing/gmock",
"//testing/gtest",
"//third_party/angle:translator",
@@ -398,8 +402,27 @@ fuzzer_test("gpu_fuzzer") {
"//ui/gl:test_support",
]
- libfuzzer_options = [
- "max_len=16384",
- "use_traces=1",
- ]
+ libfuzzer_options = [ "max_len=16384" ]
+}
+
+if (is_linux) {
+ fuzzer_test("gpu_angle_fuzzer") {
+ sources = [
+ "command_buffer/tests/fuzzer_main.cc",
+ ]
+
+ defines = [ "GPU_FUZZER_USE_ANGLE" ]
+
+ deps = [
+ ":gpu",
+ "//base",
+ "//base/third_party/dynamic_annotations",
+ "//gpu/command_buffer/common:gles2_utils",
+ "//ui/gfx/geometry",
+ "//ui/gl",
+ "//ui/gl:test_support",
+ ]
+
+ libfuzzer_options = [ "max_len=16384" ]
+ }
}
diff --git a/chromium/gpu/DEPS b/chromium/gpu/DEPS
index 1b002bcc807..a395bb7ce2f 100644
--- a/chromium/gpu/DEPS
+++ b/chromium/gpu/DEPS
@@ -3,6 +3,7 @@ include_rules = [
"+third_party/amd",
"+third_party/re2",
"+third_party/smhasher",
+ "+third_party/swiftshader",
"+third_party/protbuf",
"+crypto",
"+ui/gfx",
diff --git a/chromium/gpu/GLES2/extensions/CHROMIUM/CHROMIUM_stream_texture_matrix.txt b/chromium/gpu/GLES2/extensions/CHROMIUM/CHROMIUM_stream_texture_matrix.txt
index 1744f89588c..62626cf8359 100644
--- a/chromium/gpu/GLES2/extensions/CHROMIUM/CHROMIUM_stream_texture_matrix.txt
+++ b/chromium/gpu/GLES2/extensions/CHROMIUM/CHROMIUM_stream_texture_matrix.txt
@@ -23,6 +23,8 @@ Overview
of a stream texture. Intended for use with Android SurfaceTexture, which
doesn't provide the value until the front buffer is latched.
+ Also allows hints about whether an image could be promoted to an overlay.
+
New Procedures and Functions
The command
@@ -44,6 +46,29 @@ New Procedures and Functions
<transform> Provides an additional transform matrix that is applied
prior to the the stream texture transformation matrix.
+ The command
+
+ void OverlayPromotionHintCHROMIUM(
+ GLuint texture,
+ GLboolean promotion_hint,
+ GLint display_x,
+ GLint display_y)
+
+ Provides a hint about whether the GLImage bound to texture could be promoted
+ to an overlay or not.
+
+ <texture> is the texture id, which should have a stream texture image bound
+ to it.
+ <promotion_hint> indicates whether the GLImage could be promoted.
+ <display_x> is the x coordinate of the origin of the overlay if the image
+ could be promoted. Otherwise, it is 0.
+ <display_y> is the y coordinate of the origin of the overlay if the image
+ could be promoted. Otherwise, it is 0.
+
+
+ If <texture> is not a valid texture, or if it doesn't have a stream texture
+ image bound to it, then no action is taken.
+
Errors
None.
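
For orientation, a minimal client-side usage sketch of the new entry point (not part of the extension text; the texture id and overlay origin below are assumed values, and the texture is assumed to already have a stream texture image bound):

    #include <GLES2/gl2.h>
    #include <GLES2/gl2extchromium.h>

    // Sketch only: forward a promotion hint for a stream-texture-backed texture.
    // Per the spec above, the origin must be (0, 0) when the image cannot be
    // promoted.
    void HintOverlayPromotion(GLuint stream_texture, bool promotable,
                              GLint display_x, GLint display_y) {
      glOverlayPromotionHintCHROMIUM(stream_texture,
                                     promotable ? GL_TRUE : GL_FALSE,
                                     promotable ? display_x : 0,
                                     promotable ? display_y : 0);
    }
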
diff --git a/chromium/gpu/GLES2/gl2chromium_autogen.h b/chromium/gpu/GLES2/gl2chromium_autogen.h
index ad54560bc41..4a703a86ec9 100644
--- a/chromium/gpu/GLES2/gl2chromium_autogen.h
+++ b/chromium/gpu/GLES2/gl2chromium_autogen.h
@@ -382,6 +382,8 @@
#define glGetFragDataIndexEXT GLES2_GET_FUN(GetFragDataIndexEXT)
#define glUniformMatrix4fvStreamTextureMatrixCHROMIUM \
GLES2_GET_FUN(UniformMatrix4fvStreamTextureMatrixCHROMIUM)
+#define glOverlayPromotionHintCHROMIUM \
+ GLES2_GET_FUN(OverlayPromotionHintCHROMIUM)
#define glSwapBuffersWithDamageCHROMIUM \
GLES2_GET_FUN(SwapBuffersWithDamageCHROMIUM)
diff --git a/chromium/gpu/GLES2/gl2extchromium.h b/chromium/gpu/GLES2/gl2extchromium.h
index 6ef50e2db25..30924332ead 100644
--- a/chromium/gpu/GLES2/gl2extchromium.h
+++ b/chromium/gpu/GLES2/gl2extchromium.h
@@ -362,31 +362,37 @@ typedef void (GL_APIENTRYP PFNGLBLITFRAMEBUFFERCHROMIUMPROC) (GLint srcX0, GLint
#endif
#ifdef GL_GLEXT_PROTOTYPES
-GL_APICALL void GL_APIENTRY glCopyTextureCHROMIUM(
- GLenum source_id,
- GLenum dest_id,
- GLint internalformat,
- GLenum dest_type,
- GLboolean unpack_flip_y,
- GLboolean unpack_premultiply_alpha,
- GLboolean unpack_unmultiply_alpha);
+GL_APICALL void GL_APIENTRY
+glCopyTextureCHROMIUM(GLenum source_id,
+ GLint source_level,
+ GLenum dest_id,
+ GLint dest_level,
+ GLint internalformat,
+ GLenum dest_type,
+ GLboolean unpack_flip_y,
+ GLboolean unpack_premultiply_alpha,
+ GLboolean unpack_unmultiply_alpha);
-GL_APICALL void GL_APIENTRY glCopySubTextureCHROMIUM(
- GLenum source_id,
- GLenum dest_id,
- GLint xoffset,
- GLint yoffset,
- GLint x,
- GLint y,
- GLsizei width,
- GLsizei height,
- GLboolean unpack_flip_y,
- GLboolean unpack_premultiply_alpha,
- GLboolean unpack_unmultiply_alpha);
+GL_APICALL void GL_APIENTRY
+glCopySubTextureCHROMIUM(GLenum source_id,
+ GLint source_level,
+ GLenum dest_id,
+ GLint dest_level,
+ GLint xoffset,
+ GLint yoffset,
+ GLint x,
+ GLint y,
+ GLsizei width,
+ GLsizei height,
+ GLboolean unpack_flip_y,
+ GLboolean unpack_premultiply_alpha,
+ GLboolean unpack_unmultiply_alpha);
#endif
typedef void(GL_APIENTRYP PFNGLCOPYTEXTURECHROMIUMPROC)(
GLenum source_id,
+ GLint source_level,
GLenum dest_id,
+ GLint dest_level,
GLint internalformat,
GLenum dest_type,
GLboolean unpack_flip_y,
@@ -395,7 +401,9 @@ typedef void(GL_APIENTRYP PFNGLCOPYTEXTURECHROMIUMPROC)(
typedef void(GL_APIENTRYP PFNGLCOPYSUBTEXTURECHROMIUMPROC)(
GLenum source_id,
+ GLint source_level,
GLenum dest_id,
+ GLint dest_level,
GLint xoffset,
GLint yoffset,
GLint x,
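
As an informal illustration of the reordered arguments (not part of the header; the texture ids below are placeholders), a call against the updated glCopyTextureCHROMIUM prototype now passes a mip level after each texture id:

    #include <GLES2/gl2.h>
    #include <GLES2/gl2extchromium.h>

    // Sketch only: copy level 0 of |source_tex| into level 0 of |dest_tex|
    // using the updated signature with explicit source/dest levels.
    void CopyLevelZero(GLuint source_tex, GLuint dest_tex) {
      glCopyTextureCHROMIUM(source_tex, /*source_level=*/0,
                            dest_tex, /*dest_level=*/0,
                            GL_RGBA, GL_UNSIGNED_BYTE,
                            /*unpack_flip_y=*/GL_FALSE,
                            /*unpack_premultiply_alpha=*/GL_FALSE,
                            /*unpack_unmultiply_alpha=*/GL_FALSE);
    }
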
diff --git a/chromium/gpu/angle_deqp_tests_main.cc b/chromium/gpu/angle_deqp_tests_main.cc
index 9814d70e27c..daad62a413b 100644
--- a/chromium/gpu/angle_deqp_tests_main.cc
+++ b/chromium/gpu/angle_deqp_tests_main.cc
@@ -19,7 +19,14 @@ int RunHelper(base::TestSuite* test_suite) {
} // namespace
+// Defined in angle_deqp_gtest.cpp. Declared here so we don't need to make a
+// header that we import in Chromium.
+namespace angle {
+void InitTestHarness(int* argc, char** argv);
+}
+
int main(int argc, char** argv) {
+ angle::InitTestHarness(&argc, argv);
base::CommandLine::Init(argc, argv);
base::TestSuite test_suite(argc, argv);
int rt = base::LaunchUnitTestsSerially(
diff --git a/chromium/gpu/angle_unittest_main.cc b/chromium/gpu/angle_unittest_main.cc
index 7187829a233..2294b5cd60e 100644
--- a/chromium/gpu/angle_unittest_main.cc
+++ b/chromium/gpu/angle_unittest_main.cc
@@ -24,12 +24,12 @@ int RunHelper(base::TestSuite* test_suite) {
int main(int argc, char** argv) {
base::CommandLine::Init(argc, argv);
testing::InitGoogleMock(&argc, argv);
- ShInitialize();
+ sh::Initialize();
base::TestSuite test_suite(argc, argv);
int rt = base::LaunchUnitTestsSerially(
argc,
argv,
base::Bind(&RunHelper, base::Unretained(&test_suite)));
- ShFinalize();
+ sh::Finalize();
return rt;
}
diff --git a/chromium/gpu/command_buffer/build_gles2_cmd_buffer.py b/chromium/gpu/command_buffer/build_gles2_cmd_buffer.py
index d8c5f46c14d..dab6327cdf2 100755
--- a/chromium/gpu/command_buffer/build_gles2_cmd_buffer.py
+++ b/chromium/gpu/command_buffer/build_gles2_cmd_buffer.py
@@ -76,6 +76,8 @@ _CAPABILITY_FLAGS = [
{'name': 'cull_face'},
{'name': 'depth_test', 'state_flag': 'framebuffer_state_.clear_state_dirty'},
{'name': 'dither', 'default': True},
+ {'name': 'framebuffer_srgb_ext', 'default': True, 'no_init': True,
+ 'extension_flag': 'ext_srgb_write_control'},
{'name': 'polygon_offset_fill'},
{'name': 'sample_alpha_to_coverage'},
{'name': 'sample_coverage'},
@@ -1537,6 +1539,14 @@ _NAMED_TYPE_INFO = {
'GL_COMPARE_REF_TO_TEXTURE',
],
},
+ 'TextureSrgbDecodeExt': {
+ 'type': 'GLenum',
+ 'is_complete': True,
+ 'valid': [
+ 'GL_DECODE_EXT',
+ 'GL_SKIP_DECODE_EXT',
+ ],
+ },
'TextureSwizzle': {
'type': 'GLenum',
'is_complete': True,
@@ -2492,6 +2502,9 @@ _FUNCTION_INFO = {
'decoder_func': 'DoClear',
'defer_draws': True,
'trace_level': 2,
+ 'valid_args': {
+ '0': 'GL_COLOR_BUFFER_BIT'
+ },
},
'ClearBufferiv': {
'type': 'PUT',
@@ -3467,6 +3480,12 @@ _FUNCTION_INFO = {
'result': ['uint32_t'],
'trace_level': 1,
},
+ 'OverlayPromotionHintCHROMIUM': {
+ 'decoder_func': 'DoOverlayPromotionHintCHROMIUM',
+ 'extension': "CHROMIUM_uniform_stream_texture_matrix",
+ 'unit_test': False,
+ 'client_test': False,
+ },
'PauseTransformFeedback': {
'decoder_func': 'DoPauseTransformFeedback',
'unit_test': False,
@@ -4001,6 +4020,7 @@ _FUNCTION_INFO = {
'state': 'Scissor',
},
'Viewport': {
+ 'impl_func': False,
'decoder_func': 'DoViewport',
},
'ResizeCHROMIUM': {
@@ -10174,6 +10194,10 @@ void ContextState::InitCapabilities(const ContextState* prev_state) const {
def WriteCapabilities(test_prev, es3_caps):
for capability in _CAPABILITY_FLAGS:
capability_name = capability['name']
+ capability_no_init = 'no_init' in capability and \
+ capability['no_init'] == True
+ if capability_no_init:
+ continue
capability_es3 = 'es3' in capability and capability['es3'] == True
if capability_es3 and not es3_caps or not capability_es3 and es3_caps:
continue
@@ -10459,6 +10483,10 @@ namespace gles2 {
"""void GLES2DecoderTestBase::SetupInitCapabilitiesExpectations(
bool es3_capable) {""")
for capability in _CAPABILITY_FLAGS:
+ capability_no_init = 'no_init' in capability and \
+ capability['no_init'] == True
+ if capability_no_init:
+ continue
capability_es3 = 'es3' in capability and capability['es3'] == True
if capability_es3:
continue
diff --git a/chromium/gpu/command_buffer/client/client_context_state.cc b/chromium/gpu/command_buffer/client/client_context_state.cc
index df81f30a264..132142383bd 100644
--- a/chromium/gpu/command_buffer/client/client_context_state.cc
+++ b/chromium/gpu/command_buffer/client/client_context_state.cc
@@ -4,6 +4,8 @@
#include "gpu/command_buffer/client/client_context_state.h"
+#include "base/logging.h"
+
namespace gpu {
namespace gles2 {
@@ -13,6 +15,17 @@ ClientContextState::ClientContextState() {
ClientContextState::~ClientContextState() {
}
+void ClientContextState::SetViewport(
+ GLint x, GLint y, GLsizei width, GLsizei height) {
+ DCHECK_LE(0, width);
+ DCHECK_LE(0, height);
+
+ viewport_x = x;
+ viewport_y = y;
+ viewport_width = width;
+ viewport_height = height;
+}
+
// Include the auto-generated part of this file. We split this because it means
// we can easily edit the non-auto generated parts right here in this file
// instead of having to edit some template or the code generator.
diff --git a/chromium/gpu/command_buffer/client/client_context_state.h b/chromium/gpu/command_buffer/client/client_context_state.h
index 45cd14b40a8..e9f5ad15f31 100644
--- a/chromium/gpu/command_buffer/client/client_context_state.h
+++ b/chromium/gpu/command_buffer/client/client_context_state.h
@@ -26,9 +26,17 @@ struct GLES2_IMPL_EXPORT ClientContextState {
// 'changed' will be true if the state was different from 'enabled.
bool SetCapabilityState(GLenum cap, bool enabled, bool* changed);
+ // Cache the user's valid viewport.
+ void SetViewport(GLint x, GLint y, GLsizei width, GLsizei height);
+
#include "gpu/command_buffer/client/client_context_state_autogen.h"
EnableFlags enable_flags;
+
+ GLint viewport_x = 0;
+ GLint viewport_y = 0;
+ GLsizei viewport_width = 0;
+ GLsizei viewport_height = 0;
};
} // namespace gles2
diff --git a/chromium/gpu/command_buffer/client/client_test_helper.cc b/chromium/gpu/command_buffer/client/client_test_helper.cc
index ff7e0694487..12c889c6424 100644
--- a/chromium/gpu/command_buffer/client/client_test_helper.cc
+++ b/chromium/gpu/command_buffer/client/client_test_helper.cc
@@ -20,30 +20,33 @@ using ::testing::Invoke;
namespace gpu {
-MockCommandBufferBase::MockCommandBufferBase() : put_offset_(0) {
-}
+MockCommandBufferBase::MockCommandBufferBase() : put_offset_(0) {}
-MockCommandBufferBase::~MockCommandBufferBase() {
-}
+MockCommandBufferBase::~MockCommandBufferBase() {}
CommandBuffer::State MockCommandBufferBase::GetLastState() {
return state_;
}
-int32_t MockCommandBufferBase::GetLastToken() {
- return state_.token;
-}
-
void MockCommandBufferBase::SetGetOffset(int32_t get_offset) {
state_.get_offset = get_offset;
}
-void MockCommandBufferBase::WaitForTokenInRange(int32_t start, int32_t end) {}
+void MockCommandBufferBase::SetReleaseCount(uint64_t release_count) {
+ state_.release_count = release_count;
+}
+
+CommandBuffer::State MockCommandBufferBase::WaitForTokenInRange(int32_t start,
+ int32_t end) {
+ return state_;
+}
-void MockCommandBufferBase::WaitForGetOffsetInRange(int32_t start,
- int32_t end) {
+CommandBuffer::State MockCommandBufferBase::WaitForGetOffsetInRange(
+ int32_t start,
+ int32_t end) {
state_.get_offset = put_offset_;
OnFlush();
+ return state_;
}
void MockCommandBufferBase::SetGetBuffer(int transfer_buffer_id) {
@@ -125,8 +128,7 @@ MockClientCommandBuffer::MockClientCommandBuffer() {
DelegateToFake();
}
-MockClientCommandBuffer::~MockClientCommandBuffer() {
-}
+MockClientCommandBuffer::~MockClientCommandBuffer() {}
void MockClientCommandBuffer::Flush(int32_t put_offset) {
FlushHelper(put_offset);
@@ -138,30 +140,24 @@ void MockClientCommandBuffer::OrderingBarrier(int32_t put_offset) {
void MockClientCommandBuffer::DelegateToFake() {
ON_CALL(*this, DestroyTransferBuffer(_))
- .WillByDefault(Invoke(
- this, &MockCommandBufferBase::DestroyTransferBufferHelper));
+ .WillByDefault(
+ Invoke(this, &MockCommandBufferBase::DestroyTransferBufferHelper));
}
MockClientCommandBufferMockFlush::MockClientCommandBufferMockFlush() {
DelegateToFake();
}
-MockClientCommandBufferMockFlush::~MockClientCommandBufferMockFlush() {
-}
+MockClientCommandBufferMockFlush::~MockClientCommandBufferMockFlush() {}
void MockClientCommandBufferMockFlush::DelegateToFake() {
MockClientCommandBuffer::DelegateToFake();
ON_CALL(*this, Flush(_))
- .WillByDefault(Invoke(
- this, &MockCommandBufferBase::FlushHelper));
+ .WillByDefault(Invoke(this, &MockCommandBufferBase::FlushHelper));
}
-MockClientGpuControl::MockClientGpuControl() {
-}
+MockClientGpuControl::MockClientGpuControl() {}
-MockClientGpuControl::~MockClientGpuControl() {
-}
+MockClientGpuControl::~MockClientGpuControl() {}
} // namespace gpu
-
-
diff --git a/chromium/gpu/command_buffer/client/client_test_helper.h b/chromium/gpu/command_buffer/client/client_test_helper.h
index 3b85773f397..9df3e6bac71 100644
--- a/chromium/gpu/command_buffer/client/client_test_helper.h
+++ b/chromium/gpu/command_buffer/client/client_test_helper.h
@@ -24,8 +24,6 @@
namespace gpu {
-class CommandBufferHelper;
-
class MockCommandBufferBase : public CommandBufferServiceBase {
public:
static const int32_t kTransferBufferBaseId = 0x123;
@@ -35,11 +33,11 @@ class MockCommandBufferBase : public CommandBufferServiceBase {
~MockCommandBufferBase() override;
State GetLastState() override;
- int32_t GetLastToken() override;
- void WaitForTokenInRange(int32_t start, int32_t end) override;
- void WaitForGetOffsetInRange(int32_t start, int32_t end) override;
+ State WaitForTokenInRange(int32_t start, int32_t end) override;
+ State WaitForGetOffsetInRange(int32_t start, int32_t end) override;
void SetGetBuffer(int transfer_buffer_id) override;
void SetGetOffset(int32_t get_offset) override;
+ void SetReleaseCount(uint64_t release_count) override;
scoped_refptr<gpu::Buffer> CreateTransferBuffer(size_t size,
int32_t* id) override;
scoped_refptr<gpu::Buffer> GetTransferBuffer(int32_t id) override;
@@ -121,6 +119,7 @@ class MockClientGpuControl : public GpuControl {
MOCK_METHOD1(IsFenceSyncRelease, bool(uint64_t release));
MOCK_METHOD1(IsFenceSyncFlushed, bool(uint64_t release));
MOCK_METHOD1(IsFenceSyncFlushReceived, bool(uint64_t release));
+ MOCK_METHOD1(IsFenceSyncReleased, bool(uint64_t release));
MOCK_METHOD2(SignalSyncToken, void(const SyncToken& sync_token,
const base::Closure& callback));
MOCK_METHOD1(CanWaitUnverifiedSyncToken, bool(const SyncToken*));
diff --git a/chromium/gpu/command_buffer/client/cmd_buffer_helper.cc b/chromium/gpu/command_buffer/client/cmd_buffer_helper.cc
index 0304b8f7e5d..7021fa399f4 100644
--- a/chromium/gpu/command_buffer/client/cmd_buffer_helper.cc
+++ b/chromium/gpu/command_buffer/client/cmd_buffer_helper.cc
@@ -27,12 +27,14 @@ CommandBufferHelper::CommandBufferHelper(CommandBuffer* command_buffer)
: command_buffer_(command_buffer),
ring_buffer_id_(-1),
ring_buffer_size_(0),
- entries_(NULL),
+ entries_(nullptr),
total_entry_count_(0),
immediate_entry_count_(0),
token_(0),
put_(0),
last_put_sent_(0),
+ cached_last_token_read_(0),
+ cached_get_offset_(0),
#if defined(CMD_HELPER_PERIODIC_FLUSH_CHECK)
commands_issued_(0),
#endif
@@ -40,13 +42,6 @@ CommandBufferHelper::CommandBufferHelper(CommandBuffer* command_buffer)
context_lost_(false),
flush_automatically_(true),
flush_generation_(0) {
- // In certain cases, ThreadTaskRunnerHandle isn't set (Android Webview).
- // Don't register a dump provider in these cases.
- // TODO(ericrk): Get this working in Android Webview. crbug.com/517156
- if (base::ThreadTaskRunnerHandle::IsSet()) {
- base::trace_event::MemoryDumpManager::GetInstance()->RegisterDumpProvider(
- this, "gpu::CommandBufferHelper", base::ThreadTaskRunnerHandle::Get());
- }
}
void CommandBufferHelper::SetAutomaticFlushes(bool enabled) {
@@ -55,9 +50,8 @@ void CommandBufferHelper::SetAutomaticFlushes(bool enabled) {
}
bool CommandBufferHelper::IsContextLost() {
- if (!context_lost_) {
- context_lost_ = error::IsError(command_buffer()->GetLastError());
- }
+ if (!context_lost_)
+ context_lost_ = error::IsError(command_buffer()->GetLastState().error);
return context_lost_;
}
@@ -71,7 +65,7 @@ void CommandBufferHelper::CalcImmediateEntries(int waiting_count) {
}
// Get maximum safe contiguous entries.
- const int32_t curr_get = get_offset();
+ const int32_t curr_get = cached_get_offset_;
if (curr_get > put_) {
immediate_entry_count_ = curr_get - put_ - 1;
} else {
@@ -116,7 +110,7 @@ bool CommandBufferHelper::AllocateRingBuffer() {
command_buffer_->CreateTransferBuffer(ring_buffer_size_, &id);
if (id < 0) {
ClearUsable();
- DCHECK(error::IsError(command_buffer()->GetLastError()));
+ DCHECK(context_lost_);
return false;
}
@@ -128,6 +122,7 @@ bool CommandBufferHelper::AllocateRingBuffer() {
// Call to SetGetBuffer(id) above resets get and put offsets to 0.
// No need to query it through IPC.
put_ = 0;
+ cached_get_offset_ = 0;
CalcImmediateEntries(0);
return true;
}
@@ -143,8 +138,8 @@ void CommandBufferHelper::FreeResources() {
}
void CommandBufferHelper::FreeRingBuffer() {
- CHECK((put_ == get_offset()) ||
- error::IsError(command_buffer_->GetLastState().error));
+ CHECK((put_ == cached_get_offset_) ||
+ error::IsError(command_buffer_->GetLastState().error));
FreeResources();
}
@@ -154,19 +149,25 @@ bool CommandBufferHelper::Initialize(int32_t ring_buffer_size) {
}
CommandBufferHelper::~CommandBufferHelper() {
- base::trace_event::MemoryDumpManager::GetInstance()->UnregisterDumpProvider(
- this);
FreeResources();
}
+void CommandBufferHelper::UpdateCachedState(const CommandBuffer::State& state) {
+ cached_get_offset_ = state.get_offset;
+ cached_last_token_read_ = state.token;
+ context_lost_ = error::IsError(state.error);
+}
+
bool CommandBufferHelper::WaitForGetOffsetInRange(int32_t start, int32_t end) {
DCHECK(start >= 0 && start <= total_entry_count_);
DCHECK(end >= 0 && end <= total_entry_count_);
if (!usable()) {
return false;
}
- command_buffer_->WaitForGetOffsetInRange(start, end);
- return command_buffer_->GetLastError() == gpu::error::kNoError;
+ CommandBuffer::State last_state =
+ command_buffer_->WaitForGetOffsetInRange(start, end);
+ UpdateCachedState(last_state);
+ return !context_lost_;
}
void CommandBufferHelper::Flush() {
@@ -213,7 +214,7 @@ bool CommandBufferHelper::Finish() {
return false;
}
// If there is no work just exit.
- if (put_ == get_offset()) {
+ if (put_ == cached_get_offset_) {
return true;
}
DCHECK(HaveRingBuffer() ||
@@ -221,7 +222,7 @@ bool CommandBufferHelper::Finish() {
Flush();
if (!WaitForGetOffsetInRange(put_, put_))
return false;
- DCHECK_EQ(get_offset(), put_);
+ DCHECK_EQ(cached_get_offset_, put_);
CalcImmediateEntries(0);
@@ -246,14 +247,25 @@ int32_t CommandBufferHelper::InsertToken() {
cmd->Init(token_);
if (token_ == 0) {
TRACE_EVENT0("gpu", "CommandBufferHelper::InsertToken(wrapped)");
- // we wrapped
- Finish();
- DCHECK_EQ(token_, last_token_read());
+ bool finished = Finish(); // we wrapped
+ DCHECK(!finished || (cached_last_token_read_ == 0));
}
}
return token_;
}
+bool CommandBufferHelper::HasTokenPassed(int32_t token) {
+ // If token_ wrapped around we Finish'd.
+ if (token > token_)
+ return true;
+ // Don't update state if we don't have to.
+ if (token <= cached_last_token_read_)
+ return true;
+ CommandBuffer::State last_state = command_buffer_->GetLastState();
+ UpdateCachedState(last_state);
+ return token <= cached_last_token_read_;
+}
+
// Waits until the current token value is greater or equal to the value passed
// in argument.
void CommandBufferHelper::WaitForToken(int32_t token) {
@@ -263,11 +275,17 @@ void CommandBufferHelper::WaitForToken(int32_t token) {
// Return immediately if corresponding InsertToken failed.
if (token < 0)
return;
- if (token > token_) return; // we wrapped
- if (last_token_read() >= token)
+ if (token > token_)
+ return; // we wrapped
+ if (cached_last_token_read_ >= token)
+ return;
+ UpdateCachedState(command_buffer_->GetLastState());
+ if (cached_last_token_read_ >= token)
return;
Flush();
- command_buffer_->WaitForTokenInRange(token, token_);
+ CommandBuffer::State last_state =
+ command_buffer_->WaitForTokenInRange(token, token_);
+ UpdateCachedState(last_state);
}
// Waits for available entries, basically waiting until get >= put + count + 1.
@@ -288,13 +306,13 @@ void CommandBufferHelper::WaitForAvailableEntries(int32_t count) {
// but we need to make sure get wraps first, actually that get is 1 or
// more (since put will wrap to 0 after we add the noops).
DCHECK_LE(1, put_);
- int32_t curr_get = get_offset();
+ int32_t curr_get = cached_get_offset_;
if (curr_get > put_ || curr_get == 0) {
TRACE_EVENT0("gpu", "CommandBufferHelper::WaitForAvailableEntries");
Flush();
if (!WaitForGetOffsetInRange(1, put_))
return;
- curr_get = get_offset();
+ curr_get = cached_get_offset_;
DCHECK_LE(curr_get, put_);
DCHECK_NE(0, curr_get);
}
@@ -328,7 +346,7 @@ void CommandBufferHelper::WaitForAvailableEntries(int32_t count) {
}
int32_t CommandBufferHelper::GetTotalFreeEntriesNoWaiting() const {
- int32_t current_get_offset = get_offset();
+ int32_t current_get_offset = cached_get_offset_;
if (current_get_offset > put_) {
return current_get_offset - put_ - 1;
} else {
diff --git a/chromium/gpu/command_buffer/client/cmd_buffer_helper.h b/chromium/gpu/command_buffer/client/cmd_buffer_helper.h
index 0c0edddfd7b..3ea507f8975 100644
--- a/chromium/gpu/command_buffer/client/cmd_buffer_helper.h
+++ b/chromium/gpu/command_buffer/client/cmd_buffer_helper.h
@@ -102,11 +102,7 @@ class GPU_EXPORT CommandBufferHelper
// Returns true if the token has passed.
// Parameters:
// the value of the token to check whether it has passed
- bool HasTokenPassed(int32_t token) const {
- if (token > token_)
- return true; // we wrapped
- return last_token_read() >= token;
- }
+ bool HasTokenPassed(int32_t token);
// Waits until the token of a particular value has passed through the command
// stream (i.e. commands inserted before that token have been executed).
@@ -179,12 +175,6 @@ class GPU_EXPORT CommandBufferHelper
return data;
}
- int32_t last_token_read() const { return command_buffer_->GetLastToken(); }
-
- int32_t get_offset() const {
- return command_buffer_->GetLastState().get_offset;
- }
-
// Common Commands
void Noop(uint32_t skip_count) {
cmd::Noop* cmd = GetImmediateCmdSpace<cmd::Noop>(
@@ -215,10 +205,7 @@ class GPU_EXPORT CommandBufferHelper
uint32_t shared_memory_offset) {
cmd::SetBucketData* cmd = GetCmdSpace<cmd::SetBucketData>();
if (cmd) {
- cmd->Init(bucket_id,
- offset,
- size,
- shared_memory_id,
+ cmd->Init(bucket_id, offset, size, shared_memory_id,
shared_memory_offset);
}
}
@@ -243,12 +230,8 @@ class GPU_EXPORT CommandBufferHelper
uint32_t data_memory_offset) {
cmd::GetBucketStart* cmd = GetCmdSpace<cmd::GetBucketStart>();
if (cmd) {
- cmd->Init(bucket_id,
- result_memory_id,
- result_memory_offset,
- data_memory_size,
- data_memory_id,
- data_memory_offset);
+ cmd->Init(bucket_id, result_memory_id, result_memory_offset,
+ data_memory_size, data_memory_id, data_memory_offset);
}
}
@@ -259,17 +242,12 @@ class GPU_EXPORT CommandBufferHelper
uint32_t shared_memory_offset) {
cmd::GetBucketData* cmd = GetCmdSpace<cmd::GetBucketData>();
if (cmd) {
- cmd->Init(bucket_id,
- offset,
- size,
- shared_memory_id,
+ cmd->Init(bucket_id, offset, size, shared_memory_id,
shared_memory_offset);
}
}
- CommandBuffer* command_buffer() const {
- return command_buffer_;
- }
+ CommandBuffer* command_buffer() const { return command_buffer_; }
scoped_refptr<Buffer> get_ring_buffer() const { return ring_buffer_; }
@@ -277,13 +255,9 @@ class GPU_EXPORT CommandBufferHelper
void FreeRingBuffer();
- bool HaveRingBuffer() const {
- return ring_buffer_id_ != -1;
- }
+ bool HaveRingBuffer() const { return ring_buffer_id_ != -1; }
- bool usable () const {
- return usable_;
- }
+ bool usable() const { return usable_; }
void ClearUsable() {
usable_ = false;
@@ -296,11 +270,6 @@ class GPU_EXPORT CommandBufferHelper
base::trace_event::ProcessMemoryDump* pmd) override;
private:
- // Returns the number of available entries (they may not be contiguous).
- int32_t AvailableEntries() {
- return (get_offset() - put_ - 1 + total_entry_count_) % total_entry_count_;
- }
-
void CalcImmediateEntries(int waiting_count);
bool AllocateRingBuffer();
void FreeResources();
@@ -316,6 +285,10 @@ class GPU_EXPORT CommandBufferHelper
int32_t GetTotalFreeEntriesNoWaiting() const;
+ // Updates |cached_get_offset_|, |cached_last_token_read_| and |context_lost_|
+ // from given command buffer state.
+ void UpdateCachedState(const CommandBuffer::State& state);
+
CommandBuffer* command_buffer_;
int32_t ring_buffer_id_;
int32_t ring_buffer_size_;
@@ -326,7 +299,8 @@ class GPU_EXPORT CommandBufferHelper
int32_t token_;
int32_t put_;
int32_t last_put_sent_;
- int32_t last_barrier_put_sent_;
+ int32_t cached_last_token_read_;
+ int32_t cached_get_offset_;
#if defined(CMD_HELPER_PERIODIC_FLUSH_CHECK)
int commands_issued_;
diff --git a/chromium/gpu/command_buffer/client/cmd_buffer_helper_test.cc b/chromium/gpu/command_buffer/client/cmd_buffer_helper_test.cc
index 40ae25e700f..3489ea8b8de 100644
--- a/chromium/gpu/command_buffer/client/cmd_buffer_helper_test.cc
+++ b/chromium/gpu/command_buffer/client/cmd_buffer_helper_test.cc
@@ -68,7 +68,7 @@ class CommandBufferServiceLocked : public CommandBufferService {
int FlushCount() { return flush_count_; }
- void WaitForGetOffsetInRange(int32_t start, int32_t end) override {
+ State WaitForGetOffsetInRange(int32_t start, int32_t end) override {
// Flush only if it's required to unblock this Wait.
if (last_flush_ != -1 &&
!CommandBuffer::InRange(start, end, previous_put_offset_)) {
@@ -76,7 +76,7 @@ class CommandBufferServiceLocked : public CommandBufferService {
CommandBufferService::Flush(last_flush_);
last_flush_ = -1;
}
- CommandBufferService::WaitForGetOffsetInRange(start, end);
+ return CommandBufferService::WaitForGetOffsetInRange(start, end);
}
private:
@@ -247,7 +247,7 @@ class CommandBufferHelperTest : public testing::Test {
int32_t GetPutOffset() { return command_buffer_->GetPutOffset(); }
- int32_t GetHelperGetOffset() { return helper_->get_offset(); }
+ int32_t GetHelperGetOffset() { return helper_->cached_get_offset_; }
int32_t GetHelperPutOffset() { return helper_->put_; }
diff --git a/chromium/gpu/command_buffer/client/context_support.h b/chromium/gpu/command_buffer/client/context_support.h
index c2d71734144..2e40d51d314 100644
--- a/chromium/gpu/command_buffer/client/context_support.h
+++ b/chromium/gpu/command_buffer/client/context_support.h
@@ -21,10 +21,15 @@ struct SyncToken;
class ContextSupport {
public:
- // Runs |callback| when a sync token is signalled.
+ // Runs |callback| when the given sync token is signalled. The sync token may
+ // belong to any context.
virtual void SignalSyncToken(const SyncToken& sync_token,
const base::Closure& callback) = 0;
+ // Returns true if the given sync token has been signalled. The sync token
+ // must belong to this context. This may be called from any thread.
+ virtual bool IsSyncTokenSignalled(const SyncToken& sync_token) = 0;
+
// Runs |callback| when a query created via glCreateQueryEXT() has cleared
// passed the glEndQueryEXT() point.
virtual void SignalQuery(uint32_t query, const base::Closure& callback) = 0;
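
A rough usage sketch of the new query (illustrative only; the helper function and callback names are assumptions, not part of the interface): IsSyncTokenSignalled allows a non-blocking check before falling back to the SignalSyncToken callback path.

    #include "base/callback.h"
    #include "gpu/command_buffer/client/context_support.h"
    #include "gpu/command_buffer/common/sync_token.h"

    // Sketch only: |support| belongs to the context that generated |token|,
    // as the comment above requires.
    void RunWhenReleased(gpu::ContextSupport* support,
                         const gpu::SyncToken& token,
                         const base::Closure& callback) {
      if (support->IsSyncTokenSignalled(token)) {
        callback.Run();  // Already released on the service side.
      } else {
        support->SignalSyncToken(token, callback);  // Run once it is released.
      }
    }
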
diff --git a/chromium/gpu/command_buffer/client/gles2_c_lib_autogen.h b/chromium/gpu/command_buffer/client/gles2_c_lib_autogen.h
index 934fe97620c..77720ecfe74 100644
--- a/chromium/gpu/command_buffer/client/gles2_c_lib_autogen.h
+++ b/chromium/gpu/command_buffer/client/gles2_c_lib_autogen.h
@@ -1338,19 +1338,23 @@ void GL_APIENTRY GLES2PostSubBufferCHROMIUM(GLint x,
gles2::GetGLContext()->PostSubBufferCHROMIUM(x, y, width, height);
}
void GL_APIENTRY GLES2CopyTextureCHROMIUM(GLenum source_id,
+ GLint source_level,
GLenum dest_id,
+ GLint dest_level,
GLint internalformat,
GLenum dest_type,
GLboolean unpack_flip_y,
GLboolean unpack_premultiply_alpha,
GLboolean unpack_unmultiply_alpha) {
gles2::GetGLContext()->CopyTextureCHROMIUM(
- source_id, dest_id, internalformat, dest_type, unpack_flip_y,
- unpack_premultiply_alpha, unpack_unmultiply_alpha);
+ source_id, source_level, dest_id, dest_level, internalformat, dest_type,
+ unpack_flip_y, unpack_premultiply_alpha, unpack_unmultiply_alpha);
}
void GL_APIENTRY
GLES2CopySubTextureCHROMIUM(GLenum source_id,
+ GLint source_level,
GLenum dest_id,
+ GLint dest_level,
GLint xoffset,
GLint yoffset,
GLint x,
@@ -1361,8 +1365,9 @@ GLES2CopySubTextureCHROMIUM(GLenum source_id,
GLboolean unpack_premultiply_alpha,
GLboolean unpack_unmultiply_alpha) {
gles2::GetGLContext()->CopySubTextureCHROMIUM(
- source_id, dest_id, xoffset, yoffset, x, y, width, height, unpack_flip_y,
- unpack_premultiply_alpha, unpack_unmultiply_alpha);
+ source_id, source_level, dest_id, dest_level, xoffset, yoffset, x, y,
+ width, height, unpack_flip_y, unpack_premultiply_alpha,
+ unpack_unmultiply_alpha);
}
void GL_APIENTRY GLES2CompressedCopyTextureCHROMIUM(GLenum source_id,
GLenum dest_id) {
@@ -1709,6 +1714,13 @@ GLES2UniformMatrix4fvStreamTextureMatrixCHROMIUM(GLint location,
gles2::GetGLContext()->UniformMatrix4fvStreamTextureMatrixCHROMIUM(
location, transpose, transform);
}
+void GL_APIENTRY GLES2OverlayPromotionHintCHROMIUM(GLuint texture,
+ GLboolean promotion_hint,
+ GLint display_x,
+ GLint display_y) {
+ gles2::GetGLContext()->OverlayPromotionHintCHROMIUM(texture, promotion_hint,
+ display_x, display_y);
+}
void GL_APIENTRY GLES2SwapBuffersWithDamageCHROMIUM(GLint x,
GLint y,
GLint width,
@@ -3015,6 +3027,10 @@ extern const NameToFunc g_gles2_function_table[] = {
glUniformMatrix4fvStreamTextureMatrixCHROMIUM),
},
{
+ "glOverlayPromotionHintCHROMIUM",
+ reinterpret_cast<GLES2FunctionPointer>(glOverlayPromotionHintCHROMIUM),
+ },
+ {
"glSwapBuffersWithDamageCHROMIUM",
reinterpret_cast<GLES2FunctionPointer>(glSwapBuffersWithDamageCHROMIUM),
},
diff --git a/chromium/gpu/command_buffer/client/gles2_cmd_helper_autogen.h b/chromium/gpu/command_buffer/client/gles2_cmd_helper_autogen.h
index 5cb447980de..300fc739ee5 100644
--- a/chromium/gpu/command_buffer/client/gles2_cmd_helper_autogen.h
+++ b/chromium/gpu/command_buffer/client/gles2_cmd_helper_autogen.h
@@ -2505,7 +2505,9 @@ void PostSubBufferCHROMIUM(GLint x, GLint y, GLint width, GLint height) {
}
void CopyTextureCHROMIUM(GLenum source_id,
+ GLint source_level,
GLenum dest_id,
+ GLint dest_level,
GLint internalformat,
GLenum dest_type,
GLboolean unpack_flip_y,
@@ -2514,13 +2516,16 @@ void CopyTextureCHROMIUM(GLenum source_id,
gles2::cmds::CopyTextureCHROMIUM* c =
GetCmdSpace<gles2::cmds::CopyTextureCHROMIUM>();
if (c) {
- c->Init(source_id, dest_id, internalformat, dest_type, unpack_flip_y,
- unpack_premultiply_alpha, unpack_unmultiply_alpha);
+ c->Init(source_id, source_level, dest_id, dest_level, internalformat,
+ dest_type, unpack_flip_y, unpack_premultiply_alpha,
+ unpack_unmultiply_alpha);
}
}
void CopySubTextureCHROMIUM(GLenum source_id,
+ GLint source_level,
GLenum dest_id,
+ GLint dest_level,
GLint xoffset,
GLint yoffset,
GLint x,
@@ -2533,8 +2538,9 @@ void CopySubTextureCHROMIUM(GLenum source_id,
gles2::cmds::CopySubTextureCHROMIUM* c =
GetCmdSpace<gles2::cmds::CopySubTextureCHROMIUM>();
if (c) {
- c->Init(source_id, dest_id, xoffset, yoffset, x, y, width, height,
- unpack_flip_y, unpack_premultiply_alpha, unpack_unmultiply_alpha);
+ c->Init(source_id, source_level, dest_id, dest_level, xoffset, yoffset, x,
+ y, width, height, unpack_flip_y, unpack_premultiply_alpha,
+ unpack_unmultiply_alpha);
}
}
@@ -3166,6 +3172,17 @@ void UniformMatrix4fvStreamTextureMatrixCHROMIUMImmediate(
}
}
+void OverlayPromotionHintCHROMIUM(GLuint texture,
+ GLboolean promotion_hint,
+ GLint display_x,
+ GLint display_y) {
+ gles2::cmds::OverlayPromotionHintCHROMIUM* c =
+ GetCmdSpace<gles2::cmds::OverlayPromotionHintCHROMIUM>();
+ if (c) {
+ c->Init(texture, promotion_hint, display_x, display_y);
+ }
+}
+
void SwapBuffersWithDamageCHROMIUM(GLint x,
GLint y,
GLint width,
diff --git a/chromium/gpu/command_buffer/client/gles2_implementation.cc b/chromium/gpu/command_buffer/client/gles2_implementation.cc
index 3424bc2a531..0c1ac65d3e1 100644
--- a/chromium/gpu/command_buffer/client/gles2_implementation.cc
+++ b/chromium/gpu/command_buffer/client/gles2_implementation.cc
@@ -18,6 +18,7 @@
#include <sstream>
#include <string>
#include "base/atomic_sequence_num.h"
+#include "base/bits.h"
#include "base/compiler_specific.h"
#include "base/numerics/safe_math.h"
#include "base/strings/string_split.h"
@@ -219,6 +220,8 @@ bool GLES2Implementation::Initialize(
if (mapped_memory_limit != SharedMemoryLimits::kNoLimit) {
// Use smaller chunks if the client is very memory conscientious.
chunk_size = std::min(mapped_memory_limit / 4, chunk_size);
+ chunk_size = base::bits::Align(chunk_size,
+ FencedAllocator::kAllocAlignment);
}
mapped_memory_->set_chunk_size_multiple(chunk_size);
@@ -262,21 +265,10 @@ bool GLES2Implementation::Initialize(
return false;
}
- // In certain cases, ThreadTaskRunnerHandle isn't set (Android Webview).
- // Don't register a dump provider in these cases.
- // TODO(ericrk): Get this working in Android Webview. crbug.com/517156
- if (base::ThreadTaskRunnerHandle::IsSet()) {
- base::trace_event::MemoryDumpManager::GetInstance()->RegisterDumpProvider(
- this, "GLES2Implementation", base::ThreadTaskRunnerHandle::Get());
- }
-
return true;
}
GLES2Implementation::~GLES2Implementation() {
- base::trace_event::MemoryDumpManager::GetInstance()->UnregisterDumpProvider(
- this);
-
// Make sure the queries are finished otherwise we'll delete the
// shared memory (mapped_memory_) which will free the memory used
// by the queries. The GPU process when validating that memory is still
@@ -405,6 +397,16 @@ void GLES2Implementation::SignalSyncToken(const gpu::SyncToken& sync_token,
}
}
+// This may be called from any thread. It's safe to access gpu_control_ without
+// the lock because it is const.
+bool GLES2Implementation::IsSyncTokenSignalled(
+ const gpu::SyncToken& sync_token) {
+ // Check that the sync token belongs to this context.
+ DCHECK_EQ(gpu_control_->GetNamespaceID(), sync_token.namespace_id());
+ DCHECK_EQ(gpu_control_->GetCommandBufferID(), sync_token.command_buffer_id());
+ return gpu_control_->IsFenceSyncReleased(sync_token.release_count());
+}
+
void GLES2Implementation::SignalQuery(uint32_t query,
const base::Closure& callback) {
// Flush previously entered commands to ensure ordering with any
@@ -441,6 +443,9 @@ bool GLES2Implementation::OnMemoryDump(
using base::trace_event::MemoryAllocatorDump;
using base::trace_event::MemoryDumpLevelOfDetail;
+ // Dump owned MappedMemoryManager memory as well.
+ mapped_memory_->OnMemoryDump(args, pmd);
+
if (!transfer_buffer_->HaveBuffer())
return true;
@@ -804,6 +809,15 @@ bool GLES2Implementation::GetHelper(GLenum pname, GLint* params) {
case GL_MAX_VERTEX_UNIFORM_VECTORS:
*params = capabilities_.max_vertex_uniform_vectors;
return true;
+ case GL_MAX_VIEWPORT_DIMS:
+ if (capabilities_.max_viewport_width > 0 &&
+ capabilities_.max_viewport_height > 0) {
+ params[0] = capabilities_.max_viewport_width;
+ params[1] = capabilities_.max_viewport_height;
+ return true;
+ }
+ // If they are not cached on the client side yet, query the service side.
+ return false;
case GL_NUM_COMPRESSED_TEXTURE_FORMATS:
*params = capabilities_.num_compressed_texture_formats;
return true;
@@ -832,7 +846,8 @@ bool GLES2Implementation::GetHelper(GLenum pname, GLint* params) {
*params = bound_pixel_unpack_transfer_buffer_id_;
return true;
case GL_READ_FRAMEBUFFER_BINDING:
- if (IsChromiumFramebufferMultisampleAvailable()) {
+ if (capabilities_.major_version >= 3 ||
+ IsChromiumFramebufferMultisampleAvailable()) {
*params = bound_read_framebuffer_;
return true;
}
@@ -847,6 +862,23 @@ bool GLES2Implementation::GetHelper(GLenum pname, GLint* params) {
*params = static_cast<GLint>(query_tracker_->CheckAndResetDisjoint());
return true;
+ case GL_VIEWPORT:
+ if (state_.viewport_width > 0 &&
+ state_.viewport_height > 0 &&
+ capabilities_.max_viewport_width > 0 &&
+ capabilities_.max_viewport_height > 0) {
+ params[0] = state_.viewport_x;
+ params[1] = state_.viewport_y;
+ params[2] = std::min(state_.viewport_width,
+ capabilities_.max_viewport_width);
+ params[3] = std::min(state_.viewport_height,
+ capabilities_.max_viewport_height);
+ return true;
+ }
+ // If they haven't been cached on the client side, go to service side
+ // to query the underlying driver.
+ return false;
+
// Non-cached parameters.
case GL_ALIASED_LINE_WIDTH_RANGE:
case GL_ALIASED_POINT_SIZE_RANGE:
@@ -879,7 +911,6 @@ bool GLES2Implementation::GetHelper(GLenum pname, GLint* params) {
case GL_IMPLEMENTATION_COLOR_READ_FORMAT:
case GL_IMPLEMENTATION_COLOR_READ_TYPE:
case GL_LINE_WIDTH:
- case GL_MAX_VIEWPORT_DIMS:
case GL_PACK_ALIGNMENT:
case GL_POLYGON_OFFSET_FACTOR:
case GL_POLYGON_OFFSET_FILL:
@@ -914,7 +945,6 @@ bool GLES2Implementation::GetHelper(GLenum pname, GLint* params) {
case GL_STENCIL_WRITEMASK:
case GL_SUBPIXEL_BITS:
case GL_UNPACK_ALIGNMENT:
- case GL_VIEWPORT:
return false;
default:
break;
@@ -2388,7 +2418,7 @@ void GLES2Implementation::CompressedTexSubImage2D(
helper_->CompressedTexSubImage2D(
target, level, xoffset, yoffset, width, height, format, image_size,
0, ToGLuint(data));
- } else {
+ } else if (data) {
SetBucketContents(kResultBucketId, data, image_size);
helper_->CompressedTexSubImage2DBucket(
target, level, xoffset, yoffset, width, height, format,
@@ -2397,6 +2427,9 @@ void GLES2Implementation::CompressedTexSubImage2D(
// and we don't have to wait for the result so from the client's perspective
// it's cheap.
helper_->SetBucketSize(kResultBucketId, 0);
+ } else {
+ helper_->CompressedTexSubImage2D(target, level, xoffset, yoffset, width,
+ height, format, image_size, 0, 0);
}
CheckGLError();
}
@@ -2491,7 +2524,7 @@ void GLES2Implementation::CompressedTexSubImage3D(
helper_->CompressedTexSubImage3D(
target, level, xoffset, yoffset, zoffset, width, height, depth, format,
image_size, 0, ToGLuint(data));
- } else {
+ } else if (data) {
SetBucketContents(kResultBucketId, data, image_size);
helper_->CompressedTexSubImage3DBucket(
target, level, xoffset, yoffset, zoffset, width, height, depth, format,
@@ -2500,6 +2533,10 @@ void GLES2Implementation::CompressedTexSubImage3D(
// and we don't have to wait for the result so from the client's perspective
// it's cheap.
helper_->SetBucketSize(kResultBucketId, 0);
+ } else {
+ helper_->CompressedTexSubImage3D(target, level, xoffset, yoffset, zoffset,
+ width, height, depth, format, image_size,
+ 0, 0);
}
CheckGLError();
}
@@ -4340,20 +4377,16 @@ void GLES2Implementation::BindFramebufferHelper(
}
break;
case GL_READ_FRAMEBUFFER:
- if (!IsChromiumFramebufferMultisampleAvailable()) {
- SetGLErrorInvalidEnum("glBindFramebuffer", target, "target");
- return;
- }
+ DCHECK(capabilities_.major_version >= 3 ||
+ IsChromiumFramebufferMultisampleAvailable());
if (bound_read_framebuffer_ != framebuffer) {
bound_read_framebuffer_ = framebuffer;
changed = true;
}
break;
case GL_DRAW_FRAMEBUFFER:
- if (!IsChromiumFramebufferMultisampleAvailable()) {
- SetGLErrorInvalidEnum("glBindFramebuffer", target, "target");
- return;
- }
+ DCHECK(capabilities_.major_version >= 3 ||
+ IsChromiumFramebufferMultisampleAvailable());
if (bound_framebuffer_ != framebuffer) {
bound_framebuffer_ = framebuffer;
changed = true;
@@ -6327,7 +6360,7 @@ bool GLES2Implementation::PackStringsToBucket(GLsizei count,
const char* func_name) {
DCHECK_LE(0, count);
// Compute the total size.
- base::CheckedNumeric<size_t> total_size = count;
+ base::CheckedNumeric<uint32_t> total_size = count;
total_size += 1;
total_size *= sizeof(GLint);
if (!total_size.IsValid()) {
@@ -7030,6 +7063,22 @@ void GLES2Implementation::InvalidateCachedExtensions() {
cached_extensions_.clear();
}
+void GLES2Implementation::Viewport(GLint x,
+ GLint y,
+ GLsizei width,
+ GLsizei height) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glViewport(" << x << ", " << y
+ << ", " << width << ", " << height << ")");
+ if (width < 0 || height < 0) {
+ SetGLError(GL_INVALID_VALUE, "glViewport", "negative width/height");
+ return;
+ }
+ state_.SetViewport(x, y, width, height);
+ helper_->Viewport(x, y, width, height);
+ CheckGLError();
+}
+
// Include the auto-generated part of this file. We split this because it means
// we can easily edit the non-auto generated parts right here in this file
// instead of having to edit some template or the code generator.
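
The practical effect of the viewport caching added above, as a minimal sketch (assumes a current GLES2 command-buffer context; not part of the diff):

    #include <GLES2/gl2.h>

    // Sketch only: after glViewport(), GL_VIEWPORT (and GL_MAX_VIEWPORT_DIMS,
    // when the capabilities carry them) can be answered from the client-side
    // cache, with the cached size clamped to the maximum viewport dimensions.
    void QueryViewportExample() {
      glViewport(0, 0, 1920, 1080);                   // cached in ClientContextState
      GLint viewport[4] = {0, 0, 0, 0};
      glGetIntegerv(GL_VIEWPORT, viewport);           // may be served from the cache
      GLint max_dims[2] = {0, 0};
      glGetIntegerv(GL_MAX_VIEWPORT_DIMS, max_dims);  // cached from Capabilities
    }
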
diff --git a/chromium/gpu/command_buffer/client/gles2_implementation.h b/chromium/gpu/command_buffer/client/gles2_implementation.h
index d26378a4387..b34cdc39c9a 100644
--- a/chromium/gpu/command_buffer/client/gles2_implementation.h
+++ b/chromium/gpu/command_buffer/client/gles2_implementation.h
@@ -251,6 +251,7 @@ class GLES2_IMPL_EXPORT GLES2Implementation
// ContextSupport implementation.
void SignalSyncToken(const gpu::SyncToken& sync_token,
const base::Closure& callback) override;
+ bool IsSyncTokenSignalled(const gpu::SyncToken& sync_token) override;
void SignalQuery(uint32_t query, const base::Closure& callback) override;
void SetAggressivelyFreeResources(bool aggressively_free_resources) override;
@@ -816,7 +817,7 @@ class GLES2_IMPL_EXPORT GLES2Implementation
int current_trace_stack_;
- GpuControl* gpu_control_;
+ GpuControl* const gpu_control_;
Capabilities capabilities_;
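A rough usage sketch for the IsSyncTokenSignalled override declared above; gl is a GLES2Implementation pointer, sync_token an existing gpu::SyncToken, and ReleaseResourcesNow a hypothetical no-argument callback:

  if (gl->IsSyncTokenSignalled(sync_token)) {
    ReleaseResourcesNow();  // the token has already passed, no need to wait
  } else {
    gl->SignalSyncToken(sync_token, base::Bind(&ReleaseResourcesNow));
  }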
diff --git a/chromium/gpu/command_buffer/client/gles2_implementation_autogen.h b/chromium/gpu/command_buffer/client/gles2_implementation_autogen.h
index 0635ca1ee20..3a66cb71e6c 100644
--- a/chromium/gpu/command_buffer/client/gles2_implementation_autogen.h
+++ b/chromium/gpu/command_buffer/client/gles2_implementation_autogen.h
@@ -942,7 +942,9 @@ void PostSubBufferCHROMIUM(GLint x,
GLint height) override;
void CopyTextureCHROMIUM(GLenum source_id,
+ GLint source_level,
GLenum dest_id,
+ GLint dest_level,
GLint internalformat,
GLenum dest_type,
GLboolean unpack_flip_y,
@@ -950,7 +952,9 @@ void CopyTextureCHROMIUM(GLenum source_id,
GLboolean unpack_unmultiply_alpha) override;
void CopySubTextureCHROMIUM(GLenum source_id,
+ GLint source_level,
GLenum dest_id,
+ GLint dest_level,
GLint xoffset,
GLint yoffset,
GLint x,
@@ -1199,6 +1203,11 @@ void UniformMatrix4fvStreamTextureMatrixCHROMIUM(
GLboolean transpose,
const GLfloat* transform) override;
+void OverlayPromotionHintCHROMIUM(GLuint texture,
+ GLboolean promotion_hint,
+ GLint display_x,
+ GLint display_y) override;
+
void SwapBuffersWithDamageCHROMIUM(GLint x,
GLint y,
GLint width,
diff --git a/chromium/gpu/command_buffer/client/gles2_implementation_impl_autogen.h b/chromium/gpu/command_buffer/client/gles2_implementation_impl_autogen.h
index 679822032dd..eed99df1622 100644
--- a/chromium/gpu/command_buffer/client/gles2_implementation_impl_autogen.h
+++ b/chromium/gpu/command_buffer/client/gles2_implementation_impl_autogen.h
@@ -2782,25 +2782,6 @@ void GLES2Implementation::VertexAttribI4uiv(GLuint indx, const GLuint* values) {
CheckGLError();
}
-void GLES2Implementation::Viewport(GLint x,
- GLint y,
- GLsizei width,
- GLsizei height) {
- GPU_CLIENT_SINGLE_THREAD_CHECK();
- GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glViewport(" << x << ", " << y
- << ", " << width << ", " << height << ")");
- if (width < 0) {
- SetGLError(GL_INVALID_VALUE, "glViewport", "width < 0");
- return;
- }
- if (height < 0) {
- SetGLError(GL_INVALID_VALUE, "glViewport", "height < 0");
- return;
- }
- helper_->Viewport(x, y, width, height);
- CheckGLError();
-}
-
void GLES2Implementation::BlitFramebufferCHROMIUM(GLint srcX0,
GLint srcY0,
GLint srcX1,
@@ -3131,7 +3112,9 @@ void GLES2Implementation::GetTranslatedShaderSourceANGLE(GLuint shader,
}
void GLES2Implementation::CopyTextureCHROMIUM(
GLenum source_id,
+ GLint source_level,
GLenum dest_id,
+ GLint dest_level,
GLint internalformat,
GLenum dest_type,
GLboolean unpack_flip_y,
@@ -3140,21 +3123,23 @@ void GLES2Implementation::CopyTextureCHROMIUM(
GPU_CLIENT_SINGLE_THREAD_CHECK();
GPU_CLIENT_LOG(
"[" << GetLogPrefix() << "] glCopyTextureCHROMIUM("
- << GLES2Util::GetStringEnum(source_id) << ", "
- << GLES2Util::GetStringEnum(dest_id) << ", " << internalformat << ", "
- << GLES2Util::GetStringPixelType(dest_type) << ", "
- << GLES2Util::GetStringBool(unpack_flip_y) << ", "
+ << GLES2Util::GetStringEnum(source_id) << ", " << source_level << ", "
+ << GLES2Util::GetStringEnum(dest_id) << ", " << dest_level << ", "
+ << internalformat << ", " << GLES2Util::GetStringPixelType(dest_type)
+ << ", " << GLES2Util::GetStringBool(unpack_flip_y) << ", "
<< GLES2Util::GetStringBool(unpack_premultiply_alpha) << ", "
<< GLES2Util::GetStringBool(unpack_unmultiply_alpha) << ")");
- helper_->CopyTextureCHROMIUM(source_id, dest_id, internalformat, dest_type,
- unpack_flip_y, unpack_premultiply_alpha,
- unpack_unmultiply_alpha);
+ helper_->CopyTextureCHROMIUM(
+ source_id, source_level, dest_id, dest_level, internalformat, dest_type,
+ unpack_flip_y, unpack_premultiply_alpha, unpack_unmultiply_alpha);
CheckGLError();
}
void GLES2Implementation::CopySubTextureCHROMIUM(
GLenum source_id,
+ GLint source_level,
GLenum dest_id,
+ GLint dest_level,
GLint xoffset,
GLint yoffset,
GLint x,
@@ -3167,10 +3152,11 @@ void GLES2Implementation::CopySubTextureCHROMIUM(
GPU_CLIENT_SINGLE_THREAD_CHECK();
GPU_CLIENT_LOG(
"[" << GetLogPrefix() << "] glCopySubTextureCHROMIUM("
- << GLES2Util::GetStringEnum(source_id) << ", "
- << GLES2Util::GetStringEnum(dest_id) << ", " << xoffset << ", "
- << yoffset << ", " << x << ", " << y << ", " << width << ", "
- << height << ", " << GLES2Util::GetStringBool(unpack_flip_y) << ", "
+ << GLES2Util::GetStringEnum(source_id) << ", " << source_level << ", "
+ << GLES2Util::GetStringEnum(dest_id) << ", " << dest_level << ", "
+ << xoffset << ", " << yoffset << ", " << x << ", " << y << ", "
+ << width << ", " << height << ", "
+ << GLES2Util::GetStringBool(unpack_flip_y) << ", "
<< GLES2Util::GetStringBool(unpack_premultiply_alpha) << ", "
<< GLES2Util::GetStringBool(unpack_unmultiply_alpha) << ")");
if (width < 0) {
@@ -3181,9 +3167,10 @@ void GLES2Implementation::CopySubTextureCHROMIUM(
SetGLError(GL_INVALID_VALUE, "glCopySubTextureCHROMIUM", "height < 0");
return;
}
- helper_->CopySubTextureCHROMIUM(
- source_id, dest_id, xoffset, yoffset, x, y, width, height, unpack_flip_y,
- unpack_premultiply_alpha, unpack_unmultiply_alpha);
+ helper_->CopySubTextureCHROMIUM(source_id, source_level, dest_id, dest_level,
+ xoffset, yoffset, x, y, width, height,
+ unpack_flip_y, unpack_premultiply_alpha,
+ unpack_unmultiply_alpha);
CheckGLError();
}
@@ -3221,8 +3208,9 @@ void GLES2Implementation::DiscardFramebufferEXT(GLenum target,
const GLenum* attachments) {
GPU_CLIENT_SINGLE_THREAD_CHECK();
GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glDiscardFramebufferEXT("
- << GLES2Util::GetStringEnum(target) << ", " << count
- << ", " << static_cast<const void*>(attachments) << ")");
+ << GLES2Util::GetStringFramebufferTarget(target) << ", "
+ << count << ", " << static_cast<const void*>(attachments)
+ << ")");
GPU_CLIENT_LOG_CODE_BLOCK({
for (GLsizei i = 0; i < count; ++i) {
GPU_CLIENT_LOG(" " << i << ": " << attachments[0 + i * 1]);
@@ -3506,4 +3494,18 @@ void GLES2Implementation::UniformMatrix4fvStreamTextureMatrixCHROMIUM(
CheckGLError();
}
+void GLES2Implementation::OverlayPromotionHintCHROMIUM(GLuint texture,
+ GLboolean promotion_hint,
+ GLint display_x,
+ GLint display_y) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glOverlayPromotionHintCHROMIUM("
+ << texture << ", "
+ << GLES2Util::GetStringBool(promotion_hint) << ", "
+ << display_x << ", " << display_y << ")");
+ helper_->OverlayPromotionHintCHROMIUM(texture, promotion_hint, display_x,
+ display_y);
+ CheckGLError();
+}
+
#endif // GPU_COMMAND_BUFFER_CLIENT_GLES2_IMPLEMENTATION_IMPL_AUTOGEN_H_
diff --git a/chromium/gpu/command_buffer/client/gles2_implementation_unittest.cc b/chromium/gpu/command_buffer/client/gles2_implementation_unittest.cc
index 33b4eda5be9..fbf35d21917 100644
--- a/chromium/gpu/command_buffer/client/gles2_implementation_unittest.cc
+++ b/chromium/gpu/command_buffer/client/gles2_implementation_unittest.cc
@@ -404,6 +404,8 @@ class GLES2ImplementationTest : public testing::Test {
static const GLint kMaxVertexAttribs = 8;
static const GLint kMaxVertexTextureImageUnits = 0;
static const GLint kMaxVertexUniformVectors = 128;
+ static const GLint kMaxViewportWidth = 8192;
+ static const GLint kMaxViewportHeight = 6144;
static const GLint kNumCompressedTextureFormats = 0;
static const GLint kNumShaderBinaryFormats = 0;
static const GLuint kMaxTransformFeedbackSeparateAttribs = 4;
@@ -467,6 +469,8 @@ class GLES2ImplementationTest : public testing::Test {
capabilities.max_vertex_attribs = kMaxVertexAttribs;
capabilities.max_vertex_texture_image_units = kMaxVertexTextureImageUnits;
capabilities.max_vertex_uniform_vectors = kMaxVertexUniformVectors;
+ capabilities.max_viewport_width = kMaxViewportWidth;
+ capabilities.max_viewport_height = kMaxViewportHeight;
capabilities.num_compressed_texture_formats =
kNumCompressedTextureFormats;
capabilities.num_shader_binary_formats = kNumShaderBinaryFormats;
diff --git a/chromium/gpu/command_buffer/client/gles2_implementation_unittest_autogen.h b/chromium/gpu/command_buffer/client/gles2_implementation_unittest_autogen.h
index e0b5487a5e2..cbdf57f24ad 100644
--- a/chromium/gpu/command_buffer/client/gles2_implementation_unittest_autogen.h
+++ b/chromium/gpu/command_buffer/client/gles2_implementation_unittest_autogen.h
@@ -189,9 +189,9 @@ TEST_F(GLES2ImplementationTest, Clear) {
cmds::Clear cmd;
};
Cmds expected;
- expected.cmd.Init(1);
+ expected.cmd.Init(GL_COLOR_BUFFER_BIT);
- gl_->Clear(1);
+ gl_->Clear(GL_COLOR_BUFFER_BIT);
EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
}
@@ -2717,9 +2717,10 @@ TEST_F(GLES2ImplementationTest, CopyTextureCHROMIUM) {
cmds::CopyTextureCHROMIUM cmd;
};
Cmds expected;
- expected.cmd.Init(1, 2, GL_ALPHA, GL_UNSIGNED_BYTE, true, true, true);
+ expected.cmd.Init(1, 2, 3, 4, GL_ALPHA, GL_UNSIGNED_BYTE, true, true, true);
- gl_->CopyTextureCHROMIUM(1, 2, GL_ALPHA, GL_UNSIGNED_BYTE, true, true, true);
+ gl_->CopyTextureCHROMIUM(1, 2, 3, 4, GL_ALPHA, GL_UNSIGNED_BYTE, true, true,
+ true);
EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
}
@@ -2728,9 +2729,9 @@ TEST_F(GLES2ImplementationTest, CopySubTextureCHROMIUM) {
cmds::CopySubTextureCHROMIUM cmd;
};
Cmds expected;
- expected.cmd.Init(1, 2, 3, 4, 5, 6, 7, 8, true, true, true);
+ expected.cmd.Init(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, true, true, true);
- gl_->CopySubTextureCHROMIUM(1, 2, 3, 4, 5, 6, 7, 8, true, true, true);
+ gl_->CopySubTextureCHROMIUM(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, true, true, true);
EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
}
@@ -2802,8 +2803,8 @@ TEST_F(GLES2ImplementationTest, DiscardFramebufferEXT) {
data[ii][jj] = static_cast<GLenum>(ii * 1 + jj);
}
}
- expected.cmd.Init(1, 2, &data[0][0]);
- gl_->DiscardFramebufferEXT(1, 2, &data[0][0]);
+ expected.cmd.Init(GL_FRAMEBUFFER, 2, &data[0][0]);
+ gl_->DiscardFramebufferEXT(GL_FRAMEBUFFER, 2, &data[0][0]);
EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
}
diff --git a/chromium/gpu/command_buffer/client/gles2_interface_autogen.h b/chromium/gpu/command_buffer/client/gles2_interface_autogen.h
index 0055e6c167a..52e8ffba8bc 100644
--- a/chromium/gpu/command_buffer/client/gles2_interface_autogen.h
+++ b/chromium/gpu/command_buffer/client/gles2_interface_autogen.h
@@ -689,14 +689,18 @@ virtual void PostSubBufferCHROMIUM(GLint x,
GLint width,
GLint height) = 0;
virtual void CopyTextureCHROMIUM(GLenum source_id,
+ GLint source_level,
GLenum dest_id,
+ GLint dest_level,
GLint internalformat,
GLenum dest_type,
GLboolean unpack_flip_y,
GLboolean unpack_premultiply_alpha,
GLboolean unpack_unmultiply_alpha) = 0;
virtual void CopySubTextureCHROMIUM(GLenum source_id,
+ GLint source_level,
GLenum dest_id,
+ GLint dest_level,
GLint xoffset,
GLint yoffset,
GLint x,
@@ -886,6 +890,10 @@ virtual void UniformMatrix4fvStreamTextureMatrixCHROMIUM(
GLint location,
GLboolean transpose,
const GLfloat* transform) = 0;
+virtual void OverlayPromotionHintCHROMIUM(GLuint texture,
+ GLboolean promotion_hint,
+ GLint display_x,
+ GLint display_y) = 0;
virtual void SwapBuffersWithDamageCHROMIUM(GLint x,
GLint y,
GLint width,
diff --git a/chromium/gpu/command_buffer/client/gles2_interface_stub_autogen.h b/chromium/gpu/command_buffer/client/gles2_interface_stub_autogen.h
index 75af4906662..ac0f7a6852c 100644
--- a/chromium/gpu/command_buffer/client/gles2_interface_stub_autogen.h
+++ b/chromium/gpu/command_buffer/client/gles2_interface_stub_autogen.h
@@ -668,14 +668,18 @@ void PostSubBufferCHROMIUM(GLint x,
GLint width,
GLint height) override;
void CopyTextureCHROMIUM(GLenum source_id,
+ GLint source_level,
GLenum dest_id,
+ GLint dest_level,
GLint internalformat,
GLenum dest_type,
GLboolean unpack_flip_y,
GLboolean unpack_premultiply_alpha,
GLboolean unpack_unmultiply_alpha) override;
void CopySubTextureCHROMIUM(GLenum source_id,
+ GLint source_level,
GLenum dest_id,
+ GLint dest_level,
GLint xoffset,
GLint yoffset,
GLint x,
@@ -860,6 +864,10 @@ void UniformMatrix4fvStreamTextureMatrixCHROMIUM(
GLint location,
GLboolean transpose,
const GLfloat* transform) override;
+void OverlayPromotionHintCHROMIUM(GLuint texture,
+ GLboolean promotion_hint,
+ GLint display_x,
+ GLint display_y) override;
void SwapBuffersWithDamageCHROMIUM(GLint x,
GLint y,
GLint width,
diff --git a/chromium/gpu/command_buffer/client/gles2_interface_stub_impl_autogen.h b/chromium/gpu/command_buffer/client/gles2_interface_stub_impl_autogen.h
index ce62d915020..dc15f9d69de 100644
--- a/chromium/gpu/command_buffer/client/gles2_interface_stub_impl_autogen.h
+++ b/chromium/gpu/command_buffer/client/gles2_interface_stub_impl_autogen.h
@@ -916,7 +916,9 @@ void GLES2InterfaceStub::PostSubBufferCHROMIUM(GLint /* x */,
GLint /* height */) {}
void GLES2InterfaceStub::CopyTextureCHROMIUM(
GLenum /* source_id */,
+ GLint /* source_level */,
GLenum /* dest_id */,
+ GLint /* dest_level */,
GLint /* internalformat */,
GLenum /* dest_type */,
GLboolean /* unpack_flip_y */,
@@ -924,7 +926,9 @@ void GLES2InterfaceStub::CopyTextureCHROMIUM(
GLboolean /* unpack_unmultiply_alpha */) {}
void GLES2InterfaceStub::CopySubTextureCHROMIUM(
GLenum /* source_id */,
+ GLint /* source_level */,
GLenum /* dest_id */,
+ GLint /* dest_level */,
GLint /* xoffset */,
GLint /* yoffset */,
GLint /* x */,
@@ -1163,6 +1167,11 @@ void GLES2InterfaceStub::UniformMatrix4fvStreamTextureMatrixCHROMIUM(
GLint /* location */,
GLboolean /* transpose */,
const GLfloat* /* transform */) {}
+void GLES2InterfaceStub::OverlayPromotionHintCHROMIUM(
+ GLuint /* texture */,
+ GLboolean /* promotion_hint */,
+ GLint /* display_x */,
+ GLint /* display_y */) {}
void GLES2InterfaceStub::SwapBuffersWithDamageCHROMIUM(GLint /* x */,
GLint /* y */,
GLint /* width */,
diff --git a/chromium/gpu/command_buffer/client/gles2_trace_implementation_autogen.h b/chromium/gpu/command_buffer/client/gles2_trace_implementation_autogen.h
index 9277de1fa51..75006d58b32 100644
--- a/chromium/gpu/command_buffer/client/gles2_trace_implementation_autogen.h
+++ b/chromium/gpu/command_buffer/client/gles2_trace_implementation_autogen.h
@@ -668,14 +668,18 @@ void PostSubBufferCHROMIUM(GLint x,
GLint width,
GLint height) override;
void CopyTextureCHROMIUM(GLenum source_id,
+ GLint source_level,
GLenum dest_id,
+ GLint dest_level,
GLint internalformat,
GLenum dest_type,
GLboolean unpack_flip_y,
GLboolean unpack_premultiply_alpha,
GLboolean unpack_unmultiply_alpha) override;
void CopySubTextureCHROMIUM(GLenum source_id,
+ GLint source_level,
GLenum dest_id,
+ GLint dest_level,
GLint xoffset,
GLint yoffset,
GLint x,
@@ -860,6 +864,10 @@ void UniformMatrix4fvStreamTextureMatrixCHROMIUM(
GLint location,
GLboolean transpose,
const GLfloat* transform) override;
+void OverlayPromotionHintCHROMIUM(GLuint texture,
+ GLboolean promotion_hint,
+ GLint display_x,
+ GLint display_y) override;
void SwapBuffersWithDamageCHROMIUM(GLint x,
GLint y,
GLint width,
diff --git a/chromium/gpu/command_buffer/client/gles2_trace_implementation_impl_autogen.h b/chromium/gpu/command_buffer/client/gles2_trace_implementation_impl_autogen.h
index 2d08ece5315..f09bc335668 100644
--- a/chromium/gpu/command_buffer/client/gles2_trace_implementation_impl_autogen.h
+++ b/chromium/gpu/command_buffer/client/gles2_trace_implementation_impl_autogen.h
@@ -1949,21 +1949,25 @@ void GLES2TraceImplementation::PostSubBufferCHROMIUM(GLint x,
void GLES2TraceImplementation::CopyTextureCHROMIUM(
GLenum source_id,
+ GLint source_level,
GLenum dest_id,
+ GLint dest_level,
GLint internalformat,
GLenum dest_type,
GLboolean unpack_flip_y,
GLboolean unpack_premultiply_alpha,
GLboolean unpack_unmultiply_alpha) {
TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::CopyTextureCHROMIUM");
- gl_->CopyTextureCHROMIUM(source_id, dest_id, internalformat, dest_type,
- unpack_flip_y, unpack_premultiply_alpha,
- unpack_unmultiply_alpha);
+ gl_->CopyTextureCHROMIUM(source_id, source_level, dest_id, dest_level,
+ internalformat, dest_type, unpack_flip_y,
+ unpack_premultiply_alpha, unpack_unmultiply_alpha);
}
void GLES2TraceImplementation::CopySubTextureCHROMIUM(
GLenum source_id,
+ GLint source_level,
GLenum dest_id,
+ GLint dest_level,
GLint xoffset,
GLint yoffset,
GLint x,
@@ -1974,8 +1978,9 @@ void GLES2TraceImplementation::CopySubTextureCHROMIUM(
GLboolean unpack_premultiply_alpha,
GLboolean unpack_unmultiply_alpha) {
TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::CopySubTextureCHROMIUM");
- gl_->CopySubTextureCHROMIUM(source_id, dest_id, xoffset, yoffset, x, y, width,
- height, unpack_flip_y, unpack_premultiply_alpha,
+ gl_->CopySubTextureCHROMIUM(source_id, source_level, dest_id, dest_level,
+ xoffset, yoffset, x, y, width, height,
+ unpack_flip_y, unpack_premultiply_alpha,
unpack_unmultiply_alpha);
}
@@ -2488,6 +2493,17 @@ void GLES2TraceImplementation::UniformMatrix4fvStreamTextureMatrixCHROMIUM(
transform);
}
+void GLES2TraceImplementation::OverlayPromotionHintCHROMIUM(
+ GLuint texture,
+ GLboolean promotion_hint,
+ GLint display_x,
+ GLint display_y) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu",
+ "GLES2Trace::OverlayPromotionHintCHROMIUM");
+ gl_->OverlayPromotionHintCHROMIUM(texture, promotion_hint, display_x,
+ display_y);
+}
+
void GLES2TraceImplementation::SwapBuffersWithDamageCHROMIUM(GLint x,
GLint y,
GLint width,
diff --git a/chromium/gpu/command_buffer/client/gpu_control.h b/chromium/gpu/command_buffer/client/gpu_control.h
index 813d5630960..62429898c88 100644
--- a/chromium/gpu/command_buffer/client/gpu_control.h
+++ b/chromium/gpu/command_buffer/client/gpu_control.h
@@ -24,10 +24,6 @@ namespace base {
class Lock;
}
-namespace gfx {
-class GpuMemoryBuffer;
-}
-
namespace gpu {
class GpuControlClient;
struct SyncToken;
@@ -86,18 +82,29 @@ class GPU_EXPORT GpuControl {
virtual CommandBufferId GetCommandBufferID() const = 0;
virtual int32_t GetExtraCommandBufferData() const = 0;
- // Fence Syncs use release counters at a context level, these fence syncs
- // need to be flushed before they can be shared with other contexts across
- // channels. Subclasses should implement these functions and take care of
- // figuring out when a fence sync has been flushed. The difference between
- // IsFenceSyncFlushed and IsFenceSyncFlushReceived, one is testing is the
- // client has issued the flush, and the other is testing if the service
- // has received the flush.
+ // Generates a fence sync which should be inserted into the GL command stream.
+ // When the service executes the fence sync it is released. Fence syncs are
+ // shared with other contexts as sync tokens which encapsulate the fence sync
+ // and the command buffer on which it was generated. Fence syncs need to be
+ // flushed before they can be used by other contexts. Furthermore, the flush
+ // must be verified before sending a sync token across channel boundaries.
virtual uint64_t GenerateFenceSyncRelease() = 0;
+
+ // Returns true if the fence sync is valid.
virtual bool IsFenceSyncRelease(uint64_t release) = 0;
+
+ // Returns true if the client has flushed the fence sync.
virtual bool IsFenceSyncFlushed(uint64_t release) = 0;
+
+ // Returns true if the service has received the fence sync. Used for verifying
+ // sync tokens.
  virtual bool IsFenceSyncFlushReceived(uint64_t release) = 0;
+
+ // Returns true if the service has released (executed) the fence sync. Some
+ // implementations may support calling this from any thread without holding
+ // the lock provided by the client.
+ virtual bool IsFenceSyncReleased(uint64_t release) = 0;
+
// Runs |callback| when sync token is signalled.
virtual void SignalSyncToken(const SyncToken& sync_token,
const base::Closure& callback) = 0;
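The comments above describe the fence-sync lifecycle; a rough sketch of how a client might drive it, assuming gpu_control is a GpuControl pointer and that flushing happens through the owning command buffer helper:

  uint64_t release = gpu_control->GenerateFenceSyncRelease();
  // ... issue the InsertFenceSync command carrying |release| and flush ...
  if (gpu_control->IsFenceSyncFlushed(release)) {
    // The client has issued the flush; the sync token can be shared on this channel.
  }
  if (gpu_control->IsFenceSyncFlushReceived(release)) {
    // The service has received the flush; the token is verified and may cross
    // channel boundaries.
  }
  if (gpu_control->IsFenceSyncReleased(release)) {
    // The service has executed the fence sync.
  }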
diff --git a/chromium/gpu/command_buffer/client/gpu_memory_buffer_manager.h b/chromium/gpu/command_buffer/client/gpu_memory_buffer_manager.h
index 921b42e8e34..b75780bbc14 100644
--- a/chromium/gpu/command_buffer/client/gpu_memory_buffer_manager.h
+++ b/chromium/gpu/command_buffer/client/gpu_memory_buffer_manager.h
@@ -19,26 +19,18 @@ struct SyncToken;
class GPU_EXPORT GpuMemoryBufferManager {
public:
GpuMemoryBufferManager();
+ virtual ~GpuMemoryBufferManager();
- // Allocates a GpuMemoryBuffer that can be shared with another process.
- virtual std::unique_ptr<gfx::GpuMemoryBuffer> AllocateGpuMemoryBuffer(
+ // Creates a GpuMemoryBuffer that can be shared with another process.
+ virtual std::unique_ptr<gfx::GpuMemoryBuffer> CreateGpuMemoryBuffer(
const gfx::Size& size,
gfx::BufferFormat format,
gfx::BufferUsage usage,
gpu::SurfaceHandle surface_handle) = 0;
- // Creates a GpuMemoryBuffer from existing handle.
- virtual std::unique_ptr<gfx::GpuMemoryBuffer> CreateGpuMemoryBufferFromHandle(
- const gfx::GpuMemoryBufferHandle& handle,
- const gfx::Size& size,
- gfx::BufferFormat format) = 0;
-
// Associates destruction sync point with |buffer|.
virtual void SetDestructionSyncToken(gfx::GpuMemoryBuffer* buffer,
const gpu::SyncToken& sync_token) = 0;
-
- protected:
- virtual ~GpuMemoryBufferManager();
};
} // namespace gpu
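A minimal sketch of the reworked interface above; manager is a GpuMemoryBufferManager pointer, sync_token an existing gpu::SyncToken, and the size, format, usage, and surface handle values are illustrative:

  std::unique_ptr<gfx::GpuMemoryBuffer> buffer = manager->CreateGpuMemoryBuffer(
      gfx::Size(256, 256), gfx::BufferFormat::RGBA_8888,
      gfx::BufferUsage::GPU_READ, gpu::kNullSurfaceHandle);
  // The destructor is now public and virtual, so the caller owns the buffer's
  // lifetime; a destruction sync token can still be attached.
  manager->SetDestructionSyncToken(buffer.get(), sync_token);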
diff --git a/chromium/gpu/command_buffer/client/mapped_memory.cc b/chromium/gpu/command_buffer/client/mapped_memory.cc
index 0501d574e4b..708e121f39c 100644
--- a/chromium/gpu/command_buffer/client/mapped_memory.cc
+++ b/chromium/gpu/command_buffer/client/mapped_memory.cc
@@ -47,19 +47,9 @@ MappedMemoryManager::MappedMemoryManager(CommandBufferHelper* helper,
max_free_bytes_(unused_memory_reclaim_limit),
max_allocated_bytes_(SharedMemoryLimits::kNoLimit),
tracing_id_(g_next_mapped_memory_manager_tracing_id.GetNext()) {
- // In certain cases, ThreadTaskRunnerHandle isn't set (Android Webview).
- // Don't register a dump provider in these cases.
- // TODO(ericrk): Get this working in Android Webview. crbug.com/517156
- if (base::ThreadTaskRunnerHandle::IsSet()) {
- base::trace_event::MemoryDumpManager::GetInstance()->RegisterDumpProvider(
- this, "gpu::MappedMemoryManager", base::ThreadTaskRunnerHandle::Get());
- }
}
MappedMemoryManager::~MappedMemoryManager() {
- base::trace_event::MemoryDumpManager::GetInstance()->UnregisterDumpProvider(
- this);
-
CommandBuffer* cmd_buf = helper_->command_buffer();
for (auto& chunk : chunks_) {
cmd_buf->DestroyTransferBuffer(chunk->shm_id());
diff --git a/chromium/gpu/command_buffer/client/mapped_memory.h b/chromium/gpu/command_buffer/client/mapped_memory.h
index 0af38b56e92..72a96764c5d 100644
--- a/chromium/gpu/command_buffer/client/mapped_memory.h
+++ b/chromium/gpu/command_buffer/client/mapped_memory.h
@@ -121,8 +121,7 @@ class GPU_EXPORT MemoryChunk {
};
// Manages MemoryChunks.
-class GPU_EXPORT MappedMemoryManager
- : public base::trace_event::MemoryDumpProvider {
+class GPU_EXPORT MappedMemoryManager {
public:
enum MemoryLimit {
kNoLimit = 0,
@@ -133,7 +132,7 @@ class GPU_EXPORT MappedMemoryManager
MappedMemoryManager(CommandBufferHelper* helper,
size_t unused_memory_reclaim_limit);
- ~MappedMemoryManager() override;
+ ~MappedMemoryManager();
unsigned int chunk_size_multiple() const {
return chunk_size_multiple_;
@@ -179,9 +178,9 @@ class GPU_EXPORT MappedMemoryManager
// Free Any Shared memory that is not in use.
void FreeUnused();
- // Overridden from base::trace_event::MemoryDumpProvider:
+ // Dump memory usage - called from GLES2Implementation.
bool OnMemoryDump(const base::trace_event::MemoryDumpArgs& args,
- base::trace_event::ProcessMemoryDump* pmd) override;
+ base::trace_event::ProcessMemoryDump* pmd);
// Used for testing
size_t num_chunks() const {
diff --git a/chromium/gpu/command_buffer/client/ring_buffer.cc b/chromium/gpu/command_buffer/client/ring_buffer.cc
index ebbb1182aed..118dc57276e 100644
--- a/chromium/gpu/command_buffer/client/ring_buffer.cc
+++ b/chromium/gpu/command_buffer/client/ring_buffer.cc
@@ -146,10 +146,9 @@ void RingBuffer::DiscardBlock(void* pointer) {
}
unsigned int RingBuffer::GetLargestFreeSizeNoWaiting() {
- unsigned int last_token_read = helper_->last_token_read();
while (!blocks_.empty()) {
Block& block = blocks_.front();
- if (block.token > last_token_read || block.state == IN_USE) break;
+ if (!helper_->HasTokenPassed(block.token) || block.state == IN_USE) break;
FreeOldestBlock();
}
if (free_offset_ == in_use_offset_) {
diff --git a/chromium/gpu/command_buffer/cmd_buffer_functions.txt b/chromium/gpu/command_buffer/cmd_buffer_functions.txt
index 06b4910a657..fd142a41131 100644
--- a/chromium/gpu/command_buffer/cmd_buffer_functions.txt
+++ b/chromium/gpu/command_buffer/cmd_buffer_functions.txt
@@ -280,8 +280,8 @@ GL_APICALL GLuint GL_APIENTRY glCreateGpuMemoryBufferImageCHROMIUM (GLsize
GL_APICALL void GL_APIENTRY glDescheduleUntilFinishedCHROMIUM (void);
GL_APICALL void GL_APIENTRY glGetTranslatedShaderSourceANGLE (GLidShader shader, GLsizeiNotNegative bufsize, GLsizeiOptional* length, char* source);
GL_APICALL void GL_APIENTRY glPostSubBufferCHROMIUM (GLint x, GLint y, GLint width, GLint height);
-GL_APICALL void GL_APIENTRY glCopyTextureCHROMIUM (GLenum source_id, GLenum dest_id, GLintTextureInternalFormat internalformat, GLenumPixelType dest_type, GLboolean unpack_flip_y, GLboolean unpack_premultiply_alpha, GLboolean unpack_unmultiply_alpha);
-GL_APICALL void GL_APIENTRY glCopySubTextureCHROMIUM (GLenum source_id, GLenum dest_id, GLint xoffset, GLint yoffset, GLint x, GLint y, GLsizei width, GLsizei height, GLboolean unpack_flip_y, GLboolean unpack_premultiply_alpha, GLboolean unpack_unmultiply_alpha);
+GL_APICALL void GL_APIENTRY glCopyTextureCHROMIUM (GLenum source_id, GLint source_level, GLenum dest_id, GLint dest_level, GLintTextureInternalFormat internalformat, GLenumPixelType dest_type, GLboolean unpack_flip_y, GLboolean unpack_premultiply_alpha, GLboolean unpack_unmultiply_alpha);
+GL_APICALL void GL_APIENTRY glCopySubTextureCHROMIUM (GLenum source_id, GLint source_level, GLenum dest_id, GLint dest_level, GLint xoffset, GLint yoffset, GLint x, GLint y, GLsizei width, GLsizei height, GLboolean unpack_flip_y, GLboolean unpack_premultiply_alpha, GLboolean unpack_unmultiply_alpha);
GL_APICALL void GL_APIENTRY glCompressedCopyTextureCHROMIUM (GLenum source_id, GLenum dest_id);
GL_APICALL void GL_APIENTRY glDrawArraysInstancedANGLE (GLenumDrawMode mode, GLint first, GLsizei count, GLsizei primcount);
GL_APICALL void GL_APIENTRY glDrawElementsInstancedANGLE (GLenumDrawMode mode, GLsizei count, GLenumIndexType type, const void* indices, GLsizei primcount);
@@ -297,7 +297,7 @@ GL_APICALL void GL_APIENTRY glBindTexImage2DCHROMIUM (GLenumTextureBindT
GL_APICALL void GL_APIENTRY glReleaseTexImage2DCHROMIUM (GLenumTextureBindTarget target, GLint imageId);
GL_APICALL void GL_APIENTRY glTraceBeginCHROMIUM (const char* category_name, const char* trace_name);
GL_APICALL void GL_APIENTRY glTraceEndCHROMIUM (void);
-GL_APICALL void GL_APIENTRY glDiscardFramebufferEXT (GLenum target, GLsizei count, const GLenum* attachments);
+GL_APICALL void GL_APIENTRY glDiscardFramebufferEXT (GLenumFramebufferTarget target, GLsizei count, const GLenum* attachments);
GL_APICALL void GL_APIENTRY glLoseContextCHROMIUM (GLenumResetStatus current, GLenumResetStatus other);
GL_APICALL GLuint64 GL_APIENTRY glInsertFenceSyncCHROMIUM (void);
GL_APICALL void GL_APIENTRY glGenSyncTokenCHROMIUM (GLuint64 fence_sync, GLbyte* sync_token);
@@ -361,5 +361,6 @@ GL_APICALL GLint GL_APIENTRY glGetFragDataIndexEXT (GLidProgram program,
// Extension CHROMIUM_stream_texture_matrix
GL_APICALL void GL_APIENTRY glUniformMatrix4fvStreamTextureMatrixCHROMIUM (GLintUniformLocation location, GLbooleanFalseOnly transpose, const GLfloat* transform);
+GL_APICALL void GL_APIENTRY glOverlayPromotionHintCHROMIUM (GLidBindTexture texture, GLboolean promotion_hint, GLint display_x, GLint display_y);
GL_APICALL void GL_APIENTRY glSwapBuffersWithDamageCHROMIUM (GLint x, GLint y, GLint width, GLint height);
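Illustrative GL-level calls matching the extended signatures above; texture ids, mip levels, and coordinates are made up:

  // Copy mip level 0 of one texture into mip level 1 of another.
  glCopyTextureCHROMIUM(source_texture_id, /*source_level=*/0,
                        dest_texture_id, /*dest_level=*/1,
                        GL_RGBA, GL_UNSIGNED_BYTE,
                        /*unpack_flip_y=*/GL_FALSE,
                        /*unpack_premultiply_alpha=*/GL_FALSE,
                        /*unpack_unmultiply_alpha=*/GL_FALSE);
  // Provide an overlay promotion hint and on-screen position for a texture.
  glOverlayPromotionHintCHROMIUM(overlay_texture_id, GL_TRUE, 16, 32);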
diff --git a/chromium/gpu/command_buffer/common/buffer.cc b/chromium/gpu/command_buffer/common/buffer.cc
index 8b48b4a4565..13b606c3286 100644
--- a/chromium/gpu/command_buffer/common/buffer.cc
+++ b/chromium/gpu/command_buffer/common/buffer.cc
@@ -13,6 +13,11 @@
#include "base/strings/stringprintf.h"
namespace gpu {
+
+bool BufferBacking::is_shared() const {
+ return false;
+}
+
SharedMemoryBufferBacking::SharedMemoryBufferBacking(
std::unique_ptr<base::SharedMemory> shared_memory,
size_t size)
@@ -20,6 +25,10 @@ SharedMemoryBufferBacking::SharedMemoryBufferBacking(
SharedMemoryBufferBacking::~SharedMemoryBufferBacking() {}
+bool SharedMemoryBufferBacking::is_shared() const {
+ return true;
+}
+
void* SharedMemoryBufferBacking::GetMemory() const {
return shared_memory_->memory();
}
diff --git a/chromium/gpu/command_buffer/common/buffer.h b/chromium/gpu/command_buffer/common/buffer.h
index 180779317b6..d4de9175471 100644
--- a/chromium/gpu/command_buffer/common/buffer.h
+++ b/chromium/gpu/command_buffer/common/buffer.h
@@ -25,6 +25,7 @@ namespace gpu {
class GPU_EXPORT BufferBacking {
public:
virtual ~BufferBacking() {}
+ virtual bool is_shared() const;
virtual void* GetMemory() const = 0;
virtual size_t GetSize() const = 0;
};
@@ -34,6 +35,7 @@ class GPU_EXPORT SharedMemoryBufferBacking : public BufferBacking {
SharedMemoryBufferBacking(std::unique_ptr<base::SharedMemory> shared_memory,
size_t size);
~SharedMemoryBufferBacking() override;
+ bool is_shared() const override;
void* GetMemory() const override;
size_t GetSize() const override;
base::SharedMemory* shared_memory() { return shared_memory_.get(); }
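A small sketch of how a consumer might use the new is_shared() query; it assumes gpu::Buffer exposes its BufferBacking through a backing() accessor:

  bool CanShareAcrossProcesses(const scoped_refptr<gpu::Buffer>& buffer) {
    // True only for SharedMemoryBufferBacking; other backings are process-local.
    return buffer->backing()->is_shared();
  }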
diff --git a/chromium/gpu/command_buffer/common/capabilities.cc b/chromium/gpu/command_buffer/common/capabilities.cc
index 397962e7ba8..2098ac641c1 100644
--- a/chromium/gpu/command_buffer/common/capabilities.cc
+++ b/chromium/gpu/command_buffer/common/capabilities.cc
@@ -20,6 +20,8 @@ Capabilities::Capabilities()
max_vertex_attribs(0),
max_vertex_texture_image_units(0),
max_vertex_uniform_vectors(0),
+ max_viewport_width(0),
+ max_viewport_height(0),
num_compressed_texture_formats(0),
num_shader_binary_formats(0),
bind_generates_resource_chromium(0),
diff --git a/chromium/gpu/command_buffer/common/capabilities.h b/chromium/gpu/command_buffer/common/capabilities.h
index 30cedc8d3f0..e4ff124b874 100644
--- a/chromium/gpu/command_buffer/common/capabilities.h
+++ b/chromium/gpu/command_buffer/common/capabilities.h
@@ -78,6 +78,9 @@ struct GPU_EXPORT Capabilities {
int max_vertex_attribs;
int max_vertex_texture_image_units;
int max_vertex_uniform_vectors;
+ // MAX_VIEWPORT_DIMS[2]
+ int max_viewport_width;
+ int max_viewport_height;
int num_compressed_texture_formats;
int num_shader_binary_formats;
int bind_generates_resource_chromium;
diff --git a/chromium/gpu/command_buffer/common/cmd_buffer_common.cc b/chromium/gpu/command_buffer/common/cmd_buffer_common.cc
index c9f4b164eee..bdb08f32cb9 100644
--- a/chromium/gpu/command_buffer/common/cmd_buffer_common.cc
+++ b/chromium/gpu/command_buffer/common/cmd_buffer_common.cc
@@ -32,17 +32,6 @@ const char* GetCommandName(CommandId command_id) {
}
} // namespace cmd
-
-#if !defined(NACL_WIN64)
-// TODO(apatrick): this is a temporary optimization while skia is calling
-// RendererGLContext::MakeCurrent prior to every GL call. It saves returning 6
-// ints redundantly when only the error is needed for the CommandBufferProxy
-// implementation.
-error::Error CommandBuffer::GetLastError() {
- return GetLastState().error;
-}
-#endif
-
} // namespace gpu
diff --git a/chromium/gpu/command_buffer/common/command_buffer.h b/chromium/gpu/command_buffer/common/command_buffer.h
index dc3f7f7850a..789c6f8965a 100644
--- a/chromium/gpu/command_buffer/common/command_buffer.h
+++ b/chromium/gpu/command_buffer/common/command_buffer.h
@@ -13,10 +13,6 @@
#include "gpu/command_buffer/common/constants.h"
#include "gpu/gpu_export.h"
-namespace base {
-class SharedMemory;
-}
-
namespace gpu {
// Common interface for CommandBuffer implementations.
@@ -26,6 +22,7 @@ class GPU_EXPORT CommandBuffer {
State()
: get_offset(0),
token(-1),
+ release_count(0),
error(error::kNoError),
context_lost_reason(error::kUnknown),
generation(0) {
@@ -41,6 +38,10 @@ class GPU_EXPORT CommandBuffer {
// embedded in the command buffer. The default token value is zero.
int32_t token;
+ // The fence sync release count. Incremented by InsertFenceSync commands.
+ // Used by the client to monitor sync token progress.
+ uint64_t release_count;
+
// Error status.
error::Error error;
@@ -78,13 +79,6 @@ class GPU_EXPORT CommandBuffer {
// Returns the last state without synchronizing with the service.
virtual State GetLastState() = 0;
- // Returns the last token without synchronizing with the service. Note that
- // while you could just call GetLastState().token, GetLastState needs to be
- // fast as it is called for every command where GetLastToken is only called
- // by code that needs to know the last token so it can be slower but more up
- // to date than GetLastState.
- virtual int32_t GetLastToken() = 0;
-
// The writer calls this to update its put offset. This ensures the reader
// sees the latest added commands, and will eventually process them. On the
// service side, commands are processed up to the given put_offset before
@@ -98,11 +92,11 @@ class GPU_EXPORT CommandBuffer {
// The writer calls this to wait until the current token is within a
// specific range, inclusive. Can return early if an error is generated.
- virtual void WaitForTokenInRange(int32_t start, int32_t end) = 0;
+ virtual State WaitForTokenInRange(int32_t start, int32_t end) = 0;
// The writer calls this to wait until the current get offset is within a
// specific range, inclusive. Can return early if an error is generated.
- virtual void WaitForGetOffsetInRange(int32_t start, int32_t end) = 0;
+ virtual State WaitForGetOffsetInRange(int32_t start, int32_t end) = 0;
// Sets the buffer commands are read from.
// Also resets the get and put offsets to 0.
@@ -116,17 +110,6 @@ class GPU_EXPORT CommandBuffer {
// Destroy a transfer buffer. The ID must be positive.
virtual void DestroyTransferBuffer(int32_t id) = 0;
-// The NaCl Win64 build only really needs the struct definitions above; having
-// GetLastError declared would mean we'd have to also define it, and pull more
-// of gpu in to the NaCl Win64 build.
-#if !defined(NACL_WIN64)
- // TODO(apatrick): this is a temporary optimization while skia is calling
- // RendererGLContext::MakeCurrent prior to every GL call. It saves returning 6
- // ints redundantly when only the error is needed for the CommandBufferProxy
- // implementation.
- virtual error::Error GetLastError();
-#endif
-
private:
DISALLOW_COPY_AND_ASSIGN(CommandBuffer);
};
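A short sketch of the revised wait contract above, where the wait calls now return the latest State instead of relying on the removed GetLastToken()/GetLastError(); command_buffer is a CommandBuffer pointer and the token range is illustrative:

  CommandBuffer::State state =
      command_buffer->WaitForTokenInRange(start_token, end_token);
  if (state.error != gpu::error::kNoError) {
    // The wait returned early; stop issuing commands and handle context loss.
  }
  // state.release_count can be compared against a fence sync release value to
  // monitor how far the service has progressed.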
diff --git a/chromium/gpu/command_buffer/common/command_buffer_mock.h b/chromium/gpu/command_buffer/common/command_buffer_mock.h
index 6ece2d051dc..ff17e50f462 100644
--- a/chromium/gpu/command_buffer/common/command_buffer_mock.h
+++ b/chromium/gpu/command_buffer/common/command_buffer_mock.h
@@ -12,10 +12,6 @@
#include "gpu/command_buffer/service/command_buffer_service.h"
#include "testing/gmock/include/gmock/gmock.h"
-namespace base {
-class SharedMemory;
-}
-
namespace gpu {
// An NPObject that implements a shared memory command buffer and a synchronous
@@ -29,10 +25,11 @@ class MockCommandBuffer : public CommandBufferServiceBase {
MOCK_METHOD0(GetLastToken, int32_t());
MOCK_METHOD1(Flush, void(int32_t put_offset));
MOCK_METHOD1(OrderingBarrier, void(int32_t put_offset));
- MOCK_METHOD2(WaitForTokenInRange, void(int32_t start, int32_t end));
- MOCK_METHOD2(WaitForGetOffsetInRange, void(int32_t start, int32_t end));
+ MOCK_METHOD2(WaitForTokenInRange, State(int32_t start, int32_t end));
+ MOCK_METHOD2(WaitForGetOffsetInRange, State(int32_t start, int32_t end));
MOCK_METHOD1(SetGetBuffer, void(int32_t transfer_buffer_id));
MOCK_METHOD1(SetGetOffset, void(int32_t get_offset));
+ MOCK_METHOD1(SetReleaseCount, void(uint64_t release_count));
MOCK_METHOD2(CreateTransferBuffer,
scoped_refptr<gpu::Buffer>(size_t size, int32_t* id));
MOCK_METHOD1(DestroyTransferBuffer, void(int32_t id));
diff --git a/chromium/gpu/command_buffer/common/gles2_cmd_format_autogen.h b/chromium/gpu/command_buffer/common/gles2_cmd_format_autogen.h
index ff02094ec04..c701dd4de33 100644
--- a/chromium/gpu/command_buffer/common/gles2_cmd_format_autogen.h
+++ b/chromium/gpu/command_buffer/common/gles2_cmd_format_autogen.h
@@ -12445,7 +12445,9 @@ struct CopyTextureCHROMIUM {
void SetHeader() { header.SetCmd<ValueType>(); }
void Init(GLenum _source_id,
+ GLint _source_level,
GLenum _dest_id,
+ GLint _dest_level,
GLint _internalformat,
GLenum _dest_type,
GLboolean _unpack_flip_y,
@@ -12453,7 +12455,9 @@ struct CopyTextureCHROMIUM {
GLboolean _unpack_unmultiply_alpha) {
SetHeader();
source_id = _source_id;
+ source_level = _source_level;
dest_id = _dest_id;
+ dest_level = _dest_level;
internalformat = _internalformat;
dest_type = _dest_type;
unpack_flip_y = _unpack_flip_y;
@@ -12463,21 +12467,26 @@ struct CopyTextureCHROMIUM {
void* Set(void* cmd,
GLenum _source_id,
+ GLint _source_level,
GLenum _dest_id,
+ GLint _dest_level,
GLint _internalformat,
GLenum _dest_type,
GLboolean _unpack_flip_y,
GLboolean _unpack_premultiply_alpha,
GLboolean _unpack_unmultiply_alpha) {
static_cast<ValueType*>(cmd)->Init(
- _source_id, _dest_id, _internalformat, _dest_type, _unpack_flip_y,
- _unpack_premultiply_alpha, _unpack_unmultiply_alpha);
+ _source_id, _source_level, _dest_id, _dest_level, _internalformat,
+ _dest_type, _unpack_flip_y, _unpack_premultiply_alpha,
+ _unpack_unmultiply_alpha);
return NextCmdAddress<ValueType>(cmd);
}
gpu::CommandHeader header;
uint32_t source_id;
+ int32_t source_level;
uint32_t dest_id;
+ int32_t dest_level;
int32_t internalformat;
uint32_t dest_type;
uint32_t unpack_flip_y;
@@ -12485,26 +12494,30 @@ struct CopyTextureCHROMIUM {
uint32_t unpack_unmultiply_alpha;
};
-static_assert(sizeof(CopyTextureCHROMIUM) == 32,
- "size of CopyTextureCHROMIUM should be 32");
+static_assert(sizeof(CopyTextureCHROMIUM) == 40,
+ "size of CopyTextureCHROMIUM should be 40");
static_assert(offsetof(CopyTextureCHROMIUM, header) == 0,
"offset of CopyTextureCHROMIUM header should be 0");
static_assert(offsetof(CopyTextureCHROMIUM, source_id) == 4,
"offset of CopyTextureCHROMIUM source_id should be 4");
-static_assert(offsetof(CopyTextureCHROMIUM, dest_id) == 8,
- "offset of CopyTextureCHROMIUM dest_id should be 8");
-static_assert(offsetof(CopyTextureCHROMIUM, internalformat) == 12,
- "offset of CopyTextureCHROMIUM internalformat should be 12");
-static_assert(offsetof(CopyTextureCHROMIUM, dest_type) == 16,
- "offset of CopyTextureCHROMIUM dest_type should be 16");
-static_assert(offsetof(CopyTextureCHROMIUM, unpack_flip_y) == 20,
- "offset of CopyTextureCHROMIUM unpack_flip_y should be 20");
-static_assert(
- offsetof(CopyTextureCHROMIUM, unpack_premultiply_alpha) == 24,
- "offset of CopyTextureCHROMIUM unpack_premultiply_alpha should be 24");
-static_assert(
- offsetof(CopyTextureCHROMIUM, unpack_unmultiply_alpha) == 28,
- "offset of CopyTextureCHROMIUM unpack_unmultiply_alpha should be 28");
+static_assert(offsetof(CopyTextureCHROMIUM, source_level) == 8,
+ "offset of CopyTextureCHROMIUM source_level should be 8");
+static_assert(offsetof(CopyTextureCHROMIUM, dest_id) == 12,
+ "offset of CopyTextureCHROMIUM dest_id should be 12");
+static_assert(offsetof(CopyTextureCHROMIUM, dest_level) == 16,
+ "offset of CopyTextureCHROMIUM dest_level should be 16");
+static_assert(offsetof(CopyTextureCHROMIUM, internalformat) == 20,
+ "offset of CopyTextureCHROMIUM internalformat should be 20");
+static_assert(offsetof(CopyTextureCHROMIUM, dest_type) == 24,
+ "offset of CopyTextureCHROMIUM dest_type should be 24");
+static_assert(offsetof(CopyTextureCHROMIUM, unpack_flip_y) == 28,
+ "offset of CopyTextureCHROMIUM unpack_flip_y should be 28");
+static_assert(
+ offsetof(CopyTextureCHROMIUM, unpack_premultiply_alpha) == 32,
+ "offset of CopyTextureCHROMIUM unpack_premultiply_alpha should be 32");
+static_assert(
+ offsetof(CopyTextureCHROMIUM, unpack_unmultiply_alpha) == 36,
+ "offset of CopyTextureCHROMIUM unpack_unmultiply_alpha should be 36");
struct CopySubTextureCHROMIUM {
typedef CopySubTextureCHROMIUM ValueType;
@@ -12519,7 +12532,9 @@ struct CopySubTextureCHROMIUM {
void SetHeader() { header.SetCmd<ValueType>(); }
void Init(GLenum _source_id,
+ GLint _source_level,
GLenum _dest_id,
+ GLint _dest_level,
GLint _xoffset,
GLint _yoffset,
GLint _x,
@@ -12531,7 +12546,9 @@ struct CopySubTextureCHROMIUM {
GLboolean _unpack_unmultiply_alpha) {
SetHeader();
source_id = _source_id;
+ source_level = _source_level;
dest_id = _dest_id;
+ dest_level = _dest_level;
xoffset = _xoffset;
yoffset = _yoffset;
x = _x;
@@ -12545,7 +12562,9 @@ struct CopySubTextureCHROMIUM {
void* Set(void* cmd,
GLenum _source_id,
+ GLint _source_level,
GLenum _dest_id,
+ GLint _dest_level,
GLint _xoffset,
GLint _yoffset,
GLint _x,
@@ -12556,14 +12575,17 @@ struct CopySubTextureCHROMIUM {
GLboolean _unpack_premultiply_alpha,
GLboolean _unpack_unmultiply_alpha) {
static_cast<ValueType*>(cmd)->Init(
- _source_id, _dest_id, _xoffset, _yoffset, _x, _y, _width, _height,
- _unpack_flip_y, _unpack_premultiply_alpha, _unpack_unmultiply_alpha);
+ _source_id, _source_level, _dest_id, _dest_level, _xoffset, _yoffset,
+ _x, _y, _width, _height, _unpack_flip_y, _unpack_premultiply_alpha,
+ _unpack_unmultiply_alpha);
return NextCmdAddress<ValueType>(cmd);
}
gpu::CommandHeader header;
uint32_t source_id;
+ int32_t source_level;
uint32_t dest_id;
+ int32_t dest_level;
int32_t xoffset;
int32_t yoffset;
int32_t x;
@@ -12575,34 +12597,38 @@ struct CopySubTextureCHROMIUM {
uint32_t unpack_unmultiply_alpha;
};
-static_assert(sizeof(CopySubTextureCHROMIUM) == 48,
- "size of CopySubTextureCHROMIUM should be 48");
+static_assert(sizeof(CopySubTextureCHROMIUM) == 56,
+ "size of CopySubTextureCHROMIUM should be 56");
static_assert(offsetof(CopySubTextureCHROMIUM, header) == 0,
"offset of CopySubTextureCHROMIUM header should be 0");
static_assert(offsetof(CopySubTextureCHROMIUM, source_id) == 4,
"offset of CopySubTextureCHROMIUM source_id should be 4");
-static_assert(offsetof(CopySubTextureCHROMIUM, dest_id) == 8,
- "offset of CopySubTextureCHROMIUM dest_id should be 8");
-static_assert(offsetof(CopySubTextureCHROMIUM, xoffset) == 12,
- "offset of CopySubTextureCHROMIUM xoffset should be 12");
-static_assert(offsetof(CopySubTextureCHROMIUM, yoffset) == 16,
- "offset of CopySubTextureCHROMIUM yoffset should be 16");
-static_assert(offsetof(CopySubTextureCHROMIUM, x) == 20,
- "offset of CopySubTextureCHROMIUM x should be 20");
-static_assert(offsetof(CopySubTextureCHROMIUM, y) == 24,
- "offset of CopySubTextureCHROMIUM y should be 24");
-static_assert(offsetof(CopySubTextureCHROMIUM, width) == 28,
- "offset of CopySubTextureCHROMIUM width should be 28");
-static_assert(offsetof(CopySubTextureCHROMIUM, height) == 32,
- "offset of CopySubTextureCHROMIUM height should be 32");
-static_assert(offsetof(CopySubTextureCHROMIUM, unpack_flip_y) == 36,
- "offset of CopySubTextureCHROMIUM unpack_flip_y should be 36");
-static_assert(
- offsetof(CopySubTextureCHROMIUM, unpack_premultiply_alpha) == 40,
- "offset of CopySubTextureCHROMIUM unpack_premultiply_alpha should be 40");
-static_assert(
- offsetof(CopySubTextureCHROMIUM, unpack_unmultiply_alpha) == 44,
- "offset of CopySubTextureCHROMIUM unpack_unmultiply_alpha should be 44");
+static_assert(offsetof(CopySubTextureCHROMIUM, source_level) == 8,
+ "offset of CopySubTextureCHROMIUM source_level should be 8");
+static_assert(offsetof(CopySubTextureCHROMIUM, dest_id) == 12,
+ "offset of CopySubTextureCHROMIUM dest_id should be 12");
+static_assert(offsetof(CopySubTextureCHROMIUM, dest_level) == 16,
+ "offset of CopySubTextureCHROMIUM dest_level should be 16");
+static_assert(offsetof(CopySubTextureCHROMIUM, xoffset) == 20,
+ "offset of CopySubTextureCHROMIUM xoffset should be 20");
+static_assert(offsetof(CopySubTextureCHROMIUM, yoffset) == 24,
+ "offset of CopySubTextureCHROMIUM yoffset should be 24");
+static_assert(offsetof(CopySubTextureCHROMIUM, x) == 28,
+ "offset of CopySubTextureCHROMIUM x should be 28");
+static_assert(offsetof(CopySubTextureCHROMIUM, y) == 32,
+ "offset of CopySubTextureCHROMIUM y should be 32");
+static_assert(offsetof(CopySubTextureCHROMIUM, width) == 36,
+ "offset of CopySubTextureCHROMIUM width should be 36");
+static_assert(offsetof(CopySubTextureCHROMIUM, height) == 40,
+ "offset of CopySubTextureCHROMIUM height should be 40");
+static_assert(offsetof(CopySubTextureCHROMIUM, unpack_flip_y) == 44,
+ "offset of CopySubTextureCHROMIUM unpack_flip_y should be 44");
+static_assert(
+ offsetof(CopySubTextureCHROMIUM, unpack_premultiply_alpha) == 48,
+ "offset of CopySubTextureCHROMIUM unpack_premultiply_alpha should be 48");
+static_assert(
+ offsetof(CopySubTextureCHROMIUM, unpack_unmultiply_alpha) == 52,
+ "offset of CopySubTextureCHROMIUM unpack_unmultiply_alpha should be 52");
struct CompressedCopyTextureCHROMIUM {
typedef CompressedCopyTextureCHROMIUM ValueType;
@@ -15553,6 +15579,60 @@ static_assert(offsetof(UniformMatrix4fvStreamTextureMatrixCHROMIUMImmediate,
"offset of UniformMatrix4fvStreamTextureMatrixCHROMIUMImmediate "
"transpose should be 8");
+struct OverlayPromotionHintCHROMIUM {
+ typedef OverlayPromotionHintCHROMIUM ValueType;
+ static const CommandId kCmdId = kOverlayPromotionHintCHROMIUM;
+ static const cmd::ArgFlags kArgFlags = cmd::kFixed;
+ static const uint8_t cmd_flags = CMD_FLAG_SET_TRACE_LEVEL(3);
+
+ static uint32_t ComputeSize() {
+ return static_cast<uint32_t>(sizeof(ValueType)); // NOLINT
+ }
+
+ void SetHeader() { header.SetCmd<ValueType>(); }
+
+ void Init(GLuint _texture,
+ GLboolean _promotion_hint,
+ GLint _display_x,
+ GLint _display_y) {
+ SetHeader();
+ texture = _texture;
+ promotion_hint = _promotion_hint;
+ display_x = _display_x;
+ display_y = _display_y;
+ }
+
+ void* Set(void* cmd,
+ GLuint _texture,
+ GLboolean _promotion_hint,
+ GLint _display_x,
+ GLint _display_y) {
+ static_cast<ValueType*>(cmd)->Init(_texture, _promotion_hint, _display_x,
+ _display_y);
+ return NextCmdAddress<ValueType>(cmd);
+ }
+
+ gpu::CommandHeader header;
+ uint32_t texture;
+ uint32_t promotion_hint;
+ int32_t display_x;
+ int32_t display_y;
+};
+
+static_assert(sizeof(OverlayPromotionHintCHROMIUM) == 20,
+ "size of OverlayPromotionHintCHROMIUM should be 20");
+static_assert(offsetof(OverlayPromotionHintCHROMIUM, header) == 0,
+ "offset of OverlayPromotionHintCHROMIUM header should be 0");
+static_assert(offsetof(OverlayPromotionHintCHROMIUM, texture) == 4,
+ "offset of OverlayPromotionHintCHROMIUM texture should be 4");
+static_assert(
+ offsetof(OverlayPromotionHintCHROMIUM, promotion_hint) == 8,
+ "offset of OverlayPromotionHintCHROMIUM promotion_hint should be 8");
+static_assert(offsetof(OverlayPromotionHintCHROMIUM, display_x) == 12,
+ "offset of OverlayPromotionHintCHROMIUM display_x should be 12");
+static_assert(offsetof(OverlayPromotionHintCHROMIUM, display_y) == 16,
+ "offset of OverlayPromotionHintCHROMIUM display_y should be 16");
+
struct SwapBuffersWithDamageCHROMIUM {
typedef SwapBuffersWithDamageCHROMIUM ValueType;
static const CommandId kCmdId = kSwapBuffersWithDamageCHROMIUM;
diff --git a/chromium/gpu/command_buffer/common/gles2_cmd_format_test_autogen.h b/chromium/gpu/command_buffer/common/gles2_cmd_format_test_autogen.h
index 68f1dce5995..7dacd2ff73c 100644
--- a/chromium/gpu/command_buffer/common/gles2_cmd_format_test_autogen.h
+++ b/chromium/gpu/command_buffer/common/gles2_cmd_format_test_autogen.h
@@ -4159,21 +4159,23 @@ TEST_F(GLES2FormatTest, PostSubBufferCHROMIUM) {
TEST_F(GLES2FormatTest, CopyTextureCHROMIUM) {
cmds::CopyTextureCHROMIUM& cmd = *GetBufferAs<cmds::CopyTextureCHROMIUM>();
- void* next_cmd =
- cmd.Set(&cmd, static_cast<GLenum>(11), static_cast<GLenum>(12),
- static_cast<GLint>(13), static_cast<GLenum>(14),
- static_cast<GLboolean>(15), static_cast<GLboolean>(16),
- static_cast<GLboolean>(17));
+ void* next_cmd = cmd.Set(
+ &cmd, static_cast<GLenum>(11), static_cast<GLint>(12),
+ static_cast<GLenum>(13), static_cast<GLint>(14), static_cast<GLint>(15),
+ static_cast<GLenum>(16), static_cast<GLboolean>(17),
+ static_cast<GLboolean>(18), static_cast<GLboolean>(19));
EXPECT_EQ(static_cast<uint32_t>(cmds::CopyTextureCHROMIUM::kCmdId),
cmd.header.command);
EXPECT_EQ(sizeof(cmd), cmd.header.size * 4u);
EXPECT_EQ(static_cast<GLenum>(11), cmd.source_id);
- EXPECT_EQ(static_cast<GLenum>(12), cmd.dest_id);
- EXPECT_EQ(static_cast<GLint>(13), cmd.internalformat);
- EXPECT_EQ(static_cast<GLenum>(14), cmd.dest_type);
- EXPECT_EQ(static_cast<GLboolean>(15), cmd.unpack_flip_y);
- EXPECT_EQ(static_cast<GLboolean>(16), cmd.unpack_premultiply_alpha);
- EXPECT_EQ(static_cast<GLboolean>(17), cmd.unpack_unmultiply_alpha);
+ EXPECT_EQ(static_cast<GLint>(12), cmd.source_level);
+ EXPECT_EQ(static_cast<GLenum>(13), cmd.dest_id);
+ EXPECT_EQ(static_cast<GLint>(14), cmd.dest_level);
+ EXPECT_EQ(static_cast<GLint>(15), cmd.internalformat);
+ EXPECT_EQ(static_cast<GLenum>(16), cmd.dest_type);
+ EXPECT_EQ(static_cast<GLboolean>(17), cmd.unpack_flip_y);
+ EXPECT_EQ(static_cast<GLboolean>(18), cmd.unpack_premultiply_alpha);
+ EXPECT_EQ(static_cast<GLboolean>(19), cmd.unpack_unmultiply_alpha);
CheckBytesWrittenMatchesExpectedSize(next_cmd, sizeof(cmd));
}
@@ -4181,25 +4183,28 @@ TEST_F(GLES2FormatTest, CopySubTextureCHROMIUM) {
cmds::CopySubTextureCHROMIUM& cmd =
*GetBufferAs<cmds::CopySubTextureCHROMIUM>();
void* next_cmd = cmd.Set(
- &cmd, static_cast<GLenum>(11), static_cast<GLenum>(12),
- static_cast<GLint>(13), static_cast<GLint>(14), static_cast<GLint>(15),
- static_cast<GLint>(16), static_cast<GLsizei>(17),
- static_cast<GLsizei>(18), static_cast<GLboolean>(19),
- static_cast<GLboolean>(20), static_cast<GLboolean>(21));
+ &cmd, static_cast<GLenum>(11), static_cast<GLint>(12),
+ static_cast<GLenum>(13), static_cast<GLint>(14), static_cast<GLint>(15),
+ static_cast<GLint>(16), static_cast<GLint>(17), static_cast<GLint>(18),
+ static_cast<GLsizei>(19), static_cast<GLsizei>(20),
+ static_cast<GLboolean>(21), static_cast<GLboolean>(22),
+ static_cast<GLboolean>(23));
EXPECT_EQ(static_cast<uint32_t>(cmds::CopySubTextureCHROMIUM::kCmdId),
cmd.header.command);
EXPECT_EQ(sizeof(cmd), cmd.header.size * 4u);
EXPECT_EQ(static_cast<GLenum>(11), cmd.source_id);
- EXPECT_EQ(static_cast<GLenum>(12), cmd.dest_id);
- EXPECT_EQ(static_cast<GLint>(13), cmd.xoffset);
- EXPECT_EQ(static_cast<GLint>(14), cmd.yoffset);
- EXPECT_EQ(static_cast<GLint>(15), cmd.x);
- EXPECT_EQ(static_cast<GLint>(16), cmd.y);
- EXPECT_EQ(static_cast<GLsizei>(17), cmd.width);
- EXPECT_EQ(static_cast<GLsizei>(18), cmd.height);
- EXPECT_EQ(static_cast<GLboolean>(19), cmd.unpack_flip_y);
- EXPECT_EQ(static_cast<GLboolean>(20), cmd.unpack_premultiply_alpha);
- EXPECT_EQ(static_cast<GLboolean>(21), cmd.unpack_unmultiply_alpha);
+ EXPECT_EQ(static_cast<GLint>(12), cmd.source_level);
+ EXPECT_EQ(static_cast<GLenum>(13), cmd.dest_id);
+ EXPECT_EQ(static_cast<GLint>(14), cmd.dest_level);
+ EXPECT_EQ(static_cast<GLint>(15), cmd.xoffset);
+ EXPECT_EQ(static_cast<GLint>(16), cmd.yoffset);
+ EXPECT_EQ(static_cast<GLint>(17), cmd.x);
+ EXPECT_EQ(static_cast<GLint>(18), cmd.y);
+ EXPECT_EQ(static_cast<GLsizei>(19), cmd.width);
+ EXPECT_EQ(static_cast<GLsizei>(20), cmd.height);
+ EXPECT_EQ(static_cast<GLboolean>(21), cmd.unpack_flip_y);
+ EXPECT_EQ(static_cast<GLboolean>(22), cmd.unpack_premultiply_alpha);
+ EXPECT_EQ(static_cast<GLboolean>(23), cmd.unpack_unmultiply_alpha);
CheckBytesWrittenMatchesExpectedSize(next_cmd, sizeof(cmd));
}
@@ -5214,6 +5219,22 @@ TEST_F(GLES2FormatTest, UniformMatrix4fvStreamTextureMatrixCHROMIUMImmediate) {
next_cmd, sizeof(cmd) + RoundSizeToMultipleOfEntries(sizeof(data)));
}
+TEST_F(GLES2FormatTest, OverlayPromotionHintCHROMIUM) {
+ cmds::OverlayPromotionHintCHROMIUM& cmd =
+ *GetBufferAs<cmds::OverlayPromotionHintCHROMIUM>();
+ void* next_cmd =
+ cmd.Set(&cmd, static_cast<GLuint>(11), static_cast<GLboolean>(12),
+ static_cast<GLint>(13), static_cast<GLint>(14));
+ EXPECT_EQ(static_cast<uint32_t>(cmds::OverlayPromotionHintCHROMIUM::kCmdId),
+ cmd.header.command);
+ EXPECT_EQ(sizeof(cmd), cmd.header.size * 4u);
+ EXPECT_EQ(static_cast<GLuint>(11), cmd.texture);
+ EXPECT_EQ(static_cast<GLboolean>(12), cmd.promotion_hint);
+ EXPECT_EQ(static_cast<GLint>(13), cmd.display_x);
+ EXPECT_EQ(static_cast<GLint>(14), cmd.display_y);
+ CheckBytesWrittenMatchesExpectedSize(next_cmd, sizeof(cmd));
+}
+
TEST_F(GLES2FormatTest, SwapBuffersWithDamageCHROMIUM) {
cmds::SwapBuffersWithDamageCHROMIUM& cmd =
*GetBufferAs<cmds::SwapBuffersWithDamageCHROMIUM>();
diff --git a/chromium/gpu/command_buffer/common/gles2_cmd_ids_autogen.h b/chromium/gpu/command_buffer/common/gles2_cmd_ids_autogen.h
index a69e5e9a8c6..3c92dc468c8 100644
--- a/chromium/gpu/command_buffer/common/gles2_cmd_ids_autogen.h
+++ b/chromium/gpu/command_buffer/common/gles2_cmd_ids_autogen.h
@@ -328,7 +328,8 @@
OP(BindFragDataLocationEXTBucket) /* 569 */ \
OP(GetFragDataIndexEXT) /* 570 */ \
OP(UniformMatrix4fvStreamTextureMatrixCHROMIUMImmediate) /* 571 */ \
- OP(SwapBuffersWithDamageCHROMIUM) /* 572 */
+ OP(OverlayPromotionHintCHROMIUM) /* 572 */ \
+ OP(SwapBuffersWithDamageCHROMIUM) /* 573 */
enum CommandId {
kOneBeforeStartPoint =
diff --git a/chromium/gpu/command_buffer/common/gles2_cmd_utils.cc b/chromium/gpu/command_buffer/common/gles2_cmd_utils.cc
index 817efc12436..7d97aeb6c50 100644
--- a/chromium/gpu/command_buffer/common/gles2_cmd_utils.cc
+++ b/chromium/gpu/command_buffer/common/gles2_cmd_utils.cc
@@ -428,6 +428,14 @@ int GLES2Util::GLGetNumValuesReturned(int id) const {
return 1;
case GL_TEXTURE_MAX_ANISOTROPY_EXT:
return 1;
+ case GL_TEXTURE_SWIZZLE_R:
+ return 1;
+ case GL_TEXTURE_SWIZZLE_G:
+ return 1;
+ case GL_TEXTURE_SWIZZLE_B:
+ return 1;
+ case GL_TEXTURE_SWIZZLE_A:
+ return 1;
// -- glGetVertexAttrib
case GL_VERTEX_ATTRIB_ARRAY_BUFFER_BINDING:
@@ -475,8 +483,39 @@ int GLES2Util::GLGetNumValuesReturned(int id) const {
namespace {
+// Return the number of bytes per element, based on the element type.
+int BytesPerElement(int type) {
+ switch (type) {
+ case GL_FLOAT_32_UNSIGNED_INT_24_8_REV:
+ return 8;
+ case GL_FLOAT:
+ case GL_UNSIGNED_INT_24_8_OES:
+ case GL_UNSIGNED_INT:
+ case GL_INT:
+ case GL_UNSIGNED_INT_2_10_10_10_REV:
+ case GL_UNSIGNED_INT_10F_11F_11F_REV:
+ case GL_UNSIGNED_INT_5_9_9_9_REV:
+ return 4;
+ case GL_HALF_FLOAT:
+ case GL_HALF_FLOAT_OES:
+ case GL_UNSIGNED_SHORT:
+ case GL_SHORT:
+ case GL_UNSIGNED_SHORT_5_6_5:
+ case GL_UNSIGNED_SHORT_4_4_4_4:
+ case GL_UNSIGNED_SHORT_5_5_5_1:
+ return 2;
+ case GL_UNSIGNED_BYTE:
+ case GL_BYTE:
+ return 1;
+ default:
+ return 0;
+ }
+}
+
+} // anonymous namespace
+
// Return the number of elements per group of a specified format.
-int ElementsPerGroup(int format, int type) {
+int GLES2Util::ElementsPerGroup(int format, int type) {
switch (type) {
case GL_UNSIGNED_SHORT_5_6_5:
case GL_UNSIGNED_SHORT_4_4_4_4:
@@ -521,37 +560,6 @@ int ElementsPerGroup(int format, int type) {
}
}
-// Return the number of bytes per element, based on the element type.
-int BytesPerElement(int type) {
- switch (type) {
- case GL_FLOAT_32_UNSIGNED_INT_24_8_REV:
- return 8;
- case GL_FLOAT:
- case GL_UNSIGNED_INT_24_8_OES:
- case GL_UNSIGNED_INT:
- case GL_INT:
- case GL_UNSIGNED_INT_2_10_10_10_REV:
- case GL_UNSIGNED_INT_10F_11F_11F_REV:
- case GL_UNSIGNED_INT_5_9_9_9_REV:
- return 4;
- case GL_HALF_FLOAT:
- case GL_HALF_FLOAT_OES:
- case GL_UNSIGNED_SHORT:
- case GL_SHORT:
- case GL_UNSIGNED_SHORT_5_6_5:
- case GL_UNSIGNED_SHORT_4_4_4_4:
- case GL_UNSIGNED_SHORT_5_5_5_1:
- return 2;
- case GL_UNSIGNED_BYTE:
- case GL_BYTE:
- return 1;
- default:
- return 0;
- }
-}
-
-} // anonymous namespace
-
uint32_t GLES2Util::ComputeImageGroupSize(int format, int type) {
int bytes_per_element = BytesPerElement(type);
DCHECK_GE(8, bytes_per_element);
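As a small worked example of the helpers above, assuming the group size is the product of ElementsPerGroup and BytesPerElement as the surrounding code suggests: GL_RGBA has 4 elements per group, so with GL_UNSIGNED_BYTE (1 byte) a group is 4 bytes and with GL_FLOAT (4 bytes) it is 16 bytes.

  // Illustrative values only; the per-format and per-type tables are the
  // switch statements shown above.
  uint32_t byte_group  = GLES2Util::ComputeImageGroupSize(GL_RGBA, GL_UNSIGNED_BYTE);  // 4 * 1 = 4
  uint32_t float_group = GLES2Util::ComputeImageGroupSize(GL_RGBA, GL_FLOAT);          // 4 * 4 = 16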
@@ -1951,4 +1959,3 @@ bool ContextCreationAttribHelper::Parse(const std::vector<int32_t>& attribs) {
} // namespace gles2
} // namespace gpu
-
diff --git a/chromium/gpu/command_buffer/common/gles2_cmd_utils.h b/chromium/gpu/command_buffer/common/gles2_cmd_utils.h
index fcd1c60d842..8d0f16baba0 100644
--- a/chromium/gpu/command_buffer/common/gles2_cmd_utils.h
+++ b/chromium/gpu/command_buffer/common/gles2_cmd_utils.h
@@ -15,6 +15,7 @@
#include <string>
#include <vector>
+#include "base/logging.h"
#include "base/macros.h"
#include "base/numerics/safe_math.h"
#include "gpu/command_buffer/common/gles2_utils_export.h"
@@ -139,6 +140,7 @@ class GLES2_UTILS_EXPORT GLES2Util {
// function is called. If 0 is returned the id is invalid.
int GLGetNumValuesReturned(int id) const;
+ static int ElementsPerGroup(int format, int type);
// Computes the size of a single group of elements from a format and type pair
static uint32_t ComputeImageGroupSize(int format, int type);
diff --git a/chromium/gpu/command_buffer/common/gles2_cmd_utils_autogen.h b/chromium/gpu/command_buffer/common/gles2_cmd_utils_autogen.h
index 64a72fc24d5..1775e7d1679 100644
--- a/chromium/gpu/command_buffer/common/gles2_cmd_utils_autogen.h
+++ b/chromium/gpu/command_buffer/common/gles2_cmd_utils_autogen.h
@@ -96,6 +96,7 @@ static std::string GetStringTextureSizedColorRenderableInternalFormat(
uint32_t value);
static std::string GetStringTextureSizedTextureFilterableInternalFormat(
uint32_t value);
+static std::string GetStringTextureSrgbDecodeExt(uint32_t value);
static std::string GetStringTextureStencilRenderableInternalFormat(
uint32_t value);
static std::string GetStringTextureSwizzle(uint32_t value);
diff --git a/chromium/gpu/command_buffer/common/gles2_cmd_utils_implementation_autogen.h b/chromium/gpu/command_buffer/common/gles2_cmd_utils_implementation_autogen.h
index 9d6b4a5c697..5a6325c80f8 100644
--- a/chromium/gpu/command_buffer/common/gles2_cmd_utils_implementation_autogen.h
+++ b/chromium/gpu/command_buffer/common/gles2_cmd_utils_implementation_autogen.h
@@ -4828,6 +4828,15 @@ std::string GLES2Util::GetStringTextureSizedTextureFilterableInternalFormat(
arraysize(string_table), value);
}
+std::string GLES2Util::GetStringTextureSrgbDecodeExt(uint32_t value) {
+ static const EnumToString string_table[] = {
+ {GL_DECODE_EXT, "GL_DECODE_EXT"},
+ {GL_SKIP_DECODE_EXT, "GL_SKIP_DECODE_EXT"},
+ };
+ return GLES2Util::GetQualifiedEnumString(string_table,
+ arraysize(string_table), value);
+}
+
std::string GLES2Util::GetStringTextureStencilRenderableInternalFormat(
uint32_t value) {
static const EnumToString string_table[] = {
diff --git a/chromium/gpu/command_buffer/common/unittest_main.cc b/chromium/gpu/command_buffer/common/unittest_main.cc
index 7fff7931f30..cba4c3e1791 100644
--- a/chromium/gpu/command_buffer/common/unittest_main.cc
+++ b/chromium/gpu/command_buffer/common/unittest_main.cc
@@ -10,10 +10,6 @@
#include "build/build_config.h"
#include "mojo/edk/embedder/embedder.h"
-#if defined(OS_ANDROID)
-#include "gpu/ipc/client/android/in_process_surface_texture_manager.h"
-#endif
-
namespace {
class GpuTestSuite : public base::TestSuite {
@@ -21,9 +17,6 @@ class GpuTestSuite : public base::TestSuite {
GpuTestSuite(int argc, char** argv);
~GpuTestSuite() override;
- protected:
- void Initialize() override;
-
private:
DISALLOW_COPY_AND_ASSIGN(GpuTestSuite);
};
@@ -33,14 +26,6 @@ GpuTestSuite::GpuTestSuite(int argc, char** argv)
GpuTestSuite::~GpuTestSuite() {}
-void GpuTestSuite::Initialize() {
- base::TestSuite::Initialize();
-#if defined(OS_ANDROID)
- gpu::SurfaceTextureManager::SetInstance(
- gpu::InProcessSurfaceTextureManager::GetInstance());
-#endif
-}
-
} // namespace
int main(int argc, char** argv) {
diff --git a/chromium/gpu/command_buffer/service/BUILD.gn b/chromium/gpu/command_buffer/service/BUILD.gn
index 4bb5d79470a..10f6a8e819b 100644
--- a/chromium/gpu/command_buffer/service/BUILD.gn
+++ b/chromium/gpu/command_buffer/service/BUILD.gn
@@ -166,6 +166,7 @@ target(link_target_type, "service_sources") {
"//gpu/command_buffer/common:gles2_utils",
"//gpu/config:config_sources",
"//gpu/ipc/common:surface_handle_type",
+ "//media:media_features",
"//third_party/angle:angle_image_util",
"//third_party/angle:commit_id",
"//third_party/angle:translator",
diff --git a/chromium/gpu/command_buffer/service/DEPS b/chromium/gpu/command_buffer/service/DEPS
new file mode 100644
index 00000000000..d1cd8cdbe07
--- /dev/null
+++ b/chromium/gpu/command_buffer/service/DEPS
@@ -0,0 +1,3 @@
+include_rules = [
+ "+media/media_features.h",
+]
diff --git a/chromium/gpu/command_buffer/service/client_service_map.h b/chromium/gpu/command_buffer/service/client_service_map.h
index b28f1f7b486..904a0951716 100644
--- a/chromium/gpu/command_buffer/service/client_service_map.h
+++ b/chromium/gpu/command_buffer/service/client_service_map.h
@@ -54,6 +54,24 @@ class ClientServiceMap {
return invalid_service_id();
}
+ bool GetClientID(ServiceType service_id, ClientType* client_id) const {
+ if (service_id == 0) {
+ if (client_id) {
+ *client_id = 0;
+ }
+ return true;
+ }
+ for (auto mapping : client_to_service_) {
+ if (mapping.second == service_id) {
+ if (client_id) {
+ *client_id = mapping.first;
+ }
+ return true;
+ }
+ }
+ return false;
+ }
+
ServiceType invalid_service_id() const {
return std::numeric_limits<ServiceType>::max();
}
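GetClientID() added above is a linear reverse lookup from a GL service id back to the client id, with service id 0 always mapping to client id 0 and a null out-pointer acting as a pure existence check. A hedged usage sketch (the forward-mapping setter name is assumed):

// Sketch only; 'map' is a gpu::gles2::ClientServiceMap<GLuint, GLuint>.
ClientServiceMap<GLuint, GLuint> map;
map.SetIDMapping(/*client_id=*/3, /*service_id=*/17);  // assumed setter name

GLuint client_id = 0;
if (map.GetClientID(/*service_id=*/17, &client_id)) {
  // client_id is now 3.  Passing nullptr only tests existence, and a
  // service id of 0 always maps back to client id 0.
}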
diff --git a/chromium/gpu/command_buffer/service/command_buffer_service.cc b/chromium/gpu/command_buffer/service/command_buffer_service.cc
index ec9facb2333..3e6a92c320b 100644
--- a/chromium/gpu/command_buffer/service/command_buffer_service.cc
+++ b/chromium/gpu/command_buffer/service/command_buffer_service.cc
@@ -42,24 +42,24 @@ class MemoryBufferBacking : public BufferBacking {
CommandBufferService::CommandBufferService(
TransferBufferManagerInterface* transfer_buffer_manager)
: ring_buffer_id_(-1),
- shared_state_(NULL),
+ shared_state_(nullptr),
num_entries_(0),
get_offset_(0),
put_offset_(0),
transfer_buffer_manager_(transfer_buffer_manager),
token_(0),
+ release_count_(0),
generation_(0),
error_(error::kNoError),
- context_lost_reason_(error::kUnknown) {
-}
+ context_lost_reason_(error::kUnknown) {}
-CommandBufferService::~CommandBufferService() {
-}
+CommandBufferService::~CommandBufferService() {}
CommandBufferService::State CommandBufferService::GetLastState() {
State state;
state.get_offset = get_offset_;
state.token = token_;
+ state.release_count = release_count_;
state.error = error_;
state.context_lost_reason = context_lost_reason_;
state.generation = ++generation_;
@@ -67,10 +67,6 @@ CommandBufferService::State CommandBufferService::GetLastState() {
return state;
}
-int32_t CommandBufferService::GetLastToken() {
- return GetLastState().token;
-}
-
void CommandBufferService::UpdateState() {
if (shared_state_) {
CommandBufferService::State state = GetLastState();
@@ -78,12 +74,17 @@ void CommandBufferService::UpdateState() {
}
}
-void CommandBufferService::WaitForTokenInRange(int32_t start, int32_t end) {
+CommandBuffer::State CommandBufferService::WaitForTokenInRange(int32_t start,
+ int32_t end) {
DCHECK(error_ != error::kNoError || InRange(start, end, token_));
+ return GetLastState();
}
-void CommandBufferService::WaitForGetOffsetInRange(int32_t start, int32_t end) {
+CommandBuffer::State CommandBufferService::WaitForGetOffsetInRange(
+ int32_t start,
+ int32_t end) {
DCHECK(error_ != error::kNoError || InRange(start, end, get_offset_));
+ return GetLastState();
}
void CommandBufferService::Flush(int32_t put_offset) {
@@ -136,6 +137,12 @@ void CommandBufferService::SetGetOffset(int32_t get_offset) {
get_offset_ = get_offset;
}
+void CommandBufferService::SetReleaseCount(uint64_t release_count) {
+ DCHECK(release_count >= release_count_);
+ release_count_ = release_count;
+ UpdateState();
+}
+
scoped_refptr<Buffer> CommandBufferService::CreateTransferBuffer(size_t size,
int32_t* id) {
static int32_t next_id = 1;
@@ -181,7 +188,6 @@ scoped_refptr<Buffer> CommandBufferService::CreateTransferBufferWithId(
return GetTransferBuffer(id);
}
-
void CommandBufferService::SetToken(int32_t token) {
token_ = token;
UpdateState();
diff --git a/chromium/gpu/command_buffer/service/command_buffer_service.h b/chromium/gpu/command_buffer/service/command_buffer_service.h
index 0d305bf2582..9e030db0456 100644
--- a/chromium/gpu/command_buffer/service/command_buffer_service.h
+++ b/chromium/gpu/command_buffer/service/command_buffer_service.h
@@ -24,6 +24,9 @@ class GPU_EXPORT CommandBufferServiceBase : public CommandBuffer {
// Sets the current get offset. This can be called from any thread.
virtual void SetGetOffset(int32_t get_offset) = 0;
+ // Set the release count for the last fence sync seen in the command stream.
+ virtual void SetReleaseCount(uint64_t release_count) = 0;
+
// Get the transfer buffer associated with an ID. Returns a null buffer for
// ID 0.
virtual scoped_refptr<gpu::Buffer> GetTransferBuffer(int32_t id) = 0;
@@ -54,17 +57,17 @@ class GPU_EXPORT CommandBufferService : public CommandBufferServiceBase {
// CommandBuffer implementation:
State GetLastState() override;
- int32_t GetLastToken() override;
void Flush(int32_t put_offset) override;
void OrderingBarrier(int32_t put_offset) override;
- void WaitForTokenInRange(int32_t start, int32_t end) override;
- void WaitForGetOffsetInRange(int32_t start, int32_t end) override;
+ State WaitForTokenInRange(int32_t start, int32_t end) override;
+ State WaitForGetOffsetInRange(int32_t start, int32_t end) override;
void SetGetBuffer(int32_t transfer_buffer_id) override;
scoped_refptr<Buffer> CreateTransferBuffer(size_t size, int32_t* id) override;
void DestroyTransferBuffer(int32_t id) override;
// CommandBufferServiceBase implementation:
void SetGetOffset(int32_t get_offset) override;
+ void SetReleaseCount(uint64_t release_count) override;
scoped_refptr<Buffer> GetTransferBuffer(int32_t id) override;
void SetToken(int32_t token) override;
void SetParseError(error::Error error) override;
@@ -110,6 +113,7 @@ class GPU_EXPORT CommandBufferService : public CommandBufferServiceBase {
base::Closure parse_error_callback_;
scoped_refptr<TransferBufferManagerInterface> transfer_buffer_manager_;
int32_t token_;
+ uint64_t release_count_;
uint32_t generation_;
error::Error error_;
error::ContextLostReason context_lost_reason_;
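WaitForTokenInRange() and WaitForGetOffsetInRange() now return the State snapshot directly, GetLastToken() is removed, and the state carries the new release_count field. A hedged sketch of the resulting calling pattern against any CommandBuffer implementation:

// Sketch of a caller; not Chromium code.
void WaitForToken(gpu::CommandBuffer* cb, int32_t token) {
  gpu::CommandBuffer::State state = cb->WaitForTokenInRange(token, token);
  if (state.error != gpu::error::kNoError) {
    // The context was lost while waiting; state.context_lost_reason says why.
    return;
  }
  // state.token is now inside the requested range, and state.release_count
  // holds the last fence-sync release recorded via SetReleaseCount().
}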
diff --git a/chromium/gpu/command_buffer/service/context_state.cc b/chromium/gpu/command_buffer/service/context_state.cc
index 94192a6129b..5aa24fbacd0 100644
--- a/chromium/gpu/command_buffer/service/context_state.cc
+++ b/chromium/gpu/command_buffer/service/context_state.cc
@@ -221,7 +221,6 @@ ContextState::ContextState(FeatureInfo* feature_info,
pack_reverse_row_order(false),
ignore_cached_state(false),
fbo_binding_for_scissor_workaround_dirty(false),
- framebuffer_srgb_(false),
feature_info_(feature_info),
error_state_(ErrorState::Create(error_state_client, logger)) {
Initialize();
@@ -505,15 +504,8 @@ void ContextState::RestoreState(const ContextState* prev_state) {
RestoreIndexedUniformBufferBindings(prev_state);
RestoreGlobalState(prev_state);
- if (!prev_state) {
- if (feature_info_->feature_flags().desktop_srgb_support) {
- framebuffer_srgb_ = false;
- glDisable(GL_FRAMEBUFFER_SRGB);
- }
- } else if (framebuffer_srgb_ != prev_state->framebuffer_srgb_) {
- // FRAMEBUFFER_SRGB will be restored lazily at render time.
- framebuffer_srgb_ = prev_state->framebuffer_srgb_;
- }
+ // FRAMEBUFFER_SRGB will be restored lazily at render time.
+ framebuffer_srgb_valid_ = false;
}
ErrorState* ContextState::GetErrorState() {
@@ -706,10 +698,11 @@ PixelStoreParams ContextState::GetUnpackParams(Dimension dimension) {
}
void ContextState::EnableDisableFramebufferSRGB(bool enable) {
- if (framebuffer_srgb_ == enable)
+ if (framebuffer_srgb_valid_ && framebuffer_srgb_ == enable)
return;
EnableDisable(GL_FRAMEBUFFER_SRGB, enable);
framebuffer_srgb_ = enable;
+ framebuffer_srgb_valid_ = true;
}
void ContextState::InitStateManual(const ContextState*) const {
diff --git a/chromium/gpu/command_buffer/service/context_state.h b/chromium/gpu/command_buffer/service/context_state.h
index d4505cf9217..6566974e620 100644
--- a/chromium/gpu/command_buffer/service/context_state.h
+++ b/chromium/gpu/command_buffer/service/context_state.h
@@ -26,7 +26,6 @@ class Buffer;
class ErrorState;
class ErrorStateClient;
class FeatureInfo;
-class Framebuffer;
class IndexedBufferBindingHost;
class Logger;
class Program;
@@ -355,7 +354,11 @@ struct GPU_EXPORT ContextState {
void InitStateManual(const ContextState* prev_state) const;
- bool framebuffer_srgb_;
+ // EnableDisableFramebufferSRGB is called at very high frequency. Cache the
+ // true value of FRAMEBUFFER_SRGB, if we know it, to elide some of these
+ // calls.
+ bool framebuffer_srgb_valid_ = false;
+ bool framebuffer_srgb_ = false;
// Generic vertex attrib base types: FLOAT, INT, or UINT.
// Each base type is encoded into 2 bits, the lowest 2 bits for location 0,
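framebuffer_srgb_ is now paired with a validity flag, so after RestoreState() the next EnableDisableFramebufferSRGB() call always reaches GL instead of trusting a possibly stale cache. A minimal, generic sketch of that cache-plus-validity pattern (not the ContextState code itself):

// Sketch: cached boolean GL state with an explicit validity bit.
struct CachedEnable {
  bool valid = false;
  bool value = false;

  // Returns true when the real GL call still needs to be made.
  bool Update(bool enable) {
    if (valid && value == enable)
      return false;            // state already matches; elide the GL call
    value = enable;
    valid = true;
    return true;
  }

  void Invalidate() { valid = false; }  // e.g. after a state restore
};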
diff --git a/chromium/gpu/command_buffer/service/context_state_autogen.h b/chromium/gpu/command_buffer/service/context_state_autogen.h
index c0da2cd1d8b..38fae6e48d5 100644
--- a/chromium/gpu/command_buffer/service/context_state_autogen.h
+++ b/chromium/gpu/command_buffer/service/context_state_autogen.h
@@ -22,6 +22,8 @@ struct EnableFlags {
bool cached_depth_test;
bool dither;
bool cached_dither;
+ bool framebuffer_srgb_ext;
+ bool cached_framebuffer_srgb_ext;
bool polygon_offset_fill;
bool cached_polygon_offset_fill;
bool sample_alpha_to_coverage;
@@ -143,6 +145,12 @@ inline void SetDeviceCapabilityState(GLenum cap, bool enable) {
return;
enable_flags.cached_dither = enable;
break;
+ case GL_FRAMEBUFFER_SRGB_EXT:
+ if (enable_flags.cached_framebuffer_srgb_ext == enable &&
+ !ignore_cached_state)
+ return;
+ enable_flags.cached_framebuffer_srgb_ext = enable;
+ break;
case GL_POLYGON_OFFSET_FILL:
if (enable_flags.cached_polygon_offset_fill == enable &&
!ignore_cached_state)
diff --git a/chromium/gpu/command_buffer/service/context_state_impl_autogen.h b/chromium/gpu/command_buffer/service/context_state_impl_autogen.h
index 6bc4a82eaf2..e3c39006d75 100644
--- a/chromium/gpu/command_buffer/service/context_state_impl_autogen.h
+++ b/chromium/gpu/command_buffer/service/context_state_impl_autogen.h
@@ -21,6 +21,8 @@ ContextState::EnableFlags::EnableFlags()
cached_depth_test(false),
dither(true),
cached_dither(true),
+ framebuffer_srgb_ext(true),
+ cached_framebuffer_srgb_ext(true),
polygon_offset_fill(false),
cached_polygon_offset_fill(false),
sample_alpha_to_coverage(false),
@@ -433,6 +435,8 @@ bool ContextState::GetEnabled(GLenum cap) const {
return enable_flags.depth_test;
case GL_DITHER:
return enable_flags.dither;
+ case GL_FRAMEBUFFER_SRGB_EXT:
+ return enable_flags.framebuffer_srgb_ext;
case GL_POLYGON_OFFSET_FILL:
return enable_flags.polygon_offset_fill;
case GL_SAMPLE_ALPHA_TO_COVERAGE:
@@ -835,6 +839,12 @@ bool ContextState::GetStateAsGLint(GLenum pname,
params[0] = static_cast<GLint>(enable_flags.dither);
}
return true;
+ case GL_FRAMEBUFFER_SRGB_EXT:
+ *num_written = 1;
+ if (params) {
+ params[0] = static_cast<GLint>(enable_flags.framebuffer_srgb_ext);
+ }
+ return true;
case GL_POLYGON_OFFSET_FILL:
*num_written = 1;
if (params) {
@@ -1269,6 +1279,12 @@ bool ContextState::GetStateAsGLfloat(GLenum pname,
params[0] = static_cast<GLfloat>(enable_flags.dither);
}
return true;
+ case GL_FRAMEBUFFER_SRGB_EXT:
+ *num_written = 1;
+ if (params) {
+ params[0] = static_cast<GLfloat>(enable_flags.framebuffer_srgb_ext);
+ }
+ return true;
case GL_POLYGON_OFFSET_FILL:
*num_written = 1;
if (params) {
diff --git a/chromium/gpu/command_buffer/service/feature_info.cc b/chromium/gpu/command_buffer/service/feature_info.cc
index 77000c5037e..45062e16dc8 100644
--- a/chromium/gpu/command_buffer/service/feature_info.cc
+++ b/chromium/gpu/command_buffer/service/feature_info.cc
@@ -572,11 +572,13 @@ void FeatureInfo::InitializeFeatures() {
validators_.index_type.AddValue(GL_UNSIGNED_INT);
}
+ bool has_srgb_framebuffer_support = false;
if (gl_version_info_->IsAtLeastGL(3, 2) ||
(gl_version_info_->IsAtLeastGL(2, 0) &&
(extensions.Contains("GL_EXT_framebuffer_sRGB") ||
extensions.Contains("GL_ARB_framebuffer_sRGB")))) {
feature_flags_.desktop_srgb_support = true;
+ has_srgb_framebuffer_support = true;
}
// With EXT_sRGB, unsized SRGB_EXT and SRGB_ALPHA_EXT are accepted by the
// <format> and <internalformat> parameter of TexImage2D. GLES3 adds support
@@ -589,6 +591,7 @@ void FeatureInfo::InitializeFeatures() {
extensions.Contains("GL_EXT_sRGB")) ||
feature_flags_.desktop_srgb_support) &&
IsWebGL1OrES2Context()) {
+ feature_flags_.ext_srgb = true;
AddExtensionString("GL_EXT_sRGB");
validators_.texture_internal_format.AddValue(GL_SRGB_EXT);
validators_.texture_internal_format.AddValue(GL_SRGB_ALPHA_EXT);
@@ -599,15 +602,50 @@ void FeatureInfo::InitializeFeatures() {
GL_FRAMEBUFFER_ATTACHMENT_COLOR_ENCODING_EXT);
validators_.texture_unsized_internal_format.AddValue(GL_SRGB_EXT);
validators_.texture_unsized_internal_format.AddValue(GL_SRGB_ALPHA_EXT);
+ has_srgb_framebuffer_support = true;
+ }
+ if (gl_version_info_->is_es3)
+ has_srgb_framebuffer_support = true;
+
+ if (has_srgb_framebuffer_support && !IsWebGLContext()) {
+ // GL_FRAMEBUFFER_SRGB_EXT is exposed by the GLES extension
+ // GL_EXT_sRGB_write_control (which is not part of the core, even in GLES3),
+ // and the desktop extension GL_ARB_framebuffer_sRGB (part of the core in
+ // 3.0).
+ if (feature_flags_.desktop_srgb_support ||
+ extensions.Contains("GL_EXT_sRGB_write_control")) {
+ feature_flags_.ext_srgb_write_control = true;
+ AddExtensionString("GL_EXT_sRGB_write_control");
+ validators_.capability.AddValue(GL_FRAMEBUFFER_SRGB_EXT);
+ }
+ }
+
+ // The extension GL_EXT_texture_sRGB_decode is the same on desktop and GLES.
+ if (extensions.Contains("GL_EXT_texture_sRGB_decode") && !IsWebGLContext()) {
+ AddExtensionString("GL_EXT_texture_sRGB_decode");
+ validators_.texture_parameter.AddValue(GL_TEXTURE_SRGB_DECODE_EXT);
}
- // On desktop, GL_EXT_texture_sRGB is required regardless of GL version,
- // since the sRGB formats in OpenGL 3.0 Core do not support S3TC.
- // TODO(kainino): Support GL_EXT_texture_compression_s3tc_srgb once ratified.
- if ((gl_version_info_->is_es && extensions.Contains("GL_NV_sRGB_formats")) ||
- (!gl_version_info_->is_es &&
- extensions.Contains("GL_EXT_texture_sRGB") &&
- extensions.Contains("GL_EXT_texture_compression_s3tc"))) {
+ bool have_s3tc_srgb = false;
+ if (gl_version_info_->is_es) {
+ // On mobile, the only extension that supports S3TC+sRGB is NV_sRGB_formats.
+ // The draft extension EXT_texture_compression_s3tc_srgb also supports it
+ // and is used if available (e.g. if ANGLE exposes it).
+ have_s3tc_srgb = extensions.Contains("GL_NV_sRGB_formats") ||
+ extensions.Contains("GL_EXT_texture_compression_s3tc_srgb");
+ } else {
+ // On desktop, strictly-speaking, S3TC+sRGB is only available if both
+ // EXT_texture_sRGB and EXT_texture_compression_s3tc_srgb are available.
+ //
+ // However, on macOS, S3TC+sRGB is supported on OpenGL 4.1 with only
+ // EXT_texture_compression_s3tc_srgb, so we allow that as well.
+ if (extensions.Contains("GL_EXT_texture_sRGB") ||
+ gl_version_info_->IsAtLeastGL(4, 1)) {
+ have_s3tc_srgb = extensions.Contains("GL_EXT_texture_compression_s3tc");
+ }
+ }
+
+ if (have_s3tc_srgb) {
AddExtensionString("GL_EXT_texture_compression_s3tc_srgb");
validators_.compressed_texture_format.AddValue(
@@ -1000,7 +1038,11 @@ void FeatureInfo::InitializeFeatures() {
validators_.g_l_state.AddValue(GL_TEXTURE_BINDING_EXTERNAL_OES);
}
- if (extensions.Contains("GL_OES_compressed_ETC1_RGB8_texture")) {
+ // TODO(kainino): If we add a way to query whether ANGLE is exposing
+ // native support for ETC1 textures, require that here. Otherwise, we could
+ // co-opt the native-ETC2-support query discussed below.
+ if (extensions.Contains("GL_OES_compressed_ETC1_RGB8_texture") &&
+ !gl_version_info_->is_angle) {
AddExtensionString("GL_OES_compressed_ETC1_RGB8_texture");
feature_flags_.oes_compressed_etc1_rgb8_texture = true;
validators_.compressed_texture_format.AddValue(GL_ETC1_RGB8_OES);
@@ -1491,6 +1533,13 @@ void FeatureInfo::EnableES3Validators() {
validators_.texture_sized_texture_filterable_internal_format.AddValue(
GL_BGRA8_EXT);
}
+
+ if (!IsWebGLContext()) {
+ validators_.texture_parameter.AddValue(GL_TEXTURE_SWIZZLE_R);
+ validators_.texture_parameter.AddValue(GL_TEXTURE_SWIZZLE_G);
+ validators_.texture_parameter.AddValue(GL_TEXTURE_SWIZZLE_B);
+ validators_.texture_parameter.AddValue(GL_TEXTURE_SWIZZLE_A);
+ }
}
bool FeatureInfo::IsWebGLContext() const {
diff --git a/chromium/gpu/command_buffer/service/feature_info.h b/chromium/gpu/command_buffer/service/feature_info.h
index d72dfc17aa9..278616a4f84 100644
--- a/chromium/gpu/command_buffer/service/feature_info.h
+++ b/chromium/gpu/command_buffer/service/feature_info.h
@@ -98,6 +98,8 @@ class GPU_EXPORT FeatureInfo : public base::RefCounted<FeatureInfo> {
bool khr_debug = false;
bool chromium_bind_generates_resource = false;
bool angle_webgl_compatibility = false;
+ bool ext_srgb_write_control = false;
+ bool ext_srgb = false;
};
FeatureInfo();
diff --git a/chromium/gpu/command_buffer/service/feature_info_unittest.cc b/chromium/gpu/command_buffer/service/feature_info_unittest.cc
index 49b8d197da6..902a9531acd 100644
--- a/chromium/gpu/command_buffer/service/feature_info_unittest.cc
+++ b/chromium/gpu/command_buffer/service/feature_info_unittest.cc
@@ -246,8 +246,11 @@ TEST_P(FeatureInfoTest, InitializeNoExtensions) {
NOTREACHED();
break;
}
+ // Because GL_EXT_sRGB is a substring of GL_EXT_sRGB_write_control (which is
+ // not part of the ES3 core), search for "GL_EXT_sRGB " with a trailing space
+ // and append a space to the end of the extension string being searched.
if (expect_ext_srgb) {
- EXPECT_THAT(info_->extensions(), HasSubstr("GL_EXT_sRGB"));
+ EXPECT_THAT(info_->extensions() + " ", HasSubstr("GL_EXT_sRGB "));
EXPECT_TRUE(info_->validators()->texture_format.IsValid(GL_SRGB_EXT));
EXPECT_TRUE(info_->validators()->texture_format.IsValid(GL_SRGB_ALPHA_EXT));
EXPECT_TRUE(info_->validators()->texture_internal_format.IsValid(
@@ -259,7 +262,7 @@ TEST_P(FeatureInfoTest, InitializeNoExtensions) {
EXPECT_TRUE(info_->validators()->framebuffer_parameter.IsValid(
GL_FRAMEBUFFER_ATTACHMENT_COLOR_ENCODING_EXT));
} else {
- EXPECT_THAT(info_->extensions(), Not(HasSubstr("GL_EXT_sRGB")));
+ EXPECT_THAT(info_->extensions() + " ", Not(HasSubstr("GL_EXT_sRGB ")));
EXPECT_FALSE(info_->validators()->texture_format.IsValid(GL_SRGB_EXT));
EXPECT_FALSE(info_->validators()->texture_format.IsValid(
GL_SRGB_ALPHA_EXT));
@@ -593,7 +596,7 @@ TEST_P(FeatureInfoTest, InitializeEXT_sRGB) {
SetupInitExpectations("GL_EXT_sRGB GL_OES_rgb8_rgba8");
if (GetContextType() == CONTEXT_TYPE_OPENGLES3) {
- EXPECT_THAT(info_->extensions(), Not(HasSubstr("GL_EXT_sRGB")));
+ EXPECT_THAT(info_->extensions() + " ", Not(HasSubstr("GL_EXT_sRGB ")));
EXPECT_FALSE(info_->validators()->texture_format.IsValid(GL_SRGB_EXT));
EXPECT_FALSE(
info_->validators()->texture_format.IsValid(GL_SRGB_ALPHA_EXT));
@@ -606,7 +609,7 @@ TEST_P(FeatureInfoTest, InitializeEXT_sRGB) {
EXPECT_FALSE(info_->validators()->framebuffer_parameter.IsValid(
GL_FRAMEBUFFER_ATTACHMENT_COLOR_ENCODING_EXT));
} else {
- EXPECT_THAT(info_->extensions(), HasSubstr("GL_EXT_sRGB"));
+ EXPECT_THAT(info_->extensions() + " ", HasSubstr("GL_EXT_sRGB "));
EXPECT_TRUE(info_->validators()->texture_format.IsValid(GL_SRGB_EXT));
EXPECT_TRUE(info_->validators()->texture_format.IsValid(GL_SRGB_ALPHA_EXT));
EXPECT_TRUE(
@@ -922,7 +925,7 @@ TEST_P(FeatureInfoTest, Initialize_texture_floatGLES3) {
TEST_P(FeatureInfoTest, Initialize_sRGBGLES3) {
SetupInitExpectationsWithGLVersion("", "", "OpenGL ES 3.0");
- EXPECT_THAT(info_->extensions(), Not(HasSubstr("GL_EXT_sRGB")));
+ EXPECT_THAT(info_->extensions() + " ", Not(HasSubstr("GL_EXT_sRGB ")));
EXPECT_FALSE(info_->validators()->texture_format.IsValid(
GL_SRGB_EXT));
EXPECT_FALSE(info_->validators()->texture_format.IsValid(
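These tests now search extensions() + " " for "GL_EXT_sRGB " with a trailing space, because a plain substring match would also hit GL_EXT_sRGB_write_control. A hedged sketch of the same whole-token check written as a helper:

// Sketch: match a whole extension token, not a raw substring.
#include <string>

bool HasExtensionToken(const std::string& extensions, const std::string& name) {
  // Pad both sides with spaces so "GL_EXT_sRGB" cannot match inside
  // "GL_EXT_sRGB_write_control".
  const std::string padded = " " + extensions + " ";
  return padded.find(" " + name + " ") != std::string::npos;
}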
diff --git a/chromium/gpu/command_buffer/service/framebuffer_manager.cc b/chromium/gpu/command_buffer/service/framebuffer_manager.cc
index 0fd59c5fd6b..63912958a87 100644
--- a/chromium/gpu/command_buffer/service/framebuffer_manager.cc
+++ b/chromium/gpu/command_buffer/service/framebuffer_manager.cc
@@ -525,21 +525,33 @@ bool Framebuffer::ValidateAndAdjustDrawBuffers(
if ((mask & fragment_output_type_mask) != (mask & draw_buffer_type_mask_))
return false;
- if (mask != adjusted_draw_buffer_bound_mask_) {
- // This won't be reached in every draw/clear call - only when framebuffer
- // or program has changed.
- for (uint32_t ii = 0; ii < manager_->max_draw_buffers_; ++ii) {
- adjusted_draw_buffers_[ii] = draw_buffers_[ii];
- uint32_t shift_bits = ii * 2;
- uint32_t buffer_mask = 0x3 << shift_bits;
- if ((buffer_mask & mask) == 0u) {
- adjusted_draw_buffers_[ii] = GL_NONE;
- }
+ AdjustDrawBuffersImpl(mask);
+ return true;
+}
+
+void Framebuffer::AdjustDrawBuffers() {
+ AdjustDrawBuffersImpl(draw_buffer_bound_mask_);
+}
+
+void Framebuffer::AdjustDrawBuffersImpl(uint32_t desired_mask) {
+ if (desired_mask == adjusted_draw_buffer_bound_mask_) {
+ return;
+ }
+ // This won't be reached in every clear call - only when framebuffer has
+ // changed.
+ for (uint32_t ii = 0; ii < manager_->max_draw_buffers_; ++ii) {
+ adjusted_draw_buffers_[ii] = draw_buffers_[ii];
+ if (adjusted_draw_buffers_[ii] == GL_NONE) {
+ continue;
+ }
+ uint32_t shift_bits = ii * 2;
+ uint32_t buffer_mask = 0x3 << shift_bits;
+ if ((buffer_mask & desired_mask) == 0u) {
+ adjusted_draw_buffers_[ii] = GL_NONE;
}
- adjusted_draw_buffer_bound_mask_ = mask;
- glDrawBuffersARB(manager_->max_draw_buffers_, adjusted_draw_buffers_.get());
}
- return true;
+ adjusted_draw_buffer_bound_mask_ = desired_mask;
+ glDrawBuffersARB(manager_->max_draw_buffers_, adjusted_draw_buffers_.get());
}
bool Framebuffer::ContainsActiveIntegerAttachments() const {
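AdjustDrawBuffersImpl() above reads the mask as two bits per color attachment; an attachment whose two bits are zero has nothing written to it by the fragment shader and is forced to GL_NONE before the glDrawBuffersARB call. A small sketch of that bit layout (the helper name is made up):

// Sketch of the 2-bits-per-draw-buffer mask layout.
#include <cstdint>

inline uint32_t DrawBufferBits(uint32_t mask, uint32_t index) {
  const uint32_t shift_bits = index * 2;   // two bits per color attachment
  return (mask >> shift_bits) & 0x3u;      // 0 means nothing is written
}

// In AdjustDrawBuffersImpl() terms: if DrawBufferBits(desired_mask, ii) == 0
// and draw_buffers_[ii] != GL_NONE, the adjusted buffer is set to GL_NONE.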
diff --git a/chromium/gpu/command_buffer/service/framebuffer_manager.h b/chromium/gpu/command_buffer/service/framebuffer_manager.h
index eab5809d631..e4433cc045d 100644
--- a/chromium/gpu/command_buffer/service/framebuffer_manager.h
+++ b/chromium/gpu/command_buffer/service/framebuffer_manager.h
@@ -27,7 +27,6 @@ class FramebufferCompletenessCache;
class FramebufferManager;
class Renderbuffer;
class RenderbufferManager;
-class Texture;
class TextureRef;
class TextureManager;
@@ -195,6 +194,11 @@ class GPU_EXPORT Framebuffer : public base::RefCounted<Framebuffer> {
bool ValidateAndAdjustDrawBuffers(uint32_t fragment_output_type_mask,
uint32_t fragment_output_written_mask);
+ // To be on the safe side, reset (via DrawBuffers) any draw buffers that are
+ // not GL_NONE but have no image attached.
+ // This is applied before a clear call.
+ void AdjustDrawBuffers();
+
bool ContainsActiveIntegerAttachments() const;
// Return true if any draw buffers has an alpha channel.
@@ -248,6 +252,9 @@ class GPU_EXPORT Framebuffer : public base::RefCounted<Framebuffer> {
// This call is only valid on a complete fbo.
void UpdateDrawBufferMasks();
+ // Helper for ValidateAndAdjustDrawBuffers() and AdjustDrawBuffers().
+ void AdjustDrawBuffersImpl(uint32_t desired_mask);
+
// The managers that owns this.
FramebufferManager* manager_;
diff --git a/chromium/gpu/command_buffer/service/gl_context_virtual.h b/chromium/gpu/command_buffer/service/gl_context_virtual.h
index 44e353ee4f5..c7187799316 100644
--- a/chromium/gpu/command_buffer/service/gl_context_virtual.h
+++ b/chromium/gpu/command_buffer/service/gl_context_virtual.h
@@ -14,7 +14,6 @@
#include "ui/gl/gl_context.h"
namespace gl {
-class GPUPreference;
class GPUTimingClient;
class GLShareGroup;
class GLSurface;
diff --git a/chromium/gpu/command_buffer/service/gl_stream_texture_image.h b/chromium/gpu/command_buffer/service/gl_stream_texture_image.h
index daaa158ed02..5ab4f751601 100644
--- a/chromium/gpu/command_buffer/service/gl_stream_texture_image.h
+++ b/chromium/gpu/command_buffer/service/gl_stream_texture_image.h
@@ -25,6 +25,10 @@ class GPU_EXPORT GLStreamTextureImage : public gl::GLImage {
void Flush() override {}
+ virtual void NotifyPromotionHint(bool promotion_hint,
+ int display_x,
+ int display_y) {}
+
protected:
~GLStreamTextureImage() override {}
diff --git a/chromium/gpu/command_buffer/service/gl_surface_mock.h b/chromium/gpu/command_buffer/service/gl_surface_mock.h
index ae2e72de620..10e81debbac 100644
--- a/chromium/gpu/command_buffer/service/gl_surface_mock.h
+++ b/chromium/gpu/command_buffer/service/gl_surface_mock.h
@@ -16,7 +16,7 @@ class GLSurfaceMock : public gl::GLSurface {
public:
GLSurfaceMock();
- MOCK_METHOD1(Initialize, bool(gl::GLSurface::Format format));
+ MOCK_METHOD1(Initialize, bool(gl::GLSurfaceFormat format));
MOCK_METHOD0(Destroy, void());
MOCK_METHOD3(Resize,
bool(const gfx::Size& size, float scale_factor, bool alpha));
@@ -34,7 +34,7 @@ class GLSurfaceMock : public gl::GLSurface {
MOCK_METHOD0(GetShareHandle, void*());
MOCK_METHOD0(GetDisplay, void*());
MOCK_METHOD0(GetConfig, void*());
- MOCK_METHOD0(GetFormat, GLSurface::Format());
+ MOCK_METHOD0(GetFormat, gl::GLSurfaceFormat());
protected:
virtual ~GLSurfaceMock();
diff --git a/chromium/gpu/command_buffer/service/gl_utils.cc b/chromium/gpu/command_buffer/service/gl_utils.cc
index a89a5fa5476..19683c3fbe6 100644
--- a/chromium/gpu/command_buffer/service/gl_utils.cc
+++ b/chromium/gpu/command_buffer/service/gl_utils.cc
@@ -174,6 +174,12 @@ void PopulateNumericCapabilities(Capabilities* caps,
&caps->max_vertex_texture_image_units);
glGetIntegerv(GL_MAX_VERTEX_UNIFORM_VECTORS,
&caps->max_vertex_uniform_vectors);
+ {
+ GLint dims[2] = {0, 0};
+ glGetIntegerv(GL_MAX_VIEWPORT_DIMS, dims);
+ caps->max_viewport_width = dims[0];
+ caps->max_viewport_height = dims[1];
+ }
glGetIntegerv(GL_NUM_COMPRESSED_TEXTURE_FORMATS,
&caps->num_compressed_texture_formats);
glGetIntegerv(GL_NUM_SHADER_BINARY_FORMATS, &caps->num_shader_binary_formats);
diff --git a/chromium/gpu/command_buffer/service/gles2_cmd_apply_framebuffer_attachment_cmaa_intel.cc b/chromium/gpu/command_buffer/service/gles2_cmd_apply_framebuffer_attachment_cmaa_intel.cc
index a8d5e4354b2..ae6ad70ebbf 100644
--- a/chromium/gpu/command_buffer/service/gles2_cmd_apply_framebuffer_attachment_cmaa_intel.cc
+++ b/chromium/gpu/command_buffer/service/gles2_cmd_apply_framebuffer_attachment_cmaa_intel.cc
@@ -8,6 +8,7 @@
#include "gpu/command_buffer/service/framebuffer_manager.h"
#include "gpu/command_buffer/service/gles2_cmd_copy_texture_chromium.h"
#include "gpu/command_buffer/service/gles2_cmd_decoder.h"
+#include "gpu/command_buffer/service/texture_manager.h"
#include "ui/gl/gl_context.h"
#include "ui/gl/gl_gl_api_implementation.h"
#include "ui/gl/gl_version_info.h"
@@ -207,9 +208,10 @@ void ApplyFramebufferAttachmentCMAAINTELResourceManager::Destroy() {
// Reference GL_INTEL_framebuffer_CMAA for details.
void ApplyFramebufferAttachmentCMAAINTELResourceManager::
ApplyFramebufferAttachmentCMAAINTEL(
- gles2::GLES2Decoder* decoder,
- gles2::Framebuffer* framebuffer,
- gles2::CopyTextureCHROMIUMResourceManager* copier) {
+ GLES2Decoder* decoder,
+ Framebuffer* framebuffer,
+ CopyTextureCHROMIUMResourceManager* copier,
+ TextureManager* texture_manager) {
DCHECK(decoder);
DCHECK(initialized_);
if (!framebuffer)
@@ -241,16 +243,23 @@ void ApplyFramebufferAttachmentCMAAINTELResourceManager::
// CMAA internally expects GL_RGBA8 textures.
// Process using a GL_RGBA8 copy if this is not the case.
- bool do_copy = internal_format != GL_RGBA8;
+ DCHECK(attachment->object_name());
+ TextureRef* texture =
+ texture_manager->GetTexture(attachment->object_name());
+ const bool rgba_immutable =
+ texture->texture()->IsImmutable() &&
+ TextureManager::ExtractFormatFromStorageFormat(internal_format) ==
+ GL_RGBA;
+ const bool do_copy = !rgba_immutable;
// CMAA Effect
if (do_copy) {
ApplyCMAAEffectTexture(source_texture, rgba8_texture_, do_copy);
copier->DoCopySubTexture(
- decoder, GL_TEXTURE_2D, rgba8_texture_, GL_RGBA8, GL_TEXTURE_2D,
- source_texture, internal_format, 0, 0, 0, 0, width_, height_,
- width_, height_, width_, height_, false, false, false);
+ decoder, GL_TEXTURE_2D, rgba8_texture_, 0, GL_RGBA8, GL_TEXTURE_2D,
+ source_texture, 0, internal_format, 0, 0, 0, 0, width_, height_,
+ width_, height_, width_, height_, false, false, false, DIRECT_DRAW);
} else {
ApplyCMAAEffectTexture(source_texture, source_texture, do_copy);
}
diff --git a/chromium/gpu/command_buffer/service/gles2_cmd_apply_framebuffer_attachment_cmaa_intel.h b/chromium/gpu/command_buffer/service/gles2_cmd_apply_framebuffer_attachment_cmaa_intel.h
index cc3c6319da8..f9bad673293 100644
--- a/chromium/gpu/command_buffer/service/gles2_cmd_apply_framebuffer_attachment_cmaa_intel.h
+++ b/chromium/gpu/command_buffer/service/gles2_cmd_apply_framebuffer_attachment_cmaa_intel.h
@@ -8,15 +8,12 @@
#include "gpu/command_buffer/service/gl_utils.h"
#include "gpu/gpu_export.h"
-namespace {
-class CMAAEffect;
-}
-
namespace gpu {
namespace gles2 {
class CopyTextureCHROMIUMResourceManager;
class GLES2Decoder;
class Framebuffer;
+class TextureManager;
// This class encapsulates the resources required to implement the
// GL_INTEL_framebuffer_CMAA extension via shaders.
@@ -36,9 +33,10 @@ class GPU_EXPORT ApplyFramebufferAttachmentCMAAINTELResourceManager {
// Applies the algorithm to the color attachments of the currently bound draw
// framebuffer.
void ApplyFramebufferAttachmentCMAAINTEL(
- gles2::GLES2Decoder* decoder,
- gles2::Framebuffer* framebuffer,
- gles2::CopyTextureCHROMIUMResourceManager* copier);
+ GLES2Decoder* decoder,
+ Framebuffer* framebuffer,
+ CopyTextureCHROMIUMResourceManager* copier,
+ TextureManager* texture_manager);
private:
// Applies the CMAA algorithm to a texture.
diff --git a/chromium/gpu/command_buffer/service/gles2_cmd_copy_texture_chromium.cc b/chromium/gpu/command_buffer/service/gles2_cmd_copy_texture_chromium.cc
index e6e3035c9e8..6133380b63a 100644
--- a/chromium/gpu/command_buffer/service/gles2_cmd_copy_texture_chromium.cc
+++ b/chromium/gpu/command_buffer/service/gles2_cmd_copy_texture_chromium.cc
@@ -10,6 +10,7 @@
#include "gpu/command_buffer/service/gl_utils.h"
#include "gpu/command_buffer/service/gles2_cmd_decoder.h"
+#include "gpu/command_buffer/service/texture_manager.h"
#include "ui/gl/gl_version_info.h"
namespace {
@@ -19,113 +20,311 @@ const GLfloat kIdentityMatrix[16] = {1.0f, 0.0f, 0.0f, 0.0f,
0.0f, 0.0f, 1.0f, 0.0f,
0.0f, 0.0f, 0.0f, 1.0f};
-enum FragmentShaderId {
- FRAGMENT_SHADER_COPY_TEXTURE_2D,
- FRAGMENT_SHADER_COPY_TEXTURE_RECTANGLE_ARB,
- FRAGMENT_SHADER_COPY_TEXTURE_EXTERNAL_OES,
- FRAGMENT_SHADER_COPY_TEXTURE_PREMULTIPLY_ALPHA_2D,
- FRAGMENT_SHADER_COPY_TEXTURE_PREMULTIPLY_ALPHA_RECTANGLE_ARB,
- FRAGMENT_SHADER_COPY_TEXTURE_PREMULTIPLY_ALPHA_EXTERNAL_OES,
- FRAGMENT_SHADER_COPY_TEXTURE_UNPREMULTIPLY_ALPHA_2D,
- FRAGMENT_SHADER_COPY_TEXTURE_UNPREMULTIPLY_ALPHA_RECTANGLE_ARB,
- FRAGMENT_SHADER_COPY_TEXTURE_UNPREMULTIPLY_ALPHA_EXTERNAL_OES,
- NUM_FRAGMENT_SHADERS,
+enum {
+ SAMPLER_2D,
+ SAMPLER_RECTANGLE_ARB,
+ SAMPLER_EXTERNAL_OES,
+ NUM_SAMPLERS
};
+enum {
+ S_FORMAT_ALPHA,
+ S_FORMAT_LUMINANCE,
+ S_FORMAT_LUMINANCE_ALPHA,
+ S_FORMAT_RED,
+ S_FORMAT_RGB,
+ S_FORMAT_RGBA,
+ S_FORMAT_RGB8,
+ S_FORMAT_RGBA8,
+ S_FORMAT_BGRA_EXT,
+ S_FORMAT_BGRA8_EXT,
+ S_FORMAT_RGB_YCBCR_420V_CHROMIUM,
+ S_FORMAT_RGB_YCBCR_422_CHROMIUM,
+ S_FORMAT_COMPRESSED,
+ NUM_S_FORMAT
+};
+
+enum {
+ D_FORMAT_RGB,
+ D_FORMAT_RGBA,
+ D_FORMAT_RGB8,
+ D_FORMAT_RGBA8,
+ D_FORMAT_BGRA_EXT,
+ D_FORMAT_BGRA8_EXT,
+ D_FORMAT_SRGB_EXT,
+ D_FORMAT_SRGB_ALPHA_EXT,
+ D_FORMAT_R8,
+ D_FORMAT_R8UI,
+ D_FORMAT_RG8,
+ D_FORMAT_RG8UI,
+ D_FORMAT_SRGB8,
+ D_FORMAT_RGB565,
+ D_FORMAT_RGB8UI,
+ D_FORMAT_SRGB8_ALPHA8,
+ D_FORMAT_RGB5_A1,
+ D_FORMAT_RGBA4,
+ D_FORMAT_RGBA8UI,
+ D_FORMAT_RGB9_E5,
+ D_FORMAT_R16F,
+ D_FORMAT_R32F,
+ D_FORMAT_RG16F,
+ D_FORMAT_RG32F,
+ D_FORMAT_RGB16F,
+ D_FORMAT_RGB32F,
+ D_FORMAT_RGBA16F,
+ D_FORMAT_RGBA32F,
+ D_FORMAT_R11F_G11F_B10F,
+ NUM_D_FORMAT
+};
+
+const unsigned kNumVertexShaders = NUM_SAMPLERS;
+const unsigned kNumFragmentShaders =
+ 4 * NUM_SAMPLERS * NUM_S_FORMAT * NUM_D_FORMAT;
+
+typedef unsigned ShaderId;
+
+ShaderId GetVertexShaderId(GLenum target) {
+ ShaderId id = 0;
+ switch (target) {
+ case GL_TEXTURE_2D:
+ id = SAMPLER_2D;
+ break;
+ case GL_TEXTURE_RECTANGLE_ARB:
+ id = SAMPLER_RECTANGLE_ARB;
+ break;
+ case GL_TEXTURE_EXTERNAL_OES:
+ id = SAMPLER_EXTERNAL_OES;
+ break;
+ default:
+ NOTREACHED();
+ break;
+ }
+ return id;
+}
+
// Returns the correct fragment shader id to evaluate the copy operation for
// the premultiply alpha pixel store settings and target.
-FragmentShaderId GetFragmentShaderId(bool premultiply_alpha,
+ShaderId GetFragmentShaderId(bool premultiply_alpha,
bool unpremultiply_alpha,
- GLenum target) {
- enum {
- SAMPLER_2D,
- SAMPLER_RECTANGLE_ARB,
- SAMPLER_EXTERNAL_OES,
- NUM_SAMPLERS
- };
-
- // bit 0: premultiply alpha
- // bit 1: unpremultiply alpha
- static FragmentShaderId shader_ids[][NUM_SAMPLERS] = {
- {
- FRAGMENT_SHADER_COPY_TEXTURE_2D,
- FRAGMENT_SHADER_COPY_TEXTURE_RECTANGLE_ARB,
- FRAGMENT_SHADER_COPY_TEXTURE_EXTERNAL_OES,
- },
- {
- FRAGMENT_SHADER_COPY_TEXTURE_PREMULTIPLY_ALPHA_2D,
- FRAGMENT_SHADER_COPY_TEXTURE_PREMULTIPLY_ALPHA_RECTANGLE_ARB,
- FRAGMENT_SHADER_COPY_TEXTURE_PREMULTIPLY_ALPHA_EXTERNAL_OES,
- },
- {
- FRAGMENT_SHADER_COPY_TEXTURE_UNPREMULTIPLY_ALPHA_2D,
- FRAGMENT_SHADER_COPY_TEXTURE_UNPREMULTIPLY_ALPHA_RECTANGLE_ARB,
- FRAGMENT_SHADER_COPY_TEXTURE_UNPREMULTIPLY_ALPHA_EXTERNAL_OES,
- },
- {
- FRAGMENT_SHADER_COPY_TEXTURE_2D,
- FRAGMENT_SHADER_COPY_TEXTURE_RECTANGLE_ARB,
- FRAGMENT_SHADER_COPY_TEXTURE_EXTERNAL_OES,
- }};
-
- unsigned index = (premultiply_alpha ? (1 << 0) : 0) |
- (unpremultiply_alpha ? (1 << 1) : 0);
+ GLenum target,
+ GLenum source_format,
+ GLenum dest_format) {
+ unsigned alphaIndex = 0;
+ unsigned targetIndex = 0;
+ unsigned sourceFormatIndex = 0;
+ unsigned destFormatIndex = 0;
+
+ alphaIndex = (premultiply_alpha ? (1 << 0) : 0) |
+ (unpremultiply_alpha ? (1 << 1) : 0);
switch (target) {
case GL_TEXTURE_2D:
- return shader_ids[index][SAMPLER_2D];
+ targetIndex = SAMPLER_2D;
+ break;
case GL_TEXTURE_RECTANGLE_ARB:
- return shader_ids[index][SAMPLER_RECTANGLE_ARB];
+ targetIndex = SAMPLER_RECTANGLE_ARB;
+ break;
case GL_TEXTURE_EXTERNAL_OES:
- return shader_ids[index][SAMPLER_EXTERNAL_OES];
+ targetIndex = SAMPLER_EXTERNAL_OES;
+ break;
+ default:
+ NOTREACHED();
+ break;
+ }
+
+ switch (source_format) {
+ case GL_ALPHA:
+ sourceFormatIndex = S_FORMAT_ALPHA;
+ break;
+ case GL_LUMINANCE:
+ sourceFormatIndex = S_FORMAT_LUMINANCE;
+ break;
+ case GL_LUMINANCE_ALPHA:
+ sourceFormatIndex = S_FORMAT_LUMINANCE_ALPHA;
+ break;
+ case GL_RED:
+ sourceFormatIndex = S_FORMAT_RED;
+ break;
+ case GL_RGB:
+ sourceFormatIndex = S_FORMAT_RGB;
+ break;
+ case GL_RGBA:
+ sourceFormatIndex = S_FORMAT_RGBA;
+ break;
+ case GL_RGB8:
+ sourceFormatIndex = S_FORMAT_RGB8;
+ break;
+ case GL_RGBA8:
+ sourceFormatIndex = S_FORMAT_RGBA8;
+ break;
+ case GL_BGRA_EXT:
+ sourceFormatIndex = S_FORMAT_BGRA_EXT;
+ break;
+ case GL_BGRA8_EXT:
+ sourceFormatIndex = S_FORMAT_BGRA8_EXT;
+ break;
+ case GL_RGB_YCBCR_420V_CHROMIUM:
+ sourceFormatIndex = S_FORMAT_RGB_YCBCR_420V_CHROMIUM;
+ break;
+ case GL_RGB_YCBCR_422_CHROMIUM:
+ sourceFormatIndex = S_FORMAT_RGB_YCBCR_422_CHROMIUM;
+ break;
+ case GL_ATC_RGB_AMD:
+ case GL_ATC_RGBA_INTERPOLATED_ALPHA_AMD:
+ case GL_COMPRESSED_RGB_S3TC_DXT1_EXT:
+ case GL_COMPRESSED_RGBA_S3TC_DXT5_EXT:
+ case GL_ETC1_RGB8_OES:
+ sourceFormatIndex = S_FORMAT_COMPRESSED;
+ break;
+ default:
+ NOTREACHED();
+ break;
+ }
+
+ switch (dest_format) {
+ case GL_RGB:
+ destFormatIndex = D_FORMAT_RGB;
+ break;
+ case GL_RGBA:
+ destFormatIndex = D_FORMAT_RGBA;
+ break;
+ case GL_RGB8:
+ destFormatIndex = D_FORMAT_RGB8;
+ break;
+ case GL_RGBA8:
+ destFormatIndex = D_FORMAT_RGBA8;
+ break;
+ case GL_BGRA_EXT:
+ destFormatIndex = D_FORMAT_BGRA_EXT;
+ break;
+ case GL_BGRA8_EXT:
+ destFormatIndex = D_FORMAT_BGRA8_EXT;
+ break;
+ case GL_SRGB_EXT:
+ destFormatIndex = D_FORMAT_SRGB_EXT;
+ break;
+ case GL_SRGB_ALPHA_EXT:
+ destFormatIndex = D_FORMAT_SRGB_ALPHA_EXT;
+ break;
+ case GL_R8:
+ destFormatIndex = D_FORMAT_R8;
+ break;
+ case GL_R8UI:
+ destFormatIndex = D_FORMAT_R8UI;
+ break;
+ case GL_RG8:
+ destFormatIndex = D_FORMAT_RG8;
+ break;
+ case GL_RG8UI:
+ destFormatIndex = D_FORMAT_RG8UI;
+ break;
+ case GL_SRGB8:
+ destFormatIndex = D_FORMAT_SRGB8;
+ break;
+ case GL_RGB565:
+ destFormatIndex = D_FORMAT_RGB565;
+ break;
+ case GL_RGB8UI:
+ destFormatIndex = D_FORMAT_RGB8UI;
+ break;
+ case GL_SRGB8_ALPHA8:
+ destFormatIndex = D_FORMAT_SRGB8_ALPHA8;
+ break;
+ case GL_RGB5_A1:
+ destFormatIndex = D_FORMAT_RGB5_A1;
+ break;
+ case GL_RGBA4:
+ destFormatIndex = D_FORMAT_RGBA4;
+ break;
+ case GL_RGBA8UI:
+ destFormatIndex = D_FORMAT_RGBA8UI;
+ break;
+ case GL_RGB9_E5:
+ destFormatIndex = D_FORMAT_RGB9_E5;
+ break;
+ case GL_R16F:
+ destFormatIndex = D_FORMAT_R16F;
+ break;
+ case GL_R32F:
+ destFormatIndex = D_FORMAT_R32F;
+ break;
+ case GL_RG16F:
+ destFormatIndex = D_FORMAT_RG16F;
+ break;
+ case GL_RG32F:
+ destFormatIndex = D_FORMAT_RG32F;
+ break;
+ case GL_RGB16F:
+ destFormatIndex = D_FORMAT_RGB16F;
+ break;
+ case GL_RGB32F:
+ destFormatIndex = D_FORMAT_RGB32F;
+ break;
+ case GL_RGBA16F:
+ destFormatIndex = D_FORMAT_RGBA16F;
+ break;
+ case GL_RGBA32F:
+ destFormatIndex = D_FORMAT_RGBA32F;
+ break;
+ case GL_R11F_G11F_B10F:
+ destFormatIndex = D_FORMAT_R11F_G11F_B10F;
+ break;
default:
+ NOTREACHED();
break;
}
- NOTREACHED();
- return shader_ids[0][SAMPLER_2D];
+ return alphaIndex + targetIndex * 4 + sourceFormatIndex * 4 * NUM_SAMPLERS +
+ destFormatIndex * 4 * NUM_SAMPLERS * NUM_S_FORMAT;
}
-const char* kShaderPrecisionPreamble = "\
- #ifdef GL_ES\n\
- precision mediump float;\n\
- #define TexCoordPrecision mediump\n\
- #else\n\
- #define TexCoordPrecision\n\
- #endif\n";
+const char* kShaderPrecisionPreamble =
+ "#ifdef GL_ES\n"
+ "precision mediump float;\n"
+ "#define TexCoordPrecision mediump\n"
+ "#else\n"
+ "#define TexCoordPrecision\n"
+ "#endif\n";
-std::string GetVertexShaderSource(const gl::GLVersionInfo& gl_version_info) {
+std::string GetVertexShaderSource(const gl::GLVersionInfo& gl_version_info,
+ GLenum target) {
std::string source;
- // Preamble for core and compatibility mode.
- if (gl_version_info.is_desktop_core_profile) {
- source += std::string("\
- #version 150\n\
- #define ATTRIBUTE in\n\
- #define VARYING out\n");
+ if (gl_version_info.is_es || gl_version_info.IsLowerThanGL(3, 2)) {
+ if (gl_version_info.is_es3 && target != GL_TEXTURE_EXTERNAL_OES) {
+ source += "#version 300 es\n";
+ source +=
+ "#define ATTRIBUTE in\n"
+ "#define VARYING out\n";
+ } else {
+ source +=
+ "#define ATTRIBUTE attribute\n"
+ "#define VARYING varying\n";
+ }
} else {
- source += std::string("\
- #define ATTRIBUTE attribute\n\
- #define VARYING varying\n");
+ source += "#version 150\n";
+ source +=
+ "#define ATTRIBUTE in\n"
+ "#define VARYING out\n";
}
// Preamble for texture precision.
- source += std::string(kShaderPrecisionPreamble);
+ source += kShaderPrecisionPreamble;
// Main shader source.
- source += std::string("\
- uniform vec2 u_vertex_dest_mult;\n\
- uniform vec2 u_vertex_dest_add;\n\
- uniform vec2 u_vertex_source_mult;\n\
- uniform vec2 u_vertex_source_add;\n\
- ATTRIBUTE vec2 a_position;\n\
- VARYING TexCoordPrecision vec2 v_uv;\n\
- void main(void) {\n\
- gl_Position = vec4(0, 0, 0, 1);\n\
- gl_Position.xy = a_position.xy * u_vertex_dest_mult + \
- u_vertex_dest_add;\n\
- v_uv = a_position.xy * u_vertex_source_mult + u_vertex_source_add;\n\
- }\n");
+ source +=
+ "uniform vec2 u_vertex_dest_mult;\n"
+ "uniform vec2 u_vertex_dest_add;\n"
+ "uniform vec2 u_vertex_source_mult;\n"
+ "uniform vec2 u_vertex_source_add;\n"
+ "ATTRIBUTE vec2 a_position;\n"
+ "VARYING TexCoordPrecision vec2 v_uv;\n"
+ "void main(void) {\n"
+ " gl_Position = vec4(0, 0, 0, 1);\n"
+ " gl_Position.xy =\n"
+ " a_position.xy * u_vertex_dest_mult + u_vertex_dest_add;\n"
+ " v_uv = a_position.xy * u_vertex_source_mult + u_vertex_source_add;\n"
+ "}\n";
return source;
}
@@ -134,97 +333,175 @@ std::string GetFragmentShaderSource(const gl::GLVersionInfo& gl_version_info,
bool premultiply_alpha,
bool unpremultiply_alpha,
bool nv_egl_stream_consumer_external,
- GLenum target) {
+ GLenum target,
+ GLenum source_format,
+ GLenum dest_format) {
std::string source;
// Preamble for core and compatibility mode.
- if (gl_version_info.is_desktop_core_profile) {
- source += std::string("\
- #version 150\n\
- out vec4 frag_color;\n\
- #define VARYING in\n\
- #define FRAGCOLOR frag_color\n\
- #define TextureLookup texture\n");
+ if (gl_version_info.is_es || gl_version_info.IsLowerThanGL(3, 2)) {
+ if (gl_version_info.is_es3 && target != GL_TEXTURE_EXTERNAL_OES) {
+ source += "#version 300 es\n";
+ }
+ if (target == GL_TEXTURE_EXTERNAL_OES) {
+ source += "#extension GL_OES_EGL_image_external : enable\n";
+
+ if (nv_egl_stream_consumer_external) {
+ source += "#extension GL_NV_EGL_stream_consumer_external : enable\n";
+ }
+ }
} else {
+ source += "#version 150\n";
+ }
+
+ // Preamble for texture precision.
+ source += kShaderPrecisionPreamble;
+
+ if (gpu::gles2::GLES2Util::IsSignedIntegerFormat(dest_format)) {
+ source += "#define TextureType ivec4\n";
+ source += "#define ZERO 0\n";
+ source += "#define MAX_COLOR 255\n";
+ if (gpu::gles2::GLES2Util::IsSignedIntegerFormat(source_format))
+ source += "#define InnerScaleValue 1\n";
+ else if (gpu::gles2::GLES2Util::IsUnsignedIntegerFormat(source_format))
+ source += "#define InnerScaleValue 1u\n";
+ else
+ source += "#define InnerScaleValue 255.0\n";
+ source += "#define OuterScaleValue 1\n";
+ } else if (gpu::gles2::GLES2Util::IsUnsignedIntegerFormat(dest_format)) {
+ source += "#define TextureType uvec4\n";
+ source += "#define ZERO 0u\n";
+ source += "#define MAX_COLOR 255u\n";
+ if (gpu::gles2::GLES2Util::IsSignedIntegerFormat(source_format))
+ source += "#define InnerScaleValue 1\n";
+ else if (gpu::gles2::GLES2Util::IsUnsignedIntegerFormat(source_format))
+ source += "#define InnerScaleValue 1u\n";
+ else
+ source += "#define InnerScaleValue 255.0\n";
+ source += "#define OuterScaleValue 1u\n";
+ } else {
+ source += "#define TextureType vec4\n";
+ source += "#define ZERO 0.0\n";
+ source += "#define MAX_COLOR 1.0\n";
+ if (gpu::gles2::GLES2Util::IsSignedIntegerFormat(source_format)) {
+ source += "#define InnerScaleValue 1\n";
+ source += "#define OuterScaleValue (1.0 / 255.0)\n";
+ } else if (gpu::gles2::GLES2Util::IsUnsignedIntegerFormat(source_format)) {
+ source += "#define InnerScaleValue 1u\n";
+ source += "#define OuterScaleValue (1.0 / 255.0)\n";
+ } else {
+ source += "#define InnerScaleValue 1.0\n";
+ source += "#define OuterScaleValue 1.0\n";
+ }
+ }
+ if (gl_version_info.is_es2 || gl_version_info.IsLowerThanGL(3, 2) ||
+ target == GL_TEXTURE_EXTERNAL_OES) {
switch (target) {
case GL_TEXTURE_2D:
- source += std::string("#define TextureLookup texture2D\n");
- break;
- case GL_TEXTURE_RECTANGLE_ARB:
- source += std::string("#define TextureLookup texture2DRect\n");
- break;
case GL_TEXTURE_EXTERNAL_OES:
- source +=
- std::string("#extension GL_OES_EGL_image_external : enable\n");
-
- if (nv_egl_stream_consumer_external) {
- source += std::string(
- "#extension GL_NV_EGL_stream_consumer_external : enable\n");
- }
-
- source += std::string("#define TextureLookup texture2D\n");
+ source += "#define TextureLookup texture2D\n";
break;
default:
NOTREACHED();
break;
}
- source += std::string("\
- #define VARYING varying\n\
- #define FRAGCOLOR gl_FragColor\n");
+
+ source +=
+ "#define VARYING varying\n"
+ "#define FRAGCOLOR gl_FragColor\n";
+ } else {
+ source +=
+ "#define VARYING in\n"
+ "out TextureType frag_color;\n"
+ "#define FRAGCOLOR frag_color\n"
+ "#define TextureLookup texture\n";
}
// Preamble for sampler type.
switch (target) {
case GL_TEXTURE_2D:
- source += std::string("#define SamplerType sampler2D\n");
+ source += "#define SamplerType sampler2D\n";
break;
case GL_TEXTURE_RECTANGLE_ARB:
- source += std::string("#define SamplerType sampler2DRect\n");
+ source += "#define SamplerType sampler2DRect\n";
break;
case GL_TEXTURE_EXTERNAL_OES:
- source += std::string("#define SamplerType samplerExternalOES\n");
+ source += "#define SamplerType samplerExternalOES\n";
break;
default:
NOTREACHED();
break;
}
- // Preamble for texture precision.
- source += std::string(kShaderPrecisionPreamble);
-
// Main shader source.
- source += std::string("\
- uniform SamplerType u_sampler;\n\
- uniform mat4 u_tex_coord_transform;\n\
- VARYING TexCoordPrecision vec2 v_uv;\n\
- void main(void) {\n\
- TexCoordPrecision vec4 uv = u_tex_coord_transform * vec4(v_uv, 0, 1);\n\
- FRAGCOLOR = TextureLookup(u_sampler, uv.st);\n");
+ source +=
+ "uniform SamplerType u_sampler;\n"
+ "uniform mat4 u_tex_coord_transform;\n"
+ "VARYING TexCoordPrecision vec2 v_uv;\n"
+ "void main(void) {\n"
+ " TexCoordPrecision vec4 uv =\n"
+ " u_tex_coord_transform * vec4(v_uv, 0, 1);\n"
+ " vec4 color = TextureLookup(u_sampler, uv.st);\n"
+ " FRAGCOLOR = TextureType(color * InnerScaleValue) * OuterScaleValue;\n";
// Post-processing to premultiply or un-premultiply alpha.
- if (premultiply_alpha) {
- source += std::string(" FRAGCOLOR.rgb *= FRAGCOLOR.a;\n");
- }
- if (unpremultiply_alpha) {
- source += std::string("\
- if (FRAGCOLOR.a > 0.0)\n\
- FRAGCOLOR.rgb /= FRAGCOLOR.a;\n");
+ // Check dest format has alpha channel first.
+ if ((gpu::gles2::GLES2Util::GetChannelsForFormat(dest_format) & 0x0008) !=
+ 0) {
+ if (premultiply_alpha) {
+ source += " FRAGCOLOR.rgb *= FRAGCOLOR.a;\n";
+ source += " FRAGCOLOR.rgb /= MAX_COLOR;\n";
+ }
+ if (unpremultiply_alpha) {
+ source +=
+ " if (FRAGCOLOR.a > ZERO) {\n"
+ " FRAGCOLOR.rgb /= FRAGCOLOR.a;\n"
+ " FRAGCOLOR.rgb *= MAX_COLOR;\n"
+ " }\n";
+ }
}
// Main function end.
- source += std::string(" }\n");
+ source += "}\n";
return source;
}
+GLenum getIntermediateFormat(GLenum format) {
+ switch (format) {
+ case GL_LUMINANCE_ALPHA:
+ case GL_LUMINANCE:
+ case GL_ALPHA:
+ return GL_RGBA;
+ case GL_SRGB_EXT:
+ return GL_SRGB_ALPHA_EXT;
+ case GL_RGB16F:
+ return GL_RGBA16F;
+ case GL_RGB9_E5:
+ case GL_RGB32F:
+ return GL_RGBA32F;
+ case GL_SRGB8:
+ return GL_SRGB8_ALPHA8;
+ case GL_RGB8UI:
+ return GL_RGBA8UI;
+ default:
+ return format;
+ }
+}
+
void CompileShader(GLuint shader, const char* shader_source) {
glShaderSource(shader, 1, &shader_source, 0);
glCompileShader(shader);
-#ifndef NDEBUG
+#if DCHECK_IS_ON()
GLint compile_status;
glGetShaderiv(shader, GL_COMPILE_STATUS, &compile_status);
- if (GL_TRUE != compile_status)
- DLOG(ERROR) << "CopyTextureCHROMIUM: shader compilation failure.";
+ if (GL_TRUE != compile_status) {
+ char buffer[1024];
+ GLsizei length = 0;
+ glGetShaderInfoLog(shader, sizeof(buffer), &length, buffer);
+ std::string log(buffer, length);
+ DLOG(ERROR) << "CopyTextureCHROMIUM: shader compilation failure: " << log;
+ }
#endif
}
@@ -235,19 +512,22 @@ void DeleteShader(GLuint shader) {
bool BindFramebufferTexture2D(GLenum target,
GLuint texture_id,
+ GLint level,
GLuint framebuffer) {
DCHECK(target == GL_TEXTURE_2D || target == GL_TEXTURE_RECTANGLE_ARB);
glActiveTexture(GL_TEXTURE0);
glBindTexture(target, texture_id);
// NVidia drivers require texture settings to be a certain way
// or they won't report FRAMEBUFFER_COMPLETE.
+ if (level > 0)
+ glTexParameteri(target, GL_TEXTURE_BASE_LEVEL, level);
glTexParameterf(target, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
glTexParameterf(target, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
glTexParameteri(target, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
glTexParameteri(target, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
glBindFramebufferEXT(GL_FRAMEBUFFER_EXT, framebuffer);
glFramebufferTexture2DEXT(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, target,
- texture_id, 0);
+ texture_id, level);
#ifndef NDEBUG
GLenum fb_status = glCheckFramebufferStatusEXT(GL_FRAMEBUFFER);
@@ -262,22 +542,26 @@ bool BindFramebufferTexture2D(GLenum target,
void DoCopyTexImage2D(const gpu::gles2::GLES2Decoder* decoder,
GLenum source_target,
GLuint source_id,
+ GLint source_level,
GLenum dest_target,
GLuint dest_id,
+ GLint dest_level,
GLenum dest_internal_format,
GLsizei width,
GLsizei height,
GLuint framebuffer) {
DCHECK_EQ(static_cast<GLenum>(GL_TEXTURE_2D), source_target);
DCHECK_EQ(static_cast<GLenum>(GL_TEXTURE_2D), dest_target);
- if (BindFramebufferTexture2D(source_target, source_id, framebuffer)) {
+ DCHECK(source_level == 0 || decoder->GetFeatureInfo()->IsES3Capable());
+ if (BindFramebufferTexture2D(source_target, source_id, source_level,
+ framebuffer)) {
glBindTexture(dest_target, dest_id);
glTexParameterf(dest_target, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
glTexParameterf(dest_target, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
glTexParameteri(dest_target, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
glTexParameteri(dest_target, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
- glCopyTexImage2D(dest_target, 0 /* level */, dest_internal_format,
- 0 /* x */, 0 /* y */, width, height, 0 /* border */);
+ glCopyTexImage2D(dest_target, dest_level, dest_internal_format, 0 /* x */,
+ 0 /* y */, width, height, 0 /* border */);
}
decoder->RestoreTextureState(source_id);
@@ -290,8 +574,10 @@ void DoCopyTexImage2D(const gpu::gles2::GLES2Decoder* decoder,
void DoCopyTexSubImage2D(const gpu::gles2::GLES2Decoder* decoder,
GLenum source_target,
GLuint source_id,
+ GLint source_level,
GLenum dest_target,
GLuint dest_id,
+ GLint dest_level,
GLint xoffset,
GLint yoffset,
GLint source_x,
@@ -302,14 +588,16 @@ void DoCopyTexSubImage2D(const gpu::gles2::GLES2Decoder* decoder,
DCHECK(source_target == GL_TEXTURE_2D ||
source_target == GL_TEXTURE_RECTANGLE_ARB);
DCHECK_EQ(static_cast<GLenum>(GL_TEXTURE_2D), dest_target);
- if (BindFramebufferTexture2D(source_target, source_id, framebuffer)) {
+ DCHECK(source_level == 0 || decoder->GetFeatureInfo()->IsES3Capable());
+ if (BindFramebufferTexture2D(source_target, source_id, source_level,
+ framebuffer)) {
glBindTexture(dest_target, dest_id);
glTexParameterf(dest_target, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
glTexParameterf(dest_target, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
glTexParameteri(dest_target, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
glTexParameteri(dest_target, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
- glCopyTexSubImage2D(dest_target, 0 /* level */, xoffset, yoffset,
- source_x, source_y, source_width, source_height);
+ glCopyTexSubImage2D(dest_target, dest_level, xoffset, yoffset, source_x,
+ source_y, source_width, source_height);
}
decoder->RestoreTextureState(source_id);
@@ -327,8 +615,8 @@ namespace gles2 {
CopyTextureCHROMIUMResourceManager::CopyTextureCHROMIUMResourceManager()
: initialized_(false),
nv_egl_stream_consumer_external_(false),
- vertex_shader_(0u),
- fragment_shaders_(NUM_FRAGMENT_SHADERS, 0u),
+ vertex_shaders_(kNumVertexShaders, 0u),
+ fragment_shaders_(kNumFragmentShaders, 0u),
vertex_array_object_id_(0u),
buffer_id_(0u),
framebuffer_(0u) {}
@@ -393,7 +681,8 @@ void CopyTextureCHROMIUMResourceManager::Destroy() {
glDeleteFramebuffersEXT(1, &framebuffer_);
framebuffer_ = 0;
- DeleteShader(vertex_shader_);
+ std::for_each(
+ vertex_shaders_.begin(), vertex_shaders_.end(), DeleteShader);
std::for_each(
fragment_shaders_.begin(), fragment_shaders_.end(), DeleteShader);
@@ -411,54 +700,74 @@ void CopyTextureCHROMIUMResourceManager::DoCopyTexture(
const gles2::GLES2Decoder* decoder,
GLenum source_target,
GLuint source_id,
+ GLint source_level,
GLenum source_internal_format,
GLenum dest_target,
GLuint dest_id,
+ GLint dest_level,
GLenum dest_internal_format,
GLsizei width,
GLsizei height,
bool flip_y,
bool premultiply_alpha,
- bool unpremultiply_alpha) {
+ bool unpremultiply_alpha,
+ CopyTextureMethod method) {
bool premultiply_alpha_change = premultiply_alpha ^ unpremultiply_alpha;
- // GL_INVALID_OPERATION is generated if the currently bound framebuffer's
- // format does not contain a superset of the components required by the base
- // format of internalformat.
- // https://www.khronos.org/opengles/sdk/docs/man/xhtml/glCopyTexImage2D.xml
- bool source_format_contain_superset_of_dest_format =
- (source_internal_format == dest_internal_format &&
- source_internal_format != GL_BGRA_EXT) ||
- (source_internal_format == GL_RGBA && dest_internal_format == GL_RGB);
+
// GL_TEXTURE_RECTANGLE_ARB on FBO is supported by OpenGL, not GLES2,
// so restrict this to GL_TEXTURE_2D.
if (source_target == GL_TEXTURE_2D && dest_target == GL_TEXTURE_2D &&
- !flip_y && !premultiply_alpha_change &&
- source_format_contain_superset_of_dest_format) {
- DoCopyTexImage2D(decoder,
- source_target,
- source_id,
- dest_target,
- dest_id,
- dest_internal_format,
- width,
- height,
- framebuffer_);
+ !flip_y && !premultiply_alpha_change && method == DIRECT_COPY) {
+ DoCopyTexImage2D(decoder, source_target, source_id, source_level,
+ dest_target, dest_id, dest_level, dest_internal_format,
+ width, height, framebuffer_);
return;
}
+ GLuint dest_texture = dest_id;
+ GLuint intermediate_texture = 0;
+ GLint original_dest_level = dest_level;
+ if (method == DRAW_AND_COPY) {
+ GLenum adjusted_internal_format =
+ getIntermediateFormat(dest_internal_format);
+ glGenTextures(1, &intermediate_texture);
+ glBindTexture(dest_target, intermediate_texture);
+ GLenum format = TextureManager::ExtractFormatFromStorageFormat(
+ adjusted_internal_format);
+ GLenum type =
+ TextureManager::ExtractTypeFromStorageFormat(adjusted_internal_format);
+
+ glTexImage2D(dest_target, 0, adjusted_internal_format, width, height, 0,
+ format, type, nullptr);
+ dest_texture = intermediate_texture;
+ dest_level = 0;
+ dest_internal_format = adjusted_internal_format;
+ }
// Use kIdentityMatrix if no transform passed in.
- DoCopyTextureWithTransform(decoder, source_target, source_id, dest_target,
- dest_id, width, height, flip_y, premultiply_alpha,
- unpremultiply_alpha, kIdentityMatrix);
+ DoCopyTextureWithTransform(
+ decoder, source_target, source_id, source_level, source_internal_format,
+ dest_target, dest_texture, dest_level, dest_internal_format, width,
+ height, flip_y, premultiply_alpha, unpremultiply_alpha, kIdentityMatrix);
+
+ if (method == DRAW_AND_COPY) {
+ source_level = 0;
+ dest_level = original_dest_level;
+ DoCopyTexImage2D(decoder, dest_target, intermediate_texture, source_level,
+ dest_target, dest_id, dest_level, dest_internal_format,
+ width, height, framebuffer_);
+ glDeleteTextures(1, &intermediate_texture);
+ }
}
void CopyTextureCHROMIUMResourceManager::DoCopySubTexture(
const gles2::GLES2Decoder* decoder,
GLenum source_target,
GLuint source_id,
+ GLint source_level,
GLenum source_internal_format,
GLenum dest_target,
GLuint dest_id,
+ GLint dest_level,
GLenum dest_internal_format,
GLint xoffset,
GLint yoffset,
@@ -472,7 +781,8 @@ void CopyTextureCHROMIUMResourceManager::DoCopySubTexture(
GLsizei source_height,
bool flip_y,
bool premultiply_alpha,
- bool unpremultiply_alpha) {
+ bool unpremultiply_alpha,
+ CopyTextureMethod method) {
bool use_gl_copy_tex_sub_image_2d = true;
#if defined(OS_CHROMEOS) && defined(ARCH_CPU_X86_FAMILY)
// glDrawArrays is faster than glCopyTexSubImage2D on IA Mesa driver,
@@ -482,38 +792,70 @@ void CopyTextureCHROMIUMResourceManager::DoCopySubTexture(
use_gl_copy_tex_sub_image_2d = false;
#endif
bool premultiply_alpha_change = premultiply_alpha ^ unpremultiply_alpha;
- // GL_INVALID_OPERATION is generated if the currently bound framebuffer's
- // format does not contain a superset of the components required by the base
- // format of internalformat.
- // https://www.khronos.org/opengles/sdk/docs/man/xhtml/glCopyTexImage2D.xml
- bool source_format_contain_superset_of_dest_format =
- (source_internal_format == dest_internal_format &&
- source_internal_format != GL_BGRA_EXT) ||
- (source_internal_format == GL_RGBA && dest_internal_format == GL_RGB);
+
// GL_TEXTURE_RECTANGLE_ARB on FBO is supported by OpenGL, not GLES2,
// so restrict this to GL_TEXTURE_2D.
if (use_gl_copy_tex_sub_image_2d && source_target == GL_TEXTURE_2D &&
dest_target == GL_TEXTURE_2D && !flip_y && !premultiply_alpha_change &&
- source_format_contain_superset_of_dest_format) {
- DoCopyTexSubImage2D(decoder, source_target, source_id, dest_target, dest_id,
- xoffset, yoffset, x, y, width, height, framebuffer_);
+ method == DIRECT_COPY) {
+ DoCopyTexSubImage2D(decoder, source_target, source_id, source_level,
+ dest_target, dest_id, dest_level, xoffset, yoffset, x,
+ y, width, height, framebuffer_);
return;
}
+ GLint dest_xoffset = xoffset;
+ GLint dest_yoffset = yoffset;
+ GLuint dest_texture = dest_id;
+ GLint original_dest_level = dest_level;
+ GLuint intermediate_texture = 0;
+ if (method == DRAW_AND_COPY) {
+ GLenum adjusted_internal_format =
+ getIntermediateFormat(dest_internal_format);
+ glGenTextures(1, &intermediate_texture);
+ glBindTexture(dest_target, intermediate_texture);
+ GLenum format = TextureManager::ExtractFormatFromStorageFormat(
+ adjusted_internal_format);
+ GLenum type =
+ TextureManager::ExtractTypeFromStorageFormat(adjusted_internal_format);
+
+ glTexImage2D(dest_target, 0, adjusted_internal_format, width, height, 0,
+ format, type, nullptr);
+ dest_texture = intermediate_texture;
+ dest_level = 0;
+ dest_internal_format = adjusted_internal_format;
+ dest_xoffset = 0;
+ dest_yoffset = 0;
+ dest_width = width;
+ dest_height = height;
+ }
+
DoCopySubTextureWithTransform(
- decoder, source_target, source_id, source_internal_format, dest_target,
- dest_id, dest_internal_format, xoffset, yoffset, x, y, width, height,
- dest_width, dest_height, source_width, source_height, flip_y,
- premultiply_alpha, unpremultiply_alpha, kIdentityMatrix);
+ decoder, source_target, source_id, source_level, source_internal_format,
+ dest_target, dest_texture, dest_level, dest_internal_format, dest_xoffset,
+ dest_yoffset, x, y, width, height, dest_width, dest_height, source_width,
+ source_height, flip_y, premultiply_alpha, unpremultiply_alpha,
+ kIdentityMatrix);
+
+ if (method == DRAW_AND_COPY) {
+ source_level = 0;
+ dest_level = original_dest_level;
+ DoCopyTexSubImage2D(decoder, dest_target, intermediate_texture,
+ source_level, dest_target, dest_id, dest_level, xoffset,
+ yoffset, 0, 0, width, height, framebuffer_);
+ glDeleteTextures(1, &intermediate_texture);
+ }
}
void CopyTextureCHROMIUMResourceManager::DoCopySubTextureWithTransform(
const gles2::GLES2Decoder* decoder,
GLenum source_target,
GLuint source_id,
+ GLint source_level,
GLenum source_internal_format,
GLenum dest_target,
GLuint dest_id,
+ GLint dest_level,
GLenum dest_internal_format,
GLint xoffset,
GLint yoffset,
@@ -529,18 +871,23 @@ void CopyTextureCHROMIUMResourceManager::DoCopySubTextureWithTransform(
bool premultiply_alpha,
bool unpremultiply_alpha,
const GLfloat transform_matrix[16]) {
- DoCopyTextureInternal(decoder, source_target, source_id, dest_target, dest_id,
- xoffset, yoffset, x, y, width, height, dest_width, dest_height,
- source_width, source_height, flip_y, premultiply_alpha,
- unpremultiply_alpha, transform_matrix);
+ DoCopyTextureInternal(
+ decoder, source_target, source_id, source_level, source_internal_format,
+ dest_target, dest_id, dest_level, dest_internal_format, xoffset, yoffset,
+ x, y, width, height, dest_width, dest_height, source_width, source_height,
+ flip_y, premultiply_alpha, unpremultiply_alpha, transform_matrix);
}
void CopyTextureCHROMIUMResourceManager::DoCopyTextureWithTransform(
const gles2::GLES2Decoder* decoder,
GLenum source_target,
GLuint source_id,
+ GLint source_level,
+ GLenum source_format,
GLenum dest_target,
GLuint dest_id,
+ GLint dest_level,
+ GLenum dest_format,
GLsizei width,
GLsizei height,
bool flip_y,
@@ -549,9 +896,10 @@ void CopyTextureCHROMIUMResourceManager::DoCopyTextureWithTransform(
const GLfloat transform_matrix[16]) {
GLsizei dest_width = width;
GLsizei dest_height = height;
- DoCopyTextureInternal(decoder, source_target, source_id, dest_target, dest_id,
- 0, 0, 0, 0, width, height, dest_width, dest_height,
- width, height, flip_y, premultiply_alpha,
+ DoCopyTextureInternal(decoder, source_target, source_id, source_level,
+ source_format, dest_target, dest_id, dest_level,
+ dest_format, 0, 0, 0, 0, width, height, dest_width,
+ dest_height, width, height, flip_y, premultiply_alpha,
unpremultiply_alpha, transform_matrix);
}
@@ -559,8 +907,12 @@ void CopyTextureCHROMIUMResourceManager::DoCopyTextureInternal(
const gles2::GLES2Decoder* decoder,
GLenum source_target,
GLuint source_id,
+ GLint source_level,
+ GLenum source_format,
GLenum dest_target,
GLuint dest_id,
+ GLint dest_level,
+ GLenum dest_format,
GLint xoffset,
GLint yoffset,
GLint x,
@@ -580,6 +932,8 @@ void CopyTextureCHROMIUMResourceManager::DoCopyTextureInternal(
source_target == GL_TEXTURE_EXTERNAL_OES);
DCHECK(dest_target == GL_TEXTURE_2D ||
dest_target == GL_TEXTURE_RECTANGLE_ARB);
+ DCHECK_GE(source_level, 0);
+ DCHECK_GE(dest_level, 0);
DCHECK_GE(xoffset, 0);
DCHECK_LE(xoffset + width, dest_width);
DCHECK_GE(yoffset, 0);
@@ -607,8 +961,11 @@ void CopyTextureCHROMIUMResourceManager::DoCopyTextureInternal(
glVertexAttribPointer(kVertexPositionAttrib, 2, GL_FLOAT, GL_FALSE, 0, 0);
}
- FragmentShaderId fragment_shader_id = GetFragmentShaderId(
- premultiply_alpha, unpremultiply_alpha, source_target);
+ ShaderId vertex_shader_id = GetVertexShaderId(source_target);
+ DCHECK_LT(static_cast<size_t>(vertex_shader_id), vertex_shaders_.size());
+ ShaderId fragment_shader_id = GetFragmentShaderId(
+ premultiply_alpha, unpremultiply_alpha, source_target,
+ source_format, dest_format);
DCHECK_LT(static_cast<size_t>(fragment_shader_id), fragment_shaders_.size());
ProgramMapKey key(fragment_shader_id);
@@ -616,18 +973,21 @@ void CopyTextureCHROMIUMResourceManager::DoCopyTextureInternal(
// Create program if necessary.
if (!info->program) {
info->program = glCreateProgram();
- if (!vertex_shader_) {
- vertex_shader_ = glCreateShader(GL_VERTEX_SHADER);
- std::string source = GetVertexShaderSource(gl_version_info);
- CompileShader(vertex_shader_, source.c_str());
+ GLuint* vertex_shader = &vertex_shaders_[vertex_shader_id];
+ if (!*vertex_shader) {
+ *vertex_shader = glCreateShader(GL_VERTEX_SHADER);
+ std::string source =
+ GetVertexShaderSource(gl_version_info, source_target);
+ CompileShader(*vertex_shader, source.c_str());
}
- glAttachShader(info->program, vertex_shader_);
+ glAttachShader(info->program, *vertex_shader);
GLuint* fragment_shader = &fragment_shaders_[fragment_shader_id];
if (!*fragment_shader) {
*fragment_shader = glCreateShader(GL_FRAGMENT_SHADER);
std::string source = GetFragmentShaderSource(
gl_version_info, premultiply_alpha, unpremultiply_alpha,
- nv_egl_stream_consumer_external_, source_target);
+ nv_egl_stream_consumer_external_, source_target, source_format,
+ dest_format);
CompileShader(*fragment_shader, source.c_str());
}
glAttachShader(info->program, *fragment_shader);
@@ -725,7 +1085,9 @@ void CopyTextureCHROMIUMResourceManager::DoCopyTextureInternal(
(x + width / 2.f) * m_x / source_width,
(y + height / 2.f) * m_y / source_height);
- if (BindFramebufferTexture2D(dest_target, dest_id, framebuffer_)) {
+ DCHECK(dest_level == 0 || decoder->GetFeatureInfo()->IsES3Capable());
+ if (BindFramebufferTexture2D(dest_target, dest_id, dest_level,
+ framebuffer_)) {
#ifndef NDEBUG
// glValidateProgram of MACOSX validates FBO unlike other platforms, so
// glValidateProgram must be called after FBO binding. crbug.com/463439
@@ -741,6 +1103,9 @@ void CopyTextureCHROMIUMResourceManager::DoCopyTextureInternal(
glUniform1i(info->sampler_handle, 0);
glBindTexture(source_target, source_id);
+ DCHECK(source_level == 0 || decoder->GetFeatureInfo()->IsES3Capable());
+ if (source_level > 0)
+ glTexParameteri(source_target, GL_TEXTURE_BASE_LEVEL, source_level);
glTexParameterf(source_target, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
glTexParameterf(source_target, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
glTexParameteri(source_target, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
diff --git a/chromium/gpu/command_buffer/service/gles2_cmd_copy_texture_chromium.h b/chromium/gpu/command_buffer/service/gles2_cmd_copy_texture_chromium.h
index dd705323bad..93cdc58c726 100644
--- a/chromium/gpu/command_buffer/service/gles2_cmd_copy_texture_chromium.h
+++ b/chromium/gpu/command_buffer/service/gles2_cmd_copy_texture_chromium.h
@@ -18,6 +18,28 @@ namespace gles2 {
class GLES2Decoder;
+enum CopyTextureMethod {
+ // Use CopyTex{Sub}Image2D to copy from the source to the destination.
+ DIRECT_COPY,
+ // Draw from the source to the destination texture.
+ DIRECT_DRAW,
+ // Draw to an intermediate texture, and then copy to the destination texture.
+ DRAW_AND_COPY,
+ // CopyTexture isn't available.
+ NOT_COPYABLE
+};
+
+// TODOs(qiankun.miao@intel.com):
+// 1. Add readback path for RGB9_E5 and float formats (if extension isn't
+// available and they are not color-renderable).
+// 2. Support faces of cube map texture as valid dest target. The cube map
+// texture may be incomplete currently.
+// 3. Add support for levels other than 0.
+// 4. Support ALPHA, LUMINANCE and LUMINANCE_ALPHA formats on core profile.
+// 5. Update the extension doc after the whole work is done
+// in gpu/GLES2/extensions/CHROMIUM/CHROMIUM_copy_texture.txt. We probably
+//    will need an ES2 version and an ES3 version.
+
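In rough GL terms, and only as an illustrative sketch (the framebuffer and texture identifiers below are placeholders, and the draw path omits program and vertex setup), the three copyable methods correspond to:

  // DIRECT_COPY: attach the source level to a scratch framebuffer and let the
  // driver copy straight into the destination level.
  glBindFramebufferEXT(GL_FRAMEBUFFER, scratch_fbo);
  glFramebufferTexture2DEXT(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D,
                            source_id, source_level);
  glBindTexture(GL_TEXTURE_2D, dest_id);
  glCopyTexSubImage2D(GL_TEXTURE_2D, dest_level, 0, 0, 0, 0, width, height);

  // DIRECT_DRAW: attach the destination level instead and draw a quad that
  // samples the source texture.
  glFramebufferTexture2DEXT(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D,
                            dest_id, dest_level);
  glBindTexture(GL_TEXTURE_2D, source_id);
  glDrawArrays(GL_TRIANGLE_STRIP, 0, 4);

  // DRAW_AND_COPY: DIRECT_DRAW into level 0 of a color-renderable intermediate
  // texture, then DIRECT_COPY from that intermediate into the destination
  // level, then delete the intermediate texture.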
// This class encapsulates the resources required to implement the
// GL_CHROMIUM_copy_texture extension. The copy operation is performed
// via glCopyTexImage2D() or a blit to a framebuffer object.
@@ -34,22 +56,27 @@ class GPU_EXPORT CopyTextureCHROMIUMResourceManager {
void DoCopyTexture(const gles2::GLES2Decoder* decoder,
GLenum source_target,
GLuint source_id,
+ GLint source_level,
GLenum source_internal_format,
GLenum dest_target,
GLuint dest_id,
+ GLint dest_level,
GLenum dest_internal_format,
GLsizei width,
GLsizei height,
bool flip_y,
bool premultiply_alpha,
- bool unpremultiply_alpha);
+ bool unpremultiply_alpha,
+ CopyTextureMethod method);
void DoCopySubTexture(const gles2::GLES2Decoder* decoder,
GLenum source_target,
GLuint source_id,
+ GLint source_level,
GLenum source_internal_format,
GLenum dest_target,
GLuint dest_id,
+ GLint dest_level,
GLenum dest_internal_format,
GLint xoffset,
GLint yoffset,
@@ -63,14 +90,17 @@ class GPU_EXPORT CopyTextureCHROMIUMResourceManager {
GLsizei source_height,
bool flip_y,
bool premultiply_alpha,
- bool unpremultiply_alpha);
+ bool unpremultiply_alpha,
+ CopyTextureMethod method);
void DoCopySubTextureWithTransform(const gles2::GLES2Decoder* decoder,
GLenum source_target,
GLuint source_id,
+ GLint source_level,
GLenum source_internal_format,
GLenum dest_target,
GLuint dest_id,
+ GLint dest_level,
GLenum dest_internal_format,
GLint xoffset,
GLint yoffset,
@@ -94,8 +124,12 @@ class GPU_EXPORT CopyTextureCHROMIUMResourceManager {
void DoCopyTextureWithTransform(const gles2::GLES2Decoder* decoder,
GLenum source_target,
GLuint source_id,
+ GLint source_level,
+ GLenum source_format,
GLenum dest_target,
GLuint dest_id,
+ GLint dest_level,
+ GLenum dest_format,
GLsizei width,
GLsizei height,
bool flip_y,
@@ -136,8 +170,12 @@ class GPU_EXPORT CopyTextureCHROMIUMResourceManager {
void DoCopyTextureInternal(const gles2::GLES2Decoder* decoder,
GLenum source_target,
GLuint source_id,
+ GLint source_level,
+ GLenum source_format,
GLenum dest_target,
GLuint dest_id,
+ GLint dest_level,
+ GLenum dest_format,
GLint xoffset,
GLint yoffset,
GLint x,
@@ -156,7 +194,7 @@ class GPU_EXPORT CopyTextureCHROMIUMResourceManager {
bool initialized_;
bool nv_egl_stream_consumer_external_;
typedef std::vector<GLuint> ShaderVector;
- GLuint vertex_shader_;
+ ShaderVector vertex_shaders_;
ShaderVector fragment_shaders_;
typedef int ProgramMapKey;
typedef base::hash_map<ProgramMapKey, ProgramInfo> ProgramMap;
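For reference, a hypothetical call site using the widened DoCopyTexture signature above (the decoder, service ids, formats and sizes are placeholders; in the decoder the method argument comes from format validation rather than being hard-coded):

  copy_texture_manager->DoCopyTexture(
      decoder, GL_TEXTURE_2D, source_service_id, /*source_level=*/2,
      /*source_internal_format=*/GL_RGBA8, GL_TEXTURE_2D, dest_service_id,
      /*dest_level=*/0, /*dest_internal_format=*/GL_RGBA8, width, height,
      /*flip_y=*/false, /*premultiply_alpha=*/false,
      /*unpremultiply_alpha=*/false, DIRECT_DRAW);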
diff --git a/chromium/gpu/command_buffer/service/gles2_cmd_decoder.cc b/chromium/gpu/command_buffer/service/gles2_cmd_decoder.cc
index 7203adce8bd..bbff690e899 100644
--- a/chromium/gpu/command_buffer/service/gles2_cmd_decoder.cc
+++ b/chromium/gpu/command_buffer/service/gles2_cmd_decoder.cc
@@ -707,6 +707,7 @@ class GLES2DecoderImpl : public GLES2Decoder, public ErrorStateClient {
// Workarounds
void OnFboChanged() const;
void OnUseFramebuffer() const;
+ void UpdateFramebufferSRGB(Framebuffer* framebuffer);
error::ContextLostReason GetContextLostReasonFromResetStatus(
GLenum reset_status) const;
@@ -838,7 +839,7 @@ class GLES2DecoderImpl : public GLES2Decoder, public ErrorStateClient {
// Get the service side ID for the bound draw framebuffer.
// If it's back buffer, 0 is returned.
- GLuint GetBoundDrawFramebufferServiceId();
+ GLuint GetBoundDrawFramebufferServiceId() const;
// Get the format/type of the currently bound frame buffer (either FBO or
// regular back buffer).
@@ -889,6 +890,10 @@ class GLES2DecoderImpl : public GLES2Decoder, public ErrorStateClient {
const void* data,
ContextState::Dimension dimension);
+ bool ValidateCopyTexFormatHelper(GLenum internal_format,
+ GLenum read_format,
+ GLenum read_type,
+ std::string* output_error_msg);
// Validate if |format| is valid for CopyTex{Sub}Image functions.
// If not, generate a GL error and return false.
bool ValidateCopyTexFormat(const char* func_name, GLenum internal_format,
@@ -947,7 +952,9 @@ class GLES2DecoderImpl : public GLES2Decoder, public ErrorStateClient {
GLsizeiptr size);
void DoCopyTextureCHROMIUM(GLuint source_id,
+ GLint source_level,
GLuint dest_id,
+ GLint dest_level,
GLenum internal_format,
GLenum dest_type,
GLboolean unpack_flip_y,
@@ -955,7 +962,9 @@ class GLES2DecoderImpl : public GLES2Decoder, public ErrorStateClient {
GLboolean unpack_unmultiply_alpha);
void DoCopySubTextureCHROMIUM(GLuint source_id,
+ GLint source_level,
GLuint dest_id,
+ GLint dest_level,
GLint xoffset,
GLint yoffset,
GLint x,
@@ -1332,6 +1341,10 @@ class GLES2DecoderImpl : public GLES2Decoder, public ErrorStateClient {
// NONE through DrawBuffers, to be on the safe side. Return true.
bool ValidateAndAdjustDrawBuffers(const char* function_name);
+ // Filter out the draw buffers that have no images attached but are not NONE
+ // through DrawBuffers, to be on the safe side.
+ void AdjustDrawBuffers();
+
// Checks if all active uniform blocks in the current program are backed by
// a buffer of sufficient size.
// If not, generates an INVALID_OPERATION to avoid undefined behavior in
@@ -1671,6 +1684,12 @@ class GLES2DecoderImpl : public GLES2Decoder, public ErrorStateClient {
// Wrapper for glLinkProgram
void DoLinkProgram(GLuint program);
+  // Wrapper for glOverlayPromotionHintCHROMIUM
+ void DoOverlayPromotionHintCHROMIUM(GLuint client_id,
+ GLboolean promotion_hint,
+ GLint display_x,
+ GLint display_y);
+
// Wrapper for glReadBuffer
void DoReadBuffer(GLenum src);
@@ -2002,9 +2021,10 @@ class GLES2DecoderImpl : public GLES2Decoder, public ErrorStateClient {
bool ValidateCopyTextureCHROMIUMTextures(const char* function_name,
TextureRef* source_texture_ref,
TextureRef* dest_texture_ref);
- bool ValidateCopyTextureCHROMIUMInternalFormats(
+ CopyTextureMethod ValidateCopyTextureCHROMIUMInternalFormats(
const char* function_name,
TextureRef* source_texture_ref,
+ GLint source_level,
GLenum dest_internal_format);
bool ValidateCompressedCopyTextureCHROMIUM(const char* function_name,
TextureRef* source_texture_ref,
@@ -2756,10 +2776,13 @@ GLenum BackTexture::Target() {
bool BackTexture::AllocateNativeGpuMemoryBuffer(const gfx::Size& size,
GLenum format,
bool zero) {
- gfx::BufferFormat buffer_format = gfx::BufferFormat::RGBA_8888;
+ DCHECK(format == GL_RGB || format == GL_RGBA);
scoped_refptr<gl::GLImage> image =
decoder_->GetContextGroup()->image_factory()->CreateAnonymousImage(
- size, buffer_format, format);
+ size,
+ format == GL_RGB ? gfx::BufferFormat::RGBX_8888
+ : gfx::BufferFormat::RGBA_8888,
+ format);
if (!image || !image->BindTexImage(Target()))
return false;
@@ -3108,6 +3131,9 @@ bool GLES2DecoderImpl::Initialize(
case GL_TEXTURE_RECTANGLE_ARB:
supported = feature_info_->feature_flags().arb_texture_rectangle;
break;
+ case GL_TEXTURE_EXTERNAL_OES:
+ supported = feature_info_->feature_flags().oes_egl_image_external;
+ break;
case GL_TEXTURE_2D:
supported = true;
break;
@@ -3482,6 +3508,7 @@ bool GLES2DecoderImpl::Initialize(
DoBindBuffer(GL_ELEMENT_ARRAY_BUFFER, 0);
DoBindFramebuffer(GL_FRAMEBUFFER, 0);
DoBindRenderbuffer(GL_RENDERBUFFER, 0);
+ UpdateFramebufferSRGB(nullptr);
bool call_gl_clear = !surfaceless_ && !offscreen;
#if defined(OS_ANDROID)
@@ -3574,6 +3601,12 @@ Capabilities GLES2DecoderImpl::GetCapabilities() {
&caps.max_vertex_texture_image_units, 1);
DoGetIntegerv(GL_MAX_VERTEX_UNIFORM_VECTORS, &caps.max_vertex_uniform_vectors,
1);
+ {
+ GLint dims[2] = {0, 0};
+ DoGetIntegerv(GL_MAX_VIEWPORT_DIMS, dims, 2);
+ caps.max_viewport_width = dims[0];
+ caps.max_viewport_height = dims[1];
+ }
DoGetIntegerv(GL_NUM_COMPRESSED_TEXTURE_FORMATS,
&caps.num_compressed_texture_formats, 1);
DoGetIntegerv(GL_NUM_SHADER_BINARY_FORMATS, &caps.num_shader_binary_formats,
@@ -3730,7 +3763,7 @@ bool GLES2DecoderImpl::InitializeShaderTranslator() {
return true;
}
ShBuiltInResources resources;
- ShInitBuiltInResources(&resources);
+ sh::InitBuiltInResources(&resources);
resources.MaxVertexAttribs = group_->max_vertex_attribs();
resources.MaxVertexUniformVectors =
group_->max_vertex_uniform_vectors();
@@ -3850,6 +3883,8 @@ bool GLES2DecoderImpl::InitializeShaderTranslator() {
driver_bug_workarounds |= SH_DONT_REMOVE_INVARIANT_FOR_FRAGMENT_INPUT;
if (workarounds().remove_invariant_and_centroid_for_essl3)
driver_bug_workarounds |= SH_REMOVE_INVARIANT_AND_CENTROID_FOR_ESSL3;
+ if (workarounds().rewrite_float_unary_minus_operator)
+ driver_bug_workarounds |= SH_REWRITE_FLOAT_UNARY_MINUS_OPERATOR;
resources.WEBGL_debug_shader_precision =
group_->gpu_preferences().emulate_shader_precision;
@@ -4315,19 +4350,32 @@ bool GLES2DecoderImpl::CheckBoundDrawFramebufferValid(const char* func_name) {
if (!SupportsSeparateFramebufferBinds())
OnUseFramebuffer();
- if (valid && feature_info_->feature_flags().desktop_srgb_support) {
- // If framebuffer contains sRGB images, then enable FRAMEBUFFER_SRGB.
- // Otherwise, disable FRAMEBUFFER_SRGB. Assume default fbo does not have
- // sRGB image.
- // In theory, we can just leave FRAMEBUFFER_SRGB on. However, many drivers
- // behave incorrectly when all images are linear encoding, they still apply
- // the sRGB conversion, but when at least one image is sRGB, then they
- // behave correctly.
- bool enable_framebuffer_srgb =
- framebuffer && framebuffer->HasSRGBAttachments();
+
+ UpdateFramebufferSRGB(framebuffer);
+ return true;
+}
+
+void GLES2DecoderImpl::UpdateFramebufferSRGB(Framebuffer* framebuffer) {
+ // Manually set the value of FRAMEBUFFER_SRGB based on the state that was set
+ // by the client.
+ bool needs_enable_disable_framebuffer_srgb = false;
+ bool enable_framebuffer_srgb = true;
+ if (feature_info_->feature_flags().ext_srgb_write_control) {
+ needs_enable_disable_framebuffer_srgb = true;
+ enable_framebuffer_srgb &= state_.GetEnabled(GL_FRAMEBUFFER_SRGB);
+ }
+  // On desktop, enable FRAMEBUFFER_SRGB only if the framebuffer contains sRGB
+  // attachments. In theory, we could just leave FRAMEBUFFER_SRGB enabled;
+  // however, many drivers behave incorrectly when no attachments are sRGB.
+  // When at least one attachment is sRGB, they behave correctly.
+ if (feature_info_->feature_flags().desktop_srgb_support) {
+ needs_enable_disable_framebuffer_srgb = true;
+ // Assume that the default fbo does not have an sRGB image.
+ enable_framebuffer_srgb &= framebuffer && framebuffer->HasSRGBAttachments();
+ }
+ if (needs_enable_disable_framebuffer_srgb)
state_.EnableDisableFramebufferSRGB(enable_framebuffer_srgb);
- }
- return valid;
}
bool GLES2DecoderImpl::CheckBoundReadFramebufferValid(
@@ -4408,7 +4456,7 @@ GLuint GLES2DecoderImpl::GetBoundReadFramebufferServiceId() {
return 0;
}
-GLuint GLES2DecoderImpl::GetBoundDrawFramebufferServiceId() {
+GLuint GLES2DecoderImpl::GetBoundDrawFramebufferServiceId() const {
Framebuffer* framebuffer = GetBoundDrawFramebuffer();
if (framebuffer) {
return framebuffer->service_id();
@@ -4791,6 +4839,10 @@ void GLES2DecoderImpl::Destroy(bool have_context) {
offscreen_resolved_frame_buffer_.reset();
offscreen_resolved_color_texture_.reset();
+ // Release all fences now, because some fence types need the context to be
+ // current on destruction.
+ pending_readpixel_fences_ = std::queue<FenceCallback>();
+
// Need to release these before releasing |group_| which may own the
// ShaderTranslatorCache.
fragment_translator_ = NULL;
@@ -5515,14 +5567,13 @@ void GLES2DecoderImpl::RestoreTextureState(unsigned service_id) const {
if (texture) {
GLenum target = texture->target();
glBindTexture(target, service_id);
- glTexParameteri(
- target, GL_TEXTURE_WRAP_S, texture->wrap_s());
- glTexParameteri(
- target, GL_TEXTURE_WRAP_T, texture->wrap_t());
- glTexParameteri(
- target, GL_TEXTURE_MIN_FILTER, texture->min_filter());
- glTexParameteri(
- target, GL_TEXTURE_MAG_FILTER, texture->mag_filter());
+ glTexParameteri(target, GL_TEXTURE_WRAP_S, texture->wrap_s());
+ glTexParameteri(target, GL_TEXTURE_WRAP_T, texture->wrap_t());
+ glTexParameteri(target, GL_TEXTURE_MIN_FILTER, texture->min_filter());
+ glTexParameteri(target, GL_TEXTURE_MAG_FILTER, texture->mag_filter());
+ if (feature_info_->IsWebGL2OrES3Context()) {
+ glTexParameteri(target, GL_TEXTURE_BASE_LEVEL, texture->base_level());
+ }
RestoreTextureUnitBindings(state_.active_texture_unit);
}
}
@@ -5561,14 +5612,16 @@ uint32_t GLES2DecoderImpl::GetAndClearBackbufferClearBitsForTest() {
}
void GLES2DecoderImpl::OnFboChanged() const {
- if (workarounds().restore_scissor_on_fbo_change)
- state_.fbo_binding_for_scissor_workaround_dirty = true;
+ state_.fbo_binding_for_scissor_workaround_dirty = true;
}
// Called after the FBO is checked for completeness.
void GLES2DecoderImpl::OnUseFramebuffer() const {
- if (state_.fbo_binding_for_scissor_workaround_dirty) {
- state_.fbo_binding_for_scissor_workaround_dirty = false;
+ if (!state_.fbo_binding_for_scissor_workaround_dirty)
+ return;
+ state_.fbo_binding_for_scissor_workaround_dirty = false;
+
+ if (workarounds().restore_scissor_on_fbo_change) {
// The driver forgets the correct scissor when modifying the FBO binding.
glScissor(state_.scissor_x,
state_.scissor_y,
@@ -5579,6 +5632,26 @@ void GLES2DecoderImpl::OnUseFramebuffer() const {
// it's unclear how this bug works.
glFlush();
}
+
+ if (workarounds().force_update_scissor_state_when_binding_fbo0 &&
+ GetBoundDrawFramebufferServiceId() == 0) {
+ // The theory is that FBO0 keeps some internal (in HW regs maybe?) scissor
+ // test state, but the driver forgets to update it with GL_SCISSOR_TEST
+    // when FBO0 gets bound. (So it's stuck with whatever state it had when we
+    // last switched away from it.)
+ // If the internal scissor test state was enabled, it does update its
+ // internal scissor rect with GL_SCISSOR_BOX though.
+ if (state_.enable_flags.cached_scissor_test) {
+ // The driver early outs if the new state matches previous state so some
+ // shake up is needed.
+ glDisable(GL_SCISSOR_TEST);
+ glEnable(GL_SCISSOR_TEST);
+ } else {
+ // Ditto.
+ glEnable(GL_SCISSOR_TEST);
+ glDisable(GL_SCISSOR_TEST);
+ }
+ }
}
void GLES2DecoderImpl::DoBindFramebuffer(GLenum target, GLuint client_id) {
@@ -5873,7 +5946,7 @@ void GLES2DecoderImpl::InvalidateFramebufferImpl(
GLsizei height,
const char* function_name,
FramebufferOperation op) {
- Framebuffer* framebuffer = GetFramebufferInfoForTarget(GL_FRAMEBUFFER);
+ Framebuffer* framebuffer = GetFramebufferInfoForTarget(target);
// Because of performance issues, no-op if the format of the attachment is
// DEPTH_STENCIL and only one part is intended to be invalidated.
@@ -5881,7 +5954,7 @@ void GLES2DecoderImpl::InvalidateFramebufferImpl(
framebuffer->HasDepthStencilFormatAttachment();
bool invalidate_depth = false;
bool invalidate_stencil = false;
- std::unique_ptr<GLenum[]> validated_attachments(new GLenum[count]);
+ std::unique_ptr<GLenum[]> validated_attachments(new GLenum[count+1]);
GLsizei validated_count = 0;
// Validates the attachments. If one of them fails, the whole command fails.
@@ -5924,7 +5997,10 @@ void GLES2DecoderImpl::InvalidateFramebufferImpl(
validated_attachments[validated_count++] = attachment;
}
if (invalidate_depth && invalidate_stencil) {
- validated_attachments[validated_count++] = GL_DEPTH_STENCIL_ATTACHMENT;
+ // We do not use GL_DEPTH_STENCIL_ATTACHMENT here because
+ // it is not a valid token for glDiscardFramebufferEXT.
+ validated_attachments[validated_count++] = GL_DEPTH_ATTACHMENT;
+ validated_attachments[validated_count++] = GL_STENCIL_ATTACHMENT;
}
// If the default framebuffer is bound but we are still rendering to an
@@ -6134,24 +6210,23 @@ void GLES2DecoderImpl::DoGenerateMipmap(GLenum target) {
enable_srgb =
GetColorEncodingFromInternalFormat(internal_format) == GL_SRGB;
}
- if (!enable_srgb || !feature_info_->feature_flags().desktop_srgb_support ||
- !workarounds().decode_encode_srgb_for_generatemipmap) {
- if (feature_info_->feature_flags().desktop_srgb_support) {
- state_.EnableDisableFramebufferSRGB(enable_srgb);
- }
- glGenerateMipmapEXT(target);
- } else {
+ if (enable_srgb && feature_info_->feature_flags().desktop_srgb_support) {
+ state_.EnableDisableFramebufferSRGB(enable_srgb);
+ }
+ if (enable_srgb && workarounds().decode_encode_srgb_for_generatemipmap) {
if (target == GL_TEXTURE_2D) {
- state_.EnableDisableFramebufferSRGB(true);
if (!InitializeSRGBConverter("generateMipmap")) {
return;
}
srgb_converter_->GenerateMipmap(this, tex, target);
} else {
- // TODO(yizhou): If the target is GL_TEXTURE_3D or GL_TEXTURE_2D_ARRAY,
+      // TODO(yizhou): If the target is GL_TEXTURE_3D, GL_TEXTURE_2D_ARRAY or
+      // GL_TEXTURE_CUBE_MAP,
// this change can not generate correct mipmap.
glGenerateMipmapEXT(target);
}
+ } else {
+ glGenerateMipmapEXT(target);
}
if (texture_zero_level_set) {
@@ -6713,17 +6788,16 @@ bool GLES2DecoderImpl::GetHelper(
case GL_SAMPLER_BINDING:
*num_written = 1;
if (params) {
- // TODO(vmiura): Need to implement this for ES3 clients. WebGL 2 tracks
- // this on the client side.
- *params = 0;
+ DCHECK_LT(state_.active_texture_unit, state_.sampler_units.size());
+ Sampler* sampler =
+ state_.sampler_units[state_.active_texture_unit].get();
+ *params = sampler ? sampler->client_id() : 0;
}
return true;
case GL_TRANSFORM_FEEDBACK_BINDING:
*num_written = 1;
if (params) {
- // TODO(vmiura): Need to implement this for ES3 clients. WebGL 2 tracks
- // this on the client side.
- *params = 0;
+ *params = state_.bound_transform_feedback->client_id();
}
return true;
case GL_NUM_PROGRAM_BINARY_FORMATS:
@@ -6995,11 +7069,9 @@ void GLES2DecoderImpl::DoBindAttribLocation(GLuint program_id,
// At this point, the program's shaders may not be translated yet,
// therefore, we may not find the hashed attribute name.
// glBindAttribLocation call with original name is useless.
- // So instead, we should simply cache the binding, and then call
+ // So instead, we simply cache the binding, and then call
// Program::ExecuteBindAttribLocationCalls() right before link.
program->SetAttribLocationBinding(name, static_cast<GLint>(index));
- // TODO(zmo): Get rid of the following glBindAttribLocation call.
- glBindAttribLocation(program->service_id(), index, name.c_str());
}
error::Error GLES2DecoderImpl::HandleBindAttribLocationBucket(
@@ -7225,6 +7297,11 @@ error::Error GLES2DecoderImpl::HandleDeleteProgram(
error::Error GLES2DecoderImpl::DoClear(GLbitfield mask) {
const char* func_name = "glClear";
DCHECK(!ShouldDeferDraws());
+ if (mask &
+ ~(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT | GL_STENCIL_BUFFER_BIT)) {
+ LOCAL_SET_GL_ERROR(GL_INVALID_VALUE, func_name, "invalid mask");
+ return error::kNoError;
+ }
if (CheckBoundDrawFramebufferValid(func_name)) {
ApplyDirtyState();
if (workarounds().gl_clear_broken) {
@@ -7244,6 +7321,7 @@ error::Error GLES2DecoderImpl::DoClear(GLbitfield mask) {
return error::kNoError;
}
}
+ AdjustDrawBuffers();
glClear(mask);
}
return error::kNoError;
@@ -7416,6 +7494,11 @@ void GLES2DecoderImpl::DoDisable(GLenum cap) {
// DrawElements* for old desktop GL.
return;
}
+ if (cap == GL_FRAMEBUFFER_SRGB) {
+    // Enabling and disabling GL_FRAMEBUFFER_SRGB is handled manually in
+    // CheckBoundDrawFramebufferValid.
+ return;
+ }
glDisable(cap);
}
}
@@ -7428,6 +7511,11 @@ void GLES2DecoderImpl::DoEnable(GLenum cap) {
// DrawElements* for old desktop GL.
return;
}
+ if (cap == GL_FRAMEBUFFER_SRGB) {
+    // Enabling and disabling GL_FRAMEBUFFER_SRGB is handled manually in
+    // CheckBoundDrawFramebufferValid.
+ return;
+ }
glEnable(cap);
}
}
@@ -7861,6 +7949,15 @@ void GLES2DecoderImpl::DoBlitFramebufferCHROMIUM(
return;
}
+  // If the color/depth/stencil buffer has no image, we can remove the
+  // corresponding bit from the mask and return early if the mask equals 0.
+  // But validation should still be done against the original mask.
+ GLbitfield mask_blit = mask;
+
+  // Detect whether a designated read/depth/stencil buffer in the read
+  // framebuffer is missing an image while the corresponding buffer in the
+  // draw framebuffer has an image.
+ bool read_framebuffer_miss_image = false;
+
// Check whether read framebuffer and draw framebuffer have identical image
// TODO(yunchao): consider doing something like CheckFramebufferStatus().
// We cache the validation results, and if read_framebuffer doesn't change,
@@ -7884,6 +7981,18 @@ void GLES2DecoderImpl::DoBlitFramebufferCHROMIUM(
is_feedback_loop = FeedbackLoopTrue;
} else if (!read_framebuffer || !draw_framebuffer) {
is_feedback_loop = FeedbackLoopFalse;
+ if (read_framebuffer) {
+ if (((mask & GL_COLOR_BUFFER_BIT) != 0 &&
+ !GetBoundReadFramebufferInternalFormat()) ||
+ ((mask & GL_DEPTH_BUFFER_BIT) != 0 &&
+ !read_framebuffer->GetAttachment(GL_DEPTH_ATTACHMENT) &&
+ BoundFramebufferHasDepthAttachment()) ||
+ ((mask & GL_STENCIL_BUFFER_BIT) != 0 &&
+ !read_framebuffer->GetAttachment(GL_STENCIL_ATTACHMENT) &&
+ BoundFramebufferHasStencilAttachment())) {
+ read_framebuffer_miss_image = true;
+ }
+ }
} else {
DCHECK(read_framebuffer && draw_framebuffer);
if ((mask & GL_DEPTH_BUFFER_BIT) != 0) {
@@ -7892,7 +8001,10 @@ void GLES2DecoderImpl::DoBlitFramebufferCHROMIUM(
const Framebuffer::Attachment* depth_buffer_draw =
draw_framebuffer->GetAttachment(GL_DEPTH_ATTACHMENT);
if (!depth_buffer_draw || !depth_buffer_read) {
- mask &= ~GL_DEPTH_BUFFER_BIT;
+ mask_blit &= ~GL_DEPTH_BUFFER_BIT;
+ if (depth_buffer_draw) {
+ read_framebuffer_miss_image = true;
+ }
} else if (depth_buffer_draw->IsSameAttachment(depth_buffer_read)) {
is_feedback_loop = FeedbackLoopTrue;
}
@@ -7903,13 +8015,14 @@ void GLES2DecoderImpl::DoBlitFramebufferCHROMIUM(
const Framebuffer::Attachment* stencil_buffer_draw =
draw_framebuffer->GetAttachment(GL_STENCIL_ATTACHMENT);
if (!stencil_buffer_draw || !stencil_buffer_read) {
- mask &= ~GL_STENCIL_BUFFER_BIT;
+ mask_blit &= ~GL_STENCIL_BUFFER_BIT;
+ if (stencil_buffer_draw) {
+ read_framebuffer_miss_image = true;
+ }
} else if (stencil_buffer_draw->IsSameAttachment(stencil_buffer_read)) {
is_feedback_loop = FeedbackLoopTrue;
}
}
- if (!mask)
- return;
}
GLenum src_internal_format = GetBoundReadFramebufferInternalFormat();
@@ -7937,12 +8050,17 @@ void GLES2DecoderImpl::DoBlitFramebufferCHROMIUM(
const Framebuffer::Attachment* read_buffer =
is_feedback_loop == FeedbackLoopUnknown ?
read_framebuffer->GetReadBufferAttachment() : nullptr;
+ bool draw_buffer_has_image = false;
for (uint32_t ii = 0; ii < group_->max_draw_buffers(); ++ii) {
GLenum dst_format = GetBoundColorDrawBufferInternalFormat(
static_cast<GLint>(ii));
GLenum dst_type = GetBoundColorDrawBufferType(static_cast<GLint>(ii));
if (dst_format == 0)
continue;
+ draw_buffer_has_image = true;
+ if (!src_internal_format) {
+ read_framebuffer_miss_image = true;
+ }
if (GetColorEncodingFromInternalFormat(dst_format) == GL_SRGB)
draw_buffers_has_srgb = true;
if (read_buffer_samples > 0 &&
@@ -7976,12 +8094,19 @@ void GLES2DecoderImpl::DoBlitFramebufferCHROMIUM(
}
}
}
+ if (draw_framebuffer && !draw_buffer_has_image)
+ mask_blit &= ~GL_COLOR_BUFFER_BIT;
}
if (is_feedback_loop == FeedbackLoopTrue) {
LOCAL_SET_GL_ERROR(GL_INVALID_OPERATION, func_name,
"source buffer and destination buffers are identical");
return;
}
+  if (read_framebuffer_miss_image) {
+    LOCAL_SET_GL_ERROR(GL_INVALID_OPERATION, func_name,
+        "designated attachment point(s) in the read framebuffer have no image");
+ return;
+ }
if ((mask & (GL_DEPTH_BUFFER_BIT | GL_STENCIL_BUFFER_BIT)) != 0) {
if (filter != GL_NEAREST) {
@@ -7989,17 +8114,24 @@ void GLES2DecoderImpl::DoBlitFramebufferCHROMIUM(
"invalid filter for depth/stencil");
return;
}
+ }
- if ((GetBoundFramebufferDepthFormat(GL_READ_FRAMEBUFFER) !=
- GetBoundFramebufferDepthFormat(GL_DRAW_FRAMEBUFFER)) ||
- (GetBoundFramebufferStencilFormat(GL_READ_FRAMEBUFFER) !=
- GetBoundFramebufferStencilFormat(GL_DRAW_FRAMEBUFFER))) {
- LOCAL_SET_GL_ERROR(GL_INVALID_OPERATION, func_name,
- "src and dst formats differ for depth/stencil");
- return;
- }
+ mask = mask_blit;
+ if (!mask)
+ return;
+
+ if (((mask & GL_DEPTH_BUFFER_BIT) != 0 &&
+ (GetBoundFramebufferDepthFormat(GL_READ_FRAMEBUFFER) !=
+ GetBoundFramebufferDepthFormat(GL_DRAW_FRAMEBUFFER))) ||
+ ((mask & GL_STENCIL_BUFFER_BIT) != 0 &&
+ ((GetBoundFramebufferStencilFormat(GL_READ_FRAMEBUFFER) !=
+ GetBoundFramebufferStencilFormat(GL_DRAW_FRAMEBUFFER))))) {
+ LOCAL_SET_GL_ERROR(GL_INVALID_OPERATION, func_name,
+ "src and dst formats differ for depth/stencil");
+ return;
}
+
if (workarounds().adjust_src_dst_region_for_blitframebuffer) {
gfx::Size read_size = GetBoundReadFramebufferSize();
gfx::Rect src_bounds(0, 0, read_size.width(), read_size.height());
@@ -8015,8 +8147,10 @@ void GLES2DecoderImpl::DoBlitFramebufferCHROMIUM(
"the width or height of src region overflow");
return;
}
- src_width = std::abs(src_width_temp.ValueOrDefault(0));
- src_height = std::abs(src_height_temp.ValueOrDefault(0));
+ if (!src_width_temp.Abs().AssignIfValid(&src_width))
+ src_width = 0;
+ if (!src_height_temp.Abs().AssignIfValid(&src_height))
+ src_height = 0;
gfx::Rect src_region(src_x, src_y, src_width, src_height);
if (!src_bounds.Contains(src_region) &&
@@ -8051,8 +8185,10 @@ void GLES2DecoderImpl::DoBlitFramebufferCHROMIUM(
"the width or height of dst region overflow");
return;
}
- dst_width = std::abs(dst_width_temp.ValueOrDefault(0));
- dst_height = std::abs(dst_height_temp.ValueOrDefault(0));
+ if (!dst_width_temp.Abs().AssignIfValid(&dst_width))
+ dst_width = 0;
+ if (!dst_height_temp.Abs().AssignIfValid(&dst_height))
+ dst_height = 0;
GLfloat dst_mapping_width =
static_cast<GLfloat>(src_real_width) * dst_width / src_width;
@@ -8509,6 +8645,31 @@ void GLES2DecoderImpl::DoLinkProgram(GLuint program_id) {
ExitCommandProcessingEarly();
}
+void GLES2DecoderImpl::DoOverlayPromotionHintCHROMIUM(GLuint client_id,
+ GLboolean promotion_hint,
+ GLint display_x,
+ GLint display_y) {
+ if (client_id == 0)
+ return;
+
+ TextureRef* texture_ref = GetTexture(client_id);
+ if (!texture_ref) {
+ LOCAL_SET_GL_ERROR(GL_INVALID_VALUE, "glOverlayPromotionHintCHROMIUM",
+ "invalid texture id");
+ return;
+ }
+ GLStreamTextureImage* image =
+ texture_ref->texture()->GetLevelStreamTextureImage(
+ GL_TEXTURE_EXTERNAL_OES, 0);
+ if (!image) {
+ LOCAL_SET_GL_ERROR(GL_INVALID_OPERATION, "glOverlayPromotionHintCHROMIUM",
+ "texture has no StreamTextureImage");
+ return;
+ }
+
+ image->NotifyPromotionHint(promotion_hint != GL_FALSE, display_x, display_y);
+}
+
void GLES2DecoderImpl::DoReadBuffer(GLenum src) {
Framebuffer* framebuffer = GetFramebufferInfoForTarget(GL_READ_FRAMEBUFFER);
if (framebuffer) {
@@ -8720,6 +8881,16 @@ bool GLES2DecoderImpl::ValidateAndAdjustDrawBuffers(const char* func_name) {
return true;
}
+void GLES2DecoderImpl::AdjustDrawBuffers() {
+ if (!SupportsDrawBuffers()) {
+ return;
+ }
+ Framebuffer* framebuffer = framebuffer_state_.bound_draw_framebuffer.get();
+ if (framebuffer) {
+ framebuffer->AdjustDrawBuffers();
+ }
+}
+
bool GLES2DecoderImpl::ValidateUniformBlockBackings(const char* func_name) {
DCHECK(feature_info_->IsWebGL2OrES3Context());
if (!state_.current_program.get())
@@ -10524,6 +10695,7 @@ void GLES2DecoderImpl::GetTexParameterImpl(
}
return;
}
+ break;
case GL_TEXTURE_MAX_LEVEL:
if (workarounds().use_shadowed_tex_level_params) {
if (fparams) {
@@ -10533,6 +10705,35 @@ void GLES2DecoderImpl::GetTexParameterImpl(
}
return;
}
+ break;
+ case GL_TEXTURE_SWIZZLE_R:
+ if (fparams) {
+ fparams[0] = static_cast<GLfloat>(texture->swizzle_r());
+ } else {
+ iparams[0] = texture->swizzle_r();
+ }
+ return;
+ case GL_TEXTURE_SWIZZLE_G:
+ if (fparams) {
+ fparams[0] = static_cast<GLfloat>(texture->swizzle_g());
+ } else {
+ iparams[0] = texture->swizzle_g();
+ }
+ return;
+ case GL_TEXTURE_SWIZZLE_B:
+ if (fparams) {
+ fparams[0] = static_cast<GLfloat>(texture->swizzle_b());
+ } else {
+ iparams[0] = texture->swizzle_b();
+ }
+ return;
+ case GL_TEXTURE_SWIZZLE_A:
+ if (fparams) {
+ fparams[0] = static_cast<GLfloat>(texture->swizzle_a());
+ } else {
+ iparams[0] = texture->swizzle_a();
+ }
+ return;
default:
break;
}
@@ -10751,16 +10952,9 @@ error::Error GLES2DecoderImpl::HandleVertexAttribIPointer(
if (!state_.bound_array_buffer.get() ||
state_.bound_array_buffer->IsDeleted()) {
- if (state_.vertex_attrib_manager.get() ==
- state_.default_vertex_attrib_manager.get()) {
- LOCAL_SET_GL_ERROR(
- GL_INVALID_OPERATION,
- "glVertexAttribIPointer", "no array buffer bound");
- return error::kNoError;
- } else if (offset != 0) {
+ if (offset != 0) {
LOCAL_SET_GL_ERROR(
- GL_INVALID_OPERATION,
- "glVertexAttribIPointer", "client side arrays are not allowed");
+ GL_INVALID_OPERATION, "glVertexAttribIPointer", "offset != 0");
return error::kNoError;
}
}
@@ -10844,16 +11038,9 @@ error::Error GLES2DecoderImpl::HandleVertexAttribPointer(
if (!state_.bound_array_buffer.get() ||
state_.bound_array_buffer->IsDeleted()) {
- if (state_.vertex_attrib_manager.get() ==
- state_.default_vertex_attrib_manager.get()) {
- LOCAL_SET_GL_ERROR(
- GL_INVALID_OPERATION,
- "glVertexAttribPointer", "no array buffer bound");
- return error::kNoError;
- } else if (offset != 0) {
+ if (offset != 0) {
LOCAL_SET_GL_ERROR(
- GL_INVALID_OPERATION,
- "glVertexAttribPointer", "client side arrays are not allowed");
+ GL_INVALID_OPERATION, "glVertexAttribPointer", "offset != 0");
return error::kNoError;
}
}
@@ -13736,12 +13923,14 @@ error::Error GLES2DecoderImpl::DoCompressedTexSubImage(
return error::kNoError;
}
-bool GLES2DecoderImpl::ValidateCopyTexFormat(
- const char* func_name, GLenum internal_format,
- GLenum read_format, GLenum read_type) {
+bool GLES2DecoderImpl::ValidateCopyTexFormatHelper(
+ GLenum internal_format,
+ GLenum read_format,
+ GLenum read_type,
+ std::string* output_error_msg) {
+ DCHECK(output_error_msg);
if (read_format == 0) {
- LOCAL_SET_GL_ERROR(
- GL_INVALID_OPERATION, func_name, "no valid color image");
+ *output_error_msg = std::string("no valid color image");
return false;
}
// Check we have compatible formats.
@@ -13749,13 +13938,12 @@ bool GLES2DecoderImpl::ValidateCopyTexFormat(
uint32_t channels_needed = GLES2Util::GetChannelsForFormat(internal_format);
if (!channels_needed ||
(channels_needed & channels_exist) != channels_needed) {
- LOCAL_SET_GL_ERROR(
- GL_INVALID_OPERATION, func_name, "incompatible format");
+ *output_error_msg = std::string("incompatible format");
return false;
}
if (feature_info_->IsWebGL2OrES3Context()) {
GLint color_encoding = GetColorEncodingFromInternalFormat(read_format);
- bool float_mismatch= feature_info_->ext_color_buffer_float_available() ?
+ bool float_mismatch = feature_info_->ext_color_buffer_float_available() ?
(GLES2Util::IsIntegerFormat(internal_format) !=
GLES2Util::IsIntegerFormat(read_format)) :
GLES2Util::IsFloatFormat(internal_format);
@@ -13765,15 +13953,13 @@ bool GLES2DecoderImpl::ValidateCopyTexFormat(
GLES2Util::IsSignedIntegerFormat(read_format)) ||
(GLES2Util::IsUnsignedIntegerFormat(internal_format) !=
GLES2Util::IsUnsignedIntegerFormat(read_format))) {
- LOCAL_SET_GL_ERROR(
- GL_INVALID_OPERATION, func_name, "incompatible format");
+ *output_error_msg = std::string("incompatible format");
return false;
}
}
if ((channels_needed & (GLES2Util::kDepth | GLES2Util::kStencil)) != 0) {
- LOCAL_SET_GL_ERROR(
- GL_INVALID_OPERATION,
- func_name, "can not be used with depth or stencil textures");
+ *output_error_msg =
+ std::string("can not be used with depth or stencil textures");
return false;
}
if (feature_info_->IsWebGL2OrES3Context()) {
@@ -13790,9 +13976,7 @@ bool GLES2DecoderImpl::ValidateCopyTexFormat(
(dg > 0 && sg != dg) ||
(db > 0 && sb != db) ||
(da > 0 && sa != da)) {
- LOCAL_SET_GL_ERROR(
- GL_INVALID_OPERATION,
- func_name, "incompatible color component sizes");
+ *output_error_msg = std::string("incompatible color component sizes");
return false;
}
}
@@ -13800,6 +13984,20 @@ bool GLES2DecoderImpl::ValidateCopyTexFormat(
return true;
}
+bool GLES2DecoderImpl::ValidateCopyTexFormat(const char* func_name,
+ GLenum internal_format,
+ GLenum read_format,
+ GLenum read_type) {
+ std::string output_error_msg;
+ if (!ValidateCopyTexFormatHelper(internal_format, read_format, read_type,
+ &output_error_msg)) {
+ LOCAL_SET_GL_ERROR(GL_INVALID_OPERATION, func_name,
+ output_error_msg.c_str());
+ return false;
+ }
+ return true;
+}
+
void GLES2DecoderImpl::DoCopyTexImage2D(
GLenum target,
GLint level,
@@ -15710,7 +15908,6 @@ error::Error GLES2DecoderImpl::HandleBeginQueryEXT(
return error::kNoError;
}
break;
- case GL_SAMPLES_PASSED:
case GL_ANY_SAMPLES_PASSED:
case GL_ANY_SAMPLES_PASSED_CONSERVATIVE:
if (!features().occlusion_query_boolean) {
@@ -16002,24 +16199,70 @@ bool GLES2DecoderImpl::ValidateCopyTextureCHROMIUMTextures(
return true;
}
-bool GLES2DecoderImpl::ValidateCopyTextureCHROMIUMInternalFormats(
+CopyTextureMethod GLES2DecoderImpl::ValidateCopyTextureCHROMIUMInternalFormats(
const char* function_name,
TextureRef* source_texture_ref,
+ GLint source_level,
GLenum dest_internal_format) {
GLenum source_type = 0;
GLenum source_internal_format = 0;
Texture* source_texture = source_texture_ref->texture();
- source_texture->GetLevelType(source_texture->target(), 0, &source_type,
- &source_internal_format);
+ source_texture->GetLevelType(source_texture->target(), source_level,
+ &source_type, &source_internal_format);
+
+ bool valid_dest_format = false;
+ // TODO(qiankun.miao@intel.com): ALPHA, LUMINANCE and LUMINANCE_ALPHA formats
+ // are not supported on GL core profile. See crbug.com/577144. Enable the
+ // workaround for glCopyTexImage and glCopyTexSubImage in
+ // gles2_cmd_copy_tex_image.cc for glCopyTextureCHROMIUM implementation.
+ switch (dest_internal_format) {
+ case GL_RGB:
+ case GL_RGBA:
+ case GL_RGB8:
+ case GL_RGBA8:
+ valid_dest_format = true;
+ break;
+ case GL_BGRA_EXT:
+ case GL_BGRA8_EXT:
+ valid_dest_format =
+ feature_info_->feature_flags().ext_texture_format_bgra8888;
+ break;
+ case GL_SRGB_EXT:
+ case GL_SRGB_ALPHA_EXT:
+ valid_dest_format = feature_info_->feature_flags().ext_srgb;
+ break;
+ case GL_R8:
+ case GL_R8UI:
+ case GL_RG8:
+ case GL_RG8UI:
+ case GL_SRGB8:
+ case GL_RGB565:
+ case GL_RGB8UI:
+ case GL_SRGB8_ALPHA8:
+ case GL_RGB5_A1:
+ case GL_RGBA4:
+ case GL_RGBA8UI:
+ valid_dest_format = feature_info_->IsWebGL2OrES3Context();
+ break;
+ case GL_RGB9_E5:
+ valid_dest_format = !gl_version_info().is_es;
+ break;
+ case GL_R16F:
+ case GL_R32F:
+ case GL_RG16F:
+ case GL_RG32F:
+ case GL_RGB16F:
+ case GL_RGB32F:
+ case GL_RGBA16F:
+ case GL_RGBA32F:
+ case GL_R11F_G11F_B10F:
+ valid_dest_format = feature_info_->ext_color_buffer_float_available();
+ break;
+ default:
+ valid_dest_format = false;
+ break;
+ }
- // The destination format should be GL_RGB, or GL_RGBA. GL_ALPHA,
- // GL_LUMINANCE, and GL_LUMINANCE_ALPHA are not supported because they are not
- // renderable on some platforms.
- bool valid_dest_format =
- dest_internal_format == GL_RGB || dest_internal_format == GL_RGBA ||
- dest_internal_format == GL_RGB8 || dest_internal_format == GL_RGBA8 ||
- dest_internal_format == GL_BGRA_EXT ||
- dest_internal_format == GL_BGRA8_EXT;
bool valid_source_format =
source_internal_format == GL_RED || source_internal_format == GL_ALPHA ||
source_internal_format == GL_RGB || source_internal_format == GL_RGBA ||
@@ -16035,16 +16278,38 @@ bool GLES2DecoderImpl::ValidateCopyTextureCHROMIUMInternalFormats(
GLES2Util::GetStringEnum(source_internal_format);
LOCAL_SET_GL_ERROR(GL_INVALID_OPERATION, function_name,
msg.c_str());
- return false;
+ return NOT_COPYABLE;
}
if (!valid_dest_format) {
std::string msg = "invalid dest internal format " +
GLES2Util::GetStringEnum(dest_internal_format);
LOCAL_SET_GL_ERROR(GL_INVALID_OPERATION, function_name,
msg.c_str());
- return false;
+ return NOT_COPYABLE;
}
- return true;
+
+ bool source_format_color_renderable =
+ Texture::ColorRenderable(GetFeatureInfo(), source_internal_format, false);
+ bool dest_format_color_renderable =
+ Texture::ColorRenderable(GetFeatureInfo(), dest_internal_format, false);
+ std::string output_error_msg;
+
+ // CopyTexImage* should not allow internalformat of GL_BGRA_EXT and
+ // GL_BGRA8_EXT. crbug.com/663086.
+ bool copy_tex_image_format_valid =
+ source_internal_format != GL_BGRA_EXT &&
+ dest_internal_format != GL_BGRA_EXT &&
+ source_internal_format != GL_BGRA8_EXT &&
+ dest_internal_format != GL_BGRA8_EXT &&
+ ValidateCopyTexFormatHelper(dest_internal_format, source_internal_format,
+ source_type, &output_error_msg);
+ if (source_format_color_renderable && copy_tex_image_format_valid)
+ return DIRECT_COPY;
+
+ if (dest_format_color_renderable)
+ return DIRECT_DRAW;
+
+ return DRAW_AND_COPY;
}
bool GLES2DecoderImpl::ValidateCompressedCopyTextureCHROMIUM(
@@ -16096,7 +16361,9 @@ bool GLES2DecoderImpl::ValidateCompressedCopyTextureCHROMIUM(
void GLES2DecoderImpl::DoCopyTextureCHROMIUM(
GLuint source_id,
+ GLint source_level,
GLuint dest_id,
+ GLint dest_level,
GLenum internal_format,
GLenum dest_type,
GLboolean unpack_flip_y,
@@ -16108,18 +16375,15 @@ void GLES2DecoderImpl::DoCopyTextureCHROMIUM(
TextureRef* source_texture_ref = GetTexture(source_id);
TextureRef* dest_texture_ref = GetTexture(dest_id);
- if (!texture_manager()->ValidateTextureParameters(
- GetErrorState(), kFunctionName, true, internal_format, dest_type,
- internal_format, 0))
- return;
-
if (!ValidateCopyTextureCHROMIUMTextures(kFunctionName, source_texture_ref,
dest_texture_ref)) {
return;
}
- if (!ValidateCopyTextureCHROMIUMInternalFormats(
- kFunctionName, source_texture_ref, internal_format)) {
+ if (source_level < 0 || dest_level < 0 ||
+ (feature_info_->IsWebGL1OrES2Context() && source_level > 0)) {
+ LOCAL_SET_GL_ERROR(GL_INVALID_VALUE, kFunctionName,
+ "source_level or dest_level out of range");
return;
}
@@ -16127,42 +16391,77 @@ void GLES2DecoderImpl::DoCopyTextureCHROMIUM(
Texture* dest_texture = dest_texture_ref->texture();
GLenum source_target = source_texture->target();
GLenum dest_target = dest_texture->target();
+
+ GLenum source_type = 0;
+ GLenum source_internal_format = 0;
+ source_texture->GetLevelType(source_target, source_level, &source_type,
+ &source_internal_format);
+ GLenum format =
+ TextureManager::ExtractFormatFromStorageFormat(internal_format);
+ if (!texture_manager()->ValidateTextureParameters(
+ GetErrorState(), kFunctionName, true, format, dest_type,
+ internal_format, dest_level)) {
+ return;
+ }
+
+ CopyTextureMethod method = ValidateCopyTextureCHROMIUMInternalFormats(
+ kFunctionName, source_texture_ref, source_level, internal_format);
+ // INVALID_OPERATION is already generated by
+ // ValidateCopyTextureCHROMIUMInternalFormats.
+ if (method == NOT_COPYABLE) {
+ return;
+ }
+
+  // DRAW_AND_COPY draws to an fbo with level 0 of an intermediate texture
+  // attached, then copies from that fbo to the dest texture level with
+  // glCopyTexImage2D. For WebGL 1.0 or OpenGL ES 2.0, the DIRECT_DRAW path
+  // isn't available for dest_level > 0 because glFramebufferTexture2D doesn't
+  // support level > 0 in an ES2 context, so fall back to DRAW_AND_COPY there.
+ // TODO(qiankun.miao@intel.com): for WebGL 2.0 or OpenGL ES 3.0, both
+ // DIRECT_DRAW path for dest_level > 0 and DIRECT_COPY path for source_level >
+ // 0 are not available due to a framebuffer completeness bug:
+ // crbug.com/678526. Once the bug is fixed, the limitation for WebGL 2.0 and
+ // OpenGL ES 3.0 can be lifted.
+ if ((dest_level > 0 && method == DIRECT_DRAW) ||
+ (source_level > 0 && method == DIRECT_COPY)) {
+ method = DRAW_AND_COPY;
+ }
+
+ if (feature_info_->feature_flags().desktop_srgb_support) {
+ bool enable_framebuffer_srgb =
+ GetColorEncodingFromInternalFormat(source_internal_format) == GL_SRGB ||
+ GetColorEncodingFromInternalFormat(internal_format) == GL_SRGB;
+ state_.EnableDisableFramebufferSRGB(enable_framebuffer_srgb);
+ }
+
int source_width = 0;
int source_height = 0;
gl::GLImage* image =
- source_texture->GetLevelImage(source_target, 0);
+ source_texture->GetLevelImage(source_target, source_level);
if (image) {
gfx::Size size = image->GetSize();
source_width = size.width();
source_height = size.height();
if (source_width <= 0 || source_height <= 0) {
- LOCAL_SET_GL_ERROR(
- GL_INVALID_VALUE,
- "glCopyTextureChromium", "invalid image size");
+ LOCAL_SET_GL_ERROR(GL_INVALID_VALUE, kFunctionName, "invalid image size");
return;
}
} else {
- if (!source_texture->GetLevelSize(source_target, 0,
+ if (!source_texture->GetLevelSize(source_target, source_level,
&source_width, &source_height, nullptr)) {
- LOCAL_SET_GL_ERROR(GL_INVALID_VALUE,
- "glCopyTextureChromium",
- "source texture has no level 0");
+ LOCAL_SET_GL_ERROR(GL_INVALID_VALUE, kFunctionName,
+ "source texture has no data for level");
return;
}
// Check that this type of texture is allowed.
- if (!texture_manager()->ValidForTarget(source_target, 0,
+ if (!texture_manager()->ValidForTarget(source_target, source_level,
source_width, source_height, 1)) {
LOCAL_SET_GL_ERROR(GL_INVALID_VALUE, kFunctionName, "Bad dimensions");
return;
}
}
- GLenum source_type = 0;
- GLenum source_internal_format = 0;
- source_texture->GetLevelType(source_target, 0, &source_type,
- &source_internal_format);
-
if (dest_texture->IsImmutable()) {
LOCAL_SET_GL_ERROR(GL_INVALID_OPERATION, kFunctionName,
"texture is immutable");
@@ -16171,7 +16470,7 @@ void GLES2DecoderImpl::DoCopyTextureCHROMIUM(
// Clear the source texture if necessary.
if (!texture_manager()->ClearTextureLevel(this, source_texture_ref,
- source_target, 0)) {
+ source_target, source_level)) {
LOCAL_SET_GL_ERROR(GL_OUT_OF_MEMORY, kFunctionName, "dimensions too big");
return;
}
@@ -16184,10 +16483,10 @@ void GLES2DecoderImpl::DoCopyTextureCHROMIUM(
int dest_width = 0;
int dest_height = 0;
bool dest_level_defined = dest_texture->GetLevelSize(
- dest_target, 0, &dest_width, &dest_height, nullptr);
+ dest_target, dest_level, &dest_width, &dest_height, nullptr);
if (dest_level_defined) {
- dest_texture->GetLevelType(dest_target, 0, &dest_type_previous,
+ dest_texture->GetLevelType(dest_target, dest_level, &dest_type_previous,
&dest_internal_format);
}
@@ -16199,32 +16498,34 @@ void GLES2DecoderImpl::DoCopyTextureCHROMIUM(
// Ensure that the glTexImage2D succeeds.
LOCAL_COPY_REAL_GL_ERRORS_TO_WRAPPER(kFunctionName);
glBindTexture(dest_target, dest_texture->service_id());
- glTexImage2D(
- dest_target, 0, TextureManager::AdjustTexInternalFormat(
- feature_info_.get(), internal_format),
- source_width, source_height, 0,
- TextureManager::AdjustTexFormat(feature_info_.get(), internal_format),
- dest_type, NULL);
+ glTexImage2D(dest_target, dest_level,
+ TextureManager::AdjustTexInternalFormat(feature_info_.get(),
+ internal_format),
+ source_width, source_height, 0,
+ TextureManager::AdjustTexFormat(feature_info_.get(), format),
+ dest_type, nullptr);
GLenum error = LOCAL_PEEK_GL_ERROR(kFunctionName);
if (error != GL_NO_ERROR) {
RestoreCurrentTextureBindings(&state_, dest_target);
return;
}
- texture_manager()->SetLevelInfo(
- dest_texture_ref, dest_target, 0, internal_format, source_width,
- source_height, 1, 0, internal_format, dest_type,
- gfx::Rect(source_width, source_height));
+ texture_manager()->SetLevelInfo(dest_texture_ref, dest_target, dest_level,
+ internal_format, source_width,
+ source_height, 1, 0, format, dest_type,
+ gfx::Rect(source_width, source_height));
dest_texture->ApplyFormatWorkarounds(feature_info_.get());
} else {
- texture_manager()->SetLevelCleared(dest_texture_ref, dest_target, 0,
- true);
+ texture_manager()->SetLevelCleared(dest_texture_ref, dest_target,
+ dest_level, true);
}
// Try using GLImage::CopyTexImage when possible.
bool unpack_premultiply_alpha_change =
(unpack_premultiply_alpha ^ unpack_unmultiply_alpha) != 0;
- if (image && !unpack_flip_y && !unpack_premultiply_alpha_change) {
+ // TODO(qiankun.miao@intel.com): Support level > 0 for CopyTexImage.
+ if (image && dest_level == 0 && !unpack_flip_y &&
+ !unpack_premultiply_alpha_change) {
glBindTexture(dest_target, dest_texture->service_id());
if (image->CopyTexImage(dest_target))
return;
@@ -16237,27 +16538,32 @@ void GLES2DecoderImpl::DoCopyTextureCHROMIUM(
if (source_target == GL_TEXTURE_EXTERNAL_OES) {
if (GLStreamTextureImage* image =
source_texture->GetLevelStreamTextureImage(GL_TEXTURE_EXTERNAL_OES,
- 0)) {
+ source_level)) {
GLfloat transform_matrix[16];
image->GetTextureMatrix(transform_matrix);
copy_texture_CHROMIUM_->DoCopyTextureWithTransform(
- this, source_target, source_texture->service_id(), dest_target,
- dest_texture->service_id(), source_width, source_height,
+ this, source_target, source_texture->service_id(), source_level,
+ source_internal_format, dest_target, dest_texture->service_id(),
+ dest_level, internal_format, source_width, source_height,
unpack_flip_y == GL_TRUE, unpack_premultiply_alpha == GL_TRUE,
unpack_unmultiply_alpha == GL_TRUE, transform_matrix);
return;
}
}
+
copy_texture_CHROMIUM_->DoCopyTexture(
- this, source_target, source_texture->service_id(), source_internal_format,
- dest_target, dest_texture->service_id(), internal_format, source_width,
- source_height, unpack_flip_y == GL_TRUE,
- unpack_premultiply_alpha == GL_TRUE, unpack_unmultiply_alpha == GL_TRUE);
+ this, source_target, source_texture->service_id(), source_level,
+ source_internal_format, dest_target, dest_texture->service_id(),
+ dest_level, internal_format, source_width, source_height,
+ unpack_flip_y == GL_TRUE, unpack_premultiply_alpha == GL_TRUE,
+ unpack_unmultiply_alpha == GL_TRUE, method);
}
void GLES2DecoderImpl::DoCopySubTextureCHROMIUM(
GLuint source_id,
+ GLint source_level,
GLuint dest_id,
+ GLint dest_level,
GLint xoffset,
GLint yoffset,
GLint x,
@@ -16278,6 +16584,13 @@ void GLES2DecoderImpl::DoCopySubTextureCHROMIUM(
return;
}
+ if (source_level < 0 || dest_level < 0 ||
+ (feature_info_->IsWebGL1OrES2Context() && source_level > 0)) {
+ LOCAL_SET_GL_ERROR(GL_INVALID_VALUE, kFunctionName,
+ "source_level or dest_level out of range");
+ return;
+ }
+
Texture* source_texture = source_texture_ref->texture();
Texture* dest_texture = dest_texture_ref->texture();
GLenum source_target = source_texture->target();
@@ -16285,7 +16598,7 @@ void GLES2DecoderImpl::DoCopySubTextureCHROMIUM(
int source_width = 0;
int source_height = 0;
gl::GLImage* image =
- source_texture->GetLevelImage(source_target, 0);
+ source_texture->GetLevelImage(source_target, source_level);
if (image) {
gfx::Size size = image->GetSize();
source_width = size.width();
@@ -16310,23 +16623,23 @@ void GLES2DecoderImpl::DoCopySubTextureCHROMIUM(
return;
}
} else {
- if (!source_texture->GetLevelSize(source_target, 0,
+ if (!source_texture->GetLevelSize(source_target, source_level,
&source_width, &source_height, nullptr)) {
LOCAL_SET_GL_ERROR(GL_INVALID_VALUE, kFunctionName,
- "source texture has no level 0");
+ "source texture has no data for level");
return;
}
// Check that this type of texture is allowed.
- if (!texture_manager()->ValidForTarget(source_target, 0,
+ if (!texture_manager()->ValidForTarget(source_target, source_level,
source_width, source_height, 1)) {
LOCAL_SET_GL_ERROR(GL_INVALID_VALUE, kFunctionName,
"source texture bad dimensions");
return;
}
- if (!source_texture->ValidForTexture(source_target, 0, x, y, 0, width,
- height, 1)) {
+ if (!source_texture->ValidForTexture(source_target, source_level, x, y, 0,
+ width, height, 1)) {
LOCAL_SET_GL_ERROR(GL_INVALID_VALUE, kFunctionName,
"source texture bad dimensions.");
return;
@@ -16335,33 +16648,58 @@ void GLES2DecoderImpl::DoCopySubTextureCHROMIUM(
GLenum source_type = 0;
GLenum source_internal_format = 0;
- source_texture->GetLevelType(source_target, 0, &source_type,
+ source_texture->GetLevelType(source_target, source_level, &source_type,
&source_internal_format);
GLenum dest_type = 0;
GLenum dest_internal_format = 0;
bool dest_level_defined = dest_texture->GetLevelType(
- dest_target, 0, &dest_type, &dest_internal_format);
+ dest_target, dest_level, &dest_type, &dest_internal_format);
if (!dest_level_defined) {
LOCAL_SET_GL_ERROR(GL_INVALID_OPERATION, kFunctionName,
"destination texture is not defined");
return;
}
- if (!dest_texture->ValidForTexture(dest_target, 0, xoffset,
- yoffset, 0, width, height, 1)) {
+ if (!dest_texture->ValidForTexture(dest_target, dest_level, xoffset, yoffset,
+ 0, width, height, 1)) {
LOCAL_SET_GL_ERROR(GL_INVALID_VALUE, kFunctionName,
"destination texture bad dimensions.");
return;
}
- if (!ValidateCopyTextureCHROMIUMInternalFormats(
- kFunctionName, source_texture_ref, dest_internal_format)) {
+ CopyTextureMethod method = ValidateCopyTextureCHROMIUMInternalFormats(
+ kFunctionName, source_texture_ref, source_level, dest_internal_format);
+ // INVALID_OPERATION is already generated by
+ // ValidateCopyTextureCHROMIUMInternalFormats.
+ if (method == NOT_COPYABLE) {
return;
}
+ // Draw to an FBO attaching level 0 of an intermediate texture,
+ // then copy from the FBO to the dest texture level with glCopyTexImage2D.
+ // For WebGL 1.0 or OpenGL ES 2.0, the DIRECT_DRAW path isn't available for
+ // dest_level > 0 because glFramebufferTexture2D doesn't support level > 0
+ // in an ES2 context; use the DRAW_AND_COPY path in that case.
+ // TODO(qiankun.miao@intel.com): for WebGL 2.0 or OpenGL ES 3.0, neither the
+ // DIRECT_DRAW path for dest_level > 0 nor the DIRECT_COPY path for
+ // source_level > 0 is available due to a framebuffer completeness bug:
+ // crbug.com/678526. Once the bug is fixed, the limitation for WebGL 2.0 and
+ // OpenGL ES 3.0 can be lifted.
+ if ((dest_level > 0 && method == DIRECT_DRAW) ||
+ (source_level > 0 && method == DIRECT_COPY)) {
+ method = DRAW_AND_COPY;
+ }
+
+ if (feature_info_->feature_flags().desktop_srgb_support) {
+ bool enable_framebuffer_srgb =
+ GetColorEncodingFromInternalFormat(source_internal_format) == GL_SRGB ||
+ GetColorEncodingFromInternalFormat(dest_internal_format) == GL_SRGB;
+ state_.EnableDisableFramebufferSRGB(enable_framebuffer_srgb);
+ }
+
// Clear the source texture if necessary.
if (!texture_manager()->ClearTextureLevel(this, source_texture_ref,
- source_target, 0)) {
+ source_target, source_level)) {
LOCAL_SET_GL_ERROR(GL_OUT_OF_MEMORY, kFunctionName,
"source texture dimensions too big");
return;
@@ -16372,38 +16710,41 @@ void GLES2DecoderImpl::DoCopySubTextureCHROMIUM(
int dest_width = 0;
int dest_height = 0;
- bool ok = dest_texture->GetLevelSize(
- dest_target, 0, &dest_width, &dest_height, nullptr);
+ bool ok = dest_texture->GetLevelSize(dest_target, dest_level, &dest_width,
+ &dest_height, nullptr);
DCHECK(ok);
if (xoffset != 0 || yoffset != 0 || width != dest_width ||
height != dest_height) {
gfx::Rect cleared_rect;
if (TextureManager::CombineAdjacentRects(
- dest_texture->GetLevelClearedRect(dest_target, 0),
+ dest_texture->GetLevelClearedRect(dest_target, dest_level),
gfx::Rect(xoffset, yoffset, width, height), &cleared_rect)) {
- DCHECK_GE(
- cleared_rect.size().GetArea(),
- dest_texture->GetLevelClearedRect(dest_target, 0).size().GetArea());
- texture_manager()->SetLevelClearedRect(dest_texture_ref, dest_target, 0,
- cleared_rect);
+ DCHECK_GE(cleared_rect.size().GetArea(),
+ dest_texture->GetLevelClearedRect(dest_target, dest_level)
+ .size()
+ .GetArea());
+ texture_manager()->SetLevelClearedRect(dest_texture_ref, dest_target,
+ dest_level, cleared_rect);
} else {
// Otherwise clear part of texture level that is not already cleared.
if (!texture_manager()->ClearTextureLevel(this, dest_texture_ref,
- dest_target, 0)) {
+ dest_target, dest_level)) {
LOCAL_SET_GL_ERROR(GL_OUT_OF_MEMORY, kFunctionName,
"destination texture dimensions too big");
return;
}
}
} else {
- texture_manager()->SetLevelCleared(dest_texture_ref, dest_target, 0,
- true);
+ texture_manager()->SetLevelCleared(dest_texture_ref, dest_target,
+ dest_level, true);
}
// Try using GLImage::CopyTexSubImage when possible.
bool unpack_premultiply_alpha_change =
(unpack_premultiply_alpha ^ unpack_unmultiply_alpha) != 0;
- if (image && !unpack_flip_y && !unpack_premultiply_alpha_change) {
+ // TODO(qiankun.miao@intel.com): Support level > 0 for CopyTexSubImage.
+ if (image && dest_level == 0 && !unpack_flip_y &&
+ !unpack_premultiply_alpha_change) {
ScopedTextureBinder binder(
&state_, dest_texture->service_id(), dest_target);
if (image->CopyTexSubImage(dest_target, gfx::Point(xoffset, yoffset),
@@ -16419,25 +16760,26 @@ void GLES2DecoderImpl::DoCopySubTextureCHROMIUM(
if (source_target == GL_TEXTURE_EXTERNAL_OES) {
if (GLStreamTextureImage* image =
source_texture->GetLevelStreamTextureImage(GL_TEXTURE_EXTERNAL_OES,
- 0)) {
+ source_level)) {
GLfloat transform_matrix[16];
image->GetTextureMatrix(transform_matrix);
copy_texture_CHROMIUM_->DoCopySubTextureWithTransform(
- this, source_target, source_texture->service_id(),
+ this, source_target, source_texture->service_id(), source_level,
source_internal_format, dest_target, dest_texture->service_id(),
- dest_internal_format, xoffset, yoffset, x, y, width, height,
- dest_width, dest_height, source_width, source_height,
+ dest_level, dest_internal_format, xoffset, yoffset, x, y, width,
+ height, dest_width, dest_height, source_width, source_height,
unpack_flip_y == GL_TRUE, unpack_premultiply_alpha == GL_TRUE,
unpack_unmultiply_alpha == GL_TRUE, transform_matrix);
return;
}
}
copy_texture_CHROMIUM_->DoCopySubTexture(
- this, source_target, source_texture->service_id(), source_internal_format,
- dest_target, dest_texture->service_id(), dest_internal_format, xoffset,
- yoffset, x, y, width, height, dest_width, dest_height, source_width,
- source_height, unpack_flip_y == GL_TRUE,
- unpack_premultiply_alpha == GL_TRUE, unpack_unmultiply_alpha == GL_TRUE);
+ this, source_target, source_texture->service_id(), source_level,
+ source_internal_format, dest_target, dest_texture->service_id(),
+ dest_level, dest_internal_format, xoffset, yoffset, x, y, width, height,
+ dest_width, dest_height, source_width, source_height,
+ unpack_flip_y == GL_TRUE, unpack_premultiply_alpha == GL_TRUE,
+ unpack_unmultiply_alpha == GL_TRUE, method);
}
bool GLES2DecoderImpl::InitializeCopyTexImageBlitter(
@@ -16609,10 +16951,10 @@ void GLES2DecoderImpl::DoCompressedCopyTextureCHROMIUM(GLuint source_id,
gfx::Rect(source_width, source_height));
copy_texture_CHROMIUM_->DoCopyTexture(
- this, source_texture->target(), source_texture->service_id(),
+ this, source_texture->target(), source_texture->service_id(), 0,
source_internal_format, dest_texture->target(),
- dest_texture->service_id(), GL_RGBA, source_width, source_height, false,
- false, false);
+ dest_texture->service_id(), 0, GL_RGBA, source_width, source_height,
+ false, false, false, DIRECT_DRAW);
}
void GLES2DecoderImpl::TexStorageImpl(GLenum target,
@@ -16996,7 +17338,8 @@ void GLES2DecoderImpl::DoApplyScreenSpaceAntialiasingCHROMIUM() {
return;
apply_framebuffer_attachment_cmaa_intel_
->ApplyFramebufferAttachmentCMAAINTEL(this, bound_framebuffer,
- copy_texture_CHROMIUM_.get());
+ copy_texture_CHROMIUM_.get(),
+ texture_manager());
}
}
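The hunks above thread explicit source_level and dest_level arguments through DoCopyTextureCHROMIUM and DoCopySubTextureCHROMIUM instead of assuming mip level 0. For orientation only — this sketch is not part of the patch and assumes the client-side entry points mirror the decoder signatures above — a caller copying into mip level 2 of a destination texture would now pass the levels explicitly:

// Illustrative sketch; src_tex and dst_tex are hypothetical client texture ids.
glCopyTextureCHROMIUM(src_tex, /*source_level=*/0,
                      dst_tex, /*dest_level=*/2,
                      GL_RGBA, GL_UNSIGNED_BYTE,
                      /*unpack_flip_y=*/GL_FALSE,
                      /*unpack_premultiply_alpha=*/GL_FALSE,
                      /*unpack_unmultiply_alpha=*/GL_FALSE);

// Sub-rectangle variant: copy a 64x64 region from (0, 0) of src_tex level 0
// into offset (16, 16) of dst_tex level 2.
glCopySubTextureCHROMIUM(src_tex, /*source_level=*/0,
                         dst_tex, /*dest_level=*/2,
                         /*xoffset=*/16, /*yoffset=*/16,
                         /*x=*/0, /*y=*/0, /*width=*/64, /*height=*/64,
                         GL_FALSE, GL_FALSE, GL_FALSE);

Note that in a WebGL 1.0 / ES2 context the validation added above rejects source_level > 0, and dest_level > 0 is routed through the DRAW_AND_COPY path.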
diff --git a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_autogen.h b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_autogen.h
index de6cafdad51..1e30865884e 100644
--- a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_autogen.h
+++ b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_autogen.h
@@ -4570,7 +4570,9 @@ error::Error GLES2DecoderImpl::HandleCopyTextureCHROMIUM(
const volatile gles2::cmds::CopyTextureCHROMIUM& c =
*static_cast<const volatile gles2::cmds::CopyTextureCHROMIUM*>(cmd_data);
GLenum source_id = static_cast<GLenum>(c.source_id);
+ GLint source_level = static_cast<GLint>(c.source_level);
GLenum dest_id = static_cast<GLenum>(c.dest_id);
+ GLint dest_level = static_cast<GLint>(c.dest_level);
GLint internalformat = static_cast<GLint>(c.internalformat);
GLenum dest_type = static_cast<GLenum>(c.dest_type);
GLboolean unpack_flip_y = static_cast<GLboolean>(c.unpack_flip_y);
@@ -4588,9 +4590,9 @@ error::Error GLES2DecoderImpl::HandleCopyTextureCHROMIUM(
"dest_type");
return error::kNoError;
}
- DoCopyTextureCHROMIUM(source_id, dest_id, internalformat, dest_type,
- unpack_flip_y, unpack_premultiply_alpha,
- unpack_unmultiply_alpha);
+ DoCopyTextureCHROMIUM(source_id, source_level, dest_id, dest_level,
+ internalformat, dest_type, unpack_flip_y,
+ unpack_premultiply_alpha, unpack_unmultiply_alpha);
return error::kNoError;
}
@@ -4601,7 +4603,9 @@ error::Error GLES2DecoderImpl::HandleCopySubTextureCHROMIUM(
*static_cast<const volatile gles2::cmds::CopySubTextureCHROMIUM*>(
cmd_data);
GLenum source_id = static_cast<GLenum>(c.source_id);
+ GLint source_level = static_cast<GLint>(c.source_level);
GLenum dest_id = static_cast<GLenum>(c.dest_id);
+ GLint dest_level = static_cast<GLint>(c.dest_level);
GLint xoffset = static_cast<GLint>(c.xoffset);
GLint yoffset = static_cast<GLint>(c.yoffset);
GLint x = static_cast<GLint>(c.x);
@@ -4623,9 +4627,9 @@ error::Error GLES2DecoderImpl::HandleCopySubTextureCHROMIUM(
"height < 0");
return error::kNoError;
}
- DoCopySubTextureCHROMIUM(source_id, dest_id, xoffset, yoffset, x, y, width,
- height, unpack_flip_y, unpack_premultiply_alpha,
- unpack_unmultiply_alpha);
+ DoCopySubTextureCHROMIUM(source_id, source_level, dest_id, dest_level,
+ xoffset, yoffset, x, y, width, height, unpack_flip_y,
+ unpack_premultiply_alpha, unpack_unmultiply_alpha);
return error::kNoError;
}
@@ -4818,6 +4822,11 @@ error::Error GLES2DecoderImpl::HandleDiscardFramebufferEXTImmediate(
volatile const GLenum* attachments =
GetImmediateDataAs<volatile const GLenum*>(c, data_size,
immediate_data_size);
+ if (!validators_->framebuffer_target.IsValid(target)) {
+ LOCAL_SET_GL_ERROR_INVALID_ENUM("glDiscardFramebufferEXT", target,
+ "target");
+ return error::kNoError;
+ }
if (count < 0) {
LOCAL_SET_GL_ERROR(GL_INVALID_VALUE, "glDiscardFramebufferEXT",
"count < 0");
@@ -5102,6 +5111,20 @@ GLES2DecoderImpl::HandleUniformMatrix4fvStreamTextureMatrixCHROMIUMImmediate(
return error::kNoError;
}
+error::Error GLES2DecoderImpl::HandleOverlayPromotionHintCHROMIUM(
+ uint32_t immediate_data_size,
+ const volatile void* cmd_data) {
+ const volatile gles2::cmds::OverlayPromotionHintCHROMIUM& c =
+ *static_cast<const volatile gles2::cmds::OverlayPromotionHintCHROMIUM*>(
+ cmd_data);
+ GLuint texture = c.texture;
+ GLboolean promotion_hint = static_cast<GLboolean>(c.promotion_hint);
+ GLint display_x = static_cast<GLint>(c.display_x);
+ GLint display_y = static_cast<GLint>(c.display_y);
+ DoOverlayPromotionHintCHROMIUM(texture, promotion_hint, display_x, display_y);
+ return error::kNoError;
+}
+
bool GLES2DecoderImpl::SetCapabilityState(GLenum cap, bool enabled) {
switch (cap) {
case GL_BLEND:
@@ -5135,6 +5158,14 @@ bool GLES2DecoderImpl::SetCapabilityState(GLenum cap, bool enabled) {
return true;
}
return false;
+ case GL_FRAMEBUFFER_SRGB_EXT:
+ state_.enable_flags.framebuffer_srgb_ext = enabled;
+ if (state_.enable_flags.cached_framebuffer_srgb_ext != enabled ||
+ state_.ignore_cached_state) {
+ state_.enable_flags.cached_framebuffer_srgb_ext = enabled;
+ return true;
+ }
+ return false;
case GL_POLYGON_OFFSET_FILL:
state_.enable_flags.polygon_offset_fill = enabled;
if (state_.enable_flags.cached_polygon_offset_fill != enabled ||
diff --git a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_mock.h b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_mock.h
index 65d92751090..206f6d39ef9 100644
--- a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_mock.h
+++ b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_mock.h
@@ -26,7 +26,6 @@ class GLSurface;
}
namespace gpu {
-class AsyncPixelTransferDelegate;
namespace gles2 {
diff --git a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough.cc b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough.cc
index 12e6e006d30..eea4e8cb1c7 100644
--- a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough.cc
+++ b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough.cc
@@ -25,6 +25,18 @@ void DeleteServiceObjects(ClientServiceMap<ClientType, ServiceType>* id_map,
id_map->Clear();
}
+template <typename ClientType, typename ServiceType, typename ResultType>
+bool GetClientID(const ClientServiceMap<ClientType, ServiceType>* map,
+ ResultType service_id,
+ ResultType* result) {
+ ClientType client_id = 0;
+ if (!map->GetClientID(static_cast<ServiceType>(service_id), &client_id)) {
+ return false;
+ }
+ *result = static_cast<ResultType>(client_id);
+ return true;
+};
+
} // anonymous namespace
PassthroughResources::PassthroughResources() {}
@@ -159,9 +171,9 @@ bool GLES2DecoderPassthroughImpl::Initialize(
// Check for required extensions
if (!feature_info_->feature_flags().angle_robust_client_memory ||
- !feature_info_->feature_flags().chromium_bind_generates_resource ||
- (feature_info_->IsWebGLContext() !=
- feature_info_->feature_flags().angle_webgl_compatibility)) {
+ !feature_info_->feature_flags().chromium_bind_generates_resource) {
+ // TODO(geofflang): Verify that ANGLE_webgl_compatibility is enabled if this
+ // is a WebGL context (depends on crbug.com/671217).
Destroy(true);
return false;
}
@@ -179,21 +191,18 @@ bool GLES2DecoderPassthroughImpl::Initialize(
glGetIntegerv(GL_MAX_COMBINED_TEXTURE_IMAGE_UNITS, &num_texture_units);
active_texture_unit_ = 0;
- bound_textures_.resize(num_texture_units, 0);
+ bound_textures_[GL_TEXTURE_2D].resize(num_texture_units, 0);
+ bound_textures_[GL_TEXTURE_CUBE_MAP].resize(num_texture_units, 0);
+ if (feature_info_->IsWebGL2OrES3Context()) {
+ bound_textures_[GL_TEXTURE_2D_ARRAY].resize(num_texture_units, 0);
+ bound_textures_[GL_TEXTURE_3D].resize(num_texture_units, 0);
+ }
if (group_->gpu_preferences().enable_gpu_driver_debug_logging &&
feature_info_->feature_flags().khr_debug) {
InitializeGLDebugLogging();
}
- emulated_extensions_.push_back("GL_CHROMIUM_lose_context");
- emulated_extensions_.push_back("GL_CHROMIUM_pixel_transfer_buffer_object");
- emulated_extensions_.push_back("GL_CHROMIUM_resource_safe");
- emulated_extensions_.push_back("GL_CHROMIUM_strict_attribs");
- emulated_extensions_.push_back("GL_CHROMIUM_texture_mailbox");
- emulated_extensions_.push_back("GL_CHROMIUM_trace_marker");
- BuildExtensionsString();
-
set_initialized();
return true;
}
@@ -427,10 +436,13 @@ gpu::gles2::ImageManager* GLES2DecoderPassthroughImpl::GetImageManager() {
}
bool GLES2DecoderPassthroughImpl::HasPendingQueries() const {
- return false;
+ return !pending_queries_.empty();
}
-void GLES2DecoderPassthroughImpl::ProcessPendingQueries(bool did_finish) {}
+void GLES2DecoderPassthroughImpl::ProcessPendingQueries(bool did_finish) {
+ // TODO(geofflang): If this returned an error, store it somewhere.
+ ProcessQueries(did_finish);
+}
bool GLES2DecoderPassthroughImpl::HasMoreIdleWork() const {
return false;
@@ -539,14 +551,285 @@ GLES2DecoderPassthroughImpl::GetTranslator(GLenum type) {
return nullptr;
}
-void GLES2DecoderPassthroughImpl::BuildExtensionsString() {
- std::ostringstream combined_string_stream;
- combined_string_stream << reinterpret_cast<const char*>(
- glGetString(GL_EXTENSIONS))
- << " ";
- std::copy(emulated_extensions_.begin(), emulated_extensions_.end(),
- std::ostream_iterator<std::string>(combined_string_stream, " "));
- extension_string_ = combined_string_stream.str();
+void* GLES2DecoderPassthroughImpl::GetScratchMemory(size_t size) {
+ if (scratch_memory_.size() < size) {
+ scratch_memory_.resize(size, 0);
+ }
+ return scratch_memory_.data();
+}
+
+template <typename T>
+error::Error GLES2DecoderPassthroughImpl::PatchGetNumericResults(GLenum pname,
+ GLsizei length,
+ T* params) {
+ // Likely a gl error if no parameters were returned
+ if (length < 1) {
+ return error::kNoError;
+ }
+
+ switch (pname) {
+ case GL_NUM_EXTENSIONS:
+ // Currently handled on the client side.
+ params[0] = 0;
+ break;
+
+ case GL_TEXTURE_BINDING_2D:
+ case GL_TEXTURE_BINDING_CUBE_MAP:
+ case GL_TEXTURE_BINDING_2D_ARRAY:
+ case GL_TEXTURE_BINDING_3D:
+ if (!GetClientID(&resources_->texture_id_map, *params, params)) {
+ return error::kInvalidArguments;
+ }
+ break;
+
+ case GL_ARRAY_BUFFER_BINDING:
+ case GL_ELEMENT_ARRAY_BUFFER_BINDING:
+ case GL_PIXEL_PACK_BUFFER_BINDING:
+ case GL_PIXEL_UNPACK_BUFFER_BINDING:
+ case GL_TRANSFORM_FEEDBACK_BUFFER_BINDING:
+ case GL_COPY_READ_BUFFER_BINDING:
+ case GL_COPY_WRITE_BUFFER_BINDING:
+ case GL_UNIFORM_BUFFER_BINDING:
+ if (!GetClientID(&resources_->buffer_id_map, *params, params)) {
+ return error::kInvalidArguments;
+ }
+ break;
+
+ case GL_RENDERBUFFER_BINDING:
+ if (!GetClientID(&resources_->renderbuffer_id_map, *params, params)) {
+ return error::kInvalidArguments;
+ }
+ break;
+
+ case GL_SAMPLER_BINDING:
+ if (!GetClientID(&resources_->sampler_id_map, *params, params)) {
+ return error::kInvalidArguments;
+ }
+ break;
+
+ case GL_ACTIVE_PROGRAM:
+ if (!GetClientID(&resources_->program_id_map, *params, params)) {
+ return error::kInvalidArguments;
+ }
+ break;
+
+ case GL_FRAMEBUFFER_BINDING:
+ case GL_READ_FRAMEBUFFER_BINDING:
+ if (!GetClientID(&framebuffer_id_map_, *params, params)) {
+ return error::kInvalidArguments;
+ }
+ break;
+
+ case GL_TRANSFORM_FEEDBACK_BINDING:
+ if (!GetClientID(&transform_feedback_id_map_, *params, params)) {
+ return error::kInvalidArguments;
+ }
+ break;
+
+ case GL_VERTEX_ARRAY_BINDING:
+ if (!GetClientID(&vertex_array_id_map_, *params, params)) {
+ return error::kInvalidArguments;
+ }
+ break;
+
+ default:
+ break;
+ }
+
+ return error::kNoError;
+}
+
+// Instantiate templated functions
+#define INSTANTIATE_PATCH_NUMERIC_RESULTS(type) \
+ template error::Error GLES2DecoderPassthroughImpl::PatchGetNumericResults( \
+ GLenum, GLsizei, type*)
+INSTANTIATE_PATCH_NUMERIC_RESULTS(GLint);
+INSTANTIATE_PATCH_NUMERIC_RESULTS(GLint64);
+INSTANTIATE_PATCH_NUMERIC_RESULTS(GLfloat);
+INSTANTIATE_PATCH_NUMERIC_RESULTS(GLboolean);
+#undef INSTANTIATE_PATCH_NUMERIC_RESULTS
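PatchGetNumericResults exists because ANGLE answers binding queries such as GL_TEXTURE_BINDING_2D with service-side object names, while the command buffer client only knows its own client ids. A minimal sketch of that translation, using the ClientServiceMap calls that appear in this patch and hypothetical id values:

// Sketch only: client texture id 5 was mapped to service id 12 at bind time,
// so a raw glGetIntegerv(GL_TEXTURE_BINDING_2D) result of 12 has to be
// rewritten to 5 before it is copied back to the client.
ClientServiceMap<GLuint, GLuint> texture_id_map;
texture_id_map.SetIDMapping(5u /* client */, 12u /* service */);

GLint params[1] = {12};  // value the driver reported
GLuint client_id = 0;
if (texture_id_map.GetClientID(static_cast<GLuint>(params[0]), &client_id)) {
  params[0] = static_cast<GLint>(client_id);  // now 5, the id the app knows
}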
+
+error::Error
+GLES2DecoderPassthroughImpl::PatchGetFramebufferAttachmentParameter(
+ GLenum target,
+ GLenum attachment,
+ GLenum pname,
+ GLsizei length,
+ GLint* params) {
+ // Likely a gl error if no parameters were returned
+ if (length < 1) {
+ return error::kNoError;
+ }
+
+ switch (pname) {
+ // If the attached object name was requested, it needs to be converted back
+ // to a client id.
+ case GL_FRAMEBUFFER_ATTACHMENT_OBJECT_NAME: {
+ GLint object_type = GL_NONE;
+ glGetFramebufferAttachmentParameterivEXT(
+ target, attachment, GL_FRAMEBUFFER_ATTACHMENT_OBJECT_TYPE,
+ &object_type);
+
+ switch (object_type) {
+ case GL_TEXTURE:
+ if (!GetClientID(&resources_->texture_id_map, *params, params)) {
+ return error::kInvalidArguments;
+ }
+ break;
+
+ case GL_RENDERBUFFER:
+ if (!GetClientID(&resources_->renderbuffer_id_map, *params, params)) {
+ return error::kInvalidArguments;
+ }
+ break;
+
+ case GL_NONE:
+ // Default framebuffer, don't transform the result
+ break;
+
+ default:
+ NOTREACHED();
+ break;
+ }
+ } break;
+
+ default:
+ break;
+ }
+
+ return error::kNoError;
+}
+
+void GLES2DecoderPassthroughImpl::InsertError(GLenum error,
+ const std::string&) {
+ // Message ignored for now
+ errors_.insert(error);
+}
+
+GLenum GLES2DecoderPassthroughImpl::PopError() {
+ GLenum error = GL_NO_ERROR;
+ if (!errors_.empty()) {
+ error = *errors_.begin();
+ errors_.erase(errors_.begin());
+ }
+ return error;
+}
+
+bool GLES2DecoderPassthroughImpl::FlushErrors() {
+ bool had_error = false;
+ GLenum error = glGetError();
+ while (error != GL_NO_ERROR) {
+ errors_.insert(error);
+ had_error = true;
+ error = glGetError();
+ }
+ return had_error;
+}
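FlushErrors drains glGetError into the decoder's errors_ set, which DoGetError now reports via PopError, so the passthrough doers bracket driver calls with it: flush stale errors, make the call, and skip decoder-side state tracking if the call itself failed. A minimal sketch of that convention, with DoFoo and glFoo as placeholders for a real entry point:

error::Error GLES2DecoderPassthroughImpl::DoFoo(GLenum pname) {
  FlushErrors();             // capture errors left over from earlier calls
  glFoo(pname);              // hypothetical driver call
  if (FlushErrors()) {
    return error::kNoError;  // the error is queued in errors_ for DoGetError
  }
  // Update decoder-side tracking only when the call succeeded.
  return error::kNoError;
}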
+
+bool GLES2DecoderPassthroughImpl::IsEmulatedQueryTarget(GLenum target) const {
+ // GL_COMMANDS_COMPLETED_CHROMIUM is implemented in ANGLE
+ switch (target) {
+ case GL_COMMANDS_ISSUED_CHROMIUM:
+ case GL_LATENCY_QUERY_CHROMIUM:
+ case GL_ASYNC_PIXEL_PACK_COMPLETED_CHROMIUM:
+ case GL_GET_ERROR_QUERY_CHROMIUM:
+ return true;
+
+ default:
+ return false;
+ }
+}
+
+error::Error GLES2DecoderPassthroughImpl::ProcessQueries(bool did_finish) {
+ while (!pending_queries_.empty()) {
+ const PendingQuery& query = pending_queries_.front();
+ GLint result_available = GL_FALSE;
+ GLuint64 result = 0;
+ switch (query.target) {
+ case GL_COMMANDS_ISSUED_CHROMIUM:
+ result_available = GL_TRUE;
+ result = GL_TRUE;
+ break;
+
+ case GL_LATENCY_QUERY_CHROMIUM:
+ result_available = GL_TRUE;
+ // TODO: time from when the query is ended?
+ result = (base::TimeTicks::Now() - base::TimeTicks()).InMilliseconds();
+ break;
+
+ case GL_ASYNC_PIXEL_PACK_COMPLETED_CHROMIUM:
+ // TODO: Use a fence and do a real async readback
+ result_available = GL_TRUE;
+ result = GL_TRUE;
+ break;
+
+ case GL_GET_ERROR_QUERY_CHROMIUM:
+ result_available = GL_TRUE;
+ FlushErrors();
+ result = PopError();
+ break;
+
+ default:
+ DCHECK(!IsEmulatedQueryTarget(query.target));
+ if (did_finish) {
+ result_available = GL_TRUE;
+ } else {
+ glGetQueryObjectiv(query.service_id, GL_QUERY_RESULT_AVAILABLE,
+ &result_available);
+ }
+ if (result_available == GL_TRUE) {
+ glGetQueryObjectui64v(query.service_id, GL_QUERY_RESULT, &result);
+ }
+ break;
+ }
+
+ if (!result_available) {
+ break;
+ }
+
+ QuerySync* sync = GetSharedMemoryAs<QuerySync*>(
+ query.shm_id, query.shm_offset, sizeof(QuerySync));
+ if (sync == nullptr) {
+ pending_queries_.pop_front();
+ return error::kOutOfBounds;
+ }
+
+ // Mark the query as complete
+ sync->result = result;
+ base::subtle::Release_Store(&sync->process_count, query.submit_count);
+ pending_queries_.pop_front();
+ }
+
+ // If glFinish() has been called, all of our queries should be completed.
+ DCHECK(!did_finish || pending_queries_.empty());
+ return error::kNoError;
+}
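ProcessQueries writes each completed result into the QuerySync block in shared memory and then release-stores the query's submit count. The client-side counterpart is, roughly, an acquire-load of process_count compared against the submit count the query was flushed with; the helper below is illustrative and not part of the patch:

// Illustrative completion check for a QuerySync entry; pairs with the
// Release_Store in ProcessQueries above.
bool QueryResultReady(const QuerySync* sync, uint32_t submit_count) {
  return base::subtle::Acquire_Load(&sync->process_count) ==
         static_cast<base::subtle::Atomic32>(submit_count);
}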
+
+void GLES2DecoderPassthroughImpl::UpdateTextureBinding(GLenum target,
+ GLuint client_id,
+ GLuint service_id) {
+ size_t cur_texture_unit = active_texture_unit_;
+ const auto& target_bound_textures = bound_textures_.at(target);
+ for (size_t bound_texture_index = 0;
+ bound_texture_index < target_bound_textures.size();
+ bound_texture_index++) {
+ GLuint bound_client_id = target_bound_textures[bound_texture_index];
+ if (bound_client_id == client_id) {
+ // Update the active texture unit if needed
+ if (bound_texture_index != cur_texture_unit) {
+ glActiveTexture(static_cast<GLenum>(GL_TEXTURE0 + bound_texture_index));
+ cur_texture_unit = bound_texture_index;
+ }
+
+ // Update the texture binding
+ glBindTexture(target, service_id);
+ }
+ }
+
+ // Reset the active texture unit if it was changed
+ if (cur_texture_unit != active_texture_unit_) {
+ glActiveTexture(static_cast<GLenum>(GL_TEXTURE0 + active_texture_unit_));
+ }
}
#define GLES2_CMD_OP(name) \
diff --git a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough.h b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough.h
index 6c3a5bdc374..6f3fb5ca11d 100644
--- a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough.h
+++ b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough.h
@@ -239,7 +239,53 @@ class GLES2DecoderPassthroughImpl : public GLES2Decoder {
scoped_refptr<ShaderTranslatorInterface> GetTranslator(GLenum type) override;
private:
- void BuildExtensionsString();
+ void* GetScratchMemory(size_t size);
+
+ template <typename T>
+ T* GetTypedScratchMemory(size_t count) {
+ return reinterpret_cast<T*>(GetScratchMemory(count * sizeof(T)));
+ }
+
+ template <typename T, typename GLGetFunction>
+ error::Error GetNumericHelper(GLenum pname,
+ GLsizei bufsize,
+ GLsizei* length,
+ T* params,
+ GLGetFunction get_call) {
+ // Get a scratch buffer to hold the result of the query
+ T* scratch_params = GetTypedScratchMemory<T>(bufsize);
+ get_call(pname, bufsize, length, scratch_params);
+
+ // Update the results of the query, if needed
+ error::Error error = PatchGetNumericResults(pname, *length, scratch_params);
+ if (error != error::kNoError) {
+ *length = 0;
+ return error;
+ }
+
+ // Copy into the destination
+ DCHECK(*length < bufsize);
+ std::copy(scratch_params, scratch_params + *length, params);
+
+ return error::kNoError;
+ }
+
+ template <typename T>
+ error::Error PatchGetNumericResults(GLenum pname, GLsizei length, T* params);
+ error::Error PatchGetFramebufferAttachmentParameter(GLenum target,
+ GLenum attachment,
+ GLenum pname,
+ GLsizei length,
+ GLint* params);
+
+ void InsertError(GLenum error, const std::string& message);
+ GLenum PopError();
+ bool FlushErrors();
+
+ bool IsEmulatedQueryTarget(GLenum target) const;
+ error::Error ProcessQueries(bool did_finish);
+
+ void UpdateTextureBinding(GLenum target, GLuint client_id, GLuint service_id);
int commands_to_process_;
@@ -301,10 +347,37 @@ class GLES2DecoderPassthroughImpl : public GLES2Decoder {
// State tracking of currently bound 2D textures (client IDs)
size_t active_texture_unit_;
- std::vector<GLuint> bound_textures_;
+ std::unordered_map<GLenum, std::vector<GLuint>> bound_textures_;
- std::vector<std::string> emulated_extensions_;
- std::string extension_string_;
+ // Track the service-id to type of all queries for validation
+ struct QueryInfo {
+ GLenum type = GL_NONE;
+ };
+ std::unordered_map<GLuint, QueryInfo> query_info_map_;
+
+ // All queries that are waiting for their results to be ready
+ struct PendingQuery {
+ GLenum target = GL_NONE;
+ GLuint service_id = 0;
+
+ int32_t shm_id = 0;
+ uint32_t shm_offset = 0;
+ base::subtle::Atomic32 submit_count = 0;
+ };
+ std::deque<PendingQuery> pending_queries_;
+
+ // Currently active queries
+ struct ActiveQuery {
+ GLuint service_id = 0;
+ int32_t shm_id = 0;
+ uint32_t shm_offset = 0;
+ };
+ std::unordered_map<GLenum, ActiveQuery> active_queries_;
+
+ std::set<GLenum> errors_;
+
+ // Cache of scratch memory
+ std::vector<uint8_t> scratch_memory_;
// Include the prototypes of all the doer functions from a separate header to
// keep this file clean.
diff --git a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough_doer_prototypes.h b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough_doer_prototypes.h
index d7400f554ac..5135e42e4f8 100644
--- a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough_doer_prototypes.h
+++ b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough_doer_prototypes.h
@@ -194,12 +194,14 @@ error::Error DoGetActiveAttrib(GLuint program,
GLuint index,
GLint* size,
GLenum* type,
- std::string* name);
+ std::string* name,
+ int32_t* success);
error::Error DoGetActiveUniform(GLuint program,
GLuint index,
GLint* size,
GLenum* type,
- std::string* name);
+ std::string* name,
+ int32_t* success);
error::Error DoGetActiveUniformBlockiv(GLuint program,
GLuint index,
GLenum pname,
@@ -305,7 +307,8 @@ error::Error DoGetShaderInfoLog(GLuint shader, std::string* infolog);
error::Error DoGetShaderPrecisionFormat(GLenum shadertype,
GLenum precisiontype,
GLint* range,
- GLint* precision);
+ GLint* precision,
+ int32_t* success);
error::Error DoGetShaderSource(GLuint shader, std::string* source);
error::Error DoGetString(GLenum name, const char** result);
error::Error DoGetSynciv(GLuint sync,
@@ -327,7 +330,8 @@ error::Error DoGetTransformFeedbackVarying(GLuint program,
GLuint index,
GLsizei* size,
GLenum* type,
- std::string* name);
+ std::string* name,
+ int32_t* success);
error::Error DoGetUniformBlockIndex(GLuint program,
const char* name,
GLint* index);
@@ -415,7 +419,8 @@ error::Error DoReadPixels(GLint x,
GLenum type,
GLsizei bufsize,
GLsizei* length,
- void* pixels);
+ void* pixels,
+ int32_t* success);
error::Error DoReleaseShaderCompiler();
error::Error DoRenderbufferStorage(GLenum target,
GLenum internalformat,
@@ -677,10 +682,17 @@ error::Error DoTexStorage2DEXT(GLenum target,
GLsizei height);
error::Error DoGenQueriesEXT(GLsizei n, volatile GLuint* queries);
error::Error DoDeleteQueriesEXT(GLsizei n, const volatile GLuint* queries);
-error::Error DoQueryCounterEXT(GLuint id, GLenum target);
-error::Error DoBeginQueryEXT(GLenum target, GLuint id);
+error::Error DoQueryCounterEXT(GLuint id,
+ GLenum target,
+ int32_t sync_shm_id,
+ uint32_t sync_shm_offset,
+ uint32_t submit_count);
+error::Error DoBeginQueryEXT(GLenum target,
+ GLuint id,
+ int32_t sync_shm_id,
+ uint32_t sync_shm_offset);
error::Error DoBeginTransformFeedback(GLenum primitivemode);
-error::Error DoEndQueryEXT(GLenum target);
+error::Error DoEndQueryEXT(GLenum target, uint32_t submit_count);
error::Error DoEndTransformFeedback();
error::Error DoSetDisjointValueSyncCHROMIUM(DisjointValueSync* sync);
error::Error DoInsertEventMarkerEXT(GLsizei length, const char* marker);
@@ -728,14 +740,18 @@ error::Error DoPostSubBufferCHROMIUM(GLint x,
GLint width,
GLint height);
error::Error DoCopyTextureCHROMIUM(GLenum source_id,
+ GLint source_level,
GLenum dest_id,
+ GLint dest_level,
GLint internalformat,
GLenum dest_type,
GLboolean unpack_flip_y,
GLboolean unpack_premultiply_alpha,
GLboolean unpack_unmultiply_alpha);
error::Error DoCopySubTextureCHROMIUM(GLenum source_id,
+ GLint source_level,
GLenum dest_id,
+ GLint dest_level,
GLint xoffset,
GLint yoffset,
GLint x,
@@ -933,3 +949,8 @@ error::Error DoUniformMatrix4fvStreamTextureMatrixCHROMIUM(
GLint location,
GLboolean transpose,
const volatile GLfloat* defaultValue);
+
+error::Error DoOverlayPromotionHintCHROMIUM(GLuint texture,
+ GLboolean promotion_hint,
+ GLint display_x,
+ GLint display_y);
diff --git a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough_doers.cc b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough_doers.cc
index f7c7a7d3cad..631d185285c 100644
--- a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough_doers.cc
+++ b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough_doers.cc
@@ -194,7 +194,12 @@ void AppendStringToBuffer(std::vector<uint8_t>* data,
// Implementations of commands
error::Error GLES2DecoderPassthroughImpl::DoActiveTexture(GLenum texture) {
+ FlushErrors();
glActiveTexture(texture);
+ if (FlushErrors()) {
+ return error::kNoError;
+ }
+
active_texture_unit_ = static_cast<size_t>(texture) - GL_TEXTURE0;
return error::kNoError;
}
@@ -266,12 +271,36 @@ error::Error GLES2DecoderPassthroughImpl::DoBindSampler(GLuint unit,
error::Error GLES2DecoderPassthroughImpl::DoBindTexture(GLenum target,
GLuint texture) {
- glBindTexture(target, GetTextureServiceID(texture, resources_,
- bind_generates_resource_));
- if (target == GL_TEXTURE_2D &&
- active_texture_unit_ < bound_textures_.size()) {
- bound_textures_[active_texture_unit_] = texture;
+ GLuint service_id =
+ GetTextureServiceID(texture, resources_, bind_generates_resource_);
+
+ FlushErrors();
+
+ glBindTexture(target, service_id);
+
+ // Only update tracking if no error was generated in the bind call
+ if (FlushErrors()) {
+ return error::kNoError;
}
+
+ // Track the currently bound textures
+ DCHECK(bound_textures_.find(target) != bound_textures_.end());
+ DCHECK(bound_textures_[target].size() > active_texture_unit_);
+ bound_textures_[target][active_texture_unit_] = texture;
+
+ if (service_id != 0) {
+ // Create a new texture object to track this texture
+ auto texture_object_iter = resources_->texture_object_map.find(texture);
+ if (texture_object_iter == resources_->texture_object_map.end()) {
+ resources_->texture_object_map.insert(
+ std::make_pair(texture, new TexturePassthrough(service_id, target)));
+ } else {
+ // Shouldn't be possible to get here if this texture has a different
+ // target than the one it was just bound to
+ DCHECK(texture_object_iter->second->target() == target);
+ }
+ }
+
return error::kNoError;
}
@@ -696,12 +725,12 @@ error::Error GLES2DecoderPassthroughImpl::DoFenceSync(GLenum condition,
error::Error GLES2DecoderPassthroughImpl::DoFinish() {
glFinish();
- return error::kNoError;
+ return ProcessQueries(true);
}
error::Error GLES2DecoderPassthroughImpl::DoFlush() {
glFlush();
- return error::kNoError;
+ return ProcessQueries(false);
}
error::Error GLES2DecoderPassthroughImpl::DoFlushMappedBufferRange(
@@ -811,18 +840,49 @@ error::Error GLES2DecoderPassthroughImpl::DoGetActiveAttrib(GLuint program,
GLuint index,
GLint* size,
GLenum* type,
- std::string* name) {
- NOTIMPLEMENTED();
+ std::string* name,
+ int32_t* success) {
+ FlushErrors();
+
+ GLuint service_id = GetProgramServiceID(program, resources_);
+ GLint active_attribute_max_length = 0;
+ glGetProgramiv(service_id, GL_ACTIVE_ATTRIBUTE_MAX_LENGTH,
+ &active_attribute_max_length);
+ if (FlushErrors()) {
+ *success = 0;
+ return error::kNoError;
+ }
+
+ std::vector<char> name_buffer(active_attribute_max_length, 0);
+ glGetActiveAttrib(service_id, index, name_buffer.size(), nullptr, size, type,
+ name_buffer.data());
+ *name = std::string(name_buffer.data());
+ *success = FlushErrors() ? 0 : 1;
return error::kNoError;
}
-error::Error GLES2DecoderPassthroughImpl::DoGetActiveUniform(
- GLuint program,
- GLuint index,
- GLint* size,
- GLenum* type,
- std::string* name) {
- NOTIMPLEMENTED();
+error::Error GLES2DecoderPassthroughImpl::DoGetActiveUniform(GLuint program,
+ GLuint index,
+ GLint* size,
+ GLenum* type,
+ std::string* name,
+ int32_t* success) {
+ FlushErrors();
+
+ GLuint service_id = GetProgramServiceID(program, resources_);
+ GLint active_uniform_max_length = 0;
+ glGetProgramiv(service_id, GL_ACTIVE_UNIFORM_MAX_LENGTH,
+ &active_uniform_max_length);
+ if (FlushErrors()) {
+ *success = 0;
+ return error::kNoError;
+ }
+
+ std::vector<char> name_buffer(active_uniform_max_length, 0);
+ glGetActiveUniform(service_id, index, name_buffer.size(), nullptr, size, type,
+ name_buffer.data());
+ *name = std::string(name_buffer.data());
+ *success = FlushErrors() ? 0 : 1;
return error::kNoError;
}
@@ -878,8 +938,11 @@ error::Error GLES2DecoderPassthroughImpl::DoGetBooleanv(GLenum pname,
GLsizei bufsize,
GLsizei* length,
GLboolean* params) {
- glGetBooleanvRobustANGLE(pname, bufsize, length, params);
- return error::kNoError;
+ return GetNumericHelper(
+ pname, bufsize, length, params,
+ [](GLenum pname, GLsizei bufsize, GLsizei* length, GLboolean* params) {
+ glGetBooleanvRobustANGLE(pname, bufsize, length, params);
+ });
}
error::Error GLES2DecoderPassthroughImpl::DoGetBufferParameteri64v(
@@ -903,7 +966,8 @@ error::Error GLES2DecoderPassthroughImpl::DoGetBufferParameteriv(
}
error::Error GLES2DecoderPassthroughImpl::DoGetError(uint32_t* result) {
- *result = glGetError();
+ FlushErrors();
+ *result = PopError();
return error::kNoError;
}
@@ -911,8 +975,11 @@ error::Error GLES2DecoderPassthroughImpl::DoGetFloatv(GLenum pname,
GLsizei bufsize,
GLsizei* length,
GLfloat* params) {
- glGetFloatvRobustANGLE(pname, bufsize, length, params);
- return error::kNoError;
+ return GetNumericHelper(
+ pname, bufsize, length, params,
+ [](GLenum pname, GLsizei bufsize, GLsizei* length, GLfloat* params) {
+ glGetFloatvRobustANGLE(pname, bufsize, length, params);
+ });
}
error::Error GLES2DecoderPassthroughImpl::DoGetFragDataLocation(
@@ -931,8 +998,23 @@ error::Error GLES2DecoderPassthroughImpl::DoGetFramebufferAttachmentParameteriv(
GLsizei bufsize,
GLsizei* length,
GLint* params) {
- glGetFramebufferAttachmentParameterivRobustANGLE(target, attachment, pname,
- bufsize, length, params);
+ // Get a scratch buffer to hold the result of the query
+ GLint* scratch_params = GetTypedScratchMemory<GLint>(bufsize);
+ glGetFramebufferAttachmentParameterivRobustANGLE(
+ target, attachment, pname, bufsize, length, scratch_params);
+
+ // Update the results of the query, if needed
+ error::Error error = PatchGetFramebufferAttachmentParameter(
+ target, attachment, pname, *length, scratch_params);
+ if (error != error::kNoError) {
+ *length = 0;
+ return error;
+ }
+
+ // Copy into the destination
+ DCHECK(*length < bufsize);
+ std::copy(scratch_params, scratch_params + *length, params);
+
return error::kNoError;
}
@@ -940,8 +1022,11 @@ error::Error GLES2DecoderPassthroughImpl::DoGetInteger64v(GLenum pname,
GLsizei bufsize,
GLsizei* length,
GLint64* params) {
- glGetInteger64vRobustANGLE(pname, bufsize, length, params);
- return error::kNoError;
+ return GetNumericHelper(
+ pname, bufsize, length, params,
+ [](GLenum pname, GLsizei bufsize, GLsizei* length, GLint64* params) {
+ glGetInteger64vRobustANGLE(pname, bufsize, length, params);
+ });
}
error::Error GLES2DecoderPassthroughImpl::DoGetIntegeri_v(GLenum pname,
@@ -966,8 +1051,11 @@ error::Error GLES2DecoderPassthroughImpl::DoGetIntegerv(GLenum pname,
GLsizei bufsize,
GLsizei* length,
GLint* params) {
- glGetIntegervRobustANGLE(pname, bufsize, length, params);
- return error::kNoError;
+ return GetNumericHelper(
+ pname, bufsize, length, params,
+ [](GLenum pname, GLsizei bufsize, GLsizei* length, GLint* params) {
+ glGetIntegervRobustANGLE(pname, bufsize, length, params);
+ });
}
error::Error GLES2DecoderPassthroughImpl::DoGetInternalformativ(GLenum target,
@@ -1063,8 +1151,11 @@ error::Error GLES2DecoderPassthroughImpl::DoGetShaderPrecisionFormat(
GLenum shadertype,
GLenum precisiontype,
GLint* range,
- GLint* precision) {
+ GLint* precision,
+ int32_t* success) {
+ FlushErrors();
glGetShaderPrecisionFormat(shadertype, precisiontype, range, precision);
+ *success = FlushErrors() ? 0 : 1;
return error::kNoError;
}
@@ -1091,7 +1182,7 @@ error::Error GLES2DecoderPassthroughImpl::DoGetString(GLenum name,
*result = GetServiceVendorString(feature_info_.get());
break;
case GL_EXTENSIONS:
- *result = extension_string_.c_str();
+ *result = feature_info_->extensions().c_str();
break;
default:
*result = reinterpret_cast<const char*>(glGetString(name));
@@ -1134,8 +1225,24 @@ error::Error GLES2DecoderPassthroughImpl::DoGetTransformFeedbackVarying(
GLuint index,
GLsizei* size,
GLenum* type,
- std::string* name) {
- NOTIMPLEMENTED();
+ std::string* name,
+ int32_t* success) {
+ FlushErrors();
+
+ GLuint service_id = GetProgramServiceID(program, resources_);
+ GLint transform_feedback_varying_max_length = 0;
+ glGetProgramiv(service_id, GL_TRANSFORM_FEEDBACK_VARYING_MAX_LENGTH,
+ &transform_feedback_varying_max_length);
+ if (FlushErrors()) {
+ *success = 0;
+ return error::kNoError;
+ }
+
+ std::vector<char> name_buffer(transform_feedback_varying_max_length, 0);
+ glGetTransformFeedbackVarying(service_id, index, name_buffer.size(), nullptr,
+ size, type, name_buffer.data());
+ *name = std::string(name_buffer.data());
+ *success = FlushErrors() ? 0 : 1;
return error::kNoError;
}
@@ -1396,9 +1503,12 @@ error::Error GLES2DecoderPassthroughImpl::DoReadPixels(GLint x,
GLenum type,
GLsizei bufsize,
GLsizei* length,
- void* pixels) {
+ void* pixels,
+ int32_t* success) {
+ FlushErrors();
glReadPixelsRobustANGLE(x, y, width, height, format, type, bufsize, length,
pixels);
+ *success = FlushErrors() ? 0 : 1;
return error::kNoError;
}
@@ -2139,21 +2249,118 @@ error::Error GLES2DecoderPassthroughImpl::DoGenQueriesEXT(
error::Error GLES2DecoderPassthroughImpl::DoDeleteQueriesEXT(
GLsizei n,
const volatile GLuint* queries) {
+ std::vector<GLuint> queries_copy(queries, queries + n);
+ // If any of these queries are pending or active, remove them from the lists
+ for (GLuint query_client_id : queries_copy) {
+ GLuint query_service_id = 0;
+ if (!query_id_map_.GetServiceID(query_client_id, &query_service_id) ||
+ query_service_id == 0) {
+ continue;
+ }
+
+ QueryInfo query_info = query_info_map_[query_service_id];
+ query_info_map_.erase(query_service_id);
+
+ if (query_info.type == GL_NONE) {
+ // Query was never started
+ continue;
+ }
+
+ auto active_queries_iter = active_queries_.find(query_info.type);
+ if (active_queries_iter != active_queries_.end()) {
+ active_queries_.erase(active_queries_iter);
+ }
+
+ auto pending_iter =
+ std::find_if(pending_queries_.begin(), pending_queries_.end(),
+ [query_service_id](const PendingQuery& pending_query) {
+ return pending_query.service_id == query_service_id;
+ });
+ if (pending_iter != pending_queries_.end()) {
+ pending_queries_.erase(pending_iter);
+ }
+ }
return DeleteHelper(
- n, queries, &query_id_map_,
+ queries_copy.size(), queries_copy.data(), &query_id_map_,
[](GLsizei n, GLuint* queries) { glDeleteQueries(n, queries); });
}
-error::Error GLES2DecoderPassthroughImpl::DoQueryCounterEXT(GLuint id,
- GLenum target) {
- glQueryCounter(GetQueryServiceID(id, &query_id_map_), target);
- return error::kNoError;
+error::Error GLES2DecoderPassthroughImpl::DoQueryCounterEXT(
+ GLuint id,
+ GLenum target,
+ int32_t sync_shm_id,
+ uint32_t sync_shm_offset,
+ uint32_t submit_count) {
+ GLuint service_id = GetQueryServiceID(id, &query_id_map_);
+
+ // Flush all previous errors
+ FlushErrors();
+
+ glQueryCounter(service_id, target);
+
+ // Check if a new error was generated
+ if (FlushErrors()) {
+ return error::kNoError;
+ }
+
+ QueryInfo* query_info = &query_info_map_[service_id];
+ query_info->type = target;
+
+ PendingQuery pending_query;
+ pending_query.target = target;
+ pending_query.service_id = service_id;
+ pending_query.shm_id = sync_shm_id;
+ pending_query.shm_offset = sync_shm_offset;
+ pending_query.submit_count = submit_count;
+ pending_queries_.push_back(pending_query);
+
+ return ProcessQueries(false);
}
-error::Error GLES2DecoderPassthroughImpl::DoBeginQueryEXT(GLenum target,
- GLuint id) {
- // TODO(geofflang): Track active queries
- glBeginQuery(target, GetQueryServiceID(id, &query_id_map_));
+error::Error GLES2DecoderPassthroughImpl::DoBeginQueryEXT(
+ GLenum target,
+ GLuint id,
+ int32_t sync_shm_id,
+ uint32_t sync_shm_offset) {
+ GLuint service_id = GetQueryServiceID(id, &query_id_map_);
+ QueryInfo* query_info = &query_info_map_[service_id];
+
+ if (IsEmulatedQueryTarget(target)) {
+ if (active_queries_.find(target) != active_queries_.end()) {
+ InsertError(GL_INVALID_OPERATION, "Query already active on target.");
+ return error::kNoError;
+ }
+
+ if (id == 0) {
+ InsertError(GL_INVALID_OPERATION, "Query id is 0.");
+ return error::kNoError;
+ }
+
+ if (query_info->type != GL_NONE && query_info->type != target) {
+ InsertError(GL_INVALID_OPERATION,
+ "Query type does not match the target.");
+ return error::kNoError;
+ }
+ } else {
+ // Flush all previous errors
+ FlushErrors();
+
+ glBeginQuery(target, service_id);
+
+ // Check if a new error was generated
+ if (FlushErrors()) {
+ return error::kNoError;
+ }
+ }
+
+ query_info->type = target;
+
+ ActiveQuery query;
+ query.service_id = service_id;
+ query.shm_id = sync_shm_id;
+ query.shm_offset = sync_shm_offset;
+ active_queries_[target] = query;
+
return error::kNoError;
}
@@ -2163,10 +2370,38 @@ error::Error GLES2DecoderPassthroughImpl::DoBeginTransformFeedback(
return error::kNoError;
}
-error::Error GLES2DecoderPassthroughImpl::DoEndQueryEXT(GLenum target) {
- // TODO(geofflang): Track active queries
- glEndQuery(target);
- return error::kNoError;
+error::Error GLES2DecoderPassthroughImpl::DoEndQueryEXT(GLenum target,
+ uint32_t submit_count) {
+ if (IsEmulatedQueryTarget(target)) {
+ if (active_queries_.find(target) == active_queries_.end()) {
+ InsertError(GL_INVALID_OPERATION, "No active query on target.");
+ return error::kNoError;
+ }
+ } else {
+ // Flush all previous errors
+ FlushErrors();
+
+ glEndQuery(target);
+
+ // Check if a new error was generated
+ if (FlushErrors()) {
+ return error::kNoError;
+ }
+ }
+
+ DCHECK(active_queries_.find(target) != active_queries_.end());
+ ActiveQuery active_query = active_queries_[target];
+ active_queries_.erase(target);
+
+ PendingQuery pending_query;
+ pending_query.target = target;
+ pending_query.service_id = active_query.service_id;
+ pending_query.shm_id = active_query.shm_id;
+ pending_query.shm_offset = active_query.shm_offset;
+ pending_query.submit_count = submit_count;
+ pending_queries_.push_back(pending_query);
+
+ return ProcessQueries(false);
}
error::Error GLES2DecoderPassthroughImpl::DoEndTransformFeedback() {
@@ -2414,7 +2649,21 @@ error::Error GLES2DecoderPassthroughImpl::DoGetUniformsES3CHROMIUM(
error::Error GLES2DecoderPassthroughImpl::DoGetTranslatedShaderSourceANGLE(
GLuint shader,
std::string* source) {
- NOTIMPLEMENTED();
+ FlushErrors();
+ GLuint service_id = GetShaderServiceID(shader, resources_);
+ GLint translated_source_length = 0;
+ glGetShaderiv(service_id, GL_TRANSLATED_SHADER_SOURCE_LENGTH_ANGLE,
+ &translated_source_length);
+ if (FlushErrors()) {
+ return error::kNoError;
+ }
+
+ if (translated_source_length > 0) {
+ std::vector<char> buffer(translated_source_length, 0);
+ glGetTranslatedShaderSourceANGLE(service_id, translated_source_length,
+ nullptr, buffer.data());
+ *source = std::string(buffer.data());
+ }
return error::kNoError;
}
@@ -2441,7 +2690,9 @@ error::Error GLES2DecoderPassthroughImpl::DoPostSubBufferCHROMIUM(
error::Error GLES2DecoderPassthroughImpl::DoCopyTextureCHROMIUM(
GLenum source_id,
+ GLint source_level,
GLenum dest_id,
+ GLint dest_level,
GLint internalformat,
GLenum dest_type,
GLboolean unpack_flip_y,
@@ -2456,7 +2707,9 @@ error::Error GLES2DecoderPassthroughImpl::DoCopyTextureCHROMIUM(
error::Error GLES2DecoderPassthroughImpl::DoCopySubTextureCHROMIUM(
GLenum source_id,
+ GLint source_level,
GLenum dest_id,
+ GLint dest_level,
GLint xoffset,
GLint yoffset,
GLint x,
@@ -2511,26 +2764,23 @@ error::Error GLES2DecoderPassthroughImpl::DoVertexAttribDivisorANGLE(
error::Error GLES2DecoderPassthroughImpl::DoProduceTextureCHROMIUM(
GLenum target,
const volatile GLbyte* mailbox) {
- // TODO(geofflang): validation
-
- GLuint texture_client_id = bound_textures_[active_texture_unit_];
- scoped_refptr<TexturePassthrough> texture;
+ auto bound_textures_iter = bound_textures_.find(target);
+ if (bound_textures_iter == bound_textures_.end()) {
+ InsertError(GL_INVALID_OPERATION, "Invalid texture target.");
+ return error::kNoError;
+ }
+ GLuint texture_client_id = bound_textures_iter->second[active_texture_unit_];
auto texture_object_iter =
resources_->texture_object_map.find(texture_client_id);
- if (texture_object_iter != resources_->texture_object_map.end()) {
- texture = texture_object_iter->second.get();
- } else {
- GLuint service_id =
- GetTextureServiceID(texture_client_id, resources_, false);
- texture = new TexturePassthrough(service_id);
- resources_->texture_object_map.insert(
- std::make_pair(texture_client_id, texture));
+ if (texture_object_iter == resources_->texture_object_map.end()) {
+ InsertError(GL_INVALID_OPERATION, "Unknown texture for target.");
+ return error::kNoError;
}
const Mailbox& mb = Mailbox::FromVolatile(
*reinterpret_cast<const volatile Mailbox*>(mailbox));
- mailbox_manager_->ProduceTexture(mb, texture.get());
+ mailbox_manager_->ProduceTexture(mb, texture_object_iter->second.get());
return error::kNoError;
}
@@ -2538,19 +2788,17 @@ error::Error GLES2DecoderPassthroughImpl::DoProduceTextureDirectCHROMIUM(
GLuint texture_client_id,
GLenum target,
const volatile GLbyte* mailbox) {
- // TODO(geofflang): validation
-
- scoped_refptr<TexturePassthrough> texture;
auto texture_object_iter =
resources_->texture_object_map.find(texture_client_id);
- if (texture_object_iter != resources_->texture_object_map.end()) {
- texture = texture_object_iter->second.get();
- } else {
- GLuint service_id =
- GetTextureServiceID(texture_client_id, resources_, false);
- texture = new TexturePassthrough(service_id);
- resources_->texture_object_map.insert(
- std::make_pair(texture_client_id, texture));
+ if (texture_object_iter == resources_->texture_object_map.end()) {
+ InsertError(GL_INVALID_OPERATION, "Unknown texture for target.");
+ return error::kNoError;
+ }
+
+ scoped_refptr<TexturePassthrough> texture = texture_object_iter->second;
+ if (texture->target() != target) {
+ InsertError(GL_INVALID_OPERATION, "Texture target does not match.");
+ return error::kNoError;
}
const Mailbox& mb = Mailbox::FromVolatile(
@@ -2562,21 +2810,41 @@ error::Error GLES2DecoderPassthroughImpl::DoProduceTextureDirectCHROMIUM(
error::Error GLES2DecoderPassthroughImpl::DoConsumeTextureCHROMIUM(
GLenum target,
const volatile GLbyte* mailbox) {
- // TODO(geofflang): validation
+ auto bound_textures_iter = bound_textures_.find(target);
+ if (bound_textures_iter == bound_textures_.end()) {
+ InsertError(GL_INVALID_OPERATION, "Invalid texture target.");
+ return error::kNoError;
+ }
+
+ GLuint client_id = bound_textures_iter->second[active_texture_unit_];
+ if (client_id == 0) {
+ InsertError(GL_INVALID_OPERATION, "Unknown texture for target.");
+ return error::kNoError;
+ }
const Mailbox& mb = Mailbox::FromVolatile(
*reinterpret_cast<const volatile Mailbox*>(mailbox));
scoped_refptr<TexturePassthrough> texture = static_cast<TexturePassthrough*>(
group_->mailbox_manager()->ConsumeTexture(mb));
if (texture == nullptr) {
- // TODO(geofflang): error, missing mailbox
+ InsertError(GL_INVALID_OPERATION, "Invalid mailbox name.");
+ return error::kNoError;
+ }
+
+ if (texture->target() != target) {
+ InsertError(GL_INVALID_OPERATION, "Texture target does not match.");
return error::kNoError;
}
- GLuint client_id = bound_textures_[active_texture_unit_];
+ // Update id mappings
+ resources_->texture_id_map.RemoveClientID(client_id);
resources_->texture_id_map.SetIDMapping(client_id, texture->service_id());
resources_->texture_object_map.erase(client_id);
resources_->texture_object_map.insert(std::make_pair(client_id, texture));
+
+ // Bind the service id that now represents this texture
+ UpdateTextureBinding(target, client_id, texture->service_id());
+
return error::kNoError;
}
@@ -2584,25 +2852,35 @@ error::Error GLES2DecoderPassthroughImpl::DoCreateAndConsumeTextureINTERNAL(
GLenum target,
GLuint texture_client_id,
const volatile GLbyte* mailbox) {
- // TODO(geofflang): validation
-
if (resources_->texture_id_map.GetServiceID(texture_client_id, nullptr)) {
return error::kInvalidArguments;
}
+
const Mailbox& mb = Mailbox::FromVolatile(
*reinterpret_cast<const volatile Mailbox*>(mailbox));
scoped_refptr<TexturePassthrough> texture = static_cast<TexturePassthrough*>(
group_->mailbox_manager()->ConsumeTexture(mb));
if (texture == nullptr) {
- // TODO(geofflang): error, missing mailbox
+ InsertError(GL_INVALID_OPERATION, "Invalid mailbox name.");
+ return error::kNoError;
+ }
+
+ if (texture->target() != target) {
+ InsertError(GL_INVALID_OPERATION, "Texture target does not match.");
return error::kNoError;
}
+ // Update id mappings
+ resources_->texture_id_map.RemoveClientID(texture_client_id);
resources_->texture_id_map.SetIDMapping(texture_client_id,
texture->service_id());
resources_->texture_object_map.erase(texture_client_id);
resources_->texture_object_map.insert(
std::make_pair(texture_client_id, texture));
+
+ // Bind the service id that now represents this texture
+ UpdateTextureBinding(target, texture_client_id, texture->service_id());
+
return error::kNoError;
}
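The produce/consume doers above now validate both the mailbox and the texture target, and a consume rebinds the new service id on every unit where the client id is bound (UpdateTextureBinding). For orientation only, and assuming the usual CHROMIUM_texture_mailbox client entry points, a cross-context sharing round trip looks roughly like this:

// Sketch, not part of the patch; producer_texture_id is a hypothetical
// texture created in the producing context.
GLbyte mailbox[GL_MAILBOX_SIZE_CHROMIUM];
glGenMailboxCHROMIUM(mailbox);

// Producing context: publish the texture under the mailbox name.
glProduceTextureDirectCHROMIUM(producer_texture_id, GL_TEXTURE_2D, mailbox);

// Consuming context: create a client id backed by the produced texture.
GLuint consumer_texture_id =
    glCreateAndConsumeTextureCHROMIUM(GL_TEXTURE_2D, mailbox);
glBindTexture(GL_TEXTURE_2D, consumer_texture_id);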
@@ -3035,5 +3313,14 @@ GLES2DecoderPassthroughImpl::DoUniformMatrix4fvStreamTextureMatrixCHROMIUM(
return error::kNoError;
}
+error::Error GLES2DecoderPassthroughImpl::DoOverlayPromotionHintCHROMIUM(
+ GLuint texture,
+ GLboolean promotion_hint,
+ GLint display_x,
+ GLint display_y) {
+ NOTIMPLEMENTED();
+ return error::kNoError;
+}
+
} // namespace gles2
} // namespace gpu
diff --git a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough_handlers.cc b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough_handlers.cc
index 5ec2a318d49..aba98b61a3d 100644
--- a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough_handlers.cc
+++ b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough_handlers.cc
@@ -170,13 +170,13 @@ error::Error GLES2DecoderPassthroughImpl::HandleGetActiveAttrib(
}
std::string name;
- error::Error error =
- DoGetActiveAttrib(program, index, &result->size, &result->type, &name);
+ error::Error error = DoGetActiveAttrib(
+ program, index, &result->size, &result->type, &name, &result->success);
if (error != error::kNoError) {
+ result->success = 0;
return error;
}
- result->success = 1; // true.
Bucket* bucket = CreateBucket(name_bucket_id);
bucket->SetFromString(name.c_str());
return error::kNoError;
@@ -202,13 +202,13 @@ error::Error GLES2DecoderPassthroughImpl::HandleGetActiveUniform(
}
std::string name;
- error::Error error =
- DoGetActiveUniform(program, index, &result->size, &result->type, &name);
+ error::Error error = DoGetActiveUniform(
+ program, index, &result->size, &result->type, &name, &result->success);
if (error != error::kNoError) {
+ result->success = 0;
return error;
}
- result->success = 1; // true.
Bucket* bucket = CreateBucket(name_bucket_id);
bucket->SetFromString(name.c_str());
return error::kNoError;
@@ -518,13 +518,13 @@ error::Error GLES2DecoderPassthroughImpl::HandleGetShaderPrecisionFormat(
GLint range[2] = {0, 0};
GLint precision = 0;
- error::Error error = DoGetShaderPrecisionFormat(shader_type, precision_type,
- range, &precision);
+ error::Error error = DoGetShaderPrecisionFormat(
+ shader_type, precision_type, range, &precision, &result->success);
if (error != error::kNoError) {
+ result->success = 0;
return error;
}
- result->success = 1; // true
result->min_range = range[0];
result->max_range = range[1];
result->precision = precision;
@@ -596,13 +596,13 @@ error::Error GLES2DecoderPassthroughImpl::HandleGetTransformFeedbackVarying(
GLsizei size = 0;
GLenum type = 0;
std::string name;
- error::Error error =
- DoGetTransformFeedbackVarying(program, index, &size, &type, &name);
+ error::Error error = DoGetTransformFeedbackVarying(
+ program, index, &size, &type, &name, &result->success);
if (error != error::kNoError) {
+ result->success = 0;
return error;
}
- result->success = 1; // true.
result->size = static_cast<int32_t>(size);
result->type = static_cast<uint32_t>(type);
Bucket* bucket = CreateBucket(name_bucket_id);
@@ -853,8 +853,9 @@ error::Error GLES2DecoderPassthroughImpl::HandleReadPixels(
GLsizei bufsize = buffer_size;
GLsizei length = 0;
- error::Error error =
- DoReadPixels(x, y, width, height, format, type, bufsize, &length, pixels);
+ int32_t success = 0;
+ error::Error error = DoReadPixels(x, y, width, height, format, type, bufsize,
+ &length, pixels, &success);
if (error != error::kNoError) {
return error;
}
@@ -876,7 +877,7 @@ error::Error GLES2DecoderPassthroughImpl::HandleReadPixels(
}
if (result) {
- result->success = 1;
+ result->success = success;
result->row_length = static_cast<uint32_t>(width);
result->num_rows = static_cast<uint32_t>(height);
}
@@ -925,22 +926,23 @@ error::Error GLES2DecoderPassthroughImpl::HandleTexImage2D(
GLint border = static_cast<GLint>(c.border);
GLenum format = static_cast<GLenum>(c.format);
GLenum type = static_cast<GLenum>(c.type);
+ uint32_t pixels_shm_id = c.pixels_shm_id;
+ uint32_t pixels_shm_offset = c.pixels_shm_offset;
- GLsizei imagesize = 0;
- const void* pixels = NULL;
- if (c.pixels_shm_id != 0 || c.pixels_shm_offset != 0) {
- unsigned int buffer_size = 0;
+ unsigned int buffer_size = 0;
+ const void* pixels = nullptr;
+
+ if (pixels_shm_id != 0 || pixels_shm_offset != 0) {
pixels = GetSharedMemoryAndSizeAs<uint8_t*>(
- c.pixels_shm_id, c.pixels_shm_offset, &buffer_size);
+ pixels_shm_id, pixels_shm_offset, &buffer_size);
if (!pixels) {
return error::kOutOfBounds;
}
- imagesize = buffer_size;
}
error::Error error =
DoTexImage2D(target, level, internal_format, width, height, border,
- format, type, imagesize, pixels);
+ format, type, buffer_size, pixels);
if (error != error::kNoError) {
return error;
}
@@ -962,22 +964,23 @@ error::Error GLES2DecoderPassthroughImpl::HandleTexImage3D(
GLint border = static_cast<GLint>(c.border);
GLenum format = static_cast<GLenum>(c.format);
GLenum type = static_cast<GLenum>(c.type);
+ uint32_t pixels_shm_id = c.pixels_shm_id;
+ uint32_t pixels_shm_offset = c.pixels_shm_offset;
- GLsizei imagesize = 0;
- const void* pixels = NULL;
- if (c.pixels_shm_id != 0 || c.pixels_shm_offset != 0) {
- unsigned int buffer_size = 0;
+ unsigned int buffer_size = 0;
+ const void* pixels = nullptr;
+
+ if (pixels_shm_id != 0 || pixels_shm_offset != 0) {
pixels = GetSharedMemoryAndSizeAs<uint8_t*>(
- c.pixels_shm_id, c.pixels_shm_offset, &buffer_size);
+ pixels_shm_id, pixels_shm_offset, &buffer_size);
if (!pixels) {
return error::kOutOfBounds;
}
- imagesize = buffer_size;
}
error::Error error =
DoTexImage3D(target, level, internal_format, width, height, depth, border,
- format, type, imagesize, pixels);
+ format, type, buffer_size, pixels);
if (error != error::kNoError) {
return error;
}
@@ -998,17 +1001,23 @@ error::Error GLES2DecoderPassthroughImpl::HandleTexSubImage2D(
GLsizei height = static_cast<GLsizei>(c.height);
GLenum format = static_cast<GLenum>(c.format);
GLenum type = static_cast<GLenum>(c.type);
+ uint32_t pixels_shm_id = c.pixels_shm_id;
+ uint32_t pixels_shm_offset = c.pixels_shm_offset;
unsigned int buffer_size = 0;
- const void* pixels = GetSharedMemoryAndSizeAs<uint8_t*>(
- c.pixels_shm_id, c.pixels_shm_offset, &buffer_size);
- if (!pixels) {
- return error::kOutOfBounds;
+ const void* pixels = nullptr;
+
+ if (pixels_shm_id != 0 || pixels_shm_offset != 0) {
+ pixels = GetSharedMemoryAndSizeAs<uint8_t*>(
+ pixels_shm_id, pixels_shm_offset, &buffer_size);
+ if (!pixels) {
+ return error::kOutOfBounds;
+ }
}
- GLsizei imagesize = buffer_size;
- error::Error error = DoTexSubImage2D(target, level, xoffset, yoffset, width,
- height, format, type, imagesize, pixels);
+ error::Error error =
+ DoTexSubImage2D(target, level, xoffset, yoffset, width, height, format,
+ type, buffer_size, pixels);
if (error != error::kNoError) {
return error;
}
@@ -1031,18 +1040,23 @@ error::Error GLES2DecoderPassthroughImpl::HandleTexSubImage3D(
GLsizei depth = static_cast<GLsizei>(c.depth);
GLenum format = static_cast<GLenum>(c.format);
GLenum type = static_cast<GLenum>(c.type);
+ uint32_t pixels_shm_id = c.pixels_shm_id;
+ uint32_t pixels_shm_offset = c.pixels_shm_offset;
unsigned int buffer_size = 0;
- const void* pixels = GetSharedMemoryAndSizeAs<uint8_t*>(
- c.pixels_shm_id, c.pixels_shm_offset, &buffer_size);
- if (!pixels) {
- return error::kOutOfBounds;
+ const void* pixels = nullptr;
+
+ if (pixels_shm_id != 0 || pixels_shm_offset != 0) {
+ pixels = GetSharedMemoryAndSizeAs<uint8_t*>(
+ pixels_shm_id, pixels_shm_offset, &buffer_size);
+ if (!pixels) {
+ return error::kOutOfBounds;
+ }
}
- GLsizei imagesize = buffer_size;
error::Error error =
DoTexSubImage3D(target, level, xoffset, yoffset, zoffset, width, height,
- depth, format, type, imagesize, pixels);
+ depth, format, type, buffer_size, pixels);
if (error != error::kNoError) {
return error;
}
@@ -1133,8 +1147,12 @@ error::Error GLES2DecoderPassthroughImpl::HandleQueryCounterEXT(
*static_cast<const volatile gles2::cmds::QueryCounterEXT*>(cmd_data);
GLuint id = static_cast<GLuint>(c.id);
GLenum target = static_cast<GLenum>(c.target);
+ int32_t sync_shm_id = static_cast<int32_t>(c.sync_data_shm_id);
+ uint32_t sync_shm_offset = static_cast<uint32_t>(c.sync_data_shm_offset);
+ uint32_t submit_count = static_cast<GLuint>(c.submit_count);
- error::Error error = DoQueryCounterEXT(id, target);
+ error::Error error =
+ DoQueryCounterEXT(id, target, sync_shm_id, sync_shm_offset, submit_count);
if (error != error::kNoError) {
return error;
}
@@ -1149,8 +1167,11 @@ error::Error GLES2DecoderPassthroughImpl::HandleBeginQueryEXT(
*static_cast<const volatile gles2::cmds::BeginQueryEXT*>(cmd_data);
GLenum target = static_cast<GLenum>(c.target);
GLuint id = static_cast<GLuint>(c.id);
+ int32_t sync_shm_id = static_cast<int32_t>(c.sync_data_shm_id);
+ uint32_t sync_shm_offset = static_cast<uint32_t>(c.sync_data_shm_offset);
- error::Error error = DoBeginQueryEXT(target, id);
+ error::Error error =
+ DoBeginQueryEXT(target, id, sync_shm_id, sync_shm_offset);
if (error != error::kNoError) {
return error;
}
@@ -1164,8 +1185,9 @@ error::Error GLES2DecoderPassthroughImpl::HandleEndQueryEXT(
const volatile gles2::cmds::EndQueryEXT& c =
*static_cast<const volatile gles2::cmds::EndQueryEXT*>(cmd_data);
GLenum target = static_cast<GLenum>(c.target);
+ uint32_t submit_count = static_cast<GLuint>(c.submit_count);
- error::Error error = DoEndQueryEXT(target);
+ error::Error error = DoEndQueryEXT(target, submit_count);
if (error != error::kNoError) {
return error;
}
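
The three query handlers above now forward sync_data_shm_id, sync_data_shm_offset and submit_count to the Do* functions, so the passthrough decoder can write results into the client-visible sync block itself. A minimal sketch of that write-back is below; the QuerySyncSketch layout (a submit counter plus a 64-bit result) is an assumption modelled on those parameters, and real code would use an atomic release store rather than a plain write.

#include <cstdint>

// Assumed layout of the per-query sync block that the shared-memory id and
// offset point at; not the literal struct from the command buffer headers.
struct QuerySyncSketch {
  uint32_t process_count;  // last submit_count the service has completed
  uint64_t result;         // query result, e.g. elapsed time or sample count
};

void PublishQueryResult(QuerySyncSketch* sync, uint32_t submit_count,
                        uint64_t result) {
  sync->result = result;
  // Publish the counter last so a client polling process_count only reads
  // `result` once it is valid for this submission.
  sync->process_count = submit_count;
}
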
diff --git a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough_handlers_autogen.cc b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough_handlers_autogen.cc
index 6f3935fa187..9e6c461650c 100644
--- a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough_handlers_autogen.cc
+++ b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough_handlers_autogen.cc
@@ -3677,7 +3677,9 @@ error::Error GLES2DecoderPassthroughImpl::HandleCopyTextureCHROMIUM(
const volatile gles2::cmds::CopyTextureCHROMIUM& c =
*static_cast<const volatile gles2::cmds::CopyTextureCHROMIUM*>(cmd_data);
GLenum source_id = static_cast<GLenum>(c.source_id);
+ GLint source_level = static_cast<GLint>(c.source_level);
GLenum dest_id = static_cast<GLenum>(c.dest_id);
+ GLint dest_level = static_cast<GLint>(c.dest_level);
GLint internalformat = static_cast<GLint>(c.internalformat);
GLenum dest_type = static_cast<GLenum>(c.dest_type);
GLboolean unpack_flip_y = static_cast<GLboolean>(c.unpack_flip_y);
@@ -3686,8 +3688,8 @@ error::Error GLES2DecoderPassthroughImpl::HandleCopyTextureCHROMIUM(
GLboolean unpack_unmultiply_alpha =
static_cast<GLboolean>(c.unpack_unmultiply_alpha);
error::Error error = DoCopyTextureCHROMIUM(
- source_id, dest_id, internalformat, dest_type, unpack_flip_y,
- unpack_premultiply_alpha, unpack_unmultiply_alpha);
+ source_id, source_level, dest_id, dest_level, internalformat, dest_type,
+ unpack_flip_y, unpack_premultiply_alpha, unpack_unmultiply_alpha);
if (error != error::kNoError) {
return error;
}
@@ -3701,7 +3703,9 @@ error::Error GLES2DecoderPassthroughImpl::HandleCopySubTextureCHROMIUM(
*static_cast<const volatile gles2::cmds::CopySubTextureCHROMIUM*>(
cmd_data);
GLenum source_id = static_cast<GLenum>(c.source_id);
+ GLint source_level = static_cast<GLint>(c.source_level);
GLenum dest_id = static_cast<GLenum>(c.dest_id);
+ GLint dest_level = static_cast<GLint>(c.dest_level);
GLint xoffset = static_cast<GLint>(c.xoffset);
GLint yoffset = static_cast<GLint>(c.yoffset);
GLint x = static_cast<GLint>(c.x);
@@ -3714,8 +3718,9 @@ error::Error GLES2DecoderPassthroughImpl::HandleCopySubTextureCHROMIUM(
GLboolean unpack_unmultiply_alpha =
static_cast<GLboolean>(c.unpack_unmultiply_alpha);
error::Error error = DoCopySubTextureCHROMIUM(
- source_id, dest_id, xoffset, yoffset, x, y, width, height, unpack_flip_y,
- unpack_premultiply_alpha, unpack_unmultiply_alpha);
+ source_id, source_level, dest_id, dest_level, xoffset, yoffset, x, y,
+ width, height, unpack_flip_y, unpack_premultiply_alpha,
+ unpack_unmultiply_alpha);
if (error != error::kNoError) {
return error;
}
@@ -4130,5 +4135,23 @@ error::Error GLES2DecoderPassthroughImpl::
return error::kNoError;
}
+error::Error GLES2DecoderPassthroughImpl::HandleOverlayPromotionHintCHROMIUM(
+ uint32_t immediate_data_size,
+ const volatile void* cmd_data) {
+ const volatile gles2::cmds::OverlayPromotionHintCHROMIUM& c =
+ *static_cast<const volatile gles2::cmds::OverlayPromotionHintCHROMIUM*>(
+ cmd_data);
+ GLuint texture = c.texture;
+ GLboolean promotion_hint = static_cast<GLboolean>(c.promotion_hint);
+ GLint display_x = static_cast<GLint>(c.display_x);
+ GLint display_y = static_cast<GLint>(c.display_y);
+ error::Error error = DoOverlayPromotionHintCHROMIUM(texture, promotion_hint,
+ display_x, display_y);
+ if (error != error::kNoError) {
+ return error;
+ }
+ return error::kNoError;
+}
+
} // namespace gles2
} // namespace gpu
diff --git a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest.cc b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest.cc
index 87c5e463e5a..5c88abfb971 100644
--- a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest.cc
+++ b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest.cc
@@ -90,6 +90,15 @@ void GLES2DecoderManualInitTest::EnableDisableTest(GLenum cap,
TEST_P(GLES3DecoderTest, Basic) {
// Make sure the setup is correct for ES3.
EXPECT_TRUE(feature_info()->IsWebGL2OrES3Context());
+ EXPECT_FALSE(feature_info()->IsWebGLContext());
+ EXPECT_TRUE(feature_info()->validators()->texture_bind_target.IsValid(
+ GL_TEXTURE_3D));
+}
+
+TEST_P(WebGL2DecoderTest, Basic) {
+ // Make sure the setup is correct for WebGL2.
+ EXPECT_TRUE(feature_info()->IsWebGL2OrES3Context());
+ EXPECT_TRUE(feature_info()->IsWebGLContext());
EXPECT_TRUE(feature_info()->validators()->texture_bind_target.IsValid(
GL_TEXTURE_3D));
}
@@ -1475,6 +1484,17 @@ class GLES2DecoderDoCommandsTest : public GLES2DecoderTest {
int entries_per_cmd_;
};
+TEST_P(GLES3DecoderTest, BeginInvalidTargetQueryFails) {
+ BeginQueryEXT begin_cmd;
+ begin_cmd.Init(GL_SAMPLES_PASSED,
+ kNewClientId,
+ kSharedMemoryId,
+ kSharedMemoryOffset);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(begin_cmd));
+ EXPECT_EQ(GL_INVALID_ENUM, GetGLError());
+}
+
+
TEST_P(GLES3DecoderTest, BindTransformFeedbackValidArgs) {
EXPECT_CALL(*gl_, BindTransformFeedback(GL_TRANSFORM_FEEDBACK,
kServiceTransformFeedbackId));
@@ -1525,6 +1545,56 @@ TEST_P(GLES3DecoderTest, GetInteger64i_vValidArgs) {
EXPECT_EQ(GL_NO_ERROR, GetGLError());
}
+TEST_P(GLES3DecoderTest, GetSamplerBinding) {
+ const GLuint kClientID = 12;
+ const GLuint kServiceID = 1012;
+ const GLuint kUnit = 0;
+ DoCreateSampler(kClientID, kServiceID);
+ DoBindSampler(kUnit, kClientID, kServiceID);
+
+ EXPECT_CALL(*gl_, GetError())
+ .WillOnce(Return(GL_NO_ERROR))
+ .WillOnce(Return(GL_NO_ERROR))
+ .RetiresOnSaturation();
+
+ typedef cmds::GetIntegerv::Result Result;
+ Result* result = static_cast<Result*>(shared_memory_address_);
+ cmds::GetIntegerv cmd;
+ cmd.Init(GL_SAMPLER_BINDING, shared_memory_id_, shared_memory_offset_);
+ result->size = 0;
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(1, result->GetNumResults());
+ EXPECT_EQ(kClientID, static_cast<GLuint>(result->GetData()[0]));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+
+TEST_P(GLES3DecoderTest, GetTransformFeedbackBinding) {
+ const GLuint kClientID = 12;
+ const GLuint kServiceID = 1012;
+ const GLenum kTarget = GL_TRANSFORM_FEEDBACK;
+ DoCreateTransformFeedback(kClientID, kServiceID);
+ DoBindTransformFeedback(kTarget, kClientID, kServiceID);
+
+ EXPECT_CALL(*gl_, GetError())
+ .WillOnce(Return(GL_NO_ERROR))
+ .WillOnce(Return(GL_NO_ERROR))
+ .RetiresOnSaturation();
+
+ typedef cmds::GetIntegerv::Result Result;
+ Result* result = static_cast<Result*>(shared_memory_address_);
+ cmds::GetIntegerv cmd;
+ cmd.Init(
+ GL_TRANSFORM_FEEDBACK_BINDING, shared_memory_id_, shared_memory_offset_);
+ result->size = 0;
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(1, result->GetNumResults());
+ EXPECT_EQ(kClientID, static_cast<GLuint>(result->GetData()[0]));
+
+ DoBindTransformFeedback(kTarget, 0, kServiceDefaultTransformFeedbackId);
+ DoDeleteTransformFeedback(kClientID, kServiceID);
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+
// Test that processing with 0 entries does nothing.
TEST_P(GLES2DecoderDoCommandsTest, DoCommandsOneOfZero) {
int num_processed = -1;
@@ -1695,6 +1765,14 @@ void GLES3DecoderTest::SetUp() {
InitDecoder(init);
}
+void WebGL2DecoderTest::SetUp() {
+ InitState init;
+ init.gl_version = "OpenGL ES 3.0";
+ init.bind_generates_resource = true;
+ init.context_type = CONTEXT_TYPE_WEBGL2;
+ InitDecoder(init);
+}
+
void GLES3DecoderWithShaderTest::SetUp() {
InitState init;
init.gl_version = "OpenGL ES 3.0";
diff --git a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest.h b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest.h
index aaa1c476305..72577a3d886 100644
--- a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest.h
+++ b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest.h
@@ -26,10 +26,6 @@
#include "ui/gl/gl_mock.h"
#include "ui/gl/gl_surface_stub.h"
-namespace base {
-class CommandLine;
-}
-
namespace gpu {
namespace gles2 {
@@ -82,6 +78,15 @@ class GLES3DecoderTest : public GLES2DecoderTest {
void SetUp() override;
};
+class WebGL2DecoderTest : public GLES2DecoderTest {
+ public:
+ WebGL2DecoderTest() { shader_language_version_ = 300; }
+
+ // Override default setup so ES3 capabilities are enabled by default
+ // and WebGL2 specific rules are enforced.
+ void SetUp() override;
+};
+
class GLES3DecoderWithShaderTest : public GLES2DecoderWithShaderTest {
public:
GLES3DecoderWithShaderTest() { shader_language_version_ = 300; }
diff --git a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_1_autogen.h b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_1_autogen.h
index a72a99c6e61..3cc0df74c42 100644
--- a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_1_autogen.h
+++ b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_1_autogen.h
@@ -222,10 +222,10 @@ TEST_P(GLES2DecoderTest1, CheckFramebufferStatusInvalidArgsBadSharedMemoryId) {
}
TEST_P(GLES2DecoderTest1, ClearValidArgs) {
- EXPECT_CALL(*gl_, Clear(1));
+ EXPECT_CALL(*gl_, Clear(GL_COLOR_BUFFER_BIT));
SpecializedSetup<cmds::Clear, 0>(true);
cmds::Clear cmd;
- cmd.Init(1);
+ cmd.Init(GL_COLOR_BUFFER_BIT);
EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
EXPECT_EQ(GL_NO_ERROR, GetGLError());
}
diff --git a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_base.cc b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_base.cc
index bbc96b4d59f..ab8e52d0969 100644
--- a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_base.cc
+++ b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_base.cc
@@ -434,6 +434,11 @@ void GLES2DecoderTestBase::InitDecoderWithCommandLine(
EXPECT_CALL(*gl_, BindRenderbufferEXT(GL_RENDERBUFFER, 0))
.Times(1)
.RetiresOnSaturation();
+ if (feature_info->feature_flags().desktop_srgb_support) {
+ EXPECT_CALL(*gl_, Disable(GL_FRAMEBUFFER_SRGB))
+ .Times(1)
+ .RetiresOnSaturation();
+ }
// TODO(boliu): Remove OS_ANDROID once crbug.com/259023 is fixed and the
// workaround has been reverted.
@@ -562,7 +567,7 @@ void GLES2DecoderTestBase::ResetDecoder() {
engine_.reset();
::gl::MockGLInterface::SetGLInterface(NULL);
gl_.reset();
- gl::init::ClearGLBindings();
+ gl::init::ShutdownGL();
}
void GLES2DecoderTestBase::TearDown() {
@@ -650,6 +655,64 @@ void GLES2DecoderTestBase::DoFenceSync(
EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
}
+void GLES2DecoderTestBase::DoCreateSampler(
+ GLuint client_id, GLuint service_id) {
+ EXPECT_CALL(*gl_, GenSamplers(1, _))
+ .WillOnce(SetArgPointee<1>(service_id));
+ cmds::GenSamplersImmediate* cmd =
+ GetImmediateAs<cmds::GenSamplersImmediate>();
+ GLuint temp = client_id;
+ cmd->Init(1, &temp);
+ EXPECT_EQ(error::kNoError, ExecuteImmediateCmd(*cmd, sizeof(temp)));
+}
+
+void GLES2DecoderTestBase::DoBindSampler(
+ GLuint unit, GLuint client_id, GLuint service_id) {
+ EXPECT_CALL(*gl_, BindSampler(unit, service_id))
+ .Times(1)
+ .RetiresOnSaturation();
+ cmds::BindSampler cmd;
+ cmd.Init(unit, client_id);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+}
+
+void GLES2DecoderTestBase::DoDeleteSampler(
+ GLuint client_id, GLuint service_id) {
+ EXPECT_CALL(*gl_, DeleteSamplers(1, Pointee(service_id)))
+ .Times(1)
+ .RetiresOnSaturation();
+ GenHelper<cmds::DeleteSamplersImmediate>(client_id);
+}
+
+void GLES2DecoderTestBase::DoCreateTransformFeedback(
+ GLuint client_id, GLuint service_id) {
+ EXPECT_CALL(*gl_, GenTransformFeedbacks(1, _))
+ .WillOnce(SetArgPointee<1>(service_id));
+ cmds::GenTransformFeedbacksImmediate* cmd =
+ GetImmediateAs<cmds::GenTransformFeedbacksImmediate>();
+ GLuint temp = client_id;
+ cmd->Init(1, &temp);
+ EXPECT_EQ(error::kNoError, ExecuteImmediateCmd(*cmd, sizeof(temp)));
+}
+
+void GLES2DecoderTestBase::DoBindTransformFeedback(
+ GLenum target, GLuint client_id, GLuint service_id) {
+ EXPECT_CALL(*gl_, BindTransformFeedback(target, service_id))
+ .Times(1)
+ .RetiresOnSaturation();
+ cmds::BindTransformFeedback cmd;
+ cmd.Init(target, client_id);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+}
+
+void GLES2DecoderTestBase::DoDeleteTransformFeedback(
+ GLuint client_id, GLuint service_id) {
+ EXPECT_CALL(*gl_, DeleteTransformFeedbacks(1, Pointee(service_id)))
+ .Times(1)
+ .RetiresOnSaturation();
+ GenHelper<cmds::DeleteTransformFeedbacksImmediate>(client_id);
+}
+
void GLES2DecoderTestBase::SetBucketData(
uint32_t bucket_id, const void* data, uint32_t data_size) {
DCHECK(data || data_size == 0);
diff --git a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_base.h b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_base.h
index ffb4dde783d..d7f71fa8f8e 100644
--- a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_base.h
+++ b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_base.h
@@ -168,6 +168,8 @@ class GLES2DecoderTestBase : public ::testing::TestWithParam<bool> {
void DoCreateProgram(GLuint client_id, GLuint service_id);
void DoCreateShader(GLenum shader_type, GLuint client_id, GLuint service_id);
void DoFenceSync(GLuint client_id, GLuint service_id);
+ void DoCreateSampler(GLuint client_id, GLuint service_id);
+ void DoCreateTransformFeedback(GLuint client_id, GLuint service_id);
void SetBucketData(uint32_t bucket_id, const void* data, uint32_t data_size);
void SetBucketAsCString(uint32_t bucket_id, const char* str);
@@ -269,6 +271,9 @@ class GLES2DecoderTestBase : public ::testing::TestWithParam<bool> {
void EnsureRenderbufferBound(bool expect_bind);
void DoBindTexture(GLenum target, GLuint client_id, GLuint service_id);
void DoBindVertexArrayOES(GLuint client_id, GLuint service_id);
+ void DoBindSampler(GLuint unit, GLuint client_id, GLuint service_id);
+ void DoBindTransformFeedback(
+ GLenum target, GLuint client_id, GLuint service_id);
bool DoIsBuffer(GLuint client_id);
bool DoIsFramebuffer(GLuint client_id);
@@ -286,6 +291,8 @@ class GLES2DecoderTestBase : public ::testing::TestWithParam<bool> {
void DoDeleteRenderbuffer(GLuint client_id, GLuint service_id);
void DoDeleteShader(GLuint client_id, GLuint service_id);
void DoDeleteTexture(GLuint client_id, GLuint service_id);
+ void DoDeleteSampler(GLuint client_id, GLuint service_id);
+ void DoDeleteTransformFeedback(GLuint client_id, GLuint service_id);
void DoCompressedTexImage2D(GLenum target,
GLint level,
diff --git a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_drawing.cc b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_drawing.cc
index bf03c32aad5..7d61f120de4 100644
--- a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_drawing.cc
+++ b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_drawing.cc
@@ -2349,5 +2349,13 @@ TEST_P(GLES3DecoderTest, DrawNoProgram) {
EXPECT_EQ(GL_NO_ERROR, GetGLError());
}
+TEST_P(GLES2DecoderTest, ClearInvalidValue) {
+ EXPECT_CALL(*gl_, Clear(_)).Times(0);
+ Clear cmd;
+ cmd.Init(0xffffffff);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_INVALID_VALUE, GetGLError());
+}
+
} // namespace gles2
} // namespace gpu
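
The new ClearInvalidValue test expects the decoder to reject a glClear mask containing undefined bits (0xffffffff) with GL_INVALID_VALUE and never reach the driver. A minimal sketch of that check, assuming the usual three clear bits; the helper name is illustrative, not the decoder's actual function.

#include <GLES2/gl2.h>

// Sketch: only the three defined buffer bits may be set in a glClear mask.
bool IsValidClearMask(GLbitfield mask) {
  constexpr GLbitfield kAllowedBits =
      GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT | GL_STENCIL_BUFFER_BIT;
  // 0xffffffff fails this test; GL_COLOR_BUFFER_BIT alone passes.
  return (mask & ~kAllowedBits) == 0;
}
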
diff --git a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_framebuffers.cc b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_framebuffers.cc
index 82a416c1150..3600c198217 100644
--- a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_framebuffers.cc
+++ b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_framebuffers.cc
@@ -3397,7 +3397,8 @@ TEST_P(GLES2DecoderManualInitTest, InvalidateFramebufferBinding) {
// EXPECT_EQ can't be used to compare function pointers
EXPECT_TRUE(
gl::MockGLInterface::GetGLProcAddress("glInvalidateFramebuffer") !=
- gl::g_driver_gl.fn.glDiscardFramebufferEXTFn);
+ reinterpret_cast<gl::GLFunctionPointerType>(
+ gl::g_driver_gl.fn.glDiscardFramebufferEXTFn));
EXPECT_TRUE(
gl::MockGLInterface::GetGLProcAddress("glInvalidateFramebuffer") !=
gl::MockGLInterface::GetGLProcAddress("glDiscardFramebufferEXT"));
@@ -3447,7 +3448,8 @@ TEST_P(GLES2DecoderManualInitTest, DiscardFramebufferEXT) {
// EXPECT_EQ can't be used to compare function pointers
EXPECT_TRUE(
gl::MockGLInterface::GetGLProcAddress("glDiscardFramebufferEXT") ==
- gl::g_driver_gl.fn.glDiscardFramebufferEXTFn);
+ reinterpret_cast<gl::GLFunctionPointerType>(
+ gl::g_driver_gl.fn.glDiscardFramebufferEXTFn));
const GLenum target = GL_FRAMEBUFFER;
const GLsizei count = 1;
@@ -3489,7 +3491,8 @@ TEST_P(GLES2DecoderManualInitTest, ClearBackbufferBitsOnDiscardFramebufferEXT) {
// EXPECT_EQ can't be used to compare function pointers.
EXPECT_TRUE(
gl::MockGLInterface::GetGLProcAddress("glDiscardFramebufferEXT") ==
- gl::g_driver_gl.fn.glDiscardFramebufferEXTFn);
+ reinterpret_cast<gl::GLFunctionPointerType>(
+ gl::g_driver_gl.fn.glDiscardFramebufferEXTFn));
const GLenum target = GL_FRAMEBUFFER;
const GLsizei count = 1;
@@ -3553,6 +3556,92 @@ TEST_P(GLES2DecoderTest, DiscardFramebufferEXTUnsupported) {
ExecuteImmediateCmd(cmd, sizeof(attachments)));
}
+TEST_P(GLES3DecoderTest, DiscardFramebufferEXTInvalidTarget) {
+ const GLenum target = GL_RED; // Invalid
+ const GLsizei count = 1;
+ const GLenum attachments[] = {GL_COLOR_ATTACHMENT0};
+
+ SetupTexture();
+ DoBindFramebuffer(
+ GL_FRAMEBUFFER, client_framebuffer_id_, kServiceFramebufferId);
+ DoFramebufferTexture2D(GL_FRAMEBUFFER,
+ GL_COLOR_ATTACHMENT0,
+ GL_TEXTURE_2D,
+ client_texture_id_,
+ kServiceTextureId,
+ 0,
+ GL_NO_ERROR);
+ FramebufferManager* framebuffer_manager = group().framebuffer_manager();
+ Framebuffer* framebuffer =
+ framebuffer_manager->GetFramebuffer(client_framebuffer_id_);
+ EXPECT_TRUE(framebuffer->IsCleared());
+
+ EXPECT_CALL(*gl_, InvalidateFramebuffer(target, count, _)).Times(0);
+ DiscardFramebufferEXTImmediate& cmd =
+ *GetImmediateAs<DiscardFramebufferEXTImmediate>();
+ cmd.Init(target, count, attachments);
+
+ EXPECT_EQ(error::kNoError, ExecuteImmediateCmd(cmd, sizeof(attachments)));
+ EXPECT_EQ(GL_INVALID_ENUM, GetGLError());
+ EXPECT_TRUE(framebuffer->IsCleared());
+}
+
+TEST_P(GLES3DecoderTest, DiscardFramebufferEXTUseCorrectTarget) {
+ const GLenum target = GL_READ_FRAMEBUFFER;
+ const GLsizei count = 1;
+ const GLenum attachments[] = {GL_COLOR_ATTACHMENT0};
+
+ SetupTexture();
+ DoBindFramebuffer(
+ GL_READ_FRAMEBUFFER, client_framebuffer_id_, kServiceFramebufferId);
+ DoFramebufferTexture2D(GL_READ_FRAMEBUFFER,
+ GL_COLOR_ATTACHMENT0,
+ GL_TEXTURE_2D,
+ client_texture_id_,
+ kServiceTextureId,
+ 0,
+ GL_NO_ERROR);
+
+ EXPECT_CALL(*gl_, GenFramebuffersEXT(_, _))
+ .WillOnce(SetArgPointee<1>(kServiceFramebufferId + 1))
+ .RetiresOnSaturation();
+ DoBindFramebuffer(GL_DRAW_FRAMEBUFFER, client_framebuffer_id_ + 1,
+ kServiceFramebufferId + 1);
+ EXPECT_CALL(*gl_, GenTextures(_, _))
+ .WillOnce(SetArgPointee<1>(kServiceTextureId + 1))
+ .RetiresOnSaturation();
+ DoBindTexture(GL_TEXTURE_2D, client_texture_id_ + 1, kServiceTextureId + 1);
+ DoTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, 1, 1, 0, GL_RGBA, GL_UNSIGNED_BYTE,
+ kSharedMemoryId, kSharedMemoryOffset);
+ DoFramebufferTexture2D(GL_DRAW_FRAMEBUFFER,
+ GL_COLOR_ATTACHMENT0,
+ GL_TEXTURE_2D,
+ client_texture_id_ + 1,
+ kServiceTextureId + 1,
+ 0,
+ GL_NO_ERROR);
+
+ FramebufferManager* framebuffer_manager = group().framebuffer_manager();
+ Framebuffer* framebuffer =
+ framebuffer_manager->GetFramebuffer(client_framebuffer_id_);
+ EXPECT_TRUE(framebuffer->IsCleared());
+ Framebuffer* other_framebuffer =
+ framebuffer_manager->GetFramebuffer(client_framebuffer_id_ + 1);
+ EXPECT_TRUE(other_framebuffer->IsCleared());
+
+ EXPECT_CALL(*gl_, InvalidateFramebuffer(target, count, _))
+ .Times(1)
+ .RetiresOnSaturation();
+ DiscardFramebufferEXTImmediate& cmd =
+ *GetImmediateAs<DiscardFramebufferEXTImmediate>();
+ cmd.Init(target, count, attachments);
+
+ EXPECT_EQ(error::kNoError, ExecuteImmediateCmd(cmd, sizeof(attachments)));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+ EXPECT_FALSE(framebuffer->IsCleared());
+ EXPECT_TRUE(other_framebuffer->IsCleared());
+}
+
TEST_P(GLES2DecoderManualInitTest,
DiscardedAttachmentsEXTMarksFramebufferIncomplete) {
InitState init;
@@ -3626,7 +3715,8 @@ TEST_P(GLES2DecoderManualInitTest,
// and the framebuffer as incomplete.
EXPECT_TRUE(
gl::MockGLInterface::GetGLProcAddress("glDiscardFramebufferEXT") ==
- gl::g_driver_gl.fn.glDiscardFramebufferEXTFn);
+ reinterpret_cast<gl::GLFunctionPointerType>(
+ gl::g_driver_gl.fn.glDiscardFramebufferEXTFn));
const GLenum target = GL_FRAMEBUFFER;
const GLsizei count = 1;
@@ -3790,7 +3880,7 @@ TEST_P(GLES3DecoderTest, InvalidateFramebufferDepthStencilAttachment) {
EXPECT_FALSE(framebuffer->HasUnclearedAttachment(GL_STENCIL_ATTACHMENT));
attachments[0] = GL_DEPTH_STENCIL_ATTACHMENT;
- EXPECT_CALL(*gl_, InvalidateFramebuffer(target, 1, _))
+ EXPECT_CALL(*gl_, InvalidateFramebuffer(target, 2, _))
.Times(1)
.RetiresOnSaturation();
cmd.Init(target, count, attachments);
@@ -3862,21 +3952,18 @@ TEST_P(GLES3DecoderTest, BlitFramebufferDisabledReadBuffer) {
1.0f, // depth
false, // scissor test
0, 0, 128, 64);
- EXPECT_CALL(*gl_, BlitFramebufferEXT(0, 0, _, _, 0, 0, _, _,
- GL_COLOR_BUFFER_BIT, GL_LINEAR))
- .Times(1)
- .RetiresOnSaturation();
BlitFramebufferCHROMIUM cmd;
cmd.Init(0, 0, 1, 1, 0, 0, 1, 1, GL_COLOR_BUFFER_BIT, GL_LINEAR);
EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
- EXPECT_EQ(GL_NO_ERROR, GetGLError());
+ // Generate INVALID_OPERATION because of missing read buffer image.
+ EXPECT_EQ(GL_INVALID_OPERATION, GetGLError());
}
}
TEST_P(GLES3DecoderTest, BlitFramebufferMissingDepthOrStencil) {
// Run BlitFramebufferCHROMIUM with depth or stencil bits, from/to a read/draw
- // framebuffer that doesn't have depth/stencil. The bits should be silently
- // ignored.
+ // framebuffer that doesn't have depth/stencil. It should generate
+ // INVALID_OPERATION.
DoBindRenderbuffer(GL_RENDERBUFFER, client_renderbuffer_id_,
kServiceRenderbufferId);
DoRenderbufferStorage(GL_RENDERBUFFER, GL_DEPTH24_STENCIL8,
@@ -3924,16 +4011,13 @@ TEST_P(GLES3DecoderTest, BlitFramebufferMissingDepthOrStencil) {
EXPECT_CALL(*gl_, CheckFramebufferStatusEXT(GL_READ_FRAMEBUFFER))
.WillOnce(Return(GL_FRAMEBUFFER_COMPLETE))
.RetiresOnSaturation();
- EXPECT_CALL(*gl_, BlitFramebufferEXT(0, 0, 1, 1, 0, 0, 1, 1,
- _, _))
- .Times(0);
BlitFramebufferCHROMIUM cmd;
cmd.Init(0, 0, 1, 1, 0, 0, 1, 1, GL_DEPTH_BUFFER_BIT, GL_NEAREST);
EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
- EXPECT_EQ(GL_NO_ERROR, GetGLError());
+ EXPECT_EQ(GL_INVALID_OPERATION, GetGLError());
cmd.Init(0, 0, 1, 1, 0, 0, 1, 1, GL_STENCIL_BUFFER_BIT, GL_NEAREST);
EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
- EXPECT_EQ(GL_NO_ERROR, GetGLError());
+ EXPECT_EQ(GL_INVALID_OPERATION, GetGLError());
}
// Switch FBOs and try the same.
diff --git a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_programs.cc b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_programs.cc
index d32d866fe19..b80fbc0f32f 100644
--- a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_programs.cc
+++ b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_programs.cc
@@ -1868,9 +1868,6 @@ TEST_P(GLES2DecoderTest, BindAttribLocationBucket) {
const uint32_t kBucketId = 123;
const GLint kLocation = 2;
const char* kName = "testing";
- EXPECT_CALL(*gl_,
- BindAttribLocation(kServiceProgramId, kLocation, StrEq(kName)))
- .Times(1);
SetBucketAsCString(kBucketId, kName);
BindAttribLocationBucket cmd;
cmd.Init(client_program_id_, kLocation, kBucketId);
diff --git a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_textures.cc b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_textures.cc
index 67e71bd4ce1..266869dfdb4 100644
--- a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_textures.cc
+++ b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_textures.cc
@@ -4720,6 +4720,68 @@ TEST_P(GLES2DecoderTest, BindTextureInvalidArgs) {
EXPECT_EQ(GL_INVALID_ENUM, GetGLError());
}
+TEST_P(GLES3DecoderTest, TexSwizzleAllowed) {
+ const GLenum kTarget = GL_TEXTURE_2D;
+ const GLenum kSwizzleParam = GL_TEXTURE_SWIZZLE_R;
+ const GLenum kSwizzleValue = GL_BLUE;
+ const GLenum kInvalidSwizzleValue = GL_RG;
+
+ {
+ EXPECT_CALL(*gl_, TexParameteri(kTarget, kSwizzleParam, kSwizzleValue));
+ TexParameteri cmd;
+ cmd.Init(kTarget, kSwizzleParam, kSwizzleValue);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+ }
+
+ {
+ TexParameteri cmd;
+ cmd.Init(kTarget, kSwizzleParam, kInvalidSwizzleValue);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_INVALID_ENUM, GetGLError());
+ }
+
+ {
+ EXPECT_CALL(*gl_, GetError())
+ .WillOnce(Return(GL_NO_ERROR))
+ .WillOnce(Return(GL_NO_ERROR))
+ .RetiresOnSaturation();
+ typedef GetTexParameteriv::Result Result;
+ Result* result = static_cast<Result*>(shared_memory_address_);
+ result->size = 0;
+ GetTexParameteriv cmd;
+ cmd.Init(kTarget, kSwizzleParam, shared_memory_id_, shared_memory_offset_);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(decoder_->GetGLES2Util()->GLGetNumValuesReturned(kSwizzleParam),
+ result->GetNumResults());
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+ EXPECT_EQ(kSwizzleValue, static_cast<GLenum>(result->GetData()[0]));
+ }
+}
+
+TEST_P(WebGL2DecoderTest, TexSwizzleDisabled) {
+ const GLenum kTarget = GL_TEXTURE_2D;
+ const GLenum kSwizzleParam = GL_TEXTURE_SWIZZLE_R;
+ const GLenum kSwizzleValue = GL_BLUE;
+
+ {
+ TexParameteri cmd;
+ cmd.Init(kTarget, kSwizzleParam, kSwizzleValue);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_INVALID_ENUM, GetGLError());
+ }
+
+ {
+ typedef GetTexParameteriv::Result Result;
+ Result* result = static_cast<Result*>(shared_memory_address_);
+ result->size = 0;
+ GetTexParameteriv cmd;
+ cmd.Init(kTarget, kSwizzleParam, shared_memory_id_, shared_memory_offset_);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_INVALID_ENUM, GetGLError());
+ }
+}
+
// TODO(gman): Complete this test.
// TEST_P(GLES2DecoderTest, CompressedTexImage2DGLError) {
// }
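
For readers unfamiliar with the feature the two swizzle tests above exercise: GL_TEXTURE_SWIZZLE_R remaps which source channel feeds the red component when sampling; it is core ES 3.0 but deliberately rejected on WebGL2 contexts here. A short usage sketch:

#include <GLES3/gl3.h>

// Route the texture's blue channel into red when sampling. Legal values are
// GL_RED, GL_GREEN, GL_BLUE, GL_ALPHA, GL_ZERO and GL_ONE; a two-component
// enum such as GL_RG is invalid, which is what TexSwizzleAllowed checks.
void SwizzleBlueIntoRed(GLuint texture) {
  glBindTexture(GL_TEXTURE_2D, texture);
  glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_SWIZZLE_R, GL_BLUE);
}
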
diff --git a/chromium/gpu/command_buffer/service/gles2_cmd_srgb_converter.cc b/chromium/gpu/command_buffer/service/gles2_cmd_srgb_converter.cc
index 1f85360f677..97201e637ee 100644
--- a/chromium/gpu/command_buffer/service/gles2_cmd_srgb_converter.cc
+++ b/chromium/gpu/command_buffer/service/gles2_cmd_srgb_converter.cc
@@ -41,10 +41,39 @@ void SRGBConverter::InitializeSRGBConverterProgram() {
srgb_converter_program_ = glCreateProgram();
+ const char* kShaderPrecisionPreamble =
+ "#ifdef GL_ES\n"
+ "precision mediump float;\n"
+ "#define TexCoordPrecision mediump\n"
+ "#else\n"
+ "#define TexCoordPrecision\n"
+ "#endif\n";
+
+ std::string vs_source;
+ if (feature_info_->gl_version_info().is_es) {
+ if (feature_info_->gl_version_info().is_es3) {
+ vs_source += "#version 300 es\n";
+ vs_source +=
+ "#define ATTRIBUTE in\n"
+ "#define VARYING out\n";
+ } else {
+ vs_source +=
+ "#define ATTRIBUTE attribute\n"
+ "#define VARYING varying\n";
+ }
+ } else {
+ vs_source += "#version 150\n";
+ vs_source +=
+ "#define ATTRIBUTE in\n"
+ "#define VARYING out\n";
+ }
+
+ vs_source += kShaderPrecisionPreamble;
+
+ // TODO(yizhou): gles 2.0 does not support gl_VertexID.
// Compile the vertex shader
- const char* vs_source =
- "#version 150\n"
- "out vec2 v_texcoord;\n"
+ vs_source +=
+ "VARYING TexCoordPrecision vec2 v_texcoord;\n"
"\n"
"void main()\n"
"{\n"
@@ -64,7 +93,7 @@ void SRGBConverter::InitializeSRGBConverterProgram() {
" v_texcoord = quad_positions[gl_VertexID];\n"
"}\n";
GLuint vs = glCreateShader(GL_VERTEX_SHADER);
- CompileShader(vs, vs_source);
+ CompileShader(vs, vs_source.c_str());
glAttachShader(srgb_converter_program_, vs);
glDeleteShader(vs);
@@ -80,20 +109,50 @@ void SRGBConverter::InitializeSRGBConverterProgram() {
// encoding, we don't need to use the equation to explicitly encode linear
// to srgb in fragment shader.
// As a result, we just use a simple fragment shader to do srgb conversion.
- const char* fs_source =
- "#version 150\n"
- "uniform sampler2D u_source_texture;\n"
- "in vec2 v_texcoord;\n"
- "out vec4 output_color;\n"
+ std::string fs_source;
+ if (feature_info_->gl_version_info().is_es) {
+ if (feature_info_->gl_version_info().is_es3) {
+ fs_source += "#version 300 es\n";
+ }
+ } else {
+ fs_source += "#version 150\n";
+ }
+
+ fs_source += kShaderPrecisionPreamble;
+
+ if (feature_info_->gl_version_info().is_es) {
+ if (feature_info_->gl_version_info().is_es3) {
+ fs_source +=
+ "#define VARYING in\n"
+ "out vec4 frag_color;\n"
+ "#define FRAGCOLOR frag_color\n"
+ "#define TextureLookup texture\n";
+ } else {
+ fs_source +=
+ "#define VARYING varying\n"
+ "#define FRAGCOLOR gl_FragColor\n"
+ "#define TextureLookup texture2D\n";
+ }
+ } else {
+ fs_source +=
+ "#define VARYING in\n"
+ "out vec4 frag_color;\n"
+ "#define FRAGCOLOR frag_color\n"
+ "#define TextureLookup texture\n";
+ }
+
+ fs_source +=
+ "uniform mediump sampler2D u_source_texture;\n"
+ "VARYING TexCoordPrecision vec2 v_texcoord;\n"
"\n"
"void main()\n"
"{\n"
- " vec4 c = texture(u_source_texture, v_texcoord);\n"
- " output_color = c;\n"
+ " vec4 c = TextureLookup(u_source_texture, v_texcoord);\n"
+ " FRAGCOLOR = c;\n"
"}\n";
GLuint fs = glCreateShader(GL_FRAGMENT_SHADER);
- CompileShader(fs, fs_source);
+ CompileShader(fs, fs_source.c_str());
glAttachShader(srgb_converter_program_, fs);
glDeleteShader(fs);
@@ -243,9 +302,8 @@ void SRGBConverter::Blit(
glBindBuffer(GL_PIXEL_UNPACK_BUFFER, 0);
glBindTexture(GL_TEXTURE_2D, srgb_converter_textures_[1]);
- glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA32F,
- c.width(), c.height(),
- 0, GL_RGBA, GL_UNSIGNED_BYTE, nullptr);
+ glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA32F, c.width(), c.height(), 0,
+ GL_RGBA, GL_FLOAT, nullptr);
glBindFramebufferEXT(GL_FRAMEBUFFER, srgb_decoder_fbo_);
glFramebufferTexture2DEXT(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0,
GL_TEXTURE_2D, srgb_converter_textures_[1], 0);
@@ -277,10 +335,8 @@ void SRGBConverter::Blit(
glBindBuffer(GL_PIXEL_UNPACK_BUFFER, 0);
glTexImage2D(
GL_TEXTURE_2D, 0, decode ? GL_RGBA32F : src_framebuffer_internal_format,
- width_draw, height_draw, 0,
- decode ? GL_RGBA : src_framebuffer_format,
- decode ? GL_UNSIGNED_BYTE : src_framebuffer_type,
- nullptr);
+ width_draw, height_draw, 0, decode ? GL_RGBA : src_framebuffer_format,
+ decode ? GL_FLOAT : src_framebuffer_type, nullptr);
glBindFramebufferEXT(GL_DRAW_FRAMEBUFFER, srgb_encoder_fbo_);
glFramebufferTexture2DEXT(GL_DRAW_FRAMEBUFFER, GL_COLOR_ATTACHMENT0,
@@ -354,9 +410,11 @@ void SRGBConverter::GenerateMipmap(const gles2::GLES2Decoder* decoder,
GLsizei depth;
GLenum type = 0;
GLenum internal_format = 0;
+ GLenum format = 0;
GLsizei base_level = tex->base_level();
tex->GetLevelSize(target, base_level, &width, &height, &depth);
tex->GetLevelType(target, base_level, &type, &internal_format);
+ format = TextureManager::ExtractFormatFromStorageFormat(internal_format);
const GLint mipmap_levels =
TextureManager::ComputeMipMapCount(target, width, height, depth);
@@ -364,7 +422,7 @@ void SRGBConverter::GenerateMipmap(const gles2::GLES2Decoder* decoder,
if (feature_info_->ext_color_buffer_float_available() &&
feature_info_->oes_texture_float_linear_available()) {
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA32F, width, height, 0, GL_RGBA,
- GL_UNSIGNED_BYTE, nullptr);
+ GL_FLOAT, nullptr);
} else {
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA8, width, height, 0, GL_RGBA,
GL_UNSIGNED_BYTE, nullptr);
@@ -411,7 +469,7 @@ void SRGBConverter::GenerateMipmap(const gles2::GLES2Decoder* decoder,
// generate mipmap for tex manually
glBindTexture(GL_TEXTURE_2D, tex->service_id());
glTexImage2D(GL_TEXTURE_2D, level, internal_format, width, height, 0,
- GL_SRGB, type, NULL);
+ format, type, nullptr);
glFramebufferTexture2DEXT(GL_DRAW_FRAMEBUFFER, GL_COLOR_ATTACHMENT0,
GL_TEXTURE_2D, tex->service_id(), level);
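
The converter fixes above swap GL_UNSIGNED_BYTE for GL_FLOAT wherever a GL_RGBA32F texture is allocated: with sized float internal formats, ES 3.0 only accepts GL_RGBA/GL_FLOAT as the format/type pair, so the old combination would have failed with GL_INVALID_OPERATION. A minimal allocation sketch:

#include <GLES3/gl3.h>

// Allocate a float colour target; GL_RGBA32F requires GL_RGBA + GL_FLOAT.
void AllocateFloatTexture(GLsizei width, GLsizei height) {
  glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA32F, width, height, 0,
               GL_RGBA, GL_FLOAT, nullptr);
}
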
diff --git a/chromium/gpu/command_buffer/service/gles2_cmd_validation_autogen.h b/chromium/gpu/command_buffer/service/gles2_cmd_validation_autogen.h
index 1223a22f1b5..2be3bab9c4a 100644
--- a/chromium/gpu/command_buffer/service/gles2_cmd_validation_autogen.h
+++ b/chromium/gpu/command_buffer/service/gles2_cmd_validation_autogen.h
@@ -344,6 +344,12 @@ TextureMinFilterModeValidator texture_min_filter_mode;
ValueValidator<GLenum> texture_parameter;
ValueValidator<GLenum> texture_sized_color_renderable_internal_format;
ValueValidator<GLenum> texture_sized_texture_filterable_internal_format;
+class TextureSrgbDecodeExtValidator {
+ public:
+ bool IsValid(const GLenum value) const;
+};
+TextureSrgbDecodeExtValidator texture_srgb_decode_ext;
+
ValueValidator<GLenum> texture_stencil_renderable_internal_format;
class TextureSwizzleValidator {
public:
diff --git a/chromium/gpu/command_buffer/service/gles2_cmd_validation_implementation_autogen.h b/chromium/gpu/command_buffer/service/gles2_cmd_validation_implementation_autogen.h
index 632eaeedfb8..280f736988c 100644
--- a/chromium/gpu/command_buffer/service/gles2_cmd_validation_implementation_autogen.h
+++ b/chromium/gpu/command_buffer/service/gles2_cmd_validation_implementation_autogen.h
@@ -1216,6 +1216,16 @@ static const GLenum
GL_RGB_YCBCR_420V_CHROMIUM,
};
+bool Validators::TextureSrgbDecodeExtValidator::IsValid(
+ const GLenum value) const {
+ switch (value) {
+ case GL_DECODE_EXT:
+ case GL_SKIP_DECODE_EXT:
+ return true;
+ }
+ return false;
+};
+
static const GLenum
valid_texture_stencil_renderable_internal_format_table_es3[] = {
GL_STENCIL_INDEX8, GL_DEPTH24_STENCIL8, GL_DEPTH32F_STENCIL8,
diff --git a/chromium/gpu/command_buffer/service/gpu_preferences.h b/chromium/gpu/command_buffer/service/gpu_preferences.h
index 8d170985f17..b7db89f457c 100644
--- a/chromium/gpu/command_buffer/service/gpu_preferences.h
+++ b/chromium/gpu/command_buffer/service/gpu_preferences.h
@@ -11,6 +11,7 @@
#include "build/build_config.h"
#include "gpu/command_buffer/common/constants.h"
#include "gpu/gpu_export.h"
+#include "media/media_features.h"
namespace gpu {
@@ -51,7 +52,7 @@ struct GPU_EXPORT GpuPreferences {
bool disable_vaapi_accelerated_video_encode = false;
#endif
-#if defined(ENABLE_WEBRTC)
+#if BUILDFLAG(ENABLE_WEBRTC)
// Disables HW encode acceleration for WebRTC.
bool disable_web_rtc_hw_encoding = false;
#endif
diff --git a/chromium/gpu/command_buffer/service/gpu_service_test.cc b/chromium/gpu/command_buffer/service/gpu_service_test.cc
index b1608eb6f5c..5781728ee2e 100644
--- a/chromium/gpu/command_buffer/service/gpu_service_test.cc
+++ b/chromium/gpu/command_buffer/service/gpu_service_test.cc
@@ -50,7 +50,7 @@ void GpuServiceTest::TearDown() {
surface_ = nullptr;
::gl::MockGLInterface::SetGLInterface(NULL);
gl_.reset();
- gl::init::ClearGLBindings();
+ gl::init::ShutdownGL();
ran_teardown_ = true;
testing::Test::TearDown();
diff --git a/chromium/gpu/command_buffer/service/mailbox_manager_impl.h b/chromium/gpu/command_buffer/service/mailbox_manager_impl.h
index e44ac046dbd..ff3d4a80416 100644
--- a/chromium/gpu/command_buffer/service/mailbox_manager_impl.h
+++ b/chromium/gpu/command_buffer/service/mailbox_manager_impl.h
@@ -19,7 +19,6 @@ namespace gpu {
namespace gles2 {
class TextureBase;
-class TextureManager;
// Manages resources scoped beyond the context or context group level.
class GPU_EXPORT MailboxManagerImpl : public MailboxManager {
diff --git a/chromium/gpu/command_buffer/service/memory_program_cache.cc b/chromium/gpu/command_buffer/service/memory_program_cache.cc
index c38fd78edb0..410914db2b0 100644
--- a/chromium/gpu/command_buffer/service/memory_program_cache.cc
+++ b/chromium/gpu/command_buffer/service/memory_program_cache.cc
@@ -202,10 +202,14 @@ void RunShaderCallback(const ShaderCacheCallback& callback,
} // namespace
-MemoryProgramCache::MemoryProgramCache(size_t max_cache_size_bytes,
- bool disable_gpu_shader_disk_cache)
+MemoryProgramCache::MemoryProgramCache(
+ size_t max_cache_size_bytes,
+ bool disable_gpu_shader_disk_cache,
+ bool disable_program_caching_for_transform_feedback)
: max_size_bytes_(max_cache_size_bytes),
disable_gpu_shader_disk_cache_(disable_gpu_shader_disk_cache),
+ disable_program_caching_for_transform_feedback_(
+ disable_program_caching_for_transform_feedback),
curr_size_bytes_(0),
store_(ProgramMRUCache::NO_AUTO_EVICT) {
}
@@ -291,6 +295,10 @@ void MemoryProgramCache::SaveLinkedProgram(
const std::vector<std::string>& transform_feedback_varyings,
GLenum transform_feedback_buffer_mode,
const ShaderCacheCallback& shader_callback) {
+ if (disable_program_caching_for_transform_feedback_ &&
+ !transform_feedback_varyings.empty()) {
+ return;
+ }
GLenum format;
GLsizei length = 0;
glGetProgramiv(program, GL_PROGRAM_BINARY_LENGTH_OES, &length);
diff --git a/chromium/gpu/command_buffer/service/memory_program_cache.h b/chromium/gpu/command_buffer/service/memory_program_cache.h
index 628bdb160dd..f4cc2ec4921 100644
--- a/chromium/gpu/command_buffer/service/memory_program_cache.h
+++ b/chromium/gpu/command_buffer/service/memory_program_cache.h
@@ -20,15 +20,14 @@
namespace gpu {
-struct GpuPreferences;
-
namespace gles2 {
// Program cache that stores binaries completely in-memory
class GPU_EXPORT MemoryProgramCache : public ProgramCache {
public:
MemoryProgramCache(size_t max_cache_size_bytes,
- bool disable_gpu_shader_disk_cache);
+ bool disable_gpu_shader_disk_cache,
+ bool disable_program_caching_for_transform_feedback);
~MemoryProgramCache() override;
ProgramLoadResult LoadLinkedProgram(
@@ -166,6 +165,7 @@ class GPU_EXPORT MemoryProgramCache : public ProgramCache {
const size_t max_size_bytes_;
const bool disable_gpu_shader_disk_cache_;
+ const bool disable_program_caching_for_transform_feedback_;
size_t curr_size_bytes_;
ProgramMRUCache store_;
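
With the extra constructor argument, embedders can keep programs that declare transform feedback varyings out of the binary program cache (a workaround for drivers whose cached binaries lose varying state). A construction sketch mirroring the new unit test below; the size constant is illustrative.

#include "gpu/command_buffer/service/memory_program_cache.h"

// Sketch: when the third flag is true, SaveLinkedProgram() returns early for
// any program with non-empty transform_feedback_varyings, so such programs
// always relink instead of loading a cached binary.
const size_t kMaxCacheBytes = 6 * 1024 * 1024;  // illustrative size
gpu::gles2::MemoryProgramCache cache(
    kMaxCacheBytes,
    /*disable_gpu_shader_disk_cache=*/false,
    /*disable_program_caching_for_transform_feedback=*/true);
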
diff --git a/chromium/gpu/command_buffer/service/memory_program_cache_unittest.cc b/chromium/gpu/command_buffer/service/memory_program_cache_unittest.cc
index a28b79d9f22..b6e157730c3 100644
--- a/chromium/gpu/command_buffer/service/memory_program_cache_unittest.cc
+++ b/chromium/gpu/command_buffer/service/memory_program_cache_unittest.cc
@@ -70,13 +70,15 @@ class MemoryProgramCacheTest : public GpuServiceTest {
public:
static const size_t kCacheSizeBytes = 1024;
static const bool kDisableGpuDiskCache = false;
+ static const bool kDisableCachingForTransformFeedback = false;
static const GLuint kVertexShaderClientId = 90;
static const GLuint kVertexShaderServiceId = 100;
static const GLuint kFragmentShaderClientId = 91;
static const GLuint kFragmentShaderServiceId = 100;
MemoryProgramCacheTest()
- : cache_(new MemoryProgramCache(kCacheSizeBytes, kDisableGpuDiskCache)),
+ : cache_(new MemoryProgramCache(kCacheSizeBytes, kDisableGpuDiskCache,
+ kDisableCachingForTransformFeedback)),
shader_manager_(nullptr),
vertex_shader_(nullptr),
fragment_shader_(nullptr),
@@ -537,6 +539,40 @@ TEST_F(MemoryProgramCacheTest, LoadFailOnDifferentTransformFeedbackVaryings) {
base::Unretained(this))));
}
+TEST_F(MemoryProgramCacheTest, LoadFailIfTransformFeedbackCachingDisabled) {
+ const GLenum kFormat = 1;
+ const int kProgramId = 10;
+ const int kBinaryLength = 20;
+ char test_binary[kBinaryLength];
+ for (int i = 0; i < kBinaryLength; ++i) {
+ test_binary[i] = i;
+ }
+ ProgramBinaryEmulator emulator(kBinaryLength, kFormat, test_binary);
+
+ // Forcibly reset the program cache so we can disable caching of
+ // programs which include transform feedback varyings.
+ cache_.reset(new MemoryProgramCache(
+ kCacheSizeBytes, kDisableGpuDiskCache, true));
+ varyings_.push_back("test");
+ cache_->SaveLinkedProgram(kProgramId,
+ vertex_shader_,
+ fragment_shader_,
+ NULL,
+ varyings_,
+ GL_INTERLEAVED_ATTRIBS,
+ base::Bind(&MemoryProgramCacheTest::ShaderCacheCb,
+ base::Unretained(this)));
+ EXPECT_EQ(ProgramCache::PROGRAM_LOAD_FAILURE, cache_->LoadLinkedProgram(
+ kProgramId,
+ vertex_shader_,
+ fragment_shader_,
+ NULL,
+ varyings_,
+ GL_INTERLEAVED_ATTRIBS,
+ base::Bind(&MemoryProgramCacheTest::ShaderCacheCb,
+ base::Unretained(this))));
+}
+
TEST_F(MemoryProgramCacheTest, MemoryProgramCacheEviction) {
const GLenum kFormat = 1;
const int kProgramId = 10;
diff --git a/chromium/gpu/command_buffer/service/program_manager.cc b/chromium/gpu/command_buffer/service/program_manager.cc
index 3861267d96f..92615df4490 100644
--- a/chromium/gpu/command_buffer/service/program_manager.cc
+++ b/chromium/gpu/command_buffer/service/program_manager.cc
@@ -225,38 +225,6 @@ GLsizeiptr VertexShaderOutputTypeToSize(const sh::Varying& varying) {
return total.ValueOrDefault(std::numeric_limits<GLsizeiptr>::max());
}
-// Scoped object which logs three UMA stats related to program cleanup when
-// destructed:
-// - GPU.DestroyProgramManagerPrograms.Elapsed - The amount of time spent
-// destroying programs during ProgramManager::Destroy.
-// - GPU.DestroyProgramManagerPrograms.Programs - The number of progams
-// destroyed during ProgramManager::Destroy.
-// - GPU.DestroyProgramManagerPrograms.ProgramsPerMs - The number of programs
-// destroyed per millisecond during ProgramManager::Destroy. Only logged if
-// both the number of programs and the elapsed time are greater than zero.
-class ProgramDeletionScopedUmaTimeAndRate {
- public:
- ProgramDeletionScopedUmaTimeAndRate(int32_t count)
- : count_(count), start_(base::TimeTicks::Now()) {}
-
- ~ProgramDeletionScopedUmaTimeAndRate() {
- base::TimeDelta elapsed = base::TimeTicks::Now() - start_;
- UMA_HISTOGRAM_TIMES("GPU.DestroyProgramManagerPrograms.Elapsed", elapsed);
- UMA_HISTOGRAM_COUNTS("GPU.DestroyProgramManagerPrograms.Programs", count_);
-
- double elapsed_ms = elapsed.InMillisecondsF();
- if (count_ > 0 && elapsed_ms > 0) {
- double rate = static_cast<double>(count_) / elapsed_ms;
- UMA_HISTOGRAM_COUNTS("GPU.DestroyProgramManagerPrograms.ProgramsPerMs",
- base::saturated_cast<int32_t>(rate));
- }
- }
-
- private:
- const int32_t count_;
- const base::TimeTicks start_;
-};
-
} // anonymous namespace.
Program::UniformInfo::UniformInfo()
@@ -2112,9 +2080,8 @@ bool Program::CheckVaryingsPacking(
for (const auto& key_value : combined_map) {
variables.push_back(*key_value.second);
}
- return ShCheckVariablesWithinPackingLimits(
- static_cast<int>(manager_->max_varying_vectors()),
- variables);
+ return sh::CheckVariablesWithinPackingLimits(
+ static_cast<int>(manager_->max_varying_vectors()), variables);
}
void Program::GetProgramInfo(
@@ -2590,8 +2557,6 @@ ProgramManager::~ProgramManager() {
void ProgramManager::Destroy(bool have_context) {
have_context_ = have_context;
- ProgramDeletionScopedUmaTimeAndRate scoped_histogram(
- base::saturated_cast<int32_t>(programs_.size()));
while (!programs_.empty()) {
programs_.erase(programs_.begin());
if (progress_reporter_)
diff --git a/chromium/gpu/command_buffer/service/sampler_manager.cc b/chromium/gpu/command_buffer/service/sampler_manager.cc
index c76cbf95dac..9b8ec3c4b65 100644
--- a/chromium/gpu/command_buffer/service/sampler_manager.cc
+++ b/chromium/gpu/command_buffer/service/sampler_manager.cc
@@ -30,8 +30,9 @@ SamplerState::SamplerState()
min_lod(-1000.0f) {
}
-Sampler::Sampler(SamplerManager* manager, GLuint service_id)
+Sampler::Sampler(SamplerManager* manager, GLuint client_id, GLuint service_id)
: manager_(manager),
+ client_id_(client_id),
service_id_(service_id),
deleted_(false) {
DCHECK(manager);
@@ -147,7 +148,7 @@ void SamplerManager::Destroy(bool have_context) {
Sampler* SamplerManager::CreateSampler(GLuint client_id, GLuint service_id) {
DCHECK_NE(0u, service_id);
auto result = samplers_.insert(std::make_pair(client_id,
- scoped_refptr<Sampler>(new Sampler(this, service_id))));
+ scoped_refptr<Sampler>(new Sampler(this, client_id, service_id))));
DCHECK(result.second);
return result.first->second.get();
}
diff --git a/chromium/gpu/command_buffer/service/sampler_manager.h b/chromium/gpu/command_buffer/service/sampler_manager.h
index 8d8534328a5..61c51160044 100644
--- a/chromium/gpu/command_buffer/service/sampler_manager.h
+++ b/chromium/gpu/command_buffer/service/sampler_manager.h
@@ -37,9 +37,12 @@ struct SamplerState {
class GPU_EXPORT Sampler : public base::RefCounted<Sampler> {
public:
- Sampler(SamplerManager* manager, GLuint service_id);
+ Sampler(SamplerManager* manager, GLuint client_id, GLuint service_id);
+
+ GLuint client_id() const {
+ return client_id_;
+ }
- // The service side OpenGL id of the texture.
GLuint service_id() const {
return service_id_;
}
@@ -114,7 +117,7 @@ class GPU_EXPORT Sampler : public base::RefCounted<Sampler> {
// The manager that owns this Sampler.
SamplerManager* manager_;
- // The id of the texure
+ GLuint client_id_;
GLuint service_id_;
// Sampler parameters.
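
Sampler gaining client_id() is what lets the decoder answer GL_SAMPLER_BINDING queries with the id the client created instead of the service-side GL name (see the GetSamplerBinding test earlier in this patch, which expects 12 back rather than the mocked 1012). A rough sketch of that lookup; the sampler_units array and its naming are assumptions, not the decoder's actual fields.

#include "base/memory/ref_counted.h"
#include "gpu/command_buffer/service/sampler_manager.h"

// Sketch only: answer the binding query from tracked state via client_id().
GLint GetSamplerBindingForUnit(
    const scoped_refptr<gpu::gles2::Sampler>* sampler_units,
    GLuint active_unit) {
  const gpu::gles2::Sampler* sampler = sampler_units[active_unit].get();
  return sampler ? static_cast<GLint>(sampler->client_id()) : 0;
}
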
diff --git a/chromium/gpu/command_buffer/service/shader_translator.cc b/chromium/gpu/command_buffer/service/shader_translator.cc
index 60b6d63692a..d5067b58c8d 100644
--- a/chromium/gpu/command_buffer/service/shader_translator.cc
+++ b/chromium/gpu/command_buffer/service/shader_translator.cc
@@ -27,12 +27,12 @@ class ShaderTranslatorInitializer {
public:
ShaderTranslatorInitializer() {
TRACE_EVENT0("gpu", "ShInitialize");
- CHECK(ShInitialize());
+ CHECK(sh::Initialize());
}
~ShaderTranslatorInitializer() {
TRACE_EVENT0("gpu", "ShFinalize");
- ShFinalize();
+ sh::Finalize();
}
};
@@ -43,7 +43,7 @@ void GetAttributes(ShHandle compiler, AttributeMap* var_map) {
if (!var_map)
return;
var_map->clear();
- const std::vector<sh::Attribute>* attribs = ShGetAttributes(compiler);
+ const std::vector<sh::Attribute>* attribs = sh::GetAttributes(compiler);
if (attribs) {
for (size_t ii = 0; ii < attribs->size(); ++ii)
(*var_map)[(*attribs)[ii].mappedName] = (*attribs)[ii];
@@ -54,7 +54,7 @@ void GetUniforms(ShHandle compiler, UniformMap* var_map) {
if (!var_map)
return;
var_map->clear();
- const std::vector<sh::Uniform>* uniforms = ShGetUniforms(compiler);
+ const std::vector<sh::Uniform>* uniforms = sh::GetUniforms(compiler);
if (uniforms) {
for (size_t ii = 0; ii < uniforms->size(); ++ii)
(*var_map)[(*uniforms)[ii].mappedName] = (*uniforms)[ii];
@@ -65,7 +65,7 @@ void GetVaryings(ShHandle compiler, VaryingMap* var_map) {
if (!var_map)
return;
var_map->clear();
- const std::vector<sh::Varying>* varyings = ShGetVaryings(compiler);
+ const std::vector<sh::Varying>* varyings = sh::GetVaryings(compiler);
if (varyings) {
for (size_t ii = 0; ii < varyings->size(); ++ii)
(*var_map)[(*varyings)[ii].mappedName] = (*varyings)[ii];
@@ -74,7 +74,7 @@ void GetVaryings(ShHandle compiler, VaryingMap* var_map) {
void GetOutputVariables(ShHandle compiler, OutputVariableList* var_list) {
if (!var_list)
return;
- *var_list = *ShGetOutputVariables(compiler);
+ *var_list = *sh::GetOutputVariables(compiler);
}
void GetInterfaceBlocks(ShHandle compiler, InterfaceBlockMap* var_map) {
@@ -82,7 +82,7 @@ void GetInterfaceBlocks(ShHandle compiler, InterfaceBlockMap* var_map) {
return;
var_map->clear();
const std::vector<sh::InterfaceBlock>* interface_blocks =
- ShGetInterfaceBlocks(compiler);
+ sh::GetInterfaceBlocks(compiler);
if (interface_blocks) {
for (const auto& block : *interface_blocks) {
(*var_map)[block.mappedName] = block;
@@ -96,7 +96,7 @@ void GetNameHashingInfo(ShHandle compiler, NameMap* name_map) {
name_map->clear();
typedef std::map<std::string, std::string> NameMapANGLE;
- const NameMapANGLE* angle_map = ShGetNameHashingMap(compiler);
+ const NameMapANGLE* angle_map = sh::GetNameHashingMap(compiler);
DCHECK(angle_map);
for (NameMapANGLE::const_iterator iter = angle_map->begin();
@@ -182,8 +182,8 @@ bool ShaderTranslator::Init(GLenum shader_type,
{
TRACE_EVENT0("gpu", "ShConstructCompiler");
- compiler_ = ShConstructCompiler(shader_type, shader_spec,
- shader_output_language, resources);
+ compiler_ = sh::ConstructCompiler(shader_type, shader_spec,
+ shader_output_language, resources);
}
compile_options_ =
@@ -226,16 +226,15 @@ bool ShaderTranslator::Translate(const std::string& shader_source,
{
TRACE_EVENT0("gpu", "ShCompile");
const char* const shader_strings[] = { shader_source.c_str() };
- success = ShCompile(
- compiler_, shader_strings, 1, GetCompileOptions());
+ success = sh::Compile(compiler_, shader_strings, 1, GetCompileOptions());
}
if (success) {
// Get translated shader.
if (translated_source) {
- *translated_source = ShGetObjectCode(compiler_);
+ *translated_source = sh::GetObjectCode(compiler_);
}
// Get shader version.
- *shader_version = ShGetShaderVersion(compiler_);
+ *shader_version = sh::GetShaderVersion(compiler_);
// Get info for attribs, uniforms, varyings and output variables.
GetAttributes(compiler_, attrib_map);
GetUniforms(compiler_, uniform_map);
@@ -248,11 +247,11 @@ bool ShaderTranslator::Translate(const std::string& shader_source,
// Get info log.
if (info_log) {
- *info_log = ShGetInfoLog(compiler_);
+ *info_log = sh::GetInfoLog(compiler_);
}
// We don't need results in the compiler anymore.
- ShClearResults(compiler_);
+ sh::ClearResults(compiler_);
return success;
}
@@ -261,8 +260,8 @@ std::string ShaderTranslator::GetStringForOptionsThatWouldAffectCompilation()
const {
DCHECK(compiler_ != NULL);
return std::string(":CompileOptions:" +
- base::Uint64ToString(GetCompileOptions())) +
- ShGetBuiltInResourcesString(compiler_);
+ base::Uint64ToString(GetCompileOptions())) +
+ sh::GetBuiltInResourcesString(compiler_);
}
void ShaderTranslator::AddDestructionObserver(
@@ -280,7 +279,7 @@ ShaderTranslator::~ShaderTranslator() {
observer.OnDestruct(this);
if (compiler_ != NULL)
- ShDestruct(compiler_);
+ sh::Destruct(compiler_);
}
} // namespace gles2
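
The translator changes above follow ANGLE's ShaderLang API moving from Sh-prefixed free functions into the sh:: namespace. Condensing the calls this patch migrates into one sequence (error handling and full resource setup trimmed):

#include <GLSLANG/ShaderLang.h>
#include <GLES2/gl2.h>
#include <string>

// Each call below is the renamed form used in the diff; the old names are
// noted in comments.
bool TranslateOnce(const char* source, std::string* object_code) {
  sh::Initialize();                                  // was ShInitialize()
  ShBuiltInResources resources;
  sh::InitBuiltInResources(&resources);              // was ShInitBuiltInResources()
  ShHandle compiler = sh::ConstructCompiler(         // was ShConstructCompiler()
      GL_FRAGMENT_SHADER, SH_GLES2_SPEC, SH_GLSL_150_CORE_OUTPUT, &resources);
  bool ok = sh::Compile(compiler, &source, 1, SH_OBJECT_CODE);  // was ShCompile()
  if (ok)
    *object_code = sh::GetObjectCode(compiler);      // was ShGetObjectCode()
  sh::Destruct(compiler);                            // was ShDestruct()
  sh::Finalize();                                    // was ShFinalize()
  return ok;
}
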
diff --git a/chromium/gpu/command_buffer/service/shader_translator_cache_unittest.cc b/chromium/gpu/command_buffer/service/shader_translator_cache_unittest.cc
index eefd2e69a7a..a23a753b34b 100644
--- a/chromium/gpu/command_buffer/service/shader_translator_cache_unittest.cc
+++ b/chromium/gpu/command_buffer/service/shader_translator_cache_unittest.cc
@@ -16,11 +16,11 @@ TEST(ShaderTranslatorCacheTest, InitParamComparable) {
ShBuiltInResources a_resources;
memset(&a_resources, 88, sizeof(a_resources));
- ShInitBuiltInResources(&a_resources);
+ sh::InitBuiltInResources(&a_resources);
ShBuiltInResources b_resources;
memset(&b_resources, 77, sizeof(b_resources));
- ShInitBuiltInResources(&b_resources);
+ sh::InitBuiltInResources(&b_resources);
EXPECT_TRUE(memcmp(&a_resources, &b_resources, sizeof(a_resources)) == 0);
diff --git a/chromium/gpu/command_buffer/service/shader_translator_unittest.cc b/chromium/gpu/command_buffer/service/shader_translator_unittest.cc
index 0059aa088fa..e48f6111306 100644
--- a/chromium/gpu/command_buffer/service/shader_translator_unittest.cc
+++ b/chromium/gpu/command_buffer/service/shader_translator_unittest.cc
@@ -24,7 +24,7 @@ class ShaderTranslatorTest : public testing::Test {
protected:
void SetUp() override {
ShBuiltInResources resources;
- ShInitBuiltInResources(&resources);
+ sh::InitBuiltInResources(&resources);
resources.MaxExpressionComplexity = 32;
resources.MaxCallStackDepth = 32;
@@ -63,7 +63,7 @@ class ES3ShaderTranslatorTest : public testing::Test {
protected:
void SetUp() override {
ShBuiltInResources resources;
- ShInitBuiltInResources(&resources);
+ sh::InitBuiltInResources(&resources);
resources.MaxExpressionComplexity = 32;
resources.MaxCallStackDepth = 32;
@@ -424,7 +424,7 @@ TEST_F(ShaderTranslatorTest, OptionsString) {
scoped_refptr<ShaderTranslator> translator_3 = new ShaderTranslator();
ShBuiltInResources resources;
- ShInitBuiltInResources(&resources);
+ sh::InitBuiltInResources(&resources);
ASSERT_TRUE(translator_1->Init(GL_VERTEX_SHADER, SH_GLES2_SPEC, &resources,
SH_GLSL_150_CORE_OUTPUT,
@@ -464,7 +464,7 @@ class ShaderTranslatorOutputVersionTest
// https://bugs.chromium.org/p/angleproject/issues/detail?id=1277
TEST_F(ShaderTranslatorOutputVersionTest, DISABLED_CompatibilityOutput) {
ShBuiltInResources resources;
- ShInitBuiltInResources(&resources);
+ sh::InitBuiltInResources(&resources);
ShCompileOptions compile_options = SH_OBJECT_CODE;
ShShaderOutput shader_output_language = SH_GLSL_COMPATIBILITY_OUTPUT;
scoped_refptr<ShaderTranslator> vertex_translator = new ShaderTranslator();
@@ -532,7 +532,7 @@ TEST_P(ShaderTranslatorOutputVersionTest, HasCorrectOutputGLSLVersion) {
scoped_refptr<ShaderTranslator> translator = new ShaderTranslator();
ShBuiltInResources resources;
- ShInitBuiltInResources(&resources);
+ sh::InitBuiltInResources(&resources);
ShCompileOptions compile_options = SH_OBJECT_CODE;
ShShaderOutput shader_output_language =
ShaderTranslator::GetShaderOutputLanguageForContext(
diff --git a/chromium/gpu/command_buffer/service/texture_manager.cc b/chromium/gpu/command_buffer/service/texture_manager.cc
index dd7d59c5474..e9307a24f28 100644
--- a/chromium/gpu/command_buffer/service/texture_manager.cc
+++ b/chromium/gpu/command_buffer/service/texture_manager.cc
@@ -439,12 +439,17 @@ void TextureManager::Destroy(bool have_context) {
}
TextureBase::TextureBase(GLuint service_id)
- : service_id_(service_id), mailbox_manager_(nullptr) {}
+ : service_id_(service_id), target_(GL_NONE), mailbox_manager_(nullptr) {}
TextureBase::~TextureBase() {
DCHECK_EQ(nullptr, mailbox_manager_);
}
+void TextureBase::SetTarget(GLenum target) {
+ DCHECK_EQ(0u, target_); // you can only set this once.
+ target_ = target;
+}
+
void TextureBase::DeleteFromMailboxManager() {
if (mailbox_manager_) {
mailbox_manager_->TextureDeleted(this);
@@ -457,8 +462,10 @@ void TextureBase::SetMailboxManager(MailboxManager* mailbox_manager) {
mailbox_manager_ = mailbox_manager;
}
-TexturePassthrough::TexturePassthrough(GLuint service_id)
- : TextureBase(service_id), have_context_(true) {}
+TexturePassthrough::TexturePassthrough(GLuint service_id, GLenum target)
+ : TextureBase(service_id), have_context_(true) {
+ TextureBase::SetTarget(target);
+}
TexturePassthrough::~TexturePassthrough() {
DeleteFromMailboxManager();
@@ -478,7 +485,6 @@ Texture::Texture(GLuint service_id)
cleared_(true),
num_uncleared_mips_(0),
num_npot_faces_(0),
- target_(0),
usage_(GL_NONE),
base_level_(0),
max_level_(1000),
@@ -746,8 +752,7 @@ void Texture::MarkMipmapsGenerated() {
}
void Texture::SetTarget(GLenum target, GLint max_levels) {
- DCHECK_EQ(0u, target_); // you can only set this once.
- target_ = target;
+ TextureBase::SetTarget(target);
size_t num_faces = (target == GL_TEXTURE_CUBE_MAP) ? 6 : 1;
face_infos_.resize(num_faces);
for (size_t ii = 0; ii < num_faces; ++ii) {
@@ -785,11 +790,15 @@ bool Texture::CanGenerateMipmaps(const FeatureInfo* feature_info) const {
return false;
}
- if (!Texture::ColorRenderable(feature_info, base.internal_format,
- immutable_) ||
- !Texture::TextureFilterable(feature_info, base.internal_format, base.type,
- immutable_)) {
- return false;
+ if (!feature_info->validators()->texture_unsized_internal_format.IsValid(
+ base.internal_format)) {
+ if (!Texture::ColorRenderable(feature_info, base.internal_format,
+ immutable_) ||
+ !Texture::TextureFilterable(feature_info, base.internal_format,
+ base.type,
+ immutable_)) {
+ return false;
+ }
}
for (size_t ii = 0; ii < face_infos_.size(); ++ii) {
@@ -869,7 +878,9 @@ bool Texture::ColorRenderable(const FeatureInfo* feature_info,
bool immutable) {
if (feature_info->validators()->texture_unsized_internal_format.IsValid(
internal_format)) {
- return true;
+ return internal_format != GL_ALPHA && internal_format != GL_LUMINANCE &&
+ internal_format != GL_LUMINANCE_ALPHA &&
+ internal_format != GL_SRGB_EXT;
}
return SizedFormatAvailable(feature_info, immutable, internal_format) &&
@@ -1342,6 +1353,11 @@ GLenum Texture::SetParameteri(
}
swizzle_a_ = param;
break;
+ case GL_TEXTURE_SRGB_DECODE_EXT:
+ if (!feature_info->validators()->texture_srgb_decode_ext.IsValid(param)) {
+ return GL_INVALID_ENUM;
+ }
+ break;
case GL_TEXTURE_IMMUTABLE_FORMAT:
case GL_TEXTURE_IMMUTABLE_LEVELS:
return GL_INVALID_ENUM;
@@ -1772,16 +1788,8 @@ bool Texture::CanRenderTo(const FeatureInfo* feature_info, GLint level) const {
DCHECK(level >= 0 &&
level < static_cast<GLint>(face_infos_[0].level_infos.size()));
GLenum internal_format = face_infos_[0].level_infos[level].internal_format;
- bool color_renderable =
- ((feature_info->validators()->texture_unsized_internal_format.IsValid(
- internal_format) &&
- internal_format != GL_ALPHA && internal_format != GL_LUMINANCE &&
- internal_format != GL_LUMINANCE_ALPHA &&
- internal_format != GL_SRGB_EXT) ||
- (SizedFormatAvailable(feature_info, immutable_, internal_format) &&
- feature_info->validators()
- ->texture_sized_color_renderable_internal_format.IsValid(
- internal_format)));
+ bool color_renderable = ColorRenderable(feature_info, internal_format,
+ immutable_);
bool depth_renderable = feature_info->validators()->
texture_depth_renderable_internal_format.IsValid(internal_format);
bool stencil_renderable = feature_info->validators()->
@@ -3458,6 +3466,7 @@ GLenum TextureManager::ExtractFormatFromStorageFormat(GLenum internalformat) {
case GL_RGBA32I:
return GL_RGBA_INTEGER;
case GL_BGRA_EXT:
+ case GL_BGRA8_EXT:
return GL_BGRA_EXT;
case GL_DEPTH_COMPONENT16:
case GL_DEPTH_COMPONENT24:
@@ -3487,8 +3496,6 @@ GLenum TextureManager::ExtractFormatFromStorageFormat(GLenum internalformat) {
return GL_LUMINANCE;
case GL_LUMINANCE_ALPHA16F_EXT:
return GL_LUMINANCE_ALPHA;
- case GL_BGRA8_EXT:
- return GL_BGRA_EXT;
default:
return GL_NONE;
}
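The ColorRenderable change above stops treating every unsized internal format as color-renderable: alpha- and luminance-style formats (and unsized sRGB) are now rejected, and CanRenderTo simply delegates to the shared predicate. A standalone sketch of that unsized-format rule (the helper name is hypothetical):

// Hypothetical helper mirroring the unsized-format branch of
// Texture::ColorRenderable after this change.
bool IsUnsizedFormatColorRenderable(GLenum internal_format) {
  return internal_format != GL_ALPHA && internal_format != GL_LUMINANCE &&
         internal_format != GL_LUMINANCE_ALPHA &&
         internal_format != GL_SRGB_EXT;
}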
diff --git a/chromium/gpu/command_buffer/service/texture_manager.h b/chromium/gpu/command_buffer/service/texture_manager.h
index 11c8b630505..8cf2edb1548 100644
--- a/chromium/gpu/command_buffer/service/texture_manager.h
+++ b/chromium/gpu/command_buffer/service/texture_manager.h
@@ -33,7 +33,6 @@ class GLES2Decoder;
class GLStreamTextureImage;
struct ContextState;
struct DecoderFramebufferState;
-class Display;
class ErrorState;
class FeatureInfo;
class FramebufferManager;
@@ -51,10 +50,21 @@ class GPU_EXPORT TextureBase {
// The service side OpenGL id of the texture.
GLuint service_id() const { return service_id_; }
+ // Returns the target this texture was first bound to or 0 if it has not
+ // been bound. Once a texture is bound to a specific target it can never be
+ // bound to a different target.
+ GLenum target() const { return target_; }
+
protected:
// The id of the texture.
GLuint service_id_;
+ // The target. 0 if unset, otherwise GL_TEXTURE_2D or GL_TEXTURE_CUBE_MAP.
+ // Or GL_TEXTURE_2D_ARRAY or GL_TEXTURE_3D (for GLES3).
+ GLenum target_;
+
+ void SetTarget(GLenum target);
+
void DeleteFromMailboxManager();
private:
@@ -71,7 +81,7 @@ class GPU_EXPORT TextureBase {
class TexturePassthrough final : public TextureBase,
public base::RefCounted<TexturePassthrough> {
public:
- explicit TexturePassthrough(GLuint service_id);
+ TexturePassthrough(GLuint service_id, GLenum target);
// Notify the texture that the context is lost and it shouldn't delete the
// native GL texture in the destructor
@@ -195,13 +205,6 @@ class GPU_EXPORT Texture final : public TextureBase {
owned_service_id_ = service_id;
}
- // Returns the target this texure was first bound to or 0 if it has not
- // been bound. Once a texture is bound to a specific target it can never be
- // bound to a different target.
- GLenum target() const {
- return target_;
- }
-
bool SafeToRenderFrom() const {
return cleared_;
}
@@ -322,6 +325,10 @@ class GPU_EXPORT Texture final : public TextureBase {
bool EmulatingRGB();
+ static bool ColorRenderable(const FeatureInfo* feature_info,
+ GLenum internal_format,
+ bool immutable);
+
private:
friend class MailboxManagerImpl;
friend class MailboxManagerSync;
@@ -499,10 +506,6 @@ class GPU_EXPORT Texture final : public TextureBase {
GLenum format,
GLenum type);
- static bool ColorRenderable(const FeatureInfo* feature_info,
- GLenum internal_format,
- bool immutable);
-
static bool TextureFilterable(const FeatureInfo* feature_info,
GLenum internal_format,
GLenum type,
@@ -593,10 +596,6 @@ class GPU_EXPORT Texture final : public TextureBase {
int num_uncleared_mips_;
int num_npot_faces_;
- // The target. 0 if unset, otherwise GL_TEXTURE_2D or GL_TEXTURE_CUBE_MAP.
- // Or GL_TEXTURE_2D_ARRAY or GL_TEXTURE_3D (for GLES3).
- GLenum target_;
-
// Texture parameters.
SamplerState sampler_state_;
GLenum usage_;
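With target_ hoisted from Texture into TextureBase, passthrough textures now receive their target at construction and validated textures still set it exactly once through SetTarget(). A hedged usage sketch, reusing names from the unit tests below; anything else is illustrative:

// Passthrough decoder: the target is fixed when the wrapper is created.
scoped_refptr<TexturePassthrough> passthrough(
    new TexturePassthrough(service_id, GL_TEXTURE_2D));

// Validating decoder: SetTarget() may only be called once
// (DCHECK_EQ(0u, target_) in TextureBase::SetTarget).
manager_->SetTarget(texture_ref_.get(), GL_TEXTURE_2D);
GLenum target = texture_ref_->texture()->target();  // accessor now on TextureBase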
diff --git a/chromium/gpu/command_buffer/service/texture_manager_unittest.cc b/chromium/gpu/command_buffer/service/texture_manager_unittest.cc
index a3b51f825ea..471ec773cbd 100644
--- a/chromium/gpu/command_buffer/service/texture_manager_unittest.cc
+++ b/chromium/gpu/command_buffer/service/texture_manager_unittest.cc
@@ -785,6 +785,34 @@ TEST_F(TextureTest, ZeroSizeCanNotRender) {
EXPECT_FALSE(manager_->CanRender(texture_ref_.get()));
}
+TEST_F(TextureTest, CanRenderTo) {
+ TestHelper::SetupFeatureInfoInitExpectations(gl_.get(), "");
+ scoped_refptr<FeatureInfo> feature_info(new FeatureInfo());
+ feature_info->InitializeForTesting();
+ manager_->SetTarget(texture_ref_.get(), GL_TEXTURE_2D);
+ manager_->SetLevelInfo(texture_ref_.get(), GL_TEXTURE_2D, 0, GL_RGB, 1, 1, 1,
+ 0, GL_RGB, GL_UNSIGNED_BYTE, gfx::Rect(1, 1));
+ EXPECT_TRUE(texture_ref_->texture()->CanRenderTo(feature_info.get(), 0));
+ manager_->SetLevelInfo(texture_ref_.get(), GL_TEXTURE_2D, 0, GL_RGBA, 0, 0, 1,
+ 0, GL_RGBA, GL_UNSIGNED_BYTE, gfx::Rect());
+ EXPECT_TRUE(texture_ref_->texture()->CanRenderTo(feature_info.get(), 0));
+}
+
+TEST_F(TextureTest, CanNotRenderTo) {
+ TestHelper::SetupFeatureInfoInitExpectations(gl_.get(), "");
+ scoped_refptr<FeatureInfo> feature_info(new FeatureInfo());
+ feature_info->InitializeForTesting();
+ manager_->SetTarget(texture_ref_.get(), GL_TEXTURE_2D);
+ manager_->SetLevelInfo(texture_ref_.get(), GL_TEXTURE_2D, 0, GL_LUMINANCE, 1,
+ 1, 1, 0, GL_LUMINANCE, GL_UNSIGNED_BYTE,
+ gfx::Rect(1, 1));
+ EXPECT_FALSE(texture_ref_->texture()->CanRenderTo(feature_info.get(), 0));
+ manager_->SetLevelInfo(texture_ref_.get(), GL_TEXTURE_2D, 0,
+ GL_LUMINANCE_ALPHA, 0, 0, 1, 0, GL_LUMINANCE_ALPHA,
+ GL_UNSIGNED_BYTE, gfx::Rect());
+ EXPECT_FALSE(texture_ref_->texture()->CanRenderTo(feature_info.get(), 0));
+}
+
TEST_F(TextureTest, EstimatedSize) {
manager_->SetTarget(texture_ref_.get(), GL_TEXTURE_2D);
manager_->SetLevelInfo(texture_ref_.get(), GL_TEXTURE_2D, 0, GL_RGBA, 8, 4, 1,
diff --git a/chromium/gpu/command_buffer/service/transfer_buffer_manager.cc b/chromium/gpu/command_buffer/service/transfer_buffer_manager.cc
index 9f583b9f656..8fd4050d04b 100644
--- a/chromium/gpu/command_buffer/service/transfer_buffer_manager.cc
+++ b/chromium/gpu/command_buffer/service/transfer_buffer_manager.cc
@@ -35,8 +35,10 @@ TransferBufferManager::TransferBufferManager(
TransferBufferManager::~TransferBufferManager() {
while (!registered_buffers_.empty()) {
BufferMap::iterator it = registered_buffers_.begin();
- DCHECK(shared_memory_bytes_allocated_ >= it->second->size());
- shared_memory_bytes_allocated_ -= it->second->size();
+ if (it->second->backing()->is_shared()) {
+ DCHECK(shared_memory_bytes_allocated_ >= it->second->size());
+ shared_memory_bytes_allocated_ -= it->second->size();
+ }
registered_buffers_.erase(it);
}
DCHECK(!shared_memory_bytes_allocated_);
@@ -77,8 +79,8 @@ bool TransferBufferManager::RegisterTransferBuffer(
DCHECK(!(reinterpret_cast<uintptr_t>(buffer->memory()) &
(kCommandBufferEntrySize - 1)));
- shared_memory_bytes_allocated_ += buffer->size();
-
+ if (buffer->backing()->is_shared())
+ shared_memory_bytes_allocated_ += buffer->size();
registered_buffers_[id] = buffer;
return true;
@@ -91,9 +93,10 @@ void TransferBufferManager::DestroyTransferBuffer(int32_t id) {
return;
}
- DCHECK(shared_memory_bytes_allocated_ >= it->second->size());
- shared_memory_bytes_allocated_ -= it->second->size();
-
+ if (it->second->backing()->is_shared()) {
+ DCHECK(shared_memory_bytes_allocated_ >= it->second->size());
+ shared_memory_bytes_allocated_ -= it->second->size();
+ }
registered_buffers_.erase(it);
}
@@ -135,10 +138,12 @@ bool TransferBufferManager::OnMemoryDump(
MemoryAllocatorDump* dump = pmd->CreateAllocatorDump(dump_name);
dump->AddScalar(MemoryAllocatorDump::kNameSize,
MemoryAllocatorDump::kUnitsBytes, buffer->size());
- auto guid =
- GetBufferGUIDForTracing(memory_tracker_->ClientTracingId(), buffer_id);
- pmd->CreateSharedGlobalAllocatorDump(guid);
- pmd->AddOwnershipEdge(dump->guid(), guid);
+ if (buffer->backing()->is_shared()) {
+ auto guid = GetBufferGUIDForTracing(memory_tracker_->ClientTracingId(),
+ buffer_id);
+ pmd->CreateSharedGlobalAllocatorDump(guid);
+ pmd->AddOwnershipEdge(dump->guid(), guid);
+ }
}
return true;
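The TransferBufferManager hunks above make both the shared-memory byte accounting and the shared-global memory-dump edges conditional on the buffer actually having a shared-memory backing. A minimal sketch of the guard, assuming only the Buffer::backing()->is_shared() and Buffer::size() calls already used above:

// Sketch only: non-shared backings no longer contribute to the
// shared_memory_bytes_allocated_ counter.
void AccountBuffer(const scoped_refptr<gpu::Buffer>& buffer,
                   size_t* shared_memory_bytes_allocated) {
  if (buffer->backing()->is_shared())
    *shared_memory_bytes_allocated += buffer->size();
}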
diff --git a/chromium/gpu/command_buffer/service/transform_feedback_manager.cc b/chromium/gpu/command_buffer/service/transform_feedback_manager.cc
index 5ceb1b78eb1..7f20e973d18 100644
--- a/chromium/gpu/command_buffer/service/transform_feedback_manager.cc
+++ b/chromium/gpu/command_buffer/service/transform_feedback_manager.cc
@@ -11,16 +11,19 @@ namespace gpu {
namespace gles2 {
TransformFeedback::TransformFeedback(TransformFeedbackManager* manager,
+ GLuint client_id,
GLuint service_id)
: IndexedBufferBindingHost(
manager->max_transform_feedback_separate_attribs(),
manager->needs_emulation()),
manager_(manager),
+ client_id_(client_id),
service_id_(service_id),
has_been_bound_(false),
active_(false),
paused_(false),
primitive_mode_(GL_NONE) {
+ DCHECK_LE(0u, client_id);
DCHECK_LT(0u, service_id);
}
@@ -95,7 +98,7 @@ void TransformFeedbackManager::Destroy() {
TransformFeedback* TransformFeedbackManager::CreateTransformFeedback(
GLuint client_id, GLuint service_id) {
scoped_refptr<TransformFeedback> transform_feedback(
- new TransformFeedback(this, service_id));
+ new TransformFeedback(this, client_id, service_id));
auto result = transform_feedbacks_.insert(
std::make_pair(client_id, transform_feedback));
DCHECK(result.second);
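TransformFeedback objects now carry the client-visible id alongside the service id; the manager threads it through at creation time, presumably so callers can recover the client name later via client_id(). A short sketch using only the constructor and accessors added above:

// Sketch only: both ids are supplied when the manager creates the object.
scoped_refptr<TransformFeedback> transform_feedback(
    new TransformFeedback(manager, client_id, service_id));
GLuint name = transform_feedback->client_id();    // client-visible id
GLuint gl_id = transform_feedback->service_id();  // service-side GL id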
diff --git a/chromium/gpu/command_buffer/service/transform_feedback_manager.h b/chromium/gpu/command_buffer/service/transform_feedback_manager.h
index 81c5c85e1d8..edb52270b51 100644
--- a/chromium/gpu/command_buffer/service/transform_feedback_manager.h
+++ b/chromium/gpu/command_buffer/service/transform_feedback_manager.h
@@ -13,10 +13,6 @@
#include "gpu/command_buffer/service/indexed_buffer_binding_host.h"
#include "gpu/gpu_export.h"
-namespace gfx {
-struct GLVersionInfo;
-};
-
namespace gpu {
namespace gles2 {
@@ -26,7 +22,8 @@ class TransformFeedbackManager;
// Info about TransformFeedbacks currently in the system.
class GPU_EXPORT TransformFeedback : public IndexedBufferBindingHost {
public:
- TransformFeedback(TransformFeedbackManager* manager, GLuint service_id);
+ TransformFeedback(
+ TransformFeedbackManager* manager, GLuint client_id, GLuint service_id);
// All the following functions do state update and call the underlying GL
// function. All validations have been done already and the GL function is
@@ -37,6 +34,10 @@ class GPU_EXPORT TransformFeedback : public IndexedBufferBindingHost {
void DoPauseTransformFeedback();
void DoResumeTransformFeedback();
+ GLuint client_id() const {
+ return client_id_;
+ }
+
GLuint service_id() const {
return service_id_;
}
@@ -63,6 +64,7 @@ class GPU_EXPORT TransformFeedback : public IndexedBufferBindingHost {
// The manager that owns this Buffer.
TransformFeedbackManager* manager_;
+ GLuint client_id_;
GLuint service_id_;
bool has_been_bound_;
diff --git a/chromium/gpu/config/gpu_control_list_jsons.h b/chromium/gpu/config/gpu_control_list_jsons.h
index 671aa9edefa..71d574cbe82 100644
--- a/chromium/gpu/config/gpu_control_list_jsons.h
+++ b/chromium/gpu/config/gpu_control_list_jsons.h
@@ -10,7 +10,6 @@
namespace gpu {
GPU_EXPORT extern const char kGpuDriverBugListJson[];
-GPU_EXPORT extern const char kGpuSwitchingListJson[];
GPU_EXPORT extern const char kSoftwareRenderingListJson[];
} // namespace gpu
diff --git a/chromium/gpu/config/gpu_driver_bug_list_json.cc b/chromium/gpu/config/gpu_driver_bug_list_json.cc
index 85be6b7d204..b9354a97b95 100644
--- a/chromium/gpu/config/gpu_driver_bug_list_json.cc
+++ b/chromium/gpu/config/gpu_driver_bug_list_json.cc
@@ -19,7 +19,7 @@ const char kGpuDriverBugListJson[] = LONG_STRING_CONST(
{
"name": "gpu driver bug list",
// Please update the version number whenever you change this file.
- "version": "9.24",
+ "version": "9.29",
"entries": [
{
"id": 1,
@@ -1739,7 +1739,11 @@ LONG_STRING_CONST(
"description": "Mac Drivers store texture level parameters on int16_t that overflow",
"cr_bugs": [610153],
"os": {
- "type": "macosx"
+ "type": "macosx",
+ "version": {
+ "op": "<",
+ "value": "10.12.2"
+ }
},
"features": [
"use_shadowed_tex_level_params"
@@ -2171,14 +2175,15 @@ LONG_STRING_CONST(
},
{
"id": 201,
- "cr_bugs": [659326],
+ "cr_bugs": [659326,639760],
"description": "AMD drivers in Linux require invariant qualifier to match between vertex and fragment shaders",
"os": {
"type": "linux"
},
"vendor_id": "0x1002",
"features": [
- "dont_remove_invariant_for_fragment_input"
+ "dont_remove_invariant_for_fragment_input",
+ "remove_invariant_and_centroid_for_essl3"
]
},
{
@@ -2207,6 +2212,127 @@ LONG_STRING_CONST(
"features": [
"remove_invariant_and_centroid_for_essl3"
]
+ },
+ {
+ "id": 205,
+ "description": "Adreno 5xx support for EXT_multisampled_render_to_texture is buggy on Android 7.1",
+ "cr_bugs": [663811],
+ "os": {
+ "type": "android",
+ "version": {
+ "op": "=",
+ "value": "7.1.0"
+ }
+ },
+ "gl_renderer": "Adreno \\(TM\\) 5.*",
+ "disabled_extensions": [
+ "GL_EXT_multisampled_render_to_texture"
+ ]
+ },
+ {
+ "id": 206,
+ "description": "Disable KHR_blend_equation_advanced until cc shaders are updated",
+ "cr_bugs": [661715],
+ "disabled_extensions": [
+ "GL_KHR_blend_equation_advanced",
+ "GL_KHR_blend_equation_advanced_coherent"
+ ]
+ },
+ {
+ "id": 207,
+ "description": "Decode and Encode before generateMipmap for srgb format textures on Windows",
+ "cr_bugs": [634519],
+ "os" : {
+ "type": "win"
+ },
+ "features" : [
+ "decode_encode_srgb_for_generatemipmap"
+ ]
+ },
+ {
+ "id": 208,
+ "description": "Decode and Encode before generateMipmap for srgb format textures on Linux Mesa ANGLE path",
+ "cr_bugs": [634519],
+ "os": {
+ "type": "linux"
+ },
+ "gl_renderer": "ANGLE.*",
+ "vendor_id": "0x8086",
+ "features": [
+ "decode_encode_srgb_for_generatemipmap"
+ ]
+ },
+ {
+ "id": 209,
+ "description": "Decode and Encode before generateMipmap for srgb format textures on Chromeos Intel",
+ "cr_bugs": [634519],
+ "os": {
+ "type": "chromeos"
+ },
+ "vendor_id": "0x8086",
+ "features": [
+ "decode_encode_srgb_for_generatemipmap"
+ ]
+ },
+ {
+ "id": 210,
+ "description": "Decode and Encode before generateMipmap for srgb format textures on Linux AMD",
+ "cr_bugs": [634519],
+ "os": {
+ "type": "linux"
+ },
+ "vendor_id": "0x1002",
+ "features": [
+ "decode_encode_srgb_for_generatemipmap"
+ ]
+ },
+ {
+ "id": 211,
+ "description": "Rewrite -float to 0.0 - float for Intel Mac",
+ "cr_bugs": [672380],
+ "os": {
+ "type": "macosx",
+ "version": {
+ "op": "<=",
+ "value": "10.11"
+ }
+ },
+ "vendor_id": "0x8086",
+ "features": [
+ "rewrite_float_unary_minus_operator"
+ ]
+ },
+ {
+ "id": 212,
+ "description": "Program binaries don't contain transform feedback varyings on Qualcomm GPUs",
+ "cr_bugs": [658074],
+ "os": {
+ "type": "android"
+ },
+ "gl_renderer": "Adreno.*",
+ "features": [
+ "disable_program_caching_for_transform_feedback"
+ ]
+ },
+ {
+ "id": 213,
+ "description": "The Mali-Gxx driver does not guarantee flush ordering",
+ "cr_bugs": [678508],
+ "gl_vendor": "ARM.*",
+ "gl_renderer": "Mali-G.*",
+ "features": [
+ "use_virtualized_gl_contexts"
+ ]
+ },
+ {
+ "id": 214,
+ "description": "Certain versions of Qualcomm driver don't setup scissor state correctly when FBO0 is bound.",
+ "cr_bugs": [670607],
+ "gl_vendor": "Qualcomm.*",
+ "machine_model_name": ["Nexus 7"],
+ "features": [
+ "force_update_scissor_state_when_binding_fbo0"
+ ]
}
]
// Please update the version number at beginning of this file whenever you
diff --git a/chromium/gpu/config/gpu_driver_bug_workaround_type.h b/chromium/gpu/config/gpu_driver_bug_workaround_type.h
index 1d5d4a811fe..fecb82c19a4 100644
--- a/chromium/gpu/config/gpu_driver_bug_workaround_type.h
+++ b/chromium/gpu/config/gpu_driver_bug_workaround_type.h
@@ -73,6 +73,8 @@
disable_post_sub_buffers_for_onscreen_surfaces) \
GPU_OP(DISABLE_PROGRAM_CACHE, \
disable_program_cache) \
+ GPU_OP(DISABLE_PROGRAM_CACHING_FOR_TRANSFORM_FEEDBACK, \
+ disable_program_caching_for_transform_feedback) \
GPU_OP(DISABLE_PROGRAM_DISK_CACHE, \
disable_program_disk_cache) \
GPU_OP(DISABLE_TEXTURE_CUBE_MAP_SEAMLESS, \
@@ -103,6 +105,8 @@
force_discrete_gpu) \
GPU_OP(FORCE_INTEGRATED_GPU, \
force_integrated_gpu) \
+ GPU_OP(FORCE_UPDATE_SCISSOR_STATE_WHEN_BINDING_FBO0, \
+ force_update_scissor_state_when_binding_fbo0) \
GPU_OP(GET_FRAG_DATA_INFO_BUG, \
get_frag_data_info_bug) \
GPU_OP(GL_CLEAR_BROKEN, \
@@ -153,6 +157,8 @@
reverse_point_sprite_coord_origin) \
GPU_OP(REWRITE_DO_WHILE_LOOPS, \
rewrite_do_while_loops) \
+ GPU_OP(REWRITE_FLOAT_UNARY_MINUS_OPERATOR, \
+ rewrite_float_unary_minus_operator) \
GPU_OP(REWRITE_TEXELFETCHOFFSET_TO_TEXELFETCH, \
rewrite_texelfetchoffset_to_texelfetch) \
GPU_OP(SCALARIZE_VEC_AND_MAT_CONSTRUCTOR_ARGS, \
diff --git a/chromium/gpu/config/gpu_info.cc b/chromium/gpu/config/gpu_info.cc
index 3af11f67644..6ce2d784dcf 100644
--- a/chromium/gpu/config/gpu_info.cc
+++ b/chromium/gpu/config/gpu_info.cc
@@ -76,6 +76,7 @@ GPUInfo::GPUInfo()
sandboxed(false),
process_crash_count(0),
in_process_gpu(true),
+ passthrough_cmd_decoder(false),
basic_info_state(kCollectInfoNone),
context_info_state(kCollectInfoNone),
#if defined(OS_WIN)
@@ -125,6 +126,7 @@ void GPUInfo::EnumerateFields(Enumerator* enumerator) const {
bool sandboxed;
int process_crash_count;
bool in_process_gpu;
+ bool passthrough_cmd_decoder;
CollectInfoResult basic_info_state;
CollectInfoResult context_info_state;
#if defined(OS_WIN)
@@ -188,6 +190,7 @@ void GPUInfo::EnumerateFields(Enumerator* enumerator) const {
enumerator->AddBool("sandboxed", sandboxed);
enumerator->AddInt("processCrashCount", process_crash_count);
enumerator->AddBool("inProcessGpu", in_process_gpu);
+ enumerator->AddBool("passthroughCmdDecoder", passthrough_cmd_decoder);
enumerator->AddInt("basicInfoState", basic_info_state);
enumerator->AddInt("contextInfoState", context_info_state);
#if defined(OS_WIN)
diff --git a/chromium/gpu/config/gpu_info.h b/chromium/gpu/config/gpu_info.h
index fbb6aac4de9..86479489797 100644
--- a/chromium/gpu/config/gpu_info.h
+++ b/chromium/gpu/config/gpu_info.h
@@ -224,6 +224,9 @@ struct GPU_EXPORT GPUInfo {
// True if the GPU is running in the browser process instead of its own.
bool in_process_gpu;
+ // True if the GPU process is using the passthrough command decoder.
+ bool passthrough_cmd_decoder;
+
// The state of whether the basic/context/DxDiagnostics info is collected and
// if the collection fails or not.
CollectInfoResult basic_info_state;
diff --git a/chromium/gpu/config/gpu_info_collector.cc b/chromium/gpu/config/gpu_info_collector.cc
index 79ae1c88393..2d2dfe2db47 100644
--- a/chromium/gpu/config/gpu_info_collector.cc
+++ b/chromium/gpu/config/gpu_info_collector.cc
@@ -228,6 +228,8 @@ void MergeGPUInfoGL(GPUInfo* basic_gpu_info,
basic_gpu_info->sandboxed = context_gpu_info.sandboxed;
basic_gpu_info->direct_rendering = context_gpu_info.direct_rendering;
basic_gpu_info->in_process_gpu = context_gpu_info.in_process_gpu;
+ basic_gpu_info->passthrough_cmd_decoder =
+ context_gpu_info.passthrough_cmd_decoder;
basic_gpu_info->context_info_state = context_gpu_info.context_info_state;
basic_gpu_info->initialization_time = context_gpu_info.initialization_time;
basic_gpu_info->video_decode_accelerator_capabilities =
diff --git a/chromium/gpu/config/gpu_info_collector_unittest.cc b/chromium/gpu/config/gpu_info_collector_unittest.cc
index 7c31ce6002a..68c1ddc06fb 100644
--- a/chromium/gpu/config/gpu_info_collector_unittest.cc
+++ b/chromium/gpu/config/gpu_info_collector_unittest.cc
@@ -166,7 +166,7 @@ class GPUInfoCollectorTest
void TearDown() override {
::gl::MockGLInterface::SetGLInterface(NULL);
gl_.reset();
- gl::init::ClearGLBindings();
+ gl::init::ShutdownGL();
testing::Test::TearDown();
}
diff --git a/chromium/gpu/config/gpu_info_collector_win.cc b/chromium/gpu/config/gpu_info_collector_win.cc
index 1354f3fe5bf..f983b65a9be 100644
--- a/chromium/gpu/config/gpu_info_collector_win.cc
+++ b/chromium/gpu/config/gpu_info_collector_win.cc
@@ -365,7 +365,7 @@ CollectInfoResult CollectGpuID(uint32_t* vendor_id, uint32_t* device_id) {
*vendor_id = 0;
*device_id = 0;
- // Taken from http://developer.nvidia.com/object/device_ids.html
+ // Taken from http://www.nvidia.com/object/device_ids.html
DISPLAY_DEVICE dd;
dd.cb = sizeof(DISPLAY_DEVICE);
std::wstring id;
@@ -411,7 +411,7 @@ CollectInfoResult CollectBasicGraphicsInfo(GPUInfo* gpu_info) {
DISPLAY_LINK_INSTALLATION_STATUS_MAX);
}
- // Taken from http://developer.nvidia.com/object/device_ids.html
+ // Taken from http://www.nvidia.com/object/device_ids.html
DISPLAY_DEVICE dd;
dd.cb = sizeof(DISPLAY_DEVICE);
std::wstring id;
@@ -423,8 +423,11 @@ CollectInfoResult CollectBasicGraphicsInfo(GPUInfo* gpu_info) {
}
if (id.length() <= 20) {
- gpu_info->basic_info_state = kCollectInfoNonFatalFailure;
- return kCollectInfoNonFatalFailure;
+ // Check if it is the RDP mirror driver "RDPUDD Chained DD"
+ if (wcscmp(dd.DeviceString, L"RDPUDD Chained DD") != 0) {
+ gpu_info->basic_info_state = kCollectInfoNonFatalFailure;
+ return kCollectInfoNonFatalFailure;
+ }
}
DeviceIDToVendorAndDevice(id, &gpu_info->gpu.vendor_id,
diff --git a/chromium/gpu/config/software_rendering_list_json.cc b/chromium/gpu/config/software_rendering_list_json.cc
index ed76fd25880..79be0cedc1f 100644
--- a/chromium/gpu/config/software_rendering_list_json.cc
+++ b/chromium/gpu/config/software_rendering_list_json.cc
@@ -18,7 +18,7 @@ const char kSoftwareRenderingListJson[] = LONG_STRING_CONST(
{
"name": "software rendering list",
// Please update the version number whenever you change this file.
- "version": "12.06",
+ "version": "12.13",
"entries": [
{
"id": 1,
@@ -1070,6 +1070,7 @@ LONG_STRING_CONST(
},
"vendor_id": "0x8086",
"device_id": ["0x0116", "0x0126"],
+ "multi_gpu_category": "any",
"features": [
"all"
]
@@ -1254,7 +1255,7 @@ LONG_STRING_CONST(
},
{
"id": 122,
- "description": "GPU rasterization should only be enabled on NVIDIA Pascal and Maxwell, Intel Broadwell+, and AMD RX-R5 GPUs for now.",
+ "description": "GPU rasterization should only be enabled on NVIDIA Pascal and Maxwell, Intel Broadwell+, and AMD RX-R2 GPUs for now.",
"cr_bugs": [643850],
"os": {
"type": "win"
@@ -1291,7 +1292,7 @@ LONG_STRING_CONST(
"type": "win",
"version": {
"op": ">=",
- "value": "10.0"
+ "value": "8.1"
}
},
"vendor_id": "0x8086",
@@ -1316,34 +1317,22 @@ LONG_STRING_CONST(
"type": "win",
"version": {
"op": ">=",
- "value": "10.0"
+ "value": "8.1"
}
},
"vendor_id": "0x1002",
- "device_id": ["0x1309", "0x130a", "0x130c", "0x130d", "0x130e",
- "0x130f", "0x130f", "0x130f", "0x130f", "0x130f",
- "0x1313", "0x1313", "0x1313", "0x1313", "0x1315",
- "0x1315", "0x1315", "0x1315", "0x1315", "0x1316",
- "0x1318", "0x131c", "0x131d", "0x6600", "0x6604",
- "0x6604", "0x6605", "0x6605", "0x6610", "0x6610",
- "0x6610", "0x6611", "0x6617", "0x6640", "0x6646",
- "0x6646", "0x6647", "0x6647", "0x6658", "0x665d",
- "0x665f", "0x6660", "0x6660", "0x6663", "0x6664",
- "0x6665", "0x6665", "0x6665", "0x6667", "0x67b0",
- "0x67b0", "0x67b1", "0x67b1", "0x67b9", "0x67df",
- "0x67df", "0x67ef", "0x67ef", "0x67ef", "0x67ef",
- "0x6810", "0x6811", "0x6820", "0x6820", "0x6821",
- "0x6821", "0x682b", "0x6835", "0x6900", "0x6900",
- "0x6900", "0x6900", "0x6900", "0x6901", "0x6907",
- "0x6907", "0x6920", "0x6920", "0x6921", "0x6938",
- "0x6938", "0x6938", "0x6939", "0x6939", "0x6939",
- "0x7300", "0x7300", "0x7300", "0x7300", "0x9851",
- "0x9851", "0x9855", "0x9855", "0x9874", "0x9874",
- "0x9874", "0x9874", "0x9874", "0x9874", "0x9874",
- "0x9874", "0x9874", "0x9874", "0x9874", "0x9874",
- "0x9874", "0x9874", "0x9874", "0x9874", "0x9874",
- "0x9874", "0x9874", "0x9874", "0x9874", "0x98e4",
- "0x98e4", "0x98e4"]
+ "device_id": ["0x1309", "0x130a", "0x130b", "0x130c", "0x130d",
+ "0x130e", "0x130f", "0x1313", "0x1315", "0x1316",
+ "0x1318", "0x131b", "0x131c", "0x131d", "0x6600",
+ "0x6604", "0x6605", "0x6610", "0x6611", "0x6617",
+ "0x6640", "0x6646", "0x6647", "0x6647", "0x6658",
+ "0x665d", "0x665f", "0x6660", "0x6663", "0x6664",
+ "0x6665", "0x6667", "0x67b0", "0x67b1", "0x67b9",
+ "0x67df", "0x67ef", "0x6810", "0x6811", "0x6820",
+ "0x6821", "0x682b", "0x6835", "0x6900", "0x6901",
+ "0x6907", "0x6920", "0x6921", "0x6938", "0x6939",
+ "0x7300", "0x9851", "0x9852", "0x9853", "0x9854",
+ "0x9855", "0x9856", "0x9874", "0x98e4"]
}
]
},
@@ -1365,7 +1354,7 @@ LONG_STRING_CONST(
},
{
"id": 124,
- "description": "New AMD drivers have rendering glitches with GPU Rasterization",
+ "description": "Some AMD drivers have rendering glitches with GPU Rasterization",
"cr_bugs": [653538],
"os" : {
"type": "win"
@@ -1375,6 +1364,14 @@ LONG_STRING_CONST(
"op": ">",
"value": "16.200.1035.1001"
},
+ "exceptions": [
+ {
+ "driver_version": {
+ "op": ">=",
+ "value": "21.19.384.0"
+ }
+ }
+ ],
"features": [
"gpu_rasterization"
]
@@ -1410,18 +1407,6 @@ LONG_STRING_CONST(
]
},
{
- "id": 127,
- "description": "AMD cards have rendering issues with GPU rasterization on Windows",
- "cr_bugs": [660897],
- "os": {
- "type": "win"
- },
- "vendor_id": "0x1002",
- "features": [
- "gpu_rasterization"
- ]
- },
- {
"id": 128,
"description": "WebGL 2 is not yet ready on Android",
"cr_bugs": [295792, 641635],
@@ -1447,6 +1432,54 @@ LONG_STRING_CONST(
"features": [
"all"
]
+ },
+ {
+ "id": 130,
+ "description": "Older NVIDIA GPUs on macOS render incorrectly",
+ "cr_bugs": [676829, 676975],
+ "os": {
+ "type": "macosx"
+ },
+ "vendor_id": "0x10de",
+ "device_id": ["0x0407", "0x0647", "0x0863"],
+ "multi_gpu_category": "any",
+ "features": [
+ "all"
+ ]
+ },
+ {
+ "id": 131,
+ "description": "Mesa drivers older than 10.4.3 is crash prone on Linux Intel i965gm",
+ "cr_bugs": [462426],
+ "os": {
+ "type": "linux"
+ },
+ "driver_vendor": "Mesa",
+ "driver_version": {
+ "op": "<",
+ "value": "10.4.3"
+ },
+ "vendor_id": "8086",
+ "device_id": ["0x2a02"],
+ "features": [
+ "all"
+ ]
+ },
+ {
+ "id": 132,
+ "description": "MediaCodec on VideoCore IV HW crashes on JB",
+ "cr_bugs": [654905],
+ "os": {
+ "type": "android",
+ "version": {
+ "op": "<",
+ "value": "4.4"
+ }
+ },
+ "gl_renderer": ".*VideoCore IV.*",
+ "features": [
+ "accelerated_video_decode"
+ ]
}
]
}
diff --git a/chromium/gpu/gles2_conform_support/egl/context.cc b/chromium/gpu/gles2_conform_support/egl/context.cc
index 2d870e7dc2d..041eac6f7f0 100644
--- a/chromium/gpu/gles2_conform_support/egl/context.cc
+++ b/chromium/gpu/gles2_conform_support/egl/context.cc
@@ -221,6 +221,11 @@ bool Context::IsFenceSyncFlushReceived(uint64_t release) {
return display_->IsFenceSyncFlushReceived(release);
}
+bool Context::IsFenceSyncReleased(uint64_t release) {
+ NOTIMPLEMENTED();
+ return false;
+}
+
void Context::SignalSyncToken(const gpu::SyncToken& sync_token,
const base::Closure& callback) {
NOTIMPLEMENTED();
diff --git a/chromium/gpu/gles2_conform_support/egl/context.h b/chromium/gpu/gles2_conform_support/egl/context.h
index 341b5c0658d..7d45ea70515 100644
--- a/chromium/gpu/gles2_conform_support/egl/context.h
+++ b/chromium/gpu/gles2_conform_support/egl/context.h
@@ -25,7 +25,6 @@
namespace gpu {
class TransferBuffer;
-class TransferBufferManagerInterface;
namespace gles2 {
class GLES2CmdHelper;
@@ -78,6 +77,7 @@ class Context : public base::RefCountedThreadSafe<Context>,
bool IsFenceSyncRelease(uint64_t release) override;
bool IsFenceSyncFlushed(uint64_t release) override;
bool IsFenceSyncFlushReceived(uint64_t release) override;
+ bool IsFenceSyncReleased(uint64_t release) override;
void SignalSyncToken(const gpu::SyncToken& sync_token,
const base::Closure& callback) override;
bool CanWaitUnverifiedSyncToken(const gpu::SyncToken* sync_token) override;
diff --git a/chromium/gpu/ipc/BUILD.gn b/chromium/gpu/ipc/BUILD.gn
index a30ffdcfa98..61afbc6f518 100644
--- a/chromium/gpu/ipc/BUILD.gn
+++ b/chromium/gpu/ipc/BUILD.gn
@@ -16,9 +16,16 @@ group("command_buffer") {
}
}
-source_set("command_buffer_sources") {
+if (is_component_build) {
+ link_target_type = "source_set"
+} else {
+ link_target_type = "static_library"
+}
+target(link_target_type, "command_buffer_sources") {
visibility = [ "//gpu/*" ]
sources = [
+ "gpu_in_process_thread_service.cc",
+ "gpu_in_process_thread_service.h",
"in_process_command_buffer.cc",
"in_process_command_buffer.h",
]
@@ -31,6 +38,8 @@ source_set("command_buffer_sources") {
"//gpu/command_buffer/common:common_sources",
"//gpu/command_buffer/service:service_sources",
"//gpu/config:config_sources",
+ "//gpu/ipc/client:ipc_client_sources",
+ "//gpu/ipc/service:ipc_service_sources",
"//ui/gfx",
"//ui/gl",
"//ui/gl/init",
diff --git a/chromium/gpu/ipc/client/BUILD.gn b/chromium/gpu/ipc/client/BUILD.gn
index aa4f317e4af..53775227407 100644
--- a/chromium/gpu/ipc/client/BUILD.gn
+++ b/chromium/gpu/ipc/client/BUILD.gn
@@ -35,13 +35,6 @@ source_set("ipc_client_sources") {
"gpu_process_hosted_ca_layer_tree_params.h",
]
}
- if (is_android) {
- sources += [
- "android/in_process_surface_texture_manager.cc",
- "android/in_process_surface_texture_manager.h",
- ]
- libs = [ "android" ]
- }
if (use_ozone) {
sources += [
"gpu_memory_buffer_impl_ozone_native_pixmap.cc",
diff --git a/chromium/gpu/ipc/client/android/in_process_surface_texture_manager.cc b/chromium/gpu/ipc/client/android/in_process_surface_texture_manager.cc
deleted file mode 100644
index 84fc5eee88d..00000000000
--- a/chromium/gpu/ipc/client/android/in_process_surface_texture_manager.cc
+++ /dev/null
@@ -1,60 +0,0 @@
-// Copyright 2015 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "gpu/ipc/client/android/in_process_surface_texture_manager.h"
-
-#include <android/native_window.h>
-#include <android/native_window_jni.h>
-
-#include "base/android/jni_android.h"
-#include "base/containers/scoped_ptr_hash_map.h"
-#include "base/logging.h"
-#include "base/memory/ptr_util.h"
-
-namespace gpu {
-
-// static
-InProcessSurfaceTextureManager* InProcessSurfaceTextureManager::GetInstance() {
- return base::Singleton<
- InProcessSurfaceTextureManager,
- base::LeakySingletonTraits<InProcessSurfaceTextureManager>>::get();
-}
-
-void InProcessSurfaceTextureManager::RegisterSurfaceTexture(
- int surface_texture_id,
- int client_id,
- gl::SurfaceTexture* surface_texture) {
- base::AutoLock lock(lock_);
-
- DCHECK(surface_textures_.find(surface_texture_id) == surface_textures_.end());
- surface_textures_.add(
- surface_texture_id,
- base::MakeUnique<gl::ScopedJavaSurface>(surface_texture));
-}
-
-void InProcessSurfaceTextureManager::UnregisterSurfaceTexture(
- int surface_texture_id,
- int client_id) {
- base::AutoLock lock(lock_);
-
- DCHECK(surface_textures_.find(surface_texture_id) != surface_textures_.end());
- surface_textures_.erase(surface_texture_id);
-}
-
-gfx::AcceleratedWidget
-InProcessSurfaceTextureManager::AcquireNativeWidgetForSurfaceTexture(
- int surface_texture_id) {
- base::AutoLock lock(lock_);
-
- DCHECK(surface_textures_.find(surface_texture_id) != surface_textures_.end());
- JNIEnv* env = base::android::AttachCurrentThread();
- return ANativeWindow_fromSurface(
- env, surface_textures_.get(surface_texture_id)->j_surface().obj());
-}
-
-InProcessSurfaceTextureManager::InProcessSurfaceTextureManager() {}
-
-InProcessSurfaceTextureManager::~InProcessSurfaceTextureManager() {}
-
-} // namespace gpu
diff --git a/chromium/gpu/ipc/client/android/in_process_surface_texture_manager.h b/chromium/gpu/ipc/client/android/in_process_surface_texture_manager.h
deleted file mode 100644
index 106fc7aa54a..00000000000
--- a/chromium/gpu/ipc/client/android/in_process_surface_texture_manager.h
+++ /dev/null
@@ -1,48 +0,0 @@
-// Copyright 2015 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef GPU_IPC_CLIENT_ANDROID_IN_PROCESS_SURFACE_TEXTURE_MANAGER_H_
-#define GPU_IPC_CLIENT_ANDROID_IN_PROCESS_SURFACE_TEXTURE_MANAGER_H_
-
-#include <memory>
-
-#include "base/containers/scoped_ptr_hash_map.h"
-#include "base/macros.h"
-#include "base/memory/singleton.h"
-#include "base/synchronization/lock.h"
-#include "gpu/gpu_export.h"
-#include "gpu/ipc/common/android/surface_texture_manager.h"
-#include "ui/gl/android/scoped_java_surface.h"
-
-namespace gpu {
-
-class GPU_EXPORT InProcessSurfaceTextureManager : public SurfaceTextureManager {
- public:
- static GPU_EXPORT InProcessSurfaceTextureManager* GetInstance();
-
- // Overridden from SurfaceTextureManager:
- void RegisterSurfaceTexture(int surface_texture_id,
- int client_id,
- gl::SurfaceTexture* surface_texture) override;
- void UnregisterSurfaceTexture(int surface_texture_id, int client_id) override;
- gfx::AcceleratedWidget AcquireNativeWidgetForSurfaceTexture(
- int surface_texture_id) override;
-
- private:
- friend struct base::DefaultSingletonTraits<InProcessSurfaceTextureManager>;
-
- InProcessSurfaceTextureManager();
- ~InProcessSurfaceTextureManager() override;
-
- using SurfaceTextureMap =
- base::ScopedPtrHashMap<int, std::unique_ptr<gl::ScopedJavaSurface>>;
- SurfaceTextureMap surface_textures_;
- base::Lock lock_;
-
- DISALLOW_COPY_AND_ASSIGN(InProcessSurfaceTextureManager);
-};
-
-} // namespace gpu
-
-#endif // GPU_IPC_CLIENT_ANDROID_IN_PROCESS_SURFACE_TEXTURE_MANAGER_H_
diff --git a/chromium/gpu/ipc/client/command_buffer_proxy_impl.cc b/chromium/gpu/ipc/client/command_buffer_proxy_impl.cc
index 07c31ff9a95..552f23b92d4 100644
--- a/chromium/gpu/ipc/client/command_buffer_proxy_impl.cc
+++ b/chromium/gpu/ipc/client/command_buffer_proxy_impl.cc
@@ -106,9 +106,9 @@ CommandBufferProxyImpl::~CommandBufferProxyImpl() {
}
bool CommandBufferProxyImpl::OnMessageReceived(const IPC::Message& message) {
- std::unique_ptr<base::AutoLock> lock;
+ base::Optional<base::AutoLock> lock;
if (lock_)
- lock.reset(new base::AutoLock(*lock_));
+ lock.emplace(*lock_);
bool handled = true;
IPC_BEGIN_MESSAGE_MAP(CommandBufferProxyImpl, message)
IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_Destroyed, OnDestroyed);
@@ -123,6 +123,7 @@ bool CommandBufferProxyImpl::OnMessageReceived(const IPC::Message& message) {
if (!handled) {
LOG(ERROR) << "Gpu process sent invalid message.";
+ base::AutoLock last_state_lock(last_state_lock_);
OnGpuAsyncMessageError(gpu::error::kInvalidGpuMessage,
gpu::error::kLostContext);
}
@@ -130,9 +131,10 @@ bool CommandBufferProxyImpl::OnMessageReceived(const IPC::Message& message) {
}
void CommandBufferProxyImpl::OnChannelError() {
- std::unique_ptr<base::AutoLock> lock;
+ base::Optional<base::AutoLock> lock;
if (lock_)
- lock.reset(new base::AutoLock(*lock_));
+ lock.emplace(*lock_);
+ base::AutoLock last_state_lock(last_state_lock_);
gpu::error::ContextLostReason context_lost_reason =
gpu::error::kGpuChannelLost;
@@ -148,6 +150,7 @@ void CommandBufferProxyImpl::OnChannelError() {
void CommandBufferProxyImpl::OnDestroyed(gpu::error::ContextLostReason reason,
gpu::error::Error error) {
+ base::AutoLock lock(last_state_lock_);
OnGpuAsyncMessageError(reason, error);
}
@@ -177,6 +180,7 @@ void CommandBufferProxyImpl::OnSignalAck(uint32_t id) {
SignalTaskMap::iterator it = signal_tasks_.find(id);
if (it == signal_tasks_.end()) {
LOG(ERROR) << "Gpu process sent invalid SignalAck.";
+ base::AutoLock lock(last_state_lock_);
OnGpuAsyncMessageError(gpu::error::kInvalidGpuMessage,
gpu::error::kLostContext);
return;
@@ -210,7 +214,6 @@ bool CommandBufferProxyImpl::Initialize(
if (!base::SharedMemory::IsHandleValid(handle))
return false;
-
// TODO(vadimt): Remove ScopedTracker below once crbug.com/125248 is fixed.
tracked_objects::ScopedTracker tracking_profile(
FROM_HERE_WITH_EXPLICIT_FUNCTION(
@@ -239,17 +242,15 @@ bool CommandBufferProxyImpl::Initialize(
return true;
}
-gpu::CommandBuffer::State CommandBufferProxyImpl::GetLastState() {
- return last_state_;
-}
-
-int32_t CommandBufferProxyImpl::GetLastToken() {
+CommandBuffer::State CommandBufferProxyImpl::GetLastState() {
+ base::AutoLock lock(last_state_lock_);
TryUpdateState();
- return last_state_.token;
+ return last_state_;
}
void CommandBufferProxyImpl::Flush(int32_t put_offset) {
CheckLock();
+ base::AutoLock lock(last_state_lock_);
if (last_state_.error != gpu::error::kNoError)
return;
@@ -283,6 +284,7 @@ void CommandBufferProxyImpl::Flush(int32_t put_offset) {
void CommandBufferProxyImpl::OrderingBarrier(int32_t put_offset) {
CheckLock();
+ base::AutoLock lock(last_state_lock_);
if (last_state_.error != gpu::error::kNoError)
return;
@@ -333,30 +335,53 @@ void CommandBufferProxyImpl::SetUpdateVSyncParametersCallback(
update_vsync_parameters_completion_callback_ = callback;
}
-void CommandBufferProxyImpl::WaitForTokenInRange(int32_t start, int32_t end) {
+gpu::CommandBuffer::State CommandBufferProxyImpl::WaitForTokenInRange(
+ int32_t start,
+ int32_t end) {
CheckLock();
+ base::AutoLock lock(last_state_lock_);
TRACE_EVENT2("gpu", "CommandBufferProxyImpl::WaitForToken", "start", start,
"end", end);
+ // Error needs to be checked in case the state was updated on another thread.
+ // We need to make sure that the reentrant context loss callback is called so
+ // that the share group is also lost before we return any error up the stack.
+ if (last_state_.error != gpu::error::kNoError) {
+ if (gpu_control_client_)
+ gpu_control_client_->OnGpuControlLostContextMaybeReentrant();
+ return last_state_;
+ }
TryUpdateState();
if (!InRange(start, end, last_state_.token) &&
last_state_.error == gpu::error::kNoError) {
gpu::CommandBuffer::State state;
if (Send(new GpuCommandBufferMsg_WaitForTokenInRange(route_id_, start, end,
- &state)))
+ &state))) {
SetStateFromSyncReply(state);
+ }
}
if (!InRange(start, end, last_state_.token) &&
last_state_.error == gpu::error::kNoError) {
LOG(ERROR) << "GPU state invalid after WaitForTokenInRange.";
OnGpuSyncReplyError();
}
+ return last_state_;
}
-void CommandBufferProxyImpl::WaitForGetOffsetInRange(int32_t start,
- int32_t end) {
+gpu::CommandBuffer::State CommandBufferProxyImpl::WaitForGetOffsetInRange(
+ int32_t start,
+ int32_t end) {
CheckLock();
+ base::AutoLock lock(last_state_lock_);
TRACE_EVENT2("gpu", "CommandBufferProxyImpl::WaitForGetOffset", "start",
start, "end", end);
+ // Error needs to be checked in case the state was updated on another thread.
+ // We need to make sure that the reentrant context loss callback is called so
+ // that the share group is also lost before we return any error up the stack.
+ if (last_state_.error != gpu::error::kNoError) {
+ if (gpu_control_client_)
+ gpu_control_client_->OnGpuControlLostContextMaybeReentrant();
+ return last_state_;
+ }
TryUpdateState();
if (!InRange(start, end, last_state_.get_offset) &&
last_state_.error == gpu::error::kNoError) {
@@ -370,10 +395,12 @@ void CommandBufferProxyImpl::WaitForGetOffsetInRange(int32_t start,
LOG(ERROR) << "GPU state invalid after WaitForGetOffsetInRange.";
OnGpuSyncReplyError();
}
+ return last_state_;
}
void CommandBufferProxyImpl::SetGetBuffer(int32_t shm_id) {
CheckLock();
+ base::AutoLock lock(last_state_lock_);
if (last_state_.error != gpu::error::kNoError)
return;
@@ -385,6 +412,7 @@ scoped_refptr<gpu::Buffer> CommandBufferProxyImpl::CreateTransferBuffer(
size_t size,
int32_t* id) {
CheckLock();
+ base::AutoLock lock(last_state_lock_);
*id = -1;
if (last_state_.error != gpu::error::kNoError)
@@ -428,6 +456,7 @@ scoped_refptr<gpu::Buffer> CommandBufferProxyImpl::CreateTransferBuffer(
void CommandBufferProxyImpl::DestroyTransferBuffer(int32_t id) {
CheckLock();
+ base::AutoLock lock(last_state_lock_);
if (last_state_.error != gpu::error::kNoError)
return;
@@ -448,6 +477,7 @@ int32_t CommandBufferProxyImpl::CreateImage(ClientBuffer buffer,
size_t height,
unsigned internal_format) {
CheckLock();
+ base::AutoLock lock(last_state_lock_);
if (last_state_.error != gpu::error::kNoError)
return -1;
@@ -462,10 +492,9 @@ int32_t CommandBufferProxyImpl::CreateImage(ClientBuffer buffer,
// This handle is owned by the GPU process and must be passed to it or it
// will leak. In otherwords, do not early out on error between here and the
// sending of the CreateImage IPC below.
- bool requires_sync_token = false;
gfx::GpuMemoryBufferHandle handle =
- channel_->ShareGpuMemoryBufferToGpuProcess(gpu_memory_buffer->GetHandle(),
- &requires_sync_token);
+ gfx::CloneHandleForIPC(gpu_memory_buffer->GetHandle());
+ bool requires_sync_token = handle.type == gfx::IO_SURFACE_BUFFER;
uint64_t image_fence_sync = 0;
if (requires_sync_token) {
@@ -509,6 +538,7 @@ int32_t CommandBufferProxyImpl::CreateImage(ClientBuffer buffer,
void CommandBufferProxyImpl::DestroyImage(int32_t id) {
CheckLock();
+ base::AutoLock lock(last_state_lock_);
if (last_state_.error != gpu::error::kNoError)
return;
@@ -522,7 +552,7 @@ int32_t CommandBufferProxyImpl::CreateGpuMemoryBufferImage(
unsigned usage) {
CheckLock();
std::unique_ptr<gfx::GpuMemoryBuffer> buffer(
- channel_->gpu_memory_buffer_manager()->AllocateGpuMemoryBuffer(
+ channel_->gpu_memory_buffer_manager()->CreateGpuMemoryBuffer(
gfx::Size(width, height),
gpu::DefaultBufferFormatForImageFormat(internal_format),
gfx::BufferUsage::SCANOUT, gpu::kNullSurfaceHandle));
@@ -536,6 +566,7 @@ int32_t CommandBufferProxyImpl::CreateGpuMemoryBufferImage(
uint32_t CommandBufferProxyImpl::CreateStreamTexture(uint32_t texture_id) {
CheckLock();
+ base::AutoLock lock(last_state_lock_);
if (last_state_.error != gpu::error::kNoError)
return 0;
@@ -588,6 +619,7 @@ bool CommandBufferProxyImpl::IsFenceSyncFlushed(uint64_t release) {
bool CommandBufferProxyImpl::IsFenceSyncFlushReceived(uint64_t release) {
CheckLock();
+ base::AutoLock lock(last_state_lock_);
if (last_state_.error != gpu::error::kNoError)
return false;
@@ -611,9 +643,18 @@ bool CommandBufferProxyImpl::IsFenceSyncFlushReceived(uint64_t release) {
return false;
}
+// This can be called from any thread without holding |lock_|. Use a thread-safe
+// non-error throwing variant of TryUpdateState for this.
+bool CommandBufferProxyImpl::IsFenceSyncReleased(uint64_t release) {
+ base::AutoLock lock(last_state_lock_);
+ TryUpdateStateThreadSafe();
+ return release <= last_state_.release_count;
+}
+
void CommandBufferProxyImpl::SignalSyncToken(const gpu::SyncToken& sync_token,
const base::Closure& callback) {
CheckLock();
+ base::AutoLock lock(last_state_lock_);
if (last_state_.error != gpu::error::kNoError)
return;
@@ -648,6 +689,7 @@ bool CommandBufferProxyImpl::CanWaitUnverifiedSyncToken(
void CommandBufferProxyImpl::SignalQuery(uint32_t query,
const base::Closure& callback) {
CheckLock();
+ base::AutoLock lock(last_state_lock_);
if (last_state_.error != gpu::error::kNoError)
return;
@@ -666,6 +708,7 @@ void CommandBufferProxyImpl::SignalQuery(uint32_t query,
void CommandBufferProxyImpl::TakeFrontBuffer(const gpu::Mailbox& mailbox) {
CheckLock();
+ base::AutoLock lock(last_state_lock_);
if (last_state_.error != gpu::error::kNoError)
return;
@@ -676,6 +719,7 @@ void CommandBufferProxyImpl::ReturnFrontBuffer(const gpu::Mailbox& mailbox,
const gpu::SyncToken& sync_token,
bool is_lost) {
CheckLock();
+ base::AutoLock lock(last_state_lock_);
if (last_state_.error != gpu::error::kNoError)
return;
@@ -683,36 +727,47 @@ void CommandBufferProxyImpl::ReturnFrontBuffer(const gpu::Mailbox& mailbox,
Send(new GpuCommandBufferMsg_ReturnFrontBuffer(route_id_, mailbox, is_lost));
}
-gpu::error::Error CommandBufferProxyImpl::GetLastError() {
- return last_state_.error;
-}
-
bool CommandBufferProxyImpl::Send(IPC::Message* msg) {
- // Caller should not intentionally send a message if the context is lost.
- DCHECK(last_state_.error == gpu::error::kNoError);
DCHECK(channel_);
-
- if (!msg->is_sync()) {
- bool result = channel_->Send(msg);
- // Send() should always return true for async messages.
- DCHECK(result);
- return true;
+ last_state_lock_.AssertAcquired();
+ DCHECK_EQ(gpu::error::kNoError, last_state_.error);
+
+ last_state_lock_.Release();
+
+ // Call is_sync() before sending message.
+ bool is_sync = msg->is_sync();
+ bool result = channel_->Send(msg);
+ // Send() should always return true for async messages.
+ DCHECK(is_sync || result);
+
+ last_state_lock_.Acquire();
+
+ if (last_state_.error != gpu::error::kNoError) {
+ // Error needs to be checked in case the state was updated on another thread
+ // while we were waiting on Send. We need to make sure that the reentrant
+ // context loss callback is called so that the share group is also lost
+ // before we return any error up the stack.
+ if (gpu_control_client_)
+ gpu_control_client_->OnGpuControlLostContextMaybeReentrant();
+ return false;
}
- if (channel_->Send(msg))
- return true;
+ if (!result) {
+ // Flag the command buffer as lost. Defer deleting the channel until
+ // OnChannelError is called after returning to the message loop in case it
+ // is referenced elsewhere.
+ DVLOG(1) << "CommandBufferProxyImpl::Send failed. Losing context.";
+ OnClientError(gpu::error::kLostContext);
+ return false;
+ }
- // Flag the command buffer as lost. Defer deleting the channel until
- // OnChannelError is called after returning to the message loop in case
- // it is referenced elsewhere.
- DVLOG(1) << "CommandBufferProxyImpl::Send failed. Losing context.";
- OnClientError(gpu::error::kLostContext);
- return false;
+ return true;
}
void CommandBufferProxyImpl::SetStateFromSyncReply(
const gpu::CommandBuffer::State& state) {
- DCHECK(last_state_.error == gpu::error::kNoError);
+ CheckLock();
+ last_state_lock_.AssertAcquired();
// Handle wraparound. It works as long as we don't have more than 2B state
// updates in flight across which reordering occurs.
if (state.generation - last_state_.generation < 0x80000000U)
@@ -722,6 +777,8 @@ void CommandBufferProxyImpl::SetStateFromSyncReply(
}
void CommandBufferProxyImpl::TryUpdateState() {
+ CheckLock();
+ last_state_lock_.AssertAcquired();
if (last_state_.error == gpu::error::kNoError) {
shared_state()->Read(&last_state_);
if (last_state_.error != gpu::error::kNoError)
@@ -729,7 +786,21 @@ void CommandBufferProxyImpl::TryUpdateState() {
}
}
+void CommandBufferProxyImpl::TryUpdateStateThreadSafe() {
+ last_state_lock_.AssertAcquired();
+ if (last_state_.error == gpu::error::kNoError) {
+ shared_state()->Read(&last_state_);
+ if (last_state_.error != gpu::error::kNoError) {
+ callback_thread_->PostTask(
+ FROM_HERE,
+ base::Bind(&CommandBufferProxyImpl::LockAndDisconnectChannel,
+ weak_this_));
+ }
+ }
+}
+
void CommandBufferProxyImpl::TryUpdateStateDontReportError() {
+ last_state_lock_.AssertAcquired();
if (last_state_.error == gpu::error::kNoError)
shared_state()->Read(&last_state_);
}
@@ -800,6 +871,8 @@ void CommandBufferProxyImpl::OnUpdateVSyncParameters(base::TimeTicks timebase,
}
void CommandBufferProxyImpl::OnGpuSyncReplyError() {
+ CheckLock();
+ last_state_lock_.AssertAcquired();
last_state_.error = gpu::error::kLostContext;
last_state_.context_lost_reason = gpu::error::kInvalidGpuMessage;
// This method may be inside a callstack from the GpuControlClient (we got a
@@ -812,15 +885,20 @@ void CommandBufferProxyImpl::OnGpuAsyncMessageError(
gpu::error::ContextLostReason reason,
gpu::error::Error error) {
CheckLock();
+ last_state_lock_.AssertAcquired();
last_state_.error = error;
last_state_.context_lost_reason = reason;
// This method only occurs when receiving IPC messages, so we know it's not in
- // a callstack from the GpuControlClient.
+ // a callstack from the GpuControlClient. Unlock the state lock to prevent
+ // a deadlock when calling the context loss callback.
+ base::AutoUnlock unlock(last_state_lock_);
DisconnectChannel();
}
void CommandBufferProxyImpl::OnGpuStateError() {
- DCHECK(last_state_.error != gpu::error::kNoError);
+ CheckLock();
+ last_state_lock_.AssertAcquired();
+ DCHECK_NE(gpu::error::kNoError, last_state_.error);
// This method may be inside a callstack from the GpuControlClient (we
// encountered an error while trying to perform some action). So avoid
// re-entering the GpuControlClient here.
@@ -829,6 +907,7 @@ void CommandBufferProxyImpl::OnGpuStateError() {
void CommandBufferProxyImpl::OnClientError(gpu::error::Error error) {
CheckLock();
+ last_state_lock_.AssertAcquired();
last_state_.error = error;
last_state_.context_lost_reason = gpu::error::kUnknown;
// This method may be inside a callstack from the GpuControlClient (we
@@ -839,6 +918,7 @@ void CommandBufferProxyImpl::OnClientError(gpu::error::Error error) {
void CommandBufferProxyImpl::DisconnectChannelInFreshCallStack() {
CheckLock();
+ last_state_lock_.AssertAcquired();
// Inform the GpuControlClient of the lost state immediately, though this may
// be a re-entrant call to the client so we use the MaybeReentrant variant.
if (gpu_control_client_)
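The CommandBufferProxyImpl changes above add last_state_lock_ so the cached State can be read from threads that do not hold the optional proxy |lock_|, and they replace the heap-allocated AutoLock with base::Optional. A hedged sketch of the combined pattern; the function and parameter names are illustrative, not part of the tree:

#include "base/optional.h"
#include "base/synchronization/lock.h"

// Sketch only: take the proxy lock when one exists, then always guard the
// cached state with its dedicated lock before reading or updating it.
void ReadCachedState(base::Lock* proxy_lock, base::Lock* last_state_lock) {
  base::Optional<base::AutoLock> lock;
  if (proxy_lock)
    lock.emplace(*proxy_lock);
  base::AutoLock state_lock(*last_state_lock);
  // ... read or update the cached CommandBuffer::State here ...
}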
diff --git a/chromium/gpu/ipc/client/command_buffer_proxy_impl.h b/chromium/gpu/ipc/client/command_buffer_proxy_impl.h
index 5c8b269982b..ea8f8bfcaeb 100644
--- a/chromium/gpu/ipc/client/command_buffer_proxy_impl.h
+++ b/chromium/gpu/ipc/client/command_buffer_proxy_impl.h
@@ -16,7 +16,6 @@
#include "base/callback.h"
#include "base/compiler_specific.h"
#include "base/containers/hash_tables.h"
-#include "base/containers/scoped_ptr_hash_map.h"
#include "base/macros.h"
#include "base/memory/ref_counted.h"
#include "base/memory/weak_ptr.h"
@@ -45,10 +44,6 @@ namespace base {
class SharedMemory;
}
-namespace gfx {
-class Size;
-}
-
namespace gpu {
struct GpuProcessHostedCALayerTreeParamsMac;
struct Mailbox;
@@ -100,11 +95,10 @@ class GPU_EXPORT CommandBufferProxyImpl
// CommandBuffer implementation:
State GetLastState() override;
- int32_t GetLastToken() override;
void Flush(int32_t put_offset) override;
void OrderingBarrier(int32_t put_offset) override;
- void WaitForTokenInRange(int32_t start, int32_t end) override;
- void WaitForGetOffsetInRange(int32_t start, int32_t end) override;
+ State WaitForTokenInRange(int32_t start, int32_t end) override;
+ State WaitForGetOffsetInRange(int32_t start, int32_t end) override;
void SetGetBuffer(int32_t shm_id) override;
scoped_refptr<gpu::Buffer> CreateTransferBuffer(size_t size,
int32_t* id) override;
@@ -132,6 +126,7 @@ class GPU_EXPORT CommandBufferProxyImpl
bool IsFenceSyncRelease(uint64_t release) override;
bool IsFenceSyncFlushed(uint64_t release) override;
bool IsFenceSyncFlushReceived(uint64_t release) override;
+ bool IsFenceSyncReleased(uint64_t release) override;
void SignalSyncToken(const gpu::SyncToken& sync_token,
const base::Closure& callback) override;
bool CanWaitUnverifiedSyncToken(const gpu::SyncToken* sync_token) override;
@@ -161,12 +156,6 @@ class GPU_EXPORT CommandBufferProxyImpl
void SetUpdateVSyncParametersCallback(
const UpdateVSyncParametersCallback& callback);
- // TODO(apatrick): this is a temporary optimization while skia is calling
- // ContentGLContext::MakeCurrent prior to every GL call. It saves returning 6
- // ints redundantly when only the error is needed for the
- // CommandBufferProxyImpl implementation.
- gpu::error::Error GetLastError() override;
-
int32_t route_id() const { return route_id_; }
const scoped_refptr<GpuChannelHost>& channel() const { return channel_; }
@@ -215,6 +204,9 @@ class GPU_EXPORT CommandBufferProxyImpl
// Try to read an updated copy of the state from shared memory, and calls
// OnGpuStateError() if the new state has an error.
void TryUpdateState();
+ // Like above but calls the error handler and disconnects channel by posting
+ // a task.
+ void TryUpdateStateThreadSafe();
// Like the above but does not call the error event handler if the new state
// has an error.
void TryUpdateStateDontReportError();
@@ -244,6 +236,16 @@ class GPU_EXPORT CommandBufferProxyImpl
// The shared memory area used to update state.
gpu::CommandBufferSharedState* shared_state() const;
+ // The shared memory area used to update state.
+ std::unique_ptr<base::SharedMemory> shared_state_shm_;
+
+ // The last cached state received from the service.
+ State last_state_;
+
+ // Lock guarding shared state (e.g. the sync token release count) accessed
+ // from multiple threads; allows tracking command buffer progress from another thread.
+ base::Lock last_state_lock_;
+
// There should be a lock_ if this is going to be used across multiple
// threads, or we guarantee it is used by a single thread by using a thread
// checker if no lock_ is set.
@@ -256,12 +258,6 @@ class GPU_EXPORT CommandBufferProxyImpl
// Unowned list of DeletionObservers.
base::ObserverList<DeletionObserver> deletion_observers_;
- // The last cached state received from the service.
- State last_state_;
-
- // The shared memory area used to update state.
- std::unique_ptr<base::SharedMemory> shared_state_shm_;
-
scoped_refptr<GpuChannelHost> channel_;
const gpu::CommandBufferId command_buffer_id_;
const int32_t route_id_;
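With GetLastToken() and GetLastError() removed, callers obtain progress and error information from the State snapshot that the wait methods now return. A minimal sketch of a call site, assuming the usual Chromium command buffer headers; the function and variable names are illustrative only:

#include "gpu/command_buffer/common/command_buffer.h"
#include "gpu/command_buffer/common/constants.h"

// Sketch only: wait until the service has passed a token in [start, end],
// then inspect the returned state instead of issuing separate
// GetLastToken()/GetLastError() calls (both removed above).
void WaitAndCheck(gpu::CommandBuffer* command_buffer,
                  int32_t start,
                  int32_t end) {
  gpu::CommandBuffer::State state =
      command_buffer->WaitForTokenInRange(start, end);
  if (state.error != gpu::error::kNoError) {
    // The context was lost; the proxy has already routed the error to its
    // GpuControlClient, so the caller only needs to stop issuing work.
    return;
  }
  // state.token now lies in the requested range.
}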
diff --git a/chromium/gpu/ipc/client/gpu_channel_host.cc b/chromium/gpu/ipc/client/gpu_channel_host.cc
index b639d8ffa54..5e41fd9e274 100644
--- a/chromium/gpu/ipc/client/gpu_channel_host.cc
+++ b/chromium/gpu/ipc/client/gpu_channel_host.cc
@@ -220,7 +220,7 @@ void GpuChannelHost::RemoveRoute(int route_id) {
}
base::SharedMemoryHandle GpuChannelHost::ShareToGpuProcess(
- base::SharedMemoryHandle source_handle) {
+ const base::SharedMemoryHandle& source_handle) {
if (IsLost())
return base::SharedMemory::NULLHandle();
@@ -232,52 +232,6 @@ int32_t GpuChannelHost::ReserveTransferBufferId() {
return g_next_transfer_buffer_id.GetNext() + 1;
}
-gfx::GpuMemoryBufferHandle GpuChannelHost::ShareGpuMemoryBufferToGpuProcess(
- const gfx::GpuMemoryBufferHandle& source_handle,
- bool* requires_sync_point) {
- switch (source_handle.type) {
- case gfx::SHARED_MEMORY_BUFFER: {
- gfx::GpuMemoryBufferHandle handle;
- handle.type = gfx::SHARED_MEMORY_BUFFER;
- handle.handle = ShareToGpuProcess(source_handle.handle);
- handle.offset = source_handle.offset;
- handle.stride = source_handle.stride;
- *requires_sync_point = false;
- return handle;
- }
-#if defined(USE_OZONE)
- case gfx::OZONE_NATIVE_PIXMAP: {
- std::vector<base::ScopedFD> scoped_fds;
- for (auto& fd : source_handle.native_pixmap_handle.fds) {
- base::ScopedFD scoped_fd(HANDLE_EINTR(dup(fd.fd)));
- if (!scoped_fd.is_valid()) {
- PLOG(ERROR) << "dup";
- return gfx::GpuMemoryBufferHandle();
- }
- scoped_fds.emplace_back(std::move(scoped_fd));
- }
- gfx::GpuMemoryBufferHandle handle;
- handle.type = gfx::OZONE_NATIVE_PIXMAP;
- handle.id = source_handle.id;
- for (auto& scoped_fd : scoped_fds) {
- handle.native_pixmap_handle.fds.emplace_back(scoped_fd.release(),
- true /* auto_close */);
- }
- handle.native_pixmap_handle.planes =
- source_handle.native_pixmap_handle.planes;
- *requires_sync_point = false;
- return handle;
- }
-#endif
- case gfx::IO_SURFACE_BUFFER:
- *requires_sync_point = true;
- return source_handle;
- default:
- NOTREACHED();
- return gfx::GpuMemoryBufferHandle();
- }
-}
-
int32_t GpuChannelHost::ReserveImageId() {
return next_image_id_.GetNext();
}
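ShareToGpuProcess() now takes the handle by const reference, avoiding a copy of the platform handle. A sketch of a call site, assuming a live channel and an already-created base::SharedMemory; the helper name is an assumption:

#include "base/memory/shared_memory.h"
#include "gpu/ipc/client/gpu_channel_host.h"

// Illustrative only: duplicate a shared memory handle for the GPU process.
// An invalid handle is returned if the channel has already been lost.
base::SharedMemoryHandle ShareBuffer(gpu::GpuChannelHost* channel,
                                     const base::SharedMemory& shm) {
  base::SharedMemoryHandle handle = channel->ShareToGpuProcess(shm.handle());
  if (!base::SharedMemory::IsHandleValid(handle))
    return base::SharedMemory::NULLHandle();
  return handle;
}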
diff --git a/chromium/gpu/ipc/client/gpu_channel_host.h b/chromium/gpu/ipc/client/gpu_channel_host.h
index 5acf6c79a5f..989f1df6a8d 100644
--- a/chromium/gpu/ipc/client/gpu_channel_host.h
+++ b/chromium/gpu/ipc/client/gpu_channel_host.h
@@ -13,7 +13,6 @@
#include <vector>
#include "base/atomic_sequence_num.h"
-#include "base/containers/scoped_ptr_hash_map.h"
#include "base/macros.h"
#include "base/memory/ref_counted.h"
#include "base/memory/weak_ptr.h"
@@ -139,18 +138,11 @@ class GPU_EXPORT GpuChannelHost
// GPU process. The caller is responsible for ensuring it is closed. Returns
// an invalid handle on failure.
base::SharedMemoryHandle ShareToGpuProcess(
- base::SharedMemoryHandle source_handle);
+ const base::SharedMemoryHandle& source_handle);
// Reserve one unused transfer buffer ID.
int32_t ReserveTransferBufferId();
- // Returns a GPU memory buffer handle to the buffer that can be sent via
- // IPC to the GPU process. The caller is responsible for ensuring it is
- // closed. Returns an invalid handle on failure.
- gfx::GpuMemoryBufferHandle ShareGpuMemoryBufferToGpuProcess(
- const gfx::GpuMemoryBufferHandle& source_handle,
- bool* requires_sync_point);
-
// Reserve one unused image ID.
int32_t ReserveImageId();
diff --git a/chromium/gpu/ipc/client/gpu_in_process_context_tests.cc b/chromium/gpu/ipc/client/gpu_in_process_context_tests.cc
index fdce1c024d4..7562908be44 100644
--- a/chromium/gpu/ipc/client/gpu_in_process_context_tests.cc
+++ b/chromium/gpu/ipc/client/gpu_in_process_context_tests.cc
@@ -12,6 +12,7 @@
#include "base/threading/thread_task_runner_handle.h"
#include "gpu/command_buffer/client/gles2_implementation.h"
#include "gpu/command_buffer/client/shared_memory_limits.h"
+#include "gpu/ipc/common/surface_handle.h"
#include "gpu/ipc/gl_in_process_context.h"
#include "testing/gtest/include/gtest/gtest.h"
#include "ui/gl/gl_surface.h"
@@ -32,16 +33,16 @@ class ContextTestBase : public testing::Test {
attributes.sample_buffers = 1;
attributes.bind_generates_resource = false;
- context_.reset(gpu::GLInProcessContext::Create(
- nullptr, /* service */
- nullptr, /* surface */
- true, /* offscreen */
- gfx::kNullAcceleratedWidget, /* window */
- nullptr, /* share_context */
- attributes, gpu::SharedMemoryLimits(),
- nullptr, /* gpu_memory_buffer_manager */
- nullptr, /* image_factory */
- base::ThreadTaskRunnerHandle::Get()));
+ context_.reset(
+ gpu::GLInProcessContext::Create(nullptr, /* service */
+ nullptr, /* surface */
+ true, /* offscreen */
+ gpu::kNullSurfaceHandle, /* window */
+ nullptr, /* share_context */
+ attributes, gpu::SharedMemoryLimits(),
+ nullptr, /* gpu_memory_buffer_manager */
+ nullptr, /* image_factory */
+ base::ThreadTaskRunnerHandle::Get()));
gl_ = context_->GetImplementation();
context_support_ = context_->GetImplementation();
}
diff --git a/chromium/gpu/ipc/client/gpu_memory_buffer_impl.cc b/chromium/gpu/ipc/client/gpu_memory_buffer_impl.cc
index 185e921f78a..a673cbf2555 100644
--- a/chromium/gpu/ipc/client/gpu_memory_buffer_impl.cc
+++ b/chromium/gpu/ipc/client/gpu_memory_buffer_impl.cc
@@ -30,7 +30,8 @@ GpuMemoryBufferImpl::GpuMemoryBufferImpl(gfx::GpuMemoryBufferId id,
GpuMemoryBufferImpl::~GpuMemoryBufferImpl() {
DCHECK(!mapped_);
- callback_.Run(destruction_sync_token_);
+ if (!callback_.is_null())
+ callback_.Run(destruction_sync_token_);
}
// static
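The destructor above now tolerates a null destruction callback. A minimal sketch of the same guard in isolation, assuming base::Closure:

#include "base/callback.h"

// Sketch: a default-constructed base::Closure is null, so Run() must be
// guarded before invoking it, exactly as the destructor above now does.
void RunIfSet(const base::Closure& callback) {
  if (!callback.is_null())
    callback.Run();
}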
diff --git a/chromium/gpu/ipc/client/gpu_memory_buffer_impl.h b/chromium/gpu/ipc/client/gpu_memory_buffer_impl.h
index a8dc56b7e3a..43e23e34bc5 100644
--- a/chromium/gpu/ipc/client/gpu_memory_buffer_impl.h
+++ b/chromium/gpu/ipc/client/gpu_memory_buffer_impl.h
@@ -24,9 +24,9 @@ class GPU_EXPORT GpuMemoryBufferImpl : public gfx::GpuMemoryBuffer {
~GpuMemoryBufferImpl() override;
// Creates an instance from the given |handle|. |size| and |internalformat|
- // should match what was used to allocate the |handle|. |callback| is
- // called when instance is deleted, which is not necessarily on the same
- // thread as this function was called on and instance was created on.
+ // should match what was used to allocate the |handle|. |callback|, if
+ // non-null, is called when the instance is deleted, not necessarily on the
+ // same thread this function was called on or the instance was created on.
static std::unique_ptr<GpuMemoryBufferImpl> CreateFromHandle(
const gfx::GpuMemoryBufferHandle& handle,
const gfx::Size& size,
diff --git a/chromium/gpu/ipc/client/gpu_memory_buffer_impl_ozone_native_pixmap.cc b/chromium/gpu/ipc/client/gpu_memory_buffer_impl_ozone_native_pixmap.cc
index 8e84b7dd73f..e85f23be46d 100644
--- a/chromium/gpu/ipc/client/gpu_memory_buffer_impl_ozone_native_pixmap.cc
+++ b/chromium/gpu/ipc/client/gpu_memory_buffer_impl_ozone_native_pixmap.cc
@@ -47,22 +47,26 @@ GpuMemoryBufferImplOzoneNativePixmap::CreateFromHandle(
gfx::BufferFormat format,
gfx::BufferUsage usage,
const DestructionCallback& callback) {
- DCHECK_EQ(handle.native_pixmap_handle.fds.size(), 1u);
+ DCHECK_LE(handle.native_pixmap_handle.fds.size(), 1u);
// GpuMemoryBufferImpl needs the FD to implement GetHandle() but
// ui::ClientNativePixmapFactory::ImportFromHandle is expected to take
// ownership of the FD passed in the handle so we have to dup it here in
// order to pass a valid FD to the GpuMemoryBufferImpl ctor.
- base::ScopedFD scoped_fd(
- HANDLE_EINTR(dup(handle.native_pixmap_handle.fds[0].fd)));
- if (!scoped_fd.is_valid()) {
- PLOG(ERROR) << "dup";
- return nullptr;
+ base::ScopedFD scoped_fd;
+ if (!handle.native_pixmap_handle.fds.empty()) {
+ scoped_fd.reset(HANDLE_EINTR(dup(handle.native_pixmap_handle.fds[0].fd)));
+ if (!scoped_fd.is_valid()) {
+ PLOG(ERROR) << "dup";
+ return nullptr;
+ }
}
gfx::NativePixmapHandle native_pixmap_handle;
- native_pixmap_handle.fds.emplace_back(handle.native_pixmap_handle.fds[0].fd,
- true /* auto_close */);
+ if (scoped_fd.is_valid()) {
+ native_pixmap_handle.fds.emplace_back(handle.native_pixmap_handle.fds[0].fd,
+ true /* auto_close */);
+ }
native_pixmap_handle.planes = handle.native_pixmap_handle.planes;
std::unique_ptr<ui::ClientNativePixmap> native_pixmap =
ui::ClientNativePixmapFactory::GetInstance()->ImportFromHandle(
@@ -125,8 +129,10 @@ gfx::GpuMemoryBufferHandle GpuMemoryBufferImplOzoneNativePixmap::GetHandle()
gfx::GpuMemoryBufferHandle handle;
handle.type = gfx::OZONE_NATIVE_PIXMAP;
handle.id = id_;
- handle.native_pixmap_handle.fds.emplace_back(fd_.get(),
- false /* auto_close */);
+ if (fd_.is_valid()) {
+ handle.native_pixmap_handle.fds.emplace_back(fd_.get(),
+ false /* auto_close */);
+ }
handle.native_pixmap_handle.planes = planes_;
return handle;
}
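Because the DCHECK was relaxed to DCHECK_LE, the incoming handle may now carry no file descriptors at all, so the duplication is conditional. A self-contained sketch of the same pattern; the helper name is an assumption:

#include <unistd.h>

#include <vector>

#include "base/file_descriptor_posix.h"
#include "base/files/scoped_file.h"
#include "base/logging.h"
#include "base/posix/eintr_wrapper.h"

// Hypothetical helper mirroring the change above: duplicate an fd only when
// one is present; an empty fd list is no longer treated as an error.
base::ScopedFD MaybeDuplicateFd(const std::vector<base::FileDescriptor>& fds) {
  base::ScopedFD scoped_fd;
  if (!fds.empty()) {
    scoped_fd.reset(HANDLE_EINTR(dup(fds[0].fd)));
    if (!scoped_fd.is_valid())
      PLOG(ERROR) << "dup";
  }
  return scoped_fd;
}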
diff --git a/chromium/gpu/ipc/client/gpu_memory_buffer_impl_shared_memory.cc b/chromium/gpu/ipc/client/gpu_memory_buffer_impl_shared_memory.cc
index 910cbe9b4a2..9c2e1bc60c6 100644
--- a/chromium/gpu/ipc/client/gpu_memory_buffer_impl_shared_memory.cc
+++ b/chromium/gpu/ipc/client/gpu_memory_buffer_impl_shared_memory.cc
@@ -15,11 +15,6 @@
#include "ui/gl/gl_bindings.h"
namespace gpu {
-namespace {
-
-void Noop() {}
-
-} // namespace
GpuMemoryBufferImplSharedMemory::GpuMemoryBufferImplSharedMemory(
gfx::GpuMemoryBufferId id,
@@ -59,7 +54,7 @@ GpuMemoryBufferImplSharedMemory::Create(gfx::GpuMemoryBufferId id,
// static
gfx::GpuMemoryBufferHandle
-GpuMemoryBufferImplSharedMemory::AllocateForChildProcess(
+GpuMemoryBufferImplSharedMemory::CreateGpuMemoryBuffer(
gfx::GpuMemoryBufferId id,
const gfx::Size& size,
gfx::BufferFormat format) {
@@ -164,16 +159,8 @@ base::Closure GpuMemoryBufferImplSharedMemory::AllocateForTesting(
gfx::BufferFormat format,
gfx::BufferUsage usage,
gfx::GpuMemoryBufferHandle* handle) {
- base::SharedMemory shared_memory;
- bool rv = shared_memory.CreateAnonymous(
- gfx::BufferSizeForBufferFormat(size, format));
- DCHECK(rv);
- handle->type = gfx::SHARED_MEMORY_BUFFER;
- handle->offset = 0;
- handle->stride = static_cast<int32_t>(
- gfx::RowSizeForBufferFormat(size.width(), format, 0));
- handle->handle = base::SharedMemory::DuplicateHandle(shared_memory.handle());
- return base::Bind(&Noop);
+ *handle = CreateGpuMemoryBuffer(handle->id, size, format);
+ return base::Bind(&base::DoNothing);
}
bool GpuMemoryBufferImplSharedMemory::Map() {
diff --git a/chromium/gpu/ipc/client/gpu_memory_buffer_impl_shared_memory.h b/chromium/gpu/ipc/client/gpu_memory_buffer_impl_shared_memory.h
index b991f9572fa..243d3d5beaf 100644
--- a/chromium/gpu/ipc/client/gpu_memory_buffer_impl_shared_memory.h
+++ b/chromium/gpu/ipc/client/gpu_memory_buffer_impl_shared_memory.h
@@ -26,7 +26,7 @@ class GPU_EXPORT GpuMemoryBufferImplSharedMemory : public GpuMemoryBufferImpl {
gfx::BufferFormat format,
const DestructionCallback& callback);
- static gfx::GpuMemoryBufferHandle AllocateForChildProcess(
+ static gfx::GpuMemoryBufferHandle CreateGpuMemoryBuffer(
gfx::GpuMemoryBufferId id,
const gfx::Size& size,
gfx::BufferFormat format);
diff --git a/chromium/gpu/ipc/client/gpu_memory_buffer_impl_test_template.h b/chromium/gpu/ipc/client/gpu_memory_buffer_impl_test_template.h
index 1cab70de963..fb27bb8653e 100644
--- a/chromium/gpu/ipc/client/gpu_memory_buffer_impl_test_template.h
+++ b/chromium/gpu/ipc/client/gpu_memory_buffer_impl_test_template.h
@@ -22,7 +22,7 @@ namespace gpu {
template <typename GpuMemoryBufferImplType>
class GpuMemoryBufferImplTest : public testing::Test {
public:
- GpuMemoryBufferImpl::DestructionCallback AllocateGpuMemoryBuffer(
+ GpuMemoryBufferImpl::DestructionCallback CreateGpuMemoryBuffer(
const gfx::Size& size,
gfx::BufferFormat format,
gfx::BufferUsage usage,
@@ -62,8 +62,8 @@ TYPED_TEST_P(GpuMemoryBufferImplTest, CreateFromHandle) {
bool destroyed = false;
gfx::GpuMemoryBufferHandle handle;
GpuMemoryBufferImpl::DestructionCallback destroy_callback =
- TestFixture::AllocateGpuMemoryBuffer(kBufferSize, format, usage,
- &handle, &destroyed);
+ TestFixture::CreateGpuMemoryBuffer(kBufferSize, format, usage,
+ &handle, &destroyed);
std::unique_ptr<TypeParam> buffer(TypeParam::CreateFromHandle(
handle, kBufferSize, format, usage, destroy_callback));
ASSERT_TRUE(buffer);
@@ -88,7 +88,7 @@ TYPED_TEST_P(GpuMemoryBufferImplTest, Map) {
gfx::GpuMemoryBufferHandle handle;
GpuMemoryBufferImpl::DestructionCallback destroy_callback =
- TestFixture::AllocateGpuMemoryBuffer(
+ TestFixture::CreateGpuMemoryBuffer(
kBufferSize, format, gfx::BufferUsage::GPU_READ_CPU_READ_WRITE,
&handle, nullptr);
std::unique_ptr<TypeParam> buffer(TypeParam::CreateFromHandle(
@@ -138,7 +138,7 @@ TYPED_TEST_P(GpuMemoryBufferImplTest, PersistentMap) {
gfx::GpuMemoryBufferHandle handle;
GpuMemoryBufferImpl::DestructionCallback destroy_callback =
- TestFixture::AllocateGpuMemoryBuffer(
+ TestFixture::CreateGpuMemoryBuffer(
kBufferSize, format,
gfx::BufferUsage::GPU_READ_CPU_READ_WRITE_PERSISTENT, &handle,
nullptr);
diff --git a/chromium/gpu/ipc/common/BUILD.gn b/chromium/gpu/ipc/common/BUILD.gn
index 73136d7258c..396041edf39 100644
--- a/chromium/gpu/ipc/common/BUILD.gn
+++ b/chromium/gpu/ipc/common/BUILD.gn
@@ -106,8 +106,6 @@ source_set("ipc_common_sources") {
sources += [
"android/scoped_surface_request_conduit.cc",
"android/scoped_surface_request_conduit.h",
- "android/surface_texture_manager.cc",
- "android/surface_texture_manager.h",
"android/surface_texture_peer.cc",
"android/surface_texture_peer.h",
]
@@ -143,6 +141,7 @@ mojom("interfaces") {
"capabilities.mojom",
"dx_diag_node.mojom",
"gpu_info.mojom",
+ "gpu_preferences.mojom",
"mailbox.mojom",
"mailbox_holder.mojom",
"surface_handle.mojom",
diff --git a/chromium/gpu/ipc/common/android/surface_texture_manager.cc b/chromium/gpu/ipc/common/android/surface_texture_manager.cc
deleted file mode 100644
index 22b27d03fb1..00000000000
--- a/chromium/gpu/ipc/common/android/surface_texture_manager.cc
+++ /dev/null
@@ -1,28 +0,0 @@
-// Copyright 2014 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "gpu/ipc/common/android/surface_texture_manager.h"
-
-#include "base/logging.h"
-
-namespace gpu {
-namespace {
-
-SurfaceTextureManager* g_instance = nullptr;
-
-} // namespace
-
-// static
-SurfaceTextureManager* SurfaceTextureManager::GetInstance() {
- DCHECK(g_instance);
- return g_instance;
-}
-
-// static
-void SurfaceTextureManager::SetInstance(SurfaceTextureManager* instance) {
- DCHECK(!g_instance || !instance);
- g_instance = instance;
-}
-
-} // namespace gpu
diff --git a/chromium/gpu/ipc/common/android/surface_texture_manager.h b/chromium/gpu/ipc/common/android/surface_texture_manager.h
deleted file mode 100644
index 8a0134b6804..00000000000
--- a/chromium/gpu/ipc/common/android/surface_texture_manager.h
+++ /dev/null
@@ -1,42 +0,0 @@
-// Copyright 2014 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef GPU_IPC_COMMON_ANDROID_SURFACE_TEXTURE_MANAGER_H_
-#define GPU_IPC_COMMON_ANDROID_SURFACE_TEXTURE_MANAGER_H_
-
-#include "gpu/gpu_export.h"
-#include "ui/gfx/native_widget_types.h"
-
-namespace gl {
-class SurfaceTexture;
-}
-
-namespace gpu {
-
-class GPU_EXPORT SurfaceTextureManager {
- public:
- static SurfaceTextureManager* GetInstance();
- static void SetInstance(SurfaceTextureManager* instance);
-
- // Register a surface texture for use in another process.
- virtual void RegisterSurfaceTexture(int surface_texture_id,
- int client_id,
- gl::SurfaceTexture* surface_texture) = 0;
-
- // Unregister a surface texture previously registered for use in another
- // process.
- virtual void UnregisterSurfaceTexture(int surface_texture_id,
- int client_id) = 0;
-
- // Acquire native widget for a registered surface texture.
- virtual gfx::AcceleratedWidget AcquireNativeWidgetForSurfaceTexture(
- int surface_texture_id) = 0;
-
- protected:
- virtual ~SurfaceTextureManager() {}
-};
-
-} // namespace gpu
-
-#endif // GPU_IPC_COMMON_ANDROID_SURFACE_TEXTURE_MANAGER_H_
diff --git a/chromium/gpu/ipc/common/gpu_info.mojom b/chromium/gpu/ipc/common/gpu_info.mojom
index 2dc02806687..ef65b579e28 100644
--- a/chromium/gpu/ipc/common/gpu_info.mojom
+++ b/chromium/gpu/ipc/common/gpu_info.mojom
@@ -6,7 +6,8 @@
module gpu.mojom;
import "gpu/ipc/common/dx_diag_node.mojom";
-import "mojo/common/common_custom_types.mojom";
+import "mojo/common/time.mojom";
+import "mojo/common/version.mojom";
import "ui/gfx/geometry/mojo/geometry.mojom";
// gpu::GPUInfo::GPUDevice
@@ -60,6 +61,7 @@ struct VideoDecodeAcceleratorSupportedProfile {
// gpu::VideoDecodeAcceleratorCapabilities
struct VideoDecodeAcceleratorCapabilities {
+ array<VideoDecodeAcceleratorSupportedProfile> supported_profiles;
uint32 flags;
};
@@ -102,6 +104,7 @@ struct GpuInfo {
bool sandboxed;
int32 process_crash_count;
bool in_process_gpu;
+ bool passthrough_cmd_decoder;
CollectInfoResult basic_info_state;
CollectInfoResult context_info_state;
CollectInfoResult dx_diagnostics_info_state;
diff --git a/chromium/gpu/ipc/common/gpu_info_struct_traits.cc b/chromium/gpu/ipc/common/gpu_info_struct_traits.cc
index 6c2bd82051f..b132575c9d1 100644
--- a/chromium/gpu/ipc/common/gpu_info_struct_traits.cc
+++ b/chromium/gpu/ipc/common/gpu_info_struct_traits.cc
@@ -197,6 +197,8 @@ bool StructTraits<gpu::mojom::VideoDecodeAcceleratorCapabilitiesDataView,
gpu::VideoDecodeAcceleratorCapabilities>::
Read(gpu::mojom::VideoDecodeAcceleratorCapabilitiesDataView data,
gpu::VideoDecodeAcceleratorCapabilities* out) {
+ if (!data.ReadSupportedProfiles(&out->supported_profiles))
+ return false;
out->flags = data.flags();
return true;
}
@@ -224,6 +226,7 @@ bool StructTraits<gpu::mojom::GpuInfoDataView, gpu::GPUInfo>::Read(
out->direct_rendering = data.direct_rendering();
out->sandboxed = data.sandboxed();
out->in_process_gpu = data.in_process_gpu();
+ out->passthrough_cmd_decoder = data.passthrough_cmd_decoder();
out->process_crash_count = data.process_crash_count();
out->jpeg_decode_accelerator_supported =
data.jpeg_decode_accelerator_supported();
diff --git a/chromium/gpu/ipc/common/gpu_info_struct_traits.h b/chromium/gpu/ipc/common/gpu_info_struct_traits.h
index ea76e0e279e..a6b9527a080 100644
--- a/chromium/gpu/ipc/common/gpu_info_struct_traits.h
+++ b/chromium/gpu/ipc/common/gpu_info_struct_traits.h
@@ -96,6 +96,11 @@ struct StructTraits<gpu::mojom::VideoDecodeAcceleratorCapabilitiesDataView,
static uint32_t flags(const gpu::VideoDecodeAcceleratorCapabilities& input) {
return input.flags;
}
+
+ static std::vector<gpu::VideoDecodeAcceleratorSupportedProfile>
+ supported_profiles(const gpu::VideoDecodeAcceleratorCapabilities& input) {
+ return input.supported_profiles;
+ }
};
template <>
@@ -243,6 +248,10 @@ struct StructTraits<gpu::mojom::GpuInfoDataView, gpu::GPUInfo> {
return input.in_process_gpu;
}
+ static bool passthrough_cmd_decoder(const gpu::GPUInfo& input) {
+ return input.passthrough_cmd_decoder;
+ }
+
static gpu::CollectInfoResult basic_info_state(const gpu::GPUInfo& input) {
return input.basic_info_state;
}
diff --git a/chromium/gpu/ipc/common/gpu_param_traits_macros.h b/chromium/gpu/ipc/common/gpu_param_traits_macros.h
index e6409d2b5a8..64248121f4d 100644
--- a/chromium/gpu/ipc/common/gpu_param_traits_macros.h
+++ b/chromium/gpu/ipc/common/gpu_param_traits_macros.h
@@ -91,6 +91,7 @@ IPC_STRUCT_TRAITS_BEGIN(gpu::GPUInfo)
IPC_STRUCT_TRAITS_MEMBER(sandboxed)
IPC_STRUCT_TRAITS_MEMBER(process_crash_count)
IPC_STRUCT_TRAITS_MEMBER(in_process_gpu)
+ IPC_STRUCT_TRAITS_MEMBER(passthrough_cmd_decoder)
IPC_STRUCT_TRAITS_MEMBER(basic_info_state)
IPC_STRUCT_TRAITS_MEMBER(context_info_state)
#if defined(OS_WIN)
diff --git a/chromium/gpu/ipc/common/gpu_preferences.mojom b/chromium/gpu/ipc/common/gpu_preferences.mojom
new file mode 100644
index 00000000000..e1565ec7d49
--- /dev/null
+++ b/chromium/gpu/ipc/common/gpu_preferences.mojom
@@ -0,0 +1,53 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// gpu/command_buffer/service/gpu_preferences.h
+module gpu.mojom;
+
+// gpu::GpuPreferences::VpxDecodeVendors
+enum VpxDecodeVendors {
+ VPX_VENDOR_NONE = 0,
+ VPX_VENDOR_MICROSOFT = 1,
+ VPX_VENDOR_AMD = 2,
+ VPX_VENDOR_ALL = 3,
+};
+
+// gpu::GpuPreferences
+struct GpuPreferences {
+ bool single_process;
+ bool in_process_gpu;
+ bool ui_prioritize_in_gpu_process;
+ bool disable_accelerated_video_decode;
+
+ bool disable_vaapi_accelerated_video_encode;
+
+ bool disable_web_rtc_hw_encoding;
+
+ VpxDecodeVendors enable_accelerated_vpx_decode;
+ bool enable_low_latency_dxva;
+ bool enable_zero_copy_dxgi_video;
+ bool enable_nv12_dxgi_video;
+
+ bool compile_shader_always_succeeds;
+ bool disable_gl_error_limit;
+ bool disable_glsl_translator;
+ bool disable_gpu_driver_bug_workarounds;
+ bool disable_shader_name_hashing;
+ bool enable_gpu_command_logging;
+ bool enable_gpu_debugging;
+ bool enable_gpu_service_logging_gpu;
+ bool enable_gpu_driver_debug_logging;
+ bool disable_gpu_program_cache;
+ bool enforce_gl_minimums;
+ uint32 force_gpu_mem_available;
+ uint32 gpu_program_cache_size;
+ bool disable_gpu_shader_disk_cache;
+ bool enable_threaded_texture_mailboxes;
+ bool gl_shader_interm_output;
+ bool emulate_shader_precision;
+ bool enable_gpu_service_logging;
+ bool enable_gpu_service_tracing;
+ bool enable_es3_apis;
+ bool use_passthrough_cmd_decoder;
+};
diff --git a/chromium/gpu/ipc/common/gpu_preferences.typemap b/chromium/gpu/ipc/common/gpu_preferences.typemap
new file mode 100644
index 00000000000..0dfa9025eac
--- /dev/null
+++ b/chromium/gpu/ipc/common/gpu_preferences.typemap
@@ -0,0 +1,16 @@
+# Copyright 2016 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+mojom = "//gpu/ipc/common/gpu_preferences.mojom"
+public_headers = [ "//gpu/command_buffer/service/gpu_preferences.h" ]
+traits_headers = [ "//gpu/ipc/common/gpu_preferences_struct_traits.h" ]
+public_deps = [
+ "//gpu/command_buffer/service",
+ "//media:media_features",
+ "//mojo/common:struct_traits",
+]
+type_mappings = [
+ "gpu.mojom.GpuPreferences=gpu::GpuPreferences",
+ "gpu.mojom.VpxDecodeVendors=gpu::GpuPreferences::VpxDecodeVendors",
+]
diff --git a/chromium/gpu/ipc/common/gpu_preferences_struct_traits.h b/chromium/gpu/ipc/common/gpu_preferences_struct_traits.h
new file mode 100644
index 00000000000..0f2a121e08e
--- /dev/null
+++ b/chromium/gpu/ipc/common/gpu_preferences_struct_traits.h
@@ -0,0 +1,238 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef GPU_IPC_COMMON_GPU_PREFERENCES_STRUCT_TRAITS_H_
+#define GPU_IPC_COMMON_GPU_PREFERENCES_STRUCT_TRAITS_H_
+
+#include "gpu/ipc/common/gpu_preferences.mojom.h"
+
+namespace mojo {
+
+template <>
+struct EnumTraits<gpu::mojom::VpxDecodeVendors,
+ gpu::GpuPreferences::VpxDecodeVendors> {
+ static gpu::mojom::VpxDecodeVendors ToMojom(
+ gpu::GpuPreferences::VpxDecodeVendors vpx) {
+ switch (vpx) {
+ case gpu::GpuPreferences::VPX_VENDOR_NONE:
+ return gpu::mojom::VpxDecodeVendors::VPX_VENDOR_NONE;
+ case gpu::GpuPreferences::VPX_VENDOR_MICROSOFT:
+ return gpu::mojom::VpxDecodeVendors::VPX_VENDOR_MICROSOFT;
+ case gpu::GpuPreferences::VPX_VENDOR_AMD:
+ return gpu::mojom::VpxDecodeVendors::VPX_VENDOR_AMD;
+ case gpu::GpuPreferences::VPX_VENDOR_ALL:
+ return gpu::mojom::VpxDecodeVendors::VPX_VENDOR_ALL;
+ }
+ NOTREACHED();
+ return gpu::mojom::VpxDecodeVendors::VPX_VENDOR_NONE;
+ }
+
+ static bool FromMojom(gpu::mojom::VpxDecodeVendors input,
+ gpu::GpuPreferences::VpxDecodeVendors* out) {
+ switch (input) {
+ case gpu::mojom::VpxDecodeVendors::VPX_VENDOR_NONE:
+ *out = gpu::GpuPreferences::VPX_VENDOR_NONE;
+ return true;
+ case gpu::mojom::VpxDecodeVendors::VPX_VENDOR_MICROSOFT:
+ *out = gpu::GpuPreferences::VPX_VENDOR_MICROSOFT;
+ return true;
+ case gpu::mojom::VpxDecodeVendors::VPX_VENDOR_AMD:
+ *out = gpu::GpuPreferences::VPX_VENDOR_AMD;
+ return true;
+ case gpu::mojom::VpxDecodeVendors::VPX_VENDOR_ALL:
+ *out = gpu::GpuPreferences::VPX_VENDOR_ALL;
+ return true;
+ }
+ return false;
+ }
+};
+
+template <>
+struct StructTraits<gpu::mojom::GpuPreferencesDataView, gpu::GpuPreferences> {
+ static bool Read(gpu::mojom::GpuPreferencesDataView prefs,
+ gpu::GpuPreferences* out) {
+ out->single_process = prefs.single_process();
+ out->in_process_gpu = prefs.in_process_gpu();
+ out->ui_prioritize_in_gpu_process = prefs.ui_prioritize_in_gpu_process();
+ out->disable_accelerated_video_decode =
+ prefs.disable_accelerated_video_decode();
+
+#if defined(OS_CHROMEOS)
+ out->disable_vaapi_accelerated_video_encode =
+ prefs.disable_vaapi_accelerated_video_encode();
+#endif
+
+#if BUILDFLAG(ENABLE_WEBRTC)
+ out->disable_web_rtc_hw_encoding = prefs.disable_web_rtc_hw_encoding();
+#endif
+
+#if defined(OS_WIN)
+ if (!prefs.ReadEnableAcceleratedVpxDecode(
+ &out->enable_accelerated_vpx_decode))
+ return false;
+ out->enable_low_latency_dxva = prefs.enable_low_latency_dxva();
+ out->enable_zero_copy_dxgi_video = prefs.enable_zero_copy_dxgi_video();
+ out->enable_nv12_dxgi_video = prefs.enable_nv12_dxgi_video();
+#endif
+
+ out->compile_shader_always_succeeds =
+ prefs.compile_shader_always_succeeds();
+ out->disable_gl_error_limit = prefs.disable_gl_error_limit();
+ out->disable_glsl_translator = prefs.disable_glsl_translator();
+ out->disable_gpu_driver_bug_workarounds =
+ prefs.disable_gpu_driver_bug_workarounds();
+ out->disable_shader_name_hashing = prefs.disable_shader_name_hashing();
+ out->enable_gpu_command_logging = prefs.enable_gpu_command_logging();
+ out->enable_gpu_debugging = prefs.enable_gpu_debugging();
+ out->enable_gpu_service_logging_gpu =
+ prefs.enable_gpu_service_logging_gpu();
+ out->enable_gpu_driver_debug_logging =
+ prefs.enable_gpu_driver_debug_logging();
+ out->disable_gpu_program_cache = prefs.disable_gpu_program_cache();
+ out->enforce_gl_minimums = prefs.enforce_gl_minimums();
+ out->force_gpu_mem_available = prefs.force_gpu_mem_available();
+ out->gpu_program_cache_size = prefs.gpu_program_cache_size();
+ out->disable_gpu_shader_disk_cache = prefs.disable_gpu_shader_disk_cache();
+ out->enable_threaded_texture_mailboxes =
+ prefs.enable_threaded_texture_mailboxes();
+ out->gl_shader_interm_output = prefs.gl_shader_interm_output();
+ out->emulate_shader_precision = prefs.emulate_shader_precision();
+ out->enable_gpu_service_logging = prefs.enable_gpu_service_logging();
+ out->enable_gpu_service_tracing = prefs.enable_gpu_service_tracing();
+ out->enable_es3_apis = prefs.enable_es3_apis();
+ out->use_passthrough_cmd_decoder = prefs.use_passthrough_cmd_decoder();
+ return true;
+ }
+
+ static bool single_process(const gpu::GpuPreferences& prefs) {
+ return prefs.single_process;
+ }
+ static bool in_process_gpu(const gpu::GpuPreferences& prefs) {
+ return prefs.in_process_gpu;
+ }
+ static bool ui_prioritize_in_gpu_process(const gpu::GpuPreferences& prefs) {
+ return prefs.ui_prioritize_in_gpu_process;
+ }
+ static bool disable_accelerated_video_decode(
+ const gpu::GpuPreferences& prefs) {
+ return prefs.disable_accelerated_video_decode;
+ }
+
+ static bool disable_vaapi_accelerated_video_encode(
+ const gpu::GpuPreferences& prefs) {
+#if defined(OS_CHROMEOS)
+ return prefs.disable_vaapi_accelerated_video_encode;
+#else
+ return false;
+#endif
+ }
+
+ static bool disable_web_rtc_hw_encoding(const gpu::GpuPreferences& prefs) {
+#if BUILDFLAG(ENABLE_WEBRTC)
+ return prefs.disable_web_rtc_hw_encoding;
+#else
+ return false;
+#endif
+ }
+
+ static gpu::GpuPreferences::VpxDecodeVendors enable_accelerated_vpx_decode(
+ const gpu::GpuPreferences& prefs) {
+#if defined(OS_WIN)
+ return prefs.enable_accelerated_vpx_decode;
+#else
+ return gpu::GpuPreferences::VPX_VENDOR_MICROSOFT;
+#endif
+ }
+ static bool enable_low_latency_dxva(const gpu::GpuPreferences& prefs) {
+#if defined(OS_WIN)
+ return prefs.enable_low_latency_dxva;
+#else
+ return false;
+#endif
+ }
+ static bool enable_zero_copy_dxgi_video(const gpu::GpuPreferences& prefs) {
+#if defined(OS_WIN)
+ return prefs.enable_zero_copy_dxgi_video;
+#else
+ return false;
+#endif
+ }
+ static bool enable_nv12_dxgi_video(const gpu::GpuPreferences& prefs) {
+#if defined(OS_WIN)
+ return prefs.enable_nv12_dxgi_video;
+#else
+ return false;
+#endif
+ }
+ static bool compile_shader_always_succeeds(const gpu::GpuPreferences& prefs) {
+ return prefs.compile_shader_always_succeeds;
+ }
+ static bool disable_gl_error_limit(const gpu::GpuPreferences& prefs) {
+ return prefs.disable_gl_error_limit;
+ }
+ static bool disable_glsl_translator(const gpu::GpuPreferences& prefs) {
+ return prefs.disable_glsl_translator;
+ }
+ static bool disable_gpu_driver_bug_workarounds(
+ const gpu::GpuPreferences& prefs) {
+ return prefs.disable_gpu_driver_bug_workarounds;
+ }
+ static bool disable_shader_name_hashing(const gpu::GpuPreferences& prefs) {
+ return prefs.disable_shader_name_hashing;
+ }
+ static bool enable_gpu_command_logging(const gpu::GpuPreferences& prefs) {
+ return prefs.enable_gpu_command_logging;
+ }
+ static bool enable_gpu_debugging(const gpu::GpuPreferences& prefs) {
+ return prefs.enable_gpu_debugging;
+ }
+ static bool enable_gpu_service_logging_gpu(const gpu::GpuPreferences& prefs) {
+ return prefs.enable_gpu_service_logging_gpu;
+ }
+ static bool enable_gpu_driver_debug_logging(
+ const gpu::GpuPreferences& prefs) {
+ return prefs.enable_gpu_driver_debug_logging;
+ }
+ static bool disable_gpu_program_cache(const gpu::GpuPreferences& prefs) {
+ return prefs.disable_gpu_program_cache;
+ }
+ static bool enforce_gl_minimums(const gpu::GpuPreferences& prefs) {
+ return prefs.enforce_gl_minimums;
+ }
+ static uint32_t force_gpu_mem_available(const gpu::GpuPreferences& prefs) {
+ return prefs.force_gpu_mem_available;
+ }
+ static uint32_t gpu_program_cache_size(const gpu::GpuPreferences& prefs) {
+ return prefs.gpu_program_cache_size;
+ }
+ static bool disable_gpu_shader_disk_cache(const gpu::GpuPreferences& prefs) {
+ return prefs.disable_gpu_shader_disk_cache;
+ }
+ static bool enable_threaded_texture_mailboxes(
+ const gpu::GpuPreferences& prefs) {
+ return prefs.enable_threaded_texture_mailboxes;
+ }
+ static bool gl_shader_interm_output(const gpu::GpuPreferences& prefs) {
+ return prefs.gl_shader_interm_output;
+ }
+ static bool emulate_shader_precision(const gpu::GpuPreferences& prefs) {
+ return prefs.emulate_shader_precision;
+ }
+ static bool enable_gpu_service_logging(const gpu::GpuPreferences& prefs) {
+ return prefs.enable_gpu_service_logging;
+ }
+ static bool enable_gpu_service_tracing(const gpu::GpuPreferences& prefs) {
+ return prefs.enable_gpu_service_tracing;
+ }
+ static bool enable_es3_apis(const gpu::GpuPreferences& prefs) {
+ return prefs.enable_es3_apis;
+ }
+ static bool use_passthrough_cmd_decoder(const gpu::GpuPreferences& prefs) {
+ return prefs.use_passthrough_cmd_decoder;
+ }
+};
+
+} // namespace mojo
+
+#endif // GPU_IPC_COMMON_GPU_PREFERENCES_STRUCT_TRAITS_H_
diff --git a/chromium/gpu/ipc/common/gpu_surface_tracker.cc b/chromium/gpu/ipc/common/gpu_surface_tracker.cc
index fa02242491b..b2b8dacdaf7 100644
--- a/chromium/gpu/ipc/common/gpu_surface_tracker.cc
+++ b/chromium/gpu/ipc/common/gpu_surface_tracker.cc
@@ -64,12 +64,12 @@ gfx::AcceleratedWidget GpuSurfaceTracker::AcquireNativeWidget(
#if defined(OS_ANDROID)
void GpuSurfaceTracker::RegisterViewSurface(
- int surface_id, const base::android::JavaRef<jobject>& j_surface) {
+ int surface_id, jobject j_surface) {
base::AutoLock lock(surface_view_map_lock_);
DCHECK(surface_view_map_.find(surface_id) == surface_view_map_.end());
surface_view_map_[surface_id] =
- gl::ScopedJavaSurface::AcquireExternalSurface(j_surface.obj());
+ gl::ScopedJavaSurface::AcquireExternalSurface(j_surface);
CHECK(surface_view_map_[surface_id].IsValid());
}
diff --git a/chromium/gpu/ipc/common/gpu_surface_tracker.h b/chromium/gpu/ipc/common/gpu_surface_tracker.h
index b781ae27d76..6d372712521 100644
--- a/chromium/gpu/ipc/common/gpu_surface_tracker.h
+++ b/chromium/gpu/ipc/common/gpu_surface_tracker.h
@@ -23,10 +23,10 @@ namespace gpu {
// window surfaces exposed to the GPU process. Every surface gets registered to
// this class, and gets a handle. The handle can be passed to
// CommandBufferProxyImpl::Create or to
-// GpuMemoryBufferManager::AllocateGpuMemoryBuffer.
+// GpuMemoryBufferManager::CreateGpuMemoryBuffer.
// On Android, the handle is used in the GPU process to get a reference to the
// ANativeWindow, using GpuSurfaceLookup (implemented by
-// SurfaceTextureManagerImpl).
+// ChildProcessSurfaceManager).
// On Mac, the handle just passes through the GPU process, and is sent back via
// GpuCommandBufferMsg_SwapBuffersCompleted to reference the surface.
// This class is thread safe.
@@ -39,8 +39,7 @@ class GPU_EXPORT GpuSurfaceTracker : public gpu::GpuSurfaceLookup {
gpu::SurfaceHandle surface_handle) override;
#if defined(OS_ANDROID)
- void RegisterViewSurface(int surface_id,
- const base::android::JavaRef<jobject>& j_surface);
+ void RegisterViewSurface(int surface_id, jobject j_surface);
void UnregisterViewSurface(int surface_id);
gl::ScopedJavaSurface AcquireJavaSurface(int surface_id) override;
#endif
diff --git a/chromium/gpu/ipc/common/struct_traits_unittest.cc b/chromium/gpu/ipc/common/struct_traits_unittest.cc
index 70ddbc0ad15..2223921dfbf 100644
--- a/chromium/gpu/ipc/common/struct_traits_unittest.cc
+++ b/chromium/gpu/ipc/common/struct_traits_unittest.cc
@@ -74,6 +74,11 @@ class StructTraitsTest : public testing::Test, public mojom::TraitsTestService {
callback.Run(v);
}
+ void EchoGpuPreferences(const GpuPreferences& prefs,
+ const EchoGpuPreferencesCallback& callback) override {
+ callback.Run(prefs);
+ }
+
base::MessageLoop loop_;
mojo::BindingSet<TraitsTestService> traits_test_bindings_;
@@ -148,6 +153,7 @@ TEST_F(StructTraitsTest, GpuInfo) {
const bool sandboxed = true;
const int process_crash_count = 0xdead;
const bool in_process_gpu = true;
+ const bool passthrough_cmd_decoder = true;
const gpu::CollectInfoResult basic_info_state =
gpu::CollectInfoResult::kCollectInfoSuccess;
const gpu::CollectInfoResult context_info_state =
@@ -197,6 +203,7 @@ TEST_F(StructTraitsTest, GpuInfo) {
input.sandboxed = sandboxed;
input.process_crash_count = process_crash_count;
input.in_process_gpu = in_process_gpu;
+ input.passthrough_cmd_decoder = passthrough_cmd_decoder;
input.basic_info_state = basic_info_state;
input.context_info_state = context_info_state;
#if defined(OS_WIN)
@@ -259,6 +266,7 @@ TEST_F(StructTraitsTest, GpuInfo) {
EXPECT_EQ(sandboxed, output.sandboxed);
EXPECT_EQ(process_crash_count, output.process_crash_count);
EXPECT_EQ(in_process_gpu, output.in_process_gpu);
+ EXPECT_EQ(passthrough_cmd_decoder, output.passthrough_cmd_decoder);
EXPECT_EQ(basic_info_state, output.basic_info_state);
EXPECT_EQ(context_info_state, output.context_info_state);
#if defined(OS_WIN)
@@ -386,11 +394,16 @@ TEST_F(StructTraitsTest, VideoDecodeAcceleratorCapabilities) {
gpu::VideoDecodeAcceleratorCapabilities input;
input.flags = flags;
+ input.supported_profiles.push_back(
+ gpu::VideoDecodeAcceleratorSupportedProfile());
+ input.supported_profiles.push_back(
+ gpu::VideoDecodeAcceleratorSupportedProfile());
mojom::TraitsTestServicePtr proxy = GetTraitsTestProxy();
gpu::VideoDecodeAcceleratorCapabilities output;
proxy->EchoVideoDecodeAcceleratorCapabilities(input, &output);
EXPECT_EQ(flags, output.flags);
+ EXPECT_EQ(input.supported_profiles.size(), output.supported_profiles.size());
}
TEST_F(StructTraitsTest, VideoEncodeAcceleratorSupportedProfile) {
@@ -415,4 +428,28 @@ TEST_F(StructTraitsTest, VideoEncodeAcceleratorSupportedProfile) {
EXPECT_EQ(max_framerate_denominator, output.max_framerate_denominator);
}
+TEST_F(StructTraitsTest, GpuPreferences) {
+ GpuPreferences prefs;
+ prefs.single_process = true;
+ prefs.in_process_gpu = true;
+ prefs.ui_prioritize_in_gpu_process = true;
+#if defined(OS_WIN)
+ const GpuPreferences::VpxDecodeVendors vendor =
+ GpuPreferences::VPX_VENDOR_AMD;
+ prefs.enable_accelerated_vpx_decode = vendor;
+#endif
+ prefs.enable_gpu_driver_debug_logging = true;
+
+ mojom::TraitsTestServicePtr proxy = GetTraitsTestProxy();
+ GpuPreferences echo;
+ proxy->EchoGpuPreferences(prefs, &echo);
+ EXPECT_TRUE(echo.single_process);
+ EXPECT_TRUE(echo.in_process_gpu);
+ EXPECT_TRUE(echo.ui_prioritize_in_gpu_process);
+ EXPECT_TRUE(echo.enable_gpu_driver_debug_logging);
+#if defined(OS_WIN)
+ EXPECT_EQ(vendor, echo.enable_accelerated_vpx_decode);
+#endif
+}
+
} // namespace gpu
diff --git a/chromium/gpu/ipc/common/traits_test_service.mojom b/chromium/gpu/ipc/common/traits_test_service.mojom
index bd7a961a8f9..f2d911d39d4 100644
--- a/chromium/gpu/ipc/common/traits_test_service.mojom
+++ b/chromium/gpu/ipc/common/traits_test_service.mojom
@@ -6,6 +6,7 @@ module gpu.mojom;
import "gpu/ipc/common/dx_diag_node.mojom";
import "gpu/ipc/common/gpu_info.mojom";
+import "gpu/ipc/common/gpu_preferences.mojom";
import "gpu/ipc/common/mailbox.mojom";
import "gpu/ipc/common/mailbox_holder.mojom";
import "gpu/ipc/common/sync_token.mojom";
@@ -46,4 +47,7 @@ interface TraitsTestService {
EchoVideoEncodeAcceleratorSupportedProfile(
VideoEncodeAcceleratorSupportedProfile v) =>
(VideoEncodeAcceleratorSupportedProfile pass);
+
+ [Sync]
+ EchoGpuPreferences(GpuPreferences prefs) => (GpuPreferences pass);
};
diff --git a/chromium/gpu/ipc/common/typemaps.gni b/chromium/gpu/ipc/common/typemaps.gni
index 5f353e0a294..23acc1fd9f8 100644
--- a/chromium/gpu/ipc/common/typemaps.gni
+++ b/chromium/gpu/ipc/common/typemaps.gni
@@ -5,6 +5,7 @@
typemaps = [
"//gpu/ipc/common/capabilities.typemap",
"//gpu/ipc/common/gpu_info.typemap",
+ "//gpu/ipc/common/gpu_preferences.typemap",
"//gpu/ipc/common/dx_diag_node.typemap",
"//gpu/ipc/common/mailbox.typemap",
"//gpu/ipc/common/mailbox_holder.typemap",
diff --git a/chromium/gpu/ipc/gl_in_process_context.cc b/chromium/gpu/ipc/gl_in_process_context.cc
index 16a1cb16500..c854913758b 100644
--- a/chromium/gpu/ipc/gl_in_process_context.cc
+++ b/chromium/gpu/ipc/gl_in_process_context.cc
@@ -54,7 +54,7 @@ class GLInProcessContextImpl
bool Initialize(scoped_refptr<gl::GLSurface> surface,
bool is_offscreen,
GLInProcessContext* share_context,
- gfx::AcceleratedWidget window,
+ SurfaceHandle window,
const gpu::gles2::ContextCreationAttribHelper& attribs,
const scoped_refptr<InProcessCommandBuffer::Service>& service,
const SharedMemoryLimits& mem_limits,
@@ -63,7 +63,14 @@ class GLInProcessContextImpl
scoped_refptr<base::SingleThreadTaskRunner> task_runner);
// GLInProcessContext implementation:
+ gpu::Capabilities GetCapabilities() override;
gles2::GLES2Implementation* GetImplementation() override;
+ void SetSwapBuffersCompletionCallback(
+ const gpu::InProcessCommandBuffer::SwapBuffersCompletionCallback&
+ callback) override;
+ void SetUpdateVSyncParametersCallback(
+ const gpu::InProcessCommandBuffer::UpdateVSyncParametersCallback&
+ callback) override;
void SetLock(base::Lock* lock) override;
private:
@@ -84,10 +91,26 @@ GLInProcessContextImpl::~GLInProcessContextImpl() {
Destroy();
}
+Capabilities GLInProcessContextImpl::GetCapabilities() {
+ return command_buffer_->GetCapabilities();
+}
+
gles2::GLES2Implementation* GLInProcessContextImpl::GetImplementation() {
return gles2_implementation_.get();
}
+void GLInProcessContextImpl::SetSwapBuffersCompletionCallback(
+ const gpu::InProcessCommandBuffer::SwapBuffersCompletionCallback&
+ callback) {
+ command_buffer_->SetSwapBuffersCompletionCallback(callback);
+}
+
+void GLInProcessContextImpl::SetUpdateVSyncParametersCallback(
+ const gpu::InProcessCommandBuffer::UpdateVSyncParametersCallback&
+ callback) {
+ command_buffer_->SetUpdateVSyncParametersCallback(callback);
+}
+
void GLInProcessContextImpl::SetLock(base::Lock* lock) {
NOTREACHED();
}
@@ -96,7 +119,7 @@ bool GLInProcessContextImpl::Initialize(
scoped_refptr<gl::GLSurface> surface,
bool is_offscreen,
GLInProcessContext* share_context,
- gfx::AcceleratedWidget window,
+ SurfaceHandle window,
const gles2::ContextCreationAttribHelper& attribs,
const scoped_refptr<InProcessCommandBuffer::Service>& service,
const SharedMemoryLimits& mem_limits,
@@ -183,7 +206,7 @@ GLInProcessContext* GLInProcessContext::Create(
scoped_refptr<gpu::InProcessCommandBuffer::Service> service,
scoped_refptr<gl::GLSurface> surface,
bool is_offscreen,
- gfx::AcceleratedWidget window,
+ SurfaceHandle window,
GLInProcessContext* share_context,
const ::gpu::gles2::ContextCreationAttribHelper& attribs,
const SharedMemoryLimits& memory_limits,
@@ -196,7 +219,7 @@ GLInProcessContext* GLInProcessContext::Create(
if (surface) {
DCHECK_EQ(surface->IsOffscreen(), is_offscreen);
- DCHECK_EQ(gfx::kNullAcceleratedWidget, window);
+ DCHECK_EQ(kNullSurfaceHandle, window);
}
std::unique_ptr<GLInProcessContextImpl> context(new GLInProcessContextImpl);
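GetCapabilities() is now part of the GLInProcessContext interface, so callers no longer need to reach into the command buffer it wraps. A sketch, assuming a context created as in the test above; the capability field chosen here is just an example:

#include "gpu/command_buffer/common/capabilities.h"
#include "gpu/ipc/gl_in_process_context.h"

// Illustrative only: query capabilities directly from the context.
bool SupportsETC1(gpu::GLInProcessContext* context) {
  return context->GetCapabilities().texture_format_etc1;
}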
diff --git a/chromium/gpu/ipc/gl_in_process_context.h b/chromium/gpu/ipc/gl_in_process_context.h
index e03363f3aca..eef93398c24 100644
--- a/chromium/gpu/ipc/gl_in_process_context.h
+++ b/chromium/gpu/ipc/gl_in_process_context.h
@@ -17,17 +17,8 @@
#include "ui/gl/gl_surface.h"
#include "ui/gl/gpu_preference.h"
-namespace gfx {
-class Size;
-}
-
-#if defined(OS_ANDROID)
-namespace gl {
-class SurfaceTexture;
-}
-#endif
-
namespace gpu {
+class InProcessCommandBuffer;
struct SharedMemoryLimits;
namespace gles2 {
@@ -50,7 +41,7 @@ class GL_IN_PROCESS_CONTEXT_EXPORT GLInProcessContext {
scoped_refptr<gpu::InProcessCommandBuffer::Service> service,
scoped_refptr<gl::GLSurface> surface,
bool is_offscreen,
- gfx::AcceleratedWidget window,
+ SurfaceHandle window,
GLInProcessContext* share_context,
const gpu::gles2::ContextCreationAttribHelper& attribs,
const SharedMemoryLimits& memory_limits,
@@ -58,11 +49,21 @@ class GL_IN_PROCESS_CONTEXT_EXPORT GLInProcessContext {
ImageFactory* image_factory,
scoped_refptr<base::SingleThreadTaskRunner> task_runner);
+ virtual gpu::Capabilities GetCapabilities() = 0;
+
// Allows direct access to the GLES2 implementation so a GLInProcessContext
// can be used without making it current.
virtual gles2::GLES2Implementation* GetImplementation() = 0;
virtual void SetLock(base::Lock* lock) = 0;
+
+ virtual void SetSwapBuffersCompletionCallback(
+ const gpu::InProcessCommandBuffer::SwapBuffersCompletionCallback&
+ callback) = 0;
+
+ virtual void SetUpdateVSyncParametersCallback(
+ const gpu::InProcessCommandBuffer::UpdateVSyncParametersCallback&
+ callback) = 0;
};
} // namespace gpu
diff --git a/chromium/gpu/ipc/gpu_in_process_thread_service.cc b/chromium/gpu/ipc/gpu_in_process_thread_service.cc
new file mode 100644
index 00000000000..b21b864e3b1
--- /dev/null
+++ b/chromium/gpu/ipc/gpu_in_process_thread_service.cc
@@ -0,0 +1,69 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "gpu/ipc/gpu_in_process_thread_service.h"
+
+#include "base/lazy_instance.h"
+#include "base/threading/thread_task_runner_handle.h"
+
+namespace gpu {
+
+GpuInProcessThreadService::GpuInProcessThreadService(
+ scoped_refptr<base::SingleThreadTaskRunner> task_runner,
+ gpu::SyncPointManager* sync_point_manager,
+ gpu::gles2::MailboxManager* mailbox_manager,
+ scoped_refptr<gl::GLShareGroup> share_group)
+ : gpu::InProcessCommandBuffer::Service(mailbox_manager, share_group),
+ task_runner_(task_runner),
+ sync_point_manager_(sync_point_manager) {}
+
+void GpuInProcessThreadService::ScheduleTask(const base::Closure& task) {
+ task_runner_->PostTask(FROM_HERE, task);
+}
+
+void GpuInProcessThreadService::ScheduleDelayedWork(const base::Closure& task) {
+ task_runner_->PostDelayedTask(FROM_HERE, task,
+ base::TimeDelta::FromMilliseconds(2));
+}
+bool GpuInProcessThreadService::UseVirtualizedGLContexts() {
+ return true;
+}
+
+scoped_refptr<gpu::gles2::ShaderTranslatorCache>
+GpuInProcessThreadService::shader_translator_cache() {
+ if (!shader_translator_cache_) {
+ shader_translator_cache_ = make_scoped_refptr(
+ new gpu::gles2::ShaderTranslatorCache(gpu_preferences()));
+ }
+ return shader_translator_cache_;
+}
+
+scoped_refptr<gpu::gles2::FramebufferCompletenessCache>
+GpuInProcessThreadService::framebuffer_completeness_cache() {
+ if (!framebuffer_completeness_cache_.get()) {
+ framebuffer_completeness_cache_ =
+ make_scoped_refptr(new gpu::gles2::FramebufferCompletenessCache);
+ }
+ return framebuffer_completeness_cache_;
+}
+
+gpu::SyncPointManager* GpuInProcessThreadService::sync_point_manager() {
+ return sync_point_manager_;
+}
+
+void GpuInProcessThreadService::AddRef() const {
+ base::RefCountedThreadSafe<GpuInProcessThreadService>::AddRef();
+}
+
+void GpuInProcessThreadService::Release() const {
+ base::RefCountedThreadSafe<GpuInProcessThreadService>::Release();
+}
+
+bool GpuInProcessThreadService::BlockThreadOnWaitSyncToken() const {
+ return false;
+}
+
+GpuInProcessThreadService::~GpuInProcessThreadService() {}
+
+} // namespace gpu
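GpuInProcessThreadService is the default Service when the embedder supplies none. A sketch of how it could be stood up on a dedicated thread; the thread and sync point manager ownership are assumptions, since the real owners differ between Mus and the unit tests:

#include "base/memory/ref_counted.h"
#include "base/threading/thread.h"
#include "gpu/command_buffer/service/sync_point_manager.h"
#include "gpu/ipc/gpu_in_process_thread_service.h"

// Illustrative only: run all in-process command buffer work on one dedicated
// GPU thread. The mailbox manager and share group are left null here for
// brevity (assumption: the embedder may supply its own).
scoped_refptr<gpu::GpuInProcessThreadService> CreateService(
    base::Thread* gpu_thread,
    gpu::SyncPointManager* sync_point_manager) {
  return make_scoped_refptr(new gpu::GpuInProcessThreadService(
      gpu_thread->task_runner(), sync_point_manager,
      nullptr /* mailbox_manager */, nullptr /* share_group */));
}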
diff --git a/chromium/gpu/ipc/gpu_in_process_thread_service.h b/chromium/gpu/ipc/gpu_in_process_thread_service.h
new file mode 100644
index 00000000000..413592f8117
--- /dev/null
+++ b/chromium/gpu/ipc/gpu_in_process_thread_service.h
@@ -0,0 +1,58 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef GPU_IPC_GPU_IN_PROCESS_THREAD_SERVICE_H_
+#define GPU_IPC_GPU_IN_PROCESS_THREAD_SERVICE_H_
+
+#include "base/compiler_specific.h"
+#include "gpu/command_buffer/service/mailbox_manager.h"
+#include "gpu/gpu_export.h"
+#include "gpu/ipc/in_process_command_buffer.h"
+#include "ui/gl/gl_share_group.h"
+
+namespace gpu {
+
+// Default Service class when no service is specified. GpuInProcessThreadService
+// is used by Mus and unit tests.
+class GPU_EXPORT GpuInProcessThreadService
+ : public NON_EXPORTED_BASE(gpu::InProcessCommandBuffer::Service),
+ public base::RefCountedThreadSafe<GpuInProcessThreadService> {
+ public:
+ GpuInProcessThreadService(
+ scoped_refptr<base::SingleThreadTaskRunner> task_runner,
+ gpu::SyncPointManager* sync_point_manager,
+ gpu::gles2::MailboxManager* mailbox_manager,
+ scoped_refptr<gl::GLShareGroup> share_group);
+
+ // gpu::InProcessCommandBuffer::Service implementation.
+ void ScheduleTask(const base::Closure& task) override;
+ void ScheduleDelayedWork(const base::Closure& task) override;
+ bool UseVirtualizedGLContexts() override;
+ scoped_refptr<gpu::gles2::ShaderTranslatorCache> shader_translator_cache()
+ override;
+ scoped_refptr<gpu::gles2::FramebufferCompletenessCache>
+ framebuffer_completeness_cache() override;
+ gpu::SyncPointManager* sync_point_manager() override;
+ void AddRef() const override;
+ void Release() const override;
+ bool BlockThreadOnWaitSyncToken() const override;
+
+ private:
+ friend class base::RefCountedThreadSafe<GpuInProcessThreadService>;
+
+ ~GpuInProcessThreadService() override;
+
+ scoped_refptr<base::SingleThreadTaskRunner> task_runner_;
+
+ gpu::SyncPointManager* sync_point_manager_; // Non-owning.
+ scoped_refptr<gpu::gles2::ShaderTranslatorCache> shader_translator_cache_;
+ scoped_refptr<gpu::gles2::FramebufferCompletenessCache>
+ framebuffer_completeness_cache_;
+
+ DISALLOW_COPY_AND_ASSIGN(GpuInProcessThreadService);
+};
+
+} // namespace gpu
+
+#endif // GPU_IPC_GPU_IN_PROCESS_THREAD_SERVICE_H_
diff --git a/chromium/gpu/ipc/host/BUILD.gn b/chromium/gpu/ipc/host/BUILD.gn
new file mode 100644
index 00000000000..daf7057b924
--- /dev/null
+++ b/chromium/gpu/ipc/host/BUILD.gn
@@ -0,0 +1,24 @@
+# Copyright 2016 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import("//build/config/ui.gni")
+
+source_set("host") {
+ sources = [
+ "gpu_memory_buffer_support.cc",
+ "gpu_memory_buffer_support.h",
+ "gpu_switches.cc",
+ "gpu_switches.h",
+ "shader_disk_cache.cc",
+ "shader_disk_cache.h",
+ ]
+
+ deps = [
+ "//base",
+ "//gpu/ipc/common",
+ "//net",
+ "//ui/gfx",
+ "//ui/gl",
+ ]
+}
diff --git a/chromium/gpu/ipc/host/DEPS b/chromium/gpu/ipc/host/DEPS
new file mode 100644
index 00000000000..8fa9d48d882
--- /dev/null
+++ b/chromium/gpu/ipc/host/DEPS
@@ -0,0 +1,3 @@
+include_rules = [
+ "+net",
+]
diff --git a/chromium/gpu/ipc/host/gpu_memory_buffer_support.cc b/chromium/gpu/ipc/host/gpu_memory_buffer_support.cc
new file mode 100644
index 00000000000..cbd36559365
--- /dev/null
+++ b/chromium/gpu/ipc/host/gpu_memory_buffer_support.cc
@@ -0,0 +1,113 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "gpu/ipc/host/gpu_memory_buffer_support.h"
+
+#include "base/command_line.h"
+#include "base/logging.h"
+#include "build/build_config.h"
+#include "gpu/ipc/common/gpu_memory_buffer_support.h"
+#include "gpu/ipc/host/gpu_switches.h"
+#include "ui/gl/gl_bindings.h"
+#include "ui/gl/gl_switches.h"
+
+namespace gpu {
+
+bool AreNativeGpuMemoryBuffersEnabled() {
+ // Disable native buffers when using Mesa.
+ if (base::CommandLine::ForCurrentProcess()->GetSwitchValueASCII(
+ switches::kUseGL) == gl::kGLImplementationOSMesaName) {
+ return false;
+ }
+
+#if defined(OS_MACOSX)
+ return !base::CommandLine::ForCurrentProcess()->HasSwitch(
+ switches::kDisableNativeGpuMemoryBuffers);
+#else
+ return base::CommandLine::ForCurrentProcess()->HasSwitch(
+ switches::kEnableNativeGpuMemoryBuffers);
+#endif
+}
+
+GpuMemoryBufferConfigurationSet GetNativeGpuMemoryBufferConfigurations() {
+ GpuMemoryBufferConfigurationSet configurations;
+
+#if defined(USE_OZONE) || defined(OS_MACOSX)
+ if (AreNativeGpuMemoryBuffersEnabled()) {
+ const gfx::BufferFormat kNativeFormats[] = {
+ gfx::BufferFormat::R_8,
+ gfx::BufferFormat::RG_88,
+ gfx::BufferFormat::BGR_565,
+ gfx::BufferFormat::RGBA_4444,
+ gfx::BufferFormat::RGBA_8888,
+ gfx::BufferFormat::BGRA_8888,
+ gfx::BufferFormat::UYVY_422,
+ gfx::BufferFormat::YVU_420,
+ gfx::BufferFormat::YUV_420_BIPLANAR};
+ const gfx::BufferUsage kNativeUsages[] = {
+ gfx::BufferUsage::GPU_READ, gfx::BufferUsage::SCANOUT,
+ gfx::BufferUsage::GPU_READ_CPU_READ_WRITE,
+ gfx::BufferUsage::GPU_READ_CPU_READ_WRITE_PERSISTENT};
+ for (auto format : kNativeFormats) {
+ for (auto usage : kNativeUsages) {
+ if (IsNativeGpuMemoryBufferConfigurationSupported(format, usage))
+ configurations.insert(std::make_pair(format, usage));
+ }
+ }
+ }
+
+ // Disable native buffers only when using Mesa.
+ bool force_native_gpu_read_write_formats =
+ base::CommandLine::ForCurrentProcess()->GetSwitchValueASCII(
+ switches::kUseGL) != gl::kGLImplementationOSMesaName;
+ if (force_native_gpu_read_write_formats) {
+ const gfx::BufferFormat kGPUReadWriteFormats[] = {
+ gfx::BufferFormat::BGR_565, gfx::BufferFormat::RGBA_8888,
+ gfx::BufferFormat::RGBX_8888, gfx::BufferFormat::BGRA_8888,
+ gfx::BufferFormat::BGRX_8888, gfx::BufferFormat::UYVY_422,
+ gfx::BufferFormat::YVU_420, gfx::BufferFormat::YUV_420_BIPLANAR};
+ const gfx::BufferUsage kGPUReadWriteUsages[] = {gfx::BufferUsage::GPU_READ,
+ gfx::BufferUsage::SCANOUT};
+ for (auto format : kGPUReadWriteFormats) {
+ for (auto usage : kGPUReadWriteUsages) {
+ if (IsNativeGpuMemoryBufferConfigurationSupported(format, usage))
+ configurations.insert(std::make_pair(format, usage));
+ }
+ }
+ }
+#endif // defined(USE_OZONE) || defined(OS_MACOSX)
+
+ return configurations;
+}
+
+uint32_t GetImageTextureTarget(gfx::BufferFormat format,
+ gfx::BufferUsage usage) {
+#if defined(USE_OZONE) || defined(OS_MACOSX)
+ GpuMemoryBufferConfigurationSet native_configurations =
+ GetNativeGpuMemoryBufferConfigurations();
+ if (native_configurations.find(std::make_pair(format, usage)) ==
+ native_configurations.end()) {
+ return GL_TEXTURE_2D;
+ }
+
+ switch (GetNativeGpuMemoryBufferType()) {
+ case gfx::OZONE_NATIVE_PIXMAP:
+ // GPU memory buffers shared with GL via EGLImages require
+ // GL_TEXTURE_EXTERNAL_OES.
+ return GL_TEXTURE_EXTERNAL_OES;
+ case gfx::IO_SURFACE_BUFFER:
+ // IOSurface backed images require GL_TEXTURE_RECTANGLE_ARB.
+ return GL_TEXTURE_RECTANGLE_ARB;
+ case gfx::SHARED_MEMORY_BUFFER:
+ case gfx::EMPTY_BUFFER:
+ break;
+ }
+ NOTREACHED();
+ return GL_TEXTURE_2D;
+#else // defined(USE_OZONE) || defined(OS_MACOSX)
+ return GL_TEXTURE_2D;
+#endif
+}
+
+} // namespace gpu
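GetImageTextureTarget() decides which GL texture target an image backed by a GPU memory buffer needs. A usage sketch; the format/usage pair is just an example:

#include <stdint.h>

#include "gpu/ipc/host/gpu_memory_buffer_support.h"
#include "ui/gfx/buffer_types.h"

// Illustrative only: pick the texture target before creating a GL image for
// a scanout-capable BGRA buffer. Non-native configurations fall back to
// GL_TEXTURE_2D; Ozone native pixmaps need GL_TEXTURE_EXTERNAL_OES and
// IOSurfaces need GL_TEXTURE_RECTANGLE_ARB.
uint32_t TargetForScanoutBGRA() {
  return gpu::GetImageTextureTarget(gfx::BufferFormat::BGRA_8888,
                                    gfx::BufferUsage::SCANOUT);
}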
diff --git a/chromium/gpu/ipc/host/gpu_memory_buffer_support.h b/chromium/gpu/ipc/host/gpu_memory_buffer_support.h
new file mode 100644
index 00000000000..14d396f527d
--- /dev/null
+++ b/chromium/gpu/ipc/host/gpu_memory_buffer_support.h
@@ -0,0 +1,46 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef GPU_IPC_HOST_GPU_MEMORY_BUFFER_SUPPORT_H_
+#define GPU_IPC_HOST_GPU_MEMORY_BUFFER_SUPPORT_H_
+
+#include "base/containers/hash_tables.h"
+#include "base/hash.h"
+#include "ui/gfx/buffer_types.h"
+
+namespace gpu {
+
+using GpuMemoryBufferConfigurationKey =
+ std::pair<gfx::BufferFormat, gfx::BufferUsage>;
+using GpuMemoryBufferConfigurationSet =
+ base::hash_set<GpuMemoryBufferConfigurationKey>;
+
+} // namespace gpu
+
+namespace BASE_HASH_NAMESPACE {
+
+template <>
+struct hash<gpu::GpuMemoryBufferConfigurationKey> {
+ size_t operator()(const gpu::GpuMemoryBufferConfigurationKey& key) const {
+ return base::HashInts(static_cast<int>(key.first),
+ static_cast<int>(key.second));
+ }
+};
+
+} // namespace BASE_HASH_NAMESPACE
+
+namespace gpu {
+
+bool AreNativeGpuMemoryBuffersEnabled();
+
+// Returns the set of supported configurations.
+GpuMemoryBufferConfigurationSet GetNativeGpuMemoryBufferConfigurations();
+
+// Returns the OpenGL target to use for image textures.
+uint32_t GetImageTextureTarget(gfx::BufferFormat format,
+ gfx::BufferUsage usage);
+
+} // namespace gpu
+
+#endif // GPU_IPC_HOST_GPU_MEMORY_BUFFER_SUPPORT_H_
diff --git a/chromium/gpu/ipc/host/gpu_switches.cc b/chromium/gpu/ipc/host/gpu_switches.cc
new file mode 100644
index 00000000000..1834e3c64b8
--- /dev/null
+++ b/chromium/gpu/ipc/host/gpu_switches.cc
@@ -0,0 +1,16 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "gpu/ipc/host/gpu_switches.h"
+
+namespace switches {
+
+// Enable native GPU memory buffer support when available.
+const char kEnableNativeGpuMemoryBuffers[] = "enable-native-gpu-memory-buffers";
+
+// Disables native GPU memory buffer support.
+const char kDisableNativeGpuMemoryBuffers[] =
+ "disable-native-gpu-memory-buffers";
+
+} // namespace switches
diff --git a/chromium/gpu/ipc/host/gpu_switches.h b/chromium/gpu/ipc/host/gpu_switches.h
new file mode 100644
index 00000000000..7f205af4f4a
--- /dev/null
+++ b/chromium/gpu/ipc/host/gpu_switches.h
@@ -0,0 +1,17 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Defines all the command-line switches used by gpu/ipc/host
+
+#ifndef GPU_IPC_HOST_GPU_SWITCHES_H_
+#define GPU_IPC_HOST_GPU_SWITCHES_H_
+
+namespace switches {
+
+extern const char kEnableNativeGpuMemoryBuffers[];
+extern const char kDisableNativeGpuMemoryBuffers[];
+
+} // namespace switches
+
+#endif // GPU_IPC_HOST_GPU_SWITCHES_H_
diff --git a/chromium/gpu/ipc/host/shader_disk_cache.cc b/chromium/gpu/ipc/host/shader_disk_cache.cc
new file mode 100644
index 00000000000..7d1ad889c70
--- /dev/null
+++ b/chromium/gpu/ipc/host/shader_disk_cache.cc
@@ -0,0 +1,629 @@
+// Copyright (c) 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "gpu/ipc/host/shader_disk_cache.h"
+
+#include "base/macros.h"
+#include "base/memory/ptr_util.h"
+#include "base/single_thread_task_runner.h"
+#include "base/threading/thread_checker.h"
+#include "gpu/command_buffer/common/constants.h"
+#include "net/base/cache_type.h"
+#include "net/base/io_buffer.h"
+#include "net/base/net_errors.h"
+
+namespace gpu {
+
+namespace {
+
+static const base::FilePath::CharType kGpuCachePath[] =
+ FILE_PATH_LITERAL("GPUCache");
+
+} // namespace
+
+// ShaderDiskCacheEntry handles the work of caching/updating the cached
+// shaders.
+class ShaderDiskCacheEntry : public base::ThreadChecker {
+ public:
+ ShaderDiskCacheEntry(ShaderDiskCache* cache,
+ const std::string& key,
+ const std::string& shader);
+ ~ShaderDiskCacheEntry();
+
+ void Cache();
+ void OnOpComplete(int rv);
+ void set_entry(disk_cache::Entry* entry) { entry_ = entry; }
+
+ private:
+ enum OpType {
+ OPEN_ENTRY,
+ WRITE_DATA,
+ CREATE_ENTRY,
+ };
+
+ int OpenCallback(int rv);
+ int WriteCallback(int rv);
+ int IOComplete(int rv);
+
+ ShaderDiskCache* cache_;
+ OpType op_type_;
+ std::string key_;
+ std::string shader_;
+ disk_cache::Entry* entry_;
+ base::WeakPtr<ShaderDiskCacheEntry> weak_ptr_;
+ base::WeakPtrFactory<ShaderDiskCacheEntry> weak_ptr_factory_;
+
+ DISALLOW_COPY_AND_ASSIGN(ShaderDiskCacheEntry);
+};
+
+// ShaderDiskReadHelper is used to load all of the cached shaders from the
+// disk cache and send them to the memory cache.
+class ShaderDiskReadHelper : public base::ThreadChecker {
+ public:
+ using ShaderLoadedCallback = ShaderDiskCache::ShaderLoadedCallback;
+ ShaderDiskReadHelper(ShaderDiskCache* cache,
+ const ShaderLoadedCallback& callback);
+ ~ShaderDiskReadHelper();
+
+ void LoadCache();
+ void OnOpComplete(int rv);
+ void set_entry(disk_cache::Entry* entry) { entry_ = entry; }
+
+ private:
+ enum OpType {
+ TERMINATE,
+ OPEN_NEXT,
+ OPEN_NEXT_COMPLETE,
+ READ_COMPLETE,
+ ITERATION_FINISHED
+ };
+
+ int OpenNextEntry();
+ int OpenNextEntryComplete(int rv);
+ int ReadComplete(int rv);
+ int IterationComplete(int rv);
+
+ ShaderDiskCache* cache_;
+ ShaderLoadedCallback shader_loaded_callback_;
+ OpType op_type_;
+ std::unique_ptr<disk_cache::Backend::Iterator> iter_;
+ scoped_refptr<net::IOBufferWithSize> buf_;
+ disk_cache::Entry* entry_;
+ base::WeakPtrFactory<ShaderDiskReadHelper> weak_ptr_factory_;
+
+ DISALLOW_COPY_AND_ASSIGN(ShaderDiskReadHelper);
+};
+
+class ShaderClearHelper : public base::ThreadChecker {
+ public:
+ ShaderClearHelper(ShaderCacheFactory* factory,
+ scoped_refptr<ShaderDiskCache> cache,
+ const base::FilePath& path,
+ const base::Time& delete_begin,
+ const base::Time& delete_end,
+ const base::Closure& callback);
+ ~ShaderClearHelper();
+
+ void Clear();
+
+ private:
+ enum OpType { TERMINATE, VERIFY_CACHE_SETUP, DELETE_CACHE };
+
+ void DoClearShaderCache(int rv);
+
+ ShaderCacheFactory* factory_;
+ scoped_refptr<ShaderDiskCache> cache_;
+ OpType op_type_;
+ base::FilePath path_;
+ base::Time delete_begin_;
+ base::Time delete_end_;
+ base::Closure callback_;
+ base::WeakPtrFactory<ShaderClearHelper> weak_ptr_factory_;
+
+ DISALLOW_COPY_AND_ASSIGN(ShaderClearHelper);
+};
+
+// When the cache is asked to open an entry, an Entry** is passed to it. The
+// underlying Entry* must stay alive for the duration of the call, so it is
+// owned by the callback. If the underlying state machine is deleted before
+// the callback runs, close the entry.
+template <typename T>
+void OnEntryOpenComplete(base::WeakPtr<T> state_machine,
+ std::unique_ptr<disk_cache::Entry*> entry,
+ int rv) {
+ if (!state_machine) {
+ if (rv == net::OK)
+ (*entry)->Close();
+ return;
+ }
+ state_machine->set_entry(*entry);
+ state_machine->OnOpComplete(rv);
+}
+
+////////////////////////////////////////////////////////////////////////////////
+// ShaderDiskCacheEntry
+
+ShaderDiskCacheEntry::ShaderDiskCacheEntry(ShaderDiskCache* cache,
+ const std::string& key,
+ const std::string& shader)
+ : cache_(cache),
+ op_type_(OPEN_ENTRY),
+ key_(key),
+ shader_(shader),
+ entry_(nullptr),
+ weak_ptr_factory_(this) {
+ weak_ptr_ = weak_ptr_factory_.GetWeakPtr();
+}
+
+ShaderDiskCacheEntry::~ShaderDiskCacheEntry() {
+ DCHECK(CalledOnValidThread());
+ if (entry_)
+ entry_->Close();
+}
+
+void ShaderDiskCacheEntry::Cache() {
+ DCHECK(CalledOnValidThread());
+
+ // The Entry* passed to the cache must stay alive even if this class is
+ // deleted, so store it in the callback.
+ auto entry = base::MakeUnique<disk_cache::Entry*>(nullptr);
+ disk_cache::Entry** closure_owned_entry_ptr = entry.get();
+ auto callback = base::Bind(&OnEntryOpenComplete<ShaderDiskCacheEntry>,
+ weak_ptr_factory_.GetWeakPtr(),
+ base::Passed(std::move(entry)));
+
+ int rv =
+ cache_->backend()->OpenEntry(key_, closure_owned_entry_ptr, callback);
+
+ if (rv != net::ERR_IO_PENDING) {
+ entry_ = *closure_owned_entry_ptr;
+ OnOpComplete(rv);
+ }
+}
+
+void ShaderDiskCacheEntry::OnOpComplete(int rv) {
+ DCHECK(CalledOnValidThread());
+ // The function calls inside the switch block below can end up destroying
+ // |this|. So hold on to a WeakPtr<>, and terminate the while loop if |this|
+ // has been destroyed.
+ auto weak_ptr = std::move(weak_ptr_);
+ do {
+ switch (op_type_) {
+ case OPEN_ENTRY:
+ rv = OpenCallback(rv);
+ break;
+ case CREATE_ENTRY:
+ rv = WriteCallback(rv);
+ break;
+ case WRITE_DATA:
+ rv = IOComplete(rv);
+ break;
+ }
+ } while (rv != net::ERR_IO_PENDING && weak_ptr);
+ if (weak_ptr)
+ weak_ptr_ = std::move(weak_ptr);
+}
+
+int ShaderDiskCacheEntry::OpenCallback(int rv) {
+ DCHECK(CalledOnValidThread());
+ if (rv == net::OK) {
+ cache_->backend()->OnExternalCacheHit(key_);
+ cache_->EntryComplete(this);
+ return rv;
+ }
+
+ op_type_ = CREATE_ENTRY;
+
+ // The Entry* passed to the cache must stay alive even if this class is
+ // deleted, so store it in the callback.
+ auto entry = base::MakeUnique<disk_cache::Entry*>(nullptr);
+ disk_cache::Entry** closure_owned_entry_ptr = entry.get();
+ auto callback = base::Bind(&OnEntryOpenComplete<ShaderDiskCacheEntry>,
+ weak_ptr_factory_.GetWeakPtr(),
+ base::Passed(std::move(entry)));
+
+ int create_rv =
+ cache_->backend()->CreateEntry(key_, closure_owned_entry_ptr, callback);
+
+ if (create_rv != net::ERR_IO_PENDING)
+ entry_ = *closure_owned_entry_ptr;
+ return create_rv;
+}
+
+int ShaderDiskCacheEntry::WriteCallback(int rv) {
+ DCHECK(CalledOnValidThread());
+ if (rv != net::OK) {
+ LOG(ERROR) << "Failed to create shader cache entry: " << rv;
+ cache_->EntryComplete(this);
+ return rv;
+ }
+
+ op_type_ = WRITE_DATA;
+ scoped_refptr<net::StringIOBuffer> io_buf = new net::StringIOBuffer(shader_);
+ return entry_->WriteData(1, 0, io_buf.get(), shader_.length(),
+ base::Bind(&ShaderDiskCacheEntry::OnOpComplete,
+ weak_ptr_factory_.GetWeakPtr()),
+ false);
+}
+
+int ShaderDiskCacheEntry::IOComplete(int rv) {
+ DCHECK(CalledOnValidThread());
+ cache_->EntryComplete(this);
+ return rv;
+}
+
+////////////////////////////////////////////////////////////////////////////////
+// ShaderDiskReadHelper
+
+ShaderDiskReadHelper::ShaderDiskReadHelper(ShaderDiskCache* cache,
+ const ShaderLoadedCallback& callback)
+ : cache_(cache),
+ shader_loaded_callback_(callback),
+ op_type_(OPEN_NEXT),
+ buf_(NULL),
+ entry_(NULL),
+ weak_ptr_factory_(this) {}
+
+ShaderDiskReadHelper::~ShaderDiskReadHelper() {
+ DCHECK(CalledOnValidThread());
+ if (entry_)
+ entry_->Close();
+ iter_ = nullptr;
+}
+
+void ShaderDiskReadHelper::LoadCache() {
+ DCHECK(CalledOnValidThread());
+ OnOpComplete(net::OK);
+}
+
+void ShaderDiskReadHelper::OnOpComplete(int rv) {
+ DCHECK(CalledOnValidThread());
+ do {
+ switch (op_type_) {
+ case OPEN_NEXT:
+ rv = OpenNextEntry();
+ break;
+ case OPEN_NEXT_COMPLETE:
+ rv = OpenNextEntryComplete(rv);
+ break;
+ case READ_COMPLETE:
+ rv = ReadComplete(rv);
+ break;
+ case ITERATION_FINISHED:
+ rv = IterationComplete(rv);
+ break;
+ case TERMINATE:
+ cache_->ReadComplete();
+        rv = net::ERR_IO_PENDING;  // Break the loop.
+ break;
+ }
+ } while (rv != net::ERR_IO_PENDING);
+}
+
+int ShaderDiskReadHelper::OpenNextEntry() {
+ DCHECK(CalledOnValidThread());
+ op_type_ = OPEN_NEXT_COMPLETE;
+ if (!iter_)
+ iter_ = cache_->backend()->CreateIterator();
+
+ // The Entry* passed to the cache must stay alive even if this class is
+ // deleted, so store it in the callback.
+ auto entry = base::MakeUnique<disk_cache::Entry*>(nullptr);
+ disk_cache::Entry** closure_owned_entry_ptr = entry.get();
+ auto callback = base::Bind(&OnEntryOpenComplete<ShaderDiskReadHelper>,
+ weak_ptr_factory_.GetWeakPtr(),
+ base::Passed(std::move(entry)));
+
+ int rv = iter_->OpenNextEntry(closure_owned_entry_ptr, callback);
+
+ if (rv != net::ERR_IO_PENDING)
+ entry_ = *closure_owned_entry_ptr;
+ return rv;
+}
+
+int ShaderDiskReadHelper::OpenNextEntryComplete(int rv) {
+ DCHECK(CalledOnValidThread());
+ if (rv == net::ERR_FAILED) {
+ iter_.reset();
+ op_type_ = ITERATION_FINISHED;
+ return net::OK;
+ }
+
+ if (rv < 0)
+ return rv;
+
+ op_type_ = READ_COMPLETE;
+ buf_ = new net::IOBufferWithSize(entry_->GetDataSize(1));
+ return entry_->ReadData(1, 0, buf_.get(), buf_->size(),
+ base::Bind(&ShaderDiskReadHelper::OnOpComplete,
+ weak_ptr_factory_.GetWeakPtr()));
+}
+
+int ShaderDiskReadHelper::ReadComplete(int rv) {
+ DCHECK(CalledOnValidThread());
+ if (rv && rv == buf_->size() && !shader_loaded_callback_.is_null()) {
+ shader_loaded_callback_.Run(entry_->GetKey(),
+ std::string(buf_->data(), buf_->size()));
+ }
+
+ buf_ = NULL;
+ entry_->Close();
+ entry_ = NULL;
+
+ op_type_ = OPEN_NEXT;
+ return net::OK;
+}
+
+int ShaderDiskReadHelper::IterationComplete(int rv) {
+ DCHECK(CalledOnValidThread());
+ iter_.reset();
+ op_type_ = TERMINATE;
+ return net::OK;
+}
+
+////////////////////////////////////////////////////////////////////////////////
+// ShaderClearHelper
+
+ShaderClearHelper::ShaderClearHelper(ShaderCacheFactory* factory,
+ scoped_refptr<ShaderDiskCache> cache,
+ const base::FilePath& path,
+ const base::Time& delete_begin,
+ const base::Time& delete_end,
+ const base::Closure& callback)
+ : factory_(factory),
+ cache_(std::move(cache)),
+ op_type_(VERIFY_CACHE_SETUP),
+ path_(path),
+ delete_begin_(delete_begin),
+ delete_end_(delete_end),
+ callback_(callback),
+ weak_ptr_factory_(this) {}
+
+ShaderClearHelper::~ShaderClearHelper() {
+ DCHECK(CalledOnValidThread());
+}
+
+void ShaderClearHelper::Clear() {
+ DCHECK(CalledOnValidThread());
+ DoClearShaderCache(net::OK);
+}
+
+void ShaderClearHelper::DoClearShaderCache(int rv) {
+ DCHECK(CalledOnValidThread());
+ while (rv != net::ERR_IO_PENDING) {
+ switch (op_type_) {
+ case VERIFY_CACHE_SETUP:
+ rv = cache_->SetAvailableCallback(
+ base::Bind(&ShaderClearHelper::DoClearShaderCache,
+ weak_ptr_factory_.GetWeakPtr()));
+ op_type_ = DELETE_CACHE;
+ break;
+ case DELETE_CACHE:
+ rv = cache_->Clear(delete_begin_, delete_end_,
+ base::Bind(&ShaderClearHelper::DoClearShaderCache,
+ weak_ptr_factory_.GetWeakPtr()));
+ op_type_ = TERMINATE;
+ break;
+ case TERMINATE:
+ callback_.Run();
+ // Calling CacheCleared() destroys |this|.
+ factory_->CacheCleared(path_);
+ rv = net::ERR_IO_PENDING; // Break the loop.
+ break;
+ }
+ }
+}
+
+////////////////////////////////////////////////////////////////////////////////
+// ShaderCacheFactory
+
+ShaderCacheFactory::ShaderCacheFactory(
+ scoped_refptr<base::SingleThreadTaskRunner> cache_task_runner)
+ : cache_task_runner_(std::move(cache_task_runner)) {}
+
+ShaderCacheFactory::~ShaderCacheFactory() {}
+
+void ShaderCacheFactory::SetCacheInfo(int32_t client_id,
+ const base::FilePath& path) {
+ DCHECK(CalledOnValidThread());
+ client_id_to_path_map_[client_id] = path;
+}
+
+void ShaderCacheFactory::RemoveCacheInfo(int32_t client_id) {
+ DCHECK(CalledOnValidThread());
+ client_id_to_path_map_.erase(client_id);
+}
+
+scoped_refptr<ShaderDiskCache> ShaderCacheFactory::Get(int32_t client_id) {
+ DCHECK(CalledOnValidThread());
+ ClientIdToPathMap::iterator iter = client_id_to_path_map_.find(client_id);
+ if (iter == client_id_to_path_map_.end())
+ return NULL;
+ return ShaderCacheFactory::GetByPath(iter->second);
+}
+
+scoped_refptr<ShaderDiskCache> ShaderCacheFactory::GetByPath(
+ const base::FilePath& path) {
+ DCHECK(CalledOnValidThread());
+ ShaderCacheMap::iterator iter = shader_cache_map_.find(path);
+ if (iter != shader_cache_map_.end())
+ return iter->second;
+
+ ShaderDiskCache* cache = new ShaderDiskCache(this, path);
+ cache->Init(cache_task_runner_);
+ return cache;
+}
+
+void ShaderCacheFactory::AddToCache(const base::FilePath& key,
+ ShaderDiskCache* cache) {
+ DCHECK(CalledOnValidThread());
+ shader_cache_map_[key] = cache;
+}
+
+void ShaderCacheFactory::RemoveFromCache(const base::FilePath& key) {
+ DCHECK(CalledOnValidThread());
+ shader_cache_map_.erase(key);
+}
+
+void ShaderCacheFactory::ClearByPath(const base::FilePath& path,
+ const base::Time& delete_begin,
+ const base::Time& delete_end,
+ const base::Closure& callback) {
+ DCHECK(CalledOnValidThread());
+ DCHECK(!callback.is_null());
+
+ auto helper = base::MakeUnique<ShaderClearHelper>(
+ this, GetByPath(path), path, delete_begin, delete_end, callback);
+
+  // We could receive requests to clear the same path with different
+  // begin/end times, so we keep a list of requests. If we haven't seen this
+  // path before, we kick off the clear and add it to the list. If we have
+  // seen it already, a clear is already running; we add this clear to the
+  // list and wait for any previous clears to finish.
+ ShaderClearMap::iterator iter = shader_clear_map_.find(path);
+ if (iter != shader_clear_map_.end()) {
+ iter->second.push(std::move(helper));
+ return;
+ }
+
+ // Insert the helper in the map before calling Clear(), since it can lead to a
+ // call back into CacheCleared().
+ ShaderClearHelper* helper_ptr = helper.get();
+ shader_clear_map_.insert(
+ std::pair<base::FilePath, ShaderClearQueue>(path, ShaderClearQueue()));
+ shader_clear_map_[path].push(std::move(helper));
+ helper_ptr->Clear();
+}
+
+void ShaderCacheFactory::CacheCleared(const base::FilePath& path) {
+ DCHECK(CalledOnValidThread());
+
+ ShaderClearMap::iterator iter = shader_clear_map_.find(path);
+ if (iter == shader_clear_map_.end()) {
+ LOG(ERROR) << "Completed clear but missing clear helper.";
+ return;
+ }
+
+ iter->second.pop();
+
+ // If there are remaining items in the list we trigger the Clear on the
+ // next one.
+ if (!iter->second.empty()) {
+ iter->second.front()->Clear();
+ return;
+ }
+
+ shader_clear_map_.erase(iter);
+}
+
+////////////////////////////////////////////////////////////////////////////////
+// ShaderDiskCache
+
+ShaderDiskCache::ShaderDiskCache(ShaderCacheFactory* factory,
+ const base::FilePath& cache_path)
+ : factory_(factory),
+ cache_available_(false),
+ cache_path_(cache_path),
+ is_initialized_(false) {
+ factory_->AddToCache(cache_path_, this);
+}
+
+ShaderDiskCache::~ShaderDiskCache() {
+ factory_->RemoveFromCache(cache_path_);
+}
+
+void ShaderDiskCache::Init(
+ scoped_refptr<base::SingleThreadTaskRunner> cache_task_runner) {
+ if (is_initialized_) {
+    NOTREACHED();  // Can't initialize disk cache twice.
+ return;
+ }
+ is_initialized_ = true;
+
+ int rv = disk_cache::CreateCacheBackend(
+ net::SHADER_CACHE, net::CACHE_BACKEND_DEFAULT,
+ cache_path_.Append(kGpuCachePath),
+ gpu::kDefaultMaxProgramCacheMemoryBytes, true, cache_task_runner, NULL,
+ &backend_, base::Bind(&ShaderDiskCache::CacheCreatedCallback, this));
+
+ if (rv == net::OK)
+ cache_available_ = true;
+}
+
+void ShaderDiskCache::Cache(const std::string& key, const std::string& shader) {
+ if (!cache_available_)
+ return;
+
+ auto shim = base::MakeUnique<ShaderDiskCacheEntry>(this, key, shader);
+ shim->Cache();
+ auto* raw_ptr = shim.get();
+ entries_.insert(std::make_pair(raw_ptr, std::move(shim)));
+}
+
+int ShaderDiskCache::Clear(const base::Time begin_time,
+ const base::Time end_time,
+ const net::CompletionCallback& completion_callback) {
+ int rv;
+ if (begin_time.is_null()) {
+ rv = backend_->DoomAllEntries(completion_callback);
+ } else {
+ rv =
+ backend_->DoomEntriesBetween(begin_time, end_time, completion_callback);
+ }
+ return rv;
+}
+
+int32_t ShaderDiskCache::Size() {
+ if (!cache_available_)
+ return -1;
+ return backend_->GetEntryCount();
+}
+
+int ShaderDiskCache::SetAvailableCallback(
+ const net::CompletionCallback& callback) {
+ if (cache_available_)
+ return net::OK;
+ available_callback_ = callback;
+ return net::ERR_IO_PENDING;
+}
+
+void ShaderDiskCache::CacheCreatedCallback(int rv) {
+ if (rv != net::OK) {
+ LOG(ERROR) << "Shader Cache Creation failed: " << rv;
+ return;
+ }
+ helper_ =
+ base::MakeUnique<ShaderDiskReadHelper>(this, shader_loaded_callback_);
+ helper_->LoadCache();
+}
+
+void ShaderDiskCache::EntryComplete(ShaderDiskCacheEntry* entry) {
+ entries_.erase(entry);
+ if (entries_.empty() && !cache_complete_callback_.is_null())
+ cache_complete_callback_.Run(net::OK);
+}
+
+void ShaderDiskCache::ReadComplete() {
+ helper_ = nullptr;
+
+ // The cache is considered available after we have finished reading any
+  // of the old cache values off disk. This prevents a potential race where
+  // we are reading from disk and executing a cache clear at the same time.
+ cache_available_ = true;
+ if (!available_callback_.is_null()) {
+ available_callback_.Run(net::OK);
+ available_callback_.Reset();
+ }
+}
+
+int ShaderDiskCache::SetCacheCompleteCallback(
+ const net::CompletionCallback& callback) {
+ if (entries_.empty()) {
+ return net::OK;
+ }
+ cache_complete_callback_ = callback;
+ return net::ERR_IO_PENDING;
+}
+
+} // namespace gpu
diff --git a/chromium/gpu/ipc/host/shader_disk_cache.h b/chromium/gpu/ipc/host/shader_disk_cache.h
new file mode 100644
index 00000000000..4080737d2fe
--- /dev/null
+++ b/chromium/gpu/ipc/host/shader_disk_cache.h
@@ -0,0 +1,157 @@
+// Copyright (c) 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef GPU_IPC_HOST_SHADER_DISK_CACHE_H_
+#define GPU_IPC_HOST_SHADER_DISK_CACHE_H_
+
+#include <stdint.h>
+
+#include <map>
+#include <queue>
+#include <string>
+#include <unordered_map>
+
+#include "base/files/file_path.h"
+#include "base/macros.h"
+#include "base/memory/ref_counted.h"
+#include "base/threading/thread_checker.h"
+#include "net/disk_cache/disk_cache.h"
+
+namespace gpu {
+
+class ShaderCacheFactory;
+class ShaderDiskCacheEntry;
+class ShaderDiskReadHelper;
+class ShaderClearHelper;
+
+// ShaderDiskCache is the interface to the on-disk cache for GL shaders.
+class ShaderDiskCache : public base::RefCounted<ShaderDiskCache> {
+ public:
+ using ShaderLoadedCallback =
+ base::Callback<void(const std::string&, const std::string&)>;
+
+ void set_shader_loaded_callback(const ShaderLoadedCallback& callback) {
+ shader_loaded_callback_ = callback;
+ }
+
+ // Store the |shader| into the cache under |key|.
+ void Cache(const std::string& key, const std::string& shader);
+
+ // Clear a range of entries. This supports unbounded deletes in either
+ // direction by using null Time values for either |begin_time| or |end_time|.
+ // The return value is a net error code. If this method returns
+ // ERR_IO_PENDING, the |completion_callback| will be invoked when the
+ // operation completes.
+ int Clear(const base::Time begin_time,
+ const base::Time end_time,
+ const net::CompletionCallback& completion_callback);
+
+  // Sets a callback for when the cache is available. If the cache is already
+  // available, the callback is not called and net::OK is returned. Otherwise
+  // net::ERR_IO_PENDING is returned and the callback is executed once the
+  // cache becomes available.
+ int SetAvailableCallback(const net::CompletionCallback& callback);
+
+ // Returns the number of elements currently in the cache.
+ int32_t Size();
+
+ // Set a callback notification for when all current entries have been
+ // written to the cache.
+ // The return value is a net error code. If this method returns
+ // ERR_IO_PENDING, the |callback| will be invoked when all entries have
+ // been written to the cache.
+ int SetCacheCompleteCallback(const net::CompletionCallback& callback);
+
+ private:
+ friend class base::RefCounted<ShaderDiskCache>;
+ friend class ShaderDiskCacheEntry;
+ friend class ShaderDiskReadHelper;
+ friend class ShaderCacheFactory;
+
+ ShaderDiskCache(ShaderCacheFactory* factory,
+ const base::FilePath& cache_path);
+ ~ShaderDiskCache();
+
+ void Init(scoped_refptr<base::SingleThreadTaskRunner> cache_task_runner);
+ void CacheCreatedCallback(int rv);
+
+ disk_cache::Backend* backend() { return backend_.get(); }
+
+ void EntryComplete(ShaderDiskCacheEntry* entry);
+ void ReadComplete();
+
+ ShaderCacheFactory* factory_;
+ bool cache_available_;
+ base::FilePath cache_path_;
+ bool is_initialized_;
+ net::CompletionCallback available_callback_;
+ net::CompletionCallback cache_complete_callback_;
+ ShaderLoadedCallback shader_loaded_callback_;
+
+ std::unique_ptr<disk_cache::Backend> backend_;
+
+ std::unique_ptr<ShaderDiskReadHelper> helper_;
+ std::unordered_map<ShaderDiskCacheEntry*,
+ std::unique_ptr<ShaderDiskCacheEntry>>
+ entries_;
+
+ DISALLOW_COPY_AND_ASSIGN(ShaderDiskCache);
+};
+
+// ShaderCacheFactory maintains a cache of ShaderDiskCache objects
+// so we only create one per profile directory.
+class ShaderCacheFactory : NON_EXPORTED_BASE(public base::ThreadChecker) {
+ public:
+ explicit ShaderCacheFactory(
+ scoped_refptr<base::SingleThreadTaskRunner> cache_task_runner);
+ ~ShaderCacheFactory();
+
+ // Clear the shader disk cache for the given |path|. This supports unbounded
+ // deletes in either direction by using null Time values for either
+ // |begin_time| or |end_time|. The |callback| will be executed when the
+ // clear is complete.
+ void ClearByPath(const base::FilePath& path,
+ const base::Time& begin_time,
+ const base::Time& end_time,
+ const base::Closure& callback);
+
+ // Retrieve the shader disk cache for the provided |client_id|.
+ scoped_refptr<ShaderDiskCache> Get(int32_t client_id);
+
+ // Set the |path| to be used for the disk cache for |client_id|.
+ void SetCacheInfo(int32_t client_id, const base::FilePath& path);
+
+ // Remove the path mapping for |client_id|.
+ void RemoveCacheInfo(int32_t client_id);
+
+ // Set the provided |cache| into the cache map for the given |path|.
+ void AddToCache(const base::FilePath& path, ShaderDiskCache* cache);
+
+ // Remove the provided |path| from our cache map.
+ void RemoveFromCache(const base::FilePath& path);
+
+ private:
+ friend class ShaderClearHelper;
+
+ scoped_refptr<base::SingleThreadTaskRunner> cache_task_runner_;
+
+ scoped_refptr<ShaderDiskCache> GetByPath(const base::FilePath& path);
+ void CacheCleared(const base::FilePath& path);
+
+ using ShaderCacheMap = std::map<base::FilePath, ShaderDiskCache*>;
+ ShaderCacheMap shader_cache_map_;
+
+ using ClientIdToPathMap = std::map<int32_t, base::FilePath>;
+ ClientIdToPathMap client_id_to_path_map_;
+
+ using ShaderClearQueue = std::queue<std::unique_ptr<ShaderClearHelper>>;
+ using ShaderClearMap = std::map<base::FilePath, ShaderClearQueue>;
+ ShaderClearMap shader_clear_map_;
+
+ DISALLOW_COPY_AND_ASSIGN(ShaderCacheFactory);
+};
+
+} // namespace gpu
+
+#endif // GPU_IPC_HOST_SHADER_DISK_CACHE_H_
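A usage sketch of the API above (illustrative; the function names, client id, and path are hypothetical, and the call pattern mirrors the unit test that follows):

  #include "base/bind.h"
  #include "base/files/file_path.h"
  #include "gpu/ipc/host/shader_disk_cache.h"

  // Called for every shader that is read back off disk while the cache
  // warms up.
  void OnShaderLoaded(const std::string& key, const std::string& shader) {
    // Forward the cached program binary to the GPU process program cache.
  }

  void SetUpShaderCache(gpu::ShaderCacheFactory* factory,
                        int32_t client_id,
                        const base::FilePath& profile_path) {
    factory->SetCacheInfo(client_id, profile_path);
    scoped_refptr<gpu::ShaderDiskCache> cache = factory->Get(client_id);
    if (!cache.get())
      return;
    // Replay previously cached shaders as they are read back from disk.
    cache->set_shader_loaded_callback(base::Bind(&OnShaderLoaded));
    // Later, when a program has been compiled and linked:
    //   cache->Cache(key, program_binary);
  }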
diff --git a/chromium/gpu/ipc/host/shader_disk_cache_unittest.cc b/chromium/gpu/ipc/host/shader_disk_cache_unittest.cc
new file mode 100644
index 00000000000..209b6304c67
--- /dev/null
+++ b/chromium/gpu/ipc/host/shader_disk_cache_unittest.cc
@@ -0,0 +1,109 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/files/scoped_temp_dir.h"
+#include "base/macros.h"
+#include "base/memory/ptr_util.h"
+#include "base/message_loop/message_loop.h"
+#include "base/threading/thread.h"
+#include "base/threading/thread_task_runner_handle.h"
+#include "gpu/ipc/host/shader_disk_cache.h"
+#include "net/base/test_completion_callback.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace gpu {
+namespace {
+
+const int kDefaultClientId = 42;
+const char kCacheKey[] = "key";
+const char kCacheValue[] = "cached value";
+
+} // namespace
+
+class ShaderDiskCacheTest : public testing::Test {
+ public:
+ ShaderDiskCacheTest()
+ : cache_thread_("CacheThread") {
+ base::Thread::Options options;
+ options.message_loop_type = base::MessageLoop::TYPE_IO;
+ CHECK(cache_thread_.StartWithOptions(options));
+ factory_ =
+ base::MakeUnique<ShaderCacheFactory>(cache_thread_.task_runner());
+ }
+
+ ~ShaderDiskCacheTest() override {}
+
+ const base::FilePath& cache_path() { return temp_dir_.GetPath(); }
+
+ void InitCache() {
+ ASSERT_TRUE(temp_dir_.CreateUniqueTempDir());
+ factory_->SetCacheInfo(kDefaultClientId, cache_path());
+ }
+
+ ShaderCacheFactory* factory() { return factory_.get(); }
+
+ private:
+ void TearDown() override { factory_->RemoveCacheInfo(kDefaultClientId); }
+
+ std::unique_ptr<ShaderCacheFactory> factory_;
+ base::ScopedTempDir temp_dir_;
+ base::Thread cache_thread_;
+ base::MessageLoopForIO message_loop_;
+
+ DISALLOW_COPY_AND_ASSIGN(ShaderDiskCacheTest);
+};
+
+TEST_F(ShaderDiskCacheTest, ClearsCache) {
+ InitCache();
+
+ scoped_refptr<ShaderDiskCache> cache = factory()->Get(kDefaultClientId);
+ ASSERT_TRUE(cache.get() != NULL);
+
+ net::TestCompletionCallback available_cb;
+ int rv = cache->SetAvailableCallback(available_cb.callback());
+ ASSERT_EQ(net::OK, available_cb.GetResult(rv));
+ EXPECT_EQ(0, cache->Size());
+
+ cache->Cache(kCacheKey, kCacheValue);
+
+ net::TestCompletionCallback complete_cb;
+ rv = cache->SetCacheCompleteCallback(complete_cb.callback());
+ ASSERT_EQ(net::OK, complete_cb.GetResult(rv));
+ EXPECT_EQ(1, cache->Size());
+
+ base::Time time;
+ net::TestCompletionCallback clear_cb;
+ rv = cache->Clear(time, time, clear_cb.callback());
+ ASSERT_EQ(net::OK, clear_cb.GetResult(rv));
+ EXPECT_EQ(0, cache->Size());
+}
+
+// For https://crbug.com/663589.
+TEST_F(ShaderDiskCacheTest, SafeToDeleteCacheMidEntryOpen) {
+ InitCache();
+
+ // Create a cache and wait for it to open.
+ scoped_refptr<ShaderDiskCache> cache = factory()->Get(kDefaultClientId);
+ ASSERT_TRUE(cache.get() != NULL);
+ net::TestCompletionCallback available_cb;
+ int rv = cache->SetAvailableCallback(available_cb.callback());
+ ASSERT_EQ(net::OK, available_cb.GetResult(rv));
+ EXPECT_EQ(0, cache->Size());
+
+ // Start writing an entry to the cache but delete it before the backend has
+ // finished opening the entry. There is a race here, so this usually (but
+ // not always) crashes if there is a problem.
+ cache->Cache(kCacheKey, kCacheValue);
+ cache = nullptr;
+
+ // Open a new cache (to pass time on the cache thread) and verify all is
+ // well.
+ cache = factory()->Get(kDefaultClientId);
+ ASSERT_TRUE(cache.get() != NULL);
+ net::TestCompletionCallback available_cb2;
+ int rv2 = cache->SetAvailableCallback(available_cb2.callback());
+ ASSERT_EQ(net::OK, available_cb2.GetResult(rv2));
+}
+
+} // namespace gpu
diff --git a/chromium/gpu/ipc/in_process_command_buffer.cc b/chromium/gpu/ipc/in_process_command_buffer.cc
index 54c0f0d3ebd..e400376138f 100644
--- a/chromium/gpu/ipc/in_process_command_buffer.cc
+++ b/chromium/gpu/ipc/in_process_command_buffer.cc
@@ -17,6 +17,7 @@
#include "base/lazy_instance.h"
#include "base/location.h"
#include "base/logging.h"
+#include "base/memory/ptr_util.h"
#include "base/memory/weak_ptr.h"
#include "base/numerics/safe_conversions.h"
#include "base/sequence_checker.h"
@@ -40,6 +41,8 @@
#include "gpu/command_buffer/service/service_utils.h"
#include "gpu/command_buffer/service/sync_point_manager.h"
#include "gpu/command_buffer/service/transfer_buffer_manager.h"
+#include "gpu/ipc/gpu_in_process_thread_service.h"
+#include "gpu/ipc/service/image_transport_surface.h"
#include "ui/gfx/geometry/size.h"
#include "ui/gl/gl_context.h"
#include "ui/gl/gl_image.h"
@@ -52,6 +55,10 @@
#include "base/process/process_handle.h"
#endif
+#if defined(OS_MACOSX)
+#include "gpu/ipc/client/gpu_process_hosted_ca_layer_tree_params.h"
+#endif
+
namespace gpu {
namespace {
@@ -66,27 +73,27 @@ static void RunTaskWithResult(base::Callback<T(void)> task,
completion->Signal();
}
-struct ScopedOrderNumberProcessor {
- ScopedOrderNumberProcessor(SyncPointOrderData* order_data, uint32_t order_num)
- : order_data_(order_data), order_num_(order_num) {
- order_data_->BeginProcessingOrderNumber(order_num_);
+class GpuInProcessThreadHolder : public base::Thread {
+ public:
+ GpuInProcessThreadHolder()
+ : base::Thread("GpuThread"),
+ sync_point_manager_(new SyncPointManager(false)) {
+ Start();
}
- ~ScopedOrderNumberProcessor() {
- order_data_->FinishProcessingOrderNumber(order_num_);
+ ~GpuInProcessThreadHolder() override { Stop(); }
+
+ const scoped_refptr<InProcessCommandBuffer::Service>& GetGpuThreadService() {
+ if (!gpu_thread_service_) {
+ gpu_thread_service_ = new GpuInProcessThreadService(
+ task_runner(), sync_point_manager_.get(), nullptr, nullptr);
+ }
+ return gpu_thread_service_;
}
private:
- SyncPointOrderData* order_data_;
- uint32_t order_num_;
-};
-
-struct GpuInProcessThreadHolder {
- GpuInProcessThreadHolder()
- : sync_point_manager(new SyncPointManager(false)),
- gpu_thread(new GpuInProcessThread(sync_point_manager.get())) {}
- std::unique_ptr<SyncPointManager> sync_point_manager;
- scoped_refptr<InProcessCommandBuffer::Service> gpu_thread;
+ std::unique_ptr<SyncPointManager> sync_point_manager_;
+ scoped_refptr<InProcessCommandBuffer::Service> gpu_thread_service_;
};
base::LazyInstance<GpuInProcessThreadHolder> g_default_service =
@@ -101,34 +108,6 @@ class ScopedEvent {
base::WaitableEvent* event_;
};
-base::SharedMemoryHandle ShareToGpuThread(
- base::SharedMemoryHandle source_handle) {
- return base::SharedMemory::DuplicateHandle(source_handle);
-}
-
-gfx::GpuMemoryBufferHandle ShareGpuMemoryBufferToGpuThread(
- const gfx::GpuMemoryBufferHandle& source_handle,
- bool* requires_sync_point) {
- switch (source_handle.type) {
- case gfx::SHARED_MEMORY_BUFFER: {
- gfx::GpuMemoryBufferHandle handle;
- handle.type = gfx::SHARED_MEMORY_BUFFER;
- handle.handle = ShareToGpuThread(source_handle.handle);
- handle.offset = source_handle.offset;
- handle.stride = source_handle.stride;
- *requires_sync_point = false;
- return handle;
- }
- case gfx::IO_SURFACE_BUFFER:
- case gfx::OZONE_NATIVE_PIXMAP:
- *requires_sync_point = true;
- return source_handle;
- default:
- NOTREACHED();
- return gfx::GpuMemoryBufferHandle();
- }
-}
-
scoped_refptr<InProcessCommandBuffer::Service> GetInitialService(
const scoped_refptr<InProcessCommandBuffer::Service>& service) {
if (service)
@@ -141,7 +120,7 @@ scoped_refptr<InProcessCommandBuffer::Service> GetInitialService(
// ThreadTaskRunnerHandle, which will re-add a new task to the AtExitManager,
// which causes a deadlock because it's already locked.
base::ThreadTaskRunnerHandle::IsSet();
- return g_default_service.Get().gpu_thread;
+ return g_default_service.Get().GetGpuThreadService();
}
} // anonymous namespace
@@ -153,6 +132,13 @@ InProcessCommandBuffer::Service::Service(const GpuPreferences& gpu_preferences)
: gpu_preferences_(gpu_preferences),
gpu_driver_bug_workarounds_(base::CommandLine::ForCurrentProcess()) {}
+InProcessCommandBuffer::Service::Service(
+ gpu::gles2::MailboxManager* mailbox_manager,
+ scoped_refptr<gl::GLShareGroup> share_group)
+ : gpu_driver_bug_workarounds_(base::CommandLine::ForCurrentProcess()),
+ mailbox_manager_(mailbox_manager),
+ share_group_(share_group) {}
+
InProcessCommandBuffer::Service::~Service() {}
const gpu::GpuPreferences& InProcessCommandBuffer::Service::gpu_preferences() {
@@ -183,11 +169,14 @@ gpu::gles2::ProgramCache* InProcessCommandBuffer::Service::program_cache() {
(gl::g_driver_gl.ext.b_GL_ARB_get_program_binary ||
gl::g_driver_gl.ext.b_GL_OES_get_program_binary) &&
!gpu_preferences().disable_gpu_program_cache) {
+ const GpuDriverBugWorkarounds& workarounds = gpu_driver_bug_workarounds_;
bool disable_disk_cache =
gpu_preferences_.disable_gpu_shader_disk_cache ||
- gpu_driver_bug_workarounds_.disable_program_disk_cache;
+ workarounds.disable_program_disk_cache;
program_cache_.reset(new gles2::MemoryProgramCache(
- gpu_preferences_.gpu_program_cache_size, disable_disk_cache));
+ gpu_preferences_.gpu_program_cache_size,
+ disable_disk_cache,
+ workarounds.disable_program_caching_for_transform_feedback));
}
return program_cache_.get();
}
@@ -251,7 +240,7 @@ void InProcessCommandBuffer::PumpCommandsOnGpuThread() {
bool InProcessCommandBuffer::Initialize(
scoped_refptr<gl::GLSurface> surface,
bool is_offscreen,
- gfx::AcceleratedWidget window,
+ SurfaceHandle window,
const gles2::ContextCreationAttribHelper& attribs,
InProcessCommandBuffer* share_group,
GpuMemoryBufferManager* gpu_memory_buffer_manager,
@@ -286,8 +275,8 @@ bool InProcessCommandBuffer::Initialize(
base::WaitableEvent::ResetPolicy::MANUAL,
base::WaitableEvent::InitialState::NOT_SIGNALED);
bool result = false;
- QueueTask(
- base::Bind(&RunTaskWithResult<bool>, init_task, &result, &completion));
+ QueueTask(true, base::Bind(&RunTaskWithResult<bool>, init_task, &result,
+ &completion));
completion.Wait();
gpu_memory_buffer_manager_ = gpu_memory_buffer_manager;
@@ -320,14 +309,17 @@ bool InProcessCommandBuffer::InitializeOnGpuThread(
bool bind_generates_resource = false;
scoped_refptr<gles2::FeatureInfo> feature_info =
new gles2::FeatureInfo(service_->gpu_driver_bug_workarounds());
- decoder_.reset(gles2::GLES2Decoder::Create(
+
+ context_group_ =
params.context_group
? params.context_group->decoder_->GetContextGroup()
: new gles2::ContextGroup(
service_->gpu_preferences(), service_->mailbox_manager(), NULL,
service_->shader_translator_cache(),
service_->framebuffer_completeness_cache(), feature_info,
- bind_generates_resource, nullptr, nullptr)));
+ bind_generates_resource, nullptr, nullptr);
+
+ decoder_.reset(gles2::GLES2Decoder::Create(context_group_.get()));
executor_.reset(new CommandExecutor(command_buffer.get(), decoder_.get(),
decoder_.get()));
@@ -338,10 +330,18 @@ bool InProcessCommandBuffer::InitializeOnGpuThread(
decoder_->set_engine(executor_.get());
if (!surface_.get()) {
- if (params.is_offscreen)
+ if (params.is_offscreen) {
surface_ = gl::init::CreateOffscreenGLSurface(gfx::Size());
- else
- surface_ = gl::init::CreateViewGLSurface(params.window);
+ } else {
+ surface_ = ImageTransportSurface::CreateNativeSurface(
+ gpu_thread_weak_ptr_factory_.GetWeakPtr(), params.window,
+ gl::GLSurfaceFormat());
+ if (!surface_ || !surface_->Initialize(gl::GLSurfaceFormat())) {
+ surface_ = nullptr;
+ DLOG(ERROR) << "Failed to create surface.";
+ return false;
+ }
+ }
}
if (!surface_.get()) {
@@ -444,8 +444,8 @@ void InProcessCommandBuffer::Destroy() {
bool result = false;
base::Callback<bool(void)> destroy_task = base::Bind(
&InProcessCommandBuffer::DestroyOnGpuThread, base::Unretained(this));
- QueueTask(
- base::Bind(&RunTaskWithResult<bool>, destroy_task, &result, &completion));
+ QueueTask(true, base::Bind(&RunTaskWithResult<bool>, destroy_task, &result,
+ &completion));
completion.Wait();
}
@@ -468,6 +468,10 @@ bool InProcessCommandBuffer::DestroyOnGpuThread() {
}
gl_share_group_ = nullptr;
+ base::AutoLock lock(task_queue_lock_);
+ std::queue<std::unique_ptr<GpuTask>> empty;
+ task_queue_.swap(empty);
+
return true;
}
@@ -496,52 +500,69 @@ void InProcessCommandBuffer::OnContextLost() {
gpu_control_client_->OnGpuControlLostContext();
}
-CommandBuffer::State InProcessCommandBuffer::GetStateFast() {
- CheckSequencedThread();
- base::AutoLock lock(state_after_last_flush_lock_);
- if (state_after_last_flush_.generation - last_state_.generation < 0x80000000U)
- last_state_ = state_after_last_flush_;
- return last_state_;
+void InProcessCommandBuffer::QueueTask(bool out_of_order,
+ const base::Closure& task) {
+ if (out_of_order) {
+ service_->ScheduleTask(task);
+ return;
+ }
+ // Release the |task_queue_lock_| before calling ScheduleTask because
+ // the callback may get called immediately and attempt to acquire the lock.
+ SyncPointManager* sync_manager = service_->sync_point_manager();
+ uint32_t order_num =
+ sync_point_order_data_->GenerateUnprocessedOrderNumber(sync_manager);
+ {
+ base::AutoLock lock(task_queue_lock_);
+ task_queue_.push(base::MakeUnique<GpuTask>(task, order_num));
+ }
+ service_->ScheduleTask(base::Bind(
+ &InProcessCommandBuffer::ProcessTasksOnGpuThread, gpu_thread_weak_ptr_));
+}
+
+void InProcessCommandBuffer::ProcessTasksOnGpuThread() {
+ while (executor_->scheduled()) {
+ base::AutoLock lock(task_queue_lock_);
+ if (task_queue_.empty())
+ break;
+ GpuTask* task = task_queue_.front().get();
+ sync_point_order_data_->BeginProcessingOrderNumber(task->order_number);
+ task->callback.Run();
+ if (!executor_->scheduled() && !service_->BlockThreadOnWaitSyncToken()) {
+ sync_point_order_data_->PauseProcessingOrderNumber(task->order_number);
+ return;
+ }
+ sync_point_order_data_->FinishProcessingOrderNumber(task->order_number);
+ task_queue_.pop();
+ }
}
CommandBuffer::State InProcessCommandBuffer::GetLastState() {
CheckSequencedThread();
+ base::AutoLock lock(last_state_lock_);
return last_state_;
}
-int32_t InProcessCommandBuffer::GetLastToken() {
+void InProcessCommandBuffer::UpdateLastStateOnGpuThread() {
CheckSequencedThread();
- GetStateFast();
- return last_state_.token;
+ command_buffer_lock_.AssertAcquired();
+ base::AutoLock lock(last_state_lock_);
+ State state = command_buffer_->GetLastState();
+ if (state.generation - last_state_.generation < 0x80000000U)
+ last_state_ = state;
}
-void InProcessCommandBuffer::FlushOnGpuThread(int32_t put_offset,
- uint32_t order_num) {
+void InProcessCommandBuffer::FlushOnGpuThread(int32_t put_offset) {
CheckSequencedThread();
ScopedEvent handle_flush(&flush_event_);
base::AutoLock lock(command_buffer_lock_);
- {
- ScopedOrderNumberProcessor scoped_order_num(sync_point_order_data_.get(),
- order_num);
- command_buffer_->Flush(put_offset);
- {
- // Update state before signaling the flush event.
- base::AutoLock lock(state_after_last_flush_lock_);
- state_after_last_flush_ = command_buffer_->GetLastState();
- }
-
- // Currently the in process command buffer does not support being
- // descheduled, if it does we would need to back off on calling the finish
- // processing number function until the message is rescheduled and finished
- // processing. This DCHECK is to enforce this.
- DCHECK(error::IsError(state_after_last_flush_.error) ||
- put_offset == state_after_last_flush_.get_offset);
- }
+ command_buffer_->Flush(put_offset);
+ // Update state before signaling the flush event.
+ UpdateLastStateOnGpuThread();
// If we've processed all pending commands but still have pending queries,
// pump idle work until the query is passed.
- if (put_offset == state_after_last_flush_.get_offset &&
+ if (put_offset == command_buffer_->GetLastState().get_offset &&
(executor_->HasMoreIdleWork() || executor_->HasPendingQueries())) {
ScheduleDelayedWorkOnGpuThread();
}
@@ -572,19 +593,16 @@ void InProcessCommandBuffer::ScheduleDelayedWorkOnGpuThread() {
void InProcessCommandBuffer::Flush(int32_t put_offset) {
CheckSequencedThread();
- if (last_state_.error != gpu::error::kNoError)
+ if (GetLastState().error != gpu::error::kNoError)
return;
if (last_put_offset_ == put_offset)
return;
- SyncPointManager* sync_manager = service_->sync_point_manager();
- const uint32_t order_num =
- sync_point_order_data_->GenerateUnprocessedOrderNumber(sync_manager);
last_put_offset_ = put_offset;
base::Closure task = base::Bind(&InProcessCommandBuffer::FlushOnGpuThread,
- gpu_thread_weak_ptr_, put_offset, order_num);
- QueueTask(task);
+ gpu_thread_weak_ptr_, put_offset);
+ QueueTask(false, task);
flushed_fence_sync_release_ = next_fence_sync_release_ - 1;
}
@@ -593,28 +611,34 @@ void InProcessCommandBuffer::OrderingBarrier(int32_t put_offset) {
Flush(put_offset);
}
-void InProcessCommandBuffer::WaitForTokenInRange(int32_t start, int32_t end) {
+CommandBuffer::State InProcessCommandBuffer::WaitForTokenInRange(int32_t start,
+ int32_t end) {
CheckSequencedThread();
- while (!InRange(start, end, GetLastToken()) &&
- last_state_.error == gpu::error::kNoError)
+ State last_state = GetLastState();
+ while (!InRange(start, end, last_state.token) &&
+ last_state.error == gpu::error::kNoError) {
flush_event_.Wait();
+ last_state = GetLastState();
+ }
+ return last_state;
}
-void InProcessCommandBuffer::WaitForGetOffsetInRange(int32_t start,
- int32_t end) {
+CommandBuffer::State InProcessCommandBuffer::WaitForGetOffsetInRange(
+ int32_t start,
+ int32_t end) {
CheckSequencedThread();
-
- GetStateFast();
- while (!InRange(start, end, last_state_.get_offset) &&
- last_state_.error == gpu::error::kNoError) {
+ State last_state = GetLastState();
+ while (!InRange(start, end, last_state.get_offset) &&
+ last_state.error == gpu::error::kNoError) {
flush_event_.Wait();
- GetStateFast();
+ last_state = GetLastState();
}
+ return last_state;
}
void InProcessCommandBuffer::SetGetBuffer(int32_t shm_id) {
CheckSequencedThread();
- if (last_state_.error != gpu::error::kNoError)
+ if (GetLastState().error != gpu::error::kNoError)
return;
base::WaitableEvent completion(
@@ -623,13 +647,10 @@ void InProcessCommandBuffer::SetGetBuffer(int32_t shm_id) {
base::Closure task =
base::Bind(&InProcessCommandBuffer::SetGetBufferOnGpuThread,
base::Unretained(this), shm_id, &completion);
- QueueTask(task);
+ QueueTask(false, task);
completion.Wait();
- {
- base::AutoLock lock(state_after_last_flush_lock_);
- state_after_last_flush_ = command_buffer_->GetLastState();
- }
+ last_put_offset_ = 0;
}
void InProcessCommandBuffer::SetGetBufferOnGpuThread(
@@ -637,7 +658,7 @@ void InProcessCommandBuffer::SetGetBufferOnGpuThread(
base::WaitableEvent* completion) {
base::AutoLock lock(command_buffer_lock_);
command_buffer_->SetGetBuffer(shm_id);
- last_put_offset_ = 0;
+ UpdateLastStateOnGpuThread();
completion->Signal();
}
@@ -655,7 +676,7 @@ void InProcessCommandBuffer::DestroyTransferBuffer(int32_t id) {
base::Bind(&InProcessCommandBuffer::DestroyTransferBufferOnGpuThread,
base::Unretained(this), id);
- QueueTask(task);
+ QueueTask(false, task);
}
void InProcessCommandBuffer::DestroyTransferBufferOnGpuThread(int32_t id) {
@@ -692,13 +713,9 @@ int32_t InProcessCommandBuffer::CreateImage(ClientBuffer buffer,
// This handle is owned by the GPU thread and must be passed to it or it
// will leak. In other words, do not early out on error between here and the
// queuing of the CreateImage task below.
- bool requires_sync_point = false;
- gfx::GpuMemoryBufferHandle handle = ShareGpuMemoryBufferToGpuThread(
- gpu_memory_buffer->GetHandle(), &requires_sync_point);
-
- SyncPointManager* sync_manager = service_->sync_point_manager();
- const uint32_t order_num =
- sync_point_order_data_->GenerateUnprocessedOrderNumber(sync_manager);
+ gfx::GpuMemoryBufferHandle handle =
+ gfx::CloneHandleForIPC(gpu_memory_buffer->GetHandle());
+ bool requires_sync_point = handle.type == gfx::IO_SURFACE_BUFFER;
uint64_t fence_sync = 0;
if (requires_sync_point) {
@@ -708,12 +725,13 @@ int32_t InProcessCommandBuffer::CreateImage(ClientBuffer buffer,
DCHECK_EQ(fence_sync - 1, flushed_fence_sync_release_);
}
- QueueTask(base::Bind(
- &InProcessCommandBuffer::CreateImageOnGpuThread, base::Unretained(this),
- new_id, handle, gfx::Size(base::checked_cast<int>(width),
- base::checked_cast<int>(height)),
- gpu_memory_buffer->GetFormat(),
- base::checked_cast<uint32_t>(internalformat), order_num, fence_sync));
+ QueueTask(false, base::Bind(&InProcessCommandBuffer::CreateImageOnGpuThread,
+ base::Unretained(this), new_id, handle,
+ gfx::Size(base::checked_cast<int>(width),
+ base::checked_cast<int>(height)),
+ gpu_memory_buffer->GetFormat(),
+ base::checked_cast<uint32_t>(internalformat),
+ fence_sync));
if (fence_sync) {
flushed_fence_sync_release_ = fence_sync;
@@ -733,10 +751,7 @@ void InProcessCommandBuffer::CreateImageOnGpuThread(
const gfx::Size& size,
gfx::BufferFormat format,
uint32_t internalformat,
- uint32_t order_num,
uint64_t fence_sync) {
- ScopedOrderNumberProcessor scoped_order_num(sync_point_order_data_.get(),
- order_num);
if (!decoder_)
return;
@@ -795,8 +810,8 @@ void InProcessCommandBuffer::CreateImageOnGpuThread(
void InProcessCommandBuffer::DestroyImage(int32_t id) {
CheckSequencedThread();
- QueueTask(base::Bind(&InProcessCommandBuffer::DestroyImageOnGpuThread,
- base::Unretained(this), id));
+ QueueTask(false, base::Bind(&InProcessCommandBuffer::DestroyImageOnGpuThread,
+ base::Unretained(this), id));
}
void InProcessCommandBuffer::DestroyImageOnGpuThread(int32_t id) {
@@ -822,7 +837,7 @@ int32_t InProcessCommandBuffer::CreateGpuMemoryBufferImage(
DCHECK(gpu_memory_buffer_manager_);
std::unique_ptr<gfx::GpuMemoryBuffer> buffer(
- gpu_memory_buffer_manager_->AllocateGpuMemoryBuffer(
+ gpu_memory_buffer_manager_->CreateGpuMemoryBuffer(
gfx::Size(base::checked_cast<int>(width),
base::checked_cast<int>(height)),
gpu::DefaultBufferFormatForImageFormat(internalformat),
@@ -850,6 +865,7 @@ bool InProcessCommandBuffer::WaitFenceSyncOnGpuThread(
gpu::CommandBufferNamespace namespace_id,
gpu::CommandBufferId command_buffer_id,
uint64_t release) {
+ DCHECK(!waiting_for_sync_point_);
gpu::SyncPointManager* sync_point_manager = service_->sync_point_manager();
DCHECK(sync_point_manager);
@@ -860,28 +876,77 @@ bool InProcessCommandBuffer::WaitFenceSyncOnGpuThread(
if (!release_state)
return true;
- if (!release_state->IsFenceSyncReleased(release)) {
- // Use waitable event which is signalled when the release fence is released.
- sync_point_client_->Wait(
- release_state.get(), release,
- base::Bind(&base::WaitableEvent::Signal,
- base::Unretained(&fence_sync_wait_event_)));
- fence_sync_wait_event_.Wait();
+ if (service_->BlockThreadOnWaitSyncToken()) {
+ if (!release_state->IsFenceSyncReleased(release)) {
+ // Use waitable event which is signalled when the release fence is
+ // released.
+ sync_point_client_->Wait(
+ release_state.get(), release,
+ base::Bind(&base::WaitableEvent::Signal,
+ base::Unretained(&fence_sync_wait_event_)));
+ fence_sync_wait_event_.Wait();
+ }
+
+ gles2::MailboxManager* mailbox_manager =
+ decoder_->GetContextGroup()->mailbox_manager();
+ SyncToken sync_token(namespace_id, 0, command_buffer_id, release);
+ mailbox_manager->PullTextureUpdates(sync_token);
+ return true;
}
+ if (release_state->IsFenceSyncReleased(release)) {
+ gles2::MailboxManager* mailbox_manager =
+ decoder_->GetContextGroup()->mailbox_manager();
+ SyncToken sync_token(namespace_id, 0, command_buffer_id, release);
+ mailbox_manager->PullTextureUpdates(sync_token);
+ return true;
+ }
+
+ waiting_for_sync_point_ = true;
+ sync_point_client_->Wait(
+ release_state.get(), release,
+ base::Bind(&InProcessCommandBuffer::OnWaitFenceSyncCompleted,
+ gpu_thread_weak_ptr_factory_.GetWeakPtr(), namespace_id,
+ command_buffer_id, release));
+
+ if (!waiting_for_sync_point_)
+ return true;
+
+ executor_->SetScheduled(false);
+ return false;
+}
+
+void InProcessCommandBuffer::OnWaitFenceSyncCompleted(
+ CommandBufferNamespace namespace_id,
+ CommandBufferId command_buffer_id,
+ uint64_t release) {
+ DCHECK(waiting_for_sync_point_);
gles2::MailboxManager* mailbox_manager =
decoder_->GetContextGroup()->mailbox_manager();
SyncToken sync_token(namespace_id, 0, command_buffer_id, release);
mailbox_manager->PullTextureUpdates(sync_token);
- return true;
+ waiting_for_sync_point_ = false;
+ executor_->SetScheduled(true);
+ QueueTask(false, base::Bind(&InProcessCommandBuffer::FlushOnGpuThread,
+ gpu_thread_weak_ptr_, last_put_offset_));
}
void InProcessCommandBuffer::DescheduleUntilFinishedOnGpuThread() {
- NOTIMPLEMENTED();
+ if (!service_->BlockThreadOnWaitSyncToken()) {
+ DCHECK(executor_->scheduled());
+ DCHECK(executor_->HasPollingWork());
+
+ executor_->SetScheduled(false);
+ }
}
void InProcessCommandBuffer::RescheduleAfterFinishedOnGpuThread() {
- NOTIMPLEMENTED();
+ if (!service_->BlockThreadOnWaitSyncToken()) {
+ DCHECK(!executor_->scheduled());
+
+ executor_->SetScheduled(true);
+ ProcessTasksOnGpuThread();
+ }
}
void InProcessCommandBuffer::SignalSyncTokenOnGpuThread(
@@ -906,9 +971,9 @@ void InProcessCommandBuffer::SignalSyncTokenOnGpuThread(
void InProcessCommandBuffer::SignalQuery(unsigned query_id,
const base::Closure& callback) {
CheckSequencedThread();
- QueueTask(base::Bind(&InProcessCommandBuffer::SignalQueryOnGpuThread,
- base::Unretained(this), query_id,
- WrapCallback(callback)));
+ QueueTask(false, base::Bind(&InProcessCommandBuffer::SignalQueryOnGpuThread,
+ base::Unretained(this), query_id,
+ WrapCallback(callback)));
}
void InProcessCommandBuffer::SignalQueryOnGpuThread(
@@ -961,12 +1026,17 @@ bool InProcessCommandBuffer::IsFenceSyncFlushReceived(uint64_t release) {
return IsFenceSyncFlushed(release);
}
+bool InProcessCommandBuffer::IsFenceSyncReleased(uint64_t release) {
+ return release <= GetLastState().release_count;
+}
+
void InProcessCommandBuffer::SignalSyncToken(const SyncToken& sync_token,
const base::Closure& callback) {
CheckSequencedThread();
- QueueTask(base::Bind(&InProcessCommandBuffer::SignalSyncTokenOnGpuThread,
- base::Unretained(this), sync_token,
- WrapCallback(callback)));
+ QueueTask(
+ true,
+ base::Bind(&InProcessCommandBuffer::SignalSyncTokenOnGpuThread,
+ base::Unretained(this), sync_token, WrapCallback(callback)));
}
bool InProcessCommandBuffer::CanWaitUnverifiedSyncToken(
@@ -974,9 +1044,92 @@ bool InProcessCommandBuffer::CanWaitUnverifiedSyncToken(
return sync_token->namespace_id() == GetNamespaceID();
}
-gpu::error::Error InProcessCommandBuffer::GetLastError() {
- CheckSequencedThread();
- return last_state_.error;
+#if defined(OS_WIN)
+void InProcessCommandBuffer::DidCreateAcceleratedSurfaceChildWindow(
+ SurfaceHandle parent_window,
+ SurfaceHandle child_window) {
+ // TODO(fsamuel): Implement this.
+}
+#endif
+
+void InProcessCommandBuffer::DidSwapBuffersComplete(
+ SwapBuffersCompleteParams params) {
+ if (!origin_task_runner_) {
+ DidSwapBuffersCompleteOnOriginThread(std::move(params));
+ return;
+ }
+ origin_task_runner_->PostTask(
+ FROM_HERE,
+ base::Bind(&InProcessCommandBuffer::DidSwapBuffersCompleteOnOriginThread,
+ client_thread_weak_ptr_, base::Passed(&params)));
+}
+
+const gles2::FeatureInfo* InProcessCommandBuffer::GetFeatureInfo() const {
+ return context_group_->feature_info();
+}
+
+void InProcessCommandBuffer::SetLatencyInfoCallback(
+ const LatencyInfoCallback& callback) {
+ // TODO(fsamuel): Implement this.
+}
+
+void InProcessCommandBuffer::UpdateVSyncParameters(base::TimeTicks timebase,
+ base::TimeDelta interval) {
+ if (!origin_task_runner_) {
+ UpdateVSyncParametersOnOriginThread(timebase, interval);
+ return;
+ }
+ origin_task_runner_->PostTask(
+ FROM_HERE,
+ base::Bind(&InProcessCommandBuffer::UpdateVSyncParametersOnOriginThread,
+ client_thread_weak_ptr_, timebase, interval));
+}
+
+void InProcessCommandBuffer::DidSwapBuffersCompleteOnOriginThread(
+ SwapBuffersCompleteParams params) {
+#if defined(OS_MACOSX)
+ gpu::GpuProcessHostedCALayerTreeParamsMac params_mac;
+ params_mac.ca_context_id = params.ca_context_id;
+ params_mac.fullscreen_low_power_ca_context_valid =
+ params.fullscreen_low_power_ca_context_valid;
+ params_mac.fullscreen_low_power_ca_context_id =
+ params.fullscreen_low_power_ca_context_id;
+ params_mac.io_surface.reset(IOSurfaceLookupFromMachPort(params.io_surface));
+ params_mac.pixel_size = params.pixel_size;
+ params_mac.scale_factor = params.scale_factor;
+ params_mac.responses = std::move(params.in_use_responses);
+ gpu::GpuProcessHostedCALayerTreeParamsMac* mac_frame_ptr = &params_mac;
+#else
+ gpu::GpuProcessHostedCALayerTreeParamsMac* mac_frame_ptr = nullptr;
+#endif
+ if (!swap_buffers_completion_callback_.is_null()) {
+ if (!ui::LatencyInfo::Verify(
+ params.latency_info,
+ "InProcessCommandBuffer::DidSwapBuffersComplete")) {
+ swap_buffers_completion_callback_.Run(std::vector<ui::LatencyInfo>(),
+ params.result, mac_frame_ptr);
+ } else {
+ swap_buffers_completion_callback_.Run(params.latency_info, params.result,
+ mac_frame_ptr);
+ }
+ }
+}
+
+void InProcessCommandBuffer::UpdateVSyncParametersOnOriginThread(
+ base::TimeTicks timebase,
+ base::TimeDelta interval) {
+ if (!update_vsync_parameters_completion_callback_.is_null())
+ update_vsync_parameters_completion_callback_.Run(timebase, interval);
+}
+
+void InProcessCommandBuffer::SetSwapBuffersCompletionCallback(
+ const SwapBuffersCompletionCallback& callback) {
+ swap_buffers_completion_callback_ = callback;
+}
+
+void InProcessCommandBuffer::SetUpdateVSyncParametersCallback(
+ const UpdateVSyncParametersCallback& callback) {
+ update_vsync_parameters_completion_callback_ = callback;
}
namespace {
@@ -1015,55 +1168,10 @@ base::Closure InProcessCommandBuffer::WrapCallback(
return wrapped_callback;
}
-GpuInProcessThread::GpuInProcessThread(SyncPointManager* sync_point_manager)
- : base::Thread("GpuThread"), sync_point_manager_(sync_point_manager) {
- Start();
-}
-
-GpuInProcessThread::~GpuInProcessThread() {
- Stop();
-}
-
-void GpuInProcessThread::AddRef() const {
- base::RefCountedThreadSafe<GpuInProcessThread>::AddRef();
-}
-void GpuInProcessThread::Release() const {
- base::RefCountedThreadSafe<GpuInProcessThread>::Release();
-}
-
-void GpuInProcessThread::ScheduleTask(const base::Closure& task) {
- task_runner()->PostTask(FROM_HERE, task);
-}
+InProcessCommandBuffer::GpuTask::GpuTask(const base::Closure& callback,
+ uint32_t order_number)
+ : callback(callback), order_number(order_number) {}
-void GpuInProcessThread::ScheduleDelayedWork(const base::Closure& callback) {
- // Match delay with GpuCommandBufferStub.
- task_runner()->PostDelayedTask(FROM_HERE, callback,
- base::TimeDelta::FromMilliseconds(2));
-}
-
-bool GpuInProcessThread::UseVirtualizedGLContexts() {
- return false;
-}
-
-scoped_refptr<gles2::ShaderTranslatorCache>
-GpuInProcessThread::shader_translator_cache() {
- if (!shader_translator_cache_.get()) {
- shader_translator_cache_ =
- new gpu::gles2::ShaderTranslatorCache(gpu_preferences());
- }
- return shader_translator_cache_;
-}
-
-scoped_refptr<gles2::FramebufferCompletenessCache>
-GpuInProcessThread::framebuffer_completeness_cache() {
- if (!framebuffer_completeness_cache_.get())
- framebuffer_completeness_cache_ =
- new gpu::gles2::FramebufferCompletenessCache;
- return framebuffer_completeness_cache_;
-}
-
-SyncPointManager* GpuInProcessThread::sync_point_manager() {
- return sync_point_manager_;
-}
+InProcessCommandBuffer::GpuTask::~GpuTask() {}
} // namespace gpu
diff --git a/chromium/gpu/ipc/in_process_command_buffer.h b/chromium/gpu/ipc/in_process_command_buffer.h
index ac4ef023878..6a55ec011b7 100644
--- a/chromium/gpu/ipc/in_process_command_buffer.h
+++ b/chromium/gpu/ipc/in_process_command_buffer.h
@@ -15,7 +15,6 @@
#include "base/atomic_sequence_num.h"
#include "base/callback.h"
#include "base/compiler_specific.h"
-#include "base/containers/scoped_ptr_hash_map.h"
#include "base/macros.h"
#include "base/memory/ref_counted.h"
#include "base/memory/weak_ptr.h"
@@ -25,9 +24,12 @@
#include "base/threading/thread.h"
#include "gpu/command_buffer/client/gpu_control.h"
#include "gpu/command_buffer/common/command_buffer.h"
+#include "gpu/command_buffer/service/command_executor.h"
+#include "gpu/command_buffer/service/context_group.h"
#include "gpu/command_buffer/service/gpu_preferences.h"
#include "gpu/config/gpu_driver_bug_workarounds.h"
#include "gpu/gpu_export.h"
+#include "gpu/ipc/service/image_transport_surface_delegate.h"
#include "ui/gfx/gpu_memory_buffer.h"
#include "ui/gfx/native_widget_types.h"
#include "ui/gl/gl_surface.h"
@@ -48,9 +50,11 @@ class Size;
}
namespace gpu {
+
class SyncPointClient;
class SyncPointOrderData;
class SyncPointManager;
+struct GpuProcessHostedCALayerTreeParamsMac;
namespace gles2 {
struct ContextCreationAttribHelper;
@@ -72,7 +76,8 @@ class TransferBufferManagerInterface;
// However, the behavior for accessing one context (i.e. one instance of this
// class) from different client threads is undefined.
class GPU_EXPORT InProcessCommandBuffer : public CommandBuffer,
- public GpuControl {
+ public GpuControl,
+ public ImageTransportSurfaceDelegate {
public:
class Service;
explicit InProcessCommandBuffer(const scoped_refptr<Service>& service);
@@ -83,7 +88,7 @@ class GPU_EXPORT InProcessCommandBuffer : public CommandBuffer,
// a new GLSurface.
bool Initialize(scoped_refptr<gl::GLSurface> surface,
bool is_offscreen,
- gfx::AcceleratedWidget window,
+ SurfaceHandle window,
const gles2::ContextCreationAttribHelper& attribs,
InProcessCommandBuffer* share_group,
GpuMemoryBufferManager* gpu_memory_buffer_manager,
@@ -92,16 +97,14 @@ class GPU_EXPORT InProcessCommandBuffer : public CommandBuffer,
// CommandBuffer implementation:
State GetLastState() override;
- int32_t GetLastToken() override;
void Flush(int32_t put_offset) override;
void OrderingBarrier(int32_t put_offset) override;
- void WaitForTokenInRange(int32_t start, int32_t end) override;
- void WaitForGetOffsetInRange(int32_t start, int32_t end) override;
+ State WaitForTokenInRange(int32_t start, int32_t end) override;
+ State WaitForGetOffsetInRange(int32_t start, int32_t end) override;
void SetGetBuffer(int32_t shm_id) override;
scoped_refptr<gpu::Buffer> CreateTransferBuffer(size_t size,
int32_t* id) override;
void DestroyTransferBuffer(int32_t id) override;
- gpu::error::Error GetLastError() override;
// GpuControl implementation:
// NOTE: The GpuControlClient will be called on the client thread.
@@ -126,15 +129,47 @@ class GPU_EXPORT InProcessCommandBuffer : public CommandBuffer,
bool IsFenceSyncRelease(uint64_t release) override;
bool IsFenceSyncFlushed(uint64_t release) override;
bool IsFenceSyncFlushReceived(uint64_t release) override;
+ bool IsFenceSyncReleased(uint64_t release) override;
void SignalSyncToken(const SyncToken& sync_token,
const base::Closure& callback) override;
bool CanWaitUnverifiedSyncToken(const SyncToken* sync_token) override;
+// ImageTransportSurfaceDelegate implementation:
+#if defined(OS_WIN)
+ void DidCreateAcceleratedSurfaceChildWindow(
+ SurfaceHandle parent_window,
+ SurfaceHandle child_window) override;
+#endif
+ void DidSwapBuffersComplete(SwapBuffersCompleteParams params) override;
+ const gles2::FeatureInfo* GetFeatureInfo() const override;
+ void SetLatencyInfoCallback(const LatencyInfoCallback& callback) override;
+ void UpdateVSyncParameters(base::TimeTicks timebase,
+ base::TimeDelta interval) override;
+
+ using SwapBuffersCompletionCallback = base::Callback<void(
+ const std::vector<ui::LatencyInfo>& latency_info,
+ gfx::SwapResult result,
+ const gpu::GpuProcessHostedCALayerTreeParamsMac* params_mac)>;
+ void SetSwapBuffersCompletionCallback(
+ const SwapBuffersCompletionCallback& callback);
+
+ using UpdateVSyncParametersCallback =
+ base::Callback<void(base::TimeTicks timebase, base::TimeDelta interval)>;
+ void SetUpdateVSyncParametersCallback(
+ const UpdateVSyncParametersCallback& callback);
+
+ void DidSwapBuffersCompleteOnOriginThread(SwapBuffersCompleteParams params);
+ void UpdateVSyncParametersOnOriginThread(base::TimeTicks timebase,
+ base::TimeDelta interval);
+
// The serializer interface to the GPU service (i.e. thread).
class Service {
public:
Service();
Service(const gpu::GpuPreferences& gpu_preferences);
+ Service(gpu::gles2::MailboxManager* mailbox_manager,
+ scoped_refptr<gl::GLShareGroup> share_group);
+
virtual ~Service();
virtual void AddRef() const = 0;
@@ -158,19 +193,20 @@ class GPU_EXPORT InProcessCommandBuffer : public CommandBuffer,
scoped_refptr<gl::GLShareGroup> share_group();
scoped_refptr<gles2::MailboxManager> mailbox_manager();
gpu::gles2::ProgramCache* program_cache();
+ virtual bool BlockThreadOnWaitSyncToken() const = 0;
- private:
+ protected:
const GpuPreferences gpu_preferences_;
const GpuDriverBugWorkarounds gpu_driver_bug_workarounds_;
- scoped_refptr<gl::GLShareGroup> share_group_;
scoped_refptr<gles2::MailboxManager> mailbox_manager_;
+ scoped_refptr<gl::GLShareGroup> share_group_;
std::unique_ptr<gpu::gles2::ProgramCache> program_cache_;
};
private:
struct InitializeOnGpuThreadParams {
bool is_offscreen;
- gfx::AcceleratedWidget window;
+ SurfaceHandle window;
const gles2::ContextCreationAttribHelper& attribs;
    gpu::Capabilities* capabilities;  // Output.
InProcessCommandBuffer* context_group;
@@ -178,7 +214,7 @@ class GPU_EXPORT InProcessCommandBuffer : public CommandBuffer,
InitializeOnGpuThreadParams(
bool is_offscreen,
- gfx::AcceleratedWidget window,
+ SurfaceHandle window,
const gles2::ContextCreationAttribHelper& attribs,
gpu::Capabilities* capabilities,
InProcessCommandBuffer* share_group,
@@ -194,17 +230,21 @@ class GPU_EXPORT InProcessCommandBuffer : public CommandBuffer,
bool InitializeOnGpuThread(const InitializeOnGpuThreadParams& params);
void Destroy();
bool DestroyOnGpuThread();
- void FlushOnGpuThread(int32_t put_offset, uint32_t order_num);
+ void FlushOnGpuThread(int32_t put_offset);
+ void UpdateLastStateOnGpuThread();
void ScheduleDelayedWorkOnGpuThread();
bool MakeCurrent();
base::Closure WrapCallback(const base::Closure& callback);
- State GetStateFast();
- void QueueTask(const base::Closure& task) { service_->ScheduleTask(task); }
+ void QueueTask(bool out_of_order, const base::Closure& task);
+ void ProcessTasksOnGpuThread();
void CheckSequencedThread();
void FenceSyncReleaseOnGpuThread(uint64_t release);
bool WaitFenceSyncOnGpuThread(gpu::CommandBufferNamespace namespace_id,
gpu::CommandBufferId command_buffer_id,
uint64_t release);
+ void OnWaitFenceSyncCompleted(CommandBufferNamespace namespace_id,
+ CommandBufferId command_buffer_id,
+ uint64_t release);
void DescheduleUntilFinishedOnGpuThread();
void RescheduleAfterFinishedOnGpuThread();
void SignalSyncTokenOnGpuThread(const SyncToken& sync_token,
@@ -216,7 +256,7 @@ class GPU_EXPORT InProcessCommandBuffer : public CommandBuffer,
const gfx::Size& size,
gfx::BufferFormat format,
uint32_t internalformat,
- uint32_t order_num,
+ // uint32_t order_num,
uint64_t fence_sync);
void DestroyImageOnGpuThread(int32_t id);
void SetGetBufferOnGpuThread(int32_t shm_id, base::WaitableEvent* completion);
@@ -232,6 +272,8 @@ class GPU_EXPORT InProcessCommandBuffer : public CommandBuffer,
// Members accessed on the gpu thread (possibly with the exception of
// creation):
+ bool waiting_for_sync_point_ = false;
+
scoped_refptr<base::SingleThreadTaskRunner> origin_task_runner_;
scoped_refptr<TransferBufferManagerInterface> transfer_buffer_manager_;
std::unique_ptr<CommandExecutor> executor_;
@@ -251,6 +293,7 @@ class GPU_EXPORT InProcessCommandBuffer : public CommandBuffer,
bool context_lost_;
#endif
State last_state_;
+ base::Lock last_state_lock_;
int32_t last_put_offset_;
gpu::Capabilities capabilities_;
GpuMemoryBufferManager* gpu_memory_buffer_manager_;
@@ -263,8 +306,10 @@ class GPU_EXPORT InProcessCommandBuffer : public CommandBuffer,
base::Lock command_buffer_lock_;
base::WaitableEvent flush_event_;
scoped_refptr<Service> service_;
- State state_after_last_flush_;
- base::Lock state_after_last_flush_lock_;
+
+ // The group of contexts that share namespaces with this context.
+ scoped_refptr<gles2::ContextGroup> context_group_;
+
scoped_refptr<gl::GLShareGroup> gl_share_group_;
base::WaitableEvent fence_sync_wait_event_;
@@ -272,6 +317,18 @@ class GPU_EXPORT InProcessCommandBuffer : public CommandBuffer,
// the client thread.
std::unique_ptr<base::SequenceChecker> sequence_checker_;
+ base::Lock task_queue_lock_;
+ struct GpuTask {
+ GpuTask(const base::Closure& callback, uint32_t order_number);
+ ~GpuTask();
+ base::Closure callback;
+ uint32_t order_number;
+ };
+ std::queue<std::unique_ptr<GpuTask>> task_queue_;
+
+ SwapBuffersCompletionCallback swap_buffers_completion_callback_;
+ UpdateVSyncParametersCallback update_vsync_parameters_completion_callback_;
+
base::WeakPtr<InProcessCommandBuffer> client_thread_weak_ptr_;
base::WeakPtr<InProcessCommandBuffer> gpu_thread_weak_ptr_;
base::WeakPtrFactory<InProcessCommandBuffer> client_thread_weak_ptr_factory_;
@@ -280,36 +337,6 @@ class GPU_EXPORT InProcessCommandBuffer : public CommandBuffer,
DISALLOW_COPY_AND_ASSIGN(InProcessCommandBuffer);
};
-// Default Service class when a null service is used.
-class GPU_EXPORT GpuInProcessThread
- : public base::Thread,
- public NON_EXPORTED_BASE(InProcessCommandBuffer::Service),
- public base::RefCountedThreadSafe<GpuInProcessThread> {
- public:
- explicit GpuInProcessThread(SyncPointManager* sync_point_manager);
-
- void AddRef() const override;
- void Release() const override;
- void ScheduleTask(const base::Closure& task) override;
- void ScheduleDelayedWork(const base::Closure& callback) override;
- bool UseVirtualizedGLContexts() override;
- scoped_refptr<gles2::ShaderTranslatorCache> shader_translator_cache()
- override;
- scoped_refptr<gles2::FramebufferCompletenessCache>
- framebuffer_completeness_cache() override;
- SyncPointManager* sync_point_manager() override;
-
- private:
- ~GpuInProcessThread() override;
- friend class base::RefCountedThreadSafe<GpuInProcessThread>;
-
- SyncPointManager* sync_point_manager_; // Non-owning.
- scoped_refptr<gpu::gles2::ShaderTranslatorCache> shader_translator_cache_;
- scoped_refptr<gpu::gles2::FramebufferCompletenessCache>
- framebuffer_completeness_cache_;
- DISALLOW_COPY_AND_ASSIGN(GpuInProcessThread);
-};
-
} // namespace gpu
#endif // GPU_IPC_IN_PROCESS_COMMAND_BUFFER_H_
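[Editor's note, not part of the diff] The interface change above makes
WaitForTokenInRange()/WaitForGetOffsetInRange() return a CommandBuffer::State
snapshot and removes GetLastToken()/GetLastError(). A minimal caller sketch,
assuming a |command_buffer| pointer and the usual State fields (error, token):

    gpu::CommandBuffer::State state =
        command_buffer->WaitForGetOffsetInRange(start, end);
    if (state.error != gpu::error::kNoError) {
      // Context lost or another error; tear down client-side state.
    }
    int32_t last_token = state.token;  // previously via GetLastToken()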
diff --git a/chromium/gpu/ipc/service/BUILD.gn b/chromium/gpu/ipc/service/BUILD.gn
index 59ca51e0496..e8c5edcc427 100644
--- a/chromium/gpu/ipc/service/BUILD.gn
+++ b/chromium/gpu/ipc/service/BUILD.gn
@@ -44,6 +44,9 @@ target(link_target_type, "ipc_service_sources") {
"gpu_memory_manager.h",
"gpu_memory_tracking.cc",
"gpu_memory_tracking.h",
+ "gpu_vsync_provider.h",
+ "gpu_vsync_provider_posix.cc",
+ "gpu_vsync_provider_win.cc",
"gpu_watchdog_thread.cc",
"gpu_watchdog_thread.h",
"image_transport_surface.h",
@@ -78,6 +81,8 @@ target(link_target_type, "ipc_service_sources") {
sources += [
"child_window_surface_win.cc",
"child_window_surface_win.h",
+ "child_window_win.cc",
+ "child_window_win.h",
"image_transport_surface_win.cc",
]
}
@@ -129,9 +134,6 @@ source_set("test_support") {
"//testing/gtest:gtest",
]
deps = [
- # TODO(markdittmer): Shouldn't depend on client code for server tests.
- # See crbug.com/608800.
- "//gpu/ipc/client",
"//gpu/ipc/common",
]
}
@@ -143,6 +145,7 @@ test("gpu_ipc_service_unittests") {
"gpu_channel_test_common.cc",
"gpu_channel_test_common.h",
"gpu_channel_unittest.cc",
+ "gpu_vsync_provider_unittest_win.cc",
]
deps = [
":service",
diff --git a/chromium/gpu/ipc/service/child_window_surface_win.cc b/chromium/gpu/ipc/service/child_window_surface_win.cc
index e7f482064f4..92a9392b1d5 100644
--- a/chromium/gpu/ipc/service/child_window_surface_win.cc
+++ b/chromium/gpu/ipc/service/child_window_surface_win.cc
@@ -8,15 +8,10 @@
#include "base/compiler_specific.h"
#include "base/memory/ptr_util.h"
-#include "base/threading/thread.h"
-#include "base/win/scoped_hdc.h"
-#include "base/win/wrapped_window_proc.h"
#include "gpu/ipc/common/gpu_messages.h"
#include "gpu/ipc/service/gpu_channel_manager.h"
#include "gpu/ipc/service/gpu_channel_manager_delegate.h"
#include "ui/gfx/native_widget_types.h"
-#include "ui/gfx/win/hwnd_util.h"
-#include "ui/gfx/win/window_impl.h"
#include "ui/gl/egl_util.h"
#include "ui/gl/gl_context.h"
#include "ui/gl/gl_surface_egl.h"
@@ -24,144 +19,11 @@
namespace gpu {
-// This owns the thread and contains data that's shared between the threads.
-struct SharedData {
- SharedData() : thread("Window owner thread") {}
-
- base::Lock rect_lock;
- gfx::Rect rect_to_clear;
-
- base::Thread thread;
-};
-
-namespace {
-
-ATOM g_window_class;
-
-// This runs on the window owner thread.
-LRESULT CALLBACK IntermediateWindowProc(HWND window,
- UINT message,
- WPARAM w_param,
- LPARAM l_param) {
- switch (message) {
- case WM_ERASEBKGND:
- // Prevent windows from erasing the background.
- return 1;
- case WM_PAINT:
- PAINTSTRUCT paint;
- if (BeginPaint(window, &paint)) {
- SharedData* shared_data =
- reinterpret_cast<SharedData*>(gfx::GetWindowUserData(window));
- DCHECK(shared_data);
- {
- base::AutoLock lock(shared_data->rect_lock);
- shared_data->rect_to_clear.Union(gfx::Rect(paint.rcPaint));
- }
-
- EndPaint(window, &paint);
- }
- return 0;
- default:
- return DefWindowProc(window, message, w_param, l_param);
- }
-}
-
-// This runs on the window owner thread.
-void InitializeWindowClass() {
- if (g_window_class)
- return;
-
- WNDCLASSEX intermediate_class;
- base::win::InitializeWindowClass(
- L"Intermediate D3D Window",
- &base::win::WrappedWindowProc<IntermediateWindowProc>, CS_OWNDC, 0, 0,
- nullptr, reinterpret_cast<HBRUSH>(GetStockObject(BLACK_BRUSH)), nullptr,
- nullptr, nullptr, &intermediate_class);
- g_window_class = RegisterClassEx(&intermediate_class);
- if (!g_window_class) {
- LOG(ERROR) << "RegisterClass failed.";
- return;
- }
-}
-
-// Hidden popup window used as a parent for the child surface window.
-// Must be created and destroyed on the thread.
-class HiddenPopupWindow : public gfx::WindowImpl {
- public:
- static HWND Create() {
- gfx::WindowImpl* window = new HiddenPopupWindow;
-
- window->set_window_style(WS_POPUP);
- window->set_window_ex_style(WS_EX_TOOLWINDOW);
- window->Init(GetDesktopWindow(), gfx::Rect());
- EnableWindow(window->hwnd(), FALSE);
- // The |window| instance is now owned by the window user data.
- DCHECK_EQ(window, gfx::GetWindowUserData(window->hwnd()));
- return window->hwnd();
- }
-
- static void Destroy(HWND window) {
- // This uses the fact that the window user data contains a pointer
- // to gfx::WindowImpl instance.
- gfx::WindowImpl* window_data =
- reinterpret_cast<gfx::WindowImpl*>(gfx::GetWindowUserData(window));
- DCHECK_EQ(window, window_data->hwnd());
- DestroyWindow(window);
- delete window_data;
- }
-
- private:
- // Explicitly do nothing in Close. We do this as some external apps may get a
- // handle to this window and attempt to close it.
- void OnClose() {}
-
- CR_BEGIN_MSG_MAP_EX(HiddenPopupWindow)
- CR_MSG_WM_CLOSE(OnClose)
- CR_END_MSG_MAP()
-};
-
-// This runs on the window owner thread.
-void CreateWindowsOnThread(const gfx::Size& size,
- base::WaitableEvent* event,
- SharedData* shared_data,
- HWND* child_window,
- HWND* parent_window) {
- InitializeWindowClass();
- DCHECK(g_window_class);
-
- // Create hidden parent window on the current thread.
- *parent_window = HiddenPopupWindow::Create();
- // Create child window.
- HWND window = CreateWindowEx(
- WS_EX_NOPARENTNOTIFY, reinterpret_cast<wchar_t*>(g_window_class), L"",
- WS_CHILDWINDOW | WS_DISABLED | WS_VISIBLE, 0, 0, size.width(),
- size.height(), *parent_window, NULL, NULL, NULL);
- CHECK(window);
- *child_window = window;
- gfx::SetWindowUserData(window, shared_data);
- event->Signal();
-}
-
-// This runs on the main thread after the window was destroyed on window owner
-// thread.
-void DestroySharedData(std::unique_ptr<SharedData> shared_data) {
- shared_data->thread.Stop();
-}
-
-// This runs on the window owner thread.
-void DestroyWindowsOnThread(HWND child_window, HWND hidden_popup_window) {
- DestroyWindow(child_window);
- HiddenPopupWindow::Destroy(hidden_popup_window);
-}
-
-} // namespace
-
ChildWindowSurfaceWin::ChildWindowSurfaceWin(
base::WeakPtr<ImageTransportSurfaceDelegate> delegate,
HWND parent_window)
: gl::NativeViewGLSurfaceEGL(0),
- parent_window_(parent_window),
- delegate_(delegate),
+ child_window_(delegate, parent_window),
alpha_(true),
first_swap_(true) {
// Don't use EGL_ANGLE_window_fixed_size so that we can avoid recreating the
@@ -203,25 +65,9 @@ bool ChildWindowSurfaceWin::InitializeNativeWindow() {
if (window_)
return true;
- shared_data_ = base::MakeUnique<SharedData>();
-
- base::Thread::Options options(base::MessageLoop::TYPE_UI, 0);
- shared_data_->thread.StartWithOptions(options);
-
- base::WaitableEvent event(base::WaitableEvent::ResetPolicy::AUTOMATIC,
- base::WaitableEvent::InitialState::NOT_SIGNALED);
-
- RECT window_rect;
- GetClientRect(parent_window_, &window_rect);
-
- shared_data_->thread.task_runner()->PostTask(
- FROM_HERE,
- base::Bind(&CreateWindowsOnThread, gfx::Rect(window_rect).size(), &event,
- shared_data_.get(), &window_, &initial_parent_window_));
- event.Wait();
-
- delegate_->DidCreateAcceleratedSurfaceChildWindow(parent_window_, window_);
- return true;
+ bool result = child_window_.Initialize();
+ window_ = child_window_.window();
+ return result;
}
bool ChildWindowSurfaceWin::Resize(const gfx::Size& size,
@@ -281,7 +127,7 @@ gfx::SwapResult ChildWindowSurfaceWin::SwapBuffers() {
glFinish();
first_swap_ = false;
}
- ClearInvalidContents();
+ child_window_.ClearInvalidContents();
return result;
}
@@ -291,33 +137,11 @@ gfx::SwapResult ChildWindowSurfaceWin::PostSubBuffer(int x,
int height) {
gfx::SwapResult result =
NativeViewGLSurfaceEGL::PostSubBuffer(x, y, width, height);
- ClearInvalidContents();
+ child_window_.ClearInvalidContents();
return result;
}
-void ChildWindowSurfaceWin::ClearInvalidContents() {
- base::AutoLock lock(shared_data_->rect_lock);
- if (!shared_data_->rect_to_clear.IsEmpty()) {
- base::win::ScopedGetDC dc(window_);
-
- RECT rect = shared_data_->rect_to_clear.ToRECT();
-
- // DirectComposition composites with the contents under the SwapChain,
- // so ensure that's cleared. GDI treats black as transparent.
- FillRect(dc, &rect, reinterpret_cast<HBRUSH>(GetStockObject(BLACK_BRUSH)));
- shared_data_->rect_to_clear = gfx::Rect();
- }
-}
-
ChildWindowSurfaceWin::~ChildWindowSurfaceWin() {
- if (shared_data_) {
- scoped_refptr<base::TaskRunner> task_runner =
- shared_data_->thread.task_runner();
- task_runner->PostTaskAndReply(
- FROM_HERE,
- base::Bind(&DestroyWindowsOnThread, window_, initial_parent_window_),
- base::Bind(&DestroySharedData, base::Passed(std::move(shared_data_))));
- }
}
} // namespace gpu
diff --git a/chromium/gpu/ipc/service/child_window_surface_win.h b/chromium/gpu/ipc/service/child_window_surface_win.h
index add4d490e8f..9f5017b35e5 100644
--- a/chromium/gpu/ipc/service/child_window_surface_win.h
+++ b/chromium/gpu/ipc/service/child_window_surface_win.h
@@ -6,6 +6,7 @@
#define GPU_IPC_SERVICE_CHILD_WINDOW_SURFACE_WIN_H_
#include "base/memory/weak_ptr.h"
+#include "gpu/ipc/service/child_window_win.h"
#include "gpu/ipc/service/image_transport_surface_delegate.h"
#include "ui/gl/gl_surface_egl.h"
@@ -13,9 +14,6 @@
namespace gpu {
-class GpuChannelManager;
-struct SharedData;
-
class ChildWindowSurfaceWin : public gl::NativeViewGLSurfaceEGL {
public:
ChildWindowSurfaceWin(base::WeakPtr<ImageTransportSurfaceDelegate> delegate,
@@ -34,17 +32,7 @@ class ChildWindowSurfaceWin : public gl::NativeViewGLSurfaceEGL {
~ChildWindowSurfaceWin() override;
private:
- void ClearInvalidContents();
-
- // This member contains all the data that can be accessed from the main or
- // window owner threads.
- std::unique_ptr<SharedData> shared_data_;
- // The eventual parent of the window living in the browser process.
- HWND parent_window_;
- // The window is initially created with this parent window. We need to keep it
- // around so that we can destroy it at the end.
- HWND initial_parent_window_;
- base::WeakPtr<ImageTransportSurfaceDelegate> delegate_;
+ ChildWindowWin child_window_;
bool alpha_;
bool first_swap_;
diff --git a/chromium/gpu/ipc/service/child_window_win.cc b/chromium/gpu/ipc/service/child_window_win.cc
new file mode 100644
index 00000000000..1246c577318
--- /dev/null
+++ b/chromium/gpu/ipc/service/child_window_win.cc
@@ -0,0 +1,210 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "gpu/ipc/service/child_window_win.h"
+
+#include <memory>
+
+#include "base/compiler_specific.h"
+#include "base/memory/ptr_util.h"
+#include "base/threading/thread.h"
+#include "base/win/scoped_hdc.h"
+#include "base/win/wrapped_window_proc.h"
+#include "gpu/ipc/common/gpu_messages.h"
+#include "gpu/ipc/service/gpu_channel_manager.h"
+#include "gpu/ipc/service/gpu_channel_manager_delegate.h"
+#include "ui/gfx/native_widget_types.h"
+#include "ui/gfx/win/hwnd_util.h"
+#include "ui/gfx/win/window_impl.h"
+
+namespace gpu {
+
+// This owns the thread and contains data that's shared between the threads.
+struct SharedData {
+ SharedData() : thread("Window owner thread") {}
+
+ base::Lock rect_lock;
+ gfx::Rect rect_to_clear;
+
+ base::Thread thread;
+};
+
+namespace {
+
+ATOM g_window_class;
+
+// This runs on the window owner thread.
+LRESULT CALLBACK IntermediateWindowProc(HWND window,
+ UINT message,
+ WPARAM w_param,
+ LPARAM l_param) {
+ switch (message) {
+ case WM_ERASEBKGND:
+ // Prevent windows from erasing the background.
+ return 1;
+ case WM_PAINT:
+ PAINTSTRUCT paint;
+ if (BeginPaint(window, &paint)) {
+ SharedData* shared_data =
+ reinterpret_cast<SharedData*>(gfx::GetWindowUserData(window));
+ DCHECK(shared_data);
+ {
+ base::AutoLock lock(shared_data->rect_lock);
+ shared_data->rect_to_clear.Union(gfx::Rect(paint.rcPaint));
+ }
+
+ EndPaint(window, &paint);
+ }
+ return 0;
+ default:
+ return DefWindowProc(window, message, w_param, l_param);
+ }
+}
+
+// This runs on the window owner thread.
+void InitializeWindowClass() {
+ if (g_window_class)
+ return;
+
+ WNDCLASSEX intermediate_class;
+ base::win::InitializeWindowClass(
+ L"Intermediate D3D Window",
+ &base::win::WrappedWindowProc<IntermediateWindowProc>, CS_OWNDC, 0, 0,
+ nullptr, reinterpret_cast<HBRUSH>(GetStockObject(BLACK_BRUSH)), nullptr,
+ nullptr, nullptr, &intermediate_class);
+ g_window_class = RegisterClassEx(&intermediate_class);
+ if (!g_window_class) {
+ LOG(ERROR) << "RegisterClass failed.";
+ return;
+ }
+}
+
+// Hidden popup window used as a parent for the child surface window.
+// Must be created and destroyed on the thread.
+class HiddenPopupWindow : public gfx::WindowImpl {
+ public:
+ static HWND Create() {
+ gfx::WindowImpl* window = new HiddenPopupWindow;
+
+ window->set_window_style(WS_POPUP);
+ window->set_window_ex_style(WS_EX_TOOLWINDOW);
+ window->Init(GetDesktopWindow(), gfx::Rect());
+ EnableWindow(window->hwnd(), FALSE);
+ // The |window| instance is now owned by the window user data.
+ DCHECK_EQ(window, gfx::GetWindowUserData(window->hwnd()));
+ return window->hwnd();
+ }
+
+ static void Destroy(HWND window) {
+ // This uses the fact that the window user data contains a pointer
+ // to gfx::WindowImpl instance.
+ gfx::WindowImpl* window_data =
+ reinterpret_cast<gfx::WindowImpl*>(gfx::GetWindowUserData(window));
+ DCHECK_EQ(window, window_data->hwnd());
+ DestroyWindow(window);
+ delete window_data;
+ }
+
+ private:
+ // Explicitly do nothing in Close. We do this as some external apps may get a
+ // handle to this window and attempt to close it.
+ void OnClose() {}
+
+ CR_BEGIN_MSG_MAP_EX(HiddenPopupWindow)
+ CR_MSG_WM_CLOSE(OnClose)
+ CR_END_MSG_MAP()
+};
+
+// This runs on the window owner thread.
+void CreateWindowsOnThread(const gfx::Size& size,
+ base::WaitableEvent* event,
+ SharedData* shared_data,
+ HWND* child_window,
+ HWND* parent_window) {
+ InitializeWindowClass();
+ DCHECK(g_window_class);
+
+ // Create hidden parent window on the current thread.
+ *parent_window = HiddenPopupWindow::Create();
+ // Create child window.
+ HWND window = CreateWindowEx(
+ WS_EX_NOPARENTNOTIFY, reinterpret_cast<wchar_t*>(g_window_class), L"",
+ WS_CHILDWINDOW | WS_DISABLED | WS_VISIBLE, 0, 0, size.width(),
+ size.height(), *parent_window, NULL, NULL, NULL);
+ CHECK(window);
+ *child_window = window;
+ gfx::SetWindowUserData(window, shared_data);
+ event->Signal();
+}
+
+// This runs on the main thread after the window was destroyed on window owner
+// thread.
+void DestroySharedData(std::unique_ptr<SharedData> shared_data) {
+ shared_data->thread.Stop();
+}
+
+// This runs on the window owner thread.
+void DestroyWindowsOnThread(HWND child_window, HWND hidden_popup_window) {
+ DestroyWindow(child_window);
+ HiddenPopupWindow::Destroy(hidden_popup_window);
+}
+
+} // namespace
+
+ChildWindowWin::ChildWindowWin(
+ base::WeakPtr<ImageTransportSurfaceDelegate> delegate,
+ HWND parent_window)
+ : parent_window_(parent_window), window_(nullptr), delegate_(delegate) {}
+
+bool ChildWindowWin::Initialize() {
+ if (window_)
+ return true;
+
+ shared_data_ = base::MakeUnique<SharedData>();
+
+ base::Thread::Options options(base::MessageLoop::TYPE_UI, 0);
+ shared_data_->thread.StartWithOptions(options);
+
+ base::WaitableEvent event(base::WaitableEvent::ResetPolicy::AUTOMATIC,
+ base::WaitableEvent::InitialState::NOT_SIGNALED);
+
+ RECT window_rect;
+ GetClientRect(parent_window_, &window_rect);
+
+ shared_data_->thread.task_runner()->PostTask(
+ FROM_HERE,
+ base::Bind(&CreateWindowsOnThread, gfx::Rect(window_rect).size(), &event,
+ shared_data_.get(), &window_, &initial_parent_window_));
+ event.Wait();
+
+ delegate_->DidCreateAcceleratedSurfaceChildWindow(parent_window_, window_);
+ return true;
+}
+
+void ChildWindowWin::ClearInvalidContents() {
+ base::AutoLock lock(shared_data_->rect_lock);
+ if (!shared_data_->rect_to_clear.IsEmpty()) {
+ base::win::ScopedGetDC dc(window_);
+
+ RECT rect = shared_data_->rect_to_clear.ToRECT();
+
+ // DirectComposition composites with the contents under the SwapChain,
+ // so ensure that's cleared. GDI treats black as transparent.
+ FillRect(dc, &rect, reinterpret_cast<HBRUSH>(GetStockObject(BLACK_BRUSH)));
+ shared_data_->rect_to_clear = gfx::Rect();
+ }
+}
+
+ChildWindowWin::~ChildWindowWin() {
+ if (shared_data_) {
+ scoped_refptr<base::TaskRunner> task_runner =
+ shared_data_->thread.task_runner();
+ task_runner->PostTaskAndReply(
+ FROM_HERE,
+ base::Bind(&DestroyWindowsOnThread, window_, initial_parent_window_),
+ base::Bind(&DestroySharedData, base::Passed(std::move(shared_data_))));
+ }
+}
+
+} // namespace gpu
diff --git a/chromium/gpu/ipc/service/child_window_win.h b/chromium/gpu/ipc/service/child_window_win.h
new file mode 100644
index 00000000000..2bccf9ff926
--- /dev/null
+++ b/chromium/gpu/ipc/service/child_window_win.h
@@ -0,0 +1,47 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef GPU_IPC_SERVICE_CHILD_WINDOW_WIN_H_
+#define GPU_IPC_SERVICE_CHILD_WINDOW_WIN_H_
+
+#include "base/memory/weak_ptr.h"
+#include "gpu/ipc/service/image_transport_surface_delegate.h"
+
+#include <windows.h>
+
+namespace gpu {
+
+struct SharedData;
+
+// The window DirectComposition renders into needs to be owned by the process
+// that's currently doing the rendering. The class creates and owns a window
+// which is reparented by the browser to be a child of its window.
+class ChildWindowWin {
+ public:
+ ChildWindowWin(base::WeakPtr<ImageTransportSurfaceDelegate> delegate,
+ HWND parent_window);
+ ~ChildWindowWin();
+
+ bool Initialize();
+ void ClearInvalidContents();
+ HWND window() const { return window_; }
+
+ private:
+ // This member contains all the data that can be accessed from the main or
+ // window owner threads.
+ std::unique_ptr<SharedData> shared_data_;
+ // The eventual parent of the window living in the browser process.
+ HWND parent_window_;
+ HWND window_;
+ // The window is initially created with this parent window. We need to keep it
+ // around so that we can destroy it at the end.
+ HWND initial_parent_window_;
+ base::WeakPtr<ImageTransportSurfaceDelegate> delegate_;
+
+ DISALLOW_COPY_AND_ASSIGN(ChildWindowWin);
+};
+
+} // namespace gpu
+
+#endif // GPU_IPC_SERVICE_CHILD_WINDOW_WIN_H_
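[Editor's note, not part of the diff] ChildWindowWin extracts the
window-owner-thread machinery that previously lived inside
ChildWindowSurfaceWin. A rough usage sketch matching how the surface class now
drives it (variable names assumed):

    ChildWindowWin child_window(delegate_weak_ptr, parent_hwnd);
    if (child_window.Initialize()) {
      HWND hwnd = child_window.window();  // the EGL surface is created against this HWND
      // ...after each SwapBuffers()/PostSubBuffer():
      child_window.ClearInvalidContents();
    }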
diff --git a/chromium/gpu/ipc/service/gpu_channel.cc b/chromium/gpu/ipc/service/gpu_channel.cc
index 8ed788a3817..4a99a6db4b6 100644
--- a/chromium/gpu/ipc/service/gpu_channel.cc
+++ b/chromium/gpu/ipc/service/gpu_channel.cc
@@ -701,7 +701,11 @@ void GpuChannel::OnStreamRescheduled(int32_t stream_id, bool scheduled) {
}
GpuCommandBufferStub* GpuChannel::LookupCommandBuffer(int32_t route_id) {
- return stubs_.get(route_id);
+ auto it = stubs_.find(route_id);
+ if (it == stubs_.end())
+ return nullptr;
+
+ return it->second.get();
}
void GpuChannel::LoseAllContexts() {
@@ -772,7 +776,7 @@ void GpuChannel::HandleMessage(
const IPC::Message& msg = channel_msg->message;
int32_t routing_id = msg.routing_id();
- GpuCommandBufferStub* stub = stubs_.get(routing_id);
+ GpuCommandBufferStub* stub = LookupCommandBuffer(routing_id);
DCHECK(!stub || stub->IsScheduled());
@@ -873,7 +877,7 @@ void GpuChannel::RemoveRouteFromStream(int32_t route_id) {
#if defined(OS_ANDROID)
const GpuCommandBufferStub* GpuChannel::GetOneStub() const {
for (const auto& kv : stubs_) {
- const GpuCommandBufferStub* stub = kv.second;
+ const GpuCommandBufferStub* stub = kv.second.get();
if (stub->decoder() && !stub->decoder()->WasContextLost())
return stub;
}
@@ -896,7 +900,7 @@ void GpuChannel::OnCreateCommandBuffer(
if (stub) {
*result = true;
*capabilities = stub->decoder()->GetCapabilities();
- stubs_.set(route_id, std::move(stub));
+ stubs_[route_id] = std::move(stub);
} else {
*result = false;
*capabilities = gpu::Capabilities();
@@ -915,7 +919,7 @@ std::unique_ptr<GpuCommandBufferStub> GpuChannel::CreateCommandBuffer(
}
int32_t share_group_id = init_params.share_group_id;
- GpuCommandBufferStub* share_group = stubs_.get(share_group_id);
+ GpuCommandBufferStub* share_group = LookupCommandBuffer(share_group_id);
if (!share_group && share_group_id != MSG_ROUTING_NONE) {
DLOG(ERROR)
@@ -977,7 +981,12 @@ void GpuChannel::OnDestroyCommandBuffer(int32_t route_id) {
TRACE_EVENT1("gpu", "GpuChannel::OnDestroyCommandBuffer",
"route_id", route_id);
- std::unique_ptr<GpuCommandBufferStub> stub = stubs_.take_and_erase(route_id);
+ std::unique_ptr<GpuCommandBufferStub> stub;
+ auto it = stubs_.find(route_id);
+ if (it != stubs_.end()) {
+ stub = std::move(it->second);
+ stubs_.erase(it);
+ }
// In case the renderer is currently blocked waiting for a sync reply from the
// stub, we need to make sure to reschedule the correct stream here.
if (stub && !stub->IsScheduled()) {
diff --git a/chromium/gpu/ipc/service/gpu_channel.h b/chromium/gpu/ipc/service/gpu_channel.h
index 47c1ba63604..cbe08696305 100644
--- a/chromium/gpu/ipc/service/gpu_channel.h
+++ b/chromium/gpu/ipc/service/gpu_channel.h
@@ -10,9 +10,9 @@
#include <memory>
#include <string>
+#include <unordered_map>
#include "base/containers/hash_tables.h"
-#include "base/containers/scoped_ptr_hash_map.h"
#include "base/macros.h"
#include "base/memory/ref_counted.h"
#include "base/memory/weak_ptr.h"
@@ -177,7 +177,7 @@ class GPU_EXPORT GpuChannel
scoped_refptr<GpuChannelMessageFilter> filter_;
// Map of routing id to command buffer stub.
- base::ScopedPtrHashMap<int32_t, std::unique_ptr<GpuCommandBufferStub>> stubs_;
+ std::unordered_map<int32_t, std::unique_ptr<GpuCommandBufferStub>> stubs_;
private:
friend class TestGpuChannel;
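[Editor's note, not part of the diff] The stubs_ map here (and gpu_channels_ in
gpu_channel_manager below) moves from base::ScopedPtrHashMap to
std::unordered_map of std::unique_ptr. For reference, the take_and_erase()
idiom translates to the standard container like this (Stub is a placeholder
type for illustration):

    std::unordered_map<int32_t, std::unique_ptr<Stub>> stubs;
    std::unique_ptr<Stub> taken;  // was: stubs.take_and_erase(route_id)
    auto it = stubs.find(route_id);
    if (it != stubs.end()) {
      taken = std::move(it->second);
      stubs.erase(it);
    }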
diff --git a/chromium/gpu/ipc/service/gpu_channel_manager.cc b/chromium/gpu/ipc/service/gpu_channel_manager.cc
index 34f2be4a1cc..d043e1304aa 100644
--- a/chromium/gpu/ipc/service/gpu_channel_manager.cc
+++ b/chromium/gpu/ipc/service/gpu_channel_manager.cc
@@ -11,6 +11,7 @@
#include "base/command_line.h"
#include "base/location.h"
#include "base/memory/ptr_util.h"
+#include "base/message_loop/message_loop.h"
#include "base/single_thread_task_runner.h"
#include "base/threading/thread_task_runner_handle.h"
#include "build/build_config.h"
@@ -87,11 +88,14 @@ gles2::ProgramCache* GpuChannelManager::program_cache() {
(gl::g_driver_gl.ext.b_GL_ARB_get_program_binary ||
gl::g_driver_gl.ext.b_GL_OES_get_program_binary) &&
!gpu_preferences_.disable_gpu_program_cache) {
+ const GpuDriverBugWorkarounds& workarounds = gpu_driver_bug_workarounds_;
bool disable_disk_cache =
gpu_preferences_.disable_gpu_shader_disk_cache ||
- gpu_driver_bug_workarounds_.disable_program_disk_cache;
+ workarounds.disable_program_disk_cache;
program_cache_.reset(new gles2::MemoryProgramCache(
- gpu_preferences_.gpu_program_cache_size, disable_disk_cache));
+ gpu_preferences_.gpu_program_cache_size,
+ disable_disk_cache,
+ workarounds.disable_program_caching_for_transform_feedback));
}
return program_cache_.get();
}
@@ -120,7 +124,7 @@ void GpuChannelManager::RemoveChannel(int client_id) {
GpuChannel* GpuChannelManager::LookupChannel(int32_t client_id) const {
const auto& it = gpu_channels_.find(client_id);
- return it != gpu_channels_.end() ? it->second : nullptr;
+ return it != gpu_channels_.end() ? it->second.get() : nullptr;
}
std::unique_ptr<GpuChannel> GpuChannelManager::CreateGpuChannel(
@@ -147,7 +151,7 @@ IPC::ChannelHandle GpuChannelManager::EstablishChannel(
CreateGpuChannel(client_id, client_tracing_id, preempts,
allow_view_command_buffers, allow_real_time_streams));
IPC::ChannelHandle channel_handle = channel->Init(shutdown_event_);
- gpu_channels_.set(client_id, std::move(channel));
+ gpu_channels_[client_id] = std::move(channel);
return channel_handle;
}
@@ -275,7 +279,7 @@ void GpuChannelManager::ScheduleWakeUpGpu() {
void GpuChannelManager::DoWakeUpGpu() {
const GpuCommandBufferStub* stub = nullptr;
for (const auto& kv : gpu_channels_) {
- const GpuChannel* channel = kv.second;
+ const GpuChannel* channel = kv.second.get();
stub = channel->GetOneStub();
if (stub) {
DCHECK(stub->decoder());
diff --git a/chromium/gpu/ipc/service/gpu_channel_manager.h b/chromium/gpu/ipc/service/gpu_channel_manager.h
index a71ca9b41a5..70a10e4f70b 100644
--- a/chromium/gpu/ipc/service/gpu_channel_manager.h
+++ b/chromium/gpu/ipc/service/gpu_channel_manager.h
@@ -10,9 +10,9 @@
#include <deque>
#include <memory>
#include <string>
+#include <unordered_map>
#include <vector>
-#include "base/containers/scoped_ptr_hash_map.h"
#include "base/macros.h"
#include "base/memory/ref_counted.h"
#include "base/memory/weak_ptr.h"
@@ -133,6 +133,12 @@ class GPU_EXPORT GpuChannelManager {
return exiting_for_lost_context_;
}
+ gles2::MailboxManager* mailbox_manager() const {
+ return mailbox_manager_.get();
+ }
+
+ gl::GLShareGroup* share_group() const { return share_group_.get(); }
+
protected:
virtual std::unique_ptr<GpuChannel> CreateGpuChannel(
int client_id,
@@ -145,21 +151,15 @@ class GPU_EXPORT GpuChannelManager {
return sync_point_manager_;
}
- gl::GLShareGroup* share_group() const { return share_group_.get(); }
- gles2::MailboxManager* mailbox_manager() const {
- return mailbox_manager_.get();
- }
- PreemptionFlag* preemption_flag() const {
- return preemption_flag_.get();
- }
+ PreemptionFlag* preemption_flag() const { return preemption_flag_.get(); }
scoped_refptr<base::SingleThreadTaskRunner> task_runner_;
scoped_refptr<base::SingleThreadTaskRunner> io_task_runner_;
- // These objects manage channels to individual renderer processes there is
+ // These objects manage channels to individual renderer processes. There is
// one channel for each renderer process that has connected to this GPU
// process.
- base::ScopedPtrHashMap<int32_t, std::unique_ptr<GpuChannel>> gpu_channels_;
+ std::unordered_map<int32_t, std::unique_ptr<GpuChannel>> gpu_channels_;
private:
void InternalDestroyGpuMemoryBuffer(gfx::GpuMemoryBufferId id, int client_id);
diff --git a/chromium/gpu/ipc/service/gpu_channel_manager_delegate.h b/chromium/gpu/ipc/service/gpu_channel_manager_delegate.h
index 419cc8c1ac1..9201a5391a1 100644
--- a/chromium/gpu/ipc/service/gpu_channel_manager_delegate.h
+++ b/chromium/gpu/ipc/service/gpu_channel_manager_delegate.h
@@ -10,14 +10,8 @@
class GURL;
-namespace IPC {
-struct ChannelHandle;
-}
-
namespace gpu {
-struct GPUMemoryUmaStats;
-
class GpuChannelManagerDelegate {
public:
// Tells the delegate that an offscreen context was created for the provided
diff --git a/chromium/gpu/ipc/service/gpu_channel_unittest.cc b/chromium/gpu/ipc/service/gpu_channel_unittest.cc
index b48b7ae14e3..8387448b8e1 100644
--- a/chromium/gpu/ipc/service/gpu_channel_unittest.cc
+++ b/chromium/gpu/ipc/service/gpu_channel_unittest.cc
@@ -35,7 +35,7 @@ class GpuChannelTest : public GpuChannelTestCommon {
void TearDown() override {
GpuChannelTestCommon::TearDown();
- gl::init::ClearGLBindings();
+ gl::init::ShutdownGL();
}
GpuChannel* CreateChannel(int32_t client_id,
diff --git a/chromium/gpu/ipc/service/gpu_command_buffer_stub.cc b/chromium/gpu/ipc/service/gpu_command_buffer_stub.cc
index b6396195afa..3f41a305550 100644
--- a/chromium/gpu/ipc/service/gpu_command_buffer_stub.cc
+++ b/chromium/gpu/ipc/service/gpu_command_buffer_stub.cc
@@ -11,8 +11,10 @@
#include "base/hash.h"
#include "base/json/json_writer.h"
#include "base/macros.h"
+#include "base/memory/memory_pressure_listener.h"
#include "base/memory/ptr_util.h"
#include "base/memory/shared_memory.h"
+#include "base/metrics/histogram_macros.h"
#include "base/time/time.h"
#include "base/trace_event/trace_event.h"
#include "build/build_config.h"
@@ -54,6 +56,27 @@
#include "gpu/ipc/service/stream_texture_android.h"
#endif
+// Macro to reduce code duplication when logging memory in
+// GpuCommandBufferMemoryTracker. This is needed as the UMA_HISTOGRAM_* macros
+// require a unique call-site per histogram (you can't funnel multiple strings
+// into the same call-site).
+#define GPU_COMMAND_BUFFER_MEMORY_BLOCK(category) \
+ do { \
+ uint64_t mb_used = tracking_group_->GetSize() / (1024 * 1024); \
+ switch (context_type_) { \
+ case gles2::CONTEXT_TYPE_WEBGL1: \
+ case gles2::CONTEXT_TYPE_WEBGL2: \
+ UMA_HISTOGRAM_MEMORY_LARGE_MB("GPU.ContextMemory.WebGL." category, \
+ mb_used); \
+ break; \
+ case gles2::CONTEXT_TYPE_OPENGLES2: \
+ case gles2::CONTEXT_TYPE_OPENGLES3: \
+ UMA_HISTOGRAM_MEMORY_LARGE_MB("GPU.ContextMemory.GLES." category, \
+ mb_used); \
+ break; \
+ } \
+ } while (false)
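[Editor's note, not part of the diff] Concretely, for a CONTEXT_TYPE_WEBGL1
context GPU_COMMAND_BUFFER_MEMORY_BLOCK("Periodic") resolves to the single
fixed call-site UMA_HISTOGRAM_MEMORY_LARGE_MB("GPU.ContextMemory.WebGL.Periodic",
mb_used), which is what the unique-call-site requirement above refers to.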
+
namespace gpu {
struct WaitForCommandState {
WaitForCommandState(int32_t start, int32_t end, IPC::Message* reply)
@@ -70,15 +93,29 @@ namespace {
// ContextGroup's memory type managers and the GpuMemoryManager class.
class GpuCommandBufferMemoryTracker : public gles2::MemoryTracker {
public:
- explicit GpuCommandBufferMemoryTracker(GpuChannel* channel,
- uint64_t share_group_tracing_guid)
+ explicit GpuCommandBufferMemoryTracker(
+ GpuChannel* channel,
+ uint64_t share_group_tracing_guid,
+ gles2::ContextType context_type,
+ scoped_refptr<base::SingleThreadTaskRunner> task_runner)
: tracking_group_(
channel->gpu_channel_manager()
->gpu_memory_manager()
->CreateTrackingGroup(channel->GetClientPID(), this)),
client_tracing_id_(channel->client_tracing_id()),
client_id_(channel->client_id()),
- share_group_tracing_guid_(share_group_tracing_guid) {}
+ share_group_tracing_guid_(share_group_tracing_guid),
+ context_type_(context_type),
+ memory_pressure_listener_(new base::MemoryPressureListener(
+ base::Bind(&GpuCommandBufferMemoryTracker::LogMemoryStatsPressure,
+ base::Unretained(this)))) {
+ // Set up |memory_stats_timer_| to call LogMemoryPeriodic periodically
+ // via the provided |task_runner|.
+ memory_stats_timer_.SetTaskRunner(std::move(task_runner));
+ memory_stats_timer_.Start(
+ FROM_HERE, base::TimeDelta::FromSeconds(30), this,
+ &GpuCommandBufferMemoryTracker::LogMemoryStatsPeriodic);
+ }
void TrackMemoryAllocatedChange(
size_t old_size, size_t new_size) override {
@@ -88,7 +125,7 @@ class GpuCommandBufferMemoryTracker : public gles2::MemoryTracker {
bool EnsureGPUMemoryAvailable(size_t size_needed) override {
return tracking_group_->EnsureGPUMemoryAvailable(size_needed);
- };
+ }
uint64_t ClientTracingId() const override { return client_tracing_id_; }
int ClientId() const override { return client_id_; }
@@ -97,12 +134,29 @@ class GpuCommandBufferMemoryTracker : public gles2::MemoryTracker {
}
private:
- ~GpuCommandBufferMemoryTracker() override {}
+ ~GpuCommandBufferMemoryTracker() override { LogMemoryStatsShutdown(); }
+
+ void LogMemoryStatsPeriodic() { GPU_COMMAND_BUFFER_MEMORY_BLOCK("Periodic"); }
+ void LogMemoryStatsShutdown() { GPU_COMMAND_BUFFER_MEMORY_BLOCK("Shutdown"); }
+ void LogMemoryStatsPressure(
+ base::MemoryPressureListener::MemoryPressureLevel pressure_level) {
+ // Only log on CRITICAL memory pressure.
+ if (pressure_level ==
+ base::MemoryPressureListener::MEMORY_PRESSURE_LEVEL_CRITICAL) {
+ GPU_COMMAND_BUFFER_MEMORY_BLOCK("Pressure");
+ }
+ }
+
std::unique_ptr<GpuMemoryTrackingGroup> tracking_group_;
const uint64_t client_tracing_id_;
const int client_id_;
const uint64_t share_group_tracing_guid_;
+ // Variables used in memory stat histogram logging.
+ const gles2::ContextType context_type_;
+ base::RepeatingTimer memory_stats_timer_;
+ std::unique_ptr<base::MemoryPressureListener> memory_pressure_listener_;
+
DISALLOW_COPY_AND_ASSIGN(GpuCommandBufferMemoryTracker);
};
@@ -508,8 +562,9 @@ bool GpuCommandBufferStub::Initialize(
channel_->gpu_channel_manager()->gpu_memory_buffer_factory();
context_group_ = new gles2::ContextGroup(
manager->gpu_preferences(), channel_->mailbox_manager(),
- new GpuCommandBufferMemoryTracker(channel_,
- command_buffer_id_.GetUnsafeValue()),
+ new GpuCommandBufferMemoryTracker(
+ channel_, command_buffer_id_.GetUnsafeValue(),
+ init_params.attribs.context_type, channel_->task_runner()),
manager->shader_translator_cache(),
manager->framebuffer_completeness_cache(), feature_info,
init_params.attribs.bind_generates_resource,
@@ -532,7 +587,7 @@ bool GpuCommandBufferStub::Initialize(
// only a single context. See crbug.com/510243 for details.
use_virtualized_gl_context_ |= channel_->mailbox_manager()->UsesSync();
- gl::GLSurface::Format surface_format = gl::GLSurface::SURFACE_DEFAULT;
+ gl::GLSurfaceFormat surface_format = gl::GLSurfaceFormat();
bool offscreen = (surface_handle_ == kNullSurfaceHandle);
gl::GLSurface* default_surface = manager->GetDefaultOffscreenSurface();
if (!default_surface) {
@@ -544,10 +599,12 @@ bool GpuCommandBufferStub::Initialize(
init_params.attribs.green_size <= 6 &&
init_params.attribs.blue_size <= 5 &&
init_params.attribs.alpha_size == 0)
- surface_format = gl::GLSurface::SURFACE_RGB565;
+ surface_format.SetRGB565();
+ // TODO(klausw): explicitly copy rgba sizes?
+
// We can only use virtualized contexts for onscreen command buffers if their
// config is compatible with the offscreen ones - otherwise MakeCurrent fails.
- if (surface_format != default_surface->GetFormat() && !offscreen)
+ if (!surface_format.IsCompatible(default_surface->GetFormat()) && !offscreen)
use_virtualized_gl_context_ = false;
#endif
@@ -947,6 +1004,7 @@ void GpuCommandBufferStub::OnFenceSyncRelease(uint64_t release) {
mailbox_manager->PushTextureUpdates(sync_token);
}
+ command_buffer_->SetReleaseCount(release);
sync_point_client_->ReleaseFenceSync(release);
}
diff --git a/chromium/gpu/ipc/service/gpu_command_buffer_stub.h b/chromium/gpu/ipc/service/gpu_command_buffer_stub.h
index a0999f8c059..91b8dfe1c36 100644
--- a/chromium/gpu/ipc/service/gpu_command_buffer_stub.h
+++ b/chromium/gpu/ipc/service/gpu_command_buffer_stub.h
@@ -41,20 +41,14 @@ namespace gpu {
struct Mailbox;
struct SyncToken;
class SyncPointClient;
-class SyncPointManager;
-namespace gles2 {
-class MailboxManager;
-}
}
struct GPUCreateCommandBufferConfig;
struct GpuCommandBufferMsg_CreateImage_Params;
-struct GpuCommandBufferMsg_SwapBuffersCompleted_Params;
namespace gpu {
class GpuChannel;
-class GpuWatchdogThread;
struct WaitForCommandState;
class GPU_EXPORT GpuCommandBufferStub
diff --git a/chromium/gpu/ipc/service/gpu_init.cc b/chromium/gpu/ipc/service/gpu_init.cc
index 1e912037747..add75a14728 100644
--- a/chromium/gpu/ipc/service/gpu_init.cc
+++ b/chromium/gpu/ipc/service/gpu_init.cc
@@ -17,6 +17,7 @@
#include "gpu/config/gpu_util.h"
#include "gpu/ipc/service/gpu_watchdog_thread.h"
#include "gpu/ipc/service/switches.h"
+#include "ui/gfx/switches.h"
#include "ui/gl/gl_implementation.h"
#include "ui/gl/gl_switches.h"
#include "ui/gl/init/gl_factory.h"
@@ -127,6 +128,7 @@ bool GpuInit::InitializeAndStartSandbox(const base::CommandLine& command_line) {
// to run slowly in that case.
bool enable_watchdog =
!command_line.HasSwitch(switches::kDisableGpuWatchdog) &&
+ !command_line.HasSwitch(switches::kHeadless) &&
!RunningOnValgrind();
// Disable the watchdog in debug builds because they tend to only be run by
@@ -158,6 +160,9 @@ bool GpuInit::InitializeAndStartSandbox(const base::CommandLine& command_line) {
#endif
gpu_info_.in_process_gpu = false;
+ gpu_info_.passthrough_cmd_decoder =
+ command_line.HasSwitch(switches::kUsePassthroughCmdDecoder);
+
sandbox_helper_->PreSandboxStartup();
#if defined(OS_LINUX)
diff --git a/chromium/gpu/ipc/service/gpu_memory_buffer_factory_ozone_native_pixmap.cc b/chromium/gpu/ipc/service/gpu_memory_buffer_factory_ozone_native_pixmap.cc
index e59f155c6ee..8b266b8933b 100644
--- a/chromium/gpu/ipc/service/gpu_memory_buffer_factory_ozone_native_pixmap.cc
+++ b/chromium/gpu/ipc/service/gpu_memory_buffer_factory_ozone_native_pixmap.cc
@@ -32,9 +32,9 @@ GpuMemoryBufferFactoryOzoneNativePixmap::CreateGpuMemoryBuffer(
->GetSurfaceFactoryOzone()
->CreateNativePixmap(surface_handle, size, format, usage);
if (!pixmap.get()) {
- DLOG(ERROR) << "Failed to create pixmap " << size.width() << "x"
- << size.height() << " format " << static_cast<int>(format)
- << ", usage " << static_cast<int>(usage);
+ DLOG(ERROR) << "Failed to create pixmap " << size.ToString() << " format "
+ << static_cast<int>(format) << ", usage "
+ << static_cast<int>(usage);
return gfx::GpuMemoryBufferHandle();
}
@@ -109,10 +109,40 @@ GpuMemoryBufferFactoryOzoneNativePixmap::CreateImageForGpuMemoryBuffer(
scoped_refptr<ui::GLImageOzoneNativePixmap> image(
new ui::GLImageOzoneNativePixmap(size, internalformat));
if (!image->Initialize(pixmap.get(), format)) {
- LOG(ERROR) << "Failed to create GLImage";
+ LOG(ERROR) << "Failed to create GLImage " << size.ToString() << " format "
+ << static_cast<int>(format);
return nullptr;
}
return image;
}
+scoped_refptr<gl::GLImage>
+GpuMemoryBufferFactoryOzoneNativePixmap::CreateAnonymousImage(
+ const gfx::Size& size,
+ gfx::BufferFormat format,
+ unsigned internalformat) {
+ scoped_refptr<ui::NativePixmap> pixmap =
+ ui::OzonePlatform::GetInstance()
+ ->GetSurfaceFactoryOzone()
+ ->CreateNativePixmap(gpu::kNullSurfaceHandle, size, format,
+ gfx::BufferUsage::SCANOUT);
+ if (!pixmap.get()) {
+ LOG(ERROR) << "Failed to create pixmap " << size.ToString() << " format "
+ << static_cast<int>(format);
+ return nullptr;
+ }
+ scoped_refptr<ui::GLImageOzoneNativePixmap> image(
+ new ui::GLImageOzoneNativePixmap(size, internalformat));
+ if (!image->Initialize(pixmap.get(), format)) {
+ LOG(ERROR) << "Failed to create GLImage " << size.ToString() << " format "
+ << static_cast<int>(format);
+ return nullptr;
+ }
+ return image;
+}
+
+unsigned GpuMemoryBufferFactoryOzoneNativePixmap::RequiredTextureType() {
+ return GL_TEXTURE_EXTERNAL_OES;
+}
+
} // namespace gpu
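[Editor's note, not part of the diff] A sketch of the new CreateAnonymousImage()
entry point (argument values assumed); the returned image has no client-side
GpuMemoryBuffer backing and, per RequiredTextureType(), is bound as
GL_TEXTURE_EXTERNAL_OES:

    scoped_refptr<gl::GLImage> image = factory->CreateAnonymousImage(
        gfx::Size(256, 256), gfx::BufferFormat::RGBA_8888, GL_RGBA);
    if (!image)
      return;  // creation can fail, e.g. if the native pixmap cannot be allocated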
diff --git a/chromium/gpu/ipc/service/gpu_memory_buffer_factory_ozone_native_pixmap.h b/chromium/gpu/ipc/service/gpu_memory_buffer_factory_ozone_native_pixmap.h
index 5a132bbdaeb..45be7524496 100644
--- a/chromium/gpu/ipc/service/gpu_memory_buffer_factory_ozone_native_pixmap.h
+++ b/chromium/gpu/ipc/service/gpu_memory_buffer_factory_ozone_native_pixmap.h
@@ -49,6 +49,11 @@ class GPU_EXPORT GpuMemoryBufferFactoryOzoneNativePixmap
unsigned internalformat,
int client_id,
SurfaceHandle surface_handle) override;
+ scoped_refptr<gl::GLImage> CreateAnonymousImage(
+ const gfx::Size& size,
+ gfx::BufferFormat format,
+ unsigned internalformat) override;
+ unsigned RequiredTextureType() override;
private:
using NativePixmapMapKey = std::pair<int, int>;
diff --git a/chromium/gpu/ipc/service/gpu_memory_buffer_factory_test_template.h b/chromium/gpu/ipc/service/gpu_memory_buffer_factory_test_template.h
index 4c958f2c440..0af2d6e80bd 100644
--- a/chromium/gpu/ipc/service/gpu_memory_buffer_factory_test_template.h
+++ b/chromium/gpu/ipc/service/gpu_memory_buffer_factory_test_template.h
@@ -8,14 +8,6 @@
#ifndef GPU_IPC_SERVICE_GPU_MEMORY_BUFFER_FACTORY_TEST_TEMPLATE_H_
#define GPU_IPC_SERVICE_GPU_MEMORY_BUFFER_FACTORY_TEST_TEMPLATE_H_
-#if defined(OS_ANDROID)
-// TODO(markdittmer): Service code shouldn't depend on client code.
-// See crbug.com/608800.
-#include "gpu/ipc/client/android/in_process_surface_texture_manager.h"
-
-#include "gpu/ipc/common/android/surface_texture_manager.h"
-#endif
-
#include "gpu/ipc/common/gpu_memory_buffer_support.h"
#include "gpu/ipc/service/gpu_memory_buffer_factory.h"
#include "testing/gtest/include/gtest/gtest.h"
@@ -32,11 +24,6 @@ class GpuMemoryBufferFactoryTest : public testing::Test {
TYPED_TEST_CASE_P(GpuMemoryBufferFactoryTest);
TYPED_TEST_P(GpuMemoryBufferFactoryTest, CreateGpuMemoryBuffer) {
-#if defined(OS_ANDROID)
- SurfaceTextureManager::SetInstance(
- InProcessSurfaceTextureManager::GetInstance());
-#endif
-
const gfx::GpuMemoryBufferId kBufferId(1);
const int kClientId = 1;
diff --git a/chromium/gpu/ipc/service/gpu_vsync_provider.h b/chromium/gpu/ipc/service/gpu_vsync_provider.h
new file mode 100644
index 00000000000..13d0ac210a1
--- /dev/null
+++ b/chromium/gpu/ipc/service/gpu_vsync_provider.h
@@ -0,0 +1,48 @@
+// Copyright (c) 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef GPU_IPC_SERVICE_GPU_VSYNC_PROVIDER_H_
+#define GPU_IPC_SERVICE_GPU_VSYNC_PROVIDER_H_
+
+#include <memory>
+
+#include "base/callback.h"
+#include "base/time/time.h"
+#include "gpu/gpu_export.h"
+#include "gpu/ipc/common/surface_handle.h"
+
+namespace gpu {
+
+class GpuVSyncWorker;
+
+// Implements waiting for VSync signal on background thread.
+class GPU_EXPORT GpuVSyncProvider {
+ public:
+ // Once VSync is enabled, this callback is repeatedly invoked on every VSync.
+  // The call is made on a background thread to avoid increased latency due to
+ // serializing callback invocation with other GPU tasks. The code that
+ // implements the callback function is expected to handle that.
+ using VSyncCallback = base::Callback<void(base::TimeTicks timestamp)>;
+
+ ~GpuVSyncProvider();
+
+ static std::unique_ptr<GpuVSyncProvider> Create(const VSyncCallback& callback,
+ SurfaceHandle surface_handle);
+
+ // Enable or disable VSync production.
+ void EnableVSync(bool enabled);
+
+ private:
+#if defined(OS_WIN)
+ GpuVSyncProvider(const VSyncCallback& callback, SurfaceHandle surface_handle);
+
+ std::unique_ptr<GpuVSyncWorker> vsync_worker_;
+#endif // defined(OS_WIN)
+
+ DISALLOW_COPY_AND_ASSIGN(GpuVSyncProvider);
+};
+
+} // namespace gpu
+
+#endif // GPU_IPC_SERVICE_GPU_VSYNC_PROVIDER_H_
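[Editor's note, not part of the diff] Typical use of the new provider, mirroring
the unit test added further below (OnVSync(base::TimeTicks) is an assumed
callback and must tolerate running on the worker thread):

    std::unique_ptr<gpu::GpuVSyncProvider> provider =
        gpu::GpuVSyncProvider::Create(base::Bind(&OnVSync), surface_handle);
    if (provider)  // null on platforms without an implementation
      provider->EnableVSync(true);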
diff --git a/chromium/gpu/ipc/service/gpu_vsync_provider_posix.cc b/chromium/gpu/ipc/service/gpu_vsync_provider_posix.cc
new file mode 100644
index 00000000000..00039f65023
--- /dev/null
+++ b/chromium/gpu/ipc/service/gpu_vsync_provider_posix.cc
@@ -0,0 +1,22 @@
+// Copyright (c) 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "gpu/ipc/service/gpu_vsync_provider.h"
+
+namespace gpu {
+
+/* static */
+std::unique_ptr<GpuVSyncProvider> GpuVSyncProvider::Create(
+ const VSyncCallback& callback,
+ SurfaceHandle surface_handle) {
+ return std::unique_ptr<GpuVSyncProvider>();
+}
+
+GpuVSyncProvider::~GpuVSyncProvider() = default;
+
+void GpuVSyncProvider::EnableVSync(bool enabled) {
+ NOTREACHED();
+}
+
+} // namespace gpu
diff --git a/chromium/gpu/ipc/service/gpu_vsync_provider_unittest_win.cc b/chromium/gpu/ipc/service/gpu_vsync_provider_unittest_win.cc
new file mode 100644
index 00000000000..2b96b4a493c
--- /dev/null
+++ b/chromium/gpu/ipc/service/gpu_vsync_provider_unittest_win.cc
@@ -0,0 +1,84 @@
+// Copyright (c) 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "gpu/ipc/service/gpu_vsync_provider.h"
+
+#include <memory>
+
+#include "base/bind.h"
+#include "base/synchronization/lock.h"
+#include "base/synchronization/waitable_event.h"
+#include "testing/gtest/include/gtest/gtest.h"
+#include "ui/base/win/hidden_window.h"
+
+namespace gpu {
+
+class GpuVSyncProviderTest : public testing::Test {
+ public:
+ GpuVSyncProviderTest()
+ : vsync_event_(base::WaitableEvent::ResetPolicy::AUTOMATIC,
+ base::WaitableEvent::InitialState::NOT_SIGNALED) {}
+ ~GpuVSyncProviderTest() override {}
+
+ void SetUp() override {}
+
+ void TearDown() override {}
+
+ void OnVSync(base::TimeTicks timestamp) {
+ // This is called on VSync worker thread.
+ base::AutoLock lock(lock_);
+ if (++vsync_count_ == 3)
+ vsync_event_.Signal();
+ }
+
+ int vsync_count() {
+ base::AutoLock lock(lock_);
+ return vsync_count_;
+ }
+
+ void reset_vsync_count() {
+ base::AutoLock lock(lock_);
+ vsync_count_ = 0;
+ }
+
+ protected:
+ base::WaitableEvent vsync_event_;
+
+ private:
+ base::Lock lock_;
+ int vsync_count_ = 0;
+};
+
+TEST_F(GpuVSyncProviderTest, VSyncSignalTest) {
+ SurfaceHandle window = ui::GetHiddenWindow();
+
+ std::unique_ptr<GpuVSyncProvider> provider = GpuVSyncProvider::Create(
+ base::Bind(&GpuVSyncProviderTest::OnVSync, base::Unretained(this)),
+ window);
+
+ constexpr base::TimeDelta wait_timeout =
+ base::TimeDelta::FromMilliseconds(300);
+
+  // Verify that there are no VSync signals before the provider is enabled.
+ bool wait_result = vsync_event_.TimedWait(wait_timeout);
+ EXPECT_FALSE(wait_result);
+ EXPECT_EQ(0, vsync_count());
+
+ provider->EnableVSync(true);
+
+ vsync_event_.Wait();
+
+ provider->EnableVSync(false);
+
+ // Verify that VSync callbacks stop coming after disabling.
+ // Please note that it might still be possible for one
+ // callback to be in flight on VSync worker thread, so |vsync_count_|
+ // could still be incremented once, but not enough times to trigger
+ // |vsync_event_|.
+ reset_vsync_count();
+ wait_result = vsync_event_.TimedWait(wait_timeout);
+ EXPECT_FALSE(wait_result);
+}
+
+} // namespace gpu
diff --git a/chromium/gpu/ipc/service/gpu_vsync_provider_win.cc b/chromium/gpu/ipc/service/gpu_vsync_provider_win.cc
new file mode 100644
index 00000000000..a996e6b6b02
--- /dev/null
+++ b/chromium/gpu/ipc/service/gpu_vsync_provider_win.cc
@@ -0,0 +1,264 @@
+// Copyright (c) 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "gpu/ipc/service/gpu_vsync_provider.h"
+
+#include <string>
+
+#include "base/atomicops.h"
+#include "base/strings/stringprintf.h"
+#include "base/threading/thread.h"
+#include "base/trace_event/trace_event.h"
+
+#include <windows.h>
+
+namespace gpu {
+
+namespace {
+// from <D3dkmthk.h>
+typedef LONG NTSTATUS;
+typedef UINT D3DKMT_HANDLE;
+typedef UINT D3DDDI_VIDEO_PRESENT_SOURCE_ID;
+
+#define STATUS_SUCCESS ((NTSTATUS)0x00000000L)
+
+typedef struct _D3DKMT_OPENADAPTERFROMHDC {
+ HDC hDc;
+ D3DKMT_HANDLE hAdapter;
+ LUID AdapterLuid;
+ D3DDDI_VIDEO_PRESENT_SOURCE_ID VidPnSourceId;
+} D3DKMT_OPENADAPTERFROMHDC;
+
+typedef struct _D3DKMT_CLOSEADAPTER {
+ D3DKMT_HANDLE hAdapter;
+} D3DKMT_CLOSEADAPTER;
+
+typedef struct _D3DKMT_WAITFORVERTICALBLANKEVENT {
+ D3DKMT_HANDLE hAdapter;
+ D3DKMT_HANDLE hDevice;
+ D3DDDI_VIDEO_PRESENT_SOURCE_ID VidPnSourceId;
+} D3DKMT_WAITFORVERTICALBLANKEVENT;
+
+typedef NTSTATUS(APIENTRY* PFND3DKMTOPENADAPTERFROMHDC)(
+ D3DKMT_OPENADAPTERFROMHDC*);
+typedef NTSTATUS(APIENTRY* PFND3DKMTCLOSEADAPTER)(D3DKMT_CLOSEADAPTER*);
+typedef NTSTATUS(APIENTRY* PFND3DKMTWAITFORVERTICALBLANKEVENT)(
+ D3DKMT_WAITFORVERTICALBLANKEVENT*);
+} // namespace
+
+// The actual implementation of background tasks plus any state that might be
+// needed on the worker thread.
+class GpuVSyncWorker : public base::Thread {
+ public:
+ GpuVSyncWorker(const GpuVSyncProvider::VSyncCallback& callback,
+ SurfaceHandle surface_handle);
+ ~GpuVSyncWorker() override;
+
+ void Enable(bool enabled);
+ void StartRunningVSyncOnThread();
+ void WaitForVSyncOnThread();
+ void SendVSyncUpdate(base::TimeTicks timestamp);
+
+ private:
+ void Reschedule();
+ void OpenAdapter(const wchar_t* device_name);
+ void CloseAdapter();
+ bool WaitForVBlankEvent();
+
+ // Specifies whether background tasks are running.
+ // This can be set on background thread only.
+ bool running_ = false;
+
+  // Specifies whether the worker is enabled. This is accessed from both
+ // threads but can be changed on the main thread only.
+ base::subtle::AtomicWord enabled_ = false;
+
+ const GpuVSyncProvider::VSyncCallback callback_;
+ const SurfaceHandle surface_handle_;
+
+ PFND3DKMTOPENADAPTERFROMHDC open_adapter_from_hdc_ptr_;
+ PFND3DKMTCLOSEADAPTER close_adapter_ptr_;
+ PFND3DKMTWAITFORVERTICALBLANKEVENT wait_for_vertical_blank_event_ptr_;
+
+ std::wstring current_device_name_;
+ D3DKMT_HANDLE current_adapter_handle_ = 0;
+ D3DDDI_VIDEO_PRESENT_SOURCE_ID current_source_id_ = 0;
+};
+
+GpuVSyncWorker::GpuVSyncWorker(const GpuVSyncProvider::VSyncCallback& callback,
+ SurfaceHandle surface_handle)
+ : base::Thread(base::StringPrintf("VSync-%d", surface_handle)),
+ callback_(callback),
+ surface_handle_(surface_handle) {
+ HMODULE gdi32 = GetModuleHandle(L"gdi32");
+ if (!gdi32) {
+ NOTREACHED() << "Can't open gdi32.dll";
+ return;
+ }
+
+ open_adapter_from_hdc_ptr_ = reinterpret_cast<PFND3DKMTOPENADAPTERFROMHDC>(
+ ::GetProcAddress(gdi32, "D3DKMTOpenAdapterFromHdc"));
+ if (!open_adapter_from_hdc_ptr_) {
+ NOTREACHED() << "Can't find D3DKMTOpenAdapterFromHdc in gdi32.dll";
+ return;
+ }
+
+ close_adapter_ptr_ = reinterpret_cast<PFND3DKMTCLOSEADAPTER>(
+ ::GetProcAddress(gdi32, "D3DKMTCloseAdapter"));
+ if (!close_adapter_ptr_) {
+ NOTREACHED() << "Can't find D3DKMTCloseAdapter in gdi32.dll";
+ return;
+ }
+
+ wait_for_vertical_blank_event_ptr_ =
+ reinterpret_cast<PFND3DKMTWAITFORVERTICALBLANKEVENT>(
+ ::GetProcAddress(gdi32, "D3DKMTWaitForVerticalBlankEvent"));
+ if (!wait_for_vertical_blank_event_ptr_) {
+ NOTREACHED() << "Can't find D3DKMTWaitForVerticalBlankEvent in gdi32.dll";
+ return;
+ }
+}
+
+GpuVSyncWorker::~GpuVSyncWorker() {
+  // The Stop() call below will block until this task has finished running,
+  // so it is safe to post the task here and pass an unretained pointer.
+ task_runner()->PostTask(FROM_HERE, base::Bind(&GpuVSyncWorker::CloseAdapter,
+ base::Unretained(this)));
+ Stop();
+
+ DCHECK_EQ(0u, current_adapter_handle_);
+ DCHECK(current_device_name_.empty());
+}
+
+void GpuVSyncWorker::Enable(bool enabled) {
+ auto was_enabled = base::subtle::NoBarrier_AtomicExchange(&enabled_, enabled);
+
+ if (enabled && !was_enabled)
+ task_runner()->PostTask(
+ FROM_HERE, base::Bind(&GpuVSyncWorker::StartRunningVSyncOnThread,
+ base::Unretained(this)));
+}
+
+void GpuVSyncWorker::StartRunningVSyncOnThread() {
+ DCHECK(base::PlatformThread::CurrentId() == GetThreadId());
+
+ if (!running_) {
+ running_ = true;
+ WaitForVSyncOnThread();
+ }
+}
+
+void GpuVSyncWorker::WaitForVSyncOnThread() {
+ DCHECK(base::PlatformThread::CurrentId() == GetThreadId());
+
+ TRACE_EVENT0("gpu", "GpuVSyncWorker::WaitForVSyncOnThread");
+
+ HMONITOR monitor =
+ MonitorFromWindow(surface_handle_, MONITOR_DEFAULTTONEAREST);
+ MONITORINFOEX monitor_info;
+ monitor_info.cbSize = sizeof(MONITORINFOEX);
+ BOOL success = GetMonitorInfo(monitor, &monitor_info);
+ CHECK(success);
+
+ if (current_device_name_.compare(monitor_info.szDevice) != 0) {
+ // Monitor changed. Close the current adapter handle and open a new one.
+ CloseAdapter();
+ OpenAdapter(monitor_info.szDevice);
+ }
+
+ if (WaitForVBlankEvent()) {
+    // Note: this sends the update on the background thread; the callback is
+    // expected to handle that.
+ SendVSyncUpdate(base::TimeTicks::Now());
+ }
+
+ Reschedule();
+}
+
+void GpuVSyncWorker::SendVSyncUpdate(base::TimeTicks timestamp) {
+ if (base::subtle::NoBarrier_Load(&enabled_)) {
+ TRACE_EVENT0("gpu", "GpuVSyncWorker::SendVSyncUpdate");
+ callback_.Run(timestamp);
+ }
+}
+
+void GpuVSyncWorker::Reschedule() {
+ // Restart the task if still enabled.
+ if (base::subtle::NoBarrier_Load(&enabled_)) {
+ task_runner()->PostTask(FROM_HERE,
+ base::Bind(&GpuVSyncWorker::WaitForVSyncOnThread,
+ base::Unretained(this)));
+ } else {
+ running_ = false;
+ }
+}
+
+void GpuVSyncWorker::OpenAdapter(const wchar_t* device_name) {
+ DCHECK_EQ(0u, current_adapter_handle_);
+
+ HDC hdc = CreateDC(NULL, device_name, NULL, NULL);
+
+ D3DKMT_OPENADAPTERFROMHDC open_adapter_data;
+ open_adapter_data.hDc = hdc;
+
+ NTSTATUS result = open_adapter_from_hdc_ptr_(&open_adapter_data);
+ DeleteDC(hdc);
+
+ CHECK(result == STATUS_SUCCESS);
+
+ current_device_name_ = device_name;
+ current_adapter_handle_ = open_adapter_data.hAdapter;
+ current_source_id_ = open_adapter_data.VidPnSourceId;
+}
+
+void GpuVSyncWorker::CloseAdapter() {
+ if (current_adapter_handle_ != 0) {
+ D3DKMT_CLOSEADAPTER close_adapter_data;
+ close_adapter_data.hAdapter = current_adapter_handle_;
+
+ NTSTATUS result = close_adapter_ptr_(&close_adapter_data);
+ CHECK(result == STATUS_SUCCESS);
+
+ current_adapter_handle_ = 0;
+ current_device_name_.clear();
+ }
+}
+
+bool GpuVSyncWorker::WaitForVBlankEvent() {
+ D3DKMT_WAITFORVERTICALBLANKEVENT wait_for_vertical_blank_event_data;
+ wait_for_vertical_blank_event_data.hAdapter = current_adapter_handle_;
+ wait_for_vertical_blank_event_data.hDevice = 0;
+ wait_for_vertical_blank_event_data.VidPnSourceId = current_source_id_;
+
+ NTSTATUS result =
+ wait_for_vertical_blank_event_ptr_(&wait_for_vertical_blank_event_data);
+
+ return result == STATUS_SUCCESS;
+}
+
+/* static */
+std::unique_ptr<GpuVSyncProvider> GpuVSyncProvider::Create(
+ const VSyncCallback& callback,
+ SurfaceHandle surface_handle) {
+ return std::unique_ptr<GpuVSyncProvider>(
+ new GpuVSyncProvider(callback, surface_handle));
+}
+
+GpuVSyncProvider::GpuVSyncProvider(const VSyncCallback& callback,
+ SurfaceHandle surface_handle)
+ : vsync_worker_(new GpuVSyncWorker(callback, surface_handle)) {
+ // Start the thread.
+ base::Thread::Options options;
+ // TODO(stanisc): might consider even higher priority - REALTIME_AUDIO.
+ options.priority = base::ThreadPriority::DISPLAY;
+ vsync_worker_->StartWithOptions(options);
+}
+
+GpuVSyncProvider::~GpuVSyncProvider() = default;
+
+void GpuVSyncProvider::EnableVSync(bool enabled) {
+ vsync_worker_->Enable(enabled);
+}
+
+} // namespace gpu
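The new gpu_vsync_provider_win.cc above drives v-sync entirely from three undocumented D3DKMT entry points resolved out of gdi32.dll: it opens an adapter handle from an HDC for the monitor that currently hosts the window, blocks in D3DKMTWaitForVerticalBlankEvent, reports a timestamp through the callback, and reposts itself while enabled. As a rough standalone illustration of that sequence (not part of this patch; the struct declarations are the same reduced subset the file above uses, and error handling is minimal), the sketch below waits for a single vertical blank on the primary display:

#include <windows.h>
#include <cstdio>

typedef LONG NTSTATUS;
typedef UINT D3DKMT_HANDLE;
typedef UINT D3DDDI_VIDEO_PRESENT_SOURCE_ID;

struct D3DKMT_OPENADAPTERFROMHDC {
  HDC hDc;
  D3DKMT_HANDLE hAdapter;
  LUID AdapterLuid;
  D3DDDI_VIDEO_PRESENT_SOURCE_ID VidPnSourceId;
};
struct D3DKMT_CLOSEADAPTER {
  D3DKMT_HANDLE hAdapter;
};
struct D3DKMT_WAITFORVERTICALBLANKEVENT {
  D3DKMT_HANDLE hAdapter;
  D3DKMT_HANDLE hDevice;
  D3DDDI_VIDEO_PRESENT_SOURCE_ID VidPnSourceId;
};

typedef NTSTATUS(APIENTRY* PFNOPENADAPTER)(D3DKMT_OPENADAPTERFROMHDC*);
typedef NTSTATUS(APIENTRY* PFNCLOSEADAPTER)(D3DKMT_CLOSEADAPTER*);
typedef NTSTATUS(APIENTRY* PFNWAITVBLANK)(D3DKMT_WAITFORVERTICALBLANKEVENT*);

int main() {
  const NTSTATUS kStatusSuccess = 0;

  // gdi32.dll is already loaded because this binary links CreateDCW/DeleteDC.
  HMODULE gdi32 = GetModuleHandleW(L"gdi32");
  auto open_adapter = reinterpret_cast<PFNOPENADAPTER>(
      GetProcAddress(gdi32, "D3DKMTOpenAdapterFromHdc"));
  auto close_adapter = reinterpret_cast<PFNCLOSEADAPTER>(
      GetProcAddress(gdi32, "D3DKMTCloseAdapter"));
  auto wait_vblank = reinterpret_cast<PFNWAITVBLANK>(
      GetProcAddress(gdi32, "D3DKMTWaitForVerticalBlankEvent"));
  if (!open_adapter || !close_adapter || !wait_vblank)
    return 1;

  // Resolve the primary monitor's device name and open an adapter handle from
  // a DC on that device, the same sequence WaitForVSyncOnThread() performs.
  HMONITOR monitor = MonitorFromPoint({0, 0}, MONITOR_DEFAULTTOPRIMARY);
  MONITORINFOEXW monitor_info;
  monitor_info.cbSize = sizeof(monitor_info);
  if (!GetMonitorInfoW(monitor, &monitor_info))
    return 1;

  HDC hdc = CreateDCW(nullptr, monitor_info.szDevice, nullptr, nullptr);
  D3DKMT_OPENADAPTERFROMHDC open_data = {};
  open_data.hDc = hdc;
  NTSTATUS status = open_adapter(&open_data);
  DeleteDC(hdc);
  if (status != kStatusSuccess)
    return 1;

  // Block until the next vertical blank on that adapter/source, then clean up.
  D3DKMT_WAITFORVERTICALBLANKEVENT wait_data = {};
  wait_data.hAdapter = open_data.hAdapter;
  wait_data.VidPnSourceId = open_data.VidPnSourceId;
  bool ok = wait_vblank(&wait_data) == kStatusSuccess;
  printf("vblank wait %s\n", ok ? "succeeded" : "failed");

  D3DKMT_CLOSEADAPTER close_data = {open_data.hAdapter};
  close_adapter(&close_data);
  return ok ? 0 : 1;
}

The production code above repeats this wait in a posted-task loop rather than a blocking for-loop, so that Enable(false) can stop the cycle at the next reschedule point.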
diff --git a/chromium/gpu/ipc/service/image_transport_surface.h b/chromium/gpu/ipc/service/image_transport_surface.h
index 1b6b78f5196..41f3c974a31 100644
--- a/chromium/gpu/ipc/service/image_transport_surface.h
+++ b/chromium/gpu/ipc/service/image_transport_surface.h
@@ -16,7 +16,6 @@
#include "ui/gl/gl_surface.h"
namespace gpu {
-class GpuChannelManager;
class ImageTransportSurfaceDelegate;
// The GPU process is agnostic as to how it displays results. On some platforms
@@ -36,7 +35,7 @@ class ImageTransportSurface {
static scoped_refptr<gl::GLSurface> CreateNativeSurface(
base::WeakPtr<ImageTransportSurfaceDelegate> stub,
SurfaceHandle surface_handle,
- gl::GLSurface::Format format);
+ gl::GLSurfaceFormat format);
private:
DISALLOW_COPY_AND_ASSIGN(ImageTransportSurface);
diff --git a/chromium/gpu/ipc/service/image_transport_surface_android.cc b/chromium/gpu/ipc/service/image_transport_surface_android.cc
index 214265901bf..f0eee668e7d 100644
--- a/chromium/gpu/ipc/service/image_transport_surface_android.cc
+++ b/chromium/gpu/ipc/service/image_transport_surface_android.cc
@@ -16,7 +16,7 @@ namespace gpu {
scoped_refptr<gl::GLSurface> ImageTransportSurface::CreateNativeSurface(
base::WeakPtr<ImageTransportSurfaceDelegate> delegate,
SurfaceHandle surface_handle,
- gl::GLSurface::Format format) {
+ gl::GLSurfaceFormat format) {
if (gl::GetGLImplementation() == gl::kGLImplementationMockGL)
return new gl::GLSurfaceStub;
DCHECK(GpuSurfaceLookup::GetInstance());
diff --git a/chromium/gpu/ipc/service/image_transport_surface_linux.cc b/chromium/gpu/ipc/service/image_transport_surface_linux.cc
index 2cbce86d811..54cf4b2571a 100644
--- a/chromium/gpu/ipc/service/image_transport_surface_linux.cc
+++ b/chromium/gpu/ipc/service/image_transport_surface_linux.cc
@@ -13,7 +13,7 @@ namespace gpu {
scoped_refptr<gl::GLSurface> ImageTransportSurface::CreateNativeSurface(
base::WeakPtr<ImageTransportSurfaceDelegate> delegate,
SurfaceHandle surface_handle,
- gl::GLSurface::Format format) {
+ gl::GLSurfaceFormat format) {
DCHECK_NE(surface_handle, kNullSurfaceHandle);
scoped_refptr<gl::GLSurface> surface;
#if defined(USE_OZONE)
diff --git a/chromium/gpu/ipc/service/image_transport_surface_mac.mm b/chromium/gpu/ipc/service/image_transport_surface_mac.mm
index 42591a7bcc9..2d306748ad6 100644
--- a/chromium/gpu/ipc/service/image_transport_surface_mac.mm
+++ b/chromium/gpu/ipc/service/image_transport_surface_mac.mm
@@ -21,7 +21,9 @@ class DRTSurfaceOSMesa : public gl::GLSurfaceOSMesa {
public:
// Size doesn't matter, the surface is resized to the right size later.
DRTSurfaceOSMesa()
- : GLSurfaceOSMesa(gl::GLSurface::SURFACE_OSMESA_RGBA, gfx::Size(1, 1)) {}
+ : GLSurfaceOSMesa(
+ gl::GLSurfaceFormat(gl::GLSurfaceFormat::PIXEL_LAYOUT_RGBA),
+ gfx::Size(1, 1)) {}
// Implement a subset of GLSurface.
gfx::SwapResult SwapBuffers() override;
@@ -43,7 +45,7 @@ bool g_allow_os_mesa = false;
scoped_refptr<gl::GLSurface> ImageTransportSurface::CreateNativeSurface(
base::WeakPtr<ImageTransportSurfaceDelegate> delegate,
SurfaceHandle surface_handle,
- gl::GLSurface::Format format) {
+ gl::GLSurfaceFormat format) {
DCHECK_NE(surface_handle, kNullSurfaceHandle);
switch (gl::GetGLImplementation()) {
diff --git a/chromium/gpu/ipc/service/image_transport_surface_overlay_mac.h b/chromium/gpu/ipc/service/image_transport_surface_overlay_mac.h
index d2261e151a8..cdd267df5cf 100644
--- a/chromium/gpu/ipc/service/image_transport_surface_overlay_mac.h
+++ b/chromium/gpu/ipc/service/image_transport_surface_overlay_mac.h
@@ -39,7 +39,7 @@ class ImageTransportSurfaceOverlayMac : public gl::GLSurface,
base::WeakPtr<ImageTransportSurfaceDelegate> delegate);
// GLSurface implementation
- bool Initialize(gl::GLSurface::Format format) override;
+ bool Initialize(gl::GLSurfaceFormat format) override;
void Destroy() override;
bool Resize(const gfx::Size& size,
float scale_factor,
diff --git a/chromium/gpu/ipc/service/image_transport_surface_overlay_mac.mm b/chromium/gpu/ipc/service/image_transport_surface_overlay_mac.mm
index 84a8fc82d00..5c570f7d346 100644
--- a/chromium/gpu/ipc/service/image_transport_surface_overlay_mac.mm
+++ b/chromium/gpu/ipc/service/image_transport_surface_overlay_mac.mm
@@ -91,7 +91,7 @@ ImageTransportSurfaceOverlayMac::~ImageTransportSurfaceOverlayMac() {
Destroy();
}
-bool ImageTransportSurfaceOverlayMac::Initialize(gl::GLSurface::Format format) {
+bool ImageTransportSurfaceOverlayMac::Initialize(gl::GLSurfaceFormat format) {
delegate_->SetLatencyInfoCallback(
base::Bind(&ImageTransportSurfaceOverlayMac::SetLatencyInfo,
base::Unretained(this)));
diff --git a/chromium/gpu/ipc/service/image_transport_surface_win.cc b/chromium/gpu/ipc/service/image_transport_surface_win.cc
index 8ba97f66a3b..a4b0a5d8b58 100644
--- a/chromium/gpu/ipc/service/image_transport_surface_win.cc
+++ b/chromium/gpu/ipc/service/image_transport_surface_win.cc
@@ -21,7 +21,7 @@ namespace gpu {
scoped_refptr<gl::GLSurface> ImageTransportSurface::CreateNativeSurface(
base::WeakPtr<ImageTransportSurfaceDelegate> delegate,
SurfaceHandle surface_handle,
- gl::GLSurface::Format format) {
+ gl::GLSurfaceFormat format) {
DCHECK_NE(surface_handle, kNullSurfaceHandle);
scoped_refptr<gl::GLSurface> surface;
diff --git a/chromium/gpu/ipc/service/pass_through_image_transport_surface.cc b/chromium/gpu/ipc/service/pass_through_image_transport_surface.cc
index e269cd2c935..e3587409bbf 100644
--- a/chromium/gpu/ipc/service/pass_through_image_transport_surface.cc
+++ b/chromium/gpu/ipc/service/pass_through_image_transport_surface.cc
@@ -23,7 +23,7 @@ PassThroughImageTransportSurface::PassThroughImageTransportSurface(
weak_ptr_factory_(this) {}
bool PassThroughImageTransportSurface::Initialize(
- gl::GLSurface::Format format) {
+ gl::GLSurfaceFormat format) {
// The surface is assumed to have already been initialized.
delegate_->SetLatencyInfoCallback(
base::Bind(&PassThroughImageTransportSurface::SetLatencyInfo,
diff --git a/chromium/gpu/ipc/service/pass_through_image_transport_surface.h b/chromium/gpu/ipc/service/pass_through_image_transport_surface.h
index 941120da81d..eb1f9f0b373 100644
--- a/chromium/gpu/ipc/service/pass_through_image_transport_surface.h
+++ b/chromium/gpu/ipc/service/pass_through_image_transport_surface.h
@@ -18,7 +18,6 @@
#include "ui/gl/gl_surface.h"
namespace gpu {
-class GpuChannelManager;
// An implementation of ImageTransportSurface that implements GLSurface through
// GLSurfaceAdapter, thereby forwarding GLSurface methods through to it.
@@ -29,7 +28,7 @@ class PassThroughImageTransportSurface : public gl::GLSurfaceAdapter {
gl::GLSurface* surface);
// GLSurface implementation.
- bool Initialize(gl::GLSurface::Format format) override;
+ bool Initialize(gl::GLSurfaceFormat format) override;
void Destroy() override;
gfx::SwapResult SwapBuffers() override;
void SwapBuffersAsync(const SwapCompletionCallback& callback) override;
diff --git a/chromium/gpu/swiftshader_tests_main.cc b/chromium/gpu/swiftshader_tests_main.cc
new file mode 100644
index 00000000000..4badbb9d8c2
--- /dev/null
+++ b/chromium/gpu/swiftshader_tests_main.cc
@@ -0,0 +1,36 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/at_exit.h"
+#include "base/bind.h"
+#include "base/command_line.h"
+#include "base/message_loop/message_loop.h"
+#include "base/test/launcher/unit_test_launcher.h"
+#include "base/test/test_suite.h"
+#include "testing/gmock/include/gmock/gmock.h"
+#include "testing/gtest/include/gtest/gtest.h"
+#include "third_party/swiftshader/tests/unittests/SwiftShaderTest.h"
+
+namespace {
+
+int RunHelper(base::TestSuite* test_suite) {
+ base::MessageLoop message_loop;
+ return test_suite->Run();
+}
+
+} // namespace
+
+int main(int argc, char** argv) {
+ base::CommandLine::Init(argc, argv);
+ testing::InitGoogleMock(&argc, argv);
+ testing::AddGlobalTestEnvironment(new SwiftShaderTestEnvironment());
+ base::TestSuite test_suite(argc, argv);
+ int rt = base::LaunchUnitTestsWithOptions(
+ argc, argv,
+ 1, // Run tests serially.
+ 0, // Disable batching.
+ true, // Use job objects.
+ base::Bind(&RunHelper, base::Unretained(&test_suite)));
+ return rt;
+}
diff --git a/chromium/gpu/tools/compositor_model_bench/render_tree.cc b/chromium/gpu/tools/compositor_model_bench/render_tree.cc
index 642c5fe5bd1..752975c28b9 100644
--- a/chromium/gpu/tools/compositor_model_bench/render_tree.cc
+++ b/chromium/gpu/tools/compositor_model_bench/render_tree.cc
@@ -119,21 +119,21 @@ std::unique_ptr<RenderNode> InterpretNode(const base::DictionaryValue& node);
std::string ValueTypeAsString(Value::Type type) {
switch (type) {
- case Value::TYPE_NULL:
+ case Value::Type::NONE:
return "NULL";
- case Value::TYPE_BOOLEAN:
+ case Value::Type::BOOLEAN:
return "BOOLEAN";
- case Value::TYPE_INTEGER:
+ case Value::Type::INTEGER:
return "INTEGER";
- case Value::TYPE_DOUBLE:
+ case Value::Type::DOUBLE:
return "DOUBLE";
- case Value::TYPE_STRING:
+ case Value::Type::STRING:
return "STRING";
- case Value::TYPE_BINARY:
+ case Value::Type::BINARY:
return "BINARY";
- case Value::TYPE_DICTIONARY:
+ case Value::Type::DICTIONARY:
return "DICTIONARY";
- case Value::TYPE_LIST:
+ case Value::Type::LIST:
return "LIST";
default:
return "(UNKNOWN TYPE)";
@@ -180,12 +180,12 @@ bool VerifyListEntry(const base::ListValue& l,
}
bool InterpretCommonContents(const base::DictionaryValue& node, RenderNode* c) {
- if (!VerifyDictionaryEntry(node, "layerID", Value::TYPE_INTEGER) ||
- !VerifyDictionaryEntry(node, "width", Value::TYPE_INTEGER) ||
- !VerifyDictionaryEntry(node, "height", Value::TYPE_INTEGER) ||
- !VerifyDictionaryEntry(node, "drawsContent", Value::TYPE_BOOLEAN) ||
- !VerifyDictionaryEntry(node, "targetSurfaceID", Value::TYPE_INTEGER) ||
- !VerifyDictionaryEntry(node, "transform", Value::TYPE_LIST)) {
+ if (!VerifyDictionaryEntry(node, "layerID", Value::Type::INTEGER) ||
+ !VerifyDictionaryEntry(node, "width", Value::Type::INTEGER) ||
+ !VerifyDictionaryEntry(node, "height", Value::Type::INTEGER) ||
+ !VerifyDictionaryEntry(node, "drawsContent", Value::Type::BOOLEAN) ||
+ !VerifyDictionaryEntry(node, "targetSurfaceID", Value::Type::INTEGER) ||
+ !VerifyDictionaryEntry(node, "transform", Value::Type::LIST)) {
return false;
}
@@ -213,7 +213,7 @@ bool InterpretCommonContents(const base::DictionaryValue& node, RenderNode* c) {
}
float transform_mat[16];
for (int i = 0; i < 16; ++i) {
- if (!VerifyListEntry(*transform, i, Value::TYPE_DOUBLE, "Transform"))
+ if (!VerifyListEntry(*transform, i, Value::Type::DOUBLE, "Transform"))
return false;
double el;
transform->GetDouble(i, &el);
@@ -224,16 +224,16 @@ bool InterpretCommonContents(const base::DictionaryValue& node, RenderNode* c) {
if (!node.HasKey("tiles"))
return true;
- if (!VerifyDictionaryEntry(node, "tiles", Value::TYPE_DICTIONARY))
+ if (!VerifyDictionaryEntry(node, "tiles", Value::Type::DICTIONARY))
return false;
const base::DictionaryValue* tiles_dict;
node.GetDictionary("tiles", &tiles_dict);
- if (!VerifyDictionaryEntry(*tiles_dict, "dim", Value::TYPE_LIST))
+ if (!VerifyDictionaryEntry(*tiles_dict, "dim", Value::Type::LIST))
return false;
const base::ListValue* dim;
tiles_dict->GetList("dim", &dim);
- if (!VerifyListEntry(*dim, 0, Value::TYPE_INTEGER, "Tile dimension") ||
- !VerifyListEntry(*dim, 1, Value::TYPE_INTEGER, "Tile dimension")) {
+ if (!VerifyListEntry(*dim, 0, Value::Type::INTEGER, "Tile dimension") ||
+ !VerifyListEntry(*dim, 1, Value::Type::INTEGER, "Tile dimension")) {
return false;
}
int tile_width;
@@ -243,25 +243,25 @@ bool InterpretCommonContents(const base::DictionaryValue& node, RenderNode* c) {
dim->GetInteger(1, &tile_height);
c->set_tile_height(tile_height);
- if (!VerifyDictionaryEntry(*tiles_dict, "info", Value::TYPE_LIST))
+ if (!VerifyDictionaryEntry(*tiles_dict, "info", Value::Type::LIST))
return false;
const base::ListValue* tiles;
tiles_dict->GetList("info", &tiles);
for (unsigned int i = 0; i < tiles->GetSize(); ++i) {
- if (!VerifyListEntry(*tiles, i, Value::TYPE_DICTIONARY, "Tile info"))
+ if (!VerifyListEntry(*tiles, i, Value::Type::DICTIONARY, "Tile info"))
return false;
const base::DictionaryValue* tdict;
tiles->GetDictionary(i, &tdict);
- if (!VerifyDictionaryEntry(*tdict, "x", Value::TYPE_INTEGER) ||
- !VerifyDictionaryEntry(*tdict, "y", Value::TYPE_INTEGER)) {
+ if (!VerifyDictionaryEntry(*tdict, "x", Value::Type::INTEGER) ||
+ !VerifyDictionaryEntry(*tdict, "y", Value::Type::INTEGER)) {
return false;
}
Tile t;
tdict->GetInteger("x", &t.x);
tdict->GetInteger("y", &t.y);
if (tdict->HasKey("texID")) {
- if (!VerifyDictionaryEntry(*tdict, "texID", Value::TYPE_INTEGER))
+ if (!VerifyDictionaryEntry(*tdict, "texID", Value::Type::INTEGER))
return false;
tdict->GetInteger("texID", &t.texID);
} else {
@@ -273,9 +273,9 @@ bool InterpretCommonContents(const base::DictionaryValue& node, RenderNode* c) {
}
bool InterpretCCData(const base::DictionaryValue& node, CCNode* c) {
- if (!VerifyDictionaryEntry(node, "vertex_shader", Value::TYPE_STRING) ||
- !VerifyDictionaryEntry(node, "fragment_shader", Value::TYPE_STRING) ||
- !VerifyDictionaryEntry(node, "textures", Value::TYPE_LIST)) {
+ if (!VerifyDictionaryEntry(node, "vertex_shader", Value::Type::STRING) ||
+ !VerifyDictionaryEntry(node, "fragment_shader", Value::Type::STRING) ||
+ !VerifyDictionaryEntry(node, "textures", Value::Type::LIST)) {
return false;
}
std::string vertex_shader_name, fragment_shader_name;
@@ -287,15 +287,15 @@ bool InterpretCCData(const base::DictionaryValue& node, CCNode* c) {
const base::ListValue* textures;
node.GetList("textures", &textures);
for (unsigned int i = 0; i < textures->GetSize(); ++i) {
- if (!VerifyListEntry(*textures, i, Value::TYPE_DICTIONARY, "Tex list"))
+ if (!VerifyListEntry(*textures, i, Value::Type::DICTIONARY, "Tex list"))
return false;
const base::DictionaryValue* tex;
textures->GetDictionary(i, &tex);
- if (!VerifyDictionaryEntry(*tex, "texID", Value::TYPE_INTEGER) ||
- !VerifyDictionaryEntry(*tex, "height", Value::TYPE_INTEGER) ||
- !VerifyDictionaryEntry(*tex, "width", Value::TYPE_INTEGER) ||
- !VerifyDictionaryEntry(*tex, "format", Value::TYPE_STRING)) {
+ if (!VerifyDictionaryEntry(*tex, "texID", Value::Type::INTEGER) ||
+ !VerifyDictionaryEntry(*tex, "height", Value::Type::INTEGER) ||
+ !VerifyDictionaryEntry(*tex, "width", Value::Type::INTEGER) ||
+ !VerifyDictionaryEntry(*tex, "format", Value::Type::STRING)) {
return false;
}
Texture t;
@@ -338,9 +338,9 @@ std::unique_ptr<RenderNode> InterpretContentLayer(
if (!InterpretCommonContents(node, n.get()))
return nullptr;
- if (!VerifyDictionaryEntry(node, "type", Value::TYPE_STRING) ||
- !VerifyDictionaryEntry(node, "skipsDraw", Value::TYPE_BOOLEAN) ||
- !VerifyDictionaryEntry(node, "children", Value::TYPE_LIST)) {
+ if (!VerifyDictionaryEntry(node, "type", Value::Type::STRING) ||
+ !VerifyDictionaryEntry(node, "skipsDraw", Value::Type::BOOLEAN) ||
+ !VerifyDictionaryEntry(node, "children", Value::Type::LIST)) {
return nullptr;
}
@@ -371,7 +371,7 @@ std::unique_ptr<RenderNode> InterpretCanvasLayer(
if (!InterpretCommonContents(node, n.get()))
return nullptr;
- if (!VerifyDictionaryEntry(node, "type", Value::TYPE_STRING))
+ if (!VerifyDictionaryEntry(node, "type", Value::Type::STRING))
return nullptr;
std::string type;
@@ -390,7 +390,7 @@ std::unique_ptr<RenderNode> InterpretVideoLayer(
if (!InterpretCommonContents(node, n.get()))
return nullptr;
- if (!VerifyDictionaryEntry(node, "type", Value::TYPE_STRING))
+ if (!VerifyDictionaryEntry(node, "type", Value::Type::STRING))
return nullptr;
std::string type;
@@ -409,7 +409,7 @@ std::unique_ptr<RenderNode> InterpretImageLayer(
if (!InterpretCommonContents(node, n.get()))
return nullptr;
- if (!VerifyDictionaryEntry(node, "type", Value::TYPE_STRING))
+ if (!VerifyDictionaryEntry(node, "type", Value::Type::STRING))
return nullptr;
std::string type;
@@ -423,7 +423,7 @@ std::unique_ptr<RenderNode> InterpretImageLayer(
}
std::unique_ptr<RenderNode> InterpretNode(const base::DictionaryValue& node) {
- if (!VerifyDictionaryEntry(node, "type", Value::TYPE_STRING))
+ if (!VerifyDictionaryEntry(node, "type", Value::Type::STRING))
return nullptr;
std::string type;
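The render_tree.cc hunks above are a mechanical migration from the old base::Value TYPE_* constants to the scoped Value::Type names; the verification logic itself is unchanged. Below is a minimal sketch of the pattern these call sites rely on, assuming the DictionaryValue::Get and Value::GetType API of this era (the helper name IsIntegerEntry is hypothetical, not part of the patch):

#include "base/values.h"

// Hypothetical helper mirroring what a
// VerifyDictionaryEntry(node, key, Value::Type::INTEGER) call checks:
// the key is present and its value carries the expected scoped type.
bool IsIntegerEntry(const base::DictionaryValue& dict, const char* key) {
  const base::Value* value = nullptr;
  if (!dict.Get(key, &value))
    return false;
  return value->GetType() == base::Value::Type::INTEGER;
}

Once the type check passes, callers read the value with GetInteger(), as InterpretCommonContents() does for "layerID" and the tile dimensions.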
diff --git a/chromium/gpu/vulkan/vulkan_implementation.h b/chromium/gpu/vulkan/vulkan_implementation.h
index b669316e255..06d5e755b41 100644
--- a/chromium/gpu/vulkan/vulkan_implementation.h
+++ b/chromium/gpu/vulkan/vulkan_implementation.h
@@ -10,8 +10,6 @@
#include "gpu/vulkan/vulkan_export.h"
namespace gpu {
-class VulkanCommandPool;
-
VULKAN_EXPORT bool InitializeVulkan();
VULKAN_EXPORT bool VulkanSupported();